-rw-r--r--Documentation/ABI/obsolete/sysfs-bus-iio182
-rw-r--r--Documentation/ABI/stable/sysfs-driver-w1_ds243813
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uac22
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter85
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter-104-quad-861
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec16
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl103
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio125
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-frequency-adf437133
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-health-afe440x10
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-icm4260020
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-light-lm3533-als11
-rw-r--r--Documentation/ABI/testing/sysfs-bus-papr-pmem8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor31
-rw-r--r--Documentation/ABI/testing/sysfs-bus-thunderbolt82
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb11
-rw-r--r--Documentation/ABI/testing/sysfs-class-spi-eeprom19
-rw-r--r--Documentation/ABI/testing/sysfs-devices-removable18
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ufs126
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-memmap4
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs57
-rw-r--r--Documentation/ABI/testing/sysfs-ptp20
-rw-r--r--Documentation/PCI/pci-error-recovery.rst2
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst6
-rw-r--r--Documentation/admin-guide/bootconfig.rst30
-rw-r--r--Documentation/admin-guide/kernel-parameters.rst5
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt53
-rw-r--r--Documentation/admin-guide/mm/hugetlbpage.rst11
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst13
-rw-r--r--Documentation/admin-guide/mm/pagemap.rst2
-rw-r--r--Documentation/admin-guide/mm/userfaultfd.rst3
-rw-r--r--Documentation/admin-guide/thunderbolt.rst29
-rw-r--r--Documentation/arm64/tagged-address-abi.rst26
-rw-r--r--Documentation/bpf/libbpf/libbpf_naming_convention.rst4
-rw-r--r--Documentation/core-api/kernel-api.rst7
-rw-r--r--Documentation/core-api/printk-formats.rst11
-rw-r--r--Documentation/cpu-freq/cpu-drivers.rst6
-rw-r--r--Documentation/dev-tools/kcsan.rst93
-rw-r--r--Documentation/dev-tools/kunit/index.rst1
-rw-r--r--Documentation/dev-tools/kunit/kunit-tool.rst188
-rw-r--r--Documentation/dev-tools/kunit/running_tips.rst247
-rw-r--r--Documentation/dev-tools/kunit/start.rst4
-rw-r--r--Documentation/dev-tools/kunit/usage.rst57
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic,scpi.txt27
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/arm,scmi.txt239
-rw-r--r--Documentation/devicetree/bindings/arm/arm,scpi.txt219
-rw-r--r--Documentation/devicetree/bindings/arm/arm,scu.yaml46
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml7
-rw-r--r--Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt31
-rw-r--r--Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml58
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml13
-rw-r--r--Documentation/devicetree/bindings/arm/intel,keembay.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/juno,scpi.txt26
-rw-r--r--Documentation/devicetree/bindings/arm/keystone/ti,sci.txt86
-rw-r--r--Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml129
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.yaml29
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml14
-rw-r--r--Documentation/devicetree/bindings/arm/renesas.yaml18
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml5
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip/pmu.txt16
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip/pmu.yaml55
-rw-r--r--Documentation/devicetree/bindings/arm/scu.txt28
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml11
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml35
-rw-r--r--Documentation/devicetree/bindings/arm/ux500/boards.txt4
-rw-r--r--Documentation/devicetree/bindings/ata/nvidia,tegra-ahci.yaml1
-rw-r--r--Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml11
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt313
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml394
-rw-r--r--Documentation/devicetree/bindings/clock/idt,versaclock5.yaml7
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra114-car.txt63
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt107
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra124-car.yaml115
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra20-car.txt63
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra20-car.yaml69
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt56
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra30-car.txt63
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,emev2-smu.txt98
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,emev2-smu.yaml140
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt46
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml62
-rw-r--r--Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/ti,sci-clk.txt36
-rw-r--r--Documentation/devicetree/bindings/clock/ti,sci-clk.yaml49
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt8
-rw-r--r--Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml2
-rw-r--r--Documentation/devicetree/bindings/crypto/arm,cryptocell.yaml53
-rw-r--r--Documentation/devicetree/bindings/crypto/arm-cryptocell.txt25
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-dcp.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt143
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.yaml240
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml184
-rw-r--r--Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/lvds.yaml46
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-dsi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/altr,msgdma.yaml61
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl08x.txt59
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl08x.yaml136
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,gpi.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,shdma.txt84
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt4
-rw-r--r--Documentation/devicetree/bindings/dvfs/performance-domain.yaml74
-rw-r--r--Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml2
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml1
-rw-r--r--Documentation/devicetree/bindings/eeprom/at25.yaml31
-rw-r--r--Documentation/devicetree/bindings/example-schema.yaml2
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-sm5502.txt21
-rw-r--r--Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml52
-rw-r--r--Documentation/devicetree/bindings/firmware/arm,scmi.yaml341
-rw-r--r--Documentation/devicetree/bindings/firmware/arm,scpi.yaml247
-rw-r--r--Documentation/devicetree/bindings/firmware/qcom,scm.txt1
-rw-r--r--Documentation/devicetree/bindings/fpga/fpga-region.txt22
-rw-r--r--Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt19
-rw-r--r--Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.yaml52
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-davinci.txt167
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-davinci.yaml185
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-omap.txt45
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt69
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt42
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-stp-xway.yaml99
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-zynq.txt36
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-zynq.yaml59
-rw-r--r--Documentation/devicetree/bindings/gpio/idt,32434-gpio.yaml67
-rw-r--r--Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml103
-rw-r--r--Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt32
-rw-r--r--Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.yaml50
-rw-r--r--Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml108
-rw-r--r--Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpu/vivante,gc.yaml1
-rw-r--r--Documentation/devicetree/bindings/hwlock/allwinner,sun6i-a31-hwspinlock.yaml48
-rw-r--r--Documentation/devicetree/bindings/hwmon/adt7475.yaml22
-rw-r--r--Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml74
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-aspeed.txt49
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-at91.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-davinci.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt99
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.yaml124
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt74
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml110
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux.txt73
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux.yaml87
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-omap.txt37
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-pxa-pci-ce4100.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c.txt7
-rw-r--r--Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/mellanox,i2c-mlxbf.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,i2c.txt67
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml54
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,iic.txt72
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml158
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,riic.txt32
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,riic.yaml93
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml149
-rw-r--r--Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml102
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adis16201.yaml55
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bosch,bma180.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bosch,bma220.yaml50
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml9
-rw-r--r--Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml82
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/accel/murata,sca3300.yaml44
-rw-r--r--Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml80
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adc.yaml12
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7298.yaml48
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml174
-rw-r--r--Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,tsc2046.yaml115
-rw-r--r--Documentation/devicetree/bindings/iio/afe/current-sense-shunt.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/cdc/adi,ad7746.yaml77
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ad5755.txt124
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml169
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ti,dac082s085.yaml72
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt34
-rw-r--r--Documentation/devicetree/bindings/iio/light/amstaos,tsl2591.yaml50
-rw-r--r--Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt39
-rw-r--r--Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml70
-rw-r--r--Documentation/devicetree/bindings/iio/st,st-sensors.yaml221
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml41
-rw-r--r--Documentation/devicetree/bindings/input/fsl-mma8450.txt12
-rw-r--r--Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml148
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt93
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt33
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.yaml86
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml50
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt28
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml6
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml12
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt41
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,vic.yaml81
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml2
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml1
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml9
-rw-r--r--Documentation/devicetree/bindings/iommu/iommu.txt18
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml1
-rw-r--r--Documentation/devicetree/bindings/iommu/rockchip,iommu.txt38
-rw-r--r--Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml83
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml62
-rw-r--r--Documentation/devicetree/bindings/mailbox/arm,mhu.yaml48
-rw-r--r--Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml25
-rw-r--r--Documentation/devicetree/bindings/mailbox/omap-mailbox.txt184
-rw-r--r--Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml308
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7180.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7604.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/imx258.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml23
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/renesas,drif.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/renesas,vin.yaml3
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml130
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml6
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml1
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.txt130
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml230
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt47
-rw-r--r--Documentation/devicetree/bindings/mfd/google,cros-ec.yaml20
-rw-r--r--Documentation/devicetree/bindings/mfd/motorola-cpcap.txt4
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml121
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt4
-rw-r--r--Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml90
-rw-r--r--Documentation/devicetree/bindings/mfd/rk808.txt188
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml278
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml19
-rw-r--r--Documentation/devicetree/bindings/misc/eeprom-93xx46.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.yaml4
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-am654.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml53
-rw-r--r--Documentation/devicetree/bindings/mtd/arm-versatile.txt26
-rw-r--r--Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt186
-rw-r--r--Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml234
-rw-r--r--Documentation/devicetree/bindings/mtd/common.txt16
-rw-r--r--Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt13
-rw-r--r--Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt22
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml6
-rw-r--r--Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml45
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt114
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.yaml208
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd.yaml89
-rw-r--r--Documentation/devicetree/bindings/mtd/nand-controller.yaml18
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt5
-rw-r--r--Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt51
-rw-r--r--Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml69
-rw-r--r--Documentation/devicetree/bindings/mux/adi,adg792a.txt2
-rw-r--r--Documentation/devicetree/bindings/mux/adi,adgs1408.txt2
-rw-r--r--Documentation/devicetree/bindings/mux/gpio-mux.txt69
-rw-r--r--Documentation/devicetree/bindings/mux/gpio-mux.yaml92
-rw-r--r--Documentation/devicetree/bindings/mux/mux-consumer.yaml46
-rw-r--r--Documentation/devicetree/bindings/mux/mux-controller.txt157
-rw-r--r--Documentation/devicetree/bindings/mux/mux-controller.yaml182
-rw-r--r--Documentation/devicetree/bindings/mux/reg-mux.txt129
-rw-r--r--Documentation/devicetree/bindings/mux/reg-mux.yaml143
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt2
-rw-r--r--Documentation/devicetree/bindings/net/can/bosch,m_can.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/can/c_can.txt4
-rw-r--r--Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/gpmc-eth.txt2
-rw-r--r--Documentation/devicetree/bindings/net/imx-dwmac.txt56
-rw-r--r--Documentation/devicetree/bindings/net/mdio-gpio.txt27
-rw-r--r--Documentation/devicetree/bindings/net/mdio-gpio.yaml57
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-gpio.txt119
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml135
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt75
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-mmioreg.yaml78
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt82
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-multiplexer.yaml82
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux.txt129
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux.yaml44
-rw-r--r--Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml93
-rw-r--r--Documentation/devicetree/bindings/net/smsc,lan9115.yaml110
-rw-r--r--Documentation/devicetree/bindings/net/smsc911x.txt43
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.yaml5
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml100
-rw-r--r--Documentation/devicetree/bindings/pci/loongson.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/pci-keystone.txt115
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.txt24
-rw-r--r--Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml74
-rw-r--r--Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml96
-rw-r--r--Documentation/devicetree/bindings/perf/arm,cmn.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,bcm63xx-usbh-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml (renamed from drivers/staging/hikey9xx/phy-hi3670-usb3.yaml)0
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml5
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,tphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt12
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml12
-rw-r--r--Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml11
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-hs.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-ss.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml28
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt24
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,rcar-gen3-pcie-phy.yaml53
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml79
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt52
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-usb-phy.yaml81
-rw-r--r--Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml56
-rw-r--r--Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml56
-rw-r--r--Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml13
-rw-r--r--Documentation/devicetree/bindings/power/qcom,rpmpd.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/renesas,rzg2l-sysc.yaml63
-rw-r--r--Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt100
-rw-r--r--Documentation/devicetree/bindings/power/renesas,sysc-rmobile.yaml121
-rw-r--r--Documentation/devicetree/bindings/power/rockchip,power-controller.yaml248
-rw-r--r--Documentation/devicetree/bindings/power/supply/charger-manager.txt91
-rw-r--r--Documentation/devicetree/bindings/power/supply/charger-manager.yaml215
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml54
-rw-r--r--Documentation/devicetree/bindings/property-units.txt48
-rw-r--r--Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml4
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt21
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.yaml45
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.txt51
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.yaml64
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt50
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.yaml65
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.yaml9
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml13
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml26
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml6
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml12
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt228
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml547
-rw-r--r--Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml31
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt4
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml5
-rw-r--r--Documentation/devicetree/bindings/reset/fsl,imx-src.yaml1
-rw-r--r--Documentation/devicetree/bindings/reset/microchip,rst.yaml58
-rw-r--r--Documentation/devicetree/bindings/reset/ti,sci-reset.txt62
-rw-r--r--Documentation/devicetree/bindings/reset/ti,sci-reset.yaml51
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml1
-rw-r--r--Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml36
-rw-r--r--Documentation/devicetree/bindings/rng/mtk-rng.txt22
-rw-r--r--Documentation/devicetree/bindings/rng/mtk-rng.yaml54
-rw-r--r--Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/arm,pl031.yaml58
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rx8900.txt22
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rx8900.yaml49
-rw-r--r--Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt28
-rw-r--r--Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml58
-rw-r--r--Documentation/devicetree/bindings/rtc/imxdi-rtc.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf8563.yaml58
-rw-r--r--Documentation/devicetree/bindings/rtc/pcf85363.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/pcf8563.txt29
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-m41t80.txt9
-rw-r--r--Documentation/devicetree/bindings/rtc/ti,bq32000.yaml49
-rw-r--r--Documentation/devicetree/bindings/rtc/ti,bq32k.txt18
-rw-r--r--Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml65
-rw-r--r--Documentation/devicetree/bindings/rtc/xlnx-rtc.txt25
-rw-r--r--Documentation/devicetree/bindings/serial/8250.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/8250_omap.yaml118
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml5
-rw-r--r--Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml12
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/ingenic,uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/mvebu-uart.txt6
-rw-r--r--Documentation/devicetree/bindings/serial/omap_serial.txt40
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.yaml13
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,em-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/samsung_uart.yaml3
-rw-r--r--Documentation/devicetree/bindings/serial/serial.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/sifive-serial.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/st,stm32-uart.yaml3
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/pwrap.txt1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.txt61
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml261
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/power_domain.txt136
-rw-r--r--Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt65
-rw-r--r--Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml59
-rw-r--r--Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml9
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml8
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l42.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,spdif.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-sai.txt6
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-card.yaml122
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audmux.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audmux.yaml119
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml58
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml30
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml70
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml146
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.yaml10
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic32x4.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/wm8750.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/wm8750.yaml42
-rw-r--r--Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml32
-rw-r--r--Documentation/devicetree/bindings/spi/spi-davinci.txt2
-rw-r--r--Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml (renamed from drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml)10
-rw-r--r--Documentation/devicetree/bindings/spmi/spmi.yaml15
-rw-r--r--Documentation/devicetree/bindings/sram/sram.yaml5
-rw-r--r--Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml2
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml73
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.yaml3
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt85
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml96
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml42
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/arm,twd-timer.yaml56
-rw-r--r--Documentation/devicetree/bindings/timer/arm,twd.txt53
-rw-r--r--Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tpu.txt21
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tpu.yaml56
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml12
-rw-r--r--Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/cdns,usb3.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/nxp,isp1760.yaml67
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/realtek,rts5411.yaml62
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usbhs.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml16
-rw-r--r--Documentation/devicetree/bindings/virtio/mmio.txt47
-rw-r--r--Documentation/devicetree/bindings/virtio/mmio.yaml60
-rw-r--r--Documentation/devicetree/bindings/watchdog/arm,sbsa-gwdt.yaml51
-rw-r--r--Documentation/devicetree/bindings/watchdog/arm,twd-wdt.yaml50
-rw-r--r--Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml74
-rw-r--r--Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt34
-rw-r--r--Documentation/devicetree/bindings/watchdog/mstar,msc313e-wdt.yaml40
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt6
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/sbsa-gwdt.txt31
-rw-r--r--Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml1
-rw-r--r--Documentation/devicetree/bindings/writing-bindings.rst3
-rw-r--r--Documentation/driver-api/auxiliary_bus.rst2
-rw-r--r--Documentation/driver-api/cxl/memory-devices.rst20
-rw-r--r--Documentation/driver-api/driver-model/devres.rst4
-rw-r--r--Documentation/driver-api/early-userspace/early_userspace_support.rst8
-rw-r--r--Documentation/driver-api/generic-counter.rst2
-rw-r--r--Documentation/driver-api/pwm.rst7
-rw-r--r--Documentation/driver-api/serial/moxa-smartio.rst496
-rw-r--r--Documentation/driver-api/usb/error-codes.rst3
-rw-r--r--Documentation/driver-api/vfio-mediated-device.rst35
-rw-r--r--Documentation/features/core/thread-info-in-task/arch-support.txt32
-rw-r--r--Documentation/features/time/arch-tick-broadcast/arch-support.txt2
-rw-r--r--Documentation/filesystems/debugfs.rst4
-rw-r--r--Documentation/filesystems/f2fs.rst50
-rw-r--r--Documentation/filesystems/path-lookup.rst6
-rw-r--r--Documentation/filesystems/porting.rst18
-rw-r--r--Documentation/filesystems/proc.rst48
-rw-r--r--Documentation/filesystems/ramfs-rootfs-initramfs.rst2
-rw-r--r--Documentation/firmware-guide/acpi/enumeration.rst32
-rw-r--r--Documentation/fpga/dfl.rst4
-rw-r--r--Documentation/gpu/rfc/i915_gem_lmem.rst109
-rw-r--r--Documentation/i2c/i2c-sysfs.rst395
-rw-r--r--Documentation/i2c/index.rst1
-rw-r--r--Documentation/networking/af_xdp.rst6
-rw-r--r--Documentation/networking/caif/caif.rst4
-rw-r--r--Documentation/networking/ethtool-netlink.rst22
-rw-r--r--Documentation/networking/ip-sysctl.rst2
-rw-r--r--Documentation/networking/netdev-FAQ.rst17
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.rst16
-rw-r--r--Documentation/networking/operstates.rst6
-rw-r--r--Documentation/networking/tipc.rst121
-rw-r--r--Documentation/s390/vfio-ap.rst1
-rw-r--r--Documentation/scsi/scsi_mid_low_api.rst7
-rw-r--r--Documentation/sound/hd-audio/controls.rst2
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst13
-rw-r--r--Documentation/trace/boottime-trace.rst6
-rw-r--r--Documentation/trace/histogram.rst2
-rw-r--r--Documentation/trace/hwlat_detector.rst13
-rw-r--r--Documentation/trace/index.rst2
-rw-r--r--Documentation/trace/osnoise-tracer.rst152
-rw-r--r--Documentation/trace/timerlat-tracer.rst181
-rw-r--r--Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst5
-rw-r--r--Documentation/translations/zh_CN/process/2.Process.rst4
-rw-r--r--Documentation/usb/gadget-testing.rst2
-rw-r--r--Documentation/userspace-api/accelerators/ocxl.rst2
-rw-r--r--Documentation/userspace-api/seccomp_filter.rst2
-rw-r--r--Documentation/virt/kvm/api.rst30
-rw-r--r--Documentation/virt/kvm/locking.rst8
-rw-r--r--Documentation/vm/hmm.rst19
-rw-r--r--Documentation/vm/unevictable-lru.rst33
-rw-r--r--Documentation/w1/slaves/w1_ds2438.rst19
-rw-r--r--Documentation/x86/elf_auxvec.rst53
-rw-r--r--Documentation/x86/index.rst1
-rw-r--r--LICENSES/dual/CC-BY-4.02
-rw-r--r--MAINTAINERS278
-rw-r--r--Makefile156
-rw-r--r--arch/alpha/Kbuild3
-rw-r--r--arch/alpha/Kconfig8
-rw-r--r--arch/alpha/Makefile2
-rw-r--r--arch/alpha/boot/bootp.c2
-rw-r--r--arch/alpha/boot/bootpz.c2
-rw-r--r--arch/alpha/boot/misc.c2
-rw-r--r--arch/alpha/configs/defconfig1
-rw-r--r--arch/alpha/include/asm/compiler.h11
-rw-r--r--arch/alpha/include/asm/pgalloc.h1
-rw-r--r--arch/alpha/include/asm/pgtable.h9
-rw-r--r--arch/alpha/include/asm/syscall.h6
-rw-r--r--arch/alpha/include/asm/unaligned.h12
-rw-r--r--arch/alpha/include/uapi/asm/mman.h3
-rw-r--r--arch/alpha/kernel/osf_sys.c4
-rw-r--r--arch/alpha/kernel/perf_event.c2
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/setup.c15
-rw-r--r--arch/alpha/kernel/smp.c2
-rw-r--r--arch/alpha/kernel/srmcons.c9
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/kernel/traps.c2
-rw-r--r--arch/alpha/math-emu/math.c8
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/Makefile3
-rw-r--r--arch/arc/include/asm/checksum.h2
-rw-r--r--arch/arc/include/asm/perf_event.h2
-rw-r--r--arch/arc/include/asm/pgalloc.h2
-rw-r--r--arch/arc/include/asm/pgtable.h8
-rw-r--r--arch/arc/kernel/fpu.c9
-rw-r--r--arch/arc/kernel/unwind.c10
-rw-r--r--arch/arc/kernel/vmlinux.lds.S2
-rw-r--r--arch/arc/mm/init.c5
-rw-r--r--arch/arm/Kconfig12
-rw-r--r--arch/arm/Kconfig.debug17
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/compressed/atags_to_fdt.c2
-rw-r--r--arch/arm/boot/dts/Makefile13
-rw-r--r--arch/arm/boot/dts/am335x-baltos.dtsi4
-rw-r--r--arch/arm/boot/dts/am335x-boneblack-wireless.dts2
-rw-r--r--arch/arm/boot/dts/am335x-boneblue.dts2
-rw-r--r--arch/arm/boot/dts/am335x-bonegreen-wireless.dts4
-rw-r--r--arch/arm/boot/dts/am335x-cm-t335.dts4
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts2
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts4
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts4
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi2
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-8100-common.dtsi2
-rw-r--r--arch/arm/boot/dts/am335x-osd3358-sm-red.dts132
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts8
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi34
-rw-r--r--arch/arm/boot/dts/am437x-gp-evm.dts9
-rw-r--r--arch/arm/boot/dts/am437x-l4.dtsi51
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts8
-rw-r--r--arch/arm/boot/dts/am5718.dtsi6
-rw-r--r--arch/arm/boot/dts/am57xx-cl-som-am57x.dts13
-rw-r--r--arch/arm/boot/dts/arm-realview-eb.dtsi2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb1176.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb11mp.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-pbx.dtsi2
-rw-r--r--arch/arm/boot/dts/aspeed-ast2500-evb.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts15
-rw-r--r--arch/arm/boot/dts/aspeed-ast2600-evb.dts119
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts59
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts6
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts1933
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts30
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts5
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-lenovo-hr630.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-lenovo-hr855xg2.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-microsoft-olympus.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-swift.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts8
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi1
-rw-r--r--arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi10
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm-hr2.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm2711-rpi-4-b.dts85
-rw-r--r--arch/arm/boot/dts/bcm2711-rpi-400.dts45
-rw-r--r--arch/arm/boot/dts/bcm2711-rpi.dtsi74
-rw-r--r--arch/arm/boot/dts/bcm2711.dtsi3
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-a-plus.dts4
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-a.dts2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-plus.dts4
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b.dts2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-cm1.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero-w.dts2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero.dts2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm2836-rpi-2-b.dts4
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts4
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts4
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b.dts2
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts4
-rw-r--r--arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts4
-rw-r--r--arch/arm/boot/dts/bcm47094.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi26
-rw-r--r--arch/arm/boot/dts/bcm63138.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm7445-bcm97445svmb.dts4
-rw-r--r--arch/arm/boot/dts/bcm7445.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm911360_entphn.dts4
-rw-r--r--arch/arm/boot/dts/bcm953012k.dts4
-rw-r--r--arch/arm/boot/dts/bcm958300k.dts4
-rw-r--r--arch/arm/boot/dts/bcm958305k.dts4
-rw-r--r--arch/arm/boot/dts/bcm958522er.dts4
-rw-r--r--arch/arm/boot/dts/bcm958525er.dts4
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts4
-rw-r--r--arch/arm/boot/dts/bcm958622hr.dts4
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts4
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts4
-rw-r--r--arch/arm/boot/dts/bcm958625k.dts4
-rw-r--r--arch/arm/boot/dts/bcm963138dvt.dts4
-rw-r--r--arch/arm/boot/dts/bcm988312hr.dts4
-rw-r--r--arch/arm/boot/dts/da850.dtsi21
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts8
-rw-r--r--arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7-l4.dtsi52
-rw-r--r--arch/arm/boot/dts/dra71-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra71x.dtsi4
-rw-r--r--arch/arm/boot/dts/dra72-evm-common.dtsi6
-rw-r--r--arch/arm/boot/dts/dra72x.dtsi10
-rw-r--r--arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi2
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi100
-rw-r--r--arch/arm/boot/dts/dra76-evm.dts43
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts2
-rw-r--r--arch/arm/boot/dts/exynos4210-i9100.dts8
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts2
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts2
-rw-r--r--arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos4412-midas.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos4412-n710x.dts4
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts1
-rw-r--r--arch/arm/boot/dts/exynos4412-p4note.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts1
-rw-r--r--arch/arm/boot/dts/exynos5410-odroidxu.dts1
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts1
-rw-r--r--arch/arm/boot/dts/exynos5420-smdk5420.dts1
-rw-r--r--arch/arm/boot/dts/exynos5422-odroid-core.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidhc1.dts2
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu4.dts2
-rw-r--r--arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi4
-rw-r--r--arch/arm/boot/dts/gemini-dlink-dir-685.dts11
-rw-r--r--arch/arm/boot/dts/gemini-dlink-dns-313.dts4
-rw-r--r--arch/arm/boot/dts/gemini-nas4220b.dts2
-rw-r--r--arch/arm/boot/dts/gemini-rut1xx.dts14
-rw-r--r--arch/arm/boot/dts/gemini-sl93512r.dts6
-rw-r--r--arch/arm/boot/dts/gemini-sq201.dts6
-rw-r--r--arch/arm/boot/dts/gemini-wbd111.dts2
-rw-r--r--arch/arm/boot/dts/gemini-wbd222.dts2
-rw-r--r--arch/arm/boot/dts/gemini.dtsi13
-rw-r--r--arch/arm/boot/dts/hi3620.dtsi4
-rw-r--r--arch/arm/boot/dts/hip01-ca9x2.dts4
-rw-r--r--arch/arm/boot/dts/hip01.dtsi4
-rw-r--r--arch/arm/boot/dts/hip04.dtsi4
-rw-r--r--arch/arm/boot/dts/hisi-x5hd2-dkb.dts2
-rw-r--r--arch/arm/boot/dts/hisi-x5hd2.dtsi2
-rw-r--r--arch/arm/boot/dts/imx25-pinfunc.h12
-rw-r--r--arch/arm/boot/dts/imx28-lwe.dtsi170
-rw-r--r--arch/arm/boot/dts/imx28-xea.dts99
-rw-r--r--arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts2
-rw-r--r--arch/arm/boot/dts/imx53-m53menlo.dts4
-rw-r--r--arch/arm/boot/dts/imx53-ppd.dts2
-rw-r--r--arch/arm/boot/dts/imx6dl-b105pv2.dts32
-rw-r--r--arch/arm/boot/dts/imx6dl-b105v2.dts32
-rw-r--r--arch/arm/boot/dts/imx6dl-b125pv2.dts30
-rw-r--r--arch/arm/boot/dts/imx6dl-b125v2.dts30
-rw-r--r--arch/arm/boot/dts/imx6dl-b155v2.dts32
-rw-r--r--arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi413
-rw-r--r--arch/arm/boot/dts/imx6dl-b1x5v2.dtsi58
-rw-r--r--arch/arm/boot/dts/imx6dl-plym2m.dts6
-rw-r--r--arch/arm/boot/dts/imx6dl-prtvt7.dts36
-rw-r--r--arch/arm/boot/dts/imx6dl-qmx6.dtsi612
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts2
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-common.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6q-dhcom-som.dtsi71
-rw-r--r--arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-ds.dts17
-rw-r--r--arch/arm/boot/dts/imx6qdl-ds.dtsi458
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi5
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi10
-rw-r--r--arch/arm/boot/dts/imx6qdl-sr-som.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-vicut1.dtsi41
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dtsi50
-rw-r--r--arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi1
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts7
-rw-r--r--arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts79
-rw-r--r--arch/arm/boot/dts/intel-ixp42x-welltech-epbx100.dts76
-rw-r--r--arch/arm/boot/dts/intel-ixp42x.dtsi4
-rw-r--r--arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts104
-rw-r--r--arch/arm/boot/dts/intel-ixp43x.dtsi4
-rw-r--r--arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi33
-rw-r--r--arch/arm/boot/dts/intel-ixp4xx.dtsi92
-rw-r--r--arch/arm/boot/dts/keystone-k2g-evm.dts11
-rw-r--r--arch/arm/boot/dts/keystone-k2g.dtsi6
-rw-r--r--arch/arm/boot/dts/meson.dtsi1
-rw-r--r--arch/arm/boot/dts/mstar-v7.dtsi14
-rw-r--r--arch/arm/boot/dts/omap2.dtsi4
-rw-r--r--arch/arm/boot/dts/omap2420.dtsi5
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-evm-processor-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-gta04a5.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi8
-rw-r--r--arch/arm/boot/dts/omap4-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi11
-rw-r--r--arch/arm/boot/dts/omap5-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/openbmc-flash-layout-64.dtsi18
-rw-r--r--arch/arm/boot/dts/qcom-apq8060-dragonboard.dts4
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064-rb3011.dts58
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi425
-rw-r--r--arch/arm/boot/dts/qcom-sdx55-t55.dts2
-rw-r--r--arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts4
-rw-r--r--arch/arm/boot/dts/r8a7742.dtsi6
-rw-r--r--arch/arm/boot/dts/r8a7743.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a7744.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a7745.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a77470.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi3
-rw-r--r--arch/arm/boot/dts/r8a7779-marzen.dts2
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a7790-lager.dts8
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi10
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts4
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a7792-blanche.dts8
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a7793-gose.dts10
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi3
-rw-r--r--arch/arm/boot/dts/r8a7794-alt.dts42
-rw-r--r--arch/arm/boot/dts/r8a7794-silk.dts10
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi4
-rw-r--r--arch/arm/boot/dts/rk3036-kylin.dts2
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi72
-rw-r--r--arch/arm/boot/dts/rk3066a-bqcurie2.dts5
-rw-r--r--arch/arm/boot/dts/rk3066a-marsboard.dts4
-rw-r--r--arch/arm/boot/dts/rk3066a-mk808.dts5
-rw-r--r--arch/arm/boot/dts/rk3066a-rayeager.dts8
-rw-r--r--arch/arm/boot/dts/rk3066a.dtsi71
-rw-r--r--arch/arm/boot/dts/rk3188-bqedison2qc.dts6
-rw-r--r--arch/arm/boot/dts/rk3188-px3-evb.dts5
-rw-r--r--arch/arm/boot/dts/rk3188-radxarock.dts4
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi74
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi158
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-som.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-vyasa.dts4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi18
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi17
-rw-r--r--arch/arm/boot/dts/rv1108.dtsi2
-rw-r--r--arch/arm/boot/dts/s5pv210-goni.dts9
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/sd5203.dts2
-rw-r--r--arch/arm/boot/dts/ste-ab8500.dtsi28
-rw-r--r--arch/arm/boot/dts/ste-ab8505.dtsi24
-rw-r--r--arch/arm/boot/dts/ste-href-ab8500.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi6
-rw-r--r--arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi9
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts2
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-golden.dts3
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-janice.dts11
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-skomer.dts3
-rw-r--r--arch/arm/boot/dts/stm32429i-eval.dts8
-rw-r--r--arch/arm/boot/dts/stm32746g-eval.dts6
-rw-r--r--arch/arm/boot/dts/stm32f4-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32f429-disco.dts6
-rw-r--r--arch/arm/boot/dts/stm32f429-pinctrl.dtsi72
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi10
-rw-r--r--arch/arm/boot/dts/stm32f469-disco.dts6
-rw-r--r--arch/arm/boot/dts/stm32f469-pinctrl.dtsi74
-rw-r--r--arch/arm/boot/dts/stm32f7-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32f746.dtsi12
-rw-r--r--arch/arm/boot/dts/stm32f769-disco.dts6
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32mp15-pinctrl.dtsi33
-rw-r--r--arch/arm/boot/dts/stm32mp151.dtsi16
-rw-r--r--arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts2
-rw-r--r--arch/arm/boot/dts/stm32mp157a-stinger96.dtsi7
-rw-r--r--arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi7
-rw-r--r--arch/arm/boot/dts/stm32mp157c-odyssey.dts2
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi31
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi22
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-osd32.dtsi7
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-r40-feta40i.dtsi106
-rw-r--r--arch/arm/boot/dts/sun8i-r40-oka40i-c.dts203
-rw-r--r--arch/arm/boot/dts/sun8i-r40.dtsi56
-rw-r--r--arch/arm/boot/dts/sun8i-v3.dtsi31
-rw-r--r--arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts17
-rw-r--r--arch/arm/boot/dts/sun8i-v3s.dtsi48
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi3
-rw-r--r--arch/arm/boot/dts/tegra20-acer-a500-picasso.dts21
-rw-r--r--arch/arm/boot/dts/tegra20-harmony.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-medcom-wide.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-paz00.dts43
-rw-r--r--arch/arm/boot/dts/tegra20-plutux.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-tec.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-ventana.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi16
-rw-r--r--arch/arm/boot/dts/tegra30-asus-nexus7-grouper-maxim-pmic.dtsi9
-rw-r--r--arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi11
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30-ouya.dts4
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi3
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts5
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi2
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi2
-rw-r--r--arch/arm/configs/aspeed_g4_defconfig1
-rw-r--r--arch/arm/configs/aspeed_g5_defconfig2
-rw-r--r--arch/arm/configs/at91_dt_defconfig2
-rw-r--r--arch/arm/configs/exynos_defconfig1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/integrator_defconfig5
-rw-r--r--arch/arm/configs/ixp4xx_defconfig3
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/configs/nhk8815_defconfig8
-rw-r--r--arch/arm/configs/omap2plus_defconfig5
-rw-r--r--arch/arm/configs/realview_defconfig4
-rw-r--r--arch/arm/configs/shmobile_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig1
-rw-r--r--arch/arm/configs/u8500_defconfig5
-rw-r--r--arch/arm/configs/versatile_defconfig4
-rw-r--r--arch/arm/configs/vexpress_defconfig17
-rw-r--r--arch/arm/include/asm/ftrace.h3
-rw-r--r--arch/arm/include/asm/insn.h8
-rw-r--r--arch/arm/include/asm/memory.h15
-rw-r--r--arch/arm/include/asm/module.h10
-rw-r--r--arch/arm/include/asm/pgalloc.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h2
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/system_misc.h1
-rw-r--r--arch/arm/include/asm/unaligned.h27
-rw-r--r--arch/arm/kernel/ftrace.c46
-rw-r--r--arch/arm/kernel/head.S30
-rw-r--r--arch/arm/kernel/insn.c19
-rw-r--r--arch/arm/kernel/module-plts.c49
-rw-r--r--arch/arm/kernel/reboot.c6
-rw-r--r--arch/arm/kernel/setup.c25
-rw-r--r--arch/arm/kernel/vmlinux.lds.S2
-rw-r--r--arch/arm/mach-davinci/Kconfig1
-rw-r--r--arch/arm/mach-exynos/exynos.c2
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/common.h3
-rw-r--r--arch/arm/mach-imx/headsmp.S9
-rw-r--r--arch/arm/mach-imx/hotplug.c3
-rw-r--r--arch/arm/mach-imx/mach-imx50.c7
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c88
-rw-r--r--arch/arm/mach-imx/mach-imx6sx.c26
-rw-r--r--arch/arm/mach-imx/mach-imx7d.c25
-rw-r--r--arch/arm/mach-imx/mmdc.c17
-rw-r--r--arch/arm/mach-imx/platsmp.c26
-rw-r--r--arch/arm/mach-imx/src.c101
-rw-r--r--arch/arm/mach-imx/suspend-imx53.S4
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig36
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c47
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/cpu.h54
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h109
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/platform.h14
-rw-r--r--arch/arm/mach-ixp4xx/ixp4xx-of.c8
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c1
-rw-r--r--arch/arm/mach-omap1/Kconfig12
-rw-r--r--arch/arm/mach-omap2/cm.h1
-rw-r--r--arch/arm/mach-omap2/cm_common.c13
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/control.c5
-rw-r--r--arch/arm/mach-omap2/control.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c10
-rw-r--r--arch/arm/mach-omap2/pm33xx-core.c40
-rw-r--r--arch/arm/mach-rpc/riscpc.c1
-rw-r--r--arch/arm/mach-s3c/mach-rx1950.c1
-rw-r--r--arch/arm/mm/Kconfig2
-rw-r--r--arch/arm/mm/abort-ev7.S26
-rw-r--r--arch/arm/mm/init.c13
-rw-r--r--arch/arm/mm/ioremap.c4
-rw-r--r--arch/arm/mm/mmu.c166
-rw-r--r--arch/arm/net/bpf_jit_32.c3
-rw-r--r--arch/arm/probes/kprobes/test-thumb.c10
-rw-r--r--arch/arm/tools/Makefile25
-rw-r--r--arch/arm/tools/syscallnr.sh3
-rw-r--r--arch/arm/xen/enlighten.c12
-rw-r--r--arch/arm64/Kconfig25
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile24
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi25
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi62
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts195
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi9
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi41
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts646
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts13
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts17
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts48
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi9
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1.dtsi10
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi6
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile3
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2711-rpi-400.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi2
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi8
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi14
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi26
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi24
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi24
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi24
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi50
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts1019
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi363
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi147
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts139
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi25
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi28
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts182
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi16
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3670.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05-d02.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06-d03.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07-d05.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts9
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi10
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap807.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-db.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8167.dtsi121
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi60
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-evb.dts5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts30
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts44
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts32
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi27
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts16
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts16
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts13
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi26
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi23
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi110
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi595
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi173
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile15
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi89
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts24
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074-hk10-c1.dts11
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074-hk10-c2.dts10
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi76
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts454
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi131
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts47
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-pins.dtsi653
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi997
-rw-r--r--arch/arm64/boot/dts/qcom/pm6150.dtsi24
-rw-r--r--arch/arm64/boot/dts/qcom/pm7325.dtsi53
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/pm8350c.dtsi32
-rw-r--r--arch/arm64/boot/dts/qcom/pm8994.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8994.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/pmk8350.dtsi51
-rw-r--r--arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi135
-rw-r--r--arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi108
-rw-r--r--arch/arm64/boot/dts/qcom/pmr735a.dtsi32
-rw-r--r--arch/arm64/boot/dts/qcom/qrb5165-rb5.dts98
-rw-r--r--arch/arm64/boot/dts/qcom/sa8155p-adp.dts360
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-idp.dts169
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts22
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r3-lte.dts (renamed from arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts)6
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r3.dts (renamed from arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts)4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi64
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts9
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts9
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts9
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi9
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts38
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3-lte.dts14
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3.dts15
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi29
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi121
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi140
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280-idp.dts36
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi1171
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts5
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts114
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-hdk.dts28
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts543
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-mtp.dts28
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-bahamut.dts19
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-griffin.dts13
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi452
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi76
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-hdk.dts33
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts10
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts15
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts35
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi636
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi192
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350-mtp.dts6
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi64
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile2
-rw-r--r--arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi19
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-eagle.dts8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980-condor.dts8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi21
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044.dtsi132
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi25
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts21
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044l2.dtsi13
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi27
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile1
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi28
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi74
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts37
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts36
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts9
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi13
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-firefly.dts162
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts121
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi68
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts79
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-pinctrl.dtsi3111
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568.dtsi593
-rw-r--r--arch/arm64/boot/dts/rockchip/rockchip-pinconf.dtsi344
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi222
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm.dts110
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sk.dts121
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi63
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi37
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts49
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-main.dtsi25
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts62
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi126
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi4
-rw-r--r--arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts8
-rw-r--r--arch/arm64/boot/dts/toshiba/tmpv7708.dtsi9
-rw-r--r--arch/arm64/boot/dts/toshiba/tmpv7708_pins.dtsi5
-rw-r--r--arch/arm64/configs/defconfig17
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/cache.h2
-rw-r--r--arch/arm64/include/asm/cacheflush.h6
-rw-r--r--arch/arm64/include/asm/el2_setup.h3
-rw-r--r--arch/arm64/include/asm/hugetlb.h3
-rw-r--r--arch/arm64/include/asm/kfence.h2
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/include/asm/page.h2
-rw-r--r--arch/arm64/include/asm/pgalloc.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h10
-rw-r--r--arch/arm64/include/asm/ptrace.h12
-rw-r--r--arch/arm64/include/asm/set_memory.h17
-rw-r--r--arch/arm64/include/asm/smp_plat.h1
-rw-r--r--arch/arm64/include/asm/stacktrace.h2
-rw-r--r--arch/arm64/include/asm/syscall.h19
-rw-r--r--arch/arm64/include/asm/system_misc.h2
-rw-r--r--arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm64/kernel/Makefile2
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kernel/entry-common.c2
-rw-r--r--arch/arm64/kernel/kaslr.c4
-rw-r--r--arch/arm64/kernel/machine_kexec.c1
-rw-r--r--arch/arm64/kernel/mte.c15
-rw-r--r--arch/arm64/kernel/process.c7
-rw-r--r--arch/arm64/kernel/ptrace.c2
-rw-r--r--arch/arm64/kernel/setup.c6
-rw-r--r--arch/arm64/kernel/signal.c3
-rw-r--r--arch/arm64/kernel/smccc-call.S9
-rw-r--r--arch/arm64/kernel/stacktrace.c4
-rw-r--r--arch/arm64/kernel/syscall.c9
-rw-r--r--arch/arm64/kvm/arm.c12
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c2
-rw-r--r--arch/arm64/kvm/mmu.c4
-rw-r--r--arch/arm64/lib/copy_from_user.S13
-rw-r--r--arch/arm64/lib/copy_in_user.S21
-rw-r--r--arch/arm64/lib/copy_to_user.S14
-rw-r--r--arch/arm64/lib/strlen.S10
-rw-r--r--arch/arm64/mm/dma-mapping.c2
-rw-r--r--arch/arm64/mm/hugetlbpage.c5
-rw-r--r--arch/arm64/mm/init.c31
-rw-r--r--arch/arm64/mm/ioremap.c4
-rw-r--r--arch/arm64/mm/mmu.c9
-rw-r--r--arch/arm64/mm/pageattr.c13
-rw-r--r--arch/arm64/net/bpf_jit_comp.c13
-rw-r--r--arch/csky/Kbuild1
-rw-r--r--arch/csky/Kconfig3
-rw-r--r--arch/csky/include/asm/pgalloc.h2
-rw-r--r--arch/csky/include/asm/pgtable.h1
-rw-r--r--arch/csky/kernel/setup.c5
-rw-r--r--arch/csky/mm/syscache.c12
-rw-r--r--arch/h8300/Kbuild2
-rw-r--r--arch/h8300/Kconfig.cpu1
-rw-r--r--arch/h8300/Makefile3
-rw-r--r--arch/h8300/kernel/setup.c5
-rw-r--r--arch/hexagon/Kbuild2
-rw-r--r--arch/hexagon/Kconfig1
-rw-r--r--arch/hexagon/Makefile4
-rw-r--r--arch/hexagon/include/asm/pgtable.h4
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S9
-rw-r--r--arch/ia64/Kbuild1
-rw-r--r--arch/ia64/Kconfig8
-rw-r--r--arch/ia64/include/asm/pal.h1
-rw-r--r--arch/ia64/include/asm/pgalloc.h1
-rw-r--r--arch/ia64/include/asm/pgtable.h5
-rw-r--r--arch/ia64/include/asm/unaligned.h12
-rw-r--r--arch/m68k/68000/dragen2.c1
-rw-r--r--arch/m68k/68000/screen.h804
-rw-r--r--arch/m68k/Kconfig7
-rw-r--r--arch/m68k/Kconfig.machine1
-rw-r--r--arch/m68k/Makefile1
-rw-r--r--arch/m68k/coldfire/m525x.c2
-rw-r--r--arch/m68k/emu/nfcon.c2
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h2
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h2
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h1
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h4
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h1
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h1
-rw-r--r--arch/m68k/include/asm/unaligned.h26
-rw-r--r--arch/m68k/kernel/setup_mm.c5
-rw-r--r--arch/m68k/kernel/setup_no.c5
-rw-r--r--arch/microblaze/Kbuild1
-rw-r--r--arch/microblaze/Kconfig4
-rw-r--r--arch/microblaze/include/asm/pgalloc.h2
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/microblaze/include/asm/unaligned.h27
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/Makefile5
-rw-r--r--arch/mips/boot/compressed/Makefile4
-rw-r--r--arch/mips/boot/compressed/decompress.c2
-rw-r--r--arch/mips/crypto/crc32-mips.c2
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/asm/irq.h3
-rw-r--r--arch/mips/include/asm/mach-ralink/spaces.h10
-rw-r--r--arch/mips/include/asm/pgalloc.h18
-rw-r--r--arch/mips/include/asm/pgtable-32.h1
-rw-r--r--arch/mips/include/asm/pgtable-64.h9
-rw-r--r--arch/mips/include/asm/vdso/vdso.h2
-rw-r--r--arch/mips/include/uapi/asm/mman.h3
-rw-r--r--arch/mips/kernel/irq.c16
-rw-r--r--arch/mips/kernel/relocate.c1
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/mti-malta/malta-platform.c6
-rw-r--r--arch/mips/net/ebpf_jit.c3
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c1
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c1
-rw-r--r--arch/nds32/Kbuild1
-rw-r--r--arch/nds32/boot/.gitignore2
-rw-r--r--arch/nds32/include/asm/pgalloc.h5
-rw-r--r--arch/nds32/kernel/setup.c5
-rw-r--r--arch/nds32/mm/mmap.c2
-rw-r--r--arch/nios2/Kbuild1
-rw-r--r--arch/nios2/include/asm/pgalloc.h1
-rw-r--r--arch/nios2/include/asm/pgtable.h2
-rw-r--r--arch/nios2/kernel/setup.c5
-rw-r--r--arch/openrisc/Makefile1
-rw-r--r--arch/openrisc/include/asm/pgalloc.h2
-rw-r--r--arch/openrisc/include/asm/pgtable.h1
-rw-r--r--arch/openrisc/include/asm/unaligned.h47
-rw-r--r--arch/openrisc/kernel/setup.c5
-rw-r--r--arch/parisc/Kbuild1
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/pgalloc.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h6
-rw-r--r--arch/parisc/include/asm/unaligned.h6
-rw-r--r--arch/parisc/include/uapi/asm/mman.h3
-rw-r--r--arch/parisc/kernel/pdc_chassis.c1
-rw-r--r--arch/parisc/kernel/pdc_cons.c8
-rw-r--r--arch/parisc/kernel/syscalls/Makefile18
-rw-r--r--arch/powerpc/Kconfig13
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/Makefile13
-rw-r--r--arch/powerpc/boot/Makefile6
-rw-r--r--arch/powerpc/boot/decompress.c4
-rw-r--r--arch/powerpc/boot/devtree.c59
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts138
-rw-r--r--arch/powerpc/boot/microwatt.c24
-rw-r--r--arch/powerpc/boot/ns16550.c9
-rwxr-xr-xarch/powerpc/boot/wrapper5
-rw-r--r--arch/powerpc/boot/zImage.ps3.lds.S2
-rw-r--r--arch/powerpc/configs/32-bit.config1
-rw-r--r--arch/powerpc/configs/64-bit.config1
-rw-r--r--arch/powerpc/configs/microwatt_defconfig98
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig25
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h9
-rw-r--r--arch/powerpc/include/asm/barrier.h2
-rw-r--r--arch/powerpc/include/asm/book3s/32/hash.h45
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h195
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h41
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h38
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h14
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h2
-rw-r--r--arch/powerpc/include/asm/book3s/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/checksum.h2
-rw-r--r--arch/powerpc/include/asm/code-patching.h34
-rw-r--r--arch/powerpc/include/asm/head-64.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/hw_irq.h23
-rw-r--r--arch/powerpc/include/asm/inst.h94
-rw-r--r--arch/powerpc/include/asm/interrupt.h70
-rw-r--r--arch/powerpc/include/asm/irq.h2
-rw-r--r--arch/powerpc/include/asm/kup.h50
-rw-r--r--arch/powerpc/include/asm/kvm_guest.h4
-rw-r--r--arch/powerpc/include/asm/livepatch.h2
-rw-r--r--arch/powerpc/include/asm/mmu.h19
-rw-r--r--arch/powerpc/include/asm/mmu_context.h1
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h5
-rw-r--r--arch/powerpc/include/asm/nohash/32/kup-8xx.h46
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-44x.h1
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h43
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-4k.h6
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h8
-rw-r--r--arch/powerpc/include/asm/paca.h9
-rw-r--r--arch/powerpc/include/asm/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/pgtable.h11
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h84
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h15
-rw-r--r--arch/powerpc/include/asm/probes.h4
-rw-r--r--arch/powerpc/include/asm/processor.h21
-rw-r--r--arch/powerpc/include/asm/ps3.h4
-rw-r--r--arch/powerpc/include/asm/ptrace.h66
-rw-r--r--arch/powerpc/include/asm/reg.h13
-rw-r--r--arch/powerpc/include/asm/security_features.h4
-rw-r--r--arch/powerpc/include/asm/set_memory.h34
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/sstep.h7
-rw-r--r--arch/powerpc/include/asm/tlb.h6
-rw-r--r--arch/powerpc/include/asm/unaligned.h22
-rw-r--r--arch/powerpc/include/asm/uprobes.h4
-rw-r--r--arch/powerpc/include/asm/vas.h109
-rw-r--r--arch/powerpc/include/asm/xics.h4
-rw-r--r--arch/powerpc/include/uapi/asm/papr_pdsm.h6
-rw-r--r--arch/powerpc/include/uapi/asm/vas-api.h6
-rw-r--r--arch/powerpc/kernel/asm-offsets.c104
-rw-r--r--arch/powerpc/kernel/crash_dump.c6
-rw-r--r--arch/powerpc/kernel/entry_32.S54
-rw-r--r--arch/powerpc/kernel/entry_64.S516
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S52
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S251
-rw-r--r--arch/powerpc/kernel/firmware.c10
-rw-r--r--arch/powerpc/kernel/fpu.S4
-rw-r--r--arch/powerpc/kernel/head_32.h41
-rw-r--r--arch/powerpc/kernel/head_40x.S36
-rw-r--r--arch/powerpc/kernel/head_44x.S50
-rw-r--r--arch/powerpc/kernel/head_64.S25
-rw-r--r--arch/powerpc/kernel/head_8xx.S25
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S141
-rw-r--r--arch/powerpc/kernel/head_booke.h58
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S37
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c4
-rw-r--r--arch/powerpc/kernel/interrupt.c484
-rw-r--r--arch/powerpc/kernel/interrupt_64.S774
-rw-r--r--arch/powerpc/kernel/irq.c103
-rw-r--r--arch/powerpc/kernel/jump_label.c4
-rw-r--r--arch/powerpc/kernel/kgdb.c19
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c4
-rw-r--r--arch/powerpc/kernel/kprobes.c64
-rw-r--r--arch/powerpc/kernel/mce.c2
-rw-r--r--arch/powerpc/kernel/mce_power.c50
-rw-r--r--arch/powerpc/kernel/misc_32.S6
-rw-r--r--arch/powerpc/kernel/module.c4
-rw-r--r--arch/powerpc/kernel/module_32.c19
-rw-r--r--arch/powerpc/kernel/module_64.c55
-rw-r--r--arch/powerpc/kernel/optprobes.c155
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/process.c107
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c121
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-adv.c20
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-noadv.c14
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c5
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c14
-rw-r--r--arch/powerpc/kernel/security.c21
-rw-r--r--arch/powerpc/kernel/setup-common.c8
-rw-r--r--arch/powerpc/kernel/setup_32.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c13
-rw-r--r--arch/powerpc/kernel/signal.c12
-rw-r--r--arch/powerpc/kernel/signal_32.c106
-rw-r--r--arch/powerpc/kernel/signal_64.c49
-rw-r--r--arch/powerpc/kernel/smp.c15
-rw-r--r--arch/powerpc/kernel/stacktrace.c34
-rw-r--r--arch/powerpc/kernel/syscalls.c3
-rw-r--r--arch/powerpc/kernel/sysfs.c14
-rw-r--r--arch/powerpc/kernel/tau_6xx.c2
-rw-r--r--arch/powerpc/kernel/time.c13
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c51
-rw-r--r--arch/powerpc/kernel/traps.c58
-rw-r--r--arch/powerpc/kernel/udbg_16550.c39
-rw-r--r--arch/powerpc/kernel/uprobes.c8
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile7
-rw-r--r--arch/powerpc/kernel/vector.S8
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S19
-rw-r--r--arch/powerpc/kernel/watchdog.c1
-rw-r--r--arch/powerpc/kexec/crash.c4
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c20
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c25
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c25
-rw-r--r--arch/powerpc/kvm/powerpc.c4
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/code-patching.c178
-rw-r--r--arch/powerpc/lib/error-inject.c2
-rw-r--r--arch/powerpc/lib/feature-fixups.c266
-rw-r--r--arch/powerpc/lib/restart_table.c56
-rw-r--r--arch/powerpc/lib/sstep.c39
-rw-r--r--arch/powerpc/lib/test_emulate_step.c38
-rw-r--r--arch/powerpc/math-emu/math.c2
-rw-r--r--arch/powerpc/math-emu/math_efp.c2
-rw-r--r--arch/powerpc/mm/Makefile2
-rw-r--r--arch/powerpc/mm/book3s32/Makefile1
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S6
-rw-r--r--arch/powerpc/mm/book3s32/kuap.c33
-rw-r--r--arch/powerpc/mm/book3s32/kuep.c42
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c20
-rw-r--r--arch/powerpc/mm/book3s32/mmu_context.c48
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c24
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c8
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c6
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c70
-rw-r--r--arch/powerpc/mm/fault.c4
-rw-r--r--arch/powerpc/mm/ioremap_32.c4
-rw-r--r--arch/powerpc/mm/ioremap_64.c2
-rw-r--r--arch/powerpc/mm/maccess.c4
-rw-r--r--arch/powerpc/mm/mem.c7
-rw-r--r--arch/powerpc/mm/nohash/44x.c17
-rw-r--r--arch/powerpc/mm/nohash/8xx.c52
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c173
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S13
-rw-r--r--arch/powerpc/mm/pageattr.c134
-rw-r--r--arch/powerpc/mm/pgtable.c8
-rw-r--r--arch/powerpc/mm/pgtable_32.c60
-rw-r--r--arch/powerpc/mm/pgtable_64.c4
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c22
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c13
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c240
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c24
-rw-r--r--arch/powerpc/perf/Makefile6
-rw-r--r--arch/powerpc/perf/callchain.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c4
-rw-r--r--arch/powerpc/perf/generic-compat-pmu.c170
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c1
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c4
-rw-r--r--arch/powerpc/platforms/Kconfig3
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype18
-rw-r--r--arch/powerpc/platforms/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/Kconfig15
-rw-r--r--arch/powerpc/platforms/book3s/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c493
-rw-r--r--arch/powerpc/platforms/cell/smp.c3
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c4
-rw-r--r--arch/powerpc/platforms/microwatt/Kconfig13
-rw-r--r--arch/powerpc/platforms/microwatt/Makefile1
-rw-r--r--arch/powerpc/platforms/microwatt/rng.c48
-rw-r--r--arch/powerpc/platforms/microwatt/setup.c41
-rw-r--r--arch/powerpc/platforms/pasemi/idle.c5
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c5
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig14
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci.c2
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c10
-rw-r--r--arch/powerpc/platforms/powernv/vas-api.c278
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c27
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c173
-rw-r--r--arch/powerpc/platforms/powernv/vas-trace.h4
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c264
-rw-r--r--arch/powerpc/platforms/powernv/vas.h50
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig9
-rw-r--r--arch/powerpc/platforms/ps3/mm.c12
-rw-r--r--arch/powerpc/platforms/ps3/setup.c43
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c9
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c9
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c92
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S29
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c101
-rw-r--r--arch/powerpc/platforms/pseries/ras.c6
-rw-r--r--arch/powerpc/platforms/pseries/setup.c16
-rw-r--r--arch/powerpc/platforms/pseries/smp.c7
-rw-r--r--arch/powerpc/platforms/pseries/vas.c595
-rw-r--r--arch/powerpc/platforms/pseries/vas.h125
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c2
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c4
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig3
-rw-r--r--arch/powerpc/sysdev/xics/Makefile1
-rw-r--r--arch/powerpc/sysdev/xics/ics-native.c257
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c2
-rw-r--r--arch/powerpc/sysdev/xive/common.c42
-rw-r--r--arch/powerpc/xmon/xmon.c196
-rw-r--r--arch/riscv/Kconfig16
-rw-r--r--arch/riscv/Makefile1
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts2
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h3
-rw-r--r--arch/riscv/include/asm/efi.h4
-rw-r--r--arch/riscv/include/asm/io.h13
-rw-r--r--arch/riscv/include/asm/kfence.h63
-rw-r--r--arch/riscv/include/asm/kprobes.h7
-rw-r--r--arch/riscv/include/asm/mmu_context.h2
-rw-r--r--arch/riscv/include/asm/page.h84
-rw-r--r--arch/riscv/include/asm/pci.h2
-rw-r--r--arch/riscv/include/asm/pgalloc.h2
-rw-r--r--arch/riscv/include/asm/pgtable-64.h9
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h5
-rw-r--r--arch/riscv/include/asm/pgtable.h173
-rw-r--r--arch/riscv/include/asm/ptrace.h31
-rw-r--r--arch/riscv/include/asm/sections.h17
-rw-r--r--arch/riscv/include/asm/set_memory.h24
-rw-r--r--arch/riscv/include/asm/switch_to.h11
-rw-r--r--arch/riscv/include/asm/thread_info.h15
-rw-r--r--arch/riscv/include/asm/tlbflush.h5
-rw-r--r--arch/riscv/include/asm/unistd.h1
-rw-r--r--arch/riscv/kernel/Makefile2
-rw-r--r--arch/riscv/kernel/asm-offsets.c2
-rw-r--r--arch/riscv/kernel/cpufeature.c6
-rw-r--r--arch/riscv/kernel/entry.S108
-rw-r--r--arch/riscv/kernel/head.S4
-rw-r--r--arch/riscv/kernel/kexec_relocate.S4
-rw-r--r--arch/riscv/kernel/machine_kexec.c2
-rw-r--r--arch/riscv/kernel/probes/kprobes.c40
-rw-r--r--arch/riscv/kernel/process.c2
-rw-r--r--arch/riscv/kernel/setup.c23
-rw-r--r--arch/riscv/kernel/signal.c4
-rw-r--r--arch/riscv/kernel/stacktrace.c8
-rw-r--r--arch/riscv/kernel/traps.c35
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S1
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S2
-rw-r--r--arch/riscv/lib/uaccess.S180
-rw-r--r--arch/riscv/mm/context.c14
-rw-r--r--arch/riscv/mm/fault.c11
-rw-r--r--arch/riscv/mm/init.c324
-rw-r--r--arch/riscv/mm/physaddr.c2
-rw-r--r--arch/riscv/mm/ptdump.c6
-rw-r--r--arch/riscv/mm/tlbflush.c69
-rw-r--r--arch/riscv/net/bpf_jit_comp32.c4
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c4
-rw-r--r--arch/s390/Kconfig14
-rw-r--r--arch/s390/Makefile17
-rw-r--r--arch/s390/boot/als.c6
-rw-r--r--arch/s390/boot/boot.h1
-rw-r--r--arch/s390/boot/compressed/Makefile7
-rw-r--r--arch/s390/boot/compressed/clz_ctz.c2
-rw-r--r--arch/s390/boot/compressed/decompressor.c8
-rw-r--r--arch/s390/boot/head.S3
-rw-r--r--arch/s390/boot/ipl_parm.c59
-rw-r--r--arch/s390/boot/mem_detect.c73
-rw-r--r--arch/s390/boot/startup.c92
-rw-r--r--arch/s390/boot/text_dma.S19
-rw-r--r--arch/s390/boot/uv.c26
-rw-r--r--arch/s390/configs/debug_defconfig29
-rw-r--r--arch/s390/configs/defconfig29
-rw-r--r--arch/s390/configs/zfcpdump_defconfig3
-rw-r--r--arch/s390/hypfs/hypfs_sprp.c13
-rw-r--r--arch/s390/include/asm/ap.h220
-rw-r--r--arch/s390/include/asm/bitops.h10
-rw-r--r--arch/s390/include/asm/ccwgroup.h3
-rw-r--r--arch/s390/include/asm/checksum.h10
-rw-r--r--arch/s390/include/asm/cio.h1
-rw-r--r--arch/s390/include/asm/cmpxchg.h44
-rw-r--r--arch/s390/include/asm/cpu_mcf.h37
-rw-r--r--arch/s390/include/asm/ctl_reg.h2
-rw-r--r--arch/s390/include/asm/elf.h15
-rw-r--r--arch/s390/include/asm/entry-common.h1
-rw-r--r--arch/s390/include/asm/facility.h25
-rw-r--r--arch/s390/include/asm/ftrace.h1
-rw-r--r--arch/s390/include/asm/irqflags.h16
-rw-r--r--arch/s390/include/asm/kasan.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h18
-rw-r--r--arch/s390/include/asm/linkage.h2
-rw-r--r--arch/s390/include/asm/lowcore.h38
-rw-r--r--arch/s390/include/asm/mmu_context.h15
-rw-r--r--arch/s390/include/asm/nmi.h6
-rw-r--r--arch/s390/include/asm/page.h15
-rw-r--r--arch/s390/include/asm/pci.h7
-rw-r--r--arch/s390/include/asm/percpu.h27
-rw-r--r--arch/s390/include/asm/pgalloc.h3
-rw-r--r--arch/s390/include/asm/pgtable.h67
-rw-r--r--arch/s390/include/asm/preempt.h16
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/include/asm/ptrace.h36
-rw-r--r--arch/s390/include/asm/setup.h5
-rw-r--r--arch/s390/include/asm/sigp.h14
-rw-r--r--arch/s390/include/asm/softirq_stack.h13
-rw-r--r--arch/s390/include/asm/stacktrace.h116
-rw-r--r--arch/s390/include/asm/string.h59
-rw-r--r--arch/s390/include/asm/timex.h26
-rw-r--r--arch/s390/include/asm/tpi.h24
-rw-r--r--arch/s390/include/asm/types.h19
-rw-r--r--arch/s390/include/asm/uaccess.h36
-rw-r--r--arch/s390/include/asm/uv.h8
-rw-r--r--arch/s390/include/asm/vdso.h25
-rw-r--r--arch/s390/include/asm/vdso/gettimeofday.h1
-rw-r--r--arch/s390/include/uapi/asm/schid.h3
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/alternative.c3
-rw-r--r--arch/s390/kernel/asm-offsets.c11
-rw-r--r--arch/s390/kernel/compat_signal.c13
-rw-r--r--arch/s390/kernel/cpcmd.c42
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/diag.c11
-rw-r--r--arch/s390/kernel/early.c10
-rw-r--r--arch/s390/kernel/entry.S190
-rw-r--r--arch/s390/kernel/ftrace.c2
-rw-r--r--arch/s390/kernel/ipl.c15
-rw-r--r--arch/s390/kernel/irq.c34
-rw-r--r--arch/s390/kernel/kprobes.c14
-rw-r--r--arch/s390/kernel/machine_kexec.c3
-rw-r--r--arch/s390/kernel/mcount.S4
-rw-r--r--arch/s390/kernel/nmi.c129
-rw-r--r--arch/s390/kernel/nospec-branch.c17
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c1082
-rw-r--r--arch/s390/kernel/perf_cpum_cf_common.c28
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c1148
-rw-r--r--arch/s390/kernel/process.c6
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kernel/ptrace.c4
-rw-r--r--arch/s390/kernel/setup.c115
-rw-r--r--arch/s390/kernel/signal.c39
-rw-r--r--arch/s390/kernel/smp.c146
-rw-r--r--arch/s390/kernel/sthyi.c13
-rw-r--r--arch/s390/kernel/syscall.c39
-rw-r--r--arch/s390/kernel/sysinfo.c19
-rw-r--r--arch/s390/kernel/traps.c18
-rw-r--r--arch/s390/kernel/uprobes.c1
-rw-r--r--arch/s390/kernel/uv.c18
-rw-r--r--arch/s390/kernel/vdso.c63
-rw-r--r--arch/s390/kernel/vdso32/.gitignore2
-rw-r--r--arch/s390/kernel/vdso32/Makefile76
-rwxr-xr-xarch/s390/kernel/vdso32/gen_vdso_offsets.sh15
-rw-r--r--arch/s390/kernel/vdso32/note.S13
-rw-r--r--arch/s390/kernel/vdso32/vdso32.lds.S142
-rw-r--r--arch/s390/kernel/vdso32/vdso32_wrapper.S15
-rw-r--r--arch/s390/kernel/vdso32/vdso_user_wrapper.S21
-rw-r--r--arch/s390/kernel/vdso64/Makefile8
-rwxr-xr-xarch/s390/kernel/vdso64/gen_vdso_offsets.sh15
-rw-r--r--arch/s390/kernel/vdso64/vdso64.lds.S6
-rw-r--r--arch/s390/kernel/vdso64/vdso_user_wrapper.S17
-rw-r--r--arch/s390/kvm/diag.c18
-rw-r--r--arch/s390/kvm/kvm-s390.c30
-rw-r--r--arch/s390/lib/string.c131
-rw-r--r--arch/s390/lib/test_unwind.c5
-rw-r--r--arch/s390/lib/uaccess.c25
-rw-r--r--arch/s390/lib/xor.c5
-rw-r--r--arch/s390/mm/fault.c49
-rw-r--r--arch/s390/mm/kasan_init.c35
-rw-r--r--arch/s390/mm/maccess.c32
-rw-r--r--arch/s390/mm/page-states.c12
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c7
-rw-r--r--arch/s390/pci/pci_insn.c55
-rw-r--r--arch/s390/pci/pci_irq.c46
-rw-r--r--arch/s390/pci/pci_mmio.c24
-rw-r--r--arch/s390/purgatory/Makefile1
-rw-r--r--arch/sh/Kbuild4
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/Makefile5
-rw-r--r--arch/sh/include/asm/pgalloc.h1
-rw-r--r--arch/sh/include/asm/pgtable-3level.h4
-rw-r--r--arch/sh/include/asm/pgtable.h2
-rw-r--r--arch/sh/include/asm/unaligned-sh4a.h199
-rw-r--r--arch/sh/include/asm/unaligned.h13
-rw-r--r--arch/sh/kernel/setup.c5
-rw-r--r--arch/sparc/Kconfig6
-rw-r--r--arch/sparc/Makefile3
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h2
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h1
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h1
-rw-r--r--arch/sparc/include/asm/pgtable_32.h9
-rw-r--r--arch/sparc/include/asm/pgtable_64.h18
-rw-r--r--arch/sparc/include/asm/unaligned.h11
-rw-r--r--arch/sparc/include/asm/vio.h2
-rw-r--r--arch/sparc/kernel/ds.c6
-rw-r--r--arch/sparc/kernel/sstate.c1
-rw-r--r--arch/sparc/kernel/syscalls/Makefile18
-rw-r--r--arch/sparc/kernel/vio.c4
-rw-r--r--arch/sparc/mm/hugetlbpage.c6
-rw-r--r--arch/sparc/mm/init_64.c1
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c3
-rw-r--r--arch/um/Kbuild1
-rw-r--r--arch/um/Kconfig33
-rw-r--r--arch/um/Makefile3
-rw-r--r--arch/um/drivers/Kconfig20
-rw-r--r--arch/um/drivers/Makefile1
-rw-r--r--arch/um/drivers/chan_user.c3
-rw-r--r--arch/um/drivers/line.c15
-rw-r--r--arch/um/drivers/line.h5
-rw-r--r--arch/um/drivers/mconsole_kern.c3
-rw-r--r--arch/um/drivers/slip_user.c3
-rw-r--r--arch/um/drivers/ssl.c1
-rw-r--r--arch/um/drivers/stdio_console.c1
-rw-r--r--arch/um/drivers/ubd_kern.c163
-rw-r--r--arch/um/drivers/virt-pci.c895
-rw-r--r--arch/um/drivers/virtio_uml.c40
-rw-r--r--arch/um/include/asm/Kbuild4
-rw-r--r--arch/um/include/asm/cacheflush.h9
-rw-r--r--arch/um/include/asm/cpufeature.h157
-rw-r--r--arch/um/include/asm/fpu/api.h20
-rw-r--r--arch/um/include/asm/futex.h14
-rw-r--r--arch/um/include/asm/io.h7
-rw-r--r--arch/um/include/asm/irq.h8
-rw-r--r--arch/um/include/asm/irqflags.h10
-rw-r--r--arch/um/include/asm/msi.h1
-rw-r--r--arch/um/include/asm/pci.h39
-rw-r--r--arch/um/include/asm/pgalloc.h1
-rw-r--r--arch/um/include/asm/pgtable-2level.h1
-rw-r--r--arch/um/include/asm/pgtable-3level.h3
-rw-r--r--arch/um/include/asm/processor-generic.h8
-rw-r--r--arch/um/include/asm/tlb.h2
-rw-r--r--arch/um/include/asm/xor.h17
-rw-r--r--arch/um/include/linux/time-internal.h12
-rw-r--r--arch/um/include/linux/virtio-uml.h13
-rw-r--r--arch/um/include/shared/irq_user.h1
-rw-r--r--arch/um/include/shared/kern_util.h1
-rw-r--r--arch/um/include/shared/longjmp.h14
-rw-r--r--arch/um/include/shared/os.h8
-rw-r--r--arch/um/include/shared/timetravel.h22
-rw-r--r--arch/um/kernel/Makefile14
-rw-r--r--arch/um/kernel/ioport.c13
-rw-r--r--arch/um/kernel/irq.c52
-rw-r--r--arch/um/kernel/ksyms.c2
-rw-r--r--arch/um/kernel/skas/clone.c2
-rw-r--r--arch/um/kernel/skas/uaccess.c136
-rw-r--r--arch/um/kernel/time.c35
-rw-r--r--arch/um/kernel/um_arch.c49
-rw-r--r--arch/um/os-Linux/helper.c4
-rw-r--r--arch/um/os-Linux/signal.c64
-rw-r--r--arch/um/os-Linux/skas/process.c2
-rw-r--r--arch/um/os-Linux/start_up.c32
-rw-r--r--arch/x86/Kconfig18
-rw-r--r--arch/x86/Makefile3
-rw-r--r--arch/x86/Makefile.um2
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/events/core.c12
-rw-r--r--arch/x86/events/intel/core.c23
-rw-r--r--arch/x86/events/intel/cstate.c23
-rw-r--r--arch/x86/events/intel/lbr.c6
-rw-r--r--arch/x86/events/intel/uncore_snbep.c6
-rw-r--r--arch/x86/events/perf_event.h18
-rw-r--r--arch/x86/include/asm/desc.h1
-rw-r--r--arch/x86/include/asm/elf.h4
-rw-r--r--arch/x86/include/asm/fpu/internal.h202
-rw-r--r--arch/x86/include/asm/fpu/signal.h2
-rw-r--r--arch/x86/include/asm/fpu/xstate.h78
-rw-r--r--arch/x86/include/asm/kvm_host.h7
-rw-r--r--arch/x86/include/asm/pgalloc.h2
-rw-r--r--arch/x86/include/asm/pgtable.h65
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/pkeys.h9
-rw-r--r--arch/x86/include/asm/pkru.h62
-rw-r--r--arch/x86/include/asm/processor.h9
-rw-r--r--arch/x86/include/asm/sigframe.h2
-rw-r--r--arch/x86/include/asm/special_insns.h14
-rw-r--r--arch/x86/include/asm/svm.h2
-rw-r--r--arch/x86/include/asm/unaligned.h15
-rw-r--r--arch/x86/include/uapi/asm/auxvec.h4
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/apic/io_apic.c6
-rw-r--r--arch/x86/kernel/apic/msi.c11
-rw-r--r--arch/x86/kernel/cpu/common.c37
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c3
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c27
-rw-r--r--arch/x86/kernel/dumpstack.c2
-rw-r--r--arch/x86/kernel/fpu/core.c282
-rw-r--r--arch/x86/kernel/fpu/init.c15
-rw-r--r--arch/x86/kernel/fpu/regset.c223
-rw-r--r--arch/x86/kernel/fpu/signal.c438
-rw-r--r--arch/x86/kernel/fpu/xstate.c644
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/jump_label.c7
-rw-r--r--arch/x86/kernel/kprobes/core.c6
-rw-r--r--arch/x86/kernel/process.c22
-rw-r--r--arch/x86/kernel/process_64.c28
-rw-r--r--arch/x86/kernel/setup.c6
-rw-r--r--arch/x86/kernel/signal.c88
-rw-r--r--arch/x86/kernel/trace.c234
-rw-r--r--arch/x86/kernel/traps.c5
-rw-r--r--arch/x86/kvm/cpuid.c58
-rw-r--r--arch/x86/kvm/hyperv.c20
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/ioapic.h4
-rw-r--r--arch/x86/kvm/mmu/mmu.c32
-rw-r--r--arch/x86/kvm/mmu/paging.h14
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h4
-rw-r--r--arch/x86/kvm/mmu/spte.h6
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c35
-rw-r--r--arch/x86/kvm/svm/avic.c2
-rw-r--r--arch/x86/kvm/svm/nested.c70
-rw-r--r--arch/x86/kvm/svm/sev.c60
-rw-r--r--arch/x86/kvm/svm/svm.c104
-rw-r--r--arch/x86/kvm/svm/svm.h7
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.h2
-rw-r--r--arch/x86/kvm/trace.h15
-rw-r--r--arch/x86/kvm/vmx/nested.c56
-rw-r--r--arch/x86/kvm/vmx/vmx.h4
-rw-r--r--arch/x86/kvm/x86.c78
-rw-r--r--arch/x86/math-emu/fpu_proto.h2
-rw-r--r--arch/x86/math-emu/load_store.c2
-rw-r--r--arch/x86/math-emu/reg_ld_str.c2
-rw-r--r--arch/x86/mm/extable.c2
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init_64.c9
-rw-r--r--arch/x86/mm/pat/set_memory.c4
-rw-r--r--arch/x86/mm/pgtable.c2
-rw-r--r--arch/x86/mm/pkeys.c22
-rw-r--r--arch/x86/net/bpf_jit_comp.c10
-rw-r--r--arch/x86/net/bpf_jit_comp32.c6
-rw-r--r--arch/x86/pci/mmconfig-shared.c10
-rw-r--r--arch/x86/purgatory/purgatory.c2
-rw-r--r--arch/x86/tools/chkobjdump.awk1
-rw-r--r--arch/x86/tools/relocs.c8
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/xtensa/Kbuild1
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h1
-rw-r--r--arch/xtensa/include/asm/unaligned.h29
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h3
-rw-r--r--arch/xtensa/platforms/iss/console.c9
-rw-r--r--arch/xtensa/platforms/iss/setup.c1
-rw-r--r--block/Kconfig9
-rw-r--r--block/Kconfig.iosched6
-rw-r--r--block/Makefile2
-rw-r--r--block/blk-cgroup.c29
-rw-r--r--block/blk-core.c5
-rw-r--r--block/blk-exec.c25
-rw-r--r--block/blk-flush.c13
-rw-r--r--block/blk-iocost.c19
-rw-r--r--block/blk-iolatency.c6
-rw-r--r--block/blk-mq-sched.c17
-rw-r--r--block/blk-mq.c41
-rw-r--r--block/blk.h6
-rw-r--r--block/bsg-lib.c4
-rw-r--r--block/bsg.c6
-rw-r--r--block/genhd.c7
-rw-r--r--block/kyber-iosched.c2
-rw-r--r--block/mq-deadline-cgroup.c126
-rw-r--r--block/mq-deadline-cgroup.h114
-rw-r--r--block/mq-deadline.c (renamed from block/mq-deadline-main.c)73
-rw-r--r--block/partitions/core.c29
-rw-r--r--block/partitions/ldm.c4
-rw-r--r--block/partitions/ldm.h3
-rw-r--r--block/partitions/msdos.c24
-rw-r--r--block/scsi_ioctl.c19
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/testmgr.c5
-rw-r--r--crypto/testmgr.h49
-rw-r--r--drivers/accessibility/braille/braille_console.c3
-rw-r--r--drivers/accessibility/speakup/i18n.c7
-rw-r--r--drivers/accessibility/speakup/i18n.h9
-rw-r--r--drivers/accessibility/speakup/main.c4
-rw-r--r--drivers/accessibility/speakup/spk_ttyio.c9
-rw-r--r--drivers/acpi/Kconfig15
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpi_amba.c1
-rw-r--r--drivers/acpi/acpi_video.c9
-rw-r--r--drivers/acpi/acpica/nsrepair2.c7
-rw-r--r--drivers/acpi/arm64/Makefile1
-rw-r--r--drivers/acpi/arm64/dma.c50
-rw-r--r--drivers/acpi/arm64/iort.c132
-rw-r--r--drivers/acpi/bus.c5
-rw-r--r--drivers/acpi/dptf/dptf_pch_fivr.c51
-rw-r--r--drivers/acpi/nfit/core.c3
-rw-r--r--drivers/acpi/prmt.c6
-rw-r--r--drivers/acpi/resource.c9
-rw-r--r--drivers/acpi/scan.c78
-rw-r--r--drivers/acpi/utils.c7
-rw-r--r--drivers/acpi/viot.c366
-rw-r--r--drivers/acpi/x86/s2idle.c26
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/libata-scsi.c30
-rw-r--r--drivers/ata/libata-sff.c35
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c1
-rw-r--r--drivers/base/Makefile3
-rw-r--r--drivers/base/arch_topology.c27
-rw-r--r--drivers/base/attribute_container.c6
-rw-r--r--drivers/base/auxiliary.c8
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c8
-rw-r--r--drivers/base/component.c96
-rw-r--r--drivers/base/core.c37
-rw-r--r--drivers/base/cpu.c4
-rw-r--r--drivers/base/dd.c196
-rw-r--r--drivers/base/devcoredump.c4
-rw-r--r--drivers/base/devres.c105
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile1
-rw-r--r--drivers/base/firmware_loader/fallback.c14
-rw-r--r--drivers/base/firmware_loader/firmware.h10
-rw-r--r--drivers/base/firmware_loader/main.c2
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/node.c4
-rw-r--r--drivers/base/platform.c22
-rw-r--r--drivers/base/power/domain.c38
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/base/property.c16
-rw-r--r--drivers/base/test/property-entry-test.c56
-rw-r--r--drivers/base/trace.c10
-rw-r--r--drivers/base/trace.h56
-rw-r--r--drivers/block/loop.c297
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c150
-rw-r--r--drivers/block/n64cart.c2
-rw-r--r--drivers/block/nbd.c76
-rw-r--r--drivers/block/null_blk/main.c1
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c33
-rw-r--r--drivers/block/sunvdc.c3
-rw-r--r--drivers/block/virtio_blk.c56
-rw-r--r--drivers/block/xen-blkfront.c224
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/bluetooth/hci_ldisc.c12
-rw-r--r--drivers/bus/brcmstb_gisb.c1
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c8
-rw-r--r--drivers/bus/fsl-mc/dprc.c4
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c10
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c19
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-msi.c2
-rw-r--r--drivers/bus/fsl-mc/mc-io.c6
-rw-r--r--drivers/bus/fsl-mc/mc-sys.c19
-rw-r--r--drivers/bus/mhi/core/internal.h2
-rw-r--r--drivers/bus/mhi/core/main.c26
-rw-r--r--drivers/bus/mhi/core/pm.c19
-rw-r--r--drivers/bus/mhi/pci_generic.c50
-rw-r--r--drivers/bus/qcom-ebi2.c4
-rw-r--r--drivers/bus/ti-sysc.c22
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/Kconfig34
-rw-r--r--drivers/char/Makefile3
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/ixp4xx-rng.c53
-rw-r--r--drivers/char/hw_random/pseries-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c7
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c3
-rw-r--r--drivers/char/pcmcia/scr24x_cs.c1
-rw-r--r--drivers/char/pcmcia/synclink_cs.c24
-rw-r--r--drivers/char/powernv-op-panel.c1
-rw-r--r--drivers/char/raw.c362
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c8
-rw-r--r--drivers/char/ttyprintk.c52
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/char/xillybus/Kconfig22
-rw-r--r--drivers/char/xillybus/Makefile2
-rw-r--r--drivers/char/xillybus/xillybus.h10
-rw-r--r--drivers/char/xillybus/xillybus_class.c262
-rw-r--r--drivers/char/xillybus/xillybus_class.h30
-rw-r--r--drivers/char/xillybus/xillybus_core.c180
-rw-r--r--drivers/char/xillybus/xillybus_of.c1
-rw-r--r--drivers/char/xillybus/xillybus_pcie.c1
-rw-r--r--drivers/char/xillybus/xillyusb.c2259
-rw-r--r--drivers/clk/analogbits/wrpll-cln28hpc.c4
-rw-r--r--drivers/clk/clk-devres.c9
-rw-r--r--drivers/clk/clk-divider.c75
-rw-r--r--drivers/clk/clk-k210.c1
-rw-r--r--drivers/clk/clk-lmk04832.c20
-rw-r--r--drivers/clk/clk-stm32f4.c10
-rw-r--r--drivers/clk/clk-stm32mp1.c1
-rw-r--r--drivers/clk/hisilicon/Kconfig1
-rw-r--r--drivers/clk/hisilicon/clk-hi3559a.c39
-rw-r--r--drivers/clk/meson/clk-regmap.c19
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c2
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c79
-rw-r--r--drivers/clk/renesas/renesas-rzg2l-cpg.c110
-rw-r--r--drivers/clk/renesas/renesas-rzg2l-cpg.h37
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c10
-rw-r--r--drivers/comedi/drivers/comedi_8254.c3
-rw-r--r--drivers/comedi/drivers/comedi_isadma.c2
-rw-r--r--drivers/comedi/drivers/jr3_pci.c15
-rw-r--r--drivers/comedi/drivers/ni_routes.c7
-rw-r--r--drivers/comedi/drivers/ni_routes.h1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes.h1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/all.h1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6220.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6221.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6229.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6251.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6254.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6259.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6534.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6602.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6713.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6723.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6733.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values.h1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values/all.h1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values/ni_660x.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values/ni_eseries.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values/ni_mseries.c1
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c1
-rwxr-xr-xdrivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py7
-rwxr-xr-xdrivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py1
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/csv_collection.py1
-rwxr-xr-xdrivers/comedi/drivers/ni_routing/tools/make_blank_csv.py1
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/ni_names.py1
-rw-r--r--drivers/comedi/drivers/ni_tio.c12
-rw-r--r--drivers/comedi/drivers/tests/comedi_example_test.c1
-rw-r--r--drivers/comedi/drivers/tests/ni_routes_test.c1
-rw-r--r--drivers/comedi/drivers/tests/unittest.h1
-rw-r--r--drivers/counter/104-quad-8.c13
-rw-r--r--drivers/counter/Kconfig10
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/ftm-quaddec.c2
-rw-r--r--drivers/counter/intel-qep.c544
-rw-r--r--drivers/counter/interrupt-cnt.c8
-rw-r--r--drivers/counter/microchip-tcb-capture.c4
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c4
-rw-r--r--drivers/counter/stm32-timer-cnt.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm10
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c6
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c324
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c12
-rw-r--r--drivers/cpufreq/cpufreq.c44
-rw-r--r--drivers/cpufreq/intel_pstate.c11
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c1
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c23
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c14
-rw-r--r--drivers/cpuidle/governors/teo.c48
-rw-r--r--drivers/crypto/Kconfig6
-rw-r--r--drivers/crypto/gemini/sl3516-ce-cipher.c2
-rw-r--r--drivers/crypto/gemini/sl3516-ce-core.c2
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c3
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/crypto/nx/Kconfig1
-rw-r--r--drivers/crypto/nx/Makefile2
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c6
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c (renamed from drivers/crypto/nx/nx-842-pseries.c)138
-rw-r--r--drivers/crypto/omap-crypto.c3
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c2
-rw-r--r--drivers/cxl/Kconfig43
-rw-r--r--drivers/cxl/Makefile12
-rw-r--r--drivers/cxl/acpi.c434
-rw-r--r--drivers/cxl/bus.c29
-rw-r--r--drivers/cxl/core.c1067
-rw-r--r--drivers/cxl/cxl.h332
-rw-r--r--drivers/cxl/mem.h81
-rw-r--r--drivers/cxl/pci.c (renamed from drivers/cxl/mem.c)439
-rw-r--r--drivers/cxl/pci.h2
-rw-r--r--drivers/cxl/pmem.c230
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/dma-buf/sync_file.c13
-rw-r--r--drivers/dma/altera-msgdma.c20
-rw-r--r--drivers/dma/fsl-qdma.c6
-rw-r--r--drivers/dma/hsu/hsu.c3
-rw-r--r--drivers/dma/idxd/cdev.c2
-rw-r--r--drivers/dma/idxd/idxd.h14
-rw-r--r--drivers/dma/idxd/init.c33
-rw-r--r--drivers/dma/idxd/irq.c27
-rw-r--r--drivers/dma/idxd/submit.c92
-rw-r--r--drivers/dma/idxd/sysfs.c2
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c58
-rw-r--r--drivers/dma/ipu/ipu_idmac.c2
-rw-r--r--drivers/dma/mpc512x_dma.c1
-rw-r--r--drivers/dma/of-dma.c9
-rw-r--r--drivers/dma/qcom/gpi.c1
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c5
-rw-r--r--drivers/dma/sh/Makefile2
-rw-r--r--drivers/dma/sh/shdma-of.c76
-rw-r--r--drivers/dma/sh/usb-dmac.c2
-rw-r--r--drivers/dma/stm32-dma.c4
-rw-r--r--drivers/dma/stm32-dmamux.c6
-rw-r--r--drivers/dma/sun4i-dma.c5
-rw-r--r--drivers/dma/ti/k3-udma.c1
-rw-r--r--drivers/dma/ti/omap-dma.c3
-rw-r--r--drivers/dma/uniphier-xdmac.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c12
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c44
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/edac/altera_edac.c1
-rw-r--r--drivers/eisa/eisa-bus.c23
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-intel-mrfld.c9
-rw-r--r--drivers/extcon/extcon-max8997.c1
-rw-r--r--drivers/extcon/extcon-sm5502.c212
-rw-r--r--drivers/extcon/extcon-sm5502.h82
-rw-r--r--drivers/firewire/nosy.c43
-rw-r--r--drivers/firmware/Kconfig3
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_ffa/Kconfig21
-rw-r--r--drivers/firmware/arm_ffa/Makefile6
-rw-r--r--drivers/firmware/arm_ffa/bus.c210
-rw-r--r--drivers/firmware/arm_ffa/common.h31
-rw-r--r--drivers/firmware/arm_ffa/driver.c733
-rw-r--r--drivers/firmware/arm_ffa/smccc.c39
-rw-r--r--drivers/firmware/arm_scmi/bus.c8
-rw-r--r--drivers/firmware/arm_scmi/common.h2
-rw-r--r--drivers/firmware/arm_scmi/driver.c38
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c3
-rw-r--r--drivers/firmware/arm_scmi/notify.c4
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c26
-rw-r--r--drivers/firmware/arm_scmi/sensors.c6
-rw-r--r--drivers/firmware/arm_scmi/smc.c3
-rw-r--r--drivers/firmware/arm_scpi.c11
-rw-r--r--drivers/firmware/broadcom/tee_bnxt_fw.c14
-rw-r--r--drivers/firmware/efi/dev-path-parser.c1
-rw-r--r--drivers/firmware/efi/efi.c13
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c71
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c4
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c2
-rw-r--r--drivers/firmware/efi/mokvar-table.c5
-rw-r--r--drivers/firmware/efi/tpm.c8
-rw-r--r--drivers/firmware/google/gsmi.c1
-rw-r--r--drivers/firmware/psci/psci.c12
-rw-r--r--drivers/firmware/qcom_scm.c3
-rw-r--r--drivers/firmware/stratix10-svc.c22
-rw-r--r--drivers/firmware/tegra/Makefile1
-rw-r--r--drivers/firmware/tegra/bpmp-private.h3
-rw-r--r--drivers/firmware/tegra/bpmp-tegra210.c2
-rw-r--r--drivers/firmware/tegra/bpmp.c3
-rw-r--r--drivers/firmware/turris-mox-rwtm.c56
-rw-r--r--drivers/fpga/Kconfig4
-rw-r--r--drivers/fpga/altera-pr-ip-core.c10
-rw-r--r--drivers/fpga/dfl-fme-perf.c2
-rw-r--r--drivers/fpga/fpga-bridge.c40
-rw-r--r--drivers/fpga/fpga-mgr.c42
-rw-r--r--drivers/fpga/fpga-region.c30
-rw-r--r--drivers/fpga/machxo2-spi.c2
-rw-r--r--drivers/fpga/of-fpga-region.c8
-rw-r--r--drivers/fpga/stratix10-soc.c3
-rw-r--r--drivers/fsi/fsi-core.c4
-rw-r--r--drivers/fsi/fsi-master-aspeed.c33
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c2
-rw-r--r--drivers/fsi/fsi-master-gpio.c1
-rw-r--r--drivers/fsi/fsi-occ.c12
-rw-r--r--drivers/fsi/fsi-sbefifo.c10
-rw-r--r--drivers/fsi/fsi-scom.c105
-rw-r--r--drivers/gpio/Kconfig12
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-104-idio-16.c23
-rw-r--r--drivers/gpio/gpio-adp5520.c18
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c11
-rw-r--r--drivers/gpio/gpio-ath79.c9
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c11
-rw-r--r--drivers/gpio/gpio-da9052.c11
-rw-r--r--drivers/gpio/gpio-da9055.c11
-rw-r--r--drivers/gpio/gpio-dwapb.c23
-rw-r--r--drivers/gpio/gpio-idt3243x.c206
-rw-r--r--drivers/gpio/gpio-logicvc.c4
-rw-r--r--drivers/gpio/gpio-lp87565.c6
-rw-r--r--drivers/gpio/gpio-mockup.c9
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-mxs.c4
-rw-r--r--drivers/gpio/gpio-pca953x.c1
-rw-r--r--drivers/gpio/gpio-regmap.c32
-rw-r--r--drivers/gpio/gpio-spear-spics.c12
-rw-r--r--drivers/gpio/gpio-sprd.c10
-rw-r--r--drivers/gpio/gpio-sta2x11.c10
-rw-r--r--drivers/gpio/gpio-stmpe.c32
-rw-r--r--drivers/gpio/gpio-tc3589x.c11
-rw-r--r--drivers/gpio/gpio-tegra186.c14
-rw-r--r--drivers/gpio/gpio-tps65218.c13
-rw-r--r--drivers/gpio/gpio-tps6586x.c13
-rw-r--r--drivers/gpio/gpio-tps65910.c12
-rw-r--r--drivers/gpio/gpio-tps65912.c12
-rw-r--r--drivers/gpio/gpio-tps68470.c12
-rw-r--r--drivers/gpio/gpio-tqmx86.c6
-rw-r--r--drivers/gpio/gpio-visconti.c10
-rw-r--r--drivers/gpio/gpio-wm831x.c12
-rw-r--r--drivers/gpio/gpio-wm8350.c12
-rw-r--r--drivers/gpio/gpio-wm8994.c13
-rw-r--r--drivers/gpio/gpio-xgene.c11
-rw-r--r--drivers/gpio/gpio-xilinx.c391
-rw-r--r--drivers/gpio/gpio-zynq.c32
-rw-r--r--drivers/gpio/gpiolib-sysfs.c34
-rw-r--r--drivers/gpio/gpiolib.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c269
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c271
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c186
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c100
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c275
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c250
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c28
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c96
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c68
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h16
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c5
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h355
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h531
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h2
-rw-r--r--drivers/gpu/drm/amd/include/navi10_enum.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c95
-rw-r--r--drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h57
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c46
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c311
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c52
-rw-r--r--drivers/gpu/drm/drm_ioc32.c4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c7
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c227
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c136
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c19
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c31
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h21
-rw-r--r--drivers/gpu/drm/i915/i915_request.c8
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c9
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c22
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h5
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c15
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c60
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h5
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c27
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c22
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c1
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c2
-rw-r--r--drivers/hid/hid-apple.c2
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-ft260.c27
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c15
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c29
-rw-r--r--drivers/hid/usbhid/Kconfig2
-rw-r--r--drivers/hid/wacom_wac.c5
-rw-r--r--drivers/hv/channel_mgmt.c96
-rw-r--r--drivers/hv/vmbus_drv.c1
-rw-r--r--drivers/hwmon/occ/common.c7
-rw-r--r--drivers/hwspinlock/Kconfig9
-rw-r--r--drivers/hwspinlock/Makefile1
-rw-r--r--drivers/hwspinlock/sun6i_hwspinlock.c210
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c2
-rw-r--r--drivers/hwtracing/intel_th/core.c29
-rw-r--r--drivers/hwtracing/intel_th/gth.c16
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h3
-rw-r--r--drivers/hwtracing/intel_th/msu.c48
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c5
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c12
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c4
-rw-r--r--drivers/i2c/busses/i2c-cadence.c57
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c5
-rw-r--r--drivers/i2c/busses/i2c-i801.c136
-rw-r--r--drivers/i2c/busses/i2c-imx.c19
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c9
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c1
-rw-r--r--drivers/i2c/busses/i2c-riic.c23
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c73
-rw-r--r--drivers/i2c/busses/i2c-xiic.c9
-rw-r--r--drivers/i2c/i2c-core-base.c108
-rw-r--r--drivers/i2c/i2c-core-smbus.c12
-rw-r--r--drivers/i2c/i2c-dev.c5
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c3
-rw-r--r--drivers/iio/accel/Kconfig53
-rw-r--r--drivers/iio/accel/Makefile4
-rw-r--r--drivers/iio/accel/adis16201.c3
-rw-r--r--drivers/iio/accel/adis16209.c3
-rw-r--r--drivers/iio/accel/adxl372.c4
-rw-r--r--drivers/iio/accel/bma180.c112
-rw-r--r--drivers/iio/accel/bma220_spi.c10
-rw-r--r--drivers/iio/accel/bma400_core.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c234
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c245
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c31
-rw-r--r--drivers/iio/accel/bmc150-accel.h72
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c27
-rw-r--r--drivers/iio/accel/fxls8962af-core.c968
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c57
-rw-r--r--drivers/iio/accel/fxls8962af-spi.c57
-rw-r--r--drivers/iio/accel/fxls8962af.h22
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c19
-rw-r--r--drivers/iio/accel/kxcjk-1013.c253
-rw-r--r--drivers/iio/accel/kxsd9.c2
-rw-r--r--drivers/iio/accel/mma8452.c7
-rw-r--r--drivers/iio/accel/mma9551.c1
-rw-r--r--drivers/iio/accel/mma9551_core.c4
-rw-r--r--drivers/iio/accel/mma9553.c1
-rw-r--r--drivers/iio/accel/mxc4005.c12
-rw-r--r--drivers/iio/accel/sca3300.c472
-rw-r--r--drivers/iio/accel/st_accel.h12
-rw-r--r--drivers/iio/accel/st_accel_core.c230
-rw-r--r--drivers/iio/accel/st_accel_i2c.c17
-rw-r--r--drivers/iio/accel/st_accel_spi.c17
-rw-r--r--drivers/iio/accel/stk8312.c27
-rw-r--r--drivers/iio/accel/stk8ba50.c19
-rw-r--r--drivers/iio/adc/Kconfig12
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7124.c48
-rw-r--r--drivers/iio/adc/ad7192.c93
-rw-r--r--drivers/iio/adc/ad7298.c8
-rw-r--r--drivers/iio/adc/ad7476.c120
-rw-r--r--drivers/iio/adc/ad7606.c3
-rw-r--r--drivers/iio/adc/ad7766.c10
-rw-r--r--drivers/iio/adc/ad7768-1.c4
-rw-r--r--drivers/iio/adc/ad7780.c38
-rw-r--r--drivers/iio/adc/ad7791.c44
-rw-r--r--drivers/iio/adc/ad7793.c53
-rw-r--r--drivers/iio/adc/ad7887.c1
-rw-r--r--drivers/iio/adc/ad9467.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c82
-rw-r--r--drivers/iio/adc/adi-axi-adc.c24
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c5
-rw-r--r--drivers/iio/adc/at91_adc.c4
-rw-r--r--drivers/iio/adc/dln2-adc.c3
-rw-r--r--drivers/iio/adc/ep93xx_adc.c4
-rw-r--r--drivers/iio/adc/exynos_adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c1
-rw-r--r--drivers/iio/adc/hx711.c4
-rw-r--r--drivers/iio/adc/ina2xx-adc.c3
-rw-r--r--drivers/iio/adc/max1027.c2
-rw-r--r--drivers/iio/adc/max11100.c34
-rw-r--r--drivers/iio/adc/max1118.c68
-rw-r--r--drivers/iio/adc/max1241.c2
-rw-r--r--drivers/iio/adc/mp2629_adc.c1
-rw-r--r--drivers/iio/adc/mt6360-adc.c1
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c5
-rw-r--r--drivers/iio/adc/palmas_gpadc.c4
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c15
-rw-r--r--drivers/iio/adc/sc27xx_adc.c1
-rw-r--r--drivers/iio/adc/stm32-adc.c28
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c6
-rw-r--r--drivers/iio/adc/ti-adc081c.c43
-rw-r--r--drivers/iio/adc/ti-adc0832.c39
-rw-r--r--drivers/iio/adc/ti-adc084s021.c3
-rw-r--r--drivers/iio/adc/ti-adc108s102.c45
-rw-r--r--drivers/iio/adc/ti-adc161s626.c51
-rw-r--r--drivers/iio/adc/ti-ads1015.c17
-rw-r--r--drivers/iio/adc/ti-ads124s08.c2
-rw-r--r--drivers/iio/adc/ti-ads131e08.c3
-rw-r--r--drivers/iio/adc/ti-ads7950.c1
-rw-r--r--drivers/iio/adc/ti-ads8688.c3
-rw-r--r--drivers/iio/adc/ti-tsc2046.c712
-rw-r--r--drivers/iio/adc/vf610_adc.c10
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c2
-rw-r--r--drivers/iio/afe/iio-rescale.c41
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c22
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c25
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c23
-rw-r--r--drivers/iio/chemical/Kconfig27
-rw-r--r--drivers/iio/chemical/Makefile2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c19
-rw-r--r--drivers/iio/chemical/bme680_i2c.c8
-rw-r--r--drivers/iio/chemical/bme680_spi.c8
-rw-r--r--drivers/iio/chemical/ccs811.c2
-rw-r--r--drivers/iio/chemical/scd30_core.c3
-rw-r--r--drivers/iio/chemical/sgp30.c2
-rw-r--r--drivers/iio/chemical/sps30.c275
-rw-r--r--drivers/iio/chemical/sps30.h35
-rw-r--r--drivers/iio/chemical/sps30_i2c.c258
-rw-r--r--drivers/iio/chemical/sps30_serial.c431
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c32
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c24
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h3
-rw-r--r--drivers/iio/common/scmi_sensors/Makefile2
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c1
-rw-r--r--drivers/iio/dac/ad5766.c2
-rw-r--r--drivers/iio/dac/stm32-dac.c3
-rw-r--r--drivers/iio/dummy/Kconfig1
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/gyro/adis16136.c20
-rw-r--r--drivers/iio/gyro/adis16260.c9
-rw-r--r--drivers/iio/gyro/adxrs290.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c17
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c13
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c6
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/gyro/itg3200_core.c3
-rw-r--r--drivers/iio/gyro/mpu3050-core.c4
-rw-r--r--drivers/iio/gyro/st_gyro.h12
-rw-r--r--drivers/iio/gyro/st_gyro_core.c53
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c17
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c17
-rw-r--r--drivers/iio/health/afe4403.c2
-rw-r--r--drivers/iio/health/afe4404.c2
-rw-r--r--drivers/iio/humidity/am2315.c25
-rw-r--r--drivers/iio/humidity/hdc100x.c6
-rw-r--r--drivers/iio/humidity/hdc2010.c1
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c2
-rw-r--r--drivers/iio/imu/Kconfig1
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c7
-rw-r--r--drivers/iio/imu/adis16400.c27
-rw-r--r--drivers/iio/imu/adis16460.c3
-rw-r--r--drivers/iio/imu/adis16475.c11
-rw-r--r--drivers/iio/imu/adis16480.c166
-rw-r--r--drivers/iio/imu/adis_buffer.c27
-rw-r--r--drivers/iio/imu/adis_trigger.c3
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c6
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c27
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c8
-rw-r--r--drivers/iio/imu/kmx61.c7
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c8
-rw-r--r--drivers/iio/imu/st_lsm9ds0/Kconfig28
-rw-r--r--drivers/iio/imu/st_lsm9ds0/Makefile5
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h23
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c163
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c84
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c83
-rw-r--r--drivers/iio/industrialio-buffer.c13
-rw-r--r--drivers/iio/industrialio-core.c126
-rw-r--r--drivers/iio/industrialio-trigger.c37
-rw-r--r--drivers/iio/industrialio-triggered-event.c2
-rw-r--r--drivers/iio/inkern.c107
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/acpi-als.c3
-rw-r--r--drivers/iio/light/hid-sensor-als.c6
-rw-r--r--drivers/iio/light/hid-sensor-prox.c6
-rw-r--r--drivers/iio/light/isl29028.c5
-rw-r--r--drivers/iio/light/isl29125.c10
-rw-r--r--drivers/iio/light/ltr501.c25
-rw-r--r--drivers/iio/light/pa12203001.c4
-rw-r--r--drivers/iio/light/rpr0521.c9
-rw-r--r--drivers/iio/light/si1133.c18
-rw-r--r--drivers/iio/light/si1145.c12
-rw-r--r--drivers/iio/light/tcs3414.c10
-rw-r--r--drivers/iio/light/tcs3472.c16
-rw-r--r--drivers/iio/light/tsl2583.c13
-rw-r--r--drivers/iio/light/tsl2591.c1225
-rw-r--r--drivers/iio/light/us5182d.c4
-rw-r--r--drivers/iio/light/vcnl4000.c9
-rw-r--r--drivers/iio/light/vcnl4035.c9
-rw-r--r--drivers/iio/light/veml6030.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c3
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c26
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c7
-rw-r--r--drivers/iio/magnetometer/hmc5843.h8
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c7
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c5
-rw-r--r--drivers/iio/magnetometer/st_magn.h4
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c181
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c14
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c14
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c2
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c6
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c5
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c4
-rw-r--r--drivers/iio/potentiostat/lmp91000.c7
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c6
-rw-r--r--drivers/iio/pressure/icp10100.c5
-rw-r--r--drivers/iio/pressure/st_pressure.h4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c15
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c17
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c17
-rw-r--r--drivers/iio/pressure/zpa2326.c3
-rw-r--r--drivers/iio/proximity/as3935.c13
-rw-r--r--drivers/iio/proximity/isl29501.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c14
-rw-r--r--drivers/iio/proximity/srf04.c8
-rw-r--r--drivers/iio/proximity/srf08.c14
-rw-r--r--drivers/iio/proximity/sx9310.c2
-rw-r--r--drivers/iio/proximity/sx9500.c2
-rw-r--r--drivers/iio/temperature/Kconfig10
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c2
-rw-r--r--drivers/iio/temperature/mlx90614.c25
-rw-r--r--drivers/iio/temperature/tmp117.c185
-rw-r--r--drivers/iio/test/iio-test-format.c2
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c2
-rw-r--r--drivers/infiniband/core/cma.c17
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_misc.h4
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rc.h4
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h6
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c4
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c13
-rw-r--r--drivers/infiniband/hw/irdma/hw.c11
-rw-r--r--drivers/infiniband/hw/irdma/main.c9
-rw-r--r--drivers/infiniband/hw/irdma/type.h3
-rw-r--r--drivers/infiniband/hw/irdma/uk.c5
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c3
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_cq.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_mr.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rc.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c27
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/input/Kconfig1
-rw-r--r--drivers/input/evbug.c3
-rw-r--r--drivers/input/gameport/Kconfig1
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/joystick/Kconfig10
-rw-r--r--drivers/input/joystick/Makefile1
-rw-r--r--drivers/input/joystick/qwiic-joystick.c146
-rw-r--r--drivers/input/joystick/sidewinder.c1
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c2
-rw-r--r--drivers/input/keyboard/hil_kbd.c1
-rw-r--r--drivers/input/misc/ims-pcu.c6
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c103
-rw-r--r--drivers/input/mouse/trackpoint.c2
-rw-r--r--drivers/input/serio/i8042.c4
-rw-r--r--drivers/input/serio/serport.c8
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c167
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c47
-rw-r--r--drivers/input/touchscreen/cyttsp_core.h3
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c10
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c10
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c38
-rw-r--r--drivers/input/touchscreen/elants_i2c.c11
-rw-r--r--drivers/input/touchscreen/hideep.c13
-rw-r--r--drivers/input/touchscreen/resistive-adc-touch.c136
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c8
-rw-r--r--drivers/interconnect/core.c9
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c22
-rw-r--r--drivers/interconnect/qcom/sc7280.c1938
-rw-r--r--drivers/interconnect/qcom/sc7280.h154
-rw-r--r--drivers/iommu/Kconfig4
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/init.c20
-rw-r--r--drivers/iommu/amd/iommu.c33
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c59
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c224
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h48
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c90
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c43
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c39
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c1
-rw-r--r--drivers/iommu/dma-iommu.c20
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/intel/Kconfig6
-rw-r--r--drivers/iommu/intel/Makefile1
-rw-r--r--drivers/iommu/intel/debugfs.c111
-rw-r--r--drivers/iommu/intel/dmar.c54
-rw-r--r--drivers/iommu/intel/iommu.c206
-rw-r--r--drivers/iommu/intel/pasid.c12
-rw-r--r--drivers/iommu/intel/pasid.h6
-rw-r--r--drivers/iommu/intel/perf.c166
-rw-r--r--drivers/iommu/intel/perf.h73
-rw-r--r--drivers/iommu/intel/svm.c644
-rw-r--r--drivers/iommu/iommu.c6
-rw-r--r--drivers/iommu/iova.c18
-rw-r--r--drivers/iommu/ipmmu-vmsa.c1
-rw-r--r--drivers/iommu/msm_iommu.c1
-rw-r--r--drivers/iommu/mtk_iommu.c1
-rw-r--r--drivers/iommu/mtk_iommu_v1.c1
-rw-r--r--drivers/iommu/of_iommu.c68
-rw-r--r--drivers/iommu/omap-iommu.c1
-rw-r--r--drivers/iommu/rockchip-iommu.c177
-rw-r--r--drivers/iommu/tegra-smmu.c16
-rw-r--r--drivers/iommu/virtio-iommu.c12
-rw-r--r--drivers/ipack/carriers/tpci200.c9
-rw-r--r--drivers/ipack/carriers/tpci200.h4
-rw-r--r--drivers/ipack/devices/ipoctal.c8
-rw-r--r--drivers/ipack/devices/ipoctal.h6
-rw-r--r--drivers/irqchip/irq-mips-cpu.c10
-rw-r--r--drivers/irqchip/irq-mips-gic.c8
-rw-r--r--drivers/irqchip/irq-pic32-evic.c5
-rw-r--r--drivers/isdn/capi/capi.c40
-rw-r--r--drivers/leds/Kconfig2
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c53
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/leds-as3645a.c3
-rw-r--r--drivers/leds/leds-bcm6328.c2
-rw-r--r--drivers/leds/leds-blinkm.c5
-rw-r--r--drivers/leds/leds-el15203000.c37
-rw-r--r--drivers/leds/leds-gpio-register.c1
-rw-r--r--drivers/leds/leds-is31fl32xx.c3
-rw-r--r--drivers/leds/leds-ktd2692.c27
-rw-r--r--drivers/leds/leds-lm3530.c2
-rw-r--r--drivers/leds/leds-lm3532.c7
-rw-r--r--drivers/leds/leds-lm36274.c4
-rw-r--r--drivers/leds/leds-lm3692x.c26
-rw-r--r--drivers/leds/leds-lm3697.c12
-rw-r--r--drivers/leds/leds-lp3944.c6
-rw-r--r--drivers/leds/leds-lp50xx.c2
-rw-r--r--drivers/leds/leds-lp55xx-common.c2
-rw-r--r--drivers/leds/leds-lp8860.c16
-rw-r--r--drivers/leds/leds-lt3593.c8
-rw-r--r--drivers/leds/leds-mlxcpld.c38
-rw-r--r--drivers/leds/leds-mlxreg.c3
-rw-r--r--drivers/leds/leds-pwm.c16
-rw-r--r--drivers/leds/leds-tlc591xx.c8
-rw-r--r--drivers/leds/leds-turris-omnia.c1
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c1
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c2
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c1
-rw-r--r--drivers/leds/trigger/ledtrig-panic.c1
-rw-r--r--drivers/mcb/mcb-lpc.c13
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c13
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c6
-rw-r--r--drivers/media/pci/ngene/ngene-core.c2
-rw-r--r--drivers/media/pci/ngene/ngene.h14
-rw-r--r--drivers/media/platform/atmel/Kconfig8
-rw-r--r--drivers/media/platform/atmel/Makefile5
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c11
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c14
-rw-r--r--drivers/memory/atmel-ebi.c4
-rw-r--r--drivers/memory/emif.c678
-rw-r--r--drivers/memory/fsl_ifc.c8
-rw-r--r--drivers/memory/pl353-smc.c315
-rw-r--r--drivers/memory/stm32-fmc2-ebi.c4
-rw-r--r--drivers/memory/tegra/Kconfig18
-rw-r--r--drivers/memory/tegra/Makefile6
-rw-r--r--drivers/memory/tegra/mc.c321
-rw-r--r--drivers/memory/tegra/mc.h25
-rw-r--r--drivers/memory/tegra/tegra114.c1245
-rw-r--r--drivers/memory/tegra/tegra124-emc.c8
-rw-r--r--drivers/memory/tegra/tegra124.c1306
-rw-r--r--drivers/memory/tegra/tegra186.c1679
-rw-r--r--drivers/memory/tegra/tegra194.c1351
-rw-r--r--drivers/memory/tegra/tegra20-emc.c52
-rw-r--r--drivers/memory/tegra/tegra20.c110
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c4
-rw-r--r--drivers/memory/tegra/tegra210.c1433
-rw-r--r--drivers/memory/tegra/tegra30-emc.c56
-rw-r--r--drivers/memory/tegra/tegra30.c1292
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptfc.c35
-rw-r--r--drivers/message/fusion/mptsas.c119
-rw-r--r--drivers/mfd/88pm800.c14
-rw-r--r--drivers/mfd/88pm805.c29
-rw-r--r--drivers/mfd/Kconfig29
-rw-r--r--drivers/mfd/Makefile16
-rw-r--r--drivers/mfd/ab8500-core.c33
-rw-r--r--drivers/mfd/arizona-core.c2
-rw-r--r--drivers/mfd/as3722.c14
-rw-r--r--drivers/mfd/asic3.c12
-rw-r--r--drivers/mfd/axp20x.c24
-rw-r--r--drivers/mfd/cros_ec_dev.c21
-rw-r--r--drivers/mfd/da9052-i2c.c10
-rw-r--r--drivers/mfd/da9055-core.c38
-rw-r--r--drivers/mfd/da9062-core.c13
-rw-r--r--drivers/mfd/da9063-i2c.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/hi655x-pmic.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c20
-rw-r--r--drivers/mfd/iqs62x.c2
-rw-r--r--drivers/mfd/janz-cmodio.c6
-rw-r--r--drivers/mfd/kempld-core.c19
-rw-r--r--drivers/mfd/lp87565.c27
-rw-r--r--drivers/mfd/max8907.c8
-rw-r--r--drivers/mfd/max8997.c9
-rw-r--r--drivers/mfd/max8998.c8
-rw-r--r--drivers/mfd/mfd-core.c15
-rw-r--r--drivers/mfd/motorola-cpcap.c4
-rw-r--r--drivers/mfd/mt6360-core.c552
-rw-r--r--drivers/mfd/mt6397-core.c20
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/omap-usb-tll.c2
-rw-r--r--drivers/mfd/pcf50633-core.c12
-rw-r--r--drivers/mfd/qcom-pm8008.c260
-rw-r--r--drivers/mfd/rk808.c81
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rt4831.c115
-rw-r--r--drivers/mfd/sec-core.c70
-rw-r--r--drivers/mfd/sec-irq.c4
-rw-r--r--drivers/mfd/si476x-cmd.c28
-rw-r--r--drivers/mfd/si476x-i2c.c10
-rw-r--r--drivers/mfd/sm501.c8
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/sun6i-prcm.c30
-rw-r--r--drivers/mfd/syscon.c2
-rw-r--r--drivers/mfd/t7l66xb.c12
-rw-r--r--drivers/mfd/timberdale.c6
-rw-r--r--drivers/mfd/tps80031.c6
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/ucb1x00-assabet.c2
-rw-r--r--drivers/mfd/wcd934x.c50
-rw-r--r--drivers/mfd/wm831x-core.c250
-rw-r--r--drivers/mfd/wm831x-otp.c6
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_dev.c1
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c6
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.h2
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c2
-rw-r--r--drivers/misc/cardreader/alcor_pci.c8
-rw-r--r--drivers/misc/cxl/file.c5
-rw-r--r--drivers/misc/eeprom/Kconfig5
-rw-r--r--drivers/misc/eeprom/at24.c17
-rw-r--r--drivers/misc/eeprom/at25.c158
-rw-r--r--drivers/misc/eeprom/ee1004.c223
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c90
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c41
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c81
-rw-r--r--drivers/misc/habanalabs/common/context.c9
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c5
-rw-r--r--drivers/misc/habanalabs/common/device.c82
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c1792
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h280
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c24
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c23
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c42
-rw-r--r--drivers/misc/habanalabs/common/irq.c24
-rw-r--r--drivers/misc/habanalabs/common/memory.c22
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c14
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c34
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c2
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c1013
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h1
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c6
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c15
-rw-r--r--drivers/misc/habanalabs/goya/goya.c251
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c2
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h45
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h184
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h14
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h31
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h46
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h15
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h10
-rw-r--r--drivers/misc/hpilo.c10
-rw-r--r--drivers/misc/hpilo.h3
-rw-r--r--drivers/misc/ibmasm/heartbeat.c1
-rw-r--r--drivers/misc/ibmasm/module.c5
-rw-r--r--drivers/misc/ibmasm/remote.h2
-rw-r--r--drivers/misc/lattice-ecp3-config.c2
-rw-r--r--drivers/misc/lkdtm/bugs.c11
-rw-r--r--drivers/misc/lkdtm/cfi.c3
-rw-r--r--drivers/misc/lkdtm/core.c58
-rw-r--r--drivers/misc/lkdtm/fortify.c3
-rw-r--r--drivers/misc/lkdtm/heap.c97
-rw-r--r--drivers/misc/lkdtm/lkdtm.h46
-rw-r--r--drivers/misc/lkdtm/stackleak.c4
-rw-r--r--drivers/misc/lkdtm/usercopy.c7
-rw-r--r--drivers/misc/mei/bus-fixup.c2
-rw-r--r--drivers/misc/mei/client.c22
-rw-r--r--drivers/misc/mei/hbm.c2
-rw-r--r--drivers/misc/mei/hdcp/Kconfig1
-rw-r--r--drivers/misc/mei/hw-me.c4
-rw-r--r--drivers/misc/mei/hw.h28
-rw-r--r--drivers/misc/mei/interrupt.c23
-rw-r--r--drivers/misc/mei/main.c4
-rw-r--r--drivers/misc/mei/mei-trace.h6
-rw-r--r--drivers/misc/mei/pci-txe.c2
-rw-r--r--drivers/misc/pvpanic/pvpanic-mmio.c17
-rw-r--r--drivers/misc/pvpanic/pvpanic-pci.c22
-rw-r--r--drivers/misc/pvpanic/pvpanic.c34
-rw-r--r--drivers/misc/pvpanic/pvpanic.h3
-rw-r--r--drivers/misc/sram.c6
-rw-r--r--drivers/misc/ti-st/st_core.c30
-rw-r--r--drivers/misc/uacce/uacce.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c2
-rw-r--r--drivers/misc/xilinx_sdfec.c3
-rw-r--r--drivers/mmc/core/block.c60
-rw-r--r--drivers/mmc/core/core.c7
-rw-r--r--drivers/mmc/core/host.c20
-rw-r--r--drivers/mmc/core/queue.c23
-rw-r--r--drivers/mmc/core/queue.h2
-rw-r--r--drivers/mmc/core/sdio_uart.c6
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-iproc.c21
-rw-r--r--drivers/mmc/host/sdhci-msm.c18
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c14
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed-test.c34
-rw-r--r--drivers/mmc/host/sdhci.c10
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c2
-rw-r--r--drivers/mtd/chips/cfi_util.c4
-rw-r--r--drivers/mtd/chips/chipreg.c5
-rw-r--r--drivers/mtd/devices/Kconfig6
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/mchp48l640.c374
-rw-r--r--drivers/mtd/devices/ms02-nv.c1
-rw-r--r--drivers/mtd/devices/phram.c1
-rw-r--r--drivers/mtd/inftlmount.c17
-rw-r--r--drivers/mtd/maps/amd76xrom.c6
-rw-r--r--drivers/mtd/maps/ck804xrom.c8
-rw-r--r--drivers/mtd/maps/esb2rom.c7
-rw-r--r--drivers/mtd/maps/ichxrom.c6
-rw-r--r--drivers/mtd/maps/plat-ram.c1
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c11
-rw-r--r--drivers/mtd/mtdcore.c248
-rw-r--r--drivers/mtd/mtdoops.c4
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/nand/bbt.c2
-rw-r--r--drivers/mtd/nand/raw/Kconfig8
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c341
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c11
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/raw/internals.h5
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c6
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c4
-rw-r--r--drivers/mtd/nand/raw/nand_base.c374
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c2
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c5
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c370
-rw-r--r--drivers/mtd/nand/raw/omap2.c229
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c2
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c1194
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c23
-rw-r--r--drivers/mtd/nand/raw/r852.c7
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c4
-rw-r--r--drivers/mtd/nand/spi/core.c129
-rw-r--r--drivers/mtd/nand/spi/macronix.c112
-rw-r--r--drivers/mtd/nftlcore.c1
-rw-r--r--drivers/mtd/nftlmount.c7
-rw-r--r--drivers/mtd/parsers/Kconfig2
-rw-r--r--drivers/mtd/parsers/parser_trx.c9
-rw-r--r--drivers/mtd/parsers/qcomsmempart.c10
-rw-r--r--drivers/mtd/parsers/redboot.c76
-rw-r--r--drivers/mtd/rfd_ftl.c5
-rw-r--r--drivers/mtd/sm_ftl.c51
-rw-r--r--drivers/mtd/spi-nor/Makefile2
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c2
-rw-r--r--drivers/mtd/spi-nor/core.c22
-rw-r--r--drivers/mtd/spi-nor/core.h16
-rw-r--r--drivers/mtd/spi-nor/macronix.c5
-rw-r--r--drivers/mtd/spi-nor/otp.c160
-rw-r--r--drivers/mtd/spi-nor/sfdp.c58
-rw-r--r--drivers/mtd/spi-nor/sysfs.c93
-rw-r--r--drivers/mtd/spi-nor/winbond.c1
-rw-r--r--drivers/mtd/tests/oobtest.c7
-rw-r--r--drivers/mtd/tests/torturetest.c2
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/bonding/bond_main.c183
-rw-r--r--drivers/net/caif/Kconfig9
-rw-r--r--drivers/net/caif/Makefile3
-rw-r--r--drivers/net/caif/caif_hsi.c1454
-rw-r--r--drivers/net/caif/caif_serial.c13
-rw-r--r--drivers/net/can/m_can/m_can.c8
-rw-r--r--drivers/net/can/slcan.c10
-rw-r--r--drivers/net/can/spi/hi311x.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/can/usb/esd_usb2.c16
-rw-r--r--drivers/net/can/usb/mcba_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c10
-rw-r--r--drivers/net/can/usb/usb_8dev.c15
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c7
-rw-r--r--drivers/net/dsa/lan9303-core.c34
-rw-r--r--drivers/net/dsa/lantiq_gswip.c14
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c82
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h9
-rw-r--r--drivers/net/dsa/mt7530.c3
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c6
-rw-r--r--drivers/net/dsa/qca/ar9331.c87
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c27
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c119
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c208
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h76
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c23
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c52
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c1
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c19
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c19
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c61
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c15
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c20
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c20
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c294
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c114
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c200
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c93
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c48
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c259
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c173
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c229
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c21
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c16
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c197
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c41
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c7
-rw-r--r--drivers/net/ethernet/renesas/ravb.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c22
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c56
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h4
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c5
-rw-r--r--drivers/net/fddi/defza.c3
-rw-r--r--drivers/net/fjes/fjes_trace.h4
-rw-r--r--drivers/net/hamradio/6pack.c20
-rw-r--r--drivers/net/hamradio/mkiss.c13
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c6
-rw-r--r--drivers/net/ipa/ipa_smp2p.c1
-rw-r--r--drivers/net/mdio/mdio-mux.c37
-rw-r--r--drivers/net/mhi/net.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c8
-rw-r--r--drivers/net/pcs/pcs-xpcs.c2
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/marvell10g.c40
-rw-r--r--drivers/net/phy/micrel.c12
-rw-r--r--drivers/net/ppp/ppp_async.c12
-rw-r--r--drivers/net/ppp/ppp_generic.c21
-rw-r--r--drivers/net/ppp/ppp_synctty.c12
-rw-r--r--drivers/net/slip/slip.c9
-rw-r--r--drivers/net/usb/asix_common.c70
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/hso.c41
-rw-r--r--drivers/net/usb/lan78xx.c16
-rw-r--r--drivers/net/usb/pegasus.c152
-rw-r--r--drivers/net/usb/r8152.c53
-rw-r--r--drivers/net/virtio_net.c75
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c22
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/wan/hdlc_cisco.c8
-rw-r--r--drivers/net/wan/hdlc_fr.c8
-rw-r--r--drivers/net/wan/hdlc_ppp.c8
-rw-r--r--drivers/net/wan/hdlc_raw.c8
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c8
-rw-r--r--drivers/net/wan/hdlc_x25.c8
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c34
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.h3
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c29
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c70
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c2
-rw-r--r--drivers/net/wireless/virt_wifi.c52
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c7
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c21
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_uevent.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c13
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c14
-rw-r--r--drivers/net/wwan/wwan_core.c14
-rw-r--r--drivers/nfc/nfcsim.c3
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c2
-rw-r--r--drivers/nvdimm/bus.c64
-rw-r--r--drivers/nvdimm/dimm_devs.c18
-rw-r--r--drivers/nvdimm/namespace_devs.c17
-rw-r--r--drivers/nvme/host/core.c84
-rw-r--r--drivers/nvme/host/fabrics.c13
-rw-r--r--drivers/nvme/host/fabrics.h2
-rw-r--r--drivers/nvme/host/fc.c74
-rw-r--r--drivers/nvme/host/ioctl.c6
-rw-r--r--drivers/nvme/host/multipath.c9
-rw-r--r--drivers/nvme/host/nvme.h15
-rw-r--r--drivers/nvme/host/pci.c71
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/host/tcp.c6
-rw-r--r--drivers/nvme/host/trace.h6
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/passthru.c8
-rw-r--r--drivers/nvme/target/tcp.c1
-rw-r--r--drivers/nvmem/core.c27
-rw-r--r--drivers/nvmem/qfprom.c9
-rw-r--r--drivers/nvmem/sprd-efuse.c2
-rw-r--r--drivers/nvmem/sunxi_sid.c1
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/address.c128
-rw-r--r--drivers/of/fdt.c14
-rw-r--r--drivers/of/of_private.h4
-rw-r--r--drivers/of/of_reserved_mem.c17
-rw-r--r--drivers/of/platform.c1
-rw-r--r--drivers/of/unittest.c6
-rw-r--r--drivers/opp/core.c15
-rw-r--r--drivers/opp/of.c5
-rw-r--r--drivers/parisc/power.c1
-rw-r--r--drivers/parport/probe.c11
-rw-r--r--drivers/pci/controller/Kconfig8
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h7
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c4
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c13
-rw-r--r--drivers/pci/controller/pci-ftpci100.c30
-rw-r--r--drivers/pci/controller/pci-hyperv.c64
-rw-r--r--drivers/pci/controller/pci-ixp4xx.c671
-rw-r--r--drivers/pci/controller/pci-tegra.c1
-rw-r--r--drivers/pci/controller/pci-xgene.c4
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c33
-rw-r--r--drivers/pci/controller/pcie-iproc.c24
-rw-r--r--drivers/pci/controller/pcie-iproc.h16
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c1
-rw-r--r--drivers/pci/controller/pcie-mediatek.c4
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c12
-rw-r--r--drivers/pci/ecam.c54
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h3
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c7
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c8
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c36
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c4
-rw-r--r--drivers/pci/hotplug/shpchp_sysfs.c38
-rw-r--r--drivers/pci/iov.c23
-rw-r--r--drivers/pci/msi.c133
-rw-r--r--drivers/pci/p2pdma.c376
-rw-r--r--drivers/pci/pci-label.c22
-rw-r--r--drivers/pci/pci-sysfs.c4
-rw-r--r--drivers/pci/pci.c60
-rw-r--r--drivers/pci/pci.h8
-rw-r--r--drivers/pci/pcie/aer.c24
-rw-r--r--drivers/pci/pcie/aspm.c4
-rw-r--r--drivers/pci/pcie/dpc.c74
-rw-r--r--drivers/pci/probe.c23
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c12
-rw-r--r--drivers/pci/slot.c18
-rw-r--r--drivers/pci/switch/switchtec.c18
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c4
-rw-r--r--drivers/phy/hisilicon/Kconfig10
-rw-r--r--drivers/phy/hisilicon/Makefile1
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-usb3.c (renamed from drivers/staging/hikey9xx/phy-hi3670-usb3.c)19
-rw-r--r--drivers/phy/intel/phy-intel-keembay-emmc.c3
-rw-r--r--drivers/phy/marvell/phy-mmp3-hsic.c4
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c4
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c4
-rw-r--r--drivers/phy/phy-can-transceiver.c146
-rw-r--r--drivers/phy/phy-core-mipi-dphy.c2
-rw-r--r--drivers/phy/phy-core.c16
-rw-r--r--drivers/phy/phy-xgene.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c315
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h189
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c34
-rw-r--r--drivers/phy/ralink/Kconfig2
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c37
-rw-r--r--drivers/phy/rockchip/Kconfig9
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c459
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c44
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c11
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c31
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c550
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c1533
-rw-r--r--drivers/phy/tegra/xusb.c92
-rw-r--r--drivers/phy/tegra/xusb.h22
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c17
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c26
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c8
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-k210.c26
-rw-r--r--drivers/pinctrl/qcom/Kconfig63
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c8
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/amd-pmc.c249
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c28
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c2
-rw-r--r--drivers/platform/x86/dual_accel_detect.h76
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c3
-rw-r--r--drivers/platform/x86/intel-hid.c22
-rw-r--r--drivers/platform/x86/intel-vbtn.c18
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c2
-rw-r--r--drivers/platform/x86/think-lmi.c45
-rw-r--r--drivers/platform/x86/think-lmi.h1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c3
-rw-r--r--drivers/platform/x86/wireless-hotkey.c2
-rw-r--r--drivers/pnp/card.c7
-rw-r--r--drivers/pnp/driver.c9
-rw-r--r--drivers/pnp/isapnp/compat.c1
-rw-r--r--drivers/pnp/manager.c7
-rw-r--r--drivers/pnp/support.c1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/reset/gpio-poweroff.c1
-rw-r--r--drivers/power/reset/keystone-reset.c1
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c1
-rw-r--r--drivers/power/reset/regulator-poweroff.c1
-rw-r--r--drivers/power/supply/Kconfig12
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/ab8500-bm.h7
-rw-r--r--drivers/power/supply/ab8500-chargalg.h2
-rw-r--r--drivers/power/supply/ab8500_btemp.c126
-rw-r--r--drivers/power/supply/ab8500_charger.c381
-rw-r--r--drivers/power/supply/ab8500_fg.c147
-rw-r--r--drivers/power/supply/abx500_chargalg.c118
-rw-r--r--drivers/power/supply/axp20x_battery.c17
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c26
-rw-r--r--drivers/power/supply/bd70528-charger.c710
-rw-r--r--drivers/power/supply/bq24190_charger.c11
-rw-r--r--drivers/power/supply/charger-manager.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c19
-rw-r--r--drivers/power/supply/cpcap-charger.c39
-rw-r--r--drivers/power/supply/max17040_battery.c42
-rw-r--r--drivers/power/supply/max17042_battery.c2
-rw-r--r--drivers/power/supply/pm2301_charger.c1249
-rw-r--r--drivers/power/supply/rn5t618_power.c235
-rw-r--r--drivers/power/supply/rt5033_battery.c7
-rw-r--r--drivers/power/supply/sbs-battery.c153
-rw-r--r--drivers/power/supply/sc2731_charger.c1
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c1
-rw-r--r--drivers/power/supply/smb347-charger.c1
-rw-r--r--drivers/power/supply/surface_battery.c14
-rw-r--r--drivers/power/supply/surface_charger.c2
-rw-r--r--drivers/pps/clients/pps-ldisc.c11
-rw-r--r--drivers/ps3/ps3-vuart.c2
-rw-r--r--drivers/ps3/ps3av.c22
-rw-r--r--drivers/ptp/Kconfig3
-rw-r--r--drivers/ptp/Makefile2
-rw-r--r--drivers/ptp/ptp_clock.c44
-rw-r--r--drivers/ptp/ptp_private.h39
-rw-r--r--drivers/ptp/ptp_sysfs.c160
-rw-r--r--drivers/ptp/ptp_vclock.c219
-rw-r--r--drivers/pwm/core.c222
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c2
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c2
-rw-r--r--drivers/pwm/pwm-atmel.c2
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c2
-rw-r--r--drivers/pwm/pwm-bcm-kona.c2
-rw-r--r--drivers/pwm/pwm-bcm2835.c2
-rw-r--r--drivers/pwm/pwm-berlin.c162
-rw-r--r--drivers/pwm/pwm-clps711x.c12
-rw-r--r--drivers/pwm/pwm-crc.c12
-rw-r--r--drivers/pwm/pwm-ep93xx.c102
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c2
-rw-r--r--drivers/pwm/pwm-imx1.c14
-rw-r--r--drivers/pwm/pwm-imx27.c3
-rw-r--r--drivers/pwm/pwm-jz4740.c2
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c2
-rw-r--r--drivers/pwm/pwm-lpss-pci.c4
-rw-r--r--drivers/pwm/pwm-lpss-platform.c4
-rw-r--r--drivers/pwm/pwm-lpss.c8
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-meson.c14
-rw-r--r--drivers/pwm/pwm-mxs.c2
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c2
-rw-r--r--drivers/pwm/pwm-pca9685.c198
-rw-r--r--drivers/pwm/pwm-pxa.c40
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c2
-rw-r--r--drivers/pwm/pwm-rockchip.c5
-rw-r--r--drivers/pwm/pwm-samsung.c3
-rw-r--r--drivers/pwm/pwm-sifive.c2
-rw-r--r--drivers/pwm/pwm-spear.c37
-rw-r--r--drivers/pwm/pwm-sprd.c15
-rw-r--r--drivers/pwm/pwm-stm32-lp.c2
-rw-r--r--drivers/pwm/pwm-stm32.c2
-rw-r--r--drivers/pwm/pwm-sun4i.c2
-rw-r--r--drivers/pwm/pwm-tegra.c24
-rw-r--r--drivers/pwm/pwm-tiecap.c55
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/pwm-visconti.c17
-rw-r--r--drivers/pwm/pwm-vt8500.c10
-rw-r--r--drivers/regulator/bd9576-regulator.c4
-rw-r--r--drivers/regulator/hi6421-regulator.c22
-rw-r--r--drivers/regulator/hi6421v600-regulator.c16
-rw-r--r--drivers/regulator/lp87565-regulator.c11
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c3
-rw-r--r--drivers/regulator/rtmv20-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig1
-rw-r--r--drivers/remoteproc/imx_rproc.c209
-rw-r--r--drivers/remoteproc/pru_rproc.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c22
-rw-r--r--drivers/remoteproc/qcom_wcnss.c5
-rw-r--r--drivers/remoteproc/remoteproc_cdev.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c75
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c12
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c6
-rw-r--r--drivers/remoteproc/stm32_rproc.c16
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c151
-rw-r--r--drivers/reset/Kconfig17
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c25
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c2
-rw-r--r--drivers/reset/reset-a10sr.c1
-rw-r--r--drivers/reset/reset-bcm6345.c2
-rw-r--r--drivers/reset/reset-berlin.c12
-rw-r--r--drivers/reset/reset-brcmstb.c1
-rw-r--r--drivers/reset/reset-lantiq.c2
-rw-r--r--drivers/reset/reset-microchip-sparx5.c146
-rw-r--r--drivers/reset/reset-oxnas.c2
-rw-r--r--drivers/reset/reset-ti-syscon.c4
-rw-r--r--drivers/reset/reset-uniphier.c2
-rw-r--r--drivers/reset/reset-zynqmp.c4
-rw-r--r--drivers/reset/sti/reset-syscfg.c2
-rw-r--r--drivers/rtc/Kconfig6
-rw-r--r--drivers/rtc/proc.c4
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-au1xxx.c5
-rw-r--r--drivers/rtc/rtc-bd70528.c316
-rw-r--r--drivers/rtc/rtc-ds1374.c7
-rw-r--r--drivers/rtc/rtc-efi.c1
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c1
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-m41t80.c32
-rw-r--r--drivers/rtc/rtc-max6900.c8
-rw-r--r--drivers/rtc/rtc-max77686.c4
-rw-r--r--drivers/rtc/rtc-mxc_v2.c1
-rw-r--r--drivers/rtc/rtc-palmas.c15
-rw-r--r--drivers/rtc/rtc-pcf2127.c194
-rw-r--r--drivers/rtc/rtc-pcf85063.c6
-rw-r--r--drivers/rtc/rtc-pcf8523.c146
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-rtd119x.c3
-rw-r--r--drivers/rtc/rtc-s5m.c4
-rw-r--r--drivers/rtc/rtc-sc27xx.c2
-rw-r--r--drivers/rtc/rtc-spear.c5
-rw-r--r--drivers/rtc/rtc-stm32.c6
-rw-r--r--drivers/rtc/rtc-tps6586x.c15
-rw-r--r--drivers/rtc/rtc-tps80031.c15
-rw-r--r--drivers/rtc/rtc-v3020.c2
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_diag.c21
-rw-r--r--drivers/s390/block/dasd_eckd.c15
-rw-r--r--drivers/s390/block/dasd_eckd.h6
-rw-r--r--drivers/s390/block/dcssblk.c106
-rw-r--r--drivers/s390/block/xpram.c63
-rw-r--r--drivers/s390/char/con3215.c5
-rw-r--r--drivers/s390/char/con3270.c1
-rw-r--r--drivers/s390/char/monreader.c125
-rw-r--r--drivers/s390/char/monwriter.c90
-rw-r--r--drivers/s390/char/sclp.c175
-rw-r--r--drivers/s390/char/sclp.h13
-rw-r--r--drivers/s390/char/sclp_cmd.c34
-rw-r--r--drivers/s390/char/sclp_con.c54
-rw-r--r--drivers/s390/char/sclp_ftp.c1
-rw-r--r--drivers/s390/char/sclp_quiesce.c37
-rw-r--r--drivers/s390/char/sclp_rw.c11
-rw-r--r--drivers/s390/char/sclp_rw.h8
-rw-r--r--drivers/s390/char/sclp_tty.c13
-rw-r--r--drivers/s390/char/sclp_vt220.c66
-rw-r--r--drivers/s390/char/tape_char.c2
-rw-r--r--drivers/s390/char/tty3270.c22
-rw-r--r--drivers/s390/char/vmlogrdr.c24
-rw-r--r--drivers/s390/char/zcore.c1
-rw-r--r--drivers/s390/cio/airq.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c60
-rw-r--r--drivers/s390/cio/chp.c3
-rw-r--r--drivers/s390/cio/chsc.c2
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/cio.h13
-rw-r--r--drivers/s390/cio/cmf.c13
-rw-r--r--drivers/s390/cio/ioasm.c143
-rw-r--r--drivers/s390/cio/qdio.h25
-rw-r--r--drivers/s390/cio/qdio_main.c62
-rw-r--r--drivers/s390/cio/trace.h6
-rw-r--r--drivers/s390/crypto/ap_bus.c134
-rw-r--r--drivers/s390/crypto/ap_bus.h14
-rw-r--r--drivers/s390/crypto/ap_card.c18
-rw-r--r--drivers/s390/crypto/ap_queue.c28
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c13
-rw-r--r--drivers/s390/crypto/zcrypt_api.c7
-rw-r--r--drivers/s390/crypto/zcrypt_api.h3
-rw-r--r--drivers/s390/crypto/zcrypt_card.c30
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c14
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h4
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c9
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c28
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c59
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h2
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c22
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c30
-rw-r--r--drivers/scsi/3w-9xxx.c74
-rw-r--r--drivers/scsi/3w-9xxx.h121
-rw-r--r--drivers/scsi/3w-xxxx.c6
-rw-r--r--drivers/scsi/53c700.c6
-rw-r--r--drivers/scsi/FlashPoint.c197
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/NCR5380.c10
-rw-r--r--drivers/scsi/aacraid/aachba.c10
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/advansys.c4
-rw-r--r--drivers/scsi/aha152x.c33
-rw-r--r--drivers/scsi/aha1740.c7
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c19
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c43
-rw-r--r--drivers/scsi/arm/acornscsi.c47
-rw-r--r--drivers/scsi/arm/fas216.c20
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c25
-rw-r--r--drivers/scsi/be2iscsi/be_main.c110
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h2
-rw-r--r--drivers/scsi/bfa/bfa_svc.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c32
-rw-r--r--drivers/scsi/ch.c5
-rw-r--r--drivers/scsi/constants.c17
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c17
-rw-r--r--drivers/scsi/cxlflash/superpipe.c3
-rw-r--r--drivers/scsi/dc395x.c80
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c81
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/elx/Kconfig9
-rw-r--r--drivers/scsi/elx/Makefile18
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c786
-rw-r--r--drivers/scsi/elx/efct/efct_driver.h109
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c3581
-rw-r--r--drivers/scsi/elx/efct/efct_hw.h764
-rw-r--r--drivers/scsi/elx/efct/efct_hw_queues.c677
-rw-r--r--drivers/scsi/elx/efct/efct_io.c191
-rw-r--r--drivers/scsi/elx/efct/efct_io.h174
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c1698
-rw-r--r--drivers/scsi/elx/efct/efct_lio.h189
-rw-r--r--drivers/scsi/elx/efct/efct_scsi.c1159
-rw-r--r--drivers/scsi/elx/efct/efct_scsi.h203
-rw-r--r--drivers/scsi/elx/efct/efct_unsol.c492
-rw-r--r--drivers/scsi/elx/efct/efct_unsol.h17
-rw-r--r--drivers/scsi/elx/efct/efct_xport.c1111
-rw-r--r--drivers/scsi/elx/efct/efct_xport.h186
-rw-r--r--drivers/scsi/elx/include/efc_common.h37
-rw-r--r--drivers/scsi/elx/libefc/efc.h52
-rw-r--r--drivers/scsi/elx/libefc/efc_cmds.c777
-rw-r--r--drivers/scsi/elx/libefc/efc_cmds.h35
-rw-r--r--drivers/scsi/elx/libefc/efc_device.c1603
-rw-r--r--drivers/scsi/elx/libefc/efc_device.h72
-rw-r--r--drivers/scsi/elx/libefc/efc_domain.c1088
-rw-r--r--drivers/scsi/elx/libefc/efc_domain.h54
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c1098
-rw-r--r--drivers/scsi/elx/libefc/efc_els.h107
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.c1564
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.h116
-rw-r--r--drivers/scsi/elx/libefc/efc_node.c1102
-rw-r--r--drivers/scsi/elx/libefc/efc_node.h191
-rw-r--r--drivers/scsi/elx/libefc/efc_nport.c777
-rw-r--r--drivers/scsi/elx/libefc/efc_nport.h50
-rw-r--r--drivers/scsi/elx/libefc/efc_sm.c54
-rw-r--r--drivers/scsi/elx/libefc/efc_sm.h197
-rw-r--r--drivers/scsi/elx/libefc/efclib.c81
-rw-r--r--drivers/scsi/elx/libefc/efclib.h620
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c5160
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.h4132
-rw-r--r--drivers/scsi/esas2r/atioctl.h2
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/esp_scsi.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c6
-rw-r--r--drivers/scsi/fdomain.c22
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c99
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c11
-rw-r--r--drivers/scsi/hosts.c14
-rw-r--r--drivers/scsi/hptiop.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c79
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h4
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/imm.c15
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/ips.c10
-rw-r--r--drivers/scsi/isci/init.c1
-rw-r--r--drivers/scsi/isci/request.c10
-rw-r--r--drivers/scsi/isci/task.c6
-rw-r--r--drivers/scsi/iscsi_tcp.c7
-rw-r--r--drivers/scsi/libfc/fc_encode.h256
-rw-r--r--drivers/scsi/libfc/fc_lport.c88
-rw-r--r--drivers/scsi/libfc/fc_rport.c13
-rw-r--r--drivers/scsi/libiscsi.c234
-rw-r--r--drivers/scsi/libsas/sas_ata.c7
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c9
-rw-r--r--drivers/scsi/libsas/sas_task.c4
-rw-r--r--drivers/scsi/lpfc/lpfc.h124
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c298
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c665
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c229
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h124
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c114
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c416
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c66
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c45
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c21
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c102
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c21
-rw-r--r--drivers/scsi/mesh.c9
-rw-r--r--drivers/scsi/mpi3mr/Kconfig7
-rw-r--r--drivers/scsi/mpi3mr/Makefile4
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h1880
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h216
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h159
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h1004
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h33
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h463
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h901
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_debug.h60
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c3957
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c4046
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c381
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c18
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c200
-rw-r--r--drivers/scsi/mvsas/mv_init.c27
-rw-r--r--drivers/scsi/mvsas/mv_sas.c10
-rw-r--r--drivers/scsi/mvumi.c10
-rw-r--r--drivers/scsi/myrb.c64
-rw-r--r--drivers/scsi/myrs.c9
-rw-r--r--drivers/scsi/nsp32.c419
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c48
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c34
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c30
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c83
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c64
-rw-r--r--drivers/scsi/pmcraid.h4
-rw-r--r--drivers/scsi/ppa.c14
-rw-r--r--drivers/scsi/ps3rom.c7
-rw-r--r--drivers/scsi/qedf/qedf_attr.c14
-rw-r--r--drivers/scsi/qedf/qedf_dbg.c3
-rw-r--r--drivers/scsi/qedf/qedf_io.c27
-rw-r--r--drivers/scsi/qedf/qedf_main.c9
-rw-r--r--drivers/scsi/qedi/qedi.h1
-rw-r--r--drivers/scsi/qedi/qedi_fw.c291
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h4
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c105
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h5
-rw-r--r--drivers/scsi/qedi/qedi_main.c9
-rw-r--r--drivers/scsi/qedi/qedi_sysfs.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c19
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/qlogicfas408.c138
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_debug.c20
-rw-r--r--drivers/scsi/scsi_error.c72
-rw-r--r--drivers/scsi/scsi_ioctl.c7
-rw-r--r--drivers/scsi/scsi_lib.c142
-rw-r--r--drivers/scsi/scsi_logging.c10
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c21
-rw-r--r--drivers/scsi/scsi_sysfs.c9
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c589
-rw-r--r--drivers/scsi/scsi_transport_sas.c9
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/sd.c67
-rw-r--r--drivers/scsi/sd_zbc.c3
-rw-r--r--drivers/scsi/sg.c11
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c3
-rw-r--r--drivers/scsi/snic/snic_ctl.c5
-rw-r--r--drivers/scsi/snic/snic_debugfs.c23
-rw-r--r--drivers/scsi/snic/snic_trc.h3
-rw-r--r--drivers/scsi/sr.c6
-rw-r--r--drivers/scsi/sr_ioctl.c6
-rw-r--r--drivers/scsi/st.c10
-rw-r--r--drivers/scsi/stex.c9
-rw-r--r--drivers/scsi/storvsc_drv.c131
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c6
-rw-r--r--drivers/scsi/ufs/Kconfig1
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210-pci.c2
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.c6
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.h2
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c31
-rw-r--r--drivers/scsi/ufs/ufs-exynos.h26
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c4
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c45
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c2
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c269
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c6
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c36
-rw-r--r--drivers/scsi/ufs/ufshcd.c1167
-rw-r--r--drivers/scsi/ufs/ufshcd.h91
-rw-r--r--drivers/scsi/ufs/ufshci.h1
-rw-r--r--drivers/scsi/virtio_scsi.c8
-rw-r--r--drivers/scsi/vmw_pvscsi.c6
-rw-r--r--drivers/scsi/wd33c93.c43
-rw-r--r--drivers/scsi/xen-scsifront.c8
-rw-r--r--drivers/siox/siox-bus-gpio.c19
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c46
-rw-r--r--drivers/soc/bcm/brcmstb/common.c5
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c1
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c84
-rw-r--r--drivers/soc/imx/gpcv2.c634
-rw-r--r--drivers/soc/imx/soc-imx.c3
-rw-r--r--drivers/soc/imx/soc-imx8m.c84
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c12
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c10
-rw-r--r--drivers/soc/litex/Kconfig12
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c3
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c1
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c42
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c35
-rw-r--r--drivers/soc/qcom/rpmhpd.c21
-rw-r--r--drivers/soc/qcom/rpmpd.c22
-rw-r--r--drivers/soc/qcom/smd-rpm.c2
-rw-r--r--drivers/soc/qcom/smem_state.c36
-rw-r--r--drivers/soc/qcom/socinfo.c56
-rw-r--r--drivers/soc/renesas/Kconfig5
-rw-r--r--drivers/soc/renesas/renesas-soc.c33
-rw-r--r--drivers/soc/rockchip/pm_domains.c252
-rw-r--r--drivers/soc/tegra/Kconfig8
-rw-r--r--drivers/soc/tegra/common.c97
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c6
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c3
-rw-r--r--drivers/soc/tegra/pmc.c144
-rw-r--r--drivers/soc/tegra/regulators-tegra20.c94
-rw-r--r--drivers/soc/tegra/regulators-tegra30.c93
-rw-r--r--drivers/soc/ti/smartreflex.c4
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c4
-rw-r--r--drivers/soundwire/Kconfig1
-rw-r--r--drivers/soundwire/bus.c180
-rw-r--r--drivers/soundwire/bus.h13
-rw-r--r--drivers/soundwire/cadence_master.c21
-rw-r--r--drivers/soundwire/cadence_master.h3
-rw-r--r--drivers/soundwire/dmi-quirks.c2
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c14
-rw-r--r--drivers/soundwire/intel.c56
-rw-r--r--drivers/soundwire/intel.h14
-rw-r--r--drivers/soundwire/intel_init.c232
-rw-r--r--drivers/soundwire/slave.c4
-rw-r--r--drivers/soundwire/stream.c13
-rw-r--r--drivers/spi/spi-atmel.c9
-rw-r--r--drivers/spi/spi-bcm2835.c12
-rw-r--r--drivers/spi/spi-cadence-quadspi.c51
-rw-r--r--drivers/spi/spi-cadence.c14
-rw-r--r--drivers/spi/spi-imx.c52
-rw-r--r--drivers/spi/spi-meson-spicc.c2
-rw-r--r--drivers/spi/spi-mt65xx.c29
-rw-r--r--drivers/spi/spi-mux.c8
-rw-r--r--drivers/spi/spi-stm32.c24
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/spmi/Kconfig9
-rw-r--r--drivers/spmi/Makefile1
-rw-r--r--drivers/spmi/hisi-spmi-controller.c (renamed from drivers/staging/hikey9xx/hisi-spmi-controller.c)2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ashmem.c3
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c18
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c11
-rw-r--r--drivers/staging/fbtft/TODO5
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c30
-rw-r--r--drivers/staging/fbtft/fb_bd663474.c4
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c29
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c4
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c4
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c4
-rw-r--r--drivers/staging/fbtft/fb_ili9340.c1
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c4
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c1
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c4
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c6
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c1
-rw-r--r--drivers/staging/fbtft/fb_upd161704.c4
-rw-r--r--drivers/staging/fbtft/fb_watterott.c1
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c3
-rw-r--r--drivers/staging/fbtft/fbtft-core.c25
-rw-r--r--drivers/staging/fbtft/fbtft-io.c12
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h2
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c3
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c8
-rw-r--r--drivers/staging/fwserial/fwserial.c60
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c21
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c2
-rw-r--r--drivers/staging/greybus/audio_topology.c4
-rw-r--r--drivers/staging/greybus/gbphy.c4
-rw-r--r--drivers/staging/greybus/spilib.c1
-rw-r--r--drivers/staging/greybus/uart.c22
-rw-r--r--drivers/staging/gs_fpgaboot/README2
-rw-r--r--drivers/staging/hikey9xx/Kconfig22
-rw-r--r--drivers/staging/hikey9xx/Makefile3
-rw-r--r--drivers/staging/hikey9xx/hi6421-spmi-pmic.c116
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml7
-rw-r--r--drivers/staging/iio/accel/adis16203.c6
-rw-r--r--drivers/staging/iio/accel/adis16240.c6
-rw-r--r--drivers/staging/iio/addac/adt7316.c12
-rw-r--r--drivers/staging/iio/cdc/ad7746.c114
-rw-r--r--drivers/staging/iio/cdc/ad7746.h28
-rw-r--r--drivers/staging/iio/frequency/ad9834.c5
-rw-r--r--drivers/staging/kpc2000/Kconfig59
-rw-r--r--drivers/staging/kpc2000/Makefile6
-rw-r--r--drivers/staging/kpc2000/TODO2
-rw-r--r--drivers/staging/kpc2000/kpc.h23
-rw-r--r--drivers/staging/kpc2000/kpc2000/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc2000/cell_probe.c548
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c565
-rw-r--r--drivers/staging/kpc2000/kpc2000/dma_common_defs.h23
-rw-r--r--drivers/staging/kpc2000/kpc2000/pcie.h90
-rw-r--r--drivers/staging/kpc2000/kpc2000/uapi.h22
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c731
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c517
-rw-r--r--drivers/staging/kpc2000/kpc_dma/Makefile6
-rw-r--r--drivers/staging/kpc2000/kpc_dma/dma.c270
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c363
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c249
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h222
-rw-r--r--drivers/staging/kpc2000/kpc_dma/uapi.h11
-rw-r--r--drivers/staging/ks7010/ks_hostif.c14
-rw-r--r--drivers/staging/ks7010/ks_hostif.h24
-rw-r--r--drivers/staging/most/dim2/dim2.c23
-rw-r--r--drivers/staging/most/dim2/hal.c10
-rw-r--r--drivers/staging/most/i2c/i2c.c12
-rw-r--r--drivers/staging/most/net/net.c6
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts6
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi75
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c327
-rw-r--r--drivers/staging/nvec/nvec.c7
-rw-r--r--drivers/staging/octeon/ethernet-tx.c1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c1
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c2
-rw-r--r--drivers/staging/qlge/qlge_main.c10
-rw-r--r--drivers/staging/rtl8188eu/Makefile1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c144
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c153
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c187
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c62
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c111
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c112
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c53
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c236
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c483
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c76
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c289
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c33
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c59
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c111
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c210
-rw-r--r--drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c123
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c161
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_rtl8188e.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c53
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c28
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c57
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c65
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c32
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c28
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c117
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h1
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h14
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h2
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h96
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h32
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h284
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h131
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h3
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h36
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c353
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c97
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c18
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c30
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c135
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c28
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c10
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c1
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c25
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c15
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c6
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h19
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c41
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c48
-rw-r--r--drivers/staging/rtl8712/hal_init.c27
-rw-r--r--drivers/staging/rtl8712/os_intfs.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c8
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c5
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h23
-rw-r--r--drivers/staging/rtl8712/rtl871x_led.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c31
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h127
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c12
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c82
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/TODO2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c244
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c41
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c74
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_eeprom.c210
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c28
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c35
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c103
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c440
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c26
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c672
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c43
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c194
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c68
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c959
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c519
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h68
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c79
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c49
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c658
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.c340
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c221
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c175
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c261
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c1059
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c103
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h46
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c107
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c322
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c22
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c38
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c36
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_PathDiv.c34
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_PathDiv.h21
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RTL8723B.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c71
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.h165
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_precomp.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c21
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c65
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c69
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c9
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c35
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c18
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h2
-rw-r--r--drivers/staging/rtl8723bs/include/autoconf.h50
-rw-r--r--drivers/staging/rtl8723bs/include/drv_conf.h19
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h12
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h62
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h198
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h40
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pg.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy.h24
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h67
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_spec.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h21
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ap.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h170
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ht.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h44
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h84
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_rf.h33
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h104
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_ops_linux.h2
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c32
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c67
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c36
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c28
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rts5208/ms.c1
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c4
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c101
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c1
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c27
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c307
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c1636
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h93
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c6
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c2
-rw-r--r--drivers/staging/vt6655/upc.h2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c30
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c19
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c21
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c4
-rw-r--r--drivers/target/loopback/tcm_loop.c1
-rw-r--r--drivers/target/sbp/sbp_target.c1
-rw-r--r--drivers/target/target_core_alua.c6
-rw-r--r--drivers/target/target_core_configfs.c50
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pr.c8
-rw-r--r--drivers/target/target_core_pr.h2
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_sbc.c45
-rw-r--r--drivers/target/target_core_spc.c97
-rw-r--r--drivers/target/target_core_transport.c2
-rw-r--r--drivers/target/target_core_user.c10
-rw-r--r--drivers/target/target_core_xcopy.c19
-rw-r--r--drivers/tee/optee/call.c38
-rw-r--r--drivers/tee/optee/core.c43
-rw-r--r--drivers/tee/optee/optee_private.h1
-rw-r--r--drivers/tee/optee/rpc.c5
-rw-r--r--drivers/tee/optee/shm_pool.c20
-rw-r--r--drivers/tee/tee_shm.c20
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/imx_sc_thermal.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3401_thermal.c82
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c309
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h9
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c373
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c163
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c12
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c55
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c15
-rw-r--r--drivers/thermal/mtk_thermal.c6
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c7
-rw-r--r--drivers/thermal/rockchip_thermal.c121
-rw-r--r--drivers/thermal/sprd_thermal.c16
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c13
-rw-r--r--drivers/thermal/thermal_core.c2
-rw-r--r--drivers/thermal/thermal_of.c3
-rw-r--r--drivers/thunderbolt/Makefile2
-rw-r--r--drivers/thunderbolt/acpi.c206
-rw-r--r--drivers/thunderbolt/dma_port.c94
-rw-r--r--drivers/thunderbolt/domain.c9
-rw-r--r--drivers/thunderbolt/eeprom.c19
-rw-r--r--drivers/thunderbolt/icm.c20
-rw-r--r--drivers/thunderbolt/lc.c6
-rw-r--r--drivers/thunderbolt/nhi.c71
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/nvm.c95
-rw-r--r--drivers/thunderbolt/path.c4
-rw-r--r--drivers/thunderbolt/quirks.c30
-rw-r--r--drivers/thunderbolt/retimer.c108
-rw-r--r--drivers/thunderbolt/sb_regs.h2
-rw-r--r--drivers/thunderbolt/switch.c289
-rw-r--r--drivers/thunderbolt/tb.c71
-rw-r--r--drivers/thunderbolt/tb.h116
-rw-r--r--drivers/thunderbolt/tb_regs.h4
-rw-r--r--drivers/thunderbolt/test.c645
-rw-r--r--drivers/thunderbolt/tunnel.c401
-rw-r--r--drivers/thunderbolt/tunnel.h2
-rw-r--r--drivers/thunderbolt/usb4.c438
-rw-r--r--drivers/thunderbolt/usb4_port.c280
-rw-r--r--drivers/thunderbolt/xdomain.c10
-rw-r--r--drivers/tty/Kconfig7
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/amiserial.c12
-rw-r--r--drivers/tty/ehv_bytechan.c4
-rw-r--r--drivers/tty/goldfish.c4
-rw-r--r--drivers/tty/hvc/hvc_console.c8
-rw-r--r--drivers/tty/hvc/hvc_iucv.c53
-rw-r--r--drivers/tty/hvc/hvc_vio.c2
-rw-r--r--drivers/tty/hvc/hvcs.c4
-rw-r--r--drivers/tty/hvc/hvsi.c6
-rw-r--r--drivers/tty/ipwireless/tty.c6
-rw-r--r--drivers/tty/mips_ejtag_fdc.c8
-rw-r--r--drivers/tty/moxa.c22
-rw-r--r--drivers/tty/mxser.c1958
-rw-r--r--drivers/tty/mxser.h151
-rw-r--r--drivers/tty/n_gsm.c34
-rw-r--r--drivers/tty/n_hdlc.c18
-rw-r--r--drivers/tty/n_null.c7
-rw-r--r--drivers/tty/n_r3964.c1283
-rw-r--r--drivers/tty/n_tty.c201
-rw-r--r--drivers/tty/nozomi.c26
-rw-r--r--drivers/tty/pty.c89
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c59
-rw-r--r--drivers/tty/serial/8250/8250_core.c27
-rw-r--r--drivers/tty/serial/8250/8250_exar.c20
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c5
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c5
-rw-r--r--drivers/tty/serial/8250/8250_of.c4
-rw-r--r--drivers/tty/serial/8250/8250_omap.c22
-rw-r--r--drivers/tty/serial/8250/8250_pci.c179
-rw-r--r--drivers/tty/serial/8250/8250_port.c74
-rw-r--r--drivers/tty/serial/8250/serial_cs.c13
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c19
-rw-r--r--drivers/tty/serial/dz.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c135
-rw-r--r--drivers/tty/serial/icom.c2
-rw-r--r--drivers/tty/serial/imx.c27
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/kgdb_nmi.c4
-rw-r--r--drivers/tty/serial/liteuart.c21
-rw-r--r--drivers/tty/serial/max310x.c41
-rw-r--r--drivers/tty/serial/meson_uart.c8
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c21
-rw-r--r--drivers/tty/serial/mxs-auart.c26
-rw-r--r--drivers/tty/serial/omap-serial.c10
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c22
-rw-r--r--drivers/tty/serial/samsung_tty.c3
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c26
-rw-r--r--drivers/tty/serial/serial-tegra.c6
-rw-r--r--drivers/tty/serial/serial_core.c58
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c41
-rw-r--r--drivers/tty/serial/sh-sci.h1
-rw-r--r--drivers/tty/serial/st-asc.c4
-rw-r--r--drivers/tty/serial/stm32-usart.c197
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/tty/serial/tegra-tcu.c26
-rw-r--r--drivers/tty/serial/uartlite.c27
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/vr41xx_siu.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/synclink_gt.c27
-rw-r--r--drivers/tty/tty_baudrate.c13
-rw-r--r--drivers/tty/tty_buffer.c28
-rw-r--r--drivers/tty/tty_io.c198
-rw-r--r--drivers/tty/tty_ioctl.c96
-rw-r--r--drivers/tty/tty_jobctrl.c88
-rw-r--r--drivers/tty/tty_ldisc.c29
-rw-r--r--drivers/tty/tty_port.c18
-rw-r--r--drivers/tty/ttynull.c2
-rw-r--r--drivers/tty/vcc.c28
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/tty/vt/selection.c20
-rw-r--r--drivers/tty/vt/vt.c18
-rw-r--r--drivers/uio/Kconfig2
-rw-r--r--drivers/uio/uio_aec.c2
-rw-r--r--drivers/uio/uio_pci_generic.c32
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/cdns3/cdns3-ep0.c7
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c40
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c2
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c2
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c9
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h4
-rw-r--r--drivers/usb/cdns3/cdnsp-mem.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c18
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h2
-rw-r--r--drivers/usb/cdns3/core.c4
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c34
-rw-r--r--drivers/usb/chipidea/otg.c9
-rw-r--r--drivers/usb/chipidea/udc.c2
-rw-r--r--drivers/usb/class/cdc-acm.c26
-rw-r--r--drivers/usb/class/cdc-wdm.c11
-rw-r--r--drivers/usb/class/usbtmc.c9
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/common/usb-conn-gpio.c62
-rw-r--r--drivers/usb/common/usb-otg-fsm.c6
-rw-r--r--drivers/usb/core/devio.c4
-rw-r--r--drivers/usb/core/hcd.c130
-rw-r--r--drivers/usb/core/hub.c154
-rw-r--r--drivers/usb/core/message.c6
-rw-r--r--drivers/usb/core/quirks.c5
-rw-r--r--drivers/usb/core/sysfs.c24
-rw-r--r--drivers/usb/core/urb.c9
-rw-r--r--drivers/usb/dwc2/core.c30
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/core_intr.c3
-rw-r--r--drivers/usb/dwc2/gadget.c45
-rw-r--r--drivers/usb/dwc2/hcd.c6
-rw-r--r--drivers/usb/dwc2/hcd_queue.c2
-rw-r--r--drivers/usb/dwc2/params.c5
-rw-r--r--drivers/usb/dwc2/pci.c2
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/core.c7
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/debugfs.c8
-rw-r--r--drivers/usb/dwc3/drd.c1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c8
-rw-r--r--drivers/usb/dwc3/ep0.c10
-rw-r--r--drivers/usb/dwc3/gadget.c36
-rw-r--r--drivers/usb/dwc3/trace.h2
-rw-r--r--drivers/usb/gadget/function/f_eem.c43
-rw-r--r--drivers/usb/gadget/function/f_fs.c65
-rw-r--r--drivers/usb/gadget/function/f_hid.c48
-rw-r--r--drivers/usb/gadget/function/f_printer.c3
-rw-r--r--drivers/usb/gadget/function/f_uac2.c144
-rw-r--r--drivers/usb/gadget/function/u_audio.c225
-rw-r--r--drivers/usb/gadget/function/u_audio.h12
-rw-r--r--drivers/usb/gadget/function/u_hid.h4
-rw-r--r--drivers/usb/gadget/function/u_midi.h4
-rw-r--r--drivers/usb/gadget/function/u_serial.c14
-rw-r--r--drivers/usb/gadget/function/u_uac2.h4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c5
-rw-r--r--drivers/usb/gadget/legacy/hid.c4
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c7
-rw-r--r--drivers/usb/gadget/udc/core.c49
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c11
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c44
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h19
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/gr_udc.h2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c5
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c14
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2272.c41
-rw-r--r--drivers/usb/gadget/udc/net2272.h1
-rw-r--r--drivers/usb/gadget/udc/net2280.c51
-rw-r--r--drivers/usb/gadget/udc/net2280.h1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h4
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h4
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c5
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c9
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h1
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c31
-rw-r--r--drivers/usb/gadget/udc/trace.c2
-rw-r--r--drivers/usb/gadget/udc/trace.h2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/host/ehci-fsl.c8
-rw-r--r--drivers/usb/host/ehci-hcd.c26
-rw-r--r--drivers/usb/host/ehci-hub.c139
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/fotg210-hcd.c5
-rw-r--r--drivers/usb/host/fotg210.h3
-rw-r--r--drivers/usb/host/max3421-hcd.c44
-rw-r--r--drivers/usb/host/ohci-at91.c9
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/xhci-dbgtty.c8
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c60
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-mtk.h10
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c2
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c7
-rw-r--r--drivers/usb/host/xhci-tegra.c621
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/isp1760/Kconfig5
-rw-r--r--drivers/usb/isp1760/isp1760-core.c513
-rw-r--r--drivers/usb/isp1760/isp1760-core.h44
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c1020
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.h57
-rw-r--r--drivers/usb/isp1760/isp1760-if.c41
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h435
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c251
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h13
-rw-r--r--drivers/usb/misc/ftdi-elan.c1
-rw-r--r--drivers/usb/mtu3/mtu3.h30
-rw-r--r--drivers/usb/mtu3/mtu3_core.c20
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c1
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c169
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c2
-rw-r--r--drivers/usb/mtu3/mtu3_host.c6
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c95
-rw-r--r--drivers/usb/musb/musb_core.c71
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/musb/musb_host.c18
-rw-r--r--drivers/usb/musb/musb_host.h4
-rw-r--r--drivers/usb/musb/musb_trace.h17
-rw-r--r--drivers/usb/musb/omap2430.c73
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-isp1301.c25
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c15
-rw-r--r--drivers/usb/phy/phy.c61
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c7
-rw-r--r--drivers/usb/roles/class.c9
-rw-r--r--drivers/usb/serial/belkin_sa.c20
-rw-r--r--drivers/usb/serial/ch341.c1
-rw-r--r--drivers/usb/serial/cp210x.c194
-rw-r--r--drivers/usb/serial/cyberjack.c4
-rw-r--r--drivers/usb/serial/cypress_m8.c37
-rw-r--r--drivers/usb/serial/digi_acceleport.c46
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h3
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/io_edgeport.c39
-rw-r--r--drivers/usb/serial/io_ti.c12
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/kobil_sct.c4
-rw-r--r--drivers/usb/serial/metro-usb.c12
-rw-r--r--drivers/usb/serial/mos7720.c29
-rw-r--r--drivers/usb/serial/mos7840.c17
-rw-r--r--drivers/usb/serial/opticon.c6
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/oti6858.c12
-rw-r--r--drivers/usb/serial/pl2303.c57
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/sierra.c8
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c16
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/serial/usb-wwan.h4
-rw-r--r--drivers/usb/serial/usb_wwan.c12
-rw-r--r--drivers/usb/serial/whiteheat.c9
-rw-r--r--drivers/usb/storage/cypress_atacb.c4
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/class.c4
-rw-r--r--drivers/usb/typec/mux.c39
-rw-r--r--drivers/usb/typec/mux.h6
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c28
-rw-r--r--drivers/usb/typec/stusb160x.c20
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c46
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c90
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/tipd/core.c9
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h14
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c47
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h2
-rw-r--r--drivers/vdpa/mlx5/core/mr.c104
-rw-r--r--drivers/vdpa/mlx5/core/resources.c7
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c84
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c8
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c1
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c47
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c16
-rw-r--r--drivers/vfio/mdev/Kconfig7
-rw-r--r--drivers/vfio/mdev/Makefile3
-rw-r--r--drivers/vfio/mdev/mdev_core.c46
-rw-r--r--drivers/vfio/mdev/mdev_driver.c10
-rw-r--r--drivers/vfio/mdev/mdev_private.h2
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c37
-rw-r--r--drivers/vfio/pci/vfio_pci.c47
-rw-r--r--drivers/vfio/platform/vfio_amba.c1
-rw-r--r--drivers/vfio/platform/vfio_platform.c1
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c6
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h1
-rw-r--r--drivers/vfio/vfio.c10
-rw-r--r--drivers/vfio/vfio_iommu_type1.c34
-rw-r--r--drivers/vhost/iotlb.c2
-rw-r--r--drivers/vhost/scsi.c21
-rw-r--r--drivers/vhost/vdpa.c7
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h21
-rw-r--r--drivers/vhost/vringh.c2
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/backlight/Kconfig30
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/lm3630a_bl.c54
-rw-r--r--drivers/video/backlight/qcom-wled.c1
-rw-r--r--drivers/video/backlight/rt4831-backlight.c203
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/core/fbmem.c12
-rw-r--r--drivers/video/fbdev/hyperv_fb.c1
-rw-r--r--drivers/video/fbdev/xilinxfb.c2
-rw-r--r--drivers/virt/acrn/vm.c16
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.c2
-rw-r--r--drivers/virtio/virtio.c1
-rw-r--r--drivers/virtio/virtio_mem.c340
-rw-r--r--drivers/virtio/virtio_pci_common.c7
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c21
-rw-r--r--drivers/virtio/virtio_ring.c247
-rw-r--r--drivers/virtio/virtio_vdpa.c18
-rw-r--r--drivers/visorbus/visorchipset.c6
-rw-r--r--drivers/w1/masters/ds2482.c94
-rw-r--r--drivers/w1/slaves/w1_ds2438.c122
-rw-r--r--drivers/w1/slaves/w1_therm.c5
-rw-r--r--drivers/watchdog/Kconfig77
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/aspeed_wdt.c6
-rw-r--r--drivers/watchdog/bcm7038_wdt.c31
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/diag288_wdt.c4
-rw-r--r--drivers/watchdog/dw_wdt.c9
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c16
-rw-r--r--drivers/watchdog/imx2_wdt.c10
-rw-r--r--drivers/watchdog/imx_sc_wdt.c11
-rw-r--r--drivers/watchdog/it87_wdt.c8
-rw-r--r--drivers/watchdog/jz4740_wdt.c4
-rw-r--r--drivers/watchdog/keembay_wdt.c34
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c2
-rw-r--r--drivers/watchdog/mei_wdt.c8
-rw-r--r--drivers/watchdog/meson_wdt.c8
-rw-r--r--drivers/watchdog/msc313e_wdt.c166
-rw-r--r--drivers/watchdog/mtk_wdt.c77
-rw-r--r--drivers/watchdog/mtx-1_wdt.c2
-rw-r--r--drivers/watchdog/mv64x60_wdt.c324
-rw-r--r--drivers/watchdog/octeon-wdt-main.c12
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c38
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c4
-rw-r--r--drivers/watchdog/sama5d4_wdt.c10
-rw-r--r--drivers/watchdog/sbc60xxwdt.c2
-rw-r--r--drivers/watchdog/sbsa_gwdt.c54
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/sl28cpld_wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c43
-rw-r--r--drivers/watchdog/w83877f_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.h48
-rw-r--r--drivers/watchdog/watchdog_dev.c86
-rw-r--r--drivers/watchdog/watchdog_hrtimer_pretimeout.c44
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c9
-rw-r--r--drivers/watchdog/wdat_wdt.c4
-rw-r--r--drivers/watchdog/wdt.c4
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c21
-rw-r--r--drivers/xen/events/events_base.c22
-rw-r--r--drivers/xen/pcpu.c6
-rw-r--r--drivers/xen/xen-balloon.c28
-rw-r--r--drivers/xen/xen-scsiback.c17
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c15
-rw-r--r--fs/Kconfig15
-rw-r--r--fs/Kconfig.binfmt15
-rw-r--r--fs/Makefile1
-rw-r--r--fs/afs/cmservice.c25
-rw-r--r--fs/afs/dir.c10
-rw-r--r--fs/afs/write.c18
-rw-r--r--fs/binfmt_em86.c110
-rw-r--r--fs/block_dev.c8
-rw-r--r--fs/btrfs/backref.c6
-rw-r--r--fs/btrfs/backref.h3
-rw-r--r--fs/btrfs/block-group.c367
-rw-r--r--fs/btrfs/block-group.h6
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/ctree.c67
-rw-r--r--fs/btrfs/delayed-ref.c4
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/file.c23
-rw-r--r--fs/btrfs/inode.c159
-rw-r--r--fs/btrfs/ordered-data.c2
-rw-r--r--fs/btrfs/ordered-data.h3
-rw-r--r--fs/btrfs/qgroup.c38
-rw-r--r--fs/btrfs/qgroup.h2
-rw-r--r--fs/btrfs/tests/qgroup-tests.c20
-rw-r--r--fs/btrfs/transaction.c15
-rw-r--r--fs/btrfs/transaction.h9
-rw-r--r--fs/btrfs/tree-log.c37
-rw-r--r--fs/btrfs/volumes.c356
-rw-r--r--fs/btrfs/volumes.h5
-rw-r--r--fs/btrfs/zoned.c12
-rw-r--r--fs/ceph/addr.c26
-rw-r--r--fs/ceph/caps.c142
-rw-r--r--fs/ceph/debugfs.c37
-rw-r--r--fs/ceph/dir.c16
-rw-r--r--fs/ceph/export.c1
-rw-r--r--fs/ceph/file.c24
-rw-r--r--fs/ceph/inode.c38
-rw-r--r--fs/ceph/mds_client.c81
-rw-r--r--fs/ceph/mds_client.h6
-rw-r--r--fs/ceph/metric.c167
-rw-r--r--fs/ceph/metric.h89
-rw-r--r--fs/ceph/quota.c9
-rw-r--r--fs/ceph/snap.c79
-rw-r--r--fs/ceph/super.h5
-rw-r--r--fs/cifs/cifs_dfs_ref.c9
-rw-r--r--fs/cifs/cifsfs.c4
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h18
-rw-r--r--fs/cifs/cifspdu.h1
-rw-r--r--fs/cifs/cifssmb.c34
-rw-r--r--fs/cifs/connect.c119
-rw-r--r--fs/cifs/dfs_cache.c229
-rw-r--r--fs/cifs/dfs_cache.h3
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/dns_resolve.c10
-rw-r--r--fs/cifs/dns_resolve.h2
-rw-r--r--fs/cifs/file.c37
-rw-r--r--fs/cifs/fs_context.c38
-rw-r--r--fs/cifs/fs_context.h1
-rw-r--r--fs/cifs/inode.c19
-rw-r--r--fs/cifs/misc.c52
-rw-r--r--fs/cifs/smb2ops.c54
-rw-r--r--fs/cifs/smb2pdu.c87
-rw-r--r--fs/cifs/smb2pdu.h8
-rw-r--r--fs/cifs/transport.c2
-rw-r--r--fs/configfs/file.c192
-rw-r--r--fs/coredump.c4
-rw-r--r--fs/d_path.c324
-rw-r--r--fs/dax.c2
-rw-r--r--fs/debugfs/file.c38
-rw-r--r--fs/exec.c3
-rw-r--r--fs/exfat/dir.c8
-rw-r--r--fs/exfat/super.c2
-rw-r--r--fs/ext2/dir.c12
-rw-r--r--fs/ext2/ext2.h3
-rw-r--r--fs/ext2/namei.c4
-rw-r--r--fs/ext4/ext4_jbd2.c5
-rw-r--r--fs/ext4/ioctl.c16
-rw-r--r--fs/ext4/mballoc.c9
-rw-r--r--fs/ext4/mmp.c33
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/resize.c4
-rw-r--r--fs/ext4/super.c26
-rw-r--r--fs/f2fs/checkpoint.c4
-rw-r--r--fs/f2fs/compress.c255
-rw-r--r--fs/f2fs/data.c360
-rw-r--r--fs/f2fs/debug.c13
-rw-r--r--fs/f2fs/dir.c25
-rw-r--r--fs/f2fs/f2fs.h228
-rw-r--r--fs/f2fs/file.c37
-rw-r--r--fs/f2fs/gc.c16
-rw-r--r--fs/f2fs/inline.c4
-rw-r--r--fs/f2fs/inode.c23
-rw-r--r--fs/f2fs/namei.c36
-rw-r--r--fs/f2fs/node.c35
-rw-r--r--fs/f2fs/node.h33
-rw-r--r--fs/f2fs/recovery.c29
-rw-r--r--fs/f2fs/segment.c34
-rw-r--r--fs/f2fs/super.c188
-rw-r--r--fs/f2fs/sysfs.c232
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/fs-writeback.c3
-rw-r--r--fs/fs_context.c54
-rw-r--r--fs/fuse/dax.c12
-rw-r--r--fs/fuse/dev.c14
-rw-r--r--fs/fuse/dir.c63
-rw-r--r--fs/fuse/file.c18
-rw-r--r--fs/fuse/fuse_i.h24
-rw-r--r--fs/fuse/inode.c100
-rw-r--r--fs/fuse/readdir.c7
-rw-r--r--fs/fuse/virtio_fs.c4
-rw-r--r--fs/hfs/bfind.c14
-rw-r--r--fs/hfs/bnode.c25
-rw-r--r--fs/hfs/btree.h7
-rw-r--r--fs/hfs/super.c10
-rw-r--r--fs/hfsplus/inode.c5
-rw-r--r--fs/hfsplus/xattr.c1
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/internal.h3
-rw-r--r--fs/io-wq.c96
-rw-r--r--fs/io_uring.c360
-rw-r--r--fs/iomap/buffered-io.c43
-rw-r--r--fs/iomap/seek.c25
-rw-r--r--fs/jbd2/checkpoint.c4
-rw-r--r--fs/jbd2/journal.c149
-rw-r--r--fs/jfs/inode.c3
-rw-r--r--fs/jfs/jfs_dinode.h14
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/jfs/jfs_imap.c8
-rw-r--r--fs/jfs/jfs_incore.h12
-rw-r--r--fs/jfs/jfs_logmgr.c1
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/super.c3
-rw-r--r--fs/kernel_read_file.c2
-rw-r--r--fs/kernfs/dir.c86
-rw-r--r--fs/lockd/svc.c43
-rw-r--r--fs/lockd/svcxdr.h151
-rw-r--r--fs/lockd/xdr.c402
-rw-r--r--fs/lockd/xdr4.c403
-rw-r--r--fs/namei.c80
-rw-r--r--fs/namespace.c42
-rw-r--r--fs/nfs/delegation.c94
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/direct.c17
-rw-r--r--fs/nfs/fscache.c18
-rw-r--r--fs/nfs/getroot.c12
-rw-r--r--fs/nfs/inode.c61
-rw-r--r--fs/nfs/nfs3proc.c4
-rw-r--r--fs/nfs/nfs4_fs.h4
-rw-r--r--fs/nfs/nfs4client.c82
-rw-r--r--fs/nfs/nfs4file.c8
-rw-r--r--fs/nfs/nfs4proc.c114
-rw-r--r--fs/nfs/nfs4trace.h6
-rw-r--r--fs/nfs/nfstrace.h8
-rw-r--r--fs/nfs/pagelist.c8
-rw-r--r--fs/nfs/pnfs.c68
-rw-r--r--fs/nfs/pnfs_nfs.c54
-rw-r--r--fs/nfs/read.c20
-rw-r--r--fs/nfs/write.c6
-rw-r--r--fs/nfs_common/grace.c1
-rw-r--r--fs/nfsd/blocklayout.c2
-rw-r--r--fs/nfsd/netns.h6
-rw-r--r--fs/nfsd/nfs3acl.c3
-rw-r--r--fs/nfsd/nfs4callback.c47
-rw-r--r--fs/nfsd/nfs4proc.c154
-rw-r--r--fs/nfsd/nfs4state.c179
-rw-r--r--fs/nfsd/nfsd.h4
-rw-r--r--fs/nfsd/nfsfh.h7
-rw-r--r--fs/nfsd/nfssvc.c3
-rw-r--r--fs/nfsd/trace.h250
-rw-r--r--fs/nfsd/vfs.c26
-rw-r--r--fs/nfsd/xdr4.h1
-rw-r--r--fs/nilfs2/btree.c1
-rw-r--r--fs/notify/fanotify/fanotify_user.c17
-rw-r--r--fs/notify/inotify/inotify_user.c17
-rw-r--r--fs/ntfs/file.c33
-rw-r--r--fs/ocfs2/file.c103
-rw-r--r--fs/open.c17
-rw-r--r--fs/orangefs/inode.c7
-rw-r--r--fs/orangefs/super.c2
-rw-r--r--fs/overlayfs/export.c2
-rw-r--r--fs/overlayfs/file.c47
-rw-r--r--fs/overlayfs/readdir.c5
-rw-r--r--fs/pipe.c32
-rw-r--r--fs/proc/base.c6
-rw-r--r--fs/proc/bootconfig.c2
-rw-r--r--fs/proc/fd.c20
-rw-r--r--fs/proc/kcore.c67
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/proc/task_mmu.c34
-rw-r--r--fs/reiserfs/stree.c31
-rw-r--r--fs/reiserfs/super.c8
-rw-r--r--fs/seq_file.c46
-rw-r--r--fs/ubifs/debug.c2
-rw-r--r--fs/ubifs/dir.c7
-rw-r--r--fs/ubifs/journal.c3
-rw-r--r--fs/ubifs/master.c2
-rw-r--r--fs/ubifs/replay.c2
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--fs/ubifs/tnc_commit.c2
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/ubifs/xattr.c48
-rw-r--r--fs/userfaultfd.c41
-rw-r--r--fs/vboxsf/dir.c76
-rw-r--r--fs/vboxsf/file.c71
-rw-r--r--fs/vboxsf/vfsmod.h7
-rw-r--r--fs/xfs/libxfs/xfs_ag.c288
-rw-r--r--fs/xfs/libxfs/xfs_ag.h136
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c11
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.h15
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c111
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h2
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c31
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.h9
-rw-r--r--fs/xfs/libxfs/xfs_attr.c910
-rw-r--r--fs/xfs/libxfs/xfs_attr.h403
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c5
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h2
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c167
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.h8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h1
-rw-r--r--fs/xfs/libxfs/xfs_btree.c15
-rw-r--r--fs/xfs/libxfs/xfs_btree.h10
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c692
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h43
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c46
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.h13
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c30
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h14
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c122
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h9
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c39
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h7
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c147
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h6
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c46
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h6
-rw-r--r--fs/xfs/libxfs/xfs_sb.c146
-rw-r--r--fs/xfs/libxfs/xfs_sb.h9
-rw-r--r--fs/xfs/libxfs/xfs_shared.h20
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c10
-rw-r--r--fs/xfs/libxfs/xfs_types.c4
-rw-r--r--fs/xfs/libxfs/xfs_types.h1
-rw-r--r--fs/xfs/scrub/agheader.c1
-rw-r--r--fs/xfs/scrub/agheader_repair.c33
-rw-r--r--fs/xfs/scrub/alloc.c3
-rw-r--r--fs/xfs/scrub/bmap.c21
-rw-r--r--fs/xfs/scrub/common.c15
-rw-r--r--fs/xfs/scrub/fscounters.c42
-rw-r--r--fs/xfs/scrub/health.c2
-rw-r--r--fs/xfs/scrub/ialloc.c9
-rw-r--r--fs/xfs/scrub/inode.c18
-rw-r--r--fs/xfs/scrub/refcount.c3
-rw-r--r--fs/xfs/scrub/repair.c14
-rw-r--r--fs/xfs/scrub/rmap.c3
-rw-r--r--fs/xfs/scrub/trace.c3
-rw-r--r--fs/xfs/xfs_attr_inactive.c2
-rw-r--r--fs/xfs/xfs_bio_io.c35
-rw-r--r--fs/xfs/xfs_bmap_util.c6
-rw-r--r--fs/xfs/xfs_buf.c311
-rw-r--r--fs/xfs/xfs_buf.h3
-rw-r--r--fs/xfs/xfs_buf_item.c97
-rw-r--r--fs/xfs/xfs_buf_item_recover.c15
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_dquot_item.c2
-rw-r--r--fs/xfs/xfs_extent_busy.c33
-rw-r--r--fs/xfs/xfs_extent_busy.h7
-rw-r--r--fs/xfs/xfs_file.c70
-rw-r--r--fs/xfs/xfs_filestream.c2
-rw-r--r--fs/xfs/xfs_fsmap.c80
-rw-r--r--fs/xfs/xfs_fsops.c24
-rw-r--r--fs/xfs/xfs_health.c15
-rw-r--r--fs/xfs/xfs_icache.c1156
-rw-r--r--fs/xfs/xfs_icache.h58
-rw-r--r--fs/xfs/xfs_inode.c247
-rw-r--r--fs/xfs/xfs_inode.h9
-rw-r--r--fs/xfs/xfs_inode_item.c18
-rw-r--r--fs/xfs/xfs_inode_item.h2
-rw-r--r--fs/xfs/xfs_inode_item_recover.c39
-rw-r--r--fs/xfs/xfs_ioctl.c70
-rw-r--r--fs/xfs/xfs_iops.c4
-rw-r--r--fs/xfs/xfs_iwalk.c84
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_log.c492
-rw-r--r--fs/xfs/xfs_log.h5
-rw-r--r--fs/xfs/xfs_log_cil.c149
-rw-r--r--fs/xfs/xfs_log_priv.h49
-rw-r--r--fs/xfs/xfs_log_recover.c61
-rw-r--r--fs/xfs/xfs_mount.c136
-rw-r--r--fs/xfs/xfs_mount.h110
-rw-r--r--fs/xfs/xfs_qm.c10
-rw-r--r--fs/xfs/xfs_qm.h1
-rw-r--r--fs/xfs/xfs_qm_syscalls.c54
-rw-r--r--fs/xfs/xfs_reflink.c13
-rw-r--r--fs/xfs/xfs_rtalloc.c49
-rw-r--r--fs/xfs/xfs_super.c10
-rw-r--r--fs/xfs/xfs_super.h1
-rw-r--r--fs/xfs/xfs_symlink.c9
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trace.h118
-rw-r--r--fs/xfs/xfs_trans.c6
-rw-r--r--fs/xfs/xfs_trans.h4
-rw-r--r--fs/zonefs/super.c3
-rw-r--r--include/acpi/acpi_bus.h11
-rw-r--r--include/asm-generic/bug.h3
-rw-r--r--include/asm-generic/logic_io.h78
-rw-r--r--include/asm-generic/pgtable-nop4d.h2
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/uaccess.h4
-rw-r--r--include/asm-generic/unaligned.h141
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/crypto/scatterwalk.h7
-rw-r--r--include/drm/drm_ioctl.h1
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h1
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h236
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7280.h165
-rw-r--r--include/dt-bindings/leds/rt4831-backlight.h23
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h1
-rw-r--r--include/dt-bindings/mfd/qcom-pm8008.h19
-rw-r--r--include/dt-bindings/pinctrl/hisi.h2
-rw-r--r--include/dt-bindings/power/imx8mm-power.h22
-rw-r--r--include/dt-bindings/power/imx8mn-power.h15
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h21
-rw-r--r--include/dt-bindings/power/rk3568-power.h32
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h2
-rw-r--r--include/kunit/test.h116
-rw-r--r--include/linux/acpi.h5
-rw-r--r--include/linux/acpi_iort.h14
-rw-r--r--include/linux/acpi_viot.h19
-rw-r--r--include/linux/arch_topology.h1
-rw-r--r--include/linux/arm_ffa.h267
-rw-r--r--include/linux/ascii85.h3
-rw-r--r--include/linux/blk-cgroup.h63
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/blk_types.h3
-rw-r--r--include/linux/blkdev.h39
-rw-r--r--include/linux/bootconfig.h62
-rw-r--r--include/linux/bootmem_info.h66
-rw-r--r--include/linux/bpf-cgroup.h4
-rw-r--r--include/linux/bpf.h1
-rw-r--r--include/linux/bpf_types.h1
-rw-r--r--include/linux/bpf_verifier.h3
-rw-r--r--include/linux/buildid.h8
-rw-r--r--include/linux/cgroup.h6
-rw-r--r--include/linux/clk-provider.h6
-rw-r--r--include/linux/compaction.h4
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compiler-clang.h17
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler_types.h2
-rw-r--r--include/linux/cpufreq.h10
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/crash_core.h12
-rw-r--r--include/linux/debugfs.h26
-rw-r--r--include/linux/debugobjects.h2
-rw-r--r--include/linux/device.h50
-rw-r--r--include/linux/dma-iommu.h4
-rw-r--r--include/linux/dma-resv.h2
-rw-r--r--include/linux/dmaengine.h11
-rw-r--r--include/linux/eeprom_93xx46.h3
-rw-r--r--include/linux/ethtool.h10
-rw-r--r--include/linux/export.h5
-rw-r--r--include/linux/f2fs_fs.h2
-rw-r--r--include/linux/filter.h15
-rw-r--r--include/linux/firmware.h4
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h1
-rw-r--r--include/linux/fpga/fpga-bridge.h2
-rw-r--r--include/linux/fpga/fpga-mgr.h2
-rw-r--r--include/linux/fs.h11
-rw-r--r--include/linux/fs_context.h3
-rw-r--r--include/linux/ftrace_irq.h13
-rw-r--r--include/linux/gpio/regmap.h6
-rw-r--r--include/linux/highmem.h6
-rw-r--r--include/linux/hmm.h2
-rw-r--r--include/linux/huge_mm.h70
-rw-r--r--include/linux/hugetlb.h48
-rw-r--r--include/linux/hugetlb_cgroup.h19
-rw-r--r--include/linux/i2c.h11
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h4
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h2
-rw-r--r--include/linux/iio/common/st_sensors.h36
-rw-r--r--include/linux/iio/iio-opaque.h22
-rw-r--r--include/linux/iio/iio.h32
-rw-r--r--include/linux/iio/imu/adis.h2
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/input/cy8ctmg110_pdata.h1
-rw-r--r--include/linux/input/cyttsp.h29
-rw-r--r--include/linux/intel-iommu.h44
-rw-r--r--include/linux/intel-ish-client-if.h2
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/jbd2.h6
-rw-r--r--include/linux/kallsyms.h21
-rw-r--r--include/linux/kasan.h1
-rw-r--r--include/linux/kconfig.h6
-rw-r--r--include/linux/kcore.h3
-rw-r--r--include/linux/kernel.h227
-rw-r--r--include/linux/kfence.h7
-rw-r--r--include/linux/kgdb.h8
-rw-r--r--include/linux/kprobes.h4
-rw-r--r--include/linux/kstrtox.h155
-rw-r--r--include/linux/libnvdimm.h1
-rw-r--r--include/linux/list_lru.h4
-rw-r--r--include/linux/litex.h103
-rw-r--r--include/linux/lockd/xdr.h6
-rw-r--r--include/linux/lockd/xdr4.h7
-rw-r--r--include/linux/logic_iomem.h62
-rw-r--r--include/linux/lru_cache.h8
-rw-r--r--include/linux/marvell_phy.h6
-rw-r--r--include/linux/max17040_battery.h16
-rw-r--r--include/linux/mcb.h2
-rw-r--r--include/linux/mdev.h2
-rw-r--r--include/linux/memblock.h8
-rw-r--r--include/linux/memcontrol.h29
-rw-r--r--include/linux/memory_hotplug.h27
-rw-r--r--include/linux/mempolicy.h9
-rw-r--r--include/linux/memremap.h2
-rw-r--r--include/linux/mfd/hi655x-pmic.h2
-rw-r--r--include/linux/mfd/lp87565.h40
-rw-r--r--include/linux/mfd/mt6358/registers.h2
-rw-r--r--include/linux/mfd/mt6360.h240
-rw-r--r--include/linux/mfd/rk808.h81
-rw-r--r--include/linux/mfd/rt5033-private.h4
-rw-r--r--include/linux/mfd/samsung/core.h33
-rw-r--r--include/linux/mfd/wcd934x/registers.h57
-rw-r--r--include/linux/mhi.h7
-rw-r--r--include/linux/migrate.h24
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/mlx5/mlx5_ifc.h4
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h10
-rw-r--r--include/linux/mm.h18
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmu_notifier.h34
-rw-r--r--include/linux/mmzone.h27
-rw-r--r--include/linux/module.h9
-rw-r--r--include/linux/mpi.h4
-rw-r--r--include/linux/msi.h2
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/onfi.h41
-rw-r--r--include/linux/mtd/rawnand.h161
-rw-r--r--include/linux/mtd/spi-nor.h2
-rw-r--r--include/linux/mv643xx.h8
-rw-r--r--include/linux/n_r3964.h175
-rw-r--r--include/linux/namei.h3
-rw-r--r--include/linux/netfilter/ipset/ip_set.h3
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_ssc.h14
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/nodemask.h6
-rw-r--r--include/linux/nvmem-provider.h3
-rw-r--r--include/linux/of.h11
-rw-r--r--include/linux/of_address.h54
-rw-r--r--include/linux/of_iommu.h17
-rw-r--r--include/linux/of_reserved_mem.h14
-rw-r--r--include/linux/once.h4
-rw-r--r--include/linux/page-flags.h22
-rw-r--r--include/linux/panic.h98
-rw-r--r--include/linux/panic_notifier.h12
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-ep-cfs.h2
-rw-r--r--include/linux/pci-epc.h5
-rw-r--r--include/linux/pci-epf.h5
-rw-r--r--include/linux/pci.h9
-rw-r--r--include/linux/pci_hotplug.h2
-rw-r--r--include/linux/percpu-defs.h2
-rw-r--r--include/linux/percpu-refcount.h2
-rw-r--r--include/linux/pgtable.h22
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/phy/tegra/xusb.h10
-rw-r--r--include/linux/pipe_fs_i.h2
-rw-r--r--include/linux/pkeys.h4
-rw-r--r--include/linux/pl353-smc.h30
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h60
-rw-r--r--include/linux/platform_data/pata_ixp4xx_cf.h21
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h3
-rw-r--r--include/linux/platform_device.h3
-rw-r--r--include/linux/pm2301_charger.h48
-rw-r--r--include/linux/power/ab8500.h16
-rw-r--r--include/linux/ptp_clock_kernel.h31
-rw-r--r--include/linux/pwm.h15
-rw-r--r--include/linux/rcupdate.h72
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--include/linux/remoteproc.h50
-rw-r--r--include/linux/reset-controller.h22
-rw-r--r--include/linux/rmap.h17
-rw-r--r--include/linux/scatterlist.h2
-rw-r--r--include/linux/sched/signal.h19
-rw-r--r--include/linux/scmi_protocol.h14
-rw-r--r--include/linux/scpi_protocol.h8
-rw-r--r--include/linux/secretmem.h54
-rw-r--r--include/linux/security.h3
-rw-r--r--include/linux/seq_file.h10
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_core.h29
-rw-r--r--include/linux/set_memory.h12
-rw-r--r--include/linux/shmem_fs.h19
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/skmsg.h54
-rw-r--r--include/linux/soc/ixp4xx/cpu.h106
-rw-r--r--include/linux/soc/qcom/smem_state.h8
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h6
-rw-r--r--include/linux/soundwire/sdw.h8
-rw-r--r--include/linux/soundwire/sdw_intel.h6
-rw-r--r--include/linux/srcu.h6
-rw-r--r--include/linux/srcutree.h2
-rw-r--r--include/linux/stm.h2
-rw-r--r--include/linux/stmmac.h2
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/string_helpers.h31
-rw-r--r--include/linux/sunrpc/cache.h1
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/sched.h2
-rw-r--r--include/linux/sunrpc/xprt.h10
-rw-r--r--include/linux/sunrpc/xprtmultipath.h6
-rw-r--r--include/linux/sunrpc/xprtsock.h1
-rw-r--r--include/linux/swap.h19
-rw-r--r--include/linux/swapops.h125
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/sysfs.h6
-rw-r--r--include/linux/tee_drv.h2
-rw-r--r--include/linux/thread_info.h1
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/trace.h7
-rw-r--r--include/linux/tracepoint.h10
-rw-r--r--include/linux/tty.h78
-rw-r--r--include/linux/tty_driver.h12
-rw-r--r--include/linux/tty_flip.h4
-rw-r--r--include/linux/tty_ldisc.h6
-rw-r--r--include/linux/uio.h66
-rw-r--r--include/linux/unaligned/access_ok.h68
-rw-r--r--include/linux/unaligned/be_byteshift.h71
-rw-r--r--include/linux/unaligned/be_memmove.h37
-rw-r--r--include/linux/unaligned/be_struct.h37
-rw-r--r--include/linux/unaligned/generic.h115
-rw-r--r--include/linux/unaligned/le_byteshift.h71
-rw-r--r--include/linux/unaligned/le_memmove.h37
-rw-r--r--include/linux/unaligned/le_struct.h37
-rw-r--r--include/linux/unaligned/memmove.h46
-rw-r--r--include/linux/usb.h9
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/usb/gadget.h3
-rw-r--r--include/linux/usb/hcd.h17
-rw-r--r--include/linux/usb/isp1760.h19
-rw-r--r--include/linux/usb/otg-fsm.h7
-rw-r--r--include/linux/usb/otg.h2
-rw-r--r--include/linux/usb/quirks.h2
-rw-r--r--include/linux/usb/role.h6
-rw-r--r--include/linux/usb/serial.h10
-rw-r--r--include/linux/usb/tcpm.h4
-rw-r--r--include/linux/usb/typec_dp.h2
-rw-r--r--include/linux/userfaultfd_k.h5
-rw-r--r--include/linux/vdpa.h36
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/linux/virtio_pci_modern.h1
-rw-r--r--include/linux/vmalloc.h19
-rw-r--r--include/linux/vringh.h1
-rw-r--r--include/linux/vt_kern.h1
-rw-r--r--include/linux/zbud.h23
-rw-r--r--include/math-emu/op-common.h2
-rw-r--r--include/memory/renesas-rpc-if.h6
-rw-r--r--include/net/bluetooth/hci_core.h1
-rw-r--r--include/net/bonding.h9
-rw-r--r--include/net/busy_poll.h2
-rw-r--r--include/net/caif/caif_hsi.h200
-rw-r--r--include/net/checksum.h14
-rw-r--r--include/net/dst_metadata.h4
-rw-r--r--include/net/flow_offload.h14
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/llc_pdu.h31
-rw-r--r--include/net/mptcp.h5
-rw-r--r--include/net/netfilter/nf_conntrack_core.h1
-rw-r--r--include/net/netns/conntrack.h3
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/pkt_cls.h4
-rw-r--r--include/net/psample.h2
-rw-r--r--include/net/sctp/constants.h4
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/sock.h8
-rw-r--r--include/net/tcp.h5
-rw-r--r--include/scsi/fc/fc_ms.h59
-rw-r--r--include/scsi/iscsi_proto.h2
-rw-r--r--include/scsi/libfc.h6
-rw-r--r--include/scsi/libiscsi.h20
-rw-r--r--include/scsi/libsas.h12
-rw-r--r--include/scsi/scsi.h159
-rw-r--r--include/scsi/scsi_bsg_iscsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h38
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_proto.h58
-rw-r--r--include/scsi/scsi_status.h74
-rw-r--r--include/scsi/scsi_transport_fc.h25
-rw-r--r--include/scsi/scsi_transport_iscsi.h14
-rw-r--r--include/scsi/sg.h35
-rw-r--r--include/soc/imx/cpu.h1
-rw-r--r--include/soc/tegra/common.h31
-rw-r--r--include/soc/tegra/fuse.h20
-rw-r--r--include/soc/tegra/mc.h70
-rw-r--r--include/soc/tegra/pmc.h7
-rw-r--r--include/sound/core.h50
-rw-r--r--include/sound/hdmi-codec.h12
-rw-r--r--include/sound/memalloc.h80
-rw-r--r--include/sound/pcm.h20
-rw-r--r--include/sound/pcm_iec958.h8
-rw-r--r--include/sound/rawmidi.h2
-rw-r--r--include/sound/soc-dai.h55
-rw-r--r--include/sound/soc-topology.h2
-rw-r--r--include/sound/soc.h27
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/afs.h67
-rw-r--r--include/trace/events/btrfs.h2
-rw-r--r--include/trace/events/dma_fence.h4
-rw-r--r--include/trace/events/intel_iommu.h37
-rw-r--r--include/trace/events/mmflags.h4
-rw-r--r--include/trace/events/net.h2
-rw-r--r--include/trace/events/osnoise.h142
-rw-r--r--include/trace/events/qdisc.h28
-rw-r--r--include/trace/events/rcu.h1
-rw-r--r--include/trace/events/rpcgss.h4
-rw-r--r--include/trace/events/sched.h2
-rw-r--r--include/trace/events/scsi.h48
-rw-r--r--include/trace/events/sunrpc.h40
-rw-r--r--include/trace/events/ufs.h20
-rw-r--r--include/trace/events/vmscan.h41
-rw-r--r--include/trace/events/writeback.h3
-rw-r--r--include/trace/trace_events.h25
-rw-r--r--include/uapi/asm-generic/mman-common.h3
-rw-r--r--include/uapi/asm-generic/unistd.h7
-rw-r--r--include/uapi/linux/auxvec.h3
-rw-r--r--include/uapi/linux/cxl_mem.h12
-rw-r--r--include/uapi/linux/ethtool_netlink.h15
-rw-r--r--include/uapi/linux/fuse.h10
-rw-r--r--include/uapi/linux/idxd.h2
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/mempolicy.h1
-rw-r--r--include/uapi/linux/n_r3964.h99
-rw-r--r--include/uapi/linux/nbd-netlink.h1
-rw-r--r--include/uapi/linux/neighbour.h7
-rw-r--r--include/uapi/linux/net_tstamp.h17
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h9
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_log.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h4
-rw-r--r--include/uapi/linux/pcitest.h2
-rw-r--r--include/uapi/linux/raw.h17
-rw-r--r--include/uapi/linux/userfaultfd.h7
-rw-r--r--include/uapi/linux/virtio_ids.h12
-rw-r--r--include/uapi/linux/virtio_pcidev.h64
-rw-r--r--include/uapi/misc/habanalabs.h13
-rw-r--r--include/uapi/rdma/irdma-abi.h2
-rw-r--r--include/uapi/sound/asound.h30
-rw-r--r--include/xen/interface/io/ring.h278
-rw-r--r--init/Makefile12
-rw-r--r--init/main.c67
-rw-r--r--ipc/msg.c6
-rw-r--r--ipc/sem.c25
-rw-r--r--ipc/shm.c6
-rw-r--r--ipc/util.c44
-rw-r--r--ipc/util.h3
-rw-r--r--kernel/bpf/core.c34
-rw-r--r--kernel/bpf/devmap.c6
-rw-r--r--kernel/bpf/disasm.c16
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/helpers.c30
-rw-r--r--kernel/bpf/verifier.c211
-rw-r--r--kernel/cfi.c8
-rw-r--r--kernel/cgroup/cgroup-v1.c16
-rw-r--r--kernel/cgroup/cgroup.c25
-rw-r--r--kernel/cgroup/rstat.c19
-rw-r--r--kernel/crash_core.c50
-rw-r--r--kernel/debug/debug_core.c3
-rw-r--r--kernel/debug/gdbstub.c2
-rw-r--r--kernel/debug/kdb/kdb_main.c8
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/dma/coherent.c2
-rw-r--r--kernel/dma/debug.c6
-rw-r--r--kernel/dma/ops_helpers.c12
-rw-r--r--kernel/events/core.c35
-rwxr-xr-xkernel/gen_kheaders.sh4
-rw-r--r--kernel/hung_task.c1
-rw-r--r--kernel/irq/chip.c5
-rw-r--r--kernel/irq/irqdesc.c1
-rw-r--r--kernel/irq/msi.c13
-rw-r--r--kernel/irq/timings.c5
-rw-r--r--kernel/jump_label.c13
-rw-r--r--kernel/kallsyms.c104
-rw-r--r--kernel/kcsan/core.c53
-rw-r--r--kernel/kcsan/kcsan.h39
-rw-r--r--kernel/kcsan/report.c169
-rw-r--r--kernel/kexec_core.c1
-rw-r--r--kernel/kprobes.c18
-rw-r--r--kernel/locking/lockdep.c6
-rw-r--r--kernel/locking/lockdep_proc.c26
-rw-r--r--kernel/locking/rtmutex.c2
-rw-r--r--kernel/module.c48
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/power/hibernate.c5
-rw-r--r--kernel/rcu/Kconfig.debug2
-rw-r--r--kernel/rcu/rcu.h14
-rw-r--r--kernel/rcu/rcutorture.c315
-rw-r--r--kernel/rcu/refscale.c115
-rw-r--r--kernel/rcu/srcutree.c28
-rw-r--r--kernel/rcu/sync.c4
-rw-r--r--kernel/rcu/tasks.h64
-rw-r--r--kernel/rcu/tiny.c1
-rw-r--r--kernel/rcu/tree.c315
-rw-r--r--kernel/rcu/tree.h14
-rw-r--r--kernel/rcu/tree_plugin.h239
-rw-r--r--kernel/rcu/tree_stall.h84
-rw-r--r--kernel/rcu/update.c8
-rw-r--r--kernel/scftorture.c6
-rw-r--r--kernel/sched/core.c91
-rw-r--r--kernel/sched/fair.c7
-rw-r--r--kernel/sched/sched.h21
-rw-r--r--kernel/seccomp.c2
-rw-r--r--kernel/signal.c34
-rw-r--r--kernel/smpboot.c2
-rw-r--r--kernel/static_call.c13
-rw-r--r--kernel/sys_ni.c2
-rw-r--r--kernel/sysctl-test.c24
-rw-r--r--kernel/sysctl.c4
-rw-r--r--kernel/time/posix-cpu-timers.c10
-rw-r--r--kernel/time/timer.c28
-rw-r--r--kernel/trace/Kconfig67
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/bpf_trace.c16
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/ring_buffer.c30
-rw-r--r--kernel/trace/trace.c292
-rw-r--r--kernel/trace/trace.h67
-rw-r--r--kernel/trace/trace_boot.c27
-rw-r--r--kernel/trace/trace_entries.h41
-rw-r--r--kernel/trace/trace_events_hist.c64
-rw-r--r--kernel/trace/trace_events_synth.c8
-rw-r--r--kernel/trace/trace_events_trigger.c3
-rw-r--r--kernel/trace/trace_hwlat.c536
-rw-r--r--kernel/trace/trace_osnoise.c2113
-rw-r--r--kernel/trace/trace_output.c119
-rw-r--r--kernel/trace/trace_sched_wakeup.c24
-rw-r--r--kernel/trace/trace_synth.h2
-rw-r--r--kernel/tracepoint.c188
-rw-r--r--kernel/ucount.c29
-rw-r--r--kernel/usermode_driver.c2
-rw-r--r--kernel/workqueue.c20
-rw-r--r--lib/Kconfig17
-rw-r--r--lib/Kconfig.debug44
-rw-r--r--lib/Makefile2
-rw-r--r--lib/asn1_encoder.c2
-rw-r--r--lib/bitmap.c14
-rw-r--r--lib/bootconfig.c76
-rw-r--r--lib/buildid.c74
-rw-r--r--lib/cmdline_kunit.c2
-rw-r--r--lib/decompress_bunzip2.c6
-rw-r--r--lib/decompress_unlz4.c8
-rw-r--r--lib/decompress_unlzo.c3
-rw-r--r--lib/decompress_unxz.c2
-rw-r--r--lib/decompress_unzstd.c4
-rw-r--r--lib/devmem_is_allowed.c2
-rw-r--r--lib/devres.c6
-rw-r--r--lib/dump_stack.c13
-rw-r--r--lib/dynamic_debug.c8
-rw-r--r--lib/fonts/font_pearl_8x8.c2
-rw-r--r--lib/iov_iter.c1230
-rw-r--r--lib/kfifo.c2
-rw-r--r--lib/kstrtox.c5
-rw-r--r--lib/kunit/debugfs.c2
-rw-r--r--lib/kunit/executor.c53
-rw-r--r--lib/kunit/executor_test.c133
-rw-r--r--lib/kunit/kunit-example-test.c31
-rw-r--r--lib/kunit/kunit-test.c42
-rw-r--r--lib/kunit/string-stream.h6
-rw-r--r--lib/kunit/test.c77
-rw-r--r--lib/list_sort.c2
-rw-r--r--lib/logic_iomem.c318
-rw-r--r--lib/lz4/lz4_decompress.c2
-rw-r--r--lib/math/Makefile1
-rw-r--r--lib/math/rational-test.c56
-rw-r--r--lib/math/rational.c16
-rw-r--r--lib/mpi/longlong.h4
-rw-r--r--lib/mpi/mpicoder.c6
-rw-r--r--lib/mpi/mpiutil.c2
-rw-r--r--lib/nlattr.c4
-rw-r--r--lib/oid_registry.c2
-rw-r--r--lib/once.c11
-rw-r--r--lib/parser.c1
-rw-r--r--lib/pldmfw/pldmfw.c2
-rw-r--r--lib/reed_solomon/test_rslib.c2
-rw-r--r--lib/refcount.c2
-rw-r--r--lib/rhashtable.c2
-rw-r--r--lib/sbitmap.c2
-rw-r--r--lib/scatterlist.c4
-rw-r--r--lib/seq_buf.c10
-rw-r--r--lib/sort.c2
-rw-r--r--lib/stackdepot.c2
-rw-r--r--lib/string.c2
-rw-r--r--lib/string_helpers.c102
-rw-r--r--lib/test-string_helpers.c157
-rw-r--r--lib/test_bitmap.c7
-rw-r--r--lib/test_bitops.c2
-rw-r--r--lib/test_bpf.c2
-rw-r--r--lib/test_firmware.c10
-rw-r--r--lib/test_hmm.c125
-rw-r--r--lib/test_hmm_uapi.h2
-rw-r--r--lib/test_kasan.c14
-rw-r--r--lib/test_kmod.c6
-rw-r--r--lib/test_list_sort.c129
-rw-r--r--lib/test_scanf.c2
-rw-r--r--lib/test_string.c5
-rw-r--r--lib/vsprintf.c11
-rw-r--r--lib/xz/xz_dec_bcj.c2
-rw-r--r--lib/xz/xz_dec_lzma2.c8
-rw-r--r--lib/zlib_inflate/inffast.c2
-rw-r--r--lib/zstd/huf.h2
-rw-r--r--mm/Kconfig20
-rw-r--r--mm/Makefile3
-rw-r--r--mm/backing-dev.c2
-rw-r--r--mm/bootmem_info.c127
-rw-r--r--mm/compaction.c20
-rw-r--r--mm/debug_vm_pgtable.c109
-rw-r--r--mm/filemap.c36
-rw-r--r--mm/gup.c73
-rw-r--r--mm/hmm.c12
-rw-r--r--mm/huge_memory.c265
-rw-r--r--mm/hugetlb.c385
-rw-r--r--mm/hugetlb_vmemmap.c298
-rw-r--r--mm/hugetlb_vmemmap.h45
-rw-r--r--mm/init-mm.c9
-rw-r--r--mm/internal.h32
-rw-r--r--mm/kasan/kasan.h12
-rw-r--r--mm/kfence/core.c23
-rw-r--r--mm/kfence/kfence_test.c2
-rw-r--r--mm/khugepaged.c20
-rw-r--r--mm/kmemleak.c6
-rw-r--r--mm/madvise.c68
-rw-r--r--mm/mapping_dirty_helpers.c2
-rw-r--r--mm/memblock.c57
-rw-r--r--mm/memcontrol.c13
-rw-r--r--mm/memory-failure.c50
-rw-r--r--mm/memory.c246
-rw-r--r--mm/memory_hotplug.c159
-rw-r--r--mm/mempolicy.c303
-rw-r--r--mm/migrate.c314
-rw-r--r--mm/mlock.c15
-rw-r--r--mm/mmap.c5
-rw-r--r--mm/mmap_lock.c59
-rw-r--r--mm/mprotect.c18
-rw-r--r--mm/mremap.c108
-rw-r--r--mm/nommu.c5
-rw-r--r--mm/oom_kill.c4
-rw-r--r--mm/page_alloc.c94
-rw-r--r--mm/page_vma_mapped.c15
-rw-r--r--mm/percpu-km.c6
-rw-r--r--mm/percpu-vm.c5
-rw-r--r--mm/percpu.c32
-rw-r--r--mm/rmap.c662
-rw-r--r--mm/secretmem.c255
-rw-r--r--mm/shmem.c137
-rw-r--r--mm/slab.h18
-rw-r--r--mm/slab_common.c12
-rw-r--r--mm/slub.c61
-rw-r--r--mm/sparse-vmemmap.c354
-rw-r--r--mm/sparse.c1
-rw-r--r--mm/swap.c2
-rw-r--r--mm/swap_state.c7
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/userfaultfd.c225
-rw-r--r--mm/util.c52
-rw-r--r--mm/vmalloc.c37
-rw-r--r--mm/vmscan.c50
-rw-r--r--mm/workingset.c10
-rw-r--r--mm/z3fold.c39
-rw-r--r--mm/zbud.c235
-rw-r--r--mm/zsmalloc.c3
-rw-r--r--mm/zswap.c26
-rw-r--r--net/802/garp.c14
-rw-r--r--net/802/mrp.c14
-rw-r--r--net/bluetooth/hci_core.c16
-rw-r--r--net/bluetooth/hci_sock.c49
-rw-r--r--net/bluetooth/hci_sysfs.c3
-rw-r--r--net/bluetooth/rfcomm/tty.c4
-rw-r--r--net/bpf/test_run.c7
-rw-r--r--net/bridge/br_fdb.c25
-rw-r--r--net/bridge/br_if.c19
-rw-r--r--net/bridge/br_multicast.c6
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c6
-rw-r--r--net/caif/caif_socket.c3
-rw-r--r--net/can/j1939/transport.c11
-rw-r--r--net/can/raw.c20
-rw-r--r--net/ceph/auth.c7
-rw-r--r--net/ceph/auth_none.c4
-rw-r--r--net/ceph/auth_none.h1
-rw-r--r--net/ceph/cls_lock_client.c12
-rw-r--r--net/core/dev.c50
-rw-r--r--net/core/devlink.c10
-rw-r--r--net/core/flow_dissector.c18
-rw-r--r--net/core/link_watch.c5
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/page_pool.c10
-rw-r--r--net/core/skbuff.c19
-rw-r--r--net/core/skmsg.c55
-rw-r--r--net/core/sock.c71
-rw-r--r--net/dccp/dccp.h6
-rw-r--r--net/decnet/af_decnet.c27
-rw-r--r--net/dsa/slave.c16
-rw-r--r--net/dsa/switch.c8
-rw-r--r--net/dsa/tag_ksz.c9
-rw-r--r--net/ethtool/Makefile2
-rw-r--r--net/ethtool/common.c14
-rw-r--r--net/ethtool/netlink.c10
-rw-r--r--net/ethtool/netlink.h2
-rw-r--r--net/ethtool/phc_vclocks.c94
-rw-r--r--net/ieee802154/socket.c7
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/igmp.c21
-rw-r--r--net/ipv4/inet_diag.c5
-rw-r--r--net/ipv4/ip_tunnel.c20
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/raw_diag.c7
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_bbr.c2
-rw-r--r--net/ipv4/tcp_bpf.c2
-rw-r--r--net/ipv4/tcp_fastopen.c28
-rw-r--r--net/ipv4/tcp_input.c21
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/tcp_offload.c3
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp.c31
-rw-r--r--net/ipv4/udp_bpf.c2
-rw-r--r--net/ipv4/udp_diag.c6
-rw-r--r--net/ipv4/udp_offload.c10
-rw-r--r--net/ipv6/ip6_output.c37
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/tcp_ipv6.c21
-rw-r--r--net/ipv6/udp.c27
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/iucv/iucv.c22
-rw-r--r--net/llc/af_llc.c10
-rw-r--r--net/llc/llc_s_ac.c2
-rw-r--r--net/mac80211/cfg.c19
-rw-r--r--net/mac80211/ieee80211_i.h2
-rw-r--r--net/mac80211/main.c2
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/tx.c57
-rw-r--r--net/mptcp/mib.c1
-rw-r--r--net/mptcp/mib.h1
-rw-r--r--net/mptcp/mptcp_diag.c6
-rw-r--r--net/mptcp/options.c29
-rw-r--r--net/mptcp/pm_netlink.c45
-rw-r--r--net/mptcp/protocol.c12
-rw-r--r--net/mptcp/protocol.h10
-rw-r--r--net/mptcp/sockopt.c68
-rw-r--r--net/mptcp/subflow.c11
-rw-r--r--net/mptcp/syncookies.c16
-rw-r--r--net/ncsi/Kconfig6
-rw-r--r--net/ncsi/internal.h5
-rw-r--r--net/ncsi/ncsi-manage.c51
-rw-r--r--net/ncsi/ncsi-rsp.c11
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c9
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmark.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c3
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c3
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c3
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c11
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c16
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c11
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c16
-rw-r--r--net/netfilter/nf_conntrack_core.c89
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_conntrack_proto.c7
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c70
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c1
-rw-r--r--net/netfilter/nf_conntrack_standalone.c26
-rw-r--r--net/netfilter/nf_flow_table_core.c17
-rw-r--r--net/netfilter/nf_tables_api.c15
-rw-r--r--net/netfilter/nfnetlink_hook.c26
-rw-r--r--net/netfilter/nft_last.c32
-rw-r--r--net/netfilter/nft_nat.c4
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/netrom/nr_timer.c20
-rw-r--r--net/nfc/nci/uart.c7
-rw-r--r--net/openvswitch/flow.c13
-rw-r--r--net/openvswitch/flow_table.c6
-rw-r--r--net/openvswitch/vport.c1
-rw-r--r--net/qrtr/mhi.c16
-rw-r--r--net/qrtr/qrtr.c6
-rw-r--r--net/rds/ib_frmr.c4
-rw-r--r--net/sched/act_ct.c14
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_skbmod.c12
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_tcindex.c5
-rw-r--r--net/sched/sch_cake.c2
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_taprio.c4
-rw-r--r--net/sctp/auth.c12
-rw-r--r--net/sctp/diag.c6
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/ipv6.c5
-rw-r--r--net/sctp/output.c4
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/sm_statefuns.c15
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/transport.c36
-rw-r--r--net/smc/af_smc.c2
-rw-r--r--net/smc/smc_core.c4
-rw-r--r--net/smc/smc_core.h4
-rw-r--r--net/smc/smc_llc.c10
-rw-r--r--net/smc/smc_tx.c18
-rw-r--r--net/smc/smc_wr.c10
-rw-r--r--net/socket.c19
-rw-r--r--net/sunrpc/Makefile2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/clnt.c30
-rw-r--r--net/sunrpc/sched.c12
-rw-r--r--net/sunrpc/sunrpc_syms.c10
-rw-r--r--net/sunrpc/sysfs.c588
-rw-r--r--net/sunrpc/sysfs.h42
-rw-r--r--net/sunrpc/xdr.c7
-rw-r--r--net/sunrpc/xprt.c30
-rw-r--r--net/sunrpc/xprtmultipath.c38
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c6
-rw-r--r--net/sunrpc/xprtrdma/transport.c2
-rw-r--r--net/sunrpc/xprtsock.c19
-rw-r--r--net/tipc/crypto.c14
-rw-r--r--net/tipc/link.c6
-rw-r--r--net/tipc/socket.c30
-rw-r--r--net/unix/af_unix.c51
-rw-r--r--net/unix/diag.c6
-rw-r--r--net/vmw_vsock/virtio_transport.c7
-rw-r--r--net/vmw_vsock/virtio_transport_common.c3
-rw-r--r--net/wireless/nl80211.c5
-rw-r--r--net/wireless/scan.c6
-rw-r--r--net/xfrm/xfrm_compat.c49
-rw-r--r--net/xfrm/xfrm_ipcomp.c2
-rw-r--r--net/xfrm/xfrm_policy.c32
-rw-r--r--net/xfrm/xfrm_user.c10
-rw-r--r--samples/Kconfig6
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--samples/bpf/xdpsock_user.c28
-rw-r--r--samples/vfio-mdev/mbochs.c163
-rw-r--r--samples/vfio-mdev/mdpy.c160
-rw-r--r--samples/vfio-mdev/mtty.c219
-rw-r--r--scripts/Kbuild.include7
-rw-r--r--scripts/Makefile.build4
-rw-r--r--scripts/Makefile.modfinal2
-rwxr-xr-xscripts/checkpatch.pl16
-rwxr-xr-xscripts/checksyscalls.sh4
-rwxr-xr-xscripts/checkversion.pl18
-rwxr-xr-xscripts/coccicheck2
-rw-r--r--scripts/coccinelle/api/kobj_to_dev.cocci45
-rw-r--r--scripts/coccinelle/free/kfree.cocci12
-rw-r--r--scripts/coccinelle/misc/flexible_array.cocci23
-rw-r--r--scripts/coccinelle/misc/irqf_oneshot.cocci4
-rw-r--r--scripts/coccinelle/misc/minmax.cocci222
-rw-r--r--scripts/coccinelle/misc/swap.cocci122
-rw-r--r--scripts/coccinelle/misc/uninitialized_var.cocci15
-rwxr-xr-xscripts/decode_stacktrace.sh89
-rw-r--r--scripts/kconfig/conf.c2
-rwxr-xr-xscripts/link-vmlinux.sh4
-rwxr-xr-xscripts/min-tool-version.sh7
-rwxr-xr-xscripts/mkcompile_h18
-rwxr-xr-xscripts/mkmakefile17
-rw-r--r--scripts/mod/modpost.c7
-rwxr-xr-xscripts/recordmcount.pl40
-rwxr-xr-xscripts/setlocalversion86
-rwxr-xr-xscripts/spdxcheck.py2
-rwxr-xr-xscripts/syscallhdr.sh2
-rw-r--r--scripts/syscallnr.sh74
-rwxr-xr-xscripts/syscalltbl.sh7
-rwxr-xr-xscripts/tracing/draw_functrace.py6
-rw-r--r--security/apparmor/policy_unpack.c2
-rw-r--r--security/security.c3
-rw-r--r--security/selinux/ss/policydb.c10
-rw-r--r--sound/ac97/bus.c2
-rw-r--r--sound/aoa/soundbus/i2sbus/pcm.c4
-rw-r--r--sound/arm/pxa2xx-ac97.c4
-rw-r--r--sound/core/control.c38
-rw-r--r--sound/core/control_compat.c14
-rw-r--r--sound/core/control_led.c36
-rw-r--r--sound/core/hwdep.c6
-rw-r--r--sound/core/info_oss.c3
-rw-r--r--sound/core/init.c77
-rw-r--r--sound/core/memalloc.c444
-rw-r--r--sound/core/memalloc_local.h19
-rw-r--r--sound/core/oss/mixer_oss.c45
-rw-r--r--sound/core/oss/pcm_oss.c70
-rw-r--r--sound/core/oss/pcm_plugin.c26
-rw-r--r--sound/core/pcm.c4
-rw-r--r--sound/core/pcm_compat.c6
-rw-r--r--sound/core/pcm_iec958.c174
-rw-r--r--sound/core/pcm_lib.c64
-rw-r--r--sound/core/pcm_local.h5
-rw-r--r--sound/core/pcm_memory.c21
-rw-r--r--sound/core/pcm_misc.c12
-rw-r--r--sound/core/pcm_native.c81
-rw-r--r--sound/core/rawmidi.c93
-rw-r--r--sound/core/rawmidi_compat.c4
-rw-r--r--sound/core/seq/oss/seq_oss.c26
-rw-r--r--sound/core/seq/oss/seq_oss_init.c10
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c33
-rw-r--r--sound/core/seq/oss/seq_oss_rw.c3
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c9
-rw-r--r--sound/core/seq/oss/seq_oss_writeq.c3
-rw-r--r--sound/core/seq/seq_clientmgr.c51
-rw-r--r--sound/core/seq/seq_dummy.c3
-rw-r--r--sound/core/seq/seq_fifo.c3
-rw-r--r--sound/core/seq/seq_memory.c6
-rw-r--r--sound/core/seq/seq_midi.c27
-rw-r--r--sound/core/seq/seq_ports.c39
-rw-r--r--sound/core/seq/seq_queue.c21
-rw-r--r--sound/core/seq/seq_virmidi.c9
-rw-r--r--sound/core/sgbuf.c90
-rw-r--r--sound/core/sound.c3
-rw-r--r--sound/core/sound_oss.c3
-rw-r--r--sound/drivers/mpu401/mpu401.c9
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c19
-rw-r--r--sound/drivers/mtpav.c15
-rw-r--r--sound/drivers/mts64.c15
-rw-r--r--sound/drivers/opl3/opl3_lib.c42
-rw-r--r--sound/drivers/opl3/opl3_midi.c3
-rw-r--r--sound/drivers/opl3/opl3_oss.c6
-rw-r--r--sound/drivers/opl3/opl3_seq.c9
-rw-r--r--sound/drivers/portman2x4.c15
-rw-r--r--sound/drivers/serial-u16550.c27
-rw-r--r--sound/drivers/vx/vx_core.c60
-rw-r--r--sound/drivers/vx/vx_hwdep.c12
-rw-r--r--sound/drivers/vx/vx_mixer.c39
-rw-r--r--sound/drivers/vx/vx_pcm.c32
-rw-r--r--sound/firewire/Kconfig30
-rw-r--r--sound/firewire/amdtp-am824.c4
-rw-r--r--sound/firewire/amdtp-am824.h2
-rw-r--r--sound/firewire/amdtp-stream-trace.h2
-rw-r--r--sound/firewire/amdtp-stream.c1229
-rw-r--r--sound/firewire/amdtp-stream.h105
-rw-r--r--sound/firewire/bebob/bebob.c282
-rw-r--r--sound/firewire/bebob/bebob.h22
-rw-r--r--sound/firewire/bebob/bebob_stream.c90
-rw-r--r--sound/firewire/dice/dice-stream.c50
-rw-r--r--sound/firewire/dice/dice-transaction.c2
-rw-r--r--sound/firewire/dice/dice.c138
-rw-r--r--sound/firewire/dice/dice.h4
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c9
-rw-r--r--sound/firewire/digi00x/digi00x-stream.c13
-rw-r--r--sound/firewire/digi00x/digi00x.c101
-rw-r--r--sound/firewire/digi00x/digi00x.h3
-rw-r--r--sound/firewire/fireface/amdtp-ff.c2
-rw-r--r--sound/firewire/fireface/ff-stream.c13
-rw-r--r--sound/firewire/fireface/ff.c90
-rw-r--r--sound/firewire/fireface/ff.h3
-rw-r--r--sound/firewire/fireworks/fireworks.c120
-rw-r--r--sound/firewire/fireworks/fireworks.h13
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c24
-rw-r--r--sound/firewire/lib.c32
-rw-r--r--sound/firewire/lib.h3
-rw-r--r--sound/firewire/motu/Makefile3
-rw-r--r--sound/firewire/motu/amdtp-motu.c139
-rw-r--r--sound/firewire/motu/motu-protocol-v1.c470
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c181
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c92
-rw-r--r--sound/firewire/motu/motu-stream.c34
-rw-r--r--sound/firewire/motu/motu.c91
-rw-r--r--sound/firewire/motu/motu.h47
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c62
-rw-r--r--sound/firewire/oxfw/oxfw.c261
-rw-r--r--sound/firewire/oxfw/oxfw.h24
-rw-r--r--sound/firewire/tascam/amdtp-tascam.c6
-rw-r--r--sound/firewire/tascam/tascam-stream.c26
-rw-r--r--sound/firewire/tascam/tascam.c92
-rw-r--r--sound/firewire/tascam/tascam.h3
-rw-r--r--sound/hda/intel-dsp-config.c4
-rw-r--r--sound/i2c/cs8427.c24
-rw-r--r--sound/i2c/other/ak4114.c3
-rw-r--r--sound/i2c/other/ak4117.c3
-rw-r--r--sound/i2c/tea6330t.c26
-rw-r--r--sound/isa/ad1816a/ad1816a.c23
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c27
-rw-r--r--sound/isa/als100.c18
-rw-r--r--sound/isa/azt2320.c21
-rw-r--r--sound/isa/cmi8330.c39
-rw-r--r--sound/isa/cs423x/cs4236.c21
-rw-r--r--sound/isa/cs423x/cs4236_lib.c12
-rw-r--r--sound/isa/es1688/es1688_lib.c3
-rw-r--r--sound/isa/es18xx.c51
-rw-r--r--sound/isa/gus/gus_main.c20
-rw-r--r--sound/isa/gus/gus_mem.c3
-rw-r--r--sound/isa/gus/gus_mixer.c6
-rw-r--r--sound/isa/gus/gus_pcm.c23
-rw-r--r--sound/isa/gus/gus_uart.c3
-rw-r--r--sound/isa/gus/gusclassic.c6
-rw-r--r--sound/isa/gus/gusextreme.c6
-rw-r--r--sound/isa/gus/gusmax.c36
-rw-r--r--sound/isa/gus/interwave.c90
-rw-r--r--sound/isa/opl3sa2.c68
-rw-r--r--sound/isa/opti9xx/miro.c27
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c27
-rw-r--r--sound/isa/sb/emu8000.c21
-rw-r--r--sound/isa/sb/emu8000_patch.c3
-rw-r--r--sound/isa/sb/emu8000_pcm.c6
-rw-r--r--sound/isa/sb/sb16.c65
-rw-r--r--sound/isa/sb/sb16_csp.c33
-rw-r--r--sound/isa/sb/sb16_main.c6
-rw-r--r--sound/isa/sb/sb8.c38
-rw-r--r--sound/isa/sb/sb8_main.c3
-rw-r--r--sound/isa/sb/sb8_midi.c3
-rw-r--r--sound/isa/sb/sb_common.c9
-rw-r--r--sound/isa/sb/sb_mixer.c55
-rw-r--r--sound/isa/wavefront/wavefront.c6
-rw-r--r--sound/isa/wavefront/wavefront_midi.c20
-rw-r--r--sound/isa/wavefront/wavefront_synth.c56
-rw-r--r--sound/isa/wss/wss_lib.c9
-rw-r--r--sound/mips/snd-n64.c4
-rw-r--r--sound/oss/dmasound/dmasound_core.c14
-rw-r--r--sound/parisc/harmony.c7
-rw-r--r--sound/pci/ac97/ac97_codec.c205
-rw-r--r--sound/pci/ac97/ac97_patch.c137
-rw-r--r--sound/pci/ad1889.c15
-rw-r--r--sound/pci/ak4531_codec.c9
-rw-r--r--sound/pci/als300.c22
-rw-r--r--sound/pci/als4000.c55
-rw-r--r--sound/pci/atiixp.c33
-rw-r--r--sound/pci/atiixp_modem.c40
-rw-r--r--sound/pci/au88x0/au88x0.c77
-rw-r--r--sound/pci/au88x0/au88x0_a3d.c28
-rw-r--r--sound/pci/au88x0/au88x0_core.c47
-rw-r--r--sound/pci/au88x0/au88x0_eq.c20
-rw-r--r--sound/pci/au88x0/au88x0_mixer.c3
-rw-r--r--sound/pci/au88x0/au88x0_mpu401.c14
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c15
-rw-r--r--sound/pci/azt3328.c3
-rw-r--r--sound/pci/bt87x.c3
-rw-r--r--sound/pci/ca0106/ca0106_main.c18
-rw-r--r--sound/pci/ca0106/ca_midi.c3
-rw-r--r--sound/pci/cmipci.c83
-rw-r--r--sound/pci/cs4281.c54
-rw-r--r--sound/pci/cs46xx/cs46xx.c31
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c51
-rw-r--r--sound/pci/cs46xx/dsp_spos.c3
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c28
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c10
-rw-r--r--sound/pci/echoaudio/darla20_dsp.c6
-rw-r--r--sound/pci/echoaudio/darla24_dsp.c6
-rw-r--r--sound/pci/echoaudio/echo3g_dsp.c3
-rw-r--r--sound/pci/echoaudio/echoaudio.c220
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.c12
-rw-r--r--sound/pci/echoaudio/echoaudio_gml.c3
-rw-r--r--sound/pci/echoaudio/gina20_dsp.c6
-rw-r--r--sound/pci/echoaudio/gina24_dsp.c6
-rw-r--r--sound/pci/echoaudio/indigo_dsp.c6
-rw-r--r--sound/pci/echoaudio/indigodj_dsp.c6
-rw-r--r--sound/pci/echoaudio/indigoio_dsp.c6
-rw-r--r--sound/pci/echoaudio/layla20_dsp.c6
-rw-r--r--sound/pci/echoaudio/layla24_dsp.c9
-rw-r--r--sound/pci/echoaudio/mia_dsp.c6
-rw-r--r--sound/pci/echoaudio/midi.c4
-rw-r--r--sound/pci/echoaudio/mona_dsp.c6
-rw-r--r--sound/pci/emu10k1/emu10k1.c42
-rw-r--r--sound/pci/emu10k1/emu10k1_callback.c3
-rw-r--r--sound/pci/emu10k1/emu10k1x.c85
-rw-r--r--sound/pci/emu10k1/emufx.c13
-rw-r--r--sound/pci/emu10k1/emumixer.c78
-rw-r--r--sound/pci/emu10k1/emumpu401.c12
-rw-r--r--sound/pci/emu10k1/emupcm.c24
-rw-r--r--sound/pci/emu10k1/memory.c21
-rw-r--r--sound/pci/emu10k1/p16v.c13
-rw-r--r--sound/pci/emu10k1/timer.c3
-rw-r--r--sound/pci/ens1370.c45
-rw-r--r--sound/pci/es1938.c42
-rw-r--r--sound/pci/es1968.c75
-rw-r--r--sound/pci/fm801.c63
-rw-r--r--sound/pci/hda/hda_bind.c7
-rw-r--r--sound/pci/hda/hda_codec.c10
-rw-r--r--sound/pci/hda/hda_generic.c12
-rw-r--r--sound/pci/hda/hda_intel.c28
-rw-r--r--sound/pci/hda/hda_local.h2
-rw-r--r--sound/pci/hda/hda_tegra.c7
-rw-r--r--sound/pci/hda/patch_ca0132.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c146
-rw-r--r--sound/pci/hda/patch_via.c1
-rw-r--r--sound/pci/ice1712/delta.c6
-rw-r--r--sound/pci/ice1712/ews.c24
-rw-r--r--sound/pci/intel8x0.c50
-rw-r--r--sound/pci/intel8x0m.c38
-rw-r--r--sound/pci/korg1212/korg1212.c24
-rw-r--r--sound/pci/lx6464es/lx_core.c4
-rw-r--r--sound/pci/maestro3.c21
-rw-r--r--sound/pci/mixart/mixart.c29
-rw-r--r--sound/pci/mixart/mixart_hwdep.c17
-rw-r--r--sound/pci/mixart/mixart_mixer.c33
-rw-r--r--sound/pci/nm256/nm256.c27
-rw-r--r--sound/pci/pcxhr/pcxhr.c22
-rw-r--r--sound/pci/pcxhr/pcxhr_hwdep.c9
-rw-r--r--sound/pci/riptide/riptide.c88
-rw-r--r--sound/pci/rme32.c76
-rw-r--r--sound/pci/rme96.c148
-rw-r--r--sound/pci/rme9652/hdsp.c124
-rw-r--r--sound/pci/rme9652/rme9652.c98
-rw-r--r--sound/pci/sonicvibes.c67
-rw-r--r--sound/pci/trident/trident.c41
-rw-r--r--sound/pci/trident/trident.h1
-rw-r--r--sound/pci/trident/trident_main.c95
-rw-r--r--sound/pci/trident/trident_memory.c51
-rw-r--r--sound/pci/via82xx.c78
-rw-r--r--sound/pci/via82xx_modem.c52
-rw-r--r--sound/pci/vx222/vx222.c18
-rw-r--r--sound/pci/vx222/vx222_ops.c12
-rw-r--r--sound/pci/ymfpci/ymfpci.c71
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c72
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c3
-rw-r--r--sound/pcmcia/vx/vxp_mixer.c6
-rw-r--r--sound/pcmcia/vx/vxp_ops.c6
-rw-r--r--sound/pcmcia/vx/vxpocket.c3
-rw-r--r--sound/ppc/beep.c5
-rw-r--r--sound/ppc/daca.c24
-rw-r--r--sound/ppc/keywest.c6
-rw-r--r--sound/ppc/pmac.c6
-rw-r--r--sound/ppc/powermac.c27
-rw-r--r--sound/ppc/tumbler.c72
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/adi/axi-i2s.c3
-rw-r--r--sound/soc/adi/axi-spdif.c3
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c10
-rw-r--r--sound/soc/amd/acp-pcm-dma.c2
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c2
-rw-r--r--sound/soc/amd/renoir/acp3x-pdm-dma.c6
-rw-r--r--sound/soc/amd/renoir/rn-pci-acp3x.c2
-rw-r--r--sound/soc/atmel/atmel-classd.c3
-rw-r--r--sound/soc/atmel/atmel-i2s.c38
-rw-r--r--sound/soc/atmel/atmel-pdmic.c3
-rw-r--r--sound/soc/atmel/mchp-i2s-mcc.c3
-rw-r--r--sound/soc/atmel/mikroe-proto.c9
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c3
-rw-r--r--sound/soc/bcm/cygnus-ssp.c7
-rw-r--r--sound/soc/codecs/Kconfig60
-rw-r--r--sound/soc/codecs/Makefile13
-rw-r--r--sound/soc/codecs/ak4613.c11
-rw-r--r--sound/soc/codecs/cirrus_legacy.h21
-rw-r--r--sound/soc/codecs/cs35l32.c34
-rw-r--r--sound/soc/codecs/cs35l33.c15
-rw-r--r--sound/soc/codecs/cs35l34.c39
-rw-r--r--sound/soc/codecs/cs35l35.c37
-rw-r--r--sound/soc/codecs/cs35l35.h3
-rw-r--r--sound/soc/codecs/cs35l36.c18
-rw-r--r--sound/soc/codecs/cs4265.c10
-rw-r--r--sound/soc/codecs/cs42l42.c302
-rw-r--r--sound/soc/codecs/cs42l42.h10
-rw-r--r--sound/soc/codecs/cs42l52.c19
-rw-r--r--sound/soc/codecs/cs42l56.c7
-rw-r--r--sound/soc/codecs/cs42l73.c30
-rw-r--r--sound/soc/codecs/cs43130.c55
-rw-r--r--sound/soc/codecs/cs47l24.c2
-rw-r--r--sound/soc/codecs/cs53l30.c22
-rw-r--r--sound/soc/codecs/cx20442.c4
-rw-r--r--sound/soc/codecs/hdmi-codec.c238
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c2
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c4
-rw-r--r--sound/soc/codecs/max98373-sdw.c14
-rw-r--r--sound/soc/codecs/max98373.h2
-rw-r--r--sound/soc/codecs/mt6359-accdet.c2
-rw-r--r--sound/soc/codecs/mt6359.c2
-rw-r--r--sound/soc/codecs/nau8824.c42
-rw-r--r--sound/soc/codecs/pcm3168a.c26
-rw-r--r--sound/soc/codecs/rk3328_codec.c28
-rw-r--r--sound/soc/codecs/rk817_codec.c541
-rw-r--r--sound/soc/codecs/rt1019.c5
-rw-r--r--sound/soc/codecs/rt1308-sdw.c2
-rw-r--r--sound/soc/codecs/rt1316-sdw.c2
-rw-r--r--sound/soc/codecs/rt286.c1
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/rt5682-i2c.c12
-rw-r--r--sound/soc/codecs/rt5682-sdw.c57
-rw-r--r--sound/soc/codecs/rt5682.c9
-rw-r--r--sound/soc/codecs/rt5682.h2
-rw-r--r--sound/soc/codecs/rt700-sdw.c36
-rw-r--r--sound/soc/codecs/rt700.c4
-rw-r--r--sound/soc/codecs/rt700.h2
-rw-r--r--sound/soc/codecs/rt711-sdca-sdw.c58
-rw-r--r--sound/soc/codecs/rt711-sdca.c8
-rw-r--r--sound/soc/codecs/rt711-sdca.h2
-rw-r--r--sound/soc/codecs/rt711-sdw.c36
-rw-r--r--sound/soc/codecs/rt711-sdw.h2
-rw-r--r--sound/soc/codecs/rt711.c34
-rw-r--r--sound/soc/codecs/rt711.h31
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.c3
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.h1
-rw-r--r--sound/soc/codecs/rt715-sdca.c9
-rw-r--r--sound/soc/codecs/rt715-sdca.h3
-rw-r--r--sound/soc/codecs/rt715-sdw.c2
-rw-r--r--sound/soc/codecs/sigmadsp.h1
-rw-r--r--sound/soc/codecs/tfa989x.c357
-rw-r--r--sound/soc/codecs/tlv320aic26.c12
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c12
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h4
-rw-r--r--sound/soc/codecs/tlv320aic32x4-i2c.c22
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c23
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c171
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h10
-rw-r--r--sound/soc/codecs/wcd-clsh-v2.c348
-rw-r--r--sound/soc/codecs/wcd-clsh-v2.h16
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c1475
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.h340
-rw-r--r--sound/soc/codecs/wcd934x.c884
-rw-r--r--sound/soc/codecs/wcd938x-sdw.c320
-rw-r--r--sound/soc/codecs/wcd938x.c3737
-rw-r--r--sound/soc/codecs/wcd938x.h718
-rw-r--r--sound/soc/codecs/wm2200.c1
-rw-r--r--sound/soc/codecs/wm5102.c2
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c6
-rw-r--r--sound/soc/codecs/wm8962.c7
-rw-r--r--sound/soc/codecs/wm_adsp.c25
-rw-r--r--sound/soc/codecs/wm_adsp.h2
-rw-r--r--sound/soc/dwc/dwc-i2s.c3
-rw-r--r--sound/soc/fsl/Kconfig13
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c9
-rw-r--r--sound/soc/fsl/fsl_asrc.c3
-rw-r--r--sound/soc/fsl/fsl_aud2htx.c3
-rw-r--r--sound/soc/fsl/fsl_easrc.c7
-rw-r--r--sound/soc/fsl/fsl_esai.c3
-rw-r--r--sound/soc/fsl/fsl_micfil.c3
-rw-r--r--sound/soc/fsl/fsl_sai.c3
-rw-r--r--sound/soc/fsl/fsl_spdif.c97
-rw-r--r--sound/soc/fsl/fsl_spdif.h1
-rw-r--r--sound/soc/fsl/fsl_ssi.c3
-rw-r--r--sound/soc/fsl/fsl_xcvr.c16
-rw-r--r--sound/soc/fsl/imx-audio-rpmsg.c12
-rw-r--r--sound/soc/fsl/imx-audmix.c4
-rw-r--r--sound/soc/fsl/imx-card.c844
-rw-r--r--sound/soc/fsl/imx-es8328.c12
-rw-r--r--sound/soc/fsl/imx-pcm-rpmsg.c6
-rw-r--r--sound/soc/fsl/imx-pcm-rpmsg.h4
-rw-r--r--sound/soc/fsl/imx-rpmsg.c1
-rw-r--r--sound/soc/generic/simple-card-utils.c16
-rw-r--r--sound/soc/generic/simple-card.c1
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c17
-rw-r--r--sound/soc/img/img-i2s-in.c5
-rw-r--r--sound/soc/img/img-i2s-out.c3
-rw-r--r--sound/soc/img/img-parallel-out.c3
-rw-r--r--sound/soc/img/img-spdif-in.c3
-rw-r--r--sound/soc/img/img-spdif-out.c3
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c3
-rw-r--r--sound/soc/intel/boards/Kconfig43
-rw-r--r--sound/soc/intel/boards/Makefile30
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c4
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c11
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c4
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c1
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c1
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c1
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c1
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c1
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c1
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c1
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c1
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c1
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c1
-rw-r--r--sound/soc/intel/boards/ehl_rt5660.c4
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c5
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c5
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c4
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c5
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c3
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c2
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c1
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c3
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c3
-rw-r--r--sound/soc/intel/boards/skl_rt286.c3
-rw-r--r--sound/soc/intel/boards/sof_cs42l42.c508
-rw-r--r--sound/soc/intel/boards/sof_da7219_max98373.c10
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c84
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.h14
-rw-r--r--sound/soc/intel/boards/sof_pcm512x.c2
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c132
-rw-r--r--sound/soc/intel/boards/sof_sdw.c78
-rw-r--r--sound/soc/intel/boards/sof_sdw_common.h9
-rw-r--r--sound/soc/intel/boards/sof_sdw_hdmi.c37
-rw-r--r--sound/soc/intel/boards/sof_sdw_max98373.c81
-rw-r--r--sound/soc/intel/boards/sof_wm8804.c1
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c95
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c22
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c14
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c12
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c38
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c6
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c7
-rw-r--r--sound/soc/jz4740/jz4740-i2s.h2
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c26
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c24
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-adda.c1
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-dai-adda.c2
-rw-r--r--sound/soc/meson/g12a-toacodec.c143
-rw-r--r--sound/soc/meson/meson-card-utils.c6
-rw-r--r--sound/soc/qcom/apq8016_sbc.c7
-rw-r--r--sound/soc/qcom/lpass-cpu.c49
-rw-r--r--sound/soc/qcom/lpass-platform.c12
-rw-r--r--sound/soc/qcom/lpass-sc7180.c1
-rw-r--r--sound/soc/qcom/lpass.h2
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c41
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.h2
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c11
-rw-r--r--sound/soc/qcom/sdm845.c8
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c3
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c3
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c3
-rw-r--r--sound/soc/samsung/i2s.c3
-rw-r--r--sound/soc/samsung/pcm.c3
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c3
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c3
-rw-r--r--sound/soc/sh/fsi.c17
-rw-r--r--sound/soc/sh/rcar/Makefile2
-rw-r--r--sound/soc/sh/rcar/adg.c180
-rw-r--r--sound/soc/sh/rcar/cmd.c14
-rw-r--r--sound/soc/sh/rcar/core.c154
-rw-r--r--sound/soc/sh/rcar/ctu.c14
-rw-r--r--sound/soc/sh/rcar/debugfs.c96
-rw-r--r--sound/soc/sh/rcar/dma.c39
-rw-r--r--sound/soc/sh/rcar/dvc.c16
-rw-r--r--sound/soc/sh/rcar/gen.c9
-rw-r--r--sound/soc/sh/rcar/mix.c14
-rw-r--r--sound/soc/sh/rcar/rsnd.h75
-rw-r--r--sound/soc/sh/rcar/src.c32
-rw-r--r--sound/soc/sh/rcar/ssi.c160
-rw-r--r--sound/soc/sh/rcar/ssiu.c147
-rw-r--r--sound/soc/soc-component.c63
-rw-r--r--sound/soc/soc-core.c379
-rw-r--r--sound/soc/soc-dai.c76
-rw-r--r--sound/soc/soc-pcm.c24
-rw-r--r--sound/soc/soc-topology.c466
-rw-r--r--sound/soc/soc-utils.c29
-rw-r--r--sound/soc/sof/compress.c2
-rw-r--r--sound/soc/sof/compress.h2
-rw-r--r--sound/soc/sof/imx/imx8.c1
-rw-r--r--sound/soc/sof/intel/Kconfig4
-rw-r--r--sound/soc/sof/intel/Makefile5
-rw-r--r--sound/soc/sof/intel/atom.c463
-rw-r--r--sound/soc/sof/intel/atom.h74
-rw-r--r--sound/soc/sof/intel/byt.c768
-rw-r--r--sound/soc/sof/intel/hda-ipc.c4
-rw-r--r--sound/soc/sof/intel/hda-loader.c5
-rw-r--r--sound/soc/sof/intel/hda.c39
-rw-r--r--sound/soc/sof/intel/pci-tgl.c3
-rw-r--r--sound/soc/sof/intel/pci-tng.c171
-rw-r--r--sound/soc/sof/loader.c2
-rw-r--r--sound/soc/sof/ops.h10
-rw-r--r--sound/soc/sof/sof-acpi-dev.c5
-rw-r--r--sound/soc/sof/sof-of-dev.c5
-rw-r--r--sound/soc/sof/sof-pci-dev.c7
-rw-r--r--sound/soc/sof/topology.c17
-rw-r--r--sound/soc/spear/spdif_out.c3
-rw-r--r--sound/soc/sprd/sprd-mcdt.c3
-rw-r--r--sound/soc/sti/sti_uniperif.c12
-rw-r--r--sound/soc/stm/stm32_i2s.c3
-rw-r--r--sound/soc/stm/stm32_sai_sub.c3
-rw-r--r--sound/soc/stm/stm32_spdifrx.c3
-rw-r--r--sound/soc/sunxi/sun4i-codec.c3
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c3
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c3
-rw-r--r--sound/soc/tegra/Kconfig12
-rw-r--r--sound/soc/tegra/Makefile19
-rw-r--r--sound/soc/tegra/tegra20_i2s.c3
-rw-r--r--sound/soc/tegra/tegra20_spdif.c3
-rw-r--r--sound/soc/tegra/tegra210_admaif.c4
-rw-r--r--sound/soc/tegra/tegra30_ahub.c3
-rw-r--r--sound/soc/tegra/tegra_alc5632.c259
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.c854
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.h49
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.c225
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.h38
-rw-r--r--sound/soc/tegra/tegra_max98090.c276
-rw-r--r--sound/soc/tegra/tegra_pcm.c30
-rw-r--r--sound/soc/tegra/tegra_rt5640.c222
-rw-r--r--sound/soc/tegra/tegra_rt5677.c324
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c211
-rw-r--r--sound/soc/tegra/tegra_wm8753.c185
-rw-r--r--sound/soc/tegra/tegra_wm8903.c351
-rw-r--r--sound/soc/tegra/tegra_wm9712.c166
-rw-r--r--sound/soc/tegra/trimslice.c172
-rw-r--r--sound/soc/ti/ams-delta.c11
-rw-r--r--sound/soc/ti/davinci-mcasp.c1
-rw-r--r--sound/soc/ti/j721e-evm.c18
-rw-r--r--sound/soc/ti/omap-mcbsp.c2
-rw-r--r--sound/soc/uniphier/aio-dma.c2
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c4
-rw-r--r--sound/sparc/amd7930.c14
-rw-r--r--sound/sparc/cs4231.c10
-rw-r--r--sound/sparc/dbri.c11
-rw-r--r--sound/synth/emux/emux.c3
-rw-r--r--sound/synth/emux/emux_effect.c13
-rw-r--r--sound/synth/emux/emux_hwdep.c6
-rw-r--r--sound/synth/emux/emux_nrpn.c2
-rw-r--r--sound/synth/emux/soundfont.c46
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/card.h11
-rw-r--r--sound/usb/clock.c311
-rw-r--r--sound/usb/endpoint.c5
-rw-r--r--sound/usb/endpoint.h1
-rw-r--r--sound/usb/format.c2
-rw-r--r--sound/usb/media.c2
-rw-r--r--sound/usb/mixer.c51
-rw-r--r--sound/usb/mixer.h1
-rw-r--r--sound/usb/mixer_quirks.c6
-rw-r--r--sound/usb/mixer_s1810c.c2
-rw-r--r--sound/usb/mixer_scarlett.c2
-rw-r--r--sound/usb/mixer_scarlett_gen2.c3247
-rw-r--r--sound/usb/pcm.c214
-rw-r--r--sound/usb/pcm.h3
-rw-r--r--sound/usb/quirks-table.h70
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/usx2y/us122l.c63
-rw-r--r--sound/usb/usx2y/us122l.h2
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c135
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.h2
-rw-r--r--sound/usb/usx2y/usb_stream.c82
-rw-r--r--sound/usb/usx2y/usb_stream.h23
-rw-r--r--sound/usb/usx2y/usbus428ctldefs.h104
-rw-r--r--sound/usb/usx2y/usbusx2y.c389
-rw-r--r--sound/usb/usx2y/usbusx2y.h65
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c684
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c627
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.h4
-rw-r--r--sound/x86/intel_hdmi_audio.c6
-rw-r--r--sound/x86/intel_hdmi_audio.h2
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h11
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h3
-rw-r--r--tools/arch/x86/include/asm/msr-index.h4
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h13
-rw-r--r--tools/arch/x86/include/uapi/asm/svm.h3
-rw-r--r--tools/bootconfig/main.c64
-rw-r--r--tools/bootconfig/samples/bad-override.bconf3
-rw-r--r--tools/bootconfig/samples/bad-override2.bconf3
-rw-r--r--tools/bootconfig/samples/good-mixed-append.bconf4
-rw-r--r--tools/bootconfig/samples/good-mixed-kv1.bconf (renamed from tools/bootconfig/samples/bad-mixed-kv1.bconf)0
-rw-r--r--tools/bootconfig/samples/good-mixed-kv2.bconf (renamed from tools/bootconfig/samples/bad-mixed-kv2.bconf)0
-rw-r--r--tools/bootconfig/samples/good-mixed-kv3.bconf6
-rw-r--r--tools/bootconfig/samples/good-mixed-override.bconf4
-rw-r--r--tools/bpf/Makefile7
-rw-r--r--tools/bpf/bpftool/common.c5
-rw-r--r--tools/bpf/bpftool/jit_disasm.c6
-rw-r--r--tools/bpf/runqslower/runqslower.bpf.c2
-rw-r--r--tools/include/linux/bitmap.h11
-rw-r--r--tools/include/linux/kconfig.h6
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h3
-rw-r--r--tools/include/uapi/asm-generic/unistd.h10
-rw-r--r--tools/include/uapi/drm/drm.h28
-rw-r--r--tools/include/uapi/drm/i915_drm.h393
-rw-r--r--tools/include/uapi/linux/kvm.h105
-rw-r--r--tools/include/uapi/linux/mount.h1
-rw-r--r--tools/include/uapi/sound/asound.h30
-rw-r--r--tools/io_uring/io_uring-cp.c31
-rw-r--r--tools/lib/bitmap.c14
-rw-r--r--tools/lib/bpf/btf.c3
-rw-r--r--tools/lib/bpf/libbpf.c4
-rw-r--r--tools/lib/bpf/libbpf_probes.c4
-rw-r--r--tools/lib/perf/Build2
-rw-r--r--tools/lib/perf/Makefile30
-rw-r--r--tools/lib/perf/evlist.c22
-rw-r--r--tools/lib/perf/evsel.c42
-rw-r--r--tools/lib/perf/include/internal/evlist.h2
-rw-r--r--tools/lib/perf/include/internal/evsel.h5
-rw-r--r--tools/lib/perf/include/internal/tests.h4
-rw-r--r--tools/lib/perf/include/perf/evlist.h1
-rw-r--r--tools/lib/perf/libperf.map1
-rw-r--r--tools/lib/perf/tests/Build5
-rw-r--r--tools/lib/perf/tests/Makefile40
-rw-r--r--tools/lib/perf/tests/main.c15
-rw-r--r--tools/lib/perf/tests/test-cpumap.c3
-rw-r--r--tools/lib/perf/tests/test-evlist.c30
-rw-r--r--tools/lib/perf/tests/test-evsel.c3
-rw-r--r--tools/lib/perf/tests/test-threadmap.c3
-rw-r--r--tools/lib/perf/tests/tests.h10
-rw-r--r--tools/memory-model/Documentation/explanation.txt2
-rw-r--r--tools/perf/Documentation/itrace.txt1
-rw-r--r--tools/perf/Documentation/perf-annotate.txt7
-rw-r--r--tools/perf/Documentation/perf-config.txt6
-rw-r--r--tools/perf/Documentation/perf-dlfilter.txt251
-rw-r--r--tools/perf/Documentation/perf-inject.txt10
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt119
-rw-r--r--tools/perf/Documentation/perf-probe.txt19
-rw-r--r--tools/perf/Documentation/perf-script-python.txt46
-rw-r--r--tools/perf/Documentation/perf-script.txt15
-rw-r--r--tools/perf/Documentation/perf-top.txt12
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt33
-rw-r--r--tools/perf/Makefile.config14
-rw-r--r--tools/perf/Makefile.perf21
-rw-r--r--tools/perf/arch/arm/include/arch-tests.h5
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c133
-rw-r--r--tools/perf/arch/arm64/include/arch-tests.h5
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c45
-rw-r--r--tools/perf/arch/arm64/util/mem-events.c2
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/powerpc/include/arch-tests.h7
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/powerpc/util/mem-events.c2
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl3
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h12
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/x86/util/evlist.c6
-rw-r--r--tools/perf/arch/x86/util/iostat.c4
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c46
-rw-r--r--tools/perf/arch/x86/util/mem-events.c54
-rw-r--r--tools/perf/builtin-annotate.c11
-rw-r--r--tools/perf/builtin-c2c.c40
-rw-r--r--tools/perf/builtin-diff.c4
-rw-r--r--tools/perf/builtin-inject.c111
-rw-r--r--tools/perf/builtin-mem.c51
-rw-r--r--tools/perf/builtin-probe.c14
-rw-r--r--tools/perf/builtin-record.c40
-rw-r--r--tools/perf/builtin-report.c49
-rw-r--r--tools/perf/builtin-sched.c35
-rw-r--r--tools/perf/builtin-script.c252
-rw-r--r--tools/perf/builtin-stat.c18
-rw-r--r--tools/perf/builtin-top.c18
-rw-r--r--tools/perf/builtin-trace.c45
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json424
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json724
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/floating-point.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/frontend.json610
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json273
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/memory.json654
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/other.json1089
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/pipeline.json1169
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json251
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/cache.json706
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/floating-point.json95
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/frontend.json469
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json291
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json181
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json972
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json333
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json2476
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json245
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Context.c168
-rw-r--r--tools/perf/scripts/python/bin/intel-pt-events-record4
-rw-r--r--tools/perf/scripts/python/bin/intel-pt-events-report4
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py89
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py283
-rw-r--r--tools/perf/scripts/python/libxed.py107
-rw-r--r--tools/perf/tests/bpf.c4
-rw-r--r--tools/perf/tests/builtin-test.c50
-rw-r--r--tools/perf/tests/dwarf-unwind.c4
-rw-r--r--tools/perf/tests/event_update.c6
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c9
-rw-r--r--tools/perf/tests/make7
-rw-r--r--tools/perf/tests/maps.c2
-rw-r--r--tools/perf/tests/mmap-basic.c8
-rw-r--r--tools/perf/tests/parse-events.c90
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c3
-rw-r--r--tools/perf/tests/pfm.c14
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh16
-rw-r--r--tools/perf/tests/tests.h2
-rw-r--r--tools/perf/tests/topology.c1
-rw-r--r--tools/perf/ui/browsers/annotate.c34
-rw-r--r--tools/perf/ui/gtk/annotate.c4
-rw-r--r--tools/perf/util/Build8
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/arm-spe.c73
-rw-r--r--tools/perf/util/auxtrace.c30
-rw-r--r--tools/perf/util/auxtrace.h53
-rw-r--r--tools/perf/util/bpf_counter.c59
-rw-r--r--tools/perf/util/bpf_counter.h52
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c307
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c191
-rw-r--r--tools/perf/util/cgroup.c48
-rw-r--r--tools/perf/util/cgroup.h13
-rw-r--r--tools/perf/util/cputopo.c80
-rw-r--r--tools/perf/util/cputopo.h13
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c61
-rw-r--r--tools/perf/util/cs-etm.c271
-rw-r--r--tools/perf/util/cs-etm.h4
-rw-r--r--tools/perf/util/data.c5
-rw-r--r--tools/perf/util/data.h1
-rw-r--r--tools/perf/util/db-export.c12
-rw-r--r--tools/perf/util/db-export.h2
-rw-r--r--tools/perf/util/dlfilter.c615
-rw-r--r--tools/perf/util/dlfilter.h97
-rw-r--r--tools/perf/util/dso.c4
-rw-r--r--tools/perf/util/dwarf-aux.c8
-rw-r--r--tools/perf/util/dwarf-aux.h2
-rw-r--r--tools/perf/util/env.c14
-rw-r--r--tools/perf/util/env.h16
-rw-r--r--tools/perf/util/evlist.c65
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c55
-rw-r--r--tools/perf/util/evsel.h14
-rw-r--r--tools/perf/util/header.c272
-rw-r--r--tools/perf/util/header.h2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c736
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h21
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h5
-rw-r--r--tools/perf/util/intel-pt.c229
-rw-r--r--tools/perf/util/llvm-utils.c2
-rw-r--r--tools/perf/util/lzma.c8
-rw-r--r--tools/perf/util/mem-events.c103
-rw-r--r--tools/perf/util/mem-events.h4
-rw-r--r--tools/perf/util/metricgroup.c22
-rw-r--r--tools/perf/util/parse-events.c8
-rw-r--r--tools/perf/util/parse-events.y2
-rw-r--r--tools/perf/util/perf_dlfilter.h150
-rw-r--r--tools/perf/util/pfm.c4
-rw-r--r--tools/perf/util/pmu-hybrid.h11
-rw-r--r--tools/perf/util/pmu.c62
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/perf/util/probe-event.c254
-rw-r--r--tools/perf/util/probe-event.h6
-rw-r--r--tools/perf/util/probe-file.c99
-rw-r--r--tools/perf/util/probe-finder.c20
-rw-r--r--tools/perf/util/probe-finder.h2
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/record.c6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c13
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c376
-rw-r--r--tools/perf/util/session.c12
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/srccode.c3
-rw-r--r--tools/perf/util/stat-display.c31
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/stat.c14
-rw-r--r--tools/perf/util/stream.c2
-rw-r--r--tools/perf/util/symbol-elf.c99
-rw-r--r--tools/perf/util/trace-event-scripting.c32
-rw-r--r--tools/perf/util/trace-event.h29
-rw-r--r--tools/rcu/rcu-cbs.py46
-rw-r--r--tools/testing/ktest/examples/bootconfigs/boottrace.bconf49
-rw-r--r--tools/testing/ktest/examples/bootconfigs/config-bootconfig1
-rw-r--r--tools/testing/ktest/examples/bootconfigs/functiongraph.bconf15
-rw-r--r--tools/testing/ktest/examples/bootconfigs/tracing.bconf33
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-boottrace.sh84
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh61
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-tracing.sh72
-rw-r--r--tools/testing/ktest/examples/include/bootconfig.conf69
-rw-r--r--tools/testing/ktest/examples/kvm.conf1
-rw-r--r--tools/testing/kunit/configs/all_tests.config3
-rw-r--r--tools/testing/kunit/configs/default.config (renamed from arch/um/configs/kunit_defconfig)2
-rwxr-xr-xtools/testing/kunit/kunit.py59
-rw-r--r--tools/testing/kunit/kunit_config.py7
-rw-r--r--tools/testing/kunit/kunit_kernel.py179
-rw-r--r--tools/testing/kunit/kunit_parser.py223
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py74
-rw-r--r--tools/testing/kunit/qemu_config.py16
-rw-r--r--tools/testing/kunit/qemu_configs/alpha.py10
-rw-r--r--tools/testing/kunit/qemu_configs/arm.py13
-rw-r--r--tools/testing/kunit/qemu_configs/arm64.py12
-rw-r--r--tools/testing/kunit/qemu_configs/i386.py10
-rw-r--r--tools/testing/kunit/qemu_configs/powerpc.py12
-rw-r--r--tools/testing/kunit/qemu_configs/riscv.py31
-rw-r--r--tools/testing/kunit/qemu_configs/s390.py14
-rw-r--r--tools/testing/kunit/qemu_configs/sparc.py10
-rw-r--r--tools/testing/kunit/qemu_configs/x86_64.py10
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log (renamed from tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log)0
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log2
-rw-r--r--tools/testing/kunit/test_data/test_skip_all_tests.log15
-rw-r--r--tools/testing/kunit/test_data/test_skip_tests.log15
-rw-r--r--tools/testing/nvdimm/test/nfit.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c36
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c18
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c12
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c229
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc18
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile1
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c3
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c429
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c1
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h3
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/hyperv.h5
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/guest_modes.c16
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c5
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c3
-rw-r--r--tools/testing/selftests/kvm/steal_time.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_clock.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c43
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmu_role_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c70
-rw-r--r--tools/testing/selftests/lib.mk1
-rw-r--r--tools/testing/selftests/lkdtm/config7
-rwxr-xr-xtools/testing/selftests/lkdtm/run.sh12
-rwxr-xr-xtools/testing/selftests/lkdtm/stack-entropy.sh1
-rw-r--r--tools/testing/selftests/lkdtm/tests.txt11
-rwxr-xr-xtools/testing/selftests/memory-hotplug/mem-on-off-test.sh4
-rwxr-xr-xtools/testing/selftests/net/icmp_redirect.sh5
-rw-r--r--tools/testing/selftests/net/ipsec.c165
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh2
-rw-r--r--tools/testing/selftests/net/nettest.c55
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh212
-rw-r--r--tools/testing/selftests/net/timestamping.c55
-rw-r--r--tools/testing/selftests/net/tls.c3
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh167
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/null_syscall.c3
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c17
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c63
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile2
-rwxr-xr-xtools/testing/selftests/powerpc/security/mitigation-patching.sh75
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-again.sh33
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh6
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-end-run-stats.sh40
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-remote.sh249
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh61
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot8
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TREE2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TREE542
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT2
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/PREEMPT2
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h2
-rw-r--r--tools/testing/selftests/resctrl/README2
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c4
-rw-r--r--tools/testing/selftests/sgx/call.S6
-rw-r--r--tools/testing/selftests/sgx/defines.h10
-rw-r--r--tools/testing/selftests/sgx/load.c19
-rw-r--r--tools/testing/selftests/sgx/main.c239
-rw-r--r--tools/testing/selftests/sgx/main.h4
-rw-r--r--tools/testing/selftests/sgx/sigstruct.c41
-rw-r--r--tools/testing/selftests/sgx/test_encl.c19
-rw-r--r--tools/testing/selftests/sgx/test_encl.lds3
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c20
-rwxr-xr-xtools/testing/selftests/splice/short_splice_read.sh119
-rw-r--r--tools/testing/selftests/timers/rtcpie.c10
-rw-r--r--tools/testing/selftests/vm/.gitignore4
-rw-r--r--tools/testing/selftests/vm/Makefile8
-rw-r--r--tools/testing/selftests/vm/hmm-tests.c158
-rw-r--r--tools/testing/selftests/vm/khugepaged.c4
-rw-r--r--tools/testing/selftests/vm/madv_populate.c342
-rw-r--r--tools/testing/selftests/vm/memfd_secret.c296
-rw-r--r--tools/testing/selftests/vm/mremap_test.c118
-rw-r--r--tools/testing/selftests/vm/pkey-x86.h1
-rw-r--r--tools/testing/selftests/vm/protection_keys.c85
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh33
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c1056
-rw-r--r--tools/testing/selftests/x86/Makefile5
-rw-r--r--tools/testing/selftests/x86/corrupt_xstate_header.c114
-rw-r--r--tools/testing/selftests/x86/sigaltstack.c128
-rw-r--r--tools/virtio/Makefile3
-rw-r--r--tools/virtio/linux/spinlock.h56
-rw-r--r--tools/virtio/linux/virtio.h2
-rw-r--r--virt/kvm/coalesced_mmio.c2
-rw-r--r--virt/kvm/kvm_main.c49
6684 files changed, 252623 insertions, 107444 deletions
diff --git a/Documentation/ABI/obsolete/sysfs-bus-iio b/Documentation/ABI/obsolete/sysfs-bus-iio
new file mode 100644
index 000000000000..c9531bb64816
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-bus-iio
@@ -0,0 +1,182 @@
+What: /sys/bus/iio/devices/iio:deviceX/buffer/length
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Number of scans contained by the buffer.
+
+ Since kernel 5.11, multiple buffers are supported, so it is
+ better to use the following instead:
+ /sys/bus/iio/devices/iio:deviceX/bufferY/length
+
+What: /sys/bus/iio/devices/iio:deviceX/buffer/enable
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Actually start the buffer capture up. Will start trigger
+ if first device and appropriate.
+
+ Since kernel 5.11, multiple buffers are supported, so it is
+ better to use the following instead:
+ /sys/bus/iio/devices/iio:deviceX/bufferY/enable
+
+What: /sys/bus/iio/devices/iio:deviceX/scan_elements
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Directory containing interfaces for elements that will be
+ captured for a single triggered sample set in the buffer.
+
+ Since kernel 5.11 the scan_elements attributes are merged into
+ the bufferY directory, to be configurable per buffer.
+
+What: /sys/.../iio:deviceX/scan_elements/in_accel_x_en
+What: /sys/.../iio:deviceX/scan_elements/in_accel_y_en
+What: /sys/.../iio:deviceX/scan_elements/in_accel_z_en
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_x_en
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_y_en
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_z_en
+What: /sys/.../iio:deviceX/scan_elements/in_magn_x_en
+What: /sys/.../iio:deviceX/scan_elements/in_magn_y_en
+What: /sys/.../iio:deviceX/scan_elements/in_magn_z_en
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_en
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_en
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_tilt_comp_en
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_tilt_comp_en
+What: /sys/.../iio:deviceX/scan_elements/in_timestamp_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_supply_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY-voltageZ_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_i_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_q_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_i_en
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_q_en
+What: /sys/.../iio:deviceX/scan_elements/in_incli_x_en
+What: /sys/.../iio:deviceX/scan_elements/in_incli_y_en
+What: /sys/.../iio:deviceX/scan_elements/in_pressureY_en
+What: /sys/.../iio:deviceX/scan_elements/in_pressure_en
+What: /sys/.../iio:deviceX/scan_elements/in_rot_quaternion_en
+What: /sys/.../iio:deviceX/scan_elements/in_proximity_en
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scan element control for triggered data capture.
+
+ Since kernel 5.11 the scan_elements attributes are merged into
+ the bufferY directory, to be configurable per buffer.
+
+What: /sys/.../iio:deviceX/scan_elements/in_accel_type
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_type
+What: /sys/.../iio:deviceX/scan_elements/in_magn_type
+What: /sys/.../iio:deviceX/scan_elements/in_incli_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_supply_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_i_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_q_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_i_type
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_q_type
+What: /sys/.../iio:deviceX/scan_elements/in_timestamp_type
+What: /sys/.../iio:deviceX/scan_elements/in_pressureY_type
+What: /sys/.../iio:deviceX/scan_elements/in_pressure_type
+What: /sys/.../iio:deviceX/scan_elements/in_rot_quaternion_type
+What: /sys/.../iio:deviceX/scan_elements/in_proximity_type
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Description of the scan element data storage within the buffer
+ and hence the form in which it is read from user-space.
+ Form is [be|le]:[s|u]bits/storagebits[>>shift].
+ be or le specifies big or little endian. s or u specifies if
+ signed (2's complement) or unsigned. bits is the number of bits
+ of data and storagebits is the space (after padding) that it
+ occupies in the buffer. shift if specified, is the shift that
+ needs to be applied prior to masking out unused bits. Some
+ devices put their data in the middle of the transferred elements
+ with additional information on both sides. Note that some
+ devices will have additional information in the unused bits
+ so to get a clean value, the bits value must be used to mask
+ the buffer output value appropriately. The storagebits value
+ also specifies the data alignment. So s48/64>>2 will be a
+ signed 48 bit integer stored in a 64 bit location aligned to
+ a 64 bit boundary. To obtain the clean value, shift right 2
+ and apply a mask to zero the top 16 bits of the result.
+ For other storage combinations this attribute will be extended
+ appropriately.
+
+ Since kernel 5.11 the scan_elements attributes are merged into
+ the bufferY directory, to be configurable per buffer.
+
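As an illustration of the form described above, a minimal user-space sketch in C (the helper name is an assumption, not part of the ABI) that recovers the value of an element described as le:s48/64>>2, assuming the 64-bit storage word has already been read from the character device in host byte order:

    #include <stdint.h>

    /* Decode one scan element described as "le:s48/64>>2": a signed
     * 48-bit value stored in 64 bits, to be shifted right by 2 before
     * masking. Byte swapping for "be:" elements is left out. */
    static int64_t decode_s48_64_shift2(uint64_t raw)
    {
            raw >>= 2;                      /* apply the documented shift */
            raw &= (1ULL << 48) - 1;        /* keep the 48 data bits */

            /* sign-extend from bit 47 because the element is signed ("s") */
            if (raw & (1ULL << 47))
                    raw |= ~((1ULL << 48) - 1);

            return (int64_t)raw;
    }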
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_index
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_supply_index
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_i_index
+What: /sys/.../iio:deviceX/scan_elements/in_voltageY_q_index
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_i_index
+What: /sys/.../iio:deviceX/scan_elements/in_voltage_q_index
+What: /sys/.../iio:deviceX/scan_elements/in_accel_x_index
+What: /sys/.../iio:deviceX/scan_elements/in_accel_y_index
+What: /sys/.../iio:deviceX/scan_elements/in_accel_z_index
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_x_index
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_y_index
+What: /sys/.../iio:deviceX/scan_elements/in_anglvel_z_index
+What: /sys/.../iio:deviceX/scan_elements/in_magn_x_index
+What: /sys/.../iio:deviceX/scan_elements/in_magn_y_index
+What: /sys/.../iio:deviceX/scan_elements/in_magn_z_index
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_index
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_index
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_tilt_comp_index
+What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_tilt_comp_index
+What: /sys/.../iio:deviceX/scan_elements/in_incli_x_index
+What: /sys/.../iio:deviceX/scan_elements/in_incli_y_index
+What: /sys/.../iio:deviceX/scan_elements/in_timestamp_index
+What: /sys/.../iio:deviceX/scan_elements/in_pressureY_index
+What: /sys/.../iio:deviceX/scan_elements/in_pressure_index
+What: /sys/.../iio:deviceX/scan_elements/in_rot_quaternion_index
+What: /sys/.../iio:deviceX/scan_elements/in_proximity_index
+KernelVersion: 2.6.37
+Description:
+ A single positive integer specifying the position of this
+ scan element in the buffer. Note these are not dependent on
+ what is enabled and may not be contiguous. Thus for user-space
+ to establish the full layout these must be used in conjunction
+ with all _en attributes to establish which channels are present,
+ and the relevant _type attributes to establish the data storage
+ format.
+
+ Since kernel 5.11 the scan_elements attributes are merged into
+ the bufferY directory, to be configurable per buffer.
+
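A minimal sketch of how user space might combine the _en, _index and _type attributes above to derive the byte layout of one scan (struct and helper names are assumptions, not part of the ABI; channels are assumed to be pre-sorted by their _index values):

    #include <stddef.h>

    struct scan_chan {
            int enabled;                 /* from the in_*_en attribute */
            unsigned int storagebits;    /* from in_*_type, e.g. 64 in s48/64>>2 */
            size_t offset;               /* computed byte offset within one scan */
    };

    /* Lay out the enabled channels in index order, each element aligned
     * to its own storage size; returns the total bytes per scan. */
    static size_t layout_scan(struct scan_chan *ch, int nchan)
    {
            size_t pos = 0;
            int i;

            for (i = 0; i < nchan; i++) {
                    size_t bytes;

                    if (!ch[i].enabled)
                            continue;
                    bytes = ch[i].storagebits / 8;
                    if (pos % bytes)
                            pos += bytes - pos % bytes;
                    ch[i].offset = pos;
                    pos += bytes;
            }
            return pos;
    }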
+What: /sys/bus/iio/devices/iio:deviceX/buffer/watermark
+KernelVersion: 4.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ A single positive integer specifying the maximum number of scan
+ elements to wait for.
+
+ Poll will block until the watermark is reached.
+
+ Blocking read will wait until the minimum of the requested
+ read amount and the low water mark is available.
+
+ Non-blocking read will retrieve the available samples from the
+ buffer even if there are fewer samples than the watermark level. This
+ allows the application to block on poll with a timeout and read
+ the available samples after the timeout expires and thus have a
+ maximum delay guarantee.
+
+ Since kernel 5.11, multiple buffers are supported, so it is
+ better to use the following instead:
+ /sys/bus/iio/devices/iio:deviceX/bufferY/watermark
+
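The poll-with-timeout pattern this enables can be sketched as follows (illustrative only; the device node, scan size and timeout are assumptions rather than part of the ABI):

    #include <poll.h>
    #include <unistd.h>

    #define SCAN_BYTES 16   /* assumed size of one scan; device specific */

    /* fd is assumed open on /dev/iio:deviceX with O_RDONLY | O_NONBLOCK */
    static ssize_t drain_buffer(int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            char buf[64 * SCAN_BYTES];

            if (poll(&pfd, 1, 1000) < 0)    /* wait for the watermark, max 1 s */
                    return -1;

            /* a non-blocking read returns whatever is buffered, even if it
             * is less than the watermark, bounding the maximum delay */
            return read(fd, buf, sizeof(buf));
    }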
+What: /sys/bus/iio/devices/iio:deviceX/buffer/data_available
+KernelVersion: 4.16
+Contact: linux-iio@vger.kernel.org
+Description:
+ A read-only value indicating the bytes of data available in the
+ buffer. In the case of an output buffer, this indicates the
+ amount of empty space available to write data to. In the case of
+ an input buffer, this indicates the amount of data available for
+ reading.
+
+ Since kernel 5.11, multiple buffers are supported, so it is
+ better to use the following instead:
+ /sys/bus/iio/devices/iio:deviceX/bufferY/data_available
diff --git a/Documentation/ABI/stable/sysfs-driver-w1_ds2438 b/Documentation/ABI/stable/sysfs-driver-w1_ds2438
new file mode 100644
index 000000000000..d2e7681cc287
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-w1_ds2438
@@ -0,0 +1,13 @@
+What: /sys/bus/w1/devices/.../page1
+Date: April 2021
+Contact: Luiz Sampaio <sampaio.ime@gmail.com>
+Description: read the contents of page1 of the DS2438
+ see Documentation/w1/slaves/w1_ds2438.rst for detailed information
+Users: any user space application which wants to communicate with DS2438
+
+What: /sys/bus/w1/devices/.../offset
+Date: April 2021
+Contact: Luiz Sampaio <sampaio.ime@gmail.com>
+Description: write the contents to the offset register of the DS2438
+ see Documentation/w1/slaves/w1_ds2438.rst for detailed information
+Users: any user space application which wants to communicate with DS2438
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac2 b/Documentation/ABI/testing/configfs-usb-gadget-uac2
index d4356c8b8cd6..26fb8e9b4e61 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac2
@@ -8,6 +8,8 @@ Description:
c_chmask capture channel mask
c_srate capture sampling rate
c_ssize capture sample size (bytes)
+ c_sync capture synchronization type (async/adaptive)
+ fb_max maximum extra bandwidth in async mode
p_chmask playback channel mask
p_srate playback sampling rate
p_ssize playback sample size (bytes)
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index e89c6351503c..a5c28f606865 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -207,6 +207,14 @@ Contact: ogabbay@kernel.org
Description: Sets the PCI power state. Valid values are "1" for D0 and "2"
for D3Hot
+What: /sys/kernel/debug/habanalabs/hl<n>/skip_reset_on_timeout
+Date: Jun 2021
+KernelVersion: 5.13
+Contact: ynudelman@habana.ai
+Description: Sets the skip-reset-on-timeout option for the device. A value
+ of "0" means the device will be reset if some CS has timed out;
+ otherwise it will not be reset.
+
What: /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
Date: Mar 2020
KernelVersion: 5.6
diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter
index 566bd99fe0a5..20fe5afd4f9e 100644
--- a/Documentation/ABI/testing/sysfs-bus-counter
+++ b/Documentation/ABI/testing/sysfs-bus-counter
@@ -57,6 +57,7 @@ Description:
What: /sys/bus/counter/devices/counterX/countY/count_mode_available
What: /sys/bus/counter/devices/counterX/countY/error_noise_available
What: /sys/bus/counter/devices/counterX/countY/function_available
+What: /sys/bus/counter/devices/counterX/countY/prescaler_available
What: /sys/bus/counter/devices/counterX/countY/signalZ_action_available
KernelVersion: 5.2
Contact: linux-iio@vger.kernel.org
@@ -154,6 +155,15 @@ Description:
Count Y. If possible, this should match the name of the
respective channel as it appears in the device datasheet.
+What: /sys/bus/counter/devices/counterX/countY/prescaler
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure the prescaler value associated with Count Y.
+ On the FlexTimer, the counter clock source passes through a
+ prescaler (i.e. a counter). This acts like a clock
+ divider.
+
What: /sys/bus/counter/devices/counterX/countY/preset
KernelVersion: 5.2
Contact: linux-iio@vger.kernel.org
@@ -193,6 +203,15 @@ Description:
both edges:
Any state transition.
+What: /sys/bus/counter/devices/counterX/countY/spike_filter_ns
+KernelVersion: 5.14
+Contact: linux-iio@vger.kernel.org
+Description:
+ If the counter device supports a programmable spike filter, this
+ attribute indicates the filter length in nanoseconds: noise pulses
+ shorter than or equal to the configured value are ignored. A value
+ of 0 means the filter is disabled.
+
What: /sys/bus/counter/devices/counterX/name
KernelVersion: 5.2
Contact: linux-iio@vger.kernel.org
@@ -215,11 +234,45 @@ Description:
Read-only attribute that indicates the total number of Signals
belonging to the Counter.
-What: /sys/bus/counter/devices/counterX/signalY/signal
+What: /sys/bus/counter/devices/counterX/signalY/cable_fault
+KernelVersion: 5.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates whether a differential
+ encoder cable fault (not connected or loose wires) is detected
+ for the respective channel of Signal Y. Valid attribute values
+ are boolean. Detection must first be enabled via the
+ corresponding cable_fault_enable attribute.
+
+What: /sys/bus/counter/devices/counterX/signalY/cable_fault_enable
+KernelVersion: 5.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Whether detection of differential encoder cable faults for the
+ respective channel of Signal Y is enabled. Valid attribute
+ values are boolean.
+
+What: /sys/bus/counter/devices/counterX/signalY/filter_clock_prescaler
+KernelVersion: 5.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Filter clock factor for input Signal Y. This prescaler value
+ affects the inputs of both quadrature pair signals.
+
+What: /sys/bus/counter/devices/counterX/signalY/index_polarity
KernelVersion: 5.2
Contact: linux-iio@vger.kernel.org
Description:
- Signal data of Signal Y represented as a string.
+ Active level of index input Signal Y; irrelevant in
+ non-synchronous load mode.
+
+What: /sys/bus/counter/devices/counterX/signalY/index_polarity_available
+What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode_available
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ The discrete set of available values for the respective Signal Y
+ configuration is listed in this file.
What: /sys/bus/counter/devices/counterX/signalY/name
KernelVersion: 5.2
@@ -228,3 +281,31 @@ Description:
Read-only attribute that indicates the device-specific name of
Signal Y. If possible, this should match the name of the
respective signal as it appears in the device datasheet.
+
+What: /sys/bus/counter/devices/counterX/signalY/signal
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Signal data of Signal Y represented as a string.
+
+What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure the counter associated with Signal Y for
+ non-synchronous or synchronous load mode. Synchronous load mode
+ cannot be selected in non-quadrature (Pulse-Direction) clock
+ mode.
+
+ non-synchronous:
+ A logic low level is the active level at this index
+ input. The index function (as enabled via preset_enable)
+ is performed directly on the active level of the index
+ input.
+
+ synchronous:
+ Intended for interfacing with encoder Index output in
+ quadrature clock mode. The active level is configured
+ via index_polarity. The index function (as enabled via
+ preset_enable) is performed synchronously with the
+ quadrature clock on the active level of the index input.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
deleted file mode 100644
index eac32180c40d..000000000000
--- a/Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
+++ /dev/null
@@ -1,61 +0,0 @@
-What: /sys/bus/counter/devices/counterX/signalY/cable_fault
-KernelVersion: 5.7
-Contact: linux-iio@vger.kernel.org
-Description:
- Read-only attribute that indicates whether a differential
- encoder cable fault (not connected or loose wires) is detected
- for the respective channel of Signal Y. Valid attribute values
- are boolean. Detection must first be enabled via the
- corresponding cable_fault_enable attribute.
-
-What: /sys/bus/counter/devices/counterX/signalY/cable_fault_enable
-KernelVersion: 5.7
-Contact: linux-iio@vger.kernel.org
-Description:
- Whether detection of differential encoder cable faults for the
- respective channel of Signal Y is enabled. Valid attribute
- values are boolean.
-
-What: /sys/bus/counter/devices/counterX/signalY/filter_clock_prescaler
-KernelVersion: 5.7
-Contact: linux-iio@vger.kernel.org
-Description:
- Filter clock factor for input Signal Y. This prescaler value
- affects the inputs of both quadrature pair signals.
-
-What: /sys/bus/counter/devices/counterX/signalY/index_polarity
-KernelVersion: 5.2
-Contact: linux-iio@vger.kernel.org
-Description:
- Active level of index input Signal Y; irrelevant in
- non-synchronous load mode.
-
-What: /sys/bus/counter/devices/counterX/signalY/index_polarity_available
-What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode_available
-KernelVersion: 5.2
-Contact: linux-iio@vger.kernel.org
-Description:
- Discrete set of available values for the respective Signal Y
- configuration are listed in this file.
-
-What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode
-KernelVersion: 5.2
-Contact: linux-iio@vger.kernel.org
-Description:
- Configure the counter associated with Signal Y for
- non-synchronous or synchronous load mode. Synchronous load mode
- cannot be selected in non-quadrature (Pulse-Direction) clock
- mode.
-
- non-synchronous:
- A logic low level is the active level at this index
- input. The index function (as enabled via preset_enable)
- is performed directly on the active level of the index
- input.
-
- synchronous:
- Intended for interfacing with encoder Index output in
- quadrature clock mode. The active level is configured
- via index_polarity. The index function (as enabled via
- preset_enable) is performed synchronously with the
- quadrature clock on the active level of the index input.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec b/Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec
deleted file mode 100644
index 7d2e7b363467..000000000000
--- a/Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec
+++ /dev/null
@@ -1,16 +0,0 @@
-What: /sys/bus/counter/devices/counterX/countY/prescaler_available
-KernelVersion: 5.2
-Contact: linux-iio@vger.kernel.org
-Description:
- Discrete set of available values for the respective Count Y
- configuration are listed in this file. Values are delimited by
- newline characters.
-
-What: /sys/bus/counter/devices/counterX/countY/prescaler
-KernelVersion: 5.2
-Contact: linux-iio@vger.kernel.org
-Description:
- Configure the prescaler value associated with Count Y.
- On the FlexTimer, the counter clock source passes through a
- prescaler (i.e. a counter). This acts like a clock
- divider.
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 2fe7490ad6a8..0b6a2e6e8fbb 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -24,3 +24,106 @@ Description:
(RO) "Persistent Only Capacity" as bytes. Represents the
identically named field in the Identify Memory Device Output
Payload in the CXL-2.0 specification.
+
+What: /sys/bus/cxl/devices/*/devtype
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ CXL device objects export the devtype attribute which mirrors
+ the same value communicated in the DEVTYPE environment variable
+ for uevents for devices on the "cxl" bus.
+
+What: /sys/bus/cxl/devices/portX/uport
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ CXL port objects are enumerated from either a platform firmware
+ device (ACPI0017 and ACPI0016) or PCIe switch upstream port with
+ CXL component registers. The 'uport' symlink connects the CXL
+ portX object to the device that published the CXL port
+ capability.
+
+What: /sys/bus/cxl/devices/portX/dportY
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ CXL port objects are enumerated from either a platform firmware
+ device (ACPI0017 and ACPI0016) or PCIe switch upstream port with
+ CXL component registers. The 'dportY' symlink identifies one or
+ more downstream ports that the upstream port may target in its
+ decode of CXL memory resources. The 'Y' integer reflects the
+ hardware port unique-id used in the hardware decoder target
+ list.
+
+What: /sys/bus/cxl/devices/decoderX.Y
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ CXL decoder objects are enumerated from either a platform
+ firmware description, or a CXL HDM decoder register set in a
+ PCIe device (see CXL 2.0 section 8.2.5.12 CXL HDM Decoder
+ Capability Structure). The 'X' in decoderX.Y represents the
+ cxl_port container of this decoder, and 'Y' represents the
+ instance id of a given decoder resource.
+
+What: /sys/bus/cxl/devices/decoderX.Y/{start,size}
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ The 'start' and 'size' attributes together convey the physical
+ address base and number of bytes mapped in the decoder's decode
+ window. For decoders of devtype "cxl_decoder_root" the address
+ range is fixed. For decoders of devtype "cxl_decoder_switch" the
+ address is bounded by the decode range of the cxl_port ancestor
+ of the decoder's cxl_port, and dynamically updates based on the
+ active memory regions in that address space.
+
+What: /sys/bus/cxl/devices/decoderX.Y/locked
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ CXL HDM decoders have the capability to lock the configuration
+ until the next device reset. For decoders of devtype
+ "cxl_decoder_root" there is no standard facility to unlock them.
+ For decoders of devtype "cxl_decoder_switch", a secondary bus
+ reset of the PCIe bridge that provides the bus for this
+ decoder's uport unlocks / resets the decoder.
+
+What: /sys/bus/cxl/devices/decoderX.Y/target_list
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ Display a comma separated list of the current decoder target
+ configuration. The list is ordered by the current configured
+ interleave order of the decoder's dport instances. Each entry in
+ the list is a dport id.
+
+What: /sys/bus/cxl/devices/decoderX.Y/cap_{pmem,ram,type2,type3}
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ When a CXL decoder is of devtype "cxl_decoder_root", it
+ represents a fixed memory window identified by platform
+ firmware. A fixed window may only support a subset of memory
+ types. The 'cap_*' attributes indicate whether persistent
+ memory, volatile memory, accelerator memory, and / or expander
+ memory may be mapped behind this decoder's memory window.
+
+What: /sys/bus/cxl/devices/decoderX.Y/target_type
+Date: June, 2021
+KernelVersion: v5.14
+Contact: linux-cxl@vger.kernel.org
+Description:
+ When a CXL decoder is of devtype "cxl_decoder_switch", it can
+ optionally decode either accelerator memory (type-2) or expander
+ memory (type-3). The 'target_type' attribute indicates the
+ current setting which may dynamically change based on what
+ memory regions are activated in this decode hierarchy.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 6f98b6a9b785..6ad47a67521c 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -455,6 +455,19 @@ Contact: linux-iio@vger.kernel.org
Description:
Hardware applied calibration offset (assumed to fix production
inaccuracies).
+ icm42600: For this device, values are real physical offsets
+ expressed in SI units (m/s^2 for accelerometers and rad/s
+ for the gyroscope).
+
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_calibbias_available
+KernelVersion: 5.8
+Contact: linux-iio@vger.kernel.org
+Description:
+ Available values of calibbias. May be expressed as either of:
+
+ - a small discrete set of values like "0 2 4 6 8"
+ - a range specified as "[min step max]"
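Either form is easy to tell apart when read from user space; a minimal C sketch (illustrative only, not part of the ABI) might be:

    #include <stdio.h>
    #include <string.h>

    /* Interpret an IIO "_available" string: either a discrete list such
     * as "0 2 4 6 8" or a range written as "[min step max]". */
    static void parse_available(const char *s)
    {
            double min, step, max;
            char copy[256], *tok;

            if (sscanf(s, "[%lf %lf %lf]", &min, &step, &max) == 3) {
                    printf("range: min=%g step=%g max=%g\n", min, step, max);
                    return;
            }

            /* otherwise treat it as a whitespace-separated list of values */
            strncpy(copy, s, sizeof(copy) - 1);
            copy[sizeof(copy) - 1] = '\0';
            for (tok = strtok(copy, " \n"); tok; tok = strtok(NULL, " \n"))
                    printf("value: %s\n", tok);
    }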
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale
@@ -652,6 +665,25 @@ Description:
Output frequency for channel Y in Hz. The number must always be
specified and unique if the output corresponds to a single
channel.
+ Some drivers have additional constraints:
+ The ADF4371 has an integrated VCO with fundamental output
+ frequency ranging from 4000000000 Hz to 8000000000 Hz.
+
+ out_altvoltage0_frequency:
+ A divide by 1, 2, 4, 8, 16, 32 or 64 circuit generates
+ frequencies from 62500000 Hz to 8000000000 Hz.
+ out_altvoltage1_frequency:
+ This channel duplicates the channel 0 frequency.
+ out_altvoltage2_frequency:
+ A frequency doubler generates frequencies from
+ 8000000000 Hz to 16000000000 Hz.
+ out_altvoltage3_frequency:
+ A frequency quadrupler generates frequencies from
+ 16000000000 Hz to 32000000000 Hz.
+
+ Note: writes to one of the channels will affect the frequency of
+ all the other channels, since it involves changing the VCO
+ fundamental output frequency.
What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_phase
KernelVersion: 3.4.0
@@ -663,6 +695,17 @@ Description:
specified and unique if the output corresponds to a single
channel.
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_raw
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set/get output current for channel Y. Units after application
+ of scale and offset are milliamps.
+ For some devices, current channels are used to specify the
+ current supplied to elements used in taking a measurement of
+ a different type, e.g. LED currents.
+
What: /sys/bus/iio/devices/iio:deviceX/events
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
@@ -1195,16 +1238,12 @@ Description:
The name of the trigger source being used, as per string given
in /sys/class/iio/triggerY/name.
-What: /sys/bus/iio/devices/iio:deviceX/buffer/length
-KernelVersion: 2.6.35
What: /sys/bus/iio/devices/iio:deviceX/bufferY/length
KernelVersion: 5.11
Contact: linux-iio@vger.kernel.org
Description:
Number of scans contained by the buffer.
-What: /sys/bus/iio/devices/iio:deviceX/buffer/enable
-KernelVersion: 2.6.35
What: /sys/bus/iio/devices/iio:deviceX/bufferY/enable
KernelVersion: 5.11
Contact: linux-iio@vger.kernel.org
@@ -1212,8 +1251,6 @@ Description:
Actually start the buffer capture up. Will start trigger
if first device and appropriate.
-What: /sys/bus/iio/devices/iio:deviceX/scan_elements
-KernelVersion: 2.6.37
What: /sys/bus/iio/devices/iio:deviceX/bufferY
KernelVersion: 5.11
Contact: linux-iio@vger.kernel.org
@@ -1224,34 +1261,6 @@ Description:
Since kernel 5.11 the scan_elements attributes are merged into
the bufferY directory, to be configurable per buffer.
-What: /sys/.../iio:deviceX/scan_elements/in_accel_x_en
-What: /sys/.../iio:deviceX/scan_elements/in_accel_y_en
-What: /sys/.../iio:deviceX/scan_elements/in_accel_z_en
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_x_en
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_y_en
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_z_en
-What: /sys/.../iio:deviceX/scan_elements/in_magn_x_en
-What: /sys/.../iio:deviceX/scan_elements/in_magn_y_en
-What: /sys/.../iio:deviceX/scan_elements/in_magn_z_en
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_en
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_en
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_tilt_comp_en
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_tilt_comp_en
-What: /sys/.../iio:deviceX/scan_elements/in_timestamp_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_supply_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY-voltageZ_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_i_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_q_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_i_en
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_q_en
-What: /sys/.../iio:deviceX/scan_elements/in_incli_x_en
-What: /sys/.../iio:deviceX/scan_elements/in_incli_y_en
-What: /sys/.../iio:deviceX/scan_elements/in_pressureY_en
-What: /sys/.../iio:deviceX/scan_elements/in_pressure_en
-What: /sys/.../iio:deviceX/scan_elements/in_rot_quaternion_en
-What: /sys/.../iio:deviceX/scan_elements/in_proximity_en
-KernelVersion: 2.6.37
What: /sys/.../iio:deviceX/bufferY/in_accel_x_en
What: /sys/.../iio:deviceX/bufferY/in_accel_y_en
What: /sys/.../iio:deviceX/bufferY/in_accel_z_en
@@ -1284,23 +1293,6 @@ Contact: linux-iio@vger.kernel.org
Description:
Scan element control for triggered data capture.
-What: /sys/.../iio:deviceX/scan_elements/in_accel_type
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_type
-What: /sys/.../iio:deviceX/scan_elements/in_magn_type
-What: /sys/.../iio:deviceX/scan_elements/in_incli_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_supply_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_i_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_q_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_i_type
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_q_type
-What: /sys/.../iio:deviceX/scan_elements/in_timestamp_type
-What: /sys/.../iio:deviceX/scan_elements/in_pressureY_type
-What: /sys/.../iio:deviceX/scan_elements/in_pressure_type
-What: /sys/.../iio:deviceX/scan_elements/in_rot_quaternion_type
-What: /sys/.../iio:deviceX/scan_elements/in_proximity_type
-KernelVersion: 2.6.37
What: /sys/.../iio:deviceX/bufferY/in_accel_type
What: /sys/.../iio:deviceX/bufferY/in_anglvel_type
What: /sys/.../iio:deviceX/bufferY/in_magn_type
@@ -1347,33 +1339,6 @@ Description:
If the type parameter can take one of a small set of values,
this attribute lists them.
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_index
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_supply_index
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_i_index
-What: /sys/.../iio:deviceX/scan_elements/in_voltageY_q_index
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_i_index
-What: /sys/.../iio:deviceX/scan_elements/in_voltage_q_index
-What: /sys/.../iio:deviceX/scan_elements/in_accel_x_index
-What: /sys/.../iio:deviceX/scan_elements/in_accel_y_index
-What: /sys/.../iio:deviceX/scan_elements/in_accel_z_index
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_x_index
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_y_index
-What: /sys/.../iio:deviceX/scan_elements/in_anglvel_z_index
-What: /sys/.../iio:deviceX/scan_elements/in_magn_x_index
-What: /sys/.../iio:deviceX/scan_elements/in_magn_y_index
-What: /sys/.../iio:deviceX/scan_elements/in_magn_z_index
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_index
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_index
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_magnetic_tilt_comp_index
-What: /sys/.../iio:deviceX/scan_elements/in_rot_from_north_true_tilt_comp_index
-What: /sys/.../iio:deviceX/scan_elements/in_incli_x_index
-What: /sys/.../iio:deviceX/scan_elements/in_incli_y_index
-What: /sys/.../iio:deviceX/scan_elements/in_timestamp_index
-What: /sys/.../iio:deviceX/scan_elements/in_pressureY_index
-What: /sys/.../iio:deviceX/scan_elements/in_pressure_index
-What: /sys/.../iio:deviceX/scan_elements/in_rot_quaternion_index
-What: /sys/.../iio:deviceX/scan_elements/in_proximity_index
-KernelVersion: 2.6.37
What: /sys/.../iio:deviceX/bufferY/in_voltageY_index
What: /sys/.../iio:deviceX/bufferY/in_voltageY_supply_index
What: /sys/.../iio:deviceX/bufferY/in_voltageY_i_index
@@ -1613,8 +1578,6 @@ Description:
Specifies number of seconds in which we compute the steps
that occur in order to decide if the consumer is making steps.
-What: /sys/bus/iio/devices/iio:deviceX/buffer/watermark
-KernelVersion: 4.2
What: /sys/bus/iio/devices/iio:deviceX/bufferY/watermark
KernelVersion: 5.11
Contact: linux-iio@vger.kernel.org
@@ -1633,8 +1596,6 @@ Description:
the available samples after the timeout expires and thus have a
maximum delay guarantee.
-What: /sys/bus/iio/devices/iio:deviceX/buffer/data_available
-KernelVersion: 4.16
What: /sys/bus/iio/devices/iio:deviceX/bufferY/data_available
KernelVersion: 5.11
Contact: linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4371 b/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4371
index 544548ee794c..7fe6935d1448 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4371
+++ b/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4371
@@ -1,28 +1,3 @@
-What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency
-KernelVersion:
-Contact: linux-iio@vger.kernel.org
-Description:
- Stores the PLL frequency in Hz for channel Y.
- Reading returns the actual frequency in Hz.
- The ADF4371 has an integrated VCO with fundamendal output
- frequency ranging from 4000000000 Hz 8000000000 Hz.
-
- out_altvoltage0_frequency:
- A divide by 1, 2, 4, 8, 16, 32 or circuit generates
- frequencies from 62500000 Hz to 8000000000 Hz.
- out_altvoltage1_frequency:
- This channel duplicates the channel 0 frequency
- out_altvoltage2_frequency:
- A frequency doubler generates frequencies from
- 8000000000 Hz to 16000000000 Hz.
- out_altvoltage3_frequency:
- A frequency quadrupler generates frequencies from
- 16000000000 Hz to 32000000000 Hz.
-
- Note: writes to one of the channels will affect the frequency of
- all the other channels, since it involves changing the VCO
- fundamental output frequency.
-
What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_name
KernelVersion:
Contact: linux-iio@vger.kernel.org
@@ -34,11 +9,3 @@ Description:
out_altvoltage2_name: RF16x
out_altvoltage3_name: RF32x
-What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown
-KernelVersion:
-Contact: linux-iio@vger.kernel.org
-Description:
- This attribute allows the user to power down the PLL and it's
- RFOut buffers.
- Writing 1 causes the specified channel to power down.
- Clearing returns to normal operation.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x b/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x
index 66b621f10223..a8e04b41d9ff 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x
+++ b/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x
@@ -18,6 +18,8 @@ Description:
respectively which simply helper channels containing the
calculated difference in the value of stage 1 - 2 and 3 - 4.
The values are expressed in 24-bit twos complement.
+ The LED current for the stage is controlled via
+ out_currentY_raw.
What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_offset
Date: May 2016
@@ -35,11 +37,3 @@ Contact: Andrew F. Davis <afd@ti.com>
Description:
Get and set the resistance and the capacitance settings for the
Transimpedance Amplifier during the associated stage.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_currentY_raw
-Date: May 2016
-KernelVersion:
-Contact: Andrew F. Davis <afd@ti.com>
-Description:
- Get and set the LED current for the specified LED active during
- this stage. Y is the specific stage number.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-icm42600 b/Documentation/ABI/testing/sysfs-bus-iio-icm42600
deleted file mode 100644
index 0bf1fd4f5bf1..000000000000
--- a/Documentation/ABI/testing/sysfs-bus-iio-icm42600
+++ /dev/null
@@ -1,20 +0,0 @@
-What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibbias
-KernelVersion: 5.8
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware applied calibration offset (assumed to fix production
- inaccuracies). Values represent a real physical offset expressed
- in SI units (m/s^2 for accelerometer and rad/s for gyroscope).
-
-What: /sys/bus/iio/devices/iio:deviceX/in_accel_calibbias_available
-What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_calibbias_available
-KernelVersion: 5.8
-Contact: linux-iio@vger.kernel.org
-Description:
- Range of available values for hardware offset. Values in SI
- units (m/s^2 for accelerometer and rad/s for gyroscope).
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-lm3533-als b/Documentation/ABI/testing/sysfs-bus-iio-light-lm3533-als
index 22c5ea670971..c476d48d0f82 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-light-lm3533-als
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-lm3533-als
@@ -41,14 +41,6 @@ Description:
Get the current light zone (0..4) as defined by the
in_illuminance0_threshY_{falling,rising} thresholds.
-What: /sys/bus/iio/devices/iio:deviceX/out_currentY_raw
-Date: May 2012
-KernelVersion: 3.5
-Contact: Johan Hovold <jhovold@gmail.com>
-Description:
- Get output current for channel Y (0..255), that is,
- out_currentY_currentZ_raw, where Z is the current zone.
-
What: /sys/bus/iio/devices/iio:deviceX/out_currentY_currentZ_raw
Date: May 2012
KernelVersion: 3.5
@@ -59,3 +51,6 @@ Description:
These values correspond to the ALS-mapper target registers for
ALS-mapper Y + 1.
+
+ Note that out_currentY_raw provides the current for the
+ current zone.
diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
index 92e2db0e2d3d..95254cec92bf 100644
--- a/Documentation/ABI/testing/sysfs-bus-papr-pmem
+++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem
@@ -39,9 +39,11 @@ KernelVersion: v5.9
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
Description:
(RO) Report various performance stats related to papr-scm NVDIMM
- device. Each stat is reported on a new line with each line
- composed of a stat-identifier followed by it value. Below are
- currently known dimm performance stats which are reported:
+ device. This attribute is only available for NVDIMM devices
+ that support reporting NVDIMM performance stats. Each stat is
+ reported on a new line with each line composed of a
+	stat-identifier followed by its value. Below are currently known
+ dimm performance stats which are reported:
* "CtlResCt" : Controller Reset Count
* "CtlResTm" : Controller Reset Elapsed Time
diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
new file mode 100644
index 000000000000..d76cd3946434
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
@@ -0,0 +1,31 @@
+What: /sys/bus/spi/devices/.../spi-nor/jedec_id
+Date: April 2021
+KernelVersion: 5.14
+Contact: linux-mtd@lists.infradead.org
+Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the
+ flash device.
+
+
+What: /sys/bus/spi/devices/.../spi-nor/manufacturer
+Date: April 2021
+KernelVersion: 5.14
+Contact: linux-mtd@lists.infradead.org
+Description: (RO) Manufacturer of the SPI NOR flash.
+
+
+What: /sys/bus/spi/devices/.../spi-nor/partname
+Date: April 2021
+KernelVersion: 5.14
+Contact: linux-mtd@lists.infradead.org
+Description: (RO) Part name of the SPI NOR flash.
+
+
+What: /sys/bus/spi/devices/.../spi-nor/sfdp
+Date: April 2021
+KernelVersion: 5.14
+Contact: linux-mtd@lists.infradead.org
+Description: (RO) This attribute is only present if the SPI NOR flash
+ device supports the "Read SFDP" command (5Ah).
+
+ If present, it contains the complete SFDP (serial flash
+ discoverable parameters) binary data of the flash.
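
As a hedged illustration of reading these attributes (the device name spi0.0
below is hypothetical; adjust the path for your system)::

    cat /sys/bus/spi/devices/spi0.0/spi-nor/jedec_id
    cat /sys/bus/spi/devices/spi0.0/spi-nor/manufacturer
    cat /sys/bus/spi/devices/spi0.0/spi-nor/partname
    # sfdp is only present if the flash supports "Read SFDP"; it is binary
    # data, so dump it rather than printing it directly to the terminal.
    xxd /sys/bus/spi/devices/spi0.0/spi-nor/sfdp | head
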
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index c41c68f64693..95c21d6c9a84 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -1,4 +1,4 @@
-What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl
+What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl
Date: Jun 2018
KernelVersion: 4.17
Contact: thunderbolt-software@lists.01.org
@@ -21,7 +21,7 @@ Description: Holds a comma separated list of device unique_ids that
If a device is authorized automatically during boot its
boot attribute is set to 1.
-What: /sys/bus/thunderbolt/devices/.../domainX/deauthorization
+What: /sys/bus/thunderbolt/devices/.../domainX/deauthorization
Date: May 2021
KernelVersion: 5.12
Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
@@ -30,7 +30,7 @@ Description: This attribute tells whether the system supports
de-authorize PCIe tunnel by writing 0 to authorized
attribute under each device.
-What: /sys/bus/thunderbolt/devices/.../domainX/iommu_dma_protection
+What: /sys/bus/thunderbolt/devices/.../domainX/iommu_dma_protection
Date: Mar 2019
KernelVersion: 4.21
Contact: thunderbolt-software@lists.01.org
@@ -39,7 +39,7 @@ Description: This attribute tells whether the system uses IOMMU
it is not (DMA protection is solely based on Thunderbolt
security levels).
-What: /sys/bus/thunderbolt/devices/.../domainX/security
+What: /sys/bus/thunderbolt/devices/.../domainX/security
Date: Sep 2017
KernelVersion: 4.13
Contact: thunderbolt-software@lists.01.org
@@ -61,7 +61,7 @@ Description: This attribute holds current Thunderbolt security level
the BIOS.
======= ==================================================
-What: /sys/bus/thunderbolt/devices/.../authorized
+What: /sys/bus/thunderbolt/devices/.../authorized
Date: Sep 2017
KernelVersion: 4.13
Contact: thunderbolt-software@lists.01.org
@@ -95,14 +95,14 @@ Description: This attribute is used to authorize Thunderbolt devices
EKEYREJECTED if the challenge response did not match.
== ========================================================
-What: /sys/bus/thunderbolt/devices/.../boot
+What: /sys/bus/thunderbolt/devices/.../boot
Date: Jun 2018
KernelVersion: 4.17
Contact: thunderbolt-software@lists.01.org
Description: This attribute contains 1 if Thunderbolt device was already
authorized on boot and 0 otherwise.
-What: /sys/bus/thunderbolt/devices/.../generation
+What: /sys/bus/thunderbolt/devices/.../generation
Date: Jan 2020
KernelVersion: 5.5
Contact: Christian Kellner <christian@kellner.me>
@@ -110,7 +110,7 @@ Description: This attribute contains the generation of the Thunderbolt
controller associated with the device. It will contain 4
for USB4.
-What: /sys/bus/thunderbolt/devices/.../key
+What: /sys/bus/thunderbolt/devices/.../key
Date: Sep 2017
KernelVersion: 4.13
Contact: thunderbolt-software@lists.01.org
@@ -213,12 +213,15 @@ Description: When new NVM image is written to the non-active NVM
restarted with the new NVM firmware. If the image
verification fails an error code is returned instead.
- This file will accept writing values "1" or "2"
+ This file will accept writing values "1", "2" or "3".
- Writing "1" will flush the image to the storage
area and authenticate the image in one action.
- Writing "2" will run some basic validation on the image
and flush it to the storage area.
+ - Writing "3" will authenticate the image that is
+ currently written in the storage area. This is only
+ supported with USB4 devices and retimers.
When read holds status of the last authentication
operation if an error occurred during the process. This
@@ -226,6 +229,20 @@ Description: When new NVM image is written to the non-active NVM
based mailbox before the device is power cycled. Writing
0 here clears the status.
+What: /sys/bus/thunderbolt/devices/.../nvm_authenticate_on_disconnect
+Date: Oct 2020
+KernelVersion: v5.9
+Contact: Mario Limonciello <mario.limonciello@dell.com>
+Description: For supported devices, automatically authenticate the new Thunderbolt
+ image when the device is disconnected from the host system.
+
+ This file will accept writing values "1" or "2"
+
+ - Writing "1" will flush the image to the storage
+ area and prepare the device for authentication on disconnect.
+ - Writing "2" will run some basic validation on the image
+ and flush it to the storage area.
+
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/key
Date: Jan 2018
KernelVersion: 4.15
@@ -276,6 +293,39 @@ Contact: thunderbolt-software@lists.01.org
Description: This contains XDomain service specific settings as
bitmask. Format: %x
+What: /sys/bus/thunderbolt/devices/usb4_portX/link
+Date: Sep 2021
+KernelVersion: v5.14
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: Returns the current link mode. Possible values are
+ "usb4", "tbt" and "none".
+
+What: /sys/bus/thunderbolt/devices/usb4_portX/offline
+Date: Sep 2021
+KernelVersion: v5.14
+Contact: Rajmohan Mani <rajmohan.mani@intel.com>
+Description: Writing 1 to this attribute puts the USB4 port into
+ offline mode. Only allowed when there is nothing
+ connected to the port (link attribute returns "none").
+ Once the port is in offline mode it does not receive any
+ hotplug events. This is used to update NVM firmware of
+ on-board retimers. Writing 0 puts the port back to
+ online mode.
+
+ This attribute is only visible if the platform supports
+ powering on retimers when there is no cable connected.
+
+What: /sys/bus/thunderbolt/devices/usb4_portX/rescan
+Date: Sep 2021
+KernelVersion: v5.14
+Contact: Rajmohan Mani <rajmohan.mani@intel.com>
+Description: When the USB4 port is in offline mode writing 1 to this
+ attribute forces rescan of the sideband for on-board
+	retimers. Each retimer appears under the USB4 port as if
+	the USB4 link was up. These retimers act in the same way
+	as if the cable was connected, so upgrading their NVM
+ firmware can be done the usual way.
+
What: /sys/bus/thunderbolt/devices/<device>:<port>.<index>/device
Date: Oct 2020
KernelVersion: v5.9
@@ -308,17 +358,3 @@ Date: Oct 2020
KernelVersion: v5.9
Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: Retimer vendor identifier read from the hardware.
-
-What: /sys/bus/thunderbolt/devices/.../nvm_authenticate_on_disconnect
-Date: Oct 2020
-KernelVersion: v5.9
-Contact: Mario Limonciello <mario.limonciello@dell.com>
-Description: For supported devices, automatically authenticate the new Thunderbolt
- image when the device is disconnected from the host system.
-
- This file will accept writing values "1" or "2"
-
- - Writing "1" will flush the image to the storage
- area and prepare the device for authentication on disconnect.
- - Writing "2" will run some basic validation on the image
- and flush it to the storage area.
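
As a hedged sketch of the new "3" value described above (the device name 0-1 is
hypothetical and must refer to a USB4 device or retimer), authenticating an
image that is already in the storage area could look like::

    echo 3 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
    # If an error occurred, the attribute reads back the status of the
    # last authentication operation; writing 0 clears it.
    cat /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
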
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index bf2c1968525f..73eb23bc1f34 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -154,17 +154,6 @@ Description:
files hold a string value (enable or disable) indicating whether
or not USB3 hardware LPM U1 or U2 is enabled for the device.
-What: /sys/bus/usb/devices/.../removable
-Date: February 2012
-Contact: Matthew Garrett <mjg@redhat.com>
-Description:
- Some information about whether a given USB device is
- physically fixed to the platform can be inferred from a
- combination of hub descriptor bits and platform-specific data
- such as ACPI. This file will read either "removable" or
- "fixed" if the information is available, and "unknown"
- otherwise.
-
What: /sys/bus/usb/devices/.../ltm_capable
Date: July 2012
Contact: Sarah Sharp <sarah.a.sharp@linux.intel.com>
diff --git a/Documentation/ABI/testing/sysfs-class-spi-eeprom b/Documentation/ABI/testing/sysfs-class-spi-eeprom
new file mode 100644
index 000000000000..1ff757982079
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-spi-eeprom
@@ -0,0 +1,19 @@
+What: /sys/class/spi_master/spi<bus>/spi<bus>.<dev>/fram
+Date: June 2021
+KernelVersion: 5.14
+Contact: Jiri Prchal <jiri.prchal@aksignal.cz>
+Description:
+		Contains the FRAM binary data. Same as an EEPROM, just under
+		another file name to indicate that it uses a ferroelectric process.
+		It performs write operations at bus speed - no write delays.
+
+What: /sys/class/spi_master/spi<bus>/spi<bus>.<dev>/sernum
+Date: May 2021
+KernelVersion: 5.14
+Contact: Jiri Prchal <jiri.prchal@aksignal.cz>
+Description:
+ Contains the serial number of the Cypress FRAM (FM25VN) if it is
+		present. It will be displayed as an 8 byte hex string, as read
+ from the device.
+
+ This is a read-only attribute.
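
As a hedged example (the bus and device names below are hypothetical), the
serial number can be read and the FRAM contents backed up with::

    cat /sys/class/spi_master/spi0/spi0.0/sernum
    dd if=/sys/class/spi_master/spi0/spi0.0/fram of=fram-backup.bin
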
diff --git a/Documentation/ABI/testing/sysfs-devices-removable b/Documentation/ABI/testing/sysfs-devices-removable
new file mode 100644
index 000000000000..bda6c320c8d3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-removable
@@ -0,0 +1,18 @@
+What: /sys/devices/.../removable
+Date: May 2021
+Contact: Rajat Jain <rajatxjain@gmail.com>
+Description:
+ Information about whether a given device can be removed from the
+ platform by the user. This is determined by its subsystem in a
+ bus / platform-specific way. This attribute is only present for
+ devices that can support determining such information:
+
+ "removable": device can be removed from the platform by the user
+ "fixed": device is fixed to the platform / cannot be removed
+ by the user.
+ "unknown": The information is unavailable / cannot be deduced.
+
+ Currently this is only supported by USB (which infers the
+ information from a combination of hub descriptor bits and
+ platform-specific data such as ACPI) and PCI (which gets this
+ from ACPI / device tree).
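
As a hedged example (the device path is hypothetical), reading the attribute
simply returns one of the three strings listed above::

    cat /sys/bus/usb/devices/1-2/removable
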
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index eaac6898f0c0..b4a5d55fa19f 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -995,6 +995,132 @@ Description: This entry shows the target state of an UFS UIC link
The file is read only.
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/monitor_enable
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the status of performance monitor enablement
+ and it can be used to start/stop the monitor. When the monitor
+ is stopped, the performance data collected is also cleared.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/monitor_chunk_size
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file tells the monitor to focus on requests transferring
+		data of a specific chunk size (in Bytes). 0 means any chunk size.
+		It can only be changed when the monitor is disabled.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_total_sectors
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows how many sectors (in 512 Bytes) have been
+ sent from device to host after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_total_busy
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows how long (in micro seconds) has been spent
+ sending data from device to host after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_nr_requests
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows how many read requests have been sent after
+ monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_max
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the maximum latency (in micro seconds) of
+ read requests after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_min
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the minimum latency (in micro seconds) of
+ read requests after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_avg
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the average latency (in micro seconds) of
+ read requests after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_sum
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the total latency (in micro seconds) of
+ read requests sent after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_total_sectors
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows how many sectors (in 512 Bytes) have been sent
+ from host to device after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_total_busy
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows how long (in micro seconds) has been spent
+ sending data from host to device after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_nr_requests
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows how many write requests have been sent after
+ monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_max
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the maximum latency (in micro seconds) of write
+ requests after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_min
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the minimum latency (in micro seconds) of write
+ requests after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_avg
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the average latency (in micro seconds) of write
+ requests after monitor gets started.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_sum
+Date: January 2021
+Contact: Can Guo <cang@codeaurora.org>
+Description: This file shows the total latency (in micro seconds) of write
+ requests after monitor gets started.
+
+ The file is read only.
+
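As a hedged usage sketch tying the monitor attributes above together (the
device name 1d84000.ufshc is hypothetical; adjust it for your system)::

    # monitor_chunk_size can only be changed while the monitor is disabled.
    echo 4096 > /sys/bus/platform/drivers/ufshcd/1d84000.ufshc/monitor/monitor_chunk_size
    echo 1 > /sys/bus/platform/drivers/ufshcd/1d84000.ufshc/monitor/monitor_enable
    # ... run an I/O workload ...
    cat /sys/bus/platform/drivers/ufshcd/1d84000.ufshc/monitor/read_total_sectors
    cat /sys/bus/platform/drivers/ufshcd/1d84000.ufshc/monitor/write_req_latency_avg
    # Stopping the monitor also clears the collected statistics.
    echo 0 > /sys/bus/platform/drivers/ufshcd/1d84000.ufshc/monitor/monitor_enable
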
What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_presv_us_en
Date: June 2020
Contact: Asutosh Das <asutoshd@codeaurora.org>
diff --git a/Documentation/ABI/testing/sysfs-firmware-memmap b/Documentation/ABI/testing/sysfs-firmware-memmap
index 1f6f4d3a32c0..9205122fa4b1 100644
--- a/Documentation/ABI/testing/sysfs-firmware-memmap
+++ b/Documentation/ABI/testing/sysfs-firmware-memmap
@@ -56,6 +56,10 @@ Description:
- System RAM
- ACPI Tables
- ACPI Non-volatile Storage
+ - Unusable memory
+ - Persistent Memory (legacy)
+ - Persistent Memory
+ - Soft Reserved
- reserved
Following shell snippet can be used to display that memory
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 5d9ae27bd462..ef4b9218ae1e 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -203,7 +203,34 @@ Description: Shows total written kbytes issued to disk.
What: /sys/fs/f2fs/<disk>/features
Date: July 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
-Description: Shows all enabled features in current device.
+Description:	<deprecated: should use /sys/fs/f2fs/<disk>/feature_list/>
+ Shows all enabled features in current device.
+ Supported features:
+ encryption, blkzoned, extra_attr, projquota, inode_checksum,
+ flexible_inline_xattr, quota_ino, inode_crtime, lost_found,
+ verity, sb_checksum, casefold, readonly, compression, pin_file.
+
+What: /sys/fs/f2fs/<disk>/feature_list/
+Date: June 2021
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:	Expands /sys/fs/f2fs/<disk>/features to meet the sysfs rule.
+ Supported on-disk features:
+ encryption, block_zoned (aka blkzoned), extra_attr,
+ project_quota (aka projquota), inode_checksum,
+ flexible_inline_xattr, quota_ino, inode_crtime, lost_found,
+ verity, sb_checksum, casefold, readonly, compression.
+		Note that pin_file is moved into /sys/fs/f2fs/features/.
+
+What: /sys/fs/f2fs/features/
+Date: July 2017
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: Shows all enabled kernel features.
+ Supported features:
+ encryption, block_zoned, extra_attr, project_quota,
+ inode_checksum, flexible_inline_xattr, quota_ino,
+ inode_crtime, lost_found, verity, sb_checksum,
+ casefold, readonly, compression, test_dummy_encryption_v2,
+ atomic_write, pin_file, encrypted_casefold.
What: /sys/fs/f2fs/<disk>/inject_rate
Date: May 2016
@@ -438,3 +465,31 @@ Description: Show the count of inode newly enabled for compression since mount.
Note that when the compression is disabled for the files, this count
doesn't decrease. If you write "0" here, you can initialize
compr_new_inode to "0".
+
+What: /sys/fs/f2fs/<disk>/atgc_candidate_ratio
+Date: May 2021
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:	When ATGC is on, it controls the candidate ratio in order to limit the
+		total number of potential victims among all candidates. The value should
+		be in the range [0, 100]; by default it is initialized to 20(%).
+
+What: /sys/fs/f2fs/<disk>/atgc_candidate_count
+Date: May 2021
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:	When ATGC is on, it controls the candidate count in order to limit the
+		total number of potential victims among all candidates; by default it
+		is initialized to 10 (sections).
+
+What: /sys/fs/f2fs/<disk>/atgc_age_weight
+Date: May 2021
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:	When ATGC is on, it controls the age weight to balance the weight
+		proportion between aging and valid blocks. The value should be in the
+		range [0, 100]; by default it is initialized to 60(%).
+
+What: /sys/fs/f2fs/<disk>/atgc_age_threshold
+Date: May 2021
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:	When ATGC is on, it controls the age threshold to bypass GCing young
+		candidates whose age is not beyond the threshold; by default it is
+		initialized to 604800 seconds (equal to 7 days).
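
As a hedged example of tuning these knobs (the disk name sda1 and the values
are purely illustrative)::

    echo 30 > /sys/fs/f2fs/sda1/atgc_candidate_ratio     # percent, 0..100
    echo 20 > /sys/fs/f2fs/sda1/atgc_candidate_count     # sections
    echo 60 > /sys/fs/f2fs/sda1/atgc_age_weight          # percent, 0..100
    echo 1209600 > /sys/fs/f2fs/sda1/atgc_age_threshold  # seconds (14 days)
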
diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp
index 2363ad810ddb..d378f57c1b73 100644
--- a/Documentation/ABI/testing/sysfs-ptp
+++ b/Documentation/ABI/testing/sysfs-ptp
@@ -33,6 +33,13 @@ Description:
frequency adjustment value (a positive integer) in
parts per billion.
+What: /sys/class/ptp/ptpN/max_vclocks
+Date: May 2021
+Contact: Yangbo Lu <yangbo.lu@nxp.com>
+Description:
+		This file contains the maximum number of PTP virtual clocks (vclocks).
+		Write an integer to re-configure it.
+
What: /sys/class/ptp/ptpN/n_alarms
Date: September 2010
Contact: Richard Cochran <richardcochran@gmail.com>
@@ -61,6 +68,19 @@ Description:
This file contains the number of programmable pins
offered by the PTP hardware clock.
+What: /sys/class/ptp/ptpN/n_vclocks
+Date: May 2021
+Contact: Yangbo Lu <yangbo.lu@nxp.com>
+Description:
+ This file contains the number of virtual PTP clocks in
+ use. By default, the value is 0 meaning that only the
+ physical clock is in use. Setting the value creates
+ the corresponding number of virtual clocks and causes
+ the physical clock to become free running. Setting the
+ value back to 0 deletes the virtual clocks and
+ switches the physical clock back to normal, adjustable
+ operation.
+
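As a hedged example (the clock name ptp0 is hypothetical), virtual clocks could
be created and torn down like this::

    echo 4 > /sys/class/ptp/ptp0/max_vclocks
    echo 2 > /sys/class/ptp/ptp0/n_vclocks   # physical clock becomes free running
    echo 0 > /sys/class/ptp/ptp0/n_vclocks   # delete vclocks, restore normal operation
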
What: /sys/class/ptp/ptpN/pins
Date: March 2014
Contact: Richard Cochran <richardcochran@gmail.com>
diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst
index 84ceebb08cac..187f43a03200 100644
--- a/Documentation/PCI/pci-error-recovery.rst
+++ b/Documentation/PCI/pci-error-recovery.rst
@@ -295,7 +295,7 @@ and let the driver restart normal I/O processing.
A driver can still return a critical failure for this function if
it can't get the device operational after reset. If the platform
previously tried a soft reset, it might now try a hard reset (power
-cycle) and then call slot_reset() again. It the device still can't
+cycle) and then call slot_reset() again. If the device still can't
be recovered, there is nothing more that can be done; the platform
will typically report a "permanent failure" in such a case. The
device will be considered "dead" in this case.
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index a648b423ba0e..11cdab037bff 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -21,7 +21,7 @@ Any code that happens after the end of a given RCU grace period is guaranteed
to see the effects of all accesses prior to the beginning of that grace
period that are within RCU read-side critical sections.
Similarly, any code that happens before the beginning of a given RCU grace
-period is guaranteed to see the effects of all accesses following the end
+period is guaranteed to not see the effects of all accesses following the end
of that grace period that are within RCU read-side critical sections.
Note well that RCU-sched read-side critical sections include any region
@@ -339,14 +339,14 @@ The diagram below shows the path of ordering if the leftmost
leftmost ``rcu_node`` structure offlines its last CPU and if the next
``rcu_node`` structure has no online CPUs).
-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-2.svg
The final ``rcu_gp_init()`` pass through the ``rcu_node`` tree traverses
breadth-first, setting each ``rcu_node`` structure's ``->gp_seq`` field
to the newly advanced value from the ``rcu_state`` structure, as shown
in the following diagram.
-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-3.svg
This change will also cause each CPU's next call to
``__note_gp_changes()`` to notice that a new grace period has started,
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index 452b7dcd7f6b..6a79f2e59396 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -89,13 +89,35 @@ you can use ``+=`` operator. For example::
In this case, the key ``foo`` has ``bar``, ``baz`` and ``qux``.
-However, a sub-key and a value can not co-exist under a parent key.
-For example, following config is NOT allowed.::
+Moreover, sub-keys and a value can coexist under a parent key.
+For example, following config is allowed.::
foo = value1
- foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist
- foo.bar := value2 # !ERROR! even with the override operator, this is NOT allowed.
+ foo.bar = value2
+ foo := value3 # This will update foo's value.
+
+Note, since there is no syntax to put a raw value directly under a
+structured key, you have to define it outside of the brace. For example::
+
+ foo {
+ bar = value1
+ bar {
+ baz = value2
+ qux = value3
+ }
+ }
+
+Also, the order of the value node under a key is fixed. If there
+are a value and subkeys, the value is always the first child node
+of the key. Thus if the user specifies subkeys first, e.g.::
+
+ foo.bar = value1
+ foo = value2
+
+In the program (and /proc/bootconfig), it will be shown as below::
+ foo = value2
+ foo.bar = value1
Comments
--------
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 3996b54158bf..01ba293a2d70 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -76,6 +76,11 @@ to change, such as less cores in the CPU list, then N and any ranges using N
will also change. Use the same on a small 4 core system, and "16-N" becomes
"16-3" and now the same boot input will be flagged as invalid (start > end).
+The special case-insensitive group name "all" selects all CPUs, so that
+"nohz_full=all" is equivalent to "nohz_full=0-N".
+
+The semantics of "N" and "all" are implemented at the bitmap level and hold for
+all users of bitmap_parse().
This document may not be entirely up to date and comprehensive. The command
"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0315407f5f57..bdb22006f713 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -301,6 +301,9 @@
allowed anymore to lift isolation
requirements as needed. This option
does not override iommu=pt
+ force_enable - Force enable the IOMMU on platforms known
+ to be buggy with IOMMU enabled. Use this
+ option with care.
amd_iommu_dump= [HW,X86-64]
Enable AMD IOMMU driver option to dump the ACPI table
@@ -1119,6 +1122,11 @@
the driver will use only 32-bit accessors to read/write
the device registers.
+ liteuart,<addr>
+ Start an early console on a litex serial port at the
+ specified address. The serial port must already be
+ setup and configured. Options are not yet supported.
+
meson,<addr>
Start an early, polled-mode console on a meson serial
port at the specified address. The serial port must
@@ -1594,6 +1602,23 @@
Documentation/admin-guide/mm/hugetlbpage.rst.
Format: size[KMG]
+ hugetlb_free_vmemmap=
+			[KNL] Requires CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+ enabled.
+ Allows heavy hugetlb users to free up some more
+ memory (6 * PAGE_SIZE for each 2MB hugetlb page).
+ Format: { on | off (default) }
+
+ on: enable the feature
+ off: disable the feature
+
+			When built with CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON=y,
+ the default is on.
+
+ This is not compatible with memory_hotplug.memmap_on_memory.
+ If both parameters are enabled, hugetlb_free_vmemmap takes
+ precedence over memory_hotplug.memmap_on_memory.
+
hung_task_panic=
[KNL] Should the hung task detector generate panics.
Format: 0 | 1
@@ -2014,7 +2039,7 @@
forcing Dual Address Cycle for PCI cards supporting
greater than 32-bit addressing.
- iommu.strict= [ARM64] Configure TLB invalidation behaviour
+ iommu.strict= [ARM64, X86] Configure TLB invalidation behaviour
Format: { "0" | "1" }
0 - Lazy mode.
Request that DMA unmap operations use deferred
@@ -2025,6 +2050,10 @@
1 - Strict mode (default).
DMA unmap operations invalidate IOMMU hardware TLBs
synchronously.
+ Note: on x86, the default behaviour depends on the
+ equivalent driver-specific parameters, but a strict
+ mode explicitly specified by either method takes
+ precedence.
iommu.passthrough=
[ARM64, X86] Configure DMA to bypass the IOMMU by default.
@@ -2860,6 +2889,10 @@
Note that even when enabled, there are a few cases where
the feature is not effective.
+ This is not compatible with hugetlb_free_vmemmap. If
+ both parameters are enabled, hugetlb_free_vmemmap takes
+ precedence over memory_hotplug.memmap_on_memory.
+
memtest= [KNL,X86,ARM,PPC,RISCV] Enable memtest
Format: <integer>
default : 0 <disable>
@@ -4326,6 +4359,11 @@
whole algorithm to behave better in low memory
condition.
+ rcutree.rcu_delay_page_cache_fill_msec= [KNL]
+ Set the page-cache refill delay (in milliseconds)
+			in response to low-memory conditions. The
+			permitted values are in the range 0:100000.
+
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
@@ -5644,12 +5682,25 @@
Note, echoing 1 into this file without the
tracepoint_printk kernel cmdline option has no effect.
+			The tp_printk_stop_on_boot option (see below) can also be used
+ to stop the printing of events to console at
+ late_initcall_sync.
+
** CAUTION **
Having tracepoints sent to printk() and activating high
frequency tracepoints such as irq or sched, can cause
the system to live lock.
+ tp_printk_stop_on_boot[FTRACE]
+ When tp_printk (above) is set, it can cause a lot of noise
+ on the console. It may be useful to only include the
+ printing of events during boot up, as user space may
+ make the system inoperable.
+
+ This command line option will stop the printing of events
+ to console at the late_initcall_sync() time frame.
+
traceoff_on_warning
[FTRACE] enable this option to disable tracing when a
warning is hit. This turns off "tracing_on". Tracing can
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst
index f7b1c7462991..8abaeb144e44 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -60,6 +60,10 @@ HugePages_Surp
the pool above the value in ``/proc/sys/vm/nr_hugepages``. The
maximum number of surplus huge pages is controlled by
``/proc/sys/vm/nr_overcommit_hugepages``.
+ Note: When the feature of freeing unused vmemmap pages associated
+ with each hugetlb page is enabled, the number of surplus huge pages
+ may be temporarily larger than the maximum number of surplus huge
+ pages when the system is under memory pressure.
Hugepagesize
is the default hugepage size (in Kb).
Hugetlb
@@ -80,6 +84,10 @@ returned to the huge page pool when freed by a task. A user with root
privileges can dynamically allocate more or free some persistent huge pages
by increasing or decreasing the value of ``nr_hugepages``.
+Note: When the feature of freeing unused vmemmap pages associated with each
+hugetlb page is enabled, freeing of huge pages requested by the user can fail
+when the system is under memory pressure. Please try again later in that case.
+
Pages that are used as huge pages are reserved inside the kernel and cannot
be used for other purposes. Huge pages cannot be swapped out under
memory pressure.
@@ -145,6 +153,9 @@ default_hugepagesz
will all result in 256 2M huge pages being allocated. Valid default
huge page size is architecture dependent.
+hugetlb_free_vmemmap
+ When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this enables freeing
+ unused vmemmap pages associated with each HugeTLB page.
When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
indicates the current number of pre-allocated huge pages of the default size.
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 05d51d2d8beb..c6bae2d77160 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -357,6 +357,19 @@ creates ZONE_MOVABLE as following.
Unfortunately, there is no information to show which memory block belongs
to ZONE_MOVABLE. This is TBD.
+ Memory offlining can fail when dissolving a free huge page on ZONE_MOVABLE
+ and the feature of freeing unused vmemmap pages associated with each hugetlb
+ page is enabled.
+
+ This can happen when we have plenty of ZONE_MOVABLE memory, but not enough
+ kernel memory to allocate vmemmap pages. We may even be able to migrate
+ huge page contents, but will not be able to dissolve the source huge page.
+ This will prevent an offline operation and is unfortunate as memory offlining
+ is expected to succeed on movable zones. Users that depend on memory hotplug
+ to succeed for movable zones should carefully consider whether the memory
+ savings gained from this feature are worth the risk of possibly not being
+ able to offline memory in certain situations.
+
.. note::
Techniques that rely on long-term pinnings of memory (especially, RDMA and
vfio) are fundamentally problematic with ZONE_MOVABLE and, therefore, memory
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index 340a5aee9b80..fb578fbbb76c 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -21,6 +21,8 @@ There are four components to pagemap:
* Bit 55 pte is soft-dirty (see
:ref:`Documentation/admin-guide/mm/soft-dirty.rst <soft_dirty>`)
* Bit 56 page exclusively mapped (since 4.2)
+ * Bit 57 pte is uffd-wp write-protected (since 5.13) (see
+ :ref:`Documentation/admin-guide/mm/userfaultfd.rst <userfaultfd>`)
* Bits 57-60 zero
* Bit 61 page is file-page or shared-anon (since 3.5)
* Bit 62 page swapped
diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
index 3aa38e8b8361..6528036093e1 100644
--- a/Documentation/admin-guide/mm/userfaultfd.rst
+++ b/Documentation/admin-guide/mm/userfaultfd.rst
@@ -77,7 +77,8 @@ events, except page fault notifications, may be generated:
- ``UFFD_FEATURE_MINOR_HUGETLBFS`` indicates that the kernel supports
``UFFDIO_REGISTER_MODE_MINOR`` registration for hugetlbfs virtual memory
- areas.
+ areas. ``UFFD_FEATURE_MINOR_SHMEM`` is the analogous feature indicating
+ support for shmem virtual memory areas.
The userland application should set the feature flags it intends to use
when invoking the ``UFFDIO_API`` ioctl, to request that those features be
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index f18e881373c4..2ed79f41a411 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -256,6 +256,35 @@ Note names of the NVMem devices ``nvm_activeN`` and ``nvm_non_activeN``
depend on the order they are registered in the NVMem subsystem. N in
the name is the identifier added by the NVMem subsystem.
+Upgrading on-board retimer NVM when there is no cable connected
+---------------------------------------------------------------
+If the platform supports it, it may be possible to upgrade the retimer NVM
+firmware even when there is nothing connected to the USB4
+ports. When this is the case the ``usb4_portX`` devices have two special
+attributes: ``offline`` and ``rescan``. The way to upgrade the firmware
+is to first put the USB4 port into offline mode::
+
+ # echo 1 > /sys/bus/thunderbolt/devices/0-0/usb4_port1/offline
+
+This step makes sure the port does not respond to any hotplug events,
+and also ensures the retimers are powered on. The next step is to scan
+for the retimers::
+
+ # echo 1 > /sys/bus/thunderbolt/devices/0-0/usb4_port1/rescan
+
+This enumerates and adds the on-board retimers. Now retimer NVM can be
+upgraded in the same way as with a cable connected (see the previous section
+and the sketch below). However, the retimer is not disconnected (as we are in
+offline mode), so after writing ``1`` to ``nvm_authenticate`` one should wait for
+5 or more seconds before running rescan again::
+
+ # echo 1 > /sys/bus/thunderbolt/devices/0-0/usb4_port1/rescan
+
+At this point, if everything went fine, the port can be put back to the
+functional state again::
+
+ # echo 0 > /sys/bus/thunderbolt/devices/0-0/usb4_port1/offline
+
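For completeness, a hedged sketch of the flashing step referenced above (the
retimer name ``0-0:1.1`` and the NVMem device suffix are illustrative and
depend on enumeration order)::

  # dd if=retimer_image.nvm of=/sys/bus/thunderbolt/devices/0-0:1.1/nvm_non_active0/nvmem
  # echo 1 > /sys/bus/thunderbolt/devices/0-0:1.1/nvm_authenticate
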
Upgrading NVM when host controller is in safe mode
--------------------------------------------------
If the existing NVM is not properly authenticated (or is missing) the
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
index 459e6b66ff68..0c9120ec58ae 100644
--- a/Documentation/arm64/tagged-address-abi.rst
+++ b/Documentation/arm64/tagged-address-abi.rst
@@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
- tagged pointers in this context is allowed with the exception of
- ``brk()``, ``mmap()`` and the ``new_address`` argument to
- ``mremap()`` as these have the potential to alias with existing
- user addresses.
-
- NOTE: This behaviour changed in v5.6 and so some earlier kernels may
- incorrectly accept valid tagged pointers for the ``brk()``,
- ``mmap()`` and ``mremap()`` system calls.
+ tagged pointers in this context is allowed with these exceptions:
+
+ - ``brk()``, ``mmap()`` and the ``new_address`` argument to
+ ``mremap()`` as these have the potential to alias with existing
+ user addresses.
+
+ NOTE: This behaviour changed in v5.6 and so some earlier kernels may
+ incorrectly accept valid tagged pointers for the ``brk()``,
+ ``mmap()`` and ``mremap()`` system calls.
+
+ - The ``range.start``, ``start`` and ``dst`` arguments to the
+ ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
+ ``userfaultfd()``, as fault addresses subsequently obtained by reading
+ the file descriptor will be untagged, which may otherwise confuse
+ tag-unaware programs.
+
+ NOTE: This behaviour changed in v5.14 and so some earlier kernels may
+ incorrectly accept valid tagged pointers for this system call.
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to
diff --git a/Documentation/bpf/libbpf/libbpf_naming_convention.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
index 3de1d51e41da..6bf9c5ac7576 100644
--- a/Documentation/bpf/libbpf/libbpf_naming_convention.rst
+++ b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.
For example, if current state of ``libbpf.map`` is:
-.. code-block:: c
+.. code-block:: none
LIBBPF_0.0.1 {
global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
, and a new symbol ``bpf_func_c`` is being introduced, then
``libbpf.map`` should be changed like this:
-.. code-block:: c
+.. code-block:: none
LIBBPF_0.0.1 {
global:
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 741aa37dc181..2a7444e3a4c2 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -24,11 +24,8 @@ String Conversions
.. kernel-doc:: lib/vsprintf.c
:export:
-.. kernel-doc:: include/linux/kernel.h
- :functions: kstrtol
-
-.. kernel-doc:: include/linux/kernel.h
- :functions: kstrtoul
+.. kernel-doc:: include/linux/kstrtox.h
+ :functions: kstrtol kstrtoul
.. kernel-doc:: lib/kstrtox.c
:export:
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 4346ae17a72c..d941717a191b 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -125,6 +125,17 @@ used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
when tail-calls are used and marked with the noreturn GCC attribute.
+If the pointer is within a module, the module name and optionally the build ID
+are printed after the symbol name with an extra ``b`` appended to the end of the
+specifier.
+
+::
+ %pS versatile_init+0x0/0x110 [module_name]
+ %pSb versatile_init+0x0/0x110 [module_name ed5019fdf5e53be37cb1ba7899292d7e143b259e]
+ %pSRb versatile_init+0x9/0x110 [module_name ed5019fdf5e53be37cb1ba7899292d7e143b259e]
+ (with __builtin_extract_return_addr() translation)
+ %pBb prev_fn_of_versatile_init+0x88/0x88 [module_name ed5019fdf5e53be37cb1ba7899292d7e143b259e]
+
Probed Pointers from BPF / tracing
----------------------------------
diff --git a/Documentation/cpu-freq/cpu-drivers.rst b/Documentation/cpu-freq/cpu-drivers.rst
index a697278ce190..d84ededb66f9 100644
--- a/Documentation/cpu-freq/cpu-drivers.rst
+++ b/Documentation/cpu-freq/cpu-drivers.rst
@@ -58,9 +58,6 @@ And optionally
.driver_data - cpufreq driver specific data.
- .resolve_freq - Returns the most appropriate frequency for a target
- frequency. Doesn't change the frequency though.
-
.get_intermediate and target_intermediate - Used to switch to stable
frequency while changing CPU frequency.
@@ -71,9 +68,6 @@ And optionally
.exit - A pointer to a per-policy cleanup function called during
CPU_POST_DEAD phase of cpu hotplug process.
- .stop_cpu - A pointer to a per-policy stop function called during
- CPU_DOWN_PREPARE phase of cpu hotplug process.
-
.suspend - A pointer to a per-policy suspend function which is called
with interrupts disabled and _after_ the governor is stopped for the
policy.
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index d85ce238ace7..6a600cf8430b 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -27,75 +27,57 @@ Error reports
A typical data race report looks like this::
==================================================================
- BUG: KCSAN: data-race in generic_permission / kernfs_refresh_inode
-
- write to 0xffff8fee4c40700c of 4 bytes by task 175 on cpu 4:
- kernfs_refresh_inode+0x70/0x170
- kernfs_iop_permission+0x4f/0x90
- inode_permission+0x190/0x200
- link_path_walk.part.0+0x503/0x8e0
- path_lookupat.isra.0+0x69/0x4d0
- filename_lookup+0x136/0x280
- user_path_at_empty+0x47/0x60
- vfs_statx+0x9b/0x130
- __do_sys_newlstat+0x50/0xb0
- __x64_sys_newlstat+0x37/0x50
- do_syscall_64+0x85/0x260
- entry_SYSCALL_64_after_hwframe+0x44/0xa9
-
- read to 0xffff8fee4c40700c of 4 bytes by task 166 on cpu 6:
- generic_permission+0x5b/0x2a0
- kernfs_iop_permission+0x66/0x90
- inode_permission+0x190/0x200
- link_path_walk.part.0+0x503/0x8e0
- path_lookupat.isra.0+0x69/0x4d0
- filename_lookup+0x136/0x280
- user_path_at_empty+0x47/0x60
- do_faccessat+0x11a/0x390
- __x64_sys_access+0x3c/0x50
- do_syscall_64+0x85/0x260
- entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ BUG: KCSAN: data-race in test_kernel_read / test_kernel_write
+
+ write to 0xffffffffc009a628 of 8 bytes by task 487 on cpu 0:
+ test_kernel_write+0x1d/0x30
+ access_thread+0x89/0xd0
+ kthread+0x23e/0x260
+ ret_from_fork+0x22/0x30
+
+ read to 0xffffffffc009a628 of 8 bytes by task 488 on cpu 6:
+ test_kernel_read+0x10/0x20
+ access_thread+0x89/0xd0
+ kthread+0x23e/0x260
+ ret_from_fork+0x22/0x30
+
+ value changed: 0x00000000000009a6 -> 0x00000000000009b2
Reported by Kernel Concurrency Sanitizer on:
- CPU: 6 PID: 166 Comm: systemd-journal Not tainted 5.3.0-rc7+ #1
- Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ CPU: 6 PID: 488 Comm: access_thread Not tainted 5.12.0-rc2+ #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
==================================================================
The header of the report provides a short summary of the functions involved in
the race. It is followed by the access types and stack traces of the 2 threads
-involved in the data race.
+involved in the data race. If KCSAN also observed a value change, the observed
+old value and new value are shown on the "value changed" line respectively.
The other less common type of data race report looks like this::
==================================================================
- BUG: KCSAN: data-race in e1000_clean_rx_irq+0x551/0xb10
-
- race at unknown origin, with read to 0xffff933db8a2ae6c of 1 bytes by interrupt on cpu 0:
- e1000_clean_rx_irq+0x551/0xb10
- e1000_clean+0x533/0xda0
- net_rx_action+0x329/0x900
- __do_softirq+0xdb/0x2db
- irq_exit+0x9b/0xa0
- do_IRQ+0x9c/0xf0
- ret_from_intr+0x0/0x18
- default_idle+0x3f/0x220
- arch_cpu_idle+0x21/0x30
- do_idle+0x1df/0x230
- cpu_startup_entry+0x14/0x20
- rest_init+0xc5/0xcb
- arch_call_rest_init+0x13/0x2b
- start_kernel+0x6db/0x700
+ BUG: KCSAN: data-race in test_kernel_rmw_array+0x71/0xd0
+
+ race at unknown origin, with read to 0xffffffffc009bdb0 of 8 bytes by task 515 on cpu 2:
+ test_kernel_rmw_array+0x71/0xd0
+ access_thread+0x89/0xd0
+ kthread+0x23e/0x260
+ ret_from_fork+0x22/0x30
+
+ value changed: 0x0000000000002328 -> 0x0000000000002329
Reported by Kernel Concurrency Sanitizer on:
- CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.3.0-rc7+ #2
- Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ CPU: 2 PID: 515 Comm: access_thread Not tainted 5.12.0-rc2+ #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
==================================================================
This report is generated where it was not possible to determine the other
racing thread, but a race was inferred due to the data value of the watched
-memory location having changed. These can occur either due to missing
-instrumentation or e.g. DMA accesses. These reports will only be generated if
-``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` (selected by default).
+memory location having changed. These reports always show a "value changed"
+line. A common reason for reports of this type is missing instrumentation in
+the racing thread, but they can also occur due to e.g. DMA accesses. Such reports
+are shown only if ``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y``, which is
+enabled by default.
Selective analysis
~~~~~~~~~~~~~~~~~~
@@ -106,7 +88,8 @@ the below options are available:
* KCSAN understands the ``data_race(expr)`` annotation, which tells KCSAN that
any data races due to accesses in ``expr`` should be ignored and resulting
- behaviour when encountering a data race is deemed safe.
+ behaviour when encountering a data race is deemed safe. Please see
+ `"Marking Shared-Memory Accesses" in the LKMM`_ for more information.
* Disabling data race detection for entire functions can be accomplished by
using the function attribute ``__no_kcsan``::
@@ -128,6 +111,8 @@ the below options are available:
KCSAN_SANITIZE := n
+.. _"Marking Shared-Memory Accesses" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/access-marking.txt
+
Furthermore, it is possible to tell KCSAN to show or hide entire classes of
data races, depending on preferences. These can be changed via the following
Kconfig options:
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
index 25d92a9a05ea..cacb35ec658d 100644
--- a/Documentation/dev-tools/kunit/index.rst
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -14,6 +14,7 @@ KUnit - Unit Testing for the Linux Kernel
style
faq
tips
+ running_tips
What is KUnit?
==============
diff --git a/Documentation/dev-tools/kunit/kunit-tool.rst b/Documentation/dev-tools/kunit/kunit-tool.rst
index 29ae2fee8123..c7ff9afe407a 100644
--- a/Documentation/dev-tools/kunit/kunit-tool.rst
+++ b/Documentation/dev-tools/kunit/kunit-tool.rst
@@ -22,14 +22,19 @@ not require any virtualization support: it is just a regular program.
What is a .kunitconfig?
=======================
-It's just a defconfig that kunit_tool looks for in the base directory.
-kunit_tool uses it to generate a .config as you might expect. In addition, it
-verifies that the generated .config contains the CONFIG options in the
-.kunitconfig; the reason it does this is so that it is easy to be sure that a
-CONFIG that enables a test actually ends up in the .config.
+It's just a defconfig that kunit_tool looks for in the build directory
+(``.kunit`` by default). kunit_tool uses it to generate a .config as you might
+expect. In addition, it verifies that the generated .config contains the CONFIG
+options in the .kunitconfig; the reason it does this is so that it is easy to
+be sure that a CONFIG that enables a test actually ends up in the .config.
-How do I use kunit_tool?
-========================
+It's also possible to pass a separate .kunitconfig fragment to kunit_tool,
+which is useful if you have several different groups of tests you wish
+to run independently, or if you want to use pre-defined test configs for
+certain subsystems.
+
+Getting Started with kunit_tool
+===============================
If a kunitconfig is present at the root directory, all you have to do is:
@@ -48,10 +53,177 @@ However, you most likely want to use it with the following options:
.. note::
This command will work even without a .kunitconfig file: if no
- .kunitconfig is present, a default one will be used instead.
+ .kunitconfig is present, a default one will be used instead.
+
+If you wish to use a different .kunitconfig file (such as one provided for
+testing a particular subsystem), you can pass it as an option.
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run --kunitconfig=fs/ext4/.kunitconfig
For a list of all the flags supported by kunit_tool, you can run:
.. code-block:: bash
./tools/testing/kunit/kunit.py run --help
+
+Configuring, Building, and Running Tests
+========================================
+
+It's also possible to run just parts of the KUnit build process independently,
+which is useful if you want to make manual changes to part of the process.
+
+A .config can be generated from a .kunitconfig by using the ``config`` argument
+when running kunit_tool:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py config
+
+Similarly, if you just want to build a KUnit kernel from the current .config,
+you can use the ``build`` argument:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py build
+
+And, if you already have a built UML kernel with built-in KUnit tests, you can
+run the kernel and display the test results with the ``exec`` argument:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py exec
+
+The ``run`` command which is discussed above is equivalent to running all three
+of these in sequence.
+
+All of these commands accept a number of optional command-line arguments. The
+``--help`` flag will give a complete list of these, or keep reading this page
+for a guide to some of the more useful ones.
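+
+For instance, the following is (roughly) what ``run`` does for you behind the
+scenes; this is just a sketch to show how the sub-commands relate, not
+something you normally need to type yourself:
+
+.. code-block:: bash
+
+	./tools/testing/kunit/kunit.py config
+	./tools/testing/kunit/kunit.py build
+	./tools/testing/kunit/kunit.py exec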
+
+Parsing Test Results
+====================
+
+KUnit tests output their results in TAP (Test Anything Protocol) format.
+kunit_tool will, when running tests, parse this output and print a summary
+which is much more pleasant to read. If you wish to look at the raw test
+results in TAP format, you can pass the ``--raw_output`` argument.
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run --raw_output
+
+.. note::
+ The raw output from test runs may contain other, non-KUnit kernel log
+ lines.
+
+If you have KUnit results in their raw TAP format, you can parse them and print
+the human-readable summary with the ``parse`` command for kunit_tool. This
+accepts a filename as an argument, or will read from standard input.
+
+.. code-block:: bash
+
+ # Reading from a file
+ ./tools/testing/kunit/kunit.py parse /var/log/dmesg
+ # Reading from stdin
+ dmesg | ./tools/testing/kunit/kunit.py parse
+
+This is very useful if you wish to run tests in a configuration not supported
+by kunit_tool (such as on real hardware, or an unsupported architecture).
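+
+As a rough sketch of that workflow, you could capture the kernel log on the
+target machine and pipe it into ``parse`` on your development machine (the
+hostname below is made up for illustration):
+
+.. code-block:: bash
+
+	ssh my-test-board dmesg | ./tools/testing/kunit/kunit.py parse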
+
+Filtering Tests
+===============
+
+It's possible to run only a subset of the tests built into a kernel by passing
+a filter to the ``exec`` or ``run`` commands. For example, if you only wanted
+to run KUnit resource tests, you could use:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run 'kunit-resource*'
+
+This uses the standard glob format for wildcards.
+
+Running Tests on QEMU
+=====================
+
+kunit_tool supports running tests on QEMU as well as via UML (as mentioned
+elsewhere). The default way of running tests on QEMU requires two flags:
+
+``--arch``
+	Selects a collection of configs (Kconfig options, QEMU configuration
+	options, etc.) that allow KUnit tests to be run on the specified
+	architecture in a minimal way; this is usually not much slower than
+	using UML. The architecture argument is the same as the value passed
+	to the ``ARCH`` variable used by Kbuild. Not all architectures are
+	currently supported by this flag, but unsupported ones can be handled
+	by the ``--qemu_config`` flag discussed later. If ``um`` is passed (or
+	this flag is omitted), the tests will run via UML; any other
+	architecture, e.g. i386, x86_64 or arm, will run on QEMU.
+
+``--cross_compile``
+	Specifies the toolchain for Kbuild to use. The argument passed here is
+	the same as what you would pass to the ``CROSS_COMPILE`` variable used
+	by Kbuild: the prefix for the toolchain binaries, such as gcc. For
+	example, use ``sparc64-linux-gnu-`` if you have the sparc toolchain
+	installed on your system, or
+	``$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-``
+	if you have downloaded the microblaze toolchain from the 0-day website
+	to a directory in your home directory called ``toolchains``. See the
+	example invocation after this list.
+
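+For example, assuming you have the sparc toolchain mentioned above installed,
+a QEMU run for sparc might look like this (just a sketch; adjust the flags to
+your own setup):
+
+.. code-block:: bash
+
+	./tools/testing/kunit/kunit.py run --arch=sparc --cross_compile=sparc64-linux-gnu-
+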
+You may want to run an architecture which is not supported by the ``--arch``
+flag, or to run KUnit tests on QEMU using a non-default configuration. For
+this use case, you can write
+your own QemuConfig. These QemuConfigs are written in Python. They must have an
+import line ``from ..qemu_config import QemuArchParams`` at the top of the file
+and the file must contain a variable called ``QEMU_ARCH`` that has an instance
+of ``QemuArchParams`` assigned to it. An example can be seen in
+``tools/testing/kunit/qemu_configs/x86_64.py``.
+
+Once you have a QemuConfig you can pass it into kunit_tool using the
+``--qemu_config`` flag; when used this flag replaces the ``--arch`` flag. If we
+were to do this with the ``x86_64.py`` example from above, the invocation would
+look something like this:
+
+.. code-block:: bash
+
+ ./tools/testing/kunit/kunit.py run \
+ --timeout=60 \
+ --jobs=12 \
+ --qemu_config=./tools/testing/kunit/qemu_configs/x86_64.py
+
+Other Useful Options
+====================
+
+kunit_tool has a number of other command-line arguments which can be useful
+when adapting it to fit your environment or needs.
+
+Some of the more useful ones are:
+
+``--help``
+ Lists all of the available options. Note that different commands
+ (``config``, ``build``, ``run``, etc) will have different supported
+ options. Place ``--help`` before the command to list common options,
+ and after the command for options specific to that command.
+
+``--build_dir``
+ Specifies the build directory that kunit_tool will use. This is where
+ the .kunitconfig file is located, as well as where the .config and
+ compiled kernel will be placed. Defaults to ``.kunit``.
+
+``--make_options``
+ Specifies additional options to pass to ``make`` when compiling a
+	kernel (with the ``build`` or ``run`` commands). For example, to enable
+	compiler warnings, you can pass ``--make_options W=1`` (see also the
+	example after this list).
+
+``--alltests``
+	Builds a UML kernel with all config options enabled using ``make
+	allyesconfig``. This allows you to run as many tests as possible,
+	but is very slow and prone to breakage as new options are added or
+	modified. In most cases, enabling all tests which have satisfied
+	dependencies by adding ``CONFIG_KUNIT_ALL_TESTS=y`` to your
+	.kunitconfig is preferable.
+
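+As a small, made-up example (the build directory name here is arbitrary),
+``--build_dir`` and ``--make_options`` can be combined like so:
+
+.. code-block:: bash
+
+	./tools/testing/kunit/kunit.py run --build_dir=.kunit-w1 --make_options=W=1
+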
+There are several other options (and new ones are often added), so do check
+``--help`` if you're looking for something not mentioned here.
diff --git a/Documentation/dev-tools/kunit/running_tips.rst b/Documentation/dev-tools/kunit/running_tips.rst
new file mode 100644
index 000000000000..d1626d548fa5
--- /dev/null
+++ b/Documentation/dev-tools/kunit/running_tips.rst
@@ -0,0 +1,247 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Tips For Running KUnit Tests
+============================
+
+Using ``kunit.py run`` ("kunit tool")
+=====================================
+
+Running from any directory
+--------------------------
+
+It can be handy to create a bash function like:
+
+.. code-block:: bash
+
+ function run_kunit() {
+	  ( cd "$(git rev-parse --show-toplevel)" && ./tools/testing/kunit/kunit.py run "$@" )
+ }
+
+.. note::
+ Early versions of ``kunit.py`` (before 5.6) didn't work unless run from
+ the kernel root, hence the use of a subshell and ``cd``.
+
+Running a subset of tests
+-------------------------
+
+``kunit.py run`` accepts an optional glob argument to filter tests. Currently
+this only matches against suite names, but this may change in the future.
+
+Say that we wanted to run the sysctl tests, we could do so via:
+
+.. code-block:: bash
+
+ $ echo -e 'CONFIG_KUNIT=y\nCONFIG_KUNIT_ALL_TESTS=y' > .kunit/.kunitconfig
+ $ ./tools/testing/kunit/kunit.py run 'sysctl*'
+
+We're paying the cost of building more tests than we need this way, but it's
+easier than fiddling with ``.kunitconfig`` files or commenting out
+``kunit_suite`` definitions.
+
+However, if we wanted to define a set of tests in a less ad hoc way, the next
+tip is useful.
+
+Defining a set of tests
+-----------------------
+
+``kunit.py run`` (along with ``build``, and ``config``) supports a
+``--kunitconfig`` flag. So if you have a set of tests that you want to run on a
+regular basis (especially if they have other dependencies), you can create a
+specific ``.kunitconfig`` for them.
+
+E.g. kunit has one for its tests:
+
+.. code-block:: bash
+
+ $ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit/.kunitconfig
+
+Alternatively, if you're following the convention of naming your
+file ``.kunitconfig``, you can just pass in the dir, e.g.
+
+.. code-block:: bash
+
+ $ ./tools/testing/kunit/kunit.py run --kunitconfig=lib/kunit
+
+.. note::
+	This is a relatively new feature (5.12+) so we don't have any
+	conventions yet about which files should be checked in versus just
+	kept around locally. It's up to you and your maintainer to decide if a
+	config is useful enough to submit (and therefore maintain).
+
+.. note::
+ Having ``.kunitconfig`` fragments in a parent and child directory is
+ iffy. There's discussion about adding an "import" statement in these
+ files to make it possible to have a top-level config run tests from all
+ child directories. But that would mean ``.kunitconfig`` files are no
+ longer just simple .config fragments.
+
+ One alternative would be to have kunit tool recursively combine configs
+ automagically, but tests could theoretically depend on incompatible
+ options, so handling that would be tricky.
+
+Generating code coverage reports under UML
+------------------------------------------
+
+.. note::
+ TODO(brendanhiggins@google.com): There are various issues with UML and
+ versions of gcc 7 and up. You're likely to run into missing ``.gcda``
+ files or compile errors.
+
+This is different from the "normal" way of getting coverage information that is
+documented in Documentation/dev-tools/gcov.rst.
+
+Instead of enabling ``CONFIG_GCOV_KERNEL=y``, we can set these options:
+
+.. code-block:: none
+
+ CONFIG_DEBUG_KERNEL=y
+ CONFIG_DEBUG_INFO=y
+ CONFIG_GCOV=y
+
+
+Putting it together into a copy-pastable sequence of commands:
+
+.. code-block:: bash
+
+ # Append coverage options to the current config
+ $ echo -e "CONFIG_DEBUG_KERNEL=y\nCONFIG_DEBUG_INFO=y\nCONFIG_GCOV=y" >> .kunit/.kunitconfig
+ $ ./tools/testing/kunit/kunit.py run
+ # Extract the coverage information from the build dir (.kunit/)
+ $ lcov -t "my_kunit_tests" -o coverage.info -c -d .kunit/
+
+ # From here on, it's the same process as with CONFIG_GCOV_KERNEL=y
+ # E.g. can generate an HTML report in a tmp dir like so:
+ $ genhtml -o /tmp/coverage_html coverage.info
+
+
+If your installed version of gcc doesn't work, you can tweak the steps:
+
+.. code-block:: bash
+
+ $ ./tools/testing/kunit/kunit.py run --make_options=CC=/usr/bin/gcc-6
+ $ lcov -t "my_kunit_tests" -o coverage.info -c -d .kunit/ --gcov-tool=/usr/bin/gcov-6
+
+
+Running tests manually
+======================
+
+Running tests without using ``kunit.py run`` is also an important use case.
+Currently it's your only option if you want to test on architectures other than
+UML.
+
+As running the tests under UML is fairly straightforward (configure and compile
+the kernel, run the ``./linux`` binary), this section will focus on testing
+non-UML architectures.
+
+
+Running built-in tests
+----------------------
+
+When setting tests to ``=y``, the tests will run as part of boot and print
+results to dmesg in TAP format. So you just need to add your tests to your
+``.config``, build and boot your kernel as normal.
+
+So if we compiled our kernel with:
+
+.. code-block:: none
+
+ CONFIG_KUNIT=y
+ CONFIG_KUNIT_EXAMPLE_TEST=y
+
+Then we'd see output like this in dmesg signaling the test ran and passed:
+
+.. code-block:: none
+
+ TAP version 14
+ 1..1
+ # Subtest: example
+ 1..1
+ # example_simple_test: initializing
+ ok 1 - example_simple_test
+ ok 1 - example
+
+Running tests as modules
+------------------------
+
+Depending on the tests, you can build them as loadable modules.
+
+For example, we'd change the config options from before to
+
+.. code-block:: none
+
+ CONFIG_KUNIT=y
+ CONFIG_KUNIT_EXAMPLE_TEST=m
+
+Then after booting into our kernel, we can run the test via
+
+.. code-block:: none
+
+ $ modprobe kunit-example-test
+
+This will then cause the test results to be printed to the kernel log in TAP
+format.
+
+.. note::
+	The ``modprobe`` will *not* have a non-zero exit code if any test
+	failed (as of 5.13), but ``kunit.py parse`` would; see below.
+
+.. note::
+	You can set ``CONFIG_KUNIT=m`` as well; however, some features will not
+	work and thus some tests might break. Ideally, tests would specify they
+	depend on ``KUNIT=y`` in their ``Kconfig`` entries, but this is an edge
+	case most test authors won't think about.
+ As of 5.13, the only difference is that ``current->kunit_test`` will
+ not exist.
+
+Pretty-printing results
+-----------------------
+
+You can use ``kunit.py parse`` to parse dmesg for test output and print out
+results in the same familiar format that ``kunit.py run`` does.
+
+.. code-block:: bash
+
+ $ ./tools/testing/kunit/kunit.py parse /var/log/dmesg
+
+
+Retrieving per suite results
+----------------------------
+
+Regardless of how you're running your tests, you can enable
+``CONFIG_KUNIT_DEBUGFS`` to expose per-suite TAP-formatted results:
+
+.. code-block:: none
+
+ CONFIG_KUNIT=y
+ CONFIG_KUNIT_EXAMPLE_TEST=m
+ CONFIG_KUNIT_DEBUGFS=y
+
+The results for each suite will be exposed under
+``/sys/kernel/debug/kunit/<suite>/results``.
+So using our example config:
+
+.. code-block:: bash
+
+ $ modprobe kunit-example-test > /dev/null
+ $ cat /sys/kernel/debug/kunit/example/results
+ ... <TAP output> ...
+
+ # After removing the module, the corresponding files will go away
+ $ modprobe -r kunit-example-test
+ $ cat /sys/kernel/debug/kunit/example/results
+ /sys/kernel/debug/kunit/example/results: No such file or directory
+
+Generating code coverage reports
+--------------------------------
+
+See Documentation/dev-tools/gcov.rst for details on how to do this.
+
+The only vaguely KUnit-specific advice here is that you probably want to build
+your tests as modules. That way you can isolate the coverage of your tests from
+other code executed during boot, e.g.:
+
+.. code-block:: bash
+
+ # Reset coverage counters before running the test.
+ $ echo 0 > /sys/kernel/debug/gcov/reset
+ $ modprobe kunit-example-test
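+
+	# A possible follow-up (an assumption, not taken from gcov.rst verbatim):
+	# the coverage data for the module should now be visible under debugfs,
+	# mirroring your kernel build path (shown here with $(pwd) for a build
+	# done in the current directory).
+	$ ls /sys/kernel/debug/gcov/$(pwd)/lib/kunit/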
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index 63ef7b625c13..1e00f9226f74 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -36,7 +36,7 @@ A good starting point for a ``.kunitconfig`` is the KUnit defconfig:
.. code-block:: bash
cd $PATH_TO_LINUX_REPO
- cp arch/um/configs/kunit_defconfig .kunitconfig
+ cp tools/testing/kunit/configs/default.config .kunitconfig
You can then add any other Kconfig options you wish, e.g.:
@@ -236,5 +236,7 @@ Next Steps
==========
* Check out the Documentation/dev-tools/kunit/tips.rst page for tips on
writing idiomatic KUnit tests.
+* Check out the :doc:`running_tips` page for tips on
+ how to make running KUnit tests easier.
* Optional: see the :doc:`usage` page for a more
in-depth explanation of KUnit.
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 3ee7ab91f712..63f1bb89ebf5 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -467,10 +467,9 @@ fictitious example for ``sha1sum(1)``
.. code-block:: c
- /* Note: the cast is to satisfy overly strict type-checking. */
#define TEST_SHA1(in, want) \
sha1sum(in, out); \
- KUNIT_EXPECT_STREQ_MSG(test, (char *)out, want, "sha1sum(%s)", in);
+ KUNIT_EXPECT_STREQ_MSG(test, out, want, "sha1sum(%s)", in);
char out[40];
TEST_SHA1("hello world", "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed");
@@ -509,7 +508,7 @@ In some cases, it can be helpful to write a *table-driven test* instead, e.g.
};
for (i = 0; i < ARRAY_SIZE(cases); ++i) {
sha1sum(cases[i].str, out);
- KUNIT_EXPECT_STREQ_MSG(test, (char *)out, cases[i].sha1,
+ KUNIT_EXPECT_STREQ_MSG(test, out, cases[i].sha1,
"sha1sum(%s)", cases[i].str);
}
@@ -570,7 +569,7 @@ Reusing the same ``cases`` array from above, we can write the test as a
struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
sha1sum(test_param->str, out);
- KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+ KUNIT_EXPECT_STREQ_MSG(test, out, test_param->sha1,
"sha1sum(%s)", test_param->str);
}
@@ -611,17 +610,45 @@ non-UML architectures:
None of these are reasons not to run your KUnit tests on real hardware; they are
only things to be aware of when doing so.
-The biggest impediment will likely be that certain KUnit features and
-infrastructure may not support your target environment. For example, at this
-time the KUnit Wrapper (``tools/testing/kunit/kunit.py``) does not work outside
-of UML. Unfortunately, there is no way around this. Using UML (or even just a
-particular architecture) allows us to make a lot of assumptions that make it
-possible to do things which might otherwise be impossible.
-
-Nevertheless, all core KUnit framework features are fully supported on all
-architectures, and using them is straightforward: all you need to do is to take
-your kunitconfig, your Kconfig options for the tests you would like to run, and
-merge them into whatever config your are using for your platform. That's it!
+Currently, the KUnit Wrapper (``tools/testing/kunit/kunit.py``, aka
+kunit_tool) only fully supports running tests inside of UML and QEMU; however,
+this is only due to our own time limitations as humans working on KUnit. It is
+entirely possible to support other emulators and even actual hardware, but for
+now QEMU and UML are what is fully supported within the KUnit Wrapper. Again,
+to be clear, this is just the Wrapper. The actual KUnit tests and the KUnit
+library they are written in are fully architecture agnostic and can be used in
+virtually any setup; you just won't have the benefit of typing a single command
+and having everything magically work perfectly out of the box.
+
+Again, all core KUnit framework features are fully supported on all
+architectures, and using them is straightforward: most popular architectures
+are supported directly in the KUnit Wrapper via QEMU. Currently, supported
+architectures on QEMU include:
+
+* i386
+* x86_64
+* arm
+* arm64
+* alpha
+* powerpc
+* riscv
+* s390
+* sparc
+
+In order to run KUnit tests on one of these architectures via QEMU with the
+KUnit wrapper, all you need to do is specify the flags ``--arch`` and
+``--cross_compile`` when invoking the KUnit Wrapper. For example, we could run
+the default KUnit tests on ARM in the following manner (assuming we have an ARM
+toolchain installed):
+
+.. code-block:: bash
+
+ tools/testing/kunit/kunit.py run --timeout=60 --jobs=12 --arch=arm --cross_compile=arm-linux-gnueabihf-
+
+Alternatively, if you want to run your tests on real hardware or in some other
+emulation environment, all you need to do is take your kunitconfig and the
+Kconfig options for the tests you would like to run, and merge them into
+whatever config you are using for your platform. That's it!
For example, let's say you have the following kunitconfig:
diff --git a/Documentation/devicetree/bindings/arm/amlogic,scpi.txt b/Documentation/devicetree/bindings/arm/amlogic,scpi.txt
deleted file mode 100644
index 5ab59da052df..000000000000
--- a/Documentation/devicetree/bindings/arm/amlogic,scpi.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-System Control and Power Interface (SCPI) Message Protocol
-(in addition to the standard binding in [0])
-----------------------------------------------------------
-Required properties
-
-- compatible : should be "amlogic,meson-gxbb-scpi"
-
-AMLOGIC SRAM and Shared Memory for SCPI
-------------------------------------
-
-Required properties:
-- compatible : should be "amlogic,meson-gxbb-sram"
-
-Each sub-node represents the reserved area for SCPI.
-
-Required sub-node properties:
-- compatible : should be "amlogic,meson-gxbb-scp-shmem" for SRAM based shared
- memory on Amlogic GXBB SoC.
-
-Sensor bindings for the sensors based on SCPI Message Protocol
---------------------------------------------------------------
-SCPI provides an API to access the various sensors on the SoC.
-
-Required properties:
-- compatible : should be "amlogic,meson-gxbb-scpi-sensors".
-
-[0] Documentation/devicetree/bindings/arm/arm,scpi.txt
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index 97fb96266344..6423377710ee 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -167,6 +167,7 @@ properties:
- description: Boards with the Amlogic Meson SM1 S905X3/D3/Y3 SoC
items:
- enum:
+ - bananapi,bpi-m5
- hardkernel,odroid-c4
- hardkernel,odroid-hc4
- khadas,vim3l
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
deleted file mode 100644
index 667d58e0a659..000000000000
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ /dev/null
@@ -1,239 +0,0 @@
-System Control and Management Interface (SCMI) Message Protocol
-----------------------------------------------------------
-
-The SCMI is intended to allow agents such as OSPM to manage various functions
-that are provided by the hardware platform it is running on, including power
-and performance functions.
-
-This binding is intended to define the interface the firmware implementing
-the SCMI as described in ARM document number ARM DEN 0056A ("ARM System Control
-and Management Interface Platform Design Document")[0] provide for OSPM in
-the device tree.
-
-Required properties:
-
-The scmi node with the following properties shall be under the /firmware/ node.
-
-- compatible : shall be "arm,scmi" or "arm,scmi-smc" for smc/hvc transports
-- mboxes: List of phandle and mailbox channel specifiers. It should contain
- exactly one or two mailboxes, one for transmitting messages("tx")
- and another optional for receiving the notifications("rx") if
- supported.
-- shmem : List of phandle pointing to the shared memory(SHM) area as per
- generic mailbox client binding.
-- #address-cells : should be '1' if the device has sub-nodes, maps to
- protocol identifier for a given sub-node.
-- #size-cells : should be '0' as 'reg' property doesn't have any size
- associated with it.
-- arm,smc-id : SMC id required when using smc or hvc transports
-
-Optional properties:
-
-- mbox-names: shall be "tx" or "rx" depending on mboxes entries.
-
-- interrupts : when using smc or hvc transports, this optional
- property indicates that msg completion by the platform is indicated
- by an interrupt rather than by the return of the smc call. This
- should not be used except when the platform requires such behavior.
-
-- interrupt-names : if "interrupts" is present, interrupt-names must also
- be present and have the value "a2p".
-
-See Documentation/devicetree/bindings/mailbox/mailbox.txt for more details
-about the generic mailbox controller and client driver bindings.
-
-The mailbox is the only permitted method of calling the SCMI firmware.
-Mailbox doorbell is used as a mechanism to alert the presence of a
-messages and/or notification.
-
-Each protocol supported shall have a sub-node with corresponding compatible
-as described in the following sections. If the platform supports dedicated
-communication channel for a particular protocol, the 3 properties namely:
-mboxes, mbox-names and shmem shall be present in the sub-node corresponding
-to that protocol.
-
-Clock/Performance bindings for the clocks/OPPs based on SCMI Message Protocol
-------------------------------------------------------------
-
-This binding uses the common clock binding[1].
-
-Required properties:
-- #clock-cells : Should be 1. Contains the Clock ID value used by SCMI commands.
-
-Power domain bindings for the power domains based on SCMI Message Protocol
-------------------------------------------------------------
-
-This binding for the SCMI power domain providers uses the generic power
-domain binding[2].
-
-Required properties:
- - #power-domain-cells : Should be 1. Contains the device or the power
- domain ID value used by SCMI commands.
-
-Regulator bindings for the SCMI Regulator based on SCMI Message Protocol
-------------------------------------------------------------
-An SCMI Regulator is permanently bound to a well defined SCMI Voltage Domain,
-and should be always positioned as a root regulator.
-It does not support any current operation.
-
-SCMI Regulators are grouped under a 'regulators' node which in turn is a child
-of the SCMI Voltage protocol node inside the desired SCMI instance node.
-
-This binding uses the common regulator binding[6].
-
-Required properties:
- - reg : shall identify an existent SCMI Voltage Domain.
-
-Sensor bindings for the sensors based on SCMI Message Protocol
---------------------------------------------------------------
-SCMI provides an API to access the various sensors on the SoC.
-
-Required properties:
-- #thermal-sensor-cells: should be set to 1. This property follows the
- thermal device tree bindings[3].
-
- Valid cell values are raw identifiers (Sensor ID)
- as used by the firmware. Refer to platform details
- for your implementation for the IDs to use.
-
-Reset signal bindings for the reset domains based on SCMI Message Protocol
-------------------------------------------------------------
-
-This binding for the SCMI reset domain providers uses the generic reset
-signal binding[5].
-
-Required properties:
- - #reset-cells : Should be 1. Contains the reset domain ID value used
- by SCMI commands.
-
-SRAM and Shared Memory for SCMI
--------------------------------
-
-A small area of SRAM is reserved for SCMI communication between application
-processors and SCP.
-
-The properties should follow the generic mmio-sram description found in [4]
-
-Each sub-node represents the reserved area for SCMI.
-
-Required sub-node properties:
-- reg : The base offset and size of the reserved area with the SRAM
-- compatible : should be "arm,scmi-shmem" for Non-secure SRAM based
- shared memory
-
-[0] http://infocenter.arm.com/help/topic/com.arm.doc.den0056a/index.html
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/power/power-domain.yaml
-[3] Documentation/devicetree/bindings/thermal/thermal*.yaml
-[4] Documentation/devicetree/bindings/sram/sram.yaml
-[5] Documentation/devicetree/bindings/reset/reset.txt
-[6] Documentation/devicetree/bindings/regulator/regulator.yaml
-
-Example:
-
-sram@50000000 {
- compatible = "mmio-sram";
- reg = <0x0 0x50000000 0x0 0x10000>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x0 0x50000000 0x10000>;
-
- cpu_scp_lpri: scp-shmem@0 {
- compatible = "arm,scmi-shmem";
- reg = <0x0 0x200>;
- };
-
- cpu_scp_hpri: scp-shmem@200 {
- compatible = "arm,scmi-shmem";
- reg = <0x200 0x200>;
- };
-};
-
-mailbox@40000000 {
- ....
- #mbox-cells = <1>;
- reg = <0x0 0x40000000 0x0 0x10000>;
-};
-
-firmware {
-
- ...
-
- scmi {
- compatible = "arm,scmi";
- mboxes = <&mailbox 0 &mailbox 1>;
- mbox-names = "tx", "rx";
- shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- scmi_devpd: protocol@11 {
- reg = <0x11>;
- #power-domain-cells = <1>;
- };
-
- scmi_dvfs: protocol@13 {
- reg = <0x13>;
- #clock-cells = <1>;
- };
-
- scmi_clk: protocol@14 {
- reg = <0x14>;
- #clock-cells = <1>;
- };
-
- scmi_sensors0: protocol@15 {
- reg = <0x15>;
- #thermal-sensor-cells = <1>;
- };
-
- scmi_reset: protocol@16 {
- reg = <0x16>;
- #reset-cells = <1>;
- };
-
- scmi_voltage: protocol@17 {
- reg = <0x17>;
-
- regulators {
- regulator_devX: regulator@0 {
- reg = <0x0>;
- regulator-max-microvolt = <3300000>;
- };
-
- regulator_devY: regulator@9 {
- reg = <0x9>;
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <4200000>;
- };
-
- ...
- };
- };
- };
-};
-
-cpu@0 {
- ...
- reg = <0 0>;
- clocks = <&scmi_dvfs 0>;
-};
-
-hdlcd@7ff60000 {
- ...
- reg = <0 0x7ff60000 0 0x1000>;
- clocks = <&scmi_clk 4>;
- power-domains = <&scmi_devpd 1>;
- resets = <&scmi_reset 10>;
-};
-
-thermal-zones {
- soc_thermal {
- polling-delay-passive = <100>;
- polling-delay = <1000>;
- /* sensor ID */
- thermal-sensors = <&scmi_sensors0 3>;
- ...
- };
-};
diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt
deleted file mode 100644
index bcd6c3ec471e..000000000000
--- a/Documentation/devicetree/bindings/arm/arm,scpi.txt
+++ /dev/null
@@ -1,219 +0,0 @@
-System Control and Power Interface (SCPI) Message Protocol
-----------------------------------------------------------
-
-Firmware implementing the SCPI described in ARM document number ARM DUI 0922B
-("ARM Compute Subsystem SCP: Message Interface Protocols")[0] can be used
-by Linux to initiate various system control and power operations.
-
-Required properties:
-
-- compatible : should be
- * "arm,scpi" : For implementations complying to SCPI v1.0 or above
- * "arm,scpi-pre-1.0" : For implementations complying to all
- unversioned releases prior to SCPI v1.0
-- mboxes: List of phandle and mailbox channel specifiers
- All the channels reserved by remote SCP firmware for use by
- SCPI message protocol should be specified in any order
-- shmem : List of phandle pointing to the shared memory(SHM) area between the
- processors using these mailboxes for IPC, one for each mailbox
- SHM can be any memory reserved for the purpose of this communication
- between the processors.
-
-See Documentation/devicetree/bindings/mailbox/mailbox.txt
-for more details about the generic mailbox controller and
-client driver bindings.
-
-Clock bindings for the clocks based on SCPI Message Protocol
-------------------------------------------------------------
-
-This binding uses the common clock binding[1].
-
-Container Node
-==============
-Required properties:
-- compatible : should be "arm,scpi-clocks"
- All the clocks provided by SCP firmware via SCPI message
- protocol much be listed as sub-nodes under this node.
-
-Sub-nodes
-=========
-Required properties:
-- compatible : shall include one of the following
- "arm,scpi-dvfs-clocks" - all the clocks that are variable and index based.
- These clocks don't provide an entire range of values between the
- limits but only discrete points within the range. The firmware
- provides the mapping for each such operating frequency and the
- index associated with it. The firmware also manages the
- voltage scaling appropriately with the clock scaling.
- "arm,scpi-variable-clocks" - all the clocks that are variable and provide full
- range within the specified range. The firmware provides the
- range of values within a specified range.
-
-Other required properties for all clocks(all from common clock binding):
-- #clock-cells : Should be 1. Contains the Clock ID value used by SCPI commands.
-- clock-output-names : shall be the corresponding names of the outputs.
-- clock-indices: The identifying number for the clocks(i.e.clock_id) in the
- node. It can be non linear and hence provide the mapping of identifiers
- into the clock-output-names array.
-
-SRAM and Shared Memory for SCPI
--------------------------------
-
-A small area of SRAM is reserved for SCPI communication between application
-processors and SCP.
-
-The properties should follow the generic mmio-sram description found in [3]
-
-Each sub-node represents the reserved area for SCPI.
-
-Required sub-node properties:
-- reg : The base offset and size of the reserved area with the SRAM
-- compatible : should be "arm,scp-shmem" for Non-secure SRAM based
- shared memory
-
-Sensor bindings for the sensors based on SCPI Message Protocol
---------------------------------------------------------------
-SCPI provides an API to access the various sensors on the SoC.
-
-Required properties:
-- compatible : should be "arm,scpi-sensors".
-- #thermal-sensor-cells: should be set to 1. This property follows the
- thermal device tree bindings[2].
-
- Valid cell values are raw identifiers (Sensor ID)
- as used by the firmware. Refer to platform details
- for your implementation for the IDs to use.
-
-Power domain bindings for the power domains based on SCPI Message Protocol
-------------------------------------------------------------
-
-This binding uses the generic power domain binding[4].
-
-PM domain providers
-===================
-
-Required properties:
- - #power-domain-cells : Should be 1. Contains the device or the power
- domain ID value used by SCPI commands.
- - num-domains: Total number of power domains provided by SCPI. This is
- needed as the SCPI message protocol lacks a mechanism to
- query this information at runtime.
-
-PM domain consumers
-===================
-
-Required properties:
- - power-domains : A phandle and PM domain specifier as defined by bindings of
- the power controller specified by phandle.
-
-[0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/thermal/thermal*.yaml
-[3] Documentation/devicetree/bindings/sram/sram.yaml
-[4] Documentation/devicetree/bindings/power/power-domain.yaml
-
-Example:
-
-sram: sram@50000000 {
- compatible = "arm,juno-sram-ns", "mmio-sram";
- reg = <0x0 0x50000000 0x0 0x10000>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x0 0x50000000 0x10000>;
-
- cpu_scp_lpri: scp-shmem@0 {
- compatible = "arm,juno-scp-shmem";
- reg = <0x0 0x200>;
- };
-
- cpu_scp_hpri: scp-shmem@200 {
- compatible = "arm,juno-scp-shmem";
- reg = <0x200 0x200>;
- };
-};
-
-mailbox: mailbox0@40000000 {
- ....
- #mbox-cells = <1>;
-};
-
-scpi_protocol: scpi@2e000000 {
- compatible = "arm,scpi";
- mboxes = <&mailbox 0 &mailbox 1>;
- shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
-
- clocks {
- compatible = "arm,scpi-clocks";
-
- scpi_dvfs: scpi_clocks@0 {
- compatible = "arm,scpi-dvfs-clocks";
- #clock-cells = <1>;
- clock-indices = <0>, <1>, <2>;
- clock-output-names = "atlclk", "aplclk","gpuclk";
- };
- scpi_clk: scpi_clocks@3 {
- compatible = "arm,scpi-variable-clocks";
- #clock-cells = <1>;
- clock-indices = <3>, <4>;
- clock-output-names = "pxlclk0", "pxlclk1";
- };
- };
-
- scpi_sensors0: sensors {
- compatible = "arm,scpi-sensors";
- #thermal-sensor-cells = <1>;
- };
-
- scpi_devpd: scpi-power-domains {
- compatible = "arm,scpi-power-domains";
- num-domains = <2>;
- #power-domain-cells = <1>;
- };
-};
-
-cpu@0 {
- ...
- reg = <0 0>;
- clocks = <&scpi_dvfs 0>;
-};
-
-hdlcd@7ff60000 {
- ...
- reg = <0 0x7ff60000 0 0x1000>;
- clocks = <&scpi_clk 4>;
- power-domains = <&scpi_devpd 1>;
-};
-
-thermal-zones {
- soc_thermal {
- polling-delay-passive = <100>;
- polling-delay = <1000>;
-
- /* sensor ID */
- thermal-sensors = <&scpi_sensors0 3>;
- ...
- };
-};
-
-In the above example, the #clock-cells is set to 1 as required.
-scpi_dvfs has 3 output clocks namely: atlclk, aplclk, and gpuclk with 0,
-1 and 2 as clock-indices. scpi_clk has 2 output clocks namely: pxlclk0
-and pxlclk1 with 3 and 4 as clock-indices.
-
-The first consumer in the example is cpu@0 and it has '0' as the clock
-specifier which points to the first entry in the output clocks of
-scpi_dvfs i.e. "atlclk".
-
-Similarly the second example is hdlcd@7ff60000 and it has pxlclk1 as input
-clock. '4' in the clock specifier here points to the second entry
-in the output clocks of scpi_clocks i.e. "pxlclk1"
-
-The thermal-sensors property in the soc_thermal node uses the
-temperature sensor provided by SCP firmware to setup a thermal
-zone. The ID "3" is the sensor identifier for the temperature sensor
-as used by the firmware.
-
-The num-domains property in scpi-power-domains domain specifies that
-SCPI provides 2 power domains. The hdlcd node uses the power domain with
-domain ID 1.
diff --git a/Documentation/devicetree/bindings/arm/arm,scu.yaml b/Documentation/devicetree/bindings/arm/arm,scu.yaml
new file mode 100644
index 000000000000..dae2aa27e641
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,scu.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,scu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Snoop Control Unit (SCU)
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ As part of the MPCore complex, Cortex-A5 and Cortex-A9 are provided
+ with a Snoop Control Unit. The register range is usually 256 (0x100)
+ bytes.
+
+ References:
+ - Cortex-A9: see DDI0407E Cortex-A9 MPCore Technical Reference Manual
+ Revision r2p0
+ - Cortex-A5: see DDI0434B Cortex-A5 MPCore Technical Reference Manual
+ Revision r0p1
+ - ARM11 MPCore: see DDI0360F ARM 11 MPCore Processor Technical Reference
+    Manual Revision r2p0
+
+properties:
+ compatible:
+ enum:
+ - arm,cortex-a9-scu
+ - arm,cortex-a5-scu
+ - arm,arm11mp-scu
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ scu@a0410000 {
+ compatible = "arm,cortex-a9-scu";
+ reg = <0xa0410000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
index 812ae8cc5959..230b80d9d6cf 100644
--- a/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/bcm2835.yaml
@@ -18,6 +18,7 @@ properties:
- description: BCM2711 based Boards
items:
- enum:
+ - raspberrypi,400
- raspberrypi,4-model-b
- const: brcm,bcm2711
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index f3c7249c73d6..9a2432a88074 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -257,6 +257,13 @@ properties:
where voltage is in V, frequency is in MHz.
+ performance-domains:
+ maxItems: 1
+ description:
+ List of phandles and performance domain specifiers, as defined by
+ bindings of the performance domain provider. See also
+ dvfs/performance-domain.yaml.
+
power-domains:
description:
List of phandles and PM domain specifiers, as defined by bindings of the
diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
deleted file mode 100644
index d38834c67dff..000000000000
--- a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-OP-TEE Device Tree Bindings
-
-OP-TEE is a piece of software using hardware features to provide a Trusted
-Execution Environment. The security can be provided with ARM TrustZone, but
-also by virtualization or a separate chip.
-
-We're using "linaro" as the first part of the compatible property for
-the reference implementation maintained by Linaro.
-
-* OP-TEE based on ARM TrustZone required properties:
-
-- compatible : should contain "linaro,optee-tz"
-
-- method : The method of calling the OP-TEE Trusted OS. Permitted
- values are:
-
- "smc" : SMC #0, with the register assignments specified
- in drivers/tee/optee/optee_smc.h
-
- "hvc" : HVC #0, with the register assignments specified
- in drivers/tee/optee/optee_smc.h
-
-
-
-Example:
- firmware {
- optee {
- compatible = "linaro,optee-tz";
- method = "smc";
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml
new file mode 100644
index 000000000000..c24047c1fdd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/firmware/linaro,optee-tz.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OP-TEE Device Tree Bindings
+
+maintainers:
+ - Jens Wiklander <jens.wiklander@linaro.org>
+
+description: |
+ OP-TEE is a piece of software using hardware features to provide a Trusted
+ Execution Environment. The security can be provided with ARM TrustZone, but
+ also by virtualization or a separate chip.
+
+ We're using "linaro" as the first part of the compatible property for
+ the reference implementation maintained by Linaro.
+
+properties:
+ $nodename:
+ const: optee
+
+ compatible:
+ const: linaro,optee-tz
+
+ method:
+ enum: [smc, hvc]
+ description: |
+      The method of calling the OP-TEE Trusted OS, depending on whether the
+      SMC or HVC instruction is used (SMC #0 or HVC #0, respectively). The
+      register assignments are specified in drivers/tee/optee/optee_smc.h.
+
+required:
+ - compatible
+ - method
+
+additionalProperties: false
+
+examples:
+ - |
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+
+ - |
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "hvc";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index e3c50f231d71..1c827c1954dc 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -197,6 +197,7 @@ properties:
- boundary,imx6q-nitrogen6x
- compulab,cm-fx6 # CompuLab CM-FX6
- dmo,imx6q-edmqmx6 # Data Modul eDM-QMX6 Board
+ - ds,imx6q-sbc # Da Sheng COM-9XX Modules
- embest,imx6q-marsboard # Embest MarS Board i.MX6Dual
- emtrion,emcon-mx6 # emCON-MX6D or emCON-MX6Q SoM
- emtrion,emcon-mx6-avari # emCON-MX6D or emCON-MX6Q SoM on Avari Base
@@ -400,6 +401,17 @@ properties:
- const: armadeus,imx6dl-apf6 # APF6 (Solo) SoM
- const: fsl,imx6dl
+ - description: i.MX6DL based congatec QMX6 Boards
+ items:
+ - enum:
+ - ge,imx6dl-b105v2 # General Electric B105v2
+ - ge,imx6dl-b105pv2 # General Electric B105Pv2
+ - ge,imx6dl-b125v2 # General Electric B125v2
+ - ge,imx6dl-b125pv2 # General Electric B125Pv2
+ - ge,imx6dl-b155v2 # General Electric B155v2
+ - const: congatec,qmx6
+ - const: fsl,imx6dl
+
- description: i.MX6DL based DFI FS700-M60-6DL Board
items:
- const: dfi,fs700-m60-6dl
@@ -685,6 +697,7 @@ properties:
- gw,imx8mm-gw71xx-0x # i.MX8MM Gateworks Development Kit
- gw,imx8mm-gw72xx-0x # i.MX8MM Gateworks Development Kit
- gw,imx8mm-gw73xx-0x # i.MX8MM Gateworks Development Kit
+ - gw,imx8mm-gw7901 # i.MX8MM Gateworks Board
- kontron,imx8mm-n801x-som # i.MX8MM Kontron SL (N801X) SOM
- variscite,var-som-mx8mm # i.MX8MM Variscite VAR-SOM-MX8MM module
- const: fsl,imx8mm
diff --git a/Documentation/devicetree/bindings/arm/intel,keembay.yaml b/Documentation/devicetree/bindings/arm/intel,keembay.yaml
index 69cd30872928..107e686ab207 100644
--- a/Documentation/devicetree/bindings/arm/intel,keembay.yaml
+++ b/Documentation/devicetree/bindings/arm/intel,keembay.yaml
@@ -11,6 +11,8 @@ maintainers:
- Daniele Alessandrelli <daniele.alessandrelli@intel.com>
properties:
+ $nodename:
+ const: '/'
compatible:
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml b/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
index d72e92bdf7c1..230bffeec0e5 100644
--- a/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
+++ b/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
@@ -17,6 +17,7 @@ properties:
- items:
- enum:
- linksys,nslu2
+ - welltech,epbx100
- const: intel,ixp42x
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/juno,scpi.txt b/Documentation/devicetree/bindings/arm/juno,scpi.txt
deleted file mode 100644
index 2ace8696bbee..000000000000
--- a/Documentation/devicetree/bindings/arm/juno,scpi.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-System Control and Power Interface (SCPI) Message Protocol
-(in addition to the standard binding in [0])
-
-Juno SRAM and Shared Memory for SCPI
-------------------------------------
-
-Required properties:
-- compatible : should be "arm,juno-sram-ns" for Non-secure SRAM
-
-Each sub-node represents the reserved area for SCPI.
-
-Required sub-node properties:
-- reg : The base offset and size of the reserved area with the SRAM
-- compatible : should be "arm,juno-scp-shmem" for Non-secure SRAM based
- shared memory on Juno platforms
-
-Sensor bindings for the sensors based on SCPI Message Protocol
---------------------------------------------------------------
-Required properties:
-- compatible : should be "arm,scpi-sensors".
-- #thermal-sensor-cells: should be set to 1.
- For Juno R0 and Juno R1 refer to [1] for the
- sensor identifiers
-
-[0] Documentation/devicetree/bindings/arm/arm,scpi.txt
-[1] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0922b/apas03s22.html
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
deleted file mode 100644
index 6f0cd31c1520..000000000000
--- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-Texas Instruments System Control Interface (TI-SCI) Message Protocol
---------------------------------------------------------------------
-
-Texas Instrument's processors including those belonging to Keystone generation
-of processors have separate hardware entity which is now responsible for the
-management of the System on Chip (SoC) system. These include various system
-level functions as well.
-
-An example of such an SoC is K2G, which contains the system control hardware
-block called Power Management Micro Controller (PMMC). This hardware block is
-initialized early into boot process and provides services to Operating Systems
-on multiple processors including ones running Linux.
-
-See http://processors.wiki.ti.com/index.php/TISCI for protocol definition.
-
-TI-SCI controller Device Node:
-=============================
-
-The TI-SCI node describes the Texas Instrument's System Controller entity node.
-This parent node may optionally have additional children nodes which describe
-specific functionality such as clocks, power domain, reset or additional
-functionality as may be required for the SoC. This hierarchy also describes the
-relationship between the TI-SCI parent node to the child node.
-
-Required properties:
--------------------
-- compatible: should be "ti,k2g-sci" for TI 66AK2G SoC
- should be "ti,am654-sci" for for TI AM654 SoC
-- mbox-names:
- "rx" - Mailbox corresponding to receive path
- "tx" - Mailbox corresponding to transmit path
-
-- mboxes: Mailboxes corresponding to the mbox-names. Each value of the mboxes
- property should contain a phandle to the mailbox controller device
- node and an args specifier that will be the phandle to the intended
- sub-mailbox child node to be used for communication.
-
-See Documentation/devicetree/bindings/mailbox/mailbox.txt for more details
-about the generic mailbox controller and client driver bindings. Also see
-Documentation/devicetree/bindings/mailbox/ti,message-manager.txt for typical
-controller that is used to communicate with this System controllers.
-
-Optional Properties:
--------------------
-- reg-names:
- debug_messages - Map the Debug message region
-- reg: register space corresponding to the debug_messages
-- ti,system-reboot-controller: If system reboot can be triggered by SoC reboot
-- ti,host-id: Integer value corresponding to the host ID assigned by Firmware
- for identification of host processing entities such as virtual
- machines
-
-Example (K2G):
--------------
- pmmc: pmmc {
- compatible = "ti,k2g-sci";
- ti,host-id = <2>;
- mbox-names = "rx", "tx";
- mboxes= <&msgmgr &msgmgr_proxy_pmmc_rx>,
- <&msgmgr &msgmgr_proxy_pmmc_tx>;
- reg-names = "debug_messages";
- reg = <0x02921800 0x800>;
- };
-
-
-TI-SCI Client Device Node:
-=========================
-
-Client nodes are maintained as children of the relevant TI-SCI device node.
-
-Example (K2G):
--------------
- pmmc: pmmc {
- compatible = "ti,k2g-sci";
- ...
-
- my_clk_node: clk_node {
- ...
- ...
- };
-
- my_pd_node: pd_node {
- ...
- ...
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
new file mode 100644
index 000000000000..34f5f877d444
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/keystone/ti,sci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-SCI controller device node bindings
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+description: |
+  Texas Instruments' processors, including those belonging to the Keystone
+  generation, have a separate hardware entity which is now responsible for the
+  management of the System on Chip (SoC). This includes various system level
+  functions as well.
+
+ An example of such an SoC is K2G, which contains the system control hardware
+ block called Power Management Micro Controller (PMMC). This hardware block is
+ initialized early into boot process and provides services to Operating Systems
+ on multiple processors including ones running Linux.
+
+ See http://processors.wiki.ti.com/index.php/TISCI for protocol definition.
+
+  The TI-SCI node describes the Texas Instruments System Controller entity.
+  This parent node may optionally have additional child nodes which describe
+  specific functionality such as clocks, power domains, resets or additional
+  functionality as may be required for the SoC. This hierarchy also describes
+  the relationship between the TI-SCI parent node and the child nodes.
+
+properties:
+ $nodename:
+ pattern: "^system-controller@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - description: System controller on TI 66AK2G SoC and other K3 SoCs
+ items:
+ - const: ti,k2g-sci
+ - description: System controller on TI AM654 SoC
+ items:
+ - const: ti,am654-sci
+
+ reg-names:
+ description: |
+      Specifies the memory mapped region for debug messages that the TI-SCI
+      controller optionally makes available.
+ const: debug_messages
+
+ reg:
+ minItems: 1
+
+ mbox-names:
+ description: |
+      Specifies the mailboxes used to communicate with the TI-SCI controller.
+ items:
+ - const: rx
+ - const: tx
+
+ mboxes:
+ minItems: 2
+
+ ti,system-reboot-controller:
+    description: Determines if system reboot can be triggered by SoC reboot
+ type: boolean
+
+ ti,host-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Value corresponding to the host ID assigned by Firmware
+ for identification of host processing entities such as virtual machines.
+
+ power-controller:
+ type: object
+ $ref: /schemas/soc/ti/sci-pm-domain.yaml#
+
+ clock-controller:
+ type: object
+ $ref: /schemas/clock/ti,sci-clk.yaml#
+
+ reset-controller:
+ type: object
+ $ref: /schemas/reset/ti,sci-reset.yaml#
+
+required:
+ - compatible
+ - mbox-names
+ - mboxes
+
+additionalProperties: false
+
+examples:
+ - |
+ pmmc: system-controller@2921800 {
+ compatible = "ti,k2g-sci";
+ ti,system-reboot-controller;
+ mbox-names = "rx", "tx";
+ mboxes= <&msgmgr 5 2>,
+ <&msgmgr 0 0>;
+ reg-names = "debug_messages";
+ reg = <0x02921800 0x800>;
+ };
+
+ - |
+ dmsc: system-controller@44083000 {
+ compatible = "ti,k2g-sci";
+ ti,host-id = <12>;
+ mbox-names = "rx", "tx";
+ mboxes= <&secure_proxy_main 11>,
+ <&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x44083000 0x1000>;
+
+ k3_pds: power-controller {
+ compatible = "ti,sci-pm-domain";
+ #power-domain-cells = <2>;
+ };
+
+ k3_clks: clock-controller {
+ compatible = "ti,k2g-sci-clk";
+ #clock-cells = <2>;
+ };
+
+ k3_reset: reset-controller {
+ compatible = "ti,sci-reset";
+ #reset-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index aff57a8c8c30..80a05f6fee85 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -122,6 +122,10 @@ properties:
- enum:
- mediatek,mt8195-evb
- const: mediatek,mt8195
+ - description: Google Burnet (HP Chromebook x360 11MK G3 EE)
+ items:
+ - const: google,burnet
+ - const: mediatek,mt8183
- description: Google Krane (Lenovo IdeaPad Duet, 10e,...)
items:
- enum:
@@ -133,9 +137,19 @@ properties:
items:
- const: google,damu
- const: mediatek,mt8183
- - description: Google Juniper (Acer Chromebook Spin 311)
+ - description: Google Fennel (Lenovo IdeaPad 3 Chromebook)
+ items:
+ - enum:
+ - google,fennel-sku0
+ - google,fennel-sku1
+ - google,fennel-sku6
+ - const: google,fennel
+ - const: mediatek,mt8183
+ - description: Google Juniper (Acer Chromebook Spin 311) / Kenzo (Acer Chromebook 311)
items:
- - const: google,juniper-sku16
+ - enum:
+ - google,juniper-sku16
+ - google,juniper-sku17
- const: google,juniper
- const: mediatek,mt8183
- description: Google Kakadu (ASUS Chromebook Detachable CM3)
@@ -144,6 +158,10 @@ properties:
- const: google,kakadu-rev2
- const: google,kakadu
- const: mediatek,mt8183
+ - description: Google Kappa (HP Chromebook 11a)
+ items:
+ - const: google,kappa
+ - const: mediatek,mt8183
- description: Google Kodama (Lenovo 10e Chromebook Tablet)
items:
- enum:
@@ -153,6 +171,13 @@ properties:
- google,kodama-sku32
- const: google,kodama
- const: mediatek,mt8183
+ - description: Google Willow (Acer Chromebook 311 C722/C722T)
+ items:
+ - enum:
+ - google,willow-sku0
+ - google,willow-sku1
+ - const: google,willow
+ - const: mediatek,mt8183
- items:
- enum:
- mediatek,mt8183-pumpkin
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt b/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
index ae4afc6dcfe0..94d50a949be1 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
@@ -25,6 +25,7 @@ PROPERTIES
"qcom,saw2"
A more specific value could be one of:
"qcom,apq8064-saw2-v1.1-cpu"
+ "qcom,msm8226-saw2-v2.1-cpu"
"qcom,msm8974-saw2-v2.1-cpu"
"qcom,apq8084-saw2-v2.1-cpu"
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 9b27e991bddc..9720b00c41d2 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -36,17 +36,20 @@ description: |
msm8992
msm8994
msm8996
+ sa8155p
sc7180
sc7280
sdm630
sdm660
sdm845
sdx55
+ sm8150
sm8250
sm8350
The 'board' element must be one of the following strings:
+ adp
cdp
cp01-c1
dragonboard
@@ -178,6 +181,7 @@ properties:
- items:
- enum:
- qcom,sc7280-idp
+ - google,senor
- const: qcom,sc7280
- items:
@@ -200,6 +204,16 @@ properties:
- items:
- enum:
+ - qcom,sa8155p-adp
+ - const: qcom,sa8155p
+
+ - items:
+ - enum:
+ - qcom,sm8150-mtp
+ - const: qcom,sm8150
+
+ - items:
+ - enum:
- qcom,qrb5165-rb5
- qcom,sm8250-mtp
- const: qcom,sm8250
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
index 5fd0696a9f91..a0cce4e25039 100644
--- a/Documentation/devicetree/bindings/arm/renesas.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -302,6 +302,24 @@ properties:
- renesas,rzn1d400-db # RZN1D-DB (RZ/N1D Demo Board for the RZ/N1D 400 pins package)
- const: renesas,r9a06g032
+ - description: RZ/G2UL (R9A07G043)
+ items:
+ - enum:
+ - renesas,r9a07g043u11 # RZ/G2UL Type-1
+ - renesas,r9a07g043u12 # RZ/G2UL Type-2
+ - const: renesas,r9a07g043
+
+ - description: RZ/G2{L,LC} (R9A07G044)
+ items:
+ - enum:
+ - renesas,smarc-evk # SMARC EVK
+ - enum:
+ - renesas,r9a07g044c1 # Single Cortex-A55 RZ/G2LC
+ - renesas,r9a07g044c2 # Dual Cortex-A55 RZ/G2LC
+ - renesas,r9a07g044l1 # Single Cortex-A55 RZ/G2L
+ - renesas,r9a07g044l2 # Dual Cortex-A55 RZ/G2L
+ - const: renesas,r9a07g044
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 4a6f772c1043..6546b015fc62 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -600,6 +600,11 @@ properties:
- const: zkmagic,a95x-z2
- const: rockchip,rk3318
+ - description: Rockchip RK3568 Evaluation board
+ items:
+ - const: rockchip,rk3568-evb1-v10
+ - const: rockchip,rk3568
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/arm/rockchip/pmu.txt b/Documentation/devicetree/bindings/arm/rockchip/pmu.txt
deleted file mode 100644
index 3ee9b428b2f7..000000000000
--- a/Documentation/devicetree/bindings/arm/rockchip/pmu.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Rockchip power-management-unit:
--------------------------------
-
-The pmu is used to turn off and on different power domains of the SoCs
-This includes the power to the CPU cores.
-
-Required node properties:
-- compatible value : = "rockchip,rk3066-pmu";
-- reg : physical base address and the size of the registers window
-
-Example:
-
- pmu@20004000 {
- compatible = "rockchip,rk3066-pmu";
- reg = <0x20004000 0x100>;
- };
diff --git a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
new file mode 100644
index 000000000000..53115b92d17f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/rockchip/pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Power Management Unit (PMU)
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The PMU is used to turn on and off different power domains of the SoCs.
+ This includes the power to the CPU cores.
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-pmu
+ - rockchip,rk3066-pmu
+ - rockchip,rk3288-pmu
+ - rockchip,rk3399-pmu
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - rockchip,px30-pmu
+ - rockchip,rk3066-pmu
+ - rockchip,rk3288-pmu
+ - rockchip,rk3399-pmu
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: true
+
+examples:
+ - |
+ pmu@20004000 {
+ compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd";
+ reg = <0x20004000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/scu.txt b/Documentation/devicetree/bindings/arm/scu.txt
deleted file mode 100644
index 74d0a780ce51..000000000000
--- a/Documentation/devicetree/bindings/arm/scu.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* ARM Snoop Control Unit (SCU)
-
-As part of the MPCore complex, Cortex-A5 and Cortex-A9 are provided
-with a Snoop Control Unit. The register range is usually 256 (0x100)
-bytes.
-
-References:
-
-- Cortex-A9: see DDI0407E Cortex-A9 MPCore Technical Reference Manual
- Revision r2p0
-- Cortex-A5: see DDI0434B Cortex-A5 MPCore Technical Reference Manual
- Revision r0p1
-- ARM11 MPCore: see DDI0360F ARM 11 MPCore Processor Technical Reference
- Manial Revision r2p0
-
-- compatible : Should be:
- "arm,cortex-a9-scu"
- "arm,cortex-a5-scu"
- "arm,arm11mp-scu"
-
-- reg : Specify the base address and the size of the SCU register window.
-
-Example:
-
-scu@a0410000 {
- compatible = "arm,cortex-a9-scu";
- reg = <0xa0410000 0x100>;
-};
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index ac750025a2eb..889128acf49a 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -224,6 +224,12 @@ properties:
- const: empire-electronix,m712
- const: allwinner,sun5i-a13
+ - description: Forlinx OKA40i-C Development board
+ items:
+ - const: forlinx,oka40i-c
+ - const: forlinx,feta40i-c
+ - const: allwinner,sun8i-r40
+
- description: FriendlyARM NanoPi A64
items:
- const: friendlyarm,nanopi-a64
@@ -269,6 +275,11 @@ properties:
- const: friendlyarm,nanopi-r1
- const: allwinner,sun8i-h3
+ - description: FriendlyARM NanoPi R1S H5
+ items:
+ - const: friendlyarm,nanopi-r1s-h5
+ - const: allwinner,sun50i-h5
+
- description: FriendlyARM ZeroPi
items:
- const: friendlyarm,zeropi
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
index 43fd2f8927d0..0afec83cc723 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
@@ -301,6 +301,33 @@ patternProperties:
additionalProperties: false
+ core-domain:
+ type: object
+ description: |
+ The vast majority of hardware blocks of a Tegra SoC belong to the
+ Core power domain, which has a dedicated voltage rail that powers
+ the blocks.
+
+ properties:
+ operating-points-v2:
+ description:
+ Should contain the level, voltages and opp-supported-hw properties.
+ The supported-hw value is a bitfield indicating the SoC speedo or
+ process ID mask.
+
+ "#power-domain-cells":
+ const: 0
+
+ required:
+ - operating-points-v2
+ - "#power-domain-cells"
+
+ additionalProperties: false
+
+ core-supply:
+ description:
+ Phandle to voltage regulator connected to the SoC Core power rail.
+
required:
- compatible
- reg
@@ -325,6 +352,7 @@ examples:
tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra210-pmc";
reg = <0x7000e400 0x400>;
+ core-supply = <&regulator>;
clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
#clock-cells = <1>;
@@ -338,17 +366,24 @@ examples:
nvidia,core-power-req-active-high;
nvidia,sys-clock-req-active-high;
+ pd_core: core-domain {
+ operating-points-v2 = <&core_opp_table>;
+ #power-domain-cells = <0>;
+ };
+
powergates {
pd_audio: aud {
clocks = <&tegra_car TEGRA210_CLK_APE>,
<&tegra_car TEGRA210_CLK_APB2APE>;
resets = <&tegra_car 198>;
+ power-domains = <&pd_core>;
#power-domain-cells = <0>;
};
pd_xusbss: xusba {
clocks = <&tegra_car TEGRA210_CLK_XUSB_SS>;
resets = <&tegra_car TEGRA210_CLK_XUSB_SS>;
+ power-domains = <&pd_core>;
#power-domain-cells = <0>;
};
};
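The example above references a core_opp_table node without defining it. A hypothetical sketch of such a table, following the description added in this hunk (level, voltages and opp-supported-hw), is shown below; the entry name and all values are placeholders, not part of this change:

    core_opp_table: opp-table {
        compatible = "operating-points-v2";

        /* placeholder entry; real tables carry one entry per supported level */
        opp-1125000 {
            opp-microvolt = <1125000 1125000 1300000>;
            opp-level = <1125000>;
            opp-supported-hw = <0x0003>;
        };
    };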
diff --git a/Documentation/devicetree/bindings/arm/ux500/boards.txt b/Documentation/devicetree/bindings/arm/ux500/boards.txt
index 89408de55bfd..18d55532d31e 100644
--- a/Documentation/devicetree/bindings/arm/ux500/boards.txt
+++ b/Documentation/devicetree/bindings/arm/ux500/boards.txt
@@ -20,13 +20,13 @@ during retention, system won't boot without this):
compatible = "ste,dbx500-backupram"
scu:
- see binding for arm/scu.txt
+ see binding for arm/arm,scu.yaml
interrupt-controller:
see binding for interrupt-controller/arm,gic.txt
timer:
- see binding for timer/arm,twd.txt
+ see binding for timer/arm,twd-timer.yaml
clocks:
see binding for clocks/ux500.txt
diff --git a/Documentation/devicetree/bindings/ata/nvidia,tegra-ahci.yaml b/Documentation/devicetree/bindings/ata/nvidia,tegra-ahci.yaml
index a75e9a8f539a..3c7a2425f3e6 100644
--- a/Documentation/devicetree/bindings/ata/nvidia,tegra-ahci.yaml
+++ b/Documentation/devicetree/bindings/ata/nvidia,tegra-ahci.yaml
@@ -20,7 +20,6 @@ properties:
reg:
minItems: 2
- maxItems: 3
items:
- description: AHCI registers
- description: SATA configuration and IPFS registers
diff --git a/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml b/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
index d06096a7ba4b..c060c7914cae 100644
--- a/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
+++ b/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
@@ -53,6 +53,17 @@ required:
- reg
- interrupts
- clocks
+ - power-domains
+
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: renesas,sata-r8a7779
+then:
+ required:
+ - resets
additionalProperties: false
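With this change, power-domains becomes required for all controllers and resets for everything except the R-Car H1 (r8a7779) one. A hypothetical R-Car Gen3 node satisfying the schema might look like the sketch below; the address, interrupt and clock/reset specifiers are illustrative and not taken from this diff:

    sata: sata@ee300000 {
        compatible = "renesas,sata-r8a7795", "renesas,rcar-gen3-sata";
        reg = <0xee300000 0x200000>;
        interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&cpg CPG_MOD 815>;
        power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
        resets = <&cpg 815>;
    };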
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
index a27025cd3909..c4b7243ddcf2 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
@@ -51,7 +51,6 @@ properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: High Frequency Oscillator (usually at 24MHz)
- description: Low Frequency Oscillator (usually at 32kHz)
@@ -60,7 +59,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: hosc
- const: losc
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
deleted file mode 100644
index ab730ea0a560..000000000000
--- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-Broadcom iProc Family Clocks
-
-This binding uses the common clock binding:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The iProc clock controller manages clocks that are common to the iProc family.
-An SoC from the iProc family may have several PPLs, e.g., ARMPLL, GENPLL,
-LCPLL0, MIPIPLL, and etc., all derived from an onboard crystal. Each PLL
-comprises of several leaf clocks
-
-Required properties for a PLL and its leaf clocks:
-
-- compatible:
- Should have a value of the form "brcm,<soc>-<pll>". For example, GENPLL on
-Cygnus has a compatible string of "brcm,cygnus-genpll"
-
-- #clock-cells:
- Have a value of <1> since there are more than 1 leaf clock of a given PLL
-
-- reg:
- Define the base and range of the I/O address space that contain the iProc
-clock control registers required for the PLL
-
-- clocks:
- The input parent clock phandle for the PLL. For most iProc PLLs, this is an
-onboard crystal with a fixed rate
-
-- clock-output-names:
- An ordered list of strings defining the names of the clocks
-
-Example:
-
- osc: oscillator {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <25000000>;
- };
-
- genpll: genpll {
- #clock-cells = <1>;
- compatible = "brcm,cygnus-genpll";
- reg = <0x0301d000 0x2c>, <0x0301c020 0x4>;
- clocks = <&osc>;
- clock-output-names = "genpll", "axi21", "250mhz", "ihost_sys",
- "enet_sw", "audio_125", "can";
- };
-
-Required properties for ASIU clocks:
-
-ASIU clocks are a special case. These clocks are derived directly from the
-reference clock of the onboard crystal
-
-- compatible:
- Should have a value of the form "brcm,<soc>-asiu-clk". For example, ASIU
-clocks for Cygnus have a compatible string of "brcm,cygnus-asiu-clk"
-
-- #clock-cells:
- Have a value of <1> since there are more than 1 ASIU clocks
-
-- reg:
- Define the base and range of the I/O address space that contain the iProc
-clock control registers required for ASIU clocks
-
-- clocks:
- The input parent clock phandle for the ASIU clock, i.e., the onboard
-crystal
-
-- clock-output-names:
- An ordered list of strings defining the names of the ASIU clocks
-
-Example:
-
- osc: oscillator {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <25000000>;
- };
-
- asiu_clks: asiu_clks {
- #clock-cells = <1>;
- compatible = "brcm,cygnus-asiu-clk";
- reg = <0x0301d048 0xc>, <0x180aa024 0x4>;
- clocks = <&osc>;
- clock-output-names = "keypad", "adc/touch", "pwm";
- };
-
-Cygnus
-------
-PLL and leaf clock compatible strings for Cygnus are:
- "brcm,cygnus-armpll"
- "brcm,cygnus-genpll"
- "brcm,cygnus-lcpll0"
- "brcm,cygnus-mipipll"
- "brcm,cygnus-asiu-clk"
- "brcm,cygnus-audiopll"
-
-The following table defines the set of PLL/clock index and ID for Cygnus.
-These clock IDs are defined in:
- "include/dt-bindings/clock/bcm-cygnus.h"
-
- Clock Source (Parent) Index ID
- --- ----- ----- ---------
- crystal N/A N/A N/A
-
- armpll crystal N/A N/A
-
- keypad crystal (ASIU) 0 BCM_CYGNUS_ASIU_KEYPAD_CLK
- adc/tsc crystal (ASIU) 1 BCM_CYGNUS_ASIU_ADC_CLK
- pwm crystal (ASIU) 2 BCM_CYGNUS_ASIU_PWM_CLK
-
- genpll crystal 0 BCM_CYGNUS_GENPLL
- axi21 genpll 1 BCM_CYGNUS_GENPLL_AXI21_CLK
- 250mhz genpll 2 BCM_CYGNUS_GENPLL_250MHZ_CLK
- ihost_sys genpll 3 BCM_CYGNUS_GENPLL_IHOST_SYS_CLK
- enet_sw genpll 4 BCM_CYGNUS_GENPLL_ENET_SW_CLK
- audio_125 genpll 5 BCM_CYGNUS_GENPLL_AUDIO_125_CLK
- can genpll 6 BCM_CYGNUS_GENPLL_CAN_CLK
-
- lcpll0 crystal 0 BCM_CYGNUS_LCPLL0
- pcie_phy lcpll0 1 BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK
- ddr_phy lcpll0 2 BCM_CYGNUS_LCPLL0_DDR_PHY_CLK
- sdio lcpll0 3 BCM_CYGNUS_LCPLL0_SDIO_CLK
- usb_phy lcpll0 4 BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK
- smart_card lcpll0 5 BCM_CYGNUS_LCPLL0_SMART_CARD_CLK
- ch5_unused lcpll0 6 BCM_CYGNUS_LCPLL0_CH5_UNUSED
-
- mipipll crystal 0 BCM_CYGNUS_MIPIPLL
- ch0_unused mipipll 1 BCM_CYGNUS_MIPIPLL_CH0_UNUSED
- ch1_lcd mipipll 2 BCM_CYGNUS_MIPIPLL_CH1_LCD
- ch2_v3d mipipll 3 BCM_CYGNUS_MIPIPLL_CH2_V3D
- ch3_unused mipipll 4 BCM_CYGNUS_MIPIPLL_CH3_UNUSED
- ch4_unused mipipll 5 BCM_CYGNUS_MIPIPLL_CH4_UNUSED
- ch5_unused mipipll 6 BCM_CYGNUS_MIPIPLL_CH5_UNUSED
-
- audiopll crystal 0 BCM_CYGNUS_AUDIOPLL
- ch0_audio audiopll 1 BCM_CYGNUS_AUDIOPLL_CH0
- ch1_audio audiopll 2 BCM_CYGNUS_AUDIOPLL_CH1
- ch2_audio audiopll 3 BCM_CYGNUS_AUDIOPLL_CH2
-
-Hurricane 2
-------
-PLL and leaf clock compatible strings for Hurricane 2 are:
- "brcm,hr2-armpll"
-
-The following table defines the set of PLL/clock for Hurricane 2:
-
- Clock Source Index ID
- --- ----- ----- ---------
- crystal N/A N/A N/A
-
- armpll crystal N/A N/A
-
-
-Northstar and Northstar Plus
-------
-PLL and leaf clock compatible strings for Northstar and Northstar Plus are:
- "brcm,nsp-armpll"
- "brcm,nsp-genpll"
- "brcm,nsp-lcpll0"
-
-The following table defines the set of PLL/clock index and ID for Northstar and
-Northstar Plus. These clock IDs are defined in:
- "include/dt-bindings/clock/bcm-nsp.h"
-
- Clock Source Index ID
- --- ----- ----- ---------
- crystal N/A N/A N/A
-
- armpll crystal N/A N/A
-
- genpll crystal 0 BCM_NSP_GENPLL
- phy genpll 1 BCM_NSP_GENPLL_PHY_CLK
- ethernetclk genpll 2 BCM_NSP_GENPLL_ENET_SW_CLK
- usbclk genpll 3 BCM_NSP_GENPLL_USB_PHY_REF_CLK
- iprocfast genpll 4 BCM_NSP_GENPLL_IPROCFAST_CLK
- sata1 genpll 5 BCM_NSP_GENPLL_SATA1_CLK
- sata2 genpll 6 BCM_NSP_GENPLL_SATA2_CLK
-
- lcpll0 crystal 0 BCM_NSP_LCPLL0
- pcie_phy lcpll0 1 BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK
- sdio lcpll0 2 BCM_NSP_LCPLL0_SDIO_CLK
- ddr_phy lcpll0 3 BCM_NSP_LCPLL0_DDR_PHY_CLK
-
-Northstar 2
------------
-PLL and leaf clock compatible strings for Northstar 2 are:
- "brcm,ns2-genpll-scr"
- "brcm,ns2-genpll-sw"
- "brcm,ns2-lcpll-ddr"
- "brcm,ns2-lcpll-ports"
-
-The following table defines the set of PLL/clock index and ID for Northstar 2.
-These clock IDs are defined in:
- "include/dt-bindings/clock/bcm-ns2.h"
-
- Clock Source Index ID
- --- ----- ----- ---------
- crystal N/A N/A N/A
-
- genpll_scr crystal 0 BCM_NS2_GENPLL_SCR
- scr genpll_scr 1 BCM_NS2_GENPLL_SCR_SCR_CLK
- fs genpll_scr 2 BCM_NS2_GENPLL_SCR_FS_CLK
- audio_ref genpll_scr 3 BCM_NS2_GENPLL_SCR_AUDIO_CLK
- ch3_unused genpll_scr 4 BCM_NS2_GENPLL_SCR_CH3_UNUSED
- ch4_unused genpll_scr 5 BCM_NS2_GENPLL_SCR_CH4_UNUSED
- ch5_unused genpll_scr 6 BCM_NS2_GENPLL_SCR_CH5_UNUSED
-
- genpll_sw crystal 0 BCM_NS2_GENPLL_SW
- rpe genpll_sw 1 BCM_NS2_GENPLL_SW_RPE_CLK
- 250 genpll_sw 2 BCM_NS2_GENPLL_SW_250_CLK
- nic genpll_sw 3 BCM_NS2_GENPLL_SW_NIC_CLK
- chimp genpll_sw 4 BCM_NS2_GENPLL_SW_CHIMP_CLK
- port genpll_sw 5 BCM_NS2_GENPLL_SW_PORT_CLK
- sdio genpll_sw 6 BCM_NS2_GENPLL_SW_SDIO_CLK
-
- lcpll_ddr crystal 0 BCM_NS2_LCPLL_DDR
- pcie_sata_usb lcpll_ddr 1 BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK
- ddr lcpll_ddr 2 BCM_NS2_LCPLL_DDR_DDR_CLK
- ch2_unused lcpll_ddr 3 BCM_NS2_LCPLL_DDR_CH2_UNUSED
- ch3_unused lcpll_ddr 4 BCM_NS2_LCPLL_DDR_CH3_UNUSED
- ch4_unused lcpll_ddr 5 BCM_NS2_LCPLL_DDR_CH4_UNUSED
- ch5_unused lcpll_ddr 6 BCM_NS2_LCPLL_DDR_CH5_UNUSED
-
- lcpll_ports crystal 0 BCM_NS2_LCPLL_PORTS
- wan lcpll_ports 1 BCM_NS2_LCPLL_PORTS_WAN_CLK
- rgmii lcpll_ports 2 BCM_NS2_LCPLL_PORTS_RGMII_CLK
- ch2_unused lcpll_ports 3 BCM_NS2_LCPLL_PORTS_CH2_UNUSED
- ch3_unused lcpll_ports 4 BCM_NS2_LCPLL_PORTS_CH3_UNUSED
- ch4_unused lcpll_ports 5 BCM_NS2_LCPLL_PORTS_CH4_UNUSED
- ch5_unused lcpll_ports 6 BCM_NS2_LCPLL_PORTS_CH5_UNUSED
-
-BCM63138
---------
-PLL and leaf clock compatible strings for BCM63138 are:
- "brcm,bcm63138-armpll"
-
-Stingray
------------
-PLL and leaf clock compatible strings for Stingray are:
- "brcm,sr-genpll0"
- "brcm,sr-genpll1"
- "brcm,sr-genpll2"
- "brcm,sr-genpll3"
- "brcm,sr-genpll4"
- "brcm,sr-genpll5"
- "brcm,sr-genpll6"
-
- "brcm,sr-lcpll0"
- "brcm,sr-lcpll1"
- "brcm,sr-lcpll-pcie"
-
-
-The following table defines the set of PLL/clock index and ID for Stingray.
-These clock IDs are defined in:
- "include/dt-bindings/clock/bcm-sr.h"
-
- Clock Source Index ID
- --- ----- ----- ---------
- crystal N/A N/A N/A
- crmu_ref25m crystal N/A N/A
-
- genpll0 crystal 0 BCM_SR_GENPLL0
- clk_125m genpll0 1 BCM_SR_GENPLL0_125M_CLK
- clk_scr genpll0 2 BCM_SR_GENPLL0_SCR_CLK
- clk_250 genpll0 3 BCM_SR_GENPLL0_250M_CLK
- clk_pcie_axi genpll0 4 BCM_SR_GENPLL0_PCIE_AXI_CLK
- clk_paxc_axi_x2 genpll0 5 BCM_SR_GENPLL0_PAXC_AXI_X2_CLK
- clk_paxc_axi genpll0 6 BCM_SR_GENPLL0_PAXC_AXI_CLK
-
- genpll1 crystal 0 BCM_SR_GENPLL1
- clk_pcie_tl genpll1 1 BCM_SR_GENPLL1_PCIE_TL_CLK
- clk_mhb_apb genpll1 2 BCM_SR_GENPLL1_MHB_APB_CLK
-
- genpll2 crystal 0 BCM_SR_GENPLL2
- clk_nic genpll2 1 BCM_SR_GENPLL2_NIC_CLK
- clk_ts_500_ref genpll2 2 BCM_SR_GENPLL2_TS_500_REF_CLK
- clk_125_nitro genpll2 3 BCM_SR_GENPLL2_125_NITRO_CLK
- clk_chimp genpll2 4 BCM_SR_GENPLL2_CHIMP_CLK
- clk_nic_flash genpll2 5 BCM_SR_GENPLL2_NIC_FLASH_CLK
- clk_fs genpll2 6 BCM_SR_GENPLL2_FS_CLK
-
- genpll3 crystal 0 BCM_SR_GENPLL3
- clk_hsls genpll3 1 BCM_SR_GENPLL3_HSLS_CLK
- clk_sdio genpll3 2 BCM_SR_GENPLL3_SDIO_CLK
-
- genpll4 crystal 0 BCM_SR_GENPLL4
- clk_ccn genpll4 1 BCM_SR_GENPLL4_CCN_CLK
- clk_tpiu_pll genpll4 2 BCM_SR_GENPLL4_TPIU_PLL_CLK
- clk_noc genpll4 3 BCM_SR_GENPLL4_NOC_CLK
- clk_chclk_fs4 genpll4 4 BCM_SR_GENPLL4_CHCLK_FS4_CLK
- clk_bridge_fscpu genpll4 5 BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK
-
- genpll5 crystal 0 BCM_SR_GENPLL5
- clk_fs4_hf genpll5 1 BCM_SR_GENPLL5_FS4_HF_CLK
- clk_crypto_ae genpll5 2 BCM_SR_GENPLL5_CRYPTO_AE_CLK
- clk_raid_ae genpll5 3 BCM_SR_GENPLL5_RAID_AE_CLK
-
- genpll6 crystal 0 BCM_SR_GENPLL6
- clk_48_usb genpll6 1 BCM_SR_GENPLL6_48_USB_CLK
-
- lcpll0 crystal 0 BCM_SR_LCPLL0
- clk_sata_refp lcpll0 1 BCM_SR_LCPLL0_SATA_REFP_CLK
- clk_sata_refn lcpll0 2 BCM_SR_LCPLL0_SATA_REFN_CLK
- clk_sata_350 lcpll0 3 BCM_SR_LCPLL0_SATA_350_CLK
- clk_sata_500 lcpll0 4 BCM_SR_LCPLL0_SATA_500_CLK
-
- lcpll1 crystal 0 BCM_SR_LCPLL1
- clk_wan lcpll1 1 BCM_SR_LCPLL1_WAN_CLK
- clk_usb_ref lcpll1 2 BCM_SR_LCPLL1_USB_REF_CLK
- clk_crmu_ts lcpll1 3 BCM_SR_LCPLL1_CRMU_TS_CLK
-
- lcpll_pcie crystal 0 BCM_SR_LCPLL_PCIE
- clk_pcie_phy_ref lcpll1 1 BCM_SR_LCPLL_PCIE_PHY_REF_CLK
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml
new file mode 100644
index 000000000000..1174c9aa9934
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml
@@ -0,0 +1,394 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/brcm,iproc-clocks.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom iProc Family Clocks
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+description: |
+ The iProc clock controller manages clocks that are common to the iProc family.
+ An SoC from the iProc family may have several PLLs, e.g., ARMPLL, GENPLL,
+ LCPLL0, MIPIPLL, etc., all derived from an onboard crystal. Each PLL
+ comprises several leaf clocks.
+
+ ASIU clocks are a special case. These clocks are derived directly from the
+ reference clock of the onboard crystal.
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm63138-armpll
+ - brcm,cygnus-armpll
+ - brcm,cygnus-genpll
+ - brcm,cygnus-lcpll0
+ - brcm,cygnus-mipipll
+ - brcm,cygnus-asiu-clk
+ - brcm,cygnus-audiopll
+ - brcm,hr2-armpll
+ - brcm,nsp-armpll
+ - brcm,nsp-genpll
+ - brcm,nsp-lcpll0
+ - brcm,ns2-genpll-scr
+ - brcm,ns2-genpll-sw
+ - brcm,ns2-lcpll-ddr
+ - brcm,ns2-lcpll-ports
+ - brcm,sr-genpll0
+ - brcm,sr-genpll1
+ - brcm,sr-genpll2
+ - brcm,sr-genpll3
+ - brcm,sr-genpll4
+ - brcm,sr-genpll5
+ - brcm,sr-genpll6
+ - brcm,sr-lcpll0
+ - brcm,sr-lcpll1
+ - brcm,sr-lcpll-pcie
+
+ reg:
+ minItems: 1
+ items:
+ - description: base register
+ - description: power register
+ - description: ASIU or split status register
+
+ clocks:
+ description: The input parent clock phandle for the PLL / ASIU clock. For
+ most iProc PLLs, this is an onboard crystal with a fixed rate.
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clock-output-names:
+ minItems: 1
+ maxItems: 45
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,cygnus-armpll
+ - brcm,cygnus-genpll
+ - brcm,cygnus-lcpll0
+ - brcm,cygnus-mipipll
+ - brcm,cygnus-asiu-clk
+ - brcm,cygnus-audiopll
+ then:
+ properties:
+ clock-output-names:
+ description: |
+ The following table defines the set of PLL/clock index and ID for Cygnus.
+ These clock IDs are defined in:
+ "include/dt-bindings/clock/bcm-cygnus.h"
+
+ Clock Source (Parent) Index ID
+ ----- --------------- ----- --
+ crystal N/A N/A N/A
+
+ armpll crystal N/A N/A
+
+ keypad crystal (ASIU) 0 BCM_CYGNUS_ASIU_KEYPAD_CLK
+ adc/tsc crystal (ASIU) 1 BCM_CYGNUS_ASIU_ADC_CLK
+ pwm crystal (ASIU) 2 BCM_CYGNUS_ASIU_PWM_CLK
+
+ genpll crystal 0 BCM_CYGNUS_GENPLL
+ axi21 genpll 1 BCM_CYGNUS_GENPLL_AXI21_CLK
+ 250mhz genpll 2 BCM_CYGNUS_GENPLL_250MHZ_CLK
+ ihost_sys genpll 3 BCM_CYGNUS_GENPLL_IHOST_SYS_CLK
+ enet_sw genpll 4 BCM_CYGNUS_GENPLL_ENET_SW_CLK
+ audio_125 genpll 5 BCM_CYGNUS_GENPLL_AUDIO_125_CLK
+ can genpll 6 BCM_CYGNUS_GENPLL_CAN_CLK
+
+ lcpll0 crystal 0 BCM_CYGNUS_LCPLL0
+ pcie_phy lcpll0 1 BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK
+ ddr_phy lcpll0 2 BCM_CYGNUS_LCPLL0_DDR_PHY_CLK
+ sdio lcpll0 3 BCM_CYGNUS_LCPLL0_SDIO_CLK
+ usb_phy lcpll0 4 BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK
+ smart_card lcpll0 5 BCM_CYGNUS_LCPLL0_SMART_CARD_CLK
+ ch5_unused lcpll0 6 BCM_CYGNUS_LCPLL0_CH5_UNUSED
+
+ mipipll crystal 0 BCM_CYGNUS_MIPIPLL
+ ch0_unused mipipll 1 BCM_CYGNUS_MIPIPLL_CH0_UNUSED
+ ch1_lcd mipipll 2 BCM_CYGNUS_MIPIPLL_CH1_LCD
+ ch2_v3d mipipll 3 BCM_CYGNUS_MIPIPLL_CH2_V3D
+ ch3_unused mipipll 4 BCM_CYGNUS_MIPIPLL_CH3_UNUSED
+ ch4_unused mipipll 5 BCM_CYGNUS_MIPIPLL_CH4_UNUSED
+ ch5_unused mipipll 6 BCM_CYGNUS_MIPIPLL_CH5_UNUSED
+
+ audiopll crystal 0 BCM_CYGNUS_AUDIOPLL
+ ch0_audio audiopll 1 BCM_CYGNUS_AUDIOPLL_CH0
+ ch1_audio audiopll 2 BCM_CYGNUS_AUDIOPLL_CH1
+ ch2_audio audiopll 3 BCM_CYGNUS_AUDIOPLL_CH2
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,hr2-armpll
+ then:
+ properties:
+ clock-output-names:
+ description: |
+ The following table defines the set of PLL/clock for Hurricane 2:
+
+ Clock Source Index ID
+ ----- ------ ----- --
+ crystal N/A N/A N/A
+
+ armpll crystal N/A N/A
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,nsp-armpll
+ - brcm,nsp-genpll
+ - brcm,nsp-lcpll0
+ then:
+ properties:
+ clock-output-names:
+ description: |
+ The following table defines the set of PLL/clock index and ID for Northstar and
+ Northstar Plus. These clock IDs are defined in:
+ "include/dt-bindings/clock/bcm-nsp.h"
+
+ Clock Source Index ID
+ ----- ------ ----- --
+ crystal N/A N/A N/A
+
+ armpll crystal N/A N/A
+
+ genpll crystal 0 BCM_NSP_GENPLL
+ phy genpll 1 BCM_NSP_GENPLL_PHY_CLK
+ ethernetclk genpll 2 BCM_NSP_GENPLL_ENET_SW_CLK
+ usbclk genpll 3 BCM_NSP_GENPLL_USB_PHY_REF_CLK
+ iprocfast genpll 4 BCM_NSP_GENPLL_IPROCFAST_CLK
+ sata1 genpll 5 BCM_NSP_GENPLL_SATA1_CLK
+ sata2 genpll 6 BCM_NSP_GENPLL_SATA2_CLK
+
+ lcpll0 crystal 0 BCM_NSP_LCPLL0
+ pcie_phy lcpll0 1 BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK
+ sdio lcpll0 2 BCM_NSP_LCPLL0_SDIO_CLK
+ ddr_phy lcpll0 3 BCM_NSP_LCPLL0_DDR_PHY_CLK
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,ns2-genpll-scr
+ - brcm,ns2-genpll-sw
+ - brcm,ns2-lcpll-ddr
+ - brcm,ns2-lcpll-ports
+ then:
+ properties:
+ clock-output-names:
+ description: |
+ The following table defines the set of PLL/clock index and ID for Northstar 2.
+ These clock IDs are defined in:
+ "include/dt-bindings/clock/bcm-ns2.h"
+
+ Clock Source Index ID
+ ----- ------ ----- --
+ crystal N/A N/A N/A
+
+ genpll_scr crystal 0 BCM_NS2_GENPLL_SCR
+ scr genpll_scr 1 BCM_NS2_GENPLL_SCR_SCR_CLK
+ fs genpll_scr 2 BCM_NS2_GENPLL_SCR_FS_CLK
+ audio_ref genpll_scr 3 BCM_NS2_GENPLL_SCR_AUDIO_CLK
+ ch3_unused genpll_scr 4 BCM_NS2_GENPLL_SCR_CH3_UNUSED
+ ch4_unused genpll_scr 5 BCM_NS2_GENPLL_SCR_CH4_UNUSED
+ ch5_unused genpll_scr 6 BCM_NS2_GENPLL_SCR_CH5_UNUSED
+
+ genpll_sw crystal 0 BCM_NS2_GENPLL_SW
+ rpe genpll_sw 1 BCM_NS2_GENPLL_SW_RPE_CLK
+ 250 genpll_sw 2 BCM_NS2_GENPLL_SW_250_CLK
+ nic genpll_sw 3 BCM_NS2_GENPLL_SW_NIC_CLK
+ chimp genpll_sw 4 BCM_NS2_GENPLL_SW_CHIMP_CLK
+ port genpll_sw 5 BCM_NS2_GENPLL_SW_PORT_CLK
+ sdio genpll_sw 6 BCM_NS2_GENPLL_SW_SDIO_CLK
+
+ lcpll_ddr crystal 0 BCM_NS2_LCPLL_DDR
+ pcie_sata_usb lcpll_ddr 1 BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK
+ ddr lcpll_ddr 2 BCM_NS2_LCPLL_DDR_DDR_CLK
+ ch2_unused lcpll_ddr 3 BCM_NS2_LCPLL_DDR_CH2_UNUSED
+ ch3_unused lcpll_ddr 4 BCM_NS2_LCPLL_DDR_CH3_UNUSED
+ ch4_unused lcpll_ddr 5 BCM_NS2_LCPLL_DDR_CH4_UNUSED
+ ch5_unused lcpll_ddr 6 BCM_NS2_LCPLL_DDR_CH5_UNUSED
+
+ lcpll_ports crystal 0 BCM_NS2_LCPLL_PORTS
+ wan lcpll_ports 1 BCM_NS2_LCPLL_PORTS_WAN_CLK
+ rgmii lcpll_ports 2 BCM_NS2_LCPLL_PORTS_RGMII_CLK
+ ch2_unused lcpll_ports 3 BCM_NS2_LCPLL_PORTS_CH2_UNUSED
+ ch3_unused lcpll_ports 4 BCM_NS2_LCPLL_PORTS_CH3_UNUSED
+ ch4_unused lcpll_ports 5 BCM_NS2_LCPLL_PORTS_CH4_UNUSED
+ ch5_unused lcpll_ports 6 BCM_NS2_LCPLL_PORTS_CH5_UNUSED
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,sr-genpll0
+ - brcm,sr-genpll1
+ - brcm,sr-genpll2
+ - brcm,sr-genpll3
+ - brcm,sr-genpll4
+ - brcm,sr-genpll5
+ - brcm,sr-genpll6
+ - brcm,sr-lcpll0
+ - brcm,sr-lcpll1
+ - brcm,sr-lcpll-pcie
+ then:
+ properties:
+ clock-output-names:
+ description: |
+ The following table defines the set of PLL/clock index and ID for Stingray.
+ These clock IDs are defined in:
+ "include/dt-bindings/clock/bcm-sr.h"
+
+ Clock Source Index ID
+ ----- ------ ----- --
+ crystal N/A N/A N/A
+ crmu_ref25m crystal N/A N/A
+
+ genpll0 crystal 0 BCM_SR_GENPLL0
+ clk_125m genpll0 1 BCM_SR_GENPLL0_125M_CLK
+ clk_scr genpll0 2 BCM_SR_GENPLL0_SCR_CLK
+ clk_250 genpll0 3 BCM_SR_GENPLL0_250M_CLK
+ clk_pcie_axi genpll0 4 BCM_SR_GENPLL0_PCIE_AXI_CLK
+ clk_paxc_axi_x2 genpll0 5 BCM_SR_GENPLL0_PAXC_AXI_X2_CLK
+ clk_paxc_axi genpll0 6 BCM_SR_GENPLL0_PAXC_AXI_CLK
+
+ genpll1 crystal 0 BCM_SR_GENPLL1
+ clk_pcie_tl genpll1 1 BCM_SR_GENPLL1_PCIE_TL_CLK
+ clk_mhb_apb genpll1 2 BCM_SR_GENPLL1_MHB_APB_CLK
+
+ genpll2 crystal 0 BCM_SR_GENPLL2
+ clk_nic genpll2 1 BCM_SR_GENPLL2_NIC_CLK
+ clk_ts_500_ref genpll2 2 BCM_SR_GENPLL2_TS_500_REF_CLK
+ clk_125_nitro genpll2 3 BCM_SR_GENPLL2_125_NITRO_CLK
+ clk_chimp genpll2 4 BCM_SR_GENPLL2_CHIMP_CLK
+ clk_nic_flash genpll2 5 BCM_SR_GENPLL2_NIC_FLASH_CLK
+ clk_fs genpll2 6 BCM_SR_GENPLL2_FS_CLK
+
+ genpll3 crystal 0 BCM_SR_GENPLL3
+ clk_hsls genpll3 1 BCM_SR_GENPLL3_HSLS_CLK
+ clk_sdio genpll3 2 BCM_SR_GENPLL3_SDIO_CLK
+
+ genpll4 crystal 0 BCM_SR_GENPLL4
+ clk_ccn genpll4 1 BCM_SR_GENPLL4_CCN_CLK
+ clk_tpiu_pll genpll4 2 BCM_SR_GENPLL4_TPIU_PLL_CLK
+ clk_noc genpll4 3 BCM_SR_GENPLL4_NOC_CLK
+ clk_chclk_fs4 genpll4 4 BCM_SR_GENPLL4_CHCLK_FS4_CLK
+ clk_bridge_fscpu genpll4 5 BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK
+
+ genpll5 crystal 0 BCM_SR_GENPLL5
+ clk_fs4_hf genpll5 1 BCM_SR_GENPLL5_FS4_HF_CLK
+ clk_crypto_ae genpll5 2 BCM_SR_GENPLL5_CRYPTO_AE_CLK
+ clk_raid_ae genpll5 3 BCM_SR_GENPLL5_RAID_AE_CLK
+
+ genpll6 crystal 0 BCM_SR_GENPLL6
+ clk_48_usb genpll6 1 BCM_SR_GENPLL6_48_USB_CLK
+
+ lcpll0 crystal 0 BCM_SR_LCPLL0
+ clk_sata_refp lcpll0 1 BCM_SR_LCPLL0_SATA_REFP_CLK
+ clk_sata_refn lcpll0 2 BCM_SR_LCPLL0_SATA_REFN_CLK
+ clk_sata_350 lcpll0 3 BCM_SR_LCPLL0_SATA_350_CLK
+ clk_sata_500 lcpll0 4 BCM_SR_LCPLL0_SATA_500_CLK
+
+ lcpll1 crystal 0 BCM_SR_LCPLL1
+ clk_wan lcpll1 1 BCM_SR_LCPLL1_WAN_CLK
+ clk_usb_ref lcpll1 2 BCM_SR_LCPLL1_USB_REF_CLK
+ clk_crmu_ts lcpll1 3 BCM_SR_LCPLL1_CRMU_TS_CLK
+
+ lcpll_pcie crystal 0 BCM_SR_LCPLL_PCIE
+ clk_pcie_phy_ref lcpll1 1 BCM_SR_LCPLL_PCIE_PHY_REF_CLK
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,cygnus-genpll
+ then:
+ properties:
+ clock-output-names:
+ items:
+ - const: genpll
+ - const: axi21
+ - const: 250mhz
+ - const: ihost_sys
+ - const: enet_sw
+ - const: audio_125
+ - const: can
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,nsp-lcpll0
+ then:
+ properties:
+ clock-output-names:
+ items:
+ - const: lcpll0
+ - const: pcie_phy
+ - const: sdio
+ - const: ddr_phy
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,nsp-genpll
+ then:
+ properties:
+ clock-output-names:
+ items:
+ - const: genpll
+ - const: phy
+ - const: ethernetclk
+ - const: usbclk
+ - const: iprocfast
+ - const: sata1
+ - const: sata2
+
+required:
+ - reg
+ - clocks
+ - '#clock-cells'
+ - clock-output-names
+
+additionalProperties: false
+
+examples:
+ - |
+ osc1: oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25000000>;
+ };
+
+ genpll@301d000 {
+ #clock-cells = <1>;
+ compatible = "brcm,cygnus-genpll";
+ reg = <0x301d000 0x2c>, <0x301c020 0x4>;
+ clocks = <&osc1>;
+ clock-output-names = "genpll", "axi21", "250mhz", "ihost_sys",
+ "enet_sw", "audio_125", "can";
+ };
+ - |
+ osc2: oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25000000>;
+ };
+
+ asiu_clks@301d048 {
+ #clock-cells = <1>;
+ compatible = "brcm,cygnus-asiu-clk";
+ reg = <0x301d048 0xc>, <0x180aa024 0x4>;
+ clocks = <&osc2>;
+ clock-output-names = "keypad", "adc/touch", "pwm";
+ };
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
index 28675b0b80f1..26ed040bc717 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
@@ -84,6 +84,7 @@ patternProperties:
idt,slew-percent:
description: The Slew rate control for CMOS single-ended.
enum: [ 80, 85, 90, 100 ]
+ additionalProperties: false
required:
- compatible
@@ -139,13 +140,13 @@ examples:
clock-names = "xin";
OUT1 {
- idt,drive-mode = <VC5_CMOSD>;
- idt,voltage-microvolts = <1800000>;
+ idt,mode = <VC5_CMOSD>;
+ idt,voltage-microvolt = <1800000>;
idt,slew-percent = <80>;
};
OUT4 {
- idt,drive-mode = <VC5_LVDS>;
+ idt,mode = <VC5_LVDS>;
};
};
};
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra114-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra114-car.txt
deleted file mode 100644
index 9acea9d93160..000000000000
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra114-car.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-NVIDIA Tegra114 Clock And Reset Controller
-
-This binding uses the common clock binding:
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
-for muxing and gating Tegra's clocks, and setting their rates.
-
-Required properties :
-- compatible : Should be "nvidia,tegra114-car"
-- reg : Should contain CAR registers location and length
-- clocks : Should contain phandle and clock specifiers for two clocks:
- the 32 KHz "32k_in", and the board-specific oscillator "osc".
-- #clock-cells : Should be 1.
- In clock consumers, this cell represents the clock ID exposed by the
- CAR. The assignments may be found in header file
- <dt-bindings/clock/tegra114-car.h>.
-- #reset-cells : Should be 1.
- In clock consumers, this cell represents the bit number in the CAR's
- array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
-
-Example SoC include file:
-
-/ {
- tegra_car: clock {
- compatible = "nvidia,tegra114-car";
- reg = <0x60006000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- usb@c5004000 {
- clocks = <&tegra_car TEGRA114_CLK_USB2>;
- };
-};
-
-Example board file:
-
-/ {
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- osc: clock@0 {
- compatible = "fixed-clock";
- reg = <0>;
- #clock-cells = <0>;
- clock-frequency = <12000000>;
- };
-
- clk_32k: clock@1 {
- compatible = "fixed-clock";
- reg = <1>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- &tegra_car {
- clocks = <&clk_32k> <&osc>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
deleted file mode 100644
index 7f02fb4ca4ad..000000000000
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-NVIDIA Tegra124 and Tegra132 Clock And Reset Controller
-
-This binding uses the common clock binding:
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
-for muxing and gating Tegra's clocks, and setting their rates.
-
-Required properties :
-- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car"
-- reg : Should contain CAR registers location and length
-- clocks : Should contain phandle and clock specifiers for two clocks:
- the 32 KHz "32k_in", and the board-specific oscillator "osc".
-- #clock-cells : Should be 1.
- In clock consumers, this cell represents the clock ID exposed by the
- CAR. The assignments may be found in the header files
- <dt-bindings/clock/tegra124-car-common.h> (which covers IDs common
- to Tegra124 and Tegra132) and <dt-bindings/clock/tegra124-car.h>
- (for Tegra124-specific clocks).
-- #reset-cells : Should be 1.
- In clock consumers, this cell represents the bit number in the CAR's
- array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
-- nvidia,external-memory-controller : phandle of the EMC driver.
-
-The node should contain a "emc-timings" subnode for each supported RAM type (see
-field RAM_CODE in register PMC_STRAPPING_OPT_A).
-
-Required properties for "emc-timings" nodes :
-- nvidia,ram-code : Should contain the value of RAM_CODE this timing set
- is used for.
-
-Each "emc-timings" node should contain a "timing" subnode for every supported
-EMC clock rate.
-
-Required properties for "timing" nodes :
-- clock-frequency : Should contain the memory clock rate to which this timing
-relates.
-- nvidia,parent-clock-frequency : Should contain the rate at which the current
-parent of the EMC clock should be running at this timing.
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - emc-parent : the clock that should be the parent of the EMC clock at this
-timing.
-
-Example SoC include file:
-
-/ {
- tegra_car: clock@60006000 {
- compatible = "nvidia,tegra124-car";
- reg = <0x60006000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- nvidia,external-memory-controller = <&emc>;
- };
-
- usb@c5004000 {
- clocks = <&tegra_car TEGRA124_CLK_USB2>;
- };
-};
-
-Example board file:
-
-/ {
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- osc: clock@0 {
- compatible = "fixed-clock";
- reg = <0>;
- #clock-cells = <0>;
- clock-frequency = <112400000>;
- };
-
- clk_32k: clock@1 {
- compatible = "fixed-clock";
- reg = <1>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- &tegra_car {
- clocks = <&clk_32k> <&osc>;
- };
-
- clock@60006000 {
- emc-timings-3 {
- nvidia,ram-code = <3>;
-
- timing-12750000 {
- clock-frequency = <12750000>;
- nvidia,parent-clock-frequency = <408000000>;
- clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
- clock-names = "emc-parent";
- };
- timing-20400000 {
- clock-frequency = <20400000>;
- nvidia,parent-clock-frequency = <408000000>;
- clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
- clock-names = "emc-parent";
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.yaml b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.yaml
new file mode 100644
index 000000000000..ec7ab1483652
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nvidia,tegra124-car.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra Clock and Reset Controller
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+ The Clock and Reset (CAR) is the HW module responsible for muxing and gating
+ Tegra's clocks, and setting their rates. It comprises CLKGEN and RSTGEN units.
+
+ CLKGEN provides the registers to program the PLLs. It controls most of
+ the clock source programming and most of the clock dividers.
+
+ CLKGEN input signals include the external clock for the reference frequency
+ (12 MHz, 26 MHz) and the external clock for the Real Time Clock (32.768 kHz).
+
+ Outputs from CLKGEN are the input clocks of the hardware blocks in the Tegra system.
+
+ RSTGEN provides the registers needed to control resetting of each block in
+ the Tegra system.
+
+properties:
+ compatible:
+ const: nvidia,tegra124-car
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ nvidia,external-memory-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle of the external memory controller node
+
+patternProperties:
+ "^emc-timings-[0-9]+$":
+ type: object
+ properties:
+ nvidia,ram-code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ value of the RAM_CODE field in the PMC_STRAPPING_OPT_A register that
+ this timing set is used for
+
+ patternProperties:
+ "^timing-[0-9]+$":
+ type: object
+ properties:
+ clock-frequency:
+ description:
+ external memory clock rate in Hz
+ minimum: 1000000
+ maximum: 1000000000
+
+ nvidia,parent-clock-frequency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ rate of parent clock in Hz
+ minimum: 1000000
+ maximum: 1000000000
+
+ clocks:
+ items:
+ - description: parent clock of EMC
+
+ clock-names:
+ items:
+ - const: emc-parent
+
+ required:
+ - clock-frequency
+ - nvidia,parent-clock-frequency
+ - clocks
+ - clock-names
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra124-car.h>
+
+ car: clock-controller@60006000 {
+ compatible = "nvidia,tegra124-car";
+ reg = <0x60006000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ usb-controller@c5004000 {
+ compatible = "nvidia,tegra20-ehci";
+ reg = <0xc5004000 0x4000>;
+ clocks = <&car TEGRA124_CLK_USB2>;
+ resets = <&car TEGRA124_CLK_USB2>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra20-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra20-car.txt
deleted file mode 100644
index 6c5901b503d0..000000000000
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra20-car.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-NVIDIA Tegra20 Clock And Reset Controller
-
-This binding uses the common clock binding:
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
-for muxing and gating Tegra's clocks, and setting their rates.
-
-Required properties :
-- compatible : Should be "nvidia,tegra20-car"
-- reg : Should contain CAR registers location and length
-- clocks : Should contain phandle and clock specifiers for two clocks:
- the 32 KHz "32k_in", and the board-specific oscillator "osc".
-- #clock-cells : Should be 1.
- In clock consumers, this cell represents the clock ID exposed by the
- CAR. The assignments may be found in header file
- <dt-bindings/clock/tegra20-car.h>.
-- #reset-cells : Should be 1.
- In clock consumers, this cell represents the bit number in the CAR's
- array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
-
-Example SoC include file:
-
-/ {
- tegra_car: clock {
- compatible = "nvidia,tegra20-car";
- reg = <0x60006000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- usb@c5004000 {
- clocks = <&tegra_car TEGRA20_CLK_USB2>;
- };
-};
-
-Example board file:
-
-/ {
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- osc: clock@0 {
- compatible = "fixed-clock";
- reg = <0>;
- #clock-cells = <0>;
- clock-frequency = <12000000>;
- };
-
- clk_32k: clock@1 {
- compatible = "fixed-clock";
- reg = <1>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- &tegra_car {
- clocks = <&clk_32k> <&osc>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra20-car.yaml b/Documentation/devicetree/bindings/clock/nvidia,tegra20-car.yaml
new file mode 100644
index 000000000000..459d2a525393
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nvidia,tegra20-car.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nvidia,tegra20-car.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra Clock and Reset Controller
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+ The Clock and Reset (CAR) is the HW module responsible for muxing and gating
+ Tegra's clocks, and setting their rates. It comprises CLKGEN and RSTGEN units.
+
+ CLKGEN provides the registers to program the PLLs. It controls most of
+ the clock source programming and most of the clock dividers.
+
+ CLKGEN input signals include the external clock for the reference frequency
+ (12 MHz, 26 MHz) and the external clock for the Real Time Clock (32.768 kHz).
+
+ Outputs from CLKGEN are the input clocks of the hardware blocks in the Tegra system.
+
+ RSTGEN provides the registers needed to control resetting of each block in
+ the Tegra system.
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra20-car
+ - nvidia,tegra30-car
+ - nvidia,tegra114-car
+ - nvidia,tegra210-car
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra20-car.h>
+
+ car: clock-controller@60006000 {
+ compatible = "nvidia,tegra20-car";
+ reg = <0x60006000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ usb-controller@c5004000 {
+ compatible = "nvidia,tegra20-ehci";
+ reg = <0xc5004000 0x4000>;
+ clocks = <&car TEGRA20_CLK_USB2>;
+ resets = <&car TEGRA20_CLK_USB2>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt
deleted file mode 100644
index 26f237f641b7..000000000000
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra210-car.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-NVIDIA Tegra210 Clock And Reset Controller
-
-This binding uses the common clock binding:
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
-for muxing and gating Tegra's clocks, and setting their rates.
-
-Required properties :
-- compatible : Should be "nvidia,tegra210-car"
-- reg : Should contain CAR registers location and length
-- clocks : Should contain phandle and clock specifiers for two clocks:
- the 32 KHz "32k_in".
-- #clock-cells : Should be 1.
- In clock consumers, this cell represents the clock ID exposed by the
- CAR. The assignments may be found in header file
- <dt-bindings/clock/tegra210-car.h>.
-- #reset-cells : Should be 1.
- In clock consumers, this cell represents the bit number in the CAR's
- array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
-
-Example SoC include file:
-
-/ {
- tegra_car: clock {
- compatible = "nvidia,tegra210-car";
- reg = <0x60006000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- usb@c5004000 {
- clocks = <&tegra_car TEGRA210_CLK_USB2>;
- };
-};
-
-Example board file:
-
-/ {
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- clk_32k: clock@1 {
- compatible = "fixed-clock";
- reg = <1>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- &tegra_car {
- clocks = <&clk_32k>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra30-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra30-car.txt
deleted file mode 100644
index 63618cde12df..000000000000
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra30-car.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-NVIDIA Tegra30 Clock And Reset Controller
-
-This binding uses the common clock binding:
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
-for muxing and gating Tegra's clocks, and setting their rates.
-
-Required properties :
-- compatible : Should be "nvidia,tegra30-car"
-- reg : Should contain CAR registers location and length
-- clocks : Should contain phandle and clock specifiers for two clocks:
- the 32 KHz "32k_in", and the board-specific oscillator "osc".
-- #clock-cells : Should be 1.
- In clock consumers, this cell represents the clock ID exposed by the
- CAR. The assignments may be found in header file
- <dt-bindings/clock/tegra30-car.h>.
-- #reset-cells : Should be 1.
- In clock consumers, this cell represents the bit number in the CAR's
- array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
-
-Example SoC include file:
-
-/ {
- tegra_car: clock {
- compatible = "nvidia,tegra30-car";
- reg = <0x60006000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- usb@c5004000 {
- clocks = <&tegra_car TEGRA30_CLK_USB2>;
- };
-};
-
-Example board file:
-
-/ {
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- osc: clock@0 {
- compatible = "fixed-clock";
- reg = <0>;
- #clock-cells = <0>;
- clock-frequency = <12000000>;
- };
-
- clk_32k: clock@1 {
- compatible = "fixed-clock";
- reg = <1>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- &tegra_car {
- clocks = <&clk_32k> <&osc>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
index eacccc88bbf6..8e2eac6cbfb9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -46,7 +46,6 @@ properties:
nvmem-cell-names:
minItems: 1
- maxItems: 2
items:
- const: calib
- const: calib_backup
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml
index 1121b3934cb9..b0d1c65aa354 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sdx55.yaml
@@ -27,7 +27,6 @@ properties:
- description: Sleep clock source
- description: PLL test clock source (Optional clock)
minItems: 2
- maxItems: 3
clock-names:
items:
@@ -35,7 +34,6 @@ properties:
- const: sleep_clk
- const: core_bi_pll_test_se # Optional clock
minItems: 2
- maxItems: 3
'#clock-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
index 78f35832aa41..1122700dcc2b 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
@@ -36,7 +36,6 @@ properties:
- description: USB3 phy wrapper pipe clock source (Optional clock)
- description: USB3 phy sec pipe clock source (Optional clock)
minItems: 2
- maxItems: 13
clock-names:
items:
@@ -54,7 +53,6 @@ properties:
- const: usb3_phy_wrapper_gcc_usb30_pipe_clk # Optional clock
- const: usb3_uni_phy_sec_gcc_usb30_pipe_clk # Optional clock
minItems: 2
- maxItems: 13
'#clock-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/clock/renesas,emev2-smu.txt b/Documentation/devicetree/bindings/clock/renesas,emev2-smu.txt
deleted file mode 100644
index 268ca615459e..000000000000
--- a/Documentation/devicetree/bindings/clock/renesas,emev2-smu.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-Device tree Clock bindings for Renesas EMMA Mobile EV2
-
-This binding uses the common clock binding.
-
-* SMU
-System Management Unit described in user's manual R19UH0037EJ1000_SMU.
-This is not a clock provider, but clocks under SMU depend on it.
-
-Required properties:
-- compatible: Should be "renesas,emev2-smu"
-- reg: Address and Size of SMU registers
-
-* SMU_CLKDIV
-Function block with an input mux and a divider, which corresponds to
-"Serial clock generator" in fig."Clock System Overview" of the manual,
-and "xxx frequency division setting register" (XXXCLKDIV) registers.
-This makes internal (neither input nor output) clock that is provided
-to input of xxxGCLK block.
-
-Required properties:
-- compatible: Should be "renesas,emev2-smu-clkdiv"
-- reg: Byte offset from SMU base and Bit position in the register
-- clocks: Parent clocks. Input clocks as described in clock-bindings.txt
-- #clock-cells: Should be <0>
-
-* SMU_GCLK
-Clock gating node shown as "Clock stop processing block" in the
-fig."Clock System Overview" of the manual.
-Registers are "xxx clock gate control register" (XXXGCLKCTRL).
-
-Required properties:
-- compatible: Should be "renesas,emev2-smu-gclk"
-- reg: Byte offset from SMU base and Bit position in the register
-- clocks: Input clock as described in clock-bindings.txt
-- #clock-cells: Should be <0>
-
-Example of provider:
-
-usia_u0_sclkdiv: usia_u0_sclkdiv {
- compatible = "renesas,emev2-smu-clkdiv";
- reg = <0x610 0>;
- clocks = <&pll3_fo>, <&pll4_fo>, <&pll1_fo>, <&osc1_fo>;
- #clock-cells = <0>;
-};
-
-usia_u0_sclk: usia_u0_sclk {
- compatible = "renesas,emev2-smu-gclk";
- reg = <0x4a0 1>;
- clocks = <&usia_u0_sclkdiv>;
- #clock-cells = <0>;
-};
-
-Example of consumer:
-
-serial@e1020000 {
- compatible = "renesas,em-uart";
- reg = <0xe1020000 0x38>;
- interrupts = <0 8 0>;
- clocks = <&usia_u0_sclk>;
- clock-names = "sclk";
-};
-
-Example of clock-tree description:
-
- This describes a clock path in the clock tree
- c32ki -> pll3_fo -> usia_u0_sclkdiv -> usia_u0_sclk
-
-smu@e0110000 {
- compatible = "renesas,emev2-smu";
- reg = <0xe0110000 0x10000>;
- #address-cells = <2>;
- #size-cells = <0>;
-
- c32ki: c32ki {
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- #clock-cells = <0>;
- };
- pll3_fo: pll3_fo {
- compatible = "fixed-factor-clock";
- clocks = <&c32ki>;
- clock-div = <1>;
- clock-mult = <7000>;
- #clock-cells = <0>;
- };
- usia_u0_sclkdiv: usia_u0_sclkdiv {
- compatible = "renesas,emev2-smu-clkdiv";
- reg = <0x610 0>;
- clocks = <&pll3_fo>;
- #clock-cells = <0>;
- };
- usia_u0_sclk: usia_u0_sclk {
- compatible = "renesas,emev2-smu-gclk";
- reg = <0x4a0 1>;
- clocks = <&usia_u0_sclkdiv>;
- #clock-cells = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/renesas,emev2-smu.yaml b/Documentation/devicetree/bindings/clock/renesas,emev2-smu.yaml
new file mode 100644
index 000000000000..4d9a64800481
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,emev2-smu.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/renesas,emev2-smu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas EMMA Mobile EV2 System Management Unit
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Magnus Damm <magnus.damm@gmail.com>
+
+description: |
+ The System Management Unit is described in user's manual R19UH0037EJ1000_SMU.
+ This is not a clock provider, but clocks under SMU depend on it.
+
+properties:
+ compatible:
+ const: renesas,emev2-smu
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 2
+
+ '#size-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+patternProperties:
+ ".*sclkdiv@.*":
+ type: object
+
+ description: |
+ Function block with an input mux and a divider, which corresponds to
+ "Serial clock generator" in fig. "Clock System Overview" of the manual,
+ and "xxx frequency division setting register" (XXXCLKDIV) registers.
+ This makes an internal (neither input nor output) clock that is provided
+ to the input of the xxxGCLK block.
+
+ properties:
+ compatible:
+ const: renesas,emev2-smu-clkdiv
+
+ reg:
+ maxItems: 1
+ description:
+ Byte offset from SMU base and Bit position in the register.
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+ '#clock-cells':
+ const: 0
+
+ required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+ additionalProperties: false
+
+ ".*sclk@.*":
+ type: object
+
+ description: |
+ Clock gating node shown as "Clock stop processing block" in the
+ fig. "Clock System Overview" of the manual.
+ Registers are "xxx clock gate control register" (XXXGCLKCTRL).
+
+ properties:
+ compatible:
+ const: renesas,emev2-smu-gclk
+
+ reg:
+ maxItems: 1
+ description:
+ Byte offset from SMU base and Bit position in the register.
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+ required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+ additionalProperties: false
+
+additionalProperties: true
+
+examples:
+ - |
+ // Example of clock-tree description:
+ //
+ // This describes a clock path in the clock tree
+ // c32ki -> pll3_fo -> usia_u0_sclkdiv -> usia_u0_sclk
+ clocks@e0110000 {
+ compatible = "renesas,emev2-smu";
+ reg = <0xe0110000 0x10000>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ c32ki: c32ki {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+ pll3_fo: pll3_fo {
+ compatible = "fixed-factor-clock";
+ clocks = <&c32ki>;
+ clock-div = <1>;
+ clock-mult = <7000>;
+ #clock-cells = <0>;
+ };
+ usia_u0_sclkdiv: usia_u0_sclkdiv@610,0 {
+ compatible = "renesas,emev2-smu-clkdiv";
+ reg = <0x610 0>;
+ clocks = <&pll3_fo>;
+ #clock-cells = <0>;
+ };
+ usia_u0_sclk: usia_u0_sclk@4a0,1 {
+ compatible = "renesas,emev2-smu-gclk";
+ reg = <0x4a0 1>;
+ clocks = <&usia_u0_sclkdiv>;
+ #clock-cells = <0>;
+ };
+ };
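The deleted text binding also carried a consumer example that did not make it into the new schema; it is repeated below, taken from the removed file, to show how a peripheral references one of the SMU gate clocks:

    serial@e1020000 {
        compatible = "renesas,em-uart";
        reg = <0xe1020000 0x38>;
        interrupts = <0 8 0>;
        clocks = <&usia_u0_sclk>;
        clock-names = "sclk";
    };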
diff --git a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt
deleted file mode 100644
index aed713cf0831..000000000000
--- a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-* Renesas R9A06G032 SYSCTRL
-
-Required Properties:
-
- - compatible: Must be:
- - "renesas,r9a06g032-sysctrl"
- - reg: Base address and length of the SYSCTRL IO block.
- - #clock-cells: Must be 1
- - clocks: References to the parent clocks:
- - external 40mhz crystal.
- - external (optional) 32.768khz
- - external (optional) jtag input
- - external (optional) RGMII_REFCLK
- - clock-names: Must be:
- clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
- - #power-domain-cells: Must be 0
-
-Examples
---------
-
- - SYSCTRL node:
-
- sysctrl: system-controller@4000c000 {
- compatible = "renesas,r9a06g032-sysctrl";
- reg = <0x4000c000 0x1000>;
- #clock-cells = <1>;
-
- clocks = <&ext_mclk>, <&ext_rtc_clk>,
- <&ext_jtag_clk>, <&ext_rgmii_ref>;
- clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
- #power-domain-cells = <0>;
- };
-
- - Other nodes can use the clocks provided by SYSCTRL as in:
-
- #include <dt-bindings/clock/r9a06g032-sysctrl.h>
- uart0: serial@40060000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x40060000 0x400>;
- interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&sysctrl R9A06G032_CLK_UART0>, <&sysctrl R9A06G032_HCLK_UART0>;
- clock-names = "baudclk", "apb_pclk";
- power-domains = <&sysctrl>;
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml
new file mode 100644
index 000000000000..25dbb0fac065
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/renesas,r9a06g032-sysctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/N1D (R9A06G032) System Controller
+
+maintainers:
+ - Gareth Williams <gareth.williams.jx@renesas.com>
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ const: renesas,r9a06g032-sysctrl
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: External 40 MHz crystal
+ - description: Optional external 32.768 kHz crystal
+ - description: Optional external JTAG input
+ - description: Optional external RGMII_REFCLK
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: mclk
+ - const: rtc
+ - const: jtag
+ - const: rgmii_ref_ext
+
+ '#clock-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ sysctrl: system-controller@4000c000 {
+ compatible = "renesas,r9a06g032-sysctrl";
+ reg = <0x4000c000 0x1000>;
+ clocks = <&ext_mclk>, <&ext_rtc_clk>, <&ext_jtag_clk>,
+ <&ext_rgmii_ref>;
+ clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
+ #clock-cells = <1>;
+ #power-domain-cells = <0>;
+ };
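Likewise, the removed text file showed how other nodes consume the SYSCTRL clocks and power domain; that consumer example, reproduced from the deleted binding, still applies to the new schema:

    #include <dt-bindings/clock/r9a06g032-sysctrl.h>

    uart0: serial@40060000 {
        compatible = "snps,dw-apb-uart";
        reg = <0x40060000 0x400>;
        interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
        reg-shift = <2>;
        reg-io-width = <4>;
        clocks = <&sysctrl R9A06G032_CLK_UART0>, <&sysctrl R9A06G032_HCLK_UART0>;
        clock-names = "baudclk", "apb_pclk";
        power-domains = <&sysctrl>;
    };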
diff --git a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
index 4069e09cb62d..47e1ab08c95d 100644
--- a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
@@ -40,7 +40,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 4
items:
- const: ext-26m
- const: ext-32k
diff --git a/Documentation/devicetree/bindings/clock/ti,sci-clk.txt b/Documentation/devicetree/bindings/clock/ti,sci-clk.txt
deleted file mode 100644
index 4e59dc6b1778..000000000000
--- a/Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Texas Instruments TI-SCI Clocks
-===============================
-
-All clocks on Texas Instruments' SoCs that contain a System Controller,
-are only controlled by this entity. Communication between a host processor
-running an OS and the System Controller happens through a protocol known
-as TI-SCI[1]. This clock implementation plugs into the common clock
-framework and makes use of the TI-SCI protocol on clock API requests.
-
-[1] Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
-
-Required properties:
--------------------
-- compatible: Must be "ti,k2g-sci-clk"
-- #clock-cells: Shall be 2.
- In clock consumers, this cell represents the device ID and clock ID
- exposed by the PM firmware. The list of valid values for the device IDs
- and clocks IDs for 66AK2G SoC are documented at
- http://processors.wiki.ti.com/index.php/TISCI#66AK2G02_Data
-
-Examples:
---------
-
-pmmc: pmmc {
- compatible = "ti,k2g-sci";
-
- k2g_clks: clocks {
- compatible = "ti,k2g-sci-clk";
- #clock-cells = <2>;
- };
-};
-
-uart0: serial@2530c00 {
- compatible = "ns16550a";
- clocks = <&k2g_clks 0x2c 0>;
-};
diff --git a/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml b/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
new file mode 100644
index 000000000000..0e370289a053
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/ti,sci-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-SCI clock controller node bindings
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+description: |
+ Some TI SoCs contain a system controller (like the Power Management Micro
+ Controller (PMMC) on Keystone 66AK2G SoC) that is responsible for controlling
+ the state of the various hardware modules present on the SoC. Communication
+ between the host processor running an OS and the system controller happens
+ through a protocol called TI System Control Interface (TI-SCI protocol).
+
+ This clock controller node uses the TI-SCI protocol to perform clock
+ management of the various hardware modules (devices) present on the SoC. This
+ node must be a child node of the associated TI-SCI system controller node.
+
+properties:
+ $nodename:
+ pattern: "^clock-controller$"
+
+ compatible:
+ const: ti,k2g-sci-clk
+
+ "#clock-cells":
+ const: 2
+ description:
+ The two cells represent values that the TI-SCI controller defines.
+
+ The first cell should contain the device ID.
+
+ The second cell should contain the clock ID.
+
+ Please see http://processors.wiki.ti.com/index.php/TISCI for
+ protocol documentation for the values to be used for different devices.
+
+additionalProperties: false
+
+examples:
+ - |
+ k3_clks: clock-controller {
+ compatible = "ti,k2g-sci-clk";
+ #clock-cells = <2>;
+ };
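The schema example shows only the clock-controller node itself; the deleted .txt also showed how a consumer encodes the two #clock-cells (device ID first, then clock ID). A consumer sketch based on the values from the removed example:

    uart0: serial@2530c00 {
            compatible = "ns16550a";
            /* First cell: TI-SCI device ID (0x2c), second cell: clock ID (0) */
            clocks = <&k2g_clks 0x2c 0>;
    };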
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
index ea4994b35207..ef68711716fb 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
@@ -202,11 +202,11 @@ Example 2 (MT8173 SoC):
cpu2: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a57";
+ compatible = "arm,cortex-a72";
reg = <0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA57SEL>,
+ clocks = <&infracfg CLK_INFRA_CA72SEL>,
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cpu_opp_table_b>;
@@ -214,11 +214,11 @@ Example 2 (MT8173 SoC):
cpu3: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a57";
+ compatible = "arm,cortex-a72";
reg = <0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA57SEL>,
+ clocks = <&infracfg CLK_INFRA_CA72SEL>,
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cpu_opp_table_b>;
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
index 6ab07eba7778..00648f9d9278 100644
--- a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
@@ -30,7 +30,6 @@ properties:
- description: Module clock
- description: MBus clock
minItems: 2
- maxItems: 3
clock-names:
items:
@@ -38,7 +37,6 @@ properties:
- const: mod
- const: ram
minItems: 2
- maxItems: 3
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/crypto/arm,cryptocell.yaml b/Documentation/devicetree/bindings/crypto/arm,cryptocell.yaml
new file mode 100644
index 000000000000..9c97874a6dbd
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/arm,cryptocell.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/arm,cryptocell.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arm TrustZone CryptoCell cryptographic engine
+
+maintainers:
+ - Gilad Ben-Yossef <gilad@benyossef.com>
+
+properties:
+ compatible:
+ enum:
+ - arm,cryptocell-713-ree
+ - arm,cryptocell-703-ree
+ - arm,cryptocell-712-ree
+ - arm,cryptocell-710-ree
+ - arm,cryptocell-630p-ree
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ arm_cc712: crypto@80000000 {
+ compatible = "arm,cryptocell-712-ree";
+ reg = <0x80000000 0x10000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ };
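The example in the new schema exercises only the required properties. Below is a sketch of a node that also uses the optional clock, reset and dma-coherent properties the schema introduces; the clock and reset provider phandles are illustrative assumptions, not taken from any real board:

    crypto@90000000 {
            compatible = "arm,cryptocell-712-ree";
            reg = <0x90000000 0x10000>;
            interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&crypto_clk>;       /* assumed clock provider */
            resets = <&crypto_rst 0>;     /* assumed reset provider */
            dma-coherent;
    };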
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
deleted file mode 100644
index 6130e6eb4af8..000000000000
--- a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Arm TrustZone CryptoCell cryptographic engine
-
-Required properties:
-- compatible: Should be one of -
- "arm,cryptocell-713-ree"
- "arm,cryptocell-703-ree"
- "arm,cryptocell-712-ree"
- "arm,cryptocell-710-ree"
- "arm,cryptocell-630p-ree"
-- reg: Base physical address of the engine and length of memory mapped region.
-- interrupts: Interrupt number for the device.
-
-Optional properties:
-- clocks: Reference to the crypto engine clock.
-- dma-coherent: Present if dma operations are coherent.
-
-Examples:
-
- arm_cc712: crypto@80000000 {
- compatible = "arm,cryptocell-712-ree";
- interrupt-parent = <&intc>;
- interrupts = < 0 30 4 >;
- reg = < 0x80000000 0x10000 >;
-
- };
diff --git a/Documentation/devicetree/bindings/crypto/fsl-dcp.yaml b/Documentation/devicetree/bindings/crypto/fsl-dcp.yaml
index a30bf38a4a49..99be01539fcd 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-dcp.yaml
+++ b/Documentation/devicetree/bindings/crypto/fsl-dcp.yaml
@@ -27,7 +27,6 @@ properties:
- description: MXS DCP DCP interrupt
- description: MXS DCP secure interrupt
minItems: 2
- maxItems: 3
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
index 12a7df0e38b2..3d8ea3c2d8dd 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
@@ -26,14 +26,12 @@ properties:
reg:
minItems: 1
- maxItems: 2
items:
- description: Display Backend registers
- description: SAT registers
reg-names:
minItems: 1
- maxItems: 2
items:
- const: be
- const: sat
@@ -43,7 +41,6 @@ properties:
clocks:
minItems: 3
- maxItems: 4
items:
- description: The backend interface clock
- description: The backend module clock
@@ -52,7 +49,6 @@ properties:
clock-names:
minItems: 3
- maxItems: 4
items:
- const: ahb
- const: mod
@@ -61,14 +57,12 @@ properties:
resets:
minItems: 1
- maxItems: 2
items:
- description: The Backend reset line
- description: The SAT reset line
reset-names:
minItems: 1
- maxItems: 2
items:
- const: be
- const: sat
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index a738d7c12a97..bf0bdf54e5f9 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -24,7 +24,6 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: Bus Clock
- description: Module Clock
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
index 907fb47cc84a..5d42d36608d9 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
@@ -46,7 +46,6 @@ properties:
clocks:
minItems: 3
- maxItems: 6
items:
- description: Bus Clock
- description: Register Clock
@@ -57,7 +56,6 @@ properties:
clock-names:
minItems: 3
- maxItems: 6
items:
- const: iahb
- const: isfr
@@ -68,14 +66,12 @@ properties:
resets:
minItems: 1
- maxItems: 2
items:
- description: HDMI Controller Reset
- description: HDCP Reset
reset-names:
minItems: 1
- maxItems: 2
items:
- const: ctrl
- const: hdcp
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml
index 501cec16168c..a97366aaf924 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml
@@ -27,7 +27,6 @@ properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Bus Clock
- description: Module Clock
@@ -36,7 +35,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: bus
- const: mod
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
index ec21e8bf2767..61ef7b337218 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
@@ -48,7 +48,6 @@ properties:
clocks:
minItems: 2
- maxItems: 6
items:
- description: The TCON TOP interface clock
- description: The TCON TOP TV0 clock
@@ -59,7 +58,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 6
items:
- const: bus
- const: tcon-tv0
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
deleted file mode 100644
index 659523f538bf..000000000000
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-Analog Devices ADV7511(W)/13/33/35 HDMI Encoders
-------------------------------------------------
-
-The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
-transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space
-conversion, S/PDIF, CEC and HDCP. ADV7533/5 supports the DSI interface for input
-pixels, while the others support RGB interface.
-
-Required properties:
-
-- compatible: Should be one of:
- "adi,adv7511"
- "adi,adv7511w"
- "adi,adv7513"
- "adi,adv7533"
- "adi,adv7535"
-
-- reg: I2C slave addresses
- The ADV7511 internal registers are split into four pages exposed through
- different I2C addresses, creating four register maps. Each map has it own
- I2C address and acts as a standard slave device on the I2C bus. The main
- address is mandatory, others are optional and revert to defaults if not
- specified.
-
-
-The ADV7511 supports a large number of input data formats that differ by their
-color depth, color format, clock mode, bit justification and random
-arrangement of components on the data bus. The combination of the following
-properties describe the input and map directly to the video input tables of the
-ADV7511 datasheet that document all the supported combinations.
-
-- adi,input-depth: Number of bits per color component at the input (8, 10 or
- 12).
-- adi,input-colorspace: The input color space, one of "rgb", "yuv422" or
- "yuv444".
-- adi,input-clock: The input clock type, one of "1x" (one clock cycle per
- pixel), "2x" (two clock cycles per pixel), "ddr" (one clock cycle per pixel,
- data driven on both edges).
-
-The following input format properties are required except in "rgb 1x" and
-"yuv444 1x" modes, in which case they must not be specified.
-
-- adi,input-style: The input components arrangement variant (1, 2 or 3), as
- listed in the input format tables in the datasheet.
-- adi,input-justification: The input bit justification ("left", "evenly",
- "right").
-
-- avdd-supply: A 1.8V supply that powers up the AVDD pin on the chip.
-- dvdd-supply: A 1.8V supply that powers up the DVDD pin on the chip.
-- pvdd-supply: A 1.8V supply that powers up the PVDD pin on the chip.
-- dvdd-3v-supply: A 3.3V supply that powers up the pin called DVDD_3V
- on the chip.
-- bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is
- needed only for ADV7511.
-
-The following properties are required for ADV7533 and ADV7535:
-
-- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
- be one of 1, 2, 3 or 4.
-- a2vdd-supply: 1.8V supply that powers up the A2VDD pin on the chip.
-- v3p3-supply: A 3.3V supply that powers up the V3P3 pin on the chip.
-- v1p2-supply: A supply that powers up the V1P2 pin on the chip. It can be
- either 1.2V or 1.8V for ADV7533 but only 1.8V for ADV7535.
-
-Optional properties:
-
-- interrupts: Specifier for the ADV7511 interrupt
-- pd-gpios: Specifier for the GPIO connected to the power down signal
-
-- adi,clock-delay: Video data clock delay relative to the pixel clock, in ps
- (-1200 ps .. 1600 ps). Defaults to no delay.
-- adi,embedded-sync: The input uses synchronization signals embedded in the
- data stream (similar to BT.656). Defaults to separate H/V synchronization
- signals.
-- adi,disable-timing-generator: Only for ADV7533 and ADV7535. Disables the
- internal timing generator. The chip will rely on the sync signals in the
- DSI data lanes, rather than generate its own timings for HDMI output.
-- clocks: from common clock binding: reference to the CEC clock.
-- clock-names: from common clock binding: must be "cec".
-- reg-names : Names of maps with programmable addresses.
- It can contain any map needing a non-default address.
- Possible maps names are : "main", "edid", "cec", "packet"
-
-Required nodes:
-
-The ADV7511 has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533/5, the
- remote endpoint phandle should be a reference to a valid mipi_dsi_host device
- node.
-- Video port 1 for the HDMI output
-- Audio port 2 for the HDMI audio input
-
-
-Example
--------
-
- adv7511w: hdmi@39 {
- compatible = "adi,adv7511w";
- /*
- * The EDID page will be accessible on address 0x66 on the I2C
- * bus. All other maps continue to use their default addresses.
- */
- reg = <0x39>, <0x66>;
- reg-names = "main", "edid";
- interrupt-parent = <&gpio3>;
- interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
- clocks = <&cec_clock>;
- clock-names = "cec";
-
- adi,input-depth = <8>;
- adi,input-colorspace = "rgb";
- adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- adv7511w_in: endpoint {
- remote-endpoint = <&dpi_out>;
- };
- };
-
- port@1 {
- reg = <1>;
- adv7511_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
- };
-
- port@2 {
- reg = <2>;
- codec_endpoint: endpoint {
- remote-endpoint = <&i2s0_cpu_endpoint>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.yaml
new file mode 100644
index 000000000000..d3dd7a79b909
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.yaml
@@ -0,0 +1,240 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/adi,adv7511.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADV7511/11W/13 HDMI Encoders
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description: |
+ The ADV7511, ADV7511W and ADV7513 are HDMI audio and video
+ transmitters compatible with HDMI 1.4 and DVI 1.0. They support color
+ space conversion, S/PDIF, CEC and HDCP. The transmitter input is
+ parallel RGB or YUV data.
+
+properties:
+ compatible:
+ enum:
+ - adi,adv7511
+ - adi,adv7511w
+ - adi,adv7513
+
+ reg:
+ description: |
+ I2C slave addresses.
+
+ The ADV7511/11W/13 internal registers are split into four pages
+ exposed through different I2C addresses, creating four register
+ maps. Each map has its own I2C address and acts as a standard slave
+ device on the I2C bus. The main address is mandatory, others are
+ optional and revert to defaults if not specified.
+ minItems: 1
+ maxItems: 4
+
+ reg-names:
+ description:
+ Names of maps with programmable addresses. It can contain any map
+ needing a non-default address.
+ minItems: 1
+ items:
+ - const: main
+ - const: edid
+ - const: cec
+ - const: packet
+
+ clocks:
+ description: Reference to the CEC clock.
+ maxItems: 1
+
+ clock-names:
+ const: cec
+
+ interrupts:
+ maxItems: 1
+
+ pd-gpios:
+ description: GPIO connected to the power down signal.
+ maxItems: 1
+
+ avdd-supply:
+ description: A 1.8V supply that powers up the AVDD pin.
+
+ dvdd-supply:
+ description: A 1.8V supply that powers up the DVDD pin.
+
+ pvdd-supply:
+ description: A 1.8V supply that powers up the PVDD pin.
+
+ dvdd-3v-supply:
+ description: A 3.3V supply that powers up the DVDD_3V pin.
+
+ bgvdd-supply:
+ description: A 1.8V supply that powers up the BGVDD pin.
+
+ adi,input-depth:
+ description: Number of bits per color component at the input.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 8, 10, 12 ]
+
+ adi,input-colorspace:
+ description: Input color space.
+ enum: [ rgb, yuv422, yuv444 ]
+
+ adi,input-clock:
+ description: |
+ Input clock type.
+ "1x": one clock cycle per pixel
+ "2x": two clock cycles per pixel
+ "dd": one clock cycle per pixel, data driven on both edges
+ enum: [ 1x, 2x, dd ]
+
+ adi,clock-delay:
+ description:
+ Video data clock delay relative to the pixel clock, in ps
+ (-1200 ps .. 1600 ps).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ adi,embedded-sync:
+ description:
+ If defined, the input uses synchronization signals embedded in the
+ data stream (similar to BT.656).
+ type: boolean
+
+ adi,input-style:
+ description:
+ Input components arrangement variant as listed in the input
+ format tables in the datasheet.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 3 ]
+
+ adi,input-justification:
+ description: Input bit justification.
+ enum: [ left, evenly, right ]
+
+ ports:
+ description:
+ The ADV7511(W)/13 has two video ports and one audio port. This node
+ models their connections as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+ Documentation/devicetree/bindings/graph.txt
+ type: object
+ properties:
+ port@0:
+ description: Video port for the RGB or YUV input.
+ type: object
+
+ port@1:
+ description: Video port for the HDMI output.
+ type: object
+
+ port@2:
+ description: Audio port for the HDMI output.
+ type: object
+
+# adi,input-colorspace and adi,input-clock are required except in
+# "rgb 1x" and "yuv444 1x" modes, in which case they must not be
+# specified.
+if:
+ not:
+ properties:
+ adi,input-colorspace:
+ contains:
+ enum: [ rgb, yuv444 ]
+ adi,input-clock:
+ contains:
+ const: 1x
+
+then:
+ required:
+ - adi,input-style
+ - adi,input-justification
+
+else:
+ properties:
+ adi,input-style: false
+ adi,input-justification: false
+
+
+required:
+ - compatible
+ - reg
+ - ports
+ - adi,input-depth
+ - adi,input-colorspace
+ - adi,input-clock
+ - avdd-supply
+ - dvdd-supply
+ - pvdd-supply
+ - dvdd-3v-supply
+ - bgvdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c@e6500000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0 0xe6500000>;
+
+ adv7511w: hdmi@39 {
+ compatible = "adi,adv7511w";
+ /*
+ * The EDID page will be accessible on address 0x66 on the I2C
+ * bus. All other maps continue to use their default addresses.
+ */
+ reg = <0x39>, <0x66>;
+ reg-names = "main", "edid";
+ interrupt-parent = <&gpio3>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&cec_clock>;
+ clock-names = "cec";
+ avdd-supply = <&v1v8>;
+ dvdd-supply = <&v1v8>;
+ pvdd-supply = <&v1v8>;
+ dvdd-3v-supply = <&v3v3>;
+ bgvdd-supply = <&v1v8>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "yuv422";
+ adi,input-clock = "1x";
+
+ adi,input-style = <3>;
+ adi,input-justification = "right";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511w_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ codec_endpoint: endpoint {
+ remote-endpoint = <&i2s0_cpu_endpoint>;
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
new file mode 100644
index 000000000000..f36209137c8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/adi,adv7533.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADV7533/35 HDMI Encoders
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description: |
+ The ADV7533 and ADV7535 are HDMI audio and video transmitters
+ compatible with HDMI 1.4 and DVI 1.0. They support color space
+ conversion, S/PDIF, CEC and HDCP. The transmitter input is MIPI DSI.
+
+properties:
+ compatible:
+ enum:
+ - adi,adv7533
+ - adi,adv7535
+
+ reg:
+ description: |
+ I2C slave addresses.
+
+ The ADV7533/35 internal registers are split into four pages
+ exposed through different I2C addresses, creating four register
+ maps. Each map has its own I2C address and acts as a standard slave
+ device on the I2C bus. The main address is mandatory, others are
+ optional and revert to defaults if not specified.
+ minItems: 1
+ maxItems: 4
+
+ reg-names:
+ description:
+ Names of maps with programmable addresses. It can contain any map
+ needing a non-default address.
+ minItems: 1
+ items:
+ - const: main
+ - const: edid
+ - const: cec
+ - const: packet
+
+ clocks:
+ description: Reference to the CEC clock.
+ maxItems: 1
+
+ clock-names:
+ const: cec
+
+ interrupts:
+ maxItems: 1
+
+ pd-gpios:
+ description: GPIO connected to the power down signal.
+ maxItems: 1
+
+ avdd-supply:
+ description: A 1.8V supply that powers up the AVDD pin.
+
+ dvdd-supply:
+ description: A 1.8V supply that powers up the DVDD pin.
+
+ pvdd-supply:
+ description: A 1.8V supply that powers up the PVDD pin.
+
+ a2vdd-supply:
+ description: A 1.8V supply that powers up the A2VDD pin.
+
+ v3p3-supply:
+ description: A 3.3V supply that powers up the V3P3 pin.
+
+ v1p2-supply:
+ description:
+ A supply that powers up the V1P2 pin. It can be either 1.2V
+ or 1.8V for ADV7533 but only 1.8V for ADV7535.
+
+ adi,disable-timing-generator:
+ description:
+ Disables the internal timing generator. The chip will rely on the
+ sync signals in the DSI data lanes, rather than generating its own
+ timings for HDMI output.
+ type: boolean
+
+ adi,dsi-lanes:
+ description: Number of DSI data lanes connected to the DSI host.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 3, 4 ]
+
+ ports:
+ description:
+ The ADV7533/35 has two video ports and one audio port. This node
+ models their connections as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+ Documentation/devicetree/bindings/graph.txt
+ type: object
+ properties:
+ port@0:
+ description:
+ Video port for the DSI input. The remote endpoint phandle
+ should be a reference to a valid mipi_dsi_host device node.
+ type: object
+
+ port@1:
+ description: Video port for the HDMI output.
+ type: object
+
+ port@2:
+ description: Audio port for the HDMI output.
+ type: object
+
+required:
+ - compatible
+ - reg
+ - ports
+ - adi,dsi-lanes
+ - avdd-supply
+ - dvdd-supply
+ - pvdd-supply
+ - a2vdd-supply
+ - v3p3-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c@e6500000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0 0xe6500000>;
+
+ adv7533: hdmi@39 {
+ compatible = "adi,adv7533";
+ /*
+ * The EDID page will be accessible on address 0x66 on the I2C
+ * bus. All other maps continue to use their default addresses.
+ */
+ reg = <0x39>, <0x66>;
+ reg-names = "main", "edid";
+ interrupt-parent = <&gpio3>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&cec_clock>;
+ clock-names = "cec";
+ adi,dsi-lanes = <4>;
+ avdd-supply = <&v1v8>;
+ dvdd-supply = <&v1v8>;
+ pvdd-supply = <&v1v8>;
+ a2vdd-supply = <&v1v8>;
+ v3p3-supply = <&v3v3>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7533_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7533_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ codec_endpoint: endpoint {
+ remote-endpoint = <&i2s0_cpu_endpoint>;
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
index 2333fdbe9296..b2e8bc6da9d0 100644
--- a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
@@ -18,7 +18,6 @@ properties:
reg:
minItems: 1
- maxItems: 3
items:
- description:
Register block of mhdptx apb registers up to PHY mapped area (AUX_CONFIG_P).
@@ -31,7 +30,6 @@ properties:
reg-names:
minItems: 1
- maxItems: 3
items:
- const: mhdptx
- const: j721e-intg
diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
index 735d0233a7d6..674891ee2f8e 100644
--- a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
@@ -29,7 +29,8 @@ properties:
properties:
port@0:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
Primary MIPI port for MIPI input
diff --git a/Documentation/devicetree/bindings/display/panel/lvds.yaml b/Documentation/devicetree/bindings/display/panel/lvds.yaml
index 31164608ba1d..49460c9dceea 100644
--- a/Documentation/devicetree/bindings/display/panel/lvds.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lvds.yaml
@@ -51,37 +51,37 @@ properties:
- "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and
[VESA] specifications. Data are transferred as follows on 3 LVDS lanes.
- Slot 0 1 2 3 4 5 6
- ________________ _________________
- Clock \_______________________/
- ______ ______ ______ ______ ______ ______ ______
- DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
- DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
- DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
+ Slot 0 1 2 3 4 5 6
+ ________________ _________________
+ Clock \_______________________/
+ ______ ______ ______ ______ ______ ______ ______
+ DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
+ DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
+ DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
- "jeida-24" - 24-bit data mapping compatible with the [DSIM] and [LDI]
specifications. Data are transferred as follows on 4 LVDS lanes.
- Slot 0 1 2 3 4 5 6
- ________________ _________________
- Clock \_______________________/
- ______ ______ ______ ______ ______ ______ ______
- DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__><
- DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__><
- DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__><
- DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__><
+ Slot 0 1 2 3 4 5 6
+ ________________ _________________
+ Clock \_______________________/
+ ______ ______ ______ ______ ______ ______ ______
+ DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__><
+ DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__><
+ DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__><
+ DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__><
- "vesa-24" - 24-bit data mapping compatible with the [VESA] specification.
Data are transferred as follows on 4 LVDS lanes.
- Slot 0 1 2 3 4 5 6
- ________________ _________________
- Clock \_______________________/
- ______ ______ ______ ______ ______ ______ ______
- DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
- DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
- DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
- DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__><
+ Slot 0 1 2 3 4 5 6
+ ________________ _________________
+ Clock \_______________________/
+ ______ ______ ______ ______ ______ ______ ______
+ DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
+ DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
+ DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
+ DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__><
Control signals are mapped as follows.
diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml
index 121596f106da..e3ca5389c17d 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,du.yaml
@@ -55,7 +55,7 @@ properties:
maxItems: 1
ports:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/properties/ports
description: |
The connections to the DU output video ports are modeled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
@@ -92,7 +92,6 @@ required:
- reg
- clocks
- interrupts
- - resets
- ports
allOf:
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
index 75cd9c686e98..da3b889ad8fc 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-hdmi.yaml
@@ -29,7 +29,6 @@ properties:
clocks:
minItems: 2
- maxItems: 5
items:
- {}
- {}
@@ -41,7 +40,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 5
items:
- {}
- {}
diff --git a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
index 679daed4124e..ed310bbe3afe 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
@@ -29,7 +29,6 @@ properties:
- description: DSI bus clock
- description: Pixel clock
minItems: 2
- maxItems: 3
clock-names:
items:
@@ -37,7 +36,6 @@ properties:
- const: ref
- const: px_clk
minItems: 2
- maxItems: 3
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
index d54f9ca207af..4ae3d75492d3 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
@@ -22,7 +22,6 @@ properties:
- description: events interrupt line.
- description: errors interrupt line.
minItems: 1
- maxItems: 2
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml
index 403d57977ee7..d88bd93f4b80 100644
--- a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml
@@ -65,7 +65,6 @@ properties:
The APB clock and at least one video clock are mandatory, the audio clock
is optional.
minItems: 2
- maxItems: 4
items:
- description: dp_apb_clk is the APB clock
- description: dp_aud_clk is the Audio clock
@@ -78,13 +77,11 @@ properties:
clock-names:
oneOf:
- minItems: 2
- maxItems: 3
items:
- const: dp_apb_clk
- enum: [ dp_vtc_pixel_clk_in, dp_live_video_in_clk ]
- enum: [ dp_vtc_pixel_clk_in, dp_live_video_in_clk ]
- minItems: 3
- maxItems: 4
items:
- const: dp_apb_clk
- const: dp_aud_clk
@@ -116,7 +113,6 @@ properties:
maxItems: 2
phy-names:
minItems: 1
- maxItems: 2
items:
- const: dp-phy0
- const: dp-phy1
diff --git a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
new file mode 100644
index 000000000000..a4f9fe23dcd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/altr,msgdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera mSGDMA IP core
+
+maintainers:
+ - Olivier Dautricourt <olivier.dautricourt@orolia.com>
+
+description: |
+ Altera / Intel modular Scatter-Gather Direct Memory Access (mSGDMA)
+ intellectual property (IP)
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ const: altr,socfpga-msgdma
+
+ reg:
+ items:
+ - description: Control and Status Register Slave Port
+ - description: Descriptor Slave Port
+ - description: Response Slave Port
+
+ reg-names:
+ items:
+ - const: csr
+ - const: desc
+ - const: resp
+
+ interrupts:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 1
+ description:
+ The cell identifies the channel id (must be 0)
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ msgdma_controller: dma-controller@ff200b00 {
+ compatible = "altr,socfpga-msgdma";
+ reg = <0xff200b00 0x100>, <0xff200c00 0x100>, <0xff200d00 0x100>;
+ reg-names = "csr", "desc", "resp";
+ interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ };
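Since #dma-cells is 1 and the only valid channel id is 0, a DMA client references the controller as sketched below; the client node itself is hypothetical:

    client@ff201000 {
            /* The single cell selects the mSGDMA channel; only 0 is valid */
            dmas = <&msgdma_controller 0>;
            dma-names = "tx";
    };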
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.txt b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
deleted file mode 100644
index 0ba81f79266f..000000000000
--- a/Documentation/devicetree/bindings/dma/arm-pl08x.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-* ARM PrimeCells PL080 and PL081 and derivatives DMA controller
-
-Required properties:
-- compatible: "arm,pl080", "arm,primecell";
- "arm,pl081", "arm,primecell";
- "faraday,ftdmac020", "arm,primecell"
-- arm,primecell-periphid: on the FTDMAC020 the primecell ID is not hard-coded
- in the hardware and must be specified here as <0x0003b080>. This number
- follows the PrimeCell standard numbering using the JEP106 vendor code 0x38
- for Faraday Technology.
-- reg: Address range of the PL08x registers
-- interrupt: The PL08x interrupt number
-- clocks: The clock running the IP core clock
-- clock-names: Must contain "apb_pclk"
-- lli-bus-interface-ahb1: if AHB master 1 is eligible for fetching LLIs
-- lli-bus-interface-ahb2: if AHB master 2 is eligible for fetching LLIs
-- mem-bus-interface-ahb1: if AHB master 1 is eligible for fetching memory contents
-- mem-bus-interface-ahb2: if AHB master 2 is eligible for fetching memory contents
-- #dma-cells: must be <2>. First cell should contain the DMA request,
- second cell should contain either 1 or 2 depending on
- which AHB master that is used.
-
-Optional properties:
-- dma-channels: contains the total number of DMA channels supported by the DMAC
-- dma-requests: contains the total number of DMA requests supported by the DMAC
-- memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32
- 64, 128 or 256 bytes are legal values
-- memcpy-bus-width: the bus width used for memcpy in bits: 8, 16 or 32 are legal
- values, the Faraday FTDMAC020 can also accept 64 bits
-
-Clients
-Required properties:
-- dmas: List of DMA controller phandle, request channel and AHB master id
-- dma-names: Names of the aforementioned requested channels
-
-Example:
-
-dmac0: dma-controller@10130000 {
- compatible = "arm,pl080", "arm,primecell";
- reg = <0x10130000 0x1000>;
- interrupt-parent = <&vica>;
- interrupts = <15>;
- clocks = <&hclkdma0>;
- clock-names = "apb_pclk";
- lli-bus-interface-ahb1;
- lli-bus-interface-ahb2;
- mem-bus-interface-ahb2;
- memcpy-burst-size = <256>;
- memcpy-bus-width = <32>;
- #dma-cells = <2>;
-};
-
-device@40008000 {
- ...
- dmas = <&dmac0 0 2
- &dmac0 1 2>;
- dma-names = "tx", "rx";
- ...
-};
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.yaml b/Documentation/devicetree/bindings/dma/arm-pl08x.yaml
new file mode 100644
index 000000000000..3bd9eea543ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/arm-pl08x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM PrimeCells PL080 and PL081 and derivatives DMA controller
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+# We need a select here so we don't match all nodes with 'arm,primecell'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - arm,pl080
+ - arm,pl081
+ required:
+ - compatible
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - arm,pl080
+ - arm,pl081
+ - const: arm,primecell
+ - items:
+ - const: faraday,ftdma020
+ - const: arm,pl080
+ - const: arm,primecell
+
+ reg:
+ maxItems: 1
+ description: Address range of the PL08x registers
+
+ interrupts:
+ minItems: 1
+ description: The PL08x interrupt number
+
+ clocks:
+ minItems: 1
+ description: The clock running the IP core
+
+ clock-names:
+ maxItems: 1
+
+ lli-bus-interface-ahb1:
+ type: boolean
+ description: if AHB master 1 is eligible for fetching LLIs
+
+ lli-bus-interface-ahb2:
+ type: boolean
+ description: if AHB master 2 is eligible for fetching LLIs
+
+ mem-bus-interface-ahb1:
+ type: boolean
+ description: if AHB master 1 is eligible for fetching memory contents
+
+ mem-bus-interface-ahb2:
+ type: boolean
+ description: if AHB master 2 is eligible for fetching memory contents
+
+ memcpy-burst-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 1
+ - 4
+ - 8
+ - 16
+ - 32
+ - 64
+ - 128
+ - 256
+ description: the size of the bursts for memcpy
+
+ memcpy-bus-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 8
+ - 16
+ - 32
+ - 64
+ description: bus width used for memcpy in bits. The FTDMAC020 also accepts 64 bits
+
+required:
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - "#dma-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ dmac0: dma-controller@10130000 {
+ compatible = "arm,pl080", "arm,primecell";
+ reg = <0x10130000 0x1000>;
+ interrupt-parent = <&vica>;
+ interrupts = <15>;
+ clocks = <&hclkdma0>;
+ clock-names = "apb_pclk";
+ lli-bus-interface-ahb1;
+ lli-bus-interface-ahb2;
+ mem-bus-interface-ahb2;
+ memcpy-burst-size = <256>;
+ memcpy-bus-width = <32>;
+ #dma-cells = <2>;
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/reset/cortina,gemini-reset.h>
+ #include <dt-bindings/clock/cortina,gemini-clock.h>
+ dma-controller@67000000 {
+ compatible = "faraday,ftdma020", "arm,pl080", "arm,primecell";
+ /* Faraday Technology FTDMAC020 variant */
+ arm,primecell-periphid = <0x0003b080>;
+ reg = <0x67000000 0x1000>;
+ interrupts = <9 IRQ_TYPE_EDGE_RISING>;
+ resets = <&syscon GEMINI_RESET_DMAC>;
+ clocks = <&syscon GEMINI_CLK_AHB>;
+ clock-names = "apb_pclk";
+ /* Bus interface AHB1 (AHB0) is totally tilted */
+ lli-bus-interface-ahb2;
+ mem-bus-interface-ahb2;
+ memcpy-burst-size = <256>;
+ memcpy-bus-width = <32>;
+ #dma-cells = <2>;
+ };
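The deleted .txt also documented the client side of the two #dma-cells (DMA request signal, then AHB master 1 or 2); that part is not repeated in the schema examples. A client sketch based on the removed example:

    device@40008000 {
            /* <controller, DMA request, AHB master (1 or 2)> */
            dmas = <&dmac0 0 2>, <&dmac0 1 2>;
            dma-names = "tx", "rx";
    };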
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index e302147e53c6..e614fe3187bb 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -21,6 +21,7 @@ properties:
enum:
- qcom,sdm845-gpi-dma
- qcom,sm8150-gpi-dma
+ - qcom,sm8250-gpi-dma
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
index 7f2a54bc732d..d8142cbd13d3 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
@@ -52,7 +52,6 @@ properties:
interrupt-names:
minItems: 9
- maxItems: 17
items:
- const: error
- pattern: "^ch([0-9]|1[0-5])$"
diff --git a/Documentation/devicetree/bindings/dma/renesas,shdma.txt b/Documentation/devicetree/bindings/dma/renesas,shdma.txt
deleted file mode 100644
index a91920a49433..000000000000
--- a/Documentation/devicetree/bindings/dma/renesas,shdma.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-* SHDMA Device Tree bindings
-
-Sh-/r-mobile and R-Car systems often have multiple identical DMA controller
-instances, capable of serving any of a common set of DMA slave devices, using
-the same configuration. To describe this topology we require all compatible
-SHDMA DT nodes to be placed under a DMA multiplexer node. All such compatible
-DMAC instances have the same number of channels and use the same DMA
-descriptors. Therefore respective DMA DT bindings can also all be placed in the
-multiplexer node. Even if there is only one such DMAC instance on a system, it
-still has to be placed under such a multiplexer node.
-
-* DMA multiplexer
-
-Required properties:
-- compatible: should be "renesas,shdma-mux"
-- #dma-cells: should be <1>, see "dmas" property below
-
-Optional properties (currently unused):
-- dma-channels: number of DMA channels
-- dma-requests: number of DMA request signals
-
-* DMA controller
-
-Required properties:
-- compatible: should be of the form "renesas,shdma-<soc>", where <soc> should
- be replaced with the desired SoC model, e.g.
- "renesas,shdma-r8a73a4" for the system DMAC on r8a73a4 SoC
-
-Example:
- dmac: dma-multiplexer@0 {
- compatible = "renesas,shdma-mux";
- #dma-cells = <1>;
- dma-channels = <20>;
- dma-requests = <256>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- dma0: dma-controller@e6700020 {
- compatible = "renesas,shdma-r8a73a4";
- reg = <0 0xe6700020 0 0x89e0>;
- interrupt-parent = <&gic>;
- interrupts = <0 220 4
- 0 200 4
- 0 201 4
- 0 202 4
- 0 203 4
- 0 204 4
- 0 205 4
- 0 206 4
- 0 207 4
- 0 208 4
- 0 209 4
- 0 210 4
- 0 211 4
- 0 212 4
- 0 213 4
- 0 214 4
- 0 215 4
- 0 216 4
- 0 217 4
- 0 218 4
- 0 219 4>;
- interrupt-names = "error",
- "ch0", "ch1", "ch2", "ch3",
- "ch4", "ch5", "ch6", "ch7",
- "ch8", "ch9", "ch10", "ch11",
- "ch12", "ch13", "ch14", "ch15",
- "ch16", "ch17", "ch18", "ch19";
- };
- };
-
-* DMA client
-
-Required properties:
-- dmas: a list of <[DMA multiplexer phandle] [MID/RID value]> pairs,
- where MID/RID values are fixed handles, specified in the SoC
- manual
-- dma-names: a list of DMA channel names, one per "dmas" entry
-
-Example:
- dmas = <&dmac 0xd1
- &dmac 0xd2>;
- dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 29fcd37082e8..f719e1612b0a 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -33,7 +33,7 @@ The following are mandatory properties for 66AK2G SoCs only:
- power-domains:Should contain a phandle to a PM domain provider node
and an args specifier containing the device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
Optional properties:
-------------------
@@ -70,7 +70,7 @@ The following are mandatory properties for 66AK2G SoCs only:
- power-domains:Should contain a phandle to a PM domain provider node
and an args specifier containing the device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
Optional properties:
-------------------
diff --git a/Documentation/devicetree/bindings/dvfs/performance-domain.yaml b/Documentation/devicetree/bindings/dvfs/performance-domain.yaml
new file mode 100644
index 000000000000..c8b91207f34d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dvfs/performance-domain.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dvfs/performance-domain.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic performance domains
+
+maintainers:
+ - Sudeep Holla <sudeep.holla@arm.com>
+
+description: |+
+ This binding is intended for performance management of groups of devices or
+ CPUs that run in the same performance domain. Performance domains must not
+ be confused with power domains. A performance domain is defined by a set
+ of devices that always have to run at the same performance level. For a given
+ performance domain, there is a single point of control that affects all the
+ devices in the domain, making it impossible to set the performance level of
+ an individual device in the domain independently from other devices in
+ that domain. For example, a set of CPUs that share a voltage domain, and
+ have a common frequency control, is said to be in the same performance
+ domain.
+
+ This device tree binding can be used to bind performance domain consumer
+ devices with their performance domains provided by performance domain
+ providers. A performance domain provider can be represented by any node in
+ the device tree and can provide one or more performance domains. A consumer
+ node can refer to the provider by a phandle and a set of phandle arguments
+ (so called performance domain specifiers) of length specified by the
+ \#performance-domain-cells property in the performance domain provider node.
+
+select: true
+
+properties:
+ "#performance-domain-cells":
+ description:
+ Number of cells in a performance domain specifier. Typically 0 for nodes
+ representing a single performance domain and 1 for nodes providing
+ multiple performance domains (e.g. performance controllers), but can be
+ any value as specified by the device tree binding documentation of a
+ particular provider.
+ enum: [ 0, 1 ]
+
+ performance-domains:
+ $ref: '/schemas/types.yaml#/definitions/phandle-array'
+ maxItems: 1
+ description:
+ A phandle and performance domain specifier as defined by bindings of the
+ performance controller/provider specified by phandle.
+
+additionalProperties: true
+
+examples:
+ - |
+ performance: performance-controller@12340000 {
+ compatible = "qcom,cpufreq-hw";
+ reg = <0x12340000 0x1000>;
+ #performance-domain-cells = <1>;
+ };
+
+ // The node above defines a performance controller that is a performance
+ // domain provider and expects one cell as its phandle argument.
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x0>;
+ performance-domains = <&performance 1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml b/Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml
index 57e5270a0741..4cfc3a187004 100644
--- a/Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml
+++ b/Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml
@@ -30,14 +30,12 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: uncorrectable error interrupt
- description: correctable error interrupt
interrupt-names:
minItems: 1
- maxItems: 2
items:
- const: ue
- const: ce
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 021d8ae42da3..914a423ec449 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -32,7 +32,6 @@ properties:
oneOf:
- allOf:
- minItems: 1
- maxItems: 2
items:
- pattern: "^(atmel|catalyst|microchip|nxp|ramtron|renesas|rohm|st),(24(c|cs|lc|mac)[0-9]+|spd)$"
- pattern: "^atmel,(24(c|cs|mac)[0-9]+|spd)$"
diff --git a/Documentation/devicetree/bindings/eeprom/at25.yaml b/Documentation/devicetree/bindings/eeprom/at25.yaml
index 6a2dc8b3ed14..fbf99e346966 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at25.yaml
@@ -4,14 +4,16 @@
$id: "http://devicetree.org/schemas/eeprom/at25.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
-title: SPI EEPROMs compatible with Atmel's AT25
+title: SPI EEPROMs or FRAMs compatible with Atmel's AT25
maintainers:
- Christian Eggers <ceggers@arri.de>
properties:
$nodename:
- pattern: "^eeprom@[0-9a-f]{1,2}$"
+ anyOf:
+ - pattern: "^eeprom@[0-9a-f]{1,2}$"
+ - pattern: "^fram@[0-9a-f]{1,2}$"
# There are multiple known vendors who manufacture EEPROM chips compatible
# with Atmel's AT25. The compatible string requires two items where the
@@ -31,6 +33,7 @@ properties:
- microchip,25lc040
- st,m95m02
- st,m95256
+ - cypress,fm25
- const: atmel,at25
@@ -47,7 +50,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072]
description:
- Size of the eeprom page.
+ Size of the eeprom page. FRAMs don't have pages.
size:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -100,9 +103,19 @@ required:
- compatible
- reg
- spi-max-frequency
- - pagesize
- - size
- - address-width
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ const: cypress,fm25
+ then:
+ required:
+ - pagesize
+ - size
+ - address-width
additionalProperties: false
@@ -125,4 +138,10 @@ examples:
size = <32768>;
address-width = <16>;
};
+
+ fram@1 {
+ compatible = "cypress,fm25", "atmel,at25";
+ reg = <1>;
+ spi-max-frequency = <40000000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/example-schema.yaml b/Documentation/devicetree/bindings/example-schema.yaml
index a97f39109f8d..ff6ec65145cf 100644
--- a/Documentation/devicetree/bindings/example-schema.yaml
+++ b/Documentation/devicetree/bindings/example-schema.yaml
@@ -91,7 +91,6 @@ properties:
interrupts:
# Either 1 or 2 interrupts can be present
minItems: 1
- maxItems: 2
items:
- description: tx or combined interrupt
- description: rx interrupt
@@ -105,7 +104,6 @@ properties:
interrupt-names:
# minItems must be specified here because the default would be 2
minItems: 1
- maxItems: 2
items:
- const: tx irq
- const: rx irq
diff --git a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
deleted file mode 100644
index fc3888e09549..000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-* SM5502 MUIC (Micro-USB Interface Controller) device
-
-The Silicon Mitus SM5502 is a MUIC (Micro-USB Interface Controller) device
-which can detect the state of external accessory when external accessory is
-attached or detached and button is pressed or released. It is interfaced to
-the host controller using an I2C interface.
-
-Required properties:
-- compatible: Should be "siliconmitus,sm5502-muic"
-- reg: Specifies the I2C slave address of the MUIC block. It should be 0x25
-- interrupts: Interrupt specifiers for detection interrupt sources.
-
-Example:
-
- sm5502@25 {
- compatible = "siliconmitus,sm5502-muic";
- interrupt-parent = <&gpx1>;
- interrupts = <5 0>;
- reg = <0x25>;
- };
diff --git a/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml b/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
new file mode 100644
index 000000000000..fd2e55088888
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/siliconmitus,sm5502-muic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SM5502/SM5504 MUIC (Micro-USB Interface Controller) device
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+
+description:
+ The Silicon Mitus SM5502 is a MUIC (Micro-USB Interface Controller) device
+ which can detect the state of an external accessory when it is attached or
+ detached, and when a button is pressed or released. It is interfaced to
+ the host controller using an I2C interface.
+
+properties:
+ compatible:
+ enum:
+ - siliconmitus,sm5502-muic
+ - siliconmitus,sm5504-muic
+
+ reg:
+ maxItems: 1
+ description: I2C slave address of the device. Usually 0x25 for SM5502,
+ 0x14 for SM5504.
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ extcon@25 {
+ compatible = "siliconmitus,sm5502-muic";
+ reg = <0x25>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
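The description notes that the SM5504 variant usually sits at I2C address 0x14; a parallel node for it (interrupt wiring assumed identical to the SM5502 example) would look like:

    extcon@14 {
            compatible = "siliconmitus,sm5504-muic";
            reg = <0x14>;
            interrupt-parent = <&msmgpio>;
            interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
    };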
diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
new file mode 100644
index 000000000000..cebf6ffe70d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml
@@ -0,0 +1,341 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2021 ARM Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/arm,scmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: System Control and Management Interface (SCMI) Message Protocol bindings
+
+maintainers:
+ - Sudeep Holla <sudeep.holla@arm.com>
+
+description: |
+ The SCMI is intended to allow agents such as OSPM to manage various functions
+ that are provided by the hardware platform it is running on, including power
+ and performance functions.
+
+ This binding is intended to define the interface that firmware implementing
+ the SCMI, as described in ARM document number ARM DEN 0056 ("ARM System Control
+ and Management Interface Platform Design Document")[0], provides for OSPM in
+ the device tree.
+
+ [0] https://developer.arm.com/documentation/den0056/latest
+
+properties:
+ $nodename:
+ const: scmi
+
+ compatible:
+ oneOf:
+ - description: SCMI compliant firmware with mailbox transport
+ items:
+ - const: arm,scmi
+ - description: SCMI compliant firmware with ARM SMC/HVC transport
+ items:
+ - const: arm,scmi-smc
+
+ interrupts:
+ description:
+ The interrupt that indicates message completion by the platform
+ rather than by the return of the smc call. This should not be used
+ except when the platform requires such behavior.
+ maxItems: 1
+
+ interrupt-names:
+ const: a2p
+
+ mbox-names:
+ description:
+ Specifies the mailboxes used to communicate with SCMI compliant
+ firmware.
+ items:
+ - const: tx
+ - const: rx
+
+ mboxes:
+ description:
+ List of phandle and mailbox channel specifiers. It should contain
+ one or two mailboxes: one for transmitting messages ("tx") and an
+ optional one for receiving notifications ("rx"), if supported.
+ minItems: 1
+ maxItems: 2
+
+ shmem:
+ description:
+ List of phandles pointing to the shared memory (SHM) areas, one for
+ each transport channel specified.
+ minItems: 1
+ maxItems: 2
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ arm,smc-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ SMC id required when using smc or hvc transports
+
+ protocol@11:
+ type: object
+ properties:
+ reg:
+ const: 0x11
+
+ '#power-domain-cells':
+ const: 1
+
+ required:
+ - '#power-domain-cells'
+
+ protocol@13:
+ type: object
+ properties:
+ reg:
+ const: 0x13
+
+ '#clock-cells':
+ const: 1
+
+ required:
+ - '#clock-cells'
+
+ protocol@14:
+ type: object
+ properties:
+ reg:
+ const: 0x14
+
+ '#clock-cells':
+ const: 1
+
+ required:
+ - '#clock-cells'
+
+ protocol@15:
+ type: object
+ properties:
+ reg:
+ const: 0x15
+
+ '#thermal-sensor-cells':
+ const: 1
+
+ required:
+ - '#thermal-sensor-cells'
+
+ protocol@16:
+ type: object
+ properties:
+ reg:
+ const: 0x16
+
+ '#reset-cells':
+ const: 1
+
+ required:
+ - '#reset-cells'
+
+ protocol@17:
+ type: object
+ properties:
+ reg:
+ const: 0x17
+
+ regulators:
+ type: object
+ description:
+ The list of all regulators provided by this SCMI controller.
+
+ patternProperties:
+ '^regulator@[0-9a-f]+$':
+ type: object
+ $ref: "../regulator/regulator.yaml#"
+
+ properties:
+ reg:
+ maxItems: 1
+ description: Identifier for the voltage regulator.
+
+ required:
+ - reg
+
+additionalProperties: false
+
+patternProperties:
+ '^protocol@[0-9a-f]+$':
+ type: object
+ description:
+ Each sub-node represents a supported protocol. If the platform
+ supports a dedicated communication channel for a particular protocol,
+ then the corresponding transport properties must be present.
+
+ properties:
+ reg:
+ maxItems: 1
+
+ mbox-names:
+ items:
+ - const: tx
+ - const: rx
+
+ mboxes:
+ minItems: 1
+ maxItems: 2
+
+ shmem:
+ minItems: 1
+ maxItems: 2
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - shmem
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: arm,scmi
+then:
+ properties:
+ interrupts: false
+ interrupt-names: false
+
+ required:
+ - mboxes
+
+else:
+ if:
+ properties:
+ compatible:
+ contains:
+ const: arm,scmi-smc
+ then:
+ required:
+ - arm,smc-id
+
+examples:
+ - |
+ firmware {
+ scmi {
+ compatible = "arm,scmi";
+ mboxes = <&mhuB 0 0>,
+ <&mhuB 0 1>;
+ mbox-names = "tx", "rx";
+ shmem = <&cpu_scp_lpri0>,
+ <&cpu_scp_lpri1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_devpd: protocol@11 {
+ reg = <0x11>;
+ #power-domain-cells = <1>;
+ };
+
+ scmi_dvfs: protocol@13 {
+ reg = <0x13>;
+ #clock-cells = <1>;
+
+ mboxes = <&mhuB 1 0>,
+ <&mhuB 1 1>;
+ mbox-names = "tx", "rx";
+ shmem = <&cpu_scp_hpri0>,
+ <&cpu_scp_hpri1>;
+ };
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_sensors: protocol@15 {
+ reg = <0x15>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ scmi_reset: protocol@16 {
+ reg = <0x16>;
+ #reset-cells = <1>;
+ };
+
+ scmi_voltage: protocol@17 {
+ reg = <0x17>;
+ regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ regulator_devX: regulator@0 {
+ reg = <0x0>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ regulator_devY: regulator@9 {
+ reg = <0x9>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <4200000>;
+ };
+ };
+ };
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ sram@50000000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x50000000 0x0 0x10000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x50000000 0x10000>;
+
+ cpu_scp_lpri0: scp-sram-section@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x80>;
+ };
+
+ cpu_scp_lpri1: scp-sram-section@80 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x80 0x80>;
+ };
+
+ cpu_scp_hpri0: scp-sram-section@100 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x100 0x80>;
+ };
+
+ cpu_scp_hpri1: scp-sram-section@180 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x180 0x80>;
+ };
+ };
+ };
+
+ - |
+ firmware {
+ scmi {
+ compatible = "arm,scmi-smc";
+ shmem = <&cpu_scp_lpri0 &cpu_scp_lpri1>;
+ arm,smc-id = <0xc3000001>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_devpd1: protocol@11 {
+ reg = <0x11>;
+ #power-domain-cells = <1>;
+ };
+
+ };
+ };
+
+...
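As an editorial aside (not part of this patch), here is a minimal sketch of how consumer nodes might reference the providers defined by this binding, reusing the scmi_devpd, scmi_dvfs and scmi_sensors labels from the example above; the CPU compatible and the domain and sensor indices are illustrative assumptions only:

    cpu@0 {
        device_type = "cpu";
        compatible = "arm,cortex-a53";
        reg = <0x0>;
        /* Performance level exposed by protocol@13 as a clock */
        clocks = <&scmi_dvfs 0>;
        /* Power domain 0 exposed by protocol@11 (#power-domain-cells = <1>) */
        power-domains = <&scmi_devpd 0>;
    };

    thermal-zones {
        soc-thermal {
            polling-delay = <1000>;
            polling-delay-passive = <100>;
            /* Sensor 0 exposed by protocol@15 (#thermal-sensor-cells = <1>) */
            thermal-sensors = <&scmi_sensors 0>;
        };
    };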
diff --git a/Documentation/devicetree/bindings/firmware/arm,scpi.yaml b/Documentation/devicetree/bindings/firmware/arm,scpi.yaml
new file mode 100644
index 000000000000..d7113b06454b
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/arm,scpi.yaml
@@ -0,0 +1,247 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2021 ARM Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/arm,scpi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: System Control and Power Interface (SCPI) Message Protocol bindings
+
+maintainers:
+ - Sudeep Holla <sudeep.holla@arm.com>
+
+description: |
+ Firmware implementing the SCPI described in ARM document number ARM DUI
+ 0922B ("ARM Compute Subsystem SCP: Message Interface Protocols")[0] can be
+ used by Linux to initiate various system control and power operations.
+
+ This binding is intended to define the interface that firmware implementing
+ the SCPI provides to OSPM in the device tree.
+
+ [0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html
+
+properties:
+ $nodename:
+ const: scpi
+
+ compatible:
+ description:
+ SCPI-compliant firmware complying with SCPI v1.0 and above, OR
+ SCPI-compliant firmware complying with the unversioned releases
+ prior to SCPI v1.0
+ oneOf:
+ - const: arm,scpi # SCPI v1.0 and above
+ - const: arm,scpi-pre-1.0 # Unversioned SCPI before v1.0
+ - items:
+ - enum:
+ - amlogic,meson-gxbb-scpi
+ - const: arm,scpi-pre-1.0
+
+ mboxes:
+ description:
+ List of phandle and mailbox channel specifiers. All the channels reserved
+ by the remote SCP firmware for use by the SCPI message protocol should be
+ specified in any order.
+ minItems: 1
+
+ shmem:
+ description:
+ List of phandles pointing to the shared memory (SHM) areas between the
+ processors using these mailboxes for IPC, one for each mailbox. The SHM
+ can be any memory reserved for the purpose of this communication between
+ the processors.
+ minItems: 1
+
+ power-controller:
+ type: object
+ description:
+ This sub-node represents the SCPI power domain controller.
+
+ properties:
+ compatible:
+ const: arm,scpi-power-domains
+
+ '#power-domain-cells':
+ const: 1
+
+ num-domains:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Total number of power domains provided by SCPI. This is needed as
+ the SCPI message protocol lacks a mechanism to query this
+ information at runtime.
+
+ required:
+ - compatible
+ - '#power-domain-cells'
+ - num-domains
+
+ additionalProperties: false
+
+ sensors:
+ type: object
+ description: |
+ This sub-node represents the SCPI sensors controller.
+
+ properties:
+ compatible:
+ oneOf:
+ - const: arm,scpi-sensors
+ - items:
+ - enum:
+ - amlogic,meson-gxbb-scpi-sensors
+ - const: arm,scpi-sensors
+
+ '#thermal-sensor-cells':
+ const: 1
+
+ required:
+ - compatible
+ - '#thermal-sensor-cells'
+
+ additionalProperties: false
+
+ clocks:
+ type: object
+ description:
+ This is the container node. Each sub-node represents one of the types
+ of clock controller - indexed or full range.
+
+ properties:
+ compatible:
+ const: arm,scpi-clocks
+
+ patternProperties:
+ "^clocks-[0-9a-f]+$":
+ type: object
+ description: |
+ This sub-node represents one of the types of clock controller
+ - indexed or full range.
+
+ "arm,scpi-dvfs-clocks" - all the clocks that are variable and index
+ based. These clocks don't provide an entire range of values between
+ the limits but only discrete points within the range. The firmware
+ provides the mapping for each such operating frequency and the index
+ associated with it. The firmware also manages the voltage scaling
+ appropriately with the clock scaling.
+
+ "arm,scpi-variable-clocks" - all the clocks that are variable and
+ provide full range within the specified range. The firmware provides
+ the range of values within a specified range.
+
+ properties:
+ compatible:
+ oneOf:
+ - const: arm,scpi-dvfs-clocks
+ - const: arm,scpi-variable-clocks
+
+ '#clock-cells':
+ const: 1
+
+ clock-output-names: true
+
+ clock-indices:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ The identifying numbers (i.e. clock_id) for the clocks in the node.
+ They can be non-linear and hence provide the mapping of identifiers
+ into the clock-output-names array.
+
+ required:
+ - compatible
+ - '#clock-cells'
+ - clock-output-names
+ - clock-indices
+
+ additionalProperties: false
+
+ required:
+ - compatible
+
+ additionalProperties: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - mboxes
+ - shmem
+
+examples:
+ - |
+ firmware {
+ scpi {
+ compatible = "arm,scpi";
+ mboxes = <&mhuA 1>;
+ shmem = <&cpu_scp_hpri>; /* HP-NonSecure */
+
+ scpi_devpd: power-controller {
+ compatible = "arm,scpi-power-domains";
+ num-domains = <2>;
+ #power-domain-cells = <1>;
+ };
+
+ clocks {
+ compatible = "arm,scpi-clocks";
+
+ scpi_dvfs: clocks-0 {
+ compatible = "arm,scpi-dvfs-clocks";
+ #clock-cells = <1>;
+ clock-indices = <0>, <1>, <2>;
+ clock-output-names = "atlclk", "aplclk", "gpuclk";
+ };
+
+ scpi_clk: clocks-1 {
+ compatible = "arm,scpi-variable-clocks";
+ #clock-cells = <1>;
+ clock-indices = <3>, <4>;
+ clock-output-names = "pxlclk0", "pxlclk1";
+ };
+ };
+
+ scpi_sensors: sensors {
+ compatible = "arm,scpi-sensors";
+ #thermal-sensor-cells = <1>;
+ };
+
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ sram@50000000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x50000000 0x0 0x10000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x50000000 0x10000>;
+
+ cpu_scp_lpri: scp-sram-section@0 {
+ compatible = "arm,scp-shmem";
+ reg = <0x0 0x200>;
+ };
+
+ cpu_scp_hpri: scp-sram-section@200 {
+ compatible = "arm,scp-shmem";
+ reg = <0x200 0x200>;
+ };
+ };
+ };
+
+ - |
+ firmware {
+ scpi {
+ compatible = "amlogic,meson-gxbb-scpi", "arm,scpi-pre-1.0";
+ mboxes = <&mailbox 1 &mailbox 2>;
+ shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
+
+ scpi_sensors1: sensors {
+ compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
+ #thermal-sensor-cells = <1>;
+ };
+ };
+ };
+...
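Again as an editorial aside (not part of the patch), a short sketch of a consumer of the SCPI providers above, reusing the scpi_dvfs and scpi_devpd labels from the first example; the CPU compatible and the chosen indices are illustrative assumptions:

    cpu@0 {
        device_type = "cpu";
        compatible = "arm,cortex-a72";
        reg = <0x0>;
        /* Index 0 maps to "atlclk" through clock-indices in the scpi_dvfs node */
        clocks = <&scpi_dvfs 0>;
        /* One of the two domains advertised via num-domains = <2> */
        power-domains = <&scpi_devpd 0>;
    };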
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index e747d73687cb..a7333ad938d2 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -12,6 +12,7 @@ Required properties:
* "qcom,scm-ipq4019"
* "qcom,scm-ipq806x"
* "qcom,scm-ipq8074"
+ * "qcom,scm-mdm9607"
* "qcom,scm-msm8660"
* "qcom,scm-msm8916"
* "qcom,scm-msm8960"
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
index d787d57491a1..7d3515264838 100644
--- a/Documentation/devicetree/bindings/fpga/fpga-region.txt
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -38,7 +38,7 @@ Partial Reconfiguration (PR)
Partial Reconfiguration Region (PRR)
* Also called a "reconfigurable partition"
- * A PRR is a specific section of a FPGA reserved for reconfiguration.
+ * A PRR is a specific section of an FPGA reserved for reconfiguration.
* A base (or static) FPGA image may create a set of PRR's that later may
be independently reprogrammed many times.
* The size and specific location of each PRR is fixed.
@@ -105,7 +105,7 @@ reprogrammed independently while the rest of the system continues to function.
Sequence
========
-When a DT overlay that targets a FPGA Region is applied, the FPGA Region will
+When a DT overlay that targets an FPGA Region is applied, the FPGA Region will
do the following:
1. Disable appropriate FPGA bridges.
@@ -134,8 +134,8 @@ The intended use is that a Device Tree overlay (DTO) can be used to reprogram an
FPGA while an operating system is running.
An FPGA Region that exists in the live Device Tree reflects the current state.
-If the live tree shows a "firmware-name" property or child nodes under a FPGA
-Region, the FPGA already has been programmed. A DTO that targets a FPGA Region
+If the live tree shows a "firmware-name" property or child nodes under an FPGA
+Region, the FPGA already has been programmed. A DTO that targets an FPGA Region
and adds the "firmware-name" property is taken as a request to reprogram the
FPGA. After reprogramming is successful, the overlay is accepted into the live
tree.
@@ -152,9 +152,9 @@ These FPGA regions are children of FPGA bridges which are then children of the
base FPGA region. The "Full Reconfiguration to add PRR's" example below shows
this.
-If an FPGA Region does not specify a FPGA Manager, it will inherit the FPGA
+If an FPGA Region does not specify an FPGA Manager, it will inherit the FPGA
Manager specified by its ancestor FPGA Region. This supports both the case
-where the same FPGA Manager is used for all of a FPGA as well the case where
+where the same FPGA Manager is used for all of an FPGA as well as the case where
a different FPGA Manager is used for each region.
FPGA Regions do not inherit their ancestor FPGA regions' bridges. This prevents
@@ -166,7 +166,7 @@ within the static image of the FPGA.
Required properties:
- compatible : should contain "fpga-region"
- fpga-mgr : should contain a phandle to an FPGA Manager. Child FPGA Regions
- inherit this property from their ancestor regions. A fpga-mgr property
+ inherit this property from their ancestor regions. An fpga-mgr property
in a region will override any inherited FPGA manager.
- #address-cells, #size-cells, ranges : must be present to handle address space
mapping for child nodes.
@@ -175,12 +175,12 @@ Optional properties:
- firmware-name : should contain the name of an FPGA image file located on the
firmware search path. If this property shows up in a live device tree
it indicates that the FPGA has already been programmed with this image.
- If this property is in an overlay targeting a FPGA region, it is a
+ If this property is in an overlay targeting an FPGA region, it is a
request to program the FPGA with that image.
- fpga-bridges : should contain a list of phandles to FPGA Bridges that must be
controlled during FPGA programming along with the parent FPGA bridge.
This property is optional if the FPGA Manager handles the bridges.
- If the fpga-region is the child of a fpga-bridge, the list should not
+ If the fpga-region is the child of an fpga-bridge, the list should not
contain the parent bridge.
- partial-fpga-config : boolean, set if partial reconfiguration is to be done,
otherwise full reconfiguration is done.
@@ -279,7 +279,7 @@ Supported Use Models
In all cases the live DT must have the FPGA Manager, FPGA Bridges (if any), and
a FPGA Region. The target of the Device Tree Overlay is the FPGA Region. Some
-uses are specific to a FPGA device.
+uses are specific to an FPGA device.
* No FPGA Bridges
In this case, the FPGA Manager which programs the FPGA also handles the
@@ -300,7 +300,7 @@ uses are specific to a FPGA device.
bridges need to exist in the FPGA that can gate the buses going to each FPGA
region while the buses are enabled for other sections. Before any partial
reconfiguration can be done, a base FPGA image must be loaded which includes
- PRR's with FPGA bridges. The device tree should have a FPGA region for each
+ PRR's with FPGA bridges. The device tree should have an FPGA region for each
PRR.
Device Tree Examples
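For context only (not part of this patch), a minimal sketch of the overlay usage described above, assuming a hypothetical fpga_region0 label and image file name:

    /dts-v1/;
    /plugin/;

    &fpga_region0 {
        #address-cells = <1>;
        #size-cells = <1>;

        /* Adding firmware-name requests reprogramming of the region */
        firmware-name = "soc_image2.rbf";

        /* Child node describing hardware added by the new image */
        gpio@10040 {
            compatible = "altr,pio-1.0";
            reg = <0x10040 0x20>;
            gpio-controller;
            #gpio-cells = <2>;
        };
    };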
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
deleted file mode 100644
index 7018aa896835..000000000000
--- a/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Xilinx Zynq FPGA Manager
-
-Required properties:
-- compatible: should contain "xlnx,zynq-devcfg-1.0"
-- reg: base address and size for memory mapped io
-- interrupts: interrupt for the FPGA manager device
-- clocks: phandle for clocks required operation
-- clock-names: name for the clock, should be "ref_clk"
-- syscon: phandle for access to SLCR registers
-
-Example:
- devcfg: devcfg@f8007000 {
- compatible = "xlnx,zynq-devcfg-1.0";
- reg = <0xf8007000 0x100>;
- interrupts = <0 8 4>;
- clocks = <&clkc 12>;
- clock-names = "ref_clk";
- syscon = <&slcr>;
- };
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.yaml b/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.yaml
new file mode 100644
index 000000000000..29daca4be47f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fpga/xilinx-zynq-fpga-mgr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq FPGA Manager Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+properties:
+ compatible:
+ const: xlnx,zynq-devcfg-1.0
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref_clk
+
+ syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon block which provides access to the SLCR registers
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - syscon
+
+additionalProperties: false
+
+examples:
+ - |
+ devcfg: devcfg@f8007000 {
+ compatible = "xlnx,zynq-devcfg-1.0";
+ reg = <0xf8007000 0x100>;
+ interrupts = <0 8 4>;
+ clocks = <&clkc 12>;
+ clock-names = "ref_clk";
+ syscon = <&slcr>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
deleted file mode 100644
index 696ea46227d1..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+++ /dev/null
@@ -1,167 +0,0 @@
-Davinci/Keystone GPIO controller bindings
-
-Required Properties:
-- compatible: should be "ti,dm6441-gpio": for Davinci da850 SoCs
- "ti,keystone-gpio": for Keystone 2 66AK2H/K, 66AK2L,
- 66AK2E SoCs
- "ti,k2g-gpio", "ti,keystone-gpio": for 66AK2G
- "ti,am654-gpio", "ti,keystone-gpio": for TI K3 AM654
- "ti,j721e-gpio", "ti,keystone-gpio": for J721E SoCs
- "ti,am64-gpio", "ti,keystone-gpio": for AM64 SoCs
-
-- reg: Physical base address of the controller and the size of memory mapped
- registers.
-
-- gpio-controller : Marks the device node as a gpio controller.
-
-- #gpio-cells : Should be two.
- - first cell is the pin number
- - second cell is used to specify optional parameters (unused)
-
-- interrupts: Array of GPIO interrupt number. Only banked or unbanked IRQs are
- supported at a time.
-
-- ti,ngpio: The number of GPIO pins supported.
-
-- ti,davinci-gpio-unbanked: The number of GPIOs that have an individual interrupt
- line to processor.
-
-- clocks: Should contain the device's input clock, and should be defined as per
- the appropriate clock bindings consumer usage in,
-
- Documentation/devicetree/bindings/clock/keystone-gate.txt
- for 66AK2HK/66AK2L/66AK2E SoCs or,
-
- Documentation/devicetree/bindings/clock/ti,sci-clk.txt
- for 66AK2G SoCs
-
-- clock-names: Name should be "gpio";
-
-Currently clock-names and clocks are needed for all keystone 2 platforms
-Davinci platforms do not have DT clocks as of now.
-
-The GPIO controller also acts as an interrupt controller. It uses the default
-two cells specifier as described in Documentation/devicetree/bindings/
-interrupt-controller/interrupts.txt.
-
-Example:
-
-gpio: gpio@1e26000 {
- compatible = "ti,dm6441-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x226000 0x1000>;
- interrupt-parent = <&intc>;
- interrupts = <42 IRQ_TYPE_EDGE_BOTH 43 IRQ_TYPE_EDGE_BOTH
- 44 IRQ_TYPE_EDGE_BOTH 45 IRQ_TYPE_EDGE_BOTH
- 46 IRQ_TYPE_EDGE_BOTH 47 IRQ_TYPE_EDGE_BOTH
- 48 IRQ_TYPE_EDGE_BOTH 49 IRQ_TYPE_EDGE_BOTH
- 50 IRQ_TYPE_EDGE_BOTH>;
- ti,ngpio = <144>;
- ti,davinci-gpio-unbanked = <0>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
-
-leds {
- compatible = "gpio-leds";
-
- led1 {
- label = "davinci:green:usr1";
- gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
- ...
- };
-
- led2 {
- label = "davinci:red:debug1";
- gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
- ...
- };
-};
-
-Example for 66AK2G:
-
-gpio0: gpio@2603000 {
- compatible = "ti,k2g-gpio", "ti,keystone-gpio";
- reg = <0x02603000 0x100>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupts = <GIC_SPI 432 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 433 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 434 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 435 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 436 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 437 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 438 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 439 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 440 IRQ_TYPE_EDGE_RISING>;
- interrupt-controller;
- #interrupt-cells = <2>;
- ti,ngpio = <144>;
- ti,davinci-gpio-unbanked = <0>;
- clocks = <&k2g_clks 0x001b 0x0>;
- clock-names = "gpio";
-};
-
-Example for 66AK2HK/66AK2L/66AK2E:
-
-gpio0: gpio@260bf00 {
- compatible = "ti,keystone-gpio";
- reg = <0x0260bf00 0x100>;
- gpio-controller;
- #gpio-cells = <2>;
- /* HW Interrupts mapped to GPIO pins */
- interrupts = <GIC_SPI 120 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 121 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 123 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 124 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 145 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 146 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 147 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 148 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 149 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 150 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 151 IRQ_TYPE_EDGE_RISING>;
- clocks = <&clkgpio>;
- clock-names = "gpio";
- ti,ngpio = <32>;
- ti,davinci-gpio-unbanked = <32>;
-};
-
-Example for K3 AM654:
-
-wkup_gpio0: wkup_gpio0@42110000 {
- compatible = "ti,am654-gpio", "ti,keystone-gpio";
- reg = <0x42110000 0x100>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-parent = <&intr_wkup_gpio>;
- interrupts = <59 128>, <59 129>, <59 130>, <59 131>;
- interrupt-controller;
- #interrupt-cells = <2>;
- ti,ngpio = <56>;
- ti,davinci-gpio-unbanked = <0>;
- clocks = <&k3_clks 59 0>;
- clock-names = "gpio";
-};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml
new file mode 100644
index 000000000000..f32e09ef937c
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml
@@ -0,0 +1,185 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-davinci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO controller for Davinci and Keystone devices
+
+maintainers:
+ - Keerthy <j-keerthy@ti.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - ti,k2g-gpio
+ - ti,am654-gpio
+ - ti,j721e-gpio
+ - ti,am64-gpio
+ - const: ti,keystone-gpio
+
+ - items:
+ - enum:
+ - ti,dm6441-gpio
+ - ti,keystone-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ gpio-ranges: true
+
+ gpio-line-names:
+ description: Strings describing the names of each GPIO line.
+ minItems: 1
+ maxItems: 100
+
+ "#gpio-cells":
+ const: 2
+ description:
+ The first cell is the pin number and the second cell is used to specify optional parameters (unused).
+
+ interrupts:
+ description:
+ The interrupts are specified as per the interrupt parent. Only banked
+ or unbanked IRQs are supported at a time. If the interrupts are
+ banked, provide the list of interrupts corresponding to each bank;
+ otherwise provide the list of interrupts for each GPIO.
+ minItems: 1
+ maxItems: 100
+
+ ti,ngpio:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The number of GPIO pins supported consecutively.
+ minimum: 1
+
+ ti,davinci-gpio-unbanked:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The number of GPIOs that have an individual interrupt line to the processor.
+ minimum: 0
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: gpio
+
+ interrupt-controller: true
+
+ power-domains:
+ maxItems: 1
+
+ "#interrupt-cells":
+ const: 2
+
+patternProperties:
+ "^(.+-hog(-[0-9]+)?)$":
+ type: object
+
+ required:
+ - gpio-hog
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - interrupts
+ - ti,ngpio
+ - ti,davinci-gpio-unbanked
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gpio0: gpio@2603000 {
+ compatible = "ti,k2g-gpio", "ti,keystone-gpio";
+ reg = <0x02603000 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 432 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 433 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 434 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 435 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 436 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 437 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 438 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 439 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 440 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,ngpio = <144>;
+ ti,davinci-gpio-unbanked = <0>;
+ clocks = <&k2g_clks 0x001b 0x0>;
+ clock-names = "gpio";
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gpio1: gpio@260bf00 {
+ compatible = "ti,keystone-gpio";
+ reg = <0x0260bf00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* HW Interrupts mapped to GPIO pins */
+ interrupts = <GIC_SPI 120 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 121 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 123 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 124 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 145 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 146 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 147 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 148 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 149 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 150 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 151 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkgpio>;
+ clock-names = "gpio";
+ ti,ngpio = <32>;
+ ti,davinci-gpio-unbanked = <32>;
+ };
+
+ - |
+ wkup_gpio0: gpio0@42110000 {
+ compatible = "ti,am654-gpio", "ti,keystone-gpio";
+ reg = <0x42110000 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&intr_wkup_gpio>;
+ interrupts = <60>, <61>, <62>, <63>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,ngpio = <56>;
+ ti,davinci-gpio-unbanked = <0>;
+ clocks = <&k3_clks 59 0>;
+ clock-names = "gpio";
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-omap.txt b/Documentation/devicetree/bindings/gpio/gpio-omap.txt
deleted file mode 100644
index e57b2cb28f6c..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-omap.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-OMAP GPIO controller bindings
-
-Required properties:
-- compatible:
- - "ti,omap2-gpio" for OMAP2 controllers
- - "ti,omap3-gpio" for OMAP3 controllers
- - "ti,omap4-gpio" for OMAP4 controllers
-- reg : Physical base address of the controller and length of memory mapped
- region.
-- gpio-controller : Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- - first cell is the pin number
- - second cell is used to specify optional parameters (unused)
-- interrupt-controller: Mark the device node as an interrupt controller.
-- #interrupt-cells : Should be 2.
- The first cell is the GPIO number.
- The second cell is used to specify flags:
- bits[3:0] trigger type and level flags:
- 1 = low-to-high edge triggered.
- 2 = high-to-low edge triggered.
- 4 = active high level-sensitive.
- 8 = active low level-sensitive.
-- interrupts : The interrupt the controller is rising as output when an
- interrupt occures
-
-OMAP specific properties:
-- ti,hwmods: Name of the hwmod associated to the GPIO:
- "gpio<X>", <X> being the 1-based instance number
- from the HW spec.
-- ti,gpio-always-on: Indicates if a GPIO bank is always powered and
- so will never lose its logic state.
-
-
-Example:
-
-gpio0: gpio@44e07000 {
- compatible = "ti,omap4-gpio";
- reg = <0x44e07000 0x1000>;
- ti,hwmods = "gpio1";
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <96>;
-};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
deleted file mode 100644
index a482455a205b..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-* PCF857x-compatible I/O expanders
-
-The PCF857x-compatible chips have "quasi-bidirectional" I/O lines that can be
-driven high by a pull-up current source or driven low to ground. This combines
-the direction and output level into a single bit per line, which can't be read
-back. We can't actually know at initialization time whether a line is configured
-(a) as output and driving the signal low/high, or (b) as input and reporting a
-low/high value, without knowing the last value written since the chip came out
-of reset (if any). The only reliable solution for setting up line direction is
-thus to do it explicitly.
-
-Required Properties:
-
- - compatible: should be one of the following.
- - "maxim,max7328": For the Maxim MAX7378
- - "maxim,max7329": For the Maxim MAX7329
- - "nxp,pca8574": For the NXP PCA8574
- - "nxp,pca8575": For the NXP PCA8575
- - "nxp,pca9670": For the NXP PCA9670
- - "nxp,pca9671": For the NXP PCA9671
- - "nxp,pca9672": For the NXP PCA9672
- - "nxp,pca9673": For the NXP PCA9673
- - "nxp,pca9674": For the NXP PCA9674
- - "nxp,pca9675": For the NXP PCA9675
- - "nxp,pcf8574": For the NXP PCF8574
- - "nxp,pcf8574a": For the NXP PCF8574A
- - "nxp,pcf8575": For the NXP PCF8575
-
- - reg: I2C slave address.
-
- - gpio-controller: Marks the device node as a gpio controller.
- - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
- cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
- GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
-
-Optional Properties:
-
- - lines-initial-states: Bitmask that specifies the initial state of each
- line. When a bit is set to zero, the corresponding line will be initialized to
- the input (pulled-up) state. When the bit is set to one, the line will be
- initialized the low-level output state. If the property is not specified
- all lines will be initialized to the input state.
-
- The I/O expander can detect input state changes, and thus optionally act as
- an interrupt controller. When the expander interrupt line is connected all the
- following properties must be set. For more information please see the
- interrupt controller device tree bindings documentation available at
- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt.
-
- - interrupt-controller: Identifies the node as an interrupt controller.
- - #interrupt-cells: Number of cells to encode an interrupt source, shall be 2.
- - interrupts: Interrupt specifier for the controllers interrupt.
-
-
-Please refer to gpio.txt in this directory for details of the common GPIO
-bindings used by client devices.
-
-Example: PCF8575 I/O expander node
-
- pcf8575: gpio@20 {
- compatible = "nxp,pcf8575";
- reg = <0x20>;
- interrupt-parent = <&irqpin2>;
- interrupts = <3 0>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
deleted file mode 100644
index 78458adbf4b7..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Lantiq SoC Serial To Parallel (STP) GPIO controller
-
-The Serial To Parallel (STP) is found on MIPS based Lantiq socs. It is a
-peripheral controller used to drive external shift register cascades. At most
-3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
-to drive the 2 LSBs of the cascade automatically.
-
-
-Required properties:
-- compatible : Should be "lantiq,gpio-stp-xway"
-- reg : Address and length of the register set for the device
-- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify optional parameters (currently
- unused).
-- gpio-controller : Marks the device node as a gpio controller.
-
-Optional properties:
-- lantiq,shadow : The default value that we shall assume as already set on the
- shift register cascade.
-- lantiq,groups : Set the 3 bit mask to select which of the 3 groups are enabled
- in the shift register cascade.
-- lantiq,dsl : The dsl core can control the 2 LSBs of the gpio cascade. This 2 bit
- property can enable this feature.
-- lantiq,phy1 : The gphy1 core can control 3 bits of the gpio cascade.
-- lantiq,phy2 : The gphy2 core can control 3 bits of the gpio cascade.
-- lantiq,rising : use rising instead of falling edge for the shift register
-
-Example:
-
-gpio1: stp@e100bb0 {
- compatible = "lantiq,gpio-stp-xway";
- reg = <0xE100BB0 0x40>;
- #gpio-cells = <2>;
- gpio-controller;
-
- lantiq,shadow = <0xffff>;
- lantiq,groups = <0x7>;
- lantiq,dsl = <0x3>;
- lantiq,phy1 = <0x7>;
- lantiq,phy2 = <0x7>;
- /* lantiq,rising; */
-};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.yaml b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.yaml
new file mode 100644
index 000000000000..d565c4b63dbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-stp-xway.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq SoC Serial To Parallel (STP) GPIO controller
+
+description: |
+ The Serial To Parallel (STP) is found on MIPS-based Lantiq SoCs. It is a
+ peripheral controller used to drive external shift register cascades. At most
+ 3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+ and Ethernet PHYs to drive some bytes of the cascade automatically.
+
+maintainers:
+ - John Crispin <john@phrozen.org>
+
+properties:
+ $nodename:
+ pattern: "^gpio@[0-9a-f]+$"
+
+ compatible:
+ const: lantiq,gpio-stp-xway
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ description:
+ The first cell is the pin number and the second cell is used to specify
+ consumer flags.
+ const: 2
+
+ lantiq,shadow:
+ description:
+ The default value that is assumed to be already set on the
+ shift register cascade.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x000000
+ maximum: 0xffffff
+
+ lantiq,groups:
+ description:
+ Set the 3-bit mask to select which of the 3 groups are enabled
+ in the shift register cascade.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x0
+ maximum: 0x7
+
+ lantiq,dsl:
+ description:
+ The DSL core can control the 2 LSBs of the GPIO cascade. This 2-bit
+ property can enable this feature.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x0
+ maximum: 0x3
+
+ lantiq,rising:
+ description:
+ Use rising instead of falling edge for the shift register.
+ type: boolean
+
+patternProperties:
+ "^lantiq,phy[1-4]$":
+ description:
+ The GPHY core can control 3 bits of the GPIO cascade. In the xRX200 family
+ phy[1-2] are available, in xRX330 phy[1-3] and in xRX500 phy[1-4].
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x0
+ maximum: 0x7
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio@e100bb0 {
+ compatible = "lantiq,gpio-stp-xway";
+ reg = <0xE100BB0 0x40>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ pinctrl-0 = <&stp_pins>;
+ pinctrl-names = "default";
+
+ lantiq,shadow = <0xffffff>;
+ lantiq,groups = <0x7>;
+ lantiq,dsl = <0x3>;
+ lantiq,phy1 = <0x7>;
+ lantiq,phy2 = <0x7>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
deleted file mode 100644
index f693e82b4c0f..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Xilinx Zynq GPIO controller Device Tree Bindings
--------------------------------------------
-
-Required properties:
-- #gpio-cells : Should be two
- - First cell is the GPIO line number
- - Second cell is used to specify optional
- parameters (unused)
-- compatible : Should be "xlnx,zynq-gpio-1.0" or
- "xlnx,zynqmp-gpio-1.0" or "xlnx,versal-gpio-1.0
- or "xlnx,pmc-gpio-1.0
-- clocks : Clock specifier (see clock bindings for details)
-- gpio-controller : Marks the device node as a GPIO controller.
-- interrupts : Interrupt specifier (see interrupt bindings for
- details)
-- interrupt-controller : Marks the device node as an interrupt controller.
-- #interrupt-cells : Should be 2. The first cell is the GPIO number.
- The second cell bits[3:0] is used to specify trigger type and level flags:
- 1 = low-to-high edge triggered.
- 2 = high-to-low edge triggered.
- 4 = active high level-sensitive.
- 8 = active low level-sensitive.
-- reg : Address and length of the register set for the device
-
-Example:
- gpio@e000a000 {
- #gpio-cells = <2>;
- compatible = "xlnx,zynq-gpio-1.0";
- clocks = <&clkc 42>;
- gpio-controller;
- interrupt-parent = <&intc>;
- interrupts = <0 20 4>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0xe000a000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml b/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
new file mode 100644
index 000000000000..378da2649e66
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-zynq.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq GPIO controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+properties:
+ compatible:
+ const: xlnx,zynq-gpio-1.0
+
+ reg:
+ maxItems: 1
+
+ "#gpio-cells":
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ gpio-controller: true
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#gpio-cells"
+ - interrupts
+ - gpio-controller
+ - interrupt-controller
+ - "#interrupt-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio@e000a000 {
+ #gpio-cells = <2>;
+ compatible = "xlnx,zynq-gpio-1.0";
+ clocks = <&clkc 42>;
+ gpio-controller;
+ interrupt-parent = <&intc>;
+ interrupts = <0 20 4>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0xe000a000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/idt,32434-gpio.yaml b/Documentation/devicetree/bindings/gpio/idt,32434-gpio.yaml
new file mode 100644
index 000000000000..d38de8144656
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/idt,32434-gpio.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/idt,32434-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IDT 79RC32434 GPIO controller
+
+maintainers:
+ - Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+properties:
+ compatible:
+ const: idt,32434-gpio
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: gpio
+ - const: pic
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ ngpios:
+ minimum: 1
+ maximum: 32
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - gpio-controller
+ - "#gpio-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio0: gpio@50004 {
+ compatible = "idt,32434-gpio";
+ reg = <0x50004 0x10>, <0x38030 0x0c>;
+ reg-names = "gpio", "pic";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ngpios = <14>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml b/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
new file mode 100644
index 000000000000..f0ff66c4c74e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/nxp,pcf8575.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PCF857x-compatible I/O expanders
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description:
+ The PCF857x-compatible chips have "quasi-bidirectional" I/O lines that can be
+ driven high by a pull-up current source or driven low to ground. This
+ combines the direction and output level into a single bit per line, which
+ can't be read back. We can't actually know at initialization time whether a
+ line is configured (a) as output and driving the signal low/high, or (b) as
+ input and reporting a low/high value, without knowing the last value written
+ since the chip came out of reset (if any). The only reliable solution for
+ setting up line direction is thus to do it explicitly.
+
+properties:
+ compatible:
+ enum:
+ - maxim,max7328
+ - maxim,max7329
+ - nxp,pca8574
+ - nxp,pca8575
+ - nxp,pca9670
+ - nxp,pca9671
+ - nxp,pca9672
+ - nxp,pca9673
+ - nxp,pca9674
+ - nxp,pca9675
+ - nxp,pcf8574
+ - nxp,pcf8574a
+ - nxp,pcf8575
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+ description:
+ The first cell is the GPIO number and the second cell specifies GPIO
+ flags, as defined in <dt-bindings/gpio/gpio.h>. Only the GPIO_ACTIVE_HIGH
+ and GPIO_ACTIVE_LOW flags are supported.
+
+ lines-initial-states:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Bitmask that specifies the initial state of each line.
+ When a bit is set to zero, the corresponding line will be initialized to
+ the input (pulled-up) state.
+ When the bit is set to one, the line will be initialized to the
+ low-level output state.
+ If the property is not specified all lines will be initialized to the
+ input state.
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ wakeup-source: true
+
+patternProperties:
+ "^(.+-hog(-[0-9]+)?)$":
+ type: object
+
+ required:
+ - gpio-hog
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcf8575: gpio@20 {
+ compatible = "nxp,pcf8575";
+ reg = <0x20>;
+ interrupt-parent = <&irqpin2>;
+ interrupts = <3 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
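A worked illustration (not part of the patch) of the lines-initial-states bitmask described above; the node, address and value are arbitrary examples, not recommendations:

    pcf8575_b: gpio@21 {
        compatible = "nxp,pcf8575";
        reg = <0x21>;
        gpio-controller;
        #gpio-cells = <2>;
        /*
         * Bits 8-11 are set, so lines 8-11 start as low-level outputs;
         * all other lines start in the input (pulled-up) state.
         */
        lines-initial-states = <0x0f00>;
    };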
diff --git a/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt
deleted file mode 100644
index f9231df17c2b..000000000000
--- a/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Rockchip RK3328 GRF (General Register Files) GPIO controller.
-
-In Rockchip RK3328, the output only GPIO_MUTE pin, originally for codec mute
-control, can also be used for general purpose. It is manipulated by the
-GRF_SOC_CON10 register in GRF. Aside from the GPIO_MUTE pin, the HDMI pins can
-also be set in the same way.
-
-Currently this GPIO controller only supports the mute pin. If needed in the
-future, the HDMI pins support can also be added.
-
-Required properties:
-- compatible: Should contain "rockchip,rk3328-grf-gpio".
-- gpio-controller: Marks the device node as a gpio controller.
-- #gpio-cells: Should be 2. The first cell is the pin number and
- the second cell is used to specify the gpio polarity:
- 0 = Active high,
- 1 = Active low.
-
-Example:
-
- grf: syscon@ff100000 {
- compatible = "rockchip,rk3328-grf", "syscon", "simple-mfd";
-
- grf_gpio: grf-gpio {
- compatible = "rockchip,rk3328-grf-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
- };
-
-Note: The grf_gpio node should be declared as the child of the GRF (General
-Register File) node. The GPIO_MUTE pin is referred to as <&grf_gpio 0>.
diff --git a/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.yaml b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.yaml
new file mode 100644
index 000000000000..d8cce73ea0ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/rockchip,rk3328-grf-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3328 General Register Files GPIO controller
+
+description:
+ The Rockchip RK3328 General Register File (GRF) outputs only the
+ GPIO_MUTE pin, originally for codec mute control, but it can also be used
+ for general purpose. It is manipulated by the GRF_SOC_CON10 register.
+ If needed in the future support for the HDMI pins can also be added.
+ The GPIO node should be declared as the child of the GRF node.
+
+ The GPIO_MUTE pin is referred to in the format
+
+ <&grf_gpio 0 GPIO_ACTIVE_LOW>
+
+ The first cell is the pin number and
+ the second cell is used to specify the GPIO polarity
+ 0 = Active high
+ 1 = Active low
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ const: rockchip,rk3328-grf-gpio
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+required:
+ - compatible
+ - gpio-controller
+ - "#gpio-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ grf_gpio: gpio {
+ compatible = "rockchip,rk3328-grf-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
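A hedged sketch (not part of the patch) of a consumer referencing the GPIO_MUTE pin in the format described above; the gpio-leds consumer is purely illustrative and assumes the usual dt-bindings/gpio/gpio.h include for GPIO_ACTIVE_LOW:

    leds {
        compatible = "gpio-leds";

        led-mute {
            label = "rk3328:green:mute";
            /* GPIO_MUTE pin driven through the GRF, active low */
            gpios = <&grf_gpio 0 GPIO_ACTIVE_LOW>;
        };
    };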
diff --git a/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml b/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml
new file mode 100644
index 000000000000..7087e4a5013f
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/ti,omap-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OMAP GPIO controller bindings
+
+maintainers:
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+
+description: |
+ The general-purpose interface combines general-purpose input/output (GPIO) banks.
+ Each GPIO bank provides up to 32 dedicated general-purpose pins with input
+ and output capabilities; interrupt generation in active mode and wake-up
+ request generation in idle mode upon the detection of external events.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ti,omap2-gpio
+ - ti,omap3-gpio
+ - ti,omap4-gpio
+ - items:
+ - const: ti,am4372-gpio
+ - const: ti,omap4-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ gpio-ranges: true
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 32
+
+ ti,gpio-always-on:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Indicates if a GPIO bank is always powered and will never lose its logic state.
+
+ ti,hwmods:
+ $ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
+ description:
+ Name of the hwmod associated with the GPIO. Needed on some legacy OMAP
+ SoCs which have not been converted to the ti,sysc interconnect hierarchy.
+
+ ti,no-reset-on-init:
+ $ref: /schemas/types.yaml#/definitions/flag
+ deprecated: true
+ description:
+ Do not reset on init. Used with ti,hwmods on some legacy OMAP SoCs which
+ have not been converted to the ti,sysc interconnect hierarchy.
+
+patternProperties:
+ "^(.+-hog(-[0-9]+)?)$":
+ type: object
+
+ required:
+ - gpio-hog
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ gpio0: gpio@0 {
+ compatible = "ti,omap4-gpio";
+ reg = <0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <96>;
+ ti,gpio-always-on;
+
+ ls-buf-en-hog {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "LS_BUF_EN";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
index 9d72264fa90a..e6485f7b046f 100644
--- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
+++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
@@ -34,7 +34,6 @@ properties:
- enum: [ bridge, gca ]
- enum: [ bridge, gca ]
minItems: 2
- maxItems: 4
interrupts:
items:
diff --git a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
index 3ed172629974..93e7244cdc0e 100644
--- a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
+++ b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
@@ -36,7 +36,6 @@ properties:
- description: AHB/slave interface clock (only required if GPU can gate
slave interface independently)
minItems: 1
- maxItems: 4
clock-names:
items:
diff --git a/Documentation/devicetree/bindings/hwlock/allwinner,sun6i-a31-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/allwinner,sun6i-a31-hwspinlock.yaml
new file mode 100644
index 000000000000..10e5a53e447b
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/allwinner,sun6i-a31-hwspinlock.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwlock/allwinner,sun6i-a31-hwspinlock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SUN6I hardware spinlock driver for Allwinner sun6i compatible SoCs
+
+maintainers:
+ - Wilken Gottwalt <wilken.gottwalt@posteo.net>
+
+description:
+ The hardware unit provides semaphores between the ARM cores and the embedded
+ companion core on the SoC.
+
+properties:
+ compatible:
+ const: allwinner,sun6i-a31-hwspinlock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun8i-a23-a33-ccu.h>
+ #include <dt-bindings/reset/sun8i-a23-a33-ccu.h>
+
+ hwlock@1c18000 {
+ compatible = "allwinner,sun6i-a31-hwspinlock";
+ reg = <0x01c18000 0x1000>;
+ clocks = <&ccu CLK_BUS_SPINLOCK>;
+ resets = <&ccu RST_BUS_SPINLOCK>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
index ad0ec9f35bd8..7d9c083632b9 100644
--- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
@@ -39,17 +39,7 @@ properties:
reg:
maxItems: 1
-patternProperties:
- "^adi,bypass-attenuator-in[0-4]$":
- description: |
- Configures bypassing the individual voltage input attenuator. If
- set to 1 the attenuator is bypassed if set to 0 the attenuator is
- not bypassed. If the property is absent then the attenuator
- retains it's configuration from the bios/bootloader.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1]
-
- "^adi,pwm-active-state$":
+ adi,pwm-active-state:
description: |
Integer array, represents the active state of the pwm outputs If set to 0
the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm
@@ -61,6 +51,16 @@ patternProperties:
enum: [0, 1]
default: 1
+patternProperties:
+ "^adi,bypass-attenuator-in[0-4]$":
+ description: |
+ Configures bypassing the individual voltage input attenuator. If
+ set to 1 the attenuator is bypassed; if set to 0 the attenuator is
+ not bypassed. If the property is absent then the attenuator
+ retains its configuration from the BIOS/bootloader.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+
required:
- compatible
- reg
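To make the reordered properties above concrete, a hedged sketch of a node using them (the I2C address and the chosen values are illustrative assumptions only):

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        hwmon@2e {
            compatible = "adi,adt7475";
            reg = <0x2e>;
            /* Bypass the attenuator on in1 only; in0 keeps its BIOS/bootloader setting */
            adi,bypass-attenuator-in1 = <1>;
            /* Three PWM outputs: pwm1 and pwm3 active high, pwm2 active low */
            adi,pwm-active-state = <1 0 1>;
        };
    };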
diff --git a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
new file mode 100644
index 000000000000..ea643e6c3ef5
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/aspeed,i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASPEED I2C on the AST24XX, AST25XX, and AST26XX SoCs Device Tree Bindings
+
+maintainers:
+ - Rayn Chen <rayn_chen@aspeedtech.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-i2c-bus
+ - aspeed,ast2500-i2c-bus
+ - aspeed,ast2600-i2c-bus
+
+ reg:
+ minItems: 1
+ items:
+ - description: address offset and range of bus
+ - description: address offset and range of bus buffer
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description:
+ root clock of bus, should reference the APB
+ clock in the second cell
+
+ resets:
+ maxItems: 1
+
+ bus-frequency:
+ minimum: 500
+ maximum: 4000000
+ default: 100000
+ description: frequency of the bus clock in Hz; defaults to 100 kHz when not
+ specified
+
+ multi-master:
+ type: boolean
+ description:
+ states that there is another master active on this bus
+
+required:
+ - reg
+ - compatible
+ - clocks
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/aspeed-clock.h>
+ i2c0: i2c-bus@40 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ compatible = "aspeed,ast2500-i2c-bus";
+ reg = <0x40 0x40>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_I2C>;
+ bus-frequency = <100000>;
+ interrupts = <0>;
+ interrupt-parent = <&i2c_ic>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml b/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
index edbca2476128..7070c04469ed 100644
--- a/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
@@ -21,7 +21,6 @@ properties:
reg:
minItems: 1
- maxItems: 2
items:
- description: BSC register range
- description: Auto-I2C register range
diff --git a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
deleted file mode 100644
index b47f6ccb196a..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-Device tree configuration for the I2C busses on the AST24XX, AST25XX, and AST26XX SoCs.
-
-Required Properties:
-- #address-cells : should be 1
-- #size-cells : should be 0
-- reg : address offset and range of bus
-- compatible : should be "aspeed,ast2400-i2c-bus"
- or "aspeed,ast2500-i2c-bus"
- or "aspeed,ast2600-i2c-bus"
-- clocks : root clock of bus, should reference the APB
- clock in the second cell
-- resets : phandle to reset controller with the reset number in
- the second cell
-- interrupts : interrupt number
-
-Optional Properties:
-- bus-frequency : frequency of the bus clock in Hz defaults to 100 kHz when not
- specified
-- multi-master : states that there is another master active on this bus.
-
-Example:
-
-i2c {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x1e78a000 0x1000>;
-
- i2c_ic: interrupt-controller@0 {
- #interrupt-cells = <1>;
- compatible = "aspeed,ast2400-i2c-ic";
- reg = <0x0 0x40>;
- interrupts = <12>;
- interrupt-controller;
- };
-
- i2c0: i2c-bus@40 {
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
- reg = <0x40 0x40>;
- compatible = "aspeed,ast2400-i2c-bus";
- clocks = <&syscon ASPEED_CLK_APB>;
- resets = <&syscon ASPEED_RESET_I2C>;
- bus-frequency = <100000>;
- interrupts = <0>;
- interrupt-parent = <&i2c_ic>;
- };
-};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
index 96c914e048f5..2015f50aed0f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
@@ -73,7 +73,7 @@ i2c0: i2c@f8034600 {
pinctrl-0 = <&pinctrl_i2c0>;
pinctrl-1 = <&pinctrl_i2c0_gpio>;
sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
wm8731: wm8731@1a {
compatible = "wm8731";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
index b745f3706120..6590501c53d4 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
@@ -8,7 +8,7 @@ Required properties:
- reg : Offset and length of the register set for the device
- clocks: I2C functional clock phandle.
For 66AK2G this property should be set per binding,
- Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+ Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
SoC-specific Required Properties:
@@ -17,7 +17,7 @@ The following are mandatory properties for Keystone 2 66AK2G SoCs only:
- power-domains: Should contain a phandle to a PM domain provider node
and an args specifier containing the I2C device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
Recommended properties :
- interrupts : standard interrupt property.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
index 81b5d55086fa..86b2e433a969 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
@@ -27,7 +27,7 @@ Required properties:
- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
parents.
-Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
+Furthermore, I2C mux properties and child nodes. See i2c-mux.yaml in this
directory.
Example:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
index 7f0194fdd0cc..5ea216ae7084 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
@@ -15,6 +15,7 @@ Required properties:
"mediatek,mt8173-i2c": for MediaTek MT8173
"mediatek,mt8183-i2c": for MediaTek MT8183
"mediatek,mt8192-i2c": for MediaTek MT8192
+ "mediatek,mt8195-i2c", "mediatek,mt8192-i2c": for MediaTek MT8195
"mediatek,mt8516-i2c", "mediatek,mt2712-i2c": for MediaTek MT8516
- reg: physical base address of the controller and dma base, length of memory
mapped region.
@@ -32,6 +33,7 @@ Optional properties:
 - mediatek,have-pmic: platform can control i2c from special pmic side.
Only mt6589 and mt8135 support this feature.
- mediatek,use-push-pull: IO config use push-pull mode.
+ - vbus-supply: phandle to the regulator that provides power to SCL/SDA.
Example:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
index 21da3ecbb370..d4cf10582a26 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
@@ -22,8 +22,8 @@ Required properties:
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
- mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See i2c-mux.txt in this directory.
-* I2C child bus nodes. See i2c-mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.yaml in this directory.
+* I2C child bus nodes. See i2c-mux.yaml in this directory.
Optional properties:
- idle-state: value to set the muxer to when idle. When no value is
@@ -62,7 +62,6 @@ Example:
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7 1>;
- reset-active-low;
};
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt
deleted file mode 100644
index 8b444b94e92f..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt
+++ /dev/null
@@ -1,99 +0,0 @@
-General Purpose I2C Bus Mux
-
-This binding describes an I2C bus multiplexer that uses a mux controller
-from the mux subsystem to route the I2C signals.
-
- .-----. .-----.
- | dev | | dev |
- .------------. '-----' '-----'
- | SoC | | |
- | | .--------+--------'
- | .------. | .------+ child bus A, on MUX value set to 0
- | | I2C |-|--| Mux |
- | '------' | '--+---+ child bus B, on MUX value set to 1
- | .------. | | '----------+--------+--------.
- | | MUX- | | | | | |
- | | Ctrl |-|-----+ .-----. .-----. .-----.
- | '------' | | dev | | dev | | dev |
- '------------' '-----' '-----' '-----'
-
-Required properties:
-- compatible: i2c-mux
-- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
- port is connected to.
-- mux-controls: The phandle of the mux controller to use for operating the
- mux.
-* Standard I2C mux properties. See i2c-mux.txt in this directory.
-* I2C child bus nodes. See i2c-mux.txt in this directory. The sub-bus number
- is also the mux-controller state described in ../mux/mux-controller.txt
-
-Optional properties:
-- mux-locked: If present, explicitly allow unrelated I2C transactions on the
- parent I2C adapter at these times:
- + during setup of the multiplexer
- + between setup of the multiplexer and the child bus I2C transaction
- + between the child bus I2C transaction and releasing of the multiplexer
- + during releasing of the multiplexer
- However, I2C transactions to devices behind all I2C multiplexers connected
- to the same parent adapter that this multiplexer is connected to are blocked
- for the full duration of the complete multiplexed I2C transaction (i.e.
- including the times covered by the above list).
- If mux-locked is not present, the multiplexer is assumed to be parent-locked.
- This means that no unrelated I2C transactions are allowed on the parent I2C
- adapter for the complete multiplexed I2C transaction.
- The properties of mux-locked and parent-locked multiplexers are discussed
- in more detail in Documentation/i2c/i2c-topology.rst.
-
-For each i2c child node, an I2C child bus will be created. They will
-be numbered based on their order in the device tree.
-
-Whenever an access is made to a device on a child bus, the value set
-in the relevant node's reg property will be set as the state in the
-mux controller.
-
-Example:
- mux: mux-controller {
- compatible = "gpio-mux";
- #mux-control-cells = <0>;
-
- mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
- <&pioA 1 GPIO_ACTIVE_HIGH>;
- };
-
- i2c-mux {
- compatible = "i2c-mux";
- mux-locked;
- i2c-parent = <&i2c1>;
-
- mux-controls = <&mux>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c@1 {
- reg = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- ssd1307: oled@3c {
- compatible = "solomon,ssd1307fb-i2c";
- reg = <0x3c>;
- pwms = <&pwm 4 3000>;
- reset-gpios = <&gpio2 7 1>;
- reset-active-low;
- };
- };
-
- i2c@3 {
- reg = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pca9555: pca9555@20 {
- compatible = "nxp,pca9555";
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x20>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.yaml
new file mode 100644
index 000000000000..9b0603a72f40
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-mux-gpmux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: General Purpose I2C Bus Mux
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |+
+ This binding describes an I2C bus multiplexer that uses a mux controller
+ from the mux subsystem to route the I2C signals.
+
+ .-----. .-----.
+ | dev | | dev |
+ .------------. '-----' '-----'
+ | SoC | | |
+ | | .--------+--------'
+ | .------. | .------+ child bus A, on MUX value set to 0
+ | | I2C |-|--| Mux |
+ | '------' | '--+---+ child bus B, on MUX value set to 1
+ | .------. | | '----------+--------+--------.
+ | | MUX- | | | | | |
+ | | Ctrl |-|-----+ .-----. .-----. .-----.
+ | '------' | | dev | | dev | | dev |
+ '------------' '-----' '-----' '-----'
+
+
+
+allOf:
+ - $ref: /schemas/i2c/i2c-mux.yaml#
+
+properties:
+ compatible:
+ const: i2c-mux
+
+ i2c-parent:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle of the I2C bus that this multiplexer's master-side port is
+ connected to.
+
+ mux-controls:
+ maxItems: 1
+ description:
+ The mux-controller states are the I2C sub-bus numbers.
+
+ mux-locked:
+ type: boolean
+ description: |
+ Explicitly allow unrelated I2C transactions on the parent I2C adapter at
+ these times:
+ - during setup of the multiplexer
+ - between setup of the multiplexer and the child bus I2C transaction
+ - between the child bus I2C transaction and releasing of the multiplexer
+ - during releasing of the multiplexer
+
+ However, I2C transactions to devices behind all I2C multiplexers connected
+ to the same parent adapter that this multiplexer is connected to are blocked
+ for the full duration of the complete multiplexed I2C transaction (i.e.
+ including the times covered by the above list).
+ If mux-locked is not present, the multiplexer is assumed to be parent-locked.
+ This means that no unrelated I2C transactions are allowed on the parent I2C
+ adapter for the complete multiplexed I2C transaction.
+ The properties of mux-locked and parent-locked multiplexers are discussed
+ in more detail in Documentation/i2c/i2c-topology.rst.
+
+required:
+ - compatible
+ - i2c-parent
+ - mux-controls
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ i2c-mux {
+ compatible = "i2c-mux";
+ mux-locked;
+ i2c-parent = <&i2c1>;
+
+ mux-controls = <&mux>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
index 8b1e49cdce3f..29c4550c9782 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
@@ -8,8 +8,8 @@ Required Properties:
The following required properties are defined externally:
- - Standard I2C mux properties. See i2c-mux.txt in this directory.
- - I2C child bus nodes. See i2c-mux.txt in this directory.
+ - Standard I2C mux properties. See i2c-mux.yaml in this directory.
+ - I2C child bus nodes. See i2c-mux.yaml in this directory.
Optional Properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
deleted file mode 100644
index 7abda506b828..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-* NXP PCA954x I2C bus switch
-
-The driver supports NXP PCA954x and PCA984x I2C mux/switch devices.
-
-Required Properties:
-
- - compatible: Must contain one of the following.
- "nxp,pca9540",
- "nxp,pca9542",
- "nxp,pca9543",
- "nxp,pca9544",
- "nxp,pca9545",
- "nxp,pca9546", "nxp,pca9846",
- "nxp,pca9547", "nxp,pca9847",
- "nxp,pca9548", "nxp,pca9848",
- "nxp,pca9849"
-
- - reg: The I2C address of the device.
-
- The following required properties are defined externally:
-
- - Standard I2C mux properties. See i2c-mux.txt in this directory.
- - I2C child bus nodes. See i2c-mux.txt in this directory.
-
-Optional Properties:
-
- - reset-gpios: Reference to the GPIO connected to the reset input.
- - idle-state: if present, overrides i2c-mux-idle-disconnect,
- Please refer to Documentation/devicetree/bindings/mux/mux-controller.txt
- - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
- children in idle state. This is necessary for example, if there are several
- multiplexers on the bus and the devices behind them use same I2C addresses.
- - interrupts: Interrupt mapping for IRQ.
- - interrupt-controller: Marks the device node as an interrupt controller.
- - #interrupt-cells : Should be two.
- - first cell is the pin number
- - second cell is used to specify flags.
- See also Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-
-Example:
-
- i2c-switch@74 {
- compatible = "nxp,pca9548";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x74>;
-
- interrupt-parent = <&ipic>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
-
- eeprom@54 {
- compatible = "atmel,24c08";
- reg = <0x54>;
- };
- };
-
- i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
-
- rtc@51 {
- compatible = "nxp,pcf8563";
- reg = <0x51>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml
new file mode 100644
index 000000000000..9f1726d0356b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-mux-pca954x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCA954x I2C bus switch
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description:
+ The binding supports NXP PCA954x and PCA984x I2C mux/switch devices.
+
+allOf:
+ - $ref: /schemas/i2c/i2c-mux.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - nxp,pca9540
+ - nxp,pca9542
+ - nxp,pca9543
+ - nxp,pca9544
+ - nxp,pca9545
+ - nxp,pca9546
+ - nxp,pca9547
+ - nxp,pca9548
+ - nxp,pca9846
+ - nxp,pca9847
+ - nxp,pca9848
+ - nxp,pca9849
+ - items:
+ - const: nxp,pca9646
+ - const: nxp,pca9546
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupt-controller: true
+
+ reset-gpios:
+ maxItems: 1
+
+ i2c-mux-idle-disconnect:
+ type: boolean
+ description: Forces the mux to disconnect all children in the idle state.
+ This is necessary, for example, if there are several multiplexers on the
+ bus and the devices behind them use the same I2C addresses.
+
+ idle-state:
+ description: if present, overrides i2c-mux-idle-disconnect
+ $ref: /schemas/mux/mux-controller.yaml#/properties/idle-state
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-mux@74 {
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+
+ interrupt-parent = <&ipic>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@54 {
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ };
+ };
+
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+ };
+ };
+ };
+...
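    As a sketch of the idle handling described above (illustrative only, not
    part of the patch), a switch that disconnects all child buses when idle
    could be written as follows; the address is a placeholder and child bus
    nodes are omitted:

        i2c-mux@71 {
            compatible = "nxp,pca9546";
            reg = <0x71>;
            #address-cells = <1>;
            #size-cells = <0>;
            /* alternatively: idle-state = <1>; to park the mux on child bus 1 */
            i2c-mux-idle-disconnect;
        };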
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
index 33119a98e144..997a287ed3f6 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
@@ -28,9 +28,9 @@ Also required are:
* Standard pinctrl properties that specify the pin mux state for each child
bus. See ../pinctrl/pinctrl-bindings.txt.
-* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.yaml in this directory.
-* I2C child bus nodes. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.yaml in this directory.
For each named state defined in the pinctrl-names property, an I2C child bus
will be created. I2C child bus numbers are assigned based on the index into
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
index de00d7fc450b..b9d9755e4172 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
@@ -7,8 +7,8 @@ Required properties:
- compatible: i2c-mux-reg
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
-* Standard I2C mux properties. See i2c-mux.txt in this directory.
-* I2C child bus nodes. See i2c-mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.yaml in this directory.
+* I2C child bus nodes. See i2c-mux.yaml in this directory.
Optional properties:
- reg: this pair of <offset size> specifies the register to control the mux.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux.txt b/Documentation/devicetree/bindings/i2c/i2c-mux.txt
deleted file mode 100644
index b38f58a1c878..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-mux.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-Common i2c bus multiplexer/switch properties.
-
-An i2c bus multiplexer/switch will have several child busses that are
-numbered uniquely in a device dependent manner. The nodes for an i2c bus
-multiplexer/switch will have one child node for each child bus.
-
-Optional properties:
-- #address-cells = <1>;
- This property is required if the i2c-mux child node does not exist.
-
-- #size-cells = <0>;
- This property is required if the i2c-mux child node does not exist.
-
-- i2c-mux
- For i2c multiplexers/switches that have child nodes that are a mixture
- of both i2c child busses and other child nodes, the 'i2c-mux' subnode
- can be used for populating the i2c child busses. If an 'i2c-mux'
- subnode is present, only subnodes of this will be considered as i2c
- child busses.
-
-Required properties for the i2c-mux child node:
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Required properties for i2c child bus nodes:
-- #address-cells = <1>;
-- #size-cells = <0>;
-- reg : The sub-bus number.
-
-Optional properties for i2c child bus nodes:
-- Other properties specific to the multiplexer/switch hardware.
-- Child nodes conforming to i2c bus binding
-
-
-Example :
-
- /*
- An NXP pca9548 8 channel I2C multiplexer at address 0x70
- with two NXP pca8574 GPIO expanders attached, one each to
- ports 3 and 4.
- */
-
- mux@70 {
- compatible = "nxp,pca9548";
- reg = <0x70>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <3>;
-
- gpio1: gpio@38 {
- compatible = "nxp,pca8574";
- reg = <0x38>;
- #gpio-cells = <2>;
- gpio-controller;
- };
- };
- i2c@4 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4>;
-
- gpio2: gpio@38 {
- compatible = "nxp,pca8574";
- reg = <0x38>;
- #gpio-cells = <2>;
- gpio-controller;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux.yaml
new file mode 100644
index 000000000000..24cac36037f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common i2c bus multiplexer/switch properties.
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |+
+ An i2c bus multiplexer/switch will have several child busses that are numbered
+ uniquely in a device-dependent manner. The nodes for an i2c bus
+ multiplexer/switch will have one child node for each child bus.
+
+ For i2c multiplexers/switches that have child nodes that are a mixture of both
+ i2c child busses and other child nodes, the 'i2c-mux' subnode can be used for
+ populating the i2c child busses. If an 'i2c-mux' subnode is present, only
+ subnodes of this will be considered as i2c child busses.
+
+properties:
+ $nodename:
+ pattern: '^(i2c-?)?mux'
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^i2c@[0-9a-f]+$':
+ $ref: /schemas/i2c/i2c-controller.yaml
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ description: The mux selector sub-bus number for the child I2C bus.
+ maxItems: 1
+
+additionalProperties: true
+
+examples:
+ - |
+ /*
+ * An NXP pca9548 8 channel I2C multiplexer at address 0x70
+ * with two NXP pca8574 GPIO expanders attached, one each to
+ * ports 3 and 4.
+ */
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ gpio@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ gpio@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ };
+ };
+...
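    The description above mentions collecting the i2c child buses under an
    'i2c-mux' subnode when the chip also exposes non-bus child nodes; a minimal
    sketch (hypothetical compatible and address, not taken from the patch)
    could look like:

        mux@70 {
            compatible = "vendor,example-mux";      /* hypothetical part */
            reg = <0x70>;

            /* non-bus child nodes would sit at this level */

            i2c-mux {
                #address-cells = <1>;
                #size-cells = <0>;

                i2c@0 {
                    #address-cells = <1>;
                    #size-cells = <0>;
                    reg = <0>;
                };
            };
        };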
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
deleted file mode 100644
index a425b91af48f..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-I2C for OMAP platforms
-
-Required properties :
-- compatible : Must be
- "ti,omap2420-i2c" for OMAP2420 SoCs
- "ti,omap2430-i2c" for OMAP2430 SoCs
- "ti,omap3-i2c" for OMAP3 SoCs
- "ti,omap4-i2c" for OMAP4+ SoCs
- "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
- "ti,j721e-i2c", "ti,omap4-i2c" for J721E SoCs
- "ti,am64-i2c", "ti,omap4-i2c" for AM64 SoCs
-- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Recommended properties :
-- clock-frequency : Desired I2C bus clock frequency in Hz. Otherwise
- the default 100 kHz frequency will be used.
-
-Optional properties:
-- Child nodes conforming to i2c bus binding
-
-Note: Current implementation will fetch base address, irq and dma
-from omap hwmod data base during device registration.
-Future plan is to migrate hwmod data base contents into device tree
-blob so that, all the required data will be used from device tree dts
-file.
-
-Examples :
-
-i2c1: i2c@0 {
- compatible = "ti,omap3-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- ti,hwmods = "i2c1";
- clock-frequency = <400000>;
-};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pxa-pci-ce4100.txt b/Documentation/devicetree/bindings/i2c/i2c-pxa-pci-ce4100.txt
index 569b16248514..1ff6f8487a2d 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pxa-pci-ce4100.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pxa-pci-ce4100.txt
@@ -71,7 +71,7 @@ This is an example which is used on FalconFalls:
/* This I2C controller has one gpio controller */
gpio@26 {
#gpio-cells = <2>;
- compatible = "ti,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x26>;
gpio-controller;
};
@@ -85,7 +85,7 @@ This is an example which is used on FalconFalls:
gpio@26 {
#gpio-cells = <2>;
- compatible = "ti,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x26>;
gpio-controller;
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
index c6668b7c66e6..7b9fc0c22eaf 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
@@ -9,6 +9,7 @@ PROPERTIES:
"qcom,msm8916-cci"
"qcom,msm8996-cci"
"qcom,sdm845-cci"
+ "qcom,sm8250-cci"
- reg
Usage: required
@@ -41,8 +42,8 @@ PROPERTIES:
SUBNODES:
-The CCI provides I2C masters for one (msm8916) or two i2c busses (msm8996 and
-sdm845), described as subdevices named "i2c-bus@0" and "i2c-bus@1".
+The CCI provides I2C masters for one (msm8916) or two i2c busses (msm8996,
+sdm845 and sm8250), described as subdevices named "i2c-bus@0" and "i2c-bus@1".
PROPERTIES:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
index 7f254d79558c..5339dd4fc370 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -36,6 +36,7 @@ properties:
- rockchip,px30-i2c
- rockchip,rk3308-i2c
- rockchip,rk3328-i2c
+ - rockchip,rk3568-i2c
- const: rockchip,rk3399-i2c
reg:
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index df41f72afc87..b864916e087f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -89,8 +89,11 @@ wants to support one of the below features, it should adapt these bindings.
- smbus
states that additional SMBus restrictions and features apply to this bus.
- Examples of features are SMBusHostNotify and SMBusAlert. Examples of
- restrictions are more reserved addresses and timeout definitions.
+ An example of a feature is SMBusHostNotify. Examples of restrictions are
+ more reserved addresses and timeout definitions.
+
+- smbus-alert
+ states that the optional SMBus-Alert feature applies to this bus.
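    For illustration only (not part of the patch), the new smbus-alert flag
    sits next to the existing smbus flag in a controller node; the compatible,
    address and size below are placeholders:

        i2c@1000 {
            compatible = "vendor,soc-i2c";          /* hypothetical controller */
            reg = <0x1000 0x100>;
            #address-cells = <1>;
            #size-cells = <0>;
            smbus;
            smbus-alert;
        };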
Required properties (per child device)
--------------------------------------
diff --git a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
index eb72dd571def..f771c09aabfc 100644
--- a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
@@ -43,14 +43,12 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: Reference clock for the I2C bus
- description: Bus clock (Only for Armada 7K/8K)
clock-names:
minItems: 1
- maxItems: 2
items:
- const: core
- const: reg
diff --git a/Documentation/devicetree/bindings/i2c/mellanox,i2c-mlxbf.yaml b/Documentation/devicetree/bindings/i2c/mellanox,i2c-mlxbf.yaml
index d2b401d062b9..93198d5d43a6 100644
--- a/Documentation/devicetree/bindings/i2c/mellanox,i2c-mlxbf.yaml
+++ b/Documentation/devicetree/bindings/i2c/mellanox,i2c-mlxbf.yaml
@@ -20,7 +20,6 @@ properties:
reg:
minItems: 3
- maxItems: 4
items:
- description: Smbus block registers
- description: Cause master registers
diff --git a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
deleted file mode 100644
index 5762d2d1ab9c..000000000000
--- a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-I2C for R-Car platforms
-
-Required properties:
-- compatible:
- "renesas,i2c-r8a7742" if the device is a part of a R8A7742 SoC.
- "renesas,i2c-r8a7743" if the device is a part of a R8A7743 SoC.
- "renesas,i2c-r8a7744" if the device is a part of a R8A7744 SoC.
- "renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC.
- "renesas,i2c-r8a77470" if the device is a part of a R8A77470 SoC.
- "renesas,i2c-r8a774a1" if the device is a part of a R8A774A1 SoC.
- "renesas,i2c-r8a774b1" if the device is a part of a R8A774B1 SoC.
- "renesas,i2c-r8a774c0" if the device is a part of a R8A774C0 SoC.
- "renesas,i2c-r8a774e1" if the device is a part of a R8A774E1 SoC.
- "renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC.
- "renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC.
- "renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC.
- "renesas,i2c-r8a7791" if the device is a part of a R8A7791 SoC.
- "renesas,i2c-r8a7792" if the device is a part of a R8A7792 SoC.
- "renesas,i2c-r8a7793" if the device is a part of a R8A7793 SoC.
- "renesas,i2c-r8a7794" if the device is a part of a R8A7794 SoC.
- "renesas,i2c-r8a7795" if the device is a part of a R8A7795 SoC.
- "renesas,i2c-r8a7796" if the device is a part of a R8A77960 SoC.
- "renesas,i2c-r8a77961" if the device is a part of a R8A77961 SoC.
- "renesas,i2c-r8a77965" if the device is a part of a R8A77965 SoC.
- "renesas,i2c-r8a77970" if the device is a part of a R8A77970 SoC.
- "renesas,i2c-r8a77980" if the device is a part of a R8A77980 SoC.
- "renesas,i2c-r8a77990" if the device is a part of a R8A77990 SoC.
- "renesas,i2c-r8a77995" if the device is a part of a R8A77995 SoC.
- "renesas,i2c-r8a779a0" if the device is a part of a R8A779A0 SoC.
- "renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device.
- "renesas,rcar-gen2-i2c" for a generic R-Car Gen2 or RZ/G1 compatible
- device.
- "renesas,rcar-gen3-i2c" for a generic R-Car Gen3 or RZ/G2 compatible
- device.
- "renesas,i2c-rcar" (deprecated)
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first followed
- by the generic version.
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: interrupt specifier.
-
-Optional properties:
-- clock-frequency: desired I2C bus clock frequency in Hz. The absence of this
- property indicates the default frequency 100 kHz.
-- clocks: clock specifier.
-- dmas: Must contain a list of two references to DMA specifiers, one for
- transmission, and one for reception.
-- dma-names: Must contain a list of two DMA names, "tx" and "rx".
-
-- i2c-scl-falling-time-ns: see i2c.txt
-- i2c-scl-internal-delay-ns: see i2c.txt
-- i2c-scl-rising-time-ns: see i2c.txt
-
-Examples :
-
-i2c0: i2c@e6508000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "renesas,i2c-r8a7791", "renesas,rcar-gen2-i2c";
- reg = <0 0xe6508000 0 0x40>;
- interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
- clock-frequency = <400000>;
-};
diff --git a/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt b/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
deleted file mode 100644
index 5ed1ea1c7e14..000000000000
--- a/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Device tree configuration for Renesas EMEV2 IIC controller
-
-Required properties:
-- compatible : "renesas,iic-emev2"
-- reg : address start and address range size of device
-- interrupts : specifier for the IIC controller interrupt
-- clocks : phandle to the IP core SCLK
-- clock-names : must be "sclk"
-- #address-cells : should be <1>
-- #size-cells : should be <0>
-
-Example:
-
- iic0: i2c@e0070000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "renesas,iic-emev2";
- reg = <0xe0070000 0x28>;
- interrupts = <0 32 IRQ_TYPE_EDGE_RISING>;
- clocks = <&iic0_sclk>;
- clock-names = "sclk";
- };
diff --git a/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml b/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
new file mode 100644
index 000000000000..17c1102562be
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/renesas,iic-emev2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas EMMA Mobile EV2 IIC Interface
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: renesas,iic-emev2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: sclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - '#address-cells'
+ - '#size-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ iic0: i2c@e0070000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,iic-emev2";
+ reg = <0xe0070000 0x28>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&iic0_sclk>;
+ clock-names = "sclk";
+ };
diff --git a/Documentation/devicetree/bindings/i2c/renesas,iic.txt b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
deleted file mode 100644
index 93d412832e66..000000000000
--- a/Documentation/devicetree/bindings/i2c/renesas,iic.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-Device tree configuration for Renesas IIC (sh_mobile) driver
-
-Required properties:
-- compatible :
- - "renesas,iic-r8a73a4" (R-Mobile APE6)
- - "renesas,iic-r8a7740" (R-Mobile A1)
- - "renesas,iic-r8a7742" (RZ/G1H)
- - "renesas,iic-r8a7743" (RZ/G1M)
- - "renesas,iic-r8a7744" (RZ/G1N)
- - "renesas,iic-r8a7745" (RZ/G1E)
- - "renesas,iic-r8a774a1" (RZ/G2M)
- - "renesas,iic-r8a774b1" (RZ/G2N)
- - "renesas,iic-r8a774c0" (RZ/G2E)
- - "renesas,iic-r8a774e1" (RZ/G2H)
- - "renesas,iic-r8a7790" (R-Car H2)
- - "renesas,iic-r8a7791" (R-Car M2-W)
- - "renesas,iic-r8a7792" (R-Car V2H)
- - "renesas,iic-r8a7793" (R-Car M2-N)
- - "renesas,iic-r8a7794" (R-Car E2)
- - "renesas,iic-r8a7795" (R-Car H3)
- - "renesas,iic-r8a7796" (R-Car M3-W)
- - "renesas,iic-r8a77961" (R-Car M3-W+)
- - "renesas,iic-r8a77965" (R-Car M3-N)
- - "renesas,iic-r8a77990" (R-Car E3)
- - "renesas,iic-sh73a0" (SH-Mobile AG5)
- - "renesas,rcar-gen2-iic" (generic R-Car Gen2 or RZ/G1
- compatible device)
- - "renesas,rcar-gen3-iic" (generic R-Car Gen3 or RZ/G2
- compatible device)
- - "renesas,rmobile-iic" (generic device)
-
- When compatible with a generic R-Car version, nodes
- must list the SoC-specific version corresponding to
- the platform first followed by the generic R-Car
- version.
-
- When compatible with "renesas,rmobile-iic" it should
- be the last compatibility string listed.
-
- The r8a77990 (R-Car E3) and r8a774c0 (RZ/G2E)
- controllers are not considered compatible with
- "renesas,rcar-gen3-iic" or "renesas,rmobile-iic"
- due to the absence of automatic transmission registers.
-
-- reg : address start and address range size of device
-- interrupts : interrupt of device
-- clocks : clock for device
-- #address-cells : should be <1>
-- #size-cells : should be <0>
-
-Optional properties:
-- clock-frequency : frequency of bus clock in Hz. Default 100kHz if unset.
-- dmas : Must contain a list of two references to DMA
- specifiers, one for transmission, and one for
- reception.
-- dma-names : Must contain a list of two DMA names, "tx" and "rx".
-
-
-Pinctrl properties might be needed, too. See there.
-
-Example:
-
- iic0: i2c@e6500000 {
- compatible = "renesas,iic-r8a7790", "renesas,rcar-gen2-iic",
- "renesas,rmobile-iic";
- reg = <0 0xe6500000 0 0x425>;
- interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
- clock-frequency = <400000>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml b/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
new file mode 100644
index 000000000000..052aad44e781
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/renesas,rcar-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car I2C Controller
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,i2c-r8a7778 # R-Car M1A
+ - renesas,i2c-r8a7779 # R-Car H1
+ - const: renesas,rcar-gen1-i2c # R-Car Gen1
+
+ - items:
+ - enum:
+ - renesas,i2c-r8a7742 # RZ/G1H
+ - renesas,i2c-r8a7743 # RZ/G1M
+ - renesas,i2c-r8a7744 # RZ/G1N
+ - renesas,i2c-r8a7745 # RZ/G1E
+ - renesas,i2c-r8a77470 # RZ/G1C
+ - renesas,i2c-r8a7790 # R-Car H2
+ - renesas,i2c-r8a7791 # R-Car M2-W
+ - renesas,i2c-r8a7792 # R-Car V2H
+ - renesas,i2c-r8a7793 # R-Car M2-N
+ - renesas,i2c-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-i2c # R-Car Gen2 and RZ/G1
+
+ - items:
+ - enum:
+ - renesas,i2c-r8a774a1 # RZ/G2M
+ - renesas,i2c-r8a774b1 # RZ/G2N
+ - renesas,i2c-r8a774c0 # RZ/G2E
+ - renesas,i2c-r8a774e1 # RZ/G2H
+ - renesas,i2c-r8a7795 # R-Car H3
+ - renesas,i2c-r8a7796 # R-Car M3-W
+ - renesas,i2c-r8a77961 # R-Car M3-W+
+ - renesas,i2c-r8a77965 # R-Car M3-N
+ - renesas,i2c-r8a77970 # R-Car V3M
+ - renesas,i2c-r8a77980 # R-Car V3H
+ - renesas,i2c-r8a77990 # R-Car E3
+ - renesas,i2c-r8a77995 # R-Car D3
+ - renesas,i2c-r8a779a0 # R-Car V3U
+ - const: renesas,rcar-gen3-i2c # R-Car Gen3 and RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency:
+ description:
+ Desired I2C bus clock frequency in Hz. The absence of this property
+ indicates the default frequency of 100 kHz.
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+ description:
+ Must contain a list of pairs of references to DMA specifiers, each pair
+ consisting of one for transmission and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+ i2c-scl-falling-time-ns:
+ default: 35
+ description:
+ Number of nanoseconds the SCL signal takes to fall; t(f) in the I2C
+ specification.
+
+ i2c-scl-internal-delay-ns:
+ default: 50
+ description:
+ Number of nanoseconds the IP core additionally needs to set up SCL.
+
+ i2c-scl-rising-time-ns:
+ default: 200
+ description:
+ Number of nanoseconds the SCL signal takes to rise; t(r) in the I2C
+ specification.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - '#address-cells'
+ - '#size-cells'
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen1-i2c
+ - renesas,rcar-gen2-i2c
+ then:
+ properties:
+ dmas: false
+ dma-names: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-i2c
+ - renesas,rcar-gen3-i2c
+ then:
+ required:
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7791-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7791-sysc.h>
+
+ i2c0: i2c@e6508000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a7791", "renesas,rcar-gen2-i2c";
+ reg = <0xe6508000 0x40>;
+ interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ clocks = <&cpg CPG_MOD 931>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 931>;
+ i2c-scl-internal-delay-ns = <6>;
+ };
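    The dmas/dma-names pairing described above is not shown in the example; on
    SoCs with DMA support the node could additionally carry entries along these
    lines (the specifier values are placeholders, not taken from the patch):

        dmas = <&dmac0 0x91>, <&dmac0 0x92>,
               <&dmac1 0x91>, <&dmac1 0x92>;
        dma-names = "tx", "rx", "tx", "rx";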
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.txt b/Documentation/devicetree/bindings/i2c/renesas,riic.txt
deleted file mode 100644
index e26fe3ad86a9..000000000000
--- a/Documentation/devicetree/bindings/i2c/renesas,riic.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Device tree configuration for Renesas RIIC driver
-
-Required properties:
-- compatible :
- "renesas,riic-r7s72100" if the device is a part of a R7S72100 SoC.
- "renesas,riic-r7s9210" if the device is a part of a R7S9210 SoC.
- "renesas,riic-rz" for a generic RZ/A compatible device.
-- reg : address start and address range size of device
-- interrupts : 8 interrupts (TEI, RI, TI, SPI, STI, NAKI, ALI, TMOI)
-- clock-frequency : frequency of bus clock in Hz
-- #address-cells : should be <1>
-- #size-cells : should be <0>
-
-Pinctrl properties might be needed, too. See there.
-
-Example:
-
- i2c0: i2c@fcfee000 {
- compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
- reg = <0xfcfee000 0x44>;
- interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>,
- <0 158 IRQ_TYPE_EDGE_RISING>,
- <0 159 IRQ_TYPE_EDGE_RISING>,
- <0 160 IRQ_TYPE_LEVEL_HIGH>,
- <0 161 IRQ_TYPE_LEVEL_HIGH>,
- <0 162 IRQ_TYPE_LEVEL_HIGH>,
- <0 163 IRQ_TYPE_LEVEL_HIGH>,
- <0 164 IRQ_TYPE_LEVEL_HIGH>;
- clock-frequency = <100000>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
new file mode 100644
index 000000000000..52d92ec7ec0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/renesas,riic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/A and RZ/G2L I2C Bus Interface (RIIC)
+
+maintainers:
+ - Chris Brandt <chris.brandt@renesas.com>
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,riic-r7s72100 # RZ/A1H
+ - renesas,riic-r7s9210 # RZ/A2M
+ - renesas,riic-r9a07g044 # RZ/G2{L,LC}
+ - const: renesas,riic-rz # RZ/A or RZ/G2L
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Transmit End Interrupt (TEI)
+ - description: Receive Data Full Interrupt (RI)
+ - description: Transmit Data Empty Interrupt (TI)
+ - description: Stop Condition Detection Interrupt (SPI)
+ - description: Start Condition Detection Interrupt (STI)
+ - description: NACK Reception Interrupt (NAKI)
+ - description: Arbitration-Lost Interrupt (ALI)
+ - description: Timeout Interrupt (TMOI)
+
+ clock-frequency:
+ description:
+ Desired I2C bus clock frequency in Hz. The absence of this property
+ indicates the default frequency of 100 kHz.
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-frequency
+ - power-domains
+ - '#address-cells'
+ - '#size-cells'
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,riic-r9a07g044
+then:
+ required:
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r7s72100-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ i2c0: i2c@fcfee000 {
+ compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
+ reg = <0xfcfee000 0x44>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 159 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R7S72100_CLK_I2C0>;
+ clock-frequency = <100000>;
+ power-domains = <&cpg_clocks>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml b/Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
new file mode 100644
index 000000000000..04e4ffd80bc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/renesas,rmobile-iic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Mobile I2C Bus Interface (IIC)
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,iic-r8a73a4 # R-Mobile APE6
+ - renesas,iic-r8a7740 # R-Mobile A1
+ - renesas,iic-sh73a0 # SH-Mobile AG5
+ - const: renesas,rmobile-iic # Generic
+
+ - items:
+ - enum:
+ - renesas,iic-r8a7742 # RZ/G1H
+ - renesas,iic-r8a7743 # RZ/G1M
+ - renesas,iic-r8a7744 # RZ/G1N
+ - renesas,iic-r8a7745 # RZ/G1E
+ - renesas,iic-r8a7790 # R-Car H2
+ - renesas,iic-r8a7791 # R-Car M2-W
+ - renesas,iic-r8a7792 # R-Car V2H
+ - renesas,iic-r8a7793 # R-Car M2-N
+ - renesas,iic-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-iic # R-Car Gen2 and RZ/G1
+ - const: renesas,rmobile-iic # Generic
+
+ - items:
+ - enum:
+ - renesas,iic-r8a774a1 # RZ/G2M
+ - renesas,iic-r8a774b1 # RZ/G2N
+ - renesas,iic-r8a774c0 # RZ/G2E
+ - renesas,iic-r8a774e1 # RZ/G2H
+ - renesas,iic-r8a7795 # R-Car H3
+ - renesas,iic-r8a7796 # R-Car M3-W
+ - renesas,iic-r8a77961 # R-Car M3-W+
+ - renesas,iic-r8a77965 # R-Car M3-N
+ - renesas,iic-r8a77990 # R-Car E3
+ - const: renesas,rcar-gen3-iic # R-Car Gen3 and RZ/G2
+ - const: renesas,rmobile-iic # Generic
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+
+ clock-frequency:
+ description:
+ Desired I2C bus clock frequency in Hz. The absence of this property
+ indicates the default frequency of 100 kHz.
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+ description:
+ Must contain a list of pairs of references to DMA specifiers, each pair
+ consisting of one for transmission and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - '#address-cells'
+ - '#size-cells'
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,iic-r8a7740
+ - renesas,iic-sh73a0
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Arbitration Lost Interrupt (ALI)
+ - description: Non-acknowledge Detection Interrupt (TACKI)
+ - description: Wait Interrupt (WAITI)
+ - description: Data Transmit Enable interrupt (DTEI)
+ else:
+ properties:
+ interrupts:
+ items:
+ - description: Single combined interrupt
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-iic
+ - renesas,rcar-gen3-iic
+ then:
+ required:
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ iic0: i2c@e6500000 {
+ compatible = "renesas,iic-r8a7790", "renesas,rcar-gen2-iic",
+ "renesas,rmobile-iic";
+ reg = <0xe6500000 0x425>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 318>;
+ clock-frequency = <400000>;
+ dmas = <&dmac0 0x61>, <&dmac0 0x62>, <&dmac1 0x61>, <&dmac1 0x62>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 318>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml b/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
new file mode 100644
index 000000000000..ff165ad1bee8
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/ti,omap4-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for I2C controllers on TI's OMAP and K3 SoCs
+
+maintainers:
+ - Vignesh Raghavendra <vigneshr@ti.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ti,omap2420-i2c
+ - ti,omap2430-i2c
+ - ti,omap3-i2c
+ - ti,omap4-i2c
+ - items:
+ - enum:
+ - ti,am4372-i2c
+ - ti,am64-i2c
+ - ti,am654-i2c
+ - ti,j721e-i2c
+ - const: ti,omap4-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: fck
+
+ clock-frequency: true
+
+ power-domains: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ ti,hwmods:
+ description:
+ Must be "i2c<n>", n being the instance number (1-based).
+ This property is applicable only on legacy platforms, mainly omap2/3
+ and ti81xx, and should not be used on other platforms.
+ $ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
+
+# subnode's properties
+patternProperties:
+ "@[0-9a-f]+$":
+ type: object
+ description:
+ Node for a device attached to the bus; its properties are defined by the
+ device's own binding.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+if:
+ properties:
+ compatible:
+ oneOf:
+ - const: ti,omap2420-i2c
+ - const: ti,omap2430-i2c
+ - const: ti,omap3-i2c
+ - const: ti,omap4-i2c
+
+then:
+ properties:
+ ti,hwmods:
+ items:
+ - pattern: "^i2c([1-9])$"
+
+else:
+ properties:
+ ti,hwmods: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ main_i2c0: i2c@2000000 {
+ compatible = "ti,j721e-i2c", "ti,omap4-i2c";
+ reg = <0x2000000 0x100>;
+ interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
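    For legacy OMAP platforms, the deprecated ti,hwmods property is still
    accepted; the example from the removed i2c-omap.txt binding shows its
    historical use (on such platforms the base address and interrupt came from
    the hwmod data, so reg and interrupts were omitted):

        i2c1: i2c@0 {
            compatible = "ti,omap3-i2c";
            #address-cells = <1>;
            #size-cells = <0>;
            ti,hwmods = "i2c1";
            clock-frequency = <400000>;
        };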
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adis16201.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adis16201.yaml
new file mode 100644
index 000000000000..6f8f8a6258fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adis16201.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/adi,adis16201.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADIS16201 Dual Axis Inclinometer and similar
+
+maintainers:
+ - Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+description: |
+ Two parts that are similar from an external interface point of view;
+ both use an SPI interface.
+ https://www.analog.com/en/products/adis16201.html
+ https://www.analog.com/en/products/adis16209.html
+
+properties:
+ compatible:
+ enum:
+ - adi,adis16201
+ - adi,adis16209
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ spi-max-frequency: true
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@0 {
+ compatible = "adi,adis16201";
+ reg = <0>;
+ spi-max-frequency = <2500000>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/accel/bosch,bma180.yaml b/Documentation/devicetree/bindings/iio/accel/bosch,bma180.yaml
index 45b3abde298f..a7e84089cc3d 100644
--- a/Documentation/devicetree/bindings/iio/accel/bosch,bma180.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/bosch,bma180.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/iio/accel/bosch,bma180.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Bosch BMA023 / BMA150/ BMA180 / BMA25x / SMB380 triaxial accelerometers
+title: Bosch BMA023 / BMA150 / BMA180 / BMA250 / SMB380 triaxial accelerometers
maintainers:
- Jonathan Cameron <jic23@kernel.org>
@@ -21,7 +21,6 @@ properties:
- bosch,bma150
- bosch,bma180
- bosch,bma250
- - bosch,bma254
- bosch,smb380
reg:
diff --git a/Documentation/devicetree/bindings/iio/accel/bosch,bma220.yaml b/Documentation/devicetree/bindings/iio/accel/bosch,bma220.yaml
new file mode 100644
index 000000000000..942b23ad0712
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/bosch,bma220.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/bosch,bma220.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bosch BMA220 Triaxial Acceleration Sensor
+
+maintainers:
+ - Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+properties:
+ compatible:
+ enum:
+ - bosch,bma220
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ spi-max-frequency: true
+
+ vdda-supply: true
+ vddd-supply: true
+ vddio-supply: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@0 {
+ compatible = "bosch,bma220";
+ reg = <0>;
+ spi-max-frequency = <2500000>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml b/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
index c2efbb813ca2..e830d5295b92 100644
--- a/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
@@ -18,6 +18,8 @@ properties:
enum:
- bosch,bmc150_accel
- bosch,bmi055_accel
+ - bosch,bma253
+ - bosch,bma254
- bosch,bma255
- bosch,bma250e
- bosch,bma222
@@ -31,7 +33,12 @@ properties:
vddio-supply: true
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+ description: |
+ The first interrupt listed must be the one connected to the INT1 pin;
+ the second (optional) interrupt listed must be the one connected to the
+ INT2 pin (if available).
mount-matrix:
description: an optional 3x3 mounting rotation matrix.
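    To illustrate the two-interrupt case described above (INT1 first, INT2
    second), a node might look like this; the I2C address and GPIO lines are
    placeholders, not taken from the patch:

        accelerometer@19 {
            compatible = "bosch,bma253";
            reg = <0x19>;
            interrupt-parent = <&gpio1>;
            interrupts = <25 IRQ_TYPE_EDGE_RISING>,     /* INT1 */
                         <26 IRQ_TYPE_EDGE_RISING>;     /* INT2 */
        };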
diff --git a/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml b/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml
new file mode 100644
index 000000000000..7c8f8bdc2333
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/fsl,mma7455.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MMA7455 and MMA7456 three axis accelerometers
+
+maintainers:
+ - Joachim Eastwood <manabian@gmail.com>
+ - Jonathan Cameron <jic23@kernel.org>
+
+description:
+ Devices support both SPI and I2C interfaces.
+
+properties:
+ compatible:
+ enum:
+ - fsl,mma7455
+ - fsl,mma7456
+ reg:
+ maxItems: 1
+
+ avdd-supply: true
+ vddio-supply: true
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ description:
+ Data ready is only available on INT1, but events can use either or
+ both pins. If not specified, the first element is assumed to correspond
+ to INT1 and the second (where present) to INT2.
+ minItems: 1
+ maxItems: 2
+ items:
+ enum:
+ - "INT1"
+ - "INT2"
+
+ spi-max-frequency: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ # include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@18 {
+ compatible = "fsl,mma7455";
+ reg = <0x18>;
+ vddio-supply = <&iovdd>;
+ avdd-supply = <&avdd>;
+ interrupts = <57 IRQ_TYPE_EDGE_FALLING>, <58 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "INT2", "INT1";
+ };
+ };
+ - |
+ # include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ accelerometer@0 {
+ compatible = "fsl,mma7456";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ vddio-supply = <&iovdd>;
+ avdd-supply = <&avdd>;
+ interrupts = <57 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "INT1";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
index fbb714431e3d..52fa0f7c2d0e 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
@@ -16,6 +16,7 @@ properties:
- kionix,kxcj91008
- kionix,kxtj21009
- kionix,kxtf9
+ - kionix,kx023-1025
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/accel/murata,sca3300.yaml b/Documentation/devicetree/bindings/iio/accel/murata,sca3300.yaml
new file mode 100644
index 000000000000..55fd3548e3b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/murata,sca3300.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/murata,sca3300.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Murata SCA3300 Accelerometer
+
+description: |
+ 3-axis industrial accelerometer with digital SPI interface
+ https://www.murata.com/en-global/products/sensor/accel/sca3300
+
+maintainers:
+ - Tomas Melin <tomas.melin@vaisala.com>
+
+properties:
+ compatible:
+ enum:
+ - murata,sca3300
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 8000000
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ accelerometer@0 {
+ compatible = "murata,sca3300";
+ reg = <0x0>;
+ spi-max-frequency = <4000000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml b/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml
new file mode 100644
index 000000000000..ad529ab2c6e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/nxp,fxls8962af.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/nxp,fxls8962af.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP FXLS8962AF/FXLS8964AF Accelerometer driver
+
+maintainers:
+ - Sean Nyekjaer <sean@geanix.com>
+
+description: |
+ NXP FXLS8962AF/FXLS8964AF Accelerometer driver that supports
+ SPI and I2C interfaces.
+ https://www.nxp.com/docs/en/data-sheet/FXLS8962AF.pdf
+ https://www.nxp.com/docs/en/data-sheet/FXLS8964AF.pdf
+
+properties:
+ compatible:
+ enum:
+ - nxp,fxls8962af
+ - nxp,fxls8964af
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: phandle to the regulator that provides power to the accelerometer
+
+ spi-max-frequency: true
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ enum:
+ - INT1
+ - INT2
+
+ drive-open-drain:
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Example for an I2C device node */
+ accelerometer@62 {
+ compatible = "nxp,fxls8962af";
+ reg = <0x62>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT1";
+ };
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Example for a SPI device node */
+ accelerometer@0 {
+ compatible = "nxp,fxls8962af";
+ reg = <0>;
+ spi-max-frequency = <4000000>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT1";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adc.yaml b/Documentation/devicetree/bindings/iio/adc/adc.yaml
index 912a7635edc4..db348fcbb52c 100644
--- a/Documentation/devicetree/bindings/iio/adc/adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adc.yaml
@@ -39,4 +39,16 @@ properties:
The first value specifies the positive input pin, the second
specifies the negative input pin.
+ settling-time-us:
+ description:
+ Time between enabling the channel and first stable readings.
+
+ oversampling-ratio:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Oversampling is used as a replacement for, or in addition to, the
+ low-pass filter. In some cases, the desired filtering characteristics
+ are a function of the device design and can interact with other
+ characteristics such as settling time.
+
additionalProperties: true
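A brief sketch of how a consumer channel node might use the two new generic properties (node names and values below are invented for illustration, not taken from any specific binding; the ti,tsc2046 schema further down uses them in a complete example):

    adc {
        #address-cells = <1>;
        #size-cells = <0>;

        channel@1 {
            reg = <1>;
            /* allow 500 us after enabling the channel before readings are trusted */
            settling-time-us = <500>;
            /* average 4 raw samples per reported value, in place of
               (or on top of) an analog low-pass filter */
            oversampling-ratio = <4>;
        };
    };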
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7298.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7298.yaml
new file mode 100644
index 000000000000..ca414bb396c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7298.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7298.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD7298 ADC
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+
+description: |
+ Bindings for the Analog Devices AD7298 ADC device. Datasheet can be
+ found here:
+ https://www.analog.com/en/products/ad7298.html
+
+properties:
+ compatible:
+ const: adi,ad7298
+
+ reg:
+ maxItems: 1
+
+ vref-supply: true
+ vdd-supply: true
+ spi-max-frequency: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad7298";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ vref-supply = <&adc_vref>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
new file mode 100644
index 000000000000..cf711082ad7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
@@ -0,0 +1,174 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7476.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AD7476 and similar simple SPI ADCs from multiple manufacturers.
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+
+description: |
+ A lot of simple SPI ADCs have very straightforward interfaces.
+ They typically don't provide a MOSI pin, simply reading out data
+ on MISO when the clock toggles.
+
+properties:
+ compatible:
+ enum:
+ - adi,ad7091
+ - adi,ad7091r
+ - adi,ad7273
+ - adi,ad7274
+ - adi,ad7276
+ - adi,ad7277
+ - adi,ad7278
+ - adi,ad7466
+ - adi,ad7467
+ - adi,ad7468
+ - adi,ad7475
+ - adi,ad7476
+ - adi,ad7476a
+ - adi,ad7477
+ - adi,ad7477a
+ - adi,ad7478
+ - adi,ad7478a
+ - adi,ad7495
+ - adi,ad7910
+ - adi,ad7920
+ - adi,ad7940
+ - ti,adc081s
+ - ti,adc101s
+ - ti,adc121s
+ - ti,ads7866
+ - ti,ads7867
+ - ti,ads7868
+ - lltc,ltc2314-14
+
+ reg:
+ maxItems: 1
+
+ vcc-supply:
+ description:
+ Main power supply voltage for the chips, sometimes referred to as VDD on
+ datasheets. If there is no separate vref-supply, then this is needed
+ to establish channel scaling.
+
+ vdrive-supply:
+ description:
+ Some devices have a separate supply for their digital control side.
+
+ vref-supply:
+ description:
+ Some devices have a specific reference voltage supplied on a different
+ pin from the other supplies. Needed to establish channel scaling unless
+ there is also an internal reference available (e.g. the ad7091r).
+
+ spi-max-frequency: true
+
+ adi,conversion-start-gpios:
+ description: A GPIO used to trigger the start of a conversion
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+allOf:
+ # Devices where reference is vcc
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7091
+ - adi,ad7276
+ - adi,ad7277
+ - adi,ad7278
+ - adi,ad7466
+ - adi,ad7467
+ - adi,ad7468
+ - adi,ad7940
+ - ti,adc081s
+ - ti,adc101s
+ - ti,adc121s
+ - ti,ads7866
+ - ti,ads7868
+ required:
+ - vcc-supply
+ # Devices with a vref
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7091r
+ - adi,ad7273
+ - adi,ad7274
+ - adi,ad7475
+ - lltc,ltc2314-14
+ then:
+ properties:
+ vref-supply: true
+ else:
+ properties:
+ vref-supply: false
+ # Devices with a vref where it is not optional
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7273
+ - adi,ad7274
+ - adi,ad7475
+ - lltc,ltc2314-14
+ then:
+ required:
+ - vref-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7475
+ - adi,ad7495
+ then:
+ properties:
+ vdrive-supply: true
+ else:
+ properties:
+ vdrive-supply: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7091
+ - adi,ad7091r
+ then:
+ properties:
+ adi,conversion-start-gpios: true
+ else:
+ properties:
+ adi,conversion-start-gpios: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad7091r";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ vcc-supply = <&adc_vcc>;
+ vref-supply = <&adc_vref>;
+ };
+ };
+...
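The ad7091r example above exercises the vcc/vref branch of the allOf rules; for a part where an external reference is mandatory and a separate digital supply is allowed (adi,ad7475 per the schema above), a node could plausibly look like the following sketch (regulator labels are invented):

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        adc@0 {
            compatible = "adi,ad7475";
            reg = <0>;
            spi-max-frequency = <5000000>;
            vcc-supply = <&adc_vcc>;       /* analog supply */
            vdrive-supply = <&adc_vdrive>; /* digital interface supply */
            vref-supply = <&adc_vref>;     /* required external reference */
        };
    };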
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml
index 3be8955587e4..7e8328e9ce13 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.yaml
@@ -41,7 +41,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: clkin
- const: core
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 1e7894e524f9..733351dee252 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -38,14 +38,12 @@ properties:
dfsdm clock can also feed CLKOUT, when CLKOUT is used.
- description: audio clock can be used as an alternate to feed CLKOUT.
minItems: 1
- maxItems: 2
clock-names:
items:
- const: dfsdm
- const: audio
minItems: 1
- maxItems: 2
"#address-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,tsc2046.yaml b/Documentation/devicetree/bindings/iio/adc/ti,tsc2046.yaml
new file mode 100644
index 000000000000..601d69971d84
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti,tsc2046.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/ti,tsc2046.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments TSC2046 touch screen controller.
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+description: |
+ TSC2046 is a touch screen controller with an 8-channel ADC.
+
+properties:
+ compatible:
+ enum:
+ - ti,tsc2046e-adc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ spi-max-frequency: true
+
+ "#io-channel-cells":
+ const: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^channel@[0-7]$":
+ $ref: "adc.yaml"
+ type: object
+
+ properties:
+ reg:
+ description: |
+ The channel number. Up to 8 channels are supported.
+ items:
+ minimum: 0
+ maximum: 7
+
+ settling-time-us: true
+ oversampling-ratio: true
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "ti,tsc2046e-adc";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ interrupts-extended = <&gpio3 20 IRQ_TYPE_LEVEL_LOW>;
+ #io-channel-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ };
+ channel@1 {
+ reg = <1>;
+ settling-time-us = <700>;
+ oversampling-ratio = <5>;
+ };
+ channel@2 {
+ reg = <2>;
+ };
+ channel@3 {
+ reg = <3>;
+ settling-time-us = <700>;
+ oversampling-ratio = <5>;
+ };
+ channel@4 {
+ reg = <4>;
+ settling-time-us = <700>;
+ oversampling-ratio = <5>;
+ };
+ channel@5 {
+ reg = <5>;
+ settling-time-us = <700>;
+ oversampling-ratio = <5>;
+ };
+ channel@6 {
+ reg = <6>;
+ };
+ channel@7 {
+ reg = <7>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.yaml b/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.yaml
index 90439a8dc785..f8a112c9a822 100644
--- a/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.yaml
+++ b/Documentation/devicetree/bindings/iio/afe/current-sense-shunt.yaml
@@ -24,6 +24,9 @@ properties:
description: |
Channel node of a voltage io-channel.
+ "#io-channel-cells":
+ const: 0
+
shunt-resistor-micro-ohms:
description: The shunt resistance.
@@ -57,6 +60,7 @@ examples:
sysi {
compatible = "current-sense-shunt";
io-channels = <&tiadc 0>;
+ #io-channel-cells = <0>;
/* Divide the voltage by 3300000/1000000 (or 3.3) for the current. */
shunt-resistor-micro-ohms = <3300000>;
diff --git a/Documentation/devicetree/bindings/iio/cdc/adi,ad7746.yaml b/Documentation/devicetree/bindings/iio/cdc/adi,ad7746.yaml
new file mode 100644
index 000000000000..a02036ef9e8d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/cdc/adi,ad7746.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/cdc/adi,ad7746.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AD7746 24-Bit Capacitance-to-Digital Converter with Temperature Sensor
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+
+description: |
+ AD7746 24-Bit Capacitance-to-Digital Converter with Temperature Sensor
+
+ Specifications about the part can be found at:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7291.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ad7745
+ - adi,ad7746
+ - adi,ad7747
+
+ reg:
+ maxItems: 1
+
+ adi,excitation-vdd-permille:
+ description: |
+ Fraction of VDD, in per mille, to be used as the excitation voltage.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [125, 250, 375, 500]
+
+ adi,exca-output-en:
+ description: Enables the EXCA pin as the excitation output.
+ type: boolean
+
+ adi,exca-output-invert:
+ description: |
+ Inverts the excitation output on the EXCA pin.
+ Normally only one of the EXCX pins would be inverted; see the following
+ application note for more details:
+ https://www.analog.com/media/en/technical-documentation/application-notes/AN-1585.pdf
+ type: boolean
+
+ adi,excb-output-en:
+ description: Enables the EXCB pin as the excitation output.
+ type: boolean
+
+ adi,excb-output-invert:
+ description: Inverts the excitation output on the EXCB pin.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ad7746: cdc@48 {
+ compatible = "adi,ad7746";
+ reg = <0x48>;
+ adi,excitation-vdd-permille = <125>;
+
+ adi,exca-output-en;
+ adi,exca-output-invert;
+ adi,excb-output-en;
+ adi,excb-output-invert;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
index a93d1972a5c2..967500b7e773 100644
--- a/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
+++ b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
@@ -22,7 +22,6 @@ properties:
required:
- compatible
- - reg
additionalProperties: false
@@ -37,5 +36,11 @@ examples:
reg = <0x69>;
};
};
+ - |
+ serial {
+ air-pollution-sensor {
+ compatible = "sensirion,sps30";
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/iio/dac/ad5755.txt b/Documentation/devicetree/bindings/iio/dac/ad5755.txt
deleted file mode 100644
index 502e1e55adbd..000000000000
--- a/Documentation/devicetree/bindings/iio/dac/ad5755.txt
+++ /dev/null
@@ -1,124 +0,0 @@
-* Analog Devices AD5755 IIO Multi-Channel DAC Linux Driver
-
-Required properties:
- - compatible: Has to contain one of the following:
- adi,ad5755
- adi,ad5755-1
- adi,ad5757
- adi,ad5735
- adi,ad5737
-
- - reg: spi chip select number for the device
- - spi-cpha or spi-cpol: is the only modes that is supported
-
-Recommended properties:
- - spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Optional properties:
-See include/dt-bindings/iio/ad5755.h
- - adi,ext-dc-dc-compenstation-resistor: boolean set if the hardware have an
- external resistor and thereby bypasses
- the internal compensation resistor.
- - adi,dc-dc-phase:
- Valid values for DC DC Phase control is:
- 0: All dc-to-dc converters clock on the same edge.
- 1: Channel A and Channel B clock on the same edge,
- Channel C and Channel D clock on opposite edges.
- 2: Channel A and Channel C clock on the same edge,
- Channel B and Channel D clock on opposite edges.
- 3: Channel A, Channel B, Channel C, and Channel D
- clock 90 degrees out of phase from each other.
- - adi,dc-dc-freq-hz:
- Valid values for DC DC frequency is [Hz]:
- 250000
- 410000
- 650000
- - adi,dc-dc-max-microvolt:
- Valid values for the maximum allowed Vboost voltage supplied by
- the dc-to-dc converter is:
- 23000000
- 24500000
- 27000000
- 29500000
-
-Optional for every channel:
- - adi,mode:
- Valid values for DAC modes is:
- 0: 0 V to 5 V voltage range.
- 1: 0 V to 10 V voltage range.
- 2: Plus minus 5 V voltage range.
- 3: Plus minus 10 V voltage range.
- 4: 4 mA to 20 mA current range.
- 5: 0 mA to 20 mA current range.
- 6: 0 mA to 24 mA current range.
- - adi,ext-current-sense-resistor: boolean set if the hardware a external
- current sense resistor.
- - adi,enable-voltage-overrange: boolean enable voltage overrange
- - adi,slew: Array of slewrate settings should contain 3 fields:
- 1: Should be either 0 or 1 in order to enable or disable slewrate.
- 2: Slew rate settings:
- Valid values for the slew rate update frequency:
- 64000
- 32000
- 16000
- 8000
- 4000
- 2000
- 1000
- 500
- 250
- 125
- 64
- 32
- 16
- 8
- 4
- 0
- 3: Slew step size:
- Valid values for the step size LSBs:
- 1
- 2
- 4
- 16
- 32
- 64
- 128
- 256
-
-Example:
-dac@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "adi,ad5755";
- reg = <0>;
- spi-max-frequency = <1000000>;
- spi-cpha;
- adi,dc-dc-phase = <0>;
- adi,dc-dc-freq-hz = <410000>;
- adi,dc-dc-max-microvolt = <23000000>;
- channel@0 {
- reg = <0>;
- adi,mode = <4>;
- adi,ext-current-sense-resistor;
- adi,slew = <0 64000 1>;
- };
- channel@1 {
- reg = <1>;
- adi,mode = <4>;
- adi,ext-current-sense-resistor;
- adi,slew = <0 64000 1>;
- };
- channel@2 {
- reg = <2>;
- adi,mode = <4>;
- adi,ext-current-sense-resistor;
- adi,slew = <0 64000 1>;
- };
- channel@3 {
- reg = <3>;
- adi,mode = <4>;
- adi,ext-current-sense-resistor;
- adi,slew = <0 64000 1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml
new file mode 100644
index 000000000000..be419ac46caa
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ad5755.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD5755 Multi-Channel DAC
+
+maintainers:
+ - Sean Nyekjaer <sean.nyekjaer@prevas.dk>
+
+properties:
+ compatible:
+ enum:
+ - adi,ad5755
+ - adi,ad5755-1
+ - adi,ad5757
+ - adi,ad5735
+ - adi,ad5737
+
+ reg:
+ maxItems: 1
+
+ spi-cpha:
+ description: Either this or spi-cpol but not both.
+ spi-cpol: true
+
+ spi-max-frequency: true
+
+ adi,ext-dc-dc-compenstation-resistor:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set if the hardware has an external resistor and thereby bypasses
+ the internal compensation resistor.
+
+ adi,dc-dc-phase:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ description: |
+ Valid values for DC DC Phase control are:
+ 0: All dc-to-dc converters clock on the same edge.
+ 1: Channel A and Channel B clock on the same edge,
+ Channel C and Channel D clock on opposite edges.
+ 2: Channel A and Channel C clock on the same edge,
+ Channel B and Channel D clock on opposite edges.
+ 3: Channel A, Channel B, Channel C, and Channel D
+ clock 90 degrees out of phase from each other.
+
+ adi,dc-dc-freq-hz:
+ enum: [250000, 410000, 650000]
+
+ adi,dc-dc-max-microvolt:
+ description:
+ Maximum allowed Vboost voltage supplied by the dc-to-dc converter.
+ enum: [23000000, 24500000, 27000000, 29500000]
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ "#io-channel-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+patternProperties:
+ "^channel@[0-7]$":
+ type: object
+ description: Child node to describe a channel
+ properties:
+ reg:
+ maxItems: 1
+
+ adi,mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 6
+ description: |
+ Valid values for DAC modes are:
+ 0: 0 V to 5 V voltage range.
+ 1: 0 V to 10 V voltage range.
+ 2: Plus minus 5 V voltage range.
+ 3: Plus minus 10 V voltage range.
+ 4: 4 mA to 20 mA current range.
+ 5: 0 mA to 20 mA current range.
+ 6: 0 mA to 24 mA current range.
+
+ adi,ext-current-sense-resistor:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set if the hardware has an external current sense resistor
+
+ adi,enable-voltage-overrange:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Enable voltage overrange
+
+ adi,slew:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: |
+ The slew rate settings array should contain 3 fields:
+ 1: Either 0 or 1, to enable or disable slew rate control.
+ 2: Slew rate update frequency
+ 3: Slew step size
+ items:
+ - enum: [0, 1]
+ - enum: [64000, 32000, 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 64, 32, 16, 8, 4, 0]
+ - enum: [1, 2, 4, 16, 32, 64, 128, 256]
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+oneOf:
+ - required:
+ - spi-cpha
+ - required:
+ - spi-cpol
+
+examples:
+ - |
+ #include <dt-bindings/iio/adi,ad5592r.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "adi,ad5755";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpha;
+ adi,dc-dc-phase = <0>;
+ adi,dc-dc-freq-hz = <410000>;
+ adi,dc-dc-max-microvolt = <23000000>;
+ channel@0 {
+ reg = <0>;
+ adi,mode = <4>;
+ adi,ext-current-sense-resistor;
+ adi,slew = <0 64000 1>;
+ };
+ channel@1 {
+ reg = <1>;
+ adi,mode = <4>;
+ adi,ext-current-sense-resistor;
+ adi,slew = <0 64000 1>;
+ };
+ channel@2 {
+ reg = <2>;
+ adi,mode = <4>;
+ adi,ext-current-sense-resistor;
+ adi,slew = <0 64000 1>;
+ };
+ channel@3 {
+ reg = <3>;
+ adi,mode = <4>;
+ adi,ext-current-sense-resistor;
+ adi,slew = <0 64000 1>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/ti,dac082s085.yaml b/Documentation/devicetree/bindings/iio/dac/ti,dac082s085.yaml
new file mode 100644
index 000000000000..b0157050f1ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ti,dac082s085.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/ti,dac082s085.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DAC082s085 and similar DACs
+
+description:
+ A family of Texas Instruments 8/10/12-bit 2/4-channel DACs
+
+maintainers:
+ - Lukas Wunner <lukas@wunner.de>
+
+properties:
+ compatible:
+ enum:
+ - ti,dac082s085
+ - ti,dac102s085
+ - ti,dac122s085
+ - ti,dac084s085
+ - ti,dac104s085
+ - ti,dac124s085
+
+ reg:
+ maxItems: 1
+
+ spi-cpha: true
+ spi-cpol:
+ description:
+ Either spi-cpha or spi-cpol must be set, but not both.
+
+ vref-supply:
+ description: Needed to provide output scaling.
+
+ spi-max-frequency: true
+
+required:
+ - compatible
+ - reg
+ - vref-supply
+
+additionalProperties: false
+
+oneOf:
+ - required:
+ - spi-cpha
+ - required:
+ - spi-cpol
+
+examples:
+ - |
+ vref_2v5_reg: regulator-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "2v5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac@0 {
+ compatible = "ti,dac082s085";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-cpol;
+ vref-supply = <&vref_2v5_reg>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt b/Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt
deleted file mode 100644
index 9cb0e10df704..000000000000
--- a/Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Texas Instruments 8/10/12-bit 2/4-channel DAC driver
-
-Required properties:
- - compatible: Must be one of:
- "ti,dac082s085"
- "ti,dac102s085"
- "ti,dac122s085"
- "ti,dac084s085"
- "ti,dac104s085"
- "ti,dac124s085"
- - reg: Chip select number.
- - spi-cpha, spi-cpol: SPI mode (0,1) or (1,0) must be used, so specify
- either spi-cpha or spi-cpol (but not both).
- - vref-supply: Phandle to the external reference voltage supply.
-
-For other required and optional properties of SPI slave nodes please refer to
-../../spi/spi-bus.txt.
-
-Example:
- vref_2v5_reg: regulator-vref {
- compatible = "regulator-fixed";
- regulator-name = "2v5";
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <2500000>;
- regulator-always-on;
- };
-
- dac@0 {
- compatible = "ti,dac082s085";
- reg = <0>;
- spi-max-frequency = <40000000>;
- spi-cpol;
- vref-supply = <&vref_2v5_reg>;
- };
diff --git a/Documentation/devicetree/bindings/iio/light/amstaos,tsl2591.yaml b/Documentation/devicetree/bindings/iio/light/amstaos,tsl2591.yaml
new file mode 100644
index 000000000000..83b88c6a243d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/amstaos,tsl2591.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/amstaos,tsl2591.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMS/TAOS TSL2591 Ambient Light Sensor (ALS)
+
+maintainers:
+ - Joe Sandom <joe.g.sandom@gmail.com>
+
+description: |
+ AMS/TAOS TSL2591 is a very-high sensitivity
+ light-to-digital converter that transforms light intensity into a digital
+ signal.
+
+properties:
+ compatible:
+ const: amstaos,tsl2591
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+ description:
+ Interrupt (INT, pin 2), active low. Should be set to IRQ_TYPE_EDGE_FALLING.
+ The interrupt is used to detect whether the light intensity has fallen
+ below or risen above the configured threshold values.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tsl2591@29 {
+ compatible = "amstaos,tsl2591";
+ reg = <0x29>;
+ interrupts = <20 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
deleted file mode 100644
index 89647d714387..000000000000
--- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-I/O channel multiplexer bindings
-
-If a multiplexer is used to select which hardware signal is fed to
-e.g. an ADC channel, these bindings describe that situation.
-
-Required properties:
-- compatible : "io-channel-mux"
-- io-channels : Channel node of the parent channel that has multiplexed
- input.
-- io-channel-names : Should be "parent".
-- #address-cells = <1>;
-- #size-cells = <0>;
-- mux-controls : Mux controller node to use for operating the mux
-- channels : List of strings, labeling the mux controller states.
-
-For each non-empty string in the channels property, an io-channel will
-be created. The number of this io-channel is the same as the index into
-the list of strings in the channels property, and also matches the mux
-controller state. The mux controller state is described in
-../mux/mux-controller.txt
-
-Example:
- mux: mux-controller {
- compatible = "gpio-mux";
- #mux-control-cells = <0>;
-
- mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
- <&pioA 1 GPIO_ACTIVE_HIGH>;
- };
-
- adc-mux {
- compatible = "io-channel-mux";
- io-channels = <&adc 0>;
- io-channel-names = "parent";
-
- mux-controls = <&mux>;
-
- channels = "sync", "in", "system-regulator";
- };
diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml
new file mode 100644
index 000000000000..870b043406d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/multiplexer/io-channel-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: I/O channel multiplexer bindings
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |
+ If a multiplexer is used to select which hardware signal is fed to
+ e.g. an ADC channel, these bindings describe that situation.
+
+ For each non-empty string in the channels property, an io-channel will be
+ created. The number of this io-channel is the same as the index into the list
+ of strings in the channels property, and also matches the mux controller
+ state. The mux controller state is described in
+ Documentation/devicetree/bindings/mux/mux-controller.yaml
+
+properties:
+
+ compatible:
+ const: io-channel-mux
+
+ io-channels:
+ maxItems: 1
+ description: Channel node of the parent channel that has multiplexed input.
+
+ io-channel-names:
+ const: parent
+
+ mux-controls: true
+ mux-control-names: true
+
+ channels:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ List of strings, labeling the mux controller states.
+
+required:
+ - compatible
+ - io-channels
+ - io-channel-names
+ - mux-controls
+ - channels
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+ channels = "sync", "in", "system-regulator";
+ };
+...
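As the description notes, each non-empty string in channels creates an io-channel whose index matches its position in the list; a hypothetical consumer (assuming the adc-mux node above carried an adc_mux label, with the node and channel names below invented) would then select a mux state simply by index:

    monitor {
        /* io-channel index 2 selects the "system-regulator" mux state */
        io-channels = <&adc_mux 2>;
        io-channel-names = "supply";
    };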
diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
index 7e98f47987dc..71de5631ebae 100644
--- a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
+++ b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
@@ -6,7 +6,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics MEMS sensors
-description: |
+description: The STMicroelectronics sensor devices are pretty straightforward
+ I2C or SPI devices, all sharing the same device tree descriptions no matter
+ what type of sensor it is.
Note that whilst this covers many STMicro MEMs sensors, some more complex
IMUs need their own bindings.
The STMicroelectronics sensor devices are pretty straight-forward I2C or
@@ -15,90 +17,140 @@ description: |
maintainers:
- Denis Ciocca <denis.ciocca@st.com>
+ - Linus Walleij <linus.walleij@linaro.org>
properties:
compatible:
- description: |
- Some values are deprecated.
- st,lis3lv02d (deprecated, use st,lis3lv02dl-accel)
- st,lis302dl-spi (deprecated, use st,lis3lv02dl-accel)
- enum:
- # Accelerometers
- - st,lis3lv02d
- - st,lis302dl-spi
- - st,lis3lv02dl-accel
- - st,lsm303dlh-accel
- - st,lsm303dlhc-accel
- - st,lis3dh-accel
- - st,lsm330d-accel
- - st,lsm330dl-accel
- - st,lsm330dlc-accel
- - st,lis331dl-accel
- - st,lis331dlh-accel
- - st,lsm303dl-accel
- - st,lsm303dlm-accel
- - st,lsm330-accel
- - st,lsm303agr-accel
- - st,lis2dh12-accel
- - st,h3lis331dl-accel
- - st,lng2dm-accel
- - st,lis3l02dq
- - st,lis2dw12
- - st,lis3dhh
- - st,lis3de
- - st,lis2de12
- - st,lis2hh12
- # Gyroscopes
- - st,l3g4200d-gyro
- - st,lsm330d-gyro
- - st,lsm330dl-gyro
- - st,lsm330dlc-gyro
- - st,l3gd20-gyro
- - st,l3gd20h-gyro
- - st,l3g4is-gyro
- - st,lsm330-gyro
- - st,lsm9ds0-gyro
- # Magnetometers
- - st,lsm303agr-magn
- - st,lsm303dlh-magn
- - st,lsm303dlhc-magn
- - st,lsm303dlm-magn
- - st,lis3mdl-magn
- - st,lis2mdl
- - st,lsm9ds1-magn
- - st,iis2mdc
- # Pressure sensors
- - st,lps001wp-press
- - st,lps25h-press
- - st,lps331ap-press
- - st,lps22hb-press
- - st,lps33hw
- - st,lps35hw
- - st,lps22hh
+ oneOf:
+ - description: STMicroelectronics Accelerometers
+ enum:
+ - st,h3lis331dl-accel
+ - st,lis2de12
+ - st,lis2dw12
+ - st,lis2hh12
+ - st,lis2dh12-accel
+ - st,lis331dl-accel
+ - st,lis331dlh-accel
+ - st,lis3de
+ - st,lis3dh-accel
+ - st,lis3dhh
+ - st,lis3l02dq
+ - st,lis3lv02dl-accel
+ - st,lng2dm-accel
+ - st,lsm303agr-accel
+ - st,lsm303dl-accel
+ - st,lsm303dlh-accel
+ - st,lsm303dlhc-accel
+ - st,lsm303dlm-accel
+ - st,lsm330-accel
+ - st,lsm330d-accel
+ - st,lsm330dl-accel
+ - st,lsm330dlc-accel
+ - description: STMicroelectronics Gyroscopes
+ enum:
+ - st,l3g4200d-gyro
+ - st,l3g4is-gyro
+ - st,l3gd20-gyro
+ - st,l3gd20h-gyro
+ - st,lsm330-gyro
+ - st,lsm330d-gyro
+ - st,lsm330dl-gyro
+ - st,lsm330dlc-gyro
+ - st,lsm9ds0-gyro
+ - description: STMicroelectronics Magnetometers
+ enum:
+ - st,lis2mdl
+ - st,lis3mdl-magn
+ - st,lsm303agr-magn
+ - st,lsm303dlh-magn
+ - st,lsm303dlhc-magn
+ - st,lsm303dlm-magn
+ - st,lsm9ds1-magn
+ - description: STMicroelectronics Pressure Sensors
+ enum:
+ - st,lps001wp-press
+ - st,lps22hb-press
+ - st,lps22hh
+ - st,lps25h-press
+ - st,lps331ap-press
+ - st,lps33hw
+ - st,lps35hw
+ - description: IMUs
+ enum:
+ - st,lsm9ds0-imu
+ - description: Deprecated bindings
+ enum:
+ - st,lis302dl-spi
+ - st,lis3lv02d
+ deprecated: true
reg:
maxItems: 1
interrupts:
+ description: interrupt line(s) connected to the DRDY line(s) and/or the
+ Inertial interrupt lines INT1 and INT2 if these exist. This means up to
+ three interrupts, and the DRDY must be the first one if it exists on
+ the package. The trigger edge of the interrupts is sometimes software
+ configurable in the hardware so the operating system should parse this
+ flag and set up the trigger edge as indicated in the device tree.
minItems: 1
+ maxItems: 2
vdd-supply: true
vddio-supply: true
st,drdy-int-pin:
+ description: the pin on the package that will be used to signal
+ "data ready" (valid values 1 or 2). This property is not configurable
+ on all sensors.
$ref: /schemas/types.yaml#/definitions/uint32
- description:
- Some sensors have multiple possible pins via which they can provide
- a data ready interrupt. This selects which one.
- enum:
- - 1
- - 2
+ enum: [1, 2]
drive-open-drain:
$ref: /schemas/types.yaml#/definitions/flag
- description: |
- The interrupt/data ready line will be configured as open drain, which
- is useful if several sensors share the same interrupt line.
+ description: the interrupt/data ready line will be configured
+ as open drain, which is useful if several sensors share the same
+ interrupt line. (This binding is taken from pinctrl.)
+
+ mount-matrix:
+ description: an optional 3x3 mounting rotation matrix.
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ # These have no interrupts
+ - st,lps001wp
+ then:
+ properties:
+ interrupts: false
+ st,drdy-int-pin: false
+ drive-open-drain: false
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ # These have only DRDY
+ - st,lis2mdl
+ - st,lis3l02dq
+ - st,lis3lv02dl-accel
+ - st,lps22hb-press
+ - st,lps22hh
+ - st,lps25h-press
+ - st,lps33hw
+ - st,lps35hw
+ - st,lsm303agr-magn
+ - st,lsm303dlh-magn
+ - st,lsm303dlhc-magn
+ - st,lsm303dlm-magn
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ st,drdy-int-pin: false
required:
- compatible
@@ -110,15 +162,30 @@ examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- accelerometer@1d {
- compatible = "st,lis3lv02dl-accel";
- reg = <0x1d>;
- interrupt-parent = <&gpio2>;
- interrupts = <18 IRQ_TYPE_EDGE_RISING>;
- pinctrl-0 = <&lis3lv02dl_nhk_mode>;
- pinctrl-names = "default";
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@1c {
+ compatible = "st,lis331dl-accel";
+ reg = <0x1c>;
+ st,drdy-int-pin = <1>;
+ vdd-supply = <&ldo1>;
+ vddio-supply = <&ldo2>;
+ interrupt-parent = <&gpio>;
+ interrupts = <18 IRQ_TYPE_EDGE_RISING>, <19 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-cs = <1>;
+
+ l3g4200d: gyroscope@0 {
+ compatible = "st,l3g4200d-gyro";
+ st,drdy-int-pin = <2>;
+ reg = <0>;
+ vdd-supply = <&vcc_io>;
+ vddio-supply = <&vcc_io>;
+ };
};
...
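The new mount-matrix property is not exercised by the examples; as a sketch (the rotation chosen is arbitrary), a sensor mounted rotated 90 degrees around the Z axis could add:

    accelerometer@1c {
        compatible = "st,lis331dl-accel";
        reg = <0x1c>;
        vdd-supply = <&ldo1>;
        vddio-supply = <&ldo2>;
        /* 3x3 rotation matrix, row-major, given as nine strings */
        mount-matrix = "0", "1", "0",
                       "-1", "0", "0",
                       "0", "0", "1";
    };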
diff --git a/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml b/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
new file mode 100644
index 000000000000..347bc16a4671
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/iio/temperature/ti,tmp117.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: "TI TMP117 - Digital temperature sensor with integrated NV memory"
+
+description: |
+ TI TMP117 - Digital temperature sensor with integrated NV memory that supports
+ I2C interface.
+ https://www.ti.com/lit/gpn/tmp1
+
+maintainers:
+ - Puranjay Mohan <puranjay12@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - ti,tmp117
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tmp117@48 {
+ compatible = "ti,tmp117";
+ reg = <0x48>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/fsl-mma8450.txt b/Documentation/devicetree/bindings/input/fsl-mma8450.txt
deleted file mode 100644
index 0b96e5737d3a..000000000000
--- a/Documentation/devicetree/bindings/input/fsl-mma8450.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-* Freescale MMA8450 3-Axis Accelerometer
-
-Required properties:
-- compatible : "fsl,mma8450".
-- reg: the I2C address of MMA8450
-
-Example:
-
-accelerometer: mma8450@1c {
- compatible = "fsl,mma8450";
- reg = <0x1c>;
-};
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
index 34ab5763f494..6cd08bca2c66 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
@@ -8,6 +8,8 @@ PROPERTIES
Definition: must be one of:
"qcom,pm8941-pwrkey"
"qcom,pm8941-resin"
+ "qcom,pmk8350-pwrkey"
+ "qcom,pmk8350-resin"
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml
new file mode 100644
index 000000000000..762e56ee90cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma340.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/cypress,cy8ctma340.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cypress CY8CTMA340 series touchscreen controller bindings
+
+description: The Cypress CY8CTMA340 series (also known as "CYTTSP" after
+ the marketing name Cypress TrueTouch Standard Product) touchscreens can
+ be connected to either I2C or SPI buses.
+
+maintainers:
+ - Javier Martinez Canillas <javier@dowhile0.org>
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ $nodename:
+ pattern: "^touchscreen(@.*)?$"
+
+ compatible:
+ oneOf:
+ - const: cypress,cy8ctma340
+ - const: cypress,cy8ctst341
+ - const: cypress,cyttsp-spi
+ description: Legacy compatible for SPI connected CY8CTMA340
+ deprecated: true
+ - const: cypress,cyttsp-i2c
+ description: Legacy compatible for I2C connected CY8CTMA340
+ deprecated: true
+
+ reg:
+ description: I2C address when used on the I2C bus, or the SPI chip
+ select index when used on the SPI bus
+
+ clock-frequency:
+ description: I2C client clock frequency, defined for the host when using
+ the device on the I2C bus
+ minimum: 0
+ maximum: 400000
+
+ spi-max-frequency:
+ description: SPI clock frequency, defined for the host when using the
+ device on the SPI bus. The maximum throughput is 2 Mbps, so the typical
+ value is 2000000; if higher rates are used, the total throughput still
+ needs to be restricted to 2 Mbps.
+ minimum: 0
+ maximum: 6000000
+
+ interrupts:
+ description: Interrupt to host
+ maxItems: 1
+
+ vcpin-supply:
+ description: Analog power supply regulator on VCPIN pin
+
+ vdd-supply:
+ description: Digital power supply regulator on VDD pin
+
+ reset-gpios:
+ description: Reset line for the touchscreen, should be tagged
+ as GPIO_ACTIVE_LOW
+
+ bootloader-key:
+ description: the 8-byte bootloader key that is required to switch
+ the chip from bootloader mode (default mode) to application mode
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 8
+ maxItems: 8
+
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-fuzz-x: true
+ touchscreen-fuzz-y: true
+
+ active-distance:
+ description: the distance in pixels beyond which a touch must move
+ before movement is detected and reported by the device
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+
+ active-interval-ms:
+ description: the minimum period in ms between consecutive
+ scanning/processing cycles when the chip is in active mode
+ minimum: 0
+ maximum: 255
+
+ lowpower-interval-ms:
+ description: the minimum period in ms between consecutive
+ scanning/processing cycles when the chip is in low-power mode
+ minimum: 0
+ maximum: 2550
+
+ touch-timeout-ms:
+ description: minimum time in ms spent in the active power state while no
+ touches are detected before entering low-power mode
+ minimum: 0
+ maximum: 2550
+
+ use-handshake:
+ description: enable register-based handshake (boolean). This should only
+ be used if the chip is configured to use 'blocking communication with
+ timeout' (in this case the device generates an interrupt at the end of
+ every scanning/processing cycle)
+ $ref: /schemas/types.yaml#/definitions/flag
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - bootloader-key
+ - touchscreen-size-x
+ - touchscreen-size-y
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-cs = <1>;
+ cs-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
+
+ touchscreen@0 {
+ compatible = "cypress,cy8ctma340";
+ reg = <0>;
+ interrupt-parent = <&gpio>;
+ interrupts = <20 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio 21 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&ldo_aux1_reg>;
+ vcpin-supply = <&ldo_aux2_reg>;
+ bootloader-key = /bits/ 8 <0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07>;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <800>;
+ active-interval-ms = <0>;
+ touch-timeout-ms = <255>;
+ lowpower-interval-ms = <10>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt b/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
deleted file mode 100644
index 6ee274aa8b03..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-* Cypress cyttsp touchscreen controller
-
-Required properties:
- - compatible : must be "cypress,cyttsp-i2c" or "cypress,cyttsp-spi"
- - reg : Device I2C address or SPI chip select number
- - spi-max-frequency : Maximum SPI clocking speed of the device (for cyttsp-spi)
- - interrupts : (gpio) interrupt to which the chip is connected
- (see interrupt binding[0]).
- - bootloader-key : the 8-byte bootloader key that is required to switch
- the chip from bootloader mode (default mode) to
- application mode.
- This property has to be specified as an array of 8
- '/bits/ 8' values.
-
-Optional properties:
- - reset-gpios : the reset gpio the chip is connected to
- (see GPIO binding[1] for more details).
- - touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
- - touchscreen-size-y : vertical resolution of touchscreen (in pixels)
- - touchscreen-fuzz-x : horizontal noise value of the absolute input device
- (in pixels)
- - touchscreen-fuzz-y : vertical noise value of the absolute input device
- (in pixels)
- - active-distance : the distance in pixels beyond which a touch must move
- before movement is detected and reported by the device.
- Valid values: 0-15.
- - active-interval-ms : the minimum period in ms between consecutive
- scanning/processing cycles when the chip is in active mode.
- Valid values: 0-255.
- - lowpower-interval-ms : the minimum period in ms between consecutive
- scanning/processing cycles when the chip is in low-power mode.
- Valid values: 0-2550
- - touch-timeout-ms : minimum time in ms spent in the active power state while no
- touches are detected before entering low-power mode.
- Valid values: 0-2550
- - use-handshake : enable register-based handshake (boolean). This should
- only be used if the chip is configured to use 'blocking
- communication with timeout' (in this case the device
- generates an interrupt at the end of every
- scanning/processing cycle).
-
-[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-[1]: Documentation/devicetree/bindings/gpio/gpio.txt
-
-Example:
- &i2c1 {
- /* ... */
- cyttsp@a {
- compatible = "cypress,cyttsp-i2c";
- reg = <0xa>;
- interrupt-parent = <&gpio0>;
- interrupts = <28 0>;
- reset-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
-
- touchscreen-size-x = <800>;
- touchscreen-size-y = <480>;
- touchscreen-fuzz-x = <4>;
- touchscreen-fuzz-y = <7>;
-
- bootloader-key = /bits/ 8 <0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08>;
- active-distance = <8>;
- active-interval-ms = <0>;
- lowpower-interval-ms = <200>;
- touch-timeout-ms = <100>;
- };
-
- /* ... */
- };
-
- &mcspi1 {
- /* ... */
- cyttsp@0 {
- compatible = "cypress,cyttsp-spi";
- spi-max-frequency = <6000000>;
- reg = <0>;
- interrupt-parent = <&gpio0>;
- interrupts = <28 0>;
- reset-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
-
- touchscreen-size-x = <800>;
- touchscreen-size-y = <480>;
- touchscreen-fuzz-x = <4>;
- touchscreen-fuzz-y = <7>;
-
- bootloader-key = /bits/ 8 <0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08>;
- active-distance = <8>;
- active-interval-ms = <0>;
- lowpower-interval-ms = <200>;
- touch-timeout-ms = <100>;
- };
-
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index bfc3a8b5e118..2e8da7470513 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -56,6 +56,7 @@ properties:
wakeup-source: true
vcc-supply: true
+ iovcc-supply: true
gain:
description: Allows setting the sensitivity in the range from 0 to 31.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
deleted file mode 100644
index af5223bb5bdd..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Generic resistive touchscreen ADC
-
-Required properties:
-
- - compatible: must be "resistive-adc-touch"
-The device must be connected to an ADC device that provides channels for
-position measurement and optional pressure.
-Refer to
-https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
-for details
-
- - iio-channels: must have at least two channels connected to an ADC device.
-These should correspond to the channels exposed by the ADC device and should
-have the right index as the ADC device registers them. These channels
-represent the relative position on the "x" and "y" axes.
- - iio-channel-names: must have all the channels' names. Mandatory channels
-are "x" and "y".
-
-Optional properties:
- - iio-channels: The third channel named "pressure" is optional and can be
-used if the ADC device also measures pressure besides position.
-If this channel is missing, pressure will be ignored and the touchscreen
-will only report position.
- - iio-channel-names: optional channel named "pressure".
-
-Example:
-
- resistive_touch: resistive_touch {
- compatible = "resistive-adc-touch";
- touchscreen-min-pressure = <50000>;
- io-channels = <&adc 24>, <&adc 25>, <&adc 26>;
- io-channel-names = "x", "y", "pressure";
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.yaml b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.yaml
new file mode 100644
index 000000000000..7fc22a403d48
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/resistive-adc-touch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic resistive touchscreen ADC
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+description: |
+ Generic ADC based resistive touchscreen controller
+ The device must be connected to an ADC device that provides channels for
+ position measurement and optional pressure.
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ compatible:
+ const: resistive-adc-touch
+
+ io-channels:
+ minItems: 2
+ items:
+ - description: x
+ - description: y
+ - description: pressure (optional)
+ - description: z1 (optional)
+ - description: z2 (optional)
+
+ io-channel-names:
+ oneOf:
+ - items:
+ - enum: [x, y]
+ - enum: [x, y]
+ - items:
+ - enum: [x, y, pressure]
+ - enum: [x, y, pressure]
+ - enum: [x, y, pressure]
+ - items:
+ - enum: [x, y, z1, z2]
+ - enum: [x, y, z1, z2]
+ - enum: [x, y, z1, z2]
+ - enum: [x, y, z1, z2]
+
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-fuzz-x: true
+ touchscreen-fuzz-y: true
+ touchscreen-inverted-x: true
+ touchscreen-inverted-y: true
+ touchscreen-swapped-x-y: true
+ touchscreen-min-pressure: true
+ touchscreen-x-plate-ohms: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - io-channels
+ - io-channel-names
+
+examples:
+ - |
+ touchscreen {
+ compatible = "resistive-adc-touch";
+ io-channels = <&adc 24>, <&adc 25>;
+ io-channel-names = "y", "x";
+ };
+ - |
+ touchscreen {
+ compatible = "resistive-adc-touch";
+ touchscreen-min-pressure = <50000>;
+ io-channels = <&adc 24>, <&adc 25>, <&adc 26>;
+ io-channel-names = "y", "pressure", "x";
+ };
+ - |
+ touchscreen {
+ compatible = "resistive-adc-touch";
+ touchscreen-min-pressure = <50000>;
+ io-channels = <&adc 1>, <&adc 2>, <&adc 3>, <&adc 4>;
+ io-channel-names = "x", "z1", "z2", "y";
+ touchscreen-x-plate-ohms = <800>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml b/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml
new file mode 100644
index 000000000000..1d8ca19fd37a
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/sitronix,st1232.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/sitronix,st1232.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix st1232 or st1633 touchscreen controller
+
+maintainers:
+ - Bastian Hecht <hechtb@gmail.com>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ compatible:
+ enum:
+ - sitronix,st1232
+ - sitronix,st1633
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpios:
+ description: A phandle to the reset GPIO
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@55 {
+ compatible = "sitronix,st1232";
+ reg = <0x55>;
+ interrupts = <2 0>;
+ gpios = <&gpio1 166 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt b/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt
deleted file mode 100644
index 019373253b28..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Sitronix st1232 or st1633 touchscreen controller
-
-Required properties:
-- compatible: must contain one of
- * "sitronix,st1232"
- * "sitronix,st1633"
-- reg: I2C address of the chip
-- interrupts: interrupt to which the chip is connected
-
-Optional properties:
-- gpios: a phandle to the reset GPIO
-
-For additional optional properties see: touchscreen.txt
-
-Example:
-
- i2c@00000000 {
- /* ... */
-
- touchscreen@55 {
- compatible = "sitronix,st1232";
- reg = <0x55>;
- interrupts = <2 0>;
- gpios = <&gpio1 166 0>;
- };
-
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
index 046ace461cc9..4b5b212c772c 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
@@ -74,6 +74,12 @@ properties:
touchscreen-y-mm:
description: vertical length in mm of the touchscreen
+ touchscreen-x-plate-ohms:
+ description: Resistance of the X-plate in Ohms
+
+ touchscreen-y-plate-ohms:
+ description: Resistance of the Y-plate in Ohms
+
dependencies:
touchscreen-size-x: [ touchscreen-size-y ]
touchscreen-size-y: [ touchscreen-size-x ]
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 36c955965d90..5accc0d113be 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -37,6 +37,18 @@ properties:
- qcom,sc7180-npu-noc
- qcom,sc7180-qup-virt
- qcom,sc7180-system-noc
+ - qcom,sc7280-aggre1-noc
+ - qcom,sc7280-aggre2-noc
+ - qcom,sc7280-clk-virt
+ - qcom,sc7280-cnoc2
+ - qcom,sc7280-cnoc3
+ - qcom,sc7280-dc-noc
+ - qcom,sc7280-gem-noc
+ - qcom,sc7280-lpass-ag-noc
+ - qcom,sc7280-mc-virt
+ - qcom,sc7280-mmss-noc
+ - qcom,sc7280-nsp-noc
+ - qcom,sc7280-system-noc
- qcom,sdm845-aggre1-noc
- qcom,sdm845-aggre2-noc
- qcom,sdm845-config-noc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt
deleted file mode 100644
index dd527216c5fb..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* ARM Vectored Interrupt Controller
-
-One or more Vectored Interrupt Controllers (VIC's) can be connected in an ARM
-system for interrupt routing. For multiple controllers they can either be
-nested or have the outputs wire-OR'd together.
-
-Required properties:
-
-- compatible : should be one of
- "arm,pl190-vic"
- "arm,pl192-vic"
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : The number of cells to define the interrupts. Must be 1 as
- the VIC has no configuration options for interrupt sources. The cell is a u32
- and defines the interrupt number.
-- reg : The register bank for the VIC.
-
-Optional properties:
-
-- interrupts : Interrupt source for parent controllers if the VIC is nested.
-- valid-mask : A one cell big bit mask of valid interrupt sources. Each bit
- represents single interrupt source, starting from source 0 at LSb and ending
- at source 31 at MSb. A bit that is set means that the source is wired and
- clear means otherwise. If unspecified, defaults to all valid.
-- valid-wakeup-mask : A one cell big bit mask of interrupt sources that can be
- configured as wake up source for the system. Order of bits is the same as for
- valid-mask property. A set bit means that this interrupt source can be
- configured as a wake up source for the system. If unspecied, defaults to all
- interrupt sources configurable as wake up sources.
-
-Example:
-
- vic0: interrupt-controller@60000 {
- compatible = "arm,pl192-vic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x60000 0x1000>;
-
- valid-mask = <0xffffff7f>;
- valid-wakeup-mask = <0x0000ff7f>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,vic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,vic.yaml
new file mode 100644
index 000000000000..0075e72fe8c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,vic.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,vic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Vectored Interrupt Controller
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description: |+
+ One or more Vectored Interrupt Controllers (VIC's) can be connected in an
+ ARM system for interrupt routing. For multiple controllers they can either
+ be nested or have the outputs wire-OR'd together.
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - arm,pl190-vic
+ - arm,pl192-vic
+ - arm,versatile-vic
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+ description:
+ The number of cells to define the interrupts. It must be 1 as the
+ VIC has no configuration options for interrupt sources. The single
+ cell defines the interrupt number.
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ valid-mask:
+ description:
+ A one cell big bit mask of valid interrupt sources. Each bit
+ represents a single interrupt source, starting from source 0 at
+ LSb and ending at source 31 at MSb. A bit that is set means
+ that the source is wired and clear means otherwise. If unspecified,
+ defaults to all valid.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ valid-wakeup-mask:
+ description:
+ A one cell big bit mask of interrupt sources that can be configured
+ as a wake up source for the system. The order of bits is the same as for
+ the valid-mask property. A set bit means that this interrupt source
+ can be configured as a wake up source for the system. If unspecified,
+ defaults to all interrupt sources configurable as wake up sources.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ // PL192 VIC
+ vic0: interrupt-controller@60000 {
+ compatible = "arm,pl192-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x60000 0x1000>;
+
+ valid-mask = <0xffffff7f>;
+ valid-wakeup-mask = <0x0000ff7f>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
index 3b11a1a15398..bcb5e20fa9ca 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
@@ -35,7 +35,6 @@ properties:
- description: output interrupt 6
- description: output interrupt 7
minItems: 1
- maxItems: 8
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
index 067165c4b836..edf26452dc72 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
@@ -50,7 +50,6 @@ properties:
- const: int2
- const: int3
minItems: 1
- maxItems: 4
'#interrupt-cells':
const: 2
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml
index 9731dd4421a1..051beb45d998 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml
@@ -134,7 +134,7 @@ examples:
/* AM4376 PRU-ICSS */
#include <dt-bindings/interrupt-controller/arm-gic.h>
pruss@0 {
- compatible = "ti,am4376-pruss";
+ compatible = "ti,am4376-pruss1";
reg = <0x0 0x40000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml
index 5951c6f98c74..e87bfbcc6913 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml
@@ -38,7 +38,6 @@ properties:
If provided, then the combined interrupt will be used in preference to
any others.
- minItems: 2
- maxItems: 4
items:
- const: eventq # Event Queue not empty
- const: gerror # Global Error activated
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 9d27aa5111d4..03f2b2d4db30 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -52,10 +52,14 @@ properties:
items:
- const: marvell,ap806-smmu-500
- const: arm,mmu-500
- - description: NVIDIA SoCs that program two ARM MMU-500s identically
+ - description: NVIDIA SoCs that require memory controller interaction
+ and may program multiple ARM MMU-500s identically with the memory
+ controller interleaving translations between multiple instances
+ for improved performance.
items:
- enum:
- nvidia,tegra194-smmu
+ - nvidia,tegra186-smmu
- const: nvidia,smmu-500
- items:
- const: arm,mmu-500
@@ -165,10 +169,11 @@ allOf:
contains:
enum:
- nvidia,tegra194-smmu
+ - nvidia,tegra186-smmu
then:
properties:
reg:
- minItems: 2
+ minItems: 1
maxItems: 2
else:
properties:
diff --git a/Documentation/devicetree/bindings/iommu/iommu.txt b/Documentation/devicetree/bindings/iommu/iommu.txt
index 3c36334e4f94..26ba9e530f13 100644
--- a/Documentation/devicetree/bindings/iommu/iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/iommu.txt
@@ -92,6 +92,24 @@ Optional properties:
tagging DMA transactions with an address space identifier. By default,
this is 0, which means that the device only has one address space.
+- dma-can-stall: When present, the master can wait for a transaction to
+ complete for an indefinite amount of time. Upon a translation fault, some
+ IOMMUs, instead of aborting the translation immediately, may first
+ notify the driver and keep the transaction in flight. This allows the OS
+ to inspect the fault and, for example, make physical pages resident
+ before updating the mappings and completing the transaction. Such an IOMMU
+ accepts a limited number of simultaneously stalled transactions before
+ having to either put back-pressure on the master or abort new faulting
+ transactions.
+
+ Firmware has to opt in to stalling, because most buses and masters don't
+ support it. In particular, it isn't compatible with PCI, where
+ transactions have to complete within a time limit. More generally, it
+ won't work in systems and masters that haven't been designed for
+ stalling. For example, the OS, in order to handle a stalled transaction,
+ may attempt to retrieve pages from secondary storage in a stalled
+ domain, leading to a deadlock.
+
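+  A hypothetical master opting in to stall support could look like this
+  (the node name, compatible string and iommus specifier are illustrative
+  placeholders, not taken from an existing binding):
+
+	dma-master@40000000 {
+		compatible = "vendor,stall-capable-dma";
+		reg = <0x40000000 0x1000>;
+		iommus = <&smmu 0x200>;
+		dma-can-stall;
+	};
+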
Notes:
======
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
index dda44976acc1..02c69a95c332 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
@@ -49,7 +49,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
description:
Specifiers for the MMU fault interrupts. Not required for cache IPMMUs.
items:
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
deleted file mode 100644
index 6ecefea1c6f9..000000000000
--- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-Rockchip IOMMU
-==============
-
-A Rockchip DRM iommu translates io virtual addresses to physical addresses for
-its master device. Each slave device is bound to a single master device, and
-shares its clocks, power domain and irq.
-
-Required properties:
-- compatible : Should be "rockchip,iommu"
-- reg : Address space for the configuration registers
-- interrupts : Interrupt specifier for the IOMMU instance
-- interrupt-names : Interrupt name for the IOMMU instance
-- #iommu-cells : Should be <0>. This indicates the iommu is a
- "single-master" device, and needs no additional information
- to associate with its master device. See:
- Documentation/devicetree/bindings/iommu/iommu.txt
-- clocks : A list of clocks required for the IOMMU to be accessible by
- the host CPU.
-- clock-names : Should contain the following:
- "iface" - Main peripheral bus clock (PCLK/HCL) (required)
- "aclk" - AXI bus clock (required)
-
-Optional properties:
-- rockchip,disable-mmu-reset : Don't use the mmu reset operation.
- Some mmu instances may produce unexpected results
- when the reset operation is used.
-
-Example:
-
- vopl_mmu: iommu@ff940300 {
- compatible = "rockchip,iommu";
- reg = <0xff940300 0x100>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vopl_mmu";
- clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
- clock-names = "aclk", "iface";
- #iommu-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
new file mode 100644
index 000000000000..ba9124f721f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/rockchip,iommu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip IOMMU
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |+
+ A Rockchip DRM iommu translates I/O virtual addresses to physical addresses
+ for its master device. Each slave device is bound to a single master device
+ and shares its clocks, power domain and IRQ.
+
+ For information on assigning an IOMMU controller to its peripheral devices,
+ see the generic IOMMU bindings.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,iommu
+ - rockchip,rk3568-iommu
+
+ reg:
+ items:
+ - description: configuration registers for MMU instance 0
+ - description: configuration registers for MMU instance 1
+ minItems: 1
+
+ interrupts:
+ items:
+ - description: interrupt for MMU instance 0
+ - description: interrupt for MMU instance 1
+ minItems: 1
+
+ clocks:
+ items:
+ - description: Core clock
+ - description: Interface clock
+
+ clock-names:
+ items:
+ - const: aclk
+ - const: iface
+
+ "#iommu-cells":
+ const: 0
+
+ power-domains:
+ maxItems: 1
+
+ rockchip,disable-mmu-reset:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ Do not use the mmu reset operation.
+ Some mmu instances may produce unexpected results
+ when the reset operation is used.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - "#iommu-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ vopl_mmu: iommu@ff940300 {
+ compatible = "rockchip,iommu";
+ reg = <0xff940300 0x100>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml
new file mode 100644
index 000000000000..e0ac68694b63
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/backlight/richtek,rt4831-backlight.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT4831 Backlight
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ RT4831 is a multifunctional device that can provide power to the LCD display
+ and LCD backlight.
+
+ For the LCD backlight, it can provide four-channel WLED driving capability.
+ Each channel's driving current is up to 30mA.
+
+ Datasheet is available at
+ https://www.richtek.com/assets/product_file/RT4831A/DS4831A-05.pdf
+
+allOf:
+ - $ref: common.yaml#
+
+properties:
+ compatible:
+ const: richtek,rt4831-backlight
+
+ default-brightness:
+ minimum: 0
+ maximum: 2048
+
+ max-brightness:
+ minimum: 0
+ maximum: 2048
+
+ richtek,pwm-enable:
+ description: |
+ Specify whether the backlight dimming follows the PWM duty cycle or is under SW control.
+ type: boolean
+
+ richtek,bled-ovp-sel:
+ description: |
+ Backlight OVP level selection; currently supports 17V/21V/25V/29V.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 1
+ minimum: 0
+ maximum: 3
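+ # Assumed mapping of the selector values (not stated explicitly here):
+ # 0 = 17V, 1 = 21V (default), 2 = 25V, 3 = 29V.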
+
+ richtek,channel-use:
+ description: |
+ Backlight LED channels to be used.
+ Bits 0/1/2/3 indicate whether LED channels 1/2/3/4 are enabled or disabled.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 15
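+ # For instance, a board driving all four WLED channels would set
+ # richtek,channel-use = /bits/ 8 <15>; (illustrative example value).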
+
+required:
+ - compatible
+ - richtek,channel-use
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml b/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml
index d07eb00b97c8..bd49c201477d 100644
--- a/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml
@@ -101,11 +101,19 @@ examples:
clocks = <&clock 0 2 1>;
clock-names = "apb_pclk";
};
+ };
- mhu_client_scb: scb@2e000000 {
- compatible = "fujitsu,mb86s70-scb-1.0";
- reg = <0 0x2e000000 0 0x4000>;
+ firmware {
+ scpi {
+ compatible = "arm,scpi";
mboxes = <&mhuA 1>; /* HP-NonSecure */
+ shmem = <&cpu_scp_hpri>; /* HP-NonSecure */
+
+ scpi_devpd: power-controller {
+ compatible = "arm,scpi-power-domains";
+ num-domains = <2>;
+ #power-domain-cells = <1>;
+ };
};
};
@@ -125,10 +133,36 @@ examples:
clocks = <&clock 0 2 1>;
clock-names = "apb_pclk";
};
+ };
- mhu_client_scpi: scpi@2f000000 {
- compatible = "arm,scpi";
- reg = <0 0x2f000000 0 0x200>;
- mboxes = <&mhuB 1 4>; /* HP-NonSecure, 5th doorbell */
+ firmware {
+ scmi {
+ compatible = "arm,scmi";
+ mboxes = <&mhuB 0 0>, /* LP-NonSecure, 1st doorbell */
+ <&mhuB 0 1>; /* LP-NonSecure, 2nd doorbell */
+ mbox-names = "tx", "rx";
+ shmem = <&cpu_scp_lpri0>,
+ <&cpu_scp_lpri1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_devpd: protocol@11 {
+ reg = <0x11>;
+ #power-domain-cells = <1>;
+ };
+
+ scmi_dvfs: protocol@13 {
+ reg = <0x13>;
+ #clock-cells = <1>;
+
+ mboxes = <&mhuB 1 2>, /* HP-NonSecure, 3rd doorbell */
+ <&mhuB 1 3>; /* HP-NonSecure, 4th doorbell */
+ mbox-names = "tx", "rx";
+ shmem = <&cpu_scp_hpri0>,
+ <&cpu_scp_hpri1>;
+ };
};
};
+
+...
diff --git a/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml b/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
index 6608545ea66f..a4f1fe63659a 100644
--- a/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
+++ b/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
@@ -192,18 +192,17 @@ examples:
arm,mhuv2-protocols = <1 1>, <1 7>, <0 2>;
};
- mhu_client: scb@2e000000 {
- compatible = "fujitsu,mb86s70-scb-1.0";
- reg = <0 0x2e000000 0 0x4000>;
-
- mboxes =
- //data-transfer protocol with 5 windows, mhu-tx
- <&mhu_tx 2 0>,
- //data-transfer protocol with 7 windows, mhu-tx
- <&mhu_tx 3 0>,
- //doorbell protocol channel 4, doorbell 27, mhu-tx
- <&mhu_tx 4 27>,
- //data-transfer protocol with 1 window, mhu-rx
- <&mhu_rx 0 0>;
+ mhu_client: dsp@596e8000 {
+ compatible = "fsl,imx8qxp-dsp";
+ reg = <0 0x596e8000 0 0x88000>;
+ clocks = <&adma_lpcg 0>, <&adma_lpcg 1>, <&adma_lpcg 2>;
+ clock-names = "ipg", "ocram", "core";
+ power-domains = <&pd 0>, <&pd 1>, <&pd 2>, <&pd 3>;
+ mbox-names = "txdb0", "txdb1", "rxdb0", "rxdb1";
+ mboxes = <&mhu_tx 2 0>, //data-transfer protocol with 5 windows, mhu-tx
+ <&mhu_tx 3 0>, //data-transfer protocol with 7 windows, mhu-tx
+ <&mhu_rx 2 27>, //doorbell protocol channel 2, doorbell 27, mhu-rx
+ <&mhu_rx 0 0>; //data-transfer protocol with 1 window, mhu-rx
+ memory-region = <&dsp_reserved>;
};
};
diff --git a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
deleted file mode 100644
index 12371f5c6cd9..000000000000
--- a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
+++ /dev/null
@@ -1,184 +0,0 @@
-OMAP2+ and K3 Mailbox
-=====================
-
-The OMAP mailbox hardware facilitates communication between different processors
-using a queued mailbox interrupt mechanism. The IP block is external to the
-various processor subsystems and is connected on an interconnect bus. The
-communication is achieved through a set of registers for message storage and
-interrupt configuration registers.
-
-Each mailbox IP block/cluster has a certain number of h/w fifo queues and output
-interrupt lines. An output interrupt line is routed to an interrupt controller
-within a processor subsystem, and there can be more than one line going to a
-specific processor's interrupt controller. The interrupt line connections are
-fixed for an instance and are dictated by the IP integration into the SoC
-(excluding the SoCs that have a Interrupt Crossbar IP). Each interrupt line is
-programmable through a set of interrupt configuration registers, and have a rx
-and tx interrupt source per h/w fifo. Communication between different processors
-is achieved through the appropriate programming of the rx and tx interrupt
-sources on the appropriate interrupt lines.
-
-The number of h/w fifo queues and interrupt lines dictate the usable registers.
-All the current OMAP SoCs except for the newest DRA7xx SoC has a single IP
-instance. DRA7xx has multiple instances with different number of h/w fifo queues
-and interrupt lines between different instances. The interrupt lines can also be
-routed to different processor sub-systems on DRA7xx as they are routed through
-the Crossbar, a kind of interrupt router/multiplexer. The K3 AM65x and J721E
-SoCs has each of these instances form a cluster and combine multiple clusters
-into a single IP block present within the Main NavSS. The interrupt lines from
-all these clusters are multiplexed and routed to different processor subsystems
-over a limited number of common interrupt output lines of an Interrupt Router.
-The AM64x SoCS also uses a single IP block comprising of multiple clusters,
-but the number of clusters are smaller, and the interrupt output lines are
-connected directly to various processors.
-
-Mailbox Device Node:
-====================
-A Mailbox device node is used to represent a Mailbox IP instance/cluster within
-a SoC. The sub-mailboxes are represented as child nodes of this parent node.
-
-Required properties:
---------------------
-- compatible: Should be one of the following,
- "ti,omap2-mailbox" for OMAP2420, OMAP2430 SoCs
- "ti,omap3-mailbox" for OMAP3430, OMAP3630 SoCs
- "ti,omap4-mailbox" for OMAP44xx, OMAP54xx, AM33xx,
- AM43xx and DRA7xx SoCs
- "ti,am654-mailbox" for K3 AM65x and J721E SoCs
- "ti,am64-mailbox" for K3 AM64x SoCs
-- reg: Contains the mailbox register address range (base
- address and length)
-- interrupts: Contains the interrupt information for the mailbox
- device. The format is dependent on which interrupt
- controller the Mailbox device uses
-- #mbox-cells: Common mailbox binding property to identify the number
- of cells required for the mailbox specifier. Should be
- 1
-- ti,mbox-num-users: Number of targets (processor devices) that the mailbox
- device can interrupt
-- ti,mbox-num-fifos: Number of h/w fifo queues within the mailbox IP block
-
-SoC-specific Required properties:
----------------------------------
-The following are mandatory properties for the OMAP architecture based SoCs
-only:
-- ti,hwmods: Name of the hwmod associated with the mailbox. This
- should be defined in the mailbox node only if the node
- is not defined as a child node of a corresponding sysc
- interconnect node.
-
-The following are mandatory properties for the K3 AM65x and J721E SoCs only:
-- interrupt-parent: Should contain a phandle to the TI-SCI interrupt
- controller node that is used to dynamically program
- the interrupt routes between the IP and the main GIC
- controllers. See the following binding for additional
- details,
- Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml
-
-Child Nodes:
-============
-A child node is used for representing the actual sub-mailbox device that is
-used for the communication between the host processor and a remote processor.
-Each child node should have a unique node name across all the different
-mailbox device nodes.
-
-Required properties:
---------------------
-- ti,mbox-tx: sub-mailbox descriptor property defining a Tx fifo
-- ti,mbox-rx: sub-mailbox descriptor property defining a Rx fifo
-
-Sub-mailbox Descriptor Data
----------------------------
-Each of the above ti,mbox-tx and ti,mbox-rx properties should have 3 cells of
-data that represent the following:
- Cell #1 (fifo_id) - mailbox fifo id used either for transmitting
- (ti,mbox-tx) or for receiving (ti,mbox-rx)
- Cell #2 (irq_id) - irq identifier index number to use from the parent's
- interrupts data. Should be 0 for most of the cases, a
- positive index value is seen only on mailboxes that have
- multiple interrupt lines connected to the MPU processor.
- Cell #3 (usr_id) - mailbox user id for identifying the interrupt line
- associated with generating a tx/rx fifo interrupt.
-
-Optional Properties:
---------------------
-- ti,mbox-send-noirq: Quirk flag to allow the client user of this sub-mailbox
- to send messages without triggering a Tx ready interrupt,
- and to control the Tx ticker. Should be used only on
- sub-mailboxes used to communicate with WkupM3 remote
- processor on AM33xx/AM43xx SoCs.
-
-Mailbox Users:
-==============
-A device needing to communicate with a target processor device should specify
-them using the common mailbox binding properties, "mboxes" and the optional
-"mbox-names" (please see Documentation/devicetree/bindings/mailbox/mailbox.txt
-for details). Each value of the mboxes property should contain a phandle to the
-mailbox controller device node and an args specifier that will be the phandle to
-the intended sub-mailbox child node to be used for communication. The equivalent
-"mbox-names" property value can be used to give a name to the communication channel
-to be used by the client user.
-
-
-Example:
---------
-
-1. /* OMAP4 */
-mailbox: mailbox@4a0f4000 {
- compatible = "ti,omap4-mailbox";
- reg = <0x4a0f4000 0x200>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "mailbox";
- #mbox-cells = <1>;
- ti,mbox-num-users = <3>;
- ti,mbox-num-fifos = <8>;
- mbox_ipu: mbox_ipu {
- ti,mbox-tx = <0 0 0>;
- ti,mbox-rx = <1 0 0>;
- };
- mbox_dsp: mbox_dsp {
- ti,mbox-tx = <3 0 0>;
- ti,mbox-rx = <2 0 0>;
- };
-};
-
-dsp {
- ...
- mboxes = <&mailbox &mbox_dsp>;
- ...
-};
-
-2. /* AM33xx */
-mailbox: mailbox@480c8000 {
- compatible = "ti,omap4-mailbox";
- reg = <0x480C8000 0x200>;
- interrupts = <77>;
- ti,hwmods = "mailbox";
- #mbox-cells = <1>;
- ti,mbox-num-users = <4>;
- ti,mbox-num-fifos = <8>;
- mbox_wkupm3: wkup_m3 {
- ti,mbox-tx = <0 0 0>;
- ti,mbox-rx = <0 0 3>;
- };
-};
-
-3. /* AM65x */
-&cbass_main {
- cbass_main_navss: interconnect0 {
- mailbox0_cluster0: mailbox@31f80000 {
- compatible = "ti,am654-mailbox";
- reg = <0x00 0x31f80000 0x00 0x200>;
- #mbox-cells = <1>;
- ti,mbox-num-users = <4>;
- ti,mbox-num-fifos = <16>;
- interrupt-parent = <&intr_main_navss>;
- interrupts = <164 0>;
-
- mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
- ti,mbox-tx = <1 0 0>;
- ti,mbox-rx = <0 0 0>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
index 3b7ab61a144f..b15da9ba90b2 100644
--- a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
@@ -32,7 +32,6 @@ properties:
- description: tx channel free
- description: wakeup source
minItems: 2
- maxItems: 3
interrupt-names:
items:
@@ -40,7 +39,6 @@ properties:
- const: tx
- const: wakeup
minItems: 2
- maxItems: 3
wakeup-source: true
diff --git a/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
new file mode 100644
index 000000000000..e864d798168d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
@@ -0,0 +1,308 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/ti,omap-mailbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI OMAP2+ and K3 Mailbox devices
+
+maintainers:
+ - Suman Anna <s-anna@ti.com>
+
+description: |
+ The OMAP Mailbox hardware facilitates communication between different
+ processors using a queued mailbox interrupt mechanism. The IP block is
+ external to the various processor subsystems and is connected on an
+ interconnect bus. The communication is achieved through a set of registers
+ for message storage and interrupt configuration registers.
+
+ Each mailbox IP block/cluster has a certain number of h/w fifo queues and
+ output interrupt lines. An output interrupt line is routed to an interrupt
+ controller within a processor subsystem, and there can be more than one line
+ going to a specific processor's interrupt controller. The interrupt line
+ connections are fixed for an instance and are dictated by the IP integration
+ into the SoC (excluding the SoCs that have an Interrupt Crossbar or an
+ Interrupt Router IP). Each interrupt line is programmable through a set of
+ interrupt configuration registers, and has an rx and tx interrupt source per
+ h/w fifo. Communication between different processors is achieved through the
+ appropriate programming of the rx and tx interrupt sources on the appropriate
+ interrupt lines.
+
+ The number of h/w fifo queues and interrupt lines dictate the usable
+ registers. All the current OMAP SoCs except for the newest DRA7xx SoC have a
+ single IP instance. DRA7xx has multiple instances with different numbers of
+ h/w fifo queues and interrupt lines between different instances. The interrupt
+ lines can also be routed to different processor sub-systems on DRA7xx as they
+ are routed through the Crossbar, a kind of interrupt router/multiplexer. The
+ K3 AM65x, J721E and J7200 SoCs have each of these instances form a cluster and
+ combine multiple clusters into a single IP block present within the Main
+ NavSS. The interrupt lines from all these clusters are multiplexed and routed
+ to different processor subsystems over a limited number of common interrupt
+ output lines of an Interrupt Router. The AM64x SoCs also use a single IP
+ block comprising multiple clusters, but the number of clusters is
+ smaller, and the interrupt output lines are connected directly to various
+ processors.
+
+ Mailbox Controller Nodes
+ =========================
+ A Mailbox device node is used to represent a Mailbox IP instance/cluster
+ within a SoC. The sub-mailboxes (actual communication channels) are
+ represented as child nodes of this parent node.
+
+ Mailbox Users
+ ==============
+ A device needing to communicate with a target processor device should specify
+ them using the common mailbox binding properties, "mboxes" and the optional
+ "mbox-names" (please see Documentation/devicetree/bindings/mailbox/mailbox.txt
+ for details). Each value of the mboxes property should contain a phandle to
+ the mailbox controller device node and an args specifier that will be the
+ phandle to the intended sub-mailbox child node to be used for communication.
+ The equivalent "mbox-names" property value can be used to give a name to the
+ communication channel to be used by the client user.
+
+$defs:
+ omap-mbox-descriptor:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ The omap-mbox-descriptor is made up of 3 cells and represents a single
+ uni-directional communication channel. A typical sub-mailbox device uses
+ two such channels - one for transmitting (Tx) and one for receiving (Rx).
+ items:
+ - description:
+ mailbox fifo id used either for transmitting on ti,mbox-tx channel or
+ for receiving on ti,mbox-rx channel (fifo_id). This is the hardware
+ fifo number within a mailbox cluster.
+ - description:
+ irq identifier index number to use from the parent's interrupts data.
+ Should be 0 for most of the cases, a positive index value is seen only
+ on mailboxes that have multiple interrupt lines connected to the MPU
+ processor (irq_id). This is an index number in the listed interrupts
+ property in the DT nodes.
+ - description:
+ mailbox user id for identifying the interrupt line associated with
+ generating a tx/rx fifo interrupt (usr_id). This is the hardware
+ user id number within a mailbox cluster.
+
+ omap-sub-mailbox:
+ type: object
+ description:
+ The omap-sub-mailbox is a child node within a Mailbox controller device
+ node and represents the actual communication channel used to send and
+ receive messages between the host processor and a remote processor. Each
+ child node should have a unique node name across all the different mailbox
+ device nodes.
+
+ properties:
+ ti,mbox-tx:
+ $ref: "#/$defs/omap-mbox-descriptor"
+ description: sub-mailbox descriptor property defining a Tx fifo.
+
+ ti,mbox-rx:
+ $ref: "#/$defs/omap-mbox-descriptor"
+ description: sub-mailbox descriptor property defining a Rx fifo.
+
+ ti,mbox-send-noirq:
+ type: boolean
+ description:
+ Quirk flag to allow the client user of this sub-mailbox to send
+ messages without triggering a Tx ready interrupt, and to control
+ the Tx ticker. Should be used only on sub-mailboxes used to
+ communicate with WkupM3 remote processor on AM33xx/AM43xx SoCs.
+
+ required:
+ - ti,mbox-tx
+ - ti,mbox-rx
+
+properties:
+ compatible:
+ enum:
+ - ti,omap2-mailbox # for OMAP2420, OMAP2430 SoCs
+ - ti,omap3-mailbox # for OMAP3430, OMAP3630 SoCs
+ - ti,omap4-mailbox # for OMAP44xx, OMAP54xx, AM33xx, AM43xx and DRA7xx SoCs
+ - ti,am654-mailbox # for K3 AM65x, J721E and J7200 SoCs
+ - ti,am64-mailbox # for K3 AM64x SoCs
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Contains the interrupt information for the mailbox device. The format is
+ dependent on which interrupt controller the Mailbox device uses. The
+ number of interrupts listed will at most be the value specified in the
+ ti,mbox-num-users property, but is usually limited by the number of
+ interrupts reaching the main processor. An interrupt-parent property
+ is required on SoCs where the interrupt lines are connected through an
+ Interrupt Router before reaching the main processor's GIC.
+
+ "#mbox-cells":
+ const: 1
+ description:
+ The specifier is a phandle to an omap-sub-mailbox device.
+
+ ti,mbox-num-users:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Number of targets (processor devices) that the mailbox device can
+ interrupt.
+
+ ti,mbox-num-fifos:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Number of h/w fifo queues within the mailbox IP block.
+
+ ti,hwmods:
+ $ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
+ description:
+ Name of the hwmod associated with the mailbox. This should be defined
+ in the mailbox node only if the node is not defined as a child node of
+ a corresponding sysc interconnect node.
+
+ This property is only needed on some legacy OMAP SoCs which have not
+ yet been converted to the ti,sysc interconnect hierarchy, but is
+ otherwise considered obsolete.
+
+patternProperties:
+ "^mbox-[a-z0-9-]+$":
+ $ref: "#/$defs/omap-sub-mailbox"
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+ - ti,mbox-num-users
+ - ti,mbox-num-fifos
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,am654-mailbox
+ then:
+ required:
+ - interrupt-parent
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,am654-mailbox
+ - ti,am64-mailbox
+ then:
+ properties:
+ ti,mbox-num-users:
+ const: 4
+ ti,mbox-num-fifos:
+ const: 16
+ interrupts:
+ minItems: 1
+ maxItems: 4
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,omap4-mailbox
+ then:
+ properties:
+ ti,mbox-num-users:
+ enum: [3, 4]
+ ti,mbox-num-fifos:
+ enum: [8, 12]
+ interrupts:
+ minItems: 1
+ maxItems: 4
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,omap3-mailbox
+ then:
+ properties:
+ ti,mbox-num-users:
+ const: 2
+ ti,mbox-num-fifos:
+ const: 2
+ interrupts:
+ minItems: 1
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - ti,omap2-mailbox
+ then:
+ properties:
+ ti,mbox-num-users:
+ const: 4
+ ti,mbox-num-fifos:
+ const: 6
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+additionalProperties: false
+
+examples:
+ - |
+ /* OMAP4 */
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ mailbox: mailbox@4a0f4000 {
+ compatible = "ti,omap4-mailbox";
+ reg = <0x4a0f4000 0x200>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <3>;
+ ti,mbox-num-fifos = <8>;
+
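+ /* sub-mailbox cells are <fifo_id irq_id usr_id>, see the descriptor definition above */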
+ mbox_ipu: mbox-ipu {
+ ti,mbox-tx = <0 0 0>;
+ ti,mbox-rx = <1 0 0>;
+ };
+ mbox_dsp: mbox-dsp {
+ ti,mbox-tx = <3 0 0>;
+ ti,mbox-rx = <2 0 0>;
+ };
+ };
+
+ dsp {
+ mboxes = <&mailbox &mbox_dsp>;
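+ /* optionally, mbox-names = "dsp"; can name the channel for the client */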
+ };
+
+ - |
+ /* AM33xx */
+ mailbox1: mailbox@480c8000 {
+ compatible = "ti,omap4-mailbox";
+ reg = <0x480c8000 0x200>;
+ interrupts = <77>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <8>;
+
+ mbox_wkupm3: mbox-wkup-m3 {
+ ti,mbox-tx = <0 0 0>;
+ ti,mbox-rx = <0 0 3>;
+ ti,mbox-send-noirq;
+ };
+ };
+
+ - |
+ /* AM65x */
+ mailbox0_cluster0: mailbox@31f80000 {
+ compatible = "ti,am654-mailbox";
+ reg = <0x31f80000 0x200>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ interrupt-parent = <&intr_main_navss>;
+ interrupts = <436>;
+
+ mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
+ ti,mbox-tx = <1 0 0>;
+ ti,mbox-rx = <0 0 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
index b902495d278b..5044c4bb94e0 100644
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -67,7 +67,6 @@ properties:
clock-names:
minItems: 4
- maxItems: 5
items:
- const: dos_parser
- const: dos
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml b/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
index bcfd93739b4f..3ce4af143a3a 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
@@ -36,7 +36,13 @@ properties:
maxItems: 1
port:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
ports: true
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7604.yaml b/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
index df634b0c1f8c..de15cebe2955 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
@@ -30,7 +30,6 @@ properties:
reg-names:
minItems: 1
- maxItems: 13
items:
- const: main
- enum: [ avlink, cec, infoframe, esdp, dpp, afe, rep, edid, hdmi, test, cp, vdp ]
diff --git a/Documentation/devicetree/bindings/media/i2c/imx258.yaml b/Documentation/devicetree/bindings/media/i2c/imx258.yaml
index 515317eff41a..cde0f7383b2a 100644
--- a/Documentation/devicetree/bindings/media/i2c/imx258.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/imx258.yaml
@@ -49,7 +49,7 @@ properties:
# See ../video-interfaces.txt for more details
port:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
properties:
diff --git a/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml b/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
index ee16102fdfe7..02f656e78700 100644
--- a/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
@@ -111,17 +111,10 @@ properties:
i2c-mux:
type: object
+ $ref: /schemas/i2c/i2c-mux.yaml#
+ unevaluatedProperties: false
description: |
- Each GMSL link is modelled as a child bus of an i2c bus
- multiplexer/switch, in accordance with bindings described in
- Documentation/devicetree/bindings/i2c/i2c-mux.txt.
-
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ Each GMSL link is modelled as a child bus of an i2c bus multiplexer/switch.
patternProperties:
"^i2c@[0-3]$":
@@ -133,12 +126,6 @@ properties:
channels.
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
reg:
description: The index of the GMSL channel.
maxItems: 1
@@ -173,10 +160,6 @@ properties:
additionalProperties: false
- additionalProperties: false
-
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml
index 9149f5685688..246dc5fec716 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml
@@ -45,7 +45,7 @@ properties:
port:
description: MIPI CSI-2 transmitter port
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
properties:
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml
index 0699c7e4fdeb..b962863e4f65 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml
@@ -45,7 +45,7 @@ properties:
port:
description: MIPI CSI-2 transmitter port
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
properties:
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
index 27cc5b7ff613..f5055b9db693 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
@@ -37,7 +37,7 @@ properties:
port:
additionalProperties: false
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
properties:
endpoint:
diff --git a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
index c14c7d827b00..b39b84c5f012 100644
--- a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
+++ b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
@@ -43,7 +43,6 @@ properties:
clocks:
minItems: 1
- maxItems: 3
items:
- description: AXI bus interface clock
- description: Peripheral clock
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
index 04013e5dd044..90b4af2c9724 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
@@ -30,7 +30,6 @@ properties:
power-domain-names:
minItems: 2
- maxItems: 3
items:
- const: venus
- const: vcodec0
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
index 04b9af4db191..177bf81544b1 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
@@ -30,7 +30,6 @@ properties:
power-domain-names:
minItems: 3
- maxItems: 4
items:
- const: venus
- const: vcodec0
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml
index 7b81bd7f2399..ebf8f3d866a5 100644
--- a/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sm8250-venus.yaml
@@ -30,7 +30,6 @@ properties:
power-domain-names:
minItems: 2
- maxItems: 3
items:
- const: venus
- const: vcodec0
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.yaml b/Documentation/devicetree/bindings/media/renesas,drif.yaml
index 9cd56ff2c316..817a6d566738 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,drif.yaml
@@ -78,7 +78,6 @@ properties:
dma-names:
minItems: 1
- maxItems: 2
items:
- const: rx
- const: rx
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index 39bb6db2fb32..c0442e79cbb4 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -120,7 +120,8 @@ properties:
properties:
port@0:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
Input port node, single endpoint describing a parallel input source.
diff --git a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
new file mode 100644
index 000000000000..01c9acf9275d
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/arm,pl353-smc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM PL353 Static Memory Controller (SMC) device-tree bindings
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+ - Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+
+description:
+ The PL353 Static Memory Controller is a bus to which two kinds of memory
+ interfaces can be connected: NAND and memory-mapped interfaces (such as
+ SRAM or NOR).
+
+# We need a select here so we don't match all nodes with 'arm,primecell'
+select:
+ properties:
+ compatible:
+ contains:
+ const: arm,pl353-smc-r2p1
+ required:
+ - compatible
+
+properties:
+ $nodename:
+ pattern: "^memory-controller@[0-9a-f]+$"
+
+ compatible:
+ items:
+ - const: arm,pl353-smc-r2p1
+ - const: arm,primecell
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 1
+
+ reg:
+ items:
+ - description:
+ Configuration registers for the host and sub-controllers.
+ The three chip select regions are defined in 'ranges'.
+
+ clocks:
+ items:
+ - description: clock for the memory device bus
+ - description: main clock of the SMC
+
+ clock-names:
+ items:
+ - const: memclk
+ - const: apb_pclk
+
+ ranges:
+ minItems: 1
+ description: |
+ Memory bus areas for interacting with the devices. Reflects
+ the memory layout with four integer values following:
+ <cs-number> 0 <offset> <size>
+ items:
+ - description: NAND bank 0
+ - description: NOR/SRAM bank 0
+ - description: NOR/SRAM bank 1
+
+ interrupts: true
+
+patternProperties:
+ "@[0-3],[a-f0-9]+$":
+ type: object
+ description: |
+ The child device node represents the controller connected to the SMC
+ bus. The controller can be a NAND controller or a pair of any memory
+ mapped controllers such as NOR and SRAM controllers.
+
+ properties:
+ compatible:
+ description:
+ Compatible of memory controller.
+
+ reg:
+ items:
+ - items:
+ - description: |
+ Chip-select ID, as in the parent range property.
+ minimum: 0
+ maximum: 2
+ - description: |
+ Offset of the memory region requested by the device.
+ - description: |
+ Length of the memory region requested by the device.
+
+ required:
+ - compatible
+ - reg
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ smcc: memory-controller@e000e000 {
+ compatible = "arm,pl353-smc-r2p1", "arm,primecell";
+ reg = <0xe000e000 0x0001000>;
+ clock-names = "memclk", "apb_pclk";
+ clocks = <&clkc 11>, <&clkc 44>;
+ ranges = <0x0 0x0 0xe1000000 0x1000000 /* Nand CS region */
+ 0x1 0x0 0xe2000000 0x2000000 /* SRAM/NOR CS0 region */
+ 0x2 0x0 0xe4000000 0x2000000>; /* SRAM/NOR CS1 region */
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ nfc0: nand-controller@0,0 {
+ compatible = "arm,pl353-nand-r2p1";
+ reg = <0 0 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
index a08a32340987..e87e4382807c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
@@ -53,14 +53,12 @@ properties:
apb and smi are mandatory. the async is only for generation 1 smi HW.
gals(global async local sync) also is optional, see below.
minItems: 2
- maxItems: 4
items:
- description: apb is Advanced Peripheral Bus clock, It's the clock for
setting the register.
- description: smi is the clock for transfer data and command.
- - description: async is asynchronous clock, it help transform the smi
- clock into the emi clock domain.
- - description: gals0 is the path0 clock of gals.
+ - description: Either the asynchronous clock that helps transform the smi
+ clock into the emi clock domain on Gen1 h/w, or the path0 clock of gals.
- description: gals1 is the path1 clock of gals.
clock-names:
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
index 7ed7839ff0a7..2353f6cf3c80 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
@@ -37,7 +37,6 @@ properties:
description: |
apb and smi are mandatory. gals(global async local sync) is optional.
minItems: 2
- maxItems: 3
items:
- description: apb is Advanced Peripheral Bus clock, It's the clock for
setting the register.
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.txt
deleted file mode 100644
index d2250498c36d..000000000000
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.txt
+++ /dev/null
@@ -1,130 +0,0 @@
-Embedded Memory Controller
-
-Properties:
-- name : Should be emc
-- #address-cells : Should be 1
-- #size-cells : Should be 0
-- compatible : Should contain "nvidia,tegra20-emc".
-- reg : Offset and length of the register set for the device
-- nvidia,use-ram-code : If present, the sub-nodes will be addressed
- and chosen using the ramcode board selector. If omitted, only one
- set of tables can be present and said tables will be used
- irrespective of ram-code configuration.
-- interrupts : Should contain EMC General interrupt.
-- clocks : Should contain EMC clock.
-- nvidia,memory-controller : Phandle of the Memory Controller node.
-- #interconnect-cells : Should be 0.
-- operating-points-v2: See ../bindings/opp/opp.txt for details.
-
-For each opp entry in 'operating-points-v2' table:
-- opp-supported-hw: One bitfield indicating SoC process ID mask
-
- A bitwise AND is performed against this value and if any bit
- matches, the OPP gets enabled.
-
-Optional properties:
-- power-domains: Phandle of the SoC "core" power domain.
-
-Child device nodes describe the memory settings for different configurations and clock rates.
-
-Example:
-
- opp_table: opp-table {
- compatible = "operating-points-v2";
-
- opp@36000000 {
- opp-microvolt = <950000 950000 1300000>;
- opp-hz = /bits/ 64 <36000000>;
- };
- ...
- };
-
- memory-controller@7000f400 {
- #address-cells = < 1 >;
- #size-cells = < 0 >;
- #interconnect-cells = <0>;
- compatible = "nvidia,tegra20-emc";
- reg = <0x7000f400 0x400>;
- interrupts = <0 78 0x04>;
- clocks = <&tegra_car TEGRA20_CLK_EMC>;
- nvidia,memory-controller = <&mc>;
- power-domains = <&domain>;
- operating-points-v2 = <&opp_table>;
- }
-
-
-Embedded Memory Controller ram-code table
-
-If the emc node has the nvidia,use-ram-code property present, then the
-next level of nodes below the emc table are used to specify which settings
-apply for which ram-code settings.
-
-If the emc node lacks the nvidia,use-ram-code property, this level is omitted
-and the tables are stored directly under the emc node (see below).
-
-Properties:
-
-- name : Should be emc-tables
-- nvidia,ram-code : the binary representation of the ram-code board strappings
- for which this node (and children) are valid.
-
-
-
-Embedded Memory Controller configuration table
-
-This is a table containing the EMC register settings for the various
-operating speeds of the memory controller. They are always located as
-subnodes of the emc controller node.
-
-There are two ways of specifying which tables to use:
-
-* The simplest is if there is just one set of tables in the device tree,
- and they will always be used (based on which frequency is used).
- This is the preferred method, especially when firmware can fill in
- this information based on the specific system information and just
- pass it on to the kernel.
-
-* The slightly more complex one is when more than one memory configuration
- might exist on the system. The Tegra20 platform handles this during
- early boot by selecting one out of possible 4 memory settings based
- on a 2-pin "ram code" bootstrap setting on the board. The values of
- these strappings can be read through a register in the SoC, and thus
- used to select which tables to use.
-
-Properties:
-- name : Should be emc-table
-- compatible : Should contain "nvidia,tegra20-emc-table".
-- reg : either an opaque enumerator to tell different tables apart, or
- the valid frequency for which the table should be used (in kHz).
-- clock-frequency : the clock frequency for the EMC at which this
- table should be used (in kHz).
-- nvidia,emc-registers : a 46 word array of EMC registers to be programmed
- for operation at the 'clock-frequency' setting.
- The order and contents of the registers are:
- RC, RFC, RAS, RP, R2W, W2R, R2P, W2P, RD_RCD, WR_RCD, RRD, REXT,
- WDV, QUSE, QRST, QSAFE, RDV, REFRESH, BURST_REFRESH_NUM, PDEX2WR,
- PDEX2RD, PCHG2PDEN, ACT2PDEN, AR2PDEN, RW2PDEN, TXSR, TCKE, TFAW,
- TRPAB, TCLKSTABLE, TCLKSTOP, TREFBW, QUSE_EXTRA, FBIO_CFG6, ODT_WRITE,
- ODT_READ, FBIO_CFG5, CFG_DIG_DLL, DLL_XFORM_DQS, DLL_XFORM_QUSE,
- ZCAL_REF_CNT, ZCAL_WAIT_CNT, AUTO_CAL_INTERVAL, CFG_CLKTRIM_0,
- CFG_CLKTRIM_1, CFG_CLKTRIM_2
-
- emc-table@166000 {
- reg = <166000>;
- compatible = "nvidia,tegra20-emc-table";
- clock-frequency = < 166000 >;
- nvidia,emc-registers = < 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 >;
- };
-
- emc-table@333000 {
- reg = <333000>;
- compatible = "nvidia,tegra20-emc-table";
- clock-frequency = < 333000 >;
- nvidia,emc-registers = < 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 >;
- };
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml
new file mode 100644
index 000000000000..cac6842dc8f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-emc.yaml
@@ -0,0 +1,230 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/nvidia,tegra20-emc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20 SoC External Memory Controller
+
+maintainers:
+ - Dmitry Osipenko <digetx@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+ The External Memory Controller (EMC) interfaces with the off-chip SDRAM to
+ service the request stream sent from the Memory Controller. The EMC also has
+ various performance-affecting settings beyond the obvious SDRAM configuration
+ parameters and initialization settings. Tegra20 EMC supports multiple JEDEC
+ standard protocols: DDR1, LPDDR2 and DDR2.
+
+properties:
+ compatible:
+ const: nvidia,tegra20-emc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ "#interconnect-cells":
+ const: 0
+
+ nvidia,memory-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle of the Memory Controller node.
+
+ power-domains:
+ maxItems: 1
+ description:
+ Phandle of the SoC "core" power domain.
+
+ operating-points-v2:
+ description:
+ Should contain freqs and voltages and opp-supported-hw property, which
+ is a bitfield indicating SoC process ID mask.
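+ # An OPP entry in that table may look roughly as below (values adapted
+ # from the old text binding; the opp-supported-hw mask is illustrative):
+ #   opp-36000000 {
+ #     opp-hz = /bits/ 64 <36000000>;
+ #     opp-microvolt = <950000 950000 1300000>;
+ #     opp-supported-hw = <0x0003>;
+ #   };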
+
+ nvidia,use-ram-code:
+ type: boolean
+ description:
+ If present, the emc-tables@ sub-nodes will be addressed and chosen using
+ the ram-code board selector.
+
+$defs:
+ emc-table:
+ type: object
+ properties:
+ compatible:
+ const: nvidia,tegra20-emc-table
+
+ clock-frequency:
+ description:
+ Memory clock rate in kHz.
+ minimum: 1000
+ maximum: 900000
+
+ reg:
+ maxItems: 1
+ description:
+ Either an opaque enumerator to tell different tables apart, or
+ the valid frequency for which the table should be used (in kHz).
+
+ nvidia,emc-registers:
+ description:
+ EMC timing characterization data. These are the registers
+ (see section "15.4.1 EMC Registers" in the TRM) whose values
+ need to be specified, according to the board documentation.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: EMC_RC
+ - description: EMC_RFC
+ - description: EMC_RAS
+ - description: EMC_RP
+ - description: EMC_R2W
+ - description: EMC_W2R
+ - description: EMC_R2P
+ - description: EMC_W2P
+ - description: EMC_RD_RCD
+ - description: EMC_WR_RCD
+ - description: EMC_RRD
+ - description: EMC_REXT
+ - description: EMC_WDV
+ - description: EMC_QUSE
+ - description: EMC_QRST
+ - description: EMC_QSAFE
+ - description: EMC_RDV
+ - description: EMC_REFRESH
+ - description: EMC_BURST_REFRESH_NUM
+ - description: EMC_PDEX2WR
+ - description: EMC_PDEX2RD
+ - description: EMC_PCHG2PDEN
+ - description: EMC_ACT2PDEN
+ - description: EMC_AR2PDEN
+ - description: EMC_RW2PDEN
+ - description: EMC_TXSR
+ - description: EMC_TCKE
+ - description: EMC_TFAW
+ - description: EMC_TRPAB
+ - description: EMC_TCLKSTABLE
+ - description: EMC_TCLKSTOP
+ - description: EMC_TREFBW
+ - description: EMC_QUSE_EXTRA
+ - description: EMC_FBIO_CFG6
+ - description: EMC_ODT_WRITE
+ - description: EMC_ODT_READ
+ - description: EMC_FBIO_CFG5
+ - description: EMC_CFG_DIG_DLL
+ - description: EMC_DLL_XFORM_DQS
+ - description: EMC_DLL_XFORM_QUSE
+ - description: EMC_ZCAL_REF_CNT
+ - description: EMC_ZCAL_WAIT_CNT
+ - description: EMC_AUTO_CAL_INTERVAL
+ - description: EMC_CFG_CLKTRIM_0
+ - description: EMC_CFG_CLKTRIM_1
+ - description: EMC_CFG_CLKTRIM_2
+
+ required:
+ - clock-frequency
+ - compatible
+ - reg
+ - nvidia,emc-registers
+
+ additionalProperties: false
+
+patternProperties:
+ "^emc-table@[0-9]+$":
+ $ref: "#/$defs/emc-table"
+
+ "^emc-tables@[a-z0-9-]+$":
+ type: object
+ properties:
+ reg:
+ maxItems: 1
+ description:
+ An opaque enumerator to tell different tables apart.
+
+ nvidia,ram-code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Value of RAM_CODE this timing set is used for.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^emc-table@[0-9]+$":
+ $ref: "#/$defs/emc-table"
+
+ required:
+ - nvidia,ram-code
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - nvidia,memory-controller
+ - "#interconnect-cells"
+ - operating-points-v2
+
+additionalProperties: false
+
+examples:
+ - |
+ external-memory-controller@7000f400 {
+ compatible = "nvidia,tegra20-emc";
+ reg = <0x7000f400 0x400>;
+ interrupts = <0 78 4>;
+ clocks = <&clock_controller 57>;
+
+ nvidia,memory-controller = <&mc>;
+ operating-points-v2 = <&dvfs_opp_table>;
+ power-domains = <&domain>;
+
+ #interconnect-cells = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nvidia,use-ram-code;
+
+ emc-tables@0 {
+ nvidia,ram-code = <0>;
+ reg = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ emc-table@333000 {
+ reg = <333000>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <333000>;
+ nvidia,emc-registers = <0x00000018 0x00000033
+ 0x00000012 0x00000004 0x00000004 0x00000005
+ 0x00000003 0x0000000c 0x00000006 0x00000006
+ 0x00000003 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x00000bff
+ 0x00000000 0x00000003 0x00000003 0x00000006
+ 0x00000006 0x00000001 0x00000011 0x000000c8
+ 0x00000003 0x0000000e 0x00000007 0x00000008
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xf0440303
+ 0x007fe010 0x00001414 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt b/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt
deleted file mode 100644
index d56615fd343a..000000000000
--- a/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Device tree bindings for ARM PL353 static memory controller
-
-PL353 static memory controller supports two kinds of memory
-interfaces.i.e NAND and SRAM/NOR interfaces.
-The actual devices are instantiated from the child nodes of pl353 smc node.
-
-Required properties:
-- compatible : Should be "arm,pl353-smc-r2p1", "arm,primecell".
-- reg : Controller registers map and length.
-- clock-names : List of input clock names - "memclk", "apb_pclk"
- (See clock bindings for details).
-- clocks : Clock phandles (see clock bindings for details).
-- address-cells : Must be 2.
-- size-cells : Must be 1.
-
-Child nodes:
- For NAND the "arm,pl353-nand-r2p1" and for NOR the "cfi-flash" drivers are
-supported as child nodes.
-
-for NAND partition information please refer the below file
-Documentation/devicetree/bindings/mtd/partition.txt
-
-Example:
- smcc: memory-controller@e000e000
- compatible = "arm,pl353-smc-r2p1", "arm,primecell";
- clock-names = "memclk", "apb_pclk";
- clocks = <&clkc 11>, <&clkc 44>;
- reg = <0xe000e000 0x1000>;
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0xe1000000 0x1000000 //Nand CS Region
- 0x1 0x0 0xe2000000 0x2000000 //SRAM/NOR CS Region
- 0x2 0x0 0xe4000000 0x2000000>; //SRAM/NOR CS Region
- nand_0: flash@e1000000 {
- compatible = "arm,pl353-nand-r2p1"
- reg = <0 0 0x1000000>;
- (...)
- };
- nor0: flash@e2000000 {
- compatible = "cfi-flash";
- reg = <1 0 0x2000000>;
- };
- nor1: flash@e4000000 {
- compatible = "cfi-flash";
- reg = <2 0 0x2000000>;
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
index 4dfa70a013ae..d793dd0316b7 100644
--- a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
+++ b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
@@ -117,6 +117,22 @@ properties:
- "#address-cells"
- "#size-cells"
+ cbas:
+ type: object
+
+ description:
+ This device is used to signal when a detachable base is attached
+ to a Chrome OS tablet. This device cannot be detected at runtime.
+
+ properties:
+ compatible:
+ const: google,cros-cbas
+
+ required:
+ - compatible
+
+ additionalProperties: false
+
patternProperties:
"^i2c-tunnel[0-9]*$":
type: object
@@ -187,6 +203,10 @@ examples:
proximity {
compatible = "google,cros-ec-mkbp-proximity";
};
+
+ cbas {
+ compatible = "google,cros-cbas";
+ };
};
};
diff --git a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
index b52e7a33f0f9..190230216de8 100644
--- a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
+++ b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
@@ -16,8 +16,8 @@ Optional subnodes:
The sub-functions of CPCAP get their own node with their own compatible values,
which are described in the following files:
-- Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
-- Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml
+- Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml
- Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
- Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
- Documentation/devicetree/bindings/input/cpcap-pwrbutton.txt
diff --git a/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml b/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml
new file mode 100644
index 000000000000..779936850ee0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/qcom,pm8008.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. PM8008 PMIC bindings
+
+maintainers:
+ - Guru Das Srinagesh <gurus@codeaurora.org>
+
+description: |
+ Qualcomm Technologies, Inc. PM8008 is a dedicated camera PMIC that integrates
+ all the necessary power management, housekeeping, and interface support
+ functions into a single IC.
+
+properties:
+ compatible:
+ const: qcom,pm8008
+
+ reg:
+ description:
+ I2C slave address.
+
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ description: Parent interrupt.
+
+ "#interrupt-cells":
+ const: 2
+
+ description: |
+ The first cell is the IRQ number, the second cell is the IRQ trigger
+ flag. All interrupts are listed in include/dt-bindings/mfd/qcom-pm8008.h.
+
+ interrupt-controller: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^gpio@[0-9a-f]+$":
+ type: object
+
+ description: |
+ The GPIO peripheral. This node may be specified twice, once for each GPIO.
+
+ properties:
+ compatible:
+ const: qcom,pm8008-gpio
+
+ reg:
+ description: Peripheral address of one of the two GPIO peripherals.
+ maxItems: 1
+
+ gpio-controller: true
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ "#gpio-cells":
+ const: 2
+
+ required:
+ - compatible
+ - reg
+ - gpio-controller
+ - interrupt-controller
+ - "#gpio-cells"
+ - "#interrupt-cells"
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+ - "#interrupt-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/mfd/qcom-pm8008.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ qupv3_se13_i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pm8008i@8 {
+ compatible = "qcom,pm8008";
+ reg = <0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <32 IRQ_TYPE_EDGE_RISING>;
+
+ gpio@c000 {
+ compatible = "qcom,pm8008-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
index 79367a43b27d..5ef79bf3d035 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
@@ -34,6 +34,10 @@ Required properties:
"qcom,pm8998",
"qcom,pmi8998",
"qcom,pm8005",
+ "qcom,pm8350c",
+ "qcom,pmk8350",
+ "qcom,pm7325",
+ "qcom,pmr735a",
or generalized "qcom,spmi-pmic".
- reg: Specifies the SPMI USID slave address for this device.
For more information see:
diff --git a/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml b/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml
new file mode 100644
index 000000000000..4762eb1439ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/richtek,rt4831.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/richtek,rt4831.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT4831 DSV and Backlight Integrated IC
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ RT4831 is a multifunctional device that can provide power to the LCD display
+ and LCD backlight.
+
+ For Display Bias Voltage DSVP and DSVN, the output range is about 4V to 6.5V.
+ It's sufficient to meet the current LCD power requirement.
+
+ For the LCD backlight, it can provide four-channel WLED driving capability.
+ Each channel's driving current is up to 30mA.
+
+ Datasheet is available at
+ https://www.richtek.com/assets/product_file/RT4831A/DS4831A-05.pdf
+
+properties:
+ compatible:
+ const: richtek,rt4831
+
+ reg:
+ description: I2C device address.
+ maxItems: 1
+
+ enable-gpios:
+ description: |
+ GPIO to enable/disable the chip. It is optional.
+ In some designs this pin is tied directly to VIO 1.8V to follow the power-on sequence.
+ maxItems: 1
+
+ regulators:
+ $ref: ../regulator/richtek,rt4831-regulator.yaml
+
+ backlight:
+ $ref: ../leds/backlight/richtek,rt4831-backlight.yaml
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/rt4831-backlight.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rt4831@11 {
+ compatible = "richtek,rt4831";
+ reg = <0x11>;
+
+ regulators {
+ DSVLCM {
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <7150000>;
+ regulator-allow-bypass;
+ };
+ DSVP {
+ regulator-name = "rt4831-dsvp";
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <6500000>;
+ regulator-boot-on;
+ };
+ DSVN {
+ regulator-name = "rt4831-dsvn";
+ regulator-min-microvolt = <4000000>;
+ regulator-max-microvolt = <6500000>;
+ regulator-boot-on;
+ };
+ };
+
+ backlight {
+ compatible = "richtek,rt4831-backlight";
+ default-brightness = <1024>;
+ max-brightness = <2048>;
+ richtek,bled-ovp-sel = /bits/ 8 <RT4831_BLOVPLVL_21V>;
+ richtek,channel-use = /bits/ 8 <RT4831_BLED_ALLCHEN>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 04df07f6f793..23a17a6663ec 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -23,6 +23,7 @@ Optional properties:
default output clock name
- rockchip,system-power-controller: Telling whether or not this pmic is controlling
the system power.
+- wakeup-source: Device can be used as a wakeup source.
Optional RK805 properties:
- vcc1-supply: The input supply for DCDC_REG1
@@ -63,8 +64,18 @@ Optional RK809 properties:
- vcc9-supply: The input supply for DCDC_REG5, SWITCH_REG2
Optional RK817 properties:
+- clocks: The input clock for the audio codec
+- clock-names: The clock name for the codec clock. Should be "mclk".
+- #sound-dai-cells: Needed for the interpretation of sound dais. Should be 0.
+
- vcc8-supply: The input supply for BOOST
- vcc9-supply: The input supply for OTG_SWITCH
+- codec: The child node for the codec to hold additional properties.
+ If no additional properties are required for the codec, this
+ node can be omitted.
+
+- rockchip,mic-in-differential: Telling if the microphone uses differential
+ mode. Should be under the codec child node.
Optional RK818 properties:
- vcc1-supply: The input supply for DCDC_REG1
@@ -275,3 +286,180 @@ Example:
};
};
};
+
+ rk817: pmic@20 {
+ compatible = "rockchip,rk817";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ clock-output-names = "rk808-clkout1", "xin32k";
+ clock-names = "mclk";
+ clocks = <&cru SCLK_I2S1_OUT>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>, <&i2s1_2ch_mclk>;
+ wakeup-source;
+ #clock-cells = <1>;
+ #sound-dai-cells = <0>;
+
+ vcc1-supply = <&vccsys>;
+ vcc2-supply = <&vccsys>;
+ vcc3-supply = <&vccsys>;
+ vcc4-supply = <&vccsys>;
+ vcc5-supply = <&vccsys>;
+ vcc6-supply = <&vccsys>;
+ vcc7-supply = <&vccsys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v3: DCDC_REG4 {
+ regulator-name = "vcc_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_1v0: LDO_REG3 {
+ regulator-name = "vdd_1v0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG4 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_sd: LDO_REG6 {
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_bl: LDO_REG7 {
+ regulator-name = "vcc_bl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_lcd: LDO_REG8 {
+ regulator-name = "vcc_lcd";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ vcc_cam: LDO_REG9 {
+ regulator-name = "vcc_cam";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+ };
+
+ rk817_codec: codec {
+ rockchip,mic-in-differential;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
index 0f16c8864a87..dace35362a7a 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
@@ -119,8 +119,6 @@ patternProperties:
- compatible
required:
- - "#address-cells"
- - "#size-cells"
- compatible
- reg
- clocks
diff --git a/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml b/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
new file mode 100644
index 000000000000..a0d4bad5dc81
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
@@ -0,0 +1,278 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/stericsson,db8500-prcmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST-Ericsson DB8500 PRCMU - Power Reset and Control Management Unit
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ The DB8500 Power Reset and Control Management Unit is an XP70 8-bit
+ microprocessor that is embedded in the always-on power domain of the
+ DB8500 SoCs to manage the low power states, power parts of the silicon
+ up and down, and control reset of different IP blocks.
+
+properties:
+ $nodename:
+ pattern: '^prcmu@[0-9a-f]+$'
+
+ compatible:
+ description: The device is compatible with both the device-specific
+ compatible "stericsson,db8500-prcmu" and "syscon". The latter
+ compatible is needed for the device to be exposed as a system
+ controller so that arbitrary registers can be accessed by
+ different operating system components.
+ items:
+ - const: stericsson,db8500-prcmu
+ - const: syscon
+
+ reg:
+ items:
+ - description: Main PRCMU register area
+ - description: PRCMU TCPM register area
+ - description: PRCMU TCDM register area
+
+ reg-names:
+ items:
+ - const: prcmu
+ - const: prcmu-tcpm
+ - const: prcmu-tcdm
+
+ interrupts:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ ranges: true
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ db8500-prcmu-regulators:
+ description: Node describing the DB8500 regulators. These are mainly
+ power rails inside the silicon but some of those are also routed
+ out to external pins.
+ type: object
+
+ properties:
+ compatible:
+ const: stericsson,db8500-prcmu-regulator
+
+ db8500_vape:
+ description: The voltage for the application processor, the
+ main voltage domain for the chip.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_varm:
+ description: The voltage for the ARM Cortex A-9 CPU.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_vmodem:
+ description: The voltage for the modem subsystem.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_vpll:
+ description: The voltage for the phase locked loop clocks.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_vsmps1:
+ description: Also known as VIO12, this is a step-down voltage regulator
+ for 1.2V I/O. SMPS means System Management Power Source.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_vsmps2:
+ description: Also known as VIO18, this is a step-down voltage regulator
+ for 1.8V I/O. SMPS means System Management Power Source.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_vsmps3:
+ description: This is a step-down voltage regulator
+ for 0.87 V through 1.875 V I/O. SMPS means System Management Power Source.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_vrf1:
+ description: RF transceiver voltage regulator.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sva_mmdsp:
+ description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
+ voltage regulator. This is the voltage for the accelerator DSP
+ for video encoding and decoding.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sva_mmdsp_ret:
+ description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
+ voltage regulator for retention mode.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sva_pipe:
+ description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
+ voltage regulator for the data pipe.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sia_mmdsp:
+ description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
+ voltage regulator. This is the voltage for the accelerator DSP
+ for image encoding and decoding.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sia_mmdsp_ret:
+ description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
+ voltage regulator for retention mode.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sia_pipe:
+ description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
+ voltage regulator for the data pipe.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_sga:
+ description: Smart Graphics Accelerator (SGA) voltage regulator.
+ This is in effect controlling the power to the MALI400 3D
+ accelerator block.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_b2r2_mcde:
+ description: Blit Blend Rotate and Rescale (B2R2), and Multi-Channel
+ Display Engine (MCDE) voltage regulator. These are two graphics
+ blocks.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_esram12:
+ description: Embedded Static RAM (ESRAM) 1 and 2 voltage regulator.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_esram12_ret:
+ description: Embedded Static RAM (ESRAM) 1 and 2 voltage regulator for
+ retention mode.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_esram34:
+ description: Embedded Static RAM (ESRAM) 3 and 4 voltage regulator.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ db8500_esram34_ret:
+ description: Embedded Static RAM (ESRAM) 3 and 4 voltage regulator for
+ retention mode.
+ type: object
+ $ref: ../regulator/regulator.yaml#
+
+ required:
+ - compatible
+ - db8500_vape
+ - db8500_varm
+ - db8500_vmodem
+ - db8500_vpll
+ - db8500_vsmps1
+ - db8500_vsmps2
+ - db8500_vsmps3
+ - db8500_vrf1
+ - db8500_sva_mmdsp
+ - db8500_sva_mmdsp_ret
+ - db8500_sva_pipe
+ - db8500_sia_mmdsp
+ - db8500_sia_mmdsp_ret
+ - db8500_sia_pipe
+ - db8500_sga
+ - db8500_b2r2_mcde
+ - db8500_esram12
+ - db8500_esram12_ret
+ - db8500_esram34
+ - db8500_esram34_ret
+
+ additionalProperties: false
+
+patternProperties:
+ "^thermal@[0-9a-f]+$":
+ description: Node describing the DB8500 thermal control functions.
+ This binds to an operating system driver that monitors the
+ temperature of the SoC.
+ type: object
+
+ properties:
+ compatible:
+ const: stericsson,db8500-thermal
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Hotmon low interrupt (falling temperature)
+ - description: Hotmon high interrupt (rising temperature)
+
+ interrupt-names:
+ items:
+ - const: IRQ_HOTMON_LOW
+ - const: IRQ_HOTMON_HIGH
+
+ '#thermal-sensor-cells':
+ const: 0
+
+ additionalProperties: false
+
+ "^prcmu-timer-4@[0-9a-f]+$":
+ description: Node describing the externally visible timer 4 in the
+ PRCMU block. This timer is interesting to the operating system
+ since even though it has a very low resolution (32768 Hz) it is
+ always on, and thus provides a consistent monotonic timeline for
+ the system.
+ type: object
+
+ properties:
+ compatible:
+ const: stericsson,db8500-prcmu-timer-4
+
+ reg:
+ maxItems: 1
+
+ additionalProperties: false
+
+ "^ab850[05]$":
+ description: Node describing the Analog Baseband 8500 mixed-signals
+ ASIC AB8500 and subcomponents. The AB8500 is accessed through the
+ PRCMU and hence it appears here. This component has a separate
+ set of devicetree bindings. The AB8505 is a newer version of the
+ same ASIC.
+ type: object
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+ - ranges
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - db8500-prcmu-regulators
+
+additionalProperties: false
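
The new stericsson,db8500-prcmu schema is added without an examples section. For
orientation only, a minimal conforming node could look roughly like the sketch below;
the register addresses, interrupt specifiers and the abbreviated regulator list are
illustrative assumptions (include directives for the GIC/IRQ macros omitted), not part
of this patch.

	prcmu: prcmu@80157000 {
		compatible = "stericsson,db8500-prcmu", "syscon";
		/* illustrative addresses for the prcmu, tcpm and tcdm areas */
		reg = <0x80157000 0x2000>,
		      <0x801b0000 0x8000>,
		      <0x801b8000 0x1000>;
		reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
		interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;
		interrupt-controller;
		#interrupt-cells = <2>;

		db8500-prcmu-regulators {
			compatible = "stericsson,db8500-prcmu-regulator";

			db8500_vape: db8500_vape {
				regulator-name = "db8500-vape";
				regulator-always-on;
			};

			/* the remaining db8500_* regulators required by the
			   schema (db8500_varm, db8500_vmodem, ...) follow here */
		};

		thermal@801573c0 {
			compatible = "stericsson,db8500-thermal";
			reg = <0x801573c0 0x40>;
			interrupt-names = "IRQ_HOTMON_LOW", "IRQ_HOTMON_HIGH";
			interrupts = <21 IRQ_TYPE_EDGE_RISING>, <22 IRQ_TYPE_EDGE_RISING>;
			#thermal-sensor-cells = <0>;
		};
	};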
diff --git a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
index 19fcf59fd2fe..272832e9f8f2 100644
--- a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
@@ -43,12 +43,10 @@ properties:
patternProperties:
# Optional children
- "^serdes-ln-ctrl@[0-9a-f]+$":
+ "^mux-controller@[0-9a-f]+$":
type: object
- description: |
- This is the SERDES lane control mux. It should follow the bindings
- specified in
- Documentation/devicetree/bindings/mux/reg-mux.txt
+ description:
+ This is the SERDES lane control mux.
required:
- compatible
@@ -68,9 +66,18 @@ examples:
#size-cells = <1>;
ranges;
- serdes_ln_ctrl: serdes-ln-ctrl@4080 {
+ serdes_ln_ctrl: mux-controller@4080 {
compatible = "mmio-mux";
reg = <0x00004080 0x50>;
+
+ #mux-control-cells = <1>;
+ mux-reg-masks =
+ <0x4080 0x3>, <0x4084 0x3>, /* SERDES0 lane0/1 select */
+ <0x4090 0x3>, <0x4094 0x3>, /* SERDES1 lane0/1 select */
+ <0x40a0 0x3>, <0x40a4 0x3>, /* SERDES2 lane0/1 select */
+ <0x40b0 0x3>, <0x40b4 0x3>, /* SERDES3 lane0/1 select */
+ <0x40c0 0x3>, <0x40c4 0x3>, <0x40c8 0x3>, <0x40cc 0x3>;
+ /* SERDES4 lane0/1/2/3 select */
};
};
...
diff --git a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
index 7b636b7a8311..72ea0af368d4 100644
--- a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
+++ b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
@@ -2,7 +2,10 @@ EEPROMs (SPI) compatible with Microchip Technology 93xx46 family.
Required properties:
- compatible : shall be one of:
+ "atmel,at93c46"
"atmel,at93c46d"
+ "atmel,at93c56"
+ "atmel,at93c66"
"eeprom-93xx46"
"microchip,93lc46b"
- data-size : number of data bits per word (either 8 or 16)
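
For illustration only (the SPI bus layout and maximum frequency below are assumptions,
not taken from this patch), one of the newly listed Atmel parts could be instantiated as:

	spi {
		#address-cells = <1>;
		#size-cells = <0>;

		eeprom@0 {
			compatible = "atmel,at93c56";
			reg = <0>;
			data-size = <16>;
			spi-max-frequency = <1000000>;
		};
	};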
diff --git a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
index e75b3a8ba816..4f62ad6ce50c 100644
--- a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
@@ -64,7 +64,6 @@ properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Bus Clock
- description: Module Clock
@@ -73,7 +72,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: ahb
- const: mmc
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
index 369471814496..b5baf439fbac 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
@@ -116,7 +116,6 @@ properties:
pinctrl-names:
minItems: 1
- maxItems: 4
items:
- const: default
- const: state_100mhz
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
index 8648d48dbbfd..e866e985549e 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
@@ -31,6 +31,8 @@ properties:
- const: mediatek,mt2701-mmc
- items:
- const: mediatek,mt8192-mmc
+ - const: mediatek,mt8183-mmc
+ - items:
- const: mediatek,mt8195-mmc
- const: mediatek,mt8183-mmc
@@ -38,7 +40,6 @@ properties:
description:
Should contain phandle for the clock feeding the MMC controller.
minItems: 2
- maxItems: 8
items:
- description: source clock (required).
- description: HCLK which used for host (required).
@@ -51,7 +52,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 8
items:
- const: source
- const: hclk
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index 1118b6fa93c9..677989bc5924 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -75,7 +75,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: core
- const: cd
@@ -107,7 +106,6 @@ properties:
pinctrl-names:
minItems: 1
- maxItems: 2
items:
- const: default
- const: state_uhs
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
index 29399e88ac53..224303f5b913 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
@@ -44,7 +44,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: clk_ahb
- const: clk_xin
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml b/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml
index aa12480648a5..1c87f4218e18 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml
@@ -57,7 +57,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: io
- const: core
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index 4a9145ef15d6..57d077c0b7c1 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -25,10 +25,10 @@ The following are mandatory properties for 66AK2G SoCs only:
- power-domains:Should contain a phandle to a PM domain provider node
and an args specifier containing the MMC device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
- clocks: Must contain an entry for each entry in clock-names. Should
be defined as per the he appropriate clock bindings consumer
- usage in Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+ usage in Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
- clock-names: Shall be "fck" for the functional clock,
and "mmchsdb_fck" for the debounce clock.
diff --git a/Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml b/Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
new file mode 100644
index 000000000000..5f126bb9b202
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/arm,pl353-nand-r2p1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PL353 NAND Controller device tree bindings
+
+allOf:
+ - $ref: "nand-controller.yaml"
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+ - Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+
+properties:
+ compatible:
+ items:
+ - const: arm,pl353-nand-r2p1
+
+ reg:
+ items:
+ - items:
+ - description: CS with regard to the parent ranges property
+ - description: Offset of the memory region requested by the device
+ - description: Length of the memory region requested by the device
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ smcc: memory-controller@e000e000 {
+ compatible = "arm,pl353-smc-r2p1", "arm,primecell";
+ reg = <0xe000e000 0x0001000>;
+ clock-names = "memclk", "apb_pclk";
+ clocks = <&clkc 11>, <&clkc 44>;
+ ranges = <0x0 0x0 0xe1000000 0x1000000 /* Nand CS region */
+ 0x1 0x0 0xe2000000 0x2000000 /* SRAM/NOR CS0 region */
+ 0x2 0x0 0xe4000000 0x2000000>; /* SRAM/NOR CS1 region */
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ nfc0: nand-controller@0,0 {
+ compatible = "arm,pl353-nand-r2p1";
+ reg = <0 0 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/arm-versatile.txt b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
deleted file mode 100644
index 4ec28796a3c0..000000000000
--- a/Documentation/devicetree/bindings/mtd/arm-versatile.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Flash device on ARM Versatile board
-
-These flash chips are found in the ARM reference designs like Integrator,
-Versatile, RealView, Versatile Express etc.
-
-They are regular CFI compatible (Intel or AMD extended) flash chips with
-some special write protect/VPP bits that can be controlled by the machine's
-system controller.
-
-Required properties:
-- compatible : must be "arm,versatile-flash", "cfi-flash";
-- reg : memory address for the flash chip
-- bank-width : width in bytes of flash interface.
-
-For the rest of the properties, see mtd-physmap.txt.
-
-The device tree may optionally contain sub-nodes describing partitions of the
-address space. See partition.txt for more detail.
-
-Example:
-
-flash@34000000 {
- compatible = "arm,versatile-flash", "cfi-flash";
- reg = <0x34000000 0x4000000>;
- bank-width = <4>;
-};
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
deleted file mode 100644
index 44335a4f8bfb..000000000000
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ /dev/null
@@ -1,186 +0,0 @@
-* Broadcom STB NAND Controller
-
-The Broadcom Set-Top Box NAND controller supports low-level access to raw NAND
-flash chips. It has a memory-mapped register interface for both control
-registers and for its data input/output buffer. On some SoCs, this controller is
-paired with a custom DMA engine (inventively named "Flash DMA") which supports
-basic PROGRAM and READ functions, among other features.
-
-This controller was originally designed for STB SoCs (BCM7xxx) but is now
-available on a variety of Broadcom SoCs, including some BCM3xxx, BCM63xx, and
-iProc/Cygnus. Its history includes several similar (but not fully register
-compatible) versions.
-
-Required properties:
-- compatible : May contain an SoC-specific compatibility string (see below)
- to account for any SoC-specific hardware bits that may be
- added on top of the base core controller.
- In addition, must contain compatibility information about
- the core NAND controller, of the following form:
- "brcm,brcmnand" and an appropriate version compatibility
- string, like "brcm,brcmnand-v7.0"
- Possible values:
- brcm,brcmnand-v2.1
- brcm,brcmnand-v2.2
- brcm,brcmnand-v4.0
- brcm,brcmnand-v5.0
- brcm,brcmnand-v6.0
- brcm,brcmnand-v6.1
- brcm,brcmnand-v6.2
- brcm,brcmnand-v7.0
- brcm,brcmnand-v7.1
- brcm,brcmnand-v7.2
- brcm,brcmnand-v7.3
- brcm,brcmnand
-- reg : the register start and length for NAND register region.
- (optional) Flash DMA register range (if present)
- (optional) NAND flash cache range (if at non-standard offset)
-- reg-names : a list of the names corresponding to the previous register
- ranges. Should contain "nand" and (optionally)
- "flash-dma" or "flash-edu" and/or "nand-cache".
-- interrupts : The NAND CTLRDY interrupt, (if Flash DMA is available)
- FLASH_DMA_DONE and if EDU is avaialble and used FLASH_EDU_DONE
-- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done" or "flash_edu_done",
- if broken out as individual interrupts.
- May be "nand", if the SoC has the individual NAND
- interrupts multiplexed behind another custom piece of
- hardware
-- #address-cells : <1> - subnodes give the chip-select number
-- #size-cells : <0>
-
-Optional properties:
-- clock : reference to the clock for the NAND controller
-- clock-names : "nand" (required for the above clock)
-- brcm,nand-has-wp : Some versions of this IP include a write-protect
- (WP) control bit. It is always available on >=
- v7.0. Use this property to describe the rare
- earlier versions of this core that include WP
-
- -- Additional SoC-specific NAND controller properties --
-
-The NAND controller is integrated differently on the variety of SoCs on which it
-is found. Part of this integration involves providing status and enable bits
-with which to control the 8 exposed NAND interrupts, as well as hardware for
-configuring the endianness of the data bus. On some SoCs, these features are
-handled via standard, modular components (e.g., their interrupts look like a
-normal IRQ chip), but on others, they are controlled in unique and interesting
-ways, sometimes with registers that lump multiple NAND-related functions
-together. The former case can be described simply by the standard interrupts
-properties in the main controller node. But for the latter exceptional cases,
-we define additional 'compatible' properties and associated register resources within the NAND controller node above.
-
- - compatible: Can be one of several SoC-specific strings. Each SoC may have
- different requirements for its additional properties, as described below each
- bullet point below.
-
- * "brcm,nand-bcm63138"
- - reg: (required) the 'NAND_INT_BASE' register range, with separate status
- and enable registers
- - reg-names: (required) "nand-int-base"
-
- * "brcm,nand-bcm6368"
- - compatible: should contain "brcm,nand-bcm<soc>", "brcm,nand-bcm6368"
- - reg: (required) the 'NAND_INTR_BASE' register range, with combined status
- and enable registers, and boot address registers
- - reg-names: (required) "nand-int-base"
-
- * "brcm,nand-iproc"
- - reg: (required) the "IDM" register range, for interrupt enable and APB
- bus access endianness configuration, and the "EXT" register range,
- for interrupt status/ack.
- - reg-names: (required) a list of the names corresponding to the previous
- register ranges. Should contain "iproc-idm" and "iproc-ext".
-
-
-* NAND chip-select
-
-Each controller (compatible: "brcm,brcmnand") may contain one or more subnodes
-to represent enabled chip-selects which (may) contain NAND flash chips. Their
-properties are as follows.
-
-Required properties:
-- compatible : should contain "brcm,nandcs"
-- reg : a single integer representing the chip-select
- number (e.g., 0, 1, 2, etc.)
-- #address-cells : see partition.txt
-- #size-cells : see partition.txt
-
-Optional properties:
-- nand-ecc-strength : see nand-controller.yaml
-- nand-ecc-step-size : must be 512 or 1024. See nand-controller.yaml
-- nand-on-flash-bbt : boolean, to enable the on-flash BBT for this
- chip-select. See nand-controller.yaml
-- brcm,nand-oob-sector-size : integer, to denote the spare area sector size
- expected for the ECC layout in use. This size, in
- addition to the strength and step-size,
- determines how the hardware BCH engine will lay
- out the parity bytes it stores on the flash.
- This property can be automatically determined by
- the flash geometry (particularly the NAND page
- and OOB size) in many cases, but when booting
- from NAND, the boot controller has only a limited
- number of available options for its default ECC
- layout.
-
-Each nandcs device node may optionally contain sub-nodes describing the flash
-partition mapping. See partition.txt for more detail.
-
-
-Example:
-
-nand@f0442800 {
- compatible = "brcm,brcmnand-v7.0", "brcm,brcmnand";
- reg = <0xF0442800 0x600>,
- <0xF0443000 0x100>;
- reg-names = "nand", "flash-dma";
- interrupt-parent = <&hif_intr2_intc>;
- interrupts = <24>, <4>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- nandcs@1 {
- compatible = "brcm,nandcs";
- reg = <1>; // Chip select 1
- nand-on-flash-bbt;
- nand-ecc-strength = <12>;
- nand-ecc-step-size = <512>;
-
- // Partitions
- #address-cells = <1>; // <2>, for 64-bit offset
- #size-cells = <1>; // <2>, for 64-bit length
- flash0.rootfs@0 {
- reg = <0 0x10000000>;
- };
- flash0@0 {
- reg = <0 0>; // MTDPART_SIZ_FULL
- };
- flash0.kernel@10000000 {
- reg = <0x10000000 0x400000>;
- };
- };
-};
-
-nand@10000200 {
- compatible = "brcm,nand-bcm63168", "brcm,nand-bcm6368",
- "brcm,brcmnand-v4.0", "brcm,brcmnand";
- reg = <0x10000200 0x180>,
- <0x10000600 0x200>,
- <0x100000b0 0x10>;
- reg-names = "nand", "nand-cache", "nand-int-base";
- interrupt-parent = <&periph_intc>;
- interrupts = <50>;
- clocks = <&periph_clk 20>;
- clock-names = "nand";
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- nand0: nandcs@0 {
- compatible = "brcm,nandcs";
- reg = <0>;
- nand-on-flash-bbt;
- nand-ecc-strength = <1>;
- nand-ecc-step-size = <512>;
- };
-};
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml
new file mode 100644
index 000000000000..dd5a64969e37
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml
@@ -0,0 +1,234 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/brcm,brcmnand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB NAND Controller
+
+maintainers:
+ - Brian Norris <computersforpeace@gmail.com>
+ - Kamal Dasu <kdasu.kdev@gmail.com>
+
+description: |
+ The Broadcom Set-Top Box NAND controller supports low-level access to raw NAND
+ flash chips. It has a memory-mapped register interface for both control
+ registers and for its data input/output buffer. On some SoCs, this controller
+ is paired with a custom DMA engine (inventively named "Flash DMA") which
+ supports basic PROGRAM and READ functions, among other features.
+
+ This controller was originally designed for STB SoCs (BCM7xxx) but is now
+ available on a variety of Broadcom SoCs, including some BCM3xxx, BCM63xx, and
+ iProc/Cygnus. Its history includes several similar (but not fully register
+ compatible) versions.
+
+ -- Additional SoC-specific NAND controller properties --
+
+ The NAND controller is integrated differently on the variety of SoCs on which
+ it is found. Part of this integration involves providing status and enable
+ bits with which to control the 8 exposed NAND interrupts, as well as hardware
+ for configuring the endianness of the data bus. On some SoCs, these features
+ are handled via standard, modular components (e.g., their interrupts look like
+ a normal IRQ chip), but on others, they are controlled in unique and
+ interesting ways, sometimes with registers that lump multiple NAND-related
+ functions together. The former case can be described simply by the standard
+ interrupts properties in the main controller node. But for the latter
+ exceptional cases, we define additional 'compatible' properties and associated
+ register resources within the NAND controller node above.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - brcm,brcmnand-v2.1
+ - brcm,brcmnand-v2.2
+ - brcm,brcmnand-v4.0
+ - brcm,brcmnand-v5.0
+ - brcm,brcmnand-v6.0
+ - brcm,brcmnand-v6.1
+ - brcm,brcmnand-v6.2
+ - brcm,brcmnand-v7.0
+ - brcm,brcmnand-v7.1
+ - brcm,brcmnand-v7.2
+ - brcm,brcmnand-v7.3
+ - const: brcm,brcmnand
+ - description: BCM63138 SoC-specific NAND controller
+ items:
+ - const: brcm,nand-bcm63138
+ - enum:
+ - brcm,brcmnand-v7.0
+ - brcm,brcmnand-v7.1
+ - const: brcm,brcmnand
+ - description: iProc SoC-specific NAND controller
+ items:
+ - const: brcm,nand-iproc
+ - const: brcm,brcmnand-v6.1
+ - const: brcm,brcmnand
+ - description: BCM63168 SoC-specific NAND controller
+ items:
+ - const: brcm,nand-bcm63168
+ - const: brcm,nand-bcm6368
+ - const: brcm,brcmnand-v4.0
+ - const: brcm,brcmnand
+
+ reg:
+ minItems: 1
+ maxItems: 6
+
+ reg-names:
+ minItems: 1
+ maxItems: 6
+ items:
+ enum: [ nand, flash-dma, flash-edu, nand-cache, nand-int-base, iproc-idm, iproc-ext ]
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: NAND CTLRDY interrupt
+ - description: FLASH_DMA_DONE if flash DMA is available
+ - description: FLASH_EDU_DONE if EDU is available
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: nand_ctlrdy
+ - const: flash_dma_done
+ - const: flash_edu_done
+
+ clocks:
+ maxItems: 1
+ description: reference to the clock for the NAND controller
+
+ clock-names:
+ const: nand
+
+ brcm,nand-has-wp:
+ description: >
+ Some versions of this IP include a write-protect
+ (WP) control bit. It is always available on >=
+ v7.0. Use this property to describe the rare
+ earlier versions of this core that include WP.
+ type: boolean
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ compatible:
+ const: brcm,nandcs
+
+ nand-ecc-step-size:
+ enum: [ 512, 1024 ]
+
+ brcm,nand-oob-sector-size:
+ description: |
+ integer, to denote the spare area sector size
+ expected for the ECC layout in use. This size, in
+ addition to the strength and step-size,
+ determines how the hardware BCH engine will lay
+ out the parity bytes it stores on the flash.
+ This property can be automatically determined by
+ the flash geometry (particularly the NAND page
+ and OOB size) in many cases, but when booting
+ from NAND, the boot controller has only a limited
+ number of available options for its default ECC
+ layout.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+allOf:
+ - $ref: nand-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,nand-bcm63138
+ then:
+ properties:
+ reg-names:
+ items:
+ - const: nand
+ - const: nand-int-base
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,nand-bcm6368
+ then:
+ properties:
+ reg-names:
+ items:
+ - const: nand
+ - const: nand-int-base
+ - const: nand-cache
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,nand-iproc
+ then:
+ properties:
+ reg-names:
+ items:
+ - const: nand
+ - const: iproc-idm
+ - const: iproc-ext
+
+unevaluatedProperties: false
+
+required:
+ - reg
+ - reg-names
+ - interrupts
+
+examples:
+ - |
+ nand-controller@f0442800 {
+ compatible = "brcm,brcmnand-v7.0", "brcm,brcmnand";
+ reg = <0xf0442800 0x600>,
+ <0xf0443000 0x100>;
+ reg-names = "nand", "flash-dma";
+ interrupt-parent = <&hif_intr2_intc>;
+ interrupts = <24>, <4>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@1 {
+ compatible = "brcm,nandcs";
+ reg = <1>; // Chip select 1
+ nand-on-flash-bbt;
+ nand-ecc-strength = <12>;
+ nand-ecc-step-size = <512>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+ - |
+ nand-controller@10000200 {
+ compatible = "brcm,nand-bcm63168", "brcm,nand-bcm6368",
+ "brcm,brcmnand-v4.0", "brcm,brcmnand";
+ reg = <0x10000200 0x180>,
+ <0x100000b0 0x10>,
+ <0x10000600 0x200>;
+ reg-names = "nand", "nand-int-base", "nand-cache";
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ clocks = <&periph_clk 20>;
+ clock-names = "nand";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <1>;
+ nand-ecc-step-size = <512>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/common.txt b/Documentation/devicetree/bindings/mtd/common.txt
index fc068b923d7a..ae16f9ea8606 100644
--- a/Documentation/devicetree/bindings/mtd/common.txt
+++ b/Documentation/devicetree/bindings/mtd/common.txt
@@ -1,15 +1 @@
-* Common properties of all MTD devices
-
-Optional properties:
-- label: user-defined MTD device name. Can be used to assign user
- friendly names to MTD devices (instead of the flash model or flash
- controller based name) in order to ease flash device identification
- and/or describe what they are used for.
-
-Example:
-
- flash@0 {
- label = "System-firmware";
-
- /* flash type specific properties */
- };
+This file has been moved to mtd.yaml.
diff --git a/Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt b/Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt
index 3fa1b34d69ad..efa5b2aba829 100644
--- a/Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt
+++ b/Documentation/devicetree/bindings/mtd/cortina,gemini-flash.txt
@@ -9,7 +9,7 @@ Required properties:
- syscon : must be a phandle to the system controller
- bank-width : width in bytes of flash interface, should be <2>
-For the rest of the properties, see mtd-physmap.txt.
+For the rest of the properties, see mtd-physmap.yaml.
The device tree may optionally contain sub-nodes describing partitions of the
address space. See partition.txt for more detail.
diff --git a/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt b/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
deleted file mode 100644
index ad42f4db32f1..000000000000
--- a/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Bindings for HyperFlash NOR flash chips compliant with Cypress HyperBus
-specification and supports Cypress CFI specification 1.5 command set.
-
-Required properties:
-- compatible : "cypress,hyperflash", "cfi-flash" for HyperFlash NOR chips
-- reg : Address of flash's memory map
-
-Example:
-
- flash@0 {
- compatible = "cypress,hyperflash", "cfi-flash";
- reg = <0x0 0x4000000>;
- };
diff --git a/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt b/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt
deleted file mode 100644
index 4bdcb92ae381..000000000000
--- a/Documentation/devicetree/bindings/mtd/intel,ixp4xx-flash.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Flash device on Intel IXP4xx SoC
-
-This flash is regular CFI compatible (Intel or AMD extended) flash chips with
-specific big-endian or mixed-endian memory access pattern.
-
-Required properties:
-- compatible : must be "intel,ixp4xx-flash", "cfi-flash";
-- reg : memory address for the flash chip
-- bank-width : width in bytes of flash interface, should be <2>
-
-For the rest of the properties, see mtd-physmap.txt.
-
-The device tree may optionally contain sub-nodes describing partitions of the
-address space. See partition.txt for more detail.
-
-Example:
-
-flash@50000000 {
- compatible = "intel,ixp4xx-flash", "cfi-flash";
- reg = <0x50000000 0x01000000>;
- bank-width = <2>;
-};
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
index 5e7e5349f9a1..ed590d7c6e37 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
@@ -9,6 +9,9 @@ title: SPI NOR flash ST M25Pxx (and similar) serial flash chips
maintainers:
- Rob Herring <robh@kernel.org>
+allOf:
+ - $ref: "mtd.yaml#"
+
properties:
compatible:
oneOf:
@@ -82,6 +85,9 @@ patternProperties:
'^partition@':
type: object
+ "^otp(-[0-9]+)?$":
+ type: object
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
new file mode 100644
index 000000000000..2cdf6bf3dc4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mtd/microchip,mchp48l640.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Microchip 48l640 (and similar) serial EERAM bindings
+
+maintainers:
+ - Heiko Schocher <hs@denx.de>
+
+description: |
+ The Microchip 48l640 is an 8KByte EERAM connected via SPI.
+
+ datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20006055B.pdf
+
+properties:
+ compatible:
+ items:
+ - const: microchip,48l640
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeram@0 {
+ compatible = "microchip,48l640";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
deleted file mode 100644
index c69f4f065d23..000000000000
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-CFI or JEDEC memory-mapped NOR flash, MTD-RAM (NVRAM...)
-
-Flash chips (Memory Technology Devices) are often used for solid state
-file systems on embedded devices.
-
- - compatible : should contain the specific model of mtd chip(s)
- used, if known, followed by either "cfi-flash", "jedec-flash",
- "mtd-ram" or "mtd-rom".
- - reg : Address range(s) of the mtd chip(s)
- It's possible to (optionally) define multiple "reg" tuples so that
- non-identical chips can be described in one node.
- - bank-width : Width (in bytes) of the bank. Equal to the
- device width times the number of interleaved chips.
- - device-width : (optional) Width of a single mtd chip. If
- omitted, assumed to be equal to 'bank-width'.
- - #address-cells, #size-cells : Must be present if the device has
- sub-nodes representing partitions (see below). In this case
- both #address-cells and #size-cells must be equal to 1.
- - no-unaligned-direct-access: boolean to disable the default direct
- mapping of the flash.
- On some platforms (e.g. MPC5200) a direct 1:1 mapping may cause
- problems with JFFS2 usage, as the local bus (LPB) doesn't support
- unaligned accesses as implemented in the JFFS2 code via memcpy().
- By defining "no-unaligned-direct-access", the flash will not be
- exposed directly to the MTD users (e.g. JFFS2) any more.
- - linux,mtd-name: allow to specify the mtd name for retro capability with
- physmap-flash drivers as boot loader pass the mtd partition via the old
- device name physmap-flash.
- - use-advanced-sector-protection: boolean to enable support for the
- advanced sector protection (Spansion: PPB - Persistent Protection
- Bits) locking.
- - addr-gpios : (optional) List of GPIO descriptors that will be used to
- address the MSBs address lines. The order goes from LSB to MSB.
-
-For JEDEC compatible devices, the following additional properties
-are defined:
-
- - vendor-id : Contains the flash chip's vendor id (1 byte).
- - device-id : Contains the flash chip's device id (1 byte).
-
-For ROM compatible devices (and ROM fallback from cfi-flash), the following
-additional (optional) property is defined:
-
- - erase-size : The chip's physical erase block size in bytes.
-
- The device tree may optionally contain endianness property.
- little-endian or big-endian : It Represents the endianness that should be used
- by the controller to properly read/write data
- from/to the flash. If this property is missing,
- the endianness is chosen by the system
- (potentially based on extra configuration options).
-
-The device tree may optionally contain sub-nodes describing partitions of the
-address space. See partition.txt for more detail.
-
-Example:
-
- flash@ff000000 {
- compatible = "amd,am29lv128ml", "cfi-flash";
- reg = <ff000000 01000000>;
- bank-width = <4>;
- device-width = <1>;
- #address-cells = <1>;
- #size-cells = <1>;
- fs@0 {
- label = "fs";
- reg = <0 f80000>;
- };
- firmware@f80000 {
- label ="firmware";
- reg = <f80000 80000>;
- read-only;
- };
- };
-
-Here an example with multiple "reg" tuples:
-
- flash@f0000000,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "intel,PC48F4400P0VB", "cfi-flash";
- reg = <0 0x00000000 0x02000000
- 0 0x02000000 0x02000000>;
- bank-width = <2>;
- partition@0 {
- label = "test-part1";
- reg = <0 0x04000000>;
- };
- };
-
-An example using SRAM:
-
- sram@2,0 {
- compatible = "samsung,k6f1616u6a", "mtd-ram";
- reg = <2 0 0x00200000>;
- bank-width = <2>;
- };
-
-An example using gpio-addrs
-
- flash@20000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash", "jedec-flash";
- reg = <0x20000000 0x02000000>;
- ranges = <0 0x00000000 0x02000000
- 1 0x02000000 0x02000000>;
- bank-width = <2>;
- addr-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
- partition@0 {
- label = "test-part1";
- reg = <0 0x04000000>;
- };
- };
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
new file mode 100644
index 000000000000..13c29cc91b59
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/mtd-physmap.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CFI or JEDEC memory-mapped NOR flash, MTD-RAM (NVRAM...)
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description: |
+ Flash chips (Memory Technology Devices) are often used for solid state
+ file systems on embedded devices.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amd,s29gl01gp
+ - amd,s29gl032a
+ - amd,s29gl256n
+ - amd,s29gl512n
+ - arm,versatile-flash
+ - cortina,gemini-flash
+ - cypress,hyperflash
+ - ge,imp3a-firmware-mirror
+ - ge,imp3a-paged-flash
+ - gef,ppc9a-firmware-mirror
+ - gef,ppc9a-paged-flash
+ - gef,sbc310-firmware-mirror
+ - gef,sbc310-paged-flash
+ - gef,sbc610-firmware-mirror
+ - gef,sbc610-paged-flash
+ - intel,28f128j3
+ - intel,dt28f160
+ - intel,ixp4xx-flash
+ - intel,JS28F128
+ - intel,JS28F640
+ - intel,PC28F640P30T85
+ - numonyx,js28f00a
+ - numonyx,js28f128
+ - sst,sst39vf320
+ - xlnx,xps-mch-emc-2.00.a
+ - const: cfi-flash
+ - items:
+ - enum:
+ - cypress,cy7c1019dv33-10zsxi
+ - arm,vexpress-psram
+ - const: mtd-ram
+ - enum:
+ - cfi-flash
+ - jedec-flash
+ - mtd-ram
+ - mtd-rom
+
+ reg:
+ description: |
+ It's possible to (optionally) define multiple "reg" tuples so that
+ non-identical chips can be described in one node.
+ minItems: 1
+ maxItems: 8
+
+ bank-width:
+ description: Width (in bytes) of the bank. Equal to the device width times
+ the number of interleaved chips.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 4 ]
+
+ device-width:
+ description:
+ Width of a single mtd chip. If omitted, assumed to be equal to 'bank-width'.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2 ]
+
+ no-unaligned-direct-access:
+ type: boolean
+ description: |
+ Disables the default direct mapping of the flash.
+
+ On some platforms (e.g. MPC5200) a direct 1:1 mapping may cause problems
+ with JFFS2 usage, as the local bus (LPB) doesn't support unaligned
+ accesses as implemented in the JFFS2 code via memcpy(). By defining
+ "no-unaligned-direct-access", the flash will not be exposed directly to
+ the MTD users (e.g. JFFS2) any more.
+
+ linux,mtd-name:
+ description:
+ Allows specifying the mtd name for retro compatibility with physmap-flash
+ drivers, as the boot loader passes the mtd partition via the old device
+ name physmap-flash.
+ $ref: /schemas/types.yaml#/definitions/string
+
+ use-advanced-sector-protection:
+ type: boolean
+ description: |
+ Enables support for the advanced sector protection (Spansion: PPB -
+ Persistent Protection Bits) locking.
+
+ erase-size:
+ description: The chip's physical erase block size in bytes.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ addr-gpios:
+ description:
+ List of GPIO descriptors that will be used to address the MSB address
+ lines. The order goes from LSB to MSB.
+ minItems: 1
+ maxItems: 8
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ big-endian: true
+ little-endian: true
+
+patternProperties:
+ '@[0-9a-f]+$':
+ $ref: partitions/partition.yaml
+
+required:
+ - compatible
+ - reg
+
+# FIXME: A parent bus may define timing properties
+additionalProperties: true
+
+examples:
+ - |
+
+ flash@ff000000 {
+ compatible = "cfi-flash";
+ reg = <0xff000000 0x01000000>;
+ bank-width = <4>;
+ device-width = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xff000000 0x01000000>;
+
+ fs@0 {
+ label = "fs";
+ reg = <0 0xf80000>;
+ };
+ firmware@f80000 {
+ label ="firmware";
+ reg = <0xf80000 0x80000>;
+ read-only;
+ };
+ };
+
+ - |
+ /* An example with multiple "reg" tuples */
+
+ flash@0 {
+ compatible = "intel,PC28F640P30T85", "cfi-flash";
+ reg = <0x00000000 0x02000000>,
+ <0x02000000 0x02000000>;
+ bank-width = <2>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x04000000>;
+
+ partition@0 {
+ label = "test-part1";
+ reg = <0 0x04000000>;
+ };
+ };
+
+ - |
+ /* An example using SRAM */
+ bus {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ sram@2,0 {
+ compatible = "mtd-ram";
+ reg = <2 0 0x00200000>;
+ bank-width = <2>;
+ };
+ };
+
+ - |
+ /* An example using addr-gpios */
+ #include <dt-bindings/gpio/gpio.h>
+
+ flash@20000000 {
+ compatible = "cfi-flash";
+ reg = <0x20000000 0x02000000>;
+ bank-width = <2>;
+ addr-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x00000000 0x02000000>,
+ <1 0x02000000 0x02000000>;
+
+ partition@0 {
+ label = "test-part1";
+ reg = <0 0x04000000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mtd/mtd.yaml b/Documentation/devicetree/bindings/mtd/mtd.yaml
new file mode 100644
index 000000000000..376b679cfc70
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mtd.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/mtd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MTD (Memory Technology Device) Device Tree Bindings
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+ - Richard Weinberger <richard@nod.at>
+
+properties:
+ $nodename:
+ pattern: "^flash(@.*)?$"
+
+ label:
+ description:
+ User-defined MTD device name. Can be used to assign user-friendly
+ names to MTD devices (instead of the flash model or flash controller
+ based name) in order to ease flash device identification and/or
+ describe what they are used for.
+
+patternProperties:
+ "^otp(-[0-9]+)?$":
+ type: object
+ $ref: ../nvmem/nvmem.yaml#
+
+ description: |
+ An OTP memory region. Some flashes provide a one-time-programmable
+ memory whose content can either be programmed by a user or is already
+ pre-programmed by the factory. Some flashes might provide both.
+
+ properties:
+ compatible:
+ enum:
+ - user-otp
+ - factory-otp
+
+ required:
+ - compatible
+
+additionalProperties: true
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ reg = <0>;
+ compatible = "jedec,spi-nor";
+ label = "System-firmware";
+ };
+ };
+
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ reg = <0>;
+ compatible = "jedec,spi-nor";
+
+ otp-1 {
+ compatible = "factory-otp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ electronic-serial-number@0 {
+ reg = <0 8>;
+ };
+ };
+
+ otp-2 {
+ compatible = "user-otp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mac-address@0 {
+ reg = <0 6>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
index 678b39952502..bd217e6f5018 100644
--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
+++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
@@ -38,6 +38,17 @@ properties:
ranges: true
+ cs-gpios:
+ minItems: 1
+ maxItems: 8
+ description:
+ Array of chip-selects available to the controller. The first
+ entries are a 1:1 mapping of the available chip-selects on the
+ NAND controller (even if they are not used). As many additional
+ chip-selects as needed may follow; these should be phandles of GPIO
+ lines. 'reg' entries of the NAND chip subnodes become indexes of
+ this array when this property is present.
+
patternProperties:
"^nand@[a-f0-9]$":
type: object
@@ -164,14 +175,19 @@ examples:
nand-controller {
#address-cells = <1>;
#size-cells = <0>;
+ cs-gpios = <0>, <&gpioA 1>; /* A single native CS is available */
/* controller specific properties */
nand@0 {
- reg = <0>;
+ reg = <0>; /* Native CS */
nand-use-soft-ecc-engine;
nand-ecc-algo = "bch";
/* controller specific properties */
};
+
+ nand@1 {
+ reg = <1>; /* GPIO CS */
+ };
};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
index b677147ca4e1..c2175d3c82ec 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
@@ -28,6 +28,11 @@ detected by a software parsing TRX header.
Required properties:
- compatible : (required) must be "brcm,trx"
+Optional properties:
+
+- brcm,trx-magic: TRX magic, if it is different from the default magic
+ 0x30524448 as a u32.
+
Example:
flash@0 {
diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
deleted file mode 100644
index faa81c2e5da6..000000000000
--- a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-Bindings for HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs
-
-Required properties:
-- compatible : "ti,am654-hbmc" for AM654 SoC
-- reg : Two entries:
- First entry pointed to the register space of HBMC controller
- Second entry pointing to the memory map region dedicated for
- MMIO access to attached flash devices
-- ranges : Address translation from offset within CS to allocated MMIO
- space in SoC
-
-Optional properties:
-- mux-controls : phandle to the multiplexer that controls selection of
- HBMC vs OSPI inside Flash SubSystem (FSS). Default is OSPI,
- if property is absent.
- See Documentation/devicetree/bindings/mux/reg-mux.txt
- for mmio-mux binding details
-
-Example:
-
- system-controller@47000000 {
- compatible = "syscon", "simple-mfd";
- reg = <0x0 0x47000000 0x0 0x100>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- hbmc_mux: multiplexer {
- compatible = "mmio-mux";
- #mux-control-cells = <1>;
- mux-reg-masks = <0x4 0x2>; /* 0: reg 0x4, bit 1 */
- };
- };
-
- hbmc: hyperbus@47034000 {
- compatible = "ti,am654-hbmc";
- reg = <0x0 0x47034000 0x0 0x100>,
- <0x5 0x00000000 0x1 0x0000000>;
- power-domains = <&k3_pds 55>;
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */
- <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */
- mux-controls = <&hbmc_mux 0>;
-
- /* Slave flash node */
- flash@0,0 {
- compatible = "cypress,hyperflash", "cfi-flash";
- reg = <0x0 0x0 0x4000000>;
- };
- };
diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml
new file mode 100644
index 000000000000..30b458c41cac
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/ti,am654-hbmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs
+
+maintainers:
+ - Vignesh Raghavendra <vigneshr@ti.com>
+
+properties:
+ compatible:
+ const: ti,am654-hbmc
+
+ reg:
+ maxItems: 2
+
+ power-domains: true
+ '#address-cells': true
+ '#size-cells': true
+ ranges: true
+
+ mux-controls:
+ description: MMIO mux controller node to select between OSPI and HBMC.
+
+ clocks:
+ maxItems: 1
+
+patternProperties:
+ "^flash@[0-1],[0-9a-f]+$":
+ type: object
+
+required:
+ - compatible
+ - reg
+ - ranges
+ - clocks
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ hbmc: memory-controller@47034000 {
+ compatible = "ti,am654-hbmc";
+ reg = <0x0 0x47034000 0x0 0x100>,
+ <0x5 0x00000000 0x1 0x0000000>;
+ ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */
+ <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */
+ clocks = <&k3_clks 102 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ power-domains = <&k3_pds 55>;
+ mux-controls = <&hbmc_mux 0>;
+
+ flash@0,0 {
+ compatible = "cypress,hyperflash", "cfi-flash";
+ reg = <0x0 0x0 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+ };
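
The example references an hbmc_mux phandle without defining it. A minimal sketch of a matching provider node, adapted from the text binding removed above; the syscon address and the register/mask values are assumptions carried over from that example:

    system-controller@47000000 {
        compatible = "syscon", "simple-mfd";
        reg = <0x0 0x47000000 0x0 0x100>;

        hbmc_mux: mux-controller {
            compatible = "mmio-mux";
            #mux-control-cells = <1>;
            mux-reg-masks = <0x4 0x2>; /* 0: reg 0x4, bit 1 */
        };
    };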
diff --git a/Documentation/devicetree/bindings/mux/adi,adg792a.txt b/Documentation/devicetree/bindings/mux/adi,adg792a.txt
index 96b787a69f50..b0e5bf6903d8 100644
--- a/Documentation/devicetree/bindings/mux/adi,adg792a.txt
+++ b/Documentation/devicetree/bindings/mux/adi,adg792a.txt
@@ -5,7 +5,7 @@ Required properties:
- #mux-control-cells : <0> if parallel (the three muxes are bound together
with a single mux controller controlling all three muxes), or <1> if
not (one mux controller for each mux).
-* Standard mux-controller bindings as described in mux-controller.txt
+* Standard mux-controller bindings as described in mux-controller.yaml
Optional properties for ADG792G:
- gpio-controller : if present, #gpio-cells below is required.
diff --git a/Documentation/devicetree/bindings/mux/adi,adgs1408.txt b/Documentation/devicetree/bindings/mux/adi,adgs1408.txt
index be6947f4d86b..453a38961c13 100644
--- a/Documentation/devicetree/bindings/mux/adi,adgs1408.txt
+++ b/Documentation/devicetree/bindings/mux/adi,adgs1408.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible : Should be one of
* "adi,adgs1408"
* "adi,adgs1409"
-* Standard mux-controller bindings as described in mux-controller.txt
+* Standard mux-controller bindings as described in mux-controller.yaml
Optional properties for ADGS1408/1409:
- gpio-controller : if present, #gpio-cells is required.
diff --git a/Documentation/devicetree/bindings/mux/gpio-mux.txt b/Documentation/devicetree/bindings/mux/gpio-mux.txt
deleted file mode 100644
index b8f746344d80..000000000000
--- a/Documentation/devicetree/bindings/mux/gpio-mux.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-GPIO-based multiplexer controller bindings
-
-Define what GPIO pins are used to control a multiplexer. Or several
-multiplexers, if the same pins control more than one multiplexer.
-
-Required properties:
-- compatible : "gpio-mux"
-- mux-gpios : list of gpios used to control the multiplexer, least
- significant bit first.
-- #mux-control-cells : <0>
-* Standard mux-controller bindings as decribed in mux-controller.txt
-
-Optional properties:
-- idle-state : if present, the state the mux will have when idle. The
- special state MUX_IDLE_AS_IS is the default.
-
-The multiplexer state is defined as the number represented by the
-multiplexer GPIO pins, where the first pin is the least significant
-bit. An active pin is a binary 1, an inactive pin is a binary 0.
-
-Example:
-
- mux: mux-controller {
- compatible = "gpio-mux";
- #mux-control-cells = <0>;
-
- mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
- <&pioA 1 GPIO_ACTIVE_HIGH>;
- };
-
- adc-mux {
- compatible = "io-channel-mux";
- io-channels = <&adc 0>;
- io-channel-names = "parent";
-
- mux-controls = <&mux>;
-
- channels = "sync-1", "in", "out", "sync-2";
- };
-
- i2c-mux {
- compatible = "i2c-mux";
- i2c-parent = <&i2c1>;
-
- mux-controls = <&mux>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- ssd1307: oled@3c {
- /* ... */
- };
- };
-
- i2c@3 {
- reg = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pca9555: pca9555@20 {
- /* ... */
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/mux/gpio-mux.yaml b/Documentation/devicetree/bindings/mux/gpio-mux.yaml
new file mode 100644
index 000000000000..0a7c8d64981a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/gpio-mux.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mux/gpio-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO-based multiplexer controller bindings
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |+
+ Define what GPIO pins are used to control a multiplexer. Or several
+ multiplexers, if the same pins control more than one multiplexer.
+
+ The multiplexer state is defined as the number represented by the
+ multiplexer GPIO pins, where the first pin is the least significant
+ bit. An active pin is a binary 1, an inactive pin is a binary 0.
+
+properties:
+ compatible:
+ const: gpio-mux
+
+ mux-gpios:
+ description:
+ List of gpios used to control the multiplexer, least significant bit first.
+
+ '#mux-control-cells':
+ const: 0
+
+ idle-state:
+ default: -1
+
+required:
+ - compatible
+ - mux-gpios
+ - "#mux-control-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+
+ channels = "sync-1", "in", "out", "sync-2";
+ };
+
+ i2c-mux {
+ compatible = "i2c-mux";
+ i2c-parent = <&i2c1>;
+
+ mux-controls = <&mux>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ reg = <0x3c>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ reg = <0x20>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mux/mux-consumer.yaml b/Documentation/devicetree/bindings/mux/mux-consumer.yaml
new file mode 100644
index 000000000000..7af93298ab5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/mux-consumer.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mux/mux-consumer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common multiplexer controller consumer bindings
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |
+ Mux controller consumers should specify a list of mux controllers that they
+ want to use with a property containing a 'mux-ctrl-list':
+
+ mux-ctrl-list ::= <single-mux-ctrl> [mux-ctrl-list]
+ single-mux-ctrl ::= <mux-ctrl-phandle> [mux-ctrl-specifier]
+ mux-ctrl-phandle : phandle to mux controller node
+ mux-ctrl-specifier : array of #mux-control-cells specifying the
+ given mux controller (controller specific)
+
+ Mux controller properties should be named "mux-controls". The exact meaning of
+ each mux controller property must be documented in the device tree binding for
+ each consumer. An optional property "mux-control-names" may contain a list of
+ strings to label each of the mux controllers listed in the "mux-controls"
+ property.
+
+ mux-ctrl-specifier typically encodes the chip-relative mux controller number.
+ If the mux controller chip only provides a single mux controller, the
+ mux-ctrl-specifier can typically be left out.
+
+select: true
+
+properties:
+ mux-controls:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+ mux-control-names:
+ description:
+ Devices that use more than a single mux controller can use the
+ "mux-control-names" property to map the name of the requested mux
+ controller to an index into the list given by the "mux-controls" property.
+
+additionalProperties: true
+
+...
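
The consumer schema carries no example of its own. A minimal consumer sketch, assuming a gpio-mux provider labelled 'mux' and the io-channel-mux consumer used elsewhere in these bindings:

    adc-mux {
        compatible = "io-channel-mux";
        io-channels = <&adc 0>;
        io-channel-names = "parent";

        mux-controls = <&mux>;
        mux-control-names = "adc"; /* optional label for the requested controller */

        channels = "sync", "in";
    };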
diff --git a/Documentation/devicetree/bindings/mux/mux-controller.txt b/Documentation/devicetree/bindings/mux/mux-controller.txt
deleted file mode 100644
index 4f47e4bd2fa0..000000000000
--- a/Documentation/devicetree/bindings/mux/mux-controller.txt
+++ /dev/null
@@ -1,157 +0,0 @@
-Common multiplexer controller bindings
-======================================
-
-A multiplexer (or mux) controller will have one, or several, consumer devices
-that uses the mux controller. Thus, a mux controller can possibly control
-several parallel multiplexers. Presumably there will be at least one
-multiplexer needed by each consumer, but a single mux controller can of course
-control several multiplexers for a single consumer.
-
-A mux controller provides a number of states to its consumers, and the state
-space is a simple zero-based enumeration. I.e. 0-1 for a 2-way multiplexer,
-0-7 for an 8-way multiplexer, etc.
-
-
-Consumers
----------
-
-Mux controller consumers should specify a list of mux controllers that they
-want to use with a property containing a 'mux-ctrl-list':
-
- mux-ctrl-list ::= <single-mux-ctrl> [mux-ctrl-list]
- single-mux-ctrl ::= <mux-ctrl-phandle> [mux-ctrl-specifier]
- mux-ctrl-phandle : phandle to mux controller node
- mux-ctrl-specifier : array of #mux-control-cells specifying the
- given mux controller (controller specific)
-
-Mux controller properties should be named "mux-controls". The exact meaning of
-each mux controller property must be documented in the device tree binding for
-each consumer. An optional property "mux-control-names" may contain a list of
-strings to label each of the mux controllers listed in the "mux-controls"
-property.
-
-Drivers for devices that use more than a single mux controller can use the
-"mux-control-names" property to map the name of the requested mux controller
-to an index into the list given by the "mux-controls" property.
-
-mux-ctrl-specifier typically encodes the chip-relative mux controller number.
-If the mux controller chip only provides a single mux controller, the
-mux-ctrl-specifier can typically be left out.
-
-Example:
-
- /* One consumer of a 2-way mux controller (one GPIO-line) */
- mux: mux-controller {
- compatible = "gpio-mux";
- #mux-control-cells = <0>;
-
- mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>;
- };
-
- adc-mux {
- compatible = "io-channel-mux";
- io-channels = <&adc 0>;
- io-channel-names = "parent";
-
- mux-controls = <&mux>;
- mux-control-names = "adc";
-
- channels = "sync", "in";
- };
-
-Note that in the example above, specifying the "mux-control-names" is redundant
-because there is only one mux controller in the list. However, if the driver
-for the consumer node in fact asks for a named mux controller, that name is of
-course still required.
-
- /*
- * Two consumers (one for an ADC line and one for an i2c bus) of
- * parallel 4-way multiplexers controlled by the same two GPIO-lines.
- */
- mux: mux-controller {
- compatible = "gpio-mux";
- #mux-control-cells = <0>;
-
- mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
- <&pioA 1 GPIO_ACTIVE_HIGH>;
- };
-
- adc-mux {
- compatible = "io-channel-mux";
- io-channels = <&adc 0>;
- io-channel-names = "parent";
-
- mux-controls = <&mux>;
-
- channels = "sync-1", "in", "out", "sync-2";
- };
-
- i2c-mux {
- compatible = "i2c-mux";
- i2c-parent = <&i2c1>;
-
- mux-controls = <&mux>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- ssd1307: oled@3c {
- /* ... */
- };
- };
-
- i2c@3 {
- reg = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pca9555: pca9555@20 {
- /* ... */
- };
- };
- };
-
-
-Mux controller nodes
---------------------
-
-Mux controller nodes must specify the number of cells used for the
-specifier using the '#mux-control-cells' property.
-
-Optionally, mux controller nodes can also specify the state the mux should
-have when it is idle. The idle-state property is used for this. If the
-idle-state is not present, the mux controller is typically left as is when
-it is idle. For multiplexer chips that expose several mux controllers, the
-idle-state property is an array with one idle state for each mux controller.
-
-The special value (-1) may be used to indicate that the mux should be left
-as is when it is idle. This is the default, but can still be useful for
-mux controller chips with more than one mux controller, particularly when
-there is a need to "step past" a mux controller and set some other idle
-state for a mux controller with a higher index.
-
-Some mux controllers have the ability to disconnect the input/output of the
-multiplexer. Using this disconnected high-impedance state as the idle state
-is indicated with idle state (-2).
-
-These constants are available in
-
- #include <dt-bindings/mux/mux.h>
-
-as MUX_IDLE_AS_IS (-1) and MUX_IDLE_DISCONNECT (-2).
-
-An example mux controller node look like this (the adg972a chip is a triple
-4-way multiplexer):
-
- mux: mux-controller@50 {
- compatible = "adi,adg792a";
- reg = <0x50>;
- #mux-control-cells = <1>;
-
- idle-state = <MUX_IDLE_DISCONNECT MUX_IDLE_AS_IS 2>;
- };
diff --git a/Documentation/devicetree/bindings/mux/mux-controller.yaml b/Documentation/devicetree/bindings/mux/mux-controller.yaml
new file mode 100644
index 000000000000..736a84c3b6a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/mux-controller.yaml
@@ -0,0 +1,182 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mux/mux-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common multiplexer controller provider bindings
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |
+ A multiplexer (or mux) controller will have one, or several, consumer devices
+ that use the mux controller. Thus, a mux controller can possibly control
+ several parallel multiplexers. Presumably there will be at least one
+ multiplexer needed by each consumer, but a single mux controller can of course
+ control several multiplexers for a single consumer.
+
+ A mux controller provides a number of states to its consumers, and the state
+ space is a simple zero-based enumeration. I.e. 0-1 for a 2-way multiplexer,
+ 0-7 for an 8-way multiplexer, etc.
+
+
+ Mux controller nodes
+ --------------------
+
+ Mux controller nodes must specify the number of cells used for the
+ specifier using the '#mux-control-cells' property.
+
+ Optionally, mux controller nodes can also specify the state the mux should
+ have when it is idle. The idle-state property is used for this. If the
+ idle-state is not present, the mux controller is typically left as is when
+ it is idle. For multiplexer chips that expose several mux controllers, the
+ idle-state property is an array with one idle state for each mux controller.
+
+ The special value (-1) may be used to indicate that the mux should be left
+ as is when it is idle. This is the default, but can still be useful for
+ mux controller chips with more than one mux controller, particularly when
+ there is a need to "step past" a mux controller and set some other idle
+ state for a mux controller with a higher index.
+
+ Some mux controllers have the ability to disconnect the input/output of the
+ multiplexer. Using this disconnected high-impedance state as the idle state
+ is indicated with idle state (-2).
+
+ These constants are available in
+
+ #include <dt-bindings/mux/mux.h>
+
+ as MUX_IDLE_AS_IS (-1) and MUX_IDLE_DISCONNECT (-2).
+
+ An example mux controller node looks like this (the adg792a chip is a triple
+ 4-way multiplexer):
+
+ mux: mux-controller@50 {
+ compatible = "adi,adg792a";
+ reg = <0x50>;
+ #mux-control-cells = <1>;
+
+ idle-state = <MUX_IDLE_DISCONNECT MUX_IDLE_AS_IS 2>;
+ };
+
+select:
+ anyOf:
+ - properties:
+ $nodename:
+ pattern: '^mux-controller'
+ - required:
+ - '#mux-control-cells'
+
+properties:
+ $nodename:
+ pattern: '^mux-controller(@.*|-[0-9a-f]+)?$'
+
+ '#mux-control-cells':
+ enum: [ 0, 1 ]
+
+ idle-state:
+ $ref: /schemas/types.yaml#/definitions/int32
+ minimum: -2
+
+ idle-states:
+ description: |
+ Mux controller nodes can specify the state the mux should have when it is
+ idle. If the idle-state is not present, the mux controller is typically
+ left as is when it is idle. For multiplexer chips that expose several mux
+ controllers, the idle-state property is an array with one idle state for
+ each mux controller.
+
+ The special value (-1) may be used to indicate that the mux should be left
+ as is when it is idle. This is the default, but can still be useful for
+ mux controller chips with more than one mux controller, particularly when
+ there is a need to "step past" a mux controller and set some other idle
+ state for a mux controller with a higher index.
+
+ Some mux controllers have the ability to disconnect the input/output of the
+ multiplexer. Using this disconnected high-impedance state as the idle state
+ is indicated with idle state (-2).
+ $ref: /schemas/types.yaml#/definitions/int32-array
+ items:
+ minimum: -2
+
+additionalProperties: true
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ /* One consumer of a 2-way mux controller (one GPIO-line) */
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+ mux-control-names = "adc";
+
+ channels = "sync", "in";
+ };
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ /*
+ * Two consumers (one for an ADC line and one for an i2c bus) of
+ * parallel 4-way multiplexers controlled by the same two GPIO-lines.
+ */
+ mux2: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux2>;
+
+ channels = "sync-1", "in", "out", "sync-2";
+ };
+
+ i2c-mux {
+ compatible = "i2c-mux";
+ i2c-parent = <&i2c1>;
+
+ mux-controls = <&mux2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ reg = <0x3c>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ reg = <0x20>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mux/reg-mux.txt b/Documentation/devicetree/bindings/mux/reg-mux.txt
deleted file mode 100644
index 4afd7ba73d60..000000000000
--- a/Documentation/devicetree/bindings/mux/reg-mux.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-Generic register bitfield-based multiplexer controller bindings
-
-Define register bitfields to be used to control multiplexers. The parent
-device tree node must be a device node to provide register r/w access.
-
-Required properties:
-- compatible : should be one of
- "reg-mux" : if parent device of mux controller is not syscon device
- "mmio-mux" : if parent device of mux controller is syscon device
-- #mux-control-cells : <1>
-- mux-reg-masks : an array of register offset and pre-shifted bitfield mask
- pairs, each describing a single mux control.
-* Standard mux-controller bindings as decribed in mux-controller.txt
-
-Optional properties:
-- idle-states : if present, the state the muxes will have when idle. The
- special state MUX_IDLE_AS_IS is the default.
-
-The multiplexer state of each multiplexer is defined as the value of the
-bitfield described by the corresponding register offset and bitfield mask
-pair in the mux-reg-masks array.
-
-Example 1:
-The parent device of mux controller is not a syscon device.
-
-&i2c0 {
- fpga@66 { // fpga connected to i2c
- compatible = "fsl,lx2160aqds-fpga", "fsl,fpga-qixis-i2c",
- "simple-mfd";
- reg = <0x66>;
-
- mux: mux-controller {
- compatible = "reg-mux";
- #mux-control-cells = <1>;
- mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
- <0x54 0x07>; /* 1: reg 0x54, bits 2:0 */
- };
- };
-};
-
-mdio-mux-1 {
- compatible = "mdio-mux-multiplexer";
- mux-controls = <&mux 0>;
- mdio-parent-bus = <&emdio1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mdio@8 {
- reg = <0x8>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- ..
- ..
-};
-
-mdio-mux-2 {
- compatible = "mdio-mux-multiplexer";
- mux-controls = <&mux 1>;
- mdio-parent-bus = <&emdio2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mdio@1 {
- reg = <0x1>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- ..
- ..
-};
-
-Example 2:
-The parent device of mux controller is syscon device.
-
-syscon {
- compatible = "syscon";
-
- mux: mux-controller {
- compatible = "mmio-mux";
- #mux-control-cells = <1>;
-
- mux-reg-masks = <0x3 0x30>, /* 0: reg 0x3, bits 5:4 */
- <0x3 0x40>, /* 1: reg 0x3, bit 6 */
- idle-states = <MUX_IDLE_AS_IS>, <0>;
- };
-};
-
-video-mux {
- compatible = "video-mux";
- mux-controls = <&mux 0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- ports {
- /* inputs 0..3 */
- port@0 {
- reg = <0>;
- };
- port@1 {
- reg = <1>;
- };
- port@2 {
- reg = <2>;
- };
- port@3 {
- reg = <3>;
- };
-
- /* output */
- port@4 {
- reg = <4>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/mux/reg-mux.yaml b/Documentation/devicetree/bindings/mux/reg-mux.yaml
new file mode 100644
index 000000000000..60d5746eb39d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/reg-mux.yaml
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mux/reg-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic register bitfield-based multiplexer controller bindings
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+description: |+
+ Define register bitfields to be used to control multiplexers. The parent
+ device tree node must be a device node to provide register r/w access.
+
+properties:
+ compatible:
+ enum:
+ - reg-mux # parent device of mux controller is not syscon device
+ - mmio-mux # parent device of mux controller is syscon device
+
+ reg: true
+
+ '#mux-control-cells':
+ const: 1
+
+ mux-reg-masks:
+ description: An array of register offset and pre-shifted bitfield mask
+ pairs, each describing a single mux control.
+
+ idle-states: true
+
+required:
+ - compatible
+ - mux-reg-masks
+ - '#mux-control-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ /* The parent device of mux controller is not a syscon device. */
+
+ #include <dt-bindings/mux/mux.h>
+
+ mux-controller {
+ compatible = "reg-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks =
+ <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
+ <0x54 0x07>; /* 1: reg 0x54, bits 2:0 */
+ };
+
+ mdio-mux-1 {
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mux1 0>;
+ mdio-parent-bus = <&emdio1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@8 {
+ reg = <0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ mdio-mux-2 {
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mux1 1>;
+ mdio-parent-bus = <&emdio2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@1 {
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ - |
+ /* The parent device of mux controller is syscon device. */
+
+ #include <dt-bindings/mux/mux.h>
+ syscon@1000 {
+ compatible = "fsl,imx7d-iomuxc-gpr", "fsl,imx6q-iomuxc-gpr", "syscon", "simple-mfd";
+ reg = <0x1000 0x100>;
+
+ mux2: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+
+ mux-reg-masks =
+ <0x3 0x30>, /* 0: reg 0x3, bits 5:4 */
+ <0x3 0x40>; /* 1: reg 0x3, bit 6 */
+ idle-states = <MUX_IDLE_AS_IS>, <0>;
+ };
+ };
+
+ video-mux {
+ compatible = "video-mux";
+ mux-controls = <&mux2 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* inputs 0..3 */
+ port@0 {
+ reg = <0>;
+ };
+ port@1 {
+ reg = <1>;
+ };
+ port@2 {
+ reg = <2>;
+ };
+ port@3 {
+ reg = <3>;
+ };
+
+ /* output */
+ port@4 {
+ reg = <4>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index 0467441d7037..608e1d62bed5 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -43,7 +43,6 @@ allOf:
properties:
clocks:
minItems: 3
- maxItems: 4
items:
- description: GMAC main clock
- description: First parent clock of the internal mux
@@ -52,7 +51,6 @@ allOf:
clock-names:
minItems: 3
- maxItems: 4
items:
- const: stmmaceth
- const: clkin0
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml b/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
index 2f46e45dcd60..a93d2f165899 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -23,14 +23,12 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: RX interrupt
- description: TX interrupt
interrupt-names:
minItems: 1
- maxItems: 2
items:
- const: rx
- const: tx
diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
index b58843f29591..deb9e852ea27 100644
--- a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
+++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
@@ -17,7 +17,7 @@ Optional properties:
- clocks: phandle of the core clock which drives the mdio block.
Additional information regarding generic multiplexer properties can be found
-at- Documentation/devicetree/bindings/net/mdio-mux.txt
+at- Documentation/devicetree/bindings/net/mdio-mux.yaml
for example:
diff --git a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
index 798fa5fb7bb2..f84e31348d80 100644
--- a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
+++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -30,14 +30,12 @@ properties:
- description: interrupt line0
- description: interrupt line1
minItems: 1
- maxItems: 2
interrupt-names:
items:
- const: int0
- const: int1
minItems: 1
- maxItems: 2
clocks:
items:
diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt
index 2d504256b0d8..366479806acb 100644
--- a/Documentation/devicetree/bindings/net/can/c_can.txt
+++ b/Documentation/devicetree/bindings/net/can/c_can.txt
@@ -19,10 +19,10 @@ The following are mandatory properties for Keystone 2 66AK2G SoCs only:
- power-domains : Should contain a phandle to a PM domain provider node
and an args specifier containing the DCAN device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
- clocks : CAN functional clock phandle. This property is as per the
binding,
- Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+ Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
Optional properties:
- syscon-raminit : Handle to system control region that contains the
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
index d730fe5a4355..d159ac78cec1 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
@@ -48,14 +48,12 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: switch's main clock
- description: dividing of the switch core clock
clock-names:
minItems: 1
- maxItems: 2
items:
- const: sw_switch
- const: sw_switch_mdiv
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
index 0b8a05dd52e6..f978f8719d8e 100644
--- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -67,8 +67,8 @@ properties:
reg:
oneOf:
- enum:
- - 0
- - 1
+ - 0
+ - 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt
index f7da3d73ca1b..32821066a85b 100644
--- a/Documentation/devicetree/bindings/net/gpmc-eth.txt
+++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt
@@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
For the properties relevant to the ethernet controller connected to the GPMC
refer to the binding documentation of the device. For example, the documentation
-for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt
+for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml
Child nodes need to specify the GPMC bus address width using the "bank-width"
property but is possible that an ethernet controller also has a property to
diff --git a/Documentation/devicetree/bindings/net/imx-dwmac.txt b/Documentation/devicetree/bindings/net/imx-dwmac.txt
deleted file mode 100644
index 921d522fe8d7..000000000000
--- a/Documentation/devicetree/bindings/net/imx-dwmac.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-IMX8 glue layer controller, NXP imx8 families support Synopsys MAC 5.10a IP.
-
-This file documents platform glue layer for IMX.
-Please see stmmac.txt for the other unchanged properties.
-
-The device node has following properties.
-
-Required properties:
-- compatible: Should be "nxp,imx8mp-dwmac-eqos" to select glue layer
- and "snps,dwmac-5.10a" to select IP version.
-- clocks: Must contain a phandle for each entry in clock-names.
-- clock-names: Should be "stmmaceth" for the host clock.
- Should be "pclk" for the MAC apb clock.
- Should be "ptp_ref" for the MAC timer clock.
- Should be "tx" for the MAC RGMII TX clock:
- Should be "mem" for EQOS MEM clock.
- - "mem" clock is required for imx8dxl platform.
- - "mem" clock is not required for imx8mp platform.
-- interrupt-names: Should contain a list of interrupt names corresponding to
- the interrupts in the interrupts property, if available.
- Should be "macirq" for the main MAC IRQ
- Should be "eth_wake_irq" for the IT which wake up system
-- intf_mode: Should be phandle/offset pair. The phandle to the syscon node which
- encompases the GPR register, and the offset of the GPR register.
- - required for imx8mp platform.
- - is optional for imx8dxl platform.
-
-Optional properties:
-- intf_mode: is optional for imx8dxl platform.
-- snps,rmii_refclk_ext: to select RMII reference clock from external.
-
-Example:
- eqos: ethernet@30bf0000 {
- compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
- reg = <0x30bf0000 0x10000>;
- interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "eth_wake_irq", "macirq";
- clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
- <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
- <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
- <&clk IMX8MP_CLK_ENET_QOS>;
- clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
- assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
- <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
- <&clk IMX8MP_CLK_ENET_QOS>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
- <&clk IMX8MP_SYS_PLL2_100M>,
- <&clk IMX8MP_SYS_PLL2_125M>;
- assigned-clock-rates = <0>, <100000000>, <125000000>;
- nvmem-cells = <&eth_mac0>;
- nvmem-cell-names = "mac-address";
- nvmem_macaddr_swap;
- intf_mode = <&gpr 0x4>;
- status = "disabled";
- };
diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt
deleted file mode 100644
index 4d91a36c5cf5..000000000000
--- a/Documentation/devicetree/bindings/net/mdio-gpio.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-MDIO on GPIOs
-
-Currently defined compatibles:
-- virtual,gpio-mdio
-- microchip,mdio-smi0
-
-MDC and MDIO lines connected to GPIO controllers are listed in the
-gpios property as described in section VIII.1 in the following order:
-
-MDC, MDIO.
-
-Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
-node.
-
-Example:
-
-aliases {
- mdio-gpio0 = &mdio0;
-};
-
-mdio0: mdio {
- compatible = "virtual,mdio-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- gpios = <&qe_pio_a 11
- &qe_pio_c 6>;
-};
diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.yaml b/Documentation/devicetree/bindings/net/mdio-gpio.yaml
new file mode 100644
index 000000000000..1d83b8dcce2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-gpio.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mdio-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MDIO on GPIOs
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Heiner Kallweit <hkallweit1@gmail.com>
+ - Russell King <linux@armlinux.org.uk>
+
+allOf:
+ - $ref: "mdio.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - virtual,mdio-gpio
+ - microchip,mdio-smi0
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ gpios:
+ minItems: 2
+ items:
+ - description: MDC
+ - description: MDIO
+ - description: MDO
+
+#Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
+#node.
+additionalProperties:
+ type: object
+
+examples:
+ - |
+ aliases {
+ mdio-gpio0 = &mdio0;
+ };
+
+ mdio0: mdio {
+ compatible = "virtual,mdio-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpios = <&qe_pio_a 11>,
+ <&qe_pio_c 6>;
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
deleted file mode 100644
index 694987d3c17a..000000000000
--- a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-Properties for an MDIO bus multiplexer/switch controlled by GPIO pins.
-
-This is a special case of a MDIO bus multiplexer. One or more GPIO
-lines are used to control which child bus is connected.
-
-Required properties in addition to the generic multiplexer properties:
-
-- compatible : mdio-mux-gpio.
-- gpios : GPIO specifiers for each GPIO line. One or more must be specified.
-
-
-Example :
-
- /* The parent MDIO bus. */
- smi1: mdio@1180000001900 {
- compatible = "cavium,octeon-3860-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x11800 0x00001900 0x0 0x40>;
- };
-
- /*
- An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
- pair of GPIO lines. Child busses 2 and 3 populated with 4
- PHYs each.
- */
- mdio-mux {
- compatible = "mdio-mux-gpio";
- gpios = <&gpio1 3 0>, <&gpio1 4 0>;
- mdio-parent-bus = <&smi1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@2 {
- reg = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy11: ethernet-phy@1 {
- reg = <1>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- phy12: ethernet-phy@2 {
- reg = <2>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- phy13: ethernet-phy@3 {
- reg = <3>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- phy14: ethernet-phy@4 {
- reg = <4>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- };
-
- mdio@3 {
- reg = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy21: ethernet-phy@1 {
- reg = <1>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- phy22: ethernet-phy@2 {
- reg = <2>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- phy23: ethernet-phy@3 {
- reg = <3>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- phy24: ethernet-phy@4 {
- reg = <4>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml b/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml
new file mode 100644
index 000000000000..71c25c4580ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mdio-mux-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Properties for an MDIO bus multiplexer/switch controlled by GPIO pins.
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+
+description:
+ This is a special case of an MDIO bus multiplexer. One or more GPIO
+ lines are used to control which child bus is connected.
+
+allOf:
+ - $ref: /schemas/net/mdio-mux.yaml#
+
+properties:
+ compatible:
+ const: mdio-mux-gpio
+
+ gpios:
+ description:
+ List of GPIOs used to control the multiplexer, least significant bit first.
+ minItems: 1
+ maxItems: 32
+
+required:
+ - compatible
+ - gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ /*
+ An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
+ pair of GPIO lines. Child busses 2 and 3 populated with 4
+ PHYs each.
+ */
+ mdio-mux {
+ compatible = "mdio-mux-gpio";
+ gpios = <&gpio1 3 0>, <&gpio1 4 0>;
+ mdio-parent-bus = <&smi1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@2 {
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@1 {
+ reg = <1>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <10 8>; /* Pin 10, active low */
+ };
+ ethernet-phy@2 {
+ reg = <2>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <10 8>; /* Pin 10, active low */
+ };
+ ethernet-phy@3 {
+ reg = <3>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <10 8>; /* Pin 10, active low */
+ };
+ ethernet-phy@4 {
+ reg = <4>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <10 8>; /* Pin 10, active low */
+ };
+ };
+
+ mdio@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@1 {
+ reg = <1>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ ethernet-phy@2 {
+ reg = <2>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ ethernet-phy@3 {
+ reg = <3>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ ethernet-phy@4 {
+ reg = <4>;
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
deleted file mode 100644
index 065e8bdb957d..000000000000
--- a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-Properties for an MDIO bus multiplexer controlled by a memory-mapped device
-
-This is a special case of a MDIO bus multiplexer. A memory-mapped device,
-like an FPGA, is used to control which child bus is connected. The mdio-mux
-node must be a child of the memory-mapped device. The driver currently only
-supports devices with 8, 16 or 32-bit registers.
-
-Required properties in addition to the generic multiplexer properties:
-
-- compatible : string, must contain "mdio-mux-mmioreg"
-
-- reg : integer, contains the offset of the register that controls the bus
- multiplexer. The size field in the 'reg' property is the size of
- register, and must therefore be 1, 2, or 4.
-
-- mux-mask : integer, contains an eight-bit mask that specifies which
- bits in the register control the actual bus multiplexer. The
- 'reg' property of each child mdio-mux node must be constrained by
- this mask.
-
-Example:
-
-The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
-For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
-A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
-BRDCFG1 that control the actual mux.
-
- /* The FPGA node */
- fpga: board-control@3,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
- reg = <3 0 0x30>;
- ranges = <0 3 0 0x30>;
-
- mdio-mux-emi2 {
- compatible = "mdio-mux-mmioreg", "mdio-mux";
- mdio-parent-bus = <&xmdio0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <9 1>; // BRDCFG1
- mux-mask = <0x6>; // EMI2
-
- emi2_slot1: mdio@0 { // Slot 1 XAUI (FM2)
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy_xgmii_slot1: ethernet-phy@0 {
- compatible = "ethernet-phy-ieee802.3-c45";
- reg = <4>;
- };
- };
-
- emi2_slot2: mdio@2 { // Slot 2 XAUI (FM1)
- reg = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy_xgmii_slot2: ethernet-phy@4 {
- compatible = "ethernet-phy-ieee802.3-c45";
- reg = <0>;
- };
- };
- };
- };
-
- /* The parent MDIO bus. */
- xmdio0: mdio@f1000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,fman-xmdio";
- reg = <0xf1000 0x1000>;
- interrupts = <100 1 0 0>;
- };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.yaml b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.yaml
new file mode 100644
index 000000000000..cf86bb0b0b62
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mdio-mux-mmioreg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Properties for an MDIO bus multiplexer controlled by a memory-mapped device
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+
+description: |+
+ This is a special case of an MDIO bus multiplexer. A memory-mapped device,
+ like an FPGA, is used to control which child bus is connected. The mdio-mux
+ node must be a child of the memory-mapped device. The driver currently only
+ supports devices with 8, 16 or 32-bit registers.
+
+allOf:
+ - $ref: /schemas/net/mdio-mux.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: mdio-mux-mmioreg
+ - const: mdio-mux
+
+ reg:
+ description: Contains the offset of the register that controls the bus
+ multiplexer. The size field in the 'reg' property is the size of the register,
+ and must therefore be 1, 2, or 4.
+ maxItems: 1
+
+ mux-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Contains an eight-bit mask that specifies which bits in the
+ register control the actual bus multiplexer. The 'reg' property of each
+ child mdio-mux node must be constrained by this mask.
+
+required:
+ - compatible
+ - reg
+ - mux-mask
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio-mux@9 {
+ compatible = "mdio-mux-mmioreg", "mdio-mux";
+ mdio-parent-bus = <&xmdio0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <9 1>; // BRDCFG1
+ mux-mask = <0x6>; // EMI2
+
+ mdio@0 { // Slot 1 XAUI (FM2)
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy_xgmii_slot1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <4>;
+ };
+ };
+
+ mdio@2 { // Slot 2 XAUI (FM1)
+ reg = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <4>;
+ };
+ };
+ };
+...
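
The description requires the mdio-mux node to be a child of the memory-mapped device, which the stripped-down example does not show. A hedged sketch of that parent/child relationship, adapted from the removed text binding; the FPGA compatible strings and addresses are assumptions carried over from it:

    board-control@3,0 {
        compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
        reg = <3 0 0x30>;
        ranges = <0 3 0 0x30>;
        #address-cells = <1>;
        #size-cells = <1>;

        mdio-mux@9 {
            compatible = "mdio-mux-mmioreg", "mdio-mux";
            mdio-parent-bus = <&xmdio0>;
            reg = <9 1>;      /* BRDCFG1 */
            mux-mask = <0x6>; /* EMI2 */
            #address-cells = <1>;
            #size-cells = <0>;
        };
    };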
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt
deleted file mode 100644
index 534e38058fe0..000000000000
--- a/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-Properties for an MDIO bus multiplexer consumer device
-
-This is a special case of MDIO mux when MDIO mux is defined as a consumer
-of a mux producer device. The mux producer can be of any type like mmio mux
-producer, gpio mux producer or generic register based mux producer.
-
-Required properties in addition to the MDIO Bus multiplexer properties:
-
-- compatible : should be "mmio-mux-multiplexer"
-- mux-controls : mux controller node to use for operating the mux
-- mdio-parent-bus : phandle to the parent MDIO bus.
-
-each child node of mdio bus multiplexer consumer device represent a mdio
-bus.
-
-for more information please refer
-Documentation/devicetree/bindings/mux/mux-controller.txt
-and Documentation/devicetree/bindings/net/mdio-mux.txt
-
-Example:
-In below example the Mux producer and consumer are separate nodes.
-
-&i2c0 {
- fpga@66 { // fpga connected to i2c
- compatible = "fsl,lx2160aqds-fpga", "fsl,fpga-qixis-i2c",
- "simple-mfd";
- reg = <0x66>;
-
- mux: mux-controller { // Mux Producer
- compatible = "reg-mux";
- #mux-control-cells = <1>;
- mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
- <0x54 0x07>; /* 1: reg 0x54, bits 2:0 */
- };
- };
-};
-
-mdio-mux-1 { // Mux consumer
- compatible = "mdio-mux-multiplexer";
- mux-controls = <&mux 0>;
- mdio-parent-bus = <&emdio1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mdio@8 {
- reg = <0x8>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- ..
- ..
-};
-
-mdio-mux-2 { // Mux consumer
- compatible = "mdio-mux-multiplexer";
- mux-controls = <&mux 1>;
- mdio-parent-bus = <&emdio2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mdio@1 {
- reg = <0x1>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- ..
- ..
-};
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.yaml b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.yaml
new file mode 100644
index 000000000000..282987074ee4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mdio-mux-multiplexer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Properties for an MDIO bus multiplexer consumer device
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+
+description: |+
+ This is a special case of an MDIO mux in which the MDIO mux is defined as
+ a consumer of a mux producer device. The mux producer can be of any type,
+ such as an MMIO mux producer, a GPIO mux producer or a generic
+ register-based mux producer.
+
+
+allOf:
+ - $ref: /schemas/net/mdio-mux.yaml#
+
+properties:
+ compatible:
+ const: mdio-mux-multiplexer
+
+ mux-controls:
+ maxItems: 1
+
+required:
+ - compatible
+ - mux-controls
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mux: mux-controller { // Mux Producer
+ compatible = "reg-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
+ <0x54 0x07>; /* 1: reg 0x54, bits 2:0 */
+ };
+
+ mdio-mux-1 { // Mux consumer
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mux 0>;
+ mdio-parent-bus = <&emdio1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@8 {
+ reg = <0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ mdio-mux-2 { // Mux consumer
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mux 1>;
+ mdio-parent-bus = <&emdio2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@1 {
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt
deleted file mode 100644
index f58571f36570..000000000000
--- a/Documentation/devicetree/bindings/net/mdio-mux.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-Common MDIO bus multiplexer/switch properties.
-
-An MDIO bus multiplexer/switch will have several child busses that are
-numbered uniquely in a device dependent manner. The nodes for an MDIO
-bus multiplexer/switch will have one child node for each child bus.
-
-Required properties:
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- mdio-parent-bus : phandle to the parent MDIO bus.
-
-- Other properties specific to the multiplexer/switch hardware.
-
-Required properties for child nodes:
-- #address-cells = <1>;
-- #size-cells = <0>;
-- reg : The sub-bus number.
-
-
-Example :
-
- /* The parent MDIO bus. */
- smi1: mdio@1180000001900 {
- compatible = "cavium,octeon-3860-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x11800 0x00001900 0x0 0x40>;
- };
-
- /*
- An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
- pair of GPIO lines. Child busses 2 and 3 populated with 4
- PHYs each.
- */
- mdio-mux {
- compatible = "mdio-mux-gpio";
- gpios = <&gpio1 3 0>, <&gpio1 4 0>;
- mdio-parent-bus = <&smi1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@2 {
- reg = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy11: ethernet-phy@1 {
- reg = <1>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- phy12: ethernet-phy@2 {
- reg = <2>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- phy13: ethernet-phy@3 {
- reg = <3>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- phy14: ethernet-phy@4 {
- reg = <4>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <10 8>; /* Pin 10, active low */
- };
- };
-
- mdio@3 {
- reg = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy21: ethernet-phy@1 {
- reg = <1>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- phy22: ethernet-phy@2 {
- reg = <2>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- phy23: ethernet-phy@3 {
- reg = <3>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- phy24: ethernet-phy@4 {
- reg = <4>;
- marvell,reg-init = <3 0x10 0 0x5777>,
- <3 0x11 0 0x00aa>,
- <3 0x12 0 0x4105>,
- <3 0x13 0 0x0a60>;
- interrupt-parent = <&gpio>;
- interrupts = <12 8>; /* Pin 12, active low */
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.yaml b/Documentation/devicetree/bindings/net/mdio-mux.yaml
new file mode 100644
index 000000000000..d169adf5d9f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mdio-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common MDIO bus multiplexer/switch properties.
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+
+description: |+
+ An MDIO bus multiplexer/switch will have several child buses that are
+ numbered uniquely in a device-dependent manner. The node for an MDIO
+ bus multiplexer/switch will have one child node for each child bus.
+
+properties:
+ $nodename:
+ pattern: '^mdio-mux[\-@]?'
+
+ mdio-parent-bus:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle of the MDIO bus that this multiplexer's master-side port is
+ connected to.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^mdio@[0-9a-f]+$':
+ type: object
+
+ properties:
+ reg:
+ maxItems: 1
+ description: The sub-bus number.
+
+additionalProperties: true
+
+...
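The common schema above deliberately carries no example of its own; a minimal sketch of a GPIO-controlled MDIO mux that satisfies these properties, condensed from the mdio-mux.txt example removed below (PHY children omitted), could look like this:

    /* The parent MDIO bus. */
    smi1: mdio@1180000001900 {
        compatible = "cavium,octeon-3860-mdio";
        #address-cells = <1>;
        #size-cells = <0>;
        reg = <0x11800 0x00001900 0x0 0x40>;
    };

    /* GPIO-controlled multiplexer with two child buses, 2 and 3. */
    mdio-mux {
        compatible = "mdio-mux-gpio";
        gpios = <&gpio1 3 0>, <&gpio1 4 0>;
        mdio-parent-bus = <&smi1>;
        #address-cells = <1>;
        #size-cells = <0>;

        mdio@2 {
            reg = <2>;
            #address-cells = <1>;
            #size-cells = <0>;
        };

        mdio@3 {
            reg = <3>;
            #address-cells = <1>;
            #size-cells = <0>;
        };
    };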
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
new file mode 100644
index 000000000000..5629b2e4ccf8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8 DWMAC glue layer Device Tree Bindings
+
+maintainers:
+ - Joakim Zhang <qiangqing.zhang@nxp.com>
+
+# We need a select here so we don't match all nodes with 'snps,dwmac'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nxp,imx8mp-dwmac-eqos
+ - nxp,imx8dxl-dwmac-eqos
+ required:
+ - compatible
+
+allOf:
+ - $ref: "snps,dwmac.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - nxp,imx8mp-dwmac-eqos
+ - nxp,imx8dxl-dwmac-eqos
+ - const: snps,dwmac-5.10a
+
+ clocks:
+ minItems: 3
+ maxItems: 5
+ items:
+ - description: MAC host clock
+ - description: MAC apb clock
+ - description: MAC timer clock
+ - description: MAC RGMII TX clock
+ - description: EQOS MEM clock
+
+ clock-names:
+ minItems: 3
+ maxItems: 5
+ contains:
+ enum:
+ - stmmaceth
+ - pclk
+ - ptp_ref
+ - tx
+ - mem
+
+ intf_mode:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Should be a phandle/offset pair: the phandle to the syscon node which
+ encompasses the GPR register, and the offset of the GPR register.
+
+ snps,rmii_refclk_ext:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Use an external RMII reference clock.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/imx8mp-clock.h>
+
+ eqos: ethernet@30bf0000 {
+ compatible = "nxp,imx8mp-dwmac-eqos","snps,dwmac-5.10a";
+ reg = <0x30bf0000 0x10000>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
+ <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
+ <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
+ <&clk IMX8MP_CLK_ENET_QOS>;
+ clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
+ phy-mode = "rgmii";
+ status = "disabled";
+ };
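The example above only covers the i.MX8MP case; the sketch below shows how the intf_mode and snps,rmii_refclk_ext properties described earlier might be used on an i.MX8DXL node. The syscon phandle, GPR offset and clock specifiers are illustrative placeholders rather than values from a real board file, and the interrupt properties are omitted for brevity:

    ethernet@5b050000 {
        compatible = "nxp,imx8dxl-dwmac-eqos", "snps,dwmac-5.10a";
        reg = <0x5b050000 0x10000>;
        clocks = <&eqos_clk 0>, <&eqos_clk 1>, <&eqos_clk 2>, <&eqos_clk 3>;
        clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
        intf_mode = <&gpr 0x30>;      /* syscon phandle + GPR register offset */
        snps,rmii_refclk_ext;         /* take the RMII reference clock from an external source */
        phy-mode = "rmii";
    };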
diff --git a/Documentation/devicetree/bindings/net/smsc,lan9115.yaml b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml
new file mode 100644
index 000000000000..f86667cbcca8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
+
+maintainers:
+ - Shawn Guo <shawnguo@kernel.org>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: smsc,lan9115
+ - items:
+ - enum:
+ - smsc,lan89218
+ - smsc,lan9117
+ - smsc,lan9118
+ - smsc,lan9220
+ - smsc,lan9221
+ - const: smsc,lan9115
+
+ reg:
+ maxItems: 1
+
+ reg-shift: true
+
+ reg-io-width:
+ enum: [ 2, 4 ]
+ default: 2
+
+ interrupts:
+ minItems: 1
+ items:
+ - description:
+ LAN interrupt line
+ - description:
+ Optional PME (power management event) interrupt that is able to wake
+ up the host system with a 50ms pulse on network activity
+
+ clocks:
+ maxItems: 1
+
+ phy-mode: true
+
+ smsc,irq-active-high:
+ type: boolean
+ description: Indicates the IRQ polarity is active-high
+
+ smsc,irq-push-pull:
+ type: boolean
+ description: Indicates the IRQ type is push-pull
+
+ smsc,force-internal-phy:
+ type: boolean
+ description: Forces SMSC LAN controller to use internal PHY
+
+ smsc,force-external-phy:
+ type: boolean
+ description: Forces SMSC LAN controller to use external PHY
+
+ smsc,save-mac-address:
+ type: boolean
+ description:
+ Indicates that the MAC address needs to be saved before resetting the
+ controller
+
+ reset-gpios:
+ maxItems: 1
+ description:
+ A GPIO line connected to the RESET (active low) signal of the device.
+ On many systems this is wired high so the device comes out of reset at
+ power-on, but if the line is under program control, this optional GPIO
+ can be used to take the device out of reset.
+
+ vdd33a-supply:
+ description: 3.3V analog power supply
+
+ vddvario-supply:
+ description: IO logic power supply
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*",
+# "gpmc,*", ...) to be found, that actually depend on the compatible value of
+# the parent node.
+additionalProperties: true
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ ethernet@f4000000 {
+ compatible = "smsc,lan9220", "smsc,lan9115";
+ reg = <0xf4000000 0x2000000>;
+ phy-mode = "mii";
+ interrupt-parent = <&gpio1>;
+ interrupts = <31>, <32>;
+ reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
+ reg-io-width = <4>;
+ smsc,irq-push-pull;
+ };
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
deleted file mode 100644
index acfafc8e143c..000000000000
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
-
-Required properties:
-- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
-- reg : Address and length of the io space for SMSC LAN
-- interrupts : one or two interrupt specifiers
- - The first interrupt is the SMSC LAN interrupt line
- - The second interrupt (if present) is the PME (power
- management event) interrupt that is able to wake up the host
- system with a 50ms pulse on network activity
-- phy-mode : See ethernet.txt file in the same directory
-
-Optional properties:
-- reg-shift : Specify the quantity to shift the register offsets by
-- reg-io-width : Specify the size (in bytes) of the IO accesses that
- should be performed on the device. Valid value for SMSC LAN is
- 2 or 4. If it's omitted or invalid, the size would be 2.
-- smsc,irq-active-high : Indicates the IRQ polarity is active-high
-- smsc,irq-push-pull : Indicates the IRQ type is push-pull
-- smsc,force-internal-phy : Forces SMSC LAN controller to use
- internal PHY
-- smsc,force-external-phy : Forces SMSC LAN controller to use
- external PHY
-- smsc,save-mac-address : Indicates that mac address needs to be saved
- before resetting the controller
-- reset-gpios : a GPIO line connected to the RESET (active low) signal
- of the device. On many systems this is wired high so the device goes
- out of reset at power-on, but if it is under program control, this
- optional GPIO can wake up in response to it.
-- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
-
-Examples:
-
-lan9220@f4000000 {
- compatible = "smsc,lan9220", "smsc,lan9115";
- reg = <0xf4000000 0x2000000>;
- phy-mode = "mii";
- interrupt-parent = <&gpio1>;
- interrupts = <31>, <32>;
- reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
- reg-io-width = <4>;
- smsc,irq-push-pull;
-};
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 56f2235f5fb5..42689b7d03a2 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -28,6 +28,7 @@ select:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
+ - snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
@@ -82,6 +83,7 @@ properties:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
+ - snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
@@ -91,7 +93,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 3
items:
- description: Combined signal for various interrupt events
- description: The interrupt to manage the remote wake-up packet detection
@@ -99,7 +100,6 @@ properties:
interrupt-names:
minItems: 1
- maxItems: 3
items:
- const: macirq
- const: eth_wake_irq
@@ -377,6 +377,7 @@ allOf:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
+ - snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
index 27eb6066793f..d3f05d5934d5 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -46,17 +46,17 @@ properties:
clocks:
minItems: 3
- maxItems: 5
items:
- description: GMAC main clock
- description: MAC TX clock
- description: MAC RX clock
- description: For MPU family, used for power mode
- description: For MPU family, used for PHY without quartz
+ - description: PTP clock
clock-names:
minItems: 3
- maxItems: 5
+ maxItems: 6
contains:
enum:
- stmmaceth
@@ -64,6 +64,7 @@ properties:
- mac-clk-rx
- ethstp
- eth-ck
+ - ptp_ref
st,syscon:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index f90557f6deb8..b9589a0daa5c 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -25,14 +25,12 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: PCIe host controller
- description: builtin MSI controller
interrupt-names:
minItems: 1
- maxItems: 2
items:
- const: pcie
- const: msi
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index de4b2baf91e8..d8971ab99274 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -38,6 +38,9 @@ Optional properties:
The regulator will be enabled when initializing the PCIe host and
disabled either as part of the init process or when shutting down the
host.
+- vph-supply: Should specify the regulator in charge of VPH, one of the three
+ PCIe PHY power supplies. This regulator can be fed by either a 1.8V or a
+ 3.3V voltage supply.
Additional required properties for imx6sx-pcie:
- clock names: Must include the following additional entries:
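A sketch of how the new vph-supply property might be wired on a board; the regulator label is purely illustrative:

    &pcie {
        vph-supply = <&reg_3v3_pcie_phy>;   /* VPH PHY rail, 1.8V or 3.3V */
        status = "okay";
    };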
diff --git a/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml b/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
new file mode 100644
index 000000000000..debfb54a8042
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/intel,ixp4xx-pci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel IXP4xx PCI controller
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: PCI host controller found in the Intel IXP4xx SoC series.
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - intel,ixp42x-pci
+ - intel,ixp43x-pci
+ description: The two supported variants are ixp42x and ixp43x,
+ though more variants may exist.
+
+ reg:
+ items:
+ - description: IXP4xx-specific registers
+
+ interrupts:
+ items:
+ - description: Main PCI interrupt
+ - description: PCI DMA interrupt 1
+ - description: PCI DMA interrupt 2
+
+ ranges:
+ maxItems: 2
+ description: Typically one memory range of 64MB and one IO
+ space range of 64KB.
+
+ dma-ranges:
+ maxItems: 1
+ description: The DMA range tells the PCI host at which addresses
+ the RAM is located. It can map only 64MB, so if the RAM is larger
+ than 64MB, DMA access has to be restricted to these
+ addresses.
+
+ "#interrupt-cells": true
+
+ interrupt-map: true
+
+ interrupt-map-mask:
+ items:
+ - const: 0xf800
+ - const: 0
+ - const: 0
+ - const: 7
+
+required:
+ - compatible
+ - reg
+ - dma-ranges
+ - "#interrupt-cells"
+ - interrupt-map
+ - interrupt-map-mask
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ pci@c0000000 {
+ compatible = "intel,ixp43x-pci";
+ reg = <0xc0000000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x00 0xff>;
+
+ ranges =
+ <0x02000000 0 0x48000000 0x48000000 0 0x04000000>,
+ <0x01000000 0 0x00000000 0x4c000000 0 0x00010000>;
+ dma-ranges =
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map =
+ <0x0800 0 0 1 &gpio0 11 3>, /* INT A on slot 1 is irq 11 */
+ <0x0800 0 0 2 &gpio0 10 3>, /* INT B on slot 1 is irq 10 */
+ <0x0800 0 0 3 &gpio0 9 3>, /* INT C on slot 1 is irq 9 */
+ <0x0800 0 0 4 &gpio0 8 3>, /* INT D on slot 1 is irq 8 */
+ <0x1000 0 0 1 &gpio0 10 3>, /* INT A on slot 2 is irq 10 */
+ <0x1000 0 0 2 &gpio0 9 3>, /* INT B on slot 2 is irq 9 */
+ <0x1000 0 0 3 &gpio0 8 3>, /* INT C on slot 2 is irq 8 */
+ <0x1000 0 0 4 &gpio0 11 3>, /* INT D on slot 2 is irq 11 */
+ <0x1800 0 0 1 &gpio0 9 3>, /* INT A on slot 3 is irq 9 */
+ <0x1800 0 0 2 &gpio0 8 3>, /* INT B on slot 3 is irq 8 */
+ <0x1800 0 0 3 &gpio0 11 3>, /* INT C on slot 3 is irq 11 */
+ <0x1800 0 0 4 &gpio0 10 3>; /* INT D on slot 3 is irq 10 */
+ };
diff --git a/Documentation/devicetree/bindings/pci/loongson.yaml b/Documentation/devicetree/bindings/pci/loongson.yaml
index 81bae060cbde..82bc6c486ca3 100644
--- a/Documentation/devicetree/bindings/pci/loongson.yaml
+++ b/Documentation/devicetree/bindings/pci/loongson.yaml
@@ -24,7 +24,6 @@ properties:
reg:
minItems: 1
- maxItems: 2
items:
- description: CFG0 standard config space register
- description: CFG1 extended config space register
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
index e7b1f9892da4..742206dbd965 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
@@ -70,7 +70,6 @@ properties:
reset-names:
minItems: 1
- maxItems: 2
items:
- const: phy
- const: mac
diff --git a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
index 04251d71f56b..fb95c276a986 100644
--- a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
@@ -26,14 +26,12 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: PCIe host controller
- description: builtin MSI controller
interrupt-names:
minItems: 1
- maxItems: 2
items:
- const: pcie
- const: msi
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
deleted file mode 100644
index 47202a2938f2..000000000000
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ /dev/null
@@ -1,115 +0,0 @@
-TI Keystone PCIe interface
-
-Keystone PCI host Controller is based on the Synopsys DesignWare PCI
-hardware version 3.65. It shares common functions with the PCIe DesignWare
-core driver and inherits common properties defined in
-Documentation/devicetree/bindings/pci/designware-pcie.txt
-
-Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt
-for the details of DesignWare DT bindings. Additional properties are
-described here as well as properties that are not applicable.
-
-Required Properties:-
-
-compatibility: Should be "ti,keystone-pcie" for RC on Keystone2 SoC
- Should be "ti,am654-pcie-rc" for RC on AM654x SoC
-reg: Three register ranges as listed in the reg-names property
-reg-names: "dbics" for the DesignWare PCIe registers, "app" for the
- TI specific application registers, "config" for the
- configuration space address
-
-pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
- interrupt-cells: should be set to 1
- interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
- (required if the compatible is "ti,keystone-pcie")
-msi-map: As specified in Documentation/devicetree/bindings/pci/pci-msi.txt
- (required if the compatible is "ti,am654-pcie-rc".
-
-ti,syscon-pcie-id : phandle to the device control module required to set device
- id and vendor id.
-ti,syscon-pcie-mode : phandle to the device control module required to configure
- PCI in either RC mode or EP mode.
-
- Example:
- pcie_msi_intc: msi-interrupt-controller {
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>;
- };
-
-pcie_intc: Interrupt controller device node for Legacy IRQ chip
- interrupt-cells: should be set to 1
-
- Example:
- pcie_intc: legacy-interrupt-controller {
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>;
- };
-
-Optional properties:-
- phys: phandle to generic Keystone SerDes PHY for PCI
- phy-names: name of the generic Keystone SerDes PHY for PCI
- - If boot loader already does PCI link establishment, then phys and
- phy-names shouldn't be present.
- interrupts: platform interrupt for error interrupts.
-
-DesignWare DT Properties not applicable for Keystone PCI
-
-1. pcie_bus clock-names not used. Instead, a phandle to phys is used.
-
-AM654 PCIe Endpoint
-===================
-
-Required Properties:-
-
-compatibility: Should be "ti,am654-pcie-ep" for EP on AM654x SoC
-reg: Four register ranges as listed in the reg-names property
-reg-names: "dbics" for the DesignWare PCIe registers, "app" for the
- TI specific application registers, "atu" for the
- Address Translation Unit configuration registers and
- "addr_space" used to map remote RC address space
-num-ib-windows: As specified in
- Documentation/devicetree/bindings/pci/designware-pcie.txt
-num-ob-windows: As specified in
- Documentation/devicetree/bindings/pci/designware-pcie.txt
-num-lanes: As specified in
- Documentation/devicetree/bindings/pci/designware-pcie.txt
-power-domains: As documented by the generic PM domain bindings in
- Documentation/devicetree/bindings/power/power_domain.txt.
-ti,syscon-pcie-mode: phandle to the device control module required to configure
- PCI in either RC mode or EP mode.
-
-Optional properties:-
-
-phys: list of PHY specifiers (used by generic PHY framework)
-phy-names: must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
- number of lanes as specified in *num-lanes* property.
-("phys" and "phy-names" DT bindings are specified in
-Documentation/devicetree/bindings/phy/phy-bindings.txt)
-interrupts: platform interrupt for error interrupts.
-
-pcie-ep {
- compatible = "ti,am654-pcie-ep";
- reg = <0x5500000 0x1000>, <0x5501000 0x1000>,
- <0x10000000 0x8000000>, <0x5506000 0x1000>;
- reg-names = "app", "dbics", "addr_space", "atu";
- power-domains = <&k3_pds 120>;
- ti,syscon-pcie-mode = <&pcie0_mode>;
- num-lanes = <1>;
- num-ib-windows = <16>;
- num-ob-windows = <16>;
- interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
-};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index 0da458a051b6..25f4def468bf 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -14,6 +14,7 @@
- "qcom,pcie-qcs404" for qcs404
- "qcom,pcie-sdm845" for sdm845
- "qcom,pcie-sm8250" for sm8250
+ - "qcom,pcie-ipq6018" for ipq6018
- reg:
Usage: required
@@ -124,6 +125,16 @@
- "aux" Auxiliary clock
- clock-names:
+ Usage: required for ipq6018
+ Value type: <stringlist>
+ Definition: Should contain the following entries
+ - "iface" PCIe to SysNOC BIU clock
+ - "axi_m" AXI Master clock
+ - "axi_s" AXI Slave clock
+ - "axi_bridge" AXI bridge clock
+ - "rchng"
+
+- clock-names:
Usage: required for qcs404
Value type: <stringlist>
Definition: Should contain the following entries
@@ -210,6 +221,19 @@
- "axi_m_sticky" AXI Master Sticky reset
- reset-names:
+ Usage: required for ipq6018
+ Value type: <stringlist>
+ Definition: Should contain the following entries
+ - "pipe" PIPE reset
+ - "sleep" Sleep reset
+ - "sticky" Core Sticky reset
+ - "axi_m" AXI Master reset
+ - "axi_s" AXI Slave reset
+ - "ahb" AHB Reset
+ - "axi_m_sticky" AXI Master Sticky reset
+ - "axi_s_sticky" AXI Slave Sticky reset
+
+- reset-names:
Usage: required for qcs404
Value type: <stringlist>
Definition: Should contain the following entries
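A sketch of the clock and reset wiring implied by the new ipq6018 entries; the GCC specifiers are placeholders and the remaining required properties (reg, ranges, interrupts and so on) are omitted:

    pcie@20000000 {
        compatible = "qcom,pcie-ipq6018";
        clocks = <&gcc 0>, <&gcc 1>, <&gcc 2>, <&gcc 3>, <&gcc 4>;
        clock-names = "iface", "axi_m", "axi_s", "axi_bridge", "rchng";
        resets = <&gcc 10>, <&gcc 11>, <&gcc 12>, <&gcc 13>,
                 <&gcc 14>, <&gcc 15>, <&gcc 16>, <&gcc 17>;
        reset-names = "pipe", "sleep", "sticky", "axi_m",
                      "axi_s", "ahb", "axi_m_sticky", "axi_s_sticky";
    };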
diff --git a/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml b/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml
new file mode 100644
index 000000000000..78c217d362a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2021 Texas Instruments Incorporated - http://www.ti.com/
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/ti,am65-pci-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI AM65 PCI Endpoint
+
+maintainers:
+ - Kishon Vijay Abraham I <kishon@ti.com>
+
+allOf:
+ - $ref: pci-ep.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,am654-pcie-ep
+
+ reg:
+ maxItems: 4
+
+ reg-names:
+ items:
+ - const: app
+ - const: dbics
+ - const: addr_space
+ - const: atu
+
+ power-domains:
+ maxItems: 1
+
+ ti,syscon-pcie-mode:
+ description: Phandle to the SYSCON entry required for configuring PCIe in RC or EP mode.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ interrupts:
+ minItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - max-link-speed
+ - power-domains
+ - ti,syscon-pcie-mode
+ - dma-coherent
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ pcie0_ep: pcie-ep@5500000 {
+ compatible = "ti,am654-pcie-ep";
+ reg = <0x5500000 0x1000>,
+ <0x5501000 0x1000>,
+ <0x10000000 0x8000000>,
+ <0x5506000 0x1000>;
+ reg-names = "app", "dbics", "addr_space", "atu";
+ power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
+ ti,syscon-pcie-mode = <&pcie0_mode>;
+ num-ib-windows = <16>;
+ num-ob-windows = <16>;
+ max-link-speed = <2>;
+ dma-coherent;
+ interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
new file mode 100644
index 000000000000..834dc1c1743c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2021 Texas Instruments Incorporated - http://www.ti.com/
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/ti,am65-pci-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI AM65 PCI Host
+
+maintainers:
+ - Kishon Vijay Abraham I <kishon@ti.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,am654-pcie-rc
+ - ti,keystone-pcie
+
+ reg:
+ maxItems: 4
+
+ reg-names:
+ items:
+ - const: app
+ - const: dbics
+ - const: config
+ - const: atu
+
+ power-domains:
+ maxItems: 1
+
+ ti,syscon-pcie-id:
+ description: Phandle to the SYSCON entry required for getting PCIe device/vendor ID
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ ti,syscon-pcie-mode:
+ description: Phandle to the SYSCON entry required for configuring PCIe in RC or EP mode.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ msi-map: true
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - max-link-speed
+ - ti,syscon-pcie-id
+ - ti,syscon-pcie-mode
+ - ranges
+
+if:
+ properties:
+ compatible:
+ enum:
+ - ti,am654-pcie-rc
+then:
+ required:
+ - dma-coherent
+ - power-domains
+ - msi-map
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ pcie0_rc: pcie@5500000 {
+ compatible = "ti,am654-pcie-rc";
+ reg = <0x5500000 0x1000>,
+ <0x5501000 0x1000>,
+ <0x10000000 0x2000>,
+ <0x5506000 0x1000>;
+ reg-names = "app", "dbics", "config", "atu";
+ power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0 0x10020000 0 0x00010000>,
+ <0x82000000 0 0x10030000 0x10030000 0 0x07FD0000>;
+ ti,syscon-pcie-id = <&pcie_devid>;
+ ti,syscon-pcie-mode = <&pcie0_mode>;
+ bus-range = <0x0 0xff>;
+ num-viewport = <16>;
+ max-link-speed = <2>;
+ dma-coherent;
+ interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
+ msi-map = <0x0 &gic_its 0x0 0x10000>;
+ device_type = "pci";
+ };
diff --git a/Documentation/devicetree/bindings/perf/arm,cmn.yaml b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
index e4fcc0de25e2..42424ccbdd0c 100644
--- a/Documentation/devicetree/bindings/perf/arm,cmn.yaml
+++ b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
@@ -21,7 +21,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 4
items:
- description: Overflow interrupt for DTC0
- description: Overflow interrupt for DTC1
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
index f80431060803..e288450e0844 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
@@ -15,7 +15,9 @@ properties:
const: 1
compatible:
- const: allwinner,sun8i-h3-usb-phy
+ enum:
+ - allwinner,sun8i-h3-usb-phy
+ - allwinner,sun50i-h616-usb-phy
reg:
items:
diff --git a/Documentation/devicetree/bindings/phy/brcm,bcm63xx-usbh-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,bcm63xx-usbh-phy.yaml
index 9a2e779e6d38..0f0bcde9eb88 100644
--- a/Documentation/devicetree/bindings/phy/brcm,bcm63xx-usbh-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,bcm63xx-usbh-phy.yaml
@@ -28,7 +28,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: usbh
- const: usb_ref
diff --git a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
index 5f9e91bfb5ff..43a4b880534c 100644
--- a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
@@ -22,7 +22,6 @@ properties:
reg:
minItems: 1
- maxItems: 6
items:
- description: the base CTRL register
- description: XHCI EC register
@@ -33,7 +32,6 @@ properties:
reg-names:
minItems: 1
- maxItems: 6
items:
- const: ctrl
- const: xhci_ec
@@ -51,7 +49,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: sw_usb
- const: sw_usb3
diff --git a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
index 04edda504ab6..cb1aa325336f 100644
--- a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
@@ -35,7 +35,6 @@ properties:
reg-names:
minItems: 1
- maxItems: 2
items:
- const: phy
- const: phy-ctrl
diff --git a/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml b/Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
index ebd78acfe2de..ebd78acfe2de 100644
--- a/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml
+++ b/Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
diff --git a/Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
index 0ccaded3f245..29d4123323c2 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
@@ -16,6 +16,9 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
"#phy-cells":
const: 1
description: selects if the phy is dual-ported
@@ -23,6 +26,7 @@ properties:
required:
- compatible
- reg
+ - clocks
- "#phy-cells"
additionalProperties: false
@@ -32,5 +36,6 @@ examples:
pcie0_phy: pcie-phy@1e149000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e149000 0x0700>;
+ clocks = <&sysc 0>;
#phy-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
index b8a7651a3d9a..ef9d9d4e6875 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
@@ -131,7 +131,6 @@ patternProperties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: Reference clock, (HS is 48MHz, SS/P is 24~27MHz)
- description: Reference clock of analog phy
@@ -141,7 +140,6 @@ patternProperties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: ref
- const: da_ref
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
index 84383e2e0b34..e71b32c9c0d1 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
@@ -31,14 +31,12 @@ properties:
resets:
minItems: 1
- maxItems: 2
items:
- description: Sierra PHY reset.
- description: Sierra APB reset. This is optional.
reset-names:
minItems: 1
- maxItems: 2
items:
- const: sierra_reset
- const: sierra_apb
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
index 320a232c7208..bd9ae11c9994 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
@@ -52,28 +52,24 @@ properties:
reg:
minItems: 1
- maxItems: 2
items:
- description: Offset of the Torrent PHY configuration registers.
- description: Offset of the DPTX PHY configuration registers.
reg-names:
minItems: 1
- maxItems: 2
items:
- const: torrent_phy
- const: dptx_phy
resets:
minItems: 1
- maxItems: 2
items:
- description: Torrent PHY reset.
- description: Torrent APB reset. This is optional.
reset-names:
minItems: 1
- maxItems: 2
items:
- const: torrent_reset
- const: torrent_apb
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
index 8c60e6985950..5ffd0f55d010 100644
--- a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
+++ b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
@@ -42,22 +42,22 @@ Required properties (child nodes):
Examples:
- cpm_comphy: phy@120000 {
+ CP11X_LABEL(comphy): phy@120000 {
compatible = "marvell,comphy-cp110";
reg = <0x120000 0x6000>;
- marvell,system-controller = <&cpm_syscon0>;
- clocks = <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>,
- <&CP110_LABEL(clk) 1 18>;
+ marvell,system-controller = <&CP11X_LABEL(syscon0)>;
+ clocks = <&CP11X_LABEL(clk) 1 5>, <&CP11X_LABEL(clk) 1 6>,
+ <&CP11X_LABEL(clk) 1 18>;
clock-names = "mg_clk", "mg_core_clk", "axi_clk";
#address-cells = <1>;
#size-cells = <0>;
- cpm_comphy0: phy@0 {
+ CP11X_LABEL(comphy0): phy@0 {
reg = <0>;
#phy-cells = <1>;
};
- cpm_comphy1: phy@1 {
+ CP11X_LABEL(comphy1): phy@1 {
reg = <1>;
#phy-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
index fb29ad807b68..5bebd86bf8b6 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
@@ -14,6 +14,7 @@ properties:
enum:
- rockchip,px30-usb2phy
- rockchip,rk3228-usb2phy
+ - rockchip,rk3308-usb2phy
- rockchip,rk3328-usb2phy
- rockchip,rk3366-usb2phy
- rockchip,rk3399-usb2phy
@@ -29,9 +30,6 @@ properties:
"#clock-cells":
const: 0
- "#phy-cells":
- const: 0
-
clocks:
maxItems: 1
@@ -119,7 +117,6 @@ required:
- reg
- clock-output-names
- "#clock-cells"
- - "#phy-cells"
- host-port
- otg-port
@@ -130,26 +127,25 @@ examples:
#include <dt-bindings/clock/rk3399-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
- u2phy0: usb2-phy@e450 {
+ u2phy0: usb2phy@e450 {
compatible = "rockchip,rk3399-usb2phy";
reg = <0xe450 0x10>;
clocks = <&cru SCLK_USB2PHY0_REF>;
clock-names = "phyclk";
clock-output-names = "clk_usbphy0_480m";
#clock-cells = <0>;
- #phy-cells = <0>;
u2phy0_host: host-port {
- #phy-cells = <0>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "linestate";
+ #phy-cells = <0>;
};
u2phy0_otg: otg-port {
- #phy-cells = <0>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "otg-bvalid", "otg-id", "linestate";
+ #phy-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
index 018cc1246ee1..3329f1d33a4f 100644
--- a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
@@ -74,6 +74,13 @@ patternProperties:
"#phy-cells":
enum: [ 0x0, 0x1 ]
+ connector:
+ type: object
+ allOf:
+ - $ref: ../connector/usb-connector.yaml
+ properties:
+ vbus-supply: true
+
allOf:
- if:
properties:
@@ -130,6 +137,10 @@ examples:
reg = <0>;
phy-supply = <&vdd_usb>;
#phy-cells = <0>;
+ connector {
+ compatible = "usb-a-connector";
+ vbus-supply = <&vbus_sw>;
+ };
};
usbphyc_port1: usb-phy@1 {
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-hs.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-hs.yaml
index 17f132ce5516..35296c588e78 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-hs.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-hs.yaml
@@ -30,7 +30,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: ref
- const: xo
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-ss.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-ss.yaml
index 17fd7f6b83bb..6cf5c6c06072 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-ss.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq806x-usb-phy-ss.yaml
@@ -30,7 +30,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: ref
- const: xo
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
index 7808ec8bc712..f0497b8623ad 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
@@ -17,6 +17,7 @@ description:
properties:
compatible:
enum:
+ - qcom,ipq6018-qmp-pcie-phy
- qcom,ipq8074-qmp-pcie-phy
- qcom,ipq8074-qmp-usb3-phy
- qcom,msm8996-qmp-pcie-phy
@@ -45,11 +46,11 @@ properties:
- qcom,sm8350-qmp-ufs-phy
- qcom,sm8350-qmp-usb3-phy
- qcom,sm8350-qmp-usb3-uni-phy
+ - qcom,sdx55-qmp-pcie-phy
- qcom,sdx55-qmp-usb3-uni-phy
reg:
minItems: 1
- maxItems: 2
items:
- description: Address and length of PHY's common serdes block.
- description: Address and length of PHY's DP_COM control block.
@@ -301,8 +302,33 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq6018-qmp-pcie-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sdm845-qhp-pcie-phy
- qcom,sdm845-qmp-pcie-phy
+ - qcom,sdx55-qmp-pcie-phy
- qcom,sm8250-qmp-gen3x1-pcie-phy
- qcom,sm8250-qmp-gen3x2-pcie-phy
- qcom,sm8250-qmp-modem-pcie-phy
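For the new qcom,ipq6018-qmp-pcie-phy clause above, the expected clock and reset names could be wired as in this sketch; the register range and specifiers are placeholders, and the per-lane child node with serdes/tx/rx/pcs registers is omitted:

    pcie_phy: phy@84000 {
        compatible = "qcom,ipq6018-qmp-pcie-phy";
        reg = <0x84000 0x1c4>;
        clocks = <&gcc 40>, <&gcc 41>;
        clock-names = "aux", "cfg_ahb";
        resets = <&gcc 50>, <&gcc 51>;
        reset-names = "phy", "common";
    };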
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index 9f9cf07b7d45..ec9ccaaba098 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -23,6 +23,8 @@ properties:
- qcom,msm8998-qusb2-phy
- qcom,sdm660-qusb2-phy
- qcom,ipq6018-qusb2-phy
+ - qcom,sm4250-qusb2-phy
+ - qcom,sm6115-qusb2-phy
- items:
- enum:
- qcom,sc7180-qusb2-phy
@@ -36,7 +38,6 @@ properties:
clocks:
minItems: 2
- maxItems: 3
items:
- description: phy config clock
- description: 19.2 MHz ref clk
@@ -44,7 +45,6 @@ properties:
clock-names:
minItems: 2
- maxItems: 3
items:
- const: cfg_ahb
- const: ref
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt
deleted file mode 100644
index 63853b35e083..000000000000
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-pcie.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-* Renesas R-Car generation 3 PCIe PHY
-
-This file provides information on what the device node for the R-Car
-generation 3 PCIe PHY contains.
-
-Required properties:
-- compatible: "renesas,r8a77980-pcie-phy" if the device is a part of the
- R8A77980 SoC.
-- reg: offset and length of the register block.
-- clocks: clock phandle and specifier pair.
-- power-domains: power domain phandle and specifier pair.
-- resets: reset phandle and specifier pair.
-- #phy-cells: see phy-bindings.txt in the same directory, must be <0>.
-
-Example (R-Car V3H):
-
- pcie-phy@e65d0000 {
- compatible = "renesas,r8a77980-pcie-phy";
- reg = <0 0xe65d0000 0 0x8000>;
- #phy-cells = <0>;
- clocks = <&cpg CPG_MOD 319>;
- power-domains = <&sysc 32>;
- resets = <&cpg 319>;
- };
diff --git a/Documentation/devicetree/bindings/phy/renesas,rcar-gen3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,rcar-gen3-pcie-phy.yaml
new file mode 100644
index 000000000000..247ef7c47cf5
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/renesas,rcar-gen3-pcie-phy.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/renesas,rcar-gen3-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car Generation 3 PCIe PHY
+
+maintainers:
+ - Sergei Shtylyov <sergei.shtylyov@gmail.com>
+
+properties:
+ compatible:
+ const: renesas,r8a77980-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+ - resets
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a77980-cpg-mssr.h>
+ #include <dt-bindings/power/r8a77980-sysc.h>
+
+ pcie-phy@e65d0000 {
+ compatible = "renesas,r8a77980-pcie-phy";
+ reg = <0xe65d0000 0x8000>;
+ #phy-cells = <0>;
+ clocks = <&cpg CPG_MOD 319>;
+ power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index 0f358d5b84ef..d5dc5a3cdceb 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -39,7 +39,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: fck
- const: usb_x1
@@ -61,7 +60,6 @@ properties:
resets:
minItems: 1
- maxItems: 2
items:
- description: reset of USB 2.0 host side
- description: reset of USB 2.0 peripheral side
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml
index f3ef738a3ff6..b8483f9edbfc 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml
@@ -33,7 +33,6 @@ properties:
# If you want to use the ssc, the clock-frequency of usb_extal
# must not be 0.
minItems: 2
- maxItems: 3
items:
- const: usb3-if # The functional clock
- const: usb3s_clk # The usb3's external clock
diff --git a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
new file mode 100644
index 000000000000..bb4a2e4b8ab0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip-inno-csi-dphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC MIPI RX0 D-PHY Device Tree Bindings
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The Rockchip SoC has a MIPI CSI D-PHY based on an Innosilicon IP which
+ connects to the ISP1 (Image Signal Processing unit v1.0) for CSI cameras.
+
+properties:
+ compatible:
+ enum:
+ - rockchip,px30-csi-dphy
+ - rockchip,rk1808-csi-dphy
+ - rockchip,rk3326-csi-dphy
+ - rockchip,rk3368-csi-dphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: pclk
+
+ '#phy-cells':
+ const: 0
+
+ power-domains:
+ description: Video in/out power domain.
+ maxItems: 1
+
+ resets:
+ items:
+ - description: exclusive PHY reset line
+
+ reset-names:
+ items:
+ - const: apb
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Some additional PHY settings are accessed through GRF registers.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#phy-cells'
+ - power-domains
+ - resets
+ - reset-names
+ - rockchip,grf
+
+additionalProperties: false
+
+examples:
+ - |
+
+ csi_dphy: phy@ff2f0000 {
+ compatible = "rockchip,px30-csi-dphy";
+ reg = <0xff2f0000 0x4000>;
+ clocks = <&cru 1>;
+ clock-names = "pclk";
+ #phy-cells = <0>;
+ power-domains = <&power 1>;
+ resets = <&cru 1>;
+ reset-names = "apb";
+ rockchip,grf = <&grf>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
deleted file mode 100644
index 4ed569046daf..000000000000
--- a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-ROCKCHIP USB2 PHY
-
-Required properties:
- - compatible: matching the soc type, one of
- "rockchip,rk3066a-usb-phy"
- "rockchip,rk3188-usb-phy"
- "rockchip,rk3288-usb-phy"
- - #address-cells: should be 1
- - #size-cells: should be 0
-
-Deprecated properties:
- - rockchip,grf : phandle to the syscon managing the "general
- register files" - phy should be a child of the GRF instead
-
-Sub-nodes:
-Each PHY should be represented as a sub-node.
-
-Sub-nodes
-required properties:
-- #phy-cells: should be 0
-- reg: PHY configure reg address offset in GRF
- "0x320" - for PHY attach to OTG controller
- "0x334" - for PHY attach to HOST0 controller
- "0x348" - for PHY attach to HOST1 controller
-
-Optional Properties:
-- clocks : phandle + clock specifier for the phy clocks
-- clock-names: string, clock name, must be "phyclk"
-- #clock-cells: for users of the phy-pll, should be 0
-- reset-names: Only allow the following entries:
- - phy-reset
-- resets: Must contain an entry for each entry in reset-names.
-- vbus-supply: power-supply phandle for vbus power source
-
-Example:
-
-grf: syscon@ff770000 {
- compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
-
-...
-
- usbphy: phy {
- compatible = "rockchip,rk3288-usb-phy";
- #address-cells = <1>;
- #size-cells = <0>;
-
- usbphy0: usb-phy0 {
- #phy-cells = <0>;
- reg = <0x320>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.yaml
new file mode 100644
index 000000000000..f0fc8275dcd0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip USB2.0 phy
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ oneOf:
+ - const: rockchip,rk3288-usb-phy
+ - items:
+ - enum:
+ - rockchip,rk3066a-usb-phy
+ - rockchip,rk3188-usb-phy
+ - const: rockchip,rk3288-usb-phy
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+patternProperties:
+ "usb-phy@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: phyclk
+
+ "#clock-cells":
+ const: 0
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: phy-reset
+
+ vbus-supply:
+ description: phandle for vbus power source
+
+ required:
+ - reg
+ - "#phy-cells"
+
+ additionalProperties: false
+
+examples:
+ - |
+ usbphy: usbphy {
+ compatible = "rockchip,rk3288-usb-phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbphy0: usb-phy@320 {
+ reg = <0x320>;
+ #phy-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
index 5272b6f284ba..dcd63908aeae 100644
--- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
@@ -77,6 +77,34 @@ properties:
Type-C spec states minimum CC pin debounce of 100 ms and maximum
of 200 ms. However, some solutions might need more than 200 ms.
+ refclk-dig:
+ type: object
+ description: |
+ WIZ node should have subnode for refclk_dig to select the reference
+ clock source for the reference clock used in the PHY and PMA digital
+ logic.
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 4
+ description: Phandles to the two (Torrent) or four (Sierra) clock nodes representing
+ the inputs to refclk_dig
+
+ "#clock-cells":
+ const: 0
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-parents:
+ maxItems: 1
+
+ required:
+ - clocks
+ - "#clock-cells"
+ - assigned-clocks
+ - assigned-clock-parents
+
patternProperties:
"^pll[0|1]-refclk$":
type: object
@@ -121,34 +149,6 @@ patternProperties:
- clocks
- "#clock-cells"
- "^refclk-dig$":
- type: object
- description: |
- WIZ node should have subnode for refclk_dig to select the reference
- clock source for the reference clock used in the PHY and PMA digital
- logic.
- properties:
- clocks:
- minItems: 2
- maxItems: 4
- description: Phandle to two (Torrent) or four (Sierra) clock nodes representing
- the inputs to refclk_dig
-
- "#clock-cells":
- const: 0
-
- assigned-clocks:
- maxItems: 1
-
- assigned-clock-parents:
- maxItems: 1
-
- required:
- - clocks
- - "#clock-cells"
- - assigned-clocks
- - assigned-clock-parents
-
"^serdes@[0-9a-f]+$":
type: object
description: |
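Having moved out of patternProperties, the refclk-dig subnode keeps the same shape; a sketch with placeholder clock phandles and parent selection follows:

    wiz0_refclk_dig: refclk-dig {
        clocks = <&k3_clks 292 11>, <&k3_clks 292 0>,
                 <&ext_refclk0>, <&ext_refclk1>;
        #clock-cells = <0>;
        assigned-clocks = <&wiz0_refclk_dig>;
        assigned-clock-parents = <&k3_clks 292 11>;
    };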
diff --git a/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml b/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
new file mode 100644
index 000000000000..6107880e5246
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/ti,tcan104x-can.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TCAN104x CAN TRANSCEIVER PHY
+
+maintainers:
+ - Aswath Govindraju <a-govindraju@ti.com>
+
+properties:
+ $nodename:
+ pattern: "^can-phy"
+
+ compatible:
+ enum:
+ - ti,tcan1042
+ - ti,tcan1043
+
+ '#phy-cells':
+ const: 0
+
+ standby-gpios:
+ description:
+ GPIO used to toggle the standby signal on the transceiver
+ maxItems: 1
+
+ enable-gpios:
+ description:
+ GPIO used to toggle the enable signal on the transceiver
+ maxItems: 1
+
+ max-bitrate:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ max bit rate supported in bps
+ minimum: 1
+
+required:
+ - compatible
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ transceiver1: can-phy {
+ compatible = "ti,tcan1043";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ standby-gpios = <&wakeup_gpio1 16 GPIO_ACTIVE_LOW>;
+ enable-gpios = <&main_gpio1 67 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml
index ccdd9e3820d7..3f94f6944740 100644
--- a/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml
@@ -26,7 +26,6 @@ properties:
- description: PAD Pull Control + PAD Schmitt Trigger Enable + PAD Control
- description: PAD Drive Capacity Select
minItems: 1
- maxItems: 4
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index d30f85cc395e..f005abac7079 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -37,7 +37,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: vpu
- const: vapb
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml
index a96e6dbf1858..01bdda167eef 100644
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml
@@ -25,7 +25,9 @@ properties:
compatible:
enum:
- fsl,imx7d-gpc
+ - fsl,imx8mn-gpc
- fsl,imx8mq-gpc
+ - fsl,imx8mm-gpc
reg:
maxItems: 1
@@ -54,6 +56,7 @@ properties:
Power domain index. Valid values are defined in
include/dt-bindings/power/imx7-power.h for fsl,imx7d-gpc and
include/dt-bindings/power/imx8m-power.h for fsl,imx8mq-gpc
+ include/dt-bindings/power/imx8mm-power.h for fsl,imx8mm-gpc
maxItems: 1
clocks:
@@ -66,6 +69,16 @@ properties:
power-supply: true
+ resets:
+ description: |
+ A number of phandles to resets that need to be asserted during
+ power-up sequencing of the domain. The resets belong to devices
+ located inside the power domain, which need to be held in reset
+ across the power-up sequence, so there is no generic means to specify
+ what each individual reset is in a power-domain binding.
+ minItems: 1
+ maxItems: 4
+
required:
- '#power-domain-cells'
- reg
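The new resets list sits on the individual power-domain child nodes; a sketch of one such node, with the domain index, clock and reset specifiers invented for illustration:

    pgc_gpumix: power-domain@7 {
        #power-domain-cells = <0>;
        reg = <7>;                       /* illustrative domain index */
        clocks = <&clk 220>, <&clk 221>;
        resets = <&src 3>, <&src 4>;     /* held asserted across power-up */
    };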
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index ff21bfef8204..4807b560f00d 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,mdm9607-rpmpd
- qcom,msm8916-rpmpd
- qcom,msm8939-rpmpd
- qcom,msm8976-rpmpd
@@ -26,6 +27,7 @@ properties:
- qcom,sdm660-rpmpd
- qcom,sc7180-rpmhpd
- qcom,sc7280-rpmhpd
+ - qcom,sc8180x-rpmhpd
- qcom,sdm845-rpmhpd
- qcom,sdx55-rpmhpd
- qcom,sm8150-rpmhpd
diff --git a/Documentation/devicetree/bindings/power/renesas,rzg2l-sysc.yaml b/Documentation/devicetree/bindings/power/renesas,rzg2l-sysc.yaml
new file mode 100644
index 000000000000..84ddc772b003
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/renesas,rzg2l-sysc.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/renesas,rzg2l-sysc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas RZ/G2L System Controller (SYSC)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+ The RZ/G2L System Controller (SYSC) performs system control of the LSI and
+ supports the following functions,
+ - External terminal state capture function
+ - 34-bit address space access function
+ - Low power consumption control
+ - WDT stop control
+
+properties:
+ compatible:
+ enum:
+ - renesas,r9a07g044-sysc # RZ/G2{L,LC}
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: CA55/CM33 Sleep/Software Standby Mode request interrupt
+ - description: CA55 Software Standby Mode release request interrupt
+ - description: CM33 Software Standby Mode release request interrupt
+ - description: CA55 ACE Asynchronous Bridge Master/Slave interface deny request interrupt
+
+ interrupt-names:
+ items:
+ - const: lpm_int
+ - const: ca55stbydone_int
+ - const: cm33stbyr_int
+ - const: ca55_deny
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ sysc: system-controller@11020000 {
+ compatible = "renesas,r9a07g044-sysc";
+ reg = <0x11020000 0x10000>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "lpm_int", "ca55stbydone_int", "cm33stbyr_int",
+ "ca55_deny";
+ };
diff --git a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
deleted file mode 100644
index 49aba15dff8b..000000000000
--- a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-DT bindings for the Renesas R-Mobile System Controller
-
-== System Controller Node ==
-
-The R-Mobile System Controller provides the following functions:
- - Boot mode management,
- - Reset generation,
- - Power management.
-
-Required properties:
-- compatible: Should be "renesas,sysc-<soctype>", "renesas,sysc-rmobile" as
- fallback.
- Examples with soctypes are:
- - "renesas,sysc-r8a73a4" (R-Mobile APE6)
- - "renesas,sysc-r8a7740" (R-Mobile A1)
- - "renesas,sysc-sh73a0" (SH-Mobile AG5)
-- reg: Two address start and address range blocks for the device:
- - The first block refers to the normally accessible registers,
- - the second block refers to the registers protected by the HPB
- semaphore.
-
-Optional nodes:
-- pm-domains: This node contains a hierarchy of PM domain nodes, which should
- match the Power Area Hierarchy in the Power Domain Specifications section of
- the device's datasheet.
-
-
-== PM Domain Nodes ==
-
-Each of the PM domain nodes represents a PM domain, as documented by the
-generic PM domain bindings in
-Documentation/devicetree/bindings/power/power-domain.yaml.
-
-The nodes should be named by the real power area names, and thus their names
-should be unique.
-
-Required properties:
- - #power-domain-cells: Must be 0.
-
-Optional properties:
-- reg: If the PM domain is not always-on, this property must contain the bit
- index number for the corresponding power area in the various Power
- Control and Status Registers. The parent's node must contain the
- following two properties:
- - #address-cells: Must be 1,
- - #size-cells: Must be 0.
- If the PM domain is always-on, this property must be omitted.
-
-
-Example:
-
-This shows a subset of the r8a7740 PM domain hierarchy, containing the
-C5 "always-on" domain, 2 of its subdomains (A4S and A4SU), and the A3SP domain,
-which is a subdomain of A4S.
-
- sysc: system-controller@e6180000 {
- compatible = "renesas,sysc-r8a7740", "renesas,sysc-rmobile";
- reg = <0xe6180000 0x8000>, <0xe6188000 0x8000>;
-
- pm-domains {
- pd_c5: c5 {
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
- pd_a4s: a4s@10 {
- reg = <10>;
- #address-cells = <1>;
- #size-cells = <0>;
- #power-domain-cells = <0>;
-
- pd_a3sp: a3sp@11 {
- reg = <11>;
- #power-domain-cells = <0>;
- };
- };
-
- pd_a4su: a4su@20 {
- reg = <20>;
- #power-domain-cells = <0>;
- };
- };
- };
- };
-
-
-== PM Domain Consumers ==
-
-Hardware blocks belonging to a PM domain should contain a "power-domains"
-property that is a phandle pointing to the corresponding PM domain node.
-
-Example:
-
- tpu: pwm@e6600000 {
- compatible = "renesas,tpu-r8a7740", "renesas,tpu";
- reg = <0xe6600000 0x100>;
- clocks = <&mstp3_clks R8A7740_CLK_TPU0>;
- power-domains = <&pd_a3sp>;
- #pwm-cells = <3>;
- };
diff --git a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.yaml b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.yaml
new file mode 100644
index 000000000000..559718997de7
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.yaml
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/renesas,sysc-rmobile.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Mobile System Controller
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Magnus Damm <magnus.damm@gmail.com>
+
+description: |
+ The R-Mobile System Controller provides the following functions:
+ - Boot mode management,
+ - Reset generation,
+ - Power management.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,sysc-r8a73a4 # R-Mobile APE6
+ - renesas,sysc-r8a7740 # R-Mobile A1
+ - renesas,sysc-sh73a0 # SH-Mobile AG5
+ - const: renesas,sysc-rmobile # Generic SH/R-Mobile
+
+ reg:
+ items:
+ - description: Normally accessible register block
+ - description: Register block protected by the HPB semaphore
+
+ pm-domains:
+ type: object
+ description: |
+ This node contains a hierarchy of PM domain nodes, which should match the
+ Power Area Hierarchy in the Power Domain Specifications section of the
+ device's datasheet.
+
+ properties:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ additionalProperties:
+ $ref: "#/$defs/pd-node"
+
+required:
+ - compatible
+ - reg
+ - pm-domains
+
+additionalProperties: false
+
+$defs:
+ pd-node:
+ type: object
+ description:
+ PM domain node representing a PM domain. This node should be named by
+ the real power area name, and thus its name should be unique.
+
+ properties:
+ reg:
+ maxItems: 1
+ description:
+ If the PM domain is not always-on, this property must contain the
+ bit index number for the corresponding power area in the various
+ Power Control and Status Registers.
+ If the PM domain is always-on, this property must be omitted.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ '#power-domain-cells':
+ const: 0
+
+ required:
+ - '#power-domain-cells'
+
+ additionalProperties:
+ $ref: "#/$defs/pd-node"
+
+examples:
+ - |
+ // This shows a subset of the r8a7740 PM domain hierarchy, containing the
+ // C5 "always-on" domain, 2 of its subdomains (A4S and A4SU), and the A3SP
+ // domain, which is a subdomain of A4S.
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,sysc-r8a7740", "renesas,sysc-rmobile";
+ reg = <0xe6180000 0x8000>, <0xe6188000 0x8000>;
+
+ pm-domains {
+ pd_c5: c5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a4s: a4s@10 {
+ reg = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <0>;
+
+ pd_a3sp: a3sp@11 {
+ reg = <11>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ pd_a4su: a4su@20 {
+ reg = <20>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ };
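
The removed text file also illustrated the consumer side: a hardware block belonging to one of these PM domains references its domain node through a "power-domains" phandle. The sketch below is reproduced from the old example (the r8a7740 TPU, with the clock specifier as given there) and is shown only for reference:

    tpu: pwm@e6600000 {
        compatible = "renesas,tpu-r8a7740", "renesas,tpu";
        reg = <0xe6600000 0x100>;
        clocks = <&mstp3_clks R8A7740_CLK_TPU0>;
        power-domains = <&pd_a3sp>;
        #pwm-cells = <3>;
    };
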
diff --git a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
new file mode 100644
index 000000000000..9b9d71087466
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
@@ -0,0 +1,248 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/rockchip,power-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Power Domains
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ Rockchip processors include support for multiple power domains
+ which can be powered up/down by software based on different
+ application scenarios to save power.
+
+ Power domains contained within power-controller node are
+ generic power domain providers documented in
+ Documentation/devicetree/bindings/power/power-domain.yaml.
+
+ IP cores belonging to a power domain should contain a
+ "power-domains" property that is a phandle for the
+ power domain node representing the domain.
+
+properties:
+ $nodename:
+ const: power-controller
+
+ compatible:
+ enum:
+ - rockchip,px30-power-controller
+ - rockchip,rk3036-power-controller
+ - rockchip,rk3066-power-controller
+ - rockchip,rk3128-power-controller
+ - rockchip,rk3188-power-controller
+ - rockchip,rk3228-power-controller
+ - rockchip,rk3288-power-controller
+ - rockchip,rk3328-power-controller
+ - rockchip,rk3366-power-controller
+ - rockchip,rk3368-power-controller
+ - rockchip,rk3399-power-controller
+ - rockchip,rk3568-power-controller
+
+ "#power-domain-cells":
+ const: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+required:
+ - compatible
+ - "#power-domain-cells"
+
+additionalProperties: false
+
+patternProperties:
+ "^power-domain@[0-9a-f]+$":
+
+ $ref: "#/$defs/pd-node"
+
+ unevaluatedProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^power-domain@[0-9a-f]+$":
+
+ $ref: "#/$defs/pd-node"
+
+ unevaluatedProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ patternProperties:
+ "^power-domain@[0-9a-f]+$":
+
+ $ref: "#/$defs/pd-node"
+
+ unevaluatedProperties: false
+
+ properties:
+ "#power-domain-cells":
+ const: 0
+
+$defs:
+ pd-node:
+ type: object
+ description: |
+ Represents the power domains within the power controller node.
+
+ properties:
+ reg:
+ maxItems: 1
+ description: |
+ Power domain index. Valid values are defined in
+ "include/dt-bindings/power/px30-power.h"
+ "include/dt-bindings/power/rk3036-power.h"
+ "include/dt-bindings/power/rk3066-power.h"
+ "include/dt-bindings/power/rk3128-power.h"
+ "include/dt-bindings/power/rk3188-power.h"
+ "include/dt-bindings/power/rk3228-power.h"
+ "include/dt-bindings/power/rk3288-power.h"
+ "include/dt-bindings/power/rk3328-power.h"
+ "include/dt-bindings/power/rk3366-power.h"
+ "include/dt-bindings/power/rk3368-power.h"
+ "include/dt-bindings/power/rk3399-power.h"
+ "include/dt-bindings/power/rk3568-power.h"
+
+ clocks:
+ minItems: 1
+ maxItems: 30
+ description: |
+ A number of phandles to clocks that need to be enabled
+ while the power domain switches state.
+
+ pm_qos:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ A number of phandles to qos blocks which need to be saved and restored
+ while the power domain switches state.
+
+ "#power-domain-cells":
+ enum: [0, 1]
+ description:
+ Must be 0 for nodes representing a single PM domain and 1 for nodes
+ providing multiple PM domains.
+
+ required:
+ - reg
+ - "#power-domain-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/power/rk3399-power.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ qos_hdcp: qos@ffa90000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa90000 0x0 0x20>;
+ };
+
+ qos_iep: qos@ffa98000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffa98000 0x0 0x20>;
+ };
+
+ qos_rga_r: qos@ffab0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffab0000 0x0 0x20>;
+ };
+
+ qos_rga_w: qos@ffab0080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffab0080 0x0 0x20>;
+ };
+
+ qos_video_m0: qos@ffab8000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffab8000 0x0 0x20>;
+ };
+
+ qos_video_m1_r: qos@ffac0000 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffac0000 0x0 0x20>;
+ };
+
+ qos_video_m1_w: qos@ffac0080 {
+ compatible = "rockchip,rk3399-qos", "syscon";
+ reg = <0x0 0xffac0080 0x0 0x20>;
+ };
+
+ power-management@ff310000 {
+ compatible = "rockchip,rk3399-pmu", "syscon", "simple-mfd";
+ reg = <0x0 0xff310000 0x0 0x1000>;
+
+ power-controller {
+ compatible = "rockchip,rk3399-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* These power domains are grouped by VD_CENTER */
+ power-domain@RK3399_PD_IEP {
+ reg = <RK3399_PD_IEP>;
+ clocks = <&cru ACLK_IEP>,
+ <&cru HCLK_IEP>;
+ pm_qos = <&qos_iep>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_RGA {
+ reg = <RK3399_PD_RGA>;
+ clocks = <&cru ACLK_RGA>,
+ <&cru HCLK_RGA>;
+ pm_qos = <&qos_rga_r>,
+ <&qos_rga_w>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VCODEC {
+ reg = <RK3399_PD_VCODEC>;
+ clocks = <&cru ACLK_VCODEC>,
+ <&cru HCLK_VCODEC>;
+ pm_qos = <&qos_video_m0>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VDU {
+ reg = <RK3399_PD_VDU>;
+ clocks = <&cru ACLK_VDU>,
+ <&cru HCLK_VDU>;
+ pm_qos = <&qos_video_m1_r>,
+ <&qos_video_m1_w>;
+ #power-domain-cells = <0>;
+ };
+ power-domain@RK3399_PD_VIO {
+ reg = <RK3399_PD_VIO>;
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3399_PD_HDCP {
+ reg = <RK3399_PD_HDCP>;
+ clocks = <&cru ACLK_HDCP>,
+ <&cru HCLK_HDCP>,
+ <&cru PCLK_HDCP>;
+ pm_qos = <&qos_hdcp>;
+ #power-domain-cells = <0>;
+ };
+ };
+ };
+ };
+ };
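
As the description notes, IP cores reference their domain through a "power-domains" phandle. A minimal consumer sketch, assuming the power-controller above is labelled "power" and reusing the RK3399_PD_VCODEC index from the example (the compatible string and register address are illustrative, not taken from this binding):

    video-codec@ff660000 {
        compatible = "rockchip,rk3399-vdec";
        reg = <0x0 0xff660000 0x0 0x480>;
        power-domains = <&power RK3399_PD_VCODEC>;
    };
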
diff --git a/Documentation/devicetree/bindings/power/supply/charger-manager.txt b/Documentation/devicetree/bindings/power/supply/charger-manager.txt
deleted file mode 100644
index b5ae9061b7a0..000000000000
--- a/Documentation/devicetree/bindings/power/supply/charger-manager.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-charger-manager bindings
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Required properties :
- - compatible : "charger-manager"
- - <>-supply : for regulator consumer, named according to cm-regulator-name
- - cm-chargers : name of chargers
- - cm-fuel-gauge : name of battery fuel gauge
- - subnode <regulator> :
- - cm-regulator-name : name of charger regulator
- - subnode <cable> :
- - cm-cable-name : name of charger cable - one of USB, USB-HOST,
- SDP, DCP, CDP, ACA, FAST-CHARGER, SLOW-CHARGER, WPT,
- PD, DOCK, JIG, or MECHANICAL
- - cm-cable-extcon : name of extcon dev
-(optional) - cm-cable-min : minimum current of cable
-(optional) - cm-cable-max : maximum current of cable
-
-Optional properties :
- - cm-name : charger manager's name (default : "battery")
- - cm-poll-mode : polling mode - 0 for disabled, 1 for always, 2 for when
- external power is connected, or 3 for when charging. If not present,
- then polling is disabled
- - cm-poll-interval : polling interval (in ms)
- - cm-battery-stat : battery status - 0 for battery always present, 1 for no
- battery, 2 to check presence via fuel gauge, or 3 to check presence
- via charger
- - cm-fullbatt-vchkdrop-volt : voltage drop (in uV) before restarting charging
- - cm-fullbatt-voltage : voltage (in uV) of full battery
- - cm-fullbatt-soc : state of charge to consider as full battery
- - cm-fullbatt-capacity : capcity (in uAh) to consider as full battery
- - cm-thermal-zone : name of external thermometer's thermal zone
- - cm-battery-* : threshold battery temperature for charging
- -cold : critical cold temperature of battery for charging
- -cold-in-minus : flag that cold temperature is in minus degrees
- -hot : critical hot temperature of battery for charging
- -temp-diff : temperature difference to allow recharging
- - cm-dis/charging-max = limits of charging duration
-
-Deprecated properties:
- - cm-num-chargers
- - cm-fullbatt-vchkdrop-ms
-
-Example :
- charger-manager@0 {
- compatible = "charger-manager";
- chg-reg-supply = <&charger_regulator>;
-
- cm-name = "battery";
- /* Always polling ON : 30s */
- cm-poll-mode = <1>;
- cm-poll-interval = <30000>;
-
- cm-fullbatt-vchkdrop-volt = <150000>;
- cm-fullbatt-soc = <100>;
-
- cm-battery-stat = <3>;
-
- cm-chargers = "charger0", "charger1", "charger2";
-
- cm-fuel-gauge = "fuelgauge0";
-
- cm-thermal-zone = "thermal_zone.1"
- /* in deci centigrade */
- cm-battery-cold = <50>;
- cm-battery-cold-in-minus;
- cm-battery-hot = <800>;
- cm-battery-temp-diff = <100>;
-
- /* Allow charging for 5hr */
- cm-charging-max = <18000000>;
- /* Allow discharging for 2hr */
- cm-discharging-max = <7200000>;
-
- regulator@0 {
- cm-regulator-name = "chg-reg";
- cable@0 {
- cm-cable-name = "USB";
- cm-cable-extcon = "extcon-dev.0";
- cm-cable-min = <475000>;
- cm-cable-max = <500000>;
- };
- cable@1 {
- cm-cable-name = "SDP";
- cm-cable-extcon = "extcon-dev.0";
- cm-cable-min = <650000>;
- cm-cable-max = <675000>;
- };
- };
-
- };
diff --git a/Documentation/devicetree/bindings/power/supply/charger-manager.yaml b/Documentation/devicetree/bindings/power/supply/charger-manager.yaml
new file mode 100644
index 000000000000..c863cfa67865
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/charger-manager.yaml
@@ -0,0 +1,215 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/charger-manager.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Charger Manager
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |
+ Binding for the legacy charger manager driver.
+ Please do not use for new products.
+
+properties:
+ compatible:
+ const: charger-manager
+
+ cm-chargers:
+ description: name of chargers
+ $ref: /schemas/types.yaml#/definitions/string-array
+
+ cm-num-chargers:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+
+ cm-fuel-gauge:
+ description: name of battery fuel gauge
+ $ref: /schemas/types.yaml#/definitions/string
+
+ cm-name:
+ description: name of the charger manager
+ default: battery
+ $ref: /schemas/types.yaml#/definitions/string
+
+ cm-poll-mode:
+ description: polling mode
+ default: 0
+ enum:
+ - 0 # disabled
+ - 1 # always
+ - 2 # when external power is connected
+ - 3 # when charging
+
+ cm-poll-interval:
+ description: polling interval (in ms)
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-battery-stat:
+ description: battery status
+ enum:
+ - 0 # battery always present
+ - 1 # no battery
+ - 2 # check presence via fuel gauge
+ - 3 # check presence via charger
+
+ cm-fullbatt-vchkdrop-volt:
+ description: voltage drop before restarting charging in uV
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-fullbatt-vchkdrop-ms:
+ deprecated: true
+
+ cm-fullbatt-voltage:
+ description: voltage of full battery in uV
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-fullbatt-soc:
+ description: state of charge to consider as full battery in %
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-fullbatt-capacity:
+ description: capacity to consider as full battery in uAh
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-thermal-zone:
+ description: name of external thermometer's thermal zone
+ $ref: /schemas/types.yaml#/definitions/string
+
+ cm-discharging-max:
+ description: limits of discharging duration in ms
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-charging-max:
+ description: limits of charging duration in ms
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-battery-cold:
+ description: critical cold temperature of battery for charging in deci-degree celsius
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-battery-cold-in-minus:
+ description: if set cm-battery-cold temperature is in minus degrees
+ type: boolean
+
+ cm-battery-hot:
+ description: critical hot temperature of battery for charging in deci-degree celsius
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-battery-temp-diff:
+ description: temperature difference to allow recharging in deci-degree celsius
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+patternProperties:
+ "-supply$":
+ description: regulator consumer, named according to cm-regulator-name
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ "^regulator[@-][0-9]$":
+ type: object
+ properties:
+ cm-regulator-name:
+ description: name of charger regulator
+ $ref: /schemas/types.yaml#/definitions/string
+
+ required:
+ - cm-regulator-name
+
+ additionalProperties: false
+
+ patternProperties:
+ "^cable[@-][0-9]$":
+ type: object
+ properties:
+ cm-cable-name:
+ description: name of charger cable
+ enum:
+ - USB
+ - USB-HOST
+ - SDP
+ - DCP
+ - CDP
+ - ACA
+ - FAST-CHARGER
+ - SLOW-CHARGER
+ - WPT
+ - PD
+ - DOCK
+ - JIG
+ - MECHANICAL
+
+ cm-cable-extcon:
+ description: name of extcon dev
+ $ref: /schemas/types.yaml#/definitions/string
+
+ cm-cable-min:
+ description: minimum current of cable in uA
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cm-cable-max:
+ description: maximum current of cable in uA
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - cm-cable-name
+ - cm-cable-extcon
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - cm-chargers
+ - cm-fuel-gauge
+
+additionalProperties: false
+
+examples:
+ - |
+ charger-manager {
+ compatible = "charger-manager";
+ chg-reg-supply = <&charger_regulator>;
+
+ cm-name = "battery";
+ /* Always polling ON : 30s */
+ cm-poll-mode = <1>;
+ cm-poll-interval = <30000>;
+
+ cm-fullbatt-vchkdrop-volt = <150000>;
+ cm-fullbatt-soc = <100>;
+
+ cm-battery-stat = <3>;
+
+ cm-chargers = "charger0", "charger1", "charger2";
+
+ cm-fuel-gauge = "fuelgauge0";
+
+ cm-thermal-zone = "thermal_zone.1";
+ /* in deci centigrade */
+ cm-battery-cold = <50>;
+ cm-battery-cold-in-minus;
+ cm-battery-hot = <800>;
+ cm-battery-temp-diff = <100>;
+
+ /* Allow charging for 5hr */
+ cm-charging-max = <18000000>;
+ /* Allow discharging for 2hr */
+ cm-discharging-max = <7200000>;
+
+ regulator-0 {
+ cm-regulator-name = "chg-reg";
+ cable-0 {
+ cm-cable-name = "USB";
+ cm-cable-extcon = "extcon-dev.0";
+ cm-cable-min = <475000>;
+ cm-cable-max = <500000>;
+ };
+ cable-1 {
+ cm-cable-name = "SDP";
+ cm-cable-extcon = "extcon-dev.0";
+ cm-cable-min = <650000>;
+ cm-cable-max = <675000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
index de91cf3f058c..f792d06db413 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
@@ -89,7 +89,7 @@ examples:
reg = <0x36>;
maxim,alert-low-soc-level = <10>;
interrupt-parent = <&gpio7>;
- interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
wakeup-source;
};
};
diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
new file mode 100644
index 000000000000..ae647d3355a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/supply/richtek,rt5033-battery.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Richtek RT5033 PMIC Fuel Gauge
+
+maintainers:
+ - Stephan Gerhold <stephan@gerhold.net>
+
+allOf:
+ - $ref: power-supply.yaml#
+
+properties:
+ compatible:
+ const: richtek,rt5033-battery
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery@35 {
+ compatible = "richtek,rt5033-battery";
+ reg = <0x35>;
+ };
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery@35 {
+ compatible = "richtek,rt5033-battery";
+ reg = <0x35>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <121 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/property-units.txt b/Documentation/devicetree/bindings/property-units.txt
deleted file mode 100644
index 218f99fa311f..000000000000
--- a/Documentation/devicetree/bindings/property-units.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Standard Unit Suffixes for Property names
-
-Properties which have a unit of measure are recommended to have a unit
-suffix appended to the property name. The list below contains the
-recommended suffixes. Other variations exist in bindings, but should not
-be used in new bindings or added here. The inconsistency in the unit
-prefixes is due to selecting the most commonly used variants.
-
-It is also recommended to use the units listed here and not add additional
-unit prefixes.
-
-Time/Frequency
-----------------------------------------
--mhz : megahertz
--hz : hertz (preferred)
--sec : second
--ms : millisecond
--us : microsecond
--ns : nanosecond
--ps : picosecond
-
-Distance
-----------------------------------------
--mm : millimeter
-
-Electricity
-----------------------------------------
--microamp : microampere
--microamp-hours : microampere hour
--ohms : ohm
--micro-ohms : microohm
--microwatt-hours: microwatt hour
--microvolt : microvolt
--picofarads : picofarad
--femtofarads : femtofarad
-
-Temperature
-----------------------------------------
--celsius : degree Celsius
--millicelsius : millidegree Celsius
-
-Pressure
-----------------------------------------
--kpascal : kilopascal
-
-Throughput
-----------------------------------------
--kBps : kilobytes per second
diff --git a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
index 7dcab2bf8128..800d511502c4 100644
--- a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
@@ -25,6 +25,9 @@ properties:
- const: allwinner,sun8i-a83t-pwm
- const: allwinner,sun8i-h3-pwm
- items:
+ - const: allwinner,sun8i-v3s-pwm
+ - const: allwinner,sun7i-a20-pwm
+ - items:
- const: allwinner,sun50i-a64-pwm
- const: allwinner,sun5i-a13-pwm
- items:
@@ -37,7 +40,6 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: Module Clock
- description: Bus Clock
diff --git a/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
deleted file mode 100644
index 655f6cd4ef46..000000000000
--- a/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Broadcom iProc PWM controller device tree bindings
-
-This controller has 4 channels.
-
-Required Properties :
-- compatible: must be "brcm,iproc-pwm"
-- reg: physical base address and length of the controller's registers
-- clocks: phandle + clock specifier pair for the external clock
-- #pwm-cells: Should be 3. See pwm.yaml in this directory for a
- description of the cells format.
-
-Refer to clocks/clock-bindings.txt for generic clock consumer properties.
-
-Example:
-
-pwm: pwm@18031000 {
- compatible = "brcm,iproc-pwm";
- reg = <0x18031000 0x28>;
- clocks = <&osc>;
- #pwm-cells = <3>;
-};
diff --git a/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.yaml b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.yaml
new file mode 100644
index 000000000000..218ab06c34d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/brcm,iproc-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom iProc PWM controller
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+description:
+ This controller has 4 channels.
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: brcm,iproc-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: external clock
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+unevaluatedProperties: false
+
+required:
+ - reg
+ - clocks
+
+examples:
+ - |
+ pwm@18031000 {
+ compatible = "brcm,iproc-pwm";
+ reg = <0x18031000 0x28>;
+ clocks = <&osc>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
deleted file mode 100644
index c7c4347a769a..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-TI SOC ECAP based APWM controller
-
-Required properties:
-- compatible: Must be "ti,<soc>-ecap".
- for am33xx - compatible = "ti,am3352-ecap", "ti,am33xx-ecap";
- for am4372 - compatible = "ti,am4372-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
- for da850 - compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
- for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap";
- for 66ak2g - compatible = "ti,k2g-ecap", "ti,am3352-ecap";
- for am654 - compatible = "ti,am654-ecap", "ti,am3352-ecap";
-- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
- the cells format. The PWM channel index ranges from 0 to 4. The only third
- cell flag supported by this binding is PWM_POLARITY_INVERTED.
-- reg: physical base address and size of the registers map.
-
-Optional properties:
-- clocks: Handle to the ECAP's functional clock.
-- clock-names: Must be set to "fck".
-
-Example:
-
-ecap0: ecap@48300100 { /* ECAP on am33xx */
- compatible = "ti,am3352-ecap", "ti,am33xx-ecap";
- #pwm-cells = <3>;
- reg = <0x48300100 0x80>;
- clocks = <&l4ls_gclk>;
- clock-names = "fck";
-};
-
-ecap0: ecap@48300100 { /* ECAP on am4372 */
- compatible = "ti,am4372-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
- #pwm-cells = <3>;
- reg = <0x48300100 0x80>;
- ti,hwmods = "ecap0";
- clocks = <&l4ls_gclk>;
- clock-names = "fck";
-};
-
-ecap0: ecap@1f06000 { /* ECAP on da850 */
- compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
- #pwm-cells = <3>;
- reg = <0x1f06000 0x80>;
-};
-
-ecap0: ecap@4843e100 {
- compatible = "ti,dra746-ecap", "ti,am3352-ecap";
- #pwm-cells = <3>;
- reg = <0x4843e100 0x80>;
- clocks = <&l4_root_clk_div>;
- clock-names = "fck";
-};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.yaml b/Documentation/devicetree/bindings/pwm/pwm-tiecap.yaml
new file mode 100644
index 000000000000..ed35b6cc48d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm-tiecap.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI SOC ECAP based APWM controller
+
+maintainers:
+ - Vignesh R <vigneshr@ti.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: ti,am3352-ecap
+ - items:
+ - enum:
+ - ti,da850-ecap
+ - ti,am4372-ecap
+ - ti,dra746-ecap
+ - ti,k2g-ecap
+ - ti,am654-ecap
+ - ti,am64-ecap
+ - const: ti,am3352-ecap
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+ description: |
+ See pwm.yaml in this directory for a description of the cells format.
+ The only third cell flag supported by this binding is PWM_POLARITY_INVERTED.
+
+ clock-names:
+ const: fck
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#pwm-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ ecap0: pwm@48300100 { /* ECAP on am33xx */
+ compatible = "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x48300100 0x80>;
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
deleted file mode 100644
index c7e28f6d28be..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-TI SOC EHRPWM based PWM controller
-
-Required properties:
-- compatible: Must be "ti,<soc>-ehrpwm".
- for am33xx - compatible = "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
- for am4372 - compatible = "ti,am4372-ehrpwm", "ti-am3352-ehrpwm", "ti,am33xx-ehrpwm";
- for am654 - compatible = "ti,am654-ehrpwm", "ti-am3352-ehrpwm";
- for da850 - compatible = "ti,da850-ehrpwm", "ti-am3352-ehrpwm", "ti,am33xx-ehrpwm";
- for dra746 - compatible = "ti,dra746-ehrpwm", "ti-am3352-ehrpwm";
-- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
- the cells format. The only third cell flag supported by this binding is
- PWM_POLARITY_INVERTED.
-- reg: physical base address and size of the registers map.
-
-Optional properties:
-- clocks: Handle to the PWM's time-base and functional clock.
-- clock-names: Must be set to "tbclk" and "fck".
-
-Example:
-
-ehrpwm0: pwm@48300200 { /* EHRPWM on am33xx */
- compatible = "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x48300200 0x100>;
- clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
- clock-names = "tbclk", "fck";
-};
-
-ehrpwm0: pwm@48300200 { /* EHRPWM on am4372 */
- compatible = "ti,am4372-ehrpwm", "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x48300200 0x80>;
- clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
- clock-names = "tbclk", "fck";
- ti,hwmods = "ehrpwm0";
-};
-
-ehrpwm0: pwm@1f00000 { /* EHRPWM on da850 */
- compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x1f00000 0x2000>;
-};
-
-ehrpwm0: pwm@4843e200 { /* EHRPWM on dra746 */
- compatible = "ti,dra746-ehrpwm", "ti,am3352-ehrpwm";
- #pwm-cells = <3>;
- reg = <0x4843e200 0x80>;
- clocks = <&ehrpwm0_tbclk>, <&l4_root_clk_div>;
- clock-names = "tbclk", "fck";
-};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.yaml b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.yaml
new file mode 100644
index 000000000000..ee312cb210e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm-tiehrpwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI SOC EHRPWM based PWM controller
+
+maintainers:
+ - Vignesh R <vigneshr@ti.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: ti,am3352-ehrpwm
+ - items:
+ - enum:
+ - ti,da850-ehrpwm
+ - ti,am4372-ehrpwm
+ - ti,dra746-ehrpwm
+ - ti,am654-ehrpwm
+ - ti,am64-epwm
+ - const: ti,am3352-ehrpwm
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+ description: |
+ See pwm.yaml in this directory for a description of the cells format.
+ The only third cell flag supported by this binding is PWM_POLARITY_INVERTED.
+
+ clock-names:
+ items:
+ - const: tbclk
+ - const: fck
+
+ clocks:
+ maxItems: 2
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#pwm-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ ehrpwm0: pwm@48300200 { /* EHRPWM on am33xx */
+ compatible = "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x48300200 0x100>;
+ clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm.yaml b/Documentation/devicetree/bindings/pwm/pwm.yaml
index 7d1f687cee9c..2effe6c0de6b 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm.yaml
@@ -24,8 +24,9 @@ additionalProperties: true
examples:
- |
- pwm: pwm@7000a000 {
- compatible = "nvidia,tegra20-pwm";
- reg = <0x7000a000 0x100>;
- #pwm-cells = <2>;
+ pwm: pwm@1c20e00 {
+ compatible = "allwinner,sun7i-a20-pwm";
+ reg = <0x01c20e00 0xc>;
+ clocks = <&osc24M>;
+ #pwm-cells = <3>;
};
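
A PWM client references the provider through a "pwms" specifier whose cell count matches #pwm-cells; with the three-cell example above the cells are typically the channel index, the period in nanoseconds and the polarity flags. A hedged consumer sketch (the backlight node and the period value are made up for illustration):

    backlight {
        compatible = "pwm-backlight";
        /* channel 0, 5 ms period, no polarity flags */
        pwms = <&pwm 0 5000000 0>;
    };
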
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
index 3c2fa2e93d1b..7ea1070b4b3a 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
@@ -61,6 +61,19 @@ required:
- reg
- '#pwm-cells'
- clocks
+ - power-domains
+
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,pwm-r8a7778
+ - renesas,pwm-r8a7779
+then:
+ required:
+ - resets
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
index aa9a4570c906..81ccb2110162 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -9,6 +9,15 @@ title: Renesas R-Car Timer Pulse Unit PWM Controller
maintainers:
- Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+select:
+ properties:
+ compatible:
+ contains:
+ const: renesas,tpu
+ required:
+ - compatible
+ - '#pwm-cells'
+
properties:
compatible:
items:
@@ -58,6 +67,23 @@ required:
- compatible
- reg
- '#pwm-cells'
+ - clocks
+ - power-domains
+
+allOf:
+ - $ref: pwm.yaml#
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,tpu-r8a73a4
+ - renesas,tpu-r8a7740
+ then:
+ required:
+ - resets
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index 8850c01bd470..9b131c6facbc 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -57,12 +57,14 @@ properties:
maxItems: 1
power-domains:
+ deprecated: true
description:
Power domain to use for enable control. This binding is only
available if the compatible is chosen to regulator-fixed-domain.
maxItems: 1
required-opps:
+ deprecated: true
description:
Performance state to use for enable control. This binding is only
available if the compatible is chosen to regulator-fixed-domain. The
diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
index 12b8963615c3..c2e8c54e5311 100644
--- a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
+++ b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
@@ -36,12 +36,12 @@ properties:
switching frequency must be one of following corresponding value
1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz
- patternProperties:
- "^ldo[1-4]$":
+ ldortc:
type: object
$ref: regulator.yaml#
- "^ldortc$":
+ patternProperties:
+ "^ldo[1-4]$":
type: object
$ref: regulator.yaml#
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
index 8761437ed8ad..aabf50f5b39e 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
@@ -83,7 +83,8 @@ properties:
unevaluatedProperties: false
- "^vsnvs$":
+ properties:
+ vsnvs:
type: object
$ref: regulator.yaml#
description:
diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
index 208a628f8d6c..fc16d903353e 100644
--- a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
@@ -17,7 +17,11 @@ properties:
enum:
- fsl,imx8mq-cm4
- fsl,imx8mm-cm4
+ - fsl,imx8mn-cm7
+ - fsl,imx8mp-cm7
+ - fsl,imx8ulp-cm33
- fsl,imx7d-cm4
+ - fsl,imx7ulp-cm4
- fsl,imx6sx-cm4
clocks:
@@ -49,10 +53,14 @@ properties:
minItems: 1
maxItems: 32
+ fsl,auto-boot:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Indicates whether the default firmware should be loaded and the remote
+ processor started automatically.
+
required:
- compatible
- - clocks
- - syscon
additionalProperties: false
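
A rough sketch of how the new fsl,auto-boot flag might appear in a remote-processor node; the node name and the clock and syscon phandles below are placeholders rather than values taken from a real board file:

    cm4: remoteproc {
        compatible = "fsl,imx7ulp-cm4";
        clocks = <&cm4_clk>;   /* placeholder clock phandle */
        syscon = <&sim>;       /* placeholder syscon phandle */
        fsl,auto-boot;
    };
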
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
deleted file mode 100644
index 229f908fd831..000000000000
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
+++ /dev/null
@@ -1,228 +0,0 @@
-Qualcomm ADSP Peripheral Image Loader
-
-This document defines the binding for a component that loads and boots firmware
-on the Qualcomm ADSP Hexagon core.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,msm8974-adsp-pil"
- "qcom,msm8996-adsp-pil"
- "qcom,msm8996-slpi-pil"
- "qcom,msm8998-adsp-pas"
- "qcom,msm8998-slpi-pas"
- "qcom,qcs404-adsp-pas"
- "qcom,qcs404-cdsp-pas"
- "qcom,qcs404-wcss-pas"
- "qcom,sc7180-mpss-pas"
- "qcom,sdm845-adsp-pas"
- "qcom,sdm845-cdsp-pas"
- "qcom,sdx55-mpss-pas"
- "qcom,sm8150-adsp-pas"
- "qcom,sm8150-cdsp-pas"
- "qcom,sm8150-mpss-pas"
- "qcom,sm8150-slpi-pas"
- "qcom,sm8250-adsp-pas"
- "qcom,sm8250-cdsp-pas"
- "qcom,sm8250-slpi-pas"
- "qcom,sm8350-adsp-pas"
- "qcom,sm8350-cdsp-pas"
- "qcom,sm8350-slpi-pas"
- "qcom,sm8350-mpss-pas"
-
-- interrupts-extended:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: reference to the interrupts that match interrupt-names
-
-- interrupt-names:
- Usage: required
- Value type: <stringlist>
- Definition: The interrupts needed depends on the compatible
- string:
- qcom,msm8974-adsp-pil:
- qcom,msm8996-adsp-pil:
- qcom,msm8996-slpi-pil:
- qcom,msm8998-adsp-pas:
- qcom,msm8998-slpi-pas:
- qcom,qcs404-adsp-pas:
- qcom,qcs404-cdsp-pas:
- qcom,sdm845-adsp-pas:
- qcom,sdm845-cdsp-pas:
- qcom,sm8150-adsp-pas:
- qcom,sm8150-cdsp-pas:
- qcom,sm8150-slpi-pas:
- qcom,sm8250-adsp-pas:
- qcom,sm8250-cdsp-pas:
- qcom,sm8250-slpi-pas:
- qcom,sm8350-adsp-pas:
- qcom,sm8350-cdsp-pas:
- qcom,sm8350-slpi-pas:
- must be "wdog", "fatal", "ready", "handover", "stop-ack"
- qcom,qcs404-wcss-pas:
- qcom,sc7180-mpss-pas:
- qcom,sdx55-mpss-pas:
- qcom,sm8150-mpss-pas:
- qcom,sm8350-mpss-pas:
- must be "wdog", "fatal", "ready", "handover", "stop-ack",
- "shutdown-ack"
-
-- firmware-name:
- Usage: optional
- Value type: <string>
- Definition: must list the relative firmware image path for the
- Hexagon Core.
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: reference to the xo clock and optionally aggre2 clock to be
- held on behalf of the booting Hexagon core
-
-- clock-names:
- Usage: required
- Value type: <stringlist>
- Definition: must be "xo" and optionally include "aggre2"
-
-- cx-supply:
- Usage: required
- Value type: <phandle>
- Definition: reference to the regulator to be held on behalf of the
- booting Hexagon core
-
-- px-supply:
- Usage: required
- Value type: <phandle>
- Definition: reference to the px regulator to be held on behalf of the
- booting Hexagon core
-
-- power-domains:
- Usage: required
- Value type: <phandle>
- Definition: reference to power-domains that match the power-domain-names
-
-- power-domain-names:
- Usage: required
- Value type: <stringlist>
- Definition: The power-domains needed depend on the compatible string:
- qcom,msm8974-adsp-pil:
- qcom,msm8996-adsp-pil:
- qcom,msm8998-adsp-pas:
- must be "cx"
- qcom,msm8996-slpi-pil:
- must be "ss_cx"
- qcom,msm8998-slpi-pas:
- must be "ssc_cx"
- qcom,qcs404-adsp-pas:
- must be "lpi_cx"
- qcom,qcs404-cdsp-pas:
- qcom,qcs404-wcss-pas:
- must be "mx"
- qcom,sdm845-adsp-pas:
- qcom,sdm845-cdsp-pas:
- qcom,sm8150-adsp-pas:
- qcom,sm8150-cdsp-pas:
- qcom,sm8250-cdsp-pas:
- qcom,sm8350-cdsp-pas:
- must be "cx", "load_state"
- qcom,sc7180-mpss-pas:
- qcom,sm8150-mpss-pas:
- qcom,sm8350-mpss-pas:
- must be "cx", "load_state", "mss"
- qcom,sdx55-mpss-pas:
- must be "cx", "mss"
- qcom,sm8250-adsp-pas:
- qcom,sm8350-adsp-pas:
- qcom,sm8150-slpi-pas:
- qcom,sm8250-slpi-pas:
- qcom,sm8350-slpi-pas:
- must be "lcx", "lmx", "load_state"
-
-- memory-region:
- Usage: required
- Value type: <phandle>
- Definition: reference to the reserved-memory for the ADSP
-
-- qcom,smem-states:
- Usage: required
- Value type: <phandle>
- Definition: reference to the smem state for requesting the ADSP to
- shut down
-
-- qcom,smem-state-names:
- Usage: required
- Value type: <stringlist>
- Definition: must be "stop"
-
-
-= SUBNODES
-The adsp node may have an subnode named either "smd-edge" or "glink-edge" that
-describes the communication edge, channels and devices related to the ADSP.
-See ../soc/qcom/qcom,smd.txt and ../soc/qcom/qcom,glink.txt for details on how
-to describe these.
-
-
-= EXAMPLE
-The following example describes the resources needed to boot control the
-ADSP, as it is found on MSM8974 boards.
-
- adsp {
- compatible = "qcom,msm8974-adsp-pil";
-
- interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
- <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
- <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
- <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
- <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog",
- "fatal",
- "ready",
- "handover",
- "stop-ack";
-
- clocks = <&rpmcc RPM_CXO_CLK>;
- clock-names = "xo";
-
- cx-supply = <&pm8841_s2>;
-
- memory-region = <&adsp_region>;
-
- qcom,smem-states = <&adsp_smp2p_out 0>;
- qcom,smem-state-names = "stop";
-
- smd-edge {
- interrupts = <0 156 IRQ_TYPE_EDGE_RISING>;
-
- qcom,ipc = <&apcs 8 8>;
- qcom,smd-edge = <1>;
- };
- };
-
-The following example describes the resources needed to boot control the
-SLPI, as it is found on MSM8996 boards.
-
- slpi {
- compatible = "qcom,msm8996-slpi-pil";
- interrupts-extended = <&intc 0 390 IRQ_TYPE_EDGE_RISING>,
- <&slpi_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
- <&slpi_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
- <&slpi_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
- <&slpi_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog",
- "fatal",
- "ready",
- "handover",
- "stop-ack";
-
- clocks = <&rpmcc MSM8996_RPM_SMD_XO_CLK_SRC>,
- <&rpmcc MSM8996_RPM_SMD_AGGR2_NOC_CLK>;
- clock-names = "xo", "aggre2";
-
- cx-supply = <&pm8994_l26>;
- px-supply = <&pm8994_lvs2>;
-
- memory-region = <&slpi_region>;
- qcom,smem-states = <&slpi_smp2p_out 0>;
- qcom,smem-state-names = "stop";
- };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
new file mode 100644
index 000000000000..c597ccced623
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
@@ -0,0 +1,547 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,adsp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm ADSP Peripheral Image Loader binding
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+ This document defines the binding for a component that loads and boots
+ firmware on the Qualcomm ADSP Hexagon core.
+
+properties:
+ compatible:
+ enum:
+ - qcom,msm8974-adsp-pil
+ - qcom,msm8996-adsp-pil
+ - qcom,msm8996-slpi-pil
+ - qcom,msm8998-adsp-pas
+ - qcom,msm8998-slpi-pas
+ - qcom,qcs404-adsp-pas
+ - qcom,qcs404-cdsp-pas
+ - qcom,qcs404-wcss-pas
+ - qcom,sc7180-mpss-pas
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sc8180x-mpss-pas
+ - qcom,sdm845-adsp-pas
+ - qcom,sdm845-cdsp-pas
+ - qcom,sdx55-mpss-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ - qcom,sm8150-mpss-pas
+ - qcom,sm8150-slpi-pas
+ - qcom,sm8250-adsp-pas
+ - qcom,sm8250-cdsp-pas
+ - qcom,sm8250-slpi-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-cdsp-pas
+ - qcom,sm8350-slpi-pas
+ - qcom,sm8350-mpss-pas
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 8
+
+ clock-names:
+ minItems: 1
+ maxItems: 8
+
+ interrupts:
+ minItems: 5
+ maxItems: 6
+
+ interrupt-names:
+ minItems: 5
+ maxItems: 6
+
+ resets:
+ minItems: 1
+ maxItems: 3
+
+ reset-names:
+ minItems: 1
+ maxItems: 3
+
+ cx-supply:
+ description: Phandle to the CX regulator
+
+ px-supply:
+ description: Phandle to the PX regulator
+
+ power-domains:
+ minItems: 1
+ maxItems: 3
+
+ power-domain-names:
+ minItems: 1
+ maxItems: 3
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Firmware name for the Hexagon core
+
+ memory-region:
+ maxItems: 1
+ description: Reference to the reserved-memory for the Hexagon core
+
+ qcom,smem-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: States used by the AP to signal the Hexagon core
+ items:
+ - description: Stop the modem
+
+ qcom,smem-state-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description: The names of the state bits used for SMP2P output
+ items:
+ - const: stop
+
+ qcom,halt-regs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Phandle reference to a syscon representing TCSR followed by the
+ three offsets within syscon for q6, modem and nc halt registers.
+
+ smd-edge:
+ type: object
+ description:
+ Qualcomm Shared Memory subnode which represents communication edge,
+ channels and devices related to the ADSP.
+
+ glink-edge:
+ type: object
+ description:
+ Qualcomm G-Link subnode which represents communication edge, channels
+ and devices related to the ADSP.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - memory-region
+ - qcom,smem-states
+ - qcom,smem-state-names
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8974-adsp-pil
+ - qcom,msm8996-adsp-pil
+ - qcom,msm8996-slpi-pil
+ - qcom,msm8998-adsp-pas
+ - qcom,qcs404-adsp-pas
+ - qcom,qcs404-wcss-pas
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sc8180x-mpss-pas
+ - qcom,sdm845-adsp-pas
+ - qcom,sdm845-cdsp-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ - qcom,sm8150-mpss-pas
+ - qcom,sm8150-slpi-pas
+ - qcom,sm8250-adsp-pas
+ - qcom,sm8250-cdsp-pas
+ - qcom,sm8250-slpi-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-cdsp-pas
+ - qcom,sm8350-slpi-pas
+ - qcom,sm8350-mpss-pas
+ then:
+ properties:
+ clocks:
+ items:
+ - description: XO clock
+ clock-names:
+ items:
+ - const: xo
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8998-slpi-pas
+ then:
+ properties:
+ clocks:
+ items:
+ - description: XO clock
+ - description: AGGRE2 clock
+ clock-names:
+ items:
+ - const: xo
+ - const: aggre2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,qcs404-cdsp-pas
+ then:
+ properties:
+ clocks:
+ items:
+ - description: XO clock
+ - description: SWAY clock
+ - description: TBU clock
+ - description: BIMC clock
+ - description: AHB AON clock
+ - description: Q6SS SLAVE clock
+ - description: Q6SS MASTER clock
+ - description: Q6 AXIM clock
+ clock-names:
+ items:
+ - const: xo
+ - const: sway
+ - const: tbu
+ - const: bimc
+ - const: ahb_aon
+ - const: q6ss_slave
+ - const: q6ss_master
+ - const: q6_axim
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc7180-mpss-pas
+ then:
+ properties:
+ clocks:
+ items:
+ - description: XO clock
+ - description: IFACE clock
+ - description: BUS clock
+ - description: NAC clock
+ - description: SNOC AXI clock
+ - description: MNOC AXI clock
+ clock-names:
+ items:
+ - const: xo
+ - const: iface
+ - const: bus
+ - const: nav
+ - const: snoc_axi
+ - const: mnoc_axi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8974-adsp-pil
+ - qcom,msm8996-adsp-pil
+ - qcom,msm8996-slpi-pil
+ - qcom,msm8998-adsp-pas
+ - qcom,msm8998-slpi-pas
+ - qcom,qcs404-adsp-pas
+ - qcom,qcs404-cdsp-pas
+ - qcom,qcs404-wcss-pas
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sdm845-adsp-pas
+ - qcom,sdm845-cdsp-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ - qcom,sm8150-slpi-pas
+ - qcom,sm8250-adsp-pas
+ - qcom,sm8250-cdsp-pas
+ - qcom,sm8250-slpi-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-cdsp-pas
+ - qcom,sm8350-slpi-pas
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Watchdog interrupt
+ - description: Fatal interrupt
+ - description: Ready interrupt
+ - description: Handover interrupt
+ - description: Stop acknowledge interrupt
+ interrupt-names:
+ items:
+ - const: wdog
+ - const: fatal
+ - const: ready
+ - const: handover
+ - const: stop-ack
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc7180-mpss-pas
+ - qcom,sc8180x-mpss-pas
+ - qcom,sdx55-mpss-pas
+ - qcom,sm8150-mpss-pas
+ - qcom,sm8350-mpss-pas
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Watchdog interrupt
+ - description: Fatal interrupt
+ - description: Ready interrupt
+ - description: Handover interrupt
+ - description: Stop acknowledge interrupt
+ - description: Shutdown acknowledge interrupt
+ interrupt-names:
+ items:
+ - const: wdog
+ - const: fatal
+ - const: ready
+ - const: handover
+ - const: stop-ack
+ - const: shutdown-ack
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8974-adsp-pil
+ then:
+ required:
+ - cx-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-adsp-pil
+ - qcom,msm8998-adsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: CX power domain
+ power-domain-names:
+ items:
+ - const: cx
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-slpi-pil
+ - qcom,msm8998-slpi-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: SSC-CX power domain
+ power-domain-names:
+ items:
+ - const: ssc_cx
+ required:
+ - px-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc7180-mpss-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: Load State power domain
+ - description: CX power domain
+ - description: MX power domain
+ - description: MSS power domain
+ power-domain-names:
+ items:
+ - const: load_state
+ - const: cx
+ - const: mx
+ - const: mss
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: Load State power domain
+ - description: CX power domain
+ power-domain-names:
+ items:
+ - const: load_state
+ - const: cx
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8150-mpss-pas
+ - qcom,sm8350-mpss-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: Load State power domain
+ - description: CX power domain
+ - description: MSS power domain
+ power-domain-names:
+ items:
+ - const: load_state
+ - const: cx
+ - const: mss
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdx55-mpss-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: MSS power domain
+ power-domain-names:
+ items:
+ - const: cx
+ - const: mss
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc8180x-adsp-pas
+ - qcom,sc8180x-cdsp-pas
+ - qcom,sm8150-slpi-pas
+ - qcom,sm8250-adsp-pas
+ - qcom,sm8250-slpi-pas
+ - qcom,sm8350-adsp-pas
+ - qcom,sm8350-slpi-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: Load State power domain
+ - description: LCX power domain
+ - description: LMX power domain
+ power-domain-names:
+ items:
+ - const: load_state
+ - const: lcx
+ - const: lmx
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8350-cdsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: Load State power domain
+ - description: CX power domain
+ - description: MXC power domain
+ power-domain-names:
+ items:
+ - const: load_state
+ - const: cx
+ - const: mxc
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,qcs404-cdsp-pas
+ then:
+ properties:
+ resets:
+ items:
+ - description: CDSP restart
+ reset-names:
+ items:
+ - const: restart
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc7180-mpss-pas
+ then:
+ properties:
+ resets:
+ items:
+ - description: MSS restart
+ - description: PDC reset
+ reset-names:
+ items:
+ - const: mss_restart
+ - const: pdc_reset
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ adsp {
+ compatible = "qcom,msm8974-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ clocks = <&rpmcc RPM_CXO_CLK>;
+ clock-names = "xo";
+
+ cx-supply = <&pm8841_s2>;
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ smd-edge {
+ interrupts = <0 156 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 8>;
+ qcom,smd-edge = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
index 64afdcfb613d..1e6225677e00 100644
--- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -72,7 +72,6 @@ properties:
- from local to remote, where ACK from the remote means that communication
has been stopped on the remote side.
minItems: 1
- maxItems: 4
mbox-names:
items:
@@ -81,7 +80,6 @@ properties:
- const: shutdown
- const: detach
minItems: 1
- maxItems: 4
memory-region:
description:
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml
index d905d614502b..130fbaacc4b1 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml
@@ -14,8 +14,12 @@ description: |
processor subsystems/clusters (R5FSS). The dual core cluster can be used
either in a LockStep mode providing safety/fault tolerance features or in a
Split mode providing two individual compute cores for doubling the compute
- capacity. These are used together with other processors present on the SoC
- to achieve various system level goals.
+ capacity on most SoCs. These are used together with other processors present
+ on the SoC to achieve various system level goals.
+
+ AM64x SoCs do not support LockStep mode, but rather a new non-safety mode
+ called "Single-CPU" mode, where only Core0 is used, but with ability to use
+ Core1's TCMs as well.
Each Dual-Core R5F sub-system is represented as a single DTS node
representing the cluster, with a pair of child DT nodes representing
@@ -33,6 +37,7 @@ properties:
- ti,am654-r5fss
- ti,j721e-r5fss
- ti,j7200-r5fss
+ - ti,am64-r5fss
power-domains:
description: |
@@ -56,11 +61,12 @@ properties:
ti,cluster-mode:
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1]
description: |
Configuration Mode for the Dual R5F cores within the R5F cluster.
- Should be either a value of 1 (LockStep mode) or 0 (Split mode),
- default is LockStep mode if omitted.
+ Should be either a value of 1 (LockStep mode) or 0 (Split mode) on
+ most SoCs (AM65x, J721E, J7200), default is LockStep mode if omitted;
+ and should be either a value of 0 (Split mode) or 2 (Single-CPU mode)
+ on AM64x SoCs, default is Split mode if omitted.
# R5F Processor Child Nodes:
# ==========================
@@ -97,6 +103,7 @@ patternProperties:
- ti,am654-r5f
- ti,j721e-r5f
- ti,j7200-r5f
+ - ti,am64-r5f
reg:
items:
@@ -198,6 +205,20 @@ patternProperties:
unevaluatedProperties: false
+if:
+ properties:
+ compatible:
+ enum:
+ - ti,am64-r5fss
+then:
+ properties:
+ ti,cluster-mode:
+ enum: [0, 2]
+else:
+ properties:
+ ti,cluster-mode:
+ enum: [0, 1]
+
required:
- compatible
- power-domains
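
For illustration, a minimal device tree sketch of the ti,cluster-mode usage
described in the schema above, assuming an AM64x cluster. The node name, unit
address and the &k3_pds power-domain specifier are placeholders rather than
values taken from this binding, and the child R5F core nodes are omitted:

    main_r5fss0: r5fss@78000000 {
        compatible = "ti,am64-r5fss";
        power-domains = <&k3_pds 119>;
        /* AM64x clusters accept only 0 (Split) or 2 (Single-CPU) here */
        ti,cluster-mode = <2>;
    };
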
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
index 461dc1d8d570..463a97c11eff 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
@@ -48,7 +48,7 @@ The following are the mandatory properties:
bindings for the reset argument specifier as per SoC,
Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
for 66AK2HK/66AK2L/66AK2E SoCs or,
- Documentation/devicetree/bindings/reset/ti,sci-reset.txt
+ Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
for 66AK2G SoCs
- interrupts: Should contain an entry for each value in 'interrupt-names'.
@@ -82,7 +82,7 @@ The following are mandatory properties for Keystone 2 66AK2G SoCs only:
- power-domains: Should contain a phandle to a PM domain provider node
and an args specifier containing the DSP device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
Optional properties:
--------------------
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
index 73400bc6e91d..c6c12129d6b7 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
@@ -65,7 +65,7 @@ properties:
OMAP Mailbox specifier denoting the sub-mailbox, to be used for
communication with the remote processor. The specifier format is
as per the bindings,
- Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
+ Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
This property should match with the sub-mailbox node used in
the firmware image.
@@ -116,7 +116,6 @@ properties:
list, in the specified order, each representing the corresponding
internal RAM memory region.
minItems: 1
- maxItems: 3
items:
- const: l2ram
- const: l1pram
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
index 63071eef1632..d7c3a78e37e6 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
@@ -36,6 +36,9 @@ properties:
enum:
- ti,am3356-pru # for AM335x SoC family (AM3356+ SoCs only)
- ti,am4376-pru # for AM437x SoC family (AM4376+ SoCs only)
+ - ti,am642-pru # for PRUs in K3 AM64x SoC family
+ - ti,am642-rtu # for RTUs in K3 AM64x SoC family
+ - ti,am642-tx-pru # for Tx_PRUs in K3 AM64x SoC family
- ti,am5728-pru # for AM57xx SoC family
- ti,k2g-pru # for 66AK2G SoC family
- ti,am654-pru # for PRUs in K3 AM65x SoC family
@@ -68,6 +71,7 @@ if:
enum:
- ti,am654-rtu
- ti,j721e-rtu
+ - ti,am642-rtu
then:
properties:
$nodename:
@@ -79,6 +83,7 @@ else:
enum:
- ti,am654-tx-pru
- ti,j721e-tx-pru
+ - ti,am642-tx-pru
then:
properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml b/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml
index 27c5e34a3ac6..b11ac533f914 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml
+++ b/Documentation/devicetree/bindings/reset/fsl,imx-src.yaml
@@ -59,7 +59,6 @@ properties:
- description: SRC interrupt
- description: CPU WDOG interrupts out of SRC
minItems: 1
- maxItems: 2
'#reset-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/reset/microchip,rst.yaml b/Documentation/devicetree/bindings/reset/microchip,rst.yaml
new file mode 100644
index 000000000000..370579aeeca1
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/microchip,rst.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/microchip,rst.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Microchip Sparx5 Switch Reset Controller
+
+maintainers:
+ - Steen Hegelund <steen.hegelund@microchip.com>
+ - Lars Povlsen <lars.povlsen@microchip.com>
+
+description: |
+ The Microchip Sparx5 Switch provides reset control and implements the following
+ functions
+ - One Time Switch Core Reset (Soft Reset)
+
+properties:
+ $nodename:
+ pattern: "^reset-controller@[0-9a-f]+$"
+
+ compatible:
+ const: microchip,sparx5-switch-reset
+
+ reg:
+ items:
+ - description: global control block registers
+
+ reg-names:
+ items:
+ - const: gcb
+
+ "#reset-cells":
+ const: 1
+
+ cpu-syscon:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: syscon used to access CPU reset
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - "#reset-cells"
+ - cpu-syscon
+
+additionalProperties: false
+
+examples:
+ - |
+ reset: reset-controller@11010008 {
+ compatible = "microchip,sparx5-switch-reset";
+ reg = <0x11010008 0x4>;
+ reg-names = "gcb";
+ #reset-cells = <1>;
+ cpu-syscon = <&cpu_ctrl>;
+ };
+
diff --git a/Documentation/devicetree/bindings/reset/ti,sci-reset.txt b/Documentation/devicetree/bindings/reset/ti,sci-reset.txt
deleted file mode 100644
index 8b1cf022f18a..000000000000
--- a/Documentation/devicetree/bindings/reset/ti,sci-reset.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-Texas Instruments System Control Interface (TI-SCI) Reset Controller
-=====================================================================
-
-Some TI SoCs contain a system controller (like the Power Management Micro
-Controller (PMMC) on Keystone 66AK2G SoC) that are responsible for controlling
-the state of the various hardware modules present on the SoC. Communication
-between the host processor running an OS and the system controller happens
-through a protocol called TI System Control Interface (TI-SCI protocol).
-For TI SCI details, please refer to the document,
-Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
-
-TI-SCI Reset Controller Node
-============================
-This reset controller node uses the TI SCI protocol to perform the reset
-management of various hardware modules present on the SoC. Must be a child
-node of the associated TI-SCI system controller node.
-
-Required properties:
---------------------
- - compatible : Should be "ti,sci-reset"
- - #reset-cells : Should be 2. Please see the reset consumer node below for
- usage details.
-
-TI-SCI Reset Consumer Nodes
-===========================
-Each of the reset consumer nodes should have the following properties,
-in addition to their own properties.
-
-Required properties:
---------------------
- - resets : A phandle and reset specifier pair, one pair for each reset
- signal that affects the device, or that the device manages.
- The phandle should point to the TI-SCI reset controller node,
- and the reset specifier should have 2 cell-values. The first
- cell should contain the device ID. The second cell should
- contain the reset mask value used by system controller.
- Please refer to the protocol documentation for these values
- to be used for different devices,
- http://processors.wiki.ti.com/index.php/TISCI#66AK2G02_Data
-
-Please also refer to Documentation/devicetree/bindings/reset/reset.txt for
-common reset controller usage by consumers.
-
-Example:
---------
-The following example demonstrates both a TI-SCI reset controller node and a
-consumer (a DSP device) on the 66AK2G SoC.
-
-pmmc: pmmc {
- compatible = "ti,k2g-sci";
-
- k2g_reset: reset-controller {
- compatible = "ti,sci-reset";
- #reset-cells = <2>;
- };
-};
-
-dsp0: dsp@10800000 {
- ...
- resets = <&k2g_reset 0x0046 0x1>;
- ...
-};
diff --git a/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml b/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
new file mode 100644
index 000000000000..4639d2cec557
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/ti,sci-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-SCI reset controller node bindings
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+description: |
+ Some TI SoCs contain a system controller (like the Power Management Micro
+ Controller (PMMC) on Keystone 66AK2G SoC) that is responsible for controlling
+ the state of the various hardware modules present on the SoC. Communication
+ between the host processor running an OS and the system controller happens
+ through a protocol called TI System Control Interface (TI-SCI protocol).
+
+ This reset controller node uses the TI SCI protocol to perform the reset
+ management of various hardware modules present on the SoC. Must be a child
+ node of the associated TI-SCI system controller node.
+
+properties:
+ $nodename:
+ pattern: "^reset-controller$"
+
+ compatible:
+ const: ti,sci-reset
+
+ "#reset-cells":
+ const: 2
+ description:
+ The two cells represent values that the TI-SCI controller defines.
+
+ The first cell should contain the device ID.
+
+ The second cell should contain the reset mask corresponding to the device
+ used by the system controller.
+
+ Please see http://processors.wiki.ti.com/index.php/TISCI for
+ protocol documentation for the values to be used for different devices.
+
+
+additionalProperties: false
+
+examples:
+ - |
+ k3_reset: reset-controller {
+ compatible = "ti,sci-reset";
+ #reset-cells = <2>;
+ };
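
The controller example above pairs with a consumer that uses the two-cell
specifier described under "#reset-cells". The sketch below reuses the DSP
consumer from the removed ti,sci-reset.txt (device ID 0x0046 and reset mask
0x1 for the 66AK2G DSP) and is illustrative only:

    dsp0: dsp@10800000 {
        /* ... */
        resets = <&k3_reset 0x0046 0x1>; /* <device ID> <reset mask> */
        /* ... */
    };
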
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
index 23b227614366..1d38ff76d18f 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
@@ -56,7 +56,6 @@ properties:
interrupts:
minItems: 3
- maxItems: 4
items:
- description: DirError interrupt
- description: DataError interrupt
diff --git a/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml b/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
new file mode 100644
index 000000000000..61963fa9347e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/intel,ixp46x-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel IXP46x RNG bindings
+
+description: |
+ The Intel IXP46x has a random number generator at a fixed physical
+ location in memory. Each read is guaranteed to provide a unique
+ 32 bit random number.
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+ compatible:
+ const: intel,ixp46x-rng
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@70002100 {
+ compatible = "intel,ixp46x-rng";
+ reg = <0x70002100 4>;
+ };
+
diff --git a/Documentation/devicetree/bindings/rng/mtk-rng.txt b/Documentation/devicetree/bindings/rng/mtk-rng.txt
deleted file mode 100644
index dfdcb5cd2ea8..000000000000
--- a/Documentation/devicetree/bindings/rng/mtk-rng.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Device-Tree bindings for Mediatek random number generator
-found in MediaTek SoC family
-
-Required properties:
-- compatible : Should be
- "mediatek,mt7622-rng", "mediatek,mt7623-rng" : for MT7622
- "mediatek,mt7629-rng", "mediatek,mt7623-rng" : for MT7629
- "mediatek,mt7623-rng" : for MT7623
- "mediatek,mt8516-rng", "mediatek,mt7623-rng" : for MT8516
-- clocks : list of clock specifiers, corresponding to
- entries in clock-names property;
-- clock-names : Should contain "rng" entries;
-- reg : Specifies base physical address and size of the registers
-
-Example:
-
-rng: rng@1020f000 {
- compatible = "mediatek,mt7623-rng";
- reg = <0 0x1020f000 0 0x1000>;
- clocks = <&infracfg CLK_INFRA_TRNG>;
- clock-names = "rng";
-};
diff --git a/Documentation/devicetree/bindings/rng/mtk-rng.yaml b/Documentation/devicetree/bindings/rng/mtk-rng.yaml
new file mode 100644
index 000000000000..61888e07bda0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/mtk-rng.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/rng/mtk-rng.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek Random number generator
+
+maintainers:
+ - Sean Wang <sean.wang@mediatek.com>
+
+properties:
+ $nodename:
+ pattern: "^rng@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - enum:
+ - mediatek,mt7623-rng
+ - items:
+ - enum:
+ - mediatek,mt7622-rng
+ - mediatek,mt7629-rng
+ - mediatek,mt8365-rng
+ - mediatek,mt8516-rng
+ - const: mediatek,mt7623-rng
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: rng
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt2701-clk.h>
+ rng: rng@1020f000 {
+ compatible = "mediatek,mt7623-rng";
+ reg = <0x1020f000 0x1000>;
+ clocks = <&infracfg CLK_INFRA_TRNG>;
+ clock-names = "rng";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
index b1b0ee769b71..beeb90e55727 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
@@ -32,7 +32,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: RTC Alarm 0
- description: RTC Alarm 1
diff --git a/Documentation/devicetree/bindings/rtc/arm,pl031.yaml b/Documentation/devicetree/bindings/rtc/arm,pl031.yaml
new file mode 100644
index 000000000000..fa5f2eda372e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/arm,pl031.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/arm,pl031.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arm Primecell PL031 Real Time Clock
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: arm,pl031
+ required:
+ - compatible
+
+allOf:
+ - $ref: rtc.yaml#
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+properties:
+ compatible:
+ items:
+ - const: arm,pl031
+ - const: arm,primecell
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
+ start-year: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ rtc@10017000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x10017000 0x1000>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/epson,rx8900.txt b/Documentation/devicetree/bindings/rtc/epson,rx8900.txt
deleted file mode 100644
index 3f61e516ecf6..000000000000
--- a/Documentation/devicetree/bindings/rtc/epson,rx8900.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Real Time Clock driver for:
- - Epson RX8900
- - Micro Crystal rv8803
-
-Required properties:
-- compatible: should be: "microcrystal,rv8803" or "epson,rx8900"
-- reg : the I2C address of the device for I2C
-
-Optional properties:
-- epson,vdet-disable : boolean, if present will disable voltage detector.
- Should be set if no backup battery is used.
-- trickle-diode-disable : boolean, if present will disable internal trickle
- charger diode
-
-Example:
-
- rtc: rtc@32 {
- compatible = "epson,rx8900"
- reg = <0x32>;
- epson,vdet-disable;
- trickle-diode-disable;
- };
diff --git a/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml b/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml
new file mode 100644
index 000000000000..29fe39bb08ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/epson,rx8900.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EPSON RX8900 / Microcrystal RV8803 Real-Time Clock DT bindings
+
+maintainers:
+ - Marek Vasut <marex@denx.de>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ enum:
+ - epson,rx8900
+ - microcrystal,rv8803
+
+ reg:
+ maxItems: 1
+
+ epson,vdet-disable:
+ type: boolean
+ description: |
+ Disable voltage detector. Should be set if no backup battery is used.
+
+ trickle-diode-disable: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ epson,vdet-disable;
+ trickle-diode-disable;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
deleted file mode 100644
index e3938f5e0b6c..000000000000
--- a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Faraday Technology FTRTC010 Real Time Clock
-
-This RTC appears in for example the Storlink Gemini family of
-SoCs.
-
-Required properties:
-- compatible : Should be one of:
- "faraday,ftrtc010"
- "cortina,gemini-rtc", "faraday,ftrtc010"
-
-Optional properties:
-- clocks: when present should contain clock references to the
- PCLK and EXTCLK clocks. Faraday calls the later CLK1HZ and
- says the clock should be 1 Hz, but implementers actually seem
- to choose different clocks here, like Cortina who chose
- 32768 Hz (a typical low-power clock).
-- clock-names: should name the clocks "PCLK" and "EXTCLK"
- respectively.
-
-Examples:
-
-rtc@45000000 {
- compatible = "cortina,gemini-rtc";
- reg = <0x45000000 0x100>;
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&foo 0>, <&foo 1>;
- clock-names = "PCLK", "EXTCLK";
-};
diff --git a/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
new file mode 100644
index 000000000000..056d42daae06
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/faraday,ftrtc010.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Faraday Technology FTRTC010 Real Time Clock
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ This RTC appears in, for example, the Storlink Gemini family of SoCs.
+
+properties:
+ compatible:
+ oneOf:
+ - const: faraday,ftrtc010
+ - items:
+ - const: cortina,gemini-rtc
+ - const: faraday,ftrtc010
+
+ resets:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: PCLK clocks
+ - description: EXTCLK clocks. Faraday calls it CLK1HZ and says the clock
+ should be 1 Hz, but implementers actually seem to choose different
+ clocks here, like Cortina who chose 32768 Hz (a typical low-power clock).
+
+ clock-names:
+ items:
+ - const: "PCLK"
+ - const: "EXTCLK"
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ rtc@45000000 {
+ compatible = "cortina,gemini-rtc", "faraday,ftrtc010";
+ reg = <0x45000000 0x100>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&foo 0>, <&foo 1>;
+ clock-names = "PCLK", "EXTCLK";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/imxdi-rtc.yaml b/Documentation/devicetree/bindings/rtc/imxdi-rtc.yaml
index 06bd737821c1..4807c95a663c 100644
--- a/Documentation/devicetree/bindings/rtc/imxdi-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/imxdi-rtc.yaml
@@ -21,7 +21,6 @@ properties:
- description: rtc alarm interrupt
- description: dryice security violation interrupt
minItems: 1
- maxItems: 2
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf8563.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf8563.yaml
new file mode 100644
index 000000000000..a98b72752349
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf8563.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf8563.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Philips PCF8563/Epson RTC8564 Real Time Clock
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ enum:
+ - epson,rtc8564
+ - microcrystal,rv8564
+ - nxp,pca8565
+ - nxp,pcf8563
+ - nxp,pcf85263
+ - nxp,pcf85363
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 0
+
+ clock-output-names:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ start-year: true
+ wakeup-source: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ };
+ };
+...
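
The schema above also makes the RTC a clock provider (#clock-cells = <0>);
the removed pcf8563.txt showed the matching consumer side, reproduced here as
a sketch (the consuming node and the &pcf8563 label are placeholders):

    device {
        /* ... */
        clocks = <&pcf8563>;
        /* ... */
    };
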
diff --git a/Documentation/devicetree/bindings/rtc/pcf85363.txt b/Documentation/devicetree/bindings/rtc/pcf85363.txt
deleted file mode 100644
index 94adc1cf93d9..000000000000
--- a/Documentation/devicetree/bindings/rtc/pcf85363.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-NXP PCF85263/PCF85363 Real Time Clock
-============================
-
-Required properties:
-- compatible: Should contain "nxp,pcf85263" or "nxp,pcf85363".
-- reg: I2C address for chip.
-
-Optional properties:
-- interrupts: IRQ line for the RTC (not implemented).
-
-Example:
-
-pcf85363: pcf85363@51 {
- compatible = "nxp,pcf85363";
- reg = <0x51>;
-};
-
diff --git a/Documentation/devicetree/bindings/rtc/pcf8563.txt b/Documentation/devicetree/bindings/rtc/pcf8563.txt
deleted file mode 100644
index 0a900f7c8977..000000000000
--- a/Documentation/devicetree/bindings/rtc/pcf8563.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* Philips PCF8563/Epson RTC8564 Real Time Clock
-
-Philips PCF8563/Epson RTC8564 Real Time Clock
-
-Required properties:
-- compatible: Should contain "nxp,pcf8563",
- "epson,rtc8564" or
- "microcrystal,rv8564" or
- "nxp,pca8565"
-- reg: I2C address for chip.
-
-Optional property:
-- #clock-cells: Should be 0.
-- clock-output-names:
- overwrite the default clock name "pcf8563-clkout"
-
-Example:
-
-pcf8563: pcf8563@51 {
- compatible = "nxp,pcf8563";
- reg = <0x51>;
- #clock-cells = <0>;
-};
-
-device {
-...
- clocks = <&pcf8563>;
-...
-};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
index c746cb221210..cdd196b1e9bd 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
@@ -21,10 +21,19 @@ Optional properties:
clock name
- wakeup-source: Enables wake up of host system on alarm
+Optional child node:
+- clock: Provide this if the square wave pin is used as a boot-enabled fixed clock.
+
Example:
rtc@68 {
compatible = "st,m41t80";
reg = <0x68>;
interrupt-parent = <&UIC0>;
interrupts = <0x9 0x8>;
+
+ clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
};
diff --git a/Documentation/devicetree/bindings/rtc/ti,bq32000.yaml b/Documentation/devicetree/bindings/rtc/ti,bq32000.yaml
new file mode 100644
index 000000000000..bf9c1c4ddb7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/ti,bq32000.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/ti,bq32000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI BQ32000 I2C Serial Real-Time Clock
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ const: ti,bq32000
+
+ reg:
+ const: 0x68
+
+ interrupts:
+ maxItems: 1
+
+ start-year: true
+
+ trickle-resistor-ohms:
+ enum: [ 1120, 20180 ]
+
+ trickle-diode-disable: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32000: rtc@68 {
+ compatible = "ti,bq32000";
+ reg = <0x68>;
+ trickle-resistor-ohms = <1120>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/ti,bq32k.txt b/Documentation/devicetree/bindings/rtc/ti,bq32k.txt
deleted file mode 100644
index e204906b9ad3..000000000000
--- a/Documentation/devicetree/bindings/rtc/ti,bq32k.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* TI BQ32000 I2C Serial Real-Time Clock
-
-Required properties:
-- compatible: Should contain "ti,bq32000".
-- reg: I2C address for chip
-
-Optional properties:
-- trickle-resistor-ohms : Selected resistor for trickle charger
- Values usable are 1120 and 20180
- Should be given if trickle charger should be enabled
-- trickle-diode-disable : Do not use internal trickle charger diode
- Should be given if internal trickle charger diode should be disabled
-Example:
- bq32000: rtc@68 {
- compatible = "ti,bq32000";
- trickle-resistor-ohms = <1120>;
- reg = <0x68>;
- };
diff --git a/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml b/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
new file mode 100644
index 000000000000..bdb72d3ddf2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/xlnx,zynqmp-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq Ultrascale+ MPSoC Real Time Clock
+
+description:
+ RTC controller for the Xilinx Zynq MPSoC Real Time Clock.
+ The RTC controller has separate IRQ lines for seconds and alarm.
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ const: xlnx,zynqmp-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+
+ interrupt-names:
+ items:
+ - const: alarm
+ - const: sec
+
+ calibration:
+ description: |
+ Calibration value for the 1 sec period, which will
+ be programmed directly into the calibration register.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x1
+ maximum: 0x1FFFFF
+ default: 0x198233
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ rtc: rtc@ffa60000 {
+ compatible = "xlnx,zynqmp-rtc";
+ reg = <0x0 0xffa60000 0x0 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 26 4>, <0 27 4>;
+ interrupt-names = "alarm", "sec";
+ calibration = <0x198233>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/xlnx-rtc.txt b/Documentation/devicetree/bindings/rtc/xlnx-rtc.txt
deleted file mode 100644
index 0df6f016b1b7..000000000000
--- a/Documentation/devicetree/bindings/rtc/xlnx-rtc.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-* Xilinx Zynq Ultrascale+ MPSoC Real Time Clock
-
-RTC controller for the Xilinx Zynq MPSoC Real Time Clock
-Separate IRQ lines for seconds and alarm
-
-Required properties:
-- compatible: Should be "xlnx,zynqmp-rtc"
-- reg: Physical base address of the controller and length
- of memory mapped region.
-- interrupts: IRQ lines for the RTC.
-- interrupt-names: interrupt line names eg. "sec" "alarm"
-
-Optional:
-- calibration: calibration value for 1 sec period which will
- be programmed directly to calibration register
-
-Example:
-rtc: rtc@ffa60000 {
- compatible = "xlnx,zynqmp-rtc";
- reg = <0x0 0xffa60000 0x100>;
- interrupt-parent = <&gic>;
- interrupts = <0 26 4>, <0 27 4>;
- interrupt-names = "alarm", "sec";
- calibration = <0x198233>;
-};
diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
index 41f57c448621..a0bf061b80fe 100644
--- a/Documentation/devicetree/bindings/serial/8250.yaml
+++ b/Documentation/devicetree/bindings/serial/8250.yaml
@@ -10,7 +10,7 @@ maintainers:
- devicetree@vger.kernel.org
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
- if:
anyOf:
- required:
diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml
new file mode 100644
index 000000000000..1c826fcf5828
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/8250_omap.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for 8250 compliant UARTs on TI's OMAP2+ and K3 SoCs
+
+maintainers:
+ - Vignesh Raghavendra <vigneshr@ti.com>
+
+allOf:
+ - $ref: /schemas/serial/serial.yaml#
+ - $ref: /schemas/serial/rs485.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ti,am3352-uart
+ - ti,am4372-uart
+ - ti,am654-uart
+ - ti,dra742-uart
+ - ti,omap2-uart
+ - ti,omap3-uart
+ - ti,omap4-uart
+ - items:
+ - enum:
+ - ti,am64-uart
+ - ti,j721e-uart
+ - const: ti,am654-uart
+
+ ti,hwmods:
+ description:
+ Must be "uart<n>", n being the instance number (1-based)
+ This property is applicable only on legacy platforms mainly omap2/3
+ and ti81xx and should not be used on other platforms.
+ $ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
+
+ dmas:
+ minItems: 1
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+ description:
+ The first entry is the module IRQ required for normal IO operation.
+ The second entry is optional and corresponds to the system wakeup IRQ
+ where supported.
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: fclk
+
+ rts-gpios: true
+ cts-gpios: true
+ dtr-gpios: true
+ dsr-gpios: true
+ rng-gpios: true
+ dcd-gpios: true
+ rs485-rts-delay: true
+ rs485-rts-active-low: true
+ rs485-rx-during-tx: true
+ rs485-rts-active-high: true
+ linux,rs485-enabled-at-boot-time: true
+ rts-gpio: true
+ power-domains: true
+ clock-frequency: true
+ current-speed: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+if:
+ properties:
+ compatible:
+ oneOf:
+ - const: ti,omap2-uart
+ - const: ti,omap3-uart
+ - const: ti,omap4-uart
+
+then:
+ properties:
+ ti,hwmods:
+ items:
+ - pattern: "^uart([1-9])$"
+
+else:
+ properties:
+ ti,hwmods: false
+
+examples:
+ - |
+ serial@49042000 {
+ compatible = "ti,omap3-uart";
+ reg = <0x49042000 0x400>;
+ interrupts = <80>;
+ dmas = <&sdma 81 &sdma 82>;
+ dma-names = "tx", "rx";
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ };
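
The interrupts description above allows an optional second entry for system
wakeup. Below is a sketch of that variant, assuming the wakeup line comes from
a separate GPIO controller (hence interrupts-extended); the phandles, IRQ
numbers and unit address are placeholders, not values from this schema:

    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    main_uart0: serial@2800000 {
        compatible = "ti,am654-uart";
        reg = <0x2800000 0x100>;
        interrupts-extended = <&gic500 GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
                              <&wkup_gpio0 0 IRQ_TYPE_EDGE_FALLING>;
        clock-frequency = <48000000>;
    };
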
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index 75ebc9952a99..7487aa6ef849 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -55,6 +55,11 @@ properties:
- const: pclk
- const: baud
+ fifo-size:
+ description: The FIFO size supported by the UART channel.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [64, 128]
+
required:
- compatible
- reg
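
A short sketch of the new fifo-size property in context; the compatible
string, unit address, interrupt and clock phandles are typical placeholder
values and are not taken from this schema (only the "pclk" and "baud" clock
names appear in the excerpt above, the leading "xtal" entry is assumed):

    uart_A: serial@24000 {
        compatible = "amlogic,meson-gx-uart";
        reg = <0x24000 0x18>;
        interrupts = <26>;
        clocks = <&xtal>, <&pclk>, <&xtal>;
        clock-names = "xtal", "pclk", "baud";
        fifo-size = <128>;
    };
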
diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
index 46c62745f901..6d176588df47 100644
--- a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
@@ -10,7 +10,7 @@ maintainers:
- Al Cooper <alcooperx@gmail.com>
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
description: |+
The Broadcom UART is based on the basic 8250 UART but with
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml b/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
index 2b06c6ce4a75..9d949296a142 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
@@ -71,6 +71,18 @@ properties:
received, and that the peripheral should invert its input using the
INVR registers.
+ fsl,dma-info:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ description: |
+ The first cell contains the size of the DMA buffer chunks, the second cell
+ contains the number of chunks used for the device. Multiplying both numbers
+ gives the total size of memory used for receiving data.
+ When not configured, the system will use default settings, which are
+ sensible for most use cases. If you need low-latency processing on slow
+ connections, this needs to be configured appropriately.
+
uart-has-rtscts: true
rs485-rts-delay: true
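
A short sketch of how the new fsl,dma-info property might be used; the
compatible, unit address and interrupt are typical i.MX values used here only
as placeholders:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    uart1: serial@30860000 {
        compatible = "fsl,imx8mq-uart", "fsl,imx6q-uart";
        reg = <0x30860000 0x10000>;
        interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
        /* 512-byte DMA chunks, 8 chunks: 4 KiB of receive buffer in total */
        fsl,dma-info = <512 8>;
    };
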
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
index bd21060d26e0..a90c971b4f1f 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
@@ -36,14 +36,12 @@ properties:
- description: ipg clock
- description: baud clock
minItems: 1
- maxItems: 2
clock-names:
items:
- const: ipg
- const: baud
minItems: 1
- maxItems: 2
dmas:
items:
diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.yaml b/Documentation/devicetree/bindings/serial/ingenic,uart.yaml
index 7748d8c3bab8..b432d4dff730 100644
--- a/Documentation/devicetree/bindings/serial/ingenic,uart.yaml
+++ b/Documentation/devicetree/bindings/serial/ingenic,uart.yaml
@@ -10,7 +10,7 @@ maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
index b7e0e32b9ac6..2d0dbdf32d1d 100644
--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
@@ -5,10 +5,10 @@ Required properties:
- compatible:
- "marvell,armada-3700-uart" for the standard variant of the UART
(32 bytes FIFO, no DMA, level interrupts, 8-bit access to the
- FIFO, baudrate limited to 230400).
+ FIFO), also called UART1.
- "marvell,armada-3700-uart-ext" for the extended variant of the
UART (128 bytes FIFO, DMA, front interrupts, 8-bit or 32-bit
- accesses to the FIFO, baudrate unlimited by the dividers).
+ accesses to the FIFO), also called UART2.
- reg: offset and length of the register set for the device.
- clocks: UART reference clock used to derive the baudrate. If no clock
is provided (possible only with the "marvell,armada-3700-uart"
@@ -33,7 +33,7 @@ Required properties:
Example:
uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
- reg = <0x12000 0x200>;
+ reg = <0x12000 0x18>;
clocks = <&xtalclk>;
interrupts =
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
deleted file mode 100644
index c2db8cabf2ab..000000000000
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-OMAP UART controller
-
-Required properties:
-- compatible : should be "ti,am64-uart", "ti,am654-uart" for AM64 controllers
-- compatible : should be "ti,j721e-uart", "ti,am654-uart" for J721E controllers
-- compatible : should be "ti,am654-uart" for AM654 controllers
-- compatible : should be "ti,omap2-uart" for OMAP2 controllers
-- compatible : should be "ti,omap3-uart" for OMAP3 controllers
-- compatible : should be "ti,omap4-uart" for OMAP4 controllers
-- compatible : should be "ti,am4372-uart" for AM437x controllers
-- compatible : should be "ti,am3352-uart" for AM335x controllers
-- compatible : should be "ti,dra742-uart" for DRA7x controllers
-- reg : address and length of the register space
-- interrupts or interrupts-extended : Should contain the uart interrupt
- specifier or both the interrupt
- controller phandle and interrupt
- specifier.
-- ti,hwmods : Must be "uart<n>", n being the instance number (1-based)
-
-Optional properties:
-- clock-frequency : frequency of the clock input to the UART
-- dmas : DMA specifier, consisting of a phandle to the DMA controller
- node and a DMA channel number.
-- dma-names : "rx" for receive channel, "tx" for transmit channel.
-- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
-- rs485-rts-active-high: drive RTS high when sending (default is low).
-- clocks: phandle to the functional clock as per
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Example:
-
- uart4: serial@49042000 {
- compatible = "ti,omap3-uart";
- reg = <0x49042000 0x400>;
- interrupts = <80>;
- dmas = <&sdma 81 &sdma 82>;
- dma-names = "tx", "rx";
- ti,hwmods = "uart4";
- clock-frequency = <48000000>;
- };
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
index 1f8e9f2644b6..5ea00f8a283d 100644
--- a/Documentation/devicetree/bindings/serial/pl011.yaml
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -10,7 +10,7 @@ maintainers:
- Rob Herring <robh@kernel.org>
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
# Need a custom select here or 'arm,primecell' will match on lots of nodes
select:
@@ -24,12 +24,9 @@ select:
properties:
compatible:
- oneOf:
- - items:
- - const: arm,pl011
- - const: arm,primecell
- - items:
- - const: arm,primecell
+ items:
+ - const: arm,pl011
+ - const: arm,primecell
reg:
maxItems: 1
@@ -103,7 +100,7 @@ dependencies:
poll-rate-ms: [ auto-poll ]
poll-timeout-ms: [ auto-poll ]
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml
index a344369285b6..a644e5af12b2 100644
--- a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml
@@ -10,7 +10,7 @@ maintainers:
- Oleksij Rempel <o.rempel@pengutronix.de>
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
properties:
compatible:
@@ -35,7 +35,7 @@ required:
- clocks
- clock-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
index 82aefdb0d45e..e98ec48fee46 100644
--- a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
@@ -35,7 +35,7 @@ required:
- clocks
- clock-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 22d76829f7ae..6b8731f7f2fb 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -64,6 +64,10 @@ properties:
- const: renesas,rcar-gen3-scif # R-Car Gen3 and RZ/G2
- const: renesas,scif # generic SCIF compatible UART
+ - items:
+ - enum:
+ - renesas,scif-r9a07g044 # RZ/G2{L,LC}
+
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 97ec8a093bf3..f064e5b76cf1 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -44,7 +44,6 @@ properties:
clock-names:
description: N = 0 is allowed for SoCs without internal baud clock mux.
minItems: 2
- maxItems: 5
items:
- const: uart
- pattern: '^clk_uart_baud[0-3]$'
@@ -82,7 +81,7 @@ required:
unevaluatedProperties: false
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
- if:
properties:
diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
index 2fdf4ed198da..c75ba3fb6465 100644
--- a/Documentation/devicetree/bindings/serial/serial.yaml
+++ b/Documentation/devicetree/bindings/serial/serial.yaml
@@ -23,6 +23,8 @@ properties:
$nodename:
pattern: "^serial(@.*)?$"
+ label: true
+
cts-gpios:
maxItems: 1
description:
diff --git a/Documentation/devicetree/bindings/serial/sifive-serial.yaml b/Documentation/devicetree/bindings/serial/sifive-serial.yaml
index 5fa94dacbba9..09aae43f65a7 100644
--- a/Documentation/devicetree/bindings/serial/sifive-serial.yaml
+++ b/Documentation/devicetree/bindings/serial/sifive-serial.yaml
@@ -12,7 +12,7 @@ maintainers:
- Palmer Dabbelt <palmer@sifive.com>
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
properties:
compatible:
@@ -49,7 +49,7 @@ required:
- interrupts
- clocks
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index 87ef1e218152..b49fda5e608f 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -10,7 +10,7 @@ maintainers:
- Rob Herring <robh@kernel.org>
allOf:
- - $ref: /schemas/serial.yaml#
+ - $ref: serial.yaml#
properties:
compatible:
@@ -23,6 +23,7 @@ properties:
- items:
- enum:
- rockchip,px30-uart
+ - rockchip,rk1808-uart
- rockchip,rk3036-uart
- rockchip,rk3066-uart
- rockchip,rk3188-uart
@@ -31,6 +32,7 @@ properties:
- rockchip,rk3328-uart
- rockchip,rk3368-uart
- rockchip,rk3399-uart
+ - rockchip,rk3568-uart
- rockchip,rv1108-uart
- const: snps,dw-apb-uart
- items:
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
index 71a6426bc558..f50f4ca893a0 100644
--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
@@ -112,8 +112,7 @@ required:
- interrupts
- clocks
-additionalProperties:
- type: object
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
index 8051c17e640e..d74a7a5ae9f2 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
@@ -27,6 +27,7 @@ Required properties in pwrap device node.
"mediatek,mt8135-pwrap" for MT8135 SoCs
"mediatek,mt8173-pwrap" for MT8173 SoCs
"mediatek,mt8183-pwrap" for MT8183 SoCs
+ "mediatek,mt8195-pwrap" for MT8195 SoCs
"mediatek,mt8516-pwrap" for MT8516 SoCs
- interrupts: IRQ for pwrap in SOC
- reg-names: Must include the following entries:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
index 84671950ca0d..4663c2bcad50 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
@@ -164,7 +164,6 @@ patternProperties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: UART core irq
- description: Wakeup irq (RX GPIO)
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index 2684f22a1d85..d511f01fcac6 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -32,12 +32,14 @@ properties:
enum:
- qcom,rpm-apq8084
- qcom,rpm-ipq6018
+ - qcom,rpm-msm8226
- qcom,rpm-msm8916
- qcom,rpm-msm8974
- qcom,rpm-msm8976
- qcom,rpm-msm8996
- qcom,rpm-msm8998
- qcom,rpm-sdm660
+ - qcom,rpm-sm6125
- qcom,rpm-qcs404
qcom,smd-channels:
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.txt b/Documentation/devicetree/bindings/soc/rockchip/grf.txt
deleted file mode 100644
index f96511aa3897..000000000000
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-* Rockchip General Register Files (GRF)
-
-The general register file will be used to do static set by software, which
-is composed of many registers for system control.
-
-From RK3368 SoCs, the GRF is divided into two sections,
-- GRF, used for general non-secure system,
-- SGRF, used for general secure system,
-- PMUGRF, used for always on system
-
-On RK3328 SoCs, the GRF adds a section for USB2PHYGRF,
-
-ON RK3308 SoC, the GRF is divided into four sections:
-- GRF, used for general non-secure system,
-- SGRF, used for general secure system,
-- DETECTGRF, used for audio codec system,
-- COREGRF, used for pvtm,
-
-Required Properties:
-
-- compatible: GRF should be one of the following:
- - "rockchip,px30-grf", "syscon": for px30
- - "rockchip,rk3036-grf", "syscon": for rk3036
- - "rockchip,rk3066-grf", "syscon": for rk3066
- - "rockchip,rk3188-grf", "syscon": for rk3188
- - "rockchip,rk3228-grf", "syscon": for rk3228
- - "rockchip,rk3288-grf", "syscon": for rk3288
- - "rockchip,rk3308-grf", "syscon": for rk3308
- - "rockchip,rk3328-grf", "syscon": for rk3328
- - "rockchip,rk3368-grf", "syscon": for rk3368
- - "rockchip,rk3399-grf", "syscon": for rk3399
- - "rockchip,rv1108-grf", "syscon": for rv1108
-- compatible: DETECTGRF should be one of the following:
- - "rockchip,rk3308-detect-grf", "syscon": for rk3308
-- compatilbe: COREGRF should be one of the following:
- - "rockchip,rk3308-core-grf", "syscon": for rk3308
-- compatible: PMUGRF should be one of the following:
- - "rockchip,px30-pmugrf", "syscon": for px30
- - "rockchip,rk3368-pmugrf", "syscon": for rk3368
- - "rockchip,rk3399-pmugrf", "syscon": for rk3399
-- compatible: SGRF should be one of the following:
- - "rockchip,rk3288-sgrf", "syscon": for rk3288
-- compatible: USB2PHYGRF should be one of the following:
- - "rockchip,px30-usb2phy-grf", "syscon": for px30
- - "rockchip,rk3328-usb2phy-grf", "syscon": for rk3328
-- compatible: USBGRF should be one of the following:
- - "rockchip,rv1108-usbgrf", "syscon": for rv1108
-- reg: physical base address of the controller and length of memory mapped
- region.
-
-Example: GRF and PMUGRF of RK3399 SoCs
-
- pmugrf: syscon@ff320000 {
- compatible = "rockchip,rk3399-pmugrf", "syscon";
- reg = <0x0 0xff320000 0x0 0x1000>;
- };
-
- grf: syscon@ff770000 {
- compatible = "rockchip,rk3399-grf", "syscon";
- reg = <0x0 0xff770000 0x0 0x10000>;
- };
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
new file mode 100644
index 000000000000..62fa72cfea34
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -0,0 +1,261 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/rockchip/grf.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip General Register Files (GRF)
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - rockchip,rk3288-sgrf
+ - rockchip,rv1108-pmugrf
+ - rockchip,rv1108-usbgrf
+ - const: syscon
+ - items:
+ - enum:
+ - rockchip,px30-grf
+ - rockchip,px30-pmugrf
+ - rockchip,px30-usb2phy-grf
+ - rockchip,rk3036-grf
+ - rockchip,rk3066-grf
+ - rockchip,rk3188-grf
+ - rockchip,rk3228-grf
+ - rockchip,rk3288-grf
+ - rockchip,rk3308-core-grf
+ - rockchip,rk3308-detect-grf
+ - rockchip,rk3308-grf
+ - rockchip,rk3308-usb2phy-grf
+ - rockchip,rk3328-grf
+ - rockchip,rk3328-usb2phy-grf
+ - rockchip,rk3368-grf
+ - rockchip,rk3368-pmugrf
+ - rockchip,rk3399-grf
+ - rockchip,rk3399-pmugrf
+ - rockchip,rk3568-grf
+ - rockchip,rk3568-pmugrf
+ - rockchip,rv1108-grf
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties:
+ type: object
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,px30-grf
+
+ then:
+ properties:
+ lvds:
+ description:
+ Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3288-grf
+
+ then:
+ properties:
+ edp-phy:
+ description:
+ Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3066-grf
+ - rockchip,rk3188-grf
+ - rockchip,rk3288-grf
+
+ then:
+ properties:
+ usbphy:
+ type: object
+
+ $ref: "/schemas/phy/rockchip-usb-phy.yaml#"
+
+ unevaluatedProperties: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3328-grf
+
+ then:
+ properties:
+ gpio:
+ type: object
+
+ $ref: "/schemas/gpio/rockchip,rk3328-grf-gpio.yaml#"
+
+ unevaluatedProperties: false
+
+ power-controller:
+ type: object
+
+ $ref: "/schemas/power/rockchip,power-controller.yaml#"
+
+ unevaluatedProperties: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3399-grf
+
+ then:
+ properties:
+ mipi-dphy-rx0:
+ type: object
+
+ $ref: "/schemas/phy/rockchip-mipi-dphy-rx0.yaml#"
+
+ unevaluatedProperties: false
+
+ pcie-phy:
+ description:
+ Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
+
+ patternProperties:
+ "phy@[0-9a-f]+$":
+ description:
+ Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-pmugrf
+ - rockchip,rk3036-grf
+ - rockchip,rk3308-grf
+ - rockchip,rk3368-pmugrf
+
+ then:
+ properties:
+ reboot-mode:
+ type: object
+
+ $ref: "/schemas/power/reset/syscon-reboot-mode.yaml#"
+
+ unevaluatedProperties: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-usb2phy-grf
+ - rockchip,rk3228-grf
+ - rockchip,rk3308-usb2phy-grf
+ - rockchip,rk3328-usb2phy-grf
+ - rockchip,rk3399-grf
+ - rockchip,rv1108-grf
+
+ then:
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ patternProperties:
+ "usb2phy@[0-9a-f]+$":
+ type: object
+
+ $ref: "/schemas/phy/phy-rockchip-inno-usb2.yaml#"
+
+ unevaluatedProperties: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-pmugrf
+ - rockchip,px30-grf
+ - rockchip,rk3228-grf
+ - rockchip,rk3288-grf
+ - rockchip,rk3328-grf
+ - rockchip,rk3368-pmugrf
+ - rockchip,rk3368-grf
+ - rockchip,rk3399-pmugrf
+ - rockchip,rk3399-grf
+
+ then:
+ properties:
+ io-domains:
+ description:
+ Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rk3399-power.h>
+ grf: syscon@ff770000 {
+ compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+ reg = <0xff770000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mipi_dphy_rx0: mipi-dphy-rx0 {
+ compatible = "rockchip,rk3399-mipi-dphy-rx0";
+ clocks = <&cru SCLK_MIPIDPHY_REF>,
+ <&cru SCLK_DPHY_RX0_CFG>,
+ <&cru PCLK_VIO_GRF>;
+ clock-names = "dphy-ref", "dphy-cfg", "grf";
+ power-domains = <&power RK3399_PD_VIO>;
+ #phy-cells = <0>;
+ };
+
+ u2phy0: usb2phy@e450 {
+ compatible = "rockchip,rk3399-usb2phy";
+ reg = <0xe450 0x10>;
+ clocks = <&cru SCLK_USB2PHY0_REF>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ clock-output-names = "clk_usbphy0_480m";
+
+ u2phy0_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "linestate";
+ };
+
+ u2phy0_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "otg-bvalid", "otg-id",
+ "linestate";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt b/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
deleted file mode 100644
index 8304eceb62e4..000000000000
--- a/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
+++ /dev/null
@@ -1,136 +0,0 @@
-* Rockchip Power Domains
-
-Rockchip processors include support for multiple power domains which can be
-powered up/down by software based on different application scenes to save power.
-
-Required properties for power domain controller:
-- compatible: Should be one of the following.
- "rockchip,px30-power-controller" - for PX30 SoCs.
- "rockchip,rk3036-power-controller" - for RK3036 SoCs.
- "rockchip,rk3066-power-controller" - for RK3066 SoCs.
- "rockchip,rk3128-power-controller" - for RK3128 SoCs.
- "rockchip,rk3188-power-controller" - for RK3188 SoCs.
- "rockchip,rk3228-power-controller" - for RK3228 SoCs.
- "rockchip,rk3288-power-controller" - for RK3288 SoCs.
- "rockchip,rk3328-power-controller" - for RK3328 SoCs.
- "rockchip,rk3366-power-controller" - for RK3366 SoCs.
- "rockchip,rk3368-power-controller" - for RK3368 SoCs.
- "rockchip,rk3399-power-controller" - for RK3399 SoCs.
-- #power-domain-cells: Number of cells in a power-domain specifier.
- Should be 1 for multiple PM domains.
-- #address-cells: Should be 1.
-- #size-cells: Should be 0.
-
-Required properties for power domain sub nodes:
-- reg: index of the power domain, should use macros in:
- "include/dt-bindings/power/px30-power.h" - for PX30 type power domain.
- "include/dt-bindings/power/rk3036-power.h" - for RK3036 type power domain.
- "include/dt-bindings/power/rk3066-power.h" - for RK3066 type power domain.
- "include/dt-bindings/power/rk3128-power.h" - for RK3128 type power domain.
- "include/dt-bindings/power/rk3188-power.h" - for RK3188 type power domain.
- "include/dt-bindings/power/rk3228-power.h" - for RK3228 type power domain.
- "include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain.
- "include/dt-bindings/power/rk3328-power.h" - for RK3328 type power domain.
- "include/dt-bindings/power/rk3366-power.h" - for RK3366 type power domain.
- "include/dt-bindings/power/rk3368-power.h" - for RK3368 type power domain.
- "include/dt-bindings/power/rk3399-power.h" - for RK3399 type power domain.
-- clocks (optional): phandles to clocks which need to be enabled while power domain
- switches state.
-- pm_qos (optional): phandles to qos blocks which need to be saved and restored
- while power domain switches state.
-
-Qos Example:
-
- qos_gpu: qos_gpu@ffaf0000 {
- compatible ="syscon";
- reg = <0x0 0xffaf0000 0x0 0x20>;
- };
-
-Example:
-
- power: power-controller {
- compatible = "rockchip,rk3288-power-controller";
- #power-domain-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pd_gpu {
- reg = <RK3288_PD_GPU>;
- clocks = <&cru ACLK_GPU>;
- pm_qos = <&qos_gpu>;
- };
- };
-
- power: power-controller {
- compatible = "rockchip,rk3368-power-controller";
- #power-domain-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pd_gpu_1 {
- reg = <RK3368_PD_GPU_1>;
- clocks = <&cru ACLK_GPU_CFG>;
- };
- };
-
-Example 2:
- power: power-controller {
- compatible = "rockchip,rk3399-power-controller";
- #power-domain-cells = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pd_vio {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <RK3399_PD_VIO>;
-
- pd_vo {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <RK3399_PD_VO>;
-
- pd_vopb {
- reg = <RK3399_PD_VOPB>;
- };
-
- pd_vopl {
- reg = <RK3399_PD_VOPL>;
- };
- };
- };
- };
-
-Node of a device using power domains must have a power-domains property,
-containing a phandle to the power device node and an index specifying which
-power domain to use.
-The index should use macros in:
- "include/dt-bindings/power/px30-power.h" - for px30 type power domain.
- "include/dt-bindings/power/rk3036-power.h" - for rk3036 type power domain.
- "include/dt-bindings/power/rk3128-power.h" - for rk3128 type power domain.
- "include/dt-bindings/power/rk3128-power.h" - for rk3228 type power domain.
- "include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain.
- "include/dt-bindings/power/rk3328-power.h" - for rk3328 type power domain.
- "include/dt-bindings/power/rk3366-power.h" - for rk3366 type power domain.
- "include/dt-bindings/power/rk3368-power.h" - for rk3368 type power domain.
- "include/dt-bindings/power/rk3399-power.h" - for rk3399 type power domain.
-
-Example of the node using power domain:
-
- node {
- /* ... */
- power-domains = <&power RK3288_PD_GPU>;
- /* ... */
- };
-
- node {
- /* ... */
- power-domains = <&power RK3368_PD_GPU_1>;
- /* ... */
- };
-
- node {
- /* ... */
- power-domains = <&power RK3399_PD_VOPB>;
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
deleted file mode 100644
index 6217e64309de..000000000000
--- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-Texas Instruments TI-SCI Generic Power Domain
----------------------------------------------
-
-Some TI SoCs contain a system controller (like the PMMC, etc...) that is
-responsible for controlling the state of the IPs that are present.
-Communication between the host processor running an OS and the system
-controller happens through a protocol known as TI-SCI [1].
-
-[1] Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
-
-PM Domain Node
-==============
-The PM domain node represents the global PM domain managed by the PMMC, which
-in this case is the implementation as documented by the generic PM domain
-bindings in Documentation/devicetree/bindings/power/power-domain.yaml. Because
-this relies on the TI SCI protocol to communicate with the PMMC it must be a
-child of the pmmc node.
-
-Required Properties:
---------------------
-- compatible: should be "ti,sci-pm-domain"
-- #power-domain-cells: Can be one of the following:
- 1: Containing the device id of each node
- 2: First entry should be device id
- Second entry should be one of the floowing:
- TI_SCI_PD_EXCLUSIVE: To allow device to be
- exclusively controlled by
- the requesting hosts.
- TI_SCI_PD_SHARED: To allow device to be shared
- by multiple hosts.
-
-Example (K2G):
--------------
- pmmc: pmmc {
- compatible = "ti,k2g-sci";
- ...
-
- k2g_pds: power-controller {
- compatible = "ti,sci-pm-domain";
- #power-domain-cells = <1>;
- };
- };
-
-PM Domain Consumers
-===================
-Hardware blocks belonging to a PM domain should contain a "power-domains"
-property that is a phandle pointing to the corresponding PM domain node
-along with an index representing the device id to be passed to the PMMC
-for device control.
-
-Required Properties:
---------------------
-- power-domains: phandle pointing to the corresponding PM domain node
- and an ID representing the device.
-
-See http://processors.wiki.ti.com/index.php/TISCI#66AK2G02_Data for the list
-of valid identifiers for k2g.
-
-Example (K2G):
---------------------
- uart0: serial@2530c00 {
- compatible = "ns16550a";
- ...
- power-domains = <&k2g_pds 0x002c>;
- };
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
new file mode 100644
index 000000000000..9e6cb4ee9755
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/ti/sci-pm-domain.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI-SCI generic power domain node bindings
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+allOf:
+ - $ref: /schemas/power/power-domain.yaml#
+
+description: |
+ Some TI SoCs contain a system controller (like the Power Management Micro
+ Controller (PMMC) on the Keystone 66AK2G SoC) that is responsible for controlling
+ the state of the various hardware modules present on the SoC. Communication
+ between the host processor running an OS and the system controller happens
+ through a protocol called TI System Control Interface (TI-SCI protocol).
+
+ This PM domain node represents the global PM domain managed by the TI-SCI
+ controller. Since this relies on the TI SCI protocol to communicate with
+ the TI-SCI controller, it must be a child of the TI-SCI controller node.
+
+properties:
+ compatible:
+ const: ti,sci-pm-domain
+
+ "#power-domain-cells":
+ enum: [1, 2]
+ description:
+ The cells represent values that the TI-SCI controller defines.
+
+ The first cell should contain the device ID.
+
+ The second cell, required only when the cell value is 2, should be one
+ of the following:
+ TI_SCI_PD_EXCLUSIVE - Allows the device to be exclusively controlled
+ by the requesting host, or
+ TI_SCI_PD_SHARED - Allows the device to be shared by multiple hosts.
+ Please refer to dt-bindings/soc/ti,sci_pm_domain.h for the definitions.
+
+ Please see http://processors.wiki.ti.com/index.php/TISCI for
+ protocol documentation for the values to be used for different devices.
+
+additionalProperties: false
+
+examples:
+ - |
+ k2g_pds: power-controller {
+ compatible = "ti,sci-pm-domain";
+ #power-domain-cells = <1>;
+ };
+
+ - |
+ k3_pds: power-controller {
+ compatible = "ti,sci-pm-domain";
+ #power-domain-cells = <2>;
+ };
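+
+ # The consumer side is described only in prose above. The following sketch
+ # is adapted from the example in the binding text this schema replaces; the
+ # reg size and the use of TI_SCI_PD_EXCLUSIVE are illustrative assumptions.
+ - |
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ serial@2530c00 {
+ compatible = "ns16550a";
+ reg = <0x02530c00 0x100>;
+ power-domains = <&k3_pds 0x002c TI_SCI_PD_EXCLUSIVE>;
+ };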
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
index dbc62821c60b..9790617af1bc 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
@@ -100,7 +100,6 @@ patternProperties:
properties:
reg:
minItems: 2 # On AM437x one of two PRUSS units don't contain Shared RAM.
- maxItems: 3
items:
- description: Address and size of the Data RAM0.
- description: Address and size of the Data RAM1.
@@ -111,7 +110,6 @@ patternProperties:
reg-names:
minItems: 2
- maxItems: 3
items:
- const: dram0
- const: dram1
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
index a16e37b01e1d..39b66e9ce3e3 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
@@ -20,6 +20,9 @@ properties:
- const: allwinner,sun6i-a31-i2s
- const: allwinner,sun8i-a83t-i2s
- const: allwinner,sun8i-h3-i2s
+ - items:
+ - const: allwinner,sun8i-v3-i2s
+ - const: allwinner,sun8i-h3-i2s
- const: allwinner,sun50i-a64-codec-i2s
- items:
- const: allwinner,sun50i-a64-i2s
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
index 9718358826ab..26eca21e1f0f 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
@@ -12,12 +12,15 @@ maintainers:
properties:
compatible:
- enum:
+ oneOf:
# FIXME: This is documented in the PRCM binding, but needs to be
# migrated here at some point
# - allwinner,sun8i-a23-codec-analog
- - allwinner,sun8i-h3-codec-analog
- - allwinner,sun8i-v3s-codec-analog
+ - const: allwinner,sun8i-h3-codec-analog
+ - items:
+ - const: allwinner,sun8i-v3-codec-analog
+ - const: allwinner,sun8i-h3-codec-analog
+ - const: allwinner,sun8i-v3s-codec-analog
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
index 67405e6d8168..19f111f40225 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
@@ -12,7 +12,11 @@ maintainers:
properties:
"#sound-dai-cells":
- const: 0
+ minimum: 0
+ maximum: 1
+ description:
+ A value of 0 is deprecated. When used, it only allows access to
+ the ADC/DAC and AIF1 (the CPU DAI), not the other two AIFs/DAIs.
compatible:
oneOf:
@@ -50,7 +54,7 @@ additionalProperties: false
examples:
- |
audio-codec@1c22e00 {
- #sound-dai-cells = <0>;
+ #sound-dai-cells = <1>;
compatible = "allwinner,sun8i-a33-codec";
reg = <0x01c22e00 0x400>;
interrupts = <0 29 4>;
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
index 7dfaa2ab906f..5d416fdaf023 100644
--- a/Documentation/devicetree/bindings/sound/cs42l42.txt
+++ b/Documentation/devicetree/bindings/sound/cs42l42.txt
@@ -81,6 +81,13 @@ Optional properties:
< x1 x2 x3 x4 >
Default = < 15 8 4 1>
+ - cirrus,hs-bias-sense-disable: This is a boolean property. If present, the
+ HSBIAS sense is disabled. Configures HSBIAS output current sense through
+ the external 2.21-kOhm resistor. HSBIAS_SENSE is a hardware feature that
+ reduces the potential pop noise when the headset is unplugged slowly. On
+ some platforms, however, ESD voltage affects it and causes tests to fail,
+ especially with the CTIA headset type. For such hardware setups, a designer
+ might want to tweak the default behavior by disabling HSBIAS sense.
Example:
diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.yaml b/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
index 4454aca34d56..f226ec13167a 100644
--- a/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
@@ -25,6 +25,7 @@ properties:
- fsl,imx8mq-spdif
- fsl,imx8mm-spdif
- fsl,imx8mn-spdif
+ - fsl,imx8ulp-spdif
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/fsl-sai.txt b/Documentation/devicetree/bindings/sound/fsl-sai.txt
index 0dc83cc4a236..c71c5861d787 100644
--- a/Documentation/devicetree/bindings/sound/fsl-sai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-sai.txt
@@ -9,8 +9,10 @@ Required properties:
- compatible : Compatible list, contains "fsl,vf610-sai",
"fsl,imx6sx-sai", "fsl,imx6ul-sai",
- "fsl,imx7ulp-sai", "fsl,imx8mq-sai" or
- "fsl,imx8qm-sai".
+ "fsl,imx7ulp-sai", "fsl,imx8mq-sai",
+ "fsl,imx8qm-sai", "fsl,imx8mm-sai",
+ "fsl,imx8mn-sai", "fsl,imx8mp-sai", or
+ "fsl,imx8ulp-sai".
- reg : Offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-card.yaml b/Documentation/devicetree/bindings/sound/imx-audio-card.yaml
new file mode 100644
index 000000000000..d1816dd061cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/imx-audio-card.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/imx-audio-card.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX audio sound card
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx-audio-card
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: User specified audio sound card name
+
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source. Valid names could be power supplies,
+ MicBias of codec and the jacks on the board.
+
+patternProperties:
+ ".*-dai-link$":
+ description:
+ Each subnode represents a dai link. The subnodes of each dai link are
+ the cpu/codec dais.
+
+ type: object
+
+ properties:
+ link-name:
+ description: Indicates dai-link name and PCM stream name.
+ $ref: /schemas/types.yaml#/definitions/string
+ maxItems: 1
+
+ format:
+ description: audio format.
+ items:
+ enum:
+ - i2s
+ - dsp_b
+
+ dai-tdm-slot-num:
+ description: see tdm-slot.txt.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ dai-tdm-slot-width:
+ description: see tdm-slot.txt.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ cpu:
+ description: Holds subnode which indicates cpu dai.
+ type: object
+ properties:
+ sound-dai: true
+
+ codec:
+ description: Holds subnode which indicates codec dai.
+ type: object
+ properties:
+ sound-dai: true
+
+ fsl,mclk-equal-bclk:
+ description: Indicates that mclk can be equal to bclk, especially for the SAI interface
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ required:
+ - link-name
+ - cpu
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - model
+
+additionalProperties: false
+
+examples:
+ - |
+ sound-ak4458 {
+ compatible = "fsl,imx-audio-card";
+ model = "ak4458-audio";
+ pri-dai-link {
+ link-name = "akcodec";
+ format = "i2s";
+ fsl,mclk-equal-bclk;
+ cpu {
+ sound-dai = <&sai1>;
+ };
+ codec {
+ sound-dai = <&ak4458_1>, <&ak4458_2>;
+ };
+ };
+ fe-dai-link {
+ link-name = "HiFi-ASRC-FE";
+ format = "i2s";
+ cpu {
+ sound-dai = <&easrc>;
+ };
+ };
+ be-dai-link {
+ link-name = "HiFi-ASRC-BE";
+ format = "dsp_b";
+ dai-tdm-slot-num = <8>;
+ dai-tdm-slot-width = <32>;
+ fsl,mclk-equal-bclk;
+ cpu {
+ sound-dai = <&sai1>;
+ };
+ codec {
+ sound-dai = <&ak4458_1>, <&ak4458_2>;
+ };
+ };
+ };
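+
+ # audio-routing is described above but not exercised by the example; this
+ # second example is an illustrative sketch, and the wm8960 codec, sai2 CPU
+ # DAI and widget/jack names are assumptions rather than source material.
+ - |
+ sound-wm8960 {
+ compatible = "fsl,imx-audio-card";
+ model = "wm8960-audio";
+ audio-routing = "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R";
+ pri-dai-link {
+ link-name = "HiFi";
+ format = "i2s";
+ cpu {
+ sound-dai = <&sai2>;
+ };
+ codec {
+ sound-dai = <&wm8960>;
+ };
+ };
+ };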
diff --git a/Documentation/devicetree/bindings/sound/imx-audmux.txt b/Documentation/devicetree/bindings/sound/imx-audmux.txt
deleted file mode 100644
index 2db4dcbee1b9..000000000000
--- a/Documentation/devicetree/bindings/sound/imx-audmux.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Freescale Digital Audio Mux (AUDMUX) device
-
-Required properties:
-
- - compatible : "fsl,imx21-audmux" for AUDMUX version firstly used
- on i.MX21, or "fsl,imx31-audmux" for the version
- firstly used on i.MX31.
-
- - reg : Should contain AUDMUX registers location and length.
-
-An initial configuration can be setup using child nodes.
-
-Required properties of optional child nodes:
-
- - fsl,audmux-port : Integer of the audmux port that is configured by this
- child node.
-
- - fsl,port-config : List of configuration options for the specific port.
- For imx31-audmux and above, it is a list of tuples
- <ptcr pdcr>. For imx21-audmux it is a list of pcr
- values.
-
-Example:
-
-audmux@21d8000 {
- compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
- reg = <0x021d8000 0x4000>;
-};
diff --git a/Documentation/devicetree/bindings/sound/imx-audmux.yaml b/Documentation/devicetree/bindings/sound/imx-audmux.yaml
new file mode 100644
index 000000000000..dab45c310670
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/imx-audmux.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/imx-audmux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Digital Audio Mux device
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx27-audmux
+ - const: fsl,imx21-audmux
+ - items:
+ - enum:
+ - fsl,imx25-audmux
+ - fsl,imx35-audmux
+ - fsl,imx50-audmux
+ - fsl,imx51-audmux
+ - fsl,imx53-audmux
+ - fsl,imx6q-audmux
+ - fsl,imx6sl-audmux
+ - fsl,imx6sll-audmux
+ - fsl,imx6sx-audmux
+ - const: fsl,imx31-audmux
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: audmux
+
+patternProperties:
+ "^mux-[0-9a-z]*$":
+ type: object
+ properties:
+ fsl,audmux-port:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Integer of the audmux port that is configured by this child node
+
+ fsl,port-config:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: |
+ List of configuration options for the specific port.
+ For imx31-audmux and above, it is a list of <ptcr pdcr> tuples.
+ For imx21-audmux it is a list of pcr values.
+
+ required:
+ - fsl,audmux-port
+ - fsl,port-config
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ audmux@21d8000 {
+ compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
+ reg = <0x021d8000 0x4000>;
+ };
+ - |
+ audmux@10016000 {
+ compatible = "fsl,imx27-audmux", "fsl,imx21-audmux";
+ reg = <0x10016000 0x1000>;
+ clocks = <&clks 1>;
+ clock-names = "audmux";
+
+ mux-ssi0 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <0xcb205000>;
+ };
+
+ mux-pins4 {
+ fsl,audmux-port = <2>;
+ fsl,port-config = <0x00001000>;
+ };
+ };
+ - |
+ #include <dt-bindings/sound/fsl-imx-audmux.h>
+ audmux@21d8000 {
+ compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
+ reg = <0x021d8000 0x4000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+
+ mux-ssi1 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN 0
+ IMX_AUDMUX_V2_PTCR_TFSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TCSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TFSDIR 0
+ IMX_AUDMUX_V2_PTCR_TCLKDIR IMX_AUDMUX_V2_PDCR_RXDSEL(2)
+ >;
+ };
+
+ mux-pins3 {
+ fsl,audmux-port = <2>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN IMX_AUDMUX_V2_PDCR_RXDSEL(0)
+ 0 IMX_AUDMUX_V2_PDCR_TXRXEN
+ >;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
index 249970952202..5bdd30a8a404 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
@@ -28,7 +28,6 @@ properties:
minItems: 2
clock-names:
- minItems: 2
items:
- const: pll_a
- const: plla_out0
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
index 38e52e7dbb40..63370709c768 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
@@ -34,7 +34,6 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: I2S bit clock
- description:
@@ -48,7 +47,6 @@ properties:
clock-names:
minItems: 1
- maxItems: 2
items:
- const: i2s
- const: sync_input
diff --git a/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml b/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
new file mode 100644
index 000000000000..ffb8fcfeb629
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nxp,tfa989x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP/Goodix TFA989X (TFA1) Audio Amplifiers
+
+maintainers:
+ - Stephan Gerhold <stephan@gerhold.net>
+
+properties:
+ compatible:
+ enum:
+ - nxp,tfa9895
+ - nxp,tfa9897
+
+ reg:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 0
+
+ sound-name-prefix:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ Used as prefix for sink/source names of the component. Must be a
+ unique string among multiple instances of the same component.
+
+ vddd-supply:
+ description: regulator phandle for the VDDD power supply.
+
+required:
+ - compatible
+ - reg
+ - '#sound-dai-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@34 {
+ compatible = "nxp,tfa9895";
+ reg = <0x34>;
+ sound-name-prefix = "Speaker Left";
+ #sound-dai-cells = <0>;
+ };
+ audio-codec@36 {
+ compatible = "nxp,tfa9895";
+ reg = <0x36>;
+ sound-name-prefix = "Speaker Right";
+ #sound-dai-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
index e8f716b5f875..9b225dbf8b79 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
@@ -77,6 +77,31 @@ properties:
minimum: 1800000
maximum: 2850000
+ qcom,hphl-jack-type-normally-closed:
+ description: Indicates that HPHL jack switch type is normally closed
+ type: boolean
+
+ qcom,ground-jack-type-normally-closed:
+ description: Indicates that Headset Ground switch type is normally closed
+ type: boolean
+
+ qcom,mbhc-headset-vthreshold-microvolt:
+ description: Voltage threshold value for headset detection
+ minimum: 0
+ maximum: 2850000
+
+ qcom,mbhc-headphone-vthreshold-microvolt:
+ description: Voltage threshold value for headphone detection
+ minimum: 0
+ maximum: 2850000
+
+ qcom,mbhc-buttons-vthreshold-microvolt:
+ description:
+ Array of 8 Voltage threshold values corresponding to headset
+ button0 - button7
+ minItems: 8
+ maxItems: 8
+
clock-output-names:
const: mclk
@@ -159,6 +184,11 @@ examples:
qcom,micbias2-microvolt = <1800000>;
qcom,micbias3-microvolt = <1800000>;
qcom,micbias4-microvolt = <1800000>;
+ qcom,hphl-jack-type-normally-closed;
+ qcom,ground-jack-type-normally-closed;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
clock-names = "extclk";
clocks = <&rpmhcc 2>;
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml
new file mode 100644
index 000000000000..49a267b306f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd938x-sdw.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/qcom,wcd938x-sdw.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for Qualcomm SoundWire Slave devices on WCD9380/WCD9385
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+ Qualcomm WCD9380/WCD9385 Codec is a standalone Hi-Fi audio codec IC.
+ It has RX and TX SoundWire slave devices. This binding is for the
+ slave devices.
+
+properties:
+ compatible:
+ const: sdw20217010d00
+
+ reg:
+ maxItems: 1
+
+ qcom,tx-port-mapping:
+ description: |
+ Specifies the static port mapping between slave and master tx ports,
+ in the order of the slave port index.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 4
+ maxItems: 4
+
+ qcom,rx-port-mapping:
+ description: |
+ Specifies the static port mapping between slave and master rx ports,
+ in the order of the slave port index.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 5
+ maxItems: 5
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ soundwire@3210000 {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ reg = <0x03210000 0x2000>;
+ wcd938x_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+ };
+
+ soundwire@3230000 {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ reg = <0x03230000 0x2000>;
+ wcd938x_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 3 4 5>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
new file mode 100644
index 000000000000..cb74ce40c2e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd938x.yaml
@@ -0,0 +1,146 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/qcom,wcd938x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for Qualcomm WCD9380/WCD9385 Audio Codec
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+ Qualcomm WCD9380/WCD9385 Codec is a standalone Hi-Fi audio codec IC.
+ It has RX and TX Soundwire slave devices.
+
+properties:
+ compatible:
+ enum:
+ - qcom,wcd9380-codec
+ - qcom,wcd9385-codec
+
+ reset-gpios:
+ description: GPIO spec for reset line to use
+ maxItems: 1
+
+ vdd-buck-supply:
+ description: A reference to the 1.8V buck supply
+
+ vdd-rxtx-supply:
+ description: A reference to the 1.8V rx/tx supply
+
+ vdd-io-supply:
+ description: A reference to the 1.8V I/O supply
+
+ qcom,tx-device:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: A reference to Soundwire tx device phandle
+
+ qcom,rx-device:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: A reference to Soundwire rx device phandle
+
+ qcom,micbias1-microvolt:
+ description: micbias1 voltage
+ minimum: 1800000
+ maximum: 2850000
+
+ qcom,micbias2-microvolt:
+ description: micbias2 voltage
+ minimum: 1800000
+ maximum: 2850000
+
+ qcom,micbias3-microvolt:
+ description: micbias3 voltage
+ minimum: 1800000
+ maximum: 2850000
+
+ qcom,micbias4-microvolt:
+ description: micbias4 voltage
+ minimum: 1800000
+ maximum: 2850000
+
+ qcom,hphl-jack-type-normally-closed:
+ description: Indicates that HPHL jack switch type is normally closed
+ type: boolean
+
+ qcom,ground-jack-type-normally-closed:
+ description: Indicates that Headset Ground switch type is normally closed
+ type: boolean
+
+ qcom,mbhc-headset-vthreshold-microvolt:
+ description: Voltage threshold value for headset detection
+ minimum: 0
+ maximum: 2850000
+
+ qcom,mbhc-headphone-vthreshold-microvolt:
+ description: Voltage threshold value for headphone detection
+ minimum: 0
+ maximum: 2850000
+
+ qcom,mbhc-buttons-vthreshold-microvolt:
+ description:
+ Array of 8 Voltage threshold values corresponding to headset
+ button0 - button7
+ minItems: 8
+ maxItems: 8
+
+ '#sound-dai-cells':
+ const: 1
+
+required:
+ - compatible
+ - reset-gpios
+ - qcom,tx-device
+ - qcom,rx-device
+ - qcom,micbias1-microvolt
+ - qcom,micbias2-microvolt
+ - qcom,micbias3-microvolt
+ - qcom,micbias4-microvolt
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ codec {
+ compatible = "qcom,wcd9380-codec";
+ reset-gpios = <&tlmm 32 0>;
+ #sound-dai-cells = <1>;
+ qcom,tx-device = <&wcd938x_tx>;
+ qcom,rx-device = <&wcd938x_rx>;
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,hphl-jack-type-normally-closed;
+ qcom,ground-jack-type-normally-closed;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ };
+
+ /* ... */
+
+ soundwire@3210000 {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ reg = <0x03210000 0x2000>;
+ wcd938x_rx: codec@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+ };
+
+ soundwire@3230000 {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ reg = <0x03230000 0x2000>;
+ wcd938x_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 3 4 5>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index 605de3a5847f..c2930d65728e 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -86,9 +86,11 @@ properties:
power-domains: true
resets:
+ minItems: 1
maxItems: 11
reset-names:
+ minItems: 1
maxItems: 11
clocks:
@@ -110,6 +112,13 @@ properties:
- pattern: '^dvc\.[0-1]$'
- pattern: '^clk_(a|b|c|i)$'
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ patternProperties:
+ port(@[0-9a-f]+)?:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
@@ -257,7 +266,6 @@ required:
- "#sound-dai-cells"
allOf:
- - $ref: audio-graph.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.yaml b/Documentation/devicetree/bindings/sound/sgtl5000.yaml
index 70b4a8831073..e762c320b574 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.yaml
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.yaml
@@ -75,6 +75,10 @@ properties:
$ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 0, 1, 2, 3 ]
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml b/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
index f2443b651282..06e83461705c 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
@@ -26,7 +26,6 @@ properties:
- description: Base address and size of SAI common register set.
- description: Base address and size of SAI identification register set.
minItems: 1
- maxItems: 2
ranges:
maxItems: 1
@@ -81,14 +80,12 @@ patternProperties:
- description: sai_ck clock feeding the internal clock generator.
- description: MCLK clock from a SAI set as master clock provider.
minItems: 1
- maxItems: 2
clock-names:
items:
- const: sai_ck
- const: MCLK
minItems: 1
- maxItems: 2
dmas:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
index ca75890f0d07..f59125bc79d1 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
@@ -6,6 +6,7 @@ Required properties:
- compatible - "string" - One of:
"ti,tlv320aic32x4" TLV320AIC3204
"ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256
+ "ti,tas2505" TAS2505, TAS2521
- reg: I2C slave address
- supply-*: Required supply regulators are:
"iov" - digital IO power supply
diff --git a/Documentation/devicetree/bindings/sound/wm8750.txt b/Documentation/devicetree/bindings/sound/wm8750.txt
deleted file mode 100644
index 682f221f6f38..000000000000
--- a/Documentation/devicetree/bindings/sound/wm8750.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-WM8750 and WM8987 audio CODECs
-
-These devices support both I2C and SPI (configured with pin strapping
-on the board).
-
-Required properties:
-
- - compatible : "wlf,wm8750" or "wlf,wm8987"
-
- - reg : the I2C address of the device for I2C, the chip select
- number for SPI.
-
-Example:
-
-wm8750: codec@1a {
- compatible = "wlf,wm8750";
- reg = <0x1a>;
-};
diff --git a/Documentation/devicetree/bindings/sound/wm8750.yaml b/Documentation/devicetree/bindings/sound/wm8750.yaml
new file mode 100644
index 000000000000..24246ac7bbdf
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8750.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/wm8750.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: WM8750 and WM8987 audio CODECs
+
+description: |
+ These devices support both I2C and SPI (configured with pin strapping
+ on the board).
+
+maintainers:
+ - Mark Brown <broonie@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - wlf,wm8750
+ - wlf,wm8987
+
+ reg:
+ description:
+ The I2C address of the device for I2C, the chip select number for SPI
+ maxItems: 1
+
+additionalProperties: false
+
+required:
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1a {
+ compatible = "wlf,wm8750";
+ reg = <0x1a>;
+ };
+ };
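+
+ # The description notes the codec can also be controlled over SPI, with reg
+ # acting as the chip-select number; this SPI example is a sketch and the
+ # chip-select value is an assumption.
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1 {
+ compatible = "wlf,wm8987";
+ reg = <1>;
+ };
+ };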
diff --git a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
index e3fb553d9180..4d46c49ec32b 100644
--- a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
+++ b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
@@ -35,7 +35,6 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: controller register bus clock
- description: baud rate generator and delay control clock
diff --git a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
index 6ee19d49fd3c..ec5873919170 100644
--- a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
@@ -56,7 +56,6 @@ properties:
reg-names:
minItems: 1
- maxItems: 5
items:
- const: mspi
- const: bspi
@@ -71,7 +70,6 @@ properties:
interrupt-names:
oneOf:
- minItems: 1
- maxItems: 7
items:
- const: mspi_done
- const: mspi_halted
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index faef4f6f55b8..8246891602e7 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -79,22 +79,7 @@ properties:
description:
The SPI controller acts as a slave, instead of a master.
-allOf:
- - if:
- not:
- required:
- - spi-slave
- then:
- properties:
- "#address-cells":
- const: 1
- else:
- properties:
- "#address-cells":
- const: 0
-
-patternProperties:
- "^slave$":
+ slave:
type: object
properties:
@@ -105,6 +90,7 @@ patternProperties:
required:
- compatible
+patternProperties:
"^.*@[0-9a-f]+$":
type: object
@@ -180,6 +166,20 @@ patternProperties:
- compatible
- reg
+allOf:
+ - if:
+ not:
+ required:
+ - spi-slave
+ then:
+ properties:
+ "#address-cells":
+ const: 1
+ else:
+ properties:
+ "#address-cells":
+ const: 0
+
additionalProperties: true
examples:
diff --git a/Documentation/devicetree/bindings/spi/spi-davinci.txt b/Documentation/devicetree/bindings/spi/spi-davinci.txt
index e2198a389484..200c7fc7b089 100644
--- a/Documentation/devicetree/bindings/spi/spi-davinci.txt
+++ b/Documentation/devicetree/bindings/spi/spi-davinci.txt
@@ -25,7 +25,7 @@ Required properties:
- interrupts: interrupt number mapped to CPU.
- clocks: spi clk phandle
For 66AK2G this property should be set per binding,
- Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+ Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
SoC-specific Required Properties:
diff --git a/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml b/Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
index 6b755039a74c..f882903769f9 100644
--- a/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml
+++ b/Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
@@ -14,7 +14,7 @@ description: |
It is a MIPI System Power Management (SPMI) controller.
The PMIC part is provided by
- drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml.
+ ./Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml.
allOf:
- $ref: spmi.yaml#
@@ -30,7 +30,7 @@ properties:
reg:
maxItems: 1
- spmi-channel:
+ hisilicon,spmi-channel:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
number of the Kirin 970 SPMI channel where the SPMI devices are connected.
@@ -38,10 +38,12 @@ properties:
required:
- compatible
- reg
- - spmi-channel
+ - hisilicon,spmi-channel
patternProperties:
"@[0-9a-f]$":
+ type: object
+
description: |
PMIC properties, which are specific to the used SPMI PMIC device(s).
When used in combination with HiSilicon 6421v600, the properties
@@ -61,7 +63,7 @@ examples:
#address-cells = <2>;
#size-cells = <0>;
reg = <0x0 0xfff24000 0x0 0x1000>;
- spmi-channel = <2>;
+ hisilicon,spmi-channel = <2>;
pmic@0 {
reg = <0 0>;
diff --git a/Documentation/devicetree/bindings/spmi/spmi.yaml b/Documentation/devicetree/bindings/spmi/spmi.yaml
index 173940930719..1d243faef2f8 100644
--- a/Documentation/devicetree/bindings/spmi/spmi.yaml
+++ b/Documentation/devicetree/bindings/spmi/spmi.yaml
@@ -40,14 +40,15 @@ patternProperties:
properties:
reg:
- minItems: 1
- maxItems: 2
items:
- - minimum: 0
- maximum: 0xf
- - enum: [ 0 ]
- description: |
- 0 means user ID address. 1 is reserved for group ID address.
+ - minItems: 1
+ items:
+ - minimum: 0
+ maximum: 0xf
+ - enum: [ 0 ]
+ description:
+ 0 means user ID address. 1 is reserved for group ID
+ address.
required:
- reg
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
index c1a5afa73cfe..3eda5049d183 100644
--- a/Documentation/devicetree/bindings/sram/sram.yaml
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -28,6 +28,8 @@ properties:
contains:
enum:
- mmio-sram
+ - amlogic,meson-gxbb-sram
+ - arm,juno-sram-ns
- atmel,sama5d2-securam
- rockchip,rk3288-pmu-sram
@@ -80,6 +82,9 @@ patternProperties:
- amlogic,meson8b-smp-sram
- amlogic,meson-gxbb-scp-shmem
- amlogic,meson-axg-scp-shmem
+ - arm,juno-scp-shmem
+ - arm,scmi-shmem
+ - arm,scp-shmem
- renesas,smp-sram
- rockchip,rk3066-smp-sram
- samsung,exynos4210-sysram
diff --git a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
index bf97d1fb33e7..6e0b110153b0 100644
--- a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
+++ b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
@@ -23,14 +23,12 @@ properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: Bus Clock
- description: Module Clock
clock-names:
minItems: 1
- maxItems: 2
items:
- const: bus
- const: mod
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml b/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml
new file mode 100644
index 000000000000..a35da257b070
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra30-tsensor.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/nvidia,tegra30-tsensor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra30 Thermal Sensor
+
+maintainers:
+ - Dmitry Osipenko <digetx@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+ TSENSOR provides thermal and voltage sensors which monitor temperature
+ and voltage of the chip. Sensors are placed across the die to gauge the
+ temperature of the whole chip. The TSENSOR module:
+
+ Generates an interrupt to SW to lower temperature via DVFS on reaching
+ a certain thermal/voltage threshold.
+
+ Generates a signal to the CAR to reduce CPU frequency by half on reaching
+ a certain thermal/voltage threshold.
+
+ Generates a signal to the PMC when the temperature reaches dangerously high
+ levels to reset the chip and sets a flag in the PMC.
+
+ TSENSOR has two channels which monitor two different spots of the SoC.
+
+properties:
+ compatible:
+ const: nvidia,tegra30-tsensor
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#thermal-sensor-cells":
+ const: 1
+
+ assigned-clock-parents: true
+ assigned-clock-rates: true
+ assigned-clocks: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - interrupts
+ - "#thermal-sensor-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ thermal-sensor@70014000 {
+ compatible = "nvidia,tegra30-tsensor";
+ reg = <0x70014000 0x500>;
+ interrupts = <0 102 4>;
+ clocks = <&clk 100>;
+ resets = <&rst 100>;
+
+ #thermal-sensor-cells = <1>;
+ };
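+
+ # A consumer node is not part of the schema; this thermal-zone sketch only
+ # illustrates the single-cell specifier selecting one of the two channels
+ # mentioned in the description. The zone name and delays are assumptions.
+ - |
+ thermal-zones {
+ chip-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tsensor 0>;
+ };
+ };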
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index 0242fd91b622..4a2eaf28e3fd 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -46,6 +46,8 @@ properties:
- qcom,msm8996-tsens
- qcom,msm8998-tsens
- qcom,sc7180-tsens
+ - qcom,sc7280-tsens
+ - qcom,sc8180x-tsens
- qcom,sdm845-tsens
- qcom,sm8150-tsens
- qcom,sm8250-tsens
@@ -77,7 +79,6 @@ properties:
nvmem-cell-names:
minItems: 1
- maxItems: 2
items:
- const: calib
- enum:
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
deleted file mode 100644
index 7f94669e9ebe..000000000000
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-* Temperature Sensor ADC (TSADC) on rockchip SoCs
-
-Required properties:
-- compatible : should be "rockchip,<name>-tsadc"
- "rockchip,px30-tsadc": found on PX30 SoCs
- "rockchip,rv1108-tsadc": found on RV1108 SoCs
- "rockchip,rk3228-tsadc": found on RK3228 SoCs
- "rockchip,rk3288-tsadc": found on RK3288 SoCs
- "rockchip,rk3328-tsadc": found on RK3328 SoCs
- "rockchip,rk3368-tsadc": found on RK3368 SoCs
- "rockchip,rk3399-tsadc": found on RK3399 SoCs
-- reg : physical base address of the controller and length of memory mapped
- region.
-- interrupts : The interrupt number to the cpu. The interrupt specifier format
- depends on the interrupt controller.
-- clocks : Must contain an entry for each entry in clock-names.
-- clock-names : Shall be "tsadc" for the converter-clock, and "apb_pclk" for
- the peripheral clock.
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the name "tsadc-apb".
-- pinctrl-names : The pin control state names;
-- pinctrl-0 : The "init" pinctrl state, it will be set before device probe.
-- pinctrl-1 : The "default" pinctrl state, it will be set after reset the
- TSADC controller.
-- pinctrl-2 : The "sleep" pinctrl state, it will be in for suspend.
-- #thermal-sensor-cells : Should be 1. See Documentation/devicetree/bindings/thermal/thermal-sensor.yaml for a description.
-
-Optional properties:
-- rockchip,hw-tshut-temp : The hardware-controlled shutdown temperature value.
-- rockchip,hw-tshut-mode : The hardware-controlled shutdown mode 0:CRU 1:GPIO.
-- rockchip,hw-tshut-polarity : The hardware-controlled active polarity 0:LOW
- 1:HIGH.
-- rockchip,grf : The phandle of the syscon node for the general register file.
-
-Exiample:
-tsadc: tsadc@ff280000 {
- compatible = "rockchip,rk3288-tsadc";
- reg = <0xff280000 0x100>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
- clock-names = "tsadc", "apb_pclk";
- resets = <&cru SRST_TSADC>;
- reset-names = "tsadc-apb";
- pinctrl-names = "init", "default", "sleep";
- pinctrl-0 = <&otp_gpio>;
- pinctrl-1 = <&otp_out>;
- pinctrl-2 = <&otp_gpio>;
- #thermal-sensor-cells = <1>;
- rockchip,hw-tshut-temp = <95000>;
- rockchip,hw-tshut-mode = <0>;
- rockchip,hw-tshut-polarity = <0>;
-};
-
-Example: referring to thermal sensors:
-thermal-zones {
- cpu_thermal: cpu_thermal {
- polling-delay-passive = <1000>; /* milliseconds */
- polling-delay = <5000>; /* milliseconds */
-
- /* sensor ID */
- thermal-sensors = <&tsadc 1>;
-
- trips {
- cpu_alert0: cpu_alert {
- temperature = <70000>; /* millicelsius */
- hysteresis = <2000>; /* millicelsius */
- type = "passive";
- };
- cpu_crit: cpu_crit {
- temperature = <90000>; /* millicelsius */
- hysteresis = <2000>; /* millicelsius */
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&cpu_alert0>;
- cooling-device =
- <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml b/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml
new file mode 100644
index 000000000000..b96ea277b558
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/rockchip-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Temperature Sensor ADC (TSADC) on Rockchip SoCs
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,px30-tsadc # PX30 SoCs
+ - rockchip,rv1108-tsadc # RV1108 SoCs
+ - rockchip,rk3228-tsadc # RK3228 SoCs
+ - rockchip,rk3288-tsadc # RK3288 SoCs
+ - rockchip,rk3328-tsadc # RK3328 SoCs
+ - rockchip,rk3368-tsadc # RK3368 SoCs
+ - rockchip,rk3399-tsadc # RK3399 SoCs
+ - rockchip,rk3568-tsadc # RK3568 SoCs
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: tsadc
+ - const: apb_pclk
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: tsadc-apb
+
+ "#thermal-sensor-cells":
+ const: 1
+
+ rockchip,grf:
+ description: The phandle of the syscon node for the general register file.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ rockchip,hw-tshut-temp:
+ description: The hardware-controlled shutdown temperature value.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ rockchip,hw-tshut-mode:
+ description: The hardware-controlled shutdown mode 0:CRU 1:GPIO.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+
+ rockchip,hw-tshut-polarity:
+ description: The hardware-controlled active polarity 0:LOW 1:HIGH.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - "#thermal-sensor-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/rk3288-cru.h>
+
+ tsadc: tsadc@ff280000 {
+ compatible = "rockchip,rk3288-tsadc";
+ reg = <0xff280000 0x100>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+ clock-names = "tsadc", "apb_pclk";
+ resets = <&cru SRST_TSADC>;
+ reset-names = "tsadc-apb";
+ #thermal-sensor-cells = <1>;
+ rockchip,hw-tshut-temp = <95000>;
+ rockchip,hw-tshut-mode = <0>;
+ rockchip,hw-tshut-polarity = <0>;
+ };
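+
+ # Consumer sketch condensed from the "referring to thermal sensors" example
+ # in the binding text this schema replaces; trip points and cooling maps
+ # are omitted here for brevity.
+ - |
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>; /* milliseconds */
+ polling-delay = <5000>; /* milliseconds */
+
+ /* sensor ID */
+ thermal-sensors = <&tsadc 1>;
+ };
+ };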
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
index 1c7cf32e7ac2..53fd24bdc34e 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
@@ -12,11 +12,18 @@ maintainers:
properties:
compatible:
- enum:
- - allwinner,sun4i-a10-timer
- - allwinner,sun8i-a23-timer
- - allwinner,sun8i-v3s-timer
- - allwinner,suniv-f1c100s-timer
+ oneOf:
+ - enum:
+ - allwinner,sun4i-a10-timer
+ - allwinner,sun8i-a23-timer
+ - allwinner,sun8i-v3s-timer
+ - allwinner,suniv-f1c100s-timer
+ - items:
+ - enum:
+ - allwinner,sun50i-a64-timer
+ - allwinner,sun50i-h6-timer
+ - allwinner,sun50i-h616-timer
+ - const: allwinner,sun8i-a23-timer
reg:
maxItems: 1
@@ -34,8 +41,8 @@ allOf:
- if:
properties:
compatible:
- items:
- const: allwinner,sun4i-a10-timer
+ enum:
+ - allwinner,sun4i-a10-timer
then:
properties:
@@ -46,8 +53,8 @@ allOf:
- if:
properties:
compatible:
- items:
- const: allwinner,sun8i-a23-timer
+ enum:
+ - allwinner,sun8i-a23-timer
then:
properties:
@@ -58,20 +65,9 @@ allOf:
- if:
properties:
compatible:
- items:
- const: allwinner,sun8i-v3s-timer
-
- then:
- properties:
- interrupts:
- minItems: 3
- maxItems: 3
-
- - if:
- properties:
- compatible:
- items:
- const: allwinner,suniv-f1c100s-timer
+ enum:
+ - allwinner,sun8i-v3s-timer
+ - allwinner,suniv-f1c100s-timer
then:
properties:
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
index b6a6d03a08b2..2ecac754e1cd 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
@@ -24,7 +24,6 @@ properties:
interrupts:
minItems: 2
- maxItems: 4
items:
- description: Timer 0 Interrupt
- description: Timer 1 Interrupt
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
index 7f5e3af58255..df8ce87fd54b 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
@@ -35,7 +35,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 5
items:
- description: secure timer irq
- description: non-secure timer irq
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index d83a1f97f911..cd2176cad53a 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -71,14 +71,12 @@ patternProperties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: physical timer irq
- description: virtual timer irq
reg:
minItems: 1
- maxItems: 2
items:
- description: 1st view base address
- description: 2nd optional view base address
diff --git a/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml b/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml
new file mode 100644
index 000000000000..5684df6448ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/arm,twd-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Timer-Watchdog Timer
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description:
+ ARM 11MP, Cortex-A5 and Cortex-A9 are often associated with a per-core
+ Timer-Watchdog (aka TWD), which provides both a per-cpu local timer
+ and watchdog.
+
+ The TWD is usually attached to a GIC to deliver its two per-processor
+ interrupts.
+
+properties:
+ compatible:
+ enum:
+ - arm,cortex-a9-twd-timer
+ - arm,cortex-a5-twd-timer
+ - arm,arm11mp-twd-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ always-on:
+ description:
+ If present, the timer is powered through an always-on power domain,
+ therefore it never loses context.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@2c000600 {
+ compatible = "arm,arm11mp-twd-timer";
+ reg = <0x2c000600 0x20>;
+ interrupts = <GIC_PPI 13 0xf01>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/arm,twd.txt b/Documentation/devicetree/bindings/timer/arm,twd.txt
deleted file mode 100644
index 383ea19c2bf0..000000000000
--- a/Documentation/devicetree/bindings/timer/arm,twd.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* ARM Timer Watchdog
-
-ARM 11MP, Cortex-A5 and Cortex-A9 are often associated with a per-core
-Timer-Watchdog (aka TWD), which provides both a per-cpu local timer
-and watchdog.
-
-The TWD is usually attached to a GIC to deliver its two per-processor
-interrupts.
-
-** Timer node required properties:
-
-- compatible : Should be one of:
- "arm,cortex-a9-twd-timer"
- "arm,cortex-a5-twd-timer"
- "arm,arm11mp-twd-timer"
-
-- interrupts : One interrupt to each core
-
-- reg : Specify the base address and the size of the TWD timer
- register window.
-
-Optional
-
-- always-on : a boolean property. If present, the timer is powered through
- an always-on power domain, therefore it never loses context.
-
-Example:
-
- twd-timer@2c000600 {
- compatible = "arm,arm11mp-twd-timer"";
- reg = <0x2c000600 0x20>;
- interrupts = <1 13 0xf01>;
- };
-
-** Watchdog node properties:
-
-- compatible : Should be one of:
- "arm,cortex-a9-twd-wdt"
- "arm,cortex-a5-twd-wdt"
- "arm,arm11mp-twd-wdt"
-
-- interrupts : One interrupt to each core
-
-- reg : Specify the base address and the size of the TWD watchdog
- register window.
-
-Example:
-
- twd-watchdog@2c000620 {
- compatible = "arm,arm11mp-twd-wdt";
- reg = <0x2c000620 0x20>;
- interrupts = <1 14 0xf01>;
- };
diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
index a8de99b0c0f9..f32575d4b5aa 100644
--- a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
@@ -22,7 +22,6 @@ properties:
interrupts:
minItems: 1
- maxItems: 2
items:
- description: Timer 1 interrupt
- description: Timer 2 interrupt
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
index f0f0f121c355..c57169118b68 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
@@ -36,6 +36,7 @@ properties:
- renesas,tmu-r8a77980 # R-Car V3H
- renesas,tmu-r8a77990 # R-Car E3
- renesas,tmu-r8a77995 # R-Car D3
+ - renesas,tmu-r8a779a0 # R-Car V3U
- const: renesas,tmu
reg:
diff --git a/Documentation/devicetree/bindings/timer/renesas,tpu.txt b/Documentation/devicetree/bindings/timer/renesas,tpu.txt
deleted file mode 100644
index 1d46f9de4feb..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,tpu.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-* Renesas H8/300 Timer Pulse Unit
-
-The TPU is a 16bit timer/counter with configurable clock inputs and
-programmable compare match.
-This implementation support only cascade mode.
-
-Required Properties:
-
- - compatible: must contain "renesas,tpu"
- - reg: base address and length of the registers block in 2 channel.
- - clocks: a list of phandle, one for each entry in clock-names.
- - clock-names: must contain "peripheral_clk" for the functional clock.
-
-
-Example:
- tpu: tpu@ffffe0 {
- compatible = "renesas,tpu";
- reg = <0xffffe0 16>, <0xfffff0 12>;
- clocks = <&pclk>;
- clock-names = "peripheral_clk";
- };
diff --git a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml b/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
new file mode 100644
index 000000000000..01554dff23d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/renesas,tpu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas H8/300 Timer Pulse Unit
+
+maintainers:
+ - Yoshinori Sato <ysato@users.sourceforge.jp>
+
+description:
+ The TPU is a 16bit timer/counter with configurable clock inputs and
+ programmable compare match.
+ This implementation supports only cascade mode.
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: renesas,tpu
+ '#pwm-cells': false
+ required:
+ - compatible
+
+properties:
+ compatible:
+ const: renesas,tpu
+
+ reg:
+ items:
+ - description: First channel
+ - description: Second channel
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: fck
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ tpu: tpu@ffffe0 {
+ compatible = "renesas,tpu";
+ reg = <0xffffe0 16>, <0xfffff0 12>;
+ clocks = <&pclk>;
+ clock-names = "fck";
+ };
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 37ac0a3ae3b4..919a4bf03a5a 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -177,8 +177,14 @@ properties:
- meas,tsys01
# MEMSIC magnetometer
- memsic,mmc35240
+ # MEMSIC 3-axis accelerometer
+ - memsic,mxc4005
# MEMSIC 2-axis 8-bit digital accelerometer
- memsic,mxc6225
+ # MEMSIC 2-axis 8-bit digital accelerometer
+ - memsic,mxc6255
+ # MEMSIC 3-axis accelerometer
+ - memsic,mxc6655
# Microchip differential I2C ADC, 1 Channel, 18 bit
- microchip,mcp3421
# Microchip differential I2C ADC, 2 Channel, 18 bit
@@ -263,6 +269,10 @@ properties:
- sensirion,sgpc3
# Sensirion multi-pixel gas sensor with I2C interface
- sensirion,sgp30
+ # Sensortek 3 axis accelerometer
+ - sensortek,stk8312
+ # Sensortek 3 axis accelerometer
+ - sensortek,stk8ba50
# SGX Sensortech VZ89X Sensors
- sgx,vz89x
# Relative Humidity and Temperature Sensors
@@ -272,6 +282,8 @@ properties:
# Socionext SynQuacer TPM MMIO module
- socionext,synquacer-tpm-mmio
# i2c serial eeprom (24cxx)
+ # SparkFun Qwiic Joystick (COM-15168) with i2c interface
+ - sparkfun,qwiic-joystick
- st,24c256
# Ambient Light Sensor with SMBUS/Two Wire Serial Interface
- taos,tsl2550
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
index 0f520f17735e..933fa356d2ce 100644
--- a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
+++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
@@ -22,6 +22,9 @@ properties:
- allwinner,sun8i-a83t-musb
- allwinner,sun50i-h6-musb
- const: allwinner,sun8i-a33-musb
+ - items:
+ - const: allwinner,sun50i-h616-musb
+ - const: allwinner,sun8i-h3-musb
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml
index a407e1143cf4..dc9d6ed0781d 100644
--- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml
+++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml
@@ -28,9 +28,9 @@ properties:
interrupts:
minItems: 3
items:
- - description: OTG/DRD controller interrupt
- description: XHCI host controller interrupt
- description: Device controller interrupt
+ - description: OTG/DRD controller interrupt
- description: interrupt used to wake up core, e.g when usbcmd.rs is
cleared by xhci core, this interrupt is optional
@@ -75,6 +75,7 @@ required:
- reg
- reg-names
- interrupts
+ - interrupt-names
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index e5ee51b7b470..10c7d9b6cc53 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -24,6 +24,7 @@ properties:
- rockchip,rk3188-usb
- rockchip,rk3228-usb
- rockchip,rk3288-usb
+ - rockchip,rk3308-usb
- rockchip,rk3328-usb
- rockchip,rk3368-usb
- rockchip,rv1108-usb
diff --git a/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml b/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml
index 4241d38d5864..1d893d3d3432 100644
--- a/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml
+++ b/Documentation/devicetree/bindings/usb/maxim,max3420-udc.yaml
@@ -30,14 +30,12 @@ properties:
- description: usb irq from max3420
- description: vbus detection irq
minItems: 1
- maxItems: 2
interrupt-names:
items:
- const: udc
- const: vbus
minItems: 1
- maxItems: 2
spi-max-frequency:
maximum: 26000000
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
index e60e590dbe12..8428415896ce 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
@@ -25,7 +25,6 @@ properties:
reg:
minItems: 2
- maxItems: 3
items:
- description: XUSB device controller registers
- description: XUSB device PCI Config registers
@@ -33,7 +32,6 @@ properties:
reg-names:
minItems: 2
- maxItems: 3
items:
- const: base
- const: fpci
@@ -45,7 +43,6 @@ properties:
clocks:
minItems: 4
- maxItems: 5
items:
- description: Clock to enable core XUSB dev clock.
- description: Clock to enable XUSB super speed clock.
@@ -55,7 +52,6 @@ properties:
clock-names:
minItems: 4
- maxItems: 5
items:
- const: dev
- const: ss
diff --git a/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
new file mode 100644
index 000000000000..f238848ad094
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/nxp,isp1760.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP ISP1760 family controller bindings
+
+maintainers:
+ - Sebastian Siewior <bigeasy@linutronix.de>
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description: |
+ Devicetree bindings for the NXP ISP1760 family of USB controllers, which
+ includes the ISP1760, ISP1761 and ISP1763.
+
+properties:
+ compatible:
+ enum:
+ - nxp,usb-isp1760
+ - nxp,usb-isp1761
+ - nxp,usb-isp1763
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: Host controller interrupt
+ - description: Device controller interrupt in isp1761
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: host
+ - const: peripheral
+
+ bus-width:
+ description:
+ Number of data lines.
+ enum: [8, 16, 32]
+ default: 32
+
+ dr_mode:
+ enum:
+ - host
+ - peripheral
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ usb@40200000 {
+ compatible = "nxp,usb-isp1763";
+ reg = <0x40200000 0x100000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ bus-width = <16>;
+ dr_mode = "host";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index 413299b5fe2b..4e6451789806 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -19,6 +19,8 @@ properties:
- qcom,sc7280-dwc3
- qcom,sdm845-dwc3
- qcom,sdx55-dwc3
+ - qcom,sm4250-dwc3
+ - qcom,sm6115-dwc3
- qcom,sm8150-dwc3
- qcom,sm8250-dwc3
- qcom,sm8350-dwc3
diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
new file mode 100644
index 000000000000..04ee255eb4f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/realtek,rts5411.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for the Realtek RTS5411 USB 3.0 hub controller
+
+maintainers:
+ - Matthias Kaehlcke <mka@chromium.org>
+
+allOf:
+ - $ref: usb-device.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - usbbda,5411
+ - usbbda,411
+
+ reg: true
+
+ vdd-supply:
+ description:
+ phandle to the regulator that provides power to the hub.
+
+ companion-hub:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description:
+ phandle to the companion hub on the controller.
+
+required:
+ - companion-hub
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ usb {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 2.0 hub on port 1 */
+ hub_2_0: hub@1 {
+ compatible = "usbbda,5411";
+ reg = <1>;
+ vdd-supply = <&pp3300_hub>;
+ companion-hub = <&hub_3_0>;
+ };
+
+ /* 3.0 hub on port 2 */
+ hub_3_0: hub@2 {
+ compatible = "usbbda,411";
+ reg = <2>;
+ vdd-supply = <&pp3300_hub>;
+ companion-hub = <&hub_2_0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
index e67223d90bb7..ad73339ffe1d 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -53,7 +53,6 @@ properties:
clocks:
minItems: 1
- maxItems: 3
items:
- description: USB 2.0 host
- description: USB 2.0 peripheral
@@ -86,7 +85,6 @@ properties:
dma-names:
minItems: 2
- maxItems: 4
items:
- const: ch0
- const: ch1
@@ -100,7 +98,6 @@ properties:
resets:
minItems: 1
- maxItems: 2
items:
- description: USB 2.0 host
- description: USB 2.0 peripheral
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 7ec87a783c5c..a634774c537c 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -27,7 +27,7 @@ properties:
description:
PM domain provider node and an args specifier containing
the USB device id value. See,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
maxItems: 1
clocks:
diff --git a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
index 9a068d3bc73b..f6e91a5fd8fe 100644
--- a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
@@ -47,7 +47,7 @@ properties:
description: Should contain a phandle to a PM domain provider node
and an args specifier containing the USB device id
value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
phys:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 9cb628ec960b..07fb0d25fc15 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -249,6 +249,8 @@ patternProperties:
description: Colorful GRP, Shenzhen Xueyushi Technology Ltd.
"^compulab,.*":
description: CompuLab Ltd.
+ "^congatec,.*":
+ description: congatec GmbH
"^coreriver,.*":
description: CORERIVER Semiconductor Co.,Ltd.
"^corpro,.*":
@@ -315,6 +317,8 @@ patternProperties:
description: DPTechnics
"^dragino,.*":
description: Dragino Technology Co., Limited
+ "^ds,.*":
+ description: DaSheng, Inc.
"^dserve,.*":
description: dServe Technology B.V.
"^dynaimage,.*":
@@ -409,6 +413,8 @@ patternProperties:
description: Firefly
"^focaltech,.*":
description: FocalTech Systems Co.,Ltd
+ "^forlinx,.*":
+ description: Baoding Forlinx Embedded Technology Co., Ltd.
"^frida,.*":
description: Shenzhen Frida LCD Co., Ltd.
"^friendlyarm,.*":
@@ -472,7 +478,7 @@ patternProperties:
"^hirschmann,.*":
description: Hirschmann Automation and Control GmbH
"^hisilicon,.*":
- description: Hisilicon Limited.
+ description: HiSilicon Limited.
"^hit,.*":
description: Hitachi Ltd.
"^hitex,.*":
@@ -533,6 +539,8 @@ patternProperties:
description: Innolux Corporation
"^inside-secure,.*":
description: INSIDE Secure
+ "^insignal,.*":
+ description: Insignal Ltd.
"^inspur,.*":
description: Inspur Corporation
"^intel,.*":
@@ -1078,6 +1086,8 @@ patternProperties:
description: Sony Corporation
"^spansion,.*":
description: Spansion Inc.
+ "^sparkfun,.*":
+ description: SparkFun Electronics
"^sprd,.*":
description: Spreadtrum Communications Inc.
"^sst,.*":
@@ -1087,6 +1097,8 @@ patternProperties:
(formerly part of MStar Semiconductor, Inc.)
"^st,.*":
description: STMicroelectronics
+ "^starfive,.*":
+ description: StarFive Technology Co. Ltd.
"^starry,.*":
description: Starry Electronic Technology (ShenZhen) Co., LTD
"^startek,.*":
@@ -1246,6 +1258,8 @@ patternProperties:
description: Western Digital Corp.
"^we,.*":
description: Würth Elektronik GmbH.
+ "^welltech,.*":
+ description: Welltech Computer Co., Limited.
"^wetek,.*":
description: WeTek Electronics, limited.
"^wexler,.*":
diff --git a/Documentation/devicetree/bindings/virtio/mmio.txt b/Documentation/devicetree/bindings/virtio/mmio.txt
deleted file mode 100644
index 0a575f329f6e..000000000000
--- a/Documentation/devicetree/bindings/virtio/mmio.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-* virtio memory mapped device
-
-See https://ozlabs.org/~rusty/virtio-spec/ for more details.
-
-Required properties:
-
-- compatible: "virtio,mmio" compatibility string
-- reg: control registers base address and size including configuration space
-- interrupts: interrupt generated by the device
-
-Required properties for virtio-iommu:
-
-- #iommu-cells: When the node corresponds to a virtio-iommu device, it is
- linked to DMA masters using the "iommus" or "iommu-map"
- properties [1][2]. #iommu-cells specifies the size of the
- "iommus" property. For virtio-iommu #iommu-cells must be
- 1, each cell describing a single endpoint ID.
-
-Optional properties:
-
-- iommus: If the device accesses memory through an IOMMU, it should
- have an "iommus" property [1]. Since virtio-iommu itself
- does not access memory through an IOMMU, the "virtio,mmio"
- node cannot have both an "#iommu-cells" and an "iommus"
- property.
-
-Example:
-
- virtio_block@3000 {
- compatible = "virtio,mmio";
- reg = <0x3000 0x100>;
- interrupts = <41>;
-
- /* Device has endpoint ID 23 */
- iommus = <&viommu 23>
- }
-
- viommu: iommu@3100 {
- compatible = "virtio,mmio";
- reg = <0x3100 0x100>;
- interrupts = <42>;
-
- #iommu-cells = <1>
- }
-
-[1] Documentation/devicetree/bindings/iommu/iommu.txt
-[2] Documentation/devicetree/bindings/pci/pci-iommu.txt
diff --git a/Documentation/devicetree/bindings/virtio/mmio.yaml b/Documentation/devicetree/bindings/virtio/mmio.yaml
new file mode 100644
index 000000000000..d46597028cf1
--- /dev/null
+++ b/Documentation/devicetree/bindings/virtio/mmio.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/virtio/mmio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: virtio memory mapped devices
+
+maintainers:
+ - Jean-Philippe Brucker <jean-philippe@linaro.org>
+
+description:
+ See https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=virtio for
+ more details.
+
+properties:
+ compatible:
+ const: virtio,mmio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#iommu-cells':
+ description: Required when the node corresponds to a virtio-iommu device.
+ const: 1
+
+ iommus:
+ description: Required for devices making accesses through an IOMMU.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ virtio@3000 {
+ compatible = "virtio,mmio";
+ reg = <0x3000 0x100>;
+ interrupts = <41>;
+
+ /* Device has endpoint ID 23 */
+ iommus = <&viommu 23>;
+ };
+
+ viommu: iommu@3100 {
+ compatible = "virtio,mmio";
+ reg = <0x3100 0x100>;
+ interrupts = <42>;
+
+ #iommu-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/arm,sbsa-gwdt.yaml b/Documentation/devicetree/bindings/watchdog/arm,sbsa-gwdt.yaml
new file mode 100644
index 000000000000..6bfa46353c4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/arm,sbsa-gwdt.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/arm,sbsa-gwdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SBSA (Server Base System Architecture) Generic Watchdog
+
+maintainers:
+ - Fu Wei <fu.wei@linaro.org>
+
+description: |
+ The SBSA Generic Watchdog Timer is used to force a reset of the system after
+ two stages of timeout have elapsed. A detailed definition of the watchdog
+ timer can be found in the ARM document: ARM-DEN-0029 - Server Base System
+ Architecture (SBSA)
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ const: arm,sbsa-gwdt
+
+ reg:
+ items:
+ - description: Watchdog control frame
+ - description: Refresh frame
+
+ interrupts:
+ description: The Watchdog Signal 0 (WS0) SPI (Shared Peripheral Interrupt)
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+
+ watchdog@2a440000 {
+ compatible = "arm,sbsa-gwdt";
+ reg = <0x2a440000 0x1000>,
+ <0x2a450000 0x1000>;
+ interrupts = <0 27 4>;
+ timeout-sec = <30>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/watchdog/arm,twd-wdt.yaml b/Documentation/devicetree/bindings/watchdog/arm,twd-wdt.yaml
new file mode 100644
index 000000000000..bb8901854222
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/arm,twd-wdt.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/arm,twd-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Timer-Watchdog Watchdog
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description:
+ ARM 11MP, Cortex-A5 and Cortex-A9 are often associated with a per-core
+ Timer-Watchdog (aka TWD), which provides both a per-cpu local timer
+ and watchdog.
+
+ The TWD is usually attached to a GIC to deliver its two per-processor
+ interrupts.
+
+properties:
+ compatible:
+ enum:
+ - arm,cortex-a9-twd-wdt
+ - arm,cortex-a5-twd-wdt
+ - arm,arm11mp-twd-wdt
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ watchdog@2c000620 {
+ compatible = "arm,arm11mp-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <GIC_PPI 14 0xf01>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml b/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml
new file mode 100644
index 000000000000..9856cd76c28d
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/atmel,sama5d4-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel SAMA5D4 Watchdog Timer (WDT) Controller
+
+maintainers:
+ - Eugen Hristev <eugen.hristev@microchip.com>
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - atmel,sama5d4-wdt
+ - microchip,sam9x60-wdt
+ - microchip,sama7g5-wdt
+
+ reg:
+ maxItems: 1
+
+ atmel,watchdog-type:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: should be hardware or software.
+ oneOf:
+ - description:
+ Enable watchdog fault reset. A watchdog fault triggers
+ watchdog reset.
+ const: hardware
+ - description:
+ Enable watchdog fault interrupt. A watchdog fault asserts
+ watchdog interrupt.
+ const: software
+ default: hardware
+
+ atmel,idle-halt:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ present if you want to stop the watchdog when the CPU is in idle state.
+ CAUTION: This property should be used with care; it actually stops the
+ watchdog from counting when the CPU is in idle state, therefore the
+ watchdog reset time depends on mean CPU usage and the watchdog will not
+ reset at all if the CPU stops working while in idle state, which is
+ probably not what you want.
+
+ atmel,dbg-halt:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ present if you want to stop the watchdog when the CPU is in debug state.
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ watchdog@fc068640 {
+ compatible = "atmel,sama5d4-wdt";
+ reg = <0xfc068640 0x10>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH 5>;
+ timeout-sec = <10>;
+ atmel,watchdog-type = "hardware";
+ atmel,dbg-halt;
+ atmel,idle-halt;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
deleted file mode 100644
index 44727fcc2729..000000000000
--- a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-* Atmel SAMA5D4 Watchdog Timer (WDT) Controller
-
-Required properties:
-- compatible: "atmel,sama5d4-wdt" or "microchip,sam9x60-wdt"
-- reg: base physical address and length of memory mapped region.
-
-Optional properties:
-- timeout-sec: watchdog timeout value (in seconds).
-- interrupts: interrupt number to the CPU.
-- atmel,watchdog-type: should be "hardware" or "software".
- "hardware": enable watchdog fault reset. A watchdog fault triggers
- watchdog reset.
- "software": enable watchdog fault interrupt. A watchdog fault asserts
- watchdog interrupt.
-- atmel,idle-halt: present if you want to stop the watchdog when the CPU is
- in idle state.
- CAUTION: This property should be used with care, it actually makes the
- watchdog not counting when the CPU is in idle state, therefore the
- watchdog reset time depends on mean CPU usage and will not reset at all
- if the CPU stop working while it is in idle state, which is probably
- not what you want.
-- atmel,dbg-halt: present if you want to stop the watchdog when the CPU is
- in debug state.
-
-Example:
- watchdog@fc068640 {
- compatible = "atmel,sama5d4-wdt";
- reg = <0xfc068640 0x10>;
- interrupts = <4 IRQ_TYPE_LEVEL_HIGH 5>;
- timeout-sec = <10>;
- atmel,watchdog-type = "hardware";
- atmel,dbg-halt;
- atmel,idle-halt;
- };
diff --git a/Documentation/devicetree/bindings/watchdog/mstar,msc313e-wdt.yaml b/Documentation/devicetree/bindings/watchdog/mstar,msc313e-wdt.yaml
new file mode 100644
index 000000000000..e3e8b86dbf63
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/mstar,msc313e-wdt.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/mstar,msc313e-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MStar Watchdog Device Tree Bindings
+
+maintainers:
+ - Daniel Palmer <daniel@0x0f.com>
+ - Romain Perier <romain.perier@gmail.com>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ enum:
+ - mstar,msc313e-wdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ watchdog@6000 {
+ compatible = "mstar,msc313e-wdt";
+ reg = <0x6000 0x1f>;
+ clocks = <&xtal_div2>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index e36ba60de829..416d716403f6 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -1,5 +1,8 @@
Mediatek SoCs Watchdog timer
+The watchdog supports a pre-timeout interrupt that fires timeout-sec/2
+before the expiry.
+
Required properties:
- compatible should contain:
@@ -13,10 +16,12 @@ Required properties:
"mediatek,mt8183-wdt": for MT8183
"mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
"mediatek,mt8192-wdt": for MT8192
+ "mediatek,mt8195-wdt", "mediatek,mt6589-wdt": for MT8195
- reg : Specifies base physical address and size of the registers.
Optional properties:
+- interrupts: Watchdog pre-timeout (bark) interrupt.
- timeout-sec: contains the watchdog timeout in seconds.
- #reset-cells: Should be 1.
@@ -26,6 +31,7 @@ watchdog: watchdog@10007000 {
compatible = "mediatek,mt8183-wdt",
"mediatek,mt6589-wdt";
reg = <0 0x10007000 0 0x100>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>;
timeout-sec = <10>;
#reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index b8e4118945a0..ba60bdf1fecc 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -17,6 +17,7 @@ properties:
enum:
- qcom,apss-wdt-qcs404
- qcom,apss-wdt-sc7180
+ - qcom,apss-wdt-sc7280
- qcom,apss-wdt-sdm845
- qcom,apss-wdt-sdx55
- qcom,apss-wdt-sm8150
diff --git a/Documentation/devicetree/bindings/watchdog/sbsa-gwdt.txt b/Documentation/devicetree/bindings/watchdog/sbsa-gwdt.txt
deleted file mode 100644
index 6f2d5f91964d..000000000000
--- a/Documentation/devicetree/bindings/watchdog/sbsa-gwdt.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* SBSA (Server Base System Architecture) Generic Watchdog
-
-The SBSA Generic Watchdog Timer is used to force a reset of the system
-after two stages of timeout have elapsed. A detailed definition of the
-watchdog timer can be found in the ARM document: ARM-DEN-0029 - Server
-Base System Architecture (SBSA)
-
-Required properties:
-- compatible: Should at least contain "arm,sbsa-gwdt".
-
-- reg: Each entry specifies the base physical address of a register frame
- and the length of that frame; currently, two frames must be defined,
- in this order:
- 1: Watchdog control frame;
- 2: Refresh frame.
-
-- interrupts: Should contain the Watchdog Signal 0 (WS0) SPI (Shared
- Peripheral Interrupt) number of SBSA Generic Watchdog.
-
-Optional properties
-- timeout-sec: Watchdog timeout values (in seconds).
-
-Example for FVP Foundation Model v8:
-
-watchdog@2a440000 {
- compatible = "arm,sbsa-gwdt";
- reg = <0x0 0x2a440000 0 0x1000>,
- <0x0 0x2a450000 0 0x1000>;
- interrupts = <0 27 4>;
- timeout-sec = <30>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
index b58596b1831d..6461eb4f4a27 100644
--- a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
@@ -27,6 +27,7 @@ properties:
- rockchip,rk3328-wdt
- rockchip,rk3368-wdt
- rockchip,rk3399-wdt
+ - rockchip,rk3568-wdt
- rockchip,rv1108-wdt
- const: snps,dw-wdt
diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
index 3f1ba1d6c6b5..481bf91f988a 100644
--- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
+++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
@@ -27,7 +27,6 @@ properties:
- description: Low speed clock
- description: Optional peripheral clock
minItems: 1
- maxItems: 2
clock-names:
items:
diff --git a/Documentation/devicetree/bindings/writing-bindings.rst b/Documentation/devicetree/bindings/writing-bindings.rst
index 45ff426d0019..f7dfb98c156e 100644
--- a/Documentation/devicetree/bindings/writing-bindings.rst
+++ b/Documentation/devicetree/bindings/writing-bindings.rst
@@ -52,7 +52,8 @@ Properties
constraints specific to the device.
- DO use common property unit suffixes for properties with scientific units.
- See property-units.txt.
+ Recommended suffixes are listed at
+ https://github.com/devicetree-org/dt-schema/blob/master/schemas/property-units.yaml
- DO define properties in terms of constraints. How many entries? What are
possible values? What is the order?
diff --git a/Documentation/driver-api/auxiliary_bus.rst b/Documentation/driver-api/auxiliary_bus.rst
index fff96c7ba7a8..ef902daf0d68 100644
--- a/Documentation/driver-api/auxiliary_bus.rst
+++ b/Documentation/driver-api/auxiliary_bus.rst
@@ -11,7 +11,7 @@ too complex for a single device to be managed by a monolithic driver
(e.g. Sound Open Firmware), multiple devices might implement a common
intersection of functionality (e.g. NICs + RDMA), or a driver may want to
export an interface for another subsystem to drive (e.g. SIOV Physical Function
-export Virtual Function management). A split of the functinoality into child-
+export Virtual Function management). A split of the functionality into child-
devices representing sub-domains of functionality makes it possible to
compartmentalize, layer, and distribute domain-specific concerns via a Linux
device-driver model.
diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
index 1bad466f9167..487ce4f41d77 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -22,16 +22,22 @@ This section covers the driver infrastructure for a CXL memory device.
CXL Memory Device
-----------------
-.. kernel-doc:: drivers/cxl/mem.c
- :doc: cxl mem
+.. kernel-doc:: drivers/cxl/pci.c
+ :doc: cxl pci
-.. kernel-doc:: drivers/cxl/mem.c
+.. kernel-doc:: drivers/cxl/pci.c
:internal:
-CXL Bus
--------
-.. kernel-doc:: drivers/cxl/bus.c
- :doc: cxl bus
+CXL Core
+--------
+.. kernel-doc:: drivers/cxl/cxl.h
+ :doc: cxl objects
+
+.. kernel-doc:: drivers/cxl/cxl.h
+ :internal:
+
+.. kernel-doc:: drivers/cxl/core.c
+ :doc: cxl core
External Interfaces
===================
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index e0814d214048..650096523f4f 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -314,7 +314,6 @@ IOMAP
devm_ioremap_resource() : checks resource, requests memory region, ioremaps
devm_ioremap_resource_wc()
devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device
- devm_platform_ioremap_resource_wc()
devm_platform_ioremap_resource_byname()
devm_platform_get_and_ioremap_resource()
devm_iounmap()
@@ -401,7 +400,8 @@ POWER
PWM
devm_pwm_get()
- devm_pwm_put()
+ devm_of_pwm_get()
+ devm_fwnode_pwm_get()
REGULATOR
devm_regulator_bulk_get()
diff --git a/Documentation/driver-api/early-userspace/early_userspace_support.rst b/Documentation/driver-api/early-userspace/early_userspace_support.rst
index 8a58c61932ff..61bdeac1bae5 100644
--- a/Documentation/driver-api/early-userspace/early_userspace_support.rst
+++ b/Documentation/driver-api/early-userspace/early_userspace_support.rst
@@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user.
As a technical note, when directories and files are specified, the
entire CONFIG_INITRAMFS_SOURCE is passed to
-usr/gen_initramfs_list.sh. This means that CONFIG_INITRAMFS_SOURCE
+usr/gen_initramfs.sh. This means that CONFIG_INITRAMFS_SOURCE
can really be interpreted as any legal argument to
-gen_initramfs_list.sh. If a directory is specified as an argument then
+gen_initramfs.sh. If a directory is specified as an argument then
the contents are scanned, uid/gid translation is performed, and
usr/gen_init_cpio file directives are output. If a directory is
-specified as an argument to usr/gen_initramfs_list.sh then the
+specified as an argument to usr/gen_initramfs.sh then the
contents of the file are simply copied to the output. All of the output
directives from directory scanning and file contents copying are
processed by usr/gen_init_cpio.
-See also 'usr/gen_initramfs_list.sh -h'.
+See also 'usr/gen_initramfs.sh -h'.
Where's this all leading?
=========================
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index b02c52cd69d6..64fe7db080e5 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -307,7 +307,7 @@ Determining the type of extension to create is a matter of scope.
* Device extensions are attributes that expose information/control
non-specific to a particular Count or Signal. This is where you would
- put your global features or other miscellanous functionality.
+ put your global features or other miscellaneous functionality.
For example, if your device has an overtemp sensor, you can report the
chip overheated via a device extension called "error_overtemp":
diff --git a/Documentation/driver-api/pwm.rst b/Documentation/driver-api/pwm.rst
index a7ca4f58305a..ccb06e485756 100644
--- a/Documentation/driver-api/pwm.rst
+++ b/Documentation/driver-api/pwm.rst
@@ -40,7 +40,8 @@ after usage with pwm_free().
New users should use the pwm_get() function and pass to it the consumer
device or a consumer name. pwm_put() is used to free the PWM device. Managed
-variants of these functions, devm_pwm_get() and devm_pwm_put(), also exist.
+variants of the getters, devm_pwm_get(), devm_of_pwm_get() and
+devm_fwnode_pwm_get(), also exist.
After being requested, a PWM has to be configured using::
@@ -48,6 +49,10 @@ After being requested, a PWM has to be configured using::
This API controls both the PWM period/duty_cycle config and the
enable/disable state.
+There is also a usage_power setting: If set, the PWM driver is only required to
+maintain the power output but has more freedom regarding signal form.
+If supported by the driver, the signal can be optimized, for example to improve
+EMI by phase shifting the individual channels of a chip.
The pwm_config(), pwm_enable() and pwm_disable() functions are just wrappers
around pwm_apply_state() and should not be used if the user wants to change
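For illustration, a minimal consumer sketch of the managed getter and the
usage_power flag described above (the device, consumer id and period values are
hypothetical)::

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pwm.h>

    static int example_probe(struct device *dev)
    {
            struct pwm_device *pwm;
            struct pwm_state state;

            /* Managed variant: released automatically when the device unbinds. */
            pwm = devm_pwm_get(dev, NULL);
            if (IS_ERR(pwm))
                    return PTR_ERR(pwm);

            /* Start from the current hardware state, then adjust it. */
            pwm_init_state(pwm, &state);
            state.period = 1000000;         /* 1 ms */
            state.duty_cycle = 500000;      /* 50 % */
            state.enabled = true;
            /* Only the delivered power matters; the driver may reshape the signal. */
            state.usage_power = true;

            return pwm_apply_state(pwm, &state);
    }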
diff --git a/Documentation/driver-api/serial/moxa-smartio.rst b/Documentation/driver-api/serial/moxa-smartio.rst
index 156100f17c3f..af25bc5cc3e6 100644
--- a/Documentation/driver-api/serial/moxa-smartio.rst
+++ b/Documentation/driver-api/serial/moxa-smartio.rst
@@ -2,14 +2,8 @@
MOXA Smartio/Industio Family Device Driver Installation Guide
=============================================================
-.. note::
-
- This file is outdated. It needs some care in order to make it
- updated to Kernel 5.0 and upper
-
Copyright (C) 2008, Moxa Inc.
-
-Date: 01/21/2008
+Copyright (C) 2021, Jiri Slaby
.. Content
@@ -17,12 +11,7 @@ Date: 01/21/2008
2. System Requirement
3. Installation
3.1 Hardware installation
- 3.2 Driver files
- 3.3 Device naming convention
- 3.4 Module driver configuration
- 3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x.
- 3.6 Custom configuration
- 3.7 Verify driver installation
+ 3.2 Device naming convention
4. Utilities
5. Setserial
6. Troubleshooting
@@ -31,14 +20,13 @@ Date: 01/21/2008
^^^^^^^^^^^^^^^
The Smartio/Industio/UPCI family Linux driver supports following multiport
- boards.
+ boards:
- 2 ports multiport board
CP-102U, CP-102UL, CP-102UF
CP-132U-I, CP-132UL,
CP-132, CP-132I, CP132S, CP-132IS,
- CI-132, CI-132I, CI-132IS,
- (C102H, C102HI, C102HIS, C102P, CP-102, CP-102S)
+ (CP-102, CP-102S)
- 4 ports multiport board
CP-104EL,
@@ -46,10 +34,7 @@ Date: 01/21/2008
CP-134U, CP-134U-I,
C104H/PCI, C104HS/PCI,
CP-114, CP-114I, CP-114S, CP-114IS, CP-114UL,
- C104H, C104HS,
- CI-104J, CI-104JS,
- CI-134, CI-134I, CI-134IS,
- (C114HI, CT-114I, C104P),
+ (C114HI, CT-114I),
POS-104UL,
CB-114,
CB-134I
@@ -58,15 +43,10 @@ Date: 01/21/2008
CP-118EL, CP-168EL,
CP-118U, CP-168U,
C168H/PCI,
- C168H, C168HS,
- (C168P),
CB-108
- This driver and installation procedure have been developed upon Linux Kernel
- 2.4.x and 2.6.x. This driver supports Intel x86 hardware platform. In order
- to maintain compatibility, this version has also been properly tested with
- RedHat, Mandrake, Fedora and S.u.S.E Linux. However, if compatibility problem
- occurs, please contact Moxa at support@moxa.com.tw.
+ If a compatibility problem occurs, please contact Moxa at
+ support@moxa.com.tw.
In addition to device driver, useful utilities are also provided in this
version. They are:
@@ -78,22 +58,19 @@ Date: 01/21/2008
Monitor program to observe data count and line status signals.
- msterm A simple terminal program which is useful in testing serial
ports.
- - io-irq.exe
- Configuration program to setup ISA boards. Please note that
- this program can only be executed under DOS.
All the drivers and utilities are published in form of source code under
GNU General Public License in this version. Please refer to GNU General
Public License announcement in each source code file for more detail.
- In Moxa's Web sites, you may always find latest driver at http://www.moxa.com/.
+ On Moxa's website, you can always find the latest driver at
+ https://www.moxa.com/.
- This version of driver can be installed as Loadable Module (Module driver)
- or built-in into kernel (Static driver). You may refer to following
- installation procedure for suitable one. Before you install the driver,
+ This version of the driver can be installed as a Loadable Module (Module driver)
+ or built into the kernel (Static driver). Before you install the driver,
please refer to hardware installation procedure in the User's Manual.
- We assume the user should be familiar with following documents.
+ We assume the user is familiar with the following documents:
- Serial-HOWTO
- Kernel-HOWTO
@@ -101,9 +78,6 @@ Date: 01/21/2008
2. System Requirement
^^^^^^^^^^^^^^^^^^^^^
- - Hardware platform: Intel x86 machine
- - Kernel version: 2.4.x or 2.6.x
- - gcc version 2.72 or later
- Maximum 4 boards can be installed in combination
3. Installation
@@ -112,23 +86,12 @@ Date: 01/21/2008
3.1 Hardware installation
=========================
- There are two types of buses, ISA and PCI, for Smartio/Industio
- family multiport board.
-
-ISA board
----------
-
- You'll have to configure CAP address, I/O address, Interrupt Vector
- as well as IRQ before installing this driver. Please refer to hardware
- installation procedure in User's Manual before proceed any further.
- Please make sure the JP1 is open after the ISA board is set properly.
-
PCI/UPCI board
--------------
- You may need to adjust IRQ usage in BIOS to avoid from IRQ conflict
- with other ISA devices. Please refer to hardware installation
- procedure in User's Manual in advance.
+ You may need to adjust IRQ usage in the BIOS to avoid an IRQ conflict with
+ other ISA devices. Please refer to the hardware installation procedure in the
+ User's Manual in advance.
PCI IRQ Sharing
---------------
@@ -138,42 +101,11 @@ PCI IRQ Sharing
together on one system and they can share the same IRQ.
-3.2 Driver files
-================
-
- The driver file may be obtained from ftp, CD-ROM or floppy disk. The
- first step, anyway, is to copy driver file "mxser.tgz" into specified
- directory. e.g. /moxa. The execute commands as below::
-
- # cd /
- # mkdir moxa
- # cd /moxa
- # tar xvf /dev/fd0
-
-or::
- # cd /
- # mkdir moxa
- # cd /moxa
- # cp /mnt/cdrom/<driver directory>/mxser.tgz .
- # tar xvfz mxser.tgz
-
-
-3.3 Device naming convention
+3.2 Device naming convention
============================
- You may find all the driver and utilities files in /moxa/mxser.
- Following installation procedure depends on the model you'd like to
- run the driver. If you prefer module driver, please refer to 3.4.
- If static driver is required, please refer to 3.5.
-
-Dialin and callout port
------------------------
-
- This driver remains traditional serial device properties. There are
- two special file name for each serial port. One is dial-in port
- which is named "ttyMxx". For callout port, the naming convention
- is "cumxx".
+ The device node is named "ttyMxx".
Device naming when more than 2 boards installed
-----------------------------------------------
@@ -181,322 +113,13 @@ Device naming when more than 2 boards installed
Naming convention for each Smartio/Industio multiport board is
pre-defined as below.
- ============ =============== ==============
- Board Num. Dial-in Port Callout port
- 1st board ttyM0 - ttyM7 cum0 - cum7
- 2nd board ttyM8 - ttyM15 cum8 - cum15
- 3rd board ttyM16 - ttyM23 cum16 - cum23
- 4th board ttyM24 - ttym31 cum24 - cum31
- ============ =============== ==============
-
-.. note::
-
- Under Kernel 2.6 and upper, the cum Device is Obsolete. So use ttyM*
- device instead.
-
-Board sequence
---------------
-
- This driver will activate ISA boards according to the parameter set
- in the driver. After all specified ISA board activated, PCI board
- will be installed in the system automatically driven.
- Therefore the board number is sorted by the CAP address of ISA boards.
- For PCI boards, their sequence will be after ISA boards and C168H/PCI
- has higher priority than C104H/PCI boards.
-
-3.4 Module driver configuration
-===============================
-
- Module driver is easiest way to install. If you prefer static driver
- installation, please skip this paragraph.
-
-
- ------------- Prepare to use the MOXA driver --------------------
-
-3.4.1 Create tty device with correct major number
--------------------------------------------------
-
- Before using MOXA driver, your system must have the tty devices
- which are created with driver's major number. We offer one shell
- script "msmknod" to simplify the procedure.
- This step is only needed to be executed once. But you still
- need to do this procedure when:
-
- a. You change the driver's major number. Please refer the "3.7"
- section.
- b. Your total installed MOXA boards number is changed. Maybe you
- add/delete one MOXA board.
- c. You want to change the tty name. This needs to modify the
- shell script "msmknod"
-
- The procedure is::
-
- # cd /moxa/mxser/driver
- # ./msmknod
-
- This shell script will require the major number for dial-in
- device and callout device to create tty device. You also need
- to specify the total installed MOXA board number. Default major
- numbers for dial-in device and callout device are 30, 35. If
- you need to change to other number, please refer section "3.7"
- for more detailed procedure.
- Msmknod will delete any special files occupying the same device
- naming.
-
-3.4.2 Build the MOXA driver and utilities
------------------------------------------
-
- Before using the MOXA driver and utilities, you need compile the
- all the source code. This step is only need to be executed once.
- But you still re-compile the source code if you modify the source
- code. For example, if you change the driver's major number (see
- "3.7" section), then you need to do this step again.
-
- Find "Makefile" in /moxa/mxser, then run
-
- # make clean; make install
-
- ..note::
-
- For Red Hat 9, Red Hat Enterprise Linux AS3/ES3/WS3 & Fedora Core1:
- # make clean; make installsp1
-
- For Red Hat Enterprise Linux AS4/ES4/WS4:
- # make clean; make installsp2
-
- The driver files "mxser.o" and utilities will be properly compiled
- and copied to system directories respectively.
-
-------------- Load MOXA driver--------------------
-
-3.4.3 Load the MOXA driver
---------------------------
-
- ::
-
- # modprobe mxser <argument>
-
- will activate the module driver. You may run "lsmod" to check
- if "mxser" is activated. If the MOXA board is ISA board, the
- <argument> is needed. Please refer to section "3.4.5" for more
- information.
-
-------------- Load MOXA driver on boot --------------------
-
-3.4.4 Load the mxser driver
----------------------------
-
-
- For the above description, you may manually execute
- "modprobe mxser" to activate this driver and run
- "rmmod mxser" to remove it.
-
- However, it's better to have a boot time configuration to
- eliminate manual operation. Boot time configuration can be
- achieved by rc file. We offer one "rc.mxser" file to simplify
- the procedure under "moxa/mxser/driver".
-
- But if you use ISA board, please modify the "modprobe ..." command
- to add the argument (see "3.4.5" section). After modifying the
- rc.mxser, please try to execute "/moxa/mxser/driver/rc.mxser"
- manually to make sure the modification is ok. If any error
- encountered, please try to modify again. If the modification is
- completed, follow the below step.
-
- Run following command for setting rc files::
-
- # cd /moxa/mxser/driver
- # cp ./rc.mxser /etc/rc.d
- # cd /etc/rc.d
-
- Check "rc.serial" is existed or not. If "rc.serial" doesn't exist,
- create it by vi, run "chmod 755 rc.serial" to change the permission.
-
- Add "/etc/rc.d/rc.mxser" in last line.
-
- Reboot and check if moxa.o activated by "lsmod" command.
-
-3.4.5. specify CAP address
---------------------------
-
- If you'd like to drive Smartio/Industio ISA boards in the system,
- you'll have to add parameter to specify CAP address of given
- board while activating "mxser.o". The format for parameters are
- as follows.::
-
- modprobe mxser ioaddr=0x???,0x???,0x???,0x???
- | | | |
- | | | +- 4th ISA board
- | | +------ 3rd ISA board
- | +------------ 2nd ISA board
- +-------------------1st ISA board
-
-3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x
-================================================================
-
- Note:
- To use static driver, you must install the linux kernel
- source package.
-
-3.5.1 Backup the built-in driver in the kernel
-----------------------------------------------
-
- ::
-
- # cd /usr/src/linux/drivers/char
- # mv mxser.c mxser.c.old
-
- For Red Hat 7.x user, you need to create link:
- # cd /usr/src
- # ln -s linux-2.4 linux
-
-3.5.2 Create link
------------------
- ::
-
- # cd /usr/src/linux/drivers/char
- # ln -s /moxa/mxser/driver/mxser.c mxser.c
-
-3.5.3 Add CAP address list for ISA boards.
-------------------------------------------
-
- For PCI boards user, please skip this step.
-
- In module mode, the CAP address for ISA board is given by
- parameter. In static driver configuration, you'll have to
- assign it within driver's source code. If you will not
- install any ISA boards, you may skip to next portion.
- The instructions to modify driver source code are as
- below.
-
- a. run::
-
- # cd /moxa/mxser/driver
- # vi mxser.c
-
- b. Find the array mxserBoardCAP[] as below::
-
- static int mxserBoardCAP[] = {0x00, 0x00, 0x00, 0x00};
-
- c. Change the address within this array using vi. For
- example, to driver 2 ISA boards with CAP address
- 0x280 and 0x180 as 1st and 2nd board. Just to change
- the source code as follows::
-
- static int mxserBoardCAP[] = {0x280, 0x180, 0x00, 0x00};
-
-3.5.4 Setup kernel configuration
---------------------------------
-
- Configure the kernel::
-
- # cd /usr/src/linux
- # make menuconfig
-
- You will go into a menu-driven system. Please select [Character
- devices][Non-standard serial port support], enable the [Moxa
- SmartIO support] driver with "[*]" for built-in (not "[M]"), then
- select [Exit] to exit this program.
-
-3.5.5 Rebuild kernel
---------------------
-
- The following are for Linux kernel rebuilding, for your
- reference only.
-
- For appropriate details, please refer to the Linux document:
-
- a. Run the following commands::
-
- cd /usr/src/linux
- make clean # take a few minutes
- make dep # take a few minutes
- make bzImage # take probably 10-20 minutes
- make install # copy boot image to correct position
-
- f. Please make sure the boot kernel (vmlinuz) is in the
- correct position.
- g. If you use 'lilo' utility, you should check /etc/lilo.conf
- 'image' item specified the path which is the 'vmlinuz' path,
- or you will load wrong (or old) boot kernel image (vmlinuz).
- After checking /etc/lilo.conf, please run "lilo".
-
- Note that if the result of "make bzImage" is ERROR, then you have to
- go back to Linux configuration Setup. Type "make menuconfig" in
- directory /usr/src/linux.
-
-
-3.5.6 Make tty device and special file
---------------------------------------
-
- ::
- # cd /moxa/mxser/driver
- # ./msmknod
-
-3.5.7 Make utility
-------------------
-
- ::
-
- # cd /moxa/mxser/utility
- # make clean; make install
-
-3.5.8 Reboot
-------------
-
-
-
-3.6 Custom configuration
-========================
-
- Although this driver already provides you default configuration, you
- still can change the device name and major number. The instruction to
- change these parameters are shown as below.
-
-a. Change Device name
-
- If you'd like to use other device names instead of default naming
- convention, all you have to do is to modify the internal code
- within the shell script "msmknod". First, you have to open "msmknod"
- by vi. Locate each line contains "ttyM" and "cum" and change them
- to the device name you desired. "msmknod" creates the device names
- you need next time executed.
-
-b. Change Major number
-
- If major number 30 and 35 had been occupied, you may have to select
- 2 free major numbers for this driver. There are 3 steps to change
- major numbers.
-
-3.6.1 Find free major numbers
------------------------------
-
- In /proc/devices, you may find all the major numbers occupied
- in the system. Please select 2 major numbers that are available.
- e.g. 40, 45.
-
-3.6.2 Create special files
---------------------------
-
- Run /moxa/mxser/driver/msmknod to create special files with
- specified major numbers.
-
-3.6.3 Modify driver with new major number
------------------------------------------
-
- Run vi to open /moxa/mxser/driver/mxser.c. Locate the line
- contains "MXSERMAJOR". Change the content as below::
-
- #define MXSERMAJOR 40
- #define MXSERCUMAJOR 45
-
- 3.6.4 Run "make clean; make install" in /moxa/mxser/driver.
-
-3.7 Verify driver installation
-==============================
-
- You may refer to /var/log/messages to check the latest status
- log reported by this driver whenever it's activated.
+ ============ ===============
+ Board Num. Device node
+ 1st board ttyM0 - ttyM7
+ 2nd board ttyM8 - ttyM15
+ 3rd board ttyM16 - ttyM23
+ 4th board ttyM24 - ttyM31
+ ============ ===============
4. Utilities
^^^^^^^^^^^^
@@ -505,14 +128,11 @@ b. Change Major number
msterm. These 3 utilities are released in form of source code. They should
be compiled into executable file and copied into /usr/bin.
- Before using these utilities, please load driver (refer 3.4 & 3.5) and
- make sure you had run the "msmknod" utility.
-
msdiag - Diagnostic
===================
This utility provides the function to display what Moxa Smartio/Industio
- board found by driver in the system.
+ board was found by the driver in the system.
msmon - Port Monitoring
=======================
@@ -542,23 +162,23 @@ msterm - Terminal Emulation
Supported Setserial parameters are listed as below.
- ============== =========================================================
- uart set UART type(16450-->disable FIFO, 16550A-->enable FIFO)
- close_delay set the amount of time(in 1/100 of a second) that DTR
+ ============== =============================================================
+ uart set UART type(16450 --> disable FIFO, 16550A --> enable FIFO)
+ close_delay set the amount of time (in 1/100 of a second) that DTR
should be kept low while being closed.
- closing_wait set the amount of time(in 1/100 of a second) that the
+ closing_wait set the amount of time (in 1/100 of a second) that the
serial port should wait for data to be drained while
- being closed, before the receiver is disable.
- spd_hi Use 57.6kb when the application requests 38.4kb.
- spd_vhi Use 115.2kb when the application requests 38.4kb.
- spd_shi Use 230.4kb when the application requests 38.4kb.
- spd_warp Use 460.8kb when the application requests 38.4kb.
- spd_normal Use 38.4kb when the application requests 38.4kb.
- spd_cust Use the custom divisor to set the speed when the
+ being closed, before the receiver is disabled.
+ spd_hi Use 57.6kb when the application requests 38.4kb.
+ spd_vhi Use 115.2kb when the application requests 38.4kb.
+ spd_shi Use 230.4kb when the application requests 38.4kb.
+ spd_warp Use 460.8kb when the application requests 38.4kb.
+ spd_normal Use 38.4kb when the application requests 38.4kb.
+ spd_cust Use the custom divisor to set the speed when the
application requests 38.4kb.
- divisor This option set the custom division.
- baud_base This option set the base baud rate.
- ============== =========================================================
+ divisor This option sets the custom division.
+ baud_base This option sets the base baud rate.
+ ============== =============================================================
6. Troubleshooting
^^^^^^^^^^^^^^^^^^
@@ -575,41 +195,3 @@ msterm - Terminal Emulation
Solution:
To avoid this problem, please unplug fifth and after board, because Moxa
driver supports up to 4 boards.
-
- Error msg:
- Request_irq fail, IRQ(?) may be conflict with another device.
-
- Solution:
- Other PCI or ISA devices occupy the assigned IRQ. If you are not sure
- which device causes the situation, please check /proc/interrupts to find
- free IRQ and simply change another free IRQ for Moxa board.
-
- Error msg:
- Board #: C1xx Series(CAP=xxx) interrupt number invalid.
-
- Solution:
- Each port within the same multiport board shares the same IRQ. Please set
- one IRQ (IRQ doesn't equal to zero) for one Moxa board.
-
- Error msg:
- No interrupt vector be set for Moxa ISA board(CAP=xxx).
-
- Solution:
- Moxa ISA board needs an interrupt vector.Please refer to user's manual
- "Hardware Installation" chapter to set interrupt vector.
-
- Error msg:
- Couldn't install MOXA Smartio/Industio family driver!
-
- Solution:
- Load Moxa driver fail, the major number may conflict with other devices.
- Please refer to previous section 3.7 to change a free major number for
- Moxa driver.
-
- Error msg:
- Couldn't install MOXA Smartio/Industio family callout driver!
-
- Solution:
- Load Moxa callout driver fail, the callout device major number may
- conflict with other devices. Please refer to previous section 3.7 to
- change a free callout device major number for Moxa driver.
diff --git a/Documentation/driver-api/usb/error-codes.rst b/Documentation/driver-api/usb/error-codes.rst
index a3e84bfac776..8f9790c2d6f8 100644
--- a/Documentation/driver-api/usb/error-codes.rst
+++ b/Documentation/driver-api/usb/error-codes.rst
@@ -61,6 +61,9 @@ USB-specific:
(c) requested data transfer length is invalid: negative
or too large for the host controller.
+``-EBADR`` The wLength value in a control URB's setup packet does
+ not match the URB's transfer_buffer_length.
+
``-ENOSPC`` This request would overcommit the usb bandwidth reserved
for periodic transfers (interrupt, isochronous).
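As a sketch of the ``-EBADR`` case above: keep the setup packet's wLength in
sync with the URB's transfer_buffer_length (the request value and helper name
below are hypothetical)::

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int example_vendor_read(struct usb_device *udev, struct urb *urb,
                                   void *buf, u16 len,
                                   usb_complete_t done, void *ctx)
    {
            struct usb_ctrlrequest *setup;

            setup = kmalloc(sizeof(*setup), GFP_KERNEL);
            if (!setup)
                    return -ENOMEM;

            setup->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
            setup->bRequest = 0x01;                 /* hypothetical vendor request */
            setup->wValue = cpu_to_le16(0);
            setup->wIndex = cpu_to_le16(0);
            /* Must equal the buffer length passed below, or submission fails
             * with -EBADR. */
            setup->wLength = cpu_to_le16(len);

            usb_fill_control_urb(urb, udev, usb_rcvctrlpipe(udev, 0),
                                 (unsigned char *)setup, buf, len, done, ctx);

            return usb_submit_urb(urb, GFP_KERNEL);
    }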
diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst
index 1779b85f014e..9f26079cacae 100644
--- a/Documentation/driver-api/vfio-mediated-device.rst
+++ b/Documentation/driver-api/vfio-mediated-device.rst
@@ -93,7 +93,7 @@ interfaces:
Registration Interface for a Mediated Bus Driver
------------------------------------------------
-The registration interface for a mediated bus driver provides the following
+The registration interface for a mediated device driver provides the following
structure to represent a mediated device's driver::
/*
@@ -136,37 +136,26 @@ The structures in the mdev_parent_ops structure are as follows:
* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
+* device_driver: device driver to bind for mediated device instances
-The functions in the mdev_parent_ops structure are as follows:
+The mdev_parent_ops structure also still has various function pointers. These
+exist for historical reasons only and shall not be used for new drivers.
-* create: allocate basic resources in a driver for a mediated device
-* remove: free resources in a driver when a mediated device is destroyed
-
-(Note that mdev-core provides no implicit serialization of create/remove
-callbacks per mdev parent device, per mdev type, or any other categorization.
-Vendor drivers are expected to be fully asynchronous in this respect or
-provide their own internal resource protection.)
-
-The callbacks in the mdev_parent_ops structure are as follows:
-
-* open: open callback of mediated device
-* close: close callback of mediated device
-* ioctl: ioctl callback of mediated device
-* read : read emulation callback
-* write: write emulation callback
-* mmap: mmap emulation callback
-
-A driver should use the mdev_parent_ops structure in the function call to
-register itself with the mdev core driver::
+When a driver wants to add the GUID creation sysfs interface to a device it has
+probed, it should call::
extern int mdev_register_device(struct device *dev,
const struct mdev_parent_ops *ops);
-However, the mdev_parent_ops structure is not required in the function call
-that a driver should use to unregister itself with the mdev core driver::
+This will provide the 'mdev_supported_types/XX/create' files which can then be
+used to trigger the creation of a mdev_device. The created mdev_device will be
+attached to the specified driver.
+
+When the driver needs to remove itself it calls::
extern void mdev_unregister_device(struct device *dev);
+This will unbind and destroy all the created mdevs and remove the sysfs files.
Mediated Device Management Interface Through sysfs
==================================================
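A minimal parent-driver sketch of the registration flow described above (the
type groups and mdev driver below are hypothetical placeholders)::

    #include <linux/device.h>
    #include <linux/mdev.h>
    #include <linux/module.h>

    static struct attribute_group *my_type_groups[] = {
            /* one attribute group per supported mdev type would go here */
            NULL,
    };

    static struct mdev_driver my_mdev_driver;       /* probe/remove elided */

    static const struct mdev_parent_ops my_parent_ops = {
            .owner                  = THIS_MODULE,
            .supported_type_groups  = my_type_groups,
            .device_driver          = &my_mdev_driver,
    };

    static int my_parent_probe(struct device *dev)
    {
            /* Exposes mdev_supported_types/XX/create in sysfs. */
            return mdev_register_device(dev, &my_parent_ops);
    }

    static void my_parent_remove(struct device *dev)
    {
            /* Unbinds and destroys all created mdevs, removes the sysfs files. */
            mdev_unregister_device(dev);
    }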
diff --git a/Documentation/features/core/thread-info-in-task/arch-support.txt b/Documentation/features/core/thread-info-in-task/arch-support.txt
new file mode 100644
index 000000000000..9f0259bbd7df
--- /dev/null
+++ b/Documentation/features/core/thread-info-in-task/arch-support.txt
@@ -0,0 +1,32 @@
+#
+# Feature name: thread-info-in-task
+# Kconfig: THREAD_INFO_IN_TASK
+# description: arch makes use of the core kernel facility to embed thread_info in task_struct
+#
+ -----------------------
+ | arch |status|
+ -----------------------
+ | alpha: | TODO |
+ | arc: | TODO |
+ | arm: | TODO |
+ | arm64: | ok |
+ | csky: | TODO |
+ | h8300: | TODO |
+ | hexagon: | TODO |
+ | ia64: | TODO |
+ | m68k: | TODO |
+ | microblaze: | TODO |
+ | mips: | TODO |
+ | nds32: | ok |
+ | nios2: | TODO |
+ | openrisc: | TODO |
+ | parisc: | TODO |
+ | powerpc: | ok |
+ | riscv: | ok |
+ | s390: | ok |
+ | sh: | TODO |
+ | sparc: | TODO |
+ | um: | TODO |
+ | x86: | ok |
+ | xtensa: | TODO |
+ -----------------------
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index 8639fe8315f5..8dcaab070c7b 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -22,7 +22,7 @@
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | TODO |
| sh: | ok |
| sparc: | TODO |
diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst
index 0f2292e367e6..71b1fee56d2a 100644
--- a/Documentation/filesystems/debugfs.rst
+++ b/Documentation/filesystems/debugfs.rst
@@ -120,8 +120,8 @@ and hexadecimal::
Boolean values can be placed in debugfs with::
- struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+ void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value);
A read on the resulting file will yield either Y (for non-zero values) or
N, followed by a newline. If written to, it will accept either upper- or
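A short sketch of the void-returning helper above, in a hypothetical "example"
module (debugfs calls need no error checking)::

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static bool example_enabled;
    static struct dentry *example_dir;

    static int __init example_init(void)
    {
            example_dir = debugfs_create_dir("example", NULL);
            /* Returns void; a failure only means the file is not created. */
            debugfs_create_bool("enabled", 0644, example_dir, &example_enabled);
            return 0;
    }

    static void __exit example_exit(void)
    {
            debugfs_remove_recursive(example_dir);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");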
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 992bf91eeec8..ff9e7cc97c65 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -281,6 +281,18 @@ compress_extension=%s Support adding specified extension, so that f2fs can enab
For other files, we can still enable compression via ioctl.
Note that, there is one reserved special extension '*', it
can be set to enable compression for all files.
+nocompress_extension=%s Support adding specified extension, so that f2fs can disable
+ compression on those corresponding files, contrary to compress_extension.
+ If you know exactly which files cannot be compressed, you can use this.
+ The same extension name can't appear in both the compress and nocompress
+ extension lists at the same time.
+ If the compress extension specifies all files, the types specified by the
+ nocompress extension will be treated as special cases and will not be compressed.
+ '*' is not allowed in the nocompress extension list.
+ After adding a nocompress_extension, the priority is:
+ dir_flag < compress_extension,nocompress_extension < comp_file_flag,no_comp_file_flag.
+ See more in the compression sections.
+
compress_chksum Support verifying chksum of raw data in compressed cluster.
compress_mode=%s Control file compression mode. This supports "fs" and "user"
modes. In "fs" mode (default), f2fs does automatic compression
@@ -289,6 +301,9 @@ compress_mode=%s Control file compression mode. This supports "fs" and "user"
choosing the target file and the timing. The user can do manual
compression/decompression on the compression enabled files using
ioctls.
+compress_cache Support to use address space of a filesystem managed inode to
+ cache compressed block, in order to improve cache hit ratio of
+ random read.
inlinecrypt When possible, encrypt/decrypt the contents of encrypted
files using the blk-crypto framework rather than
filesystem-layer encryption. This allows the use of
@@ -717,10 +732,10 @@ users.
===================== ======================== ===================
User F2FS Block
===================== ======================== ===================
- META WRITE_LIFE_NOT_SET
- HOT_NODE "
- WARM_NODE "
- COLD_NODE "
+N/A META WRITE_LIFE_NOT_SET
+N/A HOT_NODE "
+N/A WARM_NODE "
+N/A COLD_NODE "
ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
extension list " "
@@ -746,10 +761,10 @@ WRITE_LIFE_LONG " WRITE_LIFE_LONG
===================== ======================== ===================
User F2FS Block
===================== ======================== ===================
- META WRITE_LIFE_MEDIUM;
- HOT_NODE WRITE_LIFE_NOT_SET
- WARM_NODE "
- COLD_NODE WRITE_LIFE_NONE
+N/A META WRITE_LIFE_MEDIUM;
+N/A HOT_NODE WRITE_LIFE_NOT_SET
+N/A WARM_NODE "
+N/A COLD_NODE WRITE_LIFE_NONE
ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
extension list " "
@@ -814,13 +829,30 @@ Compression implementation
all logical blocks in cluster contain valid data and compress ratio of
cluster data is lower than specified threshold.
-- To enable compression on regular inode, there are three ways:
+- To enable compression on regular inode, there are four ways:
* chattr +c file
* chattr +c dir; touch dir/file
* mount w/ -o compress_extension=ext; touch file.ext
* mount w/ -o compress_extension=*; touch any_file
+- To disable compression on regular inode, there are two ways:
+
+ * chattr -c file
+ * mount w/ -o nocompress_extension=ext; touch file.ext
+
+- Priority among FS_COMPR_FL, FS_NOCOMP_FL and extensions:
+
+  * compress_extension=so; nocompress_extension=zip; chattr +c dir; touch
+    dir/foo.so; touch dir/bar.zip; touch dir/baz.txt; then foo.so and baz.txt
+    should be compressed, and bar.zip should be non-compressed. chattr +c dir/bar.zip
+    can enable compression on bar.zip.
+  * compress_extension=so; nocompress_extension=zip; chattr -c dir; touch
+    dir/foo.so; touch dir/bar.zip; touch dir/baz.txt; then foo.so should be
+    compressed, and bar.zip and baz.txt should be non-compressed.
+    chattr +c dir/bar.zip; chattr +c dir/baz.txt; can enable compression on bar.zip
+    and baz.txt.
+
- At this point, compression feature doesn't expose compressed space to user
directly in order to guarantee potential data updates later to the space.
Instead, the main goal is to reduce data writes to flash disk as much as
diff --git a/Documentation/filesystems/path-lookup.rst b/Documentation/filesystems/path-lookup.rst
index a6fa7619b69e..2b2df6aa5432 100644
--- a/Documentation/filesystems/path-lookup.rst
+++ b/Documentation/filesystems/path-lookup.rst
@@ -1297,18 +1297,18 @@ to lookup: RCU-walk, REF-walk, and REF-walk with forced revalidation.
yet. This is primarily used to tell the audit subsystem the full
context of a particular access being audited.
-``LOOKUP_ROOT`` indicates that the ``root`` field in the ``nameidata`` was
+``ND_ROOT_PRESET`` indicates that the ``root`` field in the ``nameidata`` was
provided by the caller, so it shouldn't be released when it is no
longer needed.
-``LOOKUP_JUMPED`` means that the current dentry was chosen not because
+``ND_JUMPED`` means that the current dentry was chosen not because
it had the right name but for some other reason. This happens when
following "``..``", following a symlink to ``/``, crossing a mount point
or accessing a "``/proc/$PID/fd/$FD``" symlink (also known as a "magic
link"). In this case the filesystem has not been asked to revalidate the
name (with ``d_revalidate()``). In such cases the inode may still need
to be revalidated, so ``d_op->d_weak_revalidate()`` is called if
-``LOOKUP_JUMPED`` is set when the look completes - which may be at the
+``ND_JUMPED`` is set when the lookup completes - which may be at the
final component or, when creating, unlinking, or renaming, at the penultimate component.
Resolution-restriction flags
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 0302035781be..bf19fd6b86e7 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -890,3 +890,21 @@ been called or returned with non -EIOCBQUEUED code.
mnt_want_write_file() can now only be paired with mnt_drop_write_file(),
whereas previously it could be paired with mnt_drop_write() as well.
+
+---
+
+**mandatory**
+
+iov_iter_copy_from_user_atomic() is gone; use copy_page_from_iter_atomic().
+The difference is copy_page_from_iter_atomic() advances the iterator and
+you don't need iov_iter_advance() after it. However, if you decide to use
+only part of the obtained data, you should do iov_iter_revert().
+
+---
+
+**mandatory**
+
+Calling conventions for file_open_root() changed; now it takes struct path *
+instead of passing mount and dentry separately. For callers that used to
+pass <mnt, mnt->mnt_root> pair (i.e. the root of given mount), a new helper
+is provided - file_open_root_mnt(). In-tree users adjusted.
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 81bfe3c800cc..042c418f4090 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -933,8 +933,15 @@ meminfo
~~~~~~~
Provides information about distribution and utilization of memory. This
-varies by architecture and compile options. The following is from a
-16GB PIII, which has highmem enabled. You may not have all of these fields.
+varies by architecture and compile options. Some of the counters reported
+here overlap. The memory reported by the non-overlapping counters may not
+add up to the overall memory usage, and the difference can be substantial
+for some workloads. In many cases there are other means to account for the
+additional memory using subsystem-specific interfaces, for instance
+/proc/net/sockstat for TCP memory allocations.
+
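+As an example of such a subsystem-specific interface, TCP memory usage can be
+read from /proc/net/sockstat (a hypothetical sample; fields and values depend
+on the kernel configuration and workload)::
+
+    $ cat /proc/net/sockstat
+    sockets: used 231
+    TCP: inuse 11 orphan 0 tw 0 alloc 14 mem 2
+    UDP: inuse 3 mem 2
+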
+The following is from a 16GB PIII, which has highmem enabled.
+You may not have all of these fields.
::
@@ -1913,18 +1920,20 @@ if precise results are needed.
3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file
---------------------------------------------------------------
This file provides information associated with an opened file. The regular
-files have at least three fields -- 'pos', 'flags' and 'mnt_id'. The 'pos'
-represents the current offset of the opened file in decimal form [see lseek(2)
-for details], 'flags' denotes the octal O_xxx mask the file has been
-created with [see open(2) for details] and 'mnt_id' represents mount ID of
-the file system containing the opened file [see 3.5 /proc/<pid>/mountinfo
-for details].
+files have at least four fields -- 'pos', 'flags', 'mnt_id' and 'ino'.
+The 'pos' represents the current offset of the opened file in decimal
+form [see lseek(2) for details], 'flags' denotes the octal O_xxx mask the
+file has been created with [see open(2) for details] and 'mnt_id' represents
+mount ID of the file system containing the opened file [see 3.5
+/proc/<pid>/mountinfo for details]. 'ino' represents the inode number of
+the file.
A typical output is::
pos: 0
flags: 0100002
mnt_id: 19
+ ino: 63107
All locks associated with a file descriptor are shown in its fdinfo too::
@@ -1941,6 +1950,7 @@ Eventfd files
pos: 0
flags: 04002
mnt_id: 9
+ ino: 63107
eventfd-count: 5a
where 'eventfd-count' is hex value of a counter.
@@ -1953,6 +1963,7 @@ Signalfd files
pos: 0
flags: 04002
mnt_id: 9
+ ino: 63107
sigmask: 0000000000000200
where 'sigmask' is hex value of the signal mask associated
@@ -1966,6 +1977,7 @@ Epoll files
pos: 0
flags: 02
mnt_id: 9
+ ino: 63107
tfd: 5 events: 1d data: ffffffffffffffff pos:0 ino:61af sdev:7
where 'tfd' is a target file descriptor number in decimal form,
@@ -1982,6 +1994,8 @@ For inotify files the format is the following::
pos: 0
flags: 02000000
+ mnt_id: 9
+ ino: 63107
inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:7e9e0000640d1b6d
where 'wd' is a watch descriptor in decimal form, i.e. a target file
@@ -2004,6 +2018,7 @@ For fanotify files the format is::
pos: 0
flags: 02
mnt_id: 9
+ ino: 63107
fanotify flags:10 event-flags:0
fanotify mnt_id:12 mflags:40 mask:38 ignored_mask:40000003
fanotify ino:4f969 sdev:800013 mflags:0 mask:3b ignored_mask:40000000 fhandle-bytes:8 fhandle-type:1 f_handle:69f90400c275b5b4
@@ -2028,6 +2043,7 @@ Timerfd files
pos: 0
flags: 02
mnt_id: 9
+ ino: 63107
clockid: 0
ticks: 0
settime flags: 01
@@ -2042,6 +2058,22 @@ details]. 'it_value' is remaining time until the timer expiration.
with TIMER_ABSTIME option which will be shown in 'settime flags', but 'it_value'
still exhibits timer's remaining time.
+DMA Buffer files
+~~~~~~~~~~~~~~~~
+
+::
+
+ pos: 0
+ flags: 04002
+ mnt_id: 9
+ ino: 63107
+ size: 32768
+ count: 2
+ exp_name: system-heap
+
+where 'size' is the size of the DMA buffer in bytes. 'count' is the file count of
+the DMA buffer file. 'exp_name' is the name of the DMA buffer exporter.
+
3.9 /proc/<pid>/map_files - Information about memory mapped files
---------------------------------------------------------------------
This directory contains symbolic links which represent memory mapped files
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst
index 4598b0d90b60..164960631925 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst
@@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de
The kernel does not depend on external cpio tools. If you specify a
directory instead of a configuration file, the kernel's build infrastructure
creates a configuration file from that directory (usr/Makefile calls
-usr/gen_initramfs_list.sh), and proceeds to package up that directory
+usr/gen_initramfs.sh), and proceeds to package up that directory
using the config file (by feeding it to usr/gen_init_cpio, which is created
from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is
entirely self-contained, and the kernel's boot-time extractor is also
diff --git a/Documentation/firmware-guide/acpi/enumeration.rst b/Documentation/firmware-guide/acpi/enumeration.rst
index 18074eb71860..74b830b2fd59 100644
--- a/Documentation/firmware-guide/acpi/enumeration.rst
+++ b/Documentation/firmware-guide/acpi/enumeration.rst
@@ -258,6 +258,38 @@ input driver::
.id_table = mpu3050_ids,
};
+Reference to PWM device
+=======================
+
+Sometimes a device can be a consumer of a PWM channel, and the OS needs to know
+which one. To provide this mapping, a special property has been
+introduced, i.e.::
+
+ Device (DEV)
+ {
+ Name (_DSD, Package ()
+ {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "compatible", Package () { "pwm-leds" } },
+ Package () { "label", "alarm-led" },
+ Package () { "pwms",
+ Package () {
+ "\\_SB.PCI0.PWM", // <PWM device reference>
+ 0, // <PWM index>
+ 600000000, // <PWM period>
+ 0, // <PWM flags>
+ }
+ }
+ }
+
+ })
+ ...
+
+In the above example the PWM-based LED driver references channel 0 of the
+\_SB.PCI0.PWM device with an initial period of 600 ms (note that the
+value is given in nanoseconds).
+
GPIO support
============
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index f3a1223f2517..75df90d1e54c 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -57,7 +57,7 @@ FPGA Interface Unit (FIU) represents a standalone functional unit for the
interface to FPGA, e.g. the FPGA Management Engine (FME) and Port (more
descriptions on FME and Port in later sections).
-Accelerated Function Unit (AFU) represents a FPGA programmable region and
+Accelerated Function Unit (AFU) represents an FPGA programmable region and
always connects to a FIU (e.g. a Port) as its child as illustrated above.
Private Features represent sub features of the FIU and AFU. They could be
@@ -311,7 +311,7 @@ The driver organization in virtualization case is illustrated below:
| PCI PF Device | | | PCI VF Device |
+---------------+ | +---------------+
-FPGA PCIe device driver is always loaded first once a FPGA PCIe PF or VF device
+FPGA PCIe device driver is always loaded first once an FPGA PCIe PF or VF device
is detected. It:
* Finishes enumeration on both FPGA PCIe PF and VF device using common
diff --git a/Documentation/gpu/rfc/i915_gem_lmem.rst b/Documentation/gpu/rfc/i915_gem_lmem.rst
index 675ba8620d66..b421a3c1806e 100644
--- a/Documentation/gpu/rfc/i915_gem_lmem.rst
+++ b/Documentation/gpu/rfc/i915_gem_lmem.rst
@@ -18,114 +18,5 @@ real, with all the uAPI bits is:
* Route shmem backend over to TTM SYSTEM for discrete
* TTM purgeable object support
* Move i915 buddy allocator over to TTM
- * MMAP ioctl mode(see `I915 MMAP`_)
- * SET/GET ioctl caching(see `I915 SET/GET CACHING`_)
* Send RFC(with mesa-dev on cc) for final sign off on the uAPI
* Add pciid for DG1 and turn on uAPI for real
-
-New object placement and region query uAPI
-==========================================
-Starting from DG1 we need to give userspace the ability to allocate buffers from
-device local-memory. Currently the driver supports gem_create, which can place
-buffers in system memory via shmem, and the usual assortment of other
-interfaces, like dumb buffers and userptr.
-
-To support this new capability, while also providing a uAPI which will work
-beyond just DG1, we propose to offer three new bits of uAPI:
-
-DRM_I915_QUERY_MEMORY_REGIONS
------------------------------
-New query ID which allows userspace to discover the list of supported memory
-regions(like system-memory and local-memory) for a given device. We identify
-each region with a class and instance pair, which should be unique. The class
-here would be DEVICE or SYSTEM, and the instance would be zero, on platforms
-like DG1.
-
-Side note: The class/instance design is borrowed from our existing engine uAPI,
-where we describe every physical engine in terms of its class, and the
-particular instance, since we can have more than one per class.
-
-In the future we also want to expose more information which can further
-describe the capabilities of a region.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
- :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions
-
-GEM_CREATE_EXT
---------------
-New ioctl which is basically just gem_create but now allows userspace to provide
-a chain of possible extensions. Note that if we don't provide any extensions and
-set flags=0 then we get the exact same behaviour as gem_create.
-
-Side note: We also need to support PXP[1] in the near future, which is also
-applicable to integrated platforms, and adds its own gem_create_ext extension,
-which basically lets userspace mark a buffer as "protected".
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
- :functions: drm_i915_gem_create_ext
-
-I915_GEM_CREATE_EXT_MEMORY_REGIONS
-----------------------------------
-Implemented as an extension for gem_create_ext, we would now allow userspace to
-optionally provide an immutable list of preferred placements at creation time,
-in priority order, for a given buffer object. For the placements we expect
-them each to use the class/instance encoding, as per the output of the regions
-query. Having the list in priority order will be useful in the future when
-placing an object, say during eviction.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
- :functions: drm_i915_gem_create_ext_memory_regions
-
-One fair criticism here is that this seems a little over-engineered[2]. If we
-just consider DG1 then yes, a simple gem_create.flags or something is totally
-all that's needed to tell the kernel to allocate the buffer in local-memory or
-whatever. However looking to the future we need uAPI which can also support
-upcoming Xe HP multi-tile architecture in a sane way, where there can be
-multiple local-memory instances for a given device, and so using both class and
-instance in our uAPI to describe regions is desirable, although specifically
-for DG1 it's uninteresting, since we only have a single local-memory instance.
-
-Existing uAPI issues
-====================
-Some potential issues we still need to resolve.
-
-I915 MMAP
----------
-In i915 there are multiple ways to MMAP GEM object, including mapping the same
-object using different mapping types(WC vs WB), i.e multiple active mmaps per
-object. TTM expects one MMAP at most for the lifetime of the object. If it
-turns out that we have to backpedal here, there might be some potential
-userspace fallout.
-
-I915 SET/GET CACHING
---------------------
-In i915 we have set/get_caching ioctl. TTM doesn't let us to change this, but
-DG1 doesn't support non-snooped pcie transactions, so we can just always
-allocate as WB for smem-only buffers. If/when our hw gains support for
-non-snooped pcie transactions then we must fix this mode at allocation time as
-a new GEM extension.
-
-This is related to the mmap problem, because in general (meaning, when we're
-not running on intel cpus) the cpu mmap must not, ever, be inconsistent with
-allocation mode.
-
-Possible idea is to let the kernel picks the mmap mode for userspace from the
-following table:
-
-smem-only: WB. Userspace does not need to call clflush.
-
-smem+lmem: We only ever allow a single mode, so simply allocate this as uncached
-memory, and always give userspace a WC mapping. GPU still does snooped access
-here(assuming we can't turn it off like on DG1), which is a bit inefficient.
-
-lmem only: always WC
-
-This means on discrete you only get a single mmap mode, all others must be
-rejected. That's probably going to be a new default mode or something like
-that.
-
-Links
-=====
-[1] https://patchwork.freedesktop.org/series/86798/
-
-[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791
diff --git a/Documentation/i2c/i2c-sysfs.rst b/Documentation/i2c/i2c-sysfs.rst
new file mode 100644
index 000000000000..6b68b95cd427
--- /dev/null
+++ b/Documentation/i2c/i2c-sysfs.rst
@@ -0,0 +1,395 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Linux I2C Sysfs
+===============
+
+Overview
+========
+
+I2C topology can be complex because of the existence of I2C MUXes
+(I2C Multiplexers). The Linux
+kernel abstracts the MUX channels into logical I2C bus numbers. However, there
+is a knowledge gap in mapping from the I2C bus physical numbers and MUX topology
+to the logical I2C bus numbers. This doc aims to fill that gap, so the
+audience (hardware engineers and new software developers, for example) can learn
+the concept of logical I2C buses in the kernel by knowing the physical I2C
+topology and navigating the I2C sysfs from a Linux shell. This knowledge is
+useful and essential for using ``i2c-tools`` for development and
+debugging.
+
+Target audience
+---------------
+
+People who need to use a Linux shell to interact with the I2C subsystem on a
+system running Linux.
+
+Prerequisites
+-------------
+
+1. Knowledge of general Linux shell file system commands and operations.
+
+2. General knowledge of I2C, I2C MUX and I2C topology.
+
+Location of I2C Sysfs
+=====================
+
+Typically, the Linux Sysfs filesystem is mounted at the ``/sys`` directory,
+so you can find the I2C Sysfs under ``/sys/bus/i2c/devices``
+and ``cd`` to it directly.
+There is a list of symbolic links under that directory. The links that
+start with ``i2c-`` are I2C buses, which may be either physical or logical. The
+other links that begin with numbers and end with numbers are I2C devices, where
+the first number is the I2C bus number, and the second number is the I2C address.
+
+Google Pixel 3 phone for example::
+
+ blueline:/sys/bus/i2c/devices $ ls
+ 0-0008 0-0061 1-0028 3-0043 4-0036 4-0041 i2c-1 i2c-3
+ 0-000c 0-0066 2-0049 4-000b 4-0040 i2c-0 i2c-2 i2c-4
+
+``i2c-2`` is an I2C bus whose number is 2, and ``2-0049`` is an I2C device
+on bus 2 at address 0x49, bound to a kernel driver.
+
+Terminologies
+=============
+
+First, let us define a couple of terms to avoid confusion in the later
+sections.
+
+(Physical) I2C Bus Controller
+-----------------------------
+
+The hardware system that the Linux kernel is running on may have multiple
+physical I2C bus controllers. These controllers are physical hardware, and the
+system may define multiple registers in the memory space to manipulate the
+controllers. The Linux kernel has I2C bus drivers under the source directory
+``drivers/i2c/busses`` to translate the kernel I2C API into register
+operations for different systems. This terminology is not limited to the Linux
+kernel.
+
+I2C Bus Physical Number
+-----------------------
+
+For each physical I2C bus controller, the system vendor may assign it a physical
+number. For example, the first I2C bus controller which has
+the lowest register addresses may be called ``I2C-0``.
+
+Logical I2C Bus
+---------------
+
+Every I2C bus number you see in Linux I2C Sysfs is a logical I2C bus with a
+number assigned. This is similar to the fact that software code is usually
+written upon virtual memory space, instead of physical memory space.
+
+Each logical I2C bus may be an abstraction of a physical I2C bus controller, or
+an abstraction of a channel behind an I2C MUX. In case it is an abstraction of a
+MUX channel, whenever we access an I2C device via such a logical bus, the kernel
+will switch the I2C MUX for you to the proper channel as part of the
+abstraction.
+
+Physical I2C Bus
+----------------
+
+If the logical I2C bus is a direct abstraction of a physical I2C bus controller,
+let us call it a physical I2C bus.
+
+Caveat
+------
+
+This may be a confusing part for people who only know about the physical I2C
+design of a board. It is actually possible to rename the I2C bus physical number
+to a different number at the logical I2C bus level in Device Tree Source (DTS) under
+section ``aliases``. See
+`arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
+<../../arch/arm/boot/dts/nuvoton-npcm730-gsj.dts>`_
+for an example of DTS file.
+
+Best Practice: **(To kernel software developers)** It is better to keep the I2C
+bus physical numbers the same as their corresponding logical I2C bus numbers,
+instead of renaming or mapping them, so that it may be less confusing to other
+users. These physical I2C buses can serve as good starting points for I2C
+MUX fanouts. For the following examples, we will assume that each physical I2C
+bus is numbered the same as its I2C bus physical number.
+
+Walk through Logical I2C Bus
+============================
+
+For the following content, we will use a more complex I2C topology as an
+example. Here is a brief graph of the I2C topology. If you do not understand
+this graph at first glance, do not be afraid to continue reading this doc
+and review it when you finish reading.
+
+::
+
+ i2c-7 (physical I2C bus controller 7)
+ `-- 7-0071 (4-channel I2C MUX at 0x71)
+ |-- i2c-60 (channel-0)
+ |-- i2c-73 (channel-1)
+ | |-- 73-0040 (I2C sensor device with hwmon directory)
+ | |-- 73-0070 (I2C MUX at 0x70, exists in DTS, but failed to probe)
+ | `-- 73-0072 (8-channel I2C MUX at 0x72)
+ | |-- i2c-78 (channel-0)
+ | |-- ... (channel-1...6, i2c-79...i2c-84)
+ | `-- i2c-85 (channel-7)
+ |-- i2c-86 (channel-2)
+ `-- i2c-203 (channel-3)
+
+Distinguish Physical and Logical I2C Bus
+----------------------------------------
+
+One simple way to distinguish between a physical I2C bus and a logical I2C bus
+is to read the symbolic link ``device`` under the I2C bus directory, using the
+command ``ls -l`` or ``readlink``.
+
+An alternative symbolic link to check is ``mux_device``. This link only exists
+in a logical I2C bus directory that is fanned out from another I2C bus.
+Reading this link will also tell you which I2C MUX device created
+this logical I2C bus.
+
+If the symbolic link points to a directory ending with ``.i2c``, it should be a
+physical I2C bus, directly abstracting a physical I2C bus controller. For
+example::
+
+ $ readlink /sys/bus/i2c/devices/i2c-7/device
+ ../../f0087000.i2c
+ $ ls /sys/bus/i2c/devices/i2c-7/mux_device
+ ls: /sys/bus/i2c/devices/i2c-7/mux_device: No such file or directory
+
+In this case, ``i2c-7`` is a physical I2C bus, so it does not have the symbolic
+link ``mux_device`` under its directory. If the kernel software developer
+follows the common practice of not renaming physical I2C buses, this should also
+be the physical I2C bus controller 7 of the system.
+
+On the other hand, if the symbolic link points to another I2C bus, the I2C bus
+presented by the current directory has to be a logical bus. The I2C bus pointed
+to by the link is the parent bus, which may be either a physical I2C bus or a
+logical one. In this case, the I2C bus presented by the current directory
+abstracts an I2C MUX channel under the parent bus.
+
+For example::
+
+ $ readlink /sys/bus/i2c/devices/i2c-73/device
+ ../../i2c-7
+ $ readlink /sys/bus/i2c/devices/i2c-73/mux_device
+ ../7-0071
+
+``i2c-73`` is a logical bus fanned out by an I2C MUX under ``i2c-7``
+whose I2C address is 0x71.
+Whenever we access an I2C device via bus 73, the kernel will always
+switch the I2C MUX at address 0x71 to the proper channel for you as part of the
+abstraction.
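+
+For example, reading a register of a device on bus 73 only requires the
+logical bus number; the MUX switching is handled transparently by the kernel.
+A hypothetical ``i2c-tools`` session, assuming a device responds at address
+0x40 (``-f`` is needed when the address is already claimed by a kernel driver)::
+
+    $ i2cget -f -y 73 0x40 0x00
+    0x40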
+
+Finding out Logical I2C Bus Number
+----------------------------------
+
+In this section, we will describe how to find out the logical I2C bus number
+representing certain I2C MUX channels based on the knowledge of physical
+hardware I2C topology.
+
+In this example, we have a system with a physical I2C bus 7 that is not renamed
+in DTS. There is a 4-channel MUX at address 0x71 on that bus. There is another
+8-channel MUX at address 0x72 behind the channel 1 of the 0x71 MUX. Let us
+navigate through Sysfs and find out the logical I2C bus number of the channel 3
+of the 0x72 MUX.
+
+First of all, let us go to the directory of ``i2c-7``::
+
+ ~$ cd /sys/bus/i2c/devices/i2c-7
+ /sys/bus/i2c/devices/i2c-7$ ls
+ 7-0071 i2c-60 name subsystem
+ delete_device i2c-73 new_device uevent
+ device i2c-86 of_node
+ i2c-203 i2c-dev power
+
+There, we see the 0x71 MUX as ``7-0071``. Go inside it::
+
+ /sys/bus/i2c/devices/i2c-7$ cd 7-0071/
+ /sys/bus/i2c/devices/i2c-7/7-0071$ ls -l
+ channel-0 channel-3 modalias power
+ channel-1 driver name subsystem
+ channel-2 idle_state of_node uevent
+
+Read the link ``channel-1`` using ``readlink`` or ``ls -l``::
+
+ /sys/bus/i2c/devices/i2c-7/7-0071$ readlink channel-1
+ ../i2c-73
+
+We find out that channel 1 of the 0x71 MUX on ``i2c-7`` is assigned
+the logical I2C bus number 73.
+Let us continue the journey to directory ``i2c-73`` in any of these ways::
+
+ # cd to i2c-73 under I2C Sysfs root
+ /sys/bus/i2c/devices/i2c-7/7-0071$ cd /sys/bus/i2c/devices/i2c-73
+ /sys/bus/i2c/devices/i2c-73$
+
+ # cd the channel symbolic link
+ /sys/bus/i2c/devices/i2c-7/7-0071$ cd channel-1
+ /sys/bus/i2c/devices/i2c-7/7-0071/channel-1$
+
+ # cd the link content
+ /sys/bus/i2c/devices/i2c-7/7-0071$ cd ../i2c-73
+ /sys/bus/i2c/devices/i2c-7/i2c-73$
+
+Either way, you will end up in the directory of ``i2c-73``. Similar to the above,
+we can now find the 0x72 MUX and which logical I2C bus numbers
+its channels are assigned::
+
+ /sys/bus/i2c/devices/i2c-73$ ls
+ 73-0040 device i2c-83 new_device
+ 73-004e i2c-78 i2c-84 of_node
+ 73-0050 i2c-79 i2c-85 power
+ 73-0070 i2c-80 i2c-dev subsystem
+ 73-0072 i2c-81 mux_device uevent
+ delete_device i2c-82 name
+ /sys/bus/i2c/devices/i2c-73$ cd 73-0072
+ /sys/bus/i2c/devices/i2c-73/73-0072$ ls
+ channel-0 channel-4 driver of_node
+ channel-1 channel-5 idle_state power
+ channel-2 channel-6 modalias subsystem
+ channel-3 channel-7 name uevent
+ /sys/bus/i2c/devices/i2c-73/73-0072$ readlink channel-3
+ ../i2c-81
+
+There, we find out the logical I2C bus number of the channel 3 of the 0x72 MUX
+is 81. We can later use this number to switch to its own I2C Sysfs directory or
+issue ``i2c-tools`` commands.
+
+Tip: Once you understand the I2C topology with MUXes, the command
+`i2cdetect -l
+<https://manpages.debian.org/unstable/i2c-tools/i2cdetect.8.en.html>`_
+in
+`I2C Tools
+<https://i2c.wiki.kernel.org/index.php/I2C_Tools>`_
+can easily give you
+an overview of the I2C topology, if it is available on your system. For
+example::
+
+ $ i2cdetect -l | grep -e '\-73' -e _7 | sort -V
+ i2c-7 i2c npcm_i2c_7 I2C adapter
+ i2c-73 i2c i2c-7-mux (chan_id 1) I2C adapter
+ i2c-78 i2c i2c-73-mux (chan_id 0) I2C adapter
+ i2c-79 i2c i2c-73-mux (chan_id 1) I2C adapter
+ i2c-80 i2c i2c-73-mux (chan_id 2) I2C adapter
+ i2c-81 i2c i2c-73-mux (chan_id 3) I2C adapter
+ i2c-82 i2c i2c-73-mux (chan_id 4) I2C adapter
+ i2c-83 i2c i2c-73-mux (chan_id 5) I2C adapter
+ i2c-84 i2c i2c-73-mux (chan_id 6) I2C adapter
+ i2c-85 i2c i2c-73-mux (chan_id 7) I2C adapter
+
+Pinned Logical I2C Bus Number
+-----------------------------
+
+If not specified in DTS, when an I2C MUX driver is applied and the MUX device is
+successfully probed, the kernel will assign logical bus numbers to the MUX
+channels incrementally, starting from the current biggest logical bus number. For
+example, if the system has ``i2c-15`` as the highest logical bus number, and a
+4-channel MUX is applied successfully, we will have ``i2c-16`` for the
+MUX channel 0, and all the way to ``i2c-19`` for the MUX channel 3.
+
+The kernel software developer is able to pin the fanned-out MUX channels to
+static logical I2C bus numbers in the DTS. This doc will not go through the details on
+how to implement this in DTS, but we can see an example in:
+`arch/arm/boot/dts/aspeed-bmc-facebook-wedge400.dts
+<../../arch/arm/boot/dts/aspeed-bmc-facebook-wedge400.dts>`_
+
+In the above example, there is an 8-channel I2C MUX at address 0x70 on physical
+I2C bus 2. Channel 2 of the MUX is defined as ``imux18`` in DTS,
+and pinned to logical I2C bus number 18 with the line ``i2c18 = &imux18;``
+in section ``aliases``.
+
+Taking this further, it is possible to design a logical I2C bus number schema that
+can be easily remembered by humans or calculated arithmetically. For example, we
+can pin the fanned-out channels of a MUX on bus 3 to start at 30. So 30 will be the
+logical bus number of channel 0 of the MUX on bus 3, and 37 will be the
+logical bus number of channel 7 of the MUX on bus 3.
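+
+The pinning can be verified from the shell in the same way as before (a
+hypothetical session, assuming the wedge400 example above)::
+
+    $ readlink /sys/bus/i2c/devices/i2c-18/mux_device
+    ../2-0070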
+
+I2C Devices
+===========
+
+In previous sections, we mostly covered the I2C bus. In this section, let us see
+what we can learn from the I2C device directory whose link name is in the format
+of ``${bus}-${addr}``. The ``${bus}`` part in the name is the logical I2C bus
+number in decimal, while the ``${addr}`` part is the I2C address of the device
+in hex.
+
+I2C Device Directory Content
+----------------------------
+
+Inside each I2C device directory, there is a file named ``name``.
+This file contains the device name that was used to match the device to a
+kernel driver. Use the command ``cat`` to read its content. For example::
+
+ /sys/bus/i2c/devices/i2c-73$ cat 73-0040/name
+ ina230
+ /sys/bus/i2c/devices/i2c-73$ cat 73-0070/name
+ pca9546
+ /sys/bus/i2c/devices/i2c-73$ cat 73-0072/name
+ pca9547
+
+There is a symbolic link named ``driver`` telling which Linux kernel driver was
+used to probe this device::
+
+ /sys/bus/i2c/devices/i2c-73$ readlink -f 73-0040/driver
+ /sys/bus/i2c/drivers/ina2xx
+ /sys/bus/i2c/devices/i2c-73$ readlink -f 73-0072/driver
+ /sys/bus/i2c/drivers/pca954x
+
+But if the link ``driver`` does not exist in the first place,
+it may mean that the kernel driver failed to probe this device due to
+some error. The error may be found in ``dmesg``::
+
+ /sys/bus/i2c/devices/i2c-73$ ls 73-0070/driver
+ ls: 73-0070/driver: No such file or directory
+ /sys/bus/i2c/devices/i2c-73$ dmesg | grep 73-0070
+ pca954x 73-0070: probe failed
+ pca954x 73-0070: probe failed
+
+Depending on what the I2C device is and what kernel driver was used to probe the
+device, we may have different content in the device directory.
+
+I2C MUX Device
+--------------
+
+While you may already be aware of this from previous sections, an I2C MUX device
+will have symbolic links ``channel-*`` inside its device directory.
+These symbolic links point to their logical I2C bus directories::
+
+ /sys/bus/i2c/devices/i2c-73$ ls -l 73-0072/channel-*
+ lrwxrwxrwx ... 73-0072/channel-0 -> ../i2c-78
+ lrwxrwxrwx ... 73-0072/channel-1 -> ../i2c-79
+ lrwxrwxrwx ... 73-0072/channel-2 -> ../i2c-80
+ lrwxrwxrwx ... 73-0072/channel-3 -> ../i2c-81
+ lrwxrwxrwx ... 73-0072/channel-4 -> ../i2c-82
+ lrwxrwxrwx ... 73-0072/channel-5 -> ../i2c-83
+ lrwxrwxrwx ... 73-0072/channel-6 -> ../i2c-84
+ lrwxrwxrwx ... 73-0072/channel-7 -> ../i2c-85
+
+I2C Sensor Device / Hwmon
+-------------------------
+
+I2C sensor devices are also common. If they are successfully bound by a kernel
+hwmon (Hardware Monitoring) driver, you will see a ``hwmon`` directory
+inside the I2C device directory. Keep digging into it and you will find the Hwmon
+Sysfs for the I2C sensor device::
+
+ /sys/bus/i2c/devices/i2c-73/73-0040/hwmon/hwmon17$ ls
+ curr1_input in0_lcrit_alarm name subsystem
+ device in1_crit power uevent
+ in0_crit in1_crit_alarm power1_crit update_interval
+ in0_crit_alarm in1_input power1_crit_alarm
+ in0_input in1_lcrit power1_input
+ in0_lcrit in1_lcrit_alarm shunt_resistor
+
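+
+Individual readings can then be fetched with ``cat`` (hypothetical values; the
+available files depend on the driver)::
+
+    /sys/bus/i2c/devices/i2c-73/73-0040/hwmon/hwmon17$ cat in0_input
+    11984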
+For more info on the Hwmon Sysfs, refer to the doc:
+
+`Naming and data format standards for sysfs files
+<../hwmon/sysfs-interface.rst>`_
+
+Instantiate I2C Devices in I2C Sysfs
+------------------------------------
+
+Refer to the doc:
+
+`How to instantiate I2C devices, Method 4: Instantiate from user-space
+<instantiating-devices.rst#method-4-instantiate-from-user-space>`_
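+
+As a quick illustration of that method (the device name and address below are
+placeholders; see the referenced doc for details and caveats)::
+
+    # echo eeprom 0x50 > /sys/bus/i2c/devices/i2c-3/new_device
+    # echo 0x50 > /sys/bus/i2c/devices/i2c-3/delete_device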
diff --git a/Documentation/i2c/index.rst b/Documentation/i2c/index.rst
index 8b76217e370a..6270f1fd7d4e 100644
--- a/Documentation/i2c/index.rst
+++ b/Documentation/i2c/index.rst
@@ -17,6 +17,7 @@ Introduction
busses/index
i2c-topology
muxes/i2c-mux-gpio
+ i2c-sysfs
Writing device drivers
======================
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 42576880aa4a..60b217b436be 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -243,8 +243,8 @@ Configuration Flags and Socket Options
These are the various configuration flags that can be used to control
and monitor the behavior of AF_XDP sockets.
-XDP_COPY and XDP_ZERO_COPY bind flags
--------------------------------------
+XDP_COPY and XDP_ZEROCOPY bind flags
+------------------------------------
When you bind to a socket, the kernel will first try to use zero-copy
copy. If zero-copy is not supported, it will fall back on using copy
@@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would
like to force a certain mode, you can use the following flags. If you
pass the XDP_COPY flag to the bind call, the kernel will force the
socket into copy mode. If it cannot use copy mode, the bind call will
-fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
+fail with an error. Conversely, the XDP_ZEROCOPY flag will force the
socket into zero-copy mode or fail.
XDP_SHARED_UMEM bind flag
diff --git a/Documentation/networking/caif/caif.rst b/Documentation/networking/caif/caif.rst
index 81a14373d780..d922d419c513 100644
--- a/Documentation/networking/caif/caif.rst
+++ b/Documentation/networking/caif/caif.rst
@@ -69,9 +69,9 @@ There are debugfs parameters provided for serial communication.
- 0x01 - tty->warned is on.
- 0x04 - tty->packed is on.
- - 0x08 - tty->flow_stopped is on.
+ - 0x08 - tty->flow.tco_stopped is on.
- 0x10 - tty->hw_stopped is on.
- - 0x20 - tty->stopped is on.
+ - 0x20 - tty->flow.stopped is on.
* last_tx_msg: Binary blob Prints the last transmitted frame.
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 6ea91e41593f..c86628e6a235 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -212,6 +212,7 @@ Userspace to kernel:
``ETHTOOL_MSG_FEC_SET`` set FEC settings
``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET`` get standard statistics
+ ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info
===================================== ================================
Kernel to userspace:
@@ -250,6 +251,7 @@ Kernel to userspace:
``ETHTOOL_MSG_FEC_NTF`` FEC settings
``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics
+ ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example:
etherStatsPkts512to1023Octets 512 1023
============================= ==== ====
+PHC_VCLOCKS_GET
+===============
+
+Query device PHC virtual clocks information.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested reply header
+ ``ETHTOOL_A_PHC_VCLOCKS_NUM`` u32 PHC virtual clocks number
+ ``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array
+ ==================================== ====== ==========================
+
Request translation
===================
@@ -1575,4 +1596,5 @@ are netlink only.
n/a ``ETHTOOL_MSG_CABLE_TEST_ACT``
n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET``
+ n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET``
=================================== =====================================
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index b3fa522e4cd9..316c7dfa9693 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -826,7 +826,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
initial value when the blackhole issue goes away.
0 to disable the blackhole detection.
- By default, it is set to 1hr.
+ By default, it is set to 0 (feature is disabled).
tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs
The list consists of a primary key and an optional backup key. The
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 91b2cf712801..e26532f49760 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance
gets overloaded very easily and netdev@vger really doesn't need more
traffic if we can help it.
+netdevsim is great, can I extend it for my out-of-tree tests?
+-------------------------------------------------------------
+
+No, `netdevsim` is a test vehicle solely for upstream tests.
+(Please add your tests under tools/testing/selftests/.)
+
+We also give no guarantees that `netdevsim` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+Is netdevsim considered a "user" of an API?
+-------------------------------------------
+
+The Linux kernel has a long-standing rule that no API should be added unless
+it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
+strongly encouraged when adding new APIs, but `netdevsim` in itself
+is **not** considered a use case/user.
+
Any other tips to help ensure my net/net-next patch gets OK'd?
--------------------------------------------------------------
Attention to detail. Re-read your own work as if you were the
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 0467b30e4abe..024d784157c8 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -110,6 +110,12 @@ nf_conntrack_tcp_be_liberal - BOOLEAN
Be conservative in what you do, be liberal in what you accept from others.
If it's non-zero, we mark only out of window RST segments as INVALID.
+nf_conntrack_tcp_ignore_invalid_rst - BOOLEAN
+ - 0 - disabled (default)
+ - 1 - enabled
+
+ If it's 1, we don't mark out of window RST segments as INVALID.
+
nf_conntrack_tcp_loose - BOOLEAN
- 0 - disabled
- not 0 - enabled (default)
@@ -185,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
TCP connections may be offloaded from nf conntrack to nf flow table.
Once aged, the connection is returned to nf conntrack with tcp pickup timeout.
-nf_flowtable_tcp_pickup - INTEGER (seconds)
- default 120
-
- TCP connection timeout after being aged from nf flow table offload.
-
nf_flowtable_udp_timeout - INTEGER (seconds)
default 30
Control offload timeout for udp connections.
UDP connections may be offloaded from nf conntrack to nf flow table.
Once aged, the connection is returned to nf conntrack with udp pickup timeout.
-
-nf_flowtable_udp_pickup - INTEGER (seconds)
- default 30
-
- UDP connection timeout after being aged from nf flow table offload.
diff --git a/Documentation/networking/operstates.rst b/Documentation/networking/operstates.rst
index 9c918f7cb0e8..1ee2141e8ef1 100644
--- a/Documentation/networking/operstates.rst
+++ b/Documentation/networking/operstates.rst
@@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3):
state (f.e. VLAN).
IF_OPER_TESTING (4):
- Unused in current kernel.
+ Interface is in testing mode, for example executing driver self-tests
+ or media (cable) test. It can't be used for normal traffic until tests
+ complete.
IF_OPER_DORMANT (5):
Interface is L1 up, but waiting for an external event, f.e. for a
@@ -111,7 +113,7 @@ it as lower layer.
Note that for certain kind of soft-devices, which are not managing any
real hardware, it is possible to set this bit from userspace. One
-should use TVL IFLA_CARRIER to do so.
+should use TLV IFLA_CARRIER to do so.
netif_carrier_ok() can be used to query that bit.
diff --git a/Documentation/networking/tipc.rst b/Documentation/networking/tipc.rst
index 76775f24cdc8..ab63d298cca2 100644
--- a/Documentation/networking/tipc.rst
+++ b/Documentation/networking/tipc.rst
@@ -4,10 +4,125 @@
Linux Kernel TIPC
=================
-TIPC (Transparent Inter Process Communication) is a protocol that is
-specially designed for intra-cluster communication.
+Introduction
+============
-For more information about TIPC, see http://tipc.sourceforge.net.
+TIPC (Transparent Inter Process Communication) is a protocol that is specially
+designed for intra-cluster communication. It can be configured to transmit
+messages either on UDP or directly across Ethernet. Message delivery is
+sequence guaranteed, loss free and flow controlled. Latency times are shorter
+than with any other known protocol, while maximal throughput is comparable to
+that of TCP.
+
+TIPC Features
+-------------
+
+- Cluster wide IPC service
+
+ Have you ever wished you had the convenience of Unix Domain Sockets even when
+ transmitting data between cluster nodes? Where you yourself determine the
+ addresses you want to bind to and use? Where you don't have to perform DNS
+ lookups and worry about IP addresses? Where you don't have to start timers
+ to monitor the continuous existence of peer sockets? And yet without the
+ downsides of that socket type, such as the risk of lingering inodes?
+
+ Welcome to the Transparent Inter Process Communication service, TIPC in short,
+ which gives you all of this, and a lot more.
+
+- Service Addressing
+
+ A fundamental concept in TIPC is that of Service Addressing which makes it
+  possible for a programmer to choose their own address, bind it to a server
+ socket and let client programs use only that address for sending messages.
+
+- Service Tracking
+
+  A client wanting to wait for the availability of a server uses the Service
+ Tracking mechanism to subscribe for binding and unbinding/close events for
+ sockets with the associated service address.
+
+ The service tracking mechanism can also be used for Cluster Topology Tracking,
+ i.e., subscribing for availability/non-availability of cluster nodes.
+
+ Likewise, the service tracking mechanism can be used for Cluster Connectivity
+ Tracking, i.e., subscribing for up/down events for individual links between
+ cluster nodes.
+
+- Transmission Modes
+
+ Using a service address, a client can send datagram messages to a server socket.
+
+ Using the same address type, it can establish a connection towards an accepting
+ server socket.
+
+ It can also use a service address to create and join a Communication Group,
+ which is the TIPC manifestation of a brokerless message bus.
+
+ Multicast with very good performance and scalability is available both in
+ datagram mode and in communication group mode.
+
+- Inter Node Links
+
+ Communication between any two nodes in a cluster is maintained by one or two
+ Inter Node Links, which both guarantee data traffic integrity and monitor
+ the peer node's availability.
+
+- Cluster Scalability
+
+ By applying the Overlapping Ring Monitoring algorithm on the inter node links
+ it is possible to scale TIPC clusters up to 1000 nodes with a maintained
+ neighbor failure discovery time of 1-2 seconds. For smaller clusters this
+ time can be made much shorter.
+
+- Neighbor Discovery
+
+ Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP
+ multicast, when any of those services are available. If not, configured peer
+ IP addresses can be used.
+
+- Configuration
+
+ When running TIPC in single node mode no configuration whatsoever is needed.
+ When running in cluster mode TIPC must as a minimum be given a node address
+ (before Linux 4.17) and told which interface to attach to. The "tipc"
+  configuration tool makes it possible to add and maintain many more
+  configuration parameters (a minimal example follows this feature list).
+
+- Performance
+
+ TIPC message transfer latency times are better than in any other known protocol.
+ Maximal byte throughput for inter-node connections is still somewhat lower than
+  for TCP, while it is superior for intra-node and inter-container throughput
+ on the same host.
+
+- Language Support
+
+ The TIPC user API has support for C, Python, Perl, Ruby, D and Go.
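+
+As a minimal illustration of the cluster-mode configuration mentioned above
+(a sketch only; the interface name is a placeholder and the full syntax is
+described in the tipc(8) man page)::
+
+  # tipc bearer enable media eth device eth0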
+
+More Information
+----------------
+
+- How to set up TIPC:
+
+ http://tipc.io/getting_started.html
+
+- How to program with TIPC:
+
+ http://tipc.io/programming.html
+
+- How to contribute to TIPC:
+
+  http://tipc.io/contacts.html
+
+- More details about TIPC specification:
+
+ http://tipc.io/protocol.html
+
+
+Implementation
+==============
+
+TIPC is implemented as a kernel module in the net/tipc/ directory.
TIPC Base Types
---------------
diff --git a/Documentation/s390/vfio-ap.rst b/Documentation/s390/vfio-ap.rst
index e15436599086..f57ae621f33e 100644
--- a/Documentation/s390/vfio-ap.rst
+++ b/Documentation/s390/vfio-ap.rst
@@ -514,7 +514,6 @@ These are the steps:
* S390_AP_IOMMU
* VFIO
* VFIO_MDEV
- * VFIO_MDEV_DEVICE
* KVM
If using make menuconfig select the following to build the vfio_ap module::
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 096ffe9cae0e..63ddea2b9640 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -1172,10 +1172,9 @@ Members of interest:
of 0 implies a successfully completed command (and all
data (if any) has been transferred to or from the SCSI
target device). 'result' is a 32 bit unsigned integer that
- can be viewed as 4 related bytes. The SCSI status value is
- in the LSB. See include/scsi/scsi.h status_byte(),
- msg_byte(), host_byte() and driver_byte() macros and
- related constants.
+ can be viewed as 2 related bytes. The SCSI status value is
+ in the LSB. See include/scsi/scsi.h status_byte() and
+ host_byte() macros and related constants.
sense_buffer
- an array (maximum size: SCSI_SENSE_BUFFERSIZE bytes) that
should be written when the SCSI status (LSB of 'result')
diff --git a/Documentation/sound/hd-audio/controls.rst b/Documentation/sound/hd-audio/controls.rst
index f2ebc4f79b44..dbe6483f4ff4 100644
--- a/Documentation/sound/hd-audio/controls.rst
+++ b/Documentation/sound/hd-audio/controls.rst
@@ -102,7 +102,7 @@ Conexant codecs
---------------
Auto-Mute Mode
- See Reatek codecs.
+ See Realtek codecs.
Analog codecs
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index e6365836fa8b..01d59b8aea92 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -3508,14 +3508,15 @@ field must be set, though).
“IEC958 Playback Con Mask” is used to return the bit-mask for the IEC958
status bits of consumer mode. Similarly, “IEC958 Playback Pro Mask”
-returns the bitmask for professional mode. They are read-only controls,
-and are defined as MIXER controls (iface =
-``SNDRV_CTL_ELEM_IFACE_MIXER``).
+returns the bitmask for professional mode. They are read-only controls.
Meanwhile, “IEC958 Playback Default” control is defined for getting and
-setting the current default IEC958 bits. Note that this one is usually
-defined as a PCM control (iface = ``SNDRV_CTL_ELEM_IFACE_PCM``),
-although in some places it's defined as a MIXER control.
+setting the current default IEC958 bits.
+
+For historical reasons, both variants of the Playback Mask and the
+Playback Default controls can be implemented on either a
+``SNDRV_CTL_ELEM_IFACE_PCM`` or a ``SNDRV_CTL_ELEM_IFACE_MIXER`` iface.
+Drivers should expose the mask and default on the same iface though.
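+
+For example, which iface a driver chose can be inspected from user space with
+alsa-utils (hypothetical output; the numid values and the iface differ per
+driver)::
+
+  $ amixer -c 0 controls | grep -i iec958
+  numid=25,iface=PCM,name='IEC958 Playback Default'
+  numid=24,iface=PCM,name='IEC958 Playback Con Mask'
+  numid=23,iface=PCM,name='IEC958 Playback Pro Mask'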
In addition, you can define the control switches to enable/disable or to
set the raw bit mode. The implementation will depend on the chip, but
diff --git a/Documentation/trace/boottime-trace.rst b/Documentation/trace/boottime-trace.rst
index 89b64334929b..8053898cfeb4 100644
--- a/Documentation/trace/boottime-trace.rst
+++ b/Documentation/trace/boottime-trace.rst
@@ -99,6 +99,12 @@ These options are setting per-event options.
ftrace.[instance.INSTANCE.]event.GROUP.EVENT.enable
Enable GROUP:EVENT tracing.
+ftrace.[instance.INSTANCE.]event.GROUP.enable
+ Enable all event tracing within GROUP.
+
+ftrace.[instance.INSTANCE.]event.enable
+ Enable all event tracing.
+
ftrace.[instance.INSTANCE.]event.GROUP.EVENT.filter = FILTER
Set FILTER rule to the GROUP:EVENT.
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index b71e09f745c3..f99be8062bc8 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
with the event, in nanoseconds. May be
modified by .usecs to have timestamps
interpreted as microseconds.
- cpu int the cpu on which the event occurred.
+ common_cpu int the cpu on which the event occurred.
====================== ==== =======================================
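+
+For example, the renamed field can be used directly as a histogram key (a
+minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing)::
+
+  # echo 'hist:keys=common_cpu' >> /sys/kernel/tracing/events/sched/sched_switch/trigger
+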
Extended error information
diff --git a/Documentation/trace/hwlat_detector.rst b/Documentation/trace/hwlat_detector.rst
index 5739349649c8..de94b499b0bc 100644
--- a/Documentation/trace/hwlat_detector.rst
+++ b/Documentation/trace/hwlat_detector.rst
@@ -76,8 +76,13 @@ in /sys/kernel/tracing:
- tracing_cpumask - the CPUs to move the hwlat thread across
- hwlat_detector/width - specified amount of time to spin within window (usecs)
- hwlat_detector/window - amount of time between (width) runs (usecs)
+ - hwlat_detector/mode - the thread mode
-The hwlat detector's kernel thread will migrate across each CPU specified in
-tracing_cpumask between each window. To limit the migration, either modify
-tracing_cpumask, or modify the hwlat kernel thread (named [hwlatd]) CPU
-affinity directly, and the migration will stop.
+By default, the hwlat detector's kernel thread will migrate across each CPU
+specified in tracing_cpumask at the beginning of a new window, in a round-robin
+fashion. This behavior can be changed via the thread mode;
+the available options are (see the example after this list):
+
+ - none: do not force migration
+ - round-robin: migrate across each CPU specified in cpumask [default]
+ - per-cpu: create one thread for each cpu in tracing_cpumask
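+
+For example, to use one hwlat thread per CPU instead of the default migration
+behavior (a sketch, assuming tracefs is mounted at /sys/kernel/tracing)::
+
+ # echo per-cpu > /sys/kernel/tracing/hwlat_detector/mode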
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index f634b36fd3aa..3769b9b7aed8 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -23,6 +23,8 @@ Linux Tracing Technologies
histogram-design
boottime-trace
hwlat_detector
+ osnoise-tracer
+ timerlat-tracer
intel_th
ring-buffer-design
stm
diff --git a/Documentation/trace/osnoise-tracer.rst b/Documentation/trace/osnoise-tracer.rst
new file mode 100644
index 000000000000..b648cb9bf1f0
--- /dev/null
+++ b/Documentation/trace/osnoise-tracer.rst
@@ -0,0 +1,152 @@
+==============
+OSNOISE Tracer
+==============
+
+In the context of high-performance computing (HPC), the Operating System
+Noise (*osnoise*) refers to the interference experienced by an application
+due to activities inside the operating system. In the context of Linux,
+NMIs, IRQs, SoftIRQs, and any other system thread can cause noise to the
+system. Moreover, hardware-related jobs can also cause noise, for example,
+via SMIs.
+
+hwlat_detector is one of the tools used to identify the most complex
+source of noise: *hardware noise*.
+
+In a nutshell, the hwlat_detector creates a thread that runs
+periodically for a given period. At the beginning of a period, the thread
+disables interrupts and starts sampling. While running, the hwlatd
+thread reads the time in a loop. As interrupts are disabled, threads,
+IRQs, and SoftIRQs cannot interfere with the hwlatd thread. Hence, the
+cause of any gap between two different reads of the time lies either in
+an NMI or in the hardware itself. At the end of the period, hwlatd enables
+interrupts and reports the max observed gap between the reads. It also
+prints an NMI occurrence counter. If the output does not report NMI
+executions, the user can conclude that the hardware is the culprit for
+the latency. hwlat detects NMI execution by observing
+the entry and exit of an NMI.
+
+The osnoise tracer leverages the hwlat_detector by running a
+similar loop with preemption, SoftIRQs and IRQs enabled, thus allowing
+all the sources of *osnoise* during its execution. Using the same approach
+as hwlat, osnoise takes note of the entry and exit points of any
+source of interference, increasing a per-cpu interference counter. The
+osnoise tracer also saves an interference counter for each source of
+interference. The interference counter for NMIs, IRQs, SoftIRQs, and
+threads is increased any time the tool observes these interferences' entry
+events. When noise happens without any interference from the operating
+system level, the hardware noise counter increases, pointing to a
+hardware-related noise. In this way, osnoise can account for any
+source of interference. At the end of the period, the osnoise tracer
+prints the sum of all noise, the max single noise, the percentage of CPU
+available for the thread, and the counters for the noise sources.
+
+Usage
+-----
+
+Write the ASCII text "osnoise" into the current_tracer file of the
+tracing system (generally mounted at /sys/kernel/tracing).
+
+For example::
+
+ [root@f32 ~]# cd /sys/kernel/tracing/
+ [root@f32 tracing]# echo osnoise > current_tracer
+
+It is possible to follow the trace by reading the trace file::
+
+ [root@f32 tracing]# cat trace
+ # tracer: osnoise
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth MAX
+ # || / SINGLE Interference counters:
+ # |||| RUNTIME NOISE % OF CPU NOISE +-----------------------------+
+ # TASK-PID CPU# |||| TIMESTAMP IN US IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD
+ # | | | |||| | | | | | | | | | |
+ <...>-859 [000] .... 81.637220: 1000000 190 99.98100 9 18 0 1007 18 1
+ <...>-860 [001] .... 81.638154: 1000000 656 99.93440 74 23 0 1006 16 3
+ <...>-861 [002] .... 81.638193: 1000000 5675 99.43250 202 6 0 1013 25 21
+ <...>-862 [003] .... 81.638242: 1000000 125 99.98750 45 1 0 1011 23 0
+ <...>-863 [004] .... 81.638260: 1000000 1721 99.82790 168 7 0 1002 49 41
+ <...>-864 [005] .... 81.638286: 1000000 263 99.97370 57 6 0 1006 26 2
+ <...>-865 [006] .... 81.638302: 1000000 109 99.98910 21 3 0 1006 18 1
+ <...>-866 [007] .... 81.638326: 1000000 7816 99.21840 107 8 0 1016 39 19
+
+In addition to the regular trace fields (from TASK-PID to TIMESTAMP), the
+tracer prints a message at the end of each period for each CPU that is
+running an osnoise/ thread. The osnoise specific fields report:
+
+ - The RUNTIME IN US reports the amount of time in microseconds that
+ the osnoise thread kept looping reading the time.
+ - The NOISE IN US reports the sum of noise in microseconds observed
+ by the osnoise tracer during the associated runtime.
+ - The % OF CPU AVAILABLE reports the percentage of CPU available for
+ the osnoise thread during the runtime window.
+ - The MAX SINGLE NOISE IN US reports the maximum single noise observed
+ during the runtime window.
+ - The Interference counters display how many times each of the respective
+ sources of interference happened during the runtime window.
+
+Note that the example above shows a high number of HW noise samples.
+The reason is that this sample was taken on a virtual machine,
+and the host interference is detected as hardware interference.
+
+Tracer options
+---------------------
+
+The tracer has a set of options inside the osnoise directory; they are
+listed below, and a configuration example follows the list:
+
+ - osnoise/cpus: CPUs at which an osnoise thread will execute.
+ - osnoise/period_us: the period of the osnoise thread.
+ - osnoise/runtime_us: how long an osnoise thread will look for noise.
+ - osnoise/stop_tracing_us: stop the system tracing if a single noise
+ higher than the configured value happens. Writing 0 disables this
+ option.
+ - osnoise/stop_tracing_total_us: stop the system tracing if the total
+ noise is higher than the configured value. Writing 0 disables this
+ option.
+ - tracing_threshold: the minimum delta between two time() reads to be
+ considered as noise, in us. When set to 0, the default value will
+ be used, which is currently 5 us.
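+
+For example, a minimal configuration sketch (the CPU list and the values
+below are arbitrary, chosen only for illustration) looks like this::
+
+ [root@f32 tracing]# echo 0-3 > osnoise/cpus
+ [root@f32 tracing]# echo 1000000 > osnoise/period_us
+ [root@f32 tracing]# echo 950000 > osnoise/runtime_us
+ [root@f32 tracing]# echo 100 > osnoise/stop_tracing_total_us
+ [root@f32 tracing]# echo osnoise > current_tracer
+
+With this setup, the osnoise threads run only on CPUs 0-3, sample for
+950 ms out of each 1 s period, and tracing stops as soon as more than
+100 us of total noise is observed within a period.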
+
+Additional Tracing
+------------------
+
+In addition to the tracer, a set of tracepoints was added to
+facilitate the identification of the sources of osnoise; an example of
+how to enable them follows the list below.
+
+ - osnoise:sample_threshold: printed any time noise higher than
+ the configurable tolerance_ns is observed.
+ - osnoise:nmi_noise: noise from NMI, including the duration.
+ - osnoise:irq_noise: noise from an IRQ, including the duration.
+ - osnoise:softirq_noise: noise from a SoftIRQ, including the
+ duration.
+ - osnoise:thread_noise: noise from a thread, including the duration.
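+
+These tracepoints can be enabled like any other trace event. A minimal
+sketch, enabling all of them together with the osnoise tracer, is::
+
+ [root@f32 tracing]# echo osnoise > current_tracer
+ [root@f32 tracing]# echo 1 > events/osnoise/enable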
+
+Note that all the values are *net values*. For example, if another thread
+preempts the osnoise thread while it is running, a thread_noise duration
+starts being accounted. Then, if an IRQ takes place, preempting the
+thread_noise, an irq_noise starts. When the IRQ ends its execution, its
+duration is computed, and this duration is subtracted from the
+thread_noise, in such a way as to avoid the double accounting of the
+IRQ execution. This logic is valid for all sources of noise.
+
+Here is one example of the usage of these tracepoints::
+
+ osnoise/8-961 [008] d.h. 5789.857532: irq_noise: local_timer:236 start 5789.857529929 duration 1845 ns
+ osnoise/8-961 [008] dNh. 5789.858408: irq_noise: local_timer:236 start 5789.858404871 duration 2848 ns
+ migration/8-54 [008] d... 5789.858413: thread_noise: migration/8:54 start 5789.858409300 duration 3068 ns
+ osnoise/8-961 [008] .... 5789.858413: sample_threshold: start 5789.858404555 duration 8812 ns interferences 2
+
+In this example, a noise sample of 8 microseconds was reported in the last
+line, pointing to two interferences. Looking backward in the trace, the
+two previous entries were about the migration thread running after a
+timer IRQ execution. The first event is not part of the noise because
+it took place one millisecond before.
+
+It is worth noticing that the sum of the durations reported by the
+tracepoints is smaller than the eight us reported by the sample_threshold
+event. The reason lies in the overhead of the entry and exit code that
+runs before and after any interference execution. This justifies the
+dual approach: the measuring thread and the tracing.
diff --git a/Documentation/trace/timerlat-tracer.rst b/Documentation/trace/timerlat-tracer.rst
new file mode 100644
index 000000000000..c7cbb557aee7
--- /dev/null
+++ b/Documentation/trace/timerlat-tracer.rst
@@ -0,0 +1,181 @@
+###############
+Timerlat tracer
+###############
+
+The timerlat tracer aims to help preemptive kernel developers find
+sources of wakeup latencies of real-time threads. Like cyclictest,
+the tracer sets a periodic timer that wakes up a thread. The thread then
+computes a *wakeup latency* value as the difference between the *current
+time* and the *absolute time* that the timer was set to expire. The main
+goal of timerlat is to provide tracing that helps kernel developers.
+
+Usage
+-----
+
+Write the ASCII text "timerlat" into the current_tracer file of the
+tracing system (generally mounted at /sys/kernel/tracing).
+
+For example::
+
+ [root@f32 ~]# cd /sys/kernel/tracing/
+ [root@f32 tracing]# echo timerlat > current_tracer
+
+It is possible to follow the trace by reading the trace file::
+
+ [root@f32 tracing]# cat trace
+ # tracer: timerlat
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # || /
+ # |||| ACTIVATION
+ # TASK-PID CPU# |||| TIMESTAMP ID CONTEXT LATENCY
+ # | | | |||| | | | |
+ <idle>-0 [000] d.h1 54.029328: #1 context irq timer_latency 932 ns
+ <...>-867 [000] .... 54.029339: #1 context thread timer_latency 11700 ns
+ <idle>-0 [001] dNh1 54.029346: #1 context irq timer_latency 2833 ns
+ <...>-868 [001] .... 54.029353: #1 context thread timer_latency 9820 ns
+ <idle>-0 [000] d.h1 54.030328: #2 context irq timer_latency 769 ns
+ <...>-867 [000] .... 54.030330: #2 context thread timer_latency 3070 ns
+ <idle>-0 [001] d.h1 54.030344: #2 context irq timer_latency 935 ns
+ <...>-868 [001] .... 54.030347: #2 context thread timer_latency 4351 ns
+
+
+The tracer creates a per-cpu kernel thread with real-time priority that
+prints two lines at every activation. The first is the *timer latency*
+observed at the *hardirq* context before the activation of the thread.
+The second is the *timer latency* observed by the thread. The ACTIVATION
+ID field serves to relate the *irq* execution to its respective *thread*
+execution.
+
+The *irq*/*thread* splitting is important to clarify from which context
+the unexpectedly high value is coming. The *irq* context can be
+delayed by hardware-related actions, such as SMIs, NMIs, IRQs,
+or by a thread masking interrupts. Once the timer fires, the delay
+can also be influenced by blocking caused by threads. For example, by
+postponing the scheduler execution via preempt_disable(), by the
+scheduler execution, or by masking interrupts. Threads can
+also be delayed by the interference from other threads and IRQs.
+
+Tracer options
+---------------------
+
+The timerlat tracer is built on top of the osnoise tracer, so its
+configuration is also done in the osnoise/ config directory. The timerlat
+configs are listed below; a configuration example follows the list:
+
+ - cpus: CPUs at which a timerlat thread will execute.
+ - timerlat_period_us: the period of the timerlat thread.
+ - osnoise/stop_tracing_us: stop the system tracing if a
+ timer latency at the *irq* context higher than the configured
+ value happens. Writing 0 disables this option.
+ - stop_tracing_total_us: stop the system tracing if a
+ timer latency at the *thread* context higher than the configured
+ value happens. Writing 0 disables this option.
+ - print_stack: save the stack of the IRQ occurrence, and print
+ it after the *thread context* event.
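+
+For example, a hypothetical configuration (the values are arbitrary,
+chosen only for illustration) that runs timerlat on CPU 0 with a 1000 us
+period, stopping the trace once a *thread* latency above 100 us is
+observed, looks like this::
+
+ [root@f32 tracing]# echo 0 > osnoise/cpus
+ [root@f32 tracing]# echo 1000 > osnoise/timerlat_period_us
+ [root@f32 tracing]# echo 100 > osnoise/stop_tracing_total_us
+ [root@f32 tracing]# echo timerlat > current_tracer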
+
+timerlat and osnoise
+----------------------------
+
+The timerlat tracer can also take advantage of the osnoise: trace events.
+For example::
+
+ [root@f32 ~]# cd /sys/kernel/tracing/
+ [root@f32 tracing]# echo timerlat > current_tracer
+ [root@f32 tracing]# echo 1 > events/osnoise/enable
+ [root@f32 tracing]# echo 25 > osnoise/stop_tracing_total_us
+ [root@f32 tracing]# tail -10 trace
+ cc1-87882 [005] d..h... 548.771078: #402268 context irq timer_latency 13585 ns
+ cc1-87882 [005] dNLh1.. 548.771082: irq_noise: local_timer:236 start 548.771077442 duration 7597 ns
+ cc1-87882 [005] dNLh2.. 548.771099: irq_noise: qxl:21 start 548.771085017 duration 7139 ns
+ cc1-87882 [005] d...3.. 548.771102: thread_noise: cc1:87882 start 548.771078243 duration 9909 ns
+ timerlat/5-1035 [005] ....... 548.771104: #402268 context thread timer_latency 39960 ns
+
+In this case, the timer latency does not have a single root cause, but
+multiple ones. Firstly, the timer IRQ was delayed for 13 us, which may
+point to a long IRQ-disabled section (see the IRQ stacktrace section
+below). Then the timer interrupt that wakes up the timerlat thread took
+7597 ns, and the qxl:21 device IRQ took 7139 ns. Finally, the cc1 thread
+noise took 9909 ns before the context switch. Such pieces of evidence are
+useful for the developer to use other tracing methods to figure out how
+to debug and optimize the system.
+
+It is worth mentioning that the *duration* values reported
+by the osnoise: events are *net* values. For example, the
+thread_noise does not include the duration of the overhead caused
+by the IRQ execution (which indeed accounted for 12736 ns). But
+the values reported by the timerlat tracer (timerlat_latency)
+are *gross* values.
+
+The art below illustrates a CPU timeline and how the timerlat tracer
+observes it at the top and the osnoise: events at the bottom. Each "-"
+in the timelines means circa 1 us, and the time moves ==>::
+
+ External timer irq thread
+ clock latency latency
+ event 13585 ns 39960 ns
+ | ^ ^
+ v | |
+ |-------------| |
+ |-------------+-------------------------|
+ ^ ^
+ ========================================================================
+ [tmr irq] [dev irq]
+ [another thread...^ v..^ v.......][timerlat/ thread] <-- CPU timeline
+ =========================================================================
+ |-------| |-------|
+ |--^ v-------|
+ | | |
+ | | + thread_noise: 9909 ns
+ | +-> irq_noise: 6139 ns
+ +-> irq_noise: 7597 ns
+
+IRQ stacktrace
+---------------------------
+
+The osnoise/print_stack option is helpful for the cases in which thread
+noise is the major factor in the timer latency, because of disabled
+preemption or IRQs. For example::
+
+ [root@f32 tracing]# echo 500 > osnoise/stop_tracing_total_us
+ [root@f32 tracing]# echo 500 > osnoise/print_stack
+ [root@f32 tracing]# echo timerlat > current_tracer
+ [root@f32 tracing]# tail -21 per_cpu/cpu7/trace
+ insmod-1026 [007] dN.h1.. 200.201948: irq_noise: local_timer:236 start 200.201939376 duration 7872 ns
+ insmod-1026 [007] d..h1.. 200.202587: #29800 context irq timer_latency 1616 ns
+ insmod-1026 [007] dN.h2.. 200.202598: irq_noise: local_timer:236 start 200.202586162 duration 11855 ns
+ insmod-1026 [007] dN.h3.. 200.202947: irq_noise: local_timer:236 start 200.202939174 duration 7318 ns
+ insmod-1026 [007] d...3.. 200.203444: thread_noise: insmod:1026 start 200.202586933 duration 838681 ns
+ timerlat/7-1001 [007] ....... 200.203445: #29800 context thread timer_latency 859978 ns
+ timerlat/7-1001 [007] ....1.. 200.203446: <stack trace>
+ => timerlat_irq
+ => __hrtimer_run_queues
+ => hrtimer_interrupt
+ => __sysvec_apic_timer_interrupt
+ => asm_call_irq_on_stack
+ => sysvec_apic_timer_interrupt
+ => asm_sysvec_apic_timer_interrupt
+ => delay_tsc
+ => dummy_load_1ms_pd_init
+ => do_one_initcall
+ => do_init_module
+ => __do_sys_finit_module
+ => do_syscall_64
+ => entry_SYSCALL_64_after_hwframe
+
+In this case, it is possible to see that the thread added the highest
+contribution to the *timer latency*, and that the stack trace, saved
+during the timerlat IRQ handler, points to a function named
+dummy_load_1ms_pd_init, which had the following code (on purpose)::
+
+ static int __init dummy_load_1ms_pd_init(void)
+ {
+ preempt_disable();
+ mdelay(1);
+ preempt_enable();
+ return 0;
+ }
diff --git a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
index 0ca2cb646666..5ae9cfa2ec55 100644
--- a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
@@ -64,8 +64,6 @@ CPUfreq核心层注册一个cpufreq_driver结构体。
.driver_data - cpufreq驱动程序的特定数据。
- .resolve_freq - 返回最适合目标频率的频率。不过并不能改变频率。
-
.get_intermediate 和 target_intermediate - 用于在改变CPU频率时切换到稳定
的频率。
@@ -76,9 +74,6 @@ CPUfreq核心层注册一个cpufreq_driver结构体。
.exit - 一个指向per-policy清理函数的指针,该函数在cpu热插拔过程的CPU_POST_DEAD
阶段被调用。
- .stop_cpu - 一个指向per-policy停止函数的指针,该函数在cpu热插拔过程的CPU_DOWN_PREPARE
- 阶段被调用。
-
.suspend - 一个指向per-policy暂停函数的指针,该函数在关中断且在该策略的调节器停止
后被调用。
diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst
index 229629e305ca..4a6ed0219494 100644
--- a/Documentation/translations/zh_CN/process/2.Process.rst
+++ b/Documentation/translations/zh_CN/process/2.Process.rst
@@ -47,7 +47,7 @@
(顺便说一句,值得注意的是,合并窗口期间集成的更改并不是凭空产生的;它们是经
提前收集、测试和分级的。稍后将详细描述该过程的工作方式。)
-合并窗口持续大约两周。在这段时间结束时,LinusTorvalds将声明窗口已关闭,并
+合并窗口持续大约两周。在这段时间结束时,Linus Torvalds将声明窗口已关闭,并
释放第一个“rc”内核。例如,对于目标为5.6的内核,在合并窗口结束时发生的释放
将被称为5.6-rc1。-rc1 版本是一个信号,表示合并新特性的时间已经过去,稳定下一
个内核的时间已经到来。
@@ -168,7 +168,7 @@ Greg Kroah-Hartman领导。稳定团队将使用5.x.y编号方案不定期地发
补丁如何进入内核
----------------
-只有一个人可以将补丁合并到主线内核存储库中:LinusTorvalds。但是,在进入
+只有一个人可以将补丁合并到主线内核存储库中:Linus Torvalds。但是,在进入
2.6.38内核的9500多个补丁中,只有112个(大约1.3%)是由Linus自己直接选择的。
内核项目已经发展到一个没有一个开发人员可以在没有支持的情况下检查和选择每个
补丁的规模。内核开发人员处理这种增长的方式是使用围绕信任链构建的助理系统。
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index 2085e7b24eeb..9d6276f82774 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -728,6 +728,8 @@ The uac2 function provides these attributes in its function directory:
c_chmask capture channel mask
c_srate capture sampling rate
c_ssize capture sample size (bytes)
+ c_sync capture synchronization type (async/adaptive)
+ fb_max maximum extra bandwidth in async mode
p_chmask playback channel mask
p_srate playback sampling rate
p_ssize playback sample size (bytes)
diff --git a/Documentation/userspace-api/accelerators/ocxl.rst b/Documentation/userspace-api/accelerators/ocxl.rst
index 14cefc020e2d..db7570d5e50d 100644
--- a/Documentation/userspace-api/accelerators/ocxl.rst
+++ b/Documentation/userspace-api/accelerators/ocxl.rst
@@ -6,7 +6,7 @@ OpenCAPI is an interface between processors and accelerators. It aims
at being low-latency and high-bandwidth. The specification is
developed by the `OpenCAPI Consortium <http://opencapi.org/>`_.
-It allows an accelerator (which could be a FPGA, ASICs, ...) to access
+It allows an accelerator (which could be an FPGA, ASICs, ...) to access
the host memory coherently, using virtual addresses. An OpenCAPI
device can also host its own memory, that can be accessed from the
host.
diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst
index d61219889e49..539e9d4a4860 100644
--- a/Documentation/userspace-api/seccomp_filter.rst
+++ b/Documentation/userspace-api/seccomp_filter.rst
@@ -263,7 +263,7 @@ Userspace can also add file descriptors to the notifying process via
``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
``struct seccomp_notif_addfd`` should be the same ``id`` as in
``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
-like O_EXEC on the file descriptor in the notifying process. If the supervisor
+like O_CLOEXEC on the file descriptor in the notifying process. If the supervisor
wants to inject the file descriptor with a specific number, the
``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to
the specific number to use. If that file descriptor is already open in the
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index c7b165ca70b6..dae68e68ca23 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
use PPIs designated for specific cpus. The irq field is interpreted
like this::
-  bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
+ bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
field: | vcpu2_index | irq_type | vcpu_index | irq_id |
The irq_type field has the following values:
@@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl.
Errors:
====== ============================================================
-  ENOENT   no such register
-  EINVAL   invalid register ID, or no such register or used with VMs in
+ ENOENT no such register
+ EINVAL invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
-  EPERM    (arm64) register access not allowed before vcpu finalization
+ EPERM (arm64) register access not allowed before vcpu finalization
====== ============================================================
(These error codes are indicative only: do not rely on a specific error
@@ -2590,10 +2590,10 @@ following id bit patterns::
Errors include:
======== ============================================================
-  ENOENT   no such register
-  EINVAL   invalid register ID, or no such register or used with VMs in
+ ENOENT no such register
+ EINVAL invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
-  EPERM    (arm64) register access not allowed before vcpu finalization
+ EPERM (arm64) register access not allowed before vcpu finalization
======== ============================================================
(These error codes are indicative only: do not rely on a specific error
@@ -3112,13 +3112,13 @@ current state. "addr" is ignored.
Errors:
====== =================================================================
-  EINVAL    the target is unknown, or the combination of features is invalid.
-  ENOENT    a features bit specified is unknown.
+ EINVAL the target is unknown, or the combination of features is invalid.
+ ENOENT a features bit specified is unknown.
====== =================================================================
This tells KVM what type of CPU to present to the guest, and what
-optional features it should have.  This will cause a reset of the cpu
-registers to their initial values.  If this is not called, KVM_RUN will
+optional features it should have. This will cause a reset of the cpu
+registers to their initial values. If this is not called, KVM_RUN will
return ENOEXEC for that vcpu.
The initial values are defined as:
@@ -3239,8 +3239,8 @@ VCPU matching underlying host.
Errors:
===== ==============================================================
-  E2BIG     the reg index list is too big to fit in the array specified by
-             the user (the number required will be written into n).
+ E2BIG the reg index list is too big to fit in the array specified by
+ the user (the number required will be written into n).
===== ==============================================================
::
@@ -3288,7 +3288,7 @@ specific device.
ARM/arm64 divides the id field into two parts, a device id and an
address type id specific to the individual device::
-  bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
+ bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
field: | 0x00000000 | device id | addr type id |
ARM/arm64 currently only require this when using the in-kernel GIC
@@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
trap and emulate MSRs that are outside of the scope of KVM as well as
limit the attack surface on KVM's MSR emulation code.
-8.28 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
-----------------------------
Architectures: x86
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 35eca377543d..88fa495abbac 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -25,10 +25,10 @@ On x86:
- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
-- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is
- taken inside kvm->arch.mmu_lock, and cannot be taken without already
- holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
- there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
+ kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+ cannot be taken without already holding kvm->arch.mmu_lock (typically with
+ ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
Everything else is a leaf: no other lock is taken inside the critical
sections.
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 09e28507f5b2..a14c2938e7af 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -332,7 +332,7 @@ between device driver specific code and shared common code:
walks to fill in the ``args->src`` array with PFNs to be migrated.
The ``invalidate_range_start()`` callback is passed a
``struct mmu_notifier_range`` with the ``event`` field set to
- ``MMU_NOTIFY_MIGRATE`` and the ``migrate_pgmap_owner`` field set to
+ ``MMU_NOTIFY_MIGRATE`` and the ``owner`` field set to
the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This is
allows the device driver to skip the invalidation callback and only
invalidate device private MMU mappings that are actually migrating.
@@ -405,6 +405,23 @@ between device driver specific code and shared common code:
The lock can now be released.
+Exclusive access memory
+=======================
+
+Some devices have features such as atomic PTE bits that can be used to implement
+atomic access to system memory. To support atomic operations to a shared virtual
+memory page such a device needs access to that page which is exclusive of any
+userspace access from the CPU. The ``make_device_exclusive_range()`` function
+can be used to make a memory range inaccessible from userspace.
+
+This replaces all mappings for pages in the given range with special swap
+entries. Any attempt to access the swap entry results in a fault which is
+resolved by replacing the entry with the original mapping. A driver gets
+notified that the mapping has been changed by MMU notifiers, after which point
+it will no longer have exclusive access to the page. Exclusive access is
+guaranteed to last until the driver drops the page lock and page reference, at
+which point any CPU faults on the page may proceed as described.
+
Memory cgroup (memcg) and rss accounting
========================================
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst
index 0e1490524f53..eae3af17f2d9 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -389,14 +389,14 @@ mlocked, munlock_vma_page() updates that zone statistics for the number of
mlocked pages. Note, however, that at this point we haven't checked whether
the page is mapped by other VM_LOCKED VMAs.
-We can't call try_to_munlock(), the function that walks the reverse map to
+We can't call page_mlock(), the function that walks the reverse map to
check for other VM_LOCKED VMAs, without first isolating the page from the LRU.
-try_to_munlock() is a variant of try_to_unmap() and thus requires that the page
+page_mlock() is a variant of try_to_unmap() and thus requires that the page
not be on an LRU list [more on these below]. However, the call to
-isolate_lru_page() could fail, in which case we couldn't try_to_munlock(). So,
+isolate_lru_page() could fail, in which case we can't call page_mlock(). So,
we go ahead and clear PG_mlocked up front, as this might be the only chance we
-have. If we can successfully isolate the page, we go ahead and
-try_to_munlock(), which will restore the PG_mlocked flag and update the zone
+have. If we can successfully isolate the page, we go ahead and call
+page_mlock(), which will restore the PG_mlocked flag and update the zone
page statistics if it finds another VMA holding the page mlocked. If we fail
to isolate the page, we'll have left a potentially mlocked page on the LRU.
This is fine, because we'll catch it later if and if vmscan tries to reclaim
@@ -545,31 +545,24 @@ munlock or munmap system calls, mm teardown (munlock_vma_pages_all), reclaim,
holepunching, and truncation of file pages and their anonymous COWed pages.
-try_to_munlock() Reverse Map Scan
+page_mlock() Reverse Map Scan
---------------------------------
-.. warning::
- [!] TODO/FIXME: a better name might be page_mlocked() - analogous to the
- page_referenced() reverse map walker.
-
When munlock_vma_page() [see section :ref:`munlock()/munlockall() System Call
Handling <munlock_munlockall_handling>` above] tries to munlock a
page, it needs to determine whether or not the page is mapped by any
VM_LOCKED VMA without actually attempting to unmap all PTEs from the
page. For this purpose, the unevictable/mlock infrastructure
-introduced a variant of try_to_unmap() called try_to_munlock().
+introduced a variant of try_to_unmap() called page_mlock().
-try_to_munlock() calls the same functions as try_to_unmap() for anonymous and
-mapped file and KSM pages with a flag argument specifying unlock versus unmap
-processing. Again, these functions walk the respective reverse maps looking
-for VM_LOCKED VMAs. When such a VMA is found, as in the try_to_unmap() case,
-the functions mlock the page via mlock_vma_page() and return SWAP_MLOCK. This
-undoes the pre-clearing of the page's PG_mlocked done by munlock_vma_page.
+page_mlock() walks the respective reverse maps looking for VM_LOCKED VMAs. When
+such a VMA is found the page is mlocked via mlock_vma_page(). This undoes the
+pre-clearing of the page's PG_mlocked done by munlock_vma_page.
-Note that try_to_munlock()'s reverse map walk must visit every VMA in a page's
+Note that page_mlock()'s reverse map walk must visit every VMA in a page's
reverse map to determine that a page is NOT mapped into any VM_LOCKED VMA.
However, the scan can terminate when it encounters a VM_LOCKED VMA.
-Although try_to_munlock() might be called a great many times when munlocking a
+Although page_mlock() might be called a great many times when munlocking a
large region or tearing down a large address space that has been mlocked via
mlockall(), overall this is a fairly rare event.
@@ -602,7 +595,7 @@ inactive lists to the appropriate node's unevictable list.
shrink_inactive_list() should only see SHM_LOCK'd pages that became SHM_LOCK'd
after shrink_active_list() had moved them to the inactive list, or pages mapped
into VM_LOCKED VMAs that munlock_vma_page() couldn't isolate from the LRU to
-recheck via try_to_munlock(). shrink_inactive_list() won't notice the latter,
+recheck via page_mlock(). shrink_inactive_list() won't notice the latter,
but will pass on to shrink_page_list().
shrink_page_list() again culls obviously unevictable pages that it could
diff --git a/Documentation/w1/slaves/w1_ds2438.rst b/Documentation/w1/slaves/w1_ds2438.rst
index a29309a3f8e5..4fa671fbc93f 100644
--- a/Documentation/w1/slaves/w1_ds2438.rst
+++ b/Documentation/w1/slaves/w1_ds2438.rst
@@ -22,7 +22,7 @@ is also often used in weather stations and applications such as: rain gauge,
wind speed/direction measuring, humidity sensing, etc.
Current support is provided through the following sysfs files (all files
-except "iad" are readonly):
+except "iad" and "offset" are readonly):
"iad"
-----
@@ -44,6 +44,23 @@ Internally when this file is read, the additional CRC byte is also obtained
from the slave device. If it is correct, the 8 bytes page data are passed
to userspace, otherwise an I/O error is returned.
+"page1"
+-------
+This file provides full 8 bytes of the chip Page 1 (01h).
+This page contains the ICA, elapsed time meter and current offset data of the DS2438.
+Internally when this file is read, the additional CRC byte is also obtained
+from the slave device. If it is correct, the 8 bytes page data are passed
+to userspace, otherwise an I/O error is returned.
+
+"offset"
+--------
+This file controls the 2-byte Offset Register of the chip.
+Writing a 2-byte value will change the Offset Register, which changes the
+current measurement done by the chip. Changing this register to the two's complement
+of the current register while forcing zero current through the load will calibrate
+the chip, canceling offset errors in the current ADC.
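+
+For example, assuming the slave device enumerates under w1 as
+"26-0123456789ab" (a hypothetical id), and assuming the file accepts a
+raw 2-byte write, a calibration value (0x12 0x34 here is arbitrary)
+could be written with::
+
+ printf '\x12\x34' > /sys/bus/w1/devices/26-0123456789ab/offset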
+
+
"temperature"
-------------
Opening and reading this file initiates the CONVERT_T (temperature conversion)
diff --git a/Documentation/x86/elf_auxvec.rst b/Documentation/x86/elf_auxvec.rst
new file mode 100644
index 000000000000..18e4744717f9
--- /dev/null
+++ b/Documentation/x86/elf_auxvec.rst
@@ -0,0 +1,53 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+x86-specific ELF Auxiliary Vectors
+==================================
+
+This document describes the semantics of the x86 auxiliary vectors.
+
+Introduction
+============
+
+ELF Auxiliary vectors enable the kernel to efficiently provide
+configuration-specific parameters to userspace. In this example, a program
+allocates an alternate stack based on the kernel-provided size::
+
+ #include <sys/auxv.h>
+ #include <elf.h>
+ #include <signal.h>
+ #include <stdlib.h>
+ #include <assert.h>
+ #include <err.h>
+
+ #ifndef AT_MINSIGSTKSZ
+ #define AT_MINSIGSTKSZ 51
+ #endif
+
+ ....
+ stack_t ss;
+
+ /* Query the kernel-provided minimum before sizing the stack. */
+ ss.ss_size = getauxval(AT_MINSIGSTKSZ) + SIGSTKSZ;
+ ss.ss_flags = 0;
+
+ ss.ss_sp = malloc(ss.ss_size);
+ assert(ss.ss_sp);
+
+ if (sigaltstack(&ss, NULL))
+ err(1, "sigaltstack");
+
+
+The exposed auxiliary vectors
+=============================
+
+AT_SYSINFO is used for locating the vsyscall entry point. It is not
+exported in 64-bit mode.
+
+AT_SYSINFO_EHDR is the start address of the page containing the vDSO.
+
+AT_MINSIGSTKSZ denotes the minimum stack size required by the kernel to
+deliver a signal to user-space. AT_MINSIGSTKSZ accounts for the space
+consumed by the kernel to accommodate the user context for the current
+hardware configuration. It does not include subsequent user-space stack
+consumption, which must be added by the user. (In the example above,
+user-space adds SIGSTKSZ to AT_MINSIGSTKSZ.)
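+
+The auxiliary vector can also be inspected without writing any code:
+glibc's dynamic loader dumps it when the LD_SHOW_AUXV environment
+variable is set (depending on the glibc version, AT_MINSIGSTKSZ may be
+printed by name or only as a numeric tag)::
+
+ $ LD_SHOW_AUXV=1 /bin/true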
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index 0004f5d2283e..383048396336 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -36,3 +36,4 @@ x86-specific Documentation
sva
sgx
features
+ elf_auxvec
diff --git a/LICENSES/dual/CC-BY-4.0 b/LICENSES/dual/CC-BY-4.0
index 45a81b8e4669..869cad3d1643 100644
--- a/LICENSES/dual/CC-BY-4.0
+++ b/LICENSES/dual/CC-BY-4.0
@@ -392,7 +392,7 @@ Section 8 -- Interpretation.
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
-will be considered the “Licensor.” The text of the Creative Commons
+will be considered the "Licensor." The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
diff --git a/MAINTAINERS b/MAINTAINERS
index e5891bbf5443..c6b8a720c0bc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -299,7 +299,6 @@ M: William Breathitt Gray <vilhelm.gray@gmail.com>
M: Syed Nayyar Waris <syednwaris@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
F: drivers/counter/104-quad-8.c
ACCES PCI-IDIO-16 GPIO DRIVER
@@ -431,6 +430,14 @@ W: https://01.org/linux-acpi
B: https://bugzilla.kernel.org
F: drivers/acpi/acpi_video.c
+ACPI VIOT DRIVER
+M: Jean-Philippe Brucker <jean-philippe@linaro.org>
+L: linux-acpi@vger.kernel.org
+L: iommu@lists.linux-foundation.org
+S: Maintained
+F: drivers/acpi/viot.c
+F: include/linux/acpi_viot.h
+
ACPI WMI DRIVER
L: platform-driver-x86@vger.kernel.org
S: Orphan
@@ -438,7 +445,7 @@ F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
ACRN HYPERVISOR SERVICE MODULE
-M: Shuo Liu <shuo.a.liu@intel.com>
+M: Fei Li <fei1.li@intel.com>
L: acrn-dev@lists.projectacrn.org (subscribers-only)
S: Supported
W: https://projectacrn.org
@@ -745,6 +752,12 @@ L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/allwinner/
+ALLWINNER HARDWARE SPINLOCK SUPPORT
+M: Wilken Gottwalt <wilken.gottwalt@posteo.net>
+S: Maintained
+F: Documentation/devicetree/bindings/hwlock/allwinner,sun6i-hwspinlock.yaml
+F: drivers/hwspinlock/sun6i_hwspinlock.c
+
ALLWINNER THERMAL DRIVER
M: Vasily Khoruzhick <anarsoul@gmail.com>
M: Yangtao Li <tiny.windzz@gmail.com>
@@ -783,6 +796,14 @@ M: Ley Foon Tan <ley.foon.tan@intel.com>
S: Maintained
F: drivers/mailbox/mailbox-altera.c
+ALTERA MSGDMA IP CORE DRIVER
+M: Olivier Dautricourt <olivier.dautricourt@orolia.com>
+R: Stefan Roese <sr@denx.de>
+L: dmaengine@vger.kernel.org
+S: Odd Fixes
+F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
+F: drivers/dma/altera-msgdma.c
+
ALTERA PIO DRIVER
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-gpio@vger.kernel.org
@@ -912,6 +933,7 @@ F: drivers/video/fbdev/geode/
AMD IOMMU (AMD-VI)
M: Joerg Roedel <joro@8bytes.org>
+R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
L: iommu@lists.linux-foundation.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@ -1311,6 +1333,7 @@ W: http://www.aquantia.com
F: drivers/net/ethernet/aquantia/atlantic/aq_ptp*
ARASAN NAND CONTROLLER DRIVER
+M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
L: linux-mtd@lists.infradead.org
S: Maintained
@@ -1452,6 +1475,22 @@ S: Odd Fixes
F: drivers/amba/
F: include/linux/amba/bus.h
+ARM PRIMECELL PL35X NAND CONTROLLER DRIVER
+M: Miquel Raynal <miquel.raynal@bootlin.com>
+M: Naga Sureshkumar Relli <nagasure@xilinx.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
+F: drivers/mtd/nand/raw/pl35x-nand-controller.c
+
+ARM PRIMECELL PL35X SMC DRIVER
+M: Miquel Raynal <miquel.raynal@bootlin.com>
+M: Naga Sureshkumar Relli <nagasure@xilinx.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/mtd/arm,pl353-smc.yaml
+F: drivers/memory/pl353-smc.c
+
ARM PRIMECELL CLCD PL110 DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Odd Fixes
@@ -1812,6 +1851,7 @@ F: Documentation/devicetree/bindings/arm/gemini.txt
F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
+F: arch/arm/boot/dts/gemini*
F: arch/arm/mach-gemini/
F: drivers/crypto/gemini/
F: drivers/net/ethernet/cortina/
@@ -2172,6 +2212,7 @@ M: Daniel Palmer <daniel@thingy.jp>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://linux-chenxing.org/
+T: git git://github.com/linux-chenxing/linux.git
F: Documentation/devicetree/bindings/arm/mstar/*
F: Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml
F: Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
@@ -2179,6 +2220,7 @@ F: arch/arm/boot/dts/mstar-*
F: arch/arm/mach-mstar/
F: drivers/clk/mstar/
F: drivers/gpio/gpio-msc313.c
+F: drivers/watchdog/msc313e_wdt.c
F: include/dt-bindings/clock/mstar-*
F: include/dt-bindings/gpio/msc313-gpio.h
@@ -2440,9 +2482,12 @@ F: drivers/*/*/*s3c24*
F: drivers/*/*s3c24*
F: drivers/*/*s3c64xx*
F: drivers/*/*s5pv210*
+F: drivers/clocksource/samsung_pwm_timer.c
F: drivers/memory/samsung/
+F: drivers/pwm/pwm-samsung.c
F: drivers/soc/samsung/
F: drivers/tty/serial/samsung*
+F: include/clocksource/samsung_pwm.h
F: include/linux/platform_data/*s3c*
F: include/linux/serial_s3c.h
F: include/linux/soc/samsung/
@@ -3737,6 +3782,17 @@ S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
F: drivers/gpio/gpio-bcm-kona.c
+BROADCOM MPI3 STORAGE CONTROLLER DRIVER
+M: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
+M: Kashyap Desai <kashyap.desai@broadcom.com>
+M: Sumit Saxena <sumit.saxena@broadcom.com>
+M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+L: mpi3mr-linuxdrv.pdl@broadcom.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+W: https://www.broadcom.com/support/storage
+F: drivers/scsi/mpi3mr/
+
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
M: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
@@ -3810,6 +3866,16 @@ L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/mtd/nand/raw/brcmnand/
+BROADCOM STB PCIE DRIVER
+M: Jim Quinlan <jim2101024@gmail.com>
+M: Nicolas Saenz Julienne <nsaenz@kernel.org>
+M: Florian Fainelli <f.fainelli@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F: drivers/pci/controller/pcie-brcmstb.c
+
BROADCOM SYSTEMPORT ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
@@ -4042,7 +4108,9 @@ W: https://github.com/linux-can
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
F: Documentation/devicetree/bindings/net/can/
+F: Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
F: drivers/net/can/
+F: drivers/phy/phy-can-transceiver.c
F: include/linux/can/bittiming.h
F: include/linux/can/dev.h
F: include/linux/can/led.h
@@ -4440,7 +4508,7 @@ L: clang-built-linux@googlegroups.com
S: Supported
W: https://clangbuiltlinux.github.io/
B: https://github.com/ClangBuiltLinux/linux/issues
-C: irc://chat.freenode.net/clangbuiltlinux
+C: irc://irc.libera.chat/clangbuiltlinux
F: Documentation/kbuild/llvm.rst
F: include/linux/compiler-clang.h
F: scripts/clang-tools/
@@ -4703,7 +4771,7 @@ COUNTER SUBSYSTEM
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/ABI/testing/sysfs-bus-counter*
+F: Documentation/ABI/testing/sysfs-bus-counter
F: Documentation/driver-api/generic-counter.rst
F: drivers/counter/
F: include/linux/counter.h
@@ -5026,11 +5094,10 @@ S: Maintained
F: drivers/input/touchscreen/cy8ctma140.c
CYTTSP TOUCHSCREEN DRIVER
-M: Ferruh Yigit <fery@cypress.com>
+M: Linus Walleij <linus.walleij@linaro.org>
L: linux-input@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/input/touchscreen/cyttsp*
-F: include/linux/input/cyttsp.h
D-LINK DIR-685 TOUCHKEYS DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
@@ -6792,6 +6859,15 @@ S: Supported
W: http://www.broadcom.com
F: drivers/scsi/lpfc/
+EMULEX/BROADCOM EFCT FC/FCOE SCSI TARGET DRIVER
+M: James Smart <james.smart@broadcom.com>
+M: Ram Vegesna <ram.vegesna@broadcom.com>
+L: linux-scsi@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+W: http://www.broadcom.com
+F: drivers/scsi/elx/
+
ENE CB710 FLASH CARD READER DRIVER
M: Michał Mirosław <mirq-linux@rere.qmqm.pl>
S: Maintained
@@ -6961,7 +7037,7 @@ F: drivers/iommu/exynos-iommu.c
F2FS FILE SYSTEM
M: Jaegeuk Kim <jaegeuk@kernel.org>
-M: Chao Yu <yuchao0@huawei.com>
+M: Chao Yu <chao@kernel.org>
L: linux-f2fs-devel@lists.sourceforge.net
S: Maintained
W: https://f2fs.wiki.kernel.org/
@@ -7128,6 +7204,13 @@ F: include/linux/firewire.h
F: include/uapi/linux/firewire*.h
F: tools/firewire/
+FIRMWARE FRAMEWORK FOR ARMV8-A
+M: Sudeep Holla <sudeep.holla@arm.com>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: drivers/firmware/arm_ffa/
+F: include/linux/arm_ffa.h
+
FIRMWARE LOADER (request_firmware)
M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -7146,7 +7229,6 @@ FLEXTIMER FTM-QUADDEC DRIVER
M: Patrick Havelange <patrick.havelange@essensium.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec
F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt
F: drivers/counter/ftm-quaddec.c
@@ -7162,6 +7244,13 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/joystick/fsia6b.c
+FOCUSRITE SCARLETT GEN 2/3 MIXER DRIVER
+M: Geoffrey D. Bennett <g@b4.vu>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+F: sound/usb/mixer_scarlett_gen2.c
+
FORCEDETH GIGABIT ETHERNET DRIVER
M: Rain River <rain.1986.08.12@gmail.com>
M: Zhu Yanjun <zyjzyj2000@gmail.com>
@@ -7704,6 +7793,14 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/resistive-adc-touch.c
+GENERIC STRING LIBRARY
+R: Andy Shevchenko <andy@kernel.org>
+S: Maintained
+F: lib/string.c
+F: lib/string_helpers.c
+F: lib/test_string.c
+F: lib/test-string_helpers.c
+
GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin" <mst@redhat.com>
L: kvm@vger.kernel.org
@@ -7771,9 +7868,9 @@ S: Maintained
F: drivers/input/touchscreen/goodix.c
GOOGLE ETHERNET DRIVERS
-M: Catherine Sullivan <csully@google.com>
-R: Sagi Shahar <sagis@google.com>
-R: Jon Olson <jonolson@google.com>
+M: Jeroen de Borst <jeroendb@google.com>
+R: Catherine Sullivan <csully@google.com>
+R: David Awogbemila <awogbemila@google.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -8339,6 +8436,13 @@ S: Maintained
W: http://www.hisilicon.com
F: drivers/spi/spi-hisi-kunpeng.c
+HISILICON SPMI CONTROLLER DRIVER FOR HIKEY 970
+M: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
+F: drivers/spmi/hisi-spmi-controller.c
+
HISILICON STAGING DRIVERS FOR HIKEY 960/970
M: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
S: Maintained
@@ -8906,7 +9010,7 @@ IIO MULTIPLEXER
M: Peter Rosin <peda@axentia.se>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
+F: Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.yaml
F: drivers/iio/multiplexer/iio-mux.c
IIO SCMI BASED DRIVER
@@ -9311,6 +9415,7 @@ F: include/linux/soc/ixp4xx/qmgr.h
INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
+F: Documentation/devicetree/bindings/display/intel,ixp46x-rng.yaml
F: drivers/char/hw_random/ixp4xx-rng.c
INTEL KEEM BAY DRM DRIVER
@@ -9362,16 +9467,6 @@ F: drivers/hwmon/intel-m10-bmc-hwmon.c
F: drivers/mfd/intel-m10-bmc.c
F: include/linux/mfd/intel-m10-bmc.h
-INTEL MAX 10 BMC MFD DRIVER
-M: Xu Yilun <yilun.xu@intel.com>
-R: Tom Rix <trix@redhat.com>
-S: Maintained
-F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
-F: Documentation/hwmon/intel-m10-bmc-hwmon.rst
-F: drivers/hwmon/intel-m10-bmc-hwmon.c
-F: drivers/mfd/intel-m10-bmc.c
-F: include/linux/mfd/intel-m10-bmc.h
-
INTEL MENLOW THERMAL DRIVER
M: Sujith Thomas <sujith.thomas@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -9427,6 +9522,11 @@ L: linux-pm@vger.kernel.org
S: Supported
F: drivers/cpufreq/intel_pstate.c
+INTEL QUADRATURE ENCODER PERIPHERAL DRIVER
+M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+L: linux-iio@vger.kernel.org
+F: drivers/counter/intel-qep.c
+
INTEL SCU DRIVERS
M: Mika Westerberg <mika.westerberg@linux.intel.com>
S: Maintained
@@ -10966,7 +11066,7 @@ MARVELL ARMADA 3700 PHY DRIVERS
M: Miquel Raynal <miquel.raynal@bootlin.com>
S: Maintained
F: Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
-F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
+F: Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
F: drivers/phy/marvell/phy-mvebu-a3700-comphy.c
F: drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -11237,6 +11337,12 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/radio-maxiradio*
+MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+R: Yasushi SHOJI <yashi@spacecubics.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: drivers/net/can/usb/mcba_usb.c
+
MCAN MMIO DEVICE DRIVER
M: Chandrasekar Ramakrishnan <rcsekar@samsung.com>
L: linux-can@vger.kernel.org
@@ -11668,6 +11774,7 @@ F: drivers/char/hw_random/mtk-rng.c
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang@mediatek.com>
M: Landen Chao <Landen.Chao@mediatek.com>
+M: DENG Qingfang <dqfext@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mt7530.*
@@ -11876,6 +11983,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
F: Documentation/devicetree/bindings/memory-controllers/
F: drivers/memory/
F: include/dt-bindings/memory/
+F: include/memory/
MEMORY FREQUENCY SCALING DRIVERS FOR NVIDIA TEGRA
M: Dmitry Osipenko <digetx@gmail.com>
@@ -11900,6 +12008,7 @@ F: include/linux/mmzone.h
F: include/linux/pagewalk.h
F: include/linux/vmalloc.h
F: mm/
+F: tools/testing/selftests/vm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: Miquel Raynal <miquel.raynal@bootlin.com>
@@ -12468,7 +12577,8 @@ S: Maintained
F: drivers/net/phy/motorcomm.c
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
-S: Orphan
+M: Jiri Slaby <jirislaby@kernel.org>
+S: Maintained
F: Documentation/driver-api/serial/moxa-smartio.rst
F: drivers/tty/mxser.*
@@ -13295,6 +13405,13 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
+NXP/Goodix TFA989X (TFA1) DRIVER
+M: Stephan Gerhold <stephan@gerhold.net>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
+F: sound/soc/codecs/tfa989x.c
+
NXP-NCI NFC DRIVER
R: Charles Gorand <charles.gorand@effinnov.com>
L: linux-nfc@lists.01.org (subscribers-only)
@@ -13415,7 +13532,7 @@ M: Santosh Shilimkar <ssantosh@kernel.org>
M: Kevin Hilman <khilman@kernel.org>
L: linux-omap@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/gpio/gpio-omap.txt
+F: Documentation/devicetree/bindings/gpio/ti,omap-gpio.yaml
F: drivers/gpio/gpio-omap.c
OMAP HARDWARE SPINLOCK SUPPORT
@@ -13436,12 +13553,6 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod*data*
-OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M: Benoît Cousson <bcousson@baylibre.com>
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
-
OMAP HWMOD SUPPORT
M: Benoît Cousson <bcousson@baylibre.com>
M: Paul Walmsley <paul@pwsan.com>
@@ -13454,7 +13565,7 @@ M: Vignesh R <vigneshr@ti.com>
L: linux-omap@vger.kernel.org
L: linux-i2c@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/i2c/i2c-omap.txt
+F: Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
F: drivers/i2c/busses/i2c-omap.c
OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
@@ -14032,8 +14143,7 @@ F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
F: drivers/pci/controller/pci-aardvark.c
PCI DRIVER FOR ALTERA PCIE IP
-M: Ley Foon Tan <ley.foon.tan@intel.com>
-L: rfi@lists.rocketboards.org (moderated for non-subscribers)
+M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/altera-pcie.txt
@@ -14106,6 +14216,12 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
F: drivers/pci/controller/dwc/pcie-fu740.c
+PCI DRIVER FOR INTEL IXP4XX
+M: Linus Walleij <linus.walleij@linaro.org>
+S: Maintained
+F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
+F: drivers/pci/controller/pci-ixp4xx.c
+
PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Jonathan Derrick <jonathan.derrick@intel.com>
L: linux-pci@vger.kernel.org
@@ -14231,8 +14347,7 @@ S: Supported
F: Documentation/PCI/pci-error-recovery.rst
PCI MSI DRIVER FOR ALTERA MSI IP
-M: Ley Foon Tan <ley.foon.tan@intel.com>
-L: rfi@lists.rocketboards.org (moderated for non-subscribers)
+M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
@@ -14325,6 +14440,13 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
F: drivers/pci/controller/dwc/pcie-histb.c
+PCIE DRIVER FOR INTEL LGM GW SOC
+M: Rahul Tanwar <rtanwar@maxlinear.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+F: drivers/pci/controller/dwc/pcie-intel-gw.c
+
PCIE DRIVER FOR MEDIATEK
M: Ryder Lee <ryder.lee@mediatek.com>
M: Jianjun Wang <jianjun.wang@mediatek.com>
@@ -14753,6 +14875,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
F: Documentation/ABI/testing/sysfs-class-power
F: Documentation/devicetree/bindings/power/supply/
F: drivers/power/supply/
+F: include/linux/power/
F: include/linux/power_supply.h
POWERNV OPERATOR PANEL LCD DISPLAY DRIVER
@@ -14911,6 +15034,13 @@ F: drivers/net/phy/dp83640*
F: drivers/ptp/*
F: include/linux/ptp_cl*
+PTP VIRTUAL CLOCK SUPPORT
+M: Yangbo Lu <yangbo.lu@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/ptp/ptp_vclock.c
+F: net/ethtool/phc_vclocks.c
+
PTRACE SUPPORT
M: Oleg Nesterov <oleg@redhat.com>
S: Maintained
@@ -15361,6 +15491,8 @@ M: Pan, Xinhui <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
+B: https://gitlab.freedesktop.org/drm/amd/-/issues
+C: irc://irc.oftc.net/radeon
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
@@ -15682,12 +15814,13 @@ F: drivers/clk/renesas/
RENESAS EMEV2 I2C DRIVER
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+L: linux-renesas-soc@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
+F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET DRIVERS
-R: Sergei Shtylyov <sergei.shtylyov@gmail.com>
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
F: Documentation/devicetree/bindings/net/renesas,*.yaml
@@ -15703,9 +15836,10 @@ F: drivers/iio/adc/rcar-gyroadc.c
RENESAS R-CAR I2C DRIVERS
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+L: linux-renesas-soc@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/i2c/renesas,i2c.txt
-F: Documentation/devicetree/bindings/i2c/renesas,iic.txt
+F: Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
+F: Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
@@ -15720,8 +15854,9 @@ F: drivers/thermal/rcar_thermal.c
RENESAS RIIC DRIVER
M: Chris Brandt <chris.brandt@renesas.com>
+L: linux-renesas-soc@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/i2c/renesas,riic.txt
+F: Documentation/devicetree/bindings/i2c/renesas,riic.yaml
F: drivers/i2c/busses/i2c-riic.c
RENESAS USB PHY DRIVER
@@ -16136,7 +16271,7 @@ W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/scsi/zfcp_*
S3C ADC BATTERY DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
L: linux-samsung-soc@vger.kernel.org
S: Odd Fixes
F: drivers/power/supply/s3c_adc_battery.c
@@ -16602,6 +16737,8 @@ M: Tomasz Duszynski <tduszyns@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
F: drivers/iio/chemical/sps30.c
+F: drivers/iio/chemical/sps30_i2c.c
+F: drivers/iio/chemical/sps30_serial.c
SERIAL DEVICE BUS
M: Rob Herring <robh@kernel.org>
@@ -17695,7 +17832,7 @@ F: include/linux/sync_file.h
F: include/uapi/linux/sync_file.h
SYNOPSYS ARC ARCHITECTURE
-M: Vineet Gupta <vgupta@synopsys.com>
+M: Vineet Gupta <vgupta@kernel.org>
L: linux-snps-arc@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
@@ -17828,7 +17965,7 @@ M: Sudeep Holla <sudeep.holla@arm.com>
R: Cristian Marussi <cristian.marussi@arm.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
-F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt
+F: Documentation/devicetree/bindings/firmware/arm,sc[mp]i.yaml
F: drivers/clk/clk-sc[mp]i.c
F: drivers/cpufreq/sc[mp]i-cpufreq.c
F: drivers/firmware/arm_scmi/
@@ -18048,6 +18185,7 @@ F: include/media/i2c/tw9910.h
TEE SUBSYSTEM
M: Jens Wiklander <jens.wiklander@linaro.org>
+R: Sumit Garg <sumit.garg@linaro.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: Documentation/staging/tee.rst
@@ -18201,11 +18339,11 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml
F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
-F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+F: Documentation/devicetree/bindings/clock/ti,sci-clk.yaml
F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml
F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml
-F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt
-F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+F: Documentation/devicetree/bindings/reset/ti,sci-reset.yaml
+F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
F: drivers/clk/keystone/sci-clk.c
F: drivers/firmware/ti_sci*
F: drivers/irqchip/irq-ti-sci-inta.c
@@ -18226,6 +18364,13 @@ F: Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
F: Documentation/hwmon/tps23861.rst
F: drivers/hwmon/tps23861.c
+TEXAS INSTRUMENTS' TMP117 TEMPERATURE SENSOR DRIVER
+M: Puranjay Mohan <puranjay12@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
+F: drivers/iio/temperature/tmp117.c
+
THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -18389,7 +18534,7 @@ TI DAVINCI SERIES GPIO DRIVER
M: Keerthy <j-keerthy@ti.com>
L: linux-gpio@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+F: Documentation/devicetree/bindings/gpio/gpio-davinci.yaml
F: drivers/gpio/gpio-davinci.c
TI DAVINCI SERIES MEDIA DRIVER
@@ -18462,6 +18607,14 @@ S: Supported
F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
F: drivers/nfc/trf7970a.c
+TI TSC2046 ADC DRIVER
+M: Oleksij Rempel <o.rempel@pengutronix.de>
+R: kernel@pengutronix.de
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/adc/ti,tsc2046.yaml
+F: drivers/iio/adc/ti-tsc2046.c
+
TI TWL4030 SERIES SOC CODEC DRIVER
M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -18685,9 +18838,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F: Documentation/driver-api/serial/
F: drivers/tty/
F: drivers/tty/serial/serial_core.c
+F: include/linux/selection.h
F: include/linux/serial.h
F: include/linux/serial_core.h
-F: include/linux/tty.h
+F: include/linux/sysrq.h
+F: include/linux/tty*.h
+F: include/linux/vt.h
+F: include/linux/vt_*.h
F: include/uapi/linux/serial.h
F: include/uapi/linux/serial_core.h
F: include/uapi/linux/tty.h
@@ -18978,7 +19135,7 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/roles/intel-xhci-usb-role-switch.c
-USB IP DRIVER FOR HISILICON KIRIN
+USB IP DRIVER FOR HISILICON KIRIN 960
M: Yu Chen <chenyu56@huawei.com>
M: Binghui Wang <wangbinghui@hisilicon.com>
L: linux-usb@vger.kernel.org
@@ -18986,6 +19143,13 @@ S: Maintained
F: Documentation/devicetree/bindings/phy/hisilicon,hi3660-usb3.yaml
F: drivers/phy/hisilicon/phy-hi3660-usb3.c
+USB IP DRIVER FOR HISILICON KIRIN 970
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
+F: drivers/phy/hisilicon/phy-hi3670-usb3.c
+
USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
L: linux-usb@vger.kernel.org
@@ -19662,6 +19826,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/ptp/ptp_vmw.c
+VMWARE VMCI DRIVER
+M: Jorgen Hansen <jhansen@vmware.com>
+M: Vishnu Dasa <vdasa@vmware.com>
+L: linux-kernel@vger.kernel.org
+L: pv-drivers@vmware.com (private)
+S: Maintained
+F: drivers/misc/vmw_vmci/
+
VMWARE VMMOUSE SUBDRIVER
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
@@ -19862,7 +20034,8 @@ F: Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
F: Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
F: Documentation/devicetree/bindings/mfd/wm831x.txt
F: Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
-F: Documentation/devicetree/bindings/sound/wlf,arizona.yaml
+F: Documentation/devicetree/bindings/sound/wlf,*.yaml
+F: Documentation/devicetree/bindings/sound/wm*
F: Documentation/hwmon/wm83??.rst
F: arch/arm/mach-s3c/mach-crag6410*
F: drivers/clk/clk-wm83*.c
@@ -20307,7 +20480,6 @@ M: Seth Jennings <sjenning@redhat.com>
M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
-F: include/linux/zbud.h
F: mm/zbud.c
ZD1211RW WIRELESS DRIVER
diff --git a/Makefile b/Makefile
index 88888fff4c62..c19d1638da25 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
-PATCHLEVEL = 13
+PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc6
NAME = Opossums on Parade
# *DOCUMENTATION*
@@ -129,6 +129,11 @@ endif
$(if $(word 2, $(KBUILD_EXTMOD)), \
$(error building multiple external modules is not supported))
+# Remove trailing slashes
+ifneq ($(filter %/, $(KBUILD_EXTMOD)),)
+KBUILD_EXTMOD := $(shell dirname $(KBUILD_EXTMOD).)
+endif
+
export KBUILD_EXTMOD
# Kbuild will save output files in the current working directory.
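As an aside on the idiom added above: $(filter %/, ...) is non-empty only when the word ends in a slash, and 'dirname <path>.' prints the path with that slash dropped. A minimal standalone sketch of the same trick (the file name, the M variable and the path are invented for illustration; assumes GNU Make):

# strip-slash.mk -- illustrative sketch only, not part of the kernel build
M := drivers/foo/

# $(filter %/, ...) matches only words ending in '/';
# 'dirname drivers/foo/.' prints 'drivers/foo', i.e. the path minus the slash.
ifneq ($(filter %/, $(M)),)
M := $(shell dirname $(M).)
endif

$(info M with the trailing slash stripped: '$(M)')

all: ;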
@@ -541,17 +546,23 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
PHONY += scripts_basic
scripts_basic:
$(Q)$(MAKE) $(build)=scripts/basic
- $(Q)rm -f .tmp_quiet_recordmcount
PHONY += outputmakefile
+ifdef building_out_of_srctree
# Before starting out-of-tree build, make sure the source tree is clean.
# outputmakefile generates a Makefile in the output directory, if using a
# separate output directory. This allows convenient use of make in the
# output directory.
# At the same time the output Makefile is generated, also generate .gitignore to
# ignore the whole output directory.
+
+quiet_cmd_makefile = GEN Makefile
+ cmd_makefile = { \
+ echo "\# Automatically generated by $(srctree)/Makefile: don't edit"; \
+ echo "include $(srctree)/Makefile"; \
+ } > Makefile
+
outputmakefile:
-ifdef building_out_of_srctree
$(Q)if [ -f $(srctree)/.config -o \
-d $(srctree)/include/config -o \
-d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \
@@ -562,7 +573,7 @@ ifdef building_out_of_srctree
false; \
fi
$(Q)ln -fsn $(srctree) source
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
+ $(call cmd,makefile)
$(Q)test -e .gitignore || \
{ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
endif
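For reference, the wrapper that cmd_makefile writes into the output directory is only two lines. With a hypothetical 'make O=/tmp/build' against a source tree at /home/user/linux (paths invented for illustration), the generated /tmp/build/Makefile would read roughly:

# Automatically generated by /home/user/linux/Makefile: don't edit
include /home/user/linux/Makefile

so any make run from inside the output directory simply re-enters the real top-level Makefile.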
@@ -658,7 +669,7 @@ endif
ifeq ($(KBUILD_EXTMOD),)
# Objects we will link into vmlinux / subdirs we need to visit
-core-y := init/ usr/
+core-y := init/ usr/ arch/$(SRCARCH)/
drivers-y := drivers/ sound/
drivers-$(CONFIG_SAMPLES) += samples/
drivers-$(CONFIG_NET) += net/
@@ -716,11 +727,12 @@ $(KCONFIG_CONFIG):
# This exploits the 'multi-target pattern rule' trick.
# The syncconfig should be executed only once to make all the targets.
# (Note: use the grouped target '&:' when we bump to GNU Make 4.3)
-quiet_cmd_syncconfig = SYNC $@
- cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig
-
+#
+# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
+# so you would not notice that Kconfig is waiting for user input.
%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
- +$(call cmd,syncconfig)
+ $(Q)$(kecho) " SYNC $@"
+ $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else # !may-sync-config
# External modules and some install targets need include/generated/autoconf.h
# and include/config/auto.conf but do not care if they are up-to-date.
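The quiet_cmd_*/cmd_* pairs referred to above normally print only the short "SYNC $@"-style line and hide the real command, which is exactly why an interactive prompt from syncconfig would be easy to miss and why the rule now invokes $(MAKE) directly. A self-contained sketch of the convention, assuming GNU Make; the 'cmd' helper here is a simplified stand-in for the real one in scripts/Kbuild.include, and the 'copy' command is an invented example:

quiet_cmd_copy = COPY    $@
      cmd_copy = cp -f $< $@

# simplified stand-in for the kernel's 'cmd' macro: print only the short form,
# then run the real command (the real helper also honours V=1, .cmd files, etc.)
cmd = @echo '  $(quiet_cmd_$(1))'; $(cmd_$(1))

# recipe lines start with a tab, as usual for make
%.copy: %.txt
	$(call cmd,copy)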
@@ -790,7 +802,7 @@ else
# Warn about unmarked fall-throughs in switch statement.
# Disabled for clang while comment to attribute conversion happens and
# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
-KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,)
endif
# These warnings generated too much noise in a regular build.
@@ -961,8 +973,8 @@ KBUILD_CFLAGS += $(CC_FLAGS_CFI)
export CC_FLAGS_CFI
endif
-ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B
-KBUILD_CFLAGS += -falign-functions=32
+ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B
+KBUILD_CFLAGS += -falign-functions=64
endif
# arch Makefile may override CC so keep this after arch Makefile is included
@@ -1089,41 +1101,6 @@ export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE)
MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
export MODLIB
-HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
-
-has_libelf = $(call try-run,\
- echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
-
-ifdef CONFIG_STACK_VALIDATION
- ifeq ($(has_libelf),1)
- objtool_target := tools/objtool FORCE
- else
- SKIP_STACK_VALIDATION := 1
- export SKIP_STACK_VALIDATION
- endif
-endif
-
-PHONY += resolve_btfids_clean
-
-resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids
-
-# tools/bpf/resolve_btfids directory might not exist
-# in output directory, skip its clean in that case
-resolve_btfids_clean:
-ifneq ($(wildcard $(resolve_btfids_O)),)
- $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
-endif
-
-ifdef CONFIG_BPF
-ifdef CONFIG_DEBUG_INFO_BTF
- ifeq ($(has_libelf),1)
- resolve_btfids_target := tools/bpf/resolve_btfids FORCE
- else
- ERROR_RESOLVE_BTFIDS := 1
- endif
-endif # CONFIG_DEBUG_INFO_BTF
-endif # CONFIG_BPF
-
PHONY += prepare0
export extmod_prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)
@@ -1235,7 +1212,7 @@ prepare0: archprepare
$(Q)$(MAKE) $(build)=.
# All the preparing..
-prepare: prepare0 prepare-objtool prepare-resolve_btfids
+prepare: prepare0
PHONY += remove-stale-files
remove-stale-files:
@@ -1252,26 +1229,6 @@ uapi-asm-generic:
$(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \
generic=include/uapi/asm-generic
-PHONY += prepare-objtool prepare-resolve_btfids
-prepare-objtool: $(objtool_target)
-ifeq ($(SKIP_STACK_VALIDATION),1)
-ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL
- @echo "error: Cannot generate __mcount_loc for CONFIG_DYNAMIC_FTRACE=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
- @false
-endif
-ifdef CONFIG_UNWINDER_ORC
- @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
- @false
-else
- @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
-endif
-endif
-
-prepare-resolve_btfids: $(resolve_btfids_target)
-ifeq ($(ERROR_RESOLVE_BTFIDS),1)
- @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
- @false
-endif
# Generate some files
# ---------------------------------------------------------------------------
@@ -1360,6 +1317,53 @@ scripts_unifdef: scripts_basic
$(Q)$(MAKE) $(build)=scripts scripts/unifdef
# ---------------------------------------------------------------------------
+# Install
+
+# Many distributions have a custom install script, /sbin/installkernel.
+# If DKMS is installed, 'make install' will eventually recurse back
+# to this Makefile to build and install external modules.
+# Cancel sub_make_done so that options such as M=, V=, etc. are parsed.
+
+install: sub_make_done :=
+
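'install: sub_make_done :=' above is an ordinary GNU Make target-specific variable assignment: it blanks sub_make_done only for the install target and whatever is built on its behalf, so the recursive make started via DKMS re-parses options such as M= and V=. A tiny standalone illustration of the mechanism (variable and target names invented for the sketch):

# sub-make-flag.mk -- target-specific variable reset, illustrative only
FLAG := already-done

.PHONY: install other

# blank FLAG only for 'install' and anything built on its behalf
install: FLAG :=
install:
	@echo "FLAG during install: '$(FLAG)'"
other:
	@echo "FLAG elsewhere: '$(FLAG)'"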
+# ---------------------------------------------------------------------------
+# Tools
+
+ifdef CONFIG_STACK_VALIDATION
+prepare: tools/objtool
+endif
+
+ifdef CONFIG_BPF
+ifdef CONFIG_DEBUG_INFO_BTF
+prepare: tools/bpf/resolve_btfids
+endif
+endif
+
+PHONY += resolve_btfids_clean
+
+resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids
+
+# tools/bpf/resolve_btfids directory might not exist
+# in output directory, skip its clean in that case
+resolve_btfids_clean:
+ifneq ($(wildcard $(resolve_btfids_O)),)
+ $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
+endif
+
+# Clear a bunch of variables before executing the submake
+ifeq ($(quiet),silent_)
+tools_silent=s
+endif
+
+tools/: FORCE
+ $(Q)mkdir -p $(objtree)/tools
+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
+
+tools/%: FORCE
+ $(Q)mkdir -p $(objtree)/tools
+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
+
+# ---------------------------------------------------------------------------
# Kernel selftest
PHONY += kselftest
@@ -1959,20 +1963,6 @@ kernelversion:
image_name:
@echo $(KBUILD_IMAGE)
-# Clear a bunch of variables before executing the submake
-
-ifeq ($(quiet),silent_)
-tools_silent=s
-endif
-
-tools/: FORCE
- $(Q)mkdir -p $(objtree)/tools
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
-
-tools/%: FORCE
- $(Q)mkdir -p $(objtree)/tools
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
-
quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files)))
cmd_rmfiles = rm -rf $(rm-files)
diff --git a/arch/alpha/Kbuild b/arch/alpha/Kbuild
new file mode 100644
index 000000000000..c2302017403a
--- /dev/null
+++ b/arch/alpha/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += kernel/ mm/
+obj-$(CONFIG_MATHEMU) += math-emu/
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8954216b9956..6c50877841df 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -14,7 +14,6 @@ config ALPHA
select PCI_SYSCALL if PCI
select HAVE_AOUT
select HAVE_ASM_MODVERSIONS
- select HAVE_IDE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select NEED_DMA_MAP_STATE
@@ -40,6 +39,7 @@ config ALPHA
select MMU_GATHER_NO_RANGE
select SET_FS
select SPARSEMEM_EXTREME if SPARSEMEM
+ select ZONE_DMA
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -65,10 +65,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config ZONE_DMA
- bool
- default y
-
config GENERIC_ISA_DMA
bool
default y
@@ -535,7 +531,7 @@ config SMP
will run faster if you say N here.
See also the SMP-HOWTO available at
- <http://www.tldp.org/docs.html#howto>.
+ <https://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index c2946431d88d..52529ee42dac 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -38,8 +38,6 @@ KBUILD_CFLAGS += $(cflags-y) -Wa,-mev6
head-y := arch/alpha/kernel/head.o
-core-y += arch/alpha/kernel/ arch/alpha/mm/
-core-$(CONFIG_MATHEMU) += arch/alpha/math-emu/
libs-y += arch/alpha/lib/
# export what is needed by arch/alpha/boot/Makefile
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
index 00266e6e1b71..b4faba2432d5 100644
--- a/arch/alpha/boot/bootp.c
+++ b/arch/alpha/boot/bootp.c
@@ -23,7 +23,7 @@
#include "ksize.h"
extern unsigned long switch_to_osf_pal(unsigned long nr,
- struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+ struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa,
unsigned long *vptb);
extern void move_stack(unsigned long new_stack);
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
index 43af71835adf..90a2b341e9c0 100644
--- a/arch/alpha/boot/bootpz.c
+++ b/arch/alpha/boot/bootpz.c
@@ -200,7 +200,7 @@ extern char _end;
START_ADDR KSEG address of the entry point of kernel code.
ZERO_PGE KSEG address of page full of zeroes, but
- upon entry to kerne cvan be expected
+ upon entry to kernel, it can be expected
to hold the parameter list and possible
INTRD information.
diff --git a/arch/alpha/boot/misc.c b/arch/alpha/boot/misc.c
index d65192202703..325d4dd4f904 100644
--- a/arch/alpha/boot/misc.c
+++ b/arch/alpha/boot/misc.c
@@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...)
__attribute__ ((format (printf, 1, 2)));
/*
- * gzip delarations
+ * gzip declarations
*/
#define OF(args) args
#define STATIC static
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index dd2dd9f0861f..7f1ca30b115b 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y
CONFIG_ALPHA_LEGACY_START_ADDRESS=y
CONFIG_MATHEMU=y
CONFIG_CRYPTO_HMAC=y
+CONFIG_DEVTMPFS=y
diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h
index 5159ba259d65..ae645959018a 100644
--- a/arch/alpha/include/asm/compiler.h
+++ b/arch/alpha/include/asm/compiler.h
@@ -4,15 +4,4 @@
#include <uapi/asm/compiler.h>
-/* Some idiots over in <linux/compiler.h> thought inline should imply
- always_inline. This breaks stuff. We'll include this file whenever
- we run into such problems. */
-
-#include <linux/compiler.h>
-#undef inline
-#undef __inline__
-#undef __inline
-#undef __always_inline
-#define __always_inline inline __attribute__((always_inline))
-
#endif /* __ALPHA_COMPILER_H */
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index 9c6a24fe493d..68be7adbfe58 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -18,7 +18,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index e1757b7cfe3d..02f0429f1068 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -46,7 +46,6 @@ struct vm_area_struct;
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
@@ -237,8 +236,10 @@ pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
#define pud_page(pud) (pfn_to_page(pud_val(pud) >> 32))
-extern inline unsigned long pud_page_vaddr(pud_t pgd)
-{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+extern inline pmd_t *pud_pgtable(pud_t pgd)
+{
+ return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)));
+}
extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
@@ -288,7 +289,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; retu
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
- pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+ pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
smp_rmb(); /* see above */
return ret;
}
diff --git a/arch/alpha/include/asm/syscall.h b/arch/alpha/include/asm/syscall.h
index 11c688c1d7ec..f21babaeed85 100644
--- a/arch/alpha/include/asm/syscall.h
+++ b/arch/alpha/include/asm/syscall.h
@@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
return AUDIT_ARCH_ALPHA;
}
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
#endif /* _ASM_ALPHA_SYSCALL_H */
diff --git a/arch/alpha/include/asm/unaligned.h b/arch/alpha/include/asm/unaligned.h
deleted file mode 100644
index 863c807b66f8..000000000000
--- a/arch/alpha/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ALPHA_UNALIGNED_H
-#define _ASM_ALPHA_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_ALPHA_UNALIGNED_H */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index a18ec7f63888..56b4ee5a6c9e 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -71,6 +71,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index d5367a1c6300..d31167e3269c 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
return -EFAULT;
state = &current_thread_info()->ieee_state;
- /* Update softare trap enable bits. */
+ /* Update software trap enable bits. */
*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
/* Update the real fpcr. */
@@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
state = &current_thread_info()->ieee_state;
exc &= IEEE_STATUS_MASK;
- /* Update softare trap enable bits. */
+ /* Update software trap enable bits. */
swcr = (*state & IEEE_SW_MASK) | exc;
*state |= exc;
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index e7a59d927d78..efcf7321701b 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
- * EV67 but we don't do suffiently deep CPU detection to detect them.
+ * EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index ef0c08ed0481..a5123ea426ce 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childstack->r26 = (unsigned long) ret_from_kernel_thread;
childstack->r9 = usp; /* function */
childstack->r10 = kthread_arg;
- childregs->hae = alpha_mv.hae_cache,
+ childregs->hae = alpha_mv.hae_cache;
childti->pcb.usp = 0;
return 0;
}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 5f6858e9dc28..b4fbbba30aa2 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/pci.h>
@@ -46,7 +47,6 @@
#include <linux/log2.h>
#include <linux/export.h>
-extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
alpha_panic_event,
@@ -319,18 +319,19 @@ setup_memory(void *kernel_end)
i, cluster->usage, cluster->start_pfn,
cluster->start_pfn + cluster->numpages);
- /* Bit 0 is console/PALcode reserved. Bit 1 is
- non-volatile memory -- we might want to mark
- this for later. */
- if (cluster->usage & 3)
- continue;
-
end = cluster->start_pfn + cluster->numpages;
if (end > max_low_pfn)
max_low_pfn = end;
memblock_add(PFN_PHYS(cluster->start_pfn),
cluster->numpages << PAGE_SHIFT);
+
+ /* Bit 0 is console/PALcode reserved. Bit 1 is
+ non-volatile memory -- we might want to mark
+ this for later. */
+ if (cluster->usage & 3)
+ memblock_reserve(PFN_PHYS(cluster->start_pfn),
+ cluster->numpages << PAGE_SHIFT);
}
/*
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 4b2575f936d4..cb64e4797d2a 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -582,7 +582,7 @@ void
smp_send_stop(void)
{
cpumask_t to_whom;
- cpumask_copy(&to_whom, cpu_possible_mask);
+ cpumask_copy(&to_whom, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
if (hard_smp_processor_id() != boot_cpu_id)
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 438b10c44d73..fc03471a0b0f 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -142,19 +142,13 @@ srmcons_write(struct tty_struct *tty,
return count;
}
-static int
+static unsigned int
srmcons_write_room(struct tty_struct *tty)
{
return 512;
}
static int
-srmcons_chars_in_buffer(struct tty_struct *tty)
-{
- return 0;
-}
-
-static int
srmcons_open(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = &srmcons_singleton;
@@ -200,7 +194,6 @@ static const struct tty_operations srmcons_ops = {
.close = srmcons_close,
.write = srmcons_write,
.write_room = srmcons_write_room,
- .chars_in_buffer= srmcons_chars_in_buffer,
};
static int __init
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 53adf43dcd44..96fd6ff3fe81 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -212,7 +212,7 @@ nautilus_init_pci(void)
/* Use default IO. */
pci_add_resource(&bridge->windows, &ioport_resource);
- /* Irongate PCI memory aperture, calculate requred size before
+ /* Irongate PCI memory aperture, calculate required size before
setting it up. */
pci_add_resource(&bridge->windows, &irongate_mem);
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 921d4b6e4d95..5398f982bdd1 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
long error;
/* Check the UAC bits to decide what the user wants us to do
- with the unaliged access. */
+ with the unaligned access. */
if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
if (__ratelimit(&ratelimit)) {
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index d568cd9a3e43..f7cef66af88d 100644
--- a/arch/alpha/math-emu/math.c
+++ b/arch/alpha/math-emu/math.c
@@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc);
long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
long do_alpha_fp_emul(unsigned long);
-int init_module(void)
+static int alpha_fp_emul_init_module(void)
{
save_emul_imprecise = alpha_fp_emul_imprecise;
save_emul = alpha_fp_emul;
@@ -73,12 +73,14 @@ int init_module(void)
alpha_fp_emul = do_alpha_fp_emul;
return 0;
}
+module_init(alpha_fp_emul_init_module);
-void cleanup_module(void)
+static void alpha_fp_emul_cleanup_module(void)
{
alpha_fp_emul_imprecise = save_emul_imprecise;
alpha_fp_emul = save_emul;
}
+module_exit(alpha_fp_emul_cleanup_module);
#undef alpha_fp_emul_imprecise
#define alpha_fp_emul_imprecise do_alpha_fp_emul_imprecise
@@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
egress:
return si_code;
}
+
+EXPORT_SYMBOL(__udiv_qrnnd);
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index d8f51eb8963b..b5bf68e74732 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -409,7 +409,7 @@ choice
help
Depending on the configuration, CPU can contain DSP registers
(ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
- Bellow is options describing how to handle these registers in
+ Below are options describing how to handle these registers in
interrupt entry / exit and in context switch.
config ARC_DSP_NONE
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index e47adc97a89b..c0d87ac2e221 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -85,9 +85,6 @@ KBUILD_LDFLAGS += $(ldflags-y)
head-y := arch/arc/kernel/head.o
-# See arch/arc/Kbuild for content of core part of the kernel
-core-y += arch/arc/
-
# w/o this dtb won't embed into kernel binary
core-y += arch/arc/boot/dts/
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
index 69debd77cd04..0b485800a392 100644
--- a/arch/arc/include/asm/checksum.h
+++ b/arch/arc/include/asm/checksum.h
@@ -24,7 +24,7 @@
*/
static inline __sum16 csum_fold(__wsum s)
{
- unsigned r = s << 16 | s >> 16; /* ror */
+ unsigned int r = s << 16 | s >> 16; /* ror */
s = ~s;
s -= r;
return s >> 16;
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 30b9ae511ea9..e1971d34ef30 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xffff
-static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 6147db925248..a32ca3104ced 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -129,6 +129,4 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
-#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
-
#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 5878846f00cf..320cc0ae8a08 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -222,12 +222,6 @@
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-/*
- * No special requirements for lowest virtual address we permit any user space
- * mapping to be mapped at.
- */
-#define FIRST_USER_ADDRESS 0UL
-
/****************************************************************
* Bucket load of VM Helpers
@@ -356,6 +350,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#define kern_addr_valid(addr) (1)
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
+
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
index c67c0f0f5f77..ec640219d989 100644
--- a/arch/arc/kernel/fpu.c
+++ b/arch/arc/kernel/fpu.c
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
void fpu_init_task(struct pt_regs *regs)
{
+ const unsigned int fwe = 0x80000000;
+
/* default rounding mode */
write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
- /* set "Write enable" to allow explicit write to exception flags */
- write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+ /* Initialize to zero: setting requires FWE be set */
+ write_aux_reg(ARC_REG_FPU_STATUS, fwe);
}
void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
{
struct arc_fpu *save = &prev->thread.fpu;
struct arc_fpu *restore = &next->thread.fpu;
+ const unsigned int fwe = 0x80000000;
save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
save->status = read_aux_reg(ARC_REG_FPU_STATUS);
write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
- write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+ write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
}
#endif
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 47bab67f8649..9e28058cdba8 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table,
{
const u8 *ptr;
unsigned long tableSize = table->size, hdrSize;
- unsigned n;
+ unsigned int n;
const u32 *fde;
struct {
u8 version;
@@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
{
const u8 *cur = *pcur;
uleb128_t value;
- unsigned shift;
+ unsigned int shift;
for (shift = 0, value = 0; cur < end; shift += 7) {
if (shift + 7 > 8 * sizeof(value)
@@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
{
const u8 *cur = *pcur;
sleb128_t value;
- unsigned shift;
+ unsigned int shift;
for (shift = 0, value = 0; cur < end; shift += 7) {
if (shift + 7 > 8 * sizeof(value)
@@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
static signed fde_pointer_type(const u32 *cie)
{
const u8 *ptr = (const u8 *)(cie + 2);
- unsigned version = *ptr;
+ unsigned int version = *ptr;
if (*++ptr) {
const char *aug;
@@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame)
const u8 *ptr = NULL, *end = NULL;
unsigned long pc = UNW_PC(frame) - frame->call_frame;
unsigned long startLoc = 0, endLoc = 0, cfa;
- unsigned i;
+ unsigned int i;
signed ptrType = -1;
uleb128_t retAddrReg = 0;
const struct unwind_table *table;
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index e2146a8da195..529ae50f9fe2 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -88,6 +88,8 @@ SECTIONS
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index abfeef7bf6f8..c083bf660cec 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -89,10 +89,7 @@ void __init setup_arch_memory(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
- init_mm.start_code = (unsigned long)_text;
- init_mm.end_code = (unsigned long)_etext;
- init_mm.end_data = (unsigned long)_edata;
- init_mm.brk = (unsigned long)_end;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
/* first page of system - kernel .vector starts here */
min_low_pfn = virt_to_pfn(CONFIG_LINUX_RAM_BASE);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 809317b5a6c6..2fb7012c3246 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -95,7 +95,6 @@ config ARM
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
- select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -218,9 +217,6 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_MAY_HAVE_PC_FDC
bool
-config ZONE_DMA
- bool
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -364,7 +360,6 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
- select HAVE_IDE
select NEED_MACH_IO_H if !MMU
select NEED_MACH_MEMORY_H
help
@@ -397,7 +392,8 @@ config ARCH_IXP4XX
select HAVE_PCI
select IXP4XX_IRQ
select IXP4XX_TIMER
- select NEED_MACH_IO_H
+ # With the new PCI driver this is not needed
+ select NEED_MACH_IO_H if IXP4XX_PCI_LEGACY
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
help
@@ -432,7 +428,6 @@ config ARCH_PXA
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
- select HAVE_IDE
select IRQ_DOMAIN
select PLAT_PXA
select SPARSE_IRQ
@@ -448,7 +443,6 @@ config ARCH_RPC
select ARM_HAS_SG_CHAIN
select CPU_SA110
select FIQ
- select HAVE_IDE
select HAVE_PATA_PLATFORM
select ISA_DMA_API
select LEGACY_TIMER_TICK
@@ -471,7 +465,6 @@ config ARCH_SA1100
select CPU_SA1100
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select HAVE_IDE
select IRQ_DOMAIN
select ISA
select NEED_MACH_MEMORY_H
@@ -507,7 +500,6 @@ config ARCH_OMAP1
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select HAVE_IDE
select HAVE_LEGACY_CLK
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 36016497b1b3..1c4384db223d 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -66,6 +66,8 @@ config UNWINDER_FRAME_POINTER
config UNWINDER_ARM
bool "ARM EABI stack unwinder"
depends on AEABI && !FUNCTION_GRAPH_TRACER
+ # https://github.com/ClangBuiltLinux/linux/issues/732
+ depends on !LD_IS_LLD || LLD_VERSION >= 110000
select ARM_UNWIND
help
This option enables stack unwinding support in the kernel
@@ -607,6 +609,14 @@ choice
when u-boot hands over to the kernel, the system
silently crashes, with no serial output at all.
+ config DEBUG_MSTARV7_PMUART
+ bool "Kernel low-level debugging messages via MSTARV7 PM UART"
+ depends on ARCH_MSTARV7
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ for MSTAR ARMv7-based platforms on PM UART.
+
config DEBUG_MT6589_UART0
bool "Mediatek mt6589 UART0"
depends on ARCH_MEDIATEK
@@ -1605,6 +1615,7 @@ config DEBUG_UART_PHYS
default 0x18000400 if DEBUG_BCM_HR2
default 0x18023000 if DEBUG_BCM_IPROC_UART3
default 0x1c090000 if DEBUG_VEXPRESS_UART0_RS1
+ default 0x1f221000 if DEBUG_MSTARV7_PMUART
default 0x20001000 if DEBUG_HIP01_UART
default 0x20060000 if DEBUG_RK29_UART0
default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
@@ -1722,6 +1733,7 @@ config DEBUG_UART_VIRT
default 0xf0100000 if DEBUG_DIGICOLOR_UA0
default 0xf01fb000 if DEBUG_NOMADIK_UART
default 0xf0201000 if DEBUG_BCM2835 || DEBUG_BCM2836
+ default 0xf0221000 if DEBUG_MSTARV7_PMUART
default 0xf1000300 if DEBUG_BCM_5301X
default 0xf1000400 if DEBUG_BCM_HR2
default 0xf1002000 if DEBUG_MT8127_UART0
@@ -1803,8 +1815,8 @@ config DEBUG_UART_VIRT
default 0xfedc0000 if DEBUG_EP93XX
default 0xfee003f8 if DEBUG_FOOTBRIDGE_COM1
default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
- default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
- default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+ default 0xfec00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
+ default 0xfec00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
default 0xfef36000 if DEBUG_HIGHBANK_UART
default 0xfefb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
default 0xfefb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
@@ -1827,6 +1839,7 @@ config DEBUG_UART_8250_SHIFT
default 0 if DEBUG_FOOTBRIDGE_COM1 || ARCH_IOP32X || DEBUG_BCM_5301X || \
DEBUG_BCM_HR2 || DEBUG_OMAP7XXUART1 || DEBUG_OMAP7XXUART2 || \
DEBUG_OMAP7XXUART3
+ default 3 if DEBUG_MSTARV7_PMUART
default 2
config DEBUG_UART_8250_WORD
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 415c3514573a..173da685a52e 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,7 +252,6 @@ endif
export TEXT_OFFSET GZFLAGS MMUEXT
-core-y += arch/arm/
# If we have a machine-specific directory, then include it in the build.
core-y += $(machdirs) $(platdirs)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 8eb70c1febce..9d91ae1091b0 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -100,7 +100,7 @@ targets := vmlinux vmlinux.lds piggy_data piggy.o \
lib1funcs.o ashldi3.o bswapsdi2.o \
head.o $(OBJS)
-clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
+clean-files += lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 31927d2fe297..1feb6b0f7a1f 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -121,7 +121,7 @@ static void hex_str(char *out, uint32_t value)
/*
* Convert and fold provided ATAGs into the provided FDT.
*
- * REturn values:
+ * Return values:
* = 0 -> pretend success
* = 1 -> bad ATAG (may retry with another possible ATAG pointer)
* < 0 -> error from libfdt
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f8f09c5066e7..863347b6b65e 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -88,6 +88,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
bcm2837-rpi-cm3-io3.dtb \
+ bcm2711-rpi-400.dtb \
bcm2711-rpi-4-b.dtb \
bcm2835-rpi-zero.dtb \
bcm2835-rpi-zero-w.dtb
@@ -240,6 +241,7 @@ dtb-$(CONFIG_ARCH_INTEGRATOR) += \
integratorcp.dtb
dtb-$(CONFIG_ARCH_IXP4XX) += \
intel-ixp42x-linksys-nslu2.dtb \
+ intel-ixp42x-welltech-epbx100.dtb \
intel-ixp43x-gateworks-gw2358.dtb
dtb-$(CONFIG_ARCH_KEYSTONE) += \
keystone-k2hk-evm.dtb \
@@ -513,8 +515,14 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-display5-tianma-tm070-1280x768.dtb \
imx6q-dmo-edmqmx6.dtb \
imx6q-dms-ba16.dtb \
+ imx6q-ds.dtb \
imx6q-emcon-avari.dtb \
imx6q-evi.dtb \
+ imx6dl-b105pv2.dtb \
+ imx6dl-b105v2.dtb \
+ imx6dl-b125v2.dtb \
+ imx6dl-b125pv2.dtb \
+ imx6dl-b155v2.dtb \
imx6q-gk802.dtb \
imx6q-gw51xx.dtb \
imx6q-gw52xx.dtb \
@@ -725,7 +733,8 @@ dtb-$(CONFIG_ARCH_MXS) += \
imx28-m28evk.dtb \
imx28-sps1.dtb \
imx28-ts4600.dtb \
- imx28-tx28.dtb
+ imx28-tx28.dtb \
+ imx28-xea.dtb
dtb-$(CONFIG_ARCH_NOMADIK) += \
ste-nomadik-s8815.dtb \
ste-nomadik-nhk15.dtb
@@ -1234,6 +1243,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-r16-nintendo-super-nes-classic.dtb \
sun8i-r16-parrot.dtb \
sun8i-r40-bananapi-m2-ultra.dtb \
+ sun8i-r40-oka40i-c.dtb \
sun8i-s3-elimo-initium.dtb \
sun8i-s3-lichee-zero-plus.dtb \
sun8i-s3-pinecube.dtb \
@@ -1415,6 +1425,7 @@ dtb-$(CONFIG_ARCH_MSTARV7) += \
mstar-mercury5-ssc8336n-midrived08.dtb
dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-ast2500-evb.dtb \
+ aspeed-ast2600-evb-a1.dtb \
aspeed-ast2600-evb.dtb \
aspeed-bmc-amd-ethanolx.dtb \
aspeed-bmc-ampere-mtjade.dtb \
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index 3ea286180382..1103a2cb836f 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -393,10 +393,10 @@
status = "okay";
};
-&gpio0 {
+&gpio0_target {
ti,no-reset-on-init;
};
-&gpio3 {
+&gpio3_target {
ti,no-reset-on-init;
};
diff --git a/arch/arm/boot/dts/am335x-boneblack-wireless.dts b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
index 86cad9912906..80116646a3fe 100644
--- a/arch/arm/boot/dts/am335x-boneblack-wireless.dts
+++ b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
@@ -101,7 +101,7 @@
};
&gpio3 {
- ls_buf_en {
+ ls-buf-en-hog {
gpio-hog;
gpios = <10 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/am335x-boneblue.dts b/arch/arm/boot/dts/am335x-boneblue.dts
index 69acaf4ea0f3..0afcc2ee0b63 100644
--- a/arch/arm/boot/dts/am335x-boneblue.dts
+++ b/arch/arm/boot/dts/am335x-boneblue.dts
@@ -436,7 +436,7 @@
};
&gpio3 {
- ls_buf_en {
+ ls-buf-en-hog {
gpio-hog;
gpios = <10 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
index 7615327d906a..74db0fc39397 100644
--- a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
+++ b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
@@ -101,7 +101,7 @@
};
&gpio1 {
- ls_buf_en {
+ ls-buf-en-hog {
gpio-hog;
gpios = <29 GPIO_ACTIVE_HIGH>;
output-high;
@@ -118,7 +118,7 @@
/* an external pulldown on U21 pin 4. */
&gpio3 {
- bt_aud_in {
+ bt-aud-in-hog {
gpio-hog;
gpios = <16 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm/boot/dts/am335x-cm-t335.dts b/arch/arm/boot/dts/am335x-cm-t335.dts
index 36d963db4026..688e14e82eba 100644
--- a/arch/arm/boot/dts/am335x-cm-t335.dts
+++ b/arch/arm/boot/dts/am335x-cm-t335.dts
@@ -333,7 +333,7 @@ status = "okay";
&epwmss0 {
status = "okay";
- ecap0: ecap@100 {
+ ecap0: pwm@100 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&ecap0_pins>;
@@ -496,7 +496,7 @@ status = "okay";
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi0_pins>;
- ti,pindir-d0-out-d1-in = <1>;
+ ti,pindir-d0-out-d1-in;
/* WLS1271 WiFi */
wlcore: wlcore@1 {
compatible = "ti,wl1271";
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 902e295b309e..9cf39c93defb 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -495,7 +495,7 @@
&epwmss0 {
status = "okay";
- ecap0: ecap@100 {
+ ecap0: pwm@100 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&ecap0_pins>;
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index d5f8d5e2eb5d..001657be0381 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -510,7 +510,7 @@
&epwmss2 {
status = "okay";
- ecap2: ecap@100 {
+ ecap2: pwm@100 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&ecap2_pins>;
@@ -646,7 +646,7 @@
status = "okay";
};
-&gpio0 {
+&gpio0_target {
ti,no-reset-on-init;
};
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index e923d065304d..5e598ac96dcc 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -458,14 +458,14 @@
};
&gpio3 {
- p4 {
+ pr1-mii-ctl-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
line-name = "PR1_MII_CTRL";
};
- p10 {
+ mux-mii-hog {
gpio-hog;
gpios = <10 GPIO_ACTIVE_HIGH>;
/* ETH1 mux: Low for MII-PRU, high for RMII-CPSW */
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
index 4e90f9c23d2e..8121a199607c 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
+++ b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
@@ -150,7 +150,7 @@
status = "okay";
};
-&gpio0 {
+&gpio0_target {
ti,no-reset-on-init;
};
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-8100-common.dtsi b/arch/arm/boot/dts/am335x-moxa-uc-8100-common.dtsi
index 98d8ed4ad967..39e5d2ce600a 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-8100-common.dtsi
+++ b/arch/arm/boot/dts/am335x-moxa-uc-8100-common.dtsi
@@ -353,7 +353,7 @@
status = "okay";
};
-&gpio0 {
+&gpio0_target {
ti,no-reset-on-init;
};
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
index f841afb27844..5403e47c07e2 100644
--- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
+++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
@@ -25,10 +25,6 @@
regulator-always-on;
};
-&mmc1 {
- vmmc-supply = <&vmmcsd_fixed>;
-};
-
&mmc2 {
vmmc-supply = <&vmmcsd_fixed>;
pinctrl-names = "default";
@@ -37,68 +33,6 @@
status = "okay";
};
-&am33xx_pinmux {
- nxp_hdmi_bonelt_pins: nxp-hdmi-bonelt-pins {
- pinctrl-single,pins = <
- AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
- >;
- };
-
- nxp_hdmi_bonelt_off_pins: nxp-hdmi-bonelt-off-pins {
- pinctrl-single,pins = <
- AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
- >;
- };
-
- mcasp0_pins: mcasp0-pins {
- pinctrl-single,pins = <
- AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
- AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_OUTPUT_PULLUP, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.GPIO1_27 */
- >;
- };
-
- flash_enable: flash-enable {
- pinctrl-single,pins = <
- AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* rmii1_ref_clk.gpio0_29 */
- >;
- };
-
- imu_interrupt: imu-interrupt {
- pinctrl-single,pins = <
- AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mii1_rx_er.gpio3_2 */
- >;
- };
-
- ethernet_interrupt: ethernet-interrupt{
- pinctrl-single,pins = <
- AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mii1_col.gpio3_0 */
- >;
- };
-};
-
&lcdc {
status = "okay";
@@ -167,10 +101,6 @@
};
};
-&rtc {
- system-power-controller;
-};
-
&mcasp0 {
#sound-dai-cells = <0>;
pinctrl-names = "default";
@@ -267,6 +197,66 @@
pinctrl-names = "default";
pinctrl-0 = <&clkout2_pin>;
+ nxp_hdmi_bonelt_pins: nxp-hdmi-bonelt-pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ >;
+ };
+
+ nxp_hdmi_bonelt_off_pins: nxp-hdmi-bonelt-off-pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
+ >;
+ };
+
+ mcasp0_pins: mcasp0-pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+ >;
+ };
+
+ flash_enable: flash-enable {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* rmii1_ref_clk.gpio0_29 */
+ >;
+ };
+
+ imu_interrupt: imu-interrupt {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mii1_rx_er.gpio3_2 */
+ >;
+ };
+
+ ethernet_interrupt: ethernet-interrupt{
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mii1_col.gpio3_0 */
+ >;
+ };
+
user_leds_s0: user-leds-s0 {
pinctrl-single,pins = <
AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a5.gpio1_21 */
@@ -427,6 +417,7 @@
&mmc1 {
status = "okay";
+ vmmc-supply = <&vmmcsd_fixed>;
bus-width = <0x4>;
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
@@ -434,6 +425,7 @@
};
&rtc {
+ system-power-controller;
clocks = <&clk_32768_ck>, <&clk_24mhz_clkctrl AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL 0>;
clock-names = "ext-clk", "int-clk";
};
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 1eaa26533466..2bfe60d32783 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -140,14 +140,14 @@
};
&gpio1 {
- hmtc_rst {
+ hmtc-rst-hog {
gpio-hog;
gpios = <24 GPIO_ACTIVE_LOW>;
output-high;
line-name = "homematic_reset";
};
- hmtc_prog {
+ hmtc-prog-hog {
gpio-hog;
gpios = <27 GPIO_ACTIVE_LOW>;
output-high;
@@ -156,14 +156,14 @@
};
&gpio3 {
- zgb_rst {
+ zgb-rst-hog {
gpio-hog;
gpios = <18 GPIO_ACTIVE_LOW>;
output-low;
line-name = "zigbee_reset";
};
- zgb_boot {
+ zgb-boot-hog {
gpio-hog;
gpios = <19 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 039a9ab4c7ea..859e760df4c8 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -1486,7 +1486,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <8>;
- mbox_wkupm3: wkup_m3 {
+ mbox_wkupm3: mbox-wkup-m3 {
ti,mbox-send-noirq;
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <0 0 3>;
@@ -1789,7 +1789,7 @@
};
};
- target-module@ae000 { /* 0x481ae000, ap 56 3a.0 */
+ gpio3_target: target-module@ae000 { /* 0x481ae000, ap 56 3a.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0xae000 0x4>,
<0xae010 0x4>,
@@ -1995,15 +1995,12 @@
status = "disabled";
ranges = <0 0 0x1000>;
- ecap0: ecap@100 {
- compatible = "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ ecap0: pwm@100 {
+ compatible = "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x100 0x80>;
clocks = <&l4ls_gclk>;
clock-names = "fck";
- interrupts = <31>;
- interrupt-names = "ecap0";
status = "disabled";
};
@@ -2017,8 +2014,7 @@
};
ehrpwm0: pwm@200 {
- compatible = "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ compatible = "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
@@ -2056,15 +2052,12 @@
status = "disabled";
ranges = <0 0 0x1000>;
- ecap1: ecap@100 {
- compatible = "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ ecap1: pwm@100 {
+ compatible = "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x100 0x80>;
clocks = <&l4ls_gclk>;
clock-names = "fck";
- interrupts = <47>;
- interrupt-names = "ecap1";
status = "disabled";
};
@@ -2078,8 +2071,7 @@
};
ehrpwm1: pwm@200 {
- compatible = "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ compatible = "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>;
@@ -2117,15 +2109,12 @@
status = "disabled";
ranges = <0 0 0x1000>;
- ecap2: ecap@100 {
- compatible = "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ ecap2: pwm@100 {
+ compatible = "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x100 0x80>;
clocks = <&l4ls_gclk>;
clock-names = "fck";
- interrupts = <61>;
- interrupt-names = "ecap2";
status = "disabled";
};
@@ -2139,8 +2128,7 @@
};
ehrpwm2: pwm@200 {
- compatible = "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ compatible = "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>;
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 6e4d05d649e9..e2677682b540 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -786,7 +786,7 @@
pinctrl-0 = <&gpio0_pins>;
status = "okay";
- p23 {
+ sel-emmc-nand-hog {
gpio-hog;
gpios = <23 GPIO_ACTIVE_HIGH>;
/* SelEMMCorNAND selects between eMMC and NAND:
@@ -813,13 +813,16 @@
status = "okay";
};
+&gpio5_target {
+ ti,no-reset-on-init;
+};
+
&gpio5 {
pinctrl-names = "default";
pinctrl-0 = <&display_mux_pins>;
status = "okay";
- ti,no-reset-on-init;
- p8 {
+ sel-lcd-hdmi-hog {
/*
* SelLCDorHDMI selects between display and audio paths:
* Low: HDMI display with audio via HDMI
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index e217ffc09770..ba58e6b0da1d 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -194,7 +194,7 @@
ranges = <0x0 0x9000 0x1000>;
uart0: serial@0 {
- compatible = "ti,am4372-uart","ti,omap2-uart";
+ compatible = "ti,am4372-uart";
reg = <0x0 0x2000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -712,7 +712,7 @@
ranges = <0x0 0x22000 0x1000>;
uart1: serial@0 {
- compatible = "ti,am4372-uart","ti,omap2-uart";
+ compatible = "ti,am4372-uart";
reg = <0x0 0x2000>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -740,7 +740,7 @@
ranges = <0x0 0x24000 0x1000>;
uart2: serial@0 {
- compatible = "ti,am4372-uart","ti,omap2-uart";
+ compatible = "ti,am4372-uart";
reg = <0x0 0x2000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -1168,7 +1168,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <8>;
- mbox_wkupm3: wkup_m3 {
+ mbox_wkupm3: mbox-wkup-m3 {
ti,mbox-send-noirq;
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <0 0 3>;
@@ -1399,7 +1399,7 @@
ranges = <0x0 0xa6000 0x1000>;
uart3: serial@0 {
- compatible = "ti,am4372-uart","ti,omap2-uart";
+ compatible = "ti,am4372-uart";
reg = <0x0 0x2000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -1427,7 +1427,7 @@
ranges = <0x0 0xa8000 0x1000>;
uart4: serial@0 {
- compatible = "ti,am4372-uart","ti,omap2-uart";
+ compatible = "ti,am4372-uart";
reg = <0x0 0x2000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -1455,7 +1455,7 @@
ranges = <0x0 0xaa000 0x1000>;
uart5: serial@0 {
- compatible = "ti,am4372-uart","ti,omap2-uart";
+ compatible = "ti,am4372-uart";
reg = <0x0 0x2000>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -1595,7 +1595,7 @@
compatible = "ti,am4372-d_can", "ti,am3352-d_can";
reg = <0x0 0x2000>;
clocks = <&dcan1_fck>;
- clock-name = "fck";
+ clock-names = "fck";
syscon-raminit = <&scm_conf 0x644 1>;
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -1747,10 +1747,9 @@
ranges = <0 0 0x1000>;
status = "disabled";
- ecap0: ecap@100 {
+ ecap0: pwm@100 {
compatible = "ti,am4372-ecap",
- "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x100 0x80>;
clocks = <&l4ls_gclk>;
@@ -1760,8 +1759,7 @@
ehrpwm0: pwm@200 {
compatible = "ti,am4372-ehrpwm",
- "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
@@ -1799,10 +1797,9 @@
ranges = <0 0 0x1000>;
status = "disabled";
- ecap1: ecap@100 {
+ ecap1: pwm@100 {
compatible = "ti,am4372-ecap",
- "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x100 0x80>;
clocks = <&l4ls_gclk>;
@@ -1812,8 +1809,7 @@
ehrpwm1: pwm@200 {
compatible = "ti,am4372-ehrpwm",
- "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>;
@@ -1851,10 +1847,9 @@
ranges = <0 0 0x1000>;
status = "disabled";
- ecap2: ecap@100 {
+ ecap2: pwm@100 {
compatible = "ti,am4372-ecap",
- "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x100 0x80>;
clocks = <&l4ls_gclk>;
@@ -1864,8 +1859,7 @@
ehrpwm2: pwm@200 {
compatible = "ti,am4372-ehrpwm",
- "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>;
@@ -1905,8 +1899,7 @@
ehrpwm3: pwm@200 {
compatible = "ti,am4372-ehrpwm",
- "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm3_tbclk>, <&l4ls_gclk>;
@@ -1946,8 +1939,7 @@
ehrpwm4: pwm@48308200 {
compatible = "ti,am4372-ehrpwm",
- "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm4_tbclk>, <&l4ls_gclk>;
@@ -1987,8 +1979,7 @@
ehrpwm5: pwm@200 {
compatible = "ti,am4372-ehrpwm",
- "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x200 0x80>;
clocks = <&ehrpwm5_tbclk>, <&l4ls_gclk>;
@@ -2070,7 +2061,7 @@
};
};
- target-module@22000 { /* 0x48322000, ap 116 64.0 */
+ gpio5_target: target-module@22000 { /* 0x48322000, ap 116 64.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0x22000 0x4>,
<0x22010 0x4>,
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index f517d1e843cf..2aa75abf85a9 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -582,7 +582,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
tps65218: tps65218@24 {
reg = <0x24>;
@@ -725,7 +725,7 @@
pinctrl-0 = <&display_mux_pins>;
status = "okay";
- p1 {
+ sel-lcd-hdmi-hog {
/*
* SelLCDorHDMI selects between display and audio paths:
* Low: HDMI display with audio via HDMI
@@ -860,7 +860,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&spi0_pins_default>;
pinctrl-1 = <&spi0_pins_sleep>;
- ti,pindir-d0-out-d1-in = <1>;
+ ti,pindir-d0-out-d1-in;
};
&spi1 {
@@ -868,7 +868,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&spi1_pins_default>;
pinctrl-1 = <&spi1_pins_sleep>;
- ti,pindir-d0-out-d1-in = <1>;
+ ti,pindir-d0-out-d1-in;
};
&usb2_phy1 {
diff --git a/arch/arm/boot/dts/am5718.dtsi b/arch/arm/boot/dts/am5718.dtsi
index ebf4d3cc1cfb..6d7530a48c73 100644
--- a/arch/arm/boot/dts/am5718.dtsi
+++ b/arch/arm/boot/dts/am5718.dtsi
@@ -17,17 +17,13 @@
* VCP1, VCP2
* MLB
* ISS
- * USB3, USB4
+ * USB3
*/
&usb3_tm {
status = "disabled";
};
-&usb4_tm {
- status = "disabled";
-};
-
&atl_tm {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 0d5fe2bfb683..aed81568a297 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -454,20 +454,20 @@
&mailbox5 {
status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+ mbox_ipu1_ipc3x: mbox-ipu1-ipc3x {
status = "okay";
};
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+ mbox_dsp1_ipc3x: mbox-dsp1-ipc3x {
status = "okay";
};
};
&mailbox6 {
status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+ mbox_ipu2_ipc3x: mbox-ipu2-ipc3x {
status = "okay";
};
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+ mbox_dsp2_ipc3x: mbox-dsp2-ipc3x {
status = "okay";
};
};
@@ -610,12 +610,11 @@
>;
};
-&gpio3 {
- status = "okay";
+&gpio3_target {
ti,no-reset-on-init;
};
-&gpio2 {
+&gpio2_target {
status = "okay";
ti,no-reset-on-init;
};
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index a534a8e444d9..04e8a27ba1eb 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -148,7 +148,7 @@
usb: usb@4f000000 {
compatible = "nxp,usb-isp1761";
reg = <0x4f000000 0x20000>;
- port1-otg;
+ dr_mode = "peripheral";
};
bridge {
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index f925782f8560..366687fb1ee3 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -166,7 +166,7 @@
reg = <0x3b000000 0x20000>;
interrupt-parent = <&intc_fpga1176>;
interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
- port1-otg;
+ dr_mode = "peripheral";
};
bridge {
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 0c7dabef4a5f..228a51a38f95 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -712,7 +712,7 @@
reg = <0x4f000000 0x20000>;
interrupt-parent = <&intc_tc11mp>;
interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>;
- port1-otg;
+ dr_mode = "peripheral";
};
};
};
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
index ac95667ed781..ccf6f756b6ed 100644
--- a/arch/arm/boot/dts/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -164,7 +164,7 @@
usb: usb@4f000000 {
compatible = "nxp,usb-isp1761";
reg = <0x4f000000 0x20000>;
- port1-otg;
+ dr_mode = "peripheral";
};
bridge {
diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
index 8bec21ed0de5..583a241f1151 100644
--- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
@@ -13,7 +13,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=tty0 console=ttyS4,115200 earlyprintk";
+ bootargs = "console=tty0 console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts b/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts
new file mode 100644
index 000000000000..dd7148060c4a
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2021 IBM Corp.
+
+#include "aspeed-ast2600-evb.dts"
+
+/ {
+ model = "AST2600 A1 EVB";
+
+ /delete-node/regulator-vcc-sdhci0;
+ /delete-node/regulator-vcc-sdhci1;
+ /delete-node/regulator-vccq-sdhci0;
+ /delete-node/regulator-vccq-sdhci1;
+};
+
+/delete-node/ &sdc;
diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
index 2772796e215e..b7eb552640cb 100644
--- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
@@ -4,6 +4,7 @@
/dts-v1/;
#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
/ {
model = "AST2600 EVB";
@@ -21,6 +22,46 @@
device_type = "memory";
reg = <0x80000000 0x80000000>;
};
+
+ vcc_sdhci0: regulator-vcc-sdhci0 {
+ compatible = "regulator-fixed";
+ regulator-name = "SDHCI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio0 ASPEED_GPIO(V, 0) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vccq_sdhci0: regulator-vccq-sdhci0 {
+ compatible = "regulator-gpio";
+ regulator-name = "SDHCI0 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio0 ASPEED_GPIO(V, 1) GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1>,
+ <1800000 0>;
+ };
+
+ vcc_sdhci1: regulator-vcc-sdhci1 {
+ compatible = "regulator-fixed";
+ regulator-name = "SDHCI1 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio0 ASPEED_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vccq_sdhci1: regulator-vccq-sdhci1 {
+ compatible = "regulator-gpio";
+ regulator-name = "SDHCI1 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios = <&gpio0 ASPEED_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1>,
+ <1800000 0>;
+ };
};
&mdio0 {
@@ -107,7 +148,8 @@
&emmc {
non-removable;
bus-width = <4>;
- max-frequency = <52000000>;
+ max-frequency = <100000000>;
+ clk-phase-mmc-hs200 = <9>, <225>;
};
&rtc {
@@ -121,37 +163,7 @@
m25p,fast-read;
label = "bmc";
spi-max-frequency = <50000000>;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- u-boot@0 {
- reg = <0x0 0xe0000>; // 896KB
- label = "u-boot";
- };
-
- u-boot-env@e0000 {
- reg = <0xe0000 0x20000>; // 128KB
- label = "u-boot-env";
- };
-
- kernel@100000 {
- reg = <0x100000 0x900000>; // 9MB
- label = "kernel";
- };
-
- rofs@a00000 {
- reg = <0xa00000 0x2000000>; // 32MB
- label = "rofs";
- };
-
- rwfs@6000000 {
- reg = <0x2a00000 0x1600000>; // 22MB
- label = "rwfs";
- };
- };
+#include "openbmc-flash-layout-64.dtsi"
};
};
@@ -245,3 +257,46 @@
&uhci {
status = "okay";
};
+
+&sdc {
+ status = "okay";
+};
+
+/*
+ * The signal voltage of sdhci0 and sdhci1 on the AST2600-A2 EVB can be
+ * toggled by GPIO pins.
+ * In the reference design, GPIOV0 of the AST2600-A2 EVB is connected to the
+ * power load switch that provides 3.3v to the sdhci0 vdd, and GPIOV1 is
+ * connected to a 1.8v and a 3.3v power load switch that provides the signal
+ * voltage to the sdhci0 bus.
+ * If GPIOV0 is active high, sdhci0 is enabled; otherwise, sdhci0 is disabled.
+ * If GPIOV1 is active high, the 3.3v power load switch is enabled and the
+ * sdhci0 signal voltage is 3.3v; otherwise, the 1.8v power load switch is
+ * enabled and the sdhci0 signal voltage becomes 1.8v.
+ * The AST2600-A2 EVB also supports toggling the signal voltage for sdhci1.
+ * The design is the same as for sdhci0: GPIOV2 is used as the power-gpio
+ * and GPIOV3 as the power-switch-gpio.
+ */
+&sdhci0 {
+ status = "okay";
+ bus-width = <4>;
+ max-frequency = <100000000>;
+ sdhci-drive-type = /bits/ 8 <3>;
+ sdhci-caps-mask = <0x7 0x0>;
+ sdhci,wp-inverted;
+ vmmc-supply = <&vcc_sdhci0>;
+ vqmmc-supply = <&vccq_sdhci0>;
+ clk-phase-sd-hs = <7>, <200>;
+};
+
+&sdhci1 {
+ status = "okay";
+ bus-width = <4>;
+ max-frequency = <100000000>;
+ sdhci-drive-type = /bits/ 8 <3>;
+ sdhci-caps-mask = <0x7 0x0>;
+ sdhci,wp-inverted;
+ vmmc-supply = <&vcc_sdhci1>;
+ vqmmc-supply = <&vccq_sdhci1>;
+ clk-phase-sd-hs = <7>, <200>;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
index 6aeb47c44eba..79d17841b3d7 100644
--- a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
@@ -34,7 +34,7 @@
};
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
leds {
compatible = "gpio-leds";
diff --git a/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts b/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts
index 8f5ec22e51c2..57b0c45a2298 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts
@@ -9,7 +9,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
@@ -110,6 +110,30 @@
linux,code = <ASPEED_GPIO(Q, 5)>;
};
+ psu1_vin_good {
+ label = "PSU1_VIN_GOOD";
+ gpios = <&gpio ASPEED_GPIO(H, 4) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(H, 4)>;
+ };
+
+ psu2_vin_good {
+ label = "PSU2_VIN_GOOD";
+ gpios = <&gpio ASPEED_GPIO(H, 5) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(H, 5)>;
+ };
+
+ psu1_present {
+ label = "PSU1_PRESENT";
+ gpios = <&gpio ASPEED_GPIO(I, 0) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(I, 0)>;
+ };
+
+ psu2_present {
+ label = "PSU2_PRESENT";
+ gpios = <&gpio ASPEED_GPIO(I, 1) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(I, 1)>;
+ };
+
};
gpioA0mux: mux-controller {
@@ -280,7 +304,7 @@
m25p,fast-read;
label = "bmc";
/* spi-max-frequency = <50000000>; */
-#include "openbmc-flash-layout.dtsi"
+#include "openbmc-flash-layout-64.dtsi"
};
};
@@ -332,6 +356,16 @@
status = "okay";
};
+&mac0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>,
+ <&syscon ASPEED_CLK_MAC1RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
&mac1 {
status = "okay";
pinctrl-names = "default";
@@ -426,6 +460,19 @@
status = "okay";
};
+&i2c10 {
+ status = "okay";
+ adm1278@10 {
+ compatible = "adi,adm1278";
+ reg = <0x10>;
+ };
+
+ adm1278@11 {
+ compatible = "adi,adm1278";
+ reg = <0x11>;
+ };
+};
+
&gfx {
status = "okay";
memory-region = <&gfx_memory>;
@@ -529,8 +576,9 @@
"S1_DDR_SAVE","","",
/*G0-G7*/ "S0_FW_BOOT_OK","SHD_REQ_L","","S0_OVERTEMP_L","","",
"","",
- /*H0-H7*/ "","","","","","","","",
- /*I0-I7*/ "","","S1_BMC_SPECIAL_BOOT","","","","","",
+ /*H0-H7*/ "","","","","PSU1_VIN_GOOD","PSU2_VIN_GOOD","","",
+ /*I0-I7*/ "PSU1_PRESENT","PSU2_PRESENT","S1_BMC_SPECIAL_BOOT",
+ "","","","","",
/*J0-J7*/ "S0_HIGHTEMP_L","S0_FAULT_L","S0_SCP_AUTH_FAIL_L","",
"","","","",
/*K0-K7*/ "","","","","","","","",
@@ -540,7 +588,8 @@
/*O0-O7*/ "","","","","","","","",
/*P0-P7*/ "","","","","","","","",
/*Q0-Q7*/ "","","","","","UID_BUTTON","","",
- /*R0-R7*/ "","","BMC_EXT_HIGHTEMP_L","","","RESET_BUTTON","","",
+ /*R0-R7*/ "","","BMC_EXT_HIGHTEMP_L","OCP_AUX_PWREN",
+ "OCP_MAIN_PWREN","RESET_BUTTON","","",
/*S0-S7*/ "","","","","","","","",
/*T0-T7*/ "","","","","","","","",
/*U0-U7*/ "","","","","","","","",
diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
index c2ece0b91885..3395de96ee11 100644
--- a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts b/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts
index 2c29ac037d32..7c6af7f226e7 100644
--- a/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
index dcab6e78dfa4..9b4cf5ebe6d5 100644
--- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
@@ -4,6 +4,7 @@
#include "aspeed-g5.dtsi"
#include <dt-bindings/gpio/aspeed-gpio.h>
#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/{
model = "ASRock E3C246D4I BMC";
@@ -15,7 +16,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=tty0 console=ttyS4,115200 earlyprintk";
+ bootargs = "console=tty0 console=ttyS4,115200 earlycon";
};
memory@80000000 {
@@ -73,7 +74,8 @@
&vuart {
status = "okay";
- aspeed,sirq-active-high;
+ aspeed,lpc-io-reg = <0x2f8>;
+ aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
};
&mac0 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts b/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts
index 5ef88c377358..01dace8f5e5f 100644
--- a/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts
@@ -55,7 +55,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts
index 2fb8b147f489..90a3f485c67a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts
@@ -280,7 +280,7 @@
chosen {
stdout-path = &uart1;
- bootargs = "console=ttyS1,9600n8 root=/dev/ram rw earlyprintk";
+ bootargs = "console=ttyS1,9600n8 root=/dev/ram rw earlycon";
};
ast-adc-hwmon {
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
index 7b4b2b126ad8..b6b16356f571 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
@@ -37,7 +37,7 @@
};
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
index 3295c8c7c05c..aa24cac8e5be 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
@@ -202,6 +202,35 @@
linux,code = <12>;
};
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ /* RTC battery fault LED at the back */
+ led-rtc-battery {
+ gpios = <&gpio0 ASPEED_GPIO(H, 0) GPIO_ACTIVE_LOW>;
+ };
+
+ /* BMC Card fault LED at the back */
+ led-bmc {
+ gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure Identify LED at the back */
+ led-rear-enc-id0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure fault LED at the back */
+ led-rear-enc-fault0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>;
+ };
+
+ /* PCIE slot power LED */
+ led-pcieslot-power {
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
+ };
+ };
};
&gpio0 {
@@ -214,7 +243,7 @@
/*F0-F7*/ "PIN_HOLE_RESET_IN_N","","",
"PIN_HOLE_RESET_OUT_N","","","","",
/*G0-G7*/ "","","","","","","","",
- /*H0-H7*/ "","","","","","","","",
+ /*H0-H7*/ "led-rtc-battery","led-bmc","led-rear-enc-id0","led-rear-enc-fault0","","","","",
/*I0-I7*/ "","","","","","","","",
/*J0-J7*/ "","","","","","","","",
/*K0-K7*/ "","","","","","","","",
@@ -222,7 +251,7 @@
/*M0-M7*/ "","","","","","","","",
/*N0-N7*/ "","","","","","","","",
/*O0-O7*/ "","","","","","","","",
- /*P0-P7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","led-pcieslot-power","","","",
/*Q0-Q7*/ "","","","","","","","",
/*R0-R7*/ "","","","","","I2C_FLASH_MICRO_N","","",
/*S0-S7*/ "","","","","","","","",
@@ -353,10 +382,47 @@
&i2c1 {
status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+
+ power-supply@68 {
+ compatible = "ibm,cffps";
+ reg = <0x68>;
+ };
- pca2: pca9552@61 {
+ power-supply@69 {
+ compatible = "ibm,cffps";
+ reg = <0x69>;
+ };
+
+ power-supply@6b {
+ compatible = "ibm,cffps";
+ reg = <0x6b>;
+ };
+
+ power-supply@6d {
+ compatible = "ibm,cffps";
+ reg = <0x6d>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ pca2: pca9552@65 {
compatible = "nxp,pca9552";
- reg = <0x61>;
+ reg = <0x65>;
#address-cells = <1>;
#size-cells = <0>;
@@ -424,12 +490,222 @@
reg = <9>;
type = <PCA955X_TYPE_GPIO>;
};
+ };
+
+ i2c-switch@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
+
+ i2c4mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+
+ pca_cable_card_c01: pca9551@62 {
+ compatible = "nxp,pca9551";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c01-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c01-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+ };
+
+ i2c4mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ pca_cable_card_c02: pca9551@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c02-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c02-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+ };
+
+ i2c4mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ pca_cable_card_c03: pca9551@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c03-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c03-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+ };
};
+};
+
+&i2c5 {
+ status = "okay";
- pca3: pca9552@62 {
+ pca3: pca9552@66 {
compatible = "nxp,pca9552";
- reg = <0x62>;
+ reg = <0x66>;
#address-cells = <1>;
#size-cells = <0>;
@@ -512,44 +788,6 @@
};
-};
-
-&i2c2 {
- status = "okay";
-};
-
-&i2c3 {
- status = "okay";
-
- eeprom@54 {
- compatible = "atmel,24c128";
- reg = <0x54>;
- };
-
- power-supply@68 {
- compatible = "ibm,cffps";
- reg = <0x68>;
- };
-
- power-supply@69 {
- compatible = "ibm,cffps";
- reg = <0x69>;
- };
-
- power-supply@6a {
- compatible = "ibm,cffps";
- reg = <0x6a>;
- };
-
- power-supply@6b {
- compatible = "ibm,cffps";
- reg = <0x6b>;
- };
-};
-
-&i2c4 {
- status = "okay";
-
i2c-switch@70 {
compatible = "nxp,pca9546";
reg = <0x70>;
@@ -558,56 +796,69 @@
status = "okay";
i2c-mux-idle-disconnect;
- i2c4mux0chn0: i2c@0 {
+ i2c5mux0chn0: i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
- eeprom@52 {
- compatible = "atmel,24c64";
- reg = <0x52>;
- };
- };
-
- i2c4mux0chn1: i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
};
- };
- i2c4mux0chn2: i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <2>;
- eeprom@51 {
- compatible = "atmel,24c64";
- reg = <0x51>;
- };
- };
- };
-};
+ pca_cable_card_c04: pca9551@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
-&i2c5 {
- status = "okay";
+ gpio-controller;
+ #gpio-cells = <2>;
- i2c-switch@70 {
- compatible = "nxp,pca9546";
- reg = <0x70>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
- i2c-mux-idle-disconnect;
+ led@0 {
+ label = "cablecard-c04-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
- i2c5mux0chn0: i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
+ led@1 {
+ label = "cablecard-c04-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
};
};
@@ -619,6 +870,62 @@
compatible = "atmel,24c64";
reg = <0x51>;
};
+
+ pca_cable_card_c05: pca9551@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c05-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c05-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
i2c5mux0chn2: i2c@2 {
@@ -629,6 +936,62 @@
compatible = "atmel,24c64";
reg = <0x52>;
};
+
+ pca_cable_card_c06: pca9551@62 {
+ compatible = "nxp,pca9551";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c06-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c06-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
i2c5mux0chn3: i2c@3 {
@@ -639,6 +1002,62 @@
compatible = "atmel,24c64";
reg = <0x53>;
};
+
+ pca_cable_card_c07: pca9551@63 {
+ compatible = "nxp,pca9551";
+ reg = <0x63>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c07-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c07-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
};
};
@@ -659,8 +1078,64 @@
#size-cells = <0>;
reg = <0>;
eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ pca_cable_card_c08: pca9551@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c08-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c08-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
};
};
@@ -672,6 +1147,62 @@
compatible = "atmel,24c64";
reg = <0x52>;
};
+
+ pca_cable_card_c09: pca9551@62 {
+ compatible = "nxp,pca9551";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c09-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c09-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
i2c6mux0chn2: i2c@2 {
@@ -682,6 +1213,62 @@
compatible = "atmel,24c64";
reg = <0x53>;
};
+
+ pca_cable_card_c10: pca9551@63 {
+ compatible = "nxp,pca9551";
+ reg = <0x63>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c10-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c10-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
i2c6mux0chn3: i2c@3 {
@@ -692,12 +1279,1013 @@
compatible = "atmel,24c64";
reg = <0x51>;
};
+
+ pca_cable_card_c11: pca9551@61 {
+ compatible = "nxp,pca9551";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "cablecard-c11-cxp-top";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "cablecard-c11-cxp-bot";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+ };
+ };
+
+ pca_pcie_slot: pca9552@65 {
+ compatible = "nxp,pca9552";
+ reg = <0x65>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ led@1 {
+ label = "pcieslot-c01";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "pcieslot-c02";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "pcieslot-c03";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "pcieslot-c04";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "pcieslot-c05";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "pcieslot-c06";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "pcieslot-c07";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "pcieslot-c08";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "pcieslot-c09";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "pcieslot-c10";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "pcieslot-c11";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@12 {
+ reg = <12>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@13 {
+ reg = <13>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@14 {
+ reg = <14>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@15 {
+ reg = <15>;
+ type = <PCA955X_TYPE_GPIO>;
};
};
};
&i2c7 {
status = "okay";
+
+ pic0_dimm: pca9552@31 {
+ compatible = "ibm,pca9552";
+ reg = <0x31>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "ddimm0";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "ddimm1";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "ddimm2";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "ddimm3";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "ddimm4";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "ddimm5";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "ddimm6";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "ddimm7";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "ddimm8";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "ddimm9";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "ddimm10";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "ddimm11";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "ddimm12";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "ddimm13";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "ddimm14";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "ddimm15";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pic1_dimm: pca9552@32 {
+ compatible = "ibm,pca9552";
+ reg = <0x32>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "ddimm16";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "ddimm17";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "ddimm18";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "ddimm19";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "ddimm20";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "ddimm21";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "ddimm22";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "ddimm23";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "ddimm24";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "ddimm25";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "ddimm26";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "ddimm27";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "ddimm28";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "ddimm29";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "ddimm30";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "ddimm31";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pic2_dimm: pca9552@33 {
+ compatible = "ibm,pca9552";
+ reg = <0x33>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "ddimm32";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "ddimm33";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "ddimm34";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "ddimm35";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "ddimm36";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "ddimm37";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "ddimm38";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "ddimm39";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "ddimm40";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "ddimm41";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "ddimm42";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "ddimm43";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "ddimm44";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "ddimm45";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "ddimm46";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "ddimm47";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pic3_dimm: pca9552@30 {
+ compatible = "ibm,pca9552";
+ reg = <0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "ddimm48";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "ddimm49";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "ddimm50";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "ddimm51";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "ddimm52";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "ddimm53";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "ddimm54";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "ddimm55";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "ddimm56";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "ddimm57";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "ddimm58";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "ddimm59";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "ddimm60";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "ddimm61";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "ddimm62";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "ddimm63";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pic0_vrm_misc: pca9552@34 {
+ compatible = "ibm,pca9552";
+ reg = <0x34>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "planar";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "tpm";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "cpu3-c61";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "cpu0-c14";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "opencapi-connector3";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "opencapi-connector4";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "opencapi-connector5";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ led@8 {
+ label = "vrm4";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "vrm5";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "vrm6";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "vrm7";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "vrm12";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "vrm13";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "vrm14";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "vrm15";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
+
+ pic1_vrm_misc: pca9552@35 {
+ compatible = "ibm,pca9552";
+ reg = <0x35>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "dasd-backplane";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "power-distribution";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "cpu1-c19";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "cpu2-c56";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "opencapi-connector0";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "opencapi-connector1";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "opencapi-connector2";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ led@8 {
+ label = "vrm0";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "vrm1";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "vrm2";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "vrm3";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "vrm8";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "vrm9";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@14 {
+ label = "vrm10";
+ reg = <14>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@15 {
+ label = "vrm11";
+ reg = <15>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
};
&i2c8 {
@@ -863,6 +2451,48 @@
compatible = "atmel,24c32";
reg = <0x50>;
};
+
+ pca_oppanel: pca9551@60 {
+ compatible = "nxp,pca9551";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "front-sys-id0";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "front-check-log0";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "front-enc-fault1";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "front-sys-pwron0";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+ };
};
i2c14mux0chn3: i2c@3 {
@@ -901,6 +2531,138 @@
};
};
+ pca_fan_nvme: pca9552@60 {
+ compatible = "nxp,pca9552";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ led@0 {
+ label = "nvme0";
+ reg = <0>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@1 {
+ label = "nvme1";
+ reg = <1>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@2 {
+ label = "nvme2";
+ reg = <2>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@3 {
+ label = "nvme3";
+ reg = <3>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@4 {
+ label = "nvme4";
+ reg = <4>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@5 {
+ label = "nvme5";
+ reg = <5>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@6 {
+ label = "nvme6";
+ reg = <6>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@7 {
+ label = "nvme7";
+ reg = <7>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@8 {
+ label = "nvme8";
+ reg = <8>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@9 {
+ label = "nvme9";
+ reg = <9>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@10 {
+ label = "fan0";
+ reg = <10>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@11 {
+ label = "fan1";
+ reg = <11>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@12 {
+ label = "fan2";
+ reg = <12>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ led@13 {
+ label = "fan3";
+ reg = <13>;
+ retain-state-shutdown;
+ default-state = "keep";
+ type = <PCA955X_TYPE_LED>;
+ };
+
+ gpio@14 {
+ reg = <14>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@15 {
+ reg = <15>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+
pca0: pca9552@61 {
compatible = "nxp,pca9552";
#address-cells = <1>;
@@ -1070,6 +2832,7 @@
&emmc {
status = "okay";
+ clk-phase-mmc-hs200 = <180>, <180>;
};
&fsim0 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts
index f7fd3b3c90d0..342546a3c0f5 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts
@@ -19,33 +19,3 @@
reg = <0x6b>;
};
};
-
-&fan0 {
- tach-pulses = <4>;
- /delete-property/ maxim,fan-dual-tach;
-};
-
-&fan1 {
- tach-pulses = <4>;
- /delete-property/ maxim,fan-dual-tach;
-};
-
-&fan2 {
- tach-pulses = <4>;
- /delete-property/ maxim,fan-dual-tach;
-};
-
-&fan3 {
- tach-pulses = <4>;
- /delete-property/ maxim,fan-dual-tach;
-};
-
-&fan4 {
- tach-pulses = <4>;
- /delete-property/ maxim,fan-dual-tach;
-};
-
-&fan5 {
- tach-pulses = <4>;
- /delete-property/ maxim,fan-dual-tach;
-};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index 941c0489479a..481d0ee1f85f 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -280,10 +280,7 @@
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","","","","","","",
/*Y0-Y7*/ "","","","","","","","",
- /*Z0-Z7*/ "","","","","","","","",
- /*AA0-AA7*/ "","","","","","","","",
- /*AB0-AB7*/ "","","","","","","","",
- /*AC0-AC7*/ "","","","","","","","";
+ /*Z0-Z7*/ "","","","","","","","";
pin_mclr_vpp {
gpio-hog;
diff --git a/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts b/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
index 07593897fc9a..1752f3250e44 100644
--- a/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts b/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts
index 80c92e065a10..5a98a19f445e 100644
--- a/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts
@@ -11,7 +11,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "earlyprintk";
+ bootargs = "earlycon";
};
memory {
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
index 6e9baf3bba53..d5b7d28cda88 100644
--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "earlyprintk";
+ bootargs = "earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-lenovo-hr630.dts b/arch/arm/boot/dts/aspeed-bmc-lenovo-hr630.dts
index c29e5f4d86ad..8f543cca7c21 100644
--- a/arch/arm/boot/dts/aspeed-bmc-lenovo-hr630.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-lenovo-hr630.dts
@@ -27,7 +27,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=tty0 console=ttyS4,115200 earlyprintk";
+ bootargs = "console=tty0 console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-lenovo-hr855xg2.dts b/arch/arm/boot/dts/aspeed-bmc-lenovo-hr855xg2.dts
index 084c455ad4cb..bcc1820f5c07 100644
--- a/arch/arm/boot/dts/aspeed-bmc-lenovo-hr855xg2.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-lenovo-hr855xg2.dts
@@ -27,7 +27,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=tty0 console=ttyS4,115200 earlyprintk";
+ bootargs = "console=tty0 console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-microsoft-olympus.dts b/arch/arm/boot/dts/aspeed-bmc-microsoft-olympus.dts
index 73319917cb74..3ef8358ff764 100644
--- a/arch/arm/boot/dts/aspeed-bmc-microsoft-olympus.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-microsoft-olympus.dts
@@ -11,7 +11,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@40000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
index 42b37a204241..c0847636f20b 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
@@ -11,7 +11,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
index 15c1f0ac81dc..a52a289cee85 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
@@ -57,7 +57,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts
index 8503152faaf0..7d38d121ec6d 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts
@@ -11,7 +11,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts b/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts
index 91dced7e7849..3d4bdad27c2d 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
index eb4e93a57ff4..cd660c1ff3f5 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@40000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
index fd2e014dae75..084f54866f38 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
@@ -9,7 +9,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts b/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts
index d56b5ed09b37..4816486c0c9e 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-swift.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
index c1478d2db602..e33153dcaea8 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
@@ -13,7 +13,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200n8";
+ bootargs = "console=ttyS4,115200n8 earlycon";
};
memory@80000000 {
@@ -136,10 +136,7 @@
/*W0-W7*/ "","","","","","","","",
/*X0-X7*/ "","","","","","","","",
/*Y0-Y7*/ "","","","","","","","",
- /*Z0-Z7*/ "","","","","","","","",
- /*AA0-AA7*/ "","","","","","","","",
- /*AB0-AB7*/ "","","","","","","","",
- /*AC0-AC7*/ "","","","","","","","";
+ /*Z0-Z7*/ "","","","","","","","";
};
&fmc {
@@ -189,6 +186,7 @@
&emmc {
status = "okay";
+ clk-phase-mmc-hs200 = <36>, <270>;
};
&fsim0 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts b/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts
index 01074b6e3e03..328ef472c479 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-vesnin.dts
@@ -11,7 +11,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@40000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
index 85d58a63ae90..230f3584bcab 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
@@ -10,7 +10,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts b/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
index 4bcc82046362..7ae4ea0d2931 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
@@ -17,7 +17,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
index 03c161493ffc..61bc74b423cf 100644
--- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
@@ -14,7 +14,7 @@
};
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts b/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts
index a68ff0675c28..9605e53f5bbf 100644
--- a/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts
@@ -28,7 +28,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@40000000 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts b/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts
index bc16ad2b5c80..50f3c6a5c0c8 100644
--- a/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts
@@ -11,7 +11,7 @@
chosen {
stdout-path = &uart5;
- bootargs = "earlyprintk";
+ bootargs = "earlycon";
};
memory@80000000 {
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index d733c1f161c1..329eaeef66fb 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -264,6 +264,7 @@
reg-io-width = <4>;
clocks = <&syscon ASPEED_CLK_GATE_D1CLK>;
resets = <&syscon ASPEED_RESET_CRT1>;
+ syscon = <&syscon>;
status = "disabled";
interrupts = <0x19>;
};
diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
index 7028e21bdd98..7e90d713f5e5 100644
--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
@@ -862,11 +862,21 @@
groups = "SGPM1";
};
+ pinctrl_sgpm2_default: sgpm2_default {
+ function = "SGPM2";
+ groups = "SGPM2";
+ };
+
pinctrl_sgps1_default: sgps1_default {
function = "SGPS1";
groups = "SGPS1";
};
+ pinctrl_sgps2_default: sgps2_default {
+ function = "SGPS2";
+ groups = "SGPS2";
+ };
+
pinctrl_sioonctrl_default: sioonctrl_default {
function = "SIOONCTRL";
groups = "SIOONCTRL";
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 0025c88f660c..8ecb7861ce10 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -460,7 +460,7 @@
status = "disabled";
};
- nand: nand@18046000 {
+ nand_controller: nand-controller@18046000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
reg = <0x18046000 0x600>, <0xf8105408 0x600>,
<0x18046f00 0x20>;
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index e8df458aad39..84cda16f68a2 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -179,7 +179,7 @@
status = "disabled";
};
- nand: nand@26000 {
+ nand_controller: nand-controller@26000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
reg = <0x26000 0x600>,
<0x11b408 0x600>,
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b4d2cc70afb1..748df7955ae6 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -269,7 +269,7 @@
dma-coherent;
};
- nand: nand@26000 {
+ nand_controller: nand-controller@26000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
reg = <0x026000 0x600>,
<0x11b408 0x600>,
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index 3b4ab947492a..f24bdd0870a5 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -1,11 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
#include "bcm2711.dtsi"
-#include "bcm2835-rpi.dtsi"
+#include "bcm2711-rpi.dtsi"
#include "bcm283x-rpi-usb-peripheral.dtsi"
-#include <dt-bindings/reset/raspberrypi,firmware-reset.h>
-
/ {
compatible = "raspberrypi,4-model-b", "brcm,bcm2711";
model = "Raspberry Pi 4 Model B";
@@ -15,25 +13,12 @@
stdout-path = "serial1:115200n8";
};
- /* Will be filled by the bootloader */
- memory@0 {
- device_type = "memory";
- reg = <0 0 0>;
- };
-
- aliases {
- emmc2bus = &emmc2bus;
- ethernet0 = &genet;
- pcie0 = &pcie0;
- blconfig = &blconfig;
- };
-
leds {
- act {
+ led-act {
gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
default-state = "keep";
@@ -79,31 +64,15 @@
status = "okay";
};
-&firmware {
- firmware_clocks: clocks {
- compatible = "raspberrypi,firmware-clocks";
- #clock-cells = <1>;
- };
-
- expgpio: gpio {
- compatible = "raspberrypi,firmware-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-line-names = "BT_ON",
- "WL_ON",
- "PWR_LED_OFF",
- "GLOBAL_RESET",
- "VDD_SD_IO_SEL",
- "CAM_GPIO",
- "SD_PWR_ON",
- "";
- status = "okay";
- };
-
- reset: reset {
- compatible = "raspberrypi,firmware-reset";
- #reset-cells = <1>;
- };
+&expgpio {
+ gpio-line-names = "BT_ON",
+ "WL_ON",
+ "PWR_LED_OFF",
+ "GLOBAL_RESET",
+ "VDD_SD_IO_SEL",
+ "CAM_GPIO",
+ "SD_PWR_ON",
+ "";
};
&gpio {
@@ -180,23 +149,13 @@
};
&hdmi0 {
- clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 0>, <&clk_27MHz>;
- clock-names = "hdmi", "bvb", "audio", "cec";
- wifi-2.4ghz-coexistence;
status = "okay";
};
&hdmi1 {
- clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 1>, <&clk_27MHz>;
- clock-names = "hdmi", "bvb", "audio", "cec";
- wifi-2.4ghz-coexistence;
status = "okay";
};
-&hvs {
- clocks = <&firmware_clocks 4>;
-};
-
&pixelvalve0 {
status = "okay";
};
@@ -219,22 +178,6 @@
status = "okay";
};
-&rmem {
- /*
- * RPi4's co-processor will copy the board's bootloader configuration
- * into memory for the OS to consume. It'll also update this node with
- * its placement information.
- */
- blconfig: nvram@0 {
- compatible = "raspberrypi,bootloader-config", "nvmem-rmem";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x0 0x0 0x0>;
- no-map;
- status = "disabled";
- };
-};
-
/* SDHCI is used to control the SDIO for wireless */
&sdhci {
#address-cells = <1>;
@@ -309,10 +252,6 @@
status = "okay";
};
-&vchiq {
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
-};
-
&vc4 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm2711-rpi-400.dts b/arch/arm/boot/dts/bcm2711-rpi-400.dts
new file mode 100644
index 000000000000..f4d2fc20397c
--- /dev/null
+++ b/arch/arm/boot/dts/bcm2711-rpi-400.dts
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+#include "bcm2711-rpi-4-b.dts"
+
+/ {
+ compatible = "raspberrypi,400", "brcm,bcm2711";
+ model = "Raspberry Pi 400";
+
+ chosen {
+ /* 8250 auxiliary UART instead of pl011 */
+ stdout-path = "serial1:115200n8";
+ };
+
+ leds {
+ /delete-node/ led-act;
+
+ led-pwr {
+ gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&expgpio 5 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&expgpio {
+ gpio-line-names = "BT_ON",
+ "WL_ON",
+ "",
+ "GLOBAL_RESET",
+ "VDD_SD_IO_SEL",
+ "CAM_GPIO",
+ "SD_PWR_ON",
+ "SD_OC_N";
+};
+
+&genet_mdio {
+ clock-frequency = <1950000>;
+};
+
+&pm {
+ /delete-property/ system-power-controller;
+};
diff --git a/arch/arm/boot/dts/bcm2711-rpi.dtsi b/arch/arm/boot/dts/bcm2711-rpi.dtsi
new file mode 100644
index 000000000000..ca266c5d9f9b
--- /dev/null
+++ b/arch/arm/boot/dts/bcm2711-rpi.dtsi
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcm2835-rpi.dtsi"
+
+#include <dt-bindings/reset/raspberrypi,firmware-reset.h>
+
+/ {
+ /* Will be filled by the bootloader */
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0 0>;
+ };
+
+ aliases {
+ emmc2bus = &emmc2bus;
+ ethernet0 = &genet;
+ pcie0 = &pcie0;
+ blconfig = &blconfig;
+ };
+};
+
+&firmware {
+ firmware_clocks: clocks {
+ compatible = "raspberrypi,firmware-clocks";
+ #clock-cells = <1>;
+ };
+
+ expgpio: gpio {
+ compatible = "raspberrypi,firmware-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "okay";
+ };
+
+ reset: reset {
+ compatible = "raspberrypi,firmware-reset";
+ #reset-cells = <1>;
+ };
+};
+
+&hdmi0 {
+ clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 0>, <&clk_27MHz>;
+ clock-names = "hdmi", "bvb", "audio", "cec";
+ wifi-2.4ghz-coexistence;
+};
+
+&hdmi1 {
+ clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 1>, <&clk_27MHz>;
+ clock-names = "hdmi", "bvb", "audio", "cec";
+ wifi-2.4ghz-coexistence;
+};
+
+&hvs {
+ clocks = <&firmware_clocks 4>;
+};
+
+&rmem {
+ /*
+ * RPi4's co-processor will copy the board's bootloader configuration
+ * into memory for the OS to consume. It'll also update this node with
+ * its placement information.
+ */
+ blconfig: nvram@0 {
+ compatible = "raspberrypi,bootloader-config", "nvmem-rmem";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0 0x0 0x0>;
+ no-map;
+ status = "disabled";
+ };
+};
+
+&vchiq {
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 720beec54d61..b8a4096192aa 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -413,7 +413,7 @@
ranges = <0x0 0x7e000000 0x0 0xfe000000 0x01800000>;
dma-ranges = <0x0 0xc0000000 0x0 0x00000000 0x40000000>;
- emmc2: emmc2@7e340000 {
+ emmc2: mmc@7e340000 {
compatible = "brcm,bcm2711-emmc2";
reg = <0x0 0x7e340000 0x100>;
interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
@@ -1087,5 +1087,6 @@
};
&vec {
+ compatible = "brcm,bcm2711-vec";
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
index 6c8ce39833bf..40b9405f1a8e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
@@ -14,11 +14,11 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-pwr {
label = "PWR";
gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
default-state = "keep";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts
index 17fdd48346ff..11edb581dbaf 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a.dts
@@ -14,7 +14,7 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 16 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index b0355c229cdc..1b435c64bd9c 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -15,11 +15,11 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-pwr {
label = "PWR";
gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
default-state = "keep";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 33b3b5c02521..a23c25c00eea 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -15,7 +15,7 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 16 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
index 2b69957e0113..1b63d6b19750 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
@@ -15,7 +15,7 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 16 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-cm1.dtsi b/arch/arm/boot/dts/bcm2835-rpi-cm1.dtsi
index 58059c2ce129..e4e6b6abbfc1 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-cm1.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi-cm1.dtsi
@@ -5,7 +5,7 @@
/ {
leds {
- act {
+ led-act {
gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index f65448c01e31..33b2b77aa47d 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -23,7 +23,7 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
index 6dd93c6f4966..6f9b3a908f28 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
@@ -18,7 +18,7 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index d94357b21f7e..87ddcad76083 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -4,7 +4,7 @@
leds {
compatible = "gpio-leds";
- act {
+ led-act {
label = "ACT";
default-state = "keep";
linux,default-trigger = "heartbeat";
diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
index 0455a680394a..d8af8eeac7b6 100644
--- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
@@ -15,11 +15,11 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-pwr {
label = "PWR";
gpios = <&gpio 35 GPIO_ACTIVE_HIGH>;
default-state = "keep";
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
index 28be0332c1c8..77099a7871b0 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
@@ -19,11 +19,11 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 29 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
default-state = "keep";
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
index 37343148643d..61010266ca9a 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
@@ -20,11 +20,11 @@
};
leds {
- act {
+ led-act {
gpios = <&gpio 29 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
default-state = "keep";
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index 054ecaa355c9..dd4a48604097 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -20,7 +20,7 @@
};
leds {
- act {
+ led-act {
gpios = <&expgpio 2 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi b/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
index 925cb37c22f0..828a20561b96 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
+++ b/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
@@ -14,7 +14,7 @@
* Since there is no upstream GPIO driver yet,
* remove the incomplete node.
*/
- /delete-node/ act;
+ /delete-node/ led-act;
};
reg_3v3: fixed-regulator {
diff --git a/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi b/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
index 20322de2f8bf..e2fd9610e125 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
&usb {
dr_mode = "otg";
- g-rx-fifo-size = <558>;
+ g-rx-fifo-size = <256>;
g-np-tx-fifo-size = <32>;
/*
* According to dwc2 the sum of all device EP
diff --git a/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi b/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi
index 1409d1b559c1..0ff0e9e25327 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-usb-peripheral.dtsi
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
&usb {
dr_mode = "peripheral";
- g-rx-fifo-size = <558>;
+ g-rx-fifo-size = <256>;
g-np-tx-fifo-size = <32>;
g-tx-fifo-size = <256 256 512 512 512 768 768>;
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index b83a864e2e8b..0f3be55201a5 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -420,7 +420,7 @@
status = "disabled";
};
- sdhci: sdhci@7e300000 {
+ sdhci: mmc@7e300000 {
compatible = "brcm,bcm2835-sdhci";
reg = <0x7e300000 0x100>;
interrupts = <2 30>;
diff --git a/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
index 8636600385fd..c81944cd6d0b 100644
--- a/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
+++ b/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
@@ -24,8 +24,8 @@
reg = <0x00000000 0x08000000>;
};
- nand: nand@18028000 {
- nandcs@0 {
+ nand_controller: nand-controller@18028000 {
+ nand@0 {
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index e635a15041dd..a6e2aeb28675 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -25,8 +25,8 @@
<0x88000000 0x08000000>;
};
- nand: nand@18028000 {
- nandcs@0 {
+ nand_controller: nand-controller@18028000 {
+ nand@0 {
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm47094.dtsi b/arch/arm/boot/dts/bcm47094.dtsi
index 2a8f7312d1be..6282363313e1 100644
--- a/arch/arm/boot/dts/bcm47094.dtsi
+++ b/arch/arm/boot/dts/bcm47094.dtsi
@@ -11,7 +11,7 @@
&pinctrl {
compatible = "brcm,bcm4709-pinmux";
- pinmux_mdio: mdio {
+ pinmux_mdio: mdio-pins {
groups = "mdio_grp";
function = "mdio";
};
diff --git a/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi b/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi
index 925a7c9ce5b7..be9a00ff752d 100644
--- a/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi
+++ b/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi
@@ -6,8 +6,8 @@
*/
/ {
- nand@18028000 {
- nandcs: nandcs@0 {
+ nand-controller@18028000 {
+ nandcs: nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 7db72a2f1020..f92089290ccd 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -458,18 +458,18 @@
function = "spi";
};
- pinmux_i2c: i2c {
+ pinmux_i2c: i2c-pins {
groups = "i2c_grp";
function = "i2c";
};
- pinmux_pwm: pwm {
+ pinmux_pwm: pwm-pins {
groups = "pwm0_grp", "pwm1_grp",
"pwm2_grp", "pwm3_grp";
function = "pwm";
};
- pinmux_uart1: uart1 {
+ pinmux_uart1: uart1-pins {
groups = "uart1_grp";
function = "uart1";
};
@@ -501,7 +501,7 @@
reg = <0x18004000 0x14>;
};
- nand: nand@18028000 {
+ nand_controller: nand-controller@18028000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1", "brcm,brcmnand";
reg = <0x18028000 0x600>, <0x1811a408 0x600>, <0x18028f00 0x20>;
reg-names = "nand", "iproc-idm", "iproc-ext";
@@ -520,27 +520,27 @@
<0x1811b408 0x004>,
<0x180293a0 0x01c>;
reg-names = "mspi", "bspi", "intr_regs", "intr_status_reg";
- interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "spi_lr_fullness_reached",
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mspi_done",
+ "mspi_halted",
+ "spi_lr_fullness_reached",
"spi_lr_session_aborted",
"spi_lr_impatient",
"spi_lr_session_done",
- "spi_lr_overhead",
- "mspi_done",
- "mspi_halted";
+ "spi_lr_overread";
clocks = <&iprocmed>;
clock-names = "iprocmed";
num-cs = <2>;
#address-cells = <1>;
#size-cells = <0>;
- spi_nor: spi-nor@0 {
+ spi_nor: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index 9c0325cf9e22..cca49a2e2d62 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -203,7 +203,7 @@
status = "disabled";
};
- nand: nand@2000 {
+ nand_controller: nand-controller@2000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.0", "brcm,brcmnand";
diff --git a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts
index 8313b7cad542..f92d2cf85972 100644
--- a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts
+++ b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts
@@ -14,10 +14,10 @@
};
};
-&nand {
+&nand_controller {
status = "okay";
- nandcs@1 {
+ nand@1 {
compatible = "brcm,nandcs";
reg = <1>;
nand-ecc-step-size = <512>;
diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi
index 58f67c9b830b..5ac2042515b8 100644
--- a/arch/arm/boot/dts/bcm7445.dtsi
+++ b/arch/arm/boot/dts/bcm7445.dtsi
@@ -148,7 +148,7 @@
reg-names = "aon-ctrl", "aon-sram";
};
- nand: nand@3e2800 {
+ nand_controller: nand-controller@3e2800 {
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm911360_entphn.dts b/arch/arm/boot/dts/bcm911360_entphn.dts
index b2d323f4a5ab..a76c74b44bba 100644
--- a/arch/arm/boot/dts/bcm911360_entphn.dts
+++ b/arch/arm/boot/dts/bcm911360_entphn.dts
@@ -82,8 +82,8 @@
status = "okay";
};
-&nand {
- nandcs@1 {
+&nand_controller {
+ nand@1 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index 046c59fb4846..de40bd59a5fa 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -49,8 +49,8 @@
};
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958300k.dts b/arch/arm/boot/dts/bcm958300k.dts
index b4a1392bd5a6..dda3e11b711f 100644
--- a/arch/arm/boot/dts/bcm958300k.dts
+++ b/arch/arm/boot/dts/bcm958300k.dts
@@ -60,8 +60,8 @@
status = "okay";
};
-&nand {
- nandcs@1 {
+&nand_controller {
+ nand@1 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958305k.dts b/arch/arm/boot/dts/bcm958305k.dts
index 3378683321d3..ea3c6b88b313 100644
--- a/arch/arm/boot/dts/bcm958305k.dts
+++ b/arch/arm/boot/dts/bcm958305k.dts
@@ -68,8 +68,8 @@
status = "okay";
};
-&nand {
- nandcs@1 {
+&nand_controller {
+ nand@1 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 5443fc079e6e..1f73885ec274 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -74,8 +74,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index e1e3c26cef19..b6b9ca8b0972 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -74,8 +74,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index f161ba2e7e5e..ecf426f6ad5d 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -90,8 +90,8 @@
};
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 83cb877d63db..8ca18da981ad 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -78,8 +78,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index 4e106ce1384a..9747378db531 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -78,8 +78,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index cda6cc281e18..0f92b773afb8 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -89,8 +89,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
index ffbff0014c65..9e984ca0e6df 100644
--- a/arch/arm/boot/dts/bcm958625k.dts
+++ b/arch/arm/boot/dts/bcm958625k.dts
@@ -68,8 +68,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/bcm963138dvt.dts b/arch/arm/boot/dts/bcm963138dvt.dts
index 5b177274f182..df5c8ab90627 100644
--- a/arch/arm/boot/dts/bcm963138dvt.dts
+++ b/arch/arm/boot/dts/bcm963138dvt.dts
@@ -31,10 +31,10 @@
status = "okay";
};
-&nand {
+&nand_controller {
status = "okay";
- nandcs@0 {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-ecc-strength = <4>;
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 3fd39c479a3c..5475dab8181d 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -74,8 +74,8 @@
status = "okay";
};
-&nand {
- nandcs@0 {
+&nand_controller {
+ nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
nand-on-flash-bbt;
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 7cf31b6e48b7..c3942b4e82ad 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -574,8 +574,7 @@
status = "disabled";
};
ehrpwm0: pwm@300000 {
- compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x300000 0x2000>;
clocks = <&psc1 17>, <&ehrpwm_tbclk>;
@@ -584,8 +583,7 @@
status = "disabled";
};
ehrpwm1: pwm@302000 {
- compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm",
- "ti,am33xx-ehrpwm";
+ compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm";
#pwm-cells = <3>;
reg = <0x302000 0x2000>;
clocks = <&psc1 17>, <&ehrpwm_tbclk>;
@@ -593,9 +591,8 @@
power-domains = <&psc1 17>;
status = "disabled";
};
- ecap0: ecap@306000 {
- compatible = "ti,da850-ecap", "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ ecap0: pwm@306000 {
+ compatible = "ti,da850-ecap", "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x306000 0x80>;
clocks = <&psc1 20>;
@@ -603,9 +600,8 @@
power-domains = <&psc1 20>;
status = "disabled";
};
- ecap1: ecap@307000 {
- compatible = "ti,da850-ecap", "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ ecap1: pwm@307000 {
+ compatible = "ti,da850-ecap", "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x307000 0x80>;
clocks = <&psc1 20>;
@@ -613,9 +609,8 @@
power-domains = <&psc1 20>;
status = "disabled";
};
- ecap2: ecap@308000 {
- compatible = "ti,da850-ecap", "ti,am3352-ecap",
- "ti,am33xx-ecap";
+ ecap2: pwm@308000 {
+ compatible = "ti,da850-ecap", "ti,am3352-ecap";
#pwm-cells = <3>;
reg = <0x308000 0x80>;
clocks = <&psc1 20>;
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3551a64963f8..a9e7274806f4 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -314,8 +314,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <70>;
- dmas = <&edma 58 0 &edma 59 0>;
- dma-names = "tx", "rx";
};
i2c2: i2c@4802a000 {
@@ -325,8 +323,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <71>;
- dmas = <&edma 60 0 &edma 61 0>;
- dma-names = "tx", "rx";
};
intc: interrupt-controller@48200000 {
@@ -351,7 +347,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <12>;
- mbox_dsp: mbox_dsp {
+ mbox_dsp: mbox-dsp {
ti,mbox-tx = <3 0 0>;
ti,mbox-rx = <0 0 0>;
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 38530dbb89a0..87deb6a76eff 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -319,7 +319,7 @@
};
pcf_lcd: gpio@20 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
@@ -330,7 +330,7 @@
};
pcf_gpio_21: gpio@21 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x21>;
lines-initial-states = <0x1408>;
gpio-controller;
@@ -362,11 +362,11 @@
clock-frequency = <400000>;
pcf_hdmi: gpio@26 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x26>;
gpio-controller;
#gpio-cells = <2>;
- p1 {
+ hdmi-audio-hog {
/* vin6_sel_s0: high: VIN6, low: audio */
gpio-hog;
gpios = <1 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi b/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi
index a25749a1c365..a5bdc6431d8d 100644
--- a/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi
+++ b/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi
@@ -5,17 +5,17 @@
&mailbox5 {
status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+ mbox_ipu1_ipc3x: mbox-ipu1-ipc3x {
status = "okay";
};
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+ mbox_dsp1_ipc3x: mbox-dsp1-ipc3x {
status = "okay";
};
};
&mailbox6 {
status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+ mbox_ipu2_ipc3x: mbox-ipu2-ipc3x {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 149144cdff35..956a26d52a4c 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1159,7 +1159,7 @@
ranges = <0x0 0x20000 0x1000>;
uart3: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -1343,7 +1343,7 @@
};
};
- target-module@55000 { /* 0x48055000, ap 13 0e.0 */
+ gpio2_target: target-module@55000 { /* 0x48055000, ap 13 0e.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0x55000 0x4>,
<0x55010 0x4>,
@@ -1376,7 +1376,7 @@
};
};
- target-module@57000 { /* 0x48057000, ap 15 06.0 */
+ gpio3_target: target-module@57000 { /* 0x48057000, ap 15 06.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0x57000 0x4>,
<0x57010 0x4>,
@@ -1562,7 +1562,7 @@
ranges = <0x0 0x66000 0x1000>;
uart5: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -1594,7 +1594,7 @@
ranges = <0x0 0x68000 0x1000>;
uart6: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -1626,7 +1626,7 @@
ranges = <0x0 0x6a000 0x1000>;
uart1: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -1658,7 +1658,7 @@
ranges = <0x0 0x6c000 0x1000>;
uart2: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -1690,7 +1690,7 @@
ranges = <0x0 0x6e000 0x1000>;
uart4: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -2424,7 +2424,7 @@
ranges = <0x0 0x20000 0x1000>;
uart7: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -2454,7 +2454,7 @@
ranges = <0x0 0x22000 0x1000>;
uart8: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -2484,7 +2484,7 @@
ranges = <0x0 0x24000 0x1000>;
uart9: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
@@ -2561,7 +2561,7 @@
status = "disabled";
ranges = <0 0 0x1000>;
- ecap0: ecap@100 {
+ ecap0: pwm@100 {
compatible = "ti,dra746-ecap",
"ti,am3352-ecap";
#pwm-cells = <3>;
@@ -2607,7 +2607,7 @@
status = "disabled";
ranges = <0 0 0x1000>;
- ecap1: ecap@100 {
+ ecap1: pwm@100 {
compatible = "ti,dra746-ecap",
"ti,am3352-ecap";
#pwm-cells = <3>;
@@ -2653,7 +2653,7 @@
status = "disabled";
ranges = <0 0 0x1000>;
- ecap2: ecap@100 {
+ ecap2: pwm@100 {
compatible = "ti,dra746-ecap",
"ti,am3352-ecap";
#pwm-cells = <3>;
@@ -4129,28 +4129,6 @@
};
};
- usb4_tm: target-module@140000 { /* 0x48940000, ap 75 3c.0 */
- compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0x140000 0x4>,
- <0x140010 0x4>;
- reg-names = "rev", "sysc";
- ti,sysc-mask = <SYSC_OMAP4_DMADISABLE>;
- ti,sysc-midle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- ti,sysc-sidle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- /* Domains (P, C): l3init_pwrdm, l3init_clkdm */
- clocks = <&l3init_clkctrl DRA7_L3INIT_USB_OTG_SS4_CLKCTRL 0>;
- clock-names = "fck";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x140000 0x20000>;
- };
-
target-module@170000 { /* 0x48970000, ap 21 0a.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
reg = <0x170010 0x4>;
@@ -4530,7 +4508,7 @@
ranges = <0x0 0xb000 0x1000>;
uart10: serial@0 {
- compatible = "ti,dra742-uart", "ti,omap4-uart";
+ compatible = "ti,dra742-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
index 6d2cca6b4488..a64364443031 100644
--- a/arch/arm/boot/dts/dra71-evm.dts
+++ b/arch/arm/boot/dts/dra71-evm.dts
@@ -187,7 +187,7 @@
};
&pcf_hdmi {
- p0 {
+ hdmi-i2c-disable-hog {
/*
* PM_OEn to High: Disable routing I2C3 to PM_I2C
* With this PM_SEL(p3) should not matter
diff --git a/arch/arm/boot/dts/dra71x.dtsi b/arch/arm/boot/dts/dra71x.dtsi
index cad0e4a2bd8d..9c270d8f75d5 100644
--- a/arch/arm/boot/dts/dra71x.dtsi
+++ b/arch/arm/boot/dts/dra71x.dtsi
@@ -11,7 +11,3 @@
&rtctarget {
status = "disabled";
};
-
-&usb4_tm {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index b65b2dd094d0..f12825268188 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -226,7 +226,7 @@
};
pcf_gpio_21: gpio@21 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x21>;
lines-initial-states = <0x1408>;
gpio-controller;
@@ -256,7 +256,7 @@
clock-frequency = <400000>;
pcf_hdmi: pcf8575@26 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x26>;
gpio-controller;
#gpio-cells = <2>;
@@ -268,7 +268,7 @@
*/
lines-initial-states = <0x0f2b>;
- p1 {
+ hdmi-audio-hog {
/* vin6_sel_s0: high: VIN6, low: audio */
gpio-hog;
gpios = <1 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index d403acc754b6..90617261373c 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -77,12 +77,12 @@
};
&mailbox5 {
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+ mbox_ipu1_ipc3x: mbox-ipu1-ipc3x {
ti,mbox-tx = <6 2 2>;
ti,mbox-rx = <4 2 2>;
status = "disabled";
};
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+ mbox_dsp1_ipc3x: mbox-dsp1-ipc3x {
ti,mbox-tx = <5 2 2>;
ti,mbox-rx = <1 2 2>;
status = "disabled";
@@ -90,7 +90,7 @@
};
&mailbox6 {
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+ mbox_ipu2_ipc3x: mbox-ipu2-ipc3x {
ti,mbox-tx = <6 2 2>;
ti,mbox-rx = <4 2 2>;
status = "disabled";
@@ -108,7 +108,3 @@
&pcie2_rc {
compatible = "ti,dra726-pcie-rc", "ti,dra7-pcie";
};
-
-&usb4_tm {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi b/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi
index b1147a4b77f9..3256631510c5 100644
--- a/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi
+++ b/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi
@@ -6,7 +6,7 @@
#include "dra7-ipu-dsp-common.dtsi"
&mailbox6 {
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+ mbox_dsp2_ipc3x: mbox-dsp2-ipc3x {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index e1850d6c841a..cfb39dde4930 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -49,49 +49,6 @@
reg = <0x41500000 0x100>;
};
- target-module@48940000 {
- compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0x48940000 0x4>,
- <0x48940010 0x4>;
- reg-names = "rev", "sysc";
- ti,sysc-mask = <SYSC_OMAP4_DMADISABLE>;
- ti,sysc-midle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- ti,sysc-sidle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- clocks = <&l3init_clkctrl DRA7_L3INIT_USB_OTG_SS4_CLKCTRL 0>;
- clock-names = "fck";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x48940000 0x20000>;
-
- omap_dwc3_4: omap_dwc3_4@0 {
- compatible = "ti,dwc3";
- reg = <0 0x10000>;
- interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <1>;
- utmi-mode = <2>;
- ranges;
- status = "disabled";
- usb4: usb@10000 {
- compatible = "snps,dwc3";
- reg = <0x10000 0x17000>;
- interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "peripheral",
- "host",
- "otg";
- maximum-speed = "high-speed";
- dr_mode = "otg";
- };
- };
- };
target-module@41501000 {
compatible = "ti,sysc-omap2", "ti,sysc";
@@ -188,12 +145,12 @@
};
&mailbox5 {
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+ mbox_ipu1_ipc3x: mbox-ipu1-ipc3x {
ti,mbox-tx = <6 2 2>;
ti,mbox-rx = <4 2 2>;
status = "disabled";
};
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+ mbox_dsp1_ipc3x: mbox-dsp1-ipc3x {
ti,mbox-tx = <5 2 2>;
ti,mbox-rx = <1 2 2>;
status = "disabled";
@@ -201,12 +158,12 @@
};
&mailbox6 {
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+ mbox_ipu2_ipc3x: mbox-ipu2-ipc3x {
ti,mbox-tx = <6 2 2>;
ti,mbox-rx = <4 2 2>;
status = "disabled";
};
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+ mbox_dsp2_ipc3x: mbox-dsp2-ipc3x {
ti,mbox-tx = <5 2 2>;
ti,mbox-rx = <1 2 2>;
status = "disabled";
@@ -224,3 +181,52 @@
&pcie2_rc {
compatible = "ti,dra746-pcie-rc", "ti,dra7-pcie";
};
+
+&l4_per3 {
+ segment@0 {
+ usb4_tm: target-module@140000 { /* 0x48940000, ap 75 3c.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x140000 0x4>,
+ <0x140010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_DMADISABLE>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (P, C): l3init_pwrdm, l3init_clkdm */
+ clocks = <&l3init_clkctrl DRA7_L3INIT_USB_OTG_SS4_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x140000 0x20000>;
+
+ omap_dwc3_4: omap_dwc3_4@0 {
+ compatible = "ti,dwc3";
+ reg = <0 0x10000>;
+ interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ utmi-mode = <2>;
+ ranges;
+ status = "disabled";
+ usb4: usb@10000 {
+ compatible = "snps,dwc3";
+ reg = <0x10000 0x17000>;
+ interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "peripheral",
+ "host",
+ "otg";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts
index 9bd01ae40b1d..e2b7fcb061cf 100644
--- a/arch/arm/boot/dts/dra76-evm.dts
+++ b/arch/arm/boot/dts/dra76-evm.dts
@@ -158,12 +158,6 @@
regulator-max-microvolt = <1800000>;
};
- clk_ov5640_fixed: clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
-
hdmi0: connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -355,7 +349,7 @@
};
pcf_lcd: pcf8757@20 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
@@ -366,7 +360,7 @@
};
pcf_gpio_21: pcf8757@21 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x21>;
gpio-controller;
#gpio-cells = <2>;
@@ -377,11 +371,11 @@
};
pcf_hdmi: pcf8575@26 {
- compatible = "ti,pcf8575", "nxp,pcf8575";
+ compatible = "nxp,pcf8575";
reg = <0x26>;
gpio-controller;
#gpio-cells = <2>;
- p1 {
+ hdmi-audio-hog {
/* vin6_sel_s0: high: VIN6, low: audio */
gpio-hog;
gpios = <1 GPIO_ACTIVE_HIGH>;
@@ -406,27 +400,6 @@
};
};
-&i2c5 {
- status = "okay";
- clock-frequency = <400000>;
-
- ov5640@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
-
- clocks = <&clk_ov5640_fixed>;
- clock-names = "xclk";
-
- port {
- csi2_cam0: endpoint {
- remote-endpoint = <&csi2_phy0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
- };
- };
-};
-
&cpu0 {
vdd-supply = <&buck10_reg>;
};
@@ -573,14 +546,6 @@
};
};
-&csi2_0 {
- csi2_phy0: endpoint {
- remote-endpoint = <&csi2_cam0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
-};
-
&ipu2 {
status = "okay";
memory-region = <&ipu2_cma_pool>;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index c52b9cf4f74c..f6ba5e426040 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -653,7 +653,7 @@
mmc-pwrseq = <&wlan_pwrseq>;
brcmf: wifi@1 {
- compatible = "brcm,bcm4334-fmac";
+ compatible = "brcm,bcm4334-fmac", "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpx1>;
diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
index 525ff3d2fac3..55922176807e 100644
--- a/arch/arm/boot/dts/exynos4210-i9100.dts
+++ b/arch/arm/boot/dts/exynos4210-i9100.dts
@@ -314,7 +314,8 @@
};
&fimc_1 {
- status = "okay";
+ /* Back camera not implemented */
+ status = "disabled";
assigned-clocks = <&clock CLK_MOUT_FIMC1>, <&clock CLK_SCLK_FIMC1>;
assigned-clock-parents = <&clock CLK_SCLK_MPLL>;
@@ -330,7 +331,8 @@
};
&fimc_3 {
- status = "okay";
+ /* Back camera not implemented */
+ status = "disabled";
assigned-clocks = <&clock CLK_MOUT_FIMC3>, <&clock CLK_SCLK_FIMC3>;
assigned-clock-parents = <&clock CLK_SCLK_MPLL>;
@@ -806,7 +808,7 @@
pinctrl-0 = <&sd3_clk>, <&sd3_cmd>, <&sd3_bus4>;
brcmf: wifi@1 {
- compatible = "brcm,bcm4330-fmac";
+ compatible = "brcm,bcm4330-fmac", "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpx2>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index d2406c9146b8..3eb8df319246 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -521,7 +521,7 @@
pinctrl-0 = <&sd3_clk>, <&sd3_cmd>, <&sd3_bus4>;
brcmf: wifi@1 {
- compatible = "brcm,bcm4330-fmac";
+ compatible = "brcm,bcm4330-fmac", "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpx2>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index dd44ad2c6ad6..f052853244a4 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -614,7 +614,7 @@
pinctrl-0 = <&sd3_clk>, <&sd3_cmd>, <&sd3_bus4>;
brcmf: wifi@1 {
- compatible = "brcm,bcm4330-fmac";
+ compatible = "brcm,bcm4330-fmac", "brcm,bcm4329-fmac";
reg = <1>;
interrupt-parent = <&gpx2>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
index 4583d342af39..b3726d4d7d93 100644
--- a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
+++ b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
@@ -163,6 +163,7 @@
<1025000>, <950000>,
<918750>, <900000>,
<875000>, <831250>;
+ wakeup-source;
regulators {
ldo1_reg: LDO1 {
diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
index fc77c1bfd844..968c7943653e 100644
--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
@@ -668,6 +668,7 @@
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&max77686_irq>;
pinctrl-names = "default";
+ wakeup-source;
reg = <0x09>;
#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos4412-n710x.dts b/arch/arm/boot/dts/exynos4412-n710x.dts
index c49dbb7847b8..2c792142605c 100644
--- a/arch/arm/boot/dts/exynos4412-n710x.dts
+++ b/arch/arm/boot/dts/exynos4412-n710x.dts
@@ -50,8 +50,8 @@
reg = <0x48>;
interrupt-parent = <&gpm2>;
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
- x-size = <720>;
- y-size = <1280>;
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
avdd-supply = <&ldo23_reg>;
vdd-supply = <&ldo24_reg>;
};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 5bd05866d7a3..5b1d4591b35c 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -282,6 +282,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
+ wakeup-source;
reg = <0x09>;
#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index e1f6de53e20e..5479ef09f9f3 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -144,6 +144,7 @@
<1200000>, <1200000>,
<1200000>, <1200000>,
<1200000>, <1200000>;
+ wakeup-source;
s5m8767_osc: clocks {
compatible = "samsung,s5m8767-clk";
diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
index 9e750890edb8..22c3086e0076 100644
--- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
+++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
@@ -325,6 +325,7 @@
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&max77686_irq>;
pinctrl-names = "default";
+ wakeup-source;
reg = <0x09>;
#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index a161f6237c7f..a771542e28b8 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -218,6 +218,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s5m8767_irq>;
+ wakeup-source;
vinb1-supply = <&main_dc_reg>;
vinb2-supply = <&main_dc_reg>;
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index 949c0721cdb4..884fef55836c 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -190,6 +190,7 @@
pinctrl-names = "default";
pinctrl-0 = <&max77802_irq>, <&pmic_dvs_1>, <&pmic_dvs_2>,
<&pmic_dvs_3>;
+ wakeup-source;
#clock-cells = <1>;
inl1-supply = <&buck5_reg>;
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 1aad4859c5f1..dfc7f14f5772 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -352,6 +352,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;
+ wakeup-source;
s2mps11_osc: clocks {
compatible = "samsung,s2mps11-clk";
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index d506da9fa661..a4f0e3ffedbd 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -132,6 +132,7 @@
pmic@66 {
compatible = "samsung,s2mps11-pmic";
reg = <0x66>;
+ wakeup-source;
s2mps11_osc: clocks {
compatible = "samsung,s2mps11-clk";
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index 6d690b1db099..e7958dbecfd2 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -512,6 +512,7 @@
interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;
+ wakeup-source;
s2mps11_osc: clocks {
compatible = "samsung,s2mps11-clk";
diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
index 20c222b33f98..d91f7fa2cf80 100644
--- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
@@ -22,7 +22,7 @@
label = "blue:heartbeat";
pwms = <&pwm 2 2000000 0>;
pwm-names = "pwm2";
- max_brightness = <255>;
+ max-brightness = <255>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
index ede782257643..1c24f9b35973 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
@@ -24,7 +24,7 @@
label = "blue:heartbeat";
pwms = <&pwm 2 2000000 0>;
pwm-names = "pwm2";
- max_brightness = <255>;
+ max-brightness = <255>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
index 2fc3e86dc5f7..982752e1df24 100644
--- a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
+++ b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
@@ -22,7 +22,7 @@
* Green LED is much brighter than the others
* so limit its max brightness
*/
- max_brightness = <127>;
+ max-brightness = <127>;
linux,default-trigger = "mmc0";
};
@@ -30,7 +30,7 @@
label = "blue:heartbeat";
pwms = <&pwm 2 2000000 0>;
pwm-names = "pwm2";
- max_brightness = <255>;
+ max-brightness = <255>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index cc39289e99dd..c79a2a02dd6b 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -61,9 +61,9 @@
#size-cells = <0>;
/* Collides with IDE pins, that's cool (we do not use them) */
- gpio-sck = <&gpio1 5 GPIO_ACTIVE_HIGH>;
- gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
- gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ sck-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
@@ -169,7 +169,7 @@
* The touchpad input is connected to a GPIO bit-banged
* I2C bus.
*/
- gpio-i2c {
+ i2c {
compatible = "i2c-gpio";
/* Collides with ICE */
sda-gpios = <&gpio0 5 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
@@ -492,8 +492,7 @@
display-controller@6a000000 {
status = "okay";
- port@0 {
- reg = <0>;
+ port {
display_out: endpoint {
remote-endpoint = <&panel_in>;
};
diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
index c6f3d90e3e90..eba1c94ed7f7 100644
--- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
@@ -82,7 +82,7 @@
/* Global Mixed-Mode Technology G751 mounted on GPIO I2C */
- gpio-i2c {
+ i2c {
compatible = "i2c-gpio";
sda-gpios = <&gpio0 15 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
scl-gpios = <&gpio0 16 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
@@ -140,7 +140,7 @@
};
};
- mdio0: ethernet-phy {
+ mdio0: mdio {
compatible = "virtual,mdio-gpio";
/* Uses MDC and MDIO */
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>, /* MDC */
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
index 43c45f7e1e0a..13112a8a5dd8 100644
--- a/arch/arm/boot/dts/gemini-nas4220b.dts
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -62,7 +62,7 @@
};
};
- mdio0: ethernet-phy {
+ mdio0: mdio {
compatible = "virtual,mdio-gpio";
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>, /* MDC */
<&gpio0 21 GPIO_ACTIVE_HIGH>; /* MDIO */
diff --git a/arch/arm/boot/dts/gemini-rut1xx.dts b/arch/arm/boot/dts/gemini-rut1xx.dts
index 9611ddf06792..0ebda4efd9d0 100644
--- a/arch/arm/boot/dts/gemini-rut1xx.dts
+++ b/arch/arm/boot/dts/gemini-rut1xx.dts
@@ -56,7 +56,7 @@
};
};
- mdio0: ethernet-phy {
+ mdio0: mdio {
compatible = "virtual,mdio-gpio";
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>, /* MDC */
<&gpio0 21 GPIO_ACTIVE_HIGH>; /* MDIO */
@@ -125,18 +125,6 @@
};
};
- ethernet@60000000 {
- status = "okay";
-
- ethernet-port@0 {
- phy-mode = "rgmii";
- phy-handle = <&phy0>;
- };
- ethernet-port@1 {
- /* Not used in this platform */
- };
- };
-
usb@68000000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/gemini-sl93512r.dts b/arch/arm/boot/dts/gemini-sl93512r.dts
index a0916d3c1059..c78e55fd2562 100644
--- a/arch/arm/boot/dts/gemini-sl93512r.dts
+++ b/arch/arm/boot/dts/gemini-sl93512r.dts
@@ -87,9 +87,9 @@
#address-cells = <1>;
#size-cells = <0>;
/* Check pin collisions */
- gpio-sck = <&gpio1 28 GPIO_ACTIVE_HIGH>;
- gpio-miso = <&gpio1 30 GPIO_ACTIVE_HIGH>;
- gpio-mosi = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ sck-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio1 31 GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts
index 0c6e6d35bfaa..1b64cc80b55a 100644
--- a/arch/arm/boot/dts/gemini-sq201.dts
+++ b/arch/arm/boot/dts/gemini-sq201.dts
@@ -72,9 +72,9 @@
#address-cells = <1>;
#size-cells = <0>;
/* Check pin collisions */
- gpio-sck = <&gpio1 28 GPIO_ACTIVE_HIGH>;
- gpio-miso = <&gpio1 30 GPIO_ACTIVE_HIGH>;
- gpio-mosi = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ sck-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio1 31 GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
diff --git a/arch/arm/boot/dts/gemini-wbd111.dts b/arch/arm/boot/dts/gemini-wbd111.dts
index 3a2761dd460f..5602ba8f30f2 100644
--- a/arch/arm/boot/dts/gemini-wbd111.dts
+++ b/arch/arm/boot/dts/gemini-wbd111.dts
@@ -68,7 +68,7 @@
};
};
- mdio0: ethernet-phy {
+ mdio0: mdio {
compatible = "virtual,mdio-gpio";
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>, /* MDC */
<&gpio0 21 GPIO_ACTIVE_HIGH>; /* MDIO */
diff --git a/arch/arm/boot/dts/gemini-wbd222.dts b/arch/arm/boot/dts/gemini-wbd222.dts
index 52b4dbc0c072..a4a260c36d75 100644
--- a/arch/arm/boot/dts/gemini-wbd222.dts
+++ b/arch/arm/boot/dts/gemini-wbd222.dts
@@ -67,7 +67,7 @@
};
};
- mdio0: ethernet-phy {
+ mdio0: mdio {
compatible = "virtual,mdio-gpio";
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>, /* MDC */
<&gpio0 21 GPIO_ACTIVE_HIGH>; /* MDIO */
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index 065ed10a79fa..cc053af3c347 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -191,7 +191,7 @@
};
rtc@45000000 {
- compatible = "cortina,gemini-rtc";
+ compatible = "cortina,gemini-rtc", "faraday,ftrtc010";
reg = <0x45000000 0x100>;
interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
resets = <&syscon GEMINI_RESET_RTC>;
@@ -286,6 +286,7 @@
clock-names = "PCLK", "PCICLK";
pinctrl-names = "default";
pinctrl-0 = <&pci_default_pins>;
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@@ -356,6 +357,14 @@
};
};
+ crypto: crypto@62000000 {
+ compatible = "cortina,sl3516-crypto";
+ reg = <0x62000000 0x10000>;
+ interrupts = <7 IRQ_TYPE_EDGE_RISING>;
+ resets = <&syscon GEMINI_RESET_SECURITY>;
+ clocks = <&syscon GEMINI_CLK_GATE_SECURITY>;
+ };
+
ide@63000000 {
compatible = "cortina,gemini-pata", "faraday,ftide010";
reg = <0x63000000 0x1000>;
@@ -409,8 +418,6 @@
clock-names = "PCLK", "TVE";
pinctrl-names = "default";
pinctrl-0 = <&tvc_default_pins>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index 905900bf3e82..cf48ec14af43 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Hisilicon Ltd. Hi3620 SoC
+ * HiSilicon Ltd. Hi3620 SoC
*
- * Copyright (C) 2012-2013 Hisilicon Ltd.
+ * Copyright (C) 2012-2013 HiSilicon Ltd.
* Copyright (C) 2012-2013 Linaro Ltd.
*
* Author: Haojian Zhuang <haojian.zhuang@linaro.org>
diff --git a/arch/arm/boot/dts/hip01-ca9x2.dts b/arch/arm/boot/dts/hip01-ca9x2.dts
index 031476304d94..f3faf247cd61 100644
--- a/arch/arm/boot/dts/hip01-ca9x2.dts
+++ b/arch/arm/boot/dts/hip01-ca9x2.dts
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Hisilicon Ltd. HiP01 SoC
+ * HiSilicon Ltd. HiP01 SoC
*
- * Copyright (C) 2014 Hisilicon Ltd.
+ * Copyright (C) 2014 HiSilicon Ltd.
* Copyright (C) 2014 Huawei Ltd.
*
* Author: Wang Long <long.wanglong@huawei.com>
diff --git a/arch/arm/boot/dts/hip01.dtsi b/arch/arm/boot/dts/hip01.dtsi
index 2a7963605390..e17f36bd9006 100644
--- a/arch/arm/boot/dts/hip01.dtsi
+++ b/arch/arm/boot/dts/hip01.dtsi
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Hisilicon Ltd. HiP01 SoC
+ * HiSilicon Ltd. HiP01 SoC
*
- * Copyright (c) 2014 Hisilicon Ltd.
+ * Copyright (c) 2014 HiSilicon Ltd.
* Copyright (c) 2014 Huawei Ltd.
*
* Author: Wang Long <long.wanglong@huawei.com>
diff --git a/arch/arm/boot/dts/hip04.dtsi b/arch/arm/boot/dts/hip04.dtsi
index bccf5ba3d855..2424cc545c9c 100644
--- a/arch/arm/boot/dts/hip04.dtsi
+++ b/arch/arm/boot/dts/hip04.dtsi
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Hisilicon Ltd. HiP04 SoC
+ * HiSilicon Ltd. HiP04 SoC
*
- * Copyright (C) 2013-2014 Hisilicon Ltd.
+ * Copyright (C) 2013-2014 HiSilicon Ltd.
* Copyright (C) 2013-2014 Linaro Ltd.
*
* Author: Haojian Zhuang <haojian.zhuang@linaro.org>
diff --git a/arch/arm/boot/dts/hisi-x5hd2-dkb.dts b/arch/arm/boot/dts/hisi-x5hd2-dkb.dts
index 22b122d3f514..7758c19038f0 100644
--- a/arch/arm/boot/dts/hisi-x5hd2-dkb.dts
+++ b/arch/arm/boot/dts/hisi-x5hd2-dkb.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2014 Linaro Ltd.
- * Copyright (c) 2013-2014 Hisilicon Limited.
+ * Copyright (c) 2013-2014 HiSilicon Limited.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/hisi-x5hd2.dtsi b/arch/arm/boot/dts/hisi-x5hd2.dtsi
index 97211385dc89..dc991ba2a9fb 100644
--- a/arch/arm/boot/dts/hisi-x5hd2.dtsi
+++ b/arch/arm/boot/dts/hisi-x5hd2.dtsi
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2014 Linaro Ltd.
- * Copyright (c) 2013-2014 Hisilicon Limited.
+ * Copyright (c) 2013-2014 HiSilicon Limited.
*/
#include <dt-bindings/clock/hix5hd2-clock.h>
diff --git a/arch/arm/boot/dts/imx25-pinfunc.h b/arch/arm/boot/dts/imx25-pinfunc.h
index f984b702efc5..908caf810351 100644
--- a/arch/arm/boot/dts/imx25-pinfunc.h
+++ b/arch/arm/boot/dts/imx25-pinfunc.h
@@ -563,15 +563,15 @@
#define MX25_PAD_DE_B__DE_B 0x1f0 0x3ec 0x000 0x00 0x000
#define MX25_PAD_DE_B__GPIO_2_20 0x1f0 0x3ec 0x000 0x05 0x000
-#define MX25_PAD_GPIO_A__GPIO_A 0x1f4 0x3f0 0x000 0x00 0x000
+#define MX25_PAD_GPIO_A__GPIO_1_0 0x1f4 0x3f0 0x000 0x00 0x000
#define MX25_PAD_GPIO_A__CAN1_TX 0x1f4 0x3f0 0x000 0x06 0x000
#define MX25_PAD_GPIO_A__USBOTG_PWR 0x1f4 0x3f0 0x000 0x02 0x000
-#define MX25_PAD_GPIO_B__GPIO_B 0x1f8 0x3f4 0x000 0x00 0x000
+#define MX25_PAD_GPIO_B__GPIO_1_1 0x1f8 0x3f4 0x000 0x00 0x000
#define MX25_PAD_GPIO_B__USBOTG_OC 0x1f8 0x3f4 0x57c 0x02 0x001
#define MX25_PAD_GPIO_B__CAN1_RX 0x1f8 0x3f4 0x480 0x06 0x001
-#define MX25_PAD_GPIO_C__GPIO_C 0x1fc 0x3f8 0x000 0x00 0x000
+#define MX25_PAD_GPIO_C__GPIO_1_2 0x1fc 0x3f8 0x000 0x00 0x000
#define MX25_PAD_GPIO_C__PWM4_PWMO 0x1fc 0x3f8 0x000 0x01 0x000
#define MX25_PAD_GPIO_C__I2C2_SCL 0x1fc 0x3f8 0x51c 0x02 0x001
#define MX25_PAD_GPIO_C__KPP_COL4 0x1fc 0x3f8 0x52c 0x03 0x001
@@ -580,18 +580,18 @@
#define MX25_PAD_GPIO_C__CAN2_TX 0x1fc 0x3f8 0x000 0x06 0x000
#define MX25_PAD_GPIO_C__CSPI2_SS2 0x1fc 0x3f8 0x000 0x07 0x000
-#define MX25_PAD_GPIO_D__GPIO_D 0x200 0x3fc 0x000 0x00 0x000
+#define MX25_PAD_GPIO_D__GPIO_1_3 0x200 0x3fc 0x000 0x00 0x000
#define MX25_PAD_GPIO_D__I2C2_SDA 0x200 0x3fc 0x520 0x02 0x001
#define MX25_PAD_GPIO_D__CAN2_RX 0x200 0x3fc 0x484 0x06 0x001
#define MX25_PAD_GPIO_D__CSPI3_SS2 0x200 0x3fc 0x4c4 0x07 0x001
-#define MX25_PAD_GPIO_E__GPIO_E 0x204 0x400 0x000 0x00 0x000
+#define MX25_PAD_GPIO_E__GPIO_1_4 0x204 0x400 0x000 0x00 0x000
#define MX25_PAD_GPIO_E__I2C3_CLK 0x204 0x400 0x524 0x01 0x002
#define MX25_PAD_GPIO_E__LD16 0x204 0x400 0x000 0x02 0x000
#define MX25_PAD_GPIO_E__AUD7_TXD 0x204 0x400 0x000 0x04 0x000
#define MX25_PAD_GPIO_E__UART4_RXD 0x204 0x400 0x570 0x06 0x002
-#define MX25_PAD_GPIO_F__GPIO_F 0x208 0x404 0x000 0x00 0x000
+#define MX25_PAD_GPIO_F__GPIO_1_5 0x208 0x404 0x000 0x00 0x000
#define MX25_PAD_GPIO_F__LD17 0x208 0x404 0x000 0x02 0x000
#define MX25_PAD_GPIO_F__AUD7_TXC 0x208 0x404 0x000 0x04 0x000
#define MX25_PAD_GPIO_F__UART4_TXD 0x208 0x404 0x000 0x06 0x000
diff --git a/arch/arm/boot/dts/imx28-lwe.dtsi b/arch/arm/boot/dts/imx28-lwe.dtsi
new file mode 100644
index 000000000000..bb971e660db8
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-lwe.dtsi
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2021
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ */
+
+/dts-v1/;
+#include "imx28.dtsi"
+
+/ {
+ aliases {
+ spi2 = &ssp3;
+ };
+
+ chosen {
+ bootargs = "root=/dev/mmcblk0p2 rootfstype=ext4 ro rootwait console=ttyAMA0,115200 panic=1";
+ };
+
+ memory@40000000 {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ reg_3v3: regulator-reg-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_usb_5v: regulator-reg-usb-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_fec_3v3: regulator-reg-fec-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fec-phy";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&duart {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+};
+
+&saif0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_a>;
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&clks 53>;
+ assigned-clock-rates = <12000000>;
+ status = "okay";
+};
+
+&saif1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ fsl,saif-master = <&saif0>;
+ #sound-dai-cells = <0>;
+ status = "okay";
+};
+
+&spi3_pins_a {
+ fsl,pinmux-ids = <
+ MX28_PAD_AUART2_RX__SSP3_D4
+ MX28_PAD_AUART2_TX__SSP3_D5
+ MX28_PAD_SSP3_SCK__SSP3_SCK
+ MX28_PAD_SSP3_MOSI__SSP3_CMD
+ MX28_PAD_SSP3_MISO__SSP3_D0
+ MX28_PAD_SSP3_SS0__SSP3_D3
+ MX28_PAD_AUART2_TX__GPIO_3_9
+ >;
+};
+
+&ssp0 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3v3>;
+ non-removable;
+ status = "okay";
+};
+
+&ssp2 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+};
+
+&ssp3 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi3_pins_a>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <40000000>;
+ reg = <0>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0 0x80000>;
+ read-only;
+ };
+
+ partition@80000 {
+ label = "env0";
+ reg = <0x80000 0x10000>;
+ };
+
+ partition@90000 {
+ label = "env1";
+ reg = <0x90000 0x10000>;
+ };
+
+ partition@100000 {
+ label = "kernel";
+ reg = <0x100000 0x400000>;
+ };
+
+ partition@500000 {
+ label = "swupdate";
+ reg = <0x500000 0x800000>;
+ };
+ };
+ };
+};
+
+&usb0 {
+ vbus-supply = <&reg_usb_5v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins_b>, <&usb0_id_pins_a>;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbphy0 {
+ status = "okay";
+};
+
+&usb1 {
+ vbus-supply = <&reg_usb_5v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_pins_b>;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbphy1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx28-xea.dts b/arch/arm/boot/dts/imx28-xea.dts
new file mode 100644
index 000000000000..a400c108f66a
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-xea.dts
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2021
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ */
+
+/dts-v1/;
+#include "imx28-lwe.dtsi"
+
+/ {
+ compatible = "lwn,imx28-xea", "fsl,imx28";
+};
+
+&can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1_pins_a>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_b>;
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a &hog_pins_tiva>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D00__GPIO_0_0
+ MX28_PAD_GPMI_D02__GPIO_0_2
+ MX28_PAD_GPMI_D05__GPIO_0_5
+ MX28_PAD_GPMI_CE1N__GPIO_0_17
+ MX28_PAD_GPMI_RDY0__GPIO_0_20
+ MX28_PAD_GPMI_RDY1__GPIO_0_21
+ MX28_PAD_GPMI_RDY2__GPIO_0_22
+ MX28_PAD_GPMI_RDN__GPIO_0_24
+ MX28_PAD_GPMI_CLE__GPIO_0_27
+ MX28_PAD_LCD_VSYNC__GPIO_1_28
+ MX28_PAD_SSP1_SCK__GPIO_2_12
+ MX28_PAD_SSP1_CMD__GPIO_2_13
+ MX28_PAD_SSP2_SS1__GPIO_2_20
+ MX28_PAD_SSP2_SS2__GPIO_2_21
+ MX28_PAD_LCD_D00__GPIO_1_0
+ MX28_PAD_LCD_D01__GPIO_1_1
+ MX28_PAD_LCD_D02__GPIO_1_2
+ MX28_PAD_LCD_D03__GPIO_1_3
+ MX28_PAD_LCD_D04__GPIO_1_4
+ MX28_PAD_LCD_D05__GPIO_1_5
+ MX28_PAD_LCD_D06__GPIO_1_6
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ hog_pins_tiva: hog@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_RDY3__GPIO_0_23
+ MX28_PAD_GPMI_WRN__GPIO_0_25
+ >;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ hog_pins_coding: hog@2 {
+ reg = <2>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D01__GPIO_0_1
+ MX28_PAD_GPMI_D03__GPIO_0_3
+ MX28_PAD_GPMI_D04__GPIO_0_4
+ MX28_PAD_GPMI_D06__GPIO_0_6
+ MX28_PAD_GPMI_D07__GPIO_0_7
+ >;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+};
+
+&reg_fec_3v3 {
+ gpio = <&gpio0 0 0>;
+};
+
+&reg_usb_5v {
+ gpio = <&gpio0 2 0>;
+};
+
+&spi2_pins_a {
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP2_SCK__SSP2_SCK
+ MX28_PAD_SSP2_MOSI__SSP2_CMD
+ MX28_PAD_SSP2_MISO__SSP2_D0
+ MX28_PAD_SSP2_SS0__GPIO_2_19
+ >;
+};
diff --git a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
index 16addb3a2a1b..7d4970417dce 100644
--- a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
+++ b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
@@ -206,7 +206,7 @@
pinctrl-0 = <&pinctrl_weim>;
status = "okay";
- lan9221: lan9221@5,0 {
+ lan9221: ethernet@5,0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lan9221>;
compatible = "smsc,lan9221", "smsc,lan9115";
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 9a2e1fde7128..6208fbb2e741 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -24,7 +24,7 @@
reg = <0xf4000000 0x3ff0000>;
ranges;
- lan9220@f4000000 {
+ ethernet@f4000000 {
compatible = "smsc,lan9220", "smsc,lan9115";
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
index f98691ae4415..d3082b9774e4 100644
--- a/arch/arm/boot/dts/imx53-m53menlo.dts
+++ b/arch/arm/boot/dts/imx53-m53menlo.dts
@@ -388,13 +388,13 @@
pinctrl_power_button: powerbutgrp {
fsl,pins = <
- MX53_PAD_SD2_DATA2__GPIO1_13 0x1e4
+ MX53_PAD_SD2_DATA0__GPIO1_15 0x1e4
>;
};
pinctrl_power_out: poweroutgrp {
fsl,pins = <
- MX53_PAD_SD2_DATA0__GPIO1_15 0x1e4
+ MX53_PAD_SD2_DATA2__GPIO1_13 0x1e4
>;
};
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index be040b6a02fa..5a5fa6190a52 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -651,6 +651,7 @@
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
+ fsl,dma-info = <24 20>;
status = "okay";
};
@@ -670,6 +671,7 @@
&uart5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart5>;
+ fsl,dma-info = <4096 4>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6dl-b105pv2.dts b/arch/arm/boot/dts/imx6dl-b105pv2.dts
new file mode 100644
index 000000000000..411aa72d344b
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b105pv2.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B105Pv2
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+/dts-v1/;
+#include "imx6dl-b1x5pv2.dtsi"
+
+/ {
+ model = "General Electric B105Pv2";
+ compatible = "ge,imx6dl-b105pv2", "congatec,qmx6", "fsl,imx6dl";
+
+ panel {
+ compatible = "auo,g101evn010";
+ };
+};
+
+&i2c3 {
+ touchscreen@41 {
+ compatible = "ilitek,ili251x";
+ reg = <0x41>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&tca6424a 21 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <1280>;
+ touchscreen-size-y = <800>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-b105v2.dts b/arch/arm/boot/dts/imx6dl-b105v2.dts
new file mode 100644
index 000000000000..d011127c635b
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b105v2.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B105v2
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+/dts-v1/;
+#include "imx6dl-b1x5v2.dtsi"
+
+/ {
+ model = "General Electric B105v2";
+ compatible = "ge,imx6dl-b105v2", "congatec,qmx6", "fsl,imx6dl";
+
+ panel {
+ compatible = "auo,g101evn010";
+ };
+};
+
+&i2c3 {
+ touchscreen@41 {
+ compatible = "ilitek,ili251x";
+ reg = <0x41>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&tca6424a 21 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <1280>;
+ touchscreen-size-y = <800>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-b125pv2.dts b/arch/arm/boot/dts/imx6dl-b125pv2.dts
new file mode 100644
index 000000000000..ca840fa84052
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b125pv2.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B125Pv2
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+/dts-v1/;
+#include "imx6dl-b1x5pv2.dtsi"
+
+/ {
+ model = "General Electric B125Pv2";
+ compatible = "ge,imx6dl-b125pv2", "congatec,qmx6", "fsl,imx6dl";
+
+ panel {
+ compatible = "auo,g121ean01";
+ };
+};
+
+&i2c3 {
+ touchscreen@2a {
+ compatible = "eeti,exc80h60";
+ reg = <0x2a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&tca6424a 21 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-b125v2.dts b/arch/arm/boot/dts/imx6dl-b125v2.dts
new file mode 100644
index 000000000000..81e5a9cb8900
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b125v2.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B125v2
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+/dts-v1/;
+#include "imx6dl-b1x5v2.dtsi"
+
+/ {
+ model = "General Electric B125v2";
+ compatible = "ge,imx6dl-b125v2", "congatec,qmx6", "fsl,imx6dl";
+
+ panel {
+ compatible = "auo,g121ean01";
+ };
+};
+
+&i2c3 {
+ touchscreen@2a {
+ compatible = "eeti,exc80h60";
+ reg = <0x2a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&tca6424a 21 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-b155v2.dts b/arch/arm/boot/dts/imx6dl-b155v2.dts
new file mode 100644
index 000000000000..c861937b30f6
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b155v2.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B155v2
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+/dts-v1/;
+#include "imx6dl-b1x5v2.dtsi"
+
+/ {
+ model = "General Electric B155v2";
+ compatible = "ge,imx6dl-b155v2", "congatec,qmx6", "fsl,imx6dl";
+
+ panel {
+ compatible = "auo,g156xtn01";
+ };
+};
+
+&i2c3 {
+ touchscreen@2a {
+ compatible = "eeti,exc80h84";
+ reg = <0x2a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
+ reset-gpios = <&tca6424a 21 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi b/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
new file mode 100644
index 000000000000..ec5b66453156
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B1x5Pv2
+// patient monitor series
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+#include <dt-bindings/input/input.h>
+#include "imx6dl-qmx6.dtsi"
+
+/ {
+ chosen {
+ stdout-path = &uart3;
+ };
+
+ /* Do not allow frequencies above 800MHz */
+ cpus {
+ cpu@0 {
+ operating-points = <
+ /* kHz uV */
+ 792000 1175000
+ 396000 1150000
+ >;
+ fsl,soc-operating-points = <
+ /* ARM kHz SOC-PU uV */
+ 792000 1175000
+ 396000 1175000
+ >;
+ };
+
+ cpu@1 {
+ operating-points = <
+ /* kHz uV */
+ 792000 1175000
+ 396000 1150000
+ >;
+ fsl,soc-operating-points = <
+ /* ARM kHz SOC-PU uV */
+ 792000 1175000
+ 396000 1175000
+ >;
+ };
+ };
+
+ reg_syspwr: regulator-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "SYS_PWR";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ reg_5v_pmc: regulator-5v-pmc {
+ compatible = "regulator-fixed";
+ regulator-name = "5V PMC";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_syspwr>;
+ };
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_syspwr>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_syspwr>;
+ };
+
+ reg_5v0_audio: regulator-5v0-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "5V0_AUDIO";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_5v>;
+ gpio = <&tca6424a 16 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ /*
+ * This must be always-on for da7212, which has some not
+ * properly documented dependencies for its speaker supply
+ * pin. The issue manifests as speaker volume being very low.
+ */
+ regulator-always-on;
+ };
+
+
+ reg_3v3_audio: regulator-3v3-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3_AUDIO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_3v3>;
+ pinctrl-0 = <&pinctrl_q7_hda_reset>;
+ pinctrl-names = "default";
+ gpio = <&gpio6 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_2v5_audio: regulator-2v5-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "2V5_AUDIO";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ vin-supply = <&reg_3v3_audio>;
+
+ };
+
+ reg_wlan: regulator-wlan {
+ compatible = "regulator-fixed";
+ regulator-name = "WLAN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_sdio_power>;
+ gpio = <&gpio4 30 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <70000>;
+ };
+
+ reg_bl: regulator-backlight {
+ compatible = "regulator-fixed";
+ regulator-name = "LED_VCC";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ vin-supply = <&reg_syspwr>;
+ pinctrl-0 = <&pinctrl_q7_lcd_power>;
+ pinctrl-names = "default";
+ gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_lcd: regulator-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "LCD_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_5v>;
+ };
+
+ usb_power: regulator-usb-power {
+ compatible = "regulator-fixed";
+ regulator-name = "USB POWER";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_5v>;
+ };
+
+ charger: battery-charger {
+ compatible = "gpio-charger"; /* ti,bq24172 */
+ charger-type = "mains";
+ gpios = <&tca6424a 3 GPIO_ACTIVE_LOW>;
+ charge-current-limit-gpios = <&tca6424a 11 GPIO_ACTIVE_HIGH>,
+ <&tca6424a 12 GPIO_ACTIVE_HIGH>;
+ charge-current-limit-mapping = <1300000 0x0>,
+ <700000 0x1>,
+ <0 0x2>;
+ charge-status-gpios = <&tca6424a 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ poweroff {
+ compatible = "gpio-poweroff";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_spi_cs1>;
+ gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ };
+
+ power-button-key {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_sleep_button>;
+
+ power-button {
+ label = "power button";
+ gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ };
+ };
+
+ rotary-encoder-key {
+ compatible = "gpio-keys";
+
+ rotary-encoder-press {
+ label = "rotary-encoder press";
+ gpios = <&tca6424a 0 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_ENTER>;
+ linux,can-disable;
+ };
+ };
+
+ rotary-encoder {
+ compatible = "rotary-encoder";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio2 &pinctrl_q7_gpio4>;
+ gpios = <&gpio4 26 GPIO_ACTIVE_LOW>, <&gpio1 0 GPIO_ACTIVE_LOW>;
+ rotary-encoder,relative-axis;
+ rotary-encoder,steps-per-period = <2>;
+ wakeup-source;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio1 &pinctrl_q7_gpio3 &pinctrl_q7_gpio5>;
+
+ alarm1 {
+ label = "alarm:red";
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ alarm2 {
+ label = "alarm:yellow";
+ gpios = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+ };
+
+ alarm3 {
+ label = "alarm:blue";
+ gpios = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_backlight_enable>;
+ power-supply = <&reg_bl>;
+ pwms = <&pwm4 0 5000000 0>;
+ brightness-levels = <0 255>;
+ num-interpolated-steps = <255>;
+ default-brightness-level = <179>;
+ enable-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ panel {
+ backlight = <&backlight>;
+ power-supply = <&reg_lcd>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,widgets = "Speaker", "Ext Spk";
+ simple-audio-card,audio-routing = "Ext Spk", "LINE";
+
+ simple-audio-card,cpu {
+ sound-dai = <&ssi1>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ };
+ };
+
+ clk_ext_audio_codec: clock-codec {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <12288000>;
+ };
+};
+
+&audmux {
+ status = "okay";
+};
+
+&fec {
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&i2c1 {
+ battery: battery@b {
+ compatible = "ti,bq20z65", "sbs,sbs-battery";
+ reg = <0x0b>;
+ sbs,battery-detect-gpios = <&tca6424a 5 GPIO_ACTIVE_LOW>;
+ sbs,i2c-retry-count = <5>;
+ power-supplies = <&charger>;
+ };
+
+ codec: audio-codec@1a {
+ compatible = "dlg,da7212";
+ reg = <0x1a>;
+ #sound-dai-cells = <0>;
+ VDDA-supply = <&reg_2v5_audio>;
+ VDDSP-supply = <&reg_5v0_audio>;
+ VDDMIC-supply = <&reg_3v3_audio>;
+ VDDIO-supply = <&reg_3v3_audio>;
+ clocks = <&clk_ext_audio_codec>;
+ clock-names = "mclk";
+ };
+};
+
+&i2c5 {
+ tca6424a: gpio-controller@22 {
+ compatible = "ti,tca6424";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ vcc-supply = <&reg_3v3>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio6>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-line-names = "GPIO_ROTOR#", "ACM_IO_INT", "TMP_SENSOR_IRQ", "AC_IN",
+ "TF_S", "BATT_T", "LED_INC_CHAR", "ACM1_OCF",
+ "ACM2_OCF", "ACM_IO_RST", "USB1_POWER_EN", "EGPIO_CC_CTL0",
+ "EGPIO_CC_CTL1", "12V_OEMNBP_EN", "CP2105_RST", "",
+ "SPEAKER_PA_EN", "ARM7_UPI_RESET", "ARM7_PWR_RST", "NURSE_CALL",
+ "MARKER_EN", "EGPIO_TOUCH_RST", "PRESSURE_INT1", "PRESSURE_INT2";
+
+ };
+
+ tmp75: temperature-sensor@48 {
+ compatible = "ti,tmp75";
+ reg = <0x48>;
+ vs-supply = <&reg_3v3>;
+ interrupt-parent = <&tca6424a>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&ldb {
+ status = "okay";
+
+ lvds0: lvds-channel@0 {
+ status = "okay";
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <24>;
+
+ port@4 {
+ reg = <4>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&pwm4 {
+ status = "okay";
+};
+
+&ssi1 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&usb_power>;
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
+
+ /*
+ * TPS2051BDGN fault-gpio is connected to Q7[86] USB_0_1_OC_N.
+ * On QMX6 this is not connected to the i.MX6, but to the USB Hub
+ * from &usbh1. This means that we cannot easily detect and handle
+ * over-current events. Fortunately the regulator limits the current
+ * automatically, so the hardware is still protected.
+ */
+};
+
+&usdhc4 {
+ /* WiFi module */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ bus-width = <4>;
+ no-1-8-v;
+ non-removable;
+ wakeup-source;
+ keep-power-in-suspend;
+ cap-power-off-card;
+ max-frequency = <25000000>;
+ vmmc-supply = <&reg_wlan>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1837";
+ reg = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio7>;
+
+ interrupt-parent = <&gpio4>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+
+ tcxo-clock-frequency = <26000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-b1x5v2.dtsi b/arch/arm/boot/dts/imx6dl-b1x5v2.dtsi
new file mode 100644
index 000000000000..a326a331508e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-b1x5v2.dtsi
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for General Electric B1x5v2
+// patient monitor series
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+
+#include <dt-bindings/input/input.h>
+#include "imx6dl-b1x5pv2.dtsi"
+
+/ {
+ reg_3v3_acm: regulator-3v3-acm {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3 ACM";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ vin-supply = <&reg_3v3>;
+ };
+};
+
+&i2c1 {
+ tca6416: gpio-controller@21 {
+ compatible = "ti,tca6416";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ reset-gpios = <&tca6424a 9 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&reg_3v3_acm>;
+ gpio-line-names = "ACM1_EN", "ACM1_CL0", "ACM1_CL1", "ACM1_CL2",
+ "", "ACM2_EN", "ACM2_CL0", "ACM2_CL1",
+ "ACM2_CL2", "", "", "",
+ "", "", "", "";
+
+ /*
+ * The interrupt pin is connected to &tca6424a pin 1, but the Linux
+ * TCA6424 driver cannot handle low type interrupts at the moment
+ * (and support cannot be added without some ugly hacks). Since this
+ * controller does not have any input type GPIOs, just pretend
+ * that the interrupt pin is unconnected.
+ */
+ };
+};
+
+&i2c5 {
+ mpl3115a2: pressure-sensor@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ vcc-supply = <&reg_3v3_acm>;
+
+ /*
+ * The MPL3115 interrupts are connected to pin 22 and 23
+ * of &tca6424a, but the binding does not yet support
+ * interrupts.
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-plym2m.dts b/arch/arm/boot/dts/imx6dl-plym2m.dts
index 4d0d3d3386af..60fe5f14666e 100644
--- a/arch/arm/boot/dts/imx6dl-plym2m.dts
+++ b/arch/arm/boot/dts/imx6dl-plym2m.dts
@@ -138,8 +138,6 @@
interrupts-extended = <&gpio3 20 IRQ_TYPE_EDGE_FALLING>;
pendown-gpio = <&gpio3 20 GPIO_ACTIVE_LOW>;
- touchscreen-size-x = <800>;
- touchscreen-size-y = <480>;
touchscreen-inverted-x;
touchscreen-inverted-y;
touchscreen-max-pressure = <4095>;
@@ -147,7 +145,9 @@
ti,vref-delay-usecs = /bits/ 16 <100>;
ti,x-plate-ohms = /bits/ 16 <800>;
ti,y-plate-ohms = /bits/ 16 <300>;
-
+ ti,debounce-max = /bits/ 16 <3>;
+ ti,debounce-tol = /bits/ 16 <70>;
+ ti,debounce-rep = /bits/ 16 <3>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/imx6dl-prtvt7.dts b/arch/arm/boot/dts/imx6dl-prtvt7.dts
index ae6da241f13e..190d26642bc8 100644
--- a/arch/arm/boot/dts/imx6dl-prtvt7.dts
+++ b/arch/arm/boot/dts/imx6dl-prtvt7.dts
@@ -21,14 +21,11 @@
backlight_lcd: backlight-lcd {
compatible = "pwm-backlight";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_backlight>;
- pwms = <&pwm1 0 500000>;
+ pwms = <&pwm1 0 500000 0>;
brightness-levels = <0 20 81 248 1000>;
default-brightness-level = <20>;
num-interpolated-steps = <21>;
power-supply = <&reg_bl_12v0>;
- enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
};
keys {
@@ -223,6 +220,24 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi2>;
status = "okay";
+
+ touchscreen@0 {
+ compatible = "ti,tsc2046";
+ reg = <0>;
+ pinctrl-0 = <&pinctrl_tsc>;
+ pinctrl-names ="default";
+ spi-max-frequency = <100000>;
+ interrupts-extended = <&gpio3 20 IRQ_TYPE_EDGE_FALLING>;
+ pendown-gpio = <&gpio3 20 GPIO_ACTIVE_LOW>;
+ touchscreen-max-pressure = <4095>;
+ ti,vref-delay-usecs = /bits/ 16 <100>;
+ ti,x-plate-ohms = /bits/ 16 <800>;
+ ti,y-plate-ohms = /bits/ 16 <300>;
+ ti,debounce-max = /bits/ 16 <3>;
+ ti,debounce-tol = /bits/ 16 <70>;
+ ti,debounce-rep = /bits/ 16 <3>;
+ wakeup-source;
+ };
};
&i2c1 {
@@ -261,7 +276,6 @@
};
&pwm1 {
- #pwm-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
status = "okay";
@@ -276,8 +290,6 @@
};
&ssi1 {
- #sound-dai-cells = <0>;
- fsl,mode = "ac97-slave";
status = "okay";
};
@@ -285,10 +297,6 @@
status = "disabled";
};
-&vpu {
- status = "disabled";
-};
-
&iomuxc {
pinctrl_audmux: audmuxgrp {
fsl,pins = <
@@ -300,12 +308,6 @@
>;
};
- pinctrl_backlight: backlightgrp {
- fsl,pins = <
- MX6QDL_PAD_DISP0_DAT7__GPIO4_IO28 0x1b0b0
- >;
- };
-
pinctrl_can1phy: can1phy {
fsl,pins = <
/* CAN1_SR */
diff --git a/arch/arm/boot/dts/imx6dl-qmx6.dtsi b/arch/arm/boot/dts/imx6dl-qmx6.dtsi
new file mode 100644
index 000000000000..150d69858255
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-qmx6.dtsi
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+//
+// Device Tree Source for i.MX6DL based congatec QMX6
+// System on Module
+//
+// Copyright 2018-2021 General Electric Company
+// Copyright 2018-2021 Collabora
+// Copyright 2016 congatec AG
+
+#include "imx6dl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
+
+/ {
+ memory@10000000 {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ i2cmux {
+ compatible = "i2c-mux-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mux-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>;
+ i2c-parent = <&i2c2>;
+
+ i2c5: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c6: i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+
+ audmux_ssi1 {
+ fsl,audmux-port = <MX51_AUDMUX_PORT1_SSI0>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSEL(MX51_AUDMUX_PORT6) |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCSEL(MX51_AUDMUX_PORT6) |
+ IMX_AUDMUX_V2_PTCR_SYN)
+ IMX_AUDMUX_V2_PDCR_RXDSEL(MX51_AUDMUX_PORT6)
+ >;
+ };
+
+ audmux_aud6 {
+ fsl,audmux-port = <MX51_AUDMUX_PORT6>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN
+ IMX_AUDMUX_V2_PDCR_RXDSEL(MX51_AUDMUX_PORT1_SSI0)
+ >;
+ };
+};
+
+&clks {
+ clocks = <&rtc_sqw>;
+ clock-names = "ckil";
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "okay";
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "sst,sst25vf032b", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0x0000000 0x100000>;
+ };
+
+ partition@100000 {
+ label = "user";
+ reg = <0x0100000 0x2fc000>;
+ };
+
+ partition@3fc000 {
+ label = "reserved";
+ reg = <0x03fc000 0x4000>;
+ read-only;
+ };
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet &pinctrl_phy_reset>;
+ phy-mode = "rgmii-id";
+ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
+ fsl,magic-packet;
+ phy-handle = <&phy0>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@6 {
+ reg = <6>;
+ qca,clk-out-frequency = <125000000>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ rtc: m41t62@68 {
+ compatible = "st,m41t62";
+ reg = <0x68>;
+
+ rtc_sqw: clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+};
+
+&i2c6 {
+ pmic@8 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /*
+ * keep VGEN3, VGEN4 and VGEN5 enabled in order to
+ * maintain backward compatibility with hw-rev. A.0
+ */
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ /* supply voltage for eMMC */
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&pcie {
+ reset-gpio = <&gpio1 20 0>;
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+};
+
+&reg_arm {
+ vin-supply = <&sw1a_reg>;
+};
+
+&reg_pu {
+ vin-supply = <&sw1c_reg>;
+};
+
+&reg_soc {
+ vin-supply = <&sw1c_reg>;
+};
+
+&snvs_poweroff {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&usbh1 {
+ /* Connected to USB-Hub SMSC USB2514, provides P0, P2, P3, P4 on Qseven connector */
+ vbus-supply = <&reg_5v>;
+ status = "okay";
+};
+
+&usbotg {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+};
+
+&usdhc2 {
+ /* MicroSD card slot */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ wakeup-source;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&usdhc3 {
+ /* eMMC module */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ non-removable;
+ bus-width = <8>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ wakeup-source;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ qmx6mux: imx6qdl-qmx6 {
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_PIN2__AUD6_TXD 0x110b0 /* Q7[67] HDA_SDO */
+ MX6QDL_PAD_DI0_PIN3__AUD6_TXFS 0x30b0 /* Q7[59] HDA_SYNC */
+ MX6QDL_PAD_DI0_PIN4__AUD6_RXD 0x30b0 /* Q7[65] HDA_SDI */
+ MX6QDL_PAD_DI0_PIN15__AUD6_TXC 0x30b0 /* Q7[63] HDA_BITCLK */
+ >;
+ };
+
+ /* PHY is on System on Module, Q7[3-15] have Ethernet lines */
+ pinctrl_enet: enet {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* PCIE_WAKE_B */
+ MX6QDL_PAD_NANDF_WP_B__GPIO6_IO09 0x80000000 /* I2C multiplexer */
+ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x80000000 /* SD4_CD# */
+ MX6QDL_PAD_NANDF_D7__GPIO2_IO07 0x80000000 /* SD4_WP */
+ MX6QDL_PAD_CSI0_MCLK__CCM_CLKO1 0x80000000 /* Camera MCLK */
+ >;
+ };
+
+ pinctrl_i2c1: i2c1 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1 /* Q7[66] I2C_CLK */
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1 /* Q7[68] I2C_DAT */
+ >;
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__GPIO3_IO21 0x1b0b0 /* Q7[66] I2C_CLK */
+ MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x1b0b0 /* Q7[68] I2C_DAT */
+ >;
+ };
+
+ pinctrl_i2c2: i2c2 {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1 /* Q7[152] SDVO_CTRL_CLK */
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1 /* Q7[150] SDVO_CTRL_DAT */
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2-gpio {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x1b0b0 /* Q7[152] SDVO_CTRL_CLK */
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x1b0b0 /* Q7[150] SDVO_CTRL_DAT */
+ >;
+ };
+
+ pinctrl_i2c3: i2c3 {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1 /* Q7[60] SMB_CLK */
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1 /* Q7[62] SMB_DAT */
+ >;
+ };
+
+ pinctrl_i2c3_gpio: i2c3-gpio {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x1b0b0 /* Q7[60] SMB_CLK */
+ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x1b0b0 /* Q7[62] SMB_DAT */
+ >;
+ };
+
+ pinctrl_phy_reset: phy-reset {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x1b0b0 /* RGMII Phy Reset */
+ >;
+ };
+
+ pinctrl_pwm4: pwm4 {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1 /* Q7[123] LVDS_BLT_CTRL */
+ >;
+ };
+
+ pinctrl_q7_backlight_enable: q7-backlight-enable {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0 /* Q7[112] LVDS_BLEN */
+ >;
+ };
+
+ pinctrl_q7_gpio0: q7-gpio0 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x1b0b0 /* Q7[185] GPIO0 */
+ >;
+ };
+
+ pinctrl_q7_gpio1: q7-gpio1 {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0 /* Q7[186] GPIO1 */
+ >;
+ };
+
+ pinctrl_q7_gpio2: q7-gpio2 {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x1b0b0 /* Q7[187] GPIO2 */
+ >;
+ };
+
+ pinctrl_q7_gpio3: q7-gpio3 {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT6__GPIO4_IO27 0x1b0b0 /* Q7[188] GPIO3 */
+ >;
+ };
+
+ pinctrl_q7_gpio4: q7-gpio4 {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 /* Q7[189] GPIO4 */
+ >;
+ };
+
+ pinctrl_q7_gpio5: q7-gpio5 {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x1b0b0 /* Q7[190] GPIO5 */
+ >;
+ };
+
+ pinctrl_q7_gpio6: q7-gpio6 {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_16__GPIO7_IO11 0x1b0b0 /* Q7[191] GPIO6 */
+ >;
+ };
+
+ pinctrl_q7_gpio7: q7-gpio7 {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b0b0 /* Q7[192] GPIO7 */
+ >;
+ };
+
+ pinctrl_q7_hda_reset: q7-hda-reset {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_ALE__GPIO6_IO08 0x1b0b0 /* Q7[61] HDA_RST_N */
+ >;
+ };
+
+ pinctrl_q7_lcd_power: lcd-power {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x1b0b0 /* Q7[111] LVDS_PPEN */
+ >;
+ };
+
+ pinctrl_q7_sdio_power: q7-sdio-power {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x1b0b0 /* Q7[47] SDIO_PWR# */
+ >;
+ };
+
+ pinctrl_q7_sleep_button: q7-sleep-button {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x1b0b0 /* Q7[21] SLP_BTN# */
+ >;
+ };
+
+ pinctrl_q7_spi_cs1: spi-cs1 {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT4__GPIO4_IO25 0x1b0b0 /* Q7[202] SPI_CS1# */
+ >;
+ };
+
+ /* SPI1 bus does not leave System on Module */
+ pinctrl_spi1: spi1 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x1b0b0
+ >;
+ };
+
+ /* Debug connector on Q7 module */
+ pinctrl_uart2: uart2 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1 /* Q7[177] UART0_RX */
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 /* Q7[171] UART0_TX */
+ >;
+ };
+
+ pinctrl_usbotg: usbotg {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 /* Q7[92] USB_ID */
+ >;
+ };
+
+ /* µSD card slot on Q7 module */
+ pinctrl_usdhc2: usdhc2 {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* SD2_CD */
+ >;
+ };
+
+ /* eMMC module on Q7 module */
+ pinctrl_usdhc3: usdhc3 {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+ >;
+ };
+
+ pinctrl_usdhc4: usdhc4 {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059 /* Q7[45] SDIO_CMD */
+ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x17059 /* Q7[42] SDIO_CLK */
+ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059 /* Q7[48] SDIO_DAT1 */
+ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059 /* Q7[49] SDIO_DAT0 */
+ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059 /* Q7[50] SDIO_DAT3 */
+ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059 /* Q7[51] SDIO_DAT2 */
+ >;
+ };
+
+ pinctrl_wdog: wdog {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0 /* Watchdog output signal */
+ >;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 065d3ab0f50a..e7d9bfbfd0e4 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -106,6 +106,8 @@
reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
reset-assert-us = <10000>;
reset-deassert-us = <1000>;
+ qca,smarteee-tw-us-1g = <24>;
+ qca,clk-out-frequency = <125000000>;
};
};
};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index 9148a01ed6d9..cb8b539eb29d 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -11,6 +11,8 @@
aliases: aliases {
ethernet1 = &eth1;
ethernet2 = &eth2;
+ mmc0 = &usdhc3;
+ mmc1 = &usdhc4;
};
backlight: backlight {
@@ -345,6 +347,7 @@
solomon,height = <64>;
solomon,width = <128>;
solomon,page-offset = <0>;
+ solomon,col-offset = <4>;
solomon,prechargep2 = <15>;
reset-gpios = <&gpio_oled 1 GPIO_ACTIVE_LOW>;
vbat-supply = <&sw2_reg>;
diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
index d0768ae429fa..4bf51f3ce003 100644
--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
@@ -12,10 +12,20 @@
/ {
aliases {
+ i2c0 = &i2c2;
+ i2c1 = &i2c1;
+ i2c2 = &i2c3;
mmc0 = &usdhc2;
mmc1 = &usdhc3;
mmc2 = &usdhc4;
mmc3 = &usdhc1;
+ rtc0 = &rtc_i2c;
+ rtc1 = &snvs_rtc;
+ serial0 = &uart1;
+ serial1 = &uart5;
+ serial2 = &uart4;
+ serial3 = &uart2;
+ serial4 = &uart3;
};
memory@10000000 {
@@ -23,6 +33,19 @@
reg = <0x10000000 0x40000000>;
};
+ reg_eth_vio: regulator-eth-vio {
+ compatible = "regulator-fixed";
+ gpio = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&pinctrl_enet_vio>;
+ pinctrl-names = "default";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "eth_vio";
+ vin-supply = <&sw2_reg>;
+ };
+
reg_usb_otg_vbus: regulator-usb-otg-vbus {
compatible = "regulator-fixed";
regulator-name = "usb_otg_vbus";
@@ -96,30 +119,40 @@
reg = <0>;
max-speed = <100>;
reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
- reset-delay-us = <1000>;
- reset-post-delay-us = <1000>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <1000>;
+ smsc,disable-energy-detect; /* Make plug-in detection reliable */
};
};
};
&i2c1 {
clock-frequency = <100000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&i2c2 {
clock-frequency = <100000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&i2c3 {
clock-frequency = <100000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
ltc3676: pmic@3c {
@@ -203,7 +236,7 @@
pagesize = <16>;
};
- rtc@56 {
+ rtc_i2c: rtc@56 {
compatible = "microcrystal,rv3029";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rtc_hw300>;
@@ -260,6 +293,11 @@
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
MX6QDL_PAD_EIM_WAIT__GPIO5_IO00 0x000b0
MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x000b1
+ >;
+ };
+
+ pinctrl_enet_vio: enet-vio-grp {
+ fsl,pins = <
MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x120b0
>;
};
@@ -285,6 +323,13 @@
>;
};
+ pinctrl_i2c1_gpio: i2c1-gpio-grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__GPIO3_IO21 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x4001b8b1
+ >;
+ };
+
pinctrl_i2c2: i2c2-grp {
fsl,pins = <
MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
@@ -292,6 +337,13 @@
>;
};
+ pinctrl_i2c2_gpio: i2c2-gpio-grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x4001b8b1
+ >;
+ };
+
pinctrl_i2c3: i2c3-grp {
fsl,pins = <
MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
@@ -299,6 +351,13 @@
>;
};
+ pinctrl_i2c3_gpio: i2c3-gpio-grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x4001b8b1
+ >;
+ };
+
pinctrl_pmic_hw300: pmic-hw300-grp {
fsl,pins = <
MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x1B0B0
diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
index fa2307d8ce86..c713ac03b3b9 100644
--- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
@@ -112,7 +112,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
phy-supply = <&vgen2_1v2_eth>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6q-ds.dts b/arch/arm/boot/dts/imx6q-ds.dts
new file mode 100644
index 000000000000..b0a63a133977
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-ds.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2021 Dillon Min <dillon.minfei@gmail.com>
+//
+// Based on imx6qdl-sabresd.dtsi which is:
+// Copyright 2012 Freescale Semiconductor, Inc.
+// Copyright 2011 Linaro Ltd.
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-ds.dtsi"
+
+/ {
+ model = "DaSheng i.MX6 Quad Com-9xx Board";
+ compatible = "ds,imx6q-sbc", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-ds.dtsi b/arch/arm/boot/dts/imx6qdl-ds.dtsi
new file mode 100644
index 000000000000..f7e517555697
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-ds.dtsi
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2021 Dillon Min <dillon.minfei@gmail.com>
+//
+// Based on imx6qdl-sabresd.dtsi which is:
+// Copyright 2012 Freescale Semiconductor, Inc.
+// Copyright 2011 Linaro Ltd.
+
+#include <dt-bindings/clock/imx6qdl-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x80000000>;
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_usb_h1_vbus: regulator-usb-h1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led-0 {
+ gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&ipu1_csi0_from_ipu1_csi0_mux {
+ bus-width = <8>;
+ data-shift = <12>; /* Lines 19:12 used */
+ hsync-active = <1>;
+ vsync-active = <1>;
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+ remote-endpoint = <&ov2659_to_ipu1_csi0_mux>;
+};
+
+&ipu1_csi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_csi0>;
+ status = "okay";
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>, <&pinctrl_ecspi1_gpio>;
+ status = "okay";
+
+ m25p80: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: ethernet-phy@1 {
+ reg = <1>;
+ qca,clk-out-frequency = <125000000>;
+ reset-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ };
+ };
+};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi_cec>;
+ ddc-i2c-bus = <&i2c3>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ pfuze100: pmic@8 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ ov2659: camera@30 {
+ compatible = "ovti,ov2659";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ov2659>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ clock-names = "xvclk";
+ reg = <0x30>;
+ powerdown-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ port {
+ ov2659_to_ipu1_csi0_mux: endpoint {
+ remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+ link-frequencies = /bits/ 64 <70000000>;
+ bus-width = <8>;
+ hsync-active = <1>;
+ vsync-active = <1>;
+ };
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ >;
+ };
+
+ pinctrl_ecspi1_gpio: ecspi1grpgpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x1b0b0
+ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x1b0b0
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x1b0b0
+ >;
+ };
+
+ pinctrl_hdmi_cec: hdmicecgrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1f8b0
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_ipu1_csi0: ipu1csi0grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+ MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x1b0b0
+ MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x1b0b0
+ >;
+ };
+
+ pinctrl_ov2659: ov2659grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_16__GPIO7_IO11 0x1b0b0
+ MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x1b0b0
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1grpgpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2grpgpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x1b0b0
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__WDOG2_B 0x1b0b0
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__GPIO4_IO08 0x1b0b0
+ >;
+ };
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ bus-width = <4>;
+ cd-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ bus-width = <4>;
+ cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ non-removable;
+ no-1-8-v;
+ status = "okay";
+};
+
+&wdog1 {
+ status = "disabled";
+};
+
+&wdog2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 7bd658b7bdda..f3236204cb5a 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -322,8 +322,8 @@
fsl,pins = <
MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
- MX6QDL_PAD_EIM_D30__UART3_RTS_B 0x1b0b1
- MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1
>;
};
@@ -410,6 +410,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index f824c9abd11a..0c0105468a2f 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -97,8 +97,11 @@
compatible = "fsl,imx6q-sabresd-wm8962",
"fsl,imx-audio-wm8962";
model = "wm8962-audio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hp>;
ssi-controller = <&ssi2>;
audio-codec = <&codec>;
+ audio-asrc = <&asrc>;
audio-routing =
"Headphone Jack", "HPOUTL",
"Headphone Jack", "HPOUTR",
@@ -545,6 +548,13 @@
>;
};
+ pinctrl_hp: hpgrp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x1b0b0
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
diff --git a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
index 0ad8ccde0cf8..f86efd0ccc40 100644
--- a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
@@ -54,7 +54,13 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
phy-mode = "rgmii-id";
- phy-reset-duration = <2>;
+
+ /*
+ * The PHY seems to require a long-enough reset duration to avoid
+ * some rare issues where the PHY gets stuck in an inconsistent and
+ * non-functional state at boot-up. 10ms proved to be fine.
+ */
+ phy-reset-duration = <10>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
index eb25d21a2ace..b9e305774fed 100644
--- a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
@@ -40,6 +40,27 @@
};
};
+ counter-0 {
+ compatible = "interrupt-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_counter0>;
+ gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ };
+
+ counter-1 {
+ compatible = "interrupt-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_counter1>;
+ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+
+ counter-2 {
+ compatible = "interrupt-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_counter2>;
+ gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ };
+
gpio-keys {
compatible = "gpio-keys";
autorepeat;
@@ -254,7 +275,7 @@
&gpio2 {
gpio-line-names =
- "", "", "", "", "", "", "", "",
+ "count0", "count1", "count2", "", "", "", "", "",
"REV_ID0", "REV_ID1", "REV_ID2", "REV_ID3", "REV_ID4",
"BOARD_ID0", "BOARD_ID1", "BOARD_ID2",
"", "", "", "", "", "", "", "ON_SWITCH",
@@ -572,6 +593,24 @@
>;
};
+ pinctrl_counter0: counter0grp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x1b000
+ >;
+ };
+
+ pinctrl_counter1: counter1grp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b000
+ >;
+ };
+
+ pinctrl_counter2: counter2grp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x1b000
+ >;
+ };
+
pinctrl_ecspi1: ecspi1grp {
fsl,pins = <
MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
index b9b698f72b26..bf86b639fdac 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
@@ -142,7 +142,6 @@
imx6qdl-wandboard {
pinctrl_hog: hoggrp {
fsl,pins = <
- MX6QDL_PAD_EIM_D22__USB_OTG_PWR 0x80000000 /* USB Power Enable */
MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* USDHC1 CD */
MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x80000000 /* uSDHC3 CD */
MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1f0b1 /* RGMII PHY reset */
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index 5a1e10def6ef..779cc536566d 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -60,38 +60,26 @@
gpios = <&gpio_spi 3 GPIO_ACTIVE_LOW>;
};
- sound {
- compatible = "simple-audio-card";
- simple-audio-card,name = "mx6ul-wm8960";
- simple-audio-card,format = "i2s";
- simple-audio-card,bitclock-master = <&dailink_master>;
- simple-audio-card,frame-master = <&dailink_master>;
- simple-audio-card,widgets =
- "Microphone", "Mic Jack",
- "Line", "Line In",
- "Line", "Line Out",
- "Speaker", "Speaker",
- "Headphone", "Headphone Jack";
- simple-audio-card,routing =
+ sound-wm8960 {
+ compatible = "fsl,imx-audio-wm8960";
+ model = "wm8960-audio";
+ audio-cpu = <&sai2>;
+ audio-codec = <&codec>;
+ audio-asrc = <&asrc>;
+ hp-det-gpio = <&gpio5 4 0>;
+ audio-routing =
"Headphone Jack", "HP_L",
"Headphone Jack", "HP_R",
- "Speaker", "SPK_LP",
- "Speaker", "SPK_LN",
- "Speaker", "SPK_RP",
- "Speaker", "SPK_RN",
- "LINPUT1", "Mic Jack",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "LINPUT2", "Mic Jack",
"LINPUT3", "Mic Jack",
- "RINPUT1", "Mic Jack",
- "RINPUT2", "Mic Jack";
-
- simple-audio-card,cpu {
- sound-dai = <&sai2>;
- };
-
- dailink_master: simple-audio-card,codec {
- sound-dai = <&codec>;
- clocks = <&clks IMX6UL_CLK_SAI2>;
- };
+ "RINPUT1", "AMIC",
+ "RINPUT2", "AMIC",
+ "Mic Jack", "MICB",
+ "AMIC", "MICB";
};
spi4 {
@@ -145,6 +133,10 @@
compatible = "wlf,wm8960";
reg = <0x1a>;
wlf,shared-lrclk;
+ wlf,hp-cfg = <3 2 3>;
+ wlf,gpio-cfg = <1 3>;
+ clocks = <&clks IMX6UL_CLK_SAI2>;
+ clock-names = "mclk";
};
camera@3c {
diff --git a/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi b/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi
index a0545431b3dc..9f1e38282bee 100644
--- a/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi
+++ b/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi
@@ -43,6 +43,7 @@
assigned-clock-rates = <0>, <198000000>;
cap-power-off-card;
keep-power-in-suspend;
+ max-frequency = <25000000>;
mmc-pwrseq = <&wifi_pwrseq>;
no-1-8-v;
non-removable;
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index ac0751bc1177..4a0d83784d7d 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -164,6 +164,13 @@
"LINPUT1", "AMIC",
"AMIC", "MICB";
};
+
+ sound-hdmi {
+ compatible = "fsl,imx-audio-sii902x";
+ model = "sii902x-audio";
+ audio-cpu = <&sai3>;
+ hdmi-out;
+ };
};
&adc1 {
diff --git a/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts b/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
index 8fcd95805ff4..5b8dcc19deee 100644
--- a/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
+++ b/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
@@ -90,20 +90,71 @@
timeout-ms = <5000>;
};
- /* The first 16MB region on the expansion bus */
- flash@50000000 {
- compatible = "intel,ixp4xx-flash", "cfi-flash";
- bank-width = <2>;
- /*
- * 8 MB of Flash in 0x20000 byte blocks
- * mapped in at 0x50000000
- */
- reg = <0x50000000 0x800000>;
-
- partitions {
- compatible = "redboot-fis";
- /* Eraseblock at 0x7e0000 */
- fis-index-block = <0x3f>;
+ gpio-beeper {
+ compatible = "gpio-beeper";
+ gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ };
+
+ soc {
+ bus@50000000 {
+ /* The first 16MB region at CS0 on the expansion bus */
+ flash@0 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ bank-width = <2>;
+ /*
+ * 8 MB of Flash in 0x20000 byte blocks
+ * mapped in at CS0.
+ */
+ reg = <0x00000000 0x800000>;
+
+ partitions {
+ compatible = "redboot-fis";
+ /* Eraseblock at 0x7e0000 */
+ fis-index-block = <0x3f>;
+ };
+ };
+ };
+
+ pci@c0000000 {
+ status = "ok";
+
+ /*
+ * Taken from the NSLU2 PCI boardfile: INT A, B and C are swizzled, INT D is constant.
+ * We have slots (IDSEL) 1, 2 and 3.
+ */
+ interrupt-map =
+ /* IDSEL 1 */
+ <0x0800 0 0 1 &gpio0 11 3>, /* INT A on slot 1 is irq 11 */
+ <0x0800 0 0 2 &gpio0 10 3>, /* INT B on slot 1 is irq 10 */
+ <0x0800 0 0 3 &gpio0 9 3>, /* INT C on slot 1 is irq 9 */
+ <0x0800 0 0 4 &gpio0 8 3>, /* INT D on slot 1 is irq 8 */
+ /* IDSEL 2 */
+ <0x1000 0 0 1 &gpio0 10 3>, /* INT A on slot 2 is irq 10 */
+ <0x1000 0 0 2 &gpio0 9 3>, /* INT B on slot 2 is irq 9 */
+ <0x1000 0 0 3 &gpio0 11 3>, /* INT C on slot 2 is irq 11 */
+ <0x1000 0 0 4 &gpio0 8 3>, /* INT D on slot 2 is irq 8 */
+ /* IDSEL 3 */
+ <0x1800 0 0 1 &gpio0 9 3>, /* INT A on slot 3 is irq 9 */
+ <0x1800 0 0 2 &gpio0 11 3>, /* INT B on slot 3 is irq 11 */
+ <0x1800 0 0 3 &gpio0 10 3>, /* INT C on slot 3 is irq 10 */
+ <0x1800 0 0 4 &gpio0 8 3>; /* INT D on slot 3 is irq 8 */
+ };
+
+ ethernet@c8009000 {
+ status = "ok";
+ queue-rx = <&qmgr 3>;
+ queue-txready = <&qmgr 20>;
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/intel-ixp42x-welltech-epbx100.dts b/arch/arm/boot/dts/intel-ixp42x-welltech-epbx100.dts
new file mode 100644
index 000000000000..84158503be2a
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp42x-welltech-epbx100.dts
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "intel-ixp42x.dtsi"
+
+/ {
+ model = "Welltech EPBX100";
+ compatible = "welltech,epbx100", "intel,ixp42x";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory@0 {
+ /* 64 MB SDRAM */
+ device_type = "memory";
+ reg = <0x00000000 0x4000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 root=/dev/ram0 initrd=0x00800000,9M";
+ stdout-path = "uart0:115200n8";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ flash@50000000 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ bank-width = <2>;
+ /*
+ * 16 MB of Flash
+ */
+ reg = <0x50000000 0x1000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "RedBoot";
+ reg = <0x00000000 0x00080000>;
+ read-only;
+ };
+ partition@80000 {
+ label = "zImage";
+ reg = <0x00080000 0x00100000>;
+ read-only;
+ };
+ partition@180000 {
+ label = "ramdisk";
+ reg = <0x00180000 0x00300000>;
+ read-only;
+ };
+ partition@480000 {
+ label = "User";
+ reg = <0x00480000 0x00b60000>;
+ read-only;
+ };
+ partition@fe0000 {
+ label = "FIS directory";
+ reg = <0x00fe0000 0x001f000>;
+ read-only;
+ };
+ partition@fff000 {
+ label = "RedBoot config";
+ reg = <0x00fff000 0x0001000>;
+ read-only;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/intel-ixp42x.dtsi b/arch/arm/boot/dts/intel-ixp42x.dtsi
index a9622ca850cc..5fa063ed396c 100644
--- a/arch/arm/boot/dts/intel-ixp42x.dtsi
+++ b/arch/arm/boot/dts/intel-ixp42x.dtsi
@@ -7,6 +7,10 @@
/ {
soc {
+ pci@c0000000 {
+ compatible = "intel,ixp42x-pci";
+ };
+
interrupt-controller@c8003000 {
compatible = "intel,ixp42x-interrupt";
};
diff --git a/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts b/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
index ba1163a1e1e7..60a1228a970f 100644
--- a/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
+++ b/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
@@ -76,19 +76,97 @@
};
};
- flash@50000000 {
- compatible = "intel,ixp4xx-flash", "cfi-flash";
- bank-width = <2>;
- /*
- * 32 MB of Flash in 0x20000 byte blocks
- * mapped in at 0x50000000
- */
- reg = <0x50000000 0x2000000>;
-
- partitions {
- compatible = "redboot-fis";
- /* Eraseblock at 0x1fe0000 */
- fis-index-block = <0xff>;
+ soc {
+ bus@50000000 {
+ flash@0 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ bank-width = <2>;
+ /*
+ * 32 MB of Flash in 0x20000 byte blocks
+ * mapped in at CS0.
+ */
+ reg = <0x00000000 0x2000000>;
+
+ partitions {
+ compatible = "redboot-fis";
+ /* Eraseblock at 0x1fe0000 */
+ fis-index-block = <0xff>;
+ };
+ };
+ };
+
+ pci@c0000000 {
+ status = "ok";
+
+ /*
+ * In the boardfile for the Cambria from OpenWrt the interrupts
+ * are assigned one per IDSEL, so all 4 interrupts from IDSEL 1
+ * are connected to IRQ 11, all 4 interrupts from IDSEL 2 to
+ * IRQ 10 and so on. I find this highly unlikely, so I have
+ * instead assumed that they are rotated (swizzled): 11, 10, 9, 8
+ * for the 4 pins on IDSEL 1 and so on.
+ */
+ interrupt-map =
+ /* IDSEL 1 */
+ <0x0800 0 0 1 &gpio0 11 3>, /* INT A on slot 1 is irq 11 */
+ <0x0800 0 0 2 &gpio0 10 3>, /* INT B on slot 1 is irq 10 */
+ <0x0800 0 0 3 &gpio0 9 3>, /* INT C on slot 1 is irq 9 */
+ <0x0800 0 0 4 &gpio0 8 3>, /* INT D on slot 1 is irq 8 */
+ /* IDSEL 2 */
+ <0x1000 0 0 1 &gpio0 10 3>, /* INT A on slot 2 is irq 10 */
+ <0x1000 0 0 2 &gpio0 9 3>, /* INT B on slot 2 is irq 9 */
+ <0x1000 0 0 3 &gpio0 8 3>, /* INT C on slot 2 is irq 8 */
+ <0x1000 0 0 4 &gpio0 11 3>, /* INT D on slot 2 is irq 11 */
+ /* IDSEL 3 */
+ <0x1800 0 0 1 &gpio0 9 3>, /* INT A on slot 3 is irq 9 */
+ <0x1800 0 0 2 &gpio0 8 3>, /* INT B on slot 3 is irq 8 */
+ <0x1800 0 0 3 &gpio0 11 3>, /* INT C on slot 3 is irq 11 */
+ <0x1800 0 0 4 &gpio0 10 3>, /* INT D on slot 3 is irq 10 */
+ /* IDSEL 4 */
+ <0x2000 0 0 1 &gpio0 8 3>, /* INT A on slot 4 is irq 8 */
+ <0x2000 0 0 2 &gpio0 11 3>, /* INT B on slot 4 is irq 11 */
+ <0x2000 0 0 3 &gpio0 10 3>, /* INT C on slot 4 is irq 10 */
+ <0x2000 0 0 4 &gpio0 9 3>, /* INT D on slot 4 is irq 9 */
+ /* IDSEL 6 */
+ <0x3000 0 0 1 &gpio0 10 3>, /* INT A on slot 6 is irq 10 */
+ <0x3000 0 0 2 &gpio0 9 3>, /* INT B on slot 6 is irq 9 */
+ <0x3000 0 0 3 &gpio0 8 3>, /* INT C on slot 6 is irq 8 */
+ <0x3000 0 0 4 &gpio0 11 3>, /* INT D on slot 6 is irq 11 */
+ /* IDSEL 15 */
+ <0x7800 0 0 1 &gpio0 8 3>, /* INT A on slot 15 is irq 8 */
+ <0x7800 0 0 2 &gpio0 11 3>, /* INT B on slot 15 is irq 11 */
+ <0x7800 0 0 3 &gpio0 10 3>, /* INT C on slot 15 is irq 10 */
+ <0x7800 0 0 4 &gpio0 9 3>; /* INT D on slot 15 is irq 9 */
+ };
+
+ ethernet@c800a000 {
+ status = "ok";
+ queue-rx = <&qmgr 4>;
+ queue-txready = <&qmgr 21>;
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
+ };
+
+ ethernet@c800c000 {
+ status = "ok";
+ queue-rx = <&qmgr 2>;
+ queue-txready = <&qmgr 19>;
+ phy-mode = "rgmii";
+ phy-handle = <&phy2>;
+ intel,npe-handle = <&npe 0>;
};
};
};
diff --git a/arch/arm/boot/dts/intel-ixp43x.dtsi b/arch/arm/boot/dts/intel-ixp43x.dtsi
index 494fb2ff57a0..1d0817c6e3f9 100644
--- a/arch/arm/boot/dts/intel-ixp43x.dtsi
+++ b/arch/arm/boot/dts/intel-ixp43x.dtsi
@@ -8,6 +8,10 @@
/ {
soc {
+ pci@c0000000 {
+ compatible = "intel,ixp43x-pci";
+ };
+
interrupt-controller@c8003000 {
compatible = "intel,ixp43x-interrupt";
};
diff --git a/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi b/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi
index f8cd506659dc..cce49e809043 100644
--- a/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi
+++ b/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi
@@ -30,5 +30,38 @@
interrupts = <33 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
+
+ /* This is known as EthB1 */
+ ethernet@c800d000 {
+ compatible = "intel,ixp4xx-ethernet";
+ reg = <0xc800d000 0x1000>;
+ status = "disabled";
+ intel,npe = <1>;
+ /* Dummy values that depend on firmware */
+ queue-rx = <&qmgr 0>;
+ queue-txready = <&qmgr 0>;
+ };
+
+ /* This is known as EthB2 */
+ ethernet@c800e000 {
+ compatible = "intel,ixp4xx-ethernet";
+ reg = <0xc800e000 0x1000>;
+ status = "disabled";
+ intel,npe = <2>;
+ /* Dummy values that depend on firmware */
+ queue-rx = <&qmgr 0>;
+ queue-txready = <&qmgr 0>;
+ };
+
+ /* This is known as EthB3 */
+ ethernet@c800f000 {
+ compatible = "intel,ixp4xx-ethernet";
+ reg = <0xc800f000 0x1000>;
+ status = "disabled";
+ intel,npe = <3>;
+ /* Dummy values that depend on firmware */
+ queue-rx = <&qmgr 0>;
+ queue-txready = <&qmgr 0>;
+ };
};
};
diff --git a/arch/arm/boot/dts/intel-ixp4xx.dtsi b/arch/arm/boot/dts/intel-ixp4xx.dtsi
index d4a09584f417..a50427ad05e7 100644
--- a/arch/arm/boot/dts/intel-ixp4xx.dtsi
+++ b/arch/arm/boot/dts/intel-ixp4xx.dtsi
@@ -14,12 +14,61 @@
compatible = "simple-bus";
interrupt-parent = <&intcon>;
+ /*
+ * The IXP4xx expansion bus is a set of 16 or 32MB
+ * windows in the 256MB space from 0x50000000 to
+ * 0x5fffffff.
+ */
+ bus@50000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x50000000 0x10000000>;
+ dma-ranges = <0x00000000 0x50000000 0x10000000>;
+ };
+
qmgr: queue-manager@60000000 {
compatible = "intel,ixp4xx-ahb-queue-manager";
reg = <0x60000000 0x4000>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>, <4 IRQ_TYPE_LEVEL_HIGH>;
};
+ pci@c0000000 {
+ /* compatible filled in by per-soc device tree */
+ reg = <0xc0000000 0x1000>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>,
+ <9 IRQ_TYPE_LEVEL_HIGH>,
+ <10 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x00 0xff>;
+ status = "disabled";
+
+ ranges =
+ /*
+ * 64MB 32bit non-prefetchable memory 0x48000000-0x4bffffff
+ * done in 4 chunks of 16MB each.
+ */
+ <0x02000000 0 0x48000000 0x48000000 0 0x04000000>,
+ /* 64KB I/O space at 0x4c000000 */
+ <0x01000000 0 0x00000000 0x4c000000 0 0x00010000>;
+
+ /*
+ * This needs to map to the start of physical memory so
+ * PCI devices can (hopefully) see all memory. This is done
+ * using 4 1:1 16MB windows, so the RAM should not be more than
+ * 64 MB for this to work. If your memory starts anywhere other
+ * than at 0x0 you need to alter this.
+ */
+ dma-ranges =
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ /* Each unique DTS using PCI must specify the swizzling */
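+ /*
+ * The mask matches the device/IDSEL bits (0xf800) of the child
+ * unit address plus the 3-bit interrupt pin, so each board's
+ * interrupt-map entries only need to encode slot and INTx pin.
+ */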
+ };
+
uart0: serial@c8000000 {
compatible = "intel,xscale-uart";
reg = <0xc8000000 0x1000>;
@@ -61,9 +110,50 @@
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
};
- npe@c8006000 {
+ npe: npe@c8006000 {
compatible = "intel,ixp4xx-network-processing-engine";
reg = <0xc8006000 0x1000>, <0xc8007000 0x1000>, <0xc8008000 0x1000>;
+
+ /* NPE-C contains a crypto accelerator */
+ crypto {
+ compatible = "intel,ixp4xx-crypto";
+ intel,npe-handle = <&npe 2>;
+ queue-rx = <&qmgr 30>;
+ queue-txready = <&qmgr 29>;
+ };
+ };
+
+ /* This is known as EthB */
+ ethernet@c8009000 {
+ compatible = "intel,ixp4xx-ethernet";
+ reg = <0xc8009000 0x1000>;
+ status = "disabled";
+ /* Dummy values that depend on firmware */
+ queue-rx = <&qmgr 3>;
+ queue-txready = <&qmgr 20>;
+ intel,npe-handle = <&npe 1>;
+ };
+
+ /* This is known as EthC */
+ ethernet@c800a000 {
+ compatible = "intel,ixp4xx-ethernet";
+ reg = <0xc800a000 0x1000>;
+ status = "disabled";
+ /* Dummy values that depend on firmware */
+ queue-rx = <&qmgr 0>;
+ queue-txready = <&qmgr 0>;
+ intel,npe-handle = <&npe 2>;
+ };
+
+ /* This is known as EthA */
+ ethernet@c800c000 {
+ compatible = "intel,ixp4xx-ethernet";
+ reg = <0xc800c000 0x1000>;
+ status = "disabled";
+ intel,npe = <0>;
+ /* Dummy values that depend on firmware */
+ queue-rx = <&qmgr 0>;
+ queue-txready = <&qmgr 0>;
};
};
};
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index 14e26a4fd62a..d800f26b6275 100644
--- a/arch/arm/boot/dts/keystone-k2g-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2g-evm.dts
@@ -544,20 +544,15 @@
};
};
-&k2g_clks {
- /* on the board 22.5792MHz is connected to AUDOSC_IN */
- assigned-clocks = <&k2g_clks 0x4c 2>;
- assigned-clock-rates = <22579200>;
-};
-
&mcasp2 {
#sound-dai-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&mcasp2_pins>;
- assigned-clocks = <&k2g_clks 0x6 1>;
- assigned-clock-parents = <&k2g_clks 0x6 2>;
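+ /*
+ * A zero entry in assigned-clock-parents or assigned-clock-rates
+ * leaves that clock's parent or rate unchanged, so only the
+ * first clock's rate and the second clock's parent are set here.
+ */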
+ assigned-clocks = <&k2g_clks 0x4c 2>, <&k2g_clks 0x6 1>;
+ assigned-clock-parents = <0>, <&k2g_clks 0x6 2>;
+ assigned-clock-rates = <22579200>, <0>;
status = "okay";
diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi
index 05a75019275e..37198294f4b2 100644
--- a/arch/arm/boot/dts/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/keystone-k2g.dtsi
@@ -242,7 +242,7 @@
status = "disabled";
};
- msgmgr: msgmgr@2a00000 {
+ msgmgr: mailbox@2a00000 {
compatible = "ti,k2g-message-manager";
#mbox-cells = <2>;
reg-names = "queue_proxy_region",
@@ -254,7 +254,7 @@
<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
};
- pmmc: pmmc@2921c00 {
+ pmmc: system-controller@2921c00 {
compatible = "ti,k2g-sci";
/*
* In case of rare platforms that do not use k2g as
@@ -272,7 +272,7 @@
#power-domain-cells = <1>;
};
- k2g_clks: clocks {
+ k2g_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 8bae6ed0abb2..bd0e864964e9 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -50,6 +50,7 @@
compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
reg = <0x84c0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+ fifo-size = <128>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/mstar-v7.dtsi b/arch/arm/boot/dts/mstar-v7.dtsi
index 075d583d6f40..2273295e140f 100644
--- a/arch/arm/boot/dts/mstar-v7.dtsi
+++ b/arch/arm/boot/dts/mstar-v7.dtsi
@@ -60,6 +60,14 @@
clock-frequency = <32768>;
status = "disabled";
};
+
+ xtal_div2: xtal_div2 {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&xtal>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ };
};
soc: soc {
@@ -101,6 +109,12 @@
mask = <0x79>;
};
+ watchdog@6000 {
+ compatible = "mstar,msc313e-wdt";
+ reg = <0x6000 0x1f>;
+ clocks = <&xtal_div2>;
+ };
+
intc_fiq: interrupt-controller@201310 {
compatible = "mstar,mst-intc";
reg = <0x201310 0x40>;
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index f9c2a9938898..5750ca1233cc 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -120,8 +120,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <56>;
- dmas = <&sdma 27 &sdma 28>;
- dma-names = "tx", "rx";
};
i2c2: i2c@48072000 {
@@ -131,8 +129,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <57>;
- dmas = <&sdma 29 &sdma 30>;
- dma-names = "tx", "rx";
};
mcspi1: spi@48098000 {
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index 494bf6972005..bb529a2a295d 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -192,16 +192,15 @@
compatible = "ti,omap2-mailbox";
reg = <0x48094000 0x200>;
interrupts = <26>, <34>;
- interrupt-names = "dsp", "iva";
ti,hwmods = "mailbox";
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <6>;
- mbox_dsp: dsp {
+ mbox_dsp: mbox-dsp {
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <1 0 0>;
};
- mbox_iva: iva {
+ mbox_iva: mbox-iva {
ti,mbox-tx = <2 1 3>;
ti,mbox-rx = <3 1 3>;
};
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index d19d8ba3b607..23115ba61bc0 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -284,7 +284,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <6>;
- mbox_dsp: dsp {
+ mbox_dsp: mbox-dsp {
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <1 0 0>;
};
diff --git a/arch/arm/boot/dts/omap3-evm-processor-common.dtsi b/arch/arm/boot/dts/omap3-evm-processor-common.dtsi
index b4109f48ec18..e6ba30a21166 100644
--- a/arch/arm/boot/dts/omap3-evm-processor-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-processor-common.dtsi
@@ -195,7 +195,7 @@
* for bus switch SN74CB3Q3384A, level-shifter SN74AVC16T245DGGR, and 1.8V.
*/
&gpio2 {
- en_usb2_port {
+ en-usb2-port-hog {
gpio-hog;
gpios = <29 GPIO_ACTIVE_HIGH>; /* gpio_61 */
output-low;
diff --git a/arch/arm/boot/dts/omap3-gta04a5.dts b/arch/arm/boot/dts/omap3-gta04a5.dts
index fd84bbf3b9cc..9ce8d81250aa 100644
--- a/arch/arm/boot/dts/omap3-gta04a5.dts
+++ b/arch/arm/boot/dts/omap3-gta04a5.dts
@@ -37,7 +37,7 @@
};
&gpio5 {
- irda_en {
+ irda-en-hog {
gpio-hog;
gpios = <(175-160) GPIO_ACTIVE_HIGH>;
output-high; /* activate gpio_175 to disable IrDA receiver */
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index c5b9da0d7e6c..64b7e6fddd1b 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -403,8 +403,6 @@
compatible = "ti,omap3-i2c";
reg = <0x48070000 0x80>;
interrupts = <56>;
- dmas = <&sdma 27 &sdma 28>;
- dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "i2c1";
@@ -414,8 +412,6 @@
compatible = "ti,omap3-i2c";
reg = <0x48072000 0x80>;
interrupts = <57>;
- dmas = <&sdma 29 &sdma 30>;
- dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "i2c2";
@@ -425,8 +421,6 @@
compatible = "ti,omap3-i2c";
reg = <0x48060000 0x80>;
interrupts = <61>;
- dmas = <&sdma 25 &sdma 26>;
- dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
ti,hwmods = "i2c3";
@@ -440,7 +434,7 @@
#mbox-cells = <1>;
ti,mbox-num-users = <2>;
ti,mbox-num-fifos = <2>;
- mbox_dsp: dsp {
+ mbox_dsp: mbox-dsp {
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <1 0 0>;
};
diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi
index 99721673d7af..46b8f9efd413 100644
--- a/arch/arm/boot/dts/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/omap4-l4.dtsi
@@ -600,11 +600,11 @@
#mbox-cells = <1>;
ti,mbox-num-users = <3>;
ti,mbox-num-fifos = <8>;
- mbox_ipu: mbox_ipu {
+ mbox_ipu: mbox-ipu {
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <1 0 0>;
};
- mbox_dsp: mbox_dsp {
+ mbox_dsp: mbox-dsp {
ti,mbox-tx = <3 0 0>;
ti,mbox-rx = <2 0 0>;
};
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index d8f13626cfd1..373984c130e0 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -30,14 +30,6 @@
regulator-max-microvolt = <5000000>;
};
- vdds_1v8_main: fixedregulator-vdds_1v8_main {
- compatible = "regulator-fixed";
- regulator-name = "vdds_1v8_main";
- vin-supply = <&smps7_reg>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
vmmcsd_fixed: fixedregulator-mmcsd {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
@@ -149,7 +141,7 @@
&gpio8 {
/* TI trees use GPIO instead of msecure, see also muxing */
- p234 {
+ msecure-hog {
gpio-hog;
gpios = <10 GPIO_ACTIVE_HIGH>;
output-high;
@@ -487,6 +479,7 @@
regulator-boot-on;
};
+ vdds_1v8_main:
smps7_reg: smps7 {
/* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
regulator-name = "smps7";
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi
index b148b289e830..06cc3a19ddaa 100644
--- a/arch/arm/boot/dts/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/omap5-l4.dtsi
@@ -616,11 +616,11 @@
#mbox-cells = <1>;
ti,mbox-num-users = <3>;
ti,mbox-num-fifos = <8>;
- mbox_ipu: mbox_ipu {
+ mbox_ipu: mbox-ipu {
ti,mbox-tx = <0 0 0>;
ti,mbox-rx = <1 0 0>;
};
- mbox_dsp: mbox_dsp {
+ mbox_dsp: mbox-dsp {
ti,mbox-tx = <3 0 0>;
ti,mbox-rx = <2 0 0>;
};
diff --git a/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi b/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi
index 91163867be34..31f59de5190b 100644
--- a/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi
+++ b/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi
@@ -9,27 +9,27 @@ partitions {
#size-cells = <1>;
u-boot@0 {
- reg = <0x0 0x60000>; // 384KB
+ reg = <0x0 0xe0000>; // 896KB
label = "u-boot";
};
- u-boot-env@60000 {
- reg = <0x60000 0x20000>; // 128KB
+ u-boot-env@e0000 {
+ reg = <0xe0000 0x20000>; // 128KB
label = "u-boot-env";
};
- kernel@80000 {
- reg = <0x80000 0x500000>; // 5MB
+ kernel@100000 {
+ reg = <0x100000 0x900000>; // 9MB
label = "kernel";
};
- rofs@580000 {
- reg = <0x580000 0x2a80000>; // 42.5MB
+ rofs@a00000 {
+ reg = <0xa00000 0x2000000>; // 32MB
label = "rofs";
};
- rwfs@3000000 {
- reg = <0x3000000 0x1000000>; // 16MB
+ rwfs@2a00000 {
+ reg = <0x2a00000 0x1600000>; // 22MB
label = "rwfs";
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index dace8ffeb991..0a4ffd10c484 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -581,7 +581,7 @@
* EBI2. This has a 25MHz crystal next to it, so no
* clocking is needed.
*/
- ethernet-ebi2@2,0 {
+ ethernet@2,0 {
compatible = "smsc,lan9221", "smsc,lan9115";
reg = <2 0x0 0x100>;
/*
@@ -598,8 +598,6 @@
phy-mode = "mii";
reg-io-width = <2>;
smsc,force-external-phy;
- /* IRQ on edge falling = active low */
- smsc,irq-active-low;
smsc,irq-push-pull;
/*
diff --git a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
index 282b89ce3d45..f7ea2e5dd191 100644
--- a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts
@@ -216,6 +216,10 @@
};
};
+&adm_dma {
+ status = "okay";
+};
+
&gmac0 {
status = "okay";
@@ -251,6 +255,39 @@
status = "okay";
};
+&hs_phy_1 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+
+ nandcs@0 {
+ compatible = "qcom,nandcs";
+ reg = <0>;
+
+ nand-ecc-strength = <4>;
+ nand-bus-width = <8>;
+ nand-ecc-step-size = <512>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ boot@0 {
+ label = "RouterBoard NAND 1 Boot";
+ reg = <0x0000000 0x0800000>;
+ };
+
+ main@800000 {
+ label = "RouterBoard NAND 1 Main";
+ reg = <0x0800000 0x7800000>;
+ };
+ };
+ };
+};
+
&qcom_pinmux {
buttons_pins: buttons_pins {
mux {
@@ -305,4 +342,25 @@
input-disable;
};
};
+
+ usb1_pwr_en_pins: usb1_pwr_en_pins {
+ mux {
+ pins = "gpio4";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+};
+
+&ss_phy_1 {
+ status = "okay";
+};
+
+&usb3_1 {
+ pinctrl-0 = <&usb1_pwr_en_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 98995ead4413..7bcf5ef92157 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -2,6 +2,8 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/clock/qcom,gcc-ipq806x.h>
#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
#include <dt-bindings/gpio/gpio.h>
@@ -46,6 +48,228 @@
};
};
+ thermal-zones {
+ tsens_tz_sensor0 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 0>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor1 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 1>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor2 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 2>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor3 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 3>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor4 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 4>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor5 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor6 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor7 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor8 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor9 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 9>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ tsens_tz_sensor10 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens 10>;
+
+ trips {
+ cpu-critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu-hot {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+ };
+
memory {
device_type = "memory";
reg = <0x0 0x0>;
@@ -185,6 +409,31 @@
bias-pull-up;
};
};
+
+ nand_pins: nand_pins {
+ mux {
+ pins = "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39",
+ "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45",
+ "gpio46", "gpio47";
+ function = "nand";
+ drive-strength = <10>;
+ bias-disable;
+ };
+
+ pullups {
+ pins = "gpio39";
+ bias-pull-up;
+ };
+
+ hold {
+ pins = "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45",
+ "gpio46", "gpio47";
+ bias-bus-hold;
+ };
+ };
};
intc: interrupt-controller@2000000 {
@@ -226,6 +475,26 @@
reg = <0x02098000 0x1000>, <0x02008000 0x1000>;
};
+ adm_dma: dma-controller@18300000 {
+ compatible = "qcom,adm";
+ reg = <0x18300000 0x100000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+
+ clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
+ clock-names = "core", "iface";
+
+ resets = <&gcc ADM0_RESET>,
+ <&gcc ADM0_PBUS_RESET>,
+ <&gcc ADM0_C0_RESET>,
+ <&gcc ADM0_C1_RESET>,
+ <&gcc ADM0_C2_RESET>;
+ reset-names = "clk", "pbus", "c0", "c1", "c2";
+ qcom,ee = <0>;
+
+ status = "disabled";
+ };
+
saw0: regulator@2089000 {
compatible = "qcom,saw2";
reg = <0x02089000 0x1000>, <0x02009000 0x1000>;
@@ -403,6 +672,28 @@
status = "disabled";
};
+ nand: nand-controller@1ac00000 {
+ compatible = "qcom,ipq806x-nand";
+ reg = <0x1ac00000 0x800>;
+
+ pinctrl-0 = <&nand_pins>;
+ pinctrl-names = "default";
+
+ clocks = <&gcc EBI2_CLK>,
+ <&gcc EBI2_AON_CLK>;
+ clock-names = "core", "aon";
+
+ dmas = <&adm_dma 3>;
+ dma-names = "rxtx";
+ qcom,cmd-crci = <15>;
+ qcom,data-crci = <3>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
sata: sata@29000000 {
compatible = "qcom,ipq806x-ahci", "generic-ahci";
reg = <0x29000000 0x180>;
@@ -436,6 +727,12 @@
reg = <0x00700000 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
+ tsens_calib: calib@400 {
+ reg = <0x400 0xb>;
+ };
+ tsens_calib_backup: calib_backup@410 {
+ reg = <0x410 0xb>;
+ };
};
gcc: clock-controller@900000 {
@@ -443,6 +740,38 @@
reg = <0x00900000 0x4000>;
#clock-cells = <1>;
#reset-cells = <1>;
+ #power-domain-cells = <1>;
+
+ tsens: thermal-sensor@900000 {
+ compatible = "qcom,ipq8064-tsens";
+
+ nvmem-cells = <&tsens_calib>, <&tsens_calib_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow";
+
+ #qcom,sensors = <11>;
+ #thermal-sensor-cells = <1>;
+ };
+ };
+
+ rpm: rpm@108000 {
+ compatible = "qcom,rpm-ipq8064";
+ reg = <0x108000 0x1000>;
+ qcom,ipc = <&l2cc 0x8 2>;
+
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ack", "err", "wakeup";
+
+ clocks = <&gcc RPM_MSG_RAM_H_CLK>;
+ clock-names = "ram";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-ipq806x", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
};
tcsr: syscon@1a400000 {
@@ -450,6 +779,14 @@
reg = <0x1a400000 0x100>;
};
+ l2cc: clock-controller@2011000 {
+ compatible = "qcom,kpss-gcc", "syscon";
+ reg = <0x2011000 0x1000>;
+ clocks = <&gcc PLL8_VOTE>, <&gcc PXO_SRC>;
+ clock-names = "pll8_vote", "pxo";
+ clock-output-names = "acpu_l2_aux";
+ };
+
lcc: clock-controller@28000000 {
compatible = "qcom,lcc-ipq8064";
reg = <0x28000000 0x1000>;
@@ -718,6 +1055,94 @@
status = "disabled";
};
+ hs_phy_0: phy@100f8800 {
+ compatible = "qcom,ipq806x-usb-phy-hs";
+ reg = <0x100f8800 0x30>;
+ clocks = <&gcc USB30_0_UTMI_CLK>;
+ clock-names = "ref";
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ ss_phy_0: phy@100f8830 {
+ compatible = "qcom,ipq806x-usb-phy-ss";
+ reg = <0x100f8830 0x30>;
+ clocks = <&gcc USB30_0_MASTER_CLK>;
+ clock-names = "ref";
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+
+ usb3_0: usb3@100f8800 {
+ compatible = "qcom,dwc3", "syscon";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x100f8800 0x8000>;
+ clocks = <&gcc USB30_0_MASTER_CLK>;
+ clock-names = "core";
+
+ ranges;
+
+ resets = <&gcc USB30_0_MASTER_RESET>;
+ reset-names = "master";
+
+ status = "disabled";
+
+ dwc3_0: dwc3@10000000 {
+ compatible = "snps,dwc3";
+ reg = <0x10000000 0xcd00>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hs_phy_0>, <&ss_phy_0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ dr_mode = "host";
+ snps,dis_u3_susphy_quirk;
+ };
+ };
+
+ hs_phy_1: phy@110f8800 {
+ compatible = "qcom,ipq806x-usb-phy-hs";
+ reg = <0x110f8800 0x30>;
+ clocks = <&gcc USB30_1_UTMI_CLK>;
+ clock-names = "ref";
+ #phy-cells = <0>;
+ };
+
+ ss_phy_1: phy@110f8830 {
+ compatible = "qcom,ipq806x-usb-phy-ss";
+ reg = <0x110f8830 0x30>;
+ clocks = <&gcc USB30_1_MASTER_CLK>;
+ clock-names = "ref";
+ #phy-cells = <0>;
+ };
+
+ usb3_1: usb3@110f8800 {
+ compatible = "qcom,dwc3", "syscon";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x110f8800 0x8000>;
+ clocks = <&gcc USB30_1_MASTER_CLK>;
+ clock-names = "core";
+
+ ranges;
+
+ resets = <&gcc USB30_1_MASTER_RESET>;
+ reset-names = "master";
+
+ status = "disabled";
+
+ dwc3_1: dwc3@11000000 {
+ compatible = "snps,dwc3";
+ reg = <0x11000000 0xcd00>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&hs_phy_1>, <&ss_phy_1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ dr_mode = "host";
+ snps,dis_u3_susphy_quirk;
+ };
+ };
+
vsdcc_fixed: vsdcc-regulator {
compatible = "regulator-fixed";
regulator-name = "SDCC Power";
diff --git a/arch/arm/boot/dts/qcom-sdx55-t55.dts b/arch/arm/boot/dts/qcom-sdx55-t55.dts
index ddcd53aa533d..2ffcd085904d 100644
--- a/arch/arm/boot/dts/qcom-sdx55-t55.dts
+++ b/arch/arm/boot/dts/qcom-sdx55-t55.dts
@@ -250,7 +250,7 @@
nand-ecc-step-size = <512>;
nand-bus-width = <8>;
/* efs2 partition is secured */
- secure-regions = <0x500000 0xb00000>;
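+ /*
+ * /bits/ 64 stores each <offset size> pair as 64-bit values,
+ * matching how the NAND controller binding defines the
+ * secure-regions property.
+ */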
+ secure-regions = /bits/ 64 <0x500000 0xb00000>;
};
};
diff --git a/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts b/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts
index 3065f84634b8..80c40da79604 100644
--- a/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts
+++ b/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts
@@ -250,8 +250,8 @@
nand-ecc-step-size = <512>;
nand-bus-width = <8>;
/* ico and efs2 partitions are secured */
- secure-regions = <0x500000 0x500000
- 0xa00000 0xb00000>;
+ secure-regions = /bits/ 64 <0x500000 0x500000
+ 0xa00000 0xb00000>;
};
};
diff --git a/arch/arm/boot/dts/r8a7742.dtsi b/arch/arm/boot/dts/r8a7742.dtsi
index dd1b976d2a6c..a2279686ffcc 100644
--- a/arch/arm/boot/dts/r8a7742.dtsi
+++ b/arch/arm/boot/dts/r8a7742.dtsi
@@ -47,7 +47,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -56,6 +55,7 @@
clock-frequency = <1400000000>;
clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
power-domains = <&sysc R8A7742_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -77,6 +77,7 @@
clock-frequency = <1400000000>;
clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
power-domains = <&sysc R8A7742_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -98,6 +99,7 @@
clock-frequency = <1400000000>;
clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
power-domains = <&sysc R8A7742_PD_CA15_CPU2>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -119,6 +121,7 @@
clock-frequency = <1400000000>;
clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
power-domains = <&sysc R8A7742_PD_CA15_CPU3>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -750,6 +753,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index 6e37b8da278b..7e5e09d210ec 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -49,7 +49,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -59,6 +58,7 @@
clocks = <&cpg CPG_CORE R8A7743_CLK_Z>;
clock-latency = <300000>; /* 300 us */
power-domains = <&sysc R8A7743_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
/* kHz - uV - OPPs unknown yet */
@@ -78,6 +78,7 @@
clocks = <&cpg CPG_CORE R8A7743_CLK_Z>;
clock-latency = <300000>; /* 300 us */
power-domains = <&sysc R8A7743_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
/* kHz - uV - OPPs unknown yet */
@@ -702,6 +703,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi
index ace20861c0c4..8419683a9d83 100644
--- a/arch/arm/boot/dts/r8a7744.dtsi
+++ b/arch/arm/boot/dts/r8a7744.dtsi
@@ -49,7 +49,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -59,6 +58,7 @@
clocks = <&cpg CPG_CORE R8A7744_CLK_Z>;
clock-latency = <300000>; /* 300 us */
power-domains = <&sysc R8A7744_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
/* kHz - uV - OPPs unknown yet */
@@ -78,6 +78,7 @@
clocks = <&cpg CPG_CORE R8A7744_CLK_Z>;
clock-latency = <300000>; /* 300 us */
power-domains = <&sysc R8A7744_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
/* kHz - uV - OPPs unknown yet */
@@ -702,6 +703,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
index be33bdabe452..f877c51f769c 100644
--- a/arch/arm/boot/dts/r8a7745.dtsi
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -64,7 +64,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -73,6 +72,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A7745_CLK_Z2>;
power-domains = <&sysc R8A7745_PD_CA7_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
};
@@ -83,6 +83,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A7745_CLK_Z2>;
power-domains = <&sysc R8A7745_PD_CA7_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
};
@@ -645,6 +646,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a77470.dtsi b/arch/arm/boot/dts/r8a77470.dtsi
index a1d7f6e7a2e3..13ef1e9bf4d5 100644
--- a/arch/arm/boot/dts/r8a77470.dtsi
+++ b/arch/arm/boot/dts/r8a77470.dtsi
@@ -25,7 +25,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -34,6 +33,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A77470_CLK_Z2>;
power-domains = <&sysc R8A77470_PD_CA7_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
};
@@ -44,6 +44,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A77470_CLK_Z2>;
power-domains = <&sysc R8A77470_PD_CA7_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
};
@@ -537,6 +538,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index c9f8735860bf..95efbafb0b70 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -166,6 +166,7 @@
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7778_CLK_I2C1>;
power-domains = <&cpg_clocks>;
+ i2c-scl-internal-delay-ns = <5>;
status = "disabled";
};
@@ -177,6 +178,7 @@
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7778_CLK_I2C2>;
power-domains = <&cpg_clocks>;
+ i2c-scl-internal-delay-ns = <5>;
status = "disabled";
};
@@ -188,6 +190,7 @@
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7778_CLK_I2C3>;
power-domains = <&cpg_clocks>;
+ i2c-scl-internal-delay-ns = <5>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index d2240b89ee52..465845323495 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -145,7 +145,7 @@
status = "okay";
clocks = <&mstp1_clks R8A7779_CLK_DU>, <&x3_clk>;
- clock-names = "du", "dclkin.0";
+ clock-names = "du.0", "dclkin.0";
ports {
port@0 {
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 74d7e9084eab..39fc58f32df6 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -198,6 +198,7 @@
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7779_CLK_I2C1>;
power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <5>;
status = "disabled";
};
@@ -209,6 +210,7 @@
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7779_CLK_I2C2>;
power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <5>;
status = "disabled";
};
@@ -220,6 +222,7 @@
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7779_CLK_I2C3>;
power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <5>;
status = "disabled";
};
@@ -463,6 +466,7 @@
reg = <0xfff80000 0x40000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7779_CLK_DU>;
+ clock-names = "du.0";
power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
status = "disabled";
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 2dad0742d2ba..fa6d986b5d46 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -81,6 +81,9 @@
keyboard {
compatible = "gpio-keys";
+ pinctrl-0 = <&keyboard_pins>;
+ pinctrl-names = "default";
+
one {
linux,code = <KEY_1>;
label = "SW2-1";
@@ -659,6 +662,11 @@
groups = "audio_clk_a";
function = "audio_clk";
};
+
+ keyboard_pins: keyboard {
+ pins = "GP_1_14", "GP_1_24", "GP_1_26", "GP_1_28";
+ bias-pull-up;
+ };
};
&ether {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index de29394eed63..ed6dd4fcc503 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -69,7 +69,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -78,6 +77,7 @@
clock-frequency = <1300000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z>;
power-domains = <&sysc R8A7790_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -99,6 +99,7 @@
clock-frequency = <1300000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z>;
power-domains = <&sysc R8A7790_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -120,6 +121,7 @@
clock-frequency = <1300000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z>;
power-domains = <&sysc R8A7790_PD_CA15_CPU2>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -141,6 +143,7 @@
clock-frequency = <1300000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z>;
power-domains = <&sysc R8A7790_PD_CA15_CPU3>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
capacity-dmips-mhz = <1024>;
voltage-tolerance = <1>; /* 1% */
@@ -162,6 +165,7 @@
clock-frequency = <780000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z2>;
power-domains = <&sysc R8A7790_PD_CA7_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
capacity-dmips-mhz = <539>;
};
@@ -173,6 +177,7 @@
clock-frequency = <780000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z2>;
power-domains = <&sysc R8A7790_PD_CA7_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
capacity-dmips-mhz = <539>;
};
@@ -184,6 +189,7 @@
clock-frequency = <780000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z2>;
power-domains = <&sysc R8A7790_PD_CA7_CPU2>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
capacity-dmips-mhz = <539>;
};
@@ -195,6 +201,7 @@
clock-frequency = <780000000>;
clocks = <&cpg CPG_CORE R8A7790_CLK_Z2>;
power-domains = <&sysc R8A7790_PD_CA7_CPU3>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
capacity-dmips-mhz = <539>;
};
@@ -768,6 +775,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 61e881bbbf6e..2a8b6fd9095c 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -81,7 +81,7 @@
keyboard {
compatible = "gpio-keys";
- pinctrl-0 = <&sw2_pins>;
+ pinctrl-0 = <&keyboard_pins>;
pinctrl-names = "default";
key-1 {
@@ -622,7 +622,7 @@
function = "audio_clk";
};
- sw2_pins: sw2 {
+ keyboard_pins: keyboard {
pins = "GP_5_0", "GP_5_1", "GP_5_2", "GP_5_3";
bias-pull-up;
};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 9d8320f71a6a..0ccc162d3c2c 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -68,7 +68,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -77,6 +76,7 @@
clock-frequency = <1500000000>;
clocks = <&cpg CPG_CORE R8A7791_CLK_Z>;
power-domains = <&sysc R8A7791_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
voltage-tolerance = <1>; /* 1% */
clock-latency = <300000>; /* 300 us */
@@ -97,6 +97,7 @@
clock-frequency = <1500000000>;
clocks = <&cpg CPG_CORE R8A7791_CLK_Z>;
power-domains = <&sysc R8A7791_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
voltage-tolerance = <1>; /* 1% */
clock-latency = <300000>; /* 300 us */
@@ -728,6 +729,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
index c100ae903a46..62aa9f61321b 100644
--- a/arch/arm/boot/dts/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/r8a7792-blanche.dts
@@ -112,6 +112,9 @@
keyboard {
compatible = "gpio-keys";
+ pinctrl-0 = <&keyboard_pins>;
+ pinctrl-names = "default";
+
key-1 {
linux,code = <KEY_1>;
label = "SW2-1";
@@ -235,6 +238,11 @@
function = "du1";
};
+ keyboard_pins: keyboard {
+ pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
+ bias-pull-up;
+ };
+
pmic_irq_pins: pmicirq {
groups = "intc_irq2";
function = "intc";
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 253e8bf643d1..9cdb73894ac2 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -45,7 +45,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -54,6 +53,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A7792_CLK_Z>;
power-domains = <&sysc R8A7792_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
};
@@ -64,6 +64,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A7792_CLK_Z>;
power-domains = <&sysc R8A7792_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA15>;
};
@@ -537,6 +538,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 87fa57a99399..479e0fdf0c37 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -64,9 +64,12 @@
reg = <0 0x40000000 0 0x40000000>;
};
- gpio-keys {
+ keyboard {
compatible = "gpio-keys";
+ pinctrl-0 = <&keyboard_pins>;
+ pinctrl-names = "default";
+
key-1 {
gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
linux,code = <KEY_1>;
@@ -567,6 +570,11 @@
function = "audio_clk";
};
+ keyboard_pins: keyboard {
+ pins = "GP_5_0", "GP_5_1", "GP_5_2", "GP_5_3";
+ bias-pull-up;
+ };
+
vin0_pins: vin0 {
groups = "vin0_data24", "vin0_sync", "vin0_clkenb", "vin0_clk";
function = "vin0";
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 6d74475030ed..dea4b1e108af 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -60,7 +60,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -69,6 +68,7 @@
clock-frequency = <1500000000>;
clocks = <&cpg CPG_CORE R8A7793_CLK_Z>;
power-domains = <&sysc R8A7793_PD_CA15_CPU0>;
+ enable-method = "renesas,apmu";
voltage-tolerance = <1>; /* 1% */
clock-latency = <300000>; /* 300 us */
@@ -89,6 +89,7 @@
clock-frequency = <1500000000>;
clocks = <&cpg CPG_CORE R8A7793_CLK_Z>;
power-domains = <&sysc R8A7793_PD_CA15_CPU1>;
+ enable-method = "renesas,apmu";
voltage-tolerance = <1>; /* 1% */
clock-latency = <300000>; /* 300 us */
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index f9dba5688d3f..f330d796a772 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -8,6 +8,7 @@
/dts-v1/;
#include "r8a7794.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
/ {
model = "Alt";
@@ -94,6 +95,42 @@
#size-cells = <1>;
};
+ keyboard {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&keyboard_pins>;
+ pinctrl-names = "default";
+
+ one {
+ linux,code = <KEY_1>;
+ label = "SW2-1";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
+ };
+ two {
+ linux,code = <KEY_2>;
+ label = "SW2-2";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 10 GPIO_ACTIVE_LOW>;
+ };
+ three {
+ linux,code = <KEY_3>;
+ label = "SW2-3";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ };
+ four {
+ linux,code = <KEY_4>;
+ label = "SW2-4";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+ };
+ };
+
vga-encoder {
compatible = "adi,adv7123";
@@ -319,6 +356,11 @@
groups = "usb1";
function = "usb1";
};
+
+ keyboard_pins: keyboard {
+ pins = "GP_3_9", "GP_3_10", "GP_3_11", "GP_3_12";
+ bias-pull-up;
+ };
};
&cmt0 {
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index eb89a27a6ed0..cafa3046daa4 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -45,9 +45,12 @@
reg = <0 0x40000000 0 0x40000000>;
};
- gpio-keys {
+ keyboard {
compatible = "gpio-keys";
+ pinctrl-0 = <&keyboard_pins>;
+ pinctrl-names = "default";
+
key-3 {
gpios = <&gpio5 10 GPIO_ACTIVE_LOW>;
linux,code = <KEY_3>;
@@ -358,6 +361,11 @@
function = "du1";
};
+ keyboard_pins: keyboard {
+ pins = "GP_3_9", "GP_3_10", "GP_3_11", "GP_3_12";
+ bias-pull-up;
+ };
+
ssi_pins: sound {
groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
function = "ssi";
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 330dc516ecd1..eac9ed8df0be 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -62,7 +62,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -71,6 +70,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A7794_CLK_Z2>;
power-domains = <&sysc R8A7794_PD_CA7_CPU0>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
};
@@ -81,6 +81,7 @@
clock-frequency = <1000000000>;
clocks = <&cpg CPG_CORE R8A7794_CLK_Z2>;
power-domains = <&sysc R8A7794_PD_CA7_CPU1>;
+ enable-method = "renesas,apmu";
next-level-cache = <&L2_CA7>;
};
@@ -598,6 +599,7 @@
reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
resets = <&cpg 812>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index 7154b827ea2f..e817eba8c622 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -390,7 +390,7 @@
};
};
- sleep {
+ suspend {
global_pwroff: global-pwroff {
rockchip,pins = <2 RK_PA7 1 &pcfg_pull_none>;
};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index e24230d50a78..ffa9bc7ed3d0 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/clock/rk3036-cru.h>
#include <dt-bindings/soc/rockchip,boot-mode.h>
+#include <dt-bindings/power/rk3036-power.h>
/ {
#address-cells = <1>;
@@ -111,10 +112,32 @@
assigned-clock-rates = <100000000>;
clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>;
clock-names = "bus", "core";
+ power-domains = <&power RK3036_PD_GPU>;
resets = <&cru SRST_GPU>;
status = "disabled";
};
+ vpu: video-codec@10108000 {
+ compatible = "rockchip,rk3036-vpu";
+ reg = <0x10108000 0x800>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vdpu";
+ clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vpu_mmu>;
+ power-domains = <&power RK3036_PD_VPU>;
+ };
+
+ vpu_mmu: iommu@10108800 {
+ compatible = "rockchip,iommu";
+ reg = <0x10108800 0x100>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ clock-names = "aclk", "iface";
+ power-domains = <&power RK3036_PD_VPU>;
+ #iommu-cells = <0>;
+ };
+
vop: vop@10118000 {
compatible = "rockchip,rk3036-vop";
reg = <0x10118000 0x19c>;
@@ -124,6 +147,7 @@
resets = <&cru SRST_LCDC1_A>, <&cru SRST_LCDC1_H>, <&cru SRST_LCDC1_D>;
reset-names = "axi", "ahb", "dclk";
iommus = <&vop_mmu>;
+ power-domains = <&power RK3036_PD_VIO>;
status = "disabled";
vop_out: port {
@@ -140,13 +164,28 @@
compatible = "rockchip,iommu";
reg = <0x10118300 0x100>;
interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vop_mmu";
clocks = <&cru ACLK_LCDC>, <&cru HCLK_LCDC>;
clock-names = "aclk", "iface";
+ power-domains = <&power RK3036_PD_VIO>;
#iommu-cells = <0>;
status = "disabled";
};
+ qos_gpu: qos@1012d000 {
+ compatible = "rockchip,rk3036-qos", "syscon";
+ reg = <0x1012d000 0x20>;
+ };
+
+ qos_vpu: qos@1012e000 {
+ compatible = "rockchip,rk3036-qos", "syscon";
+ reg = <0x1012e000 0x20>;
+ };
+
+ qos_vio: qos@1012f000 {
+ compatible = "rockchip,rk3036-qos", "syscon";
+ reg = <0x1012f000 0x20>;
+ };
+
gic: interrupt-controller@10139000 {
compatible = "arm,gic-400";
interrupt-controller;
@@ -302,6 +341,37 @@
compatible = "rockchip,rk3036-grf", "syscon", "simple-mfd";
reg = <0x20008000 0x1000>;
+ power: power-controller {
+ compatible = "rockchip,rk3036-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3036_PD_VIO {
+ reg = <RK3036_PD_VIO>;
+ clocks = <&cru ACLK_LCDC>,
+ <&cru HCLK_LCDC>,
+ <&cru SCLK_LCDC>;
+ pm_qos = <&qos_vio>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@RK3036_PD_VPU {
+ reg = <RK3036_PD_VPU>;
+ clocks = <&cru ACLK_VCODEC>,
+ <&cru HCLK_VCODEC>;
+ pm_qos = <&qos_vpu>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@RK3036_PD_GPU {
+ reg = <RK3036_PD_GPU>;
+ clocks = <&cru SCLK_GPU>;
+ pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
+ };
+ };
+
reboot-mode {
compatible = "syscon-reboot-mode";
offset = <0x1d8>;
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
index eba7a1344976..390aa33cd55a 100644
--- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -12,6 +12,11 @@
model = "bq Curie 2";
compatible = "mundoreader,bq-curie2", "rockchip,rk3066a";
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index 6b121658d93c..a66d915aa0f6 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -10,6 +10,10 @@
model = "MarsBoard RK3066";
compatible = "haoyu,marsboard-rk3066", "rockchip,rk3066a";
+ aliases {
+ mmc0 = &mmc0;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts
index eed9e60cffa2..9790bc63b50a 100644
--- a/arch/arm/boot/dts/rk3066a-mk808.dts
+++ b/arch/arm/boot/dts/rk3066a-mk808.dts
@@ -10,6 +10,11 @@
model = "Rikomagic MK808";
compatible = "rikomagic,mk808", "rockchip,rk3066a";
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts
index 309518403d86..12b2e59aebc4 100644
--- a/arch/arm/boot/dts/rk3066a-rayeager.dts
+++ b/arch/arm/boot/dts/rk3066a-rayeager.dts
@@ -11,6 +11,12 @@
model = "Rayeager PX2";
compatible = "chipspark,rayeager-px2", "rockchip,rk3066a";
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ mmc2 = &emmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
@@ -58,7 +64,7 @@
};
/* input for 5V_STDBY is VSYS or DC5V, selectable by jumper J4 */
- vcc_stdby: 5v-stdby-regulator {
+ vcc_stdby: stdby-regulator {
compatible = "regulator-fixed";
regulator-name = "5v_stdby";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index 252750c97f97..f5a665b5d209 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -217,7 +217,7 @@
<150000000>, <75000000>;
};
- timer@2000e000 {
+ timer2: timer@2000e000 {
compatible = "snps,dw-apb-timer-osc";
reg = <0x2000e000 0x100>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
@@ -238,7 +238,7 @@
};
};
- timer@20038000 {
+ timer0: timer@20038000 {
compatible = "snps,dw-apb-timer-osc";
reg = <0x20038000 0x100>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
@@ -246,7 +246,7 @@
clock-names = "timer", "pclk";
};
- timer@2003a000 {
+ timer1: timer@2003a000 {
compatible = "snps,dw-apb-timer-osc";
reg = <0x2003a000 0x100>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
@@ -266,30 +266,6 @@
status = "disabled";
};
- usbphy: phy {
- compatible = "rockchip,rk3066a-usb-phy", "rockchip,rk3288-usb-phy";
- rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- usbphy0: usb-phy@17c {
- #phy-cells = <0>;
- reg = <0x17c>;
- clocks = <&cru SCLK_OTGPHY0>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
-
- usbphy1: usb-phy@188 {
- #phy-cells = <0>;
- reg = <0x188>;
- clocks = <&cru SCLK_OTGPHY1>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
- };
-
pinctrl: pinctrl {
compatible = "rockchip,rk3066a-pinctrl";
rockchip,grf = <&grf>;
@@ -702,6 +678,34 @@
power-domains = <&power RK3066_PD_GPU>;
};
+&grf {
+ compatible = "rockchip,rk3066-grf", "syscon", "simple-mfd";
+
+ usbphy: usbphy {
+ compatible = "rockchip,rk3066a-usb-phy",
+ "rockchip,rk3288-usb-phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ usbphy0: usb-phy@17c {
+ reg = <0x17c>;
+ clocks = <&cru SCLK_OTGPHY0>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+
+ usbphy1: usb-phy@188 {
+ reg = <0x188>;
+ clocks = <&cru SCLK_OTGPHY1>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+ };
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_xfer>;
@@ -755,7 +759,7 @@
#address-cells = <1>;
#size-cells = <0>;
- pd_vio@RK3066_PD_VIO {
+ power-domain@RK3066_PD_VIO {
reg = <RK3066_PD_VIO>;
clocks = <&cru ACLK_LCDC0>,
<&cru ACLK_LCDC1>,
@@ -780,21 +784,24 @@
<&qos_cif1>,
<&qos_ipp>,
<&qos_rga>;
+ #power-domain-cells = <0>;
};
- pd_video@RK3066_PD_VIDEO {
+ power-domain@RK3066_PD_VIDEO {
reg = <RK3066_PD_VIDEO>;
clocks = <&cru ACLK_VDPU>,
<&cru ACLK_VEPU>,
<&cru HCLK_VDPU>,
<&cru HCLK_VEPU>;
pm_qos = <&qos_vpu>;
+ #power-domain-cells = <0>;
};
- pd_gpu@RK3066_PD_GPU {
+ power-domain@RK3066_PD_GPU {
reg = <RK3066_PD_GPU>;
clocks = <&cru ACLK_GPU>;
pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
};
};
};
@@ -861,6 +868,10 @@
pinctrl-0 = <&uart3_xfer>;
};
+&vpu {
+ power-domains = <&power RK3066_PD_VIDEO>;
+};
+
&wdt {
compatible = "rockchip,rk3066-wdt", "snps,dw-wdt";
};
diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
index 66a0ff196eb1..85d3fce0142f 100644
--- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
+++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
@@ -13,6 +13,12 @@
model = "BQ Edison2 Quad-Core";
compatible = "mundoreader,bq-edison2qc", "rockchip,rk3188";
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ mmc2 = &emmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x80000000>;
diff --git a/arch/arm/boot/dts/rk3188-px3-evb.dts b/arch/arm/boot/dts/rk3188-px3-evb.dts
index c32e1d441cf7..39c60426c9c9 100644
--- a/arch/arm/boot/dts/rk3188-px3-evb.dts
+++ b/arch/arm/boot/dts/rk3188-px3-evb.dts
@@ -11,6 +11,11 @@
model = "Rockchip PX3-EVB";
compatible = "rockchip,px3-evb", "rockchip,px3", "rockchip,rk3188";
+ aliases {
+ mmc0 = &mmc0;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index b0fef82c0a71..36c0945f43b2 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -11,6 +11,10 @@
model = "Radxa Rock";
compatible = "radxa,rock", "rockchip,rk3188";
+ aliases {
+ mmc0 = &mmc0;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x80000000>;
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 2298a8d840ba..793a1b9117fe 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -150,16 +150,16 @@
compatible = "rockchip,rk3188-timer", "rockchip,rk3288-timer";
reg = <0x2000e000 0x20>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_TIMER3>, <&cru PCLK_TIMER3>;
- clock-names = "timer", "pclk";
+ clocks = <&cru PCLK_TIMER3>, <&cru SCLK_TIMER3>;
+ clock-names = "pclk", "timer";
};
timer6: timer@200380a0 {
compatible = "rockchip,rk3188-timer", "rockchip,rk3288-timer";
reg = <0x200380a0 0x20>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_TIMER6>, <&cru PCLK_TIMER0>;
- clock-names = "timer", "pclk";
+ clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER6>;
+ clock-names = "pclk", "timer";
};
i2s0: i2s@1011a000 {
@@ -214,30 +214,6 @@
};
};
- usbphy: phy {
- compatible = "rockchip,rk3188-usb-phy", "rockchip,rk3288-usb-phy";
- rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- usbphy0: usb-phy@10c {
- #phy-cells = <0>;
- reg = <0x10c>;
- clocks = <&cru SCLK_OTGPHY0>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
-
- usbphy1: usb-phy@11c {
- #phy-cells = <0>;
- reg = <0x11c>;
- clocks = <&cru SCLK_OTGPHY1>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
- };
-
pinctrl: pinctrl {
compatible = "rockchip,rk3188-pinctrl";
rockchip,grf = <&grf>;
@@ -662,6 +638,34 @@
power-domains = <&power RK3188_PD_GPU>;
};
+&grf {
+ compatible = "rockchip,rk3188-grf", "syscon", "simple-mfd";
+
+ usbphy: usbphy {
+ compatible = "rockchip,rk3188-usb-phy",
+ "rockchip,rk3288-usb-phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ usbphy0: usb-phy@10c {
+ reg = <0x10c>;
+ clocks = <&cru SCLK_OTGPHY0>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+
+ usbphy1: usb-phy@11c {
+ reg = <0x11c>;
+ clocks = <&cru SCLK_OTGPHY1>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+ };
+};
+
&i2c0 {
compatible = "rockchip,rk3188-i2c";
pinctrl-names = "default";
@@ -699,7 +703,7 @@
#address-cells = <1>;
#size-cells = <0>;
- pd_vio@RK3188_PD_VIO {
+ power-domain@RK3188_PD_VIO {
reg = <RK3188_PD_VIO>;
clocks = <&cru ACLK_LCDC0>,
<&cru ACLK_LCDC1>,
@@ -719,21 +723,24 @@
<&qos_cif0>,
<&qos_ipp>,
<&qos_rga>;
+ #power-domain-cells = <0>;
};
- pd_video@RK3188_PD_VIDEO {
+ power-domain@RK3188_PD_VIDEO {
reg = <RK3188_PD_VIDEO>;
clocks = <&cru ACLK_VDPU>,
<&cru ACLK_VEPU>,
<&cru HCLK_VDPU>,
<&cru HCLK_VEPU>;
pm_qos = <&qos_vpu>;
+ #power-domain-cells = <0>;
};
- pd_gpu@RK3188_PD_GPU {
+ power-domain@RK3188_PD_GPU {
reg = <RK3188_PD_GPU>;
clocks = <&cru ACLK_GPU>;
pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
};
};
};
@@ -794,6 +801,11 @@
pinctrl-0 = <&uart3_xfer>;
};
+&vpu {
+ compatible = "rockchip,rk3188-vpu", "rockchip,rk3066-vpu";
+ power-domains = <&power RK3188_PD_VIDEO>;
+};
+
&wdt {
compatible = "rockchip,rk3188-wdt", "snps,dw-wdt";
};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 208f21245095..75af99c76d7e 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/clock/rk3228-cru.h>
#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/power/rk3228-power.h>
/ {
#address-cells = <1>;
@@ -190,7 +191,65 @@
status = "disabled";
};
- u2phy0: usb2-phy@760 {
+ power: power-controller {
+ compatible = "rockchip,rk3228-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@RK3228_PD_VIO {
+ reg = <RK3228_PD_VIO>;
+ clocks = <&cru ACLK_HDCP>,
+ <&cru SCLK_HDCP>,
+ <&cru ACLK_IEP>,
+ <&cru HCLK_IEP>,
+ <&cru ACLK_RGA>,
+ <&cru HCLK_RGA>,
+ <&cru SCLK_RGA>;
+ pm_qos = <&qos_hdcp>,
+ <&qos_iep>,
+ <&qos_rga_r>,
+ <&qos_rga_w>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@RK3228_PD_VOP {
+ reg = <RK3228_PD_VOP>;
+ clocks = <&cru ACLK_VOP>,
+ <&cru DCLK_VOP>,
+ <&cru HCLK_VOP>;
+ pm_qos = <&qos_vop>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@RK3228_PD_VPU {
+ reg = <RK3228_PD_VPU>;
+ clocks = <&cru ACLK_VPU>,
+ <&cru HCLK_VPU>;
+ pm_qos = <&qos_vpu>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@RK3228_PD_RKVDEC {
+ reg = <RK3228_PD_RKVDEC>;
+ clocks = <&cru ACLK_RKVDEC>,
+ <&cru HCLK_RKVDEC>,
+ <&cru SCLK_VDEC_CABAC>,
+ <&cru SCLK_VDEC_CORE>;
+ pm_qos = <&qos_rkvdec_r>,
+ <&qos_rkvdec_w>;
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@RK3228_PD_GPU {
+ reg = <RK3228_PD_GPU>;
+ clocks = <&cru ACLK_GPU>;
+ pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
+ };
+ };
+
+ u2phy0: usb2phy@760 {
compatible = "rockchip,rk3228-usb2phy";
reg = <0x0760 0x0c>;
clocks = <&cru SCLK_OTGPHY0>;
@@ -217,7 +276,7 @@
};
};
- u2phy1: usb2-phy@800 {
+ u2phy1: usb2phy@800 {
compatible = "rockchip,rk3228-usb2phy";
reg = <0x0800 0x0c>;
clocks = <&cru SCLK_OTGPHY1>;
@@ -379,7 +438,6 @@
reg = <0x110b0000 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
status = "disabled";
@@ -390,7 +448,6 @@
reg = <0x110b0010 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm1_pin>;
status = "disabled";
@@ -401,7 +458,6 @@
reg = <0x110b0020 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm2_pin>;
status = "disabled";
@@ -412,7 +468,6 @@
reg = <0x110b0030 0x10>;
#pwm-cells = <2>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm3_pin>;
status = "disabled";
@@ -517,7 +572,7 @@
pinctrl-0 = <&otp_pin>;
pinctrl-1 = <&otp_out>;
pinctrl-2 = <&otp_pin>;
- #thermal-sensor-cells = <0>;
+ #thermal-sensor-cells = <1>;
rockchip,hw-tshut-temp = <95000>;
status = "disabled";
};
@@ -550,30 +605,54 @@
"ppmmu1";
clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
clock-names = "bus", "core";
+ power-domains = <&power RK3228_PD_GPU>;
resets = <&cru SRST_GPU_A>;
status = "disabled";
};
+ vpu: video-codec@20020000 {
+ compatible = "rockchip,rk3228-vpu", "rockchip,rk3399-vpu";
+ reg = <0x20020000 0x800>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vepu", "vdpu";
+ clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vpu_mmu>;
+ power-domains = <&power RK3228_PD_VPU>;
+ };
+
vpu_mmu: iommu@20020800 {
compatible = "rockchip,iommu";
reg = <0x20020800 0x100>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vpu_mmu";
clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
clock-names = "aclk", "iface";
- iommu-cells = <0>;
- status = "disabled";
+ power-domains = <&power RK3228_PD_VPU>;
+ #iommu-cells = <0>;
+ };
+
+ vdec: video-codec@20030000 {
+ compatible = "rockchip,rk3228-vdec", "rockchip,rk3399-vdec";
+ reg = <0x20030000 0x480>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>,
+ <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
+ clock-names = "axi", "ahb", "cabac", "core";
+ assigned-clocks = <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
+ assigned-clock-rates = <300000000>, <300000000>;
+ iommus = <&vdec_mmu>;
+ power-domains = <&power RK3228_PD_RKVDEC>;
};
vdec_mmu: iommu@20030480 {
compatible = "rockchip,iommu";
reg = <0x20030480 0x40>, <0x200304c0 0x40>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vdec_mmu";
clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>;
clock-names = "aclk", "iface";
- iommu-cells = <0>;
- status = "disabled";
+ power-domains = <&power RK3228_PD_RKVDEC>;
+ #iommu-cells = <0>;
};
vop: vop@20050000 {
@@ -585,6 +664,7 @@
resets = <&cru SRST_VOP_A>, <&cru SRST_VOP_H>, <&cru SRST_VOP_D>;
reset-names = "axi", "ahb", "dclk";
iommus = <&vop_mmu>;
+ power-domains = <&power RK3228_PD_VOP>;
status = "disabled";
vop_out: port {
@@ -602,9 +682,9 @@
compatible = "rockchip,iommu";
reg = <0x20053f00 0x100>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vop_mmu";
clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
clock-names = "aclk", "iface";
+ power-domains = <&power RK3228_PD_VOP>;
#iommu-cells = <0>;
status = "disabled";
};
@@ -615,6 +695,7 @@
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA>;
clock-names = "aclk", "hclk", "sclk";
+ power-domains = <&power RK3228_PD_VIO>;
resets = <&cru SRST_RGA>, <&cru SRST_RGA_A>, <&cru SRST_RGA_H>;
reset-names = "core", "axi", "ahb";
};
@@ -623,10 +704,10 @@
compatible = "rockchip,iommu";
reg = <0x20070800 0x100>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "iep_mmu";
clocks = <&cru ACLK_IEP>, <&cru HCLK_IEP>;
clock-names = "aclk", "iface";
- iommu-cells = <0>;
+ power-domains = <&power RK3228_PD_VIO>;
+ #iommu-cells = <0>;
status = "disabled";
};
@@ -800,6 +881,51 @@
status = "disabled";
};
+ qos_iep: qos@31030080 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31030080 0x20>;
+ };
+
+ qos_rga_w: qos@31030100 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31030100 0x20>;
+ };
+
+ qos_hdcp: qos@31030180 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31030180 0x20>;
+ };
+
+ qos_rga_r: qos@31030200 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31030200 0x20>;
+ };
+
+ qos_vpu: qos@31040000 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31040000 0x20>;
+ };
+
+ qos_gpu: qos@31050000 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31050000 0x20>;
+ };
+
+ qos_vop: qos@31060000 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31060000 0x20>;
+ };
+
+ qos_rkvdec_r: qos@31070000 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31070000 0x20>;
+ };
+
+ qos_rkvdec_w: qos@31070080 {
+ compatible = "rockchip,rk3228-qos", "syscon";
+ reg = <0x31070080 0x20>;
+ };
+
gic: interrupt-controller@32010000 {
compatible = "arm,gic-400";
interrupt-controller;
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index 44bb5e6f83b1..76363b8afcb9 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -218,7 +218,7 @@
flash0-supply = <&vcc_flash>;
flash1-supply = <&vccio_pmu>;
gpio30-supply = <&vccio_pmu>;
- gpio1830 = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
lcdc-supply = <&vcc_io>;
sdcard-supply = <&vccio_sd>;
wifi-supply = <&vcc_18>;
diff --git a/arch/arm/boot/dts/rk3288-vyasa.dts b/arch/arm/boot/dts/rk3288-vyasa.dts
index aa50f8ed4ca0..b156a83eb7d7 100644
--- a/arch/arm/boot/dts/rk3288-vyasa.dts
+++ b/arch/arm/boot/dts/rk3288-vyasa.dts
@@ -379,10 +379,10 @@
audio-supply = <&vcc_18>;
bb-supply = <&vcc_io>;
dvp-supply = <&vcc_io>;
- flash0-suuply = <&vcc_18>;
+ flash0-supply = <&vcc_18>;
flash1-supply = <&vcc_lan>;
gpio30-supply = <&vcc_io>;
- gpio1830 = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
lcdc-supply = <&vcc_io>;
sdcard-supply = <&vccio_sd>;
wifi-supply = <&vcc_18>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 05557ad02b33..9c5a7791a1ab 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -196,8 +196,8 @@
compatible = "rockchip,rk3288-timer";
reg = <0x0 0xff810000 0x0 0x20>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&xin24m>, <&cru PCLK_TIMER>;
- clock-names = "timer", "pclk";
+ clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clock-names = "pclk", "timer";
};
display-subsystem {
@@ -765,7 +765,7 @@
* *_HDMI HDMI
* *_MIPI_* MIPI
*/
- pd_vio@RK3288_PD_VIO {
+ power-domain@RK3288_PD_VIO {
reg = <RK3288_PD_VIO>;
clocks = <&cru ACLK_IEP>,
<&cru ACLK_ISP>,
@@ -801,19 +801,21 @@
<&qos_vio2_rga_r>,
<&qos_vio2_rga_w>,
<&qos_vio1_isp_r>;
+ #power-domain-cells = <0>;
};
/*
* Note: The following 3 are HEVC(H.265) clocks,
* and on the ACLK_HEVC_NIU (NOC).
*/
- pd_hevc@RK3288_PD_HEVC {
+ power-domain@RK3288_PD_HEVC {
reg = <RK3288_PD_HEVC>;
clocks = <&cru ACLK_HEVC>,
<&cru SCLK_HEVC_CABAC>,
<&cru SCLK_HEVC_CORE>;
pm_qos = <&qos_hevc_r>,
<&qos_hevc_w>;
+ #power-domain-cells = <0>;
};
/*
@@ -821,22 +823,24 @@
* (video endecoder & decoder) clocks that on the
* ACLK_VCODEC_NIU and HCLK_VCODEC_NIU (NOC).
*/
- pd_video@RK3288_PD_VIDEO {
+ power-domain@RK3288_PD_VIDEO {
reg = <RK3288_PD_VIDEO>;
clocks = <&cru ACLK_VCODEC>,
<&cru HCLK_VCODEC>;
pm_qos = <&qos_video>;
+ #power-domain-cells = <0>;
};
/*
* Note: ACLK_GPU is the GPU clock,
* and on the ACLK_GPU_NIU (NOC).
*/
- pd_gpu@RK3288_PD_GPU {
+ power-domain@RK3288_PD_GPU {
reg = <RK3288_PD_GPU>;
clocks = <&cru ACLK_GPU>;
pm_qos = <&qos_gpu_r>,
<&qos_gpu_w>;
+ #power-domain-cells = <0>;
};
};
@@ -1582,7 +1586,7 @@
drive-strength = <12>;
};
- sleep {
+ suspend {
global_pwroff: global-pwroff {
rockchip,pins = <0 RK_PA0 1 &pcfg_pull_none>;
};
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 755c946f11de..616a828e0c6e 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -21,9 +21,6 @@
i2c2 = &i2c2;
i2c3 = &i2c3;
i2c4 = &i2c4;
- mshc0 = &emmc;
- mshc1 = &mmc0;
- mshc2 = &mmc1;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -50,6 +47,18 @@
status = "disabled";
};
+ vpu: video-codec@10104000 {
+ compatible = "rockchip,rk3066-vpu";
+ reg = <0x10104000 0x800>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vepu", "vdpu";
+ clocks = <&cru ACLK_VDPU>, <&cru HCLK_VDPU>,
+ <&cru ACLK_VEPU>, <&cru HCLK_VEPU>;
+ clock-names = "aclk_vdpu", "hclk_vdpu",
+ "aclk_vepu", "hclk_vepu";
+ };
+
L2: cache-controller@10138000 {
compatible = "arm,pl310-cache";
reg = <0x10138000 0x1000>;
@@ -256,7 +265,7 @@
};
grf: grf@20008000 {
- compatible = "syscon";
+ compatible = "syscon", "simple-mfd";
reg = <0x20008000 0x200>;
};
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index 884872ca5207..9bd0acf3b708 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -265,7 +265,7 @@
#address-cells = <1>;
#size-cells = <1>;
- u2phy: usb2-phy@100 {
+ u2phy: usb2phy@100 {
compatible = "rockchip,rv1108-usb2phy";
reg = <0x100 0x0c>;
clocks = <&cru SCLK_USBPHY>;
diff --git a/arch/arm/boot/dts/s5pv210-goni.dts b/arch/arm/boot/dts/s5pv210-goni.dts
index 5c1e12d39747..c6f39147cb96 100644
--- a/arch/arm/boot/dts/s5pv210-goni.dts
+++ b/arch/arm/boot/dts/s5pv210-goni.dts
@@ -358,15 +358,6 @@
reg = <0x4a>;
interrupt-parent = <&gpj0>;
interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
-
- atmel,x-line = <17>;
- atmel,y-line = <11>;
- atmel,x-size = <800>;
- atmel,y-size = <480>;
- atmel,burst-length = <0x21>;
- atmel,threshold = <0x28>;
- atmel,orientation = <1>;
-
vdd-supply = <&tsp_reg>;
};
};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index e47e1ca63043..f6e3e6f57252 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -794,7 +794,7 @@
0xffffffff 0x3ffcfe7c 0x1c010101 /* pioA */
0x7fffffff 0xfffccc3a 0x3f00cc3a /* pioB */
0xffffffff 0x3ff83fff 0xff00ffff /* pioC */
- 0x0003ff00 0x8002a800 0x00000000 /* pioD */
+ 0xb003ff00 0x8002a800 0x00000000 /* pioD */
0xffffffff 0x7fffffff 0x76fff1bf /* pioE */
>;
diff --git a/arch/arm/boot/dts/sd5203.dts b/arch/arm/boot/dts/sd5203.dts
index 3cc9a23910be..a61a078ea042 100644
--- a/arch/arm/boot/dts/sd5203.dts
+++ b/arch/arm/boot/dts/sd5203.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2020 Hisilicon Limited.
+ * Copyright (c) 2020 HiSilicon Limited.
*
* DTS file for Hisilicon SD5203 Board
*/
diff --git a/arch/arm/boot/dts/ste-ab8500.dtsi b/arch/arm/boot/dts/ste-ab8500.dtsi
index a16a00fb5fa5..d0fe3f9aa183 100644
--- a/arch/arm/boot/dts/ste-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-ab8500.dtsi
@@ -34,7 +34,7 @@
#clock-cells = <1>;
};
- ab8500_gpio: ab8500-gpio {
+ ab8500_gpio: ab8500-gpiocontroller {
compatible = "stericsson,ab8500-gpio";
gpio-controller;
#gpio-cells = <2>;
@@ -42,15 +42,15 @@
ab8500-rtc {
compatible = "stericsson,ab8500-rtc";
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH
- 18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
+ <18 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "60S", "ALARM";
};
gpadc: ab8500-gpadc {
compatible = "stericsson,ab8500-gpadc";
- interrupts = <32 IRQ_TYPE_LEVEL_HIGH
- 39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>,
+ <39 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "HW_CONV_END", "SW_CONV_END";
vddadc-supply = <&ab8500_ldo_tvout_reg>;
#address-cells = <1>;
@@ -219,13 +219,13 @@
ab8500_usb {
compatible = "stericsson,ab8500-usb";
- interrupts = < 90 IRQ_TYPE_LEVEL_HIGH
- 96 IRQ_TYPE_LEVEL_HIGH
- 14 IRQ_TYPE_LEVEL_HIGH
- 15 IRQ_TYPE_LEVEL_HIGH
- 79 IRQ_TYPE_LEVEL_HIGH
- 74 IRQ_TYPE_LEVEL_HIGH
- 75 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <90 IRQ_TYPE_LEVEL_HIGH>,
+ <96 IRQ_TYPE_LEVEL_HIGH>,
+ <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>,
+ <79 IRQ_TYPE_LEVEL_HIGH>,
+ <74 IRQ_TYPE_LEVEL_HIGH>,
+ <75 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "ID_WAKEUP_R",
"ID_WAKEUP_F",
"VBUS_DET_F",
@@ -242,8 +242,8 @@
ab8500-ponkey {
compatible = "stericsson,ab8500-poweron-key";
- interrupts = <6 IRQ_TYPE_LEVEL_HIGH
- 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>,
+ <7 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "ONKEY_DBF", "ONKEY_DBR";
};
diff --git a/arch/arm/boot/dts/ste-ab8505.dtsi b/arch/arm/boot/dts/ste-ab8505.dtsi
index cc045b2fc217..0defc15b9bbc 100644
--- a/arch/arm/boot/dts/ste-ab8505.dtsi
+++ b/arch/arm/boot/dts/ste-ab8505.dtsi
@@ -31,7 +31,7 @@
#clock-cells = <1>;
};
- ab8505_gpio: ab8505-gpio {
+ ab8505_gpio: ab8505-gpiocontroller {
compatible = "stericsson,ab8505-gpio";
gpio-controller;
#gpio-cells = <2>;
@@ -39,8 +39,8 @@
ab8500-rtc {
compatible = "stericsson,ab8500-rtc";
- interrupts = <17 IRQ_TYPE_LEVEL_HIGH
- 18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
+ <18 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "60S", "ALARM";
};
@@ -182,13 +182,13 @@
ab8500_usb: ab8500_usb {
compatible = "stericsson,ab8500-usb";
- interrupts = < 90 IRQ_TYPE_LEVEL_HIGH
- 96 IRQ_TYPE_LEVEL_HIGH
- 14 IRQ_TYPE_LEVEL_HIGH
- 15 IRQ_TYPE_LEVEL_HIGH
- 79 IRQ_TYPE_LEVEL_HIGH
- 74 IRQ_TYPE_LEVEL_HIGH
- 75 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <90 IRQ_TYPE_LEVEL_HIGH>,
+ <96 IRQ_TYPE_LEVEL_HIGH>,
+ <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>,
+ <79 IRQ_TYPE_LEVEL_HIGH>,
+ <74 IRQ_TYPE_LEVEL_HIGH>,
+ <75 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "ID_WAKEUP_R",
"ID_WAKEUP_F",
"VBUS_DET_F",
@@ -205,8 +205,8 @@
ab8500-ponkey {
compatible = "stericsson,ab8500-poweron-key";
- interrupts = <6 IRQ_TYPE_LEVEL_HIGH
- 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>,
+ <7 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "ONKEY_DBF", "ONKEY_DBR";
};
diff --git a/arch/arm/boot/dts/ste-href-ab8500.dtsi b/arch/arm/boot/dts/ste-href-ab8500.dtsi
index 4946743de7b9..3ccb7b5c7162 100644
--- a/arch/arm/boot/dts/ste-href-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-href-ab8500.dtsi
@@ -9,7 +9,7 @@
soc {
prcmu@80157000 {
ab8500 {
- ab8500-gpio {
+ ab8500-gpiocontroller {
/* Hog a few default settings */
pinctrl-names = "default";
pinctrl-0 = <&gpio2_default_mode>,
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
index 8d59202cebd6..37e59403c01f 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
@@ -104,6 +104,9 @@
* <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
* <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
*/
+ mount-matrix = "0", "1", "0",
+ "1", "0", "0",
+ "0", "0", "-1";
};
magnetometer@1e {
/* Magnetometer */
@@ -136,6 +139,9 @@
/* INT2 would need to be open drain */
interrupts = <18 IRQ_TYPE_EDGE_RISING>,
<19 IRQ_TYPE_EDGE_RISING>;
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "-1";
};
magnetometer@f {
/* Magnetometer */
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
index 70f058352efc..00ce9d79f540 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
@@ -89,10 +89,12 @@
<19 IRQ_TYPE_EDGE_RISING>;
pinctrl-names = "default";
pinctrl-0 = <&accel_tvk_mode>;
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "-1";
};
magnetometer@1e {
compatible = "st,lsm303dlm-magn";
- st,drdy-int-pin = <1>;
reg = <0x1e>;
vdd-supply = <&ab8500_ldo_aux1_reg>;
vddio-supply = <&db8500_vsmps2_reg>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 83b179692dff..c97e8d29004f 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
#include "ste-href-family-pinctrl.dtsi"
/ {
@@ -64,17 +65,20 @@
reg = <0>;
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ color = <LED_COLOR_ID_BLUE>;
linux,default-trigger = "heartbeat";
};
chan@1 {
reg = <1>;
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ color = <LED_COLOR_ID_BLUE>;
};
chan@2 {
reg = <2>;
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ color = <LED_COLOR_ID_BLUE>;
};
};
lp5521@34 {
@@ -88,16 +92,19 @@
reg = <0>;
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ color = <LED_COLOR_ID_BLUE>;
};
chan@1 {
reg = <1>;
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ color = <LED_COLOR_ID_BLUE>;
};
chan@2 {
reg = <2>;
led-cur = /bits/ 8 <0x2f>;
max-cur = /bits/ 8 <0x5f>;
+ color = <LED_COLOR_ID_BLUE>;
};
};
bh1780@29 {
@@ -202,7 +209,7 @@
prcmu@80157000 {
ab8500 {
- ab8500-gpio {
+ ab8500-gpiocontroller {
};
ab8500_usb {
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index c9b906432341..1815361fe73c 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -755,14 +755,14 @@
status = "disabled";
};
- vica: intc@10140000 {
+ vica: interrupt-controller@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x10140000 0x20>;
};
- vicb: intc@10140020 {
+ vicb: interrupt-controller@10140020 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index b344b3748143..40f1d7c9c1d4 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -376,7 +376,7 @@
prcmu@80157000 {
ab8500 {
- ab8500-gpio {
+ ab8500-gpiocontroller {
/*
* AB8500 GPIOs are numbered starting from 1, so the first
* index 0 is what in the datasheet is called "GPIO1", and
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
index 0d43ee6583cf..40df7c61bf69 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
@@ -121,7 +121,7 @@
#size-cells = <0>;
wifi@1 {
- compatible = "brcm,bcm4329-fmac";
+ compatible = "brcm,bcm4334-fmac", "brcm,bcm4329-fmac";
reg = <1>;
/* GPIO216 (WLAN_HOST_WAKE) */
@@ -162,6 +162,7 @@
pinctrl-1 = <&u0_a_1_sleep>;
bluetooth {
+ /* BCM4334B0 actually */
compatible = "brcm,bcm4330-bt";
/* GPIO222 (BT_VREG_ON) */
shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
index f24369873ce2..25af066f6f3a 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
@@ -401,8 +401,7 @@
status = "okay";
wifi@1 {
- /* Actually BRCM4330 */
- compatible = "brcm,bcm4329-fmac";
+ compatible = "brcm,bcm4330-fmac", "brcm,bcm4329-fmac";
reg = <1>;
/* GPIO216 WL_HOST_WAKE */
interrupt-parent = <&gpio6>;
@@ -436,6 +435,7 @@
status = "okay";
bluetooth {
+ /* BCM4330B1 actually */
compatible = "brcm,bcm4330-bt";
/* GPIO222 rail BT_VREG_EN to BT_REG_ON */
shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
@@ -583,10 +583,9 @@
accelerometer@08 {
compatible = "bosch,bma222";
reg = <0x08>;
- /* FIXME: no idea about this */
- mount-matrix = "1", "0", "0",
- "0", "1", "0",
- "0", "0", "1";
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "-1";
vddio-supply = <&ab8500_ldo_aux2_reg>; // 1.8V
vdd-supply = <&ab8500_ldo_aux1_reg>; // 3V
};
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
index d28a00757d0b..94afd7a0fe1f 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
@@ -211,7 +211,7 @@
#size-cells = <0>;
wifi@1 {
- compatible = "brcm,bcm4329-fmac";
+ compatible = "brcm,bcm4334-fmac", "brcm,bcm4329-fmac";
reg = <1>;
/* GPIO216 WL_HOST_WAKE */
interrupt-parent = <&gpio6>;
@@ -247,6 +247,7 @@
/* FIXME: not quite working yet, probably needs regulators */
bluetooth {
+ /* BCM4334B0 actually */
compatible = "brcm,bcm4330-bt";
shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
device-wakeup-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 7e10ae744c9d..9ac1ffe53413 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -119,17 +119,15 @@
};
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@0 {
+ button-0 {
label = "Wake up";
linux,code = <KEY_WAKEUP>;
gpios = <&gpioa 0 0>;
};
- button@1 {
+ button-1 {
label = "Tamper";
linux,code = <KEY_RESTART>;
gpios = <&gpioc 13 0>;
diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts
index ca8c192449ee..327613fd9666 100644
--- a/arch/arm/boot/dts/stm32746g-eval.dts
+++ b/arch/arm/boot/dts/stm32746g-eval.dts
@@ -81,12 +81,10 @@
};
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@0 {
+ button-0 {
label = "Wake up";
linux,code = <KEY_WAKEUP>;
gpios = <&gpioc 13 0>;
diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
index 4774163af54b..155d9ffacc83 100644
--- a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
@@ -45,7 +45,7 @@
/ {
soc {
- pinctrl: pin-controller {
+ pinctrl: pin-controller@40020000 {
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x40020000 0x3000>;
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index 3dc068b91ca1..075ac57d0bf4 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -81,12 +81,10 @@
};
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@0 {
+ button-0 {
label = "User";
linux,code = <KEY_HOME>;
gpios = <&gpioa 0 0>;
diff --git a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
index 3e7a17d9112e..e10d7a1f3207 100644
--- a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
@@ -42,54 +42,50 @@
#include "stm32f4-pinctrl.dtsi"
-/ {
- soc {
- pinctrl: pin-controller {
- compatible = "st,stm32f429-pinctrl";
+&pinctrl {
+ compatible = "st,stm32f429-pinctrl";
- gpioa: gpio@40020000 {
- gpio-ranges = <&pinctrl 0 0 16>;
- };
+ gpioa: gpio@40020000 {
+ gpio-ranges = <&pinctrl 0 0 16>;
+ };
- gpiob: gpio@40020400 {
- gpio-ranges = <&pinctrl 0 16 16>;
- };
+ gpiob: gpio@40020400 {
+ gpio-ranges = <&pinctrl 0 16 16>;
+ };
- gpioc: gpio@40020800 {
- gpio-ranges = <&pinctrl 0 32 16>;
- };
+ gpioc: gpio@40020800 {
+ gpio-ranges = <&pinctrl 0 32 16>;
+ };
- gpiod: gpio@40020c00 {
- gpio-ranges = <&pinctrl 0 48 16>;
- };
+ gpiod: gpio@40020c00 {
+ gpio-ranges = <&pinctrl 0 48 16>;
+ };
- gpioe: gpio@40021000 {
- gpio-ranges = <&pinctrl 0 64 16>;
- };
+ gpioe: gpio@40021000 {
+ gpio-ranges = <&pinctrl 0 64 16>;
+ };
- gpiof: gpio@40021400 {
- gpio-ranges = <&pinctrl 0 80 16>;
- };
+ gpiof: gpio@40021400 {
+ gpio-ranges = <&pinctrl 0 80 16>;
+ };
- gpiog: gpio@40021800 {
- gpio-ranges = <&pinctrl 0 96 16>;
- };
+ gpiog: gpio@40021800 {
+ gpio-ranges = <&pinctrl 0 96 16>;
+ };
- gpioh: gpio@40021c00 {
- gpio-ranges = <&pinctrl 0 112 16>;
- };
+ gpioh: gpio@40021c00 {
+ gpio-ranges = <&pinctrl 0 112 16>;
+ };
- gpioi: gpio@40022000 {
- gpio-ranges = <&pinctrl 0 128 16>;
- };
+ gpioi: gpio@40022000 {
+ gpio-ranges = <&pinctrl 0 128 16>;
+ };
- gpioj: gpio@40022400 {
- gpio-ranges = <&pinctrl 0 144 16>;
- };
+ gpioj: gpio@40022400 {
+ gpio-ranges = <&pinctrl 0 144 16>;
+ };
- gpiok: gpio@40022800 {
- gpio-ranges = <&pinctrl 0 160 8>;
- };
- };
+ gpiok: gpio@40022800 {
+ gpio-ranges = <&pinctrl 0 160 8>;
};
};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index f6530d724d00..8748d5850298 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -283,8 +283,6 @@
};
timers13: timers@40001c00 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40001C00 0x400>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(TIM13)>;
@@ -299,8 +297,6 @@
};
timers14: timers@40002000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40002000 0x400>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(TIM14)>;
@@ -633,8 +629,6 @@
};
timers10: timers@40014400 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40014400 0x400>;
clocks = <&rcc 0 STM32F4_APB2_CLOCK(TIM10)>;
@@ -649,8 +643,6 @@
};
timers11: timers@40014800 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40014800 0x400>;
clocks = <&rcc 0 STM32F4_APB2_CLOCK(TIM11)>;
@@ -709,7 +701,7 @@
status = "disabled";
};
- rcc: rcc@40023810 {
+ rcc: rcc@40023800 {
#reset-cells = <1>;
#clock-cells = <2>;
compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 2e1b3bbbe4b5..8c982ae79f43 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -104,12 +104,10 @@
};
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@0 {
+ button-0 {
label = "User";
linux,code = <KEY_WAKEUP>;
gpios = <&gpioa 0 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
index fff542662eea..6bf60263dff8 100644
--- a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
@@ -42,55 +42,51 @@
#include "stm32f4-pinctrl.dtsi"
-/ {
- soc {
- pinctrl: pin-controller {
- compatible = "st,stm32f469-pinctrl";
+&pinctrl {
+ compatible = "st,stm32f469-pinctrl";
- gpioa: gpio@40020000 {
- gpio-ranges = <&pinctrl 0 0 16>;
- };
+ gpioa: gpio@40020000 {
+ gpio-ranges = <&pinctrl 0 0 16>;
+ };
- gpiob: gpio@40020400 {
- gpio-ranges = <&pinctrl 0 16 16>;
- };
+ gpiob: gpio@40020400 {
+ gpio-ranges = <&pinctrl 0 16 16>;
+ };
- gpioc: gpio@40020800 {
- gpio-ranges = <&pinctrl 0 32 16>;
- };
+ gpioc: gpio@40020800 {
+ gpio-ranges = <&pinctrl 0 32 16>;
+ };
- gpiod: gpio@40020c00 {
- gpio-ranges = <&pinctrl 0 48 16>;
- };
+ gpiod: gpio@40020c00 {
+ gpio-ranges = <&pinctrl 0 48 16>;
+ };
- gpioe: gpio@40021000 {
- gpio-ranges = <&pinctrl 0 64 16>;
- };
+ gpioe: gpio@40021000 {
+ gpio-ranges = <&pinctrl 0 64 16>;
+ };
- gpiof: gpio@40021400 {
- gpio-ranges = <&pinctrl 0 80 16>;
- };
+ gpiof: gpio@40021400 {
+ gpio-ranges = <&pinctrl 0 80 16>;
+ };
- gpiog: gpio@40021800 {
- gpio-ranges = <&pinctrl 0 96 16>;
- };
+ gpiog: gpio@40021800 {
+ gpio-ranges = <&pinctrl 0 96 16>;
+ };
- gpioh: gpio@40021c00 {
- gpio-ranges = <&pinctrl 0 112 16>;
- };
+ gpioh: gpio@40021c00 {
+ gpio-ranges = <&pinctrl 0 112 16>;
+ };
- gpioi: gpio@40022000 {
- gpio-ranges = <&pinctrl 0 128 16>;
- };
+ gpioi: gpio@40022000 {
+ gpio-ranges = <&pinctrl 0 128 16>;
+ };
- gpioj: gpio@40022400 {
- gpio-ranges = <&pinctrl 0 144 6>,
- <&pinctrl 12 156 4>;
- };
+ gpioj: gpio@40022400 {
+ gpio-ranges = <&pinctrl 0 144 6>,
+ <&pinctrl 12 156 4>;
+ };
- gpiok: gpio@40022800 {
- gpio-ranges = <&pinctrl 3 163 5>;
- };
- };
+ gpiok: gpio@40022800 {
+ gpio-ranges = <&pinctrl 3 163 5>;
};
};
diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
index fe4cfda72a47..1cf8a23c2644 100644
--- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
@@ -9,7 +9,7 @@
/ {
soc {
- pinctrl: pin-controller {
+ pinctrl: pin-controller@40020000 {
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x40020000 0x3000>;
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index e1df603fc981..014b416f57e6 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -265,8 +265,6 @@
};
timers13: timers@40001c00 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40001C00 0x400>;
clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM13)>;
@@ -281,8 +279,6 @@
};
timers14: timers@40002000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40002000 0x400>;
clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM14)>;
@@ -364,9 +360,9 @@
status = "disabled";
};
- i2c3: i2c@40005C00 {
+ i2c3: i2c@40005c00 {
compatible = "st,stm32f7-i2c";
- reg = <0x40005C00 0x400>;
+ reg = <0x40005c00 0x400>;
interrupts = <72>,
<73>;
resets = <&rcc STM32F7_APB1_RESET(I2C3)>;
@@ -531,8 +527,6 @@
};
timers10: timers@40014400 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40014400 0x400>;
clocks = <&rcc 0 STM32F7_APB2_CLOCK(TIM10)>;
@@ -547,8 +541,6 @@
};
timers11: timers@40014800 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-timers";
reg = <0x40014800 0x400>;
clocks = <&rcc 0 STM32F7_APB2_CLOCK(TIM11)>;
diff --git a/arch/arm/boot/dts/stm32f769-disco.dts b/arch/arm/boot/dts/stm32f769-disco.dts
index 0ce7fbc20fa4..be943b701980 100644
--- a/arch/arm/boot/dts/stm32f769-disco.dts
+++ b/arch/arm/boot/dts/stm32f769-disco.dts
@@ -75,12 +75,10 @@
};
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@0 {
+ button-0 {
label = "User";
linux,code = <KEY_HOME>;
gpios = <&gpioa 0 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 05ecdf9ff03a..6e42ca2dada2 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -485,8 +485,6 @@
};
lptimer4: timer@58002c00 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-lptimer";
reg = <0x58002c00 0x400>;
clocks = <&rcc LPTIM4_CK>;
@@ -501,8 +499,6 @@
};
lptimer5: timer@58003000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "st,stm32-lptimer";
reg = <0x58003000 0x400>;
clocks = <&rcc LPTIM5_CK>;
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
index 060baa8b7e9d..5b60ecbd718f 100644
--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -118,6 +118,39 @@
};
};
+ dcmi_pins_b: dcmi-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 4, AF13)>,/* DCMI_HSYNC */
+ <STM32_PINMUX('B', 7, AF13)>,/* DCMI_VSYNC */
+ <STM32_PINMUX('A', 6, AF13)>,/* DCMI_PIXCLK */
+ <STM32_PINMUX('C', 6, AF13)>,/* DCMI_D0 */
+ <STM32_PINMUX('H', 10, AF13)>,/* DCMI_D1 */
+ <STM32_PINMUX('H', 11, AF13)>,/* DCMI_D2 */
+ <STM32_PINMUX('E', 1, AF13)>,/* DCMI_D3 */
+ <STM32_PINMUX('E', 11, AF13)>,/* DCMI_D4 */
+ <STM32_PINMUX('D', 3, AF13)>,/* DCMI_D5 */
+ <STM32_PINMUX('E', 13, AF13)>,/* DCMI_D6 */
+ <STM32_PINMUX('B', 9, AF13)>;/* DCMI_D7 */
+ bias-disable;
+ };
+ };
+
+ dcmi_sleep_pins_b: dcmi-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 4, ANALOG)>,/* DCMI_HSYNC */
+ <STM32_PINMUX('B', 7, ANALOG)>,/* DCMI_VSYNC */
+ <STM32_PINMUX('A', 6, ANALOG)>,/* DCMI_PIXCLK */
+ <STM32_PINMUX('C', 6, ANALOG)>,/* DCMI_D0 */
+ <STM32_PINMUX('H', 10, ANALOG)>,/* DCMI_D1 */
+ <STM32_PINMUX('H', 11, ANALOG)>,/* DCMI_D2 */
+ <STM32_PINMUX('E', 1, ANALOG)>,/* DCMI_D3 */
+ <STM32_PINMUX('E', 11, ANALOG)>,/* DCMI_D4 */
+ <STM32_PINMUX('D', 3, ANALOG)>,/* DCMI_D5 */
+ <STM32_PINMUX('E', 13, ANALOG)>,/* DCMI_D6 */
+ <STM32_PINMUX('B', 9, ANALOG)>;/* DCMI_D7 */
+ };
+ };
+
ethernet0_rgmii_pins_a: rgmii-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
index fcd3230c469b..bd289bf5d269 100644
--- a/arch/arm/boot/dts/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -1369,8 +1369,8 @@
reg = <0x58003000 0x1000>, <0x70000000 0x10000000>;
reg-names = "qspi", "qspi_mm";
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&mdma1 22 0x2 0x100002 0x0 0x0>,
- <&mdma1 22 0x2 0x100008 0x0 0x0>;
+ dmas = <&mdma1 22 0x2 0x10100002 0x0 0x0>,
+ <&mdma1 22 0x2 0x10100008 0x0 0x0>;
dma-names = "tx", "rx";
clocks = <&rcc QSPI_K>;
resets = <&rcc QSPI_R>;
@@ -1416,12 +1416,6 @@
status = "disabled";
};
- stmmac_axi_config_0: stmmac-axi-config {
- snps,wr_osr_lmt = <0x7>;
- snps,rd_osr_lmt = <0x7>;
- snps,blen = <0 0 0 0 16 8 4>;
- };
-
ethernet0: ethernet@5800a000 {
compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
reg = <0x5800a000 0x2000>;
@@ -1447,6 +1441,12 @@
snps,axi-config = <&stmmac_axi_config_0>;
snps,tso;
status = "disabled";
+
+ stmmac_axi_config_0: stmmac-axi-config {
+ snps,wr_osr_lmt = <0x7>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
};
usbh_ohci: usb@5800c000 {
diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts
index 674b2d330dc4..5670b23812a2 100644
--- a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts
+++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts
@@ -89,7 +89,7 @@
};
&pinctrl {
- ltdc_pins: ltdc {
+ ltdc_pins: ltdc-0 {
pins {
pinmux = <STM32_PINMUX('G', 10, AF14)>, /* LTDC_B2 */
<STM32_PINMUX('H', 12, AF14)>, /* LTDC_R6 */
diff --git a/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi b/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
index 113c48b2ef93..a4b14ef3caee 100644
--- a/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
+++ b/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
@@ -184,8 +184,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
};
@@ -208,7 +206,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -219,13 +216,13 @@
vbus_otg: pwr_sw1 {
regulator-name = "vbus_otg";
interrupts = <IT_OCP_OTG 0>;
- regulator-active-discharge;
+ regulator-active-discharge = <1>;
};
vbus_sw: pwr_sw2 {
regulator-name = "vbus_sw";
interrupts = <IT_OCP_SWOUT 0>;
- regulator-active-discharge;
+ regulator-active-discharge = <1>;
};
};
diff --git a/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi b/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi
index 6cf49a0a9e69..2d9461006810 100644
--- a/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi
@@ -173,8 +173,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
};
@@ -197,7 +195,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -213,7 +210,7 @@
vbus_sw: pwr_sw2 {
regulator-name = "vbus_sw";
interrupts = <IT_OCP_SWOUT 0>;
- regulator-active-discharge;
+ regulator-active-discharge = <1>;
};
};
@@ -269,7 +266,7 @@
st,neg-edge;
bus-width = <8>;
vmmc-supply = <&v3v3>;
- vqmmc-supply = <&v3v3>;
+ vqmmc-supply = <&vdd>;
mmc-ddr-3_3v;
status = "okay";
};
diff --git a/arch/arm/boot/dts/stm32mp157c-odyssey.dts b/arch/arm/boot/dts/stm32mp157c-odyssey.dts
index a7ffec8f1516..be1dd5e9e744 100644
--- a/arch/arm/boot/dts/stm32mp157c-odyssey.dts
+++ b/arch/arm/boot/dts/stm32mp157c-odyssey.dts
@@ -64,7 +64,7 @@
pinctrl-0 = <&sdmmc1_b4_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
- cd-gpios = <&gpiob 7 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ cd-gpios = <&gpioi 3 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
disable-wp;
st,neg-edge;
bus-width = <4>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
index 5523f4138fd6..6cf1c8b4c6e2 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
@@ -34,11 +34,10 @@
gpio-keys-polled {
compatible = "gpio-keys-polled";
- #size-cells = <0>;
poll-interval = <20>;
/*
- * The EXTi IRQ line 3 is shared with touchscreen and ethernet,
+ * The EXTi IRQ line 3 is shared with ethernet,
* so mark this as polled GPIO key.
*/
button-0 {
@@ -48,6 +47,16 @@
};
/*
+ * The EXTi IRQ line 6 is shared with touchscreen,
+ * so mark this as polled GPIO key.
+ */
+ button-1 {
+ label = "TA2-GPIO-B";
+ linux,code = <KEY_B>;
+ gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
+ };
+
+ /*
* The EXTi IRQ line 0 is shared with PMIC,
* so mark this as polled GPIO key.
*/
@@ -60,14 +69,6 @@
gpio-keys {
compatible = "gpio-keys";
- #size-cells = <0>;
-
- button-1 {
- label = "TA2-GPIO-B";
- linux,code = <KEY_B>;
- gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
- wakeup-source;
- };
button-3 {
label = "TA4-GPIO-D";
@@ -84,6 +85,7 @@
label = "green:led5";
gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
default-state = "off";
+ status = "disabled";
};
led-1 {
@@ -184,12 +186,11 @@
};
- polytouch@38 {
- compatible = "edt,edt-ft5x06";
+ touchscreen@38 {
+ compatible = "edt,edt-ft5406";
reg = <0x38>;
- interrupt-parent = <&gpiog>;
- interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
- linux,wakeup;
+ interrupt-parent = <&gpioc>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
};
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
index 272a1a67a9ad..8c41f819f776 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
@@ -12,6 +12,8 @@
aliases {
ethernet0 = &ethernet0;
ethernet1 = &ksz8851;
+ rtc0 = &hwrtc;
+ rtc1 = &rtc;
};
memory@c0000000 {
@@ -123,7 +125,6 @@
max-speed = <100>;
phy-handle = <&phy0>;
st,eth-ref-clk-sel;
- phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
mdio0 {
#address-cells = <1>;
@@ -132,6 +133,14 @@
phy0: ethernet-phy@1 {
reg = <1>;
+ /* LAN8710Ai */
+ compatible = "ethernet-phy-id0007.c0f0",
+ "ethernet-phy-ieee802.3-c22";
+ clocks = <&rcc ETHCK_K>;
+ reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <500>;
+ reset-deassert-us = <500>;
+ smsc,disable-energy-detect;
interrupt-parent = <&gpioi>;
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
};
@@ -144,7 +153,7 @@
pinctrl-1 = <&fmc_sleep_pins_b>;
status = "okay";
- ksz8851: ks8851mll@1,0 {
+ ksz8851: ethernet@1,0 {
compatible = "micrel,ks8851-mll";
reg = <1 0x0 0x2>, <1 0x2 0x20000>;
interrupt-parent = <&gpioc>;
@@ -242,7 +251,7 @@
/delete-property/dmas;
/delete-property/dma-names;
- rtc@32 {
+ hwrtc: rtc@32 {
compatible = "microcrystal,rv8803";
reg = <0x32>;
};
@@ -327,8 +336,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
};
@@ -350,7 +357,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -366,7 +372,7 @@
vbus_sw: pwr_sw2 {
regulator-name = "vbus_sw";
interrupts = <IT_OCP_SWOUT 0>;
- regulator-active-discharge;
+ regulator-active-discharge = <1>;
};
};
@@ -431,7 +437,7 @@
#size-cells = <0>;
status = "okay";
- flash0: mx66l51235l@0 {
+ flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-rx-bus-width = <4>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
index 013ae369791d..2b0ac605549d 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
@@ -198,7 +198,7 @@
#size-cells = <0>;
status = "okay";
- flash0: spi-flash@0 {
+ flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-rx-bus-width = <4>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi b/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi
index 713485a95795..6706d8311a66 100644
--- a/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi
@@ -146,8 +146,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
};
@@ -171,7 +169,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -182,13 +179,13 @@
vbus_otg: pwr_sw1 {
regulator-name = "vbus_otg";
interrupts = <IT_OCP_OTG 0>;
- regulator-active-discharge;
+ regulator-active-discharge = <1>;
};
vbus_sw: pwr_sw2 {
regulator-name = "vbus_sw";
interrupts = <IT_OCP_SWOUT 0>;
- regulator-active-discharge;
+ regulator-active-discharge = <1>;
};
};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 7344c37107c6..2beddbb3c518 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -198,7 +198,7 @@
};
link_codec: simple-audio-card,codec {
- sound-dai = <&codec>;
+ sound-dai = <&codec 0>;
};
};
@@ -238,7 +238,7 @@
};
codec: codec@1c22e00 {
- #sound-dai-cells = <0>;
+ #sound-dai-cells = <1>;
compatible = "allwinner,sun8i-a33-codec";
reg = <0x01c22e00 0x400>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
index 97f497854e05..d05fa679dcd3 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
@@ -85,7 +85,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-r40-feta40i.dtsi b/arch/arm/boot/dts/sun8i-r40-feta40i.dtsi
new file mode 100644
index 000000000000..265e0fa57a32
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-r40-feta40i.dtsi
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+// Copyright (C) 2021 Ivan Uvarov <i.uvarov@cognitivepilot.com>
+// Based on the sun8i-r40-bananapi-m2-ultra.dts, which is:
+// Copyright (C) 2017 Chen-Yu Tsai <wens@csie.org>
+// Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+
+#include "sun8i-r40.dtsi"
+
+&i2c0 {
+ status = "okay";
+
+ axp22x: pmic@34 {
+ compatible = "x-powers,axp221";
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+#include "axp22x.dtsi"
+
+&mmc2 {
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_aldo2>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&pio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clk_out_a_pin>;
+ vcc-pa-supply = <&reg_dcdc1>;
+ vcc-pc-supply = <&reg_aldo2>;
+ vcc-pd-supply = <&reg_dcdc1>;
+ vcc-pf-supply = <&reg_dldo4>;
+ vcc-pg-supply = <&reg_dldo1>;
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pa";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi-io";
+};
+
+&reg_dldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-name = "vdd2v5-sata";
+};
+
+&reg_eldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd1v2-sata";
+};
+
+&reg_eldo3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "vcc-pe";
+};
diff --git a/arch/arm/boot/dts/sun8i-r40-oka40i-c.dts b/arch/arm/boot/dts/sun8i-r40-oka40i-c.dts
new file mode 100644
index 000000000000..0bd1336206b8
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-r40-oka40i-c.dts
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+// Copyright (C) 2021 Ivan Uvarov <i.uvarov@cognitivepilot.com>
+// Based on the sun8i-r40-bananapi-m2-ultra.dts, which is:
+// Copyright (C) 2017 Chen-Yu Tsai <wens@csie.org>
+// Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+
+/dts-v1/;
+#include "sun8i-r40-feta40i.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Forlinx OKA40i-C";
+ compatible = "forlinx,oka40i-c", "forlinx,feta40i-c", "allwinner,sun8i-r40";
+
+ aliases {
+ ethernet0 = &gmac;
+ serial0 = &uart0;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ serial5 = &uart5; /* RS485 */
+ serial7 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-5 { /* this is how the leds are labeled on the board */
+ gpios = <&pio 7 26 GPIO_ACTIVE_LOW>; /* PH26 */
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ };
+
+ led-6 {
+ gpios = <&pio 8 15 GPIO_ACTIVE_LOW>; /* PI15 */
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ };
+ };
+
+ reg_vcc5v0: vcc5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&pio 1 10 GPIO_ACTIVE_LOW>; // PB10 WIFI_EN
+ clocks = <&ccu CLK_OUTA>;
+ clock-names = "ext_clock";
+ };
+};
+
+&ahci {
+ ahci-supply = <&reg_dldo4>;
+ phy-supply = <&reg_eldo2>;
+ status = "okay";
+};
+
+&de {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&gmac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_rgmii_pins>;
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&reg_dcdc1>;
+ status = "okay";
+};
+
+&gmac_mdio {
+ phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 8 11 GPIO_ACTIVE_LOW>; // PI11
+ status = "okay";
+};
+
+&mmc3 {
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 8 10 GPIO_ACTIVE_LOW>; // PI10
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&reg_dc1sw {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-lcd";
+};
+
+&reg_dldo2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&tcon_tv0 {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pi_pins>, <&uart2_rts_cts_pi_pins>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pg_pins>, <&uart3_rts_cts_pg_pins>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pg_pins>;
+ status = "okay";
+};
+
+&uart5 { /* RS485 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart5_ph_pins>;
+ status = "okay";
+};
+
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7_pi_pins>;
+ status = "okay";
+};
+
+&usbphy {
+ usb1_vbus-supply = <&reg_vcc5v0>;
+ usb2_vbus-supply = <&reg_vcc5v0>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index d5ad3b9efd12..291f4784e86c 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -357,6 +357,8 @@
clock-names = "ahb", "mmc";
resets = <&ccu RST_BUS_MMC3>;
reset-names = "ahb";
+ pinctrl-0 = <&mmc3_pins>;
+ pinctrl-names = "default";
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
#address-cells = <1>;
@@ -602,6 +604,15 @@
};
/omit-if-no-ref/
+ mmc3_pins: mmc3-pins {
+ pins = "PI4", "PI5", "PI6",
+ "PI7", "PI8", "PI9";
+ function = "mmc3";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ /omit-if-no-ref/
spi0_pc_pins: spi0-pc-pins {
pins = "PC0", "PC1", "PC2";
function = "spi0";
@@ -631,20 +642,65 @@
function = "spi1";
};
+ /omit-if-no-ref/
uart0_pb_pins: uart0-pb-pins {
pins = "PB22", "PB23";
function = "uart0";
};
+ /omit-if-no-ref/
+ uart2_pi_pins: uart2-pi-pins {
+ pins = "PI18", "PI19";
+ function = "uart2";
+ };
+
+ /omit-if-no-ref/
+ uart2_rts_cts_pi_pins: uart2-rts-cts-pi-pins {
+ pins = "PI16", "PI17";
+ function = "uart2";
+ };
+
+ /omit-if-no-ref/
uart3_pg_pins: uart3-pg-pins {
pins = "PG6", "PG7";
function = "uart3";
};
+ /omit-if-no-ref/
uart3_rts_cts_pg_pins: uart3-rts-cts-pg-pins {
pins = "PG8", "PG9";
function = "uart3";
};
+
+ /omit-if-no-ref/
+ uart4_pg_pins: uart4-pg-pins {
+ pins = "PG10", "PG11";
+ function = "uart4";
+ };
+
+ /omit-if-no-ref/
+ uart5_ph_pins: uart5-ph-pins {
+ pins = "PH6", "PH7";
+ function = "uart5";
+ };
+
+ /omit-if-no-ref/
+ uart7_pi_pins: uart7-pi-pins {
+ pins = "PI20", "PI21";
+ function = "uart7";
+ };
+ };
+
+ timer@1c20c00 {
+ compatible = "allwinner,sun4i-a10-timer";
+ reg = <0x01c20c00 0x90>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24M>;
};
wdt: watchdog@1c20c90 {
diff --git a/arch/arm/boot/dts/sun8i-v3.dtsi b/arch/arm/boot/dts/sun8i-v3.dtsi
index c279e13583ba..186c30cbe6ee 100644
--- a/arch/arm/boot/dts/sun8i-v3.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3.dtsi
@@ -1,14 +1,40 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (C) 2019 Icenowy Zheng <icenowy@aosc.io>
+ * Copyright (C) 2021 Tobias Schramm <t.schramm@manjaro.org>
*/
#include "sun8i-v3s.dtsi"
+/ {
+ soc {
+ i2s0: i2s@1c22000 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun8i-v3-i2s",
+ "allwinner,sun8i-h3-i2s";
+ reg = <0x01c22000 0x400>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2S0>, <&ccu CLK_I2S0>;
+ clock-names = "apb", "mod";
+ dmas = <&dma 3>, <&dma 3>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_pins>;
+ resets = <&ccu RST_BUS_I2S0>;
+ status = "disabled";
+ };
+ };
+};
+
&ccu {
compatible = "allwinner,sun8i-v3-ccu";
};
+&codec_analog {
+ compatible = "allwinner,sun8i-v3-codec-analog",
+ "allwinner,sun8i-h3-codec-analog";
+};
+
&emac {
/delete-property/ phy-handle;
/delete-property/ phy-mode;
@@ -25,6 +51,11 @@
&pio {
compatible = "allwinner,sun8i-v3-pinctrl";
+ i2s0_pins: i2s0-pins {
+ pins = "PG10", "PG11", "PG12", "PG13";
+ function = "i2s";
+ };
+
uart1_pg_pins: uart1-pg-pins {
pins = "PG6", "PG7";
function = "uart1";
diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
index db5cd0b8574b..752ad05c8f83 100644
--- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
+++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
@@ -49,16 +49,18 @@
compatible = "licheepi,licheepi-zero-dock", "licheepi,licheepi-zero",
"allwinner,sun8i-v3s";
+ aliases {
+ ethernet0 = &emac;
+ };
+
leds {
/* The LEDs use PG0~2 pins, which conflict with MMC1 */
status = "disabled";
};
};
-&mmc1 {
- broken-cd;
- bus-width = <4>;
- vmmc-supply = <&reg_vcc3v3>;
+&emac {
+ allwinner,leds-active-low;
status = "okay";
};
@@ -94,3 +96,10 @@
voltage = <800000>;
};
};
+
+&mmc1 {
+ broken-cd;
+ bus-width = <4>;
+ vmmc-supply = <&reg_vcc3v3>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index eb4cb63fef13..b30bc1a25ebb 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ * Copyright (C) 2021 Tobias Schramm <t.schramm@manjaro.org>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -172,6 +173,15 @@
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
+ dma: dma-controller@1c02000 {
+ compatible = "allwinner,sun8i-v3s-dma";
+ reg = <0x01c02000 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ resets = <&ccu RST_BUS_DMA>;
+ #dma-cells = <1>;
+ };
+
tcon0: lcd-controller@1c0c000 {
compatible = "allwinner,sun8i-v3s-tcon";
reg = <0x01c0c000 0x1000>;
@@ -275,6 +285,8 @@
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_CE>, <&ccu CLK_CE>;
clock-names = "ahb", "mod";
+ dmas = <&dma 16>, <&dma 16>;
+ dma-names = "rx", "tx";
resets = <&ccu RST_BUS_CE>;
reset-names = "ahb";
};
@@ -422,6 +434,15 @@
clocks = <&osc24M>;
};
+ pwm: pwm@1c21400 {
+ compatible = "allwinner,sun8i-v3s-pwm",
+ "allwinner,sun7i-a20-pwm";
+ reg = <0x01c21400 0xc>;
+ clocks = <&osc24M>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
lradc: lradc@1c22800 {
compatible = "allwinner,sun4i-a10-lradc-keys";
reg = <0x01c22800 0x400>;
@@ -429,6 +450,25 @@
status = "disabled";
};
+ codec: codec@1c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun8i-v3s-codec";
+ reg = <0x01c22c00 0x400>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_CODEC>, <&ccu CLK_AC_DIG>;
+ clock-names = "apb", "codec";
+ resets = <&ccu RST_BUS_CODEC>;
+ dmas = <&dma 15>, <&dma 15>;
+ dma-names = "rx", "tx";
+ allwinner,codec-analog-controls = <&codec_analog>;
+ status = "disabled";
+ };
+
+ codec_analog: codec-analog@1c23000 {
+ compatible = "allwinner,sun8i-v3s-codec-analog";
+ reg = <0x01c23000 0x4>;
+ };
+
uart0: serial@1c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
@@ -436,6 +476,8 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&ccu CLK_BUS_UART0>;
+ dmas = <&dma 6>, <&dma 6>;
+ dma-names = "rx", "tx";
resets = <&ccu RST_BUS_UART0>;
status = "disabled";
};
@@ -447,6 +489,8 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&ccu CLK_BUS_UART1>;
+ dmas = <&dma 7>, <&dma 7>;
+ dma-names = "rx", "tx";
resets = <&ccu RST_BUS_UART1>;
status = "disabled";
};
@@ -458,6 +502,8 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&ccu CLK_BUS_UART2>;
+ dmas = <&dma 8>, <&dma 8>;
+ dma-names = "rx", "tx";
resets = <&ccu RST_BUS_UART2>;
pinctrl-0 = <&uart2_pins>;
pinctrl-names = "default";
@@ -537,6 +583,8 @@
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
clock-names = "ahb", "mod";
+ dmas = <&dma 23>, <&dma 23>;
+ dma-names = "rx", "tx";
pinctrl-names = "default";
pinctrl-0 = <&spi0_pins>;
resets = <&ccu RST_BUS_SPI0>;
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 0b678afb2a5c..8b38f123f554 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -283,7 +283,7 @@
reg = <0x0 0x60007000 0x0 0x1000>;
};
- actmon@6000c800 {
+ actmon: actmon@6000c800 {
compatible = "nvidia,tegra124-actmon";
reg = <0x0 0x6000c800 0x0 0x400>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
@@ -295,6 +295,7 @@
operating-points-v2 = <&emc_bw_dfs_opp_table>;
interconnects = <&mc TEGRA124_MC_MPCORER &emc>;
interconnect-names = "cpu-read";
+ #cooling-cells = <2>;
};
gpio: gpio@6000d000 {
diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
index 2298fc034183..1976c383912a 100644
--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
@@ -420,11 +420,14 @@
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(X, 3) IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <TEGRA_GPIO(X, 3) IRQ_TYPE_EDGE_BOTH>;
gpio-controller;
#gpio-cells = <2>;
+ micdet-cfg = <0>;
+ micdet-delay = <100>;
+
gpio-cfg = <
0x0000 /* MIC_LR_OUT# GPIO, output, low */
0x0000 /* FM2018-enable GPIO, output, low */
@@ -759,7 +762,7 @@
mmc-pwrseq = <&brcm_wifi_pwrseq>;
vmmc-supply = <&vdd_3v3_sys>;
- vqmmc-supply = <&vdd_3v3_sys>;
+ vqmmc-supply = <&vdd_1v8_sys>;
/* Azurewave AW-NH611 BCM4329 */
wifi@1 {
@@ -1030,7 +1033,7 @@
nvidia,audio-codec = <&wm8903>;
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_HIGH>;
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_LOW>;
nvidia,int-mic-en-gpios = <&wm8903 1 GPIO_ACTIVE_HIGH>;
nvidia,headset;
@@ -1056,15 +1059,15 @@
trips {
trip0: cpu-alert0 {
- /* start throttling at 50C */
- temperature = <50000>;
+ /* start throttling at 60C */
+ temperature = <60000>;
hysteresis = <200>;
type = "passive";
};
trip1: cpu-crit {
- /* shut down at 60C */
- temperature = <60000>;
+ /* shut down at 70C */
+ temperature = <70000>;
hysteresis = <2000>;
type = "critical";
};
@@ -1085,6 +1088,7 @@
emc-tables@0 {
nvidia,ram-code = <0>; /* elpida-8gb */
+ reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1182,6 +1186,7 @@
emc-tables@1 {
nvidia,ram-code = <1>; /* elpida-4gb */
+ reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1279,6 +1284,7 @@
emc-tables@2 {
nvidia,ram-code = <2>; /* hynix-8gb */
+ reg = <2>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1376,6 +1382,7 @@
emc-tables@3 {
nvidia,ram-code = <3>; /* hynix-4gb */
+ reg = <3>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index 86494cb4d5a1..ae4312eedcbd 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -748,7 +748,7 @@
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2)
- GPIO_ACTIVE_HIGH>;
+ GPIO_ACTIVE_LOW>;
nvidia,int-mic-en-gpios = <&gpio TEGRA_GPIO(X, 0)
GPIO_ACTIVE_HIGH>;
nvidia,ext-mic-en-gpios = <&gpio TEGRA_GPIO(X, 1)
diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts
index a348ca30e522..b31c9bca16e6 100644
--- a/arch/arm/boot/dts/tegra20-medcom-wide.dts
+++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts
@@ -84,7 +84,7 @@
nvidia,audio-codec = <&wm8903>;
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_HIGH>;
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_LOW>;
clocks = <&tegra_car TEGRA20_CLK_PLL_A>,
<&tegra_car TEGRA20_CLK_PLL_A_OUT0>,
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 940a9f31cd86..3180bff90756 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -2,6 +2,8 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
+#include <dt-bindings/thermal/thermal.h>
+
#include "tegra20.dtsi"
#include "tegra20-cpu-opp.dtsi"
#include "tegra20-cpu-opp-microvolt.dtsi"
@@ -318,6 +320,7 @@
nvidia,ram-code = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0>;
emc-table@166500 {
reg = <166500>;
@@ -497,9 +500,10 @@
};
};
- adt7461@4c {
+ adt7461: temperature-sensor@4c {
compatible = "adi,adt7461";
reg = <0x4c>;
+ #thermal-sensor-cells = <1>;
};
};
@@ -654,11 +658,46 @@
cpu0: cpu@0 {
cpu-supply = <&cpu_vdd_reg>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
cpu-supply = <&cpu_vdd_reg>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <500>; /* milliseconds */
+ polling-delay = <1500>; /* milliseconds */
+
+ thermal-sensors = <&adt7461 1>;
+
+ trips {
+ trip0: cpu-alert0 {
+ /* start throttling at 80C */
+ temperature = <80000>;
+ hysteresis = <200>;
+ type = "passive";
+ };
+
+ trip1: cpu-crit {
+ /* shut down at 85C */
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&trip0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/tegra20-plutux.dts b/arch/arm/boot/dts/tegra20-plutux.dts
index 378f23b2958b..5811b7006a9b 100644
--- a/arch/arm/boot/dts/tegra20-plutux.dts
+++ b/arch/arm/boot/dts/tegra20-plutux.dts
@@ -52,7 +52,7 @@
nvidia,audio-codec = <&wm8903>;
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_HIGH>;
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_LOW>;
clocks = <&tegra_car TEGRA20_CLK_PLL_A>,
<&tegra_car TEGRA20_CLK_PLL_A_OUT0>,
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index c24d4a37613e..92d494b8c3d2 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -911,7 +911,7 @@
nvidia,audio-codec = <&wm8903>;
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(X, 1) GPIO_ACTIVE_HIGH>;
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(X, 1) GPIO_ACTIVE_LOW>;
clocks = <&tegra_car TEGRA20_CLK_PLL_A>,
<&tegra_car TEGRA20_CLK_PLL_A_OUT0>,
diff --git a/arch/arm/boot/dts/tegra20-tec.dts b/arch/arm/boot/dts/tegra20-tec.dts
index 44ced60315de..10ff09d86efa 100644
--- a/arch/arm/boot/dts/tegra20-tec.dts
+++ b/arch/arm/boot/dts/tegra20-tec.dts
@@ -61,7 +61,7 @@
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2)
- GPIO_ACTIVE_HIGH>;
+ GPIO_ACTIVE_LOW>;
clocks = <&tegra_car TEGRA20_CLK_PLL_A>,
<&tegra_car TEGRA20_CLK_PLL_A_OUT0>,
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index 99a356c1ccec..5a2578b3707f 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -709,7 +709,7 @@
nvidia,audio-codec = <&wm8903>;
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_HIGH>;
+ nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_LOW>;
nvidia,int-mic-en-gpios = <&gpio TEGRA_GPIO(X, 0)
GPIO_ACTIVE_HIGH>;
nvidia,ext-mic-en-gpios = <&gpio TEGRA_GPIO(X, 1)
diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi
index dc773b1bf8ee..ae8300baa2d4 100644
--- a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi
+++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi
@@ -927,7 +927,6 @@
compatible = "ti,bq27541";
reg = <0x55>;
power-supplies = <&power_supply>;
- monitored-battery = <&battery_cell>;
};
power_supply: charger@6a {
@@ -1252,13 +1251,6 @@
};
thermal-zones {
- skin-thermal {
- polling-delay-passive = <1000>; /* milliseconds */
- polling-delay = <0>; /* milliseconds */
-
- thermal-sensors = <&nct72 0>;
- };
-
cpu-thermal {
polling-delay-passive = <1000>; /* milliseconds */
polling-delay = <5000>; /* milliseconds */
@@ -1274,8 +1266,8 @@
};
trip1: cpu-crit {
- /* shut down at 60C */
- temperature = <60000>;
+ /* shut down at 65C */
+ temperature = <65000>;
hysteresis = <2000>;
type = "critical";
};
@@ -1287,7 +1279,9 @@
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&actmon THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
};
};
};
diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-maxim-pmic.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-maxim-pmic.dtsi
index 17b6682ffce8..53966fa4eef2 100644
--- a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-maxim-pmic.dtsi
+++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-maxim-pmic.dtsi
@@ -182,4 +182,13 @@
enable-active-high;
vin-supply = <&vdd_3v3_sys>;
};
+
+ pmc@7000e400 {
+ i2c-thermtrip {
+ nvidia,i2c-controller-id = <4>;
+ nvidia,bus-addr = <0x3c>;
+ nvidia,reg-addr = <0x41>;
+ nvidia,reg-data = <0xe0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi
index b97da45ebdb4..9365ae607239 100644
--- a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi
+++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi
@@ -144,7 +144,16 @@
};
vdd_3v3_sys: regulator@1 {
- gpio = <&pmic 7 GPIO_ACTIVE_HIGH>;
+ gpio = <&pmic 6 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ pmc@7000e400 {
+ i2c-thermtrip {
+ nvidia,i2c-controller-id = <4>;
+ nvidia,bus-addr = <0x2d>;
+ nvidia,reg-addr = <0x3f>;
+ nvidia,reg-data = <0x80>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 2dff14b87f3e..d9dd11569d4b 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -630,7 +630,7 @@
nvidia,spkr-en-gpios = <&wm8903 2 GPIO_ACTIVE_HIGH>;
nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(W, 2)
- GPIO_ACTIVE_HIGH>;
+ GPIO_ACTIVE_LOW>;
clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
<&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
diff --git a/arch/arm/boot/dts/tegra30-ouya.dts b/arch/arm/boot/dts/tegra30-ouya.dts
index 9a10e0d69762..ab8744f3d72d 100644
--- a/arch/arm/boot/dts/tegra30-ouya.dts
+++ b/arch/arm/boot/dts/tegra30-ouya.dts
@@ -463,7 +463,9 @@
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&actmon THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
};
};
};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 44a6dbba7081..c577c191be4b 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -410,7 +410,7 @@
reg = <0x6000c000 0x150>; /* AHB Arbitration + Gizmo Controller */
};
- actmon@6000c800 {
+ actmon: actmon@6000c800 {
compatible = "nvidia,tegra30-actmon";
reg = <0x6000c800 0x400>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
@@ -422,6 +422,7 @@
operating-points-v2 = <&emc_bw_dfs_opp_table>;
interconnects = <&mc TEGRA30_MC_MPCORER &emc>;
interconnect-names = "cpu-read";
+ #cooling-cells = <2>;
};
gpio: gpio@6000d000 {
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 37bd41ff8dff..151c0220047d 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -195,16 +195,15 @@
#size-cells = <1>;
ranges;
- vic: intc@10140000 {
+ vic: interrupt-controller@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x10140000 0x1000>;
- clear-mask = <0xffffffff>;
valid-mask = <0xffffffff>;
};
- sic: intc@10003000 {
+ sic: interrupt-controller@10003000 {
compatible = "arm,versatile-sic";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 06a0fdf24026..e7e751a858d8 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -7,7 +7,7 @@
amba {
/* The Versatile PB is using more SIC IRQ lines than the AB */
- sic: intc@10003000 {
+ sic: interrupt-controller@10003000 {
clear-mask = <0xffffffff>;
/*
* Valid interrupt lines mask according to
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 4f7220b11f2d..2ad9fd7c94ec 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -144,7 +144,7 @@
compatible = "nxp,usb-isp1761";
reg = <2 0x03000000 0x20000>;
interrupts = <16>;
- port1-otg;
+ dr_mode = "peripheral";
};
iofpga-bus@300000000 {
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index 2ac41ed3a57c..ec13ceb9ed36 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -62,7 +62,7 @@
compatible = "nxp,usb-isp1761";
reg = <3 0x03000000 0x20000>;
interrupts = <16>;
- port1-otg;
+ dr_mode = "peripheral";
};
iofpga@7,00000000 {
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index 58d293b63581..a3ee647b1ebb 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -254,6 +254,7 @@ CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=-1
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 047975eccefb..2db48438c5d2 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -247,6 +247,7 @@ CONFIG_FSI_MASTER_ASPEED=y
CONFIG_FSI_SCOM=y
CONFIG_FSI_SBEFIFO=y
CONFIG_FSI_OCC=y
+CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_OVERLAY_FS=y
CONFIG_TMPFS=y
@@ -276,6 +277,7 @@ CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
CONFIG_SOFTLOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_WQ_WATCHDOG=y
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 06c888a45eb3..b1564e0aa000 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -1,6 +1,8 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 513f56b3c059..f4e1873912a3 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -236,6 +236,7 @@ CONFIG_DRM_SII9234=y
CONFIG_DRM_TOSHIBA_TC358764=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANFROST=y
+CONFIG_FB=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y
CONFIG_BACKLIGHT_PWM=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 70928cc48939..079fcd8d1d11 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -150,6 +150,8 @@ CONFIG_USB_USBNET=y
CONFIG_USB_NET_CDC_EEM=m
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USB_NET_MCS7830=y
+CONFIG_ATH10K=m
+CONFIG_ATH10K_SDIO=m
CONFIG_BRCMFMAC=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index b06e537d5149..4dfe321a79f6 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -57,10 +57,7 @@ CONFIG_DRM=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_MATROX=y
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 0d6edeb27659..9a7667383df9 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -109,6 +109,8 @@ CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_IXP4XX=y
CONFIG_MTD_RAW_NAND=m
@@ -152,6 +154,7 @@ CONFIG_INPUT_IXP4XX_BEEPER=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_SENSORS_W83781D=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 52a0400fdd92..d9abaae118dd 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -821,7 +821,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_HSIC_USB3503=y
CONFIG_AB8500_USB=y
CONFIG_KEYSTONE_USB_PHY=m
-CONFIG_NOP_USB_XCEIV=m
+CONFIG_NOP_USB_XCEIV=y
CONFIG_AM335X_PHY_USB=m
CONFIG_TWL6030_USB=m
CONFIG_USB_GPIO_VBUS=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 3f35761dc9ff..23595fc5a29a 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -15,8 +15,6 @@ CONFIG_SLAB=y
CONFIG_ARCH_NOMADIK=y
CONFIG_MACH_NOMADIK_8815NHK=y
CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_GENERIC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSMC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -97,6 +95,7 @@ CONFIG_REGULATOR=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_TPO_TPG110=y
CONFIG_DRM_PL111=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_DES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 77b4bea12feb..2ac2418084ab 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -276,6 +276,7 @@ CONFIG_BT_MRVL_SDIO=m
CONFIG_AF_RXRPC=m
CONFIG_RXKAD=y
CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_PCI=y
CONFIG_PCI_MSI=y
@@ -374,6 +375,8 @@ CONFIG_LIBERTAS_DEBUG=y
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_MWIFIEX_USB=m
+CONFIG_WL1251=m
+CONFIG_WL1251_SPI=m
CONFIG_WL12XX=m
CONFIG_WL18XX=m
CONFIG_WLCORE_SPI=m
@@ -617,6 +620,8 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_CPCAP=m
CONFIG_LEDS_LM3532=m
CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_LP55XX_COMMON=m
+CONFIG_LEDS_LP5523=m
CONFIG_LEDS_PCA963X=m
CONFIG_LEDS_PWM=m
CONFIG_LEDS_TRIGGERS=y
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index 483c400dd391..4c01e313099f 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -64,11 +64,9 @@ CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_DRIVERS is not set
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index f25b09efb816..d9a27e4e0914 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -135,6 +135,7 @@ CONFIG_DRM_SII902X=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_FB=y
CONFIG_FB_SH_MOBILE_LCDC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_AS3711=y
@@ -202,7 +203,6 @@ CONFIG_AK8975=y
CONFIG_PWM=y
CONFIG_PWM_RCAR=y
CONFIG_PWM_RENESAS_TPU=y
-CONFIG_RESET_CONTROLLER=y
CONFIG_PHY_RCAR_GEN2=y
CONFIG_PHY_RCAR_GEN3_USB2=y
# CONFIG_DNOTIFY is not set
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 13ef3e4dcbb7..3d8d8af9524d 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -167,6 +167,7 @@ CONFIG_SENSORS_LM95245=y
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_TEGRA_SOCTHERM=m
CONFIG_WATCHDOG=y
CONFIG_MAX77620_WATCHDOG=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index dbb1ef601762..3b30913d7d8d 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -61,6 +61,10 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_BU21013=y
CONFIG_TOUCHSCREEN_CY8CTMA140=y
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=y
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=y
+CONFIG_TOUCHSCREEN_MMS114=y
+CONFIG_TOUCHSCREEN_ZINITIX=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_AB8500_PONKEY=y
CONFIG_INPUT_GPIO_VIBRA=y
@@ -100,6 +104,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
CONFIG_DRM_PANEL_SONY_ACX424AKP=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_MCDE=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_KTD253=y
CONFIG_BACKLIGHT_GPIO=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index e7ecfb365e91..b703f4757021 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -60,7 +60,7 @@ CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
CONFIG_SOUND=y
@@ -88,8 +88,6 @@ CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 4479369540f2..b5e246dd23f4 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -11,9 +11,6 @@ CONFIG_CPUSETS=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_DCSCB=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y
@@ -23,14 +20,17 @@ CONFIG_MCPM=y
CONFIG_VMSPLIT_2G=y
CONFIG_NR_CPUS=8
CONFIG_ARM_PSCI=y
-CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAMA0"
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,7 +43,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -59,7 +58,6 @@ CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
-# CONFIG_SATA_PMP is not set
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
CONFIG_SMC91X=y
@@ -81,11 +79,9 @@ CONFIG_DRM=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_SII902X=y
CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_DRIVERS is not set
@@ -136,10 +132,11 @@ CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_DMA_CMA=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_USER=y
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 48ec1d0337da..a4dbac07e4ef 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -15,6 +15,9 @@ extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
+#ifdef CONFIG_ARM_MODULE_PLTS
+ struct module *mod;
+#endif
};
static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
index f20e08ac85ae..5475cbf9fb6b 100644
--- a/arch/arm/include/asm/insn.h
+++ b/arch/arm/include/asm/insn.h
@@ -13,18 +13,18 @@ arm_gen_nop(void)
}
unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
static inline unsigned long
arm_gen_branch(unsigned long pc, unsigned long addr)
{
- return __arm_gen_branch(pc, addr, false);
+ return __arm_gen_branch(pc, addr, false, true);
}
static inline unsigned long
-arm_gen_branch_link(unsigned long pc, unsigned long addr)
+arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
{
- return __arm_gen_branch(pc, addr, true);
+ return __arm_gen_branch(pc, addr, true, warn);
}
#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index a711322d9f40..cfc9dfd70aad 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -20,8 +20,14 @@
#endif
#include <asm/kasan_def.h>
-/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+/*
+ * PAGE_OFFSET: the virtual address of the start of lowmem, memory above
+ * the virtual address range for userspace.
+ * KERNEL_OFFSET: the virtual address of the start of the kernel image.
+ * We may further offset this with TEXT_OFFSET in practice.
+ */
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#define KERNEL_OFFSET (PAGE_OFFSET)
#ifdef CONFIG_MMU
@@ -153,6 +159,13 @@ extern unsigned long vectors_base;
#ifndef __ASSEMBLY__
/*
+ * Physical start and end address of the kernel sections. These addresses are
+ * 2MB-aligned to match the section mappings placed over the kernel.
+ */
+extern u32 kernel_sec_start;
+extern u32 kernel_sec_end;
+
+/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 4b0df09cbe67..cfffae67c04e 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -19,8 +19,18 @@ enum {
};
#endif
+#define PLT_ENT_STRIDE L1_CACHE_BYTES
+#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
+#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
+
+struct plt_entries {
+ u32 ldr[PLT_ENT_COUNT];
+ u32 lit[PLT_ENT_COUNT];
+};
+
struct mod_plt_sec {
struct elf32_shdr *plt;
+ struct plt_entries *plt_ent;
int plt_count;
};
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index fdee1f04f4f3..a17f01235c29 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -143,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index d4edab51a77c..eabe72ff7381 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -130,7 +130,7 @@
flush_pmd_entry(pudp); \
} while (0)
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index d63a5bb6bd0c..cd1f84bb40ae 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -306,7 +306,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+#define __swp_entry_to_pte(swp) __pte((swp).val | PTE_TYPE_FAULT)
/*
* It is an error for the kernel to have more swap files than we can
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 66f6a3ae68d2..98b37340376b 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -13,7 +13,6 @@
extern void cpu_init(void);
void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
deleted file mode 100644
index ab905ffcf193..000000000000
--- a/arch/arm/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
-
-/*
- * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
- * but we don't want to use linux/unaligned/access_ok.h since that can lead
- * to traps on unaligned stm/ldm or strd/ldrd.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 9a79ef6b1876..3c83b5d29697 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -68,9 +68,10 @@ int ftrace_arch_code_modify_post_process(void)
return 0;
}
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
+ bool warn)
{
- return arm_gen_branch_link(pc, addr);
+ return arm_gen_branch_link(pc, addr, warn);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
@@ -104,14 +105,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
int ret;
pc = (unsigned long)&ftrace_call;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret) {
pc = (unsigned long)&ftrace_regs_call;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
}
@@ -124,10 +125,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long new, old;
unsigned long ip = rec->ip;
+ unsigned long aaddr = adjust_address(rec, addr);
+ struct module *mod = NULL;
+
+#ifdef CONFIG_ARM_MODULE_PLTS
+ mod = rec->arch.mod;
+#endif
old = ftrace_nop_replace(rec);
- new = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_call_replace(ip, aaddr, !mod);
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!new && mod) {
+ aaddr = get_module_plt(mod, ip, aaddr);
+ new = ftrace_call_replace(ip, aaddr, true);
+ }
+#endif
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -140,9 +153,9 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long new, old;
unsigned long ip = rec->ip;
- old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+ old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);
- new = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_call_replace(ip, adjust_address(rec, addr), true);
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -152,12 +165,29 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
+ unsigned long aaddr = adjust_address(rec, addr);
unsigned long ip = rec->ip;
unsigned long old;
unsigned long new;
int ret;
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
+#ifdef CONFIG_ARM_MODULE_PLTS
+ /* mod is only supplied during module loading */
+ if (!mod)
+ mod = rec->arch.mod;
+ else
+ rec->arch.mod = mod;
+#endif
+
+ old = ftrace_call_replace(ip, aaddr,
+ !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!old && mod) {
+ aaddr = get_module_plt(mod, ip, aaddr);
+ old = ftrace_call_replace(ip, aaddr, true);
+ }
+#endif
+
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 7f62c5eccdf3..9eb0b4dbcc12 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -23,7 +23,6 @@
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
-
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
@@ -31,7 +30,7 @@
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
-#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+#define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
@@ -48,6 +47,20 @@
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
+ /*
+ * This needs to be assigned at runtime when the linker symbols are
+ * resolved.
+ */
+ .pushsection .data
+ .align 2
+ .globl kernel_sec_start
+ .globl kernel_sec_end
+kernel_sec_start:
+ .long 0
+kernel_sec_end:
+ .long 0
+ .popsection
+
.macro pgtbl, rd, phys
add \rd, \phys, #TEXT_OFFSET
sub \rd, \rd, #PG_DIR_SIZE
@@ -230,16 +243,23 @@ __create_page_tables:
blo 1b
/*
- * Map our RAM from the start to the end of the kernel .bss section.
+ * The main matter: map in the kernel using section mappings, and
+ * set two variables to indicate the physical start and end of the
+ * kernel.
*/
- add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
- orr r3, r8, r7
+ adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
+ str r8, [r5] @ Save physical start of kernel
+ orr r3, r8, r7 @ Add the MMU flags
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: str r3, [r0], #1 << PMD_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
+ eor r3, r3, r7 @ Remove the MMU flags
+ adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
+ str r3, [r5] @ Save physical end of kernel
#ifdef CONFIG_XIP_KERNEL
/*
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
index 2e844b70386b..db0acbb7d7a0 100644
--- a/arch/arm/kernel/insn.c
+++ b/arch/arm/kernel/insn.c
@@ -3,8 +3,9 @@
#include <linux/kernel.h>
#include <asm/opcodes.h>
-static unsigned long
-__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
+static unsigned long __arm_gen_branch_thumb2(unsigned long pc,
+ unsigned long addr, bool link,
+ bool warn)
{
unsigned long s, j1, j2, i1, i2, imm10, imm11;
unsigned long first, second;
@@ -12,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 4);
if (offset < -16777216 || offset > 16777214) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(warn);
return 0;
}
@@ -33,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
return __opcode_thumb32_compose(first, second);
}
-static unsigned long
-__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
+static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr,
+ bool link, bool warn)
{
unsigned long opcode = 0xea000000;
long offset;
@@ -44,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(warn);
return 0;
}
@@ -54,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
}
unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn)
{
if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
- return __arm_gen_branch_thumb2(pc, addr, link);
+ return __arm_gen_branch_thumb2(pc, addr, link, warn);
else
- return __arm_gen_branch_arm(pc, addr, link);
+ return __arm_gen_branch_arm(pc, addr, link, warn);
}
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 6e626abaefc5..1fc309b41f94 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -4,6 +4,7 @@
*/
#include <linux/elf.h>
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
@@ -12,10 +13,6 @@
#include <asm/cache.h>
#include <asm/opcodes.h>
-#define PLT_ENT_STRIDE L1_CACHE_BYTES
-#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
-#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
-
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \
(PLT_ENT_STRIDE - 4))
@@ -24,9 +21,11 @@
(PLT_ENT_STRIDE - 8))
#endif
-struct plt_entries {
- u32 ldr[PLT_ENT_COUNT];
- u32 lit[PLT_ENT_COUNT];
+static const u32 fixed_plts[] = {
+#ifdef CONFIG_DYNAMIC_FTRACE
+ FTRACE_ADDR,
+ MCOUNT_ADDR,
+#endif
};
static bool in_init(const struct module *mod, unsigned long loc)
@@ -34,14 +33,40 @@ static bool in_init(const struct module *mod, unsigned long loc)
return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}
+static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
+{
+ int i;
+
+ if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
+ return;
+ pltsec->plt_count = ARRAY_SIZE(fixed_plts);
+
+ for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
+ plt->ldr[i] = PLT_ENT_LDR;
+
+ BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
+ memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
+}
+
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
+ struct plt_entries *plt;
+ int idx;
+
+ /* cache the address, ELF header is available only during module load */
+ if (!pltsec->plt_ent)
+ pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
+ plt = pltsec->plt_ent;
- struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
- int idx = 0;
+ prealloc_fixed(pltsec, plt);
+
+ for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
+ if (plt->lit[idx] == val)
+ return (u32)&plt->ldr[idx];
+ idx = 0;
/*
* Look for an existing entry pointing to 'val'. Given that the
* relocations are sorted, this will be the last entry we allocated.
@@ -189,8 +214,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long core_plts = 0;
- unsigned long init_plts = 0;
+ unsigned long core_plts = ARRAY_SIZE(fixed_plts);
+ unsigned long init_plts = ARRAY_SIZE(fixed_plts);
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
@@ -245,6 +270,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.core.plt_count = 0;
+ mod->arch.core.plt_ent = NULL;
mod->arch.init.plt->sh_type = SHT_NOBITS;
mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
@@ -252,6 +278,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.init.plt_count = 0;
+ mod->arch.init.plt_ent = NULL;
pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 0ce388f15422..3044fcb8d073 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -18,7 +18,6 @@ typedef void (*phys_reset_t)(unsigned long, bool);
/*
* Function pointers to optional machine specific functions
*/
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -138,10 +137,7 @@ void machine_restart(char *cmd)
local_irq_disable();
smp_send_stop();
- if (arm_pm_restart)
- arm_pm_restart(reboot_mode, cmd);
- else
- do_kernel_restart(cmd);
+ do_kernel_restart(cmd);
/* Give a grace period for failure to restart of 1s */
mdelay(1000);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 73ca7797b92f..f97eb2371672 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1083,6 +1083,20 @@ void __init hyp_mode_check(void)
#endif
}
+static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+
+static int arm_restart(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ __arm_pm_restart(action, data);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block arm_restart_nb = {
+ .notifier_call = arm_restart,
+ .priority = 128,
+};
+
void __init setup_arch(char **cmdline_p)
{
const struct machine_desc *mdesc = NULL;
@@ -1116,10 +1130,7 @@ void __init setup_arch(char **cmdline_p)
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
/* populate cmd_line too for later use, preserving boot_command_line */
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -1151,8 +1162,10 @@ void __init setup_arch(char **cmdline_p)
kasan_init();
request_standard_resources(mdesc);
- if (mdesc->restart)
- arm_pm_restart = mdesc->restart;
+ if (mdesc->restart) {
+ __arm_pm_restart = mdesc->restart;
+ register_restart_handler(&arm_restart_nb);
+ }
unflatten_device_tree();
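With arm_pm_restart removed, the machine-descriptor hook above is wrapped in a notifier and everything else is expected to go through the generic restart-handler chain. A minimal kernel-style sketch of such a registration follows; my_board_restart() and my_board_init() are made-up names, and priority 128 simply mirrors the value used for the mdesc wrapper.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical platform-specific reset sequence. */
static void my_board_restart(enum reboot_mode mode, const char *cmd)
{
	/* poke the board's reset controller here */
}

static int my_restart_handler(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	my_board_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block my_restart_nb = {
	.notifier_call	= my_restart_handler,
	.priority	= 128,	/* same priority as the legacy mdesc->restart wrapper */
};

static int __init my_board_init(void)
{
	return register_restart_handler(&my_restart_nb);
}
arch_initcall(my_board_init);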
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index f7f4620d59c3..20c4f6d20c7a 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -47,7 +47,7 @@ SECTIONS
#endif
}
- . = PAGE_OFFSET + TEXT_OFFSET;
+ . = KERNEL_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index de11030748d0..1d3aef84287d 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -9,7 +9,6 @@ menuconfig ARCH_DAVINCI
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
select RESET_CONTROLLER
- select HAVE_IDE
select PINCTRL_SINGLE
if ARCH_DAVINCI
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 25b01da4771b..8b48326be9fd 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -55,6 +55,7 @@ void __init exynos_sysram_init(void)
sysram_base_addr = of_iomap(node, 0);
sysram_base_phys = of_translate_address(node,
of_get_address(node, 0, NULL, NULL));
+ of_node_put(node);
break;
}
@@ -62,6 +63,7 @@ void __init exynos_sysram_init(void)
if (!of_device_is_available(node))
continue;
sysram_ns_base_addr = of_iomap(node, 0);
+ of_node_put(node);
break;
}
}
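The two added of_node_put() calls follow the usual rule that of_find_compatible_node() (and the for_each_* iterators built on it) return a node with its reference count raised, which the caller must drop once it is done with the node. A generic sketch of that pattern, using a made-up compatible string, might look like:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_iomap_by_compatible(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-sram");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);
	of_node_put(np);	/* drop the reference; the mapping stays valid */

	return base;
}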
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 9cebd360d58e..d1506ef7a537 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
-ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_LS1021A),)
+ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_IMX7D_CA7)$(CONFIG_SOC_LS1021A),)
AFLAGS_headsmp.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 474dedb73bc7..13f3068e9845 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -73,12 +73,14 @@ static inline void imx_scu_map_io(void) {}
static inline void imx_smp_prepare(void) {}
#endif
void imx_src_init(void);
+void imx7_src_init(void);
void imx_gpc_pre_suspend(bool arm_power_off);
void imx_gpc_post_resume(void);
void imx_gpc_mask_all(void);
void imx_gpc_restore_all(void);
void imx_gpc_hwirq_mask(unsigned int hwirq);
void imx_gpc_hwirq_unmask(unsigned int hwirq);
+void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
void imx_anatop_init(void);
void imx_anatop_pre_suspend(void);
void imx_anatop_post_resume(void);
@@ -131,6 +133,7 @@ static inline void imx_init_l2cache(void) {}
#endif
extern const struct smp_operations imx_smp_ops;
+extern const struct smp_operations imx7_smp_ops;
extern const struct smp_operations ls1021a_smp_ops;
#endif
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index 766dbdb2ae27..fcba58be8e79 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -21,6 +21,15 @@ diag_reg_offset:
ENTRY(v7_secondary_startup)
ARM_BE8(setend be) @ go BE8 if entered LE
+ mrc p15, 0, r0, c0, c0, 0
+ lsl r0, r0, #16
+ lsr r0, r0, #20
+ /* 0xc07 is the Cortex-A7 ID */
+ mov r1, #0xc00
+ orr r1, #0x7
+ cmp r0, r1
+ beq secondary_startup
+
set_diag_reg
b secondary_startup
ENDPROC(v7_secondary_startup)
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 82e22398d43d..e24a46dc5703 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -11,6 +11,7 @@
#include <asm/proc-fns.h>
#include "common.h"
+#include "hardware.h"
/*
* platform-specific code to shutdown a CPU
@@ -40,5 +41,7 @@ int imx_cpu_kill(unsigned int cpu)
return 0;
imx_enable_cpu(cpu, false);
imx_set_cpu_arg(cpu, 0);
+ if (cpu_is_imx7d())
+ imx_gpcv2_set_core1_pdn_pup_by_software(true);
return 1;
}
diff --git a/arch/arm/mach-imx/mach-imx50.c b/arch/arm/mach-imx/mach-imx50.c
index f4da205f57db..a2d35f9ba474 100644
--- a/arch/arm/mach-imx/mach-imx50.c
+++ b/arch/arm/mach-imx/mach-imx50.c
@@ -9,6 +9,12 @@
#include <asm/mach/arch.h>
#include "common.h"
+#include "hardware.h"
+
+static void __init imx50_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MX50);
+}
static const char * const imx50_dt_board_compat[] __initconst = {
"fsl,imx50",
@@ -16,5 +22,6 @@ static const char * const imx50_dt_board_compat[] __initconst = {
};
DT_MACHINE_START(IMX50_DT, "Freescale i.MX50 (Device Tree Support)")
+ .init_early = imx50_init_early,
.dt_compat = imx50_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 703998ebb52e..11dcc369ec14 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -40,27 +40,6 @@ static int ksz9021rn_phy_fixup(struct phy_device *phydev)
return 0;
}
-static void mmd_write_reg(struct phy_device *dev, int device, int reg, int val)
-{
- phy_write(dev, 0x0d, device);
- phy_write(dev, 0x0e, reg);
- phy_write(dev, 0x0d, (1 << 14) | device);
- phy_write(dev, 0x0e, val);
-}
-
-static int ksz9031rn_phy_fixup(struct phy_device *dev)
-{
- /*
- * min rx data delay, max rx/tx clock delay,
- * min rx/tx control delay
- */
- mmd_write_reg(dev, 2, 4, 0);
- mmd_write_reg(dev, 2, 5, 0);
- mmd_write_reg(dev, 2, 8, 0x003ff);
-
- return 0;
-}
-
/*
* fixup for PLX PEX8909 bridge to configure GPIO1-7 as output High
* as they are used for slots1-7 PERST#
@@ -89,75 +68,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8609, ventana_pciesw_early_fixup);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8606, ventana_pciesw_early_fixup);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8604, ventana_pciesw_early_fixup);
-static int ar8031_phy_fixup(struct phy_device *dev)
-{
- u16 val;
-
- /* To enable AR8031 output a 125MHz clk from CLK_25M */
- phy_write(dev, 0xd, 0x7);
- phy_write(dev, 0xe, 0x8016);
- phy_write(dev, 0xd, 0x4007);
-
- val = phy_read(dev, 0xe);
- val &= 0xffe3;
- val |= 0x18;
- phy_write(dev, 0xe, val);
-
- /* introduce tx clock delay */
- phy_write(dev, 0x1d, 0x5);
- val = phy_read(dev, 0x1e);
- val |= 0x0100;
- phy_write(dev, 0x1e, val);
-
- return 0;
-}
-
-#define PHY_ID_AR8031 0x004dd074
-
-static int ar8035_phy_fixup(struct phy_device *dev)
-{
- u16 val;
-
- /* Ar803x phy SmartEEE feature cause link status generates glitch,
- * which cause ethernet link down/up issue, so disable SmartEEE
- */
- phy_write(dev, 0xd, 0x3);
- phy_write(dev, 0xe, 0x805d);
- phy_write(dev, 0xd, 0x4003);
-
- val = phy_read(dev, 0xe);
- phy_write(dev, 0xe, val & ~(1 << 8));
-
- /*
- * Enable 125MHz clock from CLK_25M on the AR8031. This
- * is fed in to the IMX6 on the ENET_REF_CLK (V22) pad.
- * Also, introduce a tx clock delay.
- *
- * This is the same as is the AR8031 fixup.
- */
- ar8031_phy_fixup(dev);
-
- /*check phy power*/
- val = phy_read(dev, 0x0);
- if (val & BMCR_PDOWN)
- phy_write(dev, 0x0, val & ~BMCR_PDOWN);
-
- return 0;
-}
-
-#define PHY_ID_AR8035 0x004dd072
-
static void __init imx6q_enet_phy_init(void)
{
if (IS_BUILTIN(CONFIG_PHYLIB)) {
phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
ksz9021rn_phy_fixup);
- phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
- ksz9031rn_phy_fixup);
- phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
- ar8031_phy_fixup);
- phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
- ar8035_phy_fixup);
}
}
@@ -257,9 +172,6 @@ static void __init imx6q_init_machine(void)
imx_get_soc_revision());
imx6q_enet_phy_init();
-
- of_platform_default_populate(NULL, NULL, NULL);
-
imx_anatop_init();
cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
imx6q_1588_init();
diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c
index 781e2a94fdd7..e65ed5218f53 100644
--- a/arch/arm/mach-imx/mach-imx6sx.c
+++ b/arch/arm/mach-imx/mach-imx6sx.c
@@ -15,31 +15,6 @@
#include "common.h"
#include "cpuidle.h"
-static int ar8031_phy_fixup(struct phy_device *dev)
-{
- u16 val;
-
- /* Set RGMII IO voltage to 1.8V */
- phy_write(dev, 0x1d, 0x1f);
- phy_write(dev, 0x1e, 0x8);
-
- /* introduce tx clock delay */
- phy_write(dev, 0x1d, 0x5);
- val = phy_read(dev, 0x1e);
- val |= 0x0100;
- phy_write(dev, 0x1e, val);
-
- return 0;
-}
-
-#define PHY_ID_AR8031 0x004dd074
-static void __init imx6sx_enet_phy_init(void)
-{
- if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
- ar8031_phy_fixup);
-}
-
static void __init imx6sx_enet_clk_sel(void)
{
struct regmap *gpr;
@@ -57,7 +32,6 @@ static void __init imx6sx_enet_clk_sel(void)
static inline void imx6sx_enet_init(void)
{
- imx6sx_enet_phy_init();
imx6sx_enet_clk_sel();
}
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index 879c35929a13..6fdd06bcf988 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -14,25 +14,6 @@
#include "common.h"
-static int ar8031_phy_fixup(struct phy_device *dev)
-{
- u16 val;
-
- /* Set RGMII IO voltage to 1.8V */
- phy_write(dev, 0x1d, 0x1f);
- phy_write(dev, 0x1e, 0x8);
-
- /* disable phy AR8031 SmartEEE function. */
- phy_write(dev, 0xd, 0x3);
- phy_write(dev, 0xe, 0x805d);
- phy_write(dev, 0xd, 0x4003);
- val = phy_read(dev, 0xe);
- val &= ~(0x1 << 8);
- phy_write(dev, 0xe, val);
-
- return 0;
-}
-
static int bcm54220_phy_fixup(struct phy_device *dev)
{
/* enable RXC skew select RGMII copper mode */
@@ -44,14 +25,11 @@ static int bcm54220_phy_fixup(struct phy_device *dev)
return 0;
}
-#define PHY_ID_AR8031 0x004dd074
#define PHY_ID_BCM54220 0x600d8589
static void __init imx7d_enet_phy_init(void)
{
if (IS_BUILTIN(CONFIG_PHYLIB)) {
- phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
- ar8031_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_BCM54220, 0xffffffff,
bcm54220_phy_fixup);
}
@@ -91,7 +69,7 @@ static void __init imx7d_init_late(void)
static void __init imx7d_init_irq(void)
{
imx_init_revision_from_anatop();
- imx_src_init();
+ imx7_src_init();
irqchip_init();
}
@@ -102,6 +80,7 @@ static const char *const imx7d_dt_compat[] __initconst = {
};
DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)")
+ .smp = smp_ops(imx7_smp_ops),
.init_irq = imx7d_init_irq,
.init_machine = imx7d_init_machine,
.init_late = imx7d_init_late,
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 0dfd0ae7a63d..af12668d0bf5 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -103,6 +103,7 @@ struct mmdc_pmu {
struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
struct hlist_node node;
struct fsl_mmdc_devtype_data *devtype_data;
+ struct clk *mmdc_ipg_clk;
};
/*
@@ -462,11 +463,14 @@ static int imx_mmdc_remove(struct platform_device *pdev)
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
perf_pmu_unregister(&pmu_mmdc->pmu);
+ iounmap(pmu_mmdc->mmdc_base);
+ clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
kfree(pmu_mmdc);
return 0;
}
-static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
+static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
+ struct clk *mmdc_ipg_clk)
{
struct mmdc_pmu *pmu_mmdc;
char *name;
@@ -494,6 +498,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
}
mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
if (mmdc_num == 0)
name = "mmdc";
else
@@ -529,7 +534,7 @@ pmu_free:
#else
#define imx_mmdc_remove NULL
-#define imx_mmdc_perf_init(pdev, mmdc_base) 0
+#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
#endif
static int imx_mmdc_probe(struct platform_device *pdev)
@@ -567,7 +572,13 @@ static int imx_mmdc_probe(struct platform_device *pdev)
val &= ~(1 << BP_MMDC_MAPSR_PSD);
writel_relaxed(val, reg);
- return imx_mmdc_perf_init(pdev, mmdc_base);
+ err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
+ if (err) {
+ iounmap(mmdc_base);
+ clk_disable_unprepare(mmdc_ipg_clk);
+ }
+
+ return err;
}
int imx_mmdc_get_ddr_type(void)
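The probe path above now unmaps the registers and disables the clock when imx_mmdc_perf_init() fails. The same unwind is often written with goto labels; a condensed, hypothetical sketch of that idiom (example_perf_init() and the "ipg" clock name are placeholders) is:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

static int example_perf_init(struct platform_device *pdev,
			     void __iomem *base, struct clk *clk);	/* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct clk *clk;
	int ret;

	base = of_iomap(pdev->dev.of_node, 0);
	if (!base)
		return -ENOMEM;

	clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto unmap;
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		goto unmap;

	ret = example_perf_init(pdev, base, clk);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(clk);
unmap:
	iounmap(base);
	return ret;
}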
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index cf4e9335831c..972639038be5 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -92,6 +92,32 @@ const struct smp_operations imx_smp_ops __initconst = {
#endif
};
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+static void __init imx7_smp_init_cpus(void)
+{
+ struct device_node *np;
+ int i, ncores = 0;
+
+ /* The i.MX7D SCU does not report the core count, so get it from DT */
+ for_each_of_cpu_node(np)
+ ncores++;
+
+ for (i = ncores; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+}
+
+const struct smp_operations imx7_smp_ops __initconst = {
+ .smp_init_cpus = imx7_smp_init_cpus,
+ .smp_boot_secondary = imx_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = imx_cpu_die,
+ .cpu_kill = imx_cpu_kill,
+#endif
+};
+
#define DCFG_CCSR_SCRATCHRW1 0x200
static int ls1021a_boot_secondary(unsigned int cpu, struct task_struct *idle)
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index f52f371292ac..95fd1fbb0826 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -6,15 +6,19 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include "common.h"
+#include "hardware.h"
#define SRC_SCR 0x000
-#define SRC_GPR1 0x020
+#define SRC_GPR1_V1 0x020
+#define SRC_GPR1_V2 0x074
+#define SRC_GPR1(gpr_v2) ((gpr_v2) ? SRC_GPR1_V2 : SRC_GPR1_V1)
#define BP_SRC_SCR_WARM_RESET_ENABLE 0
#define BP_SRC_SCR_SW_GPU_RST 1
#define BP_SRC_SCR_SW_VPU_RST 2
@@ -23,9 +27,18 @@
#define BP_SRC_SCR_SW_IPU2_RST 12
#define BP_SRC_SCR_CORE1_RST 14
#define BP_SRC_SCR_CORE1_ENABLE 22
+/* below is for i.MX7D */
+#define SRC_A7RCR1 0x008
+#define BP_SRC_A7RCR1_A7_CORE1_ENABLE 1
+#define GPC_CPU_PGC_SW_PUP_REQ 0xf0
+#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
+#define GPC_PGC_C1 0x840
+#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 0x2
static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);
+static bool gpr_v2;
+static void __iomem *gpc_base;
static const int sw_reset_bits[5] = {
BP_SRC_SCR_SW_GPU_RST,
@@ -73,17 +86,64 @@ static struct reset_controller_dev imx_reset_controller = {
.nr_resets = ARRAY_SIZE(sw_reset_bits),
};
+static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
+{
+ writel_relaxed(enable, gpc_base + offset);
+}
+
+/*
+ * The motivation for bringing up the second i.MX7D core inside the kernel
+ * is that legacy vendor bootloaders usually do not implement PSCI support.
+ * This is a significant blocker for systems in the field running old
+ * bootloader versions that want to upgrade to a modern mainline kernel, as
+ * only one CPU of the i.MX7D would be brought up.
+ * Bring up the second i.MX7D core inside the kernel to make the migration
+ * path to a mainline kernel easier for existing i.MX7D users.
+ */
+void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn)
+{
+ u32 reg = pdn ? GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ;
+ u32 val, pup;
+ int ret;
+
+ imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1);
+ val = readl_relaxed(gpc_base + reg);
+ val |= BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
+ writel_relaxed(val, gpc_base + reg);
+
+ ret = readl_relaxed_poll_timeout_atomic(gpc_base + reg, pup,
+ !(pup & BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7),
+ 5, 1000000);
+ if (ret < 0) {
+ pr_err("i.MX7D: CORE1_A7 power up timeout\n");
+ val &= ~BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
+ writel_relaxed(val, gpc_base + reg);
+ }
+
+ imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1);
+}
+
void imx_enable_cpu(int cpu, bool enable)
{
u32 mask, val;
cpu = cpu_logical_map(cpu);
- mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
spin_lock(&scr_lock);
- val = readl_relaxed(src_base + SRC_SCR);
- val = enable ? val | mask : val & ~mask;
- val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
- writel_relaxed(val, src_base + SRC_SCR);
+ if (gpr_v2) {
+ if (enable)
+ imx_gpcv2_set_core1_pdn_pup_by_software(false);
+
+ mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
+ val = readl_relaxed(src_base + SRC_A7RCR1);
+ val = enable ? val | mask : val & ~mask;
+ writel_relaxed(val, src_base + SRC_A7RCR1);
+ } else {
+ mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
+ val = readl_relaxed(src_base + SRC_SCR);
+ val = enable ? val | mask : val & ~mask;
+ val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
+ writel_relaxed(val, src_base + SRC_SCR);
+ }
spin_unlock(&scr_lock);
}
@@ -91,19 +151,19 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
{
cpu = cpu_logical_map(cpu);
writel_relaxed(__pa_symbol(jump_addr),
- src_base + SRC_GPR1 + cpu * 8);
+ src_base + SRC_GPR1(gpr_v2) + cpu * 8);
}
u32 imx_get_cpu_arg(int cpu)
{
cpu = cpu_logical_map(cpu);
- return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
+ return readl_relaxed(src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
}
void imx_set_cpu_arg(int cpu, u32 arg)
{
cpu = cpu_logical_map(cpu);
- writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
+ writel_relaxed(arg, src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
}
void __init imx_src_init(void)
@@ -131,3 +191,26 @@ void __init imx_src_init(void)
writel_relaxed(val, src_base + SRC_SCR);
spin_unlock(&scr_lock);
}
+
+void __init imx7_src_init(void)
+{
+ struct device_node *np;
+
+ gpr_v2 = true;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-src");
+ if (!np)
+ return;
+
+ src_base = of_iomap(np, 0);
+ if (!src_base)
+ return;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-gpc");
+ if (!np)
+ return;
+
+ gpc_base = of_iomap(np, 0);
+ if (!gpc_base)
+ return;
+}
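The power up/down request added above spins on the GPC register with readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h>, which polls until a condition on the read value becomes true or the timeout expires. Its general shape, with a hypothetical register and bit, is roughly:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define EXAMPLE_REQ_BIT		BIT(1)

static int example_wait_for_ack(void __iomem *reg)
{
	u32 val;

	/*
	 * Poll every 5 us and give up after 1 s: returns 0 once the
	 * request bit has self-cleared, -ETIMEDOUT otherwise.
	 */
	return readl_relaxed_poll_timeout_atomic(reg, val,
						 !(val & EXAMPLE_REQ_BIT),
						 5, 1000000);
}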
diff --git a/arch/arm/mach-imx/suspend-imx53.S b/arch/arm/mach-imx/suspend-imx53.S
index 41b8aad65363..46570ec2fbcf 100644
--- a/arch/arm/mach-imx/suspend-imx53.S
+++ b/arch/arm/mach-imx/suspend-imx53.S
@@ -28,11 +28,11 @@
* ^
* ^
* imx53_suspend code
- * PM_INFO structure(imx53_suspend_info)
+ * PM_INFO structure(imx5_cpu_suspend_info)
* ======================== low address =======================
*/
-/* Offsets of members of struct imx53_suspend_info */
+/* Offsets of members of struct imx5_cpu_suspend_info */
#define SUSPEND_INFO_MX53_M4IF_V_OFFSET 0x0
#define SUSPEND_INFO_MX53_IOMUXC_V_OFFSET 0x4
#define SUSPEND_INFO_MX53_IO_COUNT_OFFSET 0x8
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 165c184801e1..34a1c7742088 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -7,7 +7,7 @@ comment "IXP4xx Platforms"
config MACH_IXP4XX_OF
bool
- prompt "Devce Tree IXP4xx boards"
+ prompt "Device Tree IXP4xx boards"
default y
select ARM_APPENDED_DTB # Old Redboot bootloaders deployed
select I2C
@@ -20,7 +20,7 @@ config MACH_IXP4XX_OF
config MACH_NSLU2
bool
prompt "Linksys NSLU2"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Linksys's
NSLU2 NAS device. For more information on this platform,
@@ -28,7 +28,7 @@ config MACH_NSLU2
config MACH_AVILA
bool "Avila"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support the Gateworks
Avila Network Platform. For more information on this platform,
@@ -44,7 +44,7 @@ config MACH_LOFT
config ARCH_ADI_COYOTE
bool "Coyote"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support the ADI
Engineering Coyote Gateway Reference Platform. For more
@@ -52,7 +52,7 @@ config ARCH_ADI_COYOTE
config MACH_GATEWAY7001
bool "Gateway 7001"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Gateway's
7001 Access Point. For more information on this platform,
@@ -60,7 +60,7 @@ config MACH_GATEWAY7001
config MACH_WG302V2
bool "Netgear WG302 v2 / WAG302 v2"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Netgear's
WG302 v2 or WAG302 v2 Access Points. For more information
@@ -68,6 +68,7 @@ config MACH_WG302V2
config ARCH_IXDP425
bool "IXDP425"
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Intel's
IXDP425 Development Platform (Also known as Richfield).
@@ -75,6 +76,7 @@ config ARCH_IXDP425
config MACH_IXDPG425
bool "IXDPG425"
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Intel's
IXDPG425 Development Platform (Also known as Montajade).
@@ -89,6 +91,7 @@ config MACH_IXDP465
config MACH_GORAMO_MLR
bool "GORAMO Multi Link Router"
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support GORAMO
MultiLink router.
@@ -120,7 +123,7 @@ config ARCH_PRPMC1100
config MACH_NAS100D
bool
prompt "NAS100D"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Iomega's
NAS 100d device. For more information on this platform,
@@ -129,7 +132,7 @@ config MACH_NAS100D
config MACH_DSMG600
bool
prompt "D-Link DSM-G600 RevA"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support D-Link's
DSM-G600 RevA device. For more information on this platform,
@@ -143,7 +146,7 @@ config ARCH_IXDP4XX
config MACH_FSG
bool
prompt "Freecom FSG-3"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Freecom's
FSG-3 device. For more information on this platform,
@@ -152,7 +155,7 @@ config MACH_FSG
config MACH_ARCOM_VULCAN
bool
prompt "Arcom/Eurotech Vulcan"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support Arcom's
Vulcan board.
@@ -173,7 +176,7 @@ config CPU_IXP43X
config MACH_GTWX5715
bool "Gemtek WX5715 (Linksys WRV54G)"
depends on ARCH_IXP4XX
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
This board is currently inside the Linksys WRV54G Gateways.
@@ -196,7 +199,7 @@ config MACH_DEVIXP
config MACH_MICCPT
bool "Omicron MICCPT"
- select FORCE_PCI
+ depends on IXP4XX_PCI_LEGACY
help
Say 'Y' here if you want your kernel to support the MICCPT
board from OMICRON electronics GmbH.
@@ -209,9 +212,16 @@ config MACH_MIC256
comment "IXP4xx Options"
+config IXP4XX_PCI_LEGACY
+ bool "IXP4xx legacy PCI driver support"
+ depends on PCI
+ help
+ Selects legacy PCI driver.
+ Not recommended for new development.
+
config IXP4XX_INDIRECT_PCI
bool "Use indirect PCI memory access"
- depends on PCI
+ depends on IXP4XX_PCI_LEGACY
help
IXP4xx provides two methods of accessing PCI memory space:
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 1981b33109cb..ec1d3029f80c 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -19,6 +19,7 @@
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/gpio/machine.h>
+#include <linux/platform_data/pata_ixp4xx_cf.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 007a44412e24..b5eadd70d903 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -27,12 +27,12 @@
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/sched_clock.h>
+#include <linux/soc/ixp4xx/cpu.h>
#include <linux/irqchip/irq-ixp4xx.h>
#include <linux/platform_data/timer-ixp4xx.h>
#include <linux/dma-map-ops.h>
#include <mach/udc.h>
#include <mach/hardware.h>
-#include <mach/io.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/exception.h>
@@ -44,6 +44,27 @@
#include "irqs.h"
+u32 ixp4xx_read_feature_bits(void)
+{
+ u32 val = ~__raw_readl(IXP4XX_EXP_CFG2);
+
+ if (cpu_is_ixp42x_rev_a0())
+ return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP |
+ IXP4XX_FEATURE_AES);
+ if (cpu_is_ixp42x())
+ return val & IXP42X_FEATURE_MASK;
+ if (cpu_is_ixp43x())
+ return val & IXP43X_FEATURE_MASK;
+ return val & IXP46X_FEATURE_MASK;
+}
+EXPORT_SYMBOL(ixp4xx_read_feature_bits);
+
+void ixp4xx_write_feature_bits(u32 value)
+{
+ __raw_writel(~value, IXP4XX_EXP_CFG2);
+}
+EXPORT_SYMBOL(ixp4xx_write_feature_bits);
+
#define IXP4XX_TIMER_FREQ 66666000
/*************************************************************************
@@ -215,6 +236,27 @@ static struct resource ixp46x_i2c_resources[] = {
}
};
+/* A single 32-bit register on IXP46x */
+#define IXP4XX_HWRANDOM_BASE_PHYS 0x70002100
+
+static struct resource ixp46x_hwrandom_resource[] = {
+ {
+ .start = IXP4XX_HWRANDOM_BASE_PHYS,
+ .end = IXP4XX_HWRANDOM_BASE_PHYS + 0x3,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ixp46x_hwrandom_device = {
+ .name = "ixp4xx-hwrandom",
+ .id = -1,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = ixp46x_hwrandom_resource,
+ .num_resources = ARRAY_SIZE(ixp46x_hwrandom_resource),
+};
+
/*
* I2C controller. The IXP46x uses the same block as the IOP3xx, so
* we just use the same device name.
@@ -227,7 +269,8 @@ static struct platform_device ixp46x_i2c_controller = {
};
static struct platform_device *ixp46x_devices[] __initdata = {
- &ixp46x_i2c_controller
+ &ixp46x_hwrandom_device,
+ &ixp46x_i2c_controller,
};
unsigned long ixp4xx_exp_bus_size;
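With ixp4xx_read_feature_bits() moved to common.c and exported, code outside mach-ixp4xx can test the fuse bits without the old <mach/cpu.h>. A hedged sketch of such a consumer, assuming the declaration and the IXP4XX_FEATURE_* masks live in the new <linux/soc/ixp4xx/cpu.h> header and with an invented helper name, could be:

#include <linux/soc/ixp4xx/cpu.h>
#include <linux/types.h>

/* Hypothetical check used before registering an AES-capable driver. */
static bool example_has_aes(void)
{
	return !!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_AES);
}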
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 507ee3878769..844329c5610d 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -28,6 +28,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include <mach/hardware.h>
#include "irqs.h"
diff --git a/arch/arm/mach-ixp4xx/include/mach/cpu.h b/arch/arm/mach-ixp4xx/include/mach/cpu.h
deleted file mode 100644
index b872a5354ddd..000000000000
--- a/arch/arm/mach-ixp4xx/include/mach/cpu.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-ixp4xx/include/mach/cpu.h
- *
- * IXP4XX cpu type detection
- *
- * Copyright (C) 2007 MontaVista Software, Inc.
- */
-
-#ifndef __ASM_ARCH_CPU_H__
-#define __ASM_ARCH_CPU_H__
-
-#include <linux/io.h>
-#include <asm/cputype.h>
-
-/* Processor id value in CP15 Register 0 */
-#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */
-#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0
-
-#define IXP43X_PROCESSOR_ID_VALUE 0x69054040
-#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0
-
-#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */
-#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0
-
-#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \
- IXP42X_PROCESSOR_ID_VALUE)
-#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \
- IXP42X_PROCESSOR_ID_VALUE)
-#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \
- IXP43X_PROCESSOR_ID_VALUE)
-#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \
- IXP46X_PROCESSOR_ID_VALUE)
-
-static inline u32 ixp4xx_read_feature_bits(void)
-{
- u32 val = ~__raw_readl(IXP4XX_EXP_CFG2);
-
- if (cpu_is_ixp42x_rev_a0())
- return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP |
- IXP4XX_FEATURE_AES);
- if (cpu_is_ixp42x())
- return val & IXP42X_FEATURE_MASK;
- if (cpu_is_ixp43x())
- return val & IXP43X_FEATURE_MASK;
- return val & IXP46X_FEATURE_MASK;
-}
-
-static inline void ixp4xx_write_feature_bits(u32 value)
-{
- __raw_writel(~value, IXP4XX_EXP_CFG2);
-}
-
-#endif /* _ASM_ARCH_CPU_H */
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index b884eedcd0fc..b2b7301ce503 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -23,7 +23,7 @@
#include "ixp4xx-regs.h"
#ifndef __ASSEMBLER__
-#include <mach/cpu.h>
+#include <linux/soc/ixp4xx/cpu.h>
#endif
/* Platform helper functions and definitions */
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
index 708d085ce39f..74e63d4531aa 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
@@ -45,21 +45,21 @@
* it can be used with the low-level debug code.
*/
#define IXP4XX_PERIPHERAL_BASE_PHYS 0xC8000000
-#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFEF00000)
+#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFEC00000)
#define IXP4XX_PERIPHERAL_REGION_SIZE 0x00013000
/*
* PCI Config registers
*/
#define IXP4XX_PCI_CFG_BASE_PHYS 0xC0000000
-#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFEF13000)
+#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFEC13000)
#define IXP4XX_PCI_CFG_REGION_SIZE 0x00001000
/*
* Expansion BUS Configuration registers
*/
#define IXP4XX_EXP_CFG_BASE_PHYS 0xC4000000
-#define IXP4XX_EXP_CFG_BASE_VIRT 0xFEF14000
+#define IXP4XX_EXP_CFG_BASE_VIRT 0xFEC14000
#define IXP4XX_EXP_CFG_REGION_SIZE 0x00001000
#define IXP4XX_EXP_CS0_OFFSET 0x00
@@ -120,6 +120,7 @@
#define IXP4XX_SSP_BASE_PHYS (IXP4XX_PERIPHERAL_BASE_PHYS + 0x12000)
+/* The UART is explicitly placed at the beginning of the fixmap */
#define IXP4XX_UART1_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x0000)
#define IXP4XX_UART2_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x1000)
#define IXP4XX_PMU_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x2000)
@@ -217,30 +218,30 @@
/*
* PCI Control/Status Registers
*/
-#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
-
-#define PCI_NP_AD IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
-#define PCI_NP_CBE IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
-#define PCI_NP_WDATA IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
-#define PCI_NP_RDATA IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
-#define PCI_CRP_AD_CBE IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
-#define PCI_CRP_WDATA IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
-#define PCI_CRP_RDATA IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
-#define PCI_CSR IXP4XX_PCI_CSR(PCI_CSR_OFFSET)
-#define PCI_ISR IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
-#define PCI_INTEN IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
-#define PCI_DMACTRL IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
-#define PCI_AHBMEMBASE IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
-#define PCI_AHBIOBASE IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
-#define PCI_PCIMEMBASE IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
-#define PCI_AHBDOORBELL IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
-#define PCI_PCIDOORBELL IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
-#define PCI_ATPDMA0_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
-#define PCI_ATPDMA0_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
-#define PCI_ATPDMA0_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
-#define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
-#define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
-#define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
+#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
+
+#define PCI_NP_AD _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
+#define PCI_NP_CBE _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
+#define PCI_NP_WDATA _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
+#define PCI_NP_RDATA _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
+#define PCI_CRP_AD_CBE _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
+#define PCI_CRP_WDATA _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
+#define PCI_CRP_RDATA _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
+#define PCI_CSR _IXP4XX_PCI_CSR(PCI_CSR_OFFSET)
+#define PCI_ISR _IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
+#define PCI_INTEN _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
+#define PCI_DMACTRL _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
+#define PCI_AHBMEMBASE _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
+#define PCI_AHBIOBASE _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
+#define PCI_PCIMEMBASE _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
+#define PCI_AHBDOORBELL _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
+#define PCI_PCIDOORBELL _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
+#define PCI_ATPDMA0_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
+#define PCI_ATPDMA0_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
+#define PCI_ATPDMA0_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
+#define PCI_ATPDMA1_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
+#define PCI_ATPDMA1_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
+#define PCI_ATPDMA1_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
/*
* PCI register values and bit definitions
@@ -299,58 +300,4 @@
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
-/* "fuse" bits of IXP_EXP_CFG2 */
-/* All IXP4xx CPUs */
-#define IXP4XX_FEATURE_RCOMP (1 << 0)
-#define IXP4XX_FEATURE_USB_DEVICE (1 << 1)
-#define IXP4XX_FEATURE_HASH (1 << 2)
-#define IXP4XX_FEATURE_AES (1 << 3)
-#define IXP4XX_FEATURE_DES (1 << 4)
-#define IXP4XX_FEATURE_HDLC (1 << 5)
-#define IXP4XX_FEATURE_AAL (1 << 6)
-#define IXP4XX_FEATURE_HSS (1 << 7)
-#define IXP4XX_FEATURE_UTOPIA (1 << 8)
-#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9)
-#define IXP4XX_FEATURE_NPEC_ETH (1 << 10)
-#define IXP4XX_FEATURE_RESET_NPEA (1 << 11)
-#define IXP4XX_FEATURE_RESET_NPEB (1 << 12)
-#define IXP4XX_FEATURE_RESET_NPEC (1 << 13)
-#define IXP4XX_FEATURE_PCI (1 << 14)
-#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16)
-#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22)
-#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \
- IXP4XX_FEATURE_USB_DEVICE | \
- IXP4XX_FEATURE_HASH | \
- IXP4XX_FEATURE_AES | \
- IXP4XX_FEATURE_DES | \
- IXP4XX_FEATURE_HDLC | \
- IXP4XX_FEATURE_AAL | \
- IXP4XX_FEATURE_HSS | \
- IXP4XX_FEATURE_UTOPIA | \
- IXP4XX_FEATURE_NPEB_ETH0 | \
- IXP4XX_FEATURE_NPEC_ETH | \
- IXP4XX_FEATURE_RESET_NPEA | \
- IXP4XX_FEATURE_RESET_NPEB | \
- IXP4XX_FEATURE_RESET_NPEC | \
- IXP4XX_FEATURE_PCI | \
- IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \
- IXP4XX_FEATURE_XSCALE_MAX_FREQ)
-
-
-/* IXP43x/46x CPUs */
-#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15)
-#define IXP4XX_FEATURE_USB_HOST (1 << 18)
-#define IXP4XX_FEATURE_NPEA_ETH (1 << 19)
-#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \
- IXP4XX_FEATURE_ECC_TIMESYNC | \
- IXP4XX_FEATURE_USB_HOST | \
- IXP4XX_FEATURE_NPEA_ETH)
-
-/* IXP46x CPU (including IXP455) only */
-#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20)
-#define IXP4XX_FEATURE_RSA (1 << 21)
-#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \
- IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \
- IXP4XX_FEATURE_RSA)
-
#endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index 6d403fe0bf52..d8b4df96db08 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -80,20 +80,6 @@ extern unsigned long ixp4xx_exp_bus_size;
#define IXP4XX_UART_XTAL 14745600
/*
- * This structure provide a means for the board setup code
- * to give information to th pata_ixp4xx driver. It is
- * passed as platform_data.
- */
-struct ixp4xx_pata_data {
- volatile u32 *cs0_cfg;
- volatile u32 *cs1_cfg;
- unsigned long cs0_bits;
- unsigned long cs1_bits;
- void __iomem *cs0;
- void __iomem *cs1;
-};
-
-/*
* Frequency of clock used for primary clocksource
*/
extern unsigned long ixp4xx_timer_freq;
diff --git a/arch/arm/mach-ixp4xx/ixp4xx-of.c b/arch/arm/mach-ixp4xx/ixp4xx-of.c
index 7449b8319c8a..f9904716ec7f 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx-of.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx-of.c
@@ -9,8 +9,12 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <mach/hardware.h>
-#include <mach/ixp4xx-regs.h>
+/*
+ * These are the only fixed phys-to-virt mappings we ever need;
+ * we put them right after the UART mapping at 0xffc80000-0xffc81fff.
+ */
+#define IXP4XX_EXP_CFG_BASE_PHYS 0xC4000000
+#define IXP4XX_EXP_CFG_BASE_VIRT 0xFEC14000
static struct map_desc ixp4xx_of_io_desc[] __initdata = {
/*
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 6959ad2e3aec..6133cf01cbe4 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -33,6 +33,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include <mach/hardware.h>
#include "irqs.h"
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index a428bb918703..8526a70e401b 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -31,6 +31,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/time.h>
+#include <mach/hardware.h>
#include "irqs.h"
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 9536b8f3c07d..208c700c2455 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -65,14 +65,14 @@ config MACH_OMAP_INNOVATOR
config MACH_OMAP_H2
bool "TI H2 Support"
depends on ARCH_OMAP16XX
- help
+ help
TI OMAP 1610/1611B H2 board support. Say Y here if you have such
a board.
config MACH_OMAP_H3
bool "TI H3 Support"
depends on ARCH_OMAP16XX
- help
+ help
TI OMAP 1710 H3 board support. Say Y here if you have such
a board.
@@ -85,14 +85,14 @@ config MACH_HERALD
config MACH_OMAP_OSK
bool "TI OSK Support"
depends on ARCH_OMAP16XX
- help
+ help
TI OMAP 5912 OSK (OMAP Starter Kit) board support. Say Y here
if you have such a board.
config OMAP_OSK_MISTRAL
bool "Mistral QVGA board Support"
depends on MACH_OMAP_OSK
- help
+ help
The OSK supports an optional add-on board with a Quarter-VGA
touchscreen, PDA-ish buttons, a resume button, bicolor LED,
and camera connector. Say Y here if you have this board.
@@ -100,14 +100,14 @@ config OMAP_OSK_MISTRAL
config MACH_OMAP_PERSEUS2
bool "TI Perseus2"
depends on ARCH_OMAP730
- help
+ help
Support for TI OMAP 730 Perseus2 board. Say Y here if you have such
a board.
config MACH_OMAP_FSAMPLE
bool "TI F-Sample"
depends on ARCH_OMAP730
- help
+ help
Support for TI OMAP 850 F-Sample board. Say Y here if you have such
a board.
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index d02fe63dab59..14beb59e5f7b 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -26,7 +26,6 @@
extern struct omap_domain_base cm_base;
extern struct omap_domain_base cm2_base;
-extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2);
# endif
/*
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index b7ea609386d5..e2d069fe67f1 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -38,19 +38,6 @@ struct omap_domain_base cm2_base;
#define CM_SINGLE_INSTANCE 0x2
/**
- * omap2_set_globals_cm - set the CM/CM2 base addresses (for early use)
- * @cm: CM base virtual address
- * @cm2: CM2 base virtual address (if present on the booted SoC)
- *
- * XXX Will be replaced when the PRM/CM drivers are completed.
- */
-void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
-{
- cm_base.va = cm;
- cm2_base.va = cm2;
-}
-
-/**
* cm_split_idlest_reg - split CM_IDLEST reg addr into its components
* @idlest_reg: CM_IDLEST* virtual address
* @prcm_inst: pointer to an s16 to return the PRCM instance offset
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index db446f271f5d..32f58f58515e 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -130,7 +130,6 @@ void am33xx_init_early(void);
void am35xx_init_early(void);
void ti814x_init_early(void);
void ti816x_init_early(void);
-void am33xx_init_early(void);
void am43xx_init_early(void);
void am43xx_init_late(void);
void omap4430_init_early(void);
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 73338cf80d76..062d431fc33a 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -136,11 +136,6 @@ struct omap3_control_regs {
static struct omap3_control_regs control_context;
#endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */
-void __init omap2_set_globals_control(void __iomem *ctrl)
-{
- omap2_ctrl_base = ctrl;
-}
-
u8 omap_ctrl_readb(u16 offset)
{
u32 val;
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index eceb4b09adb2..c4ca30ba1790 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -528,7 +528,6 @@ extern int omap3_ctrl_save_padconf(void);
void omap3_ctrl_init(void);
int omap2_control_base_init(void);
int omap_control_init(void);
-void omap2_set_globals_control(void __iomem *ctrl);
void __init omap3_control_legacy_iomap_init(void);
#else
#define omap_ctrl_readb(x) 0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 65934b2924fb..12b26e04686f 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3776,6 +3776,7 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
struct omap_hwmod_ocp_if *oi;
struct clockdomain *clkdm;
struct clk_hw_omap *clk;
+ struct clk_hw *hw;
if (!oh)
return NULL;
@@ -3792,7 +3793,14 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
c = oi->_clk;
}
- clk = to_clk_hw_omap(__clk_get_hw(c));
+ hw = __clk_get_hw(c);
+ if (!hw)
+ return NULL;
+
+ clk = to_clk_hw_omap(hw);
+ if (!clk)
+ return NULL;
+
clkdm = clk->clkdm;
if (!clkdm)
return NULL;
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index 56f2c0bcae5a..bf0d25fd2cea 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -8,6 +8,7 @@
#include <linux/cpuidle.h>
#include <linux/platform_data/pm33xx.h>
+#include <linux/suspend.h>
#include <asm/cpuidle.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
@@ -324,6 +325,44 @@ static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void)
return NULL;
}
+#ifdef CONFIG_SUSPEND
+/*
+ * Block system suspend initially. Later on, pm33xx sets up its own
+ * platform_suspend_ops after probe. That also depends on wkup_m3_ipc being
+ * loaded and am335x-pm-firmware.elf having been booted.
+ */
+static int amx3_suspend_block(suspend_state_t state)
+{
+ pr_warn("PM not initialized for pm33xx, wkup_m3_ipc, or am335x-pm-firmware.elf\n");
+
+ return -EINVAL;
+}
+
+static int amx3_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops amx3_blocked_pm_ops = {
+ .begin = amx3_suspend_block,
+ .valid = amx3_pm_valid,
+};
+
+static void __init amx3_block_suspend(void)
+{
+ suspend_set_ops(&amx3_blocked_pm_ops);
+}
+#else
+static inline void amx3_block_suspend(void)
+{
+}
+#endif /* CONFIG_SUSPEND */
+
int __init amx3_common_pm_init(void)
{
struct am33xx_pm_platform_data *pdata;
@@ -337,6 +376,7 @@ int __init amx3_common_pm_init(void)
devinfo.size_data = sizeof(*pdata);
devinfo.id = -1;
platform_device_register_full(&devinfo);
+ amx3_block_suspend();
return 0;
}
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index d23970bd638d..f70fb9c4b0cb 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
+ break;
default:
break;
}
diff --git a/arch/arm/mach-s3c/mach-rx1950.c b/arch/arm/mach-s3c/mach-rx1950.c
index a3f46aa61c45..313e080e179e 100644
--- a/arch/arm/mach-s3c/mach-rx1950.c
+++ b/arch/arm/mach-s3c/mach-rx1950.c
@@ -271,7 +271,6 @@ static int rx1950_led_blink_set(struct gpio_desc *desc, int state,
break;
default:
return -EINVAL;
- break;
}
if (delay_on && delay_off && !*delay_on && !*delay_off)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35f43d0aa056..8355c3895894 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -601,8 +601,6 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
-config VERIFY_PERMISSION_FAULT
- bool
endif
config CPU_HAS_ASID
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index f7cc5d68444b..f81bceacc660 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -17,31 +17,5 @@ ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
uaccess_disable ip @ disable userspace access
-
- /*
- * V6 code adjusts the returned DFSR.
- * New designs should not need to patch up faults.
- */
-
-#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
- /*
- * Detect erroneous permission failures and fix
- */
- ldr r3, =0x40d @ On permission fault
- and r3, r1, r3
- cmp r3, #0x0d
- bne do_DataAbort
-
- mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
- isb
- mrc p15, 0, ip, c7, c4, 0 @ Read the PAR
- and r3, ip, #0x7b @ On translation fault
- cmp r3, #0x0b
- bne do_DataAbort
- bic r1, r1, #0xf @ Fix up FSR FS[5:0]
- and ip, ip, #0x7e
- orr r1, r1, ip, LSR #1
-#endif
-
b do_DataAbort
ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9d4744a632c6..6162a070a410 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -125,11 +125,22 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = __pfn_to_phys(pfn);
+ unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;
if (__phys_to_pfn(addr) != pfn)
return 0;
- return memblock_is_map_memory(addr);
+ /*
+ * If the address is less than pageblock_size bytes away from a present
+ * memory chunk, there will still be a memory map entry for it because we
+ * round the freed memory map to pageblock boundaries.
+ */
+ if (memblock_overlaps_region(&memblock.memory,
+ ALIGN_DOWN(addr, pageblock_size),
+ pageblock_size))
+ return 1;
+
+ return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 000e8210000b..80fb5a4a5c05 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/memblock.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@@ -284,7 +285,8 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
* Don't allow RAM to be mapped with mismatched attributes - this
* causes problems with ARMv6+
*/
- if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
+ if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
+ mtype != MT_MEMORY_RW))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c1e12aab67b8..7583bda5ea7d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1121,31 +1121,32 @@ void __init debug_ll_io_init(void)
}
#endif
-static void * __initdata vmalloc_min =
- (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
+static unsigned long __initdata vmalloc_size = 240 * SZ_1M;
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 240m.
+ * area - the default is 240MiB.
*/
static int __init early_vmalloc(char *arg)
{
unsigned long vmalloc_reserve = memparse(arg, NULL);
+ unsigned long vmalloc_max;
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
- pr_warn("vmalloc area too small, limiting to %luMB\n",
+ pr_warn("vmalloc area is too small, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
- if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
- vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
- pr_warn("vmalloc area is too big, limiting to %luMB\n",
+ vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);
+ if (vmalloc_reserve > vmalloc_max) {
+ vmalloc_reserve = vmalloc_max;
+ pr_warn("vmalloc area is too big, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
- vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
+ vmalloc_size = vmalloc_reserve;
return 0;
}
early_param("vmalloc", early_vmalloc);
@@ -1165,7 +1166,8 @@ void __init adjust_lowmem_bounds(void)
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
- vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
+ PAGE_OFFSET + PHYS_OFFSET;
/*
* The first usable region must be PMD aligned. Mark its start
@@ -1246,7 +1248,7 @@ void __init adjust_lowmem_bounds(void)
memblock_set_current_limit(memblock_limit);
}
-static inline void prepare_page_table(void)
+static __init void prepare_page_table(void)
{
unsigned long addr;
phys_addr_t end;
@@ -1457,8 +1459,6 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
- phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
- phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t start, end;
u64 i;
@@ -1466,55 +1466,126 @@ static void __init map_lowmem(void)
for_each_mem_range(i, &start, &end) {
struct map_desc map;
+ pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n",
+ (long long)start, (long long)end);
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
break;
- if (end < kernel_x_start) {
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY_RWX;
+ /*
+ * If our kernel image is in the VMALLOC area we need to remove
+ * the kernel physical memory from lowmem since the kernel will
+ * be mapped separately.
+ *
+ * The kernel will typically be at the very start of lowmem,
+ * but any placement relative to memory ranges is possible.
+ *
+ * If the memblock contains the kernel, we have to chisel out
+ * the kernel memory from it and map each part separately. We
+ * get 6 different theoretical cases:
+ *
+ * +--------+ +--------+
+ * +-- start --+ +--------+ | Kernel | | Kernel |
+ * | | | Kernel | | case 2 | | case 5 |
+ * | | | case 1 | +--------+ | | +--------+
+ * | Memory | +--------+ | | | Kernel |
+ * | range | +--------+ | | | case 6 |
+ * | | | Kernel | +--------+ | | +--------+
+ * | | | case 3 | | Kernel | | |
+ * +-- end ----+ +--------+ | case 4 | | |
+ * +--------+ +--------+
+ */
- create_mapping(&map);
- } else if (start >= kernel_x_end) {
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY_RW;
+ /* Case 5: kernel covers range, don't map anything, should be rare */
+ if ((start > kernel_sec_start) && (end < kernel_sec_end))
+ break;
- create_mapping(&map);
- } else {
- /* This better cover the entire kernel */
- if (start < kernel_x_start) {
+ /* Cases where the kernel is starting inside the range */
+ if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) {
+ /* Case 6: kernel is embedded in the range, we need two mappings */
+ if ((start < kernel_sec_start) && (end > kernel_sec_end)) {
+ /* Map memory below the kernel */
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
- map.length = kernel_x_start - start;
+ map.length = kernel_sec_start - start;
map.type = MT_MEMORY_RW;
-
create_mapping(&map);
- }
-
- map.pfn = __phys_to_pfn(kernel_x_start);
- map.virtual = __phys_to_virt(kernel_x_start);
- map.length = kernel_x_end - kernel_x_start;
- map.type = MT_MEMORY_RWX;
-
- create_mapping(&map);
-
- if (kernel_x_end < end) {
- map.pfn = __phys_to_pfn(kernel_x_end);
- map.virtual = __phys_to_virt(kernel_x_end);
- map.length = end - kernel_x_end;
+ /* Map memory above the kernel */
+ map.pfn = __phys_to_pfn(kernel_sec_end);
+ map.virtual = __phys_to_virt(kernel_sec_end);
+ map.length = end - kernel_sec_end;
map.type = MT_MEMORY_RW;
-
create_mapping(&map);
+ break;
}
+ /* Case 1: kernel and range start at the same address, should be common */
+ if (kernel_sec_start == start)
+ start = kernel_sec_end;
+ /* Case 3: kernel and range end at the same address, should be rare */
+ if (kernel_sec_end == end)
+ end = kernel_sec_start;
+ } else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) {
+ /* Case 2: kernel ends inside range, starts below it */
+ start = kernel_sec_end;
+ } else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) {
+ /* Case 4: kernel starts inside range, ends above it */
+ end = kernel_sec_start;
}
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_RW;
+ create_mapping(&map);
}
}
+static void __init map_kernel(void)
+{
+ /*
+ * We use the well known kernel section start and end and split the area in the
+ * middle like this:
+ * . .
+ * | RW memory |
+ * +----------------+ kernel_x_start
+ * | Executable |
+ * | kernel memory |
+ * +----------------+ kernel_x_end / kernel_nx_start
+ * | Non-executable |
+ * | kernel memory |
+ * +----------------+ kernel_nx_end
+ * | RW memory |
+ * . .
+ *
+ * Notice that we are dealing with section sized mappings here so all of this
+ * will be bumped to the closest section boundary. This means that some of the
+ * non-executable part of the kernel memory is actually mapped as executable.
+ * This will only persist until we turn on proper memory management later on
+ * and we remap the whole kernel with page granularity.
+ */
+ phys_addr_t kernel_x_start = kernel_sec_start;
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t kernel_nx_start = kernel_x_end;
+ phys_addr_t kernel_nx_end = kernel_sec_end;
+ struct map_desc map;
+
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+ map.type = MT_MEMORY_RWX;
+ create_mapping(&map);
+
+ /* If the nx part is small it may end up covered by the tail of the RWX section */
+ if (kernel_x_end == kernel_nx_end)
+ return;
+
+ map.pfn = __phys_to_pfn(kernel_nx_start);
+ map.virtual = __phys_to_virt(kernel_nx_start);
+ map.length = kernel_nx_end - kernel_nx_start;
+ map.type = MT_MEMORY_RW;
+ create_mapping(&map);
+}
+
#ifdef CONFIG_ARM_PV_FIXUP
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
@@ -1645,9 +1716,18 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
+ pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+ kernel_sec_start, kernel_sec_end);
+
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
+ pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
+ /*
+ * After this point early_alloc(), i.e. the memblock allocator, can
+ * be used
+ */
+ map_kernel();
dma_contiguous_remap();
early_fixmap_shutdown();
devicemaps_init(mdesc);
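map_kernel() deals in section mappings, so the RWX/RW split lands on SECTION_SIZE boundaries via round_up(__pa(__init_end), SECTION_SIZE). A small stand-alone illustration of that rounding, assuming 1 MiB sections (non-LPAE) and a made-up __init_end physical address, is:

#include <stdint.h>
#include <stdio.h>

#define SECTION_SIZE	0x100000UL			/* 1 MiB sections on non-LPAE ARM */
#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))	/* simplified form of the kernel helper */

int main(void)
{
	uint32_t init_end_pa = 0x40a2c000;	/* hypothetical __pa(__init_end) */
	uint32_t kernel_x_end = round_up(init_end_pa, SECTION_SIZE);

	/* Prints 0x40b00000: everything below it stays RWX, the rest of the kernel is RW. */
	printf("kernel_x_end = 0x%08x\n", (unsigned int)kernel_x_end);
	return 0;
}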
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 897634d0a67c..a951276f0547 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1602,6 +1602,9 @@ exit:
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/arm/probes/kprobes/test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c
index 456c181a7bfe..4e11f0b760f8 100644
--- a/arch/arm/probes/kprobes/test-thumb.c
+++ b/arch/arm/probes/kprobes/test-thumb.c
@@ -441,21 +441,21 @@ void kprobe_thumb32_test_cases(void)
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
- TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]",
+ TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
- TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]",
+ TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
- TEST_RRX("tbh [r",1,9f, ", r",14,1,"]",
+ TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
@@ -468,10 +468,10 @@ void kprobe_thumb32_test_cases(void)
TEST_UNSUPPORTED("strexb r0, r1, [r2]")
TEST_UNSUPPORTED("strexh r0, r1, [r2]")
- TEST_UNSUPPORTED("strexd r0, r1, [r2]")
+ TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]")
TEST_UNSUPPORTED("ldrexb r0, [r1]")
TEST_UNSUPPORTED("ldrexh r0, [r1]")
- TEST_UNSUPPORTED("ldrexd r0, [r1]")
+ TEST_UNSUPPORTED("ldrexd r0, r1, [r1]")
TEST_GROUP("Data-processing (shifted register) and (modified immediate)")
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 87de1f63f649..4a5c50f67ced 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -33,39 +33,26 @@ _dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
$(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
quiet_cmd_gen_mach = GEN $@
- cmd_gen_mach = mkdir -p $(dir $@) && \
- $(AWK) -f $(filter-out $(PHONY),$^) > $@
+ cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@
$(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(call if_changed,gen_mach)
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis common,$* \
--offset __NR_SYSCALL_BASE $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@
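+# $* is the pattern-rule stem ("oabi" or "eabi"), so --abis receives
+# "common,oabi" or "common,eabi" for the matching generated target.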
quiet_cmd_sysnr = SYSNR $@
- cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
- '$(syshdr_abi_$(basetarget))'
+ cmd_sysnr = $(CONFIG_SHELL) $(sysnr) $< $@
-$(uapi)/unistd-oabi.h: abis := common,oabi
-$(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE
+$(uapi)/unistd-%.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(uapi)/unistd-eabi.h: abis := common,eabi
-$(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-sysnr_abi_unistd-nr := common,oabi,eabi,compat
$(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE
$(call if_changed,sysnr)
-$(gen)/calls-oabi.S: abis := common,oabi
-$(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
-$(gen)/calls-eabi.S: abis := common,eabi
-$(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE
+$(gen)/calls-%.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
diff --git a/arch/arm/tools/syscallnr.sh b/arch/arm/tools/syscallnr.sh
index df3ccd0ca831..9e386770b6b3 100644
--- a/arch/arm/tools/syscallnr.sh
+++ b/arch/arm/tools/syscallnr.sh
@@ -2,14 +2,13 @@
# SPDX-License-Identifier: GPL-2.0
in="$1"
out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
align=1
fileguard=_ASM_ARM_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | tail -n1 | (
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+" "$in" | sort -n | tail -n1 | (
echo "#ifndef ${fileguard}
#define ${fileguard} 1
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8ad576ecd0f1..7f1c106b746f 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -29,6 +29,7 @@
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/pvclock_gtod.h>
+#include <linux/reboot.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/timekeeper_internal.h>
@@ -181,11 +182,18 @@ void xen_reboot(int reason)
BUG_ON(rc);
}
-static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int xen_restart(struct notifier_block *nb, unsigned long action,
+ void *data)
{
xen_reboot(SHUTDOWN_reboot);
+
+ return NOTIFY_DONE;
}
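+/* priority 192 places this handler ahead of the default-priority (128) restart handlers */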
+static struct notifier_block xen_restart_nb = {
+ .notifier_call = xen_restart,
+ .priority = 192,
+};
static void xen_power_off(void)
{
@@ -404,7 +412,7 @@ static int __init xen_pm_init(void)
return -ENODEV;
pm_power_off = xen_power_off;
- arm_pm_restart = xen_restart;
+ register_restart_handler(&xen_restart_nb);
if (!xen_initial_domain()) {
struct timespec64 ts;
xen_read_wallclock(&ts);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index be9083882f97..fdcd54d39c1e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -42,6 +42,7 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPTION
@@ -155,7 +156,6 @@ config ARM64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
- select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
@@ -308,14 +308,6 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA
- bool "Support DMA zone" if EXPERT
- default y
-
-config ZONE_DMA32
- bool "Support DMA32 zone" if EXPERT
- default y
-
config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
def_bool y
@@ -1053,9 +1045,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool y
depends on NUMA
-config HOLES_IN_ZONE
- def_bool y
-
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
@@ -1616,7 +1605,8 @@ config ARM64_BTI_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
- depends on !(CC_IS_CLANG && GCOV_KERNEL)
+ # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Build the kernel with Branch Target Identification annotations
@@ -1810,11 +1800,11 @@ config RANDOMIZE_BASE
If unsure, say N.
config RANDOMIZE_MODULE_REGION_FULL
- bool "Randomize the module region over a 4 GB range"
+ bool "Randomize the module region over a 2 GB range"
depends on RANDOMIZE_BASE
default y
help
- Randomizes the location of the module region inside a 4 GB window
+ Randomizes the location of the module region inside a 2 GB window
covering the core kernel. This way, it is less likely for modules
to leak information about the location of core kernel data structures
but it does imply that function calls between modules and the core
@@ -1822,7 +1812,10 @@ config RANDOMIZE_MODULE_REGION_FULL
When this option is not set, the module region will be randomized over
a limited range that contains the [_stext, _etext] interval of the
- core kernel, so branch relocations are always in range.
+ core kernel, so branch relocations are almost always in range unless
+ ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
+ particular case of region exhaustion, modules might be able to fall
+ back to a larger 2GB area.
config CC_HAVE_STACKPROTECTOR_SYSREG
def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 7336c1fd0dda..b0ce18d4cc98 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -16,6 +16,7 @@ config ARCH_SUNXI
select IRQ_FASTEOI_HIERARCHY_HANDLERS
select PINCTRL
select RESET_CONTROLLER
+ select SUN4I_TIMER
help
This enables support for Allwinner sunxi based SoCs like the A64.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3b5b1c480449..1110d386f3b4 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -21,19 +21,11 @@ LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
- ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
-$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
- else
+ ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
LDFLAGS_vmlinux += --fix-cortex-a53-843419
endif
endif
-ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
- ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-$(warning LSE atomics not supported by binutils)
- endif
-endif
-
cc_has_k_constraint := $(call try-run,echo \
'int main(void) { \
asm volatile("and w0, w0, %w0" :: "K" (4294967295)); \
@@ -149,7 +141,6 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
-core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
@@ -177,10 +168,23 @@ vdso_install:
archprepare:
$(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
+ ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
+ @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
+ endif
+endif
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
+ ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
+ @echo "warning: LSE atomics not supported by binutils" >&2
+ endif
+endif
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+ $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
+ $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 41ce680e5f8d..a96d9d2d8dd8 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -25,6 +25,7 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-libretech-all-h3-it.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-libretech-all-h5-cc.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo-plus2.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-r1s-h5.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-prime.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-zero-plus.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
index 79adea3f8cc1..5b44a979f250 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
@@ -25,6 +25,11 @@
/* Backlight configuration differs per PinePhone revision. */
};
+ bt_sco_codec: bt-sco-codec {
+ #sound-dai-cells = <1>;
+ compatible = "linux,bt-sco";
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -91,6 +96,8 @@
};
&codec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&aif3_pins>;
status = "okay";
};
@@ -426,6 +433,7 @@
&sound {
status = "okay";
+ simple-audio-card,name = "PinePhone";
simple-audio-card,aux-devs = <&codec_analog>, <&speaker_amp>;
simple-audio-card,widgets = "Microphone", "Headset Microphone",
"Microphone", "Internal Microphone",
@@ -447,6 +455,23 @@
"MIC1", "Internal Microphone",
"Headset Microphone", "HBIAS",
"MIC2", "Headset Microphone";
+
+ simple-audio-card,dai-link@2 {
+ format = "dsp_a";
+ frame-master = <&link2_codec>;
+ bitclock-master = <&link2_codec>;
+ bitclock-inversion;
+
+ link2_cpu: cpu {
+ sound-dai = <&bt_sco_codec 0>;
+ };
+
+ link2_codec: codec {
+ sound-dai = <&codec 2>;
+ dai-tdm-slot-num = <1>;
+ dai-tdm-slot-width = <32>;
+ };
+ };
};
&uart0 {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index e22b94c83647..5e66ce1a334f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -79,7 +79,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii-id";
+ phy-mode = "rgmii-txid";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_dc1sw>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 5b30e6c1fa05..6ddb717f2f98 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -131,12 +131,10 @@
};
sound: sound {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "simple-audio-card";
simple-audio-card,name = "sun50i-a64-audio";
- simple-audio-card,format = "i2s";
- simple-audio-card,frame-master = <&cpudai>;
- simple-audio-card,bitclock-master = <&cpudai>;
- simple-audio-card,mclk-fs = <128>;
simple-audio-card,aux-devs = <&codec_analog>;
simple-audio-card,routing =
"Left DAC", "DACL",
@@ -145,12 +143,19 @@
"ADCR", "Right ADC";
status = "disabled";
- cpudai: simple-audio-card,cpu {
- sound-dai = <&dai>;
- };
+ simple-audio-card,dai-link@0 {
+ format = "i2s";
+ frame-master = <&link0_cpu>;
+ bitclock-master = <&link0_cpu>;
+ mclk-fs = <128>;
+
+ link0_cpu: cpu {
+ sound-dai = <&dai>;
+ };
- link_codec: simple-audio-card,codec {
- sound-dai = <&codec>;
+ link0_codec: codec {
+ sound-dai = <&codec 0>;
+ };
};
};
@@ -659,6 +664,18 @@
interrupt-controller;
#interrupt-cells = <3>;
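+ /* /omit-if-no-ref/ lets dtc drop these pin groups from the dtb if nothing references them */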
+ /omit-if-no-ref/
+ aif2_pins: aif2-pins {
+ pins = "PB4", "PB5", "PB6", "PB7";
+ function = "aif2";
+ };
+
+ /omit-if-no-ref/
+ aif3_pins: aif3-pins {
+ pins = "PG10", "PG11", "PG12", "PG13";
+ function = "aif3";
+ };
+
csi_pins: csi-pins {
pins = "PE0", "PE2", "PE3", "PE4", "PE5", "PE6",
"PE7", "PE8", "PE9", "PE10", "PE11";
@@ -799,6 +816,23 @@
};
};
+ timer@1c20c00 {
+ compatible = "allwinner,sun50i-a64-timer",
+ "allwinner,sun8i-a23-timer";
+ reg = <0x01c20c00 0xa0>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24M>;
+ };
+
+ wdt0: watchdog@1c20ca0 {
+ compatible = "allwinner,sun50i-a64-wdt",
+ "allwinner,sun6i-a31-wdt";
+ reg = <0x01c20ca0 0x20>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24M>;
+ };
+
spdif: spdif@1c21000 {
#sound-dai-cells = <0>;
compatible = "allwinner,sun50i-a64-spdif",
@@ -880,7 +914,7 @@
};
codec: codec@1c22e00 {
- #sound-dai-cells = <0>;
+ #sound-dai-cells = <1>;
compatible = "allwinner,sun50i-a64-codec",
"allwinner,sun8i-a33-codec";
reg = <0x01c22e00 0x600>;
@@ -1325,13 +1359,5 @@
#address-cells = <1>;
#size-cells = <0>;
};
-
- wdt0: watchdog@1c20ca0 {
- compatible = "allwinner,sun50i-a64-wdt",
- "allwinner,sun6i-a31-wdt";
- reg = <0x01c20ca0 0x20>;
- interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc24M>;
- };
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts
new file mode 100644
index 000000000000..55bcdf8d1a07
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-r1s-h5.dts
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2021 Chukun Pan <amadeus@jmu.edu.cn>
+ *
+ * Based on sun50i-h5-nanopi-neo-plus2.dts, which is:
+ * Copyright (C) 2017 Antony Antony <antony@phenome.org>
+ * Copyright (C) 2016 ARM Ltd.
+ */
+
+/dts-v1/;
+#include "sun50i-h5.dtsi"
+#include "sun50i-h5-cpu-opp.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "FriendlyARM NanoPi R1S H5";
+ compatible = "friendlyarm,nanopi-r1s-h5", "allwinner,sun50i-h5";
+
+ aliases {
+ ethernet0 = &emac;
+ ethernet1 = &rtl8189etv;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_LAN;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pio 0 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-2 {
+ function = LED_FUNCTION_WAN;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ r-gpio-keys {
+ compatible = "gpio-keys";
+
+ reset {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ reg_gmac_3v3: gmac-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "gmac-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <100000>;
+ enable-active-high;
+ gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_vcc3v3: vcc3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_usb0_vbus: usb0-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
+ status = "okay";
+ };
+
+ vdd_cpux: gpio-regulator {
+ compatible = "regulator-gpio";
+ regulator-name = "vdd-cpux";
+ regulator-type = "voltage";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <50>; /* 4ms */
+ gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0x1>;
+ states = <1100000 0x0>, <1300000 0x1>;
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpux>;
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&emac_rgmii_pins>;
+ phy-supply = <&reg_gmac_3v3>;
+ phy-handle = <&ext_rgmii_phy>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+};
+
+&external_mdio {
+ ext_rgmii_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <7>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "microchip,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ status = "okay";
+};
+
+&mmc1 {
+ vmmc-supply = <&reg_vcc3v3>;
+ vqmmc-supply = <&reg_vcc3v3>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ rtl8189etv: sdio_wifi@1 {
+ reg = <1>;
+ };
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pa_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbphy {
+ /* USB Type-A port's VBUS is always on */
+ usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ usb0_vbus-supply = <&reg_usb0_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 50815867ce7b..30d396e8c762 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -271,6 +271,15 @@
};
};
+ timer@3009000 {
+ compatible = "allwinner,sun50i-h6-timer",
+ "allwinner,sun8i-a23-timer";
+ reg = <0x03009000 0xa0>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24M>;
+ };
+
watchdog: watchdog@30090a0 {
compatible = "allwinner,sun50i-h6-wdt",
"allwinner,sun6i-a31-wdt";
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index a58ccecfcb55..faa0a79a34f5 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -48,6 +48,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxm-rbox-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-libretech-pc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-vega-s96.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-wetek-core2.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-bananapi-m5.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-c4.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-hc4.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index 895c43c7af9f..3f5254eeb47b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1871,6 +1871,7 @@
status = "disabled";
clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
clock-names = "xtal", "pclk", "baud";
+ fifo-size = <128>;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 793d48f72390..00c6f53290d4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -2317,6 +2317,7 @@
clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
clock-names = "xtal", "pclk", "baud";
status = "disabled";
+ fifo-size = <128>;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 3d00404aae0f..6b457b2c30a4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -301,6 +301,7 @@
reg = <0x0 0x84c0 0x0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
+ fifo-size = <128>;
};
uart_B: serial@84dc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index 66d67524b031..3cf4ecb6d52e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -171,11 +171,16 @@
sound {
compatible = "amlogic,axg-sound-card";
model = "KHADAS-VIM3";
- audio-aux-devs = <&tdmout_a>;
+ audio-aux-devs = <&tdmin_a>, <&tdmout_a>;
audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
"TDMOUT_A IN 1", "FRDDR_B OUT 0",
"TDMOUT_A IN 2", "FRDDR_C OUT 0",
- "TDM_A Playback", "TDMOUT_A OUT";
+ "TDM_A Playback", "TDMOUT_A OUT",
+ "TDMIN_A IN 0", "TDM_A Capture",
+ "TDMIN_A IN 3", "TDM_A Loopback",
+ "TODDR_A IN 0", "TDMIN_A OUT",
+ "TODDR_B IN 0", "TDMIN_A OUT",
+ "TODDR_C IN 0", "TDMIN_A OUT";
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
@@ -198,8 +203,20 @@
sound-dai = <&frddr_c>;
};
- /* 8ch hdmi interface */
dai-link-3 {
+ sound-dai = <&toddr_a>;
+ };
+
+ dai-link-4 {
+ sound-dai = <&toddr_b>;
+ };
+
+ dai-link-5 {
+ sound-dai = <&toddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-6 {
sound-dai = <&tdmif_a>;
dai-format = "i2s";
dai-tdm-slot-tx-mask-0 = <1 1>;
@@ -214,7 +231,7 @@
};
/* hdmi glue */
- dai-link-4 {
+ dai-link-7 {
sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
codec {
@@ -454,10 +471,26 @@
status = "okay";
};
+&tdmin_a {
+ status = "okay";
+};
+
&tdmout_a {
status = "okay";
};
+&toddr_a {
+ status = "okay";
+};
+
+&toddr_b {
+ status = "okay";
+};
+
+&toddr_c {
+ status = "okay";
+};
+
&tohdmitx {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
new file mode 100644
index 000000000000..effaa138b5f9
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 BayLibre SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "meson-sm1.dtsi"
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/sound/meson-g12a-toacodec.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ compatible = "bananapi,bpi-m5", "amlogic,sm1";
+ model = "Banana Pi BPI-M5";
+
+ adc_keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 2>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+
+ key {
+ label = "SW3";
+ linux,code = <BTN_3>;
+ press-threshold-microvolt = <1700000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart_AO;
+ ethernet0 = &ethmac;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ /* TOFIX: handle CVBS_DET on SARADC channel 0 */
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key {
+ label = "SW1";
+ linux,code = <BTN_1>;
+ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ green {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+
+ blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ emmc_1v8: regulator-emmc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "EMMC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ dc_in: regulator-dc_in {
+ compatible = "regulator-fixed";
+ regulator-name = "DC_IN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ vddio_c: regulator-vddio_c {
+ compatible = "regulator-gpio";
+ regulator-name = "VDDIO_C";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+
+ gpios = <&gpio_ao GPIOAO_6 GPIO_OPEN_DRAIN>;
+ gpios-states = <1>;
+
+ states = <1800000 0>,
+ <3300000 1>;
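+ /* each states entry is <microvolts gpio-state>: state 0 selects 1.8 V, state 1 selects 3.3 V */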
+ };
+
+ tflash_vdd: regulator-tflash_vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "TFLASH_VDD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_in>;
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ vddao_1v8: regulator-vddao_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_in>;
+ regulator-always-on;
+ };
+
+ vddcpu: regulator-vddcpu {
+ /*
+ * SY8120B1ABC DC/DC Regulator.
+ */
+ compatible = "pwm-regulator";
+
+ regulator-name = "VDDCPU";
+ regulator-min-microvolt = <690000>;
+ regulator-max-microvolt = <1050000>;
+
+ vin-supply = <&dc_in>;
+
+ pwms = <&pwm_AO_cd 1 1250 0>;
+ pwm-dutycycle-range = <100 0>;
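+ /* 1250 ns period (800 kHz); the <100 0> range maps higher duty cycles to lower voltages */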
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* USB Hub Power Enable */
+ vl_pwr_en: regulator-vl_pwr_en {
+ compatible = "regulator-fixed";
+ regulator-name = "VL_PWR_EN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_in>;
+
+ gpio = <&gpio GPIOH_6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "BPI-M5";
+ audio-widgets = "Line", "Lineout";
+ audio-aux-devs = <&tdmout_b>, <&tdmout_c>,
+ <&tdmin_a>, <&tdmin_b>, <&tdmin_c>;
+ audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_B IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_B IN 2", "FRDDR_C OUT 1",
+ "TDM_B Playback", "TDMOUT_B OUT",
+ "TDMOUT_C IN 0", "FRDDR_A OUT 2",
+ "TDMOUT_C IN 1", "FRDDR_B OUT 2",
+ "TDMOUT_C IN 2", "FRDDR_C OUT 2",
+ "TDM_C Playback", "TDMOUT_C OUT",
+ "TDMIN_A IN 4", "TDM_B Loopback",
+ "TDMIN_B IN 4", "TDM_B Loopback",
+ "TDMIN_C IN 4", "TDM_B Loopback",
+ "TDMIN_A IN 5", "TDM_C Loopback",
+ "TDMIN_B IN 5", "TDM_C Loopback",
+ "TDMIN_C IN 5", "TDM_C Loopback",
+ "TODDR_A IN 0", "TDMIN_A OUT",
+ "TODDR_B IN 0", "TDMIN_A OUT",
+ "TODDR_C IN 0", "TDMIN_A OUT",
+ "TODDR_A IN 1", "TDMIN_B OUT",
+ "TODDR_B IN 1", "TDMIN_B OUT",
+ "TODDR_C IN 1", "TDMIN_B OUT",
+ "TODDR_A IN 2", "TDMIN_C OUT",
+ "TODDR_B IN 2", "TDMIN_C OUT",
+ "TODDR_C IN 2", "TDMIN_C OUT",
+ "Lineout", "ACODEC LOLP",
+ "Lineout", "ACODEC LORP";
+
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ dai-link-3 {
+ sound-dai = <&toddr_a>;
+ };
+
+ dai-link-4 {
+ sound-dai = <&toddr_b>;
+ };
+
+ dai-link-5 {
+ sound-dai = <&toddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-6 {
+ sound-dai = <&tdmif_b>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+ };
+
+ codec-1 {
+ sound-dai = <&toacodec TOACODEC_IN_B>;
+ };
+ };
+
+ /* i2s jack output interface */
+ dai-link-7 {
+ sound-dai = <&tdmif_c>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_C>;
+ };
+
+ codec-1 {
+ sound-dai = <&toacodec TOACODEC_IN_C>;
+ };
+ };
+
+ /* hdmi glue */
+ dai-link-8 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ /* acodec glue */
+ dai-link-9 {
+ sound-dai = <&toacodec TOACODEC_OUT>;
+
+ codec {
+ sound-dai = <&acodec>;
+ };
+ };
+ };
+};
+
+&acodec {
+ AVDD-supply = <&vddao_1v8>;
+ status = "okay";
+};
+
+&arb {
+ status = "okay";
+};
+
+&clkc_audio {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu1 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU1_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu2 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU2_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu3 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU3_CLK>;
+ clock-latency = <50000>;
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&ext_mdio {
+ external_phy: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ max-speed = <1000>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_14 */
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&ethmac {
+ pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii-txid";
+ phy-handle = <&external_phy>;
+};
+
+&frddr_a {
+ status = "okay";
+};
+
+&frddr_b {
+ status = "okay";
+};
+
+&frddr_c {
+ status = "okay";
+};
+
+&gpio {
+ gpio-line-names =
+ /* GPIOZ */
+ "ETH_MDIO", /* GPIOZ_0 */
+ "ETH_MDC", /* GPIOZ_1 */
+ "ETH_RXCLK", /* GPIOZ_2 */
+ "ETH_RX_DV", /* GPIOZ_3 */
+ "ETH_RXD0", /* GPIOZ_4 */
+ "ETH_RXD1", /* GPIOZ_5 */
+ "ETH_RXD2", /* GPIOZ_6 */
+ "ETH_RXD3", /* GPIOZ_7 */
+ "ETH_TXCLK", /* GPIOZ_8 */
+ "ETH_TXEN", /* GPIOZ_9 */
+ "ETH_TXD0", /* GPIOZ_10 */
+ "ETH_TXD1", /* GPIOZ_11 */
+ "ETH_TXD2", /* GPIOZ_12 */
+ "ETH_TXD3", /* GPIOZ_13 */
+ "ETH_INTR", /* GPIOZ_14 */
+ "ETH_NRST", /* GPIOZ_15 */
+ /* GPIOH */
+ "HDMI_SDA", /* GPIOH_0 */
+ "HDMI_SCL", /* GPIOH_1 */
+ "HDMI_HPD", /* GPIOH_2 */
+ "HDMI_CEC", /* GPIOH_3 */
+ "VL-RST_N", /* GPIOH_4 */
+ "CON1-P36", /* GPIOH_5 */
+ "VL-PWREN", /* GPIOH_6 */
+ "WiFi_3V3_1V8", /* GPIOH_7 */
+ "TFLASH_VDD_EN", /* GPIOH_8 */
+ /* BOOT */
+ "eMMC_D0", /* BOOT_0 */
+ "eMMC_D1", /* BOOT_1 */
+ "eMMC_D2", /* BOOT_2 */
+ "eMMC_D3", /* BOOT_3 */
+ "eMMC_D4", /* BOOT_4 */
+ "eMMC_D5", /* BOOT_5 */
+ "eMMC_D6", /* BOOT_6 */
+ "eMMC_D7", /* BOOT_7 */
+ "eMMC_CLK", /* BOOT_8 */
+ "",
+ "eMMC_CMD", /* BOOT_10 */
+ "",
+ "eMMC_RST#", /* BOOT_12 */
+ "eMMC_DS", /* BOOT_13 */
+ /* GPIOC */
+ "SD_D0_B", /* GPIOC_0 */
+ "SD_D1_B", /* GPIOC_1 */
+ "SD_D2_B", /* GPIOC_2 */
+ "SD_D3_B", /* GPIOC_3 */
+ "SD_CLK_B", /* GPIOC_4 */
+ "SD_CMD_B", /* GPIOC_5 */
+ "CARD_EN_DET", /* GPIOC_6 */
+ "",
+ /* GPIOA */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "",
+ "CON1-P27", /* GPIOA_14 */
+ "CON1-P28", /* GPIOA_15 */
+ /* GPIOX */
+ "CON1-P16", /* GPIOX_0 */
+ "CON1-P18", /* GPIOX_1 */
+ "CON1-P22", /* GPIOX_2 */
+ "CON1-P11", /* GPIOX_3 */
+ "CON1-P13", /* GPIOX_4 */
+ "CON1-P07", /* GPIOX_5 */
+ "CON1-P33", /* GPIOX_6 */
+ "CON1-P15", /* GPIOX_7 */
+ "CON1-P19", /* GPIOX_8 */
+ "CON1-P21", /* GPIOX_9 */
+ "CON1-P24", /* GPIOX_10 */
+ "CON1-P23", /* GPIOX_11 */
+ "CON1-P08", /* GPIOX_12 */
+ "CON1-P10", /* GPIOX_13 */
+ "CON1-P29", /* GPIOX_14 */
+ "CON1-P31", /* GPIOX_15 */
+ "CON1-P26", /* GPIOX_16 */
+ "CON1-P03", /* GPIOX_17 */
+ "CON1-P05", /* GPIOX_18 */
+ "CON1-P32"; /* GPIOX_19 */
+
+ /*
+ * WARNING: The USB Hub on the BPI-M5 needs a reset signal
+ * to be turned high in order to be detected by the USB Controller.
+ * This signal should be handled by a USB-specific power sequence
+ * in order to reset the Hub when the USB bus is powered down.
+ */
+ usb-hub {
+ gpio-hog;
+ gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "usb-hub-reset";
+ };
+};
+
+&gpio_ao {
+ gpio-line-names =
+ /* GPIOAO */
+ "DEBUG TX", /* GPIOAO_0 */
+ "DEBUG RX", /* GPIOAO_1 */
+ "SYS_LED2", /* GPIOAO_2 */
+ "UPDATE_KEY", /* GPIOAO_3 */
+ "CON1-P40", /* GPIOAO_4 */
+ "IR_IN", /* GPIOAO_5 */
+ "TF_3V3N_1V8_EN", /* GPIOAO_6 */
+ "CON1-P35", /* GPIOAO_7 */
+ "CON1-P12", /* GPIOAO_8 */
+ "CON1-P37", /* GPIOAO_9 */
+ "CON1-P38", /* GPIOAO_10 */
+ "SYS_LED", /* GPIOAO_11 */
+ /* GPIOE */
+ "VDDEE_PWM", /* GPIOE_0 */
+ "VDDCPU_PWM", /* GPIOE_1 */
+ "TF_PWR_EN"; /* GPIOE_2 */
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&dc_in>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&pwm_AO_cd {
+ pinctrl-0 = <&pwm_ao_d_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&xtal>;
+ clock-names = "clkin1";
+ status = "okay";
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddao_1v8>;
+};
+
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_c_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_c_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <50000000>;
+ disable-wp;
+
+ /* TOFIX: SD card is barely usable in SDR modes */
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&tflash_vdd>;
+ vqmmc-supply = <&vddio_c>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ max-frequency = <200000000>;
+ disable-wp;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&emmc_1v8>;
+};
+
+&tdmif_b {
+ status = "okay";
+};
+
+&tdmif_c {
+ status = "okay";
+};
+
+&tdmin_a {
+ status = "okay";
+};
+
+&tdmin_b {
+ status = "okay";
+};
+
+&tdmin_c {
+ status = "okay";
+};
+
+&tdmout_b {
+ status = "okay";
+};
+
+&tdmout_c {
+ status = "okay";
+};
+
+&toacodec {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
+
+&toddr_a {
+ status = "okay";
+};
+
+&toddr_b {
+ status = "okay";
+};
+
+&toddr_c {
+ status = "okay";
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb2_phy0 {
+ phy-supply = <&dc_in>;
+};
+
+&usb2_phy1 {
+ /* Enable the hub which is connected to this port */
+ phy-supply = <&vl_pwr_en>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
index 06de0b1ce726..f2c098143594 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
@@ -32,6 +32,19 @@
regulator-boot-on;
regulator-always-on;
};
+
+ sound {
+ model = "G12B-KHADAS-VIM3L";
+ audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
+ "TDMOUT_A IN 1", "FRDDR_B OUT 0",
+ "TDMOUT_A IN 2", "FRDDR_C OUT 0",
+ "TDM_A Playback", "TDMOUT_A OUT",
+ "TDMIN_A IN 0", "TDM_A Capture",
+ "TDMIN_A IN 13", "TDM_A Loopback",
+ "TODDR_A IN 0", "TDMIN_A OUT",
+ "TODDR_B IN 0", "TDMIN_A OUT",
+ "TODDR_C IN 0", "TDMIN_A OUT";
+ };
};
&cpu0 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
index 8c327c03d845..8c30ce63686e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
@@ -23,18 +23,6 @@
};
};
- hub_5v: regulator-hub_5v {
- compatible = "regulator-fixed";
- regulator-name = "HUB_5V";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&vcc_5v>;
-
- /* Connected to the Hub CHIPENABLE, LOW sets low power state */
- gpio = <&gpio GPIOH_4 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
sound {
model = "ODROID-C4";
};
@@ -58,8 +46,3 @@
&ir {
linux,rc-map-name = "rc-odroid";
};
-
-&usb2_phy1 {
- /* Enable the hub which is connected to this port */
- phy-supply = <&hub_5v>;
-};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
index bf15700c4b15..f3f953225bf5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
@@ -44,6 +44,32 @@
};
};
+ /* Powers the SATA Disk 0 regulator, which is enabled when a disk load is detected */
+ p12v_0: regulator-p12v_0 {
+ compatible = "regulator-fixed";
+ regulator-name = "P12V_0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ vin-supply = <&main_12v>;
+
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ /* Powers the SATA Disk 1 regulator, which is enabled when a disk load is detected */
+ p12v_1: regulator-p12v_1 {
+ compatible = "regulator-fixed";
+ regulator-name = "P12V_1";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ vin-supply = <&main_12v>;
+
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
sound {
model = "ODROID-HC4";
};
@@ -90,7 +116,25 @@
status = "disabled";
};
+&spifc {
+ status = "okay";
+ pinctrl-0 = <&nor_pins>;
+ pinctrl-names = "default";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ };
+};
+
&usb {
- phys = <&usb2_phy0>, <&usb2_phy1>;
- phy-names = "usb2-phy0", "usb2-phy1";
+ phys = <&usb2_phy1>;
+ phy-names = "usb2-phy1";
+};
+
+&usb2_phy0 {
+ status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
index d14716b3d0f1..fd0ad85c165b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -46,8 +46,13 @@
regulator-name = "TF_IO";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_5v>;
- gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+
+ gpios = <&gpio_ao GPIOAO_6 GPIO_OPEN_SOURCE>;
gpios-states = <0>;
states = <3300000 0>,
@@ -78,6 +83,8 @@
regulator-max-microvolt = <5000000>;
regulator-always-on;
vin-supply = <&main_12v>;
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
};
vcc_1v8: regulator-vcc_1v8 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index c309517abae3..3d8b1f4f2001 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -401,6 +401,16 @@
status = "disabled";
};
+ toacodec: audio-controller@740 {
+ compatible = "amlogic,sm1-toacodec",
+ "amlogic,g12a-toacodec";
+ reg = <0x0 0x740 0x0 0x4>;
+ #sound-dai-cells = <1>;
+ sound-name-prefix = "TOACODEC";
+ resets = <&clkc_audio AUD_RESET_TOACODEC>;
+ status = "disabled";
+ };
+
tohdmitx: audio-controller@744 {
compatible = "amlogic,sm1-tohdmitx",
"amlogic,g12a-tohdmitx";
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 1cc7fdcec51b..8e7a66943b01 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -568,13 +568,13 @@
clocks {
compatible = "arm,scpi-clocks";
- scpi_dvfs: scpi-dvfs {
+ scpi_dvfs: clocks-0 {
compatible = "arm,scpi-dvfs-clocks";
#clock-cells = <1>;
clock-indices = <0>, <1>, <2>;
clock-output-names = "atlclk", "aplclk","gpuclk";
};
- scpi_clk: scpi-clk {
+ scpi_clk: clocks-1 {
compatible = "arm,scpi-variable-clocks";
#clock-cells = <1>;
clock-indices = <3>;
@@ -582,7 +582,7 @@
};
};
- scpi_devpd: scpi-power-domains {
+ scpi_devpd: power-controller {
compatible = "arm,scpi-power-domains";
num-domains = <2>;
#power-domain-cells = <1>;
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 998e240aa698..11eae3e3a944 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-4-b.dtb \
+dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-400.dtb \
+ bcm2711-rpi-4-b.dtb \
bcm2837-rpi-3-a-plus.dtb \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
diff --git a/arch/arm64/boot/dts/broadcom/bcm2711-rpi-400.dts b/arch/arm64/boot/dts/broadcom/bcm2711-rpi-400.dts
new file mode 100644
index 000000000000..b9000f58beb5
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2711-rpi-400.dts
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "arm/bcm2711-rpi-400.dts"
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
index 8060178b365d..a5a64d17d9ea 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
@@ -306,7 +306,7 @@
interrupt-names = "nand";
status = "okay";
- nandcs: nandcs@0 {
+ nandcs: nand@0 {
compatible = "brcm,nandcs";
reg = <0>;
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index 2ffb2c92182a..7b04dfe67bef 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -470,7 +470,7 @@
status = "disabled";
};
- uart0: uart@100000 {
+ uart0: serial@100000 {
device_type = "serial";
compatible = "snps,dw-apb-uart";
reg = <0x00100000 0x1000>;
@@ -481,7 +481,7 @@
status = "disabled";
};
- uart1: uart@110000 {
+ uart1: serial@110000 {
device_type = "serial";
compatible = "snps,dw-apb-uart";
reg = <0x00110000 0x1000>;
@@ -492,7 +492,7 @@
status = "disabled";
};
- uart2: uart@120000 {
+ uart2: serial@120000 {
device_type = "serial";
compatible = "snps,dw-apb-uart";
reg = <0x00120000 0x1000>;
@@ -503,7 +503,7 @@
status = "disabled";
};
- uart3: uart@130000 {
+ uart3: serial@130000 {
device_type = "serial";
compatible = "snps,dw-apb-uart";
reg = <0x00130000 0x1000>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index 773d9abe3a44..cbcc01a66aab 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -391,6 +391,7 @@
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
samsung,s2mps11-wrstbi-ground;
+ wakeup-source;
s2mps13_osc: clocks {
compatible = "samsung,s2mps13-clk";
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 44890d56c194..25806c4924cb 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -41,6 +41,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw72xx-0x.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw73xx-0x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7901.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
index 6290e2f9de6a..e8562585d4ac 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
@@ -24,6 +24,10 @@
status = "okay";
};
+&pcie1 {
+ status = "okay";
+};
+
&qspi {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 9058cfa4980f..50a72cda4727 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -238,35 +238,35 @@
"fsl,sec-v4.0-rtic";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x60000 0x100 0x60e00 0x18>;
+ reg = <0x60000 0x100>, <0x60e00 0x18>;
ranges = <0x0 0x60100 0x500>;
rtic_a: rtic-a@0 {
compatible = "fsl,sec-v5.4-rtic-memory",
"fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x100>;
+ reg = <0x00 0x20>, <0x100 0x100>;
};
rtic_b: rtic-b@20 {
compatible = "fsl,sec-v5.4-rtic-memory",
"fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x100>;
+ reg = <0x20 0x20>, <0x200 0x100>;
};
rtic_c: rtic-c@40 {
compatible = "fsl,sec-v5.4-rtic-memory",
"fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x100>;
+ reg = <0x40 0x20>, <0x300 0x100>;
};
rtic_d: rtic-d@60 {
compatible = "fsl,sec-v5.4-rtic-memory",
"fsl,sec-v5.0-rtic-memory",
"fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x400 0x100>;
+ reg = <0x60 0x20>, <0x400 0x100>;
};
};
};
@@ -522,8 +522,8 @@
pcie1: pcie@3400000 {
compatible = "fsl,ls1012a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 118 0x4>, /* controller interrupt */
<0 117 0x4>; /* PME interrupt */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
index dd764b720fb0..f6a79c8080d1 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
@@ -54,6 +54,7 @@
&mscc_felix_port0 {
label = "swp0";
+ managed = "in-band-status";
phy-handle = <&phy0>;
phy-mode = "sgmii";
status = "okay";
@@ -61,6 +62,7 @@
&mscc_felix_port1 {
label = "swp1";
+ managed = "in-band-status";
phy-handle = <&phy1>;
phy-mode = "sgmii";
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index 9322c6ad8e4a..d7b527272500 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -275,6 +275,10 @@
status = "okay";
};
+&optee {
+ status = "okay";
+};
+
&sai4 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index a30249ebffa8..343ecf0e8973 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -66,7 +66,7 @@
};
};
- sysclk: clock-sysclk {
+ sysclk: sysclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
@@ -88,7 +88,7 @@
};
firmware {
- optee {
+ optee: optee {
compatible = "linaro,optee-tz";
method = "smc";
status = "disabled";
@@ -617,8 +617,8 @@
pcie1: pcie@3400000 {
compatible = "fsl,ls1028a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x80 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x80 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
@@ -644,8 +644,8 @@
pcie2: pcie@3500000 {
compatible = "fsl,ls1028a-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x88 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x88 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
@@ -990,19 +990,19 @@
msi-map = <0 &its 0x17 0xe>;
iommu-map = <0 &smmu 0x17 0xe>;
/* PF0-6 BAR0 - non-prefetchable memory */
- ranges = <0x82000000 0x0 0x00000000 0x1 0xf8000000 0x0 0x160000
+ ranges = <0x82000000 0x1 0xf8000000 0x1 0xf8000000 0x0 0x160000
/* PF0-6 BAR2 - prefetchable memory */
- 0xc2000000 0x0 0x00000000 0x1 0xf8160000 0x0 0x070000
+ 0xc2000000 0x1 0xf8160000 0x1 0xf8160000 0x0 0x070000
/* PF0: VF0-1 BAR0 - non-prefetchable memory */
- 0x82000000 0x0 0x00000000 0x1 0xf81d0000 0x0 0x020000
+ 0x82000000 0x1 0xf81d0000 0x1 0xf81d0000 0x0 0x020000
/* PF0: VF0-1 BAR2 - prefetchable memory */
- 0xc2000000 0x0 0x00000000 0x1 0xf81f0000 0x0 0x020000
+ 0xc2000000 0x1 0xf81f0000 0x1 0xf81f0000 0x0 0x020000
/* PF1: VF0-1 BAR0 - non-prefetchable memory */
- 0x82000000 0x0 0x00000000 0x1 0xf8210000 0x0 0x020000
+ 0x82000000 0x1 0xf8210000 0x1 0xf8210000 0x0 0x020000
/* PF1: VF0-1 BAR2 - prefetchable memory */
- 0xc2000000 0x0 0x00000000 0x1 0xf8230000 0x0 0x020000
+ 0xc2000000 0x1 0xf8230000 0x1 0xf8230000 0x0 0x020000
/* BAR4 (PF5) - non-prefetchable memory */
- 0x82000000 0x0 0x00000000 0x1 0xfc000000 0x0 0x400000>;
+ 0x82000000 0x1 0xfc000000 0x1 0xfc000000 0x0 0x400000>;
enetc_port0: ethernet@0,0 {
compatible = "fsl,enetc";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 28c51e521cb2..01b01e320411 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -869,8 +869,8 @@
pcie1: pcie@3400000 {
compatible = "fsl,ls1043a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 118 0x4>, /* controller interrupt */
<0 117 0x4>; /* PME interrupt */
@@ -895,8 +895,8 @@
pcie2: pcie@3500000 {
compatible = "fsl,ls1043a-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 128 0x4>,
<0 127 0x4>;
@@ -921,8 +921,8 @@
pcie3: pcie@3600000 {
compatible = "fsl,ls1043a-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
+ <0x50 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 162 0x4>,
<0 161 0x4>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 39458305e333..687fea6d8afa 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -773,8 +773,8 @@
pcie1: pcie@3400000 {
compatible = "fsl,ls1046a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
@@ -799,8 +799,8 @@
pcie_ep1: pcie_ep@3400000 {
compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
- reg = <0x00 0x03400000 0x0 0x00100000
- 0x40 0x00000000 0x8 0x00000000>;
+ reg = <0x00 0x03400000 0x0 0x00100000>,
+ <0x40 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
num-ib-windows = <6>;
num-ob-windows = <8>;
@@ -809,8 +809,8 @@
pcie2: pcie@3500000 {
compatible = "fsl,ls1046a-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
<GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
@@ -835,8 +835,8 @@
pcie_ep2: pcie_ep@3500000 {
compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
- reg = <0x00 0x03500000 0x0 0x00100000
- 0x48 0x00000000 0x8 0x00000000>;
+ reg = <0x00 0x03500000 0x0 0x00100000>,
+ <0x48 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
num-ib-windows = <6>;
num-ob-windows = <8>;
@@ -845,8 +845,8 @@
pcie3: pcie@3600000 {
compatible = "fsl,ls1046a-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
+ <0x50 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
<GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
@@ -871,8 +871,8 @@
pcie_ep3: pcie_ep@3600000 {
compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep";
- reg = <0x00 0x03600000 0x0 0x00100000
- 0x50 0x00000000 0x8 0x00000000>;
+ reg = <0x00 0x03600000 0x0 0x00100000>,
+ <0x50 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
num-ib-windows = <6>;
num-ob-windows = <8>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index 8ffbc9fde041..2fa6cfbef01f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -536,8 +536,8 @@
pcie1: pcie@3400000 {
compatible = "fsl,ls1088a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x20 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
interrupt-names = "aer";
@@ -562,8 +562,8 @@
pcie_ep1: pcie-ep@3400000 {
compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep";
- reg = <0x00 0x03400000 0x0 0x00100000
- 0x20 0x00000000 0x8 0x00000000>;
+ reg = <0x00 0x03400000 0x0 0x00100000>,
+ <0x20 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
num-ib-windows = <24>;
num-ob-windows = <256>;
@@ -573,8 +573,8 @@
pcie2: pcie@3500000 {
compatible = "fsl,ls1088a-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x28 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
interrupt-names = "aer";
@@ -599,8 +599,8 @@
pcie_ep2: pcie-ep@3500000 {
compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep";
- reg = <0x00 0x03500000 0x0 0x00100000
- 0x28 0x00000000 0x8 0x00000000>;
+ reg = <0x00 0x03500000 0x0 0x00100000>,
+ <0x28 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
num-ib-windows = <6>;
num-ob-windows = <6>;
@@ -609,8 +609,8 @@
pcie3: pcie@3600000 {
compatible = "fsl,ls1088a-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
+ <0x30 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
interrupt-names = "aer";
@@ -635,8 +635,8 @@
pcie_ep3: pcie-ep@3600000 {
compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep";
- reg = <0x00 0x03600000 0x0 0x00100000
- 0x30 0x00000000 0x8 0x00000000>;
+ reg = <0x00 0x03600000 0x0 0x00100000>,
+ <0x30 0x00000000 0x8 0x00000000>;
reg-names = "regs", "addr_space";
num-ib-windows = <6>;
num-ob-windows = <6>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 76ab68d2de0b..6f6667b70028 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -120,32 +120,32 @@
};
&pcie1 {
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x10 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
};
&pcie2 {
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x12 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x12 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
};
&pcie3 {
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x14 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
+ <0x14 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
};
&pcie4 {
- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
- 0x16 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03700000 0x0 0x00100000>, /* controller registers */
+ <0x16 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
index da24dc127698..c3dc38188c17 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
@@ -121,8 +121,8 @@
&pcie1 {
compatible = "fsl,ls2088a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x20 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000
0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>;
@@ -130,8 +130,8 @@
&pcie2 {
compatible = "fsl,ls2088a-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x28 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000
0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>;
@@ -139,8 +139,8 @@
&pcie3 {
compatible = "fsl,ls2088a-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
+ <0x30 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000
0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>;
@@ -148,8 +148,8 @@
&pcie4 {
compatible = "fsl,ls2088a-pcie";
- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
- 0x38 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03700000 0x0 0x00100000>, /* controller registers */
+ <0x38 0x00000000 0x0 0x00002000>; /* configuration space */
ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000
0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 135ac8210871..801ba9612d36 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -929,7 +929,6 @@
QORIQ_CLK_PLL_DIV(4)>;
clock-names = "dspi";
spi-num-chipselects = <5>;
- bus-num = <0>;
};
esdhc: esdhc@2140000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index 0551f6f4c313..c4b1a59ba424 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -1089,8 +1089,8 @@
pcie1: pcie@3400000 {
compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x80 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03400000 0x0 0x00100000>, /* controller registers */
+ <0x80 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "csr_axi_slave", "config_axi_slave";
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
@@ -1117,8 +1117,8 @@
pcie2: pcie@3500000 {
compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x88 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03500000 0x0 0x00100000>, /* controller registers */
+ <0x88 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "csr_axi_slave", "config_axi_slave";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
@@ -1145,8 +1145,8 @@
pcie3: pcie@3600000 {
compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x90 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03600000 0x0 0x00100000>, /* controller registers */
+ <0x90 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "csr_axi_slave", "config_axi_slave";
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
@@ -1173,8 +1173,8 @@
pcie4: pcie@3700000 {
compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
- 0x98 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03700000 0x0 0x00100000>, /* controller registers */
+ <0x98 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "csr_axi_slave", "config_axi_slave";
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
<GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
@@ -1201,8 +1201,8 @@
pcie5: pcie@3800000 {
compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03800000 0x0 0x00100000 /* controller registers */
- 0xa0 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03800000 0x0 0x00100000>, /* controller registers */
+ <0xa0 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "csr_axi_slave", "config_axi_slave";
interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
<GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
@@ -1229,8 +1229,8 @@
pcie6: pcie@3900000 {
compatible = "fsl,lx2160a-pcie";
- reg = <0x00 0x03900000 0x0 0x00100000 /* controller registers */
- 0xa8 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg = <0x00 0x03900000 0x0 0x00100000>, /* controller registers */
+ <0xa8 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "csr_axi_slave", "config_axi_slave";
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
index e1e81ca0ca69..a79f42a9618e 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
@@ -77,9 +77,12 @@ conn_subsys: bus@5b000000 {
<GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&enet0_lpcg IMX_LPCG_CLK_4>,
<&enet0_lpcg IMX_LPCG_CLK_2>,
- <&enet0_lpcg IMX_LPCG_CLK_1>,
+ <&enet0_lpcg IMX_LPCG_CLK_3>,
<&enet0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
+ assigned-clocks = <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_ENET_0 IMX_SC_C_CLKDIV>;
+ assigned-clock-rates = <250000000>, <125000000>;
fsl,num-tx-queues=<3>;
fsl,num-rx-queues=<3>;
power-domains = <&pd IMX_SC_R_ENET_0>;
@@ -94,9 +97,12 @@ conn_subsys: bus@5b000000 {
<GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&enet1_lpcg IMX_LPCG_CLK_4>,
<&enet1_lpcg IMX_LPCG_CLK_2>,
- <&enet1_lpcg IMX_LPCG_CLK_1>,
+ <&enet1_lpcg IMX_LPCG_CLK_3>,
<&enet1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
+ assigned-clocks = <&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_ENET_1 IMX_SC_C_CLKDIV>;
+ assigned-clock-rates = <250000000>, <125000000>;
fsl,num-tx-queues=<3>;
fsl,num-rx-queues=<3>;
power-domains = <&pd IMX_SC_R_ENET_1>;
@@ -152,15 +158,19 @@ conn_subsys: bus@5b000000 {
#clock-cells = <1>;
clocks = <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
<&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
- <&conn_axi_clk>, <&conn_ipg_clk>, <&conn_ipg_clk>;
+ <&conn_axi_clk>,
+ <&clk IMX_SC_R_ENET_0 IMX_SC_C_TXCLK>,
+ <&conn_ipg_clk>,
+ <&conn_ipg_clk>;
clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
- <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_4>,
- <IMX_LPCG_CLK_5>;
- clock-output-names = "enet0_ipg_root_clk",
- "enet0_tx_clk",
- "enet0_ahb_clk",
- "enet0_ipg_clk",
- "enet0_ipg_s_clk";
+ <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_3>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>;
+ clock-output-names = "enet0_lpcg_timer_clk",
+ "enet0_lpcg_txc_sampling_clk",
+ "enet0_lpcg_ahb_clk",
+ "enet0_lpcg_rgmii_txc_clk",
+ "enet0_lpcg_ipg_clk",
+ "enet0_lpcg_ipg_s_clk";
power-domains = <&pd IMX_SC_R_ENET_0>;
};
@@ -170,15 +180,19 @@ conn_subsys: bus@5b000000 {
#clock-cells = <1>;
clocks = <&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>,
<&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>,
- <&conn_axi_clk>, <&conn_ipg_clk>, <&conn_ipg_clk>;
+ <&conn_axi_clk>,
+ <&clk IMX_SC_R_ENET_1 IMX_SC_C_TXCLK>,
+ <&conn_ipg_clk>,
+ <&conn_ipg_clk>;
clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
- <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_4>,
- <IMX_LPCG_CLK_5>;
- clock-output-names = "enet1_ipg_root_clk",
- "enet1_tx_clk",
- "enet1_ahb_clk",
- "enet1_ipg_clk",
- "enet1_ipg_s_clk";
+ <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_3>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>;
+ clock-output-names = "enet1_lpcg_timer_clk",
+ "enet1_lpcg_txc_sampling_clk",
+ "enet1_lpcg_ahb_clk",
+ "enet1_lpcg_rgmii_txc_clk",
+ "enet1_lpcg_ipg_clk",
+ "enet1_lpcg_ipg_s_clk";
power-domains = <&pd IMX_SC_R_ENET_1>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index 6518f088b2c2..e033d0257b5a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -314,6 +314,7 @@
srp-disable;
adp-disable;
usb-role-switch;
+ disable-over-current;
samsung,picophy-pre-emp-curr-control = <3>;
samsung,picophy-dc-vol-level-adjust = <7>;
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
new file mode 100644
index 000000000000..5a1e9df39bec
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/leds/common.h>
+
+#include "imx8mm.dtsi"
+
+/ {
+ model = "Gateworks Venice GW7901 i.MX8MM board";
+ compatible = "gw,imx8mm-gw7901", "fsl,imx8mm";
+
+ aliases {
+ ethernet0 = &fec1;
+ ethernet1 = &lan1;
+ ethernet2 = &lan2;
+ ethernet3 = &lan3;
+ ethernet4 = &lan4;
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ };
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0x80000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ user-pb {
+ label = "user_pb";
+ gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_0>;
+ };
+
+ user-pb1x {
+ label = "user_pb1x";
+ linux,code = <BTN_1>;
+ interrupt-parent = <&gsc>;
+ interrupts = <0>;
+ };
+
+ key-erased {
+ label = "key_erased";
+ linux,code = <BTN_2>;
+ interrupt-parent = <&gsc>;
+ interrupts = <1>;
+ };
+
+ eeprom-wp {
+ label = "eeprom_wp";
+ linux,code = <BTN_3>;
+ interrupt-parent = <&gsc>;
+ interrupts = <2>;
+ };
+
+ tamper {
+ label = "tamper";
+ linux,code = <BTN_4>;
+ interrupt-parent = <&gsc>;
+ interrupts = <5>;
+ };
+
+ switch-hold {
+ label = "switch_hold";
+ linux,code = <BTN_5>;
+ interrupt-parent = <&gsc>;
+ interrupts = <7>;
+ };
+ };
+
+ led-controller {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ label = "led01_red";
+ gpios = <&leds_gpio 0 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "led01_grn";
+ gpios = <&leds_gpio 1 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-2 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ label = "led02_red";
+ gpios = <&leds_gpio 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-3 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "led02_grn";
+ gpios = <&leds_gpio 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-4 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ label = "led03_red";
+ gpios = <&leds_gpio 4 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-5 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "led03_grn";
+ gpios = <&leds_gpio 5 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-6 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ label = "led04_red";
+ gpios = <&leds_gpio 8 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-7 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "led04_grn";
+ gpios = <&leds_gpio 9 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-8 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ label = "led05_red";
+ gpios = <&leds_gpio 10 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-9 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "led05_grn";
+ gpios = <&leds_gpio 11 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-a {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ label = "led06_red";
+ gpios = <&leds_gpio 12 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-b {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "led06_grn";
+ gpios = <&leds_gpio 13 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ regulator-ioexp {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_ioexp>;
+ compatible = "regulator-fixed";
+ regulator-name = "ioexp";
+ gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ regulator-isouart {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_isouart>;
+ compatible = "regulator-fixed";
+ regulator-name = "iso_uart";
+ gpio = <&gpio1 13 GPIO_ACTIVE_LOW>;
+ startup-delay-us = <100>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb2_vbus: regulator-usb2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb2>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_usb2_vbus";
+ gpio = <&gpio4 17 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_wifi: regulator-wifi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_wl>;
+ compatible = "regulator-fixed";
+ regulator-name = "wifi";
+ gpio = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&ddrc {
+ operating-points-v2 = <&ddrc_opp_table>;
+
+ ddrc_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-25M {
+ opp-hz = /bits/ 64 <25000000>;
+ };
+
+ opp-100M {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+
+ opp-750M {
+ opp-hz = /bits/ 64 <750000000>;
+ };
+ };
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ status = "okay";
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ local-mac-address = [00 00 00 00 00 00];
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ gsc: gsc@20 {
+ compatible = "gw,gsc";
+ reg = <0x20>;
+ pinctrl-0 = <&pinctrl_gsc>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc {
+ compatible = "gw,gsc-adc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@6 {
+ gw,mode = <0>;
+ reg = <0x06>;
+ label = "temp";
+ };
+
+ channel@8 {
+ gw,mode = <1>;
+ reg = <0x08>;
+ label = "vdd_bat";
+ };
+
+ channel@82 {
+ gw,mode = <2>;
+ reg = <0x82>;
+ label = "vin_aux1";
+ gw,voltage-divider-ohms = <22100 1000>;
+ };
+
+ channel@84 {
+ gw,mode = <2>;
+ reg = <0x84>;
+ label = "vin_aux2";
+ gw,voltage-divider-ohms = <22100 1000>;
+ };
+
+ channel@86 {
+ gw,mode = <2>;
+ reg = <0x86>;
+ label = "vdd_vin";
+ gw,voltage-divider-ohms = <22100 1000>;
+ };
+
+ channel@88 {
+ gw,mode = <2>;
+ reg = <0x88>;
+ label = "vdd_3p3";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+
+ channel@8c {
+ gw,mode = <2>;
+ reg = <0x8c>;
+ label = "vdd_2p5";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+
+ channel@8e {
+ gw,mode = <2>;
+ reg = <0x8e>;
+ label = "vdd_0p95";
+ };
+
+ channel@90 {
+ gw,mode = <2>;
+ reg = <0x90>;
+ label = "vdd_soc";
+ };
+
+ channel@92 {
+ gw,mode = <2>;
+ reg = <0x92>;
+ label = "vdd_arm";
+ };
+
+ channel@98 {
+ gw,mode = <2>;
+ reg = <0x98>;
+ label = "vdd_1p8";
+ };
+
+ channel@9a {
+ gw,mode = <2>;
+ reg = <0x9a>;
+ label = "vdd_1p2";
+ };
+
+ channel@9c {
+ gw,mode = <2>;
+ reg = <0x9c>;
+ label = "vdd_dram";
+ };
+
+ channel@a2 {
+ gw,mode = <2>;
+ reg = <0xa2>;
+ label = "vdd_gsc";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+ };
+ };
+
+ gpio: gpio@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gsc>;
+ interrupts = <4>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ rtc@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ pmic@4b {
+ compatible = "rohm,bd71847";
+ reg = <0x4b>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+ rohm,reset-snvs-powered;
+ #clock-cells = <0>;
+ clocks = <&osc_32k 0>;
+ clock-output-names = "clk-32k-out";
+
+ regulators {
+ /* vdd_soc: 0.805-0.900V (typ=0.8V) */
+ BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ };
+
+ /* vdd_arm: 0.805-1.0V (typ=0.9V) */
+ BUCK2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ rohm,dvs-run-voltage = <1000000>;
+ rohm,dvs-idle-voltage = <900000>;
+ };
+
+ /* vdd_0p9: 0.805-1.0V (typ=0.9V) */
+ BUCK3 {
+ regulator-name = "buck3";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vdd_3p3 */
+ BUCK4 {
+ regulator-name = "buck4";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vdd_1p8 */
+ BUCK5 {
+ regulator-name = "buck5";
+ regulator-min-microvolt = <1605000>;
+ regulator-max-microvolt = <1995000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vdd_dram */
+ BUCK6 {
+ regulator-name = "buck6";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* nvcc_snvs_1p8 */
+ LDO1 {
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vdd_snvs_0p8 */
+ LDO2 {
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* vdda_1p8 */
+ LDO3 {
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO4 {
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO6 {
+ regulator-name = "ldo6";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ leds_gpio: gpio@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ switch: switch@5f {
+ compatible = "microchip,ksz9897";
+ reg = <0x5f>;
+ pinctrl-0 = <&pinctrl_ksz>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ phy-mode = "rgmii-id";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lan1: port@0 {
+ reg = <0>;
+ label = "lan1";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan2: port@1 {
+ reg = <1>;
+ label = "lan2";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan3: port@2 {
+ reg = <2>;
+ label = "lan3";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan4: port@3 {
+ reg = <3>;
+ label = "lan4";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+
+ crypto@60 {
+ compatible = "atmel,atecc508a";
+ reg = <0x60>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>;
+ rts-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+ cts-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+ dtr-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+ dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ dcd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+/* console */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
+ cts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
+ rts-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_uart4_gpio>;
+ cts-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
+ rts-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb2_vbus>;
+ status = "okay";
+};
+
+/* SDIO WiFi */
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <4>;
+ non-removable;
+ vmmc-supply = <&reg_wifi>;
+ status = "okay";
+};
+
+/* microSD */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x40000041 /* DIG2_OUT */
+ MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x40000041 /* DIG2_IN */
+ MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x40000041 /* DIG1_IN */
+ MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x40000041 /* DIG1_OUT */
+ MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x40000041 /* SIM2DET# */
+ MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x40000041 /* SIM1DET# */
+ MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x40000041 /* SIM2SEL */
+ >;
+ };
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_SAI1_TXD6_GPIO4_IO18 0x19 /* IRQ# */
+ MX8MM_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x19 /* RST# */
+ >;
+ };
+
+ pinctrl_gsc: gscgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x159
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_ksz: kszgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_TXD6_GPIO4_IO18 0x41
+ MX8MM_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x41 /* RST# */
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x41
+ >;
+ };
+
+ pinctrl_reg_isouart: regisouartgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000041
+ >;
+ };
+
+ pinctrl_reg_ioexp: regioexpgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x40000041
+ >;
+ };
+
+ pinctrl_reg_wl: regwlgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x40000041
+ >;
+ };
+
+ pinctrl_reg_usb2: regusb1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_TXD5_GPIO4_IO17 0x41
+ MX8MM_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x41
+ >;
+ };
+
+ pinctrl_spi1: spi1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x82
+ MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x82
+ MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x82
+ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x140
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ MX8MM_IOMUXC_GPIO1_IO01_GPIO1_IO1 0x140
+ MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x140
+ MX8MM_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x140
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x140
+ MX8MM_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x140
+ >;
+ };
+
+ pinctrl_uart1_gpio: uart1gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x40000041 /* RS422# */
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x40000041 /* RS485# */
+ MX8MM_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x40000041 /* RS232# */
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
+ MX8MM_IOMUXC_SAI1_RXD7_GPIO4_IO9 0x140
+ MX8MM_IOMUXC_SAI1_TXFS_GPIO4_IO10 0x140
+ >;
+ };
+
+ pinctrl_uart3_gpio: uart3gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x40000041 /* RS232# */
+ MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x40000041 /* RS422# */
+ MX8MM_IOMUXC_SAI1_RXD6_GPIO4_IO8 0x40000041 /* RS485# */
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x140
+ MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x140
+ MX8MM_IOMUXC_ECSPI2_MOSI_GPIO5_IO11 0x140
+ MX8MM_IOMUXC_ECSPI2_MISO_GPIO5_IO12 0x140
+ >;
+ };
+
+ pinctrl_uart4_gpio: uart4gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SCLK_GPIO5_IO10 0x40000041 /* RS232# */
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x40000041 /* RS422# */
+ MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x40000041 /* RS485# */
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x1c4
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
+
+&cpu_alert0 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+};
+
+&cpu_crit0 {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index a27e02bee6b4..e7648c3b8390 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -261,6 +261,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
+ dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
nvmem-cells = <&imx8mm_uid>;
nvmem-cell-names = "soc_unique_id";
@@ -271,117 +272,125 @@
#size-cells = <1>;
ranges = <0x30000000 0x30000000 0x400000>;
- sai1: sai@30010000 {
- #sound-dai-cells = <0>;
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
- reg = <0x30010000 0x10000>;
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_SAI1_IPG>,
- <&clk IMX8MM_CLK_SAI1_ROOT>,
- <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
- clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dmas = <&sdma2 0 2 0>, <&sdma2 1 2 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ spba2: spba-bus@30000000 {
+ compatible = "fsl,spba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30000000 0x100000>;
+ ranges;
+
+ sai1: sai@30010000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ reg = <0x30010000 0x10000>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SAI1_IPG>,
+ <&clk IMX8MM_CLK_SAI1_ROOT>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dmas = <&sdma2 0 2 0>, <&sdma2 1 2 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- sai2: sai@30020000 {
- #sound-dai-cells = <0>;
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
- reg = <0x30020000 0x10000>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_SAI2_IPG>,
- <&clk IMX8MM_CLK_SAI2_ROOT>,
- <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
- clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dmas = <&sdma2 2 2 0>, <&sdma2 3 2 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ sai2: sai@30020000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ reg = <0x30020000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SAI2_IPG>,
+ <&clk IMX8MM_CLK_SAI2_ROOT>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dmas = <&sdma2 2 2 0>, <&sdma2 3 2 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- sai3: sai@30030000 {
- #sound-dai-cells = <0>;
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
- reg = <0x30030000 0x10000>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_SAI3_IPG>,
- <&clk IMX8MM_CLK_SAI3_ROOT>,
- <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
- clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dmas = <&sdma2 4 2 0>, <&sdma2 5 2 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ sai3: sai@30030000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ reg = <0x30030000 0x10000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SAI3_IPG>,
+ <&clk IMX8MM_CLK_SAI3_ROOT>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dmas = <&sdma2 4 2 0>, <&sdma2 5 2 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- sai5: sai@30050000 {
- #sound-dai-cells = <0>;
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
- reg = <0x30050000 0x10000>;
- interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_SAI5_IPG>,
- <&clk IMX8MM_CLK_SAI5_ROOT>,
- <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
- clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dmas = <&sdma2 8 2 0>, <&sdma2 9 2 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ sai5: sai@30050000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ reg = <0x30050000 0x10000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SAI5_IPG>,
+ <&clk IMX8MM_CLK_SAI5_ROOT>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dmas = <&sdma2 8 2 0>, <&sdma2 9 2 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- sai6: sai@30060000 {
- #sound-dai-cells = <0>;
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
- reg = <0x30060000 0x10000>;
- interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_SAI6_IPG>,
- <&clk IMX8MM_CLK_SAI6_ROOT>,
- <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
- clock-names = "bus", "mclk1", "mclk2", "mclk3";
- dmas = <&sdma2 10 2 0>, <&sdma2 11 2 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ sai6: sai@30060000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ reg = <0x30060000 0x10000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SAI6_IPG>,
+ <&clk IMX8MM_CLK_SAI6_ROOT>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dmas = <&sdma2 10 2 0>, <&sdma2 11 2 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- micfil: audio-controller@30080000 {
- compatible = "fsl,imx8mm-micfil";
- reg = <0x30080000 0x10000>;
- interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_PDM_IPG>,
- <&clk IMX8MM_CLK_PDM_ROOT>,
- <&clk IMX8MM_AUDIO_PLL1_OUT>,
- <&clk IMX8MM_AUDIO_PLL2_OUT>,
- <&clk IMX8MM_CLK_EXT3>;
- clock-names = "ipg_clk", "ipg_clk_app",
- "pll8k", "pll11k", "clkext3";
- dmas = <&sdma2 24 25 0x80000000>;
- dma-names = "rx";
- status = "disabled";
- };
+ micfil: audio-controller@30080000 {
+ compatible = "fsl,imx8mm-micfil";
+ reg = <0x30080000 0x10000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PDM_IPG>,
+ <&clk IMX8MM_CLK_PDM_ROOT>,
+ <&clk IMX8MM_AUDIO_PLL1_OUT>,
+ <&clk IMX8MM_AUDIO_PLL2_OUT>,
+ <&clk IMX8MM_CLK_EXT3>;
+ clock-names = "ipg_clk", "ipg_clk_app",
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
+ status = "disabled";
+ };
- spdif1: spdif@30090000 {
- compatible = "fsl,imx35-spdif";
- reg = <0x30090000 0x10000>;
- interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_AUDIO_AHB>, /* core */
- <&clk IMX8MM_CLK_24M>, /* rxtx0 */
- <&clk IMX8MM_CLK_SPDIF1>, /* rxtx1 */
- <&clk IMX8MM_CLK_DUMMY>, /* rxtx2 */
- <&clk IMX8MM_CLK_DUMMY>, /* rxtx3 */
- <&clk IMX8MM_CLK_DUMMY>, /* rxtx4 */
- <&clk IMX8MM_CLK_AUDIO_AHB>, /* rxtx5 */
- <&clk IMX8MM_CLK_DUMMY>, /* rxtx6 */
- <&clk IMX8MM_CLK_DUMMY>, /* rxtx7 */
- <&clk IMX8MM_CLK_DUMMY>; /* spba */
- clock-names = "core", "rxtx0",
- "rxtx1", "rxtx2",
- "rxtx3", "rxtx4",
- "rxtx5", "rxtx6",
- "rxtx7", "spba";
- dmas = <&sdma2 28 18 0>, <&sdma2 29 18 0>;
- dma-names = "rx", "tx";
- status = "disabled";
+ spdif1: spdif@30090000 {
+ compatible = "fsl,imx35-spdif";
+ reg = <0x30090000 0x10000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_AUDIO_AHB>, /* core */
+ <&clk IMX8MM_CLK_24M>, /* rxtx0 */
+ <&clk IMX8MM_CLK_SPDIF1>, /* rxtx1 */
+ <&clk IMX8MM_CLK_DUMMY>, /* rxtx2 */
+ <&clk IMX8MM_CLK_DUMMY>, /* rxtx3 */
+ <&clk IMX8MM_CLK_DUMMY>, /* rxtx4 */
+ <&clk IMX8MM_CLK_AUDIO_AHB>, /* rxtx5 */
+ <&clk IMX8MM_CLK_DUMMY>, /* rxtx6 */
+ <&clk IMX8MM_CLK_DUMMY>, /* rxtx7 */
+ <&clk IMX8MM_CLK_DUMMY>; /* spba */
+ clock-names = "core", "rxtx0",
+ "rxtx1", "rxtx2",
+ "rxtx3", "rxtx4",
+ "rxtx5", "rxtx6",
+ "rxtx7", "spba";
+ dmas = <&sdma2 28 18 0>, <&sdma2 29 18 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
};
gpio1: gpio@30200000 {
@@ -670,80 +679,88 @@
ranges = <0x30800000 0x30800000 0x400000>,
<0x8000000 0x8000000 0x10000000>;
- ecspi1: spi@30820000 {
- compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ spba1: spba-bus@30800000 {
+ compatible = "fsl,spba-bus", "simple-bus";
#address-cells = <1>;
- #size-cells = <0>;
- reg = <0x30820000 0x10000>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_ECSPI1_ROOT>,
- <&clk IMX8MM_CLK_ECSPI1_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ #size-cells = <1>;
+ reg = <0x30800000 0x100000>;
+ ranges;
+
+ ecspi1: spi@30820000 {
+ compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30820000 0x10000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ECSPI1_ROOT>,
+ <&clk IMX8MM_CLK_ECSPI1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- ecspi2: spi@30830000 {
- compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x30830000 0x10000>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_ECSPI2_ROOT>,
- <&clk IMX8MM_CLK_ECSPI2_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ ecspi2: spi@30830000 {
+ compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30830000 0x10000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ECSPI2_ROOT>,
+ <&clk IMX8MM_CLK_ECSPI2_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- ecspi3: spi@30840000 {
- compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x30840000 0x10000>;
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_ECSPI3_ROOT>,
- <&clk IMX8MM_CLK_ECSPI3_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ ecspi3: spi@30840000 {
+ compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30840000 0x10000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ECSPI3_ROOT>,
+ <&clk IMX8MM_CLK_ECSPI3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- uart1: serial@30860000 {
- compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
- reg = <0x30860000 0x10000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_UART1_ROOT>,
- <&clk IMX8MM_CLK_UART1_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ uart1: serial@30860000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30860000 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART1_ROOT>,
+ <&clk IMX8MM_CLK_UART1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- uart3: serial@30880000 {
- compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
- reg = <0x30880000 0x10000>;
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_UART3_ROOT>,
- <&clk IMX8MM_CLK_UART3_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ uart3: serial@30880000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30880000 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART3_ROOT>,
+ <&clk IMX8MM_CLK_UART3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- uart2: serial@30890000 {
- compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
- reg = <0x30890000 0x10000>;
- interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_UART2_ROOT>,
- <&clk IMX8MM_CLK_UART2_ROOT>;
- clock-names = "ipg", "per";
- status = "disabled";
+ uart2: serial@30890000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30890000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART2_ROOT>,
+ <&clk IMX8MM_CLK_UART2_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
};
crypto: crypto@30900000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
index c35eeaff958f..54eaf3d6055b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
@@ -120,6 +120,9 @@
interrupt-parent = <&gpio1>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
rohm,reset-snvs-powered;
+ #clock-cells = <0>;
+ clocks = <&osc_32k 0>;
+ clock-output-names = "clk-32k-out";
regulators {
buck1_reg: BUCK1 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
index a0dddba2e561..85e65f8719ea 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
@@ -193,6 +193,7 @@
srp-disable;
adp-disable;
usb-role-switch;
+ disable-over-current;
samsung,picophy-pre-emp-curr-control = <3>;
samsung,picophy-dc-vol-level-adjust = <7>;
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 4dac4da38f4c..d4231e061403 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -245,6 +245,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
+ dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
nvmem-cells = <&imx8mn_uid>;
nvmem-cell-names = "soc_unique_id";
@@ -255,7 +256,7 @@
#size-cells = <1>;
ranges;
- spba: spba-bus@30000000 {
+ spba2: spba-bus@30000000 {
compatible = "fsl,spba-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -681,80 +682,88 @@
#size-cells = <1>;
ranges;
- ecspi1: spi@30820000 {
- compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi";
+ spba1: spba-bus@30800000 {
+ compatible = "fsl,spba-bus", "simple-bus";
#address-cells = <1>;
- #size-cells = <0>;
- reg = <0x30820000 0x10000>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_ECSPI1_ROOT>,
- <&clk IMX8MN_CLK_ECSPI1_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ #size-cells = <1>;
+ reg = <0x30800000 0x100000>;
+ ranges;
- ecspi2: spi@30830000 {
- compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x30830000 0x10000>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_ECSPI2_ROOT>,
- <&clk IMX8MN_CLK_ECSPI2_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ ecspi1: spi@30820000 {
+ compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30820000 0x10000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_ECSPI1_ROOT>,
+ <&clk IMX8MN_CLK_ECSPI1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- ecspi3: spi@30840000 {
- compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x30840000 0x10000>;
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_ECSPI3_ROOT>,
- <&clk IMX8MN_CLK_ECSPI3_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ ecspi2: spi@30830000 {
+ compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30830000 0x10000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_ECSPI2_ROOT>,
+ <&clk IMX8MN_CLK_ECSPI2_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- uart1: serial@30860000 {
- compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart";
- reg = <0x30860000 0x10000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_UART1_ROOT>,
- <&clk IMX8MN_CLK_UART1_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ ecspi3: spi@30840000 {
+ compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30840000 0x10000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_ECSPI3_ROOT>,
+ <&clk IMX8MN_CLK_ECSPI3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- uart3: serial@30880000 {
- compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart";
- reg = <0x30880000 0x10000>;
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_UART3_ROOT>,
- <&clk IMX8MN_CLK_UART3_ROOT>;
- clock-names = "ipg", "per";
- dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
+ uart1: serial@30860000 {
+ compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart";
+ reg = <0x30860000 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_UART1_ROOT>,
+ <&clk IMX8MN_CLK_UART1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
- uart2: serial@30890000 {
- compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart";
- reg = <0x30890000 0x10000>;
- interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_UART2_ROOT>,
- <&clk IMX8MN_CLK_UART2_ROOT>;
- clock-names = "ipg", "per";
- status = "disabled";
+ uart3: serial@30880000 {
+ compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart";
+ reg = <0x30880000 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_UART3_ROOT>,
+ <&clk IMX8MN_CLK_UART3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart2: serial@30890000 {
+ compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart";
+ reg = <0x30890000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_UART2_ROOT>,
+ <&clk IMX8MN_CLK_UART2_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
};
crypto: crypto@30900000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 2c28e589677e..7b99fad6e4d6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -81,6 +81,26 @@
status = "disabled";/* can2 pin conflict with pdm */
};
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ eee-broken-1000t;
+ };
+ };
+};
+
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec>;
@@ -104,6 +124,92 @@
};
};
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@25 {
+ compatible = "nxp,pca9450c";
+ reg = <0x25>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <720000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <720000>;
+ regulator-max-microvolt = <1025000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-standby-voltage = <850000>;
+ };
+
+ BUCK4 {
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ BUCK6 {
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <1045000>;
+ regulator-max-microvolt = <1155000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO3 {
+ regulator-name = "LDO3";
+ regulator-min-microvolt = <1710000>;
+ regulator-max-microvolt = <1890000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ LDO5 {
+ regulator-name = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
&i2c3 {
clock-frequency = <400000>;
pinctrl-names = "default";
@@ -177,6 +283,26 @@
};
&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3
+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3
+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91
+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91
+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91
+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91
+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91
+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f
+ MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x19
+ >;
+ };
+
pinctrl_fec: fecgrp {
fsl,pins = <
MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x3
@@ -229,6 +355,13 @@
>;
};
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3
+ >;
+ };
+
pinctrl_i2c3: i2c3grp {
fsl,pins = <
MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3
@@ -236,6 +369,12 @@
>;
};
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x000001c0
+ >;
+ };
+
pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
fsl,pins = <
MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index f3965ec5b31d..aa78e0d8c72b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -65,6 +65,20 @@
};
};
+&flexspi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi0>;
+ status = "okay";
+
+ som_flash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+};
+
&i2c1 {
clock-frequency = <400000>;
pinctrl-names = "default", "gpio";
@@ -217,6 +231,17 @@
>;
};
+ pinctrl_flexspi0: flexspi0grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_ALE__FLEXSPI_A_SCLK 0x1c2
+ MX8MP_IOMUXC_NAND_CE0_B__FLEXSPI_A_SS0_B 0x82
+ MX8MP_IOMUXC_NAND_DATA00__FLEXSPI_A_DATA00 0x82
+ MX8MP_IOMUXC_NAND_DATA01__FLEXSPI_A_DATA01 0x82
+ MX8MP_IOMUXC_NAND_DATA02__FLEXSPI_A_DATA02 0x82
+ MX8MP_IOMUXC_NAND_DATA03__FLEXSPI_A_DATA03 0x82
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index c2d51a46cb3c..f4eaab3ecf03 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -37,6 +37,7 @@
serial1 = &uart2;
serial2 = &uart3;
serial3 = &uart4;
+ spi0 = &flexspi;
};
cpus {
@@ -407,7 +408,6 @@
<&clk IMX8MP_CLK_GIC>,
<&clk IMX8MP_CLK_AUDIO_AHB>,
<&clk IMX8MP_CLK_AUDIO_AXI_SRC>,
- <&clk IMX8MP_CLK_IPG_AUDIO_ROOT>,
<&clk IMX8MP_AUDIO_PLL1>,
<&clk IMX8MP_AUDIO_PLL2>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
@@ -423,7 +423,6 @@
<500000000>,
<400000000>,
<800000000>,
- <400000000>,
<393216000>,
<361267200>;
};
@@ -580,7 +579,7 @@
};
flexcan1: can@308c0000 {
- compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+ compatible = "fsl,imx8mp-flexcan";
reg = <0x308c0000 0x10000>;
interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
@@ -595,7 +594,7 @@
};
flexcan2: can@308d0000 {
- compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+ compatible = "fsl,imx8mp-flexcan";
reg = <0x308d0000 0x10000>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
@@ -761,6 +760,21 @@
status = "disabled";
};
+ flexspi: spi@30bb0000 {
+ compatible = "nxp,imx8mp-fspi";
+ reg = <0x30bb0000 0x10000>, <0x8000000 0x10000000>;
+ reg-names = "fspi_base", "fspi_mmap";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_QSPI_ROOT>,
+ <&clk IMX8MP_CLK_QSPI_ROOT>;
+ clock-names = "fspi", "fspi_en";
+ assigned-clock-rates = <80000000>;
+ assigned-clocks = <&clk IMX8MP_CLK_QSPI>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
sdma1: dma-controller@30bd0000 {
compatible = "fsl,imx8mp-sdma", "fsl,imx8mq-sdma";
reg = <0x30bd0000 0x10000>;
@@ -807,9 +821,9 @@
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
- interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "eth_wake_irq", "macirq";
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_wake_irq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 85b045253a0e..4d2035e3dd7c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -318,6 +318,7 @@
<&clk IMX8MQ_CLK_PCIE1_PHY>,
<&pcie0_refclk>;
clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus";
+ vph-supply = <&vgen5_reg>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
index 81d269296610..f70fb32b96b0 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
@@ -34,6 +34,30 @@
};
};
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ ddc-i2c-bus = <&ddc_i2c_bus>;
+ label = "hdmi";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&lt8912_out>;
+ };
+ };
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usbotg_vbus>;
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
reg_vref_0v9: regulator-vref-0v9 {
compatible = "regulator-fixed";
regulator-name = "vref-0v9";
@@ -70,6 +94,9 @@
};
};
+&dphy {
+ status = "okay";
+};
&fec1 {
pinctrl-names = "default";
@@ -91,6 +118,15 @@
};
};
+/* Release reset of the USB Host HUB */
+&gpio1 {
+ usb-host-reset-hog {
+ gpio-hog;
+ gpios = <14 GPIO_ACTIVE_HIGH>;
+ output-high;
+ };
+};
+
&i2c1 {
clock-frequency = <400000>;
pinctrl-names = "default";
@@ -174,6 +210,98 @@
};
};
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ pca9546: i2cmux@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c4@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+
+ hdmi-bridge@48 {
+ compatible = "lontium,lt8912b";
+ reg = <0x48> ;
+ reset-gpios = <&max7323 0 GPIO_ACTIVE_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hdmi_out_in: endpoint {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&mipi_dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lt8912_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+ };
+
+ ddc_i2c_bus: i2c4@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ };
+
+ i2c4@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+
+ max7323: gpio-expander@68 {
+ compatible = "maxim,max7323";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_max7323>;
+ gpio-controller;
+ reg = <0x68>;
+ #gpio-cells = <2>;
+ };
+ };
+ };
+};
+
+&lcdif {
+ status = "okay";
+};
+
+&mipi_dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mipi_dsi_out: endpoint {
+ remote-endpoint = <&hdmi_out_in>;
+ };
+ };
+ };
+};
+
&uart1 { /* console */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -190,6 +318,29 @@
status = "okay";
};
+&usb_dwc3_0 {
+ dr_mode = "otg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb3_0>;
+ status = "okay";
+};
+
+&usb3_phy0 {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb3_phy1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb3_1>;
+ status = "okay";
+};
+
&usdhc1 {
assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
assigned-clock-rates = <400000000>;
@@ -321,6 +472,19 @@
>;
};
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C4_SCL_I2C4_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C4_SDA_I2C4_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_max7323: max7323grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_RE_B_GPIO3_IO15 0x19
+ >;
+ };
+
pinctrl_reg_arm_dram: reg-arm-dramgrp {
fsl,pins = <
MX8MQ_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x16
@@ -339,6 +503,12 @@
>;
};
+ pinctrl_reg_usbotg_vbus: reg-usbotg-vbusgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x16
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x45
@@ -353,6 +523,18 @@
>;
};
+ pinctrl_usb3_0: usb3-0grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x16
+ >;
+ };
+
+ pinctrl_usb3_1: usb3-1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x16
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x83
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 17c449e12c2e..91df9c5350ae 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -1383,6 +1383,14 @@
<&src IMX8MQ_RESET_PCIE_CTRL_APPS_EN>,
<&src IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF>;
reset-names = "pciephy", "apps", "turnoff";
+ assigned-clocks = <&clk IMX8MQ_CLK_PCIE1_CTRL>,
+ <&clk IMX8MQ_CLK_PCIE1_PHY>,
+ <&clk IMX8MQ_CLK_PCIE1_AUX>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_250M>,
+ <&clk IMX8MQ_SYS2_PLL_100M>,
+ <&clk IMX8MQ_SYS1_PLL_80M>;
+ assigned-clock-rates = <250000000>, <100000000>,
+ <10000000>;
status = "disabled";
};
@@ -1413,6 +1421,14 @@
<&src IMX8MQ_RESET_PCIE2_CTRL_APPS_EN>,
<&src IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF>;
reset-names = "pciephy", "apps", "turnoff";
+ assigned-clocks = <&clk IMX8MQ_CLK_PCIE2_CTRL>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&clk IMX8MQ_CLK_PCIE2_AUX>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_250M>,
+ <&clk IMX8MQ_SYS2_PLL_100M>,
+ <&clk IMX8MQ_SYS1_PLL_80M>;
+ assigned-clock-rates = <250000000>, <100000000>,
+ <10000000>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi
index d607f2f6698c..79a55a0fa2f1 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi
@@ -3,7 +3,7 @@
/*
* dtsi for Hisilicon Hi3660 Coresight
*
- * Copyright (C) 2016-2018 Hisilicon Ltd.
+ * Copyright (C) 2016-2018 HiSilicon Ltd.
*
* Author: Wanglai Shi <shiwanglai@hisilicon.com>
*
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index 963300eede17..f68580dc87d8 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -2,7 +2,7 @@
/*
* dts file for Hisilicon HiKey960 Development Board
*
- * Copyright (C) 2016, Hisilicon Ltd.
+ * Copyright (C) 2016, HiSilicon Ltd.
*
*/
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index cab89dc6f596..f1ec87c05842 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -2,7 +2,7 @@
/*
* dts file for Hisilicon Hi3660 SoC
*
- * Copyright (C) 2016, Hisilicon Ltd.
+ * Copyright (C) 2016, HiSilicon Ltd.
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts b/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts
index 7f9f9886c349..d8abf442ee7e 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts
@@ -2,7 +2,7 @@
/*
* dts file for Hisilicon HiKey970 Development Board
*
- * Copyright (C) 2016, Hisilicon Ltd.
+ * Copyright (C) 2016, HiSilicon Ltd.
* Copyright (C) 2018, Linaro Ltd.
*
*/
diff --git a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
index 8830795c8efc..20698cfd0637 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
@@ -2,7 +2,7 @@
/*
* dts file for Hisilicon Hi3670 SoC
*
- * Copyright (C) 2016, Hisilicon Ltd.
+ * Copyright (C) 2016, HiSilicon Ltd.
* Copyright (C) 2018, Linaro Ltd.
*/
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi
index 7b3010f448c5..3f387f4cf5e0 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi
@@ -2,7 +2,7 @@
/*
* dtsi file for Hisilicon Hi6220 coresight
*
- * Copyright (C) 2017 Hisilicon Ltd.
+ * Copyright (C) 2017 HiSilicon Ltd.
*
* Author: Pengcheng Li <lipengcheng8@huawei.com>
* Leo Yan <leo.yan@linaro.org>
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 91d08673c02e..3df2afb2f637 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -2,7 +2,7 @@
/*
* dts file for Hisilicon HiKey Development Board
*
- * Copyright (C) 2015, Hisilicon Ltd.
+ * Copyright (C) 2015, HiSilicon Ltd.
*
*/
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index d426c6c8722b..dde9371dc545 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -2,7 +2,7 @@
/*
* dts file for Hisilicon Hi6220 SoC
*
- * Copyright (C) 2015, Hisilicon Ltd.
+ * Copyright (C) 2015, HiSilicon Ltd.
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/hisilicon/hip05-d02.dts b/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
index 369b69b17b91..40f3e00ac832 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
@@ -2,7 +2,7 @@
/**
* dts file for Hisilicon D02 Development Board
*
- * Copyright (C) 2014,2015 Hisilicon Ltd.
+ * Copyright (C) 2014,2015 HiSilicon Ltd.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
index 4aed8d440b3a..7b2abd10d3d6 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
@@ -2,7 +2,7 @@
/**
* dts file for Hisilicon D02 Development Board
*
- * Copyright (C) 2014,2015 Hisilicon Ltd.
+ * Copyright (C) 2014,2015 HiSilicon Ltd.
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
index 9f4a930e734d..35af5d3821e8 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
@@ -2,7 +2,7 @@
/**
* dts file for Hisilicon D03 Development Board
*
- * Copyright (C) 2016 Hisilicon Ltd.
+ * Copyright (C) 2016 HiSilicon Ltd.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index 7deca5f763d5..70d7732dd348 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -2,7 +2,7 @@
/**
* dts file for Hisilicon D03 Development Board
*
- * Copyright (C) 2016 Hisilicon Ltd.
+ * Copyright (C) 2016 HiSilicon Ltd.
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
index 81a2312c8a26..c3df67845f03 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
@@ -2,7 +2,7 @@
/**
* dts file for Hisilicon D05 Development Board
*
- * Copyright (C) 2016 Hisilicon Ltd.
+ * Copyright (C) 2016 HiSilicon Ltd.
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 2172d8071181..6baf6a686450 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -2,7 +2,7 @@
/**
* dts file for Hisilicon D05 Development Board
*
- * Copyright (C) 2016 Hisilicon Ltd.
+ * Copyright (C) 2016 HiSilicon Ltd.
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index 53e817c5f6f3..a05b1ab2dd12 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -19,6 +19,8 @@
aliases {
spi0 = &spi0;
ethernet1 = &eth1;
+ mmc0 = &sdhci0;
+ mmc1 = &sdhci1;
};
chosen {
@@ -109,10 +111,8 @@
};
firmware {
- turris-mox-rwtm {
- compatible = "cznic,turris-mox-rwtm";
- mboxes = <&rwtm 0>;
- status = "okay";
+ armada-3700-rwtm {
+ compatible = "marvell,armada-3700-rwtm-firmware", "cznic,turris-mox-rwtm";
};
};
};
@@ -121,6 +121,7 @@
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
clock-frequency = <100000>;
+ /delete-property/ mrvl,i2c-fast-mode;
status = "okay";
rtc@6f {
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 456dcd4a7793..5db81a416cd6 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -134,7 +134,7 @@
uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
- reg = <0x12000 0x200>;
+ reg = <0x12000 0x18>;
clocks = <&xtalclk>;
interrupts =
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
@@ -504,4 +504,12 @@
};
};
};
+
+ firmware {
+ armada-3700-rwtm {
+ compatible = "marvell,armada-3700-rwtm-firmware";
+ mboxes = <&rwtm 0>;
+ status = "okay";
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
index d9bbbfa4b4eb..4a23f65d475f 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
@@ -29,6 +29,7 @@
};
&ap_sdhci0 {
- compatible = "marvell,armada-ap807-sdhci";
+ compatible = "marvell,armada-ap807-sdhci",
+ "marvell,armada-ap806-sdhci"; /* Backward compatibility */
};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dts b/arch/arm64/boot/dts/marvell/cn9130-db.dts
index 2c2af001619b..9758609541c7 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-db.dts
+++ b/arch/arm64/boot/dts/marvell/cn9130-db.dts
@@ -260,7 +260,7 @@
};
partition@200000 {
label = "Linux";
- reg = <0x200000 0xd00000>;
+ reg = <0x200000 0xe00000>;
};
partition@1000000 {
label = "Filesystem";
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index a1c50adc98fa..4f68ebed2e31 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -13,8 +13,16 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana-rev7.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-burnet.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-damu.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-fennel-sku1.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-fennel-sku6.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-fennel14.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-juniper-sku16.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-kappa.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-kenzo.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-willow-sku0.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-willow-sku1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kakadu.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku16.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku272.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt8167.dtsi b/arch/arm64/boot/dts/mediatek/mt8167.dtsi
index 1c5639ead622..9029051624a6 100644
--- a/arch/arm64/boot/dts/mediatek/mt8167.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8167.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/clock/mt8167-clk.h>
#include <dt-bindings/memory/mt8167-larb-port.h>
+#include <dt-bindings/power/mt8167-power.h>
#include "mt8167-pinfunc.h"
@@ -34,6 +35,73 @@
#clock-cells = <1>;
};
+ scpsys: syscon@10006000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0 0x10006000 0 0x1000>;
+ #power-domain-cells = <1>;
+
+ spm: power-controller {
+ compatible = "mediatek,mt8167-power-controller";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ /* power domains of the SoC */
+ power-domain@MT8167_POWER_DOMAIN_MM {
+ reg = <MT8167_POWER_DOMAIN_MM>;
+ clocks = <&topckgen CLK_TOP_SMI_MM>;
+ clock-names = "mm";
+ #power-domain-cells = <0>;
+ mediatek,infracfg = <&infracfg>;
+ };
+
+ power-domain@MT8167_POWER_DOMAIN_VDEC {
+ reg = <MT8167_POWER_DOMAIN_VDEC>;
+ clocks = <&topckgen CLK_TOP_SMI_MM>,
+ <&topckgen CLK_TOP_RG_VDEC>;
+ clock-names = "mm", "vdec";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8167_POWER_DOMAIN_ISP {
+ reg = <MT8167_POWER_DOMAIN_ISP>;
+ clocks = <&topckgen CLK_TOP_SMI_MM>;
+ clock-names = "mm";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8167_POWER_DOMAIN_MFG_ASYNC {
+ reg = <MT8167_POWER_DOMAIN_MFG_ASYNC>;
+ clocks = <&topckgen CLK_TOP_RG_AXI_MFG>,
+ <&topckgen CLK_TOP_RG_SLOW_MFG>;
+ clock-names = "axi_mfg", "mfg";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+ mediatek,infracfg = <&infracfg>;
+
+ power-domain@MT8167_POWER_DOMAIN_MFG_2D {
+ reg = <MT8167_POWER_DOMAIN_MFG_2D>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+
+ power-domain@MT8167_POWER_DOMAIN_MFG {
+ reg = <MT8167_POWER_DOMAIN_MFG>;
+ #power-domain-cells = <0>;
+ mediatek,infracfg = <&infracfg>;
+ };
+ };
+ };
+
+ power-domain@MT8167_POWER_DOMAIN_CONN {
+ reg = <MT8167_POWER_DOMAIN_CONN>;
+ #power-domain-cells = <0>;
+ mediatek,infracfg = <&infracfg>;
+ };
+ };
+ };
+
imgsys: syscon@15000000 {
compatible = "mediatek,mt8167-imgsys", "syscon";
reg = <0 0x15000000 0 0x1000>;
@@ -57,5 +125,58 @@
#interrupt-cells = <2>;
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ mmsys: mmsys@14000000 {
+ compatible = "mediatek,mt8167-mmsys", "syscon";
+ reg = <0 0x14000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ smi_common: smi@14017000 {
+ compatible = "mediatek,mt8167-smi-common";
+ reg = <0 0x14017000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_SMI_COMMON>,
+ <&mmsys CLK_MM_SMI_COMMON>;
+ clock-names = "apb", "smi";
+ power-domains = <&spm MT8167_POWER_DOMAIN_MM>;
+ };
+
+ larb0: larb@14016000 {
+ compatible = "mediatek,mt8167-smi-larb";
+ reg = <0 0x14016000 0 0x1000>;
+ mediatek,smi = <&smi_common>;
+ clocks = <&mmsys CLK_MM_SMI_LARB0>,
+ <&mmsys CLK_MM_SMI_LARB0>;
+ clock-names = "apb", "smi";
+ power-domains = <&spm MT8167_POWER_DOMAIN_MM>;
+ };
+
+ larb1: larb@15001000 {
+ compatible = "mediatek,mt8167-smi-larb";
+ reg = <0 0x15001000 0 0x1000>;
+ mediatek,smi = <&smi_common>;
+ clocks = <&imgsys CLK_IMG_LARB1_SMI>,
+ <&imgsys CLK_IMG_LARB1_SMI>;
+ clock-names = "apb", "smi";
+ power-domains = <&spm MT8167_POWER_DOMAIN_ISP>;
+ };
+
+ larb2: larb@16010000 {
+ compatible = "mediatek,mt8167-smi-larb";
+ reg = <0 0x16010000 0 0x1000>;
+ mediatek,smi = <&smi_common>;
+ clocks = <&vdecsys CLK_VDEC_CKEN>,
+ <&vdecsys CLK_VDEC_LARB1_CKEN>;
+ clock-names = "apb", "smi";
+ power-domains = <&spm MT8167_POWER_DOMAIN_VDEC>;
+ };
+
+ iommu: m4u@10203000 {
+ compatible = "mediatek,mt8167-m4u";
+ reg = <0 0x10203000 0 0x1000>;
+ mediatek,larbs = <&larb0 &larb1 &larb2>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_LOW>;
+ #iommu-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 003a5653c505..22f271b1f5b0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -1459,14 +1459,11 @@
clock-names = "apb", "smi";
};
- vcodec_enc: vcodec@18002000 {
+ vcodec_enc_avc: vcodec@18002000 {
compatible = "mediatek,mt8173-vcodec-enc";
- reg = <0 0x18002000 0 0x1000>, /* VENC_SYS */
- <0 0x19002000 0 0x1000>; /* VENC_LT_SYS */
- interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 202 IRQ_TYPE_LEVEL_LOW>;
- mediatek,larb = <&larb3>,
- <&larb5>;
+ reg = <0 0x18002000 0 0x1000>; /* VENC_SYS */
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
+ mediatek,larb = <&larb3>;
iommus = <&iommu M4U_PORT_VENC_RCPU>,
<&iommu M4U_PORT_VENC_REC>,
<&iommu M4U_PORT_VENC_BSDMA>,
@@ -1477,29 +1474,12 @@
<&iommu M4U_PORT_VENC_REF_LUMA>,
<&iommu M4U_PORT_VENC_REF_CHROMA>,
<&iommu M4U_PORT_VENC_NBM_RDMA>,
- <&iommu M4U_PORT_VENC_NBM_WDMA>,
- <&iommu M4U_PORT_VENC_RCPU_SET2>,
- <&iommu M4U_PORT_VENC_REC_FRM_SET2>,
- <&iommu M4U_PORT_VENC_BSDMA_SET2>,
- <&iommu M4U_PORT_VENC_SV_COMA_SET2>,
- <&iommu M4U_PORT_VENC_RD_COMA_SET2>,
- <&iommu M4U_PORT_VENC_CUR_LUMA_SET2>,
- <&iommu M4U_PORT_VENC_CUR_CHROMA_SET2>,
- <&iommu M4U_PORT_VENC_REF_LUMA_SET2>,
- <&iommu M4U_PORT_VENC_REC_CHROMA_SET2>;
+ <&iommu M4U_PORT_VENC_NBM_WDMA>;
mediatek,vpu = <&vpu>;
- clocks = <&topckgen CLK_TOP_VENCPLL_D2>,
- <&topckgen CLK_TOP_VENC_SEL>,
- <&topckgen CLK_TOP_UNIVPLL1_D2>,
- <&topckgen CLK_TOP_VENC_LT_SEL>;
- clock-names = "venc_sel_src",
- "venc_sel",
- "venc_lt_sel_src",
- "venc_lt_sel";
- assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
- <&topckgen CLK_TOP_VENC_LT_SEL>;
- assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>,
- <&topckgen CLK_TOP_VCODECPLL_370P5>;
+ clocks = <&topckgen CLK_TOP_VENC_SEL>;
+ clock-names = "venc_sel";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>;
};
jpegdec: jpegdec@18004000 {
@@ -1531,5 +1511,27 @@
<&vencltsys CLK_VENCLT_CKE0>;
clock-names = "apb", "smi";
};
+
+ vcodec_enc_vp8: vcodec@19002000 {
+ compatible = "mediatek,mt8173-vcodec-enc-vp8";
+ reg = <0 0x19002000 0 0x1000>; /* VENC_LT_SYS */
+ interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_LOW>;
+ iommus = <&iommu M4U_PORT_VENC_RCPU_SET2>,
+ <&iommu M4U_PORT_VENC_REC_FRM_SET2>,
+ <&iommu M4U_PORT_VENC_BSDMA_SET2>,
+ <&iommu M4U_PORT_VENC_SV_COMA_SET2>,
+ <&iommu M4U_PORT_VENC_RD_COMA_SET2>,
+ <&iommu M4U_PORT_VENC_CUR_LUMA_SET2>,
+ <&iommu M4U_PORT_VENC_CUR_CHROMA_SET2>,
+ <&iommu M4U_PORT_VENC_REF_LUMA_SET2>,
+ <&iommu M4U_PORT_VENC_REC_CHROMA_SET2>;
+ mediatek,larb = <&larb5>;
+ mediatek,vpu = <&vpu>;
+ clocks = <&topckgen CLK_TOP_VENC_LT_SEL>;
+ clock-names = "venc_lt_sel";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_LT_SEL>;
+ assigned-clock-parents =
+ <&topckgen CLK_TOP_VCODECPLL_370P5>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index edff1e03e6fe..7bc0a6a7fadf 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -42,6 +42,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&mt6358_vgpu_reg>;
+ sram-supply = <&mt6358_vsram_gpu_reg>;
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c_pins_0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts
new file mode 100644
index 000000000000..a8d6f32ade8d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi.dtsi"
+
+/ {
+ model = "Google burnet board";
+ compatible = "google,burnet", "mediatek,mt8183";
+};
+
+&mt6358codec {
+ mediatek,dmic-mode = <1>; /* one-wire */
+};
+
+&i2c0 {
+ touchscreen@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+ interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
+
+ post-power-on-delay-ms = <200>;
+ hid-descr-addr = <0x0020>;
+ };
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
new file mode 100644
index 000000000000..ef6257c9a2d2
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-fennel.dtsi"
+
+/ {
+ model = "Google fennel sku1 board";
+ compatible = "google,fennel-sku1", "google,fennel", "mediatek,mt8183";
+
+ pwmleds {
+ compatible = "pwm-leds";
+ keyboard_backlight: keyboard-backlight {
+ label = "cros_ec::kbd_backlight";
+ pwms = <&cros_ec_pwm 0>;
+ max-brightness = <1023>;
+ };
+ };
+};
+
+&cros_ec_pwm {
+ status = "okay";
+};
+
+&touchscreen {
+ status = "okay";
+
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+ interrupt-parent = <&pio>;
+ interrupts = <155 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ post-power-on-delay-ms = <10>;
+ hid-descr-addr = <0x0001>;
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_FENNEL";
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
new file mode 100644
index 000000000000..899c2e42385c
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-fennel.dtsi"
+
+/ {
+ model = "Google fennel sku6 board";
+ compatible = "google,fennel-sku6", "google,fennel", "mediatek,mt8183";
+};
+
+&touchscreen {
+ status = "okay";
+
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+ interrupt-parent = <&pio>;
+ interrupts = <155 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ post-power-on-delay-ms = <10>;
+ hid-descr-addr = <0x0001>;
+};
+
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_FENNEL";
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi
new file mode 100644
index 000000000000..bbe6c338f465
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi.dtsi"
+
+&mt6358codec {
+ mediatek,dmic-mode = <1>; /* one-wire */
+};
+
+&i2c2 {
+ trackpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ hid-descr-addr = <0x20>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_pins>;
+
+ interrupts-extended = <&pio 7 IRQ_TYPE_LEVEL_LOW>;
+
+ wakeup-source;
+ };
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts
new file mode 100644
index 000000000000..e8c41f6b4b0d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel14.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-fennel.dtsi"
+
+/ {
+ model = "Google fennel14 sku0 board";
+ compatible = "google,fennel-sku0", "google,fennel", "mediatek,mt8183";
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_FENNEL14";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts
new file mode 100644
index 000000000000..b3f46c16e5d7
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kappa.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi.dtsi"
+
+/ {
+ model = "Google kappa board";
+ compatible = "google,kappa", "mediatek,mt8183";
+};
+
+&mt6358codec {
+ mediatek,dmic-mode = <1>; /* one-wire */
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
new file mode 100644
index 000000000000..6f1aa692753a
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-juniper.dtsi"
+
+/ {
+ model = "Google kenzo sku17 board";
+ compatible = "google,juniper-sku17", "google,juniper", "mediatek,mt8183";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts
new file mode 100644
index 000000000000..281265f082db
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku0.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-willow.dtsi"
+
+/ {
+ model = "Google willow board sku0";
+ compatible = "google,willow-sku0", "google,willow", "mediatek,mt8183";
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts
new file mode 100644
index 000000000000..22e56bdc1ee3
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow-sku1.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-willow.dtsi"
+
+/ {
+ model = "Google willow board sku1";
+ compatible = "google,willow-sku1", "google,willow", "mediatek,mt8183";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
new file mode 100644
index 000000000000..76d33540166f
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi.dtsi"
+
+&i2c2 {
+ trackpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ hid-descr-addr = <0x20>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_pins>;
+
+ interrupts-extended = <&pio 7 IRQ_TYPE_LEVEL_LOW>;
+
+ wakeup-source;
+ };
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_JUNIPER";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
index 4049dff8464b..d8826c82bcda 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
@@ -92,6 +92,14 @@
};
};
+&cros_ec {
+ cros_ec_pwm: ec-pwm {
+ compatible = "google,cros-ec-pwm";
+ #pwm-cells = <1>;
+ status = "disabled";
+ };
+};
+
&dsi0 {
status = "okay";
/delete-node/panel@0;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
index b442e38a3156..28966a65391b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
@@ -88,11 +88,13 @@
pinctrl-0 = <&i2c2_pins>;
status = "okay";
clock-frequency = <400000>;
+ vbus-supply = <&mt6358_vcamio_reg>;
eeprom@58 {
compatible = "atmel,24c32";
reg = <0x58>;
pagesize = <32>;
+ vcc-supply = <&mt6358_vcama2_reg>;
};
};
@@ -101,11 +103,13 @@
pinctrl-0 = <&i2c4_pins>;
status = "okay";
clock-frequency = <400000>;
+ vbus-supply = <&mt6358_vcn18_reg>;
eeprom@54 {
compatible = "atmel,24c32";
reg = <0x54>;
pagesize = <32>;
+ vcc-supply = <&mt6358_vcn18_reg>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
index 2f5234a16ead..3aa79403c0c2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
@@ -62,11 +62,13 @@
pinctrl-0 = <&i2c2_pins>;
status = "okay";
clock-frequency = <400000>;
+ vbus-supply = <&mt6358_vcamio_reg>;
eeprom@58 {
compatible = "atmel,24c64";
reg = <0x58>;
pagesize = <32>;
+ vcc-supply = <&mt6358_vcamio_reg>;
};
};
@@ -75,11 +77,13 @@
pinctrl-0 = <&i2c4_pins>;
status = "okay";
clock-frequency = <400000>;
+ vbus-supply = <&mt6358_vcn18_reg>;
eeprom@54 {
compatible = "atmel,24c64";
reg = <0x54>;
pagesize = <32>;
+ vcc-supply = <&mt6358_vcn18_reg>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
index fbc471ccf805..30c183c96a54 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
@@ -71,11 +71,13 @@
pinctrl-0 = <&i2c2_pins>;
status = "okay";
clock-frequency = <400000>;
+ vbus-supply = <&mt6358_vcamio_reg>;
eeprom@58 {
compatible = "atmel,24c32";
reg = <0x58>;
pagesize = <32>;
+ vcc-supply = <&mt6358_vcama2_reg>;
};
};
@@ -84,11 +86,13 @@
pinctrl-0 = <&i2c4_pins>;
status = "okay";
clock-frequency = <400000>;
+ vbus-supply = <&mt6358_vcn18_reg>;
eeprom@54 {
compatible = "atmel,24c32";
reg = <0x54>;
pagesize = <32>;
+ vcc-supply = <&mt6358_vcn18_reg>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index ff56bcfa3370..ae549d55a94f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -279,6 +279,11 @@
};
};
+&gpu {
+ mali-supply = <&mt6358_vgpu_reg>;
+ sram-supply = <&mt6358_vsram_gpu_reg>;
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
@@ -816,6 +821,10 @@
compatible = "google,extcon-usbc-cros-ec";
google,usb-port-id = <0>;
};
+
+ cbas {
+ compatible = "google,cros-cbas";
+ };
};
};
@@ -847,6 +856,20 @@
status = "okay";
};
+&thermal_zones {
+ tboard1 {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&tboard_thermistor1>;
+ };
+
+ tboard2 {
+ polling-delay = <1000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&tboard_thermistor2>;
+ };
+};
+
&u3phy {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
index 0aff5eb52e88..ee912825cfc6 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
@@ -68,6 +68,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&mt6358_vgpu_reg>;
+ sram-supply = <&mt6358_vsram_gpu_reg>;
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c_pins_0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index c5e822b6b77a..f90df6439c08 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -197,6 +197,91 @@
};
};
+ gpu_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <625000>, <850000>;
+ };
+
+ opp-320000000 {
+ opp-hz = /bits/ 64 <320000000>;
+ opp-microvolt = <631250>, <850000>;
+ };
+
+ opp-340000000 {
+ opp-hz = /bits/ 64 <340000000>;
+ opp-microvolt = <637500>, <850000>;
+ };
+
+ opp-360000000 {
+ opp-hz = /bits/ 64 <360000000>;
+ opp-microvolt = <643750>, <850000>;
+ };
+
+ opp-380000000 {
+ opp-hz = /bits/ 64 <380000000>;
+ opp-microvolt = <650000>, <850000>;
+ };
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <656250>, <850000>;
+ };
+
+ opp-420000000 {
+ opp-hz = /bits/ 64 <420000000>;
+ opp-microvolt = <662500>, <850000>;
+ };
+
+ opp-460000000 {
+ opp-hz = /bits/ 64 <460000000>;
+ opp-microvolt = <675000>, <850000>;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <687500>, <850000>;
+ };
+
+ opp-540000000 {
+ opp-hz = /bits/ 64 <540000000>;
+ opp-microvolt = <700000>, <850000>;
+ };
+
+ opp-580000000 {
+ opp-hz = /bits/ 64 <580000000>;
+ opp-microvolt = <712500>, <850000>;
+ };
+
+ opp-620000000 {
+ opp-hz = /bits/ 64 <620000000>;
+ opp-microvolt = <725000>, <850000>;
+ };
+
+ opp-653000000 {
+ opp-hz = /bits/ 64 <653000000>;
+ opp-microvolt = <743750>, <850000>;
+ };
+
+ opp-698000000 {
+ opp-hz = /bits/ 64 <698000000>;
+ opp-microvolt = <768750>, <868750>;
+ };
+
+ opp-743000000 {
+ opp-hz = /bits/ 64 <743000000>;
+ opp-microvolt = <793750>, <893750>;
+ };
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <825000>, <925000>;
+ };
+ };
+
pmu-a53 {
compatible = "arm,cortex-a53-pmu";
interrupt-parent = <&gic>;
@@ -673,7 +758,7 @@
nvmem-cell-names = "calibration-data";
};
- thermal-zones {
+ thermal_zones: thermal-zones {
cpu_thermal: cpu_thermal {
polling-delay-passive = <100>;
polling-delay = <500>;
@@ -1118,6 +1203,26 @@
#clock-cells = <1>;
};
+ gpu: gpu@13040000 {
+ compatible = "mediatek,mt8183-mali", "arm,mali-bifrost";
+ reg = <0 0x13040000 0 0x4000>;
+ interrupts =
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "job", "mmu", "gpu";
+
+ clocks = <&topckgen CLK_TOP_MFGPLL_CK>;
+
+ power-domains =
+ <&spm MT8183_POWER_DOMAIN_MFG_CORE0>,
+ <&spm MT8183_POWER_DOMAIN_MFG_CORE1>,
+ <&spm MT8183_POWER_DOMAIN_MFG_2D>;
+ power-domain-names = "core0", "core1", "core2";
+
+ operating-points-v2 = <&gpu_opp_table>;
+ };
+
mmsys: syscon@14000000 {
compatible = "mediatek,mt8183-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
@@ -1263,13 +1368,14 @@
};
smi_common: smi@14019000 {
- compatible = "mediatek,mt8183-smi-common", "syscon";
+ compatible = "mediatek,mt8183-smi-common";
reg = <0 0x14019000 0 0x1000>;
clocks = <&mmsys CLK_MM_SMI_COMMON>,
<&mmsys CLK_MM_SMI_COMMON>,
<&mmsys CLK_MM_GALS_COMM0>,
<&mmsys CLK_MM_GALS_COMM1>;
clock-names = "apb", "smi", "gals0", "gals1";
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
};
imgsys: syscon@15020000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 683743f81849..74c1a5df3fdb 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -817,7 +817,7 @@
};
hda@3510000 {
- nvidia,model = "jetson-tx2-hda";
+ nvidia,model = "NVIDIA Jetson TX2 HDA";
status = "okay";
};
@@ -1109,6 +1109,6 @@
<&i2s5_port>, <&i2s6_port>, <&dmic1_port>, <&dmic2_port>,
<&dmic3_port>, <&dspk1_port>, <&dspk2_port>;
- label = "jetson-tx2-ape";
+ label = "NVIDIA Jetson TX2 APE";
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 9f75bbf00cf7..d02f6bf3e2ca 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -1082,7 +1082,7 @@
};
smmu: iommu@12000000 {
- compatible = "arm,mmu-500";
+ compatible = "nvidia,tegra186-smmu", "nvidia,smmu-500";
reg = <0 0x12000000 0 0x800000>;
interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
@@ -1152,6 +1152,8 @@
stream-match-mask = <0x7f80>;
#global-interrupts = <1>;
#iommu-cells = <1>;
+
+ nvidia,memory-controller = <&mc>;
};
host1x@13e00000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index d618f197a1d3..96bd01cadb18 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -554,7 +554,7 @@
};
hda@3510000 {
- nvidia,model = "jetson-xavier-hda";
+ nvidia,model = "NVIDIA Jetson AGX Xavier HDA";
status = "okay";
};
@@ -831,7 +831,7 @@
<&i2s1_port>, <&i2s2_port>, <&i2s4_port>, <&i2s6_port>,
<&dmic3_port>;
- label = "jetson-xavier-ape";
+ label = "NVIDIA Jetson AGX Xavier APE";
widgets =
"Microphone", "CVB-RT MIC Jack",
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
index d1d77220154f..836a7e0a4267 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
@@ -15,6 +15,577 @@
interrupt-controller@2a40000 {
status = "okay";
};
+
+ ahub@2900800 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+
+ xbar_admaif0_ep: endpoint {
+ remote-endpoint = <&admaif0_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+
+ xbar_admaif1_ep: endpoint {
+ remote-endpoint = <&admaif1_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <0x2>;
+
+ xbar_admaif2_ep: endpoint {
+ remote-endpoint = <&admaif2_ep>;
+ };
+ };
+
+ port@3 {
+ reg = <0x3>;
+
+ xbar_admaif3_ep: endpoint {
+ remote-endpoint = <&admaif3_ep>;
+ };
+ };
+
+ port@4 {
+ reg = <0x4>;
+
+ xbar_admaif4_ep: endpoint {
+ remote-endpoint = <&admaif4_ep>;
+ };
+ };
+
+ port@5 {
+ reg = <0x5>;
+
+ xbar_admaif5_ep: endpoint {
+ remote-endpoint = <&admaif5_ep>;
+ };
+ };
+
+ port@6 {
+ reg = <0x6>;
+
+ xbar_admaif6_ep: endpoint {
+ remote-endpoint = <&admaif6_ep>;
+ };
+ };
+
+ port@7 {
+ reg = <0x7>;
+
+ xbar_admaif7_ep: endpoint {
+ remote-endpoint = <&admaif7_ep>;
+ };
+ };
+
+ port@8 {
+ reg = <0x8>;
+
+ xbar_admaif8_ep: endpoint {
+ remote-endpoint = <&admaif8_ep>;
+ };
+ };
+
+ port@9 {
+ reg = <0x9>;
+
+ xbar_admaif9_ep: endpoint {
+ remote-endpoint = <&admaif9_ep>;
+ };
+ };
+
+ port@a {
+ reg = <0xa>;
+
+ xbar_admaif10_ep: endpoint {
+ remote-endpoint = <&admaif10_ep>;
+ };
+ };
+
+ port@b {
+ reg = <0xb>;
+
+ xbar_admaif11_ep: endpoint {
+ remote-endpoint = <&admaif11_ep>;
+ };
+ };
+
+ port@c {
+ reg = <0xc>;
+
+ xbar_admaif12_ep: endpoint {
+ remote-endpoint = <&admaif12_ep>;
+ };
+ };
+
+ port@d {
+ reg = <0xd>;
+
+ xbar_admaif13_ep: endpoint {
+ remote-endpoint = <&admaif13_ep>;
+ };
+ };
+
+ port@e {
+ reg = <0xe>;
+
+ xbar_admaif14_ep: endpoint {
+ remote-endpoint = <&admaif14_ep>;
+ };
+ };
+
+ port@f {
+ reg = <0xf>;
+
+ xbar_admaif15_ep: endpoint {
+ remote-endpoint = <&admaif15_ep>;
+ };
+ };
+
+ port@10 {
+ reg = <0x10>;
+
+ xbar_admaif16_ep: endpoint {
+ remote-endpoint = <&admaif16_ep>;
+ };
+ };
+
+ port@11 {
+ reg = <0x11>;
+
+ xbar_admaif17_ep: endpoint {
+ remote-endpoint = <&admaif17_ep>;
+ };
+ };
+
+ port@12 {
+ reg = <0x12>;
+
+ xbar_admaif18_ep: endpoint {
+ remote-endpoint = <&admaif18_ep>;
+ };
+ };
+
+ port@13 {
+ reg = <0x13>;
+
+ xbar_admaif19_ep: endpoint {
+ remote-endpoint = <&admaif19_ep>;
+ };
+ };
+
+ xbar_i2s3_port: port@16 {
+ reg = <0x16>;
+
+ xbar_i2s3_ep: endpoint {
+ remote-endpoint = <&i2s3_cif_ep>;
+ };
+ };
+
+ xbar_i2s5_port: port@18 {
+ reg = <0x18>;
+
+ xbar_i2s5_ep: endpoint {
+ remote-endpoint = <&i2s5_cif_ep>;
+ };
+ };
+
+ xbar_dmic1_port: port@1a {
+ reg = <0x1a>;
+
+ xbar_dmic1_ep: endpoint {
+ remote-endpoint = <&dmic1_cif_ep>;
+ };
+ };
+
+ xbar_dmic2_port: port@1b {
+ reg = <0x1b>;
+
+ xbar_dmic2_ep: endpoint {
+ remote-endpoint = <&dmic2_cif_ep>;
+ };
+ };
+
+ xbar_dmic4_port: port@1d {
+ reg = <0x1d>;
+
+ xbar_dmic4_ep: endpoint {
+ remote-endpoint = <&dmic4_cif_ep>;
+ };
+ };
+
+ xbar_dspk1_port: port@1e {
+ reg = <0x1e>;
+
+ xbar_dspk1_ep: endpoint {
+ remote-endpoint = <&dspk1_cif_ep>;
+ };
+ };
+
+ xbar_dspk2_port: port@1f {
+ reg = <0x1f>;
+
+ xbar_dspk2_ep: endpoint {
+ remote-endpoint = <&dspk2_cif_ep>;
+ };
+ };
+ };
+
+ admaif@290f000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ admaif0_port: port@0 {
+ reg = <0x0>;
+
+ admaif0_ep: endpoint {
+ remote-endpoint = <&xbar_admaif0_ep>;
+ };
+ };
+
+ admaif1_port: port@1 {
+ reg = <0x1>;
+
+ admaif1_ep: endpoint {
+ remote-endpoint = <&xbar_admaif1_ep>;
+ };
+ };
+
+ admaif2_port: port@2 {
+ reg = <0x2>;
+
+ admaif2_ep: endpoint {
+ remote-endpoint = <&xbar_admaif2_ep>;
+ };
+ };
+
+ admaif3_port: port@3 {
+ reg = <0x3>;
+
+ admaif3_ep: endpoint {
+ remote-endpoint = <&xbar_admaif3_ep>;
+ };
+ };
+
+ admaif4_port: port@4 {
+ reg = <0x4>;
+
+ admaif4_ep: endpoint {
+ remote-endpoint = <&xbar_admaif4_ep>;
+ };
+ };
+
+ admaif5_port: port@5 {
+ reg = <0x5>;
+
+ admaif5_ep: endpoint {
+ remote-endpoint = <&xbar_admaif5_ep>;
+ };
+ };
+
+ admaif6_port: port@6 {
+ reg = <0x6>;
+
+ admaif6_ep: endpoint {
+ remote-endpoint = <&xbar_admaif6_ep>;
+ };
+ };
+
+ admaif7_port: port@7 {
+ reg = <0x7>;
+
+ admaif7_ep: endpoint {
+ remote-endpoint = <&xbar_admaif7_ep>;
+ };
+ };
+
+ admaif8_port: port@8 {
+ reg = <0x8>;
+
+ admaif8_ep: endpoint {
+ remote-endpoint = <&xbar_admaif8_ep>;
+ };
+ };
+
+ admaif9_port: port@9 {
+ reg = <0x9>;
+
+ admaif9_ep: endpoint {
+ remote-endpoint = <&xbar_admaif9_ep>;
+ };
+ };
+
+ admaif10_port: port@a {
+ reg = <0xa>;
+
+ admaif10_ep: endpoint {
+ remote-endpoint = <&xbar_admaif10_ep>;
+ };
+ };
+
+ admaif11_port: port@b {
+ reg = <0xb>;
+
+ admaif11_ep: endpoint {
+ remote-endpoint = <&xbar_admaif11_ep>;
+ };
+ };
+
+ admaif12_port: port@c {
+ reg = <0xc>;
+
+ admaif12_ep: endpoint {
+ remote-endpoint = <&xbar_admaif12_ep>;
+ };
+ };
+
+ admaif13_port: port@d {
+ reg = <0xd>;
+
+ admaif13_ep: endpoint {
+ remote-endpoint = <&xbar_admaif13_ep>;
+ };
+ };
+
+ admaif14_port: port@e {
+ reg = <0xe>;
+
+ admaif14_ep: endpoint {
+ remote-endpoint = <&xbar_admaif14_ep>;
+ };
+ };
+
+ admaif15_port: port@f {
+ reg = <0xf>;
+
+ admaif15_ep: endpoint {
+ remote-endpoint = <&xbar_admaif15_ep>;
+ };
+ };
+
+ admaif16_port: port@10 {
+ reg = <0x10>;
+
+ admaif16_ep: endpoint {
+ remote-endpoint = <&xbar_admaif16_ep>;
+ };
+ };
+
+ admaif17_port: port@11 {
+ reg = <0x11>;
+
+ admaif17_ep: endpoint {
+ remote-endpoint = <&xbar_admaif17_ep>;
+ };
+ };
+
+ admaif18_port: port@12 {
+ reg = <0x12>;
+
+ admaif18_ep: endpoint {
+ remote-endpoint = <&xbar_admaif18_ep>;
+ };
+ };
+
+ admaif19_port: port@13 {
+ reg = <0x13>;
+
+ admaif19_ep: endpoint {
+ remote-endpoint = <&xbar_admaif19_ep>;
+ };
+ };
+ };
+ };
+
+ i2s@2901200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s3_ep>;
+ };
+ };
+
+ i2s3_port: port@1 {
+ reg = <1>;
+
+ i2s3_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901400 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s5_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s5_ep>;
+ };
+ };
+
+ i2s5_port: port@1 {
+ reg = <1>;
+
+ i2s5_dap_ep: endpoint@0 {
+ dai-format = "i2s";
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic1_ep>;
+ };
+ };
+
+ dmic1_port: port@1 {
+ reg = <1>;
+
+ dmic1_dap_ep: endpoint {
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic2_ep>;
+ };
+ };
+
+ dmic2_port: port@1 {
+ reg = <1>;
+
+ dmic2_dap_ep: endpoint {
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904300 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic4_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic4_ep>;
+ };
+ };
+
+ dmic4_port: port@1 {
+ reg = <1>;
+
+ dmic4_dap_ep: endpoint {
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+
+ dspk@2905000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dspk1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dspk1_ep>;
+ };
+ };
+
+ dspk1_port: port@1 {
+ reg = <1>;
+
+ dspk1_dap_ep: endpoint {
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+
+ dspk@2905100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dspk2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dspk2_ep>;
+ };
+ };
+
+ dspk2_port: port@1 {
+ reg = <1>;
+
+ dspk2_dap_ep: endpoint {
+ /* Place holder for external Codec */
+ };
+ };
+ };
+ };
+ };
};
ddc: i2c@3190000 {
@@ -36,7 +607,7 @@
};
hda@3510000 {
- nvidia,model = "jetson-xavier-nx-hda";
+ nvidia,model = "NVIDIA Jetson Xavier NX HDA";
status = "okay";
};
@@ -265,6 +836,28 @@
regulator-boot-on;
};
+ sound {
+ compatible = "nvidia,tegra186-audio-graph-card";
+ status = "okay";
+
+ dais = /* ADMAIF (FE) Ports */
+ <&admaif0_port>, <&admaif1_port>, <&admaif2_port>, <&admaif3_port>,
+ <&admaif4_port>, <&admaif5_port>, <&admaif6_port>, <&admaif7_port>,
+ <&admaif8_port>, <&admaif9_port>, <&admaif10_port>, <&admaif11_port>,
+ <&admaif12_port>, <&admaif13_port>, <&admaif14_port>, <&admaif15_port>,
+ <&admaif16_port>, <&admaif17_port>, <&admaif18_port>, <&admaif19_port>,
+ /* XBAR Ports */
+ <&xbar_i2s3_port>, <&xbar_i2s5_port>,
+ <&xbar_dmic1_port>, <&xbar_dmic2_port>, <&xbar_dmic4_port>,
+ <&xbar_dspk1_port>, <&xbar_dspk2_port>,
+ /* BE I/O Ports */
+ <&i2s3_port>, <&i2s5_port>,
+ <&dmic1_port>, <&dmic2_port>, <&dmic4_port>,
+ <&dspk1_port>, <&dspk2_port>;
+
+ label = "NVIDIA Jetson Xavier NX APE";
+ };
+
thermal-zones {
cpu {
polling-delay = <0>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 9449156fae39..5ba7a4519b95 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -62,6 +62,7 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_EQOSR &emc>,
<&mc TEGRA194_MEMORY_CLIENT_EQOSW &emc>;
interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_EQOS>;
status = "disabled";
snps,write-requests = <1>;
@@ -733,6 +734,7 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>,
<&mc TEGRA194_MEMORY_CLIENT_SDMMCWA &emc>;
interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_SDMMC1>;
nvidia,pad-autocal-pull-up-offset-3v3-timeout =
<0x07>;
nvidia,pad-autocal-pull-down-offset-3v3-timeout =
@@ -759,6 +761,7 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>,
<&mc TEGRA194_MEMORY_CLIENT_SDMMCW &emc>;
interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_SDMMC3>;
nvidia,pad-autocal-pull-up-offset-1v8 = <0x00>;
nvidia,pad-autocal-pull-down-offset-1v8 = <0x7a>;
nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x07>;
@@ -790,6 +793,7 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRAB &emc>,
<&mc TEGRA194_MEMORY_CLIENT_SDMMCWAB &emc>;
interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_SDMMC4>;
nvidia,pad-autocal-pull-up-offset-hs400 = <0x00>;
nvidia,pad-autocal-pull-down-offset-hs400 = <0x00>;
nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x0a>;
@@ -821,6 +825,7 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_HDAR &emc>,
<&mc TEGRA194_MEMORY_CLIENT_HDAW &emc>;
interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_HDA>;
status = "disabled";
};
@@ -943,6 +948,10 @@
<&bpmp TEGRA194_CLK_XUSB_SS>,
<&bpmp TEGRA194_CLK_XUSB_FS>;
clock-names = "dev", "ss", "ss_src", "fs_src";
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVR &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVW &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_XUSB_DEV>;
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBB>,
<&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
power-domain-names = "dev", "ss";
@@ -972,6 +981,10 @@
"xusb_ss", "xusb_ss_src", "xusb_hs_src",
"xusb_fs_src", "pll_u_480m", "clk_m",
"pll_e";
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTR &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTW &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_XUSB_HOST>;
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBC>,
<&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
@@ -1300,6 +1313,84 @@
interrupt-controller;
};
+ smmu: iommu@12000000 {
+ compatible = "nvidia,tegra194-smmu", "nvidia,smmu-500";
+ reg = <0x12000000 0x800000>,
+ <0x11000000 0x800000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ stream-match-mask = <0x7f80>;
+ #global-interrupts = <2>;
+ #iommu-cells = <1>;
+
+ nvidia,memory-controller = <&mc>;
+ status = "okay";
+ };
+
host1x@13e00000 {
compatible = "nvidia,tegra194-host1x";
reg = <0x13e00000 0x10000>,
@@ -1319,6 +1410,7 @@
ranges = <0x15000000 0x15000000 0x01000000>;
interconnects = <&mc TEGRA194_MEMORY_CLIENT_HOST1XDMAR &emc>;
interconnect-names = "dma-mem";
+ iommus = <&smmu TEGRA194_SID_HOST1X>;
display-hub@15200000 {
compatible = "nvidia,tegra194-display";
@@ -1430,6 +1522,7 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_VICSRD &emc>,
<&mc TEGRA194_MEMORY_CLIENT_VICSWR &emc>;
interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_VIC>;
};
dpaux0: dpaux@155c0000 {
@@ -1747,7 +1840,11 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE1R &emc>,
<&mc TEGRA194_MEMORY_CLIENT_PCIE1W &emc>;
- interconnect-names = "read", "write";
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE1>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE1 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie@14120000 {
@@ -1797,7 +1894,11 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE2AR &emc>,
<&mc TEGRA194_MEMORY_CLIENT_PCIE2AW &emc>;
- interconnect-names = "read", "write";
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE2>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE2 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie@14140000 {
@@ -1847,7 +1948,11 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE3R &emc>,
<&mc TEGRA194_MEMORY_CLIENT_PCIE3W &emc>;
- interconnect-names = "read", "write";
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE3>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE3 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie@14160000 {
@@ -1897,7 +2002,11 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
<&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
- interconnect-names = "read", "write";
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE4>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie@14180000 {
@@ -1947,7 +2056,11 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
<&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
- interconnect-names = "read", "write";
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE0>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie@141a0000 {
@@ -2001,7 +2114,11 @@
interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
<&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
- interconnect-names = "read", "write";
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE5>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie_ep@14160000 {
@@ -2034,6 +2151,14 @@
nvidia,aspm-cmrt-us = <60>;
nvidia,aspm-pwr-on-t-us = <20>;
nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE4R &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_PCIE4W &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE4>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE4 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie_ep@14180000 {
@@ -2066,6 +2191,14 @@
nvidia,aspm-cmrt-us = <60>;
nvidia,aspm-pwr-on-t-us = <20>;
nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE0R &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_PCIE0W &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE0>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE0 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
pcie_ep@141a0000 {
@@ -2101,6 +2234,14 @@
nvidia,aspm-cmrt-us = <60>;
nvidia,aspm-pwr-on-t-us = <20>;
nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_PCIE5R &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_PCIE5W &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_PCIE5>;
+ iommu-map = <0x0 &smmu TEGRA194_SID_PCIE5 0x1000>;
+ iommu-map-mask = <0x0>;
+ dma-coherent;
};
sram@40000000 {
@@ -2136,6 +2277,7 @@
<&mc TEGRA194_MEMORY_CLIENT_BPMPDMAR &emc>,
<&mc TEGRA194_MEMORY_CLIENT_BPMPDMAW &emc>;
interconnect-names = "read", "write", "dma-mem", "dma-write";
+ iommus = <&smmu TEGRA194_SID_BPMP>;
bpmp_i2c: i2c {
compatible = "nvidia,tegra186-bpmp-i2c";
@@ -2345,6 +2487,20 @@
};
};
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 385 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 387 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 388 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0_0 &cpu0_1 &cpu1_0 &cpu1_1
+ &cpu2_0 &cpu2_1 &cpu3_0 &cpu3_1>;
+ };
+
psci {
compatible = "arm,psci-1.0";
status = "okay";
@@ -2369,6 +2525,11 @@
* for 8x and 11.025x sample rate streams.
*/
assigned-clock-rates = <258000000>;
+
+ interconnects = <&mc TEGRA194_MEMORY_CLIENT_APEDMAR &emc>,
+ <&mc TEGRA194_MEMORY_CLIENT_APEDMAW &emc>;
+ interconnect-names = "dma-mem", "write";
+ iommus = <&smmu TEGRA194_SID_APE>;
};
tcu: tcu {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 497635af7fab..7d3e3634743e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -424,6 +424,6 @@
<&i2s1_port>, <&i2s2_port>, <&i2s3_port>, <&i2s4_port>,
<&i2s5_port>, <&dmic1_port>, <&dmic2_port>, <&dmic3_port>;
- label = "jetson-tx1-ape";
+ label = "NVIDIA Jetson TX1 APE";
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index a9caaf7c0d67..d8409c1b4380 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1345,7 +1345,7 @@
};
hda@70030000 {
- nvidia,model = "jetson-tx1-hda";
+ nvidia,model = "NVIDIA Jetson TX1 HDA";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index 14c128a5e248..7dbb13f20de7 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -441,7 +441,7 @@
};
hda@70030000 {
- nvidia,model = "jetson-nano-hda";
+ nvidia,model = "NVIDIA Jetson Nano HDA";
status = "okay";
};
@@ -1043,6 +1043,6 @@
<&i2s3_port>, <&i2s4_port>,
<&dmic1_port>, <&dmic2_port>;
- label = "jetson-nano-ape";
+ label = "NVIDIA Jetson Nano APE";
};
};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 456502aeee49..4f0597091976 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -5,8 +5,11 @@ dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-ifc6640.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq6018-cp01-c1.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk10-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk10-c2.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-alcatel-idol347.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-asus-z00l.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-huawei-g7.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8150.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8910.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
@@ -32,11 +35,12 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8998-oneplus-dumpling.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qrb5165-rb5.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sa8155p-adp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-idp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r2-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r3-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r0.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1-kb.dtb
@@ -51,6 +55,8 @@ dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r1-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r2.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r2-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r3-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-r1-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7280-idp.dtb
@@ -70,8 +76,13 @@ dtb-$(CONFIG_ARCH_QCOM) += sdm845-oneplus-fajita.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-xiaomi-beryllium.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm850-lenovo-yoga-c630.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8150-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8150-microsoft-surface-duo.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8150-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8150-sony-xperia-kumano-bahamut.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8150-sony-xperia-kumano-griffin.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8250-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8250-sony-xperia-edo-pdx203.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8250-sony-xperia-edo-pdx206.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8350-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8350-mtp.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index defcbd15edf9..51e17094d7b1 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -41,14 +41,14 @@
/ {
aliases {
- serial0 = &blsp2_uart1;
- serial1 = &blsp2_uart2;
- serial2 = &blsp1_uart1;
- i2c0 = &blsp1_i2c2;
+ serial0 = &blsp2_uart2;
+ serial1 = &blsp2_uart3;
+ serial2 = &blsp1_uart2;
+ i2c0 = &blsp1_i2c3;
i2c1 = &blsp2_i2c1;
- i2c2 = &blsp2_i2c0;
- spi0 = &blsp1_spi0;
- spi1 = &blsp2_spi5;
+ i2c2 = &blsp2_i2c1;
+ spi0 = &blsp1_spi1;
+ spi1 = &blsp2_spi6;
};
chosen {
@@ -133,24 +133,24 @@
};
};
-&blsp1_i2c2 {
+&blsp1_i2c3 {
/* On Low speed expansion */
label = "LS-I2C0";
status = "okay";
};
-&blsp1_spi0 {
+&blsp1_spi1 {
/* On Low speed expansion */
label = "LS-SPI0";
status = "okay";
};
-&blsp1_uart1 {
+&blsp1_uart2 {
label = "BT-UART";
status = "okay";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_uart1_default>;
- pinctrl-1 = <&blsp1_uart1_sleep>;
+ pinctrl-0 = <&blsp1_uart2_default>;
+ pinctrl-1 = <&blsp1_uart2_sleep>;
bluetooth {
compatible = "qcom,qca6174-bt";
@@ -162,7 +162,11 @@
};
};
-&blsp2_i2c0 {
+&adsp_pil {
+ status = "okay";
+};
+
+&blsp2_i2c1 {
/* On High speed expansion */
label = "HS-I2C2";
status = "okay";
@@ -174,32 +178,36 @@
status = "okay";
};
-&blsp2_spi5 {
+&blsp2_spi6 {
/* On High speed expansion */
label = "HS-SPI1";
status = "okay";
};
-&blsp2_uart1 {
+&blsp2_uart2 {
label = "LS-UART1";
status = "okay";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart1_2pins_default>;
- pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ pinctrl-0 = <&blsp2_uart2_2pins_default>;
+ pinctrl-1 = <&blsp2_uart2_2pins_sleep>;
};
-&blsp2_uart2 {
+&blsp2_uart3 {
label = "LS-UART0";
status = "disabled";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart2_4pins_default>;
- pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+ pinctrl-0 = <&blsp2_uart3_4pins_default>;
+ pinctrl-1 = <&blsp2_uart3_4pins_sleep>;
};
&camss {
vdda-supply = <&vreg_l2a_1p25>;
};
+&gpu {
+ status = "okay";
+};
+
&hdmi {
status = "okay";
@@ -245,7 +253,12 @@
vdd-gfx-supply = <&vdd_gfx>;
};
-&msmgpio {
+&pm8994_resin {
+ status = "okay";
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&tlmm {
gpio-line-names =
"[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */
"[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */
@@ -424,7 +437,7 @@
};
};
- blsp1_uart1_default: blsp1_uart1_default {
+ blsp1_uart2_default: blsp1_uart2_default {
mux {
pins = "gpio41", "gpio42", "gpio43", "gpio44";
function = "blsp_uart2";
@@ -437,7 +450,7 @@
};
};
- blsp1_uart1_sleep: blsp1_uart1_sleep {
+ blsp1_uart2_sleep: blsp1_uart2_sleep {
mux {
pins = "gpio41", "gpio42", "gpio43", "gpio44";
function = "gpio";
@@ -505,20 +518,20 @@
&pcie0 {
status = "okay";
- perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ perst-gpio = <&tlmm 35 GPIO_ACTIVE_LOW>;
vddpe-3v3-supply = <&wlan_en>;
vdda-supply = <&vreg_l28a_0p925>;
};
&pcie1 {
status = "okay";
- perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ perst-gpio = <&tlmm 130 GPIO_ACTIVE_LOW>;
vdda-supply = <&vreg_l28a_0p925>;
};
&pcie2 {
status = "okay";
- perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ perst-gpio = <&tlmm 114 GPIO_ACTIVE_LOW>;
vdda-supply = <&vreg_l28a_0p925>;
};
@@ -929,9 +942,9 @@
&sdhc2 {
/* External SD card */
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
- cd-gpios = <&msmgpio 38 0x1>;
+ pinctrl-0 = <&sdc2_state_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_state_off &sdc2_cd_off>;
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vreg_l21a_2p95>;
vqmmc-supply = <&vreg_l13a_2p95>;
status = "okay";
@@ -1026,20 +1039,6 @@
};
};
-&spmi_bus {
- pmic@0 {
- pon@800 {
- resin {
- compatible = "qcom,pm8941-resin";
- interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_VOLUMEDOWN>;
- };
- };
- };
-};
-
&ufsphy {
status = "okay";
@@ -1089,6 +1088,10 @@
};
+&venus {
+ status = "okay";
+};
+
&wcd9335 {
clock-names = "mclk", "slimbus";
clocks = <&div1_mclk>,
diff --git a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
index f6ddf17ada81..8c7a27e972b7 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+++ b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
@@ -17,7 +17,7 @@
qcom,board-id = <0x00010018 0>;
aliases {
- serial0 = &blsp2_uart1;
+ serial0 = &blsp2_uart2;
};
chosen {
@@ -81,14 +81,22 @@
};
};
-&blsp2_uart1 {
+&blsp2_uart2 {
status = "okay";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart1_2pins_default>;
- pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ pinctrl-0 = <&blsp2_uart2_2pins_default>;
+ pinctrl-1 = <&blsp2_uart2_2pins_sleep>;
};
-&msmgpio {
+&gpu {
+ status = "okay";
+};
+
+&mdss {
+ status = "okay";
+};
+
+&tlmm {
sdc2_pins_default: sdc2-pins-default {
clk {
pins = "sdc2_clk";
@@ -352,7 +360,7 @@
bus-width = <4>;
- cd-gpios = <&msmgpio 38 0x1>;
+ cd-gpios = <&tlmm 38 0x1>;
vmmc-supply = <&vreg_l21a_2p95>;
vqmmc-supply = <&vreg_l13a_2p95>;
@@ -383,3 +391,7 @@
vdda-phy-max-microamp = <18380>;
vdda-pll-max-microamp = <9440>;
};
+
+&venus {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10-c1.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk10-c1.dts
new file mode 100644
index 000000000000..2bfcf42aeabc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10-c1.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+/dts-v1/;
+
+#include "ipq8074-hk10.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. IPQ8074/AP-HK10-C1";
+ compatible = "qcom,ipq8074-hk10-c1", "qcom,ipq8074";
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10-c2.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk10-c2.dts
new file mode 100644
index 000000000000..7da39f1d979b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10-c2.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+/* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+#include "ipq8074-hk10.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. IPQ8074/AP-HK10-C2";
+ compatible = "qcom,ipq8074-hk10-c2", "qcom,ipq8074";
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
new file mode 100644
index 000000000000..07e670829676
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+/dts-v1/;
+
+#include "ipq8074.dtsi"
+
+/ {
+ #address-cells = <0x2>;
+ #size-cells = <0x2>;
+
+ interrupt-parent = <&intc>;
+
+ aliases {
+ serial0 = &blsp1_uart5;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0x20000000>;
+ };
+};
+
+&blsp1_spi1 {
+ status = "ok";
+
+ m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&blsp1_uart5 {
+ status = "ok";
+};
+
+&pcie0 {
+ status = "ok";
+ perst-gpio = <&tlmm 58 0x1>;
+};
+
+&pcie1 {
+ status = "ok";
+ perst-gpio = <&tlmm 61 0x1>;
+};
+
+&pcie_phy0 {
+ status = "ok";
+};
+
+&pcie_phy1 {
+ status = "ok";
+};
+
+&qpic_bam {
+ status = "ok";
+};
+
+&qpic_nand {
+ status = "ok";
+
+ nand@0 {
+ reg = <0>;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ nand-bus-width = <8>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index a32e5e79ab0b..f39bc10cc5bd 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -165,6 +165,7 @@
clock-names = "cfg_ahb", "ref";
resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
+ status = "disabled";
};
pcie_phy0: phy@86000 {
@@ -372,6 +373,21 @@
status = "disabled";
};
+ blsp1_i2c6: i2c@78ba000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x078ba000 0x600>;
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ clock-frequency = <100000>;
+ dmas = <&blsp_dma 23>, <&blsp_dma 22>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
qpic_bam: dma-controller@7984000 {
compatible = "qcom,bam-v1.7.0";
reg = <0x07984000 0x1a000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
index 540b1fa4b260..670bd1bebd73 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
@@ -45,6 +45,24 @@
status = "okay";
};
+&blsp_i2c4 {
+ status = "okay";
+
+ touchscreen@26 {
+ compatible = "mstar,msg2638";
+ reg = <0x26>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&msmgpio 100 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_reset_default>;
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l5>;
+ touchscreen-size-x = <2048>;
+ touchscreen-size-y = <2048>;
+ };
+};
+
&blsp_i2c5 {
status = "okay";
@@ -281,6 +299,14 @@
bias-pull-up;
};
+ ts_int_reset_default: ts-int-reset-default {
+ pins = "gpio13", "gpio100";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
usb_id_default: usb-id-default {
pins = "gpio69";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts b/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
new file mode 100644
index 000000000000..e0075b574190
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021 Stephan Gerhold
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+
+/*
+ * Note: The original firmware from Huawei can only boot 32-bit kernels.
+ * To boot arm64 kernels it is necessary to flash 64-bit TZ/HYP firmware
+ * with EDL, e.g. taken from the DragonBoard 410c. This works because Huawei
+ * forgot to set up (firmware) secure boot for some reason.
+ *
+ * Also note that Huawei no longer provides bootloader unlock codes.
+ * This can be bypassed by patching the bootloader from a custom HYP firmware,
+ * making it think the bootloader is unlocked.
+ *
+ * See: https://wiki.postmarketos.org/wiki/Huawei_Ascend_G7_(huawei-g7)
+ */
+
+/ {
+ model = "Huawei Ascend G7";
+ compatible = "huawei,g7", "qcom,msm8916";
+
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default>;
+
+ label = "GPIO Buttons";
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_leds_default>;
+
+ led-0 {
+ gpios = <&msmgpio 8 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ function = LED_FUNCTION_INDICATOR;
+ };
+
+ led-1 {
+ gpios = <&msmgpio 9 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ function = LED_FUNCTION_INDICATOR;
+ };
+
+ led-2 {
+ gpios = <&msmgpio 10 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_BLUE>;
+ default-state = "off";
+ function = LED_FUNCTION_INDICATOR;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&msmgpio 117 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_id_default>;
+ };
+};
+
+&blsp_i2c2 {
+ status = "okay";
+
+ magnetometer@c {
+ compatible = "asahi-kasei,ak09911";
+ reg = <0x0c>;
+
+ vdd-supply = <&pm8916_l17>;
+ vid-supply = <&pm8916_l6>;
+
+ reset-gpios = <&msmgpio 36 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mag_reset_default>;
+ };
+
+ accelerometer@1e {
+ compatible = "kionix,kx023-1025";
+ reg = <0x1e>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <115 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_irq_default>;
+
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1";
+ };
+
+ proximity@39 {
+ compatible = "avago,apds9930";
+ reg = <0x39>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <113 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ led-max-microamp = <100000>;
+ amstaos,proximity-diodes = <1>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&prox_irq_default>;
+ };
+
+ regulator@3e {
+ compatible = "ti,tps65132";
+ reg = <0x3e>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&reg_lcd_en_default>;
+
+ reg_lcd_pos: outp {
+ regulator-name = "outp";
+ regulator-min-microvolt = <5400000>;
+ regulator-max-microvolt = <5400000>;
+ enable-gpios = <&msmgpio 97 GPIO_ACTIVE_HIGH>;
+ regulator-active-discharge = <1>;
+ };
+
+ reg_lcd_neg: outn {
+ regulator-name = "outn";
+ regulator-min-microvolt = <5400000>;
+ regulator-max-microvolt = <5400000>;
+ enable-gpios = <&msmgpio 32 GPIO_ACTIVE_HIGH>;
+ regulator-active-discharge = <1>;
+ };
+ };
+};
+
+&blsp_i2c5 {
+ status = "okay";
+
+ rmi4@70 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&pm8916_l17>;
+ vio-supply = <&pm8916_l16>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_irq_default>;
+
+ syna,startup-delay-ms = <100>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <1>; /* Allow sleeping */
+ };
+
+ rmi4-f11@11 {
+ reg = <0x11>;
+ syna,sensor-type = <1>; /* Touchscreen */
+ };
+ };
+};
+
+&blsp_i2c6 {
+ status = "okay";
+
+ nfc@28 {
+ compatible = "nxp,pn547", "nxp,nxp-nci-i2c";
+ reg = <0x28>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <21 IRQ_TYPE_EDGE_RISING>;
+
+ enable-gpios = <&msmgpio 20 GPIO_ACTIVE_HIGH>;
+ firmware-gpios = <&msmgpio 2 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nfc_default>;
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&pm8916_resin {
+ status = "okay";
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&pronto {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+};
+
+&sdhc_2 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdhc2_cd_default>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdhc2_cd_default>;
+
+ /*
+ * The Huawei device tree sets cd-gpios = <&msmgpio 38 GPIO_ACTIVE_HIGH>.
+ * However, gpio38 does not change its state when inserting/removing the
+ * SD card, it's just low all the time. The Huawei kernel seems to use
+ * polling for SD card detection instead.
+ *
+ * However, looking closer at the GPIO debug output it turns out that
+ * gpio56 switches its state when inserting/removing the SD card.
+ * It behaves just like gpio38 normally does. Usually GPIO56 is used as
+ * "UIM2_PRESENT", i.e. to check if a second SIM card is inserted.
+ * Maybe Huawei decided to replace the second SIM card slot with the
+ * SD card slot and forgot to re-route to gpio38.
+ */
+ cd-gpios = <&msmgpio 56 GPIO_ACTIVE_LOW>;
+};
+
+&usb {
+ status = "okay";
+ extcon = <&usb_id>, <&usb_id>;
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+ vdd_l7-supply = <&pm8916_s4>;
+
+ s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ l4 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l8 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l10 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ l11 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+};
+
+&msmgpio {
+ accel_irq_default: accel-irq-default {
+ pins = "gpio115";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ gpio_keys_default: gpio-keys-default {
+ pins = "gpio107";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ gpio_leds_default: gpio-leds-default {
+ pins = "gpio8", "gpio9", "gpio10";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ nfc_default: nfc-default {
+ pins = "gpio2", "gpio20", "gpio21";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ mag_reset_default: mag-reset-default {
+ pins = "gpio36";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ prox_irq_default: prox-irq-default {
+ pins = "gpio113";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ reg_lcd_en_default: reg-lcd-en-default {
+ pins = "gpio32", "gpio97";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdhc2_cd_default: sdhc2-cd-default {
+ pins = "gpio56";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ ts_irq_default: ts-irq-default {
+ pins = "gpio13";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb_id_default: usb-id-default {
+ pins = "gpio117";
+ function = "gpio";
+
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index 230ba3ce3277..9b4b7de7cec2 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
aliases {
@@ -95,6 +96,63 @@
pinctrl-0 = <&muic_int_default>;
};
};
+
+ i2c-tkey {
+ compatible = "i2c-gpio";
+ sda-gpios = <&msmgpio 16 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&msmgpio 17 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tkey_i2c_default>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchkey: touchkey@20 {
+ /* Note: Actually an ABOV MCU that implements same interface */
+ compatible = "coreriver,tc360-touchkey";
+ reg = <0x20>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <98 IRQ_TYPE_EDGE_FALLING>;
+
+ /* vcc/vdd-supply are board-specific */
+ vddio-supply = <&pm8916_l6>;
+
+ linux,keycodes = <KEY_APPSELECT KEY_BACK>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tkey_default>;
+ };
+ };
+
+ i2c-nfc {
+ compatible = "i2c-gpio";
+ sda-gpios = <&msmgpio 0 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&msmgpio 1 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nfc_i2c_default>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nfc@27 {
+ compatible = "samsung,s3fwrn5-i2c";
+ reg = <0x27>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <21 IRQ_TYPE_EDGE_RISING>;
+
+ en-gpios = <&msmgpio 20 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&msmgpio 49 GPIO_ACTIVE_HIGH>;
+
+ clocks = <&rpmcc RPM_SMD_BB_CLK2_PIN>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nfc_default &nfc_clk_req>;
+ };
+ };
};
&blsp_i2c2 {
@@ -122,6 +180,20 @@
};
};
+&blsp_i2c4 {
+ status = "okay";
+
+ battery@35 {
+ compatible = "richtek,rt5033-battery";
+ reg = <0x35>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <121 IRQ_TYPE_EDGE_BOTH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&fg_alert_default>;
+ };
+};
+
&blsp1_uart2 {
status = "okay";
};
@@ -284,6 +356,14 @@
bias-disable;
};
+ fg_alert_default: fg-alert-default {
+ pins = "gpio121";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
gpio_keys_default: gpio-keys-default {
pins = "gpio107", "gpio109";
function = "gpio";
@@ -333,6 +413,46 @@
bias-disable;
};
+ nfc_default: nfc-default {
+ pins = "gpio20", "gpio49";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+
+ irq {
+ pins = "gpio21";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ nfc_i2c_default: nfc-i2c-default {
+ pins = "gpio0", "gpio1";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tkey_default: tkey-default {
+ pins = "gpio98";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tkey_i2c_default: tkey-i2c-default {
+ pins = "gpio16", "gpio17";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
tsp_en_default: tsp-en-default {
pins = "gpio73";
function = "gpio";
@@ -341,3 +461,14 @@
bias-disable;
};
};
+
+&pm8916_gpios {
+ nfc_clk_req: nfc-clk-req {
+ pins = "gpio2";
+ function = "func1";
+
+ input-enable;
+ bias-disable;
+ power-source = <PM8916_GPIO_L2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
index 661f41ad978b..6cc2eaeb1d33 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
@@ -20,6 +20,37 @@
pinctrl-names = "default";
pinctrl-0 = <&panel_vdd3_default>;
};
+
+ reg_touch_key: regulator-touch-key {
+ compatible = "regulator-fixed";
+ regulator-name = "touch_key";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ gpio = <&msmgpio 86 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tkey_en_default>;
+ };
+
+ reg_key_led: regulator-key-led {
+ compatible = "regulator-fixed";
+ regulator-name = "key_led";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&msmgpio 60 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tkey_led_en_default>;
+ };
+};
+
+&touchkey {
+ vcc-supply = <&reg_touch_key>;
+ vdd-supply = <&reg_key_led>;
};
&accelerometer {
@@ -81,6 +112,22 @@
bias-disable;
};
+ tkey_en_default: tkey-en-default {
+ pins = "gpio86";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tkey_led_en_default: tkey-led-en-default {
+ pins = "gpio60";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
ts_int_default: ts-int-default {
pins = "gpio13";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
index dd35c3344358..c2eff5aebf85 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
@@ -7,6 +7,19 @@
/ {
model = "Samsung Galaxy A5U (EUR)";
compatible = "samsung,a5u-eur", "qcom,msm8916";
+
+ reg_touch_key: regulator-touch-key {
+ compatible = "regulator-fixed";
+ regulator-name = "touch_key";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&msmgpio 97 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tkey_en_default>;
+ };
};
&accelerometer {
@@ -42,7 +55,20 @@
};
};
+&touchkey {
+ vcc-supply = <&reg_touch_key>;
+ vdd-supply = <&reg_touch_key>;
+};
+
&msmgpio {
+ tkey_en_default: tkey-en-default {
+ pins = "gpio97";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
ts_int_default: ts-int-default {
pins = "gpio13";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
index 23cdcc9f7c72..1ccca83292ac 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
+++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, LGE Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
*/
/dts-v1/;
@@ -9,6 +10,9 @@
#include "pm8994.dtsi"
#include "pmi8994.dtsi"
+/* cont_splash_mem has different memory mapping */
+/delete-node/ &cont_splash_mem;
+
/ {
model = "LG Nexus 5X";
compatible = "lg,bullhead", "qcom,msm8992";
@@ -17,6 +21,9 @@
qcom,board-id = <0xb64 0>;
qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
+ /* Bullhead firmware doesn't support PSCI */
+ /delete-node/ psci;
+
aliases {
serial0 = &blsp1_uart2;
};
@@ -38,6 +45,11 @@
ftrace-size = <0x10000>;
pmsg-size = <0x20000>;
};
+
+ cont_splash_mem: memory@3400000 {
+ reg = <0 0x03400000 0 0x1200000>;
+ no-map;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
index baa55643b40f..c096b7758aa0 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
+++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
@@ -1,12 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, Huawei Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
*/
/dts-v1/;
#include "msm8994.dtsi"
+/* Angler's firmware does not report where the memory is allocated */
+/delete-node/ &cont_splash_mem;
+
/ {
model = "Huawei Nexus 6P";
compatible = "huawei,angler", "qcom,msm8994";
@@ -32,3 +36,7 @@
};
};
};
+
+&tlmm {
+ gpio-reserved-ranges = <85 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
index 5f46a1427f1f..1e1514e9158c 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
@@ -7,7 +7,7 @@
/ {
aliases {
- serial0 = &blsp2_uart1;
+ serial0 = &blsp2_uart2;
};
chosen {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
deleted file mode 100644
index ac1ede579361..000000000000
--- a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
+++ /dev/null
@@ -1,653 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- */
-
-&msmgpio {
-
- wcd9xxx_intr {
- wcd_intr_default: wcd_intr_default{
- mux {
- pins = "gpio54";
- function = "gpio";
- };
-
- config {
- pins = "gpio54";
- drive-strength = <2>; /* 2 mA */
- bias-pull-down; /* pull down */
- input-enable;
- };
- };
- };
-
- cdc_reset_ctrl {
- cdc_reset_sleep: cdc_reset_sleep {
- mux {
- pins = "gpio64";
- function = "gpio";
- };
- config {
- pins = "gpio64";
- drive-strength = <16>;
- bias-disable;
- output-low;
- };
- };
- cdc_reset_active:cdc_reset_active {
- mux {
- pins = "gpio64";
- function = "gpio";
- };
- config {
- pins = "gpio64";
- drive-strength = <16>;
- bias-pull-down;
- output-high;
- };
- };
- };
-
- blsp1_spi0_default: blsp1_spi0_default {
- pinmux {
- function = "blsp_spi1";
- pins = "gpio0", "gpio1", "gpio3";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio2";
- };
- pinconf {
- pins = "gpio0", "gpio1", "gpio3";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio2";
- drive-strength = <16>;
- bias-disable;
- output-high;
- };
- };
-
- blsp1_spi0_sleep: blsp1_spi0_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
- };
- pinconf {
- pins = "gpio0", "gpio1", "gpio2", "gpio3";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- blsp1_i2c2_default: blsp1_i2c2_default {
- pinmux {
- function = "blsp_i2c3";
- pins = "gpio47", "gpio48";
- };
- pinconf {
- pins = "gpio47", "gpio48";
- drive-strength = <16>;
- bias-disable = <0>;
- };
- };
-
- blsp1_i2c2_sleep: blsp1_i2c2_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio47", "gpio48";
- };
- pinconf {
- pins = "gpio47", "gpio48";
- drive-strength = <2>;
- bias-disable = <0>;
- };
- };
-
- blsp2_i2c0_default: blsp2_i2c0 {
- pinmux {
- function = "blsp_i2c7";
- pins = "gpio55", "gpio56";
- };
- pinconf {
- pins = "gpio55", "gpio56";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp2_i2c0_sleep: blsp2_i2c0_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio55", "gpio56";
- };
- pinconf {
- pins = "gpio55", "gpio56";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- blsp2_uart1_2pins_default: blsp2_uart1_2pins {
- pinmux {
- function = "blsp_uart8";
- pins = "gpio4", "gpio5";
- };
- pinconf {
- pins = "gpio4", "gpio5";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp2_uart1_2pins_sleep: blsp2_uart1_2pins_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio4", "gpio5";
- };
- pinconf {
- pins = "gpio4", "gpio5";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- blsp2_uart1_4pins_default: blsp2_uart1_4pins {
- pinmux {
- function = "blsp_uart8";
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- };
-
- pinconf {
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp2_uart1_4pins_sleep: blsp2_uart1_4pins_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- };
-
- pinconf {
- pins = "gpio4", "gpio5", "gpio6", "gpio7";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- blsp2_i2c1_default: blsp2_i2c1 {
- pinmux {
- function = "blsp_i2c8";
- pins = "gpio6", "gpio7";
- };
- pinconf {
- pins = "gpio6", "gpio7";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp2_i2c1_sleep: blsp2_i2c1_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio6", "gpio7";
- };
- pinconf {
- pins = "gpio6", "gpio7";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- blsp2_uart2_2pins_default: blsp2_uart2_2pins {
- pinmux {
- function = "blsp_uart9";
- pins = "gpio49", "gpio50";
- };
- pinconf {
- pins = "gpio49", "gpio50";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp2_uart2_2pins_sleep: blsp2_uart2_2pins_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio49", "gpio50";
- };
- pinconf {
- pins = "gpio49", "gpio50";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- blsp2_uart2_4pins_default: blsp2_uart2_4pins {
- pinmux {
- function = "blsp_uart9";
- pins = "gpio49", "gpio50", "gpio51", "gpio52";
- };
-
- pinconf {
- pins = "gpio49", "gpio50", "gpio51", "gpio52";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp2_uart2_4pins_sleep: blsp2_uart2_4pins_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio49", "gpio50", "gpio51", "gpio52";
- };
-
- pinconf {
- pins = "gpio49", "gpio50", "gpio51", "gpio52";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- blsp2_spi5_default: blsp2_spi5_default {
- pinmux {
- function = "blsp_spi12";
- pins = "gpio85", "gpio86", "gpio88";
- };
- pinmux_cs {
- function = "gpio";
- pins = "gpio87";
- };
- pinconf {
- pins = "gpio85", "gpio86", "gpio88";
- drive-strength = <12>;
- bias-disable;
- };
- pinconf_cs {
- pins = "gpio87";
- drive-strength = <16>;
- bias-disable;
- output-high;
- };
- };
-
- blsp2_spi5_sleep: blsp2_spi5_sleep {
- pinmux {
- function = "gpio";
- pins = "gpio85", "gpio86", "gpio87", "gpio88";
- };
- pinconf {
- pins = "gpio85", "gpio86", "gpio87", "gpio88";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- sdc2_clk_on: sdc2_clk_on {
- config {
- pins = "sdc2_clk";
- bias-disable; /* NO pull */
- drive-strength = <16>; /* 16 MA */
- };
- };
-
- sdc2_clk_off: sdc2_clk_off {
- config {
- pins = "sdc2_clk";
- bias-disable; /* NO pull */
- drive-strength = <2>; /* 2 MA */
- };
- };
-
- sdc2_cmd_on: sdc2_cmd_on {
- config {
- pins = "sdc2_cmd";
- bias-pull-up; /* pull up */
- drive-strength = <10>; /* 10 MA */
- };
- };
-
- sdc2_cmd_off: sdc2_cmd_off {
- config {
- pins = "sdc2_cmd";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 MA */
- };
- };
-
- sdc2_data_on: sdc2_data_on {
- config {
- pins = "sdc2_data";
- bias-pull-up; /* pull up */
- drive-strength = <10>; /* 10 MA */
- };
- };
-
- sdc2_data_off: sdc2_data_off {
- config {
- pins = "sdc2_data";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 MA */
- };
- };
-
- pcie0_clkreq_default: pcie0_clkreq_default {
- mux {
- pins = "gpio36";
- function = "pci_e0";
- };
-
- config {
- pins = "gpio36";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie0_perst_default: pcie0_perst_default {
- mux {
- pins = "gpio35";
- function = "gpio";
- };
-
- config {
- pins = "gpio35";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- pcie0_wake_default: pcie0_wake_default {
- mux {
- pins = "gpio37";
- function = "gpio";
- };
-
- config {
- pins = "gpio37";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie0_clkreq_sleep: pcie0_clkreq_sleep {
- mux {
- pins = "gpio36";
- function = "gpio";
- };
-
- config {
- pins = "gpio36";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- pcie0_wake_sleep: pcie0_wake_sleep {
- mux {
- pins = "gpio37";
- function = "gpio";
- };
-
- config {
- pins = "gpio37";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- pcie1_clkreq_default: pcie1_clkreq_default {
- mux {
- pins = "gpio131";
- function = "pci_e1";
- };
-
- config {
- pins = "gpio131";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie1_perst_default: pcie1_perst_default {
- mux {
- pins = "gpio130";
- function = "gpio";
- };
-
- config {
- pins = "gpio130";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- pcie1_wake_default: pcie1_wake_default {
- mux {
- pins = "gpio132";
- function = "gpio";
- };
-
- config {
- pins = "gpio132";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- pcie1_clkreq_sleep: pcie1_clkreq_sleep {
- mux {
- pins = "gpio131";
- function = "gpio";
- };
-
- config {
- pins = "gpio131";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- pcie1_wake_sleep: pcie1_wake_sleep {
- mux {
- pins = "gpio132";
- function = "gpio";
- };
-
- config {
- pins = "gpio132";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- pcie2_clkreq_default: pcie2_clkreq_default {
- mux {
- pins = "gpio115";
- function = "pci_e2";
- };
-
- config {
- pins = "gpio115";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie2_perst_default: pcie2_perst_default {
- mux {
- pins = "gpio114";
- function = "gpio";
- };
-
- config {
- pins = "gpio114";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- pcie2_wake_default: pcie2_wake_default {
- mux {
- pins = "gpio116";
- function = "gpio";
- };
-
- config {
- pins = "gpio116";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
-
- pcie2_clkreq_sleep: pcie2_clkreq_sleep {
- mux {
- pins = "gpio115";
- function = "gpio";
- };
-
- config {
- pins = "gpio115";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- pcie2_wake_sleep: pcie2_wake_sleep {
- mux {
- pins = "gpio116";
- function = "gpio";
- };
-
- config {
- pins = "gpio116";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- cci0_default: cci0_default {
- pinmux {
- function = "cci_i2c";
- pins = "gpio17", "gpio18";
- };
- pinconf {
- pins = "gpio17", "gpio18";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- cci1_default: cci1_default {
- pinmux {
- function = "cci_i2c";
- pins = "gpio19", "gpio20";
- };
- pinconf {
- pins = "gpio19", "gpio20";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- camera_board_default: camera_board_default {
- mux_pwdn {
- function = "gpio";
- pins = "gpio98";
- };
- config_pwdn {
- pins = "gpio98";
- drive-strength = <16>;
- bias-disable;
- };
-
- mux_rst {
- function = "gpio";
- pins = "gpio104";
- };
- config_rst {
- pins = "gpio104";
- drive-strength = <16>;
- bias-disable;
- };
-
- mux_mclk1 {
- function = "cam_mclk";
- pins = "gpio14";
- };
- config_mclk1 {
- pins = "gpio14";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- camera_front_default: camera_front_default {
- mux_pwdn {
- function = "gpio";
- pins = "gpio133";
- };
- config_pwdn {
- pins = "gpio133";
- drive-strength = <16>;
- bias-disable;
- };
-
- mux_rst {
- function = "gpio";
- pins = "gpio23";
- };
- config_rst {
- pins = "gpio23";
- drive-strength = <16>;
- bias-disable;
- };
-
- mux_mclk2 {
- function = "cam_mclk";
- pins = "gpio15";
- };
- config_mclk2 {
- pins = "gpio15";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- camera_rear_default: camera_rear_default {
- mux_pwdn {
- function = "gpio";
- pins = "gpio26";
- };
- config_pwdn {
- pins = "gpio26";
- drive-strength = <16>;
- bias-disable;
- };
-
- mux_rst {
- function = "gpio";
- pins = "gpio25";
- };
- config_rst {
- pins = "gpio25";
- drive-strength = <16>;
- bias-disable;
- };
-
- mux_mclk0 {
- function = "cam_mclk";
- pins = "gpio13";
- };
- config_mclk0 {
- pins = "gpio13";
- drive-strength = <16>;
- bias-disable;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index ce430ba9c118..78c55ca10ba9 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -6,7 +6,9 @@
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,apr.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
@@ -43,6 +45,9 @@
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ clocks = <&kryocc 0>;
+ operating-points-v2 = <&cluster0_opp>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
compatible = "cache";
@@ -57,6 +62,9 @@
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ clocks = <&kryocc 0>;
+ operating-points-v2 = <&cluster0_opp>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_0>;
};
@@ -67,6 +75,9 @@
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ clocks = <&kryocc 1>;
+ operating-points-v2 = <&cluster1_opp>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_1>;
L2_1: l2-cache {
compatible = "cache";
@@ -81,6 +92,9 @@
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ clocks = <&kryocc 1>;
+ operating-points-v2 = <&cluster1_opp>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_1>;
};
@@ -120,6 +134,227 @@
};
};
+ cluster0_opp: opp_table0 {
+ compatible = "operating-points-v2-kryo-cpu";
+ nvmem-cells = <&speedbin_efuse>;
+ opp-shared;
+
+ /* Nominal fmax for now */
+ opp-307200000 {
+ opp-hz = /bits/ 64 <307200000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-422400000 {
+ opp-hz = /bits/ 64 <422400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-556800000 {
+ opp-hz = /bits/ 64 <556800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-652800000 {
+ opp-hz = /bits/ 64 <652800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-844800000 {
+ opp-hz = /bits/ 64 <844800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-960000000 {
+ opp-hz = /bits/ 64 <960000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1036800000 {
+ opp-hz = /bits/ 64 <1036800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1113600000 {
+ opp-hz = /bits/ 64 <1113600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1228800000 {
+ opp-hz = /bits/ 64 <1228800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1324800000 {
+ opp-hz = /bits/ 64 <1324800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1478400000 {
+ opp-hz = /bits/ 64 <1478400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1593600000 {
+ opp-hz = /bits/ 64 <1593600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ };
+
+ cluster1_opp: opp_table1 {
+ compatible = "operating-points-v2-kryo-cpu";
+ nvmem-cells = <&speedbin_efuse>;
+ opp-shared;
+
+ /* Nominal fmax for now */
+ opp-307200000 {
+ opp-hz = /bits/ 64 <307200000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-403200000 {
+ opp-hz = /bits/ 64 <403200000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-556800000 {
+ opp-hz = /bits/ 64 <556800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-652800000 {
+ opp-hz = /bits/ 64 <652800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-729600000 {
+ opp-hz = /bits/ 64 <729600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-806400000 {
+ opp-hz = /bits/ 64 <806400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-883200000 {
+ opp-hz = /bits/ 64 <883200000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-940800000 {
+ opp-hz = /bits/ 64 <940800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1036800000 {
+ opp-hz = /bits/ 64 <1036800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1113600000 {
+ opp-hz = /bits/ 64 <1113600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1190400000 {
+ opp-hz = /bits/ 64 <1190400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1248000000 {
+ opp-hz = /bits/ 64 <1248000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1324800000 {
+ opp-hz = /bits/ 64 <1324800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1401600000 {
+ opp-hz = /bits/ 64 <1401600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1478400000 {
+ opp-hz = /bits/ 64 <1478400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1555200000 {
+ opp-hz = /bits/ 64 <1555200000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1632000000 {
+ opp-hz = /bits/ 64 <1632000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1708800000 {
+ opp-hz = /bits/ 64 <1708800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1785600000 {
+ opp-hz = /bits/ 64 <1785600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1824000000 {
+ opp-hz = /bits/ 64 <1824000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1920000000 {
+ opp-hz = /bits/ 64 <1920000000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-1996800000 {
+ opp-hz = /bits/ 64 <1996800000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-2073600000 {
+ opp-hz = /bits/ 64 <2073600000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ opp-2150400000 {
+ opp-hz = /bits/ 64 <2150400000>;
+ opp-supported-hw = <0x77>;
+ clock-latency-ns = <200000>;
+ };
+ };
+
firmware {
scm {
compatible = "qcom,scm-msm8996";
@@ -424,7 +659,7 @@
bits = <1 4>;
};
- gpu_speed_bin: gpu_speed_bin@133 {
+ speedbin_efuse: speedbin@133 {
reg = <0x133 0x1>;
bits = <5 3>;
};
@@ -472,7 +707,7 @@
tcsr_mutex_regs: syscon@740000 {
compatible = "syscon";
- reg = <0x00740000 0x20000>;
+ reg = <0x00740000 0x40000>;
};
tcsr: syscon@7a0000 {
@@ -521,6 +756,8 @@
#size-cells = <1>;
ranges;
+ status = "disabled";
+
mdp: mdp@901000 {
compatible = "qcom,mdp5";
reg = <0x00901000 0x90000>;
@@ -542,6 +779,11 @@
iommus = <&mdp_smmu 0>;
+ assigned-clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <300000000>,
+ <19200000>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -552,9 +794,82 @@
remote-endpoint = <&hdmi_in>;
};
};
+
+ port@1 {
+ reg = <1>;
+ mdp5_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@994000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0x00994000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_BYTE0_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MMSS_MISC_AHB_CLK>,
+ <&mmcc MDSS_PCLK0_CLK>,
+ <&mmcc MDSS_ESC0_CLK>;
+ clock-names = "mdp_core",
+ "byte",
+ "iface",
+ "bus",
+ "core_mmss",
+ "pixel",
+ "core";
+
+ phys = <&dsi0_phy>;
+ phy-names = "dsi";
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&mdp5_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
};
};
+ dsi0_phy: dsi-phy@994400 {
+ compatible = "qcom,dsi-phy-14nm";
+ reg = <0x00994400 0x100>,
+ <0x00994500 0x300>,
+ <0x00994800 0x188>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>, <&xo_board>;
+ clock-names = "iface", "ref";
+ status = "disabled";
+ };
+
hdmi: hdmi-tx@9a0000 {
compatible = "qcom,hdmi-tx-8996";
reg = <0x009a0000 0x50c>,
@@ -618,7 +933,8 @@
"ref";
};
};
- gpu@b00000 {
+
+ gpu: gpu@b00000 {
compatible = "qcom,adreno-530.2", "qcom,adreno";
#stream-id-cells = <16>;
@@ -642,7 +958,7 @@
power-domains = <&mmcc GPU_GX_GDSC>;
iommus = <&adreno_smmu 0>;
- nvmem-cells = <&gpu_speed_bin>;
+ nvmem-cells = <&speedbin_efuse>;
nvmem-cell-names = "speed_bin";
qcom,gpu-quirk-two-pass-use-wfi;
@@ -650,6 +966,8 @@
operating-points-v2 = <&gpu_opp_table>;
+ status = "disabled";
+
gpu_opp_table: opp-table {
compatible ="operating-points-v2";
@@ -693,15 +1011,482 @@
};
};
- msmgpio: pinctrl@1010000 {
+ tlmm: pinctrl@1010000 {
compatible = "qcom,msm8996-pinctrl";
reg = <0x01010000 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
- gpio-ranges = <&msmgpio 0 0 150>;
+ gpio-ranges = <&tlmm 0 0 150>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+
+ blsp1_spi1_default: blsp1-spi1-default {
+ spi {
+ pins = "gpio0", "gpio1", "gpio3";
+ function = "blsp_spi1";
+ drive-strength = <12>;
+ bias-disable;
+ };
+
+ cs {
+ pins = "gpio2";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ blsp1_spi1_sleep: blsp1-spi1-sleep {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ blsp2_uart2_2pins_default: blsp2-uart1-2pins {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart8";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp2_uart2_2pins_sleep: blsp2-uart1-2pins-sleep {
+ pins = "gpio4", "gpio5";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c2_default: blsp2-i2c2 {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c8";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp2_i2c2_sleep: blsp2-i2c2-sleep {
+ pins = "gpio6", "gpio7";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cci0_default: cci0-default {
+ pins = "gpio17", "gpio18";
+ function = "cci_i2c";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ camera0_state_on:
+ camera_rear_default: camera-rear-default {
+ mclk0 {
+ pins = "gpio13";
+ function = "cam_mclk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ rst {
+ pins = "gpio25";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ pwdn {
+ pins = "gpio26";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ cci1_default: cci1-default {
+ pins = "gpio19", "gpio20";
+ function = "cci_i2c";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ camera1_state_on:
+ camera_board_default: camera-board-default {
+ mclk1 {
+ pins = "gpio14";
+ function = "cam_mclk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ pwdn {
+ pins = "gpio98";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ rst {
+ pins = "gpio104";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ camera2_state_on:
+ camera_front_default: camera-front-default {
+ mclk2 {
+ pins = "gpio15";
+ function = "cam_mclk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ rst {
+ pins = "gpio23";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ pwdn {
+ pins = "gpio133";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ pcie0_state_on: pcie0-state-on {
+ perst {
+ pins = "gpio35";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio36";
+ function = "pci_e0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake {
+ pins = "gpio37";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie0_state_off: pcie0-state-off {
+ perst {
+ pins = "gpio35";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio36";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake {
+ pins = "gpio37";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_i2c3_default: blsp1-i2c2-default {
+ pins = "gpio47", "gpio48";
+ function = "blsp_i2c3";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp1_i2c3_sleep: blsp1-i2c2-sleep {
+ pins = "gpio47", "gpio48";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_uart3_4pins_default: blsp2-uart2-4pins {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ function = "blsp_uart9";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp2_uart3_4pins_sleep: blsp2-uart2-4pins-sleep {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ function = "blsp_uart9";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wcd_intr_default: wcd-intr-default {
+ pins = "gpio54";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+
+ blsp2_i2c1_default: blsp2-i2c1 {
+ pins = "gpio55", "gpio56";
+ function = "blsp_i2c7";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp2_i2c1_sleep: blsp2-i2c0-sleep {
+ pins = "gpio55", "gpio56";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c5_default: blsp2-i2c5 {
+ pins = "gpio60", "gpio61";
+ function = "blsp_i2c11";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ /* The sleep state for BLSP2_I2C5 is missing. */
+
+ cdc_reset_active: cdc-reset-active {
+ pins = "gpio64";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-pull-down;
+ output-high;
+ };
+
+ cdc_reset_sleep: cdc-reset-sleep {
+ pins = "gpio64";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+
+ blsp2_spi6_default: blsp2-spi5-default {
+ spi {
+ pins = "gpio85", "gpio86", "gpio88";
+ function = "blsp_spi12";
+ drive-strength = <12>;
+ bias-disable;
+ };
+
+ cs {
+ pins = "gpio87";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ blsp2_spi6_sleep: blsp2-spi5-sleep {
+ pins = "gpio85", "gpio86", "gpio87", "gpio88";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ blsp2_i2c6_default: blsp2-i2c6 {
+ pins = "gpio87", "gpio88";
+ function = "blsp_i2c12";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ blsp2_i2c6_sleep: blsp2-i2c6-sleep {
+ pins = "gpio87", "gpio88";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pcie1_state_on: pcie1-state-on {
+ perst {
+ pins = "gpio130";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio131";
+ function = "pci_e1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake {
+ pins = "gpio132";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ pcie1_state_off: pcie1-state-off {
+ /* Perst is missing? */
+ clkreq {
+ pins = "gpio131";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake {
+ pins = "gpio132";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ pcie2_state_on: pcie2-state-on {
+ perst {
+ pins = "gpio114";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio115";
+ function = "pci_e2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake {
+ pins = "gpio116";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ pcie2_state_off: pcie2-state-off {
+ /* Perst is missing? */
+ clkreq {
+ pins = "gpio115";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake {
+ pins = "gpio116";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ sdc1_state_on: sdc1-state-on {
+ clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_state_off: sdc1-state-off {
+ clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc2_state_on: sdc2-clk-on {
+ clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+
+ sdc2_state_off: sdc2-clk-off {
+ clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
};
spmi_bus: qcom,spmi@400f000 {
@@ -762,8 +1547,8 @@
<0 0 0 4 &intc 0 248 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
- pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
+ pinctrl-0 = <&pcie0_state_on>;
+ pinctrl-1 = <&pcie0_state_off>;
linux,pci-domain = <0>;
@@ -816,8 +1601,8 @@
<0 0 0 4 &intc 0 275 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
- pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
+ pinctrl-0 = <&pcie1_state_on>;
+ pinctrl-1 = <&pcie1_state_off>;
linux,pci-domain = <1>;
@@ -867,8 +1652,8 @@
<0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
- pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep >;
+ pinctrl-0 = <&pcie2_state_on>;
+ pinctrl-1 = <&pcie2_state_off>;
linux,pci-domain = <2>;
clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
@@ -1136,7 +1921,7 @@
};
adreno_smmu: iommu@b40000 {
- compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ compatible = "qcom,msm8996-smmu-v2", "qcom,adreno-smmu", "qcom,smmu-v2";
reg = <0x00b40000 0x10000>;
#global-interrupts = <1>;
@@ -1152,7 +1937,7 @@
power-domains = <&mmcc GPU_GDSC>;
};
- video-codec@c00000 {
+ venus: video-codec@c00000 {
compatible = "qcom,msm8996-venus";
reg = <0x00c00000 0xff000>;
interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
@@ -1183,7 +1968,7 @@
<&venus_smmu 0x2d>,
<&venus_smmu 0x31>;
memory-region = <&venus_region>;
- status = "okay";
+ status = "disabled";
video-decoder {
compatible = "venus-decoder";
@@ -1745,9 +2530,14 @@
};
};
};
+
kryocc: clock-controller@6400000 {
- compatible = "qcom,apcc-msm8996";
+ compatible = "qcom,msm8996-apcc";
reg = <0x06400000 0x90000>;
+
+ clock-names = "xo";
+ clocks = <&xo_board>;
+
#clock-cells = <1>;
};
@@ -1758,6 +2548,10 @@
#size-cells = <1>;
ranges;
+ interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq";
+
clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
<&gcc GCC_USB30_MASTER_CLK>,
<&gcc GCC_AGGRE2_USB3_AXI_CLK>,
@@ -1841,34 +2635,75 @@
status = "disabled";
};
+ sdhc1: sdhci@7464900 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07464900 0x11c>, <0x07464000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>,
+ <&xo_board>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_state_on>;
+ pinctrl-1 = <&sdc1_state_off>;
+
+ bus-width = <8>;
+ non-removable;
+ status = "disabled";
+ };
+
sdhc2: sdhci@74a4900 {
- status = "disabled";
- compatible = "qcom,sdhci-msm-v4";
- reg = <0x074a4900 0x314>, <0x074a4000 0x800>;
- reg-names = "hc_mem", "core_mem";
-
- interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
- <0 221 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hc_irq", "pwr_irq";
-
- clock-names = "iface", "core", "xo";
- clocks = <&gcc GCC_SDCC2_AHB_CLK>,
- <&gcc GCC_SDCC2_APPS_CLK>,
- <&xo_board>;
- bus-width = <4>;
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x074a4900 0x314>, <0x074a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core", "xo";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+ <&xo_board>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_state_on>;
+ pinctrl-1 = <&sdc2_state_off>;
+
+ bus-width = <4>;
+ status = "disabled";
};
- blsp1_uart1: serial@7570000 {
+ blsp1_dma: dma@7544000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07544000 0x2b000>;
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "bam_clk";
+ qcom,controlled-remotely;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+
+ blsp1_uart2: serial@7570000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x07570000 0x1000>;
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 2>, <&blsp1_dma 3>;
+ dma-names = "tx", "rx";
status = "disabled";
};
- blsp1_spi0: spi@7575000 {
+ blsp1_spi1: spi@7575000 {
compatible = "qcom,spi-qup-v2.2.1";
reg = <0x07575000 0x600>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
@@ -1876,14 +2711,16 @@
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_spi0_default>;
- pinctrl-1 = <&blsp1_spi0_sleep>;
+ pinctrl-0 = <&blsp1_spi1_default>;
+ pinctrl-1 = <&blsp1_spi1_sleep>;
+ dmas = <&blsp1_dma 12>, <&blsp1_dma 13>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
- blsp1_i2c2: i2c@7577000 {
+ blsp1_i2c3: i2c@7577000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x07577000 0x1000>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
@@ -1891,14 +2728,27 @@
<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
clock-names = "iface", "core";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_i2c2_default>;
- pinctrl-1 = <&blsp1_i2c2_sleep>;
+ pinctrl-0 = <&blsp1_i2c3_default>;
+ pinctrl-1 = <&blsp1_i2c3_sleep>;
+ dmas = <&blsp1_dma 16>, <&blsp1_dma 17>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
- blsp2_uart1: serial@75b0000 {
+ blsp2_dma: dma@7584000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07584000 0x2b000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ qcom,controlled-remotely;
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+
+ blsp2_uart2: serial@75b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x075b0000 0x1000>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
@@ -1908,7 +2758,7 @@
status = "disabled";
};
- blsp2_uart2: serial@75b1000 {
+ blsp2_uart3: serial@75b1000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x075b1000 0x1000>;
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
@@ -1918,7 +2768,7 @@
status = "disabled";
};
- blsp2_i2c0: i2c@75b5000 {
+ blsp2_i2c1: i2c@75b5000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x075b5000 0x1000>;
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
@@ -1926,14 +2776,16 @@
<&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
clock-names = "iface", "core";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_i2c0_default>;
- pinctrl-1 = <&blsp2_i2c0_sleep>;
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ dmas = <&blsp2_dma 12>, <&blsp2_dma 13>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
- blsp2_i2c1: i2c@75b6000 {
+ blsp2_i2c2: i2c@75b6000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x075b6000 0x1000>;
interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
@@ -1941,14 +2793,49 @@
<&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
clock-names = "iface", "core";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_i2c1_default>;
- pinctrl-1 = <&blsp2_i2c1_sleep>;
+ pinctrl-0 = <&blsp2_i2c2_default>;
+ pinctrl-1 = <&blsp2_i2c2_sleep>;
+ dmas = <&blsp2_dma 14>, <&blsp2_dma 15>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
- blsp2_spi5: spi@75ba000{
+ blsp2_i2c5: i2c@75b9000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x75b9000 0x1000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP5_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp2_i2c5_default>;
+ dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c6: i2c@75ba000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x75ba000 0x1000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c6_default>;
+ pinctrl-1 = <&blsp2_i2c6_sleep>;
+ dmas = <&blsp2_dma 22>, <&blsp2_dma 23>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi6: spi@75ba000 {
compatible = "qcom,spi-qup-v2.2.1";
reg = <0x075ba000 0x600>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
@@ -1956,8 +2843,10 @@
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_spi5_default>;
- pinctrl-1 = <&blsp2_spi5_sleep>;
+ pinctrl-0 = <&blsp2_spi6_default>;
+ pinctrl-1 = <&blsp2_spi6_sleep>;
+ dmas = <&blsp2_dma 22>, <&blsp2_dma 23>;
+ dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -1981,6 +2870,7 @@
assigned-clock-rates = <19200000>, <60000000>;
power-domains = <&gcc USB30_GDSC>;
+ qcom,select-utmi-as-pipe-clk;
status = "disabled";
dwc3@7600000 {
@@ -1989,6 +2879,7 @@
interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
phys = <&hsusb_phy2>;
phy-names = "usb2-phy";
+ maximum-speed = "high-speed";
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
};
@@ -2032,13 +2923,13 @@
compatible = "slim217,1a0";
reg = <1 0>;
- interrupt-parent = <&msmgpio>;
+ interrupt-parent = <&tlmm>;
interrupts = <54 IRQ_TYPE_LEVEL_HIGH>,
<53 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr1", "intr2";
interrupt-controller;
#interrupt-cells = <1>;
- reset-gpios = <&msmgpio 64 0>;
+ reset-gpios = <&tlmm 64 0>;
slim-ifc-dev = <&tasha_ifd>;
@@ -2067,6 +2958,11 @@
qcom,smem-states = <&smp2p_adsp_out 0>;
qcom,smem-state-names = "stop";
+ power-domains = <&rpmpd MSM8996_VDDCX>;
+ power-domain-names = "cx";
+
+ status = "disabled";
+
smd-edge {
interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
@@ -2458,4 +3354,3 @@
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
};
};
-#include "msm8996-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi
index 8ab4f1f78bbf..8a4972e6a24c 100644
--- a/arch/arm64/boot/dts/qcom/pm6150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi
@@ -7,6 +7,30 @@
#include <dt-bindings/spmi/spmi.h>
#include <dt-bindings/thermal/thermal.h>
+/ {
+ thermal-zones {
+ pm6150_thermal: pm6150-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm6150_temp>;
+
+ trips {
+ pm6150_trip0: trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ pm6150_crit: crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
&spmi_bus {
pm6150_lsid0: pmic@0 {
compatible = "qcom,pm6150", "qcom,spmi-pmic";
diff --git a/arch/arm64/boot/dts/qcom/pm7325.dtsi b/arch/arm64/boot/dts/qcom/pm7325.dtsi
new file mode 100644
index 000000000000..e7f64a9ddc9c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm7325.dtsi
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: BSD-3-Clause
+// Copyright (c) 2021, The Linux Foundation. All rights reserved.
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pm7325: pmic@1 {
+ compatible = "qcom,pm7325", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm7325_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x1 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ pm7325_gpios: gpios@8800 {
+ compatible = "qcom,pm7325-gpio", "qcom,spmi-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ gpio-ranges = <&pm7325_gpios 0 0 10>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
+
+&thermal_zones {
+ pm7325_thermal: pm7325-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm7325_temp_alarm>;
+
+ trips {
+ pm7325_trip0: trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ pm7325_crit: pm7325-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index fa4ea7ded0ab..c566a64b1373 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -50,7 +50,8 @@
pon: power-on@800 {
compatible = "qcom,pm8916-pon";
reg = <0x0800>;
- pwrkey {
+
+ pon_pwrkey: pwrkey {
compatible = "qcom,pm8941-pwrkey";
interrupts = <0x0 0x8 0x0 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
@@ -59,6 +60,15 @@
status = "disabled";
};
+
+ pon_resin: resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0x0 0x8 0x1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+
+ status = "disabled";
+ };
};
pm8150_temp: temp-alarm@2400 {
diff --git a/arch/arm64/boot/dts/qcom/pm8350c.dtsi b/arch/arm64/boot/dts/qcom/pm8350c.dtsi
index 2b9b75ecec60..e1b75ae0a823 100644
--- a/arch/arm64/boot/dts/qcom/pm8350c.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8350c.dtsi
@@ -13,13 +13,43 @@
#address-cells = <1>;
#size-cells = <0>;
+ pm8350c_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x2 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
pm8350c_gpios: gpio@8800 {
- compatible = "qcom,pm8350c-gpio";
+ compatible = "qcom,pm8350c-gpio", "qcom,spmi-gpio";
reg = <0x8800>;
gpio-controller;
+ gpio-ranges = <&pm8350c_gpios 0 0 9>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
};
};
+
+&thermal_zones {
+ pm8350c_thermal: pm8350c-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8350c_temp_alarm>;
+
+ trips {
+ pm8350c_trip0: trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ pm8350c_crit: pm8350c-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index c3876c82c874..ad19016df047 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -45,7 +45,6 @@
pm8994_pon: pon@800 {
compatible = "qcom,pm8916-pon";
-
reg = <0x800>;
mode-bootloader = <0x2>;
mode-recovery = <0x1>;
@@ -58,6 +57,13 @@
linux,code = <KEY_POWER>;
};
+ pm8994_resin: resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ status = "disabled";
+ };
};
pm8994_temp: temp-alarm@2400 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
index e5ed28ab9b2d..b4ac900ab115 100644
--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
@@ -32,5 +32,18 @@
#address-cells = <1>;
#size-cells = <1>;
};
+
+ pmi8994_wled: wled@d800 {
+ compatible = "qcom,pmi8994-wled";
+ reg = <0xd800 0xd900>;
+ interrupts = <3 0xd8 0x02 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "short";
+ qcom,num-strings = <3>;
+ /* Yes, all four strings *have to* be defined or things won't work. */
+ qcom,enabled-strings = <0 1 2 3>;
+ qcom,cabc;
+ qcom,external-pfet;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
index 1530b8ff270f..04fc2632a0b2 100644
--- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
@@ -3,6 +3,8 @@
* Copyright (c) 2021, Linaro Limited
*/
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
@@ -13,10 +15,57 @@
#address-cells = <1>;
#size-cells = <0>;
+ pmk8350_pon: pon@1300 {
+ compatible = "qcom,pm8998-pon";
+ reg = <0x1300>;
+
+ pwrkey {
+ compatible = "qcom,pmk8350-pwrkey";
+ interrupts = <0x0 0x13 0x7 IRQ_TYPE_EDGE_BOTH>;
+ linux,code = <KEY_POWER>;
+ };
+
+ resin {
+ compatible = "qcom,pmk8350-resin";
+ interrupts = <0x0 0x13 0x6 IRQ_TYPE_EDGE_BOTH>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+ pmk8350_vadc: adc@3100 {
+ compatible = "qcom,spmi-adc7";
+ reg = <0x3100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eoc-int-en-set";
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+ };
+
+ pmk8350_adc_tm: adc-tm@3400 {
+ compatible = "qcom,adc-tm7";
+ reg = <0x3400>;
+ interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "threshold";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #thermal-sensor-cells = <1>;
+ status = "disabled";
+ };
+
+ pmk8350_rtc: rtc@6100 {
+ compatible = "qcom,pmk8350-rtc";
+ reg = <0x6100>, <0x6200>;
+ reg-names = "rtc", "alarm";
+ interrupts = <0x0 0x62 0x1 IRQ_TYPE_EDGE_RISING>;
+ };
+
pmk8350_gpios: gpio@b000 {
- compatible = "qcom,pmk8350-gpio";
+ compatible = "qcom,pmk8350-gpio", "qcom,spmi-gpio";
reg = <0xb000>;
gpio-controller;
+ gpio-ranges = <&pmk8350_gpios 0 0 4>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi b/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi
new file mode 100644
index 000000000000..7072e5a2e73f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmm8155au_1.dtsi
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+/ {
+ thermal-zones {
+ pmm8155au-1-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&pmm8155au_1_temp>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+
+ trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ pmic@0 {
+ compatible = "qcom,pmm8155au", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pon: power-on@800 {
+ compatible = "qcom,pm8916-pon";
+ reg = <0x0800>;
+ pwrkey {
+ compatible = "qcom,pm8941-pwrkey";
+ interrupts = <0x0 0x8 0x0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_POWER>;
+
+ status = "disabled";
+ };
+ };
+
+ pmm8155au_1_temp: temp-alarm@2400 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0x2400>;
+ interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pmm8155au_1_adc ADC5_DIE_TEMP>;
+ io-channel-names = "thermal";
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmm8155au_1_adc: adc@3100 {
+ compatible = "qcom,spmi-adc5";
+ reg = <0x3100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+
+ ref-gnd@0 {
+ reg = <ADC5_REF_GND>;
+ qcom,pre-scaling = <1 1>;
+ label = "ref_gnd";
+ };
+
+ vref-1p25@1 {
+ reg = <ADC5_1P25VREF>;
+ qcom,pre-scaling = <1 1>;
+ label = "vref_1p25";
+ };
+
+ die-temp@6 {
+ reg = <ADC5_DIE_TEMP>;
+ qcom,pre-scaling = <1 1>;
+ label = "die_temp";
+ };
+ };
+
+ pmm8155au_1_adc_tm: adc-tm@3500 {
+ compatible = "qcom,spmi-adc-tm5";
+ reg = <0x3500>;
+ interrupts = <0x0 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ pmm8155au_1_rtc: rtc@6000 {
+ compatible = "qcom,pm8941-rtc";
+ reg = <0x6000>;
+ reg-names = "rtc", "alarm";
+ interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
+
+ status = "disabled";
+ };
+
+ pmm8155au_1_gpios: gpio@c000 {
+ compatible = "qcom,pmm8155au-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmm8155au_1_gpios 0 0 10>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmic@1 {
+ compatible = "qcom,pmm8155au", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi b/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi
new file mode 100644
index 000000000000..72075964fbb9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmm8155au_2.dtsi
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+/ {
+ thermal-zones {
+ pmm8155au-2-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&pmm8155au_2_temp>;
+
+ trips {
+ trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ trip1 {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "hot";
+ };
+
+ trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ pmic@4 {
+ compatible = "qcom,pmm8155au", "qcom,spmi-pmic";
+ reg = <0x4 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-on@800 {
+ compatible = "qcom,pm8916-pon";
+ reg = <0x0800>;
+
+ status = "disabled";
+ };
+
+ pmm8155au_2_temp: temp-alarm@2400 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0x2400>;
+ interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pmm8155au_2_adc ADC5_DIE_TEMP>;
+ io-channel-names = "thermal";
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmm8155au_2_adc: adc@3100 {
+ compatible = "qcom,spmi-adc5";
+ reg = <0x3100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ interrupts = <0x4 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+
+ ref-gnd@0 {
+ reg = <ADC5_REF_GND>;
+ qcom,pre-scaling = <1 1>;
+ label = "ref_gnd";
+ };
+
+ vref-1p25@1 {
+ reg = <ADC5_1P25VREF>;
+ qcom,pre-scaling = <1 1>;
+ label = "vref_1p25";
+ };
+
+ die-temp@6 {
+ reg = <ADC5_DIE_TEMP>;
+ qcom,pre-scaling = <1 1>;
+ label = "die_temp";
+ };
+ };
+
+ pmm8155au_2_gpios: gpio@c000 {
+ compatible = "qcom,pmm8155au-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmm8155au_2_gpios 0 0 10>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmic@5 {
+ compatible = "qcom,pmm8155au", "qcom,spmi-pmic";
+ reg = <0x5 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmr735a.dtsi b/arch/arm64/boot/dts/qcom/pmr735a.dtsi
index 1c675af13cbf..b4b6ba24f845 100644
--- a/arch/arm64/boot/dts/qcom/pmr735a.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmr735a.dtsi
@@ -13,13 +13,43 @@
#address-cells = <1>;
#size-cells = <0>;
+ pmr735a_temp_alarm: temp-alarm@a00 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0xa00>;
+ interrupts = <0x4 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+ #thermal-sensor-cells = <0>;
+ };
+
pmr735a_gpios: gpio@8800 {
- compatible = "qcom,pmr735a-gpio";
+ compatible = "qcom,pmr735a-gpio", "qcom,spmi-gpio";
reg = <0x8800>;
gpio-controller;
+ gpio-ranges = <&pmr735a_gpios 0 0 4>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
};
};
+
+&thermal_zones {
+ pmr735a_thermal: pmr735a-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmr735a_temp_alarm>;
+
+ trips {
+ pmr735a_trip0: trip0 {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ pmr735a_crit: pmr735a-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index 5f41de20aa22..8ac96f8e79d4 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -5,7 +5,6 @@
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <dt-bindings/sound/qcom,q6afe.h>
#include <dt-bindings/sound/qcom,q6asm.h>
@@ -552,7 +551,13 @@
vdds-supply = <&vreg_l5a_0p88>;
};
+&gmu {
+ status = "okay";
+};
+
&gpu {
+ status = "okay";
+
zap-shader {
memory-region = <&gpu_mem>;
firmware-name = "qcom/sm8250/a650_zap.mbn";
@@ -664,10 +669,6 @@
&pcie0 {
status = "okay";
- perst-gpio = <&tlmm 79 GPIO_ACTIVE_LOW>;
- wake-gpio = <&tlmm 81 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie0_default_state>;
};
&pcie0_phy {
@@ -678,10 +679,6 @@
&pcie1 {
status = "okay";
- perst-gpio = <&tlmm 82 GPIO_ACTIVE_LOW>;
- wake-gpio = <&tlmm 84 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie1_default_state>;
};
&pcie1_phy {
@@ -692,10 +689,6 @@
&pcie2 {
status = "okay";
- perst-gpio = <&tlmm 85 GPIO_ACTIVE_LOW>;
- wake-gpio = <&tlmm 87 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pcie2_default_state>;
};
&pcie2_phy {
@@ -1173,81 +1166,6 @@
bias-disable;
};
- pcie0_default_state: pcie0-default {
- clkreq {
- pins = "gpio80";
- function = "pci_e0";
- bias-pull-up;
- };
-
- reset-n {
- pins = "gpio79";
- function = "gpio";
-
- drive-strength = <2>;
- output-low;
- bias-pull-down;
- };
-
- wake-n {
- pins = "gpio81";
- function = "gpio";
-
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie1_default_state: pcie1-default {
- clkreq {
- pins = "gpio83";
- function = "pci_e1";
- bias-pull-up;
- };
-
- reset-n {
- pins = "gpio82";
- function = "gpio";
-
- drive-strength = <2>;
- output-low;
- bias-pull-down;
- };
-
- wake-n {
- pins = "gpio84";
- function = "gpio";
-
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
- pcie2_default_state: pcie2-default {
- clkreq {
- pins = "gpio86";
- function = "pci_e2";
- bias-pull-up;
- };
-
- reset-n {
- pins = "gpio85";
- function = "gpio";
-
- drive-strength = <2>;
- output-low;
- bias-pull-down;
- };
-
- wake-n {
- pins = "gpio87";
- function = "gpio";
-
- drive-strength = <2>;
- bias-pull-up;
- };
- };
-
sdc2_default_state: sdc2-default {
clk {
pins = "sdc2_clk";
@@ -1352,6 +1270,10 @@
qcom,dmic-sample-rate = <600000>;
};
+&venus {
+ status = "okay";
+};
+
/* PINCTRL - additions to nodes defined in sm8250.dtsi */
&qup_spi0_cs_gpio {
drive-strength = <6>;
diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
new file mode 100644
index 000000000000..0da7a3b8d1bf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "sm8150.dtsi"
+#include "pmm8155au_1.dtsi"
+#include "pmm8155au_2.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SA8155P ADP";
+ compatible = "qcom,sa8155p-adp", "qcom,sa8155p";
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ vreg_3p3: vreg_3p3_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_3p3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ /*
+ * S4A is always on and not controllable through RPMh.
+ * So model it as a fixed regulator.
+ */
+ vreg_s4a_1p8: smps4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-allow-set-load;
+
+ vin-supply = <&vreg_3p3>;
+ };
+};
+
+&apps_rsc {
+ pmm8155au-1-rpmh-regulators {
+ compatible = "qcom,pmm8155au-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vreg_3p3>;
+ vdd-s2-supply = <&vreg_3p3>;
+ vdd-s3-supply = <&vreg_3p3>;
+ vdd-s4-supply = <&vreg_3p3>;
+ vdd-s5-supply = <&vreg_3p3>;
+ vdd-s6-supply = <&vreg_3p3>;
+ vdd-s7-supply = <&vreg_3p3>;
+ vdd-s8-supply = <&vreg_3p3>;
+ vdd-s9-supply = <&vreg_3p3>;
+ vdd-s10-supply = <&vreg_3p3>;
+
+ vdd-l1-l8-l11-supply = <&vreg_s6a_0p92>;
+ vdd-l2-l10-supply = <&vreg_3p3>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p92>;
+ vdd-l6-l9-supply = <&vreg_s6a_0p92>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_2p04>;
+ vdd-l13-l16-l17-supply = <&vreg_3p3>;
+
+ vreg_s5a_2p04: smps5 {
+ regulator-name = "vreg_s5a_2p04";
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_s6a_0p92: smps6 {
+ regulator-name = "vreg_s6a_0p92";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <1128000>;
+ };
+
+ vreg_l1a_0p752: ldo1 {
+ regulator-name = "vreg_l1a_0p752";
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_usb_hs_3p1:
+ vreg_l2a_3p072: ldo2 {
+ regulator-name = "vreg_l2a_3p072";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p8: ldo3 {
+ regulator-name = "vreg_l3a_0p8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdd_usb_hs_core:
+ vdda_usb_ss_dp_core_1:
+ vreg_l5a_0p88: ldo5 {
+ regulator-name = "vreg_l5a_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l7a_1p8: ldo7 {
+ regulator-name = "vreg_l7a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a_2p96: ldo10 {
+ regulator-name = "vreg_l10a_2p96";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l11a_0p8: ldo11 {
+ regulator-name = "vreg_l11a_0p8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_usb_hs_1p8:
+ vreg_l12a_1p8: ldo12 {
+ regulator-name = "vreg_l12a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13a_2p7: ldo13 {
+ regulator-name = "vreg_l13a_2p7";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_1p7: ldo15 {
+ regulator-name = "vreg_l15a_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1704000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a_2p7: ldo16 {
+ regulator-name = "vreg_l16a_2p7";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a_2p96: ldo17 {
+ regulator-name = "vreg_l17a_2p96";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pmm8155au-2-rpmh-regulators {
+ compatible = "qcom,pmm8155au-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vreg_3p3>;
+ vdd-s2-supply = <&vreg_3p3>;
+ vdd-s3-supply = <&vreg_3p3>;
+ vdd-s4-supply = <&vreg_3p3>;
+ vdd-s5-supply = <&vreg_3p3>;
+ vdd-s6-supply = <&vreg_3p3>;
+ vdd-s7-supply = <&vreg_3p3>;
+ vdd-s8-supply = <&vreg_3p3>;
+ vdd-s9-supply = <&vreg_3p3>;
+ vdd-s10-supply = <&vreg_3p3>;
+
+ vdd-l1-l8-l11-supply = <&vreg_s4c_1p352>;
+ vdd-l2-l10-supply = <&vreg_3p3>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s4c_1p352>;
+ vdd-l6-l9-supply = <&vreg_s6c_1p128>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5c_2p04>;
+ vdd-l13-l16-l17-supply = <&vreg_3p3>;
+
+ vreg_s4c_1p352: smps4 {
+ regulator-name = "vreg_s4c_1p352";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ };
+
+ vreg_s5c_2p04: smps5 {
+ regulator-name = "vreg_s5c_2p04";
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_s6c_1p128: smps6 {
+ regulator-name = "vreg_s6c_1p128";
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ };
+
+ vreg_l1c_1p304: ldo1 {
+ regulator-name = "vreg_l1c_1p304";
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p808: ldo2 {
+ regulator-name = "vreg_l2c_1p808";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c_1p2: ldo5 {
+ regulator-name = "vreg_l5c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l7c_1p8: ldo7 {
+ regulator-name = "vreg_l7c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p2: ldo8 {
+ regulator-name = "vreg_l8c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l10c_3p3: ldo10 {
+ regulator-name = "vreg_l10c_3p3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_0p8: ldo11 {
+ regulator-name = "vreg_l11c_0p8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12c_1p808: ldo12 {
+ regulator-name = "vreg_l12c_1p808";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13c_2p96: ldo13 {
+ regulator-name = "vreg_l13c_2p96";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15c_1p9: ldo15 {
+ regulator-name = "vreg_l15c_1p9";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16c_3p008: ldo16 {
+ regulator-name = "vreg_l16c_3p008";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18c_0p88: ldo18 {
+ regulator-name = "vreg_l18c_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <0 4>;
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ reset-gpios = <&tlmm 175 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l10a_2p96>;
+ vcc-max-microamp = <750000>;
+ vccq-supply = <&vreg_l5c_1p2>;
+ vccq-max-microamp = <700000>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l8c_1p2>;
+ vdda-max-microamp = <87100>;
+ vdda-pll-supply = <&vreg_l5a_0p88>;
+ vdda-pll-max-microamp = <18300>;
+};
+
+
+&usb_1_hsphy {
+ status = "okay";
+ vdda-pll-supply = <&vdd_usb_hs_core>;
+ vdda33-supply = <&vdda_usb_hs_3p1>;
+ vdda18-supply = <&vdda_usb_hs_1p8>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l8c_1p2>;
+ vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-idp.dts b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
index e77a7926034a..acdb36f4479f 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include "sc7180.dtsi"
#include "pm6150.dtsi"
#include "pm6150l.dtsi"
@@ -45,7 +46,7 @@
/* Increase the size from 2MB to 8MB */
&rmtfs_mem {
- reg = <0x0 0x84400000 0x0 0x800000>;
+ reg = <0x0 0x94600000 0x0 0x800000>;
};
/ {
@@ -288,6 +289,57 @@
};
};
+&dsi0 {
+ status = "okay";
+
+ vdda-supply = <&vreg_l3c_1p2>;
+
+ panel@0 {
+ compatible = "visionox,rm69299-1080p-display";
+ reg = <0>;
+
+ vdda-supply = <&vreg_l8c_1p8>;
+ vdd3p3-supply = <&vreg_l18a_2p8>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&disp_pins>;
+
+ reset-gpios = <&pm6150l_gpio 3 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&panel0_in>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi_phy {
+ status = "okay";
+};
+
+&mdp {
+ status = "okay";
+};
+
+&mdss {
+ status = "okay";
+};
+
&qfprom {
vcc-supply = <&vreg_l11a_1p8>;
};
@@ -414,6 +466,19 @@
/* PINCTRL - additions to nodes defined in sc7180.dtsi */
+&pm6150l_gpio {
+ disp_pins: disp-pins {
+ pinconf {
+ pins = "gpio3";
+ function = PMIC_GPIO_FUNC_FUNC1;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_MED>;
+ power-source = <0>;
+ bias-disable;
+ output-low;
+ };
+ };
+};
+
&qspi_clk {
pinconf {
pins = "gpio63";
@@ -598,4 +663,106 @@
bias-pull-up;
};
};
+
+ sdc1_on: sdc1-on {
+ pinconf-clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_off: sdc1-off {
+ pinconf-clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc2_on: sdc2-on {
+ pinconf-clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-sd-cd {
+ pins = "gpio69";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_off: sdc2-off {
+ pinconf-clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-sd-cd {
+ pins = "gpio69";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts
index 533c048903ea..82dc00cc7fb9 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts
@@ -9,8 +9,8 @@
#include "sc7180-trogdor-lte-sku.dtsi"
/ {
- model = "Google CoachZ (rev1) with LTE";
- compatible = "google,coachz-rev1-sku0", "qcom,sc7180";
+ model = "Google CoachZ (rev1 - 2) with LTE";
+ compatible = "google,coachz-rev1-sku0", "google,coachz-rev2-sku0", "qcom,sc7180";
};
&cros_ec_proximity {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts
index 1b1dbdb2a82f..21b516e0694a 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts
@@ -10,8 +10,26 @@
#include "sc7180-trogdor-coachz.dtsi"
/ {
- model = "Google CoachZ (rev1)";
- compatible = "google,coachz-rev1", "qcom,sc7180";
+ model = "Google CoachZ (rev1 - 2)";
+ compatible = "google,coachz-rev1", "google,coachz-rev2", "qcom,sc7180";
+};
+
+/*
+ * CoachZ rev1 is stuffed with a 47k NTC as charger thermistor which currently
+ * is not supported by the PM6150 ADC driver. Disable the charger thermal zone
+ * to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
+};
+
+/*
+ * CoachZ rev1 is stuffed with a 47k NTC as thermistor for skin temperature,
+ * which currently is not supported by the PM6150 ADC driver. Disable the
+ * skin temperature thermal zone to avoid using bogus temperature values.
+ */
+&skin_temp_thermal {
+ status = "disabled";
};
&tlmm {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r3-lte.dts
index 6e7745801fae..d23409034e8c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r3-lte.dts
@@ -2,14 +2,14 @@
/*
* Google CoachZ board device tree source
*
- * Copyright 2020 Google LLC.
+ * Copyright 2021 Google LLC.
*/
-#include "sc7180-trogdor-coachz-r2.dts"
+#include "sc7180-trogdor-coachz-r3.dts"
#include "sc7180-trogdor-lte-sku.dtsi"
/ {
- model = "Google CoachZ (rev2+) with LTE";
+ model = "Google CoachZ (rev3+) with LTE";
compatible = "google,coachz-sku0", "qcom,sc7180";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r3.dts
index 4f69b6ba299f..a02d2d57c78c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r3.dts
@@ -2,7 +2,7 @@
/*
* Google CoachZ board device tree source
*
- * Copyright 2020 Google LLC.
+ * Copyright 2021 Google LLC.
*/
/dts-v1/;
@@ -10,6 +10,6 @@
#include "sc7180-trogdor-coachz.dtsi"
/ {
- model = "Google CoachZ (rev2+)";
+ model = "Google CoachZ (rev3+)";
compatible = "google,coachz", "qcom,sc7180";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
index 4c6e433c8226..6f9c07147551 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
@@ -23,8 +23,53 @@ ap_h1_spi: &spi0 {};
adau7002: audio-codec-1 {
compatible = "adi,adau7002";
IOVDD-supply = <&pp1800_l15a>;
+ wakeup-delay-ms = <15>;
#sound-dai-cells = <0>;
};
+
+ thermal-zones {
+ skin_temp_thermal: skin-temp-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&pm6150_adc_tm 1>;
+ sustainable-power = <814>;
+
+ trips {
+ skin_temp_alert0: trip-point0 {
+ temperature = <42000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ skin_temp_alert1: trip-point1 {
+ temperature = <45000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ skin-temp-crit {
+ temperature = <60000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&skin_temp_alert0>;
+ cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&skin_temp_alert1>;
+ cooling-device = <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
};
&ap_spi_fp {
@@ -77,6 +122,25 @@ ap_ts_pen_1v8: &i2c4 {
compatible = "boe,nv110wtm-n61";
};
+&pm6150_adc {
+ skin-temp-thermistor@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm6150_adc_tm {
+ status = "okay";
+
+ skin-temp-thermistor@1 {
+ reg = <1>;
+ io-channels = <&pm6150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
&pp3300_dx_edp {
gpio = <&tlmm 67 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts
index 5c997cd90069..30e3e769d2b4 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts
@@ -14,15 +14,6 @@
compatible = "google,lazor-rev0", "qcom,sc7180";
};
-/*
- * Lazor is stuffed with a 47k NTC as charger thermistor which currently is
- * not supported by the PM6150 ADC driver. Disable the charger thermal zone
- * to avoid using bogus temperature values.
- */
-&charger_thermal {
- status = "disabled";
-};
-
&pp3300_hub {
/* pp3300_l7c is used to power the USB hub */
/delete-property/regulator-always-on;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts
index d9fbcc7bc5bd..c2ef06367baf 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts
@@ -14,15 +14,6 @@
compatible = "google,lazor-rev1", "google,lazor-rev2", "qcom,sc7180";
};
-/*
- * Lazor is stuffed with a 47k NTC as charger thermistor which currently is
- * not supported by the PM6150 ADC driver. Disable the charger thermal zone
- * to avoid using bogus temperature values.
- */
-&charger_thermal {
- status = "disabled";
-};
-
&pp3300_hub {
/* pp3300_l7c is used to power the USB hub */
/delete-property/regulator-always-on;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts
index ea8c2ee09741..b474df47cd70 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts
@@ -14,12 +14,3 @@
model = "Google Lazor (rev3+)";
compatible = "google,lazor", "qcom,sc7180";
};
-
-/*
- * Lazor is stuffed with a 47k NTC as charger thermistor which currently is
- * not supported by the PM6150 ADC driver. Disable the charger thermal zone
- * to avoid using bogus temperature values.
- */
-&charger_thermal {
- status = "disabled";
-};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
index 6b10b96173e8..00535aaa43c9 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
@@ -21,6 +21,15 @@ ap_h1_spi: &spi0 {};
semtech,avg-pos-strength = <64>;
};
+/*
+ * Lazor is stuffed with a 47k NTC as charger thermistor which currently is
+ * not supported by the PM6150 ADC driver. Disable the charger thermal zone
+ * to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
+};
+
ap_ts_pen_1v8: &i2c4 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts
index e720e7bd0d70..e122a6b481ff 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts
@@ -9,11 +9,23 @@
#include "sc7180-trogdor-pompom.dtsi"
+/delete-node/ &keyboard_controller;
+#include <arm/cros-ec-keyboard.dtsi>
+
/ {
model = "Google Pompom (rev1)";
compatible = "google,pompom-rev1", "qcom,sc7180";
};
+/*
+ * Pompom rev1 is stuffed with a 47k NTC as charger thermistor which currently
+ * is not supported by the PM6150 ADC driver. Disable the charger thermal zone
+ * to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
+};
+
&pp3300_hub {
/* pp3300_l7c is used to power the USB hub */
/delete-property/regulator-always-on;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts
index 791d496ad046..00e187c08eb9 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts
@@ -9,6 +9,6 @@
#include "sc7180-trogdor-lte-sku.dtsi"
/ {
- model = "Google Pompom (rev2+) with LTE";
- compatible = "google,pompom-sku0", "qcom,sc7180";
+ model = "Google Pompom (rev2) with LTE";
+ compatible = "google,pompom-rev2-sku0", "qcom,sc7180";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts
index 984d7337da78..4f32e6733f4c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts
@@ -10,35 +10,15 @@
#include "sc7180-trogdor-pompom.dtsi"
/ {
- model = "Google Pompom (rev2+)";
- compatible = "google,pompom", "qcom,sc7180";
+ model = "Google Pompom (rev2)";
+ compatible = "google,pompom-rev2", "qcom,sc7180";
};
-&keyboard_controller {
- function-row-physmap = <
- MATRIX_KEY(0x00, 0x02, 0) /* T1 */
- MATRIX_KEY(0x03, 0x02, 0) /* T2 */
- MATRIX_KEY(0x02, 0x02, 0) /* T3 */
- MATRIX_KEY(0x01, 0x02, 0) /* T4 */
- MATRIX_KEY(0x03, 0x04, 0) /* T5 */
- MATRIX_KEY(0x02, 0x04, 0) /* T6 */
- MATRIX_KEY(0x01, 0x04, 0) /* T7 */
- MATRIX_KEY(0x02, 0x09, 0) /* T8 */
- MATRIX_KEY(0x01, 0x09, 0) /* T9 */
- MATRIX_KEY(0x00, 0x04, 0) /* T10 */
- >;
- linux,keymap = <
- MATRIX_KEY(0x00, 0x02, KEY_BACK)
- MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
- MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
- MATRIX_KEY(0x01, 0x02, KEY_SCALE)
- MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
- MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
- MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
- MATRIX_KEY(0x02, 0x09, KEY_MUTE)
- MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
- MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
-
- CROS_STD_MAIN_KEYMAP
- >;
+/*
+ * Pompom rev2 is stuffed with a 47k NTC as charger thermistor which currently
+ * is not supported by the PM6150 ADC driver. Disable the charger thermal zone
+ * to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3-lte.dts
new file mode 100644
index 000000000000..e90b73c353bb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3-lte.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+#include "sc7180-trogdor-pompom-r3.dts"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google Pompom (rev3+) with LTE";
+ compatible = "google,pompom-sku0", "qcom,sc7180";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3.dts
new file mode 100644
index 000000000000..f8aac63a53ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r3.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-pompom.dtsi"
+
+/ {
+ model = "Google Pompom (rev3+)";
+ compatible = "google,pompom", "qcom,sc7180";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
index 622b5f1b88a2..a246dbd74cc1 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -107,6 +107,35 @@ ap_ts_pen_1v8: &i2c4 {
};
};
+&keyboard_controller {
+ function-row-physmap = <
+ MATRIX_KEY(0x00, 0x02, 0) /* T1 */
+ MATRIX_KEY(0x03, 0x02, 0) /* T2 */
+ MATRIX_KEY(0x02, 0x02, 0) /* T3 */
+ MATRIX_KEY(0x01, 0x02, 0) /* T4 */
+ MATRIX_KEY(0x03, 0x04, 0) /* T5 */
+ MATRIX_KEY(0x02, 0x04, 0) /* T6 */
+ MATRIX_KEY(0x01, 0x04, 0) /* T7 */
+ MATRIX_KEY(0x02, 0x09, 0) /* T8 */
+ MATRIX_KEY(0x01, 0x09, 0) /* T9 */
+ MATRIX_KEY(0x00, 0x04, 0) /* T10 */
+ >;
+ linux,keymap = <
+ MATRIX_KEY(0x00, 0x02, KEY_BACK)
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE)
+ MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
+ MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
+ MATRIX_KEY(0x02, 0x09, KEY_MUTE)
+ MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
+
+ CROS_STD_MAIN_KEYMAP
+ >;
+};
+
&panel {
compatible = "kingdisplay,kd116n21-30nv-a010";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index 24d293ef56d7..77ae7561d436 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -64,11 +64,6 @@
no-map;
};
- camera_mem: memory@8ec00000 {
- reg = <0x0 0x8ec00000 0x0 0x500000>;
- no-map;
- };
-
venus_mem: memory@8f600000 {
reg = <0 0x8f600000 0 0x500000>;
no-map;
@@ -335,8 +330,7 @@
compatible = "jedec,spi-nor";
reg = <0>;
- /* TODO: Increase frequency after testing */
- spi-max-frequency = <25000000>;
+ spi-max-frequency = <37500000>;
spi-tx-bus-width = <2>;
spi-rx-bus-width = <2>;
};
@@ -564,10 +558,6 @@
#size-cells = <0>;
};
- pdupdate {
- compatible = "google,cros-ec-pd-update";
- };
-
typec {
compatible = "google,cros-ec-typec";
#address-cells = <1>;
@@ -655,6 +645,8 @@ edp_brij_i2c: &i2c2 {
clocks = <&rpmhcc RPMH_LN_BB_CLK3>;
clock-names = "refclk";
+ no-hpd;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -772,7 +764,7 @@ hp_i2c: &i2c9 {
qcom,capture-sd-lines = <0>;
};
- mi2s@1 {
+ secondary_mi2s: mi2s@1 {
reg = <MI2S_SECONDARY>;
qcom,playback-sd-lines = <0>;
};
@@ -805,7 +797,7 @@ hp_i2c: &i2c9 {
};
};
-&pm6150_pwrkey {
+&pm6150_pon {
status = "disabled";
};
@@ -981,6 +973,7 @@ ap_spi_fp: &spi10 {
&qspi_clk {
pinconf {
pins = "gpio63";
+ drive-strength = <8>;
bias-disable;
};
};
@@ -1494,4 +1487,106 @@ ap_spi_fp: &spi10 {
drive-strength = <2>;
};
};
+
+ sdc1_on: sdc1-on {
+ pinconf-clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_off: sdc1-off {
+ pinconf-clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc2_on: sdc2-on {
+ pinconf-clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-sd-cd {
+ pins = "gpio69";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_off: sdc2-off {
+ pinconf-clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-sd-cd {
+ pins = "gpio69";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 6228ba2d8513..a9a052f8c63c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -701,8 +701,9 @@
interrupt-names = "hc_irq", "pwr_irq";
clocks = <&gcc GCC_SDCC1_APPS_CLK>,
- <&gcc GCC_SDCC1_AHB_CLK>;
- clock-names = "core", "iface";
+ <&gcc GCC_SDCC1_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "core", "iface", "xo";
interconnects = <&aggre1_noc MASTER_EMMC 0 &mc_virt SLAVE_EBI1 0>,
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_EMMC_CFG 0>;
interconnect-names = "sdhc-ddr","cpu-sdhc";
@@ -726,15 +727,15 @@
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
required-opps = <&rpmhpd_opp_low_svs>;
- opp-peak-kBps = <100000 100000>;
- opp-avg-kBps = <100000 50000>;
+ opp-peak-kBps = <1800000 600000>;
+ opp-avg-kBps = <100000 0>;
};
opp-384000000 {
opp-hz = /bits/ 64 <384000000>;
- required-opps = <&rpmhpd_opp_svs_l1>;
- opp-peak-kBps = <600000 900000>;
- opp-avg-kBps = <261438 300000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <5400000 1600000>;
+ opp-avg-kBps = <390000 0>;
};
};
};
@@ -768,8 +769,6 @@
#size-cells = <2>;
ranges;
iommus = <&apps_smmu 0x43 0x0>;
- interconnects = <&qup_virt MASTER_QUP_CORE_0 0 &qup_virt SLAVE_QUP_CORE_0 0>;
- interconnect-names = "qup-core";
status = "disabled";
i2c0: i2c@880000 {
@@ -1059,8 +1058,6 @@
#size-cells = <2>;
ranges;
iommus = <&apps_smmu 0x4c3 0x0>;
- interconnects = <&qup_virt MASTER_QUP_CORE_1 0 &qup_virt SLAVE_QUP_CORE_1 0>;
- interconnect-names = "qup-core";
status = "disabled";
i2c6: i2c@a80000 {
@@ -1871,108 +1868,6 @@
function = "lpass_ext";
};
};
-
- sdc1_on: sdc1-on {
- pinconf-clk {
- pins = "sdc1_clk";
- bias-disable;
- drive-strength = <16>;
- };
-
- pinconf-cmd {
- pins = "sdc1_cmd";
- bias-pull-up;
- drive-strength = <10>;
- };
-
- pinconf-data {
- pins = "sdc1_data";
- bias-pull-up;
- drive-strength = <10>;
- };
-
- pinconf-rclk {
- pins = "sdc1_rclk";
- bias-pull-down;
- };
- };
-
- sdc1_off: sdc1-off {
- pinconf-clk {
- pins = "sdc1_clk";
- bias-disable;
- drive-strength = <2>;
- };
-
- pinconf-cmd {
- pins = "sdc1_cmd";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- pinconf-data {
- pins = "sdc1_data";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- pinconf-rclk {
- pins = "sdc1_rclk";
- bias-pull-down;
- };
- };
-
- sdc2_on: sdc2-on {
- pinconf-clk {
- pins = "sdc2_clk";
- bias-disable;
- drive-strength = <16>;
- };
-
- pinconf-cmd {
- pins = "sdc2_cmd";
- bias-pull-up;
- drive-strength = <10>;
- };
-
- pinconf-data {
- pins = "sdc2_data";
- bias-pull-up;
- drive-strength = <10>;
- };
-
- pinconf-sd-cd {
- pins = "gpio69";
- bias-pull-up;
- drive-strength = <2>;
- };
- };
-
- sdc2_off: sdc2-off {
- pinconf-clk {
- pins = "sdc2_clk";
- bias-disable;
- drive-strength = <2>;
- };
-
- pinconf-cmd {
- pins = "sdc2_cmd";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- pinconf-data {
- pins = "sdc2_data";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- pinconf-sd-cd {
- pins = "gpio69";
- bias-disable;
- drive-strength = <2>;
- };
- };
};
remoteproc_mpss: remoteproc@4080000 {
@@ -2670,8 +2565,9 @@
interrupt-names = "hc_irq", "pwr_irq";
clocks = <&gcc GCC_SDCC2_APPS_CLK>,
- <&gcc GCC_SDCC2_AHB_CLK>;
- clock-names = "core", "iface";
+ <&gcc GCC_SDCC2_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "core", "iface", "xo";
interconnects = <&aggre1_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>,
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>;
@@ -2689,15 +2585,15 @@
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
required-opps = <&rpmhpd_opp_low_svs>;
- opp-peak-kBps = <160000 100000>;
- opp-avg-kBps = <80000 50000>;
+ opp-peak-kBps = <1800000 600000>;
+ opp-avg-kBps = <100000 0>;
};
opp-202000000 {
opp-hz = /bits/ 64 <202000000>;
- required-opps = <&rpmhpd_opp_svs_l1>;
- opp-peak-kBps = <200000 120000>;
- opp-avg-kBps = <100000 60000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ opp-peak-kBps = <5400000 1600000>;
+ opp-avg-kBps = <200000 0>;
};
};
};
@@ -2754,8 +2650,8 @@
usb_1_qmpphy: phy-wrapper@88e9000 {
compatible = "qcom,sc7180-qmp-usb3-dp-phy";
reg = <0 0x088e9000 0 0x18c>,
- <0 0x088e8000 0 0x38>,
- <0 0x088ea000 0 0x40>;
+ <0 0x088e8000 0 0x3c>,
+ <0 0x088ea000 0 0x18c>;
status = "disabled";
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dts b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
index 54d2cb365b71..3900cfc09562 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
@@ -7,11 +7,19 @@
/dts-v1/;
+#include <dt-bindings/iio/qcom,spmi-adc7-pmr735a.h>
+#include <dt-bindings/iio/qcom,spmi-adc7-pmr735b.h>
+#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>
+#include <dt-bindings/iio/qcom,spmi-adc7-pmk8350.h>
#include "sc7280.dtsi"
+#include "pm7325.dtsi"
+#include "pmr735a.dtsi"
+#include "pm8350c.dtsi"
+#include "pmk8350.dtsi"
/ {
model = "Qualcomm Technologies, Inc. sc7280 IDP platform";
- compatible = "qcom,sc7280-idp", "qcom,sc7280";
+ compatible = "qcom,sc7280-idp", "google,senor", "qcom,sc7280";
aliases {
serial0 = &uart5;
@@ -234,6 +242,32 @@
};
};
+&pmk8350_vadc {
+ pm8350_die_temp {
+ reg = <PM8350_ADC7_DIE_TEMP>;
+ label = "pm8350_die_temp";
+ qcom,pre-scaling = <1 1>;
+ };
+
+ pmk8350_die_temp {
+ reg = <PMK8350_ADC7_DIE_TEMP>;
+ label = "pmk8350_die_temp";
+ qcom,pre-scaling = <1 1>;
+ };
+
+ pmr735a_die_temp {
+ reg = <PMR735A_ADC7_DIE_TEMP>;
+ label = "pmr735a_die_temp";
+ qcom,pre-scaling = <1 1>;
+ };
+
+ pmr735b_die_temp {
+ reg = <PMR735B_ADC7_DIE_TEMP>;
+ label = "pmr735b_die_temp";
+ qcom,pre-scaling = <1 1>;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 2cc478553935..188c5768a55a 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -11,7 +11,10 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/reset/qcom,sdm845-aoss.h>
+#include <dt-bindings/reset/qcom,sdm845-pdc.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
@@ -51,6 +54,11 @@
no-map;
};
+ smem_mem: memory@80900000 {
+ reg = <0x0 0x80900000 0x0 0x200000>;
+ no-map;
+ };
+
cpucp_mem: memory@80b00000 {
no-map;
reg = <0x0 0x80b00000 0x0 0x100000>;
@@ -70,6 +78,8 @@
&LITTLE_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_0: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -88,6 +98,8 @@
&LITTLE_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_100>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_100: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -103,6 +115,8 @@
&LITTLE_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_200>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_200: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -118,6 +132,8 @@
&LITTLE_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_300>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_300: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -133,6 +149,8 @@
&BIG_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_400>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
L2_400: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -148,6 +166,8 @@
&BIG_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_500>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
L2_500: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -163,6 +183,8 @@
&BIG_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_600>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
L2_600: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -178,6 +200,8 @@
&BIG_CPU_SLEEP_1
&CLUSTER_SLEEP_0>;
next-level-cache = <&L2_700>;
+ qcom,freq-domain = <&cpufreq_hw 2>;
+ #cooling-cells = <2>;
L2_700: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -251,6 +275,125 @@
};
};
+ clk_virt: interconnect {
+ compatible = "qcom,sc7280-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-cdsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <94>, <432>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ cdsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ cdsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-mpss {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-wpss {
+ compatible = "qcom,smp2p";
+ qcom,smem = <617>, <616>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_WPSS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_WPSS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <13>;
+
+ wpss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ wpss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
pmu {
compatible = "arm,armv8-pmuv3";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
@@ -316,6 +459,93 @@
};
};
+ cnoc2: interconnect@1500000 {
+ reg = <0 0x01500000 0 0x1000>;
+ compatible = "qcom,sc7280-cnoc2";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ cnoc3: interconnect@1502000 {
+ reg = <0 0x01502000 0 0x1000>;
+ compatible = "qcom,sc7280-cnoc3";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect@1580000 {
+ reg = <0 0x01580000 0 0x4>;
+ compatible = "qcom,sc7280-mc-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1680000 {
+ reg = <0 0x01680000 0 0x15480>;
+ compatible = "qcom,sc7280-system-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,sc7280-aggre1-noc";
+ reg = <0 0x016e0000 0 0x1c080>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre2_noc: interconnect@1700000 {
+ reg = <0 0x01700000 0 0x2b080>;
+ compatible = "qcom,sc7280-aggre2-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ reg = <0 0x01740000 0 0x1e080>;
+ compatible = "qcom,sc7280-mmss-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex", "syscon";
+ reg = <0 0x01f40000 0 0x40000>;
+ #hwlock-cells = <1>;
+ };
+
+ lpasscc: lpasscc@3000000 {
+ compatible = "qcom,sc7280-lpasscc";
+ reg = <0 0x03000000 0 0x40>,
+ <0 0x03c04000 0 0x4>,
+ <0 0x03389000 0 0x24>;
+ reg-names = "qdsp6ss", "top_cc", "cc";
+ clocks = <&gcc GCC_CFG_NOC_LPASS_CLK>;
+ clock-names = "iface";
+ #clock-cells = <1>;
+ };
+
+ lpass_ag_noc: interconnect@3c40000 {
+ reg = <0 0x03c40000 0 0xf080>;
+ compatible = "qcom,sc7280-lpass-ag-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ gpucc: clock-controller@3d90000 {
+ compatible = "qcom,sc7280-gpucc";
+ reg = <0 0x03d90000 0 0x9000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ clock-names = "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
stm@6002000 {
compatible = "arm,coresight-stm", "arm,primecell";
reg = <0 0x06002000 0 0x1000>,
@@ -805,6 +1035,20 @@
};
};
+ dc_noc: interconnect@90e0000 {
+ reg = <0 0x090e0000 0 0x5080>;
+ compatible = "qcom,sc7280-dc-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ gem_noc: interconnect@9100000 {
+ reg = <0 0x9100000 0 0xe2200>;
+ compatible = "qcom,sc7280-gem-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
system-cache-controller@9200000 {
compatible = "qcom,sc7280-llcc";
reg = <0 0x09200000 0 0xd0000>, <0 0x09600000 0 0x50000>;
@@ -812,6 +1056,42 @@
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
};
+ nsp_noc: interconnect@a0c0000 {
+ reg = <0 0x0a0c0000 0 0x10000>;
+ compatible = "qcom,sc7280-nsp-noc";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ videocc: clock-controller@aaf0000 {
+ compatible = "qcom,sc7280-videocc";
+ reg = <0 0xaaf0000 0 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sc7280-dispcc";
+ reg = <0 0xaf00000 0 0x20000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+ <0>, <0>, <0>, <0>, <0>, <0>;
+ clock-names = "bi_tcxo", "gcc_disp_gpll0_clk",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi0_phy_pll_out_dsiclk",
+ "dp_phy_pll_link_clk",
+ "dp_phy_pll_vco_div_clk",
+ "edp_phy_pll_link_clk",
+ "edp_phy_pll_vco_div_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sc7280-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x30000>;
@@ -825,6 +1105,40 @@
interrupt-controller;
};
+ pdc_reset: reset-controller@b5e0000 {
+ compatible = "qcom,sc7280-pdc-global";
+ reg = <0 0x0b5e0000 0 0x20000>;
+ #reset-cells = <1>;
+ };
+
+ tsens0: thermal-sensor@c263000 {
+ compatible = "qcom,sc7280-tsens","qcom,tsens-v2";
+ reg = <0 0x0c263000 0 0x1ff>, /* TM */
+ <0 0x0c222000 0 0x1ff>; /* SROT */
+ #qcom,sensors = <15>;
+ interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow","critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens1: thermal-sensor@c265000 {
+ compatible = "qcom,sc7280-tsens","qcom,tsens-v2";
+ reg = <0 0x0c265000 0 0x1ff>, /* TM */
+ <0 0x0c223000 0 0x1ff>; /* SROT */
+ #qcom,sensors = <12>;
+ interrupts = <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow","critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ aoss_reset: reset-controller@c2a0000 {
+ compatible = "qcom,sc7280-aoss-cc", "qcom,sdm845-aoss-cc";
+ reg = <0 0x0c2a0000 0 0x31000>;
+ #reset-cells = <1>;
+ };
+
aoss_qmp: power-controller@c300000 {
compatible = "qcom,sc7280-aoss-qmp";
reg = <0 0x0c300000 0 0x100000>;
@@ -1063,6 +1377,10 @@
<WAKE_TCS 3>,
<CONTROL_TCS 1>;
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+ };
+
rpmhpd: power-controller {
compatible = "qcom,sc7280-rpmhpd";
#power-domain-cells = <1>;
@@ -1116,6 +1434,859 @@
#clock-cells = <1>;
};
};
+
+ cpufreq_hw: cpufreq@18591000 {
+ compatible = "qcom,cpufreq-epss";
+ reg = <0 0x18591000 0 0x1000>,
+ <0 0x18592000 0 0x1000>,
+ <0 0x18593000 0 0x1000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+ #freq-domain-cells = <1>;
+ };
+ };
+
+ thermal_zones: thermal-zones {
+ cpu0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 1>;
+
+ trips {
+ cpu0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu0_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu0_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 2>;
+
+ trips {
+ cpu1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu1_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu1_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu2_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu2_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu2_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu2_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 4>;
+
+ trips {
+ cpu3_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu3_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu3_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu3_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu3_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu4-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 7>;
+
+ trips {
+ cpu4_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu4_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu4_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu5-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu5_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu5_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu5_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu6-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 9>;
+
+ trips {
+ cpu6_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu6_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu6_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu7-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu7_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu7_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu7_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu8-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 11>;
+
+ trips {
+ cpu8_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu8_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu8_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu8_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu8_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu9-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ cpu9_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu9_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu9_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu9_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu9_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu10-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 13>;
+
+ trips {
+ cpu10_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu10_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu10_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu10_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu10_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu11-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 14>;
+
+ trips {
+ cpu11_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu11_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu11_crit: cpu-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu11_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu11_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ aoss0-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 0>;
+
+ trips {
+ aoss0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ aoss0_crit: aoss0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ aoss1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 0>;
+
+ trips {
+ aoss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ aoss1_crit: aoss1-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss0-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpuss0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ cpuss0_crit: cluster0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpuss1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens0 6>;
+
+ trips {
+ cpuss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ cpuss1_crit: cluster0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss0-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 1>;
+
+ trips {
+ gpuss0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ gpuss0_crit: gpuss0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpuss1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 2>;
+
+ trips {
+ gpuss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ gpuss1_crit: gpuss1-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nspss0-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 3>;
+
+ trips {
+ nspss0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nspss0_crit: nspss0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ nspss1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 4>;
+
+ trips {
+ nspss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ nspss1_crit: nspss1-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ video-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 5>;
+
+ trips {
+ video_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ video_crit: video-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ ddr-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 6>;
+
+ trips {
+ ddr_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ ddr_crit: ddr-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ mdmss0-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 7>;
+
+ trips {
+ mdmss0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ mdmss0_crit: mdmss0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ mdmss1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 8>;
+
+ trips {
+ mdmss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ mdmss1_crit: mdmss1-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ mdmss2-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 9>;
+
+ trips {
+ mdmss2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ mdmss2_crit: mdmss2-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ mdmss3-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 10>;
+
+ trips {
+ mdmss3_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ mdmss3_crit: mdmss3-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ camera0-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&tsens1 11>;
+
+ trips {
+ camera0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ camera0_crit: camera0-crit {
+ temperature = <110000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
};
timer {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 216a74f0057c..dfd1b42c07fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -714,10 +714,6 @@ ap_ts_i2c: &i2c14 {
#address-cells = <1>;
#size-cells = <0>;
};
-
- pdupdate {
- compatible = "google,cros-ec-pd-update";
- };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 1372fe8601f5..91ede9296aff 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -448,6 +448,11 @@
clock-frequency = <400000>;
};
+&ipa {
+ status = "okay";
+ memory-region = <&ipa_fw_mem>;
+};
+
&mdss {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
index 8f617f7b6d34..eb6b1d15293d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -46,6 +46,14 @@
};
reserved-memory {
+ /* The rmtfs_mem needs to be guarded due to "XPU limitations";
+ * it is otherwise possible for an allocation adjacent to the
+ * rmtfs_mem region to trigger an XPU violation, causing a crash.
+ */
+ rmtfs_lower_guard: memory@f5b00000 {
+ no-map;
+ reg = <0 0xf5b00000 0 0x1000>;
+ };
/*
* The rmtfs memory region in downstream is 'dynamically allocated'
* but given the same address every time. Hard code it as this address is
@@ -59,6 +67,10 @@
qcom,client-id = <1>;
qcom,vmid = <15>;
};
+ rmtfs_upper_guard: memory@f5d01000 {
+ no-map;
+ reg = <0 0xf5d01000 0 0x1000>;
+ };
/*
* It seems like reserving the old rmtfs_mem region is also needed to prevent
@@ -66,7 +78,7 @@
*/
removed_region: memory@88f00000 {
no-map;
- reg = <0 0x88f00000 0 0x200000>;
+ reg = <0 0x88f00000 0 0x1c00000>;
};
ramoops: ramoops@ac300000 {
@@ -387,6 +399,12 @@
};
};
+&ipa {
+ status = "okay";
+
+ memory-region = <&ipa_fw_mem>;
+};
+
&mdss {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
index 7d029425336e..c60c8c640e17 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
@@ -5,6 +5,8 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include "sdm845.dtsi"
#include "pm8998.dtsi"
#include "pmi8998.dtsi"
@@ -311,6 +313,28 @@
};
};
+/* QUAT I2S uses one I2S SD line for audio on the TAS2559/60 amplifiers */
+&q6afedai {
+ qi2s@22 {
+ reg = <22>;
+ qcom,sd-lines = <0>;
+ };
+};
+
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ };
+
+ dai@2 {
+ reg = <2>;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -328,6 +352,70 @@
cd-gpios = <&tlmm 126 GPIO_ACTIVE_HIGH>;
};
+&sound {
+ compatible = "qcom,db845c-sndcard";
+ pinctrl-0 = <&quat_mi2s_active
+ &quat_mi2s_sd0_active>;
+ pinctrl-names = "default";
+ model = "Xiaomi Poco F1";
+ audio-routing =
+ "RX_BIAS", "MCLK",
+ "AMIC1", "MIC BIAS1",
+ "AMIC2", "MIC BIAS2",
+ "AMIC3", "MIC BIAS3";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ slim-dai-link {
+ link-name = "SLIM Playback";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&wcd9340 0>;
+ };
+ };
+
+ slimcap-dai-link {
+ link-name = "SLIM Capture";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&wcd9340 1>;
+ };
+ };
+};
+
&tlmm {
gpio-reserved-ranges = <0 4>, <81 4>;
@@ -356,6 +444,15 @@
function = "gpio";
bias-pull-up;
};
+
+ wcd_intr_default: wcd_intr_default {
+ pins = "gpio54";
+ function = "gpio";
+
+ input-enable;
+ bias-pull-down;
+ drive-strength = <2>;
+ };
};
&uart6 {
@@ -416,6 +513,23 @@
vdda-pll-supply = <&vreg_l1a_0p875>;
};
+&wcd9340 {
+ pinctrl-0 = <&wcd_intr_default>;
+ pinctrl-names = "default";
+ clock-names = "extclk";
+ clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
+ reset-gpios = <&tlmm 64 0>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-buck-sido-supply = <&vreg_s4a_1p8>;
+ vdd-tx-supply = <&vreg_s4a_1p8>;
+ vdd-rx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+ qcom,micbias1-microvolt = <2700000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <2700000>;
+ qcom,micbias4-microvolt = <2700000>;
+};
+
&wifi {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 140db2d5ba31..d7591a4621a2 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -376,6 +376,8 @@
clocks = <&sn65dsi86_refclk>;
clock-names = "refclk";
+ no-hpd;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -698,7 +700,7 @@
left_spkr: wsa8810-left{
compatible = "sdw10217211000";
reg = <0 3>;
- powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+ powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrLeft";
#sound-dai-cells = <0>;
@@ -706,7 +708,7 @@
right_spkr: wsa8810-right{
compatible = "sdw10217211000";
- powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
+ powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
reg = <0 4>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrRight";
diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
index fb2cf3d987a1..335aa0753fc0 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
@@ -354,22 +354,26 @@
};
};
-&qupv3_id_1 {
+&gmu {
status = "okay";
};
-&pon {
- pwrkey {
- status = "okay";
- };
+&gpu {
+ status = "okay";
+};
- resin {
- compatible = "qcom,pm8941-resin";
- interrupts = <0x0 0x8 0x1 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_VOLUMEDOWN>;
- };
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ status = "okay";
+
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&qupv3_id_1 {
+ status = "okay";
};
&remoteproc_adsp {
diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
new file mode 100644
index 000000000000..736da9af44e0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (C) 2021, Microsoft Corporation
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "sm8150.dtsi"
+#include "pm8150.dtsi"
+#include "pm8150b.dtsi"
+#include "pm8150l.dtsi"
+
+/ {
+ model = "Microsoft Surface Duo";
+ compatible = "microsoft,surface-duo", "qcom,sm8150";
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ /*
+ * Apparently RPMh does not provide support for PM8150 S4 because it
+ * is always-on; model it as a fixed regulator.
+ */
+ vreg_s4a_1p8: pm8150-s4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ vol_up {
+ label = "Volume Up";
+ gpios = <&pm8150_gpios 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+};
+
+&apps_rsc {
+ pm8150-rpmh-regulators {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+
+ vdd-l1-l8-l11-supply = <&vreg_s6a_0p9>;
+ vdd-l2-l10-supply = <&vreg_bob>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p9>;
+ vdd-l6-l9-supply = <&vreg_s8c_1p3>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_2p0>;
+ vdd-l13-l16-l17-supply = <&vreg_bob>;
+
+ vreg_s5a_2p0: smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_s6a_0p9: smps6 {
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <1128000>;
+ };
+
+ vdda_wcss_pll:
+ vreg_l1a_0p75: ldo1 {
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdd_pdphy:
+ vdda_usb_hs_3p1:
+ vreg_l2a_3p1: ldo2 {
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p8: ldo3 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <932000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdd_usb_hs_core:
+ vdda_csi_0_0p9:
+ vdda_csi_1_0p9:
+ vdda_csi_2_0p9:
+ vdda_csi_3_0p9:
+ vdda_dsi_0_0p9:
+ vdda_dsi_1_0p9:
+ vdda_dsi_0_pll_0p9:
+ vdda_dsi_1_pll_0p9:
+ vdda_pcie_1ln_core:
+ vdda_pcie_2ln_core:
+ vdda_pll_hv_cc_ebi01:
+ vdda_pll_hv_cc_ebi23:
+ vdda_qrefs_0p875_5:
+ vdda_sp_sensor:
+ vdda_ufs_2ln_core_1:
+ vdda_ufs_2ln_core_2:
+ vdda_usb_ss_dp_core_1:
+ vdda_usb_ss_dp_core_2:
+ vdda_qlink_lv:
+ vdda_qlink_lv_ck:
+ vreg_l5a_0p875: ldo5 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_1p2: ldo6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p8: ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vddpx_10:
+ vreg_l9a_1p2: ldo9 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a_2p5: ldo10 {
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11a_0p8: ldo11 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdd_qfprom:
+ vdd_qfprom_sp:
+ vdda_apc_cs_1p8:
+ vdda_gfx_cs_1p8:
+ vdda_usb_hs_1p8:
+ vdda_qrefs_vref_1p8:
+ vddpx_10_a:
+ vreg_l12a_1p8: ldo12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13a_2p7: ldo13 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_1p7: ldo15 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1704000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a_2p7: ldo16 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a_3p0: ldo17 {
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8150l-rpmh-regulators {
+ compatible = "qcom,pm8150l-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+
+ vdd-l1-l8-supply = <&vreg_s4a_1p8>;
+ vdd-l2-l3-supply = <&vreg_s8c_1p3>;
+ vdd-l4-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-l11-supply = <&vreg_bob>;
+ vdd-l9-l10-supply = <&vreg_bob>;
+
+ vdd-bob-supply = <&vph_pwr>;
+ vdd-flash-supply = <&vreg_bob>;
+ vdd-rgb-supply = <&vreg_bob>;
+
+ vreg_bob: bob {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ regulator-allow-bypass;
+ };
+
+ vreg_s8c_1p3: smps8 {
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_wcss_adcdac_1:
+ vdda_wcss_adcdac_22:
+ vreg_l2c_1p3: ldo2 {
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_hv_ebi0:
+ vdda_hv_ebi1:
+ vdda_hv_ebi2:
+ vdda_hv_ebi3:
+ vdda_hv_refgen0:
+ vdda_qlink_hv_ck:
+ vreg_l3c_1p2: ldo3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vddpx_5:
+ vreg_l4c_1p8: ldo4 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vddpx_6:
+ vreg_l5c_1p8: ldo5 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vddpx_2:
+ vreg_l6c_2p9: ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c_3p0: ldo7 {
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c_3p3: ldo10 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_3p3: ldo11 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8009-rpmh-regulators {
+ compatible = "qcom,pm8009-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vreg_bob>;
+
+ vdd-l2-supply = <&vreg_s8c_1p3>;
+ vdd-l5-l6-supply = <&vreg_bob>;
+
+ vreg_l2f_1p2: ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5f_2p85: ldo5 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6f_2p85: ldo6 {
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <2856000>;
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ bq27742@55 {
+ compatible = "ti,bq27742";
+ reg = <0x55>;
+ };
+
+ da7280@4a {
+ compatible = "dlg,da7280";
+ reg = <0x4a>;
+ interrupts-extended = <&tlmm 42 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "da7280_default";
+ pinctrl-0 = <&da7280_intr_default>;
+
+ dlg,actuator-type = "LRA";
+ dlg,const-op-mode = <1>;
+ dlg,periodic-op-mode = <1>;
+ dlg,nom-microvolt = <2000000>;
+ dlg,abs-max-microvolt = <2000000>;
+ dlg,imax-microamp = <129000>;
+ dlg,resonant-freq-hz = <180>;
+ dlg,impd-micro-ohms = <14300000>;
+ dlg,freq-track-enable;
+ dlg,bemf-sens-enable;
+ dlg,mem-array = <
+ 0x06 0x08 0x10 0x11 0x12 0x13 0x14 0x15 0x1c 0x2a
+ 0x33 0x3c 0x42 0x4b 0x4c 0x4e 0x17 0x19 0x27 0x29
+ 0x17 0x19 0x03 0x84 0x5e 0x04 0x08 0x84 0x5d 0x01
+ 0x84 0x5e 0x02 0x00 0xa4 0x5d 0x03 0x84 0x5e 0x06
+ 0x08 0x84 0x5d 0x05 0x84 0x5d 0x06 0x84 0x5e 0x08
+ 0x84 0x5e 0x05 0x8c 0x5e 0x24 0x84 0x5f 0x10 0x84
+ 0x5e 0x05 0x84 0x5e 0x08 0x84 0x5f 0x01 0x8c 0x5e
+ 0x04 0x84 0x5e 0x08 0x84 0x5f 0x11 0x19 0x88 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
+ >;
+ };
+
+ /* SMB1381 @ 0x44 */
+ /* MAX34417 @ 0x1c */
+};
+
+&i2c4 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* SMB1355 @ 0x0c */
+ /* SMB1390 @ 0x10 */
+};
+
+&i2c17 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ bq27742@55 {
+ compatible = "ti,bq27742";
+ reg = <0x55>;
+ };
+};
+
+&i2c19 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* MAX34417 @ 0x12 */
+ /* MAX34417 @ 0x1a */
+ /* MAX34417 @ 0x1e */
+};
+
+&pon {
+ pwrkey {
+ status = "okay";
+ };
+
+ resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0x0 0x8 0x1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&qupv3_id_2 {
+ status = "okay";
+};
+
+&remoteproc_adsp {
+ status = "okay";
+ firmware-name = "qcom/sm8150/microsoft/adsp.mdt";
+};
+
+&remoteproc_cdsp {
+ status = "okay";
+ firmware-name = "qcom/sm8150/microsoft/cdsp.mdt";
+};
+
+&remoteproc_mpss {
+ status = "okay";
+ firmware-name = "qcom/sm8150/microsoft/modem.mdt";
+};
+
+&remoteproc_slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8150/microsoft/slpi.mdt";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <126 4>;
+
+ da7280_intr_default: da7280-intr-default {
+ pins = "gpio42";
+ function = "gpio";
+ bias-pull-up;
+ input-enable;
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ reset-gpios = <&tlmm 175 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l10a_2p5>;
+ vcc-max-microamp = <750000>;
+ vccq-supply = <&vreg_l9a_1p2>;
+ vccq-max-microamp = <700000>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vdda_ufs_2ln_core_1>;
+ vdda-max-microamp = <90200>;
+ vdda-pll-supply = <&vreg_l3c_1p2>;
+ vdda-pll-max-microamp = <19000>;
+};
+
+&usb_1_hsphy {
+ status = "okay";
+ vdda-pll-supply = <&vdd_usb_hs_core>;
+ vdda33-supply = <&vdda_usb_hs_3p1>;
+ vdda18-supply = <&vdda_usb_hs_1p8>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+ vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&wifi {
+ status = "okay";
+
+ vdd-0.8-cx-mx-supply = <&vdda_wcss_pll>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vdda_wcss_adcdac_1>;
+ vdd-3.3-ch0-supply = <&vreg_l11c_3p3>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
index 3774f8e63416..53edf7541169 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts
@@ -349,22 +349,26 @@
};
};
-&qupv3_id_1 {
+&gmu {
status = "okay";
};
-&pon {
- pwrkey {
- status = "okay";
- };
+&gpu {
+ status = "okay";
+};
- resin {
- compatible = "qcom,pm8941-resin";
- interrupts = <0x0 0x8 0x1 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_VOLUMEDOWN>;
- };
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ status = "okay";
+
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&qupv3_id_1 {
+ status = "okay";
};
&remoteproc_adsp {
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-bahamut.dts b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-bahamut.dts
new file mode 100644
index 000000000000..3b55fdda767a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-bahamut.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "sm8150-sony-xperia-kumano.dtsi"
+
+/ {
+ model = "Sony Xperia 5";
+ compatible = "sony,bahamut-generic", "qcom,sm8150";
+};
+
+&framebuffer {
+ width = <1080>;
+ height = <2520>;
+ stride = <(1080 * 4)>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-griffin.dts b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-griffin.dts
new file mode 100644
index 000000000000..6f490ec284bd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano-griffin.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "sm8150-sony-xperia-kumano.dtsi"
+
+/ {
+ model = "Sony Xperia 1";
+ compatible = "sony,griffin-generic", "qcom,sm8150";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
new file mode 100644
index 000000000000..014fe3a31548
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8150.dtsi"
+#include "pm8150.dtsi"
+#include "pm8150b.dtsi"
+#include "pm8150l.dtsi"
+
+/delete-node/ &cdsp_mem;
+/delete-node/ &gpu_mem;
+/delete-node/ &ipa_fw_mem;
+/delete-node/ &ipa_gsi_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &slpi_mem;
+/delete-node/ &spss_mem;
+/delete-node/ &venus_mem;
+
+/ {
+ qcom,msm-id = <339 0x20000>; /* SM8150 v2 */
+ qcom,board-id = <8 0>;
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer: framebuffer@9c000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0x9c000000 0 0x2300000>;
+ width = <1644>;
+ height = <3840>;
+ stride = <(1644 * 4)>;
+ format = "a8r8g8b8";
+ /*
+ * This is (going to be) a lot of clocks, but they are necessary due to
+ * unused-clock cleanup and the lack of a panel driver (and dispcc) so far.
+ */
+ clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&gcc GCC_DISP_SF_AXI_CLK>;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ /*
+ * Apparently RPMh does not provide support for PM8150 S4 because it
+ * is always-on; model it as a fixed regulator.
+ */
+ vreg_s4a_1p8: pm8150-s4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ reserved-memory {
+ mpss_mem: memory@8dc00000 {
+ reg = <0x0 0x8dc00000 0x0 0x9600000>;
+ no-map;
+ };
+
+ venus_mem: memory@97200000 {
+ reg = <0x0 0x97200000 0x0 0x500000>;
+ no-map;
+ };
+
+ slpi_mem: memory@97700000 {
+ reg = <0x0 0x97700000 0x0 0x1400000>;
+ no-map;
+ };
+
+ ipa_fw_mem: memory@98b00000 {
+ reg = <0x0 0x98b00000 0x0 0x10000>;
+ no-map;
+ };
+
+ ipa_gsi_mem: memory@98b10000 {
+ reg = <0x0 0x98b10000 0x0 0x5000>;
+ no-map;
+ };
+
+ gpu_mem: memory@98b15000 {
+ reg = <0x0 0x98b15000 0x0 0x2000>;
+ no-map;
+ };
+
+ spss_mem: memory@98c00000 {
+ reg = <0x0 0x98c00000 0x0 0x100000>;
+ no-map;
+ };
+
+ cdsp_mem: memory@98d00000 {
+ reg = <0x0 0x98d00000 0x0 0x1400000>;
+ no-map;
+ };
+
+ cont_splash_mem: memory@9c000000 {
+ reg = <0x0 0x9c000000 0x0 0x2400000>;
+ no-map;
+ };
+
+ cdsp_sec_mem: memory@a4c00000 {
+ reg = <0x0 0xa4c00000 0x0 0x3c00000>;
+ no-map;
+ };
+
+ ramoops@ffc00000 {
+ compatible = "ramoops";
+ reg = <0x0 0xffc00000 0x0 0x100000>;
+ record-size = <0x1000>;
+ console-size = <0x40000>;
+ msg-size = <0x20000 0x20000>;
+ ecc-size = <16>;
+ no-map;
+ };
+ };
+};
+
+&adsp_mem {
+ reg = <0x0 0x8be00000 0x0 0x1e00000>;
+};
+
+&apps_rsc {
+ pm8150-rpmh-regulators {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+
+ vdd-l1-l8-l11-supply = <&vreg_s6a_0p9>;
+ vdd-l2-l10-supply = <&vreg_bob>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p9>;
+ vdd-l6-l9-supply = <&vreg_s8c_1p3>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_1p9>;
+ vdd-l13-l16-l17-supply = <&vreg_bob>;
+
+ vreg_s2a_0p6: smps2 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <600000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5a_1p9: smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6a_0p9: smps6 {
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1a_0p75: ldo1 {
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_3p1: ldo2 {
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p8: ldo3 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <932000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a_0p875: ldo5 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_1p2: ldo6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p8: ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9a_1p2: ldo9 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a_2p5: ldo10 {
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11a_0p8: ldo11 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12a_1p8: ldo12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L13 is unused. */
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_1p7: ldo15 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1704000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a_2p7: ldo16 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a_3p0: ldo17 {
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18a_0p8: ldo18 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8150l-rpmh-regulators {
+ compatible = "qcom,pm8150l-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+
+ vdd-l1-l8-supply = <&vreg_s4a_1p8>;
+ vdd-l2-l3-supply = <&vreg_s8c_1p3>;
+ vdd-l4-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-l11-supply = <&vreg_bob>;
+ vdd-l9-l10-supply = <&vreg_bob>;
+
+ vdd-bob-supply = <&vph_pwr>;
+ vdd-flash-supply = <&vreg_bob>;
+ vdd-rgb-supply = <&vreg_bob>;
+
+ vreg_bob: bob {
+ regulator-min-microvolt = <3350000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ regulator-allow-bypass;
+ };
+
+ vreg_s1c_1p1: smps1 {
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s8c_1p3: smps8 {
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p3: ldo2 {
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_1p2: ldo3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c_1p8: ldo4 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c_1p8: ldo5 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_2p9: ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l7c_3p0: ldo7 {
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l10c_3p3: ldo10 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_3p3: ldo11 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ /* PM8009 is not present on these boards, even though downstream sources suggest otherwise. */
+};
+
+&i2c4 {
+ status = "okay";
+
+ /* Qcom SMB1355 @ c */
+ /* Qcom SMB1390 @ 10 */
+ /* NXP PN553 NFC @ 28 */
+ /* Qcom FSA4480 USB-C audio switch @ 43 */
+};
+
+&i2c7 {
+ status = "okay";
+
+ /* AMS TCS3490 RGB+IR color sensor @ 72 */
+};
+
+&i2c10 {
+ status = "okay";
+
+ /* Samsung touchscreen @ 48 */
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <126 4>;
+};
+
+&uart2 {
+ status = "okay";
+};
+
+/* BIG WARNING! DO NOT TOUCH UFS, YOUR DEVICE WILL DIE! */
+&ufs_mem_hc { status = "disabled"; };
+&ufs_mem_phy { status = "disabled"; };
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p8>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 51235a9521c2..eef9d79157e9 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -4,6 +4,7 @@
* Copyright (c) 2019, Linaro Limited
*/
+#include <dt-bindings/dma/qcom-gpi.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
@@ -577,6 +578,29 @@
<&sleep_clk>;
};
+ gpi_dma0: dma-controller@800000 {
+ compatible = "qcom,sm8150-gpi-dma";
+ reg = <0 0x800000 0 0x60000>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <13>;
+ dma-channel-mask = <0xfa>;
+ iommus = <&apps_smmu 0x00d6 0x0>;
+ #dma-cells = <3>;
+ status = "disabled";
+ };
+
qupv3_id_0: geniqup@8c0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x008c0000 0x0 0x6000>;
@@ -695,6 +719,29 @@
};
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,sm8150-gpi-dma";
+ reg = <0 0xa00000 0 0x60000>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <13>;
+ dma-channel-mask = <0xfa>;
+ iommus = <&apps_smmu 0x0616 0x0>;
+ #dma-cells = <3>;
+ status = "disabled";
+ };
+
qupv3_id_1: geniqup@ac0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x00ac0000 0x0 0x6000>;
@@ -795,6 +842,29 @@
};
};
+ gpi_dma2: dma-controller@c00000 {
+ compatible = "qcom,sm8150-gpi-dma";
+ reg = <0 0xc00000 0 0x60000>;
+ interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 590 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 593 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 596 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 598 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 599 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 600 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <13>;
+ dma-channel-mask = <0xfa>;
+ iommus = <&apps_smmu 0x07b6 0x0>;
+ #dma-cells = <3>;
+ status = "disabled";
+ };
+
qupv3_id_2: geniqup@cc0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x00cc0000 0x0 0x6000>;
@@ -1082,6 +1152,8 @@
qcom,gmu = <&gmu>;
+ status = "disabled";
+
zap-shader {
memory-region = <&gpu_mem>;
};
@@ -1149,6 +1221,8 @@
operating-points-v2 = <&gmu_opp_table>;
+ status = "disabled";
+
gmu_opp_table: opp-table {
compatible = "operating-points-v2";
@@ -1496,6 +1570,8 @@
qcom,smem-states = <&modem_smp2p_out 0>;
qcom,smem-state-names = "stop";
+ status = "disabled";
+
glink-edge {
interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
label = "modem";
diff --git a/arch/arm64/boot/dts/qcom/sm8250-hdk.dts b/arch/arm64/boot/dts/qcom/sm8250-hdk.dts
index c3a2c5aa6fe9..47742816ac2f 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-hdk.dts
@@ -6,7 +6,6 @@
/dts-v1/;
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
-#include <dt-bindings/gpio/gpio.h>
#include "sm8250.dtsi"
#include "pm8150.dtsi"
#include "pm8150b.dtsi"
@@ -365,22 +364,26 @@
};
};
-&qupv3_id_1 {
+&gmu {
status = "okay";
};
-&pon {
- pwrkey {
- status = "okay";
- };
+&gpu {
+ status = "okay";
+};
- resin {
- compatible = "qcom,pm8941-resin";
- interrupts = <0x0 0x8 0x1 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_VOLUMEDOWN>;
- };
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ status = "okay";
+
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&qupv3_id_1 {
+ status = "okay";
};
&tlmm {
@@ -452,3 +455,7 @@
&usb_2_dwc3 {
dr_mode = "host";
};
+
+&venus {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index cfc4d1febe0f..062b944be91d 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -465,7 +465,13 @@
firmware-name = "qcom/sm8250/cdsp.mbn";
};
+&gmu {
+ status = "okay";
+};
+
&gpu {
+ status = "okay";
+
zap-shader {
memory-region = <&gpu_mem>;
firmware-name = "qcom/sm8250/a650_zap.mbn";
@@ -691,3 +697,7 @@
vdda-phy-supply = <&vreg_l9a_1p2>;
vdda-pll-supply = <&vreg_l18a_0p9>;
};
+
+&venus {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
new file mode 100644
index 000000000000..79afeb07f4a2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "sm8250-sony-xperia-edo.dtsi"
+
+/ {
+ model = "Sony Xperia 1 II";
+ compatible = "sony,pdx203-generic", "qcom,sm8250";
+};
+
+/delete-node/ &vreg_l7f_1p8;
diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
new file mode 100644
index 000000000000..16c96e838534
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "sm8250-sony-xperia-edo.dtsi"
+
+/ {
+ model = "Sony Xperia 5 II";
+ compatible = "sony,pdx206-generic", "qcom,sm8250";
+};
+
+&framebuffer {
+ width = <1080>;
+ height = <2520>;
+ stride = <(1080 * 4)>;
+};
+
+&gpio_keys {
+ g-assist-key {
+ label = "Google Assistant Key";
+ linux,code = <KEY_LEFTMETA>;
+ gpios = <&pm8150_gpios 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ gpio-key,wakeup;
+ };
+};
+
+&vreg_l2f_1p3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
new file mode 100644
index 000000000000..d63f7a9bc4e9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8250.dtsi"
+#include "pm8150.dtsi"
+#include "pm8150b.dtsi"
+#include "pm8150l.dtsi"
+#include "pm8009.dtsi"
+
+/delete-node/ &adsp_mem;
+/delete-node/ &spss_mem;
+/delete-node/ &cdsp_secure_heap;
+
+/ {
+ qcom,msm-id = <356 0x20001>; /* SM8250 v2.1 */
+ qcom,board-id = <0x10008 0>;
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ framebuffer: framebuffer@9c000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0x9c000000 0 0x2300000>;
+ width = <1644>;
+ height = <3840>;
+ stride = <(1644 * 4)>;
+ format = "a8r8g8b8";
+ /*
+ * This is a lot of clocks, but they are necessary due to
+ * unused-clock cleanup and the lack of a panel driver so far.
+ */
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&gcc GCC_DISP_SF_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>;
+ power-domains = <&dispcc MDSS_GDSC>;
+ };
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+
+ /*
+ * Camera focus (light press) and camera snapshot (full press)
+ * do not work properly: adding the former stalls the CPU and the
+ * latter breaks the volume-down key, for reasons not yet understood.
+ * Both are on &pm8150b_gpios: camera focus (2), camera snapshot (1).
+ */
+
+ vol-down {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&pm8150_gpios 1 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ linux,can-disable;
+ gpio-key,wakeup;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ /* S6c is really ebi.lvl, but it is listed here for supply-map completeness. */
+ vreg_s6c_0p88: smpc6-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s6c_0p88";
+
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-always-on;
+ vin-supply = <&vph_pwr>;
+ };
+
+ reserved-memory {
+ adsp_mem: memory@8a100000 {
+ reg = <0x0 0x8a100000 0x0 0x2500000>;
+ no-map;
+ };
+
+ spss_mem: memory@8c600000 {
+ reg = <0x0 0x8c600000 0x0 0x100000>;
+ no-map;
+ };
+
+ cdsp_secure_heap: memory@8c700000 {
+ reg = <0x0 0x8c700000 0x0 0x4600000>;
+ no-map;
+ };
+
+ cont_splash_mem: memory@9c000000 {
+ reg = <0x0 0x9c000000 0x0 0x2300000>;
+ no-map;
+ };
+
+ ramoops@ffc00000 {
+ compatible = "ramoops";
+ reg = <0x0 0xffc00000 0x0 0x100000>;
+ record-size = <0x1000>;
+ console-size = <0x40000>;
+ msg-size = <0x20000 0x20000>;
+ ecc-size = <16>;
+ no-map;
+ };
+ };
+};
+
+&adsp {
+ status = "okay";
+};
+
+&apps_rsc {
+ pm8150-rpmh-regulators {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-l1-l8-l11-supply = <&vreg_s6c_0p88>;
+ vdd-l2-l10-supply = <&vreg_bob>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p6>;
+ vdd-l6-l9-supply = <&vreg_s8c_1p2>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_1p9>;
+ vdd-l13-l16-l17-supply = <&vreg_bob>;
+
+ /* (S1+S2+S3) - cx.lvl (ARC) */
+
+ vreg_s4a_1p8: smps4 {
+ regulator-name = "vreg_s4a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s5a_1p9: smps5 {
+ regulator-name = "vreg_s5a_1p9";
+ regulator-min-microvolt = <1824000>;
+ regulator-max-microvolt = <2040000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6a_0p6: smps6 {
+ regulator-name = "vreg_s6a_0p6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_3p1: ldo2 {
+ regulator-name = "vreg_l2a_3p1";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p9: ldo3 {
+ regulator-name = "vreg_l3a_0p9";
+ regulator-min-microvolt = <928000>;
+ regulator-max-microvolt = <932000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L4 - lmx.lvl (ARC) */
+
+ vreg_l5a_0p88: ldo5 {
+ regulator-name = "vreg_l5a_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_1p2: ldo6 {
+ regulator-name = "vreg_l6a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L7 is unused. */
+
+ vreg_l9a_1p2: ldo9 {
+ regulator-name = "vreg_l9a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L10 is unused, L11 - lcx.lvl (ARC) */
+
+ vreg_l12a_1p8: ldo12 {
+ regulator-name = "vreg_l12a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L13 is unused. */
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-name = "vreg_l14a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L15 & L16 are unused. */
+
+ vreg_l17a_3p0: ldo17 {
+ regulator-name = "vreg_l17a_3p0";
+ regulator-min-microvolt = <2496000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18a_0p9: ldo18 {
+ regulator-name = "vreg_l18a_0p9";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ /*
+ * Remaining regulators that are not yet supported:
+ * OLEDB: 4925000-8100000
+ * ab: 4600000-6100000
+ * ibb: 800000-5400000
+ */
+ pm8150l-rpmh-regulators {
+ compatible = "qcom,pm8150l-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-l1-l8-supply = <&vreg_s4a_1p8>;
+ vdd-l2-l3-supply = <&vreg_s8c_1p2>;
+ vdd-l4-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-l11-supply = <&vreg_bob>;
+ vdd-l9-l10-supply = <&vreg_bob>;
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3350000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
+ /*
+ * S1-S6 are ARCs:
+ * (S1+S2) - gfx.lvl,
+ * S3 - mx.lvl,
+ * (S4+S5) - mmcx.lvl,
+ * S6 - ebi.lvl
+ */
+
+ vreg_s7c_0p35: smps7 {
+ regulator-name = "vreg_s7c_0p35";
+ regulator-min-microvolt = <348000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s8c_1p2: smps8 {
+ regulator-name = "vreg_s8c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L2-4 are unused. */
+
+ vreg_l5c_1p8: ldo5 {
+ regulator-name = "vreg_l5c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_2p9: ldo6 {
+ regulator-name = "vreg_l6c_2p9";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l7c_2p85: ldo7 {
+ regulator-name = "vreg_l7c_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-name = "vreg_l8c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-name = "vreg_l9c_2p9";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l10c_3p3: ldo10 {
+ regulator-name = "vreg_l10c_3p3";
+ regulator-min-microvolt = <3296000>;
+ regulator-max-microvolt = <3296000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_3p0: ldo11 {
+ regulator-name = "vreg_l11c_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8009-rpmh-regulators {
+ compatible = "qcom,pm8009-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vreg_bob>;
+ vdd-l2-supply = <&vreg_s8c_1p2>;
+ vdd-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_s4a_1p8>;
+
+ vreg_s1f_1p2: smps1 {
+ regulator-name = "vreg_s1f_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s2f_0p5: smps2 {
+ regulator-name = "vreg_s2f_0p5";
+ regulator-min-microvolt = <512000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L1 is unused. */
+
+ vreg_l2f_1p3: ldo2 {
+ regulator-name = "vreg_l2f_1p3";
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ /* L3 & L4 are unused. */
+
+ vreg_l5f_2p8: ldo5 {
+ regulator-name = "vreg_l5f_2p85";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6f_2p8: ldo6 {
+ regulator-name = "vreg_l6f_2p8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7f_1p8: ldo7 {
+ regulator-name = "vreg_l7f_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&cdsp {
+ status = "okay";
+};
+
+&gpi_dma0 {
+ status = "okay";
+};
+
+&gpi_dma1 {
+ status = "okay";
+};
+
+&gpi_dma2 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* NXP PN553 NFC @ 28 */
+};
+
+&i2c2 {
+ status = "okay";
+ clock-frequency = <1000000>;
+
+ /* Dual Cirrus Logic CS35L41 amps @ 40, 41 */
+};
+
+&i2c5 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* Dialog SLG51000 CMIC @ 75 */
+};
+
+&i2c9 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* AMS TCS3490 RGB+IR color sensor @ 72 */
+};
+
+&i2c13 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ touchscreen@48 {
+ compatible = "samsung,s6sy761";
+ reg = <0x48>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <39 0x2008>;
+ /* It's "vddio" downstream but it works anyway! */
+ vdd-supply = <&vreg_l1c_1p8>;
+ avdd-supply = <&vreg_l10c_3p3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_default>;
+ };
+};
+
+&i2c15 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* Qcom SMB1390 @ 10 */
+ /* Silicon Labs SI4704 FM Radio Receiver @ 11 */
+ /* Qcom SMB1390_slave @ 18 */
+ /* HALO HL6111R Qi charger @ 25 */
+ /* Richwave RTC6226 FM Radio Receiver @ 64 */
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie0_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5a_0p88>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
+&pcie2 {
+ status = "okay";
+
+ pinctrl-0 = <&pcie2_default_state &mdm2ap_default &ap2mdm_default>;
+};
+
+&pcie2_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5a_0p88>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
+&pon_pwrkey {
+ status = "okay";
+};
+
+&pon_resin {
+ status = "okay";
+
+ linux,code = <KEY_VOLUMEUP>;
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&qupv3_id_2 {
+ status = "okay";
+};
+
+&sdhc_2 {
+ status = "okay";
+
+ cd-gpios = <&tlmm 77 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_default_state &sdc2_card_det_n>;
+ pinctrl-1 = <&sdc2_sleep_state &sdc2_card_det_n>;
+ vmmc-supply = <&vreg_l9c_2p9>;
+ vqmmc-supply = <&vreg_l6c_2p9>;
+ bus-width = <4>;
+ no-sdio;
+ no-emmc;
+};
+
+&slpi {
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <40 4>, <52 4>;
+
+ sdc2_default_state: sdc2-default {
+ clk {
+ pins = "sdc2_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd {
+ pins = "sdc2_cmd";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+
+ data {
+ pins = "sdc2_data";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ mdm2ap_default: mdm2ap-default {
+ pins = "gpio1", "gpio3";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ ts_int_default: ts-int-default {
+ pins = "gpio39";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ input-enable;
+ };
+
+ ap2mdm_default: ap2mdm-default {
+ pins = "gpio56", "gpio57";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ sdc2_card_det_n: sd-card-det-n {
+ pins = "gpio77";
+ function = "gpio";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+};
+
+&uart12 {
+ status = "okay";
+};
+
+/* BIG WARNING! DO NOT TOUCH UFS, YOUR DEVICE WILL DIE! */
+&ufs_mem_hc { status = "disabled"; };
+&ufs_mem_phy { status = "disabled"; };
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p88>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 4c0de12aaba6..4798368b02ef 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -8,6 +8,8 @@
#include <dt-bindings/clock/qcom,gcc-sm8250.h>
#include <dt-bindings/clock/qcom,gpucc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sm8250.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
@@ -519,6 +521,26 @@
};
};
+ gpi_dma2: dma-controller@800000 {
+ compatible = "qcom,sm8250-gpi-dma";
+ reg = <0 0x00800000 0 0x70000>;
+ interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 590 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 591 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 592 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 593 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 594 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 595 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 596 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 597 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <10>;
+ dma-channel-mask = <0x3f>;
+ iommus = <&apps_smmu 0x76 0x0>;
+ #dma-cells = <3>;
+ status = "disabled";
+ };
+
qupv3_id_2: geniqup@8c0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x008c0000 0x0 0x6000>;
@@ -714,6 +736,29 @@
};
};
+ gpi_dma0: dma-controller@900000 {
+ compatible = "qcom,sm8250-gpi-dma";
+ reg = <0 0x00900000 0 0x70000>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <15>;
+ dma-channel-mask = <0x7ff>;
+ iommus = <&apps_smmu 0x5b6 0x0>;
+ #dma-cells = <3>;
+ status = "disabled";
+ };
+
qupv3_id_0: geniqup@9c0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x009c0000 0x0 0x6000>;
@@ -961,6 +1006,26 @@
};
};
+ gpi_dma1: dma-controller@a00000 {
+ compatible = "qcom,sm8250-gpi-dma";
+ reg = <0 0x00a00000 0 0x70000>;
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>;
+ dma-channels = <10>;
+ dma-channel-mask = <0x3f>;
+ iommus = <&apps_smmu 0x56 0x0>;
+ #dma-cells = <3>;
+ status = "disabled";
+ };
+
qupv3_id_1: geniqup@ac0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x00ac0000 0x0 0x6000>;
@@ -1249,6 +1314,12 @@
phys = <&pcie0_lane>;
phy-names = "pciephy";
+ perst-gpio = <&tlmm 79 GPIO_ACTIVE_LOW>;
+ enable-gpio = <&tlmm 81 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_default_state>;
+
status = "disabled";
};
@@ -1347,6 +1418,12 @@
phys = <&pcie1_lane>;
phy-names = "pciephy";
+ perst-gpio = <&tlmm 82 GPIO_ACTIVE_LOW>;
+ enable-gpio = <&tlmm 84 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_default_state>;
+
status = "disabled";
};
@@ -1447,6 +1524,12 @@
phys = <&pcie2_lane>;
phy-names = "pciephy";
+ perst-gpio = <&tlmm 85 GPIO_ACTIVE_LOW>;
+ enable-gpio = <&tlmm 87 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_default_state>;
+
status = "disabled";
};
@@ -1470,7 +1553,7 @@
status = "disabled";
- pcie2_lane: lanes@1c0e200 {
+ pcie2_lane: lanes@1c16200 {
reg = <0 0x1c16200 0 0x170>, /* tx0 */
<0 0x1c16400 0 0x200>, /* rx0 */
<0 0x1c16a00 0 0x1f0>, /* pcs */
@@ -1746,6 +1829,8 @@
qcom,gmu = <&gmu>;
+ status = "disabled";
+
zap-shader {
memory-region = <&gpu_mem>;
};
@@ -1819,6 +1904,8 @@
operating-points-v2 = <&gmu_opp_table>;
+ status = "disabled";
+
gmu_opp_table: opp-table {
compatible = "operating-points-v2";
@@ -2323,6 +2410,8 @@
<&videocc VIDEO_CC_MVS0C_CLK_ARES>;
reset-names = "bus", "core";
+ status = "disabled";
+
video-decoder {
compatible = "venus-decoder";
};
@@ -2370,7 +2459,7 @@
};
mdss: mdss@ae00000 {
- compatible = "qcom,sdm845-mdss";
+ compatible = "qcom,sm8250-mdss";
reg = <0 0x0ae00000 0 0x1000>;
reg-names = "mdss";
@@ -2402,7 +2491,7 @@
ranges;
mdss_mdp: mdp@ae01000 {
- compatible = "qcom,sdm845-dpu";
+ compatible = "qcom,sm8250-dpu";
reg = <0 0x0ae01000 0 0x8f000>,
<0 0x0aeb0000 0 0x2008>;
reg-names = "mdp", "vbif";
@@ -2424,8 +2513,6 @@
interrupt-parent = <&mdss>;
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
-
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2499,6 +2586,9 @@
status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -2566,6 +2656,9 @@
status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -3395,6 +3488,95 @@
output-high;
};
};
+
+ sdc2_sleep_state: sdc2-sleep {
+ clk {
+ pins = "sdc2_clk";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cmd {
+ pins = "sdc2_cmd";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ data {
+ pins = "sdc2_data";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie0_default_state: pcie0-default {
+ perst {
+ pins = "gpio79";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio80";
+ function = "pci_e0";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake {
+ pins = "gpio81";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie1_default_state: pcie1-default {
+ perst {
+ pins = "gpio82";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio83";
+ function = "pci_e1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake {
+ pins = "gpio84";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie2_default_state: pcie2-default {
+ perst {
+ pins = "gpio85";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ clkreq {
+ pins = "gpio86";
+ function = "pci_e2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ wake {
+ pins = "gpio87";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
};
apps_smmu: iommu@15000000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
index 6ca638b4e321..93740444dd1e 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
@@ -364,3 +364,9 @@
vdda-phy-supply = <&vreg_l6b_1p2>;
vdda-pll-supply = <&vreg_l5b_0p88>;
};
+
+&ipa {
+ status = "okay";
+
+ memory-region = <&pil_ipa_fw_mem>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index ed0b51bc03ea..0d16392bb976 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -6,11 +6,12 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-sm8350.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interconnect/qcom,sm8350.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
@@ -391,6 +393,17 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
smp2p-slpi {
@@ -629,6 +642,45 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ ipa: ipa@1e40000 {
+ compatible = "qcom,sm8350-ipa";
+
+ iommus = <&apps_smmu 0x5c0 0x0>,
+ <&apps_smmu 0x5c2 0x0>;
+ reg = <0 0x1e40000 0 0x8000>,
+ <0 0x1e50000 0 0x4b20>,
+ <0 0x1e04000 0 0x23000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc GIC_SPI 655 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects = <&aggre2_noc MASTER_IPA &gem_noc SLAVE_LLCC>,
+ <&mc_virt MASTER_LLCC &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_IPA_CFG>;
+ interconnect-names = "ipa_to_llcc",
+ "llcc_to_ebi1",
+ "appss_to_ipa";
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+
+ status = "disabled";
+ };
+
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x0 0x01f40000 0x0 0x40000>;
@@ -656,7 +708,7 @@
<&rpmhpd 12>;
power-domain-names = "load_state", "cx", "mss";
- interconnects = <&mc_virt 0 &mc_virt 1>;
+ interconnects = <&mc_virt MASTER_LLCC &mc_virt SLAVE_EBI1>;
memory-region = <&pil_modem_mem>;
@@ -689,7 +741,7 @@
interrupt-controller;
};
- tsens0: thermal-sensor@c222000 {
+ tsens0: thermal-sensor@c263000 {
compatible = "qcom,sm8350-tsens", "qcom,tsens-v2";
reg = <0 0x0c263000 0 0x1ff>, /* TM */
<0 0x0c222000 0 0x8>; /* SROT */
@@ -700,7 +752,7 @@
#thermal-sensor-cells = <1>;
};
- tsens1: thermal-sensor@c223000 {
+ tsens1: thermal-sensor@c265000 {
compatible = "qcom,sm8350-tsens", "qcom,tsens-v2";
reg = <0 0x0c265000 0 0x1ff>, /* TM */
<0 0x0c223000 0 0x8>; /* SROT */
@@ -1063,7 +1115,7 @@
<&rpmhpd 10>;
power-domain-names = "load_state", "cx", "mxc";
- interconnects = <&compute_noc 1 &mc_virt 1>;
+ interconnects = <&compute_noc MASTER_CDSP_PROC &mc_virt SLAVE_EBI1>;
memory-region = <&pil_cdsp_mem>;
@@ -1176,7 +1228,7 @@
};
};
- dc_noc: interconnect@90e0000 {
+ dc_noc: interconnect@90c0000 {
compatible = "qcom,sm8350-dc-noc";
reg = <0 0x090c0000 0 0x4200>;
#interconnect-cells = <1>;
@@ -1317,7 +1369,7 @@
};
};
- thermal-zones {
+ thermal_zones: thermal-zones {
cpu0-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index f2de2fa0c8b8..68e30e26564b 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -62,3 +62,5 @@ dtb-$(CONFIG_ARCH_R8A77990) += r8a77990-ebisu.dtb
dtb-$(CONFIG_ARCH_R8A77995) += r8a77995-draak.dtb
dtb-$(CONFIG_ARCH_R8A779A0) += r8a779a0-falcon.dtb
+
+dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044l2-smarc.dtb
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
index d8046fedf9c1..e3c8b2fe143e 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
@@ -271,12 +271,12 @@
&ehci0 {
dr_mode = "otg";
status = "okay";
- clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>, <&usb2_clksel>, <&versaclock5 3>;
};
&ehci1 {
status = "okay";
- clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>, <&usb2_clksel>, <&versaclock5 3>;
};
&hdmi0 {
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
index 8d3a4d6ee885..090dc9c4f57b 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
@@ -53,6 +53,8 @@
phy-handle = <&phy0>;
rx-internal-delay-ps = <1800>;
tx-internal-delay-ps = <2000>;
+ clocks = <&cpg CPG_MOD 812>, <&versaclock5 4>;
+ clock-names = "fck", "refclk";
status = "okay";
phy0: ethernet-phy@0 {
@@ -319,8 +321,10 @@
status = "okay";
};
-&usb_extal_clk {
- clock-frequency = <50000000>;
+&usb2_clksel {
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>,
+ <&versaclock5 3>, <&usb3s0_clk>;
+ status = "okay";
};
&usb3s0_clk {
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 46f8dbf68904..78c121a89f11 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -76,6 +76,7 @@
opp-hz = /bits/ 64 <1500000000>;
opp-microvolt = <820000>;
clock-latency-ns = <300000>;
+ opp-suspend;
};
};
@@ -1127,6 +1128,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index d16a4be5ef77..28c612ce49c0 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -1001,6 +1001,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 1aef34447abd..a5d4dce8476d 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -957,6 +957,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 1f51237ab0a6..379a1300272b 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -1230,6 +1230,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 85d66d15465a..2e4c18b8eee4 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -1312,6 +1312,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 12476e354d74..2bd8169735d3 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -63,18 +63,19 @@
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <820000>;
+ opp-microvolt = <830000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
+ opp-microvolt = <830000>;
clock-latency-ns = <300000>;
};
opp-1500000000 {
opp-hz = /bits/ 64 <1500000000>;
- opp-microvolt = <820000>;
+ opp-microvolt = <830000>;
clock-latency-ns = <300000>;
+ opp-suspend;
};
opp-1600000000 {
opp-hz = /bits/ 64 <1600000000>;
@@ -1188,6 +1189,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index d9804768425a..91b501e0121e 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -52,18 +52,19 @@
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <820000>;
+ opp-microvolt = <830000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
+ opp-microvolt = <830000>;
clock-latency-ns = <300000>;
};
opp-1500000000 {
opp-hz = /bits/ 64 <1500000000>;
- opp-microvolt = <820000>;
+ opp-microvolt = <830000>;
clock-latency-ns = <300000>;
+ opp-suspend;
};
opp-1600000000 {
opp-hz = /bits/ 64 <1600000000>;
@@ -559,10 +560,19 @@
};
intc_ex: interrupt-controller@e61c0000 {
+ compatible = "renesas,intc-ex-r8a77961", "renesas,irqc";
#interrupt-cells = <2>;
interrupt-controller;
reg = <0 0xe61c0000 0 0x200>;
- /* placeholder */
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 407>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
};
tmu0: timer@e61e0000 {
@@ -1144,6 +1154,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index dcb9df861d74..ad69da362a72 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -1050,6 +1050,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
index 874a7fc2730b..5c84681703ed 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
@@ -73,6 +73,12 @@
/* first 128MB is reserved for secure area. */
reg = <0x0 0x48000000 0x0 0x38000000>;
};
+
+ x1_clk: x1-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <148500000>;
+ };
};
&avb {
@@ -104,6 +110,8 @@
};
&du {
+ clocks = <&cpg CPG_MOD 724>, <&x1_clk>;
+ clock-names = "du.0", "dclkin.0";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
index 7417cf5fea0f..2426e533128c 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
@@ -59,7 +59,7 @@
memory@48000000 {
device_type = "memory";
/* first 128MB is reserved for secure area. */
- reg = <0x0 0x48000000 0x0 0x38000000>;
+ reg = <0x0 0x48000000 0x0 0x78000000>;
};
osc5_clk: osc5-clock {
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index e8f6352c3665..517892cf6294 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -612,6 +612,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77970_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
index 04d47c0c9bb9..7bde0a549c09 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
@@ -210,7 +210,7 @@
&mmc0 {
pinctrl-0 = <&mmc_pins>;
- pinctrl-1 = <&mmc_pins_uhs>;
+ pinctrl-1 = <&mmc_pins>;
pinctrl-names = "default", "state_uhs";
vmmc-supply = <&d3_3v>;
@@ -255,12 +255,6 @@
mmc_pins: mmc {
groups = "mmc_data8", "mmc_ctrl", "mmc_ds";
function = "mmc";
- power-source = <3300>;
- };
-
- mmc_pins_uhs: mmc_uhs {
- groups = "mmc_data8", "mmc_ctrl", "mmc_ds";
- function = "mmc";
power-source = <1800>;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 7b51d464de0e..6347d15e66b6 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -664,6 +664,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 0eaea58f4210..4d0304bc9745 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1000,6 +1000,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index 2319271c881b..84dba3719381 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -760,6 +760,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 812>;
+ clock-names = "fck";
power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
resets = <&cpg 812>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi
index 14d3db5d6c16..f791c76f1bcf 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi
@@ -6,6 +6,27 @@
*/
&i2c0 {
+ pca9654_a: gpio@21 {
+ compatible = "onnn,pca9654";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9654_b: gpio@22 {
+ compatible = "onnn,pca9654";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9654_c: gpio@23 {
+ compatible = "onnn,pca9654";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
eeprom@52 {
compatible = "rohm,br24g01", "atmel,24c01";
label = "csi-dsi-sub-board-id";
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index 70b3604e56cd..78ca75f619f6 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -618,6 +618,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 211>;
+ clock-names = "fck";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 211>;
phy-mode = "rgmii";
@@ -665,6 +666,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 212>;
+ clock-names = "fck";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 212>;
phy-mode = "rgmii";
@@ -712,6 +714,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 213>;
+ clock-names = "fck";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 213>;
phy-mode = "rgmii";
@@ -759,6 +762,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 214>;
+ clock-names = "fck";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 214>;
phy-mode = "rgmii";
@@ -806,6 +810,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 215>;
+ clock-names = "fck";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 215>;
phy-mode = "rgmii";
@@ -853,6 +858,7 @@
"ch20", "ch21", "ch22", "ch23",
"ch24";
clocks = <&cpg CPG_MOD 216>;
+ clock-names = "fck";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 216>;
phy-mode = "rgmii";
@@ -1096,7 +1102,6 @@
<0x0 0xf1060000 0 0x110000>;
interrupts = <GIC_PPI 9
(GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
- power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
};
fcpvd0: fcp@fea10000 {
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
new file mode 100644
index 000000000000..01482d227506
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G2L and RZ/G2LC common SoC parts
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/r9a07g044-cpg.h>
+
+/ {
+ compatible = "renesas,r9a07g044";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ /* The clock can come either from exclk or from a crystal oscillator (XIN/XOUT) */
+ extal_clk: extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a55";
+ reg = <0>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ device_type = "cpu";
+ next-level-cache = <&L3_CA55>;
+ enable-method = "psci";
+ };
+
+ L3_CA55: cache-controller-0 {
+ compatible = "cache";
+ cache-unified;
+ cache-size = <0x40000>;
+ };
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scif0: serial@1004b800 {
+ compatible = "renesas,scif-r9a07g044";
+ reg = <0 0x1004b800 0 0x400>;
+ interrupts = <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>;
+ status = "disabled";
+ };
+
+ cpg: clock-controller@11010000 {
+ compatible = "renesas,r9a07g044-cpg";
+ reg = <0 0x11010000 0 0x10000>;
+ clocks = <&extal_clk>;
+ clock-names = "extal";
+ #clock-cells = <2>;
+ #reset-cells = <1>;
+ #power-domain-cells = <0>;
+ };
+
+ sysc: system-controller@11020000 {
+ compatible = "renesas,r9a07g044-sysc";
+ reg = <0 0x11020000 0 0x10000>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "lpm_int", "ca55stbydone_int",
+ "cm33stbyr_int", "ca55_deny";
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@11900000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0x11900000 0 0x40000>,
+ <0x0 0x11940000 0 0x60000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi
new file mode 100644
index 000000000000..9d89d4590358
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G2L R9A07G044L1 SoC specific parts
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r9a07g044.dtsi"
+
+/ {
+ compatible = "renesas,r9a07g044l1", "renesas,r9a07g044";
+
+ cpus {
+ /delete-node/ cpu-map;
+ /delete-node/ cpu@100;
+ };
+
+ timer {
+ interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts
new file mode 100644
index 000000000000..d3f72ec62f03
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G2L SMARC EVK board
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r9a07g044l2.dtsi"
+#include "rzg2l-smarc.dtsi"
+
+/ {
+ model = "Renesas SMARC EVK based on r9a07g044l2";
+ compatible = "renesas,smarc-evk", "renesas,r9a07g044l2", "renesas,r9a07g044";
+
+ memory@48000000 {
+ device_type = "memory";
+ /* The first 128 MiB is reserved for the secure area. */
+ reg = <0x0 0x48000000 0x0 0x78000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l2.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044l2.dtsi
new file mode 100644
index 000000000000..91dc10b2cdbb
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g044l2.dtsi
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G2L R9A07G044L2 SoC specific parts
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r9a07g044.dtsi"
+
+/ {
+ compatible = "renesas,r9a07g044l2", "renesas,r9a07g044";
+};
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
new file mode 100644
index 000000000000..adcd4f50519e
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G2L SMARC EVK common parts
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ aliases {
+ serial0 = &scif0;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&extal_clk {
+ clock-frequency = <24000000>;
+};
+
+&scif0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index c3e00c0e2db7..7fdb41de01ec 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -51,3 +51,4 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rockpro64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399pro-rock-pi-n10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-evb1-v10.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 09baa8a167ce..248ebb61aa79 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -244,28 +244,31 @@
#size-cells = <0>;
/* These power domains are grouped by VD_LOGIC */
- pd_usb@PX30_PD_USB {
+ power-domain@PX30_PD_USB {
reg = <PX30_PD_USB>;
clocks = <&cru HCLK_HOST>,
<&cru HCLK_OTG>,
<&cru SCLK_OTG_ADP>;
pm_qos = <&qos_usb_host>, <&qos_usb_otg>;
+ #power-domain-cells = <0>;
};
- pd_sdcard@PX30_PD_SDCARD {
+ power-domain@PX30_PD_SDCARD {
reg = <PX30_PD_SDCARD>;
clocks = <&cru HCLK_SDMMC>,
<&cru SCLK_SDMMC>;
pm_qos = <&qos_sdmmc>;
+ #power-domain-cells = <0>;
};
- pd_gmac@PX30_PD_GMAC {
+ power-domain@PX30_PD_GMAC {
reg = <PX30_PD_GMAC>;
clocks = <&cru ACLK_GMAC>,
<&cru PCLK_GMAC>,
<&cru SCLK_MAC_REF>,
<&cru SCLK_GMAC_RX_TX>;
pm_qos = <&qos_gmac>;
+ #power-domain-cells = <0>;
};
- pd_mmc_nand@PX30_PD_MMC_NAND {
+ power-domain@PX30_PD_MMC_NAND {
reg = <PX30_PD_MMC_NAND>;
clocks = <&cru HCLK_NANDC>,
<&cru HCLK_EMMC>,
@@ -277,15 +280,17 @@
<&cru SCLK_SFC>;
pm_qos = <&qos_emmc>, <&qos_nand>,
<&qos_sdio>, <&qos_sfc>;
+ #power-domain-cells = <0>;
};
- pd_vpu@PX30_PD_VPU {
+ power-domain@PX30_PD_VPU {
reg = <PX30_PD_VPU>;
clocks = <&cru ACLK_VPU>,
<&cru HCLK_VPU>,
<&cru SCLK_CORE_VPU>;
pm_qos = <&qos_vpu>, <&qos_vpu_r128>;
+ #power-domain-cells = <0>;
};
- pd_vo@PX30_PD_VO {
+ power-domain@PX30_PD_VO {
reg = <PX30_PD_VO>;
clocks = <&cru ACLK_RGA>,
<&cru ACLK_VOPB>,
@@ -300,8 +305,9 @@
<&cru SCLK_VOPB_PWM>;
pm_qos = <&qos_rga_rd>, <&qos_rga_wr>,
<&qos_vop_m0>, <&qos_vop_m1>;
+ #power-domain-cells = <0>;
};
- pd_vi@PX30_PD_VI {
+ power-domain@PX30_PD_VI {
reg = <PX30_PD_VI>;
clocks = <&cru ACLK_CIF>,
<&cru ACLK_ISP>,
@@ -311,11 +317,13 @@
pm_qos = <&qos_isp_128>, <&qos_isp_rd>,
<&qos_isp_wr>, <&qos_isp_m1>,
<&qos_vip>;
+ #power-domain-cells = <0>;
};
- pd_gpu@PX30_PD_GPU {
+ power-domain@PX30_PD_GPU {
reg = <PX30_PD_GPU>;
clocks = <&cru SCLK_GPU>;
pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
};
};
};
@@ -814,7 +822,7 @@
#address-cells = <1>;
#size-cells = <1>;
- u2phy: usb2-phy@100 {
+ u2phy: usb2phy@100 {
compatible = "rockchip,px30-usb2phy";
reg = <0x100 0x20>;
clocks = <&pmucru SCLK_USBPHY_REF>;
@@ -1087,7 +1095,6 @@
compatible = "rockchip,iommu";
reg = <0x0 0xff460f00 0x0 0x100>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vopb_mmu";
clocks = <&cru ACLK_VOPB>, <&cru HCLK_VOPB>;
clock-names = "aclk", "iface";
power-domains = <&power PX30_PD_VO>;
@@ -1128,7 +1135,6 @@
compatible = "rockchip,iommu";
reg = <0x0 0xff470f00 0x0 0x100>;
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "vopl_mmu";
clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
clock-names = "aclk", "iface";
power-domains = <&power PX30_PD_VO>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index 3dddd4742c3a..665b2e69455d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -84,8 +84,8 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_HIGH>;
- states = <1800000 0x0
- 3300000 0x1>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
vin-supply = <&vcc5v0_sys>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index b815ce73e5c6..a185901aba9a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -164,7 +164,7 @@
grf: grf@ff000000 {
compatible = "rockchip,rk3308-grf", "syscon", "simple-mfd";
- reg = <0x0 0xff000000 0x0 0x10000>;
+ reg = <0x0 0xff000000 0x0 0x08000>;
reboot-mode {
compatible = "syscon-reboot-mode";
@@ -177,6 +177,42 @@
};
};
+ usb2phy_grf: syscon@ff008000 {
+ compatible = "rockchip,rk3308-usb2phy-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xff008000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u2phy: usb2phy@100 {
+ compatible = "rockchip,rk3308-usb2phy";
+ reg = <0x100 0x10>;
+ assigned-clocks = <&cru USB480M>;
+ assigned-clock-parents = <&u2phy>;
+ clocks = <&cru SCLK_USBPHY_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "usb480m_phy";
+ #clock-cells = <0>;
+ status = "disabled";
+
+ u2phy_otg: otg-port {
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "otg-bvalid", "otg-id",
+ "linestate";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ u2phy_host: host-port {
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "linestate";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+ };
+
detect_grf: syscon@ff00b000 {
compatible = "rockchip,rk3308-detect-grf", "syscon", "simple-mfd";
reg = <0x0 0xff00b000 0x0 0x1000>;
@@ -579,6 +615,42 @@
status = "disabled";
};
+ usb20_otg: usb@ff400000 {
+ compatible = "rockchip,rk3308-usb", "rockchip,rk3066-usb",
+ "snps,dwc2";
+ reg = <0x0 0xff400000 0x0 0x40000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_OTG>;
+ clock-names = "otg";
+ dr_mode = "otg";
+ g-np-tx-fifo-size = <16>;
+ g-rx-fifo-size = <280>;
+ g-tx-fifo-size = <256 128 128 64 32 16>;
+ phys = <&u2phy_otg>;
+ phy-names = "usb2-phy";
+ status = "disabled";
+ };
+
+ usb_host_ehci: usb@ff440000 {
+ compatible = "generic-ehci";
+ reg = <0x0 0xff440000 0x0 0x10000>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_HOST>, <&cru HCLK_HOST_ARB>, <&u2phy>;
+ phys = <&u2phy_host>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ usb_host_ohci: usb@ff450000 {
+ compatible = "generic-ohci";
+ reg = <0x0 0xff450000 0x0 0x10000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_HOST>, <&cru HCLK_HOST_ARB>, <&u2phy>;
+ phys = <&u2phy_host>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
sdmmc: mmc@ff480000 {
compatible = "rockchip,rk3308-dw-mshc", "rockchip,rk3288-dw-mshc";
reg = <0x0 0xff480000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
index 49c97f76df77..7fc674a99a6c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
@@ -165,6 +165,31 @@
};
};
+ rk817-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Analog";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,hp-det-gpio = <&gpio2 RK_PC6 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,mclk-fs = <256>;
+ simple-audio-card,widgets =
+ "Microphone", "Mic Jack",
+ "Headphone", "Headphones",
+ "Speaker", "Speaker";
+ simple-audio-card,routing =
+ "MICL", "Mic Jack",
+ "Headphones", "HPOL",
+ "Headphones", "HPOR",
+ "Speaker", "SPKO";
+
+ simple-audio-card,codec {
+ sound-dai = <&rk817>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1_2ch>;
+ };
+ };
+
vccsys: vccsys {
compatible = "regulator-fixed";
regulator-name = "vcc3v8_sys";
@@ -239,6 +264,7 @@
backlight = <&backlight>;
iovcc-supply = <&vcc_lcd>;
reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
+ rotation = <270>;
vdd-supply = <&vcc_lcd>;
port {
@@ -269,11 +295,14 @@
reg = <0x20>;
interrupt-parent = <&gpio0>;
interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ clock-output-names = "rk808-clkout1", "xin32k";
+ clock-names = "mclk";
+ clocks = <&cru SCLK_I2S1_OUT>;
pinctrl-names = "default";
- pinctrl-0 = <&pmic_int>;
+ pinctrl-0 = <&pmic_int>, <&i2s1_2ch_mclk>;
wakeup-source;
#clock-cells = <1>;
- clock-output-names = "rk808-clkout1", "xin32k";
+ #sound-dai-cells = <0>;
vcc1-supply = <&vccsys>;
vcc2-supply = <&vccsys>;
@@ -432,6 +461,10 @@
};
};
};
+
+ rk817_codec: codec {
+ rockchip,mic-in-differential;
+ };
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
index f807bc066ccb..3857d487ab84 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
@@ -14,6 +14,7 @@
compatible = "friendlyarm,nanopi-r2s", "rockchip,rk3328";
aliases {
+ ethernet1 = &rtl8153;
mmc0 = &sdmmc;
};
@@ -76,8 +77,8 @@
regulator-settling-time-us = <5000>;
regulator-type = "voltage";
startup-delay-us = <2000>;
- states = <1800000 0x1
- 3300000 0x0>;
+ states = <1800000 0x1>,
+ <3300000 0x0>;
vin-supply = <&vcc_io_33>;
};
@@ -101,6 +102,18 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
+
+ vdd_5v_lan: vdd-5v-lan {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&lan_vdd_pin>;
+ pinctrl-names = "default";
+ regulator-name = "vdd_5v_lan";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_5v>;
+ };
};
&cpu0 {
@@ -309,6 +322,12 @@
};
};
+ lan {
+ lan_vdd_pin: lan-vdd-pin {
+ rockchip,pins = <2 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_up>;
@@ -368,6 +387,19 @@
dr_mode = "host";
};
+&usbdrd3 {
+ dr_mode = "host";
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Second port is for USB 3.0 */
+ rtl8153: device@2 {
+ compatible = "usbbda,8153";
+ reg = <2>;
+ };
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index a05732b59f38..aa22a0c22265 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -50,8 +50,8 @@
vcc_sdio: sdmmcio-regulator {
compatible = "regulator-gpio";
gpios = <&grf_gpio 0 GPIO_ACTIVE_HIGH>;
- states = <1800000 0x1
- 3300000 0x0>;
+ states = <1800000 0x1>,
+ <3300000 0x0>;
regulator-name = "vcc_sdio";
regulator-type = "voltage";
regulator-min-microvolt = <1800000>;
@@ -363,6 +363,11 @@
status = "okay";
};
+&usbdrd3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index c7e31efdd2e1..018a3a5075c7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -177,8 +177,6 @@
};
&gmac2phy {
- pinctrl-names = "default";
- pinctrl-0 = <&fephyled_linkm1>, <&fephyled_rxm1>;
status = "okay";
};
@@ -382,6 +380,11 @@
status = "okay";
};
+&usbdrd3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 3bef1f39bc6e..1b0f7e4551ea 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -381,6 +381,11 @@
status = "okay";
};
+&usbdrd3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 3ed69ecbcf3c..8c821acb21ff 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -288,7 +288,7 @@
status = "disabled";
};
- grf_gpio: grf-gpio {
+ grf_gpio: gpio {
compatible = "rockchip,rk3328-grf-gpio";
gpio-controller;
#gpio-cells = <2>;
@@ -300,15 +300,18 @@
#address-cells = <1>;
#size-cells = <0>;
- pd_hevc@RK3328_PD_HEVC {
+ power-domain@RK3328_PD_HEVC {
reg = <RK3328_PD_HEVC>;
+ #power-domain-cells = <0>;
};
- pd_video@RK3328_PD_VIDEO {
+ power-domain@RK3328_PD_VIDEO {
reg = <RK3328_PD_VIDEO>;
+ #power-domain-cells = <0>;
};
- pd_vpu@RK3328_PD_VPU {
+ power-domain@RK3328_PD_VPU {
reg = <RK3328_PD_VPU>;
clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+ #power-domain-cells = <0>;
};
};
@@ -816,7 +819,7 @@
#address-cells = <1>;
#size-cells = <1>;
- u2phy: usb2-phy@100 {
+ u2phy: usb2phy@100 {
compatible = "rockchip,rk3328-usb2phy";
reg = <0x100 0x10>;
clocks = <&xin24m>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index dfc6376171d0..4c64fbefb483 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -664,6 +664,8 @@
compatible = "rockchip,rk3368-timer", "rockchip,rk3288-timer";
reg = <0x0 0xff810000 0x0 0x20>;
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
+ clock-names = "pclk", "timer";
};
spdif: spdif@ff880000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 45254be1350d..c4dd2a6b4836 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/usb/pd.h>
#include "rk3399.dtsi"
#include "rk3399-opp.dtsi"
@@ -94,6 +95,13 @@
};
};
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&ir_int>;
+ pinctrl-names = "default";
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -151,6 +159,23 @@
reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
};
+ sound-dit {
+ compatible = "audio-graph-card";
+ label = "SPDIF";
+ dais = <&spdif_p0>;
+ };
+
+ spdif-dit {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+
+ port {
+ dit_p0_0: endpoint {
+ remote-endpoint = <&spdif_p0_0>;
+ };
+ };
+ };
+
/* switched by pmic_sleep */
vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
compatible = "regulator-fixed";
@@ -196,6 +221,17 @@
vin-supply = <&vcc_sys>;
};
+ vcc5v0_typec: vcc5v0-typec-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_typec_en>;
+ regulator-name = "vcc5v0_typec";
+ regulator-always-on;
+ vin-supply = <&vcc_sys>;
+ };
+
vcc_sys: vcc-sys {
compatible = "regulator-fixed";
regulator-name = "vcc_sys";
@@ -521,6 +557,53 @@
i2c-scl-falling-time-ns = <20>;
status = "okay";
+ fusb0: typec-portc@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fusb0_int>;
+ vbus-supply = <&vcc5v0_typec>;
+ status = "okay";
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "host";
+ label = "USB-C";
+ op-sink-microwatt = <1000000>;
+ power-role = "dual";
+ sink-pdos =
+ <PDO_FIXED(5000, 2500, PDO_FIXED_USB_COMM)>;
+ source-pdos =
+ <PDO_FIXED(5000, 1400, PDO_FIXED_USB_COMM)>;
+ try-power-role = "sink";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc_hs: endpoint {
+ remote-endpoint =
+ <&u2phy0_typec_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc_ss: endpoint {
+ remote-endpoint =
+ <&tcphy0_typec_ss>;
+ };
+ };
+ };
+ };
+ };
+
accelerometer@68 {
compatible = "invensense,mpu6500";
reg = <0x68>;
@@ -578,12 +661,34 @@
};
};
+ fusb302x {
+ fusb0_int: fusb0-int {
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
lcd-panel {
lcd_panel_reset: lcd-panel-reset {
rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
+ leds {
+ work_led_pin: work-led-pin {
+ rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ diy_led_pin: diy-led-pin {
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pcie {
pcie_pwr_en: pcie-pwr-en {
rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -595,6 +700,10 @@
};
pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
vsel1_pin: vsel1-pin {
rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>;
};
@@ -604,21 +713,21 @@
};
};
- sdio-pwrseq {
- wifi_enable_h: wifi-enable-h {
- rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
rt5640 {
rt5640_hpcon: rt5640-hpcon {
rockchip,pins = <4 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
- pmic {
- pmic_int_l: pmic-int-l {
- rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb-typec {
+ vcc5v0_typec_en: vcc5v0-typec-en {
+ rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
@@ -633,16 +742,6 @@
rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
-
- leds {
- work_led_pin: work-led-pin {
- rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
- diy_led_pin: diy-led-pin {
- rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
};
&pwm0 {
@@ -710,10 +809,29 @@
status = "okay";
};
+&spdif {
+ pinctrl-0 = <&spdif_bus_1>;
+ status = "okay";
+
+ spdif_p0: port {
+ spdif_p0_0: endpoint {
+ remote-endpoint = <&dit_p0_0>;
+ };
+ };
+};
+
&tcphy0 {
status = "okay";
};
+&tcphy0_usb3 {
+ port {
+ tcphy0_typec_ss: endpoint {
+ remote-endpoint = <&usbc_ss>;
+ };
+ };
+};
+
&tcphy1 {
status = "okay";
};
@@ -737,6 +855,12 @@
phy-supply = <&vcc5v0_host>;
status = "okay";
};
+
+ port {
+ u2phy0_typec_hs: endpoint {
+ remote-endpoint = <&usbc_hs>;
+ };
+ };
};
&u2phy1 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index beee5fbb3443..5d7a9d96d163 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -245,7 +245,7 @@ pp1800_pcie: &pp1800_s0 {
};
&ppvar_sd_card_io {
- states = <1800000 0x0 3300000 0x1>;
+ states = <1800000 0x0>, <3300000 0x1>;
regulator-max-microvolt = <3300000>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 4002742fed4c..c1bcc8ca3769 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -252,8 +252,8 @@
enable-active-high;
enable-gpio = <&gpio2 2 GPIO_ACTIVE_HIGH>;
gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>;
- states = <1800000 0x1
- 3000000 0x0>;
+ states = <1800000 0x1>,
+ <3000000 0x0>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3000000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index 19485b552bc4..738cfd21df3e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -23,6 +23,16 @@
mmc1 = &sdhci;
};
+ avdd_0v9_s0: avdd-0v9-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "avdd_0v9_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc1v8_sys_s3>;
+ };
+
avdd_1v8_s0: avdd-1v8-s0 {
compatible = "regulator-fixed";
regulator-name = "avdd_1v8_s0";
@@ -40,6 +50,20 @@
#clock-cells = <0>;
};
+ fan1 {
+ /* fan connected to P7 */
+ compatible = "pwm-fan";
+ pwms = <&pwm0 0 40000 0>;
+ cooling-levels = <0 80 170 255>;
+ };
+
+ fan2 {
+ /* fan connected to P6 */
+ compatible = "pwm-fan";
+ pwms = <&pwm1 0 40000 0>;
+ cooling-levels = <0 80 170 255>;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -58,6 +82,18 @@
};
};
+ pcie_power: pcie-power {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PD0 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&pcie_pwr>;
+ pinctrl-names = "default";
+ regulator-boot-on;
+ regulator-name = "pcie_power";
+ startup-delay-us = <10000>;
+ vin-supply = <&vcc5v0_perdev>;
+ };
+
vcc1v8_sys_s0: vcc1v8-sys-s0 {
compatible = "regulator-fixed";
regulator-name = "vcc1v8_sys_s0";
@@ -95,6 +131,16 @@
};
};
+ vcc5v0_perdev: vcc5v0-perdev {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_perdev";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin_bkup>;
+ };
+
vcc5v0_sys: vcc5v0-sys {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_sys";
@@ -109,6 +155,20 @@
};
};
+ vcc5v0_usb: vcc5v0-usb {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb_en>;
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_perdev>;
+ };
+
vcc12v_dcin: vcc12v-dcin {
compatible = "regulator-fixed";
regulator-name = "vcc12v_dcin";
@@ -317,6 +377,20 @@
status = "okay";
};
+&pcie_phy {
+ status = "okay";
+};
+
+&pcie0 {
+ num-lanes = <2>;
+ status = "okay";
+
+ vpcie12v-supply = <&vcc12v_dcin>;
+ vpcie3v3-supply = <&pcie_power>;
+ vpcie1v8-supply = <&avdd_1v8_s0>;
+ vpcie0v9-supply = <&avdd_0v9_s0>;
+};
+
&pinctrl {
gmac {
gphy_reset: gphy-reset {
@@ -334,12 +408,25 @@
};
};
+ pcie {
+ pcie_pwr: pcie-pwr {
+ rockchip,pins =
+ <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+ power {
+ vcc5v0_usb_en: vcc5v0-usb-en {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
vcc3v0-sd {
sdmmc0_pwr_h: sdmmc0-pwr-h {
rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
@@ -352,6 +439,16 @@
status = "okay";
};
+&pwm0 {
+ /* pwm-fan on P7 */
+ status = "okay";
+};
+
+&pwm1 {
+ /* pwm-fan on P6 */
+ status = "okay";
+};
+
&sdhci {
bus-width = <8>;
mmc-hs200-1_8v;
@@ -372,6 +469,30 @@
status = "okay";
};
+&tcphy1 {
+ /* phy for &usbdrd_dwc3_1 */
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+
+ otg-port {
+ /* phy for &usbdrd_dwc3_1 */
+ phy-supply = <&vcc5v0_usb>;
+ status = "okay";
+ };
+};
+
&uart2 {
status = "okay";
};
+
+&usbdrd3_1 {
+ status = "okay";
+
+ usb@fe900000 {
+ dr_mode = "host";
+ status = "okay";
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
index fa5809887643..cef4d18b599d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
@@ -33,7 +33,7 @@
sys_led: led-sys {
gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
- label = "red:sys";
+ label = "red:power";
default-state = "on";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index 16fd58c4a80f..8c0ff6c96e03 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -510,7 +510,6 @@
};
&pcie0 {
- max-link-speed = <2>;
num-lanes = <2>;
vpcie0v9-supply = <&vcca0v9_s3>;
vpcie1v8-supply = <&vcca1v8_s3>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index c172f5a803e7..d1aaf8e83391 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -63,6 +63,13 @@
};
};
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_int>;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -389,6 +396,7 @@
vcc_sdio: LDO_REG4 {
regulator-name = "vcc_sdio";
+ regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3000000>;
@@ -493,6 +501,8 @@
regulator-min-microvolt = <712500>;
regulator-max-microvolt = <1500000>;
regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
vin-supply = <&vcc3v3_sys>;
regulator-state-mem {
@@ -601,6 +611,12 @@
};
};
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
lcd-panel {
lcd_panel_reset: lcd-panel-reset {
rockchip,pins = <4 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 7d0a7c697703..b28888ea9262 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -474,7 +474,6 @@
&pcie0 {
ep-gpios = <&gpio4 RK_PD3 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
num-lanes = <4>;
pinctrl-0 = <&pcie_clkreqnb_cpm>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 634a91af8e83..3871c7fd83b0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -227,7 +227,7 @@
<&pcie_phy 2>, <&pcie_phy 3>;
phy-names = "pcie-phy-0", "pcie-phy-1",
"pcie-phy-2", "pcie-phy-3";
- ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>,
+ ranges = <0x82000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>,
<0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>;
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
<&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
@@ -968,126 +968,146 @@
#size-cells = <0>;
/* These power domains are grouped by VD_CENTER */
- pd_iep@RK3399_PD_IEP {
+ power-domain@RK3399_PD_IEP {
reg = <RK3399_PD_IEP>;
clocks = <&cru ACLK_IEP>,
<&cru HCLK_IEP>;
pm_qos = <&qos_iep>;
+ #power-domain-cells = <0>;
};
- pd_rga@RK3399_PD_RGA {
+ power-domain@RK3399_PD_RGA {
reg = <RK3399_PD_RGA>;
clocks = <&cru ACLK_RGA>,
<&cru HCLK_RGA>;
pm_qos = <&qos_rga_r>,
<&qos_rga_w>;
+ #power-domain-cells = <0>;
};
- pd_vcodec@RK3399_PD_VCODEC {
+ power-domain@RK3399_PD_VCODEC {
reg = <RK3399_PD_VCODEC>;
clocks = <&cru ACLK_VCODEC>,
<&cru HCLK_VCODEC>;
pm_qos = <&qos_video_m0>;
+ #power-domain-cells = <0>;
};
- pd_vdu@RK3399_PD_VDU {
+ power-domain@RK3399_PD_VDU {
reg = <RK3399_PD_VDU>;
clocks = <&cru ACLK_VDU>,
<&cru HCLK_VDU>;
pm_qos = <&qos_video_m1_r>,
<&qos_video_m1_w>;
+ #power-domain-cells = <0>;
};
/* These power domains are grouped by VD_GPU */
- pd_gpu@RK3399_PD_GPU {
+ power-domain@RK3399_PD_GPU {
reg = <RK3399_PD_GPU>;
clocks = <&cru ACLK_GPU>;
pm_qos = <&qos_gpu>;
+ #power-domain-cells = <0>;
};
/* These power domains are grouped by VD_LOGIC */
- pd_edp@RK3399_PD_EDP {
+ power-domain@RK3399_PD_EDP {
reg = <RK3399_PD_EDP>;
clocks = <&cru PCLK_EDP_CTRL>;
+ #power-domain-cells = <0>;
};
- pd_emmc@RK3399_PD_EMMC {
+ power-domain@RK3399_PD_EMMC {
reg = <RK3399_PD_EMMC>;
clocks = <&cru ACLK_EMMC>;
pm_qos = <&qos_emmc>;
+ #power-domain-cells = <0>;
};
- pd_gmac@RK3399_PD_GMAC {
+ power-domain@RK3399_PD_GMAC {
reg = <RK3399_PD_GMAC>;
clocks = <&cru ACLK_GMAC>,
<&cru PCLK_GMAC>;
pm_qos = <&qos_gmac>;
+ #power-domain-cells = <0>;
};
- pd_sd@RK3399_PD_SD {
+ power-domain@RK3399_PD_SD {
reg = <RK3399_PD_SD>;
clocks = <&cru HCLK_SDMMC>,
<&cru SCLK_SDMMC>;
pm_qos = <&qos_sd>;
+ #power-domain-cells = <0>;
};
- pd_sdioaudio@RK3399_PD_SDIOAUDIO {
+ power-domain@RK3399_PD_SDIOAUDIO {
reg = <RK3399_PD_SDIOAUDIO>;
clocks = <&cru HCLK_SDIO>;
pm_qos = <&qos_sdioaudio>;
+ #power-domain-cells = <0>;
};
- pd_tcpc0@RK3399_PD_TCPD0 {
+ power-domain@RK3399_PD_TCPD0 {
reg = <RK3399_PD_TCPD0>;
clocks = <&cru SCLK_UPHY0_TCPDCORE>,
<&cru SCLK_UPHY0_TCPDPHY_REF>;
+ #power-domain-cells = <0>;
};
- pd_tcpc1@RK3399_PD_TCPD1 {
+ power-domain@RK3399_PD_TCPD1 {
reg = <RK3399_PD_TCPD1>;
clocks = <&cru SCLK_UPHY1_TCPDCORE>,
<&cru SCLK_UPHY1_TCPDPHY_REF>;
+ #power-domain-cells = <0>;
};
- pd_usb3@RK3399_PD_USB3 {
+ power-domain@RK3399_PD_USB3 {
reg = <RK3399_PD_USB3>;
clocks = <&cru ACLK_USB3>;
pm_qos = <&qos_usb_otg0>,
<&qos_usb_otg1>;
+ #power-domain-cells = <0>;
};
- pd_vio@RK3399_PD_VIO {
+ power-domain@RK3399_PD_VIO {
reg = <RK3399_PD_VIO>;
+ #power-domain-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
- pd_hdcp@RK3399_PD_HDCP {
+ power-domain@RK3399_PD_HDCP {
reg = <RK3399_PD_HDCP>;
clocks = <&cru ACLK_HDCP>,
<&cru HCLK_HDCP>,
<&cru PCLK_HDCP>;
pm_qos = <&qos_hdcp>;
+ #power-domain-cells = <0>;
};
- pd_isp0@RK3399_PD_ISP0 {
+ power-domain@RK3399_PD_ISP0 {
reg = <RK3399_PD_ISP0>;
clocks = <&cru ACLK_ISP0>,
<&cru HCLK_ISP0>;
pm_qos = <&qos_isp0_m0>,
<&qos_isp0_m1>;
+ #power-domain-cells = <0>;
};
- pd_isp1@RK3399_PD_ISP1 {
+ power-domain@RK3399_PD_ISP1 {
reg = <RK3399_PD_ISP1>;
clocks = <&cru ACLK_ISP1>,
<&cru HCLK_ISP1>;
pm_qos = <&qos_isp1_m0>,
<&qos_isp1_m1>;
+ #power-domain-cells = <0>;
};
- pd_vo@RK3399_PD_VO {
+ power-domain@RK3399_PD_VO {
reg = <RK3399_PD_VO>;
+ #power-domain-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
- pd_vopb@RK3399_PD_VOPB {
+ power-domain@RK3399_PD_VOPB {
reg = <RK3399_PD_VOPB>;
clocks = <&cru ACLK_VOP0>,
<&cru HCLK_VOP0>;
pm_qos = <&qos_vop_big_r>,
<&qos_vop_big_w>;
+ #power-domain-cells = <0>;
};
- pd_vopl@RK3399_PD_VOPL {
+ power-domain@RK3399_PD_VOPL {
reg = <RK3399_PD_VOPL>;
clocks = <&cru ACLK_VOP1>,
<&cru HCLK_VOP1>;
pm_qos = <&qos_vop_little>;
+ #power-domain-cells = <0>;
};
};
};
@@ -1398,7 +1418,7 @@
status = "disabled";
};
- u2phy0: usb2-phy@e450 {
+ u2phy0: usb2phy@e450 {
compatible = "rockchip,rk3399-usb2phy";
reg = <0xe450 0x10>;
clocks = <&cru SCLK_USB2PHY0_REF>;
@@ -1425,7 +1445,7 @@
};
};
- u2phy1: usb2-phy@e460 {
+ u2phy1: usb2phy@e460 {
compatible = "rockchip,rk3399-usb2phy";
reg = <0xe460 0x10>;
clocks = <&cru SCLK_USB2PHY1_REF>;
@@ -2354,7 +2374,7 @@
};
};
- sleep {
+ suspend {
ap_pwroff: ap-pwroff {
rockchip,pins = <1 RK_PA5 1 &pcfg_pull_none>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
index c0074b3ed4af..01d1a75c8b4d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
@@ -329,7 +329,6 @@
&pcie0 {
ep-gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
num-lanes = <4>;
pinctrl-0 = <&pcie_clkreqnb_cpm>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
new file mode 100644
index 000000000000..69786557093d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ *
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3568.dtsi"
+
+/ {
+ model = "Rockchip RK3568 EVB1 DDR4 V10 Board";
+ compatible = "rockchip,rk3568-evb1-v10", "rockchip,rk3568";
+
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ dc_12v: dc-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "dc_12v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc3v3_lcd0_n: vcc3v3-lcd0-n {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_lcd0_n";
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_lcd1_n: vcc3v3-lcd1-n {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_lcd1_n";
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ non-removable;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-pinctrl.dtsi
new file mode 100644
index 000000000000..a588ca95ace2
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-pinctrl.dtsi
@@ -0,0 +1,3111 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ */
+
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rockchip-pinconf.dtsi"
+
+/*
+ * This file is auto-generated by the pin2dts tool; please keep the generated
+ * code as-is and add changes at the end of this file.
+ */
+&pinctrl {
+ acodec {
+ /omit-if-no-ref/
+ acodec_pins: acodec-pins {
+ rockchip,pins =
+ /* acodec_adc_sync */
+ <1 RK_PB1 5 &pcfg_pull_none>,
+ /* acodec_adcclk */
+ <1 RK_PA1 5 &pcfg_pull_none>,
+ /* acodec_adcdata */
+ <1 RK_PA0 5 &pcfg_pull_none>,
+ /* acodec_dac_datal */
+ <1 RK_PA7 5 &pcfg_pull_none>,
+ /* acodec_dac_datar */
+ <1 RK_PB0 5 &pcfg_pull_none>,
+ /* acodec_dacclk */
+ <1 RK_PA3 5 &pcfg_pull_none>,
+ /* acodec_dacsync */
+ <1 RK_PA5 5 &pcfg_pull_none>;
+ };
+ };
+
+ audiopwm {
+ /omit-if-no-ref/
+ audiopwm_lout: audiopwm-lout {
+ rockchip,pins =
+ /* audiopwm_lout */
+ <1 RK_PA0 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ audiopwm_loutn: audiopwm-loutn {
+ rockchip,pins =
+ /* audiopwm_loutn */
+ <1 RK_PA1 6 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ audiopwm_loutp: audiopwm-loutp {
+ rockchip,pins =
+ /* audiopwm_loutp */
+ <1 RK_PA0 6 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ audiopwm_rout: audiopwm-rout {
+ rockchip,pins =
+ /* audiopwm_rout */
+ <1 RK_PA1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ audiopwm_routn: audiopwm-routn {
+ rockchip,pins =
+ /* audiopwm_routn */
+ <1 RK_PA7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ audiopwm_routp: audiopwm-routp {
+ rockchip,pins =
+ /* audiopwm_routp */
+ <1 RK_PA6 4 &pcfg_pull_none>;
+ };
+ };
+
+ bt656 {
+ /omit-if-no-ref/
+ bt656m0_pins: bt656m0-pins {
+ rockchip,pins =
+ /* bt656_clkm0 */
+ <3 RK_PA0 2 &pcfg_pull_none>,
+ /* bt656_d0m0 */
+ <2 RK_PD0 2 &pcfg_pull_none>,
+ /* bt656_d1m0 */
+ <2 RK_PD1 2 &pcfg_pull_none>,
+ /* bt656_d2m0 */
+ <2 RK_PD2 2 &pcfg_pull_none>,
+ /* bt656_d3m0 */
+ <2 RK_PD3 2 &pcfg_pull_none>,
+ /* bt656_d4m0 */
+ <2 RK_PD4 2 &pcfg_pull_none>,
+ /* bt656_d5m0 */
+ <2 RK_PD5 2 &pcfg_pull_none>,
+ /* bt656_d6m0 */
+ <2 RK_PD6 2 &pcfg_pull_none>,
+ /* bt656_d7m0 */
+ <2 RK_PD7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ bt656m1_pins: bt656m1-pins {
+ rockchip,pins =
+ /* bt656_clkm1 */
+ <4 RK_PB4 5 &pcfg_pull_none>,
+ /* bt656_d0m1 */
+ <3 RK_PC6 5 &pcfg_pull_none>,
+ /* bt656_d1m1 */
+ <3 RK_PC7 5 &pcfg_pull_none>,
+ /* bt656_d2m1 */
+ <3 RK_PD0 5 &pcfg_pull_none>,
+ /* bt656_d3m1 */
+ <3 RK_PD1 5 &pcfg_pull_none>,
+ /* bt656_d4m1 */
+ <3 RK_PD2 5 &pcfg_pull_none>,
+ /* bt656_d5m1 */
+ <3 RK_PD3 5 &pcfg_pull_none>,
+ /* bt656_d6m1 */
+ <3 RK_PD4 5 &pcfg_pull_none>,
+ /* bt656_d7m1 */
+ <3 RK_PD5 5 &pcfg_pull_none>;
+ };
+ };
+
+ bt1120 {
+ /omit-if-no-ref/
+ bt1120_pins: bt1120-pins {
+ rockchip,pins =
+ /* bt1120_clk */
+ <3 RK_PA6 2 &pcfg_pull_none>,
+ /* bt1120_d0 */
+ <3 RK_PA1 2 &pcfg_pull_none>,
+ /* bt1120_d1 */
+ <3 RK_PA2 2 &pcfg_pull_none>,
+ /* bt1120_d2 */
+ <3 RK_PA3 2 &pcfg_pull_none>,
+ /* bt1120_d3 */
+ <3 RK_PA4 2 &pcfg_pull_none>,
+ /* bt1120_d4 */
+ <3 RK_PA5 2 &pcfg_pull_none>,
+ /* bt1120_d5 */
+ <3 RK_PA7 2 &pcfg_pull_none>,
+ /* bt1120_d6 */
+ <3 RK_PB0 2 &pcfg_pull_none>,
+ /* bt1120_d7 */
+ <3 RK_PB1 2 &pcfg_pull_none>,
+ /* bt1120_d8 */
+ <3 RK_PB2 2 &pcfg_pull_none>,
+ /* bt1120_d9 */
+ <3 RK_PB3 2 &pcfg_pull_none>,
+ /* bt1120_d10 */
+ <3 RK_PB4 2 &pcfg_pull_none>,
+ /* bt1120_d11 */
+ <3 RK_PB5 2 &pcfg_pull_none>,
+ /* bt1120_d12 */
+ <3 RK_PB6 2 &pcfg_pull_none>,
+ /* bt1120_d13 */
+ <3 RK_PC1 2 &pcfg_pull_none>,
+ /* bt1120_d14 */
+ <3 RK_PC2 2 &pcfg_pull_none>,
+ /* bt1120_d15 */
+ <3 RK_PC3 2 &pcfg_pull_none>;
+ };
+ };
+
+ cam {
+ /omit-if-no-ref/
+ cam_clkout0: cam-clkout0 {
+ rockchip,pins =
+ /* cam_clkout0 */
+ <4 RK_PA7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ cam_clkout1: cam-clkout1 {
+ rockchip,pins =
+ /* cam_clkout1 */
+ <4 RK_PB0 1 &pcfg_pull_none>;
+ };
+ };
+
+ can0 {
+ /omit-if-no-ref/
+ can0m0_pins: can0m0-pins {
+ rockchip,pins =
+ /* can0_rxm0 */
+ <0 RK_PB4 2 &pcfg_pull_none>,
+ /* can0_txm0 */
+ <0 RK_PB3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ can0m1_pins: can0m1-pins {
+ rockchip,pins =
+ /* can0_rxm1 */
+ <2 RK_PA2 4 &pcfg_pull_none>,
+ /* can0_txm1 */
+ <2 RK_PA1 4 &pcfg_pull_none>;
+ };
+ };
+
+ can1 {
+ /omit-if-no-ref/
+ can1m0_pins: can1m0-pins {
+ rockchip,pins =
+ /* can1_rxm0 */
+ <1 RK_PA0 3 &pcfg_pull_none>,
+ /* can1_txm0 */
+ <1 RK_PA1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ can1m1_pins: can1m1-pins {
+ rockchip,pins =
+ /* can1_rxm1 */
+ <4 RK_PC2 3 &pcfg_pull_none>,
+ /* can1_txm1 */
+ <4 RK_PC3 3 &pcfg_pull_none>;
+ };
+ };
+
+ can2 {
+ /omit-if-no-ref/
+ can2m0_pins: can2m0-pins {
+ rockchip,pins =
+ /* can2_rxm0 */
+ <4 RK_PB4 3 &pcfg_pull_none>,
+ /* can2_txm0 */
+ <4 RK_PB5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ can2m1_pins: can2m1-pins {
+ rockchip,pins =
+ /* can2_rxm1 */
+ <2 RK_PB1 4 &pcfg_pull_none>,
+ /* can2_txm1 */
+ <2 RK_PB2 4 &pcfg_pull_none>;
+ };
+ };
+
+ cif {
+ /omit-if-no-ref/
+ cif_clk: cif-clk {
+ rockchip,pins =
+ /* cif_clkout */
+ <4 RK_PC0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ cif_dvp_clk: cif-dvp-clk {
+ rockchip,pins =
+ /* cif_clkin */
+ <4 RK_PC1 1 &pcfg_pull_none>,
+ /* cif_href */
+ <4 RK_PB6 1 &pcfg_pull_none>,
+ /* cif_vsync */
+ <4 RK_PB7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ cif_dvp_bus16: cif-dvp-bus16 {
+ rockchip,pins =
+ /* cif_d8 */
+ <3 RK_PD6 1 &pcfg_pull_none>,
+ /* cif_d9 */
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ /* cif_d10 */
+ <4 RK_PA0 1 &pcfg_pull_none>,
+ /* cif_d11 */
+ <4 RK_PA1 1 &pcfg_pull_none>,
+ /* cif_d12 */
+ <4 RK_PA2 1 &pcfg_pull_none>,
+ /* cif_d13 */
+ <4 RK_PA3 1 &pcfg_pull_none>,
+ /* cif_d14 */
+ <4 RK_PA4 1 &pcfg_pull_none>,
+ /* cif_d15 */
+ <4 RK_PA5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ cif_dvp_bus8: cif-dvp-bus8 {
+ rockchip,pins =
+ /* cif_d0 */
+ <3 RK_PC6 1 &pcfg_pull_none>,
+ /* cif_d1 */
+ <3 RK_PC7 1 &pcfg_pull_none>,
+ /* cif_d2 */
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ /* cif_d3 */
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ /* cif_d4 */
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ /* cif_d5 */
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ /* cif_d6 */
+ <3 RK_PD4 1 &pcfg_pull_none>,
+ /* cif_d7 */
+ <3 RK_PD5 1 &pcfg_pull_none>;
+ };
+ };
+
+ clk32k {
+ /omit-if-no-ref/
+ clk32k_in: clk32k-in {
+ rockchip,pins =
+ /* clk32k_in */
+ <0 RK_PB0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ clk32k_out0: clk32k-out0 {
+ rockchip,pins =
+ /* clk32k_out0 */
+ <0 RK_PB0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ clk32k_out1: clk32k-out1 {
+ rockchip,pins =
+ /* clk32k_out1 */
+ <2 RK_PC6 1 &pcfg_pull_none>;
+ };
+ };
+
+ cpu {
+ /omit-if-no-ref/
+ cpu_pins: cpu-pins {
+ rockchip,pins =
+ /* cpu_avs */
+ <0 RK_PB7 2 &pcfg_pull_none>;
+ };
+ };
+
+ ebc {
+ /omit-if-no-ref/
+ ebc_extern: ebc-extern {
+ rockchip,pins =
+ /* ebc_sdce1 */
+ <4 RK_PA7 2 &pcfg_pull_none>,
+ /* ebc_sdce2 */
+ <4 RK_PB0 2 &pcfg_pull_none>,
+ /* ebc_sdce3 */
+ <4 RK_PB1 2 &pcfg_pull_none>,
+ /* ebc_sdshr */
+ <4 RK_PB5 2 &pcfg_pull_none>,
+ /* ebc_vcom */
+ <4 RK_PB2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ ebc_pins: ebc-pins {
+ rockchip,pins =
+ /* ebc_gdclk */
+ <4 RK_PC0 2 &pcfg_pull_none>,
+ /* ebc_gdoe */
+ <4 RK_PB3 2 &pcfg_pull_none>,
+ /* ebc_gdsp */
+ <4 RK_PB4 2 &pcfg_pull_none>,
+ /* ebc_sdce0 */
+ <4 RK_PA6 2 &pcfg_pull_none>,
+ /* ebc_sdclk */
+ <4 RK_PC1 2 &pcfg_pull_none>,
+ /* ebc_sddo0 */
+ <3 RK_PC6 2 &pcfg_pull_none>,
+ /* ebc_sddo1 */
+ <3 RK_PC7 2 &pcfg_pull_none>,
+ /* ebc_sddo2 */
+ <3 RK_PD0 2 &pcfg_pull_none>,
+ /* ebc_sddo3 */
+ <3 RK_PD1 2 &pcfg_pull_none>,
+ /* ebc_sddo4 */
+ <3 RK_PD2 2 &pcfg_pull_none>,
+ /* ebc_sddo5 */
+ <3 RK_PD3 2 &pcfg_pull_none>,
+ /* ebc_sddo6 */
+ <3 RK_PD4 2 &pcfg_pull_none>,
+ /* ebc_sddo7 */
+ <3 RK_PD5 2 &pcfg_pull_none>,
+ /* ebc_sddo8 */
+ <3 RK_PD6 2 &pcfg_pull_none>,
+ /* ebc_sddo9 */
+ <3 RK_PD7 2 &pcfg_pull_none>,
+ /* ebc_sddo10 */
+ <4 RK_PA0 2 &pcfg_pull_none>,
+ /* ebc_sddo11 */
+ <4 RK_PA1 2 &pcfg_pull_none>,
+ /* ebc_sddo12 */
+ <4 RK_PA2 2 &pcfg_pull_none>,
+ /* ebc_sddo13 */
+ <4 RK_PA3 2 &pcfg_pull_none>,
+ /* ebc_sddo14 */
+ <4 RK_PA4 2 &pcfg_pull_none>,
+ /* ebc_sddo15 */
+ <4 RK_PA5 2 &pcfg_pull_none>,
+ /* ebc_sdle */
+ <4 RK_PB6 2 &pcfg_pull_none>,
+ /* ebc_sdoe */
+ <4 RK_PB7 2 &pcfg_pull_none>;
+ };
+ };
+
+ edpdp {
+ /omit-if-no-ref/
+ edpdpm0_pins: edpdpm0-pins {
+ rockchip,pins =
+ /* edpdp_hpdinm0 */
+ <4 RK_PC4 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ edpdpm1_pins: edpdpm1-pins {
+ rockchip,pins =
+ /* edpdp_hpdinm1 */
+ <0 RK_PC2 2 &pcfg_pull_none>;
+ };
+ };
+
+ emmc {
+ /omit-if-no-ref/
+ emmc_rstnout: emmc-rstnout {
+ rockchip,pins =
+ /* emmc_rstn */
+ <1 RK_PC7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins =
+ /* emmc_d0 */
+ <1 RK_PB4 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d1 */
+ <1 RK_PB5 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d2 */
+ <1 RK_PB6 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d3 */
+ <1 RK_PB7 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d4 */
+ <1 RK_PC0 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d5 */
+ <1 RK_PC1 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d6 */
+ <1 RK_PC2 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d7 */
+ <1 RK_PC3 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_clk: emmc-clk {
+ rockchip,pins =
+ /* emmc_clkout */
+ <1 RK_PC5 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_cmd: emmc-cmd {
+ rockchip,pins =
+ /* emmc_cmd */
+ <1 RK_PC4 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_datastrobe: emmc-datastrobe {
+ rockchip,pins =
+ /* emmc_datastrobe */
+ <1 RK_PC6 1 &pcfg_pull_none>;
+ };
+ };
+
+ eth0 {
+ /omit-if-no-ref/
+ eth0_pins: eth0-pins {
+ rockchip,pins =
+ /* eth0_refclko25m */
+ <2 RK_PC1 2 &pcfg_pull_none>;
+ };
+ };
+
+ eth1 {
+ /omit-if-no-ref/
+ eth1m0_pins: eth1m0-pins {
+ rockchip,pins =
+ /* eth1_refclko25mm0 */
+ <3 RK_PB0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ eth1m1_pins: eth1m1-pins {
+ rockchip,pins =
+ /* eth1_refclko25mm1 */
+ <4 RK_PB3 3 &pcfg_pull_none>;
+ };
+ };
+
+ flash {
+ /omit-if-no-ref/
+ flash_pins: flash-pins {
+ rockchip,pins =
+ /* flash_ale */
+ <1 RK_PD0 2 &pcfg_pull_none>,
+ /* flash_cle */
+ <1 RK_PC6 3 &pcfg_pull_none>,
+ /* flash_cs0n */
+ <1 RK_PD3 2 &pcfg_pull_none>,
+ /* flash_cs1n */
+ <1 RK_PD4 2 &pcfg_pull_none>,
+ /* flash_d0 */
+ <1 RK_PB4 2 &pcfg_pull_none>,
+ /* flash_d1 */
+ <1 RK_PB5 2 &pcfg_pull_none>,
+ /* flash_d2 */
+ <1 RK_PB6 2 &pcfg_pull_none>,
+ /* flash_d3 */
+ <1 RK_PB7 2 &pcfg_pull_none>,
+ /* flash_d4 */
+ <1 RK_PC0 2 &pcfg_pull_none>,
+ /* flash_d5 */
+ <1 RK_PC1 2 &pcfg_pull_none>,
+ /* flash_d6 */
+ <1 RK_PC2 2 &pcfg_pull_none>,
+ /* flash_d7 */
+ <1 RK_PC3 2 &pcfg_pull_none>,
+ /* flash_dqs */
+ <1 RK_PC5 2 &pcfg_pull_none>,
+ /* flash_rdn */
+ <1 RK_PD2 2 &pcfg_pull_none>,
+ /* flash_rdy */
+ <1 RK_PD1 2 &pcfg_pull_none>,
+ /* flash_volsel */
+ <0 RK_PA7 1 &pcfg_pull_none>,
+ /* flash_wpn */
+ <1 RK_PC7 3 &pcfg_pull_none>,
+ /* flash_wrn */
+ <1 RK_PC4 2 &pcfg_pull_none>;
+ };
+ };
+
+ fspi {
+ /omit-if-no-ref/
+ fspi_pins: fspi-pins {
+ rockchip,pins =
+ /* fspi_clk */
+ <1 RK_PD0 1 &pcfg_pull_none>,
+ /* fspi_cs0n */
+ <1 RK_PD3 1 &pcfg_pull_none>,
+ /* fspi_d0 */
+ <1 RK_PD1 1 &pcfg_pull_none>,
+ /* fspi_d1 */
+ <1 RK_PD2 1 &pcfg_pull_none>,
+ /* fspi_d2 */
+ <1 RK_PC7 2 &pcfg_pull_none>,
+ /* fspi_d3 */
+ <1 RK_PD4 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fspi_cs1: fspi-cs1 {
+ rockchip,pins =
+ /* fspi_cs1n */
+ <1 RK_PC6 2 &pcfg_pull_up>;
+ };
+ };
+
+ gmac0 {
+ /omit-if-no-ref/
+ gmac0_miim: gmac0-miim {
+ rockchip,pins =
+ /* gmac0_mdc */
+ <2 RK_PC3 2 &pcfg_pull_none>,
+ /* gmac0_mdio */
+ <2 RK_PC4 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_clkinout: gmac0-clkinout {
+ rockchip,pins =
+ /* gmac0_mclkinout */
+ <2 RK_PC2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_rx_er: gmac0-rx-er {
+ rockchip,pins =
+ /* gmac0_rxer */
+ <2 RK_PC5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_rx_bus2: gmac0-rx-bus2 {
+ rockchip,pins =
+ /* gmac0_rxd0 */
+ <2 RK_PB6 1 &pcfg_pull_none>,
+ /* gmac0_rxd1 */
+ <2 RK_PB7 2 &pcfg_pull_none>,
+ /* gmac0_rxdvcrs */
+ <2 RK_PC0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_tx_bus2: gmac0-tx-bus2 {
+ rockchip,pins =
+ /* gmac0_txd0 */
+ <2 RK_PB3 1 &pcfg_pull_none_drv_level_2>,
+ /* gmac0_txd1 */
+ <2 RK_PB4 1 &pcfg_pull_none_drv_level_2>,
+ /* gmac0_txen */
+ <2 RK_PB5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_rgmii_clk: gmac0-rgmii-clk {
+ rockchip,pins =
+ /* gmac0_rxclk */
+ <2 RK_PA5 2 &pcfg_pull_none>,
+ /* gmac0_txclk */
+ <2 RK_PB0 2 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_rgmii_bus: gmac0-rgmii-bus {
+ rockchip,pins =
+ /* gmac0_rxd2 */
+ <2 RK_PA3 2 &pcfg_pull_none>,
+ /* gmac0_rxd3 */
+ <2 RK_PA4 2 &pcfg_pull_none>,
+ /* gmac0_txd2 */
+ <2 RK_PA6 2 &pcfg_pull_none_drv_level_2>,
+ /* gmac0_txd3 */
+ <2 RK_PA7 2 &pcfg_pull_none_drv_level_2>;
+ };
+ };
+
+ gmac1 {
+ /omit-if-no-ref/
+ gmac1m0_miim: gmac1m0-miim {
+ rockchip,pins =
+ /* gmac1_mdcm0 */
+ <3 RK_PC4 3 &pcfg_pull_none>,
+ /* gmac1_mdiom0 */
+ <3 RK_PC5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_clkinout: gmac1m0-clkinout {
+ rockchip,pins =
+ /* gmac1_mclkinoutm0 */
+ <3 RK_PC0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_rx_er: gmac1m0-rx-er {
+ rockchip,pins =
+ /* gmac1_rxerm0 */
+ <3 RK_PB4 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_rx_bus2: gmac1m0-rx-bus2 {
+ rockchip,pins =
+ /* gmac1_rxd0m0 */
+ <3 RK_PB1 3 &pcfg_pull_none>,
+ /* gmac1_rxd1m0 */
+ <3 RK_PB2 3 &pcfg_pull_none>,
+ /* gmac1_rxdvcrsm0 */
+ <3 RK_PB3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_tx_bus2: gmac1m0-tx-bus2 {
+ rockchip,pins =
+ /* gmac1_txd0m0 */
+ <3 RK_PB5 3 &pcfg_pull_none_drv_level_2>,
+ /* gmac1_txd1m0 */
+ <3 RK_PB6 3 &pcfg_pull_none_drv_level_2>,
+ /* gmac1_txenm0 */
+ <3 RK_PB7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_rgmii_clk: gmac1m0-rgmii-clk {
+ rockchip,pins =
+ /* gmac1_rxclkm0 */
+ <3 RK_PA7 3 &pcfg_pull_none>,
+ /* gmac1_txclkm0 */
+ <3 RK_PA6 3 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_rgmii_bus: gmac1m0-rgmii-bus {
+ rockchip,pins =
+ /* gmac1_rxd2m0 */
+ <3 RK_PA4 3 &pcfg_pull_none>,
+ /* gmac1_rxd3m0 */
+ <3 RK_PA5 3 &pcfg_pull_none>,
+ /* gmac1_txd2m0 */
+ <3 RK_PA2 3 &pcfg_pull_none_drv_level_2>,
+ /* gmac1_txd3m0 */
+ <3 RK_PA3 3 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_miim: gmac1m1-miim {
+ rockchip,pins =
+ /* gmac1_mdcm1 */
+ <4 RK_PB6 3 &pcfg_pull_none>,
+ /* gmac1_mdiom1 */
+ <4 RK_PB7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_clkinout: gmac1m1-clkinout {
+ rockchip,pins =
+ /* gmac1_mclkinoutm1 */
+ <4 RK_PC1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_rx_er: gmac1m1-rx-er {
+ rockchip,pins =
+ /* gmac1_rxerm1 */
+ <4 RK_PB2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_rx_bus2: gmac1m1-rx-bus2 {
+ rockchip,pins =
+ /* gmac1_rxd0m1 */
+ <4 RK_PA7 3 &pcfg_pull_none>,
+ /* gmac1_rxd1m1 */
+ <4 RK_PB0 3 &pcfg_pull_none>,
+ /* gmac1_rxdvcrsm1 */
+ <4 RK_PB1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_tx_bus2: gmac1m1-tx-bus2 {
+ rockchip,pins =
+ /* gmac1_txd0m1 */
+ <4 RK_PA4 3 &pcfg_pull_none_drv_level_2>,
+ /* gmac1_txd1m1 */
+ <4 RK_PA5 3 &pcfg_pull_none_drv_level_2>,
+ /* gmac1_txenm1 */
+ <4 RK_PA6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_rgmii_clk: gmac1m1-rgmii-clk {
+ rockchip,pins =
+ /* gmac1_rxclkm1 */
+ <4 RK_PA3 3 &pcfg_pull_none>,
+ /* gmac1_txclkm1 */
+ <4 RK_PA0 3 &pcfg_pull_none_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_rgmii_bus: gmac1m1-rgmii-bus {
+ rockchip,pins =
+ /* gmac1_rxd2m1 */
+ <4 RK_PA1 3 &pcfg_pull_none>,
+ /* gmac1_rxd3m1 */
+ <4 RK_PA2 3 &pcfg_pull_none>,
+ /* gmac1_txd2m1 */
+ <3 RK_PD6 3 &pcfg_pull_none_drv_level_2>,
+ /* gmac1_txd3m1 */
+ <3 RK_PD7 3 &pcfg_pull_none_drv_level_2>;
+ };
+ };
+
+ gpu {
+ /omit-if-no-ref/
+ gpu_pins: gpu-pins {
+ rockchip,pins =
+ /* gpu_avs */
+ <0 RK_PC0 2 &pcfg_pull_none>,
+ /* gpu_pwren */
+ <0 RK_PA6 4 &pcfg_pull_none>;
+ };
+ };
+
+ hdmitx {
+ /omit-if-no-ref/
+ hdmitxm0_cec: hdmitxm0-cec {
+ rockchip,pins =
+ /* hdmitxm0_cec */
+ <4 RK_PD1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ hdmitxm1_cec: hdmitxm1-cec {
+ rockchip,pins =
+ /* hdmitxm1_cec */
+ <0 RK_PC7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ hdmitx_scl: hdmitx-scl {
+ rockchip,pins =
+ /* hdmitx_scl */
+ <4 RK_PC7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ hdmitx_sda: hdmitx-sda {
+ rockchip,pins =
+ /* hdmitx_sda */
+ <4 RK_PD0 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c0 {
+ /omit-if-no-ref/
+ i2c0_xfer: i2c0-xfer {
+ rockchip,pins =
+ /* i2c0_scl */
+ <0 RK_PB1 1 &pcfg_pull_none_smt>,
+ /* i2c0_sda */
+ <0 RK_PB2 1 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c1 {
+ /omit-if-no-ref/
+ i2c1_xfer: i2c1-xfer {
+ rockchip,pins =
+ /* i2c1_scl */
+ <0 RK_PB3 1 &pcfg_pull_none_smt>,
+ /* i2c1_sda */
+ <0 RK_PB4 1 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c2 {
+ /omit-if-no-ref/
+ i2c2m0_xfer: i2c2m0-xfer {
+ rockchip,pins =
+ /* i2c2_sclm0 */
+ <0 RK_PB5 1 &pcfg_pull_none_smt>,
+ /* i2c2_sdam0 */
+ <0 RK_PB6 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c2m1_xfer: i2c2m1-xfer {
+ rockchip,pins =
+ /* i2c2_sclm1 */
+ <4 RK_PB5 1 &pcfg_pull_none_smt>,
+ /* i2c2_sdam1 */
+ <4 RK_PB4 1 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c3 {
+ /omit-if-no-ref/
+ i2c3m0_xfer: i2c3m0-xfer {
+ rockchip,pins =
+ /* i2c3_sclm0 */
+ <1 RK_PA1 1 &pcfg_pull_none_smt>,
+ /* i2c3_sdam0 */
+ <1 RK_PA0 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c3m1_xfer: i2c3m1-xfer {
+ rockchip,pins =
+ /* i2c3_sclm1 */
+ <3 RK_PB5 4 &pcfg_pull_none_smt>,
+ /* i2c3_sdam1 */
+ <3 RK_PB6 4 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c4 {
+ /omit-if-no-ref/
+ i2c4m0_xfer: i2c4m0-xfer {
+ rockchip,pins =
+ /* i2c4_sclm0 */
+ <4 RK_PB3 1 &pcfg_pull_none_smt>,
+ /* i2c4_sdam0 */
+ <4 RK_PB2 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c4m1_xfer: i2c4m1-xfer {
+ rockchip,pins =
+ /* i2c4_sclm1 */
+ <2 RK_PB2 2 &pcfg_pull_none_smt>,
+ /* i2c4_sdam1 */
+ <2 RK_PB1 2 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c5 {
+ /omit-if-no-ref/
+ i2c5m0_xfer: i2c5m0-xfer {
+ rockchip,pins =
+ /* i2c5_sclm0 */
+ <3 RK_PB3 4 &pcfg_pull_none_smt>,
+ /* i2c5_sdam0 */
+ <3 RK_PB4 4 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c5m1_xfer: i2c5m1-xfer {
+ rockchip,pins =
+ /* i2c5_sclm1 */
+ <4 RK_PC7 2 &pcfg_pull_none_smt>,
+ /* i2c5_sdam1 */
+ <4 RK_PD0 2 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2s1 {
+ /omit-if-no-ref/
+ i2s1m0_lrckrx: i2s1m0-lrckrx {
+ rockchip,pins =
+ /* i2s1m0_lrckrx */
+ <1 RK_PA6 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_lrcktx: i2s1m0-lrcktx {
+ rockchip,pins =
+ /* i2s1m0_lrcktx */
+ <1 RK_PA5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_mclk: i2s1m0-mclk {
+ rockchip,pins =
+ /* i2s1m0_mclk */
+ <1 RK_PA2 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sclkrx: i2s1m0-sclkrx {
+ rockchip,pins =
+ /* i2s1m0_sclkrx */
+ <1 RK_PA4 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sclktx: i2s1m0-sclktx {
+ rockchip,pins =
+ /* i2s1m0_sclktx */
+ <1 RK_PA3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi0: i2s1m0-sdi0 {
+ rockchip,pins =
+ /* i2s1m0_sdi0 */
+ <1 RK_PB3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi1: i2s1m0-sdi1 {
+ rockchip,pins =
+ /* i2s1m0_sdi1 */
+ <1 RK_PB2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi2: i2s1m0-sdi2 {
+ rockchip,pins =
+ /* i2s1m0_sdi2 */
+ <1 RK_PB1 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdi3: i2s1m0-sdi3 {
+ rockchip,pins =
+ /* i2s1m0_sdi3 */
+ <1 RK_PB0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo0: i2s1m0-sdo0 {
+ rockchip,pins =
+ /* i2s1m0_sdo0 */
+ <1 RK_PA7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo1: i2s1m0-sdo1 {
+ rockchip,pins =
+ /* i2s1m0_sdo1 */
+ <1 RK_PB0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo2: i2s1m0-sdo2 {
+ rockchip,pins =
+ /* i2s1m0_sdo2 */
+ <1 RK_PB1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m0_sdo3: i2s1m0-sdo3 {
+ rockchip,pins =
+ /* i2s1m0_sdo3 */
+ <1 RK_PB2 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_lrckrx: i2s1m1-lrckrx {
+ rockchip,pins =
+ /* i2s1m1_lrckrx */
+ <4 RK_PA7 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_lrcktx: i2s1m1-lrcktx {
+ rockchip,pins =
+ /* i2s1m1_lrcktx */
+ <3 RK_PD0 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_mclk: i2s1m1-mclk {
+ rockchip,pins =
+ /* i2s1m1_mclk */
+ <3 RK_PC6 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sclkrx: i2s1m1-sclkrx {
+ rockchip,pins =
+ /* i2s1m1_sclkrx */
+ <4 RK_PA6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sclktx: i2s1m1-sclktx {
+ rockchip,pins =
+ /* i2s1m1_sclktx */
+ <3 RK_PC7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi0: i2s1m1-sdi0 {
+ rockchip,pins =
+ /* i2s1m1_sdi0 */
+ <3 RK_PD2 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi1: i2s1m1-sdi1 {
+ rockchip,pins =
+ /* i2s1m1_sdi1 */
+ <3 RK_PD3 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi2: i2s1m1-sdi2 {
+ rockchip,pins =
+ /* i2s1m1_sdi2 */
+ <3 RK_PD4 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdi3: i2s1m1-sdi3 {
+ rockchip,pins =
+ /* i2s1m1_sdi3 */
+ <3 RK_PD5 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo0: i2s1m1-sdo0 {
+ rockchip,pins =
+ /* i2s1m1_sdo0 */
+ <3 RK_PD1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo1: i2s1m1-sdo1 {
+ rockchip,pins =
+ /* i2s1m1_sdo1 */
+ <4 RK_PB0 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo2: i2s1m1-sdo2 {
+ rockchip,pins =
+ /* i2s1m1_sdo2 */
+ <4 RK_PB1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m1_sdo3: i2s1m1-sdo3 {
+ rockchip,pins =
+ /* i2s1m1_sdo3 */
+ <4 RK_PB5 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_lrckrx: i2s1m2-lrckrx {
+ rockchip,pins =
+ /* i2s1m2_lrckrx */
+ <3 RK_PC5 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_lrcktx: i2s1m2-lrcktx {
+ rockchip,pins =
+ /* i2s1m2_lrcktx */
+ <2 RK_PD2 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_mclk: i2s1m2-mclk {
+ rockchip,pins =
+ /* i2s1m2_mclk */
+ <2 RK_PD0 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sclkrx: i2s1m2-sclkrx {
+ rockchip,pins =
+ /* i2s1m2_sclkrx */
+ <3 RK_PC3 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sclktx: i2s1m2-sclktx {
+ rockchip,pins =
+ /* i2s1m2_sclktx */
+ <2 RK_PD1 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdi0: i2s1m2-sdi0 {
+ rockchip,pins =
+ /* i2s1m2_sdi0 */
+ <2 RK_PD3 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdi1: i2s1m2-sdi1 {
+ rockchip,pins =
+ /* i2s1m2_sdi1 */
+ <2 RK_PD4 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdi2: i2s1m2-sdi2 {
+ rockchip,pins =
+ /* i2s1m2_sdi2 */
+ <2 RK_PD5 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdi3: i2s1m2-sdi3 {
+ rockchip,pins =
+ /* i2s1m2_sdi3 */
+ <2 RK_PD6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdo0: i2s1m2-sdo0 {
+ rockchip,pins =
+ /* i2s1m2_sdo0 */
+ <2 RK_PD7 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdo1: i2s1m2-sdo1 {
+ rockchip,pins =
+ /* i2s1m2_sdo1 */
+ <3 RK_PA0 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdo2: i2s1m2-sdo2 {
+ rockchip,pins =
+ /* i2s1m2_sdo2 */
+ <3 RK_PC1 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1m2_sdo3: i2s1m2-sdo3 {
+ rockchip,pins =
+ /* i2s1m2_sdo3 */
+ <3 RK_PC2 5 &pcfg_pull_none>;
+ };
+ };
+
+ i2s2 {
+ /omit-if-no-ref/
+ i2s2m0_lrckrx: i2s2m0-lrckrx {
+ rockchip,pins =
+ /* i2s2m0_lrckrx */
+ <2 RK_PC0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_lrcktx: i2s2m0-lrcktx {
+ rockchip,pins =
+ /* i2s2m0_lrcktx */
+ <2 RK_PC3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_mclk: i2s2m0-mclk {
+ rockchip,pins =
+ /* i2s2m0_mclk */
+ <2 RK_PC1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sclkrx: i2s2m0-sclkrx {
+ rockchip,pins =
+ /* i2s2m0_sclkrx */
+ <2 RK_PB7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sclktx: i2s2m0-sclktx {
+ rockchip,pins =
+ /* i2s2m0_sclktx */
+ <2 RK_PC2 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sdi: i2s2m0-sdi {
+ rockchip,pins =
+ /* i2s2m0_sdi */
+ <2 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m0_sdo: i2s2m0-sdo {
+ rockchip,pins =
+ /* i2s2m0_sdo */
+ <2 RK_PC4 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_lrckrx: i2s2m1-lrckrx {
+ rockchip,pins =
+ /* i2s2m1_lrckrx */
+ <4 RK_PA5 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_lrcktx: i2s2m1-lrcktx {
+ rockchip,pins =
+ /* i2s2m1_lrcktx */
+ <4 RK_PA4 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_mclk: i2s2m1-mclk {
+ rockchip,pins =
+ /* i2s2m1_mclk */
+ <4 RK_PB6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sclkrx: i2s2m1-sclkrx {
+ rockchip,pins =
+ /* i2s2m1_sclkrx */
+ <4 RK_PC1 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sclktx: i2s2m1-sclktx {
+ rockchip,pins =
+ /* i2s2m1_sclktx */
+ <4 RK_PB7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sdi: i2s2m1-sdi {
+ rockchip,pins =
+ /* i2s2m1_sdi */
+ <4 RK_PB2 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s2m1_sdo: i2s2m1-sdo {
+ rockchip,pins =
+ /* i2s2m1_sdo */
+ <4 RK_PB3 5 &pcfg_pull_none>;
+ };
+ };
+
+ i2s3 {
+ /omit-if-no-ref/
+ i2s3m0_lrck: i2s3m0-lrck {
+ rockchip,pins =
+ /* i2s3m0_lrck */
+ <3 RK_PA4 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m0_mclk: i2s3m0-mclk {
+ rockchip,pins =
+ /* i2s3m0_mclk */
+ <3 RK_PA2 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m0_sclk: i2s3m0-sclk {
+ rockchip,pins =
+ /* i2s3m0_sclk */
+ <3 RK_PA3 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m0_sdi: i2s3m0-sdi {
+ rockchip,pins =
+ /* i2s3m0_sdi */
+ <3 RK_PA6 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m0_sdo: i2s3m0-sdo {
+ rockchip,pins =
+ /* i2s3m0_sdo */
+ <3 RK_PA5 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m1_lrck: i2s3m1-lrck {
+ rockchip,pins =
+ /* i2s3m1_lrck */
+ <4 RK_PC4 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m1_mclk: i2s3m1-mclk {
+ rockchip,pins =
+ /* i2s3m1_mclk */
+ <4 RK_PC2 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m1_sclk: i2s3m1-sclk {
+ rockchip,pins =
+ /* i2s3m1_sclk */
+ <4 RK_PC3 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m1_sdi: i2s3m1-sdi {
+ rockchip,pins =
+ /* i2s3m1_sdi */
+ <4 RK_PC6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s3m1_sdo: i2s3m1-sdo {
+ rockchip,pins =
+ /* i2s3m1_sdo */
+ <4 RK_PC5 5 &pcfg_pull_none>;
+ };
+ };
+
+ isp {
+ /omit-if-no-ref/
+ isp_pins: isp-pins {
+ rockchip,pins =
+ /* isp_flashtrigin */
+ <4 RK_PB4 4 &pcfg_pull_none>,
+ /* isp_flashtrigout */
+ <4 RK_PA6 1 &pcfg_pull_none>,
+ /* isp_prelighttrig */
+ <4 RK_PB1 1 &pcfg_pull_none>;
+ };
+ };
+
+ jtag {
+ /omit-if-no-ref/
+ jtag_pins: jtag-pins {
+ rockchip,pins =
+ /* jtag_tck */
+ <1 RK_PD7 2 &pcfg_pull_none>,
+ /* jtag_tms */
+ <2 RK_PA0 2 &pcfg_pull_none>;
+ };
+ };
+
+ lcdc {
+ /omit-if-no-ref/
+ lcdc_ctl: lcdc-ctl {
+ rockchip,pins =
+ /* lcdc_clk */
+ <3 RK_PA0 1 &pcfg_pull_none>,
+ /* lcdc_d0 */
+ <2 RK_PD0 1 &pcfg_pull_none>,
+ /* lcdc_d1 */
+ <2 RK_PD1 1 &pcfg_pull_none>,
+ /* lcdc_d2 */
+ <2 RK_PD2 1 &pcfg_pull_none>,
+ /* lcdc_d3 */
+ <2 RK_PD3 1 &pcfg_pull_none>,
+ /* lcdc_d4 */
+ <2 RK_PD4 1 &pcfg_pull_none>,
+ /* lcdc_d5 */
+ <2 RK_PD5 1 &pcfg_pull_none>,
+ /* lcdc_d6 */
+ <2 RK_PD6 1 &pcfg_pull_none>,
+ /* lcdc_d7 */
+ <2 RK_PD7 1 &pcfg_pull_none>,
+ /* lcdc_d8 */
+ <3 RK_PA1 1 &pcfg_pull_none>,
+ /* lcdc_d9 */
+ <3 RK_PA2 1 &pcfg_pull_none>,
+ /* lcdc_d10 */
+ <3 RK_PA3 1 &pcfg_pull_none>,
+ /* lcdc_d11 */
+ <3 RK_PA4 1 &pcfg_pull_none>,
+ /* lcdc_d12 */
+ <3 RK_PA5 1 &pcfg_pull_none>,
+ /* lcdc_d13 */
+ <3 RK_PA6 1 &pcfg_pull_none>,
+ /* lcdc_d14 */
+ <3 RK_PA7 1 &pcfg_pull_none>,
+ /* lcdc_d15 */
+ <3 RK_PB0 1 &pcfg_pull_none>,
+ /* lcdc_d16 */
+ <3 RK_PB1 1 &pcfg_pull_none>,
+ /* lcdc_d17 */
+ <3 RK_PB2 1 &pcfg_pull_none>,
+ /* lcdc_d18 */
+ <3 RK_PB3 1 &pcfg_pull_none>,
+ /* lcdc_d19 */
+ <3 RK_PB4 1 &pcfg_pull_none>,
+ /* lcdc_d20 */
+ <3 RK_PB5 1 &pcfg_pull_none>,
+ /* lcdc_d21 */
+ <3 RK_PB6 1 &pcfg_pull_none>,
+ /* lcdc_d22 */
+ <3 RK_PB7 1 &pcfg_pull_none>,
+ /* lcdc_d23 */
+ <3 RK_PC0 1 &pcfg_pull_none>,
+ /* lcdc_den */
+ <3 RK_PC3 1 &pcfg_pull_none>,
+ /* lcdc_hsync */
+ <3 RK_PC1 1 &pcfg_pull_none>,
+ /* lcdc_vsync */
+ <3 RK_PC2 1 &pcfg_pull_none>;
+ };
+ };
+
+ mcu {
+ /omit-if-no-ref/
+ mcu_pins: mcu-pins {
+ rockchip,pins =
+ /* mcu_jtagtck */
+ <0 RK_PB4 4 &pcfg_pull_none>,
+ /* mcu_jtagtdi */
+ <0 RK_PC1 4 &pcfg_pull_none>,
+ /* mcu_jtagtdo */
+ <0 RK_PB3 4 &pcfg_pull_none>,
+ /* mcu_jtagtms */
+ <0 RK_PC2 4 &pcfg_pull_none>,
+ /* mcu_jtagtrstn */
+ <0 RK_PC3 4 &pcfg_pull_none>;
+ };
+ };
+
+ npu {
+ /omit-if-no-ref/
+ npu_pins: npu-pins {
+ rockchip,pins =
+ /* npu_avs */
+ <0 RK_PC1 2 &pcfg_pull_none>;
+ };
+ };
+
+ pcie20 {
+ /omit-if-no-ref/
+ pcie20m0_pins: pcie20m0-pins {
+ rockchip,pins =
+ /* pcie20_clkreqnm0 */
+ <0 RK_PA5 3 &pcfg_pull_none>,
+ /* pcie20_perstnm0 */
+ <0 RK_PB6 3 &pcfg_pull_none>,
+ /* pcie20_wakenm0 */
+ <0 RK_PB5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie20m1_pins: pcie20m1-pins {
+ rockchip,pins =
+ /* pcie20_clkreqnm1 */
+ <2 RK_PD0 4 &pcfg_pull_none>,
+ /* pcie20_perstnm1 */
+ <3 RK_PC1 4 &pcfg_pull_none>,
+ /* pcie20_wakenm1 */
+ <2 RK_PD1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie20m2_pins: pcie20m2-pins {
+ rockchip,pins =
+ /* pcie20_clkreqnm2 */
+ <1 RK_PB0 4 &pcfg_pull_none>,
+ /* pcie20_perstnm2 */
+ <1 RK_PB2 4 &pcfg_pull_none>,
+ /* pcie20_wakenm2 */
+ <1 RK_PB1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie20_buttonrstn: pcie20-buttonrstn {
+ rockchip,pins =
+ /* pcie20_buttonrstn */
+ <0 RK_PB4 3 &pcfg_pull_none>;
+ };
+ };
+
+ pcie30x1 {
+ /omit-if-no-ref/
+ pcie30x1m0_pins: pcie30x1m0-pins {
+ rockchip,pins =
+ /* pcie30x1_clkreqnm0 */
+ <0 RK_PA4 3 &pcfg_pull_none>,
+ /* pcie30x1_perstnm0 */
+ <0 RK_PC3 3 &pcfg_pull_none>,
+ /* pcie30x1_wakenm0 */
+ <0 RK_PC2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie30x1m1_pins: pcie30x1m1-pins {
+ rockchip,pins =
+ /* pcie30x1_clkreqnm1 */
+ <2 RK_PD2 4 &pcfg_pull_none>,
+ /* pcie30x1_perstnm1 */
+ <3 RK_PA1 4 &pcfg_pull_none>,
+ /* pcie30x1_wakenm1 */
+ <2 RK_PD3 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie30x1m2_pins: pcie30x1m2-pins {
+ rockchip,pins =
+ /* pcie30x1_clkreqnm2 */
+ <1 RK_PA5 4 &pcfg_pull_none>,
+ /* pcie30x1_perstnm2 */
+ <1 RK_PA2 4 &pcfg_pull_none>,
+ /* pcie30x1_wakenm2 */
+ <1 RK_PA3 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie30x1_buttonrstn: pcie30x1-buttonrstn {
+ rockchip,pins =
+ /* pcie30x1_buttonrstn */
+ <0 RK_PB3 3 &pcfg_pull_none>;
+ };
+ };
+
+ pcie30x2 {
+ /omit-if-no-ref/
+ pcie30x2m0_pins: pcie30x2m0-pins {
+ rockchip,pins =
+ /* pcie30x2_clkreqnm0 */
+ <0 RK_PA6 2 &pcfg_pull_none>,
+ /* pcie30x2_perstnm0 */
+ <0 RK_PC6 3 &pcfg_pull_none>,
+ /* pcie30x2_wakenm0 */
+ <0 RK_PC5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie30x2m1_pins: pcie30x2m1-pins {
+ rockchip,pins =
+ /* pcie30x2_clkreqnm1 */
+ <2 RK_PD4 4 &pcfg_pull_none>,
+ /* pcie30x2_perstnm1 */
+ <2 RK_PD6 4 &pcfg_pull_none>,
+ /* pcie30x2_wakenm1 */
+ <2 RK_PD5 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie30x2m2_pins: pcie30x2m2-pins {
+ rockchip,pins =
+ /* pcie30x2_clkreqnm2 */
+ <4 RK_PC2 4 &pcfg_pull_none>,
+ /* pcie30x2_perstnm2 */
+ <4 RK_PC4 4 &pcfg_pull_none>,
+ /* pcie30x2_wakenm2 */
+ <4 RK_PC3 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pcie30x2_buttonrstn: pcie30x2-buttonrstn {
+ rockchip,pins =
+ /* pcie30x2_buttonrstn */
+ <0 RK_PB0 3 &pcfg_pull_none>;
+ };
+ };
+
+ pdm {
+ /omit-if-no-ref/
+ pdmm0_clk: pdmm0-clk {
+ rockchip,pins =
+ /* pdm_clk0m0 */
+ <1 RK_PA6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_clk1: pdmm0-clk1 {
+ rockchip,pins =
+ /* pdmm0_clk1 */
+ <1 RK_PA4 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi0: pdmm0-sdi0 {
+ rockchip,pins =
+ /* pdmm0_sdi0 */
+ <1 RK_PB3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi1: pdmm0-sdi1 {
+ rockchip,pins =
+ /* pdmm0_sdi1 */
+ <1 RK_PB2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi2: pdmm0-sdi2 {
+ rockchip,pins =
+ /* pdmm0_sdi2 */
+ <1 RK_PB1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm0_sdi3: pdmm0-sdi3 {
+ rockchip,pins =
+ /* pdmm0_sdi3 */
+ <1 RK_PB0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_clk: pdmm1-clk {
+ rockchip,pins =
+ /* pdm_clk0m1 */
+ <3 RK_PD6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_clk1: pdmm1-clk1 {
+ rockchip,pins =
+ /* pdmm1_clk1 */
+ <4 RK_PA0 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi0: pdmm1-sdi0 {
+ rockchip,pins =
+ /* pdmm1_sdi0 */
+ <3 RK_PD7 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi1: pdmm1-sdi1 {
+ rockchip,pins =
+ /* pdmm1_sdi1 */
+ <4 RK_PA1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi2: pdmm1-sdi2 {
+ rockchip,pins =
+ /* pdmm1_sdi2 */
+ <4 RK_PA2 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm1_sdi3: pdmm1-sdi3 {
+ rockchip,pins =
+ /* pdmm1_sdi3 */
+ <4 RK_PA3 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm2_clk1: pdmm2-clk1 {
+ rockchip,pins =
+ /* pdmm2_clk1 */
+ <3 RK_PC4 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm2_sdi0: pdmm2-sdi0 {
+ rockchip,pins =
+ /* pdmm2_sdi0 */
+ <3 RK_PB3 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm2_sdi1: pdmm2-sdi1 {
+ rockchip,pins =
+ /* pdmm2_sdi1 */
+ <3 RK_PB4 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm2_sdi2: pdmm2-sdi2 {
+ rockchip,pins =
+ /* pdmm2_sdi2 */
+ <3 RK_PB7 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdmm2_sdi3: pdmm2-sdi3 {
+ rockchip,pins =
+ /* pdmm2_sdi3 */
+ <3 RK_PC0 5 &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ /omit-if-no-ref/
+ pmic_pins: pmic-pins {
+ rockchip,pins =
+ /* pmic_sleep */
+ <0 RK_PA2 1 &pcfg_pull_none>;
+ };
+ };
+
+ pmu {
+ /omit-if-no-ref/
+ pmu_pins: pmu-pins {
+ rockchip,pins =
+ /* pmu_debug0 */
+ <0 RK_PA5 4 &pcfg_pull_none>,
+ /* pmu_debug1 */
+ <0 RK_PA6 3 &pcfg_pull_none>,
+ /* pmu_debug2 */
+ <0 RK_PC4 4 &pcfg_pull_none>,
+ /* pmu_debug3 */
+ <0 RK_PC5 4 &pcfg_pull_none>,
+ /* pmu_debug4 */
+ <0 RK_PC6 4 &pcfg_pull_none>,
+ /* pmu_debug5 */
+ <0 RK_PC7 4 &pcfg_pull_none>;
+ };
+ };
+
+ pwm0 {
+ /omit-if-no-ref/
+ pwm0m0_pins: pwm0m0-pins {
+ rockchip,pins =
+ /* pwm0_m0 */
+ <0 RK_PB7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm0m1_pins: pwm0m1-pins {
+ rockchip,pins =
+ /* pwm0_m1 */
+ <0 RK_PC7 2 &pcfg_pull_none>;
+ };
+ };
+
+ pwm1 {
+ /omit-if-no-ref/
+ pwm1m0_pins: pwm1m0-pins {
+ rockchip,pins =
+ /* pwm1_m0 */
+ <0 RK_PC0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm1m1_pins: pwm1m1-pins {
+ rockchip,pins =
+ /* pwm1_m1 */
+ <0 RK_PB5 4 &pcfg_pull_none>;
+ };
+ };
+
+ pwm2 {
+ /omit-if-no-ref/
+ pwm2m0_pins: pwm2m0-pins {
+ rockchip,pins =
+ /* pwm2_m0 */
+ <0 RK_PC1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm2m1_pins: pwm2m1-pins {
+ rockchip,pins =
+ /* pwm2_m1 */
+ <0 RK_PB6 4 &pcfg_pull_none>;
+ };
+ };
+
+ pwm3 {
+ /omit-if-no-ref/
+ pwm3_pins: pwm3-pins {
+ rockchip,pins =
+ /* pwm3_ir */
+ <0 RK_PC2 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm4 {
+ /omit-if-no-ref/
+ pwm4_pins: pwm4-pins {
+ rockchip,pins =
+ /* pwm4 */
+ <0 RK_PC3 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm5 {
+ /omit-if-no-ref/
+ pwm5_pins: pwm5-pins {
+ rockchip,pins =
+ /* pwm5 */
+ <0 RK_PC4 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm6 {
+ /omit-if-no-ref/
+ pwm6_pins: pwm6-pins {
+ rockchip,pins =
+ /* pwm6 */
+ <0 RK_PC5 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm7 {
+ /omit-if-no-ref/
+ pwm7_pins: pwm7-pins {
+ rockchip,pins =
+ /* pwm7_ir */
+ <0 RK_PC6 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm8 {
+ /omit-if-no-ref/
+ pwm8m0_pins: pwm8m0-pins {
+ rockchip,pins =
+ /* pwm8_m0 */
+ <3 RK_PB1 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm8m1_pins: pwm8m1-pins {
+ rockchip,pins =
+ /* pwm8_m1 */
+ <1 RK_PD5 4 &pcfg_pull_none>;
+ };
+ };
+
+ pwm9 {
+ /omit-if-no-ref/
+ pwm9m0_pins: pwm9m0-pins {
+ rockchip,pins =
+ /* pwm9_m0 */
+ <3 RK_PB2 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm9m1_pins: pwm9m1-pins {
+ rockchip,pins =
+ /* pwm9_m1 */
+ <1 RK_PD6 4 &pcfg_pull_none>;
+ };
+ };
+
+ pwm10 {
+ /omit-if-no-ref/
+ pwm10m0_pins: pwm10m0-pins {
+ rockchip,pins =
+ /* pwm10_m0 */
+ <3 RK_PB5 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm10m1_pins: pwm10m1-pins {
+ rockchip,pins =
+ /* pwm10_m1 */
+ <2 RK_PA1 2 &pcfg_pull_none>;
+ };
+ };
+
+ pwm11 {
+ /omit-if-no-ref/
+ pwm11m0_pins: pwm11m0-pins {
+ rockchip,pins =
+ /* pwm11_irm0 */
+ <3 RK_PB6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm11m1_pins: pwm11m1-pins {
+ rockchip,pins =
+ /* pwm11_irm1 */
+ <4 RK_PC0 3 &pcfg_pull_none>;
+ };
+ };
+
+ pwm12 {
+ /omit-if-no-ref/
+ pwm12m0_pins: pwm12m0-pins {
+ rockchip,pins =
+ /* pwm12_m0 */
+ <3 RK_PB7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm12m1_pins: pwm12m1-pins {
+ rockchip,pins =
+ /* pwm12_m1 */
+ <4 RK_PC5 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm13 {
+ /omit-if-no-ref/
+ pwm13m0_pins: pwm13m0-pins {
+ rockchip,pins =
+ /* pwm13_m0 */
+ <3 RK_PC0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm13m1_pins: pwm13m1-pins {
+ rockchip,pins =
+ /* pwm13_m1 */
+ <4 RK_PC6 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm14 {
+ /omit-if-no-ref/
+ pwm14m0_pins: pwm14m0-pins {
+ rockchip,pins =
+ /* pwm14_m0 */
+ <3 RK_PC4 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm14m1_pins: pwm14m1-pins {
+ rockchip,pins =
+ /* pwm14_m1 */
+ <4 RK_PC2 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm15 {
+ /omit-if-no-ref/
+ pwm15m0_pins: pwm15m0-pins {
+ rockchip,pins =
+ /* pwm15_irm0 */
+ <3 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pwm15m1_pins: pwm15m1-pins {
+ rockchip,pins =
+ /* pwm15_irm1 */
+ <4 RK_PC3 1 &pcfg_pull_none>;
+ };
+ };
+
+ refclk {
+ /omit-if-no-ref/
+ refclk_pins: refclk-pins {
+ rockchip,pins =
+ /* refclk_ou */
+ <0 RK_PA0 1 &pcfg_pull_none>;
+ };
+ };
+
+ sata {
+ /omit-if-no-ref/
+ sata_pins: sata-pins {
+ rockchip,pins =
+ /* sata_cpdet */
+ <0 RK_PA4 2 &pcfg_pull_none>,
+ /* sata_cppod */
+ <0 RK_PA6 1 &pcfg_pull_none>,
+ /* sata_mpswitch */
+ <0 RK_PA5 2 &pcfg_pull_none>;
+ };
+ };
+
+ sata0 {
+ /omit-if-no-ref/
+ sata0_pins: sata0-pins {
+ rockchip,pins =
+ /* sata0_actled */
+ <4 RK_PC6 3 &pcfg_pull_none>;
+ };
+ };
+
+ sata1 {
+ /omit-if-no-ref/
+ sata1_pins: sata1-pins {
+ rockchip,pins =
+ /* sata1_actled */
+ <4 RK_PC5 3 &pcfg_pull_none>;
+ };
+ };
+
+ sata2 {
+ /omit-if-no-ref/
+ sata2_pins: sata2-pins {
+ rockchip,pins =
+ /* sata2_actled */
+ <4 RK_PC4 3 &pcfg_pull_none>;
+ };
+ };
+
+ scr {
+ /omit-if-no-ref/
+ scr_pins: scr-pins {
+ rockchip,pins =
+ /* scr_clk */
+ <1 RK_PA2 3 &pcfg_pull_none>,
+ /* scr_det */
+ <1 RK_PA7 3 &pcfg_pull_up>,
+ /* scr_io */
+ <1 RK_PA3 3 &pcfg_pull_up>,
+ /* scr_rst */
+ <1 RK_PA5 3 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc0 {
+ /omit-if-no-ref/
+ sdmmc0_bus4: sdmmc0-bus4 {
+ rockchip,pins =
+ /* sdmmc0_d0 */
+ <1 RK_PD5 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc0_d1 */
+ <1 RK_PD6 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc0_d2 */
+ <1 RK_PD7 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc0_d3 */
+ <2 RK_PA0 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_clk: sdmmc0-clk {
+ rockchip,pins =
+ /* sdmmc0_clk */
+ <2 RK_PA2 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_cmd: sdmmc0-cmd {
+ rockchip,pins =
+ /* sdmmc0_cmd */
+ <2 RK_PA1 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_det: sdmmc0-det {
+ rockchip,pins =
+ /* sdmmc0_det */
+ <0 RK_PA4 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc0_pwren: sdmmc0-pwren {
+ rockchip,pins =
+ /* sdmmc0_pwren */
+ <0 RK_PA5 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc1 {
+ /omit-if-no-ref/
+ sdmmc1_bus4: sdmmc1-bus4 {
+ rockchip,pins =
+ /* sdmmc1_d0 */
+ <2 RK_PA3 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc1_d1 */
+ <2 RK_PA4 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc1_d2 */
+ <2 RK_PA5 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc1_d3 */
+ <2 RK_PA6 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_clk: sdmmc1-clk {
+ rockchip,pins =
+ /* sdmmc1_clk */
+ <2 RK_PB0 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_cmd: sdmmc1-cmd {
+ rockchip,pins =
+ /* sdmmc1_cmd */
+ <2 RK_PA7 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_det: sdmmc1-det {
+ rockchip,pins =
+ /* sdmmc1_det */
+ <2 RK_PB2 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc1_pwren: sdmmc1-pwren {
+ rockchip,pins =
+ /* sdmmc1_pwren */
+ <2 RK_PB1 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc2 {
+ /omit-if-no-ref/
+ sdmmc2m0_bus4: sdmmc2m0-bus4 {
+ rockchip,pins =
+ /* sdmmc2_d0m0 */
+ <3 RK_PC6 3 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc2_d1m0 */
+ <3 RK_PC7 3 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc2_d2m0 */
+ <3 RK_PD0 3 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc2_d3m0 */
+ <3 RK_PD1 3 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m0_clk: sdmmc2m0-clk {
+ rockchip,pins =
+ /* sdmmc2_clkm0 */
+ <3 RK_PD3 3 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m0_cmd: sdmmc2m0-cmd {
+ rockchip,pins =
+ /* sdmmc2_cmdm0 */
+ <3 RK_PD2 3 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m0_det: sdmmc2m0-det {
+ rockchip,pins =
+ /* sdmmc2_detm0 */
+ <3 RK_PD4 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m0_pwren: sdmmc2m0-pwren {
+ rockchip,pins =
+ /* sdmmc2m0_pwren */
+ <3 RK_PD5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m1_bus4: sdmmc2m1-bus4 {
+ rockchip,pins =
+ /* sdmmc2_d0m1 */
+ <3 RK_PA1 5 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc2_d1m1 */
+ <3 RK_PA2 5 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc2_d2m1 */
+ <3 RK_PA3 5 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc2_d3m1 */
+ <3 RK_PA4 5 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m1_clk: sdmmc2m1-clk {
+ rockchip,pins =
+ /* sdmmc2_clkm1 */
+ <3 RK_PA6 5 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m1_cmd: sdmmc2m1-cmd {
+ rockchip,pins =
+ /* sdmmc2_cmdm1 */
+ <3 RK_PA5 5 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m1_det: sdmmc2m1-det {
+ rockchip,pins =
+ /* sdmmc2_detm1 */
+ <3 RK_PA7 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc2m1_pwren: sdmmc2m1-pwren {
+ rockchip,pins =
+ /* sdmmc2m1_pwren */
+ <3 RK_PB0 4 &pcfg_pull_none>;
+ };
+ };
+
+ spdif {
+ /omit-if-no-ref/
+ spdifm0_tx: spdifm0-tx {
+ rockchip,pins =
+ /* spdifm0_tx */
+ <1 RK_PA4 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spdifm1_tx: spdifm1-tx {
+ rockchip,pins =
+ /* spdifm1_tx */
+ <3 RK_PC5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spdifm2_tx: spdifm2-tx {
+ rockchip,pins =
+ /* spdifm2_tx */
+ <4 RK_PC4 2 &pcfg_pull_none>;
+ };
+ };
+
+ spi0 {
+ /omit-if-no-ref/
+ spi0m0_pins: spi0m0-pins {
+ rockchip,pins =
+ /* spi0_clkm0 */
+ <0 RK_PB5 2 &pcfg_pull_none>,
+ /* spi0_misom0 */
+ <0 RK_PC5 2 &pcfg_pull_none>,
+ /* spi0_mosim0 */
+ <0 RK_PB6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi0m0_cs0: spi0m0-cs0 {
+ rockchip,pins =
+ /* spi0_cs0m0 */
+ <0 RK_PC6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi0m0_cs1: spi0m0-cs1 {
+ rockchip,pins =
+ /* spi0_cs1m0 */
+ <0 RK_PC4 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi0m1_pins: spi0m1-pins {
+ rockchip,pins =
+ /* spi0_clkm1 */
+ <2 RK_PD3 3 &pcfg_pull_none>,
+ /* spi0_misom1 */
+ <2 RK_PD0 3 &pcfg_pull_none>,
+ /* spi0_mosim1 */
+ <2 RK_PD1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi0m1_cs0: spi0m1-cs0 {
+ rockchip,pins =
+ /* spi0_cs0m1 */
+ <2 RK_PD2 3 &pcfg_pull_none>;
+ };
+ };
+
+ spi1 {
+ /omit-if-no-ref/
+ spi1m0_pins: spi1m0-pins {
+ rockchip,pins =
+ /* spi1_clkm0 */
+ <2 RK_PB5 3 &pcfg_pull_none>,
+ /* spi1_misom0 */
+ <2 RK_PB6 3 &pcfg_pull_none>,
+ /* spi1_mosim0 */
+ <2 RK_PB7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi1m0_cs0: spi1m0-cs0 {
+ rockchip,pins =
+ /* spi1_cs0m0 */
+ <2 RK_PC0 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi1m0_cs1: spi1m0-cs1 {
+ rockchip,pins =
+ /* spi1_cs1m0 */
+ <2 RK_PC6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi1m1_pins: spi1m1-pins {
+ rockchip,pins =
+ /* spi1_clkm1 */
+ <3 RK_PC3 3 &pcfg_pull_none>,
+ /* spi1_misom1 */
+ <3 RK_PC2 3 &pcfg_pull_none>,
+ /* spi1_mosim1 */
+ <3 RK_PC1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi1m1_cs0: spi1m1-cs0 {
+ rockchip,pins =
+ /* spi1_cs0m1 */
+ <3 RK_PA1 3 &pcfg_pull_none>;
+ };
+ };
+
+ spi2 {
+ /omit-if-no-ref/
+ spi2m0_pins: spi2m0-pins {
+ rockchip,pins =
+ /* spi2_clkm0 */
+ <2 RK_PC1 4 &pcfg_pull_none>,
+ /* spi2_misom0 */
+ <2 RK_PC2 4 &pcfg_pull_none>,
+ /* spi2_mosim0 */
+ <2 RK_PC3 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi2m0_cs0: spi2m0-cs0 {
+ rockchip,pins =
+ /* spi2_cs0m0 */
+ <2 RK_PC4 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi2m0_cs1: spi2m0-cs1 {
+ rockchip,pins =
+ /* spi2_cs1m0 */
+ <2 RK_PC5 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_pins: spi2m1-pins {
+ rockchip,pins =
+ /* spi2_clkm1 */
+ <3 RK_PA0 3 &pcfg_pull_none>,
+ /* spi2_misom1 */
+ <2 RK_PD7 3 &pcfg_pull_none>,
+ /* spi2_mosim1 */
+ <2 RK_PD6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_cs0: spi2m1-cs0 {
+ rockchip,pins =
+ /* spi2_cs0m1 */
+ <2 RK_PD5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_cs1: spi2m1-cs1 {
+ rockchip,pins =
+ /* spi2_cs1m1 */
+ <2 RK_PD4 3 &pcfg_pull_none>;
+ };
+ };
+
+ spi3 {
+ /omit-if-no-ref/
+ spi3m0_pins: spi3m0-pins {
+ rockchip,pins =
+ /* spi3_clkm0 */
+ <4 RK_PB3 4 &pcfg_pull_none>,
+ /* spi3_misom0 */
+ <4 RK_PB0 4 &pcfg_pull_none>,
+ /* spi3_mosim0 */
+ <4 RK_PB2 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi3m0_cs0: spi3m0-cs0 {
+ rockchip,pins =
+ /* spi3_cs0m0 */
+ <4 RK_PA6 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi3m0_cs1: spi3m0-cs1 {
+ rockchip,pins =
+ /* spi3_cs1m0 */
+ <4 RK_PA7 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi3m1_pins: spi3m1-pins {
+ rockchip,pins =
+ /* spi3_clkm1 */
+ <4 RK_PC2 2 &pcfg_pull_none>,
+ /* spi3_misom1 */
+ <4 RK_PC5 2 &pcfg_pull_none>,
+ /* spi3_mosim1 */
+ <4 RK_PC3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi3m1_cs0: spi3m1-cs0 {
+ rockchip,pins =
+ /* spi3_cs0m1 */
+ <4 RK_PC6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spi3m1_cs1: spi3m1-cs1 {
+ rockchip,pins =
+ /* spi3_cs1m1 */
+ <4 RK_PD1 2 &pcfg_pull_none>;
+ };
+ };
+
+ tsadc {
+ /omit-if-no-ref/
+ tsadcm0_shut: tsadcm0-shut {
+ rockchip,pins =
+ /* tsadcm0_shut */
+ <0 RK_PA1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ tsadcm1_shut: tsadcm1-shut {
+ rockchip,pins =
+ /* tsadcm1_shut */
+ <0 RK_PA2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ tsadc_shutorg: tsadc-shutorg {
+ rockchip,pins =
+ /* tsadc_shutorg */
+ <0 RK_PA1 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart0 {
+ /omit-if-no-ref/
+ uart0_xfer: uart0-xfer {
+ rockchip,pins =
+ /* uart0_rx */
+ <0 RK_PC0 3 &pcfg_pull_up>,
+ /* uart0_tx */
+ <0 RK_PC1 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart0_ctsn: uart0-ctsn {
+ rockchip,pins =
+ /* uart0_ctsn */
+ <0 RK_PC7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart0_rtsn: uart0-rtsn {
+ rockchip,pins =
+ /* uart0_rtsn */
+ <0 RK_PC4 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart1 {
+ /omit-if-no-ref/
+ uart1m0_xfer: uart1m0-xfer {
+ rockchip,pins =
+ /* uart1_rxm0 */
+ <2 RK_PB3 2 &pcfg_pull_up>,
+ /* uart1_txm0 */
+ <2 RK_PB4 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart1m0_ctsn: uart1m0-ctsn {
+ rockchip,pins =
+ /* uart1m0_ctsn */
+ <2 RK_PB6 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart1m0_rtsn: uart1m0-rtsn {
+ rockchip,pins =
+ /* uart1m0_rtsn */
+ <2 RK_PB5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart1m1_xfer: uart1m1-xfer {
+ rockchip,pins =
+ /* uart1_rxm1 */
+ <3 RK_PD7 4 &pcfg_pull_up>,
+ /* uart1_txm1 */
+ <3 RK_PD6 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart1m1_ctsn: uart1m1-ctsn {
+ rockchip,pins =
+ /* uart1m1_ctsn */
+ <4 RK_PC1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart1m1_rtsn: uart1m1-rtsn {
+ rockchip,pins =
+ /* uart1m1_rtsn */
+ <4 RK_PB6 4 &pcfg_pull_none>;
+ };
+ };
+
+ uart2 {
+ /omit-if-no-ref/
+ uart2m0_xfer: uart2m0-xfer {
+ rockchip,pins =
+ /* uart2_rxm0 */
+ <0 RK_PD0 1 &pcfg_pull_up>,
+ /* uart2_txm0 */
+ <0 RK_PD1 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart2m1_xfer: uart2m1-xfer {
+ rockchip,pins =
+ /* uart2_rxm1 */
+ <1 RK_PD6 2 &pcfg_pull_up>,
+ /* uart2_txm1 */
+ <1 RK_PD5 2 &pcfg_pull_up>;
+ };
+ };
+
+ uart3 {
+ /omit-if-no-ref/
+ uart3m0_xfer: uart3m0-xfer {
+ rockchip,pins =
+ /* uart3_rxm0 */
+ <1 RK_PA0 2 &pcfg_pull_up>,
+ /* uart3_txm0 */
+ <1 RK_PA1 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart3m0_ctsn: uart3m0-ctsn {
+ rockchip,pins =
+ /* uart3m0_ctsn */
+ <1 RK_PA3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart3m0_rtsn: uart3m0-rtsn {
+ rockchip,pins =
+ /* uart3m0_rtsn */
+ <1 RK_PA2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart3m1_xfer: uart3m1-xfer {
+ rockchip,pins =
+ /* uart3_rxm1 */
+ <3 RK_PC0 4 &pcfg_pull_up>,
+ /* uart3_txm1 */
+ <3 RK_PB7 4 &pcfg_pull_up>;
+ };
+ };
+
+ uart4 {
+ /omit-if-no-ref/
+ uart4m0_xfer: uart4m0-xfer {
+ rockchip,pins =
+ /* uart4_rxm0 */
+ <1 RK_PA4 2 &pcfg_pull_up>,
+ /* uart4_txm0 */
+ <1 RK_PA6 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart4m0_ctsn: uart4m0-ctsn {
+ rockchip,pins =
+ /* uart4m0_ctsn */
+ <1 RK_PA7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart4m0_rtsn: uart4m0-rtsn {
+ rockchip,pins =
+ /* uart4m0_rtsn */
+ <1 RK_PA5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart4m1_xfer: uart4m1-xfer {
+ rockchip,pins =
+ /* uart4_rxm1 */
+ <3 RK_PB1 4 &pcfg_pull_up>,
+ /* uart4_txm1 */
+ <3 RK_PB2 4 &pcfg_pull_up>;
+ };
+ };
+
+ uart5 {
+ /omit-if-no-ref/
+ uart5m0_xfer: uart5m0-xfer {
+ rockchip,pins =
+ /* uart5_rxm0 */
+ <2 RK_PA1 3 &pcfg_pull_up>,
+ /* uart5_txm0 */
+ <2 RK_PA2 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart5m0_ctsn: uart5m0-ctsn {
+ rockchip,pins =
+ /* uart5m0_ctsn */
+ <1 RK_PD7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart5m0_rtsn: uart5m0-rtsn {
+ rockchip,pins =
+ /* uart5m0_rtsn */
+ <2 RK_PA0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart5m1_xfer: uart5m1-xfer {
+ rockchip,pins =
+ /* uart5_rxm1 */
+ <3 RK_PC3 4 &pcfg_pull_up>,
+ /* uart5_txm1 */
+ <3 RK_PC2 4 &pcfg_pull_up>;
+ };
+ };
+
+ uart6 {
+ /omit-if-no-ref/
+ uart6m0_xfer: uart6m0-xfer {
+ rockchip,pins =
+ /* uart6_rxm0 */
+ <2 RK_PA3 3 &pcfg_pull_up>,
+ /* uart6_txm0 */
+ <2 RK_PA4 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart6m0_ctsn: uart6m0-ctsn {
+ rockchip,pins =
+ /* uart6m0_ctsn */
+ <2 RK_PC0 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart6m0_rtsn: uart6m0-rtsn {
+ rockchip,pins =
+ /* uart6m0_rtsn */
+ <2 RK_PB7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart6m1_xfer: uart6m1-xfer {
+ rockchip,pins =
+ /* uart6_rxm1 */
+ <1 RK_PD6 3 &pcfg_pull_up>,
+ /* uart6_txm1 */
+ <1 RK_PD5 3 &pcfg_pull_up>;
+ };
+ };
+
+ uart7 {
+ /omit-if-no-ref/
+ uart7m0_xfer: uart7m0-xfer {
+ rockchip,pins =
+ /* uart7_rxm0 */
+ <2 RK_PA5 3 &pcfg_pull_up>,
+ /* uart7_txm0 */
+ <2 RK_PA6 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart7m0_ctsn: uart7m0-ctsn {
+ rockchip,pins =
+ /* uart7m0_ctsn */
+ <2 RK_PC2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart7m0_rtsn: uart7m0-rtsn {
+ rockchip,pins =
+ /* uart7m0_rtsn */
+ <2 RK_PC1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart7m1_xfer: uart7m1-xfer {
+ rockchip,pins =
+ /* uart7_rxm1 */
+ <3 RK_PC5 4 &pcfg_pull_up>,
+ /* uart7_txm1 */
+ <3 RK_PC4 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart7m2_xfer: uart7m2-xfer {
+ rockchip,pins =
+ /* uart7_rxm2 */
+ <4 RK_PA3 4 &pcfg_pull_up>,
+ /* uart7_txm2 */
+ <4 RK_PA2 4 &pcfg_pull_up>;
+ };
+ };
+
+ uart8 {
+ /omit-if-no-ref/
+ uart8m0_xfer: uart8m0-xfer {
+ rockchip,pins =
+ /* uart8_rxm0 */
+ <2 RK_PC6 2 &pcfg_pull_up>,
+ /* uart8_txm0 */
+ <2 RK_PC5 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart8m0_ctsn: uart8m0-ctsn {
+ rockchip,pins =
+ /* uart8m0_ctsn */
+ <2 RK_PB2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart8m0_rtsn: uart8m0-rtsn {
+ rockchip,pins =
+ /* uart8m0_rtsn */
+ <2 RK_PB1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart8m1_xfer: uart8m1-xfer {
+ rockchip,pins =
+ /* uart8_rxm1 */
+ <3 RK_PA0 4 &pcfg_pull_up>,
+ /* uart8_txm1 */
+ <2 RK_PD7 4 &pcfg_pull_up>;
+ };
+ };
+
+ uart9 {
+ /omit-if-no-ref/
+ uart9m0_xfer: uart9m0-xfer {
+ rockchip,pins =
+ /* uart9_rxm0 */
+ <2 RK_PA7 3 &pcfg_pull_up>,
+ /* uart9_txm0 */
+ <2 RK_PB0 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart9m0_ctsn: uart9m0-ctsn {
+ rockchip,pins =
+ /* uart9m0_ctsn */
+ <2 RK_PC4 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart9m0_rtsn: uart9m0-rtsn {
+ rockchip,pins =
+ /* uart9m0_rtsn */
+ <2 RK_PC3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart9m1_xfer: uart9m1-xfer {
+ rockchip,pins =
+ /* uart9_rxm1 */
+ <4 RK_PC6 4 &pcfg_pull_up>,
+ /* uart9_txm1 */
+ <4 RK_PC5 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart9m2_xfer: uart9m2-xfer {
+ rockchip,pins =
+ /* uart9_rxm2 */
+ <4 RK_PA5 4 &pcfg_pull_up>,
+ /* uart9_txm2 */
+ <4 RK_PA4 4 &pcfg_pull_up>;
+ };
+ };
+
+ vop {
+ /omit-if-no-ref/
+ vopm0_pins: vopm0-pins {
+ rockchip,pins =
+ /* vop_pwmm0 */
+ <0 RK_PC3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ vopm1_pins: vopm1-pins {
+ rockchip,pins =
+ /* vop_pwmm1 */
+ <3 RK_PC4 2 &pcfg_pull_none>;
+ };
+ };
+};
+
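Editorial note (not part of the patch): in the rockchip,pins entries above, each tuple follows the Rockchip pinctrl binding <bank pin mux &config>: the GPIO bank and pin, the iomux function number, and a phandle to one of the pcfg_* configuration nodes defined in rockchip-pinconf.dtsi. Read the first entry of gmac1m0_rgmii_clk, for instance, as:

	/* GPIO3_A7, iomux function 3, bias disabled */
	rockchip,pins = <3 RK_PA7 3 &pcfg_pull_none>;
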
+/*
+ * This part is edited by hand: it provides alternate SPI and GMAC pin
+ * groups with different bias and drive-strength settings.
+ */
+&pinctrl {
+ spi0-hs {
+ /omit-if-no-ref/
+ spi0m0_pins_hs: spi0m0-pins {
+ rockchip,pins =
+ /* spi0_clkm0 */
+ <0 RK_PB5 2 &pcfg_pull_up_drv_level_1>,
+ /* spi0_misom0 */
+ <0 RK_PC5 2 &pcfg_pull_up_drv_level_1>,
+ /* spi0_mosim0 */
+ <0 RK_PB6 2 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi0m0_cs0_hs: spi0m0-cs0 {
+ rockchip,pins =
+ /* spi0_cs0m0 */
+ <0 RK_PC6 2 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi0m0_cs1_hs: spi0m0-cs1 {
+ rockchip,pins =
+ /* spi0_cs1m0 */
+ <0 RK_PC4 2 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi0m1_pins_hs: spi0m1-pins {
+ rockchip,pins =
+ /* spi0_clkm1 */
+ <2 RK_PD3 3 &pcfg_pull_up_drv_level_1>,
+ /* spi0_misom1 */
+ <2 RK_PD0 3 &pcfg_pull_up_drv_level_1>,
+ /* spi0_mosim1 */
+ <2 RK_PD1 3 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi0m1_cs0_hs: spi0m1-cs0 {
+ rockchip,pins =
+ /* spi0_cs0m1 */
+ <2 RK_PD2 3 &pcfg_pull_up_drv_level_1>;
+ };
+ };
+
+ spi1-hs {
+ /omit-if-no-ref/
+ spi1m0_pins_hs: spi1m0-pins {
+ rockchip,pins =
+ /* spi1_clkm0 */
+ <2 RK_PB5 3 &pcfg_pull_up_drv_level_1>,
+ /* spi1_misom0 */
+ <2 RK_PB6 3 &pcfg_pull_up_drv_level_1>,
+ /* spi1_mosim0 */
+ <2 RK_PB7 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi1m0_cs0_hs: spi1m0-cs0 {
+ rockchip,pins =
+ /* spi1_cs0m0 */
+ <2 RK_PC0 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi1m0_cs1_hs: spi1m0-cs1 {
+ rockchip,pins =
+ /* spi1_cs1m0 */
+ <2 RK_PC6 3 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi1m1_pins_hs: spi1m1-pins {
+ rockchip,pins =
+ /* spi1_clkm1 */
+ <3 RK_PC3 3 &pcfg_pull_up_drv_level_1>,
+ /* spi1_misom1 */
+ <3 RK_PC2 3 &pcfg_pull_up_drv_level_1>,
+ /* spi1_mosim1 */
+ <3 RK_PC1 3 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi1m1_cs0_hs: spi1m1-cs0 {
+ rockchip,pins =
+ /* spi1_cs0m1 */
+ <3 RK_PA1 3 &pcfg_pull_up_drv_level_1>;
+ };
+ };
+
+ spi2-hs {
+ /omit-if-no-ref/
+ spi2m0_pins_hs: spi2m0-pins {
+ rockchip,pins =
+ /* spi2_clkm0 */
+ <2 RK_PC1 4 &pcfg_pull_up_drv_level_1>,
+ /* spi2_misom0 */
+ <2 RK_PC2 4 &pcfg_pull_up_drv_level_1>,
+ /* spi2_mosim0 */
+ <2 RK_PC3 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi2m0_cs0_hs: spi2m0-cs0 {
+ rockchip,pins =
+ /* spi2_cs0m0 */
+ <2 RK_PC4 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi2m0_cs1_hs: spi2m0-cs1 {
+ rockchip,pins =
+ /* spi2_cs1m0 */
+ <2 RK_PC5 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_pins_hs: spi2m1-pins {
+ rockchip,pins =
+ /* spi2_clkm1 */
+ <3 RK_PA0 3 &pcfg_pull_up_drv_level_1>,
+ /* spi2_misom1 */
+ <2 RK_PD7 3 &pcfg_pull_up_drv_level_1>,
+ /* spi2_mosim1 */
+ <2 RK_PD6 3 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_cs0_hs: spi2m1-cs0 {
+ rockchip,pins =
+ /* spi2_cs0m1 */
+ <2 RK_PD5 3 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi2m1_cs1_hs: spi2m1-cs1 {
+ rockchip,pins =
+ /* spi2_cs1m1 */
+ <2 RK_PD4 3 &pcfg_pull_up_drv_level_1>;
+ };
+ };
+
+ spi3-hs {
+ /omit-if-no-ref/
+ spi3m0_pins_hs: spi3m0-pins {
+ rockchip,pins =
+ /* spi3_clkm0 */
+ <4 RK_PB3 4 &pcfg_pull_up_drv_level_1>,
+ /* spi3_misom0 */
+ <4 RK_PB0 4 &pcfg_pull_up_drv_level_1>,
+ /* spi3_mosim0 */
+ <4 RK_PB2 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi3m0_cs0_hs: spi3m0-cs0 {
+ rockchip,pins =
+ /* spi3_cs0m0 */
+ <4 RK_PA6 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi3m0_cs1_hs: spi3m0-cs1 {
+ rockchip,pins =
+ /* spi3_cs1m0 */
+ <4 RK_PA7 4 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi3m1_pins_hs: spi3m1-pins {
+ rockchip,pins =
+ /* spi3_clkm1 */
+ <4 RK_PC2 2 &pcfg_pull_up_drv_level_1>,
+ /* spi3_misom1 */
+ <4 RK_PC5 2 &pcfg_pull_up_drv_level_1>,
+ /* spi3_mosim1 */
+ <4 RK_PC3 2 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi3m1_cs0_hs: spi3m1-cs0 {
+ rockchip,pins =
+ /* spi3_cs0m1 */
+ <4 RK_PC6 2 &pcfg_pull_up_drv_level_1>;
+ };
+
+ /omit-if-no-ref/
+ spi3m1_cs1_hs: spi3m1-cs1 {
+ rockchip,pins =
+ /* spi3_cs1m1 */
+ <4 RK_PD1 2 &pcfg_pull_up_drv_level_1>;
+ };
+ };
+
+ gmac-txd-level3 {
+ /omit-if-no-ref/
+ gmac0_tx_bus2_level3: gmac0-tx-bus2-level3 {
+ rockchip,pins =
+ /* gmac0_txd0 */
+ <2 RK_PB3 1 &pcfg_pull_none_drv_level_3>,
+ /* gmac0_txd1 */
+ <2 RK_PB4 1 &pcfg_pull_none_drv_level_3>,
+ /* gmac0_txen */
+ <2 RK_PB5 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac0_rgmii_bus_level3: gmac0-rgmii-bus-level3 {
+ rockchip,pins =
+ /* gmac0_rxd2 */
+ <2 RK_PA3 2 &pcfg_pull_none>,
+ /* gmac0_rxd3 */
+ <2 RK_PA4 2 &pcfg_pull_none>,
+ /* gmac0_txd2 */
+ <2 RK_PA6 2 &pcfg_pull_none_drv_level_3>,
+ /* gmac0_txd3 */
+ <2 RK_PA7 2 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_tx_bus2_level3: gmac1m0-tx-bus2-level3 {
+ rockchip,pins =
+ /* gmac1_txd0m0 */
+ <3 RK_PB5 3 &pcfg_pull_none_drv_level_3>,
+ /* gmac1_txd1m0 */
+ <3 RK_PB6 3 &pcfg_pull_none_drv_level_3>,
+ /* gmac1_txenm0 */
+ <3 RK_PB7 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_rgmii_bus_level3: gmac1m0-rgmii-bus-level3 {
+ rockchip,pins =
+ /* gmac1_rxd2m0 */
+ <3 RK_PA4 3 &pcfg_pull_none>,
+ /* gmac1_rxd3m0 */
+ <3 RK_PA5 3 &pcfg_pull_none>,
+ /* gmac1_txd2m0 */
+ <3 RK_PA2 3 &pcfg_pull_none_drv_level_3>,
+ /* gmac1_txd3m0 */
+ <3 RK_PA3 3 &pcfg_pull_none_drv_level_3>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_tx_bus2_level3: gmac1m1-tx-bus2-level3 {
+ rockchip,pins =
+ /* gmac1_txd0m1 */
+ <4 RK_PA4 3 &pcfg_pull_none_drv_level_3>,
+ /* gmac1_txd1m1 */
+ <4 RK_PA5 3 &pcfg_pull_none_drv_level_3>,
+ /* gmac1_txenm1 */
+ <4 RK_PA6 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_rgmii_bus_level3: gmac1m1-rgmii-bus-level3 {
+ rockchip,pins =
+ /* gmac1_rxd2m1 */
+ <4 RK_PA1 3 &pcfg_pull_none>,
+ /* gmac1_rxd3m1 */
+ <4 RK_PA2 3 &pcfg_pull_none>,
+ /* gmac1_txd2m1 */
+ <3 RK_PD6 3 &pcfg_pull_none_drv_level_3>,
+ /* gmac1_txd3m1 */
+ <3 RK_PD7 3 &pcfg_pull_none_drv_level_3>;
+ };
+ };
+
+ gmac-txc-level2 {
+ /omit-if-no-ref/
+ gmac0_rgmii_clk_level2: gmac0-rgmii-clk-level2 {
+ rockchip,pins =
+ /* gmac0_rxclk */
+ <2 RK_PA5 2 &pcfg_pull_none>,
+ /* gmac0_txclk */
+ <2 RK_PB0 2 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m0_rgmii_clk_level2: gmac1m0-rgmii-clk-level2 {
+ rockchip,pins =
+ /* gmac1_rxclkm0 */
+ <3 RK_PA7 3 &pcfg_pull_none>,
+ /* gmac1_txclkm0 */
+ <3 RK_PA6 3 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ gmac1m1_rgmii_clk_level2: gmac1m1-rgmii-clk-level2 {
+ rockchip,pins =
+ /* gmac1_rxclkm1 */
+ <4 RK_PA3 3 &pcfg_pull_none>,
+ /* gmac1_txclkm1 */
+ <4 RK_PA0 3 &pcfg_pull_none_drv_level_2>;
+ };
+ };
+};
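
As an illustrative aside rather than part of the patch, a board device tree would consume these groups through pinctrl-0/pinctrl-1 properties on the peripheral node, switching to the hand-edited *_hs variants for a high-speed state. The spi1 controller label referenced below is assumed to exist in the SoC dtsi (it is not part of this hunk), so treat the snippet as a sketch only:

	&spi1 {
		pinctrl-names = "default", "high_speed";
		pinctrl-0 = <&spi1m1_cs0 &spi1m1_pins>;
		pinctrl-1 = <&spi1m1_cs0_hs &spi1m1_pins_hs>;
		status = "okay";
	};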
diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
new file mode 100644
index 000000000000..d225e6a45d5c
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ */
+
+#include <dt-bindings/clock/rk3568-cru.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ compatible = "rockchip,rk3568";
+
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ serial5 = &uart5;
+ serial6 = &uart6;
+ serial7 = &uart7;
+ serial8 = &uart8;
+ serial9 = &uart9;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x0>;
+ clocks = <&scmi_clk 0>;
+ enable-method = "psci";
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+ };
+
+ cpu0_opp_table: cpu0-opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-408000000 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <900000 900000 1150000>;
+ clock-latency-ns = <40000>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <900000 900000 1150000>;
+ };
+
+ opp-816000000 {
+ opp-hz = /bits/ 64 <816000000>;
+ opp-microvolt = <900000 900000 1150000>;
+ opp-suspend;
+ };
+
+ opp-1104000000 {
+ opp-hz = /bits/ 64 <1104000000>;
+ opp-microvolt = <900000 900000 1150000>;
+ };
+
+ opp-1416000000 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <900000 900000 1150000>;
+ };
+
+ opp-1608000000 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <975000 975000 1150000>;
+ };
+
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <1050000 1050000 1150000>;
+ };
+
+ opp-1992000000 {
+ opp-hz = /bits/ 64 <1992000000>;
+ opp-microvolt = <1150000 1150000 1150000>;
+ };
+ };
+
+ firmware {
+ scmi: scmi {
+ compatible = "arm,scmi-smc";
+ arm,smc-id = <0x82000010>;
+ shmem = <&scmi_shmem>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+ };
+ };
+
+ pmu {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 230 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ arm,no-tick-in-suspend;
+ };
+
+ xin24m: xin24m {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xin24m";
+ #clock-cells = <0>;
+ };
+
+ xin32k: xin32k {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ pinctrl-0 = <&clk32k_out0>;
+ pinctrl-names = "default";
+ #clock-cells = <0>;
+ };
+
+ sram@10f000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x0010f000 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x0010f000 0x100>;
+
+ scmi_shmem: sram@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x100>;
+ };
+ };
+
+ gic: interrupt-controller@fd400000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0xfd400000 0 0x10000>, /* GICD */
+ <0x0 0xfd460000 0 0x80000>; /* GICR */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ mbi-alias = <0x0 0xfd100000>;
+ mbi-ranges = <296 24>;
+ msi-controller;
+ };
+
+ pmugrf: syscon@fdc20000 {
+ compatible = "rockchip,rk3568-pmugrf", "syscon", "simple-mfd";
+ reg = <0x0 0xfdc20000 0x0 0x10000>;
+ };
+
+ grf: syscon@fdc60000 {
+ compatible = "rockchip,rk3568-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xfdc60000 0x0 0x10000>;
+ };
+
+ pmucru: clock-controller@fdd00000 {
+ compatible = "rockchip,rk3568-pmucru";
+ reg = <0x0 0xfdd00000 0x0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ cru: clock-controller@fdd20000 {
+ compatible = "rockchip,rk3568-cru";
+ reg = <0x0 0xfdd20000 0x0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ i2c0: i2c@fdd40000 {
+ compatible = "rockchip,rk3568-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfdd40000 0x0 0x1000>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmucru CLK_I2C0>, <&pmucru PCLK_I2C0>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart0: serial@fdd50000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfdd50000 0x0 0x100>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmucru SCLK_UART0>, <&pmucru PCLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 0>, <&dmac0 1>;
+ pinctrl-0 = <&uart0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ sdmmc2: mmc@fe000000 {
+ compatible = "rockchip,rk3568-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xfe000000 0x0 0x4000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC2>, <&cru CLK_SDMMC2>,
+ <&cru SCLK_SDMMC2_DRV>, <&cru SCLK_SDMMC2_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <150000000>;
+ resets = <&cru SRST_SDMMC2>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ sdmmc0: mmc@fe2b0000 {
+ compatible = "rockchip,rk3568-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xfe2b0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC0>, <&cru CLK_SDMMC0>,
+ <&cru SCLK_SDMMC0_DRV>, <&cru SCLK_SDMMC0_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <150000000>;
+ resets = <&cru SRST_SDMMC0>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ sdmmc1: mmc@fe2c0000 {
+ compatible = "rockchip,rk3568-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xfe2c0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC1>, <&cru CLK_SDMMC1>,
+ <&cru SCLK_SDMMC1_DRV>, <&cru SCLK_SDMMC1_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
+ max-frequency = <150000000>;
+ resets = <&cru SRST_SDMMC1>;
+ reset-names = "reset";
+ status = "disabled";
+ };
+
+ sdhci: mmc@fe310000 {
+ compatible = "rockchip,rk3568-dwcmshc";
+ reg = <0x0 0xfe310000 0x0 0x10000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ assigned-clocks = <&cru BCLK_EMMC>, <&cru TCLK_EMMC>;
+ assigned-clock-rates = <200000000>, <24000000>;
+ clocks = <&cru CCLK_EMMC>, <&cru HCLK_EMMC>,
+ <&cru ACLK_EMMC>, <&cru BCLK_EMMC>,
+ <&cru TCLK_EMMC>;
+ clock-names = "core", "bus", "axi", "block", "timer";
+ status = "disabled";
+ };
+
+ dmac0: dmac@fe530000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xfe530000 0x0 0x4000>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_BUS>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ dmac1: dmac@fe550000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xfe550000 0x0 0x4000>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_BUS>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
+ i2c1: i2c@fe5a0000 {
+ compatible = "rockchip,rk3568-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfe5a0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c1_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@fe5b0000 {
+ compatible = "rockchip,rk3568-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfe5b0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c2m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@fe5c0000 {
+ compatible = "rockchip,rk3568-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfe5c0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c3m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@fe5d0000 {
+ compatible = "rockchip,rk3568-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfe5d0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_I2C4>, <&cru PCLK_I2C4>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c4m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@fe5e0000 {
+ compatible = "rockchip,rk3568-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xfe5e0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru CLK_I2C5>, <&cru PCLK_I2C5>;
+ clock-names = "i2c", "pclk";
+ pinctrl-0 = <&i2c5m0_xfer>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ uart1: serial@fe650000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe650000 0x0 0x100>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 2>, <&dmac0 3>;
+ pinctrl-0 = <&uart1m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart2: serial@fe660000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe660000 0x0 0x100>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 4>, <&dmac0 5>;
+ pinctrl-0 = <&uart2m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart3: serial@fe670000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe670000 0x0 0x100>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 6>, <&dmac0 7>;
+ pinctrl-0 = <&uart3m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart4: serial@fe680000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe680000 0x0 0x100>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 8>, <&dmac0 9>;
+ pinctrl-0 = <&uart4m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart5: serial@fe690000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe690000 0x0 0x100>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART5>, <&cru PCLK_UART5>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 10>, <&dmac0 11>;
+ pinctrl-0 = <&uart5m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart6: serial@fe6a0000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe6a0000 0x0 0x100>;
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART6>, <&cru PCLK_UART6>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 12>, <&dmac0 13>;
+ pinctrl-0 = <&uart6m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart7: serial@fe6b0000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe6b0000 0x0 0x100>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART7>, <&cru PCLK_UART7>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 14>, <&dmac0 15>;
+ pinctrl-0 = <&uart7m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart8: serial@fe6c0000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe6c0000 0x0 0x100>;
+ interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART8>, <&cru PCLK_UART8>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 16>, <&dmac0 17>;
+ pinctrl-0 = <&uart8m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart9: serial@fe6d0000 {
+ compatible = "rockchip,rk3568-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xfe6d0000 0x0 0x100>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART9>, <&cru PCLK_UART9>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac0 18>, <&dmac0 19>;
+ pinctrl-0 = <&uart9m0_xfer>;
+ pinctrl-names = "default";
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3568-pinctrl";
+ rockchip,grf = <&grf>;
+ rockchip,pmu = <&pmugrf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@fdd60000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfdd60000 0x0 0x100>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmucru PCLK_GPIO0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@fe740000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfe740000 0x0 0x100>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@fe750000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfe750000 0x0 0x100>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@fe760000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfe760000 0x0 0x100>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@fe770000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xfe770000 0x0 0x100>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
+
+#include "rk3568-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/rockchip-pinconf.dtsi b/arch/arm64/boot/dts/rockchip/rockchip-pinconf.dtsi
new file mode 100644
index 000000000000..5c645437b507
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rockchip-pinconf.dtsi
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ */
+
+&pinctrl {
+ /omit-if-no-ref/
+ pcfg_pull_up: pcfg-pull-up {
+ bias-pull-up;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down: pcfg-pull-down {
+ bias-pull-down;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none: pcfg-pull-none {
+ bias-disable;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_0: pcfg-pull-none-drv-level-0 {
+ bias-disable;
+ drive-strength = <0>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_1: pcfg-pull-none-drv-level-1 {
+ bias-disable;
+ drive-strength = <1>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_2: pcfg-pull-none-drv-level-2 {
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_3: pcfg-pull-none-drv-level-3 {
+ bias-disable;
+ drive-strength = <3>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_4: pcfg-pull-none-drv-level-4 {
+ bias-disable;
+ drive-strength = <4>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_5: pcfg-pull-none-drv-level-5 {
+ bias-disable;
+ drive-strength = <5>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_6: pcfg-pull-none-drv-level-6 {
+ bias-disable;
+ drive-strength = <6>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_7: pcfg-pull-none-drv-level-7 {
+ bias-disable;
+ drive-strength = <7>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_8: pcfg-pull-none-drv-level-8 {
+ bias-disable;
+ drive-strength = <8>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_9: pcfg-pull-none-drv-level-9 {
+ bias-disable;
+ drive-strength = <9>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_10: pcfg-pull-none-drv-level-10 {
+ bias-disable;
+ drive-strength = <10>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_11: pcfg-pull-none-drv-level-11 {
+ bias-disable;
+ drive-strength = <11>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_12: pcfg-pull-none-drv-level-12 {
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_13: pcfg-pull-none-drv-level-13 {
+ bias-disable;
+ drive-strength = <13>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_14: pcfg-pull-none-drv-level-14 {
+ bias-disable;
+ drive-strength = <14>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_15: pcfg-pull-none-drv-level-15 {
+ bias-disable;
+ drive-strength = <15>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_0: pcfg-pull-up-drv-level-0 {
+ bias-pull-up;
+ drive-strength = <0>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_1: pcfg-pull-up-drv-level-1 {
+ bias-pull-up;
+ drive-strength = <1>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_2: pcfg-pull-up-drv-level-2 {
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_3: pcfg-pull-up-drv-level-3 {
+ bias-pull-up;
+ drive-strength = <3>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_4: pcfg-pull-up-drv-level-4 {
+ bias-pull-up;
+ drive-strength = <4>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_5: pcfg-pull-up-drv-level-5 {
+ bias-pull-up;
+ drive-strength = <5>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_6: pcfg-pull-up-drv-level-6 {
+ bias-pull-up;
+ drive-strength = <6>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_7: pcfg-pull-up-drv-level-7 {
+ bias-pull-up;
+ drive-strength = <7>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_8: pcfg-pull-up-drv-level-8 {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_9: pcfg-pull-up-drv-level-9 {
+ bias-pull-up;
+ drive-strength = <9>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_10: pcfg-pull-up-drv-level-10 {
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_11: pcfg-pull-up-drv-level-11 {
+ bias-pull-up;
+ drive-strength = <11>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_12: pcfg-pull-up-drv-level-12 {
+ bias-pull-up;
+ drive-strength = <12>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_13: pcfg-pull-up-drv-level-13 {
+ bias-pull-up;
+ drive-strength = <13>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_14: pcfg-pull-up-drv-level-14 {
+ bias-pull-up;
+ drive-strength = <14>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_drv_level_15: pcfg-pull-up-drv-level-15 {
+ bias-pull-up;
+ drive-strength = <15>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_0: pcfg-pull-down-drv-level-0 {
+ bias-pull-down;
+ drive-strength = <0>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_1: pcfg-pull-down-drv-level-1 {
+ bias-pull-down;
+ drive-strength = <1>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_2: pcfg-pull-down-drv-level-2 {
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_3: pcfg-pull-down-drv-level-3 {
+ bias-pull-down;
+ drive-strength = <3>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_4: pcfg-pull-down-drv-level-4 {
+ bias-pull-down;
+ drive-strength = <4>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_5: pcfg-pull-down-drv-level-5 {
+ bias-pull-down;
+ drive-strength = <5>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_6: pcfg-pull-down-drv-level-6 {
+ bias-pull-down;
+ drive-strength = <6>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_7: pcfg-pull-down-drv-level-7 {
+ bias-pull-down;
+ drive-strength = <7>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_8: pcfg-pull-down-drv-level-8 {
+ bias-pull-down;
+ drive-strength = <8>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_9: pcfg-pull-down-drv-level-9 {
+ bias-pull-down;
+ drive-strength = <9>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_10: pcfg-pull-down-drv-level-10 {
+ bias-pull-down;
+ drive-strength = <10>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_11: pcfg-pull-down-drv-level-11 {
+ bias-pull-down;
+ drive-strength = <11>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_12: pcfg-pull-down-drv-level-12 {
+ bias-pull-down;
+ drive-strength = <12>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_13: pcfg-pull-down-drv-level-13 {
+ bias-pull-down;
+ drive-strength = <13>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_14: pcfg-pull-down-drv-level-14 {
+ bias-pull-down;
+ drive-strength = <14>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_drv_level_15: pcfg-pull-down-drv-level-15 {
+ bias-pull-down;
+ drive-strength = <15>;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_up_smt: pcfg-pull-up-smt {
+ bias-pull-up;
+ input-schmitt-enable;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_down_smt: pcfg-pull-down-smt {
+ bias-pull-down;
+ input-schmitt-enable;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_smt: pcfg-pull-none-smt {
+ bias-disable;
+ input-schmitt-enable;
+ };
+
+ /omit-if-no-ref/
+ pcfg_pull_none_drv_level_0_smt: pcfg-pull-none-drv-level-0-smt {
+ bias-disable;
+ drive-strength = <0>;
+ input-schmitt-enable;
+ };
+
+ /omit-if-no-ref/
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ /omit-if-no-ref/
+ pcfg_output_low: pcfg-output-low {
+ output-low;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index ca59d1f711f8..02c3fdf9cc46 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -5,6 +5,17 @@
* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <dt-bindings/phy/phy-cadence.h>
+#include <dt-bindings/phy/phy-ti.h>
+
+/ {
+ serdes_refclk: clock-cmnrefclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ };
+};
+
&cbass_main {
oc_sram: sram@70000000 {
compatible = "mmio-sram";
@@ -13,8 +24,30 @@
#size-cells = <1>;
ranges = <0x0 0x00 0x70000000 0x200000>;
- atf-sram@0 {
- reg = <0x0 0x1a000>;
+ tfa-sram@1c0000 {
+ reg = <0x1c0000 0x20000>;
+ };
+
+ dmsc-sram@1e0000 {
+ reg = <0x1e0000 0x1c000>;
+ };
+
+ sproxy-sram@1fc000 {
+ reg = <0x1fc000 0x4000>;
+ };
+ };
+
+ main_conf: syscon@43000000 {
+ compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
+ reg = <0x0 0x43000000 0x0 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x43000000 0x20000>;
+
+ serdes_ln_ctrl: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x4080 0x3>; /* SERDES0 lane0 select */
};
};
@@ -189,8 +222,6 @@
main_uart0: serial@2800000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02800000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -202,8 +233,6 @@
main_uart1: serial@2810000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02810000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -215,8 +244,6 @@
main_uart2: serial@2820000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02820000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -228,8 +255,6 @@
main_uart3: serial@2830000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02830000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -241,8 +266,6 @@
main_uart4: serial@2840000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02840000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -254,8 +277,6 @@
main_uart5: serial@2850000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02850000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -267,8 +288,6 @@
main_uart6: serial@2860000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x02860000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -489,7 +508,8 @@
ti,mac-only;
label = "port1";
phys = <&phy_gmii_sel 1>;
- mac-address = [00 00 de ad be ef];
+ mac-address = [00 00 00 00 00 00];
+ ti,syscon-efuse = <&main_conf 0x200>;
};
cpsw_port2: port@2 {
@@ -497,7 +517,7 @@
ti,mac-only;
label = "port2";
phys = <&phy_gmii_sel 2>;
- mac-address = [00 01 de ad be ef];
+ mac-address = [00 00 00 00 00 00];
};
};
@@ -673,4 +693,170 @@
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <16>;
};
+
+ main_r5fss0: r5fss@78000000 {
+ compatible = "ti,am64-r5fss";
+ ti,cluster-mode = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x78000000 0x00 0x78000000 0x10000>,
+ <0x78100000 0x00 0x78100000 0x10000>,
+ <0x78200000 0x00 0x78200000 0x08000>,
+ <0x78300000 0x00 0x78300000 0x08000>;
+ power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
+
+ main_r5fss0_core0: r5f@78000000 {
+ compatible = "ti,am64-r5f";
+ reg = <0x78000000 0x00010000>,
+ <0x78100000 0x00010000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <121>;
+ ti,sci-proc-ids = <0x01 0xff>;
+ resets = <&k3_reset 121 1>;
+ firmware-name = "am64-main-r5f0_0-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+
+ main_r5fss0_core1: r5f@78200000 {
+ compatible = "ti,am64-r5f";
+ reg = <0x78200000 0x00008000>,
+ <0x78300000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <122>;
+ ti,sci-proc-ids = <0x02 0xff>;
+ resets = <&k3_reset 122 1>;
+ firmware-name = "am64-main-r5f0_1-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+ };
+
+ main_r5fss1: r5fss@78400000 {
+ compatible = "ti,am64-r5fss";
+ ti,cluster-mode = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x78400000 0x00 0x78400000 0x10000>,
+ <0x78500000 0x00 0x78500000 0x10000>,
+ <0x78600000 0x00 0x78600000 0x08000>,
+ <0x78700000 0x00 0x78700000 0x08000>;
+ power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
+
+ main_r5fss1_core0: r5f@78400000 {
+ compatible = "ti,am64-r5f";
+ reg = <0x78400000 0x00010000>,
+ <0x78500000 0x00010000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <123>;
+ ti,sci-proc-ids = <0x06 0xff>;
+ resets = <&k3_reset 123 1>;
+ firmware-name = "am64-main-r5f1_0-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+
+ main_r5fss1_core1: r5f@78600000 {
+ compatible = "ti,am64-r5f";
+ reg = <0x78600000 0x00008000>,
+ <0x78700000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <124>;
+ ti,sci-proc-ids = <0x07 0xff>;
+ resets = <&k3_reset 124 1>;
+ firmware-name = "am64-main-r5f1_1-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+ };
+
+ serdes_wiz0: wiz@f000000 {
+ compatible = "ti,am64-wiz-10g";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ power-domains = <&k3_pds 162 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 162 0>, <&k3_clks 162 1>, <&serdes_refclk>;
+ clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ num-lanes = <1>;
+ #reset-cells = <1>;
+ #clock-cells = <1>;
+ ranges = <0x0f000000 0x0 0x0f000000 0x00010000>;
+
+ assigned-clocks = <&k3_clks 162 1>;
+ assigned-clock-parents = <&k3_clks 162 5>;
+
+ serdes0: serdes@f000000 {
+ compatible = "ti,j721e-serdes-10g";
+ reg = <0x0f000000 0x00010000>;
+ reg-names = "torrent_phy";
+ resets = <&serdes_wiz0 0>;
+ reset-names = "torrent_reset";
+ clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
+ <&serdes_wiz0 TI_WIZ_PHY_EN_REFCLK>;
+ clock-names = "refclk", "phy_en_refclk";
+ assigned-clocks = <&serdes_wiz0 TI_WIZ_PLL0_REFCLK>,
+ <&serdes_wiz0 TI_WIZ_PLL1_REFCLK>,
+ <&serdes_wiz0 TI_WIZ_REFCLK_DIG>;
+ assigned-clock-parents = <&k3_clks 162 1>,
+ <&k3_clks 162 1>,
+ <&k3_clks 162 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+ };
+ };
+
+ pcie0_rc: pcie@f102000 {
+ compatible = "ti,am64-pcie-host", "ti,j721e-pcie-host";
+ reg = <0x00 0x0f102000 0x00 0x1000>,
+ <0x00 0x0f100000 0x00 0x400>,
+ <0x00 0x0d000000 0x00 0x00800000>,
+ <0x00 0x68000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ ti,syscon-pcie-ctrl = <&main_conf 0x4070>;
+ max-link-speed = <2>;
+ num-lanes = <1>;
+ power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 114 0>, <&serdes0 CDNS_TORRENT_REFCLK_DRIVER>;
+ clock-names = "fck", "pcie_refclk";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ cdns,no-bar-match-nbits = <64>;
+ vendor-id = <0x104c>;
+ device-id = <0xb010>;
+ msi-map = <0x0 &gic_its 0x0 0x10000>;
+ ranges = <0x01000000 0x00 0x68001000 0x00 0x68001000 0x00 0x0010000>,
+ <0x02000000 0x00 0x68011000 0x00 0x68011000 0x00 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x00000010 0x0>;
+ };
+
+ pcie0_ep: pcie-ep@f102000 {
+ compatible = "ti,am64-pcie-ep", "ti,j721e-pcie-ep";
+ reg = <0x00 0x0f102000 0x00 0x1000>,
+ <0x00 0x0f100000 0x00 0x400>,
+ <0x00 0x0d000000 0x00 0x00800000>,
+ <0x00 0x68000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>;
+ ti,syscon-pcie-ctrl = <&main_conf 0x4070>;
+ max-link-speed = <2>;
+ num-lanes = <1>;
+ power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 114 0>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <1>;
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
index deb19ae5e168..59cc58f7d0c8 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
@@ -9,8 +9,6 @@
mcu_uart0: serial@4a00000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x04a00000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -22,8 +20,6 @@
mcu_uart1: serial@4a10000 {
compatible = "ti,am64-uart", "ti,am654-uart";
reg = <0x00 0x04a10000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -87,7 +83,7 @@
};
mcu_gpio0: gpio@4201000 {
- compatible = "ti,am64-gpio", "keystone-gpio";
+ compatible = "ti,am64-gpio", "ti,keystone-gpio";
reg = <0x0 0x4201000 0x0 0x100>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index dad0efa961ed..030712221188 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -5,6 +5,8 @@
/dts-v1/;
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/mux/ti-serdes.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -36,6 +38,60 @@
alignment = <0x1000>;
no-map;
};
+
+ main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_memory_region: r5f-memory@a0100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_memory_region: r5f-memory@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss1_core0_dma_memory_region: r5f-dma-memory@a2000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss1_core0_memory_region: r5f-memory@a2100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss1_core1_dma_memory_region: r5f-dma-memory@a3000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss1_core1_memory_region: r5f-memory@a3100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a5000000 {
+ reg = <0x00 0xa5000000 0x00 0x00800000>;
+ alignment = <0x1000>;
+ no-map;
+ };
};
evm_12v0: fixedregulator-evm12v0 {
@@ -334,7 +390,7 @@
&main_spi0 {
pinctrl-names = "default";
pinctrl-0 = <&main_spi0_pins_default>;
- ti,pindir-d0-out-d1-in = <1>;
+ ti,pindir-d0-out-d1-in;
eeprom@0 {
compatible = "microchip,93lc46b";
reg = <0>;
@@ -466,3 +522,55 @@
&mailbox0_cluster7 {
status = "disabled";
};
+
+&main_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster2 &mbox_main_r5fss0_core0>;
+ memory-region = <&main_r5fss0_core0_dma_memory_region>,
+ <&main_r5fss0_core0_memory_region>;
+};
+
+&main_r5fss0_core1 {
+ mboxes = <&mailbox0_cluster2 &mbox_main_r5fss0_core1>;
+ memory-region = <&main_r5fss0_core1_dma_memory_region>,
+ <&main_r5fss0_core1_memory_region>;
+};
+
+&main_r5fss1_core0 {
+ mboxes = <&mailbox0_cluster4 &mbox_main_r5fss1_core0>;
+ memory-region = <&main_r5fss1_core0_dma_memory_region>,
+ <&main_r5fss1_core0_memory_region>;
+};
+
+&main_r5fss1_core1 {
+ mboxes = <&mailbox0_cluster4 &mbox_main_r5fss1_core1>;
+ memory-region = <&main_r5fss1_core1_dma_memory_region>,
+ <&main_r5fss1_core1_memory_region>;
+};
+
+&serdes_ln_ctrl {
+ idle-states = <AM64_SERDES0_LANE0_PCIE0>;
+};
+
+&serdes0 {
+ serdes0_pcie_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz0 1>;
+ };
+};
+
+&pcie0_rc {
+ reset-gpios = <&exp1 5 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <1>;
+};
+
+&pcie0_ep {
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <1>;
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index 8424cd071955..d3aa2901e6fd 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -5,6 +5,8 @@
/dts-v1/;
+#include <dt-bindings/mux/ti-serdes.h>
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/net/ti-dp83867.h>
#include "k3-am642.dtsi"
@@ -35,6 +37,60 @@
alignment = <0x1000>;
no-map;
};
+
+ main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_memory_region: r5f-memory@a0100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_memory_region: r5f-memory@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss1_core0_dma_memory_region: r5f-dma-memory@a2000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss1_core0_memory_region: r5f-memory@a2100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss1_core1_dma_memory_region: r5f-dma-memory@a3000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss1_core1_memory_region: r5f-memory@a3100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a5000000 {
+ reg = <0x00 0xa5000000 0x00 0x00800000>;
+ alignment = <0x1000>;
+ no-map;
+ };
};
vusb_main: fixed-regulator-vusb-main5v0 {
@@ -85,6 +141,12 @@
>;
};
+ main_usb0_pins_default: main-usb0-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x02a8, PIN_OUTPUT, 0) /* (E19) USB0_DRVVBUS */
+ >;
+ };
+
main_i2c1_pins_default: main-i2c1-pins-default {
pinctrl-single,pins = <
AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */
@@ -235,6 +297,33 @@
disable-wp;
};
+&serdes_ln_ctrl {
+ idle-states = <AM64_SERDES0_LANE0_USB>;
+};
+
+&serdes0 {
+ serdes0_usb_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_USB3>;
+ resets = <&serdes_wiz0 1>;
+ };
+};
+
+&usbss0 {
+ ti,vbus-divider;
+};
+
+&usb0 {
+ dr_mode = "host";
+ maximum-speed = "super-speed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_usb0_pins_default>;
+ phys = <&serdes0_usb_link>;
+ phy-names = "cdns3,usb3-phy";
+};
+
&cpsw3g {
pinctrl-names = "default";
pinctrl-0 = <&mdio1_pins_default
@@ -332,3 +421,35 @@
&mailbox0_cluster7 {
status = "disabled";
};
+
+&main_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster2 &mbox_main_r5fss0_core0>;
+ memory-region = <&main_r5fss0_core0_dma_memory_region>,
+ <&main_r5fss0_core0_memory_region>;
+};
+
+&main_r5fss0_core1 {
+ mboxes = <&mailbox0_cluster2 &mbox_main_r5fss0_core1>;
+ memory-region = <&main_r5fss0_core1_dma_memory_region>,
+ <&main_r5fss0_core1_memory_region>;
+};
+
+&main_r5fss1_core0 {
+ mboxes = <&mailbox0_cluster4 &mbox_main_r5fss1_core0>;
+ memory-region = <&main_r5fss1_core0_dma_memory_region>,
+ <&main_r5fss1_core0_memory_region>;
+};
+
+&main_r5fss1_core1 {
+ mboxes = <&mailbox0_cluster4 &mbox_main_r5fss1_core1>;
+ memory-region = <&main_r5fss1_core1_dma_memory_region>,
+ <&main_r5fss1_core1_memory_region>;
+};
+
+&pcie0_rc {
+ status = "disabled";
+};
+
+&pcie0_ep {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
index de763ca9251c..1008e9162ba2 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
@@ -555,6 +555,7 @@
pinctrl-0 = <&main_mmc1_pins_default>;
ti,driver-strength-ohm = <50>;
disable-wp;
+ no-1-8-v;
};
&usb0 {
@@ -575,7 +576,7 @@
#address-cells = <1>;
#size-cells= <0>;
- ti,pindir-d0-out-d1-in = <1>;
+ ti,pindir-d0-out-d1-in;
};
&tscadc0 {
@@ -653,3 +654,63 @@
&pcie1_ep {
status = "disabled";
};
+
+&mailbox0_cluster0 {
+ status = "disabled";
+};
+
+&mailbox0_cluster1 {
+ status = "disabled";
+};
+
+&mailbox0_cluster2 {
+ status = "disabled";
+};
+
+&mailbox0_cluster3 {
+ status = "disabled";
+};
+
+&mailbox0_cluster4 {
+ status = "disabled";
+};
+
+&mailbox0_cluster5 {
+ status = "disabled";
+};
+
+&mailbox0_cluster6 {
+ status = "disabled";
+};
+
+&mailbox0_cluster7 {
+ status = "disabled";
+};
+
+&mailbox0_cluster8 {
+ status = "disabled";
+};
+
+&mailbox0_cluster9 {
+ status = "disabled";
+};
+
+&mailbox0_cluster10 {
+ status = "disabled";
+};
+
+&mailbox0_cluster11 {
+ status = "disabled";
+};
+
+&icssg0_mdio {
+ status = "disabled";
+};
+
+&icssg1_mdio {
+ status = "disabled";
+};
+
+&icssg2_mdio {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 6cd3131eb9ff..ba4e5d3e1ed7 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -84,8 +84,6 @@
main_uart0: serial@2800000 {
compatible = "ti,am654-uart";
reg = <0x00 0x02800000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -95,8 +93,6 @@
main_uart1: serial@2810000 {
compatible = "ti,am654-uart";
reg = <0x00 0x02810000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
power-domains = <&k3_pds 147 TI_SCI_PD_EXCLUSIVE>;
@@ -105,8 +101,6 @@
main_uart2: serial@2820000 {
compatible = "ti,am654-uart";
reg = <0x00 0x02820000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>;
@@ -301,7 +295,6 @@
ti,otap-del-sel = <0x2>;
ti,trm-icp = <0x8>;
dma-coherent;
- no-1-8-v;
};
scm_conf: scm-conf@100000 {
@@ -1053,6 +1046,16 @@
reg-names = "iram", "control", "debug";
firmware-name = "am65x-txpru0_1-fw";
};
+
+ icssg0_mdio: mdio@32400 {
+ compatible = "ti,davinci_mdio";
+ reg = <0x32400 0x100>;
+ clocks = <&k3_clks 62 3>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ };
};
icssg1: icssg@b100000 {
@@ -1184,6 +1187,16 @@
reg-names = "iram", "control", "debug";
firmware-name = "am65x-txpru1_1-fw";
};
+
+ icssg1_mdio: mdio@32400 {
+ compatible = "ti,davinci_mdio";
+ reg = <0x32400 0x100>;
+ clocks = <&k3_clks 63 3>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ };
};
icssg2: icssg@b200000 {
@@ -1315,5 +1328,15 @@
reg-names = "iram", "control", "debug";
firmware-name = "am65x-txpru2_1-fw";
};
+
+ icssg2_mdio: mdio@32400 {
+ compatible = "ti,davinci_mdio";
+ reg = <0x32400 0x100>;
+ clocks = <&k3_clks 64 3>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index f5b8ef2f5f77..c93ff1520a0e 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -23,8 +23,6 @@
mcu_uart0: serial@40a00000 {
compatible = "ti,am654-uart";
reg = <0x00 0x40a00000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 565 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <96000000>;
current-speed = <115200>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index 7cb864b4d74a..9d21cdf6fce8 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -50,8 +50,6 @@
wkup_uart0: serial@42300000 {
compatible = "ti,am654-uart";
reg = <0x42300000 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
diff --git a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
index 4f7e3f2a6265..94bb5dd39122 100644
--- a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
+++ b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
@@ -59,3 +59,8 @@
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
};
+
+&mcu_r5fss0 {
+ /* lock-step mode not supported on this board */
+ ti,cluster-mode = <0>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index eddb2ffb93ca..cfbcebfa37c1 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -85,6 +85,38 @@
gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
};
};
+
+ evm_12v0: fixedregulator-evm12v0 {
+ /* main supply */
+ compatible = "regulator-fixed";
+ regulator-name = "evm_12v0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc3v3_io: fixedregulator-vcc3v3io {
+ /* Output of TPS54334 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&evm_12v0>;
+ };
+
+ vdd_mmc1_sd: fixedregulator-sd {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ vin-supply = <&vcc3v3_io>;
+ gpio = <&pca9554 4 GPIO_ACTIVE_HIGH>;
+ };
};
&wkup_pmx0 {
@@ -136,7 +168,7 @@
AM65X_WKUP_IOPAD(0x007c, PIN_INPUT, 0) /* (L5) MCU_RGMII1_RD2 */
AM65X_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* (M6) MCU_RGMII1_RD1 */
AM65X_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* (L6) MCU_RGMII1_RD0 */
- AM65X_WKUP_IOPAD(0x0070, PIN_INPUT, 0) /* (N1) MCU_RGMII1_TXC */
+ AM65X_WKUP_IOPAD(0x0070, PIN_OUTPUT, 0) /* (N1) MCU_RGMII1_TXC */
AM65X_WKUP_IOPAD(0x0074, PIN_INPUT, 0) /* (M1) MCU_RGMII1_RXC */
>;
};
@@ -299,7 +331,7 @@
pinctrl-0 = <&main_spi0_pins_default>;
#address-cells = <1>;
#size-cells= <0>;
- ti,pindir-d0-out-d1-in = <1>;
+ ti,pindir-d0-out-d1-in;
flash@0{
compatible = "jedec,spi-nor";
@@ -327,6 +359,7 @@
* disable sdhci1
*/
&sdhci1 {
+ vmmc-supply = <&vdd_mmc1_sd>;
pinctrl-names = "default";
pinctrl-0 = <&main_mmc1_pins_default>;
ti,driver-strength-ohm = <50>;
@@ -506,3 +539,15 @@
&dss {
status = "disabled";
};
+
+&icssg0_mdio {
+ status = "disabled";
+};
+
+&icssg1_mdio {
+ status = "disabled";
+};
+
+&icssg2_mdio {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
index bedd01b7a32c..d14f3c18b65f 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
@@ -90,7 +90,7 @@
J721E_WKUP_IOPAD(0x008c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
J721E_WKUP_IOPAD(0x0090, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
J721E_WKUP_IOPAD(0x0094, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
- J721E_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* MCU_RGMII1_TXC */
+ J721E_WKUP_IOPAD(0x0080, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */
J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index 19fea8adbcff..e8a41d09b45f 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -301,8 +301,6 @@
main_uart0: serial@2800000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02800000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -314,8 +312,6 @@
main_uart1: serial@2810000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02810000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -327,8 +323,6 @@
main_uart2: serial@2820000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02820000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -340,8 +334,6 @@
main_uart3: serial@2830000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02830000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -353,8 +345,6 @@
main_uart4: serial@2840000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02840000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -366,8 +356,6 @@
main_uart5: serial@2850000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02850000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -379,8 +367,6 @@
main_uart6: serial@2860000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02860000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -392,8 +378,6 @@
main_uart7: serial@2870000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02870000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -405,8 +389,6 @@
main_uart8: serial@2880000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02880000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -418,8 +400,6 @@
main_uart9: serial@2890000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02890000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -683,6 +663,7 @@
"otg";
maximum-speed = "super-speed";
dr_mode = "otg";
+ cdns,phyrst-a-enable;
};
};
@@ -696,7 +677,6 @@
<149>;
interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
ti,ngpio = <69>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
@@ -714,7 +694,6 @@
<158>;
interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
ti,ngpio = <69>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>;
@@ -732,7 +711,6 @@
<167>;
interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
ti,ngpio = <69>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 109 TI_SCI_PD_EXCLUSIVE>;
@@ -750,7 +728,6 @@
<176>;
interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
ti,ngpio = <69>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 5663fe3ea466..1044ec6c4b0d 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -73,8 +73,6 @@
wkup_uart0: serial@42300000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x42300000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -86,8 +84,6 @@
mcu_uart0: serial@40a00000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x40a00000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <96000000>;
current-speed = <115200>;
@@ -117,7 +113,6 @@
interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
ti,ngpio = <85>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>;
@@ -134,7 +129,6 @@
interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
ti,ngpio = <85>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 60764366e22b..8bd02d9e28ad 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -9,6 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/phy/phy-cadence.h>
/ {
chosen {
@@ -237,7 +238,7 @@
J721E_WKUP_IOPAD(0x007c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
J721E_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
- J721E_WKUP_IOPAD(0x0070, PIN_INPUT, 0) /* MCU_RGMII1_TXC */
+ J721E_WKUP_IOPAD(0x0070, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */
J721E_WKUP_IOPAD(0x0074, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
>;
};
@@ -358,7 +359,7 @@
};
&serdes3 {
- serdes3_usb_link: link@0 {
+ serdes3_usb_link: phy@0 {
reg = <0>;
cdns,num-lanes = <2>;
#phy-cells = <0>;
@@ -635,8 +636,45 @@
status = "disabled";
};
+&cmn_refclk1 {
+ clock-frequency = <100000000>;
+};
+
+&wiz0_pll1_refclk {
+ assigned-clocks = <&wiz0_pll1_refclk>;
+ assigned-clock-parents = <&cmn_refclk1>;
+};
+
+&wiz0_refclk_dig {
+ assigned-clocks = <&wiz0_refclk_dig>;
+ assigned-clock-parents = <&cmn_refclk1>;
+};
+
+&wiz1_pll1_refclk {
+ assigned-clocks = <&wiz1_pll1_refclk>;
+ assigned-clock-parents = <&cmn_refclk1>;
+};
+
+&wiz1_refclk_dig {
+ assigned-clocks = <&wiz1_refclk_dig>;
+ assigned-clock-parents = <&cmn_refclk1>;
+};
+
+&wiz2_pll1_refclk {
+ assigned-clocks = <&wiz2_pll1_refclk>;
+ assigned-clock-parents = <&cmn_refclk1>;
+};
+
+&wiz2_refclk_dig {
+ assigned-clocks = <&wiz2_refclk_dig>;
+ assigned-clock-parents = <&cmn_refclk1>;
+};
+
&serdes0 {
- serdes0_pcie_link: link@0 {
+ assigned-clocks = <&serdes0 CDNS_SIERRA_PLL_CMNLC>;
+ assigned-clock-parents = <&wiz0_pll1_refclk>;
+
+ serdes0_pcie_link: phy@0 {
reg = <0>;
cdns,num-lanes = <1>;
#phy-cells = <0>;
@@ -646,7 +684,10 @@
};
&serdes1 {
- serdes1_pcie_link: link@0 {
+ assigned-clocks = <&serdes1 CDNS_SIERRA_PLL_CMNLC>;
+ assigned-clock-parents = <&wiz1_pll1_refclk>;
+
+ serdes1_pcie_link: phy@0 {
reg = <0>;
cdns,num-lanes = <2>;
#phy-cells = <0>;
@@ -656,7 +697,10 @@
};
&serdes2 {
- serdes2_pcie_link: link@0 {
+ assigned-clocks = <&serdes2 CDNS_SIERRA_PLL_CMNLC>;
+ assigned-clock-parents = <&wiz2_pll1_refclk>;
+
+ serdes2_pcie_link: phy@0 {
reg = <0>;
cdns,num-lanes = <2>;
#phy-cells = <0>;
@@ -718,3 +762,11 @@
&dss {
status = "disabled";
};
+
+&icssg0_mdio {
+ status = "disabled";
+};
+
+&icssg1_mdio {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index 3bcafe4c1742..cf3482376c1e 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -8,6 +8,20 @@
#include <dt-bindings/mux/mux.h>
#include <dt-bindings/mux/ti-serdes.h>
+/ {
+ cmn_refclk: clock-cmnrefclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ };
+
+ cmn_refclk1: clock-cmnrefclk1 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ };
+};
+
&cbass_main {
msmc_ram: sram@70000000 {
compatible = "mmio-sram";
@@ -338,24 +352,12 @@
pinctrl-single,function-mask = <0xffffffff>;
};
- dummy_cmn_refclk: dummy-cmn-refclk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <100000000>;
- };
-
- dummy_cmn_refclk1: dummy-cmn-refclk1 {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <100000000>;
- };
-
serdes_wiz0: wiz@5000000 {
compatible = "ti,j721e-wiz-16g";
#address-cells = <1>;
#size-cells = <1>;
power-domains = <&k3_pds 292 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 292 5>, <&k3_clks 292 11>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 292 5>, <&k3_clks 292 11>, <&cmn_refclk>;
clock-names = "fck", "core_ref_clk", "ext_ref_clk";
assigned-clocks = <&k3_clks 292 11>, <&k3_clks 292 0>;
assigned-clock-parents = <&k3_clks 292 15>, <&k3_clks 292 4>;
@@ -364,21 +366,21 @@
ranges = <0x5000000 0x0 0x5000000 0x10000>;
wiz0_pll0_refclk: pll0-refclk {
- clocks = <&k3_clks 292 11>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 292 11>, <&cmn_refclk>;
#clock-cells = <0>;
assigned-clocks = <&wiz0_pll0_refclk>;
assigned-clock-parents = <&k3_clks 292 11>;
};
wiz0_pll1_refclk: pll1-refclk {
- clocks = <&k3_clks 292 0>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 292 0>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz0_pll1_refclk>;
assigned-clock-parents = <&k3_clks 292 0>;
};
wiz0_refclk_dig: refclk-dig {
- clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&cmn_refclk>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz0_refclk_dig>;
assigned-clock-parents = <&k3_clks 292 11>;
@@ -400,10 +402,13 @@
reg = <0x5000000 0x10000>;
#address-cells = <1>;
#size-cells = <0>;
+ #clock-cells = <1>;
resets = <&serdes_wiz0 0>;
reset-names = "sierra_reset";
- clocks = <&wiz0_cmn_refclk_dig_div>, <&wiz0_cmn_refclk1_dig_div>;
- clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
+ clocks = <&wiz0_cmn_refclk_dig_div>, <&wiz0_cmn_refclk1_dig_div>,
+ <&wiz0_pll0_refclk>, <&wiz0_pll1_refclk>;
+ clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div",
+ "pll0_refclk", "pll1_refclk";
};
};
@@ -412,7 +417,7 @@
#address-cells = <1>;
#size-cells = <1>;
power-domains = <&k3_pds 293 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 293 5>, <&k3_clks 293 13>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 293 5>, <&k3_clks 293 13>, <&cmn_refclk>;
clock-names = "fck", "core_ref_clk", "ext_ref_clk";
assigned-clocks = <&k3_clks 293 13>, <&k3_clks 293 0>;
assigned-clock-parents = <&k3_clks 293 17>, <&k3_clks 293 4>;
@@ -421,21 +426,21 @@
ranges = <0x5010000 0x0 0x5010000 0x10000>;
wiz1_pll0_refclk: pll0-refclk {
- clocks = <&k3_clks 293 13>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 293 13>, <&cmn_refclk>;
#clock-cells = <0>;
assigned-clocks = <&wiz1_pll0_refclk>;
assigned-clock-parents = <&k3_clks 293 13>;
};
wiz1_pll1_refclk: pll1-refclk {
- clocks = <&k3_clks 293 0>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 293 0>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz1_pll1_refclk>;
assigned-clock-parents = <&k3_clks 293 0>;
};
wiz1_refclk_dig: refclk-dig {
- clocks = <&k3_clks 293 13>, <&k3_clks 293 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 293 13>, <&k3_clks 293 0>, <&cmn_refclk>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz1_refclk_dig>;
assigned-clock-parents = <&k3_clks 293 13>;
@@ -457,10 +462,13 @@
reg = <0x5010000 0x10000>;
#address-cells = <1>;
#size-cells = <0>;
+ #clock-cells = <1>;
resets = <&serdes_wiz1 0>;
reset-names = "sierra_reset";
- clocks = <&wiz1_cmn_refclk_dig_div>, <&wiz1_cmn_refclk1_dig_div>;
- clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
+ clocks = <&wiz1_cmn_refclk_dig_div>, <&wiz1_cmn_refclk1_dig_div>,
+ <&wiz1_pll0_refclk>, <&wiz1_pll1_refclk>;
+ clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div",
+ "pll0_refclk", "pll1_refclk";
};
};
@@ -469,7 +477,7 @@
#address-cells = <1>;
#size-cells = <1>;
power-domains = <&k3_pds 294 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 294 5>, <&k3_clks 294 11>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 294 5>, <&k3_clks 294 11>, <&cmn_refclk>;
clock-names = "fck", "core_ref_clk", "ext_ref_clk";
assigned-clocks = <&k3_clks 294 11>, <&k3_clks 294 0>;
assigned-clock-parents = <&k3_clks 294 15>, <&k3_clks 294 4>;
@@ -478,21 +486,21 @@
ranges = <0x5020000 0x0 0x5020000 0x10000>;
wiz2_pll0_refclk: pll0-refclk {
- clocks = <&k3_clks 294 11>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 294 11>, <&cmn_refclk>;
#clock-cells = <0>;
assigned-clocks = <&wiz2_pll0_refclk>;
assigned-clock-parents = <&k3_clks 294 11>;
};
wiz2_pll1_refclk: pll1-refclk {
- clocks = <&k3_clks 294 0>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 294 0>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz2_pll1_refclk>;
assigned-clock-parents = <&k3_clks 294 0>;
};
wiz2_refclk_dig: refclk-dig {
- clocks = <&k3_clks 294 11>, <&k3_clks 294 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 294 11>, <&k3_clks 294 0>, <&cmn_refclk>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz2_refclk_dig>;
assigned-clock-parents = <&k3_clks 294 11>;
@@ -514,10 +522,13 @@
reg = <0x5020000 0x10000>;
#address-cells = <1>;
#size-cells = <0>;
+ #clock-cells = <1>;
resets = <&serdes_wiz2 0>;
reset-names = "sierra_reset";
- clocks = <&wiz2_cmn_refclk_dig_div>, <&wiz2_cmn_refclk1_dig_div>;
- clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
+ clocks = <&wiz2_cmn_refclk_dig_div>, <&wiz2_cmn_refclk1_dig_div>,
+ <&wiz2_pll0_refclk>, <&wiz2_pll1_refclk>;
+ clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div",
+ "pll0_refclk", "pll1_refclk";
};
};
@@ -526,7 +537,7 @@
#address-cells = <1>;
#size-cells = <1>;
power-domains = <&k3_pds 295 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 295 5>, <&k3_clks 295 9>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 295 5>, <&k3_clks 295 9>, <&cmn_refclk>;
clock-names = "fck", "core_ref_clk", "ext_ref_clk";
assigned-clocks = <&k3_clks 295 9>, <&k3_clks 295 0>;
assigned-clock-parents = <&k3_clks 295 13>, <&k3_clks 295 4>;
@@ -535,21 +546,21 @@
ranges = <0x5030000 0x0 0x5030000 0x10000>;
wiz3_pll0_refclk: pll0-refclk {
- clocks = <&k3_clks 295 9>, <&dummy_cmn_refclk>;
+ clocks = <&k3_clks 295 9>, <&cmn_refclk>;
#clock-cells = <0>;
assigned-clocks = <&wiz3_pll0_refclk>;
assigned-clock-parents = <&k3_clks 295 9>;
};
wiz3_pll1_refclk: pll1-refclk {
- clocks = <&k3_clks 295 0>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 295 0>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz3_pll1_refclk>;
assigned-clock-parents = <&k3_clks 295 0>;
};
wiz3_refclk_dig: refclk-dig {
- clocks = <&k3_clks 295 9>, <&k3_clks 295 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
+ clocks = <&k3_clks 295 9>, <&k3_clks 295 0>, <&cmn_refclk>, <&cmn_refclk1>;
#clock-cells = <0>;
assigned-clocks = <&wiz3_refclk_dig>;
assigned-clock-parents = <&k3_clks 295 9>;
@@ -571,10 +582,13 @@
reg = <0x5030000 0x10000>;
#address-cells = <1>;
#size-cells = <0>;
+ #clock-cells = <1>;
resets = <&serdes_wiz3 0>;
reset-names = "sierra_reset";
- clocks = <&wiz3_cmn_refclk_dig_div>, <&wiz3_cmn_refclk1_dig_div>;
- clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
+ clocks = <&wiz3_cmn_refclk_dig_div>, <&wiz3_cmn_refclk1_dig_div>,
+ <&wiz3_pll0_refclk>, <&wiz3_pll1_refclk>;
+ clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div",
+ "pll0_refclk", "pll1_refclk";
};
};
@@ -775,8 +789,6 @@
main_uart0: serial@2800000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02800000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -788,8 +800,6 @@
main_uart1: serial@2810000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02810000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -801,8 +811,6 @@
main_uart2: serial@2820000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02820000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -814,8 +822,6 @@
main_uart3: serial@2830000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02830000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -827,8 +833,6 @@
main_uart4: serial@2840000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02840000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -840,8 +844,6 @@
main_uart5: serial@2850000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02850000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -853,8 +855,6 @@
main_uart6: serial@2860000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02860000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -866,8 +866,6 @@
main_uart7: serial@2870000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02870000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -879,8 +877,6 @@
main_uart8: serial@2880000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02880000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -892,8 +888,6 @@
main_uart9: serial@2890000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x02890000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -1794,6 +1788,16 @@
reg-names = "iram", "control", "debug";
firmware-name = "j7-txpru0_1-fw";
};
+
+ icssg0_mdio: mdio@32400 {
+ compatible = "ti,davinci_mdio";
+ reg = <0x32400 0x100>;
+ clocks = <&k3_clks 119 1>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ };
};
icssg1: icssg@b100000 {
@@ -1925,5 +1929,15 @@
reg-names = "iram", "control", "debug";
firmware-name = "j7-txpru1_1-fw";
};
+
+ icssg1_mdio: mdio@32400 {
+ compatible = "ti,davinci_mdio";
+ reg = <0x32400 0x100>;
+ clocks = <&k3_clks 120 4>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 5e825e4d0306..d2dceda72fe9 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -73,8 +73,6 @@
wkup_uart0: serial@42300000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x42300000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 897 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
current-speed = <115200>;
@@ -86,8 +84,6 @@
mcu_uart0: serial@40a00000 {
compatible = "ti,j721e-uart", "ti,am654-uart";
reg = <0x00 0x40a00000 0x00 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
interrupts = <GIC_SPI 846 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <96000000>;
current-speed = <115200>;
diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts b/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts
index bf0620afe117..29a4d9fc1e47 100644
--- a/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts
+++ b/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts
@@ -68,3 +68,11 @@
&gpio {
status = "okay";
};
+
+&pwm_mux {
+ groups = "pwm0_gpio16_grp", "pwm1_gpio17_grp", "pwm2_gpio18_grp", "pwm3_gpio19_grp";
+};
+
+&pwm {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
index 17934fd9a14c..4b4231ff43cf 100644
--- a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
+++ b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
@@ -432,6 +432,15 @@
reg = <0 0x28330000 0 0x1000>;
status = "disabled";
};
+
+ pwm: pwm@241c0000 {
+ compatible = "toshiba,visconti-pwm";
+ reg = <0 0x241c0000 0 0x1000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm_mux>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708_pins.dtsi b/arch/arm64/boot/dts/toshiba/tmpv7708_pins.dtsi
index 34de00015a7f..a480c6ba5f5d 100644
--- a/arch/arm64/boot/dts/toshiba/tmpv7708_pins.dtsi
+++ b/arch/arm64/boot/dts/toshiba/tmpv7708_pins.dtsi
@@ -90,4 +90,9 @@
groups = "i2c8_grp";
bias-pull-up;
};
+
+ pwm_mux: pwm_mux {
+ function = "pwm";
+ };
+
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 08c6f769df9a..f423d08b9a71 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -289,6 +289,7 @@ CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=m
CONFIG_SCSI_UFS_HISI=y
+CONFIG_SCSI_UFS_EXYNOS=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -491,7 +492,6 @@ CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SH_MSIOF=m
CONFIG_SPI_SUN6I=y
CONFIG_SPI_SPIDEV=m
-CONFIG_MTK_PMIC_WRAP=m
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
@@ -530,6 +530,7 @@ CONFIG_GPIO_MXC=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_VISCONTI=y
CONFIG_GPIO_WCD934X=m
CONFIG_GPIO_XGENE=y
CONFIG_GPIO_XGENE_SB=y
@@ -662,6 +663,7 @@ CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
CONFIG_VIDEO_RENESAS_FDP1=m
CONFIG_VIDEO_RENESAS_FCP=m
CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_VIDEO_QCOM_VENUS=m
CONFIG_SDR_PLATFORM_DRIVERS=y
CONFIG_VIDEO_RCAR_DRIF=m
CONFIG_VIDEO_IMX219=m
@@ -701,6 +703,7 @@ CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m
CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_LONTIUM_LT8912B=m
CONFIG_DRM_NWL_MIPI_DSI=m
CONFIG_DRM_LONTIUM_LT9611=m
CONFIG_DRM_PARADE_PS8640=m
@@ -774,6 +777,8 @@ CONFIG_SND_SOC_AK4613=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
CONFIG_SND_SOC_GTM601=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
CONFIG_SND_SOC_PCM3168A_I2C=m
CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
@@ -795,6 +800,8 @@ CONFIG_USB_CONN_GPIO=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=m
+CONFIG_USB_XHCI_PCI_RENESAS=m
CONFIG_USB_XHCI_TEGRA=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_EXYNOS=y
@@ -866,6 +873,7 @@ CONFIG_MMC_DW_K3=y
CONFIG_MMC_DW_ROCKCHIP=y
CONFIG_MMC_SUNXI=y
CONFIG_MMC_BCM2835=y
+CONFIG_MMC_MTK=y
CONFIG_MMC_SDHCI_XENON=y
CONFIG_MMC_SDHCI_AM654=y
CONFIG_MMC_OWL=y
@@ -884,6 +892,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_LEDS_TRIGGER_PANIC=y
CONFIG_EDAC=y
CONFIG_EDAC_GHES=y
+CONFIG_EDAC_LAYERSCAPE=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_HYM8563=m
@@ -987,6 +996,7 @@ CONFIG_SM_GPUCC_8250=y
CONFIG_SM_DISPCC_8250=y
CONFIG_QCOM_HFPLL=y
CONFIG_CLK_GFM_LPASS_SM8250=m
+CONFIG_CLK_RCAR_USB2_CLOCK_SEL=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_MHU=y
@@ -1043,6 +1053,7 @@ CONFIG_ARCH_R8A77980=y
CONFIG_ARCH_R8A77990=y
CONFIG_ARCH_R8A77995=y
CONFIG_ARCH_R8A779A0=y
+CONFIG_ARCH_R9A07G044=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
@@ -1059,6 +1070,7 @@ CONFIG_RENESAS_RPCIF=m
CONFIG_IIO=y
CONFIG_EXYNOS_ADC=y
CONFIG_MAX9611=m
+CONFIG_QCOM_SPMI_VADC=m
CONFIG_QCOM_SPMI_ADC5=m
CONFIG_ROCKCHIP_SARADC=m
CONFIG_IIO_CROS_EC_SENSORS_CORE=m
@@ -1083,6 +1095,7 @@ CONFIG_PWM_SAMSUNG=y
CONFIG_PWM_SL28CPLD=m
CONFIG_PWM_SUN4I=m
CONFIG_PWM_TEGRA=m
+CONFIG_PWM_VISCONTI=m
CONFIG_SL28CPLD_INTC=y
CONFIG_QCOM_PDC=y
CONFIG_RESET_IMX7=y
@@ -1110,6 +1123,7 @@ CONFIG_PHY_ROCKCHIP_INNO_USB2=y
CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY=m
CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_SAMSUNG_UFS=y
CONFIG_PHY_UNIPHIER_USB2=y
CONFIG_PHY_UNIPHIER_USB3=y
CONFIG_PHY_TEGRA_XUSB=y
@@ -1148,6 +1162,7 @@ CONFIG_INTERCONNECT_QCOM_OSM_L3=m
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=m
+CONFIG_INTERCONNECT_QCOM_SM8350=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 26889dbfe904..64202010b700 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -3,7 +3,6 @@ generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += set_memory.h
generic-y += user.h
generated-y += cpucaps.h
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a9c0716e7440..a074459f8f2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 543c997eb3b7..5a228e203ef9 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -144,12 +144,6 @@ static __always_inline void icache_inval_all_pou(void)
dsb(ish);
}
-int set_memory_valid(unsigned long addr, int numpages, int enable);
-
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
-bool kernel_page_present(struct page *page);
-
#include <asm-generic/cacheflush.h>
#endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 21fa330f498d..b83fb24954b7 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -33,8 +33,7 @@
* EL2.
*/
.macro __init_el2_timers
- mrs x0, cnthctl_el2
- orr x0, x0, #3 // Enable EL1 physical timers
+ mov x0, #3 // Enable EL1 physical timers
msr cnthctl_el2, x0
msr cntvoff_el2, xzr // Clear virtual offset
.endm
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 5abf91e3494c..1242f71937f8 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -23,8 +23,7 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index d061176d57ea..aa855c6a0ae6 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,7 +8,7 @@
#ifndef __ASM_KFENCE_H
#define __ASM_KFENCE_H
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
static inline bool arch_kfence_init_pool(void) { return true; }
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 1a35a4473598..824a3655dd93 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -351,7 +351,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_addr_valid(addr) ({ \
__typeof__(addr) __addr = __tag_reset(addr); \
- __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+ __is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr)); \
})
void dump_mem_limit(void);
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index ed1b9dcf12b2..993a27ea6f54 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -41,7 +41,7 @@ void tag_clear_highpage(struct page *to);
typedef struct page *pgtable_t;
-extern int pfn_valid(unsigned long);
+int pfn_is_map_memory(unsigned long pfn);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 31fbab3d6f99..8433a2058eb1 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -86,6 +86,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
VM_BUG_ON(mm == &init_mm);
__pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c0ba8cdfa10a..f09bf5c02891 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -26,8 +26,6 @@
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-#define FIRST_USER_ADDRESS 0UL
-
#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
@@ -651,9 +649,9 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
return __pud_to_phys(pud);
}
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (unsigned long)__va(pud_page_paddr(pud));
+ return (pmd_t *)__va(pud_page_paddr(pud));
}
/* Find an entry in the second-level page table. */
@@ -712,9 +710,9 @@ static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
return __p4d_to_phys(p4d);
}
-static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+static inline pud_t *p4d_pgtable(p4d_t p4d)
{
- return (unsigned long)__va(p4d_page_paddr(p4d));
+ return (pud_t *)__va(p4d_page_paddr(p4d));
}
/* Find an entry in the first-level page table. */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index e58bca832dff..41b332c054ab 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -320,7 +320,17 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
- return regs->regs[0];
+ unsigned long val = regs->regs[0];
+
+ /*
+ * Audit currently uses regs_return_value() instead of
+ * syscall_get_return_value(). Apply the same sign-extension here until
+ * audit is updated to use syscall_get_return_value().
+ */
+ if (compat_user_mode(regs))
+ val = sign_extend64(val, 31);
+
+ return val;
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
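A minimal userland sketch of the sign-extension this hunk adds for compat tasks; sign_extend64() is re-implemented locally for illustration and the errno value is only an example, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's sign_extend64() helper. */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* A 32-bit task returning -EINTR (-4) leaves 0x00000000fffffffc in x0. */
	uint64_t x0 = 0xfffffffcULL;

	printf("raw    : %lld\n", (long long)x0);                    /* 4294967292 */
	printf("compat : %lld\n", (long long)sign_extend64(x0, 31)); /* -4 */
	return 0;
}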
diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
new file mode 100644
index 000000000000..0f740b781187
--- /dev/null
+++ b/arch/arm64/include/asm/set_memory.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARM64_SET_MEMORY_H
+#define _ASM_ARM64_SET_MEMORY_H
+
+#include <asm-generic/set_memory.h>
+
+bool can_set_direct_map(void);
+#define can_set_direct_map can_set_direct_map
+
+int set_memory_valid(unsigned long addr, int numpages, int enable);
+
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
+
+#endif /* _ASM_ARM64_SET_MEMORY_H */
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 99ad77df8f52..97ddc6c203b7 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -10,6 +10,7 @@
#include <linux/cpumask.h>
+#include <asm/smp.h>
#include <asm/types.h>
struct mpidr_hash {
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 1801399204d7..8aebc00c1718 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -35,7 +35,7 @@ struct stack_info {
* accounting information necessary for robust unwinding.
*
* @fp: The fp value in the frame record (or the real fp)
- * @pc: The fp value in the frame record (or the real lr)
+ * @pc: The lr value in the frame record (or the real lr)
*
* @stacks_done: Stacks which have been entirely unwound, for which it is no
* longer valid to unwind to.
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index cfc0672013f6..03e20895453a 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -29,22 +29,23 @@ static inline void syscall_rollback(struct task_struct *task,
regs->regs[0] = regs->orig_x0;
}
-
-static inline long syscall_get_error(struct task_struct *task,
- struct pt_regs *regs)
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
{
- unsigned long error = regs->regs[0];
+ unsigned long val = regs->regs[0];
if (is_compat_thread(task_thread_info(task)))
- error = sign_extend64(error, 31);
+ val = sign_extend64(val, 31);
- return IS_ERR_VALUE(error) ? error : 0;
+ return val;
}
-static inline long syscall_get_return_value(struct task_struct *task,
- struct pt_regs *regs)
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
{
- return regs->regs[0];
+ unsigned long error = syscall_get_return_value(task, regs);
+
+ return IS_ERR_VALUE(error) ? error : 0;
}
static inline void syscall_set_return_value(struct task_struct *task,
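A small sketch of how the reordered helpers now relate: the error accessor is just the (possibly sign-extended) return value filtered through the errno-range test. MAX_ERRNO and the macro below mirror IS_ERR_VALUE() but are written out here only for illustration.

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* Same shape as syscall_get_error() after this patch. */
static long get_error(long retval)
{
	return IS_ERR_VALUE(retval) ? retval : 0;
}

int main(void)
{
	printf("%ld\n", get_error(-4L));	/* -4: in the errno range        */
	printf("%ld\n", get_error(42L));	/*  0: a successful return value */
	return 0;
}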
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 673be2d1263c..305a7157c6a6 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -32,8 +32,6 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct mm_struct;
extern void __show_regs(struct pt_regs *);
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index f83a70e07df8..ce2ee8f1e361 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -20,5 +20,6 @@
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index cce308586fcc..3f1490bfb938 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector
# It's not safe to invoke KCOV when portions of the kernel environment aren't
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
-KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_entry-common.o := n
KCOV_INSTRUMENT_idle.o := n
# Object file lists.
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 125d5c9471ac..0ead8bfedf20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -81,6 +81,7 @@
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 12ce14a98b7c..db8b2e2d02c2 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -604,7 +604,7 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
__el0_fiq_handler_common(regs);
}
-static void __el0_error_handler_common(struct pt_regs *regs)
+static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index cfa2cfde3019..418b2bba1521 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -162,7 +162,9 @@ u64 __init kaslr_early_init(void)
* a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
* _stext) . This guarantees that the resulting region still
* covers [_stext, _etext], and that all relative branches can
- * be resolved without veneers.
+ * be resolved without veneers unless this region is exhausted
+ * and we fall back to a larger 2GB window in module_alloc()
+ * when ARM64_MODULE_PLTS is enabled.
*/
module_range = MODULES_VSIZE - (u64)(_etext - _stext);
module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 03ceabe4d912..213d56c14f60 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
+#include <linux/set_memory.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 69b3fde8759e..36f51b0e438a 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -193,18 +193,6 @@ void mte_check_tfsr_el1(void)
}
#endif
-static void update_gcr_el1_excl(u64 excl)
-{
-
- /*
- * Note that the mask controlled by the user via prctl() is an
- * include while GCR_EL1 accepts an exclude mask.
- * No need for ISB since this only affects EL0 currently, implicit
- * with ERET.
- */
- sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
-}
-
static void set_gcr_el1_excl(u64 excl)
{
current->thread.gcr_user_excl = excl;
@@ -265,7 +253,8 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;
- update_gcr_el1_excl(gcr_kernel_excl);
+ sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
+ isb();
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5ba0ed036dee..c8989b999250 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -70,8 +70,6 @@ EXPORT_SYMBOL(__stack_chk_guard);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -142,10 +140,7 @@ void machine_restart(char *cmd)
efi_reboot(reboot_mode, NULL);
/* Now call the architecture specific reboot code. */
- if (arm_pm_restart)
- arm_pm_restart(reboot_mode, cmd);
- else
- do_kernel_restart(cmd);
+ do_kernel_restart(cmd);
/*
* Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 499b6b2f9757..b381a1ee9ea7 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1862,7 +1862,7 @@ void syscall_trace_exit(struct pt_regs *regs)
audit_syscall_exit(regs);
if (flags & _TIF_SYSCALL_TRACEPOINT)
- trace_sys_exit(regs, regs_return_value(regs));
+ trace_sys_exit(regs, syscall_get_return_value(current, regs));
if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 8ed66142b088..be5f85b0a24d 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
+#include <linux/panic_notifier.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
@@ -292,10 +293,7 @@ u64 cpu_logical_map(unsigned int cpu)
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
*cmdline_p = boot_command_line;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f8192f4ae0b8..23036334f4dc 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -29,6 +29,7 @@
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
+#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>
@@ -890,7 +891,7 @@ static void do_signal(struct pt_regs *regs)
retval == -ERESTART_RESTARTBLOCK ||
(retval == -ERESTARTSYS &&
!(ksig.ka.sa.sa_flags & SA_RESTART)))) {
- regs->regs[0] = -EINTR;
+ syscall_set_return_value(current, regs, -EINTR, 0);
regs->pc = continue_addr;
}
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index d3d37f932b97..487381164ff6 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -32,20 +32,23 @@ SYM_FUNC_END(__arm_smccc_sve_check)
EXPORT_SYMBOL(__arm_smccc_sve_check)
.macro SMCCC instr
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
alternative_if ARM64_SVE
bl __arm_smccc_sve_check
alternative_else_nop_endif
\instr #0
- ldr x4, [sp]
+ ldr x4, [sp, #16]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
- ldr x4, [sp, #8]
+ ldr x4, [sp, #24]
cbz x4, 1f /* no quirk structure */
ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
b.ne 1f
str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
-1: ret
+1: ldp x29, x30, [sp], #16
+ ret
.endm
/*
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index b189de5ca6cb..8982a2b78acf 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -153,7 +153,7 @@ NOKPROBE_SYMBOL(walk_stackframe);
static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
- printk("%s %pS\n", loglvl, (void *)where);
+ printk("%s %pSb\n", loglvl, (void *)where);
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
@@ -218,7 +218,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
#ifdef CONFIG_STACKTRACE
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 263d6c1a525f..50a0f1a38e84 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -54,10 +54,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
ret = do_ni_syscall(regs, scno);
}
- if (is_compat_task())
- ret = lower_32_bits(ret);
-
- regs->regs[0] = ret;
+ syscall_set_return_value(current, regs, 0, ret);
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
@@ -115,7 +112,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* syscall. do_notify_resume() will send a signal to userspace
* before the syscall is restarted.
*/
- regs->regs[0] = -ERESTARTNOINTR;
+ syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
return;
}
@@ -136,7 +133,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* anyway.
*/
if (scno == NO_SYSCALL)
- regs->regs[0] = -ENOSYS;
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
scno = syscall_trace_enter(regs);
if (scno == NO_SYSCALL)
goto trace_exit;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e9a2b8f27792..0ca72f5cda41 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm->arch.return_nisv_io_abort_to_user = true;
break;
case KVM_CAP_ARM_MTE:
- if (!system_supports_mte() || kvm->created_vcpus)
- return -EINVAL;
- r = 0;
- kvm->arch.mte_enabled = true;
+ mutex_lock(&kvm->lock);
+ if (!system_supports_mte() || kvm->created_vcpus) {
+ r = -EINVAL;
+ } else {
+ r = 0;
+ kvm->arch.mte_enabled = true;
+ }
+ mutex_unlock(&kvm->lock);
break;
default:
r = -EINVAL;
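A rough userland sketch (a POSIX mutex standing in for kvm->lock) of why the capability enable is now serialized: testing "no vCPUs created yet" and setting the flag must be one atomic step with respect to vCPU creation, otherwise a vCPU could be created between the check and the flag update.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static int created_vcpus;
static bool mte_enabled;

static int enable_mte_cap(void)
{
	int r;

	pthread_mutex_lock(&vm_lock);
	if (created_vcpus) {
		r = -1;			/* too late: a vCPU already exists */
	} else {
		mte_enabled = true;	/* every future vCPU sees the flag */
		r = 0;
	}
	pthread_mutex_unlock(&vm_lock);
	return r;
}

static void create_vcpu(void)
{
	pthread_mutex_lock(&vm_lock);
	created_vcpus++;		/* snapshots mte_enabled consistently */
	pthread_mutex_unlock(&vm_lock);
}

int main(void)
{
	printf("%d\n", enable_mte_cap());	/* 0  */
	create_vcpu();
	printf("%d\n", enable_mte_cap());	/* -1 */
	return 0;
}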
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bd..a6ce991b1467 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
{
struct kvm_mem_range r1, r2;
- if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+ if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
return false;
if (r1.start != r2.start)
return false;
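A small sketch of the off-by-one this hunk fixes: with an exclusive end address, the lookup for the range's last byte has to use end - 1, otherwise a range ending exactly on a region boundary is checked against the following region. Types and values below are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };	/* [start, end) */

static bool in_range(const struct range *r, unsigned long addr)
{
	return addr >= r->start && addr < r->end;
}

static bool range_is_covered(const struct range *r, unsigned long start,
			     unsigned long end)
{
	/* end is exclusive: test the last byte actually covered */
	return in_range(r, start) && in_range(r, end - 1);
}

int main(void)
{
	struct range mem = { 0x1000, 0x2000 };

	printf("%d\n", range_is_covered(&mem, 0x1800, 0x2000));	/* 1 */
	printf("%d\n", in_range(&mem, 0x2000));				/* 0: why end-1 matters */
	return 0;
}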
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f23dfa06433b..0625bf2353c2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -85,7 +85,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
static bool kvm_is_device_pfn(unsigned long pfn)
{
- return !pfn_valid(pfn);
+ return !pfn_is_map_memory(pfn);
}
static void *stage2_memcache_zalloc_page(void *arg)
@@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_shift = get_vma_page_shift(vma, hva);
}
- shared = (vma->vm_flags & VM_PFNMAP);
+ shared = (vma->vm_flags & VM_SHARED);
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 95cd62d67371..2cf999e41d30 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -29,7 +29,7 @@
.endm
.macro ldrh1 reg, ptr, val
- user_ldst 9998f, ldtrh, \reg, \ptr, \val
+ user_ldst 9997f, ldtrh, \reg, \ptr, \val
.endm
.macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@
.endm
.macro ldr1 reg, ptr, val
- user_ldst 9998f, ldtr, \reg, \ptr, \val
+ user_ldst 9997f, ldtr, \reg, \ptr, \val
.endm
.macro str1 reg, ptr, val
@@ -45,7 +45,7 @@
.endm
.macro ldp1 reg1, reg2, ptr, val
- user_ldp 9998f, \reg1, \reg2, \ptr, \val
+ user_ldp 9997f, \reg1, \reg2, \ptr, \val
.endm
.macro stp1 reg1, reg2, ptr, val
@@ -53,8 +53,10 @@
.endm
end .req x5
+srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
add end, x0, x2
+ mov srcin, x1
#include "copy_template.S"
mov x0, #0 // Nothing to copy
ret
@@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+ strb tmp1w, [dst], #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
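A userland sketch (not the assembly) of what the new fixup does: if the fault hit before any byte reached the destination, try one last single-byte copy before giving up, then report the number of bytes that were not copied. The first_byte_ok flag is a stand-in for whether that final retry itself faults.

#include <stdio.h>

/* Pretend the bulk copy faulted after 'copied' bytes out of 'total'. */
static size_t fixup(char *dstin, size_t copied, size_t total,
		    const char *src, int first_byte_ok)
{
	char *dst = dstin + copied;
	char *end = dstin + total;

	if (dst == dstin && first_byte_ok) {
		/* try harder: one last single-byte attempt */
		*dst++ = src[0];
	}
	return (size_t)(end - dst);	/* bytes not copied */
}

int main(void)
{
	char buf[8] = { 0 };

	printf("%zu\n", fixup(buf, 0, 8, "x", 1));	/* 7: one byte salvaged */
	printf("%zu\n", fixup(buf, 0, 8, "x", 0));	/* 8: nothing copied    */
	return 0;
}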
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 1f61cd0df062..dbea3799c3ef 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -30,33 +30,34 @@
.endm
.macro ldrh1 reg, ptr, val
- user_ldst 9998f, ldtrh, \reg, \ptr, \val
+ user_ldst 9997f, ldtrh, \reg, \ptr, \val
.endm
.macro strh1 reg, ptr, val
- user_ldst 9998f, sttrh, \reg, \ptr, \val
+ user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm
.macro ldr1 reg, ptr, val
- user_ldst 9998f, ldtr, \reg, \ptr, \val
+ user_ldst 9997f, ldtr, \reg, \ptr, \val
.endm
.macro str1 reg, ptr, val
- user_ldst 9998f, sttr, \reg, \ptr, \val
+ user_ldst 9997f, sttr, \reg, \ptr, \val
.endm
.macro ldp1 reg1, reg2, ptr, val
- user_ldp 9998f, \reg1, \reg2, \ptr, \val
+ user_ldp 9997f, \reg1, \reg2, \ptr, \val
.endm
.macro stp1 reg1, reg2, ptr, val
- user_stp 9998f, \reg1, \reg2, \ptr, \val
+ user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
end .req x5
-
+srcin .req x15
SYM_FUNC_START(__arch_copy_in_user)
add end, x0, x2
+ mov srcin, x1
#include "copy_template.S"
mov x0, #0
ret
@@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 043da90f5dd7..9f380eecf653 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -32,7 +32,7 @@
.endm
.macro strh1 reg, ptr, val
- user_ldst 9998f, sttrh, \reg, \ptr, \val
+ user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm
.macro ldr1 reg, ptr, val
@@ -40,7 +40,7 @@
.endm
.macro str1 reg, ptr, val
- user_ldst 9998f, sttr, \reg, \ptr, \val
+ user_ldst 9997f, sttr, \reg, \ptr, \val
.endm
.macro ldp1 reg1, reg2, ptr, val
@@ -48,12 +48,14 @@
.endm
.macro stp1 reg1, reg2, ptr, val
- user_stp 9998f, \reg1, \reg2, \ptr, \val
+ user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
end .req x5
+srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
add end, x0, x2
+ mov srcin, x1
#include "copy_template.S"
mov x0, #0
ret
@@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+ ldrb tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 35fbdb7d6e1a..1648790e91b3 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/mte-def.h>
/* Assumptions:
*
@@ -42,7 +43,16 @@
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
+/*
+ * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE
+ * (16-byte) granularity, and we must ensure that no access straddles this
+ * alignment boundary.
+ */
+#ifdef CONFIG_KASAN_HW_TAGS
+#define MIN_PAGE_SIZE MTE_GRANULE_SIZE
+#else
#define MIN_PAGE_SIZE 4096
+#endif
/* Since strings are short on average, we check the first 16 bytes
of the string for a NUL character. In order to do an unaligned ldp
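A sketch of the boundary condition this hunk tightens: the unaligned 16-byte probe must not cross a MIN_PAGE_SIZE boundary, and with KASAN_HW_TAGS that boundary shrinks from 4096 bytes to the 16-byte MTE granule. The helper below is only illustrative.

#include <stdbool.h>
#include <stdio.h>

#define MTE_GRANULE_SIZE 16UL

/* Does a 16-byte load at addr stay inside one min_page-sized region? */
static bool probe16_is_safe(unsigned long addr, unsigned long min_page)
{
	return (addr & (min_page - 1)) <= min_page - 16;
}

int main(void)
{
	unsigned long addr = 0x1008;

	printf("4096-byte pages : %d\n", probe16_is_safe(addr, 4096));			/* 1 */
	printf("MTE granules    : %d\n", probe16_is_safe(addr, MTE_GRANULE_SIZE));	/* 0 */
	return 0;
}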
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bf1dd3eb041..6719f9efea09 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -50,7 +50,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->dma_coherent = coherent;
if (iommu)
- iommu_setup_dma_ops(dev, dma_base, size);
+ iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
#ifdef CONFIG_XEN
if (xen_swiotlb_detect())
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 58987a98e179..23505fc35324 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -339,10 +339,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t pagesize = huge_page_size(hstate_vma(vma));
+ size_t pagesize = 1UL << shift;
if (pagesize == CONT_PTE_SIZE) {
entry = pte_mkcont(entry);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 6e1ca044ca90..8490ed2917ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -219,42 +219,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
free_area_init(max_zone_pfns);
}
-int pfn_valid(unsigned long pfn)
+int pfn_is_map_memory(unsigned long pfn)
{
phys_addr_t addr = PFN_PHYS(pfn);
- struct mem_section *ms;
- /*
- * Ensure the upper PAGE_SHIFT bits are clear in the
- * pfn. Else it might lead to false positives when
- * some of the upper bits are set, but the lower bits
- * match a valid pfn.
- */
+ /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
if (PHYS_PFN(addr) != pfn)
return 0;
- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
- return 0;
-
- ms = __pfn_to_section(pfn);
- if (!valid_section(ms))
- return 0;
-
- /*
- * ZONE_DEVICE memory does not have the memblock entries.
- * memblock_is_map_memory() check for ZONE_DEVICE based
- * addresses will always fail. Even the normal hotplugged
- * memory will never have MEMBLOCK_NOMAP flag set in their
- * memblock entries. Skip memblock search for all non early
- * memory sections covering all of hotplug memory including
- * both normal and ZONE_DEVICE based.
- */
- if (!early_section(ms))
- return pfn_section_valid(ms, pfn);
-
return memblock_is_map_memory(addr);
}
-EXPORT_SYMBOL(pfn_valid);
+EXPORT_SYMBOL(pfn_is_map_memory);
static phys_addr_t memory_limit = PHYS_ADDR_MAX;
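A minimal sketch of the "bogus PFN" guard that pfn_is_map_memory() keeps from the old pfn_valid(): round-tripping pfn -> phys -> pfn discards bits that do not fit in a physical address, so a mismatch rejects PFNs with stray upper bits. The 48-bit width and explicit masking here are illustrative, not the kernel's exact macros.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_BITS	48
#define PHYS_MASK	((1ULL << PHYS_BITS) - 1)

#define PFN_PHYS(pfn)	(((unsigned long long)(pfn) << PAGE_SHIFT) & PHYS_MASK)
#define PHYS_PFN(addr)	((unsigned long long)(addr) >> PAGE_SHIFT)

static int pfn_plausible(unsigned long long pfn)
{
	return PHYS_PFN(PFN_PHYS(pfn)) == pfn;
}

int main(void)
{
	printf("%d\n", pfn_plausible(0x80000ULL));	/* 1 */
	printf("%d\n", pfn_plausible(1ULL << 40));	/* 0: upper bits set */
	return 0;
}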
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b5e83c46b23e..b7c81dacabf0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -43,7 +43,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
/*
* Don't allow RAM to be mapped.
*/
- if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
+ if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(iounmap);
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
/* For normal memory we already have a cacheable mapping. */
- if (pfn_valid(__phys_to_pfn(phys_addr)))
+ if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
return (void __iomem *)__phys_to_virt(phys_addr);
return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0b28cc218091..9ff0de1b2b93 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -82,7 +83,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
- if (!pfn_valid(pfn))
+ if (!pfn_is_map_memory(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
@@ -515,8 +516,7 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
- if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
- IS_ENABLED(CONFIG_KFENCE))
+ if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@@ -1485,8 +1485,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
* KFENCE requires linear map to be mapped at page granularity, so that
* it is possible to protect/unprotect single pages in the KFENCE pool.
*/
- if (rodata_full || debug_pagealloc_enabled() ||
- IS_ENABLED(CONFIG_KFENCE))
+ if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 92eccaf595c8..a3bacd79507a 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -19,6 +19,11 @@ struct page_change_data {
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+bool can_set_direct_map(void)
+{
+ return rodata_full || debug_pagealloc_enabled();
+}
+
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
@@ -155,7 +160,7 @@ int set_direct_map_invalid_noflush(struct page *page)
.clear_mask = __pgprot(PTE_VALID),
};
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return 0;
return apply_to_page_range(&init_mm,
@@ -170,7 +175,7 @@ int set_direct_map_default_noflush(struct page *page)
.clear_mask = __pgprot(PTE_RDONLY),
};
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return 0;
return apply_to_page_range(&init_mm,
@@ -181,7 +186,7 @@ int set_direct_map_default_noflush(struct page *page)
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return;
set_memory_valid((unsigned long)page_address(page), numpages, enable);
@@ -206,7 +211,7 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return true;
pgdp = pgd_offset_k(addr);
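A compact sketch of the refactor above: the repeated "rodata_full || debug_pagealloc_enabled()" test collapses into one predicate, and each linear-map permission helper bails out early when it returns false. The booleans below are placeholders for the real kernel state, not the actual variables.

#include <stdbool.h>
#include <stdio.h>

static bool rodata_full = true;
static bool debug_pagealloc;

static bool can_set_direct_map(void)
{
	return rodata_full || debug_pagealloc;
}

static int set_direct_map_invalid(void)
{
	if (!can_set_direct_map())
		return 0;	/* linear map uses block mappings: nothing to do */
	return 1;		/* page-granular map: safe to change the PTE     */
}

int main(void)
{
	printf("%d\n", set_direct_map_invalid());
	return 0;
}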
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index dccf98a37283..41c23f474ea6 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -823,6 +823,19 @@ emit_cond_jmp:
return ret;
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ /*
+ * Nothing required here.
+ *
+ * In case of arm64, we rely on the firmware mitigation of
+ * Speculative Store Bypass as controlled via the ssbd kernel
+ * parameter. Whenever the mitigation is enabled, it works
+ * for all of the kernel code with no need to provide any
+ * additional instructions.
+ */
+ break;
+
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
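A toy sketch of the JIT dispatch this hunk extends: on arm64 the new speculation-barrier opcode emits no code at all, because the Speculative Store Bypass mitigation is handled via firmware, while ordinary opcodes still get translated. The opcode values here are placeholders, not real BPF encodings.

#include <stdio.h>

enum { OP_NOSPEC = 0xc0, OP_STORE = 0x62 };	/* placeholder encodings */

static int jit_emit(unsigned int opcode)
{
	switch (opcode) {
	case OP_NOSPEC:
		/* arm64: firmware SSB mitigation already covers kernel code */
		return 0;			/* emit nothing */
	default:
		return 1;			/* emit the translated instruction */
	}
}

int main(void)
{
	printf("nospec emits %d insn(s)\n", jit_emit(OP_NOSPEC));
	printf("store  emits %d insn(s)\n", jit_emit(OP_STORE));
	return 0;
}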
diff --git a/arch/csky/Kbuild b/arch/csky/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/csky/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 8de5b987edb9..2716f6395ba7 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -76,8 +76,6 @@ config CSKY
select PERF_USE_VMALLOC if CPU_CK610
select RTC_LIB
select TIMER_OF
- select USB_ARCH_HAS_EHCI
- select USB_ARCH_HAS_OHCI
select GENERIC_PCI_IOMAP
select HAVE_PCI
select PCI_DOMAINS_GENERIC if PCI
@@ -245,7 +243,6 @@ endchoice
menuconfig HAVE_TCM
bool "Tightly-Coupled/Sram Memory"
- select GENERIC_ALLOCATOR
help
The implementation is not only used by TCM (Tightly-Coupled Memory)
but also used by SRAM on the SoC bus. It follows the existing Linux TCM
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index cd211aabbefd..bbbd0698b397 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd(__pa(page_address(pte))));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern void pgd_init(unsigned long *p);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 0d60367b6bfa..151607ed5158 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -14,7 +14,6 @@
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/*
* C-SKY is two-level paging structure:
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index e93bc6f74432..c64e7be2045b 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -78,10 +78,7 @@ void __init setup_arch(char **cmdline_p)
pr_info("Phys. mem: %ldMB\n",
(unsigned long) memblock_phys_mem_size()/1024/1024);
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
parse_early_param();
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
index 4e51d63850c4..cd847ad62c7e 100644
--- a/arch/csky/mm/syscache.c
+++ b/arch/csky/mm/syscache.c
@@ -12,15 +12,17 @@ SYSCALL_DEFINE3(cacheflush,
int, cache)
{
switch (cache) {
- case ICACHE:
case BCACHE:
- flush_icache_mm_range(current->mm,
- (unsigned long)addr,
- (unsigned long)addr + bytes);
- fallthrough;
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
+ if (cache != BCACHE)
+ break;
+ fallthrough;
+ case ICACHE:
+ flush_icache_mm_range(current->mm,
+ (unsigned long)addr,
+ (unsigned long)addr + bytes);
break;
default:
return -EINVAL;
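A behavioural sketch of the reworked switch, with the cache maintenance calls reduced to printouts: DCACHE writes back the data cache only, ICACHE flushes the instruction cache only, and BCACHE does the write-back first and then falls through to the icache flush.

#include <stdio.h>

enum { ICACHE = 1, DCACHE = 2, BCACHE = 3 };

static int do_cacheflush(int cache)
{
	switch (cache) {
	case BCACHE:
	case DCACHE:
		puts("dcache writeback");
		if (cache != BCACHE)
			break;
		/* fall through */
	case ICACHE:
		puts("icache flush");
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	do_cacheflush(DCACHE);	/* dcache only          */
	do_cacheflush(ICACHE);	/* icache only          */
	do_cacheflush(BCACHE);	/* both, dcache first   */
	return 0;
}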
diff --git a/arch/h8300/Kbuild b/arch/h8300/Kbuild
new file mode 100644
index 000000000000..b2583e7efbd1
--- /dev/null
+++ b/arch/h8300/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += kernel/ mm/ boot/dts/
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index b5e14d513e62..c30baa0499fc 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -44,7 +44,6 @@ config H8300_H8MAX
bool "H8MAX"
select H83069
select RAMKERNEL
- select HAVE_IDE
help
H8MAX Evaluation Board Support
More Information. (Japanese Only)
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index ba0f26cfad61..eb4cb8f6830c 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -30,9 +30,6 @@ ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
endif
-core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
-core-y += arch/$(ARCH)/boot/dts/
-
libs-y += arch/$(ARCH)/lib/
boot := arch/h8300/boot
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index c3590b2e9592..61091a76eb7e 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -95,10 +95,7 @@ void __init setup_arch(char **cmdline_p)
{
unflatten_and_copy_device_tree();
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) 0;
+ setup_initial_init_mm(_stext, _etext, _edata, NULL);
pr_notice("\r\n\nuClinux " CPU "\n");
pr_notice("Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
diff --git a/arch/hexagon/Kbuild b/arch/hexagon/Kbuild
new file mode 100644
index 000000000000..8421baba1351
--- /dev/null
+++ b/arch/hexagon/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += kernel/ mm/ lib/
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 44a409967af1..e5a852080730 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -30,6 +30,7 @@ config HEXAGON
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
select SET_FS
+ select ARCH_WANT_LD_ORPHAN_WARN
help
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
index 74b644ea8a00..44312bc147d8 100644
--- a/arch/hexagon/Makefile
+++ b/arch/hexagon/Makefile
@@ -34,7 +34,3 @@ KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
head-y := arch/hexagon/kernel/head.o
-
-core-y += arch/hexagon/kernel/ \
- arch/hexagon/mm/ \
- arch/hexagon/lib/
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index dbb22b80b8c4..18cd6ea9ab23 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -155,9 +155,6 @@ extern unsigned long _dflt_cache_att;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
-/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0UL
-
/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
@@ -242,7 +239,6 @@ static inline int pmd_bad(pmd_t pmd)
* pmd_page - converts a PMD entry to a page pointer
*/
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/**
* pte_none - check if pte is mapped
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 35b18e55eae8..57465bff1fe4 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -38,6 +38,8 @@ SECTIONS
.text : AT(ADDR(.text)) {
_text = .;
TEXT_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -59,14 +61,9 @@ SECTIONS
_end = .;
- /DISCARD/ : {
- EXIT_TEXT
- EXIT_DATA
- EXIT_CALL
- }
-
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
+ DISCARDS
}
diff --git a/arch/ia64/Kbuild b/arch/ia64/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/ia64/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index da22a35e6f03..4993c7ac7ff6 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -25,7 +25,6 @@ config IA64
select HAVE_ASM_MODVERSIONS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
- select HAVE_IDE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
@@ -60,6 +59,7 @@ config IA64
select NUMA if !FLATMEM
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select SET_FS
+ select ZONE_DMA32
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -72,9 +72,6 @@ config 64BIT
select ATA_NONSTANDARD if ATA
default y
-config ZONE_DMA32
- def_bool y
-
config MMU
bool
default y
@@ -308,9 +305,6 @@ config NODES_SHIFT
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.
-config HOLES_IN_ZONE
- bool
-
config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
index 5c51fceedaf9..e6b652f9e45e 100644
--- a/arch/ia64/include/asm/pal.h
+++ b/arch/ia64/include/asm/pal.h
@@ -99,6 +99,7 @@
#include <linux/types.h>
#include <asm/fpu.h>
+#include <asm/intrinsics.h>
/*
* Data types needed to pass information into PAL procedures and
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 9601cfe83c94..0fb2b6291d58 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -52,7 +52,6 @@ pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index d765fd948fae..9584b2c5f394 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -128,7 +128,6 @@
#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0UL
/*
* All the normal masks have the "page accessed" bits on, as any time
@@ -274,7 +273,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud) (pud_val(pud) != 0UL)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
#if CONFIG_PGTABLE_LEVELS == 4
@@ -282,7 +281,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define p4d_bad(p4d) (!ia64_phys_addr_valid(p4d_val(p4d)))
#define p4d_present(p4d) (p4d_val(p4d) != 0UL)
#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)
-#define p4d_page_vaddr(p4d) ((unsigned long) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_pgtable(p4d) ((pud_t *) __va(p4d_val(p4d) & _PFN_MASK))
#define p4d_page(p4d) virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
#endif
diff --git a/arch/ia64/include/asm/unaligned.h b/arch/ia64/include/asm/unaligned.h
deleted file mode 100644
index 328942e3cbce..000000000000
--- a/arch/ia64/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_UNALIGNED_H
-#define _ASM_IA64_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/arch/m68k/68000/dragen2.c b/arch/m68k/68000/dragen2.c
index 584893c57c37..62f10a9e1ab7 100644
--- a/arch/m68k/68000/dragen2.c
+++ b/arch/m68k/68000/dragen2.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
+#include "screen.h"
/***************************************************************************/
/* Init Dragon Engine hardware */
diff --git a/arch/m68k/68000/screen.h b/arch/m68k/68000/screen.h
new file mode 100644
index 000000000000..2089bdf02688
--- /dev/null
+++ b/arch/m68k/68000/screen.h
@@ -0,0 +1,804 @@
+/* Created with The GIMP */
+#define screen_width 320
+#define screen_height 240
+static unsigned char screen_bits[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0d, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x5a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x95, 0x6a, 0x00, 0x00, 0x00, 0x03, 0xf8, 0x70, 0xe0, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+ 0x01, 0xf8, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x5b, 0x55, 0x00, 0x00, 0x00, 0x0f,
+ 0xfc, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x38, 0x03, 0xfc, 0x00, 0x7f, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xb5, 0x56,
+ 0x00, 0x00, 0x00, 0x1e, 0x0c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x03, 0x0e, 0x00, 0x61,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xd7, 0x55, 0x55, 0x00, 0x03, 0x8e, 0x1c, 0x00, 0x70, 0xe1, 0x9e,
+ 0x0e, 0x38, 0xe1, 0x80, 0x70, 0xc0, 0xf0, 0x73, 0x33, 0xc0, 0x78, 0x38,
+ 0x00, 0x0e, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xa9, 0xaa, 0xad, 0x00, 0x03, 0x8e, 0x38,
+ 0x00, 0x70, 0xe1, 0xff, 0x0e, 0x38, 0xe3, 0x80, 0x71, 0xc3, 0xf8, 0x77,
+ 0x3f, 0xe1, 0xfc, 0x38, 0x00, 0x0e, 0x00, 0xef, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x2b, 0x55, 0x6a,
+ 0x00, 0x03, 0x8e, 0x38, 0x00, 0x70, 0xe1, 0xe7, 0x0e, 0x38, 0x73, 0x00,
+ 0x73, 0x83, 0x9c, 0x7f, 0x3c, 0xe1, 0xce, 0x38, 0x00, 0x1c, 0x00, 0xff,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x56, 0xab, 0x55, 0x00, 0x03, 0x8e, 0x38, 0x00, 0x70, 0xe1, 0xc7,
+ 0x0e, 0x38, 0x76, 0x00, 0x77, 0x07, 0x1c, 0x78, 0x38, 0xe3, 0x8e, 0x38,
+ 0x00, 0x78, 0x00, 0xf3, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0x55, 0x5a, 0xb7, 0x00, 0x03, 0x8e, 0x38,
+ 0x00, 0x70, 0xe1, 0xc7, 0x0e, 0x38, 0x3c, 0x00, 0x7e, 0x07, 0xfc, 0x70,
+ 0x38, 0xe3, 0xfe, 0x38, 0x00, 0xf0, 0x00, 0xe1, 0xc0, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa5, 0x9a, 0xab, 0x6d,
+ 0x00, 0x03, 0x8e, 0x3c, 0x00, 0x70, 0xe1, 0xc7, 0x0e, 0x38, 0x3e, 0x00,
+ 0x7f, 0x07, 0xfc, 0x70, 0x38, 0xe3, 0xfe, 0x38, 0x01, 0xc0, 0x00, 0xe1,
+ 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x4d, 0x75, 0xb6, 0xd5, 0x00, 0x03, 0x8e, 0x1c, 0x00, 0x70, 0xe1, 0xc7,
+ 0x0e, 0x38, 0x77, 0x00, 0x77, 0x87, 0x00, 0x70, 0x38, 0xe3, 0x80, 0x38,
+ 0x03, 0x80, 0x00, 0xe1, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0xaa, 0xca, 0xd5, 0x5b, 0x00, 0x03, 0x9e, 0x1f,
+ 0x0c, 0x70, 0xe1, 0xc7, 0x0e, 0x78, 0x67, 0x00, 0x73, 0xc7, 0x8c, 0x70,
+ 0x38, 0xe3, 0xc6, 0x38, 0x03, 0xfe, 0x38, 0x71, 0xc0, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xb5, 0xbb, 0x5b, 0x6a,
+ 0x00, 0x03, 0xfe, 0x0f, 0xfc, 0x70, 0xe1, 0xc7, 0x0f, 0xf8, 0xe3, 0x80,
+ 0x71, 0xe3, 0xfc, 0x70, 0x38, 0xe1, 0xfe, 0x38, 0x03, 0xfe, 0x38, 0x7f,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9,
+ 0x6b, 0x56, 0xd6, 0xad, 0x00, 0x01, 0xe6, 0x03, 0xf0, 0x70, 0xe1, 0xc7,
+ 0x07, 0x98, 0xc3, 0x80, 0x70, 0xe0, 0xf8, 0x70, 0x38, 0xe0, 0x7c, 0x38,
+ 0x03, 0xfe, 0x38, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x55, 0x55, 0x6a, 0xba, 0xeb, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x56, 0xd5, 0xd5, 0xd5, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x5a,
+ 0xb7, 0x3d, 0x57, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x75, 0x6d, 0xaa, 0xd3, 0xac, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8b, 0x6a, 0xb6, 0xde, 0x6b, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xba, 0xad,
+ 0xeb, 0x32, 0xda, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x1e, 0xaa, 0xb5, 0xad, 0x6e, 0x96, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x86, 0x00, 0x00, 0x07, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfc, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x6b, 0x6f, 0x5a, 0xb5, 0x75, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0xda, 0xd9,
+ 0x53, 0xeb, 0x6b, 0x4b, 0x00, 0x00, 0xf0, 0xde, 0x03, 0xe6, 0xf0, 0x78,
+ 0x06, 0x0c, 0x6c, 0x7c, 0x1f, 0x87, 0x86, 0xf0, 0xc0, 0xde, 0x0f, 0xcc,
+ 0xde, 0x0f, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xd6, 0xab, 0x57, 0x6e, 0x8a, 0xd6, 0xba, 0x00, 0x01, 0x98, 0xe7,
+ 0x01, 0x87, 0x38, 0xcc, 0x06, 0x0c, 0x6c, 0x06, 0x31, 0x8c, 0xc7, 0x38,
+ 0xc0, 0xe7, 0x18, 0xcc, 0xe7, 0x19, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0x2d, 0x5e, 0xda, 0x55, 0xbb, 0x59, 0x42,
+ 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0x8c, 0x06, 0x0c, 0x70, 0x06,
+ 0x61, 0x98, 0x66, 0x18, 0xf8, 0xc3, 0x30, 0xcc, 0xc3, 0x31, 0x80, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf7, 0x6b, 0x6a,
+ 0xab, 0x56, 0xd6, 0xbf, 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0xfc,
+ 0x06, 0x0c, 0x60, 0x7e, 0x61, 0x98, 0x66, 0x18, 0xc0, 0xc3, 0x30, 0xcc,
+ 0xc3, 0x3f, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x75, 0x94, 0xdb, 0x75, 0x6e, 0xda, 0xaa, 0xa2, 0x00, 0x03, 0x0c, 0xc3,
+ 0x01, 0x86, 0x19, 0x80, 0x06, 0x0c, 0x60, 0xc6, 0x61, 0x98, 0x66, 0x18,
+ 0xc0, 0xc3, 0x30, 0xcc, 0xc3, 0x30, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xdb, 0x6b, 0xad, 0x9b, 0x27, 0x55, 0x55, 0x55,
+ 0x00, 0x03, 0x0c, 0xc3, 0x01, 0x86, 0x19, 0x80, 0x06, 0x0c, 0x60, 0xc6,
+ 0x33, 0x98, 0x66, 0x18, 0xc0, 0xc3, 0x19, 0xcc, 0xc3, 0x30, 0x00, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x36, 0xde, 0xb5, 0x65,
+ 0x75, 0x5a, 0xd5, 0x56, 0x00, 0x01, 0x98, 0xc3, 0x01, 0x86, 0x18, 0xc4,
+ 0x06, 0x18, 0x60, 0xce, 0x1d, 0x8c, 0xc6, 0x18, 0xc0, 0xc3, 0x0e, 0xcc,
+ 0xc3, 0x18, 0x80, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0xed, 0xb5, 0x6d, 0x56, 0x55, 0x55, 0x55, 0xae, 0x00, 0x00, 0xf0, 0xc3,
+ 0x00, 0xe6, 0x18, 0x78, 0x07, 0xf0, 0x60, 0x77, 0x01, 0x87, 0x86, 0x18,
+ 0xfc, 0xc3, 0x00, 0xcc, 0xc3, 0x0f, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x7a, 0xab, 0x6d, 0xda, 0xaa, 0xca, 0xd5, 0x6d, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0xda, 0xaa, 0xea, 0xae,
+ 0x9b, 0x5a, 0xa9, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xd5,
+ 0xbb, 0xfd, 0xad, 0xad, 0x69, 0xea, 0xcb, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x05, 0xaf, 0xb6, 0x8a, 0x6a, 0xb9, 0x5a, 0x2d, 0xba, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x7a, 0x75, 0x6e, 0xcd, 0x52,
+ 0x9b, 0xdb, 0x55, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xad,
+ 0xaf, 0x95, 0x55, 0xed, 0x55, 0x55, 0x6b, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xea, 0xb5, 0xec, 0xfd, 0x59, 0x5a, 0xb5, 0x56, 0xaa, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb7, 0x6b, 0x36, 0x4a, 0xeb, 0xab,
+ 0x2d, 0x6a, 0x9b, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xad, 0x6f,
+ 0x6b, 0xeb, 0xdd, 0x7b, 0x6a, 0x55, 0xb5, 0x56, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x17, 0x76, 0xda, 0xd6, 0x5d, 0x62, 0xc6, 0xd5, 0x36, 0xaa, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xdb, 0x56, 0xbc, 0xf2, 0xa5, 0x5d,
+ 0x96, 0xaa, 0xb6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xee, 0xfd,
+ 0xd5, 0x2c, 0x6d, 0x9a, 0x75, 0x5b, 0x6a, 0xb5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xee, 0xad, 0x55, 0x15, 0xef, 0x54, 0xf6, 0xc5, 0x6a, 0xa9, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x5b, 0xd6, 0xad, 0xbe, 0xb0, 0xa6, 0x35,
+ 0x5b, 0xd5, 0x4a, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xeb, 0x5d, 0xf5,
+ 0xaa, 0xd7, 0xf4, 0x75, 0xba, 0x55, 0xaf, 0x6e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x37, 0x5e, 0xf7, 0x55, 0x61, 0xbc, 0x08, 0x5b, 0x55, 0x5a, 0xa9, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xf6, 0xba, 0xaa, 0xaa, 0x9f, 0x69, 0xec, 0xd5,
+ 0x4b, 0xa9, 0xaa, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x2d, 0xd5, 0xed, 0x5a,
+ 0xf2, 0xd6, 0xae, 0xdb, 0x9e, 0x27, 0x5f, 0xb5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
+ 0x6d, 0x5b, 0xaa, 0xda, 0xae, 0x95, 0x58, 0xd5, 0x34, 0x6d, 0x68, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x15, 0xdb, 0xd7, 0x56, 0xb5, 0xd5, 0x6d, 0x29, 0x5b,
+ 0x4b, 0xdb, 0x57, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0xda, 0xac, 0xd5, 0x4b,
+ 0x57, 0x6a, 0x9b, 0x76, 0x5c, 0x95, 0x54, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa,
+ 0xb7, 0xfb, 0xab, 0xb6, 0xea, 0xad, 0x62, 0x95, 0xa1, 0xf5, 0xa9, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x57, 0xaf, 0x6b, 0x72, 0x54, 0x99, 0xd5, 0x0e, 0xf3,
+ 0x5f, 0x15, 0x2a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xea, 0xd4, 0xad, 0x5d, 0x35,
+ 0xb5, 0x34, 0xb2, 0xaa, 0x54, 0xba, 0xad, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x5e,
+ 0xaf, 0xed, 0xab, 0xea, 0xb3, 0xaa, 0x6a, 0xad, 0xd5, 0xd5, 0xaa, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x2a, 0xb9, 0xf5, 0xbb, 0xb5, 0x55, 0x64, 0x57, 0x55, 0x6b,
+ 0x5d, 0xd0, 0x52, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xab, 0x5b, 0xb6, 0x6d, 0x37,
+ 0x6f, 0xaa, 0x5b, 0xa2, 0xb5, 0x1f, 0xed, 0x73, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x65, 0xaa,
+ 0x6e, 0xfb, 0xd3, 0x5a, 0xd4, 0x54, 0xaa, 0x5e, 0xc3, 0xb5, 0x15, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x07, 0xaf, 0x7f, 0xea, 0xb6, 0xaa, 0xd6, 0xad, 0xf1, 0xd2, 0xd5,
+ 0x56, 0x55, 0x6a, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xbd, 0xda, 0xaf, 0x75, 0x6f, 0x5a,
+ 0x45, 0x26, 0xb7, 0x5b, 0x20, 0xdd, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xeb, 0x7f,
+ 0xf5, 0x4d, 0x95, 0x76, 0xd9, 0x56, 0xb5, 0x52, 0x6d, 0x12, 0xad, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x57, 0x3f, 0xa4, 0x8f, 0xb9, 0x6e, 0xa2, 0x6f, 0x1d, 0x6a, 0xef,
+ 0xb5, 0x6d, 0xaa, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0xfd, 0x75, 0x2f, 0x7c, 0x57, 0x88, 0xf6,
+ 0x80, 0xb5, 0x57, 0x5c, 0xd7, 0x55, 0x54, 0xae, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x39, 0xff, 0xf6,
+ 0xab, 0x7a, 0x57, 0x94, 0xef, 0x0d, 0xe4, 0xea, 0xa8, 0xaa, 0xab, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0xf7, 0xff, 0x6d, 0xb7, 0x4b, 0x39, 0x17, 0x2a, 0xfa, 0xb7, 0x56,
+ 0xb7, 0xaa, 0xed, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x18, 0xbe, 0xfd, 0xda, 0x9e, 0xec, 0xe6, 0xd5,
+ 0x52, 0x2a, 0x58, 0xa9, 0x54, 0x5a, 0x97, 0xe7, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0xe9, 0xf6, 0xbe,
+ 0xd4, 0x5b, 0xad, 0x63, 0x61, 0xf7, 0xb7, 0xaf, 0x55, 0x52, 0x9f, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0xc9, 0xbd, 0xfb, 0x6b, 0xeb, 0xf5, 0x6b, 0x2d, 0x57, 0x52, 0x94, 0xaa,
+ 0xb1, 0xab, 0x4a, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0xdb, 0xcb, 0xff, 0xf5, 0x3b, 0x55, 0xa0, 0x00,
+ 0x45, 0x6e, 0xb5, 0xb5, 0x65, 0x52, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x2e, 0x7d, 0xdb, 0xa5,
+ 0xca, 0xbe, 0x80, 0x00, 0x0a, 0x54, 0xaa, 0xa5, 0x45, 0x08, 0x09, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+ 0xd8, 0xd3, 0x1b, 0xae, 0xb7, 0xea, 0x00, 0x00, 0x00, 0x95, 0xaa, 0x56,
+ 0xdc, 0xe1, 0x21, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x53, 0x41, 0xff, 0x77, 0xbd, 0xaa, 0x58, 0x00, 0x00,
+ 0x00, 0xdb, 0x75, 0xd4, 0xb2, 0xa4, 0x07, 0xea, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xef, 0x07, 0xbd, 0x65, 0xeb,
+ 0xa2, 0xd0, 0x00, 0x00, 0x00, 0x25, 0x4b, 0x35, 0x56, 0x80, 0x77, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28,
+ 0x01, 0x28, 0x9a, 0xb6, 0xd7, 0x60, 0x00, 0x00, 0x00, 0x0c, 0xaa, 0xaa,
+ 0xaa, 0x02, 0x07, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0xe0, 0x17, 0xb1, 0xbf, 0xee, 0xb4, 0xc0, 0x00, 0x00,
+ 0x00, 0x08, 0xaa, 0xad, 0x68, 0x54, 0x04, 0x5a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x74, 0x87, 0x77, 0x72, 0x5a,
+ 0xab, 0x80, 0x00, 0x00, 0x00, 0x02, 0xd4, 0xb5, 0x52, 0x08, 0x5b, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x00,
+ 0x1f, 0xd6, 0xef, 0xda, 0xd5, 0x00, 0x00, 0x00, 0x0c, 0x01, 0x52, 0xd5,
+ 0x40, 0xf1, 0x55, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x9b, 0x00, 0x17, 0x4b, 0x92, 0xb7, 0xaf, 0x00, 0x00, 0x00,
+ 0x0e, 0x01, 0x4e, 0xaa, 0xae, 0x95, 0x55, 0x6d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xb8, 0x11, 0x2b, 0x13, 0x76, 0xef,
+ 0x54, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x54, 0xaa, 0xaa, 0xb5, 0xad, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x40, 0x00,
+ 0x7e, 0x7c, 0x65, 0xf4, 0x78, 0x00, 0x00, 0x00, 0x1f, 0x00, 0xab, 0x56,
+ 0xd5, 0x55, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0a, 0x40, 0x42, 0x6d, 0xce, 0xe5, 0xae, 0x54, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x6d, 0x75, 0x5d, 0x55, 0x4d, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x82, 0x03, 0xdc, 0x54, 0xbf, 0x61,
+ 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xad, 0xa2, 0xb5, 0x60, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x16,
+ 0xd9, 0xb5, 0xa4, 0xc7, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xea,
+ 0xae, 0xd5, 0x57, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x54, 0x04, 0x0d, 0x76, 0xbb, 0x4b, 0xbc, 0x58, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x36, 0x95, 0xa9, 0x55, 0x54, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x50, 0x08, 0x5b, 0xc5, 0x3d, 0x97, 0x0a,
+ 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb6, 0xab, 0x2b, 0x55, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x24, 0x20, 0x3d,
+ 0x59, 0x7b, 0x76, 0x37, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x6d,
+ 0x75, 0xb5, 0x55, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0xf0, 0x01, 0xff, 0x21, 0xa8, 0xc3, 0x74, 0xa8, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x1b, 0xa9, 0x4b, 0x55, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x25, 0xc0, 0x41, 0xca, 0x9c, 0x77, 0x58, 0x9d,
+ 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x56, 0xb4, 0xad, 0xb2, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x80, 0x0f, 0xbe,
+ 0xc0, 0xf6, 0xd5, 0xb3, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x54,
+ 0xa5, 0xaa, 0x55, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x04, 0x87, 0xbc, 0x6a, 0x3b, 0xac, 0x9d, 0x58, 0x00, 0x00, 0x03,
+ 0xe0, 0x00, 0x16, 0xab, 0x55, 0x4a, 0xd6, 0xa5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x5c, 0x00, 0x1f, 0x54, 0xc9, 0x2a, 0xb7, 0xa6,
+ 0xd8, 0x0f, 0x00, 0x07, 0xf8, 0x00, 0x15, 0x6a, 0x55, 0x5a, 0xa4, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x08, 0x74, 0xca,
+ 0x9c, 0x5a, 0xa8, 0xc5, 0x30, 0x1f, 0x80, 0x0f, 0xfc, 0x00, 0x0d, 0x55,
+ 0xaa, 0xa5, 0x55, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0xf0, 0x20, 0xea, 0x5a, 0xb0, 0xe7, 0x95, 0x7d, 0x10, 0x3f, 0xc0, 0x1f,
+ 0xfe, 0x00, 0x0a, 0xaa, 0xaa, 0xad, 0x4a, 0x95, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0xf4, 0x02, 0x7d, 0xb5, 0x8f, 0x9c, 0xaa, 0xe9,
+ 0xa0, 0x3f, 0xc0, 0x1f, 0xfe, 0x00, 0x06, 0xb5, 0x54, 0xa9, 0x2a, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x40, 0x47, 0xeb, 0xab,
+ 0x75, 0x51, 0x55, 0x4d, 0xd8, 0x3f, 0xe0, 0x3f, 0x3f, 0x00, 0x0a, 0xaa,
+ 0xa9, 0x4a, 0xaa, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
+ 0xc0, 0x06, 0xff, 0xe5, 0xb6, 0xbb, 0xa9, 0x34, 0x48, 0x30, 0xe0, 0x3c,
+ 0x0f, 0x00, 0x0a, 0xaa, 0x8a, 0xaa, 0xaa, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xaf, 0x80, 0x9f, 0xa1, 0x8d, 0xb5, 0xd6, 0x93, 0xcd,
+ 0x90, 0x62, 0x60, 0x3c, 0x27, 0x00, 0x06, 0xaa, 0xb5, 0xaa, 0xaa, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0x92, 0x3f, 0xb5, 0x36,
+ 0x56, 0xb5, 0x9d, 0x55, 0x90, 0x62, 0x60, 0x38, 0x17, 0x80, 0x0d, 0x54,
+ 0xaa, 0x54, 0xaa, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x7a,
+ 0x00, 0xba, 0xab, 0x73, 0xe7, 0xa2, 0xb6, 0x55, 0x2c, 0x61, 0x60, 0x38,
+ 0x17, 0x80, 0x09, 0x6b, 0x4a, 0xd5, 0x4a, 0x92, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0d, 0xfe, 0x03, 0x6d, 0xea, 0x08, 0x51, 0x1d, 0x2b, 0x35,
+ 0x08, 0x61, 0x60, 0x38, 0x07, 0x80, 0x06, 0xda, 0xaa, 0xa9, 0x55, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0xf8, 0x4e, 0xfb, 0x89, 0xde,
+ 0x35, 0xea, 0x6b, 0x72, 0x28, 0x60, 0x70, 0x38, 0x07, 0x80, 0x05, 0x52,
+ 0xaa, 0xaa, 0x95, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf8,
+ 0x0d, 0x63, 0xbd, 0xe3, 0x57, 0x55, 0xb6, 0x49, 0xcc, 0x20, 0x7f, 0xf8,
+ 0x07, 0x80, 0x0a, 0xaa, 0xaa, 0xaa, 0xaa, 0xad, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x5d, 0xe1, 0x34, 0xc6, 0xab, 0xa7, 0x91, 0x77, 0x37, 0xcc,
+ 0x48, 0x30, 0x9f, 0x3c, 0x07, 0x80, 0x0a, 0xaa, 0xaa, 0xaa, 0xaa, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3b, 0xc0, 0x34, 0xdb, 0x4e, 0xf9,
+ 0xae, 0xd5, 0x54, 0xe1, 0xb4, 0x19, 0x3f, 0xfe, 0x0f, 0x00, 0x05, 0x55,
+ 0x55, 0x55, 0x52, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x7f, 0x89,
+ 0xf1, 0xf7, 0xe5, 0x57, 0xea, 0x8d, 0x4c, 0xda, 0xac, 0x13, 0xff, 0xff,
+ 0x80, 0x00, 0x0a, 0xd5, 0xaa, 0xa4, 0x95, 0x54, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf7, 0x02, 0x53, 0x6c, 0x72, 0x10, 0xa3, 0x6a, 0x74, 0xea,
+ 0x34, 0x07, 0xff, 0xff, 0xe0, 0x00, 0x05, 0x4e, 0x54, 0x95, 0x55, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xfc, 0x97, 0xd6, 0x55, 0xb6, 0xab,
+ 0xb7, 0x45, 0x46, 0xad, 0xf4, 0x1f, 0xff, 0xff, 0xfe, 0x00, 0x05, 0x2a,
+ 0xd5, 0x54, 0x95, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xcf, 0x0e,
+ 0xfb, 0x2d, 0x12, 0xd4, 0xb8, 0xb6, 0xeb, 0x6a, 0x10, 0x3f, 0xff, 0xff,
+ 0xff, 0x00, 0x02, 0xda, 0x8a, 0xab, 0x6a, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x57, 0xbc, 0x3a, 0xc6, 0x0d, 0x76, 0xb1, 0x77, 0x15, 0x2a, 0x56,
+ 0x34, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x02, 0xa5, 0x75, 0x2a, 0x52, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x0f, 0xfc, 0xdf, 0x75, 0x9d, 0x5a, 0x67,
+ 0x15, 0x59, 0xb3, 0x6c, 0x4c, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x05, 0x55,
+ 0x8a, 0xaa, 0xaa, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf9, 0xdb,
+ 0xed, 0x36, 0x5a, 0xeb, 0xad, 0x6b, 0x57, 0x5d, 0xa4, 0x7f, 0xff, 0xff,
+ 0xf3, 0x80, 0x05, 0x54, 0xb2, 0xaa, 0xaa, 0xa9, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x7f, 0xf4, 0x5a, 0xaa, 0xdb, 0x40, 0x20, 0xea, 0xaa, 0xa8, 0x19,
+ 0xe8, 0x1f, 0xff, 0xff, 0xe7, 0x00, 0x02, 0x55, 0x4b, 0xd5, 0x55, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0xff, 0xf2, 0xb5, 0x7d, 0x56, 0xaa, 0x76,
+ 0xa5, 0xce, 0xb4, 0xd1, 0x2c, 0x0f, 0xff, 0xff, 0x0f, 0x00, 0x02, 0xa5,
+ 0x5a, 0x2a, 0x55, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xc1, 0x31,
+ 0x9a, 0x8d, 0x61, 0x2e, 0xb2, 0xb5, 0x57, 0x59, 0x88, 0x07, 0xff, 0xf8,
+ 0x7e, 0x06, 0x00, 0xaa, 0x65, 0xd2, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0x7e, 0x02, 0x55, 0x67, 0xb5, 0x5b, 0x05, 0xfa, 0xab, 0x0a, 0x8d,
+ 0xe4, 0x03, 0xff, 0xc3, 0xfe, 0x07, 0x01, 0xaa, 0xaa, 0x26, 0xaa, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x16, 0x7d, 0x10, 0x57, 0x9d, 0x4a, 0x8a, 0xd7,
+ 0x95, 0x5a, 0xd5, 0x4d, 0x58, 0x00, 0xfc, 0x1f, 0xe6, 0x03, 0xc1, 0x55,
+ 0x52, 0xd5, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xe1, 0x42, 0xa4,
+ 0x7d, 0xd7, 0xba, 0xb0, 0x24, 0xd5, 0x97, 0xc5, 0xd2, 0x00, 0x00, 0x7f,
+ 0x87, 0x03, 0xe0, 0x42, 0xaa, 0x95, 0x55, 0x4b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0d, 0x42, 0x17, 0xfb, 0x8c, 0xac, 0x46, 0xae, 0xdd, 0xb3, 0x68, 0x92,
+ 0x6c, 0x03, 0x03, 0xfe, 0x0f, 0x01, 0xe0, 0x5a, 0x55, 0x62, 0xaa, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x3d, 0x0a, 0x32, 0xd5, 0x7b, 0xc9, 0xde, 0xad,
+ 0x91, 0x4a, 0xaa, 0xc6, 0x68, 0x21, 0xff, 0xf8, 0x7f, 0x00, 0x60, 0x2a,
+ 0xa9, 0x2e, 0xa9, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x64, 0xb3, 0x18,
+ 0x8e, 0xae, 0xb4, 0x46, 0x9a, 0xbb, 0x54, 0xd5, 0xb4, 0x30, 0xff, 0xe0,
+ 0xff, 0x80, 0x00, 0x52, 0xa5, 0x51, 0x55, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x82, 0xc1, 0x52, 0xb3, 0xcb, 0xc5, 0xd4, 0xda, 0xd3, 0x4a, 0x68, 0x87,
+ 0x68, 0x30, 0x3f, 0x03, 0xff, 0x80, 0x00, 0x2a, 0xaa, 0x55, 0x45, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x5b, 0xc6, 0xa9, 0x19, 0x74, 0x3d, 0xa9, 0x92,
+ 0xab, 0x5b, 0x95, 0xb0, 0x28, 0x78, 0x00, 0x07, 0xff, 0xc0, 0x00, 0x15,
+ 0x2a, 0x95, 0x2a, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x10, 0x0f, 0x6b, 0xa2,
+ 0xb7, 0x22, 0x61, 0x2b, 0x52, 0xaa, 0x33, 0x1d, 0xa0, 0x7c, 0x00, 0x0f,
+ 0xff, 0xe0, 0x00, 0x14, 0xa4, 0xaa, 0xa9, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+ 0x00, 0x23, 0x27, 0x64, 0x8d, 0xfb, 0x64, 0xa7, 0x5a, 0xaa, 0xd3, 0x5a,
+ 0xa0, 0x7c, 0x00, 0x3f, 0xff, 0xe0, 0x00, 0x05, 0x29, 0x52, 0x45, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x4a, 0x5f, 0x66, 0x6a, 0xab, 0x22, 0xaf, 0xec,
+ 0xa9, 0x55, 0x17, 0x17, 0xa0, 0xfe, 0x00, 0x7f, 0xff, 0xf0, 0x00, 0x02,
+ 0xaa, 0x4a, 0xaa, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x8d, 0x83, 0x49, 0xad,
+ 0x9a, 0x57, 0x50, 0x6b, 0x6b, 0x5a, 0xd1, 0x1a, 0x80, 0xff, 0x00, 0xff,
+ 0xff, 0xf0, 0x00, 0x05, 0x52, 0x95, 0x29, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c,
+ 0x1b, 0x4b, 0xcb, 0x45, 0x7b, 0x4e, 0x4b, 0x5d, 0x2a, 0xd5, 0x16, 0x29,
+ 0x41, 0xff, 0x87, 0xff, 0xff, 0xf8, 0x00, 0x01, 0x2a, 0xa5, 0x4a, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20, 0x52, 0x98, 0x9d, 0x0e, 0x95, 0x65, 0x38, 0x95,
+ 0x55, 0x6a, 0xab, 0x35, 0x81, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x00, 0x01,
+ 0xa9, 0x14, 0xa9, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x80, 0xe3, 0x2b, 0x74, 0x4d,
+ 0xb5, 0x55, 0xd5, 0x95, 0x69, 0x4a, 0xac, 0x6e, 0x03, 0xff, 0xff, 0xff,
+ 0xff, 0xfc, 0x00, 0x01, 0x25, 0x65, 0x2a, 0x95, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x9a, 0xb7, 0x0b, 0xb0, 0x6f, 0xfc, 0xfd, 0x6a, 0x8b, 0xd5, 0x4e, 0xa8,
+ 0x03, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00, 0x01, 0x49, 0x14, 0xa5, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x02, 0x94, 0xab, 0x3d, 0xc9, 0x6b, 0xb5, 0x43, 0xee,
+ 0xb6, 0x95, 0x54, 0x9a, 0x07, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00, 0x00,
+ 0xaa, 0x54, 0xaa, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x13, 0x9f, 0x68, 0x5b,
+ 0xff, 0xfa, 0xd7, 0xfa, 0xa5, 0x6a, 0xac, 0xd4, 0x07, 0xff, 0xff, 0xff,
+ 0xff, 0xfe, 0x00, 0x00, 0x55, 0x53, 0x2a, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x04,
+ 0x65, 0x3c, 0x8b, 0x93, 0x87, 0x76, 0x75, 0xf5, 0x57, 0x55, 0x5c, 0xa8,
+ 0x0f, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x0b, 0x54, 0x95, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x2a, 0x7b, 0xfd, 0x75, 0xba, 0xae, 0xfe, 0x44, 0x5a,
+ 0x8a, 0xca, 0x9c, 0xf0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00,
+ 0x14, 0x4a, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0xd8, 0xa2, 0xbd, 0x59, 0x52,
+ 0x8e, 0xec, 0x48, 0xf1, 0x2d, 0x52, 0x9c, 0xa0, 0x0f, 0xff, 0xff, 0xff,
+ 0xff, 0xfe, 0x02, 0x00, 0x02, 0x95, 0x54, 0x92, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x35,
+ 0x5f, 0xed, 0x6a, 0xf2, 0x95, 0x19, 0x09, 0xaa, 0x6a, 0x5d, 0x48, 0xc0,
+ 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x02, 0xaa, 0x52, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x22, 0xa6, 0xc3, 0xdf, 0xd0, 0x66, 0x29, 0xf0, 0x01, 0x6a,
+ 0xca, 0xd5, 0x59, 0x80, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80,
+ 0x05, 0x2a, 0xaa, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xa4, 0x03, 0xf2, 0x61, 0xed,
+ 0x17, 0xe2, 0x02, 0xaa, 0x99, 0x2a, 0x98, 0x80, 0x07, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x80, 0x40, 0x02, 0x55, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x09, 0x05,
+ 0x88, 0xa7, 0x47, 0xdc, 0xc0, 0x00, 0x2a, 0x6b, 0x4a, 0x6a, 0xb0, 0x00,
+ 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x20, 0x01, 0x55, 0x55, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x2e, 0x89, 0x11, 0xfb, 0x87, 0x9c, 0x90, 0x64, 0xaa, 0x9a,
+ 0x4a, 0xa9, 0x1a, 0x00, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x30,
+ 0x00, 0xa8, 0x42, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x2b, 0x16, 0x2b, 0x85, 0x09, 0xf8,
+ 0x83, 0x45, 0x5d, 0xad, 0x95, 0x49, 0x5c, 0x08, 0x1f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xe0, 0x10, 0x00, 0xae, 0xea, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x49, 0x28,
+ 0x3b, 0xf5, 0xbd, 0x30, 0x14, 0x96, 0x95, 0x6a, 0xd2, 0xb5, 0xaa, 0x10,
+ 0x1f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x18, 0x00, 0xa9, 0x15, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x19, 0x26, 0x58, 0x76, 0x4a, 0x1e, 0xe1, 0x7a, 0xba, 0xb1, 0x5b,
+ 0x4a, 0xab, 0x2c, 0x10, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x08,
+ 0x00, 0x2b, 0x6a, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x11, 0x21, 0x4d, 0x78, 0x6d, 0x20,
+ 0x0a, 0x84, 0xda, 0xaf, 0x55, 0x56, 0x52, 0x20, 0x3f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf0, 0x0c, 0x00, 0x54, 0xaa, 0xa5, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xe8, 0x62,
+ 0xcd, 0x49, 0xd8, 0x42, 0xb4, 0xb8, 0x96, 0x3a, 0xa5, 0x54, 0xd4, 0x00,
+ 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x04, 0x00, 0x14, 0xaa, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x2d, 0x18, 0x4e, 0x0e, 0x61, 0xb0, 0x0a, 0xab, 0x6b, 0xaa, 0x37,
+ 0x0d, 0x5a, 0xa8, 0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x06,
+ 0x00, 0x2b, 0x4a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0xb1, 0xd8, 0x5f, 0xc1, 0x90, 0x55,
+ 0x55, 0x4a, 0x32, 0xed, 0x79, 0x49, 0x50, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xf8, 0x06, 0x00, 0x12, 0xaa, 0xa9, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x16, 0x00, 0xd1,
+ 0xac, 0x89, 0x91, 0x4a, 0xaa, 0xaa, 0xa9, 0xb2, 0xc5, 0x65, 0x68, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x03, 0x00, 0x15, 0x2a, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x64, 0x54, 0x81, 0xcf, 0x89, 0x00, 0x1f, 0xf2, 0xaa, 0xd5, 0x6b, 0x35,
+ 0x5a, 0xad, 0xa8, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x03,
+ 0x00, 0x05, 0x6a, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x23, 0xf6, 0x52, 0x28, 0x19, 0xd5,
+ 0x45, 0x55, 0x49, 0x54, 0xa5, 0x6a, 0xa0, 0x01, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfc, 0x03, 0x00, 0x15, 0x24, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x09, 0x18, 0x04, 0x6c,
+ 0x64, 0x61, 0x21, 0x95, 0xad, 0x56, 0xaa, 0xd7, 0xab, 0x55, 0x50, 0x03,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x01, 0x80, 0x04, 0xaa, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x12, 0x46, 0x20, 0xdc, 0xc4, 0x01, 0x01, 0xaa, 0xaa, 0xa9, 0x5b, 0x49,
+ 0x2a, 0xad, 0xa0, 0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x01,
+ 0x80, 0x02, 0xa4, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x44, 0x08, 0x00, 0xd1, 0x84, 0x20, 0x05, 0xaa,
+ 0xaa, 0x95, 0x66, 0xaa, 0xaa, 0xaa, 0xa0, 0x03, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0x01, 0x80, 0x05, 0x55, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0c, 0xa8, 0x89, 0x99,
+ 0x00, 0x22, 0x03, 0xd5, 0x4a, 0xaa, 0xa7, 0x4d, 0x5a, 0xd5, 0x40, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x01, 0x80, 0x02, 0x49, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48,
+ 0x18, 0x02, 0x03, 0x24, 0x10, 0xa8, 0x2f, 0x6a, 0xaa, 0xaa, 0x45, 0x51,
+ 0x4a, 0xb5, 0x40, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
+ 0x80, 0x04, 0x95, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x01, 0x05, 0x48, 0x07, 0xae, 0x83, 0x02, 0x8c, 0x2a,
+ 0x95, 0x52, 0xcd, 0x56, 0xad, 0x56, 0xc0, 0x07, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0x00, 0xc0, 0x01, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x58, 0x98, 0x2f, 0x30,
+ 0x02, 0x82, 0x01, 0x6a, 0xaa, 0xaa, 0x8d, 0x29, 0x35, 0xb5, 0x04, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0xc0, 0x02, 0x94, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x45, 0x30, 0x1f, 0xa0, 0x25, 0x1d, 0x15, 0x4a, 0xa5, 0x51, 0xad, 0x55,
+ 0x6d, 0x6a, 0x84, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
+ 0xc0, 0x01, 0x29, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0xa1, 0x2d, 0x21, 0x7f, 0x40, 0x4d, 0xb0, 0x35, 0x55,
+ 0x15, 0x15, 0x79, 0x25, 0x4a, 0xca, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xc0, 0x02, 0x52, 0x96, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x40, 0x7f, 0x24,
+ 0x8a, 0x58, 0xad, 0x55, 0x55, 0x52, 0x0a, 0x5a, 0xaa, 0xbb, 0x04, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xc0, 0x00, 0xa5, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x09,
+ 0x50, 0x42, 0xfd, 0xc0, 0x59, 0x53, 0x53, 0x4c, 0xaa, 0x54, 0x5a, 0xa2,
+ 0xad, 0xaa, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0xc0, 0x01, 0x4d, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x02, 0x15, 0x90, 0x7d, 0x15, 0x76, 0xac, 0xad, 0x52,
+ 0x95, 0x55, 0xaa, 0x4c, 0xa5, 0x54, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xc0, 0x00, 0xa5, 0x12, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0c, 0x83, 0x01, 0xff, 0xf9,
+ 0x15, 0x29, 0x51, 0x55, 0x52, 0x2c, 0xa2, 0x54, 0xab, 0x6c, 0x04, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x01, 0x28, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98,
+ 0x06, 0x01, 0xff, 0xe9, 0x55, 0xd5, 0x56, 0xa5, 0x55, 0xaa, 0xad, 0x51,
+ 0x35, 0x54, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x4a, 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x08, 0x31, 0x1c, 0x12, 0xf4, 0xc5, 0x44, 0x5a, 0xaa, 0x49,
+ 0xaa, 0xa9, 0xa8, 0x95, 0x55, 0x50, 0x04, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x01, 0x80, 0x01, 0x52, 0x82, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40, 0x48, 0x1c, 0x41, 0x3e, 0x2e,
+ 0x8b, 0x54, 0xaa, 0xab, 0x52, 0xa5, 0x45, 0x24, 0x95, 0x50, 0x06, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x01, 0x24, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00,
+ 0x38, 0x01, 0x1d, 0x64, 0x9a, 0xa9, 0x2a, 0x94, 0xaa, 0xab, 0x55, 0x55,
+ 0xaa, 0xd8, 0x02, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x01, 0x4a, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x13, 0xc8, 0x00, 0xb0, 0x02, 0x3e, 0x96, 0x35, 0x6a, 0xaa, 0x55,
+ 0x4a, 0x95, 0x4a, 0x54, 0x55, 0x28, 0x03, 0x0f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8f, 0xc0, 0x01, 0x29, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x83, 0x49, 0x20, 0x8c, 0x78, 0xb5,
+ 0x65, 0x4a, 0xaa, 0xa6, 0xaa, 0xa5, 0x29, 0x45, 0xaa, 0xa8, 0x03, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x70, 0x01, 0x45, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x03, 0x20,
+ 0x00, 0x30, 0x75, 0x4c, 0x95, 0x55, 0x54, 0xa9, 0x52, 0x95, 0x52, 0x95,
+ 0x2a, 0xa8, 0x01, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80,
+ 0x18, 0x00, 0x94, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0d, 0x0c, 0x0c, 0x44, 0x18, 0xe5, 0xb9, 0xb5, 0x54, 0x92, 0x96,
+ 0xaa, 0x55, 0x4a, 0x69, 0x55, 0x20, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x01, 0x29, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x2e, 0x08, 0x1c, 0x01, 0x7b, 0xdc, 0x96,
+ 0x4a, 0xaa, 0xaa, 0xa9, 0x4a, 0xaa, 0x95, 0x0a, 0x73, 0x50, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x01, 0x52, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x20, 0x30,
+ 0x44, 0x79, 0x88, 0xd4, 0x95, 0x49, 0x54, 0xab, 0x52, 0x94, 0xa4, 0xaa,
+ 0xc6, 0x51, 0xe0, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00,
+ 0x00, 0x00, 0x8a, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x38, 0x2a, 0x21, 0x01, 0xfb, 0xa0, 0x7d, 0x54, 0xb5, 0x55, 0x54,
+ 0x95, 0x55, 0x29, 0x55, 0x29, 0x57, 0xf8, 0x3f, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0x00, 0x00, 0x02, 0x50, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x90, 0x00, 0x0d, 0xfb, 0xca, 0xd2,
+ 0xaa, 0x92, 0x49, 0x55, 0x55, 0x2a, 0x4a, 0x4a, 0xd3, 0x57, 0xfc, 0x1f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x00, 0x00, 0x02, 0x95, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0x20, 0xa4,
+ 0x13, 0xfd, 0xab, 0x2a, 0x52, 0xa5, 0x52, 0x55, 0x52, 0xa9, 0x55, 0x55,
+ 0x2a, 0x57, 0xff, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x00,
+ 0x00, 0x0d, 0x24, 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x30, 0xc0, 0x00, 0x77, 0xb5, 0x55, 0x69, 0x4a, 0xaa, 0x96, 0xaa,
+ 0x94, 0xaa, 0x91, 0x45, 0x55, 0x4f, 0xff, 0x03, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xcf, 0x00, 0x00, 0x1e, 0x55, 0x2a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x30, 0x81, 0x00, 0x67, 0xb4, 0x74, 0x52,
+ 0x55, 0x55, 0x2a, 0xaa, 0xaa, 0x52, 0x55, 0x35, 0x52, 0xaf, 0xff, 0x81,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x1e, 0xa0, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xe3, 0x00, 0x08,
+ 0x86, 0x4a, 0xd4, 0xd5, 0x54, 0x91, 0x49, 0x55, 0x51, 0x54, 0x95, 0x49,
+ 0x56, 0x4f, 0xff, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x80,
+ 0x00, 0x3e, 0x0b, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0xc4, 0x02, 0x00, 0x7f, 0xbd, 0x8a, 0x25, 0x49, 0x55, 0x2a, 0xb5,
+ 0x55, 0x52, 0xa4, 0x96, 0xac, 0x8f, 0xff, 0xe0, 0x7f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0x80, 0x00, 0x7e, 0x54, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x43, 0x80, 0x20, 0x21, 0x4d, 0x3f, 0x52, 0xb4,
+ 0x55, 0x2a, 0x55, 0x55, 0x4a, 0xa4, 0x95, 0x32, 0xa5, 0x4f, 0xff, 0xe0,
+ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xc0, 0x00, 0xfe, 0x49, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x04, 0x08,
+ 0x1a, 0x46, 0xd4, 0x49, 0xaa, 0xa4, 0xaa, 0xaa, 0x54, 0x95, 0x2a, 0xc4,
+ 0x94, 0x8f, 0xff, 0xf0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xe0,
+ 0x01, 0xfe, 0x92, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8e, 0x40, 0x80, 0x00, 0x74, 0x44, 0x51, 0x55, 0x64, 0xa9, 0x49, 0x55,
+ 0x49, 0x51, 0x52, 0x5a, 0x2a, 0x1f, 0xff, 0xf8, 0x07, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xf0, 0x07, 0xfe, 0x24, 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0d, 0x90, 0x00, 0x41, 0xeb, 0x12, 0xab, 0x52,
+ 0xa9, 0x4a, 0xaa, 0xa5, 0x52, 0x95, 0x2a, 0xa2, 0xa8, 0x1f, 0xff, 0xf8,
+ 0x03, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xf8, 0x1f, 0xfe, 0x52, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x3b, 0x81, 0x08, 0x05,
+ 0xe3, 0x15, 0xa5, 0x36, 0x95, 0x4a, 0x4a, 0xaa, 0x45, 0x2a, 0xa2, 0xac,
+ 0x00, 0x3f, 0xff, 0xfc, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff,
+ 0xff, 0xfe, 0x04, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xb2, 0x00, 0x00, 0x80, 0xe8, 0x52, 0xac, 0xa8, 0xa4, 0x92, 0xa9, 0x4a,
+ 0x95, 0x51, 0x2d, 0x50, 0xe1, 0xff, 0xff, 0xfe, 0x00, 0x7f, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xff, 0xff, 0xfe, 0x29, 0x05, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x35, 0x10, 0x09, 0xa9, 0x4a, 0xaa, 0xaa,
+ 0xa9, 0x54, 0x95, 0x2a, 0xa4, 0x8a, 0xa9, 0x43, 0xff, 0xff, 0xff, 0xfe,
+ 0x00, 0x3f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0xff, 0xfe, 0x12, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x01, 0x03,
+ 0x34, 0xa5, 0x19, 0x25, 0x55, 0x55, 0x55, 0x55, 0x15, 0x55, 0x45, 0x53,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x1f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff,
+ 0xff, 0xfe, 0x04, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+ 0x49, 0x3c, 0x00, 0x34, 0x75, 0x4a, 0xa2, 0xaa, 0x92, 0x92, 0x92, 0x54,
+ 0x52, 0x54, 0x99, 0x53, 0xff, 0xff, 0xff, 0xff, 0x80, 0x0f, 0xff, 0xff,
+ 0xff, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x14, 0x94, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x78, 0x12, 0xfc, 0xaa, 0x92, 0x2d, 0x54,
+ 0xa4, 0xa5, 0x2a, 0x92, 0x94, 0x92, 0xa2, 0xa3, 0xff, 0xff, 0xff, 0xff,
+ 0x80, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xff, 0xff, 0xff, 0x01, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x16, 0xf0, 0x80, 0x71,
+ 0xc5, 0x2a, 0x65, 0x25, 0x55, 0x4a, 0x52, 0xaa, 0x51, 0x52, 0x95, 0x21,
+ 0xff, 0xff, 0xff, 0xff, 0xc0, 0x07, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff,
+ 0xff, 0xff, 0x81, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x8e, 0xa0, 0x00, 0xb3, 0x94, 0xa4, 0xa9, 0x55, 0x45, 0x54, 0xa5, 0x25,
+ 0x25, 0x2a, 0x65, 0x41, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x07, 0xff, 0xff,
+ 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0x80, 0x4a, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xa0, 0x0f, 0x42, 0x13, 0x46, 0x94, 0x94, 0xa5, 0x4a,
+ 0x54, 0x92, 0x88, 0xa8, 0x49, 0x55, 0x4a, 0x51, 0xff, 0xff, 0xff, 0xff,
+ 0xe0, 0x07, 0xff, 0xff, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xe0, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x2d, 0x00, 0x87, 0x56,
+ 0x4b, 0x2a, 0x99, 0x55, 0x4a, 0xaa, 0x55, 0x4a, 0x92, 0xa2, 0x54, 0xa8,
+ 0xff, 0xff, 0xff, 0xff, 0xf0, 0x07, 0xff, 0xff, 0xff, 0xfd, 0x8f, 0xff,
+ 0xff, 0xff, 0xfe, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x88,
+ 0x18, 0x04, 0x1f, 0x11, 0x51, 0x51, 0x0b, 0x54, 0x94, 0xa5, 0x52, 0x92,
+ 0x55, 0x54, 0x95, 0x20, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x0f, 0xff, 0xff,
+ 0xff, 0xf9, 0x8f, 0xff, 0xff, 0xff, 0xff, 0x89, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x11, 0xe0, 0x38, 0x01, 0x3e, 0x4d, 0x0a, 0x55, 0x5a, 0x55,
+ 0x55, 0x4a, 0x25, 0x24, 0x81, 0x25, 0x29, 0x48, 0xff, 0xff, 0xff, 0xff,
+ 0xf8, 0x1f, 0xff, 0xff, 0xff, 0xf0, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x80, 0xf0, 0x90, 0x7e, 0xd4,
+ 0x68, 0x92, 0xaa, 0x54, 0x92, 0x92, 0xa9, 0x55, 0x2e, 0xaa, 0xa2, 0x58,
+ 0x7f, 0xff, 0xff, 0xff, 0xfc, 0x3f, 0xff, 0xff, 0xff, 0xe0, 0x8f, 0xff,
+ 0xff, 0xff, 0xff, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x04,
+ 0x40, 0x00, 0x49, 0xe2, 0x8a, 0xa9, 0x54, 0x95, 0x2a, 0x54, 0x4a, 0x48,
+ 0x91, 0x51, 0x2a, 0xc8, 0x7f, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
+ 0xff, 0xc0, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x2e, 0x00, 0x00, 0x03, 0x79, 0x4a, 0xa9, 0x45, 0x49, 0x54,
+ 0xa4, 0x89, 0x49, 0x52, 0x8a, 0x85, 0x4a, 0x98, 0x7f, 0xff, 0xff, 0xff,
+ 0xfe, 0x7f, 0xff, 0xff, 0xff, 0x80, 0x8f, 0xff, 0xff, 0xff, 0xff, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x48, 0x44, 0x91, 0x65, 0x55,
+ 0x4a, 0x54, 0x96, 0x85, 0x55, 0x52, 0xaa, 0x8a, 0x1a, 0xaa, 0x59, 0x30,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xfe, 0x01, 0x0f, 0xff,
+ 0xff, 0xff, 0xff, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x00,
+ 0x00, 0x02, 0xc3, 0x50, 0x29, 0x49, 0x2d, 0x35, 0x24, 0x54, 0x89, 0x32,
+ 0x52, 0x91, 0x49, 0x40, 0xff, 0xff, 0xff, 0xff, 0xff, 0x30, 0xff, 0xff,
+ 0xfc, 0x01, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x08, 0x80, 0x00, 0x87, 0x4b, 0x42, 0x95, 0x2a, 0x4a,
+ 0x4a, 0x89, 0x52, 0x44, 0x45, 0x25, 0x55, 0x10, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x98, 0x7f, 0xff, 0xf0, 0x01, 0x0f, 0xff, 0xff, 0xff, 0xfc, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40, 0x40, 0x11, 0x15, 0x6a, 0x92,
+ 0x4d, 0x29, 0x4a, 0xaa, 0xa4, 0xaa, 0x55, 0x54, 0x9a, 0x74, 0x44, 0xb1,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x8c, 0x1f, 0xff, 0x00, 0x01, 0x0f, 0xff,
+ 0xff, 0xff, 0xf0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x80, 0x01, 0x48, 0x54, 0x94, 0xa6, 0xa9, 0x52, 0x45, 0x24, 0x84, 0x92,
+ 0xa4, 0x85, 0x2a, 0x43, 0xff, 0xff, 0xff, 0xff, 0xff, 0x84, 0x00, 0x00,
+ 0x00, 0x01, 0x1f, 0xff, 0xff, 0xff, 0xc0, 0x92, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x09, 0x02, 0x03, 0x53, 0x15, 0x2a, 0x49, 0x4a, 0xa5,
+ 0x49, 0x29, 0x29, 0x24, 0x2a, 0xd5, 0x54, 0xa3, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc6, 0x00, 0x00, 0x00, 0x03, 0x1f, 0xff, 0xff, 0xfe, 0x01, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x20, 0x26, 0xd4, 0xa2,
+ 0x49, 0xb5, 0x15, 0x4a, 0x52, 0xa9, 0x4a, 0x92, 0xa5, 0x12, 0x52, 0xa3,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x00, 0x00, 0x00, 0x03, 0x1f, 0xff,
+ 0xff, 0xf0, 0x14, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x00, 0x44,
+ 0x00, 0x0c, 0xc4, 0xa4, 0xa8, 0x25, 0x2a, 0xaa, 0x84, 0x45, 0x28, 0xa4,
+ 0xa8, 0xa5, 0x4a, 0x43, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x00, 0x00,
+ 0x00, 0x06, 0x1f, 0xff, 0xff, 0xc0, 0x25, 0x24, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x80, 0x12, 0x01, 0x11, 0x12, 0x8a, 0x95, 0x49, 0x55, 0x49,
+ 0x2a, 0xaa, 0x45, 0x0a, 0x4a, 0x94, 0xaa, 0x90, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc2, 0x00, 0x00, 0x00, 0x06, 0x1f, 0xff, 0xff, 0x02, 0x48, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x08, 0x00, 0x22, 0x01, 0x24, 0x95,
+ 0x21, 0x55, 0x55, 0x2a, 0x54, 0x92, 0x94, 0xa8, 0xa9, 0x2a, 0x92, 0xa8,
+ 0x1f, 0xff, 0xff, 0xff, 0xff, 0x82, 0x00, 0x00, 0x00, 0x06, 0x0f, 0xff,
+ 0xfe, 0x02, 0x49, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x01, 0x40,
+ 0x00, 0xc4, 0x89, 0x11, 0x54, 0x95, 0x29, 0x49, 0x49, 0x24, 0x29, 0x12,
+ 0x92, 0xa5, 0x55, 0x28, 0x01, 0xff, 0xff, 0xff, 0xff, 0x82, 0x00, 0x00,
+ 0x00, 0x02, 0x0f, 0xff, 0xfc, 0x14, 0x92, 0x49, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x42, 0x06, 0x44, 0x45, 0x08, 0x92, 0x24, 0x95, 0xaa, 0xa5, 0x52,
+ 0x92, 0x55, 0x42, 0x52, 0xaa, 0x4a, 0x49, 0x4a, 0x80, 0x0f, 0xff, 0xff,
+ 0xff, 0x84, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0xf0, 0x04, 0x92, 0x49,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x2a, 0x80, 0x01, 0x95, 0x2a, 0x8a,
+ 0xa0, 0x95, 0x2a, 0x95, 0x54, 0xa4, 0x54, 0x84, 0x44, 0x95, 0x55, 0x29,
+ 0x54, 0x00, 0x7f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xff,
+ 0xe0, 0x29, 0x24, 0x92, 0x00, 0x00, 0x00, 0x00, 0x22, 0x04, 0x03, 0x03,
+ 0x42, 0x12, 0x52, 0x2a, 0x15, 0x55, 0x54, 0xa8, 0x92, 0xa9, 0x49, 0x29,
+ 0x54, 0xaa, 0x92, 0x52, 0xa5, 0x40, 0x07, 0xff, 0xff, 0x00, 0x15, 0x50,
+ 0x92, 0x44, 0x07, 0xff, 0xc0, 0x52, 0x54, 0xaa, 0x00, 0x00, 0x00, 0x00,
+ 0x84, 0x02, 0x86, 0x03, 0x08, 0x41, 0x48, 0x94, 0xa5, 0x52, 0xaa, 0x8a,
+ 0x49, 0x52, 0x92, 0x45, 0x49, 0x55, 0x2a, 0xaa, 0x94, 0x96, 0x00, 0x7f,
+ 0xfe, 0x01, 0x55, 0x2b, 0x25, 0x50, 0x03, 0xff, 0x00, 0xa4, 0x91, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x88, 0x2c, 0x18, 0x14, 0x0a, 0xaa, 0x92, 0x55,
+ 0x49, 0x55, 0x4a, 0xa9, 0x52, 0x44, 0x92, 0x94, 0x92, 0x92, 0xca, 0x4a,
+ 0x55, 0x27, 0xa0, 0x0f, 0xf8, 0x05, 0x35, 0x49, 0x68, 0x91, 0x00, 0xf8,
+ 0x04, 0x95, 0x25, 0x29, 0x00, 0x00, 0x00, 0x01, 0x08, 0x04, 0x18, 0x00,
+ 0x08, 0x04, 0x49, 0x08, 0x55, 0x4a, 0xa9, 0x12, 0x95, 0x55, 0x49, 0x22,
+ 0x65, 0x2a, 0x5a, 0x92, 0xa9, 0x49, 0x54, 0x00, 0xe0, 0x24, 0xb2, 0x55,
+ 0x15, 0x4c, 0x00, 0x00, 0x02, 0x48, 0x52, 0x52, 0x00, 0x00, 0x00, 0x02,
+ 0x21, 0x09, 0x50, 0x81, 0x51, 0x69, 0x48, 0x52, 0x92, 0x94, 0xaa, 0x54,
+ 0x25, 0x54, 0x92, 0x4a, 0x89, 0x54, 0x82, 0xaa, 0x4a, 0x53, 0x55, 0x00,
+ 0x00, 0x49, 0x65, 0x52, 0x51, 0x12, 0x80, 0x00, 0x14, 0x92, 0x92, 0x42,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x50, 0x24, 0x2a, 0x95, 0x25, 0x29,
+ 0x4a, 0xb5, 0x48, 0xa5, 0x52, 0x49, 0x24, 0x94, 0xaa, 0xab, 0x15, 0x24,
+ 0x92, 0xa5, 0x54, 0x40, 0x00, 0xaa, 0xb5, 0x25, 0x4a, 0xa4, 0x80, 0x00,
+ 0x21, 0x24, 0xa8, 0x94, 0x00, 0x00, 0x00, 0x0a, 0x44, 0x02, 0xb1, 0x01,
+ 0x24, 0x80, 0x48, 0xa2, 0x55, 0x55, 0x52, 0x8a, 0x94, 0xaa, 0x49, 0x22,
+ 0x8a, 0x49, 0x49, 0x55, 0x4a, 0x51, 0x25, 0x48, 0x02, 0xa8, 0x6a, 0x69,
+ 0x12, 0x29, 0x50, 0x00, 0x55, 0x24, 0x81, 0x24, 0x00, 0x00, 0x00, 0x23,
+ 0x80, 0x90, 0x70, 0x14, 0x82, 0x55, 0x22, 0x8a, 0xa9, 0x14, 0xaa, 0xa9,
+ 0x25, 0x51, 0x2a, 0x55, 0x21, 0x55, 0x55, 0x12, 0x55, 0x32, 0xaa, 0x94,
+ 0xa5, 0x55, 0x5a, 0x92, 0x54, 0xa5, 0x52, 0x24, 0xa2, 0x49, 0x2a, 0x92,
+ 0x00, 0x00, 0x00, 0x0e, 0x08, 0x00, 0xe2, 0xa0, 0x04, 0x54, 0x84, 0x52,
+ 0x4a, 0xa4, 0xaa, 0x2a, 0xa9, 0x25, 0x45, 0x42, 0xae, 0x92, 0x44, 0xa4,
+ 0x91, 0x24, 0xaa, 0xaa, 0xaa, 0x91, 0x55, 0x55, 0x51, 0x55, 0x54, 0xa9,
+ 0x14, 0x92, 0x49, 0x24, 0x00, 0x00, 0x00, 0x24, 0x01, 0x82, 0x70, 0x09,
+ 0x2a, 0x41, 0x2a, 0x89, 0x14, 0x49, 0x29, 0x52, 0x4a, 0xaa, 0x2a, 0x15,
+ 0x12, 0xaa, 0xa9, 0x2a, 0xaa, 0xba, 0xa4, 0x94, 0x95, 0x55, 0x5a, 0x49,
+ 0x25, 0x20, 0x49, 0x22, 0xa5, 0x24, 0x92, 0x92, 0x00, 0x00, 0x01, 0x0c,
+ 0x93, 0x10, 0xe2, 0x11, 0x00, 0x94, 0x22, 0x52, 0x69, 0x53, 0x52, 0x45,
+ 0x49, 0x22, 0xa4, 0x4a, 0x55, 0x29, 0x2a, 0xa4, 0x52, 0x42, 0xaa, 0xa5,
+ 0x52, 0xa8, 0xaa, 0x55, 0x4a, 0xab, 0xa9, 0x4a, 0x54, 0x49, 0x32, 0x24 };
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 372e4e69c43a..d632a1d576f9 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -21,8 +21,8 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
- select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_UID16
select MMU_GATHER_NO_RANGE if MMU
@@ -34,6 +34,7 @@ config M68K
select SET_FS
select UACCESS_MEMCPY if !MMU
select VIRT_TO_BUS
+ select ZONE_DMA
config CPU_BIG_ENDIAN
def_bool y
@@ -62,10 +63,6 @@ config TIME_LOW_RES
config NO_IOPORT_MAP
def_bool y
-config ZONE_DMA
- bool
- default y
-
config HZ
int
default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index d964c1f27399..6a07a6817885 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -33,6 +33,7 @@ config MAC
depends on MMU
select MMU_MOTOROLA if MMU
select HAVE_ARCH_NVRAM_OPS
+ select HAVE_PATA_PLATFORM
select LEGACY_TIMER_TICK
help
This option enables support for the Apple Macintosh series of
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index c54055a3d284..dd0c0ec67f67 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -97,7 +97,6 @@ head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o
head-$(CONFIG_M68000) := arch/m68k/68000/head.o
head-$(CONFIG_COLDFIRE) := arch/m68k/coldfire/head.o
-core-y += arch/m68k/
libs-y += arch/m68k/lib/
diff --git a/arch/m68k/coldfire/m525x.c b/arch/m68k/coldfire/m525x.c
index 2c4d2ca2f20d..485375112e28 100644
--- a/arch/m68k/coldfire/m525x.c
+++ b/arch/m68k/coldfire/m525x.c
@@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
static struct clk_lookup m525x_clk_lookup[] = {
- CLKDEV_INIT(NULL, "pll.0", &pll),
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
CLKDEV_INIT(NULL, "sys.0", &clk_sys),
CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
index 57e8c8fb5eba..92636c89d65b 100644
--- a/arch/m68k/emu/nfcon.c
+++ b/arch/m68k/emu/nfcon.c
@@ -85,7 +85,7 @@ static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch)
return 1;
}
-static int nfcon_tty_write_room(struct tty_struct *tty)
+static unsigned int nfcon_tty_write_room(struct tty_struct *tty)
{
return 64;
}
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index bc1228e00518..5c2c0a864524 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -32,8 +32,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
#define pmd_populate_kernel pmd_populate
-#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
{
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 8d4ec05996c5..6f2b87d7a50d 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -150,6 +150,8 @@
#ifndef __ASSEMBLY__
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index b4fc3b4f6bb3..74a817d9387f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -88,7 +88,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_set(pmd, page);
}
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8076467eff4b..022c3abc280d 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -105,6 +105,8 @@ extern unsigned long mm_cachebits;
#define __S110 PAGE_SHARED_C
#define __S111 PAGE_SHARED_C
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -129,7 +131,7 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
-#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK))
+#define pud_pgtable(pud) ((pmd_t *)__va(pud_val(pud) & _TABLE_MASK))
#define pte_none(pte) (!pte_val(pte))
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index aca22c2c1ee2..143ba7de9bda 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -72,7 +72,6 @@
#define PTRS_PER_PGD 128
#endif
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 000f64869b91..198036aff519 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -32,7 +32,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_val(*pmd) = __pa((unsigned long)page_address(page));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
deleted file mode 100644
index 98c8930d3d35..000000000000
--- a/arch/m68k/include/asm/unaligned.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_UNALIGNED_H
-#define _ASM_M68K_UNALIGNED_H
-
-
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#else
-/*
- * The m68k can do unaligned accesses itself.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif
-
-#endif /* _ASM_M68K_UNALIGNED_H */
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 017bac3aab80..4b51bfd38e5f 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -258,10 +258,7 @@ void __init setup_arch(char **cmdline_p)
}
}
- init_mm.start_code = PAGE_OFFSET;
- init_mm.end_code = (unsigned long)_etext;
- init_mm.end_data = (unsigned long)_edata;
- init_mm.brk = (unsigned long)_end;
+ setup_initial_init_mm((void *)PAGE_OFFSET, _etext, _edata, _end);
#if defined(CONFIG_BOOTPARAM)
strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index d1b7988efc91..5e4104f07a44 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -87,10 +87,7 @@ void __init setup_arch(char **cmdline_p)
memory_start = PAGE_ALIGN(_ramstart);
memory_end = _ramend;
- init_mm.start_code = (unsigned long) &_stext;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
- init_mm.brk = (unsigned long) 0;
+ setup_initial_init_mm(_stext, _etext, _edata, NULL);
config_BSP(&command_line[0], sizeof(command_line));
diff --git a/arch/microblaze/Kbuild b/arch/microblaze/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/microblaze/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 0660f47012bc..14a67a42fcae 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -43,6 +43,7 @@ config MICROBLAZE
select MMU_GATHER_NO_RANGE
select SPARSE_IRQ
select SET_FS
+ select ZONE_DMA
# Endianness selection
choice
@@ -60,9 +61,6 @@ config CPU_LITTLE_ENDIAN
endchoice
-config ZONE_DMA
- def_bool y
-
config ARCH_HAS_ILOG2_U32
def_bool n
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index d56b9f670ad1..6c33b05f730f 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -28,8 +28,6 @@ static inline pgd_t *get_pgd(void)
#define pgd_alloc(mm) get_pgd()
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 9ae8d2c17dd5..71cd547655d9 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -25,8 +25,6 @@ extern int mem_init_done;
#include <asm/mmu.h>
#include <asm/page.h>
-#define FIRST_USER_ADDRESS 0UL
-
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h
deleted file mode 100644
index 448299beab69..000000000000
--- a/arch/microblaze/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2006 Atmark Techno, Inc.
- */
-
-#ifndef _ASM_MICROBLAZE_UNALIGNED_H
-#define _ASM_MICROBLAZE_UNALIGNED_H
-
-# ifdef __KERNEL__
-
-# ifdef __MICROBLAZEEL__
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-# else
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-# endif
-
-# include <linux/unaligned/generic.h>
-
-# endif /* __KERNEL__ */
-#endif /* _ASM_MICROBLAZE_UNALIGNED_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e039e7d542c4..6dfb27d531dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -71,7 +71,6 @@ config MIPS
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
- select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
@@ -3274,13 +3273,6 @@ config I8253
select CLKSRC_I8253
select CLKEVT_I8253
select MIPS_EXTERNAL_TIMER
-
-config ZONE_DMA
- bool
-
-config ZONE_DMA32
- bool
-
endmenu
config TRAD_SIGNALS
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 258234c35a09..653befc1b176 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -321,7 +321,7 @@ KBUILD_LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
- egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+ egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
endif
@@ -332,9 +332,6 @@ head-y := arch/mips/kernel/head.o
libs-y += arch/mips/lib/
libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
-# See arch/mips/Kbuild for content of core part of the kernel
-core-y += arch/mips/
-
drivers-y += arch/mips/crypto/
# suspend and hibernation support
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index e4b7839293e1..3548b3b45269 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -40,7 +40,7 @@ GCOV_PROFILE := n
UBSAN_SANITIZE := n
# decompressor objects (linked with vmlinuz)
-vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
+vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/bswapsi.o
ifdef CONFIG_DEBUG_ZBOOT
vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT) += $(obj)/dbg.o
@@ -54,7 +54,7 @@ extra-y += uart-ath79.c
$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
$(call cmd,shipped)
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
extra-y += ashldi3.c
$(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 3d70d15ada28..aae1346a509a 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -7,6 +7,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
+#define DISABLE_BRANCH_PROFILING
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
index faa88a6a74c0..0a03529cf317 100644
--- a/arch/mips/crypto/crc32-mips.c
+++ b/arch/mips/crypto/crc32-mips.c
@@ -8,13 +8,13 @@
* Copyright (C) 2018 MIPS Tech, LLC
*/
-#include <linux/unaligned/access_ok.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mipsregs.h>
+#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 08f9dd6903b7..86310d6e1035 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -76,7 +76,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
- fallthrough;
+ /* fallthrough */
case FPU_32BIT:
if (cpu_has_fre) {
/* clear FRE */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index d1477ecb1af9..57561e0e6e8d 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -57,6 +57,9 @@ asmlinkage void plat_irq_dispatch(void);
extern void do_IRQ(unsigned int irq);
+struct irq_domain;
+extern void do_domain_IRQ(struct irq_domain *domain, unsigned int irq);
+
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
diff --git a/arch/mips/include/asm/mach-ralink/spaces.h b/arch/mips/include/asm/mach-ralink/spaces.h
new file mode 100644
index 000000000000..87d085c9ad61
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/spaces.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_RALINK_SPACES_H_
+#define __ASM_MACH_RALINK_SPACES_H_
+
+#define PCI_IOBASE _AC(0xa0000000, UL)
+#define PCI_IOSIZE SZ_16M
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index d0cf997b4ba8..c7925d0e9874 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -28,7 +28,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
@@ -59,15 +58,20 @@ do { \
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = NULL;
+ pmd_t *pmd;
struct page *pg;
- pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
- if (pg) {
- pgtable_pmd_page_ctor(pg);
- pmd = (pmd_t *)page_address(pg);
- pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ if (!pg)
+ return NULL;
+
+ if (!pgtable_pmd_page_ctor(pg)) {
+ __free_pages(pg, PMD_ORDER);
+ return NULL;
}
+
+ pmd = (pmd_t *)page_address(pg);
+ pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
return pmd;
}
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6c0532d7b211..95df9c293d8d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -93,7 +93,6 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START MAP_BASE
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1e7d6ce9d8d6..41921acdc9d8 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -137,7 +137,6 @@
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
-#define FIRST_USER_ADDRESS 0UL
/*
* TLB refill handlers also map the vmalloc area into xuseg. Avoid
@@ -210,9 +209,9 @@ static inline void p4d_clear(p4d_t *p4dp)
p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}
-static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+static inline pud_t *p4d_pgtable(p4d_t p4d)
{
- return p4d_val(p4d);
+ return (pud_t *)p4d_val(p4d);
}
#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
@@ -314,9 +313,9 @@ static inline void pud_clear(pud_t *pudp)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- return pud_val(pud);
+ return (pmd_t *)pud_val(pud);
}
#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
diff --git a/arch/mips/include/asm/vdso/vdso.h b/arch/mips/include/asm/vdso/vdso.h
index 737ddfc3411c..a327ca21270e 100644
--- a/arch/mips/include/asm/vdso/vdso.h
+++ b/arch/mips/include/asm/vdso/vdso.h
@@ -67,7 +67,7 @@ static inline const struct vdso_data *get_vdso_data(void)
static inline void __iomem *get_gic(const struct vdso_data *data)
{
- return (void __iomem *)data - PAGE_SIZE;
+ return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE;
}
#endif /* CONFIG_CLKSRC_MIPS_GIC */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 57dc2ac4f8bd..40b210c65a5a 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -98,6 +98,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 85b6c60f285d..d20e002b3246 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
+#include <linux/irqdomain.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -107,3 +108,18 @@ void __irq_entry do_IRQ(unsigned int irq)
irq_exit();
}
+#ifdef CONFIG_IRQ_DOMAIN
+void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
+{
+ struct irq_desc *desc;
+
+ irq_enter();
+ check_stack_overflow();
+
+ desc = irq_resolve_mapping(domain, hwirq);
+ if (likely(desc))
+ handle_irq_desc(desc);
+
+ irq_exit();
+}
+#endif
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 499a5357c09f..56b51de2dc51 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
+#include <linux/panic_notifier.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index cd4afcdf3725..9adad24c2e65 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1383,6 +1383,7 @@ static void build_r4000_tlb_refill_handler(void)
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
+ fallthrough;
case CPU_LOONGSON2EF:
/* Loongson2 ebase is different than r4k, we have more space */
if ((p - tlb_handler) > 64)
@@ -2169,6 +2170,7 @@ static void build_r4000_tlb_load_handler(void)
default:
if (cpu_has_mips_r2_exec_hazard) {
uasm_i_ehb(&p);
+ fallthrough;
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 11e9527c6e44..4ffbcc58c6f6 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -33,7 +33,8 @@
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | \
+ UPF_MAGIC_MULTIPLIER, \
.regshift = 0, \
}
@@ -47,7 +48,8 @@ static struct plat_serial8250_port uart8250_data[] = {
.mapbase = 0x1f000900, /* The CBUS UART */
.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
- .iotype = UPIO_MEM32,
+ .iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
+ UPIO_MEM32BE : UPIO_MEM32,
.flags = CBUS_UART_FLAGS,
.regshift = 3,
},
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 939dd06764bc..3a73e9375712 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1355,6 +1355,9 @@ jeq_common:
}
break;
+ case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+ break;
+
case BPF_ST | BPF_B | BPF_MEM:
case BPF_ST | BPF_H | BPF_MEM:
case BPF_ST | BPF_W | BPF_MEM:
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index c374f3ceec38..9028dbbb45dd 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/timer.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 20d8637340be..18d1c115cd53 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
diff --git a/arch/nds32/Kbuild b/arch/nds32/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/nds32/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/nds32/boot/.gitignore b/arch/nds32/boot/.gitignore
new file mode 100644
index 000000000000..9182a3a1ea0a
--- /dev/null
+++ b/arch/nds32/boot/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/Image
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index 85c117347c86..a08e1ebca70e 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -12,11 +12,6 @@
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
-/*
- * Since we have only two-level page tables, these are trivial
- */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index af82e996f412..41725eaf8bac 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -294,10 +294,7 @@ void __init setup_arch(char **cmdline_p)
setup_cpuinfo();
- init_mm.start_code = (unsigned long)&_stext;
- init_mm.end_code = (unsigned long)&_etext;
- init_mm.end_data = (unsigned long)&_edata;
- init_mm.brk = (unsigned long)&_end;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
/* setup bootmem allocator */
setup_memory();
diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c
index c206b31ce07a..1bdf5e7d1b43 100644
--- a/arch/nds32/mm/mmap.c
+++ b/arch/nds32/mm/mmap.c
@@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/nios2/Kbuild b/arch/nios2/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/nios2/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index e6600d2a5ae0..3c4ae74d5798 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -25,7 +25,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 2600d76c310c..4a995fa628ee 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -24,8 +24,6 @@
#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
-#define FIRST_USER_ADDRESS 0UL
-
#define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index d2f21957e99c..cf8d687a2644 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -156,10 +156,7 @@ void __init setup_arch(char **cmdline_p)
memory_start = memblock_start_of_DRAM();
memory_end = memblock_end_of_DRAM();
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
init_task.thread.kregs = &fake_regs;
/* Keep a copy of command line */
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 410e7abfac69..c52de526e518 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -42,7 +42,6 @@ endif
head-y := arch/openrisc/kernel/head.o
-core-y += arch/openrisc/
libs-y += $(LIBGCC)
PHONY += vmlinux.bin
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 88820299ecc4..b7b2b8d16fad 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -72,6 +72,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
#endif
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 9425bedab4fc..4ac591c9ca33 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -73,7 +73,6 @@ extern void paging_init(void);
*/
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/*
* Kernels own virtual memory area.
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
deleted file mode 100644
index 14353f2101f2..000000000000
--- a/arch/openrisc/include/asm/unaligned.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- */
-
-#ifndef __ASM_OPENRISC_UNALIGNED_H
-#define __ASM_OPENRISC_UNALIGNED_H
-
-/*
- * This is copied from the generic implementation and the C-struct
- * variant replaced with the memmove variant. The GCC compiler
- * for the OR32 arch optimizes too aggressively for the C-struct
- * variant to work, so use the memmove variant instead.
- *
- * It may be worth considering implementing the unaligned access
- * exception handler and allowing unaligned accesses (access_ok.h)...
- * not sure if it would be much of a performance win without further
- * investigation.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_memmove.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_memmove.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_OPENRISC_UNALIGNED_H */
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index c6f9e7b9f7cb..8ae2da6ac097 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -293,10 +293,7 @@ void __init setup_arch(char **cmdline_p)
#endif
/* process 1's initial memory region is the kernel code/data */
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long)_etext;
- init_mm.end_data = (unsigned long)_edata;
- init_mm.brk = (unsigned long)_end;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start == initrd_end) {
diff --git a/arch/parisc/Kbuild b/arch/parisc/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/parisc/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bde9907bc5b2..4f8c1fbf8f2f 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -3,7 +3,6 @@ config PARISC
def_bool y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
- select HAVE_IDE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index dda557085311..6a7e98e71f1d 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -69,6 +69,5 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 39017210dbf0..43937af127b1 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -171,8 +171,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
* pgd entries used up by user/kernel:
*/
-#define FIRST_USER_ADDRESS 0UL
-
/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
/* are moved together in one instruction). */
@@ -324,8 +322,8 @@ static inline void pmd_clear(pmd_t *pmd) {
#if CONFIG_PGTABLE_LEVELS == 3
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud)))
-#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
+#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
+#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud))
/* For 64 bit we have three level tables */
diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h
index e9029c7c2a69..3bda16773ba6 100644
--- a/arch/parisc/include/asm/unaligned.h
+++ b/arch/parisc/include/asm/unaligned.h
@@ -2,11 +2,7 @@
#ifndef _ASM_PARISC_UNALIGNED_H
#define _ASM_PARISC_UNALIGNED_H
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
+#include <asm-generic/unaligned.h>
#ifdef __KERNEL__
struct pt_regs;
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ab78cba446ed..9e3c010c0f61 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -52,6 +52,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 75ae88d13909..da154406d368 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cache.h>
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 7ed404c60a9e..39ccad063533 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -103,22 +103,16 @@ static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *bu
return count;
}
-static int pdc_console_tty_write_room(struct tty_struct *tty)
+static unsigned int pdc_console_tty_write_room(struct tty_struct *tty)
{
return 32768; /* no limit, no buffer used */
}
-static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
-{
- return 0; /* no buffer */
-}
-
static const struct tty_operations pdc_console_tty_ops = {
.open = pdc_console_tty_open,
.close = pdc_console_tty_close,
.write = pdc_console_tty_write,
.write_room = pdc_console_tty_write_room,
- .chars_in_buffer = pdc_console_tty_chars_in_buffer,
};
static void pdc_console_poll(struct timer_list *unused)
diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile
index 0f2ea5bcb0d7..d63f18dd058d 100644
--- a/arch/parisc/kernel/syscalls/Makefile
+++ b/arch/parisc/kernel/syscalls/Makefile
@@ -10,25 +10,15 @@ syshdr := $(srctree)/scripts/syscallhdr.sh
systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@
-$(uapi)/unistd_32.h: abis := common,32
-$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
+$(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(uapi)/unistd_64.h: abis := common,64
-$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-$(kapi)/syscall_table_32.h: abis := common,32
-$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
-$(kapi)/syscall_table_64.h: abis := common,64
-$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
+$(kapi)/syscall_table_%.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 14b132cf95e2..663766fbf505 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,7 +140,9 @@ config PPC
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
+ select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+ select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -187,7 +189,7 @@ config PPC
select GENERIC_VDSO_TIME_NS
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
- select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
+ select HAVE_ARCH_HUGE_VMAP if PPC_RADIX_MMU || PPC_8xx
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
@@ -218,7 +220,6 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
- select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
@@ -266,6 +267,7 @@ config PPC
select PPC_DAWR if PPC64
select RTC_LIB
select SPARSE_IRQ
+ select STRICT_KERNEL_RWX if STRICT_MODULE_RWX
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select VIRT_TO_BUS if !PPC64
@@ -289,6 +291,7 @@ config PANIC_TIMEOUT
config COMPAT
bool "Enable support for 32bit binaries"
depends on PPC64
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
default y if !CPU_LITTLE_ENDIAN
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -403,10 +406,6 @@ config PPC_ADV_DEBUG_DAC_RANGE
config PPC_DAWR
bool
-config ZONE_DMA
- bool
- default y if PPC_BOOK3E_64
-
config PGTABLE_LEVELS
int
default 2 if !PPC64
@@ -426,7 +425,7 @@ source "kernel/Kconfig.hz"
config MATH_EMULATION
bool "Math emulation"
- depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
+ depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT
select PPC_FPU_REGS
help
Some PowerPC chips designed for embedded applications do not have
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 6342f9da4545..205cd77f321f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -84,6 +84,11 @@ config MSI_BITMAP_SELFTEST
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
+ depends on PPC64
+
+config PPC_RFI_SRR_DEBUG
+ bool "Include extra checks for RFI SRR register validity"
+ depends on PPC_BOOK3S_64
config XMON
bool "Include xmon kernel debugger"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 3212d076ac6a..6505d66f1193 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -267,9 +267,6 @@ head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o
head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o
-# See arch/powerpc/Kbuild for content of core part of the kernel
-core-y += arch/powerpc/
-
# Default to zImage, override when needed
all: zImage
@@ -376,6 +373,16 @@ ppc64_book3e_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
-f $(srctree)/Makefile allmodconfig
+PHONY += ppc32_randconfig
+ppc32_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/32-bit.config \
+ -f $(srctree)/Makefile randconfig
+
+PHONY += ppc64_randconfig
+ppc64_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/64-bit.config \
+ -f $(srctree)/Makefile randconfig
+
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 2b8da923ceca..e312ea802aa6 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -163,6 +163,8 @@ src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
+src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c
+
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
src-boot := $(src-wlib) $(src-plat) empty.c
@@ -227,7 +229,7 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
-targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
+targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
@@ -355,6 +357,8 @@ image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
+image-$(CONFIG_PPC_MICROWATT) += dtbImage.microwatt
+
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
ifdef CONFIG_PPC32
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
index 6098b879ac97..977eb15a6d17 100644
--- a/arch/powerpc/boot/decompress.c
+++ b/arch/powerpc/boot/decompress.c
@@ -99,8 +99,8 @@ static void print_err(char *s)
* partial_decompress - decompresses part or all of a compressed buffer
* @inbuf: input buffer
* @input_size: length of the input buffer
- * @outbuf: input buffer
- * @output_size: length of the input buffer
+ * @outbuf: output buffer
+ * @output_size: length of the output buffer
* @skip number of output bytes to ignore
*
* This function takes compressed data from inbuf, decompresses and write it to
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d91036ad626..58fbcfcc98c9 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -13,6 +13,7 @@
#include "string.h"
#include "stdio.h"
#include "ops.h"
+#include "of.h"
void dt_fixup_memory(u64 start, u64 size)
{
@@ -23,21 +24,25 @@ void dt_fixup_memory(u64 start, u64 size)
root = finddevice("/");
if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
naddr = 2;
+ else
+ naddr = be32_to_cpu(naddr);
if (naddr < 1 || naddr > 2)
fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
nsize = 1;
+ else
+ nsize = be32_to_cpu(nsize);
if (nsize < 1 || nsize > 2)
fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
i = 0;
if (naddr == 2)
- memreg[i++] = start >> 32;
- memreg[i++] = start & 0xffffffff;
+ memreg[i++] = cpu_to_be32(start >> 32);
+ memreg[i++] = cpu_to_be32(start & 0xffffffff);
if (nsize == 2)
- memreg[i++] = size >> 32;
- memreg[i++] = size & 0xffffffff;
+ memreg[i++] = cpu_to_be32(size >> 32);
+ memreg[i++] = cpu_to_be32(size & 0xffffffff);
memory = finddevice("/memory");
if (! memory) {
@@ -45,9 +50,9 @@ void dt_fixup_memory(u64 start, u64 size)
setprop_str(memory, "device_type", "memory");
}
- printf("Memory <- <0x%x", memreg[0]);
+ printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
for (i = 1; i < (naddr + nsize); i++)
- printf(" 0x%x", memreg[i]);
+ printf(" 0x%x", be32_to_cpu(memreg[i]));
printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
@@ -65,10 +70,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
while ((devp = find_node_by_devtype(devp, "cpu"))) {
- setprop_val(devp, "clock-frequency", cpu);
- setprop_val(devp, "timebase-frequency", tb);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
+ setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
if (bus > 0)
- setprop_val(devp, "bus-frequency", bus);
+ setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
}
timebase_period_ns = 1000000000 / tb;
@@ -80,7 +85,7 @@ void dt_fixup_clock(const char *path, u32 freq)
if (devp) {
printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
- setprop_val(devp, "clock-frequency", freq);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
}
}
@@ -133,8 +138,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
{
if (getprop(node, "#address-cells", naddr, 4) != 4)
*naddr = 2;
+ else
+ *naddr = be32_to_cpu(*naddr);
if (getprop(node, "#size-cells", nsize, 4) != 4)
*nsize = 1;
+ else
+ *nsize = be32_to_cpu(*nsize);
}
static void copy_val(u32 *dest, u32 *src, int naddr)
@@ -163,9 +172,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr)
int i, carry = 0;
for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
- u64 tmp = (u64)reg[i] + add[i] + carry;
+ u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
carry = tmp >> 32;
- reg[i] = (u32)tmp;
+ reg[i] = cpu_to_be32((u32)tmp);
}
return !carry;
@@ -180,18 +189,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
u32 end;
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- if (reg[i] < range[i])
+ if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
return 0;
- if (reg[i] > range[i])
+ if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
break;
}
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- end = range[i] + rangesize[i];
+ end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
- if (reg[i] < end)
+ if (be32_to_cpu(reg[i]) < end)
break;
- if (reg[i] > end)
+ if (be32_to_cpu(reg[i]) > end)
return 0;
}
@@ -240,7 +249,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
return 0;
dt_get_reg_format(parent, &naddr, &nsize);
-
if (nsize > 2)
return 0;
@@ -252,10 +260,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
copy_val(last_addr, prop_buf + offset, naddr);
- ret_size = prop_buf[offset + naddr];
+ ret_size = be32_to_cpu(prop_buf[offset + naddr]);
if (nsize == 2) {
ret_size <<= 32;
- ret_size |= prop_buf[offset + naddr + 1];
+ ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
}
for (;;) {
@@ -278,7 +286,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
offset = find_range(last_addr, prop_buf, prev_naddr,
naddr, prev_nsize, buflen / 4);
-
if (offset < 0)
return 0;
@@ -296,8 +303,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
if (naddr > 2)
return 0;
- ret_addr = ((u64)last_addr[2] << 32) | last_addr[3];
-
+ ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
if (sizeof(void *) == 4 &&
(ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
ret_addr + ret_size > 0x100000000ULL))
@@ -350,11 +356,14 @@ int dt_is_compatible(void *node, const char *compat)
int dt_get_virtual_reg(void *node, void **addr, int nres)
{
unsigned long xaddr;
- int n;
+ int n, i;
n = getprop(node, "virtual-reg", addr, nres * 4);
- if (n > 0)
+ if (n > 0) {
+ for (i = 0; i < n/4; i ++)
+ ((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
return n / 4;
+ }
for (n = 0; n < nres; n++) {
if (!dt_xlate_reg(node, n, &xaddr, NULL))
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
new file mode 100644
index 000000000000..974abbdda249
--- /dev/null
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -0,0 +1,138 @@
+/dts-v1/;
+
+/ {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ model-name = "microwatt";
+ compatible = "microwatt-soc";
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ reserved-memory {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ ranges;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000 0x10000000>;
+ };
+
+ cpus {
+ #size-cells = <0x00>;
+ #address-cells = <0x01>;
+
+ ibm,powerpc-cpu-features {
+ display-name = "Microwatt";
+ isa = <3000>;
+ device_type = "cpu-features";
+ compatible = "ibm,powerpc-cpu-features";
+
+ mmu-radix {
+ isa = <3000>;
+ usable-privilege = <2>;
+ };
+
+ little-endian {
+ isa = <2050>;
+ usable-privilege = <3>;
+ hwcap-bit-nr = <1>;
+ };
+
+ cache-inhibited-large-page {
+ isa = <2040>;
+ usable-privilege = <2>;
+ };
+
+ fixed-point-v3 {
+ isa = <3000>;
+ usable-privilege = <3>;
+ };
+
+ no-execute {
+ isa = <2010>;
+ usable-privilege = <2>;
+ };
+
+ floating-point {
+ hwcap-bit-nr = <27>;
+ isa = <0>;
+ usable-privilege = <3>;
+ };
+ };
+
+ PowerPC,Microwatt@0 {
+ i-cache-sets = <2>;
+ ibm,dec-bits = <64>;
+ reservation-granule-size = <64>;
+ clock-frequency = <100000000>;
+ timebase-frequency = <100000000>;
+ i-tlb-sets = <1>;
+ ibm,ppc-interrupt-server#s = <0>;
+ i-cache-block-size = <64>;
+ d-cache-block-size = <64>;
+ d-cache-sets = <2>;
+ i-tlb-size = <64>;
+ cpu-version = <0x990000>;
+ status = "okay";
+ i-cache-size = <0x1000>;
+ ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>;
+ tlb-size = <0>;
+ tlb-sets = <0>;
+ device_type = "cpu";
+ d-tlb-size = <128>;
+ d-tlb-sets = <2>;
+ reg = <0>;
+ general-purpose;
+ 64-bit;
+ d-cache-size = <0x1000>;
+ ibm,chip-id = <0>;
+ };
+ };
+
+ soc@c0000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ICS>;
+
+ ranges = <0 0 0xc0000000 0x40000000>;
+
+ interrupt-controller@4000 {
+ compatible = "openpower,xics-presentation", "ibm,ppc-xicp";
+ ibm,interrupt-server-ranges = <0x0 0x1>;
+ reg = <0x4000 0x100>;
+ };
+
+ ICS: interrupt-controller@5000 {
+ compatible = "openpower,xics-sources";
+ interrupt-controller;
+ interrupt-ranges = <0x10 0x10>;
+ reg = <0x5000 0x100>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ UART0: serial@2000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x2000 0x8>;
+ clock-frequency = <100000000>;
+ current-speed = <115200>;
+ reg-shift = <2>;
+ fifo-size = <16>;
+ interrupts = <0x10 0x1>;
+ };
+ };
+
+ chosen {
+ bootargs = "";
+ ibm,architecture-vec-5 = [19 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 40 00 40];
+ stdout-path = &UART0;
+ };
+};
diff --git a/arch/powerpc/boot/microwatt.c b/arch/powerpc/boot/microwatt.c
new file mode 100644
index 000000000000..ca9d83617fc1
--- /dev/null
+++ b/arch/powerpc/boot/microwatt.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+BSS_STACK(8192);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ unsigned long heapsize = 16*1024*1024 - (unsigned long)_end;
+
+ /*
+ * Disable interrupts and turn off MSR_RI, since we'll
+ * shortly be overwriting the interrupt vectors.
+ */
+ __asm__ volatile("mtmsrd %0,1" : : "r" (0));
+
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
index b0da4466d419..f16d2be1d0f3 100644
--- a/arch/powerpc/boot/ns16550.c
+++ b/arch/powerpc/boot/ns16550.c
@@ -15,6 +15,7 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
+#include "of.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
@@ -58,16 +59,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp)
int n;
u32 reg_offset;
- if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
+ if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
+ printf("virt reg parse fail...\r\n");
return -1;
+ }
n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
if (n == sizeof(reg_offset))
- reg_base += reg_offset;
+ reg_base += be32_to_cpu(reg_offset);
n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
if (n != sizeof(reg_shift))
reg_shift = 0;
+ else
+ reg_shift = be32_to_cpu(reg_shift);
scdp->open = ns16550_open;
scdp->putc = ns16550_putc;
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index cdb796b76e2e..1f4676bab786 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -342,6 +342,11 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+microwatt)
+ link_address='0x500000'
+ platformo="$object/fixed-head.o $object/$platform.o"
+ binary=y
+ ;;
treeboot-currituck)
link_address='0x1000000'
;;
diff --git a/arch/powerpc/boot/zImage.ps3.lds.S b/arch/powerpc/boot/zImage.ps3.lds.S
index 7b2ff2eaa73a..d0ffb493614d 100644
--- a/arch/powerpc/boot/zImage.ps3.lds.S
+++ b/arch/powerpc/boot/zImage.ps3.lds.S
@@ -8,7 +8,7 @@ SECTIONS
.kernel:vmlinux.bin : { *(.kernel:vmlinux.bin) }
_vmlinux_end = .;
- . = ALIGN(4096);
+ . = ALIGN(8);
_dtb_start = .;
.kernel:dtb : { *(.kernel:dtb) }
_dtb_end = .;
diff --git a/arch/powerpc/configs/32-bit.config b/arch/powerpc/configs/32-bit.config
new file mode 100644
index 000000000000..ad6546850c68
--- /dev/null
+++ b/arch/powerpc/configs/32-bit.config
@@ -0,0 +1 @@
+# CONFIG_PPC64 is not set
diff --git a/arch/powerpc/configs/64-bit.config b/arch/powerpc/configs/64-bit.config
new file mode 100644
index 000000000000..0fe6406929e2
--- /dev/null
+++ b/arch/powerpc/configs/64-bit.config
@@ -0,0 +1 @@
+CONFIG_PPC64=y
diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig
new file mode 100644
index 000000000000..a08b739123da
--- /dev/null
+++ b/arch/powerpc/configs/microwatt_defconfig
@@ -0,0 +1,98 @@
+# CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_PPC64=y
+# CONFIG_PPC_KUEP is not set
+# CONFIG_PPC_KUAP is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_NR_IRQS=64
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_PPC_POWERNV is not set
+# CONFIG_PPC_PSERIES is not set
+CONFIG_PPC_MICROWATT=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_CPU_FREQ=y
+CONFIG_HZ_100=y
+# CONFIG_PPC_MEM_KEYS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_INET=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_RAW_DIAG=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_NVRAM is not set
+CONFIG_RANDOM_TRUST_CPU=y
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_NVMEM is not set
+CONFIG_EXT4_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MISC is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_PPC_DISABLE_WERROR=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+# CONFIG_XMON_DEFAULT_RO_MODE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 949ff9ccda5e..d21f266cea9a 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -57,3 +57,28 @@ CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PPC_16K_PAGES=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_FS=y
+CONFIG_PPC_PTDUMP=y
+CONFIG_MODULES=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_8xx_GPIO=y
+CONFIG_WATCHDOG=y
+CONFIG_8xxx_WDT=y
+CONFIG_SMC_UCODE_PATCH=y
+CONFIG_ADVANCED_OPTIONS=y
+CONFIG_PIN_TLB=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MATH_EMULATION=y
+CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_IPV6=y
+CONFIG_BPF_JIT=y
+CONFIG_DEBUG_VM_PGTABLE=y
+CONFIG_BDI_SWITCH=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_CPM_ADDR=0xff002008
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 2c87e856d839..8bfeea6c7de7 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -309,6 +309,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PPC_EMULATED_STATS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 701811c91a6f..0ad2291337a7 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -368,7 +368,9 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 1fd9d1260f9e..ee09f7bb6ea9 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -621,7 +621,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_NVRAM=y
CONFIG_DTLK=m
-CONFIG_R3964=m
CONFIG_CARDMAN_4000=m
CONFIG_CARDMAN_4040=m
CONFIG_IPWIRELESS=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 50168dde4ea5..b183629f1bcf 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -289,7 +289,9 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 02ee6f5ac9fe..222823861a67 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -71,8 +71,13 @@ void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 7ae29cfb06c0..f0e687236484 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
deleted file mode 100644
index 2a0a467d2985..000000000000
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
-#define _ASM_POWERPC_BOOK3S_32_HASH_H
-#ifdef __KERNEL__
-
-/*
- * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
- * table containing PTEs, together with a set of 16 segment registers,
- * to define the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code. Low-level assembler code in hash_low_32.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
-#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
-#define _PAGE_SPECIAL 0x800 /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
-#endif
-
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 1670dfe9d4f1..64201125a287 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -7,35 +7,104 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
+
+extern struct static_key_false disable_kuap_key;
+extern struct static_key_false disable_kuep_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
+}
+
+static __always_inline bool kuep_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
+}
+
+static inline void kuep_lock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) | SR_NX);
+}
+
+static inline void kuep_unlock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) & ~SR_NX);
+}
+
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
-static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
+#define KUAP_NONE (~0UL)
+#define KUAP_ALL (~1UL)
+
+static inline void kuap_lock_one(unsigned long addr)
{
- addr &= 0xf0000000; /* align addr to start of segment */
- barrier(); /* make sure thread.kuap is updated before playing with SRs */
- while (addr < end) {
- mtsr(sr, addr);
- sr += 0x111; /* next VSID */
- sr &= 0xf0ffffff; /* clear VSID overflow */
- addr += 0x10000000; /* address of next segment */
- }
+ mtsr(mfsr(addr) | SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_one(unsigned long addr)
+{
+ mtsr(mfsr(addr) & ~SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_lock_all(void)
+{
+ update_user_segments(mfsr(0) | SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_all(void)
+{
+ update_user_segments(mfsr(0) & ~SR_KS);
isync(); /* Context sync required after mtsr() */
}
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_lock_one(addr);
+ else if (!ool)
+ kuap_lock_all();
+ else
+ kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_unlock_one(addr);
+ else if (!ool)
+ kuap_unlock_all();
+ else
+ kuap_unlock_all_ool();
+}
+
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
- u32 addr = kuap & 0xf0000000;
- u32 end = kuap << 28;
+
+ if (kuap_is_disabled())
+ return;
regs->kuap = kuap;
- if (unlikely(!kuap))
+ if (unlikely(kuap == KUAP_NONE))
return;
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, false);
}
static inline void kuap_user_restore(struct pt_regs *regs)
@@ -44,22 +113,22 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
- u32 addr = regs->kuap & 0xf0000000;
- u32 end = regs->kuap << 28;
+ if (kuap_is_disabled())
+ return;
current->thread.kuap = regs->kuap;
- if (unlikely(regs->kuap == kuap))
- return;
-
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ kuap_unlock(regs->kuap, false);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
- WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
return kuap;
}
@@ -72,84 +141,78 @@ static inline void kuap_assert_locked(void)
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
- u32 addr, end;
+ if (kuap_is_disabled())
+ return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
- BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
if (!(dir & KUAP_WRITE))
return;
- addr = (__force u32)to;
-
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
-
- end = min(addr + size, TASK_SIZE);
-
- current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ current->thread.kuap = (__force u32)to;
+ kuap_unlock_one((__force u32)to);
}
-static __always_inline void prevent_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static __always_inline void prevent_user_access(unsigned long dir)
{
- u32 addr, end;
-
- BUILD_BUG_ON(!__builtin_constant_p(dir));
+ u32 kuap = current->thread.kuap;
- if (dir & KUAP_CURRENT_WRITE) {
- u32 kuap = current->thread.kuap;
-
- if (unlikely(!kuap))
- return;
+ if (kuap_is_disabled())
+ return;
- addr = kuap & 0xf0000000;
- end = kuap << 28;
- } else if (dir & KUAP_WRITE) {
- addr = (__force u32)to;
- end = min(addr + size, TASK_SIZE);
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
- } else {
+ if (!(dir & KUAP_WRITE))
return;
- }
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, true);
}
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
- if (flags)
- prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(flags, true);
+ }
return flags;
}
static inline void restore_user_access(unsigned long flags)
{
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
+ if (kuap_is_disabled())
+ return;
- if (flags)
- allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = flags;
+ kuap_unlock(flags, true);
+ }
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- unsigned long begin = regs->kuap & 0xf0000000;
- unsigned long end = regs->kuap << 28;
+ unsigned long kuap = regs->kuap;
+
+ if (kuap_is_disabled())
+ return false;
+
+ if (!is_write || kuap == KUAP_ALL)
+ return false;
+ if (kuap == KUAP_NONE)
+ return true;
+
+ /* If faulting address doesn't match unlocked segment, unlock all */
+ if ((kuap ^ address) & 0xf0000000)
+ regs->kuap = KUAP_ALL;
- return is_write && (address < begin || address >= end);
+ return false;
}
#endif /* CONFIG_PPC_KUAP */
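Editor's note: the hunk above replaces the old address/length range tracking with a single KUAP state word — KUAP_NONE, the base of the one unlocked 256MB segment, or KUAP_ALL — so bad_kuap_fault() only compares the top nibble of the faulting address with the unlocked segment. The standalone sketch below mirrors that segment-match test; the KUAP_NONE/KUAP_ALL sentinel values are stand-ins for illustration, not lifted from this patch.

/*
 * Userspace sketch of the segment-match test used by bad_kuap_fault():
 * two addresses are in the same 256MB segment iff their top 4 bits match.
 */
#include <stdbool.h>
#include <stdio.h>

#define KUAP_NONE	(~0UL)	/* illustrative sentinel */
#define KUAP_ALL	(~1UL)	/* illustrative sentinel */

static bool fault_in_unlocked_segment(unsigned long kuap, unsigned long address)
{
	if (kuap == KUAP_NONE || kuap == KUAP_ALL)
		return false;
	/* Same segment iff the top nibble of both addresses matches */
	return ((kuap ^ address) & 0xf0000000) == 0;
}

int main(void)
{
	printf("%d\n", fault_in_unlocked_segment(0x30001000, 0x30002000)); /* 1: same segment */
	printf("%d\n", fault_in_unlocked_segment(0x30001000, 0x40000000)); /* 0: different segment */
	return 0;
}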
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index b85f8e114a9c..f5be185cbdf8 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -67,6 +67,16 @@ struct ppc_bat {
#ifndef __ASSEMBLY__
/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
+
+/*
* Hardware Page Table Entry
* Note that the xpn and x bitfields are used only by processors that
* support extended addressing; otherwise, those bits are reserved.
@@ -102,6 +112,37 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
+#include <asm/reg.h>
+#include <asm/task_size_32.h>
+
+static __always_inline void update_user_segment(u32 n, u32 val)
+{
+ if (n << 28 < TASK_SIZE)
+ mtsr(val + n * 0x111, n << 28);
+}
+
+static __always_inline void update_user_segments(u32 val)
+{
+ val &= 0xf0ffffff;
+
+ update_user_segment(0, val);
+ update_user_segment(1, val);
+ update_user_segment(2, val);
+ update_user_segment(3, val);
+ update_user_segment(4, val);
+ update_user_segment(5, val);
+ update_user_segment(6, val);
+ update_user_segment(7, val);
+ update_user_segment(8, val);
+ update_user_segment(9, val);
+ update_user_segment(10, val);
+ update_user_segment(11, val);
+ update_user_segment(12, val);
+ update_user_segment(13, val);
+ update_user_segment(14, val);
+ update_user_segment(15, val);
+}
+
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
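Editor's note: CTX_TO_VSID() above skews the context by 897*16 and the ESID by 0x111 so that neighbouring segments of neighbouring contexts spread across the 24-bit VSID space. A minimal userspace sketch of the arithmetic — the macro body is copied from the hunk, the surrounding program is only illustrative:

/* Print the VSIDs of the first few segments of context 1. */
#include <stdint.h>
#include <stdio.h>

#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

int main(void)
{
	for (uint32_t esid = 0; esid < 4; esid++)
		printf("ctx=1 esid=%u -> vsid=0x%06x\n", esid, CTX_TO_VSID(1, esid));
	return 0;
}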
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 83c65845a1a9..609c80f67194 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -4,7 +4,43 @@
#include <asm-generic/pgtable-nopmd.h>
-#include <asm/book3s/32/hash.h>
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
/* And here we include common definitions */
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 9700da3a4093..a1cc73a88710 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -398,8 +398,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
#endif /* !CONFIG_PPC_KUAP */
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a666d561b44d..5d34a8646f08 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,6 +232,9 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
+ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
+
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -1048,8 +1051,15 @@ extern struct page *p4d_page(p4d_t p4d);
/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr) __pa(ptr)
-#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS)
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
+}
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index a46fd37ad552..77797a2a82eb 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -64,6 +64,8 @@ extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long end, int psize);
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize);
extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..ad130e15a126 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,7 +8,6 @@
#include <asm/book3s/32/pgtable.h>
#endif
-#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
/* Insert a PTE, top-level function is out of line. It uses an inline
* low level function in the respective pgtable-* files
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index d5da7ddbf0fc..350de8f90250 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -91,7 +91,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
}
#define HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index f1d029bf906e..a95f63788c6b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,13 +23,13 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
-int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
+int patch_branch(u32 *addr, unsigned long target, int flags);
+int patch_instruction(u32 *addr, struct ppc_inst instr);
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
static inline unsigned long patch_site_addr(s32 *site)
{
@@ -38,18 +38,18 @@ static inline unsigned long patch_site_addr(s32 *site)
static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
- return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
+ return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
- return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
+ return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
- return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
+ return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -59,10 +59,8 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
-unsigned long branch_target(const struct ppc_inst *instr);
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src);
+unsigned long branch_target(const u32 *instr);
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
@@ -73,9 +71,9 @@ void __patch_exception(int exc, unsigned long addr);
#endif
#define OP_RT_RA_MASK 0xffff0000UL
-#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
-#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
-#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))
+#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
+#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
+#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
static inline unsigned long ppc_function_entry(void *func)
@@ -180,12 +178,10 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
#define R2_STACK_OFFSET 40
#endif
-#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
- ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
/* usually preceded by a mflr r0 */
-#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
- ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 4cb9efa2eb21..242204e12993 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -16,7 +16,7 @@
.section ".head.data.\name\()","a",@progbits
.endm
.macro use_ftsec name
- .section ".head.text.\name\()"
+ .section ".head.text.\name\()","ax",@progbits
.endm
/*
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 7e4b2cef40c2..9bcf345cb208 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -294,6 +294,13 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
+#define H_ALLOCATE_VAS_WINDOW 0x388
+#define H_MODIFY_VAS_WINDOW 0x38C
+#define H_DEALLOCATE_VAS_WINDOW 0x390
+#define H_QUERY_VAS_WINDOW 0x394
+#define H_QUERY_VAS_CAPABILITIES 0x398
+#define H_QUERY_NX_CAPABILITIES 0x39C
+#define H_GET_NX_FAULT 0x3A0
#define H_INT_GET_SOURCE_INFO 0x3A8
#define H_INT_SET_SOURCE_CONFIG 0x3AC
#define H_INT_GET_SOURCE_CONFIG 0x3B0
@@ -393,6 +400,9 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
+#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 56a98936a6a9..21cc571ea9c2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -18,8 +18,17 @@
* PACA flags in paca->irq_happened.
*
 * These bits are set when interrupts occur while soft-disabled
- * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
- * is set whenever we manually hard disable.
+ * and allow a proper replay.
+ *
+ * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
+ * always in synch with the MSR[EE] state, except:
+ * - A window in interrupt entry, where hardware disables MSR[EE] and that
+ * must be "reconciled" with the soft mask state.
+ * - NMI interrupts that hit in awkward places, until they fix the state.
+ * - When local irqs are being enabled and state is being fixed up.
+ * - When returning from an interrupt there are some windows where this
+ * can become out of synch, but gets fixed before the RFI or before
+ * executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
@@ -389,7 +398,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+ return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+ BUILD_BUG();
+}
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index 268d3bd073c8..b11c0e2f9639 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -8,17 +8,17 @@
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
- long __gui_ret = 0; \
- unsigned long __gui_ptr = (unsigned long)ptr; \
+ long __gui_ret; \
+ u32 __user *__gui_ptr = (u32 __user *)ptr; \
struct ppc_inst __gui_inst; \
unsigned int __prefix, __suffix; \
- __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
+ \
+ __chk_user_ptr(ptr); \
+ __gui_ret = gu_op(__prefix, __gui_ptr); \
if (__gui_ret == 0) { \
if ((__prefix >> 26) == OP_PREFIX) { \
- __gui_ret = gu_op(__suffix, \
- (unsigned int __user *)__gui_ptr + 1); \
- __gui_inst = ppc_inst_prefix(__prefix, \
- __suffix); \
+ __gui_ret = gu_op(__suffix, __gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, __suffix); \
} else { \
__gui_inst = ppc_inst(__prefix); \
} \
@@ -29,14 +29,15 @@
})
#else /* !CONFIG_PPC64 */
#define ___get_user_instr(gu_op, dest, ptr) \
- gu_op((dest).val, (u32 __user *)(ptr))
+({ \
+ __chk_user_ptr(ptr); \
+ gu_op((dest).val, (u32 __user *)(ptr)); \
+})
#endif /* CONFIG_PPC64 */
-#define get_user_instr(x, ptr) \
- ___get_user_instr(get_user, x, ptr)
+#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
-#define __get_user_instr(x, ptr) \
- ___get_user_instr(__get_user, x, ptr)
+#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
/*
* Instruction data type for POWER
@@ -59,9 +60,9 @@ static inline int ppc_inst_primary_opcode(struct ppc_inst x)
return ppc_inst_val(x) >> 26;
}
-#ifdef CONFIG_PPC64
-#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff })
+#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
+#ifdef CONFIG_PPC64
#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
static inline u32 ppc_inst_suffix(struct ppc_inst x)
@@ -69,68 +70,43 @@ static inline u32 ppc_inst_suffix(struct ppc_inst x)
return x.suffix;
}
-static inline bool ppc_inst_prefixed(struct ppc_inst x)
-{
- return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff;
-}
+#else
+#define ppc_inst_prefix(x, y) ppc_inst(x)
-static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+static inline u32 ppc_inst_suffix(struct ppc_inst x)
{
- return ppc_inst_prefix(swab32(ppc_inst_val(x)),
- swab32(ppc_inst_suffix(x)));
+ return 0;
}
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- u32 val, suffix;
-
- val = *(u32 *)ptr;
- if ((val >> 26) == OP_PREFIX) {
- suffix = *((u32 *)ptr + 1);
- return ppc_inst_prefix(val, suffix);
- } else {
- return ppc_inst(val);
- }
-}
+#endif /* CONFIG_PPC64 */
-static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
{
- return *(u64 *)&x == *(u64 *)&y;
+ if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
+ return ppc_inst_prefix(*ptr, *(ptr + 1));
+ else
+ return ppc_inst(*ptr);
}
-#else
-
-#define ppc_inst(x) ((struct ppc_inst){ .val = x })
-
-#define ppc_inst_prefix(x, y) ppc_inst(x)
-
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
- return false;
-}
-
-static inline u32 ppc_inst_suffix(struct ppc_inst x)
-{
- return 0;
+ return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
{
- return ppc_inst(swab32(ppc_inst_val(x)));
-}
-
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- return *ptr;
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
{
- return ppc_inst_val(x) == ppc_inst_val(y);
+ if (ppc_inst_val(x) != ppc_inst_val(y))
+ return false;
+ if (!ppc_inst_prefixed(x))
+ return true;
+ return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
-#endif /* CONFIG_PPC64 */
-
static inline int ppc_inst_len(struct ppc_inst x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
@@ -140,13 +116,13 @@ static inline int ppc_inst_len(struct ppc_inst x)
* Return the address of the next instruction, if the instruction @value was
* located at @location.
*/
-static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value)
+static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
struct ppc_inst tmp;
tmp = ppc_inst_read(value);
- return location + ppc_inst_len(tmp);
+ return (void *)location + ppc_inst_len(tmp);
}
static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
@@ -178,6 +154,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src);
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
#endif /* _ASM_POWERPC_INST_H */
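Editor's note: with the rework above, ppc_inst_read() takes a plain u32 pointer and treats an instruction as prefixed when its primary opcode (top 6 bits) equals OP_PREFIX. The standalone sketch below shows that detection; struct inst and the sample encodings are simplified stand-ins for the kernel's struct ppc_inst, not the real definitions.

/* Detect prefixed instructions by the primary opcode of the first word. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OP_PREFIX 1

struct inst {
	uint32_t val;
	uint32_t suffix;	/* only meaningful when prefixed */
};

static struct inst inst_read(const uint32_t *p)
{
	struct inst x = { .val = p[0], .suffix = 0 };

	if ((p[0] >> 26) == OP_PREFIX)
		x.suffix = p[1];
	return x;
}

static bool inst_prefixed(struct inst x)
{
	return (x.val >> 26) == OP_PREFIX;
}

int main(void)
{
	/* Illustrative encodings: a prefix word + suffix word, then a plain ori. */
	uint32_t code[] = { 0x06000000, 0x38000000, 0x60000000 };
	struct inst a = inst_read(&code[0]);
	struct inst b = inst_read(&code[2]);

	printf("prefixed: %d %d\n", inst_prefixed(a), inst_prefixed(b)); /* 1 0 */
	return 0;
}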
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 59f704408d65..6b800d3e2681 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -73,13 +73,47 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
+#ifdef CONFIG_PPC_BOOK3S_64
+extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
+unsigned long search_kernel_restart_table(unsigned long addr);
+
+DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return false;
+
+ if (regs->nip >= (unsigned long)__end_soft_masked)
+ return false;
+
+ return search_kernel_soft_mask_table(regs->nip);
+}
+
+static inline void srr_regs_clobbered(void)
+{
+ local_paca->srr_valid = 0;
+ local_paca->hsrr_valid = 0;
+}
+#else
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
/* Can avoid a test-and-clear because NMIs do not call this */
clear_thread_local_flags(_TLF_NAPPING);
- regs->nip = (unsigned long)power4_idle_nap_return;
+ regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
}
#endif
}
@@ -129,9 +163,18 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != INTERRUPT_PROGRAM)
+ if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ BUG_ON(is_implicit_soft_masked(regs));
+ }
+#ifdef CONFIG_PPC_BOOK3S
+ /* Move this under a debugging check */
+ if (arch_irq_disabled_regs(regs))
+ BUG_ON(search_kernel_restart_table(regs->nip));
+#endif
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif
booke_restore_dbcr0();
@@ -186,6 +229,7 @@ struct interrupt_nmi_state {
u8 irq_soft_mask;
u8 irq_happened;
u8 ftrace_enabled;
+ u64 softe;
#endif
};
@@ -211,6 +255,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
#ifdef CONFIG_PPC64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
+ state->softe = regs->softe;
/*
* Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
@@ -220,12 +265,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
- regs->nip < (unsigned long)__end_interrupts) {
- // Kernel code running below __end_interrupts is
- // implicitly soft-masked.
+ if (is_implicit_soft_masked(regs)) {
+ // Adjust regs->softe to reflect the implicit soft-mask, so
+ // arch_irq_disabled_regs(regs) behaves as expected.
regs->softe = IRQS_ALL_DISABLED;
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
/* Don't do any per-CPU operations until interrupt state is fixed */
@@ -258,11 +304,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
*/
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
+ if (arch_irq_disabled_regs(regs)) {
+ unsigned long rst = search_kernel_restart_table(regs->nip);
+ if (rst)
+ regs_set_return_ip(regs, rst);
+ }
+#endif
+
if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ regs->softe = state->softe;
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
@@ -528,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
void __noreturn unrecoverable_exception(struct pt_regs *regs);
void replay_system_reset(void);
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 4982f3711fc3..2b3278534bc1 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index ec96232529ac..1df763002726 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -5,14 +5,6 @@
#define KUAP_READ 1
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
-/*
- * For prevent_user_access() only.
- * Use the current saved situation instead of the to/from/size params.
- * Used on book3s/32
- */
-#define KUAP_CURRENT_READ 4
-#define KUAP_CURRENT_WRITE 8
-#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup.h>
@@ -46,10 +38,7 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
-#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
-void kuep_lock(void);
-void kuep_unlock(void);
-#else
+#ifndef CONFIG_PPC_BOOK3S_32
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif
@@ -83,8 +72,7 @@ static inline unsigned long kuap_get_and_assert_locked(void)
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
+static inline void prevent_user_access(unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -96,53 +84,53 @@ static __always_inline void setup_kup(void)
setup_kuap(disable_kuap);
}
-static inline void allow_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
-static inline void allow_write_to_user(void __user *to, unsigned long size)
+static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size, KUAP_WRITE);
}
-static inline void allow_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
-static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
- prevent_user_access(NULL, from, size, KUAP_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_write_to_user(void __user *to, unsigned long size)
+static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
{
- prevent_user_access(to, NULL, size, KUAP_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
-static inline void prevent_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
- prevent_user_access(to, from, size, KUAP_READ_WRITE);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_access_user(void)
+static __always_inline void prevent_current_access_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_read_from_user(void)
+static __always_inline void prevent_current_read_from_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_current_write_to_user(void)
+static __always_inline void prevent_current_write_to_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
#endif /* !__ASSEMBLY__ */
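Editor's note: after the rework above, the allow_*() helpers still receive the user address and size (needed on book3s/32 to pick the segment to unlock), while the prevent_*() helpers only need the access direction. A hedged, hypothetical caller showing the usual pairing around a raw copy; the function name and error handling are illustrative, not part of this patch:

/* Hypothetical helper: open a KUAP read window around a raw user copy. */
#include <linux/uaccess.h>
#include <asm/kup.h>

static unsigned long example_read_user(void *dst, const void __user *src,
				       unsigned long size)
{
	unsigned long left;

	allow_read_from_user(src, size);		/* unlock the segment holding src */
	left = __copy_from_user(dst, src, size);	/* bytes not copied */
	prevent_read_from_user(src, size);		/* lock again */
	return left;
}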
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
index 2fca299f7e19..c63105d2c9e7 100644
--- a/arch/powerpc/include/asm/kvm_guest.h
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
-bool check_kvm_guest(void);
+int check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
-static inline bool check_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
#endif
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index ae25e6e72997..4fe018cc207b 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
struct pt_regs *regs = ftrace_get_regs(fregs);
- regs->nip = ip;
+ regs_set_return_ip(regs, ip);
}
#define klp_get_ftrace_location klp_get_ftrace_location
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 607168b1aef4..27016b98ecb2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -220,7 +220,7 @@ enum {
#elif defined(CONFIG_44x)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
#endif
-#if defined(CONFIG_E200) || defined(CONFIG_E500)
+#ifdef CONFIG_E500
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
@@ -324,7 +324,6 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
-#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
@@ -334,17 +333,6 @@ static inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-#else
-static inline bool radix_enabled(void)
-{
- return false;
-}
-
-static inline bool early_radix_enabled(void)
-{
- return false;
-}
-#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
@@ -357,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
return false;
}
#endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+ return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index db186c539d37..9ba6b585337f 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -57,7 +57,6 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index 39be9aea86db..64b6c608eca4 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -66,10 +66,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t size = huge_page_size(hstate_vma(vma));
+ size_t size = 1UL << shift;
if (size == SZ_16K)
return __pte(pte_val(entry) & ~_PAGE_HUGE);
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 295ef5639609..882a0bc7887a 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -9,10 +9,22 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
+
#include <asm/reg.h>
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
+ if (kuap_is_disabled())
+ return;
+
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
@@ -23,12 +35,20 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, regs->kuap);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
- unsigned long kuap = mfspr(SPRN_MD_AP);
+ unsigned long kuap;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ kuap = mfspr(SPRN_MD_AP);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
@@ -38,25 +58,35 @@ static inline unsigned long kuap_get_and_assert_locked(void)
static inline void kuap_assert_locked(void)
{
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
kuap_get_and_assert_locked();
}
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline unsigned long prevent_user_access_return(void)
{
- unsigned long flags = mfspr(SPRN_MD_AP);
+ unsigned long flags;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
@@ -65,12 +95,18 @@ static inline unsigned long prevent_user_access_return(void)
static inline void restore_user_access(unsigned long flags)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, flags);
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
+ if (kuap_is_disabled())
+ return false;
+
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 2d92a39d8f2e..43ceca128531 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -113,6 +113,7 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
+extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 6e4faa0a9b35..997cec973406 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -178,6 +178,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
+#include <linux/sizes.h>
void mmu_pin_tlb(unsigned long top, bool readonly);
@@ -225,6 +226,48 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
+ unsigned int max_page_shift, unsigned long size)
+{
+ if (end - addr < size)
+ return false;
+
+ if ((1UL << max_page_shift) < size)
+ return false;
+
+ if (!IS_ALIGNED(addr, size))
+ return false;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ return false;
+
+ return true;
+}
+
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
+ return SZ_512K;
+ if (PAGE_SIZE == SZ_16K)
+ return SZ_16K;
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
+ return SZ_16K;
+ return PAGE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= SZ_512K)
+ return 19;
+ else if (size >= SZ_16K)
+ return 14;
+ else
+ return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
/* patch sites */
extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
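Editor's note: arch_vmap_pte_range_map_size() above picks the largest huge-page candidate (512K, then 16K) that fits in the remaining range, is permitted by max_page_shift, and is aligned both virtually and physically. The userspace sketch below reproduces that selection with simplified PAGE_SIZE/PFN_PHYS definitions; it is an illustration, not the kernel code.

/* Pick the biggest mapping size that fits, is allowed and is aligned. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_16K		0x4000UL
#define SZ_512K		0x80000UL
#define PAGE_SIZE	0x1000UL
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << 12)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static bool vmap_try_size(unsigned long addr, unsigned long end, uint64_t pfn,
			  unsigned int max_page_shift, unsigned long size)
{
	if (end - addr < size)
		return false;
	if ((1UL << max_page_shift) < size)
		return false;
	if (!IS_ALIGNED(addr, size))
		return false;
	if (!IS_ALIGNED(PFN_PHYS(pfn), size))
		return false;
	return true;
}

static unsigned long vmap_map_size(unsigned long addr, unsigned long end,
				   uint64_t pfn, unsigned int max_page_shift)
{
	if (vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
		return SZ_512K;
	if (vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
		return SZ_16K;
	return PAGE_SIZE;
}

int main(void)
{
	printf("%#lx\n", vmap_map_size(0x80000, 0x200000, 0x80000 >> 12, 19)); /* 0x80000 */
	printf("%#lx\n", vmap_map_size(0x84000, 0x90000, 0x84000 >> 12, 19));  /* 0x4000 */
	return 0;
}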
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 96522f7f0618..f06ae00f2a65 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -54,7 +54,6 @@ extern int icache_44x_need_flush;
#define PGD_MASKED_BITS 0
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index fe2f4c9acd9e..10f5cf444d72 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -56,10 +56,14 @@
#define p4d_none(p4d) (!p4d_val(p4d))
#define p4d_bad(p4d) (p4d_val(p4d) == 0)
#define p4d_present(p4d) (p4d_val(p4d) != 0)
-#define p4d_page_vaddr(p4d) (p4d_val(p4d) & ~P4D_MASKED_BITS)
#ifndef __ASSEMBLY__
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *) (p4d_val(p4d) & ~P4D_MASKED_BITS);
+}
+
static inline void p4d_clear(p4d_t *p4dp)
{
*p4dp = __p4d(0);
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 57cd3892bfe0..d081704b13fb 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -12,8 +12,6 @@
#include <asm/barrier.h>
#include <asm/asm-const.h>
-#define FIRST_USER_ADDRESS 0UL
-
/*
* Size of EA range mapped by our pagetables.
*/
@@ -164,7 +162,11 @@ static inline void pud_clear(pud_t *pudp)
#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
|| (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud) (pud_val(pud) != 0)
-#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS);
+}
extern struct page *pud_page(pud_t pud);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec18ac818e3a..dc05a862e72a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -149,11 +149,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
- mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long mm_ctx_slb_addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
@@ -167,9 +165,16 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC64
+ u64 exit_save_r1; /* Syscall/interrupt R1 save */
+#endif
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 hsrr_valid; /* HSRRs set for HRFID */
+ u8 srr_valid; /* SRRs set for RFID */
+#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 6dd78a2dc03a..3360cad78ace 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -70,9 +70,4 @@ extern struct kmem_cache *pgtable_cache[];
#include <asm/nohash/pgalloc.h>
#endif
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
- return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index c6a676714f04..d564d0ecd4cd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,6 +41,10 @@ struct mm_struct;
#ifndef __ASSEMBLY__
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
+
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
@@ -72,6 +76,7 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+void poking_init(void);
extern unsigned long ioremap_bot;
@@ -152,6 +157,12 @@ static inline bool p4d_is_leaf(p4d_t p4d)
}
#endif
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ac41776661e9..bede76dd3db7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -76,6 +76,40 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
+/* For use with PPC_RAW_() macros */
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _R11 11
+#define _R12 12
+#define _R13 13
+#define _R14 14
+#define _R15 15
+#define _R16 16
+#define _R17 17
+#define _R18 18
+#define _R19 19
+#define _R20 20
+#define _R21 21
+#define _R22 22
+#define _R23 23
+#define _R24 24
+#define _R25 25
+#define _R26 26
+#define _R27 27
+#define _R28 28
+#define _R29 29
+#define _R30 30
+#define _R31 31
+
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
@@ -222,13 +256,11 @@
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
-#define PPC_INST_ISYNC 0x4c00012c
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MTMSRD 0x7c000164
-#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_RFEBB 0x4c000124
@@ -241,10 +273,10 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
-#define PPC_INST_SC 0x44000002
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
+#define PPC_INST_SETB 0x7c000100
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_TRECHKPT 0x7c0007dd
@@ -252,18 +284,9 @@
#define PPC_INST_TSR 0x7c0005dd
#define PPC_INST_LD 0xe8000000
#define PPC_INST_STD 0xf8000000
-#define PPC_INST_MFLR 0x7c0802a6
-#define PPC_INST_MTCTR 0x7c0903a6
-#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
-#define PPC_INST_BLR 0x4e800020
-#define PPC_INST_BCTR 0x4e800420
-#define PPC_INST_BCTRL 0x4e800421
#define PPC_INST_DIVD 0x7c0003d2
-#define PPC_INST_RLDICR 0x78000004
-#define PPC_INST_ORI 0x60000000
-#define PPC_INST_ORIS 0x64000000
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
@@ -323,6 +346,8 @@
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)
+#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
+#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -383,6 +408,10 @@
#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
+#define PPC_RAW_SC() (0x44000002)
+#define PPC_RAW_SYNC() (0x7c0004ac)
+#define PPC_RAW_ISYNC() (0x4c00012c)
+
/*
* Define what the VSX XX1 form instructions will look like, then add
* the 128 bit load store instructions based on that.
@@ -404,10 +433,10 @@
#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_RAW_PLXVP(xtp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i)))
-#define PPC_RAW_PSTXVP(xsp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i)))
+#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
#define PPC_RAW_NAP (0x4c000364)
#define PPC_RAW_SLEEP (0x4c0003a4)
#define PPC_RAW_WINKLE (0x4c0003e4)
@@ -445,16 +474,17 @@
#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
-#define PPC_RAW_NOP() (PPC_INST_NOP)
-#define PPC_RAW_BLR() (PPC_INST_BLR)
+#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
+#define PPC_RAW_BLR() (0x4e800020)
#define PPC_RAW_BLRL() (0x4e800021)
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
-#define PPC_RAW_MFLR(t) (PPC_INST_MFLR | ___PPC_RT(t))
-#define PPC_RAW_BCTR() (PPC_INST_BCTR)
-#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r))
-#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
+#define PPC_RAW_BCTR() (0x4e800420)
+#define PPC_RAW_BCTRL() (0x4e800421)
+#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
+#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
-#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
@@ -499,8 +529,8 @@
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
-#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
-#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
@@ -519,7 +549,7 @@
(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
-#define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
+#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
@@ -533,6 +563,8 @@
#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
+#define PPC_RAW_EIEIO() (0x7c0006ac)
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
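Editor's note: the new PPC_HIGHER()/PPC_HIGHEST() helpers complete the set of 16-bit extractors used when materialising wide constants with lis/ori/rldicr/oris/ori sequences. A small sketch of the decomposition — the macro bodies match the header, the program around them is illustrative:

/* Split a 64-bit constant into the four 16-bit immediates. */
#include <stdint.h>
#include <stdio.h>

#define PPC_LO(v)	((v) & 0xffff)
#define PPC_HI(v)	(((v) >> 16) & 0xffff)
#define PPC_HIGHER(v)	(((v) >> 32) & 0xffff)
#define PPC_HIGHEST(v)	(((v) >> 48) & 0xffff)

int main(void)
{
	uint64_t v = 0xc000000001234567ULL;

	printf("highest=%04llx higher=%04llx hi=%04llx lo=%04llx\n",
	       (unsigned long long)PPC_HIGHEST(v), (unsigned long long)PPC_HIGHER(v),
	       (unsigned long long)PPC_HI(v), (unsigned long long)PPC_LO(v));
	/* prints: highest=c000 higher=0000 hi=0123 lo=4567 */
	return 0;
}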
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index d6739d700f0a..116c1519728a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -762,6 +762,21 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
+ stringify_in_c(.previous)
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 84dd1addd434..c5d984700d24 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
- regs->msr |= MSR_SINGLESTEP;
+ regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* We turn off Critical Input Exception(CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
- regs->msr &= ~MSR_CE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7bf8a15af224..f348e564f7dd 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -276,7 +276,15 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .kuap = ~0UL, /* KUAP_NONE */ \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+}
+#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
.ksp = INIT_SP, \
.pgdir = swapper_pg_dir, \
@@ -339,17 +347,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define spin_end() HMT_medium()
-#define spin_until_cond(cond) \
-do { \
- if (unlikely(!(cond))) { \
- spin_begin(); \
- do { \
- spin_cpu_relax(); \
- } while (!(cond)); \
- spin_end(); \
- } \
-} while (0)
-
#endif
/* Check that a certain kernel stack pointer is valid in task_struct p */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index e646c7f218bc..8a0d8fb35328 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
* @bus_addr: The 'translated' bus address of the region.
* @len: The length in bytes of the region.
* @offset: The offset from the start of memory of the region.
+ * @dma_mask: Device dma_mask.
 * @ioid: The IOID of the device that owns this region
* @chunk_list: Opaque variable used by the ioc page manager.
* @region_ops: struct ps3_dma_region_ops - dma region operations
@@ -85,6 +86,7 @@ struct ps3_dma_region {
enum ps3_dma_region_type region_type;
unsigned long len;
unsigned long offset;
+ u64 dma_mask;
/* driver variables (set by ps3_dma_region_create) */
unsigned long bus_addr;
@@ -232,7 +234,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index b476a685f066..14422e851494 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -48,11 +48,12 @@ struct pt_regs
unsigned long result;
};
};
-
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
union {
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
+ unsigned long exit_result;
#endif
union {
#ifdef CONFIG_PPC_KUAP
@@ -68,6 +69,23 @@ struct pt_regs
};
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
+#endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+ struct { /* Must be a multiple of 16 bytes */
+ unsigned long mas0;
+ unsigned long mas1;
+ unsigned long mas2;
+ unsigned long mas3;
+ unsigned long mas6;
+ unsigned long mas7;
+ unsigned long srr0;
+ unsigned long srr1;
+ unsigned long csrr0;
+ unsigned long csrr1;
+ unsigned long dsrr0;
+ unsigned long dsrr1;
+ };
+#endif
};
#endif
@@ -122,6 +140,41 @@ struct pt_regs
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
+#include <asm/paca.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void set_return_regs_changed(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->nip = ip;
+ set_return_regs_changed();
+}
+
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
+{
+ regs->msr = msr;
+ set_return_regs_changed();
+}
+
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
+{
+ regs_set_return_ip(regs, regs->nip + offset);
+}
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
@@ -131,7 +184,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- regs->nip = val;
+ regs_set_return_ip(regs, val);
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
@@ -144,15 +197,6 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
return 0;
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
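Editor's note: regs_set_return_ip()/regs_set_return_msr() above pair every change of the return state with set_return_regs_changed(), which clears the paca's srr_valid/hsrr_valid flags so the exit path knows SRR0/SRR1 must be rewritten. A standalone sketch of that invalidate-on-write pattern, using plain globals in place of the paca fields:

/* Any write to the return state invalidates the cached SRR contents. */
#include <stdbool.h>
#include <stdio.h>

struct regs { unsigned long nip, msr; };
static bool srr_valid, hsrr_valid;

static void set_return_regs_changed(void)
{
	srr_valid = false;
	hsrr_valid = false;
}

static void regs_set_return_ip(struct regs *regs, unsigned long ip)
{
	regs->nip = ip;
	set_return_regs_changed();
}

int main(void)
{
	struct regs r = { .nip = 0x1000, .msr = 0x9000 };

	srr_valid = true;			/* pretend SRR0/SRR1 already match */
	regs_set_return_ip(&r, 0x2000);		/* change the return address */
	printf("nip=%#lx srr_valid=%d\n", r.nip, srr_valid); /* nip=0x2000 srr_valid=0 */
	return 0;
}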
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7c81d3e563b2..be85cf156a1f 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -393,6 +393,7 @@
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_TRIG2 0x372
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
@@ -1435,8 +1436,6 @@ static inline void mtsr(u32 val, u32 idx)
}
#endif
-#define proc_trap() asm volatile("trap")
-
extern unsigned long current_stack_frame(void);
register unsigned long current_stack_pointer asm("r1");
@@ -1447,16 +1446,6 @@ extern void scom970_write(unsigned int address, unsigned long value);
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
-
-static inline void update_power8_hid0(unsigned long hid0)
-{
- /*
- * The HID0 update on Power8 should at the very least be
- * preceded by a SYNC instruction followed by an ISYNC
- * instruction
- */
- asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
-}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index b774a4477d5f..792eefaf230b 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -92,6 +92,9 @@ static inline bool security_ftr_enabled(u64 feature)
// The L1-D cache should be flushed after user accesses from the kernel
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+// The STF flush should be executed on privilege state switch
+#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
@@ -99,6 +102,7 @@ static inline bool security_ftr_enabled(u64 feature)
SEC_FTR_BNDS_CHK_SPEC_BAR | \
SEC_FTR_L1D_FLUSH_ENTRY | \
SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_STF_BARRIER | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
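
With SEC_FTR_STF_BARRIER now part of SEC_FTR_DEFAULT, setup code can key the store-forwarding barrier off this bit. A hedged sketch of such a check; enable_stf_barrier() is an illustrative name, not something introduced by this patch:

    static void __init stf_barrier_example(void)
    {
            /* only patch in the barrier when the feature bits ask for it */
            if (security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                security_ftr_enabled(SEC_FTR_STF_BARRIER))
                    enable_stf_barrier();   /* hypothetical helper */
    }
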
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..b040094f7920
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO 0
+#define SET_MEMORY_RW 1
+#define SET_MEMORY_NX 2
+#define SET_MEMORY_X 3
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
+
+#endif
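
The new header gives powerpc the generic set_memory_*() entry points on top of a single change_memory_attr() implementation. A minimal usage sketch, assuming a page-aligned kernel address covering one page (the caller and its purpose are illustrative):

    #include <asm/set_memory.h>

    /* Hedged example: make one page executable, then write-protect it. */
    static int seal_code_page(unsigned long addr)
    {
            int err;

            err = set_memory_x(addr, 1);    /* numpages = 1 */
            if (err)
                    return err;
            return set_memory_ro(addr, 1);
    }
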
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e89bfebd4e00..6c1a7d217d1a 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -10,7 +10,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern bool init_mem_is_free;
-extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 972ed0df154d..1df867c2e054 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -13,12 +13,11 @@ struct pt_regs;
* we don't allow putting a breakpoint on an mtmsrd instruction.
* Similarly we don't allow breakpoints on rfid instructions.
* These macros tell us if an instruction is a mtmsrd or rfid.
- * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
- * and an mtmsrd (64-bit).
+ * Note that these return true for both mtmsr/rfi (32-bit)
+ * and mtmsrd/rfid (64-bit).
*/
#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
-#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
-#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
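
The relaxed 0xfc0007be mask in the sstep.h hunk above works because rfi (0x4c000064) and rfid (0x4c000024) differ only in bit 0x40, which the mask clears; the same bit distinguishes mtmsr (0x7c000124) from mtmsrd (0x7c000164). A small self-checking illustration of the arithmetic, written as user-space C purely for clarity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint32_t mask   = 0xfc0007be;     /* bit 0x40 masked out */
            const uint32_t rfi    = 0x4c000064, rfid   = 0x4c000024;
            const uint32_t mtmsr  = 0x7c000124, mtmsrd = 0x7c000164;

            assert((rfi   & mask) == (rfid   & mask));  /* IS_RFID matches both */
            assert((mtmsr & mask) == (mtmsrd & mask));  /* IS_MTMSRD matches both */
            return 0;
    }
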
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 160422a439aa..09a9ae5f3656 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -83,5 +83,11 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
}
#endif
+#define arch_supports_page_table_move arch_supports_page_table_move
+static inline bool arch_supports_page_table_move(void)
+{
+ return radix_enabled();
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
deleted file mode 100644
index ce69c5eff95e..000000000000
--- a/arch/powerpc/include/asm/unaligned.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_UNALIGNED_H
-#define _ASM_POWERPC_UNALIGNED_H
-
-#ifdef __KERNEL__
-
-/*
- * The PowerPC can do unaligned accesses itself based on its endian mode.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN__
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-#else
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 5bf65f5d44a9..fe683371336f 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -24,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- struct ppc_inst insn;
- struct ppc_inst ixol;
+ u32 insn[2];
+ u32 ixol[2];
};
};
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index e33f80b0ea81..57573d9c1e09 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -5,8 +5,10 @@
#ifndef _ASM_POWERPC_VAS_H
#define _ASM_POWERPC_VAS_H
-
-struct vas_window;
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+#include <uapi/asm/vas-api.h>
/*
* Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
@@ -49,6 +51,64 @@ enum vas_cop_type {
};
/*
+ * User space VAS windows are opened by tasks and take references
+ * to pid and mm until windows are closed.
+ * Stores pid, mm, and tgid for each window.
+ */
+struct vas_user_win_ref {
+ struct pid *pid; /* PID of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
+};
+
+/*
+ * Common VAS window struct on PowerNV and PowerVM
+ */
+struct vas_window {
+ u32 winid;
+ u32 wcreds_max; /* Window credits */
+ enum vas_cop_type cop;
+ struct vas_user_win_ref task_ref;
+ char *dbgname;
+ struct dentry *dbgdir;
+};
+
+/*
+ * User space window operations used for powernv and powerVM
+ */
+struct vas_user_win_ops {
+ struct vas_window * (*open_win)(int vas_id, u64 flags,
+ enum vas_cop_type);
+ u64 (*paste_addr)(struct vas_window *);
+ int (*close_win)(struct vas_window *);
+};
+
+static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
+{
+ /* Drop references to pid, tgid, and mm */
+ put_pid(ref->pid);
+ put_pid(ref->tgid);
+ if (ref->mm)
+ mmdrop(ref->mm);
+}
+
+static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
+{
+ mm_context_add_vas_window(ref->mm);
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+ * if process / thread has any open VAS window (Use
+ * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
+}
+
+/*
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
@@ -100,6 +160,7 @@ struct vas_tx_win_attr {
bool rx_win_ord_mode;
};
+#ifdef CONFIG_PPC_POWERNV
/*
* Helper to map a chip id to VAS id.
* For POWER9, this is a 1:1 mapping. In the future this maybe a 1:N
@@ -162,6 +223,43 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_api_powernv(void);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* VAS Capabilities */
+#define VAS_GZIP_QOS_FEAT 0x1
+#define VAS_GZIP_DEF_FEAT 0x2
+#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
+#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
+
+/* NX Capabilities */
+#define VAS_NX_GZIP_FEAT 0x1
+#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
+
+/*
+ * These structs are used to retrieve overall VAS capabilities that
+ * the hypervisor provides.
+ */
+struct hv_vas_all_caps {
+ __be64 descriptor;
+ __be64 feat_type;
+} __packed __aligned(0x1000);
+
+struct vas_all_caps {
+ u64 descriptor;
+ u64 feat_type;
+};
+
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
+int vas_register_api_pseries(struct module *mod,
+ enum vas_cop_type cop_type, const char *name);
+void vas_unregister_api_pseries(void);
+#endif
+
/*
* Register / unregister coprocessor type to VAS API which will be exported
* to user space. Applications can use this API to open / close window
@@ -171,7 +269,12 @@ int vas_paste_crb(struct vas_window *win, int offset, bool re);
* used for others in future.
*/
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name);
+ const char *name,
+ const struct vas_user_win_ops *vops);
void vas_unregister_coproc_api(void);
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref);
+void vas_dump_crb(struct coprocessor_request_block *crb);
#endif /* __ASM_POWERPC_VAS_H */
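
With struct vas_window and vas_user_win_ops now shared, a platform backend exposes its user-space window handling by registering callbacks with the common API. A hedged sketch of that registration; the example_* names and the "nx-gzip" string are illustrative, and VAS_COP_TYPE_GZIP is assumed to be one of the vas_cop_type values (not shown in this hunk):

    /* hypothetical platform callbacks, prototypes only */
    static struct vas_window *example_open_win(int vas_id, u64 flags,
                                               enum vas_cop_type cop);
    static u64 example_paste_addr(struct vas_window *win);
    static int example_close_win(struct vas_window *win);

    static const struct vas_user_win_ops example_win_ops = {
            .open_win   = example_open_win,
            .paste_addr = example_paste_addr,
            .close_win  = example_close_win,
    };

    static int __init example_vas_api_init(void)
    {
            return vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
                                           "nx-gzip", &example_win_ops);
    }
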
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 8e903b3f9c24..d9cf192368ad 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -65,8 +65,12 @@ struct icp_ops {
extern const struct icp_ops *icp_ops;
+#ifdef CONFIG_PPC_ICS_NATIVE
/* Native ICS */
extern int ics_native_init(void);
+#else
+static inline int ics_native_init(void) { return -ENODEV; }
+#endif
/* RTAS ICS */
#ifdef CONFIG_PPC_ICS_RTAS
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
index 50ef95e2f5b1..82488b1e7276 100644
--- a/arch/powerpc/include/uapi/asm/papr_pdsm.h
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -77,6 +77,9 @@
/* Indicate that the 'dimm_fuel_gauge' field is valid */
#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
+/* Indicate that the 'dimm_dsc' field is valid */
+#define PDSM_DIMM_DSC_VALID 2
+
/*
* Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH
* Various flags indicate the health status of the dimm.
@@ -105,6 +108,9 @@ struct nd_papr_pdsm_health {
/* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
__u16 dimm_fuel_gauge;
+
+ /* Extension flag PDSM_DIMM_DSC_VALID */
+ __u64 dimm_dsc;
};
__u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
};
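
The new dimm_dsc field follows the same convention as dimm_fuel_gauge: it is only meaningful when the matching validity flag is set. A hedged consumer-side sketch; the extension_flags field name is assumed from the existing PAPR PDSM health payload and is not visible in this hunk:

    static u64 pdsm_dimm_dsc_or_zero(const struct nd_papr_pdsm_health *h)
    {
            if (h->extension_flags & PDSM_DIMM_DSC_VALID)
                    return h->dimm_dsc;
            return 0;       /* DSC not reported by the hypervisor */
    }
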
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
index ebd4b2424785..7c81301ecdba 100644
--- a/arch/powerpc/include/uapi/asm/vas-api.h
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -13,11 +13,15 @@
#define VAS_MAGIC 'v'
#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+/* Flags to VAS TX open window ioctl */
+/* To allocate a window with QoS credit, otherwise use default credit */
+#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
+
struct vas_tx_win_open_attr {
__u32 version;
__s16 vas_id; /* specific instance of vas or -1 for default */
__u16 reserved1;
- __u64 flags; /* Future use */
+ __u64 flags;
__u64 reserved2[6];
};
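
The previously reserved flags word now carries VAS_TX_WIN_FLAG_QOS_CREDIT. A hedged user-space sketch of requesting a QoS-credit window with the VAS_TX_WIN_OPEN ioctl; the device path and the version value are assumptions about the NX GZIP character device, not part of this header:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <asm/vas-api.h>

    static int open_qos_tx_window(void)
    {
            struct vas_tx_win_open_attr attr;
            int fd = open("/dev/crypto/nx-gzip", O_RDWR);   /* assumed path */

            if (fd < 0)
                    return -1;

            memset(&attr, 0, sizeof(attr));
            attr.version = 1;                       /* assumed interface version */
            attr.vas_id = -1;                       /* default VAS instance */
            attr.flags = VAS_TX_WIN_FLAG_QOS_CREDIT;

            if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
                    return -1;
            return fd;
    }
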
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index aa267d173ded..5bee245d832b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,11 +86,7 @@ int main(void)
OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
- OFFSET(MMCONTEXTID, mm_struct, context.id);
-#ifdef CONFIG_PPC64
- DEFINE(SIGSEGV, SIGSEGV);
- DEFINE(NMI_MASK, NMI_MASK);
-#else
+#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_RTAS
OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
@@ -119,7 +115,6 @@ int main(void)
#ifdef CONFIG_ALTIVEC
OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
- OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
@@ -150,22 +145,15 @@ int main(void)
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
OFFSET(THREAD_ACC, thread_struct, acc);
- OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
- OFFSET(KUAP, thread_struct, kuap);
-#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
@@ -185,19 +173,12 @@ int main(void)
sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
- OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
- OFFSET(TI_PREEMPT, thread_info, preempt_count);
#ifdef CONFIG_PPC64
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
- OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
- OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
- OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
- OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
/* paca */
- DEFINE(PACA_SIZE, sizeof(struct paca_struct));
OFFSET(PACAPACAINDEX, paca_struct, paca_index);
OFFSET(PACAPROCSTART, paca_struct, cpu_start);
OFFSET(PACAKSAVE, paca_struct, kstack);
@@ -209,18 +190,13 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
+#ifdef CONFIG_PPC_BOOK3S_64
+ OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
+ OFFSET(PACASRR_VALID, paca_struct, srr_valid);
+#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S
- OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
- OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
- OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
- DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
-#endif /* CONFIG_PPC_MM_SLICES */
-#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACAPGD, paca_struct, pgd);
@@ -241,21 +217,9 @@ int main(void)
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S_64
- OFFSET(PACASLBCACHE, paca_struct, slb_cache);
- OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
- OFFSET(PACASTABRR, paca_struct, stab_rr);
- OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
-#else
- OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
-#endif /* CONFIG_PPC_MM_SLICES */
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
-#ifdef CONFIG_PPC_PSERIES
- OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
-#endif
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
@@ -264,9 +228,7 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
- OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
- OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
@@ -282,6 +244,9 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
+#ifdef CONFIG_PPC64
+ OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
+#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
@@ -343,35 +308,24 @@ int main(void)
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
-#ifdef CONFIG_PPC_KUAP
- STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
-#endif
-
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
- DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
- DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+ STACK_PT_REGS_OFFSET(MAS0, mas0);
/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
- DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
- DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
- DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
- DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
- DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
- DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
- DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
- DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
- DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
- DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
- DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
- DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+ STACK_PT_REGS_OFFSET(MMUCR, mas0);
+ STACK_PT_REGS_OFFSET(MAS1, mas1);
+ STACK_PT_REGS_OFFSET(MAS2, mas2);
+ STACK_PT_REGS_OFFSET(MAS3, mas3);
+ STACK_PT_REGS_OFFSET(MAS6, mas6);
+ STACK_PT_REGS_OFFSET(MAS7, mas7);
+ STACK_PT_REGS_OFFSET(_SRR0, srr0);
+ STACK_PT_REGS_OFFSET(_SRR1, srr1);
+ STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+ STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+ STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+ STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
#endif
-#ifndef CONFIG_PPC64
- OFFSET(MM_PGD, mm_struct, pgd);
-#endif /* ! CONFIG_PPC64 */
-
/* About the CPU features table */
OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
@@ -404,13 +358,6 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
-#else
- DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
-#endif
- DEFINE(PTE_SIZE, sizeof(pte_t));
-
#ifdef CONFIG_KVM
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
@@ -482,11 +429,9 @@ int main(void)
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
- OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix);
- OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
@@ -514,7 +459,6 @@ int main(void)
OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
- OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
@@ -525,7 +469,6 @@ int main(void)
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
- OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
@@ -645,10 +588,8 @@ int main(void)
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
- HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
- HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
@@ -756,7 +697,6 @@ int main(void)
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
- DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 735e89337398..5693e1c67c2b 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
static void __init create_trampoline(unsigned long addr)
{
- struct ppc_inst *p = (struct ppc_inst *)addr;
+ u32 *p = (u32 *)addr;
/* The maximum range of a single instruction branch, is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
@@ -45,8 +45,8 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
- patch_instruction(p, ppc_inst(PPC_INST_NOP));
- patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
+ patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
+ patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 9160285cb2f4..0273a1349006 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -32,6 +32,7 @@
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
+#include <asm/interrupt.h>
#include "head_32.h"
@@ -74,6 +75,24 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
.globl transfer_to_syscall
transfer_to_syscall:
+ stw r11, GPR1(r1)
+ stw r11, 0(r1)
+ mflr r12
+ stw r12, _LINK(r1)
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#endif
+ lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ SAVE_GPR(2, r1)
+ addi r12,r12,STACK_FRAME_REGS_MARKER@l
+ stw r9,_MSR(r1)
+ li r2, INTERRUPT_SYSCALL
+ stw r12,8(r1)
+ stw r2,_TRAP(r1)
+ SAVE_GPR(0, r1)
+ SAVE_4GPRS(3, r1)
+ SAVE_2GPRS(7, r1)
+ addi r2,r10,-THREAD
SAVE_NVGPRS(r1)
/* Calling convention has r9 = orig r0, r10 = regs */
@@ -176,28 +195,6 @@ _GLOBAL(_switch)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
- mfmsr r11
- li r0,MSR_FP /* Disable floating-point */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_VEC@h /* Disable altivec */
- mfspr r12,SPRN_VRSAVE /* save vrsave register value */
- stw r12,THREAD+THREAD_VRSAVE(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_SPE@h /* Disable SPE */
- mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
- stw r12,THREAD+THREAD_SPEFSCR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
- and. r0,r0,r11 /* FP or altivec or SPE enabled? */
- beq+ 1f
- andc r11,r11,r0
- mtmsr r11
- isync
-1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
@@ -218,19 +215,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
mr r3,r2
addi r2,r4,-THREAD /* Update current */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_VRSAVE(r2)
- mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_SPEFSCR(r2)
- mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
-
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 03727308d8cc..15720f8661a1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,7 +32,6 @@
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
-#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
@@ -48,372 +47,7 @@
/*
* System calls.
*/
- .section ".toc","aw"
-SYS_CALL_TABLE:
- .tc sys_call_table[TC],sys_call_table
-
-#ifdef CONFIG_COMPAT
-COMPAT_SYS_CALL_TABLE:
- .tc compat_sys_call_table[TC],compat_sys_call_table
-#endif
-
-/* This value is used to mark exception frames on the stack. */
-exception_marker:
- .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
-
.section ".text"
- .align 7
-
-#ifdef CONFIG_PPC_BOOK3S
-.macro system_call_vectored name trapnr
- .globl system_call_vectored_\name
-system_call_vectored_\name:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- SCV_INTERRUPT_TO_KERNEL
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
- ld r2,PACATOC(r13)
- mfcr r12
- li r11,0
- /* Can we avoid saving r3-r8 in common case? */
- std r3,GPR3(r1)
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- /* Zero r9-r12, this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_LINK(r1)
- std r11,_CTR(r1)
-
- li r11,\trapnr
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- addi r10,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r10) /* "regshere" marker */
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- /*
- * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
- * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
- * and interrupts may be masked and pending already.
- * system_call_exception() will call trace_hardirqs_off() which means
- * interrupts could already have been blocked before trace_hardirqs_off,
- * but this is the best we can do.
- */
-
- /* Calling convention has r9 = orig r0, r10 = regs */
- mr r9,r0
- bl system_call_exception
-
-.Lsyscall_vectored_\name\()_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,1 /* scv */
- bl syscall_exit_prepare
-
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- cmpdi r3,0
- bne .Lsyscall_vectored_\name\()_restore_regs
-
- /* rfscv returns with LR->NIA and CTR->MSR */
- mtlr r4
- mtctr r5
-
- /* Could zero these as per ABI, but we may consider a stricter ABI
- * which preserves these if libc implementations can benefit, so
- * restore them for now until further measurement is done. */
- ld r0,GPR0(r1)
- ld r4,GPR4(r1)
- ld r5,GPR5(r1)
- ld r6,GPR6(r1)
- ld r7,GPR7(r1)
- ld r8,GPR8(r1)
- /* Zero volatile regs that may contain sensitive kernel data */
- li r9,0
- li r10,0
- li r11,0
- li r12,0
- mtspr SPRN_XER,r0
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
- RFSCV_TO_USER
- b . /* prevent speculative execution */
-
-.Lsyscall_vectored_\name\()_restore_regs:
- li r3,0
- mtmsrd r3,1
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
-
- ld r3,_CTR(r1)
- ld r4,_LINK(r1)
- ld r5,_XER(r1)
-
- REST_NVGPRS(r1)
- ld r0,GPR0(r1)
- mtcr r2
- mtctr r3
- mtlr r4
- mtspr SPRN_XER,r5
- REST_10GPRS(2, r1)
- REST_2GPRS(12, r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
-.endm
-
-system_call_vectored common 0x3000
-/*
- * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
- * which is tested by system_call_exception when r0 is -1 (as set by vector
- * entry code).
- */
-system_call_vectored sigill 0x7ff0
-
-
-/*
- * Entered via kernel return set up by kernel/sstep.c, must match entry regs
- */
- .globl system_call_vectored_emulate
-system_call_vectored_emulate:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- b system_call_vectored_common
-#endif
-
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common_real
-system_call_common_real:
- ld r10,PACAKMSR(r13) /* get MSR value for kernel */
- mtmsrd r10
-
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common
-system_call_common:
-_ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
-#ifdef CONFIG_PPC_FSL_BOOK3E
-START_BTB_FLUSH_SECTION
- BTB_FLUSH(r10)
-END_BTB_FLUSH_SECTION
-#endif
- ld r2,PACATOC(r13)
- mfcr r12
- li r11,0
- /* Can we avoid saving r3-r8 in common case? */
- std r3,GPR3(r1)
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- /* Zero r9-r12, this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_CTR(r1)
- mflr r10
-
- /*
- * This clears CR0.SO (bit 28), which is the error indication on
- * return from this system call.
- */
- rldimi r12,r11,28,(63-28)
- li r11,0xc00
- std r10,_LINK(r1)
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- addi r10,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r10) /* "regshere" marker */
-
- /*
- * We always enter kernel from userspace with irq soft-mask enabled and
- * nothing pending. system_call_exception() will call
- * trace_hardirqs_off().
- */
- li r11,IRQS_ALL_DISABLED
- li r12,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQSOFTMASK(r13)
- stb r12,PACAIRQHAPPENED(r13)
-
- /* Calling convention has r9 = orig r0, r10 = regs */
- mr r9,r0
- bl system_call_exception
-
-.Lsyscall_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,0 /* !scv */
- bl syscall_exit_prepare
-
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
- ld r6,_LINK(r1)
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
- mtlr r6
-
- cmpdi r3,0
- bne .Lsyscall_restore_regs
- /* Zero volatile regs that may contain sensitive kernel data */
- li r0,0
- li r4,0
- li r5,0
- li r6,0
- li r7,0
- li r8,0
- li r9,0
- li r10,0
- li r11,0
- li r12,0
- mtctr r0
- mtspr SPRN_XER,r0
-.Lsyscall_restore_regs_cont:
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-.Lsyscall_restore_regs:
- ld r3,_CTR(r1)
- ld r4,_XER(r1)
- REST_NVGPRS(r1)
- mtctr r3
- mtspr SPRN_XER,r4
- ld r0,GPR0(r1)
- REST_8GPRS(4, r1)
- ld r12,GPR12(r1)
- b .Lsyscall_restore_regs_cont
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-.Ltabort_syscall:
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-
-#ifdef CONFIG_PPC_BOOK3S
-_GLOBAL(ret_from_fork_scv)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_vectored_common_exit
-#endif
-
-_GLOBAL(ret_from_fork)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_exit
-
-_GLOBAL(ret_from_kernel_thread)
- bl schedule_tail
- REST_NVGPRS(r1)
- mtctr r14
- mr r3,r15
-#ifdef PPC64_ELF_ABI_v2
- mr r12,r14
-#endif
- bctrl
- li r3,0
- b .Lsyscall_exit
#ifdef CONFIG_PPC_BOOK3S_64
@@ -630,156 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
- /*
- * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
- * touched, no exit work created, then this can be used.
- */
- .balign IFETCH_ALIGN_BYTES
- .globl fast_interrupt_return
-fast_interrupt_return:
-_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
- kuap_check_amr r3, r4
- ld r5,_MSR(r1)
- andi. r0,r5,MSR_PR
-#ifdef CONFIG_PPC_BOOK3S
- bne .Lfast_user_interrupt_return_amr
- kuap_kernel_restore r3, r4
- andi. r0,r5,MSR_RI
- li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
- bne+ .Lfast_kernel_interrupt_return
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b . /* should not get here */
-#else
- bne .Lfast_user_interrupt_return
- b .Lfast_kernel_interrupt_return
-#endif
-
- .balign IFETCH_ALIGN_BYTES
- .globl interrupt_return
-interrupt_return:
-_ASM_NOKPROBE_SYMBOL(interrupt_return)
- ld r4,_MSR(r1)
- andi. r0,r4,MSR_PR
- beq .Lkernel_interrupt_return
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_user_prepare
- cmpdi r3,0
- bne- .Lrestore_nvgprs
-
-#ifdef CONFIG_PPC_BOOK3S
-.Lfast_user_interrupt_return_amr:
- kuap_user_restore r3, r4
-#endif
-.Lfast_user_interrupt_return:
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
-BEGIN_FTR_SECTION
- ld r10,_PPR(r1)
- mtspr SPRN_PPR,r10
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r0,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- li r0,0
-
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
- REST_GPR(13, r1)
-
- mtcr r3
- mtlr r4
- mtctr r5
- mtspr SPRN_XER,r6
-
- REST_4GPRS(2, r1)
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-.Lrestore_nvgprs:
- REST_NVGPRS(r1)
- b .Lfast_user_interrupt_return
-
- .balign IFETCH_ALIGN_BYTES
-.Lkernel_interrupt_return:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_kernel_prepare
-
-.Lfast_kernel_interrupt_return:
- cmpdi cr1,r3,0
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r0,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- ld r3,_LINK(r1)
- ld r4,_CTR(r1)
- ld r5,_XER(r1)
- ld r6,_CCR(r1)
- li r0,0
-
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
-
- mtlr r3
- mtctr r4
- mtspr SPRN_XER,r5
-
- /*
- * Leaving a stale exception_marker on the stack can confuse
- * the reliable stack unwinder later on. Clear it.
- */
- std r0,STACK_FRAME_OVERHEAD-16(r1)
-
- REST_4GPRS(2, r1)
-
- bne- cr1,1f /* emulate stack store */
- mtcr r6
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-1: /*
- * Emulate stack store with update. New r1 value was already calculated
- * and updated in our interrupt regs by emulate_loadstore, but we can't
- * store the previous value of r1 to the stack before re-loading our
- * registers from it, otherwise they could be clobbered. Use
- * PACA_EXGEN as temporary storage to hold the store data, as
- * interrupts are disabled here so it won't be clobbered.
- */
- mtcr r6
- std r9,PACA_EXGEN+0(r13)
- addi r9,r1,INT_FRAME_SIZE /* get original r1 */
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- std r9,0(r1) /* perform store component of stdu */
- ld r9,PACA_EXGEN+0(r13)
-
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
#ifdef CONFIG_PPC_RTAS
/*
* On CHRP, the Run-Time Abstraction Services (RTAS) have to be
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 2ed14d4a47f5..93b0f3ec8fb0 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -38,9 +38,9 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
for (i = 0; i < (len / 4); i++) {
struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
- patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst);
+ patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
- patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst);
+ patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index f1ae710274bc..1401787b0b93 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -26,6 +26,10 @@
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>
+/* 64e interrupt returns always use SRR registers */
+#define fast_interrupt_return fast_interrupt_return_srr
+#define interrupt_return interrupt_return_srr
+
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
* when taking special interrupts. For now we don't support that,
@@ -897,6 +901,34 @@ kernel_dbg_exc:
bl unknown_exception
b interrupt_return
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ ld r11,PACATOC(r13)
+ ld r14,__start___restart_table@got(r11)
+ ld r15,__stop___restart_table@got(r11)
+#else
+ LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
+#endif
+300:
+ cmpd r14,r15
+ beq 302f
+ ld r11,0(r14)
+ cmpld r10,r11
+ blt 301f
+ ld r11,8(r14)
+ cmpld r10,r11
+ bge 301f
+ ld r11,16(r14)
+ b 303f
+301:
+ addi r14,r14,24
+ b 300b
+302:
+ li r11,0
+303:
+.endm
+
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable
@@ -905,6 +937,9 @@ kernel_dbg_exc:
*/
.macro masked_interrupt_book3e paca_irq full_mask
+ std r14,PACA_EXGEN+EX_R14(r13)
+ std r15,PACA_EXGEN+EX_R15(r13)
+
lbz r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
@@ -914,15 +949,23 @@ kernel_dbg_exc:
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
- rldicl r10,r11,48,1 /* clear MSR_EE */
- rotldi r11,r10,16
+ xori r11,r11,MSR_EE /* clear MSR_EE */
mtspr SPRN_SRR1,r11
.endif
+ mfspr r10,SPRN_SRR0
+ SEARCH_RESTART_TABLE
+ cmpdi r11,0
+ beq 1f
+ mtspr SPRN_SRR0,r11 /* return to restart address */
+1:
+
lwz r11,PACA_EXGEN+EX_CR(r13)
mtcr r11
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi
b .
@@ -1282,7 +1325,12 @@ a2_tlbinit_code_start:
a2_tlbinit_after_linear_map:
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+ ld r5,PACATOC(r13)
+ ld r3,1f@got(r5)
+#else
LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
+#endif
mtctr r3
bctr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f7fc6e078d4e..4aec59a77d4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -428,18 +428,31 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
/* If coming from user, skip soft-mask tests. */
andi. r10,r12,MSR_PR
- bne 2f
-
- /* Kernel code running below __end_interrupts is implicitly
- * soft-masked */
- LOAD_HANDLER(r10, __end_interrupts)
+ bne 3f
+
+ /*
+ * Kernel code running below __end_soft_masked may be
+ * implicitly soft-masked if it is within the regions
+ * in the soft mask table.
+ */
+ LOAD_HANDLER(r10, __end_soft_masked)
cmpld r11,r10
+ bge+ 1f
+
+ /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
+ mtctr r12
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ SEARCH_SOFT_MASK_TABLE
+ cmpdi r12,0
+ mfctr r12 /* Restore r12 to SRR1 */
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ beq 1f /* Not in soft-mask table */
li r10,IMASK
- blt- 1f
+ b 2f /* In soft-mask table, always mask */
/* Test the soft mask state against our interrupt's bit */
- lbz r10,PACAIRQSOFTMASK(r13)
-1: andi. r10,r10,IMASK
+1: lbz r10,PACAIRQSOFTMASK(r13)
+2: andi. r10,r10,IMASK
/* Associate vector numbers with bits in paca->irq_happened */
.if IVEC == 0x500 || IVEC == 0xea0
li r10,PACA_IRQ_EE
@@ -470,7 +483,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
-2: mr r10,r1 /* Save r1 */
+3: mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
beq- 100f
ld r1,PACAKSAVE(r13) /* kernel stack to use */
@@ -485,6 +498,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
+ /* Mark our [H]SRRs valid for return */
+ li r10,1
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ stb r10,PACAHSRR_VALID(r13)
+ FTR_SECTION_ELSE
+ stb r10,PACASRR_VALID(r13)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ stb r10,PACAHSRR_VALID(r13)
+ .else
+ stb r10,PACASRR_VALID(r13)
+ .endif
+
.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
@@ -577,6 +604,66 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
__GEN_COMMON_BODY \name
.endm
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___restart_table)
+ LOAD_REG_ADDR(r10, __stop___restart_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ ld r12,16(r9)
+ b 303f
+301:
+ addi r9,r9,24
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
+.macro SEARCH_SOFT_MASK_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___soft_mask_table)
+ LOAD_REG_ADDR(r10, __stop___soft_mask_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ li r12,1
+ b 303f
+301:
+ addi r9,r9,16
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
@@ -584,10 +671,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
+ li r10,0
.if \hsrr
mtspr SPRN_HSRR1,r9
+ stb r10,PACAHSRR_VALID(r13)
.else
mtspr SPRN_SRR1,r9
+ stb r10,PACASRR_VALID(r13)
.endif
ld r9,_NIP(r1)
.if \hsrr
@@ -704,17 +794,17 @@ __start_interrupts:
* scv instructions enter the kernel without changing EE, RI, ME, or HV.
* In particular, this means we can take a maskable interrupt at any point
* in the scv handler, which is unlike any other interrupt. This is solved
- * by treating the instruction addresses below __end_interrupts as being
- * soft-masked.
+ * by treating the instruction addresses in the handler as being soft-masked,
+ * by adding a SOFT_MASK_TABLE entry for them.
*
* AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
* ensure scv is never executed with relocation off, which means AIL-0
* should never happen.
*
- * Before leaving the below __end_interrupts text, at least of the following
- * must be true:
+ * Before leaving the following inside-__end_soft_masked text, at least of the
+ * following must be true:
* - MSR[PR]=1 (i.e., return to userspace)
- * - MSR_EE|MSR_RI is set (no reentrant exceptions)
+ * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
* - Standard kernel environment is set up (stack, paca, etc)
*
* Call convention:
@@ -722,6 +812,7 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -751,8 +842,11 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
+2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
+SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
__LOAD_HANDLER(r10, system_call_vectored_common)
@@ -1149,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
- b interrupt_return
+ b interrupt_return_srr
#ifdef CONFIG_PPC_P7_NAP
@@ -1275,7 +1369,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_srr
1: bl do_break
/*
@@ -1283,7 +1377,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* If so, we need to restore them with their updated values.
*/
REST_NVGPRS(r1)
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1323,7 +1417,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1332,7 +1426,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1368,7 +1462,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1403,7 +1497,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1412,7 +1506,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1456,7 +1550,11 @@ EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
+ BEGIN_FTR_SECTION
+ b interrupt_return_hsrr
+ FTR_SECTION_ELSE
+ b interrupt_return_srr
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/**
@@ -1483,7 +1581,7 @@ EXC_COMMON_BEGIN(alignment_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1590,7 +1688,7 @@ EXC_COMMON_BEGIN(program_check_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_srr
/*
@@ -1633,12 +1731,12 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
@@ -1677,7 +1775,7 @@ EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1714,6 +1812,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
*
* Be careful to avoid touching the kernel stack.
*/
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
@@ -1761,7 +1861,7 @@ EXC_COMMON_BEGIN(doorbell_super_common)
#else
bl unknown_async_exception
#endif
- b interrupt_return
+ b interrupt_return_srr
EXC_REAL_NONE(0xb00, 0x100)
@@ -1838,8 +1938,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mtctr r10
bctr
.else
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common)
mtctr r10
@@ -1925,7 +2023,7 @@ EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
addi r3,r1,STACK_FRAME_OVERHEAD
bl single_step_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1963,7 +2061,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -1988,7 +2086,7 @@ EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2012,7 +2110,7 @@ EXC_COMMON_BEGIN(emulation_assist_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl emulation_assist_interrupt
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2089,7 +2187,7 @@ EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2119,7 +2217,7 @@ EXC_COMMON_BEGIN(h_doorbell_common)
#else
bl unknown_async_exception
#endif
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2145,7 +2243,7 @@ EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xec0, 0x20)
@@ -2188,7 +2286,7 @@ EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2225,19 +2323,19 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2278,14 +2376,14 @@ BEGIN_FTR_SECTION
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2313,7 +2411,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2341,7 +2439,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
- b interrupt_return
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xfa0, 0x20)
@@ -2370,7 +2468,7 @@ EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_system_error_exception
- b interrupt_return
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
@@ -2401,7 +2499,7 @@ EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
addi r3,r1,STACK_FRAME_OVERHEAD
bl instruction_breakpoint_exception
- b interrupt_return
+ b interrupt_return_srr
EXC_REAL_NONE(0x1400, 0x100)
@@ -2509,6 +2607,8 @@ BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -2521,7 +2621,7 @@ EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
+ b interrupt_return_hsrr
#ifdef CONFIG_CBE_RAS
@@ -2538,7 +2638,7 @@ EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_maintenance_exception
- b interrupt_return
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
@@ -2568,7 +2668,7 @@ EXC_COMMON_BEGIN(altivec_assist_common)
#else
bl unknown_exception
#endif
- b interrupt_return
+ b interrupt_return_srr
#ifdef CONFIG_CBE_RAS
@@ -2585,7 +2685,7 @@ EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_thermal_exception
- b interrupt_return
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
@@ -2610,7 +2710,6 @@ INT_DEFINE_END(soft_nmi)
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
- mfspr r11,SPRN_SRR0
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
@@ -2624,6 +2723,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
mtmsrd r9,1
kuap_kernel_restore r9, r10
+
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
@@ -2645,19 +2745,24 @@ masked_Hinterrupt:
.else
masked_interrupt:
.endif
- lbz r11,PACAIRQHAPPENED(r13)
- or r11,r11,r10
- stb r11,PACAIRQHAPPENED(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ lbz r9,PACAIRQHAPPENED(r13)
+ or r9,r9,r10
+ stb r9,PACAIRQHAPPENED(r13)
+
+ .if ! \hsrr
cmpwi r10,PACA_IRQ_DEC
bne 1f
- lis r10,0x7fff
- ori r10,r10,0xffff
- mtspr SPRN_DEC,r10
+ LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
+ mtspr SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
b soft_nmi_common
#else
b 2f
#endif
+ .endif
+
1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
xori r12,r12,MSR_EE /* clear MSR_EE */
@@ -2666,11 +2771,29 @@ masked_interrupt:
.else
mtspr SPRN_SRR1,r12
.endif
- ori r11,r11,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQHAPPENED(r13)
+ ori r9,r9,PACA_IRQ_HARD_DIS
+ stb r9,PACAIRQHAPPENED(r13)
2: /* done */
- ld r10,PACA_EXGEN+EX_CTR(r13)
- mtctr r10
+ li r9,0
+ .if \hsrr
+ stb r9,PACAHSRR_VALID(r13)
+ .else
+ stb r9,PACASRR_VALID(r13)
+ .endif
+
+ SEARCH_RESTART_TABLE
+ cmpdi r12,0
+ beq 3f
+ .if \hsrr
+ mtspr SPRN_HSRR0,r12
+ .else
+ mtspr SPRN_SRR0,r12
+ .endif
+3:
+
+ ld r9,PACA_EXGEN+EX_CTR(r13)
+ mtctr r9
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
@@ -2881,7 +3004,7 @@ MASKED_INTERRUPT hsrr=1
USE_FIXED_SECTION(virt_trampolines)
/*
- * All code below __end_interrupts is treated as soft-masked. If
+ * All code below __end_soft_masked is treated as soft-masked. If
* any code runs here with MSR[EE]=1, it must then cope with pending
* soft interrupt being raised (i.e., by ensuring it is replayed).
*
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index c9e2819b095a..c7022c41cc31 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
DEFINE_STATIC_KEY_FALSE(kvm_guest);
-bool check_kvm_guest(void)
+int __init check_kvm_guest(void)
{
struct device_node *hyper_node;
hyper_node = of_find_node_by_path("/hypervisor");
if (!hyper_node)
- return false;
+ return 0;
if (!of_device_is_compatible(hyper_node, "linux,kvm"))
- return false;
+ return 0;
static_branch_enable(&kvm_guest);
- return true;
+
+ return 0;
}
+core_initcall(check_kvm_guest); // before kvm_guest_init()
#endif
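With check_kvm_guest() registered as a core_initcall, the kvm_guest static key is settled once during boot rather than probed on demand, so later code can test it with a plain static branch. A minimal consumer might look like the sketch below; the accessor name is illustrative and not taken from this patch.

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(kvm_guest);

/* Hypothetical accessor: true once check_kvm_guest() has found a KVM hypervisor node. */
static inline bool running_as_kvm_guest(void)
{
	return static_branch_unlikely(&kvm_guest);
}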
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 2c57ece6671c..6010adcee16e 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -103,6 +103,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
ori r12,r12,MSR_FP
or r12,r12,r4
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
li r4,1
stb r4,THREAD_LOAD_FP(r5)
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index a8221ddcbd66..6b1ec9e3541b 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -142,42 +142,23 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
.macro SYSCALL_ENTRY trapno
mfspr r9, SPRN_SRR1
- mfspr r10, SPRN_SRR0
+ mfspr r12, SPRN_SRR0
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
- lis r12, 1f@h
- ori r12, r12, 1f@l
+ lis r10, 1f@h
+ ori r10, r10, 1f@l
mtspr SPRN_SRR1, r11
- mtspr SPRN_SRR0, r12
- mfspr r12,SPRN_SPRG_THREAD
+ mtspr SPRN_SRR0, r10
+ mfspr r10,SPRN_SPRG_THREAD
mr r11, r1
- lwz r1,TASK_STACK-THREAD(r12)
- tovirt(r12, r12)
+ lwz r1,TASK_STACK-THREAD(r10)
+ tovirt(r10, r10)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
rfi
1:
- stw r11,GPR1(r1)
- stw r11,0(r1)
- mr r11, r1
- stw r10,_NIP(r11)
- mflr r10
- stw r10, _LINK(r11)
- mfcr r10
- rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
- stw r10,_CCR(r11) /* save registers */
-#ifdef CONFIG_40x
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
-#endif
- lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno
- stw r10,8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
- addi r2,r12,-THREAD
+ stw r12,_NIP(r1)
+ mfcr r12
+ rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
+ stw r12,_CCR(r1)
b transfer_to_syscall /* jump to handler */
.endm
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index e1360b88b6cb..7d72ee5ab387 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -701,39 +701,3 @@ _GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR0_RST_SYSTEM@h
mtspr SPRN_DBCR0,r13
-
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
- sync
- mtspr SPRN_PID,r3
- isync /* Need an isync to flush shadow */
- /* TLBs after changing PID */
- blr
-
-/* We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 5c106ac36626..ddc978a2d381 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -532,6 +532,10 @@ finish_tlb_load_44x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+#ifdef CONFIG_PPC_KUEP
+0: rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
+ patch_site 0b, patch__tlb_44x_kuep
+#endif
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
@@ -743,6 +747,10 @@ finish_tlb_load_47x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+#ifdef CONFIG_PPC_KUEP
+0: rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
+ patch_site 0b, patch__tlb_47x_kuep
+#endif
1: tlbwe r11,r13,2
/* Done...restore registers and get out of here.
@@ -780,20 +788,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
/*
* Init CPU state. This is called at boot time or for secondary CPUs
* to setup initial TLB entries, setup IVORs, etc...
@@ -1239,34 +1233,8 @@ head_start_common:
isync
blr
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align PAGE_SHIFT
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
-/*
- * To support >32-bit physical addresses, we use an 8KB pgdir.
- */
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
-
#ifdef CONFIG_SMP
+ .data
.align 12
temp_boot_stack:
.space 1024
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ece7f97bafff..79930b0bc781 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -194,8 +194,9 @@ CLOSE_FIXED_SECTION(first_256B)
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
+/* This value is used to mark exception frames on the stack. */
exception_marker:
- .tc ID_72656773_68657265[TC],0x7265677368657265
+ .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
.previous
/*
@@ -211,6 +212,8 @@ OPEN_TEXT_SECTION(0x100)
USE_TEXT_SECTION()
+#include "interrupt_64.S"
+
#ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
@@ -997,23 +1000,3 @@ start_here_common:
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
.previous
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the bss, which is page-aligned.
- */
- .section ".bss"
-/*
- * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
- * We will need to find a better way to fix this
- */
- .align 16
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 7d445e4342c0..9bdb95f5694f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -786,28 +786,3 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
- .data
- .globl sdata
-sdata:
- .globl empty_zero_page
- .align PAGE_SHIFT
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE table pointers, usually the kernel and current user
- * pointer to their respective root page table (pgdir).
- */
- .globl abatron_pteptrs
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 065178f19a3d..68e5c0a7e99d 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
EXCEPTION_PROLOG_1
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
prepare_transfer_to_handler
- lwz r5, _DSISR(r11)
+ lwz r5, _DSISR(r1)
andis. r0, r5, DSISR_DABRMATCH@h
bne- 1f
bl do_page_fault
@@ -518,8 +518,6 @@ BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
- mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r2
BEGIN_MMU_FTR_SECTION
li r0,1
mfspr r1,SPRN_SPRG_603_LRU
@@ -531,9 +529,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
DataAddressInvalid:
mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -607,9 +611,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception unknown_exception
@@ -756,9 +766,6 @@ PerformanceMonitor:
* the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
- addis r9,r26,klimit@ha /* fetch klimit */
- lwz r25,klimit@l(r9)
- addis r25,r25,-KERNELBASE@h
lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
@@ -766,7 +773,8 @@ relocate_kernel:
addi r0,r3,4f@l /* jump to the address of 4f */
mtctr r0 /* in copy and do the rest. */
bctr /* jump to the copy */
-4: mr r5,r25
+4: lis r5,_end-KERNELBASE@h
+ ori r5,r5,_end-KERNELBASE@l
bl copy_and_flush /* copy the rest */
b turn_on_mmu
@@ -924,12 +932,6 @@ _GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
li r4, 0
3: mtsrin r3, r4
addi r3, r3, 0x111 /* increment VSID */
@@ -1024,58 +1026,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
rfi
/*
- * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
- *
- * Set up the segment registers for a new context.
- */
-_ENTRY(switch_mmu_context)
- lwz r3,MMCONTEXTID(r4)
- cmpwi cr0,r3,0
- blt- 4f
- mulli r3,r3,897 /* multiply context by skew factor */
- rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
- li r0,NUM_USER_SEGMENTS
- mtctr r0
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- lwz r4, MM_PGD(r4)
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
-BEGIN_MMU_FTR_SECTION
-#ifndef CONFIG_BDI_SWITCH
- lwz r4, MM_PGD(r4)
-#endif
- tophys(r4, r4)
- rlwinm r4, r4, 4, 0xffff01ff
- mtspr SPRN_SDR1, r4
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
- li r4,0
- isync
-3:
- mtsrin r3,r4
- addi r3,r3,0x111 /* next VSID */
- rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
- sync
- isync
- blr
-4: trap
- EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
- blr
-EXPORT_SYMBOL(switch_mmu_context)
-
-/*
* An undocumented "feature" of 604e requires that the v bit
* be cleared before changing BAT values.
*
@@ -1256,61 +1206,4 @@ setup_usbgecko_bat:
blr
#endif
-#ifdef CONFIG_8260
-/* Jump into the system reset for the rom.
- * We first disable the MMU, and then jump to the ROM reset address.
- *
- * r3 is the board info structure, r4 is the location for starting.
- * I use this for building a small kernel that can load other kernels,
- * rather than trying to write or rely on a rom monitor that can tftp load.
- */
- .globl m8260_gorom
-m8260_gorom:
- mfmsr r0
- rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- sync
- mtmsr r0
- sync
- mfspr r11, SPRN_HID0
- lis r10, 0
- ori r10,r10,HID0_ICE|HID0_DCE
- andc r11, r11, r10
- mtspr SPRN_HID0, r11
- isync
- li r5, MSR_ME|MSR_RI
- lis r6,2f@h
- addis r6,r6,-KERNELBASE@h
- ori r6,r6,2f@l
- mtspr SPRN_SRR0,r6
- mtspr SPRN_SRR1,r5
- isync
- sync
- rfi
-2:
- mtlr r4
- blr
-#endif
-
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
.data
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index f82470091697..e5503420b6c6 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -128,37 +128,20 @@ BEGIN_FTR_SECTION
mr r12, r13
lwz r13, THREAD_NORMSAVE(2)(r10)
FTR_SECTION_ELSE
-#endif
mfcr r12
-#ifdef CONFIG_KVM_BOOKE_HV
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
+#else
+ mfcr r12
#endif
mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
- lwz r11, TASK_STACK - THREAD(r10)
+ mr r11, r1
+ lwz r1, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
- ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
- stw r12, _CCR(r11) /* save various registers */
- mflr r12
- stw r12,_LINK(r11)
+ ALLOC_STACK_FRAME(r1, THREAD_SIZE - INT_FRAME_SIZE)
+ stw r12, _CCR(r1)
mfspr r12,SPRN_SRR0
- stw r1, GPR1(r11)
- stw r1, 0(r11)
- mr r1, r11
- stw r12,_NIP(r11)
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
- lis r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r12, r12, STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno
- stw r12, 8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
-
- addi r2,r10,-THREAD
+ stw r12,_NIP(r1)
b transfer_to_syscall /* jump to handler */
.endm
@@ -185,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
/* only on e500mc */
#define DBG_STACK_BASE dbgirq_ctx
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
#ifdef CONFIG_SMP
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
mfspr r8,SPRN_PIR; \
slwi r8,r8,2; \
addis r8,r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \
- addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+ addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#else
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
lis r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \
- addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+ addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#endif
/*
@@ -225,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
mtmsr r11; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
- addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
+ addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\
beq 1f; \
/* COMING FROM USER MODE */ \
stw r9,_CCR(r11); /* save CR */\
@@ -533,24 +514,5 @@ label:
bl kernel_fp_unavailable_exception; \
b interrupt_return
-#else /* __ASSEMBLY__ */
-struct exception_regs {
- unsigned long mas0;
- unsigned long mas1;
- unsigned long mas2;
- unsigned long mas3;
- unsigned long mas6;
- unsigned long mas7;
- unsigned long srr0;
- unsigned long srr1;
- unsigned long csrr0;
- unsigned long csrr1;
- unsigned long dsrr0;
- unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)
-
#endif /* __ASSEMBLY__ */
#endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index a1a5c3f10dc4..0f9642f36b49 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -985,20 +985,6 @@ _GLOBAL(abort)
mtspr SPRN_DBCR0,r13
isync
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
@@ -1226,26 +1212,3 @@ _GLOBAL(restore_to_as0)
*/
3: mr r3,r5
bl _start
-
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 8fc7a14e4d71..21a638aff72f 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -486,7 +486,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
return;
reset:
- regs->msr &= ~MSR_SE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_SE);
for (i = 0; i < nr_wp_slots(); i++) {
info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
__set_breakpoint(i, info);
@@ -537,7 +537,7 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
current->thread.last_hit_ubp[i] = bp[i];
info[i] = NULL;
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return false;
}
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index e0938ba298f2..21bbd615ca41 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -3,6 +3,7 @@
#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
+#include <linux/sched/debug.h> /* for show_regs */
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
@@ -26,6 +27,53 @@ unsigned long global_dbcr0[NR_CPUS];
typedef long (*syscall_fn)(long, long, long, long, long, long);
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+static inline bool exit_must_hard_disable(void)
+{
+ return static_branch_unlikely(&interrupt_exit_not_reentrant);
+}
+#else
+static inline bool exit_must_hard_disable(void)
+{
+ return true;
+}
+#endif
+
+/*
+ * local irqs must be disabled. Returns false if the caller must re-enable
+ * them, check for new work, and try again.
+ *
+ * This should be called with local irqs disabled, but if they were previously
+ * enabled when the interrupt handler returns (indicating a process-context /
+ * synchronous interrupt) then irqs_enabled should be true.
+ *
+ * If restartable is true, EE/RI can be left on because interrupts are handled
+ * with a restart sequence.
+ */
+static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
+{
+ /* This must be done with RI=1 because tracing may touch vmaps */
+ trace_hardirqs_on();
+
+ if (exit_must_hard_disable() || !restartable)
+ __hard_EE_RI_disable();
+
+#ifdef CONFIG_PPC64
+ /* This pattern matches prep_irq_for_idle */
+ if (unlikely(lazy_irq_pending_nocheck())) {
+ if (exit_must_hard_disable() || !restartable) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ __hard_RI_enable();
+ }
+ trace_hardirqs_off();
+
+ return false;
+ }
+#endif
+ return true;
+}
+
/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
long r6, long r7, long r8,
@@ -144,71 +192,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
return f(r3, r4, r5, r6, r7, r8);
}
-/*
- * local irqs must be disabled. Returns false if the caller must re-enable
- * them, check for new work, and try again.
- *
- * This should be called with local irqs disabled, but if they were previously
- * enabled when the interrupt handler returns (indicating a process-context /
- * synchronous interrupt) then irqs_enabled should be true.
- */
-static notrace __always_inline bool __prep_irq_for_enabled_exit(bool clear_ri)
-{
- /* This must be done with RI=1 because tracing may touch vmaps */
- trace_hardirqs_on();
-
- /* This pattern matches prep_irq_for_idle */
- if (clear_ri)
- __hard_EE_RI_disable();
- else
- __hard_irq_disable();
-#ifdef CONFIG_PPC64
- if (unlikely(lazy_irq_pending_nocheck())) {
- /* Took an interrupt, may have more exit work to do. */
- if (clear_ri)
- __hard_RI_enable();
- trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- return false;
- }
- local_paca->irq_happened = 0;
- irq_soft_mask_set(IRQS_ENABLED);
-#endif
- return true;
-}
-
-static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled)
-{
- if (__prep_irq_for_enabled_exit(clear_ri))
- return true;
-
- /*
- * Must replay pending soft-masked interrupts now. Don't just
- * local_irq_enabe(); local_irq_disable(); because if we are
- * returning from an asynchronous interrupt here, another one
- * might hit after irqs are enabled, and it would exit via this
- * same path allowing another to fire, and so on unbounded.
- *
- * If interrupts were enabled when this interrupt exited,
- * indicating a process context (synchronous) interrupt,
- * local_irq_enable/disable can be used, which will enable
- * interrupts rather than keeping them masked (unclear how
- * much benefit this is over just replaying for all cases,
- * because we immediately disable again, so all we're really
- * doing is allowing hard interrupts to execute directly for
- * a very small time, rather than being masked and replayed).
- */
- if (irqs_enabled) {
- local_irq_enable();
- local_irq_disable();
- } else {
- replay_soft_interrupts();
- }
-
- return false;
-}
-
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -231,57 +214,92 @@ static notrace void booke_load_dbcr0(void)
#endif
}
-/*
- * This should be called after a syscall returns, with r3 the return value
- * from the syscall. If this function returns non-zero, the system call
- * exit assembly should additionally load all GPR registers and CTR and XER
- * from the interrupt frame.
- *
- * The function graph tracer can not trace the return side of this function,
- * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
- */
-notrace unsigned long syscall_exit_prepare(unsigned long r3,
- struct pt_regs *regs,
- long scv)
+static void check_return_regs_valid(struct pt_regs *regs)
{
- unsigned long ti_flags;
- unsigned long ret = 0;
- bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long trap, srr0, srr1;
+ static bool warned;
+ u8 *validp;
+ char *h;
- CT_WARN_ON(ct_state() == CONTEXT_USER);
+ if (trap_is_scv(regs))
+ return;
- kuap_assert_locked();
+ trap = regs->trap;
+ // EE in HV mode sets HSRRs like 0xea0
+ if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
+ trap = 0xea0;
+
+ switch (trap) {
+ case 0x980:
+ case INTERRUPT_H_DATA_STORAGE:
+ case 0xe20:
+ case 0xe40:
+ case INTERRUPT_HMI:
+ case 0xe80:
+ case 0xea0:
+ case INTERRUPT_H_FAC_UNAVAIL:
+ case 0x1200:
+ case 0x1500:
+ case 0x1600:
+ case 0x1800:
+ validp = &local_paca->hsrr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_HSRR0);
+ srr1 = mfspr(SPRN_HSRR1);
+ h = "H";
+
+ break;
+ default:
+ validp = &local_paca->srr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_SRR0);
+ srr1 = mfspr(SPRN_SRR1);
+ h = "";
+ break;
+ }
- regs->result = r3;
+ if (srr0 == regs->nip && srr1 == regs->msr)
+ return;
- /* Check whether the syscall is issued inside a restartable sequence */
- rseq_syscall(regs);
+ /*
+ * A NMI / soft-NMI interrupt may have come in after we found
+ * srr_valid and before the SRRs are loaded. The interrupt then
+ * comes in and clobbers SRRs and clears srr_valid. Then we load
+ * the SRRs here and test them above and find they don't match.
+ *
+ * Test validity again after that, to catch such false positives.
+ *
+ * This test in general will have some window for false negatives
+ * and may not catch and fix all such cases if an NMI comes in
+ * later and clobbers SRRs without clearing srr_valid, but hopefully
+ * such things will get caught most of the time, statistically
+ * enough to be able to get a warning out.
+ */
+ barrier();
- ti_flags = current_thread_info()->flags;
+ if (!*validp)
+ return;
- if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
- if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
- r3 = -r3;
- regs->ccr |= 0x10000000; /* Set SO bit in CR */
- }
+ if (!warned) {
+ warned = true;
+ printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
+ printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
+ show_regs(regs);
}
- if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
- if (ti_flags & _TIF_RESTOREALL)
- ret = _TIF_RESTOREALL;
- else
- regs->gpr[3] = r3;
- clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
- } else {
- regs->gpr[3] = r3;
- }
-
- if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
- do_syscall_trace_leave(regs);
- ret |= _TIF_RESTOREALL;
- }
+ *validp = 0; /* fixup */
+#endif
+}
- local_irq_disable();
+static notrace unsigned long
+interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
+{
+ unsigned long ti_flags;
again:
ti_flags = READ_ONCE(current_thread_info()->flags);
@@ -303,7 +321,7 @@ again:
ti_flags = READ_ONCE(current_thread_info()->flags);
}
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely((ti_flags & _TIF_RESTORE_TM))) {
restore_tm_state(regs);
@@ -327,10 +345,10 @@ again:
}
}
- user_enter_irqoff();
+ check_return_regs_valid(regs);
- /* scv need not set RI=0 because SRRs are not used */
- if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) {
+ user_enter_irqoff();
+ if (!prep_irq_for_enabled_exit(true)) {
user_exit_irqoff();
local_irq_enable();
local_irq_disable();
@@ -352,90 +370,131 @@ again:
return ret;
}
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
+/*
+ * This should be called after a syscall returns, with r3 the return value
+ * from the syscall. If this function returns non-zero, the system call
+ * exit assembly should additionally load all GPR registers and CTR and XER
+ * from the interrupt frame.
+ *
+ * The function graph tracer can not trace the return side of this function,
+ * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
+ */
+notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ struct pt_regs *regs,
+ long scv)
{
unsigned long ti_flags;
- unsigned long flags;
unsigned long ret = 0;
+ bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
- if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
- BUG_ON(!(regs->msr & MSR_RI));
- BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(arch_irq_disabled_regs(regs));
CT_WARN_ON(ct_state() == CONTEXT_USER);
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * AMR can only have been unlocked if we interrupted the kernel.
- */
kuap_assert_locked();
- local_irq_save(flags);
-
-again:
- ti_flags = READ_ONCE(current_thread_info()->flags);
- while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
- local_irq_enable(); /* returning to user: may enable */
- if (ti_flags & _TIF_NEED_RESCHED) {
- schedule();
- } else {
- if (ti_flags & _TIF_SIGPENDING)
- ret |= _TIF_RESTOREALL;
- do_notify_resume(regs, ti_flags);
- }
- local_irq_disable();
- ti_flags = READ_ONCE(current_thread_info()->flags);
- }
+ regs->result = r3;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely((ti_flags & _TIF_RESTORE_TM))) {
- restore_tm_state(regs);
- } else {
- unsigned long mathflags = MSR_FP;
+ /* Check whether the syscall is issued inside a restartable sequence */
+ rseq_syscall(regs);
- if (cpu_has_feature(CPU_FTR_VSX))
- mathflags |= MSR_VEC | MSR_VSX;
- else if (cpu_has_feature(CPU_FTR_ALTIVEC))
- mathflags |= MSR_VEC;
+ ti_flags = current_thread_info()->flags;
- /* See above restore_math comment */
- if ((regs->msr & mathflags) != mathflags)
- restore_math(regs);
+ if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
+ if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
+ r3 = -r3;
+ regs->ccr |= 0x10000000; /* Set SO bit in CR */
}
}
- user_enter_irqoff();
+ if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
+ if (ti_flags & _TIF_RESTOREALL)
+ ret = _TIF_RESTOREALL;
+ else
+ regs->gpr[3] = r3;
+ clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
+ } else {
+ regs->gpr[3] = r3;
+ }
- if (unlikely(!__prep_irq_for_enabled_exit(true))) {
- user_exit_irqoff();
- local_irq_enable();
- local_irq_disable();
- goto again;
+ if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+ do_syscall_trace_leave(regs);
+ ret |= _TIF_RESTOREALL;
}
- booke_load_dbcr0();
+ local_irq_disable();
+ ret = interrupt_exit_user_prepare_main(ret, regs);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- local_paca->tm_scratch = regs->msr;
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
#endif
- account_cpu_user_exit();
+ return ret;
+}
- /* Restore user access locks last */
- kuap_user_restore(regs);
- kuep_unlock();
+#ifdef CONFIG_PPC64
+notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
+{
+ /*
+ * This is called when detecting a soft-pending interrupt as well as
+ * an alternate-return interrupt. So we can't just have the alternate
+ * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
+ * the soft-pending case were to fix things up as well). RI might be
+ * disabled, in which case it gets re-enabled by __hard_irq_disable().
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);
+
+ return regs->exit_result;
+}
+#endif
+
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
+{
+ unsigned long ret;
+
+ if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
+ BUG_ON(!(regs->msr & MSR_RI));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(arch_irq_disabled_regs(regs));
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * AMR can only have been unlocked if we interrupted the kernel.
+ */
+ kuap_assert_locked();
+
+ local_irq_disable();
+
+ ret = interrupt_exit_user_prepare_main(0, regs);
+
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
+#endif
return ret;
}
void preempt_schedule_irq(void);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
unsigned long flags;
unsigned long ret = 0;
unsigned long kuap;
+ bool stack_store = current_thread_info()->flags &
+ _TIF_EMULATE_STACK_STORE;
if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
unlikely(!(regs->msr & MSR_RI)))
@@ -450,11 +509,6 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
kuap = kuap_get_and_assert_locked();
- if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
- clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
- ret = 1;
- }
-
local_irq_save(flags);
if (!arch_irq_disabled_regs(regs)) {
@@ -469,17 +523,58 @@ again:
}
}
- if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags))))
+ check_return_regs_valid(regs);
+
+ /*
+ * Stack store exit can't be restarted because the interrupt
+ * stack frame might have been clobbered.
+ */
+ if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
+ /*
+ * Replay pending soft-masked interrupts now. Don't
+ * just local_irq_enable(); local_irq_disable(); because
+ * if we are returning from an asynchronous interrupt
+ * here, another one might hit after irqs are enabled,
+ * and it would exit via this same path allowing
+ * another to fire, and so on unbounded.
+ */
+ hard_irq_disable();
+ replay_soft_interrupts();
+ /* Took an interrupt, may have more exit work to do. */
goto again;
- } else {
- /* Returning to a kernel context with local irqs disabled. */
- __hard_EE_RI_disable();
+ }
#ifdef CONFIG_PPC64
+ /*
+ * An interrupt may clear MSR[EE] and set this concurrently,
+ * but it will be marked pending and the exit will be retried.
+ * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
+ * clear, until interrupt_exit_kernel_restart() calls
+ * hard_irq_disable(), which will set HARD_DIS again.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
+ } else {
+ check_return_regs_valid(regs);
+
+ if (unlikely(stack_store))
+ __hard_EE_RI_disable();
+ /*
+ * Returning to a kernel context with local irqs disabled.
+ * Here, if EE was enabled in the interrupted context, enable
+ * it on return as well. A problem exists here where a soft
+ * masked interrupt may have cleared MSR[EE] and set HARD_DIS
+ * here, and it will still exist on return to the caller. This
+ * will be resolved by the masked interrupt firing again.
+ */
if (regs->msr & MSR_EE)
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-#endif
+#endif /* CONFIG_PPC64 */
}
+ if (unlikely(stack_store)) {
+ clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
+ ret = 1;
+ }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
@@ -494,3 +589,46 @@ again:
return ret;
}
+
+#ifdef CONFIG_PPC64
+notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result |= interrupt_exit_user_prepare(regs);
+
+ return regs->exit_result;
+}
+
+/*
+ * No real need to return a value here because the stack store case does not
+ * get restarted.
+ */
+notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ if (regs->softe == IRQS_ENABLED)
+ trace_hardirqs_off();
+
+ BUG_ON(user_mode(regs));
+
+ return interrupt_exit_kernel_prepare(regs);
+}
+#endif
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
new file mode 100644
index 000000000000..d4212d2ff0b5
--- /dev/null
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -0,0 +1,774 @@
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
+#include <asm/feature-fixups.h>
+#include <asm/head-64.h>
+#include <asm/hw_irq.h>
+#include <asm/kup.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/tm.h>
+
+ .section ".toc","aw"
+SYS_CALL_TABLE:
+ .tc sys_call_table[TC],sys_call_table
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYS_CALL_TABLE:
+ .tc compat_sys_call_table[TC],compat_sys_call_table
+#endif
+ .previous
+
+ .align 7
+
+.macro DEBUG_SRR_VALID srr
+#ifdef CONFIG_PPC_RFI_SRR_DEBUG
+ .ifc \srr,srr
+ mfspr r11,SPRN_SRR0
+ ld r12,_NIP(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_SRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .else
+ mfspr r11,SPRN_HSRR0
+ ld r12,_NIP(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_HSRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .endif
+#endif
+.endm
+
+#ifdef CONFIG_PPC_BOOK3S
+.macro system_call_vectored name trapnr
+ .globl system_call_vectored_\name
+system_call_vectored_\name:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne tabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ SCV_INTERRUPT_TO_KERNEL
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+ /* Can we avoid saving r3-r8 in common case? */
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ /* Zero r9-r12, this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_LINK(r1)
+ std r11,_CTR(r1)
+
+ li r11,\trapnr
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r10) /* "regshere" marker */
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
+ * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
+ * and interrupts may be masked and pending already.
+ * system_call_exception() will call trace_hardirqs_off() which means
+ * interrupts could already have been blocked before trace_hardirqs_off,
+ * but this is the best we can do.
+ */
+
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
+
+.Lsyscall_vectored_\name\()_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,1 /* scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Lsyscall_vectored_\name\()_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_vectored_\name\()_restart
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ cmpdi r3,0
+ bne .Lsyscall_vectored_\name\()_restore_regs
+
+ /* rfscv returns with LR->NIA and CTR->MSR */
+ mtlr r4
+ mtctr r5
+
+ /* Could zero these as per ABI, but we may consider a stricter ABI
+ * which preserves these if libc implementations can benefit, so
+ * restore them for now until further measurement is done. */
+ ld r0,GPR0(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r9,0
+ li r10,0
+ li r11,0
+ li r12,0
+ mtspr SPRN_XER,r0
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
+ ld r1,GPR1(r1)
+ RFSCV_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_vectored_\name\()_restore_regs:
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+
+ ld r3,_CTR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_XER(r1)
+
+ REST_NVGPRS(r1)
+ ld r0,GPR0(r1)
+ mtcr r2
+ mtctr r3
+ mtlr r4
+ mtspr SPRN_XER,r5
+ REST_10GPRS(2, r1)
+ REST_2GPRS(12, r1)
+ ld r1,GPR1(r1)
+ RFI_TO_USER
+.Lsyscall_vectored_\name\()_rst_end:
+
+syscall_vectored_\name\()_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_vectored_\name\()_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
+
+.endm
+
+system_call_vectored common 0x3000
+
+/*
+ * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
+ * which is tested by system_call_exception when r0 is -1 (as set by vector
+ * entry code).
+ */
+system_call_vectored sigill 0x7ff0
+
+
+/*
+ * Entered via kernel return set up by kernel/sstep.c, must match entry regs
+ */
+ .globl system_call_vectored_emulate
+system_call_vectored_emulate:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ b system_call_vectored_common
+#endif /* CONFIG_PPC_BOOK3S */
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common_real
+system_call_common_real:
+_ASM_NOKPROBE_SYMBOL(system_call_common_real)
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ mtmsrd r10
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common
+system_call_common:
+_ASM_NOKPROBE_SYMBOL(system_call_common)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne tabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+ /* Can we avoid saving r3-r8 in common case? */
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ /* Zero r9-r12, this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
+ mflr r10
+
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r12,r11,28,(63-28)
+ li r11,0xc00
+ std r10,_LINK(r1)
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r10) /* "regshere" marker */
+
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,1
+ stb r11,PACASRR_VALID(r13)
+#endif
+
+ /*
+ * We always enter kernel from userspace with irq soft-mask enabled and
+ * nothing pending. system_call_exception() will call
+ * trace_hardirqs_off().
+ */
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+#ifdef CONFIG_PPC_BOOK3S
+ li r12,-1 /* Set MSR_EE and MSR_RI */
+ mtmsrd r12,1
+#else
+ wrteei 1
+#endif
+
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
+
+.Lsyscall_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,0 /* !scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Lsyscall_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r6,_LINK(r1)
+ mtlr r6
+
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r4,PACASRR_VALID(r13)
+ cmpdi r4,0
+ bne 1f
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+1:
+ DEBUG_SRR_VALID srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ cmpdi r3,0
+ bne .Lsyscall_restore_regs
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r0,0
+ li r4,0
+ li r5,0
+ li r6,0
+ li r7,0
+ li r8,0
+ li r9,0
+ li r10,0
+ li r11,0
+ li r12,0
+ mtctr r0
+ mtspr SPRN_XER,r0
+.Lsyscall_restore_regs_cont:
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
+ ld r1,GPR1(r1)
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_restore_regs:
+ ld r3,_CTR(r1)
+ ld r4,_XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r3
+ mtspr SPRN_XER,r4
+ ld r0,GPR0(r1)
+ REST_8GPRS(4, r1)
+ ld r12,GPR12(r1)
+ b .Lsyscall_restore_regs_cont
+.Lsyscall_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+syscall_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
+#endif
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tabort_syscall:
+_ASM_NOKPROBE_SYMBOL(tabort_syscall)
+ /* Firstly we need to enable TM in the kernel */
+ mfmsr r10
+ li r9, 1
+ rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r10, 0
+
+ /* tabort, this dooms the transaction, nothing else */
+ li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+ TABORT(R9)
+
+ /*
+ * Return directly to userspace. We have corrupted user register state,
+ * but userspace will never see that register state. Execution will
+ * resume after the tbegin of the aborted transaction with the
+ * checkpointed register state.
+ */
+ li r9, MSR_RI
+ andc r10, r10, r9
+ mtmsrd r10, 1
+ mtspr SPRN_SRR0, r11
+ mtspr SPRN_SRR1, r12
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+#endif
+
+ /*
+ * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+ * touched, no exit work created, then this can be used.
+ */
+ .balign IFETCH_ALIGN_BYTES
+ .globl fast_interrupt_return_srr
+fast_interrupt_return_srr:
+_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
+ kuap_check_amr r3, r4
+ ld r5,_MSR(r1)
+ andi. r0,r5,MSR_PR
+#ifdef CONFIG_PPC_BOOK3S
+ beq 1f
+ kuap_user_restore r3, r4
+ b .Lfast_user_interrupt_return_srr
+1: kuap_kernel_restore r3, r4
+ andi. r0,r5,MSR_RI
+ li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
+ bne+ .Lfast_kernel_interrupt_return_srr
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unrecoverable_exception
+ b . /* should not get here */
+#else
+ bne .Lfast_user_interrupt_return_srr
+ b .Lfast_kernel_interrupt_return_srr
+#endif
+
+.macro interrupt_return_macro srr
+ .balign IFETCH_ALIGN_BYTES
+ .globl interrupt_return_\srr
+interrupt_return_\srr\():
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
+ ld r4,_MSR(r1)
+ andi. r0,r4,MSR_PR
+ beq interrupt_return_\srr\()_kernel
+interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_user_prepare
+ cmpdi r3,0
+ bne- .Lrestore_nvgprs_\srr
+.Lrestore_nvgprs_\srr\()_cont:
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Linterrupt_return_\srr\()_user_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_user_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+.Lfast_user_interrupt_return_\srr\():
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ lbz r4,PACAIRQSOFTMASK(r13)
+ tdnei r4,IRQS_ENABLED
+#endif
+
+BEGIN_FTR_SECTION
+ ld r10,_PPR(r1)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ li r0,0
+
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+ REST_GPR(13, r1)
+
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtspr SPRN_XER,r6
+
+ REST_4GPRS(2, r1)
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_USER
+ .else
+ HRFI_TO_USER
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_user_rst_end:
+
+.Lrestore_nvgprs_\srr\():
+ REST_NVGPRS(r1)
+ b .Lrestore_nvgprs_\srr\()_cont
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_user_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_user_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_user_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
+#endif
+
+ .balign IFETCH_ALIGN_BYTES
+interrupt_return_\srr\()_kernel:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_kernel_prepare
+
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Linterrupt_return_\srr\()_kernel_rst_start:
+ ld r11,SOFTE(r1)
+ cmpwi r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bne 1f
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_kernel_restart
+#endif
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+1:
+
+.Lfast_kernel_interrupt_return_\srr\():
+ cmpdi cr1,r3,0
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_LINK(r1)
+ ld r4,_CTR(r1)
+ ld r5,_XER(r1)
+ ld r6,_CCR(r1)
+ li r0,0
+
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
+
+ /*
+ * Leaving a stale exception_marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ std r0,STACK_FRAME_OVERHEAD-16(r1)
+
+ REST_4GPRS(2, r1)
+
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it, otherwise they could be clobbered. Use
+ * PACA_EXGEN as temporary storage to hold the store data, as
+ * interrupts are disabled here so it won't be clobbered.
+ */
+ mtcr r6
+ std r9,PACA_EXGEN+0(r13)
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ std r9,0(r1) /* perform store component of stdu */
+ ld r9,PACA_EXGEN+0(r13)
+
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_kernel_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_kernel_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_kernel_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_kernel_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
+#endif
+
+.endm
+
+interrupt_return_macro srr
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_macro hsrr
+
+ .globl __end_soft_masked
+__end_soft_masked:
+DEFINE_FIXED_SYMBOL(__end_soft_masked)
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_BOOK3S
+_GLOBAL(ret_from_fork_scv)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_vectored_common_exit
+#endif
+
+_GLOBAL(ret_from_fork)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_exit
+
+_GLOBAL(ret_from_kernel_thread)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ mtctr r14
+ mr r3,r15
+#ifdef PPC64_ELF_ABI_v2
+ mr r12,r14
+#endif
+ bctrl
+ li r3,0
+ b .Lsyscall_exit
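Each exit path in this new file follows the same protocol: r1 is stashed in PACA_EXIT_SAVE_R1, PACAIRQHAPPENED is checked for anything other than HARD_DIS, and if a masked interrupt became pending during the window the code branches to a *_restart stub that re-disables, calls the matching C restart handler, and retries from the rst_start label; the SOFT_MASK_TABLE/RESTART_TABLE entries tell the masked-interrupt code which address ranges may be rewound. A compilable model of that loop, assuming irq_pending() and do_exit_work() stand in for the PACA check and the *_restart handler, is:

#include <stdbool.h>

/* Both helpers are stand-ins for illustration: irq_pending() models
 * "PACAIRQHAPPENED & ~PACA_IRQ_HARD_DIS", and do_exit_work() models the
 * *_restart C handler (e.g. syscall_exit_restart()). */
extern bool irq_pending(void);
extern long do_exit_work(long prev_ret);

static long restartable_exit(long ret)
{
	/*
	 * Everything from this check up to the rfid is covered by a
	 * RESTART_TABLE entry, so a masked interrupt taken in between
	 * rewinds NIP back here rather than losing the pending event.
	 */
	while (irq_pending())
		ret = do_exit_work(ret);
	return ret;	/* then unmask, load SRRs if still needed, and rfid */
}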
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 72cb45393ef2..551b653228c4 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -121,6 +121,7 @@ void replay_soft_interrupts(void)
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
again:
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
@@ -217,6 +218,100 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
+#ifdef CONFIG_CC_HAS_ASM_GOTO
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+ unsigned char irq_happened;
+
+ /* Write the new soft-enabled value if it is a disable */
+ if (mask) {
+ irq_soft_mask_set(mask);
+ return;
+ }
+
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+ * respect to soft-masked interrupts. If this was just a simple code
+ * sequence, a soft-masked interrupt could become pending right after
+ * the comparison and before the stb.
+ *
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+ asm_volatile_goto(
+"1: \n"
+" lbz 9,%0(13) \n"
+" cmpwi 9,0 \n"
+" bne %l[happened] \n"
+" stb 9,%1(13) \n"
+"2: \n"
+ RESTART_TABLE(1b, 2b, 1b)
+ : : "i" (offsetof(struct paca_struct, irq_happened)),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "cr0", "r9"
+ : happened);
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+ return;
+
+happened:
+ irq_happened = get_irq_happened();
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!irq_happened);
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
+
+ /* Have interrupts to replay, need to hard disable first */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (!(mfmsr() & MSR_EE)) {
+ /*
+ * An interrupt could have come in and cleared
+ * MSR[EE] and set IRQ_HARD_DIS, so check
+ * IRQ_HARD_DIS again and warn if it is still
+ * clear.
+ */
+ irq_happened = get_irq_happened();
+ WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+ }
+ }
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+ }
+
+ /*
+ * Disable preempt here, so that the below preempt_enable will
+ * perform resched if required (a replayed interrupt may set
+ * need_resched).
+ */
+ preempt_disable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+ replay_soft_interrupts_irqrestore();
+ local_paca->irq_happened = 0;
+
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+ __hard_irq_enable();
+ preempt_enable();
+}
+#else
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -288,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
__hard_irq_enable();
preempt_enable();
}
+#endif
EXPORT_SYMBOL(arch_local_irq_restore);
/*
@@ -654,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
trace_irq_exit(regs);
}
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp;
@@ -678,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
set_irq_regs(old_regs);
}
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+ __do_IRQ(regs);
+}
+
static void *__init alloc_vm_stack(void)
{
return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
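
The new CONFIG_CC_HAS_ASM_GOTO variant of arch_local_irq_restore() above has three outcomes: a fast path that re-reads irq_happened and, if nothing is pending, clears the soft mask with a single store (the RESTART_TABLE entry makes the load/compare/store window restartable, so a soft-masked interrupt cannot slip in between); a cheap path when only PACA_IRQ_HARD_DIS is set; and a slow path that hard-disables and replays pending interrupts. A minimal userspace sketch of that control flow only, with stand-in constants and no attempt to model the restart-table atomicity:

/*
 * Illustrative userspace model of the irq-restore decision logic above.
 * It mirrors the control flow only; the real code relies on the restart
 * table to make the check-and-clear of the soft mask atomic with respect
 * to soft-masked interrupts, which is not reproduced here.
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQ_HARD_DIS  0x01      /* stand-in for PACA_IRQ_HARD_DIS */
#define IRQ_PENDING   0x02      /* stand-in for any other PACA_IRQ_* bit */

struct cpu_state {
        unsigned char irq_happened;     /* pending-interrupt bits */
        bool soft_masked;               /* models paca->irq_soft_mask */
        bool hard_enabled;              /* models MSR[EE] */
};

static void model_irq_restore(struct cpu_state *cpu, bool mask)
{
        if (mask) {                     /* caller is restoring to "disabled": just record it */
                cpu->soft_masked = true;
                return;
        }

        if (!cpu->irq_happened) {       /* fast path: nothing pending, unmask */
                cpu->soft_masked = false;
                return;
        }

        if (cpu->irq_happened == IRQ_HARD_DIS) {        /* only hard-disabled, nothing to replay */
                cpu->irq_happened = 0;
                cpu->soft_masked = false;
                cpu->hard_enabled = true;
                return;
        }

        /* slow path: hard disable, replay pending interrupts, then enable */
        cpu->hard_enabled = false;
        printf("replaying pending interrupts (0x%x)\n", cpu->irq_happened);
        cpu->irq_happened = 0;
        cpu->soft_masked = false;
        cpu->hard_enabled = true;
}

int main(void)
{
        struct cpu_state cpu = { .irq_happened = IRQ_PENDING | IRQ_HARD_DIS,
                                 .soft_masked = true, .hard_enabled = false };

        model_irq_restore(&cpu, false);
        printf("soft_masked=%d hard_enabled=%d\n", cpu.soft_masked, cpu.hard_enabled);
        return 0;
}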
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index ce87dc5ea23c..5277cf582c16 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,10 +11,10 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- struct ppc_inst *addr = (struct ppc_inst *)jump_entry_code(entry);
+ u32 *addr = (u32 *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
patch_branch(addr, jump_entry_target(entry), 0);
else
- patch_instruction(addr, ppc_inst(PPC_INST_NOP));
+ patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7dd2ad3603ad..bdee7262c080 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -147,7 +147,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 0;
if (*(u32 *)regs->nip == BREAK_INSTR)
- regs->nip += BREAK_INSTR_SIZE;
+ regs_add_return_ip(regs, BREAK_INSTR_SIZE);
return 1;
}
@@ -372,7 +372,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
- regs->nip = pc;
+ regs_set_return_ip(regs, pc);
}
/*
@@ -394,7 +394,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
case 'c':
/* handle the optional parameter */
if (kgdb_hex2long(&ptr, &addr))
- linux_regs->nip = addr;
+ regs_set_return_ip(linux_regs, addr);
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
@@ -402,9 +402,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DBCR0,
mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
- linux_regs->msr |= MSR_DE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
#else
- linux_regs->msr |= MSR_SE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
#endif
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
@@ -417,11 +417,10 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
+ u32 instr, *addr = (u32 *)bpt->bpt_addr;
int err;
- unsigned int instr;
- struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
- err = get_kernel_nofault(instr, (unsigned *) addr);
+ err = get_kernel_nofault(instr, addr);
if (err)
return err;
@@ -429,7 +428,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return -EFAULT;
- *(unsigned int *)bpt->saved_instr = instr;
+ *(u32 *)bpt->saved_instr = instr;
return 0;
}
@@ -438,7 +437,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
- struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
+ u32 *addr = (u32 *)bpt->bpt_addr;
err = patch_instruction(addr, ppc_inst(instr));
if (err)
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 660138f6c4b2..7154d58338cc 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -39,7 +39,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* On powerpc, NIP is *before* this instruction for the
* pre handler
*/
- regs->nip -= MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -48,7 +48,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* Emulate singlestep (and also recover regs->nip)
* as if there is a nop
*/
- regs->nip += MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c64a5feaebbe..7a7cd6bda53e 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,11 +19,13 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <linux/moduleloader.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
+#include <asm/set_memory.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -103,28 +105,42 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
return addr;
}
+void *alloc_insn_page(void)
+{
+ void *page;
+
+ page = module_alloc(PAGE_SIZE);
+ if (!page)
+ return NULL;
+
+ if (strict_module_rwx_enabled()) {
+ set_memory_ro((unsigned long)page, 1);
+ set_memory_x((unsigned long)page, 1);
+ }
+ return page;
+}
+
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *prev;
- struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
+ struct ppc_inst insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
- } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
- printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
+ printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
ret = -EINVAL;
} else if ((unsigned long)p->addr & ~PAGE_MASK &&
- ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
+ ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
preempt_disable();
prev = get_kprobe(p->addr - 1);
preempt_enable_no_resched();
- if (prev &&
- ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
+ if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
@@ -138,7 +154,7 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
- patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
+ patch_instruction(p->ainsn.insn, insn);
p->opcode = ppc_inst_val(insn);
}
@@ -149,13 +165,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
@@ -178,7 +194,7 @@ static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs
* variant as values in regs could play a part in
* if the trap is taken or not
*/
- regs->nip = (unsigned long)p->ainsn.insn;
+ regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -228,7 +244,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
+ struct ppc_inst insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
@@ -276,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+ if (!IS_ENABLED(CONFIG_BOOKE) &&
+ (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
return 0;
/*
@@ -319,8 +336,9 @@ int kprobe_handler(struct pt_regs *regs)
kprobe_opcode_t insn = *p->ainsn.insn;
if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
/* Turn off 'trace' bits */
- regs->msr &= ~MSR_SINGLESTEP;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
goto no_kprobe;
}
@@ -415,7 +433,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* we end up emulating it in kprobe_handler(), which increments the nip
* again.
*/
- regs->nip = orig_ret_address - 4;
+ regs_set_return_ip(regs, orig_ret_address - 4);
regs->link = orig_ret_address;
return 0;
@@ -439,7 +457,7 @@ int kprobe_post_handler(struct pt_regs *regs)
if (!cur || user_mode(regs))
return 0;
- len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
+ len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
/* make sure we got here for instruction we have a kprobe on */
if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
@@ -450,8 +468,8 @@ int kprobe_post_handler(struct pt_regs *regs)
}
/* Adjust nip to after the single-stepped instruction */
- regs->nip = (unsigned long)cur->addr + len;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr + len);
+ regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
/*Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -490,9 +508,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* and allow the page fault handler to continue as a
* normal page fault.
*/
- regs->nip = (unsigned long)cur->addr;
- regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr);
+ /* Turn off 'trace' bits */
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
@@ -506,7 +526,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
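
The new alloc_insn_page() above allocates the kprobe instruction slot with module_alloc() and, when strict module RWX is in force, marks it read-only and executable up front; subsequent writes to the slot go through patch_instruction() rather than plain stores. A loose userspace analogy of the same W^X idea using mmap()/mprotect() — the ordering differs (here the buffer is filled before the permission flip, whereas the kernel flips permissions first and patches through a separate writable alias):

/*
 * Userspace analogy of a write-xor-execute instruction page: allocate a
 * page, stage its contents while writable, then drop write permission and
 * add execute permission.  The kernel path uses module_alloc() plus
 * set_memory_ro()/set_memory_x() and writes via patch_instruction().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        uint8_t *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Stage the "instructions" while the page is still writable. */
        memset(page, 0, page_size);

        /* Enforce W^X from here on: read + execute only. */
        if (mprotect(page, page_size, PROT_READ | PROT_EXEC) != 0) {
                perror("mprotect");
                return 1;
        }

        printf("instruction page at %p is now RX\n", (void *)page);
        munmap(page, page_size);
        return 0;
}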
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15e7b4900689..47a683cd00d2 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -274,7 +274,7 @@ void mce_common_process_ue(struct pt_regs *regs,
entry = search_kernel_exception_table(regs->nip);
if (entry) {
mce_err->ignore_event = true;
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
}
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 667104d4c455..c2f55fe7092d 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -463,7 +463,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
- instr = ppc_inst_read((struct ppc_inst *)instr_addr);
+ instr = ppc_inst_read((u32 *)instr_addr);
if (!analyse_instr(&op, &tmp, instr)) {
pfn = addr_to_pfn(regs, op.ea);
*addr = op.ea;
@@ -481,12 +481,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
return -1;
}
-static int mce_handle_ierror(struct pt_regs *regs,
+static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
const struct mce_ierror_table table[],
struct mce_error_info *mce_err, uint64_t *addr,
uint64_t *phys_addr)
{
- uint64_t srr1 = regs->msr;
int handled = 0;
int i;
@@ -695,19 +694,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
}
static long mce_handle_error(struct pt_regs *regs,
+ unsigned long srr1,
const struct mce_derror_table dtable[],
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
uint64_t addr, phys_addr = ULONG_MAX;
- uint64_t srr1 = regs->msr;
long handled;
if (SRR1_MC_LOADSTORE(srr1))
handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
&phys_addr);
else
- handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
+ handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
&phys_addr);
if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
@@ -723,16 +722,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
/* P7 DD1 leaves top bits of DSISR undefined */
regs->dsisr &= 0x0000ffff;
- return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p7_derror_table, mce_p7_ierror_table);
}
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p8_derror_table, mce_p8_ierror_table);
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
+ unsigned long srr1 = regs->msr;
+
/*
* On POWER9 DD2.1 and below, it's possible to get a machine check
* caused by a paste instruction where only DSISR bit 25 is set. This
@@ -746,10 +749,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
return 1;
- return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
+ /*
+ * Async machine check due to bad real address from store or foreign
+ * link timeout comes with the load/store bit (PPC bit 42) set in
+ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
+ * directed to the ierror table so it will find the cause (which
+ * describes it correctly as a store error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ ((srr1 & 0x081c0000) == 0x08140000 ||
+ (srr1 & 0x081c0000) == 0x08180000)) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p9_derror_table, mce_p9_ierror_table);
}
long __machine_check_early_realmode_p10(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
+ unsigned long srr1 = regs->msr;
+
+ /*
+ * Async machine check due to bad real address from store comes with
+ * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
+ * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
+ * so it will find the cause (which describes it correctly as a store
+ * error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ (srr1 & 0x081c0000) == 0x08140000) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p10_derror_table, mce_p10_ierror_table);
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 6a076bef2932..39ab15419592 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -388,9 +388,3 @@ _GLOBAL(start_secondary_resume)
bl start_secondary
b .
#endif /* CONFIG_SMP */
-
-/*
- * This routine is just here to keep GCC happy - sigh...
- */
-_GLOBAL(__main)
- blr
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 3f35c8d20be7..ed04a3ba66fe 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -92,12 +92,14 @@ int module_finalize(const Elf_Ehdr *hdr,
static __always_inline void *
__module_alloc(unsigned long size, unsigned long start, unsigned long end)
{
+ pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+
/*
* Don't do huge page allocations for modules yet until more testing
* is done. STRICT_MODULE_RWX may require extra work to support this
* too.
*/
- return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
+ return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));
}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c27b8687b82a..f417afc08d33 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -145,10 +145,9 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
+ if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
return 0;
- if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
- PPC_LO(val)))
+ if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
return 0;
return 1;
}
@@ -175,16 +174,10 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- /*
- * lis r12, sym@ha
- * addi r12, r12, sym@l
- * mtctr r12
- * bctr
- */
- entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
- entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
- entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
- entry->jump[3] = PPC_INST_BCTR;
+ entry->jump[0] = PPC_RAW_LIS(_R12, PPC_HA(val));
+ entry->jump[1] = PPC_RAW_ADDI(_R12, _R12, PPC_LO(val));
+ entry->jump[2] = PPC_RAW_MTCTR(_R12);
+ entry->jump[3] = PPC_RAW_BCTR();
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
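
The PLT stub above is now built from PPC_RAW_LIS()/PPC_RAW_ADDI()/PPC_RAW_MTCTR()/PPC_RAW_BCTR(), each of which expands to a complete 32-bit instruction word, instead of OR-ing opcode constants with field macros by hand. A hedged sketch of how such a D-form word is composed and of the @ha/@l split used by PPC_HA()/PPC_LO() — the macro definitions below are local stand-ins following the Power ISA D-form layout, not the kernel's headers:

/*
 * Sketch of a PPC_RAW_*-style D-form encoder (opcode | RT | RA | SI) and a
 * check that the lis/addi pair used by the PLT stub reassembles the target
 * address.  @ha compensates for the sign extension of addi's low 16 bits.
 */
#include <stdint.h>
#include <stdio.h>

#define D_FORM(op, rt, ra, si) \
        (((uint32_t)(op) << 26) | ((uint32_t)(rt) << 21) | \
         ((uint32_t)(ra) << 16) | ((uint16_t)(si)))

#define RAW_ADDI(rt, ra, si)    D_FORM(14, rt, ra, si)
#define RAW_ADDIS(rt, ra, si)   D_FORM(15, rt, ra, si)
#define RAW_LIS(rt, si)         RAW_ADDIS(rt, 0, si)    /* lis = addis rt,0,si */

#define LO(v)   ((uint16_t)(v))
#define HA(v)   ((uint16_t)(((v) + 0x8000) >> 16))

int main(void)
{
        uint32_t val = 0xc0129abc;      /* arbitrary 32-bit target address */

        uint32_t insn0 = RAW_LIS(12, HA(val));
        uint32_t insn1 = RAW_ADDI(12, 12, LO(val));

        /* What the two instructions compute at run time. */
        uint32_t r12 = (uint32_t)HA(val) << 16;
        r12 += (int16_t)LO(val);        /* addi sign-extends its immediate */

        printf("lis: %08x  addi: %08x  -> r12 = %08x (expected %08x)\n",
               insn0, insn1, r12, val);
        return (r12 == val) ? 0 : 1;
}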
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ae2b188365b1..6baa676e7cb6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -122,27 +122,19 @@ struct ppc64_stub_entry
* the stub, but it's significantly shorter to put these values at the
* end of the stub code, and patch the stub address (32-bits relative
* to the TOC ptr, r2) into the stub.
- *
- * addis r11,r2, <high>
- * addi r11,r11, <low>
- * std r2,R2_STACK_OFFSET(r1)
- * ld r12,32(r11)
- * ld r2,40(r11)
- * mtctr r12
- * bctr
*/
static u32 ppc64_stub_insns[] = {
- PPC_INST_ADDIS | __PPC_RT(R11) | __PPC_RA(R2),
- PPC_INST_ADDI | __PPC_RT(R11) | __PPC_RA(R11),
+ PPC_RAW_ADDIS(_R11, _R2, 0),
+ PPC_RAW_ADDI(_R11, _R11, 0),
/* Save current r2 value in magic place on the stack. */
- PPC_INST_STD | __PPC_RS(R2) | __PPC_RA(R1) | R2_STACK_OFFSET,
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R11) | 32,
+ PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
+ PPC_RAW_LD(_R12, _R11, 32),
#ifdef PPC64_ELF_ABI_v1
/* Set up new r2 from function descriptor */
- PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R11) | 40,
+ PPC_RAW_LD(_R2, _R11, 40),
#endif
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
/* Count how many different 24-bit relocations (different symbol,
@@ -336,21 +328,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
#ifdef CONFIG_MPROFILE_KERNEL
-#define PACATOC offsetof(struct paca_struct, kernel_toc)
-
-/*
- * ld r12,PACATOC(r13)
- * addis r12,r12,<high>
- * addi r12,r12,<low>
- * mtctr r12
- * bctr
- */
static u32 stub_insns[] = {
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
- PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
/*
@@ -507,7 +490,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 1;
- if (*instruction != PPC_INST_NOP) {
+ if (*instruction != PPC_RAW_NOP()) {
pr_err("%s: Expected nop after call, got %08x at %pS\n",
me->name, *instruction, instruction);
return 0;
@@ -696,21 +679,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
* ld r2, ...(r12)
* add r2, r2, r12
*/
- if ((((uint32_t *)location)[0] & ~0xfffc) !=
- (PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R12)))
+ if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
break;
- if (((uint32_t *)location)[1] !=
- (PPC_INST_ADD | __PPC_RT(R2) | __PPC_RA(R2) | __PPC_RB(R12)))
+ if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
break;
/*
* If found, replace it with:
* addis r2, r12, (.TOC.-func)@ha
* addi r2, r2, (.TOC.-func)@l
*/
- ((uint32_t *)location)[0] = PPC_INST_ADDIS | __PPC_RT(R2) |
- __PPC_RA(R12) | PPC_HA(value);
- ((uint32_t *)location)[1] = PPC_INST_ADDI | __PPC_RT(R2) |
- __PPC_RA(R2) | PPC_LO(value);
+ ((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
+ ((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
break;
case R_PPC64_REL16_HA:
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index cdf87086fa33..c79899abcec8 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -18,24 +18,16 @@
#include <asm/ppc-opcode.h>
#include <asm/inst.h>
-#define TMPL_CALL_HDLR_IDX \
- (optprobe_template_call_handler - optprobe_template_entry)
-#define TMPL_EMULATE_IDX \
- (optprobe_template_call_emulate - optprobe_template_entry)
-#define TMPL_RET_IDX \
- (optprobe_template_ret - optprobe_template_entry)
-#define TMPL_OP_IDX \
- (optprobe_template_op_address - optprobe_template_entry)
-#define TMPL_INSN_IDX \
- (optprobe_template_insn - optprobe_template_entry)
-#define TMPL_END_IDX \
- (optprobe_template_end - optprobe_template_entry)
-
-DEFINE_INSN_CACHE_OPS(ppc_optinsn);
+#define TMPL_CALL_HDLR_IDX (optprobe_template_call_handler - optprobe_template_entry)
+#define TMPL_EMULATE_IDX (optprobe_template_call_emulate - optprobe_template_entry)
+#define TMPL_RET_IDX (optprobe_template_ret - optprobe_template_entry)
+#define TMPL_OP_IDX (optprobe_template_op_address - optprobe_template_entry)
+#define TMPL_INSN_IDX (optprobe_template_insn - optprobe_template_entry)
+#define TMPL_END_IDX (optprobe_template_end - optprobe_template_entry)
static bool insn_page_in_use;
-static void *__ppc_alloc_insn_page(void)
+void *alloc_optinsn_page(void)
{
if (insn_page_in_use)
return NULL;
@@ -43,20 +35,11 @@ static void *__ppc_alloc_insn_page(void)
return &optinsn_slot;
}
-static void __ppc_free_insn_page(void *page __maybe_unused)
+void free_optinsn_page(void *page)
{
insn_page_in_use = false;
}
-struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
- .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
- /* insn_size initialized later */
- .alloc = __ppc_alloc_insn_page,
- .free = __ppc_free_insn_page,
- .nr_garbage = 0,
-};
-
/*
* Check if we can optimize this probe. Returns NIP post-emulation if this can
* be optimized and 0 otherwise.
@@ -66,6 +49,7 @@ static unsigned long can_optimize(struct kprobe *p)
struct pt_regs regs;
struct instruction_op op;
unsigned long nip = 0;
+ unsigned long addr = (unsigned long)p->addr;
/*
* kprobe placed for kretprobe during boot time
@@ -73,7 +57,7 @@ static unsigned long can_optimize(struct kprobe *p)
* So further checks can be skipped.
*/
if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
- return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
+ return addr + sizeof(kprobe_opcode_t);
/*
* We only support optimizing kernel addresses, but not
@@ -81,11 +65,11 @@ static unsigned long can_optimize(struct kprobe *p)
*
* FIXME: Optimize kprobes placed in module addresses.
*/
- if (!is_kernel_addr((unsigned long)p->addr))
+ if (!is_kernel_addr(addr))
return 0;
memset(&regs, 0, sizeof(struct pt_regs));
- regs.nip = (unsigned long)p->addr;
+ regs.nip = addr;
regs.trap = 0x0;
regs.msr = MSR_KERNEL;
@@ -100,9 +84,8 @@ static unsigned long can_optimize(struct kprobe *p)
* Ensure that the instruction is not a conditional branch,
* and that can be emulated.
*/
- if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
- analyse_instr(&op, &regs,
- ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
+ if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
+ analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
emulate_update_regs(&regs, &op);
nip = regs.nip;
}
@@ -123,7 +106,7 @@ static void optimized_callback(struct optimized_kprobe *op,
kprobes_inc_nmissed_count(&op->kp);
} else {
__this_cpu_write(current_kprobe, &op->kp);
- regs->nip = (unsigned long)op->kp.addr;
+ regs_set_return_ip(regs, (unsigned long)op->kp.addr);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
@@ -136,19 +119,15 @@ NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
if (op->optinsn.insn) {
- free_ppc_optinsn_slot(op->optinsn.insn, 1);
+ free_optinsn_slot(op->optinsn.insn, 1);
op->optinsn.insn = NULL;
}
}
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_RAW_LIS(reg, IMM_H(val))));
- addr++;
-
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_RAW_ORI(reg, reg, IMM_L(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
/*
@@ -157,34 +136,11 @@ static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *
*/
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
- /* lis reg,(op)@highest */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
- ((val >> 48) & 0xffff)));
- addr++;
-
- /* ori reg,reg,(op)@higher */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
- ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
- addr++;
-
- /* rldicr reg,reg,32,31 */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
- ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
- addr++;
-
- /* oris reg,reg,(op)@h */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
- ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
- addr++;
-
- /* ori reg,reg,(op)@l */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
- ___PPC_RS(reg) | (val & 0xffff)));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
@@ -198,19 +154,18 @@ static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *ad
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
struct ppc_inst branch_op_callback, branch_emulate_step, temp;
- kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
+ unsigned long op_callback_addr, emulate_step_addr;
+ kprobe_opcode_t *buff;
long b_offset;
unsigned long nip, size;
int rc, i;
- kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-
nip = can_optimize(p);
if (!nip)
return -EILSEQ;
/* Allocate instruction slot for detour buffer */
- buff = get_ppc_optinsn_slot();
+ buff = get_optinsn_slot();
if (!buff)
return -ENOMEM;
@@ -228,8 +183,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
goto error;
/* Check if the return address is also within 32MB range */
- b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
- (unsigned long)nip;
+ b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
if (!is_offset_in_branch_range(b_offset))
goto error;
@@ -238,8 +192,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
- rc = patch_instruction((struct ppc_inst *)(buff + i),
- ppc_inst(*(optprobe_template_entry + i)));
+ rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
@@ -253,51 +206,48 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
/*
* 2. branch to optimized_callback() and emulate_step()
*/
- op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
- emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
+ op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
+ emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
goto error;
}
- rc = create_branch(&branch_op_callback,
- (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
- (unsigned long)op_callback_addr,
- BRANCH_SET_LINK);
+ rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
+ op_callback_addr, BRANCH_SET_LINK);
- rc |= create_branch(&branch_emulate_step,
- (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
- (unsigned long)emulate_step_addr,
- BRANCH_SET_LINK);
+ rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
+ emulate_step_addr, BRANCH_SET_LINK);
if (rc)
goto error;
- patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
- branch_op_callback);
- patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
- branch_emulate_step);
+ patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
+ patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
/*
* 3. load instruction to be emulated into relevant register, and
*/
- temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
- patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ temp = ppc_inst_read(p->ainsn.insn);
+ patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
+ } else {
+ patch_imm_load_insns((unsigned long)p->ainsn.insn, 4, buff + TMPL_INSN_IDX);
+ }
/*
* 4. branch back from trampoline
*/
- patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);
+ patch_branch(buff + TMPL_RET_IDX, nip, 0);
- flush_icache_range((unsigned long)buff,
- (unsigned long)(&buff[TMPL_END_IDX]));
+ flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));
op->optinsn.insn = buff;
return 0;
error:
- free_ppc_optinsn_slot(buff, 0);
+ free_optinsn_slot(buff, 0);
return -ERANGE;
}
@@ -328,12 +278,9 @@ void arch_optimize_kprobes(struct list_head *oplist)
* Backup instructions which will be replaced
* by jump address
*/
- memcpy(op->optinsn.copied_insn, op->kp.addr,
- RELATIVEJUMP_SIZE);
- create_branch(&instr,
- (struct ppc_inst *)op->kp.addr,
- (unsigned long)op->optinsn.insn, 0);
- patch_instruction((struct ppc_inst *)op->kp.addr, instr);
+ memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
+ create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
+ patch_instruction(op->kp.addr, instr);
list_del_init(&op->list);
}
}
@@ -343,8 +290,7 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
arch_arm_kprobe(&op->kp);
}
-void arch_unoptimize_kprobes(struct list_head *oplist,
- struct list_head *done_list)
+void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
@@ -355,8 +301,7 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
}
}
-int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
{
return ((unsigned long)op->kp.addr <= addr &&
(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
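
The rewritten patch_imm64_load_insns() above emits the canonical five-instruction 64-bit immediate load (lis / ori / sldi 32 / oris / ori) driven by the PPC_HIGHEST/PPC_HIGHER/PPC_HI/PPC_LO 16-bit splits. A quick standalone check that those splits reassemble the original value the way the instruction sequence does (the helper names are local mirrors of the kernel macros):

/*
 * Check of the 16-bit splits used by the five-instruction immediate load.
 * lis sign-extends its result, but the sldi by 32 discards those upper
 * bits, so the plain reassembly below matches the hardware effect.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HIGHEST(v)      (((v) >> 48) & 0xffffULL)
#define HIGHER(v)       (((v) >> 32) & 0xffffULL)
#define HI(v)           (((v) >> 16) & 0xffffULL)
#define LO(v)           ((v) & 0xffffULL)

static uint64_t load_imm64(uint64_t val)
{
        uint64_t reg;

        reg = HIGHEST(val) << 16;       /* lis  reg, HIGHEST(val)     */
        reg |= HIGHER(val);             /* ori  reg, reg, HIGHER(val) */
        reg <<= 32;                     /* sldi reg, reg, 32          */
        reg |= HI(val) << 16;           /* oris reg, reg, HI(val)     */
        reg |= LO(val);                 /* ori  reg, reg, LO(val)     */

        return reg;
}

int main(void)
{
        uint64_t samples[] = { 0, 0xc000000001234567ULL, 0xffffffffffffffffULL };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                assert(load_imm64(samples[i]) == samples[i]);
                printf("%#018llx reassembles correctly\n",
                       (unsigned long long)samples[i]);
        }
        return 0;
}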
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 7f5aae3c387d..9bd30cac852b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -346,10 +346,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
#ifdef CONFIG_PPC_BOOK3S
mm_context_t *context = &mm->context;
- get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
- get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8935c5696bce..185beb290580 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,7 +96,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
- tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+ regs_set_return_msr(&tsk->thread.ckpt_regs,
+ tsk->thread.regs->msr);
set_thread_flag(TIF_RESTORE_TM);
}
}
@@ -161,7 +162,7 @@ static void __giveup_fpu(struct task_struct *tsk)
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_fpu(struct task_struct *tsk)
@@ -244,7 +245,7 @@ static void __giveup_altivec(struct task_struct *tsk)
msr &= ~MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_altivec(struct task_struct *tsk)
@@ -559,7 +560,7 @@ void notrace restore_math(struct pt_regs *regs)
msr_check_and_clear(new_msr);
- regs->msr |= new_msr | fpexc_mode;
+ regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
}
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1114,7 +1115,7 @@ void restore_tm_state(struct pt_regs *regs)
#endif
restore_math(regs);
- regs->msr |= msr_diff;
+ regs_set_return_msr(regs, regs->msr | msr_diff);
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -1129,6 +1130,10 @@ static inline void save_sprs(struct thread_struct *t)
if (cpu_has_feature(CPU_FTR_ALTIVEC))
t->vrsave = mfspr(SPRN_VRSAVE);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE))
+ t->spefscr = mfspr(SPRN_SPEFSCR);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR))
t->dscr = mfspr(SPRN_DSCR);
@@ -1159,6 +1164,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
old_thread->vrsave != new_thread->vrsave)
mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE) &&
+ old_thread->spefscr != new_thread->spefscr)
+ mtspr(SPRN_SPEFSCR, new_thread->spefscr);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
@@ -1213,6 +1223,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
__flush_tlb_pending(batch);
batch->active = 0;
}
+
+ /*
+ * On POWER9 the copy-paste buffer can only paste into
+ * foreign real addresses, so unprivileged processes can not
+ * see the data or use it in any way unless they have
+ * foreign real mappings. If the new process has the foreign
+ * real address mappings, we must issue a cp_abort to clear
+ * any state and prevent snooping, corruption or a covert
+ * channel. ISA v3.1 supports paste into local memory.
+ */
+ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
+ atomic_read(&new->mm->context.vas_windows)))
+ asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1248,43 +1271,48 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
/*
- * Call restore_sprs() before calling _switch(). If we move it after
- * _switch() then we miss out on calling it for new tasks. The reason
- * for this is we manually create a stack frame for new tasks that
- * directly returns through ret_from_fork() or
+ * Call restore_sprs() and set_return_regs_changed() before calling
+ * _switch(). If we move it after _switch() then we miss out on calling
+ * it for new tasks. The reason for this is we manually create a stack
+ * frame for new tasks that directly returns through ret_from_fork() or
* ret_from_kernel_thread(). See copy_thread() for details.
*/
restore_sprs(old_thread, new_thread);
+ set_return_regs_changed(); /* _switch changes stack (and regs) */
+
#ifdef CONFIG_PPC32
kuap_assert_locked();
#endif
last = _switch(old_thread, new_thread);
+ /*
+ * Nothing after _switch will be run for newly created tasks,
+ * because they switch directly to ret_from_fork/ret_from_kernel_thread
+ * etc. Code added here should have a comment explaining why that is
+ * okay.
+ */
+
#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * This applies to a process that was context switched while inside
+ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
+ * deactivated above, before _switch(). This will never be the case
+ * for new tasks.
+ */
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
- if (current->thread.regs) {
+ /*
+ * Math facilities are masked out of the child MSR in copy_thread.
+ * A new task does not need to restore_math because it will
+ * demand fault them.
+ */
+ if (current->thread.regs)
restore_math(current->thread.regs);
-
- /*
- * On POWER9 the copy-paste buffer can only paste into
- * foreign real addresses, so unprivileged processes can not
- * see the data or use it in any way unless they have
- * foreign real mappings. If the new process has the foreign
- * real address mappings, we must issue a cp_abort to clear
- * any state and prevent snooping, corruption or a covert
- * channel. ISA v3.1 supports paste into local memory.
- */
- if (current->mm &&
- (cpu_has_feature(CPU_FTR_ARCH_31) ||
- atomic_read(&current->mm->context.vas_windows)))
- asm volatile(PPC_CP_ABORT);
- }
#endif /* CONFIG_PPC_BOOK3S_64 */
return last;
@@ -1736,6 +1764,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_ALTIVEC
p->thread.vr_save_area = NULL;
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ p->thread.kuap = KUAP_NONE;
+#endif
setup_ksp_vsid(p, sp);
@@ -1838,13 +1869,14 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
}
regs->gpr[2] = toc;
}
- regs->nip = entry;
- regs->msr = MSR_USER64;
+ regs_set_return_ip(regs, entry);
+ regs_set_return_msr(regs, MSR_USER64);
} else {
- regs->nip = start;
regs->gpr[2] = 0;
- regs->msr = MSR_USER32;
+ regs_set_return_ip(regs, start);
+ regs_set_return_msr(regs, MSR_USER32);
}
+
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
@@ -1875,7 +1907,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
}
EXPORT_SYMBOL(start_thread);
@@ -1923,9 +1954,10 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
+ if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+ regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+ | tsk->thread.fpexc_mode);
+ }
return 0;
}
@@ -1971,9 +2003,9 @@ int set_endian(struct task_struct *tsk, unsigned int val)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
- regs->msr &= ~MSR_LE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_LE);
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
- regs->msr |= MSR_LE;
+ regs_set_return_msr(regs, regs->msr | MSR_LE);
else
return -EINVAL;
@@ -2121,8 +2153,9 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack,
- const char *loglvl)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+ unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fbe9deebc8e1..f620e04dc9bf 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -758,7 +758,7 @@ void __init early_init_devtree(void *params)
first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
- memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+ memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
/* If relocatable, reserve first 32k for interrupt vectors etc. */
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41ed7e33d897..a5bf355ce1d6 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -27,10 +27,12 @@
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
+#include <linux/printk.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm/interrupt.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -242,13 +244,31 @@ static int __init prom_strcmp(const char *cs, const char *ct)
return 0;
}
-static char __init *prom_strcpy(char *dest, const char *src)
+static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
{
- char *tmp = dest;
+ ssize_t rc;
+ size_t i;
- while ((*dest++ = *src++) != '\0')
- /* nothing */;
- return tmp;
+ if (n == 0 || n > INT_MAX)
+ return -E2BIG;
+
+ // Copy up to n bytes
+ for (i = 0; i < n && src[i] != '\0'; i++)
+ dest[i] = src[i];
+
+ rc = i;
+
+ // If we copied all n then we have run out of space for the nul
+ if (rc == n) {
+ // Rewind by one character to ensure nul termination
+ i--;
+ rc = -E2BIG;
+ }
+
+ for (; i < n; i++)
+ dest[i] = '\0';
+
+ return rc;
}
static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
@@ -701,13 +721,12 @@ static int __init prom_setprop(phandle node, const char *nodename,
}
/* We can't use the standard versions because of relocation headaches. */
-#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'f') \
- || ('A' <= (c) && (c) <= 'F'))
+#define prom_isxdigit(c) \
+ (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
-#define isdigit(c) ('0' <= (c) && (c) <= '9')
-#define islower(c) ('a' <= (c) && (c) <= 'z')
-#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
+#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
+#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
+#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
static unsigned long prom_strtoul(const char *cp, const char **endp)
{
@@ -716,14 +735,14 @@ static unsigned long prom_strtoul(const char *cp, const char **endp)
if (*cp == '0') {
base = 8;
cp++;
- if (toupper(*cp) == 'X') {
+ if (prom_toupper(*cp) == 'X') {
cp++;
base = 16;
}
}
- while (isxdigit(*cp) &&
- (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
+ while (prom_isxdigit(*cp) &&
+ (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
result = result * base + value;
cp++;
}
@@ -927,6 +946,10 @@ struct option_vector6 {
u8 os_name;
} __packed;
+struct option_vector7 {
+ u8 os_id[256];
+} __packed;
+
struct ibm_arch_vec {
struct { u32 mask, val; } pvrs[14];
@@ -949,6 +972,9 @@ struct ibm_arch_vec {
u8 vec6_len;
struct option_vector6 vec6;
+
+ u8 vec7_len;
+ struct option_vector7 vec7;
} __packed;
static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
@@ -1095,6 +1121,9 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.secondary_pteg = 0,
.os_name = OV6_LINUX,
},
+
+ /* option vector 7: OS Identification */
+ .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
};
static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
@@ -1323,6 +1352,8 @@ static void __init prom_check_platform_support(void)
memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
sizeof(ibm_architecture_vec));
+ prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
+
if (prop_len > 1) {
int i;
u8 vec[8];
@@ -1762,6 +1793,8 @@ static int prom_rtas_hcall(uint64_t args)
asm volatile("sc 1\n" : "=r" (arg1) :
"r" (arg1),
"r" (arg2) :);
+ srr_regs_clobbered();
+
return arg1;
}
@@ -2702,7 +2735,7 @@ static void __init flatten_device_tree(void)
/* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- prom_strcpy(namep, "phandle");
+ prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
/* Build string array */
@@ -3210,54 +3243,6 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#ifdef CONFIG_PPC64
-#ifdef CONFIG_RELOCATABLE
-static void reloc_toc(void)
-{
-}
-
-static void unreloc_toc(void)
-{
-}
-#else
-static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
-{
- unsigned long i;
- unsigned long *toc_entry;
-
- /* Get the start of the TOC by using r2 directly. */
- asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
-
- for (i = 0; i < nr_entries; i++) {
- *toc_entry = *toc_entry + offset;
- toc_entry++;
- }
-}
-
-static void reloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- __reloc_toc(offset, nr_entries);
-
- mb();
-}
-
-static void unreloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- mb();
-
- __reloc_toc(-offset, nr_entries);
-}
-#endif
-#endif
-
#ifdef CONFIG_PPC_SVM
/*
* Perform the Enter Secure Mode ultracall.
@@ -3291,14 +3276,12 @@ static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
* relocated it so the check will fail. Restore the original image by
* relocating it back to the kernel virtual base address.
*/
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(KERNELBASE);
+ relocate(KERNELBASE);
ret = enter_secure_mode(kbase, fdt);
/* Relocate the kernel again. */
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(kbase);
+ relocate(kbase);
if (ret != U_SUCCESS) {
prom_printf("Returned %d from switching to secure mode.\n", ret);
@@ -3326,8 +3309,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
unsigned long offset = reloc_offset();
reloc_got2(offset);
-#else
- reloc_toc();
#endif
/*
@@ -3504,8 +3485,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
reloc_got2(-offset);
-#else
- unreloc_toc();
#endif
/* Move to secure memory if we're supposed to be secure guests. */
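
prom_strcpy() is replaced above by prom_strscpy_pad(), which copies at most n bytes, always NUL-pads the remainder of the destination, and returns the number of characters copied or -E2BIG when the source (plus its terminator) did not fit. A standalone rendering of the same semantics for experimentation outside the kernel, using plain libc types and error constants:

/*
 * Standalone model of the prom_strscpy_pad() semantics introduced above.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t strscpy_pad_model(char *dest, const char *src, size_t n)
{
        ssize_t rc;
        size_t i;

        if (n == 0 || n > INT_MAX)
                return -E2BIG;

        /* Copy up to n bytes. */
        for (i = 0; i < n && src[i] != '\0'; i++)
                dest[i] = src[i];

        rc = i;

        /* If we used all n bytes there is no room left for the NUL. */
        if (rc == (ssize_t)n) {
                i--;                    /* rewind so the last byte becomes the NUL */
                rc = -E2BIG;
        }

        /* Pad the rest of the buffer, including the terminator. */
        for (; i < n; i++)
                dest[i] = '\0';

        return rc;
}

int main(void)
{
        char buf[8];

        printf("fits:      rc=%zd buf=\"%s\"\n",
               strscpy_pad_model(buf, "linux", sizeof(buf)), buf);
        printf("truncated: rc=%zd buf=\"%s\"\n",
               strscpy_pad_model(buf, "linux_banner", sizeof(buf)), buf);
        return 0;
}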
diff --git a/arch/powerpc/kernel/ptrace/ptrace-adv.c b/arch/powerpc/kernel/ptrace/ptrace-adv.c
index 3990c01ef8cf..399f5d94a3df 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-adv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-adv.c
@@ -12,7 +12,7 @@ void user_enable_single_step(struct task_struct *task)
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_BT;
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -24,7 +24,7 @@ void user_enable_block_step(struct task_struct *task)
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -50,7 +50,7 @@ void user_disable_single_step(struct task_struct *task)
* All debug events were off.....
*/
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
}
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
@@ -82,6 +82,7 @@ int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
+ struct pt_regs *regs = task->thread.regs;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &task->thread;
@@ -112,7 +113,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
- task->thread.regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
return 0;
@@ -132,7 +133,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
- task->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return 0;
}
@@ -220,7 +221,7 @@ static long set_instruction_bp(struct task_struct *child,
}
out:
child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot;
}
@@ -336,7 +337,7 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
return -ENOSPC;
}
child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot + 4;
}
@@ -430,7 +431,7 @@ static int set_dac_range(struct task_struct *child,
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return 5;
}
@@ -485,7 +486,8 @@ long ppc_del_hwdebug(struct task_struct *child, long data)
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
child->thread.debug.dbcr1)) {
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
- child->thread.regs->msr &= ~MSR_DE;
+ regs_set_return_msr(child->thread.regs,
+ child->thread.regs->msr & ~MSR_DE);
}
}
return rc;
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
index aa36fcad36cd..a5dd7d2e2c9e 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -11,10 +11,8 @@ void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- if (regs != NULL) {
- regs->msr &= ~MSR_BE;
- regs->msr |= MSR_SE;
- }
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -22,10 +20,8 @@ void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- if (regs != NULL) {
- regs->msr &= ~MSR_SE;
- regs->msr |= MSR_BE;
- }
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -34,7 +30,7 @@ void user_disable_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
- regs->msr &= ~(MSR_SE | MSR_BE);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index 773bcc4ca843..b8be1d6668b5 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -113,8 +113,9 @@ static unsigned long get_user_msr(struct task_struct *task)
static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
{
- task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
- task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+ unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
+ (msr & MSR_DEBUGCHANGE);
+ regs_set_return_msr(task->thread.regs, newmsr);
return 0;
}
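
Throughout this series, direct writes to regs->nip and regs->msr are funnelled through regs_set_return_ip()/regs_set_return_msr(). Besides updating the field, the accessors tell the interrupt-return path that any SRR0/SRR1 values it may already have loaded are stale and must be rewritten. A minimal userspace model of that pattern — the structure, field, and function names below are illustrative, not the kernel's:

/*
 * Model of the "set return regs, mark SRRs stale" accessor pattern.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pt_regs {
        uint64_t nip;
        uint64_t msr;
};

static bool srr_valid;          /* stands in for a per-CPU "SRRs still match regs" flag */

static void set_return_regs_changed(void)
{
        srr_valid = false;      /* force SRR0/SRR1 to be reloaded on interrupt return */
}

static void regs_set_return_ip_model(struct fake_pt_regs *regs, uint64_t ip)
{
        regs->nip = ip;
        set_return_regs_changed();
}

static void regs_set_return_msr_model(struct fake_pt_regs *regs, uint64_t msr)
{
        regs->msr = msr;
        set_return_regs_changed();
}

int main(void)
{
        struct fake_pt_regs regs = { .nip = 0x1234, .msr = 0x1033 };

        srr_valid = true;       /* pretend SRRs were loaded for a straight return */

        regs_set_return_ip_model(&regs, regs.nip + 4);          /* e.g. skip an emulated insn */
        regs_set_return_msr_model(&regs, regs.msr | 0x400ULL);  /* e.g. set a trace-style MSR bit */

        printf("nip=%#llx msr=%#llx srr_valid=%d\n",
               (unsigned long long)regs.nip, (unsigned long long)regs.msr, srr_valid);
        return 0;
}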
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index a28239b8b0c0..33c07c8af6c8 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -12,7 +12,7 @@
#define MAX_RTC_WAIT 5000 /* 5 sec */
-#define RTAS_CLOCK_BUSY (-2)
+
time64_t __init rtas_get_boot_time(void)
{
int ret[8];
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6bada744402b..99f2cce635fb 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,6 +25,7 @@
#include <linux/reboot.h>
#include <linux/syscalls.h>
+#include <asm/interrupt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
@@ -46,6 +47,13 @@
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
+static inline void do_enter_rtas(unsigned long args)
+{
+ enter_rtas(args);
+
+ srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
+}
+
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
@@ -384,7 +392,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
save_args = rtas.args;
rtas.args = err_args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
err_args = rtas.args;
rtas.args = save_args;
@@ -430,7 +438,7 @@ va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
for (i = 0; i < nret; ++i)
args->rets[i] = 0;
- enter_rtas(__pa(args));
+ do_enter_rtas(__pa(args));
}
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
@@ -1138,7 +1146,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
flags = lock_rtas();
rtas.args = args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
args = rtas.args;
/* A -1 return code indicates that the last command couldn't
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index c17d1c9362b5..cc51fa52e783 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -300,9 +300,7 @@ static void stf_barrier_enable(bool enable)
void setup_stf_barrier(void)
{
enum stf_barrier_type type;
- bool enable, hv;
-
- hv = cpu_has_feature(CPU_FTR_HVMODE);
+ bool enable;
/* Default to fallback in case fw-features are not available */
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -315,8 +313,7 @@ void setup_stf_barrier(void)
type = STF_BARRIER_NONE;
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
+ security_ftr_enabled(SEC_FTR_STF_BARRIER);
if (type == STF_BARRIER_FALLBACK) {
pr_info("stf-barrier: fallback barrier available\n");
@@ -439,8 +436,8 @@ static void update_branch_cache_flush(void)
site2 = &patch__call_kvm_flush_link_stack_p9;
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
- patch_instruction_site(site2, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+ patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
} else {
// Could use HW flush, but that could also flush count cache
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
@@ -450,11 +447,11 @@ static void update_branch_cache_flush(void)
// Patch out the bcctr first, then nop the rest
site = &patch__call_flush_branch_caches3;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches2;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches1;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
// This controls the branch from _switch to flush_branch_caches
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
@@ -477,12 +474,12 @@ static void update_branch_cache_flush(void)
// If we just need to flush the link stack, early return
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(&patch__flush_link_stack_return,
- ppc_inst(PPC_INST_BLR));
+ ppc_inst(PPC_RAW_BLR()));
// If we have flush instruction, early return
} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
patch_instruction_site(&patch__flush_count_cache_return,
- ppc_inst(PPC_INST_BLR));
+ ppc_inst(PPC_RAW_BLR()));
}
}
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 74a98fff2c2f..aa9c2d01424a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -9,6 +9,7 @@
#undef DEBUG
#include <linux/export.h>
+#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -91,8 +92,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid);
int dcache_bsize;
int icache_bsize;
-unsigned long klimit = (unsigned long) _end;
-
/*
* This still seems to be needed... -- paulus
*/
@@ -927,10 +926,7 @@ void __init setup_arch(char **cmdline_p)
klp_init_thread_info(&init_task);
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
mm_iommu_init(&init_mm);
irqstack_early_init();
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index d7c1f92152af..7ec5c47fce0e 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
*/
notrace void __init machine_init(u64 dt_ptr)
{
- struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache);
+ u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
struct ppc_inst insn;
/* Configure static keys first, now that we're relocated. */
@@ -85,7 +85,7 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP()));
create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a35fbf4d0bce..1ff258f6c76c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
#include <linux/pgtable.h>
#include <asm/debugfs.h>
+#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -939,16 +940,20 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
* disable it by default. Book3S has a soft-nmi hardlockup detector based
* on the decrementer interrupt, so it does not suffer from this problem.
*
- * It is likely to get false positives in VM guests, so disable it there
- * by default too.
+ * It is likely to get false positives in KVM guests, so disable it there
+ * by default too. PowerVM will not stop or arbitrarily oversubscribe
+ * CPUs, but will give a minimum regular allotment even with SPLPAR, so
+ * enable the detector for non-KVM guests, which are assumed to be PowerVM.
*/
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
hardlockup_detector_disable();
#else
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hardlockup_detector_disable();
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (is_kvm_guest())
+ hardlockup_detector_disable();
+ }
#endif
return 0;
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 9ded046edb0e..e600764a926c 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -214,7 +214,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
regs->gpr[0] = __NR_restart_syscall;
else
regs->gpr[3] = regs->orig_gpr3;
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
regs->result = 0;
} else {
if (trap_is_scv(regs)) {
@@ -322,16 +322,16 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
* For signals taken in non-TM or suspended mode, we use the
* normal/non-checkpointed stack pointer.
*/
-
- unsigned long ret = tsk->thread.regs->gpr[1];
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned long ret = regs->gpr[1];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
- if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+ if (MSR_TM_ACTIVE(regs->msr)) {
preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
- if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
ret = tsk->thread.ckpt_regs.gpr[1];
/*
@@ -341,7 +341,7 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
* enter the signal handler in non-transactional state.
*/
- tsk->thread.regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
preempt_enable();
}
#endif
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 8f05ed0da292..0608581967f0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -354,14 +354,8 @@ static void prepare_save_tm_user_regs(void)
{
WARN_ON(tm_suspend_disabled);
-#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
-#endif
-#ifdef CONFIG_SPE
- if (current->thread.used_spe)
- flush_spe_to_thread(current);
-#endif
}
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
@@ -379,7 +373,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
*/
unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
-#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
@@ -412,7 +405,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else
unsafe_put_user(current->thread.ckvrsave,
(u32 __user *)&tm_frame->mc_vregs[32], failed);
-#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
if (msr & MSR_FP)
@@ -420,7 +412,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else
unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
-#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
@@ -436,23 +427,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
msr |= MSR_VSX;
}
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in __unsafe_save_user_regs().
- */
- if (current->thread.used_spe) {
- unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
- ELF_NEVRREG * sizeof(u32), failed);
- /* set MSR_SPE in the saved MSR value to indicate that
- * frame->mc_vregs contains valid data */
- msr |= MSR_SPE;
- }
-
- /* We always copy to/from spefscr */
- unsafe_put_user(current->thread.spefscr,
- (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
-#endif /* CONFIG_SPE */
unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
@@ -505,14 +479,14 @@ static long restore_user_regs(struct pt_regs *regs,
/* if doing signal return, restore the previous little-endian mode */
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
#ifdef CONFIG_ALTIVEC
/*
* Force the process to reload the altivec registers from
* current->thread when it next does altivec instructions
*/
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
@@ -534,7 +508,7 @@ static long restore_user_regs(struct pt_regs *regs,
* Force the process to reload the VSX registers from
* current->thread when it next does VSX instruction.
*/
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
@@ -550,12 +524,12 @@ static long restore_user_regs(struct pt_regs *regs,
* force the process to reload the FP registers from
* current->thread when it next does FP instructions
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
#ifdef CONFIG_SPE
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
- regs->msr &= ~MSR_SPE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
if (msr & MSR_SPE) {
/* restore spe registers from the stack */
unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
@@ -587,9 +561,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *tm_sr)
{
unsigned long msr, msr_hi;
-#ifdef CONFIG_VSX
int i;
-#endif
if (tm_suspend_disabled)
return 1;
@@ -608,10 +580,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
/* Restore the previous little-endian mode */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
-#ifdef CONFIG_ALTIVEC
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
@@ -629,14 +600,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
(u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
-#endif /* CONFIG_ALTIVEC */
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
-#ifdef CONFIG_VSX
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
@@ -649,24 +618,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
-#endif /* CONFIG_VSX */
-
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in restore_user_regs().
- */
- regs->msr &= ~MSR_SPE;
- if (msr & MSR_SPE) {
- unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32), failed);
- current->thread.used_spe = true;
- } else if (current->thread.used_spe)
- memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
-
- /* Always get SPEFSCR back */
- unsafe_get_user(current->thread.spefscr,
- (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
-#endif /* CONFIG_SPE */
user_read_access_end();
@@ -675,7 +626,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_restore_general_regs(regs, tm_sr, failed);
-#ifdef CONFIG_ALTIVEC
/* restore altivec registers from the stack */
if (msr & MSR_VEC)
unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
@@ -684,11 +634,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
/* Always get VRSAVE back */
unsafe_get_user(current->thread.vrsave,
(u32 __user *)&tm_sr->mc_vregs[32], failed);
-#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
-#ifdef CONFIG_VSX
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
@@ -697,7 +645,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
current->thread.used_vsr = true;
}
-#endif /* CONFIG_VSX */
/* Get the top half of the MSR from the user context */
unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
@@ -725,7 +672,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
*
* Pull in the MSR TM bits from the user context
*/
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
* transactional versions should be loaded.
@@ -740,14 +687,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&current->thread.fp_state);
- regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
load_vr_state(&current->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
-#endif
preempt_enable();
@@ -828,10 +773,8 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
} else {
tramp = (unsigned long)mctx->mc_pad;
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
- failed);
- unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
@@ -858,10 +801,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[4] = (unsigned long)&frame->info;
regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long)frame;
- regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
failed:
@@ -926,9 +869,8 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
} else {
tramp = (unsigned long)mctx->mc_pad;
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
- unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
user_access_end();
@@ -947,10 +889,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->gpr[4] = (unsigned long) sc;
- regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
failed:
@@ -1200,7 +1142,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
* set, and recheckpoint was not called. This avoids
* hitting a TM Bad Thing at RFID
*/
- regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
}
/* Fall through, for non-TM restore */
#endif
@@ -1289,7 +1231,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
affect the contents of these registers. After this point,
failure is a problem, anyway, and it's very unlikely unless
the user is really doing something wrong. */
- regs->msr = new_msr;
+ regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
current->thread.debug.dbcr0 = new_dbcr0;
#endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f9e1f5428b9e..1831bba0582e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -354,7 +354,7 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
/* get MSR separately, transfer the LE bit if doing signal return */
unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
@@ -376,7 +376,7 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
unsafe_get_user(v_regs, &sc->v_regs, efault_out);
@@ -468,7 +468,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
return -EINVAL;
/* pull in MSR LE from user context */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
@@ -495,7 +495,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
@@ -565,7 +565,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
preempt_disable();
/* pull in MSR TS bits from user context */
- regs->msr |= msr & MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
/*
* Ensure that TM is enabled in regs->msr before we leave the signal
@@ -583,7 +583,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* to be de-scheduled with MSR[TS] set but without calling
* tm_recheckpoint(). This can cause a bug.
*/
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&tsk->thread);
@@ -591,11 +591,11 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&tsk->thread.fp_state);
- regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
}
if (msr & MSR_VEC) {
load_vr_state(&tsk->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
preempt_enable();
@@ -618,15 +618,12 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
int i;
long err = 0;
- /* bctrl # call the handler */
- err |= __put_user(PPC_INST_BCTRL, &tramp[0]);
- /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */
- err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) |
- (__SIGNAL_FRAMESIZE & 0xffff), &tramp[1]);
- /* li r0, __NR_[rt_]sigreturn| */
- err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[2]);
- /* sc */
- err |= __put_user(PPC_INST_SC, &tramp[3]);
+ /* Call the handler and pop the dummy stackframe */
+ err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
+ err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
+
+ err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
+ err |= __put_user(PPC_RAW_SC(), &tramp[3]);
/* Minimal traceback info */
for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++)
@@ -720,6 +717,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
/* This returns like rt_sigreturn */
set_thread_flag(TIF_RESTOREALL);
+
return 0;
efault_out:
@@ -786,7 +784,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
* the MSR[TS] that came from user context later, at
* restore_tm_sigcontexts.
*/
- regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
@@ -818,7 +816,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
* MSR[TS] set, but without CPU in the proper state,
* causing a TM bad thing.
*/
- current->thread.regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr & ~MSR_TS_MASK);
if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
goto badframe;
@@ -832,6 +831,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto badframe;
set_thread_flag(TIF_RESTOREALL);
+
return 0;
badframe_block:
@@ -911,12 +911,12 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
/* Set up to return from userspace. */
if (tsk->mm->context.vdso) {
- regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+ regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
goto badframe;
- regs->nip = (unsigned long) &frame->tramp[0];
+ regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
}
/* Allocate a dummy caller frame for the signal handler. */
@@ -941,14 +941,13 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
}
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->result = 0;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
- err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
+ regs->gpr[4] = (unsigned long)&frame->info;
+ regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long) frame;
} else {
regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 7ddc2d32c39e..447b78a87c8f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -619,6 +619,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
/*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -634,6 +636,15 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();
+
+ /*
+ * Offlining CPUs in stop_this_cpu can result in scheduler warnings
+ * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
+ * to know other CPUs are offline before it breaks locks to flush
+ * printk buffers, in case we panic()ed while holding the lock.
+ */
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -1541,6 +1552,10 @@ void start_secondary(void *unused)
{
unsigned int cpu = raw_smp_processor_id();
+ /* PPC64 calls setup_kup() in early_setup_secondary() */
+ if (IS_ENABLED(CONFIG_PPC32))
+ setup_kup();
+
mmgrab(&init_mm);
current->active_mm = &init_mm;
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 1deb1bf331dd..2b0d04a1b7d2 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -23,8 +23,8 @@
#include <asm/paca.h>
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
unsigned long sp;
@@ -61,8 +61,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
- void *cookie, struct task_struct *task)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
unsigned long sp;
unsigned long newsp;
@@ -172,17 +172,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
static void raise_backtrace_ipi(cpumask_t *mask)
{
+ struct paca_struct *p;
unsigned int cpu;
+ u64 delay_us;
for_each_cpu(cpu, mask) {
- if (cpu == smp_processor_id())
+ if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
- else
- smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
- }
+ continue;
+ }
- for_each_cpu(cpu, mask) {
- struct paca_struct *p = paca_ptrs[cpu];
+ delay_us = 5 * USEC_PER_SEC;
+
+ if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
+ // Now wait up to 5s for the other CPU to do its backtrace
+ while (cpumask_test_cpu(cpu, mask) && delay_us) {
+ udelay(1);
+ delay_us--;
+ }
+
+ // Other CPU cleared itself from the mask
+ if (delay_us)
+ continue;
+ }
+
+ p = paca_ptrs[cpu];
cpumask_clear_cpu(cpu, mask);
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index a552c9e68d7e..bf4ae0f0e36c 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -114,7 +114,8 @@ SYSCALL_DEFINE0(switch_endian)
{
struct thread_info *ti;
- current->thread.regs->msr ^= MSR_LE;
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr ^ MSR_LE);
/*
* Set TIF_RESTOREALL so that r3 isn't clobbered on return to
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 2e08640bb3b4..defecb3b1b15 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -843,14 +843,14 @@ static int register_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -858,7 +858,7 @@ static int register_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
@@ -940,14 +940,14 @@ static int unregister_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -955,7 +955,7 @@ static int unregister_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
* CPU. For instance, the boot cpu might never be valid
* for hotplugging.
*/
- if (smp_ops->cpu_offline_self)
+ if (smp_ops && smp_ops->cpu_offline_self)
c->hotpluggable = 1;
#endif
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 6c31af7f4fa8..b9a047d92ec0 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -201,7 +201,7 @@ static int __init TAU_init(void)
tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
!strcmp(cur_cpu_spec->platform, "ppc750");
- tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
+ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
if (!tau_workq)
return -ENOMEM;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index da995c5fb97d..c487ba5a6e11 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -231,24 +231,13 @@ static u64 scan_dispatch_log(u64 stop_tb)
void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
- unsigned long save_irq_soft_mask = irq_soft_mask_return();
struct cpu_accounting_data *acct = &local_paca->accounting;
- /* We are called early in the exception entry, before
- * soft/hard_enabled are sync'ed to the expected state
- * for the exception. We are hard disabled but the PACA
- * needs to reflect that so various debug stuff doesn't
- * complain
- */
- irq_soft_mask_set(IRQS_DISABLED);
-
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
acct->stime -= sst;
acct->utime -= ust;
acct->steal_time += ust + sst;
-
- irq_soft_mask_set(save_irq_soft_mask);
}
static inline u64 calculate_stolen_time(u64 stop_tb)
@@ -597,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
- do_IRQ(regs);
+ __do_IRQ(regs);
#endif
old_regs = set_irq_regs(regs);
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index ffe9537195aa..d89c5df4f206 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -49,7 +49,7 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- create_branch(&op, (struct ppc_inst *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
return op;
}
@@ -79,7 +79,7 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
}
/* replace the text with the new text */
- if (patch_instruction((struct ppc_inst *)ip, new))
+ if (patch_instruction((u32 *)ip, new))
return -EPERM;
return 0;
@@ -94,7 +94,7 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr)
addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
- return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0;
+ return create_branch(&op, (u32 *)ip, addr, 0) == 0;
}
static int is_bl_op(struct ppc_inst op)
@@ -162,7 +162,7 @@ __ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MPROFILE_KERNEL
/* When using -mkernel_profile there is no load to jump over */
- pop = ppc_inst(PPC_INST_NOP);
+ pop = ppc_inst(PPC_RAW_NOP());
if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4);
@@ -170,7 +170,7 @@ __ftrace_make_nop(struct module *mod,
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) &&
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
pr_err("Unexpected instruction %s around bl _mcount\n",
ppc_inst_as_str(op));
@@ -203,12 +203,12 @@ __ftrace_make_nop(struct module *mod,
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08x found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
return -EINVAL;
}
#endif /* CONFIG_MPROFILE_KERNEL */
- if (patch_instruction((struct ppc_inst *)ip, pop)) {
+ if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -278,9 +278,9 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- op = ppc_inst(PPC_INST_NOP);
+ op = ppc_inst(PPC_RAW_NOP());
- if (patch_instruction((struct ppc_inst *)ip, op))
+ if (patch_instruction((u32 *)ip, op))
return -EPERM;
return 0;
@@ -380,7 +380,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
return -1;
}
- if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) {
+ if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
return -1;
}
@@ -424,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
}
- if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) {
+ if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -446,7 +446,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = ppc_inst(PPC_INST_NOP);
+ new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
return __ftrace_make_nop_kernel(rec, addr);
@@ -510,7 +510,7 @@ static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP)))
+ if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
return 0;
return 1;
}
@@ -589,14 +589,14 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int err;
struct ppc_inst op;
- unsigned long ip = rec->ip;
+ u32 *ip = (u32 *)rec->ip;
/* read where this goes */
- if (copy_inst_from_kernel_nofault(&op, (void *)ip))
+ if (copy_inst_from_kernel_nofault(&op, ip))
return -EFAULT;
/* It should be pointing to a nop */
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
return -EINVAL;
}
@@ -608,8 +608,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* create the branch to the trampoline */
- err = create_branch(&op, (struct ppc_inst *)ip,
- rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ err = create_branch(&op, ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
if (err) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -617,7 +616,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
pr_devel("write to %lx\n", rec->ip);
- if (patch_instruction((struct ppc_inst *)ip, op))
+ if (patch_instruction(ip, op))
return -EPERM;
return 0;
@@ -653,7 +652,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
}
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
return -EINVAL;
}
@@ -684,7 +683,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = ppc_inst(PPC_INST_NOP);
+ old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
@@ -762,7 +761,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
/* The new target may be within range */
if (test_24bit_addr(ip, addr)) {
/* within range */
- if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -790,12 +789,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* Ensure branch is within 24 bits */
- if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
+ if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("Branch out of range\n");
return -EINVAL;
}
- if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -851,7 +850,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
struct ppc_inst old, new;
int ret;
- old = ppc_inst_read((struct ppc_inst *)&ftrace_call);
+ old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
@@ -859,7 +858,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- old = ppc_inst_read((struct ppc_inst *)&ftrace_regs_call);
+ old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b4ab95c9e94a..d56254f05e17 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -67,6 +67,7 @@
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
+#include <asm/disassemble.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -427,7 +428,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
return;
nonrecoverable:
- regs->msr &= ~MSR_RI;
+ regs_set_return_msr(regs, regs->msr & ~MSR_RI);
#endif
}
DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
@@ -537,11 +538,11 @@ static inline int check_io_access(struct pt_regs *regs)
* For the debug message, we look at the preceding
* load or store.
*/
- if (*nip == PPC_INST_NOP)
+ if (*nip == PPC_RAW_NOP())
nip -= 2;
- else if (*nip == PPC_INST_ISYNC)
+ else if (*nip == PPC_RAW_ISYNC())
--nip;
- if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
+ if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
unsigned int rb;
--nip;
@@ -549,8 +550,8 @@ static inline int check_io_access(struct pt_regs *regs)
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
@@ -586,8 +587,8 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_BOUNDARY SRR1_BOUNDARY
#define single_stepping(regs) ((regs)->msr & MSR_SE)
-#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
-#define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE)
+#define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
+#define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif
#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
@@ -1031,7 +1032,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
#endif /* !__LITTLE_ENDIAN__ */
/* Go to next instruction */
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */
@@ -1103,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
{
clear_single_step(regs);
clear_br_trace(regs);
@@ -1120,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+ __single_step_exception(regs);
+}
+
/*
* After we have successfully emulated an instruction, we have to
* check if the instruction was being single-stepped, and if so,
@@ -1129,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
static void emulate_single_step(struct pt_regs *regs)
{
if (single_stepping(regs))
- single_step_exception(regs);
+ __single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
@@ -1476,7 +1482,7 @@ static void do_program_check(struct pt_regs *regs)
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return;
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
@@ -1538,7 +1544,7 @@ static void do_program_check(struct pt_regs *regs)
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
case 0:
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
return;
case -EFAULT:
@@ -1566,7 +1572,7 @@ DEFINE_INTERRUPT_HANDLER(program_check_exception)
*/
DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
- regs->msr |= REASON_ILLEGAL;
+ regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
do_program_check(regs);
}
@@ -1593,7 +1599,7 @@ DEFINE_INTERRUPT_HANDLER(alignment_exception)
if (fixed == 1) {
/* skip over emulated instruction */
- regs->nip += inst_length(reason);
+ regs_add_return_ip(regs, inst_length(reason));
emulate_single_step(regs);
return;
}
@@ -1659,7 +1665,7 @@ static void tm_unavailable(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (user_mode(regs)) {
current->thread.load_tm++;
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
tm_enable();
tm_restore_sprs(&current->thread);
return;
@@ -1751,7 +1757,7 @@ DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
pr_err("DSCR based mfspr emulation failed\n");
return;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
}
return;
@@ -1948,7 +1954,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
*/
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM flag is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -1969,7 +1975,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
* instead of stopping here when hitting a BT
*/
if (debug_status & DBSR_BT) {
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable BT */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
@@ -1980,7 +1986,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return;
}
@@ -1994,7 +2000,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
if (debugger_sstep(regs))
return;
} else if (debug_status & DBSR_IC) { /* Instruction complete */
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable instruction completion */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
@@ -2016,7 +2022,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM bit is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -2044,7 +2050,7 @@ DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
PPC_WARN_EMULATED(altivec, regs);
err = emulate_altivec(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2109,7 +2115,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
err = do_spe_mathemu(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2140,10 +2146,10 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
giveup_spe(current);
preempt_enable();
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
err = speround_handler(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 9356b60d6030..8513aa49614e 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -296,3 +296,42 @@ void __init udbg_init_40x_realmode(void)
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_MICROWATT
+
+#define UDBG_UART_MW_ADDR ((void __iomem *)0xc0002000)
+
+static u8 udbg_uart_in_isa300_rm(unsigned int reg)
+{
+ uint64_t msr = mfmsr();
+ uint8_t c;
+
+ mtmsr(msr & ~(MSR_EE|MSR_DR));
+ isync();
+ eieio();
+ c = __raw_rm_readb(UDBG_UART_MW_ADDR + (reg << 2));
+ mtmsr(msr);
+ isync();
+ return c;
+}
+
+static void udbg_uart_out_isa300_rm(unsigned int reg, u8 val)
+{
+ uint64_t msr = mfmsr();
+
+ mtmsr(msr & ~(MSR_EE|MSR_DR));
+ isync();
+ eieio();
+ __raw_rm_writeb(val, UDBG_UART_MW_ADDR + (reg << 2));
+ mtmsr(msr);
+ isync();
+}
+
+void __init udbg_init_debug_microwatt(void)
+{
+ udbg_uart_in = udbg_uart_in_isa300_rm;
+ udbg_uart_out = udbg_uart_out_isa300_rm;
+ udbg_use_uart();
+}
+
+#endif /* CONFIG_PPC_EARLY_DEBUG_MICROWATT */
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 186f69b11e94..c6975467d9ff 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
return -EINVAL;
if (cpu_has_feature(CPU_FTR_ARCH_31) &&
- ppc_inst_prefixed(auprobe->insn) &&
+ ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
(addr & 0x3f) == 60) {
pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
return -EINVAL;
@@ -62,7 +62,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
- regs->nip = current->utask->xol_vaddr;
+ regs_set_return_ip(regs, current->utask->xol_vaddr);
user_enable_single_step(current);
return 0;
@@ -119,7 +119,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* support doesn't exist and have to fix-up the next instruction
* to be executed.
*/
- regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, &auprobe->insn);
+ regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
user_disable_single_step(current);
return 0;
@@ -182,7 +182,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, ppc_inst_read(&auprobe->insn));
+ ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
if (ret > 0)
return true;
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 2813e3f98db6..3c5baaa6f1e7 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
+
+# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
+# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
+# compiler generated. To avoid breaking Go, tell GCC not to use r30. The impact on code
+# generation is minimal; it will just use r29 instead.
+ccflags-y += $(call cc-option, -ffixed-r30)
+
asflags-y := -D__VDSO64__ -s
targets += vdso64.lds
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index f5a52f444e36..fc120fac1910 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -73,6 +73,10 @@ _GLOBAL(load_up_altivec)
addi r5,r4,THREAD /* Get THREAD */
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
li r4,1
stb r4,THREAD_LOAD_VEC(r5)
@@ -131,7 +135,9 @@ _GLOBAL(load_up_vsx)
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
- b fast_interrupt_return
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+ b fast_interrupt_return_srr
#endif /* CONFIG_VSX */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 72fa3c00229a..40bdefe9caa7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,22 @@
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
+#define SOFT_MASK_TABLE(align) \
+ . = ALIGN(align); \
+ __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
+ __start___soft_mask_table = .; \
+ KEEP(*(__soft_mask_table)) \
+ __stop___soft_mask_table = .; \
+ }
+
+#define RESTART_TABLE(align) \
+ . = ALIGN(align); \
+ __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
+ __start___restart_table = .; \
+ KEEP(*(__restart_table)) \
+ __stop___restart_table = .; \
+ }
+
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
@@ -124,6 +140,9 @@ SECTIONS
RO_DATA(PAGE_SIZE)
#ifdef CONFIG_PPC64
+ SOFT_MASK_TABLE(8)
+ RESTART_TABLE(8)
+
. = ALIGN(8);
__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
__start___stf_entry_barrier_fixup = .;
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index c9a8f4781a10..a165635fd214 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -24,6 +24,7 @@
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
+#include <linux/processor.h>
#include <linux/smp.h>
#include <asm/interrupt.h>
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index 0196d0c211ac..10f997e6bb95 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -105,8 +105,8 @@ void crash_ipi_callback(struct pt_regs *regs)
static void crash_kexec_prepare_cpus(int cpu)
{
unsigned int msecs;
- unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
- int tries = 0;
+ volatile unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+ volatile int tries = 0;
int (*old_handler)(struct pt_regs *regs);
printk(KERN_EMERG "Sending IPI to other CPUs\n");
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index e8e7b2c530d1..4b3a8d80cfa3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -353,9 +353,6 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
preempt_enable();
}
-/* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 260e860d53a2..085fb8ecbf68 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
+#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
@@ -4626,6 +4628,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&kvm->arch.vcpus_running);
+
+ srr_regs_clobbered();
+
return r;
}
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 8543ad538b0c..898f942eb198 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -302,6 +302,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;
+ if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+ return H_BAD_MODE;
+
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
@@ -322,6 +325,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_hv.vcpu_token >= NR_CPUS)
return H_PARAMETER;
+ /*
+ * L1 must have set up a suspended state to enter the L2 in a
+ * transactional state, and only in that case. These have to be
+ * filtered out here to prevent causing a TM Bad Thing in the
+ * host HRFID. We could synthesize a TM Bad Thing back to the L1
+ * here, but there doesn't seem to be much point.
+ */
+ if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
+ if (!MSR_TM_ACTIVE(l2_regs.msr))
+ return H_BAD_MODE;
+ } else {
+ if (l2_regs.msr & MSR_TS_MASK)
+ return H_BAD_MODE;
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
+ return H_BAD_MODE;
+ }
+
/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 83f592eadcd2..961b3d70483c 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDEC, hdec);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tm_return_to_guest:
+#endif
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
@@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
* is in real suspend mode and is trying to transition to
* transactional mode.
*/
- if (local_paca->kvm_hstate.fake_suspend &&
+ if (!local_paca->kvm_hstate.fake_suspend &&
(vcpu->arch.shregs.msr & MSR_TS_S)) {
if (kvmhv_p9_tm_emulation_early(vcpu)) {
- /* Prevent it being handled again. */
- trap = 0;
+ /*
+ * Go straight back into the guest with the
+ * new NIP/MSR as set by TM emulation.
+ */
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
+
+ /*
+ * tm_return_to_guest re-loads SRR0/1, DAR,
+ * DSISR after RI is cleared, in case they had
+ * been clobbered by a MCE.
+ */
+ __mtmsrd(0, 1); /* clear RI */
+ goto tm_return_to_guest;
}
}
#endif
@@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
* If we are in real mode, only switch MMU on after the MMU is
* switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
*/
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ vcpu->arch.shregs.msr & MSR_TS_MASK)
+ msr |= MSR_TS_S;
+
__mtmsrd(msr, 0);
end_timing(vcpu);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 71bcb0140461..6bc9425acb32 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -1848,6 +1849,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
/* Make sure we save the guest TAR/EBB/DSCR state */
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+ srr_regs_clobbered();
out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index c5e677508d3b..0f847f1e5ddd 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
+ if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
+ /*
+ * Don't overflow our args array: ensure there is room for
+ * at least rets[0] (even if the call specifies 0 nret).
+ *
+ * Each handler must then check for the correct nargs and nret
+ * values, but they may always return failure in rets[0].
+ */
+ rc = -EINVAL;
+ goto fail;
+ }
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
@@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
fail:
/*
* We only get here if the guest has called RTAS with a bogus
- * args pointer. That means we can't get to the args, and so we
- * can't fail the RTAS call. So fail right out to userspace,
- * which should kill the guest.
+ * args pointer or nargs/nret values that would overflow the
+ * array. That means we can't get to the args, and so we can't
+ * fail the RTAS call. So fail right out to userspace, which
+ * should kill the guest.
+ *
+ * SLOF should actually pass the hcall return value from the
+ * rtas handler call in r3, so enter_rtas could be modified to
+ * return a failure indication in r3 and we could return such
+ * errors to the guest rather than failing to host userspace.
+ * However, old guests that don't test for failure could then
+ * continue silently after errors, so for now we won't do this.
*/
return rc;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index be33b5321a76..b4e6f70b97b9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -2048,9 +2048,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_enable_cap cap;
r = -EFAULT;
- vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu_put(vcpu);
break;
@@ -2074,9 +2074,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
- vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu_put(vcpu);
break;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index cc1a8a0f311e..99a7c9132422 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -39,7 +39,7 @@ extra-$(CONFIG_PPC64) += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
- memcpy_power7.o
+ memcpy_power7.o restart_table.o
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
memcpy_64.o copy_mc_64.o
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 870b30d9be2f..f9a3019e37b4 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -18,8 +18,7 @@
#include <asm/setup.h>
#include <asm/inst.h>
-static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr,
- struct ppc_inst *patch_addr)
+static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr)
{
if (!ppc_inst_prefixed(instr)) {
u32 val = ppc_inst_val(instr);
@@ -40,7 +39,7 @@ failed:
return -EFAULT;
}
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
{
return __patch_instruction(addr, instr, addr);
}
@@ -70,22 +69,16 @@ static int text_area_cpu_down(unsigned int cpu)
}
/*
- * Run as a late init call. This allows all the boot time patching to be done
- * simply by patching the code, and then we're called here prior to
- * mark_rodata_ro(), which happens after all init calls are run. Although
- * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
- * it as being preferable to a kernel that will crash later when someone tries
- * to use patch_instruction().
+ * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
+ * we judge it as being preferable to a kernel that will crash later when
+ * someone tries to use patch_instruction().
*/
-static int __init setup_text_poke_area(void)
+void __init poking_init(void)
{
BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke:online", text_area_cpu_up,
text_area_cpu_down));
-
- return 0;
}
-late_initcall(setup_text_poke_area);
/*
* This can be called for kernel text or a module.
@@ -148,10 +141,10 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
{
int err;
- struct ppc_inst *patch_addr = NULL;
+ u32 *patch_addr = NULL;
unsigned long flags;
unsigned long text_poke_addr;
unsigned long kaddr = (unsigned long)addr;
@@ -172,7 +165,7 @@ static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
goto out;
}
- patch_addr = (struct ppc_inst *)(text_poke_addr + (kaddr & ~PAGE_MASK));
+ patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
__patch_instruction(addr, instr, patch_addr);
@@ -187,14 +180,14 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+int patch_instruction(u32 *addr, struct ppc_inst instr)
{
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(addr, 4)) {
@@ -205,7 +198,7 @@ int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
}
NOKPROBE_SYMBOL(patch_instruction);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags)
+int patch_branch(u32 *addr, unsigned long target, int flags)
{
struct ppc_inst instr;
@@ -257,8 +250,7 @@ bool is_conditional_branch(struct ppc_inst instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(struct ppc_inst *instr,
- const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -278,7 +270,7 @@ int create_branch(struct ppc_inst *instr,
return 0;
}
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -325,39 +317,39 @@ int instr_is_relative_link_branch(struct ppc_inst instr)
return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
-static unsigned long branch_iform_target(const struct ppc_inst *instr)
+static unsigned long branch_iform_target(const u32 *instr)
{
signed long imm;
- imm = ppc_inst_val(*instr) & 0x3FFFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x2000000)
imm -= 0x4000000;
- if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-static unsigned long branch_bform_target(const struct ppc_inst *instr)
+static unsigned long branch_bform_target(const u32 *instr)
{
signed long imm;
- imm = ppc_inst_val(*instr) & 0xFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x8000)
imm -= 0x10000;
- if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-unsigned long branch_target(const struct ppc_inst *instr)
+unsigned long branch_target(const u32 *instr)
{
if (instr_is_branch_iform(ppc_inst_read(instr)))
return branch_iform_target(instr);
@@ -367,17 +359,7 @@ unsigned long branch_target(const struct ppc_inst *instr)
return 0;
}
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr)
-{
- if (instr_is_branch_iform(ppc_inst_read(instr)) ||
- instr_is_branch_bform(ppc_inst_read(instr)))
- return branch_target(instr) == addr;
-
- return 0;
-}
-
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src)
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
{
unsigned long target;
target = branch_target(src);
@@ -404,12 +386,21 @@ void __patch_exception(int exc, unsigned long addr)
* instruction of the exception, not the first one
*/
- patch_branch((struct ppc_inst *)(ibase + (exc / 4) + 1), addr, 0);
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
}
#endif
#ifdef CONFIG_CODE_PATCHING_SELFTEST
+static int instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
+{
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
+ return branch_target(instr) == addr;
+
+ return 0;
+}
+
static void __init test_trampoline(void)
{
asm ("nop;\n");
@@ -422,9 +413,9 @@ static void __init test_branch_iform(void)
{
int err;
struct ppc_inst instr;
- unsigned long addr;
-
- addr = (unsigned long)&instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned long addr = (unsigned long)tmp;
/* The simplest case, branch to self, no flags */
check(instr_is_branch_iform(ppc_inst(0x48000000)));
@@ -445,63 +436,68 @@ static void __init test_branch_iform(void)
check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
/* Absolute branch to 0x100 */
- instr = ppc_inst(0x48000103);
- check(instr_is_branch_to_addr(&instr, 0x100));
+ patch_instruction(iptr, ppc_inst(0x48000103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
/* Absolute branch to 0x420fc */
- instr = ppc_inst(0x480420ff);
- check(instr_is_branch_to_addr(&instr, 0x420fc));
+ patch_instruction(iptr, ppc_inst(0x480420ff));
+ check(instr_is_branch_to_addr(iptr, 0x420fc));
/* Maximum positive relative branch, + 20MB - 4B */
- instr = ppc_inst(0x49fffffc);
- check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
+ patch_instruction(iptr, ppc_inst(0x49fffffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
/* Smallest negative relative branch, - 4B */
- instr = ppc_inst(0x4bfffffc);
- check(instr_is_branch_to_addr(&instr, addr - 4));
+ patch_instruction(iptr, ppc_inst(0x4bfffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
/* Largest negative relative branch, - 32 MB */
- instr = ppc_inst(0x4a000000);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+ patch_instruction(iptr, ppc_inst(0x4a000000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
/* Branch to self, with link */
- err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr));
+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
/* Branch to self - 0x100, with link */
- err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
/* Branch to self + 0x100, no link */
- err = create_branch(&instr, &instr, addr + 0x100, 0);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
+ err = create_branch(&instr, iptr, addr + 0x100, 0);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
/* Maximum relative negative offset, - 32 MB */
- err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
/* Out of range relative negative offset, - 32 MB + 4*/
- err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
check(err);
/* Out of range relative positive offset, + 32 MB */
- err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
check(err);
/* Unaligned target */
- err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
check(err);
/* Check flags are masked correctly */
- err = create_branch(&instr, &instr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
}
static void __init test_create_function_call(void)
{
- struct ppc_inst *iptr;
+ u32 *iptr;
unsigned long dest;
struct ppc_inst instr;
/* Check we can create a function call */
- iptr = (struct ppc_inst *)ppc_function_entry(test_trampoline);
+ iptr = (u32 *)ppc_function_entry(test_trampoline);
dest = ppc_function_entry(test_create_function_call);
create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
patch_instruction(iptr, instr);
@@ -512,10 +508,11 @@ static void __init test_branch_bform(void)
{
int err;
unsigned long addr;
- struct ppc_inst *iptr, instr;
+ struct ppc_inst instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
unsigned int flags;
- iptr = &instr;
addr = (unsigned long)iptr;
/* The simplest case, branch to self, no flags */
@@ -528,39 +525,43 @@ static void __init test_branch_bform(void)
check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
/* Absolute conditional branch to 0x100 */
- instr = ppc_inst(0x43ff0103);
- check(instr_is_branch_to_addr(&instr, 0x100));
+ patch_instruction(iptr, ppc_inst(0x43ff0103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
/* Absolute conditional branch to 0x20fc */
- instr = ppc_inst(0x43ff20ff);
- check(instr_is_branch_to_addr(&instr, 0x20fc));
+ patch_instruction(iptr, ppc_inst(0x43ff20ff));
+ check(instr_is_branch_to_addr(iptr, 0x20fc));
/* Maximum positive relative conditional branch, + 32 KB - 4B */
- instr = ppc_inst(0x43ff7ffc);
- check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
+ patch_instruction(iptr, ppc_inst(0x43ff7ffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
/* Smallest negative relative conditional branch, - 4B */
- instr = ppc_inst(0x43fffffc);
- check(instr_is_branch_to_addr(&instr, addr - 4));
+ patch_instruction(iptr, ppc_inst(0x43fffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
/* Largest negative relative conditional branch, - 32 KB */
- instr = ppc_inst(0x43ff8000);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+ patch_instruction(iptr, ppc_inst(0x43ff8000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
/* All condition code bits set & link */
flags = 0x3ff000 | BRANCH_SET_LINK;
/* Branch to self */
err = create_cond_branch(&instr, iptr, addr, flags);
- check(instr_is_branch_to_addr(&instr, addr));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
/* Branch to self - 0x100 */
err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
/* Branch to self + 0x100 */
err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
/* Maximum relative negative offset, - 32 KB */
err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
/* Out of range relative negative offset, - 32 KB + 4*/
err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
@@ -576,7 +577,8 @@ static void __init test_branch_bform(void)
/* Check flags are masked correctly */
err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
}
@@ -715,9 +717,9 @@ static void __init test_prefixed_patching(void)
extern unsigned int code_patching_test1_expected[];
extern unsigned int end_code_patching_test1[];
- __patch_instruction((struct ppc_inst *)code_patching_test1,
+ __patch_instruction(code_patching_test1,
ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
- (struct ppc_inst *)code_patching_test1);
+ code_patching_test1);
check(!memcmp(code_patching_test1,
code_patching_test1_expected,
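
The code-patching.c selftest changes above stop treating a local struct ppc_inst as patchable memory; instead a small u32 buffer is written through patch_instruction() and read back with ppc_inst_read(), which is what lets the new u32 * interfaces cover both word and prefixed instructions. A rough userspace model of that read path, with simplified stand-ins for ppc_inst and the prefix test.

    #include <stdint.h>
    #include <stdio.h>

    #define OP_PREFIX 1u                     /* primary opcode of prefixed insns */

    struct ppc_inst_sketch { uint32_t val; uint32_t suffix; };

    static int is_prefix_word(uint32_t w)
    {
        return (w >> 26) == OP_PREFIX;
    }

    /* Model of ppc_inst_read(): a u32 pointer may name one or two words. */
    static struct ppc_inst_sketch inst_read(const uint32_t *p)
    {
        struct ppc_inst_sketch i = { .val = p[0], .suffix = 0 };

        if (is_prefix_word(p[0]))
            i.suffix = p[1];
        return i;
    }

    int main(void)
    {
        uint32_t buf[2] = { 0x48000103u, 0 };        /* plain branch word */
        struct ppc_inst_sketch i = inst_read(buf);

        printf("val=%08x suffix=%08x\n", i.val, i.suffix);

        buf[0] = OP_PREFIX << 26;                    /* prefixed pair */
        buf[1] = 0x60000000u;
        i = inst_read(buf);
        printf("val=%08x suffix=%08x\n", i.val, i.suffix);
        return 0;
    }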
diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c
index 407b992fb02f..e834079d2b5c 100644
--- a/arch/powerpc/lib/error-inject.c
+++ b/arch/powerpc/lib/error-inject.c
@@ -11,6 +11,6 @@ void override_function_with_return(struct pt_regs *regs)
* function in the kernel/module, captured on a kprobe. We don't need
* to worry about 32-bit userspace on a 64-bit kernel.
*/
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
}
NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index fe26f2fa0f3f..cda17bee5afe 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -17,6 +17,7 @@
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
+#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -33,26 +34,25 @@ struct fixup_entry {
long alt_end_off;
};
-static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
+static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
/*
* We store the offset to the code as a negative offset from
* the start of the alt_entry, to support the VDSO. This
* routine converts that back into an actual address.
*/
- return (struct ppc_inst *)((unsigned long)fcur + offset);
+ return (u32 *)((unsigned long)fcur + offset);
}
-static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
- struct ppc_inst *alt_start, struct ppc_inst *alt_end)
+static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
int err;
struct ppc_inst instr;
instr = ppc_inst_read(src);
- if (instr_is_relative_branch(*src)) {
- struct ppc_inst *target = (struct ppc_inst *)branch_target(src);
+ if (instr_is_relative_branch(ppc_inst_read(src))) {
+ u32 *target = (u32 *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target > alt_end) {
@@ -69,7 +69,7 @@ static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
- struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;
+ u32 *start, *end, *alt_start, *alt_end, *src, *dest;
start = calc_addr(fcur, fcur->start_off);
end = calc_addr(fcur, fcur->end_off);
@@ -91,9 +91,8 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
return 1;
}
- nop = ppc_inst(PPC_INST_NOP);
- for (; dest < end; dest = ppc_inst_next(dest, &nop))
- raw_patch_instruction(dest, nop);
+ for (; dest < end; dest++)
+ raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));
return 0;
}
@@ -128,21 +127,21 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
start = PTRRELOC(&__start___stf_entry_barrier_fixup);
end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
} else if (types & STF_BARRIER_SYNC_ORI) {
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
@@ -152,14 +151,14 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
// See comment in do_entry_flush_fixups() RE order of patching
if (types & STF_BARRIER_FALLBACK) {
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_branch((struct ppc_inst *)(dest + 1),
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
(unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
} else {
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
}
}
@@ -180,32 +179,31 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
start = PTRRELOC(&__start___stf_exit_barrier_fixup);
end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x60000000; /* nop */
- instrs[4] = 0x60000000; /* nop */
- instrs[5] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_NOP();
+ instrs[4] = PPC_RAW_NOP();
+ instrs[5] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
- instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
} else {
- instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
- instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
}
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
- } else {
- instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
- }
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
+ else
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
}
for (i = 0; start < end; start++, i++) {
@@ -213,12 +211,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
- patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
- patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
+ patch_instruction(dest + 4, ppc_inst(instrs[4]));
+ patch_instruction(dest + 5, ppc_inst(instrs[5]));
}
printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
@@ -228,6 +226,9 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
+static bool stf_exit_reentrant = false;
+static bool rfi_exit_reentrant = false;
+
static int __do_stf_barrier_fixups(void *data)
{
enum stf_barrier_type *types = data;
@@ -242,11 +243,27 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
{
/*
* The call to the fallback entry flush, and the fallback/sync-ori exit
- * flush can not be safely patched in/out while other CPUs are executing
- * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
- * spin in the stop machine core with interrupts hard disabled.
+ * flush can not be safely patched in/out while other CPUs are
+ * executing them. So call __do_stf_barrier_fixups() on one CPU while
+ * all other CPUs spin in the stop machine core with interrupts hard
+ * disabled.
+ *
+ * The branch to mark interrupt exits non-reentrant is enabled first,
+ * then stop_machine runs which will ensure all CPUs are out of the
+ * low level interrupt exit code before patching. After the patching,
+ * if allowed, then flip the branch to allow fast exits.
*/
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
stop_machine(__do_stf_barrier_fixups, &types, NULL);
+
+ if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
+ stf_exit_reentrant = false;
+ else
+ stf_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -258,35 +275,35 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
start = PTRRELOC(&__start___uaccess_flush_fixup);
end = PTRRELOC(&__stop___uaccess_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x4e800020; /* blr */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_BLR();
i = 0;
if (types == L1D_FLUSH_FALLBACK) {
- instrs[3] = 0x60000000; /* nop */
+ instrs[3] = PPC_RAW_NOP();
/* fallthrough to fallback flush */
}
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
}
printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
@@ -306,24 +323,24 @@ static int __do_entry_flush_fixups(void *data)
long *start, *end;
int i;
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types == L1D_FLUSH_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
}
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
/*
* If we're patching in or out the fallback flush we need to be careful about the
@@ -358,14 +375,14 @@ static int __do_entry_flush_fixups(void *data)
pr_devel("patching dest %lx\n", (unsigned long)dest);
if (types == L1D_FLUSH_FALLBACK) {
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_branch((struct ppc_inst *)(dest + 1),
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
(unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
} else {
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
}
}
@@ -377,14 +394,14 @@ static int __do_entry_flush_fixups(void *data)
pr_devel("patching dest %lx\n", (unsigned long)dest);
if (types == L1D_FLUSH_FALLBACK) {
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_branch((struct ppc_inst *)(dest + 1),
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
(unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
} else {
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
}
}
@@ -412,8 +429,9 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
stop_machine(__do_entry_flush_fixups, &types, NULL);
}
-void do_rfi_flush_fixups(enum l1d_flush_type types)
+static int __do_rfi_flush_fixups(void *data)
{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
@@ -421,31 +439,31 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
start = PTRRELOC(&__start___rfi_flush_fixup);
end = PTRRELOC(&__stop___rfi_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = 0x48000010;
+ instrs[0] = PPC_INST_BRANCH | 16;
i = 0;
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
}
printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
@@ -456,6 +474,29 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
+}
+
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * stop_machine gets all CPUs out of the interrupt exit handler same
+ * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
+ * without stop_machine, so this could be achieved with a broadcast
+ * IPI instead, but this matches the stf sequence.
+ */
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_rfi_flush_fixups, &types, NULL);
+
+ if (types & L1D_FLUSH_FALLBACK)
+ rfi_exit_reentrant = false;
+ else
+ rfi_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
@@ -467,18 +508,18 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr = 0x60000000; /* nop */
+ instr = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using ORI speculation barrier\n");
- instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
+ patch_instruction(dest, ppc_inst(instr));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -508,21 +549,21 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr[0] = PPC_INST_NOP;
- instr[1] = PPC_INST_NOP;
+ instr[0] = PPC_RAW_NOP();
+ instr[1] = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
- instr[0] = PPC_INST_ISYNC;
- instr[1] = PPC_INST_SYNC;
+ instr[0] = PPC_RAW_ISYNC();
+ instr[1] = PPC_RAW_SYNC();
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
+ patch_instruction(dest, ppc_inst(instr[0]));
+ patch_instruction(dest + 1, ppc_inst(instr[1]));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -536,7 +577,7 @@ static void patch_btb_flush_section(long *curr)
end = (void *)curr + *(curr + 1);
for (; start < end; start++) {
pr_devel("patching dest %lx\n", (unsigned long)start);
- patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
+ patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
}
}
@@ -555,7 +596,7 @@ void do_btb_flush_fixups(void)
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
- struct ppc_inst *dest;
+ u32 *dest;
if (!(value & CPU_FTR_LWSYNC))
return ;
@@ -572,13 +613,14 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- struct ppc_inst inst, *src, *dest, *end;
+ struct ppc_inst inst;
+ u32 *src, *dest, *end;
if (PHYSICAL_START == 0)
return;
- src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
- dest = (struct ppc_inst *)KERNELBASE;
+ src = (u32 *)(KERNELBASE + PHYSICAL_START);
+ dest = (u32 *)KERNELBASE;
end = (void *)src + (__end_interrupts - _stext);
while (src < end) {
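
The new comments in do_stf_barrier_fixups() and do_rfi_flush_fixups() above describe the same gating pattern: mark interrupt exits non-reentrant, patch under stop_machine(), then allow fast exits again only when both the STF and RFI variants permit it. A compact model of that state machine in plain C, with the static branch replaced by a boolean.

    #include <stdbool.h>
    #include <stdio.h>

    static bool stf_exit_reentrant;
    static bool rfi_exit_reentrant;
    static bool exit_not_reentrant;          /* stands in for the static branch */

    static void maybe_allow_fast_exit(void)
    {
        /* Fast (reentrant) exit is only safe when both patch sites allow it. */
        if (stf_exit_reentrant && rfi_exit_reentrant)
            exit_not_reentrant = false;
    }

    static void stf_fixups(bool uses_fallback_or_sync_ori)
    {
        exit_not_reentrant = true;           /* before patching under stop_machine */
        /* ... patching would run here ... */
        stf_exit_reentrant = !uses_fallback_or_sync_ori;
        maybe_allow_fast_exit();
    }

    static void rfi_fixups(bool uses_fallback)
    {
        exit_not_reentrant = true;
        /* ... patching would run here ... */
        rfi_exit_reentrant = !uses_fallback;
        maybe_allow_fast_exit();
    }

    int main(void)
    {
        stf_fixups(false);
        rfi_fixups(false);
        printf("exit_not_reentrant = %d\n", exit_not_reentrant);
        return 0;
    }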
diff --git a/arch/powerpc/lib/restart_table.c b/arch/powerpc/lib/restart_table.c
new file mode 100644
index 000000000000..bccb662c1b7b
--- /dev/null
+++ b/arch/powerpc/lib/restart_table.c
@@ -0,0 +1,56 @@
+#include <asm/interrupt.h>
+#include <asm/kprobes.h>
+
+struct soft_mask_table_entry {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct restart_table_entry {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fixup;
+};
+
+extern struct soft_mask_table_entry __start___soft_mask_table[];
+extern struct soft_mask_table_entry __stop___soft_mask_table[];
+
+extern struct restart_table_entry __start___restart_table[];
+extern struct restart_table_entry __stop___restart_table[];
+
+/* Given an address, look for it in the soft mask table */
+bool search_kernel_soft_mask_table(unsigned long addr)
+{
+ struct soft_mask_table_entry *smte = __start___soft_mask_table;
+
+ while (smte < __stop___soft_mask_table) {
+ unsigned long start = smte->start;
+ unsigned long end = smte->end;
+
+ if (addr >= start && addr < end)
+ return true;
+
+ smte++;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(search_kernel_soft_mask_table);
+
+/* Given an address, look for it in the kernel exception table */
+unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ struct restart_table_entry *rte = __start___restart_table;
+
+ while (rte < __stop___restart_table) {
+ unsigned long start = rte->start;
+ unsigned long end = rte->end;
+ unsigned long fixup = rte->fixup;
+
+ if (addr >= start && addr < end)
+ return fixup;
+
+ rte++;
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(search_kernel_restart_table);
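
The two lookups in the new restart_table.c are plain linear scans over tables delimited by linker-generated __start_*/__stop_* symbols. A self-contained sketch of the same search over a statically initialised table; here the table is an ordinary array rather than a dedicated linker section, and the addresses are made up.

    #include <stdio.h>

    struct restart_entry { unsigned long start, end, fixup; };

    /* In the kernel these entries come from the __restart_table section. */
    static const struct restart_entry table[] = {
        { 0x1000, 0x1040, 0x2000 },
        { 0x3000, 0x3010, 0x4000 },
    };

    static unsigned long search_restart(unsigned long addr)
    {
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (addr >= table[i].start && addr < table[i].end)
                return table[i].fixup;
        return 0;                        /* no fixup registered for this address */
    }

    int main(void)
    {
        printf("%lx\n", search_restart(0x1008));   /* -> 2000 */
        printf("%lx\n", search_restart(0x5000));   /* -> 0 */
        return 0;
    }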
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 45bda2520755..d8d5f901cee1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1700,6 +1700,28 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->val = regs->ccr & imm;
goto compute_done;
+ case 128: /* setb */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ /*
+ * 'ra' encodes the CR field number (bfa) in the top 3 bits.
+ * Since each CR field is 4 bits,
+ * we can simply mask off the bottom two bits (bfa * 4)
+ * to yield the first bit in the CR field.
+ */
+ ra = ra & ~0x3;
+ /* 'val' stores bits of the CR field (bfa) */
+ val = regs->ccr >> (CR0_SHIFT - ra);
+ /* checks if the LT bit of CR field (bfa) is set */
+ if (val & 8)
+ op->val = -1;
+ /* checks if the GT bit of CR field (bfa) is set */
+ else if (val & 4)
+ op->val = 1;
+ else
+ op->val = 0;
+ goto compute_done;
+
case 144: /* mtcrf */
op->type = COMPUTE + SETCC;
imm = 0xf0000000UL;
@@ -3203,7 +3225,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
default:
WARN_ON_ONCE(1);
}
- regs->nip = next_pc;
+ regs_set_return_ip(regs, next_pc);
}
NOKPROBE_SYMBOL(emulate_update_regs);
@@ -3541,7 +3563,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
/* can't step mtmsr[d] that would clear MSR_RI */
return -1;
/* here op.val is the mask of bits to change */
- regs->msr = (regs->msr & ~op.val) | (val & op.val);
+ regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
#ifdef CONFIG_PPC64
@@ -3554,7 +3576,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
cpu_has_feature(CPU_FTR_REAL_LE) &&
regs->gpr[0] == 0x1ebe) {
- regs->msr ^= MSR_LE;
+ regs_set_return_msr(regs, regs->msr ^ MSR_LE);
goto instr_done;
}
regs->gpr[9] = regs->gpr[13];
@@ -3562,8 +3584,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_common;
- regs->msr = MSR_KERNEL;
+ regs_set_return_ip(regs, (unsigned long) &system_call_common);
+ regs_set_return_msr(regs, MSR_KERNEL);
return 1;
#ifdef CONFIG_PPC_BOOK3S_64
@@ -3573,8 +3595,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_vectored_emulate;
- regs->msr = MSR_KERNEL;
+ regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
+ regs_set_return_msr(regs, MSR_KERNEL);
return 1;
#endif
@@ -3585,7 +3607,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
return 0;
instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
+ regs_set_return_ip(regs,
+ truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
return 1;
}
NOKPROBE_SYMBOL(emulate_step);
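
The setb emulation added to sstep.c above extracts the addressed CR field and yields -1, 1 or 0 depending on whether the LT bit, the GT bit, or neither is set. The same computation as a standalone function; the CR field layout (LT=8, GT=4, EQ=2, SO=1) and CR0_SHIFT follow the comments in the hunk, and the sample CCR values match the new test_emulate_step subtests.

    #include <stdio.h>
    #include <stdint.h>

    #define CR0_SHIFT 28

    /* bfa selects one of the eight condition-register fields (0 = CR0). */
    static long setb_result(uint32_t ccr, unsigned int bfa)
    {
        uint32_t field = (ccr >> (CR0_SHIFT - 4 * bfa)) & 0xf;

        if (field & 8)                   /* LT bit set */
            return -1;
        if (field & 4)                   /* GT bit set */
            return 1;
        return 0;                        /* neither LT nor GT */
    }

    int main(void)
    {
        printf("%ld\n", setb_result(0x4000000, 1));  /* CR1 = GT  ->  1 */
        printf("%ld\n", setb_result(0x8000, 4));     /* CR4 = LT  -> -1 */
        printf("%ld\n", setb_result(0x200, 5));      /* CR5 = EQ  ->  0 */
        return 0;
    }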
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 783d1b85ecfe..8b4f6b3e96c4 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -53,6 +53,8 @@
ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
PPC_RAW_ADDI(t, a, i))
+#define TEST_SETB(t, bfa) ppc_inst(PPC_INST_SETB | ___PPC_RT(t) | ___PPC_RA((bfa & 0x7) << 2))
+
static void __init init_pt_regs(struct pt_regs *regs)
{
@@ -824,8 +826,7 @@ static void __init test_plxvp_pstxvp(void)
* XTp = 32xTX + 2xTp
* let RA=3 R=0 D=d0||d1=0 R=0 Tp=1 TX=1
*/
- instr = ppc_inst_prefix(PPC_RAW_PLXVP(34, 0, 3, 0) >> 32,
- PPC_RAW_PLXVP(34, 0, 3, 0) & 0xffffffff);
+ instr = ppc_inst_prefix(PPC_RAW_PLXVP_P(34, 0, 3, 0), PPC_RAW_PLXVP_S(34, 0, 3, 0));
stepped = emulate_step(&regs, instr);
if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
@@ -853,8 +854,7 @@ static void __init test_plxvp_pstxvp(void)
* XSp = 32xSX + 2xSp
* let RA=3 D=d0||d1=0 R=0 Sp=1 SX=1
*/
- instr = ppc_inst_prefix(PPC_RAW_PSTXVP(34, 0, 3, 0) >> 32,
- PPC_RAW_PSTXVP(34, 0, 3, 0) & 0xffffffff);
+ instr = ppc_inst_prefix(PPC_RAW_PSTXVP_P(34, 0, 3, 0), PPC_RAW_PSTXVP_S(34, 0, 3, 0));
stepped = emulate_step(&regs, instr);
@@ -922,7 +922,7 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "R0 = LONG_MAX",
- .instr = ppc_inst(PPC_INST_NOP),
+ .instr = ppc_inst(PPC_RAW_NOP()),
.regs = {
.gpr[0] = LONG_MAX,
}
@@ -930,6 +930,33 @@ static struct compute_test compute_tests[] = {
}
},
{
+ .mnemonic = "setb",
+ .cpu_feature = CPU_FTR_ARCH_300,
+ .subtests = {
+ {
+ .descr = "BFA = 1, CR = GT",
+ .instr = TEST_SETB(20, 1),
+ .regs = {
+ .ccr = 0x4000000,
+ }
+ },
+ {
+ .descr = "BFA = 4, CR = LT",
+ .instr = TEST_SETB(20, 4),
+ .regs = {
+ .ccr = 0x8000,
+ }
+ },
+ {
+ .descr = "BFA = 5, CR = EQ",
+ .instr = TEST_SETB(20, 5),
+ .regs = {
+ .ccr = 0x200,
+ }
+ }
+ }
+ },
+ {
.mnemonic = "add",
.subtests = {
{
@@ -1582,6 +1609,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
if (!regs || !ppc_inst_val(instr))
return -EINVAL;
+ /* These regs are not an interrupt return frame */
regs->nip = patch_site_addr(&patch__exec_instr);
analysed = analyse_instr(&op, regs, instr);
diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
index 327165f26ca6..36761bd00f38 100644
--- a/arch/powerpc/math-emu/math.c
+++ b/arch/powerpc/math-emu/math.c
@@ -453,7 +453,7 @@ do_mathemu(struct pt_regs *regs)
break;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 0;
illegal:
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 0a05e51964c1..39b84e7452e1 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -710,7 +710,7 @@ update_regs:
illegal:
if (have_e500_cpu_a005_erratum) {
/* according to e500 cpu a005 erratum, reissue efp inst */
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
pr_debug("re-issue efp inst: %08lx\n", speinsn);
return 0;
}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 2ffcf540f08b..eae4ec2988fc 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,7 +5,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
+obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o \
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
index 7f0c8a78ba0c..15f4773643d2 100644
--- a/arch/powerpc/mm/book3s32/Makefile
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -10,3 +10,4 @@ obj-y += mmu.o mmu_context.o
obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
obj-$(CONFIG_PPC_KUEP) += kuep.o
+obj-$(CONFIG_PPC_KUAP) += kuap.o
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index fb4233a5bdf7..6925ce998557 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -27,8 +27,10 @@
#include <asm/code-patching-asm.h>
#ifdef CONFIG_PTE_64BIT
+#define PTE_T_SIZE 8
#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
#else
+#define PTE_T_SIZE 4
#define PTE_FLAGS_OFFSET 0
#endif
@@ -488,7 +490,7 @@ _GLOBAL(flush_hash_pages)
bne 2f
ble cr1,19f
addi r4,r4,0x1000
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r6,r6,-1
b 1b
@@ -573,7 +575,7 @@ _GLOBAL(flush_hash_pages)
8: ble cr1,9f /* if all ptes checked */
81: addi r6,r6,-1
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r4,r4,0x1000
lwz r0,0(r5) /* check next pte */
cmpwi cr1,r6,1
diff --git a/arch/powerpc/mm/book3s32/kuap.c b/arch/powerpc/mm/book3s32/kuap.c
new file mode 100644
index 000000000000..0f920f09af57
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/kuap.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void kuap_lock_all_ool(void)
+{
+ kuap_lock_all();
+}
+EXPORT_SYMBOL(kuap_lock_all_ool);
+
+void kuap_unlock_all_ool(void)
+{
+ kuap_unlock_all();
+}
+EXPORT_SYMBOL(kuap_unlock_all_ool);
+
+void setup_kuap(bool disabled)
+{
+ if (!disabled)
+ kuap_lock_all_ool();
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ static_branch_enable(&disable_kuap_key);
+ else
+ pr_info("Activating Kernel Userspace Access Protection\n");
+}
diff --git a/arch/powerpc/mm/book3s32/kuep.c b/arch/powerpc/mm/book3s32/kuep.c
index 8ed1b8634839..c20733d6e02c 100644
--- a/arch/powerpc/mm/book3s32/kuep.c
+++ b/arch/powerpc/mm/book3s32/kuep.c
@@ -1,40 +1,20 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <asm/kup.h>
-#include <asm/reg.h>
-#include <asm/task_size_32.h>
-#include <asm/mmu.h>
+#include <asm/smp.h>
-#define KUEP_UPDATE_TWO_USER_SEGMENTS(n) do { \
- if (TASK_SIZE > ((n) << 28)) \
- mtsr(val1, (n) << 28); \
- if (TASK_SIZE > (((n) + 1) << 28)) \
- mtsr(val2, ((n) + 1) << 28); \
- val1 = (val1 + 0x222) & 0xf0ffffff; \
- val2 = (val2 + 0x222) & 0xf0ffffff; \
-} while (0)
+struct static_key_false disable_kuep_key;
-static __always_inline void kuep_update(u32 val)
+void setup_kuep(bool disabled)
{
- int val1 = val;
- int val2 = (val + 0x111) & 0xf0ffffff;
+ if (!disabled)
+ kuep_lock();
- KUEP_UPDATE_TWO_USER_SEGMENTS(0);
- KUEP_UPDATE_TWO_USER_SEGMENTS(2);
- KUEP_UPDATE_TWO_USER_SEGMENTS(4);
- KUEP_UPDATE_TWO_USER_SEGMENTS(6);
- KUEP_UPDATE_TWO_USER_SEGMENTS(8);
- KUEP_UPDATE_TWO_USER_SEGMENTS(10);
- KUEP_UPDATE_TWO_USER_SEGMENTS(12);
- KUEP_UPDATE_TWO_USER_SEGMENTS(14);
-}
+ if (smp_processor_id() != boot_cpuid)
+ return;
-void kuep_lock(void)
-{
- kuep_update(mfsr(0) | SR_NX);
-}
-
-void kuep_unlock(void)
-{
- kuep_update(mfsr(0) & ~SR_NX);
+ if (disabled)
+ static_branch_enable(&disable_kuep_key);
+ else
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
}
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 159930351d9f..27061583a010 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -445,26 +445,6 @@ void __init print_system_hash_info(void)
pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
-{
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- if (disabled)
- pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-void __init setup_kuap(bool disabled)
-{
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
void __init early_init_mmu(void)
{
}
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
index 218996e40a8e..e2708e387dc3 100644
--- a/arch/powerpc/mm/book3s32/mmu_context.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -24,6 +24,12 @@
#include <asm/mmu_context.h>
/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+void *abatron_pteptrs[2];
+
+/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the
* hardware supports 24-bit VSIDs, and thus >1 million contexts,
@@ -39,19 +45,6 @@
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- *
- *
- * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- * & 0xffffff)
- */
-
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
@@ -111,3 +104,32 @@ void __init mmu_context_init(void)
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
}
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+ long id = next->context.id;
+ unsigned long val;
+
+ if (id < 0)
+ panic("mm_struct %p has no context ID", next);
+
+ isync();
+
+ val = CTX_TO_VSID(id, 0);
+ if (!kuep_is_disabled())
+ val |= SR_NX;
+ if (!kuap_is_disabled())
+ val |= SR_KS;
+
+ update_user_segments(val);
+
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
+
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ mtspr(SPRN_SDR1, rol32(__pa(next->pgd), 4) & 0xffff01ff);
+
+ mb(); /* sync */
+ isync();
+}
+EXPORT_SYMBOL(switch_mmu_context);
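
The new switch_mmu_context() above builds a segment-register value from the context id via CTX_TO_VSID() and then ORs in SR_NX and SR_KS when KUEP and KUAP are active. A standalone sketch of that value construction; the skew constants are reproduced from the comment removed earlier in this file, while the SR_NX/SR_KS bit values shown here are illustrative rather than taken from the headers.

    #include <stdio.h>
    #include <stdint.h>

    /* Formula from the (removed) comment in mmu_context.c. */
    #define CTX_TO_VSID(ctx, va) \
        (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) & 0xffffff)

    /* Segment-register flag bits; values here are illustrative. */
    #define SR_KS 0x40000000u                /* supervisor-state protection key */
    #define SR_NX 0x10000000u                /* no-execute */

    int main(void)
    {
        unsigned long id = 42;               /* hypothetical context id */
        unsigned int val = CTX_TO_VSID(id, 0);

        val |= SR_NX;                        /* KUEP active */
        val |= SR_KS;                        /* KUAP active */

        printf("segment register base value: %08x\n", val);
        return 0;
    }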
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 96d9aa164007..ac5720371c0d 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1522,8 +1522,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
-DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault);
-DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
+DECLARE_INTERRUPT_HANDLER(__do_hash_fault);
+DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
{
unsigned long ea = regs->dar;
unsigned long dsisr = regs->dsisr;
@@ -1533,6 +1533,11 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
unsigned int region_id;
long err;
+ if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
+ hash__do_page_fault(regs);
+ return;
+ }
+
region_id = get_region_id(ea);
if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
mm = &init_mm;
@@ -1571,9 +1576,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
bad_page_fault(regs, SIGBUS);
}
err = 0;
- }
- return err;
+ } else if (err) {
+ hash__do_page_fault(regs);
+ }
}
/*
@@ -1582,13 +1588,6 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
*/
DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
{
- unsigned long dsisr = regs->dsisr;
-
- if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
- hash__do_page_fault(regs);
- return 0;
- }
-
/*
* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
* don't call hash_page, just fail the fault. This is required to
@@ -1607,8 +1606,7 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
return 0;
}
- if (__do_hash_fault(regs))
- hash__do_page_fault(regs);
+ __do_hash_fault(regs);
return 0;
}
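
The hash_utils.c change above folds the bad-DSISR check and the page-fault fallback into __do_hash_fault() itself, so the raw handler no longer needs to inspect a return value. A schematic of the resulting control flow in plain C; the function names are stand-ins, not the kernel interfaces.

    #include <stdio.h>
    #include <stdbool.h>

    static void page_fault_fallback(void) { puts("fall back to generic page fault"); }

    /* Stand-in for hash_page(): nonzero means the HPTE could not be inserted. */
    static int hash_page_sketch(bool can_insert) { return can_insert ? 0 : 1; }

    static void do_hash_fault_sketch(bool bad_dsisr, bool can_insert)
    {
        if (bad_dsisr) {                 /* key/bad faults go straight to fallback */
            page_fault_fallback();
            return;
        }
        if (hash_page_sketch(can_insert))
            page_fault_fallback();       /* insertion failed: take the slow path */
    }

    int main(void)
    {
        do_hash_fault_sketch(true, false);
        do_hash_fault_sketch(false, false);
        do_hash_fault_sketch(false, true);
        return 0;
    }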
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cb91071eef52..23d3e08911d3 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -32,7 +32,13 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
- radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+ /*
+ * Flush PWC even if we get PUD_SIZE hugetlb invalidate to keep this simpler.
+ */
+ if (end - start >= PUD_SIZE)
+ radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
+ else
+ radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
/*
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 2176a5f70746..e50ddf129c15 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -820,7 +820,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
continue;
}
- pmd_base = (pmd_t *)pud_page_vaddr(*pud);
+ pmd_base = pud_pgtable(*pud);
remove_pmd_table(pmd_base, addr, next);
free_pmd_table(pmd_base, pud);
}
@@ -854,7 +854,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
continue;
}
- pud_base = (pud_t *)p4d_page_vaddr(*p4d);
+ pud_base = p4d_pgtable(*p4d);
remove_pud_table(pud_base, addr, next);
free_pud_table(pud_base, p4d);
}
@@ -1105,7 +1105,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
pmd_t *pmd;
int i;
- pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd = pud_pgtable(*pud);
pud_clear(pud);
flush_tlb_kernel_range(addr, addr + PUD_SIZE);
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 318ec4f33661..aefc100d79a7 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -354,22 +354,30 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
asm volatile("ptesync": : :"memory");
- /*
- * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
- * also flush the entire Page Walk Cache.
- */
- __tlbiel_pid(pid, 0, ric);
+ switch (ric) {
+ case RIC_FLUSH_PWC:
- /* For PWC, only one flush is needed */
- if (ric == RIC_FLUSH_PWC) {
+ /* For PWC, only one flush is needed */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
ppc_after_tlbiel_barrier();
return;
+ case RIC_FLUSH_TLB:
+ __tlbiel_pid(pid, 0, RIC_FLUSH_TLB);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ /*
+ * Flush the first set of the TLB, and if
+ * we're doing a RIC_FLUSH_ALL, also flush
+ * the entire Page Walk Cache.
+ */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_ALL);
}
if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
@@ -1103,14 +1111,13 @@ static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
-
{
unsigned long pid;
unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
bool fullmm = (end == TLB_FLUSH_ALL);
- bool flush_pid;
+ bool flush_pid, flush_pwc = false;
enum tlb_flush_type type;
pid = mm->context.id;
@@ -1129,8 +1136,16 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
flush_pid = nr_pages > tlb_single_page_flush_ceiling;
else
flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
+ /*
+ * A full PID flush already does the PWC flush. If it is not a full PID
+ * flush, check whether the range is more than PMD size and force a PWC
+ * flush; mremap() depends on this behaviour.
+ */
+ if (!flush_pid && (end - start) >= PMD_SIZE)
+ flush_pwc = true;
if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
+ unsigned long type = H_RPTI_TYPE_TLB;
unsigned long tgt = H_RPTI_TARGET_CMMU;
unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
@@ -1138,19 +1153,20 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
if (atomic_read(&mm->context.copros) > 0)
tgt |= H_RPTI_TARGET_NMMU;
- pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes,
- start, end);
+ if (flush_pwc)
+ type |= H_RPTI_TYPE_PWC;
+ pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
} else if (flush_pid) {
+ /*
+ * We are now flushing a range larger than PMD size, so force a RIC_FLUSH_ALL.
+ */
if (type == FLUSH_TYPE_LOCAL) {
- _tlbiel_pid(pid, RIC_FLUSH_TLB);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
} else {
if (cputlb_use_tlbie()) {
- if (mm_needs_flush_escalation(mm))
- _tlbie_pid(pid, RIC_FLUSH_ALL);
- else
- _tlbie_pid(pid, RIC_FLUSH_TLB);
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
} else {
- _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
+ _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
}
}
} else {
@@ -1166,6 +1182,9 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
if (type == FLUSH_TYPE_LOCAL) {
asm volatile("ptesync": : :"memory");
+ if (flush_pwc)
+ /* For PWC, only one flush is needed */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbiel_va_range(hstart, hend, pid,
@@ -1173,6 +1192,8 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
ppc_after_tlbiel_barrier();
} else if (cputlb_use_tlbie()) {
asm volatile("ptesync": : :"memory");
+ if (flush_pwc)
+ __tlbie_pid(pid, RIC_FLUSH_PWC);
__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbie_va_range(hstart, hend, pid,
@@ -1180,10 +1201,10 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
} else {
_tlbiel_va_range_multicast(mm,
- start, end, pid, page_size, mmu_virtual_psize, false);
+ start, end, pid, page_size, mmu_virtual_psize, flush_pwc);
if (hflush)
_tlbiel_va_range_multicast(mm,
- hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
+ hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc);
}
}
out:
@@ -1257,9 +1278,6 @@ void radix__flush_all_lpid_guest(unsigned int lpid)
_tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
}
-static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
- unsigned long end, int psize);
-
void radix__tlb_flush(struct mmu_gather *tlb)
{
int psize = 0;
@@ -1290,7 +1308,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
}
}
-static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned long start, unsigned long end,
int psize, bool also_pwc)
{
@@ -1366,8 +1384,8 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}
-static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
- unsigned long end, int psize)
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize)
{
__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}
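
The tlb-flush.c hunks above implement one policy change: a full-PID flush (RIC_FLUSH_ALL) already invalidates the page-walk cache, so an explicit PWC flush only has to be forced for partial flushes that span at least a PMD, which is what mremap() relies on after moving page tables. A minimal userspace sketch of that decision, with illustrative constants and names:

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)	/* 2MB radix PMD, for illustration */

struct flush_plan {
	bool flush_pid;			/* flush the whole PID (RIC_FLUSH_ALL) */
	bool flush_pwc;			/* also flush the page-walk cache */
};

static struct flush_plan plan_flush(unsigned long start, unsigned long end,
				    unsigned long nr_pages, unsigned long ceiling)
{
	struct flush_plan p = { .flush_pid = nr_pages > ceiling };

	/* mremap() moves page tables, so a partial flush covering a PMD or
	 * more must also drop cached page-walk entries. */
	if (!p.flush_pid && (end - start) >= PMD_SIZE)
		p.flush_pwc = true;
	return p;
}

int main(void)
{
	struct flush_plan p = plan_flush(0, 4 * PMD_SIZE, 4 * 512, 1UL << 20);

	printf("flush_pid=%d flush_pwc=%d\n", p.flush_pid, p.flush_pwc);
	return 0;
}
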
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 34f641d4a2fe..a8d0ce85d39a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -199,9 +199,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
{
int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
- /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
- DSISR_PROTFAULT))) {
+ if (is_exec) {
pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
address >= TASK_SIZE ? "exec-protected" : "user",
address,
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index 743e11384dea..9d13143b8be4 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -70,10 +70,10 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
*/
pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
- err = early_ioremap_range(ioremap_bot - size, p, size, prot);
+ err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot);
if (err)
return NULL;
- ioremap_bot -= size;
+ ioremap_bot -= size + PAGE_SIZE;
return (void __iomem *)ioremap_bot + offset;
}
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index ba5cbb0d66bd..3acece00b33e 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -38,7 +38,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
return NULL;
ret = (void __iomem *)ioremap_bot + offset;
- ioremap_bot += size;
+ ioremap_bot += size + PAGE_SIZE;
return ret;
}
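
Both ioremap hunks above make the same change from opposite directions: each ioremap() allocation now advances ioremap_bot by size + PAGE_SIZE, leaving an unmapped guard page between consecutive areas so an overrun faults instead of scribbling on a neighbouring mapping. A rough userspace model of the 32-bit (downward-growing) case, with an illustrative start address:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long ioremap_bot = 0xf0000000UL;	/* grows downward on ppc32 */

static unsigned long alloc_io_area(unsigned long size)
{
	ioremap_bot -= size + PAGE_SIZE;	/* area plus one unmapped guard page above it */
	return ioremap_bot;			/* [ioremap_bot, ioremap_bot + size) gets mapped */
}

int main(void)
{
	unsigned long a = alloc_io_area(0x2000);
	unsigned long b = alloc_io_area(0x1000);

	/* 4096: the guard page sits between the end of area b and the start of area a */
	printf("gap = %lu bytes\n", a - (b + 0x1000));
	return 0;
}
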
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
index a3c30a884076..aad7c47e0030 100644
--- a/arch/powerpc/mm/maccess.c
+++ b/arch/powerpc/mm/maccess.c
@@ -12,7 +12,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
return is_kernel_addr((unsigned long)unsafe_src);
}
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src)
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src)
{
unsigned int val, suffix;
int err;
@@ -21,7 +21,7 @@ int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src)
if (err)
return err;
if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
- err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4);
+ err = copy_from_kernel_nofault(&suffix, src + 1, sizeof(suffix));
*inst = ppc_inst_prefix(val, suffix);
} else {
*inst = ppc_inst(val);
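
The maccess.c hunk above changes the source type to u32 *, so the suffix word of a 64-bit prefixed instruction is fetched with plain element arithmetic (src + 1) and a size taken from the destination, instead of byte offsets on a struct pointer. A small userspace illustration (the opcode extraction mirrors the ISA's top-6-bit primary opcode; the word values are made up):

#include <stdint.h>
#include <stdio.h>

#define OP_PREFIX 1	/* primary opcode of prefixed instructions */

static uint32_t get_op(uint32_t insn)
{
	return insn >> 26;	/* primary opcode is the top 6 bits */
}

static void read_inst(const uint32_t *src, uint32_t *prefix, uint32_t *suffix)
{
	*prefix = src[0];
	/* was "(void *)src + 4" on a struct ppc_inst *; now just the next word */
	*suffix = (get_op(src[0]) == OP_PREFIX) ? src[1] : 0;
}

int main(void)
{
	uint32_t words[2] = { 0x06000000, 0x38600001 };	/* illustrative prefix + suffix */
	uint32_t p, s;

	read_inst(words, &p, &s);
	printf("prefix=%08x suffix=%08x\n", p, s);
	return 0;
}
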
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c5e520c6f13b..ad198b439222 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -28,6 +28,9 @@
unsigned long long memory_limit;
bool init_mem_is_free;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -299,6 +302,10 @@ void __init mem_init(void)
ioremap_bot, IOREMAP_TOP);
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
+#ifdef MODULES_VADDR
+ pr_info(" * 0x%08lx..0x%08lx : modules\n",
+ MODULES_VADDR, MODULES_END);
+#endif
#endif /* CONFIG_PPC32 */
}
diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c
index 3d6ae7c72412..e079f26b267e 100644
--- a/arch/powerpc/mm/nohash/44x.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
@@ -239,3 +240,19 @@ void __init mmu_init_secondary(int cpu)
}
}
#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC_KUEP
+void setup_kuep(bool disabled)
+{
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ patch_instruction_site(&patch__tlb_44x_kuep, ppc_inst(PPC_RAW_NOP()));
+ else
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
+ if (IS_ENABLED(CONFIG_PPC_47x) && disabled)
+ patch_instruction_site(&patch__tlb_47x_kuep, ppc_inst(PPC_RAW_NOP()));
+}
+#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 71bfdbedacee..0df9fe29dd56 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -212,37 +212,6 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-/*
- * Set up to use a given MMU context.
- * id is context number, pgd is PGD pointer.
- *
- * We place the physical address of the new task page directory loaded
- * into the MMU base register, and set the ASID compare register with
- * the new "context."
- */
-void set_context(unsigned long id, pgd_t *pgd)
-{
- s16 offset = (s16)(__pa(swapper_pg_dir));
-
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- if (IS_ENABLED(CONFIG_BDI_SWITCH))
- abatron_pteptrs[1] = pgd;
-
- /* Register M_TWB will contain base address of level 1 table minus the
- * lower part of the kernel PGDIR base address, so that all accesses to
- * level 1 table are done relative to lower part of kernel PGDIR base
- * address.
- */
- mtspr(SPRN_M_TWB, __pa(pgd) - offset);
-
- /* Update context */
- mtspr(SPRN_M_CASID, id - 1);
- /* sync */
- mb();
-}
-
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
@@ -256,13 +225,28 @@ void __init setup_kuep(bool disabled)
#endif
#ifdef CONFIG_PPC_KUAP
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
void __init setup_kuap(bool disabled)
{
- pr_info("Activating Kernel Userspace Access Protection\n");
+ if (disabled) {
+ static_branch_enable(&disable_kuap_key);
+ return;
+ }
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
+ pr_info("Activating Kernel Userspace Access Protection\n");
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif
+
+int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
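
The 8xx KUAP hunk above replaces the old "cannot be disabled" warning with a static key: booting with KUAP disabled flips disable_kuap_key, and the fast-path helpers can then be patched into straight-line code. A hedged sketch of how a kup.h-style helper (the helper name is illustrative; the jump-label API is the stock one) would consume the key:

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(disable_kuap_key);

static __always_inline bool kuap_is_disabled(void)
{
	/* Compiles to a nop-patched branch: no runtime flag load on the
	 * common (KUAP enabled) path. */
	return static_branch_unlikely(&disable_kuap_key);
}
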
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index aac81c9f84a5..44b2b5e7cabe 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -21,21 +21,6 @@
* also clear mm->cpu_vm_mask bits when processes are migrated
*/
-//#define DEBUG_MAP_CONSISTENCY
-//#define DEBUG_CLAMP_LAST_CONTEXT 31
-//#define DEBUG_HARDER
-
-/* We don't use DEBUG because it tends to be compiled in always nowadays
- * and this would generate way too much output
- */
-#ifdef DEBUG_HARDER
-#define pr_hard(args...) printk(KERN_DEBUG args)
-#define pr_hardcont(args...) printk(KERN_CONT args)
-#else
-#define pr_hard(args...) do { } while(0)
-#define pr_hardcont(args...) do { } while(0)
-#endif
-
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -47,10 +32,17 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
/*
+ * Room for two PTE table pointers, usually the kernel and current user
+ * pointers to their respective root page tables (pgdir).
+ */
+void *abatron_pteptrs[2];
+
+/*
* The MPC8xx has only 16 contexts. We rotate through them on each task switch.
* A better way would be to keep track of tasks that own contexts, and implement
* an LRU usage. That way very active tasks don't always have to pay the TLB
@@ -68,9 +60,7 @@
* -- BenH
*/
#define FIRST_CONTEXT 1
-#ifdef DEBUG_CLAMP_LAST_CONTEXT
-#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
-#elif defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
@@ -80,9 +70,7 @@
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
-#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
-#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
@@ -105,7 +93,6 @@ static DEFINE_RAW_SPINLOCK(context_lock);
* the stale map as we can just flush the local CPU
* -- benh
*/
-#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
struct mm_struct *mm;
@@ -127,7 +114,6 @@ static unsigned int steal_context_smp(unsigned int id)
id = FIRST_CONTEXT;
continue;
}
- pr_hardcont(" | steal %d from 0x%p", id, mm);
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
@@ -158,34 +144,25 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
}
-#endif /* CONFIG_SMP */
static unsigned int steal_all_contexts(void)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
unsigned int id;
for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
if (id != FIRST_CONTEXT) {
context_mm[id] = NULL;
__clear_bit(id, context_map);
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
}
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
}
/* Flush the TLB for all contexts (not to be used on SMP) */
@@ -204,15 +181,11 @@ static unsigned int steal_all_contexts(void)
static unsigned int steal_context_up(unsigned int id)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -220,81 +193,64 @@ static unsigned int steal_context_up(unsigned int id)
mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
return id;
}
-#ifdef DEBUG_MAP_CONSISTENCY
-static void context_check_map(void)
+static void set_context(unsigned long id, pgd_t *pgd)
{
- unsigned int id, nrf, nact;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ s16 offset = (s16)(__pa(swapper_pg_dir));
+
+ /*
+ * Register M_TWB will contain base address of level 1 table minus the
+ * lower part of the kernel PGDIR base address, so that all accesses to
+ * level 1 table are done relative to lower part of kernel PGDIR base
+ * address.
+ */
+ mtspr(SPRN_M_TWB, __pa(pgd) - offset);
- nrf = nact = 0;
- for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
- int used = test_bit(id, context_map);
- if (!used)
- nrf++;
- if (used != (context_mm[id] != NULL))
- pr_err("MMU: Context %d is %s and MM is %p !\n",
- id, used ? "used" : "free", context_mm[id]);
- if (context_mm[id] != NULL)
- nact += context_mm[id]->context.active;
- }
- if (nrf != nr_free_contexts) {
- pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
- nr_free_contexts, nrf);
- nr_free_contexts = nrf;
+ /* Update context */
+ mtspr(SPRN_M_CASID, id - 1);
+
+ /* sync */
+ mb();
+ } else {
+ if (IS_ENABLED(CONFIG_40x))
+ mb(); /* sync */
+
+ mtspr(SPRN_PID, id);
+ isync();
}
- if (nact > num_online_cpus())
- pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
- nact, num_online_cpus());
- if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
- pr_err("MMU: Context 0 has been freed !!!\n");
}
-#else
-static void context_check_map(void) { }
-#endif
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int id;
-#ifdef CONFIG_SMP
unsigned int i, cpu = smp_processor_id();
-#endif
unsigned long *map;
/* No lockless fast path .. yet */
raw_spin_lock(&context_lock);
- pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
- cpu, next, next->context.active, next->context.id);
-
-#ifdef CONFIG_SMP
- /* Mark us active and the previous one not anymore */
- next->context.active++;
- if (prev) {
- pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
- WARN_ON(prev->context.active < 1);
- prev->context.active--;
+ if (IS_ENABLED(CONFIG_SMP)) {
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
}
again:
-#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
id = next->context.id;
- if (likely(id != MMU_NO_CONTEXT)) {
-#ifdef DEBUG_MAP_CONSISTENCY
- if (context_mm[id] != next)
- pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
- next, id, id, context_mm[id]);
-#endif
+ if (likely(id != MMU_NO_CONTEXT))
goto ctxt_ok;
- }
/* We really don't have a context, let's try to acquire one */
id = next_context;
@@ -304,14 +260,12 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
/* No more free contexts, let's try to steal one */
if (nr_free_contexts == 0) {
-#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT)
goto again;
goto stolen;
}
-#endif /* CONFIG_SMP */
if (IS_ENABLED(CONFIG_PPC_8xx))
id = steal_all_contexts();
else
@@ -330,20 +284,13 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
next_context = id + 1;
context_mm[id] = next;
next->context.id = id;
- pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
- context_check_map();
ctxt_ok:
/* If that context got marked stale on this CPU, then flush the
* local TLB for it and unmark it before we use it
*/
-#ifdef CONFIG_SMP
- if (test_bit(id, stale_map[cpu])) {
- pr_hardcont(" | stale flush %d [%d..%d]",
- id, cpu_first_thread_sibling(cpu),
- cpu_last_thread_sibling(cpu));
-
+ if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
local_flush_tlb_mm(next);
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
@@ -353,10 +300,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
__clear_bit(id, stale_map[i]);
}
}
-#endif
/* Flick the MMU and release lock */
- pr_hardcont(" -> %d\n", id);
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
set_context(id, next->pgd);
raw_spin_unlock(&context_lock);
}
@@ -366,8 +313,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
*/
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
- pr_hard("initing context for mm @%p\n", mm);
-
/*
* We have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we properly
@@ -401,16 +346,12 @@ void destroy_context(struct mm_struct *mm)
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
context_mm[id] = NULL;
nr_free_contexts++;
}
raw_spin_unlock_irqrestore(&context_lock, flags);
}
-#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
/* We don't touch CPU 0 map, it's allocated at boot and kept
@@ -419,7 +360,6 @@ static int mmu_ctx_cpu_prepare(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
return 0;
}
@@ -430,7 +370,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
@@ -440,8 +379,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
return 0;
}
-#endif /* CONFIG_SMP */
-
/*
* Initialize the context management stuff.
*/
@@ -465,16 +402,16 @@ void __init mmu_context_init(void)
if (!context_mm)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(void *) * (LAST_CONTEXT + 1));
-#ifdef CONFIG_SMP
- stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
- if (!stale_map[boot_cpuid])
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- CTX_MAP_SIZE);
-
- cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
- "powerpc/mmu/ctx:prepare",
- mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
-#endif
+ if (IS_ENABLED(CONFIG_SMP)) {
+ stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!stale_map[boot_cpuid])
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
+
+ cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
+ "powerpc/mmu/ctx:prepare",
+ mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
+ }
printk(KERN_INFO
"MMU: Allocated %zu bytes of context maps for %d contexts\n",
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 68797e072f55..4613bf8e9aae 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -360,19 +360,6 @@ _GLOBAL(_tlbivax_bcast)
sync
wrtee r10
blr
-
-_GLOBAL(set_context)
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
#else
#error Unsupported processor type !
#endif
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..0876216ceee6
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019-2021, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * Updates the attributes of a page in three steps:
+ *
+ * 1. invalidate the page table entry
+ * 2. flush the TLB
+ * 3. install the new entry with the updated attributes
+ *
+ * Invalidating the pte means there are situations where this will not work
+ * when in theory it should.
+ * For example:
+ * - removing write from a page whilst it is being executed
+ * - setting a page read-only whilst it is being read by another CPU
+ *
+ */
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ long action = (long)data;
+ pte_t pte;
+
+ spin_lock(&init_mm.page_table_lock);
+
+ /* invalidate the PTE so it's safe to modify */
+ pte = ptep_get_and_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ /* modify the PTE bits as desired, then apply */
+ switch (action) {
+ case SET_MEMORY_RO:
+ pte = pte_wrprotect(pte);
+ break;
+ case SET_MEMORY_RW:
+ pte = pte_mkwrite(pte_mkdirty(pte));
+ break;
+ case SET_MEMORY_NX:
+ pte = pte_exprotect(pte);
+ break;
+ case SET_MEMORY_X:
+ pte = pte_mkexec(pte);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ set_pte_at(&init_mm, addr, ptep, pte);
+
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync": : :"memory");
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, long action)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long size = numpages * PAGE_SIZE;
+
+ if (!numpages)
+ return 0;
+
+ if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
+ is_vm_area_hugepages((void *)addr)))
+ return -EINVAL;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * On hash, the linear mapping is not in the Linux page table so
+ * apply_to_existing_page_range() will have no effect. If in the future
+ * the set_memory_* functions are used on the linear map this will need
+ * to be updated.
+ */
+ if (!radix_enabled()) {
+ int region = get_region_id(addr);
+
+ if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
+ return -EINVAL;
+ }
+#endif
+
+ return apply_to_existing_page_range(&init_mm, start, size,
+ change_page_attr, (void *)action);
+}
+
+/*
+ * Set the attributes of a page:
+ *
+ * This function is used by PPC32 at the end of init to set final kernel memory
+ * protection. It includes changing the mapping of the page it is executing from
+ * and data pages it is using.
+ */
+static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ pgprot_t prot = __pgprot((unsigned long)data);
+
+ spin_lock(&init_mm.page_table_lock);
+
+ set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long sz = numpages * PAGE_SIZE;
+
+ if (numpages <= 0)
+ return 0;
+
+ return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
+ (void *)pgprot_val(prot));
+}
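
pageattr.c above only exports change_memory_attr() and set_memory_attr(); callers are expected to go through thin wrappers. As a rough guide (the real wrappers belong in arch/powerpc/include/asm/set_memory.h, added elsewhere in this series), they reduce to something like:

static inline int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
}

static inline int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
}
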
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 354611940118..cd16b407f47e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -28,6 +28,14 @@
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
+#ifdef CONFIG_PPC64
+#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
+#else
+#define PGD_ALIGN PAGE_SIZE
+#endif
+
+pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);
+
static inline int is_exec_fault(void)
{
return current->thread.regs && TRAP(current->thread.regs) == 0x400;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e0ec67a16887..dcf5ecca19d9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,6 +23,7 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -132,64 +133,20 @@ void __init mapin_ram(void)
}
}
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
- pte_t *kpte;
- unsigned long address;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- if (v_block_mapped(address))
- return 0;
- kpte = virt_to_kpte(address);
- if (!kpte)
- return -EINVAL;
- __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-
- return 0;
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int i, err = 0;
- unsigned long flags;
- struct page *start = page;
-
- local_irq_save(flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr_noflush(page, prot);
- if (err)
- break;
- }
- wmb();
- local_irq_restore(flags);
- flush_tlb_kernel_range((unsigned long)page_address(start),
- (unsigned long)page_address(page));
- return err;
-}
-
void mark_initmem_nx(void)
{
- struct page *page = virt_to_page(_sinittext);
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
if (v_block_mapped((unsigned long)_sinittext))
mmu_mark_initmem_nx();
else
- change_page_attr(page, numpages, PAGE_KERNEL);
+ set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
- struct page *page;
unsigned long numpages;
if (v_block_mapped((unsigned long)_stext + 1)) {
@@ -198,20 +155,18 @@ void mark_rodata_ro(void)
return;
}
- page = virt_to_page(_stext);
numpages = PFN_UP((unsigned long)_etext) -
PFN_DOWN((unsigned long)_stext);
- change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+ set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
* to cover NOTES and EXCEPTION_TABLE.
*/
- page = virt_to_page(__start_rodata);
numpages = PFN_UP((unsigned long)__init_begin) -
PFN_DOWN((unsigned long)__start_rodata);
- change_page_attr(page, numpages, PAGE_KERNEL_RO);
+ set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
// mark_initmem_nx() should have already run by now
ptdump_check_wx();
@@ -221,9 +176,14 @@ void mark_rodata_ro(void)
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ unsigned long addr = (unsigned long)page_address(page);
+
if (PageHighMem(page))
return;
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ if (enable)
+ set_memory_attr(addr, numpages, PAGE_KERNEL);
+ else
+ set_memory_attr(addr, numpages, __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index cc6e2f94517f..78c8cf01db5f 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -105,7 +105,7 @@ struct page *p4d_page(p4d_t p4d)
VM_WARN_ON(!p4d_huge(p4d));
return pte_page(p4d_pte(p4d));
}
- return virt_to_page(p4d_page_vaddr(p4d));
+ return virt_to_page(p4d_pgtable(p4d));
}
#endif
@@ -115,7 +115,7 @@ struct page *pud_page(pud_t pud)
VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
}
- return virt_to_page(pud_page_vaddr(pud));
+ return virt_to_page(pud_pgtable(pud));
}
/*
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index aca354fb670b..5062c58b1e5b 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -58,8 +58,6 @@ struct pg_state {
const struct addr_marker *marker;
unsigned long start_address;
unsigned long start_pa;
- unsigned long last_pa;
- unsigned long page_size;
unsigned int level;
u64 current_flags;
bool check_wx;
@@ -163,8 +161,6 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info
static void dump_addr(struct pg_state *st, unsigned long addr)
{
- unsigned long delta;
-
#ifdef CONFIG_PPC64
#define REG "0x%016lx"
#else
@@ -172,14 +168,8 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#endif
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
- if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
- pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
- delta = st->page_size >> 10;
- } else {
- pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
- delta = (addr - st->start_address) >> 10;
- }
- pt_dump_size(st->seq, delta);
+ pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
+ pt_dump_size(st->seq, (addr - st->start_address) >> 10);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
@@ -208,7 +198,6 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr,
st->current_flags = flag;
st->start_address = addr;
st->start_pa = pa;
- st->page_size = page_size;
while (addr >= st->marker[1].start_address) {
st->marker++;
@@ -220,7 +209,6 @@ static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, unsigned long page_size)
{
u64 flag = val & pg_level[level].mask;
- u64 pa = val & PTE_RPN_MASK;
/* At first no level is set */
if (!st->level) {
@@ -232,12 +220,9 @@ static void note_page(struct pg_state *st, unsigned long addr,
* - we change levels in the tree.
* - the address is in a different section of memory and is thus
* used for a different purpose, regardless of the flags.
- * - the pa of this page is not adjacent to the last inspected page
*/
} else if (flag != st->current_flags || level != st->level ||
- addr >= st->marker[1].start_address ||
- (pa != st->last_pa + st->page_size &&
- (pa != st->start_pa || st->start_pa != st->last_pa))) {
+ addr >= st->marker[1].start_address) {
/* Check the PTE flags */
if (st->current_flags) {
@@ -259,7 +244,6 @@ static void note_page(struct pg_state *st, unsigned long addr,
*/
note_page_update_state(st, addr, level, val, page_size);
}
- st->last_pa = pa;
}
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 798ac4350a82..53aefee3fe70 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -237,6 +237,7 @@ skip_codegen_passes:
fp->jited_len = alloclen;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
+ bpf_jit_binary_lock_ro(bpf_hdr);
if (!fp->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
@@ -257,15 +258,3 @@ out:
return fp;
}
-
-/* Overriding bpf_jit_free() as we don't set images read-only. */
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *bpf_hdr = (void *)addr;
-
- if (fp->jited)
- bpf_jit_binary_free(bpf_hdr);
-
- bpf_prog_unlock_free(fp);
-}
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index bbb16099e8c7..beb12cbc8c29 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -108,20 +108,20 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
int i;
/* First arg comes in as a 32 bits pointer. */
- EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), __REG_R3));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
- EMIT(PPC_RAW_STWU(__REG_R1, __REG_R1, -BPF_PPC_STACKFRAME(ctx)));
+ EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
/*
* Initialize tail_call_cnt in stack frame if we do tail calls.
* Otherwise, put in NOPs so that it can be skipped when we are
* invoked through a tail call.
*/
- if (ctx->seen & SEEN_TAILCALL) {
- EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
- } else {
+ if (ctx->seen & SEEN_TAILCALL)
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
+ bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ else
EMIT(PPC_RAW_NOP());
- }
#define BPF_TAILCALL_PROLOGUE_SIZE 16
@@ -130,30 +130,30 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MFLR(__REG_R0));
+ EMIT(PPC_RAW_MFLR(_R0));
/*
* Back up non-volatile regs -- registers r18-r31
*/
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
- EMIT(PPC_RAW_STW(i, __REG_R1, bpf_jit_stack_offsetof(ctx, i)));
+ EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
/* If needed retrieve arguments 9 and 10, ie 5th 64 bits arg.*/
if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
- EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, __REG_R1, BPF_PPC_STACKFRAME(ctx)) + 8);
- EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), __REG_R1, BPF_PPC_STACKFRAME(ctx)) + 12);
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx)) + 8);
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx)) + 12);
}
/* Setup frame pointer to point to the bpf stack area */
if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
- EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), __REG_R1,
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_STW(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -163,24 +163,24 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
/* Restore NVRs */
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
- EMIT(PPC_RAW_LWZ(i, __REG_R1, bpf_jit_stack_offsetof(ctx, i)));
+ EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
- EMIT(PPC_RAW_MR(__REG_R3, bpf_to_ppc(ctx, BPF_REG_0)));
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));
bpf_jit_emit_common_epilogue(image, ctx);
/* Tear down our stack frame */
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
- EMIT(PPC_RAW_ADDI(__REG_R1, __REG_R1, BPF_PPC_STACKFRAME(ctx)));
+ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MTLR(__REG_R0));
+ EMIT(PPC_RAW_MTLR(_R0));
EMIT(PPC_RAW_BLR());
}
@@ -193,10 +193,10 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
PPC_BL_ABS(func);
} else {
/* Load function address into r0 */
- EMIT(PPC_RAW_LIS(__REG_R0, IMM_H(func)));
- EMIT(PPC_RAW_ORI(__REG_R0, __REG_R0, IMM_L(func)));
- EMIT(PPC_RAW_MTLR(__REG_R0));
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
+ EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
+ EMIT(PPC_RAW_MTCTR(_R0));
+ EMIT(PPC_RAW_BCTRL());
}
}
@@ -215,47 +215,47 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* if (index >= array->map.max_entries)
* goto out;
*/
- EMIT(PPC_RAW_LWZ(__REG_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
- EMIT(PPC_RAW_CMPLW(b2p_index, __REG_R0));
- EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+ EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
PPC_BCC(COND_GE, out);
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT(PPC_RAW_CMPLWI(__REG_R0, MAX_TAIL_CALL_CNT));
+ EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
/* tail_call_cnt++; */
- EMIT(PPC_RAW_ADDIC(__REG_R0, __REG_R0, 1));
+ EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
PPC_BCC(COND_GT, out);
/* prog = array->ptrs[index]; */
- EMIT(PPC_RAW_RLWINM(__REG_R3, b2p_index, 2, 0, 29));
- EMIT(PPC_RAW_ADD(__REG_R3, __REG_R3, b2p_bpf_array));
- EMIT(PPC_RAW_LWZ(__REG_R3, __REG_R3, offsetof(struct bpf_array, ptrs)));
- EMIT(PPC_RAW_STW(__REG_R0, __REG_R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
+ EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
+ EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
/*
* if (prog == NULL)
* goto out;
*/
- EMIT(PPC_RAW_CMPLWI(__REG_R3, 0));
+ EMIT(PPC_RAW_CMPLWI(_R3, 0));
PPC_BCC(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
- EMIT(PPC_RAW_LWZ(__REG_R3, __REG_R3, offsetof(struct bpf_prog, bpf_func)));
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_LWZ(__REG_R0, __REG_R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
- EMIT(PPC_RAW_ADDIC(__REG_R3, __REG_R3, BPF_TAILCALL_PROLOGUE_SIZE));
+ EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
if (ctx->seen & SEEN_FUNC)
- EMIT(PPC_RAW_MTLR(__REG_R0));
+ EMIT(PPC_RAW_MTLR(_R0));
- EMIT(PPC_RAW_MTCTR(__REG_R3));
+ EMIT(PPC_RAW_MTCTR(_R3));
- EMIT(PPC_RAW_MR(__REG_R3, bpf_to_ppc(ctx, BPF_REG_1)));
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));
/* tear down stack frame, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
@@ -352,8 +352,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
}
if (imm >= 0)
EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
@@ -362,11 +362,11 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_MULW(__REG_R0, dst_reg, src_reg_h));
+ EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
- EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
break;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -376,8 +376,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_MULW(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
}
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
@@ -398,17 +398,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
if (imm < 0)
EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
- EMIT(PPC_RAW_MULHWU(__REG_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
- EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
break;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
- EMIT(PPC_RAW_DIVWU(__REG_R0, dst_reg, src_reg));
- EMIT(PPC_RAW_MULW(__REG_R0, src_reg, __REG_R0));
- EMIT(PPC_RAW_SUB(dst_reg, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
break;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
return -EOPNOTSUPP;
@@ -420,8 +420,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (imm == 1)
break;
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
if (!imm)
@@ -430,9 +430,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
if (!is_power_of_2((u32)imm)) {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
- EMIT(PPC_RAW_DIVWU(__REG_R0, dst_reg, tmp_reg));
- EMIT(PPC_RAW_MULW(__REG_R0, tmp_reg, __REG_R0));
- EMIT(PPC_RAW_SUB(dst_reg, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
break;
}
if (imm == 1)
@@ -503,8 +503,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
32 - fls(imm), 32 - ffs(imm)));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_AND(dst_reg, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
}
break;
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
@@ -555,12 +555,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32));
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
- EMIT(PPC_RAW_SRW(__REG_R0, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
- EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
break;
@@ -591,12 +591,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32));
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
- EMIT(PPC_RAW_SLW(__REG_R0, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
- EMIT(PPC_RAW_OR(dst_reg, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
break;
@@ -627,15 +627,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
- EMIT(PPC_RAW_SUBFIC(__REG_R0, src_reg, 32));
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
- EMIT(PPC_RAW_SLW(__REG_R0, dst_reg_h, __REG_R0));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
- EMIT(PPC_RAW_OR(dst_reg, dst_reg, __REG_R0));
- EMIT(PPC_RAW_RLWINM(__REG_R0, tmp_reg, 0, 26, 26));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
+ EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
- EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, __REG_R0));
+ EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
@@ -702,24 +702,24 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* 2 bytes are already in their final position
* -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
*/
- EMIT(PPC_RAW_RLWINM(__REG_R0, dst_reg, 8, 0, 31));
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg, 24, 0, 7));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg, 24, 16, 23));
- EMIT(PPC_RAW_MR(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
break;
case 64:
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
- EMIT(PPC_RAW_RLWINM(__REG_R0, dst_reg_h, 8, 0, 31));
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg_h, 24, 0, 7));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
- EMIT(PPC_RAW_RLWIMI(__REG_R0, dst_reg_h, 24, 16, 23));
- EMIT(PPC_RAW_MR(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
break;
}
@@ -738,58 +738,72 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
break;
/*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STB(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STB(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STH(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STH(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
break;
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off + 4));
- PPC_EX32(__REG_R0, imm);
- EMIT(PPC_RAW_STW(__REG_R0, dst_reg, off));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
break;
/*
- * BPF_STX XADD (atomic_add)
+ * BPF_STX ATOMIC (atomic ops)
*/
- case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ if (imm != BPF_ADD) {
+ pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -ENOTSUPP;
+ }
+
+ /* *(u32 *)(dst + off) += src */
+
bpf_set_seen_register(ctx, tmp_reg);
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
/* load value from memory into r0 */
- EMIT(PPC_RAW_LWARX(__REG_R0, tmp_reg, dst_reg, 0));
+ EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
/* add value from src_reg into this */
- EMIT(PPC_RAW_ADD(__REG_R0, __REG_R0, src_reg));
+ EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
/* store result back */
- EMIT(PPC_RAW_STWCX(__REG_R0, tmp_reg, dst_reg));
+ EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
break;
- case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
return -EOPNOTSUPP;
/*
@@ -852,14 +866,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
return ret;
if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
- EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, __REG_R1, 8));
- EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), __REG_R1, 12));
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
}
bpf_jit_emit_func_call_rel(image, ctx, func_addr);
- EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, __REG_R3));
- EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), __REG_R4));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
break;
/*
@@ -967,12 +981,12 @@ cond_branch:
EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSET | BPF_X:
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg_h, src_reg_h));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
break;
case BPF_JMP32 | BPF_JSET | BPF_X: {
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
break;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -990,11 +1004,11 @@ cond_branch:
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
/* sign-extending load ... but unsigned comparison */
- PPC_EX32(__REG_R0, imm);
- EMIT(PPC_RAW_CMPLW(dst_reg_h, __REG_R0));
- PPC_LI32(__REG_R0, imm);
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
+ PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
- EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JNE | BPF_K:
@@ -1006,8 +1020,8 @@ cond_branch:
if (imm >= 0 && imm < 65536) {
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
}
@@ -1022,9 +1036,9 @@ cond_branch:
} else {
/* sign-extending load */
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
- PPC_LI32(__REG_R0, imm);
+ PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
- EMIT(PPC_RAW_CMPLW(dst_reg, __REG_R0));
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JSGT | BPF_K:
@@ -1039,32 +1053,32 @@ cond_branch:
EMIT(PPC_RAW_CMPWI(dst_reg, imm));
} else {
/* sign-extending load */
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_CMPW(dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPW(dst_reg, _R0));
}
break;
case BPF_JMP | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
- EMIT(PPC_RAW_ANDI(__REG_R0, dst_reg, imm));
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
+ PPC_LI32(_R0, imm);
if (imm < 0) {
EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
}
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, __REG_R0));
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= -32768 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
- EMIT(PPC_RAW_ANDI(__REG_R0, dst_reg, imm));
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
- PPC_LI32(__REG_R0, imm);
- EMIT(PPC_RAW_AND_DOT(__REG_R0, dst_reg, __REG_R0));
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
}
break;
}
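
The BPF_STX | BPF_ATOMIC | BPF_W case above keeps the old XADD behaviour: the JIT emits an lwarx / add / stwcx. sequence and branches back until the store-conditional succeeds, rejecting any atomic op other than BPF_ADD. The same semantics, modelled in portable userspace C with a compare-exchange standing in for the reservation:

#include <stdint.h>
#include <stdio.h>

static void atomic_add_w(uint32_t *dst, int32_t off, uint32_t src)
{
	uint32_t *addr = (uint32_t *)((char *)dst + off);
	uint32_t old, sum;

	do {
		old = __atomic_load_n(addr, __ATOMIC_RELAXED);		/* lwarx  */
		sum = old + src;					/* add    */
	} while (!__atomic_compare_exchange_n(addr, &old, sum, 0,	/* stwcx. + bne retry */
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
	uint32_t val = 40;

	atomic_add_w(&val, 0, 2);
	printf("%u\n", val);	/* 42 */
	return 0;
}
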
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 57a8c1153851..b87a63dba9c8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -94,7 +94,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC) {
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
+ EMIT(PPC_RAW_MFLR(_R0));
PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
}
@@ -153,8 +153,8 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
PPC_LI64(b2p[TMP_REG_2], func);
/* Load actual entry point from function descriptor */
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
- /* ... and move it to LR */
- EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+ /* ... and move it to CTR */
+ EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
/*
* Load TOC from function descriptor at offset 8.
* We can clobber r2 since we get called through a
@@ -165,9 +165,9 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
#else
/* We can clobber r12 */
PPC_FUNC_ADDR(12, func);
- EMIT(PPC_RAW_MTLR(12));
+ EMIT(PPC_RAW_MTCTR(12));
#endif
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_BCTRL());
}
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
@@ -202,8 +202,8 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
PPC_BPF_LL(12, 12, 0);
#endif
- EMIT(PPC_RAW_MTLR(12));
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_MTCTR(12));
+ EMIT(PPC_RAW_BCTRL());
}
static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
@@ -628,6 +628,12 @@ emit_clear:
break;
/*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
@@ -667,7 +673,7 @@ emit_clear:
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
- if (insn->imm != BPF_ADD) {
+ if (imm != BPF_ADD) {
pr_err_ratelimited(
"eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i);
@@ -689,7 +695,7 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
case BPF_STX | BPF_ATOMIC | BPF_DW:
- if (insn->imm != BPF_ADD) {
+ if (imm != BPF_ADD) {
pr_err_ratelimited(
"eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i);
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index c02854dea2b2..2f46e31c7612 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -1,9 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PERF_EVENTS) += callchain.o callchain_$(BITS).o perf_regs.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_PERF_EVENTS) += callchain_32.o
-endif
+obj-y += callchain.o callchain_$(BITS).o perf_regs.o
+obj-$(CONFIG_COMPAT) += callchain_32.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 6c028ee513c0..082f6d0308a4 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -40,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-void
+void __no_sanitize_address
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 51622411a7cc..bb0ee716de91 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -460,7 +460,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
sizeof(instr)))
return 0;
- return branch_target((struct ppc_inst *)&instr);
+ return branch_target(&instr);
}
/* Userspace: need copy instruction here then translate it */
@@ -468,7 +468,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
sizeof(instr)))
return 0;
- target = branch_target((struct ppc_inst *)&instr);
+ target = branch_target(&instr);
if ((!target) || (instr & BRANCH_ABSOLUTE))
return target;
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index eb8a6aaf4cc1..695975227e60 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -14,45 +14,119 @@
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ pmc ] [unit ] [ ] m [ pmcxsel ]
- * | |
- * | *- mark
- * |
- * |
- * *- combine
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[24] = pmc1combine[0]
- * MMCR1[25] = pmc1combine[1]
- * MMCR1[26] = pmc2combine[0]
- * MMCR1[27] = pmc2combine[1]
- * MMCR1[28] = pmc3combine[0]
- * MMCR1[29] = pmc3combine[1]
- * MMCR1[30] = pmc4combine[0]
- * MMCR1[31] = pmc4combine[1]
- *
+ * [ pmc ] [ pmcxsel ]
*/
/*
- * Some power9 event codes.
+ * Event codes defined in ISA v3.0B
*/
#define EVENT(_name, _code) _name = _code,
enum {
-EVENT(PM_CYC, 0x0001e)
-EVENT(PM_INST_CMPL, 0x00002)
+ /* Cycles, alternate code */
+ EVENT(PM_CYC_ALT, 0x100f0)
+ /* One or more instructions completed in a cycle */
+ EVENT(PM_CYC_INST_CMPL, 0x100f2)
+ /* Floating-point instruction completed */
+ EVENT(PM_FLOP_CMPL, 0x100f4)
+ /* Instruction ERAT/L1-TLB miss */
+ EVENT(PM_L1_ITLB_MISS, 0x100f6)
+ /* All instructions completed and none available */
+ EVENT(PM_NO_INST_AVAIL, 0x100f8)
+ /* A load-type instruction completed (ISA v3.0+) */
+ EVENT(PM_LD_CMPL, 0x100fc)
+ /* Instruction completed, alternate code (ISA v3.0+) */
+ EVENT(PM_INST_CMPL_ALT, 0x100fe)
+ /* A store-type instruction completed */
+ EVENT(PM_ST_CMPL, 0x200f0)
+ /* Instruction Dispatched */
+ EVENT(PM_INST_DISP, 0x200f2)
+ /* Run_cycles */
+ EVENT(PM_RUN_CYC, 0x200f4)
+ /* Data ERAT/L1-TLB miss/reload */
+ EVENT(PM_L1_DTLB_RELOAD, 0x200f6)
+ /* Taken branch completed */
+ EVENT(PM_BR_TAKEN_CMPL, 0x200fa)
+ /* Demand iCache Miss */
+ EVENT(PM_L1_ICACHE_MISS, 0x200fc)
+ /* L1 Dcache reload from memory */
+ EVENT(PM_L1_RELOAD_FROM_MEM, 0x200fe)
+ /* L1 Dcache store miss */
+ EVENT(PM_ST_MISS_L1, 0x300f0)
+ /* Alternate code for PM_INST_DISP */
+ EVENT(PM_INST_DISP_ALT, 0x300f2)
+ /* Branch direction or target mispredicted */
+ EVENT(PM_BR_MISPREDICT, 0x300f6)
+ /* Data TLB miss/reload */
+ EVENT(PM_DTLB_MISS, 0x300fc)
+ /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+ EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+ /* L1 Dcache load miss */
+ EVENT(PM_LD_MISS_L1, 0x400f0)
+ /* Cycle when instruction(s) dispatched */
+ EVENT(PM_CYC_INST_DISP, 0x400f2)
+ /* Branch or branch target mispredicted */
+ EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+ /* Instructions completed with run latch set */
+ EVENT(PM_RUN_INST_CMPL, 0x400fa)
+ /* Instruction TLB miss/reload */
+ EVENT(PM_ITLB_MISS, 0x400fc)
+ /* Load data not cached */
+ EVENT(PM_LD_NOT_CACHED, 0x400fe)
+ /* Instructions */
+ EVENT(PM_INST_CMPL, 0x500fa)
+ /* Cycles */
+ EVENT(PM_CYC, 0x600f4)
};
#undef EVENT
+/* Table of alternatives, sorted in increasing order of column 0 */
+/* Note that in each row, column 0 must be the smallest */
+static const unsigned int generic_event_alternatives[][MAX_ALT] = {
+ { PM_CYC_ALT, PM_CYC },
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
+ { PM_INST_DISP, PM_INST_DISP_ALT },
+};
+
+static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int num_alt = 0;
+
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(generic_event_alternatives), flags,
+ generic_event_alternatives);
+
+ return num_alt;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_NO_INST_AVAIL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *generic_compat_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
@@ -63,17 +137,11 @@ static struct attribute_group generic_compat_pmu_events_group = {
PMU_FORMAT_ATTR(event, "config:0-19");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
-PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:10-11");
-PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
static struct attribute *generic_compat_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
- &format_attr_mark.attr,
- &format_attr_combine.attr,
- &format_attr_unit.attr,
&format_attr_pmc.attr,
NULL,
};
@@ -92,6 +160,9 @@ static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
static int compat_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_NO_INST_AVAIL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -105,11 +176,11 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -119,7 +190,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -133,7 +204,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -147,7 +218,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -161,7 +232,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -175,7 +246,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -204,13 +275,30 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
+/*
+ * We set MMCR0[CC5-6RUN] so we can use counters 5 and 6 for
+ * PM_INST_CMPL and PM_CYC.
+ */
+static int generic_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
+{
+ int ret;
+
+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
+ if (!ret)
+ mmcr->mmcr0 |= MMCR0_C56RUN;
+ return ret;
+}
+
static struct power_pmu generic_compat_pmu = {
.name = "GENERIC_COMPAT",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
+ .compute_mmcr = generic_compute_mmcr,
.get_constraint = isa207_get_constraint,
+ .get_alternatives = generic_get_alternatives,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(compat_generic_events),
@@ -223,6 +311,16 @@ int init_generic_compat_pmu(void)
{
int rc = 0;
+ /*
+ * From ISA v2.07 on, PMU features are architected;
+ * we require >= v3.0 because (a) that has PM_LD_CMPL and
+ * PM_INST_CMPL_ALT, which v2.07 doesn't have, and
+ * (b) we don't expect any non-IBM Power ISA
+ * implementations that conform to v2.07 but not v3.0.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+
rc = register_power_pmu(&generic_compat_pmu);
if (rc)
return rc;
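
The alternatives table above lets the perf core swap an event for an equivalent encoding when the constraint solver needs to move it to another counter: cycles can be programmed as either PM_CYC (0x600f4) or PM_CYC_ALT (0x100f0), and likewise for instructions completed and instructions dispatched. Below is a minimal stand-alone sketch of such a lookup; it is illustrative only (the kernel path goes through isa207_get_alternatives()), and MAX_ALT is assumed to be 2 here.

/* Stand-alone sketch of an event-alternatives lookup; not kernel code. */
#include <stddef.h>

#define MAX_ALT 2			/* assumption for this sketch */

static const unsigned int alt_table[][MAX_ALT] = {
	{ 0x100f0, 0x600f4 },		/* PM_CYC_ALT,       PM_CYC           */
	{ 0x100fe, 0x500fa },		/* PM_INST_CMPL_ALT, PM_INST_CMPL     */
	{ 0x200f2, 0x300f2 },		/* PM_INST_DISP,     PM_INST_DISP_ALT */
};

/* Copy every known encoding of @event into @alt and return the count. */
static int find_alternatives(unsigned int event, unsigned int alt[MAX_ALT])
{
	size_t row, col;

	for (row = 0; row < sizeof(alt_table) / sizeof(alt_table[0]); row++) {
		for (col = 0; col < MAX_ALT; col++) {
			if (alt_table[row][col] != event)
				continue;
			for (col = 0; col < MAX_ALT; col++)
				alt[col] = alt_table[row][col];
			return MAX_ALT;
		}
	}

	/* Every event is at least an alternative of itself. */
	alt[0] = event;
	return 1;
}

Either encoding of an event therefore resolves to the same pair, which is all generic_get_alternatives() needs to hand back to the scheduler.
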
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 8c0d324f657e..3823df235f25 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -582,6 +582,7 @@ static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
/* fall through and return the timeout */
+ fallthrough;
case WDIOC_GETTIMEOUT:
/* we need to round here as to avoid e.g. the following
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 87f524e4b09c..8a7e55acf090 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -73,7 +73,7 @@ smp_86xx_kick_cpu(int nr)
/* Setup fake reset vector to call __secondary_start_mpc86xx. */
target = (unsigned long) __secondary_start_mpc86xx;
- patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
+ patch_branch(vector, target, BRANCH_SET_LINK);
/* Kick that CPU */
smp_86xx_release_core(nr);
@@ -83,7 +83,7 @@ smp_86xx_kick_cpu(int nr)
mdelay(1);
/* Restore the exception vector */
- patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 7a5e8f4541e3..e02d29a9d12f 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,8 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
source "arch/powerpc/platforms/44x/Kconfig"
source "arch/powerpc/platforms/40x/Kconfig"
source "arch/powerpc/platforms/amigaone/Kconfig"
+source "arch/powerpc/platforms/book3s/Kconfig"
+source "arch/powerpc/platforms/microwatt/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
@@ -49,6 +51,7 @@ config PPC_NATIVE
config PPC_OF_BOOT_TRAMPOLINE
bool "Support booting from Open Firmware or yaboot"
depends on PPC_BOOK3S_32 || PPC64
+ select RELOCATABLE if PPC64
default y
help
Support for booting from Open Firmware or yaboot using an
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f998e655b570..6794145603de 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -61,6 +61,7 @@ config 44x
select 4xx_SOC
select HAVE_PCI
select PHYS_64BIT
+ select PPC_HAVE_KUEP
endchoice
@@ -101,6 +102,8 @@ config PPC_BOOK3S_64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
+ select HAVE_MOVE_PMD
+ select HAVE_MOVE_PUD
select IRQ_WORK
select PPC_MM_SLICES
select PPC_HAVE_KUEP
@@ -111,6 +114,7 @@ config PPC_BOOK3E_64
select PPC_FPU # Make it a choice ?
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
+ select ZONE_DMA
endchoice
@@ -389,7 +393,7 @@ config PPC_HAVE_KUEP
config PPC_KUEP
bool "Kernel Userspace Execution Prevention"
depends on PPC_HAVE_KUEP
- default y if !PPC_BOOK3S_32
+ default y
help
Enable support for Kernel Userspace Execution Prevention (KUEP)
@@ -401,7 +405,7 @@ config PPC_HAVE_KUAP
config PPC_KUAP
bool "Kernel Userspace Access Protection"
depends on PPC_HAVE_KUAP
- default y if !PPC_BOOK3S_32
+ default y
help
Enable support for Kernel Userspace Access Protection (KUAP)
@@ -424,10 +428,6 @@ config PPC_MMU_NOHASH
def_bool y
depends on !PPC_BOOK3S
-config PPC_MMU_NOHASH_32
- def_bool y
- depends on PPC_MMU_NOHASH && PPC32
-
config PPC_BOOK3E_MMU
def_bool y
depends on FSL_BOOKE || PPC_BOOK3E
@@ -476,9 +476,9 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-8192)"
- range 2 8192
- depends on SMP
+ int "Maximum number of CPUs (2-8192)" if SMP
+ range 2 8192 if SMP
+ default "1" if !SMP
default "32" if PPC64
default "4"
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 143d4417f6cc..94470fb27c99 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
obj-$(CONFIG_AMIGAONE) += amigaone/
+obj-$(CONFIG_PPC_BOOK3S) += book3s/
+obj-$(CONFIG_PPC_MICROWATT) += microwatt/
diff --git a/arch/powerpc/platforms/book3s/Kconfig b/arch/powerpc/platforms/book3s/Kconfig
new file mode 100644
index 000000000000..34c931592ef0
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_VAS
+ bool "IBM Virtual Accelerator Switchboard (VAS)"
+ depends on (PPC_POWERNV || PPC_PSERIES) && PPC_64K_PAGES
+ default y
+ help
+ This enables support for IBM Virtual Accelerator Switchboard (VAS).
+
+ VAS devices are found in POWER9-based and later systems; they
+ provide access to accelerator coprocessors such as NX-GZIP and
+ NX-842. This config allows the kernel to use NX-842 accelerators,
+ and user-mode APIs for the NX-GZIP accelerator on POWER9 PowerNV
+ and POWER10 PowerVM platforms.
+
+ If unsure, say "N".
diff --git a/arch/powerpc/platforms/book3s/Makefile b/arch/powerpc/platforms/book3s/Makefile
new file mode 100644
index 000000000000..e790f1910f61
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PPC_VAS) += vas-api.o
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
new file mode 100644
index 000000000000..30172e52e16b
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * VAS user space API for its accelerators (Only NX-GZIP is supported now)
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/mmu_context.h>
+#include <linux/io.h>
+#include <asm/vas.h>
+#include <uapi/asm/vas-api.h>
+
+/*
+ * The driver creates the device node that can be used as follows:
+ * For NX-GZIP
+ *
+ * fd = open("/dev/crypto/nx-gzip", O_RDWR);
+ * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+ * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
+ * vas_copy(&crb, 0, 1);
+ * vas_paste(paste_addr, 0, 1);
+ * close(fd) or exit process to close window.
+ *
+ * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
+ * copy/paste returns to user space directly, so refer to the NX hardware
+ * documentation for exact copy/paste usage and completion / error
+ * conditions.
+ */
+
+/*
+ * Wrapper object for the nx-gzip device - there is just one instance of
+ * this node for the whole system.
+ */
+static struct coproc_dev {
+ struct cdev cdev;
+ struct device *device;
+ char *name;
+ dev_t devt;
+ struct class *class;
+ enum vas_cop_type cop_type;
+ const struct vas_user_win_ops *vops;
+} coproc_device;
+
+struct coproc_instance {
+ struct coproc_dev *coproc;
+ struct vas_window *txwin;
+};
+
+static char *coproc_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
+}
+
+/*
+ * Take reference to pid and mm
+ */
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+{
+ /*
+ * Window opened by a child thread may not be closed when
+ * it exits. So take reference to its pid and release it
+ * when the window is freed by the parent thread.
+ * Acquire a reference to the task's pid to make sure
+ * the pid will not be re-used - needed only for multithreaded
+ * applications.
+ */
+ task_ref->pid = get_task_pid(current, PIDTYPE_PID);
+ /*
+ * Acquire a reference to the task's mm.
+ */
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+ pr_err("VAS: pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+
+ mmgrab(task_ref->mm);
+ mmput(task_ref->mm);
+ /*
+ * A process closes its windows during exit. In the case of a
+ * multithreaded application, a child thread can open a
+ * window and exit without closing it. So take a tgid
+ * reference until the window is closed to make sure the tgid
+ * is not reused.
+ */
+ task_ref->tgid = find_get_pid(task_tgid_vnr(current));
+
+ return 0;
+}
+
+/*
+ * On successful return, the caller must release the task reference
+ * with put_task_struct().
+ */
+static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
+ struct task_struct **tskp, struct pid **pidp)
+{
+ struct task_struct *tsk;
+ struct pid *pid;
+
+ pid = task_ref->pid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk) {
+ pid = task_ref->tgid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ /*
+ * Parent thread (tgid) will be closing window when it
+ * exits. So should not get here.
+ */
+ if (WARN_ON_ONCE(!tsk))
+ return false;
+ }
+
+ /* Return if the task is exiting. */
+ if (tsk->flags & PF_EXITING) {
+ put_task_struct(tsk);
+ return false;
+ }
+
+ *tskp = tsk;
+ *pidp = pid;
+
+ return true;
+}
+
+/*
+ * Update the CSB to indicate a translation error.
+ *
+ * User space will be polling on CSB after the request is issued.
+ * If NX can handle the request without any issues, it updates CSB.
+ * Whereas if NX encounters a page fault, the kernel will handle the
+ * fault and update the CSB with a translation error.
+ *
+ * If we are unable to update the CSB (i.e. copy_to_user failed due to
+ * an invalid csb_addr), send a signal to the process.
+ */
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref)
+{
+ struct coprocessor_status_block csb;
+ struct kernel_siginfo info;
+ struct task_struct *tsk;
+ void __user *csb_addr;
+ struct pid *pid;
+ int rc;
+
+ /*
+ * NX user space windows can not be opened for task->mm=NULL
+ * and faults will not be generated for kernel requests.
+ */
+ if (WARN_ON_ONCE(!task_ref->mm))
+ return;
+
+ csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
+
+ memset(&csb, 0, sizeof(csb));
+ csb.cc = CSB_CC_FAULT_ADDRESS;
+ csb.ce = CSB_CE_TERMINATION;
+ csb.cs = 0;
+ csb.count = 0;
+
+ /*
+ * NX operates on and returns data in BE format, as defined by the CRB
+ * struct. So save fault_storage_addr in BE, exactly as NX pasted it
+ * into the FIFO, and expect user space to convert it to CPU format.
+ */
+ csb.address = crb->stamp.nx.fault_storage_addr;
+ csb.flags = 0;
+
+ /*
+ * A process closes its send window after all pending NX requests are
+ * completed. In multi-threaded applications, a child thread can
+ * open a window and exit without closing it. Maybe some
+ * requests are still pending, or this window can be used by other
+ * threads later. We should handle faults if NX encounters
+ * page faults on these requests: update the CSB with a translation
+ * error and the fault address. If the csb_addr passed by user space is
+ * invalid, send a SEGV signal to the pid saved in the window. If the
+ * child thread is not running, send the signal to the tgid.
+ * The parent thread (tgid) will close this window upon its exit.
+ *
+ * pid and mm references are taken when the window is opened by the
+ * process (pid). So the tgid is used only when a child thread opens
+ * a window and exits without closing it.
+ */
+
+ if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
+ return;
+
+ kthread_use_mm(task_ref->mm);
+ rc = copy_to_user(csb_addr, &csb, sizeof(csb));
+ /*
+ * User space polls on csb.flags (the first byte). So add a barrier,
+ * then copy the first byte with the csb flags update.
+ */
+ if (!rc) {
+ csb.flags = CSB_V;
+ /* Make sure update to csb.flags is visible now */
+ smp_mb();
+ rc = copy_to_user(csb_addr, &csb, sizeof(u8));
+ }
+ kthread_unuse_mm(task_ref->mm);
+ put_task_struct(tsk);
+
+ /* Success */
+ if (!rc)
+ return;
+
+ pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
+ csb_addr, pid_vnr(pid));
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = EFAULT;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = csb_addr;
+ /*
+ * The process will be polling on csb.flags after the request is sent
+ * to NX. So generally the CSB update should not fail except when an
+ * application passes an invalid csb_addr. An error message is
+ * displayed, and it is left to user space whether to ignore or
+ * handle this signal.
+ */
+ rcu_read_lock();
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+ pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+ pid_vnr(pid), rc);
+}
+
+void vas_dump_crb(struct coprocessor_request_block *crb)
+{
+ struct data_descriptor_entry *dde;
+ struct nx_fault_stamp *nx;
+
+ dde = &crb->source;
+ pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ dde = &crb->target;
+ pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ nx = &crb->stamp.nx;
+ pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
+ be32_to_cpu(nx->pswid),
+ be64_to_cpu(crb->stamp.nx.fault_storage_addr),
+ nx->flags, nx->fault_status);
+}
+
+static int coproc_open(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst;
+
+ cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
+ if (!cp_inst)
+ return -ENOMEM;
+
+ cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
+ cdev);
+ fp->private_data = cp_inst;
+
+ return 0;
+}
+
+static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+{
+ void __user *uptr = (void __user *)arg;
+ struct vas_tx_win_open_attr uattr;
+ struct coproc_instance *cp_inst;
+ struct vas_window *txwin;
+ int rc;
+
+ cp_inst = fp->private_data;
+
+ /*
+ * One window per file descriptor
+ */
+ if (cp_inst->txwin)
+ return -EEXIST;
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+ pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ if (uattr.version != 1) {
+ pr_err("Invalid window open API version\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
+ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+ pr_err("%s() VAS window open failed, %ld\n", __func__,
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+
+ cp_inst->txwin = txwin;
+
+ return 0;
+}
+
+static int coproc_release(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ int rc;
+
+ if (cp_inst->txwin) {
+ if (cp_inst->coproc->vops &&
+ cp_inst->coproc->vops->close_win) {
+ rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
+ if (rc)
+ return rc;
+ }
+ cp_inst->txwin = NULL;
+ }
+
+ kfree(cp_inst);
+ fp->private_data = NULL;
+
+ /*
+ * We don't know here if user has other receive windows
+ * open, so we can't really call clear_thread_tidr().
+ * So, once the process calls set_thread_tidr(), the
+ * TIDR value sticks around until process exits, resulting
+ * in an extra copy in restore_sprs().
+ */
+
+ return 0;
+}
+
+static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+ unsigned long pfn;
+ u64 paste_addr;
+ pgprot_t prot;
+ int rc;
+
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+ pr_err("%s(): No send window open?\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+ pr_err("%s(): VAS API is not registered\n", __func__);
+ return -EACCES;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+ pr_err("%s(): Window paste address failed\n", __func__);
+ return -EINVAL;
+ }
+
+ pfn = paste_addr >> PAGE_SHIFT;
+
+ /* flags, page_prot from cxl_mmap(), except we want cacheable */
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+
+ prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
+
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+ pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+ paste_addr, vma->vm_start, rc);
+
+ return rc;
+}
+
+static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VAS_TX_WIN_OPEN:
+ return coproc_ioc_tx_win_open(fp, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct file_operations coproc_fops = {
+ .open = coproc_open,
+ .release = coproc_release,
+ .mmap = coproc_mmap,
+ .unlocked_ioctl = coproc_ioctl,
+};
+
+/*
+ * Supporting only the nx-gzip coprocessor type now, but this API code
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name,
+ const struct vas_user_win_ops *vops)
+{
+ int rc = -EINVAL;
+ dev_t devno;
+
+ rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
+ if (rc) {
+ pr_err("Unable to allocate coproc major number: %i\n", rc);
+ return rc;
+ }
+
+ pr_devel("%s device allocated, dev [%i,%i]\n", name,
+ MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
+
+ coproc_device.class = class_create(mod, name);
+ if (IS_ERR(coproc_device.class)) {
+ rc = PTR_ERR(coproc_device.class);
+ pr_err("Unable to create %s class %d\n", name, rc);
+ goto err_class;
+ }
+ coproc_device.class->devnode = coproc_devnode;
+ coproc_device.cop_type = cop_type;
+ coproc_device.vops = vops;
+
+ coproc_fops.owner = mod;
+ cdev_init(&coproc_device.cdev, &coproc_fops);
+
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ rc = cdev_add(&coproc_device.cdev, devno, 1);
+ if (rc) {
+ pr_err("cdev_add() failed %d\n", rc);
+ goto err_cdev;
+ }
+
+ coproc_device.device = device_create(coproc_device.class, NULL,
+ devno, NULL, name, MINOR(devno));
+ if (IS_ERR(coproc_device.device)) {
+ rc = PTR_ERR(coproc_device.device);
+ pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
+ goto err;
+ }
+
+ pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+ MINOR(devno));
+
+ return 0;
+
+err:
+ cdev_del(&coproc_device.cdev);
+err_cdev:
+ class_destroy(coproc_device.class);
+err_class:
+ unregister_chrdev_region(coproc_device.devt, 1);
+ return rc;
+}
+
+void vas_unregister_coproc_api(void)
+{
+ dev_t devno;
+
+ cdev_del(&coproc_device.cdev);
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ device_destroy(coproc_device.class, devno);
+
+ class_destroy(coproc_device.class);
+ unregister_chrdev_region(coproc_device.devt, 1);
+}
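
The comment at the top of the new vas-api.c spells out the intended user-space sequence: open /dev/crypto/nx-gzip, open a send window with the VAS_TX_WIN_OPEN ioctl, mmap() one page to obtain the paste address, then drive the accelerator with copy/paste and close the fd to close the window. Below is a hedged sketch of that sequence. The UAPI header path and the vas_id value are assumptions; only the version, vas_id and flags fields that the driver code above actually reads are touched, and the copy/paste step itself (which needs the helpers from copy-paste.h plus a CRB) is left as a comment.

/* User-space sketch of the window-open flow; illustrative only. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/vas-api.h>	/* assumed install path of the VAS UAPI header */

int main(void)
{
	struct vas_tx_win_open_attr attr;
	long page = sysconf(_SC_PAGESIZE);
	void *paste_addr;
	int fd;

	fd = open("/dev/crypto/nx-gzip", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;	/* the driver rejects any other version */
	attr.vas_id = -1;	/* assumption: let the driver pick an instance */

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
		perror("VAS_TX_WIN_OPEN");
		close(fd);
		return 1;
	}

	/* One page, shared mapping; this is the window's paste address. */
	paste_addr = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (paste_addr == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Build a CRB and issue copy/paste here (see copy-paste.h). */

	munmap(paste_addr, page);
	close(fd);		/* closing the fd also closes the window */
	return 0;
}
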
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index c855a0aeb49c..d7ab868aab54 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -78,9 +78,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
pcpu = get_hard_smp_processor_id(lcpu);
- /* Fixup atomic count: it exited inside IRQ handler. */
- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
-
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 93ea41680f54..a1c293f42a1f 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -25,10 +25,9 @@ struct spiderpci_iowa_private {
static void spiderpci_io_flush(struct iowa_bus *bus)
{
struct spiderpci_iowa_private *priv;
- u32 val;
priv = bus->private;
- val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
+ in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
iosync();
}
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index d56b4e3241cd..b41e81b22fdc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1657,14 +1657,13 @@ static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
- u32 dummy = 0;
/* Restore, Step 66:
* If CSA.MB_Stat[P]=0 (mailbox empty) then
* read from the PPU_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF) == 0) {
- dummy = in_be32(&prob->pu_mb_R);
+ in_be32(&prob->pu_mb_R);
eieio();
}
}
@@ -1672,14 +1671,13 @@ static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 dummy = 0UL;
/* Restore, Step 66:
* If CSA.MB_Stat[I]=0 (mailbox empty) then
* read from the PPUINT_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
- dummy = in_be64(&priv2->puint_mb_R);
+ in_be64(&priv2->puint_mb_R);
eieio();
spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 53065d564161..85521b3e7098 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -251,8 +251,8 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 5565647dc879..d8da6a483e59 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -173,8 +173,8 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig
new file mode 100644
index 000000000000..8f6a81978461
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_MICROWATT
+ depends on PPC_BOOK3S_64 && !SMP
+ bool "Microwatt SoC platform"
+ select PPC_XICS
+ select PPC_ICS_NATIVE
+ select PPC_ICP_NATIVE
+ select PPC_NATIVE
+ select PPC_UDBG_16550
+ select ARCH_RANDOM
+ help
+ This option enables support for FPGA-based Microwatt implementations.
+
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile
new file mode 100644
index 000000000000..116d6d3ad3f0
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o rng.o
diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c
new file mode 100644
index 000000000000..3d8ee6eb7dad
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/rng.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/powerpc/platforms/powernv/rng.c, which is:
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "microwatt-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <asm/archrandom.h>
+#include <asm/cputable.h>
+#include <asm/machdep.h>
+
+#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
+
+int microwatt_get_random_darn(unsigned long *v)
+{
+ unsigned long val;
+
+ /* Using DARN with L=1 - 64-bit conditioned random number */
+ asm volatile(PPC_DARN(%0, 1) : "=r"(val));
+
+ if (val == DARN_ERR)
+ return 0;
+
+ *v = val;
+
+ return 1;
+}
+
+static __init int rng_init(void)
+{
+ unsigned long val;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (microwatt_get_random_darn(&val)) {
+ ppc_md.get_random_seed = microwatt_get_random_darn;
+ return 0;
+ }
+ }
+
+ pr_warn("Unable to use DARN for get_random_seed()\n");
+
+ return -EIO;
+}
+machine_subsys_initcall(microwatt, rng_init);
diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c
new file mode 100644
index 000000000000..0b02603bdb74
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/setup.c
@@ -0,0 +1,41 @@
+/*
+ * Microwatt FPGA-based SoC platform setup code.
+ *
+ * Copyright 2020 Paul Mackerras (paulus@ozlabs.org), IBM Corp.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/xics.h>
+#include <asm/udbg.h>
+
+static void __init microwatt_init_IRQ(void)
+{
+ xics_init();
+}
+
+static int __init microwatt_probe(void)
+{
+ return of_machine_is_compatible("microwatt-soc");
+}
+
+static int __init microwatt_populate(void)
+{
+ return of_platform_default_populate(NULL, NULL, NULL);
+}
+machine_arch_initcall(microwatt, microwatt_populate);
+
+define_machine(microwatt) {
+ .name = "microwatt",
+ .probe = microwatt_probe,
+ .init_IRQ = microwatt_init_IRQ,
+ .progress = udbg_progress,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 1c954c90b0f4..534b0317fc15 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -37,11 +37,12 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
*/
if (regs->msr & SRR1_WAKEMASK)
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
+ break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take
@@ -58,7 +59,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
restore_astate(hard_smp_processor_id());
/* everything handled */
- regs->msr |= MSR_RI;
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
return 1;
}
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 9d4ecd292255..d20ef35e6d9d 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -433,7 +433,7 @@ static void __init btext_welcome(boot_infos_t *bi)
bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
bootx_printf(" (log)");
- bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
+ bootx_printf("\nklimit : 0x%x",(unsigned long)_end);
bootx_printf("\nboot_info at : 0x%x", bi);
__asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
bootx_printf("\nMSR : 0x%x", flags);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index adae2a6712e1..3256a316e884 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -146,6 +146,7 @@ static inline void psurge_clr_ipi(int cpu)
switch(psurge_type) {
case PSURGE_DUAL:
out_8(psurge_sec_intr, ~0);
+ break;
case PSURGE_NONE:
break;
default:
@@ -810,7 +811,7 @@ static int smp_core99_kick_cpu(int nr)
* b __secondary_start_pmac_0 + nr*8
*/
target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
- patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
+ patch_branch(vector, target, BRANCH_SET_LINK);
/* Put some life in our friend */
pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
@@ -823,7 +824,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 619b093a0657..043eefbbdd28 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -33,20 +33,6 @@ config PPC_MEMTRACE
Enabling this option allows for runtime allocation of memory (RAM)
for hardware tracing.
-config PPC_VAS
- bool "IBM Virtual Accelerator Switchboard (VAS)"
- depends on PPC_POWERNV && PPC_64K_PAGES
- default y
- help
- This enables support for IBM Virtual Accelerator Switchboard (VAS).
-
- VAS allows accelerators in co-processors like NX-GZIP and NX-842
- to be accessible to kernel subsystems and user processes.
-
- VAS adapters are found in POWER9 based systems.
-
- If unsure, say N.
-
config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
depends on DEBUG_FS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index be2546b96816..dc7b37c23b60 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
obj-$(CONFIG_PERF_EVENTS) += opal-imc.o
obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
-obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o vas-api.o
+obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o
obj-$(CONFIG_OCXL_BASE) += ocxl.o
obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o
obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 01401e3da7ca..f812c74c61e5 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
+#include <asm/interrupt.h>
#include <asm/opal-api.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
@@ -100,6 +101,9 @@ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
bool mmu = (msr & (MSR_IR|MSR_DR));
int64_t ret;
+ /* OPAL call / firmware may use SRR and/or HSRR */
+ srr_regs_clobbered();
+
msr &= ~MSR_EE;
if (unlikely(!mmu))
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 303d7c775740..2835376e61a4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -773,7 +773,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
* Setup regs->nip to rfi into fixup address.
*/
if (recover_addr)
- regs->nip = recover_addr;
+ regs_set_return_ip(regs, recover_addr);
out:
return !!recover_addr;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b18468dc31ff..6bb3c52633fb 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -711,7 +711,7 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
return PCIBIOS_SUCCESSFUL;
}
-#if CONFIG_EEH
+#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 73207b53dc2b..7e98b00ea2e8 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -169,6 +169,16 @@ static void update_hid_in_slw(u64 hid0)
}
}
+static inline void update_power8_hid0(unsigned long hid0)
+{
+ /*
+ * The HID0 update on Power8 should at the very least be
+ * preceded by a SYNC instruction followed by an ISYNC
+ * instruction
+ */
+ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
+
static void unsplit_core(void)
{
u64 hid0, mask;
diff --git a/arch/powerpc/platforms/powernv/vas-api.c b/arch/powerpc/platforms/powernv/vas-api.c
deleted file mode 100644
index 98ed5d8c5441..000000000000
--- a/arch/powerpc/platforms/powernv/vas-api.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * VAS user space API for its accelerators (Only NX-GZIP is supported now)
- * Copyright (C) 2019 Haren Myneni, IBM Corp
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/vas.h>
-#include <uapi/asm/vas-api.h>
-#include "vas.h"
-
-/*
- * The driver creates the device node that can be used as follows:
- * For NX-GZIP
- *
- * fd = open("/dev/crypto/nx-gzip", O_RDWR);
- * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
- * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
- * vas_copy(&crb, 0, 1);
- * vas_paste(paste_addr, 0, 1);
- * close(fd) or exit process to close window.
- *
- * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
- * copy/paste returns to the user space directly. So refer NX hardware
- * documententation for exact copy/paste usage and completion / error
- * conditions.
- */
-
-/*
- * Wrapper object for the nx-gzip device - there is just one instance of
- * this node for the whole system.
- */
-static struct coproc_dev {
- struct cdev cdev;
- struct device *device;
- char *name;
- dev_t devt;
- struct class *class;
- enum vas_cop_type cop_type;
-} coproc_device;
-
-struct coproc_instance {
- struct coproc_dev *coproc;
- struct vas_window *txwin;
-};
-
-static char *coproc_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
-}
-
-static int coproc_open(struct inode *inode, struct file *fp)
-{
- struct coproc_instance *cp_inst;
-
- cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
- if (!cp_inst)
- return -ENOMEM;
-
- cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
- cdev);
- fp->private_data = cp_inst;
-
- return 0;
-}
-
-static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
-{
- void __user *uptr = (void __user *)arg;
- struct vas_tx_win_attr txattr = {};
- struct vas_tx_win_open_attr uattr;
- struct coproc_instance *cp_inst;
- struct vas_window *txwin;
- int rc, vasid;
-
- cp_inst = fp->private_data;
-
- /*
- * One window for file descriptor
- */
- if (cp_inst->txwin)
- return -EEXIST;
-
- rc = copy_from_user(&uattr, uptr, sizeof(uattr));
- if (rc) {
- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
- return -EFAULT;
- }
-
- if (uattr.version != 1) {
- pr_err("Invalid version\n");
- return -EINVAL;
- }
-
- vasid = uattr.vas_id;
-
- vas_init_tx_win_attr(&txattr, cp_inst->coproc->cop_type);
-
- txattr.lpid = mfspr(SPRN_LPID);
- txattr.pidr = mfspr(SPRN_PID);
- txattr.user_win = true;
- txattr.rsvd_txbuf_count = false;
- txattr.pswid = false;
-
- pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
- mfspr(SPRN_PID));
-
- txwin = vas_tx_win_open(vasid, cp_inst->coproc->cop_type, &txattr);
- if (IS_ERR(txwin)) {
- pr_err("%s() vas_tx_win_open() failed, %ld\n", __func__,
- PTR_ERR(txwin));
- return PTR_ERR(txwin);
- }
-
- cp_inst->txwin = txwin;
-
- return 0;
-}
-
-static int coproc_release(struct inode *inode, struct file *fp)
-{
- struct coproc_instance *cp_inst = fp->private_data;
-
- if (cp_inst->txwin) {
- vas_win_close(cp_inst->txwin);
- cp_inst->txwin = NULL;
- }
-
- kfree(cp_inst);
- fp->private_data = NULL;
-
- /*
- * We don't know here if user has other receive windows
- * open, so we can't really call clear_thread_tidr().
- * So, once the process calls set_thread_tidr(), the
- * TIDR value sticks around until process exits, resulting
- * in an extra copy in restore_sprs().
- */
-
- return 0;
-}
-
-static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
-{
- struct coproc_instance *cp_inst = fp->private_data;
- struct vas_window *txwin;
- unsigned long pfn;
- u64 paste_addr;
- pgprot_t prot;
- int rc;
-
- txwin = cp_inst->txwin;
-
- if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
- (vma->vm_end - vma->vm_start), PAGE_SIZE);
- return -EINVAL;
- }
-
- /* Ensure instance has an open send window */
- if (!txwin) {
- pr_err("%s(): No send window open?\n", __func__);
- return -EINVAL;
- }
-
- vas_win_paste_addr(txwin, &paste_addr, NULL);
- pfn = paste_addr >> PAGE_SHIFT;
-
- /* flags, page_prot from cxl_mmap(), except we want cachable */
- vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
-
- prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
-
- rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start, prot);
-
- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
- paste_addr, vma->vm_start, rc);
-
- return rc;
-}
-
-static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case VAS_TX_WIN_OPEN:
- return coproc_ioc_tx_win_open(fp, arg);
- default:
- return -EINVAL;
- }
-}
-
-static struct file_operations coproc_fops = {
- .open = coproc_open,
- .release = coproc_release,
- .mmap = coproc_mmap,
- .unlocked_ioctl = coproc_ioctl,
-};
-
-/*
- * Supporting only nx-gzip coprocessor type now, but this API code
- * extended to other coprocessor types later.
- */
-int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name)
-{
- int rc = -EINVAL;
- dev_t devno;
-
- rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
- if (rc) {
- pr_err("Unable to allocate coproc major number: %i\n", rc);
- return rc;
- }
-
- pr_devel("%s device allocated, dev [%i,%i]\n", name,
- MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
-
- coproc_device.class = class_create(mod, name);
- if (IS_ERR(coproc_device.class)) {
- rc = PTR_ERR(coproc_device.class);
- pr_err("Unable to create %s class %d\n", name, rc);
- goto err_class;
- }
- coproc_device.class->devnode = coproc_devnode;
- coproc_device.cop_type = cop_type;
-
- coproc_fops.owner = mod;
- cdev_init(&coproc_device.cdev, &coproc_fops);
-
- devno = MKDEV(MAJOR(coproc_device.devt), 0);
- rc = cdev_add(&coproc_device.cdev, devno, 1);
- if (rc) {
- pr_err("cdev_add() failed %d\n", rc);
- goto err_cdev;
- }
-
- coproc_device.device = device_create(coproc_device.class, NULL,
- devno, NULL, name, MINOR(devno));
- if (IS_ERR(coproc_device.device)) {
- rc = PTR_ERR(coproc_device.device);
- pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
- goto err;
- }
-
- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
- MINOR(devno));
-
- return 0;
-
-err:
- cdev_del(&coproc_device.cdev);
-err_cdev:
- class_destroy(coproc_device.class);
-err_class:
- unregister_chrdev_region(coproc_device.devt, 1);
- return rc;
-}
-EXPORT_SYMBOL_GPL(vas_register_coproc_api);
-
-void vas_unregister_coproc_api(void)
-{
- dev_t devno;
-
- cdev_del(&coproc_device.cdev);
- devno = MKDEV(MAJOR(coproc_device.devt), 0);
- device_destroy(coproc_device.class, devno);
-
- class_destroy(coproc_device.class);
- unregister_chrdev_region(coproc_device.devt, 1);
-}
-EXPORT_SYMBOL_GPL(vas_unregister_coproc_api);
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 41fa90d2f4ab..3ce89a4b54be 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <asm/vas.h>
#include "vas.h"
static struct dentry *vas_debugfs;
@@ -28,7 +29,7 @@ static char *cop_to_str(int cop)
static int info_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -36,9 +37,9 @@ static int info_show(struct seq_file *s, void *private)
if (!window->hvwc_map)
goto unlock;
- seq_printf(s, "Type: %s, %s\n", cop_to_str(window->cop),
+ seq_printf(s, "Type: %s, %s\n", cop_to_str(window->vas_win.cop),
window->tx_win ? "Send" : "Receive");
- seq_printf(s, "Pid : %d\n", vas_window_pid(window));
+ seq_printf(s, "Pid : %d\n", vas_window_pid(&window->vas_win));
unlock:
mutex_unlock(&vas_mutex);
@@ -47,7 +48,7 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(info);
-static inline void print_reg(struct seq_file *s, struct vas_window *win,
+static inline void print_reg(struct seq_file *s, struct pnv_vas_window *win,
char *name, u32 reg)
{
seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name);
@@ -55,7 +56,7 @@ static inline void print_reg(struct seq_file *s, struct vas_window *win,
static int hvwc_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -103,8 +104,10 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(hvwc);
-void vas_window_free_dbgdir(struct vas_window *window)
+void vas_window_free_dbgdir(struct pnv_vas_window *pnv_win)
{
+ struct vas_window *window = &pnv_win->vas_win;
+
if (window->dbgdir) {
debugfs_remove_recursive(window->dbgdir);
kfree(window->dbgname);
@@ -113,21 +116,21 @@ void vas_window_free_dbgdir(struct vas_window *window)
}
}
-void vas_window_init_dbgdir(struct vas_window *window)
+void vas_window_init_dbgdir(struct pnv_vas_window *window)
{
struct dentry *d;
if (!window->vinst->dbgdir)
return;
- window->dbgname = kzalloc(16, GFP_KERNEL);
- if (!window->dbgname)
+ window->vas_win.dbgname = kzalloc(16, GFP_KERNEL);
+ if (!window->vas_win.dbgname)
return;
- snprintf(window->dbgname, 16, "w%d", window->winid);
+ snprintf(window->vas_win.dbgname, 16, "w%d", window->vas_win.winid);
- d = debugfs_create_dir(window->dbgname, window->vinst->dbgdir);
- window->dbgdir = d;
+ d = debugfs_create_dir(window->vas_win.dbgname, window->vinst->dbgdir);
+ window->vas_win.dbgdir = d;
debugfs_create_file("info", 0444, d, window, &info_fops);
debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 3d21fce254b7..a7aabc18039e 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -26,150 +26,6 @@
*/
#define VAS_FAULT_WIN_FIFO_SIZE (4 << 20)
-static void dump_crb(struct coprocessor_request_block *crb)
-{
- struct data_descriptor_entry *dde;
- struct nx_fault_stamp *nx;
-
- dde = &crb->source;
- pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
- be64_to_cpu(dde->address), be32_to_cpu(dde->length),
- dde->count, dde->index, dde->flags);
-
- dde = &crb->target;
- pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
- be64_to_cpu(dde->address), be32_to_cpu(dde->length),
- dde->count, dde->index, dde->flags);
-
- nx = &crb->stamp.nx;
- pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
- be32_to_cpu(nx->pswid),
- be64_to_cpu(crb->stamp.nx.fault_storage_addr),
- nx->flags, nx->fault_status);
-}
-
-/*
- * Update the CSB to indicate a translation error.
- *
- * User space will be polling on CSB after the request is issued.
- * If NX can handle the request without any issues, it updates CSB.
- * Whereas if NX encounters page fault, the kernel will handle the
- * fault and update CSB with translation error.
- *
- * If we are unable to update the CSB means copy_to_user failed due to
- * invalid csb_addr, send a signal to the process.
- */
-static void update_csb(struct vas_window *window,
- struct coprocessor_request_block *crb)
-{
- struct coprocessor_status_block csb;
- struct kernel_siginfo info;
- struct task_struct *tsk;
- void __user *csb_addr;
- struct pid *pid;
- int rc;
-
- /*
- * NX user space windows can not be opened for task->mm=NULL
- * and faults will not be generated for kernel requests.
- */
- if (WARN_ON_ONCE(!window->mm || !window->user_win))
- return;
-
- csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
-
- memset(&csb, 0, sizeof(csb));
- csb.cc = CSB_CC_FAULT_ADDRESS;
- csb.ce = CSB_CE_TERMINATION;
- csb.cs = 0;
- csb.count = 0;
-
- /*
- * NX operates and returns in BE format as defined CRB struct.
- * So saves fault_storage_addr in BE as NX pastes in FIFO and
- * expects user space to convert to CPU format.
- */
- csb.address = crb->stamp.nx.fault_storage_addr;
- csb.flags = 0;
-
- pid = window->pid;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- /*
- * Process closes send window after all pending NX requests are
- * completed. In multi-thread applications, a child thread can
- * open a window and can exit without closing it. May be some
- * requests are pending or this window can be used by other
- * threads later. We should handle faults if NX encounters
- * pages faults on these requests. Update CSB with translation
- * error and fault address. If csb_addr passed by user space is
- * invalid, send SEGV signal to pid saved in window. If the
- * child thread is not running, send the signal to tgid.
- * Parent thread (tgid) will close this window upon its exit.
- *
- * pid and mm references are taken when window is opened by
- * process (pid). So tgid is used only when child thread opens
- * a window and exits without closing it.
- */
- if (!tsk) {
- pid = window->tgid;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- /*
- * Parent thread (tgid) will be closing window when it
- * exits. So should not get here.
- */
- if (WARN_ON_ONCE(!tsk))
- return;
- }
-
- /* Return if the task is exiting. */
- if (tsk->flags & PF_EXITING) {
- put_task_struct(tsk);
- return;
- }
-
- kthread_use_mm(window->mm);
- rc = copy_to_user(csb_addr, &csb, sizeof(csb));
- /*
- * User space polls on csb.flags (first byte). So add barrier
- * then copy first byte with csb flags update.
- */
- if (!rc) {
- csb.flags = CSB_V;
- /* Make sure update to csb.flags is visible now */
- smp_mb();
- rc = copy_to_user(csb_addr, &csb, sizeof(u8));
- }
- kthread_unuse_mm(window->mm);
- put_task_struct(tsk);
-
- /* Success */
- if (!rc)
- return;
-
- pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
- csb_addr, pid_vnr(pid));
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_errno = EFAULT;
- info.si_code = SEGV_MAPERR;
- info.si_addr = csb_addr;
-
- /*
- * process will be polling on csb.flags after request is sent to
- * NX. So generally CSB update should not fail except when an
- * application passes invalid csb_addr. So an error message will
- * be displayed and leave it to user space whether to ignore or
- * handle this signal.
- */
- rcu_read_lock();
- rc = kill_pid_info(SIGSEGV, &info, pid);
- rcu_read_unlock();
-
- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
- pid_vnr(pid), rc);
-}
-
static void dump_fifo(struct vas_instance *vinst, void *entry)
{
unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
@@ -212,7 +68,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
struct vas_instance *vinst = data;
struct coprocessor_request_block *crb, *entry;
struct coprocessor_request_block buf;
- struct vas_window *window;
+ struct pnv_vas_window *window;
unsigned long flags;
void *fifo;
@@ -272,7 +128,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
vinst->vas_id, vinst->fault_fifo, fifo,
vinst->fault_crbs);
- dump_crb(crb);
+ vas_dump_crb(crb);
window = vas_pswid_to_window(vinst,
be32_to_cpu(crb->stamp.nx.pswid));
@@ -293,7 +149,14 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
WARN_ON_ONCE(1);
} else {
- update_csb(window, crb);
+ /*
+ * NX sees faults only with user space windows.
+ */
+ if (window->user_win)
+ vas_update_csb(crb, &window->vas_win.task_ref);
+ else
+ WARN_ON_ONCE(!window->user_win);
+
/*
* Return credit for send window after processing
* fault CRB.
@@ -336,6 +199,7 @@ irqreturn_t vas_fault_handler(int irq, void *dev_id)
int vas_setup_fault_window(struct vas_instance *vinst)
{
struct vas_rx_win_attr attr;
+ struct vas_window *win;
vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE;
vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL);
@@ -364,18 +228,17 @@ int vas_setup_fault_window(struct vas_instance *vinst)
attr.lnotify_pid = mfspr(SPRN_PID);
attr.lnotify_tid = mfspr(SPRN_PID);
- vinst->fault_win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT,
- &attr);
-
- if (IS_ERR(vinst->fault_win)) {
- pr_err("VAS: Error %ld opening FaultWin\n",
- PTR_ERR(vinst->fault_win));
+ win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT, &attr);
+ if (IS_ERR(win)) {
+ pr_err("VAS: Error %ld opening FaultWin\n", PTR_ERR(win));
kfree(vinst->fault_fifo);
- return PTR_ERR(vinst->fault_win);
+ return PTR_ERR(win);
}
+ vinst->fault_win = container_of(win, struct pnv_vas_window, vas_win);
+
pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n",
- vinst->fault_win->winid, attr.lnotify_lpid,
+ vinst->fault_win->vas_win.winid, attr.lnotify_lpid,
attr.lnotify_pid, attr.lnotify_tid);
return 0;
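
vas_update_csb(), which this fault path now calls, relies on a small ordering protocol with user space: the kernel fills in the status block first, issues a barrier, and only then sets the valid bit in csb.flags, the one byte user space polls. A sketch of the matching user-space side follows. The CSB_V and CSB_CC_FAULT_ADDRESS values and the exact structure layout are assumptions here (the real definitions live in the ICSWX/NX headers); the point is only the poll-then-acquire ordering.

/* User-space sketch of polling a CSB; values and layout are assumed. */
#include <stdint.h>
#include <stdatomic.h>

#define CSB_V			0x80	/* assumed: "valid" bit in csb.flags   */
#define CSB_CC_FAULT_ADDRESS	250	/* assumed: translation-error code     */

struct csb {				/* outline of coprocessor_status_block */
	volatile uint8_t flags;
	uint8_t cs;
	uint8_t cc;
	uint8_t ce;
	uint32_t count;
	uint64_t address;		/* big-endian fault address on error   */
};

/* Spin until the kernel/NX marks the CSB valid, then return the CC. */
static int wait_for_csb(const struct csb *csb)
{
	while (!(csb->flags & CSB_V))
		;	/* poll the first byte, exactly what the driver expects */

	/* Pairs with the kernel's barrier issued before the flag update. */
	atomic_thread_fence(memory_order_acquire);

	return csb->cc;
}

A return value of CSB_CC_FAULT_ADDRESS means the fault path above reported a translation error, and csb->address then holds the faulting address in big-endian form, as noted in the code.
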
diff --git a/arch/powerpc/platforms/powernv/vas-trace.h b/arch/powerpc/platforms/powernv/vas-trace.h
index a449b9f0c12e..ca2e08f2ddc0 100644
--- a/arch/powerpc/platforms/powernv/vas-trace.h
+++ b/arch/powerpc/platforms/powernv/vas-trace.h
@@ -80,7 +80,7 @@ TRACE_EVENT( vas_tx_win_open,
TRACE_EVENT( vas_paste_crb,
TP_PROTO(struct task_struct *tsk,
- struct vas_window *win),
+ struct pnv_vas_window *win),
TP_ARGS(tsk, win),
@@ -96,7 +96,7 @@ TRACE_EVENT( vas_paste_crb,
TP_fast_assign(
__entry->pid = tsk->pid;
__entry->vasid = win->vinst->vas_id;
- __entry->winid = win->winid;
+ __entry->winid = win->vas_win.winid;
__entry->paste_kaddr = (unsigned long)win->paste_kaddr
),
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 5f5fe63a3d1c..0f8d39fbf2b2 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -16,6 +16,7 @@
#include <linux/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
+#include <asm/vas.h>
#include "vas.h"
#include "copy-paste.h"
@@ -26,14 +27,14 @@
* Compute the paste address region for the window @window using the
* ->paste_base_addr and ->paste_win_id_shift we got from device tree.
*/
-void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
+void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr, int *len)
{
int winid;
u64 base, shift;
base = window->vinst->paste_base_addr;
shift = window->vinst->paste_win_id_shift;
- winid = window->winid;
+ winid = window->vas_win.winid;
*addr = base + (winid << shift);
if (len)
@@ -42,23 +43,23 @@ void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}
-static inline void get_hvwc_mmio_bar(struct vas_window *window,
+static inline void get_hvwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->hvwc_bar_start;
- *start = pbaddr + window->winid * VAS_HVWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_HVWC_SIZE;
*len = VAS_HVWC_SIZE;
}
-static inline void get_uwc_mmio_bar(struct vas_window *window,
+static inline void get_uwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->uwc_bar_start;
- *start = pbaddr + window->winid * VAS_UWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_UWC_SIZE;
*len = VAS_UWC_SIZE;
}
@@ -67,7 +68,7 @@ static inline void get_uwc_mmio_bar(struct vas_window *window,
* space. Unlike MMIO regions (map_mmio_region() below), paste region must
* be mapped cache-able and is only applicable to send windows.
*/
-static void *map_paste_region(struct vas_window *txwin)
+static void *map_paste_region(struct pnv_vas_window *txwin)
{
int len;
void *map;
@@ -75,7 +76,7 @@ static void *map_paste_region(struct vas_window *txwin)
u64 start;
name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
- txwin->winid);
+ txwin->vas_win.winid);
if (!name)
goto free_name;
@@ -132,7 +133,7 @@ static void unmap_region(void *addr, u64 start, int len)
/*
* Unmap the paste address region for a window.
*/
-static void unmap_paste_region(struct vas_window *window)
+static void unmap_paste_region(struct pnv_vas_window *window)
{
int len;
u64 busaddr_start;
@@ -153,7 +154,7 @@ static void unmap_paste_region(struct vas_window *window)
* path, just minimize the time we hold the mutex for now. We can add
* a per-instance mutex later if necessary.
*/
-static void unmap_winctx_mmio_bars(struct vas_window *window)
+static void unmap_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
void *uwc_map;
@@ -186,7 +187,7 @@ static void unmap_winctx_mmio_bars(struct vas_window *window)
* OS/User Window Context (UWC) MMIO Base Address Region for the given window.
* Map these bus addresses and save the mapped kernel addresses in @window.
*/
-static int map_winctx_mmio_bars(struct vas_window *window)
+static int map_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
u64 start;
@@ -214,7 +215,7 @@ static int map_winctx_mmio_bars(struct vas_window *window)
* registers are not sequential. And, we can only write to offsets
* with valid registers.
*/
-static void reset_window_regs(struct vas_window *window)
+static void reset_window_regs(struct pnv_vas_window *window)
{
write_hvwc_reg(window, VREG(LPID), 0ULL);
write_hvwc_reg(window, VREG(PID), 0ULL);
@@ -270,7 +271,7 @@ static void reset_window_regs(struct vas_window *window)
* want to add fields to vas_winctx and move the initialization to
* init_vas_winctx_regs().
*/
-static void init_xlate_regs(struct vas_window *window, bool user_win)
+static void init_xlate_regs(struct pnv_vas_window *window, bool user_win)
{
u64 lpcr, val;
@@ -335,7 +336,7 @@ static void init_xlate_regs(struct vas_window *window, bool user_win)
*
* TODO: Reserved (aka dedicated) send buffers are not supported yet.
*/
-static void init_rsvd_tx_buf_count(struct vas_window *txwin,
+static void init_rsvd_tx_buf_count(struct pnv_vas_window *txwin,
struct vas_winctx *winctx)
{
write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
@@ -357,7 +358,7 @@ static void init_rsvd_tx_buf_count(struct vas_window *txwin,
* as a one-time task? That could work for NX but what about other
* receivers? Let the receivers tell us the rx-fifo buffers for now.
*/
-static void init_winctx_regs(struct vas_window *window,
+static void init_winctx_regs(struct pnv_vas_window *window,
struct vas_winctx *winctx)
{
u64 val;
@@ -519,10 +520,10 @@ static int vas_assign_window_id(struct ida *ida)
return winid;
}
-static void vas_window_free(struct vas_window *window)
+static void vas_window_free(struct pnv_vas_window *window)
{
- int winid = window->winid;
struct vas_instance *vinst = window->vinst;
+ int winid = window->vas_win.winid;
unmap_winctx_mmio_bars(window);
@@ -533,10 +534,10 @@ static void vas_window_free(struct vas_window *window)
vas_release_window_id(&vinst->ida, winid);
}
-static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
+static struct pnv_vas_window *vas_window_alloc(struct vas_instance *vinst)
{
int winid;
- struct vas_window *window;
+ struct pnv_vas_window *window;
winid = vas_assign_window_id(&vinst->ida);
if (winid < 0)
@@ -547,7 +548,7 @@ static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
goto out_free;
window->vinst = vinst;
- window->winid = winid;
+ window->vas_win.winid = winid;
if (map_winctx_mmio_bars(window))
goto out_free;
@@ -562,7 +563,7 @@ out_free:
return ERR_PTR(-ENOMEM);
}
-static void put_rx_win(struct vas_window *rxwin)
+static void put_rx_win(struct pnv_vas_window *rxwin)
{
/* Better not be a send window! */
WARN_ON_ONCE(rxwin->tx_win);
@@ -578,10 +579,11 @@ static void put_rx_win(struct vas_window *rxwin)
*
* NOTE: We access ->windows[] table and assume that vinst->mutex is held.
*/
-static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
+static struct pnv_vas_window *get_user_rxwin(struct vas_instance *vinst,
+ u32 pswid)
{
int vasid, winid;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
decode_pswid(pswid, &vasid, &winid);
@@ -590,7 +592,7 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
rxwin = vinst->windows[winid];
- if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
+ if (!rxwin || rxwin->tx_win || rxwin->vas_win.cop != VAS_COP_TYPE_FTW)
return ERR_PTR(-EINVAL);
return rxwin;
@@ -602,10 +604,10 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
*
* See also function header of set_vinst_win().
*/
-static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
+static struct pnv_vas_window *get_vinst_rxwin(struct vas_instance *vinst,
enum vas_cop_type cop, u32 pswid)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
mutex_lock(&vinst->mutex);
@@ -638,9 +640,9 @@ static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
* window, we also save the window in the ->rxwin[] table.
*/
static void set_vinst_win(struct vas_instance *vinst,
- struct vas_window *window)
+ struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
mutex_lock(&vinst->mutex);
@@ -649,8 +651,8 @@ static void set_vinst_win(struct vas_instance *vinst,
* unless its a user (FTW) window.
*/
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = window;
+ WARN_ON_ONCE(vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = window;
}
WARN_ON_ONCE(vinst->windows[id] != NULL);
@@ -663,16 +665,16 @@ static void set_vinst_win(struct vas_instance *vinst,
* Clear this window from the table(s) of windows for this VAS instance.
* See also function header of set_vinst_win().
*/
-static void clear_vinst_win(struct vas_window *window)
+static void clear_vinst_win(struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
struct vas_instance *vinst = window->vinst;
mutex_lock(&vinst->mutex);
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(!vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = NULL;
+ WARN_ON_ONCE(!vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = NULL;
}
WARN_ON_ONCE(vinst->windows[id] != window);
@@ -681,7 +683,7 @@ static void clear_vinst_win(struct vas_window *window)
mutex_unlock(&vinst->mutex);
}
-static void init_winctx_for_rxwin(struct vas_window *rxwin,
+static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin,
struct vas_rx_win_attr *rxattr,
struct vas_winctx *winctx)
{
@@ -702,7 +704,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
winctx->rx_fifo = rxattr->rx_fifo;
winctx->rx_fifo_size = rxattr->rx_fifo_size;
- winctx->wcreds_max = rxwin->wcreds_max;
+ winctx->wcreds_max = rxwin->vas_win.wcreds_max;
winctx->pin_win = rxattr->pin_win;
winctx->nx_win = rxattr->nx_win;
@@ -851,7 +853,7 @@ EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
struct vas_rx_win_attr *rxattr)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -870,21 +872,21 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
rxwin = vas_window_alloc(vinst);
if (IS_ERR(rxwin)) {
pr_devel("Unable to allocate memory for Rx window\n");
- return rxwin;
+ return (struct vas_window *)rxwin;
}
rxwin->tx_win = false;
rxwin->nx_win = rxattr->nx_win;
rxwin->user_win = rxattr->user_win;
- rxwin->cop = cop;
- rxwin->wcreds_max = rxattr->wcreds_max;
+ rxwin->vas_win.cop = cop;
+ rxwin->vas_win.wcreds_max = rxattr->wcreds_max;
init_winctx_for_rxwin(rxwin, rxattr, &winctx);
init_winctx_regs(rxwin, &winctx);
set_vinst_win(vinst, rxwin);
- return rxwin;
+ return &rxwin->vas_win;
}
EXPORT_SYMBOL_GPL(vas_rx_win_open);
@@ -905,7 +907,7 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
}
EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
-static void init_winctx_for_txwin(struct vas_window *txwin,
+static void init_winctx_for_txwin(struct pnv_vas_window *txwin,
struct vas_tx_win_attr *txattr,
struct vas_winctx *winctx)
{
@@ -926,7 +928,7 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
*/
memset(winctx, 0, sizeof(struct vas_winctx));
- winctx->wcreds_max = txwin->wcreds_max;
+ winctx->wcreds_max = txwin->vas_win.wcreds_max;
winctx->user_win = txattr->user_win;
winctx->nx_win = txwin->rxwin->nx_win;
@@ -946,13 +948,13 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->lpid = txattr->lpid;
winctx->pidr = txattr->pidr;
- winctx->rx_win_id = txwin->rxwin->winid;
+ winctx->rx_win_id = txwin->rxwin->vas_win.winid;
/*
* IRQ and fault window setup is successful. Set fault window
* for the send window so that it is ready to handle faults.
*/
if (txwin->vinst->virq)
- winctx->fault_win_id = txwin->vinst->fault_win->winid;
+ winctx->fault_win_id = txwin->vinst->fault_win->vas_win.winid;
winctx->dma_type = VAS_DMA_TYPE_INJECT;
winctx->tc_mode = txattr->tc_mode;
@@ -962,7 +964,8 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->irq_port = txwin->vinst->irq_port;
winctx->pswid = txattr->pswid ? txattr->pswid :
- encode_pswid(txwin->vinst->vas_id, txwin->winid);
+ encode_pswid(txwin->vinst->vas_id,
+ txwin->vas_win.winid);
}
static bool tx_win_args_valid(enum vas_cop_type cop,
@@ -993,8 +996,8 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
struct vas_tx_win_attr *attr)
{
int rc;
- struct vas_window *txwin;
- struct vas_window *rxwin;
+ struct pnv_vas_window *txwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -1020,7 +1023,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
if (IS_ERR(rxwin)) {
pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
- return rxwin;
+ return (struct vas_window *)rxwin;
}
txwin = vas_window_alloc(vinst);
@@ -1029,12 +1032,12 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
goto put_rxwin;
}
- txwin->cop = cop;
+ txwin->vas_win.cop = cop;
txwin->tx_win = 1;
txwin->rxwin = rxwin;
txwin->nx_win = txwin->rxwin->nx_win;
txwin->user_win = attr->user_win;
- txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
+ txwin->vas_win.wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
init_winctx_for_txwin(txwin, attr, &winctx);
@@ -1064,56 +1067,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rc = -ENODEV;
goto free_window;
}
-
- /*
- * Window opened by a child thread may not be closed when
- * it exits. So take reference to its pid and release it
- * when the window is free by parent thread.
- * Acquire a reference to the task's pid to make sure
- * pid will not be re-used - needed only for multithread
- * applications.
- */
- txwin->pid = get_task_pid(current, PIDTYPE_PID);
- /*
- * Acquire a reference to the task's mm.
- */
- txwin->mm = get_task_mm(current);
-
- if (!txwin->mm) {
- put_pid(txwin->pid);
- pr_err("VAS: pid(%d): mm_struct is not found\n",
- current->pid);
- rc = -EPERM;
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
goto free_window;
- }
- mmgrab(txwin->mm);
- mmput(txwin->mm);
- mm_context_add_vas_window(txwin->mm);
- /*
- * Process closes window during exit. In the case of
- * multithread application, the child thread can open
- * window and can exit without closing it. Expects parent
- * thread to use and close the window. So do not need
- * to take pid reference for parent thread.
- */
- txwin->tgid = find_get_pid(task_tgid_vnr(current));
- /*
- * Even a process that has no foreign real address mapping can
- * use an unpaired COPY instruction (to no real effect). Issue
- * CP_ABORT to clear any pending COPY and prevent a covert
- * channel.
- *
- * __switch_to() will issue CP_ABORT on future context switches
- * if process / thread has any open VAS window (Use
- * current->mm->context.vas_windows).
- */
- asm volatile(PPC_CP_ABORT);
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
}
set_vinst_win(vinst, txwin);
- return txwin;
+ return &txwin->vas_win;
free_window:
vas_window_free(txwin);
@@ -1132,12 +1095,14 @@ int vas_copy_crb(void *crb, int offset)
EXPORT_SYMBOL_GPL(vas_copy_crb);
#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
-int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
+int vas_paste_crb(struct vas_window *vwin, int offset, bool re)
{
+ struct pnv_vas_window *txwin;
int rc;
void *addr;
uint64_t val;
+ txwin = container_of(vwin, struct pnv_vas_window, vas_win);
trace_vas_paste_crb(current, txwin);
/*
@@ -1167,7 +1132,7 @@ int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
else
rc = -EINVAL;
- pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
+ pr_debug("Txwin #%d: Msg count %llu\n", txwin->vas_win.winid,
read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));
return rc;
@@ -1187,7 +1152,7 @@ EXPORT_SYMBOL_GPL(vas_paste_crb);
* user space. (NX-842 driver waits for CSB and Fast thread-wakeup
* doesn't use credit checking).
*/
-static void poll_window_credits(struct vas_window *window)
+static void poll_window_credits(struct pnv_vas_window *window)
{
u64 val;
int creds, mode;
@@ -1217,7 +1182,7 @@ retry:
* and issue CRB Kill to stop all pending requests. Need only
* if there is a bug in NX or fault handling in kernel.
*/
- if (creds < window->wcreds_max) {
+ if (creds < window->vas_win.wcreds_max) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(10));
@@ -1228,7 +1193,8 @@ retry:
*/
if (!(count % 1000))
pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
- vas_window_pid(window), window->winid,
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid,
creds, count);
goto retry;
@@ -1240,7 +1206,7 @@ retry:
* short time to queue a CRB, so window should not be busy for too long.
* Trying 5ms intervals.
*/
-static void poll_window_busy_state(struct vas_window *window)
+static void poll_window_busy_state(struct pnv_vas_window *window)
{
int busy;
u64 val;
@@ -1260,7 +1226,8 @@ retry:
*/
if (!(count % 1000))
pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
- vas_window_pid(window), window->winid, count);
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid, count);
goto retry;
}
@@ -1282,7 +1249,7 @@ retry:
* casting out becomes necessary we should consider offloading the
* job to a worker thread, so the window close can proceed quickly.
*/
-static void poll_window_castout(struct vas_window *window)
+static void poll_window_castout(struct pnv_vas_window *window)
{
/* stub for now */
}
@@ -1291,7 +1258,7 @@ static void poll_window_castout(struct vas_window *window)
* Unpin and close a window so no new requests are accepted and the
* hardware can evict this window from cache if necessary.
*/
-static void unpin_close_window(struct vas_window *window)
+static void unpin_close_window(struct pnv_vas_window *window)
{
u64 val;
@@ -1313,11 +1280,15 @@ static void unpin_close_window(struct vas_window *window)
*
* Besides the hardware, kernel has some bookkeeping of course.
*/
-int vas_win_close(struct vas_window *window)
+int vas_win_close(struct vas_window *vwin)
{
- if (!window)
+ struct pnv_vas_window *window;
+
+ if (!vwin)
return 0;
+ window = container_of(vwin, struct pnv_vas_window, vas_win);
+
if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
pr_devel("Attempting to close an active Rx window!\n");
WARN_ON_ONCE(1);
@@ -1339,12 +1310,8 @@ int vas_win_close(struct vas_window *window)
/* if send window, drop reference to matching receive window */
if (window->tx_win) {
if (window->user_win) {
- /* Drop references to pid and mm */
- put_pid(window->pid);
- if (window->mm) {
- mm_context_remove_vas_window(window->mm);
- mmdrop(window->mm);
- }
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
}
put_rx_win(window->rxwin);
}
@@ -1377,7 +1344,7 @@ EXPORT_SYMBOL_GPL(vas_win_close);
* - The kernel will return credit on fault window after reading entry
* from fault FIFO.
*/
-void vas_return_credit(struct vas_window *window, bool tx)
+void vas_return_credit(struct pnv_vas_window *window, bool tx)
{
uint64_t val;
@@ -1391,10 +1358,10 @@ void vas_return_credit(struct vas_window *window, bool tx)
}
}
-struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
uint32_t pswid)
{
- struct vas_window *window;
+ struct pnv_vas_window *window;
int winid;
if (!pswid) {
@@ -1431,13 +1398,74 @@ struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
* by NX).
*/
if (!window->tx_win || !window->user_win || !window->nx_win ||
- window->cop == VAS_COP_TYPE_FAULT ||
- window->cop == VAS_COP_TYPE_FTW) {
+ window->vas_win.cop == VAS_COP_TYPE_FAULT ||
+ window->vas_win.cop == VAS_COP_TYPE_FTW) {
pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
winid, window->tx_win, window->user_win,
- window->nx_win, window->cop);
+ window->nx_win, window->vas_win.cop);
WARN_ON(1);
}
return window;
}
+
+static struct vas_window *vas_user_win_open(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ struct vas_tx_win_attr txattr = {};
+
+ vas_init_tx_win_attr(&txattr, cop_type);
+
+ txattr.lpid = mfspr(SPRN_LPID);
+ txattr.pidr = mfspr(SPRN_PID);
+ txattr.user_win = true;
+ txattr.rsvd_txbuf_count = false;
+ txattr.pswid = false;
+
+ pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
+ mfspr(SPRN_PID));
+
+ return vas_tx_win_open(vas_id, cop_type, &txattr);
+}
+
+static u64 vas_user_win_paste_addr(struct vas_window *txwin)
+{
+ struct pnv_vas_window *win;
+ u64 paste_addr;
+
+ win = container_of(txwin, struct pnv_vas_window, vas_win);
+ vas_win_paste_addr(win, &paste_addr, NULL);
+
+ return paste_addr;
+}
+
+static int vas_user_win_close(struct vas_window *txwin)
+{
+ vas_win_close(txwin);
+
+ return 0;
+}
+
+static const struct vas_user_win_ops vops = {
+ .open_win = vas_user_win_open,
+ .paste_addr = vas_user_win_paste_addr,
+ .close_win = vas_user_win_close,
+};
+
+/*
+ * Supporting only the nx-gzip coprocessor type now, but this API code
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+
+ return vas_register_coproc_api(mod, cop_type, name, &vops);
+}
+EXPORT_SYMBOL_GPL(vas_register_api_powernv);
+
+void vas_unregister_api_powernv(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_powernv);
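For orientation, a minimal sketch (not part of the patch) of the conversion pattern the hunks above rely on: callers keep the generic struct vas_window returned by vas_rx_win_open()/vas_tx_win_open(), and platform code recovers its own struct pnv_vas_window with container_of() because vas_win is embedded in it. The helper and function names below are hypothetical.

	/* Hypothetical helper; the patch open-codes container_of() instead. */
	static inline struct pnv_vas_window *to_pnv_window(struct vas_window *vwin)
	{
		return container_of(vwin, struct pnv_vas_window, vas_win);
	}

	/* Usage mirroring vas_paste_crb()/vas_win_close() above. */
	static int example_winid(struct vas_window *vwin)
	{
		struct pnv_vas_window *window = to_pnv_window(vwin);

		return window->vas_win.winid;	/* same id the caller sees */
	}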
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index c7db3190baca..8bb08e395de0 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -334,11 +334,11 @@ struct vas_instance {
int fifo_in_progress; /* To wake up thread or return IRQ_HANDLED */
spinlock_t fault_lock; /* Protects fifo_in_progress update */
void *fault_fifo;
- struct vas_window *fault_win; /* Fault window */
+ struct pnv_vas_window *fault_win; /* Fault window */
struct mutex mutex;
- struct vas_window *rxwin[VAS_COP_TYPE_MAX];
- struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+ struct pnv_vas_window *rxwin[VAS_COP_TYPE_MAX];
+ struct pnv_vas_window *windows[VAS_WINDOWS_PER_CHIP];
char *name;
char *dbgname;
@@ -346,32 +346,24 @@ struct vas_instance {
};
/*
- * In-kernel state a VAS window. One per window.
+ * In-kernel state of a VAS window on PowerNV. One per window.
*/
-struct vas_window {
+struct pnv_vas_window {
+ struct vas_window vas_win;
/* Fields common to send and receive windows */
struct vas_instance *vinst;
- int winid;
bool tx_win; /* True if send window */
bool nx_win; /* True if NX window */
bool user_win; /* True if user space window */
void *hvwc_map; /* HV window context */
void *uwc_map; /* OS/User window context */
- struct pid *pid; /* Linux process id of owner */
- struct pid *tgid; /* Thread group ID of owner */
- struct mm_struct *mm; /* Linux process mm_struct */
- int wcreds_max; /* Window credits */
-
- char *dbgname;
- struct dentry *dbgdir;
/* Fields applicable only to send windows */
void *paste_kaddr;
char *paste_addr_name;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
- /* Feilds applicable only to receive windows */
- enum vas_cop_type cop;
+ /* Fields applicable only to receive windows */
atomic_t num_txwins;
};
@@ -430,32 +422,32 @@ extern struct mutex vas_mutex;
extern struct vas_instance *find_vas_instance(int vasid);
extern void vas_init_dbgdir(void);
extern void vas_instance_init_dbgdir(struct vas_instance *vinst);
-extern void vas_window_init_dbgdir(struct vas_window *win);
-extern void vas_window_free_dbgdir(struct vas_window *win);
+extern void vas_window_init_dbgdir(struct pnv_vas_window *win);
+extern void vas_window_free_dbgdir(struct pnv_vas_window *win);
extern int vas_setup_fault_window(struct vas_instance *vinst);
extern irqreturn_t vas_fault_thread_fn(int irq, void *data);
extern irqreturn_t vas_fault_handler(int irq, void *dev_id);
-extern void vas_return_credit(struct vas_window *window, bool tx);
-extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+extern void vas_return_credit(struct pnv_vas_window *window, bool tx);
+extern struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
uint32_t pswid);
-extern void vas_win_paste_addr(struct vas_window *window, u64 *addr,
- int *len);
+extern void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr,
+ int *len);
static inline int vas_window_pid(struct vas_window *window)
{
- return pid_vnr(window->pid);
+ return pid_vnr(window->task_ref.pid);
}
-static inline void vas_log_write(struct vas_window *win, char *name,
+static inline void vas_log_write(struct pnv_vas_window *win, char *name,
void *regptr, u64 val)
{
if (val)
pr_debug("%swin #%d: %s reg %p, val 0x%016llx\n",
- win->tx_win ? "Tx" : "Rx", win->winid, name,
- regptr, val);
+ win->tx_win ? "Tx" : "Rx", win->vas_win.winid,
+ name, regptr, val);
}
-static inline void write_uwc_reg(struct vas_window *win, char *name,
+static inline void write_uwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -466,7 +458,7 @@ static inline void write_uwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline void write_hvwc_reg(struct vas_window *win, char *name,
+static inline void write_hvwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -477,7 +469,7 @@ static inline void write_hvwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline u64 read_hvwc_reg(struct vas_window *win,
+static inline u64 read_hvwc_reg(struct pnv_vas_window *win,
char *name __maybe_unused, s32 reg)
{
return in_be64(win->hvwc_map+reg);
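The header change above moves the bookkeeping common to PowerNV and pseries out of the platform struct. A rough sketch of the generic side, inferred only from the accesses in these hunks (winid, wcreds_max, cop, task_ref.pid, task_ref.mm); the authoritative definition lives in arch/powerpc/include/asm/vas.h and is not part of this excerpt, so treat the field set below as an assumption.

	/* Sketch only; fields inferred from the hunks above. */
	struct vas_user_win_ref {
		struct pid *pid;	/* pid of the task that opened the window */
		struct mm_struct *mm;	/* mm of that task */
	};

	struct vas_window {
		u32 winid;
		u32 wcreds_max;			/* window credits */
		enum vas_cop_type cop;
		struct vas_user_win_ref task_ref;
	};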
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 4d0535cc7946..a4048b8c8c50 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -86,6 +86,15 @@ config PS3_SYS_MANAGER
This support is required for PS3 system control. In
general, all users will say Y or M.
+config PS3_VERBOSE_RESULT
+ bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED
+ depends on PPC_PS3
+ help
+ Enables more verbose log messages for LV1 hypercall results.
+
+ If in doubt, say N here and reduce the size of the kernel by a
+ small amount.
+
config PS3_REPOSITORY_WRITE
bool "PS3 Repository write support" if PS3_ADVANCED
depends on PPC_PS3
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index d094321964fb..a81eac35d900 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -6,6 +6,7 @@
* Copyright 2006 Sony Corp.
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
@@ -1118,6 +1119,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
unsigned long lpar_addr;
+ int result;
lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
@@ -1129,6 +1131,16 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
r->offset -= map.r1.offset;
r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
+ dev->core.dma_mask = &r->dma_mask;
+
+ result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));
+
+ if (result < 0) {
+ dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
+ __func__, __LINE__, result);
+ return result;
+ }
+
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
r->region_ops = (USE_DYNAMIC_DMA)
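The ps3_dma_region_init() change above points dev->core.dma_mask at the region's mask and then declares the device's addressing capability. A minimal generic sketch of the same call, assuming an ordinary struct device (the PS3 code passes &dev->core); the function name is hypothetical.

	#include <linux/dma-mapping.h>

	static int example_setup_dma(struct device *dev)
	{
		int err;

		/* Device can address only 32 bits of DMA address space. */
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (err)
			return err;	/* DMA unusable at this width */

		return 0;
	}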
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index e9ae5dd03593..3de9145c20bc 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -36,6 +36,7 @@ DEFINE_MUTEX(ps3_gpu_mutex);
EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
static union ps3_firmware_version ps3_firmware_version;
+static char ps3_firmware_version_str[16];
void ps3_get_firmware_version(union ps3_firmware_version *v)
{
@@ -182,6 +183,40 @@ static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
}
+static ssize_t ps3_fw_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", ps3_firmware_version_str);
+}
+
+static int __init ps3_setup_sysfs(void)
+{
+ static struct kobj_attribute attr = __ATTR(fw-version, S_IRUGO,
+ ps3_fw_version_show, NULL);
+ static struct kobject *kobj;
+ int result;
+
+ kobj = kobject_create_and_add("ps3", firmware_kobj);
+
+ if (!kobj) {
+ pr_warn("%s:%d: kobject_create_and_add failed.\n", __func__,
+ __LINE__);
+ return -ENOMEM;
+ }
+
+ result = sysfs_create_file(kobj, &attr.attr);
+
+ if (result) {
+ pr_warn("%s:%d: sysfs_create_file failed.\n", __func__,
+ __LINE__);
+ kobject_put(kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(ps3_setup_sysfs);
+
static void __init ps3_setup_arch(void)
{
u64 tmp;
@@ -190,9 +225,11 @@ static void __init ps3_setup_arch(void)
lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
- printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
- ps3_firmware_version.major, ps3_firmware_version.minor,
- ps3_firmware_version.rev);
+ snprintf(ps3_firmware_version_str, sizeof(ps3_firmware_version_str),
+ "%u.%u.%u", ps3_firmware_version.major,
+ ps3_firmware_version.minor, ps3_firmware_version.rev);
+
+ printk(KERN_INFO "PS3 firmware version %s\n", ps3_firmware_version_str);
ps3_spu_set_platform();
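ps3_fw_version_show() above formats the cached string with sprintf(). A sketch of the same callback using sysfs_emit() instead, which later code in this series (dirty_shutdown_show() in papr_scm.c) already uses and which checks the PAGE_SIZE bound on sysfs output; the format string is left as in the original.

	static ssize_t ps3_fw_version_show(struct kobject *kobj,
					   struct kobj_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%s", ps3_firmware_version_str);
	}

From user space the value would then be read from /sys/firmware/ps3/fw-version, matching the kobject created under firmware_kobj above.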
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index b431f41c6cb5..1a5665875165 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -64,9 +64,10 @@ static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
if (result) {
- pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
- __LINE__, ps3_result(result));
- result = -EPERM;
+ pr_warn("%s:%d: lv1_open_device dev=%u.%u(%s) failed: %s\n",
+ __func__, __LINE__, dev->match_id, dev->match_sub_id,
+ dev_name(&dev->core), ps3_result(result));
+ result = -EPERM;
}
done:
@@ -120,7 +121,7 @@ static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
result = lv1_gpu_open(0);
if (result) {
- pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
+ pr_warn("%s:%d: lv1_gpu_open failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index c8a2b0b05ac0..4cda0ef87be0 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
obj-$(CONFIG_SUSPEND) += suspend.o
+obj-$(CONFIG_PPC_VAS) += vas.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 3ac70790ec7a..b1f01ac0c29e 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -289,8 +289,7 @@ int dlpar_acquire_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_UNUSABLE)
return -1;
@@ -311,8 +310,7 @@ int dlpar_release_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
@@ -333,8 +331,7 @@ int dlpar_unisolate_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 8377f1f7c78e..377d852f5a9a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -348,7 +348,8 @@ static int pseries_remove_mem_node(struct device_node *np)
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
- if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ if ((lmb->flags & DRCONF_MEM_RESERVED) ||
+ !(lmb->flags & DRCONF_MEM_ASSIGNED))
return false;
#ifdef CONFIG_FA_DUMP
@@ -401,7 +402,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
struct drmem_lmb *lmb;
- int lmbs_removed = 0;
+ int lmbs_reserved = 0;
int lmbs_available = 0;
int rc;
@@ -435,12 +436,12 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
*/
drmem_mark_lmb_reserved(lmb);
- lmbs_removed++;
- if (lmbs_removed == lmbs_to_remove)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_remove)
break;
}
- if (lmbs_removed != lmbs_to_remove) {
+ if (lmbs_reserved != lmbs_to_remove) {
pr_err("Memory hot-remove failed, adding LMB's back\n");
for_each_drmem_lmb(lmb) {
@@ -453,6 +454,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
@@ -466,6 +471,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->base_addr);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -508,7 +517,6 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
@@ -521,18 +529,29 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (rc)
return -EINVAL;
- /* Validate that there are enough LMBs to satisfy the request */
+ /*
+ * Validate that all LMBs in range are not reserved. Note that it
+ * is ok if they are !ASSIGNED since our goal here is to remove the
+ * LMB range, regardless of whether some LMBs were already removed
+ * by any other reason.
+ *
+ * This is a contrast to what is done in remove_by_count() where we
+ * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
+ * because we want to remove a fixed amount of LMBs in that function.
+ */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_remove)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
+ /*
+ * dlpar_remove_lmb() will error out if the LMB is already
+ * !ASSIGNED, but this case is a no-op for us.
+ */
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
continue;
@@ -551,6 +570,13 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (!drmem_lmb_reserved(lmb))
continue;
+ /*
+ * Setting the isolation state of an UNISOLATED/CONFIGURED
+ * device to UNISOLATE is a no-op, but the hypervisor can
+ * use it as a hint that the LMB removal failed.
+ */
+ dlpar_unisolate_drc(lmb->drc_index);
+
rc = dlpar_add_lmb(lmb);
if (rc)
pr_err("Failed to add LMB, drc index %x\n",
@@ -585,10 +611,6 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
-static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
-{
- return -EOPNOTSUPP;
-}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
return -EOPNOTSUPP;
@@ -651,7 +673,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
struct drmem_lmb *lmb;
int lmbs_available = 0;
- int lmbs_added = 0;
+ int lmbs_reserved = 0;
int rc;
pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
@@ -661,6 +683,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
/* Validate that there are enough LMBs to satisfy the request */
for_each_drmem_lmb(lmb) {
+ if (lmb->flags & DRCONF_MEM_RESERVED)
+ continue;
+
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
lmbs_available++;
@@ -689,13 +714,12 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
* requested LMBs cannot be added.
*/
drmem_mark_lmb_reserved(lmb);
-
- lmbs_added++;
- if (lmbs_added == lmbs_to_add)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_add)
break;
}
- if (lmbs_added != lmbs_to_add) {
+ if (lmbs_reserved != lmbs_to_add) {
pr_err("Memory hot-add failed, removing any added LMBs\n");
for_each_drmem_lmb(lmb) {
@@ -710,6 +734,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
dlpar_release_drc(lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
} else {
@@ -720,6 +748,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
pr_debug("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -764,7 +796,6 @@ static int dlpar_memory_add_by_index(u32 drc_index)
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
@@ -779,15 +810,14 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
/* Validate that the LMBs in this range are not reserved */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ /* Fail immediately if the whole range can't be hot-added */
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_add)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (lmb->flags & DRCONF_MEM_ASSIGNED)
continue;
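The add/remove-by-count rework above keeps a single lmbs_reserved counter so that the rollback and release loops touch only LMBs this call actually reserved, and stop as soon as that count drains to zero. A condensed sketch of the pattern, using only the drmem_*/dlpar_* helpers already visible in this hunk (DRC handling and logging are omitted; the function name is hypothetical):

	static int example_remove_by_count(u32 lmbs_to_remove)
	{
		struct drmem_lmb *lmb;
		int lmbs_reserved = 0;

		for_each_drmem_lmb(lmb) {
			if (dlpar_remove_lmb(lmb))
				continue;

			drmem_mark_lmb_reserved(lmb);
			if (++lmbs_reserved == lmbs_to_remove)
				break;
		}

		if (lmbs_reserved != lmbs_to_remove) {
			/* Roll back only what this call reserved. */
			for_each_drmem_lmb(lmb) {
				if (!drmem_lmb_reserved(lmb))
					continue;

				dlpar_add_lmb(lmb);
				drmem_remove_lmb_reservation(lmb);
				if (--lmbs_reserved == 0)
					break;
			}
			return -EINVAL;
		}

		/* Success: drop the reservations and stop once done. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			drmem_remove_lmb_reservation(lmb);
			if (--lmbs_reserved == 0)
				break;
		}
		return 0;
	}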
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 8a2b8d64265b..ab9fc6506861 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -108,6 +108,10 @@ _GLOBAL_TOC(plpar_hcall_norets_notrace)
mfcr r0
stw r0,8(r1)
HVSC /* invoke the hypervisor */
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */
@@ -120,6 +124,9 @@ _GLOBAL_TOC(plpar_hcall_norets)
HCALL_BRANCH(plpar_hcall_norets_trace)
HVSC /* invoke the hypervisor */
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */
@@ -129,6 +136,10 @@ plpar_hcall_norets_trace:
HCALL_INST_PRECALL(R4)
HVSC
HCALL_INST_POSTCALL_NORETS
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr
@@ -159,6 +170,9 @@ _GLOBAL_TOC(plpar_hcall)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -188,6 +202,9 @@ plpar_hcall_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -223,6 +240,9 @@ _GLOBAL(plpar_hcall_raw)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -262,6 +282,9 @@ _GLOBAL_TOC(plpar_hcall9)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -300,6 +323,9 @@ plpar_hcall9_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -339,6 +365,9 @@ _GLOBAL(plpar_hcall9_raw)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index ef26fe40efb0..f48e87ac89c9 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -18,6 +18,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
+#include <asm/unaligned.h>
#define BIND_ANY_ADDR (~0ul)
@@ -114,6 +115,9 @@ struct papr_scm_priv {
/* Health information for the dimm */
u64 health_bitmap;
+ /* Holds the last known dirty shutdown counter value */
+ u64 dirty_shutdown_counter;
+
/* length of the stat buffer as expected by phyp */
size_t stat_buffer_len;
};
@@ -260,7 +264,7 @@ err_out:
* Query the Dimm performance stats from PHYP and copy them (if returned) to
* provided struct papr_scm_perf_stats instance 'stats' that can hold at least
* (num_stats + header) bytes.
- * - If buff_stats == NULL the return value is the size in byes of the buffer
+ * - If buff_stats == NULL the return value is the size in bytes of the buffer
* needed to hold all supported performance-statistics.
* - If buff_stats != NULL and num_stats == 0 then we copy all known
* performance-statistics to 'buff_stat' and expect to be large enough to
@@ -310,6 +314,13 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
dev_err(&p->pdev->dev,
"Unknown performance stats, Err:0x%016lX\n", ret[0]);
return -ENOENT;
+ } else if (rc == H_AUTHORITY) {
+ dev_info(&p->pdev->dev,
+ "Permission denied while accessing performance stats");
+ return -EPERM;
+ } else if (rc == H_UNSUPPORTED) {
+ dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
+ return -EOPNOTSUPP;
} else if (rc != H_SUCCESS) {
dev_err(&p->pdev->dev,
"Failed to query performance stats, Err:%lld\n", rc);
@@ -596,6 +607,16 @@ free_stats:
return rc;
}
+/* Add the dirty-shutdown-counter value to the pdsm */
+static int papr_pdsm_dsc(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
+ payload->health.dimm_dsc = p->dirty_shutdown_counter;
+
+ return sizeof(struct nd_papr_pdsm_health);
+}
+
/* Fetch the DIMM health info and populate it in provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
@@ -639,6 +660,8 @@ static int papr_pdsm_health(struct papr_scm_priv *p,
/* Populate the fuel gauge meter in the payload */
papr_pdsm_fuel_gauge(p, payload);
+ /* Populate the dirty-shutdown-counter field */
+ papr_pdsm_dsc(p, payload);
rc = sizeof(struct nd_papr_pdsm_health);
@@ -900,15 +923,41 @@ static ssize_t flags_show(struct device *dev,
}
DEVICE_ATTR_RO(flags);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+
+ return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
+}
+DEVICE_ATTR_RO(dirty_shutdown);
+
+static umode_t papr_nd_attribute_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
+
+ /* If perf-stats are not available, hide the perf_stats sysfs attribute */
+ if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
+ return 0;
+
+ return attr->mode;
+}
+
/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
&dev_attr_flags.attr,
&dev_attr_perf_stats.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL,
};
static struct attribute_group papr_nd_attribute_group = {
.name = "papr",
+ .is_visible = papr_nd_attribute_visible,
.attrs = papr_nd_attributes,
};
@@ -924,7 +973,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
struct nd_region_desc ndr_desc;
unsigned long dimm_flags;
int target_nid, online_nid;
- ssize_t stat_size;
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
@@ -1009,16 +1057,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
list_add_tail(&p->region_list, &papr_nd_regions);
mutex_unlock(&papr_ndr_lock);
- /* Try retriving the stat buffer and see if its supported */
- stat_size = drc_pmem_query_stats(p, NULL, 0);
- if (stat_size > 0) {
- p->stat_buffer_len = stat_size;
- dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
- p->stat_buffer_len);
- } else {
- dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
- }
-
return 0;
err: nvdimm_bus_unregister(p->bus);
@@ -1094,8 +1132,10 @@ static int papr_scm_probe(struct platform_device *pdev)
u32 drc_index, metadata_size;
u64 blocks, block_size;
struct papr_scm_priv *p;
+ u8 uuid_raw[UUID_SIZE];
const char *uuid_str;
- u64 uuid[2];
+ ssize_t stat_size;
+ uuid_t uuid;
int rc;
/* check we have all the required DT properties */
@@ -1137,17 +1177,28 @@ static int papr_scm_probe(struct platform_device *pdev)
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
+ if (of_property_read_u64(dn, "ibm,persistence-failed-count",
+ &p->dirty_shutdown_counter))
+ p->dirty_shutdown_counter = 0;
+
/* We just need to ensure that set cookies are unique across */
- uuid_parse(uuid_str, (uuid_t *) uuid);
+ uuid_parse(uuid_str, &uuid);
+
/*
- * cookie1 and cookie2 are not really little endian
- * we store a little endian representation of the
- * uuid str so that we can compare this with the label
- * area cookie irrespective of the endian config with which
- * the kernel is built.
+ * The cookie1 and cookie2 are not really little endian.
+ * We store a raw buffer representation of the
+ * uuid string so that we can compare this with the label
+ * area cookie irrespective of the endian configuration
+ * with which the kernel is built.
+ *
+ * Historically we stored the cookie in the below format.
+ * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
+ * cookie1 was 0xfd423b0b671b5172
+ * cookie2 was 0xaabce8cae35b1d8d
*/
- p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
- p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
+ export_uuid(uuid_raw, &uuid);
+ p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
+ p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
/* might be zero */
p->metadata_size = metadata_size;
@@ -1172,6 +1223,14 @@ static int papr_scm_probe(struct platform_device *pdev)
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;
+ /* Try retrieving the stat buffer and see if it's supported */
+ stat_size = drc_pmem_query_stats(p, NULL, 0);
+ if (stat_size > 0) {
+ p->stat_buffer_len = stat_size;
+ dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
+ p->stat_buffer_len);
+ }
+
rc = papr_scm_nvdimm_init(p);
if (rc)
goto err2;
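The cookie comment above gives a worked example of the historical layout the new export_uuid()/get_unaligned_le64() code has to preserve. A minimal sketch of that derivation on its own, reusing only calls from the hunk (the function name is hypothetical); for the sample uuid in the comment it should reproduce cookie1 0xfd423b0b671b5172 and cookie2 0xaabce8cae35b1d8d regardless of kernel endianness.

	static void example_uuid_cookies(const char *uuid_str)
	{
		u8 uuid_raw[UUID_SIZE];
		u64 cookie1, cookie2;
		uuid_t uuid;

		if (uuid_parse(uuid_str, &uuid))
			return;		/* malformed uuid string */

		export_uuid(uuid_raw, &uuid);	/* canonical byte order */
		cookie1 = get_unaligned_le64(&uuid_raw[0]);
		cookie2 = get_unaligned_le64(&uuid_raw[8]);

		pr_debug("cookie1 %#llx cookie2 %#llx\n", cookie1, cookie2);
	}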
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 9d4ef65da7f3..167f2e1b8d39 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -487,8 +487,8 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
if ((be64_to_cpu(regs->msr) &
(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
- regs->nip = be64_to_cpu((__be64)regs->nip);
- regs->msr = 0;
+ regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
+ regs_set_return_msr(regs, 0);
}
#endif
@@ -593,8 +593,6 @@ static int mce_handle_err_virtmode(struct pt_regs *regs,
mce_err.severity = MCE_SEV_SEVERE;
else if (severity == RTAS_SEVERITY_ERROR)
mce_err.severity = MCE_SEV_SEVERE;
- else if (severity == RTAS_SEVERITY_FATAL)
- mce_err.severity = MCE_SEV_FATAL;
else
mce_err.severity = MCE_SEV_FATAL;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 754e493b7c05..0dfaa6ab44cc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -77,7 +77,7 @@
#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
* H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
* H_CPU_BEHAV_FAVOUR_SECURITY is.
*/
- if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
- else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+ pseries_security_flavor = 0;
+ } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
pseries_security_flavor = 1;
else
pseries_security_flavor = 2;
@@ -549,6 +550,15 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_STF_BARRIER)
+ security_ftr_clear(SEC_FTR_STF_BARRIER);
+
if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c70b4be9f0a5..f47429323eee 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -105,9 +105,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
return 1;
}
- /* Fixup atomic count: it exited inside IRQ handler. */
- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
-
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
@@ -211,7 +208,9 @@ static __init void pSeries_smp_probe(void)
if (!cpu_has_feature(CPU_FTR_SMT))
return;
- if (check_kvm_guest()) {
+ check_kvm_guest();
+
+ if (is_kvm_guest()) {
/*
* KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
* faults to the hypervisor which then reads the instruction
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
new file mode 100644
index 000000000000..b5c1cf1bc64d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <asm/machdep.h>
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/vas.h>
+#include "vas.h"
+
+#define VAS_INVALID_WIN_ADDRESS 0xFFFFFFFFFFFFFFFFul
+#define VAS_DEFAULT_DOMAIN_ID 0xFFFFFFFFFFFFFFFFul
+/* The hypervisor allows one credit per window right now */
+#define DEF_WIN_CREDS 1
+
+static struct vas_all_caps caps_all;
+static bool copypaste_feat;
+
+static struct vas_caps vascaps[VAS_MAX_FEAT_TYPE];
+static DEFINE_MUTEX(vas_pseries_mutex);
+
+static long hcall_return_busy_check(long rc)
+{
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate VAS window hcall
+ */
+static int h_allocate_vas_window(struct pseries_vas_window *win, u64 *domain,
+ u8 wintype, u16 credits)
+{
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
+
+ do {
+ rc = plpar_hcall9(H_ALLOCATE_VAS_WINDOW, retbuf, wintype,
+ credits, domain[0], domain[1], domain[2],
+ domain[3], domain[4], domain[5]);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS) {
+ if (win->win_addr == VAS_INVALID_WIN_ADDRESS) {
+ pr_err("H_ALLOCATE_VAS_WINDOW: COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+ win->vas_win.winid = retbuf[0];
+ win->win_addr = retbuf[1];
+ win->complete_irq = retbuf[2];
+ win->fault_irq = retbuf[3];
+ return 0;
+ }
+
+ pr_err("H_ALLOCATE_VAS_WINDOW error: %ld, wintype: %u, credits: %u\n",
+ rc, wintype, credits);
+
+ return -EIO;
+}
+
+/*
+ * Deallocate VAS window hcall.
+ */
+static int h_deallocate_vas_window(u64 winid)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_DEALLOCATE_VAS_WINDOW, winid);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_DEALLOCATE_VAS_WINDOW error: %ld, winid: %llu\n",
+ rc, winid);
+ return -EIO;
+}
+
+/*
+ * Modify VAS window.
+ * After the window is opened with allocate window hcall, configure it
+ * with flags and LPAR PID before using.
+ */
+static int h_modify_vas_window(struct pseries_vas_window *win)
+{
+ long rc;
+ u32 lpid = mfspr(SPRN_PID);
+
+ /*
+ * AMR value is not supported in Linux VAS implementation.
+ * The hypervisor ignores it if 0 is passed.
+ */
+ do {
+ rc = plpar_hcall_norets(H_MODIFY_VAS_WINDOW,
+ win->vas_win.winid, lpid, 0,
+ VAS_MOD_WIN_FLAGS, 0);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_MODIFY_VAS_WINDOW error: %ld, winid %u lpid %u\n",
+ rc, win->vas_win.winid, lpid);
+ return -EIO;
+}
+
+/*
+ * This hcall is used to determine the capabilities from the hypervisor.
+ * @hcall: H_QUERY_VAS_CAPABILITIES or H_QUERY_NX_CAPABILITIES
+ * @query_type: If 0 is passed, the hypervisor returns the overall
+ * capabilities which provides all feature(s) that are
+ * available. Then query the hypervisor to get the
+ * corresponding capabilities for the specific feature.
+ * Example: H_QUERY_VAS_CAPABILITIES provides VAS GZIP QoS
+ * and VAS GZIP Default capabilities.
+ * H_QUERY_NX_CAPABILITIES provides NX GZIP
+ * capabilities.
+ * @result: Return buffer to save capabilities.
+ */
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(hcall, query_type, result);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("HCALL(%llx) error %ld, query_type %u, result buffer 0x%llx\n",
+ hcall, rc, query_type, result);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(h_query_vas_capabilities);
+
+/*
+ * hcall to get fault CRB from the hypervisor.
+ */
+static int h_get_nx_fault(u32 winid, u64 buffer)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(H_GET_NX_FAULT, winid, buffer);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_GET_NX_FAULT error: %ld, winid %u, buffer 0x%llx\n",
+ rc, winid, buffer);
+ return -EIO;
+
+}
+
+/*
+ * Handle the fault interrupt.
+ * When the fault interrupt is received for each window, query the
+ * hypervisor to get the fault CRB on the specific fault. Then
+ * process the CRB by updating CSB or send signal if the user space
+ * CSB is invalid.
+ * Note: The hypervisor forwards an interrupt for each fault request.
+ * So one fault CRB to process for each H_GET_NX_FAULT hcall.
+ */
+irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
+{
+ struct pseries_vas_window *txwin = data;
+ struct coprocessor_request_block crb;
+ struct vas_user_win_ref *tsk_ref;
+ int rc;
+
+ rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
+ if (!rc) {
+ tsk_ref = &txwin->vas_win.task_ref;
+ vas_dump_crb(&crb);
+ vas_update_csb(&crb, tsk_ref);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allocate window and setup IRQ mapping.
+ */
+static int allocate_setup_window(struct pseries_vas_window *txwin,
+ u64 *domain, u8 wintype)
+{
+ int rc;
+
+ rc = h_allocate_vas_window(txwin, domain, wintype, DEF_WIN_CREDS);
+ if (rc)
+ return rc;
+ /*
+ * On PowerVM, the hypervisor sets up and forwards the fault
+ * interrupt per window. So the IRQ setup and fault handling
+ * will be done for each open window separately.
+ */
+ txwin->fault_virq = irq_create_mapping(NULL, txwin->fault_irq);
+ if (!txwin->fault_virq) {
+ pr_err("Failed irq mapping %d\n", txwin->fault_irq);
+ rc = -EINVAL;
+ goto out_win;
+ }
+
+ txwin->name = kasprintf(GFP_KERNEL, "vas-win-%d",
+ txwin->vas_win.winid);
+ if (!txwin->name) {
+ rc = -ENOMEM;
+ goto out_irq;
+ }
+
+ rc = request_threaded_irq(txwin->fault_virq, NULL,
+ pseries_vas_fault_thread_fn, IRQF_ONESHOT,
+ txwin->name, txwin);
+ if (rc) {
+ pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
+ txwin->vas_win.winid, txwin->fault_virq, rc);
+ goto out_free;
+ }
+
+ txwin->vas_win.wcreds_max = DEF_WIN_CREDS;
+
+ return 0;
+out_free:
+ kfree(txwin->name);
+out_irq:
+ irq_dispose_mapping(txwin->fault_virq);
+out_win:
+ h_deallocate_vas_window(txwin->vas_win.winid);
+ return rc;
+}
+
+static inline void free_irq_setup(struct pseries_vas_window *txwin)
+{
+ free_irq(txwin->fault_virq, txwin);
+ kfree(txwin->name);
+ irq_dispose_mapping(txwin->fault_virq);
+}
+
+static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
+ struct vas_cop_feat_caps *cop_feat_caps;
+ struct vas_caps *caps;
+ struct pseries_vas_window *txwin;
+ int rc;
+
+ txwin = kzalloc(sizeof(*txwin), GFP_KERNEL);
+ if (!txwin)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * A VAS window can have many credits which means that many
+ * requests can be issued simultaneously. But the hypervisor
+ * restricts one credit per window.
+ * The hypervisor introduces 2 different types of credits:
+ * Default credit type (Uses normal priority FIFO):
+ * A limited number of credits are assigned to partitions
+ * based on processor entitlement. But these credits may be
+ * over-committed on a system depending on whether the CPUs
+ * are in shared or dedicated modes - that is, more requests
+ * may be issued across the system than NX can service at
+ * once which can result in paste command failure (RMA_busy).
+ * Then the process has to resend requests or fall-back to
+ * SW compression.
+ * Quality of Service (QoS) credit type (Uses high priority FIFO):
+ * To avoid NX HW contention, the system admins can assign
+ * QoS credits for each LPAR so that this partition is
+ * guaranteed access to NX resources. These credits are
+ * assigned to partitions via the HMC.
+ * Refer PAPR for more information.
+ *
+ * Allocate window with QoS credits if user requested. Otherwise
+ * default credits are used.
+ */
+ if (flags & VAS_TX_WIN_FLAG_QOS_CREDIT)
+ caps = &vascaps[VAS_GZIP_QOS_FEAT_TYPE];
+ else
+ caps = &vascaps[VAS_GZIP_DEF_FEAT_TYPE];
+
+ cop_feat_caps = &caps->caps;
+
+ if (atomic_inc_return(&cop_feat_caps->used_lpar_creds) >
+ atomic_read(&cop_feat_caps->target_lpar_creds)) {
+ pr_err("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (vas_id == -1) {
+ /*
+ * The user space is requesting to allocate a window on
+ * a VAS instance where the process is executing.
+ * On PowerVM, domain values are passed to the hypervisor
+ * to select the VAS instance. Useful if the process has
+ * affinity to a NUMA node.
+ * The hypervisor selects VAS instance if
+ * VAS_DEFAULT_DOMAIN_ID (-1) is passed for domain values.
+ * The h_allocate_vas_window hcall is defined to take
+ * domain values as specified by h_home_node_associativity,
+ * so no unpacking needs to be done.
+ */
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
+ VPHN_FLAG_VCPU, smp_processor_id());
+ if (rc != H_SUCCESS) {
+ pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
+ goto out;
+ }
+ }
+
+ /*
+ * Allocate / Deallocate window hcalls and setup / free IRQs
+ * have to be protected with mutex.
+ * Open VAS window: Allocate window hcall and setup IRQ
+ * Close VAS window: Deallocate window hcall and free IRQ
+ * The hypervisor waits until all NX requests are
+ * completed before closing the window. So the OS is expected
+ * to handle NX faults, which means the IRQ can be freed only
+ * after the deallocate window hcall has returned.
+ * If the window is closed with the deallocate hcall before
+ * the IRQ is freed, the hypervisor can hand the same fault
+ * IRQ to a new allocate hcall. IRQ setup for that new window
+ * would then fail because the OS has not yet freed the
+ * previous fault IRQ.
+ */
+ mutex_lock(&vas_pseries_mutex);
+ rc = allocate_setup_window(txwin, (u64 *)&domain[0],
+ cop_feat_caps->win_type);
+ mutex_unlock(&vas_pseries_mutex);
+ if (rc)
+ goto out;
+
+ /*
+ * Modify window and it is ready to use.
+ */
+ rc = h_modify_vas_window(txwin);
+ if (!rc)
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
+ goto out_free;
+
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
+ txwin->win_type = cop_feat_caps->win_type;
+ mutex_lock(&vas_pseries_mutex);
+ list_add(&txwin->win_list, &caps->list);
+ mutex_unlock(&vas_pseries_mutex);
+
+ return &txwin->vas_win;
+
+out_free:
+ /*
+ * The window is not operational. Free the IRQ before closing
+ * the window so that the mutex does not have to be held.
+ */
+ free_irq_setup(txwin);
+ h_deallocate_vas_window(txwin->vas_win.winid);
+out:
+ atomic_dec(&cop_feat_caps->used_lpar_creds);
+ kfree(txwin);
+ return ERR_PTR(rc);
+}
+
+static u64 vas_paste_address(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+ return win->win_addr;
+}
+
+static int deallocate_free_window(struct pseries_vas_window *win)
+{
+ int rc = 0;
+
+ /*
+ * The hypervisor waits until all requests, including faults,
+ * are processed before closing the window - meaning all
+ * credits have to be returned. In the case of a fault
+ * request, a credit is returned after the OS issues the
+ * H_GET_NX_FAULT hcall.
+ * So free the IRQ only after the H_DEALLOCATE_VAS_WINDOW
+ * hcall has been executed.
+ */
+ rc = h_deallocate_vas_window(win->vas_win.winid);
+ if (!rc)
+ free_irq_setup(win);
+
+ return rc;
+}
+
+static int vas_deallocate_window(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+ struct vas_cop_feat_caps *caps;
+ int rc = 0;
+
+ if (!vwin)
+ return -EINVAL;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+
+ /* Should not happen */
+ if (win->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Window (%u): Invalid window type %u\n",
+ vwin->winid, win->win_type);
+ return -EINVAL;
+ }
+
+ caps = &vascaps[win->win_type].caps;
+ mutex_lock(&vas_pseries_mutex);
+ rc = deallocate_free_window(win);
+ if (rc) {
+ mutex_unlock(&vas_pseries_mutex);
+ return rc;
+ }
+
+ list_del(&win->win_list);
+ atomic_dec(&caps->used_lpar_creds);
+ mutex_unlock(&vas_pseries_mutex);
+
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
+
+ kfree(win);
+ return 0;
+}
+
+static const struct vas_user_win_ops vops_pseries = {
+ .open_win = vas_allocate_window, /* Open and configure window */
+ .paste_addr = vas_paste_address, /* To do copy/paste */
+ .close_win = vas_deallocate_window, /* Close window */
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+ int rc;
+
+ if (!copypaste_feat)
+ return -ENOTSUPP;
+
+ rc = vas_register_coproc_api(mod, cop_type, name, &vops_pseries);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vas_register_api_pseries);
+
+void vas_unregister_api_pseries(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_pseries);
+
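As an illustrative aside (not part of this patch), a coprocessor driver such as nx-gzip would be expected to pair these entry points roughly as below; the VAS_COP_TYPE_GZIP constant and the module init/exit names are assumptions of this sketch.

static int __init nx_gzip_sketch_init(void)
{
	/* expose the pseries VAS user-space API for the GZIP engine */
	return vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
					"nx-gzip");
}

static void __exit nx_gzip_sketch_exit(void)
{
	vas_unregister_api_pseries();
}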
+/*
+ * Get the specific capabilities based on the feature type.
+ * Right now supports GZIP default and GZIP QoS capabilities.
+ */
+static int get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
+ struct hv_vas_cop_feat_caps *hv_caps)
+{
+ struct vas_cop_feat_caps *caps;
+ struct vas_caps *vcaps;
+ int rc = 0;
+
+ vcaps = &vascaps[type];
+ memset(vcaps, 0, sizeof(*vcaps));
+ INIT_LIST_HEAD(&vcaps->list);
+
+ caps = &vcaps->caps;
+
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, feat,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ return rc;
+
+ caps->user_mode = hv_caps->user_mode;
+ if (!(caps->user_mode & VAS_COPY_PASTE_USER_MODE)) {
+ pr_err("User space COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ caps->descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps->win_type = hv_caps->win_type;
+ if (caps->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Unsupported window type %u\n", caps->win_type);
+ return -EINVAL;
+ }
+ caps->max_lpar_creds = be16_to_cpu(hv_caps->max_lpar_creds);
+ caps->max_win_creds = be16_to_cpu(hv_caps->max_win_creds);
+ atomic_set(&caps->target_lpar_creds,
+ be16_to_cpu(hv_caps->target_lpar_creds));
+ if (feat == VAS_GZIP_DEF_FEAT) {
+ caps->def_lpar_creds = be16_to_cpu(hv_caps->def_lpar_creds);
+
+ if (caps->max_win_creds < DEF_WIN_CREDS) {
+ pr_err("Window creds(%u) > max allowed window creds(%u)\n",
+ DEF_WIN_CREDS, caps->max_win_creds);
+ return -EINVAL;
+ }
+ }
+
+ copypaste_feat = true;
+
+ return 0;
+}
+
+static int __init pseries_vas_init(void)
+{
+ struct hv_vas_cop_feat_caps *hv_cop_caps;
+ struct hv_vas_all_caps *hv_caps;
+ int rc;
+
+ /*
+ * Linux supports user space COPY/PASTE only with Radix
+ */
+ if (!radix_enabled()) {
+ pr_err("API is supported only with radix page tables\n");
+ return -ENOTSUPP;
+ }
+
+ hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
+ if (!hv_caps)
+ return -ENOMEM;
+ /*
+ * Get the overall VAS capabilities by passing 0 as the feature type.
+ */
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, 0,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ goto out;
+
+ caps_all.descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps_all.feat_type = be64_to_cpu(hv_caps->feat_type);
+
+ hv_cop_caps = kmalloc(sizeof(*hv_cop_caps), GFP_KERNEL);
+ if (!hv_cop_caps) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /*
+ * QOS capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_QOS_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_QOS_FEAT,
+ VAS_GZIP_QOS_FEAT_TYPE, hv_cop_caps);
+
+ if (rc)
+ goto out_cop;
+ }
+ /*
+ * Default capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_DEF_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_DEF_FEAT,
+ VAS_GZIP_DEF_FEAT_TYPE, hv_cop_caps);
+ if (rc)
+ goto out_cop;
+ }
+
+ pr_info("GZIP feature is available\n");
+
+out_cop:
+ kfree(hv_cop_caps);
+out:
+ kfree(hv_caps);
+ return rc;
+}
+machine_device_initcall(pseries, pseries_vas_init);
diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
new file mode 100644
index 000000000000..4ecb3fcabd10
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#ifndef _VAS_H
+#define _VAS_H
+#include <asm/vas.h>
+#include <linux/mutex.h>
+#include <linux/stringify.h>
+
+/*
+ * VAS window modify flags
+ */
+#define VAS_MOD_WIN_CLOSE PPC_BIT(0)
+#define VAS_MOD_WIN_JOBS_KILL PPC_BIT(1)
+#define VAS_MOD_WIN_DR PPC_BIT(3)
+#define VAS_MOD_WIN_PR PPC_BIT(4)
+#define VAS_MOD_WIN_SF PPC_BIT(5)
+#define VAS_MOD_WIN_TA PPC_BIT(6)
+#define VAS_MOD_WIN_FLAGS (VAS_MOD_WIN_JOBS_KILL | VAS_MOD_WIN_DR | \
+ VAS_MOD_WIN_PR | VAS_MOD_WIN_SF)
+
+#define VAS_WIN_ACTIVE 0x0
+#define VAS_WIN_CLOSED 0x1
+#define VAS_WIN_INACTIVE 0x2 /* Inactive due to HW failure */
+/* In the process of being modified, deallocated, or quiesced */
+#define VAS_WIN_MOD_IN_PROCESS 0x3
+
+#define VAS_COPY_PASTE_USER_MODE 0x00000001
+#define VAS_COP_OP_USER_MODE 0x00000010
+
+/*
+ * Co-processor feature - GZIP QoS windows or GZIP default windows
+ */
+enum vas_cop_feat_type {
+ VAS_GZIP_QOS_FEAT_TYPE,
+ VAS_GZIP_DEF_FEAT_TYPE,
+ VAS_MAX_FEAT_TYPE,
+};
+
+/*
+ * Used to get feature-specific capabilities from the
+ * hypervisor.
+ */
+struct hv_vas_cop_feat_caps {
+ __be64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode;
+ __be16 max_lpar_creds;
+ __be16 max_win_creds;
+ union {
+ __be16 reserved;
+ __be16 def_lpar_creds; /* Used for default capabilities */
+ };
+ __be16 target_lpar_creds;
+} __packed __aligned(0x1000);
+
+/*
+ * Feature specific (QoS or default) capabilities.
+ */
+struct vas_cop_feat_caps {
+ u64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode; /* User mode copy/paste or COP HCALL */
+ u16 max_lpar_creds; /* Max credits available in LPAR */
+ /* Max credits can be assigned per window */
+ u16 max_win_creds;
+ union {
+ u16 reserved; /* Used for QoS credit type */
+ u16 def_lpar_creds; /* Used for default credit type */
+ };
+ /* Total LPAR available credits. Can be different from max LPAR */
+ /* credits due to DLPAR operation */
+ atomic_t target_lpar_creds;
+ atomic_t used_lpar_creds; /* Used credits so far */
+ u16 avail_lpar_creds; /* Remaining available credits */
+};
+
+/*
+ * Per-feature (QoS or default) structure to store capabilities and
+ * the list of open windows.
+ */
+struct vas_caps {
+ struct vas_cop_feat_caps caps;
+ struct list_head list; /* List of open windows */
+};
+
+/*
+ * To get window information from the hypervisor.
+ */
+struct hv_vas_win_lpar {
+ __be16 version;
+ u8 win_type;
+ u8 status;
+ __be16 credits; /* No of credits assigned to this window */
+ __be16 reserved;
+ __be32 pid; /* LPAR Process ID */
+ __be32 tid; /* LPAR Thread ID */
+ __be64 win_addr; /* Paste address */
+ __be32 interrupt; /* Interrupt when NX request completes */
+ __be32 fault; /* Interrupt when NX sees fault */
+ /* Associativity Domain Identifiers as returned in */
+ /* H_HOME_NODE_ASSOCIATIVITY */
+ __be64 domain[6];
+ __be64 win_util; /* Number of bytes processed */
+} __packed __aligned(0x1000);
+
+struct pseries_vas_window {
+ struct vas_window vas_win;
+ u64 win_addr; /* Physical paste address */
+ u8 win_type; /* QoS or Default window */
+ u32 complete_irq; /* Completion interrupt */
+ u32 fault_irq; /* Fault interrupt */
+ /* Associativity domain Ids used to allocate this window */
+ u64 domain[6];
+ u64 util;
+
+ /* List of opened windows, used for LPM */
+ struct list_head win_list;
+ u64 flags;
+ char *name;
+ int fault_virq;
+};
+#endif /* _VAS_H */
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 69af73765783..b8f76f3fd994 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1072,7 +1072,7 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
ret = get_kernel_nofault(inst, (void *)regs->nip);
if (!ret && mcheck_handle_load(regs, inst)) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 1;
}
}
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 07c164f7f8cf..5a95b8ea23d8 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -108,8 +108,8 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
__func__);
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
0);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 304614c920aa..063d9195891f 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -12,3 +12,6 @@ config PPC_ICP_HV
config PPC_ICS_RTAS
def_bool n
+
+config PPC_ICS_NATIVE
+ def_bool n
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index ba1e3117b1c0..747063927c6c 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -4,4 +4,5 @@ obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
+obj-$(CONFIG_PPC_ICS_NATIVE) += ics-native.o
obj-$(CONFIG_PPC_POWERNV) += ics-opal.o icp-opal.o
diff --git a/arch/powerpc/sysdev/xics/ics-native.c b/arch/powerpc/sysdev/xics/ics-native.c
new file mode 100644
index 000000000000..d450502f4053
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-native.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ICS backend for native, directly (MMIO) managed interrupt sources.
+ *
+ * Copyright 2011 IBM Corp.
+ */
+
+//#define DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+#include <linux/list.h>
+
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+struct ics_native {
+ struct ics ics;
+ struct device_node *node;
+ void __iomem *base;
+ u32 ibase;
+ u32 icount;
+};
+#define to_ics_native(_ics) container_of(_ics, struct ics_native, ics)
+
+static void __iomem *ics_native_xive(struct ics_native *in, unsigned int vec)
+{
+ return in->base + 0x800 + ((vec - in->ibase) << 2);
+}
+
+static void ics_native_unmask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ unsigned int server;
+
+ pr_devel("ics-native: unmask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
+ out_be32(ics_native_xive(in, vec), (server << 8) | DEFAULT_PRIORITY);
+}
+
+static unsigned int ics_native_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_data_get_msi_desc(d))
+ pci_msi_unmask_irq(d);
+#endif
+
+ /* unmask it */
+ ics_native_unmask_irq(d);
+ return 0;
+}
+
+static void ics_native_do_mask(struct ics_native *in, unsigned int vec)
+{
+ out_be32(ics_native_xive(in, vec), 0xff);
+}
+
+static void ics_native_mask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("ics-native: mask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+ ics_native_do_mask(in, vec);
+}
+
+static int ics_native_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ int server;
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ server = xics_get_irq_server(d->irq, cpumask, 1);
+ if (server == -1) {
+ pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
+ __func__, cpumask_pr_args(cpumask), d->irq);
+ return -1;
+ }
+
+ xive = in_be32(ics_native_xive(in, vec));
+ xive = (xive & 0xff) | (server << 8);
+ out_be32(ics_native_xive(in, vec), xive);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_native_irq_chip = {
+ .name = "ICS",
+ .irq_startup = ics_native_startup,
+ .irq_mask = ics_native_mask_irq,
+ .irq_unmask = ics_native_unmask_irq,
+ .irq_eoi = NULL, /* Patched at init time */
+ .irq_set_affinity = ics_native_set_affinity,
+ .irq_set_type = xics_set_irq_type,
+ .irq_retrigger = xics_retrigger,
+};
+
+static int ics_native_map(struct ics *ics, unsigned int virq)
+{
+ unsigned int vec = (unsigned int)virq_to_hw(virq);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("%s: vec=0x%x\n", __func__, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ irq_set_chip_and_handler(virq, &ics_native_irq_chip, handle_fasteoi_irq);
+ irq_set_chip_data(virq, ics);
+
+ return 0;
+}
+
+static void ics_native_mask_unknown(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ ics_native_do_mask(in, vec);
+}
+
+static long ics_native_get_server(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ xive = in_be32(ics_native_xive(in, vec));
+ return (xive >> 8) & 0xfff;
+}
+
+static int ics_native_host_match(struct ics *ics, struct device_node *node)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ return in->node == node;
+}
+
+static struct ics ics_native_template = {
+ .map = ics_native_map,
+ .mask_unknown = ics_native_mask_unknown,
+ .get_server = ics_native_get_server,
+ .host_match = ics_native_host_match,
+};
+
+static int __init ics_native_add_one(struct device_node *np)
+{
+ struct ics_native *ics;
+ u32 ranges[2];
+ int rc, count;
+
+ ics = kzalloc(sizeof(struct ics_native), GFP_KERNEL);
+ if (!ics)
+ return -ENOMEM;
+ ics->node = of_node_get(np);
+ memcpy(&ics->ics, &ics_native_template, sizeof(struct ics));
+
+ ics->base = of_iomap(np, 0);
+ if (!ics->base) {
+ pr_err("Failed to map %pOFP\n", np);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ count = of_property_count_u32_elems(np, "interrupt-ranges");
+ if (count < 2 || count & 1) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ rc = -EINVAL;
+ goto fail;
+ }
+ if (count > 2) {
+ pr_warn("ICS %pOFP has %d ranges, only one supported\n",
+ np, count >> 1);
+ }
+ rc = of_property_read_u32_array(np, "interrupt-ranges",
+ ranges, 2);
+ if (rc) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ goto fail;
+ }
+ ics->ibase = ranges[0];
+ ics->icount = ranges[1];
+
+ pr_info("ICS native initialized for sources %d..%d\n",
+ ics->ibase, ics->ibase + ics->icount - 1);
+
+ /* Register ourselves */
+ xics_register_ics(&ics->ics);
+
+ return 0;
+fail:
+ of_node_put(ics->node);
+ kfree(ics);
+ return rc;
+}
+
+int __init ics_native_init(void)
+{
+ struct device_node *ics;
+ bool found_one = false;
+
+ /* We need to patch our irq chip's EOI to point to the
+ * right ICP
+ */
+ ics_native_irq_chip.irq_eoi = icp_ops->eoi;
+
+ /* Find native ICS in the device-tree */
+ for_each_compatible_node(ics, NULL, "openpower,xics-sources") {
+ if (ics_native_add_one(ics) == 0)
+ found_one = true;
+ }
+
+ if (found_one)
+ pr_info("ICS native backend registered\n");
+
+ return found_one ? 0 : -ENODEV;
+}
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index fdf8db4444b6..b14c502e56a8 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -477,6 +477,8 @@ void __init xics_init(void)
if (rc < 0)
rc = ics_opal_init();
if (rc < 0)
+ rc = ics_native_init();
+ if (rc < 0)
pr_warn("XICS: Cannot find a Source Controller !\n");
/* Initialize common bits */
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index a8304327072d..943fd30095af 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
static struct xive_ipi_desc {
unsigned int irq;
char name[16];
+ atomic_t started;
} *xive_ipis;
/*
@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
.alloc = xive_ipi_irq_domain_alloc,
};
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
{
struct fwnode_handle *fwnode;
struct irq_domain *ipi_domain;
@@ -1144,27 +1145,17 @@ static int __init xive_request_ipi(void)
struct xive_ipi_desc *xid = &xive_ipis[node];
struct xive_ipi_alloc_info info = { node };
- /* Skip nodes without CPUs */
- if (cpumask_empty(cpumask_of_node(node)))
- continue;
-
/*
* Map one IPI interrupt per node for all cpus of that node.
* Since the HW interrupt number doesn't have any meaning,
* simply use the node number.
*/
- xid->irq = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
- if (xid->irq < 0) {
- ret = xid->irq;
+ ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
+ if (ret < 0)
goto out_free_xive_ipis;
- }
+ xid->irq = ret;
snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
- ret = request_irq(xid->irq, xive_muxed_ipi_action,
- IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
- WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
}
return ret;
@@ -1179,6 +1170,22 @@ out:
return ret;
}
+static int __init xive_request_ipi(unsigned int cpu)
+{
+ struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+ int ret;
+
+ if (atomic_inc_return(&xid->started) > 1)
+ return 0;
+
+ ret = request_irq(xid->irq, xive_muxed_ipi_action,
+ IRQF_PERCPU | IRQF_NO_THREAD,
+ xid->name, NULL);
+
+ WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+ return ret;
+}
+
static int xive_setup_cpu_ipi(unsigned int cpu)
{
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1193,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0;
+ /* Register the IPI */
+ xive_request_ipi(cpu);
+
/* Grab an IPI from the backend, this will populate xc->hw_ipi */
if (xive_ops->get_ipi(cpu, xc))
return -EIO;
@@ -1232,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
+ /* TODO: clear IPI mapping */
+
/* Mask the IPI */
xive_do_source_set_mask(&xc->ipi_data, true);
@@ -1254,7 +1266,7 @@ void __init xive_smp_probe(void)
smp_ops->cause_ipi = xive_cause_ipi;
/* Register the IPI */
- xive_request_ipi();
+ xive_init_ipis();
/* Allocate and setup IPI for the boot CPU */
xive_setup_cpu_ipi(smp_processor_id());
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 84de2d7c2f40..da4d7f225a40 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -70,6 +70,9 @@ static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
static unsigned long xmon_taken = 1;
static int xmon_owner;
static int xmon_gate;
+static int xmon_batch;
+static unsigned long xmon_batch_start_cpu;
+static cpumask_t xmon_batch_cpus = CPU_MASK_NONE;
#else
#define xmon_owner 0
#endif /* CONFIG_SMP */
@@ -100,7 +103,7 @@ static long *xmon_fault_jmp[NR_CPUS];
/* Breakpoint stuff */
struct bpt {
unsigned long address;
- struct ppc_inst *instr;
+ u32 *instr;
atomic_t ref_count;
int enabled;
unsigned long pad;
@@ -133,6 +136,12 @@ static void prdump(unsigned long, long);
static int ppc_inst_dump(unsigned long, long, int);
static void dump_log_buf(void);
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long);
+static int xmon_batch_next_cpu(void);
+static int batch_cmds(struct pt_regs *);
+#endif
+
#ifdef CONFIG_PPC_POWERNV
static void dump_opal_msglog(void);
#else
@@ -216,7 +225,8 @@ Commands:\n\
#ifdef CONFIG_SMP
"\
c print cpus stopped in xmon\n\
- c# try to switch to cpu number h (in hex)\n"
+ c# try to switch to cpu number h (in hex)\n\
+ c# $ run command '$' (one of 'r','S' or 't') on all cpus in xmon\n"
#endif
"\
C checksum\n\
@@ -489,10 +499,10 @@ static void xmon_touch_watchdogs(void)
touch_nmi_watchdog();
}
-static int xmon_core(struct pt_regs *regs, int fromipi)
+static int xmon_core(struct pt_regs *regs, volatile int fromipi)
{
- int cmd = 0;
- struct bpt *bp;
+ volatile int cmd = 0;
+ struct bpt *volatile bp;
long recurse_jmp[JMP_BUF_LEN];
bool locked_down;
unsigned long offset;
@@ -514,7 +524,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
@@ -644,7 +654,12 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
spin_cpu_relax();
touch_nmi_watchdog();
} else {
- if (!locked_down)
+ cmd = 1;
+#ifdef CONFIG_SMP
+ if (xmon_batch)
+ cmd = batch_cmds(regs);
+#endif
+ if (!locked_down && cmd)
cmd = cmds(regs);
if (locked_down || cmd != 0) {
/* exiting xmon */
@@ -702,7 +717,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (regs->msr & MSR_DE) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
}
}
@@ -712,7 +727,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (bp != NULL) {
int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
if (stepped == 0) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
} else if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
@@ -766,7 +781,7 @@ static int xmon_bpt(struct pt_regs *regs)
/* Are we at the trap at bp->instr[1] for some bp? */
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL && (offset == 4 || offset == 8)) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
return 1;
}
@@ -836,7 +851,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
}
@@ -857,7 +872,7 @@ static inline void force_enable_xmon(void)
static struct bpt *at_breakpoint(unsigned long pc)
{
int i;
- struct bpt *bp;
+ struct bpt *volatile bp;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp)
@@ -946,11 +961,11 @@ static void insert_bpts(void)
}
patch_instruction(bp->instr, instr);
- patch_instruction(ppc_inst_next(bp->instr, &instr),
+ patch_instruction(ppc_inst_next(bp->instr, bp->instr),
ppc_inst(bpinstr));
if (bp->enabled & BP_CIABR)
continue;
- if (patch_instruction((struct ppc_inst *)bp->address,
+ if (patch_instruction((u32 *)bp->address,
ppc_inst(bpinstr)) != 0) {
printf("Couldn't write instruction at %lx, "
"disabling breakpoint there\n", bp->address);
@@ -992,7 +1007,7 @@ static void remove_bpts(void)
if (mread_instr(bp->address, &instr)
&& ppc_inst_equal(instr, ppc_inst(bpinstr))
&& patch_instruction(
- (struct ppc_inst *)bp->address, ppc_inst_read(bp->instr)) != 0)
+ (u32 *)bp->address, ppc_inst_read(bp->instr)) != 0)
printf("Couldn't remove breakpoint at %lx\n",
bp->address);
}
@@ -1188,7 +1203,7 @@ cmds(struct pt_regs *excp)
#ifdef CONFIG_BOOKE
static int do_step(struct pt_regs *regs)
{
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
return 1;
}
@@ -1221,7 +1236,7 @@ static int do_step(struct pt_regs *regs)
}
}
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return 1;
}
#endif
@@ -1243,11 +1258,112 @@ static void bootcmds(void)
}
}
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long cpu)
+{
+ int timeout;
+
+ xmon_taken = 0;
+ mb();
+ xmon_owner = cpu;
+ timeout = 10000000;
+ while (!xmon_taken) {
+ if (--timeout == 0) {
+ if (test_and_set_bit(0, &xmon_taken))
+ break;
+ /* take control back */
+ mb();
+ xmon_owner = smp_processor_id();
+ printf("cpu 0x%lx didn't take control\n", cpu);
+ return 0;
+ }
+ barrier();
+ }
+ return 1;
+}
+
+static int xmon_batch_next_cpu(void)
+{
+ unsigned long cpu;
+
+ while (!cpumask_empty(&xmon_batch_cpus)) {
+ cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
+ xmon_batch_start_cpu, true);
+ if (cpu == nr_cpumask_bits)
+ break;
+ if (xmon_batch_start_cpu == -1)
+ xmon_batch_start_cpu = cpu;
+ if (xmon_switch_cpu(cpu))
+ return 0;
+ cpumask_clear_cpu(cpu, &xmon_batch_cpus);
+ }
+
+ xmon_batch = 0;
+ printf("%x:mon> \n", smp_processor_id());
+ return 1;
+}
+
+static int batch_cmds(struct pt_regs *excp)
+{
+ int cmd;
+
+ /* simulate command entry */
+ cmd = xmon_batch;
+ termch = '\n';
+
+ last_cmd = NULL;
+ xmon_regs = excp;
+
+ printf("%x:", smp_processor_id());
+ printf("mon> ");
+ printf("%c\n", (char)cmd);
+
+ switch (cmd) {
+ case 'r':
+ prregs(excp); /* print regs */
+ break;
+ case 'S':
+ super_regs();
+ break;
+ case 't':
+ backtrace(excp);
+ break;
+ }
+
+ cpumask_clear_cpu(smp_processor_id(), &xmon_batch_cpus);
+
+ return xmon_batch_next_cpu();
+}
+
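For context, the new 'c#' syntax runs a single command on every CPU currently stopped in xmon. A hypothetical session (output abbreviated) might look like:

0:mon> c# t
1:mon> t
	... backtrace for cpu 1 ...
2:mon> t
	... backtrace for cpu 2 ...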
static int cpu_cmd(void)
{
-#ifdef CONFIG_SMP
unsigned long cpu, first_cpu, last_cpu;
- int timeout;
+
+ cpu = skipbl();
+ if (cpu == '#') {
+ xmon_batch = skipbl();
+ if (xmon_batch) {
+ switch (xmon_batch) {
+ case 'r':
+ case 'S':
+ case 't':
+ cpumask_copy(&xmon_batch_cpus, &cpus_in_xmon);
+ if (cpumask_weight(&xmon_batch_cpus) <= 1) {
+ printf("There are no other cpus in xmon\n");
+ break;
+ }
+ xmon_batch_start_cpu = -1;
+ if (!xmon_batch_next_cpu())
+ return 1;
+ break;
+ default:
+ printf("c# only supports 'r', 'S' and 't' commands\n");
+ }
+ xmon_batch = 0;
+ return 0;
+ }
+ }
+ termch = cpu;
if (!scanhex(&cpu)) {
/* print cpus waiting or in xmon */
@@ -1279,27 +1395,15 @@ static int cpu_cmd(void)
#endif
return 0;
}
- xmon_taken = 0;
- mb();
- xmon_owner = cpu;
- timeout = 10000000;
- while (!xmon_taken) {
- if (--timeout == 0) {
- if (test_and_set_bit(0, &xmon_taken))
- break;
- /* take control back */
- mb();
- xmon_owner = smp_processor_id();
- printf("cpu 0x%lx didn't take control\n", cpu);
- return 0;
- }
- barrier();
- }
- return 1;
+
+ return xmon_switch_cpu(cpu);
+}
#else
+static int cpu_cmd(void)
+{
return 0;
-#endif /* CONFIG_SMP */
}
+#endif /* CONFIG_SMP */
static unsigned short fcstab[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
@@ -2214,7 +2318,7 @@ mread_instr(unsigned long adrs, struct ppc_inst *instr)
if (setjmp(bus_error_jmp) == 0) {
catch_memory_errors = 1;
sync();
- *instr = ppc_inst_read((struct ppc_inst *)adrs);
+ *instr = ppc_inst_read((u32 *)adrs);
sync();
/* wait a little while to see if we get a machine check */
__delay(200);
@@ -2975,7 +3079,7 @@ static void
dump_log_buf(void)
{
struct kmsg_dump_iter iter;
- unsigned char buf[128];
+ static unsigned char buf[1024];
size_t len;
if (setjmp(bus_error_jmp) != 0) {
@@ -3005,7 +3109,7 @@ static void dump_opal_msglog(void)
{
unsigned char buf[128];
ssize_t res;
- loff_t pos = 0;
+ volatile loff_t pos = 0;
if (!firmware_has_feature(FW_FEATURE_OPAL)) {
printf("Machine is not running OPAL firmware.\n");
@@ -3160,7 +3264,7 @@ memzcan(void)
printf("%.8lx\n", a - mskip);
}
-static void show_task(struct task_struct *tsk)
+static void show_task(struct task_struct *volatile tsk)
{
unsigned int p_state = READ_ONCE(tsk->__state);
char state;
@@ -3205,7 +3309,7 @@ static void format_pte(void *ptep, unsigned long pte)
static void show_pte(unsigned long addr)
{
unsigned long tskv = 0;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
struct mm_struct *mm;
pgd_t *pgdp;
p4d_t *p4dp;
@@ -3300,7 +3404,7 @@ static void show_pte(unsigned long addr)
static void show_tasks(void)
{
unsigned long tskv;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n");
@@ -3623,7 +3727,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
const char *after)
{
char *modname;
- const char *name = NULL;
+ const char *volatile name = NULL;
unsigned long offset, size;
printf(REG, address);
@@ -4055,7 +4159,7 @@ void xmon_register_spus(struct list_head *list)
static void stop_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
u64 tmp;
for (i = 0; i < XMON_NUM_SPUS; i++) {
@@ -4096,7 +4200,7 @@ static void stop_spus(void)
static void restart_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
for (i = 0; i < XMON_NUM_SPUS; i++) {
if (!spu_info[i].spu)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 15f9490a7aad..4f7b70ae7c31 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -26,8 +26,8 @@ config RISCV
select ARCH_HAS_KCOV
select ARCH_HAS_MMIOWB
select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_SET_DIRECT_MAP
- select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP if MMU
+ select ARCH_HAS_SET_MEMORY if MMU
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -65,11 +65,14 @@ config RISCV
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
+ select HAVE_ARCH_KFENCE if MMU && 64BIT
select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
+ select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING
select HAVE_DEBUG_KMEMLEAK
@@ -83,11 +86,14 @@ config RISCV
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
+ select HAVE_MOVE_PMD
+ select HAVE_MOVE_PUD
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
@@ -104,6 +110,7 @@ config RISCV
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select UACCESS_MEMCPY if !MMU
+ select ZONE_DMA32 if 64BIT
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -133,10 +140,6 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
-config ZONE_DMA32
- bool
- default y if 64BIT
-
config VA_BITS
int
default 32 if 32BIT
@@ -489,6 +492,7 @@ config CC_HAVE_STACKPROTECTOR_TLS
config STACKPROTECTOR_PER_TASK
def_bool y
+ depends on !GCC_PLUGIN_RANDSTRUCT
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
config PHYS_RAM_BASE_FIXED
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 99ecd8bcfd77..bc74afdbf31e 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -99,7 +99,6 @@ endif
head-y := arch/riscv/kernel/head.o
-core-y += arch/riscv/
core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
libs-y += arch/riscv/lib/
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index b1c3c596578f..2e4ea84f27e7 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -24,7 +24,7 @@
memory@80000000 {
device_type = "memory";
- reg = <0x0 0x80000000 0x2 0x00000000>;
+ reg = <0x0 0x80000000 0x4 0x00000000>;
};
soc {
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index 2a652b0c987d..ef386fcf3939 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -25,4 +25,7 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
+asmlinkage unsigned long get_overflow_stack(void);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+
#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 6d98cd999680..7b3483ba2e84 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
- return image_addr + SZ_256M;
+ return ULONG_MAX;
}
#define alloc_screen_info(x...) (&screen_info)
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index c025a746a148..69605a474270 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -52,19 +52,6 @@
#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
-#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
-#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
-#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
-
-#define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
-#define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
-#define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
-
-#ifdef CONFIG_64BIT
-#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
-#define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
-#endif
-
/*
* Accesses from a single hart to a single I/O address must be ordered. This
* allows us to use the raw read macros, but we still need to fence before and
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
new file mode 100644
index 000000000000..d887a54042aa
--- /dev/null
+++ b/arch/riscv/include/asm/kfence.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_KFENCE_H
+#define _ASM_RISCV_KFENCE_H
+
+#include <linux/kfence.h>
+#include <linux/pfn.h>
+#include <asm-generic/pgalloc.h>
+#include <asm/pgtable.h>
+
+static inline int split_pmd_page(unsigned long addr)
+{
+ int i;
+ unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
+ pmd_t *pmd = pmd_off_k(addr);
+ pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+ if (!pte)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
+ set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+ int ret;
+ unsigned long addr;
+ pmd_t *pmd;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ pmd = pmd_off_k(addr);
+
+ if (pmd_leaf(*pmd)) {
+ ret = split_pmd_page(addr);
+ if (ret)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ return true;
+}
+
+#endif /* _ASM_RISCV_KFENCE_H */
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
index 4647d38018f6..9ea9b5ec3113 100644
--- a/arch/riscv/include/asm/kprobes.h
+++ b/arch/riscv/include/asm/kprobes.h
@@ -29,18 +29,11 @@ struct prev_kprobe {
unsigned int status;
};
-/* Single step context for kprobe */
-struct kprobe_step_ctx {
- unsigned long ss_pending;
- unsigned long match_addr;
-};
-
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
unsigned long saved_status;
struct prev_kprobe prev_kprobe;
- struct kprobe_step_ctx ss_ctx;
};
void arch_remove_kprobe(struct kprobe *p);
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index b0659413a080..7030837adc1a 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,6 +33,8 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
+DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+
#include <asm-generic/mmu_context.h>
#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 6a7761c86ec2..b0ca5058e7ae 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -37,16 +37,6 @@
#ifndef __ASSEMBLY__
-#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
-#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
-
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
-
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
@@ -89,59 +79,69 @@ typedef struct page *pgtable_t;
#endif
#ifdef CONFIG_MMU
-extern unsigned long va_pa_offset;
-#ifdef CONFIG_64BIT
-extern unsigned long va_kernel_pa_offset;
-#endif
-#ifdef CONFIG_XIP_KERNEL
-extern unsigned long va_kernel_xip_pa_offset;
-#endif
extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET (pfn_base)
#else
-#define va_pa_offset 0
-#ifdef CONFIG_64BIT
-#define va_kernel_pa_offset 0
-#endif
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
-extern unsigned long kernel_virt_addr;
-
+struct kernel_mapping {
+ unsigned long virt_addr;
+ uintptr_t phys_addr;
+ uintptr_t size;
+ /* Offset between linear mapping virtual address and kernel load address */
+ unsigned long va_pa_offset;
#ifdef CONFIG_64BIT
-#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
+ /* Offset between kernel mapping virtual address and kernel load address */
+ unsigned long va_kernel_pa_offset;
+#endif
+ unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
+ uintptr_t xiprom;
+ uintptr_t xiprom_sz;
+#endif
+};
+
+extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
+
+#ifdef CONFIG_64BIT
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)
+
+#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = y; \
- (_y >= CONFIG_PHYS_RAM_BASE) ? \
- (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
- (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
})
-#else
-#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
-#endif
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
-#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
-#ifdef CONFIG_XIP_KERNEL
+#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = y; \
- (_y < kernel_virt_addr + XIP_OFFSET) ? \
- ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
- ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
+ (_y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
+ ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
})
-#else
-#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
-#endif
+
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
- (_x < kernel_virt_addr) ? \
+ is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
#else
-#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
-#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
-#endif
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET)
+
+#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
+#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
+#endif /* CONFIG_64BIT */
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
index 658e112c3ce7..7fd52a30e605 100644
--- a/arch/riscv/include/asm/pci.h
+++ b/arch/riscv/include/asm/pci.h
@@ -18,6 +18,8 @@
/* RISC-V shim does not initialize PCI bus */
#define pcibios_assign_all_busses() 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 23b1544e0ca5..0af6933a7100 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -38,8 +38,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#endif /* __PAGETABLE_PMD_FOLDED */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index f3b0da64c6c8..228261aa9628 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -46,8 +46,7 @@ static inline int pud_bad(pud_t pud)
#define pud_leaf pud_leaf
static inline int pud_leaf(pud_t pud)
{
- return pud_present(pud) &&
- (pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+ return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}
static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -60,9 +59,9 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+ return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
static inline struct page *pud_page(pud_t pud)
@@ -80,6 +79,8 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
}
+#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
+
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index bbaeb5d35842..2ee413912926 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -39,5 +39,10 @@
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))
+/*
+ * when all of R/W/X are zero, the PTE is a pointer to the next level
+ * of the page table; otherwise, it is a leaf PTE.
+ */
+#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 380cd3a7e548..39b550310ec6 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -76,6 +76,8 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_8M
+#else
+#define XIP_OFFSET 0
#endif
#ifndef __ASSEMBLY__
@@ -133,7 +135,8 @@
| _PAGE_WRITE \
| _PAGE_PRESENT \
| _PAGE_ACCESSED \
- | _PAGE_DIRTY)
+ | _PAGE_DIRTY \
+ | _PAGE_GLOBAL)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
@@ -171,10 +174,23 @@ extern pgd_t swapper_pg_dir[];
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_present(pmd_t pmd)
+{
+ /*
+ * Checking for _PAGE_LEAF is needed too because:
+ * When splitting a THP, split_huge_page() will temporarily clear
+ * the present bit; in this situation, pmd_present() and
+ * pmd_trans_huge() still need to return true.
+ */
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
+}
+#else
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
+#endif
static inline int pmd_none(pmd_t pmd)
{
@@ -183,14 +199,13 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
- return !pmd_present(pmd);
+ return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}
#define pmd_leaf pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
- return pmd_present(pmd) &&
- (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+ return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
@@ -228,6 +243,11 @@ static inline pte_t pmd_pte(pmd_t pmd)
return __pte(pmd_val(pmd));
}
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
@@ -266,8 +286,7 @@ static inline int pte_exec(pte_t pte)
static inline int pte_huge(pte_t pte)
{
- return pte_present(pte)
- && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+ return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}
static inline int pte_dirty(pte_t pte)
@@ -370,6 +389,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
local_flush_tlb_page(address);
}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pte_t *ptep = (pte_t *)pmdp;
+
+ update_mmu_cache(vma, address, ptep);
+}
+
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
@@ -464,6 +491,137 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
}
/*
+ * THP functions
+ */
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
+}
+
+#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return pte_write(pmd_pte(pmd));
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return pte_dirty(pmd_pte(pmd));
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return pte_young(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pte_pmd(pte_mkold(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ return pte_pmd(pte_mkclean(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_leaf(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
* Encode and decode a swap entry
*
* Format of swap PTE:
@@ -532,12 +690,9 @@ extern uintptr_t _dtb_early_pa;
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
-void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);
-#define FIRST_USER_ADDRESS 0
-
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 09ad4e923510..6ecd461129d2 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -141,6 +141,37 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
+
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (starting from 0)
+ *
+ * regs_get_kernel_argument() returns the @n-th argument of the function call.
+ *
+ * Note that the parameter is returned correctly only if the function
+ * has no more than eight register arguments.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+ static const int nr_reg_arguments = 8;
+ static const unsigned int argument_offs[] = {
+ offsetof(struct pt_regs, a0),
+ offsetof(struct pt_regs, a1),
+ offsetof(struct pt_regs, a2),
+ offsetof(struct pt_regs, a3),
+ offsetof(struct pt_regs, a4),
+ offsetof(struct pt_regs, a5),
+ offsetof(struct pt_regs, a6),
+ offsetof(struct pt_regs, a7),
+ };
+
+ if (n < nr_reg_arguments)
+ return regs_get_register(regs, argument_offs[n]);
+ return 0;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
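As a non-authoritative sketch of how this helper is meant to be consumed, a kprobes pre-handler could read the probed function's register arguments as below; the handler name and probe details are made up for illustration.

#include <linux/kprobes.h>

static int sketch_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* the first two integer/pointer arguments live in a0 and a1 */
	unsigned long arg0 = regs_get_kernel_argument(regs, 0);
	unsigned long arg1 = regs_get_kernel_argument(regs, 1);

	pr_info("%s: arg0=%lx arg1=%lx\n", p->symbol_name, arg0, arg1);
	return 0;
}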
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 8a303fb1ee3b..32336e8a17cb 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -6,6 +6,7 @@
#define __ASM_SECTIONS_H
#include <asm-generic/sections.h>
+#include <linux/mm.h>
extern char _start[];
extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)_start;
+ uintptr_t end = (uintptr_t)__init_data_begin;
+
+ return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)lm_alias(_start);
+ uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+ return va >= start && va < end;
+}
+
#endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 086f757e8ba3..a2c14d4b3993 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -16,20 +16,28 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
+static __always_inline int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start,
+ int num_pages))
+{
+ unsigned long start = (unsigned long)startp;
+ unsigned long end = (unsigned long)endp;
+ int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+ return set_memory(start, num_pages);
+}
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
-#endif
-
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
+static inline int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start,
+ int num_pages))
+{
+ return 0;
+}
#endif
int set_direct_map_invalid_noflush(struct page *page);
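To illustrate what set_kernel_memory() is for (a sketch assuming the usual section symbols from asm-generic/sections.h are in scope, not code taken from this patch):

static void sketch_mark_kernel_ro(void)
{
	/* kernel text: read-only */
	set_kernel_memory(_start, _etext, set_memory_ro);
	/* rodata: read-only and never executable */
	set_kernel_memory(__start_rodata, __end_rodata, set_memory_ro);
	set_kernel_memory(__start_rodata, __end_rodata, set_memory_nx);
}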
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 407bcc96a710..0a3f4f95c555 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -6,6 +6,7 @@
#ifndef _ASM_RISCV_SWITCH_TO_H
#define _ASM_RISCV_SWITCH_TO_H
+#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -55,9 +56,13 @@ static inline void __switch_to_aux(struct task_struct *prev,
fstate_restore(next, task_pt_regs(next));
}
-extern bool has_fpu;
+extern struct static_key_false cpu_hwcap_fpu;
+static __always_inline bool has_fpu(void)
+{
+ return static_branch_likely(&cpu_hwcap_fpu);
+}
#else
-#define has_fpu false
+static __always_inline bool has_fpu(void) { return false; }
#define fstate_save(task, regs) do { } while (0)
#define fstate_restore(task, regs) do { } while (0)
#define __switch_to_aux(__prev, __next) do { } while (0)
@@ -70,7 +75,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
- if (has_fpu) \
+ if (has_fpu()) \
__switch_to_aux(__prev, __next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
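The hunk above replaces the plain has_fpu flag with a static key, so the FPU check in __switch_to compiles down to a patched branch instead of a memory load. A minimal sketch of that jump-label idiom, with purely illustrative names (not part of this patch):

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/types.h>

DEFINE_STATIC_KEY_FALSE(demo_hwcap_key);	/* branch starts out disabled */

static __always_inline bool demo_hwcap_present(void)
{
	/* No load on the fast path: the branch site itself gets patched. */
	return static_branch_likely(&demo_hwcap_key);
}

static int __init demo_hwcap_detect(void)
{
	bool detected = true;	/* stand-in for real feature probing */

	if (detected)
		static_branch_enable(&demo_hwcap_key);
	return 0;
}
early_initcall(demo_hwcap_detect);

Callers then test demo_hwcap_present() exactly like the has_fpu() wrapper introduced above.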
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 0e549a3089b3..60da0dcacf14 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -19,6 +19,21 @@
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
+#define OVERFLOW_STACK_SIZE SZ_4K
+#define SHADOW_OVERFLOW_STACK_SIZE (1024)
+
#ifndef __ASSEMBLY__
#include <asm/processor.h>
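To make the comment above concrete, here is a small user-space model of the overflow test the entry code performs (a sketch only, not part of the patch; it assumes 4 KiB pages and THREAD_SIZE_ORDER = 2, i.e. a 16 KiB stack aligned to 32 KiB):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12			/* assumed: 4 KiB pages */
#define THREAD_SIZE_ORDER	2
#define THREAD_SHIFT		(PAGE_SHIFT + THREAD_SIZE_ORDER)
#define THREAD_SIZE		(1UL << THREAD_SHIFT)	/* 16 KiB */
#define THREAD_ALIGN		(2 * THREAD_SIZE)	/* 32 KiB */

/*
 * Because the stack base is THREAD_ALIGN aligned, bit THREAD_SHIFT is 0 for
 * every address in [base, base + THREAD_SIZE) and 1 just below the base.
 */
static bool sp_overflowed(uintptr_t sp)
{
	return sp & (1UL << THREAD_SHIFT);
}

int main(void)
{
	uintptr_t base = 0x100000;	/* any 32 KiB aligned stack base */

	printf("%d\n", sp_overflowed(base + THREAD_SIZE - 64));	/* 0: in range */
	printf("%d\n", sp_overflowed(base - 16));			/* 1: overflowed */
	return 0;
}

The entry assembly performs the same test with srli/andi after reserving PT_SIZE_ON_STACK, so the value it checks is always strictly below base + THREAD_SIZE.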
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index c84218ad7afc..801019381dea 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -33,6 +33,11 @@ void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#endif
#else /* CONFIG_SMP && CONFIG_MMU */
#define flush_tlb_all() local_flush_tlb_all()
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 977ee6181dab..6c316093a1e5 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -9,6 +9,7 @@
*/
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_MEMFD_SECRET
#include <uapi/asm/unistd.h>
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index d3081e4d9600..3397ddac1a30 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -11,7 +11,7 @@ endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
endif
extra-y += head.o
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 9ef33346853c..90f8ce64fa6f 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -311,4 +311,6 @@ void asm_offsets(void)
* ensures the alignment is sane.
*/
DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+
+ OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
}
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index ac202f44a670..d959d207a40d 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -19,7 +19,7 @@ unsigned long elf_hwcap __read_mostly;
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
#ifdef CONFIG_FPU
-bool has_fpu __read_mostly;
+__ro_after_init DEFINE_STATIC_KEY_FALSE(cpu_hwcap_fpu);
#endif
/**
@@ -59,7 +59,7 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
-void riscv_fill_hwcap(void)
+void __init riscv_fill_hwcap(void)
{
struct device_node *node;
const char *isa;
@@ -146,6 +146,6 @@ void riscv_fill_hwcap(void)
#ifdef CONFIG_FPU
if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
- has_fpu = true;
+ static_branch_enable(&cpu_hwcap_fpu);
#endif
}
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 80d5a9e017b0..98f502654edd 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -30,6 +30,15 @@ ENTRY(handle_exception)
_restore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
+
+#ifdef CONFIG_VMAP_STACK
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ srli sp, sp, THREAD_SHIFT
+ andi sp, sp, 0x1
+ bnez sp, handle_kernel_stack_overflow
+ REG_L sp, TASK_TI_KERNEL_SP(tp)
+#endif
+
_save_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
@@ -376,6 +385,105 @@ handle_syscall_trace_exit:
call do_syscall_trace_exit
j ret_from_exception
+#ifdef CONFIG_VMAP_STACK
+handle_kernel_stack_overflow:
+ la sp, shadow_stack
+ addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+
+ //save caller registers to shadow stack
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ REG_S x1, PT_RA(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+
+ la ra, restore_caller_reg
+ tail get_overflow_stack
+
+restore_caller_reg:
+ //save per-cpu overflow stack
+ REG_S a0, -8(sp)
+ //restore caller registers from shadow_stack
+ REG_L x1, PT_RA(sp)
+ REG_L x5, PT_T0(sp)
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+
+ //load per-cpu overflow stack
+ REG_L sp, -8(sp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+
+ //save context to overflow stack
+ REG_S x1, PT_RA(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+
+ REG_L s0, TASK_TI_KERNEL_SP(tp)
+ csrr s1, CSR_STATUS
+ csrr s2, CSR_EPC
+ csrr s3, CSR_TVAL
+ csrr s4, CSR_CAUSE
+ csrr s5, CSR_SCRATCH
+ REG_S s0, PT_SP(sp)
+ REG_S s1, PT_STATUS(sp)
+ REG_S s2, PT_EPC(sp)
+ REG_S s3, PT_BADADDR(sp)
+ REG_S s4, PT_CAUSE(sp)
+ REG_S s5, PT_TP(sp)
+ move a0, sp
+ tail handle_bad_stack
+#endif
+
END(handle_exception)
ENTRY(ret_from_fork)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 89cc58ab52b4..fce5184b22c3 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -81,9 +81,9 @@ pe_head_start:
#ifdef CONFIG_MMU
relocate:
/* Relocate return address */
- la a1, kernel_virt_addr
+ la a1, kernel_map
XIP_FIXUP_OFFSET a1
- REG_L a1, 0(a1)
+ REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
la a2, _start
sub a1, a1, a2
add ra, ra, a1
diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S
index 88c3beabe9b4..a80b52a74f58 100644
--- a/arch/riscv/kernel/kexec_relocate.S
+++ b/arch/riscv/kernel/kexec_relocate.S
@@ -20,7 +20,7 @@ SYM_CODE_START(riscv_kexec_relocate)
* s4: Pointer to the destination address for the relocation
* s5: (const) Number of words per page
* s6: (const) 1, used for subtraction
- * s7: (const) va_pa_offset, used when switching MMU off
+ * s7: (const) kernel_map.va_pa_offset, used when switching MMU off
* s8: (const) Physical address of the main loop
* s9: (debug) indirection page counter
* s10: (debug) entry counter
@@ -159,7 +159,7 @@ SYM_CODE_START(riscv_kexec_norelocate)
* s0: (const) Phys address to jump to
* s1: (const) Phys address of the FDT image
* s2: (const) The hartid of the current hart
- * s3: (const) va_pa_offset, used when switching MMU off
+ * s3: (const) kernel_map.va_pa_offset, used when switching MMU off
*/
mv s0, a1
mv s1, a2
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index 9e99e1db156b..e6eca271a4d6 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -189,6 +189,6 @@ machine_kexec(struct kimage *image)
/* Jump to the relocation code */
pr_notice("Bye...\n");
kexec_method(first_ind_entry, jump_addr, fdt_addr,
- this_hart_id, va_pa_offset);
+ this_hart_id, kernel_map.va_pa_offset);
unreachable();
}
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 247e33fa5bc7..00088dc6da4b 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -17,7 +17,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
@@ -43,7 +43,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
p->ainsn.api.handler((u32)p->opcode,
(unsigned long)p->addr, regs);
- post_kprobe_handler(kcb, regs);
+ post_kprobe_handler(p, kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -151,21 +151,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
regs->status = kcb->saved_status;
}
-static void __kprobes
-set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
-{
- unsigned long offset = GET_INSN_LENGTH(p->opcode);
-
- kcb->ss_ctx.ss_pending = true;
- kcb->ss_ctx.match_addr = addr + offset;
-}
-
-static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
-{
- kcb->ss_ctx.ss_pending = false;
- kcb->ss_ctx.match_addr = 0;
-}
-
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
@@ -184,8 +169,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
- set_ss_context(kcb, slot, p); /* mark pending ss */
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
@@ -221,13 +204,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
}
static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
- struct kprobe *cur = kprobe_running();
-
- if (!cur)
- return;
-
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
regs->epc = cur->ainsn.api.restore;
@@ -342,16 +320,16 @@ bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long addr = instruction_pointer(regs);
+ struct kprobe *cur = kprobe_running();
- if ((kcb->ss_ctx.ss_pending)
- && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
- clear_ss_context(kcb); /* clear pending ss */
-
+ if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+ ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
-
- post_kprobe_handler(kcb, regs);
+ post_kprobe_handler(cur, kcb, regs);
return true;
}
+ /* not ours, kprobes should ignore it */
return false;
}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index f9cd57c9c67d..03ac3aa611f5 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -87,7 +87,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
regs->status = SR_PIE;
- if (has_fpu) {
+ if (has_fpu()) {
regs->status |= SR_FS_INITIAL;
/*
* Restore the initial value to the FP register
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 9a1b7a0603b2..18bd0e4bc36c 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -17,7 +17,6 @@
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/sched/task.h>
-#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>
@@ -264,10 +263,7 @@ static void __init parse_dtb(void)
void __init setup_arch(char **cmdline_p)
{
parse_dtb();
- init_mm.start_code = (unsigned long) _stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
*cmdline_p = boot_command_line;
@@ -276,7 +272,6 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
efi_init();
- setup_bootmem();
paging_init();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
unflatten_and_copy_device_tree();
@@ -291,15 +286,6 @@ void __init setup_arch(char **cmdline_p)
init_resources();
sbi_init();
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
- protect_kernel_text_data();
- protect_kernel_linear_mapping_text_rodata();
- }
-
-#ifdef CONFIG_SWIOTLB
- swiotlb_init(1);
-#endif
-
#ifdef CONFIG_KASAN
kasan_init();
#endif
@@ -334,11 +320,10 @@ subsys_initcall(topology_init);
void free_initmem(void)
{
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
- set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+ set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+ IS_ENABLED(CONFIG_64BIT) ?
+ set_memory_rw : set_memory_rw_nx);
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 65942b3748b4..c2d5ecbe5526 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -90,7 +90,7 @@ static long restore_sigcontext(struct pt_regs *regs,
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
/* Restore the floating-point state. */
- if (has_fpu)
+ if (has_fpu())
err |= restore_fp_state(regs, &sc->sc_fpregs);
return err;
}
@@ -143,7 +143,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
/* Save the floating-point state. */
- if (has_fpu)
+ if (has_fpu())
err |= save_fp_state(regs, &sc->sc_fpregs);
return err;
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index ff467b98c3e3..315db3d0229b 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -27,7 +27,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == current) {
+ } else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(1);
sp = (unsigned long)__builtin_frame_address(0);
pc = (unsigned long)__builtin_return_address(0);
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && !task_is_running(task)))
+ if (likely(task && task != current && !task_is_running(task))) {
+ if (!try_get_task_stack(task))
+ return 0;
walk_stackframe(task, NULL, save_wchan, &pc);
+ put_task_stack(task);
+ }
return pc;
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7bc88d8aab97..0a98fd0ddfe9 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -203,3 +203,38 @@ int is_valid_bugaddr(unsigned long pc)
void __init trap_init(void)
{
}
+
+#ifdef CONFIG_VMAP_STACK
+static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+ overflow_stack)__aligned(16);
+/*
+ * The shadow stack is used by handle_kernel_stack_overflow (in kernel/entry.S)
+ * to switch to the per-cpu overflow stack (see get_overflow_stack).
+ */
+long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)];
+asmlinkage unsigned long get_overflow_stack(void)
+{
+ return (unsigned long)this_cpu_ptr(overflow_stack) +
+ OVERFLOW_STACK_SIZE;
+}
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned long tsk_stk = (unsigned long)current->stack;
+ unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+
+ console_verbose();
+
+ pr_emerg("Insufficient stack space to handle exception!\n");
+ pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
+ tsk_stk, tsk_stk + THREAD_SIZE);
+ pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
+ ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
+
+ __show_regs(regs);
+ panic("Kernel stack overflow");
+
+ for (;;)
+ wait_for_interrupt();
+}
+#endif
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index a3ff09c4c3f9..af776555ded9 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -12,7 +12,6 @@
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 891742ff75a7..502d0826ecb1 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -117,7 +117,7 @@ SECTIONS
. = ALIGN(SECTION_ALIGN);
_data = .;
- RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index fceaeb18cc64..63bc691cff91 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -19,50 +19,160 @@ ENTRY(__asm_copy_from_user)
li t6, SR_SUM
csrs CSR_STATUS, t6
- add a3, a1, a2
- /* Use word-oriented copy only if low-order bits match */
- andi t0, a0, SZREG-1
- andi t1, a1, SZREG-1
- bne t0, t1, 2f
+ /* Save for return value */
+ mv t5, a2
- addi t0, a1, SZREG-1
- andi t1, a3, ~(SZREG-1)
- andi t0, t0, ~(SZREG-1)
/*
- * a3: terminal address of source region
- * t0: lowest XLEN-aligned address in source
- * t1: highest XLEN-aligned address in source
+ * Register allocation for code below:
+ * a0 - start of uncopied dst
+ * a1 - start of uncopied src
+ * a2 - size
+ * t0 - end of uncopied dst
*/
- bgeu t0, t1, 2f
- bltu a1, t0, 4f
+ add t0, a0, a2
+
+ /*
+ * Use byte copy only if too small.
+ * SZREG holds 4 for RV32 and 8 for RV64
+ */
+ li a3, 9*SZREG /* size must be larger than size in word_copy */
+ bltu a2, a3, .Lbyte_copy_tail
+
+ /*
+ * Copy first bytes until dst is aligned to word boundary.
+ * a0 - start of dst
+ * t1 - start of aligned dst
+ */
+ addi t1, a0, SZREG-1
+ andi t1, t1, ~(SZREG-1)
+ /* dst is already aligned, skip */
+ beq a0, t1, .Lskip_align_dst
1:
- fixup REG_L, t2, (a1), 10f
- fixup REG_S, t2, (a0), 10f
- addi a1, a1, SZREG
- addi a0, a0, SZREG
- bltu a1, t1, 1b
+ /* a5 - one byte for copying data */
+ fixup lb a5, 0(a1), 10f
+ addi a1, a1, 1 /* src */
+ fixup sb a5, 0(a0), 10f
+ addi a0, a0, 1 /* dst */
+ bltu a0, t1, 1b /* t1 - start of aligned dst */
+
+.Lskip_align_dst:
+ /*
+ * Now dst is aligned.
+ * Use shift-copy if src is misaligned.
+ * Use word-copy if both src and dst are aligned, because
+ * it does not require any shifting.
+ */
+ /* a1 - start of src */
+ andi a3, a1, SZREG-1
+ bnez a3, .Lshift_copy
+
+.Lword_copy:
+ /*
+ * Both src and dst are aligned, unrolled word copy
+ *
+ * a0 - start of aligned dst
+ * a1 - start of aligned src
+ * t0 - end of aligned dst
+ */
+ addi t0, t0, -(8*SZREG) /* avoid overrunning */
2:
- bltu a1, a3, 5f
+ fixup REG_L a4, 0(a1), 10f
+ fixup REG_L a5, SZREG(a1), 10f
+ fixup REG_L a6, 2*SZREG(a1), 10f
+ fixup REG_L a7, 3*SZREG(a1), 10f
+ fixup REG_L t1, 4*SZREG(a1), 10f
+ fixup REG_L t2, 5*SZREG(a1), 10f
+ fixup REG_L t3, 6*SZREG(a1), 10f
+ fixup REG_L t4, 7*SZREG(a1), 10f
+ fixup REG_S a4, 0(a0), 10f
+ fixup REG_S a5, SZREG(a0), 10f
+ fixup REG_S a6, 2*SZREG(a0), 10f
+ fixup REG_S a7, 3*SZREG(a0), 10f
+ fixup REG_S t1, 4*SZREG(a0), 10f
+ fixup REG_S t2, 5*SZREG(a0), 10f
+ fixup REG_S t3, 6*SZREG(a0), 10f
+ fixup REG_S t4, 7*SZREG(a0), 10f
+ addi a0, a0, 8*SZREG
+ addi a1, a1, 8*SZREG
+ bltu a0, t0, 2b
+
+ addi t0, t0, 8*SZREG /* revert to original value */
+ j .Lbyte_copy_tail
+
+.Lshift_copy:
+
+ /*
+ * Word copy with shifting.
+ * For misaligned copy we still perform aligned word copy, but
+ * we need to use the value fetched from the previous iteration and
+ * do some shifts.
+ * This is safe because the extra bytes read before src stay within one aligned word.
+ *
+ * a0 - start of aligned dst
+ * a1 - start of src
+ * a3 - a1 & mask:(SZREG-1)
+ * t0 - end of uncopied dst
+ * t1 - end of aligned dst
+ */
+ /* calculating aligned word boundary for dst */
+ andi t1, t0, ~(SZREG-1)
+ /* Converting unaligned src to aligned src */
+ andi a1, a1, ~(SZREG-1)
+
+ /*
+ * Calculate shifts
+ * t3 - prev shift
+ * t4 - current shift
+ */
+ slli t3, a3, 3 /* converting bytes in a3 to bits */
+ li a5, SZREG*8
+ sub t4, a5, t3
+
+ /* Load the first word to combine with the second word */
+ fixup REG_L a5, 0(a1), 10f
3:
+ /* Main shifting copy
+ *
+ * a0 - start of aligned dst
+ * a1 - start of aligned src
+ * t1 - end of aligned dst
+ */
+
+ /* At least one iteration will be executed */
+ srl a4, a5, t3
+ fixup REG_L a5, SZREG(a1), 10f
+ addi a1, a1, SZREG
+ sll a2, a5, t4
+ or a2, a2, a4
+ fixup REG_S a2, 0(a0), 10f
+ addi a0, a0, SZREG
+ bltu a0, t1, 3b
+
+ /* Revert src to original unaligned value */
+ add a1, a1, a3
+
+.Lbyte_copy_tail:
+ /*
+ * Byte copy anything left.
+ *
+ * a0 - start of remaining dst
+ * a1 - start of remaining src
+ * t0 - end of remaining dst
+ */
+ bgeu a0, t0, .Lout_copy_user /* check if end of copy */
+4:
+ fixup lb a5, 0(a1), 10f
+ addi a1, a1, 1 /* src */
+ fixup sb a5, 0(a0), 10f
+ addi a0, a0, 1 /* dst */
+ bltu a0, t0, 4b /* t0 - end of dst */
+
+.Lout_copy_user:
/* Disable access to user memory */
csrc CSR_STATUS, t6
- li a0, 0
+ li a0, 0
ret
-4: /* Edge case: unalignment */
- fixup lbu, t2, (a1), 10f
- fixup sb, t2, (a0), 10f
- addi a1, a1, 1
- addi a0, a0, 1
- bltu a1, t0, 4b
- j 1b
-5: /* Edge case: remainder */
- fixup lbu, t2, (a1), 10f
- fixup sb, t2, (a0), 10f
- addi a1, a1, 1
- addi a0, a0, 1
- bltu a1, a3, 5b
- j 3b
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
@@ -117,7 +227,7 @@ EXPORT_SYMBOL(__clear_user)
10:
/* Disable access to user memory */
csrs CSR_STATUS, t6
- mv a0, a2
+ mv a0, t5
ret
11:
csrs CSR_STATUS, t6
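The rewritten routine above chooses between a byte copy, an unrolled word copy and a shifting word copy. A compact C model of the shifting case may help decode the register comments (hypothetical names, little-endian assumed, no fault or tail handling, and src must really be misaligned, as on the assembly path):

#include <stddef.h>
#include <stdint.h>

/*
 * dst is word aligned, src is not: build each destination word from two
 * neighbouring source words, mirroring the .Lshift_copy loop.
 */
static void shift_copy_words(unsigned long *dst, const unsigned char *src,
			     size_t nwords)
{
	size_t off = (uintptr_t)src % sizeof(unsigned long);	/* a3; nonzero here */
	const unsigned long *wsrc =
		(const unsigned long *)((uintptr_t)src - off);	/* rounded-down src */
	unsigned int shr = 8 * off;				/* t3: prev shift */
	unsigned int shl = 8 * sizeof(unsigned long) - shr;	/* t4: current shift */
	unsigned long prev = wsrc[0];				/* first (over-)read */

	for (size_t i = 0; i < nwords; i++) {
		unsigned long next = wsrc[i + 1];

		dst[i] = (prev >> shr) | (next << shl);		/* little-endian layout */
		prev = next;
	}
}

The assembly finally adds a3 back to a1 so that the byte-copy tail resumes from the original, unaligned source address.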
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 68aa312fc352..ee3459cb6750 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,7 +18,7 @@
#ifdef CONFIG_MMU
-static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
+DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
static unsigned long asid_bits;
static unsigned long num_asids;
@@ -213,7 +213,7 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
set_mm_noasid(mm);
}
-static int asids_init(void)
+static int __init asids_init(void)
{
unsigned long old;
@@ -243,8 +243,7 @@ static int asids_init(void)
if (num_asids > (2 * num_possible_cpus())) {
atomic_long_set(&current_version, num_asids);
- context_asid_map = kcalloc(BITS_TO_LONGS(num_asids),
- sizeof(*context_asid_map), GFP_KERNEL);
+ context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
if (!context_asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
num_asids);
@@ -280,11 +279,12 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
* cache flush to be performed before execution resumes on each hart. This
* actually performs that local instruction cache flush, which implicitly only
* refers to the current hart.
+ *
+ * The "cpu" argument must be the current local CPU number.
*/
-static inline void flush_icache_deferred(struct mm_struct *mm)
+static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
{
#ifdef CONFIG_SMP
- unsigned int cpu = smp_processor_id();
cpumask_t *mask = &mm->context.icache_stale_mask;
if (cpumask_test_cpu(cpu, mask)) {
@@ -320,5 +320,5 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
set_mm(next, cpu);
- flush_icache_deferred(next);
+ flush_icache_deferred(next, cpu);
}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 096463cc6fff..aa08dd2f8fae 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -14,6 +14,7 @@
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
+#include <linux/kfence.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
@@ -45,7 +46,15 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request";
+ if (addr < PAGE_SIZE)
+ msg = "NULL pointer dereference";
+ else {
+ if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
+ return;
+
+ msg = "paging request";
+ }
+
die_kernel_fault(msg, addr, regs);
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4c4c92ce0bb8..7cb4f391d106 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -11,6 +11,7 @@
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
+#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
@@ -29,10 +30,17 @@
#include "../kernel/head.h"
-unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
-EXPORT_SYMBOL(kernel_virt_addr);
+struct kernel_mapping kernel_map __ro_after_init;
+EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
-#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr)))
+#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
+#endif
+
+phys_addr_t phys_ram_base __ro_after_init;
+EXPORT_SYMBOL(phys_ram_base);
+
+#ifdef CONFIG_XIP_KERNEL
+extern char _xiprom[], _exiprom[];
#endif
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
@@ -53,7 +61,7 @@ struct pt_alloc_ops {
#endif
};
-static phys_addr_t dma32_phys_limit __ro_after_init;
+static phys_addr_t dma32_phys_limit __initdata;
static void __init zone_sizes_init(void)
{
@@ -67,11 +75,6 @@ static void __init zone_sizes_init(void)
free_area_init(max_zone_pfns);
}
-static void __init setup_zero_page(void)
-{
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
-}
-
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
@@ -113,25 +116,60 @@ void __init mem_init(void)
BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > PFN_DOWN(dma32_phys_limit))
+ swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
+#endif
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
memblock_free_all();
print_vm_layout();
}
-void __init setup_bootmem(void)
+/*
+ * The default maximal physical memory size is -PAGE_OFFSET for a 32-bit kernel,
+ * whereas for a 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings, which reduces the available size of the
+ * linear mapping.
+ * The memory size can be limited via the "mem=" kernel parameter.
+ */
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
+static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
+
+static int __init early_mem(char *p)
+{
+ u64 size;
+
+ if (!p)
+ return 1;
+
+ size = memparse(p, &p) & PAGE_MASK;
+ memory_limit = min_t(u64, size, memory_limit);
+
+ pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);
+
+ return 0;
+}
+early_param("mem", early_mem);
+
+static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
- phys_addr_t dram_end = memblock_end_of_DRAM();
- phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+ phys_addr_t __maybe_unused max_mapped_addr;
+ phys_addr_t phys_ram_end;
#ifdef CONFIG_XIP_KERNEL
vmlinux_start = __pa_symbol(&_sdata);
#endif
- /* The maximal physical memory size is -PAGE_OFFSET. */
- memblock_enforce_memory_limit(-PAGE_OFFSET);
+ memblock_enforce_memory_limit(memory_limit);
/*
* Reserve from the start of the kernel to the end of the kernel
@@ -146,17 +184,28 @@ void __init setup_bootmem(void)
#endif
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+
+ phys_ram_end = memblock_end_of_DRAM();
+#ifndef CONFIG_64BIT
+#ifndef CONFIG_XIP_KERNEL
+ phys_ram_base = memblock_start_of_DRAM();
+#endif
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory.
+ * if end of dram is equal to maximum addressable memory. For a 64-bit
+ * kernel, this problem can't happen here as the end of the virtual
+ * address space is occupied by the kernel mapping, so this check must
+ * be done as soon as the kernel mapping base address is determined.
*/
- if (max_mapped_addr == (dram_end - 1))
+ max_mapped_addr = __pa(~(ulong)0);
+ if (max_mapped_addr == (phys_ram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
- min_low_pfn = PFN_UP(memblock_start_of_DRAM());
- max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+ min_low_pfn = PFN_UP(phys_ram_base);
+ max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -176,15 +225,8 @@ void __init setup_bootmem(void)
memblock_allow_resize();
}
-#ifdef CONFIG_XIP_KERNEL
-
-extern char _xiprom[], _exiprom[];
-extern char _sdata[], _edata[];
-
-#endif /* CONFIG_XIP_KERNEL */
-
#ifdef CONFIG_MMU
-static struct pt_alloc_ops _pt_ops __ro_after_init;
+static struct pt_alloc_ops _pt_ops __initdata;
#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
@@ -192,31 +234,12 @@ static struct pt_alloc_ops _pt_ops __ro_after_init;
#define pt_ops _pt_ops
#endif
-/* Offset between linear mapping virtual address and kernel load address */
-unsigned long va_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_pa_offset);
-#ifdef CONFIG_XIP_KERNEL
-#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset)))
-#endif
-/* Offset between kernel mapping virtual address and kernel load address */
-#ifdef CONFIG_64BIT
-unsigned long va_kernel_pa_offset;
-EXPORT_SYMBOL(va_kernel_pa_offset);
-#endif
-#ifdef CONFIG_XIP_KERNEL
-#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
-#endif
-unsigned long va_kernel_xip_pa_offset;
-EXPORT_SYMBOL(va_kernel_xip_pa_offset);
-#ifdef CONFIG_XIP_KERNEL
-#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
-#endif
unsigned long pfn_base __ro_after_init;
EXPORT_SYMBOL(pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
-pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
@@ -253,7 +276,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}
-static inline pte_t *get_pte_virt_late(phys_addr_t pa)
+static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
return (pte_t *) __va(pa);
}
@@ -272,7 +295,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
-static phys_addr_t alloc_pte_late(uintptr_t va)
+static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
unsigned long vaddr;
@@ -296,10 +319,10 @@ static void __init create_pte_mapping(pte_t *ptep,
#ifndef __PAGETABLE_PMD_FOLDED
-pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
-pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
-pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
-pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
@@ -319,14 +342,14 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}
-static pmd_t *get_pmd_virt_late(phys_addr_t pa)
+static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
return (pmd_t *) __va(pa);
}
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
- BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
+ BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
return (uintptr_t)early_pmd;
}
@@ -336,7 +359,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
-static phys_addr_t alloc_pmd_late(uintptr_t va)
+static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
unsigned long vaddr;
@@ -436,6 +459,43 @@ asmlinkage void __init __copy_data(void)
}
#endif
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+ if (is_va_kernel_text(va))
+ return PAGE_KERNEL_READ_EXEC;
+
+ /*
+ * In a 64-bit kernel, the kernel mapping is outside the linear mapping, so
+ * we must protect its linear mapping alias from being executed and
+ * written.
+ * The rodata section is marked read-only in mark_rodata_ro().
+ */
+ if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+ return PAGE_KERNEL_READ;
+
+ return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+ set_kernel_memory(__start_rodata, _data, set_memory_ro);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+ set_memory_ro);
+
+ debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+ return PAGE_KERNEL;
+
+ return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
/*
* setup_vm() is called from head.S with MMU-off.
*
@@ -454,45 +514,39 @@ asmlinkage void __init __copy_data(void)
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
-uintptr_t load_pa, load_sz;
#ifdef CONFIG_XIP_KERNEL
-#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
-#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
-#endif
-
-#ifdef CONFIG_XIP_KERNEL
-uintptr_t xiprom, xiprom_sz;
-#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
-#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
-
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+ __always_unused bool early)
{
uintptr_t va, end_va;
/* Map the flash resident part */
- end_va = kernel_virt_addr + xiprom_sz;
- for (va = kernel_virt_addr; va < end_va; va += map_size)
+ end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
+ for (va = kernel_map.virt_addr; va < end_va; va += map_size)
create_pgd_mapping(pgdir, va,
- xiprom + (va - kernel_virt_addr),
+ kernel_map.xiprom + (va - kernel_map.virt_addr),
map_size, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
- end_va = kernel_virt_addr + XIP_OFFSET + load_sz;
- for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+ end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
+ for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
create_pgd_mapping(pgdir, va,
- load_pa + (va - (kernel_virt_addr + XIP_OFFSET)),
+ kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
map_size, PAGE_KERNEL);
}
#else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+ bool early)
{
uintptr_t va, end_va;
- end_va = kernel_virt_addr + load_sz;
- for (va = kernel_virt_addr; va < end_va; va += map_size)
+ end_va = kernel_map.virt_addr + kernel_map.size;
+ for (va = kernel_map.virt_addr; va < end_va; va += map_size)
create_pgd_mapping(pgdir, va,
- load_pa + (va - kernel_virt_addr),
- map_size, PAGE_KERNEL_EXEC);
+ kernel_map.phys_addr + (va - kernel_map.virt_addr),
+ map_size,
+ early ?
+ PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif
@@ -504,25 +558,28 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
+ kernel_map.virt_addr = KERNEL_LINK_ADDR;
+
#ifdef CONFIG_XIP_KERNEL
- xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
- xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+ kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
+ kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
- load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE;
- load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+ phys_ram_base = CONFIG_PHYS_RAM_BASE;
+ kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+ kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
- va_kernel_xip_pa_offset = kernel_virt_addr - xiprom;
+ kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
- load_pa = (uintptr_t)(&_start);
- load_sz = (uintptr_t)(&_end) - load_pa;
+ kernel_map.phys_addr = (uintptr_t)(&_start);
+ kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
- va_pa_offset = PAGE_OFFSET - load_pa;
+ kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
#ifdef CONFIG_64BIT
- va_kernel_pa_offset = kernel_virt_addr - load_pa;
+ kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
#endif
- pfn_base = PFN_DOWN(load_pa);
+ pfn_base = PFN_DOWN(kernel_map.phys_addr);
/*
* Enforce boot alignment requirements of RV32 and
@@ -532,7 +589,15 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
- BUG_ON((load_pa % map_size) != 0);
+ BUG_ON((kernel_map.phys_addr % map_size) != 0);
+
+#ifdef CONFIG_64BIT
+ /*
+ * The last 4K bytes of the addressable memory can not be mapped because
+ * of IS_ERR_VALUE macro.
+ */
+ BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
@@ -549,19 +614,19 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pmd_mapping(fixmap_pmd, FIXADDR_START,
(uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
/* Setup trampoline PGD and PMD */
- create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
+ create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
(uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
- create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
- xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
+ create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
+ kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
- create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
- load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
+ create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
+ kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
/* Setup trampoline PGD */
- create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
- load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
+ create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
+ kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif
/*
@@ -569,7 +634,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- create_kernel_page_table(early_pg_dir, map_size);
+ create_kernel_page_table(early_pg_dir, map_size, true);
#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
@@ -645,22 +710,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
}
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
- unsigned long text_start = (unsigned long)lm_alias(_start);
- unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
- unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
- unsigned long data_start = (unsigned long)lm_alias(_data);
-
- set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
- set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
- set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
static void __init setup_vm_final(void)
{
uintptr_t va, map_size;
@@ -689,25 +738,21 @@ static void __init setup_vm_final(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
+ if (end >= __pa(PAGE_OFFSET) + memory_limit)
+ end = __pa(PAGE_OFFSET) + memory_limit;
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
- create_pgd_mapping(swapper_pg_dir, va, pa,
- map_size,
-#ifdef CONFIG_64BIT
- PAGE_KERNEL
-#else
- PAGE_KERNEL_EXEC
-#endif
- );
+ create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+ pgprot_from_va(va));
}
}
#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+ create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
#endif
/* Clear fixmap PTE and PMD mappings */
@@ -738,39 +783,6 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
- unsigned long text_start = (unsigned long)_start;
- unsigned long init_text_start = (unsigned long)__init_text_begin;
- unsigned long init_data_start = (unsigned long)__init_data_begin;
- unsigned long rodata_start = (unsigned long)__start_rodata;
- unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
- unsigned long end_va = kernel_virt_addr + load_sz;
-#else
- unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
- set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
- set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
- set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
- /* rodata section is marked readonly in mark_rodata_ro */
- set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
- unsigned long rodata_start = (unsigned long)__start_rodata;
- unsigned long data_start = (unsigned long)_data;
-
- set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
- debug_checkwx();
-}
-#endif
-
#ifdef CONFIG_KEXEC_CORE
/*
* reserve_crashkernel() - reserves memory for crash kernel
@@ -858,7 +870,7 @@ static void __init reserve_crashkernel(void)
* reserved once we call early_init_fdt_scan_reserved_mem()
* later on.
*/
-static int elfcore_hdr_setup(struct reserved_mem *rmem)
+static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
{
elfcorehdr_addr = rmem->base;
elfcorehdr_size = rmem->size;
@@ -870,8 +882,8 @@ RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);
void __init paging_init(void)
{
+ setup_bootmem();
setup_vm_final();
- setup_zero_page();
}
void __init misc_mem_init(void)
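With the early_mem() handler added above, the amount of RAM placed under the linear mapping can now be capped from the boot command line rather than only by the per-arch default. A hypothetical invocation (values purely illustrative):

    console=ttyS0 root=/dev/vda1 ro mem=1G

memparse() accepts K/M/G (and larger) suffixes; the parsed value is rounded down to a page boundary and then clamped with min_t() against the default limit computed above, so "mem=" can only shrink, never grow, the usable memory.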
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
index 35703d5ef5fd..e7fd0c253c7b 100644
--- a/arch/riscv/mm/physaddr.c
+++ b/arch/riscv/mm/physaddr.c
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
- unsigned long kernel_start = (unsigned long)kernel_virt_addr;
+ unsigned long kernel_start = kernel_map.virt_addr;
unsigned long kernel_end = (unsigned long)_end;
/*
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 0536ac84b730..830e7de65e3a 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -98,8 +98,8 @@ static struct addr_marker address_markers[] = {
{0, "vmalloc() end"},
{0, "Linear mapping"},
#ifdef CONFIG_64BIT
- {0, "Modules mapping"},
- {0, "Kernel mapping (kernel, BPF)"},
+ {0, "Modules/BPF mapping"},
+ {0, "Kernel mapping"},
#endif
{-1, NULL},
};
@@ -379,7 +379,7 @@ static int __init ptdump_init(void)
address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
#ifdef CONFIG_64BIT
address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
- address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr;
+ address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
#endif
kernel_ptd_info.base_addr = KERN_VIRT_START;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 720b443c4528..64f8201237c2 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,36 +4,66 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
+#include <asm/mmu_context.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+ __asm__ __volatile__ ("sfence.vma x0, %0"
+ :
+ : "r" (asid)
+ : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+ unsigned long asid)
+{
+ __asm__ __volatile__ ("sfence.vma %0, %1"
+ :
+ : "r" (addr), "r" (asid)
+ : "memory");
+}
void flush_tlb_all(void)
{
sbi_remote_sfence_vma(NULL, 0, -1);
}
-/*
- * This function must not be called with cmask being null.
- * Kernel may panic if cmask is NULL.
- */
-static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
- unsigned long size)
+static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ unsigned long size, unsigned long stride)
{
+ struct cpumask *cmask = mm_cpumask(mm);
struct cpumask hmask;
unsigned int cpuid;
+ bool broadcast;
if (cpumask_empty(cmask))
return;
cpuid = get_cpu();
+ /* check if the tlbflush needs to be sent to other CPUs */
+ broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+ if (static_branch_unlikely(&use_asid_allocator)) {
+ unsigned long asid = atomic_long_read(&mm->context.id);
- if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
- /* local cpu is the only cpu present in cpumask */
- if (size <= PAGE_SIZE)
+ if (broadcast) {
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
+ start, size, asid);
+ } else if (size <= stride) {
+ local_flush_tlb_page_asid(start, asid);
+ } else {
+ local_flush_tlb_all_asid(asid);
+ }
+ } else {
+ if (broadcast) {
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(cpumask_bits(&hmask),
+ start, size);
+ } else if (size <= stride) {
local_flush_tlb_page(start);
- else
+ } else {
local_flush_tlb_all();
- } else {
- riscv_cpuid_to_hartid_mask(cmask, &hmask);
- sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+ }
}
put_cpu();
@@ -41,16 +71,23 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
void flush_tlb_mm(struct mm_struct *mm)
{
- __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+ __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+ __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+ __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
+#endif
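A quick worked case for the new stride argument (assuming 4 KiB base pages, so PMD_SIZE = 2 MiB = 512 pages): flush_pmd_tlb_range() passes size = end - start = 2 MiB together with stride = PMD_SIZE, so on the local-only path the "size <= stride" test holds and a single sfence.vma targeting the huge page's address suffices; with the previous PAGE_SIZE-only logic the same 2 MiB range would have exceeded one page and fallen back to a full local_flush_tlb_all().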
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 81de865f4c7c..e6497424cbf6 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
return -1;
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 87e3bf5b9086..3af4131c22c7 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -939,6 +939,10 @@ out_be:
emit_ld(rd, 0, RV_REG_T1, ctx);
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a49971647f81..a0e2130f0100 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -2,9 +2,6 @@
config MMU
def_bool y
-config ZONE_DMA
- def_bool y
-
config CPU_BIG_ENDIAN
def_bool y
@@ -62,7 +59,7 @@ config S390
select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE
- select ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -165,7 +162,7 @@ config S390
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
- select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_IOREMAP_PROT if PCI
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -173,6 +170,7 @@ config S390
select HAVE_KERNEL_LZO
select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
@@ -211,6 +209,7 @@ config S390
select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
+ select ZONE_DMA
# Note: keep the above list sorted alphabetically
config SCHED_OMIT_FRAME_POINTER
@@ -438,6 +437,7 @@ config COMPAT
select COMPAT_OLD_SIGACTION
select HAVE_UID16
depends on MULTIUSER
+ depends on !CC_IS_CLANG
help
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -769,7 +769,7 @@ config VFIO_CCW
config VFIO_AP
def_tristate n
prompt "VFIO support for AP devices"
- depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
+ depends on S390_AP_IOMMU && VFIO_MDEV && KVM
depends on ZCRYPT
help
This driver grants access to Adjunct Processor (AP) devices
@@ -854,7 +854,7 @@ config CMM_IUCV
config APPLDATA_BASE
def_bool n
prompt "Linux - VM Monitor Stream, base infrastructure"
- depends on PROC_FS
+ depends on PROC_SYSCTL
help
This provides a kernel interface for creating and updating z/VM APPLDATA
monitor records. The monitor records are updated at certain time
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e443ed9947bd..1e3172877982 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -28,6 +28,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
@@ -128,9 +129,6 @@ OBJCOPYFLAGS := -O binary
head-y := arch/s390/kernel/head64.o
-# See arch/s390/Kbuild for content of core part of the kernel
-core-y += arch/s390/
-
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
@@ -165,6 +163,19 @@ archheaders:
archprepare:
$(Q)$(MAKE) $(build)=$(syscalls) kapi
$(Q)$(MAKE) $(build)=$(tools) kapi
+ifeq ($(KBUILD_EXTMOD),)
+# We need to generate vdso-offsets.h before compiling certain files in kernel/.
+# In order to do that, we should use the archprepare target, but we can't since
+# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
+# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
+# Therefore we need to generate the header after prepare0 has been made, hence
+# this hack.
+prepare: vdso_prepare
+vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/s390/kernel/vdso64 include/generated/vdso64-offsets.h
+ $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+ $(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h)
+endif
# Don't use tabs in echo arguments
define archhelp
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index ff6801d401c4..47c48fbfb563 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -68,7 +68,7 @@ void print_missing_facilities(void)
first = 1;
for (i = 0; i < ARRAY_SIZE(als); i++) {
- val = ~S390_lowcore.stfle_fac_list[i] & als[i];
+ val = ~stfle_fac_list[i] & als[i];
for (j = 0; j < BITS_PER_LONG; j++) {
if (!(val & (1UL << (BITS_PER_LONG - 1 - j))))
continue;
@@ -106,9 +106,9 @@ void verify_facilities(void)
{
int i;
- __stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ __stfle(stfle_fac_list, ARRAY_SIZE(stfle_fac_list));
for (i = 0; i < ARRAY_SIZE(als); i++) {
- if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i])
+ if ((stfle_fac_list[i] & als[i]) != als[i])
facility_mismatch();
}
}
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 8b50967f5804..ae04e1c93764 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -24,6 +24,7 @@ void __printf(1, 2) decompressor_printk(const char *fmt, ...);
extern const char kernel_version[];
extern unsigned long memory_limit;
+extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern int kaslr_enabled;
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index de18dab518bb..e30d3fdbbc78 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -11,9 +11,11 @@ UBSAN_SANITIZE := n
KASAN_SANITIZE := n
obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
obj-all := $(obj-y) piggy.o syms.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += vmlinux.bin.zst
targets += info.bin syms.bin vmlinux.syms $(obj-all)
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
@@ -33,7 +35,7 @@ $(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OB
quiet_cmd_dumpsyms = DUMPSYMS $<
define cmd_dumpsyms
- $(NM) -n -S --format=bsd "$<" | $(PERL) -ne '/(\w+)\s+(\w+)\s+[tT]\s+(\w+)/ and printf "%x %x %s\0",hex $$1,hex $$2,$$3' > "$@"
+ $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@"
endef
$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE
@@ -63,6 +65,7 @@ suffix-$(CONFIG_KERNEL_LZ4) := .lz4
suffix-$(CONFIG_KERNEL_LZMA) := .lzma
suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
+suffix-$(CONFIG_KERNEL_ZSTD) := .zst
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
@@ -76,6 +79,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
+$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,zstd22)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/compressed/clz_ctz.c
new file mode 100644
index 000000000000..c3ebf248596b
--- /dev/null
+++ b/arch/s390/boot/compressed/clz_ctz.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../../lib/clz_ctz.c"
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
index 3061b11c4d27..37a4a8d33c6c 100644
--- a/arch/s390/boot/compressed/decompressor.c
+++ b/arch/s390/boot/compressed/decompressor.c
@@ -28,8 +28,10 @@ extern char _end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
-#ifdef CONFIG_HAVE_KERNEL_BZIP2
+#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
+#elif defined(CONFIG_KERNEL_ZSTD)
+#define BOOT_HEAP_SIZE 0x30000
#else
#define BOOT_HEAP_SIZE 0x10000
#endif
@@ -61,6 +63,10 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#include "../../../../lib/decompress_unxz.c"
#endif
+#ifdef CONFIG_KERNEL_ZSTD
+#include "../../../../lib/decompress_unzstd.c"
+#endif
+
#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
unsigned long mem_safe_offset(void)
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index dacb7813f982..51693cfb65c2 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -401,6 +401,7 @@ SYM_CODE_END(startup_pgm_check_handler)
# Must be kept in sync with struct parmarea in setup.h
#
.org PARMAREA
+SYM_DATA_START(parmarea)
.quad 0 # IPL_DEVICE
.quad 0 # INITRD_START
.quad 0 # INITRD_SIZE
@@ -411,6 +412,8 @@ SYM_CODE_END(startup_pgm_check_handler)
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
+ .org PARMAREA+__PARMAREA_SIZE
+SYM_DATA_END(parmarea)
.org EARLY_SCCB_OFFSET
.fill 4096
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index d372a45fe10e..0f84c072625e 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -12,39 +12,44 @@
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
+int __bootdata(noexec_disabled);
+
+unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
-unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
-
-unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
-int __bootdata(noexec_disabled);
+unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
int kaslr_enabled;
static inline int __diag308(unsigned long subcode, void *addr)
{
- register unsigned long _addr asm("0") = (unsigned long)addr;
- register unsigned long _rc asm("1") = 0;
unsigned long reg1, reg2;
- psw_t old = S390_lowcore.program_new_psw;
+ union register_pair r1;
+ psw_t old;
+ r1.even = (unsigned long) addr;
+ r1.odd = 0;
asm volatile(
- " epsw %0,%1\n"
- " st %0,%[psw_pgm]\n"
- " st %1,%[psw_pgm]+4\n"
- " larl %0,1f\n"
- " stg %0,%[psw_pgm]+8\n"
- " diag %[addr],%[subcode],0x308\n"
- "1: nopr %%r7\n"
- : "=&d" (reg1), "=&a" (reg2),
- [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
- [addr] "+d" (_addr), "+d" (_rc)
- : [subcode] "d" (subcode)
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
+ " diag %[r1],%[subcode],0x308\n"
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [r1] "+&d" (r1.pair),
+ [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [subcode] "d" (subcode),
+ [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
- S390_lowcore.program_new_psw = old;
- return _rc;
+ return r1.odd;
}
void store_ipl_parmblock(void)
@@ -165,12 +170,12 @@ static inline int has_ebcdic_char(const char *str)
void setup_boot_command_line(void)
{
- COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+ parmarea.command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
- if (has_ebcdic_char(COMMAND_LINE))
- EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+ if (has_ebcdic_char(parmarea.command_line))
+ EBCASC(parmarea.command_line, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
- strcpy(early_command_line, strim(COMMAND_LINE));
+ strcpy(early_command_line, strim(parmarea.command_line));
/* append IPL PARM data to the boot command line */
if (!is_prot_virt_guest() && ipl_block_valid)
@@ -180,9 +185,9 @@ void setup_boot_command_line(void)
static void modify_facility(unsigned long nr, bool clear)
{
if (clear)
- __clear_facility(nr, S390_lowcore.stfle_fac_list);
+ __clear_facility(nr, stfle_fac_list);
else
- __set_facility(nr, S390_lowcore.stfle_fac_list);
+ __set_facility(nr, stfle_fac_list);
}
static void check_cleared_facilities(void)
@@ -191,7 +196,7 @@ static void check_cleared_facilities(void)
int i;
for (i = 0; i < ARRAY_SIZE(als); i++) {
- if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) {
+ if ((stfle_fac_list[i] & als[i]) != als[i]) {
sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
index 40168e59abd3..4e17adbde495 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/mem_detect.c
@@ -64,30 +64,37 @@ void add_mem_detect_block(u64 start, u64 end)
static int __diag260(unsigned long rx1, unsigned long rx2)
{
- register unsigned long _rx1 asm("2") = rx1;
- register unsigned long _rx2 asm("3") = rx2;
- register unsigned long _ry asm("4") = 0x10; /* storage configuration */
- int rc = -1; /* fail */
- unsigned long reg1, reg2;
- psw_t old = S390_lowcore.program_new_psw;
-
+ unsigned long reg1, reg2, ry;
+ union register_pair rx;
+ psw_t old;
+ int rc;
+
+ rx.even = rx1;
+ rx.odd = rx2;
+ ry = 0x10; /* storage configuration */
+ rc = -1; /* fail */
asm volatile(
- " epsw %0,%1\n"
- " st %0,%[psw_pgm]\n"
- " st %1,%[psw_pgm]+4\n"
- " larl %0,1f\n"
- " stg %0,%[psw_pgm]+8\n"
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
" diag %[rx],%[ry],0x260\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
- "1:\n"
- : "=&d" (reg1), "=&a" (reg2),
- [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
- [rc] "+&d" (rc), [ry] "+d" (_ry)
- : [rx] "d" (_rx1), "d" (_rx2)
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ [ry] "+&d" (ry),
+ "+Q" (S390_lowcore.program_new_psw),
+ "=Q" (old)
+ : [rx] "d" (rx.pair),
+ [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
- S390_lowcore.program_new_psw = old;
- return rc == 0 ? _ry : -1;
+ return rc == 0 ? ry : -1;
}
static int diag260(void)
@@ -111,24 +118,30 @@ static int diag260(void)
static int tprot(unsigned long addr)
{
- unsigned long pgm_addr;
+ unsigned long reg1, reg2;
int rc = -EFAULT;
- psw_t old = S390_lowcore.program_new_psw;
+ psw_t old;
- S390_lowcore.program_new_psw.mask = __extract_psw();
asm volatile(
- " larl %[pgm_addr],1f\n"
- " stg %[pgm_addr],%[psw_pgm_addr]\n"
+ " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
+ " epsw %[reg1],%[reg2]\n"
+ " st %[reg1],0(%[psw_pgm])\n"
+ " st %[reg2],4(%[psw_pgm])\n"
+ " larl %[reg1],1f\n"
+ " stg %[reg1],8(%[psw_pgm])\n"
" tprot 0(%[addr]),0\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
- "1:\n"
- : [pgm_addr] "=&d"(pgm_addr),
- [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
- [rc] "+&d"(rc)
- : [addr] "a"(addr)
+ "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
+ : [reg1] "=&d" (reg1),
+ [reg2] "=&a" (reg2),
+ [rc] "+&d" (rc),
+ "=Q" (S390_lowcore.program_new_psw.addr),
+ "=Q" (old)
+ : [psw_old] "a" (&old),
+ [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+ [addr] "a" (addr)
: "cc", "memory");
- S390_lowcore.program_new_psw = old;
return rc;
}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 05f8eefa3dcf..d0cf21641e3a 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -5,6 +5,7 @@
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
+#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
@@ -15,7 +16,17 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned long __bootdata_preserved(VMALLOC_START);
+unsigned long __bootdata_preserved(VMALLOC_END);
+struct page *__bootdata_preserved(vmemmap);
+unsigned long __bootdata_preserved(vmemmap_size);
+unsigned long __bootdata_preserved(MODULES_VADDR);
+unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
+int __bootdata(is_full_image) = 1;
+
+u64 __bootdata_preserved(stfle_fac_list[16]);
+u64 __bootdata_preserved(alt_stfle_fac_list[16]);
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
@@ -169,6 +180,86 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}
+static void setup_kernel_memory_layout(void)
+{
+ bool vmalloc_size_verified = false;
+ unsigned long vmemmap_off;
+ unsigned long vspace_left;
+ unsigned long rte_size;
+ unsigned long pages;
+ unsigned long vmax;
+
+ pages = ident_map_size / PAGE_SIZE;
+ /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
+ vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
+
+ /* choose kernel address space layout: 4 or 3 levels. */
+ vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
+ if (IS_ENABLED(CONFIG_KASAN) ||
+ vmalloc_size > _REGION2_SIZE ||
+ vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
+ vmax = _REGION1_SIZE;
+ else
+ vmax = _REGION2_SIZE;
+
+ /* keep vmemmap_off aligned to a top level region table entry */
+ rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
+ MODULES_END = vmax;
+ if (is_prot_virt_host()) {
+ /*
+ * force modules and the vmalloc area below the ultravisor
+ * secure storage limit, so that any vmalloc allocation
+ * we do can be used to back secure guest storage.
+ */
+ adjust_to_uv_max(&MODULES_END);
+ }
+
+#ifdef CONFIG_KASAN
+ if (MODULES_END < vmax) {
+ /* force vmalloc and modules below kasan shadow */
+ MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+ } else {
+ /*
+ * leave vmalloc and modules above kasan shadow but make
+ * sure they don't overlap with it
+ */
+ vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
+ vmalloc_size_verified = true;
+ vspace_left = KASAN_SHADOW_START;
+ }
+#endif
+ MODULES_VADDR = MODULES_END - MODULES_LEN;
+ VMALLOC_END = MODULES_VADDR;
+
+ if (vmalloc_size_verified) {
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
+ } else {
+ vmemmap_off = round_up(ident_map_size, rte_size);
+
+ if (vmemmap_off + vmemmap_size > VMALLOC_END ||
+ vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
+ /*
+ * allow the vmalloc area to occupy up to 1/2 of
+ * the remaining virtual space.
+ */
+ vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
+ }
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
+ vspace_left = VMALLOC_START;
+ }
+
+ pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
+ pages = SECTION_ALIGN_UP(pages);
+ vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
+ /* keep vmemmap leftmost, starting from a fresh region table entry */
+ vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
+ /* take care that the identity map is lower than vmemmap */
+ ident_map_size = min(ident_map_size, vmemmap_off);
+ vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
+ VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
+ vmemmap = (struct page *)vmemmap_off;
+}
+
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
@@ -208,6 +299,7 @@ void startup_kernel(void)
parse_boot_command_line();
setup_ident_map_size(detect_memory());
setup_vmalloc_size();
+ setup_kernel_memory_layout();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S
index f7c77cd518f2..5ff5fee02801 100644
--- a/arch/s390/boot/text_dma.S
+++ b/arch/s390/boot/text_dma.S
@@ -9,16 +9,6 @@
#include <asm/errno.h>
#include <asm/sigp.h>
-#ifdef CC_USING_EXPOLINE
- .pushsection .dma.text.__s390_indirect_jump_r14,"axG"
-__dma__s390_indirect_jump_r14:
- larl %r1,0f
- ex 0,0(%r1)
- j .
-0: br %r14
- .popsection
-#endif
-
.section .dma.text,"ax"
/*
* Simplified version of expoline thunk. The normal thunks can not be used here,
@@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14:
* affects a few functions that are not performance-relevant.
*/
.macro BR_EX_DMA_r14
-#ifdef CC_USING_EXPOLINE
- jg __dma__s390_indirect_jump_r14
-#else
- br %r14
-#endif
+ larl %r1,0f
+ ex 0,0(%r1)
+ j .
+0: br %r14
.endm
/*
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 87641dd65ccf..f6b0c4f43c99 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -36,6 +36,7 @@ void uv_query_info(void)
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
+ uv_info.uv_feature_indications = uvcb.uv_feature_indications;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
@@ -44,3 +45,28 @@ void uv_query_info(void)
prot_virt_guest = 1;
#endif
}
+
+#if IS_ENABLED(CONFIG_KVM)
+static bool has_uv_sec_stor_limit(void)
+{
+ /*
+ * keep these conditions in line with setup_uv()
+ */
+ if (!is_prot_virt_host())
+ return false;
+
+ if (is_prot_virt_guest())
+ return false;
+
+ if (!test_facility(158))
+ return false;
+
+ return !!uv_info.max_sec_stor_addr;
+}
+
+void adjust_to_uv_max(unsigned long *vmax)
+{
+ if (has_uv_sec_stor_limit())
+ *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+}
+#endif
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 86afcc6b56bf..b88184019af9 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -5,7 +5,12 @@ CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -28,14 +33,13 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@@ -76,6 +80,7 @@ CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
@@ -95,6 +100,7 @@ CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
@@ -158,6 +164,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
CONFIG_MPTCP=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -280,6 +287,7 @@ CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -327,7 +335,7 @@ CONFIG_L2TP_DEBUGFS=m
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
@@ -384,12 +392,11 @@ CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
-CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
-CONFIG_PCI_IOV=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
+CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
@@ -436,7 +443,7 @@ CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM=y
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -453,6 +460,7 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
+CONFIG_DM_INIT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
@@ -495,6 +503,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@@ -551,7 +560,6 @@ CONFIG_INPUT_EVDEV=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
CONFIG_PPS=m
@@ -574,7 +582,6 @@ CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
@@ -619,6 +626,7 @@ CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_STATS=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -654,7 +662,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
@@ -682,6 +689,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
@@ -696,6 +704,7 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -843,7 +852,6 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
-CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_MIN_HEAP=y
CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
@@ -853,3 +861,4 @@ CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_LIVEPATCH=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 71b49ea5b058..1667a3cdcf0a 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -4,6 +4,11 @@ CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -26,14 +31,13 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@@ -70,6 +74,7 @@ CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
@@ -87,6 +92,7 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
+CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
@@ -149,6 +155,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
CONFIG_MPTCP=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -271,6 +278,7 @@ CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -317,7 +325,7 @@ CONFIG_L2TP_DEBUGFS=m
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
@@ -374,11 +382,10 @@ CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
-CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
-CONFIG_PCI_IOV=y
# CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
@@ -427,7 +434,7 @@ CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM=y
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -444,6 +451,7 @@ CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
+CONFIG_DM_INIT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
@@ -487,6 +495,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
@@ -543,7 +552,6 @@ CONFIG_INPUT_EVDEV=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
# CONFIG_PTP_1588_CLOCK is not set
@@ -566,7 +574,6 @@ CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
@@ -607,6 +614,7 @@ CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_STATS=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -642,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
@@ -669,6 +676,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
@@ -684,6 +692,7 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -754,6 +763,7 @@ CONFIG_CRC8=m
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
@@ -781,3 +791,4 @@ CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_TEST_LIVEPATCH=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 76123a4b26ab..d576aaab27c9 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -29,9 +29,9 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
+# CONFIG_PCPU_DEV_REFCNT is not set
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_RAM=y
@@ -51,7 +51,6 @@ CONFIG_ZFCP=y
# CONFIG_SERIO is not set
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
-CONFIG_RAW_DRIVER=y
# CONFIG_HMC_DRV is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index 7d9fb496d155..f5f7e78ddc0c 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -25,14 +25,13 @@
static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
- register unsigned long _data asm("2") = (unsigned long) data;
- register unsigned long _rc asm("3");
- register unsigned long _cmd asm("4") = cmd;
+ union register_pair r1 = { .even = (unsigned long)data, };
- asm volatile("diag %1,%2,0x304\n"
- : "=d" (_rc) : "d" (_data), "d" (_cmd) : "memory");
-
- return _rc;
+ asm volatile("diag %[r1],%[r3],0x304\n"
+ : [r1] "+&d" (r1.pair)
+ : [r3] "d" (cmd)
+ : "memory");
+ return r1.odd;
}
static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd)
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 837d1699b109..3afbee21dc1f 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -53,18 +53,20 @@ struct ap_queue_status {
*/
static inline bool ap_instructions_available(void)
{
- register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
- register unsigned long reg1 asm ("1") = 0;
- register unsigned long reg2 asm ("2") = 0;
+ unsigned long reg0 = AP_MKQID(0, 0);
+ unsigned long reg1 = 0;
asm volatile(
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %0,1\n"
+ " lgr 0,%[reg0]\n" /* qid into gr0 */
+ " lghi 1,0\n" /* 0 into gr1 */
+ " lghi 2,0\n" /* 0 into gr2 */
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %[reg1],1\n" /* 1 into reg1 */
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (reg1), "+d" (reg2)
- : "d" (reg0)
- : "cc");
+ : [reg1] "+&d" (reg1)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1", "2");
return reg1 != 0;
}
@@ -77,14 +79,18 @@ static inline bool ap_instructions_available(void)
*/
static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2");
-
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "=d" (reg1), "=d" (reg2)
- : "d" (reg0)
- : "cc");
+ struct ap_queue_status reg1;
+ unsigned long reg2;
+
+ asm volatile(
+ " lgr 0,%[qid]\n" /* qid into gr0 */
+ " lghi 2,0\n" /* 0 into gr2 */
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg2],2\n" /* gr2 into reg2 */
+ : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
+ : [qid] "d" (qid)
+ : "cc", "0", "1", "2");
if (info)
*info = reg2;
return reg1;
@@ -115,14 +121,16 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
*/
static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
{
- register unsigned long reg0 asm ("0") = qid | (1UL << 24);
- register struct ap_queue_status reg1 asm ("1");
+ unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
+ struct ap_queue_status reg1;
asm volatile(
- ".long 0xb2af0000" /* PQAP(RAPQ) */
- : "=d" (reg1)
- : "d" (reg0)
- : "cc");
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .long 0xb2af0000\n" /* PQAP(RAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1");
return reg1;
}
@@ -134,14 +142,16 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
*/
static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
{
- register unsigned long reg0 asm ("0") = qid | (2UL << 24);
- register struct ap_queue_status reg1 asm ("1");
+ unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
+ struct ap_queue_status reg1;
asm volatile(
- ".long 0xb2af0000" /* PQAP(ZAPQ) */
- : "=d" (reg1)
- : "d" (reg0)
- : "cc");
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .long 0xb2af0000\n" /* PQAP(ZAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1");
return reg1;
}
@@ -172,18 +182,20 @@ struct ap_config_info {
*/
static inline int ap_qci(struct ap_config_info *config)
{
- register unsigned long reg0 asm ("0") = 4UL << 24;
- register unsigned long reg1 asm ("1") = -EOPNOTSUPP;
- register struct ap_config_info *reg2 asm ("2") = config;
+ unsigned long reg0 = 4UL << 24; /* fc 4UL is QCI */
+ unsigned long reg1 = -EOPNOTSUPP;
+ struct ap_config_info *reg2 = config;
asm volatile(
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %0,0\n"
+ " lgr 0,%[reg0]\n" /* QCI fc into gr0 */
+ " lgr 2,%[reg2]\n" /* ptr to config into gr2 */
+ " .long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %[reg1],0\n" /* good case, QCI fc available */
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (reg1)
- : "d" (reg0), "d" (reg2)
- : "cc", "memory");
+ : [reg1] "+&d" (reg1)
+ : [reg0] "d" (reg0), [reg2] "d" (reg2)
+ : "cc", "memory", "0", "2");
return reg1;
}
@@ -220,21 +232,25 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
struct ap_qirq_ctrl qirqctrl,
void *ind)
{
- register unsigned long reg0 asm ("0") = qid | (3UL << 24);
- register union {
+ unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */
+ union {
unsigned long value;
struct ap_qirq_ctrl qirqctrl;
struct ap_queue_status status;
- } reg1 asm ("1");
- register void *reg2 asm ("2") = ind;
+ } reg1;
+ void *reg2 = ind;
reg1.qirqctrl = qirqctrl;
asm volatile(
- ".long 0xb2af0000" /* PQAP(AQIC) */
- : "+d" (reg1)
- : "d" (reg0), "d" (reg2)
- : "cc");
+ " lgr 0,%[reg0]\n" /* qid param into gr0 */
+ " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */
+ " lgr 2,%[reg2]\n" /* ni addr into gr2 */
+ " .long 0xb2af0000\n" /* PQAP(AQIC) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "+&d" (reg1)
+ : [reg0] "d" (reg0), [reg2] "d" (reg2)
+ : "cc", "0", "1", "2");
return reg1.status;
}
@@ -268,21 +284,24 @@ union ap_qact_ap_info {
static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
union ap_qact_ap_info *apinfo)
{
- register unsigned long reg0 asm ("0") = qid | (5UL << 24)
- | ((ifbit & 0x01) << 22);
- register union {
+ unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22);
+ union {
unsigned long value;
struct ap_queue_status status;
- } reg1 asm ("1");
- register unsigned long reg2 asm ("2");
+ } reg1;
+ unsigned long reg2;
reg1.value = apinfo->val;
asm volatile(
- ".long 0xb2af0000" /* PQAP(QACT) */
- : "+d" (reg1), "=d" (reg2)
- : "d" (reg0)
- : "cc");
+ " lgr 0,%[reg0]\n" /* qid param into gr0 */
+ " lgr 1,%[reg1]\n" /* qact in info into gr1 */
+ " .long 0xb2af0000\n" /* PQAP(QACT) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg2],2\n" /* qact out info into reg2 */
+ : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1", "2");
apinfo->val = reg2;
return reg1.status;
}
@@ -303,19 +322,24 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
unsigned long long psmid,
void *msg, size_t length)
{
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+ unsigned long reg0 = qid | 0x40000000UL; /* 0x4... is last msg part */
+ union register_pair nqap_r1, nqap_r2;
+ struct ap_queue_status reg1;
+
+ nqap_r1.even = (unsigned int)(psmid >> 32);
+ nqap_r1.odd = psmid & 0xffffffff;
+ nqap_r2.even = (unsigned long)msg;
+ nqap_r2.odd = (unsigned long)length;
asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5)
- : "cc", "memory");
+ " lgr 0,%[reg0]\n" /* qid param in gr0 */
+ "0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n"
+ " brc 2,0b\n" /* handle partial completion */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1),
+ [nqap_r2] "+&d" (nqap_r2.pair)
+ : [nqap_r1] "d" (nqap_r1.pair)
+ : "cc", "memory", "0", "1");
return reg1;
}
@@ -325,6 +349,8 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* @psmid: Pointer to program supplied message identifier
* @msg: The message text
* @length: The message length
+ * @reslength: Residual length on return
+ * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
@@ -336,27 +362,65 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* Note that gpr2 is used by the DQAP instruction to keep track of
* any 'residual' length, in case the instruction gets interrupted.
* Hence it gets zeroed before the instruction.
+ * If the message does not fit into the buffer, this function returns
+ * a truncated message and the reply stays in the firmware queue. This
+ * is indicated to the caller with an ap_queue_status response_code
+ * value of all bits on (0xFF). If the reslength ptr is given, the
+ * remaining length is stored in *reslength; if the resgr0 ptr is
+ * given, the updated gr0 value needed to further process this msg
+ * entry is stored in *resgr0. The caller has to detect this situation
+ * and invoke ap_dqap again with a valid resgr0 ptr and a value in
+ * there != 0 to indicate that *resgr0 is to be used instead of qid
+ * to further process this entry.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
unsigned long long *psmid,
- void *msg, size_t length)
+ void *msg, size_t length,
+ size_t *reslength,
+ unsigned long *resgr0)
{
- register unsigned long reg0 asm("0") = qid | 0x80000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm("2") = 0UL;
- register unsigned long reg4 asm("4") = (unsigned long) msg;
- register unsigned long reg5 asm("5") = (unsigned long) length;
- register unsigned long reg6 asm("6") = 0UL;
- register unsigned long reg7 asm("7") = 0UL;
+ unsigned long reg0 = resgr0 && *resgr0 ? *resgr0 : qid | 0x80000000UL;
+ struct ap_queue_status reg1;
+ unsigned long reg2;
+ union register_pair rp1, rp2;
+ rp1.even = 0UL;
+ rp1.odd = 0UL;
+ rp2.even = (unsigned long)msg;
+ rp2.odd = (unsigned long)length;
asm volatile(
- "0: .long 0xb2ae0064\n" /* DQAP */
- " brc 6,0b\n"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
- : : "cc", "memory");
- *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ " lgr 0,%[reg0]\n" /* qid param into gr0 */
+ " lghi 2,0\n" /* 0 into gr2 (res length) */
+ "0: ltgr %N[rp2],%N[rp2]\n" /* check buf len */
+ " jz 2f\n" /* go out if buf len is 0 */
+ "1: .insn rre,0xb2ae0000,%[rp1],%[rp2]\n"
+ " brc 6,0b\n" /* handle partial complete */
+ "2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg2],2\n" /* gr2 (res length) into reg2 */
+ : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1), [reg2] "=&d" (reg2),
+ [rp1] "+&d" (rp1.pair), [rp2] "+&d" (rp2.pair)
+ :
+ : "cc", "memory", "0", "1", "2");
+
+ if (reslength)
+ *reslength = reg2;
+ if (reg2 != 0 && rp2.odd == 0) {
+ /*
+ * Partially complete, status in gr1 is not set.
+ * Signal the caller that this dqap reply was only partially received,
+ * with the special status response code 0xFF and *resgr0 updated.
+ */
+ reg1.response_code = 0xFF;
+ if (resgr0)
+ *resgr0 = reg0;
+ } else {
+ *psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
+ if (resgr0)
+ *resgr0 = 0;
+ }
+
return reg1;
}
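A minimal caller sketch (helper name and buffer handling assumed, not from this patch) of the truncation protocol documented above for the extended ap_dqap(): a response_code of 0xFF together with a non-zero *resgr0 means the reply is still pending in the firmware queue and must be continued via resgr0 rather than qid.
/* Illustrative sketch only: ap_reply_fits() and its buffer are made up. */
static inline bool ap_reply_fits(ap_qid_t qid, unsigned long long *psmid,
				 void *buf, size_t buflen)
{
	struct ap_queue_status status;
	unsigned long resgr0 = 0;
	size_t reslen = 0;

	status = ap_dqap(qid, psmid, buf, buflen, &reslen, &resgr0);
	if (status.response_code == 0xFF && resgr0) {
		/*
		 * Truncated reply: reslen bytes are still pending in the
		 * firmware queue. A follow-up ap_dqap() call has to pass
		 * the non-zero resgr0 back in so this entry is continued
		 * instead of starting a new dequeue on qid.
		 */
		return false;
	}
	return true;
}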
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 68da67d2c4c9..fd149480b6e2 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -299,13 +299,13 @@ static inline unsigned char __flogr(unsigned long word)
}
return bit;
} else {
- register unsigned long bit asm("4") = word;
- register unsigned long out asm("5");
+ union register_pair rp;
+ rp.even = word;
asm volatile(
- " flogr %[bit],%[bit]\n"
- : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
- return bit;
+ " flogr %[rp],%[rp]\n"
+ : [rp] "+d" (rp.pair) : : "cc");
+ return rp.even;
}
}
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index ad3acb1e882b..20f169b6db4e 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -11,8 +11,7 @@ struct ccw_driver;
* @count: number of attached slave devices
* @dev: embedded device structure
* @cdev: variable number of slave devices, allocated as needed
- * @ungroup_work: work to be done when a ccwgroup notifier has action
- * type %BUS_NOTIFY_UNBIND_DRIVER
+ * @ungroup_work: used to ungroup the ccwgroup device
*/
struct ccwgroup_device {
enum {
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index a8c02cfbc712..cdd19d326345 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -29,13 +29,15 @@
*/
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
- register unsigned long reg2 asm("2") = (unsigned long) buff;
- register unsigned long reg3 asm("3") = (unsigned long) len;
+ union register_pair rp = {
+ .even = (unsigned long) buff,
+ .odd = (unsigned long) len,
+ };
asm volatile(
- "0: cksm %0,%1\n" /* do checksum on longs */
+ "0: cksm %[sum],%[rp]\n"
" jo 0b\n"
- : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
+ : [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
return sum;
}
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ac02df906cae..f58c92f28701 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -9,6 +9,7 @@
#include <linux/bitops.h>
#include <linux/genalloc.h>
#include <asm/types.h>
+#include <asm/tpi.h>
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 1960a7295ae5..84c3f0d576c5 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -169,32 +169,36 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
#define system_has_cmpxchg_double() 1
-#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
-({ \
- register __typeof__(*(p1)) __old1 asm("2") = (o1); \
- register __typeof__(*(p2)) __old2 asm("3") = (o2); \
- register __typeof__(*(p1)) __new1 asm("4") = (n1); \
- register __typeof__(*(p2)) __new2 asm("5") = (n2); \
- int cc; \
- asm volatile( \
- " cdsg %[old],%[new],%[ptr]\n" \
- " ipm %[cc]\n" \
- " srl %[cc],28" \
- : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
- : [new] "d" (__new1), "d" (__new2), \
- [ptr] "Q" (*(p1)), "Q" (*(p2)) \
- : "memory", "cc"); \
- !cc; \
-})
+static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
+ unsigned long o1, unsigned long o2,
+ unsigned long n1, unsigned long n2)
+{
+ union register_pair old = { .even = o1, .odd = o2, };
+ union register_pair new = { .even = n1, .odd = n2, };
+ int cc;
+
+ asm volatile(
+ " cdsg %[old],%[new],%[ptr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [old] "+&d" (old.pair)
+ : [new] "d" (new.pair),
+ [ptr] "QS" (*(unsigned long *)p1), "Q" (*(unsigned long *)p2)
+ : "memory", "cc");
+ return !cc;
+}
#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- __typeof__(p1) __p1 = (p1); \
- __typeof__(p2) __p2 = (p2); \
+ typeof(p1) __p1 = (p1); \
+ typeof(p2) __p2 = (p2); \
+ \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
+ __cmpxchg_double((unsigned long)__p1, (unsigned long)__p2, \
+ (unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2)); \
})
#endif /* __ASM_CMPXCHG_H */
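A small usage sketch (struct and helper names assumed) of the constraint the reworked arch_cmpxchg_double() still enforces: both locations must be adjacent long-sized words, and the pair must be 16-byte aligned for cdsg.
/* Illustrative only: a 16-byte aligned pair of adjacent longs. */
struct dword_pair {
	unsigned long first;
	unsigned long second;
} __aligned(16);

static inline bool pair_try_update(struct dword_pair *p,
				   unsigned long old1, unsigned long old2,
				   unsigned long new1, unsigned long new2)
{
	/* true when both words still held old1/old2 and were replaced atomically */
	return arch_cmpxchg_double(&p->first, &p->second, old1, old2, new1, new2);
}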
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
index 3e4cbcb7c4cc..ca0e0e5ddbc4 100644
--- a/arch/s390/include/asm/cpu_mcf.h
+++ b/arch/s390/include/asm/cpu_mcf.h
@@ -32,39 +32,22 @@ static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
[CPUMF_CTR_SET_MT_DIAG] = 0x20,
};
-static inline void ctr_set_enable(u64 *state, int ctr_set)
-{
- *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
-}
-static inline void ctr_set_disable(u64 *state, int ctr_set)
-{
- *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
-}
-static inline void ctr_set_start(u64 *state, int ctr_set)
-{
- *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
-}
-static inline void ctr_set_stop(u64 *state, int ctr_set)
-{
- *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
-}
-
-static inline void ctr_set_multiple_enable(u64 *state, u64 ctrsets)
+static inline void ctr_set_enable(u64 *state, u64 ctrsets)
{
*state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT;
}
-static inline void ctr_set_multiple_disable(u64 *state, u64 ctrsets)
+static inline void ctr_set_disable(u64 *state, u64 ctrsets)
{
*state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT);
}
-static inline void ctr_set_multiple_start(u64 *state, u64 ctrsets)
+static inline void ctr_set_start(u64 *state, u64 ctrsets)
{
*state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT;
}
-static inline void ctr_set_multiple_stop(u64 *state, u64 ctrsets)
+static inline void ctr_set_stop(u64 *state, u64 ctrsets)
{
*state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT);
}
@@ -92,9 +75,15 @@ struct cpu_cf_events {
struct cpumf_ctr_info info;
atomic_t ctr_set[CPUMF_CTR_SET_MAX];
atomic64_t alert;
- u64 state, tx_state;
+ u64 state; /* For perf_event_open SVC */
+ u64 dev_state; /* For /dev/hwctr */
unsigned int flags;
- unsigned int txn_flags;
+ size_t used; /* Bytes used in data */
+ size_t usedss; /* Bytes used in start/stop */
+ unsigned char start[PAGE_SIZE]; /* Counter set at event add */
+ unsigned char stop[PAGE_SIZE]; /* Counter set at event delete */
+ unsigned char data[PAGE_SIZE]; /* Counter set at /dev/hwctr */
+ unsigned int sets; /* # Counter set saved in memory */
};
DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
@@ -125,4 +114,6 @@ static inline int stccm_avail(void)
size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
struct cpumf_ctr_info *info);
+int cfset_online_cpu(unsigned int cpu);
+int cfset_offline_cpu(unsigned int cpu);
#endif /* _ASM_S390_CPU_MCF_H */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index ed5efbb531c4..adc0179fa34e 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -21,8 +21,6 @@
#define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57)
#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58)
-#define CR2_GUARDED_STORAGE BIT(63 - 59)
-
#define CR14_UNUSED_32 BIT(63 - 32)
#define CR14_UNUSED_33 BIT(63 - 33)
#define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 66d51ad090ab..bd00c94620d3 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -144,10 +144,6 @@ typedef s390_compat_regs compat_elf_gregset_t;
#include <linux/sched/mm.h> /* for task_struct */
#include <asm/mmu_context.h>
-#include <asm/vdso.h>
-
-extern unsigned int vdso_enabled;
-
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -176,7 +172,7 @@ struct arch_elf_state {
!current->mm->context.alloc_pgste) { \
set_thread_flag(TIF_PGSTE); \
set_pt_regs_flag(task_pt_regs(current), \
- PIF_SYSCALL_RESTART); \
+ PIF_EXECVE_PGSTE_RESTART); \
_state->rc = -EAGAIN; \
} \
_state->rc; \
@@ -268,11 +264,10 @@ do { \
#define STACK_RND_MASK MMAP_RND_MASK
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
-#define ARCH_DLINFO \
-do { \
- if (vdso_enabled) \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
- (unsigned long)current->mm->context.vdso_base); \
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso_base); \
} while (0)
struct linux_binprm;
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index baa8005090c3..17aead80aadb 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -14,7 +14,6 @@
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
void do_per_trap(struct pt_regs *regs);
-void do_syscall(struct pt_regs *regs);
#ifdef CONFIG_DEBUG_ENTRY
static __always_inline void arch_check_user_regs(struct pt_regs *regs)
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 91b5d714d28f..e3aa354ab9f4 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -13,7 +13,10 @@
#include <linux/preempt.h>
#include <asm/lowcore.h>
-#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
+
+extern u64 stfle_fac_list[16];
+extern u64 alt_stfle_fac_list[16];
static inline void __set_facility(unsigned long nr, void *facilities)
{
@@ -56,18 +59,20 @@ static inline int test_facility(unsigned long nr)
if (__test_facility(nr, &facilities_als))
return 1;
}
- return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+ return __test_facility(nr, &stfle_fac_list);
}
static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
{
- register unsigned long reg0 asm("0") = size - 1;
+ unsigned long reg0 = size - 1;
asm volatile(
- ".insn s,0xb2b00000,0(%1)" /* stfle */
- : "+d" (reg0)
- : "a" (stfle_fac_list)
- : "memory", "cc");
+ " lgr 0,%[reg0]\n"
+ " .insn s,0xb2b00000,%[list]\n" /* stfle */
+ " lgr %[reg0],0\n"
+ : [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
+ :
+ : "memory", "cc", "0");
return reg0;
}
@@ -79,13 +84,15 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
static inline void __stfle(u64 *stfle_fac_list, int size)
{
unsigned long nr;
+ u32 stfl_fac_list;
asm volatile(
" stfl 0(0)\n"
: "=m" (S390_lowcore.stfl_fac_list));
+ stfl_fac_list = S390_lowcore.stfl_fac_list;
+ memcpy(stfle_fac_list, &stfl_fac_list, 4);
nr = 4; /* bytes stored by stfl */
- memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
- if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ if (stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
nr = __stfle_asm(stfle_fac_list, size);
nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 695c61989f97..345cbe982a8b 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -19,6 +19,7 @@ void ftrace_caller(void);
extern char ftrace_graph_caller_end;
extern unsigned long ftrace_plt;
+extern void *ftrace_func;
struct dyn_arch_ftrace { };
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 586df4c9e2f2..02427b205c11 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -32,45 +32,45 @@
})
/* set system mask. */
-static inline notrace void __arch_local_irq_ssm(unsigned long flags)
+static __always_inline void __arch_local_irq_ssm(unsigned long flags)
{
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
-static inline notrace unsigned long arch_local_save_flags(void)
+static __always_inline unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stnsm(0xff);
}
-static inline notrace unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
-static inline notrace void arch_local_irq_disable(void)
+static __always_inline void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
-static inline notrace void arch_local_irq_enable(void)
+static __always_inline void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
/* This only restores external and I/O interrupt state */
-static inline notrace void arch_local_irq_restore(unsigned long flags)
+static __always_inline void arch_local_irq_restore(unsigned long flags)
{
/* only disabled->disabled and disabled->enabled is valid */
if (flags & ARCH_IRQ_ENABLED)
arch_local_irq_enable();
}
-static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
+static __always_inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & ARCH_IRQ_ENABLED);
}
-static inline notrace bool arch_irqs_disabled(void)
+static __always_inline bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 76f351bd6645..2768d5db181f 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -16,7 +16,6 @@
extern void kasan_early_init(void);
extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
-extern unsigned long kasan_vmax;
/*
* Estimate kasan memory requirements, which it will reserve
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9b4473f76e56..161a9e12bfb8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -445,15 +445,15 @@ struct kvm_vcpu_stat {
u64 instruction_sigp_init_cpu_reset;
u64 instruction_sigp_cpu_reset;
u64 instruction_sigp_unknown;
- u64 diagnose_10;
- u64 diagnose_44;
- u64 diagnose_9c;
- u64 diagnose_9c_ignored;
- u64 diagnose_9c_forward;
- u64 diagnose_258;
- u64 diagnose_308;
- u64 diagnose_500;
- u64 diagnose_other;
+ u64 instruction_diagnose_10;
+ u64 instruction_diagnose_44;
+ u64 instruction_diagnose_9c;
+ u64 diag_9c_ignored;
+ u64 diag_9c_forward;
+ u64 instruction_diagnose_258;
+ u64 instruction_diagnose_308;
+ u64 instruction_diagnose_500;
+ u64 instruction_diagnose_other;
u64 pfault_sync;
};
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index a0a7a2c72bd4..24e8fed150cf 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -5,7 +5,7 @@
#include <asm/asm-const.h>
#include <linux/stringify.h>
-#define __ALIGN .align 4, 0x07
+#define __ALIGN .align 16, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
/*
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 22bceeeba4bc..47bde5a20a41 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -17,15 +17,23 @@
#define LC_ORDER 1
#define LC_PAGES 2
+struct pgm_tdb {
+ u64 data[32];
+};
+
struct lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
__u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
__u32 ext_params; /* 0x0080 */
- __u16 ext_cpu_addr; /* 0x0084 */
- __u16 ext_int_code; /* 0x0086 */
- __u16 svc_ilc; /* 0x0088 */
- __u16 svc_code; /* 0x008a */
+ union {
+ struct {
+ __u16 ext_cpu_addr; /* 0x0084 */
+ __u16 ext_int_code; /* 0x0086 */
+ };
+ __u32 ext_int_code_addr;
+ };
+ __u32 svc_int_code; /* 0x0088 */
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
__u32 data_exc_code; /* 0x0090 */
@@ -40,10 +48,15 @@ struct lowcore {
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
__u64 trans_exc_code; /* 0x00a8 */
__u64 monitor_code; /* 0x00b0 */
- __u16 subchannel_id; /* 0x00b8 */
- __u16 subchannel_nr; /* 0x00ba */
- __u32 io_int_parm; /* 0x00bc */
- __u32 io_int_word; /* 0x00c0 */
+ union {
+ struct {
+ __u16 subchannel_id; /* 0x00b8 */
+ __u16 subchannel_nr; /* 0x00ba */
+ __u32 io_int_parm; /* 0x00bc */
+ __u32 io_int_word; /* 0x00c0 */
+ };
+ struct tpi_info tpi_info; /* 0x00b8 */
+ };
__u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
__u32 stfl_fac_list; /* 0x00c8 */
__u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
@@ -154,12 +167,7 @@ struct lowcore {
__u64 vmcore_info; /* 0x0e0c */
__u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */
__u64 os_info; /* 0x0e18 */
- __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
-
- /* Extended facility list */
- __u64 stfle_fac_list[16]; /* 0x0f00 */
- __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
- __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
+ __u8 pad_0x0e20[0x11b0-0x0e20]; /* 0x0e20 */
/* Pointer to the machine check extended save area */
__u64 mcesad; /* 0x11b0 */
@@ -185,7 +193,7 @@ struct lowcore {
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
/* Transaction abort diagnostic block */
- __u8 pgm_tdb[256]; /* 0x1800 */
+ struct pgm_tdb pgm_tdb; /* 0x1800 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
} __packed __aligned(8192);
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index e7cffc7b5c2f..c7937f369e62 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -70,8 +70,8 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
{
int cpu = smp_processor_id();
@@ -85,6 +85,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 20e51c9ff240..2db45d7e68aa 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -23,12 +23,16 @@
#define MCCK_CODE_SYSTEM_DAMAGE BIT(63)
#define MCCK_CODE_EXT_DAMAGE BIT(63 - 5)
#define MCCK_CODE_CP BIT(63 - 9)
-#define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46)
+#define MCCK_CODE_STG_ERROR BIT(63 - 16)
+#define MCCK_CODE_STG_KEY_ERROR BIT(63 - 18)
+#define MCCK_CODE_STG_DEGRAD BIT(63 - 19)
#define MCCK_CODE_PSW_MWP_VALID BIT(63 - 20)
#define MCCK_CODE_PSW_IA_VALID BIT(63 - 23)
+#define MCCK_CODE_STG_FAIL_ADDR BIT(63 - 24)
#define MCCK_CODE_CR_VALID BIT(63 - 29)
#define MCCK_CODE_GS_VALID BIT(63 - 36)
#define MCCK_CODE_FC_VALID BIT(63 - 43)
+#define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46)
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 479dc76e0eca..3ba945c6b9dc 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -55,13 +55,16 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
*/
static inline void copy_page(void *to, void *from)
{
- register void *reg2 asm ("2") = to;
- register unsigned long reg3 asm ("3") = 0x1000;
- register void *reg4 asm ("4") = from;
- register unsigned long reg5 asm ("5") = 0xb0001000;
+ union register_pair dst, src;
+
+ dst.even = (unsigned long) to;
+ dst.odd = 0x1000;
+ src.even = (unsigned long) from;
+ src.odd = 0xb0001000;
+
asm volatile(
- " mvcl 2,4"
- : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+ " mvcl %[dst],%[src]"
+ : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
: : "memory", "cc");
}
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 10b67f8aab99..5509b224c2ec 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -133,7 +133,8 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
- u8 reserved : 3;
+ u8 irqs_registered : 1;
+ u8 reserved : 2;
unsigned int devfn; /* DEVFN part of the RID*/
struct mutex lock;
@@ -271,9 +272,13 @@ struct zpci_dev *get_zdev_by_fid(u32);
int zpci_dma_init(void);
void zpci_dma_exit(void);
+/* IRQ */
int __init zpci_irq_init(void);
void __init zpci_irq_exit(void);
+int zpci_set_irq(struct zpci_dev *zdev);
+int zpci_clear_irq(struct zpci_dev *zdev);
+
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 918f0ba4f4d2..cb5fc0690435 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -164,19 +164,20 @@
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
-({ \
- typeof(pcp1) o1__ = (o1), n1__ = (n1); \
- typeof(pcp2) o2__ = (o2), n2__ = (n2); \
- typeof(pcp1) *p1__; \
- typeof(pcp2) *p2__; \
- int ret__; \
- preempt_disable_notrace(); \
- p1__ = raw_cpu_ptr(&(pcp1)); \
- p2__ = raw_cpu_ptr(&(pcp2)); \
- ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable_notrace(); \
- ret__; \
+#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
+({ \
+ typeof(pcp1) *p1__; \
+ typeof(pcp2) *p2__; \
+ int ret__; \
+ \
+ preempt_disable_notrace(); \
+ p1__ = raw_cpu_ptr(&(pcp1)); \
+ p2__ = raw_cpu_ptr(&(pcp2)); \
+ ret__ = __cmpxchg_double((unsigned long)p1__, (unsigned long)p2__, \
+ (unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2)); \
+ preempt_enable_notrace(); \
+ ret__; \
})
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 6b187cd72251..f14a555eff74 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -134,9 +134,6 @@ static inline void pmd_populate(struct mm_struct *mm,
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
-#define pmd_pgtable(pmd) \
- ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
-
/*
* page table entry allocation/free routines.
*/
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b38f7b781564..dcac7b2df72c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -17,6 +17,7 @@
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
+#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -65,8 +66,6 @@ extern unsigned long zero_page_mask;
/* TODO: s390 cannot support io_remap_pfn_range... */
-#define FIRST_USER_ADDRESS 0UL
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
@@ -86,16 +85,16 @@ extern unsigned long zero_page_mask;
* happen without trampolines and in addition the placement within a
* 2GB frame is branch prediction unit friendly.
*/
-extern unsigned long VMALLOC_START;
-extern unsigned long VMALLOC_END;
+extern unsigned long __bootdata_preserved(VMALLOC_START);
+extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
-extern struct page *vmemmap;
-extern unsigned long vmemmap_size;
+extern struct page *__bootdata_preserved(vmemmap);
+extern unsigned long __bootdata_preserved(vmemmap_size);
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-extern unsigned long MODULES_VADDR;
-extern unsigned long MODULES_END;
+extern unsigned long __bootdata_preserved(MODULES_VADDR);
+extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
@@ -555,27 +554,25 @@ static inline int mm_uses_skeys(struct mm_struct *mm)
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
+ union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
- " csp %0,%3"
- : "+d" (reg2), "+m" (*ptr)
- : "d" (reg3), "d" (address)
+ " csp %[r1],%[address]"
+ : [r1] "+&d" (r1.pair), "+m" (*ptr)
+ : [address] "d" (address)
: "cc");
}
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
+ union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
- " .insn rre,0xb98a0000,%0,%3"
- : "+d" (reg2), "+m" (*ptr)
- : "d" (reg3), "d" (address)
+ " .insn rre,0xb98a0000,%[r1],%[address]"
+ : [r1] "+&d" (r1.pair), "+m" (*ptr)
+ : [address] "d" (address)
: "cc");
}
@@ -589,14 +586,12 @@ static inline void crdte(unsigned long old, unsigned long new,
unsigned long table, unsigned long dtt,
unsigned long address, unsigned long asce)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
- register unsigned long reg4 asm("4") = table | dtt;
- register unsigned long reg5 asm("5") = address;
+ union register_pair r1 = { .even = old, .odd = new, };
+ union register_pair r2 = { .even = table | dtt, .odd = address, };
- asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
- : "+d" (reg2)
- : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
+ asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
+ : [r1] "+&d" (r1.pair)
+ : [r2] "d" (r2.pair), [asce] "a" (asce)
: "memory", "cc");
}
@@ -864,6 +859,25 @@ static inline int pte_unused(pte_t pte)
}
/*
+ * Extract the pgprot value from the given pte while at the same time making it
+ * usable for kernel address space mappings where fault driven dirty and
+ * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
+ * must not be set.
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
+
+ if (pte_write(pte))
+ pte_flags |= pgprot_val(PAGE_KERNEL);
+ else
+ pte_flags |= pgprot_val(PAGE_KERNEL_RO);
+ pte_flags |= pte_val(pte) & mio_wb_bit_mask;
+
+ return __pgprot(pte_flags);
+}
+
+/*
* pgd/pmd/pte modification functions
*/
@@ -1711,4 +1725,7 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define pmd_pgtable(pmd) \
+ ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
+
#endif /* _S390_PAGE_H */
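A minimal sketch of how the new pte_pgprot() helper can be used (hypothetical helper, not part of this patch): building a pte for a different page that carries over the kernel protections of an existing mapping.

static inline pte_t make_kernel_pte(struct page *page, pte_t reference)
{
	/*
	 * pte_pgprot() selects PAGE_KERNEL or PAGE_KERNEL_RO based on the
	 * write bit of 'reference' and keeps the MIO write-back bit, so the
	 * result is directly usable for a kernel address space mapping.
	 */
	return mk_pte(page, pte_pgprot(reference));
}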
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 23ff51be7e29..d9d5350cc3ec 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
old, new) != old);
}
-#define init_task_preempt_count(p) do { } while (0)
-
-#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
static inline void set_preempt_need_resched(void)
{
__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
S390_lowcore.preempt_count = pc;
}
-#define init_task_preempt_count(p) do { } while (0)
-
-#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
static inline void set_preempt_need_resched(void)
{
}
@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#define init_task_preempt_count(p) do { } while (0)
+/* Deferred to CPU bringup time */
+#define init_idle_preempt_count(p, cpu) do { } while (0)
+
#ifdef CONFIG_PREEMPTION
extern void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 023a15dc25a3..ddc7858bbce4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -129,7 +129,7 @@ struct thread_struct {
struct runtime_instr_cb *ri_cb;
struct gs_cb *gs_cb; /* Current guarded storage cb */
struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
- unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
+ struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
@@ -207,7 +207,7 @@ static __always_inline unsigned long current_stack_pointer(void)
return sp;
}
-static __no_kasan_or_inline unsigned short stap(void)
+static __always_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -246,7 +246,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
+static __always_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index f828be78937f..61b22aa990e7 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,16 +9,17 @@
#include <linux/bits.h>
#include <uapi/asm/ptrace.h>
+#include <asm/tpi.h>
-#define PIF_SYSCALL 0 /* inside a system call */
-#define PIF_SYSCALL_RESTART 1 /* restart the current system call */
-#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
-#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
+#define PIF_SYSCALL 0 /* inside a system call */
+#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */
+#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
+#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
-#define _PIF_SYSCALL BIT(PIF_SYSCALL)
-#define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART)
-#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
-#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
+#define _PIF_SYSCALL BIT(PIF_SYSCALL)
+#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART)
+#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
+#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#ifndef __ASSEMBLY__
@@ -86,9 +87,14 @@ struct pt_regs
};
};
unsigned long orig_gpr2;
- unsigned int int_code;
- unsigned int int_parm;
- unsigned long int_parm_long;
+ union {
+ struct {
+ unsigned int int_code;
+ unsigned int int_parm;
+ unsigned long int_parm_long;
+ };
+ struct tpi_info tpi_info;
+ };
unsigned long flags;
unsigned long cr1;
};
@@ -156,6 +162,14 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
return !!(regs->flags & (1UL << flag));
}
+static inline int test_and_clear_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+ int ret = test_pt_regs_flag(regs, flag);
+
+ clear_pt_regs_flag(regs, flag);
+ return ret;
+}
+
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 3e388fa208d4..3a77aa96d092 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -78,6 +78,8 @@ struct parmarea {
char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
};
+extern struct parmarea parmarea;
+
extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_DISABLED 0
#define ZLIB_DFLTCC_FULL 1
@@ -87,7 +89,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
-extern unsigned long vmalloc_size;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
@@ -158,6 +159,8 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
+extern int is_full_image;
+
static inline u32 gen_lpswe(unsigned long addr)
{
BUILD_BUG_ON(addr > 0xfff);
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 53ee795cd3d3..edee63da08e7 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -41,15 +41,17 @@
static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
- register unsigned long reg1 asm ("1") = parm;
+ union register_pair r1 = { .odd = parm, };
int cc;
asm volatile(
- " sigp %1,%2,0(%3)\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
- *status = reg1;
+ " sigp %[r1],%[addr],0(%[order])\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
+ : [addr] "d" (addr), [order] "a" (order)
+ : "cc");
+ *status = r1.even;
return cc;
}
diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h
new file mode 100644
index 000000000000..fd17f25704bd
--- /dev/null
+++ b/arch/s390/include/asm/softirq_stack.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_S390_SOFTIRQ_STACK_H
+#define __ASM_S390_SOFTIRQ_STACK_H
+
+#include <asm/lowcore.h>
+#include <asm/stacktrace.h>
+
+static inline void do_softirq_own_stack(void)
+{
+ call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
+}
+
+#endif /* __ASM_S390_SOFTIRQ_STACK_H */
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 76c6034428be..3d8a4b94c620 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -74,23 +74,6 @@ struct stack_frame {
((unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain))
-#define CALL_ARGS_0() \
- register unsigned long r2 asm("2")
-#define CALL_ARGS_1(arg1) \
- register unsigned long r2 asm("2") = (unsigned long)(arg1)
-#define CALL_ARGS_2(arg1, arg2) \
- CALL_ARGS_1(arg1); \
- register unsigned long r3 asm("3") = (unsigned long)(arg2)
-#define CALL_ARGS_3(arg1, arg2, arg3) \
- CALL_ARGS_2(arg1, arg2); \
- register unsigned long r4 asm("4") = (unsigned long)(arg3)
-#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
- CALL_ARGS_3(arg1, arg2, arg3); \
- register unsigned long r4 asm("5") = (unsigned long)(arg4)
-#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
- CALL_ARGS_4(arg1, arg2, arg3, arg4); \
- register unsigned long r4 asm("6") = (unsigned long)(arg5)
-
/*
* To keep this simple mark register 2-6 as being changed (volatile)
* by the called function, even though register 6 is saved/nonvolatile.
@@ -109,34 +92,113 @@ struct stack_frame {
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1
-#define CALL_ON_STACK(fn, stack, nr, args...) \
+#define CALL_LARGS_0(...) \
+ long dummy = 0
+#define CALL_LARGS_1(t1, a1) \
+ long arg1 = (long)(t1)(a1)
+#define CALL_LARGS_2(t1, a1, t2, a2) \
+ CALL_LARGS_1(t1, a1); \
+ long arg2 = (long)(t2)(a2)
+#define CALL_LARGS_3(t1, a1, t2, a2, t3, a3) \
+ CALL_LARGS_2(t1, a1, t2, a2); \
+ long arg3 = (long)(t3)(a3)
+#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4) \
+ CALL_LARGS_3(t1, a1, t2, a2, t3, a3); \
+ long arg4 = (long)(t4)(a4)
+#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5) \
+ CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4); \
+ long arg5 = (long)(t5)(a5)
+
+#define CALL_REGS_0 \
+ register long r2 asm("2") = dummy
+#define CALL_REGS_1 \
+ register long r2 asm("2") = arg1
+#define CALL_REGS_2 \
+ CALL_REGS_1; \
+ register long r3 asm("3") = arg2
+#define CALL_REGS_3 \
+ CALL_REGS_2; \
+ register long r4 asm("4") = arg3
+#define CALL_REGS_4 \
+ CALL_REGS_3; \
+ register long r5 asm("5") = arg4
+#define CALL_REGS_5 \
+ CALL_REGS_4; \
+ register long r6 asm("6") = arg5
+
+#define CALL_TYPECHECK_0(...)
+#define CALL_TYPECHECK_1(t, a, ...) \
+ typecheck(t, a)
+#define CALL_TYPECHECK_2(t, a, ...) \
+ CALL_TYPECHECK_1(__VA_ARGS__); \
+ typecheck(t, a)
+#define CALL_TYPECHECK_3(t, a, ...) \
+ CALL_TYPECHECK_2(__VA_ARGS__); \
+ typecheck(t, a)
+#define CALL_TYPECHECK_4(t, a, ...) \
+ CALL_TYPECHECK_3(__VA_ARGS__); \
+ typecheck(t, a)
+#define CALL_TYPECHECK_5(t, a, ...) \
+ CALL_TYPECHECK_4(__VA_ARGS__); \
+ typecheck(t, a)
+
+#define CALL_PARM_0(...) void
+#define CALL_PARM_1(t, a, ...) t
+#define CALL_PARM_2(t, a, ...) t, CALL_PARM_1(__VA_ARGS__)
+#define CALL_PARM_3(t, a, ...) t, CALL_PARM_2(__VA_ARGS__)
+#define CALL_PARM_4(t, a, ...) t, CALL_PARM_3(__VA_ARGS__)
+#define CALL_PARM_5(t, a, ...) t, CALL_PARM_4(__VA_ARGS__)
+#define CALL_PARM_6(t, a, ...) t, CALL_PARM_5(__VA_ARGS__)
+
+/*
+ * Use call_on_stack() to call a function switching to a specified
+ * stack. Proper sign and zero extension of function arguments is
+ * done. Usage:
+ *
+ * rc = call_on_stack(nr, stack, rettype, fn, t1, a1, t2, a2, ...)
+ *
+ * - nr specifies the number of function arguments of fn.
+ * - stack specifies the stack to be used.
+ * - fn is the function to be called.
+ * - rettype is the return type of fn.
+ * - t1, a1, ... are pairs, where t1 must match the type of the first
+ * argument of fn, t2 the second, etc. a1 is the corresponding
+ * first function argument (not name), etc.
+ */
+#define call_on_stack(nr, stack, rettype, fn, ...) \
({ \
+ rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = fn; \
unsigned long frame = current_frame_address(); \
- CALL_ARGS_##nr(args); \
+ unsigned long __stack = stack; \
unsigned long prev; \
+ CALL_LARGS_##nr(__VA_ARGS__); \
+ CALL_REGS_##nr; \
\
+ CALL_TYPECHECK_##nr(__VA_ARGS__); \
asm volatile( \
- " la %[_prev],0(15)\n" \
+ " lgr %[_prev],15\n" \
" lg 15,%[_stack]\n" \
" stg %[_frame],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
- " la 15,0(%[_prev])\n" \
- : [_prev] "=&a" (prev), CALL_FMT_##nr \
- : [_stack] "R" (stack), \
+ " lgr 15,%[_prev]\n" \
+ : [_prev] "=&d" (prev), CALL_FMT_##nr \
+ : [_stack] "R" (__stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_frame] "d" (frame), \
- [_fn] "X" (fn) : CALL_CLOBBER_##nr); \
- r2; \
+ [_fn] "X" (__fn) : CALL_CLOBBER_##nr); \
+ (rettype)r2; \
})
-#define CALL_ON_STACK_NORETURN(fn, stack) \
+#define call_on_stack_noreturn(fn, stack) \
({ \
+ void (*__fn)(void) = fn; \
+ \
asm volatile( \
" la 15,0(%[_stack])\n" \
" xc %[_bc](8,15),%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
- [_stack] "a" (stack), [_fn] "X" (fn)); \
+ [_stack] "a" (stack), [_fn] "X" (__fn)); \
BUG(); \
})
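To illustrate the usage comment above, a hypothetical caller (not part of this patch) that runs a two-argument function on the nodat stack and propagates its return value:

static int count_frames(unsigned long start, unsigned long size);

static int count_frames_on_nodat_stack(unsigned long start, unsigned long size)
{
	/* nr = 2 arguments, each given as a type/value pair matching count_frames() */
	return call_on_stack(2, S390_lowcore.nodat_stack, int, count_frames,
			     unsigned long, start, unsigned long, size);
}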
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 4c0690fc5167..4fd66c5e8934 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -107,16 +107,18 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
#ifdef __HAVE_ARCH_MEMCHR
static inline void *memchr(const void * s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile(
- "0: srst %0,%1\n"
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
" jo 0b\n"
" jl 1f\n"
- " la %0,0\n"
+ " la %[ret],0\n"
"1:"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
return (void *) ret;
}
#endif
@@ -124,13 +126,15 @@ static inline void *memchr(const void * s, int c, size_t n)
#ifdef __HAVE_ARCH_MEMSCAN
static inline void *memscan(void *s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile(
- "0: srst %0,%1\n"
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
" jo 0b\n"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
return (void *) ret;
}
#endif
@@ -138,17 +142,18 @@ static inline void *memscan(void *s, int c, size_t n)
#ifdef __HAVE_ARCH_STRCAT
static inline char *strcat(char *dst, const char *src)
{
- register int r0 asm("0") = 0;
- unsigned long dummy;
+ unsigned long dummy = 0;
char *ret = dst;
asm volatile(
- "0: srst %0,%1\n"
+ " lghi 0,0\n"
+ "0: srst %[dummy],%[dst]\n"
" jo 0b\n"
- "1: mvst %0,%2\n"
+ "1: mvst %[dummy],%[src]\n"
" jo 1b"
- : "=&a" (dummy), "+a" (dst), "+a" (src)
- : "d" (r0), "0" (0) : "cc", "memory" );
+ : [dummy] "+&a" (dummy), [dst] "+&a" (dst), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
#endif
@@ -156,14 +161,15 @@ static inline char *strcat(char *dst, const char *src)
#ifdef __HAVE_ARCH_STRCPY
static inline char *strcpy(char *dst, const char *src)
{
- register int r0 asm("0") = 0;
char *ret = dst;
asm volatile(
- "0: mvst %0,%1\n"
+ " lghi 0,0\n"
+ "0: mvst %[dst],%[src]\n"
" jo 0b"
- : "+&a" (dst), "+&a" (src) : "d" (r0)
- : "cc", "memory");
+ : [dst] "+&a" (dst), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
#endif
@@ -171,28 +177,33 @@ static inline char *strcpy(char *dst, const char *src)
#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s)
{
- register unsigned long r0 asm("0") = 0;
+ unsigned long end = 0;
const char *tmp = s;
asm volatile(
- "0: srst %0,%1\n"
+ " lghi 0,0\n"
+ "0: srst %[end],%[tmp]\n"
" jo 0b"
- : "+d" (r0), "+a" (tmp) : : "cc", "memory");
- return r0 - (unsigned long) s;
+ : [end] "+&a" (end), [tmp] "+&a" (tmp)
+ :
+ : "cc", "memory", "0");
+ return end - (unsigned long)s;
}
#endif
#ifdef __HAVE_ARCH_STRNLEN
static inline size_t strnlen(const char * s, size_t n)
{
- register int r0 asm("0") = 0;
const char *tmp = s;
const char *end = s + n;
asm volatile(
- "0: srst %0,%1\n"
+ " lghi 0,0\n"
+ "0: srst %[end],%[tmp]\n"
" jo 0b"
- : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
+ : [end] "+&a" (end), [tmp] "+&a" (tmp)
+ :
+ : "cc", "memory", "0");
return end - s;
}
#endif
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index f6326c6d2abe..50d9b04ecbd1 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -75,9 +75,12 @@ static inline void set_clock_comparator(__u64 time)
static inline void set_tod_programmable_field(u16 val)
{
- register unsigned long reg0 asm("0") = val;
-
- asm volatile("sckpf" : : "d" (reg0));
+ asm volatile(
+ " lgr 0,%[val]\n"
+ " sckpf\n"
+ :
+ : [val] "d" ((unsigned long)val)
+ : "0");
}
void clock_comparator_work(void);
@@ -138,16 +141,19 @@ struct ptff_qui {
#define ptff(ptff_block, len, func) \
({ \
struct addrtype { char _[len]; }; \
- register unsigned int reg0 asm("0") = func; \
- register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+ unsigned int reg0 = func; \
+ unsigned long reg1 = (unsigned long)(ptff_block); \
int rc; \
\
asm volatile( \
- " .word 0x0104\n" \
- " ipm %0\n" \
- " srl %0,28\n" \
- : "=d" (rc), "+m" (*(struct addrtype *) reg1) \
- : "d" (reg0), "d" (reg1) : "cc"); \
+ " lgr 0,%[reg0]\n" \
+ " lgr 1,%[reg1]\n" \
+ " .insn e,0x0104\n" \
+ " ipm %[rc]\n" \
+ " srl %[rc],28\n" \
+ : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \
+ : [reg0] "d" (reg0), [reg1] "d" (reg1) \
+ : "cc", "0", "1"); \
rc; \
})
diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h
new file mode 100644
index 000000000000..1ac538b8cbf5
--- /dev/null
+++ b/arch/s390/include/asm/tpi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_TPI_H
+#define _ASM_S390_TPI_H
+
+#include <linux/types.h>
+#include <uapi/asm/schid.h>
+
+#ifndef __ASSEMBLY__
+
+/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
+struct tpi_info {
+ struct subchannel_id schid;
+ u32 intparm;
+ u32 adapter_IO:1;
+ u32 directed_irq:1;
+ u32 isc:3;
+ u32 :12;
+ u32 type:3;
+ u32 :12;
+} __packed __aligned(4);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_TPI_H */
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
new file mode 100644
index 000000000000..0b5d550a0478
--- /dev/null
+++ b/arch/s390/include/asm/types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_S390_TYPES_H
+#define _ASM_S390_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#ifndef __ASSEMBLY__
+
+union register_pair {
+ unsigned __int128 pair;
+ struct {
+ unsigned long even;
+ unsigned long odd;
+ };
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_S390_TYPES_H */
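As an illustration of the new union (hypothetical example, not part of this patch): a wrapper around the DLGR divide instruction, which wants its 128-bit dividend in an even/odd register pair. The compiler allocates the pair; 'even' names the even-numbered register and 'odd' the odd one.

static inline unsigned long div128_by_64(unsigned long hi, unsigned long lo,
					 unsigned long divisor,
					 unsigned long *remainder)
{
	union register_pair rp = { .even = hi, .odd = lo };

	/* caller must ensure hi < divisor so that the quotient fits 64 bits */
	asm volatile(
		"	dlgr	%[rp],%[div]\n"
		: [rp] "+&d" (rp.pair)
		: [div] "d" (divisor)
		: "cc");
	*remainder = rp.even;	/* DLGR leaves the remainder in the even register */
	return rp.odd;		/* and the quotient in the odd register */
}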
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4756d2937e54..2316f2440881 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -49,52 +49,51 @@ int __get_user_bad(void) __attribute__((noreturn));
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-#define __put_get_user_asm(to, from, size, spec) \
+#define __put_get_user_asm(to, from, size, insn) \
({ \
- register unsigned long __reg0 asm("0") = spec; \
int __rc; \
\
asm volatile( \
- "0: mvcos %1,%3,%2\n" \
- "1: xr %0,%0\n" \
+ insn " 0,%[spec]\n" \
+ "0: mvcos %[_to],%[_from],%[_size]\n" \
+ "1: xr %[rc],%[rc]\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
- "3: lhi %0,%5\n" \
+ "3: lhi %[rc],%[retval]\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : "=d" (__rc), "+Q" (*(to)) \
- : "d" (size), "Q" (*(from)), \
- "d" (__reg0), "K" (-EFAULT) \
- : "cc"); \
+ : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
+ : [_size] "d" (size), [_from] "Q" (*(from)), \
+ [retval] "K" (-EFAULT), [spec] "K" (0x81UL) \
+ : "cc", "0"); \
__rc; \
})
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x810000UL;
int rc;
switch (size) {
case 1:
rc = __put_get_user_asm((unsigned char __user *)ptr,
(unsigned char *)x,
- size, spec);
+ size, "llilh");
break;
case 2:
rc = __put_get_user_asm((unsigned short __user *)ptr,
(unsigned short *)x,
- size, spec);
+ size, "llilh");
break;
case 4:
rc = __put_get_user_asm((unsigned int __user *)ptr,
(unsigned int *)x,
- size, spec);
+ size, "llilh");
break;
case 8:
rc = __put_get_user_asm((unsigned long __user *)ptr,
(unsigned long *)x,
- size, spec);
+ size, "llilh");
break;
default:
__put_user_bad();
@@ -105,29 +104,28 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x81UL;
int rc;
switch (size) {
case 1:
rc = __put_get_user_asm((unsigned char *)x,
(unsigned char __user *)ptr,
- size, spec);
+ size, "lghi");
break;
case 2:
rc = __put_get_user_asm((unsigned short *)x,
(unsigned short __user *)ptr,
- size, spec);
+ size, "lghi");
break;
case 4:
rc = __put_get_user_asm((unsigned int *)x,
(unsigned int __user *)ptr,
- size, spec);
+ size, "lghi");
break;
case 8:
rc = __put_get_user_asm((unsigned long *)x,
(unsigned long __user *)ptr,
- size, spec);
+ size, "lghi");
break;
default:
__get_user_bad();
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 7b98d4caee77..12c5f006c136 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -73,6 +73,10 @@ enum uv_cmds_inst {
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};
+enum uv_feat_ind {
+ BIT_UV_FEAT_MISC = 0,
+};
+
struct uv_cb_header {
u16 len;
u16 cmd; /* Command Code */
@@ -97,7 +101,8 @@ struct uv_cb_qui {
u64 max_guest_stor_addr;
u8 reserved88[158 - 136];
u16 max_guest_cpu_id;
- u8 reserveda0[200 - 160];
+ u64 uv_feature_indications;
+ u8 reserveda0[200 - 168];
} __packed __aligned(8);
/* Initialize Ultravisor */
@@ -274,6 +279,7 @@ struct uv_info {
unsigned long max_sec_stor_addr;
unsigned int max_num_sec_conf;
unsigned short max_guest_cpu_id;
+ unsigned long uv_feature_indications;
};
extern struct uv_info uv_info;
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index b45e3dddd2c2..53165aa7813a 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -4,18 +4,31 @@
#include <vdso/datapage.h>
-/* Default link address for the vDSO */
-#define VDSO64_LBASE 0
+#ifndef __ASSEMBLY__
-#define __VVAR_PAGES 2
+#include <generated/vdso64-offsets.h>
+#ifdef CONFIG_COMPAT
+#include <generated/vdso32-offsets.h>
+#endif
-#define VDSO_VERSION_STRING LINUX_2.6.29
-
-#ifndef __ASSEMBLY__
+#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
+#ifdef CONFIG_COMPAT
+#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
+#else
+#define VDSO32_SYMBOL(tsk, name) (-1UL)
+#endif
extern struct vdso_data *vdso_data;
int vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
+
+/* Default link address for the vDSO */
+#define VDSO_LBASE 0
+
+#define __VVAR_PAGES 2
+
+#define VDSO_VERSION_STRING LINUX_2.6.29
+
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index 383c53c3dddd..d6465b22ffe3 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -8,7 +8,6 @@
#include <asm/timex.h>
#include <asm/unistd.h>
-#include <asm/vdso.h>
#include <linux/compiler.h>
#define vdso_calc_delta __arch_vdso_calc_delta
diff --git a/arch/s390/include/uapi/asm/schid.h b/arch/s390/include/uapi/asm/schid.h
index 58fca6f48410..a3e1cf168553 100644
--- a/arch/s390/include/uapi/asm/schid.h
+++ b/arch/s390/include/uapi/asm/schid.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#ifndef __ASSEMBLY__
+
struct subchannel_id {
__u32 cssid : 8;
__u32 : 4;
@@ -13,5 +15,6 @@ struct subchannel_id {
__u32 sch_no : 16;
} __attribute__ ((packed, aligned(4)));
+#endif /* __ASSEMBLY__ */
#endif /* _UAPIASM_SCHID_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 68ca1834316f..4a44ba5a2d73 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -71,10 +71,10 @@ obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
-obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
# vdso
obj-y += vdso64/
+obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 8e1f2aee85ef..c22ea1c3ef84 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -76,8 +76,7 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
- if (!__test_facility(a->facility,
- S390_lowcore.alt_stfle_fac_list))
+ if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 15e637728a4b..77ff2130cb04 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -14,7 +14,6 @@
#include <linux/pgtable.h>
#include <asm/idle.h>
#include <asm/gmap.h>
-#include <asm/nmi.h>
#include <asm/stacktrace.h>
int main(void)
@@ -58,8 +57,6 @@ int main(void)
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
- OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
- OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
@@ -77,8 +74,6 @@ int main(void)
OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
- OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
- OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
@@ -111,7 +106,6 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
- OFFSET(__LC_CLOCK_COMPARATOR, lowcore, clock_comparator);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
@@ -148,9 +142,6 @@ int main(void)
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
BLANK();
- /* extended machine check save area */
- OFFSET(__MCESA_GS_SAVE_AREA, mcesa, guarded_storage_save_area);
- BLANK();
/* gmap/sie offsets */
OFFSET(__GMAP_ASCE, gmap, asce);
OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
@@ -159,5 +150,7 @@ int main(void)
OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
+ /* sizeof kernel parameter area */
+ DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
return 0;
}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 1d0e17ec93eb..cca142fbb516 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
+#include <asm/vdso.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"
@@ -118,7 +119,6 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
return 0;
}
@@ -304,11 +304,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
restorer = (unsigned long __force)
ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- /* Signal frames without vectors registers are short ! */
- __u16 __user *svc = (void __user *) frame + frame_size - 2;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
- return -EFAULT;
- restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
+ restorer = VDSO32_SYMBOL(current, sigreturn);
}
/* Set up registers for signal handler */
@@ -371,10 +367,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
restorer = (unsigned long __force)
ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- __u16 __user *svc = &frame->svc_insn;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
- return -EFAULT;
- restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
+ restorer = VDSO32_SYMBOL(current, rt_sigreturn);
}
/* Create siginfo on the signal stack */
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 2da027359798..54efc279f54e 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -26,33 +26,35 @@ static char cpcmd_buf[241];
static int diag8_noresponse(int cmdlen)
{
- register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
- register unsigned long reg3 asm ("3") = cmdlen;
-
asm volatile(
- " diag %1,%0,0x8\n"
- : "+d" (reg3) : "d" (reg2) : "cc");
- return reg3;
+ " diag %[rx],%[ry],0x8\n"
+ : [ry] "+&d" (cmdlen)
+ : [rx] "d" ((addr_t) cpcmd_buf)
+ : "cc");
+ return cmdlen;
}
static int diag8_response(int cmdlen, char *response, int *rlen)
{
- unsigned long _cmdlen = cmdlen | 0x40000000L;
- unsigned long _rlen = *rlen;
- register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
- register unsigned long reg3 asm ("3") = (addr_t) response;
- register unsigned long reg4 asm ("4") = _cmdlen;
- register unsigned long reg5 asm ("5") = _rlen;
+ union register_pair rx, ry;
+ int cc;
+ rx.even = (addr_t) cpcmd_buf;
+ rx.odd = (addr_t) response;
+ ry.even = cmdlen | 0x40000000L;
+ ry.odd = *rlen;
asm volatile(
- " diag %2,%0,0x8\n"
- " brc 8,1f\n"
- " agr %1,%4\n"
- "1:\n"
- : "+d" (reg4), "+d" (reg5)
- : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
- *rlen = reg5;
- return reg4;
+ " diag %[rx],%[ry],0x8\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
+ : [rx] "d" (rx.pair)
+ : "cc");
+ if (cc)
+ *rlen += ry.odd;
+ else
+ *rlen = ry.odd;
+ return ry.even;
}
/*
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb958d32bd81..09b6c6402f9b 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1418,7 +1418,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
else
except_str = "-";
caller = (unsigned long) entry->caller;
- rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %pK ",
+ rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %px ",
area, sec, usec, level, except_str,
entry->cpu, (void *)caller);
return rc;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index b8b0cd7b008f..a3f47464c3f1 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -141,16 +141,15 @@ EXPORT_SYMBOL(diag14);
static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
{
- register unsigned long _subcode asm("0") = *subcode;
- register unsigned long _size asm("1") = size;
+ union register_pair rp = { .even = *subcode, .odd = size };
asm volatile(
- " diag %2,%0,0x204\n"
+ " diag %[addr],%[rp],0x204\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
- : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
- *subcode = _subcode;
- return _size;
+ : [rp] "+&d" (rp.pair) : [addr] "d" (addr) : "memory");
+ *subcode = rp.even;
+ return rp.odd;
}
int diag204(unsigned long subcode, unsigned long size, void *addr)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index a361d2e70025..fb84e3fc1686 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -33,6 +33,8 @@
#include <asm/switch_to.h>
#include "entry.h"
+int __bootdata(is_full_image);
+
static void __init reset_tod_clock(void)
{
union tod_clock clk;
@@ -180,11 +182,9 @@ static noinline __init void setup_lowcore_early(void)
static noinline __init void setup_facility_list(void)
{
- memcpy(S390_lowcore.alt_stfle_fac_list,
- S390_lowcore.stfle_fac_list,
- sizeof(S390_lowcore.alt_stfle_fac_list));
+ memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
@@ -281,7 +281,7 @@ static void __init setup_boot_command_line(void)
static void __init check_image_bootable(void)
{
- if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
+ if (is_full_image)
return;
sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e84f495e7eb2..5a2f70cbd3a9 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -14,7 +14,6 @@
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
-#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -129,6 +128,52 @@ _LPP_OFFSET = __LC_LPP
"jnz .+8; .long 0xb2e8d000", 82
.endm
+ /*
+ * The CHKSTG macro jumps to the provided label in case the
+ * machine check interruption code reports one of unrecoverable
+ * storage errors:
+ * - Storage error uncorrected
+ * - Storage key error uncorrected
+ * - Storage degradation with Failing-storage-address validity
+ */
+ .macro CHKSTG errlabel
+ TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
+ jnz \errlabel
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
+ jz oklabel\@
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
+ jnz \errlabel
+oklabel\@:
+ .endm
+
+#if IS_ENABLED(CONFIG_KVM)
+ /*
+ * The OUTSIDE macro jumps to the provided label in case the value
+ * in the provided register is outside of the provided range. The
+ * macro is useful for checking whether a PSW stored in a register
+ * pair points inside or outside of a block of instructions.
+ * @reg: register to check
+ * @start: start of the range
+ * @end: end of the range
+ * @outside_label: jump here if @reg is outside of [@start..@end)
+ */
+ .macro OUTSIDE reg,start,end,outside_label
+ lgr %r14,\reg
+ larl %r13,\start
+ slgr %r14,%r13
+ lghi %r13,\end - \start
+ clgr %r14,%r13
+ jhe \outside_label
+ .endm
+
+ .macro SIEEXIT
+ lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ .endm
+#endif
+
GEN_BR_THUNK %r14
GEN_BR_THUNK %r14,%r13
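The OUTSIDE macro above relies on a single unsigned comparison; a C equivalent of the check it performs (illustration only, not part of this patch):

static inline int outside(unsigned long reg, unsigned long start,
			  unsigned long end)
{
	/*
	 * reg - start underflows to a huge value when reg < start, so one
	 * unsigned compare covers both "below start" and "at or above end".
	 */
	return (reg - start) >= (end - start);
}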
@@ -214,7 +259,6 @@ ENTRY(sie64a)
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
-# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
.Lrewind_pad6:
nopr 7
.Lrewind_pad4:
@@ -276,6 +320,7 @@ ENTRY(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
+ mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
lgr %r3,%r14
brasl %r14,__do_syscall
lctlg %c1,%c1,__LC_USER_ASCE
@@ -318,16 +363,8 @@ ENTRY(pgm_check_handler)
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe 1f
- lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
- larl %r9,sie_exit # skip forward to sie_exit
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
+ SIEEXIT
lghi %r10,_PIF_GUEST_FAULT
#endif
1: tmhh %r8,0x4000 # PER bit set in old PSW ?
@@ -392,13 +429,9 @@ ENTRY(\name)
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
#if IS_ENABLED(CONFIG_KVM)
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe 0f
- brasl %r14,.Lcleanup_sie_int
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
+ BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ SIEEXIT
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -472,8 +505,6 @@ ENTRY(mcck_int_handler)
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
- sckc __LC_CLOCK_COMPARATOR # validate comparator
- lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
lmg %r8,%r9,__LC_MCK_OLD_PSW
@@ -484,41 +515,7 @@ ENTRY(mcck_int_handler)
la %r14,4095
lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
ptlb
- lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
- nill %r11,0xfc00 # MCESA_ORIGIN_MASK
- TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
- jno 0f
- TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID
- jno 0f
- .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
-0: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
- TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID
- jo 0f
- sr %r14,%r14
-0: sfpc %r14
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- jo 0f
- lghi %r14,__LC_FPREGS_SAVE_AREA
- ld %f0,0(%r14)
- ld %f1,8(%r14)
- ld %f2,16(%r14)
- ld %f3,24(%r14)
- ld %f4,32(%r14)
- ld %f5,40(%r14)
- ld %f6,48(%r14)
- ld %f7,56(%r14)
- ld %f8,64(%r14)
- ld %f9,72(%r14)
- ld %f10,80(%r14)
- ld %f11,88(%r14)
- ld %f12,96(%r14)
- ld %f13,104(%r14)
- ld %f14,112(%r14)
- ld %f15,120(%r14)
- j 1f
-0: VLM %v0,%v15,0,%r11
- VLM %v16,%v31,256,%r11
-1: lghi %r14,__LC_CPU_TIMER_SAVE_AREA
+ lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
jo 3f
@@ -534,27 +531,29 @@ ENTRY(mcck_int_handler)
3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
jno .Lmcck_panic
tmhh %r8,0x0001 # interrupting from user ?
- jnz 4f
+ jnz 6f
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
-4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tmhh %r8,0x0001 # interrupting from user ?
- jnz .Lmcck_user
#if IS_ENABLED(CONFIG_KVM)
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe .Lmcck_stack
- brasl %r14,.Lcleanup_sie_mcck
-#endif
+ OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
+ OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
+ oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+ j 5f
+4: CHKSTG .Lmcck_panic
+5: larl %r14,.Lstosm_tmp
+ stosm 0(%r14),0x04 # turn dat on, keep irqs off
+ BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ SIEEXIT
j .Lmcck_stack
-.Lmcck_user:
+#endif
+6: CHKSTG .Lmcck_panic
+ larl %r14,.Lstosm_tmp
+ stosm 0(%r14),0x04 # turn dat on, keep irqs off
+ tmhh %r8,0x0001 # interrupting from user ?
+ jz .Lmcck_stack
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
lg %r15,__LC_MCCK_STACK
-.Lmcck_skip:
la %r11,STACK_FRAME_OVERHEAD(%r15)
stctg %c1,%c1,__PT_CR1(%r11)
lctlg %c1,%c1,__LC_KERNEL_ASCE
@@ -596,8 +595,33 @@ ENTRY(mcck_int_handler)
b __LC_RETURN_MCCK_LPSWE
.Lmcck_panic:
- lg %r15,__LC_NODAT_STACK
- j .Lmcck_skip
+ /*
+ * Iterate over all possible CPU addresses in the range 0..0xffff
+ * and stop each CPU using signal processor. Use compare and swap
+ * to allow just one CPU-stopper and prevent concurrent CPUs from
+ * stopping each other while leaving the others running.
+ */
+ lhi %r5,0
+ lhi %r6,1
+ larl %r7,.Lstop_lock
+ cs %r5,%r6,0(%r7) # single CPU-stopper only
+ jnz 4f
+ larl %r7,.Lthis_cpu
+ stap 0(%r7) # this CPU address
+ lh %r4,0(%r7)
+ nilh %r4,0
+ lhi %r0,1
+ sll %r0,16 # CPU counter
+ lhi %r3,0 # next CPU address
+0: cr %r3,%r4
+ je 2f
+1: sigp %r1,%r3,SIGP_STOP # stop next CPU
+ brc SIGP_CC_BUSY,1b
+2: ahi %r3,1
+ brct %r0,0b
+3: sigp %r1,%r4,SIGP_STOP # stop this CPU
+ brc SIGP_CC_BUSY,3b
+4: j 4b
ENDPROC(mcck_int_handler)
#
@@ -648,23 +672,11 @@ ENTRY(stack_overflow)
ENDPROC(stack_overflow)
#endif
-#if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_sie_mcck:
- larl %r13,.Lsie_entry
- slgr %r9,%r13
- lghi %r13,.Lsie_skip - .Lsie_entry
- clgr %r9,%r13
- jhe .Lcleanup_sie_int
- oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-.Lcleanup_sie_int:
- BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
- lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
- ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_KERNEL_ASCE
- larl %r9,sie_exit # skip forward to sie_exit
- BR_EX %r14,%r13
-
-#endif
+ .section .data, "aw"
+ .align 4
+.Lstop_lock: .long 0
+.Lthis_cpu: .short 0
+.Lstosm_tmp: .byte 0
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
.globl sys_call_table
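A rough C rendering of the new .Lmcck_panic sequence above (illustration only; the real code stays in assembly because no usable stack is left at that point): the first CPU to win the compare-and-swap walks all possible CPU addresses, stops every other CPU with SIGP STOP, and finally stops itself.

static void mcck_panic_stop_all(void)
{
	static int stop_lock;
	u16 this_cpu = stap();
	u32 status;
	int addr;

	if (cmpxchg(&stop_lock, 0, 1) != 0)
		for (;;) ;			/* another CPU is already the stopper */
	for (addr = 0; addr < 0x10000; addr++) {
		if (addr == this_cpu)
			continue;
		while (____pcpu_sigp(addr, SIGP_STOP, 0, &status) == SIGP_CC_BUSY)
			;
	}
	while (____pcpu_sigp(this_cpu, SIGP_STOP, 0, &status) == SIGP_CC_BUSY)
		;
}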
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index c6ddeb5029b4..2d8f595d9196 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -40,6 +40,7 @@
* trampoline (ftrace_plt), which clobbers also r1.
*/
+void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
@@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_update_ftrace_func(ftrace_func_t func)
{
+ ftrace_func = func;
return 0;
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index dba04fbc37a2..50e2c21e0ec9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
#include <linux/fs.h>
@@ -162,16 +163,18 @@ static bool reipl_ccw_clear;
static inline int __diag308(unsigned long subcode, void *addr)
{
- register unsigned long _addr asm("0") = (unsigned long) addr;
- register unsigned long _rc asm("1") = 0;
+ union register_pair r1;
+ r1.even = (unsigned long) addr;
+ r1.odd = 0;
asm volatile(
- " diag %0,%2,0x308\n"
+ " diag %[r1],%[subcode],0x308\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
- : "+d" (_addr), "+d" (_rc)
- : "d" (subcode) : "cc", "memory");
- return _rc;
+ : [r1] "+&d" (r1.pair)
+ : [subcode] "d" (subcode)
+ : "cc", "memory");
+ return r1.odd;
}
int diag308(unsigned long subcode, void *addr)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 714269e10eec..234d085257eb 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -110,15 +110,17 @@ static int on_async_stack(void)
{
unsigned long frame = current_frame_address();
- return !!!((S390_lowcore.async_stack - frame) >> (PAGE_SHIFT + THREAD_SIZE_ORDER));
+ return ((S390_lowcore.async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
}
static void do_irq_async(struct pt_regs *regs, int irq)
{
- if (on_async_stack())
+ if (on_async_stack()) {
do_IRQ(regs, irq);
- else
- CALL_ON_STACK(do_IRQ, S390_lowcore.async_stack, 2, regs, irq);
+ } else {
+ call_on_stack(2, S390_lowcore.async_stack, void, do_IRQ,
+ struct pt_regs *, regs, int, irq);
+ }
}
static int irq_pending(struct pt_regs *regs)
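The rewritten on_async_stack() test above boils down to a same-stack check; as a standalone illustration (hypothetical helper, not part of this patch):

static inline int on_same_stack(unsigned long a, unsigned long b)
{
	/* both addresses lie on the same THREAD_SIZE aligned stack iff they
	 * only differ in the offset bits below THREAD_SIZE */
	return ((a ^ b) & ~(THREAD_SIZE - 1UL)) == 0;
}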
@@ -146,8 +148,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
account_idle_time_irq();
do {
- memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12);
- if (S390_lowcore.io_int_word & BIT(31))
+ regs->tpi_info = S390_lowcore.tpi_info;
+ if (S390_lowcore.tpi_info.adapter_IO)
do_irq_async(regs, THIN_INTERRUPT);
else
do_irq_async(regs, IO_INTERRUPT);
@@ -172,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
if (user_mode(regs))
update_timer_sys();
- memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
+ regs->int_code = S390_lowcore.ext_int_code_addr;
regs->int_parm = S390_lowcore.ext_params;
regs->int_parm_long = S390_lowcore.ext_params2;
@@ -266,24 +268,6 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
}
/*
- * Switch to the asynchronous interrupt stack for softirq execution.
- */
-void do_softirq_own_stack(void)
-{
- unsigned long old, new;
-
- old = current_stack_pointer();
- /* Check against async. stack address range. */
- new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
- CALL_ON_STACK(__do_softirq, new, 0);
- } else {
- /* We are already on the async stack. */
- __do_softirq();
- }
-}
-
-/*
* ext_int_hash[index] is the list head for all external interrupts that hash
* to this index.
*/
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 74b0bd2c24d4..52d056a5f89f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -44,11 +44,6 @@ void *alloc_insn_page(void)
return page;
}
-void free_insn_page(void *page)
-{
- module_memfree(page);
-}
-
static void *alloc_s390_insn_page(void)
{
if (xchg(&insn_page_in_use, 1) == 1)
@@ -97,11 +92,6 @@ static void copy_instruction(struct kprobe *p)
}
NOKPROBE_SYMBOL(copy_instruction);
-static inline int is_kernel_addr(void *addr)
-{
- return addr < (void *)_end;
-}
-
static int s390_get_insn_slot(struct kprobe *p)
{
/*
@@ -110,7 +100,7 @@ static int s390_get_insn_slot(struct kprobe *p)
* field can be patched and executed within the insn slot.
*/
p->ainsn.insn = NULL;
- if (is_kernel_addr(p->addr))
+ if (is_kernel((unsigned long)p->addr))
p->ainsn.insn = get_s390_insn_slot();
else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
@@ -122,7 +112,7 @@ static void s390_free_insn_slot(struct kprobe *p)
{
if (!p->ainsn.insn)
return;
- if (is_kernel_addr(p->addr))
+ if (is_kernel((unsigned long)p->addr))
free_s390_insn_slot(p->ainsn.insn, 0);
else
free_insn_slot(p->ainsn.insn, 0);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index d91989c7bd6a..1005a6935fbe 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -132,7 +132,8 @@ static bool kdump_csum_valid(struct kimage *image)
int rc;
preempt_disable();
- rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
+ rc = call_on_stack(1, S390_lowcore.nodat_stack, unsigned long, do_start_kdump,
+ unsigned long, (unsigned long)image);
preempt_enable();
return rc == 0;
#else
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index faf64c2f90f5..6b13797143a7 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -59,13 +59,13 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
- lgrl %r1,ftrace_trace_function
+ lgrl %r1,ftrace_func
#else
lgr %r2,%r0
aghi %r2,-MCOUNT_INSN_SIZE
larl %r4,function_trace_op
lg %r4,0(%r4)
- larl %r1,ftrace_trace_function
+ larl %r1,ftrace_func
lg %r1,0(%r1)
#endif
lgr %r3,%r14
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 11f8c296f60d..20f8e1868853 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -189,12 +189,16 @@ void noinstr s390_handle_mcck(void)
* returns 0 if all required registers are available
* returns 1 otherwise
*/
-static int notrace s390_check_registers(union mci mci, int umode)
+static int notrace s390_validate_registers(union mci mci, int umode)
{
+ struct mcesa *mcesa;
+ void *fpt_save_area;
union ctlreg2 cr2;
int kill_task;
+ u64 zero;
kill_task = 0;
+ zero = 0;
if (!mci.gr) {
/*
@@ -205,14 +209,6 @@ static int notrace s390_check_registers(union mci mci, int umode)
s390_handle_damage();
kill_task = 1;
}
- /* Check control registers */
- if (!mci.cr) {
- /*
- * Control registers have unknown contents.
- * Can't recover and therefore stopping machine.
- */
- s390_handle_damage();
- }
if (!mci.fp) {
/*
* Floating point registers can't be restored. If the
@@ -225,35 +221,89 @@ static int notrace s390_check_registers(union mci mci, int umode)
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
}
+ fpt_save_area = &S390_lowcore.floating_pt_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
* If the kernel currently uses the floating pointer
* registers and needs the FPC register the system is
* stopped. If the process has its floating pointer
- * registers loaded it is terminated.
+ * registers loaded it is terminated. Otherwise the
+ * FPC is just validated.
*/
if (S390_lowcore.fpu_flags & KERNEL_FPC)
s390_handle_damage();
+ asm volatile(
+ " lfpc %0\n"
+ :
+ : "Q" (zero));
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
+ } else {
+ asm volatile(
+ " lfpc %0\n"
+ :
+ : "Q" (S390_lowcore.fpt_creg_save_area));
}
- if (MACHINE_HAS_VX) {
+ mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+ if (!MACHINE_HAS_VX) {
+ /* Validate floating point registers */
+ asm volatile(
+ " ld 0,0(%0)\n"
+ " ld 1,8(%0)\n"
+ " ld 2,16(%0)\n"
+ " ld 3,24(%0)\n"
+ " ld 4,32(%0)\n"
+ " ld 5,40(%0)\n"
+ " ld 6,48(%0)\n"
+ " ld 7,56(%0)\n"
+ " ld 8,64(%0)\n"
+ " ld 9,72(%0)\n"
+ " ld 10,80(%0)\n"
+ " ld 11,88(%0)\n"
+ " ld 12,96(%0)\n"
+ " ld 13,104(%0)\n"
+ " ld 14,112(%0)\n"
+ " ld 15,120(%0)\n"
+ :
+ : "a" (fpt_save_area)
+ : "memory");
+ } else {
+ /* Validate vector registers */
+ union ctlreg0 cr0;
+
if (!mci.vr) {
/*
* Vector registers can't be restored. If the kernel
* currently uses vector registers the system is
* stopped. If the process has its vector registers
- * loaded it is terminated.
+ * loaded it is terminated. Otherwise just validate
+ * the registers.
*/
if (S390_lowcore.fpu_flags & KERNEL_VXR)
s390_handle_damage();
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
}
+ cr0.val = S390_lowcore.cregs_save_area[0];
+ cr0.afp = cr0.vx = 1;
+ __ctl_load(cr0.val, 0, 0);
+ asm volatile(
+ " la 1,%0\n"
+ " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
+ " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
+ :
+ : "Q" (*(struct vx_array *)mcesa->vector_save_area)
+ : "1");
+ __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
- /* Check if access registers are valid */
+ /* Validate access registers */
+ asm volatile(
+ " lam 0,15,0(%0)\n"
+ :
+ : "a" (&S390_lowcore.access_regs_save_area)
+ : "memory");
if (!mci.ar) {
/*
* Access registers have unknown contents.
@@ -261,7 +311,7 @@ static int notrace s390_check_registers(union mci mci, int umode)
*/
kill_task = 1;
}
- /* Check guarded storage registers */
+ /* Validate guarded storage registers */
cr2.val = S390_lowcore.cregs_save_area[2];
if (cr2.gse) {
if (!mci.gs) {
@@ -271,31 +321,26 @@ static int notrace s390_check_registers(union mci mci, int umode)
* It has to be terminated.
*/
kill_task = 1;
+ } else {
+ load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
}
}
- /* Check if old PSW is valid */
- if (!mci.wp) {
- /*
- * Can't tell if we come from user or kernel mode
- * -> stopping machine.
- */
- s390_handle_damage();
- }
- /* Check for invalid kernel instruction address */
- if (!mci.ia && !umode) {
- /*
- * The instruction address got lost while running
- * in the kernel -> stopping machine.
- */
- s390_handle_damage();
- }
+ /*
+ * The getcpu vdso syscall reads CPU number from the programmable
+ * field of the TOD clock. Disregard the TOD programmable register
+ * validity bit and load the CPU number into the TOD programmable
+ * field unconditionally.
+ */
+ set_tod_programmable_field(raw_smp_processor_id());
+ /* Validate clock comparator register */
+ set_clock_comparator(S390_lowcore.clock_comparator);
if (!mci.ms || !mci.pm || !mci.ia)
kill_task = 1;
return kill_task;
}
-NOKPROBE_SYMBOL(s390_check_registers);
+NOKPROBE_SYMBOL(s390_validate_registers);
/*
* Backup the guest's machine check info to its description block
@@ -353,11 +398,6 @@ int notrace s390_do_machine_check(struct pt_regs *regs)
mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
- if (mci.sd) {
- /* System damage -> stopping machine */
- s390_handle_damage();
- }
-
/*
* Reinject the instruction processing damages' machine checks
* including Delayed Access Exception into the guest
@@ -398,7 +438,7 @@ int notrace s390_do_machine_check(struct pt_regs *regs)
s390_handle_damage();
}
}
- if (s390_check_registers(mci, user_mode(regs))) {
+ if (s390_validate_registers(mci, user_mode(regs))) {
/*
* Couldn't restore all register contents for the
* user space process -> mark task for termination.
@@ -428,21 +468,6 @@ int notrace s390_do_machine_check(struct pt_regs *regs)
mcck_pending = 1;
}
- /*
- * Reinject storage related machine checks into the guest if they
- * happen when the guest is running.
- */
- if (!test_cpu_flag(CIF_MCCK_GUEST)) {
- if (mci.se)
- /* Storage error uncorrected */
- s390_handle_damage();
- if (mci.ke)
- /* Storage key-error uncorrected */
- s390_handle_damage();
- if (mci.ds && mci.fa)
- /* Storage degradation */
- s390_handle_damage();
- }
if (mci.cp) {
/* Channel report word pending */
mcck->channel_report = 1;
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 29e511f5bf06..250e4dbf653c 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -17,11 +17,11 @@ static int __init nobp_setup_early(char *str)
* The user explicitely requested nobp=1, enable it and
* disable the expoline support.
*/
- __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __set_facility(82, alt_stfle_fac_list);
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_disable = 1;
} else {
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
return 0;
}
@@ -29,7 +29,7 @@ early_param("nobp", nobp_setup_early);
static int __init nospec_setup_early(char *str)
{
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
return 0;
}
early_param("nospec", nospec_setup_early);
@@ -40,7 +40,7 @@ static int __init nospec_report(void)
pr_info("Spectre V2 mitigation: etokens\n");
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
- if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ if (__test_facility(82, alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
return 0;
}
@@ -66,14 +66,14 @@ void __init nospec_auto_detect(void)
*/
if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
} else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
*/
nospec_disable = 0;
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
/*
* If the kernel has not been compiled with expolines the
@@ -86,7 +86,7 @@ static int __init spectre_v2_setup_early(char *str)
{
if (str && !strncmp(str, "on", 2)) {
nospec_disable = 0;
- __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ __clear_facility(82, alt_stfle_fac_list);
}
if (str && !strncmp(str, "off", 3))
nospec_disable = 1;
@@ -99,6 +99,7 @@ early_param("spectre_v2", spectre_v2_setup_early);
static void __init_or_module __nospec_revert(s32 *start, s32 *end)
{
enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ static const u8 branch[] = { 0x47, 0x00, 0x07, 0x00 };
u8 *instr, *thunk, *br;
u8 insnbuf[6];
s32 *epo;
@@ -128,7 +129,7 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
- memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+ memcpy(insnbuf + 2, branch, sizeof(branch));
switch (type) {
case BRCL_EXPOLINE:
insnbuf[0] = br[0];
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 48f472bf9290..b4b5c8c21166 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Mitigation: etokens\n");
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
- if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ if (__test_facility(82, alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n");
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 31a605bcbc6e..d7dc36ec0a60 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -2,8 +2,9 @@
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
- * Copyright IBM Corp. 2012, 2019
+ * Copyright IBM Corp. 2012, 2021
* Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ * Thomas Richter <tmricht@linux.ibm.com>
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
@@ -14,7 +15,223 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/miscdevice.h>
+
#include <asm/cpu_mcf.h>
+#include <asm/hwctrset.h>
+#include <asm/debug.h>
+
+static unsigned int cfdiag_cpu_speed; /* CPU speed for CF_DIAG trailer */
+static debug_info_t *cf_dbg;
+
+#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
+ /* interval in seconds */
+
+/* Counter sets are stored as data stream in a page sized memory buffer and
+ * exported to user space via raw data attached to the event sample data.
+ * Each counter set starts with an eight byte header consisting of:
+ * - a two byte eye catcher (0xfeef)
+ * - a one byte counter set number
+ * - a two byte counter set size (indicates the number of counters in this set)
+ * - a three byte reserved value (must be zero) to make the header the same
+ * size as a counter value.
+ * All counter values are eight bytes in size.
+ *
+ * All counter sets are followed by a 64 byte trailer.
+ * The trailer consists of:
+ * - a flag field indicating valid fields when the corresponding bit is set
+ * - the counter facility first and second version number
+ * - the CPU speed if nonzero
+ * - the time stamp at which the counter sets have been collected
+ * - the time of day (TOD) base value
+ * - the machine type.
+ *
+ * The counter sets are saved when the process is prepared to be executed on a
+ * CPU and saved again when the process is going to be removed from a CPU.
+ * The difference of both counter sets is calculated and stored in the event
+ * sample data area.
+ */
+struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int set:16; /* 16-31 Counter set identifier */
+ unsigned int ctr:16; /* 32-47 Number of stored counters */
+ unsigned int res1:16; /* 48-63 Reserved */
+};
+
+struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */
+ /* 0 - 7 */
+ union {
+ struct {
+ unsigned int clock_base:1; /* TOD clock base set */
+ unsigned int speed:1; /* CPU speed set */
+ /* Measurement alerts */
+ unsigned int mtda:1; /* Loss of MT ctr. data alert */
+ unsigned int caca:1; /* Counter auth. change alert */
+ unsigned int lcda:1; /* Loss of counter data alert */
+ };
+ unsigned long flags; /* 0-63 All indicators */
+ };
+ /* 8 - 15 */
+ unsigned int cfvn:16; /* 64-79 Ctr First Version */
+ unsigned int csvn:16; /* 80-95 Ctr Second Version */
+ unsigned int cpu_speed:32; /* 96-127 CPU speed */
+ /* 16 - 23 */
+ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
+ /* 24 - 55 */
+ union {
+ struct {
+ unsigned long progusage1;
+ unsigned long progusage2;
+ unsigned long progusage3;
+ unsigned long tod_base;
+ };
+ unsigned long progusage[4];
+ };
+ /* 56 - 63 */
+ unsigned int mach_type:16; /* Machine type */
+ unsigned int res1:16; /* Reserved */
+ unsigned int res2:32; /* Reserved */
+};
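As a rough illustration of the layout described above, a consumer of the PERF_SAMPLE_RAW data could walk the counter set stream as in the following sketch. This is not part of the patch: the parse_cf_raw() helper is hypothetical, and it assumes the raw data is in the big-endian byte order produced on s390.

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	#define CF_DIAG_CTRSET_DEF	0xfeef	/* eye catcher, as defined above */

	/* Walk the stream: 8-byte set headers, 8-byte counters, 64-byte trailer. */
	static void parse_cf_raw(const unsigned char *data, size_t size)
	{
		size_t offset = 0;

		while (offset + 8 <= size) {
			/* Header fields are stored big-endian on s390 */
			uint16_t def = (data[offset] << 8) | data[offset + 1];
			uint16_t set = (data[offset + 2] << 8) | data[offset + 3];
			uint16_t ctr = (data[offset + 4] << 8) | data[offset + 5];

			if (def != CF_DIAG_CTRSET_DEF)
				break;	/* no more sets, the trailer follows */
			printf("counter set %u holds %u counters\n", set, ctr);
			offset += 8 + (size_t)ctr * 8;	/* header plus counter values */
		}
	}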
+
+/* Create the trailer data at the end of a page. */
+static void cfdiag_trailer(struct cf_trailer_entry *te)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cpuid cpuid;
+
+ te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */
+ te->csvn = cpuhw->info.csvn;
+
+ get_cpu_id(&cpuid); /* Machine type */
+ te->mach_type = cpuid.machine;
+ te->cpu_speed = cfdiag_cpu_speed;
+ if (te->cpu_speed)
+ te->speed = 1;
+ te->clock_base = 1; /* Save clock base */
+ te->tod_base = tod_clock_base.tod;
+ te->timestamp = get_tod_clock_fast();
+}
+
+/* Read a counter set. The counter set number determines which counter set to
+ * read, and the CPUM-CF first and second version number determine the number
+ * of available counters in each counter set.
+ * Each counter set starts with a header containing the counter set number and
+ * the number of eight byte counters.
+ *
+ * The function returns the number of bytes occupied by this counter set
+ * including the header.
+ * If there is no counter in the counter set, this counter set is useless and
+ * zero is returned in this case.
+ *
+ * Note that the counter sets may not be enabled or active and the stcctm
+ * instruction might return error 3. Depending on the error_ok value this is
+ * ok, for example when called from the cpumf_pmu_start() callback function.
+ */
+static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
+ size_t room, bool error_ok)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ size_t ctrset_size, need = 0;
+ int rc = 3; /* Assume write failure */
+
+ ctrdata->def = CF_DIAG_CTRSET_DEF;
+ ctrdata->set = ctrset;
+ ctrdata->res1 = 0;
+ ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info);
+
+ if (ctrset_size) { /* Save data */
+ need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
+ if (need <= room) {
+ rc = ctr_stcctm(ctrset, ctrset_size,
+ (u64 *)(ctrdata + 1));
+ }
+ if (rc != 3 || error_ok)
+ ctrdata->ctr = ctrset_size;
+ else
+ need = 0;
+ }
+
+ debug_sprintf_event(cf_dbg, 3,
+ "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
+ " need %zd rc %d\n", __func__, ctrset, ctrset_size,
+ cpuhw->info.cfvn, cpuhw->info.csvn, need, rc);
+ return need;
+}
+
+/* Read out all counter sets and save them in the provided data buffer.
+ * The last 64 bytes hold an artificial trailer entry.
+ */
+static size_t cfdiag_getctr(void *data, size_t sz, unsigned long auth,
+ bool error_ok)
+{
+ struct cf_trailer_entry *trailer;
+ size_t offset = 0, done;
+ int i;
+
+ memset(data, 0, sz);
+ sz -= sizeof(*trailer); /* Always room for trailer */
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ struct cf_ctrset_entry *ctrdata = data + offset;
+
+ if (!(auth & cpumf_ctr_ctl[i]))
+ continue; /* Counter set not authorized */
+
+ done = cfdiag_getctrset(ctrdata, i, sz - offset, error_ok);
+ offset += done;
+ }
+ trailer = data + offset;
+ cfdiag_trailer(trailer);
+ return offset + sizeof(*trailer);
+}
+
+/* Calculate the difference for each counter in a counter set. */
+static void cfdiag_diffctrset(u64 *pstart, u64 *pstop, int counters)
+{
+ for (; --counters >= 0; ++pstart, ++pstop)
+ if (*pstop >= *pstart)
+ *pstop -= *pstart;
+ else
+ *pstop = *pstart - *pstop + 1;
+}
+
+/* Scan the counter sets and calculate the difference of each counter
+ * in each set. The result is the increment of each counter during the
+ * period the counter set has been activated.
+ *
+ * Return true on success.
+ */
+static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth)
+{
+ struct cf_trailer_entry *trailer_start, *trailer_stop;
+ struct cf_ctrset_entry *ctrstart, *ctrstop;
+ size_t offset = 0;
+
+ auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
+ do {
+ ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset);
+ ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset);
+
+ if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
+ pr_err_once("cpum_cf_diag counter set compare error "
+ "in set %i\n", ctrstart->set);
+ return 0;
+ }
+ auth &= ~cpumf_ctr_ctl[ctrstart->set];
+ if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
+ cfdiag_diffctrset((u64 *)(ctrstart + 1),
+ (u64 *)(ctrstop + 1), ctrstart->ctr);
+ offset += ctrstart->ctr * sizeof(u64) +
+ sizeof(*ctrstart);
+ }
+ } while (ctrstart->def && auth);
+
+ /* Save time_stamp from start of event in stop's trailer */
+ trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset);
+ trailer_stop = (struct cf_trailer_entry *)(cpuhw->stop + offset);
+ trailer_stop->progusage[0] = trailer_start->timestamp;
+
+ return 1;
+}
static enum cpumf_ctr_set get_counter_set(u64 event)
{
@@ -34,7 +251,8 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
return set;
}
-static int validate_ctr_version(const struct hw_perf_event *hwc)
+static int validate_ctr_version(const struct hw_perf_event *hwc,
+ enum cpumf_ctr_set set)
{
struct cpu_cf_events *cpuhw;
int err = 0;
@@ -43,7 +261,7 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
cpuhw = &get_cpu_var(cpu_cf_events);
/* check required version for counter sets */
- switch (hwc->config_base) {
+ switch (set) {
case CPUMF_CTR_SET_BASIC:
case CPUMF_CTR_SET_USER:
if (cpuhw->info.cfvn < 1)
@@ -86,6 +304,8 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
(cpuhw->info.act_ctl & mtdiag_ctl)))
err = -EOPNOTSUPP;
break;
+ case CPUMF_CTR_SET_MAX:
+ err = -EOPNOTSUPP;
}
put_cpu_var(cpu_cf_events);
@@ -95,7 +315,6 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
struct cpu_cf_events *cpuhw;
- u64 ctrs_state;
int err = 0;
cpuhw = &get_cpu_var(cpu_cf_events);
@@ -105,8 +324,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
* return with -ENOENT in order to fall back to other
* PMUs that might suffice the event request.
*/
- ctrs_state = cpumf_ctr_ctl[hwc->config_base];
- if (!(ctrs_state & cpuhw->info.auth_ctl))
+ if (!(hwc->config_base & cpuhw->info.auth_ctl))
err = -ENOENT;
put_cpu_var(cpu_cf_events);
@@ -126,7 +344,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
if (cpuhw->flags & PMU_F_ENABLED)
return;
- err = lcctl(cpuhw->state);
+ err = lcctl(cpuhw->state | cpuhw->dev_state);
if (err) {
pr_err("Enabling the performance measuring unit "
"failed with rc=%x\n", err);
@@ -151,6 +369,7 @@ static void cpumf_pmu_disable(struct pmu *pmu)
return;
inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
+ inactive |= cpuhw->dev_state;
err = lcctl(inactive);
if (err) {
pr_err("Disabling the performance measuring unit "
@@ -199,6 +418,14 @@ static const int cpumf_generic_events_user[] = {
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
+static void cpumf_hw_inuse(void)
+{
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_inc_return(&num_events) == 1)
+ __kernel_cpumcf_begin();
+ mutex_unlock(&pmc_reserve_mutex);
+}
+
static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
struct perf_event_attr *attr = &event->attr;
@@ -258,11 +485,11 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
/*
* Use the hardware perf event structure to store the
* counter number in the 'config' member and the counter
- * set number in the 'config_base'. The counter set number
- * is then later used to enable/disable the counter(s).
+ * set number in the 'config_base' as bit mask.
+ * It is later used to enable/disable the counter(s).
*/
hwc->config = ev;
- hwc->config_base = set;
+ hwc->config_base = cpumf_ctr_ctl[set];
break;
case CPUMF_CTR_SET_MAX:
/* The counter could not be associated to a counter set */
@@ -270,22 +497,13 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
}
/* Initialize for using the CPU-measurement counter facility */
- if (!atomic_inc_not_zero(&num_events)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
- err = -EBUSY;
- else
- atomic_inc(&num_events);
- mutex_unlock(&pmc_reserve_mutex);
- }
- if (err)
- return err;
+ cpumf_hw_inuse();
event->destroy = hw_perf_event_destroy;
/* Finally, validate version and authorization of the counter set */
err = validate_ctr_auth(hwc);
if (!err)
- err = validate_ctr_version(hwc);
+ err = validate_ctr_version(hwc, set);
return err;
}
@@ -361,16 +579,11 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
+ int i;
- if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
- return;
-
- if (WARN_ON_ONCE(hwc->config == -1))
+ if (!(hwc->state & PERF_HES_STOPPED))
return;
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
hwc->state = 0;
/* (Re-)enable and activate the counter set */
@@ -382,29 +595,92 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
* needs to be synchronized. At this point, the counter set can be in
* the inactive or disabled state.
*/
- hw_perf_event_reset(event);
+ if (hwc->config == PERF_EVENT_CPUM_CF_DIAG) {
+ cpuhw->usedss = cfdiag_getctr(cpuhw->start,
+ sizeof(cpuhw->start),
+ hwc->config_base, true);
+ } else {
+ hw_perf_event_reset(event);
+ }
- /* increment refcount for this counter set */
- atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
+ /* Increment refcount for counter sets */
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
+ if ((hwc->config_base & cpumf_ctr_ctl[i]))
+ atomic_inc(&cpuhw->ctr_set[i]);
+}
+
+/* Create perf event sample with the counter sets as raw data. The sample
+ * is then pushed to the event subsystem and the function checks for
+ * possible event overflows. If an event overflow occurs, the PMU is
+ * stopped.
+ *
+ * Return non-zero if an event overflow occurred.
+ */
+static int cfdiag_push_sample(struct perf_event *event,
+ struct cpu_cf_events *cpuhw)
+{
+ struct perf_sample_data data;
+ struct perf_raw_record raw;
+ struct pt_regs regs;
+ int overflow;
+
+ /* Setup perf sample */
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ memset(&regs, 0, sizeof(regs));
+ memset(&raw, 0, sizeof(raw));
+
+ if (event->attr.sample_type & PERF_SAMPLE_CPU)
+ data.cpu_entry.cpu = event->cpu;
+ if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ raw.frag.size = cpuhw->usedss;
+ raw.frag.data = cpuhw->stop;
+ raw.size = raw.frag.size;
+ data.raw = &raw;
+ }
+
+ overflow = perf_event_overflow(event, &data, &regs);
+ debug_sprintf_event(cf_dbg, 3,
+ "%s event %#llx sample_type %#llx raw %d ov %d\n",
+ __func__, event->hw.config,
+ event->attr.sample_type, raw.size, overflow);
+ if (overflow)
+ event->pmu->stop(event, 0);
+
+ perf_event_update_userpage(event);
+ return overflow;
}
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
+ int i;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* Decrement reference count for this counter set and if this
* is the last used counter in the set, clear activation
* control and set the counter set state to inactive.
*/
- if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
- ctr_set_stop(&cpuhw->state, hwc->config_base);
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ if (!(hwc->config_base & cpumf_ctr_ctl[i]))
+ continue;
+ if (!atomic_dec_return(&cpuhw->ctr_set[i]))
+ ctr_set_stop(&cpuhw->state, cpumf_ctr_ctl[i]);
+ }
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- hw_perf_event_update(event);
+ if (hwc->config == PERF_EVENT_CPUM_CF_DIAG) {
+ local64_inc(&event->count);
+ cpuhw->usedss = cfdiag_getctr(cpuhw->stop,
+ sizeof(cpuhw->stop),
+ event->hw.config_base,
+ false);
+ if (cfdiag_diffctr(cpuhw, event->hw.config_base))
+ cfdiag_push_sample(event, cpuhw);
+ } else
+ hw_perf_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
@@ -413,15 +689,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- /* Check authorization for the counter set to which this
- * counter belongs.
- * For group events transaction, the authorization check is
- * done in cpumf_pmu_commit_txn().
- */
- if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
- if (validate_ctr_auth(&event->hw))
- return -ENOENT;
-
ctr_set_enable(&cpuhw->state, event->hw.config_base);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -434,6 +701,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ int i;
cpumf_pmu_stop(event, PERF_EF_UPDATE);
@@ -445,110 +713,716 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
* clear enable control and resets all counters in a set. Therefore,
* cpumf_pmu_start() always has to reenable a counter set.
*/
- if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
- ctr_set_disable(&cpuhw->state, event->hw.config_base);
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
+ if (!atomic_read(&cpuhw->ctr_set[i]))
+ ctr_set_disable(&cpuhw->state, cpumf_ctr_ctl[i]);
}
-/*
- * Start group events scheduling transaction.
- * Set flags to perform a single test at commit time.
+/* Performance monitoring unit for s390x */
+static struct pmu cpumf_pmu = {
+ .task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .pmu_enable = cpumf_pmu_enable,
+ .pmu_disable = cpumf_pmu_disable,
+ .event_init = cpumf_pmu_event_init,
+ .add = cpumf_pmu_add,
+ .del = cpumf_pmu_del,
+ .start = cpumf_pmu_start,
+ .stop = cpumf_pmu_stop,
+ .read = cpumf_pmu_read,
+};
+
+static int cfset_init(void);
+static int __init cpumf_pmu_init(void)
+{
+ int rc;
+
+ if (!kernel_cpumcf_avail())
+ return -ENODEV;
+
+ /* Setup s390dbf facility */
+ cf_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
+ if (!cf_dbg) {
+ pr_err("Registration of s390dbf(cpum_cf) failed\n");
+ return -ENOMEM;
+ }
+ debug_register_view(cf_dbg, &debug_sprintf_view);
+
+ cpumf_pmu.attr_groups = cpumf_cf_event_group();
+ rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
+ if (rc) {
+ debug_unregister_view(cf_dbg, &debug_sprintf_view);
+ debug_unregister(cf_dbg);
+ pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
+ } else if (stccm_avail()) { /* Setup counter set device */
+ cfset_init();
+ }
+ return rc;
+}
+
+/* Support for the CPU Measurement Facility counter set extraction using
+ * device /dev/hwctr. This allows user space programs to extract complete
+ * counter sets via normal file operations.
+ */
+
+static atomic_t cfset_opencnt = ATOMIC_INIT(0); /* Excl. access */
+static DEFINE_MUTEX(cfset_ctrset_mutex);/* Synchronize access to hardware */
+struct cfset_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */
+ unsigned int sets; /* Counter set bit mask */
+ atomic_t cpus_ack; /* # CPUs successfully executed func */
+};
+
+static struct cfset_request { /* CPUs and counter set bit mask */
+ unsigned long ctrset; /* Bit mask of counter set to read */
+ cpumask_t mask; /* CPU mask to read from */
+} cfset_request;
+
+static void cfset_ctrset_clear(void)
+{
+ cpumask_clear(&cfset_request.mask);
+ cfset_request.ctrset = 0;
+}
+
+/* The /dev/hwctr device access uses PMU_F_IN_USE to mark that the device
+ * access path is currently in use.
+ * The cpu_cf_events::dev_state is used to denote counter sets in use by this
+ * interface. It is always or'ed in. If this interface is not active, its
+ * value is zero and no additional counter sets will be included.
+ *
+ * The cpu_cf_events::state is used by the perf_event_open SVC and remains
+ * unchanged.
+ *
+ * perf_pmu_enable() and perf_pmu_disable() and their callbacks
+ * cpumf_pmu_enable() and cpumf_pmu_disable() are called by the
+ * performance measurement subsystem to enable the per process
+ * CPU Measurement counter facility.
+ * The XXX_enable() and XXX_disable() functions are used to turn off
+ * the x86 performance monitoring interrupt (PMI) during scheduling.
+ * s390 uses these calls to temporarily stop and resume the active CPU
+ * counter sets during scheduling.
*
- * We only support PERF_PMU_TXN_ADD transactions. Save the
- * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
- * transactions.
+ * We do allow concurrent access via the perf_event_open() SVC and the
+ * /dev/hwctr device. The perf_event_open() SVC interface makes a lot of effort
+ * to only run the counters while the calling process is actively scheduled
+ * to run.
+ * When /dev/hwctr interface is also used at the same time, the counter sets
+ * will keep running, even when the process is scheduled off a CPU.
+ * However this is not a problem and does not lead to wrong counter values
+ * for the perf_event_open() SVC. The current counter value will be recorded
+ * during schedule-in. At schedule-out time the current counter value is
+ * extracted again and the delta is calculated and added to the event.
*/
-static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
+/* Stop all counter sets via ioctl interface */
+static void cfset_ioctl_off(void *parm)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cfset_call_on_cpu_parm *p = parm;
+ int rc;
- WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
+ cpuhw->dev_state = 0;
+ for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
+ if ((p->sets & cpumf_ctr_ctl[rc]))
+ atomic_dec(&cpuhw->ctr_set[rc]);
+ rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
+ if (rc)
+ pr_err("Counter set stop %#llx of /dev/%s failed rc=%i\n",
+ cpuhw->state, S390_HWCTR_DEVICE, rc);
+ cpuhw->flags &= ~PMU_F_IN_USE;
+ debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
+ __func__, rc, cpuhw->state, cpuhw->dev_state);
+}
- cpuhw->txn_flags = txn_flags;
- if (txn_flags & ~PERF_PMU_TXN_ADD)
- return;
+/* Start counter sets on particular CPU */
+static void cfset_ioctl_on(void *parm)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cfset_call_on_cpu_parm *p = parm;
+ int rc;
- perf_pmu_disable(pmu);
- cpuhw->tx_state = cpuhw->state;
+ cpuhw->flags |= PMU_F_IN_USE;
+ ctr_set_enable(&cpuhw->dev_state, p->sets);
+ ctr_set_start(&cpuhw->dev_state, p->sets);
+ for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
+ if ((p->sets & cpumf_ctr_ctl[rc]))
+ atomic_inc(&cpuhw->ctr_set[rc]);
+ rc = lcctl(cpuhw->dev_state | cpuhw->state); /* Start counter sets */
+ if (!rc)
+ atomic_inc(&p->cpus_ack);
+ else
+ pr_err("Counter set start %#llx of /dev/%s failed rc=%i\n",
+ cpuhw->dev_state | cpuhw->state, S390_HWCTR_DEVICE, rc);
+ debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
+ __func__, rc, cpuhw->state, cpuhw->dev_state);
}
-/*
- * Stop and cancel a group events scheduling tranctions.
- * Assumes cpumf_pmu_del() is called for each successful added
- * cpumf_pmu_add() during the transaction.
- */
-static void cpumf_pmu_cancel_txn(struct pmu *pmu)
+static void cfset_release_cpu(void *p)
{
- unsigned int txn_flags;
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ int rc;
- WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
+ debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n",
+ __func__, cpuhw->state, cpuhw->dev_state);
+ rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
+ if (rc)
+ pr_err("Counter set release %#llx of /dev/%s failed rc=%i\n",
+ cpuhw->state, S390_HWCTR_DEVICE, rc);
+ cpuhw->dev_state = 0;
+}
- txn_flags = cpuhw->txn_flags;
- cpuhw->txn_flags = 0;
- if (txn_flags & ~PERF_PMU_TXN_ADD)
- return;
+/* The release function is also called when the application gets terminated
+ * without doing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
+ */
+static int cfset_release(struct inode *inode, struct file *file)
+{
+ on_each_cpu(cfset_release_cpu, NULL, 1);
+ hw_perf_event_destroy(NULL);
+ cfset_ctrset_clear();
+ atomic_set(&cfset_opencnt, 0);
+ return 0;
+}
- WARN_ON(cpuhw->tx_state != cpuhw->state);
+static int cfset_open(struct inode *inode, struct file *file)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Only one user space program can open /dev/hwctr */
+ if (atomic_xchg(&cfset_opencnt, 1))
+ return -EBUSY;
+
+ cpumf_hw_inuse();
+ file->private_data = NULL;
+ /* nonseekable_open() never fails */
+ return nonseekable_open(inode, file);
+}
- perf_pmu_enable(pmu);
+static int cfset_all_stop(void)
+{
+ struct cfset_call_on_cpu_parm p = {
+ .sets = cfset_request.ctrset,
+ };
+ cpumask_var_t mask;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
+ on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
+ free_cpumask_var(mask);
+ return 0;
}
-/*
- * Commit the group events scheduling transaction. On success, the
- * transaction is closed. On error, the transaction is kept open
- * until cpumf_pmu_cancel_txn() is called.
+static int cfset_all_start(void)
+{
+ struct cfset_call_on_cpu_parm p = {
+ .sets = cfset_request.ctrset,
+ .cpus_ack = ATOMIC_INIT(0),
+ };
+ cpumask_var_t mask;
+ int rc = 0;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
+ on_each_cpu_mask(mask, cfset_ioctl_on, &p, 1);
+ if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
+ on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
+ rc = -EIO;
+ debug_sprintf_event(cf_dbg, 4, "%s CPUs missing", __func__);
+ }
+ free_cpumask_var(mask);
+ return rc;
+}
+
+
+/* Return the maximum required space for all possible CPUs in case one
+ * CPU will be onlined during the START, READ, STOP cycles.
+ * To find out the size of the counter sets, any one CPU will do. They
+ * all have the same counter sets.
*/
-static int cpumf_pmu_commit_txn(struct pmu *pmu)
+static size_t cfset_needspace(unsigned int sets)
+{
+ struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
+ size_t bytes = 0;
+ int i;
+
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ if (!(sets & cpumf_ctr_ctl[i]))
+ continue;
+ bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
+ sizeof(((struct s390_ctrset_setdata *)0)->set) +
+ sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
+ }
+ bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
+ (bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
+ sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
+ put_cpu_ptr(&cpu_cf_events);
+ return bytes;
+}
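As a worked example of the sizing above (with hypothetical numbers, and assuming the set, no_cnts, cpu_nr, no_sets and no_cpus fields are four bytes each): selecting one counter set of 20 counters on a system with nr_cpu_ids = 4 needs 20 * 8 + 4 + 4 = 168 bytes per set, 168 + 4 + 4 = 176 bytes per CPU, and 4 + 4 * 176 = 708 bytes in total, which is the value returned to user space in data_bytes.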
+
+static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
+{
+ struct s390_ctrset_read __user *ctrset_read;
+ unsigned int cpu, cpus, rc;
+ void __user *uptr;
+
+ ctrset_read = (struct s390_ctrset_read __user *)arg;
+ uptr = ctrset_read->data;
+ for_each_cpu(cpu, mask) {
+ struct cpu_cf_events *cpuhw = per_cpu_ptr(&cpu_cf_events, cpu);
+ struct s390_ctrset_cpudata __user *ctrset_cpudata;
+
+ ctrset_cpudata = uptr;
+ rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
+ rc |= put_user(cpuhw->sets, &ctrset_cpudata->no_sets);
+ rc |= copy_to_user(ctrset_cpudata->data, cpuhw->data,
+ cpuhw->used);
+ if (rc)
+ return -EFAULT;
+ uptr += sizeof(struct s390_ctrset_cpudata) + cpuhw->used;
+ cond_resched();
+ }
+ cpus = cpumask_weight(mask);
+ if (put_user(cpus, &ctrset_read->no_cpus))
+ return -EFAULT;
+ debug_sprintf_event(cf_dbg, 4, "%s copied %ld\n", __func__,
+ uptr - (void __user *)ctrset_read->data);
+ return 0;
+}
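The copy loop above defines the layout of the user buffer: the no_cpus count, then per CPU a cpu_nr, a no_sets count and the counter set records. A minimal consumer-side sketch of walking that buffer might look as follows; it is not part of the patch and assumes the uapi structs in asm/hwctrset.h end in flexible data[] members, as the pointer arithmetic above implies.

	#include <stdint.h>
	#include <stdio.h>
	#include <asm/hwctrset.h>	/* assumed uapi header for the struct layouts */

	/* Walk the buffer filled in by the S390_HWCTR_READ ioctl. */
	static void walk_read_buffer(struct s390_ctrset_read *rd)
	{
		unsigned char *p = (unsigned char *)rd->data;
		uint64_t cpu, set;

		for (cpu = 0; cpu < rd->no_cpus; cpu++) {
			struct s390_ctrset_cpudata *cd = (void *)p;

			p = (unsigned char *)cd->data;
			for (set = 0; set < cd->no_sets; set++) {
				struct s390_ctrset_setdata *sd = (void *)p;

				printf("cpu %llu set %#llx counters %u\n",
				       (unsigned long long)cd->cpu_nr,
				       (unsigned long long)sd->set,
				       (unsigned int)sd->no_cnts);
				/* skip set header and counter values */
				p += sizeof(sd->set) + sizeof(sd->no_cnts) +
				     sd->no_cnts * sizeof(uint64_t);
			}
		}
	}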
+
+static size_t cfset_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
+ int ctrset_size, size_t room)
+{
+ size_t need = 0;
+ int rc = -1;
+
+ need = sizeof(*p) + sizeof(u64) * ctrset_size;
+ if (need <= room) {
+ p->set = cpumf_ctr_ctl[ctrset];
+ p->no_cnts = ctrset_size;
+ rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
+ if (rc == 3) /* Nothing stored */
+ need = 0;
+ }
+ return need;
+}
+
+/* Read all counter sets. */
+static void cfset_cpu_read(void *parm)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- u64 state;
+ struct cfset_call_on_cpu_parm *p = parm;
+ int set, set_size;
+ size_t space;
+
+ /* No data saved yet */
+ cpuhw->used = 0;
+ cpuhw->sets = 0;
+ memset(cpuhw->data, 0, sizeof(cpuhw->data));
+
+ /* Scan the counter sets */
+ for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
+ struct s390_ctrset_setdata *sp = (void *)cpuhw->data +
+ cpuhw->used;
+
+ if (!(p->sets & cpumf_ctr_ctl[set]))
+ continue; /* Counter set not in list */
+ set_size = cpum_cf_ctrset_size(set, &cpuhw->info);
+ space = sizeof(cpuhw->data) - cpuhw->used;
+ space = cfset_cpuset_read(sp, set, set_size, space);
+ if (space) {
+ cpuhw->used += space;
+ cpuhw->sets += 1;
+ }
+ }
+ debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__,
+ cpuhw->sets, cpuhw->used);
+}
+
+static int cfset_all_read(unsigned long arg)
+{
+ struct cfset_call_on_cpu_parm p;
+ cpumask_var_t mask;
+ int rc;
- WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
- if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
- cpuhw->txn_flags = 0;
- return 0;
+ p.sets = cfset_request.ctrset;
+ cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
+ on_each_cpu_mask(mask, cfset_cpu_read, &p, 1);
+ rc = cfset_all_copy(arg, mask);
+ free_cpumask_var(mask);
+ return rc;
+}
+
+static long cfset_ioctl_read(unsigned long arg)
+{
+ struct s390_ctrset_read read;
+ int ret = 0;
+
+ if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
+ return -EFAULT;
+ ret = cfset_all_read(arg);
+ return ret;
+}
+
+static long cfset_ioctl_stop(void)
+{
+ int ret = ENXIO;
+
+ if (cfset_request.ctrset) {
+ ret = cfset_all_stop();
+ cfset_ctrset_clear();
}
+ return ret;
+}
- /* check if the updated state can be scheduled */
- state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
- state >>= CPUMF_LCCTL_ENABLE_SHIFT;
- if ((state & cpuhw->info.auth_ctl) != state)
- return -ENOENT;
+static long cfset_ioctl_start(unsigned long arg)
+{
+ struct s390_ctrset_start __user *ustart;
+ struct s390_ctrset_start start;
+ void __user *umask;
+ unsigned int len;
+ int ret = 0;
+ size_t need;
+
+ if (cfset_request.ctrset)
+ return -EBUSY;
+ ustart = (struct s390_ctrset_start __user *)arg;
+ if (copy_from_user(&start, ustart, sizeof(start)))
+ return -EFAULT;
+ if (start.version != S390_HWCTR_START_VERSION)
+ return -EINVAL;
+ if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
+ return -EINVAL; /* Invalid counter set */
+ if (!start.counter_sets)
+ return -EINVAL; /* No counter set at all? */
+ cpumask_clear(&cfset_request.mask);
+ len = min_t(u64, start.cpumask_len, cpumask_size());
+ umask = (void __user *)start.cpumask;
+ if (copy_from_user(&cfset_request.mask, umask, len))
+ return -EFAULT;
+ if (cpumask_empty(&cfset_request.mask))
+ return -EINVAL;
+ need = cfset_needspace(start.counter_sets);
+ if (put_user(need, &ustart->data_bytes))
+ ret = -EFAULT;
+ if (ret)
+ goto out;
+ cfset_request.ctrset = start.counter_sets;
+ ret = cfset_all_start();
+out:
+ if (ret)
+ cfset_ctrset_clear();
+ debug_sprintf_event(cf_dbg, 4, "%s sets %#lx need %ld ret %d\n",
+ __func__, cfset_request.ctrset, need, ret);
+ return ret;
+}
+
+/* Entry point to the /dev/hwctr device interface.
+ * The ioctl system call supports three subcommands:
+ * S390_HWCTR_START: Start the specified counter sets on a CPU list. The
+ * counter set keeps running until explicitly stopped. Returns the number
+ * of bytes needed to store the counter values. If another S390_HWCTR_START
+ * ioctl subcommand is called without a previous S390_HWCTR_STOP stop
+ * command, -EBUSY is returned.
+ * S390_HWCTR_READ: Read the counter set values from the CPU list given
+ * with the S390_HWCTR_START command.
+ * S390_HWCTR_STOP: Stop the counter sets on the CPU list given with the
+ * previous S390_HWCTR_START subcommand.
+ */
+static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
- cpuhw->txn_flags = 0;
- perf_pmu_enable(pmu);
+ get_online_cpus();
+ mutex_lock(&cfset_ctrset_mutex);
+ switch (cmd) {
+ case S390_HWCTR_START:
+ ret = cfset_ioctl_start(arg);
+ break;
+ case S390_HWCTR_STOP:
+ ret = cfset_ioctl_stop();
+ break;
+ case S390_HWCTR_READ:
+ ret = cfset_ioctl_read(arg);
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ mutex_unlock(&cfset_ctrset_mutex);
+ put_online_cpus();
+ return ret;
+}
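A minimal user-space sketch of the START, READ, STOP cycle against /dev/hwctr, based on the subcommands documented above. It is not part of the patch; the asm/hwctrset.h header name, the 0x02 counter set bit and the error handling are assumptions for illustration, while the struct fields are the ones used by cfset_ioctl_start() and cfset_all_copy().

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sched.h>
	#include <sys/ioctl.h>
	#include <asm/hwctrset.h>	/* assumed uapi header for S390_HWCTR_* */

	int main(void)
	{
		struct s390_ctrset_start start;
		struct s390_ctrset_read *rd;
		cpu_set_t cpus;
		int fd, rc;

		fd = open("/dev/hwctr", O_RDWR);
		if (fd < 0)
			return 1;

		CPU_ZERO(&cpus);
		CPU_SET(0, &cpus);			/* sample CPU 0 only */
		memset(&start, 0, sizeof(start));
		start.version = S390_HWCTR_START_VERSION;
		start.counter_sets = 0x02;		/* assumed bit for the basic set */
		start.cpumask_len = sizeof(cpus);
		start.cpumask = (uintptr_t)&cpus;
		rc = ioctl(fd, S390_HWCTR_START, &start);
		if (rc)
			goto out;

		/* START returned the required buffer size in data_bytes */
		rd = malloc(start.data_bytes);
		if (!rd) {
			rc = 1;
			goto out;
		}
		rc = ioctl(fd, S390_HWCTR_READ, rd);
		if (!rc)
			printf("counter data for %llu CPUs\n",
			       (unsigned long long)rd->no_cpus);
		/* walk rd as sketched after cfset_all_copy() above */
		free(rd);
		ioctl(fd, S390_HWCTR_STOP, 0);
	out:
		close(fd);
		return rc ? 1 : 0;
	}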
+
+static const struct file_operations cfset_fops = {
+ .owner = THIS_MODULE,
+ .open = cfset_open,
+ .release = cfset_release,
+ .unlocked_ioctl = cfset_ioctl,
+ .compat_ioctl = cfset_ioctl,
+ .llseek = no_llseek
+};
+
+static struct miscdevice cfset_dev = {
+ .name = S390_HWCTR_DEVICE,
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &cfset_fops,
+};
+
+int cfset_online_cpu(unsigned int cpu)
+{
+ struct cfset_call_on_cpu_parm p;
+
+ mutex_lock(&cfset_ctrset_mutex);
+ if (cfset_request.ctrset) {
+ p.sets = cfset_request.ctrset;
+ cfset_ioctl_on(&p);
+ cpumask_set_cpu(cpu, &cfset_request.mask);
+ }
+ mutex_unlock(&cfset_ctrset_mutex);
return 0;
}
-/* Performance monitoring unit for s390x */
-static struct pmu cpumf_pmu = {
+int cfset_offline_cpu(unsigned int cpu)
+{
+ struct cfset_call_on_cpu_parm p;
+
+ mutex_lock(&cfset_ctrset_mutex);
+ if (cfset_request.ctrset) {
+ p.sets = cfset_request.ctrset;
+ cfset_ioctl_off(&p);
+ cpumask_clear_cpu(cpu, &cfset_request.mask);
+ }
+ mutex_unlock(&cfset_ctrset_mutex);
+ return 0;
+}
+
+static void cfdiag_read(struct perf_event *event)
+{
+ debug_sprintf_event(cf_dbg, 3, "%s event %#llx count %ld\n", __func__,
+ event->attr.config, local64_read(&event->count));
+}
+
+static int get_authctrsets(void)
+{
+ struct cpu_cf_events *cpuhw;
+ unsigned long auth = 0;
+ enum cpumf_ctr_set i;
+
+ cpuhw = &get_cpu_var(cpu_cf_events);
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+ auth |= cpumf_ctr_ctl[i];
+ }
+ put_cpu_var(cpu_cf_events);
+ return auth;
+}
+
+/* Setup the event. Test for authorized counter sets and only include counter
+ * sets which are authorized at the time of the setup. Including unauthorized
+ * counter sets result in specification exception (and panic).
+ */
+static int cfdiag_event_init2(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ int err = 0;
+
+ /* Set sample_period to indicate sampling */
+ event->hw.config = attr->config;
+ event->hw.sample_period = attr->sample_period;
+ local64_set(&event->hw.period_left, event->hw.sample_period);
+ local64_set(&event->count, 0);
+ event->hw.last_period = event->hw.sample_period;
+
+ /* Add all authorized counter sets to config_base. The
+ * hardware init function is either called per-cpu or just once
+ * for all CPUs (event->cpu == -1). This depends on whether
+ * counting is started for all CPUs or on a per-workload basis where
+ * the perf event moves from one CPU to another CPU.
+ * Checking the authorization on any CPU is fine as the hardware
+ * applies the same authorization settings to all CPUs.
+ */
+ event->hw.config_base = get_authctrsets();
+
+ /* No authorized counter sets, nothing to count/sample */
+ if (!event->hw.config_base)
+ err = -EINVAL;
+
+ debug_sprintf_event(cf_dbg, 5, "%s err %d config_base %#lx\n",
+ __func__, err, event->hw.config_base);
+ return err;
+}
+
+static int cfdiag_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ int err = -ENOENT;
+
+ if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
+ event->attr.type != event->pmu->type)
+ goto out;
+
+ /* Raw events are used to access counters directly,
+ * hence do not permit excludes.
+ * This event is useless without PERF_SAMPLE_RAW to return counter set
+ * values as raw data.
+ */
+ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
+ !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Initialize for using the CPU-measurement counter facility */
+ cpumf_hw_inuse();
+ event->destroy = hw_perf_event_destroy;
+
+ err = cfdiag_event_init2(event);
+ if (unlikely(err))
+ event->destroy(event);
+out:
+ return err;
+}
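Given the checks in cfdiag_event_init() above, user space would open this event roughly as in the following sketch. It is not part of the patch and assumes the PMU type is exported via /sys/bus/event_source/devices/cpum_cf_diag/type and that 0xBC000 is the PERF_EVENT_CPUM_CF_DIAG config value (also exported via the events/CF_DIAG attribute below).

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* perf_event_open() has no glibc wrapper */
	static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
				   int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned int type = 0;
		FILE *f;
		int fd;

		/* PMU type as registered by perf_pmu_register(&cf_diag, ...) */
		f = fopen("/sys/bus/event_source/devices/cpum_cf_diag/type", "r");
		if (!f)
			return 1;
		if (fscanf(f, "%u", &type) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = 0xBC000;		/* assumed PERF_EVENT_CPUM_CF_DIAG */
		attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CPU;
		attr.sample_period = 1;
		/* exclude_user/kernel/hv must stay zero, see cfdiag_event_init() */

		fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
		if (fd < 0)
			return 1;
		/* ... read PERF_SAMPLE_RAW records from the mmap'ed ring buffer ... */
		close(fd);
		return 0;
	}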
+
+/* Create cf_diag/events/CF_DIAG event sysfs file. This counter is used
+ * to collect the complete counter sets for a scheduled process. The target
+ * is complete counter sets attached as raw data to the artificial event.
+ * This results in complete counter sets being available when a process is
+ * scheduled. The sample contains the delta of every counter while the
+ * process was running.
+ */
+CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
+
+static struct attribute *cfdiag_events_attr[] = {
+ CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
+ NULL,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *cfdiag_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cfdiag_events_group = {
+ .name = "events",
+ .attrs = cfdiag_events_attr,
+};
+static struct attribute_group cfdiag_format_group = {
+ .name = "format",
+ .attrs = cfdiag_format_attr,
+};
+static const struct attribute_group *cfdiag_attr_groups[] = {
+ &cfdiag_events_group,
+ &cfdiag_format_group,
+ NULL,
+};
+
+/* Performance monitoring unit for event CF_DIAG. Since this event
+ * is also started and stopped via the perf_event_open() system call, use
+ * the same event enable/disable callback functions. They do not
+ * have a pointer to the perf_event structure as first parameter.
+ *
+ * The functions XXX_add, XXX_del, XXX_start and XXX_stop are also common.
+ * Reuse them and distinguish the event (always first parameter) via
+ * 'config' member.
+ */
+static struct pmu cf_diag = {
.task_ctx_nr = perf_sw_context,
- .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .event_init = cfdiag_event_init,
.pmu_enable = cpumf_pmu_enable,
.pmu_disable = cpumf_pmu_disable,
- .event_init = cpumf_pmu_event_init,
.add = cpumf_pmu_add,
.del = cpumf_pmu_del,
.start = cpumf_pmu_start,
.stop = cpumf_pmu_stop,
- .read = cpumf_pmu_read,
- .start_txn = cpumf_pmu_start_txn,
- .commit_txn = cpumf_pmu_commit_txn,
- .cancel_txn = cpumf_pmu_cancel_txn,
+ .read = cfdiag_read,
+
+ .attr_groups = cfdiag_attr_groups
};
-static int __init cpumf_pmu_init(void)
+/* Calculate memory needed to store all counter sets together with header and
+ * trailer data. This is independent of the counter set authorization which
+ * can vary depending on the configuration.
+ */
+static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
+{
+ size_t max_size = sizeof(struct cf_trailer_entry);
+ enum cpumf_ctr_set i;
+
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ size_t size = cpum_cf_ctrset_size(i, info);
+
+ if (size)
+ max_size += size * sizeof(u64) +
+ sizeof(struct cf_ctrset_entry);
+ }
+ return max_size;
+}
+
+/* Get the CPU speed, try sampling facility first and CPU attributes second. */
+static void cfdiag_get_cpu_speed(void)
{
+ if (cpum_sf_avail()) { /* Sampling facility first */
+ struct hws_qsi_info_block si;
+
+ memset(&si, 0, sizeof(si));
+ if (!qsi(&si)) {
+ cfdiag_cpu_speed = si.cpu_speed;
+ return;
+ }
+ }
+
+ /* Fallback: extract the static part of the CPU speed. Used in case
+ * the CPU Measurement Sampling Facility is turned off.
+ */
+ if (test_facility(34)) {
+ unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
+
+ if (mhz != -1UL)
+ cfdiag_cpu_speed = mhz & 0xffffffff;
+ }
+}
+
+static int cfset_init(void)
+{
+ struct cpumf_ctr_info info;
+ size_t need;
int rc;
- if (!kernel_cpumcf_avail())
+ if (qctri(&info))
return -ENODEV;
- cpumf_pmu.attr_groups = cpumf_cf_event_group();
- rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
- if (rc)
- pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
+ cfdiag_get_cpu_speed();
+ /* Make sure the counter set data fits into predefined buffer. */
+ need = cfdiag_maxsize(&info);
+ if (need > sizeof(((struct cpu_cf_events *)0)->start)) {
+ pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
+ need);
+ return -ENOMEM;
+ }
+
+ rc = misc_register(&cfset_dev);
+ if (rc) {
+ pr_err("Registration of /dev/%s failed rc=%i\n",
+ cfset_dev.name, rc);
+ goto out;
+ }
+
+ rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
+ if (rc) {
+ misc_deregister(&cfset_dev);
+ pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
+ rc);
+ }
+out:
return rc;
}
-subsys_initcall(cpumf_pmu_init);
+
+device_initcall(cpumf_pmu_init);
diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c
index 6d53215c8484..30f0242de4a5 100644
--- a/arch/s390/kernel/perf_cpum_cf_common.c
+++ b/arch/s390/kernel/perf_cpum_cf_common.c
@@ -29,8 +29,11 @@ DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
},
.alert = ATOMIC64_INIT(0),
.state = 0,
+ .dev_state = 0,
.flags = 0,
- .txn_flags = 0,
+ .used = 0,
+ .usedss = 0,
+ .sets = 0
};
/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
static bool cpum_cf_initalized;
@@ -97,25 +100,10 @@ bool kernel_cpumcf_avail(void)
}
EXPORT_SYMBOL(kernel_cpumcf_avail);
-
-/* Reserve/release functions for sharing perf hardware */
-static DEFINE_SPINLOCK(cpumcf_owner_lock);
-static void *cpumcf_owner;
-
/* Initialize the CPU-measurement counter facility */
int __kernel_cpumcf_begin(void)
{
int flags = PMC_INIT;
- int err = 0;
-
- spin_lock(&cpumcf_owner_lock);
- if (cpumcf_owner)
- err = -EBUSY;
- else
- cpumcf_owner = __builtin_return_address(0);
- spin_unlock(&cpumcf_owner_lock);
- if (err)
- return err;
on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
@@ -145,10 +133,6 @@ void __kernel_cpumcf_end(void)
on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
- spin_lock(&cpumcf_owner_lock);
- cpumcf_owner = NULL;
- spin_unlock(&cpumcf_owner_lock);
}
EXPORT_SYMBOL(__kernel_cpumcf_end);
@@ -162,11 +146,13 @@ static int cpum_cf_setup(unsigned int cpu, int flags)
static int cpum_cf_online_cpu(unsigned int cpu)
{
- return cpum_cf_setup(cpu, PMC_INIT);
+ cpum_cf_setup(cpu, PMC_INIT);
+ return cfset_online_cpu(cpu);
}
static int cpum_cf_offline_cpu(unsigned int cpu)
{
+ cfset_offline_cpu(cpu);
return cpum_cf_setup(cpu, PMC_RELEASE);
}
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
deleted file mode 100644
index 08c985c1097c..000000000000
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Performance event support for s390x - CPU-measurement Counter Sets
- *
- * Copyright IBM Corp. 2019, 2021
- * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
- * Thomas Richer <tmricht@linux.ibm.com>
- */
-#define KMSG_COMPONENT "cpum_cf_diag"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/processor.h>
-#include <linux/miscdevice.h>
-#include <linux/mutex.h>
-
-#include <asm/ctl_reg.h>
-#include <asm/irq.h>
-#include <asm/cpu_mcf.h>
-#include <asm/timex.h>
-#include <asm/debug.h>
-
-#include <asm/hwctrset.h>
-
-#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
- /* interval in seconds */
-static unsigned int cf_diag_cpu_speed;
-static debug_info_t *cf_diag_dbg;
-
-struct cf_diag_csd { /* Counter set data per CPU */
- size_t used; /* Bytes used in data/start */
- unsigned char start[PAGE_SIZE]; /* Counter set at event start */
- unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
- unsigned int sets; /* # Counter set saved in data */
-};
-static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
-
-/* Counter sets are stored as data stream in a page sized memory buffer and
- * exported to user space via raw data attached to the event sample data.
- * Each counter set starts with an eight byte header consisting of:
- * - a two byte eye catcher (0xfeef)
- * - a one byte counter set number
- * - a two byte counter set size (indicates the number of counters in this set)
- * - a three byte reserved value (must be zero) to make the header the same
- * size as a counter value.
- * All counter values are eight byte in size.
- *
- * All counter sets are followed by a 64 byte trailer.
- * The trailer consists of a:
- * - flag field indicating valid fields when corresponding bit set
- * - the counter facility first and second version number
- * - the CPU speed if nonzero
- * - the time stamp the counter sets have been collected
- * - the time of day (TOD) base value
- * - the machine type.
- *
- * The counter sets are saved when the process is prepared to be executed on a
- * CPU and saved again when the process is going to be removed from a CPU.
- * The difference of both counter sets are calculated and stored in the event
- * sample data area.
- */
-
-struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
- unsigned int def:16; /* 0-15 Data Entry Format */
- unsigned int set:16; /* 16-31 Counter set identifier */
- unsigned int ctr:16; /* 32-47 Number of stored counters */
- unsigned int res1:16; /* 48-63 Reserved */
-};
-
-struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */
- /* 0 - 7 */
- union {
- struct {
- unsigned int clock_base:1; /* TOD clock base set */
- unsigned int speed:1; /* CPU speed set */
- /* Measurement alerts */
- unsigned int mtda:1; /* Loss of MT ctr. data alert */
- unsigned int caca:1; /* Counter auth. change alert */
- unsigned int lcda:1; /* Loss of counter data alert */
- };
- unsigned long flags; /* 0-63 All indicators */
- };
- /* 8 - 15 */
- unsigned int cfvn:16; /* 64-79 Ctr First Version */
- unsigned int csvn:16; /* 80-95 Ctr Second Version */
- unsigned int cpu_speed:32; /* 96-127 CPU speed */
- /* 16 - 23 */
- unsigned long timestamp; /* 128-191 Timestamp (TOD) */
- /* 24 - 55 */
- union {
- struct {
- unsigned long progusage1;
- unsigned long progusage2;
- unsigned long progusage3;
- unsigned long tod_base;
- };
- unsigned long progusage[4];
- };
- /* 56 - 63 */
- unsigned int mach_type:16; /* Machine type */
- unsigned int res1:16; /* Reserved */
- unsigned int res2:32; /* Reserved */
-};
-
-/* Create the trailer data at the end of a page. */
-static void cf_diag_trailer(struct cf_trailer_entry *te)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- struct cpuid cpuid;
-
- te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */
- te->csvn = cpuhw->info.csvn;
-
- get_cpu_id(&cpuid); /* Machine type */
- te->mach_type = cpuid.machine;
- te->cpu_speed = cf_diag_cpu_speed;
- if (te->cpu_speed)
- te->speed = 1;
- te->clock_base = 1; /* Save clock base */
- te->tod_base = tod_clock_base.tod;
- te->timestamp = get_tod_clock_fast();
-}
-
-/*
- * Change the CPUMF state to active.
- * Enable and activate the CPU-counter sets according
- * to the per-cpu control state.
- */
-static void cf_diag_enable(struct pmu *pmu)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- int err;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s pmu %p cpu %d flags %#x state %#llx\n",
- __func__, pmu, smp_processor_id(), cpuhw->flags,
- cpuhw->state);
- if (cpuhw->flags & PMU_F_ENABLED)
- return;
-
- err = lcctl(cpuhw->state);
- if (err) {
- pr_err("Enabling the performance measuring unit "
- "failed with rc=%x\n", err);
- return;
- }
- cpuhw->flags |= PMU_F_ENABLED;
-}
-
-/*
- * Change the CPUMF state to inactive.
- * Disable and enable (inactive) the CPU-counter sets according
- * to the per-cpu control state.
- */
-static void cf_diag_disable(struct pmu *pmu)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- u64 inactive;
- int err;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s pmu %p cpu %d flags %#x state %#llx\n",
- __func__, pmu, smp_processor_id(), cpuhw->flags,
- cpuhw->state);
- if (!(cpuhw->flags & PMU_F_ENABLED))
- return;
-
- inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
- err = lcctl(inactive);
- if (err) {
- pr_err("Disabling the performance measuring unit "
- "failed with rc=%x\n", err);
- return;
- }
- cpuhw->flags &= ~PMU_F_ENABLED;
-}
-
-/* Number of perf events counting hardware events */
-static atomic_t cf_diag_events = ATOMIC_INIT(0);
-/* Used to avoid races in calling reserve/release_cpumf_hardware */
-static DEFINE_MUTEX(cf_diag_reserve_mutex);
-
-/* Release the PMU if event is the last perf event */
-static void cf_diag_perf_event_destroy(struct perf_event *event)
-{
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d cf_diag_events %d\n",
- __func__, event, smp_processor_id(),
- atomic_read(&cf_diag_events));
- if (atomic_dec_return(&cf_diag_events) == 0)
- __kernel_cpumcf_end();
-}
-
-static int get_authctrsets(void)
-{
- struct cpu_cf_events *cpuhw;
- unsigned long auth = 0;
- enum cpumf_ctr_set i;
-
- cpuhw = &get_cpu_var(cpu_cf_events);
- for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
- auth |= cpumf_ctr_ctl[i];
- }
- put_cpu_var(cpu_cf_events);
- return auth;
-}
-
-/* Setup the event. Test for authorized counter sets and only include counter
- * sets which are authorized at the time of the setup. Including unauthorized
- * counter sets result in specification exception (and panic).
- */
-static int __hw_perf_event_init(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- int err = 0;
-
- debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
- event, event->cpu);
-
- event->hw.config = attr->config;
-
- /* Add all authorized counter sets to config_base. The
- * the hardware init function is either called per-cpu or just once
- * for all CPUS (event->cpu == -1). This depends on the whether
- * counting is started for all CPUs or on a per workload base where
- * the perf event moves from one CPU to another CPU.
- * Checking the authorization on any CPU is fine as the hardware
- * applies the same authorization settings to all CPUs.
- */
- event->hw.config_base = get_authctrsets();
-
- /* No authorized counter sets, nothing to count/sample */
- if (!event->hw.config_base) {
- err = -EINVAL;
- goto out;
- }
-
- /* Set sample_period to indicate sampling */
- event->hw.sample_period = attr->sample_period;
- local64_set(&event->hw.period_left, event->hw.sample_period);
- event->hw.last_period = event->hw.sample_period;
-out:
- debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
- __func__, err, event->hw.config_base);
- return err;
-}
-
-/* Return 0 if the CPU-measurement counter facility is currently free
- * and an error otherwise.
- */
-static int cf_diag_perf_event_inuse(void)
-{
- int err = 0;
-
- if (!atomic_inc_not_zero(&cf_diag_events)) {
- mutex_lock(&cf_diag_reserve_mutex);
- if (atomic_read(&cf_diag_events) == 0 &&
- __kernel_cpumcf_begin())
- err = -EBUSY;
- else
- err = atomic_inc_return(&cf_diag_events);
- mutex_unlock(&cf_diag_reserve_mutex);
- }
- return err;
-}
-
-static int cf_diag_event_init(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- int err = -ENOENT;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d config %#llx type:%u "
- "sample_type %#llx cf_diag_events %d\n", __func__,
- event, event->cpu, attr->config, event->pmu->type,
- attr->sample_type, atomic_read(&cf_diag_events));
-
- if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
- event->attr.type != event->pmu->type)
- goto out;
-
- /* Raw events are used to access counters directly,
- * hence do not permit excludes.
- * This event is usesless without PERF_SAMPLE_RAW to return counter set
- * values as raw data.
- */
- if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
- !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- /* Initialize for using the CPU-measurement counter facility */
- err = cf_diag_perf_event_inuse();
- if (err < 0)
- goto out;
- event->destroy = cf_diag_perf_event_destroy;
-
- err = __hw_perf_event_init(event);
- if (unlikely(err))
- event->destroy(event);
-out:
- debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
- return err;
-}
-
-static void cf_diag_read(struct perf_event *event)
-{
- debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
-}
-
-/* Calculate memory needed to store all counter sets together with header and
- * trailer data. This is independend of the counter set authorization which
- * can vary depending on the configuration.
- */
-static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
-{
- size_t max_size = sizeof(struct cf_trailer_entry);
- enum cpumf_ctr_set i;
-
- for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- size_t size = cpum_cf_ctrset_size(i, info);
-
- if (size)
- max_size += size * sizeof(u64) +
- sizeof(struct cf_ctrset_entry);
- }
- debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__,
- max_size);
-
- return max_size;
-}
-
-/* Read a counter set. The counter set number determines which counter set and
- * the CPUM-CF first and second version number determine the number of
- * available counters in this counter set.
- * Each counter set starts with header containing the counter set number and
- * the number of 8 byte counters.
- *
- * The functions returns the number of bytes occupied by this counter set
- * including the header.
- * If there is no counter in the counter set, this counter set is useless and
- * zero is returned on this case.
- */
-static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
- size_t room)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- size_t ctrset_size, need = 0;
- int rc = 3; /* Assume write failure */
-
- ctrdata->def = CF_DIAG_CTRSET_DEF;
- ctrdata->set = ctrset;
- ctrdata->res1 = 0;
- ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info);
-
- if (ctrset_size) { /* Save data */
- need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
- if (need <= room)
- rc = ctr_stcctm(ctrset, ctrset_size,
- (u64 *)(ctrdata + 1));
- if (rc != 3)
- ctrdata->ctr = ctrset_size;
- else
- need = 0;
- }
-
- debug_sprintf_event(cf_diag_dbg, 6,
- "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
- " need %zd rc %d\n",
- __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
- cpuhw->info.csvn, need, rc);
- return need;
-}
-
-/* Read out all counter sets and save them in the provided data buffer.
- * The last 64 byte host an artificial trailer entry.
- */
-static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth)
-{
- struct cf_trailer_entry *trailer;
- size_t offset = 0, done;
- int i;
-
- memset(data, 0, sz);
- sz -= sizeof(*trailer); /* Always room for trailer */
- for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- struct cf_ctrset_entry *ctrdata = data + offset;
-
- if (!(auth & cpumf_ctr_ctl[i]))
- continue; /* Counter set not authorized */
-
- done = cf_diag_getctrset(ctrdata, i, sz - offset);
- offset += done;
- debug_sprintf_event(cf_diag_dbg, 6,
- "%s ctrset %d offset %zu done %zu\n",
- __func__, i, offset, done);
- }
- trailer = data + offset;
- cf_diag_trailer(trailer);
- return offset + sizeof(*trailer);
-}
-
-/* Calculate the difference for each counter in a counter set. */
-static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters)
-{
- for (; --counters >= 0; ++pstart, ++pstop)
- if (*pstop >= *pstart)
- *pstop -= *pstart;
- else
- *pstop = *pstart - *pstop;
-}
-
-/* Scan the counter sets and calculate the difference of each counter
- * in each set. The result is the increment of each counter during the
- * period the counter set has been activated.
- *
- * Return true on success.
- */
-static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
-{
- struct cf_trailer_entry *trailer_start, *trailer_stop;
- struct cf_ctrset_entry *ctrstart, *ctrstop;
- size_t offset = 0;
-
- auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
- do {
- ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
- ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);
-
- if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
- pr_err("cpum_cf_diag counter set compare error "
- "in set %i\n", ctrstart->set);
- return 0;
- }
- auth &= ~cpumf_ctr_ctl[ctrstart->set];
- if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
- cf_diag_diffctrset((u64 *)(ctrstart + 1),
- (u64 *)(ctrstop + 1), ctrstart->ctr);
- offset += ctrstart->ctr * sizeof(u64) +
- sizeof(*ctrstart);
- }
- debug_sprintf_event(cf_diag_dbg, 6,
- "%s set %d ctr %d offset %zu auth %lx\n",
- __func__, ctrstart->set, ctrstart->ctr,
- offset, auth);
- } while (ctrstart->def && auth);
-
- /* Save time_stamp from start of event in stop's trailer */
- trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
- trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
- trailer_stop->progusage[0] = trailer_start->timestamp;
-
- return 1;
-}
-
-/* Create perf event sample with the counter sets as raw data. The sample
- * is then pushed to the event subsystem and the function checks for
- * possible event overflows. If an event overflow occurs, the PMU is
- * stopped.
- *
- * Return non-zero if an event overflow occurred.
- */
-static int cf_diag_push_sample(struct perf_event *event,
- struct cf_diag_csd *csd)
-{
- struct perf_sample_data data;
- struct perf_raw_record raw;
- struct pt_regs regs;
- int overflow;
-
- /* Setup perf sample */
- perf_sample_data_init(&data, 0, event->hw.last_period);
- memset(&regs, 0, sizeof(regs));
- memset(&raw, 0, sizeof(raw));
-
- if (event->attr.sample_type & PERF_SAMPLE_CPU)
- data.cpu_entry.cpu = event->cpu;
- if (event->attr.sample_type & PERF_SAMPLE_RAW) {
- raw.frag.size = csd->used;
- raw.frag.data = csd->data;
- raw.size = csd->used;
- data.raw = &raw;
- }
-
- overflow = perf_event_overflow(event, &data, &regs);
- debug_sprintf_event(cf_diag_dbg, 6,
- "%s event %p cpu %d sample_type %#llx raw %d "
- "ov %d\n", __func__, event, event->cpu,
- event->attr.sample_type, raw.size, overflow);
- if (overflow)
- event->pmu->stop(event, 0);
-
- perf_event_update_userpage(event);
- return overflow;
-}
-
-static void cf_diag_start(struct perf_event *event, int flags)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
- struct hw_perf_event *hwc = &event->hw;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d flags %#x hwc-state %#x\n",
- __func__, event, event->cpu, flags, hwc->state);
- if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
- return;
-
- /* (Re-)enable and activate all counter sets */
- lcctl(0); /* Reset counter sets */
- hwc->state = 0;
- ctr_set_multiple_enable(&cpuhw->state, hwc->config_base);
- lcctl(cpuhw->state); /* Enable counter sets */
- csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
- event->hw.config_base);
- ctr_set_multiple_start(&cpuhw->state, hwc->config_base);
- /* Function cf_diag_enable() starts the counter sets. */
-}
-
-static void cf_diag_stop(struct perf_event *event, int flags)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
- struct hw_perf_event *hwc = &event->hw;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d flags %#x hwc-state %#x\n",
- __func__, event, event->cpu, flags, hwc->state);
-
- /* Deactivate all counter sets */
- ctr_set_multiple_stop(&cpuhw->state, hwc->config_base);
- local64_inc(&event->count);
- csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
- event->hw.config_base);
- if (cf_diag_diffctr(csd, event->hw.config_base))
- cf_diag_push_sample(event, csd);
- hwc->state |= PERF_HES_STOPPED;
-}
-
-static int cf_diag_add(struct perf_event *event, int flags)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- int err = 0;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d flags %#x cpuhw %p\n",
- __func__, event, event->cpu, flags, cpuhw);
-
- if (cpuhw->flags & PMU_F_IN_USE) {
- err = -EAGAIN;
- goto out;
- }
-
- event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
-
- cpuhw->flags |= PMU_F_IN_USE;
- if (flags & PERF_EF_START)
- cf_diag_start(event, PERF_EF_RELOAD);
-out:
- debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
- return err;
-}
-
-static void cf_diag_del(struct perf_event *event, int flags)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d flags %#x\n",
- __func__, event, event->cpu, flags);
-
- cf_diag_stop(event, PERF_EF_UPDATE);
- ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base);
- ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base);
- cpuhw->flags &= ~PMU_F_IN_USE;
-}
-
-/* Default counter set events and format attribute groups */
-
-CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
-
-static struct attribute *cf_diag_events_attr[] = {
- CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
- NULL,
-};
-
-PMU_FORMAT_ATTR(event, "config:0-63");
-
-static struct attribute *cf_diag_format_attr[] = {
- &format_attr_event.attr,
- NULL,
-};
-
-static struct attribute_group cf_diag_events_group = {
- .name = "events",
- .attrs = cf_diag_events_attr,
-};
-static struct attribute_group cf_diag_format_group = {
- .name = "format",
- .attrs = cf_diag_format_attr,
-};
-static const struct attribute_group *cf_diag_attr_groups[] = {
- &cf_diag_events_group,
- &cf_diag_format_group,
- NULL,
-};
-
-/* Performance monitoring unit for s390x */
-static struct pmu cf_diag = {
- .task_ctx_nr = perf_sw_context,
- .pmu_enable = cf_diag_enable,
- .pmu_disable = cf_diag_disable,
- .event_init = cf_diag_event_init,
- .add = cf_diag_add,
- .del = cf_diag_del,
- .start = cf_diag_start,
- .stop = cf_diag_stop,
- .read = cf_diag_read,
-
- .attr_groups = cf_diag_attr_groups
-};
-
-/* Get the CPU speed, try sampling facility first and CPU attributes second. */
-static void cf_diag_get_cpu_speed(void)
-{
- if (cpum_sf_avail()) { /* Sampling facility first */
- struct hws_qsi_info_block si;
-
- memset(&si, 0, sizeof(si));
- if (!qsi(&si)) {
- cf_diag_cpu_speed = si.cpu_speed;
- return;
- }
- }
-
- if (test_facility(34)) { /* CPU speed extract static part */
- unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
-
- if (mhz != -1UL)
- cf_diag_cpu_speed = mhz & 0xffffffff;
- }
-}
-
-/* Code to create device and file I/O operations */
-static atomic_t ctrset_opencnt = ATOMIC_INIT(0); /* Excl. access */
-
-static int cf_diag_open(struct inode *inode, struct file *file)
-{
- int err = 0;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (atomic_xchg(&ctrset_opencnt, 1))
- return -EBUSY;
-
- /* Avoid concurrent access with perf_event_open() system call */
- mutex_lock(&cf_diag_reserve_mutex);
- if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin())
- err = -EBUSY;
- mutex_unlock(&cf_diag_reserve_mutex);
- if (err) {
- atomic_set(&ctrset_opencnt, 0);
- return err;
- }
- file->private_data = NULL;
- debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
- /* nonseekable_open() never fails */
- return nonseekable_open(inode, file);
-}
-
-/* Variables for ioctl() interface support */
-static DEFINE_MUTEX(cf_diag_ctrset_mutex);
-static struct cf_diag_ctrset {
- unsigned long ctrset; /* Bit mask of counter set to read */
- cpumask_t mask; /* CPU mask to read from */
-} cf_diag_ctrset;
-
-static void cf_diag_ctrset_clear(void)
-{
- cpumask_clear(&cf_diag_ctrset.mask);
- cf_diag_ctrset.ctrset = 0;
-}
-
-static void cf_diag_release_cpu(void *p)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
- debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__,
- smp_processor_id());
- lcctl(0); /* Reset counter sets */
- cpuhw->state = 0; /* Save state in CPU hardware state */
-}
-
-/* The release function is also called when the application gets terminated
- * without doing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
- * Since only one application is allowed to open the device, simply stop all
- * CPU counter sets.
- */
-static int cf_diag_release(struct inode *inode, struct file *file)
-{
- on_each_cpu(cf_diag_release_cpu, NULL, 1);
- cf_diag_ctrset_clear();
- atomic_set(&ctrset_opencnt, 0);
- __kernel_cpumcf_end();
- debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
- return 0;
-}
-
-struct cf_diag_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */
- unsigned int sets; /* Counter set bit mask */
- atomic_t cpus_ack; /* # CPUs successfully executed func */
-};
-
-static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask)
-{
- struct s390_ctrset_read __user *ctrset_read;
- unsigned int cpu, cpus, rc;
- void __user *uptr;
-
- ctrset_read = (struct s390_ctrset_read __user *)arg;
- uptr = ctrset_read->data;
- for_each_cpu(cpu, mask) {
- struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu);
- struct s390_ctrset_cpudata __user *ctrset_cpudata;
-
- ctrset_cpudata = uptr;
- debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n",
- __func__, cpu, csd->used);
- rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
- rc |= put_user(csd->sets, &ctrset_cpudata->no_sets);
- rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used);
- if (rc)
- return -EFAULT;
- uptr += sizeof(struct s390_ctrset_cpudata) + csd->used;
- cond_resched();
- }
- cpus = cpumask_weight(mask);
- if (put_user(cpus, &ctrset_read->no_cpus))
- return -EFAULT;
- debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n",
- __func__, uptr - (void __user *)ctrset_read->data);
- return 0;
-}
-
-static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
- int ctrset_size, size_t room)
-{
- size_t need = 0;
- int rc = -1;
-
- need = sizeof(*p) + sizeof(u64) * ctrset_size;
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s room %zd need %zd set %#x set_size %d\n",
- __func__, room, need, ctrset, ctrset_size);
- if (need <= room) {
- p->set = cpumf_ctr_ctl[ctrset];
- p->no_cnts = ctrset_size;
- rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
- if (rc == 3) /* Nothing stored */
- need = 0;
- }
- debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__,
- need, rc);
- return need;
-}
-
-/* Read all counter sets. Since the perf_event_open() system call with
- * event cpum_cf_diag/.../ is blocked when this interface is active, reuse
- * the perf_event_open() data buffer to store the counter sets.
- */
-static void cf_diag_cpu_read(void *parm)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
- struct cf_diag_call_on_cpu_parm *p = parm;
- int set, set_size;
- size_t space;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s new %#x flags %#x state %#llx\n",
- __func__, p->sets, cpuhw->flags,
- cpuhw->state);
- /* No data saved yet */
- csd->used = 0;
- csd->sets = 0;
- memset(csd->data, 0, sizeof(csd->data));
-
- /* Scan the counter sets */
- for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
- struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used;
-
- if (!(p->sets & cpumf_ctr_ctl[set]))
- continue; /* Counter set not in list */
- set_size = cpum_cf_ctrset_size(set, &cpuhw->info);
- space = sizeof(csd->data) - csd->used;
- space = cf_diag_cpuset_read(sp, set, set_size, space);
- if (space) {
- csd->used += space;
- csd->sets += 1;
- }
- debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n",
- __func__, sp, space);
- }
- debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__,
- csd->sets, csd->used);
-}
-
-static int cf_diag_all_read(unsigned long arg)
-{
- struct cf_diag_call_on_cpu_parm p;
- cpumask_var_t mask;
- int rc;
-
- debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
-
- p.sets = cf_diag_ctrset.ctrset;
- cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
- on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
- rc = cf_diag_all_copy(arg, mask);
- free_cpumask_var(mask);
- debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
- return rc;
-}
-
-/* Stop all counter sets via ioctl interface */
-static void cf_diag_ioctl_off(void *parm)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- struct cf_diag_call_on_cpu_parm *p = parm;
- int rc;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s new %#x flags %#x state %#llx\n",
- __func__, p->sets, cpuhw->flags,
- cpuhw->state);
-
- ctr_set_multiple_disable(&cpuhw->state, p->sets);
- ctr_set_multiple_stop(&cpuhw->state, p->sets);
- rc = lcctl(cpuhw->state); /* Stop counter sets */
- if (!cpuhw->state)
- cpuhw->flags &= ~PMU_F_IN_USE;
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s rc %d flags %#x state %#llx\n", __func__,
- rc, cpuhw->flags, cpuhw->state);
-}
-
-/* Start counter sets on particular CPU */
-static void cf_diag_ioctl_on(void *parm)
-{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
- struct cf_diag_call_on_cpu_parm *p = parm;
- int rc;
-
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s new %#x flags %#x state %#llx\n",
- __func__, p->sets, cpuhw->flags,
- cpuhw->state);
-
- if (!(cpuhw->flags & PMU_F_IN_USE))
- cpuhw->state = 0;
- cpuhw->flags |= PMU_F_IN_USE;
- rc = lcctl(cpuhw->state); /* Reset unused counter sets */
- ctr_set_multiple_enable(&cpuhw->state, p->sets);
- ctr_set_multiple_start(&cpuhw->state, p->sets);
- rc |= lcctl(cpuhw->state); /* Start counter sets */
- if (!rc)
- atomic_inc(&p->cpus_ack);
- debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n",
- __func__, rc, cpuhw->state);
-}
-
-static int cf_diag_all_stop(void)
-{
- struct cf_diag_call_on_cpu_parm p = {
- .sets = cf_diag_ctrset.ctrset,
- };
- cpumask_var_t mask;
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
- on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
- free_cpumask_var(mask);
- return 0;
-}
-
-static int cf_diag_all_start(void)
-{
- struct cf_diag_call_on_cpu_parm p = {
- .sets = cf_diag_ctrset.ctrset,
- .cpus_ack = ATOMIC_INIT(0),
- };
- cpumask_var_t mask;
- int rc = 0;
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
- on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1);
- if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
- on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
- rc = -EIO;
- }
- free_cpumask_var(mask);
- return rc;
-}
-
-/* Return the maximum required space for all possible CPUs in case a
- * CPU is onlined during the START, READ, STOP cycles.
- * To find out the size of the counter sets, any one CPU will do. They
- * all have the same counter sets.
- */
-static size_t cf_diag_needspace(unsigned int sets)
-{
- struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
- size_t bytes = 0;
- int i;
-
- for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- if (!(sets & cpumf_ctr_ctl[i]))
- continue;
- bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
- sizeof(((struct s390_ctrset_setdata *)0)->set) +
- sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
- }
- bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
- (bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
- sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
- debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
- bytes);
- put_cpu_ptr(&cpu_cf_events);
- return bytes;
-}
-
-static long cf_diag_ioctl_read(unsigned long arg)
-{
- struct s390_ctrset_read read;
- int ret = 0;
-
- debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
- if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
- return -EFAULT;
- ret = cf_diag_all_read(arg);
- debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
- return ret;
-}
-
-static long cf_diag_ioctl_stop(void)
-{
- int ret;
-
- debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
- ret = cf_diag_all_stop();
- cf_diag_ctrset_clear();
- debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
- return ret;
-}
-
-static long cf_diag_ioctl_start(unsigned long arg)
-{
- struct s390_ctrset_start __user *ustart;
- struct s390_ctrset_start start;
- void __user *umask;
- unsigned int len;
- int ret = 0;
- size_t need;
-
- if (cf_diag_ctrset.ctrset)
- return -EBUSY;
- ustart = (struct s390_ctrset_start __user *)arg;
- if (copy_from_user(&start, ustart, sizeof(start)))
- return -EFAULT;
- if (start.version != S390_HWCTR_START_VERSION)
- return -EINVAL;
- if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
- cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
- cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
- cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
- cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
- return -EINVAL; /* Invalid counter set */
- if (!start.counter_sets)
- return -EINVAL; /* No counter set at all? */
- cpumask_clear(&cf_diag_ctrset.mask);
- len = min_t(u64, start.cpumask_len, cpumask_size());
- umask = (void __user *)start.cpumask;
- if (copy_from_user(&cf_diag_ctrset.mask, umask, len))
- return -EFAULT;
- if (cpumask_empty(&cf_diag_ctrset.mask))
- return -EINVAL;
- need = cf_diag_needspace(start.counter_sets);
- if (put_user(need, &ustart->data_bytes))
- ret = -EFAULT;
- if (ret)
- goto out;
- cf_diag_ctrset.ctrset = start.counter_sets;
- ret = cf_diag_all_start();
-out:
- if (ret)
- cf_diag_ctrset_clear();
- debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n",
- __func__, cf_diag_ctrset.ctrset, need, ret);
- return ret;
-}
-
-static long cf_diag_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__,
- cmd, arg);
- get_online_cpus();
- mutex_lock(&cf_diag_ctrset_mutex);
- switch (cmd) {
- case S390_HWCTR_START:
- ret = cf_diag_ioctl_start(arg);
- break;
- case S390_HWCTR_STOP:
- ret = cf_diag_ioctl_stop();
- break;
- case S390_HWCTR_READ:
- ret = cf_diag_ioctl_read(arg);
- break;
- default:
- ret = -ENOTTY;
- break;
- }
- mutex_unlock(&cf_diag_ctrset_mutex);
- put_online_cpus();
- debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret);
- return ret;
-}
-
-static const struct file_operations cf_diag_fops = {
- .owner = THIS_MODULE,
- .open = cf_diag_open,
- .release = cf_diag_release,
- .unlocked_ioctl = cf_diag_ioctl,
- .compat_ioctl = cf_diag_ioctl,
- .llseek = no_llseek
-};
-
-static struct miscdevice cf_diag_dev = {
- .name = S390_HWCTR_DEVICE,
- .minor = MISC_DYNAMIC_MINOR,
- .fops = &cf_diag_fops,
-};
-
-static int cf_diag_online_cpu(unsigned int cpu)
-{
- struct cf_diag_call_on_cpu_parm p;
-
- mutex_lock(&cf_diag_ctrset_mutex);
- if (!cf_diag_ctrset.ctrset)
- goto out;
- p.sets = cf_diag_ctrset.ctrset;
- cf_diag_ioctl_on(&p);
-out:
- mutex_unlock(&cf_diag_ctrset_mutex);
- return 0;
-}
-
-static int cf_diag_offline_cpu(unsigned int cpu)
-{
- struct cf_diag_call_on_cpu_parm p;
-
- mutex_lock(&cf_diag_ctrset_mutex);
- if (!cf_diag_ctrset.ctrset)
- goto out;
- p.sets = cf_diag_ctrset.ctrset;
- cf_diag_ioctl_off(&p);
-out:
- mutex_unlock(&cf_diag_ctrset_mutex);
- return 0;
-}
-
-/* Initialize the counter set PMU to generate complete counter set data as
- * event raw data. This relies on the CPU Measurement Counter Facility device
- * already being loaded and initialized.
- */
-static int __init cf_diag_init(void)
-{
- struct cpumf_ctr_info info;
- size_t need;
- int rc;
-
- if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info))
- return -ENODEV;
- cf_diag_get_cpu_speed();
-
- /* Make sure the counter set data fits into predefined buffer. */
- need = cf_diag_ctrset_maxsize(&info);
- if (need > sizeof(((struct cf_diag_csd *)0)->start)) {
- pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
- need);
- return -ENOMEM;
- }
-
- rc = misc_register(&cf_diag_dev);
- if (rc) {
- pr_err("Registration of /dev/" S390_HWCTR_DEVICE
- "failed rc=%d\n", rc);
- goto out;
- }
-
- /* Setup s390dbf facility */
- cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
- if (!cf_diag_dbg) {
- pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
- rc = -ENOMEM;
- goto out_dbf;
- }
- debug_register_view(cf_diag_dbg, &debug_sprintf_view);
-
- rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
- if (rc) {
- pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
- rc);
- goto out_perf;
- }
- rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE,
- "perf/s390/cfd:online",
- cf_diag_online_cpu, cf_diag_offline_cpu);
- if (!rc)
- goto out;
-
- pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n",
- rc);
- perf_pmu_unregister(&cf_diag);
-out_perf:
- debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
- debug_unregister(cf_diag_dbg);
-out_dbf:
- misc_deregister(&cf_diag_dev);
-out:
- return rc;
-}
-device_initcall(cf_diag_init);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7ae5dde9c54d..350e94d0cac2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -166,6 +166,12 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[1] = (unsigned int)tls;
}
}
+ /*
+ * s390 stores the svc return address in arch_data when calling
+ * sigreturn()/restart_syscall() via vdso. 1 means no valid address
+ * stored.
+ */
+ p->restart_block.arch_data = 1;
return 0;
}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index c92d04f876cb..82df39b17bb5 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -103,11 +103,9 @@ EXPORT_SYMBOL(cpu_have_feature);
static void show_facilities(struct seq_file *m)
{
unsigned int bit;
- long *facilities;
- facilities = (long *)&S390_lowcore.stfle_fac_list;
seq_puts(m, "facilities :");
- for_each_set_bit_inv(bit, facilities, MAX_FACILITY_BIT)
+ for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
seq_printf(m, " %d", bit);
seq_putc(m, '\n');
}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 18b3416fd663..0ea3d02b378d 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -975,10 +975,12 @@ static int s390_tdb_get(struct task_struct *target,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
+ size_t size;
if (!(regs->int_code & 0x200))
return -ENODATA;
- return membuf_write(&to, target->thread.trap_tdb, 256);
+ size = sizeof(target->thread.trap_tdb.data);
+ return membuf_write(&to, target->thread.trap_tdb.data, size);
}
static int s390_tdb_set(struct task_struct *target,
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5aab59ad5688..ff0f9e838916 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -96,7 +96,6 @@ unsigned long int_hwcap = 0;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
-unsigned long __bootdata(vmalloc_size);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
@@ -108,6 +107,9 @@ unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
+u64 __bootdata_preserved(stfle_fac_list[16]);
+EXPORT_SYMBOL(stfle_fac_list);
+u64 __bootdata_preserved(alt_stfle_fac_list[16]);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -165,7 +167,7 @@ static void __init set_preferred_console(void)
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
else if (CONSOLE_IS_VT220)
- add_preferred_console("ttyS", 1, NULL);
+ add_preferred_console("ttysclp", 0, NULL);
else if (CONSOLE_IS_HVC)
add_preferred_console("hvc", 0, NULL);
}
@@ -338,27 +340,6 @@ int __init arch_early_irq_init(void)
return 0;
}
-static int __init stack_realloc(void)
-{
- unsigned long old, new;
-
- old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
- new = stack_alloc();
- if (!new)
- panic("Couldn't allocate async stack");
- WRITE_ONCE(S390_lowcore.async_stack, new + STACK_INIT_OFFSET);
- free_pages(old, THREAD_SIZE_ORDER);
-
- old = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
- new = stack_alloc();
- if (!new)
- panic("Couldn't allocate machine check stack");
- WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
- memblock_free_late(old, THREAD_SIZE);
- return 0;
-}
-early_initcall(stack_realloc);
-
void __init arch_call_rest_init(void)
{
unsigned long stack;
@@ -373,7 +354,7 @@ void __init arch_call_rest_init(void)
set_task_stack_end_magic(current);
stack += STACK_INIT_OFFSET;
S390_lowcore.kernel_stack = stack;
- CALL_ON_STACK_NORETURN(rest_init, stack);
+ call_on_stack_noreturn(rest_init, stack);
}
static void __init setup_lowcore_dat_off(void)
@@ -413,11 +394,6 @@ static void __init setup_lowcore_dat_off(void)
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count;
- lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
- memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- sizeof(lc->stfle_fac_list));
- memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
- sizeof(lc->alt_stfle_fac_list));
nmi_alloc_boot_cpu(lc);
lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -466,6 +442,7 @@ static void __init setup_lowcore_dat_off(void)
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+ lc->preempt_count = PREEMPT_DISABLED;
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -568,53 +545,10 @@ static void __init setup_resources(void)
#endif
}
-static void __init setup_ident_map_size(void)
+static void __init setup_memory_end(void)
{
- unsigned long vmax, tmp;
-
- /* Choose kernel address space layout: 3 or 4 levels. */
- tmp = ident_map_size / PAGE_SIZE;
- tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
- if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
- vmax = _REGION2_SIZE; /* 3-level kernel page table */
- else
- vmax = _REGION1_SIZE; /* 4-level kernel page table */
- /* module area is at the end of the kernel address space. */
- MODULES_END = vmax;
- if (is_prot_virt_host())
- adjust_to_uv_max(&MODULES_END);
-#ifdef CONFIG_KASAN
- vmax = _REGION1_SIZE;
- MODULES_END = kasan_vmax;
-#endif
- MODULES_VADDR = MODULES_END - MODULES_LEN;
- VMALLOC_END = MODULES_VADDR;
- VMALLOC_START = VMALLOC_END - vmalloc_size;
-
- /* Split remaining virtual space between 1:1 mapping & vmemmap array */
- tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
- /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
- tmp = SECTION_ALIGN_UP(tmp);
- tmp = VMALLOC_START - tmp * sizeof(struct page);
- tmp &= ~((vmax >> 11) - 1); /* align to page table level */
- tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
- vmemmap = (struct page *) tmp;
-
- /* Take care that ident_map_size <= vmemmap */
- ident_map_size = min(ident_map_size, (unsigned long)vmemmap);
-#ifdef CONFIG_KASAN
- ident_map_size = min(ident_map_size, KASAN_SHADOW_START);
-#endif
- vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-#ifdef CONFIG_KASAN
- /* move vmemmap above kasan shadow only if stands in a way */
- if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
- (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
- vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
-#endif
- max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
memblock_remove(ident_map_size, ULONG_MAX);
-
+ max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
@@ -653,30 +587,6 @@ static void __init reserve_above_ident_map(void)
}
/*
- * Make sure that oldmem, where the dump is stored, is protected
- */
-static void __init reserve_oldmem(void)
-{
-#ifdef CONFIG_CRASH_DUMP
- if (OLDMEM_BASE)
- /* Forget all memory above the running kdump system */
- memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
-#endif
-}
-
-/*
- * Make sure that oldmem, where the dump is stored, is protected
- */
-static void __init remove_oldmem(void)
-{
-#ifdef CONFIG_CRASH_DUMP
- if (OLDMEM_BASE)
- /* Forget all memory above the running kdump system */
- memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
-#endif
-}
-
-/*
* Reserve memory for kdump kernel to be loaded with kexec
*/
static void __init reserve_crashkernel(void)
@@ -1119,10 +1029,7 @@ void __init setup_arch(char **cmdline_p)
ROOT_DEV = Root_RAM0;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
nospec_auto_detect();
@@ -1141,7 +1048,6 @@ void __init setup_arch(char **cmdline_p)
/* Do some memory reservations *before* memory is added to memblock */
reserve_above_ident_map();
- reserve_oldmem();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
@@ -1152,10 +1058,9 @@ void __init setup_arch(char **cmdline_p)
memblock_add_mem_detect_info();
free_mem_detect_info();
- remove_oldmem();
setup_uv();
- setup_ident_map_size();
+ setup_memory_end();
setup_memory();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 080e7aed181f..78ef53b29958 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
+#include <asm/vdso.h>
#include "entry.h"
/*
@@ -171,7 +172,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
return 0;
}
@@ -334,15 +334,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
- if (ka->sa.sa_flags & SA_RESTORER) {
+ if (ka->sa.sa_flags & SA_RESTORER)
restorer = (unsigned long) ka->sa.sa_restorer;
- } else {
- /* Signal frame without vector registers are short ! */
- __u16 __user *svc = (void __user *) frame + frame_size - 2;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
- return -EFAULT;
- restorer = (unsigned long) svc;
- }
+ else
+ restorer = VDSO64_SYMBOL(current, sigreturn);
/* Set up registers for signal handler */
regs->gprs[14] = restorer;
@@ -397,14 +392,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
+ if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = (unsigned long) ksig->ka.sa.sa_restorer;
- } else {
- __u16 __user *svc = &frame->svc_insn;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
- return -EFAULT;
- restorer = (unsigned long) svc;
- }
+ else
+ restorer = VDSO64_SYMBOL(current, rt_sigreturn);
/* Create siginfo on the signal stack */
if (copy_siginfo_to_user(&frame->info, &ksig->info))
@@ -501,7 +492,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
}
/* No longer in a system call */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
+
rseq_signal_deliver(&ksig, regs);
if (is_compat_task())
handle_signal32(&ksig, oldset, regs);
@@ -517,14 +508,20 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
- regs->int_code = __NR_restart_syscall;
- fallthrough;
+ regs->gprs[2] = regs->orig_gpr2;
+ current->restart_block.arch_data = regs->psw.addr;
+ if (is_compat_task())
+ regs->psw.addr = VDSO32_SYMBOL(current, restart_syscall);
+ else
+ regs->psw.addr = VDSO64_SYMBOL(current, restart_syscall);
+ if (test_thread_flag(TIF_SINGLE_STEP))
+ clear_thread_flag(TIF_PER_TRAP);
+ break;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
- /* Restart system call with magic TIF bit. */
regs->gprs[2] = regs->orig_gpr2;
- set_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
if (test_thread_flag(TIF_SINGLE_STEP))
clear_thread_flag(TIF_PER_TRAP);
break;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 111909aeb8d2..8984711f72ed 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -74,7 +74,6 @@ enum {
static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
- struct lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
signed char state; /* physical cpu state */
@@ -194,20 +193,12 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
- if (pcpu != &pcpu_devices[0]) {
- pcpu->lowcore = (struct lowcore *)
- __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- if (!pcpu->lowcore || !nodat_stack)
- goto out;
- } else {
- nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
- }
+ lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
- if (!async_stack || !mcck_stack)
- goto out_stack;
- lc = pcpu->lowcore;
+ if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+ goto out;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
@@ -219,46 +210,44 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+ lc->preempt_count = PREEMPT_DISABLED;
if (nmi_alloc_per_cpu(lc))
- goto out_stack;
+ goto out;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
-out_stack:
+out:
stack_free(mcck_stack);
stack_free(async_stack);
-out:
- if (pcpu != &pcpu_devices[0]) {
- free_pages(nodat_stack, THREAD_SIZE_ORDER);
- free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
- }
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
+ free_pages((unsigned long) lc, LC_ORDER);
return -ENOMEM;
}
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
-
- nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
- async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
- mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
- lowcore = (unsigned long) pcpu->lowcore;
+ unsigned long async_stack, nodat_stack, mcck_stack;
+ struct lowcore *lc;
+ int cpu;
+ cpu = pcpu - pcpu_devices;
+ lc = lowcore_ptr[cpu];
+ nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
+ async_stack = lc->async_stack - STACK_INIT_OFFSET;
+ mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
- lowcore_ptr[pcpu - pcpu_devices] = NULL;
- nmi_free_per_cpu(pcpu->lowcore);
+ lowcore_ptr[cpu] = NULL;
+ nmi_free_per_cpu(lc);
stack_free(async_stack);
stack_free(mcck_stack);
- if (pcpu == &pcpu_devices[0])
- return;
free_pages(nodat_stack, THREAD_SIZE_ORDER);
- free_pages(lowcore, LC_ORDER);
+ free_pages((unsigned long) lc, LC_ORDER);
}
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
- struct lowcore *lc = pcpu->lowcore;
+ struct lowcore *lc = lowcore_ptr[cpu];
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
@@ -275,17 +264,16 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->cregs_save_area[1] = lc->kernel_asce;
lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
- memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- sizeof(lc->stfle_fac_list));
- memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
- sizeof(lc->alt_stfle_fac_list));
arch_spin_lock_setup(cpu);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
- struct lowcore *lc = pcpu->lowcore;
+ struct lowcore *lc;
+ int cpu;
+ cpu = pcpu - pcpu_devices;
+ lc = lowcore_ptr[cpu];
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) tsk;
@@ -301,8 +289,11 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
- struct lowcore *lc = pcpu->lowcore;
+ struct lowcore *lc;
+ int cpu;
+ cpu = pcpu - pcpu_devices;
+ lc = lowcore_ptr[cpu];
lc->restart_stack = lc->nodat_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
@@ -310,24 +301,28 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
+typedef void (pcpu_delegate_fn)(void *);
+
/*
* Call function via PSW restart on pcpu and stop the current cpu.
*/
-static void __pcpu_delegate(void (*func)(void*), void *data)
+static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
func(data); /* should not return */
}
static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
- void (*func)(void *),
+ pcpu_delegate_fn *func,
void *data, unsigned long stack)
{
struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
- if (pcpu->address == source_cpu)
- CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
+ if (pcpu->address == source_cpu) {
+ call_on_stack(2, stack, void, __pcpu_delegate,
+ pcpu_delegate_fn *, func, void *, data);
+ }
/* Stop target cpu (if func returns this stops the current cpu). */
pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
/* Restart func on the target cpu and stop the current cpu. */
@@ -387,7 +382,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
- struct lowcore *lc = pcpu_devices->lowcore;
+ struct lowcore *lc = lowcore_ptr[0];
if (pcpu_devices[0].address == stap())
lc = &S390_lowcore;
@@ -600,18 +595,21 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
int smp_store_status(int cpu)
{
- struct pcpu *pcpu = pcpu_devices + cpu;
+ struct lowcore *lc;
+ struct pcpu *pcpu;
unsigned long pa;
- pa = __pa(&pcpu->lowcore->floating_pt_save_area);
+ pcpu = pcpu_devices + cpu;
+ lc = lowcore_ptr[cpu];
+ pa = __pa(&lc->floating_pt_save_area);
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
- pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
+ pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_GS)
- pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
+ pa |= lc->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@@ -905,7 +903,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
S390_lowcore.restart_source = -1UL;
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
- CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
+ call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
}
/* Upping and downing of CPUs */
@@ -1011,7 +1009,6 @@ void __init smp_prepare_boot_cpu(void)
WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
@@ -1237,3 +1234,54 @@ out:
return rc;
}
subsys_initcall(s390_smp_init);
+
+static __always_inline void set_new_lowcore(struct lowcore *lc)
+{
+ union register_pair dst, src;
+ u32 pfx;
+
+ src.even = (unsigned long) &S390_lowcore;
+ src.odd = sizeof(S390_lowcore);
+ dst.even = (unsigned long) lc;
+ dst.odd = sizeof(*lc);
+ pfx = (unsigned long) lc;
+
+ asm volatile(
+ " mvcl %[dst],%[src]\n"
+ " spx %[pfx]\n"
+ : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
+ : [pfx] "Q" (pfx)
+ : "memory", "cc");
+}
+
+static int __init smp_reinit_ipl_cpu(void)
+{
+ unsigned long async_stack, nodat_stack, mcck_stack;
+ struct lowcore *lc, *lc_ipl;
+ unsigned long flags;
+
+ lc_ipl = lowcore_ptr[0];
+ lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ async_stack = stack_alloc();
+ mcck_stack = stack_alloc();
+ if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+ panic("Couldn't allocate memory");
+
+ local_irq_save(flags);
+ local_mcck_disable();
+ set_new_lowcore(lc);
+ S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
+ S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
+ S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+ lowcore_ptr[0] = lc;
+ local_mcck_enable();
+ local_irq_restore(flags);
+
+ free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
+ memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
+ memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));
+
+ return 0;
+}
+early_initcall(smp_reinit_ipl_cpu);
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 888cc2f166db..4d141e2c132e 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -395,19 +395,18 @@ out:
static int sthyi(u64 vaddr, u64 *rc)
{
- register u64 code asm("0") = 0;
- register u64 addr asm("2") = vaddr;
- register u64 rcode asm("3");
+ union register_pair r1 = { .even = 0, }; /* subcode */
+ union register_pair r2 = { .even = vaddr, };
int cc;
asm volatile(
- ".insn rre,0xB2560000,%[code],%[addr]\n"
+ ".insn rre,0xB2560000,%[r1],%[r2]\n"
"ipm %[cc]\n"
"srl %[cc],28\n"
- : [cc] "=d" (cc), "=d" (rcode)
- : [code] "d" (code), [addr] "a" (addr)
+ : [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
+ : [r1] "d" (r1.pair)
: "memory", "cc");
- *rc = rcode;
+ *rc = r2.odd;
return cc;
}
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index 4e5cc7d2364e..8fe2d23b64f4 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -108,7 +108,7 @@ SYSCALL_DEFINE0(ni_syscall)
return -ENOSYS;
}
-void do_syscall(struct pt_regs *regs)
+static void do_syscall(struct pt_regs *regs)
{
unsigned long nr;
@@ -121,6 +121,10 @@ void do_syscall(struct pt_regs *regs)
regs->gprs[2] = nr;
+ if (nr == __NR_restart_syscall && !(current->restart_block.arch_data & 1)) {
+ regs->psw.addr = current->restart_block.arch_data;
+ current->restart_block.arch_data = 1;
+ }
nr = syscall_enter_from_user_mode_work(regs, nr);
/*
@@ -130,13 +134,16 @@ void do_syscall(struct pt_regs *regs)
* work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here
* and if set, the syscall will be skipped.
*/
- if (!test_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)) {
- regs->gprs[2] = -ENOSYS;
- if (likely(nr < NR_syscalls))
- regs->gprs[2] = current->thread.sys_call_table[nr](regs);
- } else {
- clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET);
- }
+
+ if (unlikely(test_and_clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)))
+ goto out;
+ regs->gprs[2] = -ENOSYS;
+ if (likely(nr >= NR_syscalls))
+ goto out;
+ do {
+ regs->gprs[2] = current->thread.sys_call_table[nr](regs);
+ } while (test_and_clear_pt_regs_flag(regs, PIF_EXECVE_PGSTE_RESTART));
+out:
syscall_exit_to_user_mode_work(regs);
}
@@ -144,11 +151,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
add_random_kstack_offset();
enter_from_user_mode(regs);
-
- memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long));
- memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code));
regs->psw = S390_lowcore.svc_old_psw;
-
+ regs->int_code = S390_lowcore.svc_int_code;
update_timer_sys();
local_irq_enable();
@@ -157,13 +161,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
if (per_trap)
set_thread_flag(TIF_PER_TRAP);
- for (;;) {
- regs->flags = 0;
- set_pt_regs_flag(regs, PIF_SYSCALL);
- do_syscall(regs);
- if (!test_pt_regs_flag(regs, PIF_SYSCALL_RESTART))
- break;
- local_irq_enable();
- }
+ regs->flags = 0;
+ set_pt_regs_flag(regs, PIF_SYSCALL);
+ do_syscall(regs);
exit_to_user_mode();
}
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2ac3c9b56a13..ef3f2659876c 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -25,19 +25,22 @@ int topology_max_mnest;
static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
{
- register int r0 asm("0") = (fc << 28) | sel1;
- register int r1 asm("1") = sel2;
+ int r0 = (fc << 28) | sel1;
int rc = 0;
asm volatile(
- " stsi 0(%3)\n"
+ " lr 0,%[r0]\n"
+ " lr 1,%[r1]\n"
+ " stsi 0(%[sysinfo])\n"
"0: jz 2f\n"
- "1: lhi %1,%4\n"
- "2:\n"
+ "1: lhi %[rc],%[retval]\n"
+ "2: lr %[r0],0\n"
EX_TABLE(0b, 1b)
- : "+d" (r0), "+d" (rc)
- : "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
- : "cc", "memory");
+ : [r0] "+d" (r0), [rc] "+d" (rc)
+ : [r1] "d" (sel2),
+ [sysinfo] "a" (sysinfo),
+ [retval] "K" (-EOPNOTSUPP)
+ : "cc", "0", "1", "memory");
*lvl = ((unsigned int) r0) >> 28;
return rc;
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8dd23c703718..76947275fe8b 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -36,7 +36,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
unsigned long address;
if (regs->int_code & 0x200)
- address = *(unsigned long *)(current->thread.trap_tdb + 24);
+ address = current->thread.trap_tdb.data[3];
else
address = regs->psw.addr;
return (void __user *) (address - (regs->int_code >> 16));
@@ -277,6 +277,8 @@ static void __init test_monitor_call(void)
{
int val = 1;
+ if (!IS_ENABLED(CONFIG_BUG))
+ return;
asm volatile(
" mc 0,0\n"
"0: xgr %0,%0\n"
@@ -299,10 +301,9 @@ static void (*pgm_check_table[128])(struct pt_regs *regs);
void noinstr __do_pgm_check(struct pt_regs *regs)
{
unsigned long last_break = S390_lowcore.breaking_event_addr;
- unsigned int trapnr, syscall_redirect = 0;
+ unsigned int trapnr;
irqentry_state_t state;
- add_random_kstack_offset();
regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc;
regs->int_parm_long = S390_lowcore.trans_exc_code;
@@ -318,7 +319,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
if (S390_lowcore.pgm_code & 0x0200) {
/* transaction abort */
- memcpy(&current->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256);
+ current->thread.trap_tdb = S390_lowcore.pgm_tdb;
}
if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
@@ -344,18 +345,9 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
trapnr = regs->int_code & PGM_INT_CODE_MASK;
if (trapnr)
pgm_check_table[trapnr](regs);
- syscall_redirect = user_mode(regs) && test_pt_regs_flag(regs, PIF_SYSCALL);
out:
local_irq_disable();
irqentry_exit(regs, state);
-
- if (syscall_redirect) {
- enter_from_user_mode(regs);
- local_irq_enable();
- regs->orig_gpr2 = regs->gprs[2];
- do_syscall(regs);
- exit_to_user_mode();
- }
}
/*
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index bbf8622bbf5d..bd3ef121c379 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -126,6 +126,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
+ break;
default:
break;
}
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 370f664580af..aeb0a15bcbb7 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -52,7 +52,7 @@ void __init setup_uv(void)
unsigned long uv_stor_base;
/*
- * keep these conditions in line with kasan init code has_uv_sec_stor_limit()
+ * keep these conditions in line with has_uv_sec_stor_limit()
*/
if (!is_prot_virt_host())
return;
@@ -91,12 +91,6 @@ fail:
prot_virt_host = 0;
}
-void adjust_to_uv_max(unsigned long *vmax)
-{
- if (uv_info.max_sec_stor_addr)
- *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
-}
-
/*
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.
@@ -364,6 +358,15 @@ static ssize_t uv_query_facilities(struct kobject *kobj,
static struct kobj_attribute uv_query_facilities_attr =
__ATTR(facilities, 0444, uv_query_facilities, NULL);
+static ssize_t uv_query_feature_indications(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
+}
+
+static struct kobj_attribute uv_query_feature_indications_attr =
+ __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
+
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
@@ -396,6 +399,7 @@ static struct kobj_attribute uv_query_max_guest_addr_attr =
static struct attribute *uv_query_attrs[] = {
&uv_query_facilities_attr.attr,
+ &uv_query_feature_indications_attr.attr,
&uv_query_max_guest_cpus_attr.attr,
&uv_query_max_guest_vms_attr.attr,
&uv_query_max_guest_addr_attr.attr,
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 8c4e07d533c8..99694260cac9 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -20,7 +20,7 @@
#include <asm/vdso.h>
extern char vdso64_start[], vdso64_end[];
-static unsigned int vdso_pages;
+extern char vdso32_start[], vdso32_end[];
static struct vm_special_mapping vvar_mapping;
@@ -37,18 +37,6 @@ enum vvar_pages {
VVAR_NR_PAGES,
};
-unsigned int __read_mostly vdso_enabled = 1;
-
-static int __init vdso_setup(char *str)
-{
- bool enabled;
-
- if (!kstrtobool(str, &enabled))
- vdso_enabled = enabled;
- return 1;
-}
-__setup("vdso=", vdso_setup);
-
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
@@ -155,7 +143,12 @@ static struct vm_special_mapping vvar_mapping = {
.fault = vvar_fault,
};
-static struct vm_special_mapping vdso_mapping = {
+static struct vm_special_mapping vdso64_mapping = {
+ .name = "[vdso]",
+ .mremap = vdso_mremap,
+};
+
+static struct vm_special_mapping vdso32_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
@@ -171,16 +164,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long vdso_text_len, vdso_mapping_len;
unsigned long vvar_start, vdso_text_start;
+ struct vm_special_mapping *vdso_mapping;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
- if (!vdso_enabled || is_compat_task())
- return 0;
if (mmap_write_lock_killable(mm))
return -EINTR;
- vdso_text_len = vdso_pages << PAGE_SHIFT;
+
+ if (is_compat_task()) {
+ vdso_text_len = vdso32_end - vdso32_start;
+ vdso_mapping = &vdso32_mapping;
+ } else {
+ vdso_text_len = vdso64_end - vdso64_start;
+ vdso_mapping = &vdso64_mapping;
+ }
vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
rc = vvar_start;
@@ -198,7 +197,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_mapping);
+ vdso_mapping);
if (IS_ERR(vma)) {
do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
rc = PTR_ERR(vma);
@@ -211,21 +210,25 @@ out:
return rc;
}
-static int __init vdso_init(void)
+static struct page ** __init vdso_setup_pages(void *start, void *end)
{
- struct page **pages;
+ int pages = (end - start) >> PAGE_SHIFT;
+ struct page **pagelist;
int i;
- vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT;
- pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
- if (!pages) {
- vdso_enabled = 0;
- return -ENOMEM;
- }
- for (i = 0; i < vdso_pages; i++)
- pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE);
- pages[vdso_pages] = NULL;
- vdso_mapping.pages = pages;
+ pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (!pagelist)
+ panic("%s: Cannot allocate page list for VDSO", __func__);
+ for (i = 0; i < pages; i++)
+ pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
+ return pagelist;
+}
+
+static int __init vdso_init(void)
+{
+ vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
+ if (IS_ENABLED(CONFIG_COMPAT))
+ vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
return 0;
}
arch_initcall(vdso_init);
diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore
new file mode 100644
index 000000000000..5167384843b9
--- /dev/null
+++ b/arch/s390/kernel/vdso32/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso32.lds
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
new file mode 100644
index 000000000000..3457dcf10396
--- /dev/null
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+# List of files in the vdso
+
+KCOV_INSTRUMENT := n
+ARCH_REL_TYPE_ABS := R_390_COPY|R_390_GLOB_DAT|R_390_JMP_SLOT|R_390_RELATIVE
+ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT
+
+include $(srctree)/lib/vdso/Makefile
+obj-vdso32 = vdso_user_wrapper-32.o note-32.o
+
+# Build rules
+
+targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
+obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+
+KBUILD_AFLAGS += -DBUILD_VDSO
+KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
+
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
+KBUILD_AFLAGS_32 += -m31 -s
+
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
+
+LDFLAGS_vdso32.so.dbg += -fPIC -shared -nostdlib -soname=linux-vdso32.so.1 \
+ --hash-style=both --build-id=sha1 -melf_s390 -T
+
+$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+
+obj-y += vdso32_wrapper.o
+targets += vdso32.lds
+CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
+
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
+ $(call if_changed,ld)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+$(obj-vdso32): %-32.o: %.S FORCE
+ $(call if_changed_dep,vdso32as)
+
+# actual build commands
+quiet_cmd_vdso32as = VDSO32A $@
+ cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdso32cc = VDSO32C $@
+ cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $<
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso32.so: $(obj)/vdso32.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso32.so
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
+ $(call if_changed,vdsosym)
diff --git a/arch/s390/kernel/vdso32/gen_vdso_offsets.sh b/arch/s390/kernel/vdso32/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..9c4f951e227d
--- /dev/null
+++ b/arch/s390/kernel/vdso32/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Match symbols in the DSO that look like __kernel_compat_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Inspired by arm64 version.
+#
+
+LC_ALL=C
+sed -n 's/\([0-9a-f]*\) . __kernel_compat_\(.*\)/\#define vdso32_offset_\2\t0x\1/p'
diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso32/note.S
new file mode 100644
index 000000000000..db19d0680a0a
--- /dev/null
+++ b/arch/s390/kernel/vdso32/note.S
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
new file mode 100644
index 000000000000..edf5ff1debe1
--- /dev/null
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is the infamous ld script for the 31-bit compat vdso
+ * library
+ */
+
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+OUTPUT_ARCH(s390:31-bit)
+ENTRY(_start)
+
+SECTIONS
+{
+ PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+#ifdef CONFIG_TIME_NS
+ PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
+#endif
+ . = VDSO_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ . = ALIGN(16);
+ .text : {
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ } :text
+ PROVIDE(__etext = .);
+ PROVIDE(_etext = .);
+ PROVIDE(etext = .);
+
+ /*
+ * Other stuff is appended to the text segment:
+ */
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+ .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
+
+ .rela.dyn ALIGN(8) : { *(.rela.dyn) }
+ .got ALIGN(8) : { *(.got .toc) }
+ .got.plt ALIGN(8) : { *(.got.plt) }
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /*
+ * Stabs debugging sections are here too.
+ */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+
+ /*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to the
+ * beginning of the section so we begin them at 0.
+ */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.branch_lt)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME 0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ VDSO_VERSION_STRING {
+ global:
+ /*
+ * These have to be there for the kernel to find them
+ */
+ __kernel_compat_restart_syscall;
+ __kernel_compat_rt_sigreturn;
+ __kernel_compat_sigreturn;
+ local: *;
+ };
+}
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S
new file mode 100644
index 000000000000..de2fb930471a
--- /dev/null
+++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso32_start, vdso32_end
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/s390/kernel/vdso32/vdso32.so"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
diff --git a/arch/s390/kernel/vdso32/vdso_user_wrapper.S b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
new file mode 100644
index 000000000000..3f42f27f978c
--- /dev/null
+++ b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/unistd.h>
+#include <asm/dwarf.h>
+
+.macro vdso_syscall func,syscall
+ .globl __kernel_compat_\func
+ .type __kernel_compat_\func,@function
+ .align 8
+__kernel_compat_\func:
+ CFI_STARTPROC
+ svc \syscall
+ /* Make sure we notice when a syscall returns, which shouldn't happen */
+ .word 0
+ CFI_ENDPROC
+ .size __kernel_compat_\func,.-__kernel_compat_\func
+.endm
+
+vdso_syscall restart_syscall,__NR_restart_syscall
+vdso_syscall sigreturn,__NR_sigreturn
+vdso_syscall rt_sigreturn,__NR_rt_sigreturn
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index a6e0fb6b91d6..2a2092ce19f1 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -74,3 +74,11 @@ vdso64.so: $(obj)/vdso64.so.dbg
$(call cmd,vdso_install)
vdso_install: vdso64.so
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE
+ $(call if_changed,vdsosym)
diff --git a/arch/s390/kernel/vdso64/gen_vdso_offsets.sh b/arch/s390/kernel/vdso64/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..37f05cb38dad
--- /dev/null
+++ b/arch/s390/kernel/vdso64/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Match symbols in the DSO that look like __kernel_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Inspired by arm64 version.
+#
+
+LC_ALL=C
+sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso64_offset_\2\t0x\1/p'
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index 518f1ea405f4..4461ea151e49 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -17,7 +17,7 @@ SECTIONS
#ifdef CONFIG_TIME_NS
PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
#endif
- . = VDSO64_LBASE + SIZEOF_HEADERS;
+ . = VDSO_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
@@ -51,6 +51,7 @@ SECTIONS
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
.got ALIGN(8) : { *(.got .toc) }
+ .got.plt ALIGN(8) : { *(.got.plt) }
_end = .;
PROVIDE(end = .);
@@ -137,6 +138,9 @@ VERSION
__kernel_clock_gettime;
__kernel_clock_getres;
__kernel_getcpu;
+ __kernel_restart_syscall;
+ __kernel_rt_sigreturn;
+ __kernel_sigreturn;
local: *;
};
}
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index f773505c7e63..97f0c0a669a5 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -37,3 +37,20 @@ vdso_func gettimeofday
vdso_func clock_getres
vdso_func clock_gettime
vdso_func getcpu
+
+.macro vdso_syscall func,syscall
+ .globl __kernel_\func
+ .type __kernel_\func,@function
+ .align 8
+__kernel_\func:
+ CFI_STARTPROC
+ svc \syscall
+ /* Make sure we notice when a syscall returns, which shouldn't happen */
+ .word 0
+ CFI_ENDPROC
+ .size __kernel_\func,.-__kernel_\func
+.endm
+
+vdso_syscall restart_syscall,__NR_restart_syscall
+vdso_syscall sigreturn,__NR_sigreturn
+vdso_syscall rt_sigreturn,__NR_rt_sigreturn
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 02c146f9e5cd..807fa9da1e72 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
- vcpu->stat.diagnose_10++;
+ vcpu->stat.instruction_diagnose_10++;
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
|| start < 2 * PAGE_SIZE)
@@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
vcpu->run->s.regs.gprs[rx]);
- vcpu->stat.diagnose_258++;
+ vcpu->stat.instruction_diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
- vcpu->stat.diagnose_44++;
+ vcpu->stat.instruction_diagnose_44++;
kvm_vcpu_on_spin(vcpu, true);
return 0;
}
@@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
int tid;
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
- vcpu->stat.diagnose_9c++;
+ vcpu->stat.instruction_diagnose_9c++;
/* yield to self */
if (tid == vcpu->vcpu_id)
@@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5,
"diag time slice end directed to %d: yield forwarded",
tid);
- vcpu->stat.diagnose_9c_forward++;
+ vcpu->stat.diag_9c_forward++;
return 0;
}
@@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
return 0;
no_yield:
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
- vcpu->stat.diagnose_9c_ignored++;
+ vcpu->stat.diag_9c_ignored++;
return 0;
}
@@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
- vcpu->stat.diagnose_308++;
+ vcpu->stat.instruction_diagnose_308++;
switch (subcode) {
case 3:
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
int ret;
- vcpu->stat.diagnose_500++;
+ vcpu->stat.instruction_diagnose_500++;
/* No virtio-ccw notification? Get out quickly. */
if (!vcpu->kvm->arch.css_support ||
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
@@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
case 0x500:
return __diag_virtio_hypercall(vcpu);
default:
- vcpu->stat.diagnose_other++;
+ vcpu->stat.instruction_diagnose_other++;
return -EOPNOTSUPP;
}
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f9fb1e1d960d..4527ac7b5961 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
- STATS_DESC_COUNTER(VCPU, diagnose_10),
- STATS_DESC_COUNTER(VCPU, diagnose_44),
- STATS_DESC_COUNTER(VCPU, diagnose_9c),
- STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
- STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
- STATS_DESC_COUNTER(VCPU, diagnose_258),
- STATS_DESC_COUNTER(VCPU, diagnose_308),
- STATS_DESC_COUNTER(VCPU, diagnose_500),
- STATS_DESC_COUNTER(VCPU, diagnose_other),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
+ STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
+ STATS_DESC_COUNTER(VCPU, diag_9c_forward),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
+ STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
STATS_DESC_COUNTER(VCPU, pfault_sync)
};
static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
@@ -234,7 +234,7 @@ static unsigned long kvm_s390_fac_size(void)
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
- sizeof(S390_lowcore.stfle_fac_list));
+ sizeof(stfle_fac_list));
return SIZE_INTERNAL;
}
@@ -1482,8 +1482,8 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
mach->ibc = sclp.ibc;
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
- memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- sizeof(S390_lowcore.stfle_fac_list));
+ memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
+ sizeof(stfle_fac_list));
VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
kvm->arch.model.ibc,
kvm->arch.model.cpuid);
@@ -2707,10 +2707,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
for (i = 0; i < kvm_s390_fac_size(); i++) {
- kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
+ kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
(kvm_s390_fac_base[i] |
kvm_s390_fac_ext[i]);
- kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
+ kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
kvm_s390_fac_base[i];
}
kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
@@ -5079,7 +5079,7 @@ static int __init kvm_s390_init(void)
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
- S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
+ stfle_fac_list[i] & nonhyp_mask(i);
return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
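The diagnose counters have to be renamed in lock-step here and in diag.c because each STATS_DESC_COUNTER() entry derives both the user-visible stat name and the struct offset from the same field identifier. A simplified model of that pattern, not the kernel's actual macro definitions:

#include <stddef.h>

/* sketch: name string and offset both come from one identifier, so a
 * field rename must touch every descriptor that mentions it */
struct stat_desc {
        const char *name;
        size_t offset;
};
#define DESC_COUNTER(type, field) \
        { .name = #field, .offset = offsetof(type, field) }

struct vcpu_stat_sketch {
        unsigned long instruction_diagnose_10;
};

static const struct stat_desc descs[] = {
        DESC_COUNTER(struct vcpu_stat_sketch, instruction_diagnose_10),
};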
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 93b3209b94a2..cfcdf76d6a95 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -18,23 +18,30 @@
*/
static inline char *__strend(const char *s)
{
- register unsigned long r0 asm("0") = 0;
-
- asm volatile ("0: srst %0,%1\n"
- " jo 0b"
- : "+d" (r0), "+a" (s) : : "cc", "memory");
- return (char *) r0;
+ unsigned long e = 0;
+
+ asm volatile(
+ " lghi 0,0\n"
+ "0: srst %[e],%[s]\n"
+ " jo 0b\n"
+ : [e] "+&a" (e), [s] "+&a" (s)
+ :
+ : "cc", "memory", "0");
+ return (char *)e;
}
static inline char *__strnend(const char *s, size_t n)
{
- register unsigned long r0 asm("0") = 0;
const char *p = s + n;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b"
- : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
- return (char *) p;
+ asm volatile(
+ " lghi 0,0\n"
+ "0: srst %[p],%[s]\n"
+ " jo 0b\n"
+ : [p] "+&d" (p), [s] "+&a" (s)
+ :
+ : "cc", "memory", "0");
+ return (char *)p;
}
/**
@@ -76,13 +83,15 @@ EXPORT_SYMBOL(strnlen);
#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
{
- register int r0 asm("0") = 0;
char *ret = dest;
- asm volatile ("0: mvst %0,%1\n"
- " jo 0b"
- : "+&a" (dest), "+&a" (src) : "d" (r0)
- : "cc", "memory" );
+ asm volatile(
+ " lghi 0,0\n"
+ "0: mvst %[dest],%[src]\n"
+ " jo 0b\n"
+ : [dest] "+&a" (dest), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcpy);
@@ -144,16 +153,18 @@ EXPORT_SYMBOL(strncpy);
#ifdef __HAVE_ARCH_STRCAT
char *strcat(char *dest, const char *src)
{
- register int r0 asm("0") = 0;
- unsigned long dummy;
+ unsigned long dummy = 0;
char *ret = dest;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b\n"
- "1: mvst %0,%2\n"
- " jo 1b"
- : "=&a" (dummy), "+a" (dest), "+a" (src)
- : "d" (r0), "0" (0UL) : "cc", "memory" );
+ asm volatile(
+ " lghi 0,0\n"
+ "0: srst %[dummy],%[dest]\n"
+ " jo 0b\n"
+ "1: mvst %[dummy],%[src]\n"
+ " jo 1b\n"
+ : [dummy] "+&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src)
+ :
+ : "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcat);
@@ -221,18 +232,20 @@ EXPORT_SYMBOL(strncat);
#ifdef __HAVE_ARCH_STRCMP
int strcmp(const char *s1, const char *s2)
{
- register int r0 asm("0") = 0;
int ret = 0;
- asm volatile ("0: clst %2,%3\n"
- " jo 0b\n"
- " je 1f\n"
- " ic %0,0(%2)\n"
- " ic %1,0(%3)\n"
- " sr %0,%1\n"
- "1:"
- : "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2)
- : : "cc", "memory");
+ asm volatile(
+ " lghi 0,0\n"
+ "0: clst %[s1],%[s2]\n"
+ " jo 0b\n"
+ " je 1f\n"
+ " ic %[ret],0(%[s1])\n"
+ " ic 0,0(%[s2])\n"
+ " sr %[ret],0\n"
+ "1:"
+ : [ret] "+&d" (ret), [s1] "+&a" (s1), [s2] "+&a" (s2)
+ :
+ : "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcmp);
@@ -261,18 +274,18 @@ EXPORT_SYMBOL(strrchr);
static inline int clcle(const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
- register unsigned long r2 asm("2") = (unsigned long) s1;
- register unsigned long r3 asm("3") = (unsigned long) l1;
- register unsigned long r4 asm("4") = (unsigned long) s2;
- register unsigned long r5 asm("5") = (unsigned long) l2;
+ union register_pair r1 = { .even = (unsigned long)s1, .odd = l1, };
+ union register_pair r3 = { .even = (unsigned long)s2, .odd = l2, };
int cc;
- asm volatile ("0: clcle %1,%3,0\n"
- " jo 0b\n"
- " ipm %0\n"
- " srl %0,28"
- : "=&d" (cc), "+a" (r2), "+a" (r3),
- "+a" (r4), "+a" (r5) : : "cc", "memory");
+ asm volatile(
+ "0: clcle %[r1],%[r3],0\n"
+ " jo 0b\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [r1] "+&d" (r1.pair), [r3] "+&d" (r3.pair)
+ :
+ : "cc", "memory");
return cc;
}
@@ -315,15 +328,18 @@ EXPORT_SYMBOL(strstr);
#ifdef __HAVE_ARCH_MEMCHR
void *memchr(const void *s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b\n"
- " jl 1f\n"
- " la %0,0\n"
- "1:"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
+ asm volatile(
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
+ " jo 0b\n"
+ " jl 1f\n"
+ " la %[ret],0\n"
+ "1:"
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
@@ -360,13 +376,16 @@ EXPORT_SYMBOL(memcmp);
#ifdef __HAVE_ARCH_MEMSCAN
void *memscan(void *s, int c, size_t n)
{
- register int r0 asm("0") = (char) c;
const void *ret = s + n;
- asm volatile ("0: srst %0,%1\n"
- " jo 0b\n"
- : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
- return (void *) ret;
+ asm volatile(
+ " lgr 0,%[c]\n"
+ "0: srst %[ret],%[s]\n"
+ " jo 0b\n"
+ : [ret] "+&a" (ret), [s] "+&a" (s)
+ : [c] "d" (c)
+ : "cc", "memory", "0");
+ return (void *)ret;
}
EXPORT_SYMBOL(memscan);
#endif
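The conversions above replace hard-coded register asm("N") variables with union register_pair: its 128-bit member makes the compiler allocate an even/odd general-purpose register pair for instructions such as CLCLE that implicitly operate on R and R+1. A sketch of the idea; the real definition lives in the s390 asm headers:

/* sketch of the even/odd pair helper: writing r.even/r.odd fills the
 * 128-bit value that is then passed as a single "d" operand (r.pair) */
union register_pair {
        unsigned __int128 pair;
        struct {
                unsigned long even;
                unsigned long odd;
        };
};

Because the compiler picks the pair itself, the old register ... asm("2")/asm("3") declarations and their fragile ordering constraints go away.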
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index 2f32802f79ce..ecf327d743a0 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -120,7 +120,7 @@ static struct unwindme *unwindme;
#define UWM_REGS 0x2 /* Pass regs to test_unwind(). */
#define UWM_SP 0x4 /* Pass sp to test_unwind(). */
#define UWM_CALLER 0x8 /* Unwind starting from caller. */
-#define UWM_SWITCH_STACK 0x10 /* Use CALL_ON_STACK. */
+#define UWM_SWITCH_STACK 0x10 /* Use call_on_stack. */
#define UWM_IRQ 0x20 /* Unwind from irq context. */
#define UWM_PGM 0x40 /* Unwind from program check handler. */
@@ -211,7 +211,8 @@ static noinline int unwindme_func2(struct unwindme *u)
if (u->flags & UWM_SWITCH_STACK) {
local_irq_save(flags);
local_mcck_disable();
- rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u);
+ rc = call_on_stack(1, S390_lowcore.nodat_stack,
+ int, unwindme_func3, struct unwindme *, u);
local_mcck_enable();
local_irq_restore(flags);
return rc;
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 2fece1fd210a..7ec8b1fa0f08 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -61,11 +61,11 @@ static inline int copy_with_mvcos(void)
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x81UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
+ " lghi 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@@ -84,7 +84,8 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ : [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
return size;
}
@@ -133,11 +134,11 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
+ " llilh 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@@ -156,7 +157,8 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ : [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
return size;
}
@@ -205,12 +207,12 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810081UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
/* FIXME: copy with reduced length. */
asm volatile(
+ " lgr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
" jz 2f\n"
"1: algr %0,%3\n"
@@ -221,7 +223,8 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
"3: \n"
EX_TABLE(0b,3b)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ : [spec] "d" (0x810081UL)
+ : "cc", "memory", "0");
return size;
}
@@ -266,11 +269,11 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
+ " llilh 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
"1: algr %0,%2\n"
@@ -288,7 +291,8 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
- : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+ : "a" (empty_zero_page), [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
return size;
}
@@ -338,10 +342,10 @@ EXPORT_SYMBOL(__clear_user);
static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0;
unsigned long tmp1, tmp2;
asm volatile(
+ " lghi 0,0\n"
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
" slgr %0,%0\n"
@@ -353,7 +357,8 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
"1: sacf 768\n"
EX_TABLE(0b,1b)
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
+ :
+ : "cc", "memory", "0");
return size;
}
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index 29d9470dbceb..a963c3d8ad0d 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -91,9 +91,6 @@ static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
- /* Get around a gcc oddity */
- register unsigned long *reg7 asm ("7") = p5;
-
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
@@ -122,7 +119,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" xc 0(1,%1),0(%5)\n"
"3:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
- "+a" (reg7)
+ "+a" (p5)
: : "0", "1", "cc", "memory");
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 8ae3dc5783fd..e33c43b38afe 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -285,26 +285,6 @@ static noinline void do_sigbus(struct pt_regs *regs)
(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
-static noinline int signal_return(struct pt_regs *regs)
-{
- u16 instruction;
- int rc;
-
- rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
- if (rc)
- return rc;
- if (instruction == 0x0a77) {
- set_pt_regs_flag(regs, PIF_SYSCALL);
- regs->int_code = 0x00040077;
- return 0;
- } else if (instruction == 0x0aad) {
- set_pt_regs_flag(regs, PIF_SYSCALL);
- regs->int_code = 0x000400ad;
- return 0;
- }
- return -EACCES;
-}
-
static noinline void do_fault_error(struct pt_regs *regs, int access,
vm_fault_t fault)
{
@@ -312,9 +292,6 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
switch (fault) {
case VM_FAULT_BADACCESS:
- if (access == VM_EXEC && signal_return(regs) == 0)
- break;
- fallthrough;
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@@ -792,6 +769,32 @@ void do_secure_storage_access(struct pt_regs *regs)
struct page *page;
int rc;
+ /*
+ * Bit 61 tells us if the address is valid: if it is not, we
+ * have a major problem and should stop the kernel or send a
+ * SIGSEGV to the process. Unfortunately, bit 61 is not
+ * reliable without the misc UV feature, so we need to check
+ * for that as well.
+ */
+ if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
+ !test_bit_inv(61, &regs->int_parm_long)) {
+ /*
+ * When this happens, userspace did something that it
+ * was not supposed to do, e.g. branching into secure
+ * memory. Trigger a segmentation fault.
+ */
+ if (user_mode(regs)) {
+ send_sig(SIGSEGV, current, 0);
+ return;
+ }
+
+ /*
+ * The kernel should never run into this case and we
+ * have no way out of this situation.
+ */
+ panic("Unexpected PGM 0x3d with TEID bit 61=0");
+ }
+
switch (get_fault_type(regs)) {
case USER_FAULT:
mm = current->mm;
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index db4d303aaaa9..a0fdc6dc5f9d 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -13,7 +13,6 @@
#include <asm/setup.h>
#include <asm/uv.h>
-unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
@@ -251,28 +250,9 @@ static void __init kasan_early_detect_facilities(void)
}
}
-static bool __init has_uv_sec_stor_limit(void)
-{
- /*
- * keep these conditions in line with setup_uv()
- */
- if (!is_prot_virt_host())
- return false;
-
- if (is_prot_virt_guest())
- return false;
-
- if (!test_facility(158))
- return false;
-
- return !!uv_info.max_sec_stor_addr;
-}
-
void __init kasan_early_init(void)
{
- unsigned long untracked_mem_end;
unsigned long shadow_alloc_size;
- unsigned long vmax_unlimited;
unsigned long initrd_end;
unsigned long memsize;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
@@ -306,9 +286,6 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
- untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
- if (has_uv_sec_stor_limit())
- kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d,
@@ -375,18 +352,18 @@ void __init kasan_early_init(void)
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_MODULES))
- untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
+ kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
- kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
+ kasan_early_pgtable_populate(__sha(ident_map_size),
+ IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
+ __sha(VMALLOC_START) :
+ __sha(MODULES_VADDR),
POPULATE_ZERO_SHADOW);
- kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
+ kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 1d17413b319a..a0f54bd5e98a 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -79,22 +79,21 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
- register unsigned long _dest asm("2") = (unsigned long) dest;
- register unsigned long _len1 asm("3") = (unsigned long) count;
- register unsigned long _src asm("4") = (unsigned long) src;
- register unsigned long _len2 asm("5") = (unsigned long) count;
+ union register_pair _dst, _src;
int rc = -EFAULT;
+ _dst.even = (unsigned long) dest;
+ _dst.odd = (unsigned long) count;
+ _src.even = (unsigned long) src;
+ _src.odd = (unsigned long) count;
asm volatile (
- "0: mvcle %1,%2,0x0\n"
+ "0: mvcle %[dst],%[src],0\n"
"1: jo 0b\n"
- " lhi %0,0x0\n"
+ " lhi %[rc],0\n"
"2:\n"
EX_TABLE(1b,2b)
- : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
- "+d" (_len2), "=m" (*((long *) dest))
- : "m" (*((long *) src))
- : "cc", "memory");
+ : [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
+ : : "cc", "memory");
return rc;
}
@@ -126,12 +125,18 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
*/
int memcpy_real(void *dest, void *src, size_t count)
{
+ unsigned long _dest = (unsigned long)dest;
+ unsigned long _src = (unsigned long)src;
+ unsigned long _count = (unsigned long)count;
int rc;
if (S390_lowcore.nodat_stack != 0) {
preempt_disable();
- rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
- dest, src, count);
+ rc = call_on_stack(3, S390_lowcore.nodat_stack,
+ unsigned long, _memcpy_real,
+ unsigned long, _dest,
+ unsigned long, _src,
+ unsigned long, _count);
preempt_enable();
return rc;
}
@@ -140,8 +145,7 @@ int memcpy_real(void *dest, void *src, size_t count)
* not set up yet. Just call _memcpy_real on the early boot
* stack
*/
- return _memcpy_real((unsigned long) dest,(unsigned long) src,
- (unsigned long) count);
+ return _memcpy_real(_dest, _src, _count);
}
/*
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 7f0e154a470a..68b153083a92 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -31,17 +31,17 @@ __setup("cmma=", cmma);
static inline int cmma_test_essa(void)
{
- register unsigned long tmp asm("0") = 0;
- register int rc asm("1");
+ unsigned long tmp = 0;
+ int rc = -EOPNOTSUPP;
/* test ESSA_GET_STATE */
asm volatile(
- " .insn rrf,0xb9ab0000,%1,%1,%2,0\n"
- "0: la %0,0\n"
+ " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
+ "0: la %[rc],0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=&d" (rc), "+&d" (tmp)
- : "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
+ : [rc] "+&d" (rc), [tmp] "+&d" (tmp)
+ : [cmd] "i" (ESSA_GET_STATE));
return rc;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18205f851c24..eec3a9d7176e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -691,7 +691,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
if (!non_swap_entry(entry))
dec_mm_counter(mm, MM_SWAPENTS);
else if (is_migration_entry(entry)) {
- struct page *page = migration_entry_to_page(entry);
+ struct page *page = pfn_swap_entry_to_page(entry);
dec_mm_counter(mm, mm_counter(page));
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 63cae0476bb4..88419263a89a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
u32 r1 = reg2hex[b1];
- if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
+ if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
jit->seen_reg[r1] = 1;
}
@@ -1154,6 +1154,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
}
break;
/*
+ * BPF_NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
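The new case handles the speculation-barrier pseudo-instruction that the BPF verifier may insert; the s390 JIT emits no code for it. For context, a hand-built instance of such an instruction would look roughly like this (kernel-internal BPF_NOSPEC opcode, all operand fields zero):

#include <linux/filter.h>

/* a speculation barrier as a raw eBPF instruction; only the opcode
 * matters, every operand field stays zero */
static const struct bpf_insn nospec_insn = {
        .code = BPF_ST | BPF_NOSPEC,
};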
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 02f9505c99a8..2e43996159f0 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -63,16 +63,15 @@ u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
- register u64 __addr asm("2") = addr;
- register u64 __range asm("3") = range;
+ union register_pair addr_range = {.even = addr, .odd = range};
u8 cc;
asm volatile (
- " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " .insn rre,0xb9d30000,%[fn],%[addr_range]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
- : [addr] "d" (__addr), "d" (__range)
+ : [addr_range] "d" (addr_range.pair)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
@@ -113,21 +112,19 @@ int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
+ union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
u64 __data;
asm volatile (
- " .insn rre,0xb9d20000,%[data],%[req]\n"
+ " .insn rre,0xb9d20000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
- : "d" (__offset)
- : "cc");
- *status = __req >> 24 & 0xff;
+ : [cc] "+d" (cc), [data] "=d" (__data),
+ [req_off] "+&d" (req_off.pair) :: "cc");
+ *status = req_off.even >> 24 & 0xff;
*data = __data;
return cc;
}
@@ -173,21 +170,19 @@ static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
- register u64 addr asm("2") = ioaddr;
- register u64 r3 asm("3") = len;
+ union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
u64 __data;
asm volatile (
- " .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
+ " .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
- : [ioaddr] "d" (addr)
- : "cc");
- *status = r3 >> 24 & 0xff;
+ : [cc] "+d" (cc), [data] "=d" (__data),
+ [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
+ *status = ioaddr_len.odd >> 24 & 0xff;
*data = __data;
return cc;
}
@@ -211,20 +206,19 @@ EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
+ union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
asm volatile (
- " .insn rre,0xb9d00000,%[data],%[req]\n"
+ " .insn rre,0xb9d00000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), [req] "+d" (__req)
- : "d" (__offset), [data] "d" (data)
+ : [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
+ : [data] "d" (data)
: "cc");
- *status = __req >> 24 & 0xff;
+ *status = req_off.even >> 24 & 0xff;
return cc;
}
@@ -257,20 +251,19 @@ static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
- register u64 addr asm("2") = ioaddr;
- register u64 r3 asm("3") = len;
+ union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
asm volatile (
- " .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
+ " .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : [cc] "+d" (cc), "+d" (r3)
- : [data] "d" (data), [ioaddr] "d" (addr)
- : "cc");
- *status = r3 >> 24 & 0xff;
+ : [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
+ : [data] "d" (data)
+ : "cc", "memory");
+ *status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 9dd5ad1b553d..9c7de9089939 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -35,7 +35,7 @@ static struct airq_iv *zpci_sbv;
*/
static struct airq_iv **zpci_ibv;
-/* Modify PCI: Register adapter interruptions */
+/* Modify PCI: Register floating adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
@@ -53,7 +53,7 @@ static int zpci_set_airq(struct zpci_dev *zdev)
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
-/* Modify PCI: Unregister adapter interruptions */
+/* Modify PCI: Unregister floating adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
@@ -98,6 +98,38 @@ static int zpci_clear_directed_irq(struct zpci_dev *zdev)
return cc ? -EIO : 0;
}
+/* Register adapter interruptions */
+int zpci_set_irq(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_set_directed_irq(zdev);
+ else
+ rc = zpci_set_airq(zdev);
+
+ if (!rc)
+ zdev->irqs_registered = 1;
+
+ return rc;
+}
+
+/* Clear adapter interruptions */
+int zpci_clear_irq(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_clear_directed_irq(zdev);
+ else
+ rc = zpci_clear_airq(zdev);
+
+ if (!rc)
+ zdev->irqs_registered = 0;
+
+ return rc;
+}
+
static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
bool force)
{
@@ -311,10 +343,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = msi_vecs;
- if (irq_delivery == DIRECTED)
- rc = zpci_set_directed_irq(zdev);
- else
- rc = zpci_set_airq(zdev);
+ rc = zpci_set_irq(zdev);
if (rc)
return rc;
@@ -328,10 +357,7 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
int rc;
/* Disable interrupts */
- if (irq_delivery == DIRECTED)
- rc = zpci_clear_directed_irq(zdev);
- else
- rc = zpci_clear_airq(zdev);
+ rc = zpci_clear_irq(zdev);
if (rc)
return;
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 474617b88648..ae683aa623ac 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -49,8 +49,7 @@ static inline int __pcistg_mio_inuser(
void __iomem *ioaddr, const void __user *src,
u64 ulen, u8 *status)
{
- register u64 addr asm("2") = (u64 __force) ioaddr;
- register u64 len asm("3") = ulen;
+ union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
int cc = -ENXIO;
u64 val = 0;
u64 cnt = ulen;
@@ -68,7 +67,7 @@ static inline int __pcistg_mio_inuser(
" aghi %[src],1\n"
" ogr %[val],%[tmp]\n"
" brctg %[cnt],0b\n"
- "1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n"
+ "1: .insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
"2: ipm %[cc]\n"
" srl %[cc],28\n"
"3: sacf 768\n"
@@ -76,10 +75,9 @@ static inline int __pcistg_mio_inuser(
:
[src] "+a" (src), [cnt] "+d" (cnt),
[val] "+d" (val), [tmp] "=d" (tmp),
- [len] "+d" (len), [cc] "+d" (cc),
- [ioaddr] "+a" (addr)
+ [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
:: "cc", "memory");
- *status = len >> 24 & 0xff;
+ *status = ioaddr_len.odd >> 24 & 0xff;
/* did we read everything from user memory? */
if (!cc && cnt != 0)
@@ -195,8 +193,7 @@ static inline int __pcilg_mio_inuser(
void __user *dst, const void __iomem *ioaddr,
u64 ulen, u8 *status)
{
- register u64 addr asm("2") = (u64 __force) ioaddr;
- register u64 len asm("3") = ulen;
+ union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
u64 cnt = ulen;
int shift = ulen * 8;
int cc = -ENXIO;
@@ -209,7 +206,7 @@ static inline int __pcilg_mio_inuser(
*/
asm volatile (
" sacf 256\n"
- "0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n"
+ "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
"1: ipm %[cc]\n"
" srl %[cc],28\n"
" ltr %[cc],%[cc]\n"
@@ -222,18 +219,17 @@ static inline int __pcilg_mio_inuser(
"4: sacf 768\n"
EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
:
- [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
+ [ioaddr_len] "+&d" (ioaddr_len.pair),
+ [cc] "+d" (cc), [val] "=d" (val),
[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
[shift] "+d" (shift)
- :
- [ioaddr] "a" (addr)
- : "cc", "memory");
+ :: "cc", "memory");
/* did we write everything to the user space buffer? */
if (!cc && cnt != 0)
cc = -EFAULT;
- *status = len >> 24 & 0xff;
+ *status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index c57f8c40e992..21c4ebe29b9a 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -24,6 +24,7 @@ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
+KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
diff --git a/arch/sh/Kbuild b/arch/sh/Kbuild
new file mode 100644
index 000000000000..48c2a091a072
--- /dev/null
+++ b/arch/sh/Kbuild
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += kernel/ mm/ boards/
+obj-$(CONFIG_SH_FPU_EMU) += math-emu/
+obj-$(CONFIG_USE_BUILTIN_DTB) += boot/dts/
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 45a0549421cd..b683b69a4556 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -39,7 +39,6 @@ config SUPERH
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_HW_BREAKPOINT
- select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_IOREMAP_PROT if MMU && !X2TLB
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 44bcb80e791a..88ddb6f1c75b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -116,11 +116,6 @@ export ld-bfd
head-y := arch/sh/kernel/head_32.o
-core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
-core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
-
-core-$(CONFIG_USE_BUILTIN_DTB) += arch/sh/boot/dts/
-
# Mach groups
machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 0e6b0be25e33..a9e98233c4d4 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -30,7 +30,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#define __pte_free_tlb(tlb,pte,addr) \
do { \
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 82d74472dfcd..56bf35c2f29c 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -32,9 +32,9 @@ typedef struct { unsigned long long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- return pud_val(pud);
+ return (pmd_t *)pud_val(pud);
}
/* only used by the stubbed out hugetlb gup code, should never be called */
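pud_pgtable() replaces pud_page_vaddr() with a typed helper, so callers get a pmd_t * directly instead of casting an unsigned long. Roughly how the generic page-table walker consumes it; a sketch assuming the usual kernel pgtable types, the real pmd_offset() lives in linux/pgtable.h:

/* sketch: stepping from a pud entry to the pmd slot for an address */
static inline pmd_t *pmd_offset_sketch(pud_t *pud, unsigned long address)
{
        return pud_pgtable(*pud) + pmd_index(address);
}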
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 27751e9470df..d7ddb1ec86a0 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -59,8 +59,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define FIRST_USER_ADDRESS 0UL
-
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
deleted file mode 100644
index d311f00ed530..000000000000
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_UNALIGNED_SH4A_H
-#define __ASM_SH_UNALIGNED_SH4A_H
-
-/*
- * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 64-bit accesses are done through shifting and masking
- * relative to the endianness. Unaligned stores are not supported by the
- * instruction encoding, so these continue to use the packed
- * struct.
- *
- * The same note as with the movli.l/movco.l pair applies here, as long
- * as the load is guaranteed to be inlined, nothing else will hook in to
- * r0 and we get the return value for free.
- *
- * NOTE: Due to the fact we require r0 encoding, care should be taken to
- * avoid mixing these heavily with other r0 consumers, such as the atomic
- * ops. Failure to adhere to this can result in the compiler running out
- * of spill registers and blowing up when building at low optimization
- * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
- */
-#include <linux/unaligned/packed_struct.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
- return p[0] | p[1] << 8;
-#else
- return p[0] << 8 | p[1];
-#endif
-}
-
-static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
-{
- unsigned long unaligned;
-
- __asm__ __volatile__ (
- "movua.l @%1, %0\n\t"
- : "=z" (unaligned)
- : "r" (p)
- );
-
- return unaligned;
-}
-
-/*
- * Even though movua.l supports auto-increment on the read side, it can
- * only store to r0 due to instruction encoding constraints, so just let
- * the compiler sort it out on its own.
- */
-static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
- return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
- sh4a_get_unaligned_cpu32(p);
-#else
- return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
- sh4a_get_unaligned_cpu32(p + 4);
-#endif
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
-}
-
-static inline void nonnative_put_le16(u16 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
-}
-
-static inline void nonnative_put_le32(u32 val, u8 *p)
-{
- nonnative_put_le16(val, p);
- nonnative_put_le16(val >> 16, p + 2);
-}
-
-static inline void nonnative_put_le64(u64 val, u8 *p)
-{
- nonnative_put_le32(val, p);
- nonnative_put_le32(val >> 32, p + 4);
-}
-
-static inline void nonnative_put_be16(u16 val, u8 *p)
-{
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void nonnative_put_be32(u32 val, u8 *p)
-{
- nonnative_put_be16(val >> 16, p);
- nonnative_put_be16(val, p + 2);
-}
-
-static inline void nonnative_put_be64(u64 val, u8 *p)
-{
- nonnative_put_be32(val >> 32, p);
- nonnative_put_be32(val, p + 4);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu16(val, p);
-#else
- nonnative_put_le16(val, p);
-#endif
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu32(val, p);
-#else
- nonnative_put_le32(val, p);
-#endif
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu64(val, p);
-#else
- nonnative_put_le64(val, p);
-#endif
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu16(val, p);
-#else
- nonnative_put_be16(val, p);
-#endif
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu32(val, p);
-#else
- nonnative_put_be32(val, p);
-#endif
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu64(val, p);
-#else
- nonnative_put_be64(val, p);
-#endif
-}
-
-/*
- * While it's a bit non-obvious, even though the generic le/be wrappers
- * use the __get/put_xxx prefixing, they actually wrap in to the
- * non-prefixed get/put_xxx variants as provided above.
- */
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#else
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __ASM_SH_UNALIGNED_SH4A_H */
diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h
deleted file mode 100644
index 0c92e2c73af4..000000000000
--- a/arch/sh/include/asm/unaligned.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SH_UNALIGNED_H
-#define _ASM_SH_UNALIGNED_H
-
-#ifdef CONFIG_CPU_SH4A
-/* SH-4A can handle unaligned loads in a relatively neutered fashion. */
-#include <asm/unaligned-sh4a.h>
-#else
-/* Otherwise, SH can't handle unaligned accesses. */
-#include <asm-generic/unaligned.h>
-#endif
-
-#endif /* _ASM_SH_UNALIGNED_H */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4144be650d41..1fcb6659822a 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -294,10 +294,7 @@ void __init setup_arch(char **cmdline_p)
if (!MOUNT_ROOT_RDONLY)
root_mountflags &= ~MS_RDONLY;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
code_resource.start = virt_to_phys(_text);
code_resource.end = virt_to_phys(_etext)-1;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c72f52c704cd..f0c0f955e169 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -19,7 +19,6 @@ config SPARC
select OF
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
- select HAVE_IDE
select HAVE_ARCH_KGDB if !SMP || SPARC64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_SECCOMP if SPARC64
@@ -59,6 +58,7 @@ config SPARC32
select CLZ_TAB
select HAVE_UID16
select OLD_SIGACTION
+ select ZONE_DMA
config SPARC64
def_bool 64BIT
@@ -141,10 +141,6 @@ config HIGHMEM
default y if SPARC32
select KMAP_LOCAL
-config ZONE_DMA
- bool
- default y if SPARC32
-
config GENERIC_ISA_DMA
bool
default y if SPARC32
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index bee99e65fe23..4e65245bc755 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -58,9 +58,6 @@ endif
head-y := arch/sparc/kernel/head_$(BITS).o
-# See arch/sparc/Kbuild for the core part of the kernel
-core-y += arch/sparc/
-
libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 8c39a9981187..12d00a42c0a3 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -201,7 +201,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 9d353e6dc5a9..4f73e87b22a3 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -51,7 +51,6 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
-#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel pmd_populate
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index a8dafc550985..7b5561d17ab1 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -67,7 +67,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
void pgtable_free(void *table, bool is_page);
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index a5cf79c149fe..ffccfe3b22ed 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -48,7 +48,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define PTRS_PER_PMD 64
#define PTRS_PER_PGD 256
#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
-#define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4)
#define PAGE_NONE SRMMU_PAGE_NONE
@@ -152,13 +151,13 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
return (unsigned long)__nocache_va(v << 4);
}
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
if (srmmu_device_memory(pud_val(pud))) {
- return ~0;
+ return (pmd_t *)~0;
} else {
unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
- return (unsigned long)__nocache_va(v << 4);
+ return (pmd_t *)__nocache_va(v << 4);
}
}
@@ -433,4 +432,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
+
#endif /* !(_SPARC_PGTABLE_H) */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 550d3904de65..4679e45c8348 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -95,9 +95,6 @@ bool kern_addr_valid(unsigned long addr);
#define PTRS_PER_PUD (1UL << PUD_BITS)
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
-/* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0UL
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
@@ -377,8 +374,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
#define pgprot_noncached pgprot_noncached
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
@@ -845,23 +841,23 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
unsigned long pfn;
pfn = pte_pfn(pte);
- return ((unsigned long) __va(pfn << PAGE_SHIFT));
+ return ((pmd_t *) __va(pfn << PAGE_SHIFT));
}
#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
-#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
+#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-#define p4d_page_vaddr(p4d) \
- ((unsigned long) __va(p4d_val(p4d)))
+#define p4d_pgtable(p4d) \
+ ((pud_t *) __va(p4d_val(p4d)))
#define p4d_present(p4d) (p4d_val(p4d) != 0U)
#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)
@@ -1121,6 +1117,8 @@ extern unsigned long cmdline_memory_size;
asmlinkage void do_sparc64_fault(struct pt_regs *regs);
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
+
#ifdef CONFIG_HUGETLB_PAGE
#define pud_leaf_size pud_leaf_size
diff --git a/arch/sparc/include/asm/unaligned.h b/arch/sparc/include/asm/unaligned.h
deleted file mode 100644
index 7971d89d2f54..000000000000
--- a/arch/sparc/include/asm/unaligned.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SPARC_UNALIGNED_H
-#define _ASM_SPARC_UNALIGNED_H
-
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif /* _ASM_SPARC_UNALIGNED_H */
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 059f0eb678e0..8a1a83bbb6d5 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -362,7 +362,7 @@ struct vio_driver {
struct list_head node;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
- int (*remove)(struct vio_dev *dev);
+ void (*remove)(struct vio_dev *dev);
void (*shutdown)(struct vio_dev *dev);
unsigned long driver_data;
struct device_driver driver;
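With remove() now returning void, a vio driver's callback can no longer report a failure; a hypothetical converted driver would look roughly like:

	/* hypothetical driver callback under the new signature */
	static void example_vio_remove(struct vio_dev *vdev)
	{
		/* release per-device state; nothing can be returned */
	}

The bus code in vio_device_remove() (changed further down in this series) then always returns 0 to the driver core.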
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 522e5b51050c..4a5bdb0df779 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1236,11 +1236,6 @@ out_err:
return err;
}
-static int ds_remove(struct vio_dev *vdev)
-{
- return 0;
-}
-
static const struct vio_device_id ds_match[] = {
{
.type = "domain-services-port",
@@ -1251,7 +1246,6 @@ static const struct vio_device_id ds_match[] = {
static struct vio_driver ds_driver = {
.id_table = ds_match,
.probe = ds_probe,
- .remove = ds_remove,
.name = "ds",
};
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index ac8677c3841e..3bcc4ddc6911 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile
index 0f2ea5bcb0d7..d63f18dd058d 100644
--- a/arch/sparc/kernel/syscalls/Makefile
+++ b/arch/sparc/kernel/syscalls/Makefile
@@ -10,25 +10,15 @@ syshdr := $(srctree)/scripts/syscallhdr.sh
systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@
-$(uapi)/unistd_32.h: abis := common,32
-$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
+$(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(uapi)/unistd_64.h: abis := common,64
-$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-$(kapi)/syscall_table_32.h: abis := common,32
-$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
-$(kapi)/syscall_table_64.h: abis := common,64
-$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
+$(kapi)/syscall_table_%.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
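The explicit per-file rules collapse into pattern rules: the Make stem ($*) supplies the word size, so building unistd_32.h runs the header script with --abis common,32 and unistd_64.h with --abis common,64, matching the rules being removed.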
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 4f57056ed463..348a88691219 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -105,10 +105,10 @@ static int vio_device_remove(struct device *dev)
* routines to do so at the moment. TBD
*/
- return drv->remove(vdev);
+ drv->remove(vdev);
}
- return 1;
+ return 0;
}
static ssize_t devspec_show(struct device *dev,
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 04d8790f6c32..0f49fada2093 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -177,10 +177,8 @@ static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
return sun4u_hugepage_shift_to_tte(entry, shift);
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writeable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- unsigned int shift = huge_page_shift(hstate_vma(vma));
pte_t pte;
pte = hugepage_shift_to_tte(entry, shift);
@@ -188,7 +186,7 @@ pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
#ifdef CONFIG_SPARC64
/* If this vma has ADI enabled on it, turn on TTE.mcd
*/
- if (vma->vm_flags & VM_SPARC_ADI)
+ if (flags & VM_SPARC_ADI)
return pte_mkmcd(pte);
else
return pte_mknotmcd(pte);
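A minimal sketch (not from this diff) of the caller side after the signature change, assuming the generic hugetlb helpers:

	/* illustrative only: the caller now passes the page shift and the
	 * vm_flags it already holds instead of the whole VMA/page */
	static pte_t example_make_huge_pte(struct vm_area_struct *vma,
					   struct page *page)
	{
		pte_t entry = mk_huge_pte(page, vma->vm_page_prot);

		return arch_make_huge_pte(entry,
					  huge_page_shift(hstate_vma(vma)),
					  vma->vm_flags);
	}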
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 06e938d03f3b..1b23639e2fcd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -27,6 +27,7 @@
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>
+#include <linux/bootmem_info.h>
#include <asm/head.h>
#include <asm/page.h>
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 4b8d3c65d266..9a2f20cbd48b 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 1;
break;
}
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/um/Kbuild b/arch/um/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/um/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 57cfd9a1c082..0561b73cfd9a 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -15,7 +15,7 @@ config UML
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
- select NO_DMA
+ select NO_DMA if !UML_DMA_EMULATION
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select HAVE_GCC_PLUGINS
@@ -26,7 +26,22 @@ config MMU
bool
default y
+config UML_DMA_EMULATION
+ bool
+
config NO_IOMEM
+ bool "disable IOMEM" if EXPERT
+ depends on !INDIRECT_IOMEM
+ default y
+
+config UML_IOMEM_EMULATION
+ bool
+ select INDIRECT_IOMEM
+ select GENERIC_PCI_IOMAP
+ select GENERIC_IOMAP
+ select NO_GENERIC_PCI_IOPORT_MAP
+
+config NO_IOPORT_MAP
def_bool y
config ISA
@@ -61,6 +76,9 @@ config NR_CPUS
range 1 1
default 1
+config ARCH_HAS_CACHE_LINE_SIZE
+ def_bool y
+
source "arch/$(HEADER_ARCH)/um/Kconfig"
config MAY_HAVE_RUNTIME_DEPS
@@ -91,6 +109,19 @@ config LD_SCRIPT_DYN
depends on !LD_SCRIPT_STATIC
select MODULE_REL_CRCS if MODVERSIONS
+config LD_SCRIPT_DYN_RPATH
+ bool "set rpath in the binary" if EXPERT
+ default y
+ depends on LD_SCRIPT_DYN
+ help
+ Add /lib (and /lib64 for 64-bit) to the linux binary's rpath
+ explicitly.
+
+ You may need to turn this off if compiling for nix systems
+ that have their libraries in random /nix directories and
+ might otherwise unexpectedly use libraries from /lib or /lib64
+ instead of the desired ones.
+
config HOSTFS
tristate "Host filesystem"
help
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 1cea46ff9bb7..12a7acef0357 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -118,7 +118,8 @@ archprepare:
$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
+LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
+LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
-fno-stack-protector $(call cc-option, -fno-stack-protector-all)
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 03ba34b61115..f145842c40b9 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -357,3 +357,23 @@ config UML_RTC
rtcwake, especially in time-travel mode. This driver enables that
by providing a fake RTC clock that causes a wakeup at the right
time.
+
+config UML_PCI_OVER_VIRTIO
+ bool "Enable PCI over VIRTIO device simulation"
+ # in theory, just VIRTIO is enough, but that causes recursion
+ depends on VIRTIO_UML
+ select FORCE_PCI
+ select UML_IOMEM_EMULATION
+ select UML_DMA_EMULATION
+ select PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+ select PCI_LOCKLESS_CONFIG
+
+config UML_PCI_OVER_VIRTIO_DEVICE_ID
+ int "set the virtio device ID for PCI emulation"
+ default -1
+ depends on UML_PCI_OVER_VIRTIO
+ help
+ There's no official device ID assigned (yet); set the one you
+ wish to use for experimentation here. The default of -1 is
+ not valid and will cause the driver to fail at probe.
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index dcc64a02f81f..803666e85414 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o
obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
obj-$(CONFIG_UML_RTC) += rtc.o
+obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
# pcap_user.o must be added explicitly.
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index d8845d4aac6a..6040817c036f 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -256,7 +256,8 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
goto out_close;
}
- if (os_set_fd_block(*fd_out, 0)) {
+ err = os_set_fd_block(*fd_out, 0);
+ if (err) {
printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd "
"non-blocking.\n");
goto out_close;
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 1c70a31e7c5b..fbc623d2cc07 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -32,7 +32,7 @@ static irqreturn_t line_interrupt(int irq, void *data)
*
* Should be called while holding line->lock (this does not modify data).
*/
-static int write_room(struct line *line)
+static unsigned int write_room(struct line *line)
{
int n;
@@ -47,11 +47,11 @@ static int write_room(struct line *line)
return n - 1;
}
-int line_write_room(struct tty_struct *tty)
+unsigned int line_write_room(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
- int room;
+ unsigned int room;
spin_lock_irqsave(&line->lock, flags);
room = write_room(line);
@@ -60,11 +60,11 @@ int line_write_room(struct tty_struct *tty)
return room;
}
-int line_chars_in_buffer(struct tty_struct *tty)
+unsigned int line_chars_in_buffer(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
unsigned long flags;
- int ret;
+ unsigned int ret;
spin_lock_irqsave(&line->lock, flags);
/* write_room subtracts 1 for the needed NULL, so we readd it.*/
@@ -211,11 +211,6 @@ out_up:
return ret;
}
-void line_set_termios(struct tty_struct *tty, struct ktermios * old)
-{
- /* nothing */
-}
-
void line_throttle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h
index 01d21e76144f..bdb16b96e76f 100644
--- a/arch/um/drivers/line.h
+++ b/arch/um/drivers/line.h
@@ -66,11 +66,10 @@ extern int line_setup(char **conf, unsigned nlines, char **def,
char *init, char *name);
extern int line_write(struct tty_struct *tty, const unsigned char *buf,
int len);
-extern void line_set_termios(struct tty_struct *tty, struct ktermios * old);
-extern int line_chars_in_buffer(struct tty_struct *tty);
+extern unsigned int line_chars_in_buffer(struct tty_struct *tty);
extern void line_flush_buffer(struct tty_struct *tty);
extern void line_flush_chars(struct tty_struct *tty);
-extern int line_write_room(struct tty_struct *tty);
+extern unsigned int line_write_room(struct tty_struct *tty);
extern void line_throttle(struct tty_struct *tty);
extern void line_unthrottle(struct tty_struct *tty);
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 6d00af25ec6b..6ead1e240457 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/sched/debug.h>
#include <linux/proc_fs.h>
@@ -140,7 +141,7 @@ void mconsole_proc(struct mc_request *req)
mconsole_reply(req, "Proc not available", 1, 0);
goto out;
}
- file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
+ file = file_open_root_mnt(mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c
index 482a19c5105c..7334019c9e60 100644
--- a/arch/um/drivers/slip_user.c
+++ b/arch/um/drivers/slip_user.c
@@ -145,7 +145,8 @@ static int slip_open(void *data)
}
sfd = err;
- if (set_up_tty(sfd))
+ err = set_up_tty(sfd);
+ if (err)
goto out_close2;
pri->slave = sfd;
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 6476b28d7c5e..41eae2e8fb65 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -99,7 +99,6 @@ static const struct tty_operations ssl_ops = {
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
- .set_termios = line_set_termios,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.install = ssl_install,
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 37b127941e6f..e8b762f4d8c2 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -106,7 +106,6 @@ static const struct tty_operations console_ops = {
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
- .set_termios = line_set_termios,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.hangup = line_hangup,
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 8e0b43cf089f..e497185dd393 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -125,9 +125,7 @@ static const struct block_device_operations ubd_blops = {
};
/* Protected by ubd_lock */
-static int fake_major = UBD_MAJOR;
static struct gendisk *ubd_gendisk[MAX_DEV];
-static struct gendisk *fake_gendisk[MAX_DEV];
#ifdef CONFIG_BLK_DEV_UBD_SYNC
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \
@@ -197,54 +195,19 @@ struct ubd {
/* Protected by ubd_lock */
static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };
-/* Only changed by fake_ide_setup which is a setup */
-static int fake_ide = 0;
-static struct proc_dir_entry *proc_ide_root = NULL;
-static struct proc_dir_entry *proc_ide = NULL;
-
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
-static void make_proc_ide(void)
-{
- proc_ide_root = proc_mkdir("ide", NULL);
- proc_ide = proc_mkdir("ide0", proc_ide_root);
-}
-
-static int fake_ide_media_proc_show(struct seq_file *m, void *v)
-{
- seq_puts(m, "disk\n");
- return 0;
-}
-
-static void make_ide_entries(const char *dev_name)
-{
- struct proc_dir_entry *dir, *ent;
- char name[64];
-
- if(proc_ide_root == NULL) make_proc_ide();
-
- dir = proc_mkdir(dev_name, proc_ide);
- if(!dir) return;
-
- ent = proc_create_single("media", S_IRUGO, dir,
- fake_ide_media_proc_show);
- if(!ent) return;
- snprintf(name, sizeof(name), "ide0/%s", dev_name);
- proc_symlink(dev_name, proc_ide_root, name);
-}
-
static int fake_ide_setup(char *str)
{
- fake_ide = 1;
+ pr_warn("The fake_ide option has been removed\n");
return 1;
}
-
__setup("fake_ide", fake_ide_setup);
__uml_help(fake_ide_setup,
"fake_ide\n"
-" Create ide0 entries that map onto ubd devices.\n\n"
+" Obsolete stub.\n\n"
);
static int parse_unit(char **ptr)
@@ -280,36 +243,14 @@ static int ubd_setup_common(char *str, int *index_out, char **error_out)
if(index_out) *index_out = -1;
n = *str;
if(n == '='){
- char *end;
- int major;
-
str++;
if(!strcmp(str, "sync")){
global_openflags = of_sync(global_openflags);
return err;
}
- err = -EINVAL;
- major = simple_strtoul(str, &end, 0);
- if((*end != '\0') || (end == str)){
- *error_out = "Didn't parse major number";
- return err;
- }
-
- mutex_lock(&ubd_lock);
- if (fake_major != UBD_MAJOR) {
- *error_out = "Can't assign a fake major twice";
- goto out1;
- }
-
- fake_major = major;
-
- printk(KERN_INFO "Setting extra ubd major number to %d\n",
- major);
- err = 0;
- out1:
- mutex_unlock(&ubd_lock);
- return err;
+ pr_warn("fake major not supported any more\n");
+ return 0;
}
n = parse_unit(&str);
@@ -874,7 +815,6 @@ static void ubd_device_release(struct device *dev)
{
struct ubd *ubd_dev = dev_get_drvdata(dev);
- blk_cleanup_queue(ubd_dev->queue);
blk_mq_free_tag_set(&ubd_dev->tag_set);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
}
@@ -914,41 +854,25 @@ static const struct attribute_group *ubd_attr_groups[] = {
NULL,
};
-static int ubd_disk_register(int major, u64 size, int unit,
- struct gendisk **disk_out)
+static void ubd_disk_register(int major, u64 size, int unit,
+ struct gendisk *disk)
{
- struct device *parent = NULL;
- struct gendisk *disk;
-
- disk = alloc_disk(1 << UBD_SHIFT);
- if(disk == NULL)
- return -ENOMEM;
-
disk->major = major;
disk->first_minor = unit << UBD_SHIFT;
+ disk->minors = 1 << UBD_SHIFT;
disk->fops = &ubd_blops;
set_capacity(disk, size / 512);
- if (major == UBD_MAJOR)
- sprintf(disk->disk_name, "ubd%c", 'a' + unit);
- else
- sprintf(disk->disk_name, "ubd_fake%d", unit);
-
- /* sysfs register (not for ide fake devices) */
- if (major == UBD_MAJOR) {
- ubd_devs[unit].pdev.id = unit;
- ubd_devs[unit].pdev.name = DRIVER_NAME;
- ubd_devs[unit].pdev.dev.release = ubd_device_release;
- dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
- platform_device_register(&ubd_devs[unit].pdev);
- parent = &ubd_devs[unit].pdev.dev;
- }
+ sprintf(disk->disk_name, "ubd%c", 'a' + unit);
+
+ ubd_devs[unit].pdev.id = unit;
+ ubd_devs[unit].pdev.name = DRIVER_NAME;
+ ubd_devs[unit].pdev.dev.release = ubd_device_release;
+ dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
+ platform_device_register(&ubd_devs[unit].pdev);
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
- device_add_disk(parent, disk, ubd_attr_groups);
-
- *disk_out = disk;
- return 0;
+ device_add_disk(&ubd_devs[unit].pdev.dev, disk, ubd_attr_groups);
}
#define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE))
@@ -960,6 +884,7 @@ static const struct blk_mq_ops ubd_mq_ops = {
static int ubd_add(int n, char **error_out)
{
struct ubd *ubd_dev = &ubd_devs[n];
+ struct gendisk *disk;
int err = 0;
if(ubd_dev->file == NULL)
@@ -984,43 +909,24 @@ static int ubd_add(int n, char **error_out)
if (err)
goto out;
- ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
- if (IS_ERR(ubd_dev->queue)) {
- err = PTR_ERR(ubd_dev->queue);
+ disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_cleanup_tags;
}
+ ubd_dev->queue = disk->queue;
- ubd_dev->queue->queuedata = ubd_dev;
blk_queue_write_cache(ubd_dev->queue, true, false);
-
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
- err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
- if(err){
- *error_out = "Failed to register device";
- goto out_cleanup_tags;
- }
-
- if (fake_major != UBD_MAJOR)
- ubd_disk_register(fake_major, ubd_dev->size, n,
- &fake_gendisk[n]);
-
- /*
- * Perhaps this should also be under the "if (fake_major)" above
- * using the fake_disk->disk_name
- */
- if (fake_ide)
- make_ide_entries(ubd_gendisk[n]->disk_name);
-
- err = 0;
-out:
- return err;
+ ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk);
+ ubd_gendisk[n] = disk;
+ return 0;
out_cleanup_tags:
blk_mq_free_tag_set(&ubd_dev->tag_set);
- if (!(IS_ERR(ubd_dev->queue)))
- blk_cleanup_queue(ubd_dev->queue);
- goto out;
+out:
+ return err;
}
static int ubd_config(char *str, char **error_out)
@@ -1123,13 +1029,7 @@ static int ubd_remove(int n, char **error_out)
ubd_gendisk[n] = NULL;
if(disk != NULL){
del_gendisk(disk);
- put_disk(disk);
- }
-
- if(fake_gendisk[n] != NULL){
- del_gendisk(fake_gendisk[n]);
- put_disk(fake_gendisk[n]);
- fake_gendisk[n] = NULL;
+ blk_cleanup_disk(disk);
}
err = 0;
@@ -1188,14 +1088,6 @@ static int __init ubd_init(void)
if (register_blkdev(UBD_MAJOR, "ubd"))
return -1;
- if (fake_major != UBD_MAJOR) {
- char name[sizeof("ubd_nnn\0")];
-
- snprintf(name, sizeof(name), "ubd_%d", fake_major);
- if (register_blkdev(fake_major, "ubd"))
- return -1;
- }
-
irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
sizeof(struct io_thread_req *),
GFP_KERNEL
@@ -1242,8 +1134,7 @@ static int __init ubd_driver_init(void){
* enough. So use anyway the io thread. */
}
stack = alloc_stack(0, 0);
- io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
- &thread_fd);
+ io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
if(io_pid < 0){
printk(KERN_ERR
"ubd : Failed to start I/O thread (errno = %d) - "
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
new file mode 100644
index 000000000000..0b802834f40a
--- /dev/null
+++ b/arch/um/drivers/virt-pci.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/logic_iomem.h>
+#include <linux/irqdomain.h>
+#include <linux/virtio_pcidev.h>
+#include <linux/virtio-uml.h>
+#include <linux/delay.h>
+#include <linux/msi.h>
+#include <asm/unaligned.h>
+#include <irq_kern.h>
+
+#define MAX_DEVICES 8
+#define MAX_MSI_VECTORS 32
+#define CFG_SPACE_SIZE 4096
+
+/* for MSI-X we have a 32-bit payload */
+#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
+#define NUM_IRQ_MSGS 10
+
+#define HANDLE_NO_FREE(ptr) ((void *)((unsigned long)(ptr) | 1))
+#define HANDLE_IS_NO_FREE(ptr) ((unsigned long)(ptr) & 1)
+
+struct um_pci_device {
+ struct virtio_device *vdev;
+
+ /* for now just standard BARs */
+ u8 resptr[PCI_STD_NUM_BARS];
+
+ struct virtqueue *cmd_vq, *irq_vq;
+
+#define UM_PCI_STAT_WAITING 0
+ unsigned long status;
+
+ int irq;
+};
+
+struct um_pci_device_reg {
+ struct um_pci_device *dev;
+ void __iomem *iomem;
+};
+
+static struct pci_host_bridge *bridge;
+static DEFINE_MUTEX(um_pci_mtx);
+static struct um_pci_device_reg um_pci_devices[MAX_DEVICES];
+static struct fwnode_handle *um_pci_fwnode;
+static struct irq_domain *um_pci_inner_domain;
+static struct irq_domain *um_pci_msi_domain;
+static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
+
+#define UM_VIRT_PCI_MAXDELAY 40000
+
+static int um_pci_send_cmd(struct um_pci_device *dev,
+ struct virtio_pcidev_msg *cmd,
+ unsigned int cmd_size,
+ const void *extra, unsigned int extra_size,
+ void *out, unsigned int out_size)
+{
+ struct scatterlist out_sg, extra_sg, in_sg;
+ struct scatterlist *sgs_list[] = {
+ [0] = &out_sg,
+ [1] = extra ? &extra_sg : &in_sg,
+ [2] = extra ? &in_sg : NULL,
+ };
+ int delay_count = 0;
+ int ret, len;
+ bool posted;
+
+ if (WARN_ON(cmd_size < sizeof(*cmd)))
+ return -EINVAL;
+
+ switch (cmd->op) {
+ case VIRTIO_PCIDEV_OP_CFG_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
+ /* in PCI, writes are posted, so don't wait */
+ posted = !out;
+ WARN_ON(!posted);
+ break;
+ default:
+ posted = false;
+ break;
+ }
+
+ if (posted) {
+ u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);
+
+ if (ncmd) {
+ memcpy(ncmd, cmd, cmd_size);
+ if (extra)
+ memcpy(ncmd + cmd_size, extra, extra_size);
+ cmd = (void *)ncmd;
+ cmd_size += extra_size;
+ extra = NULL;
+ extra_size = 0;
+ } else {
+ /* try without allocating memory */
+ posted = false;
+ }
+ }
+
+ sg_init_one(&out_sg, cmd, cmd_size);
+ if (extra)
+ sg_init_one(&extra_sg, extra, extra_size);
+ if (out)
+ sg_init_one(&in_sg, out, out_size);
+
+ /* add to internal virtio queue */
+ ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
+ extra ? 2 : 1,
+ out ? 1 : 0,
+ posted ? cmd : HANDLE_NO_FREE(cmd),
+ GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ if (posted) {
+ virtqueue_kick(dev->cmd_vq);
+ return 0;
+ }
+
+ /* kick and poll for getting a response on the queue */
+ set_bit(UM_PCI_STAT_WAITING, &dev->status);
+ virtqueue_kick(dev->cmd_vq);
+
+ while (1) {
+ void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
+
+ if (completed == HANDLE_NO_FREE(cmd))
+ break;
+
+ if (completed && !HANDLE_IS_NO_FREE(completed))
+ kfree(completed);
+
+ if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
+ ++delay_count > UM_VIRT_PCI_MAXDELAY,
+ "um virt-pci delay: %d", delay_count)) {
+ ret = -EIO;
+ break;
+ }
+ udelay(1);
+ }
+ clear_bit(UM_PCI_STAT_WAITING, &dev->status);
+
+ return ret;
+}
+
+static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
+ int size)
+{
+ struct um_pci_device_reg *reg = priv;
+ struct um_pci_device *dev = reg->dev;
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_READ,
+ .size = size,
+ .addr = offset,
+ };
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+
+ if (!dev)
+ return ~0ULL;
+
+ memset(data, 0xff, sizeof(data));
+
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ break;
+ default:
+ WARN(1, "invalid config space read size %d\n", size);
+ return ~0ULL;
+ }
+
+ if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0,
+ data, sizeof(data)))
+ return ~0ULL;
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ~0ULL;
+ }
+}
+
+static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
+ unsigned long val)
+{
+ struct um_pci_device_reg *reg = priv;
+ struct um_pci_device *dev = reg->dev;
+ struct {
+ struct virtio_pcidev_msg hdr;
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .size = size,
+ .addr = offset,
+ },
+ };
+
+ if (!dev)
+ return;
+
+ switch (size) {
+ case 1:
+ msg.data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)msg.data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)msg.data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)msg.data);
+ break;
+#endif
+ default:
+ WARN(1, "invalid config space write size %d\n", size);
+ return;
+ }
+
+ WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+}
+
+static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
+ .read = um_pci_cfgspace_read,
+ .write = um_pci_cfgspace_write,
+};
+
+static void um_pci_bar_copy_from(void *priv, void *buffer,
+ unsigned int offset, int size)
+{
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_READ,
+ .bar = *resptr,
+ .size = size,
+ .addr = offset,
+ };
+
+ memset(buffer, 0xff, size);
+
+ um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
+}
+
+static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
+ int size)
+{
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ break;
+ default:
+ WARN(1, "invalid config space read size %d\n", size);
+ return ~0ULL;
+ }
+
+ um_pci_bar_copy_from(priv, data, offset, size);
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ~0ULL;
+ }
+}
+
+static void um_pci_bar_copy_to(void *priv, unsigned int offset,
+ const void *buffer, int size)
+{
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ .bar = *resptr,
+ .size = size,
+ .addr = offset,
+ };
+
+ um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+}
+
+static void um_pci_bar_write(void *priv, unsigned int offset, int size,
+ unsigned long val)
+{
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+
+ switch (size) {
+ case 1:
+ data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)data);
+ break;
+#endif
+ default:
+ WARN(1, "invalid config space write size %d\n", size);
+ return;
+ }
+
+ um_pci_bar_copy_to(priv, offset, data, size);
+}
+
+static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
+{
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ u8 data;
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .bar = *resptr,
+ .size = size,
+ .addr = offset,
+ },
+ .data = value,
+ };
+
+ um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+}
+
+static const struct logic_iomem_ops um_pci_device_bar_ops = {
+ .read = um_pci_bar_read,
+ .write = um_pci_bar_write,
+ .set = um_pci_bar_set,
+ .copy_from = um_pci_bar_copy_from,
+ .copy_to = um_pci_bar_copy_to,
+};
+
+static void __iomem *um_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct um_pci_device_reg *dev;
+ unsigned int busn = bus->number;
+
+ if (busn > 0)
+ return NULL;
+
+ /* not allowing functions for now ... */
+ if (devfn % 8)
+ return NULL;
+
+ if (devfn / 8 >= ARRAY_SIZE(um_pci_devices))
+ return NULL;
+
+ dev = &um_pci_devices[devfn / 8];
+ if (!dev)
+ return NULL;
+
+ return (void __iomem *)((unsigned long)dev->iomem + where);
+}
+
+static struct pci_ops um_pci_ops = {
+ .map_bus = um_pci_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void um_pci_rescan(void)
+{
+ pci_lock_rescan_remove();
+ pci_rescan_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+}
+
+static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
+ if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
+ kfree(buf);
+ else if (kick)
+ virtqueue_kick(vq);
+}
+
+static void um_pci_handle_irq_message(struct virtqueue *vq,
+ struct virtio_pcidev_msg *msg)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct um_pci_device *dev = vdev->priv;
+
+ /* we should properly chain interrupts, but on ARCH=um we don't care */
+
+ switch (msg->op) {
+ case VIRTIO_PCIDEV_OP_INT:
+ generic_handle_irq(dev->irq);
+ break;
+ case VIRTIO_PCIDEV_OP_MSI:
+ /* our MSI message is just the interrupt number */
+ if (msg->size == sizeof(u32))
+ generic_handle_irq(le32_to_cpup((void *)msg->data));
+ else
+ generic_handle_irq(le16_to_cpup((void *)msg->data));
+ break;
+ case VIRTIO_PCIDEV_OP_PME:
+ /* nothing to do - we already woke up due to the message */
+ break;
+ default:
+ dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
+ break;
+ }
+}
+
+static void um_pci_cmd_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct um_pci_device *dev = vdev->priv;
+ void *cmd;
+ int len;
+
+ if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
+ return;
+
+ while ((cmd = virtqueue_get_buf(vq, &len))) {
+ if (WARN_ON(HANDLE_IS_NO_FREE(cmd)))
+ continue;
+ kfree(cmd);
+ }
+}
+
+static void um_pci_irq_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_pcidev_msg *msg;
+ int len;
+
+ while ((msg = virtqueue_get_buf(vq, &len))) {
+ if (len >= sizeof(*msg))
+ um_pci_handle_irq_message(vq, msg);
+
+ /* recycle the message buffer */
+ um_pci_irq_vq_addbuf(vq, msg, true);
+ }
+}
+
+static int um_pci_init_vqs(struct um_pci_device *dev)
+{
+ struct virtqueue *vqs[2];
+ static const char *const names[2] = { "cmd", "irq" };
+ vq_callback_t *cbs[2] = { um_pci_cmd_vq_cb, um_pci_irq_vq_cb };
+ int err, i;
+
+ err = virtio_find_vqs(dev->vdev, 2, vqs, cbs, names, NULL);
+ if (err)
+ return err;
+
+ dev->cmd_vq = vqs[0];
+ dev->irq_vq = vqs[1];
+
+ for (i = 0; i < NUM_IRQ_MSGS; i++) {
+ void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
+
+ if (msg)
+ um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
+ }
+
+ virtqueue_kick(dev->irq_vq);
+
+ return 0;
+}
+
+static int um_pci_virtio_probe(struct virtio_device *vdev)
+{
+ struct um_pci_device *dev;
+ int i, free = -1;
+ int err = -ENOSPC;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vdev = vdev;
+ vdev->priv = dev;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev)
+ continue;
+ free = i;
+ break;
+ }
+
+ if (free < 0)
+ goto error;
+
+ err = um_pci_init_vqs(dev);
+ if (err)
+ goto error;
+
+ dev->irq = irq_alloc_desc(numa_node_id());
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto error;
+ }
+ um_pci_devices[free].dev = dev;
+ vdev->priv = dev;
+
+ mutex_unlock(&um_pci_mtx);
+
+ device_set_wakeup_enable(&vdev->dev, true);
+
+ /*
+ * In order to do suspend-resume properly, don't allow VQs
+ * to be suspended.
+ */
+ virtio_uml_set_no_vq_suspend(vdev, true);
+
+ um_pci_rescan();
+ return 0;
+error:
+ mutex_unlock(&um_pci_mtx);
+ kfree(dev);
+ return err;
+}
+
+static void um_pci_virtio_remove(struct virtio_device *vdev)
+{
+ struct um_pci_device *dev = vdev->priv;
+ int i;
+
+ /* Stop all virtqueues */
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+
+ device_set_wakeup_enable(&vdev->dev, false);
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev != dev)
+ continue;
+ um_pci_devices[i].dev = NULL;
+ irq_free_desc(dev->irq);
+ }
+ mutex_unlock(&um_pci_mtx);
+
+ um_pci_rescan();
+
+ kfree(dev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver um_pci_virtio_driver = {
+ .driver.name = "virtio-pci",
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = um_pci_virtio_probe,
+ .remove = um_pci_virtio_remove,
+};
+
+static struct resource virt_cfgspace_resource = {
+ .name = "PCI config space",
+ .start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
+ .end = 0xf0000000 - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static long um_pci_map_cfgspace(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv)
+{
+ if (WARN_ON(size > CFG_SPACE_SIZE || offset % CFG_SPACE_SIZE))
+ return -EINVAL;
+
+ if (offset / CFG_SPACE_SIZE < MAX_DEVICES) {
+ *ops = &um_pci_device_cfgspace_ops;
+ *priv = &um_pci_devices[offset / CFG_SPACE_SIZE];
+ return 0;
+ }
+
+ WARN(1, "cannot map offset 0x%lx/0x%zx\n", offset, size);
+ return -ENOENT;
+}
+
+static const struct logic_iomem_region_ops um_pci_cfgspace_ops = {
+ .map = um_pci_map_cfgspace,
+};
+
+static struct resource virt_iomem_resource = {
+ .name = "PCI iomem",
+ .start = 0xf0000000,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+struct um_pci_map_iomem_data {
+ unsigned long offset;
+ size_t size;
+ const struct logic_iomem_ops **ops;
+ void **priv;
+ long ret;
+};
+
+static int um_pci_map_iomem_walk(struct pci_dev *pdev, void *_data)
+{
+ struct um_pci_map_iomem_data *data = _data;
+ struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];
+ struct um_pci_device *dev;
+ int i;
+
+ if (!reg->dev)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(dev->resptr); i++) {
+ struct resource *r = &pdev->resource[i];
+
+ if ((r->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)
+ continue;
+
+ /*
+ * must be the whole or part of the resource,
+ * not allowed to only overlap
+ */
+ if (data->offset < r->start || data->offset > r->end)
+ continue;
+ if (data->offset + data->size - 1 > r->end)
+ continue;
+
+ dev = reg->dev;
+ *data->ops = &um_pci_device_bar_ops;
+ dev->resptr[i] = i;
+ *data->priv = &dev->resptr[i];
+ data->ret = data->offset - r->start;
+
+ /* no need to continue */
+ return 1;
+ }
+
+ return 0;
+}
+
+static long um_pci_map_iomem(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv)
+{
+ struct um_pci_map_iomem_data data = {
+ /* we want the full address here */
+ .offset = offset + virt_iomem_resource.start,
+ .size = size,
+ .ops = ops,
+ .priv = priv,
+ .ret = -ENOENT,
+ };
+
+ pci_walk_bus(bridge->bus, um_pci_map_iomem_walk, &data);
+ return data.ret;
+}
+
+static const struct logic_iomem_region_ops um_pci_iomem_ops = {
+ .map = um_pci_map_iomem,
+};
+
+static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ /*
+ * This is a very low address and not actually valid 'physical' memory
+ * in UML, so we can simply map MSI(-X) vectors to there, it cannot be
+ * legitimately written to by the device in any other way.
+ * We use the (virtual) IRQ number here as the message to simplify the
+ * code that receives the message, where for now we simply trust the
+ * device to send the correct message.
+ */
+ msg->address_hi = 0;
+ msg->address_lo = 0xa0000;
+ msg->data = data->irq;
+}
+
+static struct irq_chip um_pci_msi_bottom_irq_chip = {
+ .name = "UM virtio MSI",
+ .irq_compose_msi_msg = um_pci_compose_msi_msg,
+};
+
+static int um_pci_inner_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+
+ mutex_lock(&um_pci_mtx);
+ bit = find_first_zero_bit(um_pci_msi_used, MAX_MSI_VECTORS);
+ if (bit >= MAX_MSI_VECTORS) {
+ mutex_unlock(&um_pci_mtx);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, um_pci_msi_used);
+ mutex_unlock(&um_pci_mtx);
+
+ irq_domain_set_info(domain, virq, bit, &um_pci_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+
+ return 0;
+}
+
+static void um_pci_inner_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ mutex_lock(&um_pci_mtx);
+
+ if (!test_bit(d->hwirq, um_pci_msi_used))
+ pr_err("trying to free unused MSI#%lu\n", d->hwirq);
+ else
+ __clear_bit(d->hwirq, um_pci_msi_used);
+
+ mutex_unlock(&um_pci_mtx);
+}
+
+static const struct irq_domain_ops um_pci_inner_domain_ops = {
+ .alloc = um_pci_inner_domain_alloc,
+ .free = um_pci_inner_domain_free,
+};
+
+static struct irq_chip um_pci_msi_irq_chip = {
+ .name = "UM virtio PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info um_pci_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .chip = &um_pci_msi_irq_chip,
+};
+
+static struct resource busn_resource = {
+ .name = "PCI busn",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUS,
+};
+
+static int um_pci_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+ struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];
+
+ if (WARN_ON(!reg->dev))
+ return -EINVAL;
+
+ /* Yes, we map all pins to the same IRQ ... doesn't matter for now. */
+ return reg->dev->irq;
+}
+
+void *pci_root_bus_fwnode(struct pci_bus *bus)
+{
+ return um_pci_fwnode;
+}
+
+int um_pci_init(void)
+{
+ int err, i;
+
+ WARN_ON(logic_iomem_add_region(&virt_cfgspace_resource,
+ &um_pci_cfgspace_ops));
+ WARN_ON(logic_iomem_add_region(&virt_iomem_resource,
+ &um_pci_iomem_ops));
+
+ if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
+ "No virtio device ID configured for PCI - no PCI support\n"))
+ return 0;
+
+ bridge = pci_alloc_host_bridge(0);
+ if (!bridge)
+ return -ENOMEM;
+
+ um_pci_fwnode = irq_domain_alloc_named_fwnode("um-pci");
+ if (!um_pci_fwnode) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ um_pci_inner_domain = __irq_domain_add(um_pci_fwnode, MAX_MSI_VECTORS,
+ MAX_MSI_VECTORS, 0,
+ &um_pci_inner_domain_ops, NULL);
+ if (!um_pci_inner_domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ um_pci_msi_domain = pci_msi_create_irq_domain(um_pci_fwnode,
+ &um_pci_msi_domain_info,
+ um_pci_inner_domain);
+ if (!um_pci_msi_domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ pci_add_resource(&bridge->windows, &virt_iomem_resource);
+ pci_add_resource(&bridge->windows, &busn_resource);
+ bridge->ops = &um_pci_ops;
+ bridge->map_irq = um_pci_map_irq;
+
+ for (i = 0; i < MAX_DEVICES; i++) {
+ resource_size_t start;
+
+ start = virt_cfgspace_resource.start + i * CFG_SPACE_SIZE;
+ um_pci_devices[i].iomem = ioremap(start, CFG_SPACE_SIZE);
+ if (WARN(!um_pci_devices[i].iomem, "failed to map %d\n", i)) {
+ err = -ENOMEM;
+ goto free;
+ }
+ }
+
+ err = pci_host_probe(bridge);
+ if (err)
+ goto free;
+
+ err = register_virtio_driver(&um_pci_virtio_driver);
+ if (err)
+ goto free;
+ return 0;
+free:
+ if (um_pci_inner_domain)
+ irq_domain_remove(um_pci_inner_domain);
+ if (um_pci_fwnode)
+ irq_domain_free_fwnode(um_pci_fwnode);
+ pci_free_resource_list(&bridge->windows);
+ pci_free_host_bridge(bridge);
+ return err;
+}
+module_init(um_pci_init);
+
+void um_pci_exit(void)
+{
+ unregister_virtio_driver(&um_pci_virtio_driver);
+ irq_domain_remove(um_pci_msi_domain);
+ irq_domain_remove(um_pci_inner_domain);
+ pci_free_resource_list(&bridge->windows);
+ pci_free_host_bridge(bridge);
+}
+module_exit(um_pci_exit);
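One detail worth spelling out: the BAR ops above recover their device through the resptr[] bytes. Each priv pointer is &dev->resptr[i] and the byte stored there is the BAR index i, so a single u8 * yields both pieces of information. A minimal sketch of the same trick:

	/* illustrative only: how the BAR index and the owning device are
	 * recovered from one resptr pointer */
	static struct um_pci_device *example_dev_from_resptr(u8 *resptr, int *bar)
	{
		*bar = *resptr;
		return container_of(resptr - *resptr,
				    struct um_pci_device, resptr[0]);
	}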
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index 91ddf74ca888..4412d6febade 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -56,6 +56,7 @@ struct virtio_uml_device {
u8 status;
u8 registered:1;
u8 suspended:1;
+ u8 no_vq_suspend:1;
u8 config_changed_irq:1;
uint64_t vq_irq_vq_map;
@@ -1098,6 +1099,19 @@ static void virtio_uml_release_dev(struct device *d)
kfree(vu_dev);
}
+void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
+ bool no_vq_suspend)
+{
+ struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
+
+ if (WARN_ON(vdev->config != &virtio_uml_config_ops))
+ return;
+
+ vu_dev->no_vq_suspend = no_vq_suspend;
+ dev_info(&vdev->dev, "%sabled VQ suspend\n",
+ no_vq_suspend ? "dis" : "en");
+}
+
/* Platform device */
static int virtio_uml_probe(struct platform_device *pdev)
@@ -1302,13 +1316,16 @@ MODULE_DEVICE_TABLE(of, virtio_uml_match);
static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
- struct virtqueue *vq;
- virtio_device_for_each_vq((&vu_dev->vdev), vq) {
- struct virtio_uml_vq_info *info = vq->priv;
+ if (!vu_dev->no_vq_suspend) {
+ struct virtqueue *vq;
- info->suspended = true;
- vhost_user_set_vring_enable(vu_dev, vq->index, false);
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ struct virtio_uml_vq_info *info = vq->priv;
+
+ info->suspended = true;
+ vhost_user_set_vring_enable(vu_dev, vq->index, false);
+ }
}
if (!device_may_wakeup(&vu_dev->vdev.dev)) {
@@ -1322,13 +1339,16 @@ static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
static int virtio_uml_resume(struct platform_device *pdev)
{
struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
- struct virtqueue *vq;
- virtio_device_for_each_vq((&vu_dev->vdev), vq) {
- struct virtio_uml_vq_info *info = vq->priv;
+ if (!vu_dev->no_vq_suspend) {
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ struct virtio_uml_vq_info *info = vq->priv;
- info->suspended = false;
- vhost_user_set_vring_enable(vu_dev, vq->index, true);
+ info->suspended = false;
+ vhost_user_set_vring_enable(vu_dev, vq->index, true);
+ }
}
vu_dev->suspended = false;
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index d7492e5a1bbb..e5a7b552bb38 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -7,8 +7,8 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
+generic-y += fb.h
generic-y += ftrace.h
-generic-y += futex.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
@@ -17,7 +17,6 @@ generic-y += mcs_spinlock.h
generic-y += mmiowb.h
generic-y += module.lds.h
generic-y += param.h
-generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += softirq_stack.h
@@ -27,3 +26,4 @@ generic-y += trace_clock.h
generic-y += word-at-a-time.h
generic-y += kprobes.h
generic-y += mm_hooks.h
+generic-y += vga.h
diff --git a/arch/um/include/asm/cacheflush.h b/arch/um/include/asm/cacheflush.h
new file mode 100644
index 000000000000..4c9858cd36ec
--- /dev/null
+++ b/arch/um/include/asm/cacheflush.h
@@ -0,0 +1,9 @@
+#ifndef __UM_ASM_CACHEFLUSH_H
+#define __UM_ASM_CACHEFLUSH_H
+
+#include <asm/tlbflush.h>
+#define flush_cache_vmap flush_tlb_kernel_range
+#define flush_cache_vunmap flush_tlb_kernel_range
+
+#include <asm-generic/cacheflush.h>
+#endif /* __UM_ASM_CACHEFLUSH_H */
diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
new file mode 100644
index 000000000000..19cd7ed6ec3c
--- /dev/null
+++ b/arch/um/include/asm/cpufeature.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_UM_CPUFEATURE_H
+#define _ASM_UM_CPUFEATURE_H
+
+#include <asm/processor.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
+#include <asm/asm.h>
+#include <linux/bitops.h>
+
+extern const char * const x86_cap_flags[NCAPINTS*32];
+extern const char * const x86_power_flags[32];
+#define X86_CAP_FMT "%s"
+#define x86_cap_flag(flag) x86_cap_flags[flag]
+
+/*
+ * In order to save room, we index into this array by doing
+ * X86_BUG_<name> - NCAPINTS*32.
+ */
+extern const char * const x86_bug_flags[NBUGINTS*32];
+
+#define test_cpu_cap(c, bit) \
+ test_bit(bit, (unsigned long *)((c)->x86_capability))
+
+/*
+ * There are 32 bits/features in each mask word. The high bits
+ * (selected with (bit>>5) give us the word number and the low 5
+ * bits give us the bit/feature number inside the word.
+ * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
+ * see if it is set in the mask word.
+ */
+#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
+ (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
+
+#define cpu_has(c, bit) \
+ test_cpu_cap(c, bit)
+
+#define this_cpu_has(bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+ x86_this_cpu_test_bit(bit, \
+ (unsigned long __percpu *)&cpu_info.x86_capability))
+
+/*
+ * This macro is for detection of features which need kernel
+ * infrastructure to be used. It may *not* directly test the CPU
+ * itself. Use the cpu_has() family if you want true runtime
+ * testing of CPU features, like in hypervisor code where you are
+ * supporting a possible guest feature where host support for it
+ * is not relevant.
+ */
+#define cpu_feature_enabled(bit) \
+ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
+
+#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
+
+#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
+
+extern void setup_clear_cpu_cap(unsigned int bit);
+
+#define setup_force_cpu_cap(bit) do { \
+ set_cpu_cap(&boot_cpu_data, bit); \
+ set_bit(bit, (unsigned long *)cpu_caps_set); \
+} while (0)
+
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
+#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
+
+/*
+ * Workaround for the sake of BPF compilation which utilizes kernel
+ * headers, but clang does not support ASM GOTO and fails the build.
+ */
+#ifndef __BPF_TRACING__
+#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
+#endif
+
+#define static_cpu_has(bit) boot_cpu_has(bit)
+
+#else
+
+/*
+ * Static testing of CPU features. Used the same as boot_cpu_has(). It
+ * statically patches the target code for additional performance. Use
+ * static_cpu_has() only in fast paths, where every cycle counts. Which
+ * means that the boot_cpu_has() variant is already fast enough for the
+ * majority of cases and you should stick to using it as it is generally
+ * only two instructions: a RIP-relative MOV and a TEST.
+ */
+static __always_inline bool _static_cpu_has(u16 bit)
+{
+ asm_volatile_goto("1: jmp 6f\n"
+ "2:\n"
+ ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ "((5f-4f) - (2b-1b)),0x90\n"
+ "3:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 4f - .\n" /* repl offset */
+ " .word %P[always]\n" /* always replace */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 0\n" /* no replacement */
+ " .word %P[feature]\n" /* feature bit */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 0\n" /* repl len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_aux,\"ax\"\n"
+ "6:\n"
+ " testb %[bitnum],%[cap_byte]\n"
+ " jnz %l[t_yes]\n"
+ " jmp %l[t_no]\n"
+ ".previous\n"
+ : : [feature] "i" (bit),
+ [always] "i" (X86_FEATURE_ALWAYS),
+ [bitnum] "i" (1 << (bit & 7)),
+ [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+ : : t_yes, t_no);
+t_yes:
+ return true;
+t_no:
+ return false;
+}
+
+#define static_cpu_has(bit) \
+( \
+ __builtin_constant_p(boot_cpu_has(bit)) ? \
+ boot_cpu_has(bit) : \
+ _static_cpu_has(bit) \
+)
+#endif
+
+#define cpu_has_bug(c, bit) cpu_has(c, (bit))
+#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
+
+#define static_cpu_has_bug(bit) static_cpu_has((bit))
+#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
+#define boot_cpu_set_bug(bit) set_cpu_cap(&boot_cpu_data, (bit))
+
+#define MAX_CPU_FEATURES (NCAPINTS * 32)
+#define cpu_have_feature boot_cpu_has
+
+#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X"
+#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
+ boot_cpu_data.x86_model
+
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+#endif /* _ASM_UM_CPUFEATURE_H */
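A hypothetical caller, to show the intended split between the two tests (the feature bit is a real x86 constant, the called functions are placeholders):

	if (static_cpu_has(X86_FEATURE_XMM2))	/* patched at boot, for hot paths */
		do_sse2_path();			/* placeholder */
	else
		do_generic_path();		/* placeholder */

boot_cpu_has() remains the ordinary runtime check for everything else.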
diff --git a/arch/um/include/asm/fpu/api.h b/arch/um/include/asm/fpu/api.h
new file mode 100644
index 000000000000..71bfd9ef3938
--- /dev/null
+++ b/arch/um/include/asm/fpu/api.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_UM_FPU_API_H
+#define _ASM_UM_FPU_API_H
+
+/* Copyright (c) 2020 Cambridge Greys Ltd
+ * Copyright (c) 2020 Red Hat Inc.
+ * A set of "dummy" defines to allow the direct inclusion
+ * of x86 optimized copy, xor, etc routines into the
+ * UML code tree. */
+
+#define kernel_fpu_begin() (void)0
+#define kernel_fpu_end() (void)0
+
+static inline bool irq_fpu_usable(void)
+{
+ return true;
+}
+
+
+#endif
diff --git a/arch/um/include/asm/futex.h b/arch/um/include/asm/futex.h
new file mode 100644
index 000000000000..780aa6bfc050
--- /dev/null
+++ b/arch/um/include/asm/futex.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_UM_FUTEX_H
+#define _ASM_UM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+
+int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr);
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval);
+
+#endif
diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h
index 6ce18d343997..9ea42cc746d9 100644
--- a/arch/um/include/asm/io.h
+++ b/arch/um/include/asm/io.h
@@ -3,16 +3,23 @@
#define _ASM_UM_IO_H
#include <linux/types.h>
+/* get emulated iomem (if desired) */
+#include <asm-generic/logic_io.h>
+
+#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
return NULL;
}
+#endif /* ioremap */
+#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(void __iomem *addr)
{
}
+#endif /* iounmap */
#include <asm-generic/io.h>
diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h
index 3f5d3e8228fc..e187c789369d 100644
--- a/arch/um/include/asm/irq.h
+++ b/arch/um/include/asm/irq.h
@@ -31,7 +31,13 @@
#endif
-#define NR_IRQS 64
+#define UM_LAST_SIGNAL_IRQ 64
+/* If we have (simulated) PCI MSI, allow 64 more interrupt numbers for it */
+#ifdef CONFIG_PCI_MSI
+#define NR_IRQS (UM_LAST_SIGNAL_IRQ + 64)
+#else
+#define NR_IRQS UM_LAST_SIGNAL_IRQ
+#endif /* CONFIG_PCI_MSI */
#include <asm-generic/irq.h>
#endif
diff --git a/arch/um/include/asm/irqflags.h b/arch/um/include/asm/irqflags.h
index 0642ad9035d1..dab5744e9253 100644
--- a/arch/um/include/asm/irqflags.h
+++ b/arch/um/include/asm/irqflags.h
@@ -2,15 +2,15 @@
#ifndef __UM_IRQFLAGS_H
#define __UM_IRQFLAGS_H
-extern int get_signals(void);
-extern int set_signals(int enable);
-extern void block_signals(void);
-extern void unblock_signals(void);
+extern int signals_enabled;
+int set_signals(int enable);
+void block_signals(void);
+void unblock_signals(void);
#define arch_local_save_flags arch_local_save_flags
static inline unsigned long arch_local_save_flags(void)
{
- return get_signals();
+ return signals_enabled;
}
#define arch_local_irq_restore arch_local_irq_restore
diff --git a/arch/um/include/asm/msi.h b/arch/um/include/asm/msi.h
new file mode 100644
index 000000000000..c8c6c381f394
--- /dev/null
+++ b/arch/um/include/asm/msi.h
@@ -0,0 +1 @@
+#include <asm-generic/msi.h>
diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h
new file mode 100644
index 000000000000..da13fd5519ef
--- /dev/null
+++ b/arch/um/include/asm/pci.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_UM_PCI_H
+#define __ASM_UM_PCI_H
+#include <linux/types.h>
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 0
+#define PCIBIOS_MIN_MEM 0
+
+#define pcibios_assign_all_busses() 1
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQs */
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ /* always show the domain in /proc */
+ return 1;
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+/*
+ * This is a bit of an annoying hack, and it assumes we only have
+ * the virt-pci (if anything). Which is true, but still.
+ */
+void *pci_root_bus_fwnode(struct pci_bus *bus);
+#define pci_root_bus_fwnode pci_root_bus_fwnode
+#endif
+
+#endif /* __ASM_UM_PCI_H */
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 2bbf28cf3aa9..8ec7cd46dd96 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -19,7 +19,6 @@
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index 32106d31e4ab..8256ecc5b919 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -23,7 +23,6 @@
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 7e6a4180db9d..cb896e6121c8 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -41,7 +41,6 @@
#endif
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
@@ -84,7 +83,7 @@ static inline void pud_clear (pud_t *pud)
}
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK))
static inline unsigned long pte_pfn(pte_t pte)
{
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index afd9b267cf81..b5cf0ed116d9 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -16,6 +16,8 @@ struct task_struct;
#include <linux/prefetch.h>
+#include <asm/cpufeatures.h>
+
struct mm_struct;
struct thread_struct {
@@ -90,12 +92,18 @@ extern void start_thread(struct pt_regs *regs, unsigned long entry,
struct cpuinfo_um {
unsigned long loops_per_jiffy;
int ipi_pipe[2];
+ int cache_alignment;
+ union {
+ __u32 x86_capability[NCAPINTS + NBUGINTS];
+ unsigned long x86_capability_alignment;
+ };
};
extern struct cpuinfo_um boot_cpu_data;
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
+#define cache_line_size() (boot_cpu_data.cache_alignment)
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
extern unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index ff9c62828962..0422467bda5b 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <asm/tlbflush.h>
-#include <asm-generic/cacheflush.h>
+#include <asm/cacheflush.h>
#include <asm-generic/tlb.h>
#endif
diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h
index 36b33d62a35d..f512704a9ec7 100644
--- a/arch/um/include/asm/xor.h
+++ b/arch/um/include/asm/xor.h
@@ -1,7 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/xor.h>
+#ifndef _ASM_UM_XOR_H
+#define _ASM_UM_XOR_H
+
+#ifdef CONFIG_64BIT
+#undef CONFIG_X86_32
+#else
+#define CONFIG_X86_32 1
+#endif
+
+#include <asm/cpufeature.h>
+#include <../../x86/include/asm/xor.h>
#include <linux/time-internal.h>
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+#undef XOR_SELECT_TEMPLATE
/* pick an arbitrary one - measuring isn't possible with inf-cpu */
#define XOR_SELECT_TEMPLATE(x) \
(time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)
+#endif
+
+#endif
diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
index 759956ab0108..b22226634ff6 100644
--- a/arch/um/include/linux/time-internal.h
+++ b/arch/um/include/linux/time-internal.h
@@ -8,17 +8,11 @@
#define __TIMER_INTERNAL_H__
#include <linux/list.h>
#include <asm/bug.h>
+#include <shared/timetravel.h>
#define TIMER_MULTIPLIER 256
#define TIMER_MIN_DELTA 500
-enum time_travel_mode {
- TT_MODE_OFF,
- TT_MODE_BASIC,
- TT_MODE_INFCPU,
- TT_MODE_EXTERNAL,
-};
-
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
struct time_travel_event {
unsigned long long time;
@@ -27,8 +21,6 @@ struct time_travel_event {
bool pending, onstack;
};
-extern enum time_travel_mode time_travel_mode;
-
void time_travel_sleep(void);
static inline void
@@ -62,8 +54,6 @@ bool time_travel_del_event(struct time_travel_event *e);
struct time_travel_event {
};
-#define time_travel_mode TT_MODE_OFF
-
static inline void time_travel_sleep(void)
{
}
diff --git a/arch/um/include/linux/virtio-uml.h b/arch/um/include/linux/virtio-uml.h
new file mode 100644
index 000000000000..2f652fa90f04
--- /dev/null
+++ b/arch/um/include/linux/virtio-uml.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#ifndef __VIRTIO_UML_H__
+#define __VIRTIO_UML_H__
+
+void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
+ bool no_vq_suspend);
+
+#endif /* __VIRTIO_UML_H__ */
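For context, a hedged sketch of how a UML virtio driver could use the helper declared above; the probe function is hypothetical and not part of this series.

#include <linux/virtio.h>
#include <linux/virtio-uml.h>

/* Hypothetical probe: keep this device's virtqueues running while the
 * UML instance is suspended, instead of the default suspend behaviour.
 */
static int example_probe(struct virtio_device *vdev)
{
	virtio_uml_set_no_vq_suspend(vdev, true);
	return 0;
}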
diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h
index 07239e801a5b..065829f443ae 100644
--- a/arch/um/include/shared/irq_user.h
+++ b/arch/um/include/shared/irq_user.h
@@ -17,6 +17,7 @@ enum um_irq_type {
struct siginfo;
extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+void sigio_run_timetravel_handlers(void);
extern void free_irq_by_fd(int fd);
extern void deactivate_fd(int fd, int irqnum);
extern int deactivate_all_fds(void);
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 2888ec812f6e..a2cfd42608a0 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -33,7 +33,6 @@ extern int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out);
extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs);
-extern int smp_sigio_handler(void);
extern void initial_thread_cb(void (*proc)(void *), void *arg);
extern int is_syscall(unsigned long addr);
diff --git a/arch/um/include/shared/longjmp.h b/arch/um/include/shared/longjmp.h
index 85a1cc290ecb..bdb2869b72b3 100644
--- a/arch/um/include/shared/longjmp.h
+++ b/arch/um/include/shared/longjmp.h
@@ -5,6 +5,7 @@
#include <sysdep/archsetjmp.h>
#include <os.h>
+extern int signals_enabled;
extern int setjmp(jmp_buf);
extern void longjmp(jmp_buf, int);
@@ -12,13 +13,12 @@ extern void longjmp(jmp_buf, int);
longjmp(*buf, val); \
} while(0)
-#define UML_SETJMP(buf) ({ \
- int n; \
- volatile int enable; \
- enable = get_signals(); \
- n = setjmp(*buf); \
- if(n != 0) \
- set_signals_trace(enable); \
+#define UML_SETJMP(buf) ({ \
+ int n, enable; \
+ enable = *(volatile int *)&signals_enabled; \
+ n = setjmp(*buf); \
+ if(n != 0) \
+ set_signals_trace(enable); \
n; })
#endif
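To illustrate the intent of the reworked macro, a hedged sketch of the save/restore pattern UML_SETJMP now open-codes against the exported signals_enabled variable; the helper below is illustrative, not part of the patch.

static jmp_buf example_buf;

static int example_enter(void)
{
	int n, enable;

	/* snapshot the signal-enable state before the jump point */
	enable = *(volatile int *)&signals_enabled;
	n = setjmp(example_buf);
	if (n != 0)
		/* re-entered via longjmp(): restore the snapshot */
		set_signals_trace(enable);
	return n;
}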
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 13d86f94cf0f..60b84edc8a68 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -187,6 +187,9 @@ int os_poll(unsigned int n, const int *fds);
extern void os_early_checks(void);
extern void os_check_bugs(void);
extern void check_host_supports_tls(int *supports_tls, int *tls_min);
+extern void get_host_cpu_features(
+ void (*flags_helper_func)(char *line),
+ void (*cache_helper_func)(char *line));
/* mem.c */
extern int create_mem_file(unsigned long long len);
@@ -211,7 +214,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
extern int os_unmap_memory(void *addr, int len);
extern int os_drop_memory(void *addr, int length);
extern int can_drop_memory(void);
-extern void os_flush_stdout(void);
extern int os_mincore(void *addr, unsigned long len);
/* execvp.c */
@@ -237,12 +239,14 @@ extern void send_sigio_to_self(void);
extern int change_sig(int signal, int on);
extern void block_signals(void);
extern void unblock_signals(void);
-extern int get_signals(void);
extern int set_signals(int enable);
extern int set_signals_trace(int enable);
extern int os_is_signal_stack(void);
extern void deliver_alarm(void);
extern void register_pm_wake_signal(void);
+extern void block_signals_hard(void);
+extern void unblock_signals_hard(void);
+extern void mark_sigio_pending(void);
/* util.c */
extern void stack_protections(unsigned long address);
diff --git a/arch/um/include/shared/timetravel.h b/arch/um/include/shared/timetravel.h
new file mode 100644
index 000000000000..e5c3d69f1b69
--- /dev/null
+++ b/arch/um/include/shared/timetravel.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+#ifndef _UM_TIME_TRAVEL_H_
+#define _UM_TIME_TRAVEL_H_
+
+enum time_travel_mode {
+ TT_MODE_OFF,
+ TT_MODE_BASIC,
+ TT_MODE_INFCPU,
+ TT_MODE_EXTERNAL,
+};
+
+#if defined(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT) || \
+ defined(CONFIG_UML_TIME_TRAVEL_SUPPORT)
+extern enum time_travel_mode time_travel_mode;
+#else
+#define time_travel_mode TT_MODE_OFF
+#endif /* (UML_)CONFIG_UML_TIME_TRAVEL_SUPPORT */
+
+#endif /* _UM_TIME_TRAVEL_H_ */
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index e698e0c7dbdc..1d18e4e46989 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -17,18 +17,19 @@ extra-y := vmlinux.lds
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o syscall.o sysrq.o time.o tlb.o trap.o \
- um_arch.o umid.o maccess.o kmsg_dump.o skas/
+ um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += ioport.o
USER_OBJS := config.o
include arch/um/scripts/Makefile.rules
-targets := config.c config.tmp
+targets := config.c config.tmp capflags.c
# Be careful with the below Sed code - sed is pitfall-rich!
# We use sed to lower build requirements, for "embedded" builders for instance.
@@ -43,6 +44,15 @@ quiet_cmd_quote1 = QUOTE $@
$(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE
$(call if_changed,quote2)
+quiet_cmd_mkcapflags = MKCAP $@
+ cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/../../x86/kernel/cpu/mkcapflags.sh $@ $^
+
+cpufeature = $(src)/../../x86/include/asm/cpufeatures.h
+vmxfeature = $(src)/../../x86/include/asm/vmxfeatures.h
+
+$(obj)/capflags.c: $(cpufeature) $(vmxfeature) $(src)/../../x86/kernel/cpu/mkcapflags.sh FORCE
+ $(call if_changed,mkcapflags)
+
quiet_cmd_quote2 = QUOTE $@
cmd_quote2 = sed -e '/CONFIG/{' \
-e 's/"CONFIG"//' \
diff --git a/arch/um/kernel/ioport.c b/arch/um/kernel/ioport.c
new file mode 100644
index 000000000000..7220615b3beb
--- /dev/null
+++ b/arch/um/kernel/ioport.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <asm/iomap.h>
+#include <asm-generic/pci_iomap.h>
+
+void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
+ unsigned int nr)
+{
+ return NULL;
+}
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 82af5191e73d..a8873d9bc28b 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -56,7 +56,7 @@ struct irq_entry {
static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
-static DECLARE_BITMAP(irqs_allocated, NR_IRQS);
+static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
@@ -101,10 +101,12 @@ static bool irq_do_timetravel_handler(struct irq_entry *entry,
if (!reg->timetravel_handler)
return false;
- /* prevent nesting - we'll get it again later when we SIGIO ourselves */
- if (reg->pending_on_resume)
- return true;
-
+ /*
+ * Handle all messages - we might get multiple even while
+ * interrupts are already suspended, due to suspend order
+ * etc. Note that time_travel_add_irq_event() will not add
+ * an event twice; if it's already pending, "first wins".
+ */
reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
if (!reg->event.pending)
@@ -123,7 +125,8 @@ static bool irq_do_timetravel_handler(struct irq_entry *entry,
#endif
static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
- struct uml_pt_regs *regs)
+ struct uml_pt_regs *regs,
+ bool timetravel_handlers_only)
{
struct irq_reg *reg = &entry->reg[t];
@@ -136,18 +139,29 @@ static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type
if (irq_do_timetravel_handler(entry, t))
return;
- if (irqs_suspended)
+ /*
+ * If we're called to only run time-travel handlers then don't
+ * actually proceed but mark sigio as pending (if applicable).
+ * For suspend/resume, timetravel_handlers_only may be true
+ * despite time-travel not being configured and used.
+ */
+ if (timetravel_handlers_only) {
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ mark_sigio_pending();
+#endif
return;
+ }
irq_io_loop(reg, regs);
}
-void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+static void _sigio_handler(struct uml_pt_regs *regs,
+ bool timetravel_handlers_only)
{
struct irq_entry *irq_entry;
int n, i;
- if (irqs_suspended && !um_irq_timetravel_handler_used())
+ if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
return;
while (1) {
@@ -172,14 +186,20 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
irq_entry = os_epoll_get_data_pointer(i);
for (t = 0; t < NUM_IRQ_TYPES; t++)
- sigio_reg_handler(i, irq_entry, t, regs);
+ sigio_reg_handler(i, irq_entry, t, regs,
+ timetravel_handlers_only);
}
}
- if (!irqs_suspended)
+ if (!timetravel_handlers_only)
free_irqs();
}
+void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+{
+ _sigio_handler(regs, irqs_suspended);
+}
+
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
struct irq_entry *walk;
@@ -399,7 +419,8 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
void um_free_irq(int irq, void *dev)
{
- if (WARN(irq < 0 || irq > NR_IRQS, "freeing invalid irq %d", irq))
+ if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
+ "freeing invalid irq %d", irq))
return;
free_irq_by_irq_and_dev(irq, dev);
@@ -467,6 +488,11 @@ int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);
+
+void sigio_run_timetravel_handlers(void)
+{
+ _sigio_handler(NULL, true);
+}
#endif
#ifdef CONFIG_PM_SLEEP
@@ -623,7 +649,7 @@ void __init init_IRQ(void)
irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);
- for (i = 1; i < NR_IRQS; i++)
+ for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
/* Initialize EPOLL Loop */
os_setup_epoll();
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index 8ade54a86a7e..b1e5634398d0 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -7,7 +7,7 @@
#include <os.h>
EXPORT_SYMBOL(set_signals);
-EXPORT_SYMBOL(get_signals);
+EXPORT_SYMBOL(signals_enabled);
EXPORT_SYMBOL(os_stat_fd);
EXPORT_SYMBOL(os_stat_file);
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index 592cdb138441..5afac0fef24e 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -29,7 +29,7 @@ stub_clone_handler(void)
long err;
err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
- (unsigned long)data + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
+ (unsigned long)data + UM_KERN_PAGE_SIZE / 2);
if (err) {
data->parent_err = err;
goto done;
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 2dec915abe6f..6c76df96e858 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -11,6 +11,7 @@
#include <asm/current.h>
#include <asm/page.h>
#include <kern_util.h>
+#include <asm/futex.h>
#include <os.h>
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
@@ -248,3 +249,138 @@ long __strnlen_user(const void __user *str, long len)
return 0;
}
EXPORT_SYMBOL(__strnlen_user);
+
+/**
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * argument and comparison of the previous
+ * futex value with another constant.
+ *
+ * @encoded_op: encoded operation to execute
+ * @uaddr: pointer to user space address
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
+ */
+
+int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval, ret;
+ struct page *page;
+ unsigned long addr = (unsigned long) uaddr;
+ pte_t *pte;
+
+ ret = -EFAULT;
+ if (!access_ok(uaddr, sizeof(*uaddr)))
+ return -EFAULT;
+ preempt_disable();
+ pte = maybe_map(addr, 1);
+ if (pte == NULL)
+ goto out_inuser;
+
+ page = pte_page(*pte);
+#ifdef CONFIG_64BIT
+ pagefault_disable();
+ addr = (unsigned long) page_address(page) +
+ (((unsigned long) addr) & ~PAGE_MASK);
+#else
+ addr = (unsigned long) kmap_atomic(page) +
+ ((unsigned long) addr & ~PAGE_MASK);
+#endif
+ uaddr = (u32 *) addr;
+ oldval = *uaddr;
+
+ ret = 0;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ *uaddr = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ *uaddr += oparg;
+ break;
+ case FUTEX_OP_OR:
+ *uaddr |= oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ *uaddr &= ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ *uaddr ^= oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+#ifdef CONFIG_64BIT
+ pagefault_enable();
+#else
+ kunmap_atomic((void *)addr);
+#endif
+
+out_inuser:
+ preempt_enable();
+
+ if (ret == 0)
+ *oval = oldval;
+
+ return ret;
+}
+EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
+
+/**
+ * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * uaddr with newval if the current value is
+ * oldval.
+ * @uval: pointer to store content of @uaddr
+ * @uaddr: pointer to user space address
+ * @oldval: old value
+ * @newval: new value to store to @uaddr
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
+ */
+
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ struct page *page;
+ pte_t *pte;
+ int ret = -EFAULT;
+
+ if (!access_ok(uaddr, sizeof(*uaddr)))
+ return -EFAULT;
+
+ preempt_disable();
+ pte = maybe_map((unsigned long) uaddr, 1);
+ if (pte == NULL)
+ goto out_inatomic;
+
+ page = pte_page(*pte);
+#ifdef CONFIG_64BIT
+ pagefault_disable();
+ uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
+#else
+ uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
+#endif
+
+ *uval = *uaddr;
+
+ ret = cmpxchg(uaddr, oldval, newval);
+
+#ifdef CONFIG_64BIT
+ pagefault_enable();
+#else
+ kunmap_atomic(uaddr);
+#endif
+ ret = 0;
+
+out_inatomic:
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);
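As a usage note, a hedged sketch of how a caller would drive the helper defined above; the wrapper name is illustrative, and the real consumer is the generic futex code.

/* Illustrative only: atomically add @inc to the user word at @uaddr and
 * return its previous value through @old.
 */
static int example_futex_add(u32 __user *uaddr, u32 inc, int *old)
{
	return arch_futex_atomic_op_inuser(FUTEX_OP_ADD, inc, old, uaddr);
}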
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index e0cdb9694fb8..fddd1dec27e6 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -68,23 +68,15 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
int ret;
/*
- * Poll outside the locked section (if we're not called to only read
- * the response) so we can get interrupts for e.g. virtio while we're
- * here, but then we need to lock to not get interrupted between the
- * read of the message and write of the ACK.
+ * We can't unlock here, but interrupt signals with a timetravel_handler
+ * (see um_request_irq_tt) get to the timetravel_handler anyway.
*/
if (mode != TTMH_READ) {
- bool disabled = irqs_disabled();
+ BUG_ON(mode == TTMH_IDLE && !irqs_disabled());
- BUG_ON(mode == TTMH_IDLE && !disabled);
-
- if (disabled)
- local_irq_enable();
while (os_poll(1, &time_travel_ext_fd) != 0) {
/* nothing */
}
- if (disabled)
- local_irq_disable();
}
ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
@@ -123,15 +115,15 @@ static u64 time_travel_ext_req(u32 op, u64 time)
.time = time,
.seq = mseq,
};
- unsigned long flags;
/*
- * We need to save interrupts here and only restore when we
- * got the ACK - otherwise we can get interrupted and send
- * another request while we're still waiting for an ACK, but
- * the peer doesn't know we got interrupted and will send
- * the ACKs in the same order as the message, but we'd need
- * to see them in the opposite order ...
+ * We need to block even the timetravel handlers of SIGIO here and
+ * only restore their use when we got the ACK - otherwise we may
+ * (will) get interrupted by that, try to queue the IRQ for future
+ * processing and thus send another request while we're still waiting
+ * for an ACK, but the peer doesn't know we got interrupted and will
+ * send the ACKs in the same order as the messages, while we'd need to
+ * see them in the opposite order ...
*
* This wouldn't matter *too* much, but some ACKs carry the
* current time (for UM_TIMETRAVEL_GET) and getting another
@@ -140,7 +132,7 @@ static u64 time_travel_ext_req(u32 op, u64 time)
* The sequence number assignment that happens here lets us
* debug such message handling issues more easily.
*/
- local_irq_save(flags);
+ block_signals_hard();
os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
while (msg.op != UM_TIMETRAVEL_ACK)
@@ -152,7 +144,7 @@ static u64 time_travel_ext_req(u32 op, u64 time)
if (op == UM_TIMETRAVEL_GET)
time_travel_set_time(msg.time);
- local_irq_restore(flags);
+ unblock_signals_hard();
return msg.time;
}
@@ -352,9 +344,6 @@ void deliver_time_travel_irqs(void)
while ((e = list_first_entry_or_null(&time_travel_irqs,
struct time_travel_event,
list))) {
- WARN(e->time != time_travel_time,
- "time moved from %lld to %lld before IRQ delivery\n",
- time_travel_time, e->time);
list_del(&e->list);
e->pending = false;
e->fn(e);
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 74e07e748a9b..a149a5e9a16a 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -6,7 +6,9 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/ctype.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/utsname.h>
@@ -16,6 +18,7 @@
#include <linux/suspend.h>
#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <as-layout.h>
@@ -50,9 +53,13 @@ static void __init add_arg(char *arg)
*/
struct cpuinfo_um boot_cpu_data = {
.loops_per_jiffy = 0,
- .ipi_pipe = { -1, -1 }
+ .ipi_pipe = { -1, -1 },
+ .cache_alignment = L1_CACHE_BYTES,
+ .x86_capability = { 0 }
};
+EXPORT_SYMBOL(boot_cpu_data);
+
union thread_union cpu0_irqstack
__section(".data..init_irqstack") =
{ .thread_info = INIT_THREAD_INFO(init_task) };
@@ -62,17 +69,25 @@ static char host_info[(__NEW_UTS_LEN + 1) * 5];
static int show_cpuinfo(struct seq_file *m, void *v)
{
- int index = 0;
+ int i = 0;
- seq_printf(m, "processor\t: %d\n", index);
+ seq_printf(m, "processor\t: %d\n", i);
seq_printf(m, "vendor_id\t: User Mode Linux\n");
seq_printf(m, "model name\t: UML\n");
seq_printf(m, "mode\t\t: skas\n");
seq_printf(m, "host\t\t: %s\n", host_info);
- seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
+ seq_printf(m, "fpu\t\t: %s\n", cpu_has(&boot_cpu_data, X86_FEATURE_FPU) ? "yes" : "no");
+ seq_printf(m, "flags\t\t:");
+ for (i = 0; i < 32*NCAPINTS; i++)
+ if (cpu_has(&boot_cpu_data, i) && (x86_cap_flags[i] != NULL))
+ seq_printf(m, " %s", x86_cap_flags[i]);
+ seq_printf(m, "\n");
+ seq_printf(m, "cache_alignment\t: %d\n", boot_cpu_data.cache_alignment);
+ seq_printf(m, "bogomips\t: %lu.%02lu\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
+
return 0;
}
@@ -261,6 +276,30 @@ EXPORT_SYMBOL(end_iomem);
#define MIN_VMALLOC (32 * 1024 * 1024)
+static void parse_host_cpu_flags(char *line)
+{
+ int i;
+ for (i = 0; i < 32*NCAPINTS; i++) {
+ if ((x86_cap_flags[i] != NULL) && strstr(line, x86_cap_flags[i]))
+ set_cpu_cap(&boot_cpu_data, i);
+ }
+}
+static void parse_cache_line(char *line)
+{
+ long res;
+ char *to_parse = strstr(line, ":");
+ if (to_parse) {
+ to_parse++;
+ while (*to_parse != 0 && isspace(*to_parse)) {
+ to_parse++;
+ }
+ if (kstrtoul(to_parse, 10, &res) == 0 && is_power_of_2(res))
+ boot_cpu_data.cache_alignment = res;
+ else
+ boot_cpu_data.cache_alignment = L1_CACHE_BYTES;
+ }
+}
+
int __init linux_main(int argc, char **argv)
{
unsigned long avail, diff;
@@ -297,6 +336,8 @@ int __init linux_main(int argc, char **argv)
/* OS sanity checks that need to happen before the kernel runs */
os_early_checks();
+ get_host_cpu_features(parse_host_cpu_flags, parse_cache_line);
+
brk_start = (unsigned long) sbrk(0);
/*
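For reference, a hedged example of the host /proc/cpuinfo lines that get_host_cpu_features() feeds to the two parsers above; the values shown are illustrative.

	flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov
	cache_alignment	: 64

Each recognised flag name sets the matching X86_FEATURE_* bit in boot_cpu_data.x86_capability, and a power-of-two cache_alignment value replaces the L1_CACHE_BYTES default.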
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 9fa6e4187d4f..32e88baf18dd 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -64,7 +64,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
goto out_close;
}
- sp = stack + UM_KERN_PAGE_SIZE - sizeof(void *);
+ sp = stack + UM_KERN_PAGE_SIZE;
data.pre_exec = pre_exec;
data.pre_data = pre_data;
data.argv = argv;
@@ -120,7 +120,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
if (stack == 0)
return -ENOMEM;
- sp = stack + UM_KERN_PAGE_SIZE - sizeof(void *);
+ sp = stack + UM_KERN_PAGE_SIZE;
pid = clone(proc, (void *) sp, flags, arg);
if (pid < 0) {
err = -errno;
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 96f511d1aabe..6de99bb16113 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -18,6 +18,7 @@
#include <sysdep/mcontext.h>
#include <um_malloc.h>
#include <sys/ucontext.h>
+#include <timetravel.h>
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
@@ -62,17 +63,30 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)
-static int signals_enabled;
+int signals_enabled;
+#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+static int signals_blocked;
+#else
+#define signals_blocked false
+#endif
static unsigned int signals_pending;
static unsigned int signals_active = 0;
void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
- int enabled;
+ int enabled = signals_enabled;
- enabled = signals_enabled;
- if (!enabled && (sig == SIGIO)) {
- signals_pending |= SIGIO_MASK;
+ if ((signals_blocked || !enabled) && (sig == SIGIO)) {
+ /*
+ * In TT_MODE_EXTERNAL, we still need to call the time-travel
+ * handlers unless signals are also blocked for the
+ * external time message processing. This will mark
+ * signals_pending by itself (only if necessary).
+ */
+ if (!signals_blocked && time_travel_mode == TT_MODE_EXTERNAL)
+ sigio_run_timetravel_handlers();
+ else
+ signals_pending |= SIGIO_MASK;
return;
}
@@ -129,7 +143,7 @@ void set_sigstack(void *sig_stack, int size)
stack_t stack = {
.ss_flags = 0,
.ss_sp = sig_stack,
- .ss_size = size - sizeof(void *)
+ .ss_size = size
};
if (sigaltstack(&stack, NULL) != 0)
@@ -334,11 +348,6 @@ void unblock_signals(void)
}
}
-int get_signals(void)
-{
- return signals_enabled;
-}
-
int set_signals(int enable)
{
int ret;
@@ -368,6 +377,39 @@ int set_signals_trace(int enable)
return ret;
}
+#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+void mark_sigio_pending(void)
+{
+ signals_pending |= SIGIO_MASK;
+}
+
+void block_signals_hard(void)
+{
+ if (signals_blocked)
+ return;
+ signals_blocked = 1;
+ barrier();
+}
+
+void unblock_signals_hard(void)
+{
+ if (!signals_blocked)
+ return;
+ /* Must be set to 0 before we check the pending bits etc. */
+ signals_blocked = 0;
+ barrier();
+
+ if (signals_pending && signals_enabled) {
+ /* this is a bit inefficient, but that's not really important */
+ block_signals();
+ unblock_signals();
+ } else if (signals_pending & SIGIO_MASK) {
+ /* we need to run time-travel handlers even if not enabled */
+ sigio_run_timetravel_handlers();
+ }
+}
+#endif
+
int os_is_signal_stack(void)
{
stack_t ss;
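A hedged sketch of the intended pairing of the two new hard-blocking helpers above around a transaction that must not be interrupted even by SIGIOs carrying a timetravel_handler; the function name and the elided body are illustrative.

static void example_blocking_transaction(void)
{
	block_signals_hard();	/* defer even time-travel SIGIO handling */

	/* ... send the request and wait for its ACK here ... */

	unblock_signals_hard();	/* replay whatever arrived in the meantime */
}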
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index fba674fac8b7..87d3129e7362 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -327,7 +327,7 @@ int start_userspace(unsigned long stub_stack)
}
/* set stack pointer to the end of the stack page, so it can grow downwards */
- sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
+ sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;
flags = CLONE_FILES | SIGCHLD;
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index f79dc338279e..8a72c99994eb 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -321,6 +321,38 @@ static void __init check_coredump_limit(void)
os_info("%llu\n", (unsigned long long)lim.rlim_max);
}
+void __init get_host_cpu_features(
+ void (*flags_helper_func)(char *line),
+ void (*cache_helper_func)(char *line))
+{
+ FILE *cpuinfo;
+ char *line = NULL;
+ size_t len = 0;
+ int done_parsing = 0;
+
+ cpuinfo = fopen("/proc/cpuinfo", "r");
+ if (cpuinfo == NULL) {
+ os_info("Failed to get host CPU features\n");
+ } else {
+ while ((getline(&line, &len, cpuinfo)) != -1) {
+ if (strstr(line, "flags")) {
+ flags_helper_func(line);
+ done_parsing++;
+ }
+ if (strstr(line, "cache_alignment")) {
+ cache_helper_func(line);
+ done_parsing++;
+ }
+ free(line);
+ line = NULL;
+ if (done_parsing > 1)
+ break;
+ }
+ fclose(cpuinfo);
+ }
+}
+
+
void __init os_early_checks(void)
{
int pid;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 867e7936dbc5..88fb922c23a0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -33,6 +33,7 @@ config X86_64
select NEED_DMA_MAP_STATE
select SWIOTLB
select ARCH_HAS_ELFCORE_COMPAT
+ select ZONE_DMA32
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -63,7 +64,7 @@ config X86
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM)
select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG
- select ARCH_ENABLE_SPLIT_PMD_PTLOCK if X86_64 || X86_PAE
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if (PGTABLE_LEVELS > 2) && (X86_64 || X86_PAE)
select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CACHE_LINE_SIZE
@@ -93,6 +94,7 @@ config X86
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -200,7 +202,6 @@ config X86
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT
- select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
@@ -344,9 +345,6 @@ config ARCH_SUSPEND_POSSIBLE
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
-config ZONE_DMA32
- def_bool y if X86_64
-
config AUDIT_ARCH
def_bool y if X86_64
@@ -394,16 +392,6 @@ config CC_HAS_SANE_STACKPROTECTOR
menu "Processor type and features"
-config ZONE_DMA
- bool "DMA memory allocation support" if EXPERT
- default y
- help
- DMA memory allocation support allows devices with less than 32-bit
- addressing to allocate within the first 16MB of address space.
- Disable if no such devices will be used.
-
- If unsure, say Y.
-
config SMP
bool "Symmetric multi-processing support"
help
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 53eceaf71ab7..307fd0000a83 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -240,9 +240,6 @@ head-y += arch/x86/kernel/platform-quirks.o
libs-y += arch/x86/lib/
-# See arch/x86/Kbuild for content of core part of the kernel
-core-y += arch/x86/
-
# drivers-y are linked after core-y
drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
drivers-$(CONFIG_PCI) += arch/x86/pci/
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 1db7913795f5..b3c1ae084180 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -44,7 +44,7 @@ ELF_FORMAT := elf64-x86-64
# Not on all 64-bit distros /lib is a symlink to /lib64. PLD is an example.
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
+LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib64
LINK-y += -m64
endif
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index fba2f615119a..ce763a12311c 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -451,3 +451,4 @@
444 i386 landlock_create_ruleset sys_landlock_create_ruleset
445 i386 landlock_add_rule sys_landlock_add_rule
446 i386 landlock_restrict_self sys_landlock_restrict_self
+447 i386 memfd_secret sys_memfd_secret
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index af973e400053..f6b57799c1ea 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -368,6 +368,7 @@
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
#
# Due to a historical design error, certain syscalls are numbered differently
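As a hedged illustration of the newly wired entry, a minimal user-space sketch invoking memfd_secret by number on x86-64; glibc may not provide a wrapper, and error handling is reduced to the bare minimum.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_memfd_secret
#define __NR_memfd_secret 447		/* matches the table entry above */
#endif

int main(void)
{
	long fd = syscall(__NR_memfd_secret, 0);	/* flags = 0 */

	return fd < 0 ? 1 : 0;
}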
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 1eb45139fcc6..3092fbf9dbe4 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2489,13 +2489,15 @@ void perf_clear_dirty_counters(void)
return;
for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
- /* Metrics and fake events don't have corresponding HW counters. */
- if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
- continue;
- else if (i >= INTEL_PMC_IDX_FIXED)
+ if (i >= INTEL_PMC_IDX_FIXED) {
+ /* Metrics and fake events don't have corresponding HW counters. */
+ if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
+ continue;
+
wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
- else
+ } else {
wrmsrl(x86_pmu_event_addr(i), 0);
+ }
}
bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fca7a6e2242f..ac6fd2dabf6a 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2904,24 +2904,28 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
- struct cpu_hw_events *cpuc;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
+ bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
int loops;
u64 status;
int handled;
int pmu_enabled;
- cpuc = this_cpu_ptr(&cpu_hw_events);
-
/*
* Save the PMU state.
* It needs to be restored when leaving the handler.
*/
pmu_enabled = cpuc->enabled;
/*
- * No known reason to not always do late ACK,
- * but just in case do it opt-in.
+ * In general, the early ACK is only applied for old platforms.
+ * For big cores, starting from Haswell, the late ACK should be
+ * applied.
+ * For small cores after Tremont, we have to do the ACK right
+ * before re-enabling counters, which is in the middle of the
+ * NMI handler.
*/
- if (!x86_pmu.late_ack)
+ if (!late_ack && !mid_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_bts_disable_local();
cpuc->enabled = 0;
@@ -2958,6 +2962,8 @@ again:
goto again;
done:
+ if (mid_ack)
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
/* Only restore PMU state when it's active. See x86_pmu_disable(). */
cpuc->enabled = pmu_enabled;
if (pmu_enabled)
@@ -2969,7 +2975,7 @@ done:
* have been reset. This avoids spurious NMIs on
* Haswell CPUs.
*/
- if (x86_pmu.late_ack)
+ if (late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI);
return handled;
}
@@ -6129,7 +6135,6 @@ __init int intel_pmu_init(void)
static_branch_enable(&perf_is_hybrid);
x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
- x86_pmu.late_ack = true;
x86_pmu.pebs_aliases = NULL;
x86_pmu.pebs_prec_dist = true;
x86_pmu.pebs_block = true;
@@ -6167,6 +6172,7 @@ __init int intel_pmu_init(void)
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
pmu->name = "cpu_core";
pmu->cpu_type = hybrid_big;
+ pmu->late_ack = true;
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
pmu->num_counters = x86_pmu.num_counters + 2;
pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
@@ -6192,6 +6198,7 @@ __init int intel_pmu_init(void)
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
pmu->name = "cpu_atom";
pmu->cpu_type = hybrid_small;
+ pmu->mid_ack = true;
pmu->num_counters = x86_pmu.num_counters;
pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
pmu->max_pebs_events = x86_pmu.max_pebs_events;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 433399069e27..c6262b154c3a 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,7 +40,7 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
- * Available model: SLM,AMT,GLM,CNL,TNT,ADL
+ * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
@@ -50,8 +50,8 @@
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
- * TNT,RKL,ADL
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
+ * TGL,TNT,RKL,ADL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -61,7 +61,7 @@
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- * KBL,CML,ICL,TGL,TNT,RKL,ADL
+ * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
@@ -72,8 +72,8 @@
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
- * TNT,RKL,ADL
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
+ * TGL,TNT,RKL,ADL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -566,6 +566,14 @@ static const struct cstate_model icl_cstates __initconst = {
BIT(PERF_CSTATE_PKG_C10_RES),
};
+static const struct cstate_model icx_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES),
+};
+
static const struct cstate_model adl_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C6_RES) |
@@ -664,6 +672,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
+
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates),
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index e8453de7a964..9e6d6eaeb4cb 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -491,7 +491,7 @@ static void intel_pmu_arch_lbr_xrstors(void *ctx)
{
struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
- copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
+ xrstors(&task_ctx->xsave, XFEATURE_MASK_LBR);
}
static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
@@ -576,7 +576,7 @@ static void intel_pmu_arch_lbr_xsaves(void *ctx)
{
struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
- copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
+ xsaves(&task_ctx->xsave, XFEATURE_MASK_LBR);
}
static void __intel_pmu_lbr_save(void *ctx)
@@ -993,7 +993,7 @@ static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
intel_pmu_store_lbr(cpuc, NULL);
return;
}
- copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
+ xsaves(&xsave->xsave, XFEATURE_MASK_LBR);
intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
}
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index bb6eb1e5569c..609c24aec71a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3804,11 +3804,11 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
/* One more for NULL. */
attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
if (!attrs)
- goto err;
+ goto clear_topology;
eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
if (!eas)
- goto err;
+ goto clear_attrs;
for (die = 0; die < uncore_max_dies(); die++) {
sprintf(buf, "die%ld", die);
@@ -3829,7 +3829,9 @@ err:
for (; die >= 0; die--)
kfree(eas[die].attr.attr.name);
kfree(eas);
+clear_attrs:
kfree(attrs);
+clear_topology:
kfree(type->topology);
clear_attr_update:
type->attr_update = NULL;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 2bf1c7ea2758..e3ac05c97b5e 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -656,6 +656,10 @@ struct x86_hybrid_pmu {
struct event_constraint *event_constraints;
struct event_constraint *pebs_constraints;
struct extra_reg *extra_regs;
+
+ unsigned int late_ack :1,
+ mid_ack :1,
+ enabled_ack :1;
};
static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -686,6 +690,16 @@ extern struct static_key_false perf_is_hybrid;
__Fp; \
}))
+#define hybrid_bit(_pmu, _field) \
+({ \
+ bool __Fp = x86_pmu._field; \
+ \
+ if (is_hybrid() && (_pmu)) \
+ __Fp = hybrid_pmu(_pmu)->_field; \
+ \
+ __Fp; \
+})
+
enum hybrid_pmu_type {
hybrid_big = 0x40,
hybrid_small = 0x20,
@@ -755,6 +769,7 @@ struct x86_pmu {
/* PMI handler bits */
unsigned int late_ack :1,
+ mid_ack :1,
enabled_ack :1;
/*
* sysfs attrs
@@ -1115,9 +1130,10 @@ void x86_pmu_stop(struct perf_event *event, int flags);
static inline void x86_pmu_disable_event(struct perf_event *event)
{
+ u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
if (is_counter_pair(hwc))
wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
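A hedged usage sketch for the new hybrid_bit() helper: it reads a single-bit PMI-ack property that lives either in the global x86_pmu or, on hybrid systems, in the per-PMU x86_hybrid_pmu selected by the event's pmu. The function name is illustrative.

static bool example_wants_late_ack(struct cpu_hw_events *cpuc)
{
	/* falls back to x86_pmu.late_ack when not running on a hybrid PMU */
	return hybrid_bit(cpuc->pmu, late_ack);
}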
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e63cf582201f..ab97b22ac04a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -9,6 +9,7 @@
#include <asm/irq_vectors.h>
#include <asm/cpu_entry_area.h>
+#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/percpu.h>
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 7d7500806af8..29fea180a665 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -312,6 +312,7 @@ do { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \
} while (0)
/*
@@ -328,6 +329,7 @@ extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
+extern unsigned long get_sigframe_size(void);
#ifdef CONFIG_X86_32
@@ -349,6 +351,7 @@ do { \
if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long __force)current->mm->context.vdso); \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \
} while (0)
/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
@@ -357,6 +360,7 @@ do { \
if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long __force)current->mm->context.vdso); \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \
} while (0)
#define AT_SYSINFO 32
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 16bf4d4a8159..5a18694a89b2 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -26,16 +26,17 @@
/*
* High level FPU state handling functions:
*/
-extern void fpu__prepare_read(struct fpu *fpu);
-extern void fpu__prepare_write(struct fpu *fpu);
-extern void fpu__save(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
-extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
-extern void fpu__clear_all(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
+extern void fpu_sync_fpstate(struct fpu *fpu);
+
+/* Clone and exit operations */
+extern int fpu_clone(struct task_struct *dst);
+extern void fpu_flush_thread(void);
+
/*
* Boot time FPU initialization functions:
*/
@@ -45,7 +46,6 @@ extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
-extern u64 fpu__get_supported_xfeatures_mask(void);
/*
* Debugging facility:
@@ -86,23 +86,9 @@ extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
+extern void save_fpregs_to_fpstate(struct fpu *fpu);
-static inline void fpstate_init_xstate(struct xregs_state *xsave)
-{
- /*
- * XRSTORS requires these bits set in xcomp_bv, or it will
- * trigger #GP:
- */
- xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
-}
-
-static inline void fpstate_init_fxstate(struct fxregs_state *fx)
-{
- fx->cwd = 0x37f;
- fx->mxcsr = MXCSR_DEFAULT;
-}
-extern void fpstate_sanitize_xstate(struct fpu *fpu);
-
+/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
#define user_insn(insn, output, input...) \
({ \
int err; \
@@ -110,14 +96,14 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
might_fault(); \
\
asm volatile(ASM_STAC "\n" \
- "1:" #insn "\n\t" \
+ "1: " #insn "\n" \
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
- "3: movl $-1,%[err]\n" \
+ "3: negl %%eax\n" \
" jmp 2b\n" \
".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : [err] "=r" (err), output \
+ _ASM_EXTABLE_FAULT(1b, 3b) \
+ : [err] "=a" (err), output \
: "0"(0), input); \
err; \
})
@@ -143,12 +129,12 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
: output : input)
-static inline int copy_fregs_to_user(struct fregs_state __user *fx)
+static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx)
{
return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}
-static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
+static inline int fxsave_to_user_sigframe(struct fxregs_state __user *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
@@ -157,7 +143,7 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
}
-static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
+static inline void fxrstor(struct fxregs_state *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
@@ -165,7 +151,7 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
-static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
+static inline int fxrstor_safe(struct fxregs_state *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
@@ -173,7 +159,7 @@ static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
-static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
+static inline int fxrstor_from_user_sigframe(struct fxregs_state __user *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
@@ -181,29 +167,21 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
-static inline void copy_kernel_to_fregs(struct fregs_state *fx)
+static inline void frstor(struct fregs_state *fx)
{
kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
-static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
+static inline int frstor_safe(struct fregs_state *fx)
{
return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
-static inline int copy_user_to_fregs(struct fregs_state __user *fx)
+static inline int frstor_from_user_sigframe(struct fregs_state __user *fx)
{
return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
-static inline void copy_fxregs_to_kernel(struct fpu *fpu)
-{
- if (IS_ENABLED(CONFIG_X86_32))
- asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
- else
- asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
-}
-
static inline void fxsave(struct fxregs_state *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
@@ -219,16 +197,20 @@ static inline void fxsave(struct fxregs_state *fx)
#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
+/*
+ * After this @err contains 0 on success or the negated trap number when
+ * the operation raises an exception. For faults this results in -EFAULT.
+ */
#define XSTATE_OP(op, st, lmask, hmask, err) \
asm volatile("1:" op "\n\t" \
"xor %[err], %[err]\n" \
"2:\n\t" \
".pushsection .fixup,\"ax\"\n\t" \
- "3: movl $-2,%[err]\n\t" \
+ "3: negl %%eax\n\t" \
"jmp 2b\n\t" \
".popsection\n\t" \
- _ASM_EXTABLE(1b, 3b) \
- : [err] "=r" (err) \
+ _ASM_EXTABLE_FAULT(1b, 3b) \
+ : [err] "=a" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -280,9 +262,9 @@ static inline void fxsave(struct fxregs_state *fx)
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
+static inline void os_xrstor_booting(struct xregs_state *xstate)
{
- u64 mask = -1;
+ u64 mask = xfeatures_mask_fpstate();
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
@@ -303,8 +285,11 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
/*
* Save processor xstate to xsave area.
+ *
+ * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
+ * and command line options. The choice is permanent until the next reboot.
*/
-static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
+static inline void os_xsave(struct xregs_state *xstate)
{
u64 mask = xfeatures_mask_all;
u32 lmask = mask;
@@ -321,8 +306,10 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
/*
* Restore processor xstate from xsave area.
+ *
+ * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
*/
-static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
+static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
@@ -340,9 +327,14 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
* backward compatibility for old applications which don't understand
* compacted format of xsave area.
*/
-static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
- u64 mask = xfeatures_mask_user();
+ /*
+ * Include the features which are not xsaved/rstored by the kernel
+ * internally, e.g. PKRU. That's user space ABI and also required
+ * to allow the signal handler to modify PKRU.
+ */
+ u64 mask = xfeatures_mask_uabi();
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
@@ -365,7 +357,7 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
/*
* Restore xstate from user space xsave area.
*/
-static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
struct xregs_state *xstate = ((__force struct xregs_state *)buf);
u32 lmask = mask;
@@ -383,13 +375,13 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
* Restore xstate from kernel space xsave area, return an error code instead of
* an exception.
*/
-static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
- if (static_cpu_has(X86_FEATURE_XSAVES))
+ if (cpu_feature_enabled(X86_FEATURE_XSAVES))
XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -397,36 +389,11 @@ static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
return err;
}
-extern int copy_fpregs_to_fpstate(struct fpu *fpu);
+extern void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask);
-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
+static inline void restore_fpregs_from_fpstate(union fpregs_state *fpstate)
{
- if (use_xsave()) {
- copy_kernel_to_xregs(&fpstate->xsave, mask);
- } else {
- if (use_fxsr())
- copy_kernel_to_fxregs(&fpstate->fxsave);
- else
- copy_kernel_to_fregs(&fpstate->fsave);
- }
-}
-
-static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
-{
- /*
- * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
- * pending. Clear the x87 state here by setting it to fixed values.
- * "m" is a random variable that should be in L1.
- */
- if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
- asm volatile(
- "fnclex\n\t"
- "emms\n\t"
- "fildl %P[addr]" /* set F?P to defined value */
- : : [addr] "m" (fpstate));
- }
-
- __copy_kernel_to_fpregs(fpstate, -1);
+ __restore_fpregs_from_fpstate(fpstate, xfeatures_mask_fpstate());
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
@@ -485,10 +452,8 @@ static inline void fpregs_activate(struct fpu *fpu)
trace_x86_fpu_regs_activated(fpu);
}
-/*
- * Internal helper, do not use directly. Use switch_fpu_return() instead.
- */
-static inline void __fpregs_load_activate(void)
+/* Internal helper for switch_fpu_return() and signal frame setup */
+static inline void fpregs_restore_userregs(void)
{
struct fpu *fpu = &current->thread.fpu;
int cpu = smp_processor_id();
@@ -497,7 +462,21 @@ static inline void __fpregs_load_activate(void)
return;
if (!fpregs_state_valid(fpu, cpu)) {
- copy_kernel_to_fpregs(&fpu->state);
+ u64 mask;
+
+ /*
+ * This restores _all_ xstate which has not been
+ * established yet.
+ *
+ * If PKRU is enabled, then the PKRU value is already
+ * correct because it was either set in switch_to() or in
+ * flush_thread(). So it is excluded because it might be
+ * not up to date in current->thread.fpu.xsave state.
+ */
+ mask = xfeatures_mask_restore_user() |
+ xfeatures_mask_supervisor();
+ __restore_fpregs_from_fpstate(&fpu->state, mask);
+
fpregs_activate(fpu);
fpu->last_cpu = cpu;
}
@@ -529,12 +508,17 @@ static inline void __fpregs_load_activate(void)
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
- if (!copy_fpregs_to_fpstate(old_fpu))
- old_fpu->last_cpu = -1;
- else
- old_fpu->last_cpu = cpu;
+ save_fpregs_to_fpstate(old_fpu);
+ /*
+ * The save operation preserved register state, so the
+ * fpu_fpregs_owner_ctx is still @old_fpu. Store the
+ * current CPU number in @old_fpu, so the next return
+ * to user space can avoid the FPU register restore
+ * when it returns on the same CPU and still owns the
+ * context.
+ */
+ old_fpu->last_cpu = cpu;
- /* But leave fpu_fpregs_owner_ctx! */
trace_x86_fpu_regs_deactivated(old_fpu);
}
}
@@ -544,39 +528,13 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
*/
/*
- * Load PKRU from the FPU context if available. Delay loading of the
- * complete FPU state until the return to userland.
+ * Delay loading of the complete FPU state until the return to userland.
+ * PKRU is handled separately.
*/
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
- u32 pkru_val = init_pkru_value;
- struct pkru_state *pk;
-
- if (!static_cpu_has(X86_FEATURE_FPU))
- return;
-
- set_thread_flag(TIF_NEED_FPU_LOAD);
-
- if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
- return;
-
- /*
- * PKRU state is switched eagerly because it needs to be valid before we
- * return to userland e.g. for a copy_to_user() operation.
- */
- if (!(current->flags & PF_KTHREAD)) {
- /*
- * If the PKRU bit in xsave.header.xfeatures is not set,
- * then the PKRU component was in init state, which means
- * XRSTOR will set PKRU to 0. If the bit is not set then
- * get_xsave_addr() will return NULL because the PKRU value
- * in memory is not valid. This means pkru_val has to be
- * set to 0 and not to init_pkru_value.
- */
- pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
- pkru_val = pk ? pk->pkru : 0;
- }
- __write_pkru(pkru_val);
+ if (cpu_feature_enabled(X86_FEATURE_FPU))
+ set_thread_flag(TIF_NEED_FPU_LOAD);
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
index 7fb516b6893a..8b6631dffefd 100644
--- a/arch/x86/include/asm/fpu/signal.h
+++ b/arch/x86/include/asm/fpu/signal.h
@@ -29,6 +29,8 @@ unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
unsigned long *buf_fx, unsigned long *size);
+unsigned long fpu__get_fpstate_size(void);
+
extern void fpu__init_prepare_fx_sw_frame(void);
#endif /* _ASM_X86_FPU_SIGNAL_H */
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 47a92232d595..109dfcc75299 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <asm/processor.h>
+#include <asm/fpu/api.h>
#include <asm/user.h>
/* Bit 63 of XCR0 is reserved for future expansion */
@@ -34,6 +35,14 @@
XFEATURE_MASK_BNDREGS | \
XFEATURE_MASK_BNDCSR)
+/*
+ * Features which are restored when returning to user space.
+ * PKRU is not restored on return to user space because PKRU
+ * is switched eagerly in switch_to() and flush_thread()
+ */
+#define XFEATURE_MASK_USER_RESTORE \
+ (XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_PKRU)
+
/* All currently supported supervisor features */
#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID)
@@ -42,21 +51,21 @@
* and its size may be huge. Saving/restoring such supervisor state components
* at each context switch can cause high CPU and space overhead, which should
* be avoided. Such supervisor state components should only be saved/restored
- * on demand. The on-demand dynamic supervisor features are set in this mask.
+ * on demand. The on-demand supervisor features are set in this mask.
*
- * Unlike the existing supported supervisor features, a dynamic supervisor
+ * Unlike the existing supported supervisor features, an independent supervisor
* feature does not allocate a buffer in task->fpu, and the corresponding
* supervisor state component cannot be saved/restored at each context switch.
*
- * To support a dynamic supervisor feature, a developer should follow the
+ * To support an independent supervisor feature, a developer should follow the
* dos and don'ts as below:
* - Do dynamically allocate a buffer for the supervisor state component.
* - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the
* state component to/from the buffer.
- * - Don't set the bit corresponding to the dynamic supervisor feature in
+ * - Don't set the bit corresponding to the independent supervisor feature in
* IA32_XSS at run time, since it has been set at boot time.
*/
-#define XFEATURE_MASK_DYNAMIC (XFEATURE_MASK_LBR)
+#define XFEATURE_MASK_INDEPENDENT (XFEATURE_MASK_LBR)
/*
* Unsupported supervisor features. When a supervisor feature in this mask is
@@ -66,7 +75,7 @@
/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
- XFEATURE_MASK_DYNAMIC | \
+ XFEATURE_MASK_INDEPENDENT | \
XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)
#ifdef CONFIG_X86_64
@@ -82,17 +91,42 @@ static inline u64 xfeatures_mask_supervisor(void)
return xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}
-static inline u64 xfeatures_mask_user(void)
+/*
+ * The xfeatures which are enabled in XCR0 and expected to be in ptrace
+ * buffers and signal frames.
+ */
+static inline u64 xfeatures_mask_uabi(void)
{
return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
}
-static inline u64 xfeatures_mask_dynamic(void)
+/*
+ * The xfeatures which are restored by the kernel when returning to user
+ * mode. This is not necessarily the same as xfeatures_mask_uabi() as the
+ * kernel does not manage all XCR0 enabled features via xsave/xrstor as
+ * some of them have to be switched eagerly on context switch and exec().
+ */
+static inline u64 xfeatures_mask_restore_user(void)
+{
+ return xfeatures_mask_all & XFEATURE_MASK_USER_RESTORE;
+}
+
+/*
+ * Like xfeatures_mask_restore_user() but additionally restores the
+ * supported supervisor states.
+ */
+static inline u64 xfeatures_mask_fpstate(void)
+{
+ return xfeatures_mask_all & \
+ (XFEATURE_MASK_USER_RESTORE | XFEATURE_MASK_SUPERVISOR_SUPPORTED);
+}
+
+static inline u64 xfeatures_mask_independent(void)
{
if (!boot_cpu_has(X86_FEATURE_ARCH_LBR))
- return XFEATURE_MASK_DYNAMIC & ~XFEATURE_MASK_LBR;
+ return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
- return XFEATURE_MASK_DYNAMIC;
+ return XFEATURE_MASK_INDEPENDENT;
}
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
@@ -101,19 +135,21 @@ extern void __init update_regset_xstate_info(unsigned int size,
u64 xstate_mask);
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
-const void *get_xsave_field_ptr(int xfeature_nr);
-int using_compacted_format(void);
int xfeature_size(int xfeature_nr);
-struct membuf;
-void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave);
-int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
-int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
-void copy_supervisor_to_kernel(struct xregs_state *xsave);
-void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask);
-void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask);
+int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
+int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
+void xsaves(struct xregs_state *xsave, u64 mask);
+void xrstors(struct xregs_state *xsave, u64 mask);
-/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
-int validate_user_xstate_header(const struct xstate_header *hdr);
+enum xstate_copy_mode {
+ XSTATE_COPY_FP,
+ XSTATE_COPY_FX,
+ XSTATE_COPY_XSAVE,
+};
+
+struct membuf;
+void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ enum xstate_copy_mode mode);
#endif
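/*
 * Editor's sketch, not part of the patch above: how the renamed helpers in
 * this hunk compose. The function name xstate_mask_demo() is made up for
 * illustration; the mask helpers themselves are the ones declared above.
 */
static inline void xstate_mask_demo(void)
{
	/* Everything user space sees via ptrace buffers and signal frames: */
	u64 uabi = xfeatures_mask_uabi();

	/*
	 * What the kernel restores with XRSTOR on return to user mode: the
	 * same set minus PKRU, which is written eagerly in switch_to() and
	 * flush_thread() instead.
	 */
	u64 ret_user = xfeatures_mask_restore_user();

	/* The task's fpstate additionally carries the supervisor states: */
	u64 fpstate = xfeatures_mask_fpstate();

	pr_info("uabi=%llx ret_user=%llx fpstate=%llx\n",
		uabi, ret_user, fpstate);
}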
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 974cbfb1eefe..af6ce8d4c86a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1038,6 +1038,13 @@ struct kvm_arch {
struct list_head lpage_disallowed_mmu_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head;
+ /*
+ * Protects marking pages unsync during page faults, as TDP MMU page
+ * faults only take mmu_lock for read. For simplicity, the unsync
+ * pages lock is always taken when marking pages unsync regardless of
+ * whether mmu_lock is held for read or write.
+ */
+ spinlock_t mmu_unsync_pages_lock;
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 62ad61d6fefc..c7ec5bb88334 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -84,8 +84,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b1099f2d9800..448cd01eb3ec 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -23,7 +23,7 @@
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
-#include <asm/fpu/xstate.h>
+#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>
@@ -126,35 +126,6 @@ static inline int pte_dirty(pte_t pte)
return pte_flags(pte) & _PAGE_DIRTY;
}
-
-static inline u32 read_pkru(void)
-{
- if (boot_cpu_has(X86_FEATURE_OSPKE))
- return rdpkru();
- return 0;
-}
-
-static inline void write_pkru(u32 pkru)
-{
- struct pkru_state *pk;
-
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
- return;
-
- pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);
-
- /*
- * The PKRU value in xstate needs to be in sync with the value that is
- * written to the CPU. The FPU restore on return to userland would
- * otherwise load the previous value again.
- */
- fpregs_lock();
- if (pk)
- pk->pkru = pkru;
- __write_pkru(pkru);
- fpregs_unlock();
-}
-
static inline int pte_young(pte_t pte)
{
return pte_flags(pte) & _PAGE_ACCESSED;
@@ -865,9 +836,9 @@ static inline int pud_present(pud_t pud)
return pud_flags(pud) & _PAGE_PRESENT;
}
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
+ return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}
/*
@@ -906,9 +877,9 @@ static inline int p4d_present(p4d_t p4d)
return p4d_flags(p4d) & _PAGE_PRESENT;
}
-static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+static inline pud_t *p4d_pgtable(p4d_t p4d)
{
- return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
+ return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}
/*
@@ -1360,32 +1331,6 @@ static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
-#define PKRU_AD_BIT 0x1
-#define PKRU_WD_BIT 0x2
-#define PKRU_BITS_PER_PKEY 2
-
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-extern u32 init_pkru_value;
-#else
-#define init_pkru_value 0
-#endif
-
-static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
-{
- int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
- return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
-}
-
-static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
-{
- int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
- /*
- * Access-disable disables writes too so we need to check
- * both bits here.
- */
- return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
-}
-
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f24d7ef8fffa..40497a9020c6 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -7,8 +7,6 @@
#include <asm/page_types.h>
-#define FIRST_USER_ADDRESS 0UL
-
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 2ff9b98812b7..5c7bcaa79623 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -9,14 +9,14 @@
* will be necessary to ensure that the types that store key
* numbers and masks have sufficient capacity.
*/
-#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+#define arch_max_pkey() (cpu_feature_enabled(X86_FEATURE_OSPKE) ? 16 : 1)
extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
unsigned long init_val);
static inline bool arch_pkeys_enabled(void)
{
- return boot_cpu_has(X86_FEATURE_OSPKE);
+ return cpu_feature_enabled(X86_FEATURE_OSPKE);
}
/*
@@ -26,7 +26,7 @@ static inline bool arch_pkeys_enabled(void)
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return ARCH_DEFAULT_PKEY;
return __execute_only_pkey(mm);
@@ -37,7 +37,7 @@ extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
int prot, int pkey)
{
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return 0;
return __arch_override_mprotect_pkey(vma, prot, pkey);
@@ -124,7 +124,6 @@ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
unsigned long init_val);
extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
unsigned long init_val);
-extern void copy_init_pkru_to_fpregs(void);
static inline int vma_pkey(struct vm_area_struct *vma)
{
diff --git a/arch/x86/include/asm/pkru.h b/arch/x86/include/asm/pkru.h
new file mode 100644
index 000000000000..ccc539faa5bb
--- /dev/null
+++ b/arch/x86/include/asm/pkru.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_PKRU_H
+#define _ASM_X86_PKRU_H
+
+#include <asm/fpu/xstate.h>
+
+#define PKRU_AD_BIT 0x1
+#define PKRU_WD_BIT 0x2
+#define PKRU_BITS_PER_PKEY 2
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+extern u32 init_pkru_value;
+#define pkru_get_init_value() READ_ONCE(init_pkru_value)
+#else
+#define init_pkru_value 0
+#define pkru_get_init_value() 0
+#endif
+
+static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
+{
+ int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
+ return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
+}
+
+static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
+{
+ int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
+ /*
+ * Access-disable disables writes too so we need to check
+ * both bits here.
+ */
+ return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
+}
+
+static inline u32 read_pkru(void)
+{
+ if (cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return rdpkru();
+ return 0;
+}
+
+static inline void write_pkru(u32 pkru)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+ /*
+ * WRPKRU is relatively expensive compared to RDPKRU.
+ * Avoid WRPKRU when it would not change the value.
+ */
+ if (pkru != rdpkru())
+ wrpkru(pkru);
+}
+
+static inline void pkru_write_default(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+
+ wrpkru(pkru_get_init_value());
+}
+
+#endif
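/*
 * Editor's sketch, not part of the patch above: the PKRU layout used by the
 * new header is two bits per protection key, Access-Disable (PKRU_AD_BIT)
 * and Write-Disable (PKRU_WD_BIT). pkru_encoding_demo() is a made-up name;
 * everything it uses is defined in the hunk above.
 */
static inline void pkru_encoding_demo(void)
{
	/* Deny all access for key 1, make key 2 read-only: */
	u32 pkru = (PKRU_AD_BIT << (1 * PKRU_BITS_PER_PKEY)) |
		   (PKRU_WD_BIT << (2 * PKRU_BITS_PER_PKEY));

	WARN_ON(__pkru_allows_read(pkru, 1));	/* key 1: reads blocked  */
	WARN_ON(__pkru_allows_write(pkru, 1));	/* key 1: writes blocked */
	WARN_ON(!__pkru_allows_read(pkru, 2));	/* key 2: reads allowed  */
	WARN_ON(__pkru_allows_write(pkru, 2));	/* key 2: writes blocked */
	WARN_ON(!__pkru_allows_read(pkru, 0));	/* key 0: untouched      */
}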
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 364d0e42e280..f3020c54e2cb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -518,6 +518,15 @@ struct thread_struct {
unsigned int sig_on_uaccess_err:1;
+ /*
+ * Protection Keys Register for Userspace. Loaded immediately on
+ * context switch. Store it in thread_struct to avoid a lookup in
+ * the task's FPU xstate buffer. This value is only valid when a
+ * task is scheduled out. For 'current' the authoritative source of
+ * PKRU is the hardware itself.
+ */
+ u32 pkru;
+
/* Floating point and extended processor state */
struct fpu fpu;
/*
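/*
 * Editor's sketch, not part of the patch above: per the new comment in
 * thread_struct, the saved pkru field is only meaningful for tasks which
 * are scheduled out; for 'current' the hardware register is authoritative.
 * task_pkru_demo() is a hypothetical accessor illustrating that rule.
 */
static inline u32 task_pkru_demo(struct task_struct *task)
{
	if (task == current)
		return rdpkru();		/* live value from hardware */

	return task->thread.pkru;		/* saved at context switch  */
}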
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 84eab2724875..5b1ed650b124 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -85,4 +85,6 @@ struct rt_sigframe_x32 {
#endif /* CONFIG_X86_64 */
+void __init init_sigframe_size(void);
+
#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2acd6cb62328..f3fbb84ff8a7 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -104,25 +104,13 @@ static inline void wrpkru(u32 pkru)
: : "a" (pkru), "c"(ecx), "d"(edx));
}
-static inline void __write_pkru(u32 pkru)
-{
- /*
- * WRPKRU is relatively expensive compared to RDPKRU.
- * Avoid WRPKRU when it would not change the value.
- */
- if (pkru == rdpkru())
- return;
-
- wrpkru(pkru);
-}
-
#else
static inline u32 rdpkru(void)
{
return 0;
}
-static inline void __write_pkru(u32 pkru)
+static inline void wrpkru(u32 pkru)
{
}
#endif
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index e322676039f4..b00dbc5fac2b 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
diff --git a/arch/x86/include/asm/unaligned.h b/arch/x86/include/asm/unaligned.h
deleted file mode 100644
index 9c754a7447aa..000000000000
--- a/arch/x86/include/asm/unaligned.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_UNALIGNED_H
-#define _ASM_X86_UNALIGNED_H
-
-/*
- * The x86 can do unaligned accesses itself.
- */
-
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_X86_UNALIGNED_H */
diff --git a/arch/x86/include/uapi/asm/auxvec.h b/arch/x86/include/uapi/asm/auxvec.h
index 580e3c567046..6beb55bbefa4 100644
--- a/arch/x86/include/uapi/asm/auxvec.h
+++ b/arch/x86/include/uapi/asm/auxvec.h
@@ -12,9 +12,9 @@
/* entries in ARCH_DLINFO: */
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
-# define AT_VECTOR_SIZE_ARCH 2
+# define AT_VECTOR_SIZE_ARCH 3
#else /* else it's non-compat x86-64 */
-# define AT_VECTOR_SIZE_ARCH 1
+# define AT_VECTOR_SIZE_ARCH 2
#endif
#endif /* _ASM_X86_AUXVEC_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0f66682ac02a..3e625c61f008 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
+obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d5c691a3208b..39224e035e47 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_get_irqchip_state = ioapic_irq_get_chip_state,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};
static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_get_irqchip_state = ioapic_irq_get_chip_state,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};
static inline void init_IO_APIC_traps(void)
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 44ebe25e7703..dbacb9ec8843 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* The quirk bit is not set in this case.
* - The new vector is the same as the old vector
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ * - The interrupt is not yet started up
* - The new destination CPU is the same as the old destination CPU
*/
if (!irqd_msi_nomask_quirk(irqd) ||
cfg->vector == old_cfg.vector ||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ !irqd_is_started(irqd) ||
cfg->dest_apicid == old_cfg.dest_apicid) {
irq_msi_update_msg(irqd, cfg);
return ret;
@@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = msi_set_affinity,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
.irq_mask = pci_msi_mask_irq,
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};
static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = dmar_msi_compose_msg,
.irq_write_msi_msg = dmar_msi_write_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};
static int dmar_msi_init(struct irq_domain *domain,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a99d00393206..64b805bd6a54 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -58,6 +58,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
+#include <asm/sigframe.h>
#include "cpu.h"
@@ -465,27 +466,22 @@ static bool pku_disabled;
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
- struct pkru_state *pk;
+ if (c == &boot_cpu_data) {
+ if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
+ return;
+ /*
+ * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
+ * bit to be set. Enforce it.
+ */
+ setup_force_cpu_cap(X86_FEATURE_OSPKE);
- /* check the boot processor, plus compile options for PKU: */
- if (!cpu_feature_enabled(X86_FEATURE_PKU))
- return;
- /* checks the actual processor's cpuid bits: */
- if (!cpu_has(c, X86_FEATURE_PKU))
- return;
- if (pku_disabled)
+ } else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
return;
+ }
cr4_set_bits(X86_CR4_PKE);
- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
- if (pk)
- pk->pkru = init_pkru_value;
- /*
- * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
- * cpuid bit to be set. We need to ensure that we
- * update that bit in this CPU's "cpu_info".
- */
- set_cpu_cap(c, X86_FEATURE_OSPKE);
+ /* Load the default PKRU value */
+ pkru_write_default();
}
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -1332,6 +1328,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
fpu__init_system(c);
+ init_sigframe_size();
+
#ifdef CONFIG_X86_32
/*
* Regardless of whether PCID is enumerated, the SDM says
@@ -1717,9 +1715,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
}
/*
- * clearcpuid= was already parsed in fpu__init_parse_early_param.
- * But we need to keep a dummy __setup around otherwise it would
- * show up as an environment variable for init.
+ * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy
+ * function prevents it from becoming an environment variable for init.
*/
static __init int setup_clearcpuid(char *arg)
{
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 01ca94f42e4e..c890d67a64ad 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/i8253.h>
+#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
@@ -236,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
for_each_present_cpu(i) {
if (i == 0)
continue;
- ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
+ ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
BUG_ON(ret);
}
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f07c10b87a87..57e4bb695ff9 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -285,15 +285,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
return chunks >>= shift;
}
-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
{
struct mbm_state *m;
u64 chunks, tval;
tval = __rmid_read(rmid, rr->evtid);
if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
- rr->val = tval;
- return -EINVAL;
+ return tval;
}
switch (rr->evtid) {
case QOS_L3_OCCUP_EVENT_ID:
@@ -305,12 +304,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
case QOS_L3_MBM_LOCAL_EVENT_ID:
m = &rr->d->mbm_local[rmid];
break;
- default:
- /*
- * Code would never reach here because
- * an invalid event id would fail the __rmid_read.
- */
- return -EINVAL;
}
if (rr->first) {
@@ -361,23 +354,29 @@ void mon_event_count(void *info)
struct rdtgroup *rdtgrp, *entry;
struct rmid_read *rr = info;
struct list_head *head;
+ u64 ret_val;
rdtgrp = rr->rgrp;
- if (__mon_event_count(rdtgrp->mon.rmid, rr))
- return;
+ ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
/*
- * For Ctrl groups read data from child monitor groups.
+ * For Ctrl groups read data from child monitor groups and
+ * add them together. Count events which are read successfully.
+ * Discard the rmid_read's reporting errors.
*/
head = &rdtgrp->mon.crdtgrp_list;
if (rdtgrp->type == RDTCTRL_GROUP) {
list_for_each_entry(entry, head, mon.crdtgrp_list) {
- if (__mon_event_count(entry->mon.rmid, rr))
- return;
+ if (__mon_event_count(entry->mon.rmid, rr) == 0)
+ ret_val = 0;
}
}
+
+ /* Report error if none of rmid_reads are successful */
+ if (ret_val)
+ rr->val = ret_val;
}
/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 299c20f0a38b..ea4fe192189d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -69,7 +69,7 @@ static void printk_stack_address(unsigned long address, int reliable,
const char *log_lvl)
{
touch_nmi_watchdog();
- printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
+ printk("%s %s%pBb\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 571220ac8bea..7ada7bd03a32 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -23,7 +23,7 @@
* Represents the initial FPU state. It's mostly (but not completely) zeroes,
* depending on the FPU hardware format:
*/
-union fpregs_state init_fpstate __read_mostly;
+union fpregs_state init_fpstate __ro_after_init;
/*
* Track whether the kernel is using the FPU state
@@ -83,19 +83,23 @@ bool irq_fpu_usable(void)
EXPORT_SYMBOL(irq_fpu_usable);
/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact and we can
- * keep registers active.
+ * Save the FPU register state in fpu->state. The register state is
+ * preserved.
*
- * The legacy FNSAVE instruction cleared all FPU state
- * unconditionally, so registers are essentially destroyed.
- * Modern FPU state can be kept in registers, if there are
- * no pending FP exceptions.
+ * Must be called with fpregs_lock() held.
+ *
+ * The legacy FNSAVE instruction clears all FPU state unconditionally, so
+ * register state has to be reloaded. That might be a pointless exercise
+ * when the FPU is going to be used by another task right after that. But
+ * this only affects 20+ year old 32bit systems and avoids conditionals all
+ * over the place.
+ *
+ * FXSAVE and all XSAVE variants preserve the FPU register state.
*/
-int copy_fpregs_to_fpstate(struct fpu *fpu)
+void save_fpregs_to_fpstate(struct fpu *fpu)
{
if (likely(use_xsave())) {
- copy_xregs_to_kernel(&fpu->state.xsave);
+ os_xsave(&fpu->state.xsave);
/*
* AVX512 state is tracked here because its use is
@@ -103,23 +107,49 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
*/
if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
fpu->avx512_timestamp = jiffies;
- return 1;
+ return;
}
if (likely(use_fxsr())) {
- copy_fxregs_to_kernel(fpu);
- return 1;
+ fxsave(&fpu->state.fxsave);
+ return;
}
/*
* Legacy FPU register saving, FNSAVE always clears FPU registers,
- * so we have to mark them inactive:
+ * so we have to reload them from the memory state.
*/
asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
+ frstor(&fpu->state.fsave);
+}
+EXPORT_SYMBOL(save_fpregs_to_fpstate);
- return 0;
+void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
+{
+ /*
+ * AMD K7/K8 and later CPUs up to Zen don't save/restore
+ * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
+ * here by setting it to fixed values. "m" is a random variable
+ * that should be in L1.
+ */
+ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (fpstate));
+ }
+
+ if (use_xsave()) {
+ os_xrstor(&fpstate->xsave, mask);
+ } else {
+ if (use_fxsr())
+ fxrstor(&fpstate->fxsave);
+ else
+ frstor(&fpstate->fsave);
+ }
}
-EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+EXPORT_SYMBOL_GPL(__restore_fpregs_from_fpstate);
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
@@ -133,11 +163,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
if (!(current->flags & PF_KTHREAD) &&
!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
- /*
- * Ignore return value -- we don't care if reg state
- * is clobbered.
- */
- copy_fpregs_to_fpstate(&current->thread.fpu);
+ save_fpregs_to_fpstate(&current->thread.fpu);
}
__cpu_invalidate_fpregs_state();
@@ -160,27 +186,38 @@ void kernel_fpu_end(void)
EXPORT_SYMBOL_GPL(kernel_fpu_end);
/*
- * Save the FPU state (mark it for reload if necessary):
- *
- * This only ever gets called for the current task.
+ * Sync the FPU register state to current's memory register state when the
+ * current task owns the FPU. The hardware register state is preserved.
*/
-void fpu__save(struct fpu *fpu)
+void fpu_sync_fpstate(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
fpregs_lock();
trace_x86_fpu_before_save(fpu);
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- if (!copy_fpregs_to_fpstate(fpu)) {
- copy_kernel_to_fpregs(&fpu->state);
- }
- }
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+ save_fpregs_to_fpstate(fpu);
trace_x86_fpu_after_save(fpu);
fpregs_unlock();
}
+static inline void fpstate_init_xstate(struct xregs_state *xsave)
+{
+ /*
+ * XRSTORS requires these bits set in xcomp_bv, or it will
+ * trigger #GP:
+ */
+ xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
+}
+
+static inline void fpstate_init_fxstate(struct fxregs_state *fx)
+{
+ fx->cwd = 0x37f;
+ fx->mxcsr = MXCSR_DEFAULT;
+}
+
/*
* Legacy x87 fpstate state init:
*/
@@ -210,18 +247,18 @@ void fpstate_init(union fpregs_state *state)
}
EXPORT_SYMBOL_GPL(fpstate_init);
-int fpu__copy(struct task_struct *dst, struct task_struct *src)
+/* Clone current's FPU state on fork */
+int fpu_clone(struct task_struct *dst)
{
+ struct fpu *src_fpu = &current->thread.fpu;
struct fpu *dst_fpu = &dst->thread.fpu;
- struct fpu *src_fpu = &src->thread.fpu;
+ /* The new task's FPU state cannot be valid in the hardware. */
dst_fpu->last_cpu = -1;
- if (!static_cpu_has(X86_FEATURE_FPU))
+ if (!cpu_feature_enabled(X86_FEATURE_FPU))
return 0;
- WARN_ON_FPU(src_fpu != &current->thread.fpu);
-
/*
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
@@ -229,20 +266,16 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
- * If the FPU registers are not current just memcpy() the state.
- * Otherwise save current FPU registers directly into the child's FPU
- * context, without any memory-to-memory copying.
- *
- * ( The function 'fails' in the FNSAVE case, which destroys
- * register contents so we have to load them back. )
+ * If the FPU registers are not owned by current just memcpy() the
+ * state. Otherwise save the FPU registers directly into the
+ * child's FPU context, without any memory-to-memory copying.
*/
fpregs_lock();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
- else if (!copy_fpregs_to_fpstate(dst_fpu))
- copy_kernel_to_fpregs(&dst_fpu->state);
-
+ else
+ save_fpregs_to_fpstate(dst_fpu);
fpregs_unlock();
set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
@@ -254,63 +287,6 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
}
/*
- * Activate the current task's in-memory FPU context,
- * if it has not been used before:
- */
-static void fpu__initialize(struct fpu *fpu)
-{
- WARN_ON_FPU(fpu != &current->thread.fpu);
-
- set_thread_flag(TIF_NEED_FPU_LOAD);
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
-}
-
-/*
- * This function must be called before we read a task's fpstate.
- *
- * There's two cases where this gets called:
- *
- * - for the current task (when coredumping), in which case we have
- * to save the latest FPU registers into the fpstate,
- *
- * - or it's called for stopped tasks (ptrace), in which case the
- * registers were already saved by the context-switch code when
- * the task scheduled out.
- *
- * If the task has used the FPU before then save it.
- */
-void fpu__prepare_read(struct fpu *fpu)
-{
- if (fpu == &current->thread.fpu)
- fpu__save(fpu);
-}
-
-/*
- * This function must be called before we write a task's fpstate.
- *
- * Invalidate any cached FPU registers.
- *
- * After this function call, after registers in the fpstate are
- * modified and the child task has woken up, the child task will
- * restore the modified FPU state from the modified context. If we
- * didn't clear its cached status here then the cached in-registers
- * state pending on its former CPU could be restored, corrupting
- * the modifications.
- */
-void fpu__prepare_write(struct fpu *fpu)
-{
- /*
- * Only stopped child tasks can be used to modify the FPU
- * state in the fpstate buffer:
- */
- WARN_ON_FPU(fpu == &current->thread.fpu);
-
- /* Invalidate any cached state: */
- __fpu_invalidate_fpregs_state(fpu);
-}
-
-/*
* Drops current FPU state: deactivates the fpregs and
* the fpstate. NOTE: it still leaves previous contents
* in the fpregs in the eager-FPU case.
@@ -340,61 +316,97 @@ void fpu__drop(struct fpu *fpu)
* Clear FPU registers by setting them up from the init fpstate.
* Caller must do fpregs_[un]lock() around it.
*/
-static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
+static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
if (use_xsave())
- copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
- else if (static_cpu_has(X86_FEATURE_FXSR))
- copy_kernel_to_fxregs(&init_fpstate.fxsave);
+ os_xrstor(&init_fpstate.xsave, features_mask);
+ else if (use_fxsr())
+ fxrstor(&init_fpstate.fxsave);
else
- copy_kernel_to_fregs(&init_fpstate.fsave);
+ frstor(&init_fpstate.fsave);
- if (boot_cpu_has(X86_FEATURE_OSPKE))
- copy_init_pkru_to_fpregs();
+ pkru_write_default();
+}
+
+static inline unsigned int init_fpstate_copy_size(void)
+{
+ if (!use_xsave())
+ return fpu_kernel_xstate_size;
+
+ /* XSAVE(S) just needs the legacy and the xstate header part */
+ return sizeof(init_fpstate.xsave);
}
/*
- * Clear the FPU state back to init state.
- *
- * Called by sys_execve(), by the signal handler code and by various
- * error paths.
+ * Reset current->fpu memory state to the init values.
+ */
+static void fpu_reset_fpstate(void)
+{
+ struct fpu *fpu = &current->thread.fpu;
+
+ fpregs_lock();
+ fpu__drop(fpu);
+ /*
+ * This does not change the actual hardware registers. It just
+ * resets the memory image and sets TIF_NEED_FPU_LOAD so a
+ * subsequent return to usermode will reload the registers from the
+ * task's memory image.
+ *
+ * Do not use fpstate_init() here. Just copy init_fpstate which has
+ * the correct content already except for PKRU.
+ *
+ * PKRU handling does not rely on the xstate when restoring for
+ * user space as PKRU is eagerly written in switch_to() and
+ * flush_thread().
+ */
+ memcpy(&fpu->state, &init_fpstate, init_fpstate_copy_size());
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ fpregs_unlock();
+}
+
+/*
+ * Reset current's user FPU states to the init states. current's
+ * supervisor states, if any, are not modified by this function. The
+ * caller guarantees that the XSTATE header in memory is intact.
*/
-static void fpu__clear(struct fpu *fpu, bool user_only)
+void fpu__clear_user_states(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
- if (!static_cpu_has(X86_FEATURE_FPU)) {
- fpu__drop(fpu);
- fpu__initialize(fpu);
+ fpregs_lock();
+ if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
+ fpu_reset_fpstate();
+ fpregs_unlock();
return;
}
- fpregs_lock();
-
- if (user_only) {
- if (!fpregs_state_valid(fpu, smp_processor_id()) &&
- xfeatures_mask_supervisor())
- copy_kernel_to_xregs(&fpu->state.xsave,
- xfeatures_mask_supervisor());
- copy_init_fpstate_to_fpregs(xfeatures_mask_user());
- } else {
- copy_init_fpstate_to_fpregs(xfeatures_mask_all);
+ /*
+ * Ensure that current's supervisor states are loaded into their
+ * corresponding registers.
+ */
+ if (xfeatures_mask_supervisor() &&
+ !fpregs_state_valid(fpu, smp_processor_id())) {
+ os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
}
+ /* Reset user states in registers. */
+ restore_fpregs_from_init_fpstate(xfeatures_mask_restore_user());
+
+ /*
+ * Now all FPU registers have their desired values. Inform the FPU
+ * state machine that current's FPU registers are in the hardware
+ * registers. The memory image does not need to be updated because
+ * any operation relying on it has to save the registers first when
+ * current's FPU is marked active.
+ */
fpregs_mark_activate();
fpregs_unlock();
}
-void fpu__clear_user_states(struct fpu *fpu)
+void fpu_flush_thread(void)
{
- fpu__clear(fpu, true);
+ fpu_reset_fpstate();
}
-
-void fpu__clear_all(struct fpu *fpu)
-{
- fpu__clear(fpu, false);
-}
-
/*
* Load FPU context before returning to userspace.
*/
@@ -403,7 +415,7 @@ void switch_fpu_return(void)
if (!static_cpu_has(X86_FEATURE_FPU))
return;
- __fpregs_load_activate();
+ fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);
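/*
 * Editor's sketch, not part of the patch above: users of the FPU in kernel
 * context are unaffected by the rename from copy_fpregs_to_fpstate() to
 * save_fpregs_to_fpstate(); they still bracket SIMD usage with
 * kernel_fpu_begin()/kernel_fpu_end(). fpu_section_demo() is a made-up
 * illustration of that pattern.
 */
static void fpu_section_demo(void)
{
	if (!irq_fpu_usable())
		return;

	kernel_fpu_begin();	/* saves current's user FPU register state */
	/* ... SSE/AVX usage goes here ... */
	kernel_fpu_end();	/* user state is reloaded lazily via
				 * TIF_NEED_FPU_LOAD on return to user space */
}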
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 701f196d7c68..64e29927cc32 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -89,7 +89,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
/*
* Boot time FPU feature detection code:
*/
-unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
@@ -135,7 +135,7 @@ static void __init fpu__init_system_generic(void)
* This is inherent to the XSAVE architecture which puts all state
* components into a single, continuous memory block:
*/
-unsigned int fpu_kernel_xstate_size;
+unsigned int fpu_kernel_xstate_size __ro_after_init;
EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
/* Get alignment of the TYPE. */
@@ -216,17 +216,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
fpu_user_xstate_size = fpu_kernel_xstate_size;
}
-/*
- * Find supported xfeatures based on cpu features and command-line input.
- * This must be called after fpu__init_parse_early_param() is called and
- * xfeatures_mask is enumerated.
- */
-u64 __init fpu__get_supported_xfeatures_mask(void)
-{
- return XFEATURE_MASK_USER_SUPPORTED |
- XFEATURE_MASK_SUPERVISOR_SUPPORTED;
-}
-
/* Legacy code to initialize eager fpu mode. */
static void __init fpu__init_system_ctx_switch(void)
{
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index c413756ba89f..66ed317ebc0d 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -2,11 +2,13 @@
/*
* FPU register's regset abstraction, for ptrace, core dumps, etc.
*/
+#include <linux/sched/task_stack.h>
+#include <linux/vmalloc.h>
+
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>
-#include <linux/sched/task_stack.h>
/*
* The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
@@ -26,18 +28,58 @@ int regset_xregset_fpregs_active(struct task_struct *target, const struct user_r
return 0;
}
+/*
+ * The regset get() functions are invoked from:
+ *
+ * - coredump to dump the current task's fpstate. If the current task
+ * owns the FPU then the memory state has to be synchronized and the
+ * FPU register state preserved. Otherwise fpstate is already in sync.
+ *
+ * - ptrace to dump fpstate of a stopped task, in which case the registers
+ * have already been saved to fpstate on context switch.
+ */
+static void sync_fpstate(struct fpu *fpu)
+{
+ if (fpu == &current->thread.fpu)
+ fpu_sync_fpstate(fpu);
+}
+
+/*
+ * Invalidate cached FPU registers before modifying the stopped target
+ * task's fpstate.
+ *
+ * This forces the target task on resume to restore the FPU registers from
+ * modified fpstate. Otherwise the task might skip the restore and operate
+ * with the cached FPU registers which discards the modifications.
+ */
+static void fpu_force_restore(struct fpu *fpu)
+{
+ /*
+ * Only stopped child tasks can be used to modify the FPU
+ * state in the fpstate buffer:
+ */
+ WARN_ON_FPU(fpu == &current->thread.fpu);
+
+ __fpu_invalidate_fpregs_state(fpu);
+}
+
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
struct fpu *fpu = &target->thread.fpu;
- if (!boot_cpu_has(X86_FEATURE_FXSR))
+ if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
- fpu__prepare_read(fpu);
- fpstate_sanitize_xstate(fpu);
+ sync_fpstate(fpu);
+
+ if (!use_xsave()) {
+ return membuf_write(&to, &fpu->state.fxsave,
+ sizeof(fpu->state.fxsave));
+ }
- return membuf_write(&to, &fpu->state.fxsave, sizeof(struct fxregs_state));
+ copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_FX);
+ return 0;
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -45,62 +87,52 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
+ struct user32_fxsr_struct newstate;
int ret;
- if (!boot_cpu_has(X86_FEATURE_FXSR))
+ BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
+
+ if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
- fpu__prepare_write(fpu);
- fpstate_sanitize_xstate(fpu);
+ /* No funny business with partial or oversized writes is permitted. */
+ if (pos != 0 || count != sizeof(newstate))
+ return -EINVAL;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpu->state.fxsave, 0, -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ if (ret)
+ return ret;
- /*
- * mxcsr reserved bits must be masked to zero for security reasons.
- */
- fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
+ /* Do not allow an invalid MXCSR value. */
+ if (newstate.mxcsr & ~mxcsr_feature_mask)
+ return -EINVAL;
- /*
- * update the header bits in the xsave header, indicating the
- * presence of FP and SSE state.
- */
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ fpu_force_restore(fpu);
+
+ /* Copy the state */
+ memcpy(&fpu->state.fxsave, &newstate, sizeof(newstate));
+
+ /* Clear xmm8..15 */
+ BUILD_BUG_ON(sizeof(fpu->state.fxsave.xmm_space) != 16 * 16);
+ memset(&fpu->state.fxsave.xmm_space[8], 0, 8 * 16);
+
+ /* Mark FP and SSE as in use when XSAVE is enabled */
+ if (use_xsave())
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
- return ret;
+ return 0;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
- struct fpu *fpu = &target->thread.fpu;
- struct xregs_state *xsave;
-
- if (!boot_cpu_has(X86_FEATURE_XSAVE))
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
return -ENODEV;
- xsave = &fpu->state.xsave;
-
- fpu__prepare_read(fpu);
+ sync_fpstate(&target->thread.fpu);
- if (using_compacted_format()) {
- copy_xstate_to_kernel(to, xsave);
- return 0;
- } else {
- fpstate_sanitize_xstate(fpu);
- /*
- * Copy the 48 bytes defined by the software into the xsave
- * area in the thread struct, so that we can copy the whole
- * area to user using one user_regset_copyout().
- */
- memcpy(&xsave->i387.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-
- /*
- * Copy the xstate memory layout.
- */
- return membuf_write(&to, xsave, fpu_user_xstate_size);
- }
+ copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
+ return 0;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -108,44 +140,34 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
- struct xregs_state *xsave;
+ struct xregs_state *tmpbuf = NULL;
int ret;
- if (!boot_cpu_has(X86_FEATURE_XSAVE))
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
return -ENODEV;
/*
* A whole standard-format XSAVE buffer is needed:
*/
- if ((pos != 0) || (count < fpu_user_xstate_size))
+ if (pos != 0 || count != fpu_user_xstate_size)
return -EFAULT;
- xsave = &fpu->state.xsave;
-
- fpu__prepare_write(fpu);
+ if (!kbuf) {
+ tmpbuf = vmalloc(count);
+ if (!tmpbuf)
+ return -ENOMEM;
- if (using_compacted_format()) {
- if (kbuf)
- ret = copy_kernel_to_xstate(xsave, kbuf);
- else
- ret = copy_user_to_xstate(xsave, ubuf);
- } else {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
- if (!ret)
- ret = validate_user_xstate_header(&xsave->header);
+ if (copy_from_user(tmpbuf, ubuf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
- /*
- * mxcsr reserved bits must be masked to zero for security reasons.
- */
- xsave->i387.mxcsr &= mxcsr_feature_mask;
-
- /*
- * In case of failure, mark all states as init:
- */
- if (ret)
- fpstate_init(&fpu->state);
+ fpu_force_restore(fpu);
+ ret = copy_uabi_from_kernel_to_xstate(&fpu->state.xsave, kbuf ?: tmpbuf);
+out:
+ vfree(tmpbuf);
return ret;
}
@@ -221,10 +243,10 @@ static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
* FXSR floating point environment conversions.
*/
-void
-convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
+ struct task_struct *tsk,
+ struct fxregs_state *fxsave)
{
- struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -258,6 +280,12 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
memcpy(&to[i], &from[i], sizeof(to[0]));
}
+void
+convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+{
+ __convert_from_fxsr(env, tsk, &tsk->thread.fpu.state.fxsave);
+}
+
void convert_to_fxsr(struct fxregs_state *fxsave,
const struct user_i387_ia32_struct *env)
@@ -290,25 +318,29 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
{
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
+ struct fxregs_state fxsave, *fx;
- fpu__prepare_read(fpu);
+ sync_fpstate(fpu);
- if (!boot_cpu_has(X86_FEATURE_FPU))
+ if (!cpu_feature_enabled(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, to);
- if (!boot_cpu_has(X86_FEATURE_FXSR)) {
+ if (!cpu_feature_enabled(X86_FEATURE_FXSR)) {
return membuf_write(&to, &fpu->state.fsave,
sizeof(struct fregs_state));
}
- fpstate_sanitize_xstate(fpu);
+ if (use_xsave()) {
+ struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) };
- if (to.left == sizeof(env)) {
- convert_from_fxsr(to.p, target);
- return 0;
+ /* Handle init state optimized xstate correctly */
+ copy_xstate_to_uabi_buf(mb, target, XSTATE_COPY_FP);
+ fx = &fxsave;
+ } else {
+ fx = &fpu->state.fxsave;
}
- convert_from_fxsr(&env, target);
+ __convert_from_fxsr(&env, target, fx);
return membuf_write(&to, &env, sizeof(env));
}
@@ -320,31 +352,32 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
struct user_i387_ia32_struct env;
int ret;
- fpu__prepare_write(fpu);
- fpstate_sanitize_xstate(fpu);
+ /* No funny business with partial or oversized writes is permitted. */
+ if (pos != 0 || count != sizeof(struct user_i387_ia32_struct))
+ return -EINVAL;
- if (!boot_cpu_has(X86_FEATURE_FPU))
+ if (!cpu_feature_enabled(X86_FEATURE_FPU))
return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
- if (!boot_cpu_has(X86_FEATURE_FXSR))
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpu->state.fsave, 0,
- -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+ if (ret)
+ return ret;
- if (pos > 0 || count < sizeof(env))
- convert_from_fxsr(&env, target);
+ fpu_force_restore(fpu);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
- if (!ret)
- convert_to_fxsr(&target->thread.fpu.state.fxsave, &env);
+ if (cpu_feature_enabled(X86_FEATURE_FXSR))
+ convert_to_fxsr(&fpu->state.fxsave, &env);
+ else
+ memcpy(&fpu->state.fsave, &env, sizeof(env));
/*
- * update the header bit in the xsave header, indicating the
+ * Update the header bit in the xsave header, indicating the
* presence of FP.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ if (cpu_feature_enabled(X86_FEATURE_XSAVE))
fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
- return ret;
+
+ return 0;
}
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
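/*
 * Editor's sketch, not part of the patch above: the regset paths in this
 * file back PTRACE_GETREGSET/PTRACE_SETREGSET. A user-space consumer of the
 * NT_X86_XSTATE regset (now filled by copy_xstate_to_uabi_buf()) looks
 * roughly like this; the buffer size is a placeholder and error handling
 * is omitted.
 */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

static void read_xstate_demo(pid_t pid)
{
	static uint8_t xbuf[4096];	/* must be >= the kernel's user xstate size */
	struct iovec iov = { .iov_base = xbuf, .iov_len = sizeof(xbuf) };

	/* The target must already be ptrace-stopped by the caller. */
	ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov);
}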
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index b7b92cdf3add..445c57c9c539 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -15,29 +15,30 @@
#include <asm/sigframe.h>
#include <asm/trace/fpu.h>
-static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
+static struct _fpx_sw_bytes fx_sw_reserved __ro_after_init;
+static struct _fpx_sw_bytes fx_sw_reserved_ia32 __ro_after_init;
/*
* Check for the presence of extended state information in the
* user fpstate pointer in the sigcontext.
*/
-static inline int check_for_xstate(struct fxregs_state __user *buf,
- void __user *fpstate,
- struct _fpx_sw_bytes *fx_sw)
+static inline int check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
+ struct _fpx_sw_bytes *fx_sw)
{
int min_xstate_size = sizeof(struct fxregs_state) +
sizeof(struct xstate_header);
+ void __user *fpstate = fxbuf;
unsigned int magic2;
- if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
- return -1;
+ if (__copy_from_user(fx_sw, &fxbuf->sw_reserved[0], sizeof(*fx_sw)))
+ return -EFAULT;
/* Check for the first magic field and other error scenarios. */
if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
fx_sw->xstate_size < min_xstate_size ||
fx_sw->xstate_size > fpu_user_xstate_size ||
fx_sw->xstate_size > fx_sw->extended_size)
- return -1;
+ goto setfx;
/*
* Check for the presence of second magic word at the end of memory
@@ -45,10 +46,18 @@ static inline int check_for_xstate(struct fxregs_state __user *buf,
* fpstate layout with out copying the extended state information
* in the memory layout.
*/
- if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
- || magic2 != FP_XSTATE_MAGIC2)
- return -1;
+ if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)))
+ return -EFAULT;
+ if (likely(magic2 == FP_XSTATE_MAGIC2))
+ return 0;
+setfx:
+ trace_x86_fpu_xstate_check_failed(&current->thread.fpu);
+
+ /* Set the parameters for fx only state */
+ fx_sw->magic1 = 0;
+ fx_sw->xstate_size = sizeof(struct fxregs_state);
+ fx_sw->xfeatures = XFEATURE_MASK_FPSSE;
return 0;
}
@@ -64,7 +73,7 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
fpregs_lock();
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
- copy_fxregs_to_kernel(&tsk->thread.fpu);
+ fxsave(&tsk->thread.fpu.state.fxsave);
fpregs_unlock();
convert_from_fxsr(&env, tsk);
@@ -129,11 +138,11 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
int err;
if (use_xsave())
- err = copy_xregs_to_user(buf);
+ err = xsave_to_user_sigframe(buf);
else if (use_fxsr())
- err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
+ err = fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
else
- err = copy_fregs_to_user((struct fregs_state __user *) buf);
+ err = fnsave_to_user_sigframe((struct fregs_state __user *) buf);
if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
err = -EFAULT;
@@ -188,7 +197,7 @@ retry:
*/
fpregs_lock();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
- __fpregs_load_activate();
+ fpregs_restore_userregs();
pagefault_disable();
ret = copy_fpregs_to_sigframe(buf_fx);
@@ -211,270 +220,202 @@ retry:
return 0;
}
-static inline void
-sanitize_restored_user_xstate(union fpregs_state *state,
- struct user_i387_ia32_struct *ia32_env,
- u64 user_xfeatures, int fx_only)
+static int __restore_fpregs_from_user(void __user *buf, u64 xrestore,
+ bool fx_only)
{
- struct xregs_state *xsave = &state->xsave;
- struct xstate_header *header = &xsave->header;
-
if (use_xsave()) {
- /*
- * Clear all feature bits which are not set in
- * user_xfeatures and clear all extended features
- * for fx_only mode.
- */
- u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
+ u64 init_bv = xfeatures_mask_uabi() & ~xrestore;
+ int ret;
- /*
- * Supervisor state has to be preserved. The sigframe
- * restore can only modify user features, i.e. @mask
- * cannot contain them.
- */
- header->xfeatures &= mask | xfeatures_mask_supervisor();
- }
-
- if (use_fxsr()) {
- /*
- * mscsr reserved bits must be masked to zero for security
- * reasons.
- */
- xsave->i387.mxcsr &= mxcsr_feature_mask;
+ if (likely(!fx_only))
+ ret = xrstor_from_user_sigframe(buf, xrestore);
+ else
+ ret = fxrstor_from_user_sigframe(buf);
- if (ia32_env)
- convert_to_fxsr(&state->fxsave, ia32_env);
+ if (!ret && unlikely(init_bv))
+ os_xrstor(&init_fpstate.xsave, init_bv);
+ return ret;
+ } else if (use_fxsr()) {
+ return fxrstor_from_user_sigframe(buf);
+ } else {
+ return frstor_from_user_sigframe(buf);
}
}
/*
- * Restore the extended state if present. Otherwise, restore the FP/SSE state.
+ * Attempt to restore the FPU registers directly from user memory.
+ * Pagefaults are handled and any errors returned are fatal.
*/
-static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
+ bool fx_only, unsigned int size)
{
- u64 init_bv;
- int r;
+ struct fpu *fpu = &current->thread.fpu;
+ int ret;
- if (use_xsave()) {
- if (fx_only) {
- init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
-
- r = copy_user_to_fxregs(buf);
- if (!r)
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- return r;
- } else {
- init_bv = xfeatures_mask_user() & ~xbv;
-
- r = copy_user_to_xregs(buf, xbv);
- if (!r && unlikely(init_bv))
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- return r;
- }
- } else if (use_fxsr()) {
- return copy_user_to_fxregs(buf);
- } else
- return copy_user_to_fregs(buf);
+retry:
+ fpregs_lock();
+ pagefault_disable();
+ ret = __restore_fpregs_from_user(buf, xrestore, fx_only);
+ pagefault_enable();
+
+ if (unlikely(ret)) {
+ /*
+ * The above did an FPU restore operation, restricted to
+ * the user portion of the registers, and failed, but the
+ * microcode might have modified the FPU registers
+ * nevertheless.
+ *
+ * If the FPU registers do not belong to current, then
+ * invalidate the FPU register state otherwise the task
+ * might preempt current and return to user space with
+ * corrupted FPU registers.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __cpu_invalidate_fpregs_state();
+ fpregs_unlock();
+
+ /* Try to handle #PF, but anything else is fatal. */
+ if (ret != -EFAULT)
+ return -EINVAL;
+
+ ret = fault_in_pages_readable(buf, size);
+ if (!ret)
+ goto retry;
+ return ret;
+ }
+
+ /*
+ * Restore supervisor states: previous context switch etc has done
+ * XSAVES and saved the supervisor states in the kernel buffer from
+ * which they can be restored now.
+ *
+ * It would be optimal to handle this with a single XRSTORS, but
+ * this does not work because the rest of the FPU registers have
+ * been restored from a user buffer directly.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
+ os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
+
+ fpregs_mark_activate();
+ fpregs_unlock();
+ return 0;
}
-static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ bool ia32_fxstate)
{
- struct user_i387_ia32_struct *envp = NULL;
int state_size = fpu_kernel_xstate_size;
- int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
u64 user_xfeatures = 0;
- int fx_only = 0;
- int ret = 0;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-
- if (!buf) {
- fpu__clear_user_states(fpu);
- return 0;
- }
-
- if (!access_ok(buf, size)) {
- ret = -EACCES;
- goto out;
- }
-
- if (!static_cpu_has(X86_FEATURE_FPU)) {
- ret = fpregs_soft_set(current, NULL, 0,
- sizeof(struct user_i387_ia32_struct),
- NULL, buf);
- goto out;
- }
+ bool fx_only = false;
+ int ret;
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
- if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
- /*
- * Couldn't find the extended state information in the
- * memory layout. Restore just the FP/SSE and init all
- * the other extended state.
- */
- state_size = sizeof(struct fxregs_state);
- fx_only = 1;
- trace_x86_fpu_xstate_check_failed(fpu);
- } else {
- state_size = fx_sw_user.xstate_size;
- user_xfeatures = fx_sw_user.xfeatures;
- }
- }
- if ((unsigned long)buf_fx % 64)
- fx_only = 1;
+ ret = check_xstate_in_sigframe(buf_fx, &fx_sw_user);
+ if (unlikely(ret))
+ return ret;
- if (!ia32_fxstate) {
- /*
- * Attempt to restore the FPU registers directly from user
- * memory. For that to succeed, the user access cannot cause
- * page faults. If it does, fall back to the slow path below,
- * going through the kernel buffer with the enabled pagefault
- * handler.
- */
- fpregs_lock();
- pagefault_disable();
- ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
- pagefault_enable();
- if (!ret) {
-
- /*
- * Restore supervisor states: previous context switch
- * etc has done XSAVES and saved the supervisor states
- * in the kernel buffer from which they can be restored
- * now.
- *
- * We cannot do a single XRSTORS here - which would
- * be nice - because the rest of the FPU registers are
- * being restored from a user buffer directly. The
- * single XRSTORS happens below, when the user buffer
- * has been copied to the kernel one.
- */
- if (test_thread_flag(TIF_NEED_FPU_LOAD) &&
- xfeatures_mask_supervisor())
- copy_kernel_to_xregs(&fpu->state.xsave,
- xfeatures_mask_supervisor());
- fpregs_mark_activate();
- fpregs_unlock();
- return 0;
- }
-
- /*
- * The above did an FPU restore operation, restricted to
- * the user portion of the registers, and failed, but the
- * microcode might have modified the FPU registers
- * nevertheless.
- *
- * If the FPU registers do not belong to current, then
- * invalidate the FPU register state otherwise the task might
- * preempt current and return to user space with corrupted
- * FPU registers.
- *
- * In case current owns the FPU registers then no further
- * action is required. The fixup below will handle it
- * correctly.
- */
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- __cpu_invalidate_fpregs_state();
-
- fpregs_unlock();
+ fx_only = !fx_sw_user.magic1;
+ state_size = fx_sw_user.xstate_size;
+ user_xfeatures = fx_sw_user.xfeatures;
} else {
+ user_xfeatures = XFEATURE_MASK_FPSSE;
+ }
+
+ if (likely(!ia32_fxstate)) {
/*
- * For 32-bit frames with fxstate, copy the fxstate so it can
- * be reconstructed later.
+ * Attempt to restore the FPU registers directly from user
+ * memory. For that to succeed, the user access cannot cause page
+ * faults. If it does, fall back to the slow path below, going
+ * through the kernel buffer with the enabled pagefault handler.
*/
- ret = __copy_from_user(&env, buf, sizeof(env));
- if (ret)
- goto out;
- envp = &env;
+ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+ state_size);
}
/*
+ * Copy the legacy state because the FP portion of the FX frame has
+ * to be ignored for histerical raisins. The legacy state is folded
+ * in once the larger state has been copied.
+ */
+ ret = __copy_from_user(&env, buf, sizeof(env));
+ if (ret)
+ return ret;
+
+ /*
* By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
* not modified on context switch and that the xstate is considered
* to be loaded again on return to userland (overriding last_cpu avoids
* the optimisation).
*/
fpregs_lock();
-
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-
/*
- * Supervisor states are not modified by user space input. Save
- * current supervisor states first and invalidate the FPU regs.
+ * If supervisor states are available then save the
+ * hardware state in current's fpstate so that the
+ * supervisor state is preserved. Save the full state for
+ * simplicity. There is no point in optimizing this by only
+ * saving the supervisor states and then shuffling them to
+ * the right place in memory. It's ia32 mode. Shrug.
*/
if (xfeatures_mask_supervisor())
- copy_supervisor_to_kernel(&fpu->state.xsave);
+ os_xsave(&fpu->state.xsave);
set_thread_flag(TIF_NEED_FPU_LOAD);
}
__fpu_invalidate_fpregs_state(fpu);
+ __cpu_invalidate_fpregs_state();
fpregs_unlock();
if (use_xsave() && !fx_only) {
- u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
-
- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+ ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx);
if (ret)
- goto out;
+ return ret;
+ } else {
+ if (__copy_from_user(&fpu->state.fxsave, buf_fx,
+ sizeof(fpu->state.fxsave)))
+ return -EFAULT;
- sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
- fx_only);
+ /* Reject invalid MXCSR values. */
+ if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+ return -EINVAL;
- fpregs_lock();
- if (unlikely(init_bv))
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+ /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
+ if (use_xsave())
+ fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
+ }
+ /* Fold the legacy FP storage */
+ convert_to_fxsr(&fpu->state.fxsave, &env);
+
+ fpregs_lock();
+ if (use_xsave()) {
/*
- * Restore previously saved supervisor xstates along with
- * copied-in user xstates.
+ * Remove all UABI feature bits not set in user_xfeatures
+ * from the memory xstate header which makes the full
+ * restore below bring them into init state. This works for
+ * fx_only mode as well because that has only FP and SSE
+ * set in user_xfeatures.
+ *
+ * Preserve supervisor states!
*/
- ret = copy_kernel_to_xregs_err(&fpu->state.xsave,
- user_xfeatures | xfeatures_mask_supervisor());
+ u64 mask = user_xfeatures | xfeatures_mask_supervisor();
- } else if (use_fxsr()) {
- ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
- sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
- fx_only);
-
- fpregs_lock();
- if (use_xsave()) {
- u64 init_bv;
-
- init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- }
-
- ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
+ fpu->state.xsave.header.xfeatures &= mask;
+ ret = os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all);
} else {
- ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
- if (ret)
- goto out;
-
- fpregs_lock();
- ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
+ ret = fxrstor_safe(&fpu->state.fxsave);
}
- if (!ret)
+
+ if (likely(!ret))
fpregs_mark_activate();
- else
- fpregs_deactivate(fpu);
- fpregs_unlock();
-out:
- if (ret)
- fpu__clear_user_states(fpu);
+ fpregs_unlock();
return ret;
}
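A minimal user-space sketch of what this restore path means for signal handlers: edits made to the FP state saved in the ucontext are loaded back into the registers on sigreturn through the code above. Assumptions (not taken from this patch): x86-64 glibc ucontext layout with uc_mcontext.fpregs->mxcsr; error handling omitted.

        #include <signal.h>
        #include <ucontext.h>

        static void handler(int sig, siginfo_t *si, void *ctx)
        {
                ucontext_t *uc = ctx;

                /* FTZ is MXCSR bit 15; the edit survives sigreturn. */
                if (uc->uc_mcontext.fpregs)
                        uc->uc_mcontext.fpregs->mxcsr |= 0x8000;
        }

        int main(void)
        {
                struct sigaction sa = { .sa_sigaction = handler,
                                        .sa_flags = SA_SIGINFO };

                sigaction(SIGUSR1, &sa, NULL);
                raise(SIGUSR1);
                /* This thread now runs with flush-to-zero enabled. */
                return 0;
        }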
-
static inline int xstate_sigframe_size(void)
{
return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
@@ -486,15 +427,47 @@ static inline int xstate_sigframe_size(void)
*/
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
+ unsigned int size = xstate_sigframe_size();
+ struct fpu *fpu = &current->thread.fpu;
void __user *buf_fx = buf;
- int size = xstate_sigframe_size();
+ bool ia32_fxstate = false;
+ int ret;
+ if (unlikely(!buf)) {
+ fpu__clear_user_states(fpu);
+ return 0;
+ }
+
+ ia32_frame &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
+
+ /*
+ * Only FXSR enabled systems need the FX state quirk.
+ * FRSTOR does not need it and can use the fast path.
+ */
if (ia32_frame && use_fxsr()) {
buf_fx = buf + sizeof(struct fregs_state);
size += sizeof(struct fregs_state);
+ ia32_fxstate = true;
+ }
+
+ if (!access_ok(buf, size)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ if (!IS_ENABLED(CONFIG_X86_64) && !cpu_feature_enabled(X86_FEATURE_FPU)) {
+ ret = fpregs_soft_set(current, NULL, 0,
+ sizeof(struct user_i387_ia32_struct),
+ NULL, buf);
+ } else {
+ ret = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);
}
- return __fpu__restore_sig(buf, buf_fx, size);
+out:
+ if (unlikely(ret))
+ fpu__clear_user_states(fpu);
+ return ret;
}
unsigned long
@@ -513,6 +486,25 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
return sp;
}
+
+unsigned long fpu__get_fpstate_size(void)
+{
+ unsigned long ret = xstate_sigframe_size();
+
+ /*
+ * This space is needed on (most) 32-bit kernels, or when a 32-bit
+ * app is running on a 64-bit kernel. To keep things simple, just
+ * assume the worst case and always include space for 'freg_state',
+ * even for 64-bit apps on 64-bit kernels. This wastes a bit of
+ * space, but keeps the code simple.
+ */
+ if ((IS_ENABLED(CONFIG_IA32_EMULATION) ||
+ IS_ENABLED(CONFIG_X86_32)) && use_fxsr())
+ ret += sizeof(struct fregs_state);
+
+ return ret;
+}
+
/*
* Prepare the SW reserved portion of the fxsave memory layout, indicating
* the presence of the extended state information in the memory layout
@@ -526,7 +518,7 @@ void fpu__init_prepare_fx_sw_frame(void)
fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size;
- fx_sw_reserved.xfeatures = xfeatures_mask_user();
+ fx_sw_reserved.xfeatures = xfeatures_mask_uabi();
fx_sw_reserved.xstate_size = fpu_user_xstate_size;
if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 1cadb2faf740..c8def1b7f8fb 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -59,19 +59,24 @@ static short xsave_cpuid_features[] __initdata = {
* This represents the full set of bits that should ever be set in a kernel
* XSAVE buffer, both supervisor and user xstates.
*/
-u64 xfeatures_mask_all __read_mostly;
-
-static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
-static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
+u64 xfeatures_mask_all __ro_after_init;
+EXPORT_SYMBOL_GPL(xfeatures_mask_all);
+
+static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
+ { [ 0 ... XFEATURE_MAX - 1] = -1};
/*
* The XSAVE area of kernel can be in standard or compacted format;
* it is always in standard format for user mode. This is the user
* mode standard format size used for signal and ptrace frames.
*/
-unsigned int fpu_user_xstate_size;
+unsigned int fpu_user_xstate_size __ro_after_init;
/*
* Return whether the system supports a given xfeature.
@@ -125,103 +130,13 @@ static bool xfeature_is_supervisor(int xfeature_nr)
}
/*
- * When executing XSAVEOPT (or other optimized XSAVE instructions), if
- * a processor implementation detects that an FPU state component is still
- * (or is again) in its initialized state, it may clear the corresponding
- * bit in the header.xfeatures field, and can skip the writeout of registers
- * to the corresponding memory layout.
- *
- * This means that when the bit is zero, the state component might still contain
- * some previous - non-initialized register state.
- *
- * Before writing xstate information to user-space we sanitize those components,
- * to always ensure that the memory layout of a feature will be in the init state
- * if the corresponding header bit is zero. This is to ensure that user-space doesn't
- * see some stale state in the memory layout during signal handling, debugging etc.
- */
-void fpstate_sanitize_xstate(struct fpu *fpu)
-{
- struct fxregs_state *fx = &fpu->state.fxsave;
- int feature_bit;
- u64 xfeatures;
-
- if (!use_xsaveopt())
- return;
-
- xfeatures = fpu->state.xsave.header.xfeatures;
-
- /*
- * None of the feature bits are in init state. So nothing else
- * to do for us, as the memory layout is up to date.
- */
- if ((xfeatures & xfeatures_mask_all) == xfeatures_mask_all)
- return;
-
- /*
- * FP is in init state
- */
- if (!(xfeatures & XFEATURE_MASK_FP)) {
- fx->cwd = 0x37f;
- fx->swd = 0;
- fx->twd = 0;
- fx->fop = 0;
- fx->rip = 0;
- fx->rdp = 0;
- memset(fx->st_space, 0, sizeof(fx->st_space));
- }
-
- /*
- * SSE is in init state
- */
- if (!(xfeatures & XFEATURE_MASK_SSE))
- memset(fx->xmm_space, 0, sizeof(fx->xmm_space));
-
- /*
- * First two features are FPU and SSE, which above we handled
- * in a special way already:
- */
- feature_bit = 0x2;
- xfeatures = (xfeatures_mask_user() & ~xfeatures) >> 2;
-
- /*
- * Update all the remaining memory layouts according to their
- * standard xstate layout, if their header bit is in the init
- * state:
- */
- while (xfeatures) {
- if (xfeatures & 0x1) {
- int offset = xstate_comp_offsets[feature_bit];
- int size = xstate_sizes[feature_bit];
-
- memcpy((void *)fx + offset,
- (void *)&init_fpstate.xsave + offset,
- size);
- }
-
- xfeatures >>= 1;
- feature_bit++;
- }
-}
-
-/*
* Enable the extended processor state save/restore feature.
* Called once per CPU onlining.
*/
void fpu__init_cpu_xstate(void)
{
- u64 unsup_bits;
-
if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
return;
- /*
- * Unsupported supervisor xstates should not be found in
- * the xfeatures mask.
- */
- unsup_bits = xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
- WARN_ONCE(unsup_bits, "x86/fpu: Found unsupported supervisor xstates: 0x%llx\n",
- unsup_bits);
-
- xfeatures_mask_all &= ~XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
cr4_set_bits(X86_CR4_OSXSAVE);
@@ -230,14 +145,14 @@ void fpu__init_cpu_xstate(void)
* managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
* states can be set here.
*/
- xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
/*
* MSR_IA32_XSS sets supervisor states managed by XSAVES.
*/
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
- xfeatures_mask_dynamic());
+ xfeatures_mask_independent());
}
}
@@ -486,7 +401,7 @@ static void __init setup_init_fpu_buf(void)
/*
* Init all the features state with header.xfeatures being 0x0
*/
- copy_kernel_to_xregs_booting(&init_fpstate.xsave);
+ os_xrstor_booting(&init_fpstate.xsave);
/*
* All components are now in init state. Read the state back so
@@ -535,25 +450,11 @@ int xfeature_size(int xfeature_nr)
return eax;
}
-/*
- * 'XSAVES' implies two different things:
- * 1. saving of supervisor/system state
- * 2. using the compacted format
- *
- * Use this function when dealing with the compacted format so
- * that it is obvious which aspect of 'XSAVES' is being handled
- * by the calling code.
- */
-int using_compacted_format(void)
-{
- return boot_cpu_has(X86_FEATURE_XSAVES);
-}
-
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
-int validate_user_xstate_header(const struct xstate_header *hdr)
+static int validate_user_xstate_header(const struct xstate_header *hdr)
{
/* No unknown or supervisor features may be set */
- if (hdr->xfeatures & ~xfeatures_mask_user())
+ if (hdr->xfeatures & ~xfeatures_mask_uabi())
return -EINVAL;
/* Userspace must use the uncompacted format */
@@ -651,7 +552,7 @@ static void check_xstate_against_struct(int nr)
* how large the XSAVE buffer needs to be. We are recalculating
* it to be safe.
*
- * Dynamic XSAVE features allocate their own buffers and are not
+ * Independent XSAVE features allocate their own buffers and are not
* covered by these checks. Only the size of the buffer for task->fpu
* is checked here.
*/
@@ -667,9 +568,9 @@ static void do_extra_xstate_size_checks(void)
check_xstate_against_struct(i);
/*
* Supervisor state components can be managed only by
- * XSAVES, which is compacted-format only.
+ * XSAVES.
*/
- if (!using_compacted_format())
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
XSTATE_WARN_ON(xfeature_is_supervisor(i));
/* Align from the end of the previous feature */
@@ -679,9 +580,9 @@ static void do_extra_xstate_size_checks(void)
* The offset of a given state in the non-compacted
* format is given to us in a CPUID leaf. We check
* them for being ordered (increasing offsets) in
- * setup_xstate_features().
+ * setup_xstate_features(). XSAVES uses compacted format.
*/
- if (!using_compacted_format())
+ if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
paranoid_xstate_size = xfeature_uncompacted_offset(i);
/*
* The compacted-format offset always depends on where
@@ -717,18 +618,18 @@ static unsigned int __init get_xsaves_size(void)
}
/*
- * Get the total size of the enabled xstates without the dynamic supervisor
+ * Get the total size of the enabled xstates without the independent supervisor
* features.
*/
-static unsigned int __init get_xsaves_size_no_dynamic(void)
+static unsigned int __init get_xsaves_size_no_independent(void)
{
- u64 mask = xfeatures_mask_dynamic();
+ u64 mask = xfeatures_mask_independent();
unsigned int size;
if (!mask)
return get_xsaves_size();
- /* Disable dynamic features. */
+ /* Disable independent features. */
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
/*
@@ -737,7 +638,7 @@ static unsigned int __init get_xsaves_size_no_dynamic(void)
*/
size = get_xsaves_size();
- /* Re-enable dynamic features so XSAVES will work on them again. */
+ /* Re-enable independent features so XSAVES will work on them again. */
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
return size;
@@ -780,7 +681,7 @@ static int __init init_xstate_size(void)
xsave_size = get_xsave_size();
if (boot_cpu_has(X86_FEATURE_XSAVES))
- possible_xstate_size = get_xsaves_size_no_dynamic();
+ possible_xstate_size = get_xsaves_size_no_independent();
else
possible_xstate_size = xsave_size;
@@ -821,6 +722,7 @@ void __init fpu__init_system_xstate(void)
{
unsigned int eax, ebx, ecx, edx;
static int on_boot_cpu __initdata = 1;
+ u64 xfeatures;
int err;
int i;
@@ -855,7 +757,7 @@ void __init fpu__init_system_xstate(void)
cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
xfeatures_mask_all |= ecx + ((u64)edx << 32);
- if ((xfeatures_mask_user() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
+ if ((xfeatures_mask_uabi() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
/*
* This indicates that something really unexpected happened
* with the enumeration. Disable XSAVE and try to continue
@@ -874,7 +776,11 @@ void __init fpu__init_system_xstate(void)
xfeatures_mask_all &= ~BIT_ULL(i);
}
- xfeatures_mask_all &= fpu__get_supported_xfeatures_mask();
+ xfeatures_mask_all &= XFEATURE_MASK_USER_SUPPORTED |
+ XFEATURE_MASK_SUPERVISOR_SUPPORTED;
+
+ /* Store it for paranoia check at the end */
+ xfeatures = xfeatures_mask_all;
/* Enable xstate instructions to be able to continue with initialization: */
fpu__init_cpu_xstate();
@@ -886,14 +792,24 @@ void __init fpu__init_system_xstate(void)
* Update info used for ptrace frames; use standard-format size and no
* supervisor xstates:
*/
- update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_user());
+ update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_uabi());
fpu__init_prepare_fx_sw_frame();
setup_init_fpu_buf();
setup_xstate_comp_offsets();
setup_supervisor_only_offsets();
- print_xstate_offset_size();
+ /*
+ * Paranoia check whether something in the setup modified the
+ * xfeatures mask.
+ */
+ if (xfeatures != xfeatures_mask_all) {
+ pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
+ xfeatures, xfeatures_mask_all);
+ goto out_disable;
+ }
+
+ print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
xfeatures_mask_all,
fpu_kernel_xstate_size,
@@ -913,16 +829,16 @@ void fpu__resume_cpu(void)
/*
* Restore XCR0 on xsave capable CPUs:
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE))
- xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());
+ if (cpu_feature_enabled(X86_FEATURE_XSAVE))
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
/*
* Restore IA32_XSS. The same CPUID bit enumerates support
* of XSAVES and MSR_IA32_XSS.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
- xfeatures_mask_dynamic());
+ xfeatures_mask_independent());
}
}
@@ -990,36 +906,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
-/*
- * This wraps up the common operations that need to occur when retrieving
- * data from xsave state. It first ensures that the current task was
- * using the FPU and retrieves the data in to a buffer. It then calculates
- * the offset of the requested field in the buffer.
- *
- * This function is safe to call whether the FPU is in use or not.
- *
- * Note that this only works on the current task.
- *
- * Inputs:
- * @xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
- * XFEATURE_SSE, etc...)
- * Output:
- * address of the state in the xsave area or NULL if the state
- * is not present or is in its 'init state'.
- */
-const void *get_xsave_field_ptr(int xfeature_nr)
-{
- struct fpu *fpu = &current->thread.fpu;
-
- /*
- * fpu__save() takes the CPU's xstate registers
- * and saves them off to the 'fpu memory buffer.
- */
- fpu__save(fpu);
-
- return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
-}
-
#ifdef CONFIG_ARCH_HAS_PKEYS
/*
@@ -1027,17 +913,16 @@ const void *get_xsave_field_ptr(int xfeature_nr)
* rights for @pkey to @init_val.
*/
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
- unsigned long init_val)
+ unsigned long init_val)
{
- u32 old_pkru;
- int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
- u32 new_pkru_bits = 0;
+ u32 old_pkru, new_pkru_bits = 0;
+ int pkey_shift;
/*
* This check implies XSAVE support. OSPKE only gets
* set if we enable XSAVE and we enable PKU in XCR0.
*/
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return -EINVAL;
/*
@@ -1045,7 +930,8 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
* values originating from in-kernel users. Complain
* if a bad value is observed.
*/
- WARN_ON_ONCE(pkey >= arch_max_pkey());
+ if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
+ return -EINVAL;
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
@@ -1054,6 +940,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
new_pkru_bits |= PKRU_WD_BIT;
/* Shift the bits in to the correct place in PKRU for pkey: */
+ pkey_shift = pkey * PKRU_BITS_PER_PKEY;
new_pkru_bits <<= pkey_shift;
/* Get old PKRU and mask off any old bits in place: */
@@ -1067,170 +954,178 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
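To make the PKRU layout concrete: each key owns two bits, AD (access disable) in the low bit and WD (write disable) in the high bit, at offset pkey * 2. A stand-alone sketch of the same computation; the constants mirror the kernel's PKRU_BITS_PER_PKEY/PKRU_AD_BIT/PKRU_WD_BIT but are redefined here purely for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define PKRU_BITS_PER_PKEY      2
        #define PKRU_AD_BIT             0x1
        #define PKRU_WD_BIT             0x2

        /* Recompute PKRU for one key, leaving the other keys untouched. */
        static uint32_t pkru_set_key(uint32_t pkru, int pkey,
                                     int disable_access, int disable_write)
        {
                int shift = pkey * PKRU_BITS_PER_PKEY;
                uint32_t bits = 0;

                if (disable_access)
                        bits |= PKRU_AD_BIT;
                if (disable_write)
                        bits |= PKRU_WD_BIT;

                pkru &= ~((PKRU_AD_BIT | PKRU_WD_BIT) << shift);
                return pkru | (bits << shift);
        }

        int main(void)
        {
                /* Key 5, write-disabled: bits 11:10 = 0b10 -> 0x800. */
                printf("%#x\n", pkru_set_key(0, 5, 0, 1));
                return 0;
        }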
-/*
- * Weird legacy quirk: SSE and YMM states store information in the
- * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
- * area is marked as unused in the xfeatures header, we need to copy
- * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
- */
-static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
+ void *init_xstate, unsigned int size)
{
- if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
- return false;
-
- if (xfeatures & XFEATURE_MASK_FP)
- return false;
-
- return true;
-}
-
-static void fill_gap(struct membuf *to, unsigned *last, unsigned offset)
-{
- if (*last >= offset)
- return;
- membuf_write(to, (void *)&init_fpstate.xsave + *last, offset - *last);
- *last = offset;
-}
-
-static void copy_part(struct membuf *to, unsigned *last, unsigned offset,
- unsigned size, void *from)
-{
- fill_gap(to, last, offset);
- membuf_write(to, from, size);
- *last = offset + size;
+ membuf_write(to, from_xstate ? xstate : init_xstate, size);
}
-/*
- * Convert from kernel XSAVES compacted format to standard format and copy
- * to a kernel-space ptrace buffer.
+/**
+ * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
+ * @to: membuf descriptor
+ * @tsk: The task from which to copy the saved xstate
+ * @copy_mode: The requested copy mode
+ *
+ * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
+ * format, i.e. from the kernel internal hardware dependent storage format
+ * to the requested @copy_mode. UABI XSTATE is always uncompacted!
*
- * It supports partial copy but pos always starts from zero. This is called
- * from xstateregs_get() and there we check the CPU has XSAVES.
+ * It supports partial copy but @to.pos always starts from zero.
*/
-void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
+void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ enum xstate_copy_mode copy_mode)
{
+ const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+ struct xregs_state *xinit = &init_fpstate.xsave;
struct xstate_header header;
- const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
- unsigned size = to.left;
- unsigned last = 0;
+ unsigned int zerofrom;
int i;
- /*
- * The destination is a ptrace buffer; we put in only user xstates:
- */
memset(&header, 0, sizeof(header));
header.xfeatures = xsave->header.xfeatures;
- header.xfeatures &= xfeatures_mask_user();
-
- if (header.xfeatures & XFEATURE_MASK_FP)
- copy_part(&to, &last, 0, off_mxcsr, &xsave->i387);
- if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
- copy_part(&to, &last, off_mxcsr,
- MXCSR_AND_FLAGS_SIZE, &xsave->i387.mxcsr);
- if (header.xfeatures & XFEATURE_MASK_FP)
- copy_part(&to, &last, offsetof(struct fxregs_state, st_space),
- 128, &xsave->i387.st_space);
- if (header.xfeatures & XFEATURE_MASK_SSE)
- copy_part(&to, &last, xstate_offsets[XFEATURE_SSE],
- 256, &xsave->i387.xmm_space);
- /*
- * Fill xsave->i387.sw_reserved value for ptrace frame:
- */
- copy_part(&to, &last, offsetof(struct fxregs_state, sw_reserved),
- 48, xstate_fx_sw_bytes);
- /*
- * Copy xregs_state->header:
- */
- copy_part(&to, &last, offsetof(struct xregs_state, header),
- sizeof(header), &header);
- for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
- /*
- * Copy only in-use xstates:
- */
- if ((header.xfeatures >> i) & 1) {
- void *src = __raw_xsave_addr(xsave, i);
+ /* Mask out the feature bits depending on copy mode */
+ switch (copy_mode) {
+ case XSTATE_COPY_FP:
+ header.xfeatures &= XFEATURE_MASK_FP;
+ break;
- copy_part(&to, &last, xstate_offsets[i],
- xstate_sizes[i], src);
- }
+ case XSTATE_COPY_FX:
+ header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
+ break;
+ case XSTATE_COPY_XSAVE:
+ header.xfeatures &= xfeatures_mask_uabi();
+ break;
}
- fill_gap(&to, &last, size);
-}
-/*
- * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
- * and copy to the target thread. This is called from xstateregs_set().
- */
-int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
-{
- unsigned int offset, size;
- int i;
- struct xstate_header hdr;
+ /* Copy FP state up to MXCSR */
+ copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
+ &xinit->i387, off_mxcsr);
- offset = offsetof(struct xregs_state, header);
- size = sizeof(hdr);
+ /* Copy MXCSR when SSE or YMM are set in the feature mask */
+ copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
+ &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
+ MXCSR_AND_FLAGS_SIZE);
- memcpy(&hdr, kbuf + offset, size);
+ /* Copy the remaining FP state */
+ copy_feature(header.xfeatures & XFEATURE_MASK_FP,
+ &to, &xsave->i387.st_space, &xinit->i387.st_space,
+ sizeof(xsave->i387.st_space));
- if (validate_user_xstate_header(&hdr))
- return -EINVAL;
+ /* Copy the SSE state - shared with YMM, but independently managed */
+ copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
+ &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
+ sizeof(xsave->i387.xmm_space));
- for (i = 0; i < XFEATURE_MAX; i++) {
- u64 mask = ((u64)1 << i);
+ if (copy_mode != XSTATE_COPY_XSAVE)
+ goto out;
- if (hdr.xfeatures & mask) {
- void *dst = __raw_xsave_addr(xsave, i);
+ /* Zero the padding area */
+ membuf_zero(&to, sizeof(xsave->i387.padding));
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
+ /* Copy xsave->i387.sw_reserved */
+ membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
- memcpy(dst, kbuf + offset, size);
- }
- }
+ /* Copy the user space relevant state of @xsave->header */
+ membuf_write(&to, &header, sizeof(header));
- if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
- offset = offsetof(struct fxregs_state, mxcsr);
- size = MXCSR_AND_FLAGS_SIZE;
- memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
- }
+ zerofrom = offsetof(struct xregs_state, extended_state_area);
- /*
- * The state that came in from userspace was user-state only.
- * Mask all the user states out of 'xfeatures':
- */
- xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
+ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+ /*
+ * The ptrace buffer is in non-compacted XSAVE format.
+ * In non-compacted format disabled features still occupy
+ * state space, but there is no state to copy from in the
+ * compacted init_fpstate. The gap tracking will zero this
+ * later.
+ */
+ if (!(xfeatures_mask_uabi() & BIT_ULL(i)))
+ continue;
- /*
- * Add back in the features that came in from userspace:
- */
- xsave->header.xfeatures |= hdr.xfeatures;
+ /*
+ * If there was a feature or alignment gap, zero the space
+ * in the destination buffer.
+ */
+ if (zerofrom < xstate_offsets[i])
+ membuf_zero(&to, xstate_offsets[i] - zerofrom);
+
+ if (i == XFEATURE_PKRU) {
+ struct pkru_state pkru = {0};
+ /*
+ * PKRU is not necessarily up to date in the
+ * thread's XSAVE buffer. Fill this part from the
+ * per-thread storage.
+ */
+ pkru.pkru = tsk->thread.pkru;
+ membuf_write(&to, &pkru, sizeof(pkru));
+ } else {
+ copy_feature(header.xfeatures & BIT_ULL(i), &to,
+ __raw_xsave_addr(xsave, i),
+ __raw_xsave_addr(xinit, i),
+ xstate_sizes[i]);
+ }
+ /*
+ * Keep track of the last copied state in the non-compacted
+ * target buffer for gap zeroing.
+ */
+ zerofrom = xstate_offsets[i] + xstate_sizes[i];
+ }
+
+out:
+ if (to.left)
+ membuf_zero(&to, to.left);
+}
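This is the producer side of the xstate regset: what a debugger gets back from ptrace is always the uncompacted UABI layout with gaps zeroed, regardless of how the kernel stores the state internally. A hedged sketch of the consumer side (assumes a tracee already stopped under ptrace; buffer sizing and error handling omitted):

        #include <elf.h>                /* NT_X86_XSTATE */
        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <sys/uio.h>

        /* Fetch the tracee's xstate in UABI (non-compacted) format. */
        static long read_xstate(pid_t pid, void *buf, size_t size)
        {
                struct iovec iov = { .iov_base = buf, .iov_len = size };

                return ptrace(PTRACE_GETREGSET, pid,
                              (void *)NT_X86_XSTATE, &iov);
        }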
+static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (kbuf) {
+ memcpy(dst, kbuf + offset, size);
+ } else {
+ if (copy_from_user(dst, ubuf + offset, size))
+ return -EFAULT;
+ }
return 0;
}
-/*
- * Convert from a ptrace or sigreturn standard-format user-space buffer to
- * kernel XSAVES format and copy to the target thread. This is called from
- * xstateregs_set(), as well as potentially from the sigreturn() and
- * rt_sigreturn() system calls.
- */
-int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
+
+static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf,
+ const void __user *ubuf)
{
unsigned int offset, size;
- int i;
struct xstate_header hdr;
+ u64 mask;
+ int i;
offset = offsetof(struct xregs_state, header);
- size = sizeof(hdr);
-
- if (__copy_from_user(&hdr, ubuf + offset, size))
+ if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
return -EFAULT;
if (validate_user_xstate_header(&hdr))
return -EINVAL;
+ /* Validate MXCSR when any of the related features is in use */
+ mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
+ if (hdr.xfeatures & mask) {
+ u32 mxcsr[2];
+
+ offset = offsetof(struct fxregs_state, mxcsr);
+ if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
+ return -EFAULT;
+
+ /* Reserved bits in MXCSR must be zero. */
+ if (mxcsr[0] & ~mxcsr_feature_mask)
+ return -EINVAL;
+
+ /* SSE and YMM require MXCSR even when FP is not in use. */
+ if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
+ xsave->i387.mxcsr = mxcsr[0];
+ xsave->i387.mxcsr_mask = mxcsr[1];
+ }
+ }
+
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
@@ -1240,18 +1135,11 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
offset = xstate_offsets[i];
size = xstate_sizes[i];
- if (__copy_from_user(dst, ubuf + offset, size))
+ if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
return -EFAULT;
}
}
- if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
- offset = offsetof(struct fxregs_state, mxcsr);
- size = MXCSR_AND_FLAGS_SIZE;
- if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
- return -EFAULT;
- }
-
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
@@ -1267,130 +1155,94 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
}
/*
- * Save only supervisor states to the kernel buffer. This blows away all
- * old states, and is intended to be used only in __fpu__restore_sig(), where
- * user states are restored from the user buffer.
+ * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
+ * format and copy to the target thread. This is called from
+ * xstateregs_set().
*/
-void copy_supervisor_to_kernel(struct xregs_state *xstate)
+int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
- struct xstate_header *header;
- u64 max_bit, min_bit;
- u32 lmask, hmask;
- int err, i;
-
- if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES)))
- return;
-
- if (!xfeatures_mask_supervisor())
- return;
-
- max_bit = __fls(xfeatures_mask_supervisor());
- min_bit = __ffs(xfeatures_mask_supervisor());
-
- lmask = xfeatures_mask_supervisor();
- hmask = xfeatures_mask_supervisor() >> 32;
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+ return copy_uabi_to_xstate(xsave, kbuf, NULL);
+}
- /* We should never fault when copying to a kernel buffer: */
- if (WARN_ON_FPU(err))
- return;
+/*
+ * Convert from a sigreturn standard-format user-space buffer to kernel
+ * XSAVE[S] format and copy to the target thread. This is called from the
+ * sigreturn() and rt_sigreturn() system calls.
+ */
+int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave,
+ const void __user *ubuf)
+{
+ return copy_uabi_to_xstate(xsave, NULL, ubuf);
+}
- /*
- * At this point, the buffer has only supervisor states and must be
- * converted back to normal kernel format.
- */
- header = &xstate->header;
- header->xcomp_bv |= xfeatures_mask_all;
+static bool validate_xsaves_xrstors(u64 mask)
+{
+ u64 xchk;
+ if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
+ return false;
/*
- * This only moves states up in the buffer. Start with
- * the last state and move backwards so that states are
- * not overwritten until after they are moved. Note:
- * memmove() allows overlapping src/dst buffers.
+ * Validate that this is either a task->fpstate related component
+ * subset or an independent one.
*/
- for (i = max_bit; i >= min_bit; i--) {
- u8 *xbuf = (u8 *)xstate;
+ if (mask & xfeatures_mask_independent())
+ xchk = ~xfeatures_mask_independent();
+ else
+ xchk = ~xfeatures_mask_all;
- if (!((header->xfeatures >> i) & 1))
- continue;
+ if (WARN_ON_ONCE(!mask || mask & xchk))
+ return false;
- /* Move xfeature 'i' into its normal location */
- memmove(xbuf + xstate_comp_offsets[i],
- xbuf + xstate_supervisor_only_offsets[i],
- xstate_sizes[i]);
- }
+ return true;
}
/**
- * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
- * an xsave area
- * @xstate: A pointer to an xsave area
- * @mask: Represent the dynamic supervisor features saved into the xsave area
+ * xsaves - Save selected components to a kernel xstate buffer
+ * @xstate: Pointer to the buffer
+ * @mask: Feature mask to select the components to save
*
- * Only the dynamic supervisor states sets in the mask are saved into the xsave
- * area (See the comment in XFEATURE_MASK_DYNAMIC for the details of dynamic
- * supervisor feature). Besides the dynamic supervisor states, the legacy
- * region and XSAVE header are also saved into the xsave area. The supervisor
- * features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
- * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
+ * The @xstate buffer must be 64 byte aligned and correctly initialized as
+ * XSAVES does not write the full xstate header. Before first use the
+ * buffer should be zeroed, otherwise a subsequent XRSTORS from that buffer
+ * can #GP.
*
- * The xsave area must be 64-bytes aligned.
+ * The feature mask must either be a subset of the independent features or
+ * a subset of the task->fpstate related features.
*/
-void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
+void xsaves(struct xregs_state *xstate, u64 mask)
{
- u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
- u32 lmask, hmask;
int err;
- if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
- return;
-
- if (WARN_ON_FPU(!dynamic_mask))
+ if (!validate_xsaves_xrstors(mask))
return;
- lmask = dynamic_mask;
- hmask = dynamic_mask >> 32;
-
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
-
- /* Should never fault when copying to a kernel buffer */
- WARN_ON_FPU(err);
+ XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
+ WARN_ON_ONCE(err);
}
/**
- * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
- * an xsave area
- * @xstate: A pointer to an xsave area
- * @mask: Represent the dynamic supervisor features restored from the xsave area
+ * xrstors - Restore selected components from a kernel xstate buffer
+ * @xstate: Pointer to the buffer
+ * @mask: Feature mask to select the components to restore
*
- * Only the dynamic supervisor states sets in the mask are restored from the
- * xsave area (See the comment in XFEATURE_MASK_DYNAMIC for the details of
- * dynamic supervisor feature). Besides the dynamic supervisor states, the
- * legacy region and XSAVE header are also restored from the xsave area. The
- * supervisor features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
- * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
+ * The @xstate buffer must be 64 byte aligned and correctly initialized
+ * otherwise XRSTORS from that buffer can #GP.
*
- * The xsave area must be 64-bytes aligned.
+ * Proper usage is to restore the state which was saved with
+ * xsaves() into @xstate.
+ *
+ * The feature mask must either be a subset of the independent features or
+ * a subset of the task->fpstate related features.
*/
-void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
+void xrstors(struct xregs_state *xstate, u64 mask)
{
- u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
- u32 lmask, hmask;
int err;
- if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
- return;
-
- if (WARN_ON_FPU(!dynamic_mask))
+ if (!validate_xsaves_xrstors(mask))
return;
- lmask = dynamic_mask;
- hmask = dynamic_mask >> 32;
-
- XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
-
- /* Should never fault when copying from a kernel buffer */
- WARN_ON_FPU(err);
+ XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
+ WARN_ON_ONCE(err);
}
#ifdef CONFIG_PROC_PID_ARCH_STATUS
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 08651a4e6aa0..42fc41dd0e1f 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
.irq_set_affinity = msi_domain_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_write_msi_msg = hpet_msi_write_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
};
static int hpet_msi_init(struct irq_domain *domain,
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 674906fad43b..68f091ba8443 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -79,9 +79,10 @@ __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
return (struct jump_label_patch){.code = code, .size = size};
}
-static inline void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- int init)
+static __always_inline void
+__jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ int init)
{
const struct jump_label_patch jlp = __jump_label_patch(entry, type);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index c492ad3001ca..b6e046e4b289 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -422,12 +422,6 @@ void *alloc_insn_page(void)
return page;
}
-/* Recover page to RW mode before releasing it */
-void free_insn_page(void *page)
-{
- module_memfree(page);
-}
-
/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e52b208b4641..1d9463e3096b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -87,8 +87,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
-
- return fpu__copy(dst, src);
+ return fpu_clone(dst);
}
/*
@@ -157,11 +156,18 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
/* Kernel thread ? */
if (unlikely(p->flags & PF_KTHREAD)) {
+ p->thread.pkru = pkru_get_init_value();
memset(childregs, 0, sizeof(struct pt_regs));
kthread_frame_init(frame, sp, arg);
return 0;
}
+ /*
+ * Clone current's PKRU value from hardware. tsk->thread.pkru
+ * is only valid when scheduled out.
+ */
+ p->thread.pkru = read_pkru();
+
frame->bx = 0;
*childregs = *current_pt_regs();
childregs->ax = 0;
@@ -199,6 +205,15 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
return ret;
}
+static void pkru_flush_thread(void)
+{
+ /*
+ * If PKRU is enabled the default PKRU value has to be loaded into
+ * the hardware right here (similar to context switch).
+ */
+ pkru_write_default();
+}
+
void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -206,7 +221,8 @@ void flush_thread(void)
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- fpu__clear_all(&tsk->thread.fpu);
+ fpu_flush_thread();
+ pkru_flush_thread();
}
void disable_TSC(void)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index d08307df69ad..ec0d836a13b1 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -41,6 +41,7 @@
#include <linux/syscalls.h>
#include <asm/processor.h>
+#include <asm/pkru.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
@@ -136,7 +137,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
- if (boot_cpu_has(X86_FEATURE_OSPKE))
+ if (cpu_feature_enabled(X86_FEATURE_OSPKE))
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
@@ -339,6 +340,29 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
}
}
+/*
+ * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
+ * is not XSTATE managed on context switch because that would require a
+ * lookup in the task's FPU xsave buffer and require to keep that updated
+ * in various places.
+ */
+static __always_inline void x86_pkru_load(struct thread_struct *prev,
+ struct thread_struct *next)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+
+ /* Stash the prev task's value: */
+ prev->pkru = rdpkru();
+
+ /*
+ * PKRU writes are slightly expensive. Avoid them when not
+ * strictly necessary:
+ */
+ if (prev->pkru != next->pkru)
+ wrpkru(next->pkru);
+}
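For context, a minimal user-space illustration of why PKRU has to be switched per thread: the protection-key rights are a property of the running thread, not of the mapping. Assumptions (not from this patch): glibc's pkey_alloc()/pkey_mprotect()/pkey_set() wrappers; error handling omitted.

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <sys/mman.h>

        int main(void)
        {
                int pkey = pkey_alloc(0, 0);
                void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);

                /* Changes only this thread's PKRU; other threads keep
                 * whatever rights their own PKRU grants for the key. */
                pkey_set(pkey, PKEY_DISABLE_WRITE);

                printf("pkey %d: writes disabled in this thread\n", pkey);
                return 0;
        }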
+
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
struct thread_struct *next)
{
@@ -588,6 +612,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
x86_fsgsbase_load(prev, next);
+ x86_pkru_load(prev, next);
+
/*
* Switch the PDA and FPU contexts.
*/
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 85acd22f8022..bff3a784aec5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -14,6 +14,7 @@
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
+#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
@@ -846,10 +847,7 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = _brk_end;
+ setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);
code_resource.start = __pa_symbol(_text);
code_resource.end = __pa_symbol(_etext)-1;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e12779a2714d..f4d21e470083 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -212,6 +212,11 @@ do { \
* Set up a signal frame.
*/
+/* x86 ABI requires 16-byte alignment */
+#define FRAME_ALIGNMENT 16UL
+
+#define MAX_FRAME_PADDING (FRAME_ALIGNMENT - 1)
+
/*
* Determine which stack to use..
*/
@@ -222,9 +227,9 @@ static unsigned long align_sigframe(unsigned long sp)
* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0.
*/
- sp = ((sp + 4) & -16ul) - 4;
+ sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
#else /* !CONFIG_X86_32 */
- sp = round_down(sp, 16) - 8;
+ sp = round_down(sp, FRAME_ALIGNMENT) - 8;
#endif
return sp;
}
@@ -234,10 +239,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
void __user **fpstate)
{
/* Default to using normal stack */
+ bool nested_altstack = on_sig_stack(regs->sp);
+ bool entering_altstack = false;
unsigned long math_size = 0;
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
- int onsigstack = on_sig_stack(sp);
int ret;
/* redzone */
@@ -246,15 +252,23 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(sp) == 0)
+ /*
+ * This checks nested_altstack via sas_ss_flags(). Sensible
+ * programs use SS_AUTODISARM, which disables that check, and
+ * programs that don't use SS_AUTODISARM get compatible behavior.
+ */
+ if (sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
+ entering_altstack = true;
+ }
} else if (IS_ENABLED(CONFIG_X86_32) &&
- !onsigstack &&
+ !nested_altstack &&
regs->ss != __USER_DS &&
!(ka->sa.sa_flags & SA_RESTORER) &&
ka->sa.sa_restorer) {
/* This is the legacy signal stack switching. */
sp = (unsigned long) ka->sa.sa_restorer;
+ entering_altstack = true;
}
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
@@ -267,8 +281,15 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
- if (onsigstack && !likely(on_sig_stack(sp)))
+ if (unlikely((nested_altstack || entering_altstack) &&
+ !__on_sig_stack(sp))) {
+
+ if (show_unhandled_signals && printk_ratelimit())
+ pr_info("%s[%d] overflowed sigaltstack\n",
+ current->comm, task_pid_nr(current));
+
return (void __user *)-1L;
+ }
/* save i387 and extended state */
ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
@@ -663,6 +684,61 @@ badframe:
return 0;
}
+/*
+ * There are four different struct types for signal frame: sigframe_ia32,
+ * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
+ * -- the largest size. It means the size for 64-bit apps is a bit more
+ * than needed, but this keeps the code simple.
+ */
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+# define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct sigframe_ia32)
+#else
+# define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct rt_sigframe)
+#endif
+
+/*
+ * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
+ * If a signal frame starts at an unaligned address, extra space is required.
+ * This is the max alignment padding, conservatively.
+ */
+#define MAX_XSAVE_PADDING 63UL
+
+/*
+ * The frame data is composed of the following areas and laid out as:
+ *
+ * -------------------------
+ * | alignment padding |
+ * -------------------------
+ * | (f)xsave frame |
+ * -------------------------
+ * | fsave header |
+ * -------------------------
+ * | alignment padding |
+ * -------------------------
+ * | siginfo + ucontext |
+ * -------------------------
+ */
+
+/* max_frame_size tells userspace the worst case signal stack size. */
+static unsigned long __ro_after_init max_frame_size;
+
+void __init init_sigframe_size(void)
+{
+ max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;
+
+ max_frame_size += fpu__get_fpstate_size() + MAX_XSAVE_PADDING;
+
+ /* Userspace expects an aligned size. */
+ max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);
+
+ pr_info("max sigframe size: %lu\n", max_frame_size);
+}
+
+unsigned long get_sigframe_size(void)
+{
+ return max_frame_size;
+}
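max_frame_size is what a user-space runtime ultimately has to budget for when it sets up an alternate signal stack. A hedged sketch of that consumer side, assuming the value is surfaced through the AT_MINSIGSTKSZ auxval (falling back to SIGSTKSZ when it is not; allocation error handling omitted):

        #include <elf.h>
        #include <signal.h>
        #include <stdlib.h>
        #include <sys/auxv.h>

        #ifndef AT_MINSIGSTKSZ
        #define AT_MINSIGSTKSZ  51      /* assumed; older headers lack it */
        #endif

        int main(void)
        {
                unsigned long min = getauxval(AT_MINSIGSTKSZ);
                stack_t ss = { 0 };

                /* Kernel frame plus headroom for the handler itself. */
                ss.ss_size = (min ? min : SIGSTKSZ) + 64 * 1024;
                ss.ss_sp = malloc(ss.ss_size);
                ss.ss_flags = 0;

                return sigaltstack(&ss, NULL);
        }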
+
static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
return IS_ENABLED(CONFIG_IA32_EMULATION) &&
diff --git a/arch/x86/kernel/trace.c b/arch/x86/kernel/trace.c
new file mode 100644
index 000000000000..6b73b6f92ad3
--- /dev/null
+++ b/arch/x86/kernel/trace.c
@@ -0,0 +1,234 @@
+#include <asm/trace/irq_vectors.h>
+#include <linux/trace.h>
+
+#if defined(CONFIG_OSNOISE_TRACER) && defined(CONFIG_X86_LOCAL_APIC)
+/*
+ * trace_intel_irq_entry - record intel specific IRQ entry
+ */
+static void trace_intel_irq_entry(void *data, int vector)
+{
+ osnoise_trace_irq_entry(vector);
+}
+
+/*
+ * trace_intel_irq_exit - record intel specific IRQ exit
+ */
+static void trace_intel_irq_exit(void *data, int vector)
+{
+ char *vector_desc = (char *) data;
+
+ osnoise_trace_irq_exit(vector, vector_desc);
+}
+
+/*
+ * register_intel_irq_tp - Register intel specific IRQ entry tracepoints
+ */
+int osnoise_arch_register(void)
+{
+ int ret;
+
+ ret = register_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ if (ret)
+ goto out_timer_entry;
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ ret = register_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_timer_exit;
+
+ ret = register_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ if (ret)
+ goto out_thermal_entry;
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+#ifdef CONFIG_X86_MCE_AMD
+ ret = register_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_thermal_exit;
+
+ ret = register_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ if (ret)
+ goto out_deferred_entry;
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ ret = register_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_deferred_exit;
+
+ ret = register_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ if (ret)
+ goto out_threshold_entry;
+#endif /* CONFIG_X86_MCE_THRESHOLD */
+
+#ifdef CONFIG_SMP
+ ret = register_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_threshold_exit;
+
+ ret = register_trace_call_function_single_exit(trace_intel_irq_exit,
+ "call_function_single");
+ if (ret)
+ goto out_call_function_single_entry;
+
+ ret = register_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_single_exit;
+
+ ret = register_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ if (ret)
+ goto out_call_function_entry;
+
+ ret = register_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_exit;
+
+ ret = register_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ if (ret)
+ goto out_reschedule_entry;
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_IRQ_WORK
+ ret = register_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_reschedule_exit;
+
+ ret = register_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ if (ret)
+ goto out_irq_work_entry;
+#endif
+
+ ret = register_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_irq_work_exit;
+
+ ret = register_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ if (ret)
+ goto out_x86_ipi_entry;
+
+ ret = register_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_x86_ipi_exit;
+
+ ret = register_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ if (ret)
+ goto out_error_apic_entry;
+
+ ret = register_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_error_apic_exit;
+
+ ret = register_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ if (ret)
+ goto out_spurious_apic_entry;
+
+ return 0;
+
+out_spurious_apic_entry:
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+out_error_apic_exit:
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+out_error_apic_entry:
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+out_x86_ipi_exit:
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+out_x86_ipi_entry:
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+out_irq_work_exit:
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+out_irq_work_entry:
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+out_reschedule_exit:
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+out_reschedule_entry:
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+out_call_function_exit:
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+out_call_function_entry:
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+out_call_function_single_exit:
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+out_call_function_single_entry:
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+out_threshold_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+out_threshold_entry:
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+out_deferred_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+out_deferred_entry:
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+out_thermal_exit:
+#endif /* CONFIG_X86_MCE_AMD */
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+out_thermal_entry:
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+out_timer_exit:
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+out_timer_entry:
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+out_err:
+ return -EINVAL;
+}
+
+void osnoise_arch_unregister(void)
+{
+ unregister_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+}
+#endif /* CONFIG_OSNOISE_TRACER && CONFIG_X86_LOCAL_APIC */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ed540e09a399..a58800973aed 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -1046,9 +1046,10 @@ static void math_error(struct pt_regs *regs, int trapnr)
}
/*
- * Save the info for the exception handler and clear the error.
+ * Synchronize the FPU register state to the memory register state
+ * if necessary. This allows the exception handler to inspect it.
*/
- fpu__save(fpu);
+ fpu_sync_fpstate(fpu);
task->thread.trap_nr = trapnr;
task->thread.error_code = 0;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c42613cfb5ba..fe03bd978761 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_after_set_cpuid(vcpu);
}
-static int is_efer_nx(void)
-{
- return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
- int i;
- struct kvm_cpuid_entry2 *e, *entry;
-
- entry = NULL;
- for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
- e = &vcpu->arch.cpuid_entries[i];
- if (e->function == 0x80000001) {
- entry = e;
- break;
- }
- }
- if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
- cpuid_entry_clear(entry, X86_FEATURE_NX);
- printk(KERN_INFO "kvm: guest NX capability removed\n");
- }
-}
-
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_entries = e2;
vcpu->arch.cpuid_nent = cpuid->nent;
- cpuid_fix_nx_cap(vcpu);
kvm_update_cpuid_runtime(vcpu);
kvm_vcpu_after_set_cpuid(vcpu);
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
void kvm_set_cpu_caps(void)
{
- unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
unsigned int f_gbpages = F(GBPAGES);
unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* Reserved */ |
- f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+ F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
);
@@ -765,7 +739,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
edx.split.bit_width_fixed = cap.bit_width_fixed;
- edx.split.anythread_deprecated = 1;
+ if (cap.version)
+ edx.split.anythread_deprecated = 1;
edx.split.reserved1 = 0;
edx.split.reserved2 = 0;
@@ -940,8 +915,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
unsigned phys_as = entry->eax & 0xff;
- if (!g_phys_as)
+ /*
+ * If TDP (NPT) is disabled, use the adjusted host MAXPHYADDR as
+ * the guest operates in the same PA space as the host, i.e.
+ * reductions in MAXPHYADDR for memory encryption affect shadow
+ * paging, too.
+ *
+ * If TDP is enabled but an explicit guest MAXPHYADDR is not
+ * provided, use the raw bare metal MAXPHYADDR as reductions to
+ * the HPAs do not affect GPAs.
+ */
+ if (!tdp_enabled)
+ g_phys_as = boot_cpu_data.x86_phys_bits;
+ else if (!g_phys_as)
g_phys_as = phys_as;
+
entry->eax = g_phys_as | (virt_as << 8);
entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0008_EBX);
@@ -964,12 +952,18 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
case 0x8000001a:
case 0x8000001e:
break;
- /* Support memory encryption cpuid if host supports it */
case 0x8000001F:
- if (!kvm_cpu_cap_has(X86_FEATURE_SEV))
+ if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
- else
+ } else {
cpuid_entry_override(entry, CPUID_8000_001F_EAX);
+
+ /*
+ * Enumerate '0' for "PA bits reduction"; the adjusted
+ * MAXPHYADDR is enumerated directly (see 0x80000008).
+ */
+ entry->ebx &= ~GENMASK(11, 6);
+ }
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index b07592ca92f0..41d2a53c5dea 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1933,7 +1933,7 @@ ret_success:
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *entry;
- struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ struct kvm_vcpu_hv *hv_vcpu;
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
@@ -2016,6 +2016,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
+ trace_kvm_hv_hypercall_done(result);
kvm_hv_hypercall_set_result(vcpu, result);
++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
@@ -2139,6 +2140,7 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_hv_hcall hc;
u64 ret = HV_STATUS_SUCCESS;
@@ -2173,17 +2175,25 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
hc.rep = !!(hc.rep_cnt || hc.rep_idx);
- if (hc.fast && is_xmm_fast_hypercall(&hc))
- kvm_hv_hypercall_read_xmm(&hc);
-
trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
hc.ingpa, hc.outgpa);
- if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+ if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
ret = HV_STATUS_ACCESS_DENIED;
goto hypercall_complete;
}
+ if (hc.fast && is_xmm_fast_hypercall(&hc)) {
+ if (unlikely(hv_vcpu->enforce_cpuid &&
+ !(hv_vcpu->cpuid_cache.features_edx &
+ HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ kvm_hv_hypercall_read_xmm(&hc);
+ }
+
switch (hc.code) {
case HVCALL_NOTIFY_LONG_SPIN_WAIT:
if (unlikely(hc.rep)) {
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 698969e18fe3..ff005fe738a4 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 660401700075..11e4065e1617 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -43,13 +43,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
- u8 vectors[KVM_MAX_VCPU_ID];
+ u8 vectors[KVM_MAX_VCPU_ID + 1];
};
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 845d114ae075..47b765270239 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -53,6 +53,8 @@
#include <asm/kvm_page_track.h>
#include "trace.h"
+#include "paging.h"
+
extern bool itlb_multihit_kvm_mitigation;
int __read_mostly nx_huge_pages = -1;
@@ -1642,7 +1644,7 @@ static int is_empty_shadow_page(u64 *spt)
* aggregate version in order to make the slab shrinker
* faster
*/
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{
kvm->arch.n_used_mmu_pages += nr;
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2533,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{
struct kvm_mmu_page *sp;
+ bool locked = false;
/*
* Force write-protection if the page is being tracked. Note, the page
@@ -2555,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
if (sp->unsync)
continue;
+ /*
+ * TDP MMU page faults require an additional spinlock as they
+ * run with mmu_lock held for read, not write, and the unsync
+ * logic is not thread safe. Take the spinlock regardless of
+ * the MMU type to avoid extra conditionals/parameters; there's
+ * no meaningful penalty if mmu_lock is held for write.
+ */
+ if (!locked) {
+ locked = true;
+ spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+ /*
+ * Recheck after taking the spinlock, a different vCPU
+ * may have since marked the page unsync. A false
+ * positive on the unprotected check above is not
+ * possible as clearing sp->unsync _must_ hold mmu_lock
+ * for write, i.e. unsync cannot transition from 0->1
+ * while this CPU holds mmu_lock for read (or write).
+ */
+ if (READ_ONCE(sp->unsync))
+ continue;
+ }
+
WARN_ON(sp->role.level != PG_LEVEL_4K);
kvm_unsync_page(vcpu, sp);
}
+ if (locked)
+ spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
/*
* We need to ensure that the marking of unsync pages is visible
@@ -5535,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+ spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
if (!kvm_mmu_init_tdp_mmu(kvm))
/*
* No smp_load/store wrappers needed here as we are in
diff --git a/arch/x86/kvm/mmu/paging.h b/arch/x86/kvm/mmu/paging.h
new file mode 100644
index 000000000000..de8ab323bb70
--- /dev/null
+++ b/arch/x86/kvm/mmu/paging.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Shadow paging constants/helpers that don't need to be #undef'd. */
+#ifndef __KVM_X86_PAGING_H
+#define __KVM_X86_PAGING_H
+
+#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#define PT64_LVL_ADDR_MASK(level) \
+ (GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
+ * PT64_LEVEL_BITS))) - 1))
+#define PT64_LVL_OFFSET_MASK(level) \
+ (GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
+ * PT64_LEVEL_BITS))) - 1))
+#endif /* __KVM_X86_PAGING_H */
+
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 490a028ddabe..ee044d357b5f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -24,7 +24,7 @@
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
- #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+ #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
@@ -57,7 +57,7 @@
#define pt_element_t u64
#define guest_walker guest_walkerEPT
#define FNAME(name) ept_##name
- #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+ #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 7a5ce9314107..eb7b227fc6cf 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -38,12 +38,6 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
-#define PT64_LVL_ADDR_MASK(level) \
- (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
- * PT64_LEVEL_BITS))) - 1))
-#define PT64_LVL_OFFSET_MASK(level) \
- (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
- * PT64_LEVEL_BITS))) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 0853370bd811..d80cb122b5f3 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
if (!kvm->arch.tdp_mmu_enabled)
return;
+ WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/*
@@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared)
{
- gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
list_del_rcu(&root->link);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
- zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+ zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
@@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, bool can_yield, bool flush,
bool shared)
{
+ gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+ bool zap_all = (start == 0 && end >= max_gfn_host);
struct tdp_iter iter;
+ /*
+ * No need to try to step down in the iterator when zapping all SPTEs,
+ * zapping the top-level non-leaf SPTEs will recurse on their children.
+ */
+ int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
+ /*
+ * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
+ * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+ * and so KVM will never install a SPTE for such addresses.
+ */
+ end = min(end, max_gfn_host);
+
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
rcu_read_lock();
- tdp_root_for_each_pte(iter, root, start, end) {
+ for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+ min_level, start, end) {
retry:
if (can_yield &&
tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -744,9 +759,10 @@ retry:
/*
* If this is a non-last-level SPTE that covers a larger range
* than should be zapped, continue, and zap the mappings at a
- * lower level.
+ * lower level, except when zapping all SPTEs.
*/
- if ((iter.gfn < start ||
+ if (!zap_all &&
+ (iter.gfn < start ||
iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
!is_last_spte(iter.old_spte, iter.level))
continue;
@@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
- gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
bool flush = false;
int i;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+ flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
flush, false);
if (flush)
@@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
*/
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
- gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
struct kvm_mmu_page *next_root;
struct kvm_mmu_page *root;
bool flush = false;
@@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
rcu_read_unlock();
- flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
- true);
+ flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
/*
* Put the reference acquired in
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 1d01da64c333..a8ad78a2faa1 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -646,7 +646,7 @@ out:
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *vmcb = svm->vmcb;
+ struct vmcb *vmcb = svm->vmcb01.ptr;
bool activated = kvm_vcpu_apicv_active(vcpu);
if (!enable_apicv)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 21d03e3a5dfd..e5515477c30a 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -154,6 +154,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
+
+ /* If SMI is not intercepted, ignore guest SMI intercept as well */
+ if (!intercept_smi)
+ vmcb_clr_intercept(c, INTERCEPT_SMI);
+
+ vmcb_set_intercept(c, INTERCEPT_VMLOAD);
+ vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}
static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -304,8 +311,8 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
return true;
}
-static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
- struct vmcb_control_area *control)
+void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+ struct vmcb_control_area *control)
{
copy_vmcb_control_area(&svm->nested.ctl, control);
@@ -499,7 +506,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
- const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+ const u32 int_ctl_vmcb01_bits =
+ V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+ const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+
struct kvm_vcpu *vcpu = &svm->vcpu;
/*
@@ -511,7 +522,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
* Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
* avic_physical_id.
*/
- WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+ WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
/* Copied from vmcb01. msrpm_base can be overwritten later. */
svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
@@ -531,8 +542,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
svm->vmcb->control.int_ctl =
- (svm->nested.ctl.int_ctl & ~mask) |
- (svm->vmcb01.ptr->control.int_ctl & mask);
+ (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+ (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
@@ -618,6 +629,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
u64 vmcb12_gpa;
+ if (!svm->nested.hsave_msr) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
if (is_smm(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -692,7 +708,28 @@ out:
return ret;
}
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+/* Copy state save area fields which are handled by VMRUN */
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+ struct vmcb_save_area *from_save)
+{
+ to_save->es = from_save->es;
+ to_save->cs = from_save->cs;
+ to_save->ss = from_save->ss;
+ to_save->ds = from_save->ds;
+ to_save->gdtr = from_save->gdtr;
+ to_save->idtr = from_save->idtr;
+ to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
+ to_save->efer = from_save->efer;
+ to_save->cr0 = from_save->cr0;
+ to_save->cr3 = from_save->cr3;
+ to_save->cr4 = from_save->cr4;
+ to_save->rax = from_save->rax;
+ to_save->rsp = from_save->rsp;
+ to_save->rip = from_save->rip;
+ to_save->cpl = 0;
+}
+
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
to_vmcb->save.fs = from_vmcb->save.fs;
to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1355,28 +1392,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
- svm->vmcb01.ptr->save.es = save->es;
- svm->vmcb01.ptr->save.cs = save->cs;
- svm->vmcb01.ptr->save.ss = save->ss;
- svm->vmcb01.ptr->save.ds = save->ds;
- svm->vmcb01.ptr->save.gdtr = save->gdtr;
- svm->vmcb01.ptr->save.idtr = save->idtr;
- svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
- svm->vmcb01.ptr->save.efer = save->efer;
- svm->vmcb01.ptr->save.cr0 = save->cr0;
- svm->vmcb01.ptr->save.cr3 = save->cr3;
- svm->vmcb01.ptr->save.cr4 = save->cr4;
- svm->vmcb01.ptr->save.rax = save->rax;
- svm->vmcb01.ptr->save.rsp = save->rsp;
- svm->vmcb01.ptr->save.rip = save->rip;
- svm->vmcb01.ptr->save.cpl = 0;
-
+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
nested_load_control_from_vmcb12(svm, ctl);
svm_switch_vmcb(svm, &svm->nested.vmcb02);
-
nested_vmcb02_prepare_control(svm);
-
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 8d36f0c73071..7fbce342eec4 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -19,6 +19,7 @@
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>
+#include <asm/pkru.h>
#include <asm/trapnr.h>
#include "x86.h"
@@ -63,6 +64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
+static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
@@ -77,11 +79,11 @@ struct enc_region {
/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
{
- int ret, pos, error = 0;
+ int ret, asid, error = 0;
/* Check if there are any ASIDs to reclaim before performing a flush */
- pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
- if (pos >= max_asid)
+ asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+ if (asid > max_asid)
return -EBUSY;
/*
@@ -114,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
- max_sev_asid);
- bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+ nr_asids);
+ bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
return true;
}
static int sev_asid_new(struct kvm_sev_info *sev)
{
- int pos, min_asid, max_asid, ret;
+ int asid, min_asid, max_asid, ret;
bool retry = true;
enum misc_res_type type;
@@ -142,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
* SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
* SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
*/
- min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+ min_asid = sev->es_active ? 1 : min_sev_asid;
max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
- pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
- if (pos >= max_asid) {
+ asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+ if (asid > max_asid) {
if (retry && __sev_recycle_asids(min_asid, max_asid)) {
retry = false;
goto again;
@@ -156,11 +158,11 @@ again:
goto e_uncharge;
}
- __set_bit(pos, sev_asid_bitmap);
+ __set_bit(asid, sev_asid_bitmap);
mutex_unlock(&sev_bitmap_lock);
- return pos + 1;
+ return asid;
e_uncharge:
misc_cg_uncharge(type, sev->misc_cg, 1);
put_misc_cg(sev->misc_cg);
@@ -178,17 +180,16 @@ static int sev_get_asid(struct kvm *kvm)
static void sev_asid_free(struct kvm_sev_info *sev)
{
struct svm_cpu_data *sd;
- int cpu, pos;
+ int cpu;
enum misc_res_type type;
mutex_lock(&sev_bitmap_lock);
- pos = sev->asid - 1;
- __set_bit(pos, sev_reclaim_asid_bitmap);
+ __set_bit(sev->asid, sev_reclaim_asid_bitmap);
for_each_possible_cpu(cpu) {
sd = per_cpu(svm_data, cpu);
- sd->sev_vmcbs[pos] = NULL;
+ sd->sev_vmcbs[sev->asid] = NULL;
}
mutex_unlock(&sev_bitmap_lock);
@@ -1271,8 +1272,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0);
- if (!guest_page)
- return -EFAULT;
+ if (IS_ERR(guest_page))
+ return PTR_ERR(guest_page);
/* allocate memory for header and transport buffer */
ret = -ENOMEM;
@@ -1309,8 +1310,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
}
/* Copy packet header to userspace. */
- ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
- params.hdr_len);
+ if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+ params.hdr_len))
+ ret = -EFAULT;
e_free_trans_data:
kfree(trans_data);
@@ -1462,11 +1464,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.trans_len = params.trans_len;
/* Pin guest memory */
- ret = -EFAULT;
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0);
- if (!guest_page)
+ if (IS_ERR(guest_page)) {
+ ret = PTR_ERR(guest_page);
goto e_free_trans;
+ }
/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
@@ -1854,12 +1857,17 @@ void __init sev_hardware_setup(void)
min_sev_asid = edx;
sev_me_mask = 1UL << (ebx & 0x3f);
- /* Initialize SEV ASID bitmaps */
- sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+ /*
+ * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
+ * even though it's never used, so that the bitmap is indexed by the
+ * actual ASID.
+ */
+ nr_asids = max_sev_asid + 1;
+ sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
if (!sev_asid_bitmap)
goto out;
- sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+ sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
if (!sev_reclaim_asid_bitmap) {
bitmap_free(sev_asid_bitmap);
sev_asid_bitmap = NULL;
@@ -1904,7 +1912,7 @@ void sev_hardware_teardown(void)
return;
/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
- sev_flush_asids(0, max_sev_asid);
+ sev_flush_asids(1, max_sev_asid);
bitmap_free(sev_asid_bitmap);
bitmap_free(sev_reclaim_asid_bitmap);
@@ -1918,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
if (!sev_enabled)
return 0;
- sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+ sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
if (!sd->sev_vmcbs)
return -ENOMEM;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8834822c00cd..69639f9624f5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -198,6 +198,11 @@ module_param(avic, bool, 0444);
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
+
+bool intercept_smi = true;
+module_param(intercept_smi, bool, 0444);
+
+
static bool svm_gp_erratum_intercept = true;
static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -1185,7 +1190,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_set_intercept(svm, INTERCEPT_INTR);
svm_set_intercept(svm, INTERCEPT_NMI);
- svm_set_intercept(svm, INTERCEPT_SMI);
+
+ if (intercept_smi)
+ svm_set_intercept(svm, INTERCEPT_SMI);
+
svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
svm_set_intercept(svm, INTERCEPT_RDPMC);
svm_set_intercept(svm, INTERCEPT_CPUID);
@@ -1398,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
goto error_free_vmsa_page;
}
- svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
svm->vmcb01.ptr = page_address(vmcb01_page);
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
@@ -1411,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm_switch_vmcb(svm, &svm->vmcb01);
init_vmcb(vcpu);
+ svm_vcpu_init_msrpm(vcpu, svm->msrpm);
+
svm_init_osvw(vcpu);
vcpu->arch.microcode_version = 0x01000065;
@@ -1560,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm)
{
struct vmcb_control_area *control;
- /* The following fields are ignored when AVIC is enabled */
- WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+ /*
+ * The following fields are ignored when AVIC is enabled
+ */
+ WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
+
svm_set_intercept(svm, INTERCEPT_VINTR);
/*
@@ -1578,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)
static void svm_clear_vintr(struct vcpu_svm *svm)
{
- const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
svm_clr_intercept(svm, INTERCEPT_VINTR);
/* Drop int_ctl fields related to VINTR injection. */
- svm->vmcb->control.int_ctl &= mask;
+ svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
if (is_guest_mode(&svm->vcpu)) {
- svm->vmcb01.ptr->control.int_ctl &= mask;
+ svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
(svm->nested.ctl.int_ctl & V_TPR_MASK));
- svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+
+ svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+ V_IRQ_INJECTION_BITS_MASK;
}
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -1923,7 +1935,7 @@ static int npf_interception(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+ u64 fault_address = svm->vmcb->control.exit_info_2;
u64 error_code = svm->vmcb->control.exit_info_1;
trace_kvm_page_fault(fault_address, error_code);
@@ -2106,6 +2118,11 @@ static int nmi_interception(struct kvm_vcpu *vcpu)
return 1;
}
+static int smi_interception(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
static int intr_interception(struct kvm_vcpu *vcpu)
{
++vcpu->stat.irq_exits;
@@ -2134,11 +2151,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
ret = kvm_skip_emulated_instruction(vcpu);
if (vmload) {
- nested_svm_vmloadsave(vmcb12, svm->vmcb);
+ svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
svm->sysenter_eip_hi = 0;
svm->sysenter_esp_hi = 0;
- } else
- nested_svm_vmloadsave(svm->vmcb, vmcb12);
+ } else {
+ svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+ }
kvm_vcpu_unmap(vcpu, &map, true);
@@ -2941,7 +2959,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm_disable_lbrv(vcpu);
break;
case MSR_VM_HSAVE_PA:
- svm->nested.hsave_msr = data;
+ /*
+ * Old kernels did not validate the value written to
+ * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
+ * value so that buggy or malicious guests originating from
+ * those kernels can still be live migrated.
+ */
+ if (!msr->host_initiated && !page_address_valid(vcpu, data))
+ return 1;
+
+ svm->nested.hsave_msr = data & PAGE_MASK;
break;
case MSR_VM_CR:
return svm_set_vm_cr(vcpu, data);
@@ -3080,8 +3107,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
[SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
- [SVM_EXIT_SMI] = kvm_emulate_as_nop,
- [SVM_EXIT_INIT] = kvm_emulate_as_nop,
+ [SVM_EXIT_SMI] = smi_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
[SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
[SVM_EXIT_CPUID] = kvm_emulate_cpuid,
@@ -4288,6 +4314,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_host_map map_save;
int ret;
if (is_guest_mode(vcpu)) {
@@ -4303,6 +4330,29 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
ret = nested_svm_vmexit(svm);
if (ret)
return ret;
+
+ /*
+ * KVM uses VMCB01 to store L1 host state while L2 runs but
+ * VMCB01 is going to be used during SMM and thus the state will
+ * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
+ * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+ * format of the area is identical to the guest save area offset
+ * by 0x400 (matches the offset of 'struct vmcb_save_area'
+ * within 'struct vmcb'). Note: HSAVE area may also be used by
+ * L1 hypervisor to save additional host context (e.g. KVM does
+ * that, see svm_prepare_guest_switch()) which must be
+ * preserved.
+ */
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+ &map_save) == -EINVAL)
+ return 1;
+
+ BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+
+ svm_copy_vmrun_state(map_save.hva + 0x400,
+ &svm->vmcb01.ptr->save);
+
+ kvm_vcpu_unmap(vcpu, &map_save, true);
}
return 0;
}
@@ -4310,13 +4360,14 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_host_map map;
+ struct kvm_host_map map, map_save;
int ret = 0;
if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+ struct vmcb *vmcb12;
if (guest) {
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
@@ -4332,8 +4383,25 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (svm_allocate_nested(svm))
return 1;
- ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva);
+ vmcb12 = map.hva;
+
+ nested_load_control_from_vmcb12(svm, &vmcb12->control);
+
+ ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
kvm_vcpu_unmap(vcpu, &map, true);
+
+ /*
+ * Restore L1 host state from L1 HSAVE area as VMCB01 was
+ * used during SMM (see svm_enter_smm())
+ */
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+ &map_save) == -EINVAL)
+ return 1;
+
+ svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+ map_save.hva + 0x400);
+
+ kvm_vcpu_unmap(vcpu, &map_save, true);
}
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f89b623bb591..bd0fe94c2920 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -31,6 +31,7 @@
#define MSRPM_OFFSETS 16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
+extern bool intercept_smi;
/*
* Clean bits in VMCB.
@@ -463,7 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+ struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
@@ -479,6 +482,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
+void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+ struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
index 9b9a55abc29f..c53b8bf8d013 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.h
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
* as we mark it dirty unconditionally towards end of vcpu
* init phase.
*/
- if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+ if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
hve->hv_enlightenments_control.msr_bitmap)
vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
}
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index b484141ea15b..03ebe368333e 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -92,6 +92,21 @@ TRACE_EVENT(kvm_hv_hypercall,
__entry->outgpa)
);
+TRACE_EVENT(kvm_hv_hypercall_done,
+ TP_PROTO(u64 result),
+ TP_ARGS(result),
+
+ TP_STRUCT__entry(
+ __field(__u64, result)
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ ),
+
+ TP_printk("result 0x%llx", __entry->result)
+);
+
/*
* Tracepoint for Xen hypercall.
*/
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1a52134b0c42..b3f77d18eb5a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
vcpu_put(vcpu);
}
+#define EPTP_PA_MASK GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+ return VALID_PAGE(root_hpa) &&
+ ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+ gpa_t addr)
+{
+ uint i;
+ struct kvm_mmu_root_info *cached_root;
+
+ WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+ if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+ eptp))
+ vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+ }
+}
+
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vm_exit_reason = EXIT_REASON_PML_FULL;
vmx->nested.pml_full = false;
exit_qualification &= INTR_INFO_UNBLOCK_NMI;
- } else if (fault->error_code & PFERR_RSVD_MASK)
- vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
- else
- vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+ } else {
+ if (fault->error_code & PFERR_RSVD_MASK)
+ vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+ else
+ vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+ /*
+ * Although the caller (kvm_inject_emulated_page_fault) would
+ * have already synced the faulting address in the shadow EPT
+ * tables for the current EPTP12, we also need to sync it for
+ * any other cached EPTP02s based on the same EP4TA, since the
+ * TLB associates mappings to the EP4TA rather than the full EPTP.
+ */
+ nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+ fault->address);
+ }
nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
return nested_vmx_succeed(vcpu);
}
-#define EPTP_PA_MASK GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
- return VALID_PAGE(root_hpa) &&
- ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
if (is_nmi(intr_info))
return true;
else if (is_page_fault(intr_info))
- return vcpu->arch.apf.host_apf_flags || !enable_ept;
+ return vcpu->arch.apf.host_apf_flags ||
+ vmx_need_pf_intercept(vcpu);
else if (is_debug(intr_info) &&
vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 3979a947933a..17a1cb4b059d 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -14,8 +14,6 @@
#include "vmx_ops.h"
#include "cpuid.h"
-extern const u32 vmx_msr_index[];
-
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
#define MSR_TYPE_RW 3
@@ -524,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
- return vmx->secondary_exec_control &
+ return secondary_exec_controls_get(vmx) &
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 17468d983fbd..e5d5c5ed7dd4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -66,6 +66,7 @@
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
+#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/internal.h> /* Ugh! */
#include <asm/pvclock.h>
@@ -939,7 +940,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
(kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
(vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
vcpu->arch.pkru != vcpu->arch.host_pkru)
- __write_pkru(vcpu->arch.pkru);
+ write_pkru(vcpu->arch.pkru);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
@@ -953,7 +954,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
(vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
vcpu->arch.pkru = rdpkru();
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
- __write_pkru(vcpu->arch.host_pkru);
+ write_pkru(vcpu->arch.host_pkru);
}
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
@@ -3406,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
break;
case MSR_KVM_ASYNC_PF_ACK:
- if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
return 1;
if (data & 0x1) {
vcpu->arch.apf.pageready_pending = false;
@@ -3745,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.apf.msr_int_val;
break;
case MSR_KVM_ASYNC_PF_ACK:
- if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
return 1;
msr_info->data = 0;
@@ -4357,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
- return kvm_arch_interrupt_allowed(vcpu) &&
- kvm_cpu_accept_dm_intr(vcpu);
+ /*
+ * Do not cause an interrupt window exit if an exception
+ * is pending or an event needs reinjection; userspace
+ * might want to inject the interrupt manually using KVM_SET_REGS
+ * or KVM_SET_SREGS. For that to work, we must be at an
+ * instruction boundary and with no events half-injected.
+ */
+ return (kvm_arch_interrupt_allowed(vcpu) &&
+ kvm_cpu_accept_dm_intr(vcpu) &&
+ !kvm_event_needs_reinjection(vcpu) &&
+ !vcpu->arch.exception.pending);
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
@@ -4704,20 +4714,21 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
+ u32 size, offset, ecx, edx;
u64 xfeature_mask = valid & -valid;
int xfeature_nr = fls64(xfeature_mask) - 1;
- void *src = get_xsave_addr(xsave, xfeature_nr);
-
- if (src) {
- u32 size, offset, ecx, edx;
- cpuid_count(XSTATE_CPUID, xfeature_nr,
- &size, &offset, &ecx, &edx);
- if (xfeature_nr == XFEATURE_PKRU)
- memcpy(dest + offset, &vcpu->arch.pkru,
- sizeof(vcpu->arch.pkru));
- else
- memcpy(dest + offset, src, size);
+ void *src;
+
+ cpuid_count(XSTATE_CPUID, xfeature_nr,
+ &size, &offset, &ecx, &edx);
+ if (xfeature_nr == XFEATURE_PKRU) {
+ memcpy(dest + offset, &vcpu->arch.pkru,
+ sizeof(vcpu->arch.pkru));
+ } else {
+ src = get_xsave_addr(xsave, xfeature_nr);
+ if (src)
+ memcpy(dest + offset, src, size);
}
valid -= xfeature_mask;
@@ -4747,18 +4758,20 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
+ u32 size, offset, ecx, edx;
u64 xfeature_mask = valid & -valid;
int xfeature_nr = fls64(xfeature_mask) - 1;
- void *dest = get_xsave_addr(xsave, xfeature_nr);
-
- if (dest) {
- u32 size, offset, ecx, edx;
- cpuid_count(XSTATE_CPUID, xfeature_nr,
- &size, &offset, &ecx, &edx);
- if (xfeature_nr == XFEATURE_PKRU)
- memcpy(&vcpu->arch.pkru, src + offset,
- sizeof(vcpu->arch.pkru));
- else
+
+ cpuid_count(XSTATE_CPUID, xfeature_nr,
+ &size, &offset, &ecx, &edx);
+
+ if (xfeature_nr == XFEATURE_PKRU) {
+ memcpy(&vcpu->arch.pkru, src + offset,
+ sizeof(vcpu->arch.pkru));
+ } else {
+ void *dest = get_xsave_addr(xsave, xfeature_nr);
+
+ if (dest)
memcpy(dest, src + offset, size);
}
@@ -9597,6 +9610,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
+ } else if (unlikely(hw_breakpoint_active())) {
+ set_debugreg(0, 7);
}
for (;;) {
@@ -9885,7 +9900,7 @@ static void kvm_save_current_fpu(struct fpu *fpu)
memcpy(&fpu->state, &current->thread.fpu.state,
fpu_kernel_xstate_size);
else
- copy_fpregs_to_fpstate(fpu);
+ save_fpregs_to_fpstate(fpu);
}
/* Swap (qemu) user FPU context for the guest FPU context. */
@@ -9901,7 +9916,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
*/
if (vcpu->arch.guest_fpu)
/* PKRU is separately restored in kvm_x86_ops.run. */
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
+ __restore_fpregs_from_fpstate(&vcpu->arch.guest_fpu->state,
~XFEATURE_MASK_PKRU);
fpregs_mark_activate();
@@ -9922,7 +9937,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
if (vcpu->arch.guest_fpu)
kvm_save_current_fpu(vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
+ restore_fpregs_from_fpstate(&vcpu->arch.user_fpu->state);
fpregs_mark_activate();
fpregs_unlock();
@@ -10981,9 +10996,6 @@ int kvm_arch_hardware_setup(void *opaque)
int r;
rdmsrl_safe(MSR_EFER, &host_efer);
- if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
- !(host_efer & EFER_NX)))
- return -EIO;
if (boot_cpu_has(X86_FEATURE_XSAVES))
rdmsrl(MSR_IA32_XSS, host_xss);
diff --git a/arch/x86/math-emu/fpu_proto.h b/arch/x86/math-emu/fpu_proto.h
index 70d35c200945..94c4023092f3 100644
--- a/arch/x86/math-emu/fpu_proto.h
+++ b/arch/x86/math-emu/fpu_proto.h
@@ -144,7 +144,7 @@ extern int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d);
extern int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d);
extern int FPU_round_to_int(FPU_REG *r, u_char tag);
extern u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s);
-extern void frstor(fpu_addr_modes addr_modes, u_char __user *data_address);
+extern void FPU_frstor(fpu_addr_modes addr_modes, u_char __user *data_address);
extern u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d);
extern void fsave(fpu_addr_modes addr_modes, u_char __user *data_address);
extern int FPU_tagof(FPU_REG *ptr);
diff --git a/arch/x86/math-emu/load_store.c b/arch/x86/math-emu/load_store.c
index f15263e158e8..4092df79de4f 100644
--- a/arch/x86/math-emu/load_store.c
+++ b/arch/x86/math-emu/load_store.c
@@ -240,7 +240,7 @@ int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
fix-up operations. */
return 1;
case 022: /* frstor m94/108byte */
- frstor(addr_modes, (u_char __user *) data_address);
+ FPU_frstor(addr_modes, (u_char __user *) data_address);
/* Ensure that the values just loaded are not changed by
fix-up operations. */
return 1;
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index 7ca6417c0c8d..7e4521fbe7da 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -1117,7 +1117,7 @@ u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s)
return s;
}
-void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
+void FPU_frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
{
int i, regnr;
u_char __user *s = fldenv(addr_modes, data_address);
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 121921b2927c..e1664e9f969c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -65,7 +65,7 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));
- __copy_kernel_to_fpregs(&init_fpstate, -1);
+ __restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate());
return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2d27932c9ac7..b2eefdefc108 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -875,7 +875,7 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
/* This code is always called on the current mm */
bool foreign = false;
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return false;
if (error_code & X86_PF_PK)
return true;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e527d829e1ed..ddeaba947eb3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -33,6 +33,7 @@
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
+#include <linux/bootmem_info.h>
#include <asm/processor.h>
#include <asm/bios_ebda.h>
@@ -193,8 +194,8 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
spin_lock(pgt_lock);
if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
- BUG_ON(p4d_page_vaddr(*p4d)
- != p4d_page_vaddr(*p4d_ref));
+ BUG_ON(p4d_pgtable(*p4d)
+ != p4d_pgtable(*p4d_ref));
if (p4d_none(*p4d))
set_p4d(p4d, *p4d_ref);
@@ -1269,7 +1270,7 @@ static struct kcore_list kcore_vsyscall;
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
int i;
for_each_online_node(i)
@@ -1623,7 +1624,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return err;
}
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long nr_pages)
{
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 156cd235659f..ad8a5c586a35 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1134,7 +1134,7 @@ static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
unsigned long start, unsigned long end)
{
if (unmap_pte_range(pmd, start, end))
- if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ if (try_to_free_pmd_page(pud_pgtable(*pud)))
pud_clear(pud);
}
@@ -1178,7 +1178,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
* Try again to free the PMD page if haven't succeeded above.
*/
if (!pud_none(*pud))
- if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ if (try_to_free_pmd_page(pud_pgtable(*pud)))
pud_clear(pud);
}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d27cf69e811d..3481b35cb4ec 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -797,7 +797,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
pte_t *pte;
int i;
- pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd = pud_pgtable(*pud);
pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
if (!pmd_sv)
return 0;
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 4a67b922bce1..e44e938885b7 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -10,7 +10,6 @@
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/mmu_context.h> /* vma_pkey() */
-#include <asm/fpu/internal.h> /* init_fpstate */
int __execute_only_pkey(struct mm_struct *mm)
{
@@ -125,22 +124,6 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
PKRU_AD_KEY(10) | PKRU_AD_KEY(11) | PKRU_AD_KEY(12) |
PKRU_AD_KEY(13) | PKRU_AD_KEY(14) | PKRU_AD_KEY(15);
-/*
- * Called from the FPU code when creating a fresh set of FPU
- * registers. This is called from a very specific context where
- * we know the FPU registers are safe for use and we can use PKRU
- * directly.
- */
-void copy_init_pkru_to_fpregs(void)
-{
- u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value);
- /*
- * Override the PKRU state that came from 'init_fpstate'
- * with the baseline from the process.
- */
- write_pkru(init_pkru_value_snapshot);
-}
-
static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -154,7 +137,6 @@ static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
static ssize_t init_pkru_write_file(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
- struct pkru_state *pk;
char buf[32];
ssize_t len;
u32 new_init_pkru;
@@ -177,10 +159,6 @@ static ssize_t init_pkru_write_file(struct file *file,
return -EINVAL;
WRITE_ONCE(init_pkru_value, new_init_pkru);
- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
- if (!pk)
- return -EINVAL;
- pk->pkru = new_init_pkru;
return count;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e835164189f1..16d76f814e9b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -570,6 +570,9 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
+ if (poke->aux && poke->aux != prog->aux)
+ continue;
+
WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
@@ -1216,6 +1219,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
break;
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ if (boot_cpu_has(X86_FEATURE_XMM2))
+ /* Emit 'lfence' */
+ EMIT3(0x0F, 0xAE, 0xE8);
+ break;
+
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
if (is_ereg(dst_reg))
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 3da88ded6ee3..3bfda5f502cb 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
i++;
break;
}
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ if (boot_cpu_has(X86_FEATURE_XMM2))
+ /* Emit 'lfence' */
+ EMIT3(0x0F, 0xAE, 0xE8);
+ break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index de6bf0e7e8f8..758cbfe55daa 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -461,7 +461,7 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
}
if (size < (16UL<<20) && size != old_size)
- return 0;
+ return false;
if (dev)
dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
@@ -493,7 +493,7 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
&cfg->res, (unsigned long) cfg->address);
}
- return 1;
+ return true;
}
static bool __ref
@@ -501,7 +501,7 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
{
if (!early && !acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
- return 1;
+ return true;
if (dev)
dev_info(dev, FW_INFO
@@ -522,14 +522,14 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
* _CBA method, just assume it's reserved.
*/
if (pci_mmcfg_running_state)
- return 1;
+ return true;
/* Don't try to do this check unless configuration
type 1 is available. how about type 2 ?*/
if (raw_pci_ops)
return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);
- return 0;
+ return false;
}
static void __init pci_mmcfg_reject_broken(int early)
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index f03b64d9cb51..7558139920f8 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -9,6 +9,8 @@
*/
#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
#include <crypto/sha2.h>
#include <asm/purgatory.h>
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
index fd1ab80be0de..a4cf678cf5c8 100644
--- a/arch/x86/tools/chkobjdump.awk
+++ b/arch/x86/tools/chkobjdump.awk
@@ -10,6 +10,7 @@ BEGIN {
/^GNU objdump/ {
verstr = ""
+ gsub(/\(.*\)/, "");
for (i = 3; i <= NF; i++)
if (match($(i), "^[0-9]")) {
verstr = $(i);
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 04c5a44b9682..9ba700dc47de 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -57,12 +57,12 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
- "(__parainstructions|__alt_instructions)(|_end)|"
- "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+ "(__parainstructions|__alt_instructions)(_end)?|"
+ "(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
"__(start|end)_builtin_fw|"
- "__(start|stop)___ksymtab(|_gpl)|"
- "__(start|stop)___kcrctab(|_gpl)|"
+ "__(start|stop)___ksymtab(_gpl)?|"
+ "__(start|stop)___kcrctab(_gpl)?|"
"__(start|stop)___param|"
"__(start|stop)___modver|"
"__(start|stop)___bug_table|"
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index aa9f50fccc5d..c79bd0af2e8c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>
+#include <linux/panic_notifier.h>
#include <xen/xen.h>
#include <xen/features.h>
diff --git a/arch/xtensa/Kbuild b/arch/xtensa/Kbuild
new file mode 100644
index 000000000000..a4e40e534e6a
--- /dev/null
+++ b/arch/xtensa/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 2332b2156993..3878880469d1 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -327,7 +327,6 @@ config XTENSA_PLATFORM_ISS
config XTENSA_PLATFORM_XT2000
bool "XT2000"
- select HAVE_IDE
help
XT2000 is the name of Tensilica's feature-rich emulation platform.
This hardware is capable of running a full Linux distribution.
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index d3a22da4d2c9..eeb2de3a89e5 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -25,7 +25,6 @@
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
@@ -63,7 +62,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
return page;
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index d7fc45c920c2..bd5aeb795567 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -59,7 +59,6 @@
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
#ifdef CONFIG_MMU
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
deleted file mode 100644
index 8e7ed046bfed..000000000000
--- a/arch/xtensa/include/asm/unaligned.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Xtensa doesn't handle unaligned accesses efficiently.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _ASM_XTENSA_UNALIGNED_H
-#define _ASM_XTENSA_UNALIGNED_H
-
-#include <asm/byteorder.h>
-
-#ifdef __LITTLE_ENDIAN
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#else
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* _ASM_XTENSA_UNALIGNED_H */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index e5e643752947..b3a22095371b 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -106,6 +106,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index a3dda25a4e45..21184488c277 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -100,18 +100,12 @@ static void rs_flush_chars(struct tty_struct *tty)
{
}
-static int rs_write_room(struct tty_struct *tty)
+static unsigned int rs_write_room(struct tty_struct *tty)
{
/* Let's say iss can always accept 2K characters.. */
return 2 * 1024;
}
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
- /* the iss doesn't buffer characters */
- return 0;
-}
-
static void rs_hangup(struct tty_struct *tty)
{
/* Stub, once again.. */
@@ -135,7 +129,6 @@ static const struct tty_operations serial_ops = {
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
- .chars_in_buffer = rs_chars_in_buffer,
.hangup = rs_hangup,
.wait_until_sent = rs_wait_until_sent,
.proc_show = rs_proc_show,
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index ed519aee0ec8..d3433e1bb94e 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/printk.h>
#include <linux/string.h>
diff --git a/block/Kconfig b/block/Kconfig
index e71c63eaaf52..fd732aede922 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -151,6 +151,15 @@ config BLK_CGROUP_IOLATENCY
Note, this is an experimental interface and could be changed someday.
+config BLK_CGROUP_FC_APPID
+ bool "Enable support to track FC I/O Traffic across cgroup applications"
+ depends on BLK_CGROUP && NVME_FC
+ help
+ Enabling this option enables the support to track FC I/O traffic across
+ cgroup applications. It enables the Fabric and the storage targets to
+ identify, monitor, and handle FC traffic based on VM tags by inserting
+ application specific identification into the FC frame.
+
config BLK_CGROUP_IOCOST
bool "Enable support for cost model based cgroup IO controller"
depends on BLK_CGROUP=y
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 64053d67a97b..2f2158e05a91 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE
help
MQ version of the deadline IO scheduler.
-config MQ_IOSCHED_DEADLINE_CGROUP
- tristate
- default y
- depends on MQ_IOSCHED_DEADLINE
- depends on BLK_CGROUP
-
config MQ_IOSCHED_KYBER
tristate "Kyber I/O scheduler"
default y
diff --git a/block/Makefile b/block/Makefile
index bfbe4e13ca1e..1e1afa10f869 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -22,8 +22,6 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO) += blk-ioprio.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o
obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
-mq-deadline-y += mq-deadline-main.o
-mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7b06a5fa3cac..31fe9be179d9 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -56,6 +56,8 @@ static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;
+#define BLKG_DESTROY_BATCH_SIZE 64
+
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -422,7 +424,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
static void blkg_destroy_all(struct request_queue *q)
{
struct blkcg_gq *blkg, *n;
+ int count = BLKG_DESTROY_BATCH_SIZE;
+restart:
spin_lock_irq(&q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
@@ -430,6 +434,17 @@ static void blkg_destroy_all(struct request_queue *q)
spin_lock(&blkcg->lock);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
+
+ /*
+ * in order to avoid holding the spin lock for too long, release
+ * it when a batch of blkgs are destroyed.
+ */
+ if (!(--count)) {
+ count = BLKG_DESTROY_BATCH_SIZE;
+ spin_unlock_irq(&q->queue_lock);
+ cond_resched();
+ goto restart;
+ }
}
q->root_blkg = NULL;
@@ -775,6 +790,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
struct blkcg_gq *parent = blkg->parent;
struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
struct blkg_iostat cur, delta;
+ unsigned long flags;
unsigned int seq;
/* fetch the current per-cpu values */
@@ -784,21 +800,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
} while (u64_stats_fetch_retry(&bisc->sync, seq));
/* propagate percpu delta to global */
- u64_stats_update_begin(&blkg->iostat.sync);
+ flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
blkg_iostat_set(&delta, &cur);
blkg_iostat_sub(&delta, &bisc->last);
blkg_iostat_add(&blkg->iostat.cur, &delta);
blkg_iostat_add(&bisc->last, &delta);
- u64_stats_update_end(&blkg->iostat.sync);
+ u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
/* propagate global delta to parent (unless that's root) */
if (parent && parent->parent) {
- u64_stats_update_begin(&parent->iostat.sync);
+ flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
blkg_iostat_set(&delta, &blkg->iostat.cur);
blkg_iostat_sub(&delta, &blkg->iostat.last);
blkg_iostat_add(&parent->iostat.cur, &delta);
blkg_iostat_add(&blkg->iostat.last, &delta);
- u64_stats_update_end(&parent->iostat.sync);
+ u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
}
}
@@ -833,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
memset(&tmp, 0, sizeof(tmp));
for_each_possible_cpu(cpu) {
struct disk_stats *cpu_dkstats;
+ unsigned long flags;
cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
tmp.ios[BLKG_IOSTAT_READ] +=
@@ -849,9 +866,9 @@ static void blkcg_fill_root_iostats(void)
tmp.bytes[BLKG_IOSTAT_DISCARD] +=
cpu_dkstats->sectors[STAT_DISCARD] << 9;
- u64_stats_update_begin(&blkg->iostat.sync);
+ flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
blkg_iostat_set(&blkg->iostat.cur, &tmp);
- u64_stats_update_end(&blkg->iostat.sync);
+ u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}
}
}
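
The blkg_destroy_all() hunk above bounds how long the queue spinlock is held: after every BLKG_DESTROY_BATCH_SIZE destroyed groups it drops the lock, reschedules and restarts from the list head, and the rstat hunks switch to the _irqsave u64_stats helpers so the per-blkg sequence counter is also safe against interrupt context. A minimal single-threaded userspace sketch of the batching idea, with a pthread mutex and sched_yield() standing in for the queue spinlock and cond_resched() (all names and sizes here are illustrative, not kernel APIs):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define DESTROY_BATCH_SIZE	64
#define NITEMS			1000

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nitems = NITEMS;

/* Drain everything, but never hold the lock for more than one batch. */
static void drain_all(void)
{
	int count = DESTROY_BATCH_SIZE;

restart:
	pthread_mutex_lock(&lock);
	while (nitems > 0) {
		nitems--;		/* "destroy" one item under the lock */

		/*
		 * Release the lock after each batch so other lock waiters
		 * (and, in the kernel, the scheduler) get a chance to run,
		 * then start over from the head of the list.
		 */
		if (!(--count)) {
			count = DESTROY_BATCH_SIZE;
			pthread_mutex_unlock(&lock);
			sched_yield();
			goto restart;
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	drain_all();
	printf("remaining items: %d\n", nitems);
	return 0;
}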
diff --git a/block/blk-core.c b/block/blk-core.c
index 514838ccab2d..4f8449b29b21 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->internal_tag = BLK_MQ_NO_TAG;
rq->start_time_ns = ktime_get_ns();
rq->part = NULL;
- refcount_set(&rq->ref, 1);
blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -142,8 +141,6 @@ static const char *const blk_op_name[] = {
REQ_OP_NAME(ZONE_APPEND),
REQ_OP_NAME(WRITE_SAME),
REQ_OP_NAME(WRITE_ZEROES),
- REQ_OP_NAME(SCSI_IN),
- REQ_OP_NAME(SCSI_OUT),
REQ_OP_NAME(DRV_IN),
REQ_OP_NAME(DRV_OUT),
};
@@ -1243,7 +1240,7 @@ static void update_io_ticks(struct block_device *part, unsigned long now,
unsigned long stamp;
again:
stamp = READ_ONCE(part->bd_stamp);
- if (unlikely(stamp != now)) {
+ if (unlikely(time_after(now, stamp))) {
if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
__part_stat_add(part, io_ticks, end ? now - stamp : 1);
}
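
The update_io_ticks() change replaces the plain 'stamp != now' test with time_after(now, stamp), so the stat stamp is only advanced when it genuinely lags behind the current jiffy. time_after() relies on the signed-difference trick, which keeps working across counter wraparound; a small self-contained sketch of that comparison (the helper name below is made up, not the kernel macro):

#include <stdio.h>

/*
 * Wraparound-safe "a is after b" test for a free-running 32-bit tick
 * counter: the difference is interpreted as a signed value, the same
 * idea the kernel's time_after() uses.
 */
static int ticks_after(unsigned int a, unsigned int b)
{
	return (int)(b - a) < 0;
}

int main(void)
{
	unsigned int before_wrap = 0xfffffff0u;
	unsigned int after_wrap  = 0x00000010u;	/* counter wrapped past zero */

	printf("%d\n", ticks_after(after_wrap, before_wrap));	/* prints 1 */
	printf("%d\n", ticks_after(before_wrap, after_wrap));	/* prints 0 */
	return 0;
}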
diff --git a/block/blk-exec.c b/block/blk-exec.c
index beae70a0e5e5..d6cd501c0d34 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -21,7 +21,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
struct completion *waiting = rq->end_io_data;
- rq->end_io_data = NULL;
+ rq->end_io_data = (void *)(uintptr_t)error;
/*
* complete last, if this is a stack request the process (and thus
@@ -63,6 +63,19 @@ void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+static bool blk_rq_is_poll(struct request *rq)
+{
+ return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL;
+}
+
+static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
+{
+ do {
+ blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+ cond_resched();
+ } while (!completion_done(wait));
+}
+
/**
* blk_execute_rq - insert a request into queue for execution
* @bd_disk: matching gendisk
@@ -72,8 +85,9 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
* for execution and wait for completion.
+ * Return: The blk_status_t result provided to blk_mq_end_request().
*/
-void blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
+blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long hang_check;
@@ -83,9 +97,14 @@ void blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
- if (hang_check)
+
+ if (blk_rq_is_poll(rq))
+ blk_rq_poll_completion(rq, &wait);
+ else if (hang_check)
while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
else
wait_for_completion_io(&wait);
+
+ return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
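
blk_execute_rq() now returns the completion status to its caller: blk_end_sync_rq() stores the blk_status_t into rq->end_io_data through a uintptr_t cast, and blk_execute_rq() casts it back once the wait (or the new polling loop for HCTX_TYPE_POLL contexts) finishes. A tiny sketch of that encode/decode pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical completion record: the only spare per-request slot is a void *. */
struct completion_ctx {
	void *end_io_data;
};

/* Completion side: stash a small status code in the pointer slot. */
static void record_status(struct completion_ctx *ctx, int status)
{
	ctx->end_io_data = (void *)(uintptr_t)status;
}

/* Submitter side: recover the status after waiting for completion. */
static int read_status(const struct completion_ctx *ctx)
{
	return (int)(uintptr_t)ctx->end_io_data;
}

int main(void)
{
	struct completion_ctx ctx;

	record_status(&ctx, 10);	/* e.g. a BLK_STS_*-style value */
	printf("status = %d\n", read_status(&ctx));
	return 0;
}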
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1002f6c58181..4201728bf3a5 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
+bool is_flush_rq(struct request *rq)
+{
+ return rq->end_io == flush_end_io;
+}
+
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
+ /*
+ * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
+ * implied in refcount_inc_not_zero() called from
+ * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
+ * and READ flush_rq->end_io
+ */
+ smp_wmb();
+ refcount_set(&flush_rq->ref, 1);
blk_flush_queue_rq(flush_rq, false);
}
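
The blk_kick_flush() hunk inserts an smp_wmb() between publishing flush_rq->end_io and setting the reference count, pairing with refcount_inc_not_zero() in blk_mq_find_and_get_req(): any reader that observes the non-zero count must also observe the initialised end_io pointer. The same publish/consume ordering, expressed with C11 release/acquire atomics in a standalone sketch (struct and function names are illustrative, and the example is single-threaded for brevity):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative request: ->end_io must be visible before ->ref reads as 1. */
struct req {
	void (*end_io)(void);
	atomic_int ref;
};

static void done(void)
{
	puts("end_io ran");
}

/* Writer: initialise the payload, then publish it with release ordering. */
static void publish(struct req *rq)
{
	rq->end_io = done;
	atomic_store_explicit(&rq->ref, 1, memory_order_release);
}

/* Reader: only dereference ->end_io after acquiring a non-zero reference. */
static void consume(struct req *rq)
{
	if (atomic_load_explicit(&rq->ref, memory_order_acquire) != 0)
		rq->end_io();
}

int main(void)
{
	struct req rq = { .end_io = NULL, .ref = 0 };

	publish(&rq);
	consume(&rq);
	return 0;
}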
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index c2d6bc88d3f1..0e56557cacf2 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1440,16 +1440,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
return -1;
iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+ wait->committed = true;
/*
* autoremove_wake_function() removes the wait entry only when it
- * actually changed the task state. We want the wait always
- * removed. Remove explicitly and use default_wake_function().
+ * actually changed the task state. We want the wait always removed.
+ * Remove explicitly and use default_wake_function(). Note that the
+ * order of operations is important as finish_wait() tests whether
+ * @wq_entry is removed without grabbing the lock.
*/
- list_del_init(&wq_entry->entry);
- wait->committed = true;
-
default_wake_function(wq_entry, mode, flags, key);
+ list_del_init_careful(&wq_entry->entry);
return 0;
}
@@ -3060,19 +3061,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
return -EINVAL;
- spin_lock(&blkcg->lock);
+ spin_lock_irq(&blkcg->lock);
iocc->dfl_weight = v * WEIGHT_ONE;
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct ioc_gq *iocg = blkg_to_iocg(blkg);
if (iocg) {
- spin_lock_irq(&iocg->ioc->lock);
+ spin_lock(&iocg->ioc->lock);
ioc_now(iocg->ioc, &now);
weight_updated(iocg, &now);
- spin_unlock_irq(&iocg->ioc->lock);
+ spin_unlock(&iocg->ioc->lock);
}
}
- spin_unlock(&blkcg->lock);
+ spin_unlock_irq(&blkcg->lock);
return nbytes;
}
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 81be0096411d..d8b0d8bd132b 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -833,7 +833,11 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
enable = iolatency_set_min_lat_nsec(blkg, lat_val);
if (enable) {
- WARN_ON_ONCE(!blk_get_queue(blkg->q));
+ if (!blk_get_queue(blkg->q)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
blkg_get(blkg);
}
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c838d81ac058..0f006cabfd91 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -515,17 +515,6 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
percpu_ref_put(&q->q_usage_counter);
}
-static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
- struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
-{
- if (hctx->sched_tags) {
- blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
- blk_mq_free_rq_map(hctx->sched_tags, set->flags);
- hctx->sched_tags = NULL;
- }
-}
-
static int blk_mq_sched_alloc_tags(struct request_queue *q,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
@@ -539,8 +528,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
- if (ret)
- blk_mq_sched_free_tags(set, hctx, hctx_idx);
+ if (ret) {
+ blk_mq_free_rq_map(hctx->sched_tags, set->flags);
+ hctx->sched_tags = NULL;
+ }
return ret;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2e9fd0ec63d7..9d4fdc2be88a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
void blk_mq_put_rq_ref(struct request *rq)
{
- if (is_flush_rq(rq, rq->mq_hctx))
+ if (is_flush_rq(rq))
rq->end_io(rq, 0);
else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
unsigned long *next = priv;
/*
- * Just do a quick check if it is expired before locking the request in
- * so we're not unnecessarilly synchronizing across CPUs.
- */
- if (!blk_mq_req_expired(rq, next))
- return true;
-
- /*
- * We have reason to believe the request may be expired. Take a
- * reference on the request to lock this request lifetime into its
- * currently allocated context to prevent it from being reallocated in
- * the event the completion by-passes this timeout handler.
- *
- * If the reference was already released, then the driver beat the
- * timeout handler to posting a natural completion.
- */
- if (!refcount_inc_not_zero(&rq->ref))
- return true;
-
- /*
- * The request is now locked and cannot be reallocated underneath the
- * timeout handler's processing. Re-verify this exact request is truly
- * expired; if it is not expired, then the request was completed and
- * reallocated as a new request.
+ * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+ * be reallocated underneath the timeout handler's processing, then
+ * the expire check is reliable. If the request is not expired, then
+ * it was completed and reallocated as a new request after returning
+ * from blk_mq_check_expired().
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
-
- blk_mq_put_rq_ref(rq);
return true;
}
@@ -2994,10 +2974,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- if (shared)
+ if (shared) {
hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
- else
+ } else {
+ blk_mq_tag_idle(hctx);
hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
+ }
}
}
@@ -3109,7 +3091,7 @@ void blk_mq_release(struct request_queue *q)
blk_mq_sysfs_deinit(q);
}
-struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
void *queuedata)
{
struct request_queue *q;
@@ -3126,7 +3108,6 @@ struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
}
return q;
}
-EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
diff --git a/block/blk.h b/block/blk.h
index 4b885c0f6708..cb01429c162c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
- return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
gfp_t flags);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 330fede77271..a89d80102304 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -45,7 +45,7 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
return PTR_ERR(job->request);
if (hdr->dout_xfer_len && hdr->din_xfer_len) {
- job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
+ job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
if (IS_ERR(job->bidi_rq)) {
ret = PTR_ERR(job->bidi_rq);
goto out;
@@ -84,7 +84,7 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
*/
hdr->device_status = job->result & 0xff;
hdr->transport_status = host_byte(job->result);
- hdr->driver_status = driver_byte(job->result);
+ hdr->driver_status = 0;
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
diff --git a/block/bsg.c b/block/bsg.c
index bd10922d5cbb..1f196563ae6c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -96,7 +96,9 @@ static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
*/
hdr->device_status = sreq->result & 0xff;
hdr->transport_status = host_byte(sreq->result);
- hdr->driver_status = driver_byte(sreq->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(sreq->result))
+ hdr->driver_status = DRIVER_SENSE;
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
@@ -152,7 +154,7 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
return ret;
rq = blk_get_request(q, hdr.dout_xfer_len ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/block/genhd.c b/block/genhd.c
index 79aa40b4c39c..298ee78c1bda 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -365,12 +365,12 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
xa_for_each(&disk->part_tbl, idx, part) {
if (bdev_is_partition(part) && !bdev_nr_sectors(part))
continue;
- if (!bdgrab(part))
+ if (!kobject_get_unless_zero(&part->bd_device.kobj))
continue;
rcu_read_unlock();
kobject_uevent(bdev_kobj(part), action);
- bdput(part);
+ put_device(&part->bd_device);
rcu_read_lock();
}
rcu_read_unlock();
@@ -1079,10 +1079,9 @@ static void disk_release(struct device *dev)
disk_release_events(disk);
kfree(disk->random);
xa_destroy(&disk->part_tbl);
- bdput(disk->part0);
if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
blk_put_queue(disk->queue);
- kfree(disk);
+ bdput(disk->part0); /* frees the disk */
}
struct class block_class = {
.name = "block",
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 81e3279ecd57..15a8be57203d 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -596,13 +596,13 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
struct list_head *head = &kcq->rq_list[sched_domain];
spin_lock(&kcq->lock);
+ trace_block_rq_insert(rq);
if (at_head)
list_move(&rq->queuelist, head);
else
list_move_tail(&rq->queuelist, head);
sbitmap_set_bit(&khd->kcq_map[sched_domain],
rq->mq_ctx->index_hw[hctx->type]);
- trace_block_rq_insert(rq);
spin_unlock(&kcq->lock);
}
}
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
deleted file mode 100644
index 3b4bfddec39f..000000000000
--- a/block/mq-deadline-cgroup.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/blk-cgroup.h>
-#include <linux/ioprio.h>
-
-#include "mq-deadline-cgroup.h"
-
-static struct blkcg_policy dd_blkcg_policy;
-
-static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
-{
- struct dd_blkcg *pd;
-
- pd = kzalloc(sizeof(*pd), gfp);
- if (!pd)
- return NULL;
- pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
- GFP_KERNEL | __GFP_ZERO);
- if (!pd->stats) {
- kfree(pd);
- return NULL;
- }
- return &pd->cpd;
-}
-
-static void dd_cpd_free(struct blkcg_policy_data *cpd)
-{
- struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);
-
- free_percpu(dd_blkcg->stats);
- kfree(dd_blkcg);
-}
-
-static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
-{
- return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
- struct dd_blkcg, cpd);
-}
-
-/*
- * Convert an association between a block cgroup and a request queue into a
- * pointer to the mq-deadline information associated with a (blkcg, queue) pair.
- */
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
- struct blkg_policy_data *pd;
-
- pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
- if (!pd)
- return NULL;
-
- return dd_blkcg_from_pd(pd);
-}
-
-static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
-{
- static const char *const prio_class_name[] = {
- [IOPRIO_CLASS_NONE] = "NONE",
- [IOPRIO_CLASS_RT] = "RT",
- [IOPRIO_CLASS_BE] = "BE",
- [IOPRIO_CLASS_IDLE] = "IDLE",
- };
- struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
- int res = 0;
- u8 prio;
-
- for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
- res += scnprintf(buf + res, size - res,
- " [%s] dispatched=%u inserted=%u merged=%u",
- prio_class_name[prio],
- ddcg_sum(blkcg, dispatched, prio) +
- ddcg_sum(blkcg, merged, prio) -
- ddcg_sum(blkcg, completed, prio),
- ddcg_sum(blkcg, inserted, prio) -
- ddcg_sum(blkcg, completed, prio),
- ddcg_sum(blkcg, merged, prio));
-
- return res;
-}
-
-static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
- struct blkcg *blkcg)
-{
- struct dd_blkg *pd;
-
- pd = kzalloc(sizeof(*pd), gfp);
- if (!pd)
- return NULL;
- return &pd->pd;
-}
-
-static void dd_pd_free(struct blkg_policy_data *pd)
-{
- struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);
-
- kfree(dd_blkg);
-}
-
-static struct blkcg_policy dd_blkcg_policy = {
- .cpd_alloc_fn = dd_cpd_alloc,
- .cpd_free_fn = dd_cpd_free,
-
- .pd_alloc_fn = dd_pd_alloc,
- .pd_free_fn = dd_pd_free,
- .pd_stat_fn = dd_pd_stat,
-};
-
-int dd_activate_policy(struct request_queue *q)
-{
- return blkcg_activate_policy(q, &dd_blkcg_policy);
-}
-
-void dd_deactivate_policy(struct request_queue *q)
-{
- blkcg_deactivate_policy(q, &dd_blkcg_policy);
-}
-
-int __init dd_blkcg_init(void)
-{
- return blkcg_policy_register(&dd_blkcg_policy);
-}
-
-void __exit dd_blkcg_exit(void)
-{
- blkcg_policy_unregister(&dd_blkcg_policy);
-}
diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h
deleted file mode 100644
index 0143fd74f3ce..000000000000
--- a/block/mq-deadline-cgroup.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#if !defined(_MQ_DEADLINE_CGROUP_H_)
-#define _MQ_DEADLINE_CGROUP_H_
-
-#include <linux/blk-cgroup.h>
-
-struct request_queue;
-
-/**
- * struct io_stats_per_prio - I/O statistics per I/O priority class.
- * @inserted: Number of inserted requests.
- * @merged: Number of merged requests.
- * @dispatched: Number of dispatched requests.
- * @completed: Number of I/O completions.
- */
-struct io_stats_per_prio {
- local_t inserted;
- local_t merged;
- local_t dispatched;
- local_t completed;
-};
-
-/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
-struct blkcg_io_stats {
- struct io_stats_per_prio stats[4];
-};
-
-/**
- * struct dd_blkcg - Per cgroup data.
- * @cpd: blkcg_policy_data structure.
- * @stats: I/O statistics.
- */
-struct dd_blkcg {
- struct blkcg_policy_data cpd; /* must be the first member */
- struct blkcg_io_stats __percpu *stats;
-};
-
-/*
- * Count one event of type 'event_type' and with I/O priority class
- * 'prio_class'.
- */
-#define ddcg_count(ddcg, event_type, prio_class) do { \
-if (ddcg) { \
- struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \
- \
- BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \
- BUILD_BUG_ON(!__same_type((prio_class), u8)); \
- local_inc(&io_stats->stats[(prio_class)].event_type); \
- put_cpu_ptr(io_stats); \
-} \
-} while (0)
-
-/*
- * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
- * across all CPUs. No locking or barriers since it is fine if the returned
- * sum is slightly outdated.
- */
-#define ddcg_sum(ddcg, event_type, prio) ({ \
- unsigned int cpu; \
- u32 sum = 0; \
- \
- BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \
- BUILD_BUG_ON(!__same_type((prio), u8)); \
- for_each_present_cpu(cpu) \
- sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \
- stats[(prio)].event_type); \
- sum; \
-})
-
-#ifdef CONFIG_BLK_CGROUP
-
-/**
- * struct dd_blkg - Per (cgroup, request queue) data.
- * @pd: blkg_policy_data structure.
- */
-struct dd_blkg {
- struct blkg_policy_data pd; /* must be the first member */
-};
-
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
-int dd_activate_policy(struct request_queue *q);
-void dd_deactivate_policy(struct request_queue *q);
-int __init dd_blkcg_init(void);
-void __exit dd_blkcg_exit(void);
-
-#else /* CONFIG_BLK_CGROUP */
-
-static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
- return NULL;
-}
-
-static inline int dd_activate_policy(struct request_queue *q)
-{
- return 0;
-}
-
-static inline void dd_deactivate_policy(struct request_queue *q)
-{
-}
-
-static inline int dd_blkcg_init(void)
-{
- return 0;
-}
-
-static inline void dd_blkcg_exit(void)
-{
-}
-
-#endif /* CONFIG_BLK_CGROUP */
-
-#endif /* _MQ_DEADLINE_CGROUP_H_ */
diff --git a/block/mq-deadline-main.c b/block/mq-deadline.c
index 6f612e6dc82b..a09761cbdf12 100644
--- a/block/mq-deadline-main.c
+++ b/block/mq-deadline.c
@@ -25,7 +25,6 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
-#include "mq-deadline-cgroup.h"
/*
* See Documentation/block/deadline-iosched.rst
@@ -57,6 +56,14 @@ enum dd_prio {
enum { DD_PRIO_COUNT = 3 };
+/* I/O statistics per I/O priority. */
+struct io_stats_per_prio {
+ local_t inserted;
+ local_t merged;
+ local_t dispatched;
+ local_t completed;
+};
+
/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
struct io_stats_per_prio stats[DD_PRIO_COUNT];
@@ -79,9 +86,6 @@ struct deadline_data {
* run time data
*/
- /* Request queue that owns this data structure. */
- struct request_queue *queue;
-
struct dd_per_prio per_prio[DD_PRIO_COUNT];
/* Data direction of latest dispatched request. */
@@ -234,10 +238,8 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
struct deadline_data *dd = q->elevator->elevator_data;
const u8 ioprio_class = dd_rq_ioclass(next);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
- struct dd_blkcg *blkcg = next->elv.priv[0];
dd_count(dd, merged, prio);
- ddcg_count(blkcg, merged, ioprio_class);
/*
* if next expires before rq, assign its expire time to rq
@@ -375,7 +377,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
{
struct request *rq, *next_rq;
enum dd_data_dir data_dir;
- struct dd_blkcg *blkcg;
enum dd_prio prio;
u8 ioprio_class;
@@ -474,8 +475,6 @@ done:
ioprio_class = dd_rq_ioclass(rq);
prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, dispatched, prio);
- blkcg = rq->elv.priv[0];
- ddcg_count(blkcg, dispatched, ioprio_class);
/*
* If the request needs its target zone locked, do it.
*/
@@ -569,8 +568,6 @@ static void dd_exit_sched(struct elevator_queue *e)
struct deadline_data *dd = e->elevator_data;
enum dd_prio prio;
- dd_deactivate_policy(dd->queue);
-
for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
struct dd_per_prio *per_prio = &dd->per_prio[prio];
@@ -584,7 +581,7 @@ static void dd_exit_sched(struct elevator_queue *e)
}
/*
- * Initialize elevator private data (deadline_data) and associate with blkcg.
+ * initialize elevator private data (deadline_data).
*/
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
@@ -593,12 +590,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
enum dd_prio prio;
int ret = -ENOMEM;
- /*
- * Initialization would be very tricky if the queue is not frozen,
- * hence the warning statement below.
- */
- WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
-
eq = elevator_alloc(q, e);
if (!eq)
return ret;
@@ -614,8 +605,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
if (!dd->stats)
goto free_dd;
- dd->queue = q;
-
for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
struct dd_per_prio *per_prio = &dd->per_prio[prio];
@@ -635,17 +624,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
spin_lock_init(&dd->lock);
spin_lock_init(&dd->zone_lock);
- ret = dd_activate_policy(q);
- if (ret)
- goto free_stats;
-
- ret = 0;
q->elevator = eq;
return 0;
-free_stats:
- free_percpu(dd->stats);
-
free_dd:
kfree(dd);
@@ -718,7 +699,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
struct dd_per_prio *per_prio;
enum dd_prio prio;
- struct dd_blkcg *blkcg;
LIST_HEAD(free);
lockdep_assert_held(&dd->lock);
@@ -729,18 +709,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
*/
blk_req_zone_write_unlock(rq);
- /*
- * If a block cgroup has been associated with the submitter and if an
- * I/O priority has been set in the associated block cgroup, use the
- * lowest of the cgroup priority and the request priority for the
- * request. If no priority has been set in the request, use the cgroup
- * priority.
- */
prio = ioprio_class_to_prio[ioprio_class];
dd_count(dd, inserted, prio);
- blkcg = dd_blkcg_from_bio(rq->bio);
- ddcg_count(blkcg, inserted, ioprio_class);
- rq->elv.priv[0] = blkcg;
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free);
@@ -789,10 +759,12 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
spin_unlock(&dd->lock);
}
-/* Callback from inside blk_mq_rq_ctx_init(). */
+/*
+ * Nothing to do here. This is defined only to ensure that .finish_request
+ * method is called upon request completion.
+ */
static void dd_prepare_request(struct request *rq)
{
- rq->elv.priv[0] = NULL;
}
/*
@@ -815,13 +787,11 @@ static void dd_finish_request(struct request *rq)
{
struct request_queue *q = rq->q;
struct deadline_data *dd = q->elevator->elevator_data;
- struct dd_blkcg *blkcg = rq->elv.priv[0];
const u8 ioprio_class = dd_rq_ioclass(rq);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
struct dd_per_prio *per_prio = &dd->per_prio[prio];
dd_count(dd, completed, prio);
- ddcg_count(blkcg, completed, ioprio_class);
if (blk_queue_is_zoned(q)) {
unsigned long flags;
@@ -1144,26 +1114,11 @@ MODULE_ALIAS("mq-deadline-iosched");
static int __init deadline_init(void)
{
- int ret;
-
- ret = elv_register(&mq_deadline);
- if (ret)
- goto out;
- ret = dd_blkcg_init();
- if (ret)
- goto unreg;
-
-out:
- return ret;
-
-unreg:
- elv_unregister(&mq_deadline);
- goto out;
+ return elv_register(&mq_deadline);
}
static void __exit deadline_exit(void)
{
- dd_blkcg_exit();
elv_unregister(&mq_deadline);
}
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 347c56a51d87..4230d4f71879 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -453,17 +453,26 @@ int bdev_add_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length)
{
struct block_device *part;
+ struct gendisk *disk = bdev->bd_disk;
+ int ret;
- mutex_lock(&bdev->bd_disk->open_mutex);
- if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
- mutex_unlock(&bdev->bd_disk->open_mutex);
- return -EBUSY;
+ mutex_lock(&disk->open_mutex);
+ if (!(disk->flags & GENHD_FL_UP)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (partition_overlaps(disk, start, length, -1)) {
+ ret = -EBUSY;
+ goto out;
}
- part = add_partition(bdev->bd_disk, partno, start, length,
+ part = add_partition(disk, partno, start, length,
ADDPART_FLAG_NONE, NULL);
- mutex_unlock(&bdev->bd_disk->open_mutex);
- return PTR_ERR_OR_ZERO(part);
+ ret = PTR_ERR_OR_ZERO(part);
+out:
+ mutex_unlock(&disk->open_mutex);
+ return ret;
}
int bdev_del_partition(struct block_device *bdev, int partno)
@@ -537,12 +546,8 @@ void blk_drop_partitions(struct gendisk *disk)
lockdep_assert_held(&disk->open_mutex);
- xa_for_each_start(&disk->part_tbl, idx, part, 1) {
- if (!bdgrab(part))
- continue;
+ xa_for_each_start(&disk->part_tbl, idx, part, 1)
delete_partition(part);
- bdput(part);
- }
}
static bool blk_add_partition(struct gendisk *disk,
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index d333786b5c7e..b8b518d7fb77 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
*
* Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
@@ -510,7 +510,7 @@ static bool ldm_validate_partition_table(struct parsed_partitions *state)
p = (struct msdos_partition *)(data + 0x01BE);
for (i = 0; i < 4; i++, p++)
- if (SYS_IND (p) == LDM_PARTITION) {
+ if (p->sys_ind == LDM_PARTITION) {
result = true;
break;
}
diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
index d8d6beaa72c4..8693704dcf5e 100644
--- a/block/partitions/ldm.h
+++ b/block/partitions/ldm.h
@@ -84,9 +84,6 @@ struct parsed_partitions;
#define TOC_BITMAP1 "config" /* Names of the two defined */
#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */
-/* Borrowed from msdos.c */
-#define SYS_IND(p) (get_unaligned(&(p)->sys_ind))
-
struct frag { /* VBLK Fragment handling */
struct list_head list;
u32 group;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 63e4f6f8b6e9..f5102596a984 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -38,8 +38,6 @@
*/
#include <asm/unaligned.h>
-#define SYS_IND(p) get_unaligned(&p->sys_ind)
-
static inline sector_t nr_sects(struct msdos_partition *p)
{
return (sector_t)get_unaligned_le32(&p->nr_sects);
@@ -52,9 +50,9 @@ static inline sector_t start_sect(struct msdos_partition *p)
static inline int is_extended_partition(struct msdos_partition *p)
{
- return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
- SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
- SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+ return (p->sys_ind == DOS_EXTENDED_PARTITION ||
+ p->sys_ind == WIN98_EXTENDED_PARTITION ||
+ p->sys_ind == LINUX_EXTENDED_PARTITION);
}
#define MSDOS_LABEL_MAGIC1 0x55
@@ -193,7 +191,7 @@ static void parse_extended(struct parsed_partitions *state,
put_partition(state, state->next, next, size);
set_info(state, state->next, disksig);
- if (SYS_IND(p) == LINUX_RAID_PARTITION)
+ if (p->sys_ind == LINUX_RAID_PARTITION)
state->parts[state->next].flags = ADDPART_FLAG_RAID;
loopct = 0;
if (++state->next == state->limit)
@@ -546,7 +544,7 @@ static void parse_minix(struct parsed_partitions *state,
* a secondary MBR describing its subpartitions, or
* the normal boot sector. */
if (msdos_magic_present(data + 510) &&
- SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
+ p->sys_ind == MINIX_PARTITION) { /* subpartition table present */
char tmp[1 + BDEVNAME_SIZE + 10 + 9 + 1];
snprintf(tmp, sizeof(tmp), " %s%d: <minix:", state->name, origin);
@@ -555,7 +553,7 @@ static void parse_minix(struct parsed_partitions *state,
if (state->next == state->limit)
break;
/* add each partition in use */
- if (SYS_IND(p) == MINIX_PARTITION)
+ if (p->sys_ind == MINIX_PARTITION)
put_partition(state, state->next++,
start_sect(p), nr_sects(p));
}
@@ -643,7 +641,7 @@ int msdos_partition(struct parsed_partitions *state)
p = (struct msdos_partition *) (data + 0x1be);
for (slot = 1 ; slot <= 4 ; slot++, p++) {
/* If this is an EFI GPT disk, msdos should ignore it. */
- if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
+ if (p->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT) {
put_dev_sector(sect);
return 0;
}
@@ -685,11 +683,11 @@ int msdos_partition(struct parsed_partitions *state)
}
put_partition(state, slot, start, size);
set_info(state, slot, disksig);
- if (SYS_IND(p) == LINUX_RAID_PARTITION)
+ if (p->sys_ind == LINUX_RAID_PARTITION)
state->parts[slot].flags = ADDPART_FLAG_RAID;
- if (SYS_IND(p) == DM6_PARTITION)
+ if (p->sys_ind == DM6_PARTITION)
strlcat(state->pp_buf, "[DM]", PAGE_SIZE);
- if (SYS_IND(p) == EZD_PARTITION)
+ if (p->sys_ind == EZD_PARTITION)
strlcat(state->pp_buf, "[EZD]", PAGE_SIZE);
}
@@ -698,7 +696,7 @@ int msdos_partition(struct parsed_partitions *state)
/* second pass - output for each on a separate line */
p = (struct msdos_partition *) (0x1be + data);
for (slot = 1 ; slot <= 4 ; slot++, p++) {
- unsigned char id = SYS_IND(p);
+ unsigned char id = p->sys_ind;
int n;
if (!nr_sects(p))
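
The ldm/msdos changes drop the SYS_IND() wrapper: sys_ind is a single byte, so it can be read directly, while genuinely multi-byte on-disk fields such as nr_sects keep going through get_unaligned_le32() because they sit at unaligned offsets in the packed MBR entry. A short standalone sketch of the distinction (the layout and helper below are illustrative, and the decode assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* On-disk MBR partition entry layout: multi-byte fields are misaligned. */
struct mbr_part {
	uint8_t boot_ind;
	uint8_t chs_start[3];
	uint8_t sys_ind;		/* single byte: safe to read directly */
	uint8_t chs_end[3];
	uint8_t start_sect[4];		/* little-endian u32 at offset 8 */
	uint8_t nr_sects[4];		/* little-endian u32 at offset 12 */
};

/* memcpy-based stand-in for get_unaligned_le32(): avoids misaligned loads. */
static uint32_t read_le32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;			/* assumes a little-endian host */
}

int main(void)
{
	struct mbr_part p = {
		.sys_ind = 0x83,			/* Linux partition type */
		.nr_sects = { 0x00, 0x00, 0x10, 0x00 },	/* 0x00100000 sectors */
	};

	printf("type 0x%02x, %u sectors\n", p.sys_ind, (unsigned)read_le32(p.nr_sects));
	return 0;
}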
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1b3fe99b83a6..d247431a6853 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -254,9 +254,11 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
*/
hdr->status = req->result & 0xff;
hdr->masked_status = status_byte(req->result);
- hdr->msg_status = msg_byte(req->result);
+ hdr->msg_status = COMMAND_COMPLETE;
hdr->host_status = host_byte(req->result);
- hdr->driver_status = driver_byte(req->result);
+ hdr->driver_status = 0;
+ if (scsi_status_is_check_condition(hdr->status))
+ hdr->driver_status = DRIVER_SENSE;
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
@@ -311,7 +313,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
at_head = 1;
ret = -ENOMEM;
- rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ rq = blk_get_request(q, writing ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = scsi_req(rq);
@@ -433,7 +435,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
}
- rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto error_free_buffer;
@@ -484,9 +486,10 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
break;
}
- if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) {
- err = DRIVER_ERROR << 24;
- goto error;
+ if (bytes) {
+ err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+ if (err)
+ goto error;
}
blk_execute_rq(disk, rq, 0);
@@ -521,7 +524,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq;
int err;
- rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0);
+ rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ca3b02dcbbfa..64b772c5d1c9 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1768,7 +1768,7 @@ config CRYPTO_DRBG_HMAC
bool
default y
select CRYPTO_HMAC
- select CRYPTO_SHA256
+ select CRYPTO_SHA512
config CRYPTO_DRBG_HASH
bool "Enable Hash DRBG"
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 1f7f63e836ae..c978e41f11a1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4686,8 +4686,11 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
}, {
.alg = "drbg_nopr_hmac_sha512",
- .test = alg_test_null,
+ .test = alg_test_drbg,
.fips_allowed = 1,
+ .suite = {
+ .drbg = __VECS(drbg_nopr_hmac_sha512_tv_template)
+ }
}, {
.alg = "drbg_nopr_sha1",
.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 96eb7ce9f81b..3ed6ab34ab51 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -21984,6 +21984,55 @@ static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
},
};
+/* Test vector obtained during NIST ACVP testing */
+static const struct drbg_testvec drbg_nopr_hmac_sha512_tv_template[] = {
+ {
+ .entropy = (unsigned char *)
+ "\xDF\xB0\xF2\x18\xF0\x78\x07\x01\x29\xA4\x29\x26"
+ "\x2F\x8A\x34\xCB\x37\xEF\xEE\x41\xE6\x96\xF7\xFF"
+ "\x61\x47\xD3\xED\x41\x97\xEF\x64\x0C\x48\x56\x5A"
+ "\xE6\x40\x6E\x4A\x3B\x9E\x7F\xAC\x08\xEC\x25\xAE"
+ "\x0B\x51\x0E\x2C\x44\x2E\xBD\xDB\x57\xD0\x4A\x6D"
+ "\x80\x3E\x37\x0F",
+ .entropylen = 64,
+ .expected = (unsigned char *)
+ "\x48\xc6\xa8\xdb\x09\xae\xde\x5d\x8c\x77\xf3\x52"
+ "\x92\x71\xa7\xb9\x6d\x53\x6d\xa3\x73\xe3\x55\xb8"
+ "\x39\xd6\x44\x2b\xee\xcb\xe1\x32\x15\x30\xbe\x4e"
+ "\x9b\x1e\x06\xd1\x6b\xbf\xd5\x3e\xea\x7c\xf5\xaa"
+ "\x4b\x05\xb5\xd3\xa7\xb2\xc4\xfe\xe7\x1b\xda\x11"
+ "\x43\x98\x03\x70\x90\xbf\x6e\x43\x9b\xe4\x14\xef"
+ "\x71\xa3\x2a\xef\x9f\x0d\xb9\xe3\x52\xf2\x89\xc9"
+ "\x66\x9a\x60\x60\x99\x60\x62\x4c\xd6\x45\x52\x54"
+ "\xe6\x32\xb2\x1b\xd4\x48\xb5\xa6\xf9\xba\xd3\xff"
+ "\x29\xc5\x21\xe0\x91\x31\xe0\x38\x8c\x93\x0f\x3c"
+ "\x30\x7b\x53\xa3\xc0\x7f\x2d\xc1\x39\xec\x69\x0e"
+ "\xf2\x4a\x3c\x65\xcc\xed\x07\x2a\xf2\x33\x83\xdb"
+ "\x10\x74\x96\x40\xa7\xc5\x1b\xde\x81\xca\x0b\x8f"
+ "\x1e\x0a\x1a\x7a\xbf\x3c\x4a\xb8\x8c\xaf\x7b\x80"
+ "\xb7\xdc\x5d\x0f\xef\x1b\x97\x6e\x3d\x17\x23\x5a"
+ "\x31\xb9\x19\xcf\x5a\xc5\x00\x2a\xb6\xf3\x99\x34"
+ "\x65\xee\xe9\x1c\x55\xa0\x3b\x07\x60\xc9\xc4\xe4"
+ "\xf7\x57\x5c\x34\x9f\xc6\x31\x30\x3f\x23\xb2\x89"
+ "\xc0\xe7\x50\xf3\xde\x59\xd1\x0e\xb3\x0f\x78\xcc"
+ "\x7e\x54\x5e\x61\xf6\x86\x3d\xb3\x11\x94\x36\x3e"
+ "\x61\x5c\x48\x99\xf6\x7b\x02\x9a\xdc\x6a\x28\xe6"
+ "\xd1\xa7\xd1\xa3",
+ .expectedlen = 256,
+ .addtla = (unsigned char *)
+ "\x6B\x0F\x4A\x48\x0B\x12\x85\xE4\x72\x23\x7F\x7F"
+ "\x94\x7C\x24\x69\x14\x9F\xDC\x72\xA6\x33\xAD\x3C"
+ "\x8C\x72\xC1\x88\x49\x59\x82\xC5",
+ .addtlb = (unsigned char *)
+ "\xC4\xAF\x36\x3D\xB8\x5D\x9D\xFA\x92\xF5\xC3\x3C"
+ "\x2D\x1E\x22\x2A\xBD\x8B\x05\x6F\xA3\xFC\xBF\x16"
+ "\xED\xAA\x75\x8D\x73\x9A\xF6\xEC",
+ .addtllen = 32,
+ .pers = NULL,
+ .perslen = 0,
+ }
+};
+
static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
{
.entropy = (unsigned char *)
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index 359bead4b280..fdc6b593f500 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -225,6 +225,7 @@ static int keyboard_notifier_call(struct notifier_block *blk,
case KBD_POST_KEYSYM:
{
unsigned char type = KTYP(param->value) - 0xf0;
+
if (type == KT_SPEC) {
unsigned char val = KVAL(param->value);
int on_off = -1;
@@ -265,6 +266,7 @@ static int vt_notifier_call(struct notifier_block *blk,
{
struct vt_notifier_param *param = _param;
struct vc_data *vc = param->vc;
+
switch (code) {
case VT_ALLOCATE:
break;
@@ -273,6 +275,7 @@ static int vt_notifier_call(struct notifier_block *blk,
case VT_WRITE:
{
unsigned char c = param->c;
+
if (vc->vc_num != fg_console)
break;
switch (c) {
diff --git a/drivers/accessibility/speakup/i18n.c b/drivers/accessibility/speakup/i18n.c
index 46bd50f3c3a4..bc7b47d1876f 100644
--- a/drivers/accessibility/speakup/i18n.c
+++ b/drivers/accessibility/speakup/i18n.c
@@ -90,6 +90,13 @@ static char *speakup_default_msgs[MSG_LAST_INDEX] = {
[MSG_COLOR_YELLOW] = "yellow",
[MSG_COLOR_WHITE] = "white",
[MSG_COLOR_GREY] = "grey",
+	[MSG_COLOR_BRIGHTBLUE] = "bright blue",
+	[MSG_COLOR_BRIGHTGREEN] = "bright green",
+	[MSG_COLOR_BRIGHTCYAN] = "bright cyan",
+	[MSG_COLOR_BRIGHTRED] = "bright red",
+	[MSG_COLOR_BRIGHTMAGENTA] = "bright magenta",
+	[MSG_COLOR_BRIGHTYELLOW] = "bright yellow",
+	[MSG_COLOR_BRIGHTWHITE] = "bright white",
/* Names of key states. */
[MSG_STATE_DOUBLE] = "double",
diff --git a/drivers/accessibility/speakup/i18n.h b/drivers/accessibility/speakup/i18n.h
index 2a607d263234..51e3260995d9 100644
--- a/drivers/accessibility/speakup/i18n.h
+++ b/drivers/accessibility/speakup/i18n.h
@@ -99,7 +99,14 @@ enum msg_index_t {
MSG_COLOR_YELLOW,
MSG_COLOR_WHITE,
MSG_COLOR_GREY,
- MSG_COLORS_END = MSG_COLOR_GREY,
+ MSG_COLOR_BRIGHTBLUE,
+ MSG_COLOR_BRIGHTGREEN,
+ MSG_COLOR_BRIGHTCYAN,
+ MSG_COLOR_BRIGHTRED,
+ MSG_COLOR_BRIGHTMAGENTA,
+ MSG_COLOR_BRIGHTYELLOW,
+ MSG_COLOR_BRIGHTWHITE,
+ MSG_COLORS_END = MSG_COLOR_BRIGHTWHITE,
MSG_STATES_START,
MSG_STATE_DOUBLE = MSG_STATES_START,
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 428fceaf9d50..d726537fa16c 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -389,10 +389,6 @@ static void say_attributes(struct vc_data *vc)
int fg = spk_attr & 0x0f;
int bg = spk_attr >> 4;
- if (fg > 8) {
- synth_printf("%s ", spk_msg_get(MSG_BRIGHT));
- fg -= 8;
- }
synth_printf("%s", spk_msg_get(MSG_COLORS_START + fg));
if (bg > 7) {
synth_printf(" %s ", spk_msg_get(MSG_ON_BLINKING));
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 2e39fcf492d8..0d1f397cd896 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -72,7 +72,8 @@ static void spk_ttyio_ldisc_close(struct tty_struct *tty)
}
static int spk_ttyio_receive_buf2(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+ const unsigned char *cp,
+ const char *fp, int count)
{
struct spk_ldisc_data *ldisc_data = tty->disc_data;
struct spk_synth *synth = ldisc_data->synth;
@@ -104,6 +105,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
static struct tty_ldisc_ops spk_ttyio_ldisc_ops = {
.owner = THIS_MODULE,
+ .num = N_SPEAKUP,
.name = "speakup_ldisc",
.open = spk_ttyio_ldisc_open,
.close = spk_ttyio_ldisc_close,
@@ -211,14 +213,13 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
void spk_ttyio_register_ldisc(void)
{
- if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
+ if (tty_register_ldisc(&spk_ttyio_ldisc_ops))
pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
}
void spk_ttyio_unregister_ldisc(void)
{
- if (tty_unregister_ldisc(N_SPEAKUP))
- pr_warn("speakup: Couldn't unregister ldisc\n");
+ tty_unregister_ldisc(&spk_ttyio_ldisc_ops);
}
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3972de7b7565..8f9940f40baa 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE
config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
bool "Override ACPI tables from built-in initrd"
depends on ACPI_TABLE_UPGRADE
- depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
+ depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE
help
This option provides functionality to override arbitrary ACPI tables
from built-in uncompressed initrd.
@@ -526,6 +526,9 @@ endif
source "drivers/acpi/pmic/Kconfig"
+config ACPI_VIOT
+ bool
+
endif # ACPI
config X86_PM_TIMER
@@ -548,3 +551,13 @@ config ACPI_PRMT
bool "Platform Runtime Mechanism Support"
depends on EFI && X86_64
default y
+ help
+ Platform Runtime Mechanism (PRM) is a firmware interface exposing a
+ set of binary executables that can be called from the AML interpreter
+ or directly from device drivers.
+
+ Say Y to enable the AML interpreter to execute the PRM code.
+
+ While this feature is optional in principle, leaving it out may
+ substantially increase computational overhead related to the
+ initialization of some server systems.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ceb1aed4b1fc..3018714e87d9 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -124,3 +124,5 @@ video-objs += acpi_video.o video_detect.o
obj-y += dptf/
obj-$(CONFIG_ARM64) += arm64/
+
+obj-$(CONFIG_ACPI_VIOT) += viot.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index 49b781a9cd97..ab8a4e0191b1 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -76,6 +76,7 @@ static int amba_handler_attach(struct acpi_device *adev,
case IORESOURCE_MEM:
if (!address_found) {
dev->res = *rentry->res;
+ dev->res.name = dev_name(&dev->dev);
address_found = true;
}
break;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index ffb4afc5aad9..42ede059728c 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -540,6 +540,15 @@ static const struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
},
+ {
+ .callback = video_set_report_key_events,
+ .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
+ .ident = "Dell Vostro 3350",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
+ },
+ },
/*
* Some machines change the brightness themselves when a brightness
* hotkey gets pressed, despite us telling them not to. In this case
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 38e10ab976e6..14b71b41e845 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -379,13 +379,6 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
(*element_ptr)->common.reference_count =
original_ref_count;
-
- /*
- * The original_element holds a reference from the package object
- * that represents _HID. Since a new element was created by _HID,
- * remove the reference from the _CID package.
- */
- acpi_ut_remove_reference(original_element);
}
element_ptr++;
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 6ff50f4ed947..66acbe77f46e 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ACPI_IORT) += iort.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
+obj-y += dma.o
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
new file mode 100644
index 000000000000..f16739ad3cc0
--- /dev/null
+++ b/drivers/acpi/arm64/dma.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/device.h>
+#include <linux/dma-direct.h>
+
+void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
+{
+ int ret;
+ u64 end, mask;
+ u64 dmaaddr = 0, size = 0, offset = 0;
+
+ /*
+ * If @dev is expected to be DMA-capable then the bus code that created
+ * it should have initialised its dma_mask pointer by this point. For
+ * now, we'll continue the legacy behaviour of coercing it to the
+ * coherent mask if not, but we'll no longer do so quietly.
+ */
+ if (!dev->dma_mask) {
+ dev_warn(dev, "DMA mask not set\n");
+ dev->dma_mask = &dev->coherent_dma_mask;
+ }
+
+ if (dev->coherent_dma_mask)
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ else
+ size = 1ULL << 32;
+
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ if (ret == -ENODEV)
+ ret = iort_dma_get_ranges(dev, &size);
+ if (!ret) {
+ /*
+ * Limit coherent and dma mask based on size retrieved from
+ * firmware.
+ */
+ end = dmaaddr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
+ }
+
+ *dma_addr = dmaaddr;
+ *dma_size = size;
+
+ ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
+
+ dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
+}
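
acpi_arch_dma_setup() above clamps the device's coherent and streaming DMA masks to the smallest mask that covers the end of the firmware-described range, i.e. DMA_BIT_MASK(ilog2(end) + 1). A standalone sketch of that computation (the function name is made up; the kernel helpers differ):

#include <stdint.h>
#include <stdio.h>

/* Smallest all-ones mask covering 'end', mirroring DMA_BIT_MASK(ilog2(end) + 1). */
static uint64_t mask_for_end(uint64_t end)
{
	unsigned int bits;

	if (!end)
		return 0;
	bits = 64 - __builtin_clzll(end);	/* ilog2(end) + 1 */
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

int main(void)
{
	uint64_t base = 0x80000000ULL;		/* example DMA window base */
	uint64_t size = 0x40000000ULL;		/* 1 GiB window */
	uint64_t end  = base + size - 1;

	printf("end  = %#llx\n", (unsigned long long)end);
	printf("mask = %#llx\n", (unsigned long long)mask_for_end(end));
	return 0;
}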
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e34937e11186..3b23fb775ac4 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -806,23 +806,6 @@ static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
return NULL;
}
-static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
-}
-
-static inline int iort_add_device_replay(struct device *dev)
-{
- int err = 0;
-
- if (dev->bus && !device_iommu_mapped(dev))
- err = iommu_probe_device(dev);
-
- return err;
-}
-
/**
* iort_iommu_msi_get_resv_regions - Reserved region driver helper
* @dev: Device from iommu_get_resv_regions()
@@ -900,18 +883,6 @@ static inline bool iort_iommu_driver_enabled(u8 type)
}
}
-static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
-{
- int ret = iommu_fwspec_init(dev, fwnode, ops);
-
- if (!ret)
- ret = iommu_fwspec_add_ids(dev, &streamid, 1);
-
- return ret;
-}
-
static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
struct acpi_iort_root_complex *pci_rc;
@@ -946,7 +917,7 @@ static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
return iort_iommu_driver_enabled(node->type) ?
-EPROBE_DEFER : -ENODEV;
- return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+ return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
}
struct iort_pci_alias_info {
@@ -968,13 +939,15 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
static void iort_named_component_init(struct device *dev,
struct acpi_iort_node *node)
{
- struct property_entry props[2] = {};
+ struct property_entry props[3] = {};
struct acpi_iort_named_component *nc;
nc = (struct acpi_iort_named_component *)node->node_data;
props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
FIELD_GET(ACPI_IORT_NC_PASID_BITS,
nc->node_flags));
+ if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
+ props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
if (device_create_managed_software_node(dev, props, NULL))
dev_warn(dev, "Could not add device properties\n");
@@ -1020,24 +993,13 @@ static int iort_nc_iommu_map_id(struct device *dev,
* @dev: device to configure
* @id_in: optional input id const value pointer
*
- * Returns: iommu_ops pointer on configuration success
- * NULL on configuration failure
+ * Returns: 0 on success, <0 on failure
*/
-const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
- const u32 *id_in)
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
struct acpi_iort_node *node;
- const struct iommu_ops *ops;
int err = -ENODEV;
- /*
- * If we already translated the fwspec there
- * is nothing left to do, return the iommu_ops.
- */
- ops = iort_fwspec_iommu_ops(dev);
- if (ops)
- return ops;
-
if (dev_is_pci(dev)) {
struct iommu_fwspec *fwspec;
struct pci_bus *bus = to_pci_dev(dev)->bus;
@@ -1046,7 +1008,7 @@ const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
- return NULL;
+ return -ENODEV;
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
@@ -1059,7 +1021,7 @@ const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
- return NULL;
+ return -ENODEV;
err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
iort_nc_iommu_map(dev, node);
@@ -1068,32 +1030,14 @@ const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
iort_named_component_init(dev, node);
}
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * add_device callback for dev, replay it to get things in order.
- */
- if (!err) {
- ops = iort_fwspec_iommu_ops(dev);
- err = iort_add_device_replay(dev);
- }
-
- /* Ignore all other errors apart from EPROBE_DEFER */
- if (err == -EPROBE_DEFER) {
- ops = ERR_PTR(err);
- } else if (err) {
- dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
- ops = NULL;
- }
-
- return ops;
+ return err;
}
#else
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
-const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
- const u32 *input_id)
-{ return NULL; }
+int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
+{ return -ENODEV; }
#endif
static int nc_dma_get_range(struct device *dev, u64 *size)
@@ -1144,56 +1088,18 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
}
/**
- * iort_dma_setup() - Set-up device DMA parameters.
+ * iort_dma_get_ranges() - Look up DMA addressing limit for the device
+ * @dev: device to lookup
+ * @size: DMA range size result pointer
*
- * @dev: device to configure
- * @dma_addr: device DMA address result pointer
- * @dma_size: DMA range size result pointer
+ * Return: 0 on success, an error otherwise.
*/
-void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
+int iort_dma_get_ranges(struct device *dev, u64 *size)
{
- u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
- int ret;
-
- /*
- * If @dev is expected to be DMA-capable then the bus code that created
- * it should have initialised its dma_mask pointer by this point. For
- * now, we'll continue the legacy behaviour of coercing it to the
- * coherent mask if not, but we'll no longer do so quietly.
- */
- if (!dev->dma_mask) {
- dev_warn(dev, "DMA mask not set\n");
- dev->dma_mask = &dev->coherent_dma_mask;
- }
-
- if (dev->coherent_dma_mask)
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ if (dev_is_pci(dev))
+ return rc_dma_get_range(dev, size);
else
- size = 1ULL << 32;
-
- ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
- if (ret == -ENODEV)
- ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size)
- : nc_dma_get_range(dev, &size);
-
- if (!ret) {
- /*
- * Limit coherent and dma mask based on size retrieved from
- * firmware.
- */
- end = dmaaddr + size - 1;
- mask = DMA_BIT_MASK(ilog2(end) + 1);
- dev->bus_dma_limit = end;
- dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
- *dev->dma_mask = min(*dev->dma_mask, mask);
- }
-
- *dma_addr = dmaaddr;
- *dma_size = size;
-
- ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
-
- dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
+ return nc_dma_get_range(dev, size);
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
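With the mask and offset handling dropped from IORT, the acpi_arch_dma_setup() call introduced in drivers/acpi/scan.c below is presumably where the limit gets applied. A minimal, illustrative sketch of such a caller, reusing the mask derivation the removed iort_dma_setup() performed (this is a sketch under those assumptions, not the actual arm64 implementation):

/*
 * Illustrative sketch only, not part of this patch: one way an arch-side
 * acpi_arch_dma_setup() could consume iort_dma_get_ranges().  The real
 * implementation would also consult acpi_dma_get_range(); omitted here.
 */
void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 end, mask, size = 0;

	/* Fall back to a 32-bit window if firmware reports no range. */
	if (iort_dma_get_ranges(dev, &size))
		size = 1ULL << 32;

	/* Derive the masks exactly as the removed iort_dma_setup() did. */
	end = size - 1;
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->bus_dma_limit = end;
	dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
	*dev->dma_mask = min(*dev->dma_mask, mask);

	*dma_addr = 0;
	*dma_size = size;
}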
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 60fb6a843853..f854bcb8d010 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -27,6 +27,7 @@
#include <linux/dmi.h>
#endif
#include <linux/acpi_iort.h>
+#include <linux/acpi_viot.h>
#include <linux/pci.h>
#include <acpi/apei.h>
#include <linux/suspend.h>
@@ -303,7 +304,8 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
- capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_PRMT))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
#ifdef CONFIG_ARM64
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
@@ -1335,6 +1337,7 @@ static int __init acpi_init(void)
acpi_wakeup_device_init();
acpi_debugger_init();
acpi_setup_sb_notify_handler();
+ acpi_viot_init();
return 0;
}
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 5fca18296bf6..550b9081fcbc 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -9,6 +9,42 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+struct pch_fivr_resp {
+ u64 status;
+ u64 result;
+};
+
+static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp)
+{
+ struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp};
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NN"), "NN" };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = -EFAULT;
+
+ status = acpi_evaluate_object(handle, method, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return ret;
+
+ obj = buffer.pointer;
+ if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+ goto release_buffer;
+
+ status = acpi_extract_package(obj, &format, &resp);
+ if (ACPI_FAILURE(status))
+ goto release_buffer;
+
+ if (fivr_resp->status)
+ goto release_buffer;
+
+ ret = 0;
+
+release_buffer:
+ kfree(buffer.pointer);
+ return ret;
+}
+
/*
* Presentation of attributes which are defined for INT1045
* They are:
@@ -23,15 +59,14 @@ static ssize_t name##_show(struct device *dev,\
char *buf)\
{\
struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
- unsigned long long val;\
- acpi_status status;\
+ struct pch_fivr_resp fivr_resp;\
+ int status;\
\
- status = acpi_evaluate_integer(acpi_dev->handle, #method,\
- NULL, &val);\
- if (ACPI_SUCCESS(status))\
- return sprintf(buf, "%d\n", (int)val);\
- else\
- return -EINVAL;\
+ status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\
+ if (status)\
+ return status;\
+\
+ return sprintf(buf, "%llu\n", fivr_resp.result);\
}
#define PCH_FIVR_STORE(name, method) \
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 23d9a09d7060..a3ef6cce644c 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
struct nd_mapping_desc *mapping;
+ /* range index 0 == unmapped in SPA or invalid-SPA */
+ if (memdev->range_index == 0 || spa->range_index == 0)
+ continue;
if (memdev->range_index != spa->range_index)
continue;
if (count >= ND_MAX_MAPPINGS) {
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 31cf9aee5edd..1f6007abcf18 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -292,6 +292,12 @@ void __init init_prmt(void)
int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
+ /*
+	 * Return immediately if the PRMT table is not present or no PRM module is found.
+ */
+ if (mc <= 0)
+ return;
+
pr_info("PRM: found %u modules\n", mc);
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index dc01fb550b28..ee78a210c606 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -423,13 +423,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
-static bool irq_is_legacy(struct acpi_resource_irq *irq)
-{
- return irq->triggering == ACPI_EDGE_SENSITIVE &&
- irq->polarity == ACPI_ACTIVE_HIGH &&
- irq->shareable == ACPI_EXCLUSIVE;
-}
-
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
@@ -468,7 +461,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->shareable, irq_is_legacy(irq));
+ irq->shareable, true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0641bc20b097..b24513ec3fae 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_viot.h>
+#include <linux/iommu.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
@@ -1526,6 +1528,78 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
return ret >= 0 ? 0 : ret;
}
+#ifdef CONFIG_IOMMU_API
+int acpi_iommu_fwspec_init(struct device *dev, u32 id,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+ int ret = iommu_fwspec_init(dev, fwnode, ops);
+
+ if (!ret)
+ ret = iommu_fwspec_add_ids(dev, &id, 1);
+
+ return ret;
+}
+
+static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ return fwspec ? fwspec->ops : NULL;
+}
+
+static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
+ const u32 *id_in)
+{
+ int err;
+ const struct iommu_ops *ops;
+
+ /*
+ * If we already translated the fwspec there is nothing left to do,
+ * return the iommu_ops.
+ */
+ ops = acpi_iommu_fwspec_ops(dev);
+ if (ops)
+ return ops;
+
+ err = iort_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
+ err = viot_iommu_configure(dev);
+
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * iommu_probe_device() call for dev, replay it to get things in order.
+ */
+ if (!err && dev->bus && !device_iommu_mapped(dev))
+ err = iommu_probe_device(dev);
+
+ /* Ignore all other errors apart from EPROBE_DEFER */
+ if (err == -EPROBE_DEFER) {
+ return ERR_PTR(err);
+ } else if (err) {
+ dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
+ return NULL;
+ }
+ return acpi_iommu_fwspec_ops(dev);
+}
+
+#else /* !CONFIG_IOMMU_API */
+
+int acpi_iommu_fwspec_init(struct device *dev, u32 id,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+ return -ENODEV;
+}
+
+static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
+ const u32 *id_in)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_IOMMU_API */
+
/**
* acpi_dma_configure_id - Set-up DMA configuration for the device.
* @dev: The pointer to the device
@@ -1543,9 +1617,9 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
return 0;
}
- iort_dma_setup(dev, &dma_addr, &size);
+ acpi_arch_dma_setup(dev, &dma_addr, &size);
- iommu = iort_iommu_configure_id(dev, input_id);
+ iommu = acpi_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index e7ddd281afff..d5cedffeeff9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -860,11 +860,9 @@ EXPORT_SYMBOL(acpi_dev_present);
* Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
- * FIXME: The function does not tolerate the sudden disappearance of @adev, e.g.
- * in the case of a hotplug event. That said, the caller should ensure that
- * this will never happen.
- *
* The caller is responsible for invoking acpi_dev_put() on the returned device.
+ * On the other hand, the function invokes acpi_dev_put() on the given @adev,
+ * assuming that its reference counter has been incremented beforehand.
*
* See additional information in acpi_dev_present() as well.
*/
@@ -880,6 +878,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
+ acpi_dev_put(adev);
return dev ? to_acpi_device(dev) : NULL;
}
EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
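Since acpi_dev_get_next_match_dev() now consumes the reference on the device passed in, an iteration over matching devices only has to put the device it ultimately keeps. A hedged usage sketch; the HID "PNP0C0A" and the is_wanted() predicate are placeholder examples, not part of the patch:

/* Illustrative only; "PNP0C0A" and is_wanted() are placeholders. */
struct acpi_device *adev = NULL;

while ((adev = acpi_dev_get_next_match_dev(adev, "PNP0C0A", NULL, -1))) {
	if (is_wanted(adev))
		break;		/* the loop exits holding exactly this reference */
}
if (adev)
	acpi_dev_put(adev);	/* drop it once done */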
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
new file mode 100644
index 000000000000..d2256326c73a
--- /dev/null
+++ b/drivers/acpi/viot.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Virtual I/O topology
+ *
+ * The Virtual I/O Translation Table (VIOT) describes the topology of
+ * para-virtual IOMMUs and the endpoints they manage. The OS uses it to
+ * initialize devices in the right order, preventing endpoints from issuing DMA
+ * before their IOMMU is ready.
+ *
+ * When binding a driver to a device, before calling the device driver's probe()
+ * method, the driver infrastructure calls dma_configure(). At that point the
+ * VIOT driver looks for an IOMMU associated with the device in the VIOT table.
+ * If an IOMMU exists and has been initialized, the VIOT driver initializes the
+ * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
+ * ops when the device driver configures DMA mappings. If an IOMMU exists and
+ * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
+ * the device until the IOMMU is available.
+ */
+#define pr_fmt(fmt) "ACPI: VIOT: " fmt
+
+#include <linux/acpi_viot.h>
+#include <linux/dma-iommu.h>
+#include <linux/fwnode.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+struct viot_iommu {
+ /* Node offset within the table */
+ unsigned int offset;
+ struct fwnode_handle *fwnode;
+ struct list_head list;
+};
+
+struct viot_endpoint {
+ union {
+ /* PCI range */
+ struct {
+ u16 segment_start;
+ u16 segment_end;
+ u16 bdf_start;
+ u16 bdf_end;
+ };
+ /* MMIO */
+ u64 address;
+ };
+ u32 endpoint_id;
+ struct viot_iommu *viommu;
+ struct list_head list;
+};
+
+static struct acpi_table_viot *viot;
+static LIST_HEAD(viot_iommus);
+static LIST_HEAD(viot_pci_ranges);
+static LIST_HEAD(viot_mmio_endpoints);
+
+static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
+{
+ struct acpi_viot_header *start, *end, *hdr_end;
+
+ start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
+ max_t(size_t, sizeof(*viot), viot->node_offset));
+ end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
+ hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));
+
+ if (hdr < start || hdr_end > end) {
+ pr_err(FW_BUG "Node pointer overflows\n");
+ return -EOVERFLOW;
+ }
+ if (hdr->length < sizeof(*hdr)) {
+ pr_err(FW_BUG "Empty node\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
+ u16 segment, u16 bdf)
+{
+ struct pci_dev *pdev;
+ struct fwnode_handle *fwnode;
+
+ pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
+ bdf & 0xff);
+ if (!pdev) {
+ pr_err("Could not find PCI IOMMU\n");
+ return -ENODEV;
+ }
+
+ fwnode = pdev->dev.fwnode;
+ if (!fwnode) {
+ /*
+ * PCI devices aren't necessarily described by ACPI. Create a
+ * fwnode so the IOMMU subsystem can identify this device.
+ */
+ fwnode = acpi_alloc_fwnode_static();
+ if (!fwnode) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+ set_primary_fwnode(&pdev->dev, fwnode);
+ }
+ viommu->fwnode = pdev->dev.fwnode;
+ pci_dev_put(pdev);
+ return 0;
+}
+
+static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
+ u64 address)
+{
+ struct acpi_device *adev;
+ struct resource res = {
+ .start = address,
+ .end = address,
+ .flags = IORESOURCE_MEM,
+ };
+
+ adev = acpi_resource_consumer(&res);
+ if (!adev) {
+ pr_err("Could not find MMIO IOMMU\n");
+ return -EINVAL;
+ }
+ viommu->fwnode = &adev->fwnode;
+ return 0;
+}
+
+static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
+{
+ int ret;
+ struct viot_iommu *viommu;
+ struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
+ viot, offset);
+ union {
+ struct acpi_viot_virtio_iommu_pci pci;
+ struct acpi_viot_virtio_iommu_mmio mmio;
+ } *node = (void *)hdr;
+
+ list_for_each_entry(viommu, &viot_iommus, list)
+ if (viommu->offset == offset)
+ return viommu;
+
+ if (viot_check_bounds(hdr))
+ return NULL;
+
+ viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
+ if (!viommu)
+ return NULL;
+
+ viommu->offset = offset;
+ switch (hdr->type) {
+ case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
+ if (hdr->length < sizeof(node->pci))
+ goto err_free;
+
+ ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
+ node->pci.bdf);
+ break;
+ case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
+ if (hdr->length < sizeof(node->mmio))
+ goto err_free;
+
+ ret = viot_get_mmio_iommu_fwnode(viommu,
+ node->mmio.base_address);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto err_free;
+
+ list_add(&viommu->list, &viot_iommus);
+ return viommu;
+
+err_free:
+ kfree(viommu);
+ return NULL;
+}
+
+static int __init viot_parse_node(const struct acpi_viot_header *hdr)
+{
+ int ret = -EINVAL;
+ struct list_head *list;
+ struct viot_endpoint *ep;
+ union {
+ struct acpi_viot_mmio mmio;
+ struct acpi_viot_pci_range pci;
+ } *node = (void *)hdr;
+
+ if (viot_check_bounds(hdr))
+ return -EINVAL;
+
+ if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
+ hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
+ return 0;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ switch (hdr->type) {
+ case ACPI_VIOT_NODE_PCI_RANGE:
+ if (hdr->length < sizeof(node->pci)) {
+ pr_err(FW_BUG "Invalid PCI node size\n");
+ goto err_free;
+ }
+
+ ep->segment_start = node->pci.segment_start;
+ ep->segment_end = node->pci.segment_end;
+ ep->bdf_start = node->pci.bdf_start;
+ ep->bdf_end = node->pci.bdf_end;
+ ep->endpoint_id = node->pci.endpoint_start;
+ ep->viommu = viot_get_iommu(node->pci.output_node);
+ list = &viot_pci_ranges;
+ break;
+ case ACPI_VIOT_NODE_MMIO:
+ if (hdr->length < sizeof(node->mmio)) {
+ pr_err(FW_BUG "Invalid MMIO node size\n");
+ goto err_free;
+ }
+
+ ep->address = node->mmio.base_address;
+ ep->endpoint_id = node->mmio.endpoint;
+ ep->viommu = viot_get_iommu(node->mmio.output_node);
+ list = &viot_mmio_endpoints;
+ break;
+ default:
+ pr_warn("Unsupported node %x\n", hdr->type);
+ ret = 0;
+ goto err_free;
+ }
+
+ if (!ep->viommu) {
+ pr_warn("No IOMMU node found\n");
+ /*
+ * A future version of the table may use the node for other
+ * purposes. Keep parsing.
+ */
+ ret = 0;
+ goto err_free;
+ }
+
+ list_add(&ep->list, list);
+ return 0;
+
+err_free:
+ kfree(ep);
+ return ret;
+}
+
+/**
+ * acpi_viot_init - Parse the VIOT table
+ *
+ * Parse the VIOT table and prepare the list of endpoints to be used during DMA
+ * setup of devices.
+ */
+void __init acpi_viot_init(void)
+{
+ int i;
+ acpi_status status;
+ struct acpi_table_header *hdr;
+ struct acpi_viot_header *node;
+
+ status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+ return;
+ }
+
+ viot = (void *)hdr;
+
+ node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
+ for (i = 0; i < viot->node_count; i++) {
+ if (viot_parse_node(node))
+ return;
+
+ node = ACPI_ADD_PTR(struct acpi_viot_header, node,
+ node->length);
+ }
+
+ acpi_put_table(hdr);
+}
+
+static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
+ u32 epid)
+{
+ const struct iommu_ops *ops;
+
+ if (!viommu)
+ return -ENODEV;
+
+	/* We're not translating ourselves */
+ if (viommu->fwnode == dev->fwnode)
+ return -EINVAL;
+
+ ops = iommu_ops_from_fwnode(viommu->fwnode);
+ if (!ops)
+ return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
+ -EPROBE_DEFER : -ENODEV;
+
+ return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
+}
+
+static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
+{
+ u32 epid;
+ struct viot_endpoint *ep;
+ u32 domain_nr = pci_domain_nr(pdev->bus);
+
+ list_for_each_entry(ep, &viot_pci_ranges, list) {
+ if (domain_nr >= ep->segment_start &&
+ domain_nr <= ep->segment_end &&
+ dev_id >= ep->bdf_start &&
+ dev_id <= ep->bdf_end) {
+ epid = ((domain_nr - ep->segment_start) << 16) +
+ dev_id - ep->bdf_start + ep->endpoint_id;
+
+ /*
+ * If we found a PCI range managed by the viommu, we're
+ * the one that has to request ACS.
+ */
+ pci_request_acs();
+
+ return viot_dev_iommu_init(&pdev->dev, ep->viommu,
+ epid);
+ }
+ }
+ return -ENODEV;
+}
+
+static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
+{
+ struct resource *mem;
+ struct viot_endpoint *ep;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
+ list_for_each_entry(ep, &viot_mmio_endpoints, list) {
+ if (ep->address == mem->start)
+ return viot_dev_iommu_init(&pdev->dev, ep->viommu,
+ ep->endpoint_id);
+ }
+ return -ENODEV;
+}
+
+/**
+ * viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT
+ * @dev: the endpoint
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int viot_iommu_configure(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pci_for_each_dma_alias(to_pci_dev(dev),
+ viot_pci_dev_iommu_init, NULL);
+ else if (dev_is_platform(dev))
+ return viot_mmio_dev_iommu_init(to_platform_device(dev));
+ return -ENODEV;
+}
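For reference, the PCI endpoint-ID mapping performed in viot_pci_dev_iommu_init() above amounts to the helper below (an illustrative restatement, not part of the patch). With a range of { segment_start = 0, bdf_start = 0x08, endpoint_start = 0x100 }, device 0000:00:01.0 (BDF 0x0008) maps to endpoint 0x100 and 0000:00:01.1 to 0x101.

/* Illustrative restatement of the epid computation above. */
static u32 viot_epid_example(const struct viot_endpoint *ep,
			     u32 domain_nr, u16 dev_id)
{
	return ((domain_nr - ep->segment_start) << 16) +
	       dev_id - ep->bdf_start + ep->endpoint_id;
}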
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 816bf2c34b7a..3a308461246a 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -378,19 +378,25 @@ static int lps0_device_attach(struct acpi_device *adev,
* AMDI0006:
* - should use rev_id 0x0
* - function mask = 0x3: Should use Microsoft method
+ * AMDI0007:
+	 * - should use rev_id 0x2
+	 * - should only use AMD method
*/
const char *hid = acpi_device_hid(adev);
- rev_id = 0;
+ rev_id = strcmp(hid, "AMDI0007") ? 0 : 2;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+ ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
&lps0_dsm_guid_microsoft);
if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
!strcmp(hid, "AMDI0005"))) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+ } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) {
+ lps0_dsm_func_mask_microsoft = -EINVAL;
+ acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
} else {
rev_id = 1;
@@ -417,11 +423,15 @@ static int lps0_device_attach(struct acpi_device *adev,
mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
- * EC GPE to be enabled while suspended for certain wakeup devices to
- * work, so mark it as wakeup-capable.
+	 * Some Intel-based LPS0 systems, like the ASUS Zenbook UX430UNR/i7-8550U, don't
+ * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
+ * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
+ *
+	 * Only enable on !AMD, as enabling this universally causes problems for a
+	 * number of AMD-based systems.
*/
- acpi_ec_mark_gpe_for_wake();
+ if (!acpi_s2idle_vendor_amd())
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
@@ -442,7 +452,7 @@ int acpi_s2idle_prepare_late(void)
if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -469,7 +479,7 @@ void acpi_s2idle_restore_early(void)
if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b7a5abee2147..a7da8ea7b3ed 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1058,7 +1058,7 @@ config PATA_ISAPNP
config PATA_IXP4XX_CF
tristate "IXP4XX Compact Flash support"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX || COMPILE_TEST
help
This option enables support for a Compact Flash connected on
the ixp4xx expansion bus. This driver had been written for
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index fd8b6febbf70..b9588c52815d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -196,9 +196,7 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
if (!cmd)
return;
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
- scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
+ scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
void ata_scsi_set_sense_information(struct ata_device *dev,
@@ -409,13 +407,16 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
- if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+ if (cmd_result < 0) {
+ rc = cmd_result;
+ goto error;
+ }
+ if (scsi_sense_valid(&sshdr)) {/* sense data available */
u8 *desc = sensebuf + 8;
- cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
- if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+ if (scsi_status_is_check_condition(cmd_result)) {
if (sshdr.sense_key == RECOVERED_ERROR &&
sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
@@ -490,9 +491,12 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
- if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+ if (cmd_result < 0) {
+ rc = cmd_result;
+ goto error;
+ }
+ if (scsi_sense_valid(&sshdr)) {/* sense data available */
u8 *desc = sensebuf + 8;
- cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
@@ -638,7 +642,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
if (cmd->request->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
} else {
- cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+ cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
cmd->scsi_done(cmd);
}
@@ -858,8 +862,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
/*
* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
@@ -874,8 +876,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ATA PASS-THROUGH INFORMATION AVAILABLE
* Always in descriptor format sense.
*/
- scsi_build_sense_buffer(1, cmd->sense_buffer,
- RECOVERED_ERROR, 0, 0x1D);
+ scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
@@ -957,8 +958,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
if (ata_dev_disabled(dev)) {
/* Device disabled after error recovery */
/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
@@ -4196,7 +4195,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
case REQUEST_SENSE:
ata_scsi_set_sense(dev, cmd, 0, 0, 0);
- cmd->result = (DRIVER_SENSE << 24);
break;
/* if we reach this, then writeback caching is disabled,
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index ae7189d1a568..b71ea4a680b0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -637,6 +637,20 @@ unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
+static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
+ unsigned int offset, size_t xfer_size)
+{
+ bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ unsigned char *buf;
+
+ buf = kmap_atomic(page);
+ qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
+ kunmap_atomic(buf);
+
+ if (!do_write && !PageSlab(page))
+ flush_dcache_page(page);
+}
+
/**
* ata_pio_sector - Transfer a sector of data.
* @qc: Command on going
@@ -648,11 +662,9 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
*/
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
- int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
struct page *page;
unsigned int offset;
- unsigned char *buf;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -670,13 +682,20 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
- /* do the actual data transfer */
- buf = kmap_atomic(page);
- ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
- kunmap_atomic(buf);
+ /*
+	 * Split the transfer when it crosses a page boundary. Note that the
+ * split still has to be dword aligned like all ATA data transfers.
+ */
+ WARN_ON_ONCE(offset % 4);
+ if (offset + qc->sect_size > PAGE_SIZE) {
+ unsigned int split_len = PAGE_SIZE - offset;
- if (!do_write && !PageSlab(page))
- flush_dcache_page(page);
+ ata_pio_xfer(qc, page, offset, split_len);
+ ata_pio_xfer(qc, nth_page(page, 1), 0,
+ qc->sect_size - split_len);
+ } else {
+ ata_pio_xfer(qc, page, offset, qc->sect_size);
+ }
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
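As a quick worked check of the split above (illustrative numbers only): with 4 KiB pages, a 512-byte sector starting at offset 3840 gives split_len = 4096 - 3840 = 256, so the first ata_pio_xfer() moves the last 256 bytes of the current page and the second moves the remaining 256 bytes from the start of the next page; both chunks stay dword aligned, since 3840 and 256 are multiples of 4.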
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 43215a4c1e54..5881d64af943 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -17,6 +17,7 @@
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/pata_ixp4xx_cf.h>
#include <scsi/scsi_host.h>
#define DRV_NAME "pata_ixp4xx_cf"
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 8b93a7f291ec..ef8e44a7d288 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -30,3 +30,6 @@ obj-y += test/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+obj-$(CONFIG_TRACING) += trace.o
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index c1179edc0f3b..921312a8d957 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -18,10 +18,11 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
-static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
@@ -66,16 +67,20 @@ void topology_set_scale_freq_source(struct scale_freq_data *data,
if (cpumask_empty(&scale_freq_counters_mask))
scale_freq_invariant = topology_scale_freq_invariant();
+ rcu_read_lock();
+
for_each_cpu(cpu, cpus) {
- sfd = per_cpu(sft_data, cpu);
+ sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
/* Use ARCH provided counters whenever possible */
if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
- per_cpu(sft_data, cpu) = data;
+ rcu_assign_pointer(per_cpu(sft_data, cpu), data);
cpumask_set_cpu(cpu, &scale_freq_counters_mask);
}
}
+ rcu_read_unlock();
+
update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
@@ -86,22 +91,32 @@ void topology_clear_scale_freq_source(enum scale_freq_source source,
struct scale_freq_data *sfd;
int cpu;
+ rcu_read_lock();
+
for_each_cpu(cpu, cpus) {
- sfd = per_cpu(sft_data, cpu);
+ sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
if (sfd && sfd->source == source) {
- per_cpu(sft_data, cpu) = NULL;
+ rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
}
}
+ rcu_read_unlock();
+
+ /*
+ * Make sure all references to previous sft_data are dropped to avoid
+ * use-after-free races.
+ */
+ synchronize_rcu();
+
update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
void topology_scale_freq_tick(void)
{
- struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+ struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
if (sfd)
sfd->set_freq_scale();
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 9c00d203d61e..01ef796c2055 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -284,8 +284,8 @@ fail:
* matching classdev or fail all of them.
*
* @dev: The generic device to run the trigger for
- * @fn the function to execute for each classdev.
- * @undo A function to undo the work previously done in case of error
+ * @fn: the function to execute for each classdev.
+ * @undo: A function to undo the work previously done in case of error
*
* This function is a safe version of
* attribute_container_device_trigger. It stops on the first error and
@@ -343,7 +343,7 @@ attribute_container_device_trigger_safe(struct device *dev,
* attribute_container_device_trigger - execute a trigger for each matching classdev
*
* @dev: The generic device to run the trigger for
- * @fn the function to execute for each classdev.
+ * @fn: the function to execute for each classdev.
*
* This function is for executing a trigger when you need to know both
* the container and the classdev. If you only care about the
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index adc199dfba3c..6a30264ab2ba 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
{
+ int ret;
+
if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;
@@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
auxdrv->driver.bus = &auxiliary_bus_type;
auxdrv->driver.mod_name = modname;
- return driver_register(&auxdrv->driver);
+ ret = driver_register(&auxdrv->driver);
+ if (ret)
+ kfree(auxdrv->driver.name);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e5f9b7e656c3..404db83ee5ec 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -152,7 +152,6 @@ extern int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups);
extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
-int device_driver_attach(struct device_driver *drv, struct device *dev);
void device_driver_detach(struct device *dev);
extern char *make_class_name(const char *name, struct kobject *kobj);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 36d0c654ea61..1f6b4bd61056 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -210,15 +210,11 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
int err = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
- if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
+ if (dev && driver_match_device(drv, dev)) {
err = device_driver_attach(drv, dev);
-
- if (err > 0) {
+ if (!err) {
/* success */
err = count;
- } else if (err == 0) {
- /* driver didn't accept device */
- err = -ENODEV;
}
}
put_device(dev);
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 272ba42392f0..5e79299f6c3f 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -63,7 +63,7 @@ struct master {
bool bound;
const struct component_master_ops *ops;
- struct device *dev;
+ struct device *parent;
struct component_match *match;
};
@@ -95,7 +95,7 @@ static int component_devices_show(struct seq_file *s, void *data)
seq_printf(s, "%-40s %20s\n", "master name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
seq_printf(s, "%-40s %20s\n\n",
- dev_name(m->dev), m->bound ? "bound" : "not bound");
+ dev_name(m->parent), m->bound ? "bound" : "not bound");
seq_printf(s, "%-40s %20s\n", "device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
@@ -124,13 +124,13 @@ core_initcall(component_debug_init);
static void component_master_debugfs_add(struct master *m)
{
- debugfs_create_file(dev_name(m->dev), 0444, component_debugfs_dir, m,
+ debugfs_create_file(dev_name(m->parent), 0444, component_debugfs_dir, m,
&component_devices_fops);
}
static void component_master_debugfs_del(struct master *m)
{
- debugfs_remove(debugfs_lookup(dev_name(m->dev), component_debugfs_dir));
+ debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
}
#else
@@ -143,13 +143,13 @@ static void component_master_debugfs_del(struct master *m)
#endif
-static struct master *__master_find(struct device *dev,
+static struct master *__master_find(struct device *parent,
const struct component_master_ops *ops)
{
struct master *m;
list_for_each_entry(m, &masters, node)
- if (m->dev == dev && (!ops || m->ops == ops))
+ if (m->parent == parent && (!ops || m->ops == ops))
return m;
return NULL;
@@ -189,7 +189,7 @@ static int find_components(struct master *master)
struct component_match_array *mc = &match->compare[i];
struct component *c;
- dev_dbg(master->dev, "Looking for component %zu\n", i);
+ dev_dbg(master->parent, "Looking for component %zu\n", i);
if (match->compare[i].component)
continue;
@@ -200,7 +200,7 @@ static int find_components(struct master *master)
break;
}
- dev_dbg(master->dev, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master);
+ dev_dbg(master->parent, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master);
/* Attach this component to the master */
match->compare[i].duplicate = !!c->master;
@@ -233,28 +233,28 @@ static int try_to_bring_up_master(struct master *master,
{
int ret;
- dev_dbg(master->dev, "trying to bring up master\n");
+ dev_dbg(master->parent, "trying to bring up master\n");
if (find_components(master)) {
- dev_dbg(master->dev, "master has incomplete components\n");
+ dev_dbg(master->parent, "master has incomplete components\n");
return 0;
}
if (component && component->master != master) {
- dev_dbg(master->dev, "master is not for this component (%s)\n",
+ dev_dbg(master->parent, "master is not for this component (%s)\n",
dev_name(component->dev));
return 0;
}
- if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+ if (!devres_open_group(master->parent, NULL, GFP_KERNEL))
return -ENOMEM;
/* Found all components */
- ret = master->ops->bind(master->dev);
+ ret = master->ops->bind(master->parent);
if (ret < 0) {
- devres_release_group(master->dev, NULL);
+ devres_release_group(master->parent, NULL);
if (ret != -EPROBE_DEFER)
- dev_info(master->dev, "master bind failed: %d\n", ret);
+ dev_info(master->parent, "master bind failed: %d\n", ret);
return ret;
}
@@ -281,34 +281,28 @@ static int try_to_bring_up_masters(struct component *component)
static void take_down_master(struct master *master)
{
if (master->bound) {
- master->ops->unbind(master->dev);
- devres_release_group(master->dev, NULL);
+ master->ops->unbind(master->parent);
+ devres_release_group(master->parent, NULL);
master->bound = false;
}
}
-static void component_match_release(struct device *master,
- struct component_match *match)
+static void devm_component_match_release(struct device *parent, void *res)
{
+ struct component_match *match = res;
unsigned int i;
for (i = 0; i < match->num; i++) {
struct component_match_array *mc = &match->compare[i];
if (mc->release)
- mc->release(master, mc->data);
+ mc->release(parent, mc->data);
}
kfree(match->compare);
}
-static void devm_component_match_release(struct device *dev, void *res)
-{
- component_match_release(dev, res);
-}
-
-static int component_match_realloc(struct device *dev,
- struct component_match *match, size_t num)
+static int component_match_realloc(struct component_match *match, size_t num)
{
struct component_match_array *new;
@@ -359,7 +353,7 @@ static void __component_match_add(struct device *master,
size_t new_size = match->alloc + 16;
int ret;
- ret = component_match_realloc(master, match, new_size);
+ ret = component_match_realloc(match, new_size);
if (ret) {
*matchptr = ERR_PTR(ret);
return;
@@ -451,7 +445,7 @@ static void free_master(struct master *master)
/**
* component_master_add_with_match - register an aggregate driver
- * @dev: device with the aggregate driver
+ * @parent: parent device of the aggregate driver
* @ops: callbacks for the aggregate driver
* @match: component match list for the aggregate driver
*
@@ -461,7 +455,7 @@ static void free_master(struct master *master)
* &component_master_ops.bind from @ops. Must be unregistered by calling
* component_master_del().
*/
-int component_master_add_with_match(struct device *dev,
+int component_master_add_with_match(struct device *parent,
const struct component_master_ops *ops,
struct component_match *match)
{
@@ -469,7 +463,7 @@ int component_master_add_with_match(struct device *dev,
int ret;
/* Reallocate the match array for its true size */
- ret = component_match_realloc(dev, match, match->num);
+ ret = component_match_realloc(match, match->num);
if (ret)
return ret;
@@ -477,7 +471,7 @@ int component_master_add_with_match(struct device *dev,
if (!master)
return -ENOMEM;
- master->dev = dev;
+ master->parent = parent;
master->ops = ops;
master->match = match;
@@ -499,20 +493,20 @@ EXPORT_SYMBOL_GPL(component_master_add_with_match);
/**
* component_master_del - unregister an aggregate driver
- * @dev: device with the aggregate driver
+ * @parent: parent device of the aggregate driver
* @ops: callbacks for the aggregate driver
*
* Unregisters an aggregate driver registered with
* component_master_add_with_match(). If necessary the aggregate driver is first
* disassembled by calling &component_master_ops.unbind from @ops.
*/
-void component_master_del(struct device *dev,
+void component_master_del(struct device *parent,
const struct component_master_ops *ops)
{
struct master *master;
mutex_lock(&component_mutex);
- master = __master_find(dev, ops);
+ master = __master_find(parent, ops);
if (master) {
take_down_master(master);
free_master(master);
@@ -527,7 +521,7 @@ static void component_unbind(struct component *component,
WARN_ON(!component->bound);
if (component->ops && component->ops->unbind)
- component->ops->unbind(component->dev, master->dev, data);
+ component->ops->unbind(component->dev, master->parent, data);
component->bound = false;
/* Release all resources claimed in the binding of this component */
@@ -536,14 +530,14 @@ static void component_unbind(struct component *component,
/**
* component_unbind_all - unbind all components of an aggregate driver
- * @master_dev: device with the aggregate driver
+ * @parent: parent device of the aggregate driver
* @data: opaque pointer, passed to all components
*
- * Unbinds all components of the aggregate @dev by passing @data to their
+ * Unbinds all components of the aggregate device by passing @data to their
* &component_ops.unbind functions. Should be called from
* &component_master_ops.unbind.
*/
-void component_unbind_all(struct device *master_dev, void *data)
+void component_unbind_all(struct device *parent, void *data)
{
struct master *master;
struct component *c;
@@ -551,7 +545,7 @@ void component_unbind_all(struct device *master_dev, void *data)
WARN_ON(!mutex_is_locked(&component_mutex));
- master = __master_find(master_dev, NULL);
+ master = __master_find(parent, NULL);
if (!master)
return;
@@ -574,7 +568,7 @@ static int component_bind(struct component *component, struct master *master,
* This allows us to roll-back a failed component without
* affecting anything else.
*/
- if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+ if (!devres_open_group(master->parent, NULL, GFP_KERNEL))
return -ENOMEM;
/*
@@ -583,14 +577,14 @@ static int component_bind(struct component *component, struct master *master,
* at the appropriate moment.
*/
if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
- devres_release_group(master->dev, NULL);
+ devres_release_group(master->parent, NULL);
return -ENOMEM;
}
- dev_dbg(master->dev, "binding %s (ops %ps)\n",
+ dev_dbg(master->parent, "binding %s (ops %ps)\n",
dev_name(component->dev), component->ops);
- ret = component->ops->bind(component->dev, master->dev, data);
+ ret = component->ops->bind(component->dev, master->parent, data);
if (!ret) {
component->bound = true;
@@ -601,16 +595,16 @@ static int component_bind(struct component *component, struct master *master,
* can clean those resources up independently.
*/
devres_close_group(component->dev, NULL);
- devres_remove_group(master->dev, NULL);
+ devres_remove_group(master->parent, NULL);
- dev_info(master->dev, "bound %s (ops %ps)\n",
+ dev_info(master->parent, "bound %s (ops %ps)\n",
dev_name(component->dev), component->ops);
} else {
devres_release_group(component->dev, NULL);
- devres_release_group(master->dev, NULL);
+ devres_release_group(master->parent, NULL);
if (ret != -EPROBE_DEFER)
- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_err(master->parent, "failed to bind %s (ops %ps): %d\n",
dev_name(component->dev), component->ops, ret);
}
@@ -619,14 +613,14 @@ static int component_bind(struct component *component, struct master *master,
/**
* component_bind_all - bind all components of an aggregate driver
- * @master_dev: device with the aggregate driver
+ * @parent: parent device of the aggregate driver
* @data: opaque pointer, passed to all components
*
* Binds all components of the aggregate @dev by passing @data to their
* &component_ops.bind functions. Should be called from
* &component_master_ops.bind.
*/
-int component_bind_all(struct device *master_dev, void *data)
+int component_bind_all(struct device *parent, void *data)
{
struct master *master;
struct component *c;
@@ -635,7 +629,7 @@ int component_bind_all(struct device *master_dev, void *data)
WARN_ON(!mutex_is_locked(&component_mutex));
- master = __master_find(master_dev, NULL);
+ master = __master_find(parent, NULL);
if (!master)
return -EINVAL;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2a61003ea2c1..6c0ef9d55a34 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- sysfs_remove_link(&con->kobj, buf);
+ if (device_is_registered(con)) {
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ sysfs_remove_link(&con->kobj, buf);
+ }
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
@@ -2409,6 +2411,25 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(online);
+static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *loc;
+
+ switch (dev->removable) {
+ case DEVICE_REMOVABLE:
+ loc = "removable";
+ break;
+ case DEVICE_FIXED:
+ loc = "fixed";
+ break;
+ default:
+ loc = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", loc);
+}
+static DEVICE_ATTR_RO(removable);
+
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
@@ -2586,8 +2607,16 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_online;
}
+ if (dev_removable_is_valid(dev)) {
+ error = device_create_file(dev, &dev_attr_removable);
+ if (error)
+ goto err_remove_dev_waiting_for_supplier;
+ }
+
return 0;
+ err_remove_dev_waiting_for_supplier:
+ device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
@@ -2607,6 +2636,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_removable);
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@@ -2807,6 +2837,7 @@ void device_initialize(struct device *dev)
device_pm_init(dev);
set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
+ raw_spin_lock_init(&dev->msi_lock);
INIT_LIST_HEAD(&dev->msi_list);
#endif
INIT_LIST_HEAD(&dev->links.consumers);
@@ -3442,7 +3473,7 @@ bool kill_device(struct device *dev)
* to run while we are tearing out the bus/class/sysfs from
* underneath the device.
*/
- lockdep_assert_held(&dev->mutex);
+ device_lock_assert(dev);
if (dev->p->dead)
return false;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2b9e41377a07..5ef14db97904 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -175,7 +175,7 @@ static struct attribute *crash_note_cpu_attrs[] = {
NULL
};
-static struct attribute_group crash_note_cpu_attr_group = {
+static const struct attribute_group crash_note_cpu_attr_group = {
.attrs = crash_note_cpu_attrs,
};
#endif
@@ -475,7 +475,7 @@ static struct attribute *cpu_root_attrs[] = {
NULL
};
-static struct attribute_group cpu_root_attr_group = {
+static const struct attribute_group cpu_root_attr_group = {
.attrs = cpu_root_attrs,
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index ecd7cf848daf..437cd61343b2 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -471,6 +471,8 @@ static void driver_sysfs_remove(struct device *dev)
* (It is ok to call with no other effort from a driver's probe() method.)
*
* This function must be called with the device lock held.
+ *
+ * Callers should prefer to use device_driver_attach() instead.
*/
int device_bind_driver(struct device *dev)
{
@@ -491,15 +493,6 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
-static void driver_deferred_probe_add_trigger(struct device *dev,
- int local_trigger_count)
-{
- driver_deferred_probe_add(dev);
- /* Did a trigger occur while probing? Need to re-trigger if yes */
- if (local_trigger_count != atomic_read(&deferred_trigger_count))
- driver_deferred_probe_trigger();
-}
-
static ssize_t state_synced_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -513,12 +506,43 @@ static ssize_t state_synced_show(struct device *dev,
}
static DEVICE_ATTR_RO(state_synced);
+
+static int call_driver_probe(struct device *dev, struct device_driver *drv)
+{
+ int ret = 0;
+
+ if (dev->bus->probe)
+ ret = dev->bus->probe(dev);
+ else if (drv->probe)
+ ret = drv->probe(dev);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EPROBE_DEFER:
+ /* Driver requested deferred probing */
+ dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
+ break;
+ case -ENODEV:
+ case -ENXIO:
+ pr_debug("%s: probe of %s rejects match %d\n",
+ drv->name, dev_name(dev), ret);
+ break;
+ default:
+ /* driver matched but the probe failed */
+ pr_warn("%s: probe of %s failed with error %d\n",
+ drv->name, dev_name(dev), ret);
+ break;
+ }
+
+ return ret;
+}
+
static int really_probe(struct device *dev, struct device_driver *drv)
{
- int ret = -EPROBE_DEFER;
- int local_trigger_count = atomic_read(&deferred_trigger_count);
bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
!drv->suppress_bind_attrs;
+ int ret;
if (defer_all_probes) {
/*
@@ -527,17 +551,13 @@ static int really_probe(struct device *dev, struct device_driver *drv)
* wait_for_device_probe() right after that to avoid any races.
*/
dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
- driver_deferred_probe_add(dev);
- return ret;
+ return -EPROBE_DEFER;
}
ret = device_links_check_suppliers(dev);
- if (ret == -EPROBE_DEFER)
- driver_deferred_probe_add_trigger(dev, local_trigger_count);
if (ret)
return ret;
- atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
if (!list_empty(&dev->devres_head)) {
@@ -572,14 +592,14 @@ re_probe:
goto probe_failed;
}
- if (dev->bus->probe) {
- ret = dev->bus->probe(dev);
- if (ret)
- goto probe_failed;
- } else if (drv->probe) {
- ret = drv->probe(dev);
- if (ret)
- goto probe_failed;
+ ret = call_driver_probe(dev, drv);
+ if (ret) {
+ /*
+ * Return probe errors as positive values so that the callers
+ * can distinguish them from other errors.
+ */
+ ret = -ret;
+ goto probe_failed;
}
if (device_add_groups(dev, drv->dev_groups)) {
@@ -621,7 +641,6 @@ re_probe:
dev->pm_domain->sync(dev);
driver_bound(dev);
- ret = 1;
pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
goto done;
@@ -634,8 +653,6 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
- kfree(dev->dma_range_map);
- dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -643,6 +660,8 @@ pinctrl_bind_failed:
device_links_no_driver(dev);
devres_release_all(dev);
arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -650,31 +669,7 @@ pinctrl_bind_failed:
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
dev_pm_set_driver_flags(dev, 0);
-
- switch (ret) {
- case -EPROBE_DEFER:
- /* Driver requested deferred probing */
- dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
- driver_deferred_probe_add_trigger(dev, local_trigger_count);
- break;
- case -ENODEV:
- case -ENXIO:
- pr_debug("%s: probe of %s rejects match %d\n",
- drv->name, dev_name(dev), ret);
- break;
- default:
- /* driver matched but the probe failed */
- pr_warn("%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
- }
- /*
- * Ignore errors returned by ->probe so that the next driver can try
- * its luck.
- */
- ret = 0;
done:
- atomic_dec(&probe_count);
- wake_up_all(&probe_waitqueue);
return ret;
}
@@ -728,25 +723,14 @@ void wait_for_device_probe(void)
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
-/**
- * driver_probe_device - attempt to bind device & driver together
- * @drv: driver to bind a device to
- * @dev: device to try to bind to the driver
- *
- * This function returns -ENODEV if the device is not registered,
- * 1 if the device is bound successfully and 0 otherwise.
- *
- * This function must be called with @dev lock held. When called for a
- * USB interface, @dev->parent lock must be held as well.
- *
- * If the device has a parent, runtime-resume the parent before driver probing.
- */
-static int driver_probe_device(struct device_driver *drv, struct device *dev)
+static int __driver_probe_device(struct device_driver *drv, struct device *dev)
{
int ret = 0;
- if (!device_is_registered(dev))
+ if (dev->p->dead || !device_is_registered(dev))
return -ENODEV;
+ if (dev->driver)
+ return -EBUSY;
dev->can_match = true;
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
@@ -770,6 +754,42 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
return ret;
}
+/**
+ * driver_probe_device - attempt to bind device & driver together
+ * @drv: driver to bind a device to
+ * @dev: device to try to bind to the driver
+ *
+ * This function returns -ENODEV if the device is not registered, -EBUSY if it
+ * already has a driver, 0 if the device is bound successfully and a positive
+ * (inverted) error code for failures from the ->probe method.
+ *
+ * This function must be called with @dev lock held. When called for a
+ * USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
+ */
+static int driver_probe_device(struct device_driver *drv, struct device *dev)
+{
+ int trigger_count = atomic_read(&deferred_trigger_count);
+ int ret;
+
+ atomic_inc(&probe_count);
+ ret = __driver_probe_device(drv, dev);
+ if (ret == -EPROBE_DEFER || ret == EPROBE_DEFER) {
+ driver_deferred_probe_add(dev);
+
+ /*
+ * Did a trigger occur while probing? Need to re-trigger if yes
+ */
+ if (trigger_count != atomic_read(&deferred_trigger_count) &&
+ !defer_all_probes)
+ driver_deferred_probe_trigger();
+ }
+ atomic_dec(&probe_count);
+ wake_up_all(&probe_waitqueue);
+ return ret;
+}
+
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
return parse_option_str(async_probe_drv_names, drv_name);
@@ -867,7 +887,14 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
if (data->check_async && async_allowed != data->want_async)
return 0;
- return driver_probe_device(drv, dev);
+ /*
+ * Ignore errors returned by ->probe so that the next driver can try
+ * its luck.
+ */
+ ret = driver_probe_device(drv, dev);
+ if (ret < 0)
+ return ret;
+ return ret == 0;
}
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
@@ -1023,43 +1050,34 @@ static void __device_driver_unlock(struct device *dev, struct device *parent)
* @dev: Device to attach it to
*
* Manually attach driver to a device. Will acquire both @dev lock and
- * @dev->parent lock if needed.
+ * @dev->parent lock if needed. Returns 0 on success and a negative errno on failure.
*/
int device_driver_attach(struct device_driver *drv, struct device *dev)
{
- int ret = 0;
+ int ret;
__device_driver_lock(dev, dev->parent);
-
- /*
- * If device has been removed or someone has already successfully
- * bound a driver before us just skip the driver probe call.
- */
- if (!dev->p->dead && !dev->driver)
- ret = driver_probe_device(drv, dev);
-
+ ret = __driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
+ /* also return probe errors as normal negative errnos */
+ if (ret > 0)
+ ret = -ret;
+ if (ret == -EPROBE_DEFER)
+ return -EAGAIN;
return ret;
}
+EXPORT_SYMBOL_GPL(device_driver_attach);
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
struct device *dev = _dev;
struct device_driver *drv;
- int ret = 0;
+ int ret;
__device_driver_lock(dev, dev->parent);
-
drv = dev->p->async_driver;
-
- /*
- * If device has been removed or someone has already successfully
- * bound a driver before us just skip the driver probe call.
- */
- if (!dev->p->dead && !dev->driver)
- ret = driver_probe_device(drv, dev);
-
+ ret = driver_probe_device(drv, dev);
__device_driver_unlock(dev, dev->parent);
dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);
@@ -1114,7 +1132,9 @@ static int __driver_attach(struct device *dev, void *data)
return 0;
}
- device_driver_attach(drv, dev);
+ __device_driver_lock(dev, dev->parent);
+ driver_probe_device(drv, dev);
+ __device_driver_unlock(dev, dev->parent);
return 0;
}
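With the new convention, device_driver_attach() reports success as 0 and failures as negative errnos (probe deferral is mapped to -EAGAIN), so bus callers no longer have to decode the old 1/0/-errno tristate. A hedged fragment of what a caller might now look like; the warning policy is just an example:

/* Illustrative caller; error handling policy is up to the bus. */
int ret = device_driver_attach(drv, dev);

if (ret)
	dev_warn(dev, "failed to bind driver %s: %d\n", drv->name, ret);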
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 8eec0e0ddff7..f4d794d6bb85 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -3,10 +3,6 @@
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
*
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
* Author: Johannes Berg <johannes@sipsolutions.net>
*/
#include <linux/module.h>
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8746f2212781..eaa9a5cd1db9 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -14,14 +14,13 @@
#include <asm/sections.h>
#include "base.h"
+#include "trace.h"
struct devres_node {
struct list_head entry;
dr_release_t release;
-#ifdef CONFIG_DEBUG_DEVRES
const char *name;
size_t size;
-#endif
};
struct devres {
@@ -43,10 +42,6 @@ struct devres_group {
/* -- 8 pointers */
};
-#ifdef CONFIG_DEBUG_DEVRES
-static int log_devres = 0;
-module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
-
static void set_node_dbginfo(struct devres_node *node, const char *name,
size_t size)
{
@@ -54,7 +49,11 @@ static void set_node_dbginfo(struct devres_node *node, const char *name,
node->size = size;
}
-static void devres_log(struct device *dev, struct devres_node *node,
+#ifdef CONFIG_DEBUG_DEVRES
+static int log_devres = 0;
+module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
+
+static void devres_dbg(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
@@ -62,10 +61,16 @@ static void devres_log(struct device *dev, struct devres_node *node,
op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
-#define set_node_dbginfo(node, n, s) do {} while (0)
-#define devres_log(dev, node, op) do {} while (0)
+#define devres_dbg(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
+static void devres_log(struct device *dev, struct devres_node *node,
+ const char *op)
+{
+ trace_devres_log(dev, op, node, node->name, node->size);
+ devres_dbg(dev, node, op);
+}
+
/*
* Release functions for devres group. These callbacks are used only
* for identification.
@@ -134,26 +139,13 @@ static void replace_dr(struct device *dev,
list_replace(&old->entry, &new->entry);
}
-#ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
- const char *name)
-{
- struct devres *dr;
-
- dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
- if (unlikely(!dr))
- return NULL;
- set_node_dbginfo(&dr->node, name, size);
- return dr->data;
-}
-EXPORT_SYMBOL_GPL(__devres_alloc_node);
-#else
/**
- * devres_alloc_node - Allocate device resource data
+ * __devres_alloc_node - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
* @nid: NUMA node
+ * @name: Name of the resource
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
@@ -162,17 +154,18 @@ EXPORT_SYMBOL_GPL(__devres_alloc_node);
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
-void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
+ const char *name)
{
struct devres *dr;
dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
+ set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devres_alloc_node);
-#endif
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
/**
* devres_for_each_res - Resource iterator
@@ -438,20 +431,16 @@ static int remove_nodes(struct device *dev,
struct list_head *first, struct list_head *end,
struct list_head *todo)
{
+ struct devres_node *node, *n;
int cnt = 0, nr_groups = 0;
- struct list_head *cur;
/* First pass - move normal devres entries to @todo and clear
* devres_group colors.
*/
- cur = first;
- while (cur != end) {
- struct devres_node *node;
+ node = list_entry(first, struct devres_node, entry);
+ list_for_each_entry_safe_from(node, n, end, entry) {
struct devres_group *grp;
- node = list_entry(cur, struct devres_node, entry);
- cur = cur->next;
-
grp = node_to_group(node);
if (grp) {
/* clear color of group markers in the first pass */
@@ -471,18 +460,14 @@ static int remove_nodes(struct device *dev,
/* Second pass - Scan groups and color them. A group gets
* color value of two iff the group is wholly contained in
- * [cur, end). That is, for a closed group, both opening and
- * closing markers should be in the range, while just the
+ * [current node, end). That is, for a closed group, both opening
+ * and closing markers should be in the range, while just the
* opening marker is enough for an open group.
*/
- cur = first;
- while (cur != end) {
- struct devres_node *node;
+ node = list_entry(first, struct devres_node, entry);
+ list_for_each_entry_safe_from(node, n, end, entry) {
struct devres_group *grp;
- node = list_entry(cur, struct devres_node, entry);
- cur = cur->next;
-
grp = node_to_group(node);
BUG_ON(!grp || list_empty(&grp->node[0].entry));
@@ -492,7 +477,7 @@ static int remove_nodes(struct device *dev,
BUG_ON(grp->color <= 0 || grp->color > 2);
if (grp->color == 2) {
- /* No need to update cur or end. The removed
+ /* No need to update current node or end. The removed
* nodes are always before both.
*/
list_move_tail(&grp->node[0].entry, todo);
@@ -503,28 +488,18 @@ static int remove_nodes(struct device *dev,
return cnt;
}
-static int release_nodes(struct device *dev, struct list_head *first,
- struct list_head *end, unsigned long flags)
- __releases(&dev->devres_lock)
+static void release_nodes(struct device *dev, struct list_head *todo)
{
- LIST_HEAD(todo);
- int cnt;
struct devres *dr, *tmp;
- cnt = remove_nodes(dev, first, end, &todo);
-
- spin_unlock_irqrestore(&dev->devres_lock, flags);
-
/* Release. Note that both devres and devres_group are
* handled as devres in the following loop. This is safe.
*/
- list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
+ list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
devres_log(dev, &dr->node, "REL");
dr->node.release(dev, dr->data);
kfree(dr);
}
-
- return cnt;
}
/**
@@ -537,13 +512,23 @@ static int release_nodes(struct device *dev, struct list_head *first,
int devres_release_all(struct device *dev)
{
unsigned long flags;
+ LIST_HEAD(todo);
+ int cnt;
/* Looks like an uninitialized device structure */
if (WARN_ON(dev->devres_head.next == NULL))
return -ENODEV;
+
+ /* Nothing to release if list is empty */
+ if (list_empty(&dev->devres_head))
+ return 0;
+
spin_lock_irqsave(&dev->devres_lock, flags);
- return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
- flags);
+ cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ release_nodes(dev, &todo);
+ return cnt;
}
/**
@@ -679,6 +664,7 @@ int devres_release_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
+ LIST_HEAD(todo);
int cnt = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
@@ -691,7 +677,10 @@ int devres_release_group(struct device *dev, void *id)
if (!list_empty(&grp->node[1].entry))
end = grp->node[1].entry.next;
- cnt = release_nodes(dev, first, end, flags);
+ cnt = remove_nodes(dev, first, end, &todo);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ release_nodes(dev, &todo);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
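
The devres rework above confines remove_nodes() to the devres_lock critical section and runs the release callbacks from a private list only after the lock is dropped. A stripped-down sketch of that pattern, using demo names that are not in the kernel:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_res {
        struct list_head entry;
        void (*release)(void *data);
        void *data;
};

static void demo_release_all(struct list_head *head, spinlock_t *lock)
{
        struct demo_res *res, *tmp;
        unsigned long flags;
        LIST_HEAD(todo);

        /* Only cheap list surgery happens under the spinlock. */
        spin_lock_irqsave(lock, flags);
        list_splice_init(head, &todo);
        spin_unlock_irqrestore(lock, flags);

        /*
         * Release callbacks can do work that is not allowed under a
         * spinlock, so run them only after the lock has been dropped.
         */
        list_for_each_entry_safe_reverse(res, tmp, &todo, entry) {
                res->release(res->data);
                kfree(res);
        }
}
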
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 5fa7ce3745a0..101754ad48d9 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -8,7 +8,6 @@ fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
-comma := ,
FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 91899d185e31..d7d63c1aa993 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -89,12 +89,11 @@ static void __fw_load_abort(struct fw_priv *fw_priv)
{
/*
* There is a small window in which user can write to 'loading'
- * between loading done and disappearance of 'loading'
+ * between loading done/aborted and disappearance of 'loading'
*/
- if (fw_sysfs_done(fw_priv))
+ if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
return;
- list_del_init(&fw_priv->pending_list);
fw_state_aborted(fw_priv);
}
@@ -280,7 +279,6 @@ static ssize_t firmware_loading_store(struct device *dev,
* Same logic as fw_load_abort, only the DONE bit
* is ignored and we set ABORT only on failure.
*/
- list_del_init(&fw_priv->pending_list);
if (rc) {
fw_state_aborted(fw_priv);
written = rc;
@@ -513,6 +511,11 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
}
mutex_lock(&fw_lock);
+ if (fw_state_is_aborted(fw_priv)) {
+ mutex_unlock(&fw_lock);
+ retval = -EINTR;
+ goto out;
+ }
list_add(&fw_priv->pending_list, &pending_fw_head);
mutex_unlock(&fw_lock);
@@ -535,11 +538,10 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
if (fw_state_is_aborted(fw_priv)) {
if (retval == -ERESTARTSYS)
retval = -EINTR;
- else
- retval = -EAGAIN;
} else if (fw_priv->is_paged_buf && !fw_priv->data)
retval = -ENOMEM;
+out:
device_del(f_dev);
err_put_dev:
put_device(f_dev);
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 63bd29fdcb9c..a3014e9e2c85 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -117,8 +117,16 @@ static inline void __fw_state_set(struct fw_priv *fw_priv,
WRITE_ONCE(fw_st->status, status);
- if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ /*
+ * Doing this here ensures that the fw_priv is deleted from
+ * the pending list in all abort/done paths.
+ */
+ list_del_init(&fw_priv->pending_list);
+#endif
complete_all(&fw_st->completion);
+ }
}
static inline void fw_state_aborted(struct fw_priv *fw_priv)
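
The firmware.h hunk above centralizes the pending-list removal in __fw_state_set(), so every done/abort path performs it. That is only safe because list_del_init() leaves the node pointing at itself and can therefore be repeated harmlessly; a tiny standalone illustration with demo names:

#include <linux/list.h>

struct demo_req {
        struct list_head pending_list;  /* INIT_LIST_HEAD()ed at allocation */
};

static void demo_complete(struct demo_req *req)
{
        /* Safe whether or not the request is still on a pending list ... */
        list_del_init(&req->pending_list);
        /* ... and a harmless no-op if another done/abort path ran first. */
        list_del_init(&req->pending_list);
}
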
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 4fdb8219cd08..68c549d71230 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -783,8 +783,10 @@ static void fw_abort_batch_reqs(struct firmware *fw)
return;
fw_priv = fw->priv;
+ mutex_lock(&fw_lock);
if (!fw_state_is_aborted(fw_priv))
fw_state_aborted(fw_priv);
+ mutex_unlock(&fw_lock);
}
/* called from request_firmware() and request_firmware_work_func() */
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index d5ffaab3cb61..aa31a21f33d7 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -596,7 +596,7 @@ static struct attribute *memory_memblk_attrs[] = {
NULL
};
-static struct attribute_group memory_memblk_attr_group = {
+static const struct attribute_group memory_memblk_attr_group = {
.attrs = memory_memblk_attrs,
};
@@ -772,7 +772,7 @@ static struct attribute *memory_root_attrs[] = {
NULL
};
-static struct attribute_group memory_root_attr_group = {
+static const struct attribute_group memory_root_attr_group = {
.attrs = memory_root_attrs,
};
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 9db297431b97..4a4ae868ad9f 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -233,7 +233,7 @@ static ssize_t name##_show(struct device *dev, \
return sysfs_emit(buf, fmt "\n", \
to_cache_info(dev)->cache_attrs.name); \
} \
-DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(name);
CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
@@ -1038,7 +1038,7 @@ static struct attribute *node_state_attrs[] = {
NULL
};
-static struct attribute_group memory_root_attr_group = {
+static const struct attribute_group memory_root_attr_group = {
.attrs = node_state_attrs,
};
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9cd34def2237..8640578f45e9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -125,26 +125,6 @@ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
/**
- * devm_platform_ioremap_resource_wc - write-combined variant of
- * devm_platform_ioremap_resource()
- *
- * @pdev: platform device to use both for memory resource lookup as well as
- * resource management
- * @index: resource index
- *
- * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure.
- */
-void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
- unsigned int index)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- return devm_ioremap_resource_wc(&pdev->dev, res);
-}
-
-/**
* devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
* a platform device, retrieve the
* resource by name
@@ -1355,7 +1335,7 @@ static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute
return a->mode;
}
-static struct attribute_group platform_dev_group = {
+static const struct attribute_group platform_dev_group = {
.attrs = platform_dev_attrs,
.is_visible = platform_dev_attrs_visible,
};
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ab0b740cc0f1..a934c679e6ce 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2018,8 +2018,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
- genpd_debug_add(genpd);
mutex_unlock(&gpd_list_lock);
+ genpd_debug_add(genpd);
return 0;
}
@@ -2206,12 +2206,19 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
static bool genpd_present(const struct generic_pm_domain *genpd)
{
+ bool ret = false;
const struct generic_pm_domain *gpd;
- list_for_each_entry(gpd, &gpd_list, gpd_list_node)
- if (gpd == genpd)
- return true;
- return false;
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (gpd == genpd) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
}
/**
@@ -2222,15 +2229,13 @@ static bool genpd_present(const struct generic_pm_domain *genpd)
int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
{
- int ret = -EINVAL;
+ int ret;
if (!np || !genpd)
return -EINVAL;
- mutex_lock(&gpd_list_lock);
-
if (!genpd_present(genpd))
- goto unlock;
+ return -EINVAL;
genpd->dev.of_node = np;
@@ -2241,7 +2246,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
if (ret != -EPROBE_DEFER)
dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
ret);
- goto unlock;
+ return ret;
}
/*
@@ -2259,16 +2264,13 @@ int of_genpd_add_provider_simple(struct device_node *np,
dev_pm_opp_of_remove_table(&genpd->dev);
}
- goto unlock;
+ return ret;
}
genpd->provider = &np->fwnode;
genpd->has_provider = true;
-unlock:
- mutex_unlock(&gpd_list_lock);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
@@ -2287,8 +2289,6 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (!np || !data)
return -EINVAL;
- mutex_lock(&gpd_list_lock);
-
if (!data->xlate)
data->xlate = genpd_xlate_onecell;
@@ -2328,8 +2328,6 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (ret < 0)
goto error;
- mutex_unlock(&gpd_list_lock);
-
return 0;
error:
@@ -2348,8 +2346,6 @@ error:
}
}
- mutex_unlock(&gpd_list_lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f893c3c5af07..d568772152c2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -220,16 +220,13 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
void *cb, int error)
{
ktime_t rettime;
- s64 nsecs;
if (!pm_print_times_enabled)
return;
rettime = ktime_get();
- nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
-
dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
- (unsigned long long)nsecs >> 10);
+ (unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1f533b314efc..d0874f6c29bb 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -627,14 +627,15 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
*/
struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
{
- struct device *dev = NULL;
+ struct device *dev;
fwnode_handle_get(fwnode);
do {
fwnode = fwnode_get_next_parent(fwnode);
- if (fwnode)
- dev = get_dev_from_fwnode(fwnode);
- } while (fwnode && !dev);
+ if (!fwnode)
+ return NULL;
+ dev = get_dev_from_fwnode(fwnode);
+ } while (!dev);
fwnode_handle_put(fwnode);
return dev;
}
@@ -742,10 +743,9 @@ fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
do {
next_child = fwnode_get_next_child_node(fwnode, next_child);
-
- if (!next_child || fwnode_device_is_available(next_child))
- break;
- } while (next_child);
+ if (!next_child)
+ return NULL;
+ } while (!fwnode_device_is_available(next_child));
return next_child;
}
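
The reworked loop above is what backs fwnode_for_each_available_child_node(), so iterations only ever see children the firmware reports as available. A consumer-side sketch (demo_count_available_children() is illustrative; the iterator is the real one):

#include <linux/property.h>

static unsigned int demo_count_available_children(struct fwnode_handle *parent)
{
        struct fwnode_handle *child;
        unsigned int n = 0;

        fwnode_for_each_available_child_node(parent, child)
                n++;

        return n;
}
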
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
index 1106fedcceed..6071d5bc128c 100644
--- a/drivers/base/test/property-entry-test.c
+++ b/drivers/base/test/property-entry-test.c
@@ -32,11 +32,11 @@ static void pe_test_uints(struct kunit *test)
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+ KUNIT_EXPECT_EQ(test, val_u8, 8);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, array_u8[0], 8);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
KUNIT_EXPECT_NE(test, error, 0);
@@ -49,14 +49,14 @@ static void pe_test_uints(struct kunit *test)
error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+ KUNIT_EXPECT_EQ(test, val_u16, 16);
error = fwnode_property_count_u16(node, "prop-u16");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, array_u16[0], 16);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
KUNIT_EXPECT_NE(test, error, 0);
@@ -69,14 +69,14 @@ static void pe_test_uints(struct kunit *test)
error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+ KUNIT_EXPECT_EQ(test, val_u32, 32);
error = fwnode_property_count_u32(node, "prop-u32");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, array_u32[0], 32);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
KUNIT_EXPECT_NE(test, error, 0);
@@ -89,14 +89,14 @@ static void pe_test_uints(struct kunit *test)
error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+ KUNIT_EXPECT_EQ(test, val_u64, 64);
error = fwnode_property_count_u64(node, "prop-u64");
KUNIT_EXPECT_EQ(test, error, 1);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, array_u64[0], 64);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
KUNIT_EXPECT_NE(test, error, 0);
@@ -140,19 +140,19 @@ static void pe_test_uint_arrays(struct kunit *test)
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+ KUNIT_EXPECT_EQ(test, val_u8, 8);
error = fwnode_property_count_u8(node, "prop-u8");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, array_u8[0], 8);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
- KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9);
+ KUNIT_EXPECT_EQ(test, array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, array_u8[1], 9);
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
KUNIT_EXPECT_NE(test, error, 0);
@@ -165,19 +165,19 @@ static void pe_test_uint_arrays(struct kunit *test)
error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+ KUNIT_EXPECT_EQ(test, val_u16, 16);
error = fwnode_property_count_u16(node, "prop-u16");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, array_u16[0], 16);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
- KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17);
+ KUNIT_EXPECT_EQ(test, array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, array_u16[1], 17);
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
KUNIT_EXPECT_NE(test, error, 0);
@@ -190,19 +190,19 @@ static void pe_test_uint_arrays(struct kunit *test)
error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+ KUNIT_EXPECT_EQ(test, val_u32, 32);
error = fwnode_property_count_u32(node, "prop-u32");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, array_u32[0], 32);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
- KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33);
+ KUNIT_EXPECT_EQ(test, array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, array_u32[1], 33);
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
KUNIT_EXPECT_NE(test, error, 0);
@@ -215,19 +215,19 @@ static void pe_test_uint_arrays(struct kunit *test)
error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+ KUNIT_EXPECT_EQ(test, val_u64, 64);
error = fwnode_property_count_u64(node, "prop-u64");
KUNIT_EXPECT_EQ(test, error, 10);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, array_u64[0], 64);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
KUNIT_EXPECT_EQ(test, error, 0);
- KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
- KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65);
+ KUNIT_EXPECT_EQ(test, array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, array_u64[1], 65);
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
KUNIT_EXPECT_NE(test, error, 0);
@@ -358,13 +358,13 @@ static void pe_test_move_inline_u8(struct kunit *test)
KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
data_ptr = (u8 *)&copy[0].value;
- KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1);
- KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2);
+ KUNIT_EXPECT_EQ(test, data_ptr[0], 1);
+ KUNIT_EXPECT_EQ(test, data_ptr[1], 2);
KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
data_ptr = copy[1].pointer;
- KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5);
- KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6);
+ KUNIT_EXPECT_EQ(test, data_ptr[0], 5);
+ KUNIT_EXPECT_EQ(test, data_ptr[1], 6);
property_entries_free(copy);
}
diff --git a/drivers/base/trace.c b/drivers/base/trace.c
new file mode 100644
index 000000000000..b24b0a309c4a
--- /dev/null
+++ b/drivers/base/trace.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device core Trace Support
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
new file mode 100644
index 000000000000..3192e18f877e
--- /dev/null
+++ b/drivers/base/trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device core Trace Support
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dev
+
+#if !defined(__DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __DEV_TRACE_H
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+DECLARE_EVENT_CLASS(devres,
+ TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size),
+ TP_ARGS(dev, op, node, name, size),
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dev))
+ __field(struct device *, dev)
+ __field(const char *, op)
+ __field(void *, node)
+ __field(const char *, name)
+ __field(size_t, size)
+ ),
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dev));
+ __entry->op = op;
+ __entry->node = node;
+ __entry->name = name;
+ __entry->size = size;
+ ),
+ TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
+ __entry->op, __entry->node, __entry->name, __entry->size)
+);
+
+DEFINE_EVENT(devres, devres_log,
+ TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size),
+ TP_ARGS(dev, op, node, name, size)
+);
+
+#endif /* __DEV_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
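
With the dev:devres_log tracepoint above, managed allocations and releases become observable even without CONFIG_DEBUG_DEVRES, e.g. by enabling events/dev/devres_log in tracefs. A sketch of a probe whose allocation would appear as an "ADD" record and, on unbind, as a matching "REL" record (demo_probe() is illustrative only):

#include <linux/device.h>
#include <linux/slab.h>

static int demo_probe(struct device *dev)
{
        /* Logged by the devres core as "ADD" when the event is enabled. */
        void *buf = devm_kzalloc(dev, 64, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* Freed automatically at unbind time and logged as "REL". */
        return 0;
}
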
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cc0e8c39a48b..f0cdff0c5fbf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -88,6 +88,47 @@
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
+static DEFINE_MUTEX(loop_validate_mutex);
+
+/**
+ * loop_global_lock_killable() - take locks for safe loop_validate_file() test
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo is about to bind another "struct loop_device", false otherwise
+ *
+ * Returns 0 on success, -EINTR otherwise.
+ *
+ * Since loop_validate_file() traverses other "struct loop_device" instances
+ * when is_loop_device() is true, we need a global lock to serialize
+ * concurrent loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
+ */
+static int loop_global_lock_killable(struct loop_device *lo, bool global)
+{
+ int err;
+
+ if (global) {
+ err = mutex_lock_killable(&loop_validate_mutex);
+ if (err)
+ return err;
+ }
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err && global)
+ mutex_unlock(&loop_validate_mutex);
+ return err;
+}
+
+/**
+ * loop_global_unlock() - release locks taken by loop_global_lock_killable()
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo was about to bind another "struct loop_device", false otherwise
+ */
+static void loop_global_unlock(struct loop_device *lo, bool global)
+{
+ mutex_unlock(&lo->lo_mutex);
+ if (global)
+ mutex_unlock(&loop_validate_mutex);
+}
static int max_part;
static int part_shift;
@@ -672,13 +713,15 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
while (is_loop_device(f)) {
struct loop_device *l;
+ lockdep_assert_held(&loop_validate_mutex);
if (f->f_mapping->host->i_rdev == bdev->bd_dev)
return -EBADF;
l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
- if (l->lo_state != Lo_bound) {
+ if (l->lo_state != Lo_bound)
return -EINVAL;
- }
+ /* Order wrt setting lo->lo_backing_file in loop_configure(). */
+ rmb();
f = l->lo_backing_file;
}
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
@@ -697,13 +740,18 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
- struct file *file = NULL, *old_file;
- int error;
- bool partscan;
+ struct file *file = fget(arg);
+ struct file *old_file;
+ int error;
+ bool partscan;
+ bool is_loop;
- error = mutex_lock_killable(&lo->lo_mutex);
+ if (!file)
+ return -EBADF;
+ is_loop = is_loop_device(file);
+ error = loop_global_lock_killable(lo, is_loop);
if (error)
- return error;
+ goto out_putf;
error = -ENXIO;
if (lo->lo_state != Lo_bound)
goto out_err;
@@ -713,11 +761,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
goto out_err;
- error = -EBADF;
- file = fget(arg);
- if (!file)
- goto out_err;
-
error = loop_validate_file(file, bdev);
if (error)
goto out_err;
@@ -740,7 +783,16 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, is_loop);
+
+ /*
+ * Flush loop_validate_file() before fput(), for l->lo_backing_file
+ * might be pointing at old_file which might be the last reference.
+ */
+ if (!is_loop) {
+ mutex_lock(&loop_validate_mutex);
+ mutex_unlock(&loop_validate_mutex);
+ }
/*
* We must drop file reference outside of lo_mutex as dropping
* the file ref can take open_mutex which creates circular locking
@@ -752,9 +804,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return 0;
out_err:
- mutex_unlock(&lo->lo_mutex);
- if (file)
- fput(file);
+ loop_global_unlock(lo, is_loop);
+out_putf:
+ fput(file);
return error;
}
@@ -1136,22 +1188,22 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
struct block_device *bdev,
const struct loop_config *config)
{
- struct file *file;
- struct inode *inode;
+ struct file *file = fget(config->fd);
+ struct inode *inode;
struct address_space *mapping;
- int error;
- loff_t size;
- bool partscan;
- unsigned short bsize;
+ int error;
+ loff_t size;
+ bool partscan;
+ unsigned short bsize;
+ bool is_loop;
+
+ if (!file)
+ return -EBADF;
+ is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- error = -EBADF;
- file = fget(config->fd);
- if (!file)
- goto out;
-
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@@ -1162,7 +1214,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
goto out_putf;
}
- error = mutex_lock_killable(&lo->lo_mutex);
+ error = loop_global_lock_killable(lo, is_loop);
if (error)
goto out_bdev;
@@ -1242,6 +1294,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
size = get_loop_size(lo, file);
loop_set_size(lo, size);
+ /* Order wrt reading lo_state in loop_validate_file(). */
+ wmb();
+
lo->lo_state = Lo_bound;
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
@@ -1253,7 +1308,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
*/
bdgrab(bdev);
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL))
@@ -1261,13 +1316,12 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
return 0;
out_unlock:
- mutex_unlock(&lo->lo_mutex);
+ loop_global_unlock(lo, is_loop);
out_bdev:
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
out_putf:
fput(file);
-out:
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return error;
@@ -1283,6 +1337,18 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
int lo_number;
struct loop_worker *pos, *worker;
+ /*
+ * Flush loop_configure() and loop_change_fd(). It is acceptable for
+ * loop_validate_file() to succeed, for actual clear operation has not
+ * started yet.
+ */
+ mutex_lock(&loop_validate_mutex);
+ mutex_unlock(&loop_validate_mutex);
+ /*
+ * loop_validate_file() now fails because l->lo_state != Lo_bound
+ * became visible.
+ */
+
mutex_lock(&lo->lo_mutex);
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
err = -ENXIO;
@@ -1434,7 +1500,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- struct block_device *bdev;
kuid_t uid = current_uid();
int prev_lo_flags;
bool partscan = false;
@@ -1503,7 +1568,6 @@ out_unfreeze:
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
- bdev = lo->lo_device;
partscan = true;
}
out_unlock:
@@ -2237,7 +2301,7 @@ static const struct blk_mq_ops loop_mq_ops = {
.complete = lo_complete_rq,
};
-static int loop_add(struct loop_device **l, int i)
+static int loop_add(int i)
{
struct loop_device *lo;
struct gendisk *disk;
@@ -2247,9 +2311,12 @@ static int loop_add(struct loop_device **l, int i)
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
if (!lo)
goto out;
-
lo->lo_state = Lo_unbound;
+ err = mutex_lock_killable(&loop_ctl_mutex);
+ if (err)
+ goto out_free_dev;
+
/* allocate id, if @id >= 0, we're requesting that specific id */
if (i >= 0) {
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
@@ -2259,7 +2326,7 @@ static int loop_add(struct loop_device **l, int i)
err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
}
if (err < 0)
- goto out_free_dev;
+ goto out_unlock;
i = err;
err = -ENOMEM;
@@ -2326,13 +2393,15 @@ static int loop_add(struct loop_device **l, int i)
disk->queue = lo->lo_queue;
sprintf(disk->disk_name, "loop%d", i);
add_disk(disk);
- *l = lo;
- return lo->lo_number;
+ mutex_unlock(&loop_ctl_mutex);
+ return i;
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
idr_remove(&loop_index_idr, i);
+out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
out_free_dev:
kfree(lo);
out:
@@ -2348,109 +2417,86 @@ static void loop_remove(struct loop_device *lo)
kfree(lo);
}
-static int find_free_cb(int id, void *ptr, void *data)
+static void loop_probe(dev_t dev)
{
- struct loop_device *lo = ptr;
- struct loop_device **l = data;
+ int idx = MINOR(dev) >> part_shift;
- if (lo->lo_state == Lo_unbound) {
- *l = lo;
- return 1;
- }
- return 0;
+ if (max_loop && idx >= max_loop)
+ return;
+ loop_add(idx);
}
-static int loop_lookup(struct loop_device **l, int i)
+static int loop_control_remove(int idx)
{
struct loop_device *lo;
- int ret = -ENODEV;
+ int ret;
- if (i < 0) {
- int err;
+ if (idx < 0) {
+ pr_warn("deleting an unspecified loop device is not supported.\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&loop_ctl_mutex);
+ if (ret)
+ return ret;
- err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
- if (err == 1) {
- *l = lo;
- ret = lo->lo_number;
- }
- goto out;
+ lo = idr_find(&loop_index_idr, idx);
+ if (!lo) {
+ ret = -ENODEV;
+ goto out_unlock_ctrl;
}
- /* lookup and return a specific i */
- lo = idr_find(&loop_index_idr, i);
- if (lo) {
- *l = lo;
- ret = lo->lo_number;
+ ret = mutex_lock_killable(&lo->lo_mutex);
+ if (ret)
+ goto out_unlock_ctrl;
+ if (lo->lo_state != Lo_unbound ||
+ atomic_read(&lo->lo_refcnt) > 0) {
+ mutex_unlock(&lo->lo_mutex);
+ ret = -EBUSY;
+ goto out_unlock_ctrl;
}
-out:
+ lo->lo_state = Lo_deleting;
+ mutex_unlock(&lo->lo_mutex);
+
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+out_unlock_ctrl:
+ mutex_unlock(&loop_ctl_mutex);
return ret;
}
-static void loop_probe(dev_t dev)
+static int loop_control_get_free(int idx)
{
- int idx = MINOR(dev) >> part_shift;
struct loop_device *lo;
+ int id, ret;
- if (max_loop && idx >= max_loop)
- return;
-
- mutex_lock(&loop_ctl_mutex);
- if (loop_lookup(&lo, idx) < 0)
- loop_add(&lo, idx);
+ ret = mutex_lock_killable(&loop_ctl_mutex);
+ if (ret)
+ return ret;
+ idr_for_each_entry(&loop_index_idr, lo, id) {
+ if (lo->lo_state == Lo_unbound)
+ goto found;
+ }
mutex_unlock(&loop_ctl_mutex);
+ return loop_add(-1);
+found:
+ mutex_unlock(&loop_ctl_mutex);
+ return id;
}
static long loop_control_ioctl(struct file *file, unsigned int cmd,
unsigned long parm)
{
- struct loop_device *lo;
- int ret;
-
- ret = mutex_lock_killable(&loop_ctl_mutex);
- if (ret)
- return ret;
-
- ret = -ENOSYS;
switch (cmd) {
case LOOP_CTL_ADD:
- ret = loop_lookup(&lo, parm);
- if (ret >= 0) {
- ret = -EEXIST;
- break;
- }
- ret = loop_add(&lo, parm);
- break;
+ return loop_add(parm);
case LOOP_CTL_REMOVE:
- ret = loop_lookup(&lo, parm);
- if (ret < 0)
- break;
- ret = mutex_lock_killable(&lo->lo_mutex);
- if (ret)
- break;
- if (lo->lo_state != Lo_unbound) {
- ret = -EBUSY;
- mutex_unlock(&lo->lo_mutex);
- break;
- }
- if (atomic_read(&lo->lo_refcnt) > 0) {
- ret = -EBUSY;
- mutex_unlock(&lo->lo_mutex);
- break;
- }
- lo->lo_state = Lo_deleting;
- mutex_unlock(&lo->lo_mutex);
- idr_remove(&loop_index_idr, lo->lo_number);
- loop_remove(lo);
- break;
+ return loop_control_remove(parm);
case LOOP_CTL_GET_FREE:
- ret = loop_lookup(&lo, -1);
- if (ret >= 0)
- break;
- ret = loop_add(&lo, -1);
+ return loop_control_get_free(parm);
+ default:
+ return -ENOSYS;
}
- mutex_unlock(&loop_ctl_mutex);
-
- return ret;
}
static const struct file_operations loop_ctl_fops = {
@@ -2473,7 +2519,6 @@ MODULE_ALIAS("devname:loop-control");
static int __init loop_init(void)
{
int i, nr;
- struct loop_device *lo;
int err;
part_shift = 0;
@@ -2525,10 +2570,8 @@ static int __init loop_init(void)
}
/* pre-create number of devices given by config or max_loop */
- mutex_lock(&loop_ctl_mutex);
for (i = 0; i < nr; i++)
- loop_add(&lo, i);
- mutex_unlock(&loop_ctl_mutex);
+ loop_add(i);
printk(KERN_INFO "loop: module loaded\n");
return 0;
@@ -2539,26 +2582,20 @@ err_out:
return err;
}
-static int loop_exit_cb(int id, void *ptr, void *data)
-{
- struct loop_device *lo = ptr;
-
- loop_remove(lo);
- return 0;
-}
-
static void __exit loop_exit(void)
{
- mutex_lock(&loop_ctl_mutex);
-
- idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
- idr_destroy(&loop_index_idr);
+ struct loop_device *lo;
+ int id;
unregister_blkdev(LOOP_MAJOR, "loop");
-
misc_deregister(&loop_misc);
+ mutex_lock(&loop_ctl_mutex);
+ idr_for_each_entry(&loop_index_idr, lo, id)
+ loop_remove(lo);
mutex_unlock(&loop_ctl_mutex);
+
+ idr_destroy(&loop_index_idr);
}
module_init(loop_init);
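
The empty lock/unlock pairs on loop_validate_mutex introduced above act as a flush barrier, not a critical section. In generic form it looks like the sketch below (demo names; the real patch additionally uses wmb()/rmb() to order the lo_state and lo_backing_file accesses):

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_validate_mutex);

static void demo_flush_validators(void)
{
        /*
         * Every validator runs with demo_validate_mutex held, so taking
         * and immediately releasing it guarantees that all validators
         * which started before our state change have finished by the
         * time this returns.
         */
        mutex_lock(&demo_validate_mutex);
        mutex_unlock(&demo_validate_mutex);
}
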
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index ff3e7b3f5ad8..901855717cb5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2160,6 +2160,20 @@ static ssize_t mtip_hw_show_status(struct device *dev,
static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL);
+static struct attribute *mtip_disk_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group mtip_disk_attr_group = {
+ .attrs = mtip_disk_attrs,
+};
+
+static const struct attribute_group *mtip_disk_attr_groups[] = {
+ &mtip_disk_attr_group,
+ NULL,
+};
+
/* debugsfs entries */
static ssize_t show_device_status(struct device_driver *drv, char *buf)
@@ -2374,47 +2388,6 @@ static const struct file_operations mtip_flags_fops = {
.llseek = no_llseek,
};
-/*
- * Create the sysfs related attributes.
- *
- * @dd Pointer to the driver data structure.
- * @kobj Pointer to the kobj for the block device.
- *
- * return value
- * 0 Operation completed successfully.
- * -EINVAL Invalid parameter.
- */
-static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
-{
- if (!kobj || !dd)
- return -EINVAL;
-
- if (sysfs_create_file(kobj, &dev_attr_status.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'status' sysfs entry\n");
- return 0;
-}
-
-/*
- * Remove the sysfs related attributes.
- *
- * @dd Pointer to the driver data structure.
- * @kobj Pointer to the kobj for the block device.
- *
- * return value
- * 0 Operation completed successfully.
- * -EINVAL Invalid parameter.
- */
-static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
-{
- if (!kobj || !dd)
- return -EINVAL;
-
- sysfs_remove_file(kobj, &dev_attr_status.attr);
-
- return 0;
-}
-
static int mtip_hw_debugfs_init(struct driver_data *dd)
{
if (!dfs_parent)
@@ -3566,7 +3539,6 @@ static int mtip_block_initialize(struct driver_data *dd)
int rv = 0, wait_for_rebuild = 0;
sector_t capacity;
unsigned int index = 0;
- struct kobject *kobj;
if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */
@@ -3576,35 +3548,6 @@ static int mtip_block_initialize(struct driver_data *dd)
goto protocol_init_error;
}
- dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
- if (dd->disk == NULL) {
- dev_err(&dd->pdev->dev,
- "Unable to allocate gendisk structure\n");
- rv = -EINVAL;
- goto alloc_disk_error;
- }
-
- rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
- if (rv < 0)
- goto ida_get_error;
- index = rv;
-
- rv = rssd_disk_name_format("rssd",
- index,
- dd->disk->disk_name,
- DISK_NAME_LEN);
- if (rv)
- goto disk_index_error;
-
- dd->disk->major = dd->major;
- dd->disk->first_minor = index * MTIP_MAX_MINORS;
- dd->disk->minors = MTIP_MAX_MINORS;
- dd->disk->fops = &mtip_block_ops;
- dd->disk->private_data = dd;
- dd->index = index;
-
- mtip_hw_debugfs_init(dd);
-
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
@@ -3623,17 +3566,35 @@ static int mtip_block_initialize(struct driver_data *dd)
goto block_queue_alloc_tag_error;
}
- /* Allocate the request queue. */
- dd->queue = blk_mq_init_queue(&dd->tags);
- if (IS_ERR(dd->queue)) {
+ dd->disk = blk_mq_alloc_disk(&dd->tags, dd);
+ if (IS_ERR(dd->disk)) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
rv = -ENOMEM;
goto block_queue_alloc_init_error;
}
+ dd->queue = dd->disk->queue;
+
+ rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
+ if (rv < 0)
+ goto ida_get_error;
+ index = rv;
+
+ rv = rssd_disk_name_format("rssd",
+ index,
+ dd->disk->disk_name,
+ DISK_NAME_LEN);
+ if (rv)
+ goto disk_index_error;
+
+ dd->disk->major = dd->major;
+ dd->disk->first_minor = index * MTIP_MAX_MINORS;
+ dd->disk->minors = MTIP_MAX_MINORS;
+ dd->disk->fops = &mtip_block_ops;
+ dd->disk->private_data = dd;
+ dd->index = index;
- dd->disk->queue = dd->queue;
- dd->queue->queuedata = dd;
+ mtip_hw_debugfs_init(dd);
skip_create_disk:
/* Initialize the protocol layer. */
@@ -3672,17 +3633,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk, NULL);
-
- /*
- * Now that the disk is active, initialize any sysfs attributes
- * managed by the protocol layer.
- */
- kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
- if (kobj) {
- mtip_hw_sysfs_init(dd, kobj);
- kobject_put(kobj);
- }
+ device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3709,23 +3660,17 @@ start_service_thread:
kthread_run_error:
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
-
read_capacity_error:
init_hw_cmds_error:
- blk_cleanup_queue(dd->queue);
-block_queue_alloc_init_error:
- blk_mq_free_tag_set(&dd->tags);
-block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
ida_free(&rssd_index_ida, index);
-
ida_get_error:
- put_disk(dd->disk);
-
-alloc_disk_error:
+ blk_cleanup_disk(dd->disk);
+block_queue_alloc_init_error:
+ blk_mq_free_tag_set(&dd->tags);
+block_queue_alloc_tag_error:
mtip_hw_exit(dd); /* De-initialize the protocol layer. */
-
protocol_init_error:
return rv;
}
@@ -3751,8 +3696,6 @@ static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
*/
static int mtip_block_remove(struct driver_data *dd)
{
- struct kobject *kobj;
-
mtip_hw_debugfs_exit(dd);
if (dd->mtip_svc_handler) {
@@ -3761,15 +3704,6 @@ static int mtip_block_remove(struct driver_data *dd)
kthread_stop(dd->mtip_svc_handler);
}
- /* Clean up the sysfs attributes, if created */
- if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
- kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
- if (kobj) {
- mtip_hw_sysfs_exit(dd, kobj);
- kobject_put(kobj);
- }
- }
-
if (!dd->sr) {
/*
* Explicitly wait here for IOs to quiesce,
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 7b4dd10af9ec..c84be0028f63 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -74,7 +74,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
n64cart_wait_dma();
- n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset);
+ n64cart_write_reg(PI_DRAM_REG, dma_addr);
n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 614d82e7fae4..19f5d5a8b16a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -79,6 +79,7 @@ struct link_dead_args {
#define NBD_RT_HAS_CONFIG_REF 4
#define NBD_RT_BOUND 5
#define NBD_RT_DISCONNECT_ON_CLOSE 6
+#define NBD_RT_HAS_BACKEND_FILE 7
#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1
@@ -119,6 +120,8 @@ struct nbd_device {
struct completion *destroy_complete;
unsigned long flags;
+
+ char *backend;
};
#define NBD_CMD_REQUEUED 1
@@ -216,14 +219,28 @@ static const struct device_attribute pid_attr = {
.show = pid_show,
};
+static ssize_t backend_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
+
+ return sprintf(buf, "%s\n", nbd->backend ?: "");
+}
+
+static const struct device_attribute backend_attr = {
+ .attr = { .name = "backend", .mode = 0444},
+ .show = backend_show,
+};
+
static void nbd_dev_remove(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
if (disk) {
del_gendisk(disk);
- blk_mq_free_tag_set(&nbd->tag_set);
blk_cleanup_disk(disk);
+ blk_mq_free_tag_set(&nbd->tag_set);
}
/*
@@ -801,6 +818,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ /* don't abort one completed request */
+ if (blk_mq_request_completed(req))
+ return true;
+
mutex_lock(&cmd->lock);
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
@@ -1211,6 +1232,12 @@ static void nbd_config_put(struct nbd_device *nbd)
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->task_recv = NULL;
+ if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
+ &config->runtime_flags)) {
+ device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
+ kfree(nbd->backend);
+ nbd->backend = NULL;
+ }
nbd_clear_sock(nbd);
if (config->num_connections) {
int i;
@@ -1270,7 +1297,7 @@ static int nbd_start_device(struct nbd_device *nbd)
error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (error) {
- dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+ dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
return error;
}
set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
@@ -1657,6 +1684,7 @@ static int nbd_dev_add(int index)
BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
nbd->destroy_complete = NULL;
+ nbd->backend = NULL;
err = blk_mq_alloc_tag_set(&nbd->tag_set);
if (err)
@@ -1743,6 +1771,7 @@ static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
[NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
+ [NBD_ATTR_BACKEND_IDENTIFIER] = { .type = NLA_STRING},
};
static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
@@ -1945,6 +1974,23 @@ again:
}
}
ret = nbd_start_device(nbd);
+ if (ret)
+ goto out;
+ if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
+ nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
+ GFP_KERNEL);
+ if (!nbd->backend) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
+ if (ret) {
+ dev_err(disk_to_dev(nbd->disk),
+ "device_create_file failed for backend!\n");
+ goto out;
+ }
+ set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
mutex_unlock(&nbd->config_lock);
if (!ret) {
@@ -1962,15 +2008,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
- nbd_clear_sock(nbd);
- mutex_unlock(&nbd->config_lock);
+ sock_shutdown(nbd);
/*
* Make sure recv thread has finished, so it does not drop the last
* config ref and try to destroy the workqueue from inside the work
- * queue.
+ * queue. This also ensures that we can safely call nbd_clear_que()
+ * to cancel the inflight I/Os.
*/
if (nbd->recv_workq)
flush_workqueue(nbd->recv_workq);
+ nbd_clear_que(nbd);
+ nbd->task_setup = NULL;
+ mutex_unlock(&nbd->config_lock);
+
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -2037,6 +2087,22 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
index);
return -EINVAL;
}
+ if (nbd->backend) {
+ if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
+ if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
+ nbd->backend)) {
+ mutex_unlock(&nbd_index_mutex);
+ dev_err(nbd_to_dev(nbd),
+ "backend image doesn't match with %s\n",
+ nbd->backend);
+ return -EINVAL;
+ }
+ } else {
+ mutex_unlock(&nbd_index_mutex);
+ dev_err(nbd_to_dev(nbd), "must specify backend\n");
+ return -EINVAL;
+ }
+ }
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
printk(KERN_ERR "nbd: device at index %d is going down\n",
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3b320b005aa8..d734e9ee1546 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1849,7 +1849,6 @@ static int null_add_dev(struct nullb_device *dev)
if (!null_setup_fault())
goto out_cleanup_tags;
- rv = -ENOMEM;
nullb->tag_set->timeout = 5 * HZ;
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
if (IS_ERR(nullb->disk)) {
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 3b2b8e872beb..9b3298926356 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -1014,8 +1014,8 @@ static void __exit pd_exit(void)
if (p) {
disk->gd = NULL;
del_gendisk(p);
- blk_mq_free_tag_set(&disk->tag_set);
blk_cleanup_disk(p);
+ blk_mq_free_tag_set(&disk->tag_set);
pi_release(disk->pi);
}
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f69b5c69c2a6..538446b652de 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,7 +704,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 531d390902dd..90b947c96402 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4100,8 +4100,6 @@ again:
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
- bool need_wait;
-
dout("%s rbd_dev %p\n", __func__, rbd_dev);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);
@@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
*/
rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
rbd_assert(!completion_done(&rbd_dev->releasing_wait));
- need_wait = !list_empty(&rbd_dev->running_list);
- downgrade_write(&rbd_dev->lock_rwsem);
- if (need_wait)
- wait_for_completion(&rbd_dev->releasing_wait);
- up_read(&rbd_dev->lock_rwsem);
+ if (list_empty(&rbd_dev->running_list))
+ return true;
+
+ up_write(&rbd_dev->lock_rwsem);
+ wait_for_completion(&rbd_dev->releasing_wait);
down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
@@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
- /*
- * we already know that the remote client is
- * the owner
- */
- up_write(&rbd_dev->lock_rwsem);
- return;
+ dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
+ __func__, rbd_dev, cid.gid, cid.handle);
+ } else {
+ rbd_set_owner_cid(rbd_dev, &cid);
}
-
- rbd_set_owner_cid(rbd_dev, &cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
- dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
+ dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
__func__, rbd_dev, cid.gid, cid.handle,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
- up_write(&rbd_dev->lock_rwsem);
- return;
+ } else {
+ rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
}
-
- rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@@ -4951,6 +4943,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->minors = RBD_MINORS_PER_MAJOR;
}
disk->fops = &rbd_bd_ops;
+ disk->private_data = rbd_dev;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index c4631e684386..4d4bb810c2ae 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -1050,7 +1050,7 @@ err_out_release_mdesc:
return err;
}
-static int vdc_port_remove(struct vio_dev *vdev)
+static void vdc_port_remove(struct vio_dev *vdev)
{
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
@@ -1072,7 +1072,6 @@ static int vdc_port_remove(struct vio_dev *vdev)
kfree(port);
}
- return 0;
}
static void vdc_requeue_inflight(struct vdc_port *port)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e4bd3b1fc3c2..afb37aac09e8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -21,6 +21,9 @@
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u
+/* The maximum number of sg elements that fit into a virtqueue */
+#define VIRTIO_BLK_MAX_SG_ELEMS 32768
+
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -447,13 +450,6 @@ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
/* Host must always specify the capacity. */
virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
- /* If capacity is too big, truncate with warning. */
- if ((sector_t)capacity != capacity) {
- dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
- (unsigned long long)capacity);
- capacity = (sector_t)-1;
- }
-
nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
string_get_size(nblocks, queue_logical_block_size(q),
@@ -696,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
+static int virtblk_validate(struct virtio_device *vdev)
+{
+ u32 blk_size;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
+ return 0;
+
+ blk_size = virtio_cread32(vdev,
+ offsetof(struct virtio_blk_config, blk_size));
+
+ if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
+ __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
+
+ return 0;
+}
+
static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -707,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev)
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
- __func__);
- return -EINVAL;
- }
-
err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
GFP_KERNEL);
if (err < 0)
@@ -728,7 +740,10 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err || !sg_elems)
sg_elems = 1;
- /* We need an extra sg elements at head and tail. */
+ /* Prevent integer overflows and honor max vq size */
+ sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
+
+ /* We need extra sg elements at head and tail. */
sg_elems += 2;
vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
if (!vblk) {
@@ -824,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev)
else
blk_size = queue_logical_block_size(q);
+ if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+ dev_err(&vdev->dev,
+ "block size is changed unexpectedly, now is %u\n",
+ blk_size);
+ err = -EINVAL;
+ goto err_cleanup_disk;
+ }
+
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
@@ -882,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev)
device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0;
+err_cleanup_disk:
+ blk_cleanup_disk(vblk->disk);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
@@ -936,6 +961,8 @@ static int virtblk_freeze(struct virtio_device *vdev)
blk_mq_quiesce_queue(vblk->disk->queue);
vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
+
return 0;
}
@@ -982,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
+ .validate = virtblk_validate,
.probe = virtblk_probe,
.remove = virtblk_remove,
.config_changed = virtblk_config_changed,
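The new .validate hook above runs before virtblk_probe(), while feature negotiation is still open, so a driver can drop a feature bit whose config value is unusable instead of failing later in probe. A minimal sketch of the same pattern for a hypothetical virtio driver (the "foo" names are illustrative, not a real driver; the helpers are the ones already used in the hunk above):

	static int foo_validate(struct virtio_device *vdev)
	{
		/* Config space must be readable before any virtio_cread*() call. */
		if (!vdev->config->get)
			return -EINVAL;

		/* Mask out a feature whose advertised config value is unusable. */
		if (virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE) &&
		    virtio_cread32(vdev, offsetof(struct virtio_blk_config, blk_size)) > PAGE_SIZE)
			__virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);

		return 0;
	}

	static struct virtio_driver foo_driver = {
		.driver.name	= "foo",
		.id_table	= foo_id_table,		/* assumed to be defined elsewhere */
		.validate	= foo_validate,		/* called before .probe */
		.probe		= foo_probe,
	};

Probe can then re-read the negotiated block size and still bail out with -EINVAL if the device changed it underneath, which is what the err_cleanup_disk path above handles.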
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8d49f8fa98bb..d83fee21f6c5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -502,34 +502,21 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
- struct blkfront_info *info = bdev->bd_disk->private_data;
int i;
- dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
- command, (long)argument);
-
switch (command) {
case CDROMMULTISESSION:
- dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
for (i = 0; i < sizeof(struct cdrom_multisession); i++)
if (put_user(0, (char __user *)(argument + i)))
return -EFAULT;
return 0;
-
- case CDROM_GET_CAPABILITY: {
- struct gendisk *gd = info->gd;
- if (gd->flags & GENHD_FL_CD)
+ case CDROM_GET_CAPABILITY:
+ if (bdev->bd_disk->flags & GENHD_FL_CD)
return 0;
return -EINVAL;
- }
-
default:
- /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
- command);*/
- return -EINVAL; /* same return as native Linux */
+ return -EINVAL;
}
-
- return 0;
}
static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
@@ -1177,36 +1164,6 @@ out_release_minors:
return err;
}
-static void xlvbd_release_gendisk(struct blkfront_info *info)
-{
- unsigned int minor, nr_minors, i;
- struct blkfront_ring_info *rinfo;
-
- if (info->rq == NULL)
- return;
-
- /* No more blkif_request(). */
- blk_mq_stop_hw_queues(info->rq);
-
- for_each_rinfo(info, rinfo, i) {
- /* No more gnttab callback work. */
- gnttab_cancel_free_callback(&rinfo->callback);
-
- /* Flush gnttab callback work. Must be done with no locks held. */
- flush_work(&rinfo->work);
- }
-
- del_gendisk(info->gd);
-
- minor = info->gd->first_minor;
- nr_minors = info->gd->minors;
- xlbd_release_minors(minor, nr_minors);
-
- blk_cleanup_disk(info->gd);
- info->gd = NULL;
- blk_mq_free_tag_set(&info->tag_set);
-}
-
/* Already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
@@ -1756,12 +1713,6 @@ abort_transaction:
return err;
}
-static void free_info(struct blkfront_info *info)
-{
- list_del(&info->info_list);
- kfree(info);
-}
-
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1880,13 +1831,6 @@ again:
xenbus_dev_fatal(dev, err, "%s", message);
destroy_blkring:
blkif_free(info, 0);
-
- mutex_lock(&blkfront_mutex);
- free_info(info);
- mutex_unlock(&blkfront_mutex);
-
- dev_set_drvdata(&dev->dev, NULL);
-
return err;
}
@@ -2126,38 +2070,26 @@ static int blkfront_resume(struct xenbus_device *dev)
static void blkfront_closing(struct blkfront_info *info)
{
struct xenbus_device *xbdev = info->xbdev;
- struct block_device *bdev = NULL;
-
- mutex_lock(&info->mutex);
+ struct blkfront_ring_info *rinfo;
+ unsigned int i;
- if (xbdev->state == XenbusStateClosing) {
- mutex_unlock(&info->mutex);
+ if (xbdev->state == XenbusStateClosing)
return;
- }
- if (info->gd)
- bdev = bdgrab(info->gd->part0);
-
- mutex_unlock(&info->mutex);
-
- if (!bdev) {
- xenbus_frontend_closed(xbdev);
- return;
- }
+ /* No more blkif_request(). */
+ blk_mq_stop_hw_queues(info->rq);
+ blk_set_queue_dying(info->rq);
+ set_capacity(info->gd, 0);
- mutex_lock(&bdev->bd_disk->open_mutex);
+ for_each_rinfo(info, rinfo, i) {
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&rinfo->callback);
- if (bdev->bd_openers) {
- xenbus_dev_error(xbdev, -EBUSY,
- "Device in use; refusing to close");
- xenbus_switch_state(xbdev, XenbusStateClosing);
- } else {
- xlvbd_release_gendisk(info);
- xenbus_frontend_closed(xbdev);
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_work(&rinfo->work);
}
- mutex_unlock(&bdev->bd_disk->open_mutex);
- bdput(bdev);
+ xenbus_frontend_closed(xbdev);
}
static void blkfront_setup_discard(struct blkfront_info *info)
@@ -2472,8 +2404,7 @@ static void blkback_changed(struct xenbus_device *dev,
break;
fallthrough;
case XenbusStateClosing:
- if (info)
- blkfront_closing(info);
+ blkfront_closing(info);
break;
}
}
@@ -2481,56 +2412,21 @@ static void blkback_changed(struct xenbus_device *dev,
static int blkfront_remove(struct xenbus_device *xbdev)
{
struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
- struct block_device *bdev = NULL;
- struct gendisk *disk;
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
- if (!info)
- return 0;
-
- blkif_free(info, 0);
-
- mutex_lock(&info->mutex);
-
- disk = info->gd;
- if (disk)
- bdev = bdgrab(disk->part0);
-
- info->xbdev = NULL;
- mutex_unlock(&info->mutex);
-
- if (!bdev) {
- mutex_lock(&blkfront_mutex);
- free_info(info);
- mutex_unlock(&blkfront_mutex);
- return 0;
- }
-
- /*
- * The xbdev was removed before we reached the Closed
- * state. See if it's safe to remove the disk. If the bdev
- * isn't closed yet, we let release take care of it.
- */
-
- mutex_lock(&disk->open_mutex);
- info = disk->private_data;
-
- dev_warn(disk_to_dev(disk),
- "%s was hot-unplugged, %d stale handles\n",
- xbdev->nodename, bdev->bd_openers);
+ del_gendisk(info->gd);
- if (info && !bdev->bd_openers) {
- xlvbd_release_gendisk(info);
- disk->private_data = NULL;
- mutex_lock(&blkfront_mutex);
- free_info(info);
- mutex_unlock(&blkfront_mutex);
- }
+ mutex_lock(&blkfront_mutex);
+ list_del(&info->info_list);
+ mutex_unlock(&blkfront_mutex);
- mutex_unlock(&disk->open_mutex);
- bdput(bdev);
+ blkif_free(info, 0);
+ xlbd_release_minors(info->gd->first_minor, info->gd->minors);
+ blk_cleanup_disk(info->gd);
+ blk_mq_free_tag_set(&info->tag_set);
+ kfree(info);
return 0;
}
@@ -2541,77 +2437,9 @@ static int blkfront_is_ready(struct xenbus_device *dev)
return info->is_ready && info->xbdev;
}
-static int blkif_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct blkfront_info *info;
- int err = 0;
-
- mutex_lock(&blkfront_mutex);
-
- info = disk->private_data;
- if (!info) {
- /* xbdev gone */
- err = -ERESTARTSYS;
- goto out;
- }
-
- mutex_lock(&info->mutex);
-
- if (!info->gd)
- /* xbdev is closed */
- err = -ERESTARTSYS;
-
- mutex_unlock(&info->mutex);
-
-out:
- mutex_unlock(&blkfront_mutex);
- return err;
-}
-
-static void blkif_release(struct gendisk *disk, fmode_t mode)
-{
- struct blkfront_info *info = disk->private_data;
- struct xenbus_device *xbdev;
-
- mutex_lock(&blkfront_mutex);
- if (disk->part0->bd_openers)
- goto out_mutex;
-
- /*
- * Check if we have been instructed to close. We will have
- * deferred this request, because the bdev was still open.
- */
-
- mutex_lock(&info->mutex);
- xbdev = info->xbdev;
-
- if (xbdev && xbdev->state == XenbusStateClosing) {
- /* pending switch to state closed */
- dev_info(disk_to_dev(disk), "releasing disk\n");
- xlvbd_release_gendisk(info);
- xenbus_frontend_closed(info->xbdev);
- }
-
- mutex_unlock(&info->mutex);
-
- if (!xbdev) {
- /* sudden device removal */
- dev_info(disk_to_dev(disk), "releasing disk\n");
- xlvbd_release_gendisk(info);
- disk->private_data = NULL;
- free_info(info);
- }
-
-out_mutex:
- mutex_unlock(&blkfront_mutex);
-}
-
static const struct block_device_operations xlvbd_block_fops =
{
.owner = THIS_MODULE,
- .open = blkif_open,
- .release = blkif_release,
.getgeo = blkif_getgeo,
.ioctl = blkif_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74c411911b6e..80c3b43b4828 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -113,8 +113,8 @@ struct zram {
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by disk->open_mutex */
- struct file *backing_dev;
#ifdef CONFIG_ZRAM_WRITEBACK
+ struct file *backing_dev;
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 71a4ca505e09..5ed2cfa7da1d 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -593,7 +593,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* Return Value: None
*/
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
- char *flags, int count)
+ const char *flags, int count)
{
struct hci_uart *hu = tty->disc_data;
@@ -821,6 +821,7 @@ static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
static struct tty_ldisc_ops hci_uart_ldisc = {
.owner = THIS_MODULE,
+ .num = N_HCI,
.name = "n_hci",
.open = hci_uart_tty_open,
.close = hci_uart_tty_close,
@@ -840,7 +841,7 @@ static int __init hci_uart_init(void)
BT_INFO("HCI UART driver ver %s", VERSION);
/* Register the tty discipline */
- err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
+ err = tty_register_ldisc(&hci_uart_ldisc);
if (err) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
return err;
@@ -882,8 +883,6 @@ static int __init hci_uart_init(void)
static void __exit hci_uart_exit(void)
{
- int err;
-
#ifdef CONFIG_BT_HCIUART_H4
h4_deinit();
#endif
@@ -915,10 +914,7 @@ static void __exit hci_uart_exit(void)
mrvl_deinit();
#endif
- /* Release tty registration of line discipline */
- err = tty_unregister_ldisc(N_HCI);
- if (err)
- BT_ERR("Can't unregister HCI line discipline (%d)", err);
+ tty_unregister_ldisc(&hci_uart_ldisc);
}
module_init(hci_uart_init);
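Taken together, the hci_ldisc hunks reflect the reworked line-discipline API: the discipline number moves into struct tty_ldisc_ops as .num, tty_register_ldisc() takes only the ops pointer, and tty_unregister_ldisc() no longer returns an error. A rough sketch of the resulting registration flow, with illustrative "foo" names:

	static struct tty_ldisc_ops foo_ldisc = {
		.owner	= THIS_MODULE,
		.num	= N_HCI,	/* ldisc number now lives in the ops struct */
		.name	= "n_foo",
		/* .open/.close/.receive_buf etc. as before */
	};

	static int __init foo_init(void)
	{
		/* No ldisc number argument any more; the core reads foo_ldisc.num. */
		return tty_register_ldisc(&foo_ldisc);
	}

	static void __exit foo_exit(void)
	{
		tty_unregister_ldisc(&foo_ldisc);	/* void return, nothing to check */
	}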
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 7355fa2cb439..6551286a60cc 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index e3e2ae41c22b..315e830b6ecd 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -350,7 +350,8 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
* dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- *
+ * @alloc_interrupts: if true, the function allocates the interrupt pool;
+ * otherwise interrupt allocation is delayed
* Scans the physical DPRC and synchronizes the state of the Linux
* bus driver with the actual state of the MC by adding and removing
* devices as appropriate.
@@ -373,10 +374,11 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
return error;
}
EXPORT_SYMBOL_GPL(dprc_scan_container);
+
/**
* dprc_irq0_handler - Regular ISR for DPRC interrupt 0
*
- * @irq: IRQ number of the interrupt being handled
+ * @irq_num: IRQ number of the interrupt being handled
* @arg: Pointer to device structure
*/
static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
@@ -387,7 +389,7 @@ static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
/**
* dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
*
- * @irq: IRQ number of the interrupt being handled
+ * @irq_num: IRQ number of the interrupt being handled
* @arg: Pointer to device structure
*/
static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 27b0a01bad9b..d129338b8bc0 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -334,7 +334,7 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
- * @attributes Returned container attributes
+ * @attr: Returned container attributes
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
- * @obj_type; Object type as returned in dprc_get_obj()
+ * @obj_type: Object type as returned in dprc_get_obj()
* @obj_id: Unique object instance as returned in dprc_get_obj()
* @region_index: The specific region to query
* @region_desc: Returns the requested region descriptor
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 2d7c764bb7dc..6c513556911e 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
* @mc_dev: fsl-mc device which is used in conjunction with the
* allocated object
* @pool_type: pool type
- * @new_mc_dev: pointer to area where the pointer to the allocated device
+ * @new_mc_adev: pointer to area where the pointer to the allocated device
* is to be returned
*
* Allocatable objects are always used in conjunction with some functional
@@ -409,7 +409,7 @@ cleanup_msi_irqs:
}
EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
-/**
+/*
* Teardown the interrupt pool associated with an fsl-mc bus.
* It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
*/
@@ -436,7 +436,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
-/**
+/*
* Allocate the IRQs required by a given fsl-mc device.
*/
int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
@@ -578,7 +578,7 @@ void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
}
-/**
+/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
*/
@@ -610,7 +610,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
return 0;
}
-/**
+/*
* fsl_mc_allocator_remove - callback invoked when an allocatable device is
* being removed from the system
*/
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 380ad1fdb745..09c8ab5e0959 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -24,7 +24,7 @@
#include "fsl-mc-private.h"
-/**
+/*
* Default DMA mask for devices on a fsl-mc bus
*/
#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
@@ -36,6 +36,7 @@ static struct fsl_mc_version mc_version;
* @root_mc_bus_dev: fsl-mc device representing the root DPRC
* @num_translation_ranges: number of entries in addr_translation_ranges
* @translation_ranges: array of bus to system address translation ranges
+ * @fsl_mc_regs: base address of register bank
*/
struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
@@ -117,7 +118,7 @@ out:
return found;
}
-/**
+/*
* fsl_mc_bus_uevent - callback invoked when a device is added
*/
static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -467,7 +468,7 @@ static void fsl_mc_driver_shutdown(struct device *dev)
mc_drv->shutdown(mc_dev);
}
-/**
+/*
* __fsl_mc_driver_register - registers a child device driver with the
* MC bus
*
@@ -503,7 +504,7 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
}
EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
-/**
+/*
* fsl_mc_driver_unregister - unregisters a device driver from the
* MC bus
*/
@@ -563,7 +564,7 @@ struct fsl_mc_version *fsl_mc_get_version(void)
}
EXPORT_SYMBOL_GPL(fsl_mc_get_version);
-/**
+/*
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
void fsl_mc_get_root_dprc(struct device *dev,
@@ -732,7 +733,7 @@ error_cleanup_regions:
return error;
}
-/**
+/*
* fsl_mc_is_root_dprc - function to check if a given device is a root dprc
*/
bool fsl_mc_is_root_dprc(struct device *dev)
@@ -757,7 +758,7 @@ static void fsl_mc_device_release(struct device *dev)
kfree(mc_dev);
}
-/**
+/*
* Add a newly discovered fsl-mc device to be visible in Linux
*/
int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
@@ -1058,7 +1059,7 @@ static int get_mc_addr_translation_ranges(struct device *dev,
return 0;
}
-/**
+/*
* fsl_mc_bus_probe - callback invoked when the root MC bus is being
* added
*/
@@ -1182,7 +1183,7 @@ error_cleanup_mc_io:
return error;
}
-/**
+/*
* fsl_mc_bus_remove - callback invoked when the root MC bus is being
* removed
*/
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 8edadf05cbb7..cf974870ba55 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -148,7 +148,7 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
/**
* fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
- * @np: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional firmware node of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 305015486b91..95b10a6cf307 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -50,12 +50,12 @@ static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
}
/**
- * Creates an MC I/O object
+ * fsl_create_mc_io() - Creates an MC I/O object
*
* @dev: device to be associated with the MC I/O object
* @mc_portal_phys_addr: physical address of the MC portal to use
* @mc_portal_size: size in bytes of the MC portal
- * @dpmcp-dev: Pointer to the DPMCP object associated with this MC I/O
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
* object or NULL if none.
* @flags: flags for the new MC I/O object
* @new_mc_io: Area to return pointer to newly created MC I/O object
@@ -123,7 +123,7 @@ error_destroy_mc_io:
}
/**
- * Destroys an MC I/O object
+ * fsl_destroy_mc_io() - Destroys an MC I/O object
*
* @mc_io: MC I/O object to destroy
*/
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index b291b35e3884..f2052cd0a051 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -16,7 +16,7 @@
#include "fsl-mc-private.h"
-/**
+/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
#define MC_CMD_COMPLETION_TIMEOUT_MS 500
@@ -148,9 +148,10 @@ static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
}
/**
- * Waits for the completion of an MC command doing preemptible polling.
- * uslepp_range() is called between polling iterations.
- *
+ * mc_polling_wait_preemptible() - Waits for the completion of an MC
+ * command doing preemptible polling.
+ * usleep_range() is called between
+ * polling iterations.
* @mc_io: MC I/O object to be used
* @cmd: command buffer to receive MC response
* @mc_status: MC command completion status
@@ -194,9 +195,9 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
}
/**
- * Waits for the completion of an MC command doing atomic polling.
- * udelay() is called between polling iterations.
- *
+ * mc_polling_wait_atomic() - Waits for the completion of an MC command
+ * doing atomic polling. udelay() is called
+ * between polling iterations.
* @mc_io: MC I/O object to be used
* @cmd: command buffer to receive MC response
* @mc_status: MC command completion status
@@ -234,8 +235,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
}
/**
- * Sends a command to the MC device using the given MC I/O object
- *
+ * mc_send_command() - Sends a command to the MC device using the given
+ * MC I/O object
* @mc_io: MC I/O object to be used
* @cmd: command to be sent
*
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 5b9ea66b92dc..bc239a11aa69 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan);
+ struct mhi_chan *mhi_chan, unsigned int flags);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 22acde118bc3..84448233f64c 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- write_lock_bh(&mhi_chan->lock);
- mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
- complete(&mhi_chan->completion);
- write_unlock_bh(&mhi_chan->lock);
+
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+ } else {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Completion packet for invalid channel ID: %d\n", chan);
+ }
mhi_del_ring_element(mhi_cntrl, mhi_ring);
}
@@ -1423,7 +1430,7 @@ exit_unprepare_channel:
}
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
+ struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1448,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
if (ret)
goto error_pm_state;
+ if (mhi_chan->dir == DMA_FROM_DEVICE)
+ mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
/* Pre-allocate buffer for xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1603,7 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
}
/* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
int ret, dir;
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1614,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;
- ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
if (ret)
goto error_open_chan;
}
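With the extra flags argument, whether an inbound (DMA_FROM_DEVICE) channel gets its transfer ring pre-filled is now the caller's choice rather than a fixed channel property. A hedged usage sketch from an MHI client driver's probe, assuming mhi_dev is the struct mhi_device handed to that client:

	/* Client queues its own RX buffers through the usual mhi_queue_*() helpers: */
	ret = mhi_prepare_for_transfer(mhi_dev, 0);

	/* Or ask the MHI core to pre-allocate the inbound buffers instead: */
	ret = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);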
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index e2e59a341fef..bbf6cd04861e 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -465,23 +465,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
/* Trigger MHI RESET so that the device will not access host memory */
if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
- u32 in_reset = -1;
- unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
-
dev_dbg(dev, "Triggering MHI Reset in device\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
/* Wait for the reset bit to be cleared by the device */
- ret = wait_event_timeout(mhi_cntrl->state_event,
- mhi_read_reg_field(mhi_cntrl,
- mhi_cntrl->regs,
- MHICTRL,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT,
- &in_reset) ||
- !in_reset, timeout);
- if (!ret || in_reset)
- dev_err(dev, "Device failed to exit MHI Reset state\n");
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+ 25000);
+ if (ret)
+ dev_err(dev, "Device failed to clear MHI Reset\n");
/*
* Device will clear BHI_INTVEC as a part of RESET processing,
@@ -934,6 +926,7 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M2 ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index b3357a8a2fdb..4dd1077354af 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -32,6 +32,8 @@
* @edl: emergency download mode firmware path (if any)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @sideband_wake: true for devices that use a dedicated sideband GPIO for
+ * wakeup instead of inband wake support (such as sdx24)
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
@@ -40,6 +42,7 @@ struct mhi_pci_dev_info {
const char *edl;
unsigned int bar_num;
unsigned int dma_data_width;
+ bool sideband_wake;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -72,6 +75,22 @@ struct mhi_pci_dev_info {
.doorbell_mode_switch = false, \
}
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ .auto_queue = true, \
+ }
+
#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
{ \
.num_elements = el_count, \
@@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
- MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
@@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.edl = "qcom/sdx65m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
@@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = false,
};
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
@@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = true,
};
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
@@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_quectel_em1xx_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = true,
};
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
@@ -339,7 +362,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
- .dma_data_width = 32
+ .dma_data_width = 32,
+ .sideband_wake = false,
};
static const struct pci_device_id mhi_pci_id_table[] = {
@@ -640,9 +664,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
- mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
- mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
- mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+
+ if (info->sideband_wake) {
+ mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+ mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+ mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+ }
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
@@ -665,7 +692,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
if (err)
- return err;
+ goto err_disable_reporting;
/* MHI bus does not power up the controller by default */
err = mhi_prepare_for_power_up(mhi_cntrl);
@@ -699,6 +726,8 @@ err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
mhi_unregister_controller(mhi_cntrl);
+err_disable_reporting:
+ pci_disable_pcie_error_reporting(pdev);
return err;
}
@@ -721,6 +750,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
mhi_unregister_controller(mhi_cntrl);
+ pci_disable_pcie_error_reporting(pdev);
}
static void mhi_pci_shutdown(struct pci_dev *pdev)
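The new err_disable_reporting label follows the usual probe error-unwinding convention: each label undoes exactly the setup performed before the failing step, in reverse order, and the remove path mirrors the same teardown. A compact sketch of the shape, with hypothetical "foo" names standing in for whatever the driver actually registers:

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pdev);
		if (err)
			return err;

		pci_enable_pcie_error_reporting(pdev);

		err = foo_register(pdev);		/* hypothetical next setup step */
		if (err)
			goto err_disable_reporting;	/* undo only what was done above */

		return 0;

	err_disable_reporting:
		pci_disable_pcie_error_reporting(pdev);
		return err;
	}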
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index 0b8f53a688b8..663c82749222 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -102,8 +102,8 @@
/**
* struct cs_data - struct with info on a chipselect setting
* @enable_mask: mask to enable the chipselect in the EBI2 config
- * @slow_cfg0: offset to XMEMC slow CS config
- * @fast_cfg1: offset to XMEMC fast CS config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
*/
struct cs_data {
u32 enable_mask;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 38cb116ed433..148a4dd8cb9a 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -100,6 +100,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @cookie: data used by legacy platform callbacks
* @name: name if available
* @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
* @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
* @child_needs_resume: runtime resume needed for child on resume from suspend
@@ -130,6 +131,7 @@ struct sysc {
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
+ unsigned int reserved:1;
unsigned int enabled:1;
unsigned int needs_resume:1;
unsigned int child_needs_resume:1;
@@ -2951,6 +2953,8 @@ static int sysc_init_soc(struct sysc *ddata)
case SOC_3430 ... SOC_3630:
sysc_add_disabled(0x48304000); /* timer12 */
break;
+ case SOC_AM3:
+ sysc_add_disabled(0x48310000); /* rng */
default:
break;
}
@@ -3093,7 +3097,9 @@ static int sysc_probe(struct platform_device *pdev)
return error;
error = sysc_check_active_timer(ddata);
- if (error)
+ if (error == -ENXIO)
+ ddata->reserved = true;
+ else if (error)
return error;
error = sysc_get_clocks(ddata);
@@ -3130,11 +3136,15 @@ static int sysc_probe(struct platform_device *pdev)
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
- error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
- pdata ? pdata->auxdata : NULL,
- ddata->dev);
- if (error)
- goto err;
+
+ if (!ddata->reserved) {
+ error = of_platform_populate(ddata->dev->of_node,
+ sysc_match_table,
+ pdata ? pdata->auxdata : NULL,
+ ddata->dev);
+ if (error)
+ goto err;
+ }
INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 90ad34c6ef8e..feb827eefd1a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2186,7 +2186,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
- rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
break;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b151e0fcdeb5..ea3ead00f30f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -218,19 +218,6 @@ config XILINX_HWICAP
If unsure, say N.
-config R3964
- tristate "Siemens R3964 line discipline"
- depends on TTY && BROKEN
- help
- This driver allows synchronous communication with devices using the
- Siemens R3964 packet protocol. Unless you are dealing with special
- hardware like PLCs, you are unlikely to need this.
-
- To compile this driver as a module, choose M here: the
- module will be called n_r3964.
-
- If unsure, say N.
-
config APPLICOM
tristate "Applicom intelligent fieldbus card support"
depends on PCI
@@ -357,27 +344,6 @@ config NVRAM
To compile this driver as a module, choose M here: the
module will be called nvram.
-config RAW_DRIVER
- tristate "RAW driver (/dev/raw/rawN)"
- depends on BLOCK
- help
- The raw driver permits block devices to be bound to /dev/raw/rawN.
- Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
- See the raw(8) manpage for more details.
-
- Applications should preferably open the device (eg /dev/hda1)
- with the O_DIRECT flag.
-
-config MAX_RAW_DEVS
- int "Maximum number of RAW devices to support (1-65536)"
- depends on RAW_DRIVER
- range 1 65536
- default "256"
- help
- The maximum number of RAW devices that are supported.
- Default is 256. Increase this number in case you need lots of
- raw devices.
-
config DEVPORT
bool "/dev/port character device"
depends on ISA || PCI
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index ffce287ef415..264eb398fdd4 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
-obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_IBM_BSR) += bsr.o
@@ -44,6 +43,6 @@ obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
-obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_XILLYBUS_CLASS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
obj-$(CONFIG_ADI) += adi.o
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 8b55085650ad..4e5431f01450 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -156,12 +156,12 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
* This has the effect of treating non-periodic like periodic.
*/
if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
- unsigned long m, t, mc, base, k;
+ unsigned long t, mc, base, k;
struct hpet __iomem *hpet = devp->hd_hpet;
struct hpets *hpetp = devp->hd_hpets;
t = devp->hd_ireqfreq;
- m = read_counter(&devp->hd_timer->hpet_compare);
+ read_counter(&devp->hd_timer->hpet_compare);
mc = read_counter(&hpet->hpet_mc);
/* The time for the next interrupt would logically be t + m,
* however, if we are very unlucky and the interrupt is delayed
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c11f12d4ab53..3f166c8a4099 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -152,7 +152,7 @@ config HW_RANDOM_VIA
config HW_RANDOM_IXP4XX
tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Pseudo-Random
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index beec1627db3c..188854dd16a9 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/char/hw_random/ixp4xx-rng.c
*
@@ -8,23 +9,20 @@
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Fixes by Michael Buesch
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
+#include <linux/soc/ixp4xx/cpu.h>
#include <asm/io.h>
-#include <mach/hardware.h>
-
static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
{
@@ -40,35 +38,40 @@ static struct hwrng ixp4xx_rng_ops = {
.data_read = ixp4xx_rng_data_read,
};
-static int __init ixp4xx_rng_init(void)
+static int ixp4xx_rng_probe(struct platform_device *pdev)
{
void __iomem * rng_base;
- int err;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
if (!cpu_is_ixp46x()) /* includes IXP455 */
return -ENOSYS;
- rng_base = ioremap(0x70002100, 4);
- if (!rng_base)
- return -ENOMEM;
- ixp4xx_rng_ops.priv = (unsigned long)rng_base;
- err = hwrng_register(&ixp4xx_rng_ops);
- if (err)
- iounmap(rng_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rng_base))
+ return PTR_ERR(rng_base);
- return err;
+ ixp4xx_rng_ops.priv = (unsigned long)rng_base;
+ return devm_hwrng_register(dev, &ixp4xx_rng_ops);
}
-static void __exit ixp4xx_rng_exit(void)
-{
- void __iomem * rng_base = (void __iomem *)ixp4xx_rng_ops.priv;
-
- hwrng_unregister(&ixp4xx_rng_ops);
- iounmap(rng_base);
-}
+static const struct of_device_id ixp4xx_rng_of_match[] = {
+ {
+ .compatible = "intel,ixp46x-rng",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ixp4xx_rng_of_match);
-module_init(ixp4xx_rng_init);
-module_exit(ixp4xx_rng_exit);
+static struct platform_driver ixp4xx_rng_driver = {
+ .driver = {
+ .name = "ixp4xx-hwrandom",
+ .of_match_table = ixp4xx_rng_of_match,
+ },
+ .probe = ixp4xx_rng_probe,
+};
+module_platform_driver(ixp4xx_rng_driver);
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index f4949b689bd5..62bdd5af1339 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -29,7 +29,7 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
return 8;
}
-/**
+/*
* pseries_rng_get_desired_dma - Return desired DMA allocate for CMO operations
*
* This is a required function for a driver to operate in a CMO environment
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8a0e97b33cae..e96cb5c4f97a 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 15dc54fa1d47..1c596b5cdb27 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -16,7 +16,6 @@
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
-#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 89681f07bc78..8f1bce0b4fe5 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
io_read_num_rec_bytes(iobase, &num_bytes_read);
if (num_bytes_read >= 4) {
DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
+ if (num_bytes_read > 4) {
+ rc = -EIO;
+ goto exit_setprotocol;
+ }
break;
}
usleep_range(10000, 11000);
@@ -1050,7 +1054,6 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
struct cm4000_dev *dev = filp->private_data;
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
- unsigned char tmp;
unsigned char infolen;
unsigned char sendT0;
unsigned short nsend;
@@ -1148,7 +1151,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
set_cardparameter(dev);
/* dummy read, reset flag procedure received */
- tmp = inb(REG_FLAGS1(iobase));
+ inb(REG_FLAGS1(iobase));
dev->flags1 = 0x20 /* T_Active */
| (sendT0)
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index d5e43606339c..827711911da4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -221,7 +221,6 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
unsigned long i;
size_t min_bytes_to_read;
int rc;
- unsigned char uc;
DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid);
@@ -308,7 +307,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
return -EIO;
}
- uc = xinb(iobase + REG_OFFSET_BULK_IN);
+ xinb(iobase + REG_OFFSET_BULK_IN);
DEBUGP(2, dev, "<- cm4040_read (successfully)\n");
return min_bytes_to_read;
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
index 47feb39af34c..1bdce08fae3d 100644
--- a/drivers/char/pcmcia/scr24x_cs.c
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -265,7 +265,6 @@ static int scr24x_probe(struct pcmcia_device *link)
cdev_init(&dev->c_dev, &scr24x_fops);
dev->c_dev.owner = THIS_MODULE;
- dev->c_dev.ops = &scr24x_fops;
ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
if (ret < 0)
goto err;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 3287a7627ed0..6eaefea0520e 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -985,7 +985,7 @@ static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
else
#endif
{
- if (tty && (tty->stopped || tty->hw_stopped)) {
+ if (tty && (tty->flow.stopped || tty->hw_stopped)) {
tx_stop(info);
return;
}
@@ -1005,7 +1005,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
if (!info->tx_active)
return;
} else {
- if (tty && (tty->stopped || tty->hw_stopped)) {
+ if (tty && (tty->flow.stopped || tty->hw_stopped)) {
tx_stop(info);
return;
}
@@ -1419,13 +1419,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
/* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5: info->params.data_bits = 5; break;
- case CS6: info->params.data_bits = 6; break;
- case CS7: info->params.data_bits = 7; break;
- case CS8: info->params.data_bits = 8; break;
- default: info->params.data_bits = 7; break;
- }
+ info->params.data_bits = tty_get_char_size(cflag);
if (cflag & CSTOPB)
info->params.stop_bits = 2;
@@ -1525,7 +1519,7 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
return;
- if (info->tx_count <= 0 || tty->stopped ||
+ if (info->tx_count <= 0 || tty->flow.stopped ||
tty->hw_stopped || !info->tx_buf)
return;
@@ -1594,7 +1588,7 @@ static int mgslpc_write(struct tty_struct * tty,
ret += c;
}
start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ if (info->tx_count && !tty->flow.stopped && !tty->hw_stopped) {
spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
tx_start(info, tty);
@@ -1609,7 +1603,7 @@ cleanup:
/* Return the count of free bytes in transmit buffer
*/
-static int mgslpc_write_room(struct tty_struct *tty)
+static unsigned int mgslpc_write_room(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
int ret;
@@ -1637,10 +1631,10 @@ static int mgslpc_write_room(struct tty_struct *tty)
/* Return the count of bytes in transmit buffer
*/
-static int mgslpc_chars_in_buffer(struct tty_struct *tty)
+static unsigned int mgslpc_chars_in_buffer(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- int rc;
+ unsigned int rc;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
@@ -1655,7 +1649,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
rc = info->tx_count;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
+ printk("%s(%d):mgslpc_chars_in_buffer(%s)=%u\n",
__FILE__, __LINE__, info->device_name, rc);
return rc;
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
index 027484ecfb0d..3c99696b145e 100644
--- a/drivers/char/powernv-op-panel.c
+++ b/drivers/char/powernv-op-panel.c
@@ -75,6 +75,7 @@ static int __op_panel_update_display(void)
rc);
break;
}
+ break;
case OPAL_SUCCESS:
break;
default:
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
deleted file mode 100644
index 5d52a1f4738c..000000000000
--- a/drivers/char/raw.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/char/raw.c
- *
- * Front-end raw character devices. These can be bound to any block
- * devices to provide genuine Unix raw character device semantics.
- *
- * We reserve minor number 0 for a control interface. ioctl()s on this
- * device are used to bind the other minor numbers to block devices.
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <linux/blkdev.h>
-#include <linux/backing-dev.h>
-#include <linux/module.h>
-#include <linux/raw.h>
-#include <linux/capability.h>
-#include <linux/uio.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-#include <linux/compat.h>
-#include <linux/vmalloc.h>
-
-#include <linux/uaccess.h>
-
-struct raw_device_data {
- dev_t binding;
- struct block_device *bdev;
- int inuse;
-};
-
-static struct class *raw_class;
-static struct raw_device_data *raw_devices;
-static DEFINE_MUTEX(raw_mutex);
-static const struct file_operations raw_ctl_fops; /* forward declaration */
-
-static int max_raw_minors = CONFIG_MAX_RAW_DEVS;
-
-module_param(max_raw_minors, int, 0);
-MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
-
-/*
- * Open/close code for raw IO.
- *
- * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
- * point at the blockdev's address_space and set the file handle to use
- * O_DIRECT.
- *
- * Set the device's soft blocksize to the minimum possible. This gives the
- * finest possible alignment and has no adverse impact on performance.
- */
-static int raw_open(struct inode *inode, struct file *filp)
-{
- const int minor = iminor(inode);
- struct block_device *bdev;
- int err;
-
- if (minor == 0) { /* It is the control device */
- filp->f_op = &raw_ctl_fops;
- return 0;
- }
-
- pr_warn_ratelimited(
- "process %s (pid %d) is using the deprecated raw device\n"
- "support will be removed in Linux 5.14.\n",
- current->comm, current->pid);
-
- mutex_lock(&raw_mutex);
-
- /*
- * All we need to do on open is check that the device is bound.
- */
- err = -ENODEV;
- if (!raw_devices[minor].binding)
- goto out;
- bdev = blkdev_get_by_dev(raw_devices[minor].binding,
- filp->f_mode | FMODE_EXCL, raw_open);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- goto out;
- }
- err = set_blocksize(bdev, bdev_logical_block_size(bdev));
- if (err)
- goto out1;
- filp->f_flags |= O_DIRECT;
- filp->f_mapping = bdev->bd_inode->i_mapping;
- if (++raw_devices[minor].inuse == 1)
- file_inode(filp)->i_mapping =
- bdev->bd_inode->i_mapping;
- filp->private_data = bdev;
- raw_devices[minor].bdev = bdev;
- mutex_unlock(&raw_mutex);
- return 0;
-
-out1:
- blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
-out:
- mutex_unlock(&raw_mutex);
- return err;
-}
-
-/*
- * When the final fd which refers to this character-special node is closed, we
- * make its ->mapping point back at its own i_data.
- */
-static int raw_release(struct inode *inode, struct file *filp)
-{
- const int minor= iminor(inode);
- struct block_device *bdev;
-
- mutex_lock(&raw_mutex);
- bdev = raw_devices[minor].bdev;
- if (--raw_devices[minor].inuse == 0)
- /* Here inode->i_mapping == bdev->bd_inode->i_mapping */
- inode->i_mapping = &inode->i_data;
- mutex_unlock(&raw_mutex);
-
- blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
- return 0;
-}
-
-/*
- * Forward ioctls to the underlying block device.
- */
-static long
-raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
-{
- struct block_device *bdev = filp->private_data;
- return blkdev_ioctl(bdev, 0, command, arg);
-}
-
-static int bind_set(int number, u64 major, u64 minor)
-{
- dev_t dev = MKDEV(major, minor);
- dev_t raw = MKDEV(RAW_MAJOR, number);
- struct raw_device_data *rawdev;
- int err = 0;
-
- if (number <= 0 || number >= max_raw_minors)
- return -EINVAL;
-
- if (MAJOR(dev) != major || MINOR(dev) != minor)
- return -EINVAL;
-
- rawdev = &raw_devices[number];
-
- /*
- * This is like making block devices, so demand the
- * same capability
- */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- /*
- * For now, we don't need to check that the underlying
- * block device is present or not: we can do that when
- * the raw device is opened. Just check that the
- * major/minor numbers make sense.
- */
-
- if (MAJOR(dev) == 0 && dev != 0)
- return -EINVAL;
-
- mutex_lock(&raw_mutex);
- if (rawdev->inuse) {
- mutex_unlock(&raw_mutex);
- return -EBUSY;
- }
- if (rawdev->binding)
- module_put(THIS_MODULE);
-
- rawdev->binding = dev;
- if (!dev) {
- /* unbind */
- device_destroy(raw_class, raw);
- } else {
- __module_get(THIS_MODULE);
- device_destroy(raw_class, raw);
- device_create(raw_class, NULL, raw, NULL, "raw%d", number);
- }
- mutex_unlock(&raw_mutex);
- return err;
-}
-
-static int bind_get(int number, dev_t *dev)
-{
- if (number <= 0 || number >= max_raw_minors)
- return -EINVAL;
- *dev = raw_devices[number].binding;
- return 0;
-}
-
-/*
- * Deal with ioctls against the raw-device control interface, to bind
- * and unbind other raw devices.
- */
-static long raw_ctl_ioctl(struct file *filp, unsigned int command,
- unsigned long arg)
-{
- struct raw_config_request rq;
- dev_t dev;
- int err;
-
- switch (command) {
- case RAW_SETBIND:
- if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
- return -EFAULT;
-
- return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
-
- case RAW_GETBIND:
- if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
- return -EFAULT;
-
- err = bind_get(rq.raw_minor, &dev);
- if (err)
- return err;
-
- rq.block_major = MAJOR(dev);
- rq.block_minor = MINOR(dev);
-
- if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
- return -EFAULT;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_COMPAT
-struct raw32_config_request {
- compat_int_t raw_minor;
- compat_u64 block_major;
- compat_u64 block_minor;
-};
-
-static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct raw32_config_request __user *user_req = compat_ptr(arg);
- struct raw32_config_request rq;
- dev_t dev;
- int err = 0;
-
- switch (cmd) {
- case RAW_SETBIND:
- if (copy_from_user(&rq, user_req, sizeof(rq)))
- return -EFAULT;
-
- return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
-
- case RAW_GETBIND:
- if (copy_from_user(&rq, user_req, sizeof(rq)))
- return -EFAULT;
-
- err = bind_get(rq.raw_minor, &dev);
- if (err)
- return err;
-
- rq.block_major = MAJOR(dev);
- rq.block_minor = MINOR(dev);
-
- if (copy_to_user(user_req, &rq, sizeof(rq)))
- return -EFAULT;
-
- return 0;
- }
-
- return -EINVAL;
-}
-#endif
-
-static const struct file_operations raw_fops = {
- .read_iter = blkdev_read_iter,
- .write_iter = blkdev_write_iter,
- .fsync = blkdev_fsync,
- .open = raw_open,
- .release = raw_release,
- .unlocked_ioctl = raw_ioctl,
- .llseek = default_llseek,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations raw_ctl_fops = {
- .unlocked_ioctl = raw_ctl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = raw_ctl_compat_ioctl,
-#endif
- .open = raw_open,
- .owner = THIS_MODULE,
- .llseek = noop_llseek,
-};
-
-static struct cdev raw_cdev;
-
-static char *raw_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
-}
-
-static int __init raw_init(void)
-{
- dev_t dev = MKDEV(RAW_MAJOR, 0);
- int ret;
-
- if (max_raw_minors < 1 || max_raw_minors > 65536) {
- pr_warn("raw: invalid max_raw_minors (must be between 1 and 65536), using %d\n",
- CONFIG_MAX_RAW_DEVS);
- max_raw_minors = CONFIG_MAX_RAW_DEVS;
- }
-
- raw_devices = vzalloc(array_size(max_raw_minors,
- sizeof(struct raw_device_data)));
- if (!raw_devices) {
- printk(KERN_ERR "Not enough memory for raw device structures\n");
- ret = -ENOMEM;
- goto error;
- }
-
- ret = register_chrdev_region(dev, max_raw_minors, "raw");
- if (ret)
- goto error;
-
- cdev_init(&raw_cdev, &raw_fops);
- ret = cdev_add(&raw_cdev, dev, max_raw_minors);
- if (ret)
- goto error_region;
- raw_class = class_create(THIS_MODULE, "raw");
- if (IS_ERR(raw_class)) {
- printk(KERN_ERR "Error creating raw class.\n");
- cdev_del(&raw_cdev);
- ret = PTR_ERR(raw_class);
- goto error_region;
- }
- raw_class->devnode = raw_devnode;
- device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
-
- return 0;
-
-error_region:
- unregister_chrdev_region(dev, max_raw_minors);
-error:
- vfree(raw_devices);
- return ret;
-}
-
-static void __exit raw_exit(void)
-{
- device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
- class_destroy(raw_class);
- cdev_del(&raw_cdev);
- unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
-}
-
-module_init(raw_init);
-module_exit(raw_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2ccdf8ac6994..6e3235565a4d 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -254,11 +254,11 @@ static int ftpm_tee_probe(struct device *dev)
pvt_data->session = sess_arg.session;
/* Allocate dynamic shared memory with fTPM TA */
- pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
- MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
- TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_COMMAND_SIZE +
+ MAX_RESPONSE_SIZE);
if (IS_ERR(pvt_data->shm)) {
- dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+ dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
rc = -ENOMEM;
goto out_shm_alloc;
}
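tee_shm_alloc_kernel_buf() is the dedicated helper for kernel-private shared memory, so callers no longer pass TEE_SHM_MAPPED | TEE_SHM_DMA_BUF themselves. A minimal sketch of the pattern, where ctx and len stand in for whatever the calling driver already has:

	shm = tee_shm_alloc_kernel_buf(ctx, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... fill the buffer via tee_shm_get_va() and invoke the TA ... */

	tee_shm_free(shm);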
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 93f5d11c830b..230b2c9b3e3c 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -52,12 +52,7 @@ static void tpk_flush(void)
static int tpk_printk(const unsigned char *buf, int count)
{
- int i = tpk_curr;
-
- if (buf == NULL) {
- tpk_flush();
- return i;
- }
+ int i;
for (i = 0; i < count; i++) {
if (tpk_curr >= TPK_STR_SIZE) {
@@ -100,12 +95,6 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&tpkp->spinlock, flags);
- /* flush tpk_printk buffer */
- tpk_printk(NULL, 0);
- spin_unlock_irqrestore(&tpkp->spinlock, flags);
tty_port_close(&tpkp->port, tty, filp);
}
@@ -120,7 +109,6 @@ static int tpk_write(struct tty_struct *tty,
unsigned long flags;
int ret;
-
/* exclusive use of tpk_printk within this tty */
spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
@@ -132,40 +120,33 @@ static int tpk_write(struct tty_struct *tty,
/*
* TTY operations write_room function.
*/
-static int tpk_write_room(struct tty_struct *tty)
+static unsigned int tpk_write_room(struct tty_struct *tty)
{
return TPK_MAX_ROOM;
}
/*
- * TTY operations ioctl function.
+ * TTY operations hangup function.
*/
-static int tpk_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static void tpk_hangup(struct tty_struct *tty)
{
struct ttyprintk_port *tpkp = tty->driver_data;
- if (!tpkp)
- return -EINVAL;
-
- switch (cmd) {
- /* Stop TIOCCONS */
- case TIOCCONS:
- return -EOPNOTSUPP;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
+ tty_port_hangup(&tpkp->port);
}
/*
- * TTY operations hangup function.
+ * TTY port operations shutdown function.
*/
-static void tpk_hangup(struct tty_struct *tty)
+static void tpk_port_shutdown(struct tty_port *tport)
{
- struct ttyprintk_port *tpkp = tty->driver_data;
+ struct ttyprintk_port *tpkp =
+ container_of(tport, struct ttyprintk_port, port);
+ unsigned long flags;
- tty_port_hangup(&tpkp->port);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
+ tpk_flush();
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
}
static const struct tty_operations ttyprintk_ops = {
@@ -173,11 +154,12 @@ static const struct tty_operations ttyprintk_ops = {
.close = tpk_close,
.write = tpk_write,
.write_room = tpk_write_room,
- .ioctl = tpk_ioctl,
.hangup = tpk_hangup,
};
-static const struct tty_port_operations null_ops = { };
+static const struct tty_port_operations tpk_port_ops = {
+ .shutdown = tpk_port_shutdown,
+};
static struct tty_driver *ttyprintk_driver;
@@ -195,7 +177,7 @@ static int __init ttyprintk_init(void)
return PTR_ERR(ttyprintk_driver);
tty_port_init(&tpk_port.port);
- tpk_port.port.ops = &null_ops;
+ tpk_port.port.ops = &tpk_port_ops;
ttyprintk_driver->driver_name = "ttyprintk";
ttyprintk_driver->name = "ttyprintk";
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 59dfd9c421a1..7eaf303a7a86 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -475,7 +475,7 @@ static struct port_buffer *get_inbuf(struct port *port)
buf = virtqueue_get_buf(port->in_vq, &len);
if (buf) {
- buf->len = len;
+ buf->len = min_t(size_t, len, buf->size);
buf->offset = 0;
port->stats.bytes_received += len;
}
@@ -1709,7 +1709,7 @@ static void control_work_handler(struct work_struct *work)
while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->c_ivq_lock);
- buf->len = len;
+ buf->len = min_t(size_t, len, buf->size);
buf->offset = 0;
handle_control_message(vq->vdev, portdev, buf);
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
index 130dbdce858f..a8036dad437e 100644
--- a/drivers/char/xillybus/Kconfig
+++ b/drivers/char/xillybus/Kconfig
@@ -3,10 +3,14 @@
# Xillybus devices
#
+config XILLYBUS_CLASS
+ tristate
+
config XILLYBUS
tristate "Xillybus generic FPGA interface"
depends on PCI || OF
select CRC32
+ select XILLYBUS_CLASS
help
Xillybus is a generic interface for peripherals designed on
programmable logic (FPGA). The driver probes the hardware for
@@ -21,7 +25,7 @@ config XILLYBUS_PCIE
depends on PCI_MSI
help
Set to M if you want Xillybus to use PCI Express for communicating
- with the FPGA.
+ with the FPGA. The module will be called xillybus_pcie.
config XILLYBUS_OF
tristate "Xillybus over Device Tree"
@@ -29,6 +33,20 @@ config XILLYBUS_OF
help
Set to M if you want Xillybus to find its resources from the
Open Firmware Flattened Device Tree. If the target is an embedded
- system, say M.
+ system, say M. The module will be called xillybus_of.
endif # if XILLYBUS
+
+# XILLYUSB doesn't depend on XILLYBUS
+
+config XILLYUSB
+ tristate "XillyUSB: Xillybus generic FPGA interface for USB"
+ depends on USB
+ select CRC32
+ select XILLYBUS_CLASS
+ help
+ XillyUSB is the Xillybus variant which uses USB for communicating
+ with the FPGA.
+
+ Set to M if you want Xillybus to use USB for communicating with
+ the FPGA. The module will be called xillyusb.
diff --git a/drivers/char/xillybus/Makefile b/drivers/char/xillybus/Makefile
index 099e9a3585fc..16f31d03209d 100644
--- a/drivers/char/xillybus/Makefile
+++ b/drivers/char/xillybus/Makefile
@@ -3,6 +3,8 @@
# Makefile for Xillybus driver
#
+obj-$(CONFIG_XILLYBUS_CLASS) += xillybus_class.o
obj-$(CONFIG_XILLYBUS) += xillybus_core.o
obj-$(CONFIG_XILLYBUS_PCIE) += xillybus_pcie.o
obj-$(CONFIG_XILLYBUS_OF) += xillybus_of.o
+obj-$(CONFIG_XILLYUSB) += xillyusb.o
diff --git a/drivers/char/xillybus/xillybus.h b/drivers/char/xillybus/xillybus.h
index 8e3ed4d1bb7f..c63ffc56637c 100644
--- a/drivers/char/xillybus/xillybus.h
+++ b/drivers/char/xillybus/xillybus.h
@@ -30,7 +30,8 @@ struct xilly_buffer {
struct xilly_idt_handle {
unsigned char *chandesc;
- unsigned char *idt;
+ unsigned char *names;
+ int names_len;
int entries;
};
@@ -94,7 +95,6 @@ struct xilly_endpoint {
struct device *dev;
struct xilly_endpoint_hardware *ephw;
- struct list_head ep_list;
int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
__iomem void *registers;
int fatal_error;
@@ -102,12 +102,6 @@ struct xilly_endpoint {
struct mutex register_mutex;
wait_queue_head_t ep_wait;
- /* Channels and message handling */
- struct cdev cdev;
-
- int major;
- int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
-
int num_channels; /* EXCLUDING message buffer */
struct xilly_channel **channels;
int msg_counter;
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
new file mode 100644
index 000000000000..5046486011c8
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus class
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Driver for Xillybus class");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillybus_class");
+MODULE_LICENSE("GPL v2");
+
+static DEFINE_MUTEX(unit_mutex);
+static LIST_HEAD(unit_list);
+static struct class *xillybus_class;
+
+#define UNITNAMELEN 16
+
+struct xilly_unit {
+ struct list_head list_entry;
+ void *private_data;
+
+ struct cdev *cdev;
+ char name[UNITNAMELEN];
+ int major;
+ int lowest_minor;
+ int num_nodes;
+};
+
+int xillybus_init_chrdev(struct device *dev,
+ const struct file_operations *fops,
+ struct module *owner,
+ void *private_data,
+ unsigned char *idt, unsigned int len,
+ int num_nodes,
+ const char *prefix, bool enumerate)
+{
+ int rc;
+ dev_t mdev;
+ int i;
+ char devname[48];
+
+ struct device *device;
+ size_t namelen;
+ struct xilly_unit *unit, *u;
+
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+
+ if (!unit)
+ return -ENOMEM;
+
+ mutex_lock(&unit_mutex);
+
+ if (!enumerate)
+ snprintf(unit->name, UNITNAMELEN, "%s", prefix);
+
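+ /*
+ * If enumeration was requested, pick the first "prefix_NN" name that
+ * isn't already in use by a unit on the list.
+ */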
+ for (i = 0; enumerate; i++) {
+ snprintf(unit->name, UNITNAMELEN, "%s_%02d",
+ prefix, i);
+
+ enumerate = false;
+ list_for_each_entry(u, &unit_list, list_entry)
+ if (!strcmp(unit->name, u->name)) {
+ enumerate = true;
+ break;
+ }
+ }
+
+ rc = alloc_chrdev_region(&mdev, 0, num_nodes, unit->name);
+
+ if (rc) {
+ dev_warn(dev, "Failed to obtain major/minors");
+ goto fail_obtain;
+ }
+
+ unit->major = MAJOR(mdev);
+ unit->lowest_minor = MINOR(mdev);
+ unit->num_nodes = num_nodes;
+ unit->private_data = private_data;
+
+ unit->cdev = cdev_alloc();
+ if (!unit->cdev) {
+ rc = -ENOMEM;
+ goto unregister_chrdev;
+ }
+ unit->cdev->ops = fops;
+ unit->cdev->owner = owner;
+
+ rc = cdev_add(unit->cdev, MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+ if (rc) {
+ dev_err(dev, "Failed to add cdev.\n");
+ /* kobject_put() is normally done by cdev_del() */
+ kobject_put(&unit->cdev->kobj);
+ goto unregister_chrdev;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ namelen = strnlen(idt, len);
+
+ if (namelen == len) {
+ dev_err(dev, "IDT's list of names is too short. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+
+ snprintf(devname, sizeof(devname), "%s_%s",
+ unit->name, idt);
+
+ len -= namelen + 1;
+ idt += namelen + 1;
+
+ device = device_create(xillybus_class,
+ NULL,
+ MKDEV(unit->major,
+ i + unit->lowest_minor),
+ NULL,
+ "%s", devname);
+
+ if (IS_ERR(device)) {
+ dev_err(dev, "Failed to create %s device. Aborting.\n",
+ devname);
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+ }
+
+ if (len) {
+ dev_err(dev, "IDT's list of names is too long. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unroll_device_create;
+ }
+
+ list_add_tail(&unit->list_entry, &unit_list);
+
+ dev_info(dev, "Created %d device files.\n", num_nodes);
+
+ mutex_unlock(&unit_mutex);
+
+ return 0;
+
+unroll_device_create:
+ for (i--; i >= 0; i--)
+ device_destroy(xillybus_class, MKDEV(unit->major,
+ i + unit->lowest_minor));
+
+ cdev_del(unit->cdev);
+
+unregister_chrdev:
+ unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+
+fail_obtain:
+ mutex_unlock(&unit_mutex);
+
+ kfree(unit);
+
+ return rc;
+}
+EXPORT_SYMBOL(xillybus_init_chrdev);
+
+void xillybus_cleanup_chrdev(void *private_data,
+ struct device *dev)
+{
+ int minor;
+ struct xilly_unit *unit;
+ bool found = false;
+
+ mutex_lock(&unit_mutex);
+
+ list_for_each_entry(unit, &unit_list, list_entry)
+ if (unit->private_data == private_data) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ dev_err(dev, "Weird bug: Failed to find unit\n");
+ mutex_unlock(&unit_mutex);
+ return;
+ }
+
+ for (minor = unit->lowest_minor;
+ minor < (unit->lowest_minor + unit->num_nodes);
+ minor++)
+ device_destroy(xillybus_class, MKDEV(unit->major, minor));
+
+ cdev_del(unit->cdev);
+
+ unregister_chrdev_region(MKDEV(unit->major, unit->lowest_minor),
+ unit->num_nodes);
+
+ dev_info(dev, "Removed %d device files.\n",
+ unit->num_nodes);
+
+ list_del(&unit->list_entry);
+ kfree(unit);
+
+ mutex_unlock(&unit_mutex);
+}
+EXPORT_SYMBOL(xillybus_cleanup_chrdev);
+
+int xillybus_find_inode(struct inode *inode,
+ void **private_data, int *index)
+{
+ int minor = iminor(inode);
+ int major = imajor(inode);
+ struct xilly_unit *unit;
+ bool found = false;
+
+ mutex_lock(&unit_mutex);
+
+ list_for_each_entry(unit, &unit_list, list_entry)
+ if (unit->major == major &&
+ minor >= unit->lowest_minor &&
+ minor < (unit->lowest_minor + unit->num_nodes)) {
+ found = true;
+ break;
+ }
+
+ mutex_unlock(&unit_mutex);
+
+ if (!found)
+ return -ENODEV;
+
+ *private_data = unit->private_data;
+ *index = minor - unit->lowest_minor;
+
+ return 0;
+}
+EXPORT_SYMBOL(xillybus_find_inode);
+
+static int __init xillybus_class_init(void)
+{
+ xillybus_class = class_create(THIS_MODULE, "xillybus");
+
+ if (IS_ERR(xillybus_class)) {
+ pr_warn("Failed to register xillybus class\n");
+
+ return PTR_ERR(xillybus_class);
+ }
+ return 0;
+}
+
+static void __exit xillybus_class_exit(void)
+{
+ class_destroy(xillybus_class);
+}
+
+module_init(xillybus_class_init);
+module_exit(xillybus_class_exit);
diff --git a/drivers/char/xillybus/xillybus_class.h b/drivers/char/xillybus/xillybus_class.h
new file mode 100644
index 000000000000..5dbfdfc95c65
--- /dev/null
+++ b/drivers/char/xillybus/xillybus_class.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 Xillybus Ltd, http://www.xillybus.com
+ *
+ * Header file for the Xillybus class
+ */
+
+#ifndef __XILLYBUS_CLASS_H
+#define __XILLYBUS_CLASS_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+
+int xillybus_init_chrdev(struct device *dev,
+ const struct file_operations *fops,
+ struct module *owner,
+ void *private_data,
+ unsigned char *idt, unsigned int len,
+ int num_nodes,
+ const char *prefix, bool enumerate);
+
+void xillybus_cleanup_chrdev(void *private_data,
+ struct device *dev);
+
+int xillybus_find_inode(struct inode *inode,
+ void **private_data, int *index);
+
+#endif /* __XILLYBUS_CLASS_H */
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 57fa68834981..931d0bf4cec6 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -21,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
-#include <linux/cdev.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
@@ -30,10 +29,10 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"
+#include "xillybus_class.h"
MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
-MODULE_VERSION("1.07");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");
@@ -58,16 +57,6 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus";
-static struct class *xillybus_class;
-
-/*
- * ep_list_lock is the last lock to be taken; No other lock requests are
- * allowed while holding it. It merely protects list_of_endpoints, and not
- * the endpoints listed in it.
- */
-
-static LIST_HEAD(list_of_endpoints);
-static struct mutex ep_list_lock;
static struct workqueue_struct *xillybus_wq;
/*
@@ -570,10 +559,8 @@ static int xilly_scan_idt(struct xilly_endpoint *endpoint,
unsigned char *scan;
int len;
- scan = idt;
- idt_handle->idt = idt;
-
- scan++; /* Skip version number */
+ scan = idt + 1;
+ idt_handle->names = scan;
while ((scan <= end_of_idt) && *scan) {
while ((scan <= end_of_idt) && *scan++)
@@ -581,6 +568,8 @@ static int xilly_scan_idt(struct xilly_endpoint *endpoint,
count++;
}
+ idt_handle->names_len = scan - idt_handle->names;
+
scan++;
if (scan > end_of_idt) {
@@ -1407,36 +1396,20 @@ static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
static int xillybus_open(struct inode *inode, struct file *filp)
{
- int rc = 0;
+ int rc;
unsigned long flags;
- int minor = iminor(inode);
- int major = imajor(inode);
- struct xilly_endpoint *ep_iter, *endpoint = NULL;
+ struct xilly_endpoint *endpoint;
struct xilly_channel *channel;
+ int index;
- mutex_lock(&ep_list_lock);
-
- list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
- if ((ep_iter->major == major) &&
- (minor >= ep_iter->lowest_minor) &&
- (minor < (ep_iter->lowest_minor +
- ep_iter->num_channels))) {
- endpoint = ep_iter;
- break;
- }
- }
- mutex_unlock(&ep_list_lock);
-
- if (!endpoint) {
- pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
- major, minor);
- return -ENODEV;
- }
+ rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
+ if (rc)
+ return rc;
if (endpoint->fatal_error)
return -EIO;
- channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
+ channel = endpoint->channels[1 + index];
filp->private_data = channel;
/*
@@ -1799,95 +1772,6 @@ static const struct file_operations xillybus_fops = {
.poll = xillybus_poll,
};
-static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
- const unsigned char *idt)
-{
- int rc;
- dev_t dev;
- int devnum, i, minor, major;
- char devname[48];
- struct device *device;
-
- rc = alloc_chrdev_region(&dev, 0, /* minor start */
- endpoint->num_channels,
- xillyname);
- if (rc) {
- dev_warn(endpoint->dev, "Failed to obtain major/minors");
- return rc;
- }
-
- endpoint->major = major = MAJOR(dev);
- endpoint->lowest_minor = minor = MINOR(dev);
-
- cdev_init(&endpoint->cdev, &xillybus_fops);
- endpoint->cdev.owner = endpoint->ephw->owner;
- rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
- endpoint->num_channels);
- if (rc) {
- dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
- goto unregister_chrdev;
- }
-
- idt++;
-
- for (i = minor, devnum = 0;
- devnum < endpoint->num_channels;
- devnum++, i++) {
- snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
-
- devname[sizeof(devname)-1] = 0; /* Should never matter */
-
- while (*idt++)
- /* Skip to next */;
-
- device = device_create(xillybus_class,
- NULL,
- MKDEV(major, i),
- NULL,
- "%s", devname);
-
- if (IS_ERR(device)) {
- dev_warn(endpoint->dev,
- "Failed to create %s device. Aborting.\n",
- devname);
- rc = -ENODEV;
- goto unroll_device_create;
- }
- }
-
- dev_info(endpoint->dev, "Created %d device files.\n",
- endpoint->num_channels);
- return 0; /* succeed */
-
-unroll_device_create:
- devnum--; i--;
- for (; devnum >= 0; devnum--, i--)
- device_destroy(xillybus_class, MKDEV(major, i));
-
- cdev_del(&endpoint->cdev);
-unregister_chrdev:
- unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
-
- return rc;
-}
-
-static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
-{
- int minor;
-
- for (minor = endpoint->lowest_minor;
- minor < (endpoint->lowest_minor + endpoint->num_channels);
- minor++)
- device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
- cdev_del(&endpoint->cdev);
- unregister_chrdev_region(MKDEV(endpoint->major,
- endpoint->lowest_minor),
- endpoint->num_channels);
-
- dev_info(endpoint->dev, "Removed %d device files.\n",
- endpoint->num_channels);
-}
-
struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
struct device *dev,
struct xilly_endpoint_hardware
@@ -2027,28 +1911,20 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
if (rc)
goto failed_idt;
- /*
- * endpoint is now completely configured. We put it on the list
- * available to open() before registering the char device(s)
- */
-
- mutex_lock(&ep_list_lock);
- list_add_tail(&endpoint->ep_list, &list_of_endpoints);
- mutex_unlock(&ep_list_lock);
+ rc = xillybus_init_chrdev(dev, &xillybus_fops,
+ endpoint->ephw->owner, endpoint,
+ idt_handle.names,
+ idt_handle.names_len,
+ endpoint->num_channels,
+ xillyname, false);
- rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
if (rc)
- goto failed_chrdevs;
+ goto failed_idt;
devres_release_group(dev, bootstrap_resources);
return 0;
-failed_chrdevs:
- mutex_lock(&ep_list_lock);
- list_del(&endpoint->ep_list);
- mutex_unlock(&ep_list_lock);
-
failed_idt:
xilly_quiesce(endpoint);
flush_workqueue(xillybus_wq);
@@ -2059,11 +1935,7 @@ EXPORT_SYMBOL(xillybus_endpoint_discovery);
void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{
- xillybus_cleanup_chrdev(endpoint);
-
- mutex_lock(&ep_list_lock);
- list_del(&endpoint->ep_list);
- mutex_unlock(&ep_list_lock);
+ xillybus_cleanup_chrdev(endpoint, endpoint->dev);
xilly_quiesce(endpoint);
@@ -2077,17 +1949,9 @@ EXPORT_SYMBOL(xillybus_endpoint_remove);
static int __init xillybus_init(void)
{
- mutex_init(&ep_list_lock);
-
- xillybus_class = class_create(THIS_MODULE, xillyname);
- if (IS_ERR(xillybus_class))
- return PTR_ERR(xillybus_class);
-
xillybus_wq = alloc_workqueue(xillyname, 0, 0);
- if (!xillybus_wq) {
- class_destroy(xillybus_class);
+ if (!xillybus_wq)
return -ENOMEM;
- }
return 0;
}
@@ -2096,8 +1960,6 @@ static void __exit xillybus_exit(void)
{
/* flush_workqueue() was called for each endpoint released */
destroy_workqueue(xillybus_wq);
-
- class_destroy(xillybus_class);
}
module_init(xillybus_init);
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 96b6de8a30e5..1a20b286fd1d 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -17,7 +17,6 @@
MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
-MODULE_VERSION("1.06");
MODULE_ALIAS("xillybus_of");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index 18b0c392bc93..bdf1c366b4fc 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -14,7 +14,6 @@
MODULE_DESCRIPTION("Xillybus driver for PCIe");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
-MODULE_VERSION("1.06");
MODULE_ALIAS("xillybus_pcie");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
new file mode 100644
index 000000000000..e7f88f35c702
--- /dev/null
+++ b/drivers/char/xillybus/xillyusb.c
@@ -0,0 +1,2259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the XillyUSB FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+
+#include "xillybus_class.h"
+
+MODULE_DESCRIPTION("Driver for XillyUSB FPGA IP Core");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_ALIAS("xillyusb");
+MODULE_LICENSE("GPL v2");
+
+#define XILLY_RX_TIMEOUT (10 * HZ / 1000)
+#define XILLY_RESPONSE_TIMEOUT (500 * HZ / 1000)
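+/* The two timeouts above are roughly 10 ms and 500 ms, expressed in jiffies */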
+
+#define BUF_SIZE_ORDER 4
+#define BUFNUM 8
+#define LOG2_IDT_FIFO_SIZE 16
+#define LOG2_INITIAL_FIFO_BUF_SIZE 16
+
+#define MSG_EP_NUM 1
+#define IN_EP_NUM 1
+
+static const char xillyname[] = "xillyusb";
+
+static unsigned int fifo_buf_order;
+
+#define USB_VENDOR_ID_XILINX 0x03fd
+#define USB_VENDOR_ID_ALTERA 0x09fb
+
+#define USB_PRODUCT_ID_XILLYUSB 0xebbe
+
+static const struct usb_device_id xillyusb_table[] = {
+ { USB_DEVICE(USB_VENDOR_ID_XILINX, USB_PRODUCT_ID_XILLYUSB) },
+ { USB_DEVICE(USB_VENDOR_ID_ALTERA, USB_PRODUCT_ID_XILLYUSB) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, xillyusb_table);
+
+struct xillyusb_dev;
+
+struct xillyfifo {
+ unsigned int bufsize; /* In bytes, always a power of 2 */
+ unsigned int bufnum;
+ unsigned int size; /* Lazy: Equals bufsize * bufnum */
+ unsigned int buf_order;
+
+ int fill; /* Number of bytes in the FIFO */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+
+ unsigned int readpos;
+ unsigned int readbuf;
+ unsigned int writepos;
+ unsigned int writebuf;
+ char **mem;
+};
+
+struct xillyusb_channel;
+
+struct xillyusb_endpoint {
+ struct xillyusb_dev *xdev;
+
+ struct mutex ep_mutex; /* serialize operations on endpoint */
+
+ struct list_head buffers;
+ struct list_head filled_buffers;
+ spinlock_t buffers_lock; /* protect these two lists */
+
+ unsigned int order;
+ unsigned int buffer_size;
+
+ unsigned int fill_mask;
+
+ int outstanding_urbs;
+
+ struct usb_anchor anchor;
+
+ struct xillyfifo fifo;
+
+ struct work_struct workitem;
+
+ bool shutting_down;
+ bool drained;
+ bool wake_on_drain;
+
+ u8 ep_num;
+};
+
+struct xillyusb_channel {
+ struct xillyusb_dev *xdev;
+
+ struct xillyfifo *in_fifo;
+ struct xillyusb_endpoint *out_ep;
+ struct mutex lock; /* protect @out_ep, @in_fifo, bit fields below */
+
+ struct mutex in_mutex; /* serialize fops on FPGA to host stream */
+ struct mutex out_mutex; /* serialize fops on host to FPGA stream */
+ wait_queue_head_t flushq;
+
+ int chan_idx;
+
+ u32 in_consumed_bytes;
+ u32 in_current_checkpoint;
+ u32 out_bytes;
+
+ unsigned int in_log2_element_size;
+ unsigned int out_log2_element_size;
+ unsigned int in_log2_fifo_size;
+ unsigned int out_log2_fifo_size;
+
+ unsigned int read_data_ok; /* EOF not arrived (yet) */
+ unsigned int poll_used;
+ unsigned int flushing;
+ unsigned int flushed;
+ unsigned int canceled;
+
+ /* Bit fields protected by @lock except for initialization */
+ unsigned readable:1;
+ unsigned writable:1;
+ unsigned open_for_read:1;
+ unsigned open_for_write:1;
+ unsigned in_synchronous:1;
+ unsigned out_synchronous:1;
+ unsigned in_seekable:1;
+ unsigned out_seekable:1;
+};
+
+struct xillybuffer {
+ struct list_head entry;
+ struct xillyusb_endpoint *ep;
+ void *buf;
+ unsigned int len;
+};
+
+struct xillyusb_dev {
+ struct xillyusb_channel *channels;
+
+ struct usb_device *udev;
+ struct device *dev; /* For dev_err() and such */
+ struct kref kref;
+ struct workqueue_struct *workq;
+
+ int error;
+ spinlock_t error_lock; /* protect @error */
+ struct work_struct wakeup_workitem;
+
+ int num_channels;
+
+ struct xillyusb_endpoint *msg_ep;
+ struct xillyusb_endpoint *in_ep;
+
+ struct mutex msg_mutex; /* serialize opcode transmission */
+ int in_bytes_left;
+ int leftover_chan_num;
+ unsigned int in_counter;
+ struct mutex process_in_mutex; /* synchronize wakeup_all() */
+};
+
+/* FPGA to host opcodes */
+enum {
+ OPCODE_DATA = 0,
+ OPCODE_QUIESCE_ACK = 1,
+ OPCODE_EOF = 2,
+ OPCODE_REACHED_CHECKPOINT = 3,
+ OPCODE_CANCELED_CHECKPOINT = 4,
+};
+
+/* Host to FPGA opcodes */
+enum {
+ OPCODE_QUIESCE = 0,
+ OPCODE_REQ_IDT = 1,
+ OPCODE_SET_CHECKPOINT = 2,
+ OPCODE_CLOSE = 3,
+ OPCODE_SET_PUSH = 4,
+ OPCODE_UPDATE_PUSH = 5,
+ OPCODE_CANCEL_CHECKPOINT = 6,
+ OPCODE_SET_ADDR = 7,
+};
+
+/*
+ * fifo_write() and fifo_read() are NOT reentrant (i.e. multiple concurrent
+ * calls to either function on the same FIFO are not allowed); however, it's
+ * OK for one thread to call fifo_write() and another to call fifo_read() on
+ * the same FIFO at the same time.
+ */
+
+static int fifo_write(struct xillyfifo *fifo,
+ const void *data, unsigned int len,
+ int (*copier)(void *, const void *, int))
+{
+ unsigned int done = 0;
+ unsigned int todo = len;
+ unsigned int nmax;
+ unsigned int writepos = fifo->writepos;
+ unsigned int writebuf = fifo->writebuf;
+ unsigned long flags;
+ int rc;
+
+ nmax = fifo->size - READ_ONCE(fifo->fill);
+
+ while (1) {
+ unsigned int nrail = fifo->bufsize - writepos;
+ unsigned int n = min(todo, nmax);
+
+ if (n == 0) {
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->fill += done;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ fifo->writepos = writepos;
+ fifo->writebuf = writebuf;
+
+ return done;
+ }
+
+ if (n > nrail)
+ n = nrail;
+
+ rc = (*copier)(fifo->mem[writebuf] + writepos, data + done, n);
+
+ if (rc)
+ return rc;
+
+ done += n;
+ todo -= n;
+
+ writepos += n;
+ nmax -= n;
+
+ if (writepos == fifo->bufsize) {
+ writepos = 0;
+ writebuf++;
+
+ if (writebuf == fifo->bufnum)
+ writebuf = 0;
+ }
+ }
+}
+
+static int fifo_read(struct xillyfifo *fifo,
+ void *data, unsigned int len,
+ int (*copier)(void *, const void *, int))
+{
+ unsigned int done = 0;
+ unsigned int todo = len;
+ unsigned int fill;
+ unsigned int readpos = fifo->readpos;
+ unsigned int readbuf = fifo->readbuf;
+ unsigned long flags;
+ int rc;
+
+ /*
+ * The spinlock here is necessary: without it, fifo->fill could be
+ * observed as already increased by fifo_write(), while the data it
+ * wrote to the buffer is not yet visible to this thread. That could
+ * lead to reading invalid data.
+ */
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ fill = fifo->fill;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ while (1) {
+ unsigned int nrail = fifo->bufsize - readpos;
+ unsigned int n = min(todo, fill);
+
+ if (n == 0) {
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->fill -= done;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ fifo->readpos = readpos;
+ fifo->readbuf = readbuf;
+
+ return done;
+ }
+
+ if (n > nrail)
+ n = nrail;
+
+ rc = (*copier)(data + done, fifo->mem[readbuf] + readpos, n);
+
+ if (rc)
+ return rc;
+
+ done += n;
+ todo -= n;
+
+ readpos += n;
+ fill -= n;
+
+ if (readpos == fifo->bufsize) {
+ readpos = 0;
+ readbuf++;
+
+ if (readbuf == fifo->bufnum)
+ readbuf = 0;
+ }
+ }
+}
+
+/*
+ * These three wrapper functions are used as the @copier argument to
+ * fifo_write() and fifo_read(), so that they can work directly with
+ * user memory as well.
+ */
+
+static int xilly_copy_from_user(void *dst, const void *src, int n)
+{
+ if (copy_from_user(dst, (const void __user *)src, n))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xilly_copy_to_user(void *dst, const void *src, int n)
+{
+ if (copy_to_user((void __user *)dst, src, n))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xilly_memcpy(void *dst, const void *src, int n)
+{
+ memcpy(dst, src, n);
+
+ return 0;
+}
+
+static int fifo_init(struct xillyfifo *fifo,
+ unsigned int log2_size)
+{
+ unsigned int log2_bufnum;
+ unsigned int buf_order;
+ int i;
+
+ unsigned int log2_fifo_buf_size;
+
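+ /*
+ * If allocating buffers of the current fifo_buf_order fails, the
+ * memfail path below lowers fifo_buf_order and jumps back here to
+ * retry with smaller (but more) buffers.
+ */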
+retry:
+ log2_fifo_buf_size = fifo_buf_order + PAGE_SHIFT;
+
+ if (log2_size > log2_fifo_buf_size) {
+ log2_bufnum = log2_size - log2_fifo_buf_size;
+ buf_order = fifo_buf_order;
+ fifo->bufsize = 1 << log2_fifo_buf_size;
+ } else {
+ log2_bufnum = 0;
+ buf_order = (log2_size > PAGE_SHIFT) ?
+ log2_size - PAGE_SHIFT : 0;
+ fifo->bufsize = 1 << log2_size;
+ }
+
+ fifo->bufnum = 1 << log2_bufnum;
+ fifo->size = fifo->bufnum * fifo->bufsize;
+ fifo->buf_order = buf_order;
+
+ fifo->mem = kmalloc_array(fifo->bufnum, sizeof(void *), GFP_KERNEL);
+
+ if (!fifo->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < fifo->bufnum; i++) {
+ fifo->mem[i] = (void *)
+ __get_free_pages(GFP_KERNEL, buf_order);
+
+ if (!fifo->mem[i])
+ goto memfail;
+ }
+
+ fifo->fill = 0;
+ fifo->readpos = 0;
+ fifo->readbuf = 0;
+ fifo->writepos = 0;
+ fifo->writebuf = 0;
+ spin_lock_init(&fifo->lock);
+ init_waitqueue_head(&fifo->waitq);
+ return 0;
+
+memfail:
+ for (i--; i >= 0; i--)
+ free_pages((unsigned long)fifo->mem[i], buf_order);
+
+ kfree(fifo->mem);
+ fifo->mem = NULL;
+
+ if (fifo_buf_order) {
+ fifo_buf_order--;
+ goto retry;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+static void fifo_mem_release(struct xillyfifo *fifo)
+{
+ int i;
+
+ if (!fifo->mem)
+ return;
+
+ for (i = 0; i < fifo->bufnum; i++)
+ free_pages((unsigned long)fifo->mem[i], fifo->buf_order);
+
+ kfree(fifo->mem);
+}
+
+/*
+ * When endpoint_quiesce() returns, the endpoint has no URBs submitted,
+ * won't accept any new URB submissions, and its related work item is
+ * not running and won't run anymore.
+ */
+
+static void endpoint_quiesce(struct xillyusb_endpoint *ep)
+{
+ mutex_lock(&ep->ep_mutex);
+ ep->shutting_down = true;
+ mutex_unlock(&ep->ep_mutex);
+
+ usb_kill_anchored_urbs(&ep->anchor);
+ cancel_work_sync(&ep->workitem);
+}
+
+/*
+ * Note that endpoint_dealloc() also frees fifo memory (if allocated), even
+ * though endpoint_alloc doesn't allocate that memory.
+ */
+
+static void endpoint_dealloc(struct xillyusb_endpoint *ep)
+{
+ struct list_head *this, *next;
+
+ fifo_mem_release(&ep->fifo);
+
+ /* Join @filled_buffers with @buffers to free these entries too */
+ list_splice(&ep->filled_buffers, &ep->buffers);
+
+ list_for_each_safe(this, next, &ep->buffers) {
+ struct xillybuffer *xb =
+ list_entry(this, struct xillybuffer, entry);
+
+ free_pages((unsigned long)xb->buf, ep->order);
+ kfree(xb);
+ }
+
+ kfree(ep);
+}
+
+static struct xillyusb_endpoint
+*endpoint_alloc(struct xillyusb_dev *xdev,
+ u8 ep_num,
+ void (*work)(struct work_struct *),
+ unsigned int order,
+ int bufnum)
+{
+ int i;
+
+ struct xillyusb_endpoint *ep;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+
+ if (!ep)
+ return NULL;
+
+ INIT_LIST_HEAD(&ep->buffers);
+ INIT_LIST_HEAD(&ep->filled_buffers);
+
+ spin_lock_init(&ep->buffers_lock);
+ mutex_init(&ep->ep_mutex);
+
+ init_usb_anchor(&ep->anchor);
+ INIT_WORK(&ep->workitem, work);
+
+ ep->order = order;
+ ep->buffer_size = 1 << (PAGE_SHIFT + order);
+ ep->outstanding_urbs = 0;
+ ep->drained = true;
+ ep->wake_on_drain = false;
+ ep->xdev = xdev;
+ ep->ep_num = ep_num;
+ ep->shutting_down = false;
+
+ for (i = 0; i < bufnum; i++) {
+ struct xillybuffer *xb;
+ unsigned long addr;
+
+ xb = kzalloc(sizeof(*xb), GFP_KERNEL);
+
+ if (!xb) {
+ endpoint_dealloc(ep);
+ return NULL;
+ }
+
+ addr = __get_free_pages(GFP_KERNEL, order);
+
+ if (!addr) {
+ kfree(xb);
+ endpoint_dealloc(ep);
+ return NULL;
+ }
+
+ xb->buf = (void *)addr;
+ xb->ep = ep;
+ list_add_tail(&xb->entry, &ep->buffers);
+ }
+ return ep;
+}
+
+static void cleanup_dev(struct kref *kref)
+{
+ struct xillyusb_dev *xdev =
+ container_of(kref, struct xillyusb_dev, kref);
+
+ if (xdev->in_ep)
+ endpoint_dealloc(xdev->in_ep);
+
+ if (xdev->msg_ep)
+ endpoint_dealloc(xdev->msg_ep);
+
+ if (xdev->workq)
+ destroy_workqueue(xdev->workq);
+
+ kfree(xdev->channels); /* Argument may be NULL, and that's fine */
+ kfree(xdev);
+}
+
+/*
+ * @process_in_mutex is taken to ensure that bulk_in_work() won't call
+ * process_bulk_in() after wakeup_all()'s execution: The latter zeroes all
+ * @read_data_ok entries, which will make process_bulk_in() report false
+ * errors if executed. The mechanism relies on xdev->error being assigned
+ * a non-zero value by report_io_error() prior to queueing wakeup_all(),
+ * which prevents bulk_in_work() from calling process_bulk_in().
+ *
+ * The fact that wakeup_all() and bulk_in_work() are queued on the same
+ * workqueue makes their concurrent execution very unlikely; however, the
+ * kernel's API doesn't seem to ensure this strictly.
+ */
+
+static void wakeup_all(struct work_struct *work)
+{
+ int i;
+ struct xillyusb_dev *xdev = container_of(work, struct xillyusb_dev,
+ wakeup_workitem);
+
+ mutex_lock(&xdev->process_in_mutex);
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ struct xillyusb_channel *chan = &xdev->channels[i];
+
+ mutex_lock(&chan->lock);
+
+ if (chan->in_fifo) {
+ /*
+ * Fake an EOF: Even if such arrives, it won't be
+ * processed.
+ */
+ chan->read_data_ok = 0;
+ wake_up_interruptible(&chan->in_fifo->waitq);
+ }
+
+ if (chan->out_ep)
+ wake_up_interruptible(&chan->out_ep->fifo.waitq);
+
+ mutex_unlock(&chan->lock);
+
+ wake_up_interruptible(&chan->flushq);
+ }
+
+ mutex_unlock(&xdev->process_in_mutex);
+
+ wake_up_interruptible(&xdev->msg_ep->fifo.waitq);
+
+ kref_put(&xdev->kref, cleanup_dev);
+}
+
+static void report_io_error(struct xillyusb_dev *xdev,
+ int errcode)
+{
+ unsigned long flags;
+ bool do_once = false;
+
+ spin_lock_irqsave(&xdev->error_lock, flags);
+ if (!xdev->error) {
+ xdev->error = errcode;
+ do_once = true;
+ }
+ spin_unlock_irqrestore(&xdev->error_lock, flags);
+
+ if (do_once) {
+ kref_get(&xdev->kref); /* xdev is used by work item */
+ queue_work(xdev->workq, &xdev->wakeup_workitem);
+ }
+}
+
+/*
+ * safely_assign_in_fifo() changes the value of chan->in_fifo and ensures
+ * the previous pointer is never used after its return.
+ */
+
+static void safely_assign_in_fifo(struct xillyusb_channel *chan,
+ struct xillyfifo *fifo)
+{
+ mutex_lock(&chan->lock);
+ chan->in_fifo = fifo;
+ mutex_unlock(&chan->lock);
+
+ flush_work(&chan->xdev->in_ep->workitem);
+}
+
+static void bulk_in_completer(struct urb *urb)
+{
+ struct xillybuffer *xb = urb->context;
+ struct xillyusb_endpoint *ep = xb->ep;
+ unsigned long flags;
+
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ report_io_error(ep->xdev, -EIO);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ return;
+ }
+
+ xb->len = urb->actual_length;
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->filled_buffers);
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ if (!ep->shutting_down)
+ queue_work(ep->xdev->workq, &ep->workitem);
+}
+
+static void bulk_out_completer(struct urb *urb)
+{
+ struct xillybuffer *xb = urb->context;
+ struct xillyusb_endpoint *ep = xb->ep;
+ unsigned long flags;
+
+ if (urb->status &&
+ (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)))
+ report_io_error(ep->xdev, -EIO);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ if (!ep->shutting_down)
+ queue_work(ep->xdev->workq, &ep->workitem);
+}
+
+static void try_queue_bulk_in(struct xillyusb_endpoint *ep)
+{
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct xillybuffer *xb;
+ struct urb *urb;
+
+ int rc;
+ unsigned long flags;
+ unsigned int bufsize = ep->buffer_size;
+
+ mutex_lock(&ep->ep_mutex);
+
+ if (ep->shutting_down || xdev->error)
+ goto done;
+
+ while (1) {
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ if (list_empty(&ep->buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
+ list_del(&xb->entry);
+ ep->outstanding_urbs++;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ report_io_error(xdev, -ENOMEM);
+ goto relist;
+ }
+
+ usb_fill_bulk_urb(urb, xdev->udev,
+ usb_rcvbulkpipe(xdev->udev, ep->ep_num),
+ xb->buf, bufsize, bulk_in_completer, xb);
+
+ usb_anchor_urb(urb, &ep->anchor);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (rc) {
+ report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
+ -EIO);
+ goto unanchor;
+ }
+
+ usb_free_urb(urb); /* This just decrements reference count */
+ }
+
+unanchor:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+
+relist:
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+done:
+ mutex_unlock(&ep->ep_mutex);
+}
+
+static void try_queue_bulk_out(struct xillyusb_endpoint *ep)
+{
+ struct xillyfifo *fifo = &ep->fifo;
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct xillybuffer *xb;
+ struct urb *urb;
+
+ int rc;
+ unsigned int fill;
+ unsigned long flags;
+ bool do_wake = false;
+
+ mutex_lock(&ep->ep_mutex);
+
+ if (ep->shutting_down || xdev->error)
+ goto done;
+
+ fill = READ_ONCE(fifo->fill) & ep->fill_mask;
+
+ while (1) {
+ int count;
+ unsigned int max_read;
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ /*
+ * Race conditions might have the FIFO filled while the
+ * endpoint is marked as drained here. That doesn't matter,
+ * because the sole purpose of @drained is to ensure that
+ * certain data has been sent on the USB channel before
+ * shutting it down. Hence knowing that the FIFO appears
+ * to be empty with no outstanding URBs at some moment
+ * is good enough.
+ */
+
+ if (!fill) {
+ ep->drained = !ep->outstanding_urbs;
+ if (ep->drained && ep->wake_on_drain)
+ do_wake = true;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ ep->drained = false;
+
+ if ((fill < ep->buffer_size && ep->outstanding_urbs) ||
+ list_empty(&ep->buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ goto done;
+ }
+
+ xb = list_first_entry(&ep->buffers, struct xillybuffer, entry);
+ list_del(&xb->entry);
+ ep->outstanding_urbs++;
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ max_read = min(fill, ep->buffer_size);
+
+ count = fifo_read(&ep->fifo, xb->buf, max_read, xilly_memcpy);
+
+ /*
+ * xilly_memcpy always returns 0 => fifo_read can't fail =>
+ * count > 0
+ */
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ report_io_error(xdev, -ENOMEM);
+ goto relist;
+ }
+
+ usb_fill_bulk_urb(urb, xdev->udev,
+ usb_sndbulkpipe(xdev->udev, ep->ep_num),
+ xb->buf, count, bulk_out_completer, xb);
+
+ usb_anchor_urb(urb, &ep->anchor);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (rc) {
+ report_io_error(xdev, (rc == -ENOMEM) ? -ENOMEM :
+ -EIO);
+ goto unanchor;
+ }
+
+ usb_free_urb(urb); /* This just decrements reference count */
+
+ fill -= count;
+ do_wake = true;
+ }
+
+unanchor:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+
+relist:
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+done:
+ mutex_unlock(&ep->ep_mutex);
+
+ if (do_wake)
+ wake_up_interruptible(&fifo->waitq);
+}
+
+static void bulk_out_work(struct work_struct *work)
+{
+ struct xillyusb_endpoint *ep = container_of(work,
+ struct xillyusb_endpoint,
+ workitem);
+ try_queue_bulk_out(ep);
+}
+
+static int process_in_opcode(struct xillyusb_dev *xdev,
+ int opcode,
+ int chan_num)
+{
+ struct xillyusb_channel *chan;
+ struct device *dev = xdev->dev;
+ int chan_idx = chan_num >> 1;
+
+ if (chan_idx >= xdev->num_channels) {
+ dev_err(dev, "Received illegal channel ID %d from FPGA\n",
+ chan_num);
+ return -EIO;
+ }
+
+ chan = &xdev->channels[chan_idx];
+
+ switch (opcode) {
+ case OPCODE_EOF:
+ if (!chan->read_data_ok) {
+ dev_err(dev, "Received unexpected EOF for channel %d\n",
+ chan_num);
+ return -EIO;
+ }
+
+ /*
+ * A write memory barrier ensures that the FIFO's fill level
+ * is visible before read_data_ok turns zero, so the data in
+ * the FIFO isn't missed by the consumer.
+ */
+ smp_wmb();
+ WRITE_ONCE(chan->read_data_ok, 0);
+ wake_up_interruptible(&chan->in_fifo->waitq);
+ break;
+
+ case OPCODE_REACHED_CHECKPOINT:
+ chan->flushing = 0;
+ wake_up_interruptible(&chan->flushq);
+ break;
+
+ case OPCODE_CANCELED_CHECKPOINT:
+ chan->canceled = 1;
+ wake_up_interruptible(&chan->flushq);
+ break;
+
+ default:
+ dev_err(dev, "Received illegal opcode %d from FPGA\n",
+ opcode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int process_bulk_in(struct xillybuffer *xb)
+{
+ struct xillyusb_endpoint *ep = xb->ep;
+ struct xillyusb_dev *xdev = ep->xdev;
+ struct device *dev = xdev->dev;
+ int dws = xb->len >> 2;
+ __le32 *p = xb->buf;
+ u32 ctrlword;
+ struct xillyusb_channel *chan;
+ struct xillyfifo *fifo;
+ int chan_num = 0, opcode;
+ int chan_idx;
+ int bytes, count, dwconsume;
+ int in_bytes_left = 0;
+ int rc;
+
+ if ((dws << 2) != xb->len) {
+ dev_err(dev, "Received BULK IN transfer with %d bytes, not a multiple of 4\n",
+ xb->len);
+ return -EIO;
+ }
+
+ if (xdev->in_bytes_left) {
+ bytes = min(xdev->in_bytes_left, dws << 2);
+ in_bytes_left = xdev->in_bytes_left - bytes;
+ chan_num = xdev->leftover_chan_num;
+ goto resume_leftovers;
+ }
+
+ while (dws) {
+ ctrlword = le32_to_cpu(*p++);
+ dws--;
+
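+ /*
+ * Control word layout, as implied by the masks below: bits 11:0 are
+ * the channel number, bits 21:12 the count field and bits 27:24 the
+ * opcode.
+ */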
+ chan_num = ctrlword & 0xfff;
+ count = (ctrlword >> 12) & 0x3ff;
+ opcode = (ctrlword >> 24) & 0xf;
+
+ if (opcode != OPCODE_DATA) {
+ unsigned int in_counter = xdev->in_counter++ & 0x3ff;
+
+ if (count != in_counter) {
+ dev_err(dev, "Expected opcode counter %d, got %d\n",
+ in_counter, count);
+ return -EIO;
+ }
+
+ rc = process_in_opcode(xdev, opcode, chan_num);
+
+ if (rc)
+ return rc;
+
+ continue;
+ }
+
+ bytes = min(count + 1, dws << 2);
+ in_bytes_left = count + 1 - bytes;
+
+resume_leftovers:
+ chan_idx = chan_num >> 1;
+
+ if (!(chan_num & 1) || chan_idx >= xdev->num_channels ||
+ !xdev->channels[chan_idx].read_data_ok) {
+ dev_err(dev, "Received illegal channel ID %d from FPGA\n",
+ chan_num);
+ return -EIO;
+ }
+ chan = &xdev->channels[chan_idx];
+
+ fifo = chan->in_fifo;
+
+ if (unlikely(!fifo))
+ return -EIO; /* We got really unexpected data */
+
+ if (bytes != fifo_write(fifo, p, bytes, xilly_memcpy)) {
+ dev_err(dev, "Misbehaving FPGA overflowed an upstream FIFO!\n");
+ return -EIO;
+ }
+
+ wake_up_interruptible(&fifo->waitq);
+
+ dwconsume = (bytes + 3) >> 2;
+ dws -= dwconsume;
+ p += dwconsume;
+ }
+
+ xdev->in_bytes_left = in_bytes_left;
+ xdev->leftover_chan_num = chan_num;
+ return 0;
+}
+
+static void bulk_in_work(struct work_struct *work)
+{
+ struct xillyusb_endpoint *ep =
+ container_of(work, struct xillyusb_endpoint, workitem);
+ struct xillyusb_dev *xdev = ep->xdev;
+ unsigned long flags;
+ struct xillybuffer *xb;
+ bool consumed = false;
+ int rc = 0;
+
+ mutex_lock(&xdev->process_in_mutex);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+
+ while (1) {
+ if (rc || list_empty(&ep->filled_buffers)) {
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+ mutex_unlock(&xdev->process_in_mutex);
+
+ if (rc)
+ report_io_error(xdev, rc);
+ else if (consumed)
+ try_queue_bulk_in(ep);
+
+ return;
+ }
+
+ xb = list_first_entry(&ep->filled_buffers, struct xillybuffer,
+ entry);
+ list_del(&xb->entry);
+
+ spin_unlock_irqrestore(&ep->buffers_lock, flags);
+
+ consumed = true;
+
+ if (!xdev->error)
+ rc = process_bulk_in(xb);
+
+ spin_lock_irqsave(&ep->buffers_lock, flags);
+ list_add_tail(&xb->entry, &ep->buffers);
+ ep->outstanding_urbs--;
+ }
+}
+
+static int xillyusb_send_opcode(struct xillyusb_dev *xdev,
+ int chan_num, char opcode, u32 data)
+{
+ struct xillyusb_endpoint *ep = xdev->msg_ep;
+ struct xillyfifo *fifo = &ep->fifo;
+ __le32 msg[2];
+
+ int rc = 0;
+
+ msg[0] = cpu_to_le32((chan_num & 0xfff) |
+ ((opcode & 0xf) << 24));
+ msg[1] = cpu_to_le32(data);
+
+ mutex_lock(&xdev->msg_mutex);
+
+ /*
+ * The wait queue is woken with the interruptible variant, so the
+ * wait function matches; however, returning because of an interrupt
+ * will mess things up considerably, in particular when the caller is
+ * the release method. And the xdev->error part prevents being stuck
+ * forever in the event of a bizarre hardware bug: Pull the USB plug.
+ */
+
+ while (wait_event_interruptible(fifo->waitq,
+ fifo->fill <= (fifo->size - 8) ||
+ xdev->error))
+ ; /* Empty loop */
+
+ if (xdev->error) {
+ rc = xdev->error;
+ goto unlock_done;
+ }
+
+ fifo_write(fifo, (void *)msg, 8, xilly_memcpy);
+
+ try_queue_bulk_out(ep);
+
+unlock_done:
+ mutex_unlock(&xdev->msg_mutex);
+
+ return rc;
+}
+
+/*
+ * Note that flush_downstream() merely waits for the data to arrive at
+ * the application logic at the FPGA -- unlike PCIe Xillybus' counterpart,
+ * it does nothing to make that happen (nor is that necessary).
+ *
+ * This function is not reentrant for the same @chan, but this is covered
+ * by the fact that for any given @chan, it's called either by the open,
+ * write, llseek and flush fops methods, which can't run in parallel (and the
+ * write + flush and llseek method handlers are protected with out_mutex).
+ *
+ * chan->flushed is there to avoid multiple flushes at the same position,
+ * in particular as a result of programs that close the file descriptor
+ * e.g. after a dup2() for redirection.
+ */
+
+static int flush_downstream(struct xillyusb_channel *chan,
+ long timeout,
+ bool interruptible)
+{
+ struct xillyusb_dev *xdev = chan->xdev;
+ int chan_num = chan->chan_idx << 1;
+ long deadline, left_to_sleep;
+ int rc;
+
+ if (chan->flushed)
+ return 0;
+
+ deadline = jiffies + 1 + timeout;
+
+ if (chan->flushing) {
+ long cancel_deadline = jiffies + 1 + XILLY_RESPONSE_TIMEOUT;
+
+ chan->canceled = 0;
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_CANCEL_CHECKPOINT, 0);
+
+ if (rc)
+ return rc; /* Only real error, never -EINTR */
+
+ /* Ignoring interrupts. Cancellation must be handled */
+ while (!chan->canceled) {
+ left_to_sleep = cancel_deadline - ((long)jiffies);
+
+ if (left_to_sleep <= 0) {
+ report_io_error(xdev, -EIO);
+ return -EIO;
+ }
+
+ rc = wait_event_interruptible_timeout(chan->flushq,
+ chan->canceled ||
+ xdev->error,
+ left_to_sleep);
+
+ if (xdev->error)
+ return xdev->error;
+ }
+ }
+
+ chan->flushing = 1;
+
+ /*
+ * The checkpoint is given in terms of data elements, not bytes. As
+ * a result, if less than an element's worth of data is stored in the
+ * FIFO, it's not flushed, including the flush before closing, which
+ * means that such data is lost. This is consistent with PCIe Xillybus.
+ */
+
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_CHECKPOINT,
+ chan->out_bytes >>
+ chan->out_log2_element_size);
+
+ if (rc)
+ return rc; /* Only real error, never -EINTR */
+
+ if (!timeout) {
+ while (chan->flushing) {
+ rc = wait_event_interruptible(chan->flushq,
+ !chan->flushing ||
+ xdev->error);
+ if (xdev->error)
+ return xdev->error;
+
+ if (interruptible && rc)
+ return -EINTR;
+ }
+
+ goto done;
+ }
+
+ while (chan->flushing) {
+ left_to_sleep = deadline - ((long)jiffies);
+
+ if (left_to_sleep <= 0)
+ return -ETIMEDOUT;
+
+ rc = wait_event_interruptible_timeout(chan->flushq,
+ !chan->flushing ||
+ xdev->error,
+ left_to_sleep);
+
+ if (xdev->error)
+ return xdev->error;
+
+ if (interruptible && rc < 0)
+ return -EINTR;
+ }
+
+done:
+ chan->flushed = 1;
+ return 0;
+}
+
+/* request_read_anything(): Ask the FPGA for any little amount of data */
+static int request_read_anything(struct xillyusb_channel *chan,
+ char opcode)
+{
+ struct xillyusb_dev *xdev = chan->xdev;
+ unsigned int sh = chan->in_log2_element_size;
+ int chan_num = (chan->chan_idx << 1) | 1;
+ u32 mercy = chan->in_consumed_bytes + (2 << sh) - 1;
+
+ return xillyusb_send_opcode(xdev, chan_num, opcode, mercy >> sh);
+}
+
+static int xillyusb_open(struct inode *inode, struct file *filp)
+{
+ struct xillyusb_dev *xdev;
+ struct xillyusb_channel *chan;
+ struct xillyfifo *in_fifo = NULL;
+ struct xillyusb_endpoint *out_ep = NULL;
+ int rc;
+ int index;
+
+ rc = xillybus_find_inode(inode, (void **)&xdev, &index);
+ if (rc)
+ return rc;
+
+ chan = &xdev->channels[index];
+ filp->private_data = chan;
+
+ mutex_lock(&chan->lock);
+
+ rc = -ENODEV;
+
+ if (xdev->error)
+ goto unmutex_fail;
+
+ if (((filp->f_mode & FMODE_READ) && !chan->readable) ||
+ ((filp->f_mode & FMODE_WRITE) && !chan->writable))
+ goto unmutex_fail;
+
+ if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_READ) &&
+ chan->in_synchronous) {
+ dev_err(xdev->dev,
+ "open() failed: O_NONBLOCK not allowed for read on this device\n");
+ goto unmutex_fail;
+ }
+
+ if ((filp->f_flags & O_NONBLOCK) && (filp->f_mode & FMODE_WRITE) &&
+ chan->out_synchronous) {
+ dev_err(xdev->dev,
+ "open() failed: O_NONBLOCK not allowed for write on this device\n");
+ goto unmutex_fail;
+ }
+
+ rc = -EBUSY;
+
+ if (((filp->f_mode & FMODE_READ) && chan->open_for_read) ||
+ ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
+ goto unmutex_fail;
+
+ kref_get(&xdev->kref);
+
+ if (filp->f_mode & FMODE_READ)
+ chan->open_for_read = 1;
+
+ if (filp->f_mode & FMODE_WRITE)
+ chan->open_for_write = 1;
+
+ mutex_unlock(&chan->lock);
+
+ if (filp->f_mode & FMODE_WRITE) {
+ out_ep = endpoint_alloc(xdev,
+ (chan->chan_idx + 2) | USB_DIR_OUT,
+ bulk_out_work, BUF_SIZE_ORDER, BUFNUM);
+
+ if (!out_ep) {
+ rc = -ENOMEM;
+ goto unopen;
+ }
+
+ rc = fifo_init(&out_ep->fifo, chan->out_log2_fifo_size);
+
+ if (rc)
+ goto late_unopen;
+
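+ /*
+ * Negating a power of two yields a mask that rounds the FIFO's fill
+ * level down to a whole number of output elements (see its use in
+ * try_queue_bulk_out()).
+ */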
+ out_ep->fill_mask = -(1 << chan->out_log2_element_size);
+ chan->out_bytes = 0;
+ chan->flushed = 0;
+
+ /*
+ * Sending a flush request to a previously closed stream
+ * effectively opens it, and also waits until the command is
+ * confirmed by the FPGA. The latter is necessary because the
+ * data is sent through a separate BULK OUT endpoint, and the
+ * xHCI controller is free to reorder transmissions.
+ *
+ * This can't go wrong unless there's a serious hardware error
+ * (or the computer is stuck for 500 ms?)
+ */
+ rc = flush_downstream(chan, XILLY_RESPONSE_TIMEOUT, false);
+
+ if (rc == -ETIMEDOUT) {
+ rc = -EIO;
+ report_io_error(xdev, rc);
+ }
+
+ if (rc)
+ goto late_unopen;
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ in_fifo = kzalloc(sizeof(*in_fifo), GFP_KERNEL);
+
+ if (!in_fifo) {
+ rc = -ENOMEM;
+ goto late_unopen;
+ }
+
+ rc = fifo_init(in_fifo, chan->in_log2_fifo_size);
+
+ if (rc) {
+ kfree(in_fifo);
+ goto late_unopen;
+ }
+ }
+
+ mutex_lock(&chan->lock);
+ if (in_fifo) {
+ chan->in_fifo = in_fifo;
+ chan->read_data_ok = 1;
+ }
+ if (out_ep)
+ chan->out_ep = out_ep;
+ mutex_unlock(&chan->lock);
+
+ if (in_fifo) {
+ u32 in_checkpoint = 0;
+
+ if (!chan->in_synchronous)
+ in_checkpoint = in_fifo->size >>
+ chan->in_log2_element_size;
+
+ chan->in_consumed_bytes = 0;
+ chan->poll_used = 0;
+ chan->in_current_checkpoint = in_checkpoint;
+ rc = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
+ OPCODE_SET_CHECKPOINT,
+ in_checkpoint);
+
+ if (rc) /* Failure guarantees that opcode wasn't sent */
+ goto unfifo;
+
+ /*
+ * In non-blocking mode, request the FPGA to send any data it
+ * has right away. Otherwise, the first read() will always
+ * return -EAGAIN, which is OK strictly speaking, but ugly.
+ * Checking and unrolling if this fails isn't worth the
+ * effort -- the error is propagated to the first read()
+ * anyhow.
+ */
+ if (filp->f_flags & O_NONBLOCK)
+ request_read_anything(chan, OPCODE_SET_PUSH);
+ }
+
+ return 0;
+
+unfifo:
+ chan->read_data_ok = 0;
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(in_fifo);
+ kfree(in_fifo);
+
+ if (out_ep) {
+ mutex_lock(&chan->lock);
+ chan->out_ep = NULL;
+ mutex_unlock(&chan->lock);
+ }
+
+late_unopen:
+ if (out_ep)
+ endpoint_dealloc(out_ep);
+
+unopen:
+ mutex_lock(&chan->lock);
+
+ if (filp->f_mode & FMODE_READ)
+ chan->open_for_read = 0;
+
+ if (filp->f_mode & FMODE_WRITE)
+ chan->open_for_write = 0;
+
+ mutex_unlock(&chan->lock);
+
+ kref_put(&xdev->kref, cleanup_dev);
+
+ return rc;
+
+unmutex_fail:
+ mutex_unlock(&chan->lock);
+ return rc;
+}
+
+static ssize_t xillyusb_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ struct xillyfifo *fifo = chan->in_fifo;
+ int chan_num = (chan->chan_idx << 1) | 1;
+
+ long deadline, left_to_sleep;
+ int bytes_done = 0;
+ bool sent_set_push = false;
+ int rc;
+
+ deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+ rc = mutex_lock_interruptible(&chan->in_mutex);
+
+ if (rc)
+ return rc;
+
+ while (1) {
+ u32 fifo_checkpoint_bytes, complete_checkpoint_bytes;
+ u32 complete_checkpoint, fifo_checkpoint;
+ u32 checkpoint;
+ s32 diff, leap;
+ unsigned int sh = chan->in_log2_element_size;
+ bool checkpoint_for_complete;
+
+ rc = fifo_read(fifo, (__force void *)userbuf + bytes_done,
+ count - bytes_done, xilly_copy_to_user);
+
+ if (rc < 0)
+ break;
+
+ bytes_done += rc;
+ chan->in_consumed_bytes += rc;
+
+ left_to_sleep = deadline - ((long)jiffies);
+
+ /*
+ * Some 32-bit arithmetic that may wrap. Note that
+ * complete_checkpoint is rounded up to the closest element
+ * boundary, because the read() can't be completed otherwise.
+ * fifo_checkpoint_bytes is rounded down, because it protects
+ * in_fifo from overflowing.
+ */
+
+ fifo_checkpoint_bytes = chan->in_consumed_bytes + fifo->size;
+ complete_checkpoint_bytes =
+ chan->in_consumed_bytes + count - bytes_done;
+
+ fifo_checkpoint = fifo_checkpoint_bytes >> sh;
+ complete_checkpoint =
+ (complete_checkpoint_bytes + (1 << sh) - 1) >> sh;
+
+ diff = (fifo_checkpoint - complete_checkpoint) << sh;
+
+ if (chan->in_synchronous && diff >= 0) {
+ checkpoint = complete_checkpoint;
+ checkpoint_for_complete = true;
+ } else {
+ checkpoint = fifo_checkpoint;
+ checkpoint_for_complete = false;
+ }
+
+ leap = (checkpoint - chan->in_current_checkpoint) << sh;
+
+ /*
+ * To prevent flooding the FPGA with OPCODE_SET_CHECKPOINT commands
+ * as data is consumed, a new checkpoint is issued only if it moves the
+ * checkpoint by at least an 8th of the FIFO's size, or if
+ * it's necessary to complete the number of bytes requested by
+ * the read() call.
+ *
+ * chan->read_data_ok is checked to spare an unnecessary
+ * submission after receiving EOF; it's harmless, however, if one
+ * slips through.
+ */
+
+ if (chan->read_data_ok &&
+ (leap > (fifo->size >> 3) ||
+ (checkpoint_for_complete && leap > 0))) {
+ chan->in_current_checkpoint = checkpoint;
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_CHECKPOINT,
+ checkpoint);
+
+ if (rc)
+ break;
+ }
+
+ if (bytes_done == count ||
+ (left_to_sleep <= 0 && bytes_done))
+ break;
+
+ /*
+ * Reaching here means that the FIFO was empty when
+ * fifo_read() returned, but not necessarily right now. Error
+ * and EOF are checked and reported only now, so that no data
+ * that made its way into the FIFO is lost.
+ */
+
+ if (!READ_ONCE(chan->read_data_ok)) { /* FPGA has sent EOF */
+ /* Has data slipped into the FIFO since fifo_read()? */
+ smp_rmb();
+ if (READ_ONCE(fifo->fill))
+ continue;
+
+ rc = 0;
+ break;
+ }
+
+ if (xdev->error) {
+ rc = xdev->error;
+ break;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (!sent_set_push) {
+ rc = xillyusb_send_opcode(xdev, chan_num,
+ OPCODE_SET_PUSH,
+ complete_checkpoint);
+
+ if (rc)
+ break;
+
+ sent_set_push = true;
+ }
+
+ if (left_to_sleep > 0) {
+ /*
+ * Note that when xdev->error is set (e.g. when the
+ * device is unplugged), read_data_ok turns zero and
+ * fifo->waitq is woken.
+ * Therefore xdev->error needs no special attention here.
+ */
+
+ rc = wait_event_interruptible_timeout
+ (fifo->waitq,
+ fifo->fill || !chan->read_data_ok,
+ left_to_sleep);
+ } else { /* bytes_done == 0 */
+ /* Tell FPGA to send anything it has */
+ rc = request_read_anything(chan, OPCODE_UPDATE_PUSH);
+
+ if (rc)
+ break;
+
+ rc = wait_event_interruptible
+ (fifo->waitq,
+ fifo->fill || !chan->read_data_ok);
+ }
+
+ if (rc < 0) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (((filp->f_flags & O_NONBLOCK) || chan->poll_used) &&
+ !READ_ONCE(fifo->fill))
+ request_read_anything(chan, OPCODE_SET_PUSH);
+
+ mutex_unlock(&chan->in_mutex);
+
+ if (bytes_done)
+ return bytes_done;
+
+ return rc;
+}
+
+static int xillyusb_flush(struct file *filp, fl_owner_t id)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ int rc;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return 0;
+
+ rc = mutex_lock_interruptible(&chan->out_mutex);
+
+ if (rc)
+ return rc;
+
+ /*
+ * One second's timeout on flushing. Interrupts are ignored, because if
+ * the user pressed CTRL-C, that interrupt will still be in flight by
+ * the time we reach here, and the opportunity to flush is lost.
+ */
+ rc = flush_downstream(chan, HZ, false);
+
+ mutex_unlock(&chan->out_mutex);
+
+ if (rc == -ETIMEDOUT) {
+ /* The things you do to use dev_warn() and not pr_warn() */
+ struct xillyusb_dev *xdev = chan->xdev;
+
+ mutex_lock(&chan->lock);
+ if (!xdev->error)
+ dev_warn(xdev->dev,
+ "Timed out while flushing. Output data may be lost.\n");
+ mutex_unlock(&chan->lock);
+ }
+
+ return rc;
+}
+
+static ssize_t xillyusb_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ struct xillyfifo *fifo = &chan->out_ep->fifo;
+ int rc;
+
+ rc = mutex_lock_interruptible(&chan->out_mutex);
+
+ if (rc)
+ return rc;
+
+ while (1) {
+ if (xdev->error) {
+ rc = xdev->error;
+ break;
+ }
+
+ if (count == 0)
+ break;
+
+ rc = fifo_write(fifo, (__force void *)userbuf, count,
+ xilly_copy_from_user);
+
+ if (rc != 0)
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (wait_event_interruptible
+ (fifo->waitq,
+ fifo->fill != fifo->size || xdev->error)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (rc < 0)
+ goto done;
+
+ chan->out_bytes += rc;
+
+ if (rc) {
+ try_queue_bulk_out(chan->out_ep);
+ chan->flushed = 0;
+ }
+
+ if (chan->out_synchronous) {
+ int flush_rc = flush_downstream(chan, 0, true);
+
+ if (flush_rc && !rc)
+ rc = flush_rc;
+ }
+
+done:
+ mutex_unlock(&chan->out_mutex);
+
+ return rc;
+}
+
+static int xillyusb_release(struct inode *inode, struct file *filp)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ int rc_read = 0, rc_write = 0;
+
+ if (filp->f_mode & FMODE_READ) {
+ struct xillyfifo *in_fifo = chan->in_fifo;
+
+ rc_read = xillyusb_send_opcode(xdev, (chan->chan_idx << 1) | 1,
+ OPCODE_CLOSE, 0);
+ /*
+ * If rc_read is nonzero, xdev->error indicates a global
+ * device error. The error is reported later, so that the
+ * resources are freed first.
+ *
+ * Looping on wait_event_interruptible() kinda breaks the idea
+ * of being interruptible, and this should have been
+ * wait_event(). Only it's being woken with
+ * wake_up_interruptible() for the sake of other uses. If
+ * there's a global device error, chan->read_data_ok is
+ * deasserted and the wait queue is woken up, so this is
+ * covered.
+ */
+
+ while (wait_event_interruptible(in_fifo->waitq,
+ !chan->read_data_ok))
+ ; /* Empty loop */
+
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(in_fifo);
+ kfree(in_fifo);
+
+ mutex_lock(&chan->lock);
+ chan->open_for_read = 0;
+ mutex_unlock(&chan->lock);
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ struct xillyusb_endpoint *ep = chan->out_ep;
+ /*
+ * chan->flushing isn't zeroed. If the pre-release flush timed
+ * out, a cancel request will be sent before the next
+ * OPCODE_SET_CHECKPOINT (i.e. when the file is opened again).
+ * This is despite the fact that the FPGA forgets about the checkpoint
+ * request when the file closes. Still, in an exceptional race
+ * condition, the FPGA could send an OPCODE_REACHED_CHECKPOINT
+ * just before closing that would reach the host after the
+ * file has re-opened.
+ */
+
+ mutex_lock(&chan->lock);
+ chan->out_ep = NULL;
+ mutex_unlock(&chan->lock);
+
+ endpoint_quiesce(ep);
+ endpoint_dealloc(ep);
+
+ /* See comments on rc_read above */
+ rc_write = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
+ OPCODE_CLOSE, 0);
+
+ mutex_lock(&chan->lock);
+ chan->open_for_write = 0;
+ mutex_unlock(&chan->lock);
+ }
+
+ kref_put(&xdev->kref, cleanup_dev);
+
+ return rc_read ? rc_read : rc_write;
+}
+
+/*
+ * Xillybus' API allows device nodes to be seekable, giving the user
+ * application access to a RAM array on the FPGA (or logic emulating it).
+ */
+
+static loff_t xillyusb_llseek(struct file *filp, loff_t offset, int whence)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ struct xillyusb_dev *xdev = chan->xdev;
+ loff_t pos = filp->f_pos;
+ int rc = 0;
+ unsigned int log2_element_size = chan->readable ?
+ chan->in_log2_element_size : chan->out_log2_element_size;
+
+ /*
+ * Take both mutexes, not allowing interrupts, since it seems like
+ * common applications don't expect an -EINTR here. Besides, multiple
+ * access to a single file descriptor on seekable devices is a mess
+ * anyhow.
+ */
+
+ mutex_lock(&chan->out_mutex);
+ mutex_lock(&chan->in_mutex);
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = offset;
+ break;
+ case SEEK_CUR:
+ pos += offset;
+ break;
+ case SEEK_END:
+ pos = offset; /* Going to the end => to the beginning */
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* In any case, we must finish on an element boundary */
+ if (pos & ((1 << log2_element_size) - 1)) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = xillyusb_send_opcode(xdev, chan->chan_idx << 1,
+ OPCODE_SET_ADDR,
+ pos >> log2_element_size);
+
+ if (rc)
+ goto end;
+
+ if (chan->writable) {
+ chan->flushed = 0;
+ rc = flush_downstream(chan, HZ, false);
+ }
+
+end:
+ mutex_unlock(&chan->out_mutex);
+ mutex_unlock(&chan->in_mutex);
+
+ if (rc) /* Return error after releasing mutexes */
+ return rc;
+
+ filp->f_pos = pos;
+
+ return pos;
+}
+
+static __poll_t xillyusb_poll(struct file *filp, poll_table *wait)
+{
+ struct xillyusb_channel *chan = filp->private_data;
+ __poll_t mask = 0;
+
+ if (chan->in_fifo)
+ poll_wait(filp, &chan->in_fifo->waitq, wait);
+
+ if (chan->out_ep)
+ poll_wait(filp, &chan->out_ep->fifo.waitq, wait);
+
+ /*
+ * If this is the first time poll() is called, and the file is
+ * readable, set the relevant flag. Also tell the FPGA to send all it
+ * has, to kickstart the mechanism that ensures there's always some
+ * data in in_fifo unless the stream is dry end-to-end. Note that the
+ * first poll() may not return an EPOLLIN, even if there's data on the
+ * FPGA. Rather, the data will arrive soon, and trigger the relevant
+ * wait queue.
+ */
+
+ if (!chan->poll_used && chan->in_fifo) {
+ chan->poll_used = 1;
+ request_read_anything(chan, OPCODE_SET_PUSH);
+ }
+
+ /*
+ * poll() won't play ball regarding read() channels which
+ * are synchronous. Allowing that would create situations where data
+ * has been delivered at the FPGA while the user expects select() to
+ * wake up, which it may never do. So make it never work.
+ */
+
+ if (chan->in_fifo && !chan->in_synchronous &&
+ (READ_ONCE(chan->in_fifo->fill) || !chan->read_data_ok))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (chan->out_ep &&
+ (READ_ONCE(chan->out_ep->fifo.fill) != chan->out_ep->fifo.size))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ if (chan->xdev->error)
+ mask |= EPOLLERR;
+
+ return mask;
+}
+
+static const struct file_operations xillyusb_fops = {
+ .owner = THIS_MODULE,
+ .read = xillyusb_read,
+ .write = xillyusb_write,
+ .open = xillyusb_open,
+ .flush = xillyusb_flush,
+ .release = xillyusb_release,
+ .llseek = xillyusb_llseek,
+ .poll = xillyusb_poll,
+};
+
+static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
+{
+ xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
+ bulk_out_work, 1, 2);
+ if (!xdev->msg_ep)
+ return -ENOMEM;
+
+ if (fifo_init(&xdev->msg_ep->fifo, 13)) /* 8 kiB */
+ goto dealloc;
+
+ xdev->msg_ep->fill_mask = -8; /* 8 bytes granularity */
+
+ xdev->in_ep = endpoint_alloc(xdev, IN_EP_NUM | USB_DIR_IN,
+ bulk_in_work, BUF_SIZE_ORDER, BUFNUM);
+ if (!xdev->in_ep)
+ goto dealloc;
+
+ try_queue_bulk_in(xdev->in_ep);
+
+ return 0;
+
+dealloc:
+ endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem if allocated */
+ return -ENOMEM;
+}
+
+static int setup_channels(struct xillyusb_dev *xdev,
+ __le16 *chandesc,
+ int num_channels)
+{
+ struct xillyusb_channel *chan;
+ int i;
+
+ chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ xdev->channels = chan;
+
+ for (i = 0; i < num_channels; i++, chan++) {
+ unsigned int in_desc = le16_to_cpu(*chandesc++);
+ unsigned int out_desc = le16_to_cpu(*chandesc++);
+
+ chan->xdev = xdev;
+ mutex_init(&chan->in_mutex);
+ mutex_init(&chan->out_mutex);
+ mutex_init(&chan->lock);
+ init_waitqueue_head(&chan->flushq);
+
+ chan->chan_idx = i;
+
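+ /*
+ * Each 16-bit descriptor word, as decoded below: bit 7 marks a
+ * valid entry, bit 6 a synchronous stream, bit 5 a seekable one,
+ * bits 3:0 hold log2 of the element size, and bits 12:8 hold
+ * log2 of the FIFO size minus 16.
+ */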
+ if (in_desc & 0x80) { /* Entry is valid */
+ chan->readable = 1;
+ chan->in_synchronous = !!(in_desc & 0x40);
+ chan->in_seekable = !!(in_desc & 0x20);
+ chan->in_log2_element_size = in_desc & 0x0f;
+ chan->in_log2_fifo_size = ((in_desc >> 8) & 0x1f) + 16;
+ }
+
+ /*
+ * A downstream channel should never exist above index 13,
+ * as it would request a nonexistent BULK endpoint > 15.
+ * In the peculiar case that it does, it's ignored silently.
+ */
+
+ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+ chan->writable = 1;
+ chan->out_synchronous = !!(out_desc & 0x40);
+ chan->out_seekable = !!(out_desc & 0x20);
+ chan->out_log2_element_size = out_desc & 0x0f;
+ chan->out_log2_fifo_size =
+ ((out_desc >> 8) & 0x1f) + 16;
+ }
+ }
+
+ return 0;
+}
+
+static int xillyusb_discovery(struct usb_interface *interface)
+{
+ int rc;
+ struct xillyusb_dev *xdev = usb_get_intfdata(interface);
+ __le16 bogus_chandesc[2];
+ struct xillyfifo idt_fifo;
+ struct xillyusb_channel *chan;
+ unsigned int idt_len, names_offset;
+ unsigned char *idt;
+ int num_channels;
+
+ rc = xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
+
+ if (rc) {
+ dev_err(&interface->dev, "Failed to send quiesce request. Aborting.\n");
+ return rc;
+ }
+
+ /* Phase I: Set up one fake upstream channel and obtain IDT */
+
+ /* Set up a fake IDT with one async IN stream */
+ bogus_chandesc[0] = cpu_to_le16(0x80);
+ bogus_chandesc[1] = cpu_to_le16(0);
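+ /*
+ * In terms of the descriptor format decoded by setup_channels(),
+ * 0x80 marks the IN entry as valid (asynchronous, non-seekable,
+ * one-byte elements), and the zero word leaves the OUT side unused.
+ */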
+
+ rc = setup_channels(xdev, bogus_chandesc, 1);
+
+ if (rc)
+ return rc;
+
+ rc = fifo_init(&idt_fifo, LOG2_IDT_FIFO_SIZE);
+
+ if (rc)
+ return rc;
+
+ chan = xdev->channels;
+
+ chan->in_fifo = &idt_fifo;
+ chan->read_data_ok = 1;
+
+ xdev->num_channels = 1;
+
+ rc = xillyusb_send_opcode(xdev, ~0, OPCODE_REQ_IDT, 0);
+
+ if (rc) {
+ dev_err(&interface->dev, "Failed to send IDT request. Aborting.\n");
+ goto unfifo;
+ }
+
+ rc = wait_event_interruptible_timeout(idt_fifo.waitq,
+ !chan->read_data_ok,
+ XILLY_RESPONSE_TIMEOUT);
+
+ if (xdev->error) {
+ rc = xdev->error;
+ goto unfifo;
+ }
+
+ if (rc < 0) {
+ rc = -EINTR; /* Interrupt on probe method? Interesting. */
+ goto unfifo;
+ }
+
+ if (chan->read_data_ok) {
+ rc = -ETIMEDOUT;
+ dev_err(&interface->dev, "No response from FPGA. Aborting.\n");
+ goto unfifo;
+ }
+
+ idt_len = READ_ONCE(idt_fifo.fill);
+ idt = kmalloc(idt_len, GFP_KERNEL);
+
+ if (!idt) {
+ rc = -ENOMEM;
+ goto unfifo;
+ }
+
+ fifo_read(&idt_fifo, idt, idt_len, xilly_memcpy);
+
+ if (crc32_le(~0, idt, idt_len) != 0) {
+ dev_err(&interface->dev, "IDT failed CRC check. Aborting.\n");
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ if (*idt > 0x90) {
+ dev_err(&interface->dev, "No support for IDT version 0x%02x. Maybe the xillyusb driver needs an upgrade. Aborting.\n",
+ (int)*idt);
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ /* Phase II: Set up the streams as defined in IDT */
+
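+ /*
+ * IDT layout, as parsed here: byte 0 holds the format revision
+ * (checked above), bytes 1-2 the number of channels (little
+ * endian), then two 16-bit descriptors per channel, followed by
+ * the device node names and a trailing 32-bit CRC.
+ */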
+ num_channels = le16_to_cpu(*((__le16 *)(idt + 1)));
+ names_offset = 3 + num_channels * 4;
+ idt_len -= 4; /* Exclude CRC */
+
+ if (idt_len < names_offset) {
+ dev_err(&interface->dev, "IDT too short. This is exceptionally weird, because its CRC is OK\n");
+ rc = -ENODEV;
+ goto unidt;
+ }
+
+ rc = setup_channels(xdev, (void *)idt + 3, num_channels);
+
+ if (rc)
+ goto unidt;
+
+ /*
+ * Except for wildly misbehaving hardware, or if it was disconnected
+ * just after responding with the IDT, there is no reason for any
+ * work item to be running now. To make sure that the updated
+ * xdev->channels is visible to anything that might run in parallel,
+ * flush the workqueue, which rarely has anything to do.
+ */
+ flush_workqueue(xdev->workq);
+
+ xdev->num_channels = num_channels;
+
+ fifo_mem_release(&idt_fifo);
+ kfree(chan);
+
+ rc = xillybus_init_chrdev(&interface->dev, &xillyusb_fops,
+ THIS_MODULE, xdev,
+ idt + names_offset,
+ idt_len - names_offset,
+ num_channels,
+ xillyname, true);
+
+ kfree(idt);
+
+ return rc;
+
+unidt:
+ kfree(idt);
+
+unfifo:
+ safely_assign_in_fifo(chan, NULL);
+ fifo_mem_release(&idt_fifo);
+
+ return rc;
+}
+
+static int xillyusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct xillyusb_dev *xdev;
+ int rc;
+
+ xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ kref_init(&xdev->kref);
+ mutex_init(&xdev->process_in_mutex);
+ mutex_init(&xdev->msg_mutex);
+
+ xdev->udev = usb_get_dev(interface_to_usbdev(interface));
+ xdev->dev = &interface->dev;
+ xdev->error = 0;
+ spin_lock_init(&xdev->error_lock);
+ xdev->in_counter = 0;
+ xdev->in_bytes_left = 0;
+ xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
+
+ if (!xdev->workq) {
+ dev_err(&interface->dev, "Failed to allocate work queue\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_WORK(&xdev->wakeup_workitem, wakeup_all);
+
+ usb_set_intfdata(interface, xdev);
+
+ rc = xillyusb_setup_base_eps(xdev);
+ if (rc)
+ goto fail;
+
+ rc = xillyusb_discovery(interface);
+ if (rc)
+ goto latefail;
+
+ return 0;
+
+latefail:
+ endpoint_quiesce(xdev->in_ep);
+ endpoint_quiesce(xdev->msg_ep);
+
+fail:
+ usb_set_intfdata(interface, NULL);
+ kref_put(&xdev->kref, cleanup_dev);
+ return rc;
+}
+
+static void xillyusb_disconnect(struct usb_interface *interface)
+{
+ struct xillyusb_dev *xdev = usb_get_intfdata(interface);
+ struct xillyusb_endpoint *msg_ep = xdev->msg_ep;
+ struct xillyfifo *fifo = &msg_ep->fifo;
+ int rc;
+ int i;
+
+ xillybus_cleanup_chrdev(xdev, &interface->dev);
+
+ /*
+ * Try to send OPCODE_QUIESCE, which will fail silently if the device
+ * was disconnected, but makes sense on module unload.
+ */
+
+ msg_ep->wake_on_drain = true;
+ xillyusb_send_opcode(xdev, ~0, OPCODE_QUIESCE, 0);
+
+ /*
+ * If the device has been disconnected, sending the opcode causes
+ * a global device error with xdev->error set, if such an error
+ * didn't occur earlier. Hence timing out means that the USB link is
+ * fine, but somehow the message wasn't sent. This should never happen.
+ */
+
+ rc = wait_event_interruptible_timeout(fifo->waitq,
+ msg_ep->drained || xdev->error,
+ XILLY_RESPONSE_TIMEOUT);
+
+ if (!rc)
+ dev_err(&interface->dev,
+ "Weird timeout condition on sending quiesce request.\n");
+
+ report_io_error(xdev, -ENODEV); /* Discourage further activity */
+
+ /*
+ * This device driver is declared with soft_unbind set, or else
+ * sending OPCODE_QUIESCE above would always fail. The price is
+ * that the USB framework hasn't killed outstanding URBs, so this has
+ * to be done explicitly before returning from this call.
+ */
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ struct xillyusb_channel *chan = &xdev->channels[i];
+
+ /*
+ * Lock taken to prevent chan->out_ep from changing. It also
+ * ensures xillyusb_open() and xillyusb_flush() don't access
+ * xdev->dev after it's nullified below.
+ */
+ mutex_lock(&chan->lock);
+ if (chan->out_ep)
+ endpoint_quiesce(chan->out_ep);
+ mutex_unlock(&chan->lock);
+ }
+
+ endpoint_quiesce(xdev->in_ep);
+ endpoint_quiesce(xdev->msg_ep);
+
+ usb_set_intfdata(interface, NULL);
+
+ xdev->dev = NULL;
+
+ kref_put(&xdev->kref, cleanup_dev);
+}
+
+static struct usb_driver xillyusb_driver = {
+ .name = xillyname,
+ .id_table = xillyusb_table,
+ .probe = xillyusb_probe,
+ .disconnect = xillyusb_disconnect,
+ .soft_unbind = 1,
+};
+
+static int __init xillyusb_init(void)
+{
+ int rc = 0;
+
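+ /*
+ * The page allocation order used for FIFO buffers: large enough
+ * to cover LOG2_INITIAL_FIFO_BUF_SIZE bytes, but never less than
+ * a single page.
+ */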
+ if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
+ fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
+ else
+ fifo_buf_order = 0;
+
+ rc = usb_register(&xillyusb_driver);
+
+ return rc;
+}
+
+static void __exit xillyusb_exit(void)
+{
+ usb_deregister(&xillyusb_driver);
+}
+
+module_init(xillyusb_init);
+module_exit(xillyusb_exit);
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
index 3b1947575dcc..09ca82356399 100644
--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -23,8 +23,12 @@
#include <linux/bug.h>
#include <linux/err.h>
+#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math64.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
/* MIN_INPUT_FREQ: minimum input clock frequency, in Hz (Fref_min) */
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index be160764911b..f9d5b7334341 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -92,13 +92,20 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
+static void devm_clk_bulk_release_all(struct device *dev, void *res)
+{
+ struct clk_bulk_devres *devres = res;
+
+ clk_bulk_put_all(devres->num_clks, devres->clks);
+}
+
int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
struct clk_bulk_devres *devres;
int ret;
- devres = devres_alloc(devm_clk_bulk_release,
+ devres = devres_alloc(devm_clk_bulk_release_all,
sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 344997203f0e..87ba4966b0e8 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -343,16 +343,63 @@ static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
return bestdiv;
}
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags)
+{
+ int div;
+
+ div = clk_divider_bestdiv(hw, req->best_parent_hw, req->rate,
+ &req->best_parent_rate, table, width, flags);
+
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(divider_determine_rate);
+
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val)
+{
+ int div;
+
+ div = _get_div(table, val, flags, width);
+
+ /* Even a read-only clock can propagate a rate change */
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
+ if (!req->best_parent_hw)
+ return -EINVAL;
+
+ req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
+ req->rate * div);
+ }
+
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(divider_ro_determine_rate);
+
long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table,
u8 width, unsigned long flags)
{
- int div;
+ struct clk_rate_request req = {
+ .rate = rate,
+ .best_parent_rate = *prate,
+ .best_parent_hw = parent,
+ };
+ int ret;
- div = clk_divider_bestdiv(hw, parent, rate, prate, table, width, flags);
+ ret = divider_determine_rate(hw, &req, table, width, flags);
+ if (ret)
+ return ret;
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ *prate = req.best_parent_rate;
+
+ return req.rate;
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);
@@ -361,23 +408,23 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val)
{
- int div;
-
- div = _get_div(table, val, flags, width);
+ struct clk_rate_request req = {
+ .rate = rate,
+ .best_parent_rate = *prate,
+ .best_parent_hw = parent,
+ };
+ int ret;
- /* Even a read-only clock can propagate a rate change */
- if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- if (!parent)
- return -EINVAL;
+ ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
+ if (ret)
+ return ret;
- *prate = clk_hw_round_rate(parent, rate * div);
- }
+ *prate = req.best_parent_rate;
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return req.rate;
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
-
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
index 6c84abf5b2e3..67a7cb3503c3 100644
--- a/drivers/clk/clk-k210.c
+++ b/drivers/clk/clk-k210.c
@@ -722,6 +722,7 @@ static int k210_clk_set_parent(struct clk_hw *hw, u8 index)
reg |= BIT(cfg->mux_bit);
else
reg &= ~BIT(cfg->mux_bit);
+ writel(reg, ksc->regs + cfg->mux_reg);
spin_unlock_irqrestore(&ksc->clk_lock, flags);
return 0;
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index c1095e733220..c7a3a029fb1e 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -519,7 +519,7 @@ static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
if (vco_rate < 0) {
- dev_err(lmk->dev, "PLL2 parmeters out of range\n");
+ dev_err(lmk->dev, "PLL2 parameters out of range\n");
return vco_rate;
}
@@ -550,7 +550,7 @@ static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
vco_rate = lmk04832_calc_pll2_params(prate, rate, &n, &p, &r);
if (vco_rate < 0) {
- dev_err(lmk->dev, "failed to determine PLL2 parmeters\n");
+ dev_err(lmk->dev, "failed to determine PLL2 parameters\n");
return vco_rate;
}
@@ -573,7 +573,7 @@ static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
/*
* PLL2_N registers must be programmed after other PLL2 dividers are
- * programed to ensure proper VCO frequency calibration
+ * programmed to ensure proper VCO frequency calibration
*/
ret = regmap_write(lmk->regmap, LMK04832_REG_PLL2_N_0,
FIELD_GET(0x030000, n));
@@ -1120,7 +1120,7 @@ static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- /* Enable Duty Cycle Corretion */
+ /* Enable Duty Cycle Correction */
if (dclk_div == 1) {
ret = regmap_update_bits(lmk->regmap,
LMK04832_REG_CLKOUT_CTRL3(dclk->id),
@@ -1425,23 +1425,23 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->dclk = devm_kcalloc(lmk->dev, info->num_channels >> 1,
sizeof(struct lmk_dclk), GFP_KERNEL);
- if (IS_ERR(lmk->dclk)) {
- ret = PTR_ERR(lmk->dclk);
+ if (!lmk->dclk) {
+ ret = -ENOMEM;
goto err_disable_oscin;
}
lmk->clkout = devm_kcalloc(lmk->dev, info->num_channels,
sizeof(*lmk->clkout), GFP_KERNEL);
- if (IS_ERR(lmk->clkout)) {
- ret = PTR_ERR(lmk->clkout);
+ if (!lmk->clkout) {
+ ret = -ENOMEM;
goto err_disable_oscin;
}
lmk->clk_data = devm_kzalloc(lmk->dev, struct_size(lmk->clk_data, hws,
info->num_channels),
GFP_KERNEL);
- if (IS_ERR(lmk->clk_data)) {
- ret = PTR_ERR(lmk->clk_data);
+ if (!lmk->clk_data) {
+ ret = -ENOMEM;
goto err_disable_oscin;
}
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 18117ce5ff85..5c75e3d906c2 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -526,7 +526,7 @@ struct stm32f4_pll {
struct stm32f4_pll_post_div_data {
int idx;
- u8 pll_num;
+ int pll_idx;
const char *name;
const char *parent;
u8 flag;
@@ -557,13 +557,13 @@ static const struct clk_div_table post_divr_table[] = {
#define MAX_POST_DIV 3
static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = {
- { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q",
+ { CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q",
CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL},
- { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q",
+ { CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q",
CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL },
- { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
+ { NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table },
};
@@ -1774,7 +1774,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
post_div->width,
post_div->flag_div,
post_div->div_table,
- clks[post_div->pll_num],
+ clks[post_div->pll_idx],
&stm32f4_clk_lock);
if (post_div->idx != NO_IDX)
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 6adc625e79cb..256575bd29b9 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -2263,6 +2263,7 @@ static int stm32_rcc_reset_init(struct device *dev, void __iomem *base,
if (!reset_data)
return -ENOMEM;
+ spin_lock_init(&reset_data->lock);
reset_data->membase = base;
reset_data->rcdev.owner = THIS_MODULE;
reset_data->rcdev.ops = &stm32_reset_ops;
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 5ecc37aaa118..c1ec75aa4ccd 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -18,6 +18,7 @@ config COMMON_CLK_HI3519
config COMMON_CLK_HI3559A
bool "Hi3559A Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
+ select RESET_HISI
default ARCH_HISI
help
Build the clock driver for hi3559a.
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index b1f19c43b558..56012a3d0219 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -107,25 +107,25 @@ static const struct hisi_fixed_rate_clock hi3559av100_fixed_rate_clks_crg[] = {
};
-static const char *fmc_mux_p[] __initconst = {
+static const char *fmc_mux_p[] = {
"24m", "75m", "125m", "150m", "200m", "250m", "300m", "400m"
};
-static const char *mmc_mux_p[] __initconst = {
+static const char *mmc_mux_p[] = {
"100k", "25m", "49p5m", "99m", "187p5m", "150m", "198m", "400k"
};
-static const char *sysapb_mux_p[] __initconst = {
+static const char *sysapb_mux_p[] = {
"24m", "50m",
};
-static const char *sysbus_mux_p[] __initconst = {
+static const char *sysbus_mux_p[] = {
"24m", "300m"
};
-static const char *uart_mux_p[] __initconst = { "50m", "24m", "3m" };
+static const char *uart_mux_p[] = { "50m", "24m", "3m" };
-static const char *a73_clksel_mux_p[] __initconst = {
+static const char *a73_clksel_mux_p[] = {
"24m", "apll", "1000m"
};
@@ -136,7 +136,7 @@ static const u32 sysbus_mux_table[] = { 0, 1 };
static const u32 uart_mux_table[] = { 0, 1, 2 };
static const u32 a73_clksel_mux_table[] = { 0, 1, 2 };
-static struct hisi_mux_clock hi3559av100_mux_clks_crg[] __initdata = {
+static struct hisi_mux_clock hi3559av100_mux_clks_crg[] = {
{
HI3559AV100_FMC_MUX, "fmc_mux", fmc_mux_p, ARRAY_SIZE(fmc_mux_p),
CLK_SET_RATE_PARENT, 0x170, 2, 3, 0, fmc_mux_table,
@@ -181,7 +181,7 @@ static struct hisi_mux_clock hi3559av100_mux_clks_crg[] __initdata = {
},
};
-static struct hisi_gate_clock hi3559av100_gate_clks[] __initdata = {
+static struct hisi_gate_clock hi3559av100_gate_clks[] = {
{
HI3559AV100_FMC_CLK, "clk_fmc", "fmc_mux",
CLK_SET_RATE_PARENT, 0x170, 1, 0,
@@ -336,7 +336,7 @@ static struct hisi_gate_clock hi3559av100_gate_clks[] __initdata = {
},
};
-static struct hi3559av100_pll_clock hi3559av100_pll_clks[] __initdata = {
+static struct hi3559av100_pll_clock hi3559av100_pll_clks[] = {
{
HI3559AV100_APLL_CLK, "apll", NULL, 0x0, 0, 24, 24, 3, 28, 3,
0x4, 0, 12, 12, 6
@@ -502,7 +502,7 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
}
}
-static __init struct hisi_clock_data *hi3559av100_clk_register(
+static struct hisi_clock_data *hi3559av100_clk_register(
struct platform_device *pdev)
{
struct hisi_clock_data *clk_data;
@@ -549,7 +549,7 @@ unregister_fixed_rate:
return ERR_PTR(ret);
}
-static __init void hi3559av100_clk_unregister(struct platform_device *pdev)
+static void hi3559av100_clk_unregister(struct platform_device *pdev)
{
struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
@@ -568,8 +568,7 @@ static const struct hisi_crg_funcs hi3559av100_crg_funcs = {
.unregister_clks = hi3559av100_clk_unregister,
};
-static struct hisi_fixed_rate_clock hi3559av100_shub_fixed_rate_clks[]
- __initdata = {
+static struct hisi_fixed_rate_clock hi3559av100_shub_fixed_rate_clks[] = {
{ HI3559AV100_SHUB_SOURCE_SOC_24M, "clk_source_24M", NULL, 0, 24000000UL, },
{ HI3559AV100_SHUB_SOURCE_SOC_200M, "clk_source_200M", NULL, 0, 200000000UL, },
{ HI3559AV100_SHUB_SOURCE_SOC_300M, "clk_source_300M", NULL, 0, 300000000UL, },
@@ -587,16 +586,16 @@ static struct hisi_fixed_rate_clock hi3559av100_shub_fixed_rate_clks[]
/* shub mux clk */
static u32 shub_source_clk_mux_table[] = {0, 1, 2, 3};
-static const char *shub_source_clk_mux_p[] __initconst = {
+static const char *shub_source_clk_mux_p[] = {
"clk_source_24M", "clk_source_200M", "clk_source_300M", "clk_source_PLL"
};
static u32 shub_uart_source_clk_mux_table[] = {0, 1, 2, 3};
-static const char *shub_uart_source_clk_mux_p[] __initconst = {
+static const char *shub_uart_source_clk_mux_p[] = {
"clk_uart_32K", "clk_uart_div_clk", "clk_uart_div_clk", "clk_source_24M"
};
-static struct hisi_mux_clock hi3559av100_shub_mux_clks[] __initdata = {
+static struct hisi_mux_clock hi3559av100_shub_mux_clks[] = {
{
HI3559AV100_SHUB_SOURCE_CLK, "shub_clk", shub_source_clk_mux_p,
ARRAY_SIZE(shub_source_clk_mux_p),
@@ -615,7 +614,7 @@ static struct hisi_mux_clock hi3559av100_shub_mux_clks[] __initdata = {
static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}};
static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}};
-static struct hisi_divider_clock hi3559av100_shub_div_clks[] __initdata = {
+static struct hisi_divider_clock hi3559av100_shub_div_clks[] = {
{ HI3559AV100_SHUB_SPI_SOURCE_CLK, "clk_spi_clk", "shub_clk", 0, 0x20, 24, 2,
CLK_DIVIDER_ALLOW_ZERO, shub_spi_clk_table,
},
@@ -625,7 +624,7 @@ static struct hisi_divider_clock hi3559av100_shub_div_clks[] __initdata = {
};
/* shub gate clk */
-static struct hisi_gate_clock hi3559av100_shub_gate_clks[] __initdata = {
+static struct hisi_gate_clock hi3559av100_shub_gate_clks[] = {
{
HI3559AV100_SHUB_SPI0_CLK, "clk_shub_spi0", "clk_spi_clk",
0, 0x20, 1, 0,
@@ -697,7 +696,7 @@ static int hi3559av100_shub_default_clk_set(void)
return 0;
}
-static __init struct hisi_clock_data *hi3559av100_shub_clk_register(
+static struct hisi_clock_data *hi3559av100_shub_clk_register(
struct platform_device *pdev)
{
struct hisi_clock_data *clk_data = NULL;
@@ -751,7 +750,7 @@ unregister_fixed_rate:
return ERR_PTR(ret);
}
-static __init void hi3559av100_shub_clk_unregister(struct platform_device *pdev)
+static void hi3559av100_shub_clk_unregister(struct platform_device *pdev)
{
struct hisi_crg_dev *crg = platform_get_drvdata(pdev);
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index dcd1757cc5df..8ad8977cf1c2 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -75,8 +75,8 @@ static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
div->width);
}
-static long clk_regmap_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_regmap_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
@@ -87,18 +87,17 @@ static long clk_regmap_div_round_rate(struct clk_hw *hw, unsigned long rate,
if (div->flags & CLK_DIVIDER_READ_ONLY) {
ret = regmap_read(clk->map, div->offset, &val);
if (ret)
- /* Gives a hint that something is wrong */
- return 0;
+ return ret;
val >>= div->shift;
val &= clk_div_mask(div->width);
- return divider_ro_round_rate(hw, rate, prate, div->table,
- div->width, div->flags, val);
+ return divider_ro_determine_rate(hw, req, div->table,
+ div->width, div->flags, val);
}
- return divider_round_rate(hw, rate, prate, div->table, div->width,
- div->flags);
+ return divider_determine_rate(hw, req, div->table, div->width,
+ div->flags);
}
static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -123,14 +122,14 @@ static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_regmap_divider_ops = {
.recalc_rate = clk_regmap_div_recalc_rate,
- .round_rate = clk_regmap_div_round_rate,
+ .determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_divider_ops);
const struct clk_ops clk_regmap_divider_ro_ops = {
.recalc_rate = clk_regmap_div_recalc_rate,
- .round_rate = clk_regmap_div_round_rate,
+ .determine_rate = clk_regmap_div_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_divider_ro_ops);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 800b2fef1887..b2c142f3a649 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -467,7 +467,7 @@ DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK,
static struct clk_smd_rpm *msm8936_clks[] = {
[RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
- [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
[RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
[RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
[RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 50b5269586a4..ae24e0397d3c 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -30,8 +30,9 @@ enum clk_ids {
CLK_PLL2_DIV20,
CLK_PLL3,
CLK_PLL3_DIV2,
+ CLK_PLL3_DIV2_4,
+ CLK_PLL3_DIV2_4_2,
CLK_PLL3_DIV4,
- CLK_PLL3_DIV8,
CLK_PLL4,
CLK_PLL5,
CLK_PLL5_DIV2,
@@ -42,12 +43,13 @@ enum clk_ids {
};
/* Divider tables */
-static const struct clk_div_table dtable_3b[] = {
+static const struct clk_div_table dtable_1_32[] = {
{0, 1},
{1, 2},
{2, 4},
{3, 8},
{4, 32},
+ {0, 0},
};
static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
@@ -66,47 +68,56 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+ DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
+ DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
- DEF_FIXED(".pll3_div8", CLK_PLL3_DIV8, CLK_PLL3, 1, 8),
/* Core output clk */
DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A,
- dtable_3b, CLK_DIVIDER_HIWORD_MASK),
+ dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1),
- DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV8,
- DIVPL3B, dtable_3b, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
+ DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
+ DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
};
static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
- DEF_MOD("gic", R9A07G044_CLK_GIC600,
- R9A07G044_CLK_P1,
- 0x514, BIT(0), (BIT(0) | BIT(1))),
- DEF_MOD("ia55", R9A07G044_CLK_IA55,
- R9A07G044_CLK_P1,
- 0x518, (BIT(0) | BIT(1)), BIT(0)),
- DEF_MOD("scif0", R9A07G044_CLK_SCIF0,
- R9A07G044_CLK_P0,
- 0x584, BIT(0), BIT(0)),
- DEF_MOD("scif1", R9A07G044_CLK_SCIF1,
- R9A07G044_CLK_P0,
- 0x584, BIT(1), BIT(1)),
- DEF_MOD("scif2", R9A07G044_CLK_SCIF2,
- R9A07G044_CLK_P0,
- 0x584, BIT(2), BIT(2)),
- DEF_MOD("scif3", R9A07G044_CLK_SCIF3,
- R9A07G044_CLK_P0,
- 0x584, BIT(3), BIT(3)),
- DEF_MOD("scif4", R9A07G044_CLK_SCIF4,
- R9A07G044_CLK_P0,
- 0x584, BIT(4), BIT(4)),
- DEF_MOD("sci0", R9A07G044_CLK_SCI0,
- R9A07G044_CLK_P0,
- 0x588, BIT(0), BIT(0)),
+ DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
+ 0x514, 0),
+ DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
+ 0x518, 0),
+ DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
+ 0x518, 1),
+ DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 0),
+ DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 1),
+ DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 2),
+ DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 3),
+ DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
+ 0x584, 4),
+ DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
+ 0x588, 0),
+};
+
+static struct rzg2l_reset r9a07g044_resets[] = {
+ DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0),
+ DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1),
+ DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A07G044_SCI0_RST, 0x888, 0),
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
- MOD_CLK_BASE + R9A07G044_CLK_GIC600,
+ MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
};
const struct rzg2l_cpg_info r9a07g044_cpg_info = {
@@ -123,5 +134,9 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
/* Module Clocks */
.mod_clks = r9a07g044_mod_clks,
.num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks),
- .num_hw_mod_clks = R9A07G044_CLK_MIPI_DSI_PIN + 1,
+ .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1,
+
+ /* Resets */
+ .resets = r9a07g044_resets,
+ .num_resets = ARRAY_SIZE(r9a07g044_resets),
};
diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/renesas-rzg2l-cpg.c
index 5009b9e48b13..e7c59af2a1d8 100644
--- a/drivers/clk/renesas/renesas-rzg2l-cpg.c
+++ b/drivers/clk/renesas/renesas-rzg2l-cpg.c
@@ -47,9 +47,9 @@
#define SDIV(val) DIV_RSMASK(val, 0, 0x7)
#define CLK_ON_R(reg) (reg)
-#define CLK_MON_R(reg) (0x680 - 0x500 + (reg))
-#define CLK_RST_R(reg) (0x800 - 0x500 + (reg))
-#define CLK_MRST_R(reg) (0x980 - 0x500 + (reg))
+#define CLK_MON_R(reg) (0x180 + (reg))
+#define CLK_RST_R(reg) (reg)
+#define CLK_MRST_R(reg) (0x180 + (reg))
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
@@ -78,6 +78,7 @@ struct rzg2l_cpg_priv {
struct clk **clks;
unsigned int num_core_clks;
unsigned int num_mod_clks;
+ unsigned int num_resets;
unsigned int last_dt_core_clk;
struct raw_notifier_head notifiers;
@@ -315,15 +316,13 @@ fail:
*
* @hw: handle between common and hardware-specific interfaces
* @off: register offset
- * @onoff: ON/MON bits
- * @reset: reset bits
+ * @bit: ON/MON bit
* @priv: CPG/MSTP private data
*/
struct mstp_clock {
struct clk_hw hw;
u16 off;
- u8 onoff;
- u8 reset;
+ u8 bit;
struct rzg2l_cpg_priv *priv;
};
@@ -337,6 +336,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
struct device *dev = priv->dev;
unsigned long flags;
unsigned int i;
+ u32 bitmask = BIT(clock->bit);
u32 value;
if (!clock->off) {
@@ -349,9 +349,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
spin_lock_irqsave(&priv->rmw_lock, flags);
if (enable)
- value = (clock->onoff << 16) | clock->onoff;
+ value = (bitmask << 16) | bitmask;
else
- value = clock->onoff << 16;
+ value = bitmask << 16;
writel(value, priv->base + CLK_ON_R(reg));
spin_unlock_irqrestore(&priv->rmw_lock, flags);
@@ -360,7 +360,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
- if (((readl(priv->base + CLK_MON_R(reg))) & clock->onoff))
+ if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
break;
cpu_relax();
}
@@ -388,6 +388,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
struct mstp_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
+ u32 bitmask = BIT(clock->bit);
u32 value;
if (!clock->off) {
@@ -397,7 +398,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
value = readl(priv->base + CLK_MON_R(clock->off));
- return !(value & clock->onoff);
+ return !(value & bitmask);
}
static const struct clk_ops rzg2l_mod_clock_ops = {
@@ -457,8 +458,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
init.num_parents = 1;
clock->off = mod->off;
- clock->onoff = mod->onoff;
- clock->reset = mod->reset;
+ clock->bit = mod->bit;
clock->priv = priv;
clock->hw.init = &init;
@@ -483,12 +483,11 @@ static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 dis = info->mod_clks[id].reset;
+ unsigned int reg = info->resets[id].off;
+ u32 dis = BIT(info->resets[id].bit);
u32 we = dis << 16;
- dev_dbg(rcdev->dev, "reset name:%s id:%ld offset:0x%x\n",
- info->mod_clks[id].name, id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
/* Reset module */
writel(we, priv->base + CLK_RST_R(reg));
@@ -507,11 +506,10 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 value = info->mod_clks[id].reset << 16;
+ unsigned int reg = info->resets[id].off;
+ u32 value = BIT(info->resets[id].bit) << 16;
- dev_dbg(rcdev->dev, "assert name:%s id:%ld offset:0x%x\n",
- info->mod_clks[id].name, id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
writel(value, priv->base + CLK_RST_R(reg));
return 0;
@@ -522,12 +520,12 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 dis = info->mod_clks[id].reset;
+ unsigned int reg = info->resets[id].off;
+ u32 dis = BIT(info->resets[id].bit);
u32 value = (dis << 16) | dis;
- dev_dbg(rcdev->dev, "deassert name:%s id:%ld offset:0x%x\n",
- info->mod_clks[id].name, id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
+ CLK_RST_R(reg));
writel(value, priv->base + CLK_RST_R(reg));
return 0;
@@ -538,8 +536,8 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->mod_clks[id].off;
- u32 bitmask = info->mod_clks[id].reset;
+ unsigned int reg = info->resets[id].off;
+ u32 bitmask = BIT(info->resets[id].bit);
return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
}
@@ -554,9 +552,11 @@ static const struct reset_control_ops rzg2l_cpg_reset_ops = {
static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
+ struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ const struct rzg2l_cpg_info *info = priv->info;
unsigned int id = reset_spec->args[0];
- if (id >= rcdev->nr_resets) {
+ if (id >= rcdev->nr_resets || !info->resets[id].off) {
dev_err(rcdev->dev, "Invalid reset index %u\n", id);
return -EINVAL;
}
@@ -571,7 +571,7 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
priv->rcdev.dev = priv->dev;
priv->rcdev.of_reset_n_cells = 1;
priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
- priv->rcdev.nr_resets = priv->num_mod_clks;
+ priv->rcdev.nr_resets = priv->num_resets;
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
@@ -594,42 +594,49 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device
{
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
+ bool once = true;
struct clk *clk;
int error;
int i = 0;
while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
&clkspec)) {
- if (rzg2l_cpg_is_pm_clk(&clkspec))
- goto found;
-
- of_node_put(clkspec.np);
+ if (rzg2l_cpg_is_pm_clk(&clkspec)) {
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
+ if (error) {
+ of_node_put(clkspec.np);
+ goto err;
+ }
+ }
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n",
+ error);
+ goto fail_put;
+ }
+ } else {
+ of_node_put(clkspec.np);
+ }
i++;
}
return 0;
-found:
- clk = of_clk_get_from_provider(&clkspec);
- of_node_put(clkspec.np);
-
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- error = pm_clk_create(dev);
- if (error)
- goto fail_put;
-
- error = pm_clk_add_clk(dev, clk);
- if (error)
- goto fail_destroy;
-
- return 0;
+fail_put:
+ clk_put(clk);
fail_destroy:
pm_clk_destroy(dev);
-fail_put:
- clk_put(clk);
+err:
return error;
}
@@ -692,6 +699,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
priv->clks = clks;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
+ priv->num_resets = info->num_resets;
priv->last_dt_core_clk = info->last_dt_core_clk;
for (i = 0; i < nclks; i++)
diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.h b/drivers/clk/renesas/renesas-rzg2l-cpg.h
index 3948bdd8afc9..63695280ce8b 100644
--- a/drivers/clk/renesas/renesas-rzg2l-cpg.h
+++ b/drivers/clk/renesas/renesas-rzg2l-cpg.h
@@ -21,6 +21,7 @@
#define DDIV_PACK(offset, bitpos, size) \
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
+#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
/**
@@ -76,26 +77,40 @@ enum clk_types {
* @id: clock index in array containing all Core and Module Clocks
* @parent: id of parent clock
* @off: register offset
- * @onoff: ON/MON bits
- * @reset: reset bits
+ * @bit: ON/MON bit
*/
struct rzg2l_mod_clk {
const char *name;
unsigned int id;
unsigned int parent;
u16 off;
- u8 onoff;
- u8 reset;
+ u8 bit;
};
-#define DEF_MOD(_name, _id, _parent, _off, _onoff, _reset) \
- [_id] = { \
+#define DEF_MOD(_name, _id, _parent, _off, _bit) \
+ { \
.name = _name, \
- .id = MOD_CLK_BASE + _id, \
+ .id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
.off = (_off), \
- .onoff = (_onoff), \
- .reset = (_reset) \
+ .bit = (_bit), \
+ }
+
+/**
+ * struct rzg2l_reset - Reset definitions
+ *
+ * @off: register offset
+ * @bit: reset bit
+ */
+struct rzg2l_reset {
+ u16 off;
+ u8 bit;
+};
+
+#define DEF_RST(_id, _off, _bit) \
+ [_id] = { \
+ .off = (_off), \
+ .bit = (_bit) \
}
/**
@@ -126,6 +141,10 @@ struct rzg2l_cpg_info {
unsigned int num_mod_clks;
unsigned int num_hw_mod_clks;
+ /* Resets */
+ const struct rzg2l_reset *resets;
+ unsigned int num_resets;
+
/* Critical Module Clocks that should not be disabled */
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index 316912d3b1a4..4f2c3309eea4 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -194,6 +194,15 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
+static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ gate_ops->disable_unused(gate_hw);
+}
+
static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -218,6 +227,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
.is_enabled = clk_sdmmc_mux_is_enabled,
.enable = clk_sdmmc_mux_enable,
.disable = clk_sdmmc_mux_disable,
+ .disable_unused = clk_sdmmc_mux_disable_unused,
.restore_context = clk_sdmmc_mux_restore_context,
};
diff --git a/drivers/comedi/drivers/comedi_8254.c b/drivers/comedi/drivers/comedi_8254.c
index d1d509e9add9..4bf5daa9e885 100644
--- a/drivers/comedi/drivers/comedi_8254.c
+++ b/drivers/comedi/drivers/comedi_8254.c
@@ -555,6 +555,7 @@ static int comedi_8254_insn_config(struct comedi_device *dev,
/**
* comedi_8254_subdevice_init - initialize a comedi_subdevice for the 8254 timer
* @s: comedi_subdevice struct
+ * @i8254: comedi_8254 struct
*/
void comedi_8254_subdevice_init(struct comedi_subdevice *s,
struct comedi_8254 *i8254)
@@ -607,7 +608,7 @@ static struct comedi_8254 *__i8254_init(unsigned long iobase,
/**
* comedi_8254_init - allocate and initialize the 8254 device for pio access
- * @mmio: port I/O base address
+ * @iobase: port I/O base address
* @osc_base: base time of the counter in ns
* OPTIONAL - only used by comedi_8254_cascade_ns_to_timer()
* @iosize: I/O register size
diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c
index c729094298c2..479b58e209ba 100644
--- a/drivers/comedi/drivers/comedi_isadma.c
+++ b/drivers/comedi/drivers/comedi_isadma.c
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(comedi_isadma_set_mode);
* comedi_isadma_alloc - allocate and initialize the ISA DMA
* @dev: comedi_device struct
* @n_desc: the number of cookies to allocate
- * @dma_chan: DMA channel for the first cookie
+ * @dma_chan1: DMA channel for the first cookie
* @dma_chan2: DMA channel for the second cookie
* @maxsize: the size of the buffer to allocate for each cookie
* @dma_dir: the DMA direction
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index 7a02c4fa3cda..f963080dd61f 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -186,19 +186,6 @@ static void set_full_scales(struct jr3_sensor __iomem *sensor,
set_s16(&sensor->command_word0, 0x0a00);
}
-static struct six_axis_t get_min_full_scales(struct jr3_sensor __iomem *sensor)
-{
- struct six_axis_t result;
-
- result.fx = get_s16(&sensor->min_full_scale.fx);
- result.fy = get_s16(&sensor->min_full_scale.fy);
- result.fz = get_s16(&sensor->min_full_scale.fz);
- result.mx = get_s16(&sensor->min_full_scale.mx);
- result.my = get_s16(&sensor->min_full_scale.my);
- result.mz = get_s16(&sensor->min_full_scale.mz);
- return result;
-}
-
static struct six_axis_t get_max_full_scales(struct jr3_sensor __iomem *sensor)
{
struct six_axis_t result;
@@ -504,10 +491,8 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
result = poll_delay_min_max(20, 100);
} else {
/* Set full scale */
- struct six_axis_t min_full_scale;
struct six_axis_t max_full_scale;
- min_full_scale = get_min_full_scales(sensor);
max_full_scale = get_max_full_scales(sensor);
set_full_scales(sensor, max_full_scale);
diff --git a/drivers/comedi/drivers/ni_routes.c b/drivers/comedi/drivers/ni_routes.c
index c426a9286f15..f0f8cd424b30 100644
--- a/drivers/comedi/drivers/ni_routes.c
+++ b/drivers/comedi/drivers/ni_routes.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routes.c
* Route information for NI boards.
@@ -246,7 +245,7 @@ unsigned int ni_get_valid_routes(const struct ni_route_tables *tables,
}
EXPORT_SYMBOL_GPL(ni_get_valid_routes);
-/**
+/*
* List of NI global signal names that, as destinations, are only routeable
* indirectly through the *_arg elements of the comedi_cmd structure.
*/
@@ -388,7 +387,7 @@ ni_find_route_set(const int destination,
}
EXPORT_SYMBOL_GPL(ni_find_route_set);
-/**
+/*
* ni_route_set_has_source() - Determines whether the given source is in
* included given route_set.
*
@@ -507,7 +506,7 @@ s8 ni_route_to_register(const int src, const int dest,
}
EXPORT_SYMBOL_GPL(ni_route_to_register);
-/**
+/*
* ni_find_route_source() - Finds the signal source corresponding to a signal
* route (src-->dest) of the specified routing register
* value and the specified route destination on the
diff --git a/drivers/comedi/drivers/ni_routes.h b/drivers/comedi/drivers/ni_routes.h
index b7680fd2afe1..036982315584 100644
--- a/drivers/comedi/drivers/ni_routes.h
+++ b/drivers/comedi/drivers/ni_routes.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routes.h
* Route information for NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes.c b/drivers/comedi/drivers/ni_routing/ni_device_routes.c
index 7b6a74dfe48b..58654c2b12d6 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes.h b/drivers/comedi/drivers/ni_routing/ni_device_routes.h
index b9f1c47d19e1..09e4e172c659 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes.h
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/all.h b/drivers/comedi/drivers/ni_routing/ni_device_routes/all.h
index 78b24138acb7..001dbb88a874 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/all.h
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/all.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/all.h
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c
index f1126a0cb285..7d3064c92643 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6070e.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6220.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6220.c
index 74a59222963f..e2c462edb8ec 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6220.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6220.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6220.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6221.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6221.c
index 44dcbabf2a99..9e02ec0a66ad 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6221.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6221.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6221.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6229.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6229.c
index fa5794e4e2b3..33f7fff61f74 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6229.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6229.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6229.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6251.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6251.c
index 645fd1cd2de4..dde676b73624 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6251.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6251.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6251.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6254.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6254.c
index 056a240cd3a2..167a2da97c14 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6254.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6254.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6254.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6259.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6259.c
index e0b5fa78c3bc..ba990f98590c 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6259.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6259.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6259.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6534.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6534.c
index a2472ed288cf..f8d2a91b6c0a 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6534.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6534.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6534.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6602.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6602.c
index 91de9dac2d6a..2eee91f590eb 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6602.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6602.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6602.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6713.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6713.c
index d378b36d2084..c07ef3584a4b 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6713.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6713.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6713.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6723.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6723.c
index e0cc57ab06e7..c37373f8f0e1 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6723.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6723.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6723.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6733.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6733.c
index f6e1e17ab854..f252fbe19638 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6733.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pci-6733.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pci-6733.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c
index 9978d632117f..4ccba4fdf3bc 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6030e.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c
index 1b89e27d7aa5..84fdfa2ef9a7 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6224.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c
index 10dfc34bc87c..2b99ce0f87a4 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6225.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c
index 25db4b7363de..1c5164c46306 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6251.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c
index 27da4433fc4a..a3402b1ca6e8 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxi-6733.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c
index 8354fe971d59..defcc4cfe1e4 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxie-6251.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c
index 2ebb679e0129..d2013b9e6767 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxie-6535.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c
index d88504314d7f..89aff39a4fc2 100644
--- a/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c
+++ b/drivers/comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_device_routes/pxie-6738.c
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values.c b/drivers/comedi/drivers/ni_routing/ni_route_values.c
index 5901762734ed..54a740b39819 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values.c
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_route_values.c
* Route information for NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values.h b/drivers/comedi/drivers/ni_routing/ni_route_values.h
index 80e0145fb82b..6e358efa6f7f 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values.h
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_route_values.h
* Route information for NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values/all.h b/drivers/comedi/drivers/ni_routing/ni_route_values/all.h
index 7227461500b5..30761e55f746 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values/all.h
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values/all.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_route_values/all.h
* List of valid routes for specific NI boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values/ni_660x.c b/drivers/comedi/drivers/ni_routing/ni_route_values/ni_660x.c
index f1c7e6646261..aace60e49507 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values/ni_660x.c
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values/ni_660x.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_route_values/ni_660x.c
* Route information for NI_660X boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values/ni_eseries.c b/drivers/comedi/drivers/ni_routing/ni_route_values/ni_eseries.c
index d1ab3c9ce585..7a52f024cdbd 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values/ni_eseries.c
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values/ni_eseries.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_route_values/ni_eseries.c
* Route information for NI_ESERIES boards.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values/ni_mseries.c b/drivers/comedi/drivers/ni_routing/ni_route_values/ni_mseries.c
index c59d8afe0ae9..d1ddd13b33b5 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values/ni_mseries.c
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values/ni_mseries.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/ni_route_values/ni_mseries.c
* Route information for NI_MSERIES boards.
diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
index dedb6f2fc678..d55521b5bdcb 100644
--- a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
+++ b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
#include <stdint.h>
#include <stdbool.h>
diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py b/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py
index 532eb6372a5a..90378fb50580 100755
--- a/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py
+++ b/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
-# vim: ts=2:sw=2:et:tw=80:nowrap
# This is simply to aid in creating the entries in the order of the value of
# the device-global NI signal/terminal constants defined in comedi.h
@@ -123,7 +122,6 @@ class DeviceRoutes(CSVCollection):
output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
@@ -155,7 +153,6 @@ class DeviceRoutes(CSVCollection):
extern_header = """\
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
@@ -193,7 +190,6 @@ class DeviceRoutes(CSVCollection):
single_output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
@@ -299,7 +295,6 @@ class RouteValues(CSVCollection):
output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/{filename}
* Route information for NI boards.
@@ -337,7 +332,6 @@ class RouteValues(CSVCollection):
extern_header = """\
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/{filename}
* List of valid routes for specific NI boards.
@@ -375,7 +369,6 @@ class RouteValues(CSVCollection):
single_output_file_top = """\
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/ni_routing/{filename}
* Route information for {sheet} boards.
diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py b/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py
index b3e6472bac22..a273b33edb8f 100755
--- a/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py
+++ b/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
-# vim: ts=2:sw=2:et:tw=80:nowrap
from os import path
import os, csv
diff --git a/drivers/comedi/drivers/ni_routing/tools/csv_collection.py b/drivers/comedi/drivers/ni_routing/tools/csv_collection.py
index 12617329a928..db977ecb4307 100644
--- a/drivers/comedi/drivers/ni_routing/tools/csv_collection.py
+++ b/drivers/comedi/drivers/ni_routing/tools/csv_collection.py
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0+
-# vim: ts=2:sw=2:et:tw=80:nowrap
import os, csv, glob
diff --git a/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py b/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py
index 89c90a0ba24d..c00eaf803299 100755
--- a/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py
+++ b/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
-# vim: ts=2:sw=2:et:tw=80:nowrap
from os import path
import os, csv
diff --git a/drivers/comedi/drivers/ni_routing/tools/ni_names.py b/drivers/comedi/drivers/ni_routing/tools/ni_names.py
index 5f9b825968b1..d4df5f29e3e5 100644
--- a/drivers/comedi/drivers/ni_routing/tools/ni_names.py
+++ b/drivers/comedi/drivers/ni_routing/tools/ni_names.py
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0+
-# vim: ts=2:sw=2:et:tw=80:nowrap
"""
This file helps to extract string names of NI signals as included in comedi.h
between NI_NAMES_BASE and NI_NAMES_BASE+NI_NUM_NAMES.
diff --git a/drivers/comedi/drivers/ni_tio.c b/drivers/comedi/drivers/ni_tio.c
index f6154addaa95..da6826d77e60 100644
--- a/drivers/comedi/drivers/ni_tio.c
+++ b/drivers/comedi/drivers/ni_tio.c
@@ -1501,7 +1501,7 @@ int ni_tio_insn_config(struct comedi_device *dev,
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
-/**
+/*
* Retrieves the register value of the current source of the output selector for
* the given destination.
*
@@ -1541,10 +1541,10 @@ int ni_tio_get_routing(struct ni_gpct_device *counter_dev, unsigned int dest)
EXPORT_SYMBOL_GPL(ni_tio_get_routing);
/**
- * Sets the register value of the selector MUX for the given destination.
- * @counter_dev:Pointer to general counter device.
- * @destination:Device-global identifier of route destination.
- * @register_value:
+ * ni_tio_set_routing() - Sets the register value of the selector MUX for the given destination.
+ * @counter_dev: Pointer to general counter device.
+ * @dest: Device-global identifier of route destination.
+ * @reg:
* The first several bits of this value should store the desired
* value to write to the register. All other bits are for
* transmitting information that modify the mode of the particular
@@ -1580,7 +1580,7 @@ int ni_tio_set_routing(struct ni_gpct_device *counter_dev, unsigned int dest,
}
EXPORT_SYMBOL_GPL(ni_tio_set_routing);
-/**
+/*
* Sets the given destination MUX to its default value or disable it.
*
* Return: 0 if successful; -EINVAL if terminal is unknown.
diff --git a/drivers/comedi/drivers/tests/comedi_example_test.c b/drivers/comedi/drivers/tests/comedi_example_test.c
index e5aaaeab7bdd..81d074bcdea5 100644
--- a/drivers/comedi/drivers/tests/comedi_example_test.c
+++ b/drivers/comedi/drivers/tests/comedi_example_test.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/tests/comedi_example_test.c
* Example set of unit tests.
diff --git a/drivers/comedi/drivers/tests/ni_routes_test.c b/drivers/comedi/drivers/tests/ni_routes_test.c
index 32073850d545..652362486ff6 100644
--- a/drivers/comedi/drivers/tests/ni_routes_test.c
+++ b/drivers/comedi/drivers/tests/ni_routes_test.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/tests/ni_routes_test.c
* Unit tests for NI routes (ni_routes.c module).
diff --git a/drivers/comedi/drivers/tests/unittest.h b/drivers/comedi/drivers/tests/unittest.h
index 2da3beea2479..b0b34e058aad 100644
--- a/drivers/comedi/drivers/tests/unittest.h
+++ b/drivers/comedi/drivers/tests/unittest.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* vim: set ts=8 sw=8 noet tw=80 nowrap: */
/*
* comedi/drivers/tests/unittest.h
* Simple framework for unittests for comedi drivers.
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 9691f8612be8..09a9a77cce06 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -21,7 +21,7 @@
static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
static unsigned int num_quad8;
-module_param_array(base, uint, &num_quad8, 0);
+module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
#define QUAD8_NUM_COUNTERS 8
@@ -193,7 +193,7 @@ enum quad8_count_function {
QUAD8_COUNT_FUNCTION_QUADRATURE_X4
};
-static enum counter_count_function quad8_count_functions_list[] = {
+static const enum counter_count_function quad8_count_functions_list[] = {
[QUAD8_COUNT_FUNCTION_PULSE_DIRECTION] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
[QUAD8_COUNT_FUNCTION_QUADRATURE_X1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
[QUAD8_COUNT_FUNCTION_QUADRATURE_X2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
@@ -305,12 +305,12 @@ enum quad8_synapse_action {
QUAD8_SYNAPSE_ACTION_BOTH_EDGES
};
-static enum counter_synapse_action quad8_index_actions_list[] = {
+static const enum counter_synapse_action quad8_index_actions_list[] = {
[QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
[QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE
};
-static enum counter_synapse_action quad8_synapse_actions_list[] = {
+static const enum counter_synapse_action quad8_synapse_actions_list[] = {
[QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
[QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
[QUAD8_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
@@ -632,8 +632,8 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter,
return sprintf(buf, "%u\n", priv->preset[count->id]);
}
-static void quad8_preset_register_set(struct quad8 *priv, int id,
- unsigned int preset)
+static void quad8_preset_register_set(struct quad8 *const priv, const int id,
+ const unsigned int preset)
{
const unsigned int base_offset = priv->base + 2 * id;
int i;
@@ -1082,7 +1082,6 @@ static int quad8_probe(struct device *dev, unsigned int id)
/* Enable all counters */
outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
- /* Register Counter device */
return devm_counter_register(dev, &priv->counter);
}
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 5328705aa09c..d5d2540b30c2 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -91,4 +91,14 @@ config MICROCHIP_TCB_CAPTURE
To compile this driver as a module, choose M here: the
module will be called microchip-tcb-capture.
+config INTEL_QEP
+ tristate "Intel Quadrature Encoder Peripheral driver"
+ depends on PCI
+ help
+ Select this option to enable the Intel Quadrature Encoder Peripheral
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-qep.
+
endif # COUNTER
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index cb646ed2f039..19742e6f5e3e 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
obj-$(CONFIG_MICROCHIP_TCB_CAPTURE) += microchip-tcb-capture.o
+obj-$(CONFIG_INTEL_QEP) += intel-qep.o
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index c2b3fdfd8b77..9371532406ca 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -162,7 +162,7 @@ enum ftm_quaddec_synapse_action {
FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES,
};
-static enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
+static const enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
[FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES] =
COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
new file mode 100644
index 000000000000..8d7ae28fbd67
--- /dev/null
+++ b/drivers/counter/intel-qep.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Quadrature Encoder Peripheral driver
+ *
+ * Copyright (C) 2019-2021 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ */
+#include <linux/bitops.h>
+#include <linux/counter.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#define INTEL_QEPCON 0x00
+#define INTEL_QEPFLT 0x04
+#define INTEL_QEPCOUNT 0x08
+#define INTEL_QEPMAX 0x0c
+#define INTEL_QEPWDT 0x10
+#define INTEL_QEPCAPDIV 0x14
+#define INTEL_QEPCNTR 0x18
+#define INTEL_QEPCAPBUF 0x1c
+#define INTEL_QEPINT_STAT 0x20
+#define INTEL_QEPINT_MASK 0x24
+
+/* QEPCON */
+#define INTEL_QEPCON_EN BIT(0)
+#define INTEL_QEPCON_FLT_EN BIT(1)
+#define INTEL_QEPCON_EDGE_A BIT(2)
+#define INTEL_QEPCON_EDGE_B BIT(3)
+#define INTEL_QEPCON_EDGE_INDX BIT(4)
+#define INTEL_QEPCON_SWPAB BIT(5)
+#define INTEL_QEPCON_OP_MODE BIT(6)
+#define INTEL_QEPCON_PH_ERR BIT(7)
+#define INTEL_QEPCON_COUNT_RST_MODE BIT(8)
+#define INTEL_QEPCON_INDX_GATING_MASK GENMASK(10, 9)
+#define INTEL_QEPCON_INDX_GATING(n) (((n) & 3) << 9)
+#define INTEL_QEPCON_INDX_PAL_PBL INTEL_QEPCON_INDX_GATING(0)
+#define INTEL_QEPCON_INDX_PAL_PBH INTEL_QEPCON_INDX_GATING(1)
+#define INTEL_QEPCON_INDX_PAH_PBL INTEL_QEPCON_INDX_GATING(2)
+#define INTEL_QEPCON_INDX_PAH_PBH INTEL_QEPCON_INDX_GATING(3)
+#define INTEL_QEPCON_CAP_MODE BIT(11)
+#define INTEL_QEPCON_FIFO_THRE_MASK GENMASK(14, 12)
+#define INTEL_QEPCON_FIFO_THRE(n) ((((n) - 1) & 7) << 12)
+#define INTEL_QEPCON_FIFO_EMPTY BIT(15)
+
+/* QEPFLT */
+#define INTEL_QEPFLT_MAX_COUNT(n) ((n) & 0x1fffff)
+
+/* QEPINT */
+#define INTEL_QEPINT_FIFOCRIT BIT(5)
+#define INTEL_QEPINT_FIFOENTRY BIT(4)
+#define INTEL_QEPINT_QEPDIR BIT(3)
+#define INTEL_QEPINT_QEPRST_UP BIT(2)
+#define INTEL_QEPINT_QEPRST_DOWN BIT(1)
+#define INTEL_QEPINT_WDT BIT(0)
+
+#define INTEL_QEPINT_MASK_ALL GENMASK(5, 0)
+
+#define INTEL_QEP_CLK_PERIOD_NS 10
+
+#define INTEL_QEP_COUNTER_EXT_RW(_name) \
+{ \
+ .name = #_name, \
+ .read = _name##_read, \
+ .write = _name##_write, \
+}
+
+struct intel_qep {
+ struct counter_device counter;
+ struct mutex lock;
+ struct device *dev;
+ void __iomem *regs;
+ bool enabled;
+ /* Context save registers */
+ u32 qepcon;
+ u32 qepflt;
+ u32 qepmax;
+};
+
+static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
+{
+ return readl(qep->regs + offset);
+}
+
+static inline void intel_qep_writel(struct intel_qep *qep,
+ u32 offset, u32 value)
+{
+ writel(value, qep->regs + offset);
+}
+
+static void intel_qep_init(struct intel_qep *qep)
+{
+ u32 reg;
+
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ reg &= ~INTEL_QEPCON_EN;
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ qep->enabled = false;
+ /*
+ * Make sure peripheral is disabled by flushing the write with
+ * a dummy read
+ */
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+
+ reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
+ reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
+ INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
+}
+
+static int intel_qep_count_read(struct counter_device *counter,
+ struct counter_count *count,
+ unsigned long *val)
+{
+ struct intel_qep *const qep = counter->priv;
+
+ pm_runtime_get_sync(qep->dev);
+ *val = intel_qep_readl(qep, INTEL_QEPCOUNT);
+ pm_runtime_put(qep->dev);
+
+ return 0;
+}
+
+static const enum counter_count_function intel_qep_count_functions[] = {
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+};
+
+static int intel_qep_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ *function = 0;
+
+ return 0;
+}
+
+static const enum counter_synapse_action intel_qep_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+static int intel_qep_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ *action = 0;
+ return 0;
+}
+
+static const struct counter_ops intel_qep_counter_ops = {
+ .count_read = intel_qep_count_read,
+ .function_get = intel_qep_function_get,
+ .action_get = intel_qep_action_get,
+};
+
+#define INTEL_QEP_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+}
+
+static struct counter_signal intel_qep_signals[] = {
+ INTEL_QEP_SIGNAL(0, "Phase A"),
+ INTEL_QEP_SIGNAL(1, "Phase B"),
+ INTEL_QEP_SIGNAL(2, "Index"),
+};
+
+#define INTEL_QEP_SYNAPSE(_signal_id) { \
+ .actions_list = intel_qep_synapse_actions, \
+ .num_actions = ARRAY_SIZE(intel_qep_synapse_actions), \
+ .signal = &intel_qep_signals[(_signal_id)], \
+}
+
+static struct counter_synapse intel_qep_count_synapses[] = {
+ INTEL_QEP_SYNAPSE(0),
+ INTEL_QEP_SYNAPSE(1),
+ INTEL_QEP_SYNAPSE(2),
+};
+
+static ssize_t ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 reg;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPMAX);
+ pm_runtime_put(qep->dev);
+
+ return sysfs_emit(buf, "%u\n", reg);
+}
+
+static ssize_t ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, const char *buf, size_t len)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 max;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &max);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qep->lock);
+ if (qep->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(qep->dev);
+ intel_qep_writel(qep, INTEL_QEPMAX, max);
+ pm_runtime_put(qep->dev);
+ ret = len;
+
+out:
+ mutex_unlock(&qep->lock);
+ return ret;
+}
+
+static ssize_t enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf)
+{
+ struct intel_qep *qep = counter->priv;
+
+ return sysfs_emit(buf, "%u\n", qep->enabled);
+}
+
+static ssize_t enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, const char *buf, size_t len)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 reg;
+ bool val, changed;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&qep->lock);
+ changed = val ^ qep->enabled;
+ if (!changed)
+ goto out;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (val) {
+ /* Enable peripheral and keep runtime PM always on */
+ reg |= INTEL_QEPCON_EN;
+ pm_runtime_get_noresume(qep->dev);
+ } else {
+ /* Let runtime PM be idle and disable peripheral */
+ pm_runtime_put_noidle(qep->dev);
+ reg &= ~INTEL_QEPCON_EN;
+ }
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ pm_runtime_put(qep->dev);
+ qep->enabled = val;
+
+out:
+ mutex_unlock(&qep->lock);
+ return len;
+}
+
+static ssize_t spike_filter_ns_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 reg;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (!(reg & INTEL_QEPCON_FLT_EN)) {
+ pm_runtime_put(qep->dev);
+ return sysfs_emit(buf, "0\n");
+ }
+ reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
+ pm_runtime_put(qep->dev);
+
+ return sysfs_emit(buf, "%u\n", (reg + 2) * INTEL_QEP_CLK_PERIOD_NS);
+}
+
+static ssize_t spike_filter_ns_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, const char *buf, size_t len)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 reg, length;
+ bool enable;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &length);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Spike filter length is (MAX_COUNT + 2) clock periods.
+	 * Disable the filter when userspace writes 0, enable it for valid
+	 * nanosecond values, and error out otherwise.
+ */
+ length /= INTEL_QEP_CLK_PERIOD_NS;
+ if (length == 0) {
+ enable = false;
+ length = 0;
+ } else if (length >= 2) {
+ enable = true;
+ length -= 2;
+ } else {
+ return -EINVAL;
+ }
+
+ if (length > INTEL_QEPFLT_MAX_COUNT(length))
+ return -EINVAL;
+
+ mutex_lock(&qep->lock);
+ if (qep->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (enable)
+ reg |= INTEL_QEPCON_FLT_EN;
+ else
+ reg &= ~INTEL_QEPCON_FLT_EN;
+ intel_qep_writel(qep, INTEL_QEPFLT, length);
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ pm_runtime_put(qep->dev);
+ ret = len;
+
+out:
+ mutex_unlock(&qep->lock);
+ return ret;
+}
+
+static ssize_t preset_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 reg;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ pm_runtime_put(qep->dev);
+ return sysfs_emit(buf, "%u\n", !(reg & INTEL_QEPCON_COUNT_RST_MODE));
+}
+
+static ssize_t preset_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, const char *buf, size_t len)
+{
+ struct intel_qep *qep = counter->priv;
+ u32 reg;
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&qep->lock);
+ if (qep->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (val)
+ reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
+ else
+ reg |= INTEL_QEPCON_COUNT_RST_MODE;
+
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ pm_runtime_put(qep->dev);
+ ret = len;
+
+out:
+ mutex_unlock(&qep->lock);
+
+ return ret;
+}
+
+static const struct counter_count_ext intel_qep_count_ext[] = {
+ INTEL_QEP_COUNTER_EXT_RW(ceiling),
+ INTEL_QEP_COUNTER_EXT_RW(enable),
+ INTEL_QEP_COUNTER_EXT_RW(spike_filter_ns),
+ INTEL_QEP_COUNTER_EXT_RW(preset_enable)
+};
+
+static struct counter_count intel_qep_counter_count[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Count",
+ .functions_list = intel_qep_count_functions,
+ .num_functions = ARRAY_SIZE(intel_qep_count_functions),
+ .synapses = intel_qep_count_synapses,
+ .num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
+ .ext = intel_qep_count_ext,
+ .num_ext = ARRAY_SIZE(intel_qep_count_ext),
+ },
+};
+
+static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct intel_qep *qep;
+ struct device *dev = &pci->dev;
+ void __iomem *regs;
+ int ret;
+
+ qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
+ if (!qep)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
+ if (ret)
+ return ret;
+
+ regs = pcim_iomap_table(pci)[0];
+ if (!regs)
+ return -ENOMEM;
+
+ qep->dev = dev;
+ qep->regs = regs;
+ mutex_init(&qep->lock);
+
+ intel_qep_init(qep);
+ pci_set_drvdata(pci, qep);
+
+ qep->counter.name = pci_name(pci);
+ qep->counter.parent = dev;
+ qep->counter.ops = &intel_qep_counter_ops;
+ qep->counter.counts = intel_qep_counter_count;
+ qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
+ qep->counter.signals = intel_qep_signals;
+ qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
+ qep->counter.priv = qep;
+ qep->enabled = false;
+
+ pm_runtime_put(dev);
+ pm_runtime_allow(dev);
+
+ return devm_counter_register(&pci->dev, &qep->counter);
+}
+
+static void intel_qep_remove(struct pci_dev *pci)
+{
+ struct intel_qep *qep = pci_get_drvdata(pci);
+ struct device *dev = &pci->dev;
+
+ pm_runtime_forbid(dev);
+ if (!qep->enabled)
+ pm_runtime_get(dev);
+
+ intel_qep_writel(qep, INTEL_QEPCON, 0);
+}
+
+static int __maybe_unused intel_qep_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_qep *qep = pci_get_drvdata(pdev);
+
+ qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
+ qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
+ qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);
+
+ return 0;
+}
+
+static int __maybe_unused intel_qep_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_qep *qep = pci_get_drvdata(pdev);
+
+ /*
+ * Make sure peripheral is disabled when restoring registers and
+ * control register bits that are writable only when the peripheral
+ * is disabled
+ */
+ intel_qep_writel(qep, INTEL_QEPCON, 0);
+ intel_qep_readl(qep, INTEL_QEPCON);
+
+ intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
+ intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
+ intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
+
+ /* Restore all other control register bits except enable status */
+ intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
+ intel_qep_readl(qep, INTEL_QEPCON);
+
+ /* Restore enable status */
+ intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
+ intel_qep_suspend, intel_qep_resume, NULL);
+
+static const struct pci_device_id intel_qep_id_table[] = {
+ /* EHL */
+ { PCI_VDEVICE(INTEL, 0x4bc3), },
+ { PCI_VDEVICE(INTEL, 0x4b81), },
+ { PCI_VDEVICE(INTEL, 0x4b82), },
+ { PCI_VDEVICE(INTEL, 0x4b83), },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, intel_qep_id_table);
+
+static struct pci_driver intel_qep_driver = {
+ .name = "intel-qep",
+ .id_table = intel_qep_id_table,
+ .probe = intel_qep_probe,
+ .remove = intel_qep_remove,
+ .driver = {
+ .pm = &intel_qep_pm_ops,
+ }
+};
+
+module_pci_driver(intel_qep_driver);
+
+MODULE_AUTHOR("Felipe Balbi (Intel)");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index a99ee7996977..5df7cd13d4c7 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -77,7 +77,7 @@ static const struct counter_count_ext interrupt_cnt_ext[] = {
},
};
-static enum counter_synapse_action interrupt_cnt_synapse_actionss[] = {
+static const enum counter_synapse_action interrupt_cnt_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
@@ -112,7 +112,7 @@ static int interrupt_cnt_write(struct counter_device *counter,
return 0;
}
-static enum counter_count_function interrupt_cnt_functions[] = {
+static const enum counter_count_function interrupt_cnt_functions[] = {
COUNTER_COUNT_FUNCTION_INCREASE,
};
@@ -194,8 +194,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
priv->counter.signals = &priv->signals;
priv->counter.num_signals = 1;
- priv->synapses.actions_list = interrupt_cnt_synapse_actionss;
- priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actionss);
+ priv->synapses.actions_list = interrupt_cnt_synapse_actions;
+ priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actions);
priv->synapses.signal = &priv->signals;
priv->cnts.name = "Channel 0 Count";
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 710acc0a3704..51b8af80f98b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -37,7 +37,7 @@ enum mchp_tc_count_function {
MCHP_TC_FUNCTION_QUADRATURE,
};
-static enum counter_count_function mchp_tc_count_functions[] = {
+static const enum counter_count_function mchp_tc_count_functions[] = {
[MCHP_TC_FUNCTION_INCREASE] = COUNTER_COUNT_FUNCTION_INCREASE,
[MCHP_TC_FUNCTION_QUADRATURE] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
};
@@ -49,7 +49,7 @@ enum mchp_tc_synapse_action {
MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE
};
-static enum counter_synapse_action mchp_tc_synapse_actions[] = {
+static const enum counter_synapse_action mchp_tc_synapse_actions[] = {
[MCHP_TC_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
[MCHP_TC_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
[MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 937439635d53..c19d998df5ba 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -134,7 +134,7 @@ enum stm32_lptim_cnt_function {
STM32_LPTIM_ENCODER_BOTH_EDGE,
};
-static enum counter_count_function stm32_lptim_cnt_functions[] = {
+static const enum counter_count_function stm32_lptim_cnt_functions[] = {
[STM32_LPTIM_COUNTER_INCREASE] = COUNTER_COUNT_FUNCTION_INCREASE,
[STM32_LPTIM_ENCODER_BOTH_EDGE] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
};
@@ -146,7 +146,7 @@ enum stm32_lptim_synapse_action {
STM32_LPTIM_SYNAPSE_ACTION_NONE,
};
-static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
+static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
/* Index must match with stm32_lptim_cnt_polarity[] (priv->polarity) */
[STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
[STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 75bc401fdd18..603b30ada839 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -50,7 +50,7 @@ enum stm32_count_function {
STM32_COUNT_ENCODER_MODE_3,
};
-static enum counter_count_function stm32_count_functions[] = {
+static const enum counter_count_function stm32_count_functions[] = {
[STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
[STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
[STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
@@ -267,7 +267,7 @@ enum stm32_synapse_action {
STM32_SYNAPSE_ACTION_BOTH_EDGES
};
-static enum counter_synapse_action stm32_synapse_actions[] = {
+static const enum counter_synapse_action stm32_synapse_actions[] = {
[STM32_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
[STM32_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index e65e0a43be64..a5c5f70acfc9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,6 +19,16 @@ config ACPI_CPPC_CPUFREQ
If in doubt, say N.
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ default y
+ help
+	  This extends frequency invariance support in the CPPC cpufreq driver
+	  by using the CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 3fc98a3ffd91..c10fc33b29b1 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ /*
+	 * The cpufreq scaling for the 1.2 GHz variant of the SOC is currently
+ * unstable because we do not know how to configure it properly.
+ */
+ /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 2f769b1630c5..d4c27022b9c9 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -10,14 +10,18 @@
#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
+#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <uapi/linux/sched/types.h>
#include <asm/unaligned.h>
@@ -57,6 +61,216 @@ static struct cppc_workaround_oem_info wa_info[] = {
}
};
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
+
+/* Frequency invariance support */
+struct cppc_freq_invariance {
+ int cpu;
+ struct irq_work irq_work;
+ struct kthread_work work;
+ struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
+ struct cppc_cpudata *cpu_data;
+};
+
+static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
+static struct kthread_worker *kworker_fie;
+
+static struct cpufreq_driver cppc_cpufreq_driver;
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
+static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1);
+
+/**
+ * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
+ * @work: The work item.
+ *
+ * The CPPC driver registers itself with the topology core to provide its own
+ * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
+ * gets called by the scheduler on every tick.
+ *
+ * Note that the arch specific counters have higher priority than CPPC counters,
+ * if available, though the CPPC driver doesn't need to have any special
+ * handling for that.
+ *
+ * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
+ * reach here from hard-irq context), which then schedules a normal work item
+ * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
+ * based on the counter updates since the last tick.
+ */
+static void cppc_scale_freq_workfn(struct kthread_work *work)
+{
+ struct cppc_freq_invariance *cppc_fi;
+ struct cppc_perf_fb_ctrs fb_ctrs = {0};
+ struct cppc_cpudata *cpu_data;
+ unsigned long local_freq_scale;
+ u64 perf;
+
+ cppc_fi = container_of(work, struct cppc_freq_invariance, work);
+ cpu_data = cppc_fi->cpu_data;
+
+ if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
+ pr_warn("%s: failed to read perf counters\n", __func__);
+ return;
+ }
+
+ perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
+ &fb_ctrs);
+ cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
+
+ perf <<= SCHED_CAPACITY_SHIFT;
+ local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
+
+	/* This can happen due to a counter overflow */
+ if (unlikely(local_freq_scale > 1024))
+ local_freq_scale = 1024;
+
+ per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
+}
+
+static void cppc_irq_work(struct irq_work *irq_work)
+{
+ struct cppc_freq_invariance *cppc_fi;
+
+ cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
+ kthread_queue_work(kworker_fie, &cppc_fi->work);
+}
+
+static void cppc_scale_freq_tick(void)
+{
+ struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
+
+ /*
+	 * cppc_get_perf_ctrs() can potentially sleep, so call it from the
+	 * right context.
+ */
+ irq_work_queue(&cppc_fi->irq_work);
+}
+
+static struct scale_freq_data cppc_sftd = {
+ .source = SCALE_FREQ_SOURCE_CPPC,
+ .set_freq_scale = cppc_scale_freq_tick,
+};
+
+static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
+{
+ struct cppc_freq_invariance *cppc_fi;
+ int cpu, ret;
+
+ if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ return;
+
+ for_each_cpu(cpu, policy->cpus) {
+ cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+ cppc_fi->cpu = cpu;
+ cppc_fi->cpu_data = policy->driver_data;
+ kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
+ init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
+
+ ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
+ if (ret) {
+ pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
+ __func__, cpu, ret);
+
+ /*
+ * Don't abort if the CPU was offline while the driver
+ * was getting registered.
+ */
+ if (cpu_online(cpu))
+ return;
+ }
+ }
+
+ /* Register for freq-invariance */
+ topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
+}
+
+/*
+ * We free all the resources on policy removal rather than on CPU removal, as the
+ * irq-work items are per-CPU and the hotplug core takes care of flushing the
+ * pending irq-work (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread
+ * work fires on another CPU after the CPU concerned is removed, it does no harm.
+ *
+ * We just need to make sure to remove them all on policy->exit().
+ */
+static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
+{
+ struct cppc_freq_invariance *cppc_fi;
+ int cpu;
+
+ if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ return;
+
+ /* policy->cpus will be empty here, use related_cpus instead */
+ topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+ irq_work_sync(&cppc_fi->irq_work);
+ kthread_cancel_work_sync(&cppc_fi->work);
+ }
+}
+
+static void __init cppc_freq_invariance_init(void)
+{
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
+ .sched_policy = SCHED_DEADLINE,
+ .sched_nice = 0,
+ .sched_priority = 0,
+ /*
+ * Fake (unused) bandwidth; workaround to "fix"
+ * priority inheritance.
+ */
+ .sched_runtime = 1000000,
+ .sched_deadline = 10000000,
+ .sched_period = 10000000,
+ };
+ int ret;
+
+ if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ return;
+
+ kworker_fie = kthread_create_worker(0, "cppc_fie");
+ if (IS_ERR(kworker_fie))
+ return;
+
+ ret = sched_setattr_nocheck(kworker_fie->task, &attr);
+ if (ret) {
+ pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
+ ret);
+ kthread_destroy_worker(kworker_fie);
+ return;
+ }
+}
+
+static void cppc_freq_invariance_exit(void)
+{
+ if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ return;
+
+ kthread_destroy_worker(kworker_fie);
+ kworker_fie = NULL;
+}
+
+#else
+static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
+{
+}
+
+static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
+{
+}
+
+static inline void cppc_freq_invariance_init(void)
+{
+}
+
+static inline void cppc_freq_invariance_exit(void)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
+
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -182,27 +396,6 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
-static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
-{
- struct cppc_cpudata *cpu_data = policy->driver_data;
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- unsigned int cpu = policy->cpu;
- int ret;
-
- cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
-
- ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
- if (ret)
- pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
- caps->lowest_perf, cpu, ret);
-
- /* Remove CPU node from list and free driver data for policy */
- free_cpumask_var(cpu_data->shared_cpu_map);
- list_del(&cpu_data->node);
- kfree(policy->driver_data);
- policy->driver_data = NULL;
-}
-
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -277,6 +470,16 @@ out:
return NULL;
}
+static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+
+ list_del(&cpu_data->node);
+ free_cpumask_var(cpu_data->shared_cpu_map);
+ kfree(cpu_data);
+ policy->driver_data = NULL;
+}
+
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -330,7 +533,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
default:
pr_debug("Unsupported CPU co-ord type: %d\n",
policy->shared_type);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
/*
@@ -345,13 +549,40 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
- if (ret)
+ if (ret) {
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->highest_perf, cpu, ret);
+ goto out;
+ }
+
+ cppc_cpufreq_cpu_fie_init(policy);
+ return 0;
+out:
+ cppc_cpufreq_put_cpu_data(policy);
return ret;
}
+static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ unsigned int cpu = policy->cpu;
+ int ret;
+
+ cppc_cpufreq_cpu_fie_exit(policy);
+
+ cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
+
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+ if (ret)
+ pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+ caps->lowest_perf, cpu, ret);
+
+ cppc_cpufreq_put_cpu_data(policy);
+ return 0;
+}
+
static inline u64 get_delta(u64 t1, u64 t0)
{
if (t1 > t0 || t0 > ~(u32)0)
@@ -360,28 +591,25 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
- u64 reference_perf, delivered_perf;
+ u64 reference_perf;
- reference_perf = fb_ctrs_t0.reference_perf;
+ reference_perf = fb_ctrs_t0->reference_perf;
- delta_reference = get_delta(fb_ctrs_t1.reference,
- fb_ctrs_t0.reference);
- delta_delivered = get_delta(fb_ctrs_t1.delivered,
- fb_ctrs_t0.delivered);
+ delta_reference = get_delta(fb_ctrs_t1->reference,
+ fb_ctrs_t0->reference);
+ delta_delivered = get_delta(fb_ctrs_t1->delivered,
+ fb_ctrs_t0->delivered);
- /* Check to avoid divide-by zero */
- if (delta_reference || delta_delivered)
- delivered_perf = (reference_perf * delta_delivered) /
- delta_reference;
- else
- delivered_perf = cpu_data->perf_ctrls.desired_perf;
+	/* Check to avoid divide-by-zero and invalid delivered_perf */
+ if (!delta_reference || !delta_delivered)
+ return cpu_data->perf_ctrls.desired_perf;
- return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+ return (reference_perf * delta_delivered) / delta_reference;
}
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
@@ -389,6 +617,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data = policy->driver_data;
+ u64 delivered_perf;
int ret;
cpufreq_cpu_put(policy);
@@ -403,7 +632,10 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
if (ret)
return ret;
- return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1);
+ delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
+ &fb_ctrs_t1);
+
+ return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -451,7 +683,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
- .stop_cpu = cppc_cpufreq_stop_cpu,
+ .exit = cppc_cpufreq_cpu_exit,
.set_boost = cppc_cpufreq_set_boost,
.attr = cppc_cpufreq_attr,
.name = "cppc_cpufreq",
@@ -504,14 +736,21 @@ static void cppc_check_hisi_workaround(void)
static int __init cppc_cpufreq_init(void)
{
+ int ret;
+
if ((acpi_disabled) || !acpi_cpc_valid())
return -ENODEV;
INIT_LIST_HEAD(&cpu_data_list);
cppc_check_hisi_workaround();
+ cppc_freq_invariance_init();
+
+ ret = cpufreq_register_driver(&cppc_cpufreq_driver);
+ if (ret)
+ cppc_freq_invariance_exit();
- return cpufreq_register_driver(&cppc_cpufreq_driver);
+ return ret;
}
static inline void free_cpu_data(void)
@@ -529,6 +768,7 @@ static inline void free_cpu_data(void)
static void __exit cppc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cppc_cpufreq_driver);
+ cppc_freq_invariance_exit();
free_cpu_data();
}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 5e07065ec22f..231e585f6ba2 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -15,7 +15,7 @@
* Machines for which the cpufreq device is *always* created, mostly used for
* platforms using "operating-points" (V1) property.
*/
-static const struct of_device_id whitelist[] __initconst = {
+static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "allwinner,sun4i-a10", },
{ .compatible = "allwinner,sun5i-a10s", },
{ .compatible = "allwinner,sun5i-a13", },
@@ -100,7 +100,7 @@ static const struct of_device_id whitelist[] __initconst = {
* Machines for which the cpufreq device is *not* created, mostly used for
* platforms using "operating-points-v2" property.
*/
-static const struct of_device_id blacklist[] __initconst = {
+static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
{ .compatible = "arm,vexpress", },
@@ -126,6 +126,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
{ .compatible = "mediatek,mt8183", },
+ { .compatible = "mediatek,mt8365", },
{ .compatible = "mediatek,mt8516", },
{ .compatible = "nvidia,tegra20", },
@@ -137,7 +138,10 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,qcs404", },
{ .compatible = "qcom,sc7180", },
+ { .compatible = "qcom,sc7280", },
+ { .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm8150", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
@@ -177,13 +181,13 @@ static int __init cpufreq_dt_platdev_init(void)
if (!np)
return -ENODEV;
- match = of_match_node(whitelist, np);
+ match = of_match_node(allowlist, np);
if (match) {
data = match->data;
goto create_pdev;
}
- if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
goto create_pdev;
of_node_put(np);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cbab834c37a0..45f3416988f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -524,6 +524,22 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
+static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ unsigned int idx;
+
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ if (!cpufreq_driver->target_index)
+ return target_freq;
+
+ idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ policy->cached_resolved_idx = idx;
+ policy->cached_target_freq = target_freq;
+ return policy->freq_table[idx].frequency;
+}
+
/**
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
* one.
@@ -538,22 +554,7 @@ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
- policy->cached_target_freq = target_freq;
-
- if (cpufreq_driver->target_index) {
- unsigned int idx;
-
- idx = cpufreq_frequency_table_target(policy, target_freq,
- CPUFREQ_RELATION_L);
- policy->cached_resolved_idx = idx;
- return policy->freq_table[idx].frequency;
- }
-
- if (cpufreq_driver->resolve_freq)
- return cpufreq_driver->resolve_freq(policy, target_freq);
-
- return target_freq;
+ return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -1606,9 +1607,6 @@ static int cpufreq_offline(unsigned int cpu)
policy->cdev = NULL;
}
- if (cpufreq_driver->stop_cpu)
- cpufreq_driver->stop_cpu(policy);
-
if (has_target())
cpufreq_exit_governor(policy);
@@ -2234,13 +2232,11 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int old_target_freq = target_freq;
- int index;
if (cpufreq_disabled())
return -ENODEV;
- /* Make sure that target_freq is within supported range */
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = __resolve_freq(policy, target_freq, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2261,9 +2257,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (!cpufreq_driver->target_index)
return -EINVAL;
- index = cpufreq_frequency_table_target(policy, target_freq, relation);
-
- return __target_index(policy, index);
+ return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6012964df51b..bb4549959b11 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2532,7 +2532,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
-static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
+static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2577,11 +2577,11 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
return 0;
}
-static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
- pr_debug("CPU %d stopping\n", policy->cpu);
-
intel_pstate_clear_update_util_hook(policy->cpu);
+
+ return intel_cpufreq_cpu_offline(policy);
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
@@ -2654,7 +2654,6 @@ static struct cpufreq_driver intel_pstate = {
.resume = intel_pstate_resume,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
- .stop_cpu = intel_pstate_stop_cpu,
.offline = intel_pstate_cpu_offline,
.online = intel_pstate_cpu_online,
.update_limits = intel_pstate_update_limits,
@@ -2956,7 +2955,7 @@ static struct cpufreq_driver intel_cpufreq = {
.fast_switch = intel_cpufreq_fast_switch,
.init = intel_cpufreq_cpu_init,
.exit = intel_cpufreq_cpu_exit,
- .offline = intel_pstate_cpu_offline,
+ .offline = intel_cpufreq_cpu_offline,
.online = intel_pstate_cpu_online,
.suspend = intel_pstate_suspend,
.resume = intel_pstate_resume,
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 182a4dbca095..c538a153ee82 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -942,8 +942,6 @@ static int __init longhaul_init(void)
return cpufreq_register_driver(&longhaul_driver);
case 10:
pr_err("Use acpi-cpufreq driver for VIA C7\n");
- default:
- ;
}
return -ENODEV;
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f2e491b25b07..87019d5a9547 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -537,6 +537,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
{ .compatible = "mediatek,mt8183", },
+ { .compatible = "mediatek,mt8365", },
{ .compatible = "mediatek,mt8516", },
{ }
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index e439b43c19eb..005600cef273 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -875,7 +875,15 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- /* timer is deleted in cpufreq_cpu_stop() */
+ struct powernv_smp_call_data freq_data;
+ struct global_pstate_info *gpstates = policy->driver_data;
+
+ freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
+ freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
+ smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+ if (gpstates)
+ del_timer_sync(&gpstates->timer);
+
kfree(policy->driver_data);
return 0;
@@ -1007,18 +1015,6 @@ static struct notifier_block powernv_cpufreq_opal_nb = {
.priority = 0,
};
-static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
-{
- struct powernv_smp_call_data freq_data;
- struct global_pstate_info *gpstates = policy->driver_data;
-
- freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
- freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
- smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
- if (gpstates)
- del_timer_sync(&gpstates->timer);
-}
-
static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
@@ -1042,7 +1038,6 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
.target_index = powernv_cpufreq_target_index,
.fast_switch = powernv_fast_switch,
.get = powernv_cpufreq_get,
- .stop_cpu = powernv_cpufreq_stop_cpu,
.attr = powernv_cpu_freq_attr,
};
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index c8a4364ad3c2..75f818d04b48 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
- ret = -ENOMEM;
+ return -ENOMEM;
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
@@ -174,7 +174,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
if (nr_opp <= 0) {
dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
- __func__, ret);
+ __func__, nr_opp);
ret = -ENODEV;
goto out_free_opp;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index adf91a6e4d7d..c0e7971da2da 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -87,6 +87,18 @@ static const struct spm_reg_data spm_reg_8974_8084_cpu = {
.start_index[PM_SLEEP_MODE_SPC] = 3,
};
+/* SPM register data for 8226 */
+static const struct spm_reg_data spm_reg_8226_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x0,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
[SPM_REG_SPM_CTL] = 0x20,
@@ -259,6 +271,8 @@ static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
}
static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
+ .data = &spm_reg_8226_cpu },
{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
.data = &spm_reg_8974_8084_cpu },
{ .compatible = "qcom,apq8084-saw2-v2.1-cpu",
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 7b91060e82f6..d9262db79cae 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -382,8 +382,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
alt_recent = idx_recent_sum > NR_RECENT / 2;
if (alt_recent || alt_intercepts) {
- s64 last_enabled_span_ns = duration_ns;
- int last_enabled_idx = idx;
+ s64 first_suitable_span_ns = duration_ns;
+ int first_suitable_idx = idx;
/*
* Look for the deepest idle state whose target residency had
@@ -397,37 +397,51 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
intercept_sum = 0;
recent_sum = 0;
- for (i = idx - 1; i >= idx0; i--) {
+ for (i = idx - 1; i >= 0; i--) {
struct teo_bin *bin = &cpu_data->state_bins[i];
s64 span_ns;
intercept_sum += bin->intercepts;
recent_sum += bin->recent;
+ span_ns = teo_middle_of_bin(i, drv);
+
+ if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
+ (!alt_intercepts ||
+ 2 * intercept_sum > idx_intercept_sum)) {
+ if (teo_time_ok(span_ns) &&
+ !dev->states_usage[i].disable) {
+ idx = i;
+ duration_ns = span_ns;
+ } else {
+ /*
+ * The current state is too shallow or
+ * disabled, so take the first enabled
+ * deeper state with suitable time span.
+ */
+ idx = first_suitable_idx;
+ duration_ns = first_suitable_span_ns;
+ }
+ break;
+ }
+
if (dev->states_usage[i].disable)
continue;
- span_ns = teo_middle_of_bin(i, drv);
if (!teo_time_ok(span_ns)) {
/*
- * The current state is too shallow, so select
- * the first enabled deeper state.
+ * The current state is too shallow, but if an
+ * alternative candidate state has been found,
+ * it may still turn out to be a better choice.
*/
- duration_ns = last_enabled_span_ns;
- idx = last_enabled_idx;
- break;
- }
+ if (first_suitable_idx != idx)
+ continue;
- if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
- (!alt_intercepts ||
- 2 * intercept_sum > idx_intercept_sum)) {
- idx = i;
- duration_ns = span_ns;
break;
}
- last_enabled_span_ns = span_ns;
- last_enabled_idx = i;
+ first_suitable_span_ns = span_ns;
+ first_suitable_idx = i;
}
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ebcec460c045..51690e73153a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -267,14 +267,14 @@ config CRYPTO_DEV_NIAGARA2
checksumming, and raw copies.
config CRYPTO_DEV_SL3516
- tristate "Stormlink SL3516 crypto offloader"
- depends on HAS_IOMEM
+ tristate "Storlink SL3516 crypto offloader"
+ depends on ARCH_GEMINI || COMPILE_TEST
+ depends on HAS_IOMEM && PM
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_ECB
select CRYPTO_AES
select HW_RANDOM
- depends on PM
help
This option allows you to have support for SL3516 crypto offloader.
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index b41c2f5fc495..c1c2b1d86663 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * sl3516-ce-cipher.c - hardware cryptographic offloader for Stormlink SL3516 SoC
+ * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
*
* Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
*
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index da6cd529a6c0..b7524b649068 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * sl3516-ce-core.c - hardware cryptographic offloader for Stormlink SL3516 SoC
+ * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
*
* Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
*
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index d120ce3e34ed..490db7bccf61 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -364,6 +364,9 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
+ if (qm->ver < QM_HW_V3)
+ return;
+
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
val |= SEC_PREFETCH_DISABLE;
writel(val, qm->io_base + SEC_PREFETCH_CFG);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 35fc5ee70491..98730aab287c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -31,6 +31,10 @@
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
+/* Intermittent includes, delete this after v5.14-rc1 */
+#include <linux/soc/ixp4xx/cpu.h>
+#include <mach/ixp4xx-regs.h>
+
#define MAX_KEYLEN 32
/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index 23e3d0160e67..2a35e0e785bd 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -29,6 +29,7 @@ if CRYPTO_DEV_NX_COMPRESS
config CRYPTO_DEV_NX_COMPRESS_PSERIES
tristate "Compression acceleration support on pSeries platform"
depends on PPC_PSERIES && IBMVIO
+ depends on PPC_VAS
default y
help
Support for PowerPC Nest (NX) compression acceleration. This
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index bc89a20e5d9d..d00181a26dd6 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -14,5 +14,5 @@ nx-crypto-objs := nx.o \
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
-nx-compress-pseries-objs := nx-842-pseries.o
+nx-compress-pseries-objs := nx-common-pseries.o
nx-compress-powernv-objs := nx-common-powernv.o
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 655361ba9107..32a036ada5d0 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1092,8 +1092,8 @@ static __init int nx_compress_powernv_init(void)
* normal FIFO priority is assigned for userspace.
* 842 compression is supported only in kernel.
*/
- ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
- "nx-gzip");
+ ret = vas_register_api_powernv(THIS_MODULE, VAS_COP_TYPE_GZIP,
+ "nx-gzip");
/*
* GZIP is not supported in kernel right now.
@@ -1129,7 +1129,7 @@ static void __exit nx_compress_powernv_exit(void)
* use. So delete this API use for GZIP engine.
*/
if (!nx842_ct)
- vas_unregister_coproc_api();
+ vas_unregister_api_powernv();
crypto_unregister_alg(&nx842_powernv_alg);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 1491cbfbc071..4e304f6081e4 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -9,6 +9,8 @@
*/
#include <asm/vio.h>
+#include <asm/hvcall.h>
+#include <asm/vas.h>
#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -19,6 +21,29 @@ MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");
+/*
+ * Coprocessor type specific capabilities from the hypervisor.
+ */
+struct hv_nx_cop_caps {
+ __be64 descriptor;
+ __be64 req_max_processed_len; /* Max bytes in one GZIP request */
+ __be64 min_compress_len; /* Min compression size in bytes */
+ __be64 min_decompress_len; /* Min decompression size in bytes */
+} __packed __aligned(0x1000);
+
+/*
+ * Coprocessor type specific capabilities.
+ */
+struct nx_cop_caps {
+ u64 descriptor;
+ u64 req_max_processed_len; /* Max bytes in one GZIP request */
+ u64 min_compress_len; /* Min compression in bytes */
+ u64 min_decompress_len; /* Min decompression in bytes */
+};
+
+static u64 caps_feat;
+static struct nx_cop_caps nx_cop_caps;
+
static struct nx842_constraints nx842_pseries_constraints = {
.alignment = DDE_BUFFER_ALIGN,
.multiple = DDE_BUFFER_LAST_MULT,
@@ -942,6 +967,36 @@ static struct attribute_group nx842_attribute_group = {
.attrs = nx842_sysfs_entries,
};
+#define nxcop_caps_read(_name) \
+static ssize_t nxcop_##_name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%lld\n", nx_cop_caps._name); \
+}
+
+#define NXCT_ATTR_RO(_name) \
+ nxcop_caps_read(_name); \
+ static struct device_attribute dev_attr_##_name = __ATTR(_name, \
+ 0444, \
+ nxcop_##_name##_show, \
+ NULL);
+
+NXCT_ATTR_RO(req_max_processed_len);
+NXCT_ATTR_RO(min_compress_len);
+NXCT_ATTR_RO(min_decompress_len);
+
+static struct attribute *nxcop_caps_sysfs_entries[] = {
+ &dev_attr_req_max_processed_len.attr,
+ &dev_attr_min_compress_len.attr,
+ &dev_attr_min_decompress_len.attr,
+ NULL,
+};
+
+static struct attribute_group nxcop_caps_attr_group = {
+ .name = "nx_gzip_caps",
+ .attrs = nxcop_caps_sysfs_entries,
+};
+
static struct nx842_driver nx842_pseries_driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
@@ -1031,6 +1086,16 @@ static int nx842_probe(struct vio_dev *viodev,
goto error;
}
+ if (caps_feat) {
+ if (sysfs_create_group(&viodev->dev.kobj,
+ &nxcop_caps_attr_group)) {
+ dev_err(&viodev->dev,
+ "Could not create sysfs NX capability entries\n");
+ ret = -1;
+ goto error;
+ }
+ }
+
return 0;
error_unlock:
@@ -1050,6 +1115,9 @@ static void nx842_remove(struct vio_dev *viodev)
pr_info("Removing IBM Power 842 compression device\n");
sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
+ if (caps_feat)
+ sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);
+
crypto_unregister_alg(&nx842_pseries_alg);
spin_lock_irqsave(&devdata_mutex, flags);
@@ -1065,6 +1133,64 @@ static void nx842_remove(struct vio_dev *viodev)
kfree(old_devdata);
}
+/*
+ * Get NX capabilities from the hypervisor.
+ * Only NXGZIP capabilities are provided by the hypervisor right
+ * now and these values are available to user space with sysfs.
+ */
+static void __init nxcop_get_capabilities(void)
+{
+ struct hv_vas_all_caps *hv_caps;
+ struct hv_nx_cop_caps *hv_nxc;
+ int rc;
+
+ hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
+ if (!hv_caps)
+ return;
+ /*
+ * Get NX overall capabilities with feature type=0
+ */
+ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ goto out;
+
+ caps_feat = be64_to_cpu(hv_caps->feat_type);
+ /*
+ * NX-GZIP feature available
+ */
+ if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
+ hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
+ if (!hv_nxc)
+ goto out;
+ /*
+ * Get capabilities for NX-GZIP feature
+ */
+ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
+ VAS_NX_GZIP_FEAT,
+ (u64)virt_to_phys(hv_nxc));
+ } else {
+ pr_err("NX-GZIP feature is not available\n");
+ rc = -EINVAL;
+ }
+
+ if (!rc) {
+ nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
+ nx_cop_caps.req_max_processed_len =
+ be64_to_cpu(hv_nxc->req_max_processed_len);
+ nx_cop_caps.min_compress_len =
+ be64_to_cpu(hv_nxc->min_compress_len);
+ nx_cop_caps.min_decompress_len =
+ be64_to_cpu(hv_nxc->min_decompress_len);
+ } else {
+ caps_feat = 0;
+ }
+
+ kfree(hv_nxc);
+out:
+ kfree(hv_caps);
+}
+
static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
@@ -1093,6 +1219,10 @@ static int __init nx842_pseries_init(void)
return -ENOMEM;
RCU_INIT_POINTER(devdata, new_devdata);
+ /*
+ * Get NX capabilities from the hypervisor.
+ */
+ nxcop_get_capabilities();
ret = vio_register_driver(&nx842_vio_driver);
if (ret) {
@@ -1102,6 +1232,12 @@ static int __init nx842_pseries_init(void)
return ret;
}
+ ret = vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
+ "nx-gzip");
+
+ if (ret)
+ pr_err("NX-GZIP is not supported. Returned=%d\n", ret);
+
return 0;
}
@@ -1112,6 +1248,8 @@ static void __exit nx842_pseries_exit(void)
struct nx842_devdata *old_devdata;
unsigned long flags;
+ vas_unregister_api_pseries();
+
crypto_unregister_alg(&nx842_pseries_alg);
spin_lock_irqsave(&devdata_mutex, flags);
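The NXCT_ATTR_RO() macro introduced in the nx-common-pseries hunks stamps out one read-only sysfs attribute per NX-GZIP capability field, and nx842_probe() groups them under the new nx_gzip_caps directory when the hypervisor reports the feature. For illustration, a sketch of what NXCT_ATTR_RO(min_compress_len) expands to, following the macro definitions above:

/* Expansion sketch of NXCT_ATTR_RO(min_compress_len), for illustration */
static ssize_t nxcop_min_compress_len_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return sprintf(buf, "%lld\n", nx_cop_caps.min_compress_len);
}

static struct device_attribute dev_attr_min_compress_len =
	__ATTR(min_compress_len, 0444, nxcop_min_compress_len_show, NULL);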
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 94b2dba90f0d..31bdb1d76d11 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -183,8 +183,7 @@ static void omap_crypto_copy_data(struct scatterlist *src,
memcpy(dstb, srcb, amt);
- if (!PageSlab(sg_page(dst)))
- flush_kernel_dcache_page(sg_page(dst));
+ flush_dcache_page(sg_page(dst));
kunmap_atomic(srcb);
kunmap_atomic(dstb);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 96b437bfe3de..6f64aa693146 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -406,7 +406,7 @@ static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
break;
default:
- pr_err("QAT: Invalid ioctl\n");
+ pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
ret = -EFAULT;
break;
}
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 97dc4d751651..e6de221cc568 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -15,21 +15,17 @@ if CXL_BUS
config CXL_MEM
tristate "CXL.mem: Memory Devices"
+ default CXL_BUS
help
The CXL.mem protocol allows a device to act as a provider of
"System RAM" and/or "Persistent Memory" that is fully coherent
as if the memory was attached to the typical CPU memory
controller.
- Say 'y/m' to enable a driver (named "cxl_mem.ko" when built as
- a module) that will attach to CXL.mem devices for
- configuration, provisioning, and health monitoring. This
- driver is required for dynamic provisioning of CXL.mem
- attached memory which is a prerequisite for persistent memory
- support. Typically volatile memory is mapped by platform
- firmware and included in the platform memory map, but in some
- cases the OS is responsible for mapping that memory. See
- Chapter 2.3 Type 3 CXL Device in the CXL 2.0 specification.
+ Say 'y/m' to enable a driver that will attach to CXL.mem devices for
+ configuration and management primarily via the mailbox interface. See
+ Chapter 2.3 Type 3 CXL Device in the CXL 2.0 specification for more
+ details.
If unsure say 'm'.
@@ -50,4 +46,33 @@ config CXL_MEM_RAW_COMMANDS
potential impact to memory currently in use by the kernel.
If developing CXL hardware or the driver say Y, otherwise say N.
+
+config CXL_ACPI
+ tristate "CXL ACPI: Platform Support"
+ depends on ACPI
+ default CXL_BUS
+ help
+ Enable support for host managed device memory (HDM) resources
+ published by a platform's ACPI CXL memory layout description. See
+ Chapter 9.14.1 CXL Early Discovery Table (CEDT) in the CXL 2.0
+ specification, and CXL Fixed Memory Window Structures (CEDT.CFMWS)
+ (https://www.computeexpresslink.org/spec-landing). The CXL core
+ consumes these resource to publish the root of a cxl_port decode
+ hierarchy to map regions that represent System RAM, or Persistent
+ Memory regions to be managed by LIBNVDIMM.
+
+ If unsure say 'm'.
+
+config CXL_PMEM
+ tristate "CXL PMEM: Persistent Memory Support"
+ depends on LIBNVDIMM
+ default CXL_BUS
+ help
+ In addition to typical memory resources a platform may also advertise
+ support for persistent memory attached via CXL. This support is
+ managed via a bridge driver from CXL to the LIBNVDIMM system
+ subsystem. Say 'y/m' to enable support for enumerating and
+ provisioning the persistent memory capacity of CXL memory expanders.
+
+ If unsure say 'm'.
endif
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index a314a1891f4d..32954059b37b 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += cxl_bus.o
-obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_MEM) += cxl_pci.o
+obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
+obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
-cxl_bus-y := bus.o
-cxl_mem-y := mem.o
+cxl_core-y := core.o
+cxl_pci-y := pci.o
+cxl_acpi-y := acpi.o
+cxl_pmem-y := pmem.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
new file mode 100644
index 000000000000..8ae89273f58e
--- /dev/null
+++ b/drivers/cxl/acpi.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include "cxl.h"
+
+static struct acpi_table_header *acpi_cedt;
+
+/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
+#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
+#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
+
+static unsigned long cfmws_to_decoder_flags(int restrictions)
+{
+ unsigned long flags = 0;
+
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
+ flags |= CXL_DECODER_F_TYPE2;
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
+ flags |= CXL_DECODER_F_TYPE3;
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
+ flags |= CXL_DECODER_F_RAM;
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
+ flags |= CXL_DECODER_F_PMEM;
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
+ flags |= CXL_DECODER_F_LOCK;
+
+ return flags;
+}
+
+static int cxl_acpi_cfmws_verify(struct device *dev,
+ struct acpi_cedt_cfmws *cfmws)
+{
+ int expected_len;
+
+ if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
+ dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
+ dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
+ dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
+ return -EINVAL;
+ }
+
+ expected_len = struct_size((cfmws), interleave_targets,
+ CFMWS_INTERLEAVE_WAYS(cfmws));
+
+ if (cfmws->header.length < expected_len) {
+ dev_err(dev, "CFMWS length %d less than expected %d\n",
+ cfmws->header.length, expected_len);
+ return -EINVAL;
+ }
+
+ if (cfmws->header.length > expected_len)
+ dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
+ cfmws->header.length, expected_len);
+
+ return 0;
+}
+
+static void cxl_add_cfmws_decoders(struct device *dev,
+ struct cxl_port *root_port)
+{
+ struct acpi_cedt_cfmws *cfmws;
+ struct cxl_decoder *cxld;
+ acpi_size len, cur = 0;
+ void *cedt_subtable;
+ unsigned long flags;
+ int rc;
+
+ len = acpi_cedt->length - sizeof(*acpi_cedt);
+ cedt_subtable = acpi_cedt + 1;
+
+ while (cur < len) {
+ struct acpi_cedt_header *c = cedt_subtable + cur;
+
+ if (c->type != ACPI_CEDT_TYPE_CFMWS) {
+ cur += c->length;
+ continue;
+ }
+
+ cfmws = cedt_subtable + cur;
+
+ if (cfmws->header.length < sizeof(*cfmws)) {
+ dev_warn_once(dev,
+ "CFMWS entry skipped:invalid length:%u\n",
+ cfmws->header.length);
+ cur += c->length;
+ continue;
+ }
+
+ rc = cxl_acpi_cfmws_verify(dev, cfmws);
+ if (rc) {
+ dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
+ cfmws->base_hpa, cfmws->base_hpa +
+ cfmws->window_size - 1);
+ cur += c->length;
+ continue;
+ }
+
+ flags = cfmws_to_decoder_flags(cfmws->restrictions);
+ cxld = devm_cxl_add_decoder(dev, root_port,
+ CFMWS_INTERLEAVE_WAYS(cfmws),
+ cfmws->base_hpa, cfmws->window_size,
+ CFMWS_INTERLEAVE_WAYS(cfmws),
+ CFMWS_INTERLEAVE_GRANULARITY(cfmws),
+ CXL_DECODER_EXPANDER,
+ flags);
+
+ if (IS_ERR(cxld)) {
+ dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
+ cfmws->base_hpa, cfmws->base_hpa +
+ cfmws->window_size - 1);
+ } else {
+ dev_dbg(dev, "add: %s range %#llx-%#llx\n",
+ dev_name(&cxld->dev), cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+ }
+ cur += c->length;
+ }
+}
+
+static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
+{
+ struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
+ acpi_size len, cur = 0;
+ void *cedt_subtable;
+
+ len = acpi_cedt->length - sizeof(*acpi_cedt);
+ cedt_subtable = acpi_cedt + 1;
+
+ while (cur < len) {
+ struct acpi_cedt_header *c = cedt_subtable + cur;
+
+ if (c->type != ACPI_CEDT_TYPE_CHBS) {
+ cur += c->length;
+ continue;
+ }
+
+ chbs = cedt_subtable + cur;
+
+ if (chbs->header.length < sizeof(*chbs)) {
+ dev_warn_once(dev,
+ "CHBS entry skipped: invalid length:%u\n",
+ chbs->header.length);
+ cur += c->length;
+ continue;
+ }
+
+ if (chbs->uid != uid) {
+ cur += c->length;
+ continue;
+ }
+
+ if (chbs_match) {
+ dev_warn_once(dev,
+ "CHBS entry skipped: duplicate UID:%u\n",
+ uid);
+ cur += c->length;
+ continue;
+ }
+
+ chbs_match = chbs;
+ cur += c->length;
+ }
+
+ return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
+}
+
+static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
+{
+ return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
+}
+
+struct cxl_walk_context {
+ struct device *dev;
+ struct pci_bus *root;
+ struct cxl_port *port;
+ int error;
+ int count;
+};
+
+static int match_add_root_ports(struct pci_dev *pdev, void *data)
+{
+ struct cxl_walk_context *ctx = data;
+ struct pci_bus *root_bus = ctx->root;
+ struct cxl_port *port = ctx->port;
+ int type = pci_pcie_type(pdev);
+ struct device *dev = ctx->dev;
+ u32 lnkcap, port_num;
+ int rc;
+
+ if (pdev->bus != root_bus)
+ return 0;
+ if (!pci_is_pcie(pdev))
+ return 0;
+ if (type != PCI_EXP_TYPE_ROOT_PORT)
+ return 0;
+ if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
+ &lnkcap) != PCIBIOS_SUCCESSFUL)
+ return 0;
+
+ /* TODO walk DVSEC to find component register base */
+ port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+ rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
+ if (rc) {
+ ctx->error = rc;
+ return rc;
+ }
+ ctx->count++;
+
+ dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));
+
+ return 0;
+}
+
+static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device *dev)
+{
+ struct cxl_dport *dport;
+
+ device_lock(&port->dev);
+ list_for_each_entry(dport, &port->dports, list)
+ if (dport->dport == dev) {
+ device_unlock(&port->dev);
+ return dport;
+ }
+
+ device_unlock(&port->dev);
+ return NULL;
+}
+
+static struct acpi_device *to_cxl_host_bridge(struct device *dev)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
+ return adev;
+ return NULL;
+}
+
+/*
+ * A host bridge is a dport to a CFMWS decode and it is a uport to the
+ * dports (PCIe Root Ports) in the host bridge.
+ */
+static int add_host_bridge_uport(struct device *match, void *arg)
+{
+ struct acpi_device *bridge = to_cxl_host_bridge(match);
+ struct cxl_port *root_port = arg;
+ struct device *host = root_port->dev.parent;
+ struct acpi_pci_root *pci_root;
+ struct cxl_walk_context ctx;
+ struct cxl_decoder *cxld;
+ struct cxl_dport *dport;
+ struct cxl_port *port;
+
+ if (!bridge)
+ return 0;
+
+ pci_root = acpi_pci_find_root(bridge->handle);
+ if (!pci_root)
+ return -ENXIO;
+
+ dport = find_dport_by_dev(root_port, match);
+ if (!dport) {
+ dev_dbg(host, "host bridge expected and not found\n");
+ return -ENODEV;
+ }
+
+ port = devm_cxl_add_port(host, match, dport->component_reg_phys,
+ root_port);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+ dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
+
+ ctx = (struct cxl_walk_context){
+ .dev = host,
+ .root = pci_root->bus,
+ .port = port,
+ };
+ pci_walk_bus(pci_root->bus, match_add_root_ports, &ctx);
+
+ if (ctx.count == 0)
+ return -ENODEV;
+ if (ctx.error)
+ return ctx.error;
+
+ /* TODO: Scan CHBCR for HDM Decoder resources */
+
+ /*
+ * In the single-port host-bridge case there are no HDM decoders
+ * in the CHBCR and a 1:1 passthrough decode is implied.
+ */
+ if (ctx.count == 1) {
+ cxld = devm_cxl_add_passthrough_decoder(host, port);
+ if (IS_ERR(cxld))
+ return PTR_ERR(cxld);
+
+ dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
+ }
+
+ return 0;
+}
+
+static int add_host_bridge_dport(struct device *match, void *arg)
+{
+ int rc;
+ acpi_status status;
+ unsigned long long uid;
+ struct acpi_cedt_chbs *chbs;
+ struct cxl_port *root_port = arg;
+ struct device *host = root_port->dev.parent;
+ struct acpi_device *bridge = to_cxl_host_bridge(match);
+
+ if (!bridge)
+ return 0;
+
+ status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
+ &uid);
+ if (status != AE_OK) {
+ dev_err(host, "unable to retrieve _UID of %s\n",
+ dev_name(match));
+ return -ENODEV;
+ }
+
+ chbs = cxl_acpi_match_chbs(host, uid);
+ if (IS_ERR(chbs))
+ dev_dbg(host, "No CHBS found for Host Bridge: %s\n",
+ dev_name(match));
+
+ rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
+ if (rc) {
+ dev_err(host, "failed to add downstream port: %s\n",
+ dev_name(match));
+ return rc;
+ }
+ dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
+ return 0;
+}
+
+static int add_root_nvdimm_bridge(struct device *match, void *data)
+{
+ struct cxl_decoder *cxld;
+ struct cxl_port *root_port = data;
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *host = root_port->dev.parent;
+
+ if (!is_root_decoder(match))
+ return 0;
+
+ cxld = to_cxl_decoder(match);
+ if (!(cxld->flags & CXL_DECODER_F_PMEM))
+ return 0;
+
+ cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
+ if (IS_ERR(cxl_nvb)) {
+ dev_dbg(host, "failed to register pmem\n");
+ return PTR_ERR(cxl_nvb);
+ }
+ dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
+ dev_name(&cxl_nvb->dev));
+ return 1;
+}
+
+static int cxl_acpi_probe(struct platform_device *pdev)
+{
+ int rc;
+ acpi_status status;
+ struct cxl_port *root_port;
+ struct device *host = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(host);
+
+ root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
+ if (IS_ERR(root_port))
+ return PTR_ERR(root_port);
+ dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
+
+ status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt);
+ if (ACPI_FAILURE(status))
+ return -ENXIO;
+
+ rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
+ add_host_bridge_dport);
+ if (rc)
+ goto out;
+
+ cxl_add_cfmws_decoders(host, root_port);
+
+ /*
+ * Root level scanned with host-bridges as dports; now scan host-bridges
+ * for their role as CXL uports to their CXL-capable PCIe Root Ports.
+ */
+ rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
+ add_host_bridge_uport);
+ if (rc)
+ goto out;
+
+ if (IS_ENABLED(CONFIG_CXL_PMEM))
+ rc = device_for_each_child(&root_port->dev, root_port,
+ add_root_nvdimm_bridge);
+
+out:
+ acpi_put_table(acpi_cedt);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+static const struct acpi_device_id cxl_acpi_ids[] = {
+ { "ACPI0017", 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
+
+static struct platform_driver cxl_acpi_driver = {
+ .probe = cxl_acpi_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .acpi_match_table = cxl_acpi_ids,
+ },
+};
+
+module_platform_driver(cxl_acpi_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CXL);
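In the new acpi.c, the CFMWS interleave fields are exponents per the CXL 2.0 CEDT encoding: CFMWS_INTERLEAVE_WAYS() computes 2^interleave_ways, and CFMWS_INTERLEAVE_GRANULARITY() returns granularity + 8, i.e. log2 of the interleave granularity in bytes (encoding 0 corresponds to 256 bytes). A small worked sketch with hypothetical raw field values:

/*
 * Sketch: hypothetical CFMWS field values and what the macros above
 * derive from them.
 *
 *   cfmws->interleave_ways = 2
 *       CFMWS_INTERLEAVE_WAYS(cfmws)        == 1 << 2 == 4 ways
 *
 *   cfmws->granularity = 0
 *       CFMWS_INTERLEAVE_GRANULARITY(cfmws) == 0 + 8  == 8
 *       (2^8 == 256-byte interleave granularity)
 */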
diff --git a/drivers/cxl/bus.c b/drivers/cxl/bus.c
deleted file mode 100644
index 58f74796d525..000000000000
--- a/drivers/cxl/bus.c
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/device.h>
-#include <linux/module.h>
-
-/**
- * DOC: cxl bus
- *
- * The CXL bus provides namespace for control devices and a rendezvous
- * point for cross-device interleave coordination.
- */
-struct bus_type cxl_bus_type = {
- .name = "cxl",
-};
-EXPORT_SYMBOL_GPL(cxl_bus_type);
-
-static __init int cxl_bus_init(void)
-{
- return bus_register(&cxl_bus_type);
-}
-
-static void cxl_bus_exit(void)
-{
- bus_unregister(&cxl_bus_type);
-}
-
-module_init(cxl_bus_init);
-module_exit(cxl_bus_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/core.c b/drivers/cxl/core.c
new file mode 100644
index 000000000000..a2e4d54fc7bc
--- /dev/null
+++ b/drivers/cxl/core.c
@@ -0,0 +1,1067 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include "cxl.h"
+#include "mem.h"
+
+/**
+ * DOC: cxl core
+ *
+ * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
+ * point for cross-device interleave coordination through cxl ports.
+ */
+
+static DEFINE_IDA(cxl_port_ida);
+
+static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", dev->type->name);
+}
+static DEVICE_ATTR_RO(devtype);
+
+static struct attribute *cxl_base_attributes[] = {
+ &dev_attr_devtype.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_base_attribute_group = {
+ .attrs = cxl_base_attributes,
+};
+
+static ssize_t start_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%#llx\n", cxld->range.start);
+}
+static DEVICE_ATTR_RO(start);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
+}
+static DEVICE_ATTR_RO(size);
+
+#define CXL_DECODER_FLAG_ATTR(name, flag) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cxl_decoder *cxld = to_cxl_decoder(dev); \
+ \
+ return sysfs_emit(buf, "%s\n", \
+ (cxld->flags & (flag)) ? "1" : "0"); \
+} \
+static DEVICE_ATTR_RO(name)
+
+CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
+CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
+CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
+CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
+CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
+
+static ssize_t target_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ switch (cxld->target_type) {
+ case CXL_DECODER_ACCELERATOR:
+ return sysfs_emit(buf, "accelerator\n");
+ case CXL_DECODER_EXPANDER:
+ return sysfs_emit(buf, "expander\n");
+ }
+ return -ENXIO;
+}
+static DEVICE_ATTR_RO(target_type);
+
+static ssize_t target_list_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ ssize_t offset = 0;
+ int i, rc = 0;
+
+ device_lock(dev);
+ for (i = 0; i < cxld->interleave_ways; i++) {
+ struct cxl_dport *dport = cxld->target[i];
+ struct cxl_dport *next = NULL;
+
+ if (!dport)
+ break;
+
+ if (i + 1 < cxld->interleave_ways)
+ next = cxld->target[i + 1];
+ rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
+ next ? "," : "");
+ if (rc < 0)
+ break;
+ offset += rc;
+ }
+ device_unlock(dev);
+
+ if (rc < 0)
+ return rc;
+
+ rc = sysfs_emit_at(buf, offset, "\n");
+ if (rc < 0)
+ return rc;
+
+ return offset + rc;
+}
+static DEVICE_ATTR_RO(target_list);
+
+static struct attribute *cxl_decoder_base_attrs[] = {
+ &dev_attr_start.attr,
+ &dev_attr_size.attr,
+ &dev_attr_locked.attr,
+ &dev_attr_target_list.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_decoder_base_attribute_group = {
+ .attrs = cxl_decoder_base_attrs,
+};
+
+static struct attribute *cxl_decoder_root_attrs[] = {
+ &dev_attr_cap_pmem.attr,
+ &dev_attr_cap_ram.attr,
+ &dev_attr_cap_type2.attr,
+ &dev_attr_cap_type3.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_decoder_root_attribute_group = {
+ .attrs = cxl_decoder_root_attrs,
+};
+
+static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
+ &cxl_decoder_root_attribute_group,
+ &cxl_decoder_base_attribute_group,
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+static struct attribute *cxl_decoder_switch_attrs[] = {
+ &dev_attr_target_type.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_decoder_switch_attribute_group = {
+ .attrs = cxl_decoder_switch_attrs,
+};
+
+static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
+ &cxl_decoder_switch_attribute_group,
+ &cxl_decoder_base_attribute_group,
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+static void cxl_decoder_release(struct device *dev)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ struct cxl_port *port = to_cxl_port(dev->parent);
+
+ ida_free(&port->decoder_ida, cxld->id);
+ kfree(cxld);
+}
+
+static const struct device_type cxl_decoder_switch_type = {
+ .name = "cxl_decoder_switch",
+ .release = cxl_decoder_release,
+ .groups = cxl_decoder_switch_attribute_groups,
+};
+
+static const struct device_type cxl_decoder_root_type = {
+ .name = "cxl_decoder_root",
+ .release = cxl_decoder_release,
+ .groups = cxl_decoder_root_attribute_groups,
+};
+
+bool is_root_decoder(struct device *dev)
+{
+ return dev->type == &cxl_decoder_root_type;
+}
+EXPORT_SYMBOL_GPL(is_root_decoder);
+
+struct cxl_decoder *to_cxl_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+ "not a cxl_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_decoder, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_decoder);
+
+static void cxl_dport_release(struct cxl_dport *dport)
+{
+ list_del(&dport->list);
+ put_device(dport->dport);
+ kfree(dport);
+}
+
+static void cxl_port_release(struct device *dev)
+{
+ struct cxl_port *port = to_cxl_port(dev);
+ struct cxl_dport *dport, *_d;
+
+ device_lock(dev);
+ list_for_each_entry_safe(dport, _d, &port->dports, list)
+ cxl_dport_release(dport);
+ device_unlock(dev);
+ ida_free(&cxl_port_ida, port->id);
+ kfree(port);
+}
+
+static const struct attribute_group *cxl_port_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_port_type = {
+ .name = "cxl_port",
+ .release = cxl_port_release,
+ .groups = cxl_port_attribute_groups,
+};
+
+struct cxl_port *to_cxl_port(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
+ "not a cxl_port device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_port, dev);
+}
+
+static void unregister_port(void *_port)
+{
+ struct cxl_port *port = _port;
+ struct cxl_dport *dport;
+
+ device_lock(&port->dev);
+ list_for_each_entry(dport, &port->dports, list) {
+ char link_name[CXL_TARGET_STRLEN];
+
+ if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
+ dport->port_id) >= CXL_TARGET_STRLEN)
+ continue;
+ sysfs_remove_link(&port->dev.kobj, link_name);
+ }
+ device_unlock(&port->dev);
+ device_unregister(&port->dev);
+}
+
+static void cxl_unlink_uport(void *_port)
+{
+ struct cxl_port *port = _port;
+
+ sysfs_remove_link(&port->dev.kobj, "uport");
+}
+
+static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
+{
+ int rc;
+
+ rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
+ if (rc)
+ return rc;
+ return devm_add_action_or_reset(host, cxl_unlink_uport, port);
+}
+
+static struct cxl_port *cxl_port_alloc(struct device *uport,
+ resource_size_t component_reg_phys,
+ struct cxl_port *parent_port)
+{
+ struct cxl_port *port;
+ struct device *dev;
+ int rc;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err;
+ port->id = rc;
+
+ /*
+ * The top-level cxl_port "cxl_root" does not have a cxl_port as
+ * its parent and it does not have any corresponding component
+ * registers as its decode is described by a fixed platform
+ * description.
+ */
+ dev = &port->dev;
+ if (parent_port)
+ dev->parent = &parent_port->dev;
+ else
+ dev->parent = uport;
+
+ port->uport = uport;
+ port->component_reg_phys = component_reg_phys;
+ ida_init(&port->decoder_ida);
+ INIT_LIST_HEAD(&port->dports);
+
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_port_type;
+
+ return port;
+
+err:
+ kfree(port);
+ return ERR_PTR(rc);
+}
+
+/**
+ * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
+ * @host: host device for devm operations
+ * @uport: "physical" device implementing this upstream port
+ * @component_reg_phys: (optional) for configurable cxl_port instances
+ * @parent_port: next hop up in the CXL memory decode hierarchy
+ */
+struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+ resource_size_t component_reg_phys,
+ struct cxl_port *parent_port)
+{
+ struct cxl_port *port;
+ struct device *dev;
+ int rc;
+
+ port = cxl_port_alloc(uport, component_reg_phys, parent_port);
+ if (IS_ERR(port))
+ return port;
+
+ dev = &port->dev;
+ if (parent_port)
+ rc = dev_set_name(dev, "port%d", port->id);
+ else
+ rc = dev_set_name(dev, "root%d", port->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, unregister_port, port);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = devm_cxl_link_uport(host, port);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return port;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_port);
+
+static struct cxl_dport *find_dport(struct cxl_port *port, int id)
+{
+ struct cxl_dport *dport;
+
+ device_lock_assert(&port->dev);
+ list_for_each_entry (dport, &port->dports, list)
+ if (dport->port_id == id)
+ return dport;
+ return NULL;
+}
+
+static int add_dport(struct cxl_port *port, struct cxl_dport *new)
+{
+ struct cxl_dport *dup;
+
+ device_lock(&port->dev);
+ dup = find_dport(port, new->port_id);
+ if (dup)
+ dev_err(&port->dev,
+ "unable to add dport%d-%s non-unique port id (%s)\n",
+ new->port_id, dev_name(new->dport),
+ dev_name(dup->dport));
+ else
+ list_add_tail(&new->list, &port->dports);
+ device_unlock(&port->dev);
+
+ return dup ? -EEXIST : 0;
+}
+
+/**
+ * cxl_add_dport - append downstream port data to a cxl_port
+ * @port: the cxl_port that references this dport
+ * @dport_dev: firmware or PCI device representing the dport
+ * @port_id: identifier for this dport in a decoder's target list
+ * @component_reg_phys: optional location of CXL component registers
+ *
+ * Note that all allocations and links are undone by cxl_port deletion
+ * and release.
+ */
+int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
+ resource_size_t component_reg_phys)
+{
+ char link_name[CXL_TARGET_STRLEN];
+ struct cxl_dport *dport;
+ int rc;
+
+ if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
+ CXL_TARGET_STRLEN)
+ return -EINVAL;
+
+ dport = kzalloc(sizeof(*dport), GFP_KERNEL);
+ if (!dport)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dport->list);
+ dport->dport = get_device(dport_dev);
+ dport->port_id = port_id;
+ dport->component_reg_phys = component_reg_phys;
+ dport->port = port;
+
+ rc = add_dport(port, dport);
+ if (rc)
+ goto err;
+
+ rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
+ if (rc)
+ goto err;
+
+ return 0;
+err:
+ cxl_dport_release(dport);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(cxl_add_dport);
+
+static struct cxl_decoder *
+cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
+ resource_size_t len, int interleave_ways,
+ int interleave_granularity, enum cxl_decoder_type type,
+ unsigned long flags)
+{
+ struct cxl_decoder *cxld;
+ struct device *dev;
+ int rc = 0;
+
+ if (interleave_ways < 1)
+ return ERR_PTR(-EINVAL);
+
+ device_lock(&port->dev);
+ if (list_empty(&port->dports))
+ rc = -EINVAL;
+ device_unlock(&port->dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
+ if (!cxld)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err;
+
+ *cxld = (struct cxl_decoder) {
+ .id = rc,
+ .range = {
+ .start = base,
+ .end = base + len - 1,
+ },
+ .flags = flags,
+ .interleave_ways = interleave_ways,
+ .interleave_granularity = interleave_granularity,
+ .target_type = type,
+ };
+
+ /* handle implied target_list */
+ if (interleave_ways == 1)
+ cxld->target[0] =
+ list_first_entry(&port->dports, struct cxl_dport, list);
+ dev = &cxld->dev;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &port->dev;
+ dev->bus = &cxl_bus_type;
+
+ /* root ports do not have a cxl_port_type parent */
+ if (port->dev.parent->type == &cxl_port_type)
+ dev->type = &cxl_decoder_switch_type;
+ else
+ dev->type = &cxl_decoder_root_type;
+
+ return cxld;
+err:
+ kfree(cxld);
+ return ERR_PTR(rc);
+}
+
+static void unregister_dev(void *dev)
+{
+ device_unregister(dev);
+}
+
+struct cxl_decoder *
+devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
+ resource_size_t base, resource_size_t len,
+ int interleave_ways, int interleave_granularity,
+ enum cxl_decoder_type type, unsigned long flags)
+{
+ struct cxl_decoder *cxld;
+ struct device *dev;
+ int rc;
+
+ cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
+ interleave_granularity, type, flags);
+ if (IS_ERR(cxld))
+ return cxld;
+
+ dev = &cxld->dev;
+ rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, unregister_dev, dev);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxld;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
+
+/**
+ * cxl_probe_component_regs() - Detect CXL Component register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping containing the HDM Decoder Capability Header
+ * @map: Map object describing the register block information found
+ *
+ * See CXL 2.0 8.2.4 Component Register Layout and Definition
+ * See CXL 2.0 8.2.5.5 CXL Device Register Interface
+ *
+ * Probe for component register information and return it in map object.
+ */
+void cxl_probe_component_regs(struct device *dev, void __iomem *base,
+ struct cxl_component_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_component_reg_map) { 0 };
+
+ /*
+ * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
+ * CXL 2.0 8.2.4 Table 141.
+ */
+ base += CXL_CM_OFFSET;
+
+ cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
+
+ if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
+ dev_err(dev,
+ "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
+ return;
+ }
+
+ /* It's assumed that future versions will be backward compatible */
+ cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ void __iomem *register_block;
+ u32 hdr;
+ int decoder_cnt;
+ u16 cap_id, offset;
+ u32 length;
+
+ hdr = readl(base + cap * 0x4);
+
+ cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
+ offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
+ register_block = base + offset;
+
+ switch (cap_id) {
+ case CXL_CM_CAP_CAP_ID_HDM:
+ dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
+ offset);
+
+ hdr = readl(register_block);
+
+ decoder_cnt = cxl_hdm_decoder_count(hdr);
+ length = 0x20 * decoder_cnt + 0x10;
+
+ map->hdm_decoder.valid = true;
+ map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
+ map->hdm_decoder.size = length;
+ break;
+ default:
+ dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
+ offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
+
+static void cxl_nvdimm_bridge_release(struct device *dev)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+ kfree(cxl_nvb);
+}
+
+static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_nvdimm_bridge_type = {
+ .name = "cxl_nvdimm_bridge",
+ .release = cxl_nvdimm_bridge_release,
+ .groups = cxl_nvdimm_bridge_attribute_groups,
+};
+
+struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
+ "not a cxl_nvdimm_bridge device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm_bridge, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+
+static struct cxl_nvdimm_bridge *
+cxl_nvdimm_bridge_alloc(struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+
+ cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
+ if (!cxl_nvb)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvb->dev;
+ cxl_nvb->port = port;
+ cxl_nvb->state = CXL_NVB_NEW;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &port->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_bridge_type;
+
+ return cxl_nvb;
+}
+
+static void unregister_nvb(void *_cxl_nvb)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
+ bool flush;
+
+ /*
+ * If the bridge was ever activated then there might be in-flight state
+ * work to flush. Once the state has been changed to 'dead' then no new
+ * work can be queued by user-triggered bind.
+ */
+ device_lock(&cxl_nvb->dev);
+ flush = cxl_nvb->state != CXL_NVB_NEW;
+ cxl_nvb->state = CXL_NVB_DEAD;
+ device_unlock(&cxl_nvb->dev);
+
+ /*
+ * Even though the device core will trigger device_release_driver()
+ * before the unregister, it does not know about the fact that
+ * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
+ * release now and flush it before tearing down the nvdimm device
+ * hierarchy.
+ */
+ device_release_driver(&cxl_nvb->dev);
+ if (flush)
+ flush_work(&cxl_nvb->state_work);
+ device_unregister(&cxl_nvb->dev);
+}
+
+struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_PMEM))
+ return ERR_PTR(-ENXIO);
+
+ cxl_nvb = cxl_nvdimm_bridge_alloc(port);
+ if (IS_ERR(cxl_nvb))
+ return cxl_nvb;
+
+ dev = &cxl_nvb->dev;
+ rc = dev_set_name(dev, "nvdimm-bridge");
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return cxl_nvb;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
+
+static void cxl_nvdimm_release(struct device *dev)
+{
+ struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+
+ kfree(cxl_nvd);
+}
+
+static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_nvdimm_type = {
+ .name = "cxl_nvdimm",
+ .release = cxl_nvdimm_release,
+ .groups = cxl_nvdimm_attribute_groups,
+};
+
+bool is_cxl_nvdimm(struct device *dev)
+{
+ return dev->type == &cxl_nvdimm_type;
+}
+EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+
+struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
+ "not a cxl_nvdimm device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+
+static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+
+ cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
+ if (!cxl_nvd)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvd->dev;
+ cxl_nvd->cxlmd = cxlmd;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlmd->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_type;
+
+ return cxl_nvd;
+}
+
+int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+ int rc;
+
+ cxl_nvd = cxl_nvdimm_alloc(cxlmd);
+ if (IS_ERR(cxl_nvd))
+ return PTR_ERR(cxl_nvd);
+
+ dev = &cxl_nvd->dev;
+ rc = dev_set_name(dev, "pmem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(host, unregister_dev, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
+
+/**
+ * cxl_probe_device_regs() - Detect CXL Device register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
+ * @map: Map object describing the register block information found
+ *
+ * Probe for device register information and return it in map object.
+ */
+void cxl_probe_device_regs(struct device *dev, void __iomem *base,
+ struct cxl_device_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_device_reg_map){ 0 };
+
+ cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
+ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+ CXLDEV_CAP_ARRAY_CAP_ID)
+ return;
+
+ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ u32 offset, length;
+ u16 cap_id;
+
+ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+ readl(base + cap * 0x10));
+ offset = readl(base + cap * 0x10 + 0x4);
+ length = readl(base + cap * 0x10 + 0x8);
+
+ switch (cap_id) {
+ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+
+ map->status.valid = true;
+ map->status.offset = offset;
+ map->status.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+ map->mbox.valid = true;
+ map->mbox.offset = offset;
+ map->mbox.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+ break;
+ case CXLDEV_CAP_CAP_ID_MEMDEV:
+ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+ map->memdev.valid = true;
+ map->memdev.offset = offset;
+ map->memdev.size = length;
+ break;
+ default:
+ if (cap_id >= 0x8000)
+ dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
+ else
+ dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+
+static void __iomem *devm_cxl_iomap_block(struct device *dev,
+ resource_size_t addr,
+ resource_size_t length)
+{
+ void __iomem *ret_val;
+ struct resource *res;
+
+ res = devm_request_mem_region(dev, addr, length, dev_name(dev));
+ if (!res) {
+ resource_size_t end = addr + length - 1;
+
+ dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
+ return NULL;
+ }
+
+ ret_val = devm_ioremap(dev, addr, length);
+ if (!ret_val)
+ dev_err(dev, "Failed to map region %pr\n", res);
+
+ return ret_val;
+}
+
+int cxl_map_component_regs(struct pci_dev *pdev,
+ struct cxl_component_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+ resource_size_t length;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ phys_addr += map->component_map.hdm_decoder.offset;
+ length = map->component_map.hdm_decoder.size;
+ regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
+ if (!regs->hdm_decoder)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+
+int cxl_map_device_regs(struct pci_dev *pdev,
+ struct cxl_device_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ if (map->device_map.status.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.status.offset;
+ length = map->device_map.status.size;
+ regs->status = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->status)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.mbox.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.mbox.offset;
+ length = map->device_map.mbox.size;
+ regs->mbox = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->mbox)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.memdev.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.memdev.offset;
+ length = map->device_map.memdev.size;
+ regs->memdev = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->memdev)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_device_regs);
+
+/**
+ * __cxl_driver_register - register a driver for the cxl bus
+ * @cxl_drv: cxl driver structure to attach
+ * @owner: owning module/driver
+ * @modname: KBUILD_MODNAME for parent driver
+ */
+int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
+ const char *modname)
+{
+ if (!cxl_drv->probe) {
+ pr_debug("%s ->probe() must be specified\n", modname);
+ return -EINVAL;
+ }
+
+ if (!cxl_drv->name) {
+ pr_debug("%s ->name must be specified\n", modname);
+ return -EINVAL;
+ }
+
+ if (!cxl_drv->id) {
+ pr_debug("%s ->id must be specified\n", modname);
+ return -EINVAL;
+ }
+
+ cxl_drv->drv.bus = &cxl_bus_type;
+ cxl_drv->drv.owner = owner;
+ cxl_drv->drv.mod_name = modname;
+ cxl_drv->drv.name = cxl_drv->name;
+
+ return driver_register(&cxl_drv->drv);
+}
+EXPORT_SYMBOL_GPL(__cxl_driver_register);
+
+void cxl_driver_unregister(struct cxl_driver *cxl_drv)
+{
+ driver_unregister(&cxl_drv->drv);
+}
+EXPORT_SYMBOL_GPL(cxl_driver_unregister);
+
+static int cxl_device_id(struct device *dev)
+{
+ if (dev->type == &cxl_nvdimm_bridge_type)
+ return CXL_DEVICE_NVDIMM_BRIDGE;
+ if (dev->type == &cxl_nvdimm_type)
+ return CXL_DEVICE_NVDIMM;
+ return 0;
+}
+
+static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
+ cxl_device_id(dev));
+}
+
+static int cxl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return cxl_device_id(dev) == to_cxl_drv(drv)->id;
+}
+
+static int cxl_bus_probe(struct device *dev)
+{
+ return to_cxl_drv(dev->driver)->probe(dev);
+}
+
+static int cxl_bus_remove(struct device *dev)
+{
+ struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
+
+ if (cxl_drv->remove)
+ cxl_drv->remove(dev);
+ return 0;
+}
+
+struct bus_type cxl_bus_type = {
+ .name = "cxl",
+ .uevent = cxl_bus_uevent,
+ .match = cxl_bus_match,
+ .probe = cxl_bus_probe,
+ .remove = cxl_bus_remove,
+};
+EXPORT_SYMBOL_GPL(cxl_bus_type);
+
+static __init int cxl_core_init(void)
+{
+ return bus_register(&cxl_bus_type);
+}
+
+static void cxl_core_exit(void)
+{
+ bus_unregister(&cxl_bus_type);
+}
+
+module_init(cxl_core_init);
+module_exit(cxl_core_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6f14838c2d25..b6bda39a59e3 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -4,10 +4,51 @@
#ifndef __CXL_H__
#define __CXL_H__
+#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
+/**
+ * DOC: cxl objects
+ *
+ * The CXL core objects like ports, decoders, and regions are shared
+ * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
+ * (port-driver, region-driver, nvdimm object-drivers... etc).
+ */
+
+/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
+#define CXL_CM_OFFSET 0x1000
+#define CXL_CM_CAP_HDR_OFFSET 0x0
+#define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
+#define CM_CAP_HDR_CAP_ID 1
+#define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
+#define CM_CAP_HDR_CAP_VERSION 1
+#define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
+#define CM_CAP_HDR_CACHE_MEM_VERSION 1
+#define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
+#define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)
+
+#define CXL_CM_CAP_CAP_ID_HDM 0x5
+#define CXL_CM_CAP_CAP_HDM_VERSION 1
+
+/* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
+#define CXL_HDM_DECODER_CAP_OFFSET 0x0
+#define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
+#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
+#define CXL_HDM_DECODER0_BASE_LOW_OFFSET 0x10
+#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET 0x14
+#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET 0x18
+#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET 0x1c
+#define CXL_HDM_DECODER0_CTRL_OFFSET 0x20
+
+static inline int cxl_hdm_decoder_count(u32 cap_hdr)
+{
+ int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);
+
+ return val ? val * 2 : 1;
+}
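As a quick illustration of the encoding handled above (a raw field value of 0 means a single decoder; non-zero values are doubled):

	/* Illustrative only: expected results for a few raw capability values. */
	cxl_hdm_decoder_count(0x0);	/* -> 1 decoder  */
	cxl_hdm_decoder_count(0x1);	/* -> 2 decoders */
	cxl_hdm_decoder_count(0x4);	/* -> 8 decoders */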
+
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -34,62 +75,253 @@
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
-/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
-#define CXLMDEV_STATUS_OFFSET 0x0
-#define CXLMDEV_DEV_FATAL BIT(0)
-#define CXLMDEV_FW_HALT BIT(1)
-#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
-#define CXLMDEV_MS_NOT_READY 0
-#define CXLMDEV_MS_READY 1
-#define CXLMDEV_MS_ERROR 2
-#define CXLMDEV_MS_DISABLED 3
-#define CXLMDEV_READY(status) \
- (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \
- CXLMDEV_MS_READY)
-#define CXLMDEV_MBOX_IF_READY BIT(4)
-#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
-#define CXLMDEV_RESET_NEEDED_NOT 0
-#define CXLMDEV_RESET_NEEDED_COLD 1
-#define CXLMDEV_RESET_NEEDED_WARM 2
-#define CXLMDEV_RESET_NEEDED_HOT 3
-#define CXLMDEV_RESET_NEEDED_CXL 4
-#define CXLMDEV_RESET_NEEDED(status) \
- (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
- CXLMDEV_RESET_NEEDED_NOT)
-
-struct cxl_memdev;
+#define CXL_COMPONENT_REGS() \
+ void __iomem *hdm_decoder
+
+#define CXL_DEVICE_REGS() \
+ void __iomem *status; \
+ void __iomem *mbox; \
+ void __iomem *memdev
+
+/* See note for 'struct cxl_regs' for the rationale of this organization */
+/*
+ * CXL_COMPONENT_REGS - Common set of CXL Component register block base pointers
+ * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
+ */
+struct cxl_component_regs {
+ CXL_COMPONENT_REGS();
+};
+
+/* See note for 'struct cxl_regs' for the rationale of this organization */
+/*
+ * CXL_DEVICE_REGS - Common set of CXL Device register block base pointers
+ * @status: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
+ */
+struct cxl_device_regs {
+ CXL_DEVICE_REGS();
+};
+
+/*
+ * Note, the anonymous union organization allows for per
+ * register-block-type helper routines, without requiring block-type
+ * agnostic code to include the prefix.
+ */
+struct cxl_regs {
+ union {
+ struct {
+ CXL_COMPONENT_REGS();
+ };
+ struct cxl_component_regs component;
+ };
+ union {
+ struct {
+ CXL_DEVICE_REGS();
+ };
+ struct cxl_device_regs device_regs;
+ };
+};
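The point of the anonymous unions is that the same mapping can be reached with or without the sub-struct prefix; the patch double-checks the layout with a BUILD_BUG_ON() in cxl_mem_init(). A minimal illustration, where 'mbox_base' stands in for an ioremapped mailbox block:

	/* Illustrative only: both expressions alias the same storage. */
	struct cxl_regs regs = { 0 };

	regs.mbox = mbox_base;				/* anonymous CXL_DEVICE_REGS() member */
	WARN_ON(regs.device_regs.mbox != regs.mbox);	/* same pointer via the named sub-struct */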
+
+struct cxl_reg_map {
+ bool valid;
+ unsigned long offset;
+ unsigned long size;
+};
+
+struct cxl_component_reg_map {
+ struct cxl_reg_map hdm_decoder;
+};
+
+struct cxl_device_reg_map {
+ struct cxl_reg_map status;
+ struct cxl_reg_map mbox;
+ struct cxl_reg_map memdev;
+};
+
+struct cxl_register_map {
+ struct list_head list;
+ u64 block_offset;
+ u8 reg_type;
+ u8 barno;
+ union {
+ struct cxl_component_reg_map component_map;
+ struct cxl_device_reg_map device_map;
+ };
+};
+
+void cxl_probe_component_regs(struct device *dev, void __iomem *base,
+ struct cxl_component_reg_map *map);
+void cxl_probe_device_regs(struct device *dev, void __iomem *base,
+ struct cxl_device_reg_map *map);
+int cxl_map_component_regs(struct pci_dev *pdev,
+ struct cxl_component_regs *regs,
+ struct cxl_register_map *map);
+int cxl_map_device_regs(struct pci_dev *pdev,
+ struct cxl_device_regs *regs,
+ struct cxl_register_map *map);
+
+#define CXL_RESOURCE_NONE ((resource_size_t) -1)
+#define CXL_TARGET_STRLEN 20
+
+/*
+ * cxl_decoder flags that define the type of memory / devices this
+ * decoder supports as well as configuration lock status. See "CXL 2.0
+ * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
+ */
+#define CXL_DECODER_F_RAM BIT(0)
+#define CXL_DECODER_F_PMEM BIT(1)
+#define CXL_DECODER_F_TYPE2 BIT(2)
+#define CXL_DECODER_F_TYPE3 BIT(3)
+#define CXL_DECODER_F_LOCK BIT(4)
+#define CXL_DECODER_F_MASK GENMASK(4, 0)
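For example (illustrative only), a locked decoder targeting persistent memory on a type-3 device would carry:

	/* Illustrative only: flag combination for a locked pmem/type-3 decoder. */
	unsigned long flags = CXL_DECODER_F_PMEM | CXL_DECODER_F_TYPE3 | CXL_DECODER_F_LOCK;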
+
+enum cxl_decoder_type {
+ CXL_DECODER_ACCELERATOR = 2,
+ CXL_DECODER_EXPANDER = 3,
+};
+
/**
- * struct cxl_mem - A CXL memory device
- * @pdev: The PCI device associated with this CXL device.
- * @regs: IO mappings to the device's MMIO
- * @status_regs: CXL 2.0 8.2.8.3 Device Status Registers
- * @mbox_regs: CXL 2.0 8.2.8.4 Mailbox Registers
- * @memdev_regs: CXL 2.0 8.2.8.5 Memory Device Registers
- * @payload_size: Size of space for payload
- * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
- * @mbox_mutex: Mutex to synchronize mailbox access.
- * @firmware_version: Firmware version for the memory device.
- * @enabled_commands: Hardware commands found enabled in CEL.
- * @pmem_range: Persistent memory capacity information.
- * @ram_range: Volatile memory capacity information.
+ * struct cxl_decoder - CXL address range decode configuration
+ * @dev: this decoder's device
+ * @id: kernel device name id
+ * @range: address range considered by this decoder
+ * @interleave_ways: number of cxl_dports in this decode
+ * @interleave_granularity: data stride per dport
+ * @target_type: accelerator vs expander (type2 vs type3) selector
+ * @flags: memory type capabilities and locking
+ * @target: active ordered target list in current decoder configuration
*/
-struct cxl_mem {
- struct pci_dev *pdev;
- void __iomem *regs;
- struct cxl_memdev *cxlmd;
+struct cxl_decoder {
+ struct device dev;
+ int id;
+ struct range range;
+ int interleave_ways;
+ int interleave_granularity;
+ enum cxl_decoder_type target_type;
+ unsigned long flags;
+ struct cxl_dport *target[];
+};
- void __iomem *status_regs;
- void __iomem *mbox_regs;
- void __iomem *memdev_regs;
- size_t payload_size;
- struct mutex mbox_mutex; /* Protects device mailbox and firmware */
- char firmware_version[0x10];
- unsigned long *enabled_cmds;
+enum cxl_nvdimm_bridge_state {
+ CXL_NVB_NEW,
+ CXL_NVB_DEAD,
+ CXL_NVB_ONLINE,
+ CXL_NVB_OFFLINE,
+};
- struct range pmem_range;
- struct range ram_range;
+struct cxl_nvdimm_bridge {
+ struct device dev;
+ struct cxl_port *port;
+ struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus_descriptor nd_desc;
+ struct work_struct state_work;
+ enum cxl_nvdimm_bridge_state state;
};
+struct cxl_nvdimm {
+ struct device dev;
+ struct cxl_memdev *cxlmd;
+ struct nvdimm *nvdimm;
+};
+
+/**
+ * struct cxl_port - logical collection of upstream port devices and
+ * downstream port devices to construct a CXL memory
+ * decode hierarchy.
+ * @dev: this port's device
+ * @uport: PCI or platform device implementing the upstream port capability
+ * @id: id for port device-name
+ * @dports: cxl_dport instances referenced by decoders
+ * @decoder_ida: allocator for decoder ids
+ * @component_reg_phys: component register capability base address (optional)
+ */
+struct cxl_port {
+ struct device dev;
+ struct device *uport;
+ int id;
+ struct list_head dports;
+ struct ida decoder_ida;
+ resource_size_t component_reg_phys;
+};
+
+/**
+ * struct cxl_dport - CXL downstream port
+ * @dport: PCI bridge or firmware device representing the downstream link
+ * @port_id: unique hardware identifier for dport in decoder target list
+ * @component_reg_phys: downstream port component registers
+ * @port: reference to cxl_port that contains this downstream port
+ * @list: node for a cxl_port's list of cxl_dport instances
+ */
+struct cxl_dport {
+ struct device *dport;
+ int port_id;
+ resource_size_t component_reg_phys;
+ struct cxl_port *port;
+ struct list_head list;
+};
+
+struct cxl_port *to_cxl_port(struct device *dev);
+struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+ resource_size_t component_reg_phys,
+ struct cxl_port *parent_port);
+
+int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
+ resource_size_t component_reg_phys);
+
+struct cxl_decoder *to_cxl_decoder(struct device *dev);
+bool is_root_decoder(struct device *dev);
+struct cxl_decoder *
+devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
+ resource_size_t base, resource_size_t len,
+ int interleave_ways, int interleave_granularity,
+ enum cxl_decoder_type type, unsigned long flags);
+
+/*
+ * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
+ * single ported host-bridges need not publish a decoder capability when a
+ * passthrough decode can be assumed, i.e. all transactions that the uport sees
+ * are claimed and passed to the single dport. Default the range to
+ * 0-base, 0-length until the first CXL region is activated.
+ */
+static inline struct cxl_decoder *
+devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
+{
+ return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
+ CXL_DECODER_EXPANDER, 0);
+}
+
extern struct bus_type cxl_bus_type;
+
+struct cxl_driver {
+ const char *name;
+ int (*probe)(struct device *dev);
+ void (*remove)(struct device *dev);
+ struct device_driver drv;
+ int id;
+};
+
+static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct cxl_driver, drv);
+}
+
+int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
+ const char *modname);
+#define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
+void cxl_driver_unregister(struct cxl_driver *cxl_drv);
+
+#define CXL_DEVICE_NVDIMM_BRIDGE 1
+#define CXL_DEVICE_NVDIMM 2
+
+#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
+#define CXL_MODALIAS_FMT "cxl:t%d"
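As a sketch of how this bus API is meant to be consumed (the cxl_pmem driver added later in this patch does exactly this), a minimal driver would look roughly like:

	/* Illustrative only: a minimal, hypothetical cxl bus driver. */
	static int example_probe(struct device *dev)
	{
		return 0;
	}

	static struct cxl_driver example_driver = {
		.name = "cxl_example",		/* hypothetical name */
		.probe = example_probe,
		.id = CXL_DEVICE_NVDIMM,
	};

	/* module init/exit would then call: */
	cxl_driver_register(&example_driver);
	cxl_driver_unregister(&example_driver);
	MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);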
+
+struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
+struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port);
+struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
+bool is_cxl_nvdimm(struct device *dev);
+int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/mem.h b/drivers/cxl/mem.h
new file mode 100644
index 000000000000..8f02d02b26b4
--- /dev/null
+++ b/drivers/cxl/mem.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020-2021 Intel Corporation. */
+#ifndef __CXL_MEM_H__
+#define __CXL_MEM_H__
+#include <linux/cdev.h>
+#include "cxl.h"
+
+/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
+#define CXLMDEV_STATUS_OFFSET 0x0
+#define CXLMDEV_DEV_FATAL BIT(0)
+#define CXLMDEV_FW_HALT BIT(1)
+#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
+#define CXLMDEV_MS_NOT_READY 0
+#define CXLMDEV_MS_READY 1
+#define CXLMDEV_MS_ERROR 2
+#define CXLMDEV_MS_DISABLED 3
+#define CXLMDEV_READY(status) \
+ (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \
+ CXLMDEV_MS_READY)
+#define CXLMDEV_MBOX_IF_READY BIT(4)
+#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
+#define CXLMDEV_RESET_NEEDED_NOT 0
+#define CXLMDEV_RESET_NEEDED_COLD 1
+#define CXLMDEV_RESET_NEEDED_WARM 2
+#define CXLMDEV_RESET_NEEDED_HOT 3
+#define CXLMDEV_RESET_NEEDED_CXL 4
+#define CXLMDEV_RESET_NEEDED(status) \
+ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
+ CXLMDEV_RESET_NEEDED_NOT)
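Putting the status helpers above together (illustrative only; the mailbox setup path in pci.c performs a check along these lines):

	/* Illustrative only: reject a device whose media is not ready or needs a reset. */
	u64 md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);

	if (!CXLMDEV_READY(md_status) || CXLMDEV_RESET_NEEDED(md_status))
		return -EBUSY;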
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+/**
+ * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
+ * @dev: driver core device object
+ * @cdev: char dev core object for ioctl operations
+ * @cxlm: pointer to the parent device driver data
+ * @id: id number of this memdev instance.
+ */
+struct cxl_memdev {
+ struct device dev;
+ struct cdev cdev;
+ struct cxl_mem *cxlm;
+ int id;
+};
+
+/**
+ * struct cxl_mem - A CXL memory device
+ * @pdev: The PCI device associated with this CXL device.
+ * @cxlmd: Logical memory device chardev / interface
+ * @regs: Parsed register blocks
+ * @payload_size: Size of space for payload
+ * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
+ * @lsa_size: Size of Label Storage Area
+ * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
+ * @mbox_mutex: Mutex to synchronize mailbox access.
+ * @firmware_version: Firmware version for the memory device.
+ * @enabled_cmds: Hardware commands found enabled in CEL.
+ * @pmem_range: Persistent memory capacity information.
+ * @ram_range: Volatile memory capacity information.
+ */
+struct cxl_mem {
+ struct pci_dev *pdev;
+ struct cxl_memdev *cxlmd;
+
+ struct cxl_regs regs;
+
+ size_t payload_size;
+ size_t lsa_size;
+ struct mutex mbox_mutex; /* Protects device mailbox and firmware */
+ char firmware_version[0x10];
+ unsigned long *enabled_cmds;
+
+ struct range pmem_range;
+ struct range ram_range;
+};
+#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/pci.c
index 2acc6173da36..4cf351a3cf99 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/pci.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
+#include <linux/list.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
@@ -13,12 +14,14 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "pci.h"
#include "cxl.h"
+#include "mem.h"
/**
- * DOC: cxl mem
+ * DOC: cxl pci
*
- * This implements a CXL memory device ("type-3") as it is defined by the
- * Compute Express Link specification.
+ * This implements the PCI exclusive functionality for a CXL device as it is
+ * defined by the Compute Express Link specification. CXL devices may surface
+ * certain functionality even if it isn't CXL enabled.
*
* The driver has several responsibilities, mainly:
* - Create the memX device and register on the CXL bus.
@@ -26,18 +29,10 @@
* - Probe the device attributes to establish sysfs interface.
* - Provide an IOCTL interface to userspace to communicate with the device for
* things like firmware update.
- * - Support management of interleave sets.
- * - Handle and manage error conditions.
*/
-/*
- * An entire PCI topology full of devices should be enough for any
- * config
- */
-#define CXL_MEM_MAX_DEVS 65536
-
#define cxl_doorbell_busy(cxlm) \
- (readl((cxlm)->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET) & \
+ (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
CXLDEV_MBOX_CTRL_DOORBELL)
/* CXL 2.0 - 8.2.8.4 */
@@ -56,7 +51,14 @@ enum opcode {
CXL_MBOX_OP_GET_LSA = 0x4102,
CXL_MBOX_OP_SET_LSA = 0x4103,
CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200,
+ CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201,
+ CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202,
+ CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203,
CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204,
+ CXL_MBOX_OP_GET_POISON = 0x4300,
+ CXL_MBOX_OP_INJECT_POISON = 0x4301,
+ CXL_MBOX_OP_CLEAR_POISON = 0x4302,
+ CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
CXL_MBOX_OP_MAX = 0x10000
@@ -92,20 +94,6 @@ struct mbox_cmd {
#define CXL_MBOX_SUCCESS 0
};
-/**
- * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
- * @dev: driver core device object
- * @cdev: char dev core object for ioctl operations
- * @cxlm: pointer to the parent device driver data
- * @id: id number of this memdev instance.
- */
-struct cxl_memdev {
- struct device dev;
- struct cdev cdev;
- struct cxl_mem *cxlm;
- int id;
-};
-
static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
static DECLARE_RWSEM(cxl_memdev_rwsem);
@@ -178,6 +166,18 @@ static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_LSA, 0x8, ~0, 0),
CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
+ CXL_CMD(SET_LSA, ~0, 0, 0),
+ CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
+ CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
+ CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
+ CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
+ CXL_CMD(GET_POISON, 0x10, ~0, 0),
+ CXL_CMD(INJECT_POISON, 0x8, 0, 0),
+ CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
+ CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
+ CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
+ CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0),
};
/*
@@ -292,7 +292,7 @@ static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
struct mbox_cmd *mbox_cmd)
{
- void __iomem *payload = cxlm->mbox_regs + CXLDEV_MBOX_PAYLOAD_OFFSET;
+ void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
u64 cmd_reg, status_reg;
size_t out_len;
int rc;
@@ -335,12 +335,12 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #2, #3 */
- writeq(cmd_reg, cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
+ writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
/* #4 */
dev_dbg(&cxlm->pdev->dev, "Sending command\n");
writel(CXLDEV_MBOX_CTRL_DOORBELL,
- cxlm->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET);
+ cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
/* #5 */
rc = cxl_mem_wait_for_doorbell(cxlm);
@@ -350,7 +350,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #6 */
- status_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_STATUS_OFFSET);
+ status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
mbox_cmd->return_code =
FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
@@ -360,7 +360,7 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #7 */
- cmd_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
+ cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
/* #8 */
@@ -421,7 +421,7 @@ static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
goto out;
}
- md_status = readq(cxlm->memdev_regs + CXLMDEV_STATUS_OFFSET);
+ md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
rc = -EBUSY;
@@ -890,75 +890,9 @@ static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
return 0;
}
-/**
- * cxl_mem_setup_regs() - Setup necessary MMIO.
- * @cxlm: The CXL memory device to communicate with.
- *
- * Return: 0 if all necessary registers mapped.
- *
- * A memory device is required by spec to implement a certain set of MMIO
- * regions. The purpose of this function is to enumerate and map those
- * registers.
- */
-static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
-{
- struct device *dev = &cxlm->pdev->dev;
- int cap, cap_count;
- u64 cap_array;
-
- cap_array = readq(cxlm->regs + CXLDEV_CAP_ARRAY_OFFSET);
- if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
- CXLDEV_CAP_ARRAY_CAP_ID)
- return -ENODEV;
-
- cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- void __iomem *register_block;
- u32 offset;
- u16 cap_id;
-
- cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
- readl(cxlm->regs + cap * 0x10));
- offset = readl(cxlm->regs + cap * 0x10 + 0x4);
- register_block = cxlm->regs + offset;
-
- switch (cap_id) {
- case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
- dev_dbg(dev, "found Status capability (0x%x)\n", offset);
- cxlm->status_regs = register_block;
- break;
- case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
- dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
- cxlm->mbox_regs = register_block;
- break;
- case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
- dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
- break;
- case CXLDEV_CAP_CAP_ID_MEMDEV:
- dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
- cxlm->memdev_regs = register_block;
- break;
- default:
- dev_dbg(dev, "Unknown cap ID: %d (0x%x)\n", cap_id, offset);
- break;
- }
- }
-
- if (!cxlm->status_regs || !cxlm->mbox_regs || !cxlm->memdev_regs) {
- dev_err(dev, "registers not found: %s%s%s\n",
- !cxlm->status_regs ? "status " : "",
- !cxlm->mbox_regs ? "mbox " : "",
- !cxlm->memdev_regs ? "memdev" : "");
- return -ENXIO;
- }
-
- return 0;
-}
-
static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
{
- const int cap = readl(cxlm->mbox_regs + CXLDEV_MBOX_CAPS_OFFSET);
+ const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
cxlm->payload_size =
1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
@@ -983,55 +917,62 @@ static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
return 0;
}
-static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
- u32 reg_hi)
+static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct cxl_mem *cxlm;
- void __iomem *regs;
- u64 offset;
- u8 bar;
- int rc;
- cxlm = devm_kzalloc(&pdev->dev, sizeof(*cxlm), GFP_KERNEL);
+ cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
if (!cxlm) {
dev_err(dev, "No memory available\n");
- return NULL;
- }
-
- offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
- bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
-
- /* Basic sanity check that BAR is big enough */
- if (pci_resource_len(pdev, bar) < offset) {
- dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
- &pdev->resource[bar], (unsigned long long)offset);
- return NULL;
- }
-
- rc = pcim_iomap_regions(pdev, BIT(bar), pci_name(pdev));
- if (rc) {
- dev_err(dev, "failed to map registers\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
- regs = pcim_iomap_table(pdev)[bar];
mutex_init(&cxlm->mbox_mutex);
cxlm->pdev = pdev;
- cxlm->regs = regs + offset;
cxlm->enabled_cmds =
devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count),
sizeof(unsigned long),
GFP_KERNEL | __GFP_ZERO);
if (!cxlm->enabled_cmds) {
dev_err(dev, "No memory available for bitmap\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
- dev_dbg(dev, "Mapped CXL Memory Device resource\n");
return cxlm;
}
+static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm,
+ u8 bar, u64 offset)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+
+ /* Basic sanity check that BAR is big enough */
+ if (pci_resource_len(pdev, bar) < offset) {
+ dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
+ &pdev->resource[bar], (unsigned long long)offset);
+ return IOMEM_ERR_PTR(-ENXIO);
+ }
+
+ addr = pci_iomap(pdev, bar, 0);
+ if (!addr) {
+ dev_err(dev, "failed to map registers\n");
+ return addr;
+ }
+
+ dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n",
+ bar, offset);
+
+ return addr;
+}
+
+static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base)
+{
+ pci_iounmap(cxlm->pdev, base);
+}
+
static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
{
int pos;
@@ -1055,6 +996,171 @@ static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
return 0;
}
+static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
+ struct cxl_register_map *map)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct device *dev = &pdev->dev;
+ struct cxl_component_reg_map *comp_map;
+ struct cxl_device_reg_map *dev_map;
+
+ switch (map->reg_type) {
+ case CXL_REGLOC_RBI_COMPONENT:
+ comp_map = &map->component_map;
+ cxl_probe_component_regs(dev, base, comp_map);
+ if (!comp_map->hdm_decoder.valid) {
+ dev_err(dev, "HDM decoder registers not found\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(dev, "Set up component registers\n");
+ break;
+ case CXL_REGLOC_RBI_MEMDEV:
+ dev_map = &map->device_map;
+ cxl_probe_device_regs(dev, base, dev_map);
+ if (!dev_map->status.valid || !dev_map->mbox.valid ||
+ !dev_map->memdev.valid) {
+ dev_err(dev, "registers not found: %s%s%s\n",
+ !dev_map->status.valid ? "status " : "",
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
+ return -ENXIO;
+ }
+
+ dev_dbg(dev, "Probing device registers...\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct device *dev = &pdev->dev;
+
+ switch (map->reg_type) {
+ case CXL_REGLOC_RBI_COMPONENT:
+ cxl_map_component_regs(pdev, &cxlm->regs.component, map);
+ dev_dbg(dev, "Mapping component registers...\n");
+ break;
+ case CXL_REGLOC_RBI_MEMDEV:
+ cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
+ dev_dbg(dev, "Probing device registers...\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi,
+ u8 *bar, u64 *offset, u8 *reg_type)
+{
+ *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
+ *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
+ *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
+}
+
+/**
+ * cxl_mem_setup_regs() - Setup necessary MMIO.
+ * @cxlm: The CXL memory device to communicate with.
+ *
+ * Return: 0 if all necessary registers mapped.
+ *
+ * A memory device is required by spec to implement a certain set of MMIO
+ * regions. The purpose of this function is to enumerate and map those
+ * registers.
+ */
+static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct device *dev = &pdev->dev;
+ u32 regloc_size, regblocks;
+ void __iomem *base;
+ int regloc, i;
+ struct cxl_register_map *map, *n;
+ LIST_HEAD(register_maps);
+ int ret = 0;
+
+ regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
+ if (!regloc) {
+ dev_err(dev, "register location dvsec not found\n");
+ return -ENXIO;
+ }
+
+ if (pci_request_mem_regions(pdev, pci_name(pdev)))
+ return -ENODEV;
+
+ /* Get the size of the Register Locator DVSEC */
+ pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
+ regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
+
+ regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
+ regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
+
+ for (i = 0; i < regblocks; i++, regloc += 8) {
+ u32 reg_lo, reg_hi;
+ u8 reg_type;
+ u64 offset;
+ u8 bar;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto free_maps;
+ }
+
+ list_add(&map->list, &register_maps);
+
+ pci_read_config_dword(pdev, regloc, &reg_lo);
+ pci_read_config_dword(pdev, regloc + 4, &reg_hi);
+
+ cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset,
+ &reg_type);
+
+ dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
+ bar, offset, reg_type);
+
+ base = cxl_mem_map_regblock(cxlm, bar, offset);
+ if (!base) {
+ ret = -ENOMEM;
+ goto free_maps;
+ }
+
+ map->barno = bar;
+ map->block_offset = offset;
+ map->reg_type = reg_type;
+
+ ret = cxl_probe_regs(cxlm, base + offset, map);
+
+ /* Always unmap the regblock regardless of probe success */
+ cxl_mem_unmap_regblock(cxlm, base);
+
+ if (ret)
+ goto free_maps;
+ }
+
+ pci_release_mem_regions(pdev);
+
+ list_for_each_entry(map, &register_maps, list) {
+ ret = cxl_map_regs(cxlm, map);
+ if (ret)
+ goto free_maps;
+ }
+
+free_maps:
+ list_for_each_entry_safe(map, n, &register_maps, list) {
+ list_del(&map->list);
+ kfree(map);
+ }
+
+ return ret;
+}
+
static struct cxl_memdev *to_cxl_memdev(struct device *dev)
{
return container_of(dev, struct cxl_memdev, dev);
@@ -1094,6 +1200,16 @@ static ssize_t payload_max_show(struct device *dev,
}
static DEVICE_ATTR_RO(payload_max);
+static ssize_t label_storage_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+}
+static DEVICE_ATTR_RO(label_storage_size);
+
static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1123,6 +1239,7 @@ static struct device_attribute dev_attr_pmem_size =
static struct attribute *cxl_memdev_attributes[] = {
&dev_attr_firmware_version.attr,
&dev_attr_payload_max.attr,
+ &dev_attr_label_storage_size.attr,
NULL,
};
@@ -1215,7 +1332,8 @@ err:
return ERR_PTR(rc);
}
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
+ struct cxl_mem *cxlm)
{
struct cxl_memdev *cxlmd;
struct device *dev;
@@ -1224,7 +1342,7 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
cxlmd = cxl_memdev_alloc(cxlm);
if (IS_ERR(cxlmd))
- return PTR_ERR(cxlmd);
+ return cxlmd;
dev = &cxlmd->dev;
rc = dev_set_name(dev, "mem%d", cxlmd->id);
@@ -1242,8 +1360,10 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
if (rc)
goto err;
- return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
- cxlmd);
+ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxlmd;
err:
/*
@@ -1252,7 +1372,7 @@ err:
*/
cxl_memdev_shutdown(cxlmd);
put_device(dev);
- return rc;
+ return ERR_PTR(rc);
}
static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
@@ -1455,6 +1575,7 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
cxlm->pmem_range.end =
le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
+ cxlm->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
return 0;
@@ -1462,46 +1583,17 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct device *dev = &pdev->dev;
- struct cxl_mem *cxlm = NULL;
- u32 regloc_size, regblocks;
- int rc, regloc, i;
+ struct cxl_memdev *cxlmd;
+ struct cxl_mem *cxlm;
+ int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_OFFSET);
- if (!regloc) {
- dev_err(dev, "register location dvsec not found\n");
- return -ENXIO;
- }
-
- /* Get the size of the Register Locator DVSEC */
- pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
- regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
-
- regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
- regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
-
- for (i = 0; i < regblocks; i++, regloc += 8) {
- u32 reg_lo, reg_hi;
- u8 reg_type;
-
- /* "register low and high" contain other bits */
- pci_read_config_dword(pdev, regloc, &reg_lo);
- pci_read_config_dword(pdev, regloc + 4, &reg_hi);
-
- reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
-
- if (reg_type == CXL_REGLOC_RBI_MEMDEV) {
- cxlm = cxl_mem_create(pdev, reg_lo, reg_hi);
- break;
- }
- }
-
- if (!cxlm)
- return -ENODEV;
+ cxlm = cxl_mem_create(pdev);
+ if (IS_ERR(cxlm))
+ return PTR_ERR(cxlm);
rc = cxl_mem_setup_regs(cxlm);
if (rc)
@@ -1519,7 +1611,14 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- return cxl_mem_add_memdev(cxlm);
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+ if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
+
+ return rc;
}
static const struct pci_device_id cxl_mem_pci_tbl[] = {
@@ -1544,6 +1643,10 @@ static __init int cxl_mem_init(void)
dev_t devt;
int rc;
+ /* Double check the anonymous union trickery in struct cxl_regs */
+ BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
+ offsetof(struct cxl_regs, device_regs.memdev));
+
rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
if (rc)
return rc;
diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h
index af3ec078cf6c..dad7a831f65f 100644
--- a/drivers/cxl/pci.h
+++ b/drivers/cxl/pci.h
@@ -13,7 +13,7 @@
#define PCI_DVSEC_VENDOR_ID_CXL 0x1E98
#define PCI_DVSEC_ID_CXL 0x0
-#define PCI_DVSEC_ID_CXL_REGLOC_OFFSET 0x8
+#define PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID 0x8
#define PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET 0xC
/* BAR Indicator Register (BIR) */
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
new file mode 100644
index 000000000000..0088e41dd2f3
--- /dev/null
+++ b/drivers/cxl/pmem.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+#include <linux/libnvdimm.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ndctl.h>
+#include <linux/async.h>
+#include <linux/slab.h>
+#include "mem.h"
+#include "cxl.h"
+
+/*
+ * Ordered workqueue for cxl nvdimm device arrival and departure
+ * to coordinate bus rescans when a bridge arrives and trigger remove
+ * operations when the bridge is removed.
+ */
+static struct workqueue_struct *cxl_pmem_wq;
+
+static void unregister_nvdimm(void *nvdimm)
+{
+ nvdimm_delete(nvdimm);
+}
+
+static int match_nvdimm_bridge(struct device *dev, const void *data)
+{
+ return strcmp(dev_name(dev), "nvdimm-bridge") == 0;
+}
+
+static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge);
+ if (!dev)
+ return NULL;
+ return to_cxl_nvdimm_bridge(dev);
+}
+
+static int cxl_nvdimm_probe(struct device *dev)
+{
+ struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ unsigned long flags = 0;
+ struct nvdimm *nvdimm;
+ int rc = -ENXIO;
+
+ cxl_nvb = cxl_find_nvdimm_bridge();
+ if (!cxl_nvb)
+ return -ENXIO;
+
+ device_lock(&cxl_nvb->dev);
+ if (!cxl_nvb->nvdimm_bus)
+ goto out;
+
+ set_bit(NDD_LABELING, &flags);
+ nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0,
+ NULL);
+ if (!nvdimm)
+ goto out;
+
+ rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
+out:
+ device_unlock(&cxl_nvb->dev);
+ put_device(&cxl_nvb->dev);
+
+ return rc;
+}
+
+static struct cxl_driver cxl_nvdimm_driver = {
+ .name = "cxl_nvdimm",
+ .probe = cxl_nvdimm_probe,
+ .id = CXL_DEVICE_NVDIMM,
+};
+
+static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ return -ENOTTY;
+}
+
+static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
+{
+ if (cxl_nvb->nvdimm_bus)
+ return true;
+ cxl_nvb->nvdimm_bus =
+ nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
+ return cxl_nvb->nvdimm_bus != NULL;
+}
+
+static int cxl_nvdimm_release_driver(struct device *dev, void *data)
+{
+ if (!is_cxl_nvdimm(dev))
+ return 0;
+ device_release_driver(dev);
+ return 0;
+}
+
+static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
+{
+ if (!nvdimm_bus)
+ return;
+
+ /*
+ * Set the state of cxl_nvdimm devices to unbound / idle before
+ * nvdimm_bus_unregister() rips the nvdimm objects out from
+ * underneath them.
+ */
+ bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
+ nvdimm_bus_unregister(nvdimm_bus);
+}
+
+static void cxl_nvb_update_state(struct work_struct *work)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb =
+ container_of(work, typeof(*cxl_nvb), state_work);
+ struct nvdimm_bus *victim_bus = NULL;
+ bool release = false, rescan = false;
+
+ device_lock(&cxl_nvb->dev);
+ switch (cxl_nvb->state) {
+ case CXL_NVB_ONLINE:
+ if (!online_nvdimm_bus(cxl_nvb)) {
+ dev_err(&cxl_nvb->dev,
+ "failed to establish nvdimm bus\n");
+ release = true;
+ } else
+ rescan = true;
+ break;
+ case CXL_NVB_OFFLINE:
+ case CXL_NVB_DEAD:
+ victim_bus = cxl_nvb->nvdimm_bus;
+ cxl_nvb->nvdimm_bus = NULL;
+ break;
+ default:
+ break;
+ }
+ device_unlock(&cxl_nvb->dev);
+
+ if (release)
+ device_release_driver(&cxl_nvb->dev);
+ if (rescan) {
+ int rc = bus_rescan_devices(&cxl_bus_type);
+
+ dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
+ }
+ offline_nvdimm_bus(victim_bus);
+
+ put_device(&cxl_nvb->dev);
+}
+
+static void cxl_nvdimm_bridge_remove(struct device *dev)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+ if (cxl_nvb->state == CXL_NVB_ONLINE)
+ cxl_nvb->state = CXL_NVB_OFFLINE;
+ if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
+ get_device(&cxl_nvb->dev);
+}
+
+static int cxl_nvdimm_bridge_probe(struct device *dev)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+ if (cxl_nvb->state == CXL_NVB_DEAD)
+ return -ENXIO;
+
+ if (cxl_nvb->state == CXL_NVB_NEW) {
+ cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
+ .provider_name = "CXL",
+ .module = THIS_MODULE,
+ .ndctl = cxl_pmem_ctl,
+ };
+
+ INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
+ }
+
+ cxl_nvb->state = CXL_NVB_ONLINE;
+ if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
+ get_device(&cxl_nvb->dev);
+
+ return 0;
+}
+
+static struct cxl_driver cxl_nvdimm_bridge_driver = {
+ .name = "cxl_nvdimm_bridge",
+ .probe = cxl_nvdimm_bridge_probe,
+ .remove = cxl_nvdimm_bridge_remove,
+ .id = CXL_DEVICE_NVDIMM_BRIDGE,
+};
+
+static __init int cxl_pmem_init(void)
+{
+ int rc;
+
+ cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
+ if (!cxl_pmem_wq)
+ return -ENXIO;
+
+ rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
+ if (rc)
+ goto err_bridge;
+
+ rc = cxl_driver_register(&cxl_nvdimm_driver);
+ if (rc)
+ goto err_nvdimm;
+
+ return 0;
+
+err_nvdimm:
+ cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
+err_bridge:
+ destroy_workqueue(cxl_pmem_wq);
+ return rc;
+}
+
+static __exit void cxl_pmem_exit(void)
+{
+ cxl_driver_unregister(&cxl_nvdimm_driver);
+ cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
+ destroy_workqueue(cxl_pmem_wq);
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(cxl_pmem_init);
+module_exit(cxl_pmem_exit);
+MODULE_IMPORT_NS(CXL);
+MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
+MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 5fa6ae9dbc8b..44736cbd446e 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
return -ENXIO;
if (nr_pages < 0)
- return nr_pages;
+ return -EINVAL;
avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
kaddr, pfn);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 20d9bddbb985..394e6e1e9686 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
- struct dma_fence **fences, **nfences, **a_fences, **b_fences;
- int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
+ struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
+ int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
if (!sync_file)
@@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* If a sync_file can only be created with sync_file_merge
* and sync_file_create, this is a reasonable assumption.
*/
- for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+ for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
struct dma_fence *pt_a = a_fences[i_a];
struct dma_fence *pt_b = b_fences[i_b];
@@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
fences = nfences;
}
- if (sync_file_set_fence(sync_file, fences, i) < 0) {
- kfree(fences);
+ if (sync_file_set_fence(sync_file, fences, i) < 0)
goto err;
- }
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
err:
+ while (i)
+ dma_fence_put(fences[--i]);
+ kfree(fences);
fput(sync_file->file);
return NULL;
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 9a841ce5f0c5..0fe0676f8e1d 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
@@ -888,6 +889,13 @@ static int msgdma_probe(struct platform_device *pdev)
if (ret)
goto fail;
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id, dma_dev);
+ if (ret == -EINVAL)
+ dev_warn(&pdev->dev, "device was not probed from DT\n");
+ else if (ret && ret != -ENODEV)
+ goto fail;
+
dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");
return 0;
@@ -908,6 +916,8 @@ static int msgdma_remove(struct platform_device *pdev)
{
struct msgdma_device *mdev = platform_get_drvdata(pdev);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->dmadev);
msgdma_dev_remove(mdev);
@@ -916,9 +926,19 @@ static int msgdma_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id msgdma_match[] = {
+ { .compatible = "altr,socfpga-msgdma", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, msgdma_match);
+#endif
+
static struct platform_driver msgdma_driver = {
.driver = {
.name = "altera-msgdma",
+ .of_match_table = of_match_ptr(msgdma_match),
},
.probe = msgdma_probe,
.remove = msgdma_remove,
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index ed2ab46b15e7..045ead46ec8f 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1235,7 +1235,11 @@ static int fsl_qdma_probe(struct platform_device *pdev)
fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_mask failure.\n");
+ return ret;
+ }
platform_set_drvdata(pdev, fsl_qdma);
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 025d8ad5a63c..92caae55aece 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -201,6 +201,7 @@ EXPORT_SYMBOL_GPL(hsu_dma_get_status);
*/
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
+ struct dma_chan_percpu *stat;
struct hsu_dma_chan *hsuc;
struct hsu_dma_desc *desc;
unsigned long flags;
@@ -210,6 +211,7 @@ int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
return 0;
hsuc = &chip->hsu->chan[nr];
+ stat = this_cpu_ptr(hsuc->vchan.chan.local);
spin_lock_irqsave(&hsuc->vchan.lock, flags);
desc = hsuc->desc;
@@ -221,6 +223,7 @@ int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
} else {
vchan_cookie_complete(&desc->vdesc);
desc->status = DMA_COMPLETE;
+ stat->bytes_transferred += desc->length;
hsu_dma_start_transfer(hsuc);
}
}
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index d4419bf1fede..e9def577c697 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -296,9 +296,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
void idxd_wq_del_cdev(struct idxd_wq *wq)
{
struct idxd_cdev *idxd_cdev;
- struct idxd_cdev_context *cdev_ctx;
- cdev_ctx = &ictx[wq->idxd->data->type];
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 26482c7d4c3a..fc708be7ad9a 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -294,6 +294,14 @@ struct idxd_desc {
struct idxd_wq *wq;
};
+/*
+ * This is a software-defined error for the completion status. We overload an error code
+ * that will never appear in the completion status and is only used in the SWERR register.
+ */
+enum idxd_completion_status {
+ IDXD_COMP_DESC_ABORT = 0xff,
+};
+
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
@@ -482,4 +490,10 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
+{
+ idxd_dma_complete_txd(desc, reason);
+ idxd_free_desc(desc->wq, desc);
+}
+
#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 442d55c11a5f..c0f4c0422f32 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
spin_lock_init(&idxd->irq_entries[i].list_lock);
}
+ idxd_msix_perm_setup(idxd);
+
irq_entry = &idxd->irq_entries[0];
rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
0, "idxd-misc", irq_entry);
@@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
idxd_unmask_error_interrupts(idxd);
- idxd_msix_perm_setup(idxd);
return 0;
err_wq_irqs:
@@ -162,6 +163,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
err_misc_irq:
/* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
+ idxd_msix_perm_clear(idxd);
err_irq_entries:
pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
@@ -351,7 +353,8 @@ static int idxd_setup_internals(struct idxd_device *idxd)
init_waitqueue_head(&idxd->cmd_waitq);
if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
- idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL);
+ idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
+ dev_to_node(dev));
if (!idxd->int_handles)
return -ENOMEM;
}
@@ -757,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev)
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
synchronize_irq(irq_entry->vector);
- free_irq(irq_entry->vector, irq_entry);
if (i == 0)
continue;
idxd_flush_pending_llist(irq_entry);
idxd_flush_work_list(irq_entry);
}
-
- idxd_msix_perm_clear(idxd);
- idxd_release_int_handles(idxd);
- pci_free_irq_vectors(pdev);
- pci_iounmap(pdev, idxd->reg_base);
- pci_disable_device(pdev);
- destroy_workqueue(idxd->wq);
+ flush_workqueue(idxd->wq);
}
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
+ struct idxd_irq_entry *irq_entry;
+ int msixcnt = pci_msix_vec_count(pdev);
+ int i;
dev_dbg(&pdev->dev, "%s called\n", __func__);
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
idxd_unregister_devices(idxd);
- perfmon_pmu_remove(idxd);
+
+ for (i = 0; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ free_irq(irq_entry->vector, irq_entry);
+ }
+ idxd_msix_perm_clear(idxd);
+ idxd_release_int_handles(idxd);
+ pci_free_irq_vectors(pdev);
+ pci_iounmap(pdev, idxd->reg_base);
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ pci_disable_device(pdev);
+ destroy_workqueue(idxd->wq);
+ perfmon_pmu_remove(idxd);
+ device_unregister(&idxd->conf_dev);
}
static struct pci_driver idxd_pci_driver = {
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index ae68e1e5487a..4e3a7198c0ca 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
return false;
}
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
- idxd_dma_complete_txd(desc, reason);
- idxd_free_desc(desc->wq, desc);
-}
-
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
reason = IDXD_COMPLETE_DEV_FAIL;
llist_for_each_entry_safe(desc, t, head, llnode) {
- if (desc->completion->status) {
- if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+ if (status) {
+ if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ complete_desc(desc, IDXD_COMPLETE_ABORT);
+ (*processed)++;
+ continue;
+ }
+
+ if (unlikely(status != DSA_COMP_SUCCESS))
match_fault(desc, data);
complete_desc(desc, reason);
(*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
list_for_each_entry(desc, &flist, list) {
- if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+ if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ complete_desc(desc, IDXD_COMPLETE_ABORT);
+ continue;
+ }
+
+ if (unlikely(status != DSA_COMP_SUCCESS))
match_fault(desc, data);
complete_desc(desc, reason);
}
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 19afb62abaff..36c9c1a89b7e 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
* Descriptor completion vectors are 1...N for MSIX. We will round
* robin through the N vectors.
*/
- wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
if (!idxd->int_handles) {
desc->hw->int_handle = wq->vec_ptr;
} else {
- desc->vector = wq->vec_ptr;
/*
* int_handles are only for descriptor completion. However for device
* MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
@@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
+static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+ struct idxd_desc *desc)
+{
+ struct idxd_desc *d, *n;
+
+ lockdep_assert_held(&ie->list_lock);
+ list_for_each_entry_safe(d, n, &ie->work_list, list) {
+ if (d == desc) {
+ list_del(&d->list);
+ return d;
+ }
+ }
+
+ /*
+ * At this point, the descriptor that needs to be aborted is held by the completion
+ * handler, which has taken it off the pending list but has not yet added it to the
+ * work list. It will be cleaned up by the interrupt handler when it sees the
+ * IDXD_COMP_DESC_ABORT for completion status.
+ */
+ return NULL;
+}
+
+static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+ struct idxd_desc *desc)
+{
+ struct idxd_desc *d, *t, *found = NULL;
+ struct llist_node *head;
+ unsigned long flags;
+
+ desc->completion->status = IDXD_COMP_DESC_ABORT;
+ /*
+ * Grab the list lock so it will block the irq thread handler. This allows the
+ * abort code to locate the descriptor that needs to be aborted.
+ */
+ spin_lock_irqsave(&ie->list_lock, flags);
+ head = llist_del_all(&ie->pending_llist);
+ if (head) {
+ llist_for_each_entry_safe(d, t, head, llnode) {
+ if (d == desc) {
+ found = desc;
+ continue;
+ }
+ list_add_tail(&d->list, &ie->work_list);
+ }
+ }
+
+ if (!found)
+ found = list_abort_desc(wq, ie, desc);
+ spin_unlock_irqrestore(&ie->list_lock, flags);
+
+ if (found)
+ complete_desc(found, IDXD_COMPLETE_ABORT);
+}
+
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;
+ struct idxd_irq_entry *ie = NULL;
void __iomem *portal;
int rc;
@@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* even on UP because the recipient is a device.
*/
wmb();
+
+ /*
+ * Pending the descriptor to the lockless list for the irq_entry
+ * that we designated the descriptor to.
+ */
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+ ie = &idxd->irq_entries[desc->vector];
+ llist_add(&desc->llnode, &ie->pending_llist);
+ }
+
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
@@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* device is not accepting descriptor at all.
*/
rc = enqcmds(portal, desc->hw);
- if (rc < 0)
+ if (rc < 0) {
+ if (ie)
+ llist_abort_desc(wq, ie, desc);
return rc;
+ }
}
percpu_ref_put(&wq->wq_active);
-
- /*
- * Pending the descriptor to the lockless list for the irq_entry
- * that we designated the descriptor to.
- */
- if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- int vec;
-
- /*
- * If the driver is on host kernel, it would be the value
- * assigned to interrupt handle, which is index for MSIX
- * vector. If it's guest then can't use the int_handle since
- * that is the index to IMS for the entire device. The guest
- * device local index will be used.
- */
- vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
- llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
- }
-
return 0;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 0460d58e3941..bb4df63906a7 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd)
device_unregister(&group->conf_dev);
}
-
- device_unregister(&idxd->conf_dev);
}
int idxd_register_bus_type(void)
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 7f116bbcfad2..2ddc31e64db0 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg_dma_len(sg);
}
+ imxdma_config_write(chan, &imxdmac->config, direction);
+
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5590c08db51..8070fd664bfc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,7 +35,6 @@
#include <linux/workqueue.h>
#include <asm/irq.h>
-#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -181,6 +180,61 @@
BIT(DMA_MEM_TO_DEV) | \
BIT(DMA_DEV_TO_DEV))
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+ s32 ap_2_ap_addr;
+ s32 ap_2_bp_addr;
+ s32 ap_2_ap_fixed_addr;
+ s32 bp_2_ap_addr;
+ s32 loopback_on_dsp_side_addr;
+ s32 mcu_interrupt_only_addr;
+ s32 firi_2_per_addr;
+ s32 firi_2_mcu_addr;
+ s32 per_2_firi_addr;
+ s32 mcu_2_firi_addr;
+ s32 uart_2_per_addr;
+ s32 uart_2_mcu_addr;
+ s32 per_2_app_addr;
+ s32 mcu_2_app_addr;
+ s32 per_2_per_addr;
+ s32 uartsh_2_per_addr;
+ s32 uartsh_2_mcu_addr;
+ s32 per_2_shp_addr;
+ s32 mcu_2_shp_addr;
+ s32 ata_2_mcu_addr;
+ s32 mcu_2_ata_addr;
+ s32 app_2_per_addr;
+ s32 app_2_mcu_addr;
+ s32 shp_2_per_addr;
+ s32 shp_2_mcu_addr;
+ s32 mshc_2_mcu_addr;
+ s32 mcu_2_mshc_addr;
+ s32 spdif_2_mcu_addr;
+ s32 mcu_2_spdif_addr;
+ s32 asrc_2_mcu_addr;
+ s32 ext_mem_2_ipu_addr;
+ s32 descrambler_addr;
+ s32 dptc_dvfs_addr;
+ s32 utra_addr;
+ s32 ram_code_start_addr;
+ /* End of v1 array */
+ s32 mcu_2_ssish_addr;
+ s32 ssish_2_mcu_addr;
+ s32 hdmi_dma_addr;
+ /* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
+ s32 mcu_2_ecspi_addr;
+ /* End of v3 array */
+ s32 mcu_2_zqspi_addr;
+ /* End of v4 array */
+};
+
/*
* Mode/Count of data node descriptors - IPCv2
*/
@@ -1829,7 +1883,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
int ret;
ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+ FW_ACTION_UEVENT, fw_name, sdma->dev,
GFP_KERNEL, sdma, sdma_load_firmware);
return ret;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 104ad420abbe..baab1ca9f621 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
case IDMAC_SDC_1:
case IDMAC_IC_7:
ipu_channel_set_priority(ipu, channel, true);
+ break;
default:
break;
}
@@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
case IDMAC_SDC_0:
case IDMAC_SDC_1:
n_desc = 4;
+ break;
default:
break;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index c1a69149c8bf..4a51fdbf5aa9 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
case 16:
if (is_mpc8308)
return false;
+ break;
case 1:
case 2:
case 4:
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index ec00b20ae8e4..ac61ecda2926 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;
ofdma_target = of_dma_find_controller(&dma_spec_target);
- if (!ofdma_target)
- return NULL;
+ if (!ofdma_target) {
+ ofdma->dma_router->route_free(ofdma->dma_router->dev,
+ route_data);
+ chan = ERR_PTR(-EPROBE_DEFER);
+ goto err;
+ }
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
if (IS_ERR_OR_NULL(chan)) {
@@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
}
}
+err:
/*
* Need to put the node back since the ofdma->of_dma_route_allocate
* has taken it for generating the new, translated dma_spec
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 43ac3ab23d4c..1a1b7d8458c9 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2282,6 +2282,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sdm845-gpi-dma" },
{ .compatible = "qcom,sm8150-gpi-dma" },
+ { .compatible = "qcom,sm8250-gpi-dma" },
{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index c4c4e8575764..f12606aeff87 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -94,6 +94,7 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
{
struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
struct sf_pdma_desc *desc;
+ unsigned long iflags;
if (chan && (!len || !dest || !src)) {
dev_err(chan->pdma->dma_dev.dev,
@@ -109,10 +110,10 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->dirn = DMA_MEM_TO_MEM;
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->vchan.lock, iflags);
chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock_irqrestore(&chan->vchan.lock, iflags);
return desc->async_tx;
}
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 112fbd22bb3f..abdf10341725 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -3,7 +3,7 @@
# DMA Engine Helpers
#
-obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
#
# DMA Controllers
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
deleted file mode 100644
index be89dd894328..000000000000
--- a/drivers/dma/sh/shdma-of.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SHDMA Device Tree glue
- *
- * Copyright (C) 2013 Renesas Electronics Inc.
- * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- */
-
-#include <linux/dmaengine.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/shdma-base.h>
-
-#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
-
-static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- u32 id = dma_spec->args[0];
- dma_cap_mask_t mask;
- struct dma_chan *chan;
-
- if (dma_spec->args_count != 1)
- return NULL;
-
- dma_cap_zero(mask);
- /* Only slave DMA channels can be allocated via DT */
- dma_cap_set(DMA_SLAVE, mask);
-
- chan = dma_request_channel(mask, shdma_chan_filter,
- (void *)(uintptr_t)id);
- if (chan)
- to_shdma_chan(chan)->hw_req = id;
-
- return chan;
-}
-
-static int shdma_of_probe(struct platform_device *pdev)
-{
- const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
- int ret;
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- shdma_of_xlate, pdev);
- if (ret < 0)
- return ret;
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev);
- if (ret < 0)
- of_dma_controller_free(pdev->dev.of_node);
-
- return ret;
-}
-
-static const struct of_device_id shdma_of_match[] = {
- { .compatible = "renesas,shdma-mux", },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
-static struct platform_driver shdma_of = {
- .driver = {
- .name = "shdma-of",
- .of_match_table = shdma_of_match,
- },
- .probe = shdma_of_probe,
-};
-
-module_platform_driver(shdma_of);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SH-DMA driver DT glue");
-MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 8f7ceb698226..1cc06900153e 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
error:
of_dma_controller_free(pdev->dev.of_node);
- pm_runtime_put(&pdev->dev);
error_pm:
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index f54ecb123a52..7dd1d3d0bf06 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
chan->config_init = false;
- ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
if (ret < 0)
return ret;
@@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev)
struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
int id, ret, scr;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index ef0d0555103d..a42164389ebc 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
/* Set dma request */
spin_lock_irqsave(&dmamux->lock, flags);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
spin_unlock_irqrestore(&dmamux->lock, flags);
goto error;
@@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev)
struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
int i, ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
@@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev)
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e8b6633ae661..93f1645ae928 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1042,9 +1042,8 @@ handle_pending:
* Move the promise into the completed list now that
* we're done with it
*/
- list_del(&vchan->processing->list);
- list_add_tail(&vchan->processing->list,
- &contract->completed_demands);
+ list_move_tail(&vchan->processing->list,
+ &contract->completed_demands);
/*
* Cyclic DMA transfers are special:
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 96ad21869ba7..a35858610780 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud)
ud->tchan_cnt),
ud->rchan_cnt - bitmap_weight(ud->rchan_map,
ud->rchan_cnt));
+ break;
default:
break;
}
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 268a08058714..7cb577e6587b 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1608,7 +1608,8 @@ static int omap_dma_context_notifier(struct notifier_block *nb,
return NOTIFY_BAD;
omap_dma_context_save(od);
break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
case CPU_CLUSTER_PM_EXIT:
omap_dma_context_restore(od);
break;
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 16b19654873d..d6b8a202474f 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
writel(0, xc->reg_ch_base + XDMAC_TSS);
/* wait until transfer is stopped */
- return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
- !(val & XDMAC_STAT_TENF), 100, 1000);
+ return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
+ !(val & XDMAC_STAT_TENF), 100, 1000);
}
/* xc->vc.lock must be held by caller */
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 75c0b8e904e5..4b9530a7bf65 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
* @genlock: Support genlock mode
* @err: Channel has errors
* @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
bool genlock;
bool err;
bool idle;
+ bool terminating;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
+
+ /*
+ * While we were running a callback, the user may have called a
+ * terminate function, which takes care of cleaning up any
+ * remaining descriptors.
+ */
+ if (chan->terminating)
+ break;
}
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
if (desc->cyclic)
chan->cyclic = true;
+ chan->terminating = false;
+
spin_unlock_irqrestore(&chan->lock, flags);
return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
+ chan->terminating = true;
xilinx_dma_free_descriptors(chan);
chan->idle = true;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 6c709803203a..b280a53e8570 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -531,7 +531,7 @@ static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
for (i = 1; i < num_src_addr; i++) {
u32 *addr = &hw_desc->src_addr2;
- addr[i-1] = lower_32_bits(dma_addr[i]);
+ addr[i - 1] = lower_32_bits(dma_addr[i]);
if (xdev->ext_addr) {
u32 *addr_ext = &hw_desc->addr_ext_23;
@@ -703,8 +703,9 @@ xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
size_t stride = hsize + xt->sgl[0].icg;
if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
- dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
- XILINX_DPDMA_ALIGN_BYTES);
+ dev_err(chan->xdev->dev,
+ "chan%u: buffer should be aligned at %d B\n",
+ chan->id, XILINX_DPDMA_ALIGN_BYTES);
return NULL;
}
@@ -917,7 +918,7 @@ static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
}
/**
- * xilinx_dpdma_chan_no_ostand - Notify no outstanding transaction event
+ * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
* @chan: DPDMA channel
*
* Notify waiters for no outstanding event, so waiters can stop the channel
@@ -936,7 +937,9 @@ static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
cnt = xilinx_dpdma_chan_ostand(chan);
if (cnt) {
- dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
+ dev_dbg(chan->xdev->dev,
+ "chan%u: %d outstanding transactions\n",
+ chan->id, cnt);
return -EWOULDBLOCK;
}
@@ -972,8 +975,8 @@ static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
return 0;
}
- dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
- xilinx_dpdma_chan_ostand(chan));
+ dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
+ chan->id, xilinx_dpdma_chan_ostand(chan));
if (ret == 0)
return -ETIMEDOUT;
@@ -1007,8 +1010,8 @@ static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
return 0;
}
- dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
- xilinx_dpdma_chan_ostand(chan));
+ dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
+ chan->id, xilinx_dpdma_chan_ostand(chan));
return -ETIMEDOUT;
}
@@ -1062,7 +1065,8 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
vchan_cyclic_callback(&active->vdesc);
else
dev_warn(chan->xdev->dev,
- "DONE IRQ with no active descriptor!\n");
+ "chan%u: DONE IRQ with no active descriptor!\n",
+ chan->id);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -1094,8 +1098,12 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
/* If the retrigger raced with vsync, retry at the next frame. */
sw_desc = list_first_entry(&pending->descriptors,
struct xilinx_dpdma_sw_desc, node);
- if (sw_desc->hw.desc_id != desc_id)
+ if (sw_desc->hw.desc_id != desc_id) {
+ dev_dbg(chan->xdev->dev,
+ "chan%u: vsync race lost (%u != %u), retrying\n",
+ chan->id, sw_desc->hw.desc_id, desc_id);
goto out;
+ }
/*
* Complete the active descriptor, if any, promote the pending
@@ -1151,10 +1159,12 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
- dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n",
+ dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
+ chan->id,
dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
- dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n",
+ dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
+ chan->id,
dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
@@ -1170,7 +1180,8 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
xilinx_dpdma_chan_dump_tx_desc(chan, active);
if (active->error)
- dev_dbg(xdev->dev, "repeated error on desc\n");
+ dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
+ chan->id);
/* Reschedule if there's no new descriptor */
if (!chan->desc.pending &&
@@ -1235,7 +1246,8 @@ static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
align, 0);
if (!chan->desc_pool) {
dev_err(chan->xdev->dev,
- "failed to allocate a descriptor pool\n");
+ "chan%u: failed to allocate a descriptor pool\n",
+ chan->id);
return -ENOMEM;
}
@@ -1588,7 +1600,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
- uint32_t chan_id = dma_spec->args[0];
+ u32 chan_id = dma_spec->args[0];
if (chan_id >= ARRAY_SIZE(xdev->chan))
return NULL;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 91164c5f0757..2fc4c3f91fd5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -271,7 +271,7 @@ config EDAC_PND2
config EDAC_IGEN6
tristate "Intel client SoC Integrated MC"
depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
- depends on X64_64 && X86_MCE_INTEL
+ depends on X86_64 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
client SoC Integrated Memory Controller using In-Band ECC IP.
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 5f7fd79ec82f..61c21bd880a4 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -20,6 +20,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index d9a16ba2ccc2..65bffde137e3 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -155,34 +155,29 @@ void eisa_driver_unregister(struct eisa_driver *edrv)
}
EXPORT_SYMBOL(eisa_driver_unregister);
-static ssize_t eisa_show_sig(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t signature_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct eisa_device *edev = to_eisa_device(dev);
return sprintf(buf, "%s\n", edev->id.sig);
}
+static DEVICE_ATTR_RO(signature);
-static DEVICE_ATTR(signature, S_IRUGO, eisa_show_sig, NULL);
-
-static ssize_t eisa_show_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct eisa_device *edev = to_eisa_device(dev);
return sprintf(buf, "%d\n", edev->state & EISA_CONFIG_ENABLED);
}
+static DEVICE_ATTR_RO(enabled);
-static DEVICE_ATTR(enabled, S_IRUGO, eisa_show_state, NULL);
-
-static ssize_t eisa_show_modalias(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct eisa_device *edev = to_eisa_device(dev);
return sprintf(buf, EISA_DEVICE_MODALIAS_FMT "\n", edev->id.sig);
}
-
-static DEVICE_ATTR(modalias, S_IRUGO, eisa_show_modalias, NULL);
+static DEVICE_ATTR_RO(modalias);
static int __init eisa_init_device(struct eisa_root_device *root,
struct eisa_device *edev,
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index e3db936becfd..c69d40ae5619 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -154,7 +154,7 @@ config EXTCON_RT8973A
from abnormal high input voltage (up to 28V).
config EXTCON_SM5502
- tristate "Silicon Mitus SM5502 EXTCON support"
+ tristate "Silicon Mitus SM5502/SM5504 EXTCON support"
depends on I2C
select IRQ_DOMAIN
select REGMAP_I2C
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
index f47016fb28a8..cd1a5f230077 100644
--- a/drivers/extcon/extcon-intel-mrfld.c
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
struct regmap *regmap = pmic->regmap;
struct mrfld_extcon_data *data;
+ unsigned int status;
unsigned int id;
int irq, ret;
@@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
/* Get initial state */
mrfld_extcon_role_detect(data);
+ /*
+ * The cached status value is used for cable detection; see the comments
+ * in mrfld_extcon_cable_detect(). We need to sync the cached value
+ * with the real state of the hardware.
+ */
+ regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+ data->status = status;
+
mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index bbc592823570..9cddf08e0696 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -773,3 +773,4 @@ module_platform_driver(max8997_muic_driver);
MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8997-muic");
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index db41d1c58efd..93da2d8379b1 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -40,17 +40,13 @@ struct sm5502_muic_info {
struct i2c_client *i2c;
struct regmap *regmap;
+ const struct sm5502_type *type;
struct regmap_irq_chip_data *irq_data;
- struct muic_irq *muic_irqs;
- unsigned int num_muic_irqs;
int irq;
bool irq_attach;
bool irq_detach;
struct work_struct irq_work;
- struct reg_data *reg_data;
- unsigned int num_reg_data;
-
struct mutex mutex;
/*
@@ -62,6 +58,18 @@ struct sm5502_muic_info {
struct delayed_work wq_detcable;
};
+struct sm5502_type {
+ struct muic_irq *muic_irqs;
+ unsigned int num_muic_irqs;
+ const struct regmap_irq_chip *irq_chip;
+
+ struct reg_data *reg_data;
+ unsigned int num_reg_data;
+
+ unsigned int otg_dev_type1;
+ int (*parse_irq)(struct sm5502_muic_info *info, int irq_type);
+};
+
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
@@ -88,7 +96,33 @@ static struct reg_data sm5502_reg_data[] = {
| SM5502_REG_INTM2_MHL_MASK,
.invert = true,
},
- { }
+};
+
+/* Default value of SM5504 register to bring up MUIC device. */
+static struct reg_data sm5504_reg_data[] = {
+ {
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
+ .reg = SM5502_REG_INTMASK1,
+ .val = SM5504_REG_INTM1_ATTACH_MASK
+ | SM5504_REG_INTM1_DETACH_MASK,
+ .invert = false,
+ }, {
+ .reg = SM5502_REG_INTMASK2,
+ .val = SM5504_REG_INTM2_RID_CHG_MASK
+ | SM5504_REG_INTM2_UVLO_MASK
+ | SM5504_REG_INTM2_POR_MASK,
+ .invert = true,
+ }, {
+ .reg = SM5502_REG_CONTROL,
+ .val = SM5502_REG_CONTROL_MANUAL_SW_MASK
+ | SM5504_REG_CONTROL_CHGTYP_MASK
+ | SM5504_REG_CONTROL_USBCHDEN_MASK
+ | SM5504_REG_CONTROL_ADC_EN_MASK,
+ .invert = true,
+ },
};
/* List of detectable cables */
@@ -199,6 +233,55 @@ static const struct regmap_irq_chip sm5502_muic_irq_chip = {
.num_irqs = ARRAY_SIZE(sm5502_irqs),
};
+/* List of supported interrupt for SM5504 */
+static struct muic_irq sm5504_muic_irqs[] = {
+ { SM5504_IRQ_INT1_ATTACH, "muic-attach" },
+ { SM5504_IRQ_INT1_DETACH, "muic-detach" },
+ { SM5504_IRQ_INT1_CHG_DET, "muic-chg-det" },
+ { SM5504_IRQ_INT1_DCD_OUT, "muic-dcd-out" },
+ { SM5504_IRQ_INT1_OVP_EVENT, "muic-ovp-event" },
+ { SM5504_IRQ_INT1_CONNECT, "muic-connect" },
+ { SM5504_IRQ_INT1_ADC_CHG, "muic-adc-chg" },
+ { SM5504_IRQ_INT2_RID_CHG, "muic-rid-chg" },
+ { SM5504_IRQ_INT2_UVLO, "muic-uvlo" },
+ { SM5504_IRQ_INT2_POR, "muic-por" },
+ { SM5504_IRQ_INT2_OVP_FET, "muic-ovp-fet" },
+ { SM5504_IRQ_INT2_OCP_LATCH, "muic-ocp-latch" },
+ { SM5504_IRQ_INT2_OCP_EVENT, "muic-ocp-event" },
+ { SM5504_IRQ_INT2_OVP_OCP_EVENT, "muic-ovp-ocp-event" },
+};
+
+/* Define interrupt list of SM5504 to register regmap_irq */
+static const struct regmap_irq sm5504_irqs[] = {
+ /* INT1 interrupts */
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_ATTACH_MASK, },
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_DETACH_MASK, },
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_CHG_DET_MASK, },
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_DCD_OUT_MASK, },
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_OVP_MASK, },
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_CONNECT_MASK, },
+ { .reg_offset = 0, .mask = SM5504_IRQ_INT1_ADC_CHG_MASK, },
+
+ /* INT2 interrupts */
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_RID_CHG_MASK,},
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_UVLO_MASK, },
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_POR_MASK, },
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_OVP_FET_MASK, },
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_OCP_LATCH_MASK, },
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_OCP_EVENT_MASK, },
+ { .reg_offset = 1, .mask = SM5504_IRQ_INT2_OVP_OCP_EVENT_MASK, },
+};
+
+static const struct regmap_irq_chip sm5504_muic_irq_chip = {
+ .name = "sm5504",
+ .status_base = SM5502_REG_INT1,
+ .mask_base = SM5502_REG_INTMASK1,
+ .mask_invert = false,
+ .num_regs = 2,
+ .irqs = sm5504_irqs,
+ .num_irqs = ARRAY_SIZE(sm5504_irqs),
+};
+
/* Define regmap configuration of SM5502 for I2C communication */
static bool sm5502_muic_volatile_reg(struct device *dev, unsigned int reg)
{
@@ -302,11 +385,9 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
return ret;
}
- switch (dev_type1) {
- case SM5502_REG_DEV_TYPE1_USB_OTG_MASK:
+ if (dev_type1 == info->type->otg_dev_type1) {
cable_type = SM5502_MUIC_ADC_GROUND_USB_OTG;
- break;
- default:
+ } else {
dev_dbg(info->dev,
"cannot identify the cable type: adc(0x%x), dev_type1(0x%x)\n",
adc, dev_type1);
@@ -359,6 +440,11 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
return ret;
}
+ if (dev_type1 == info->type->otg_dev_type1) {
+ cable_type = SM5502_MUIC_ADC_OPEN_USB_OTG;
+ break;
+ }
+
switch (dev_type1) {
case SM5502_REG_DEV_TYPE1_USB_SDP_MASK:
cable_type = SM5502_MUIC_ADC_OPEN_USB;
@@ -366,9 +452,6 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
case SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK:
cable_type = SM5502_MUIC_ADC_OPEN_TA;
break;
- case SM5502_REG_DEV_TYPE1_USB_OTG_MASK:
- cable_type = SM5502_MUIC_ADC_OPEN_USB_OTG;
- break;
default:
dev_dbg(info->dev,
"cannot identify the cable type: adc(0x%x)\n",
@@ -498,16 +581,44 @@ static int sm5502_parse_irq(struct sm5502_muic_info *info, int irq_type)
return 0;
}
+static int sm5504_parse_irq(struct sm5502_muic_info *info, int irq_type)
+{
+ switch (irq_type) {
+ case SM5504_IRQ_INT1_ATTACH:
+ info->irq_attach = true;
+ break;
+ case SM5504_IRQ_INT1_DETACH:
+ info->irq_detach = true;
+ break;
+ case SM5504_IRQ_INT1_CHG_DET:
+ case SM5504_IRQ_INT1_DCD_OUT:
+ case SM5504_IRQ_INT1_OVP_EVENT:
+ case SM5504_IRQ_INT1_CONNECT:
+ case SM5504_IRQ_INT1_ADC_CHG:
+ case SM5504_IRQ_INT2_RID_CHG:
+ case SM5504_IRQ_INT2_UVLO:
+ case SM5504_IRQ_INT2_POR:
+ case SM5504_IRQ_INT2_OVP_FET:
+ case SM5504_IRQ_INT2_OCP_LATCH:
+ case SM5504_IRQ_INT2_OCP_EVENT:
+ case SM5504_IRQ_INT2_OVP_OCP_EVENT:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static irqreturn_t sm5502_muic_irq_handler(int irq, void *data)
{
struct sm5502_muic_info *info = data;
int i, irq_type = -1, ret;
- for (i = 0; i < info->num_muic_irqs; i++)
- if (irq == info->muic_irqs[i].virq)
- irq_type = info->muic_irqs[i].irq;
+ for (i = 0; i < info->type->num_muic_irqs; i++)
+ if (irq == info->type->muic_irqs[i].virq)
+ irq_type = info->type->muic_irqs[i].irq;
- ret = sm5502_parse_irq(info, irq_type);
+ ret = info->type->parse_irq(info, irq_type);
if (ret < 0) {
dev_warn(info->dev, "cannot handle is interrupt:%d\n",
irq_type);
@@ -552,19 +663,18 @@ static void sm5502_init_dev_type(struct sm5502_muic_info *info)
version_id, vendor_id);
/* Initiazle the register of SM5502 device to bring-up */
- for (i = 0; i < info->num_reg_data; i++) {
+ for (i = 0; i < info->type->num_reg_data; i++) {
unsigned int val = 0;
- if (!info->reg_data[i].invert)
- val |= ~info->reg_data[i].val;
+ if (!info->type->reg_data[i].invert)
+ val |= ~info->type->reg_data[i].val;
else
- val = info->reg_data[i].val;
- regmap_write(info->regmap, info->reg_data[i].reg, val);
+ val = info->type->reg_data[i].val;
+ regmap_write(info->regmap, info->type->reg_data[i].reg, val);
}
}
-static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sm5022_muic_i2c_probe(struct i2c_client *i2c)
{
struct device_node *np = i2c->dev.of_node;
struct sm5502_muic_info *info;
@@ -581,10 +691,13 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
info->dev = &i2c->dev;
info->i2c = i2c;
info->irq = i2c->irq;
- info->muic_irqs = sm5502_muic_irqs;
- info->num_muic_irqs = ARRAY_SIZE(sm5502_muic_irqs);
- info->reg_data = sm5502_reg_data;
- info->num_reg_data = ARRAY_SIZE(sm5502_reg_data);
+ info->type = device_get_match_data(info->dev);
+ if (!info->type)
+ return -EINVAL;
+ if (!info->type->parse_irq) {
+ dev_err(info->dev, "parse_irq missing in struct sm5502_type\n");
+ return -EINVAL;
+ }
mutex_init(&info->mutex);
@@ -600,16 +713,17 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
/* Support irq domain for SM5502 MUIC device */
irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED;
- ret = regmap_add_irq_chip(info->regmap, info->irq, irq_flags, 0,
- &sm5502_muic_irq_chip, &info->irq_data);
+ ret = devm_regmap_add_irq_chip(info->dev, info->regmap, info->irq,
+ irq_flags, 0, info->type->irq_chip,
+ &info->irq_data);
if (ret != 0) {
dev_err(info->dev, "failed to request IRQ %d: %d\n",
info->irq, ret);
return ret;
}
- for (i = 0; i < info->num_muic_irqs; i++) {
- struct muic_irq *muic_irq = &info->muic_irqs[i];
+ for (i = 0; i < info->type->num_muic_irqs; i++) {
+ struct muic_irq *muic_irq = &info->type->muic_irqs[i];
int virq = 0;
virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
@@ -661,17 +775,29 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int sm5502_muic_i2c_remove(struct i2c_client *i2c)
-{
- struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
-
- regmap_del_irq_chip(info->irq, info->irq_data);
+static const struct sm5502_type sm5502_data = {
+ .muic_irqs = sm5502_muic_irqs,
+ .num_muic_irqs = ARRAY_SIZE(sm5502_muic_irqs),
+ .irq_chip = &sm5502_muic_irq_chip,
+ .reg_data = sm5502_reg_data,
+ .num_reg_data = ARRAY_SIZE(sm5502_reg_data),
+ .otg_dev_type1 = SM5502_REG_DEV_TYPE1_USB_OTG_MASK,
+ .parse_irq = sm5502_parse_irq,
+};
- return 0;
-}
+static const struct sm5502_type sm5504_data = {
+ .muic_irqs = sm5504_muic_irqs,
+ .num_muic_irqs = ARRAY_SIZE(sm5504_muic_irqs),
+ .irq_chip = &sm5504_muic_irq_chip,
+ .reg_data = sm5504_reg_data,
+ .num_reg_data = ARRAY_SIZE(sm5504_reg_data),
+ .otg_dev_type1 = SM5504_REG_DEV_TYPE1_USB_OTG_MASK,
+ .parse_irq = sm5504_parse_irq,
+};
static const struct of_device_id sm5502_dt_match[] = {
- { .compatible = "siliconmitus,sm5502-muic" },
+ { .compatible = "siliconmitus,sm5502-muic", .data = &sm5502_data },
+ { .compatible = "siliconmitus,sm5504-muic", .data = &sm5504_data },
{ },
};
MODULE_DEVICE_TABLE(of, sm5502_dt_match);
@@ -702,7 +828,8 @@ static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
sm5502_muic_suspend, sm5502_muic_resume);
static const struct i2c_device_id sm5502_i2c_id[] = {
- { "sm5502", TYPE_SM5502 },
+ { "sm5502", (kernel_ulong_t)&sm5502_data },
+ { "sm5504", (kernel_ulong_t)&sm5504_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
@@ -713,8 +840,7 @@ static struct i2c_driver sm5502_muic_i2c_driver = {
.pm = &sm5502_muic_pm_ops,
.of_match_table = sm5502_dt_match,
},
- .probe = sm5022_muic_i2c_probe,
- .remove = sm5502_muic_i2c_remove,
+ .probe_new = sm5022_muic_i2c_probe,
.id_table = sm5502_i2c_id,
};
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index ce1f1ec310c4..9c04315d48e2 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -8,10 +8,6 @@
#ifndef __LINUX_EXTCON_SM5502_H
#define __LINUX_EXTCON_SM5502_H
-enum sm5502_types {
- TYPE_SM5502,
-};
-
/* SM5502 registers */
enum sm5502_reg {
SM5502_REG_DEVICE_ID = 0x01,
@@ -93,6 +89,13 @@ enum sm5502_reg {
#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
+#define SM5504_REG_CONTROL_CHGTYP_SHIFT 5
+#define SM5504_REG_CONTROL_USBCHDEN_SHIFT 6
+#define SM5504_REG_CONTROL_ADC_EN_SHIFT 7
+#define SM5504_REG_CONTROL_CHGTYP_MASK (0x1 << SM5504_REG_CONTROL_CHGTYP_SHIFT)
+#define SM5504_REG_CONTROL_USBCHDEN_MASK (0x1 << SM5504_REG_CONTROL_USBCHDEN_SHIFT)
+#define SM5504_REG_CONTROL_ADC_EN_MASK (0x1 << SM5504_REG_CONTROL_ADC_EN_SHIFT)
+
#define SM5502_REG_INTM1_ATTACH_SHIFT 0
#define SM5502_REG_INTM1_DETACH_SHIFT 1
#define SM5502_REG_INTM1_KP_SHIFT 2
@@ -123,6 +126,36 @@ enum sm5502_reg {
#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
+#define SM5504_REG_INTM1_ATTACH_SHIFT 0
+#define SM5504_REG_INTM1_DETACH_SHIFT 1
+#define SM5504_REG_INTM1_CHG_DET_SHIFT 2
+#define SM5504_REG_INTM1_DCD_OUT_SHIFT 3
+#define SM5504_REG_INTM1_OVP_EVENT_SHIFT 4
+#define SM5504_REG_INTM1_CONNECT_SHIFT 5
+#define SM5504_REG_INTM1_ADC_CHG_SHIFT 6
+#define SM5504_REG_INTM1_ATTACH_MASK (0x1 << SM5504_REG_INTM1_ATTACH_SHIFT)
+#define SM5504_REG_INTM1_DETACH_MASK (0x1 << SM5504_REG_INTM1_DETACH_SHIFT)
+#define SM5504_REG_INTM1_CHG_DET_MASK (0x1 << SM5504_REG_INTM1_CHG_DET_SHIFT)
+#define SM5504_REG_INTM1_DCD_OUT_MASK (0x1 << SM5504_REG_INTM1_DCD_OUT_SHIFT)
+#define SM5504_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5504_REG_INTM1_OVP_EVENT_SHIFT)
+#define SM5504_REG_INTM1_CONNECT_MASK (0x1 << SM5504_REG_INTM1_CONNECT_SHIFT)
+#define SM5504_REG_INTM1_ADC_CHG_MASK (0x1 << SM5504_REG_INTM1_ADC_CHG_SHIFT)
+
+#define SM5504_REG_INTM2_RID_CHG_SHIFT 0
+#define SM5504_REG_INTM2_UVLO_SHIFT 1
+#define SM5504_REG_INTM2_POR_SHIFT 2
+#define SM5504_REG_INTM2_OVP_FET_SHIFT 4
+#define SM5504_REG_INTM2_OCP_LATCH_SHIFT 5
+#define SM5504_REG_INTM2_OCP_EVENT_SHIFT 6
+#define SM5504_REG_INTM2_OVP_OCP_EVENT_SHIFT 7
+#define SM5504_REG_INTM2_RID_CHG_MASK (0x1 << SM5504_REG_INTM2_RID_CHG_SHIFT)
+#define SM5504_REG_INTM2_UVLO_MASK (0x1 << SM5504_REG_INTM2_UVLO_SHIFT)
+#define SM5504_REG_INTM2_POR_MASK (0x1 << SM5504_REG_INTM2_POR_SHIFT)
+#define SM5504_REG_INTM2_OVP_FET_MASK (0x1 << SM5504_REG_INTM2_OVP_FET_SHIFT)
+#define SM5504_REG_INTM2_OCP_LATCH_MASK (0x1 << SM5504_REG_INTM2_OCP_LATCH_SHIFT)
+#define SM5504_REG_INTM2_OCP_EVENT_MASK (0x1 << SM5504_REG_INTM2_OCP_EVENT_SHIFT)
+#define SM5504_REG_INTM2_OVP_OCP_EVENT_MASK (0x1 << SM5504_REG_INTM2_OVP_OCP_EVENT_SHIFT)
+
#define SM5502_REG_ADC_SHIFT 0
#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
@@ -199,6 +232,9 @@ enum sm5502_reg {
#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
+#define SM5504_REG_DEV_TYPE1_USB_OTG_SHIFT 0
+#define SM5504_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5504_REG_DEV_TYPE1_USB_OTG_SHIFT)
+
#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
@@ -277,4 +313,42 @@ enum sm5502_irq {
#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
+/* SM5504 Interrupts */
+enum sm5504_irq {
+ /* INT1 */
+ SM5504_IRQ_INT1_ATTACH,
+ SM5504_IRQ_INT1_DETACH,
+ SM5504_IRQ_INT1_CHG_DET,
+ SM5504_IRQ_INT1_DCD_OUT,
+ SM5504_IRQ_INT1_OVP_EVENT,
+ SM5504_IRQ_INT1_CONNECT,
+ SM5504_IRQ_INT1_ADC_CHG,
+
+ /* INT2 */
+ SM5504_IRQ_INT2_RID_CHG,
+ SM5504_IRQ_INT2_UVLO,
+ SM5504_IRQ_INT2_POR,
+ SM5504_IRQ_INT2_OVP_FET,
+ SM5504_IRQ_INT2_OCP_LATCH,
+ SM5504_IRQ_INT2_OCP_EVENT,
+ SM5504_IRQ_INT2_OVP_OCP_EVENT,
+
+ SM5504_IRQ_NUM,
+};
+
+#define SM5504_IRQ_INT1_ATTACH_MASK BIT(0)
+#define SM5504_IRQ_INT1_DETACH_MASK BIT(1)
+#define SM5504_IRQ_INT1_CHG_DET_MASK BIT(2)
+#define SM5504_IRQ_INT1_DCD_OUT_MASK BIT(3)
+#define SM5504_IRQ_INT1_OVP_MASK BIT(4)
+#define SM5504_IRQ_INT1_CONNECT_MASK BIT(5)
+#define SM5504_IRQ_INT1_ADC_CHG_MASK BIT(6)
+#define SM5504_IRQ_INT2_RID_CHG_MASK BIT(0)
+#define SM5504_IRQ_INT2_UVLO_MASK BIT(1)
+#define SM5504_IRQ_INT2_POR_MASK BIT(2)
+#define SM5504_IRQ_INT2_OVP_FET_MASK BIT(4)
+#define SM5504_IRQ_INT2_OCP_LATCH_MASK BIT(5)
+#define SM5504_IRQ_INT2_OCP_EVENT_MASK BIT(6)
+#define SM5504_IRQ_INT2_OVP_OCP_EVENT_MASK BIT(7)
+
#endif /* __LINUX_EXTCON_SM5502_H */
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 88ed971e32c0..b0d671db178a 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -511,12 +511,12 @@ remove_card(struct pci_dev *dev)
wake_up_interruptible(&client->buffer.wait);
spin_unlock_irq(&lynx->client_list_lock);
- pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
- lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
- pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
- lynx->rcv_pcl, lynx->rcv_pcl_bus);
- pci_free_consistent(lynx->pci_device, PAGE_SIZE,
- lynx->rcv_buffer, lynx->rcv_buffer_bus);
+ dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
+ lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
+ dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
+ lynx->rcv_pcl, lynx->rcv_pcl_bus);
+ dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE, lynx->rcv_buffer,
+ lynx->rcv_buffer_bus);
iounmap(lynx->registers);
pci_disable_device(dev);
@@ -532,7 +532,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
u32 p, end;
int ret, i;
- if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
dev_err(&dev->dev,
"DMA address limits not supported for PCILynx hardware\n");
return -ENXIO;
@@ -564,12 +564,16 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
goto fail_deallocate_lynx;
}
- lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
- sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
- lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
- sizeof(struct pcl), &lynx->rcv_pcl_bus);
- lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
- RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
+ lynx->rcv_start_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
+ sizeof(struct pcl),
+ &lynx->rcv_start_pcl_bus,
+ GFP_KERNEL);
+ lynx->rcv_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
+ sizeof(struct pcl),
+ &lynx->rcv_pcl_bus, GFP_KERNEL);
+ lynx->rcv_buffer = dma_alloc_coherent(&lynx->pci_device->dev,
+ RCV_BUFFER_SIZE,
+ &lynx->rcv_buffer_bus, GFP_KERNEL);
if (lynx->rcv_start_pcl == NULL ||
lynx->rcv_pcl == NULL ||
lynx->rcv_buffer == NULL) {
@@ -667,14 +671,15 @@ fail_free_irq:
fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
- pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
- lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
+ dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
+ lynx->rcv_start_pcl,
+ lynx->rcv_start_pcl_bus);
if (lynx->rcv_pcl)
- pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
- lynx->rcv_pcl, lynx->rcv_pcl_bus);
+ dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
+ lynx->rcv_pcl, lynx->rcv_pcl_bus);
if (lynx->rcv_buffer)
- pci_free_consistent(lynx->pci_device, PAGE_SIZE,
- lynx->rcv_buffer, lynx->rcv_buffer_bus);
+ dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE,
+ lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
fail_deallocate_lynx:
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index db0ea2d2d75a..1db738d5b301 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -9,7 +9,7 @@ menu "Firmware Drivers"
config ARM_SCMI_PROTOCOL
tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST
- depends on MAILBOX
+ depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY
help
ARM System Control and Management Interface (SCMI) protocol is a
set of operating system-independent software interfaces that are
@@ -296,6 +296,7 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
+source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5e013b6a3692..546ac8e7f6d0 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
+obj-y += arm_ffa/
obj-y += arm_scmi/
obj-y += broadcom/
obj-y += meson/
diff --git a/drivers/firmware/arm_ffa/Kconfig b/drivers/firmware/arm_ffa/Kconfig
new file mode 100644
index 000000000000..5e3ae5cf82e8
--- /dev/null
+++ b/drivers/firmware/arm_ffa/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_FFA_TRANSPORT
+ tristate "Arm Firmware Framework for Armv8-A"
+ depends on OF
+ depends on ARM64
+ default n
+ help
+ The Firmware Framework (FF) for Arm A-profile processors describes
+ interfaces that standardize communication between the various
+ software images, including communication between images in
+ the Secure world and the Normal world. It also leverages the
+ virtualization extension to isolate software images provided
+ by an ecosystem of vendors from each other.
+
+ This driver provides the interface for all the client drivers making
+ use of the features offered by Arm FF-A.
+
+config ARM_FFA_SMCCC
+ bool
+ default ARM_FFA_TRANSPORT
+ depends on ARM64 && HAVE_ARM_SMCCC_DISCOVERY
diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile
new file mode 100644
index 000000000000..9d9f37523200
--- /dev/null
+++ b/drivers/firmware/arm_ffa/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ffa-bus-y = bus.o
+ffa-driver-y = driver.o
+ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o
+ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y)
+obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
new file mode 100644
index 000000000000..00fe595a5bc8
--- /dev/null
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "common.h"
+
+static int ffa_device_match(struct device *dev, struct device_driver *drv)
+{
+ const struct ffa_device_id *id_table;
+ struct ffa_device *ffa_dev;
+
+ id_table = to_ffa_driver(drv)->id_table;
+ ffa_dev = to_ffa_dev(dev);
+
+ while (!uuid_is_null(&id_table->uuid)) {
+ /*
+ * FF-A v1.0 doesn't provide discovery of UUIDs, just the
+ * partition IDs, so fetch the partition IDs for this
+ * id_table UUID and assign the UUID to the device if the
+ * partition ID matches.
+ */
+ if (uuid_is_null(&ffa_dev->uuid))
+ ffa_device_match_uuid(ffa_dev, &id_table->uuid);
+
+ if (uuid_equal(&ffa_dev->uuid, &id_table->uuid))
+ return 1;
+ id_table++;
+ }
+
+ return 0;
+}
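
Given the matching scheme above, a client driver only publishes the UUID of the partition it talks to and the bus resolves the partition ID behind the scenes. A minimal, hypothetical registration sketch (the UUID bytes and the example names are placeholders, not a real partition):

static const struct ffa_device_id example_ffa_ids[] = {
	/* Placeholder UUID; a real driver lists its partition's UUID here. */
	{ .uuid = UUID_INIT(0x12345678, 0x9abc, 0xdef0,
			    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef) },
	{}
};

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	dev_info(&ffa_dev->dev, "bound to partition 0x%04x\n", ffa_dev->vm_id);
	return 0;
}

static struct ffa_driver example_ffa_driver = {
	.name		= "example_ffa",
	.probe		= example_ffa_probe,
	.id_table	= example_ffa_ids,
};

/* Registered via ffa_driver_register(&example_ffa_driver, THIS_MODULE, KBUILD_MODNAME). */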
+
+static int ffa_device_probe(struct device *dev)
+{
+ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return ffa_drv->probe(ffa_dev);
+}
+
+static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
+ ffa_dev->vm_id, &ffa_dev->uuid);
+}
+
+static ssize_t partition_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return sprintf(buf, "0x%04x\n", ffa_dev->vm_id);
+}
+static DEVICE_ATTR_RO(partition_id);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return sprintf(buf, "%pUb\n", &ffa_dev->uuid);
+}
+static DEVICE_ATTR_RO(uuid);
+
+static struct attribute *ffa_device_attributes_attrs[] = {
+ &dev_attr_partition_id.attr,
+ &dev_attr_uuid.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ffa_device_attributes);
+
+struct bus_type ffa_bus_type = {
+ .name = "arm_ffa",
+ .match = ffa_device_match,
+ .probe = ffa_device_probe,
+ .uevent = ffa_device_uevent,
+ .dev_groups = ffa_device_attributes_groups,
+};
+EXPORT_SYMBOL_GPL(ffa_bus_type);
+
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ int ret;
+
+ if (!driver->probe)
+ return -EINVAL;
+
+ driver->driver.bus = &ffa_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ ret = driver_register(&driver->driver);
+ if (!ret)
+ pr_debug("registered new ffa driver %s\n", driver->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ffa_driver_register);
+
+void ffa_driver_unregister(struct ffa_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(ffa_driver_unregister);
+
+static void ffa_release_device(struct device *dev)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ kfree(ffa_dev);
+}
+
+static int __ffa_devices_unregister(struct device *dev, void *data)
+{
+ ffa_release_device(dev);
+
+ return 0;
+}
+
+static void ffa_devices_unregister(void)
+{
+ bus_for_each_dev(&ffa_bus_type, NULL, NULL,
+ __ffa_devices_unregister);
+}
+
+bool ffa_device_is_valid(struct ffa_device *ffa_dev)
+{
+ bool valid = false;
+ struct device *dev = NULL;
+ struct ffa_device *tmp_dev;
+
+ do {
+ dev = bus_find_next_device(&ffa_bus_type, dev);
+ tmp_dev = to_ffa_dev(dev);
+ if (tmp_dev == ffa_dev) {
+ valid = true;
+ break;
+ }
+ put_device(dev);
+ } while (dev);
+
+ put_device(dev);
+
+ return valid;
+}
+
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
+{
+ int ret;
+ struct device *dev;
+ struct ffa_device *ffa_dev;
+
+ ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
+ if (!ffa_dev)
+ return NULL;
+
+ dev = &ffa_dev->dev;
+ dev->bus = &ffa_bus_type;
+ dev->release = ffa_release_device;
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
+
+ ffa_dev->vm_id = vm_id;
+ uuid_copy(&ffa_dev->uuid, uuid);
+
+ ret = device_register(&ffa_dev->dev);
+ if (ret) {
+ dev_err(dev, "unable to register device %s err=%d\n",
+ dev_name(dev), ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ return ffa_dev;
+}
+EXPORT_SYMBOL_GPL(ffa_device_register);
+
+void ffa_device_unregister(struct ffa_device *ffa_dev)
+{
+ if (!ffa_dev)
+ return;
+
+ device_unregister(&ffa_dev->dev);
+}
+EXPORT_SYMBOL_GPL(ffa_device_unregister);
+
+int arm_ffa_bus_init(void)
+{
+ return bus_register(&ffa_bus_type);
+}
+
+void arm_ffa_bus_exit(void)
+{
+ ffa_devices_unregister();
+ bus_unregister(&ffa_bus_type);
+}
diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h
new file mode 100644
index 000000000000..d6eccf1fd3f6
--- /dev/null
+++ b/drivers/firmware/arm_ffa/common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _FFA_COMMON_H
+#define _FFA_COMMON_H
+
+#include <linux/arm_ffa.h>
+#include <linux/arm-smccc.h>
+#include <linux/err.h>
+
+typedef struct arm_smccc_1_2_regs ffa_value_t;
+
+typedef void (ffa_fn)(ffa_value_t, ffa_value_t *);
+
+int arm_ffa_bus_init(void);
+void arm_ffa_bus_exit(void);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid);
+
+#ifdef CONFIG_ARM_FFA_SMCCC
+int __init ffa_transport_init(ffa_fn **invoke_ffa_fn);
+#else
+static inline int __init ffa_transport_init(ffa_fn **invoke_ffa_fn)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* _FFA_COMMON_H */
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
new file mode 100644
index 000000000000..c9fb56afbcb4
--- /dev/null
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Arm Firmware Framework for ARMv8-A (FF-A) interface driver
+ *
+ * The Arm FFA specification[1] describes a software architecture that
+ * leverages the virtualization extension to isolate software images
+ * provided by an ecosystem of vendors from each other and describes
+ * interfaces that standardize communication between the various software
+ * images, including communication between images in the Secure world and
+ * the Normal world. Any Hypervisor could use the FFA interfaces to enable
+ * communication between the VMs it manages.
+ *
+ * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can assign
+ * system resources (memory regions, devices, CPU cycles) to the partitions
+ * and manage isolation amongst them.
+ *
+ * [1] https://developer.arm.com/docs/den0077/latest
+ *
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#define DRIVER_NAME "ARM FF-A"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include "common.h"
+
+#define FFA_DRIVER_VERSION FFA_VERSION_1_0
+
+#define FFA_SMC(calling_convention, func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \
+ ARM_SMCCC_OWNER_STANDARD, (func_num))
+
+#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
+#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
+
+#define FFA_ERROR FFA_SMC_32(0x60)
+#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_INTERRUPT FFA_SMC_32(0x62)
+#define FFA_VERSION FFA_SMC_32(0x63)
+#define FFA_FEATURES FFA_SMC_32(0x64)
+#define FFA_RX_RELEASE FFA_SMC_32(0x65)
+#define FFA_RXTX_MAP FFA_SMC_32(0x66)
+#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66)
+#define FFA_RXTX_UNMAP FFA_SMC_32(0x67)
+#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68)
+#define FFA_ID_GET FFA_SMC_32(0x69)
+#define FFA_MSG_POLL FFA_SMC_32(0x6A)
+#define FFA_MSG_WAIT FFA_SMC_32(0x6B)
+#define FFA_YIELD FFA_SMC_32(0x6C)
+#define FFA_RUN FFA_SMC_32(0x6D)
+#define FFA_MSG_SEND FFA_SMC_32(0x6E)
+#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F)
+#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F)
+#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70)
+#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70)
+#define FFA_MEM_DONATE FFA_SMC_32(0x71)
+#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71)
+#define FFA_MEM_LEND FFA_SMC_32(0x72)
+#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72)
+#define FFA_MEM_SHARE FFA_SMC_32(0x73)
+#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73)
+#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74)
+#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74)
+#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75)
+#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76)
+#define FFA_MEM_RECLAIM FFA_SMC_32(0x77)
+#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78)
+#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79)
+#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
+#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
+#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+
+/*
+ * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls FFA_FN_NATIVE(name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define FFA_FN_NATIVE(name) FFA_FN64_##name
+#else
+#define FFA_FN_NATIVE(name) FFA_##name
+#endif
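
As a quick illustration of what the macro above resolves to:

/*
 * Example: FFA_FN_NATIVE(RXTX_MAP) expands to FFA_FN64_RXTX_MAP on a
 * CONFIG_64BIT kernel (SMC64 calling convention) and to FFA_RXTX_MAP
 * on a 32-bit kernel (SMC32 calling convention).
 */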
+
+/* FFA error codes. */
+#define FFA_RET_SUCCESS (0)
+#define FFA_RET_NOT_SUPPORTED (-1)
+#define FFA_RET_INVALID_PARAMETERS (-2)
+#define FFA_RET_NO_MEMORY (-3)
+#define FFA_RET_BUSY (-4)
+#define FFA_RET_INTERRUPTED (-5)
+#define FFA_RET_DENIED (-6)
+#define FFA_RET_RETRY (-7)
+#define FFA_RET_ABORTED (-8)
+
+#define MAJOR_VERSION_MASK GENMASK(30, 16)
+#define MINOR_VERSION_MASK GENMASK(15, 0)
+#define MAJOR_VERSION(x) ((u16)(FIELD_GET(MAJOR_VERSION_MASK, (x))))
+#define MINOR_VERSION(x) ((u16)(FIELD_GET(MINOR_VERSION_MASK, (x))))
+#define PACK_VERSION_INFO(major, minor) \
+ (FIELD_PREP(MAJOR_VERSION_MASK, (major)) | \
+ FIELD_PREP(MINOR_VERSION_MASK, (minor)))
+#define FFA_VERSION_1_0 PACK_VERSION_INFO(1, 0)
+#define FFA_MIN_VERSION FFA_VERSION_1_0
+
+#define SENDER_ID_MASK GENMASK(31, 16)
+#define RECEIVER_ID_MASK GENMASK(15, 0)
+#define SENDER_ID(x) ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
+#define RECEIVER_ID(x) ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
+#define PACK_TARGET_INFO(s, r) \
+ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
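
A worked example of the packing/unpacking helpers above, with values derived directly from the masks:

/*
 *	PACK_VERSION_INFO(1, 0)		== 0x00010000	(i.e. FFA_VERSION_1_0)
 *	MAJOR_VERSION(0x00010000)	== 1
 *	MINOR_VERSION(0x00010000)	== 0
 *	PACK_TARGET_INFO(0x8001, 0x2)	== 0x80010002	(sender 0x8001, receiver 0x0002)
 */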
+
+/*
+ * The FF-A specification explicitly refers to '4K pages'. This should
+ * not be confused with the kernel PAGE_SIZE, which is the translation
+ * granule the kernel is configured with and may be 4K, 16K or 64K.
+ */
+#define FFA_PAGE_SIZE SZ_4K
+/*
+ * Keep the RX/TX buffer size as 4K for now.
+ * 64K may be preferred to keep it at least one page in a 64K PAGE_SIZE config.
+ */
+#define RXTX_BUFFER_SIZE SZ_4K
+
+static ffa_fn *invoke_ffa_fn;
+
+static const int ffa_linux_errmap[] = {
+ /* better than a switch case as long as the return values are contiguous */
+ 0, /* FFA_RET_SUCCESS */
+ -EOPNOTSUPP, /* FFA_RET_NOT_SUPPORTED */
+ -EINVAL, /* FFA_RET_INVALID_PARAMETERS */
+ -ENOMEM, /* FFA_RET_NO_MEMORY */
+ -EBUSY, /* FFA_RET_BUSY */
+ -EINTR, /* FFA_RET_INTERRUPTED */
+ -EACCES, /* FFA_RET_DENIED */
+ -EAGAIN, /* FFA_RET_RETRY */
+ -ECANCELED, /* FFA_RET_ABORTED */
+};
+
+static inline int ffa_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
+ return ffa_linux_errmap[err_idx];
+ return -EINVAL;
+}
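
To make the table lookup above concrete (values follow directly from ffa_linux_errmap):

/*
 * Example: FFA_RET_NO_MEMORY (-3) gives err_idx = 3, which indexes
 * ffa_linux_errmap[3] and returns -ENOMEM; any value outside the
 * table, e.g. -42, falls through to -EINVAL.
 */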
+
+struct ffa_drv_info {
+ u32 version;
+ u16 vm_id;
+ struct mutex rx_lock; /* lock to protect Rx buffer */
+ struct mutex tx_lock; /* lock to protect Tx buffer */
+ void *rx_buffer;
+ void *tx_buffer;
+};
+
+static struct ffa_drv_info *drv_info;
+
+static int ffa_version_check(u32 *version)
+{
+ ffa_value_t ver;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
+ }, &ver);
+
+ if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ pr_info("FFA_VERSION returned not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ver.a0 < FFA_MIN_VERSION || ver.a0 > FFA_DRIVER_VERSION) {
+ pr_err("Incompatible version %d.%d found\n",
+ MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0));
+ return -EINVAL;
+ }
+
+ *version = ver.a0;
+ pr_info("Version %d.%d found\n", MAJOR_VERSION(ver.a0),
+ MINOR_VERSION(ver.a0));
+ return 0;
+}
+
+static int ffa_rx_release(void)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RX_RELEASE,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ /* check for ret.a0 == FFA_RX_RELEASE ? */
+
+ return 0;
+}
+
+static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_FN_NATIVE(RXTX_MAP),
+ .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ return 0;
+}
+
+static int ffa_rxtx_unmap(u16 vm_id)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ return 0;
+}
+
+/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
+static int
+__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
+ struct ffa_partition_info *buffer, int num_partitions)
+{
+ int count;
+ ffa_value_t partition_info;
+
+ mutex_lock(&drv_info->rx_lock);
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_PARTITION_INFO_GET,
+ .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
+ }, &partition_info);
+
+ if (partition_info.a0 == FFA_ERROR) {
+ mutex_unlock(&drv_info->rx_lock);
+ return ffa_to_linux_errno((int)partition_info.a2);
+ }
+
+ count = partition_info.a2;
+
+ if (buffer && count <= num_partitions)
+ memcpy(buffer, drv_info->rx_buffer, sizeof(*buffer) * count);
+
+ ffa_rx_release();
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ return count;
+}
+
+/* the buffer is allocated here and the caller must free it if the returned count > 0 */
+static int
+ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
+{
+ int count;
+ u32 uuid0_4[4];
+ struct ffa_partition_info *pbuf;
+
+ export_uuid((u8 *)uuid0_4, uuid);
+ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
+ uuid0_4[3], NULL, 0);
+ if (count <= 0)
+ return count;
+
+ pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
+ if (!pbuf)
+ return -ENOMEM;
+
+ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
+ uuid0_4[3], pbuf, count);
+ if (count <= 0)
+ kfree(pbuf);
+ else
+ *buffer = pbuf;
+
+ return count;
+}
+
+#define VM_ID_MASK GENMASK(15, 0)
+static int ffa_id_get(u16 *vm_id)
+{
+ ffa_value_t id;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_ID_GET,
+ }, &id);
+
+ if (id.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)id.a2);
+
+ *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
+
+ return 0;
+}
+
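+/*
+ * Send a synchronous direct message request. If the target gets
+ * pre-empted (FFA_INTERRUPT), keep resuming it with FFA_RUN until the
+ * direct response or an error is returned.
+ */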
+static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
+ struct ffa_send_direct_data *data)
+{
+ u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ ffa_value_t ret;
+
+ if (mode_32bit) {
+ req_id = FFA_MSG_SEND_DIRECT_REQ;
+ resp_id = FFA_MSG_SEND_DIRECT_RESP;
+ } else {
+ req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
+ resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
+ }
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
+ .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
+ .a6 = data->data3, .a7 = data->data4,
+ }, &ret);
+
+ while (ret.a0 == FFA_INTERRUPT)
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RUN, .a1 = ret.a1,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 == resp_id) {
+ data->data0 = ret.a3;
+ data->data1 = ret.a4;
+ data->data2 = ret.a5;
+ data->data3 = ret.a6;
+ data->data4 = ret.a7;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
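+/*
+ * Helpers for transmitting a memory transaction descriptor: the first
+ * fragment carries the total length of the descriptor, and any remaining
+ * fragments are sent with FFA_MEM_FRAG_TX against the handle of the
+ * transaction.
+ */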
+static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
+ u32 frag_len, u32 len, u64 *handle)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = func_id, .a1 = len, .a2 = frag_len,
+ .a3 = buf, .a4 = buf_sz,
+ }, &ret);
+
+ while (ret.a0 == FFA_MEM_OP_PAUSE)
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_OP_RESUME,
+ .a1 = ret.a1, .a2 = ret.a2,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 != FFA_SUCCESS)
+ return -EOPNOTSUPP;
+
+ if (handle)
+ *handle = PACK_HANDLE(ret.a2, ret.a3);
+
+ return frag_len;
+}
+
+static int ffa_mem_next_frag(u64 handle, u32 frag_len)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_FRAG_TX,
+ .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
+ .a3 = frag_len,
+ }, &ret);
+
+ while (ret.a0 == FFA_MEM_OP_PAUSE)
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_OP_RESUME,
+ .a1 = ret.a1, .a2 = ret.a2,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 != FFA_MEM_FRAG_RX)
+ return -EOPNOTSUPP;
+
+ return ret.a3;
+}
+
+static int
+ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
+ u32 len, u64 *handle, bool first)
+{
+ if (!first)
+ return ffa_mem_next_frag(*handle, frag_len);
+
+ return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
+}
+
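+/*
+ * Count the number of 4K FF-A pages covered by the scatterlist. Entry
+ * lengths are assumed to be multiples of FFA_PAGE_SIZE; any remainder is
+ * dropped by the integer division.
+ */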
+static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
+{
+ u32 num_pages = 0;
+
+ do {
+ num_pages += sg->length / FFA_PAGE_SIZE;
+ } while ((sg = sg_next(sg)));
+
+ return num_pages;
+}
+
+static int
+ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ struct ffa_mem_ops_args *args)
+{
+ int rc = 0;
+ bool first = true;
+ phys_addr_t addr = 0;
+ struct ffa_composite_mem_region *composite;
+ struct ffa_mem_region_addr_range *constituents;
+ struct ffa_mem_region_attributes *ep_mem_access;
+ struct ffa_mem_region *mem_region = buffer;
+ u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
+
+ mem_region->tag = args->tag;
+ mem_region->flags = args->flags;
+ mem_region->sender_id = drv_info->vm_id;
+ mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
+ FFA_MEM_INNER_SHAREABLE;
+ ep_mem_access = &mem_region->ep_mem_access[0];
+
+ for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ ep_mem_access->receiver = args->attrs[idx].receiver;
+ ep_mem_access->attrs = args->attrs[idx].attrs;
+ ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+ }
+ mem_region->ep_count = args->nattrs;
+
+ composite = buffer + COMPOSITE_OFFSET(args->nattrs);
+ composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
+ composite->addr_range_cnt = num_entries;
+
+ length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
+ frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
+ if (frag_len > max_fragsize)
+ return -ENXIO;
+
+ if (!args->use_txbuf) {
+ addr = virt_to_phys(buffer);
+ buf_sz = max_fragsize / FFA_PAGE_SIZE;
+ }
+
+ constituents = buffer + frag_len;
+ idx = 0;
+ do {
+ if (frag_len == max_fragsize) {
+ rc = ffa_transmit_fragment(func_id, addr, buf_sz,
+ frag_len, length,
+ &args->g_handle, first);
+ if (rc < 0)
+ return -ENXIO;
+
+ first = false;
+ idx = 0;
+ frag_len = 0;
+ constituents = buffer;
+ }
+
+ if ((void *)constituents - buffer > max_fragsize) {
+ pr_err("Memory Region Fragment > Tx Buffer size\n");
+ return -EFAULT;
+ }
+
+ constituents->address = sg_phys(args->sg);
+ constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+ constituents++;
+ frag_len += sizeof(struct ffa_mem_region_addr_range);
+ } while ((args->sg = sg_next(args->sg)));
+
+ return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
+ length, &args->g_handle, first);
+}
+
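+/*
+ * Build and transmit the transaction descriptor either from the
+ * pre-mapped Tx buffer (serialised by tx_lock) or, if the caller did not
+ * ask for the Tx buffer, from a temporary allocation whose physical
+ * address is passed to the firmware instead.
+ */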
+static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
+{
+ int ret;
+ void *buffer;
+
+ if (!args->use_txbuf) {
+ buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ } else {
+ buffer = drv_info->tx_buffer;
+ mutex_lock(&drv_info->tx_lock);
+ }
+
+ ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
+
+ if (args->use_txbuf)
+ mutex_unlock(&drv_info->tx_lock);
+ else
+ free_pages_exact(buffer, RXTX_BUFFER_SIZE);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ffa_memory_reclaim(u64 g_handle, u32 flags)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_RECLAIM,
+ .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
+ .a3 = flags,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ return 0;
+}
+
+static u32 ffa_api_version_get(void)
+{
+ return drv_info->version;
+}
+
+static int ffa_partition_info_get(const char *uuid_str,
+ struct ffa_partition_info *buffer)
+{
+ int count;
+ uuid_t uuid;
+ struct ffa_partition_info *pbuf;
+
+ if (uuid_parse(uuid_str, &uuid)) {
+ pr_err("invalid uuid (%s)\n", uuid_str);
+ return -ENODEV;
+ }
+
+ count = ffa_partition_probe(&uuid_null, &pbuf);
+ if (count <= 0)
+ return -ENOENT;
+
+ memcpy(buffer, pbuf, sizeof(*pbuf) * count);
+ kfree(pbuf);
+ return 0;
+}
+
+static void ffa_mode_32bit_set(struct ffa_device *dev)
+{
+ dev->mode_32bit = true;
+}
+
+static int ffa_sync_send_receive(struct ffa_device *dev,
+ struct ffa_send_direct_data *data)
+{
+ return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
+ dev->mode_32bit, data);
+}
+
+static int
+ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+{
+ if (dev->mode_32bit)
+ return ffa_memory_ops(FFA_MEM_SHARE, args);
+
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+}
+
+static const struct ffa_dev_ops ffa_ops = {
+ .api_version_get = ffa_api_version_get,
+ .partition_info_get = ffa_partition_info_get,
+ .mode_32bit_set = ffa_mode_32bit_set,
+ .sync_send_receive = ffa_sync_send_receive,
+ .memory_reclaim = ffa_memory_reclaim,
+ .memory_share = ffa_memory_share,
+};
+
+const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
+{
+ if (ffa_device_is_valid(dev))
+ return &ffa_ops;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ffa_dev_ops_get);
+
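+/*
+ * Used by the FF-A bus device matching: probe the firmware for partitions
+ * implementing @uuid and record the UUID on the device if its partition
+ * ID is among the ones returned.
+ */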
+void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
+{
+ int count, idx;
+ struct ffa_partition_info *pbuf, *tpbuf;
+
+ count = ffa_partition_probe(uuid, &pbuf);
+ if (count <= 0)
+ return;
+
+ for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
+ if (tpbuf->id == ffa_dev->vm_id)
+ uuid_copy(&ffa_dev->uuid, uuid);
+ kfree(pbuf);
+}
+
+static void ffa_setup_partitions(void)
+{
+ int count, idx;
+ struct ffa_device *ffa_dev;
+ struct ffa_partition_info *pbuf, *tpbuf;
+
+ count = ffa_partition_probe(&uuid_null, &pbuf);
+ if (count <= 0) {
+ pr_info("%s: No partitions found, error %d\n", __func__, count);
+ return;
+ }
+
+ for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
+ /*
+ * Note that the &uuid_null parameter will require
+ * ffa_device_match() to find the UUID of this partition id
+ * with the help of ffa_device_match_uuid(). Once the FF-A spec
+ * is updated to provide the correct UUID here for each partition
+ * as part of the discovery API, we need to pass the
+ * discovered UUID here instead.
+ ffa_dev = ffa_device_register(&uuid_null, tpbuf->id);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register partition ID 0x%x\n",
+ __func__, tpbuf->id);
+ continue;
+ }
+
+ ffa_dev_set_drvdata(ffa_dev, drv_info);
+ }
+ kfree(pbuf);
+}
+
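+/*
+ * Initialisation order: pick the SMCCC conduit, register the FF-A bus,
+ * check the firmware version, obtain our own VM id, map the Rx/Tx buffer
+ * pair and finally enumerate the partitions and register them as FF-A
+ * devices.
+ */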
+static int __init ffa_init(void)
+{
+ int ret;
+
+ ret = ffa_transport_init(&invoke_ffa_fn);
+ if (ret)
+ return ret;
+
+ ret = arm_ffa_bus_init();
+ if (ret)
+ return ret;
+
+ drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
+ if (!drv_info) {
+ ret = -ENOMEM;
+ goto ffa_bus_exit;
+ }
+
+ ret = ffa_version_check(&drv_info->version);
+ if (ret)
+ goto free_drv_info;
+
+ if (ffa_id_get(&drv_info->vm_id)) {
+ pr_err("failed to obtain VM id for self\n");
+ ret = -ENODEV;
+ goto free_drv_info;
+ }
+
+ drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ if (!drv_info->rx_buffer) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ if (!drv_info->tx_buffer) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
+ virt_to_phys(drv_info->rx_buffer),
+ RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
+ if (ret) {
+ pr_err("failed to register FFA RxTx buffers\n");
+ goto free_pages;
+ }
+
+ mutex_init(&drv_info->rx_lock);
+ mutex_init(&drv_info->tx_lock);
+
+ ffa_setup_partitions();
+
+ return 0;
+free_pages:
+ if (drv_info->tx_buffer)
+ free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+free_drv_info:
+ kfree(drv_info);
+ffa_bus_exit:
+ arm_ffa_bus_exit();
+ return ret;
+}
+subsys_initcall(ffa_init);
+
+static void __exit ffa_exit(void)
+{
+ ffa_rxtx_unmap(drv_info->vm_id);
+ free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+ kfree(drv_info);
+ arm_ffa_bus_exit();
+}
+module_exit(ffa_exit);
+
+MODULE_ALIAS("arm-ffa");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Arm FF-A interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_ffa/smccc.c b/drivers/firmware/arm_ffa/smccc.c
new file mode 100644
index 000000000000..4d85bfff0a4e
--- /dev/null
+++ b/drivers/firmware/arm_ffa/smccc.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#include <linux/printk.h>
+
+#include "common.h"
+
+static void __arm_ffa_fn_smc(ffa_value_t args, ffa_value_t *res)
+{
+ arm_smccc_1_2_smc(&args, res);
+}
+
+static void __arm_ffa_fn_hvc(ffa_value_t args, ffa_value_t *res)
+{
+ arm_smccc_1_2_hvc(&args, res);
+}
+
+int __init ffa_transport_init(ffa_fn **invoke_ffa_fn)
+{
+ enum arm_smccc_conduit conduit;
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return -EOPNOTSUPP;
+
+ conduit = arm_smccc_1_1_get_conduit();
+ if (conduit == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (conduit == SMCCC_CONDUIT_SMC)
+ *invoke_ffa_fn = __arm_ffa_fn_smc;
+ else
+ *invoke_ffa_fn = __arm_ffa_fn_hvc;
+
+ return 0;
+}
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 784cf0027da3..6c7e24935eca 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -104,11 +104,6 @@ static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
-
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (!id)
- return -ENODEV;
if (!scmi_dev->handle)
return -EPROBE_DEFER;
@@ -139,6 +134,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
{
int retval;
+ if (!driver->probe)
+ return -EINVAL;
+
retval = scmi_protocol_device_request(driver->id_table);
if (retval)
return retval;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 228bf4a71d23..8685619d38f9 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -331,7 +331,7 @@ struct scmi_desc {
};
extern const struct scmi_desc scmi_mailbox_desc;
-#ifdef CONFIG_HAVE_ARM_SMCCC
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
extern const struct scmi_desc scmi_smc_desc;
#endif
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 66eb3f0e5daf..9b2e8d42a992 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -47,7 +47,6 @@ enum scmi_error_codes {
SCMI_ERR_GENERIC = -8, /* Generic Error */
SCMI_ERR_HARDWARE = -9, /* Hardware Error */
SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
- SCMI_ERR_MAX
};
/* List of all SCMI devices active in system */
@@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = {
static inline int scmi_to_linux_errno(int errno)
{
- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
- return scmi_linux_errmap[-errno];
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
return -EIO;
}
@@ -241,7 +242,6 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
xfer = &minfo->xfer_block[xfer_id];
xfer->hdr.seq = xfer_id;
- reinit_completion(&xfer->done);
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
return xfer;
@@ -335,6 +335,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
return;
}
+ /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
+ if (msg_type == MSG_TYPE_DELAYED_RESP)
+ xfer->rx.len = info->desc->max_msg_size;
+
scmi_dump_header_dbg(dev, &xfer->hdr);
info->desc->ops->fetch_response(cinfo, xfer);
@@ -429,11 +433,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_chan_info *cinfo;
/*
- * Re-instate protocol id here from protocol handle so that cannot be
+ * Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with
- * the scmi_xfer structure.
+ * the scmi_xfer structure prior to this.
*/
xfer->hdr.protocol_id = pi->proto->id;
+ reinit_completion(&xfer->done);
cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
if (unlikely(!cinfo))
@@ -505,16 +510,17 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
- const struct scmi_protocol_instance *pi = ph_to_pi(ph);
DECLARE_COMPLETION_ONSTACK(async_response);
- xfer->hdr.protocol_id = pi->proto->id;
-
xfer->async_done = &async_response;
ret = do_xfer(ph, xfer);
- if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
- ret = -ETIMEDOUT;
+ if (!ret) {
+ if (!wait_for_completion_timeout(xfer->async_done, timeout))
+ ret = -ETIMEDOUT;
+ else if (xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+ }
xfer->async_done = NULL;
return ret;
@@ -561,7 +567,6 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.id = msg_id;
- xfer->hdr.protocol_id = pi->proto->id;
xfer->hdr.poll_completion = false;
*p = xfer;
@@ -1021,8 +1026,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
const struct scmi_desc *desc = sinfo->desc;
/* Pre-allocated messages, no more than what hdr.seq can support */
- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+ dev_err(dev,
+ "Invalid maximum messages %d, not in range [1 - %lu]\n",
desc->max_msg, MSG_TOKEN_MAX);
return -EINVAL;
}
@@ -1133,6 +1139,8 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
* @proto_id and @name: if device was still not existent it is created as a
* child of the specified SCMI instance @info and its transport properly
* initialized as usual.
+ *
+ * Return: A properly initialized scmi device, NULL otherwise.
*/
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
@@ -1567,7 +1575,9 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
+#ifdef CONFIG_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 4626404be541..e3dcb58314ae 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -69,6 +69,9 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
return -ENOMEM;
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+ if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
+ return -ENXIO;
+
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index d860bebd984a..0efd20cd9d69 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1457,6 +1457,8 @@ static void scmi_devm_release_notifier(struct device *dev, void *res)
*
* Generic devres managed helper to register a notifier_block against a
* protocol event.
+ *
+ * Return: 0 on Success
*/
static int scmi_devm_notifier_register(struct scmi_device *sdev,
u8 proto_id, u8 evt_id,
@@ -1523,6 +1525,8 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
* Generic devres managed helper to explicitly un-register a notifier_block
* against a protocol event, which was previously registered using the above
* @scmi_devm_notifier_register.
+ *
+ * Return: 0 on Success
*/
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
u8 proto_id, u8 evt_id,
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 9d36d5c0622d..4371fdcd5a73 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
@@ -52,6 +53,27 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
return scmi_pd_power(domain, false);
}
+static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev)
+{
+ int ret;
+
+ ret = pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ ret = of_pm_clk_add_clks(dev);
+ if (ret >= 0)
+ return 0;
+
+ pm_clk_destroy(dev);
+ return ret;
+}
+
+static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev)
+{
+ pm_clk_destroy(dev);
+}
+
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
@@ -102,6 +124,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
+ scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
+ scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
+ scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK |
+ GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 2c88aa221559..308471586381 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get {
struct scmi_resp_sensor_reading_complete {
__le32 id;
- __le64 readings;
+ __le32 readings_low;
+ __le32 readings_high;
};
struct scmi_sensor_reading_resp {
@@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
resp = t->rx.buf;
if (le32_to_cpu(resp->id) == sensor_id)
- *value = get_unaligned_le64(&resp->readings);
+ *value =
+ get_unaligned_le64(&resp->readings_low);
else
ret = -EPROTO;
}
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index fcbe2677f84b..bed5596c7209 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -76,6 +76,9 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
return -ENOMEM;
np = of_parse_phandle(cdev->of_node, "shmem", 0);
+ if (!of_device_is_compatible(np, "arm,scmi-shmem"))
+ return -ENXIO;
+
ret = of_address_to_resource(np, 0, &res);
of_node_put(np);
if (ret) {
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 4ceba5ef7895..ddf0b9ff9e15 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -899,6 +899,14 @@ static const struct of_device_id legacy_scpi_of_match[] = {
{},
};
+static const struct of_device_id shmem_of_match[] __maybe_unused = {
+ { .compatible = "amlogic,meson-gxbb-scp-shmem", },
+ { .compatible = "amlogic,meson-axg-scp-shmem", },
+ { .compatible = "arm,juno-scp-shmem", },
+ { .compatible = "arm,scp-shmem", },
+ { }
+};
+
static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
@@ -935,6 +943,9 @@ static int scpi_probe(struct platform_device *pdev)
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
+ if (!of_match_node(shmem_of_match, shmem))
+ return -ENXIO;
+
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
index ed10da5313e8..a5bf4c3f6dc7 100644
--- a/drivers/firmware/broadcom/tee_bnxt_fw.c
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -212,10 +212,9 @@ static int tee_bnxt_fw_probe(struct device *dev)
pvt_data.dev = dev;
- fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
- TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
if (IS_ERR(fw_shm_pool)) {
- dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+ dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
err = PTR_ERR(fw_shm_pool);
goto out_sess;
}
@@ -242,6 +241,14 @@ static int tee_bnxt_fw_remove(struct device *dev)
return 0;
}
+static void tee_bnxt_fw_shutdown(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+}
+
static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
{UUID_INIT(0x6272636D, 0x2019, 0x0716,
0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
@@ -257,6 +264,7 @@ static struct tee_client_driver tee_bnxt_fw_driver = {
.bus = &tee_bus_type,
.probe = tee_bnxt_fw_probe,
.remove = tee_bnxt_fw_remove,
+ .shutdown = tee_bnxt_fw_shutdown,
},
};
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 10d4457417a4..eb9c65f97841 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -34,7 +34,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
break;
if (!adev->pnp.unique_id && node->acpi.uid == 0)
break;
- acpi_dev_put(adev);
}
if (!adev)
return -ENODEV;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4b7ee3fa9224..847f33ffc4ae 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void)
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
struct resource *res, *parent;
+ int ret;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res)
@@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
/* we expect a conflict with a 'System RAM' region */
parent = request_resource_conflict(&iomem_resource, res);
- return parent ? request_resource(parent, res) : 0;
+ ret = parent ? request_resource(parent, res) : 0;
+
+ /*
+ * Given that efi_mem_reserve_iomem() can be called at any
+ * time, only call memblock_reserve() if the architecture
+ * keeps the infrastructure around.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
+ memblock_reserve(addr, size);
+
+ return ret;
}
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 7bf0a7acae5e..2363fee9211c 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
}
/*
- * Although relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
- * sync with those recorded in the vmlinux when kaslr is disabled but the
- * image required relocation anyway. Therefore retain 2M alignment unless
- * KASLR is in use.
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
*/
-static u64 min_kimg_align(void)
+static bool check_image_region(u64 base, u64 size)
{
- return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *memory_map;
+ struct efi_boot_memmap map;
+ efi_status_t status;
+ bool ret = false;
+ int map_offset;
+
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&map);
+ if (status != EFI_SUCCESS)
+ return false;
+
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+ /*
+ * Find the region that covers base, and return whether
+ * it covers base+size bytes.
+ */
+ if (base >= md->phys_addr && base < end) {
+ ret = (base + size) <= end;
+ break;
+ }
+ }
+
+ efi_bs_call(free_pool, memory_map);
+
+ return ret;
}
efi_status_t handle_kernel_image(unsigned long *image_addr,
@@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long kernel_size, kernel_memsize = 0;
u32 phys_seed = 0;
+ /*
+ * Although relocatable kernels can fix up the misalignment with
+ * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+ * subtly out of sync with those recorded in the vmlinux when kaslr is
+ * disabled but the image required relocation anyway. Therefore retain
+ * 2M alignment if KASLR was explicitly disabled, even if it was not
+ * going to be activated to begin with.
+ */
+ u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
if (!efi_nokaslr) {
status = efi_get_random_bytes(sizeof(phys_seed),
@@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (image->image_base != _text)
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+ if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+ efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+ EFI_KIMG_ALIGN >> 10);
+
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
@@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
- status = efi_random_alloc(*reserve_size, min_kimg_align(),
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
status = EFI_OUT_OF_RESOURCES;
}
if (status != EFI_SUCCESS) {
- if (IS_ALIGNED((u64)_text, min_kimg_align())) {
+ if (!check_image_region((u64)_text, kernel_memsize)) {
+ efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+ } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
/*
* Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the alignment is suitable.
@@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
- ULONG_MAX, min_kimg_align());
+ ULONG_MAX, min_kimg_align);
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index aa8da0a49829..ae87dded989d 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -630,8 +630,8 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @load_addr: pointer to loaded initrd
* @load_size: size of loaded initrd
- * @soft_limit: preferred size of allocated memory for loading the initrd
- * @hard_limit: minimum size of allocated memory
+ * @soft_limit: preferred address for loading the initrd
+ * @hard_limit: upper limit address for loading the initrd
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index a408df474d83..724155b9e10d 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
(u64)ULONG_MAX);
+ if (region_end < size)
+ return 0;
first_slot = round_up(md->phys_addr, align);
last_slot = round_down(region_end - size + 1, align);
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index d8bc01340686..38722d2009e2 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void)
pr_err("EFI MOKvar config table is not valid\n");
return;
}
- efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
efi_mokvar_table_size = map_size_needed;
}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index c1955d320fec..8f665678e9e3 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
- log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
- pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
+ pr_info("TPM Final Events table not present\n");
+ goto out;
+ } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ pr_warn(FW_BUG "TPM Final Events table invalid\n");
goto out;
}
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index bb6e77ee3898..adaa492c3d2d 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/panic_notifier.h>
#include <linux/ioctl.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index e3da38e15c5b..cfb448eabdaa 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -296,7 +296,8 @@ static int get_set_conduit_method(struct device_node *np)
return 0;
}
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+static int psci_sys_reset(struct notifier_block *nb, unsigned long action,
+ void *data)
{
if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
psci_system_reset2_supported) {
@@ -309,8 +310,15 @@ static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
} else {
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
+
+ return NOTIFY_DONE;
}
+static struct notifier_block psci_sys_reset_nb = {
+ .notifier_call = psci_sys_reset,
+ .priority = 129,
+};
+
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
@@ -477,7 +485,7 @@ static void __init psci_0_2_set_functions(void)
.migrate_info_type = psci_migrate_info_type,
};
- arm_pm_restart = psci_sys_reset;
+ register_restart_handler(&psci_sys_reset_nb);
pm_power_off = psci_sys_poweroff;
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index ee9cb545e73b..47ea2bd42b10 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1281,6 +1281,9 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_BUS_CLK)
},
{ .compatible = "qcom,scm-ipq4019" },
+ { .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK) },
{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 3aa489dba30a..2a7687911c09 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1034,24 +1034,32 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
/* add svc client device(s) */
svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
- if (!svc)
- return -ENOMEM;
+ if (!svc) {
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
if (!svc->stratix10_svc_rsu) {
dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_kfifo;
}
ret = platform_device_add(svc->stratix10_svc_rsu);
- if (ret) {
- platform_device_put(svc->stratix10_svc_rsu);
- return ret;
- }
+ if (ret)
+ goto err_put_device;
+
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
+ return 0;
+
+err_put_device:
+ platform_device_put(svc->stratix10_svc_rsu);
+err_free_kfifo:
+ kfifo_free(&controller->svc_fifo);
return ret;
}
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 49c87e00fafb..620cf3fdd607 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -3,6 +3,7 @@ tegra-bpmp-y = bpmp.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_234_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
index 54d560c48398..182bfe396516 100644
--- a/drivers/firmware/tegra/bpmp-private.h
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -24,7 +24,8 @@ struct tegra_bpmp_ops {
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
index ae15940a078e..c32754055c60 100644
--- a/drivers/firmware/tegra/bpmp-tegra210.c
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -210,7 +210,7 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
priv->tx_irq_data = irq_get_irq_data(err);
if (!priv->tx_irq_data) {
dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
- return err;
+ return -ENOENT;
}
err = platform_get_irq_byname(pdev, "rx");
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 0742a90cb844..5654c5e9862b 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -809,7 +809,8 @@ static const struct dev_pm_ops tegra_bpmp_pm_ops = {
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 62f0d1a5dd32..c2d34dc8ba46 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -147,11 +147,14 @@ MOX_ATTR_RO(pubkey, "%s\n", pubkey);
static int mox_get_status(enum mbox_cmd cmd, u32 retval)
{
- if (MBOX_STS_CMD(retval) != cmd ||
- MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
+ if (MBOX_STS_CMD(retval) != cmd)
return -EIO;
else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
return -(int)MBOX_STS_VALUE(retval);
+ else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD)
+ return -ENOSYS;
+ else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
+ return -EIO;
else
return MBOX_STS_VALUE(retval);
}
@@ -201,11 +204,14 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
return ret;
ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
- if (ret < 0 && ret != -ENODATA) {
- return ret;
- } else if (ret == -ENODATA) {
+ if (ret == -ENODATA) {
dev_warn(rwtm->dev,
"Board does not have manufacturing information burned!\n");
+ } else if (ret == -ENOSYS) {
+ dev_notice(rwtm->dev,
+ "Firmware does not support the BOARD_INFO command\n");
+ } else if (ret < 0) {
+ return ret;
} else {
rwtm->serial_number = reply->status[1];
rwtm->serial_number <<= 32;
@@ -234,10 +240,13 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
return ret;
ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
- if (ret < 0 && ret != -ENODATA) {
- return ret;
- } else if (ret == -ENODATA) {
+ if (ret == -ENODATA) {
dev_warn(rwtm->dev, "Board has no public key burned!\n");
+ } else if (ret == -ENOSYS) {
+ dev_notice(rwtm->dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
} else {
u32 *s = reply->status;
@@ -251,6 +260,27 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
return 0;
}
+static int check_get_random_support(struct mox_rwtm *rwtm)
+{
+ struct armada_37xx_rwtm_tx_msg msg;
+ int ret;
+
+ msg.command = MBOX_CMD_GET_RANDOM;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = 4;
+
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+ if (ret < 0)
+ return ret;
+
+ return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+}
+
static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
@@ -488,6 +518,13 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
+ ret = check_get_random_support(rwtm);
+ if (ret < 0) {
+ dev_notice(dev,
+ "Firmware does not support the GET_RANDOM command\n");
+ goto free_channel;
+ }
+
rwtm->hwrng.name = DRIVER_NAME "_hwrng";
rwtm->hwrng.read = mox_hwrng_read;
rwtm->hwrng.priv = (unsigned long) rwtm;
@@ -505,6 +542,8 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
goto free_channel;
}
+ dev_info(dev, "HWRNG successfully registered\n");
+
return 0;
free_channel:
@@ -530,6 +569,7 @@ static int turris_mox_rwtm_remove(struct platform_device *pdev)
static const struct of_device_id turris_mox_rwtm_match[] = {
{ .compatible = "cznic,turris-mox-rwtm", },
+ { .compatible = "marvell,armada-3700-rwtm-firmware", },
{ },
};
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 33e15058d0dc..8cd454ee20c0 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -7,7 +7,7 @@ menuconfig FPGA
tristate "FPGA Configuration Framework"
help
Say Y here if you want support for configuring FPGAs from the
- kernel. The FPGA framework adds a FPGA manager class and FPGA
+ kernel. The FPGA framework adds an FPGA manager class and FPGA
manager drivers.
if FPGA
@@ -134,7 +134,7 @@ config FPGA_REGION
tristate "FPGA Region"
depends on FPGA_BRIDGE
help
- FPGA Region common code. A FPGA Region controls a FPGA Manager
+ FPGA Region common code. An FPGA Region controls an FPGA Manager
and the FPGA Bridges associated with either a reconfigurable
region of an FPGA or a whole FPGA.
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 5b130c4d9882..dfdf21ed34c4 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -199,16 +199,6 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
}
EXPORT_SYMBOL_GPL(alt_pr_register);
-void alt_pr_unregister(struct device *dev)
-{
- struct fpga_manager *mgr = dev_get_drvdata(dev);
-
- dev_dbg(dev, "%s\n", __func__);
-
- fpga_mgr_unregister(mgr);
-}
-EXPORT_SYMBOL_GPL(alt_pr_unregister);
-
MODULE_AUTHOR("Matthew Gerlach <matthew.gerlach@linux.intel.com>");
MODULE_DESCRIPTION("Altera Partial Reconfiguration IP Core");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 4299145ef347..587c82be12f7 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -953,6 +953,8 @@ static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
priv->cpu = target;
+ perf_pmu_migrate_context(&priv->pmu, cpu, target);
+
return 0;
}
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index e9266b2a357f..2bfb2ff86930 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -85,14 +85,14 @@ err_dev:
}
/**
- * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
+ * of_fpga_bridge_get - get an exclusive reference to an fpga bridge
*
- * @np: node pointer of a FPGA bridge
+ * @np: node pointer of an FPGA bridge
* @info: fpga image specific information
*
* Return fpga_bridge struct if successful.
* Return -EBUSY if someone already has a reference to the bridge.
- * Return -ENODEV if @np is not a FPGA Bridge.
+ * Return -ENODEV if @np is not an FPGA Bridge.
*/
struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
struct fpga_image_info *info)
@@ -113,11 +113,11 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
}
/**
- * fpga_bridge_get - get an exclusive reference to a fpga bridge
+ * fpga_bridge_get - get an exclusive reference to an fpga bridge
* @dev: parent device that fpga bridge was registered with
* @info: fpga manager info
*
- * Given a device, get an exclusive reference to a fpga bridge.
+ * Given a device, get an exclusive reference to an fpga bridge.
*
* Return: fpga bridge struct or IS_ERR() condition containing error code.
*/
@@ -224,7 +224,7 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put);
/**
* of_fpga_bridge_get_to_list - get a bridge, add it to a list
*
- * @np: node pointer of a FPGA bridge
+ * @np: node pointer of an FPGA bridge
* @info: fpga image specific information
* @bridge_list: list of FPGA bridges
*
@@ -313,7 +313,7 @@ ATTRIBUTE_GROUPS(fpga_bridge);
/**
* fpga_bridge_create - create and initialize a struct fpga_bridge
- * @dev: FPGA bridge device from pdev
+ * @parent: FPGA bridge device from pdev
* @name: FPGA bridge name
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
@@ -323,7 +323,7 @@ ATTRIBUTE_GROUPS(fpga_bridge);
*
* Return: struct fpga_bridge or NULL
*/
-struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
+struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name,
const struct fpga_bridge_ops *br_ops,
void *priv)
{
@@ -331,7 +331,7 @@ struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
int id, ret;
if (!name || !strlen(name)) {
- dev_err(dev, "Attempt to register with no name!\n");
+ dev_err(parent, "Attempt to register with no name!\n");
return NULL;
}
@@ -353,8 +353,8 @@ struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
device_initialize(&bridge->dev);
bridge->dev.groups = br_ops->groups;
bridge->dev.class = fpga_bridge_class;
- bridge->dev.parent = dev;
- bridge->dev.of_node = dev->of_node;
+ bridge->dev.parent = parent;
+ bridge->dev.of_node = parent->of_node;
bridge->dev.id = id;
ret = dev_set_name(&bridge->dev, "br%d", id);
@@ -373,7 +373,7 @@ error_kfree:
EXPORT_SYMBOL_GPL(fpga_bridge_create);
/**
- * fpga_bridge_free - free a fpga bridge created by fpga_bridge_create()
+ * fpga_bridge_free - free an fpga bridge created by fpga_bridge_create()
* @bridge: FPGA bridge struct
*/
void fpga_bridge_free(struct fpga_bridge *bridge)
@@ -392,12 +392,12 @@ static void devm_fpga_bridge_release(struct device *dev, void *res)
/**
* devm_fpga_bridge_create - create and init a managed struct fpga_bridge
- * @dev: FPGA bridge device from pdev
+ * @parent: FPGA bridge device from pdev
* @name: FPGA bridge name
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
*
- * This function is intended for use in a FPGA bridge driver's probe function.
+ * This function is intended for use in an FPGA bridge driver's probe function.
* After the bridge driver creates the struct with devm_fpga_bridge_create(), it
* should register the bridge with fpga_bridge_register(). The bridge driver's
* remove function should call fpga_bridge_unregister(). The bridge struct
@@ -408,7 +408,7 @@ static void devm_fpga_bridge_release(struct device *dev, void *res)
* Return: struct fpga_bridge or NULL
*/
struct fpga_bridge
-*devm_fpga_bridge_create(struct device *dev, const char *name,
+*devm_fpga_bridge_create(struct device *parent, const char *name,
const struct fpga_bridge_ops *br_ops, void *priv)
{
struct fpga_bridge **ptr, *bridge;
@@ -417,12 +417,12 @@ struct fpga_bridge
if (!ptr)
return NULL;
- bridge = fpga_bridge_create(dev, name, br_ops, priv);
+ bridge = fpga_bridge_create(parent, name, br_ops, priv);
if (!bridge) {
devres_free(ptr);
} else {
*ptr = bridge;
- devres_add(dev, ptr);
+ devres_add(parent, ptr);
}
return bridge;
@@ -430,7 +430,7 @@ struct fpga_bridge
EXPORT_SYMBOL_GPL(devm_fpga_bridge_create);
/**
- * fpga_bridge_register - register a FPGA bridge
+ * fpga_bridge_register - register an FPGA bridge
*
* @bridge: FPGA bridge struct
*
@@ -454,11 +454,11 @@ int fpga_bridge_register(struct fpga_bridge *bridge)
EXPORT_SYMBOL_GPL(fpga_bridge_register);
/**
- * fpga_bridge_unregister - unregister a FPGA bridge
+ * fpga_bridge_unregister - unregister an FPGA bridge
*
* @bridge: FPGA bridge struct
*
- * This function is intended for use in a FPGA bridge driver's remove function.
+ * This function is intended for use in an FPGA bridge driver's remove function.
*/
void fpga_bridge_unregister(struct fpga_bridge *bridge)
{
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index b85bc47c91a9..ecb4c3c795fa 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -26,7 +26,7 @@ struct fpga_mgr_devres {
};
/**
- * fpga_image_info_alloc - Allocate a FPGA image info struct
+ * fpga_image_info_alloc - Allocate an FPGA image info struct
* @dev: owning device
*
* Return: struct fpga_image_info or NULL
@@ -50,7 +50,7 @@ struct fpga_image_info *fpga_image_info_alloc(struct device *dev)
EXPORT_SYMBOL_GPL(fpga_image_info_alloc);
/**
- * fpga_image_info_free - Free a FPGA image info struct
+ * fpga_image_info_free - Free an FPGA image info struct
* @info: FPGA image info struct to free
*/
void fpga_image_info_free(struct fpga_image_info *info)
@@ -470,7 +470,7 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
}
/**
- * fpga_mgr_get - Given a device, get a reference to a fpga mgr.
+ * fpga_mgr_get - Given a device, get a reference to an fpga mgr.
* @dev: parent device that fpga mgr was registered with
*
* Return: fpga manager struct or IS_ERR() condition containing error code.
@@ -487,7 +487,7 @@ struct fpga_manager *fpga_mgr_get(struct device *dev)
EXPORT_SYMBOL_GPL(fpga_mgr_get);
/**
- * of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr.
+ * of_fpga_mgr_get - Given a device node, get a reference to an fpga mgr.
*
* @node: device node
*
@@ -506,7 +506,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
/**
- * fpga_mgr_put - release a reference to a fpga manager
+ * fpga_mgr_put - release a reference to an fpga manager
* @mgr: fpga manager structure
*/
void fpga_mgr_put(struct fpga_manager *mgr)
@@ -550,8 +550,8 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
/**
- * fpga_mgr_create - create and initialize a FPGA manager struct
- * @dev: fpga manager device from pdev
+ * fpga_mgr_create - create and initialize an FPGA manager struct
+ * @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
@@ -561,7 +561,7 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
*
* Return: pointer to struct fpga_manager or NULL
*/
-struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
+struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name,
const struct fpga_manager_ops *mops,
void *priv)
{
@@ -571,12 +571,12 @@ struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
if (!mops || !mops->write_complete || !mops->state ||
!mops->write_init || (!mops->write && !mops->write_sg) ||
(mops->write && mops->write_sg)) {
- dev_err(dev, "Attempt to register without fpga_manager_ops\n");
+ dev_err(parent, "Attempt to register without fpga_manager_ops\n");
return NULL;
}
if (!name || !strlen(name)) {
- dev_err(dev, "Attempt to register with no name!\n");
+ dev_err(parent, "Attempt to register with no name!\n");
return NULL;
}
@@ -597,8 +597,8 @@ struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
device_initialize(&mgr->dev);
mgr->dev.class = fpga_mgr_class;
mgr->dev.groups = mops->groups;
- mgr->dev.parent = dev;
- mgr->dev.of_node = dev->of_node;
+ mgr->dev.parent = parent;
+ mgr->dev.of_node = parent->of_node;
mgr->dev.id = id;
ret = dev_set_name(&mgr->dev, "fpga%d", id);
@@ -617,7 +617,7 @@ error_kfree:
EXPORT_SYMBOL_GPL(fpga_mgr_create);
/**
- * fpga_mgr_free - free a FPGA manager created with fpga_mgr_create()
+ * fpga_mgr_free - free an FPGA manager created with fpga_mgr_create()
* @mgr: fpga manager struct
*/
void fpga_mgr_free(struct fpga_manager *mgr)
@@ -636,12 +636,12 @@ static void devm_fpga_mgr_release(struct device *dev, void *res)
/**
* devm_fpga_mgr_create - create and initialize a managed FPGA manager struct
- * @dev: fpga manager device from pdev
+ * @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
- * This function is intended for use in a FPGA manager driver's probe function.
+ * This function is intended for use in an FPGA manager driver's probe function.
* After the manager driver creates the manager struct with
* devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The
* manager driver's remove function should call fpga_mgr_unregister(). The
@@ -651,7 +651,7 @@ static void devm_fpga_mgr_release(struct device *dev, void *res)
*
* Return: pointer to struct fpga_manager or NULL
*/
-struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
+struct fpga_manager *devm_fpga_mgr_create(struct device *parent, const char *name,
const struct fpga_manager_ops *mops,
void *priv)
{
@@ -661,20 +661,20 @@ struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
if (!dr)
return NULL;
- dr->mgr = fpga_mgr_create(dev, name, mops, priv);
+ dr->mgr = fpga_mgr_create(parent, name, mops, priv);
if (!dr->mgr) {
devres_free(dr);
return NULL;
}
- devres_add(dev, dr);
+ devres_add(parent, dr);
return dr->mgr;
}
EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
/**
- * fpga_mgr_register - register a FPGA manager
+ * fpga_mgr_register - register an FPGA manager
* @mgr: fpga manager struct
*
* Return: 0 on success, negative error code otherwise.
@@ -706,10 +706,10 @@ error_device:
EXPORT_SYMBOL_GPL(fpga_mgr_register);
/**
- * fpga_mgr_unregister - unregister a FPGA manager
+ * fpga_mgr_unregister - unregister an FPGA manager
* @mgr: fpga manager struct
*
- * This function is intended for use in a FPGA manager driver's remove function.
+ * This function is intended for use in an FPGA manager driver's remove function.
*/
void fpga_mgr_unregister(struct fpga_manager *mgr)
{
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index c3134b89c3fe..a4838715221f 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -33,14 +33,14 @@ struct fpga_region *fpga_region_class_find(
EXPORT_SYMBOL_GPL(fpga_region_class_find);
/**
- * fpga_region_get - get an exclusive reference to a fpga region
+ * fpga_region_get - get an exclusive reference to an fpga region
* @region: FPGA Region struct
*
* Caller should call fpga_region_put() when done with region.
*
* Return fpga_region struct if successful.
* Return -EBUSY if someone already has a reference to the region.
- * Return -ENODEV if @np is not a FPGA Region.
+ * Return -ENODEV if @np is not an FPGA Region.
*/
static struct fpga_region *fpga_region_get(struct fpga_region *region)
{
@@ -181,7 +181,7 @@ ATTRIBUTE_GROUPS(fpga_region);
/**
* fpga_region_create - alloc and init a struct fpga_region
- * @dev: device parent
+ * @parent: device parent
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
*
@@ -192,7 +192,7 @@ ATTRIBUTE_GROUPS(fpga_region);
* Return: struct fpga_region or NULL
*/
struct fpga_region
-*fpga_region_create(struct device *dev,
+*fpga_region_create(struct device *parent,
struct fpga_manager *mgr,
int (*get_bridges)(struct fpga_region *))
{
@@ -214,8 +214,8 @@ struct fpga_region
device_initialize(&region->dev);
region->dev.class = fpga_region_class;
- region->dev.parent = dev;
- region->dev.of_node = dev->of_node;
+ region->dev.parent = parent;
+ region->dev.of_node = parent->of_node;
region->dev.id = id;
ret = dev_set_name(&region->dev, "region%d", id);
@@ -234,7 +234,7 @@ err_free:
EXPORT_SYMBOL_GPL(fpga_region_create);
/**
- * fpga_region_free - free a FPGA region created by fpga_region_create()
+ * fpga_region_free - free an FPGA region created by fpga_region_create()
* @region: FPGA region
*/
void fpga_region_free(struct fpga_region *region)
@@ -253,11 +253,11 @@ static void devm_fpga_region_release(struct device *dev, void *res)
/**
* devm_fpga_region_create - create and initialize a managed FPGA region struct
- * @dev: device parent
+ * @parent: device parent
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
*
- * This function is intended for use in a FPGA region driver's probe function.
+ * This function is intended for use in an FPGA region driver's probe function.
* After the region driver creates the region struct with
* devm_fpga_region_create(), it should register it with fpga_region_register().
* The region driver's remove function should call fpga_region_unregister().
@@ -268,7 +268,7 @@ static void devm_fpga_region_release(struct device *dev, void *res)
* Return: struct fpga_region or NULL
*/
struct fpga_region
-*devm_fpga_region_create(struct device *dev,
+*devm_fpga_region_create(struct device *parent,
struct fpga_manager *mgr,
int (*get_bridges)(struct fpga_region *))
{
@@ -278,12 +278,12 @@ struct fpga_region
if (!ptr)
return NULL;
- region = fpga_region_create(dev, mgr, get_bridges);
+ region = fpga_region_create(parent, mgr, get_bridges);
if (!region) {
devres_free(ptr);
} else {
*ptr = region;
- devres_add(dev, ptr);
+ devres_add(parent, ptr);
}
return region;
@@ -291,7 +291,7 @@ struct fpga_region
EXPORT_SYMBOL_GPL(devm_fpga_region_create);
/**
- * fpga_region_register - register a FPGA region
+ * fpga_region_register - register an FPGA region
* @region: FPGA region
*
* Return: 0 or -errno
@@ -303,10 +303,10 @@ int fpga_region_register(struct fpga_region *region)
EXPORT_SYMBOL_GPL(fpga_region_register);
/**
- * fpga_region_unregister - unregister a FPGA region
+ * fpga_region_unregister - unregister an FPGA region
* @region: FPGA region
*
- * This function is intended for use in a FPGA region driver's remove function.
+ * This function is intended for use in an FPGA region driver's remove function.
*/
void fpga_region_unregister(struct fpga_region *region)
{
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 114a64d2b7a4..1afb41aa20d7 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -374,11 +374,13 @@ static int machxo2_spi_probe(struct spi_device *spi)
return devm_fpga_mgr_register(dev, mgr);
}
+#ifdef CONFIG_OF
static const struct of_device_id of_match[] = {
{ .compatible = "lattice,machxo2-slave-spi", },
{}
};
MODULE_DEVICE_TABLE(of, of_match);
+#endif
static const struct spi_device_id lattice_ids[] = {
{ "machxo2-slave-spi", 0 },
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index e405309baadc..e3c25576b6b9 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -181,7 +181,7 @@ static int child_regions_with_firmware(struct device_node *overlay)
* @region: FPGA region
* @overlay: overlay applied to the FPGA region
*
- * Given an overlay applied to a FPGA region, parse the FPGA image specific
+ * Given an overlay applied to an FPGA region, parse the FPGA image specific
* info in the overlay and do some checking.
*
* Returns:
@@ -273,7 +273,7 @@ ret_no_info:
* @region: FPGA region that the overlay was applied to
* @nd: overlay notification data
*
- * Called when an overlay targeted to a FPGA Region is about to be applied.
+ * Called when an overlay targeted to an FPGA Region is about to be applied.
* Parses the overlay for properties that influence how the FPGA will be
* programmed and does some checking. If the checks pass, programs the FPGA.
* If the checks fail, overlay is rejected and does not get added to the
@@ -336,8 +336,8 @@ static void of_fpga_region_notify_post_remove(struct fpga_region *region,
* @action: notifier action
* @arg: reconfig data
*
- * This notifier handles programming a FPGA when a "firmware-name" property is
- * added to a fpga-region.
+ * This notifier handles programming an FPGA when a "firmware-name" property is
+ * added to an fpga-region.
*
* Returns NOTIFY_OK or error if FPGA programming fails.
*/
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 657a70c5fc99..a2cea500f7cc 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -271,7 +271,7 @@ static int s10_send_buf(struct fpga_manager *mgr, const char *buf, size_t count)
}
/*
- * Send a FPGA image to privileged layers to write to the FPGA. When done
+ * Send an FPGA image to privileged layers to write to the FPGA. When done
* sending, free all service layer buffers we allocated in write_init.
*/
static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
@@ -454,6 +454,7 @@ static int s10_remove(struct platform_device *pdev)
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
+ fpga_mgr_free(mgr);
stratix10_svc_free_channel(priv->chan);
return 0;
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4e60e84cd17a..59ddc9fd5bca 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -724,7 +724,7 @@ static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
rc = count;
fail:
*offset = off;
- return count;
+ return rc;
}
static ssize_t cfam_write(struct file *filep, const char __user *buf,
@@ -761,7 +761,7 @@ static ssize_t cfam_write(struct file *filep, const char __user *buf,
rc = count;
fail:
*offset = off;
- return count;
+ return rc;
}
static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
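
Note on the two cfam hunks above: the fail path previously returned the requested count even when the access failed part-way, so the change makes the shared exit label return rc, which carries either the byte count or a negative errno. A minimal sketch of the resulting pattern, using a hypothetical demo_copy() helper rather than the driver's real accessors:

#include <linux/errno.h>
#include <linux/types.h>

/* hypothetical helper standing in for the device access, not from the patch */
int demo_copy(void *buf, size_t count, loff_t off);

ssize_t demo_xfer(void *buf, size_t count, loff_t *offset)
{
	loff_t off = *offset;
	ssize_t rc;

	rc = demo_copy(buf, count, off);
	if (rc)
		goto fail;

	off += count;
	rc = count;		/* success: report how much was transferred */
fail:
	*offset = off;
	return rc;		/* no longer unconditionally 'count' */
}
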
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index 90dbe58ca1ed..8606e55c1721 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -92,7 +92,7 @@ static const u32 fsi_base = 0xa0000000;
static u16 aspeed_fsi_divisor = FSI_DIVISOR_DEFAULT;
module_param_named(bus_div,aspeed_fsi_divisor, ushort, 0);
-#define OPB_POLL_TIMEOUT 10000
+#define OPB_POLL_TIMEOUT 500
static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
u32 val, u32 transfer_size)
@@ -101,11 +101,15 @@ static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
u32 reg, status;
int ret;
- writel(CMD_WRITE, base + OPB0_RW);
- writel(transfer_size, base + OPB0_XFER_SIZE);
- writel(addr, base + OPB0_FSI_ADDR);
- writel(val, base + OPB0_FSI_DATA_W);
- writel(0x1, base + OPB_IRQ_CLEAR);
+ /*
+ * The ordering of these writes up until the trigger
+ * write does not matter, so use writel_relaxed.
+ */
+ writel_relaxed(CMD_WRITE, base + OPB0_RW);
+ writel_relaxed(transfer_size, base + OPB0_XFER_SIZE);
+ writel_relaxed(addr, base + OPB0_FSI_ADDR);
+ writel_relaxed(val, base + OPB0_FSI_DATA_W);
+ writel_relaxed(0x1, base + OPB_IRQ_CLEAR);
writel(0x1, base + OPB_TRIGGER);
ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
@@ -149,10 +153,14 @@ static int __opb_read(struct fsi_master_aspeed *aspeed, uint32_t addr,
u32 result, reg;
int status, ret;
- writel(CMD_READ, base + OPB0_RW);
- writel(transfer_size, base + OPB0_XFER_SIZE);
- writel(addr, base + OPB0_FSI_ADDR);
- writel(0x1, base + OPB_IRQ_CLEAR);
+ /*
+ * The ordering of these writes up until the trigger
+ * write does not matter, so use writel_relaxed.
+ */
+ writel_relaxed(CMD_READ, base + OPB0_RW);
+ writel_relaxed(transfer_size, base + OPB0_XFER_SIZE);
+ writel_relaxed(addr, base + OPB0_FSI_ADDR);
+ writel_relaxed(0x1, base + OPB_IRQ_CLEAR);
writel(0x1, base + OPB_TRIGGER);
ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
@@ -525,7 +533,6 @@ static int tacoma_cabled_fsi_fixup(struct device *dev)
static int fsi_master_aspeed_probe(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed;
- struct resource *res;
int rc, links, reg;
__be32 raw;
@@ -541,8 +548,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
aspeed->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- aspeed->base = devm_ioremap_resource(&pdev->dev, res);
+ aspeed->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aspeed->base))
return PTR_ERR(aspeed->base);
@@ -645,6 +651,7 @@ static const struct of_device_id fsi_master_aspeed_match[] = {
{ .compatible = "aspeed,ast2600-fsi-master" },
{ },
};
+MODULE_DEVICE_TABLE(of, fsi_master_aspeed_match);
static struct platform_driver fsi_master_aspeed_driver = {
.driver = {
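
The __opb_write()/__opb_read() hunks above relax every setup register write and keep a single ordered writel() for the trigger, then poll completion against the shortened 500us OPB_POLL_TIMEOUT. A sketch of that idiom with made-up DEMO_* register offsets (not the driver's actual layout):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_SETUP_A	0x00
#define DEMO_SETUP_B	0x04
#define DEMO_TRIGGER	0x08
#define DEMO_STATUS	0x0c
#define DEMO_DONE	BIT(0)

static int demo_kick(void __iomem *base, u32 a, u32 b)
{
	u32 sts;

	writel_relaxed(a, base + DEMO_SETUP_A);	/* order irrelevant ... */
	writel_relaxed(b, base + DEMO_SETUP_B);	/* ... until the trigger */
	writel(1, base + DEMO_TRIGGER);		/* ordered: starts the op */

	/* poll every 1us, give up after 500us */
	return readl_poll_timeout(base + DEMO_STATUS, sts,
				  sts & DEMO_DONE, 1, 500);
}
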
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 57a779a89b07..24292acdbaf8 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1309,7 +1309,6 @@ static int fsi_master_acf_probe(struct platform_device *pdev)
master->cf_mem = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(master->cf_mem)) {
rc = PTR_ERR(master->cf_mem);
- dev_err(&pdev->dev, "Error %d mapping coldfire memory\n", rc);
goto err_free;
}
dev_dbg(&pdev->dev, "DRAM allocation @%x\n", master->cf_mem_addr);
@@ -1427,6 +1426,7 @@ static const struct of_device_id fsi_master_acf_match[] = {
{ .compatible = "aspeed,ast2500-cf-fsi-master" },
{ },
};
+MODULE_DEVICE_TABLE(of, fsi_master_acf_match);
static struct platform_driver fsi_master_acf = {
.driver = {
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index aa97c4a250cb..7d5f29b4b595 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -882,6 +882,7 @@ static const struct of_device_id fsi_master_gpio_match[] = {
{ .compatible = "fsi-master-gpio" },
{ },
};
+MODULE_DEVICE_TABLE(of, fsi_master_gpio_match);
static struct platform_driver fsi_master_gpio_driver = {
.driver = {
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index 10ca2e290655..b223f0ef337b 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -223,7 +223,8 @@ static const struct file_operations occ_fops = {
.release = occ_release,
};
-static int occ_verify_checksum(struct occ_response *resp, u16 data_length)
+static int occ_verify_checksum(struct occ *occ, struct occ_response *resp,
+ u16 data_length)
{
/* Fetch the two bytes after the data for the checksum. */
u16 checksum_resp = get_unaligned_be16(&resp->data[data_length]);
@@ -238,8 +239,11 @@ static int occ_verify_checksum(struct occ_response *resp, u16 data_length)
for (i = 0; i < data_length; ++i)
checksum += resp->data[i];
- if (checksum != checksum_resp)
+ if (checksum != checksum_resp) {
+ dev_err(occ->dev, "Bad checksum: %04x!=%04x\n", checksum,
+ checksum_resp);
return -EBADMSG;
+ }
return 0;
}
@@ -495,6 +499,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
goto done;
if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
+ resp->return_status == OCC_RESP_CRIT_INIT ||
resp->seq_no != seq_no) {
rc = -ETIMEDOUT;
@@ -532,7 +537,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
}
*resp_len = resp_data_length + 7;
- rc = occ_verify_checksum(resp, resp_data_length);
+ rc = occ_verify_checksum(occ, resp, resp_data_length);
done:
mutex_unlock(&occ->occ_lock);
@@ -635,6 +640,7 @@ static const struct of_device_id occ_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, occ_match);
static struct platform_driver occ_driver = {
.driver = {
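
For reference, the value occ_verify_checksum() compares (and now reports via dev_err() on mismatch) is a 16-bit additive sum checked against the big-endian trailer that follows the payload. A simplified standalone sketch of that comparison, summing only the payload bytes; in the full driver the response header bytes are folded into the sum as well:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/unaligned.h>

int demo_verify_checksum(const u8 *data, u16 data_length)
{
	/* the expected value sits in the two bytes after the payload */
	u16 checksum_resp = get_unaligned_be16(data + data_length);
	u16 checksum = 0;
	u16 i;

	for (i = 0; i < data_length; ++i)
		checksum += data[i];

	return checksum == checksum_resp ? 0 : -EBADMSG;
}
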
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index bfd5e5da8020..84cb965bfed5 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -325,7 +325,8 @@ static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
static int sbefifo_request_reset(struct sbefifo *sbefifo)
{
struct device *dev = &sbefifo->fsi_dev->dev;
- u32 status, timeout;
+ unsigned long end_time;
+ u32 status;
int rc;
dev_dbg(dev, "Requesting FIFO reset\n");
@@ -341,7 +342,8 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
}
/* Wait for it to complete */
- for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
+ end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
+ while (!time_after(jiffies, end_time)) {
rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
if (rc) {
dev_err(dev, "Failed to read UP fifo status during reset"
@@ -355,7 +357,7 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
return 0;
}
- msleep(1);
+ cond_resched();
}
dev_err(dev, "FIFO reset timed out\n");
@@ -400,7 +402,7 @@ static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
/* The FIFO already contains a reset request from the SBE ? */
if (down_status & SBEFIFO_STS_RESET_REQ) {
dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
- rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
+ rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
if (rc) {
sbefifo->broken = true;
dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index b45bfab7b7f5..da1486bb6a14 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -38,9 +38,10 @@
#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
#define SCOM_STATUS_PIB_RESP_SHIFT 12
-#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
- SCOM_STATUS_PARITY | \
- SCOM_STATUS_PIB_ABORT | \
+#define SCOM_STATUS_FSI2PIB_ERROR (SCOM_STATUS_PROTECTION | \
+ SCOM_STATUS_PARITY | \
+ SCOM_STATUS_PIB_ABORT)
+#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_FSI2PIB_ERROR | \
SCOM_STATUS_PIB_RESP_MASK)
/* SCOM address encodings */
#define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
@@ -60,7 +61,6 @@
#define XSCOM_ADDR_FORM1_HI_SHIFT 20
/* Retries */
-#define SCOM_MAX_RETRIES 100 /* Retries on busy */
#define SCOM_MAX_IND_RETRIES 10 /* Retries indirect not ready */
struct scom_device {
@@ -240,14 +240,15 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
{
uint32_t dummy = -1;
- if (status & SCOM_STATUS_PROTECTION)
- return -EPERM;
- if (status & SCOM_STATUS_PARITY) {
+ if (status & SCOM_STATUS_FSI2PIB_ERROR)
fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
sizeof(uint32_t));
+
+ if (status & SCOM_STATUS_PROTECTION)
+ return -EPERM;
+ if (status & SCOM_STATUS_PARITY)
return -EIO;
- }
- /* Return -EBUSY on PIB abort to force a retry */
+
if (status & SCOM_STATUS_PIB_ABORT)
return -EBUSY;
return 0;
@@ -284,69 +285,39 @@ static int handle_pib_status(struct scom_device *scom, uint8_t status)
static int put_scom(struct scom_device *scom, uint64_t value,
uint64_t addr)
{
- uint32_t status, dummy = -1;
- int rc, retries;
-
- for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
- rc = raw_put_scom(scom, value, addr, &status);
- if (rc) {
- /* Try resetting the bridge if FSI fails */
- if (rc != -ENODEV && retries == 0) {
- fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
- &dummy, sizeof(uint32_t));
- rc = -EBUSY;
- } else
- return rc;
- } else
- rc = handle_fsi2pib_status(scom, status);
- if (rc && rc != -EBUSY)
- break;
- if (rc == 0) {
- rc = handle_pib_status(scom,
- (status & SCOM_STATUS_PIB_RESP_MASK)
- >> SCOM_STATUS_PIB_RESP_SHIFT);
- if (rc && rc != -EBUSY)
- break;
- }
- if (rc == 0)
- break;
- msleep(1);
- }
- return rc;
+ uint32_t status;
+ int rc;
+
+ rc = raw_put_scom(scom, value, addr, &status);
+ if (rc == -ENODEV)
+ return rc;
+
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc)
+ return rc;
+
+ return handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
}
static int get_scom(struct scom_device *scom, uint64_t *value,
uint64_t addr)
{
- uint32_t status, dummy = -1;
- int rc, retries;
-
- for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
- rc = raw_get_scom(scom, value, addr, &status);
- if (rc) {
- /* Try resetting the bridge if FSI fails */
- if (rc != -ENODEV && retries == 0) {
- fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
- &dummy, sizeof(uint32_t));
- rc = -EBUSY;
- } else
- return rc;
- } else
- rc = handle_fsi2pib_status(scom, status);
- if (rc && rc != -EBUSY)
- break;
- if (rc == 0) {
- rc = handle_pib_status(scom,
- (status & SCOM_STATUS_PIB_RESP_MASK)
- >> SCOM_STATUS_PIB_RESP_SHIFT);
- if (rc && rc != -EBUSY)
- break;
- }
- if (rc == 0)
- break;
- msleep(1);
- }
- return rc;
+ uint32_t status;
+ int rc;
+
+ rc = raw_get_scom(scom, value, addr, &status);
+ if (rc == -ENODEV)
+ return rc;
+
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc)
+ return rc;
+
+ return handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
}
static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 63b84ba161dc..fab571016adf 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -782,6 +782,18 @@ config GPIO_MSC313
Say Y here to support the main GPIO block on MStar/SigmaStar
ARMv7 based SoCs.
+config GPIO_IDT3243X
+ tristate "IDT 79RC3243X GPIO support"
+ depends on MIKROTIK_RB532 || COMPILE_TEST
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Select this option to enable the GPIO driver for
+ IDT 79RC3243X-based devices like the Mikrotik RB532.

+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-idt3243x.
+
endmenu
menu "Port-mapped I/O GPIO drivers"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d7c81e1611a4..32a32659866a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
+obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 50ad0280fd78..55b40299ebfa 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -44,11 +44,12 @@ struct idio_16_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
unsigned long irq_mask;
- unsigned base;
- unsigned out_state;
+ unsigned int base;
+ unsigned int out_state;
};
-static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int idio_16_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
{
if (offset > 15)
return GPIO_LINE_DIRECTION_IN;
@@ -56,22 +57,23 @@ static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static int idio_16_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int idio_16_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
{
return 0;
}
static int idio_16_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
chip->set(chip, offset, value);
return 0;
}
-static int idio_16_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned mask = BIT(offset-16);
+ const unsigned int mask = BIT(offset-16);
if (offset < 16)
return -EINVAL;
@@ -96,10 +98,11 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
return 0;
}
-static void idio_16_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned mask = BIT(offset);
+ const unsigned int mask = BIT(offset);
unsigned long flags;
if (offset > 15)
@@ -180,7 +183,7 @@ static void idio_16_irq_unmask(struct irq_data *data)
}
}
-static int idio_16_irq_set_type(struct irq_data *data, unsigned flow_type)
+static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
/* The only valid irq types are none and both-edges */
if (flow_type != IRQ_TYPE_NONE &&
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 0386ede53f3a..c55e821c63b6 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -113,10 +113,8 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
if (pdata->gpio_en_mask & (1 << i))
dev->lut[gpios++] = 1 << i;
- if (gpios < 1) {
- ret = -EINVAL;
- goto err;
- }
+ if (gpios < 1)
+ return -EINVAL;
gc = &dev->gpio_chip;
gc->direction_input = adp5520_gpio_direction_input;
@@ -148,18 +146,10 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to write\n");
- goto err;
+ return ret;
}
- ret = devm_gpiochip_add_data(&pdev->dev, &dev->gpio_chip, dev);
- if (ret)
- goto err;
-
- platform_set_drvdata(pdev, dev);
- return 0;
-
-err:
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &dev->gpio_chip, dev);
}
static struct platform_driver adp5520_gpio_driver = {
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index b5917c48e4dc..6af51feda06f 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -78,7 +78,6 @@ static const struct gpio_chip altr_a10sr_gc = {
static int altr_a10sr_gpio_probe(struct platform_device *pdev)
{
struct altr_a10sr_gpio *gpio;
- int ret;
struct altr_a10sr *a10sr = dev_get_drvdata(pdev->dev.parent);
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -91,15 +90,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
gpio->gp.parent = pdev->dev.parent;
gpio->gp.of_node = pdev->dev.of_node;
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
}
static const struct of_device_id altr_a10sr_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 678ddd375891..9b780dc5d390 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -234,7 +234,6 @@ static int ath79_gpio_probe(struct platform_device *pdev)
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
- platform_set_drvdata(pdev, ctrl);
if (np) {
err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
@@ -290,13 +289,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- err = devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
- if (err) {
- dev_err(dev,
- "cannot add AR71xx GPIO chip, error=%d", err);
- return err;
- }
- return 0;
+ return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
}
static struct platform_driver ath79_gpio_driver = {
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index df6102b57734..9a4d55f703bb 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -97,25 +97,16 @@ static const struct gpio_chip template_chip = {
static int bd9571mwv_gpio_probe(struct platform_device *pdev)
{
struct bd9571mwv_gpio *gpio;
- int ret;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
return -ENOMEM;
- platform_set_drvdata(pdev, gpio);
-
gpio->regmap = dev_get_regmap(pdev->dev.parent, NULL);
gpio->chip = template_chip;
gpio->chip.parent = pdev->dev.parent;
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
}
static const struct platform_device_id bd9571mwv_gpio_id_table[] = {
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 9aa59afdcbbf..559188d80c2b 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -196,7 +196,6 @@ static int da9052_gpio_probe(struct platform_device *pdev)
{
struct da9052_gpio *gpio;
struct da9052_pdata *pdata;
- int ret;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -209,15 +208,7 @@ static int da9052_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
gpio->gp.base = pdata->gpio_base;
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
}
static struct platform_driver da9052_gpio_driver = {
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 6ad0c37b862e..49446a030f10 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -133,7 +133,6 @@ static int da9055_gpio_probe(struct platform_device *pdev)
{
struct da9055_gpio *gpio;
struct da9055_pdata *pdata;
- int ret;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -146,15 +145,7 @@ static int da9055_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
gpio->gp.base = pdata->gpio_base;
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
}
static struct platform_driver da9055_gpio_driver = {
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index d3233cc4b76b..3eb13d6d31ef 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -13,17 +13,15 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/platform_data/gpio-dwapb.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset.h>
-#include <linux/spinlock.h>
-#include <linux/platform_data/gpio-dwapb.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "gpiolib.h"
#include "gpiolib-acpi.h"
@@ -297,9 +295,6 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
irq_hw_number_t bit = irqd_to_hwirq(d);
unsigned long level, polarity, flags;
- if (type & ~IRQ_TYPE_SENSE_MASK)
- return -EINVAL;
-
spin_lock_irqsave(&gc->bgpio_lock, flags);
level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
polarity = dwapb_read(gpio, GPIO_INT_POLARITY);
@@ -531,17 +526,13 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
struct dwapb_port_property *pp)
{
- struct device_node *np = NULL;
- int irq = -ENXIO, j;
-
- if (fwnode_property_read_bool(fwnode, "interrupt-controller"))
- np = to_of_node(fwnode);
+ int irq, j;
for (j = 0; j < pp->ngpio; j++) {
- if (np)
- irq = of_irq_get(np, j);
- else if (has_acpi_companion(dev))
+ if (has_acpi_companion(dev))
irq = platform_get_irq_optional(to_platform_device(dev), j);
+ else
+ irq = fwnode_irq_get(fwnode, j);
if (irq > 0)
pp->irq[j] = irq;
}
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
new file mode 100644
index 000000000000..50003ad2e589
--- /dev/null
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the GPIO and interrupt controller on IDT/Renesas 79RC3243x SoCs */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define IDT_PIC_IRQ_PEND 0x00
+#define IDT_PIC_IRQ_MASK 0x08
+
+#define IDT_GPIO_DIR 0x00
+#define IDT_GPIO_DATA 0x04
+#define IDT_GPIO_ILEVEL 0x08
+#define IDT_GPIO_ISTAT 0x0C
+
+struct idt_gpio_ctrl {
+ struct gpio_chip gc;
+ void __iomem *pic;
+ void __iomem *gpio;
+ u32 mask_cache;
+};
+
+static void idt_gpio_dispatch(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ unsigned int bit, virq;
+ unsigned long pending;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(ctrl->pic + IDT_PIC_IRQ_PEND);
+ pending &= ~ctrl->mask_cache;
+ for_each_set_bit(bit, &pending, gc->ngpio) {
+ virq = irq_linear_revmap(gc->irq.domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ unsigned int sense = flow_type & IRQ_TYPE_SENSE_MASK;
+ unsigned long flags;
+ u32 ilevel;
+
+ /* hardware only supports level triggered */
+ if (sense == IRQ_TYPE_NONE || (sense & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ ilevel = readl(ctrl->gpio + IDT_GPIO_ILEVEL);
+ if (sense & IRQ_TYPE_LEVEL_HIGH)
+ ilevel |= BIT(d->hwirq);
+ else if (sense & IRQ_TYPE_LEVEL_LOW)
+ ilevel &= ~BIT(d->hwirq);
+
+ writel(ilevel, ctrl->gpio + IDT_GPIO_ILEVEL);
+ irq_set_handler_locked(d, handle_level_irq);
+
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ return 0;
+}
+
+static void idt_gpio_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+
+ writel(~BIT(d->hwirq), ctrl->gpio + IDT_GPIO_ISTAT);
+}
+
+static void idt_gpio_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ ctrl->mask_cache |= BIT(d->hwirq);
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void idt_gpio_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ ctrl->mask_cache &= ~BIT(d->hwirq);
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static int idt_gpio_irq_init_hw(struct gpio_chip *gc)
+{
+ struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+
+ /* Mask interrupts. */
+ ctrl->mask_cache = 0xffffffff;
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+
+ return 0;
+}
+
+static struct irq_chip idt_gpio_irqchip = {
+ .name = "IDTGPIO",
+ .irq_mask = idt_gpio_mask,
+ .irq_ack = idt_gpio_ack,
+ .irq_unmask = idt_gpio_unmask,
+ .irq_set_type = idt_gpio_irq_set_type
+};
+
+static int idt_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
+ struct idt_gpio_ctrl *ctrl;
+ unsigned int parent_irq;
+ int ngpios;
+ int ret;
+
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->gpio = devm_platform_ioremap_resource_byname(pdev, "gpio");
+ if (IS_ERR(ctrl->gpio))
+ return PTR_ERR(ctrl->gpio);
+
+ ctrl->gc.parent = dev;
+
+ ret = bgpio_init(&ctrl->gc, &pdev->dev, 4, ctrl->gpio + IDT_GPIO_DATA,
+ NULL, NULL, ctrl->gpio + IDT_GPIO_DIR, NULL, 0);
+ if (ret) {
+ dev_err(dev, "bgpio_init failed\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ if (!ret)
+ ctrl->gc.ngpio = ngpios;
+
+ if (device_property_read_bool(dev, "interrupt-controller")) {
+ ctrl->pic = devm_platform_ioremap_resource_byname(pdev, "pic");
+ if (IS_ERR(ctrl->pic))
+ return PTR_ERR(ctrl->pic);
+
+ parent_irq = platform_get_irq(pdev, 0);
+ if (!parent_irq)
+ return -EINVAL;
+
+ girq = &ctrl->gc.irq;
+ girq->chip = &idt_gpio_irqchip;
+ girq->init_hw = idt_gpio_irq_init_hw;
+ girq->parent_handler = idt_gpio_dispatch;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = parent_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ }
+
+ return devm_gpiochip_add_data(&pdev->dev, &ctrl->gc, ctrl);
+}
+
+static const struct of_device_id idt_gpio_of_match[] = {
+ { .compatible = "idt,32434-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, idt_gpio_of_match);
+
+static struct platform_driver idt_gpio_driver = {
+ .probe = idt_gpio_probe,
+ .driver = {
+ .name = "idt3243x-gpio",
+ .of_match_table = idt_gpio_of_match,
+ },
+};
+module_platform_driver(idt_gpio_driver);
+
+MODULE_DESCRIPTION("IDT 79RC3243x GPIO/PIC Driver");
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 015632cf159f..992cc958a43f 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -114,10 +114,8 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
}
base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(base)) {
- dev_err(dev, "Failed to map I/O base\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
logicvc_gpio_regmap_config.max_register = resource_size(&res) -
logicvc_gpio_regmap_config.reg_stride;
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index e1244520cf7d..fcde6708b5df 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -123,14 +123,14 @@ static int lp87565_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
BIT(offset +
- __ffs(LP87565_GOIO1_OD)),
+ __ffs(LP87565_GPIO1_OD)),
BIT(offset +
- __ffs(LP87565_GOIO1_OD)));
+ __ffs(LP87565_GPIO1_OD)));
case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
BIT(offset +
- __ffs(LP87565_GOIO1_OD)), 0);
+ __ffs(LP87565_GPIO1_OD)), 0);
default:
return -ENOTSUPP;
}
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d7e73876a3b9..0a9d746a0fe0 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -144,12 +144,9 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
unsigned int offset, int value)
{
+ struct gpio_chip *gc = &chip->gc;
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
int curr, irq, irq_type, ret = 0;
- struct gpio_desc *desc;
- struct gpio_chip *gc;
-
- gc = &chip->gc;
- desc = &gc->gpiodev->descs[offset];
mutex_lock(&chip->lock);
@@ -369,7 +366,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
priv->chip = chip;
priv->offset = i;
- priv->desc = &gc->gpiodev->descs[i];
+ priv->desc = gpiochip_get_desc(gc, i);
debugfs_create_file(name, 0200, chip->dbg_dir, priv,
&gpio_mockup_debugfs_ops);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 4b9157a69fca..50b321a1ab1b 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -405,7 +405,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
- IRQF_SHARED, "gpio-cascade",
+ IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
mpc8xxx_gc);
if (ret) {
dev_err(&pdev->dev,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 524b668eb1ac..31a336b86ff2 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -229,14 +229,14 @@ static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
return rv;
}
-static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct mxs_gpio_port *port = gpiochip_get_data(gc);
return irq_find_mapping(port->domain, offset);
}
-static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
+static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct mxs_gpio_port *port = gpiochip_get_data(gc);
u32 mask = 1 << offset;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c91d05651596..f5cfc0698799 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1241,6 +1241,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
+ { .compatible = "onnn,pca9655", .data = OF_953X(16, PCA_INT), },
{ .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
{ }
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 134cedf151a7..69c219742083 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -178,12 +178,6 @@ static int gpio_regmap_direction_output(struct gpio_chip *chip,
return gpio_regmap_set_direction(chip, offset, true);
}
-void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data)
-{
- gpio->driver_data = data;
-}
-EXPORT_SYMBOL_GPL(gpio_regmap_set_drvdata);
-
void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio)
{
return gpio->driver_data;
@@ -226,6 +220,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
return ERR_PTR(-ENOMEM);
gpio->parent = config->parent;
+ gpio->driver_data = config->drvdata;
gpio->regmap = config->regmap;
gpio->ngpio_per_reg = config->ngpio_per_reg;
gpio->reg_stride = config->reg_stride;
@@ -311,9 +306,9 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio)
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
-static void devm_gpio_regmap_unregister(struct device *dev, void *res)
+static void devm_gpio_regmap_unregister(void *res)
{
- gpio_regmap_unregister(*(struct gpio_regmap **)res);
+ gpio_regmap_unregister(res);
}
/**
@@ -330,20 +325,17 @@ static void devm_gpio_regmap_unregister(struct device *dev, void *res)
struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
const struct gpio_regmap_config *config)
{
- struct gpio_regmap **ptr, *gpio;
-
- ptr = devres_alloc(devm_gpio_regmap_unregister, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct gpio_regmap *gpio;
+ int ret;
gpio = gpio_regmap_register(config);
- if (!IS_ERR(gpio)) {
- *ptr = gpio;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+
+ if (IS_ERR(gpio))
+ return gpio;
+
+ ret = devm_add_action_or_reset(dev, devm_gpio_regmap_unregister, gpio);
+ if (ret)
+ return ERR_PTR(ret);
return gpio;
}
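
The devres rework above (the gpio-stmpe hunk further down uses the same helper) drops the devres_alloc()/devres_add() wrapper around a pointer in favour of devm_add_action_or_reset(), which queues a plain release callback and invokes it immediately if queueing fails. A sketch of the pattern for a hypothetical demo_register()/demo_unregister() pair:

#include <linux/device.h>
#include <linux/err.h>

/* hypothetical object, config and constructor/destructor, not a real API */
struct demo_obj;
struct demo_config;
struct demo_obj *demo_register(const struct demo_config *cfg);
void demo_unregister(struct demo_obj *obj);

static void demo_devm_release(void *obj)
{
	demo_unregister(obj);
}

struct demo_obj *devm_demo_register(struct device *dev,
				    const struct demo_config *cfg)
{
	struct demo_obj *obj;
	int ret;

	obj = demo_register(cfg);
	if (IS_ERR(obj))
		return obj;

	/* queues demo_devm_release(); runs it right away if this fails */
	ret = devm_add_action_or_reset(dev, demo_devm_release, obj);
	if (ret)
		return ERR_PTR(ret);

	return obj;
}
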
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 6eca531b7d96..49aac2bb8d2c 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -122,7 +122,6 @@ static int spics_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_spics *spics;
- int ret;
spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL);
if (!spics)
@@ -148,8 +147,6 @@ static int spics_gpio_probe(struct platform_device *pdev)
&spics->cs_enable_shift))
goto err_dt_data;
- platform_set_drvdata(pdev, spics);
-
spics->chip.ngpio = NUM_OF_GPIO;
spics->chip.base = -1;
spics->chip.request = spics_request;
@@ -163,14 +160,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.owner = THIS_MODULE;
spics->last_off = -1;
- ret = devm_gpiochip_add_data(&pdev->dev, &spics->chip, spics);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpio chip\n");
- return ret;
- }
-
- dev_info(&pdev->dev, "spear spics registered\n");
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &spics->chip, spics);
err_dt_data:
dev_err(&pdev->dev, "DT probe failed\n");
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 36ea8a3bd451..25c37edcbc6c 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -222,7 +222,6 @@ static int sprd_gpio_probe(struct platform_device *pdev)
{
struct gpio_irq_chip *irq;
struct sprd_gpio *sprd_gpio;
- int ret;
sprd_gpio = devm_kzalloc(&pdev->dev, sizeof(*sprd_gpio), GFP_KERNEL);
if (!sprd_gpio)
@@ -259,14 +258,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
irq->num_parents = 1;
irq->parents = &sprd_gpio->irq;
- ret = devm_gpiochip_add_data(&pdev->dev, &sprd_gpio->chip, sprd_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, sprd_gpio);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &sprd_gpio->chip, sprd_gpio);
}
static const struct of_device_id sprd_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index a74bb97a41e2..392fcab06ab8 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -398,15 +398,7 @@ static int gsta_probe(struct platform_device *dev)
return err;
}
- err = devm_gpiochip_add_data(&dev->dev, &chip->gpio, chip);
- if (err < 0) {
- dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
- -err);
- return err;
- }
-
- platform_set_drvdata(dev, chip);
- return 0;
+ return devm_gpiochip_add_data(&dev->dev, &chip->gpio, chip);
}
static struct platform_driver sta2x11_gpio_platform_driver = {
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index b94ef8181428..dd4d58b4ae49 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -449,6 +449,11 @@ static void stmpe_init_irq_valid_mask(struct gpio_chip *gc,
}
}
+static void stmpe_gpio_disable(void *stmpe)
+{
+ stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
+}
+
static int stmpe_gpio_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
@@ -461,7 +466,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- stmpe_gpio = kzalloc(sizeof(*stmpe_gpio), GFP_KERNEL);
+ stmpe_gpio = devm_kzalloc(&pdev->dev, sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
@@ -489,7 +494,11 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
if (ret)
- goto out_free;
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev, stmpe_gpio_disable, stmpe);
+ if (ret)
+ return ret;
if (irq > 0) {
struct gpio_irq_chip *girq;
@@ -499,7 +508,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
"stmpe-gpio", stmpe_gpio);
if (ret) {
dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- goto out_disable;
+ return ret;
}
girq = &stmpe_gpio->chip.irq;
@@ -514,22 +523,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
girq->init_valid_mask = stmpe_init_irq_valid_mask;
}
- ret = gpiochip_add_data(&stmpe_gpio->chip, stmpe_gpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
- goto out_disable;
- }
-
- platform_set_drvdata(pdev, stmpe_gpio);
-
- return 0;
-
-out_disable:
- stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
- gpiochip_remove(&stmpe_gpio->chip);
-out_free:
- kfree(stmpe_gpio);
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &stmpe_gpio->chip, stmpe_gpio);
}
static struct platform_driver stmpe_gpio_driver = {
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 55b8dbd13d11..8d158492488f 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -357,16 +357,7 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_gpiochip_add_data(&pdev->dev, &tc3589x_gpio->chip,
- tc3589x_gpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, tc3589x_gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &tc3589x_gpio->chip, tc3589x_gpio);
}
static struct platform_driver tc3589x_gpio_driver = {
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 05974b760796..d38980b9923a 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -730,18 +730,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
offset += port->pins;
}
- platform_set_drvdata(pdev, gpio);
-
- err = devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int tegra186_gpio_remove(struct platform_device *pdev)
-{
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio);
}
#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
@@ -913,7 +902,6 @@ static struct platform_driver tegra186_gpio_driver = {
.of_match_table = tegra186_gpio_of_match,
},
.probe = tegra186_gpio_probe,
- .remove = tegra186_gpio_remove,
};
module_platform_driver(tegra186_gpio_driver);
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 43a1150055ce..3517debe2b0b 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -187,7 +187,6 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
{
struct tps65218 *tps65218 = dev_get_drvdata(pdev->dev.parent);
struct tps65218_gpio *tps65218_gpio;
- int ret;
tps65218_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65218_gpio),
GFP_KERNEL);
@@ -201,16 +200,8 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
tps65218_gpio->gpio_chip.of_node = pdev->dev.of_node;
#endif
- ret = devm_gpiochip_add_data(&pdev->dev, &tps65218_gpio->gpio_chip,
- tps65218_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, tps65218_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &tps65218_gpio->gpio_chip,
+ tps65218_gpio);
}
static const struct of_device_id tps65218_dt_match[] = {
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 9b6cc74f47c8..da0304b764a5 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -76,7 +76,6 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
{
struct tps6586x_platform_data *pdata;
struct tps6586x_gpio *tps6586x_gpio;
- int ret;
pdata = dev_get_platdata(pdev->dev.parent);
tps6586x_gpio = devm_kzalloc(&pdev->dev,
@@ -106,16 +105,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
else
tps6586x_gpio->gpio_chip.base = -1;
- ret = devm_gpiochip_add_data(&pdev->dev, &tps6586x_gpio->gpio_chip,
- tps6586x_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, tps6586x_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &tps6586x_gpio->gpio_chip,
+ tps6586x_gpio);
}
static struct platform_driver tps6586x_gpio_driver = {
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 0c0b445c75c0..7fa8c841081f 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -165,16 +165,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
}
skip_init:
- ret = devm_gpiochip_add_data(&pdev->dev, &tps65910_gpio->gpio_chip,
- tps65910_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, tps65910_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &tps65910_gpio->gpio_chip,
+ tps65910_gpio);
}
static struct platform_driver tps65910_gpio_driver = {
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 510d9ed9fd2a..fab771cb6a87 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -99,7 +99,6 @@ static int tps65912_gpio_probe(struct platform_device *pdev)
{
struct tps65912 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65912_gpio *gpio;
- int ret;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -109,16 +108,7 @@ static int tps65912_gpio_probe(struct platform_device *pdev)
gpio->gpio_chip = template_chip;
gpio->gpio_chip.parent = tps->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip,
- gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio);
}
static const struct platform_device_id tps65912_gpio_id_table[] = {
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index f7f5f770e0fb..423b7bc30ae8 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -125,7 +125,6 @@ static const char *tps68470_names[TPS68470_N_GPIO] = {
static int tps68470_gpio_probe(struct platform_device *pdev)
{
struct tps68470_gpio_data *tps68470_gpio;
- int ret;
tps68470_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps68470_gpio),
GFP_KERNEL);
@@ -146,16 +145,7 @@ static int tps68470_gpio_probe(struct platform_device *pdev)
tps68470_gpio->gc.base = -1;
tps68470_gpio->gc.parent = &pdev->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &tps68470_gpio->gc,
- tps68470_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register gpio_chip: %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, tps68470_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &tps68470_gpio->gc, tps68470_gpio);
}
static struct platform_driver tps68470_gpio_driver = {
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 5022e0ad0fae..0f5d17f343f1 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -238,8 +238,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO)
return irq;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -278,7 +278,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- if (irq) {
+ if (irq > 0) {
struct irq_chip *irq_chip = &gpio->irq_chip;
u8 irq_status;
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 0e3d19828eb1..47455810bdb9 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -187,15 +187,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
- if (ret) {
- dev_err(dev, "failed to add GPIO chip\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, priv);
-
- return ret;
+ return devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
}
static const struct of_device_id visconti_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index a3a32a77041f..9cf1e5ebb352 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -261,7 +261,6 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = &wm831x->pdata;
struct wm831x_gpio *wm831x_gpio;
- int ret;
wm831x_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm831x_gpio),
GFP_KERNEL);
@@ -280,16 +279,7 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
wm831x_gpio->gpio_chip.of_node = wm831x->dev->of_node;
#endif
- ret = devm_gpiochip_add_data(&pdev->dev, &wm831x_gpio->gpio_chip,
- wm831x_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, wm831x_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &wm831x_gpio->gpio_chip, wm831x_gpio);
}
static struct platform_driver wm831x_gpio_driver = {
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 460f0a4b04bd..b1b131fb9804 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -105,7 +105,6 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
struct wm8350 *wm8350 = dev_get_drvdata(pdev->dev.parent);
struct wm8350_platform_data *pdata = dev_get_platdata(wm8350->dev);
struct wm8350_gpio_data *wm8350_gpio;
- int ret;
wm8350_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8350_gpio),
GFP_KERNEL);
@@ -121,16 +120,7 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
else
wm8350_gpio->gpio_chip.base = -1;
- ret = devm_gpiochip_add_data(&pdev->dev, &wm8350_gpio->gpio_chip,
- wm8350_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, wm8350_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &wm8350_gpio->gpio_chip, wm8350_gpio);
}
static struct platform_driver wm8350_gpio_driver = {
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 9af89cf7f6bc..f4a474cef32d 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -263,7 +263,6 @@ static int wm8994_gpio_probe(struct platform_device *pdev)
struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
struct wm8994_gpio *wm8994_gpio;
- int ret;
wm8994_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8994_gpio),
GFP_KERNEL);
@@ -279,17 +278,7 @@ static int wm8994_gpio_probe(struct platform_device *pdev)
else
wm8994_gpio->gpio_chip.base = -1;
- ret = devm_gpiochip_add_data(&pdev->dev, &wm8994_gpio->gpio_chip,
- wm8994_gpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
- ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, wm8994_gpio);
-
- return ret;
+ return devm_gpiochip_add_data(&pdev->dev, &wm8994_gpio->gpio_chip, wm8994_gpio);
}
static struct platform_driver wm8994_gpio_driver = {
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 532b0df8a1f2..fb4b0c67aeef 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -159,7 +159,6 @@ static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
static int xgene_gpio_probe(struct platform_device *pdev)
{
struct xgene_gpio *gpio;
- int err = 0;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -183,15 +182,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio);
- err = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (err) {
- dev_err(&pdev->dev,
- "failed to register gpiochip.\n");
- return err;
- }
-
- dev_info(&pdev->dev, "X-Gene GPIO driver registered.\n");
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
}
static const struct of_device_id xgene_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 136557e7dd3c..c329c3a606e8 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -5,6 +5,7 @@
* Copyright 2008 - 2013 Xilinx, Inc.
*/
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/errno.h>
@@ -23,7 +24,8 @@
#define XGPIO_DATA_OFFSET (0x0) /* Data register */
#define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */
-#define XGPIO_CHANNEL_OFFSET 0x8
+#define XGPIO_CHANNEL0_OFFSET 0x0
+#define XGPIO_CHANNEL1_OFFSET 0x8
#define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */
#define XGPIO_GIER_IE BIT(31)
@@ -43,56 +45,101 @@
* struct xgpio_instance - Stores information about GPIO device
* @gc: GPIO chip
* @regs: register block
- * @gpio_width: GPIO width for every channel
- * @gpio_state: GPIO write state shadow register
- * @gpio_last_irq_read: GPIO read state register from last interrupt
- * @gpio_dir: GPIO direction shadow register
+ * @hw_map: GPIO pin mapping on hardware side
+ * @sw_map: GPIO pin mapping on software side
+ * @state: GPIO write state shadow register
+ * @last_irq_read: GPIO read state register from last interrupt
+ * @dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
* @irq: IRQ used by GPIO device
* @irqchip: IRQ chip
- * @irq_enable: GPIO IRQ enable/disable bitfield
- * @irq_rising_edge: GPIO IRQ rising edge enable/disable bitfield
- * @irq_falling_edge: GPIO IRQ falling edge enable/disable bitfield
+ * @enable: GPIO IRQ enable/disable bitfield
+ * @rising_edge: GPIO IRQ rising edge enable/disable bitfield
+ * @falling_edge: GPIO IRQ falling edge enable/disable bitfield
* @clk: clock resource for this driver
*/
struct xgpio_instance {
struct gpio_chip gc;
void __iomem *regs;
- unsigned int gpio_width[2];
- u32 gpio_state[2];
- u32 gpio_last_irq_read[2];
- u32 gpio_dir[2];
+ DECLARE_BITMAP(hw_map, 64);
+ DECLARE_BITMAP(sw_map, 64);
+ DECLARE_BITMAP(state, 64);
+ DECLARE_BITMAP(last_irq_read, 64);
+ DECLARE_BITMAP(dir, 64);
spinlock_t gpio_lock; /* For serializing operations */
int irq;
struct irq_chip irqchip;
- u32 irq_enable[2];
- u32 irq_rising_edge[2];
- u32 irq_falling_edge[2];
+ DECLARE_BITMAP(enable, 64);
+ DECLARE_BITMAP(rising_edge, 64);
+ DECLARE_BITMAP(falling_edge, 64);
struct clk *clk;
};
-static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
+static inline int xgpio_from_bit(struct xgpio_instance *chip, int bit)
{
- if (gpio >= chip->gpio_width[0])
- return 1;
+ return bitmap_bitremap(bit, chip->hw_map, chip->sw_map, 64);
+}
- return 0;
+static inline int xgpio_to_bit(struct xgpio_instance *chip, int gpio)
+{
+ return bitmap_bitremap(gpio, chip->sw_map, chip->hw_map, 64);
}
-static inline int xgpio_regoffset(struct xgpio_instance *chip, int gpio)
+static inline u32 xgpio_get_value32(const unsigned long *map, int bit)
{
- if (xgpio_index(chip, gpio))
- return XGPIO_CHANNEL_OFFSET;
+ const size_t index = BIT_WORD(bit);
+ const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
- return 0;
+ return (map[index] >> offset) & 0xFFFFFFFFul;
+}
+
+static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
+{
+ const size_t index = BIT_WORD(bit);
+ const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
+
+ map[index] &= ~(0xFFFFFFFFul << offset);
+ map[index] |= v << offset;
+}
+
+static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
+{
+ switch (ch) {
+ case 0:
+ return XGPIO_CHANNEL0_OFFSET;
+ case 1:
+ return XGPIO_CHANNEL1_OFFSET;
+ default:
+ return -EINVAL;
+ }
}
-static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
+static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
- if (xgpio_index(chip, gpio))
- return gpio - chip->gpio_width[0];
+ void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ xgpio_set_value32(a, bit, xgpio_readreg(addr));
+}
+
+static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
+{
+ void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ xgpio_writereg(addr, xgpio_get_value32(a, bit));
+}
+
+static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
+{
+ int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+
+ for (bit = 0; bit <= lastbit ; bit += 32)
+ xgpio_read_ch(chip, reg, bit, a);
+}
+
+static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
+{
+ int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
- return gpio;
+ for (bit = 0; bit <= lastbit ; bit += 32)
+ xgpio_write_ch(chip, reg, bit, a);
}
/**
@@ -109,12 +156,12 @@ static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct xgpio_instance *chip = gpiochip_get_data(gc);
- u32 val;
+ int bit = xgpio_to_bit(chip, gpio);
+ DECLARE_BITMAP(state, 64);
- val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio));
+ xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, state);
- return !!(val & BIT(xgpio_offset(chip, gpio)));
+ return test_bit(bit, state);
}
/**
@@ -130,19 +177,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ int bit = xgpio_to_bit(chip, gpio);
spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
- if (val)
- chip->gpio_state[index] |= BIT(offset);
- else
- chip->gpio_state[index] &= ~BIT(offset);
+ __assign_bit(bit, chip->state, val);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
@@ -159,37 +201,22 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
+ DECLARE_BITMAP(hw_mask, 64);
+ DECLARE_BITMAP(hw_bits, 64);
+ DECLARE_BITMAP(state, 64);
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, 0);
- int offset, i;
+
+ bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
+ bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
spin_lock_irqsave(&chip->gpio_lock, flags);
- /* Write to GPIO signals */
- for (i = 0; i < gc->ngpio; i++) {
- if (*mask == 0)
- break;
- /* Once finished with an index write it out to the register */
- if (index != xgpio_index(chip, i)) {
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
- index = xgpio_index(chip, i);
- spin_lock_irqsave(&chip->gpio_lock, flags);
- }
- if (__test_and_clear_bit(i, mask)) {
- offset = xgpio_offset(chip, i);
- if (test_bit(i, bits))
- chip->gpio_state[index] |= BIT(offset);
- else
- chip->gpio_state[index] &= ~BIT(offset);
- }
- }
+ bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
+
+ xgpio_write_ch_all(chip, XGPIO_DATA_OFFSET, state);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
+ bitmap_copy(chip->state, state, 64);
spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
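
xgpio_set_multiple() now remaps the caller's mask and bits into the hardware layout and merges them into the shadow state with bitmap_replace(), i.e. dst = (old & ~mask) | (new & mask), before writing each 32-bit channel word once. A hedged userspace sketch of that merge follows; the values are arbitrary examples, not taken from the patch.

/*
 * Userspace model of the bitmap_replace() merge used in set_multiple():
 * only the requested lines change, the rest of the shadow state is kept.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t replace_bits(uint64_t old, uint64_t new_bits, uint64_t mask)
{
	return (old & ~mask) | (new_bits & mask);
}

int main(void)
{
	uint64_t state = 0x00000000ffff0000ULL;	/* current shadow register */
	uint64_t bits  = 0x0000000100000001ULL;	/* requested output values */
	uint64_t mask  = 0x00000001000000ffULL;	/* lines being updated */
	uint64_t next  = replace_bits(state, bits, mask);

	/* Channel 0 word is the low 32 bits, channel 1 word the high 32. */
	printf("ch0 word: 0x%08x\n", (uint32_t)next);
	printf("ch1 word: 0x%08x\n", (uint32_t)(next >> 32));
	return 0;
}
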
@@ -207,15 +234,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ int bit = xgpio_to_bit(chip, gpio);
spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
- chip->gpio_dir[index] |= BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ __set_bit(bit, chip->dir);
+ xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
spin_unlock_irqrestore(&chip->gpio_lock, flags);
@@ -238,23 +263,17 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ int bit = xgpio_to_bit(chip, gpio);
spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
- if (val)
- chip->gpio_state[index] |= BIT(offset);
- else
- chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ __assign_bit(bit, chip->state, val);
+ xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
/* Clear the GPIO bit in shadow register and set direction as output */
- chip->gpio_dir[index] &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ __clear_bit(bit, chip->dir);
+ xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
spin_unlock_irqrestore(&chip->gpio_lock, flags);
@@ -267,16 +286,8 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
*/
static void xgpio_save_regs(struct xgpio_instance *chip)
{
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
-
- if (!chip->gpio_width[1])
- return;
-
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[1]);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_dir[1]);
+ xgpio_write_ch_all(chip, XGPIO_DATA_OFFSET, chip->state);
+ xgpio_write_ch_all(chip, XGPIO_TRI_OFFSET, chip->dir);
}
static int xgpio_request(struct gpio_chip *chip, unsigned int offset)
@@ -302,8 +313,8 @@ static int __maybe_unused xgpio_suspend(struct device *dev)
struct irq_data *data = irq_get_irq_data(gpio->irq);
if (!data) {
- dev_err(dev, "irq_get_irq_data() failed\n");
- return -EINVAL;
+ dev_dbg(dev, "IRQ not connected\n");
+ return pm_runtime_force_suspend(dev);
}
if (!irqd_is_wakeup_set(data))
@@ -348,8 +359,8 @@ static int __maybe_unused xgpio_resume(struct device *dev)
struct irq_data *data = irq_get_irq_data(gpio->irq);
if (!data) {
- dev_err(dev, "irq_get_irq_data() failed\n");
- return -EINVAL;
+ dev_dbg(dev, "IRQ not connected\n");
+ return pm_runtime_force_resume(dev);
}
if (!irqd_is_wakeup_set(data))
@@ -391,18 +402,17 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int index = xgpio_index(chip, irq_offset);
- int offset = xgpio_offset(chip, irq_offset);
+ int bit = xgpio_to_bit(chip, irq_offset);
+ u32 mask = BIT(bit / 32), temp;
spin_lock_irqsave(&chip->gpio_lock, flags);
- chip->irq_enable[index] &= ~BIT(offset);
+ __clear_bit(bit, chip->enable);
- if (!chip->irq_enable[index]) {
+ if (xgpio_get_value32(chip->enable, bit) == 0) {
/* Disable per channel interrupt */
- u32 temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
-
- temp &= ~BIT(index);
+ temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+ temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
spin_unlock_irqrestore(&chip->gpio_lock, flags);
@@ -417,30 +427,26 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int index = xgpio_index(chip, irq_offset);
- int offset = xgpio_offset(chip, irq_offset);
- u32 old_enable = chip->irq_enable[index];
+ int bit = xgpio_to_bit(chip, irq_offset);
+ u32 old_enable = xgpio_get_value32(chip->enable, bit);
+ u32 mask = BIT(bit / 32), val;
spin_lock_irqsave(&chip->gpio_lock, flags);
- chip->irq_enable[index] |= BIT(offset);
+ __set_bit(bit, chip->enable);
- if (!old_enable) {
+ if (old_enable == 0) {
/* Clear any existing per-channel interrupts */
- u32 val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET) &
- BIT(index);
-
- if (val)
- xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, val);
+ val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
+ val &= mask;
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, val);
/* Update GPIO IRQ read data before enabling interrupt*/
- val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET);
- chip->gpio_last_irq_read[index] = val;
+ xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, chip->last_irq_read);
/* Enable per channel interrupt */
val = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
- val |= BIT(index);
+ val |= mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
@@ -459,8 +465,7 @@ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
{
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int index = xgpio_index(chip, irq_offset);
- int offset = xgpio_offset(chip, irq_offset);
+ int bit = xgpio_to_bit(chip, irq_offset);
/*
* The Xilinx GPIO hardware provides a single interrupt status
@@ -470,16 +475,16 @@ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
*/
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
- chip->irq_rising_edge[index] |= BIT(offset);
- chip->irq_falling_edge[index] |= BIT(offset);
+ __set_bit(bit, chip->rising_edge);
+ __set_bit(bit, chip->falling_edge);
break;
case IRQ_TYPE_EDGE_RISING:
- chip->irq_rising_edge[index] |= BIT(offset);
- chip->irq_falling_edge[index] &= ~BIT(offset);
+ __set_bit(bit, chip->rising_edge);
+ __clear_bit(bit, chip->falling_edge);
break;
case IRQ_TYPE_EDGE_FALLING:
- chip->irq_rising_edge[index] &= ~BIT(offset);
- chip->irq_falling_edge[index] |= BIT(offset);
+ __clear_bit(bit, chip->rising_edge);
+ __set_bit(bit, chip->falling_edge);
break;
default:
return -EINVAL;
@@ -496,46 +501,44 @@ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
static void xgpio_irqhandler(struct irq_desc *desc)
{
struct xgpio_instance *chip = irq_desc_get_handler_data(desc);
+ struct gpio_chip *gc = &chip->gc;
struct irq_chip *irqchip = irq_desc_get_chip(desc);
- u32 num_channels = chip->gpio_width[1] ? 2 : 1;
- u32 offset = 0, index;
- u32 status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
-
+ DECLARE_BITMAP(rising, 64);
+ DECLARE_BITMAP(falling, 64);
+ DECLARE_BITMAP(all, 64);
+ int irq_offset;
+ u32 status;
+ u32 bit;
+
+ status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status);
chained_irq_enter(irqchip, desc);
- for (index = 0; index < num_channels; index++) {
- if ((status & BIT(index))) {
- unsigned long rising_events, falling_events, all_events;
- unsigned long flags;
- u32 data, bit;
- unsigned int irq;
-
- spin_lock_irqsave(&chip->gpio_lock, flags);
- data = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET);
- rising_events = data &
- ~chip->gpio_last_irq_read[index] &
- chip->irq_enable[index] &
- chip->irq_rising_edge[index];
- falling_events = ~data &
- chip->gpio_last_irq_read[index] &
- chip->irq_enable[index] &
- chip->irq_falling_edge[index];
- dev_dbg(chip->gc.parent,
- "IRQ chan %u rising 0x%lx falling 0x%lx\n",
- index, rising_events, falling_events);
- all_events = rising_events | falling_events;
- chip->gpio_last_irq_read[index] = data;
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
-
- for_each_set_bit(bit, &all_events, 32) {
- irq = irq_find_mapping(chip->gc.irq.domain,
- offset + bit);
- generic_handle_irq(irq);
- }
- }
- offset += chip->gpio_width[index];
+
+ spin_lock(&chip->gpio_lock);
+
+ xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
+
+ bitmap_complement(rising, chip->last_irq_read, 64);
+ bitmap_and(rising, rising, all, 64);
+ bitmap_and(rising, rising, chip->enable, 64);
+ bitmap_and(rising, rising, chip->rising_edge, 64);
+
+ bitmap_complement(falling, all, 64);
+ bitmap_and(falling, falling, chip->last_irq_read, 64);
+ bitmap_and(falling, falling, chip->enable, 64);
+ bitmap_and(falling, falling, chip->falling_edge, 64);
+
+ bitmap_copy(chip->last_irq_read, all, 64);
+ bitmap_or(all, rising, falling, 64);
+
+ spin_unlock(&chip->gpio_lock);
+
+ dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
+
+ for_each_set_bit(bit, all, 64) {
+ irq_offset = xgpio_from_bit(chip, bit);
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, irq_offset));
}
chained_irq_exit(irqchip, desc);
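
The reworked xgpio_irqhandler() derives rising and falling events from two consecutive data reads, each gated by the enable and per-edge bitmaps. A small userspace sketch of the same bit algebra, with made-up sample values:

/*
 * Userspace model of the edge detection above:
 *   rising  = now & ~last, falling = ~now & last,
 * each ANDed with the enable and edge-sensitivity masks.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t last   = 0x0000000000000f0fULL;	/* previous data read */
	uint64_t now    = 0x0000000000000ff0ULL;	/* current data read */
	uint64_t enable = 0xffffffffffffffffULL;	/* IRQs enabled everywhere */
	uint64_t r_edge = 0x00000000000000ffULL;	/* rising-edge sensitive lines */
	uint64_t f_edge = 0x000000000000000fULL;	/* falling-edge sensitive lines */

	uint64_t rising  = now & ~last & enable & r_edge;
	uint64_t falling = ~now & last & enable & f_edge;

	printf("rising 0x%llx falling 0x%llx fire 0x%llx\n",
	       (unsigned long long)rising, (unsigned long long)falling,
	       (unsigned long long)(rising | falling));
	return 0;
}
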
@@ -556,6 +559,9 @@ static int xgpio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
u32 is_dual = 0;
u32 cells = 2;
+ u32 width[2];
+ u32 state[2];
+ u32 dir[2];
struct gpio_irq_chip *girq;
u32 temp;
@@ -565,13 +571,25 @@ static int xgpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
+ /* First, check if the device is dual-channel */
+ of_property_read_u32(np, "xlnx,is-dual", &is_dual);
+
+ /* Setup defaults */
+ memset32(width, 0, ARRAY_SIZE(width));
+ memset32(state, 0, ARRAY_SIZE(state));
+ memset32(dir, 0xFFFFFFFF, ARRAY_SIZE(dir));
+
/* Update GPIO state shadow register with default value */
- if (of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]))
- chip->gpio_state[0] = 0x0;
+ of_property_read_u32(np, "xlnx,dout-default", &state[0]);
+ of_property_read_u32(np, "xlnx,dout-default-2", &state[1]);
+
+ bitmap_from_arr32(chip->state, state, 64);
/* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
- chip->gpio_dir[0] = 0xFFFFFFFF;
+ of_property_read_u32(np, "xlnx,tri-default", &dir[0]);
+ of_property_read_u32(np, "xlnx,tri-default-2", &dir[1]);
+
+ bitmap_from_arr32(chip->dir, dir, 64);
/* Update cells with gpio-cells value */
if (of_property_read_u32(np, "#gpio-cells", &cells))
@@ -586,42 +604,29 @@ static int xgpio_probe(struct platform_device *pdev)
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
- chip->gpio_width[0] = 32;
+ if (of_property_read_u32(np, "xlnx,gpio-width", &width[0]))
+ width[0] = 32;
- if (chip->gpio_width[0] > 32)
+ if (width[0] > 32)
return -EINVAL;
- spin_lock_init(&chip->gpio_lock);
+ if (is_dual && of_property_read_u32(np, "xlnx,gpio2-width", &width[1]))
+ width[1] = 32;
- if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
- is_dual = 0;
-
- if (is_dual) {
- /* Update GPIO state shadow register with default value */
- if (of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state[1]))
- chip->gpio_state[1] = 0x0;
-
- /* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default-2",
- &chip->gpio_dir[1]))
- chip->gpio_dir[1] = 0xFFFFFFFF;
-
- /*
- * Check device node and parent device node for device width
- * and assume default width of 32
- */
- if (of_property_read_u32(np, "xlnx,gpio2-width",
- &chip->gpio_width[1]))
- chip->gpio_width[1] = 32;
-
- if (chip->gpio_width[1] > 32)
- return -EINVAL;
- }
+ if (width[1] > 32)
+ return -EINVAL;
+
+ /* Setup software pin mapping */
+ bitmap_set(chip->sw_map, 0, width[0] + width[1]);
+
+ /* Setup hardware pin mapping */
+ bitmap_set(chip->hw_map, 0, width[0]);
+ bitmap_set(chip->hw_map, 32, width[1]);
+
+ spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
- chip->gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
+ chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
chip->gc.parent = &pdev->dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
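
At probe time the driver now packs the two 32-bit dout/tri defaults into 64-bit shadow bitmaps (bitmap_from_arr32()), builds the software map as a contiguous run of width[0] + width[1] bits, places the hardware map at bit 0 and bit 32, and takes ngpio from the weight of the hardware map. A hedged userspace sketch of that packing, using made-up widths and defaults:

/*
 * Userspace model of the probe-time setup: word 0 of the u32 array maps
 * to bits 0..31 of the shadow bitmap, word 1 to bits 32..63, and ngpio
 * is the population count of the hardware map.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t width[2] = { 20, 12 };			/* example channel widths */
	uint32_t state[2] = { 0x0000000f, 0x00000001 };	/* example dout defaults */

	/* bitmap_from_arr32(): word 0 -> bits 0..31, word 1 -> bits 32..63 */
	uint64_t shadow = (uint64_t)state[0] | ((uint64_t)state[1] << 32);

	/* hw_map: channel 0 occupies bits 0..w0-1, channel 1 bits 32..32+w1-1 */
	uint64_t hw_map = ((1ULL << width[0]) - 1) |
			  (((1ULL << width[1]) - 1) << 32);

	int ngpio = __builtin_popcountll(hw_map);	/* bitmap_weight() */

	printf("shadow = 0x%016llx, ngpio = %d\n",
	       (unsigned long long)shadow, ngpio);
	return 0;
}
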
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 3521c1dc3ac0..f0cb8ccd03ed 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -736,6 +736,11 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
+ if (!data) {
+ dev_err(dev, "irq_get_irq_data() failed\n");
+ return -EINVAL;
+ }
+
if (!device_may_wakeup(dev))
disable_irq(gpio->irq);
@@ -753,6 +758,11 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
struct irq_data *data = irq_get_irq_data(gpio->irq);
int ret;
+ if (!data) {
+ dev_err(dev, "irq_get_irq_data() failed\n");
+ return -EINVAL;
+ }
+
if (!device_may_wakeup(dev))
enable_irq(gpio->irq);
@@ -1001,8 +1011,11 @@ err_pm_dis:
static int zynq_gpio_remove(struct platform_device *pdev)
{
struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+ int ret;
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
gpiochip_remove(&gpio->chip);
clk_disable_unprepare(gpio->clk);
device_set_wakeup_capable(&pdev->dev, 0);
@@ -1020,22 +1033,7 @@ static struct platform_driver zynq_gpio_driver = {
.remove = zynq_gpio_remove,
};
-/**
- * zynq_gpio_init - Initial driver registration call
- *
- * Return: value from platform_driver_register
- */
-static int __init zynq_gpio_init(void)
-{
- return platform_driver_register(&zynq_gpio_driver);
-}
-postcore_initcall(zynq_gpio_init);
-
-static void __exit zynq_gpio_exit(void)
-{
- platform_driver_unregister(&zynq_gpio_driver);
-}
-module_exit(zynq_gpio_exit);
+module_platform_driver(zynq_gpio_driver);
MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Zynq GPIO driver");
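
The gpio-zynq change above drops the open-coded init/exit pair in favour of module_platform_driver(). A simplified, non-runnable sketch of roughly what the macro expands to (see include/linux/platform_device.h); note the removed code registered at postcore_initcall() while the macro registers at the regular module_init() level.

/* Simplified expansion sketch of module_platform_driver(zynq_gpio_driver);
 * the real macro generates these wrappers via module_driver().
 */
static int __init zynq_gpio_driver_init(void)
{
	return platform_driver_register(&zynq_gpio_driver);
}
module_init(zynq_gpio_driver_init);

static void __exit zynq_gpio_driver_exit(void)
{
	platform_driver_unregister(&zynq_gpio_driver);
}
module_exit(zynq_gpio_driver_exit);
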
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index ae49bb23c6ed..4098bc7f88b7 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -66,9 +66,8 @@ static ssize_t direction_show(struct device *dev,
mutex_lock(&data->mutex);
gpiod_get_direction(desc);
- status = sprintf(buf, "%s\n",
- test_bit(FLAG_IS_OUT, &desc->flags)
- ? "out" : "in");
+ status = sysfs_emit(buf, "%s\n",
+ test_bit(FLAG_IS_OUT, &desc->flags) ? "out" : "in");
mutex_unlock(&data->mutex);
@@ -109,13 +108,9 @@ static ssize_t value_show(struct device *dev,
mutex_lock(&data->mutex);
status = gpiod_get_value_cansleep(desc);
- if (status < 0)
- goto err;
+ if (status >= 0)
+ status = sysfs_emit(buf, "%zd\n", status);
- buf[0] = '0' + status;
- buf[1] = '\n';
- status = 2;
-err:
mutex_unlock(&data->mutex);
return status;
@@ -249,11 +244,11 @@ static ssize_t edge_show(struct device *dev,
mutex_lock(&data->mutex);
for (i = 0; i < ARRAY_SIZE(trigger_types); i++) {
- if (data->irq_flags == trigger_types[i].flags) {
- status = sprintf(buf, "%s\n", trigger_types[i].name);
+ if (data->irq_flags == trigger_types[i].flags)
break;
- }
}
+ if (i < ARRAY_SIZE(trigger_types))
+ status = sysfs_emit(buf, "%s\n", trigger_types[i].name);
mutex_unlock(&data->mutex);
@@ -312,10 +307,7 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value)
if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
- if (value)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- else
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ assign_bit(FLAG_ACTIVE_LOW, &desc->flags, value);
/* reconfigure poll(2) support if enabled on one edge only */
if (flags == GPIO_IRQF_TRIGGER_FALLING ||
@@ -336,8 +328,8 @@ static ssize_t active_low_show(struct device *dev,
mutex_lock(&data->mutex);
- status = sprintf(buf, "%d\n",
- !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
+ status = sysfs_emit(buf, "%d\n",
+ !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
mutex_unlock(&data->mutex);
@@ -415,7 +407,7 @@ static ssize_t base_show(struct device *dev,
{
const struct gpio_chip *chip = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", chip->base);
+ return sysfs_emit(buf, "%d\n", chip->base);
}
static DEVICE_ATTR_RO(base);
@@ -424,7 +416,7 @@ static ssize_t label_show(struct device *dev,
{
const struct gpio_chip *chip = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", chip->label ? : "");
+ return sysfs_emit(buf, "%s\n", chip->label ?: "");
}
static DEVICE_ATTR_RO(label);
@@ -433,7 +425,7 @@ static ssize_t ngpio_show(struct device *dev,
{
const struct gpio_chip *chip = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", chip->ngpio);
+ return sysfs_emit(buf, "%u\n", chip->ngpio);
}
static DEVICE_ATTR_RO(ngpio);
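
The gpiolib-sysfs conversions above replace sprintf() in the show() callbacks with sysfs_emit(), which bounds the output to the PAGE_SIZE buffer sysfs hands to ->show(). A rough userspace model of that behaviour (not the kernel implementation):

/*
 * Hedged userspace model of sysfs_emit(): format into the page-sized
 * buffer without ever overrunning it, and return the emitted length.
 */
#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int sysfs_emit_model(char *buf, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, PAGE_SIZE, fmt, args);	/* never overruns the page */
	va_end(args);

	return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
}

int main(void)
{
	char page[PAGE_SIZE];
	int n = sysfs_emit_model(page, "%s\n", "out");

	printf("emitted %d bytes: %s", n, page);
	return 0;
}
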
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1427c1be749b..27c07108496d 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2004,9 +2004,6 @@ const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
- if (offset >= gc->ngpio)
- return NULL;
-
desc = gpiochip_get_desc(gc, offset);
if (IS_ERR(desc))
return NULL;
@@ -2543,21 +2540,28 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
while (i < array_size) {
struct gpio_chip *gc = desc_array[i]->gdev->chip;
- unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
+ DECLARE_BITMAP(fastpath_mask, FASTPATH_NGPIO);
+ DECLARE_BITMAP(fastpath_bits, FASTPATH_NGPIO);
unsigned long *mask, *bits;
int first, j;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
- mask = fastpath;
+ mask = fastpath_mask;
+ bits = fastpath_bits;
} else {
- mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio),
- sizeof(*mask),
- can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ gfp_t flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC;
+
+ mask = bitmap_alloc(gc->ngpio, flags);
if (!mask)
return -ENOMEM;
+
+ bits = bitmap_alloc(gc->ngpio, flags);
+ if (!bits) {
+ bitmap_free(mask);
+ return -ENOMEM;
+ }
}
- bits = mask + BITS_TO_LONGS(gc->ngpio);
bitmap_zero(mask, gc->ngpio);
if (!can_sleep)
@@ -2580,8 +2584,10 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
ret = gpio_chip_get_multiple(gc, mask, bits);
if (ret) {
- if (mask != fastpath)
- kfree(mask);
+ if (mask != fastpath_mask)
+ bitmap_free(mask);
+ if (bits != fastpath_bits)
+ bitmap_free(bits);
return ret;
}
@@ -2601,8 +2607,10 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
j);
}
- if (mask != fastpath)
- kfree(mask);
+ if (mask != fastpath_mask)
+ bitmap_free(mask);
+ if (bits != fastpath_bits)
+ bitmap_free(bits);
}
return 0;
}
@@ -2826,21 +2834,28 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
while (i < array_size) {
struct gpio_chip *gc = desc_array[i]->gdev->chip;
- unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
+ DECLARE_BITMAP(fastpath_mask, FASTPATH_NGPIO);
+ DECLARE_BITMAP(fastpath_bits, FASTPATH_NGPIO);
unsigned long *mask, *bits;
int count = 0;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
- mask = fastpath;
+ mask = fastpath_mask;
+ bits = fastpath_bits;
} else {
- mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio),
- sizeof(*mask),
- can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ gfp_t flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC;
+
+ mask = bitmap_alloc(gc->ngpio, flags);
if (!mask)
return -ENOMEM;
+
+ bits = bitmap_alloc(gc->ngpio, flags);
+ if (!bits) {
+ bitmap_free(mask);
+ return -ENOMEM;
+ }
}
- bits = mask + BITS_TO_LONGS(gc->ngpio);
bitmap_zero(mask, gc->ngpio);
if (!can_sleep)
@@ -2885,8 +2900,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
if (count != 0)
gpio_chip_set_multiple(gc, mask, bits);
- if (mask != fastpath)
- kfree(mask);
+ if (mask != fastpath_mask)
+ bitmap_free(mask);
+ if (bits != fastpath_bits)
+ bitmap_free(bits);
}
return 0;
}
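
The gpiolib change above splits the single over-sized kmalloc_array() into two separate bitmap_alloc()/bitmap_free() allocations for mask and bits, freeing the first if the second fails. A hedged userspace sketch of the same pattern, with illustrative names and sizes:

/*
 * Userspace model of the new allocation pattern: two independent bitmaps,
 * with the second allocation cleaning up the first on failure.
 */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_alloc_model(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

int main(void)
{
	unsigned int ngpio = 512;		/* larger than a typical fastpath */
	unsigned long *mask, *bits;

	mask = bitmap_alloc_model(ngpio);
	if (!mask)
		return 1;

	bits = bitmap_alloc_model(ngpio);
	if (!bits) {
		free(mask);			/* bitmap_free() in the kernel */
		return 1;
	}

	/* ... fill mask/bits, hand them to the gpio_chip, then free both ... */
	free(bits);
	free(mask);
	printf("allocated and freed two %u-bit maps\n", ngpio);
	return 0;
}
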
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c0316eaba547..8ac6eb9f1fdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -619,6 +619,13 @@ struct amdgpu_video_codec_info {
u32 max_level;
};
+#define codec_info_build(type, width, height, level) \
+ .codec_type = type,\
+ .max_width = width,\
+ .max_height = height,\
+ .max_pixels_per_frame = height * width,\
+ .max_level = level,
+
struct amdgpu_video_codecs {
const u32 codec_count;
const struct amdgpu_video_codec_info *codec_array;
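
The codec_info_build() helper added above expands to a list of designated initializers so codec tables can be built one entry per line. A self-contained userspace demo of the same trick, using a stand-in struct and placeholder type/level values rather than the amdgpu definitions:

/*
 * Hedged demo of the designated-initializer macro pattern; struct and
 * values are placeholders, not the amdgpu ones.
 */
#include <stdio.h>

struct codec_info {
	unsigned int codec_type;
	unsigned int max_width;
	unsigned int max_height;
	unsigned int max_pixels_per_frame;
	unsigned int max_level;
};

#define codec_info_build(type, width, height, level)	\
	.codec_type = type,				\
	.max_width = width,				\
	.max_height = height,				\
	.max_pixels_per_frame = height * width,		\
	.max_level = level,

static const struct codec_info table[] = {
	{ codec_info_build(1, 4096, 2304, 52) },	/* placeholder values */
	{ codec_info_build(2, 8192, 4352, 186) },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("codec %u: %ux%u (%u px/frame), level %u\n",
		       table[i].codec_type, table[i].max_width,
		       table[i].max_height, table[i].max_pixels_per_frame,
		       table[i].max_level);
	return 0;
}
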
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 84a1b4bc9bb4..4137e848f6a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <acpi/video.h>
#include <acpi/actbl.h>
@@ -1039,10 +1040,10 @@ void amdgpu_acpi_detect(void)
*/
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{
-#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU)
- return true;
+ return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
}
#endif
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index db16b3e83694..cf62f43a03da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 3b8e1ee8c475..4fb15750b9bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
- struct amdgpu_sync *sync,
- bool *table_freed)
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
@@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
/* Update the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
return ret;
@@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
- bool no_update_pte,
- bool *table_freed)
+ bool no_update_pte)
{
int ret;
@@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
if (no_update_pte)
return 0;
- ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+ ret = update_gpuvm_pte(mem, entry, sync);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
@@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem,
- void *drm_priv, bool *table_freed)
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
entry->va, entry->va + bo_size, entry);
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
- is_invalid_userptr, table_freed);
+ is_invalid_userptr);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
@@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
@@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 3b5d13189073..8f53837d4d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}
+/*
+ * Helper function to query the RAS EEPROM I2C address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if the VBIOS supports RAS ROM address reporting
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union firmware_info *firmware_info;
+ u8 frev, crev;
+
+ if (i2c_address == NULL)
+ return false;
+
+ *i2c_address = 0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support firmware_info 3.4 + */
+ if ((frev == 3 && crev >=4) || (frev > 3)) {
+ firmware_info = (union firmware_info *)
+ (mode_info->atom_context->bios + data_offset);
+ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ }
+ }
+
+ if (*i2c_address != 0)
+ return true;
+
+ return false;
+}
+
+
union smu_info {
struct atom_smu_info_v3_1 v31;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 1bbbb195015d..751248b253de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 76fe5b71e35d..30fa1f61e0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
if (r)
return r;
@@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 130a9adf09ef..f3fd5ec710b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1369,6 +1369,38 @@ def_value:
adev->pm.smu_prv_buffer_size = 0;
}
+static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
+{
+ if (!(adev->flags & AMD_IS_APU) ||
+ adev->asic_type < CHIP_RAVEN)
+ return 0;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ if (adev->pdev->device == 0x15dd)
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
+ if (adev->pdev->device == 0x15d8)
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
+ break;
+ case CHIP_RENOIR:
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
+ else
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+ break;
+ case CHIP_VANGOGH:
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case CHIP_YELLOW_CARP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_device_check_arguments - validate module params
*
@@ -3386,6 +3418,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
+ r = amdgpu_device_init_apu_flags(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_check_arguments(adev);
if (r)
return r;
@@ -3468,13 +3504,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- goto failed_unmap;
+ return r;
}
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- goto failed_unmap;
+ return r;
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3700,10 +3736,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
-failed_unmap:
- iounmap(adev->rmmio);
- adev->rmmio = NULL;
-
return r;
}
@@ -4304,6 +4336,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
case CHIP_ALDEBARAN:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 43e7b61d1c5c..ada7bc19118a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
ip->major, ip->minor,
ip->revision);
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+ adev->vcn.num_vcn_inst++;
+
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct harvest_table *harvest_info;
- int i;
+ int i, vcn_harvest_count = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
- adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
- adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ vcn_harvest_count++;
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
+ if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
}
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6f30c525caac..971c5b8e75dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -160,6 +160,7 @@ int amdgpu_smu_pptable_id = -1;
* highest. That helps saving some idle power.
* DISABLE_FRACTIONAL_PWM (bit 2) disabled by default
* PSR (bit 3) disabled by default
+ * EDP NO POWER SEQUENCING (bit 4) disabled by default
*/
uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
@@ -1167,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
+ {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
@@ -1188,6 +1190,10 @@ static const struct pci_device_id pciidlist[] = {
/* Van Gogh */
{0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
+ /* Yellow Carp */
+ {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+ {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+
/* Navy_Flounder */
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
@@ -1198,6 +1204,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
@@ -1206,6 +1213,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+ /* BEIGE_GOBY */
+ {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+
{0, 0, 0}
};
@@ -1557,6 +1571,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ } else if (amdgpu_device_supports_boco(drm_dev)) {
+ /* nothing to do */
} else if (amdgpu_device_supports_baco(drm_dev)) {
amdgpu_device_baco_enter(drm_dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b3404c43a911..854fc497844b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -255,6 +255,15 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
+ /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
+ * for debugger access to invisible VRAM. Should have used MAP_SHARED
+ * instead. Clearing VM_MAYWRITE prevents the mapping from ever
+ * becoming writable and makes is_cow_mapping(vm_flags) false.
+ */
+ if (is_cow_mapping(vma->vm_flags) &&
+ !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ vma->vm_flags &= ~VM_MAYWRITE;
+
return drm_gem_ttm_mmap(obj, vma);
}
@@ -612,7 +621,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
goto error;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 0174f7817ce2..d0b8d415b63b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -562,6 +562,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 32ce0e679dc7..83af307e97cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
+static void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+ return;
+
+ /* VF FLR */
+ ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
/**
* amdgpu_irq_init - initialize interrupt handling
*
@@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_restore_msix(adev);
+
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d6c54c7f7679..4b153daf283d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -160,7 +160,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
struct mm_struct *mm, struct page **pages,
uint64_t start, uint64_t npages,
struct hmm_range **phmm_range, bool readonly,
- bool mmap_locked)
+ bool mmap_locked, void *owner)
{
struct hmm_range *hmm_range;
unsigned long timeout;
@@ -185,6 +185,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
hmm_range->hmm_pfns = pfns;
hmm_range->start = start;
hmm_range->end = start + npages * PAGE_SIZE;
+ hmm_range->dev_private_owner = owner;
/* Assuming 512MB takes maximum 1 second to fault page address */
timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index 7f7d37a457c3..14a3c1864085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -34,7 +34,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
struct mm_struct *mm, struct page **pages,
uint64_t start, uint64_t npages,
struct hmm_range **phmm_range, bool readonly,
- bool mmap_locked);
+ bool mmap_locked, void *owner);
int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 25ee53545837..45295dce5c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -93,6 +93,8 @@ struct amdgpu_nbio_funcs {
void (*enable_aspm)(struct amdgpu_device *adev,
bool enable);
void (*program_aspm)(struct amdgpu_device *adev);
+ void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
+ void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
};
struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c13b02caf8c3..fc66aca28594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
- struct ras_query_if *info)
+ struct ras_query_if *info)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
-/* get the total error counts on all IPs */
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- unsigned long *ce_count,
- unsigned long *ue_count)
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
+ * errors.
+ *
+ * If @ce_count or @ue_count is set, count and return the corresponding
+ * error counts in those integer pointers. Return 0 if the device
+ * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
unsigned long ce, ue;
if (!adev->ras_enabled || !con)
- return;
+ return -EOPNOTSUPP;
+
+ /* Don't count if there is nothing to report through.
+ */
+ if (!ce_count && !ue_count)
+ return 0;
ce = 0;
ue = 0;
@@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
struct ras_query_if info = {
.head = obj->head,
};
+ int res;
- if (amdgpu_ras_query_error_status(adev, &info))
- return;
+ res = amdgpu_ras_query_error_status(adev, &info);
+ if (res)
+ return res;
ce += info.ce_count;
ue += info.ue_count;
@@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
if (ue_count)
*ue_count = ue;
+
+ return 0;
}
/* query/inject/cure end */
@@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
/* Cache new values.
*/
- amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
pm_runtime_mark_last_busy(dev->dev);
Out:
@@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
- amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
return 0;
cleanup:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 256cea5d34f2..b504ed8c9b50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- unsigned long *ce_count,
- unsigned long *ue_count);
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index f40c871da0c6..38222de921d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -26,6 +26,7 @@
#include "amdgpu_ras.h"
#include <linux/bits.h>
#include "atom.h"
+#include "amdgpu_atomfirmware.h"
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
@@ -96,6 +97,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
if (!i2c_addr)
return false;
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)i2c_addr))
+ return true;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
*i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 59e0fefb15aa..acfa207cf970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -54,11 +54,12 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
{
struct drm_mm_node *node;
- if (!res) {
+ if (!res || res->mem_type == TTM_PL_SYSTEM) {
cur->start = start;
cur->size = size;
cur->remaining = size;
cur->node = NULL;
+ WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 0527772fe1b8..d855cb53c7e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -176,10 +176,10 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_fast_assign(
__entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -201,10 +201,10 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_fast_assign(
__entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -229,7 +229,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->pasid = vm->pasid;
- __assign_str(ring, ring->name)
+ __assign_str(ring, ring->name);
__entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
@@ -424,7 +424,7 @@ TRACE_EVENT(amdgpu_vm_flush,
),
TP_fast_assign(
- __assign_str(ring, ring->name)
+ __assign_str(ring, ring->name);
__entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
@@ -525,7 +525,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __assign_str(ring, sched_job->base.sched->name)
+ __assign_str(ring, sched_job->base.sched->name);
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index cdfc20b0b2eb..3a55f08e00e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -590,10 +590,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
- if (adev->gmc.xgmi.connected_to_cpu)
- mem->bus.caching = ttm_cached;
- else
- mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -695,7 +691,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
ttm->num_pages, &gtt->range, readonly,
- true);
+ true, NULL);
out_unlock:
mmap_read_unlock(mm);
mmput(mm);
@@ -923,7 +919,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+ if (bo_mem->mem_type != TTM_PL_TT ||
+ !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
gtt->offset = AMDGPU_BO_INVALID_OFFSET;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 79cfa2d68487..078c068937fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, fence);
if (table_freed)
- *table_freed = *table_freed || params.table_freed;
+ *table_freed = params.table_freed;
error_unlock:
amdgpu_vm_eviction_unlock(vm);
@@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
* @clear: if true clear the entries
- * @table_freed: return true if page table is freed
*
* Fill in the page table entries for @bo_va.
*
@@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed)
+ bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
resv, mapping->start,
mapping->last, update_flags,
mapping->offset, mem,
- pages_addr, last_update, table_freed);
+ pages_addr, last_update, NULL);
if (r)
return r;
}
@@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to bo cleared in the page tables */
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
}
@@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
else
clear = true;
- r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index ddb85a85cbba..f8fa653d4da7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence **fence, bool *free_table);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed);
+ bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 436ec246a7da..2fd77c36a1ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -463,6 +463,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (i == 1)
node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ node->base.bus.caching = ttm_cached;
+ else
+ node->base.bus.caching = ttm_write_combined;
+
atomic64_add(vis_usage, &mgr->vis_usage);
*res = &node->base;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 5b90efd6f6d0..3ac505d954c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -36,9 +36,12 @@ athub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ return;
+
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ if (enable)
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -53,10 +56,13 @@ athub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
+ if (!((adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)))
+ return;
+
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
- (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ if (enable)
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 33324427b555..7e0d8c092c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
+ adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 2d56b60bc058..a64b2c706090 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3300,6 +3300,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};
@@ -3379,6 +3380,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
@@ -3445,6 +3447,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020)
@@ -5086,47 +5089,44 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
4 + /* RMI */
1); /* SQG */
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14 ||
- adev->asic_type == CHIP_NAVI12) {
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
- wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
- /*
- * Set corresponding TCP bits for the inactive WGPs in
- * GCRD_SA_TARGETS_DISABLE
- */
- gcrd_targets_disable_tcp = 0;
- /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
- utcl_invreq_disable = 0;
-
- for (k = 0; k < max_wgp_per_sh; k++) {
- if (!(wgp_active_bitmap & (1 << k))) {
- gcrd_targets_disable_tcp |= 3 << (2 * k);
- utcl_invreq_disable |= (3 << (2 * k)) |
- (3 << (2 * (max_wgp_per_sh + k)));
- }
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
+ /*
+ * Set corresponding TCP bits for the inactive WGPs in
+ * GCRD_SA_TARGETS_DISABLE
+ */
+ gcrd_targets_disable_tcp = 0;
+ /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
+ utcl_invreq_disable = 0;
+
+ for (k = 0; k < max_wgp_per_sh; k++) {
+ if (!(wgp_active_bitmap & (1 << k))) {
+ gcrd_targets_disable_tcp |= 3 << (2 * k);
+ gcrd_targets_disable_tcp |= 1 << (k + (max_wgp_per_sh * 2));
+ utcl_invreq_disable |= (3 << (2 * k)) |
+ (3 << (2 * (max_wgp_per_sh + k)));
}
-
- tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
- /* only override TCP & SQC bits */
- tmp &= 0xffffffff << (4 * max_wgp_per_sh);
- tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
- WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);
-
- tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
- /* only override TCP bits */
- tmp &= 0xffffffff << (2 * max_wgp_per_sh);
- tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
- WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
}
- }
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
+ tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
+ /* only override TCP & SQC bits */
+ tmp &= (0xffffffffU << (4 * max_wgp_per_sh));
+ tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
+ WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);
+
+ tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
+ /* only override TCP & SQC bits */
+ tmp &= (0xffffffffU << (3 * max_wgp_per_sh));
+ tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
+ WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
+ }
}
+
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
@@ -7404,7 +7404,10 @@ static int gfx_v10_0_hw_init(void *handle)
* init golden registers and rlc resume may override some registers,
* reconfig them here
*/
- gfx_v10_0_tcp_harvest(adev);
+ if (adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14 ||
+ adev->asic_type == CHIP_NAVI12)
+ gfx_v10_0_tcp_harvest(adev);
r = gfx_v10_0_cp_resume(adev);
if (r)
@@ -7777,6 +7780,9 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t data, def;
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
+ return;
+
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 0 - Disable some blocks' MGCG */
@@ -7791,6 +7797,7 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
@@ -7813,13 +7820,15 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
- } else {
+ } else if (!enable || !(adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -7845,22 +7854,34 @@ static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)))
+ return;
+
/* Enable 3D CGCG/CGLS */
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+
/* unset CGCG override */
- data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
+
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
@@ -7873,9 +7894,14 @@ static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
} else {
/* Disable CGCG/CGLS */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+
/* disable cgcg, cgls should be disabled */
- data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
- RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
+ data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
+
/* disable cgcg and cgls in FSM */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
@@ -7887,25 +7913,35 @@ static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t def, data;
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)))
+ return;
+
+ if (enable) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+
/* unset CGCG override */
- data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
- else
- data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
+
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
@@ -7917,8 +7953,14 @@ static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
} else {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+
/* reset CGCG/CGLS bits */
- data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
+ data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+
/* disable cgcg and cgls in FSM */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
@@ -7930,7 +7972,10 @@ static void gfx_v10_0_update_fine_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data;
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) {
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
+ return;
+
+ if (enable) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset FGCG override */
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
@@ -7961,6 +8006,97 @@ static void gfx_v10_0_update_fine_grain_clock_gating(struct amdgpu_device *adev,
}
}
+static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_device *adev)
+{
+ uint32_t reg_data = 0;
+ uint32_t reg_idx = 0;
+ uint32_t i;
+
+ const uint32_t tcp_ctrl_regs[] = {
+ mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP12_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP12_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP12_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP12_CU1_TCP_CTRL_REG
+ };
+
+ const uint32_t tcp_ctrl_regs_nv12[] = {
+ mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG,
+ mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG,
+ };
+
+ const uint32_t sm_ctlr_regs[] = {
+ mmCGTS_SA0_QUAD0_SM_CTRL_REG,
+ mmCGTS_SA0_QUAD1_SM_CTRL_REG,
+ mmCGTS_SA1_QUAD0_SM_CTRL_REG,
+ mmCGTS_SA1_QUAD1_SM_CTRL_REG
+ };
+
+ if (adev->asic_type == CHIP_NAVI12) {
+ for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) {
+ reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
+ tcp_ctrl_regs_nv12[i];
+ reg_data = RREG32(reg_idx);
+ reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK;
+ WREG32(reg_idx, reg_data);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs); i++) {
+ reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
+ tcp_ctrl_regs[i];
+ reg_data = RREG32(reg_idx);
+ reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK;
+ WREG32(reg_idx, reg_data);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sm_ctlr_regs); i++) {
+ reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_QUAD0_SM_CTRL_REG_BASE_IDX] +
+ sm_ctlr_regs[i];
+ reg_data = RREG32(reg_idx);
+ reg_data &= ~CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE_MASK;
+ reg_data |= 2 << CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE__SHIFT;
+ WREG32(reg_idx, reg_data);
+ }
+}
+
static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -7977,6 +8113,10 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === CGCG + CGLS === */
gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
+
+ if ((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))
+ gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS
* === CGCG + CGLS ===
@@ -9324,17 +9464,22 @@ static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
- u32 data, wgp_bitmask;
- data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
+ u32 disabled_mask =
+ ~amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
+ u32 efuse_setting = 0;
+ u32 vbios_setting = 0;
+
+ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
+ efuse_setting &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
+ efuse_setting >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
- data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
- data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
+ vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
+ vbios_setting &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
+ vbios_setting >>= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
- wgp_bitmask =
- amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
+ disabled_mask |= efuse_setting | vbios_setting;
- return (~data) & wgp_bitmask;
+ return (~disabled_mask);
}
static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
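
The reworked gfx_v10_0_get_wgp_active_bitmap_per_sh() above folds the efuse and VBIOS inactive-WGP fields into a disabled mask whose upper bits (everything beyond max_cu_per_sh / 2 WGPs) start out set, and then returns the complement. A self-contained sketch of that arithmetic, with the register reads replaced by plain parameters:

#include <stdint.h>
#include <stdio.h>

/* (1 << n) - 1, i.e. the n low bits set; mirrors amdgpu_gfx_create_bitmask(). */
static uint32_t create_bitmask(unsigned int n)
{
        return (uint32_t)((1ull << n) - 1);
}

/* A WGP is active iff it lies inside the physical range (max_cu_per_sh / 2
 * WGPs per shader array) and is not marked inactive by either the efuse or
 * the VBIOS setting.
 */
static uint32_t wgp_active_bitmap(unsigned int max_cu_per_sh,
                                  uint32_t efuse_inactive,
                                  uint32_t vbios_inactive)
{
        uint32_t disabled = ~create_bitmask(max_cu_per_sh >> 1);

        disabled |= efuse_inactive | vbios_inactive;
        return ~disabled;
}

int main(void)
{
        /* 10 CUs per SA -> 5 WGPs; efuse disables WGP0, VBIOS disables WGP3. */
        printf("%#x\n", wgp_active_bitmap(10, 0x1, 0x8)); /* prints 0x16 */
        return 0;
}
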
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 044076ec1d03..6a23c6826e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1295,6 +1295,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
return false;
}
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
+{
+ if ((adev->asic_type == CHIP_RENOIR) &&
+ (adev->gfx.me_fw_version >= 0x000000a5) &&
+ (adev->gfx.me_feature_version >= 52))
+ return true;
+ else
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -3675,7 +3685,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+ /* If GC has entered CGPG, ringing doorbell > first page
+ * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
+ * workaround this issue. And this change has to align with firmware
+ * update.
+ */
+ if (check_if_enlarge_doorbell_range(adev))
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+ (adev->doorbell.size - 4));
+ else
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell_index.userqueue_end * 2) << 2);
}
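
The gfx_v9_0 hunk widens CP_MEC_DOORBELL_RANGE_UPPER on Renoir with new enough ME firmware so that a doorbell ring beyond the first page still wakes the GC out of CGPG. A rough userspace model of the decision follows; the struct, the sample doorbell size and the user-queue index are illustrative only, while the firmware thresholds are taken from the hunk itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative device model, not the real amdgpu_device layout. */
struct dev_model {
        bool is_renoir;
        uint32_t me_fw_version;
        uint32_t me_feature_version;
        uint32_t doorbell_size;       /* total doorbell BAR size in bytes */
        uint32_t userqueue_end_index; /* last user-queue doorbell index (made up) */
};

static bool enlarge_doorbell_range(const struct dev_model *d)
{
        return d->is_renoir &&
               d->me_fw_version >= 0xa5 &&
               d->me_feature_version >= 52;
}

static uint32_t mec_doorbell_range_upper(const struct dev_model *d)
{
        if (enlarge_doorbell_range(d))
                return d->doorbell_size - 4;        /* cover the whole BAR */
        return (d->userqueue_end_index * 2) << 2;   /* original behaviour */
}

int main(void)
{
        struct dev_model d = { true, 0xa6, 53, 8 * 1024, 0x8b };

        printf("upper = %#x\n", mec_doorbell_range_upper(&d)); /* 0x1ffc */
        return 0;
}
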
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 7a15e669b68d..5793977953cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -90,45 +90,56 @@ static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
RC_MEM_POWER_SD_EN, 0);
WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
- /* only one clock gating mode (LS/DS/SD) can be enabled */
- if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, enable);
- /* RC should not use shut down mode, fallback to ds */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- }
-
- /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
- * be set for SRAM LS/DS/SD */
- if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 1);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 1);
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, 1);
+ /* RC should not use shut down mode, fallback to ds or ls if allowed */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS)
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ }
+
+ /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
}
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* restore IPH & RC clock override after clock/power mode changing */
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
+ /* disable IPH & RC clock override after clock/power mode changing */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ IPH_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
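
The hdp_v5_0 rework makes the LS/DS/SD programming conditional on enable while keeping the "only one mode at a time" rule, and RC memory still never uses shut-down mode. The sketch below models just the mode selection; the register writes and the clock-override sequencing are omitted.

#include <stdio.h>

enum hdp_pg_mode { PG_NONE, PG_LS, PG_DS, PG_SD };

/* Only one SRAM power-gating mode may be enabled; flags are checked in
 * LS -> DS -> SD order, mirroring the if/else chain in the hunk.
 */
static enum hdp_pg_mode pick_iph_mode(int ls, int ds, int sd)
{
        if (ls)
                return PG_LS;
        if (ds)
                return PG_DS;
        if (sd)
                return PG_SD;
        return PG_NONE;
}

/* RC memory must not use shut-down mode; in the SD branch it is only put
 * into DS or LS when those flags are also set, as in the reworked code.
 */
static enum hdp_pg_mode pick_rc_mode(int ls, int ds, int sd)
{
        enum hdp_pg_mode m = pick_iph_mode(ls, ds, sd);

        if (m == PG_SD)
                return ds ? PG_DS : (ls ? PG_LS : PG_NONE);
        return m;
}

int main(void)
{
        /* Only SD advertised: IPH gets SD (3), RC gets nothing (0). */
        printf("IPH: %d, RC: %d\n", pick_iph_mode(0, 0, 1), pick_rc_mode(0, 0, 1));
        return 0;
}
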
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index f7e93bbc4e15..7ded6b2f058e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -568,6 +568,9 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
{
uint32_t def, data, def1, data1;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ return;
+
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -582,7 +585,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
break;
}
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
+ if (enable) {
data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
@@ -627,6 +630,9 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+ return;
+
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -639,7 +645,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
break;
}
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+ if (enable)
data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
else
data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 3ee481557fc9..ff2307d7ee0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reseted by
* the VF FLR.
*/
- if (!down_read_trylock(&adev->reset_sem))
+ if (!down_write_trylock(&adev->reset_sem))
return;
amdgpu_virt_fini_data_exchange(adev);
@@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
flr_done:
atomic_set(&adev->in_gpu_reset, 0);
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 48e588d3c409..9f7aac435d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reseted by
* the VF FLR.
*/
- if (!down_read_trylock(&adev->reset_sem))
+ if (!down_write_trylock(&adev->reset_sem))
return;
amdgpu_virt_fini_data_exchange(adev);
@@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
flr_done:
atomic_set(&adev->in_gpu_reset, 0);
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
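
Both mxgpu_ai and mxgpu_nv now take reset_sem for write in the FLR worker, so host-triggered function-level reset handling is serialized against anything holding the reset lock instead of running alongside other readers. A userspace analogue using a pthread rwlock; the printed strings are placeholders, not driver messages.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

/* FLR handler: try to take the reset lock exclusively; if someone else
 * already holds it, skip the work, as the driver does.
 */
static void flr_work(void)
{
        if (pthread_rwlock_trywrlock(&reset_sem) != 0) {
                printf("reset lock busy, skip FLR handling\n");
                return;
        }
        printf("performing VF FLR handling\n");
        pthread_rwlock_unlock(&reset_sem);
}

int main(void)
{
        flr_work();                        /* uncontended: takes the lock */

        pthread_rwlock_rdlock(&reset_sem); /* simulate a concurrent reader
                                            * (same thread here, for brevity) */
        flr_work();                        /* try-write fails, work is skipped */
        pthread_rwlock_unlock(&reset_sem);
        return 0;
}
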
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 05ddec7ba7e2..7b79eeaa88aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -51,6 +51,8 @@
#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -218,8 +220,11 @@ static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
def = data = RREG32_PCIE(smnCPM_CONTROL);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
+ if (enable) {
data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
@@ -244,8 +249,11 @@ static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev
{
uint32_t def, data;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
def = data = RREG32_PCIE(smnPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
+ if (enable) {
data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
@@ -463,6 +471,43 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
}
+static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
+{
+ uint32_t reg_data = 0;
+ uint32_t link_width = 0;
+
+ if (!((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12)))
+ return;
+
+ reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
+ link_width = (reg_data & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+
+ /*
+ * Program PCIE_LC_CNTL6.LC_SPC_MODE_8GT to 0x2 (4 symbols per clock data)
+ * if link_width is 0x3 (x4)
+ */
+ if (0x3 == link_width) {
+ reg_data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ reg_data &= ~PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK;
+ reg_data |= (0x2 << PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT);
+ WREG32_PCIE(smnPCIE_LC_CNTL6, reg_data);
+ }
+}
+
+static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev)
+{
+ uint32_t reg_data = 0;
+
+ if (adev->asic_type != CHIP_NAVI10)
+ return;
+
+ reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
+ reg_data |= PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK;
+ WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
+}
+
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
@@ -484,4 +529,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
.enable_aspm = nbio_v2_3_enable_aspm,
.program_aspm = nbio_v2_3_program_aspm,
+ .apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
+ .apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
};
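
nbio_v2_3 gains two PCIe workarounds wired up through new amdgpu_nbio_funcs callbacks: forcing LC_SPC_MODE_8GT to 4 symbols per clock when the link trains to x4, and enabling L1 link-width reconfiguration on Navi10. The sketch below models only the first one; the mask and shift values are made up for illustration, while the x4 encoding (0x3) and the 0x2 mode value come from the hunk.

#include <stdint.h>
#include <stdio.h>

/* Field layout here is illustrative, not the real register definition. */
#define LINK_WIDTH_RD_MASK   0x70u
#define LINK_WIDTH_RD_SHIFT  4
#define SPC_MODE_8GT_MASK    0x3u
#define SPC_MODE_8GT_SHIFT   0

/* If the negotiated link width reads back as x4 (encoded 0x3), force
 * 4 symbols per clock for 8GT/s in the LC_CNTL6-style register.
 */
static uint32_t apply_spc_mode_wa(uint32_t width_cntl, uint32_t lc_cntl6)
{
        uint32_t width = (width_cntl & LINK_WIDTH_RD_MASK) >> LINK_WIDTH_RD_SHIFT;

        if (width == 0x3) {
                lc_cntl6 &= ~SPC_MODE_8GT_MASK;
                lc_cntl6 |= 0x2u << SPC_MODE_8GT_SHIFT;
        }
        return lc_cntl6;
}

int main(void)
{
        printf("x4 link : cntl6 = %#x\n", apply_spc_mode_wa(0x3 << 4, 0x0));
        printf("x16 link: cntl6 = %#x\n", apply_spc_mode_wa(0x6 << 4, 0x0));
        return 0;
}
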
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 455d0425787c..94d029dbf30d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -69,20 +69,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_encode =
@@ -94,55 +82,13 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_decode =
@@ -154,62 +100,14 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_decode =
@@ -221,80 +119,20 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 8192 * 4352,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
@@ -309,6 +147,36 @@ static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
.codec_array = sriov_sc_video_codecs_decode_array,
};
+/* Beige Goby*/
+static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs bg_video_codecs_decode = {
+ .codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
+ .codec_array = bg_video_codecs_decode_array,
+};
+
+static const struct amdgpu_video_codecs bg_video_codecs_encode = {
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* Yellow Carp*/
+static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+};
+
+static const struct amdgpu_video_codecs yc_video_codecs_decode = {
+ .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
+ .codec_array = yc_video_codecs_decode_array,
+};
+
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -329,12 +197,23 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
return 0;
+ case CHIP_YELLOW_CARP:
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &yc_video_codecs_decode;
+ return 0;
+ case CHIP_BEIGE_GOBY:
+ if (encode)
+ *codecs = &bg_video_codecs_encode;
+ else
+ *codecs = &bg_video_codecs_decode;
+ return 0;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
@@ -1275,7 +1154,6 @@ static int nv_common_early_init(void *handle)
break;
case CHIP_VANGOGH:
- adev->apu_flags |= AMD_APU_IS_VANGOGH;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1358,7 +1236,10 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
- adev->external_rev_id = adev->rev_id + 0x01;
+ if (adev->pdev->device == 0x1681)
+ adev->external_rev_id = adev->rev_id + 0x19;
+ else
+ adev->external_rev_id = adev->rev_id + 0x01;
break;
default:
/* FIXME: not supported yet */
@@ -1411,6 +1292,12 @@ static int nv_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->nbio.funcs->apply_lc_spc_mode_wa)
+ adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
+
+ if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
+ adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
+
/* enable pcie gen2/3 link */
nv_pcie_gen3_enable(adev);
/* enable aspm */
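
The nv.c (and later soc15.c) codec tables are collapsed with a codec_info_build() macro, and new per-ASIC decode lists are added for Beige Goby and Yellow Carp. The macro itself is defined in a header outside this patch, so the expansion below is an assumption that merely reproduces the fields of the removed initializers; note also that several converted entries carry a max height of 4906 where the removed code had 4096. A compilable stand-in:

#include <stdio.h>

/* Local stand-in for struct amdgpu_video_codec_info. */
struct codec_info {
        unsigned int codec_type;
        unsigned int max_width;
        unsigned int max_height;
        unsigned int max_pixels_per_frame;
        unsigned int max_level;
};

/* Plausible shape of codec_info_build(); treat this as an assumption
 * about its expansion, not the kernel's definition.
 */
#define codec_info_build(type, width, height, level)    \
        .codec_type = (type),                           \
        .max_width = (width),                           \
        .max_height = (height),                         \
        .max_pixels_per_frame = (width) * (height),     \
        .max_level = (level)

#define CODEC_HEVC 1 /* illustrative index */

static const struct codec_info decode_array[] = {
        {codec_info_build(CODEC_HEVC, 8192, 4352, 186)},
};

int main(void)
{
        printf("HEVC: %ux%u, %u pixels/frame, level %u\n",
               decode_array[0].max_width, decode_array[0].max_height,
               decode_array[0].max_pixels_per_frame, decode_array[0].max_level);
        return 0;
}
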
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 618e5b6b85d9..536d41f327c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
+ return err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
@@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
} else {
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
- goto out2;
+ goto out;
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
@@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
return 0;
-out2:
+out:
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
-out:
if (err) {
dev_err(adev->dev,
"psp v12.0: Failed to load firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ae5464e2535a..8931000dcd41 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -144,7 +144,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
@@ -288,7 +288,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};
@@ -1896,8 +1896,11 @@ static int sdma_v4_0_late_init(void *handle)
sdma_v4_0_setup_ulv(adev);
- if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
- adev->sdma.funcs->reset_ras_error_count(adev);
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ if (adev->sdma.funcs &&
+ adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
+ }
if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
return adev->sdma.funcs->ras_late_init(adev, &ih_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
index e9c474c217ec..b6f1322f908c 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
@@ -43,9 +43,12 @@ static void smuio_v11_0_update_rom_clock_gating(struct amdgpu_device *adev, bool
if (adev->flags & AMD_IS_APU)
return;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ return;
+
def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ if (enable)
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index de85577c9cfd..b7d350be8050 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -88,20 +88,8 @@
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -113,48 +101,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -166,55 +118,13 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs rv_video_codecs_decode =
@@ -226,55 +136,13 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 3,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 5,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 52,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 4,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 186,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
- .max_width = 8192,
- .max_height = 4352,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs rn_video_codecs_decode =
@@ -1360,10 +1228,7 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
- if (adev->pdev->device == 0x15dd)
- adev->apu_flags |= AMD_APU_IS_RAVEN;
- if (adev->pdev->device == 0x15d8)
- adev->apu_flags |= AMD_APU_IS_PICASSO;
+
if (adev->rev_id >= 0x8)
adev->apu_flags |= AMD_APU_IS_RAVEN2;
@@ -1455,11 +1320,6 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- if ((adev->pdev->device == 0x1636) ||
- (adev->pdev->device == 0x164c))
- adev->apu_flags |= AMD_APU_IS_RENOIR;
- else
- adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
if (adev->apu_flags & AMD_APU_IS_RENOIR)
adev->external_rev_id = adev->rev_id + 0x91;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 67541c30327a..e48acdd03c1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
long err = 0;
int i;
uint32_t *devices_arr = NULL;
- bool table_freed = false;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- peer->kgd, (struct kgd_mem *)mem,
- peer_pdd->drm_priv, &table_freed);
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
i, args->n_devices);
@@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
}
/* Flush TLBs after waiting for the page table updates to complete */
- if (table_freed) {
- for (i = 0; i < args->n_devices; i++) {
- peer = kfd_device_by_id(devices_arr[i]);
- if (WARN_ON_ONCE(!peer))
- continue;
- peer_pdd = kfd_get_process_device_data(peer, p);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
- }
+ for (i = 0; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+ peer_pdd = kfd_get_process_device_data(peer, p);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
+
kfree(devices_arr);
return err;
@@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
-
- err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
- if (err) {
- pr_debug("Sync memory failed, wait interrupted by user signal\n");
- goto sync_memory_failed;
- }
-
- /* Flush TLBs after waiting for the page table updates to complete */
- for (i = 0; i < args->n_devices; i++) {
- peer = kfd_device_by_id(devices_arr[i]);
- if (WARN_ON_ONCE(!peer))
- continue;
- peer_pdd = kfd_get_process_device_data(peer, p);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
- }
-
kfree(devices_arr);
+ mutex_unlock(&p->mutex);
+
return 0;
bind_process_to_device_failed:
@@ -1596,7 +1576,6 @@ get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
}
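
With the table_freed plumbing removed (see the amdgpu_vm_bo_update() signature change earlier in this patch), kfd_ioctl_map_memory_to_gpu() now issues a legacy TLB flush for every successfully mapped device instead of only when a page table was freed, and the unmap path no longer syncs and flushes at all. A trivial model of the new map tail:

#include <stdio.h>

enum tlb_flush_type { TLB_FLUSH_LEGACY, TLB_FLUSH_HEAVYWEIGHT };

static void flush_tlb(int gpu_id, enum tlb_flush_type type)
{
        printf("gpu %d: %s flush\n", gpu_id,
               type == TLB_FLUSH_LEGACY ? "legacy" : "heavyweight");
}

/* After mapping to all requested devices, flush the legacy TLBs on each
 * of them unconditionally; no "page table freed" hint is consulted.
 */
static void map_memory_tail(const int *gpu_ids, int n_devices)
{
        for (int i = 0; i < n_devices; i++)
                flush_tlb(gpu_ids[i], TLB_FLUSH_LEGACY);
}

int main(void)
{
        const int gpus[] = { 0, 1, 2 };

        map_memory_tail(gpus, 3);
        return 0;
}
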
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 2660f03e63a7..dab290a4d19d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -218,7 +218,8 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
struct page *page;
page = pfn_to_page(pfn);
- page->zone_device_data = prange;
+ svm_range_bo_ref(prange->svm_bo);
+ page->zone_device_data = prange->svm_bo;
get_page(page);
lock_page(page);
}
@@ -293,15 +294,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
for (i = j = 0; i < npages; i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
-
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
- migrate->dst[i] |= MIGRATE_PFN_LOCKED;
-
- if (migrate->src[i] & MIGRATE_PFN_VALID) {
- spage = migrate_pfn_to_page(migrate->src[i]);
+ spage = migrate_pfn_to_page(migrate->src[i]);
+ if (spage && !is_zone_device_page(spage)) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ migrate->dst[i] |= MIGRATE_PFN_LOCKED;
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
@@ -355,6 +354,20 @@ out_free_vram_pages:
}
}
+#ifdef DEBUG_FORCE_MIXED_DOMAINS
+ for (i = 0, j = 0; i < npages; i += 4, j++) {
+ if (j & 1)
+ continue;
+ svm_migrate_put_vram_page(adev, dst[i]);
+ migrate->dst[i] = 0;
+ svm_migrate_put_vram_page(adev, dst[i + 1]);
+ migrate->dst[i + 1] = 0;
+ svm_migrate_put_vram_page(adev, dst[i + 2]);
+ migrate->dst[i + 2] = 0;
+ svm_migrate_put_vram_page(adev, dst[i + 3]);
+ migrate->dst[i + 3] = 0;
+ }
+#endif
out:
return r;
}
@@ -365,20 +378,20 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t end)
{
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
dma_addr_t *scratch;
size_t size;
void *buf;
int r = -ENOMEM;
- int retry = 0;
memset(&migrate, 0, sizeof(migrate));
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
- migrate.pgmap_owner = adev;
+ migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
size *= npages;
@@ -390,7 +403,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.dst = migrate.src + npages;
scratch = (dma_addr_t *)(migrate.dst + npages);
-retry:
r = migrate_vma_setup(&migrate);
if (r) {
pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
@@ -398,17 +410,9 @@ retry:
goto out_free;
}
if (migrate.cpages != npages) {
- pr_debug("collect 0x%lx/0x%llx pages, retry\n", migrate.cpages,
+ pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
+ migrate.cpages,
npages);
- migrate_vma_finalize(&migrate);
- if (retry++ >= 3) {
- r = -ENOMEM;
- pr_debug("failed %d migrate svms 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange->start, prange->last);
- goto out_free;
- }
-
- goto retry;
}
if (migrate.cpages) {
@@ -425,6 +429,12 @@ retry:
out_free:
kvfree(buf);
out:
+ if (!r) {
+ pdd = svm_range_get_pdd_by_adev(prange, adev);
+ if (pdd)
+ WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
+ }
+
return r;
}
@@ -464,7 +474,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
prange->start, prange->last, best_loc);
/* FIXME: workaround for page locking bug with invalid pages */
- svm_range_prefault(prange, mm);
+ svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
@@ -493,15 +503,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
static void svm_migrate_page_free(struct page *page)
{
- /* Keep this function to avoid warning */
+ struct svm_range_bo *svm_bo = page->zone_device_data;
+
+ if (svm_bo) {
+ pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
+ svm_range_bo_unref(svm_bo);
+ }
}
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch)
+ dma_addr_t *scratch, uint64_t npages)
{
- uint64_t npages = migrate->cpages;
struct device *dev = adev->dev;
uint64_t *src;
dma_addr_t *dst;
@@ -518,15 +532,23 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
src = (uint64_t *)(scratch + npages);
dst = scratch;
- for (i = 0, j = 0; i < npages; i++, j++, addr += PAGE_SIZE) {
+ for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
struct page *spage;
spage = migrate_pfn_to_page(migrate->src[i]);
- if (!spage) {
- pr_debug("failed get spage svms 0x%p [0x%lx 0x%lx]\n",
+ if (!spage || !is_zone_device_page(spage)) {
+ pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
- r = -ENOMEM;
- goto out_oom;
+ if (j) {
+ r = svm_migrate_copy_memory_gart(adev, dst + i - j,
+ src + i - j, j,
+ FROM_VRAM_TO_RAM,
+ mfence);
+ if (r)
+ goto out_oom;
+ j = 0;
+ }
+ continue;
}
src[i] = svm_migrate_addr(adev, spage);
if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
@@ -559,6 +581,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
migrate->dst[i] |= MIGRATE_PFN_LOCKED;
+ j++;
}
r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
@@ -581,6 +604,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
dma_addr_t *scratch;
@@ -593,7 +617,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.start = start;
migrate.end = end;
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
- migrate.pgmap_owner = adev;
+ migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
size *= npages;
@@ -616,7 +640,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
if (migrate.cpages) {
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
- scratch);
+ scratch, npages);
migrate_vma_pages(&migrate);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -630,6 +654,12 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
out_free:
kvfree(buf);
out:
+ if (!r) {
+ pdd = svm_range_get_pdd_by_adev(prange, adev);
+ if (pdd)
+ WRITE_ONCE(pdd->page_out,
+ pdd->page_out + migrate.cpages);
+ }
return r;
}
@@ -859,7 +889,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->ops = &svm_migrate_pgmap_ops;
- pgmap->owner = adev;
+ pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
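A minimal sketch of the accounting pattern used by the two migration hunks above: both directions bump per-device counters with WRITE_ONCE() so the lockless sysfs readers added later in this series can pair it with READ_ONCE(). The helper name is hypothetical; the page_in/page_out fields are the ones added to struct kfd_process_device below.

/* Illustrative only; mirrors the WRITE_ONCE() updates in the hunks above. */
static void svm_account_migration(struct kfd_process_device *pdd,
				  unsigned long cpages, bool to_vram)
{
	if (!pdd)
		return;

	if (to_vram)
		WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
	else
		WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
}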
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6dc22fa1e555..3426743ed228 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -730,6 +730,15 @@ struct kfd_process_device {
* number of CU's a device has along with number of other competing processes
*/
struct attribute attr_cu_occupancy;
+
+ /* sysfs counters for GPU retry fault and page migration tracking */
+ struct kobject *kobj_counters;
+ struct attribute attr_faults;
+ struct attribute attr_page_in;
+ struct attribute attr_page_out;
+ uint64_t faults;
+ uint64_t page_in;
+ uint64_t page_out;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
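A hedged userspace usage sketch for the counters declared above: once the per-GPU counters directory exists, the values read like any other sysfs attribute. The directory layout follows the comments in kfd_procfs_add_sysfs_counters() below; the /sys/class/kfd/kfd/proc/<pid>/ prefix is an assumption and may differ on a given system.

/* Read e.g. /sys/class/kfd/kfd/proc/<pid>/counters_<gpuid>/page_in (path assumed). */
#include <stdio.h>

static unsigned long long read_kfd_counter(const char *path)
{
	unsigned long long v = 0;
	FILE *f = fopen(path, "r");

	if (!f)
		return 0;
	if (fscanf(f, "%llu", &v) != 1)
		v = 0;
	fclose(f);
	return v;
}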
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 09b98a83f670..8a2c6fc438c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -416,6 +416,29 @@ static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
return 0;
}
+static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kfd_process_device *pdd;
+
+ if (!strcmp(attr->name, "faults")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_faults);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
+ }
+ if (!strcmp(attr->name, "page_in")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_page_in);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
+ }
+ if (!strcmp(attr->name, "page_out")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_page_out);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
+ }
+ return 0;
+}
+
static struct attribute attr_queue_size = {
.name = "size",
.mode = KFD_SYSFS_FILE_MODE
@@ -451,13 +474,18 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct attribute *procfs_stats_attrs[] = {
- NULL
-};
-
static struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
- .default_attrs = procfs_stats_attrs,
+ .release = kfd_procfs_kobj_release,
+};
+
+static const struct sysfs_ops sysfs_counters_ops = {
+ .show = kfd_sysfs_counters_show,
+};
+
+static struct kobj_type sysfs_counters_type = {
+ .sysfs_ops = &sysfs_counters_ops,
+ .release = kfd_procfs_kobj_release,
};
int kfd_procfs_add_queue(struct queue *q)
@@ -484,34 +512,31 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
-static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
+static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
char *name)
{
- int ret = 0;
+ int ret;
- if (!p || !attr || !name)
- return -EINVAL;
+ if (!kobj || !attr || !name)
+ return;
attr->name = name;
attr->mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(attr);
- ret = sysfs_create_file(p->kobj, attr);
-
- return ret;
+ ret = sysfs_create_file(kobj, attr);
+ if (ret)
+ pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}
-static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
+static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
- int ret = 0;
+ int ret;
int i;
char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
- if (!p)
- return -EINVAL;
-
- if (!p->kobj)
- return -EFAULT;
+ if (!p || !p->kobj)
+ return;
/*
* Create sysfs files for each GPU:
@@ -521,63 +546,87 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- struct kobject *kobj_stats;
snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
"stats_%u", pdd->dev->id);
- kobj_stats = kfd_alloc_struct(kobj_stats);
- if (!kobj_stats)
- return -ENOMEM;
+ pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
+ if (!pdd->kobj_stats)
+ return;
- ret = kobject_init_and_add(kobj_stats,
- &procfs_stats_type,
- p->kobj,
- stats_dir_filename);
+ ret = kobject_init_and_add(pdd->kobj_stats,
+ &procfs_stats_type,
+ p->kobj,
+ stats_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/stats_%s folder failed",
- stats_dir_filename);
- kobject_put(kobj_stats);
- goto err;
+ stats_dir_filename);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
+ return;
}
- pdd->kobj_stats = kobj_stats;
- pdd->attr_evict.name = "evicted_ms";
- pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&pdd->attr_evict);
- ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
- if (ret)
- pr_warn("Creating eviction stats for gpuid %d failed",
- (int)pdd->dev->id);
-
+ kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
+ "evicted_ms");
/* Add sysfs file to report compute unit occupancy */
- if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
- pdd->attr_cu_occupancy.name = "cu_occupancy";
- pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&pdd->attr_cu_occupancy);
- ret = sysfs_create_file(kobj_stats,
- &pdd->attr_cu_occupancy);
- if (ret)
- pr_warn("Creating %s failed for gpuid: %d",
- pdd->attr_cu_occupancy.name,
- (int)pdd->dev->id);
- }
+ if (pdd->dev->kfd2kgd->get_cu_occupancy)
+ kfd_sysfs_create_file(pdd->kobj_stats,
+ &pdd->attr_cu_occupancy,
+ "cu_occupancy");
}
-err:
- return ret;
}
-
-static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
+static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
int ret = 0;
int i;
+ char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
- if (!p)
- return -EINVAL;
+ if (!p || !p->kobj)
+ return;
- if (!p->kobj)
- return -EFAULT;
+ /*
+ * Create sysfs files for each GPU which supports SVM
+ * - proc/<pid>/counters_<gpuid>/
+ * - proc/<pid>/counters_<gpuid>/faults
+ * - proc/<pid>/counters_<gpuid>/page_in
+ * - proc/<pid>/counters_<gpuid>/page_out
+ */
+ for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct kobject *kobj_counters;
+
+ snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
+ "counters_%u", pdd->dev->id);
+ kobj_counters = kfd_alloc_struct(kobj_counters);
+ if (!kobj_counters)
+ return;
+
+ ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
+ p->kobj, counters_dir_filename);
+ if (ret) {
+ pr_warn("Creating KFD proc/%s folder failed",
+ counters_dir_filename);
+ kobject_put(kobj_counters);
+ return;
+ }
+
+ pdd->kobj_counters = kobj_counters;
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
+ "faults");
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
+ "page_in");
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
+ "page_out");
+ }
+}
+
+static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
+{
+ int i;
+
+ if (!p || !p->kobj)
+ return;
/*
* Create sysfs files for each GPU:
@@ -589,20 +638,14 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
- if (ret)
- pr_warn("Creating vram usage for gpu id %d failed",
- (int)pdd->dev->id);
+ kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
+ pdd->vram_filename);
snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
- if (ret)
- pr_warn("Creating sdma usage for gpu id %d failed",
- (int)pdd->dev->id);
+ kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
+ pdd->sdma_filename);
}
-
- return ret;
}
void kfd_procfs_del_queue(struct queue *q)
@@ -671,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
- pdd->drm_priv, NULL);
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
if (err)
goto err_map_mem;
@@ -800,28 +842,17 @@ struct kfd_process *kfd_create_process(struct file *filep)
goto out;
}
- process->attr_pasid.name = "pasid";
- process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&process->attr_pasid);
- ret = sysfs_create_file(process->kobj, &process->attr_pasid);
- if (ret)
- pr_warn("Creating pasid for pid %d failed",
- (int)process->lead_thread->pid);
+ kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
+ "pasid");
process->kobj_queues = kobject_create_and_add("queues",
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
- ret = kfd_procfs_add_sysfs_stats(process);
- if (ret)
- pr_warn("Creating sysfs stats dir for pid %d failed",
- (int)process->lead_thread->pid);
-
- ret = kfd_procfs_add_sysfs_files(process);
- if (ret)
- pr_warn("Creating sysfs usage file for pid %d failed",
- (int)process->lead_thread->pid);
+ kfd_procfs_add_sysfs_stats(process);
+ kfd_procfs_add_sysfs_files(process);
+ kfd_procfs_add_sysfs_counters(process);
}
out:
if (!IS_ERR(process))
@@ -964,6 +995,50 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
p->n_pdds = 0;
}
+static void kfd_process_remove_sysfs(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+ int i;
+
+ if (!p->kobj)
+ return;
+
+ sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+ sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+
+ sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
+ if (pdd->dev->kfd2kgd->get_cu_occupancy)
+ sysfs_remove_file(pdd->kobj_stats,
+ &pdd->attr_cu_occupancy);
+ kobject_del(pdd->kobj_stats);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
+ }
+
+ for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
+ pdd = p->pdds[i];
+
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
+ kobject_del(pdd->kobj_counters);
+ kobject_put(pdd->kobj_counters);
+ pdd->kobj_counters = NULL;
+ }
+
+ kobject_del(p->kobj);
+ kobject_put(p->kobj);
+ p->kobj = NULL;
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -973,33 +1048,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
- int i;
-
- /* Remove the procfs files */
- if (p->kobj) {
- sysfs_remove_file(p->kobj, &p->attr_pasid);
- kobject_del(p->kobj_queues);
- kobject_put(p->kobj_queues);
- p->kobj_queues = NULL;
-
- for (i = 0; i < p->n_pdds; i++) {
- struct kfd_process_device *pdd = p->pdds[i];
-
- sysfs_remove_file(p->kobj, &pdd->attr_vram);
- sysfs_remove_file(p->kobj, &pdd->attr_sdma);
- sysfs_remove_file(p->kobj, &pdd->attr_evict);
- if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
- sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
- kobject_del(pdd->kobj_stats);
- kobject_put(pdd->kobj_stats);
- pdd->kobj_stats = NULL;
- }
-
- kobject_del(p->kobj);
- kobject_put(p->kobj);
- p->kobj = NULL;
- }
-
+ kfd_process_remove_sysfs(p);
kfd_iommu_unbind_process(p);
kfd_process_free_outstanding_kfd_bos(p);
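The counters show callback above relies on the standard sysfs trick of embedding a bare struct attribute in the owning structure and recovering the owner with container_of(). A self-contained sketch of that pattern; example_dev/example_show are made-up names, the real attributes being attr_faults, attr_page_in and attr_page_out.

/* Sketch of the container_of() pattern used by kfd_sysfs_counters_show(). */
struct example_dev {
	struct attribute example_attr;
	u64 value;
};

static ssize_t example_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	struct example_dev *d = container_of(attr, struct example_dev,
					     example_attr);

	return sysfs_emit(buf, "%llu\n", READ_ONCE(d->value));
}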
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 95a6c36cea4c..243dd1efcdbf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -153,6 +153,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
if (pqn->q && pqn->q->gws)
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
+ kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
kfree(pqn);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index dff1011dd7ee..e883731c3f8f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -119,28 +119,40 @@ static void svm_range_remove_notifier(struct svm_range *prange)
}
static int
-svm_range_dma_map_dev(struct device *dev, dma_addr_t **dma_addr,
- unsigned long *hmm_pfns, uint64_t npages)
+svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+ unsigned long *hmm_pfns, uint32_t gpuidx)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
- dma_addr_t *addr = *dma_addr;
+ dma_addr_t *addr = prange->dma_addr[gpuidx];
+ struct device *dev = adev->dev;
struct page *page;
int i, r;
if (!addr) {
- addr = kvmalloc_array(npages, sizeof(*addr),
+ addr = kvmalloc_array(prange->npages, sizeof(*addr),
GFP_KERNEL | __GFP_ZERO);
if (!addr)
return -ENOMEM;
- *dma_addr = addr;
+ prange->dma_addr[gpuidx] = addr;
}
- for (i = 0; i < npages; i++) {
+ for (i = 0; i < prange->npages; i++) {
if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
"leaking dma mapping\n"))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]);
+ if (is_zone_device_page(page)) {
+ struct amdgpu_device *bo_adev =
+ amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+
+ addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
+ bo_adev->vm_manager.vram_base_offset -
+ bo_adev->kfd.dev->pgmap.range.start;
+ addr[i] |= SVM_RANGE_VRAM_DOMAIN;
+ pr_debug("vram address detected: 0x%llx\n", addr[i]);
+ continue;
+ }
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
r = dma_mapping_error(dev, addr[i]);
if (r) {
@@ -175,8 +187,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
}
adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_dma_map_dev(adev->dev, &prange->dma_addr[gpuidx],
- hmm_pfns, prange->npages);
+ r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
if (r)
break;
}
@@ -301,14 +312,6 @@ static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
return true;
}
-static struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
-{
- if (svm_bo)
- kref_get(&svm_bo->kref);
-
- return svm_bo;
-}
-
static void svm_range_bo_release(struct kref *kref)
{
struct svm_range_bo *svm_bo;
@@ -347,7 +350,7 @@ static void svm_range_bo_release(struct kref *kref)
kfree(svm_bo);
}
-static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
if (!svm_bo)
return;
@@ -564,6 +567,24 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
return (struct amdgpu_device *)pdd->dev->kgd;
}
+struct kfd_process_device *
+svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
+{
+ struct kfd_process *p;
+ int32_t gpu_idx, gpuid;
+ int r;
+
+ p = container_of(prange->svms, struct kfd_process, svms);
+
+ r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+ if (r) {
+ pr_debug("failed to get device id by adev %p\n", adev);
+ return NULL;
+ }
+
+ return kfd_process_device_from_gpuidx(p, gpu_idx);
+}
+
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
@@ -1002,21 +1023,22 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
}
static uint64_t
-svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
+svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
+ int domain)
{
struct amdgpu_device *bo_adev;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
uint64_t pte_flags;
- bool snoop = !prange->ttm_res;
+ bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
- if (prange->svm_bo && prange->ttm_res)
+ if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
switch (adev->asic_type) {
case CHIP_ARCTURUS:
- if (prange->svm_bo && prange->ttm_res) {
+ if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1032,7 +1054,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
}
break;
case CHIP_ALDEBARAN:
- if (prange->svm_bo && prange->ttm_res) {
+ if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1062,14 +1084,14 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
pte_flags = AMDGPU_PTE_VALID;
- pte_flags |= prange->ttm_res ? 0 : AMDGPU_PTE_SYSTEM;
+ pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
prange->svms, prange->start, prange->last,
- prange->ttm_res ? 1:0, pte_flags, mapping_flags);
+ (domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);
return pte_flags;
}
@@ -1140,31 +1162,41 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va bo_va;
bool table_freed = false;
uint64_t pte_flags;
+ unsigned long last_start;
+ int last_domain;
int r = 0;
+ int64_t i;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
- if (prange->svm_bo && prange->ttm_res) {
+ if (prange->svm_bo && prange->ttm_res)
bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
- prange->mapping.bo_va = &bo_va;
- }
- prange->mapping.start = prange->start;
- prange->mapping.last = prange->last;
- prange->mapping.offset = prange->ttm_res ? prange->offset : 0;
- pte_flags = svm_range_get_pte_flags(adev, prange);
+ last_start = prange->start;
+ for (i = 0; i < prange->npages; i++) {
+ last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
+ dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
+ if ((prange->start + i) < prange->last &&
+ last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
+ continue;
- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
- prange->mapping.start,
- prange->mapping.last, pte_flags,
- prange->mapping.offset,
- prange->ttm_res,
- dma_addr, &vm->last_update,
- &table_freed);
- if (r) {
- pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
- goto out;
+ pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
+ last_start, prange->start + i, last_domain ? "GPU" : "CPU");
+ pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
+ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
+ last_start,
+ prange->start + i, pte_flags,
+ last_start - prange->start,
+ NULL,
+ dma_addr,
+ &vm->last_update,
+ &table_freed);
+ if (r) {
+ pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
+ goto out;
+ }
+ last_start = prange->start + i + 1;
}
r = amdgpu_vm_update_pdes(adev, vm, false);
@@ -1185,7 +1217,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
p->pasid, TLB_FLUSH_LEGACY);
}
out:
- prange->mapping.bo_va = NULL;
return r;
}
@@ -1319,6 +1350,17 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
}
+static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
+{
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *adev;
+
+ pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+ adev = (struct amdgpu_device *)pdd->dev->kgd;
+
+ return SVM_ADEV_PGMAP_OWNER(adev);
+}
+
/*
* Validation+GPU mapping with concurrent invalidation (MMU notifiers)
*
@@ -1349,6 +1391,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
{
struct svm_validate_context ctx;
struct hmm_range *hmm_range;
+ struct kfd_process *p;
+ void *owner;
+ int32_t idx;
int r = 0;
ctx.process = container_of(prange->svms, struct kfd_process, svms);
@@ -1394,33 +1439,38 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_reserve_bos(&ctx);
- if (!prange->actual_loc) {
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- prange->start << PAGE_SHIFT,
- prange->npages, &hmm_range,
- false, true);
- if (r) {
- pr_debug("failed %d to get svm range pages\n", r);
- goto unreserve_out;
- }
-
- r = svm_range_dma_map(prange, ctx.bitmap,
- hmm_range->hmm_pfns);
- if (r) {
- pr_debug("failed %d to dma map range\n", r);
- goto unreserve_out;
+ p = container_of(prange->svms, struct kfd_process, svms);
+ owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
+ MAX_GPU_INSTANCE));
+ for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
+ if (kfd_svm_page_owner(p, idx) != owner) {
+ owner = NULL;
+ break;
}
+ }
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+ prange->start << PAGE_SHIFT,
+ prange->npages, &hmm_range,
+ false, true, owner);
+ if (r) {
+ pr_debug("failed %d to get svm range pages\n", r);
+ goto unreserve_out;
+ }
- prange->validated_once = true;
+ r = svm_range_dma_map(prange, ctx.bitmap,
+ hmm_range->hmm_pfns);
+ if (r) {
+ pr_debug("failed %d to dma map range\n", r);
+ goto unreserve_out;
}
+ prange->validated_once = true;
+
svm_range_lock(prange);
- if (!prange->actual_loc) {
- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
- pr_debug("hmm update the range, need validate again\n");
- r = -EAGAIN;
- goto unlock_out;
- }
+ if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ pr_debug("hmm update the range, need validate again\n");
+ r = -EAGAIN;
+ goto unlock_out;
}
if (!list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
@@ -1572,6 +1622,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
unsigned long start, unsigned long last)
{
struct svm_range_list *svms = prange->svms;
+ struct svm_range *pchild;
struct kfd_process *p;
int r = 0;
@@ -1583,7 +1634,19 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
if (!p->xnack_enabled) {
int evicted_ranges;
- atomic_inc(&prange->invalid);
+ list_for_each_entry(pchild, &prange->child_list, child_list) {
+ mutex_lock_nested(&pchild->lock, 1);
+ if (pchild->start <= last && pchild->last >= start) {
+ pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
+ pchild->start, pchild->last);
+ atomic_inc(&pchild->invalid);
+ }
+ mutex_unlock(&pchild->lock);
+ }
+
+ if (prange->start <= last && prange->last >= start)
+ atomic_inc(&prange->invalid);
+
evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
if (evicted_ranges != 1)
return r;
@@ -1600,7 +1663,6 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
schedule_delayed_work(&svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
} else {
- struct svm_range *pchild;
unsigned long s, l;
pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
@@ -2311,6 +2373,33 @@ static bool svm_range_skip_recover(struct svm_range *prange)
return false;
}
+static void
+svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
+ int32_t gpuidx)
+{
+ struct kfd_process_device *pdd;
+
+ /* fault is on different page of same range
+ * or fault is skipped to recover later
+ * or fault is on invalid virtual address
+ */
+ if (gpuidx == MAX_GPU_INSTANCE) {
+ uint32_t gpuid;
+ int r;
+
+ r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+ if (r < 0)
+ return;
+ }
+
+ /* fault is recovered
+ * or fault cannot be recovered because the GPU has no access to the range
+ */
+ pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+ if (pdd)
+ WRITE_ONCE(pdd->faults, pdd->faults + 1);
+}
+
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint64_t addr)
@@ -2320,7 +2409,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
struct svm_range *prange;
struct kfd_process *p;
uint64_t timestamp;
- int32_t best_loc, gpuidx;
+ int32_t best_loc;
+ int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
int r = 0;
@@ -2440,6 +2530,9 @@ out_unlock_range:
out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
+
+ svm_range_count_fault(adev, p, gpuidx);
+
mmput(mm);
out:
kfd_unref_process(p);
@@ -2662,7 +2755,8 @@ out:
/* FIXME: This is a workaround for page locking bug when some pages are
* invalid during migration to VRAM
*/
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
+void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
+ void *owner)
{
struct hmm_range *hmm_range;
int r;
@@ -2673,7 +2767,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
prange->start << PAGE_SHIFT,
prange->npages, &hmm_range,
- false, true);
+ false, true, owner);
if (!r) {
amdgpu_hmm_range_get_pages_done(hmm_range);
prange->validated_once = true;
@@ -2718,16 +2812,6 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
best_loc == prange->actual_loc)
return 0;
- /*
- * Prefetch to GPU without host access flag, set actual_loc to gpu, then
- * validate on gpu and map to gpus will be handled afterwards.
- */
- if (best_loc && !prange->actual_loc &&
- !(prange->flags & KFD_IOCTL_SVM_FLAG_HOST_ACCESS)) {
- prange->actual_loc = best_loc;
- return 0;
- }
-
if (!best_loc) {
r = svm_migrate_vram_to_ram(prange, mm);
*migrated = !r;
@@ -2942,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);
+ /* Flush pending deferred work to avoid racing with deferred actions from
+ * previous memory map changes (e.g. munmap). Concurrent memory map changes
+ * can still race with get_attr because we don't hold the mmap lock. But that
+ * would be a race condition in the application anyway, and undefined
+ * behaviour is acceptable in that case.
+ */
+ flush_work(&p->svms.deferred_list_work);
+
mmap_read_lock(mm);
if (!svm_range_is_valid(mm, start, size)) {
pr_debug("invalid range\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 0c0fc399395e..3fc1fd8b4fbc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -35,6 +35,10 @@
#include "amdgpu.h"
#include "kfd_priv.h"
+#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
+#define SVM_ADEV_PGMAP_OWNER(adev)\
+ ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
+
struct svm_range_bo {
struct amdgpu_bo *bo;
struct kref kref;
@@ -110,7 +114,6 @@ struct svm_range {
struct list_head update_list;
struct list_head remove_list;
struct list_head insert_list;
- struct amdgpu_bo_va_mapping mapping;
uint64_t npages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
@@ -147,6 +150,14 @@ static inline void svm_range_unlock(struct svm_range *prange)
mutex_unlock(&prange->lock);
}
+static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo)
+{
+ if (svm_bo)
+ kref_get(&svm_bo->kref);
+
+ return svm_bo;
+}
+
int svm_range_list_init(struct kfd_process *p);
void svm_range_list_fini(struct kfd_process *p);
int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
@@ -173,13 +184,17 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm);
+void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
+ void *owner);
+struct kfd_process_device *
+svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
/* SVM API and HMM page migration work together, device memory type
* is initialized to not 0 when page migration register device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
+void svm_range_bo_unref(struct svm_range_bo *svm_bo);
#else
struct kfd_process;
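The SVM_RANGE_VRAM_DOMAIN flag above works because the DMA addresses stored in prange->dma_addr[] are page aligned, so bit 0 is free to carry a per-page domain tag: svm_range_dma_map_dev() sets it for zone-device (VRAM) pages and svm_range_map_to_gpu() strips it again before programming the PTEs. A sketch with hypothetical helper names:

#define EXAMPLE_VRAM_DOMAIN	(1UL << 0)	/* same idea as SVM_RANGE_VRAM_DOMAIN */

static inline bool example_is_vram(dma_addr_t addr)
{
	return addr & EXAMPLE_VRAM_DOMAIN;
}

static inline dma_addr_t example_plain_addr(dma_addr_t addr)
{
	return addr & ~(dma_addr_t)EXAMPLE_VRAM_DOMAIN;
}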
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b5b5ccf0ed71..afa96c8f721b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1160,6 +1160,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
init_data.flags.disable_fractional_pwm = true;
+ if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+ init_data.flags.edp_no_power_sequencing = true;
+
init_data.flags.power_down_display_on_boot = true;
INIT_LIST_HEAD(&adev->dm.da_list);
@@ -1545,6 +1548,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
}
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
@@ -1558,7 +1562,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmcub_fw_version);
}
- adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
dmub_srv = adev->dm.dmub_srv;
@@ -2426,9 +2429,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
- if (caps->ext_caps->bits.oled == 1 ||
+ if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
- caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
caps->aux_support = true;
if (amdgpu_backlight == 0)
@@ -9188,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
- if (dm->backlight_dev)
+ if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
#endif
/*
@@ -9602,7 +9605,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
} else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
- set_freesync_fixed_config(dm_new_crtc_state);
+ struct drm_display_mode *high_mode;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+ set_freesync_fixed_config(dm_new_crtc_state);
+ }
}
ret = dm_atomic_get_state(state, &dm_state);
@@ -10118,7 +10126,7 @@ static int validate_overlay(struct drm_atomic_state *state)
int i;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
- struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
@@ -10148,14 +10156,6 @@ static int validate_overlay(struct drm_atomic_state *state)
if (!primary_state->crtc)
return 0;
- /* check if cursor plane is enabled */
- cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
- if (IS_ERR(cursor_state))
- return PTR_ERR(cursor_state);
-
- if (drm_atomic_plane_disabling(plane->state, cursor_state))
- return 0;
-
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 40f617bbb86f..4aba0e8c84f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
/*allocate a new amdgpu_dm_irq_handler_data*/
- handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
if (!handler_data_add) {
DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
return;
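The GFP_KERNEL to GFP_ATOMIC switch above matters because amdgpu_dm_irq_schedule_work() runs in interrupt context, where a sleeping allocation is not allowed; GFP_ATOMIC allocations never block on reclaim. A trivial illustrative sketch:

/* Allocation in atomic/interrupt context must not sleep (sketch only). */
static void *alloc_in_irq_context(size_t size)
{
	return kzalloc(size, GFP_ATOMIC);	/* never GFP_KERNEL here */
}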
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 6e0c5c664fdc..a5331b96f551 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -197,7 +197,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index c6f494f0dcea..6185f9475fa2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
+ /* Extend the WA to DP for Linux */
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
- stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
tmds_present = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 513676a6f52b..af7004b770ae 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -190,6 +190,10 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
+ /* SOCCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
+ &num_levels);
// DPREFCLK ???
/* DISPCLK */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 7b7d884d58be..4a4894e9d9c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -48,6 +48,21 @@
#include "dc_dmub_srv.h"
+#include "yellow_carp_offset.h"
+
+#define regCLK1_CLK_PLL_REQ 0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
@@ -124,10 +139,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
* also if safe to lower is false, we just go in the higher state
*/
if (safe_to_lower) {
- if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
- new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true);
- clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
@@ -148,10 +163,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
} else {
- if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
- new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false);
- clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
@@ -229,7 +244,32 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
- return 0;
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+ unsigned int fbmult_frac_val = 0;
+ unsigned int fbmult_int_val = 0;
+
+ /*
+ * The register value of fbmult is in 8.16 format; convert it to 31.32
+ * to leverage the fixed-point operations available in the driver
+ */
+
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+ pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+ /*
+ * Since the fractional part is only 16 bits in the register definition but 32 bits
+ * in our fixed-point definition, shift left by 16 to obtain the correct value
+ */
+ pll_req.value |= fbmult_frac_val << 16;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ /* integer part is now VCO frequency in kHz */
+ return dc_fixpt_floor(pll_req);
}
static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -246,7 +286,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
@@ -260,7 +300,7 @@ static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
return false;
else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
return false;
- else if (a->z9_support != b->z9_support)
+ else if (a->zstate_support != b->zstate_support)
return false;
else if (a->dtbclk_en != b->dtbclk_en)
return false;
@@ -592,6 +632,7 @@ void dcn31_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_percentage = 0;
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
+ clk_mgr->base.dfs_ref_freq_khz = 48000;
clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
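A worked sketch of the fixed-point math that get_vco_frequency_from_reg() above performs: FbMult is read as an 8.16 value, the 16-bit fraction is shifted into a 31.32 (modelled here as 32.32) number, and multiplying by the DFS reference clock (48000 kHz per the hunk above) gives the VCO frequency in kHz. The names and the plain-integer formulation are illustrative, not the dc fixed-point API.

static unsigned int example_vco_khz(unsigned int fb_int,
				    unsigned int fb_frac_16bit,
				    unsigned int ref_khz)
{
	unsigned long long v;

	/* 32.32 fixed point: integer part in the high word, 16-bit fraction
	 * shifted up so it occupies the top of the fractional word.
	 */
	v = ((unsigned long long)fb_int << 32) |
	    ((unsigned long long)fb_frac_16bit << 16);

	/* multiply by the reference clock and drop the fractional bits */
	return (unsigned int)((v * ref_khz) >> 32);
}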
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index cc21cf75eafd..f8f100535526 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -27,60 +27,6 @@
#define __DCN31_CLK_MGR_H__
#include "clk_mgr_internal.h"
-//CLK1_CLK_PLL_REQ
-#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-//CLK1_CLK0_DFS_CNTL
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT 0x0
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK 0x0000007FL
-/*DPREF clock related*/
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
-
-//CLK3_0_CLK3_CLK_PLL_REQ
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-
-#define mmCLK0_CLK3_DFS_CNTL 0x16C60
-#define mmCLK00_CLK0_CLK3_DFS_CNTL 0x16C60
-#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E60
-#define mmCLK02_CLK0_CLK3_DFS_CNTL 0x17060
-#define mmCLK03_CLK0_CLK3_DFS_CNTL 0x17260
-
-#define mmCLK0_CLK_PLL_REQ 0x16C10
-#define mmCLK00_CLK0_CLK_PLL_REQ 0x16C10
-#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E10
-#define mmCLK02_CLK0_CLK_PLL_REQ 0x17010
-#define mmCLK03_CLK0_CLK_PLL_REQ 0x17210
-
-#define mmCLK1_CLK_PLL_REQ 0x1B00D
-#define mmCLK10_CLK1_CLK_PLL_REQ 0x1B00D
-#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
-#define mmCLK12_CLK1_CLK_PLL_REQ 0x1B40D
-#define mmCLK13_CLK1_CLK_PLL_REQ 0x1B60D
-
-#define mmCLK2_CLK_PLL_REQ 0x17E0D
-
-/*AMCLK*/
-#define mmCLK11_CLK1_CLK0_DFS_CNTL 0x1B23F
-#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
-#endif
-
struct dcn31_watermarks;
struct dcn31_smu_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 66db5e988bc1..dad4a4c18bcf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -31,8 +31,8 @@
#include "dcn31_smu.h"
#include "yellow_carp_offset.h"
-#include "mp/mp_13_0_1_offset.h"
-#include "mp/mp_13_0_1_sh_mask.h"
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 605e297b7a59..a30283fa5173 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
}
+
+void dc_z10_save_init(struct dc *dc)
+{
+ if (dc->hwss.z10_save_init)
+ dc->hwss.z10_save_init(dc);
+}
#endif
/*
* Applies given context to HW and copy it into current context.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index b8832bdde2bc..a6d0fd24fd02 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
{
enum dc_status status = DC_OK;
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- status = configure_lttpr_mode_non_transparent(link, lt_settings);
- else
+ if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
status = configure_lttpr_mode_transparent(link);
+ else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
return status;
}
@@ -1784,7 +1785,6 @@ bool perform_link_training_with_retries(
link_enc = stream->link_enc;
else
link_enc = link->link_enc;
- ASSERT(link_enc);
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
@@ -1820,8 +1820,7 @@ bool perform_link_training_with_retries(
*/
panel_mode = DP_PANEL_MODE_DEFAULT;
}
- } else
- panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
}
#endif
@@ -3603,29 +3602,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
uint8_t lttpr_dpcd_data[6];
- bool vbios_lttpr_enable = false;
- bool vbios_lttpr_interop = false;
- struct dc_bios *bios = link->dc->ctx->dc_bios;
+ bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
bool is_lttpr_present = false;
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
- /* Query BIOS to determine if LTTPR functionality is forced on by system */
- if (bios->funcs->get_lttpr_caps) {
- enum bp_result bp_query_result;
- uint8_t is_vbios_lttpr_enable = 0;
-
- bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
- vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
- }
-
- if (bios->funcs->get_lttpr_interop) {
- enum bp_result bp_query_result;
- uint8_t is_vbios_interop_enabled = 0;
-
- bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
- vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
- }
/*
* Logic to determine LTTPR mode
@@ -4650,7 +4632,10 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
}
}
- if (link->dpcd_caps.panel_mode_edp) {
+ if (link->dpcd_caps.panel_mode_edp &&
+ (link->connector_signal == SIGNAL_TYPE_EDP ||
+ (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->is_internal_display))) {
return DP_PANEL_MODE_EDP;
}
@@ -4914,9 +4899,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
{
uint32_t default_backlight;
- if (link &&
- (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
- link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!dc_link_read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if < 5 nits or > 5000, it might be wrong readback
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a6a67244a322..1596f6b7fed7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1062,7 +1062,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
* did not show such problems, so this seems to be the exception.
*/
- if (plane_state->ctx->dce_version != DCE_VERSION_11_0)
+ if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
else
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index f2b39ec35c89..cde8ed2560b3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_z10_save_init(dc);
+#endif
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5101a4f8f69f..21d78289b048 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -183,6 +183,8 @@ struct dc_caps {
unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
+ bool vbios_lttpr_aware;
+ bool vbios_lttpr_enable;
};
struct dc_bug_wa {
@@ -297,6 +299,7 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+ bool edp_no_power_sequencing;
bool force_enum_edp;
bool forced_clocks;
bool allow_lttpr_non_transparent_mode;
@@ -353,10 +356,10 @@ enum dcn_pwr_state {
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
-enum dcn_z9_support_state {
- DCN_Z9_SUPPORT_UNKNOWN,
- DCN_Z9_SUPPORT_ALLOW,
- DCN_Z9_SUPPORT_DISALLOW,
+enum dcn_zstate_support_state {
+ DCN_ZSTATE_SUPPORT_UNKNOWN,
+ DCN_ZSTATE_SUPPORT_ALLOW,
+ DCN_ZSTATE_SUPPORT_DISALLOW,
};
#endif
/*
@@ -377,7 +380,7 @@ struct dc_clocks {
int dramclk_khz;
bool p_state_change_support;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- enum dcn_z9_support_state z9_support;
+ enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
#endif
enum dcn_pwr_state pwr_state;
@@ -1335,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
#endif
bool dc_enable_dmub_notifications(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index df6539e4c730..0464a8f3db3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -636,6 +636,7 @@ struct dce_hwseq_registers {
uint32_t ODM_MEM_PWR_CTRL3;
uint32_t DMU_MEM_PWR_CNTL;
uint32_t MMHUBBUB_MEM_PWR_CNTL;
+ uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1110,7 +1111,8 @@ struct dce_hwseq_registers {
type DOMAIN_POWER_FORCEON;\
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
- type HPO_HDMISTREAMCLK_G_GATE_DIS;
+ type HPO_HDMISTREAMCLK_G_GATE_DIS;\
+ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 2938caaa2299..62d595ded866 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1022,8 +1022,20 @@ void dce110_edp_backlight_control(
/* dc_service_sleep_in_milliseconds(50); */
/*edp 1.2*/
panel_instance = link->panel_cntl->inst;
- if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
- edp_receiver_ready_T7(link);
+
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ /*
+ * Sometimes a DP receiver chip, power-controlled externally by an
+ * Embedded Controller, is treated and used as eDP because it
+ * drives a mobile display. In that case we shouldn't be doing
+ * power sequencing, so we can skip waiting for T7-ready.
+ */
+ edp_receiver_ready_T7(link);
+ else
+ DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
+ }
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
@@ -1048,8 +1060,19 @@ void dce110_edp_backlight_control(
dc_link_backlight_enable_aux(link, enable);
/*edp 1.2*/
- if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
- edp_add_delay_for_T9(link);
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ /*
+ * Sometimes a DP receiver chip, power-controlled externally by an
+ * Embedded Controller, is treated and used as eDP because it
+ * drives a mobile display. In that case we shouldn't be doing
+ * power sequencing, so we can skip waiting for T9-ready.
+ */
+ edp_add_delay_for_T9(link);
+ else
+ DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
+ }
if (!enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_PRE_T11_DELAY);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 673b93f4fea5..cb9767ddf93d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -217,6 +217,8 @@ static void dpp1_dscl_set_lb(
const struct line_buffer_params *lb_params,
enum lb_memory_config mem_size_config)
{
+ uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
+
/* LB */
if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL caps: pixel data processed in fixed format */
@@ -239,9 +241,12 @@ static void dpp1_dscl_set_lb(
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
}
+ if (dpp->base.caps->max_lb_partitions == 31)
+ max_partitions = 31;
+
REG_SET_2(LB_MEMORY_CTRL, 0,
MEMORY_CONFIG, mem_size_config,
- LB_MAX_PARTITIONS, 63);
+ LB_MAX_PARTITIONS, max_partitions);
}
static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 7fa9fc656b0c..f6e747f25ebe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
- h_blank_start - 200 - 1,
+ (h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
}
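The division by opp_count above assumes that with ODM combine each OPP only programs its own horizontal slice of the timing, so the double-buffer lock X position has to be expressed in per-OPP pixels. A hedged one-liner of the arithmetic:

/* Per-OPP lock position under ODM combine (illustrative). */
static unsigned int example_lock_db_x(unsigned int h_blank_start,
				      unsigned int opp_count)
{
	return (h_blank_start - 200 - 1) / opp_count;
}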
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 1b05a37b674d..b173fa3653b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2093,8 +2093,10 @@ int dcn20_populate_dml_pipes_from_context(
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
pipes[pipe_cnt].pipe.dest.vtotal = v_total;
- pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
- pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
+ pipes[pipe_cnt].pipe.dest.hactive =
+ timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pipes[pipe_cnt].pipe.dest.vactive =
+ timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -3079,6 +3081,37 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
return false;
}
+static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ int plane_count;
+ int i;
+
+ plane_count = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /*
+ * Zstate is allowed in the following scenarios:
+ * 1. Single eDP with PSR enabled
+ * 2. 0 planes (No memory requests)
+ * 3. Single eDP without PSR but > 5ms stutter period
+ */
+ if (plane_count == 0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+
+ if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
+ || context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+ } else
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+}
+
void dcn20_calculate_dlg_params(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -3086,7 +3119,6 @@ void dcn20_calculate_dlg_params(
int vlevel)
{
int i, pipe_idx;
- int plane_count;
/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3102,17 +3134,7 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
- DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
-
- plane_count = 0;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].plane_state)
- plane_count++;
- }
-
- if (plane_count == 0)
- context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+ context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
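[Annotation] Besides centralizing the Z-state decision in decide_zstate_support(), the first hunk in this file makes DML's active dimensions include the border pixels rather than only the addressable area. A minimal arithmetic sketch of that change, using hypothetical timing values in place of struct dc_crtc_timing:

#include <stdio.h>

/* Hypothetical timing values; the real ones come from struct dc_crtc_timing. */
struct fake_timing {
	int h_addressable, h_border_left, h_border_right;
	int v_addressable, v_border_top, v_border_bottom;
};

int main(void)
{
	struct fake_timing t = {
		.h_addressable = 1920, .h_border_left = 8, .h_border_right = 8,
		.v_addressable = 1080, .v_border_top = 4, .v_border_bottom = 4,
	};

	/* Before the change DML only saw the addressable area... */
	int hactive_old = t.h_addressable;
	int vactive_old = t.v_addressable;

	/* ...after it, border pixels are counted as active as well. */
	int hactive_new = t.h_addressable + t.h_border_left + t.h_border_right;
	int vactive_new = t.v_addressable + t.v_border_top + t.v_border_bottom;

	printf("hactive %d -> %d, vactive %d -> %d\n",
	       hactive_old, hactive_new, vactive_old, vactive_new);
	return 0;
}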
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index f3d98e3ba624..bf0a198eae15 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -109,6 +109,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.max_page_table_levels = 4,
.pte_chunk_size_kbytes = 2,
.meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 789504,
.is_line_buffer_bpp_fixed = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 2140b75540cf..23a52d47e61c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -383,13 +383,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
@@ -1440,15 +1433,6 @@ bool dpp3_construct(
dpp->tf_shift = tf_shift;
dpp->tf_mask = tf_mask;
- dpp->lb_pixel_depth_supported =
- LB_PIXEL_DEPTH_18BPP |
- LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP |
- LB_PIXEL_DEPTH_36BPP;
-
- dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
- dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
-
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index 3fa86cd090a0..ac644ae6b9f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -154,6 +154,7 @@
SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
SRI(CURSOR_CONTROL, CURSOR0_, id),\
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
#define DPP_REG_LIST_DCN30(id)\
@@ -163,8 +164,6 @@
SRI(CM_SHAPER_LUT_DATA, CM, id),\
SRI(CM_MEM_PWR_CTRL2, CM, id), \
SRI(CM_MEM_PWR_STATUS2, CM, id), \
- SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
- SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 596c97dce67e..28e15ebf2f43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
}
pri_pipe->next_odm_pipe = sec_pipe;
sec_pipe->prev_odm_pipe = pri_pipe;
- ASSERT(sec_pipe->top_pipe == NULL);
if (!sec_pipe->top_pipe)
sec_pipe->stream_res.opp = pool->opps[pipe_idx];
@@ -2617,6 +2616,26 @@ static bool dcn30_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+ &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 9776d1737818..912285fdce18 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
-static void calculate_wm_set_for_vlevel(
- int vlevel,
- struct wm_range_table_entry *table_entry,
- struct dcn_watermarks *wm_set,
- struct display_mode_lib *dml,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt)
-{
- double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
- ASSERT(vlevel < dml->soc.num_states);
- /* only pipe 0 is read for voltage and dcf/soc clocks */
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
- dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
- dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
- dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
- wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
- wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
- dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel_req)
-{
- int i, pipe_idx;
- int vlevel, vlevel_max;
- struct wm_range_table_entry *table_entry;
- struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
- ASSERT(bw_params);
-
- vlevel_max = bw_params->clk_table.num_entries - 1;
-
- /* WM Set D */
- table_entry = &bw_params->wm_table.entries[WM_D];
- if (table_entry->wm_type == WM_TYPE_RETRAINING)
- vlevel = 0;
- else
- vlevel = vlevel_max;
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set C */
- table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set B */
- table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- /* WM Set A */
- table_entry = &bw_params->wm_table.entries[WM_A];
- vlevel = min(vlevel_req, vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
- .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+ .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 16a75ba0ca82..7d3ff5d44402 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -1398,11 +1398,18 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+ /* Populate from bw_params for DTBCLK, SOCCLK */
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+ dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
+ else
+ dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+ dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
+ else
+ dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ /* FCLK, PHYCLK_D18, DSCCLK */
dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 34b89464ae02..dc7823d23ba8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
- .sr_exit_time_us = 26.5,
- .sr_enter_plus_exit_time_us = 31,
+ .sr_exit_time_us = 35.5,
+ .sr_enter_plus_exit_time_us = 40,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -1326,11 +1326,18 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz;
+ /* Populate from bw_params for DTBCLK, SOCCLK */
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+ dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
+ else
+ dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+ dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
+ else
+ dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */
- /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ /* FCLK, PHYCLK_D18, DSCCLK */
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[0].socclk_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
/* re-init DML with updated bb */
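[Annotation] Both the DCN 3.02 and DCN 3.03 hunks above switch DTBCLK and SOCCLK to be populated from bw_params, falling back to the previous state's value whenever a clock-table entry reports 0. A minimal sketch of that fallback loop, with a hypothetical clock table standing in for bw_params->clk_table:

#include <stdio.h>

#define NUM_STATES 4

int main(void)
{
	/* Hypothetical clock table from bw_params; 0 means "not reported". */
	unsigned int table_socclk[NUM_STATES] = { 600, 0, 0, 1200 };
	unsigned int limits_socclk[NUM_STATES];

	for (int i = 0; i < NUM_STATES; i++) {
		/* Missing entries inherit the previous state's value. */
		if (!table_socclk[i] && i > 0)
			limits_socclk[i] = limits_socclk[i - 1];
		else
			limits_socclk[i] = table_socclk[i];
	}

	for (int i = 0; i < NUM_STATES; i++)
		printf("state %d: socclk %u MHz\n", i, limits_socclk[i]);
	return 0;
}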
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 5dcdc5a858fe..4bab97acb155 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -28,6 +28,7 @@ endif
CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mhard-float
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -36,6 +37,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -msse2
endif
+endif
AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index fc1fc1a4bf8b..8a2119d8ca0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -47,6 +47,7 @@
#include "dce/dmub_outbox.h"
#include "dc_link_dp.h"
#include "inc/link_dpcd.h"
+#include "dcn10/dcn10_hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -390,7 +391,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi_tmds)
+ if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)
@@ -403,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
&pipe_ctx->stream_res.encoder_info_frame);
}
}
+void dcn31_z10_save_init(struct dc *dc)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
void dcn31_z10_restore(struct dc *dc)
{
@@ -594,3 +607,20 @@ bool dcn31_is_abm_supported(struct dc *dc,
}
return false;
}
+
+static void apply_riommu_invalidation_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (!hws->wa.early_riommu_invalidation)
+ return;
+
+ REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
+}
+
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
+{
+ dcn10_init_pipes(dc, context);
+ apply_riommu_invalidation_wa(dc);
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index ff72f0fdd5be..140435e4f7ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
@@ -52,5 +53,6 @@ void dcn31_reset_hw_ctx_wrap(
struct dc_state *context);
bool dcn31_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index e3048f8827d2..b30d923471cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -93,18 +93,18 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
.is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
- .init_pipes = dcn10_init_pipes,
+ .init_pipes = dcn31_init_pipes,
.update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index c67bc9544f5d..cd3248dc31d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -220,6 +220,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.sr_exit_z8_time_us = 402.0,
.sr_enter_plus_exit_z8_time_us = 520.0,
.writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -741,6 +742,7 @@ static const struct dccg_mask dccg_mask = {
#define HWSEQ_DCN31_REG_LIST()\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DIO_MEM_PWR_CTRL), \
SR(ODM_MEM_PWR_CTRL3), \
SR(DMU_MEM_PWR_CNTL), \
@@ -801,6 +803,7 @@ static const struct dce_hwseq_registers hwseq_reg = {
#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
@@ -1299,6 +1302,7 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.early_riommu_invalidation = true;
}
return hws;
}
@@ -1964,6 +1968,22 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ /* interop bit is implicit */
+ {
+ dc->caps.vbios_lttpr_aware = true;
+ }
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index d34024fd798a..45862167e6ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -50,6 +50,10 @@ dml_ccflags += -msse2
endif
endif
+ifneq ($(CONFIG_FRAME_WARN),0)
+frame_warn_flag := -Wframe-larger-than=2048
+endif
+
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
ifdef CONFIG_DRM_AMD_DC_DCN
@@ -60,9 +64,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) -Wframe-larger-than=2048
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) -Wframe-larger-than=2048
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index c26e742e8137..6655bb99fdfd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -841,6 +841,9 @@ static bool CalculatePrefetchSchedule(
else
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ // Limit to prevent overflow in DST_Y_PREFETCH register
+ *DestinationLinesForPrefetch = dml_min(*DestinationLinesForPrefetch, 63.75);
+
dml_print("DML: VStartup: %d\n", VStartup);
dml_print("DML: TCalc: %f\n", TCalc);
dml_print("DML: TWait: %f\n", TWait);
@@ -4889,7 +4892,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
- || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
+ || mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));
if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
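[Annotation] The 63.75 clamp above keeps DestinationLinesForPrefetch within what the DST_Y_PREFETCH register field can encode. The limit suggests a quarter-line fixed-point encoding (6 integer + 2 fractional bits, max 63 + 3/4); that register layout is an assumption here, inferred only from the clamp value. A minimal sketch of the clamp and the assumed encoding:

#include <stdio.h>

static double dml_min(double a, double b)
{
	return a < b ? a : b;
}

int main(void)
{
	double dst_y_prefetch = 70.25; /* hypothetical DML result, too large */

	/* Clamp as in CalculatePrefetchSchedule(); 63.75 is the largest value
	 * representable if the field stores quarter-lines in 8 bits
	 * (assumption based on the limit, not on the register spec). */
	dst_y_prefetch = dml_min(dst_y_prefetch, 63.75);

	unsigned int reg_val = (unsigned int)(dst_y_prefetch * 4.0); /* quarter-lines */
	printf("clamped to %.2f lines, register value %u\n", dst_y_prefetch, reg_val);
	return 0;
}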
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 2a0db2b03047..9ac9d5e8df8b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -289,6 +289,9 @@ struct dpp_caps {
/* DSCL processing pixel data in fixed or float format */
enum dscl_data_processing_format dscl_data_proc_format;
+ /* max LB partitions */
+ unsigned int max_lb_partitions;
+
/* Calculates the number of partitions in the line buffer.
* The implementation of this function is overloaded for
* different versions of DSCL LB.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 5ab008e62b82..ad5f2adcc40d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
int width, int height, int offset);
void (*z10_restore)(struct dc *dc);
+ void (*z10_save_init)(struct dc *dc);
void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index f7f7e4fff0c2..082549f75978 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -41,6 +41,7 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
+ bool early_riommu_invalidation;
};
struct hwseq_wa_state {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 5f245bde54ff..a2a4fbeb83f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -119,7 +119,7 @@ bool dal_irq_service_set(
dal_irq_service_ack(irq_service, source);
- if (info->funcs->set)
+ if (info->funcs && info->funcs->set)
return info->funcs->set(irq_service, info, enable);
dal_irq_service_set_generic(irq_service, info, enable);
@@ -153,7 +153,7 @@ bool dal_irq_service_ack(
return false;
}
- if (info->funcs->ack)
+ if (info->funcs && info->funcs->ack)
return info->funcs->ack(irq_service, info);
dal_irq_service_ack_generic(irq_service, info);
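[Annotation] The two hunks above guard against IRQ source table entries whose funcs pointer is NULL; previously info->funcs->set / info->funcs->ack would be dereferenced unconditionally and only then fall back to the generic handlers. A standalone sketch of the fallback pattern, using hypothetical structures in place of the driver's irq info types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the irq source info structures. */
struct fake_irq_funcs {
	bool (*set)(bool enable);
};

struct fake_irq_info {
	const struct fake_irq_funcs *funcs; /* may be NULL for generic sources */
};

static bool generic_set(bool enable)
{
	printf("generic set: %d\n", enable);
	return true;
}

static bool irq_set(const struct fake_irq_info *info, bool enable)
{
	/* Check both the funcs table and the callback, as the fix above does. */
	if (info->funcs && info->funcs->set)
		return info->funcs->set(enable);

	return generic_set(enable);
}

int main(void)
{
	struct fake_irq_info no_funcs = { .funcs = NULL };

	return irq_set(&no_funcs, true) ? 0 : 1;
}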
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index 5f9346622301..1139b9eb9f6f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -165,7 +165,7 @@ enum irq_type
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
- ((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+ ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
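[Annotation] DAL_IRQ_SOURCES_NUMBER is a count, one past the last valid enum value, so a valid source must be strictly less than it; the old "<=" accepted an out-of-range index. A tiny sketch with a hypothetical miniature enum:

#include <stdbool.h>
#include <stdio.h>

enum fake_irq_source {
	FAKE_IRQ_SOURCE_INVALID = 0,
	FAKE_IRQ_SOURCE_A,
	FAKE_IRQ_SOURCE_B,
	FAKE_IRQ_SOURCES_NUMBER /* count, one past the last valid source */
};

static bool valid_src(int src)
{
	/* Strict '<' as in the fixed macro; '<=' would accept the count itself. */
	return src < FAKE_IRQ_SOURCES_NUMBER && src > FAKE_IRQ_SOURCE_INVALID;
}

int main(void)
{
	printf("B valid: %d, NUMBER valid: %d\n",
	       valid_src(FAKE_IRQ_SOURCE_B), valid_src(FAKE_IRQ_SOURCES_NUMBER));
	return 0;
}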
/* Number of Page Flip IRQ Sources. */
#define DAL_PFLIP_IRQ_SRC_NUM \
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 7c4734f905d9..7fafb8d6c1da 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware restore.
*/
DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+ /**
+ * DCN hardware save.
+ */
+ DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 8c886ece71f6..27c7fa3110c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -267,11 +267,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)
{
- uint32_t is_hw_init;
+ union dmub_fw_boot_status status;
+ uint32_t is_enable;
- REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);
+ status.all = REG_READ(DMCUB_SCRATCH0);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
- return is_hw_init != 0;
+ return is_enable != 0 && status.bits.dal_fw;
}
bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
@@ -352,3 +354,63 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_TIMER_CURRENT);
}
+
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+{
+ uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+
+ if (!dmub || !diag_data)
+ return;
+
+ memset(diag_data, 0, sizeof(*diag_data));
+
+ diag_data->dmcub_version = dmub->fw_version;
+
+ diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
+ diag_data->is_dmcub_enabled = is_dmub_enabled;
+
+ REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
+ diag_data->is_dmcub_soft_reset = is_soft_reset;
+
+ REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
+ diag_data->is_dmcub_secure_reset = is_sec_reset;
+
+ REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
+ diag_data->is_traceport_en = is_traceport_enabled;
+
+ REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
+ diag_data->is_cw0_enabled = is_cw0_enabled;
+
+ REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
+ diag_data->is_cw6_enabled = is_cw6_enabled;
+}
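[Annotation] dmub_dcn31_is_hw_init() above now requires both the DMCUB_ENABLE bit and the firmware's own dal_fw boot-status bit read back from SCRATCH0, so a powered-on but not-yet-booted DMCUB is no longer reported as initialized. A minimal sketch of the combined check; the union below is a hypothetical stand-in for union dmub_fw_boot_status, with dal_fw assumed to be bit 0:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for union dmub_fw_boot_status (bit layout assumed). */
union fake_boot_status {
	struct {
		uint32_t dal_fw : 1;
		uint32_t reserved : 31;
	} bits;
	uint32_t all;
};

/* Mirror of the fixed check: the hardware enable bit alone is not enough. */
static bool is_hw_init(uint32_t dmcub_enable_bit, uint32_t scratch0)
{
	union fake_boot_status status;

	status.all = scratch0;
	return dmcub_enable_bit != 0 && status.bits.dal_fw;
}

int main(void)
{
	/* Enabled but firmware not booted yet -> not initialized. */
	printf("enabled, no fw: %d\n", is_hw_init(1, 0x0));
	/* Enabled and firmware reports dal_fw -> initialized. */
	printf("enabled, fw up: %d\n", is_hw_init(1, 0x1));
	return 0;
}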
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 2829c3e9a310..9456a6a2d518 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -36,6 +36,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_CNTL) \
DMUB_SR(DMCUB_CNTL2) \
DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX0_SIZE) \
+ DMUB_SR(DMCUB_INBOX0_RPTR) \
+ DMUB_SR(DMCUB_INBOX0_WPTR) \
DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
DMUB_SR(DMCUB_INBOX1_SIZE) \
DMUB_SR(DMCUB_INBOX1_RPTR) \
@@ -103,11 +106,15 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
DMUB_SR(DMCUB_GPINT_DATAIN1) \
+ DMUB_SR(DMCUB_GPINT_DATAOUT) \
DMUB_SR(CC_DC_PIPE_DIS) \
DMUB_SR(MMHUBBUB_SOFT_RESET) \
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
DMUB_SR(DCN_VM_FB_OFFSET) \
- DMUB_SR(DMCUB_TIMER_CURRENT)
+ DMUB_SR(DMCUB_TIMER_CURRENT) \
+ DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \
+ DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \
+ DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR)
#define DMUB_DCN31_FIELDS() \
DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
@@ -115,6 +122,7 @@ struct dmub_srv;
DMUB_SF(DMCUB_CNTL2, DMCUB_SOFT_RESET) \
DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \
DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
@@ -138,11 +146,13 @@ struct dmub_srv;
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
- DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
+ DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
+ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR)
struct dmub_srv_dcn31_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
DMUB_DCN31_REGS()
+ DMCUB_INTERNAL_REGS()
#undef DMUB_SR
};
@@ -227,4 +237,6 @@ void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+
#endif /* _DMUB_DCN31_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index fd7e996ab1d7..2bdbd7406f56 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -208,6 +208,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
break;
case DMUB_ASIC_DCN31:
+ dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
funcs->backdoor_load = dmub_dcn31_backdoor_load;
@@ -231,9 +232,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr;
funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
- if (asic == DMUB_ASIC_DCN31) {
- dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
- }
+ funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
funcs->get_current_time = dmub_dcn31_get_current_time;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 332b0df53e52..ff1d3d4a6488 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -223,10 +223,12 @@ enum amd_harvest_ip_mask {
};
enum DC_FEATURE_MASK {
- DC_FBC_MASK = 0x1,
- DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
- DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
- DC_PSR_MASK = 0x8,
+ //Default value can be found at "uint amdgpu_dc_feature_mask"
+ DC_FBC_MASK = (1 << 0), //0x1, disabled by default
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
+ DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
+ DC_PSR_MASK = (1 << 3), //0x8, disabled by default
+ DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
};
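[Annotation] The enum above now spells the feature flags as explicit bit shifts and adds DC_EDP_NO_POWER_SEQUENCING; as the new comment notes, the defaults live in the amdgpu_dc_feature_mask module parameter, against which these bits are tested. A minimal sketch of such a bit test, using a local variable and hypothetical FAKE_* names in place of the module parameter and the real enum:

#include <stdbool.h>
#include <stdio.h>

enum FAKE_DC_FEATURE_MASK {
	FAKE_DC_FBC_MASK = (1 << 0),
	FAKE_DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1),
	FAKE_DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2),
	FAKE_DC_PSR_MASK = (1 << 3),
	FAKE_DC_EDP_NO_POWER_SEQUENCING = (1 << 4),
};

int main(void)
{
	/* Stand-in for the amdgpu_dc_feature_mask module parameter. */
	unsigned int feature_mask = FAKE_DC_MULTI_MON_PP_MCLK_SWITCH_MASK;

	bool psr_requested = feature_mask & FAKE_DC_PSR_MASK;
	bool mclk_switch = feature_mask & FAKE_DC_MULTI_MON_PP_MCLK_SWITCH_MASK;

	printf("PSR requested: %d, multi-mon mclk switch: %d\n",
	       psr_requested, mclk_switch);
	return 0;
}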
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
deleted file mode 100644
index dfacc6b5d89d..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_OFFSET_HEADER
-#define _mp_13_0_1_OFFSET_HEADER
-
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-// base address: 0x0
-#define regMP0_SMN_C2PMSG_32 0x0060
-#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_33 0x0061
-#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_34 0x0062
-#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_35 0x0063
-#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_36 0x0064
-#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_37 0x0065
-#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_38 0x0066
-#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_39 0x0067
-#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_40 0x0068
-#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_41 0x0069
-#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_42 0x006a
-#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_43 0x006b
-#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_44 0x006c
-#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_45 0x006d
-#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_46 0x006e
-#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_47 0x006f
-#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_48 0x0070
-#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_49 0x0071
-#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_50 0x0072
-#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_51 0x0073
-#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_52 0x0074
-#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_53 0x0075
-#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_54 0x0076
-#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_55 0x0077
-#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_56 0x0078
-#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_57 0x0079
-#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_58 0x007a
-#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_59 0x007b
-#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_60 0x007c
-#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_61 0x007d
-#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_62 0x007e
-#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_63 0x007f
-#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_64 0x0080
-#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_65 0x0081
-#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_66 0x0082
-#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_67 0x0083
-#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_68 0x0084
-#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_69 0x0085
-#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_70 0x0086
-#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_71 0x0087
-#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_72 0x0088
-#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_73 0x0089
-#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_74 0x008a
-#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_75 0x008b
-#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_76 0x008c
-#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_77 0x008d
-#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_78 0x008e
-#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_79 0x008f
-#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_80 0x0090
-#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_81 0x0091
-#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_82 0x0092
-#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_83 0x0093
-#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_84 0x0094
-#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_85 0x0095
-#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_86 0x0096
-#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_87 0x0097
-#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_88 0x0098
-#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_89 0x0099
-#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_90 0x009a
-#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_91 0x009b
-#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_92 0x009c
-#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_93 0x009d
-#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_94 0x009e
-#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_95 0x009f
-#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_96 0x00a0
-#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_97 0x00a1
-#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_98 0x00a2
-#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_99 0x00a3
-#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_100 0x00a4
-#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_101 0x00a5
-#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_102 0x00a6
-#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
-#define regMP0_SMN_C2PMSG_103 0x00a7
-#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
-#define regMP0_SMN_IH_CREDIT 0x00c1
-#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
-#define regMP0_SMN_IH_SW_INT 0x00c2
-#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
-#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
-#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-// base address: 0x0
-#define regMP1_SMN_C2PMSG_32 0x0260
-#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_33 0x0261
-#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_34 0x0262
-#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_35 0x0263
-#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_36 0x0264
-#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_37 0x0265
-#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_38 0x0266
-#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_39 0x0267
-#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_40 0x0268
-#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_41 0x0269
-#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_42 0x026a
-#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_43 0x026b
-#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_44 0x026c
-#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_45 0x026d
-#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_46 0x026e
-#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_47 0x026f
-#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_48 0x0270
-#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_49 0x0271
-#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_50 0x0272
-#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_51 0x0273
-#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_52 0x0274
-#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_53 0x0275
-#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_54 0x0276
-#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_55 0x0277
-#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_56 0x0278
-#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_57 0x0279
-#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_58 0x027a
-#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_59 0x027b
-#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_60 0x027c
-#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_61 0x027d
-#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_62 0x027e
-#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_63 0x027f
-#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_64 0x0280
-#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_65 0x0281
-#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_66 0x0282
-#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_67 0x0283
-#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_68 0x0284
-#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_69 0x0285
-#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_70 0x0286
-#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_71 0x0287
-#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_72 0x0288
-#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_73 0x0289
-#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_74 0x028a
-#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_75 0x028b
-#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_76 0x028c
-#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_77 0x028d
-#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_78 0x028e
-#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_79 0x028f
-#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_80 0x0290
-#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_81 0x0291
-#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_82 0x0292
-#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_83 0x0293
-#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_84 0x0294
-#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_85 0x0295
-#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_86 0x0296
-#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_87 0x0297
-#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_88 0x0298
-#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_89 0x0299
-#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_90 0x029a
-#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_91 0x029b
-#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_92 0x029c
-#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_93 0x029d
-#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_94 0x029e
-#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_95 0x029f
-#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_96 0x02a0
-#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_97 0x02a1
-#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_98 0x02a2
-#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_99 0x02a3
-#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_100 0x02a4
-#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_101 0x02a5
-#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_102 0x02a6
-#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
-#define regMP1_SMN_C2PMSG_103 0x02a7
-#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
-#define regMP1_SMN_IH_CREDIT 0x02c1
-#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
-#define regMP1_SMN_IH_SW_INT 0x02c2
-#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
-#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
-#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-#define regMP1_SMN_FPS_CNT 0x02c4
-#define regMP1_SMN_FPS_CNT_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH0 0x0340
-#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH1 0x0341
-#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH2 0x0342
-#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH3 0x0343
-#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH4 0x0344
-#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH5 0x0345
-#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH6 0x0346
-#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
-#define regMP1_SMN_EXT_SCRATCH7 0x0347
-#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
deleted file mode 100644
index 2d5e8b58e693..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_SH_MASK_HEADER
-#define _mp_13_0_1_SH_MASK_HEADER
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-//MP0_SMN_C2PMSG_32
-#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_33
-#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_34
-#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_35
-#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_36
-#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_37
-#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_38
-#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_39
-#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_40
-#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_41
-#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_42
-#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_43
-#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_44
-#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_45
-#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_46
-#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_47
-#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_48
-#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_49
-#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_50
-#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_51
-#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_52
-#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_53
-#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_54
-#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_55
-#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_56
-#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_57
-#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_58
-#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_59
-#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_60
-#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_61
-#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_62
-#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_63
-#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_64
-#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_65
-#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_66
-#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_67
-#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_68
-#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_69
-#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_70
-#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_71
-#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_72
-#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_73
-#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_74
-#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_75
-#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_76
-#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_77
-#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_78
-#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_79
-#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_80
-#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_81
-#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_82
-#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_83
-#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_84
-#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_85
-#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_86
-#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_87
-#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_88
-#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_89
-#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_90
-#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_91
-#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_92
-#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_93
-#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_94
-#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_95
-#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_96
-#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_97
-#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_98
-#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_99
-#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_100
-#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_101
-#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_102
-#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_103
-#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_IH_CREDIT
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP0_SMN_IH_SW_INT
-#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
-//MP0_SMN_IH_SW_INT_CTRL
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-//MP1_FIRMWARE_FLAGS
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
-#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
-#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-//MP1_SMN_C2PMSG_32
-#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_33
-#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_34
-#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_35
-#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_36
-#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_37
-#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_38
-#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_39
-#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_40
-#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_41
-#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_42
-#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_43
-#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_44
-#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_45
-#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_46
-#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_47
-#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_48
-#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_49
-#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_50
-#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_51
-#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_52
-#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_53
-#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_54
-#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_55
-#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_56
-#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_57
-#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_58
-#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_59
-#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_60
-#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_61
-#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_62
-#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_63
-#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_64
-#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_65
-#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_66
-#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_67
-#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_68
-#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_69
-#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_70
-#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_71
-#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_72
-#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_73
-#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_74
-#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_75
-#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_76
-#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_77
-#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_78
-#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_79
-#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_80
-#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_81
-#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_82
-#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_83
-#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_84
-#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_85
-#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_86
-#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_87
-#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_88
-#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_89
-#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_90
-#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_91
-#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_92
-#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_93
-#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_94
-#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_95
-#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_96
-#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_97
-#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_98
-#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_99
-#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_100
-#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_101
-#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_102
-#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_103
-#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_IH_CREDIT
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP1_SMN_IH_SW_INT
-#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
-//MP1_SMN_IH_SW_INT_CTRL
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-//MP1_SMN_FPS_CNT
-#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
-#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH0
-#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH1
-#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH2
-#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH3
-#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH4
-#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH5
-#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH6
-#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH7
-#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 3811e58dd857..44955458fe38 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 {
uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id
uint8_t board_i2c_feature_slave_addr;
- uint8_t reserved3;
+ uint8_t ras_rom_i2c_slave_addr;
uint16_t bootup_mvddq_mv;
uint16_t bootup_mvpp_mv;
uint32_t zfbstartaddrin16mb;
diff --git a/drivers/gpu/drm/amd/include/navi10_enum.h b/drivers/gpu/drm/amd/include/navi10_enum.h
index d5ead9680c6e..84bcb96f76ea 100644
--- a/drivers/gpu/drm/amd/include/navi10_enum.h
+++ b/drivers/gpu/drm/amd/include/navi10_enum.h
@@ -430,7 +430,7 @@ ARRAY_2D_DEPTH = 0x00000001,
*/
typedef enum ENUM_NUM_SIMD_PER_CU {
-NUM_SIMD_PER_CU = 0x00000004,
+NUM_SIMD_PER_CU = 0x00000002,
} ENUM_NUM_SIMD_PER_CU;
/*
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index a276ebad47e6..769f58d5ae1a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2902,14 +2902,15 @@ static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
return sprintf(buf, "%i\n", 0);
}
-static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+
+static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ enum pp_power_limit_level pp_limit_level)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
- enum pp_power_limit_level pp_limit_level = PP_PWR_LIMIT_MAX;
uint32_t limit;
ssize_t size;
int r;
@@ -2919,17 +2920,17 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (!(pp_funcs && pp_funcs->get_power_limit))
+ return -ENODATA;
+
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
- if (pp_funcs && pp_funcs->get_power_limit)
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
- pp_limit_level, power_type);
- else
- r = -ENODATA;
+ r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
+ pp_limit_level, power_type);
if (!r)
size = sysfs_emit(buf, "%u\n", limit * 1000000);
@@ -2942,85 +2943,31 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
return size;
}
-static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+
+static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
- enum pp_power_limit_level pp_limit_level = PP_PWR_LIMIT_CURRENT;
- uint32_t limit;
- ssize_t size;
- int r;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- if (pp_funcs && pp_funcs->get_power_limit)
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
- pp_limit_level, power_type);
- else
- r = -ENODATA;
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
- if (!r)
- size = sysfs_emit(buf, "%u\n", limit * 1000000);
- else
- size = sysfs_emit(buf, "\n");
+}
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
- return size;
}
static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
- enum pp_power_limit_level pp_limit_level = PP_PWR_LIMIT_DEFAULT;
- uint32_t limit;
- ssize_t size;
- int r;
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- if (pp_funcs && pp_funcs->get_power_limit)
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
- pp_limit_level, power_type);
- else
- r = -ENODATA;
-
- if (!r)
- size = sysfs_emit(buf, "%u\n", limit * 1000000);
- else
- size = sysfs_emit(buf, "\n");
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
- return size;
}
+
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
index 610266088ff1..35fa0d8e92dd 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
@@ -101,7 +101,8 @@
#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41
#define PPSMC_MSG_GfxDriverResetRecovery 0x42
-#define PPSMC_Message_Count 0x43
+#define PPSMC_MSG_BoardPowerCalibration 0x43
+#define PPSMC_Message_Count 0x44
//PPSMC Reset Types
#define PPSMC_RESET_TYPE_WARM_RESET 0x00
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 89a16dcd0fff..1d3765b873df 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -225,7 +225,8 @@
__SMU_DUMMY_MAP(DisableDeterminism), \
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
- __SMU_DUMMY_MAP(GfxDriverResetRecovery),
+ __SMU_DUMMY_MAP(GfxDriverResetRecovery), \
+ __SMU_DUMMY_MAP(BoardPowerCalibration),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 1962a5877191..f61b5c914a3d 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -34,7 +34,7 @@
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
-#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
/* MP Apertures */
#define MP0_Public 0x03800000
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index 6119a36b2cba..dc91eb608791 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -26,6 +26,7 @@
#include "amdgpu_smu.h"
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x07
/* MP Apertures */
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
deleted file mode 100644
index b6c976a4d578..000000000000
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __SMU_V13_0_1_H__
-#define __SMU_V13_0_1_H__
-
-#include "amdgpu_smu.h"
-
-#define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3
-
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-/* address block */
-#define smnMP1_FIRMWARE_FLAGS 0x3010024
-
-
-#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu);
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu);
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu);
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu);
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu);
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu);
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable);
-#endif
-#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
index 5627de734246..c5e26d619bf0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
@@ -111,7 +111,9 @@ typedef struct {
uint32_t InWhisperMode : 1;
uint32_t spare0 : 1;
uint32_t ZstateStatus : 4;
- uint32_t spare1 :12;
+ uint32_t spare1 : 4;
+ uint32_t DstateFun : 4;
+ uint32_t DstateDev : 4;
// MP1_EXT_SCRATCH2
uint32_t P2JobHandler :24;
uint32_t RsmuPmiP2FinishedCnt : 8;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 25979106fd25..02e8c6e5448d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return size;
}
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (adev->pdev->device == 0x6860);
+}
+
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
}
out:
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ if (vega10_get_power_profile_mode_quirks(hwmgr))
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1 << power_profile_mode,
+ NULL);
+ else
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
NULL);
+
hwmgr->power_profile_mode = power_profile_mode;
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index cb375f1beebd..ebe672142808 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1453,10 +1453,14 @@ static int smu_hw_fini(void *handle)
if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, true);
- smu_dpm_set_vcn_enable(smu, false);
- smu_dpm_set_jpeg_enable(smu, false);
}
+ smu_dpm_set_vcn_enable(smu, false);
+ smu_dpm_set_jpeg_enable(smu, false);
+
+ adev->vcn.cur_state = AMD_PG_STATE_GATE;
+ adev->jpeg.cur_state = AMD_PG_STATE_GATE;
+
if (!smu->pm_enabled)
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index c751f717a0da..d92dd2c7448e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t val;
- if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+ if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) {
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 388c5cb5c647..0a5d46ac9ccd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
if (amdgpu_runtime_pm == 2)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 18681dc458da..bcaaa086fc2f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu)
return 0;
err3_out:
- kfree(smu_table->clocks_table);
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 9b3a8503f5cd..d4c4c495762c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o
+SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 9316a726195c..cb5485cf243f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -134,6 +134,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -440,6 +441,39 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
return ret;
}
+static bool aldebaran_is_primary(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
+ return adev->smuio.funcs->get_die_id(adev) == 0;
+
+ return true;
+}
+
+static int aldebaran_run_board_btc(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret;
+
+ if (!aldebaran_is_primary(smu))
+ return 0;
+
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get smu version!\n");
+ return ret;
+ }
+ if (smu_version <= 0x00441d00)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Board power calibration failed!\n");
+
+ return ret;
+}
+
static int aldebaran_run_btc(struct smu_context *smu)
{
int ret;
@@ -447,6 +481,8 @@ static int aldebaran_run_btc(struct smu_context *smu)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
if (ret)
dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+ else
+ ret = aldebaran_run_board_btc(smu);
return ret;
}
@@ -524,16 +560,6 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
return (abs(frequency1 - frequency2) <= EPSILON);
}
-static bool aldebaran_is_primary(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
- return adev->smuio.funcs->get_die_id(adev) == 0;
-
- return true;
-}
-
static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a3dc7194aaf8..a421ba85bd6d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case CHIP_ALDEBARAN:
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
+ case CHIP_YELLOW_CARP:
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
+ break;
default:
dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
@@ -694,6 +697,27 @@ failed:
return ret;
}
+int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (adev->asic_type) {
+ case CHIP_YELLOW_CARP:
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
deleted file mode 100644
index 61917b49f2bf..000000000000
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-//#include <linux/reboot.h>
-
-#define SWSMU_CODE_LAYER_L3
-
-#include "amdgpu.h"
-#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
-#include "soc15_common.h"
-#include "smu_cmn.h"
-#include "atomfirmware.h"
-#include "amdgpu_atomfirmware.h"
-#include "amdgpu_atombios.h"
-#include "atom.h"
-
-#include "asic_reg/mp/mp_13_0_1_offset.h"
-#include "asic_reg/mp/mp_13_0_1_sh_mask.h"
-
-/*
- * DO NOT use these for err/warn/info/debug messages.
- * Use dev_err, dev_warn, dev_info and dev_dbg instead.
- * They are more MGPU friendly.
- */
-#undef pr_err
-#undef pr_warn
-#undef pr_info
-#undef pr_debug
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t mp1_fw_flags;
-
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
-
- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return 0;
-
- return -EIO;
-}
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu)
-{
- uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_major = (smu_version >> 16) & 0xffff;
- smu_minor = (smu_version >> 8) & 0xff;
- smu_debug = (smu_version >> 0) & 0xff;
-
- switch (smu->adev->asic_type) {
- case CHIP_YELLOW_CARP:
- smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP;
- break;
-
- default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV;
- break;
- }
-
- dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
- smu_version, smu_major, smu_minor, smu_debug);
-
- /*
- * 1. if_version mismatch is not critical as our fw is designed
- * to be backward compatible.
- * 2. New fw usually brings some optimizations. But that's visible
- * only on the paired driver.
- * Considering above, we just leave user a warning message instead
- * of halt driver loading.
- */
- if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
- }
-
- return ret;
-}
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- kfree(smu_table->clocks_table);
- smu_table->clocks_table = NULL;
-
- kfree(smu_table->metrics_table);
- smu_table->metrics_table = NULL;
-
- kfree(smu_table->watermarks_table);
- smu_table->watermarks_table = NULL;
-
- return 0;
-}
-
-static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev,
- uint8_t clk_id,
- uint8_t syspll_id,
- uint32_t *clk_freq)
-{
- struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
- struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
- int ret, index;
-
- input.clk_id = clk_id;
- input.syspll_id = syspll_id;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
- return 0;
-}
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu)
-{
- int ret, index;
- uint16_t size;
- uint8_t frev, crev;
- struct atom_common_table_header *header;
- struct atom_firmware_info_v3_4 *v_3_4;
- struct atom_firmware_info_v3_3 *v_3_3;
- struct atom_firmware_info_v3_1 *v_3_1;
-
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
-
- ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
- (uint8_t **)&header);
- if (ret)
- return ret;
-
- if (header->format_revision != 3) {
- dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
- return -EINVAL;
- }
-
- switch (header->content_revision) {
- case 0:
- case 1:
- case 2:
- v_3_1 = (struct atom_firmware_info_v3_1 *)header;
- smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
- break;
- case 3:
- v_3_3 = (struct atom_firmware_info_v3_3 *)header;
- smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
- break;
- case 4:
- default:
- v_3_4 = (struct atom_firmware_info_v3_4 *)header;
- smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
- smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
- smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
- smu->smu_table.boot_values.socclk = 0;
- smu->smu_table.boot_values.dcefclk = 0;
- smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
- smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
- smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
- smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
- smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
- break;
- }
-
- smu->smu_table.boot_values.format_revision = header->format_revision;
- smu->smu_table.boot_values.content_revision = header->content_revision;
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.socclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.dcefclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_ECLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.eclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_VCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.vclk);
-
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL0_DCLK_ID,
- (uint8_t)0,
- &smu->smu_table.boot_values.dclk);
-
- if ((smu->smu_table.boot_values.format_revision == 3) &&
- (smu->smu_table.boot_values.content_revision >= 2))
- smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
- (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
- (uint8_t)SMU11_SYSPLL1_2_ID,
- &smu->smu_table.boot_values.fclk);
-
- return 0;
-}
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
-}
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu)
-{
- struct smu_table *driver_table = &smu->smu_table.driver_table;
- int ret = 0;
-
- if (!driver_table->mc_address)
- return 0;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address),
- NULL);
-
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address),
- NULL);
-
- return ret;
-}
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable)
-{
- int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- switch (adev->asic_type) {
- case CHIP_YELLOW_CARP:
- if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
- return 0;
- if (enable)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
- else
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
- break;
- default:
- break;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 7664334d8144..0cfeb9fc7c03 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -25,7 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
+#include "smu_v13_0.h"
#include "smu13_driver_if_yellow_carp.h"
#include "yellow_carp_ppt.h"
#include "smu_v13_0_1_ppsmc.h"
@@ -186,13 +186,30 @@ err0_out:
return -ENOMEM;
}
+static int yellow_carp_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->clocks_table);
+ smu_table->clocks_table = NULL;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->watermarks_table);
+ smu_table->watermarks_table = NULL;
+
+ return 0;
+}
+
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
uint32_t feature_mask[2];
int ret = 0;
- if (!en)
+ if (!en && !adev->in_s0ix)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
bitmap_zero(feature->enabled, feature->feature_num);
@@ -281,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type)
if (index < 0)
return index == -EACCES ? 0 : index;
- mutex_lock(&smu->message_lock);
-
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
-
- mutex_unlock(&smu->message_lock);
-
- mdelay(10);
+ ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to mode reset!\n");
return ret;
}
@@ -658,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
@@ -1202,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
}
static const struct pptable_funcs yellow_carp_ppt_funcs = {
- .check_fw_status = smu_v13_0_1_check_fw_status,
- .check_fw_version = smu_v13_0_1_check_fw_version,
+ .check_fw_status = smu_v13_0_check_fw_status,
+ .check_fw_version = smu_v13_0_check_fw_version,
.init_smc_tables = yellow_carp_init_smc_tables,
- .fini_smc_tables = smu_v13_0_1_fini_smc_tables,
- .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values,
+ .fini_smc_tables = yellow_carp_fini_smc_tables,
+ .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.system_features_control = yellow_carp_system_features_control,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
.dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
- .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables,
+ .set_default_dpm_table = yellow_carp_set_default_dpm_tables,
.read_sensor = yellow_carp_read_sensor,
.is_dpm_running = yellow_carp_is_dpm_running,
.set_watermarks_table = yellow_carp_set_watermarks_table,
@@ -1221,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.get_gpu_metrics = yellow_carp_get_gpu_metrics,
.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
- .set_driver_table_location = smu_v13_0_1_set_driver_table_location,
- .gfx_off_control = smu_v13_0_1_gfx_off_control,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .gfx_off_control = smu_v13_0_gfx_off_control,
.post_init = yellow_carp_post_smu_init,
.mode2_reset = yellow_carp_mode2_reset,
.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d29907955ff7..5d82891c3222 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
- if (err)
- return err;
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
- return 0;
+ return err;
}
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 98ae00661656..f454e0424086 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -834,6 +834,9 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+ return -ENOTTY;
+
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index ebe9dccf2d83..0b8648396fb2 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -352,6 +352,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
const struct drm_mode_fb_cmd2 *cmd)
{
struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
/*
* Find the GEM object and thus the gtt range object that is
@@ -362,7 +363,11 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return ERR_PTR(-ENOENT);
/* Let the core code do all the work */
- return psb_framebuffer_create(dev, cmd, obj);
+ fb = psb_framebuffer_create(dev, cmd, obj);
+ if (IS_ERR(fb))
+ drm_gem_object_put(obj);
+
+ return fb;
}
static int psbfb_probe(struct drm_fb_helper *fb_helper,
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index b63d374dff23..f960f5d7664e 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -125,7 +125,7 @@ config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
- depends on VFIO_MDEV && VFIO_MDEV_DEVICE
+ depends on VFIO_MDEV
default n
help
Choose this option if you want to enable KVMGT support for
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 5b6922e28ef2..aa667fa71158 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2166,7 +2166,8 @@ static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
{
enum port port;
- int ports = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F;
+ int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+ BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 390869bd6b63..00dade49665b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1791,10 +1791,23 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum intel_dpll_id id;
+ u32 val;
- return _cnl_ddi_get_pll(i915, DG1_DPCLKA_CFGCR0(phy),
- DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
- DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+ val = intel_de_read(i915, DG1_DPCLKA_CFGCR0(phy));
+ val &= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val >>= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ id = val;
+
+ /*
+ * _DG1_DPCLKA0_CFGCR0 maps between DPLL 0 and 1 with one bit for phy A
+ * and B while _DG1_DPCLKA1_CFGCR0 maps between DPLL 2 and 3 with one
+ * bit for phy C and D.
+ */
+ if (phy >= PHY_C)
+ id += DPLL_ID_DG1_DPLL2;
+
+ return intel_get_shared_dpll_by_id(i915, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
@@ -2450,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return BIT(PIPE_A) | BIT(PIPE_B);
+ else
+ return BIT(PIPE_A);
+}
+
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2467,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
if (!pipe_config->splitter.enable)
return;
- /* Splitter enable is supported for pipe A only. */
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+ if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
@@ -2500,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
return;
if (crtc_state->splitter.enable) {
- /* Splitter enable is supported for pipe A only. */
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
- return;
-
dss1 |= SPLITTER_ENABLE;
dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
if (crtc_state->splitter.link_count == 2)
@@ -4730,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- /* Splitter enable for eDP MSO is limited to certain pipes. */
- if (dig_port->dp.mso_link_count) {
- encoder->pipe_mask = BIT(PIPE_A);
- if (IS_ALDERLAKE_P(dev_priv))
- encoder->pipe_mask |= BIT(PIPE_B);
- }
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3bad4e00f7be..0a8a2395c8ac 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -5746,16 +5746,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPEMISC_DITHER_6_BPC;
+ val |= PIPEMISC_6_BPC;
break;
case 24:
- val |= PIPEMISC_DITHER_8_BPC;
+ val |= PIPEMISC_8_BPC;
break;
case 30:
- val |= PIPEMISC_DITHER_10_BPC;
+ val |= PIPEMISC_10_BPC;
break;
case 36:
- val |= PIPEMISC_DITHER_12_BPC;
+ /* Port output 12BPC defined for ADLP+ */
+ if (DISPLAY_VER(dev_priv) > 12)
+ val |= PIPEMISC_12_BPC_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -5808,15 +5810,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
- switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
- case PIPEMISC_DITHER_6_BPC:
+ switch (tmp & PIPEMISC_BPC_MASK) {
+ case PIPEMISC_6_BPC:
return 18;
- case PIPEMISC_DITHER_8_BPC:
+ case PIPEMISC_8_BPC:
return 24;
- case PIPEMISC_DITHER_10_BPC:
+ case PIPEMISC_10_BPC:
return 30;
- case PIPEMISC_DITHER_12_BPC:
- return 36;
+ /*
+ * PORT OUTPUT 12 BPC defined for ADLP+.
+ *
+ * TODO:
+ * For previous platforms with DSI interface, bits 5:7
+ * are used for storing pipe_bpp irrespective of dithering.
+ * Since the value of 12 BPC is not defined for these bits
+ * on older platforms, need to find a workaround for 12 BPC
+ * MIPI DSI HW readout.
+ */
+ case PIPEMISC_12_BPC_ADLP:
+ if (DISPLAY_VER(dev_priv) > 12)
+ return 36;
+ fallthrough;
default:
MISSING_CASE(tmp);
return 0;
@@ -11361,13 +11375,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
vlv_dsi_init(dev_priv);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(dev_priv) == 10) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
intel_ddi_init(dev_priv, PORT_F);
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_DDI(dev_priv)) {
u32 found;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 4298ae684d7d..86b7ac7b65ec 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
IS_BROXTON(i915)) {
bxt_enable_dc9(i915);
- /* Tweaked Wa_14010685332:icp,jsp,mcc */
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
- intel_de_rmw(i915, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
IS_BROXTON(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
- /* Tweaked Wa_14010685332:icp,jsp,mcc */
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
void intel_display_power_suspend(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 5c9222283044..6cc03b9e4321 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2868,7 +2868,7 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
if (size < sizeof(struct dp_sdp))
return -EINVAL;
- memset(vsc, 0, size);
+ memset(vsc, 0, sizeof(*vsc));
if (sdp->sdp_header.HB0 != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 08bceae40aa8..053a3c2f7267 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index a8abc9af5ff4..4a6419d7be93 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,10 +25,8 @@
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
-#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_memcpy.h"
struct eb_vma {
struct i915_vma *vma;
@@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
int err;
struct intel_engine_cs *engine = eb->engine;
+ /* If we need to copy for the cmdparser, we will stall anyway */
+ if (eb_use_cmdparser(eb))
+ return ERR_PTR(-EWOULDBLOCK);
+
if (!reloc_can_use_engine(engine)) {
engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
if (!engine)
@@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
return vma;
}
-struct eb_parse_work {
- struct dma_fence_work base;
- struct intel_engine_cs *engine;
- struct i915_vma *batch;
- struct i915_vma *shadow;
- struct i915_vma *trampoline;
- unsigned long batch_offset;
- unsigned long batch_length;
- unsigned long *jump_whitelist;
- const void *batch_map;
- void *shadow_map;
-};
-
-static int __eb_parse(struct dma_fence_work *work)
-{
- struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
- int ret;
- bool cookie;
-
- cookie = dma_fence_begin_signalling();
- ret = intel_engine_cmd_parser(pw->engine,
- pw->batch,
- pw->batch_offset,
- pw->batch_length,
- pw->shadow,
- pw->jump_whitelist,
- pw->shadow_map,
- pw->batch_map);
- dma_fence_end_signalling(cookie);
-
- return ret;
-}
-
-static void __eb_parse_release(struct dma_fence_work *work)
-{
- struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
-
- if (!IS_ERR_OR_NULL(pw->jump_whitelist))
- kfree(pw->jump_whitelist);
-
- if (pw->batch_map)
- i915_gem_object_unpin_map(pw->batch->obj);
- else
- i915_gem_object_unpin_pages(pw->batch->obj);
-
- i915_gem_object_unpin_map(pw->shadow->obj);
-
- if (pw->trampoline)
- i915_active_release(&pw->trampoline->active);
- i915_active_release(&pw->shadow->active);
- i915_active_release(&pw->batch->active);
-}
-
-static const struct dma_fence_work_ops eb_parse_ops = {
- .name = "eb_parse",
- .work = __eb_parse,
- .release = __eb_parse_release,
-};
-
-static inline int
-__parser_mark_active(struct i915_vma *vma,
- struct intel_timeline *tl,
- struct dma_fence *fence)
-{
- struct intel_gt_buffer_pool_node *node = vma->private;
-
- return i915_active_ref(&node->active, tl->fence_context, fence);
-}
-
-static int
-parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
-{
- int err;
-
- mutex_lock(&tl->mutex);
-
- err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
- if (err)
- goto unlock;
-
- if (pw->trampoline) {
- err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
- if (err)
- goto unlock;
- }
-
-unlock:
- mutex_unlock(&tl->mutex);
- return err;
-}
-
-static int eb_parse_pipeline(struct i915_execbuffer *eb,
- struct i915_vma *shadow,
- struct i915_vma *trampoline)
-{
- struct eb_parse_work *pw;
- struct drm_i915_gem_object *batch = eb->batch->vma->obj;
- bool needs_clflush;
- int err;
-
- GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
- GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
-
- pw = kzalloc(sizeof(*pw), GFP_KERNEL);
- if (!pw)
- return -ENOMEM;
-
- err = i915_active_acquire(&eb->batch->vma->active);
- if (err)
- goto err_free;
-
- err = i915_active_acquire(&shadow->active);
- if (err)
- goto err_batch;
-
- if (trampoline) {
- err = i915_active_acquire(&trampoline->active);
- if (err)
- goto err_shadow;
- }
-
- pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
- if (IS_ERR(pw->shadow_map)) {
- err = PTR_ERR(pw->shadow_map);
- goto err_trampoline;
- }
-
- needs_clflush =
- !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
- pw->batch_map = ERR_PTR(-ENODEV);
- if (needs_clflush && i915_has_memcpy_from_wc())
- pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
-
- if (IS_ERR(pw->batch_map)) {
- err = i915_gem_object_pin_pages(batch);
- if (err)
- goto err_unmap_shadow;
- pw->batch_map = NULL;
- }
-
- pw->jump_whitelist =
- intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
- trampoline);
- if (IS_ERR(pw->jump_whitelist)) {
- err = PTR_ERR(pw->jump_whitelist);
- goto err_unmap_batch;
- }
-
- dma_fence_work_init(&pw->base, &eb_parse_ops);
-
- pw->engine = eb->engine;
- pw->batch = eb->batch->vma;
- pw->batch_offset = eb->batch_start_offset;
- pw->batch_length = eb->batch_len;
- pw->shadow = shadow;
- pw->trampoline = trampoline;
-
- /* Mark active refs early for this worker, in case we get interrupted */
- err = parser_mark_active(pw, eb->context->timeline);
- if (err)
- goto err_commit;
-
- err = dma_resv_reserve_shared(pw->batch->resv, 1);
- if (err)
- goto err_commit;
-
- err = dma_resv_reserve_shared(shadow->resv, 1);
- if (err)
- goto err_commit;
-
- /* Wait for all writes (and relocs) into the batch to complete */
- err = i915_sw_fence_await_reservation(&pw->base.chain,
- pw->batch->resv, NULL, false,
- 0, I915_FENCE_GFP);
- if (err < 0)
- goto err_commit;
-
- /* Keep the batch alive and unwritten as we parse */
- dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
-
- /* Force execution to wait for completion of the parser */
- dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
-
- dma_fence_work_commit_imm(&pw->base);
- return 0;
-
-err_commit:
- i915_sw_fence_set_error_once(&pw->base.chain, err);
- dma_fence_work_commit_imm(&pw->base);
- return err;
-
-err_unmap_batch:
- if (pw->batch_map)
- i915_gem_object_unpin_map(batch);
- else
- i915_gem_object_unpin_pages(batch);
-err_unmap_shadow:
- i915_gem_object_unpin_map(shadow->obj);
-err_trampoline:
- if (trampoline)
- i915_active_release(&trampoline->active);
-err_shadow:
- i915_active_release(&shadow->active);
-err_batch:
- i915_active_release(&eb->batch->vma->active);
-err_free:
- kfree(pw);
- return err;
-}
-
static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
{
/*
@@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err_trampoline;
}
- err = eb_parse_pipeline(eb, shadow, trampoline);
+ err = dma_resv_reserve_shared(shadow->resv, 1);
+ if (err)
+ goto err_trampoline;
+
+ err = intel_engine_cmd_parser(eb->engine,
+ eb->batch->vma,
+ eb->batch_start_offset,
+ eb->batch_len,
+ shadow, trampoline);
if (err)
goto err_unpin_batch;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f4fb68e8955a..e382b7f2353b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
+ return;
case __I915_MADV_PURGED:
return;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 4df505e4c53a..16162fc2782d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg)
intel_gt_pm_get(&eb.i915->gt);
for_each_uabi_engine(eb.engine, eb.i915) {
+ if (intel_engine_requires_cmd_parser(eb.engine) ||
+ intel_engine_using_cmd_parser(eb.engine))
+ continue;
+
reloc_cache_init(&eb.reloc_cache, eb.i915);
memset(map, POISON_INUSE, 4096);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 21c8b7350b7a..da4f5eb43ac2 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
__i915_gem_object_pin_pages(pt->base);
i915_gem_object_make_unshrinkable(pt->base);
- if (lvl ||
- gen8_pt_count(*start, end) < I915_PDES ||
- intel_vgpu_active(vm->i915))
- fill_px(pt, vm->scratch[lvl]->encode);
+ fill_px(pt, vm->scratch[lvl]->encode);
spin_lock(&pd->lock);
if (likely(!pd->entry[idx])) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 9ceddfbb1687..7f03df236613 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1279,7 +1279,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
+ intel_synchronize_hardirq(engine->i915);
intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index cac7f3f44642..f8948de72036 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
- return ERR_PTR(-EDEADLK);
+ return ERR_PTR(-ENOBUFS);
}
int __i915_vma_pin_fence(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 0c423f096e2b..37d74d4ed59b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -184,8 +184,11 @@ static int xcs_resume(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- /* Double check the ring is empty & disabled before we resume */
- synchronize_hardirq(engine->i915->drm.irq);
+ /*
+ * Double check the ring is empty & disabled before we resume. Called
+ * from atomic context during PCI probe, so _hardirq().
+ */
+ intel_synchronize_hardirq(engine->i915);
if (!stop_ring(engine))
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 98eb48c24c46..cde0a477fb49 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
+ /*
+	 * d3_entered indicates that PPGTT invalidation should be skipped on
+	 * the next vGPU reset: it is set on the D0->D3 transition written to
+	 * PCI config space and cleared after a vGPU reset during resume.
+	 * On S0ix exit the device power state also transitions from D3 to D0,
+	 * as in S3 resume, but no vGPU reset is triggered by the QEMU device
+	 * model; all engines keep working while d3_entered stays set, which
+	 * would break the next vGPU reset (the expected PPGTT invalidation
+	 * would be skipped).
+	 * Engines can only run in D0, so the first ELSP write gives GVT a
+	 * chance to clear d3_entered.
+ */
+ if (vgpu->d3_entered)
+ vgpu->d3_entered = false;
+
execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
@@ -3134,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
+ MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
D_BDW_PLUS, NULL, force_nonpriv_write);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 48b4d4cf805d..1ac98f8aba31 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -88,6 +88,7 @@ struct kvmgt_pgfn {
struct hlist_node hnode;
};
+#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct kvmgt_guest_info {
struct kvm *kvm;
struct intel_vgpu *vgpu;
@@ -95,7 +96,6 @@ struct kvmgt_guest_info {
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
- struct dentry *debugfs_cache_entries;
};
struct gvt_dma {
@@ -1947,16 +1947,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
- info->debugfs_cache_entries = debugfs_create_ulong(
- "kvmgt_nr_cache_entries",
- 0444, vgpu->debugfs,
- &vdev->nr_cache_entries);
+ debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
+ &vdev->nr_cache_entries);
return 0;
}
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
- debugfs_remove(info->debugfs_cache_entries);
+ debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME,
+ info->vgpu->debugfs));
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index b8ac80765461..f776c470914d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+ {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 3992c25a191d..a3b4d99d64b9 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
unsigned long offset, unsigned long length,
- void *dst, const void *src)
+ bool *needs_clflush_after)
{
- bool needs_clflush =
- !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
- if (src) {
- GEM_BUG_ON(!needs_clflush);
- i915_unaligned_memcpy_from_wc(dst, src + offset, length);
- } else {
- struct scatterlist *sg;
+ unsigned int src_needs_clflush;
+ unsigned int dst_needs_clflush;
+ void *dst, *src;
+ int ret;
+
+ ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+ i915_gem_object_finish_access(dst_obj);
+ if (IS_ERR(dst))
+ return dst;
+
+ ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ if (ret) {
+ i915_gem_object_unpin_map(dst_obj);
+ return ERR_PTR(ret);
+ }
+
+ src = ERR_PTR(-ENODEV);
+ if (src_needs_clflush && i915_has_memcpy_from_wc()) {
+ src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+ if (!IS_ERR(src)) {
+ i915_unaligned_memcpy_from_wc(dst,
+ src + offset,
+ length);
+ i915_gem_object_unpin_map(src_obj);
+ }
+ }
+ if (IS_ERR(src)) {
+ unsigned long x, n, remain;
void *ptr;
- unsigned int x, sg_ofs;
- unsigned long remain;
/*
* We can avoid clflushing partial cachelines before the write
@@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* validate up to the end of the batch.
*/
remain = length;
- if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ if (dst_needs_clflush & CLFLUSH_BEFORE)
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
-
- while (remain) {
- unsigned long sg_max = sg->length >> PAGE_SHIFT;
-
- for (; remain && sg_ofs < sg_max; sg_ofs++) {
- unsigned long len = min(remain, PAGE_SIZE - x);
- void *map;
-
- map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
- if (needs_clflush)
- drm_clflush_virt_range(map + x, len);
- memcpy(ptr, map + x, len);
- kunmap_atomic(map);
-
- ptr += len;
- remain -= len;
- x = 0;
- }
-
- sg_ofs = 0;
- sg = sg_next(sg);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
+
+ src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ if (src_needs_clflush)
+ drm_clflush_virt_range(src + x, len);
+ memcpy(ptr, src + x, len);
+ kunmap_atomic(src);
+
+ ptr += len;
+ remain -= len;
+ x = 0;
}
}
+ i915_gem_object_finish_access(src_obj);
+
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
+ *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
+
return dst;
}
@@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
if (target_cmd_index == offset)
return 0;
+ if (IS_ERR(jump_whitelist))
+ return PTR_ERR(jump_whitelist);
+
if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
@@ -1369,28 +1388,10 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
return 0;
}
-/**
- * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
- * @batch_length: length of the commands in batch_obj
- * @trampoline: Whether jump trampolines are used.
- *
- * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
- * This has to be preallocated, because the command parser runs in signaling context,
- * and may not allocate any memory.
- *
- * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
- * IS_ERR() to check for errors. Must bre freed() with kfree().
- *
- * NULL is a valid value, meaning no allocation was required.
- */
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
- bool trampoline)
+static unsigned long *alloc_whitelist(u32 batch_length)
{
unsigned long *jmp;
- if (trampoline)
- return NULL;
-
/*
* We expect batch_length to be less than 256KiB for known users,
* i.e. we need at most an 8KiB bitmap allocation which should be
@@ -1415,9 +1416,7 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* @batch_offset: byte offset in the batch at which execution starts
* @batch_length: length of the commands in batch_obj
* @shadow: validated copy of the batch buffer in question
- * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
- * @shadow_map: mapping to @shadow vma
- * @batch_map: mapping to @batch vma
+ * @trampoline: true if we need to trampoline into privileged execution
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1425,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
+
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
- unsigned long *jump_whitelist,
- void *shadow_map,
- const void *batch_map)
+ bool trampoline)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
+ bool needs_clflush_after = false;
+ unsigned long *jump_whitelist;
u64 batch_addr, shadow_addr;
int ret = 0;
- bool trampoline = !jump_whitelist;
GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
@@ -1447,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
batch->size));
GEM_BUG_ON(!batch_length);
- cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
- shadow_map, batch_map);
+ cmd = copy_batch(shadow->obj, batch->obj,
+ batch_offset, batch_length,
+ &needs_clflush_after);
+ if (IS_ERR(cmd)) {
+ DRM_DEBUG("CMD: Failed to copy batch\n");
+ return PTR_ERR(cmd);
+ }
+
+ jump_whitelist = NULL;
+ if (!trampoline)
+ /* Defer failure until attempted use */
+ jump_whitelist = alloc_whitelist(batch_length);
shadow_addr = gen8_canonical_addr(shadow->node.start);
batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
@@ -1549,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
i915_gem_object_flush_map(shadow->obj);
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ kfree(jump_whitelist);
+ i915_gem_object_unpin_map(shadow->obj);
return ret;
}
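The reworked parser now allocates the jump whitelist itself and, per the "Defer failure until attempted use" comment above, only reports an allocation failure if the batch actually contains a jump that needs checking. A loose, self-contained sketch of that shape (hypothetical names, not the parser's real rules):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static unsigned long *whitelist;    /* valid bitmap, or ERR_PTR on alloc failure */

    static void whitelist_setup(unsigned int nbits)
    {
            unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

            whitelist = map ? map : ERR_PTR(-ENOMEM);
    }

    static int whitelist_check(unsigned int bit)
    {
            if (IS_ERR(whitelist))
                    return PTR_ERR(whitelist);  /* surfaced only when a jump is seen */

            return test_bit(bit, whitelist) ? 0 : -EINVAL;
    }
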
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 850b499c71c8..73de45472f60 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -42,7 +42,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 38ff2fb89744..b30397b04529 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1906,17 +1906,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
- bool trampoline);
-
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
- unsigned long *jump_whitelist,
- void *shadow_map,
- const void *batch_map);
+ bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 77f1911c463b..3acb0b6be284 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -138,7 +138,7 @@ void i915_globals_unpark(void)
atomic_inc(&active);
}
-static void __exit __i915_globals_flush(void)
+static void __i915_globals_flush(void)
{
atomic_inc(&active); /* skip shrinking */
@@ -148,7 +148,7 @@ static void __exit __i915_globals_flush(void)
atomic_dec(&active);
}
-void __exit i915_globals_exit(void)
+void i915_globals_exit(void)
{
GEM_BUG_ON(atomic_read(&active));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 35c97c39f125..966664610c8c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) >= 12) {
int i;
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ /*
+ * SFC_DONE resides in the VD forcewake domain, so it
+ * only exists if the corresponding VCS engine is
+ * present.
+ */
+ if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ continue;
+
err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
gt->sfc_done[i]);
+ }
err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
}
@@ -1581,6 +1590,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
if (GRAPHICS_VER(i915) >= 12) {
for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ /*
+ * SFC_DONE resides in the VD forcewake domain, so it
+ * only exists if the corresponding VCS engine is
+ * present.
+ */
+ if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ continue;
+
gt->sfc_done[i] =
intel_uncore_read(uncore, GEN12_SFC_DONE(i));
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a11bdb667241..c3816f5c6900 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,7 +33,6 @@
#include <linux/sysrq.h>
#include <drm/drm_drv.h>
-#include <drm/drm_irq.h>
#include "display/intel_de.h"
#include "display/intel_display_types.h"
@@ -3065,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- /*
- * Wa_14010685332:cnp/cmp,tgp,adp
- * TODO: Clarify which platforms this applies to
- * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
- * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
- */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
- (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
- SBCLK_RUN_REFCLK_DIS);
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
- }
-}
-
static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3116,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
- cnp_display_clock_wa(dev_priv);
}
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3160,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
-
- cnp_display_clock_wa(dev_priv);
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@@ -4564,10 +4542,6 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
return dev_priv->runtime_pm.irqs_enabled;
}
@@ -4575,3 +4549,8 @@ void intel_synchronize_irq(struct drm_i915_private *i915)
{
synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
+
+void intel_synchronize_hardirq(struct drm_i915_private *i915)
+{
+ synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
+}
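The new intel_synchronize_hardirq() wrapper mirrors intel_synchronize_irq() but uses synchronize_hardirq(), which waits only for in-flight hard IRQ handlers rather than for threaded handlers as well. Assuming those usual semantics, an illustrative (not driver-specific) caller that cannot sleep would look roughly like:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    /*
     * Drain only the hard IRQ handler for a device's interrupt line;
     * unlike synchronize_irq(), threaded handlers are not waited for,
     * which is what makes this usable from the atomic resume path
     * mentioned in the xcs_resume() comment above.
     */
    static void example_drain_hardirq(struct pci_dev *pdev)
    {
            synchronize_hardirq(pdev->irq);
    }
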
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index db34d5dbe402..e43b6734f21b 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -94,6 +94,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
void intel_synchronize_irq(struct drm_i915_private *i915);
+void intel_synchronize_hardirq(struct drm_i915_private *i915);
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 83b500bb170c..2880ec57c97d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -1195,6 +1195,7 @@ static int __init i915_init(void)
err = pci_register_driver(&i915_pci_driver);
if (err) {
i915_pmu_exit();
+ i915_globals_exit();
return err;
}
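Together with the i915_globals.c hunk above that drops the __exit annotations, this lets the globals teardown run from the module-init failure path (code placed in the __exit section is normally discarded for built-in drivers, so an __init path must not call it). A minimal sketch of the unwind pattern, using hypothetical subsystems:

    #include <linux/init.h>

    int subsys_a_init(void);    /* hypothetical */
    void subsys_a_exit(void);   /* hypothetical */
    int subsys_b_init(void);    /* hypothetical */

    static int __init example_init(void)
    {
            int err;

            err = subsys_a_init();
            if (err)
                    return err;

            err = subsys_b_init();
            if (err) {
                    subsys_a_exit();    /* unwind what already succeeded */
                    return err;
            }

            return 0;
    }
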
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e915ec034c98..476bb3b9ad11 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -422,7 +422,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
-#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
#define GEN12_SFC_DONE_MAX 4
#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
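With the corrected base and stride, the four SFC_DONE instances implied by GEN12_SFC_DONE_MAX decode to 0x1cc000, 0x1cd000, 0x1ce000 and 0x1cf000 for n = 0..3, whereas the old macro would have produced 0x1cc00 through 0x1cf00.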
@@ -6163,11 +6163,17 @@ enum {
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
-#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
-#define PIPEMISC_DITHER_8_BPC (0 << 5)
-#define PIPEMISC_DITHER_10_BPC (1 << 5)
-#define PIPEMISC_DITHER_6_BPC (2 << 5)
-#define PIPEMISC_DITHER_12_BPC (3 << 5)
+/*
+ * For display versions below 13, bits 5-7 of PIPE_MISC represent DITHER
+ * BPC with valid values of 6, 8 and 10 BPC.
+ * On ADL-P and later, bits 5-7 represent PORT OUTPUT BPC with valid
+ * values of 6, 8, 10 and 12 BPC.
+ */
+#define PIPEMISC_BPC_MASK (7 << 5)
+#define PIPEMISC_8_BPC (0 << 5)
+#define PIPEMISC_10_BPC (1 << 5)
+#define PIPEMISC_6_BPC (2 << 5)
+#define PIPEMISC_12_BPC_ADLP (4 << 5) /* adlp+ */
#define PIPEMISC_DITHER_ENABLE (1 << 4)
#define PIPEMISC_DITHER_TYPE_MASK (3 << 2)
#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
@@ -10513,7 +10519,6 @@ enum skl_power_gate {
#define _DG1_DPCLKA1_CFGCR0 0x16C280
#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2)
#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2)
-#define _DG1_PHY_DPLL_MAP(phy) ((phy) >= PHY_C ? DPLL_ID_DG1_DPLL2 : DPLL_ID_DG1_DPLL0)
#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \
_DG1_DPCLKA_CFGCR0, \
_DG1_DPCLKA1_CFGCR0)
@@ -10521,8 +10526,6 @@ enum skl_power_gate {
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2)
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
-#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \
- (((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy))
/* ADLS Clocks */
#define _ADLS_DPCLKA_CFGCR0 0x164280
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1014c71cf7f5..37aef1308573 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq,
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- i915_sw_fence_set_error_once(&rq->submit, fence->error);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
- }
if (fence->context == rq->fence.context)
continue;
@@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- i915_sw_fence_set_error_once(&rq->submit, fence->error);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
- }
/*
* Requests on the same timeline are explicitly ordered, along
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7eaa92fee421..e0a10f36acc1 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -325,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
+ } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -340,7 +340,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- if (GRAPHICS_VER(dev_priv) >= 12 &&
+
+ if (DISPLAY_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
info->pipe_mask &= ~BIT(PIPE_D);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -352,10 +353,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
info->display.has_fbc = 0;
- if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
info->display.has_dmc = 0;
- if (GRAPHICS_VER(dev_priv) >= 10 &&
+ if (DISPLAY_VER(dev_priv) >= 10 &&
(dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
info->display.has_dsc = 0;
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 96ea1a2c11dd..f54392ec4fab 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -203,6 +203,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
unsigned long status, val, val1;
int plane_id, dma0_state, dma1_state;
struct kmb_drm_private *kmb = to_kmb(dev);
+ u32 ctrl = 0;
status = kmb_read_lcd(kmb, LCD_INT_STATUS);
@@ -227,6 +228,19 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
kmb->plane_status[plane_id].ctrl);
+ ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+ if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
+ LCD_CTRL_VL2_ENABLE |
+ LCD_CTRL_GL1_ENABLE |
+ LCD_CTRL_GL2_ENABLE))) {
+ /* If no LCD layers are using DMA,
+ * then disable DMA pipelined AXI read
+ * transactions.
+ */
+ kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
+ LCD_CTRL_PIPELINE_DMA);
+ }
+
kmb->plane_status[plane_id].disable = false;
}
}
@@ -411,10 +425,10 @@ static const struct drm_driver kmb_driver = {
.fops = &fops,
DRM_GEM_CMA_DRIVER_OPS_VMAP,
.name = "kmb-drm",
- .desc = "KEEMBAY DISPLAY DRIVER ",
- .date = "20201008",
- .major = 1,
- .minor = 0,
+ .desc = "KEEMBAY DISPLAY DRIVER",
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
};
static int kmb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index 02e806712a64..ebbaa5f422d5 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -15,6 +15,11 @@
#define KMB_MAX_HEIGHT 1080 /*Max height in pixels */
#define KMB_MIN_WIDTH 1920 /*Max width in pixels */
#define KMB_MIN_HEIGHT 1080 /*Max height in pixels */
+
+#define DRIVER_DATE "20210223"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 1
+
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index d5b6195856d1..ecee6782612d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -427,8 +427,14 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
- /* FIXME no doc on how to set output format,these values are
- * taken from the Myriadx tests
+ /* Enable pipeline AXI read transactions for the DMA
+ * after setting graphics layers. This must be done
+ * in a separate write cycle.
+ */
+ kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
+ /* FIXME no doc on how to set output format, these values are taken
+ * from the Myriadx tests
*/
out_format |= LCD_OUTF_FORMAT_RGB888;
@@ -526,6 +532,11 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
plane->id = i;
}
+ /* Disable pipeline AXI read transactions for the DMA
+ * prior to setting graphics layers
+ */
+ kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
return primary;
cleanup:
drmm_kfree(drm, plane);
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 3a430e93d384..494b9790b1da 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(lima_task,
__entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
- __assign_str(pipe, task->base.sched->name)
+ __assign_str(pipe, task->base.sched->name);
),
TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6f4c80bbc0eb..473f5bb5cbad 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
static int mtk_disp_color_remove(struct platform_device *pdev)
{
+ component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index fa9d79963cd3..5326989d5206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
+ component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index bced555648b0..e94738fe4db8 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct mtk_dpi *dpi = bridge->driver_private;
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
unsigned int out_bus_format;
out_bus_format = bridge_state->output_bus_cfg.format;
+ if (out_bus_format == MEDIA_BUS_FMT_FIXED)
+ if (dpi->conf->num_output_fmts)
+ out_bus_format = dpi->conf->output_fmts[0];
+
dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
bridge_state->input_bus_cfg.format,
bridge_state->output_bus_cfg.format);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 474efb844249..735efe79f075 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- const struct drm_plane_helper_funcs *plane_helper_funcs =
- plane->helper_private;
if (!mtk_crtc->enabled)
return;
- plane_helper_funcs->atomic_update(plane, state);
mtk_drm_crtc_update_config(mtk_crtc, false);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 75bc00e17fc4..50d20562e612 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -34,6 +34,7 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_AAL_OUTPUT_SIZE 0x04d8
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
@@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
}
static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index b5582dcf564c..e6dcb34d3052 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
true, true);
}
+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ struct mtk_plane_state *mtk_plane_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_gem_object *gem;
+ struct mtk_drm_gem_obj *mtk_gem;
+ unsigned int pitch, format;
+ dma_addr_t addr;
+
+ gem = fb->obj[0];
+ mtk_gem = to_mtk_gem_obj(gem);
+ addr = mtk_gem->dma_addr;
+ pitch = fb->pitches[0];
+ format = fb->format->format;
+
+ addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+ addr += (new_state->src.y1 >> 16) * pitch;
+
+ mtk_plane_state->pending.enable = true;
+ mtk_plane_state->pending.pitch = pitch;
+ mtk_plane_state->pending.format = format;
+ mtk_plane_state->pending.addr = addr;
+ mtk_plane_state->pending.x = new_state->dst.x1;
+ mtk_plane_state->pending.y = new_state->dst.y1;
+ mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+ mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+ mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
static void mtk_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
swap(plane->state->fb, new_state->fb);
- new_plane_state->pending.async_dirty = true;
+ mtk_plane_update_new_state(new_state, new_plane_state);
+ wmb(); /* Make sure the above parameters are set before update */
+ new_plane_state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, state);
}
@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
- struct drm_crtc *crtc = new_state->crtc;
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_object *gem;
- struct mtk_drm_gem_obj *mtk_gem;
- unsigned int pitch, format;
- dma_addr_t addr;
- if (!crtc || WARN_ON(!fb))
+ if (!new_state->crtc || WARN_ON(!new_state->fb))
return;
if (!new_state->visible) {
@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
return;
}
- gem = fb->obj[0];
- mtk_gem = to_mtk_gem_obj(gem);
- addr = mtk_gem->dma_addr;
- pitch = fb->pitches[0];
- format = fb->format->format;
-
- addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
- addr += (new_state->src.y1 >> 16) * pitch;
-
- mtk_plane_state->pending.enable = true;
- mtk_plane_state->pending.pitch = pitch;
- mtk_plane_state->pending.format = format;
- mtk_plane_state->pending.addr = addr;
- mtk_plane_state->pending.x = new_state->dst.x1;
- mtk_plane_state->pending.y = new_state->dst.y1;
- mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
- mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
- mtk_plane_state->pending.rotation = new_state->rotation;
+ mtk_plane_update_new_state(new_state, mtk_plane_state);
wmb(); /* Make sure the above parameters are set before update */
mtk_plane_state->pending.dirty = true;
}
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 446e7961da48..0f3cafab8860 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -634,6 +634,11 @@
#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
#define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16)
+
/* osd2 scaler */
#define OSD2_VSC_PHASE_STEP 0x3d00
#define OSD2_VSC_INI_PHASE 0x3d01
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index aede0c67a57f..259f3e6bec90 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_load_matrix(priv);
- else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
true);
+ /* fix green/pink color distortion from vendor u-boot */
+ writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+ OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+ priv->io_base + _REG(OSD1_HDR2_CTRL));
+ }
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index d01c4c919504..704dace895cb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -296,7 +296,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
static const struct dpu_mdp_cfg sm8250_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
- .base = 0x0, .len = 0x45C,
+ .base = 0x0, .len = 0x494,
.features = 0,
.highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca96e3514790..c0423e76eed7 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -771,6 +771,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
dp_catalog->width_blanking);
dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ee221d835fa0..eaddfd739885 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1526,7 +1526,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 051c1be1de7e..867388a399ad 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -219,6 +219,7 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
+ dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
DRM_ERROR("DRM DP AUX register failed\n");
@@ -1311,6 +1312,10 @@ static int dp_pm_resume(struct device *dev)
else
dp->dp_display.is_connected = false;
+ dp_display_handle_plugged_change(g_dp_display,
+ dp->dp_display.is_connected);
+
+
mutex_unlock(&dp->event_mutex);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 141178754231..1e8a971a86f2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1169,7 +1169,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_CACHED_COHERENT:
if (priv->has_cached_coherent)
break;
- /* fallthrough */
+ fallthrough;
default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index eed2a762e9dd..bcaddbba564d 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -142,6 +142,9 @@ static const struct iommu_flush_ops null_tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg);
+
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
@@ -157,6 +160,13 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
if (!ttbr1_cfg)
return ERR_PTR(-ENODEV);
+ /*
+ * Defer setting the fault handler until we have a valid adreno_smmu
+	 * to avoid accidentally installing a GPU-specific fault handler for
+ * the display's iommu
+ */
+ iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
if (!pagetable)
return ERR_PTR(-ENOMEM);
@@ -300,7 +310,6 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
- iommu_set_fault_handler(domain, msm_fault_handler, iommu);
atomic_set(&iommu->pagetables, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index f949767698fc..bcb0310a41b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
+ /* Finish updating head(s)...
+ *
+ * NVD is rather picky about both where window assignments can change,
+ * *and* about certain core and window channel states matching.
+ *
+ * The EFI GOP driver on newer GPUs configures window channels with a
+ * different output format to what we do, and the core channel update
+ * in the assign_windows case above would result in a state mismatch.
+ *
+	 * Delay some of the head update until after that point to work
+	 * around the issue. This only affects the initial modeset.
+ *
+ * TODO: handle this better when adding flexible window mapping
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set_wndw(head, asyh);
+ interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+ }
+ }
+
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index ec361d17e900..d66f97280282 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
}
void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- if (asyh->set.view ) head->func->view (head, asyh);
- if (asyh->set.mode ) head->func->mode (head, asyh);
- if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.load);
head->func->olut_set(head, asyh);
}
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ if (asyh->set.view ) head->func->view (head, asyh);
+ if (asyh->set.mode ) head->func->mode (head, asyh);
+ if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index dae841dc05fd..0bac6be9ba34 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -21,6 +21,7 @@ struct nv50_head {
struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
void nv50_head_flush_clr(struct nv50_head *head,
struct nv50_head_atom *asyh, bool flush);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 0b86c44878e0..59759c4fb62e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -4,7 +4,8 @@
struct nv_device_v0 {
__u8 version;
- __u8 pad01[7];
+ __u8 priv;
+ __u8 pad02[6];
__u64 device; /* device identifier, ~0 for client default */
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index ba2c28ea43d2..c68cc957248e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -61,8 +61,6 @@
#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 347d2c020bd1..5d9395e651b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -9,7 +9,6 @@ struct nvif_client {
const struct nvif_driver *driver;
u64 version;
u8 route;
- bool super;
};
int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 8e85b936eaa0..7a3af05f7f98 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -11,7 +11,7 @@ struct nvif_driver {
void (*fini)(void *priv);
int (*suspend)(void *priv);
int (*resume)(void *priv);
- int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+ int (*ioctl)(void *priv, void *data, u32 size, void **hack);
void __iomem *(*map)(void *priv, u64 handle, u32 size);
void (*unmap)(void *priv, void __iomem *ptr, u32 size);
bool keep;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index d6dd40f21eed..9c7ff56831c5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -77,6 +77,7 @@ struct nvif_vmm_pfnmap_v0 {
#define NVIF_VMM_PFNMAP_V0_APER 0x00000000000000f0ULL
#define NVIF_VMM_PFNMAP_V0_HOST 0x0000000000000000ULL
#define NVIF_VMM_PFNMAP_V0_VRAM 0x0000000000000010ULL
+#define NVIF_VMM_PFNMAP_V0_A 0x0000000000000004ULL
#define NVIF_VMM_PFNMAP_V0_W 0x0000000000000002ULL
#define NVIF_VMM_PFNMAP_V0_V 0x0000000000000001ULL
#define NVIF_VMM_PFNMAP_V0_NONE 0x0000000000000000ULL
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 5d7017fe5039..2f86606e708c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -13,7 +13,6 @@ struct nvkm_client {
struct nvkm_client_notify *notify[32];
struct rb_root objroot;
- bool super;
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index 71ed147ad077..f52918a43246 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -4,5 +4,5 @@
#include <core/os.h>
struct nvkm_client;
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0911e73f7424..70e7887ef4b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -15,7 +15,6 @@ struct nvkm_vma {
u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
bool used:1; /* Region allocated. */
bool part:1; /* Region was split from an allocated region by map(). */
- bool user:1; /* Region user-allocated. */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b45ec3086285..4107b7006539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
}
client->route = NVDRM_OBJECT_ABI16;
- client->super = true;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
- client->super = false;
client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4f3a5357dd56..6d07e653f82d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -149,6 +149,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
*/
if (bo->base.dev)
drm_gem_object_release(&bo->base);
+ else
+ dma_resv_fini(&bo->base._resv);
kfree(nvbo);
}
@@ -330,6 +332,10 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
+ nvbo->bo.base.size = size;
+ dma_resv_init(&nvbo->bo.base._resv);
+ drm_vma_node_reset(&nvbo->bo.base.vma_node);
+
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 40362600eed2..80099ef75702 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
struct nouveau_channel *chan = *pchan;
if (chan) {
struct nouveau_cli *cli = (void *)chan->user.client;
- bool super;
-
- if (cli) {
- super = cli->base.super;
- cli->base.super = true;
- }
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
-
- if (cli)
- cli->base.super = super;
}
*pchan = NULL;
}
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- bool super;
int ret;
/* hack until fencenv50 is fixed, and agp access relaxed */
- super = cli->base.super;
- cli->base.super = true;
-
ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, device, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
- goto done;
+ return ret;
}
}
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
- goto done;
+ return ret;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
if (ret)
nouveau_channel_del(pchan);
-done:
- cli->base.super = super;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index a616cf4573b8..ba4cd5f83725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
&(struct nv_device_v0) {
.device = ~0,
+ .priv = true,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
@@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
if (ret)
goto done;
- cli->base.super = false;
-
fpriv->driver_priv = cli;
mutex_lock(&drm->client.mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 0de6549fb875..2ca3207c13fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem,
struct gf100_vmm_map_v0 gf100;
} args;
u32 argc = 0;
- bool super;
- int ret;
switch (vmm->object.oclass) {
case NVIF_CLASS_VMM_NV04:
@@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
return -ENOSYS;
}
- super = vmm->object.client->super;
- vmm->object.client->super = true;
- ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
- &mem->mem, 0);
- vmm->object.client->super = super;
- return ret;
+ return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
}
void
@@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_mem_ram_v0 args = {};
- bool super = cli->base.super;
u8 type;
int ret;
@@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
- cli->base.super = true;
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
reg->num_pages << PAGE_SHIFT,
&args, sizeof(args), &mem->mem);
- cli->base.super = super;
mutex_unlock(&drm->master.lock);
return ret;
}
@@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
- bool super = cli->base.super;
u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
int ret;
mutex_lock(&drm->master.lock);
- cli->base.super = true;
switch (cli->mem->oclass) {
case NVIF_CLASS_MEM_GF100:
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
@@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
WARN_ON(1);
break;
}
- cli->base.super = super;
mutex_unlock(&drm->master.lock);
reg->start = mem->mem.addr >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index b3f29b1ce9ea..52f5793b7274 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
}
static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
{
- return nvkm_ioctl(priv, super, data, size, hack);
+ return nvkm_ioctl(priv, data, size, hack);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 1c3f890377d2..b0c3422cb01f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -35,6 +35,7 @@
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
+#include <linux/rmap.h>
struct nouveau_svm {
struct nouveau_drm *drm;
@@ -67,6 +68,11 @@ struct nouveau_svm {
} buffer[1];
};
+#define FAULT_ACCESS_READ 0
+#define FAULT_ACCESS_WRITE 1
+#define FAULT_ACCESS_ATOMIC 2
+#define FAULT_ACCESS_PREFETCH 3
+
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
@@ -231,14 +237,11 @@ void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
if (limit > start) {
- bool super = svmm->vmm->vmm.object.client->super;
- svmm->vmm->vmm.object.client->super = true;
nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
&(struct nvif_vmm_pfnclr_v0) {
.addr = start,
.size = limit - start,
}, sizeof(struct nvif_vmm_pfnclr_v0));
- svmm->vmm->vmm.object.client->super = super;
}
}
@@ -265,7 +268,7 @@ nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
* the invalidation is handled as part of the migration process.
*/
if (update->event == MMU_NOTIFY_MIGRATE &&
- update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
+ update->owner == svmm->vmm->cli->drm->dev)
goto out;
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
@@ -412,6 +415,24 @@ nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
}
static int
+nouveau_svm_fault_priority(u8 fault)
+{
+ switch (fault) {
+ case FAULT_ACCESS_PREFETCH:
+ return 0;
+ case FAULT_ACCESS_READ:
+ return 1;
+ case FAULT_ACCESS_WRITE:
+ return 2;
+ case FAULT_ACCESS_ATOMIC:
+ return 3;
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+}
+
+static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
@@ -421,9 +442,8 @@ nouveau_svm_fault_cmp(const void *a, const void *b)
return ret;
if ((ret = (s64)fa->addr - fb->addr))
return ret;
- /*XXX: atomic? */
- return (fa->access == 0 || fa->access == 3) -
- (fb->access == 0 || fb->access == 3);
+ return nouveau_svm_fault_priority(fa->access) -
+ nouveau_svm_fault_priority(fb->access);
}
static void
@@ -487,6 +507,10 @@ static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
struct svm_notifier *sn =
container_of(mni, struct svm_notifier, notifier);
+ if (range->event == MMU_NOTIFY_EXCLUSIVE &&
+ range->owner == sn->svmm->vmm->cli->drm->dev)
+ return true;
+
/*
* serializes the update to mni->invalidate_seq done by caller and
* prevents invalidation of the PTE from progressing while HW is being
@@ -555,6 +579,69 @@ static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}
+static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ struct nouveau_drm *drm,
+ struct nouveau_pfnmap_args *args, u32 size,
+ struct svm_notifier *notifier)
+{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ struct mm_struct *mm = svmm->notifier.mm;
+ struct page *page;
+ unsigned long start = args->p.addr;
+ unsigned long notifier_seq;
+ int ret = 0;
+
+ ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
+ args->p.addr, args->p.size,
+ &nouveau_svm_mni_ops);
+ if (ret)
+ return ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ notifier_seq = mmu_interval_read_begin(&notifier->notifier);
+ mmap_read_lock(mm);
+ ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
+ &page, drm->dev);
+ mmap_read_unlock(mm);
+ if (ret <= 0 || !page) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&svmm->mutex);
+ if (!mmu_interval_read_retry(&notifier->notifier,
+ notifier_seq))
+ break;
+ mutex_unlock(&svmm->mutex);
+ }
+
+ /* Map the page on the GPU. */
+ args->p.page = 12;
+ args->p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.phys[0] = page_to_phys(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_W |
+ NVIF_VMM_PFNMAP_V0_A |
+ NVIF_VMM_PFNMAP_V0_HOST;
+
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
+ mutex_unlock(&svmm->mutex);
+
+ unlock_page(page);
+ put_page(page);
+
+out:
+ mmu_interval_notifier_remove(&notifier->notifier);
+ return ret;
+}
+
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm,
struct nouveau_pfnmap_args *args, u32 size,
@@ -567,18 +654,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
unsigned long hmm_pfns[1];
struct hmm_range range = {
.notifier = &notifier->notifier,
- .start = notifier->notifier.interval_tree.start,
- .end = notifier->notifier.interval_tree.last + 1,
.default_flags = hmm_flags,
.hmm_pfns = hmm_pfns,
.dev_private_owner = drm->dev,
};
- struct mm_struct *mm = notifier->notifier.mm;
+ struct mm_struct *mm = svmm->notifier.mm;
int ret;
+ ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
+ args->p.addr, args->p.size,
+ &nouveau_svm_mni_ops);
+ if (ret)
+ return ret;
+
+ range.start = notifier->notifier.interval_tree.start;
+ range.end = notifier->notifier.interval_tree.last + 1;
+
while (true) {
- if (time_after(jiffies, timeout))
- return -EBUSY;
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
range.notifier_seq = mmu_interval_read_begin(range.notifier);
mmap_read_lock(mm);
@@ -587,7 +683,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
if (ret) {
if (ret == -EBUSY)
continue;
- return ret;
+ goto out;
}
mutex_lock(&svmm->mutex);
@@ -601,11 +697,12 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
nouveau_hmm_convert_pfn(drm, &range, args);
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
+out:
+ mmu_interval_notifier_remove(&notifier->notifier);
+
return ret;
}
@@ -625,7 +722,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
- int replay = 0, ret;
+ int replay = 0, atomic = 0, ret;
/* Parse available fault buffer entries into a cache, and update
* the GET pointer so HW can reuse the entries.
@@ -706,12 +803,14 @@ nouveau_svm_fault(struct nvif_notify *notify)
/*
* Determine required permissions based on GPU fault
* access flags.
- * XXX: atomic?
*/
switch (buffer->fault[fi]->access) {
case 0: /* READ. */
hmm_flags = HMM_PFN_REQ_FAULT;
break;
+ case 2: /* ATOMIC. */
+ atomic = true;
+ break;
case 3: /* PREFETCH. */
hmm_flags = 0;
break;
@@ -727,14 +826,14 @@ nouveau_svm_fault(struct nvif_notify *notify)
}
notifier.svmm = svmm;
- ret = mmu_interval_notifier_insert(&notifier.notifier, mm,
- args.i.p.addr, args.i.p.size,
- &nouveau_svm_mni_ops);
- if (!ret) {
+ if (atomic)
+ ret = nouveau_atomic_range_fault(svmm, svm->drm,
+ &args.i, sizeof(args),
+ &notifier);
+ else
ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags, &notifier);
- mmu_interval_notifier_remove(&notifier.notifier);
- }
+ sizeof(args), hmm_flags,
+ &notifier);
mmput(mm);
limit = args.i.p.addr + args.i.p.size;
@@ -750,11 +849,15 @@ nouveau_svm_fault(struct nvif_notify *notify)
*/
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
- (buffer->fault[fi]->access == 0 /* READ. */ &&
+ (buffer->fault[fi]->access == FAULT_ACCESS_READ &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
- (buffer->fault[fi]->access != 0 /* READ. */ &&
- buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)))
+ (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
+ buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
+ !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
+ buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
+ buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
+ !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
@@ -818,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
mutex_lock(&svmm->mutex);
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
npages * sizeof(args->p.phys[0]), NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
}
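
The replacement comparator removes the old "XXX: atomic?" ambiguity: each access type now gets an explicit rank, so duplicate faults on one address sort from weakest to strongest before the replay loop walks them. A minimal standalone illustration of that ordering (userspace C sketch, not the kernel code; the FAULT_ACCESS_* values simply mirror the defines added above):

/* Minimal userspace sketch (not kernel code): sorting fault records with a
 * priority comparator, as nouveau_svm_fault_cmp() now does, groups faults by
 * address with weaker accesses first, mirroring how the replay loop walks
 * the sorted buffer. */
#include <stdio.h>
#include <stdlib.h>

#define FAULT_ACCESS_READ     0
#define FAULT_ACCESS_WRITE    1
#define FAULT_ACCESS_ATOMIC   2
#define FAULT_ACCESS_PREFETCH 3

struct fault { unsigned long long addr; int access; };

static int fault_priority(int access)
{
	switch (access) {
	case FAULT_ACCESS_PREFETCH: return 0;
	case FAULT_ACCESS_READ:     return 1;
	case FAULT_ACCESS_WRITE:    return 2;
	case FAULT_ACCESS_ATOMIC:   return 3;
	default:                    return -1;
	}
}

static int fault_cmp(const void *a, const void *b)
{
	const struct fault *fa = a, *fb = b;

	if (fa->addr != fb->addr)
		return fa->addr < fb->addr ? -1 : 1;
	return fault_priority(fa->access) - fault_priority(fb->access);
}

int main(void)
{
	struct fault faults[] = {
		{ 0x1000, FAULT_ACCESS_ATOMIC },
		{ 0x2000, FAULT_ACCESS_PREFETCH },
		{ 0x1000, FAULT_ACCESS_READ },
	};

	qsort(faults, 3, sizeof(faults[0]), fault_cmp);

	/* Prints the 0x1000 READ entry before the 0x1000 ATOMIC entry. */
	for (int i = 0; i < 3; i++)
		printf("%llx access=%d\n", faults[i].addr, faults[i].access);
	return 0;
}
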
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 9dc10b17ad34..5da1f4d223d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -32,6 +32,9 @@
#include <nvif/event.h>
#include <nvif/ioctl.h>
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
struct usif_notify_p {
struct drm_pending_event base;
struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
}
static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
struct usif_object *object;
int ret = -ENOSYS;
+ if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+ return ret;
+
+ switch (args->v0.oclass) {
+ case NV_DMA_FROM_MEMORY:
+ case NV_DMA_TO_MEMORY:
+ case NV_DMA_IN_MEMORY:
+ return -EINVAL;
+ case NV_DEVICE: {
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
+
+ if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+ return ret;
+
+ args->v0.priv = false;
+ break;
+ }
+ default:
+ if (!parent_abi16)
+ return -EINVAL;
+ break;
+ }
+
if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
return -ENOMEM;
list_add(&object->head, &cli->objects);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- object->route = args->v0.route;
- object->token = args->v0.token;
- args->v0.route = NVDRM_OBJECT_USIF;
- args->v0.token = (unsigned long)(void *)object;
- ret = nvif_client_ioctl(client, argv, argc);
- args->v0.token = object->token;
- args->v0.route = object->route;
+ object->route = args->v0.route;
+ object->token = args->v0.token;
+ args->v0.route = NVDRM_OBJECT_USIF;
+ args->v0.token = (unsigned long)(void *)object;
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret) {
+ usif_object_dtor(object);
+ return ret;
}
- if (ret)
- usif_object_dtor(object);
- return ret;
+ args->v0.token = object->token;
+ args->v0.route = object->route;
+ return 0;
}
int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
struct nvif_ioctl_v0 v0;
} *argv = data;
struct usif_object *object;
+ bool abi16 = false;
u8 owner;
int ret;
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
mutex_unlock(&cli->mutex);
goto done;
}
+
+ abi16 = true;
}
switch (argv->v0.type) {
case NVIF_IOCTL_V0_NEW:
- ret = usif_object_new(filp, data, size, argv, argc);
+ ret = usif_object_new(filp, data, size, argv, argc, abi16);
break;
case NVIF_IOCTL_V0_NTFY_NEW:
ret = usif_notify_new(filp, data, size, argv, argc);
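
The reworked usif_object_new() unpacks the ioctl arguments first and applies a class policy before anything reaches nvkm: user-space DMA classes are rejected, NV_DEVICE allocations get their priv flag forced off, and any other class is only accepted when the request came in through an ABI16 parent. A rough sketch of that decision as a standalone function (plain C; the class names are placeholders, not the real NVIF values):

/* Illustrative sketch of the new usif object policy (not kernel code).
 * Class identifiers below are placeholders, not the real NVIF numbers. */
#include <stdbool.h>
#include <stdio.h>

enum oclass { DMA_OBJECT, DEVICE_OBJECT, OTHER_OBJECT };

struct new_args { enum oclass oclass; bool priv; };

/* Returns 0 if the allocation may proceed (possibly after clearing priv),
 * or -1 if it must be rejected, mirroring the -EINVAL paths in the patch. */
static int usif_policy(struct new_args *args, bool parent_abi16)
{
	switch (args->oclass) {
	case DMA_OBJECT:		/* NV_DMA_{FROM,TO,IN}_MEMORY */
		return -1;		/* never allowed from user space */
	case DEVICE_OBJECT:		/* NV_DEVICE */
		args->priv = false;	/* strip privileged register access */
		return 0;
	default:
		return parent_abi16 ? 0 : -1;
	}
}

int main(void)
{
	struct new_args dev = { DEVICE_OBJECT, true };
	struct new_args other = { OTHER_OBJECT, false };

	printf("device: %d priv=%d\n", usif_policy(&dev, false), dev.priv);
	printf("other (no abi16 parent): %d\n", usif_policy(&other, false));
	printf("other (abi16 parent): %d\n", usif_policy(&other, true));
	return 0;
}
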
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 12644f811b3e..a3264a0e933a 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -32,7 +32,7 @@
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
{
- return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
+ return client->driver->ioctl(client->object.priv, data, size, NULL);
}
int
@@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
client->object.client = client;
client->object.handle = ~0;
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
- client->super = true;
client->driver = parent->driver;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 671a5c0199e0..dce1ecee2af5 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
} else
return -ENOSYS;
- return client->driver->ioctl(client->object.priv, client->super,
- data, size, hack);
+ return client->driver->ioctl(client->object.priv, data, size, hack);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index d777df5a64e6..735cb6816f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
}
int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
- void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
{
struct nvkm_object *object = &client->object;
union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
} *args = data;
int ret = -ENOSYS;
- client->super = supervisor;
nvif_ioctl(object, "size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b930f539feec..93ddf63d1114 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2624,6 +2624,26 @@ nv174_chipset = {
.dma = { 0x00000001, gv100_dma_new },
};
+static const struct nvkm_device_chip
+nv177_chipset = {
+ .name = "GA107",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index fea9d8f2b10c..f28894fdede9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
return ret;
/* give privileged clients register access */
- if (client->super)
+ if (args->v0.priv)
func = &nvkm_udevice_super;
else
func = &nvkm_udevice;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 55fbfe28c6dc..9669472a2749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret;
}
-static void
+void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 428b3f488f03..e484d0c3b0d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -32,6 +32,7 @@ struct nvkm_dp {
int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index dffcac249211..129982fef7ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
+#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);
+
+ /* The EFI GOP driver on Ampere can leave unused DP links routed,
+ * which we don't expect. The DisableLT IED script *should* get
+ * us back to where we need to be.
+ */
+ if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+ nvkm_dp_disable(outp, ior);
+
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index d20cc0681a88..797131ed7d67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -26,7 +26,6 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
-#include <subdev/instmem.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
union {
struct nv_dma_v0 v0;
} *args = *pdata;
- struct nvkm_device *device = dma->engine.subdev.device;
- struct nvkm_client *client = oclass->client;
struct nvkm_object *parent = oclass->parent;
- struct nvkm_instmem *instmem = device->imem;
- struct nvkm_fb *fb = device->fb;
void *data = *pdata;
u32 size = *psize;
int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_V0_TARGET_VRAM:
- if (!client->super) {
- if (dmaobj->limit >= fb->ram->size - instmem->reserved)
- return -EACCES;
- if (device->card_type >= NV_50)
- return -EACCES;
- }
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_V0_TARGET_PCI:
- if (!client->super)
- return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_V0_TARGET_PCI_US:
case NV_DMA_V0_TARGET_AGP:
- if (!client->super)
- return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 90e9a0972a44..3209eb7af65f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
nvkm-y += nvkm/engine/fifo/dmanv10.o
nvkm-y += nvkm/engine/fifo/dmanv17.o
nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
nvkm-y += nvkm/engine/fifo/gpfifonv50.o
nvkm-y += nvkm/engine/fifo/gpfifog84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index af8bdf275552..3a95730d7ff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
deleted file mode 100644
index fc34cddcd2f5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_object *parent = oclass->parent;
- union {
- struct g82_channel_dma_v0 v0;
- } *args = data;
- struct nv50_fifo *fifo = nv50_fifo(base);
- struct nv50_fifo_chan *chan;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create channel dma size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
- "pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vmm, args->v0.pushbuf,
- args->v0.offset);
- if (!args->v0.pushbuf)
- return -EINVAL;
- } else
- return ret;
-
- if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
- return -ENOMEM;
- *pobject = &chan->base.object;
-
- ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
- oclass, chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nvkm_kmap(chan->ramfc);
- nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
- nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
- nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
- nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
- nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
- nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
- nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
- nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
- nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
- nvkm_done(chan->ramfc);
- return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
- .base.oclass = G82_CHANNEL_DMA,
- .base.minver = 0,
- .base.maxver = 0,
- .ctor = g84_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
deleted file mode 100644
index 8043718ad150..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_object *parent = oclass->parent;
- union {
- struct nv50_channel_dma_v0 v0;
- } *args = data;
- struct nv50_fifo *fifo = nv50_fifo(base);
- struct nv50_fifo_chan *chan;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create channel dma size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
- "pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vmm, args->v0.pushbuf,
- args->v0.offset);
- if (!args->v0.pushbuf)
- return -EINVAL;
- } else
- return ret;
-
- if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
- return -ENOMEM;
- *pobject = &chan->base.object;
-
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
- oclass, chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nvkm_kmap(chan->ramfc);
- nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
- nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
- nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
- nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
- nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
- nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
- nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
- nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nvkm_done(chan->ramfc);
- return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
- .base.oclass = NV50_CHANNEL_DMA,
- .base.minver = 0,
- .base.maxver = 0,
- .ctor = nv50_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index c0a7d0f21dac..3885c3830b94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -119,7 +119,6 @@ g84_fifo = {
.uevent_init = g84_fifo_uevent_init,
.uevent_fini = g84_fifo_uevent_fini,
.chan = {
- &g84_fifo_dma_oclass,
&g84_fifo_gpfifo_oclass,
NULL
},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index b6900a52bcce..ae6c4d846eb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gk104_fifo_gpfifo_new_(fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index ee4967b706a7..743791c514fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
index abef7fb6e2d3..99aafa103a31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index be94156ea248..a08742cf425a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -136,7 +136,6 @@ nv50_fifo = {
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
- &nv50_fifo_dma_oclass,
&nv50_fifo_gpfifo_oclass,
NULL
},
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
index fac2f9a45ea6..e530bb8b3b17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
object = nvkm_object_search(client, handle, &nvkm_umem);
if (IS_ERR(object)) {
- if (client->super && client != master) {
+ if (client != master) {
spin_lock(&master->lock);
list_for_each_entry(umem, &master->umem, head) {
if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
}
} else {
umem = nvkm_umem(object);
- if (!umem->priv || client->super)
- memory = nvkm_memory_ref(umem->memory);
+ memory = nvkm_memory_ref(umem->memory);
}
return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
umem->mmu = mmu;
umem->type = mmu->type[type].type;
- umem->priv = oclass->client->super;
INIT_LIST_HEAD(&umem->head);
*pobject = &umem->object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
index 85cf692d620a..d56a594016cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -8,7 +8,6 @@ struct nvkm_umem {
struct nvkm_object object;
struct nvkm_mmu *mmu;
u8 type:8;
- bool priv:1;
bool mappable:1;
bool io:1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 0e4b8941da37..6870fda4b188 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
{
struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
- if (mmu->func->mem.user.oclass && oclass->client->super) {
+ if (mmu->func->mem.user.oclass) {
if (index-- == 0) {
oclass->base = mmu->func->mem.user;
oclass->ctor = nvkm_umem_new;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index c43b8248c682..d6a1f8d04c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_pfnclr_v0 v0;
} *args = argv;
@@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (!client->super)
- return -ENOENT;
-
if (size) {
mutex_lock(&vmm->mutex);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
@@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_pfnmap_v0 v0;
} *args = argv;
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (!client->super)
- return -ENOENT;
-
if (size) {
mutex_lock(&vmm->mutex);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_unmap_v0 v0;
} *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto fail;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto fail;
}
@@ -230,7 +219,6 @@ fail:
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_put_v0 v0;
} *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
@@ -268,7 +255,6 @@ done:
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_get_v0 v0;
} *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return ret;
args->v0.addr = vma->addr;
- vma->user = !client->super;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 710f3f8dc7c9..8bf00b396ec1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
new->refd = vma->refd;
new->used = vma->used;
new->part = vma->part;
- new->user = vma->user;
new->busy = vma->busy;
new->mapped = vma->mapped;
list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
static void
nvkm_vma_dump(struct nvkm_vma *vma)
{
- printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+ printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
vma->addr, (u64)vma->size,
vma->used ? '-' : 'F',
vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
vma->part ? 'P' : '-',
- vma->user ? 'U' : '-',
vma->busy ? 'B' : '-',
vma->mapped ? 'M' : '-',
vma->memory);
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
vma->mapref = true;
vma->sparse = false;
vma->used = true;
- vma->user = true;
nvkm_vmm_node_insert(vmm, vma);
list_add_tail(&vma->head, &vmm->list);
return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma->page = NVKM_VMA_PAGE_NONE;
vma->refd = NVKM_VMA_PAGE_NONE;
vma->used = false;
- vma->user = false;
nvkm_vmm_put_region(vmm, vma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index a2b179568970..f6188aa9171c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -178,6 +178,7 @@ void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
+#define NVKM_VMM_PFN_A 0x0000000000000004ULL
#define NVKM_VMM_PFN_W 0x0000000000000002ULL
#define NVKM_VMM_PFN_V 0x0000000000000001ULL
#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 236db5570771..b5e733783b5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -88,6 +88,9 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
+ if (!(*map->pfn & NVKM_VMM_PFN_A))
+ data |= BIT_ULL(7); /* Atomic disable. */
+
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
addr = dma_map_page(dev, pfn_to_page(addr), 0,
@@ -322,6 +325,9 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
+ if (!(*map->pfn & NVKM_VMM_PFN_A))
+ data |= BIT_ULL(7); /* Atomic disable. */
+
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
addr = dma_map_page(dev, pfn_to_page(addr), 0,
@@ -528,15 +534,13 @@ int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
- if (client->super) {
- switch (mthd) {
- case GP100_VMM_VN_FAULT_REPLAY:
- return gp100_vmm_fault_replay(vmm, argv, argc);
- case GP100_VMM_VN_FAULT_CANCEL:
- return gp100_vmm_fault_cancel(vmm, argv, argc);
- default:
- break;
- }
+ switch (mthd) {
+ case GP100_VMM_VN_FAULT_REPLAY:
+ return gp100_vmm_fault_replay(vmm, argv, argc);
+ case GP100_VMM_VN_FAULT_CANCEL:
+ return gp100_vmm_fault_cancel(vmm, argv, argc);
+ default:
+ break;
}
return -EINVAL;
}
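
NVKM_VMM_PFN_A carries the "atomic access allowed" property from the SVM fault path into the page-table writers, where its absence now sets the PTE atomic-disable bit on GP100-class MMUs. A small standalone sketch of composing a PFN word and deriving the PTE flags (plain C; bit positions follow the defines and gp100 hunks above, purely as an illustration):

/* Standalone sketch (not kernel code) of how the new PFN 'A' bit maps to
 * the GP100 PTE atomic-disable bit.  Bit positions mirror the defines in
 * vmm.h and the gp100 hunks above. */
#include <stdint.h>
#include <stdio.h>

#define PFN_V 0x1ULL	/* valid */
#define PFN_W 0x2ULL	/* writable */
#define PFN_A 0x4ULL	/* atomic access allowed */

#define PTE_RO		(1ULL << 6)	/* read-only */
#define PTE_ATOMIC_DIS	(1ULL << 7)	/* atomics disabled */

static uint64_t pte_flags_from_pfn(uint64_t pfn)
{
	uint64_t data = 0;

	if (!(pfn & PFN_W))
		data |= PTE_RO;
	if (!(pfn & PFN_A))
		data |= PTE_ATOMIC_DIS;
	return data;
}

int main(void)
{
	/* Page mapped for an atomic fault: valid, writable, atomics enabled. */
	printf("%#llx\n", (unsigned long long)pte_flags_from_pfn(PFN_V | PFN_W | PFN_A));
	/* Plain read-only mapping: atomics end up disabled in the PTE. */
	printf("%#llx\n", (unsigned long long)pte_flags_from_pfn(PFN_V));
	return 0;
}
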
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index ef70140c5b09..873cbd38e6d3 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt)
if (ret)
return ret;
- ret = nt35510_read_id(nt);
- if (ret)
- return ret;
+ nt35510_read_id(nt);
/* Set up stuff in manufacturer control, page 1 */
ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 2229f1af2ca8..46029c5610c8 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
- kfree(ts->dsi);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 21939d4352cf..1b80290c2b53 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -4166,7 +4166,7 @@ static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode
static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
.modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
.num_modes = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 154,
.height = 90,
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 19fd39d9a00c..37a1b6a6ad6d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
struct qxl_bo *qbo;
struct qxl_device *qdev;
- if (!qxl_ttm_bo_is_qxl_bo(bo))
+ if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
return;
qbo = to_qxl_bo(bo);
qdev = to_qxl(qbo->tbo.base.dev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 406681317419..573154268d43 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1325,6 +1325,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8cd135fa6dcd..5c23b77cb81a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -374,13 +374,13 @@ radeon_pci_shutdown(struct pci_dev *pdev)
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
- * during kexec.
- * Make this power specific becauase it breaks
- * some non-power boards.
+ * during kexec, shutdown or reboot.
+ * Make this power and Loongson specific because
+ * it breaks some other boards.
*/
radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bfaaa3c969a3..56ede9d63b12 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -49,23 +49,23 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-static void radeon_update_memory_usage(struct radeon_bo *bo,
- unsigned mem_type, int sign)
+static void radeon_update_memory_usage(struct ttm_buffer_object *bo,
+ unsigned int mem_type, int sign)
{
- struct radeon_device *rdev = bo->rdev;
+ struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
switch (mem_type) {
case TTM_PL_TT:
if (sign > 0)
- atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
+ atomic64_add(bo->base.size, &rdev->gtt_usage);
else
- atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
+ atomic64_sub(bo->base.size, &rdev->gtt_usage);
break;
case TTM_PL_VRAM:
if (sign > 0)
- atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
+ atomic64_add(bo->base.size, &rdev->vram_usage);
else
- atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
+ atomic64_sub(bo->base.size, &rdev->vram_usage);
break;
}
}
@@ -76,8 +76,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
- radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
-
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
@@ -727,24 +725,21 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
+ unsigned int old_type,
struct ttm_resource *new_mem)
{
struct radeon_bo *rbo;
+ radeon_update_memory_usage(bo, old_type, -1);
+ if (new_mem)
+ radeon_update_memory_usage(bo, new_mem->mem_type, 1);
+
if (!radeon_ttm_bo_is_radeon_bo(bo))
return;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
radeon_vm_bo_invalidate(rbo->rdev, rbo);
-
- /* update statistics */
- if (!new_mem)
- return;
-
- radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
- radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1739c6a142cd..1afc7992ef91 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -161,7 +161,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
+ unsigned int old_type,
struct ttm_resource *new_mem);
extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index ad2a5a791bba..a06d4cc2fb1c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -199,7 +199,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = bo->resource;
struct radeon_device *rdev;
struct radeon_bo *rbo;
- int r;
+ int r, old_type;
if (new_mem->mem_type == TTM_PL_TT) {
r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
@@ -216,6 +216,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
return -EINVAL;
+ /* Save old type for statistics update */
+ old_type = old_mem->mem_type;
+
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
@@ -261,7 +264,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
atomic64_add(bo->base.size, &rdev->num_bytes_moved);
- radeon_bo_move_notify(bo, evict, new_mem);
+ radeon_bo_move_notify(bo, old_type, new_mem);
return 0;
}
@@ -682,7 +685,11 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
static void
radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- radeon_bo_move_notify(bo, false, NULL);
+ unsigned int old_type = TTM_PL_SYSTEM;
+
+ if (bo->resource)
+ old_type = bo->resource->mem_type;
+ radeon_bo_move_notify(bo, old_type, NULL);
}
static struct ttm_device_funcs radeon_bo_driver = {
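
Passing the old placement type into radeon_bo_move_notify() lets the statistics be updated even when bo->resource has already been torn down; the invariant is simply subtract-from-old, add-to-new on every move. A tiny standalone sketch of that accounting (illustrative names only, not the radeon code):

/* Illustrative accounting sketch: per-placement usage counters updated on
 * every move, including final teardown (new_type < 0). */
#include <stdio.h>

enum { PL_SYSTEM, PL_TT, PL_VRAM, PL_NUM };

static long long usage[PL_NUM];

static void update_usage(int type, long long delta)
{
	if (type == PL_TT || type == PL_VRAM)
		usage[type] += delta;	/* system placement is not tracked */
}

/* new_type < 0 means the object is being destroyed (no new placement). */
static void move_notify(long long size, int old_type, int new_type)
{
	update_usage(old_type, -size);
	if (new_type >= 0)
		update_usage(new_type, size);
}

int main(void)
{
	move_notify(4096, PL_SYSTEM, PL_VRAM);	/* first move into VRAM */
	move_notify(4096, PL_VRAM, PL_TT);	/* evicted to GTT */
	move_notify(4096, PL_TT, -1);		/* delete_mem_notify path */
	printf("tt=%lld vram=%lld\n", usage[PL_TT], usage[PL_VRAM]);
	return 0;
}
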
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1b950b45cf4b..8d7fd65ccced 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -102,6 +102,9 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
return;
}
+ if (!mem)
+ return;
+
man = ttm_manager_type(bdev, mem->mem_type);
list_move_tail(&bo->lru, &man->lru[bo->priority]);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2f57f824e6db..763fa6f4e07d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -63,6 +63,9 @@ int ttm_mem_io_reserve(struct ttm_device *bdev,
void ttm_mem_io_free(struct ttm_device *bdev,
struct ttm_resource *mem)
{
+ if (!mem)
+ return;
+
if (!mem->bus.offset && !mem->bus.addr)
return;
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 5f31acec3ad7..2df59b3c2ea1 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -44,6 +44,8 @@ static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);
+struct dentry *ttm_debugfs_root;
+
static void ttm_global_release(void)
{
struct ttm_global *glob = &ttm_glob;
@@ -53,6 +55,7 @@ static void ttm_global_release(void)
goto out;
ttm_pool_mgr_fini();
+ debugfs_remove(ttm_debugfs_root);
__free_page(glob->dummy_read_page);
memset(glob, 0, sizeof(*glob));
@@ -73,6 +76,11 @@ static int ttm_global_init(void)
si_meminfo(&si);
+ ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
+ if (IS_ERR(ttm_debugfs_root)) {
+ ttm_debugfs_root = NULL;
+ }
+
/* Limit the number of pages in the pool to about 50% of the total
* system memory.
*/
@@ -100,6 +108,10 @@ static int ttm_global_init(void)
debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
&glob->bo_count);
out:
+ if (ret && ttm_debugfs_root)
+ debugfs_remove(ttm_debugfs_root);
+ if (ret)
+ --ttm_glob_use_count;
mutex_unlock(&ttm_global_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 997c458f68a9..7fcdef278c74 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -72,22 +72,6 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
return tmp;
}
-struct dentry *ttm_debugfs_root;
-
-static int __init ttm_init(void)
-{
- ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
- return 0;
-}
-
-static void __exit ttm_exit(void)
-{
- debugfs_remove(ttm_debugfs_root);
-}
-
-module_init(ttm_init);
-module_exit(ttm_exit);
-
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 03395386e8a7..f4b08a8705b3 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev,
struct drm_mm *mm = &rman->mm;
int ret;
+ if (!man)
+ return 0;
+
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_evict_all(bdev, man);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index aab1b36ceb3c..c2876731ee2d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1857,38 +1857,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (vc4_hdmi->variant->external_irq_controller) {
- ret = devm_request_threaded_irq(&pdev->dev,
- platform_get_irq_byname(pdev, "cec-rx"),
- vc4_cec_irq_handler_rx_bare,
- vc4_cec_irq_handler_rx_thread, 0,
- "vc4 hdmi cec rx", vc4_hdmi);
+ ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
- ret = devm_request_threaded_irq(&pdev->dev,
- platform_get_irq_byname(pdev, "cec-tx"),
- vc4_cec_irq_handler_tx_bare,
- vc4_cec_irq_handler_tx_thread, 0,
- "vc4 hdmi cec tx", vc4_hdmi);
+ ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ goto err_remove_cec_rx_handler;
} else {
HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
- ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
+ ret = request_threaded_irq(platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
- goto err_delete_cec_adap;
+ goto err_remove_handlers;
return 0;
+err_remove_handlers:
+ if (vc4_hdmi->variant->external_irq_controller)
+ free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+ else
+ free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+
+err_remove_cec_rx_handler:
+ if (vc4_hdmi->variant->external_irq_controller)
+ free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+
err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);
@@ -1897,6 +1905,15 @@ err_delete_cec_adap:
static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+
+ if (vc4_hdmi->variant->external_irq_controller) {
+ free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+ free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+ } else {
+ free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+ }
+
cec_unregister_adapter(vc4_hdmi->cec_adap);
}
#else
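
Dropping devm_request_threaded_irq() means the CEC interrupt handlers must now be released by hand, both on the new error labels and in vc4_hdmi_cec_exit(), so they cannot fire after the CEC adapter is gone. The general request/unwind shape, sketched with stub helpers (standalone C, not the vc4 code):

/* Standalone sketch of the request/unwind pattern used above (stub
 * helpers, not the real kernel API): each successfully acquired IRQ is
 * released in reverse order if a later step fails, and again on exit. */
#include <stdio.h>

static int request_irq_stub(const char *name, int fail)
{
	if (fail) {
		printf("request %s: failed\n", name);
		return -1;
	}
	printf("request %s: ok\n", name);
	return 0;
}

static void free_irq_stub(const char *name)
{
	printf("free %s\n", name);
}

static int cec_init(int fail_tx)
{
	int ret;

	ret = request_irq_stub("cec-rx", 0);
	if (ret)
		return ret;

	ret = request_irq_stub("cec-tx", fail_tx);
	if (ret)
		goto err_free_rx;

	/* register the CEC adapter, etc. */
	return 0;

err_free_rx:
	free_irq_stub("cec-rx");
	return ret;
}

static void cec_exit(void)
{
	free_irq_stub("cec-rx");
	free_irq_stub("cec-tx");
}

int main(void)
{
	if (!cec_init(0))
		cec_exit();	/* normal teardown frees both handlers */
	cec_init(1);		/* tx request fails: rx is unwound */
	return 0;
}
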
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6f5ea00973e0..45aeeca9b8f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -36,6 +36,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d1cef3b69e9d..5652d982b1ce 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -492,7 +492,7 @@ struct vmw_private {
resource_size_t vram_start;
resource_size_t vram_size;
resource_size_t prim_bb_mem;
- void __iomem *rmmio;
+ u32 __iomem *rmmio;
u32 *fifo_mem;
resource_size_t fifo_mem_size;
uint32_t fb_max_width;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 5648664f71bc..f2d625415458 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
- ttm_bo_unpin(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 160554903ef9..76937f716fbe 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -576,7 +576,7 @@ config HID_LOGITECH_HIDPP
depends on HID_LOGITECH
select POWER_SUPPLY
help
- Support for Logitech devices relyingon the HID++ Logitech specification
+ Support for Logitech devices relying on the HID++ Logitech specification
Say Y if you want support for Logitech devices relying on the HID++
specification. Such devices are the various Logitech Touchpads (T650,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 96e2577fa37e..8d68796aa905 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -58,7 +58,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
- writeq(0x0, privdata->mmio + AMD_C2P_MSG2);
+ writeq(0x0, privdata->mmio + AMD_C2P_MSG1);
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 6b8f0d004d34..dc6bd4299c54 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -501,6 +501,8 @@ static const struct hid_device_id apple_devices[] = {
APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
.driver_data = APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index fca8fc78a78a..fb807c8e989b 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -485,9 +485,6 @@ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
- if (led->brightness == brightness)
- return;
-
led->brightness = brightness;
schedule_work(&led->work);
}
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index f43a8406cb9a..4ef1c3b8094e 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -742,7 +742,7 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
int ret;
ret = ft260_get_system_config(hdev, &cfg);
- if (ret)
+ if (ret < 0)
return ret;
ft260_dbg("interface: 0x%02x\n", interface);
@@ -754,23 +754,16 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
switch (cfg.chip_mode) {
case FT260_MODE_ALL:
case FT260_MODE_BOTH:
- if (interface == 1) {
+ if (interface == 1)
hid_info(hdev, "uart interface is not supported\n");
- return 0;
- }
- ret = 1;
+ else
+ ret = 1;
break;
case FT260_MODE_UART:
- if (interface == 0) {
- hid_info(hdev, "uart is unsupported on interface 0\n");
- ret = 0;
- }
+ hid_info(hdev, "uart interface is not supported\n");
break;
case FT260_MODE_I2C:
- if (interface == 1) {
- hid_info(hdev, "i2c is unsupported on interface 1\n");
- ret = 0;
- }
+ ret = 1;
break;
}
return ret;
@@ -785,7 +778,7 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", *field);
}
static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
@@ -797,7 +790,7 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", le16_to_cpu(*field));
}
#define FT260_ATTR_SHOW(name, reptype, id, type, func) \
@@ -1004,11 +997,9 @@ err_hid_stop:
static void ft260_remove(struct hid_device *hdev)
{
- int ret;
struct ft260_device *dev = hid_get_drvdata(hdev);
- ret = ft260_is_interface_enabled(hdev);
- if (ret <= 0)
+ if (!dev)
return;
sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 6b1fa971b33e..91bf4d01e91a 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -784,6 +784,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
}
}
+static void hid_ishtp_cl_resume_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work);
+ struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl;
+
+ if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ }
+}
+
ishtp_print_log ishtp_hid_print_trace;
/**
@@ -822,6 +833,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
init_waitqueue_head(&client_data->ishtp_resume_wait);
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+ INIT_WORK(&client_data->resume_work, hid_ishtp_cl_resume_handler);
+
ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
@@ -921,7 +934,7 @@ static int hid_ishtp_cl_resume(struct device *device)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- client_data->suspended = false;
+ schedule_work(&client_data->resume_work);
return 0;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index f88443a7d935..6a5cc11aefd8 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -135,6 +135,7 @@ struct ishtp_cl_data {
int multi_packet_cnt;
struct work_struct work;
+ struct work_struct resume_work;
struct ishtp_cl_device *cl_device;
};
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f0802b047ed8..aa2c51624012 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -314,13 +314,6 @@ static int ishtp_cl_device_resume(struct device *dev)
if (!device)
return 0;
- /*
- * When ISH needs hard reset, it is done asynchrnously, hence bus
- * resume will be called before full ISH resume
- */
- if (device->ishtp_dev->resume_flag)
- return 0;
-
driver = to_ishtp_cl_driver(dev->driver);
if (driver && driver->driver.pm) {
if (driver->driver.pm->resume)
@@ -850,6 +843,28 @@ struct device *ishtp_device(struct ishtp_cl_device *device)
EXPORT_SYMBOL(ishtp_device);
/**
+ * ishtp_wait_resume() - Wait for IPC resume
+ * @dev: ishtp device
+ *
+ * Wait for the IPC resume to complete.
+ *
+ * Return: true if resume has completed, false otherwise
+ */
+bool ishtp_wait_resume(struct ishtp_device *dev)
+{
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS 50
+
+ /* Waiting to get resume response */
+ if (dev->resume_flag)
+ wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+
+ return (!dev->resume_flag);
+}
+EXPORT_SYMBOL_GPL(ishtp_wait_resume);
+
+/**
* ishtp_get_pci_device() - Return PCI device dev pointer
* This interface is used to return PCI device pointer
* from ishtp_cl_device instance.
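
The new helper only blocks (for up to 50 ms) until the firmware clears resume_flag, so callers are expected to use it from process context; that is why the HID client earlier in this patch defers its resume handling to a work item. A hedged sketch of that split, where struct example_client and its fields are illustrative stand-ins for the real ishtp client data:

/*
 * Sketch of the deferred-resume pattern introduced above (assumption: the
 * client keeps a work item and a pointer to its ishtp device).
 */
struct example_client {
	struct ishtp_device *ishtp_dev;
	struct work_struct resume_work;
	bool suspended;
};

static void example_resume_worker(struct work_struct *work)
{
	struct example_client *cl = container_of(work, struct example_client,
						 resume_work);

	/* Blocks until the ISH firmware acknowledges resume, or times out. */
	if (ishtp_wait_resume(cl->ishtp_dev))
		cl->suspended = false;
}

static int example_resume(struct device *device)
{
	struct example_client *cl = dev_get_drvdata(device);

	/* Never wait inside the PM callback itself; defer to a worker. */
	schedule_work(&cl->resume_work);
	return 0;
}
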
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index dcf3a235870f..7c2032f7f44d 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -38,7 +38,7 @@ config USB_HIDDEV
help
Say Y here if you want to support HID devices (from the USB
specification standpoint) that aren't strictly user interface
- devices, like monitor controls and Uninterruptable Power Supplies.
+ devices, like monitor controls and Uninterruptible Power Supplies.
This module supports these devices separately using a separate
event interface on /dev/usb/hiddevX (char 180:96 to 180:111).
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 81d7d12bcf34..81ba642adcb7 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2548,6 +2548,9 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
int slot;
slot = input_mt_get_slot_by_key(input, hid_data->id);
+ if (slot < 0)
+ return;
+
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
}
@@ -3831,7 +3834,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
wacom_wac->shared->touch->product == 0xF6) {
input_dev->evbit[0] |= BIT_MASK(EV_SW);
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
- wacom_wac->shared->has_mute_touch_switch = true;
+ wacom_wac->has_mute_touch_switch = true;
}
fallthrough;
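
input_mt_get_slot_by_key() returns a negative value when no slot can be assigned to the contact key, so the result must be checked before being handed to input_mt_slot(); the wacom hunk above adds exactly that guard. An illustrative, stand-alone version of the same check:

/* Illustrative only: skip the report when no MT slot exists for this contact. */
static void example_report_finger(struct input_dev *input, int key, bool prox)
{
	int slot = input_mt_get_slot_by_key(input, key);

	if (slot < 0)
		return;		/* no slot available for this contact key */

	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
}
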
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index caf6d0c4bc1b..142308526ec6 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (guid_equal(&channel->offermsg.offer.if_type,
+ &newchannel->offermsg.offer.if_type) &&
+ guid_equal(&channel->offermsg.offer.if_instance,
+ &newchannel->offermsg.offer.if_instance)) {
+ fnew = false;
+ newchannel->primary_channel = channel;
+ break;
+ }
+ }
+
init_vp_index(newchannel);
/* Remember the channels that should be cleaned up upon suspend. */
@@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
atomic_dec(&vmbus_connection.offer_in_progress);
- list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (guid_equal(&channel->offermsg.offer.if_type,
- &newchannel->offermsg.offer.if_type) &&
- guid_equal(&channel->offermsg.offer.if_instance,
- &newchannel->offermsg.offer.if_instance)) {
- fnew = false;
- break;
- }
- }
-
if (fnew) {
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
@@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
/*
* Process the sub-channel.
*/
- newchannel->primary_channel = channel;
list_add_tail(&newchannel->sc_list, &channel->sc_list);
}
@@ -684,6 +684,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
}
/*
+ * Check if the given CPU is already used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+ struct vmbus_channel *primary = chn->primary_channel;
+ struct vmbus_channel *sc;
+
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+ if (!primary)
+ return false;
+
+ if (primary->target_cpu == cpu)
+ return true;
+
+ list_for_each_entry(sc, &primary->sc_list, sc_list)
+ if (sc != chn && sc->target_cpu == cpu)
+ return true;
+
+ return false;
+}
+
+/*
* We use this state to statically distribute the channel interrupt load.
*/
static int next_numa_node_id;
@@ -702,6 +726,7 @@ static int next_numa_node_id;
static void init_vp_index(struct vmbus_channel *channel)
{
bool perf_chn = hv_is_perf_channel(channel);
+ u32 i, ncpu = num_online_cpus();
cpumask_var_t available_mask;
struct cpumask *alloced_mask;
u32 target_cpu;
@@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel)
return;
}
- while (true) {
- numa_node = next_numa_node_id++;
- if (numa_node == nr_node_ids) {
- next_numa_node_id = 0;
- continue;
+ for (i = 1; i <= ncpu + 1; i++) {
+ while (true) {
+ numa_node = next_numa_node_id++;
+ if (numa_node == nr_node_ids) {
+ next_numa_node_id = 0;
+ continue;
+ }
+ if (cpumask_empty(cpumask_of_node(numa_node)))
+ continue;
+ break;
+ }
+ alloced_mask = &hv_context.hv_numa_map[numa_node];
+
+ if (cpumask_weight(alloced_mask) ==
+ cpumask_weight(cpumask_of_node(numa_node))) {
+ /*
+ * We have cycled through all the CPUs in the node;
+ * reset the alloced map.
+ */
+ cpumask_clear(alloced_mask);
}
- if (cpumask_empty(cpumask_of_node(numa_node)))
- continue;
- break;
- }
- alloced_mask = &hv_context.hv_numa_map[numa_node];
- if (cpumask_weight(alloced_mask) ==
- cpumask_weight(cpumask_of_node(numa_node))) {
- /*
- * We have cycled through all the CPUs in the node;
- * reset the alloced map.
- */
- cpumask_clear(alloced_mask);
- }
+ cpumask_xor(available_mask, alloced_mask,
+ cpumask_of_node(numa_node));
- cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+ target_cpu = cpumask_first(available_mask);
+ cpumask_set_cpu(target_cpu, alloced_mask);
- target_cpu = cpumask_first(available_mask);
- cpumask_set_cpu(target_cpu, alloced_mask);
+ if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+ i > ncpu || !hv_cpuself_used(target_cpu, channel))
+ break;
+ }
channel->target_cpu = target_cpu;
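
The reworked loop still walks NUMA nodes round-robin, but now retries up to ncpu + 1 times and only accepts a candidate CPU once hv_cpuself_used() confirms that no sibling channel of the same device is already bound to it (or the retry budget runs out). A simplified, self-contained sketch of that selection policy, with an illustrative array and counter rather than the Hyper-V data structures:

/*
 * Simplified sketch of the "avoid CPUs already used by sibling channels"
 * policy above; not the driver's code.
 */
static unsigned int example_pick_cpu(const bool *used_by_sibling,
				     unsigned int ncpu, unsigned int *next)
{
	unsigned int i, cpu = 0;

	for (i = 0; i < ncpu; i++) {
		cpu = (*next)++ % ncpu;
		if (!used_by_sibling[cpu])
			break;	/* found a CPU no sibling channel uses */
	}

	return cpu;	/* may fall back to a shared CPU if all are taken */
}
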
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 92cb3f7d21d9..57bbbaa4e8f7 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 967532afb1c0..0d68a78be980 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -1151,6 +1151,8 @@ int occ_setup(struct occ *occ, const char *name)
{
int rc;
+ /* start with 1 to avoid false match with zero-initialized SRAM buffer */
+ occ->seq_no = 1;
mutex_init(&occ->lock);
occ->groups[0] = &occ->group;
@@ -1160,8 +1162,9 @@ int occ_setup(struct occ *occ, const char *name)
dev_info(occ->bus_dev, "host is not ready\n");
return rc;
} else if (rc < 0) {
- dev_err(occ->bus_dev, "failed to get OCC poll response: %d\n",
- rc);
+ dev_err(occ->bus_dev,
+ "failed to get OCC poll response=%02x: %d\n",
+ occ->resp.return_status, rc);
return rc;
}
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 53e13476e831..3874d15b0e9b 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -44,6 +44,15 @@ config HWSPINLOCK_STM32
If unsure, say N.
+config HWSPINLOCK_SUN6I
+ tristate "SUN6I Hardware Spinlock device"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ Say y here to support the SUN6I Hardware Spinlock device which can be
+ found in most of the sun6i compatible Allwinner SoCs.
+
+ If unsure, say N.
+
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on ARCH_U8500 || COMPILE_TEST
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 1f8dd6f5814f..a0f16c9aaa82 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_STM32) += stm32_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_SUN6I) += sun6i_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
diff --git a/drivers/hwspinlock/sun6i_hwspinlock.c b/drivers/hwspinlock/sun6i_hwspinlock.c
new file mode 100644
index 000000000000..c2d314588046
--- /dev/null
+++ b/drivers/hwspinlock/sun6i_hwspinlock.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * sun6i_hwspinlock.c - hardware spinlock driver for sun6i compatible Allwinner SoCs
+ * Copyright (C) 2020 Wilken Gottwalt <wilken.gottwalt@posteo.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "hwspinlock_internal.h"
+
+#define DRIVER_NAME "sun6i_hwspinlock"
+
+#define SPINLOCK_BASE_ID 0 /* there is only one hwspinlock device per SoC */
+#define SPINLOCK_SYSSTATUS_REG 0x0000
+#define SPINLOCK_LOCK_REGN 0x0100
+#define SPINLOCK_NOTTAKEN 0
+
+struct sun6i_hwspinlock_data {
+ struct hwspinlock_device *bank;
+ struct reset_control *reset;
+ struct clk *ahb_clk;
+ struct dentry *debugfs;
+ int nlocks;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static int hwlocks_supported_show(struct seq_file *seqf, void *unused)
+{
+ struct sun6i_hwspinlock_data *priv = seqf->private;
+
+ seq_printf(seqf, "%d\n", priv->nlocks);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(hwlocks_supported);
+
+static void sun6i_hwspinlock_debugfs_init(struct sun6i_hwspinlock_data *priv)
+{
+ priv->debugfs = debugfs_create_dir(DRIVER_NAME, NULL);
+ debugfs_create_file("supported", 0444, priv->debugfs, priv, &hwlocks_supported_fops);
+}
+
+#else
+
+static void sun6i_hwspinlock_debugfs_init(struct sun6i_hwspinlock_data *priv)
+{
+}
+
+#endif
+
+static int sun6i_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ return (readl(lock_addr) == SPINLOCK_NOTTAKEN);
+}
+
+static void sun6i_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ writel(SPINLOCK_NOTTAKEN, lock_addr);
+}
+
+static const struct hwspinlock_ops sun6i_hwspinlock_ops = {
+ .trylock = sun6i_hwspinlock_trylock,
+ .unlock = sun6i_hwspinlock_unlock,
+};
+
+static void sun6i_hwspinlock_disable(void *data)
+{
+ struct sun6i_hwspinlock_data *priv = data;
+
+ debugfs_remove_recursive(priv->debugfs);
+ clk_disable_unprepare(priv->ahb_clk);
+ reset_control_assert(priv->reset);
+}
+
+static int sun6i_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct sun6i_hwspinlock_data *priv;
+ struct hwspinlock *hwlock;
+ void __iomem *io_base;
+ u32 num_banks;
+ int err, i;
+
+ io_base = devm_platform_ioremap_resource(pdev, SPINLOCK_BASE_ID);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ahb_clk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(priv->ahb_clk)) {
+ err = PTR_ERR(priv->ahb_clk);
+ dev_err(&pdev->dev, "unable to get AHB clock (%d)\n", err);
+ return err;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, "ahb");
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+ "unable to get reset control\n");
+
+ err = reset_control_deassert(priv->reset);
+ if (err) {
+ dev_err(&pdev->dev, "deassert reset control failure (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(priv->ahb_clk);
+ if (err) {
+ dev_err(&pdev->dev, "unable to prepare AHB clk (%d)\n", err);
+ goto clk_fail;
+ }
+
+ /*
+ * bits 28 and 29 represent the hwspinlock setup
+ *
+ * every datasheet (A64, A80, A83T, H3, H5, H6 ...) says the default value is 0x1 and 0x1
+ * to 0x4 represent 32, 64, 128 and 256 locks
+ * but later datasheets (H5, H6) say 00, 01, 10, 11 represent 32, 64, 128 and 256 locks,
+ * which would mean H5 and H6 have 64 locks, while their datasheets talk about 32 locks
+ * all the time and never mention 64 locks
+ * the 0x4 value is also not representable by 2 bits alone, so some datasheets are not
+ * correct
+ * one thing they all have in common is that the default value of the sysstatus register
+ * is 0x10000000, which results in bit 28 being set
+ * this is the reason 0x1 is taken to mean 32 locks and bit 30 is taken into account
+ * verified on H2+ (datasheet 0x1 = 32 locks) and H5 (datasheet 01 = 64 locks)
+ */
+ num_banks = readl(io_base + SPINLOCK_SYSSTATUS_REG) >> 28;
+ switch (num_banks) {
+ case 1 ... 4:
+ priv->nlocks = 1 << (4 + num_banks);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(&pdev->dev, "unsupported hwspinlock setup (%d)\n", num_banks);
+ goto bank_fail;
+ }
+
+ priv->bank = devm_kzalloc(&pdev->dev, struct_size(priv->bank, lock, priv->nlocks),
+ GFP_KERNEL);
+ if (!priv->bank) {
+ err = -ENOMEM;
+ goto bank_fail;
+ }
+
+ for (i = 0; i < priv->nlocks; ++i) {
+ hwlock = &priv->bank->lock[i];
+ hwlock->priv = io_base + SPINLOCK_LOCK_REGN + sizeof(u32) * i;
+ }
+
+ /* failure of debugfs is considered non-fatal */
+ sun6i_hwspinlock_debugfs_init(priv);
+ if (IS_ERR(priv->debugfs))
+ priv->debugfs = NULL;
+
+ err = devm_add_action_or_reset(&pdev->dev, sun6i_hwspinlock_disable, priv);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add hwspinlock disable action\n");
+ goto bank_fail;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_hwspin_lock_register(&pdev->dev, priv->bank, &sun6i_hwspinlock_ops,
+ SPINLOCK_BASE_ID, priv->nlocks);
+
+bank_fail:
+ clk_disable_unprepare(priv->ahb_clk);
+clk_fail:
+ reset_control_assert(priv->reset);
+
+ return err;
+}
+
+static const struct of_device_id sun6i_hwspinlock_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-hwspinlock", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_hwspinlock_ids);
+
+static struct platform_driver sun6i_hwspinlock_driver = {
+ .probe = sun6i_hwspinlock_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = sun6i_hwspinlock_ids,
+ },
+};
+module_platform_driver(sun6i_hwspinlock_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SUN6I hardware spinlock driver");
+MODULE_AUTHOR("Wilken Gottwalt <wilken.gottwalt@posteo.net>");
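
The driver only provides trylock/unlock against the SoC's lock registers; spinning, timeouts and IRQ handling come from the generic hwspinlock framework. A hedged consumer-side sketch follows, where lock id 0 and the 10 ms timeout are arbitrary choices made for illustration:

#include <linux/hwspinlock.h>

/* Illustrative consumer of the hwspinlock framework, not part of this driver. */
static int example_use_hwspinlock(void)
{
	struct hwspinlock *lock;
	unsigned long flags;
	int ret;

	lock = hwspin_lock_request_specific(0);
	if (!lock)
		return -EBUSY;

	/* Spin for at most 10 ms, with local IRQs disabled while held. */
	ret = hwspin_lock_timeout_irqsave(lock, 10, &flags);
	if (!ret) {
		/* ... access the resource shared with the other core ... */
		hwspin_unlock_irqrestore(lock, &flags);
	}

	hwspin_lock_free(lock);
	return ret;
}
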
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 6c68d34d956e..1002605db8ba 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -608,7 +608,7 @@ static struct coresight_device *
coresight_find_enabled_sink(struct coresight_device *csdev)
{
int i;
- struct coresight_device *sink;
+ struct coresight_device *sink = NULL;
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
@@ -886,7 +886,6 @@ void coresight_release_path(struct list_head *path)
}
kfree(path);
- path = NULL;
}
/* return true if the device is a suitable type for a default sink */
@@ -1392,7 +1391,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
}
}
- return 0;
+ return ret;
}
static int coresight_remove_match(struct device *dev, void *data)
@@ -1730,9 +1729,9 @@ char *coresight_alloc_device_name(struct coresight_dev_list *dict,
if (idx < 0) {
/* Make space for the new entry */
idx = dict->nr_idx;
- list = krealloc(dict->fwnode_list,
- (idx + 1) * sizeof(*dict->fwnode_list),
- GFP_KERNEL);
+ list = krealloc_array(dict->fwnode_list,
+ idx + 1, sizeof(*dict->fwnode_list),
+ GFP_KERNEL);
if (ZERO_OR_NULL_PTR(list)) {
idx = -ENOMEM;
goto done;
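
krealloc_array() is the overflow-checked counterpart of krealloc() for resizing arrays; as with krealloc(), a ZERO_OR_NULL_PTR result means the resize failed while the previous allocation is still valid, which is why the coresight code above keeps checking for it. A minimal sketch of that idiom, with illustrative names:

/* Sketch of overflow-safe array growth; "values" and "nr" are made-up names. */
static int example_grow(u32 **values, size_t nr)
{
	u32 *tmp;

	tmp = krealloc_array(*values, nr + 1, sizeof(**values), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(tmp))
		return -ENOMEM;	/* the old allocation is still valid here */

	*values = tmp;
	return 0;
}
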
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 2dcf13de751f..9731d3a96073 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/panic_notifier.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index db881993c211..da27cd4a3c38 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -568,11 +568,6 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
struct etmv4_config *config = &drvdata->config;
struct perf_event_attr *attr = &event->attr;
- if (!attr) {
- ret = -EINVAL;
- goto out;
- }
-
/* Clear configuration from previous run */
memset(config, 0, sizeof(struct etmv4_config));
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 45b85edfc690..cd0fb7bfba68 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -530,7 +530,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
- if (lost && *barrier) {
+ if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
*buf_ptr = *barrier;
barrier++;
}
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 24d0c974bfd5..66eed2dff818 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -100,16 +100,18 @@ static int intel_th_remove(struct device *dev)
struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
struct intel_th_device *thdev = to_intel_th_device(dev);
struct intel_th_device *hub = to_intel_th_hub(thdev);
- int err;
if (thdev->type == INTEL_TH_SWITCH) {
struct intel_th *th = to_intel_th(hub);
int i, lowest;
- /* disconnect outputs */
- err = device_for_each_child(dev, thdev, intel_th_child_remove);
- if (err)
- return err;
+ /*
+ * disconnect outputs
+ *
+ * intel_th_child_remove returns 0 unconditionally, so there is
+ * no need to check the return value of device_for_each_child.
+ */
+ device_for_each_child(dev, thdev, intel_th_child_remove);
/*
* Remove outputs, that is, hub's children: they are created
@@ -215,6 +217,22 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(port);
+static void intel_th_trace_prepare(struct intel_th_device *thdev)
+{
+ struct intel_th_device *hub = to_intel_th_hub(thdev);
+ struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+
+ if (hub->type != INTEL_TH_SWITCH)
+ return;
+
+ if (thdev->type != INTEL_TH_OUTPUT)
+ return;
+
+ pm_runtime_get_sync(&thdev->dev);
+ hubdrv->prepare(hub, &thdev->output);
+ pm_runtime_put(&thdev->dev);
+}
+
static int intel_th_output_activate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
@@ -235,6 +253,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
if (ret)
goto fail_put;
+ intel_th_trace_prepare(thdev);
if (thdrv->activate)
ret = thdrv->activate(thdev);
else
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 28509b02a0b5..b3308934a687 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -564,6 +564,21 @@ static void gth_tscu_resync(struct gth_device *gth)
iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
}
+static void intel_th_gth_prepare(struct intel_th_device *thdev,
+ struct intel_th_output *output)
+{
+ struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+ int count;
+
+ /*
+ * Wait until the output port is in reset before we start
+ * programming it.
+ */
+ for (count = GTH_PLE_WAITLOOP_DEPTH;
+ count && !(gth_output_get(gth, output->port) & BIT(5)); count--)
+ cpu_relax();
+}
+
/**
* intel_th_gth_enable() - enable tracing to an output device
* @thdev: GTH device
@@ -815,6 +830,7 @@ static struct intel_th_driver intel_th_gth_driver = {
.assign = intel_th_gth_assign,
.unassign = intel_th_gth_unassign,
.set_output = intel_th_gth_set_output,
+ .prepare = intel_th_gth_prepare,
.enable = intel_th_gth_enable,
.trig_switch = intel_th_gth_switch,
.disable = intel_th_gth_disable,
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 89c67e0e1d34..0ffb42990175 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -143,6 +143,7 @@ intel_th_output_assigned(struct intel_th_device *thdev)
* @remove: remove method
* @assign: match a given output type device against available outputs
* @unassign: deassociate an output type device from an output port
+ * @prepare: prepare output port for tracing
* @enable: enable tracing for a given output device
* @disable: disable tracing for a given output device
* @irq: interrupt callback
@@ -164,6 +165,8 @@ struct intel_th_driver {
struct intel_th_device *othdev);
void (*unassign)(struct intel_th_device *thdev,
struct intel_th_device *othdev);
+ void (*prepare)(struct intel_th_device *thdev,
+ struct intel_th_output *output);
void (*enable)(struct intel_th_device *thdev,
struct intel_th_output *output);
void (*trig_switch)(struct intel_th_device *thdev,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 2edc4666633d..432ade0842f6 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1024,33 +1024,49 @@ err_nomem:
}
#ifdef CONFIG_X86
-static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
+static void msc_buffer_set_uc(struct msc *msc)
{
struct scatterlist *sg_ptr;
+ struct msc_window *win;
int i;
- for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
- /* Set the page as uncached */
- set_memory_uc((unsigned long)sg_virt(sg_ptr),
- PFN_DOWN(sg_ptr->length));
+ if (msc->mode == MSC_MODE_SINGLE) {
+ set_memory_uc((unsigned long)msc->base, msc->nr_pages);
+ return;
+ }
+
+ list_for_each_entry(win, &msc->win_list, entry) {
+ for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
+ /* Set the page as uncached */
+ set_memory_uc((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
}
}
-static void msc_buffer_set_wb(struct msc_window *win)
+static void msc_buffer_set_wb(struct msc *msc)
{
struct scatterlist *sg_ptr;
+ struct msc_window *win;
int i;
- for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
- /* Reset the page to write-back */
- set_memory_wb((unsigned long)sg_virt(sg_ptr),
- PFN_DOWN(sg_ptr->length));
+ if (msc->mode == MSC_MODE_SINGLE) {
+ set_memory_wb((unsigned long)msc->base, msc->nr_pages);
+ return;
+ }
+
+ list_for_each_entry(win, &msc->win_list, entry) {
+ for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
+ /* Reset the page to write-back */
+ set_memory_wb((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
}
}
#else /* !X86 */
static inline void
-msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
-static inline void msc_buffer_set_wb(struct msc_window *win) {}
+msc_buffer_set_uc(struct msc *msc) {}
+static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */
/**
@@ -1097,8 +1113,6 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
if (ret <= 0)
goto err_nomem;
- msc_buffer_set_uc(win, ret);
-
win->nr_segs = ret;
win->nr_blocks = nr_blocks;
@@ -1152,8 +1166,6 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
msc->base_addr = 0;
}
- msc_buffer_set_wb(win);
-
if (msc->mbuf && msc->mbuf->free_window)
msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
else
@@ -1260,6 +1272,8 @@ static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
*/
static void msc_buffer_free(struct msc *msc)
{
+ msc_buffer_set_wb(msc);
+
if (msc->mode == MSC_MODE_SINGLE)
msc_buffer_contig_free(msc);
else if (msc->mode == MSC_MODE_MULTI)
@@ -1303,6 +1317,8 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
}
if (!ret) {
+ msc_buffer_set_uc(msc);
+
/* allocation should be visible before the counter goes to 0 */
smp_mb__before_atomic();
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index fb93152845f4..ee83c4581bce 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -508,6 +508,11 @@ static void ali1535_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1535_adapter);
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+
+ /*
+ * do not call pci_disable_device(dev) since it can cause hard hangs on
+ * some systems during power-off
+ */
}
static struct pci_driver ali1535_driver = {
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 724bf30600d6..67e8b97c0c95 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -727,10 +727,14 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
u32 addr_reg_val, func_ctrl_reg_val;
- /* Set slave addr. */
- addr_reg_val = readl(bus->base + ASPEED_I2C_DEV_ADDR_REG);
- addr_reg_val &= ~ASPEED_I2CD_DEV_ADDR_MASK;
- addr_reg_val |= slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
+ /*
+ * Set slave addr. Reserved bits can all safely be written with zeros
+ * on all of ast2[456]00, so zero everything else to ensure we only
+ * enable a single slave address (ast2500 has two, ast2600 has three,
+ * the enable bits for which are also in this register) so that we don't
+ * end up with additional phantom devices responding on the bus.
+ */
+ addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);
/* Turn on slave mode. */
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index cceaf69279a9..6304d1dd2dd6 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
disable_irq(iproc_i2c->irq);
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
/* disable all slave interrupts */
tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
IE_S_ALL_INTERRUPT_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
- tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
/* Erase the slave address programmed */
tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 66aafa7d1123..20aa3398e642 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -578,6 +578,11 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
{
unsigned int ctrl_reg;
unsigned int isr_status;
+ unsigned long flags;
+ bool hold_clear = false;
+ bool irq_save = false;
+
+ u32 addr;
id->p_recv_buf = id->p_msg->buf;
id->recv_count = id->p_msg->len;
@@ -618,14 +623,43 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
}
- /* Set the slave address in address register - triggers operation */
- cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
- CDNS_I2C_ADDR_OFFSET);
- /* Clear the bus hold flag if bytes to receive is less than FIFO size */
+ /* Determine hold_clear based on number of bytes to receive and hold flag */
if (!id->bus_hold_flag &&
- ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
- (id->recv_count <= CDNS_I2C_FIFO_DEPTH))
- cdns_i2c_clear_bus_hold(id);
+ ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
+ (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) {
+ if (cdns_i2c_readreg(CDNS_I2C_CR_OFFSET) & CDNS_I2C_CR_HOLD) {
+ hold_clear = true;
+ if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT)
+ irq_save = true;
+ }
+ }
+
+ addr = id->p_msg->addr;
+ addr &= CDNS_I2C_ADDR_MASK;
+
+ if (hold_clear) {
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET) & ~CDNS_I2C_CR_HOLD;
+ /*
+ * On the Xilinx Zynq SoC, the HOLD bit must be cleared before the transfer
+ * size register reaches '0'; an IP bug otherwise makes the transfer size
+ * register overflow to 0xFF. To satisfy this timing requirement, disable
+ * interrupts on the current processor core between the writes to the slave
+ * address register and the control register.
+ */
+ if (irq_save)
+ local_irq_save(flags);
+
+ cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET);
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+ /* Read it back to avoid buffering and make sure the write happens */
+ cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+
+ if (irq_save)
+ local_irq_restore(flags);
+ } else {
+ cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET);
+ }
+
cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
}
@@ -1217,11 +1251,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
"Cadence I2C at %08lx", (unsigned long)r_mem->start);
id->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(id->clk)) {
- if (PTR_ERR(id->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "input clock not found.\n");
- return PTR_ERR(id->clk);
- }
+ if (IS_ERR(id->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(id->clk),
+ "input clock not found.\n");
+
ret = clk_prepare_enable(id->clk);
if (ret)
dev_err(&pdev->dev, "Unable to enable clock.\n");
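
When the broken-HOLD-bit quirk is present, the two register writes (address, then control with HOLD cleared) have to land close together, so the hunk above wraps them in local_irq_save()/local_irq_restore() and reads the control register back to flush the posted write. A hedged sketch of that critical-section shape, with placeholder register offsets rather than the Cadence layout:

/*
 * Sketch of the timing-critical back-to-back register writes used above;
 * the offsets are hypothetical.
 */
static void example_timed_writes(void __iomem *base, u32 addr_val, u32 ctrl_val,
				 bool irq_save_needed)
{
	unsigned long flags = 0;

	if (irq_save_needed)
		local_irq_save(flags);	/* keep the two writes close together */

	writel(addr_val, base + 0x08);	/* hypothetical address register */
	writel(ctrl_val, base + 0x00);	/* hypothetical control register */
	readl(base + 0x00);		/* read back to flush the posted write */

	if (irq_save_needed)
		local_irq_restore(flags);
}
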
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 08f491ea21ac..1cf68f85b2e1 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -354,8 +354,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
return ret;
/* Alloc and register client IRQ */
- adap->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 1,
- &irq_domain_simple_ops, NULL);
+ adap->irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
if (!adap->irq_domain)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 232a7679b69b..e9d07323c604 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -768,10 +768,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
if (irq <= 0) {
if (!irq)
irq = -ENXIO;
- if (irq != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "can't get irq resource ret=%d\n", irq);
- return irq;
+ return dev_err_probe(&pdev->dev, irq, "can't get irq resource\n");
}
dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_i2c_dev),
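
Several conversions in this series (Cadence, DaVinci, xiic) replace the open-coded "only print if it is not -EPROBE_DEFER" check with dev_err_probe(), which stays quiet on deferral, records the deferral reason for debugfs, and returns the error code so the call can sit directly in a return statement. A minimal sketch of the pattern; the "example" clock name is purely illustrative:

/* Sketch of the dev_err_probe() idiom used in the conversions above. */
static int example_get_clk(struct platform_device *pdev, struct clk **clk)
{
	*clk = devm_clk_get(&pdev->dev, "example");
	if (IS_ERR(*clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
				     "input clock not found\n");
	return 0;
}
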
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 04a1e38f2a6f..aa3f60e69230 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -88,6 +88,8 @@
* See the file Documentation/i2c/busses/i2c-i801.rst for details.
*/
+#define DRV_NAME "i801_smbus"
+
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -103,7 +105,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>
@@ -131,8 +133,6 @@
/* PCI Address Constants */
#define SMBBAR 4
-#define SMBPCICTL 0x004
-#define SMBPCISTS 0x006
#define SMBHSTCFG 0x040
#define TCOBASE 0x050
#define TCOCTL 0x054
@@ -141,12 +141,6 @@
#define SBREG_SMBCTRL 0xc6000c
#define SBREG_SMBCTRL_DNV 0xcf000c
-/* Host status bits for SMBPCISTS */
-#define SMBPCISTS_INTS BIT(3)
-
-/* Control bits for SMBPCICTL */
-#define SMBPCICTL_INTDIS BIT(10)
-
/* Host configuration bits for SMBHSTCFG */
#define SMBHSTCFG_HST_EN BIT(0)
#define SMBHSTCFG_SMB_SMI_EN BIT(1)
@@ -164,9 +158,6 @@
#define SMBAUXCTL_CRC BIT(0)
#define SMBAUXCTL_E32B BIT(1)
-/* Other settings */
-#define MAX_RETRIES 400
-
/* I801 command constants */
#define I801_QUICK 0x00
#define I801_BYTE 0x04
@@ -270,7 +261,7 @@ struct i801_priv {
unsigned int features;
/* isr processing */
- wait_queue_head_t waitq;
+ struct completion done;
u8 status;
/* Command state used by isr for byte-by-byte block transactions */
@@ -453,67 +444,53 @@ static int i801_check_post(struct i801_priv *priv, int status)
/* Wait for BUSY being cleared and either INTR or an error flag being set */
static int i801_wait_intr(struct i801_priv *priv)
{
- int timeout = 0;
- int status;
+ unsigned long timeout = jiffies + priv->adapter.timeout;
+ int status, busy;
- /* We will always wait for a fraction of a second! */
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while (((status & SMBHSTSTS_HOST_BUSY) ||
- !(status & (STATUS_ERROR_FLAGS | SMBHSTSTS_INTR))) &&
- (timeout++ < MAX_RETRIES));
+ busy = status & SMBHSTSTS_HOST_BUSY;
+ status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR;
+ if (!busy && status)
+ return status;
+ } while (time_is_after_eq_jiffies(timeout));
- if (timeout > MAX_RETRIES) {
- dev_dbg(&priv->pci_dev->dev, "INTR Timeout!\n");
- return -ETIMEDOUT;
- }
- return status & (STATUS_ERROR_FLAGS | SMBHSTSTS_INTR);
+ return -ETIMEDOUT;
}
/* Wait for either BYTE_DONE or an error flag being set */
static int i801_wait_byte_done(struct i801_priv *priv)
{
- int timeout = 0;
+ unsigned long timeout = jiffies + priv->adapter.timeout;
int status;
- /* We will always wait for a fraction of a second! */
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while (!(status & (STATUS_ERROR_FLAGS | SMBHSTSTS_BYTE_DONE)) &&
- (timeout++ < MAX_RETRIES));
+ if (status & (STATUS_ERROR_FLAGS | SMBHSTSTS_BYTE_DONE))
+ return status & STATUS_ERROR_FLAGS;
+ } while (time_is_after_eq_jiffies(timeout));
- if (timeout > MAX_RETRIES) {
- dev_dbg(&priv->pci_dev->dev, "BYTE_DONE Timeout!\n");
- return -ETIMEDOUT;
- }
- return status & STATUS_ERROR_FLAGS;
+ return -ETIMEDOUT;
}
static int i801_transaction(struct i801_priv *priv, int xact)
{
int status;
- int result;
+ unsigned long result;
const struct i2c_adapter *adap = &priv->adapter;
- result = i801_check_pre(priv);
- if (result < 0)
- return result;
+ status = i801_check_pre(priv);
+ if (status < 0)
+ return status;
if (priv->features & FEATURE_IRQ) {
+ reinit_completion(&priv->done);
outb_p(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START,
SMBHSTCNT(priv));
- result = wait_event_timeout(priv->waitq,
- (status = priv->status),
- adap->timeout);
- if (!result) {
- status = -ETIMEDOUT;
- dev_warn(&priv->pci_dev->dev,
- "Timeout waiting for interrupt!\n");
- }
- priv->status = 0;
- return i801_check_post(priv, status);
+ result = wait_for_completion_timeout(&priv->done, adap->timeout);
+ return i801_check_post(priv, result ? priv->status : -ETIMEDOUT);
}
/* the current contents of SMBHSTCNT can be overwritten, since PEC,
@@ -638,7 +615,7 @@ static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
* DEV_ERR - Invalid command, NAK or communication timeout
* BUS_ERR - SMI# transaction collision
* FAILED - transaction was canceled due to a KILL request
- * When any of these occur, update ->status and wake up the waitq.
+ * When any of these occur, update ->status and signal completion.
* ->status must be cleared before kicking off the next transaction.
*
* 2) For byte-by-byte (I2C read/write) transactions, one BYTE_DONE interrupt
@@ -653,8 +630,8 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
u8 status;
/* Confirm this is our interrupt */
- pci_read_config_word(priv->pci_dev, SMBPCISTS, &pcists);
- if (!(pcists & SMBPCISTS_INTS))
+ pci_read_config_word(priv->pci_dev, PCI_STATUS, &pcists);
+ if (!(pcists & PCI_STATUS_INTERRUPT))
return IRQ_NONE;
if (priv->features & FEATURE_HOST_NOTIFY) {
@@ -675,7 +652,7 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
if (status) {
outb_p(status, SMBHSTSTS(priv));
priv->status = status;
- wake_up(&priv->waitq);
+ complete(&priv->done);
}
return IRQ_HANDLED;
@@ -694,15 +671,15 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
int i, len;
int smbcmd;
int status;
- int result;
+ unsigned long result;
const struct i2c_adapter *adap = &priv->adapter;
if (command == I2C_SMBUS_BLOCK_PROC_CALL)
return -EOPNOTSUPP;
- result = i801_check_pre(priv);
- if (result < 0)
- return result;
+ status = i801_check_pre(priv);
+ if (status < 0)
+ return status;
len = data->block[0];
@@ -726,17 +703,10 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
priv->count = 0;
priv->data = &data->block[1];
+ reinit_completion(&priv->done);
outb_p(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv));
- result = wait_event_timeout(priv->waitq,
- (status = priv->status),
- adap->timeout);
- if (!result) {
- status = -ETIMEDOUT;
- dev_warn(&priv->pci_dev->dev,
- "Timeout waiting for interrupt!\n");
- }
- priv->status = 0;
- return i801_check_post(priv, status);
+ result = wait_for_completion_timeout(&priv->done, adap->timeout);
+ return i801_check_post(priv, result ? priv->status : -ETIMEDOUT);
}
for (i = 1; i <= len; i++) {
@@ -1322,11 +1292,11 @@ static void i801_probe_optional_slaves(struct i801_priv *priv)
return;
if (apanel_addr) {
- struct i2c_board_info info;
+ struct i2c_board_info info = {
+ .addr = apanel_addr,
+ .type = "fujitsu_apanel",
+ };
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = apanel_addr;
- strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
i2c_new_client_device(&priv->adapter, &info);
}
@@ -1715,19 +1685,17 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
static inline void i801_acpi_remove(struct i801_priv *priv) { }
#endif
-static unsigned char i801_setup_hstcfg(struct i801_priv *priv)
+static void i801_setup_hstcfg(struct i801_priv *priv)
{
unsigned char hstcfg = priv->original_hstcfg;
hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
hstcfg |= SMBHSTCFG_HST_EN;
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
- return hstcfg;
}
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- unsigned char temp;
int err, i;
struct i801_priv *priv;
@@ -1838,8 +1806,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (i801_acpi_probe(priv))
return -ENODEV;
- err = pcim_iomap_regions(dev, 1 << SMBBAR,
- dev_driver_string(&dev->dev));
+ err = pcim_iomap_regions(dev, 1 << SMBBAR, DRV_NAME);
if (err) {
dev_err(&dev->dev,
"Failed to request SMBus region 0x%lx-0x%Lx\n",
@@ -1850,16 +1817,16 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg);
- temp = i801_setup_hstcfg(priv);
+ i801_setup_hstcfg(priv);
if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN))
dev_info(&dev->dev, "Enabling SMBus device\n");
- if (temp & SMBHSTCFG_SMB_SMI_EN) {
+ if (priv->original_hstcfg & SMBHSTCFG_SMB_SMI_EN) {
dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
- if (temp & SMBHSTCFG_SPD_WD)
+ if (priv->original_hstcfg & SMBHSTCFG_SPD_WD)
dev_info(&dev->dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
@@ -1881,24 +1848,23 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
u16 pcictl, pcists;
/* Complain if an interrupt is already pending */
- pci_read_config_word(priv->pci_dev, SMBPCISTS, &pcists);
- if (pcists & SMBPCISTS_INTS)
+ pci_read_config_word(priv->pci_dev, PCI_STATUS, &pcists);
+ if (pcists & PCI_STATUS_INTERRUPT)
dev_warn(&dev->dev, "An interrupt is pending!\n");
/* Check if interrupts have been disabled */
- pci_read_config_word(priv->pci_dev, SMBPCICTL, &pcictl);
- if (pcictl & SMBPCICTL_INTDIS) {
+ pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pcictl);
+ if (pcictl & PCI_COMMAND_INTX_DISABLE) {
dev_info(&dev->dev, "Interrupts are disabled\n");
priv->features &= ~FEATURE_IRQ;
}
}
if (priv->features & FEATURE_IRQ) {
- init_waitqueue_head(&priv->waitq);
+ init_completion(&priv->done);
err = devm_request_irq(&dev->dev, dev->irq, i801_isr,
- IRQF_SHARED,
- dev_driver_string(&dev->dev), priv);
+ IRQF_SHARED, DRV_NAME, priv);
if (err) {
dev_err(&dev->dev, "Failed to allocate irq %d: %d\n",
dev->irq, err);
@@ -1988,7 +1954,7 @@ static int i801_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(i801_pm_ops, i801_suspend, i801_resume);
static struct pci_driver i801_driver = {
- .name = "i801_smbus",
+ .name = DRV_NAME,
.id_table = i801_ids,
.probe = i801_probe,
.remove = i801_remove,
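
The interrupt path now follows the standard completion idiom: the transaction code reinitializes the completion before starting the transfer, the ISR stores the hardware status and calls complete(), and a zero return from wait_for_completion_timeout() is mapped to -ETIMEDOUT. A generic sketch of that pairing with hypothetical names and a dummy status value:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct example_ctx {
	struct completion done;
	u8 status;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_ctx *ctx = dev_id;

	ctx->status = 0x42;		/* would be read from hardware */
	complete(&ctx->done);
	return IRQ_HANDLED;
}

static int example_transaction(struct example_ctx *ctx, unsigned long timeout)
{
	reinit_completion(&ctx->done);	/* must precede starting the transfer */
	/* ... kick off the hardware transaction here ... */
	if (!wait_for_completion_timeout(&ctx->done, timeout))
		return -ETIMEDOUT;
	return ctx->status;
}
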
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index dc5ca71906db..d5b5f084a27d 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -170,11 +170,11 @@ enum imx_i2c_type {
struct imx_i2c_hwdata {
enum imx_i2c_type devtype;
- unsigned regshift;
+ unsigned int regshift;
struct imx_i2c_clk_pair *clk_div;
- unsigned ndivs;
- unsigned i2sr_clr_opcode;
- unsigned i2cr_ien_opcode;
+ unsigned int ndivs;
+ unsigned int i2sr_clr_opcode;
+ unsigned int i2cr_ien_opcode;
};
struct imx_i2c_dma {
@@ -452,8 +452,6 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a
unsigned long orig_jiffies = jiffies;
unsigned int temp;
- dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
-
while (1) {
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
@@ -599,8 +597,6 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic)
unsigned int temp = 0;
int result;
- dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
-
imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
/* Enable I2C controller */
imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
@@ -635,7 +631,6 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
if (!i2c_imx->stopped) {
/* Stop I2C transaction */
- dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
if (!(temp & I2CR_MSTA))
i2c_imx->stopped = 1;
@@ -1167,8 +1162,6 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
bool is_lastmsg = false;
struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
- dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
-
/* Start I2C transfer */
result = i2c_imx_start(i2c_imx, atomic);
if (result) {
@@ -1371,8 +1364,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
dma_addr_t phy_addr;
const struct imx_i2c_hwdata *match;
- dev_dbg(&pdev->dev, "<%s>\n", __func__);
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -1395,7 +1386,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
platform_get_device_id(pdev)->driver_data;
/* Setup i2c_imx driver structure */
- strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
+ strscpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
i2c_imx->adapter.owner = THIS_MODULE;
i2c_imx->adapter.algo = &i2c_imx_algo;
i2c_imx->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index dcca9c2396db..a6ea1eb1394e 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -635,6 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
status = readb(i2c->base + MPC_I2C_SR);
if (status & CSR_MIF) {
+ /* Wait up to 100us for transfer to properly complete */
+ readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
writeb(0, i2c->base + MPC_I2C_SR);
mpc_i2c_do_intr(i2c, status);
return IRQ_HANDLED;
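
readb_poll_timeout() from linux/iopoll.h re-reads the register into the supplied variable until the condition expression becomes true or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise; the hunk above uses it to bound the wait for the transfer state to settle before clearing the status register. A generic sketch with a made-up register offset and bit:

#include <linux/iopoll.h>

/* Sketch of readb_poll_timeout() usage; offset 0x0c and BIT(0) are invented. */
static int example_wait_ready(void __iomem *base)
{
	u8 val;

	/* Poll every 10 us, give up after 100 us. */
	return readb_poll_timeout(base + 0x0c, val, val & BIT(0), 10, 100);
}
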
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 4e9fb6b44436..4ca716e09149 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1225,6 +1225,13 @@ static int mtk_i2c_probe(struct platform_device *pdev)
i2c->adap.quirks = i2c->dev_comp->quirks;
i2c->adap.timeout = 2 * HZ;
i2c->adap.retries = 1;
+ i2c->adap.bus_regulator = devm_regulator_get_optional(&pdev->dev, "vbus");
+ if (IS_ERR(i2c->adap.bus_regulator)) {
+ if (PTR_ERR(i2c->adap.bus_regulator) == -ENODEV)
+ i2c->adap.bus_regulator = NULL;
+ else
+ return PTR_ERR(i2c->adap.bus_regulator);
+ }
ret = mtk_i2c_parse_dt(pdev->dev.of_node, i2c);
if (ret)
@@ -1286,7 +1293,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
- I2C_DRV_NAME, i2c);
+ dev_name(&pdev->dev), i2c);
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index c63d5545fc2a..c1de8eb66169 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -769,6 +769,7 @@ static const struct of_device_id cci_dt_match[] = {
{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
+ { .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 327c092a4130..bff9913c37b8 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1013,7 +1013,6 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
{ .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
- { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 }, /* Deprecated */
{ .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 4eccc0f69861..78b84445ee6a 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -42,8 +42,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#define RIIC_ICCR1 0x00
#define RIIC_ICCR2 0x04
@@ -86,6 +88,11 @@
#define RIIC_INIT_MSG -1
+enum riic_type {
+ RIIC_RZ_A,
+ RIIC_RZ_G2L,
+};
+
struct riic_dev {
void __iomem *base;
u8 *buf;
@@ -395,7 +402,9 @@ static int riic_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *res;
struct i2c_timings i2c_t;
+ struct reset_control *rstc;
int i, ret;
+ enum riic_type type;
riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
if (!riic)
@@ -412,6 +421,17 @@ static int riic_i2c_probe(struct platform_device *pdev)
return PTR_ERR(riic->clk);
}
+ type = (enum riic_type)of_device_get_match_data(&pdev->dev);
+ if (type == RIIC_RZ_G2L) {
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc)) {
+ dev_err(&pdev->dev, "Error: missing reset ctrl\n");
+ return PTR_ERR(rstc);
+ }
+
+ reset_control_deassert(rstc);
+ }
+
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num);
if (!res)
@@ -472,7 +492,8 @@ static int riic_i2c_remove(struct platform_device *pdev)
}
static const struct of_device_id riic_i2c_dt_ids[] = {
- { .compatible = "renesas,riic-rz" },
+ { .compatible = "renesas,riic-r9a07g044", .data = (void *)RIIC_RZ_G2L },
+ { .compatible = "renesas,riic-rz", .data = (void *)RIIC_RZ_A },
{ /* Sentinel */ },
};
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 0138317ea600..b9b19a2a2ffa 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -51,6 +51,7 @@
/* STM32F7 I2C control 1 */
#define STM32F7_I2C_CR1_PECEN BIT(23)
+#define STM32F7_I2C_CR1_ALERTEN BIT(22)
#define STM32F7_I2C_CR1_SMBHEN BIT(20)
#define STM32F7_I2C_CR1_WUPEN BIT(18)
#define STM32F7_I2C_CR1_SBC BIT(16)
@@ -125,6 +126,7 @@
(((n) & STM32F7_I2C_ISR_ADDCODE_MASK) >> 17)
#define STM32F7_I2C_ISR_DIR BIT(16)
#define STM32F7_I2C_ISR_BUSY BIT(15)
+#define STM32F7_I2C_ISR_ALERT BIT(13)
#define STM32F7_I2C_ISR_PECERR BIT(11)
#define STM32F7_I2C_ISR_ARLO BIT(9)
#define STM32F7_I2C_ISR_BERR BIT(8)
@@ -138,6 +140,7 @@
#define STM32F7_I2C_ISR_TXE BIT(0)
/* STM32F7 I2C Interrupt Clear */
+#define STM32F7_I2C_ICR_ALERTCF BIT(13)
#define STM32F7_I2C_ICR_PECCF BIT(11)
#define STM32F7_I2C_ICR_ARLOCF BIT(9)
#define STM32F7_I2C_ICR_BERRCF BIT(8)
@@ -279,6 +282,17 @@ struct stm32f7_i2c_msg {
};
/**
+ * struct stm32f7_i2c_alert - SMBus alert specific data
+ * @setup: platform data for the smbus_alert i2c client
+ * @ara: I2C slave device used to respond to the SMBus Alert with Alert
+ * Response Address
+ */
+struct stm32f7_i2c_alert {
+ struct i2c_smbus_alert_setup setup;
+ struct i2c_client *ara;
+};
+
+/**
* struct stm32f7_i2c_dev - private data of the controller
* @adap: I2C adapter for this controller
* @dev: device for this controller
@@ -310,6 +324,7 @@ struct stm32f7_i2c_msg {
* @analog_filter: boolean to indicate enabling of the analog filter
* @dnf_dt: value of digital filter requested via dt
* @dnf: value of digital filter to apply
+ * @alert: SMBus alert specific data
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -341,6 +356,7 @@ struct stm32f7_i2c_dev {
bool analog_filter;
u32 dnf_dt;
u32 dnf;
+ struct stm32f7_i2c_alert *alert;
};
/*
@@ -1624,6 +1640,13 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
f7_msg->result = -EINVAL;
}
+ if (status & STM32F7_I2C_ISR_ALERT) {
+ dev_dbg(dev, "<%s>: SMBus alert received\n", __func__);
+ writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR);
+ i2c_handle_smbus_alert(i2c_dev->alert->ara);
+ return IRQ_HANDLED;
+ }
+
if (!i2c_dev->slave_running) {
u32 mask;
/* Disable interrupts */
@@ -1990,6 +2013,42 @@ static void stm32f7_i2c_disable_smbus_host(struct stm32f7_i2c_dev *i2c_dev)
}
}
+static int stm32f7_i2c_enable_smbus_alert(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct stm32f7_i2c_alert *alert;
+ struct i2c_adapter *adap = &i2c_dev->adap;
+ struct device *dev = i2c_dev->dev;
+ void __iomem *base = i2c_dev->base;
+
+ alert = devm_kzalloc(dev, sizeof(*alert), GFP_KERNEL);
+ if (!alert)
+ return -ENOMEM;
+
+ alert->ara = i2c_new_smbus_alert_device(adap, &alert->setup);
+ if (IS_ERR(alert->ara))
+ return PTR_ERR(alert->ara);
+
+ i2c_dev->alert = alert;
+
+ /* Enable SMBus Alert */
+ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_ALERTEN);
+
+ return 0;
+}
+
+static void stm32f7_i2c_disable_smbus_alert(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct stm32f7_i2c_alert *alert = i2c_dev->alert;
+ void __iomem *base = i2c_dev->base;
+
+ if (alert) {
+ /* Disable SMBus Alert */
+ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_ALERTEN);
+ i2c_unregister_device(alert->ara);
+ }
+}
+
static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
{
struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -2173,6 +2232,16 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
}
+ if (of_property_read_bool(pdev->dev.of_node, "smbus-alert")) {
+ ret = stm32f7_i2c_enable_smbus_alert(i2c_dev);
+ if (ret) {
+ dev_err(i2c_dev->dev,
+ "failed to enable SMBus alert protocol (%d)\n",
+ ret);
+ goto i2c_disable_smbus_host;
+ }
+ }
+
dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr);
pm_runtime_mark_last_busy(i2c_dev->dev);
@@ -2180,6 +2249,9 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return 0;
+i2c_disable_smbus_host:
+ stm32f7_i2c_disable_smbus_host(i2c_dev);
+
i2c_adapter_remove:
i2c_del_adapter(adap);
@@ -2214,6 +2286,7 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
{
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ stm32f7_i2c_disable_smbus_alert(i2c_dev);
stm32f7_i2c_disable_smbus_host(i2c_dev);
i2c_del_adapter(&i2c_dev->adap);
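The alert handling above covers the adapter side only: on an SMBALERT# interrupt the ISR clears the flag and calls i2c_handle_smbus_alert() on the ARA client, after which the I2C core identifies the alerting slave and invokes the .alert() callback of its bound driver. A minimal, hypothetical client-driver sketch of that consumer side (all names invented for illustration):

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	return 0;
}

/* Invoked by the SMBus alert handler once this client has been identified
 * as the source of the SMBALERT# assertion. */
static void example_alert(struct i2c_client *client,
			  enum i2c_alert_protocol protocol, unsigned int data)
{
	if (protocol != I2C_PROTOCOL_SMBUS_ALERT)
		return;

	dev_info(&client->dev, "SMBus alert, status flag %u\n", data);
	/* Read and clear the device-specific fault/status registers here. */
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example-smbus-dev" },
	.probe_new	= example_probe,
	.alert		= example_alert,
};
module_i2c_driver(example_driver);
MODULE_LICENSE("GPL");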
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2a8568b97c14..bb93db98404e 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -798,11 +798,10 @@ static int xiic_i2c_probe(struct platform_device *pdev)
init_waitqueue_head(&i2c->wait);
i2c->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(i2c->clk)) {
- if (PTR_ERR(i2c->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "input clock not found.\n");
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "input clock not found.\n");
+
ret = clk_prepare_enable(i2c->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable clock.\n");
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5a97e4a02fa2..84f12bf90644 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -24,6 +24,7 @@
#include <linux/i2c-smbus.h>
#include <linux/idr.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
@@ -399,7 +400,8 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
static int i2c_init_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- char *err_str, *err_level = KERN_ERR;
+ bool is_error_level = true;
+ char *err_str;
if (!bri)
return 0;
@@ -409,7 +411,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
if (!bri->recover_bus) {
err_str = "no suitable method provided";
- err_level = KERN_DEBUG;
+ is_error_level = false;
goto err;
}
@@ -436,7 +438,10 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
return 0;
err:
- dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
+ if (is_error_level)
+ dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+ else
+ dev_dbg(&adap->dev, "Not using recovery: %s\n", err_str);
adap->bus_recovery_info = NULL;
return -EINVAL;
@@ -461,12 +466,14 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
static int i2c_device_probe(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_adapter *adap;
struct i2c_driver *driver;
int status;
if (!client)
return 0;
+ adap = client->adapter;
client->irq = client->init_irq;
if (!client->irq) {
@@ -532,6 +539,14 @@ static int i2c_device_probe(struct device *dev)
dev_dbg(dev, "probe\n");
+ if (adap->bus_regulator) {
+ status = regulator_enable(adap->bus_regulator);
+ if (status < 0) {
+ dev_err(&adap->dev, "Failed to enable bus regulator\n");
+ goto err_clear_wakeup_irq;
+ }
+ }
+
status = of_clk_set_defaults(dev->of_node, false);
if (status < 0)
goto err_clear_wakeup_irq;
@@ -589,8 +604,10 @@ put_sync_adapter:
static int i2c_device_remove(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_adapter *adap;
struct i2c_driver *driver;
+ adap = client->adapter;
driver = to_i2c_driver(dev->driver);
if (driver->remove) {
int status;
@@ -605,6 +622,8 @@ static int i2c_device_remove(struct device *dev)
devres_release_group(&client->dev, client->devres_group_id);
dev_pm_domain_detach(&client->dev, true);
+ if (!pm_runtime_status_suspended(&client->dev) && adap->bus_regulator)
+ regulator_disable(adap->bus_regulator);
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
@@ -617,6 +636,86 @@ static int i2c_device_remove(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int i2c_resume_early(struct device *dev)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ int err;
+
+ if (!client)
+ return 0;
+
+ if (pm_runtime_status_suspended(&client->dev) &&
+ client->adapter->bus_regulator) {
+ err = regulator_enable(client->adapter->bus_regulator);
+ if (err)
+ return err;
+ }
+
+ return pm_generic_resume_early(&client->dev);
+}
+
+static int i2c_suspend_late(struct device *dev)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ int err;
+
+ if (!client)
+ return 0;
+
+ err = pm_generic_suspend_late(&client->dev);
+ if (err)
+ return err;
+
+ if (!pm_runtime_status_suspended(&client->dev) &&
+ client->adapter->bus_regulator)
+ return regulator_disable(client->adapter->bus_regulator);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int i2c_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ int err;
+
+ if (!client)
+ return 0;
+
+ if (client->adapter->bus_regulator) {
+ err = regulator_enable(client->adapter->bus_regulator);
+ if (err)
+ return err;
+ }
+
+ return pm_generic_runtime_resume(&client->dev);
+}
+
+static int i2c_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ int err;
+
+ if (!client)
+ return 0;
+
+ err = pm_generic_runtime_suspend(&client->dev);
+ if (err)
+ return err;
+
+ if (client->adapter->bus_regulator)
+ return regulator_disable(client->adapter->bus_regulator);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops i2c_device_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_suspend_late, i2c_resume_early)
+ SET_RUNTIME_PM_OPS(i2c_runtime_suspend, i2c_runtime_resume, NULL)
+};
+
static void i2c_device_shutdown(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -627,6 +726,8 @@ static void i2c_device_shutdown(struct device *dev)
driver = to_i2c_driver(dev->driver);
if (driver->shutdown)
driver->shutdown(client);
+ else if (client->irq > 0)
+ disable_irq(client->irq);
}
static void i2c_client_dev_release(struct device *dev)
@@ -674,6 +775,7 @@ struct bus_type i2c_bus_type = {
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
+ .pm = &i2c_device_pm,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index d2d32c0fd8c3..e5b2d1465e7e 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -37,8 +37,15 @@ static u8 crc8(u16 data)
return (u8)(data >> 8);
}
-/* Incremental CRC8 over count bytes in the array pointed to by p */
-static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
+/**
+ * i2c_smbus_pec - Incremental CRC8 over the given input data array
+ * @crc: previous crc8 return value
+ * @p: pointer to data buffer.
+ * @count: number of bytes in data buffer.
+ *
+ * Incremental CRC8 over count bytes in the array pointed to by p
+ */
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
{
int i;
@@ -46,6 +53,7 @@ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
crc = crc8((crc ^ p[i]) << 8);
return crc;
}
+EXPORT_SYMBOL(i2c_smbus_pec);
/* Assume a 7-bit address, which is reasonable for SMBus */
static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg)
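Since i2c_smbus_pec() is now exported, external callers can accumulate a PEC byte incrementally, one buffer at a time, starting from zero. A small sketch of computing the PEC for an SMBus "write byte data" transaction (hypothetical helper; the prototype is assumed to be visible via <linux/i2c.h> as added elsewhere in this series):

#include <linux/i2c.h>
#include <linux/types.h>

/* PEC covers the address byte, the command code and the data byte, in bus
 * order. The 8-bit address byte is the 7-bit address shifted left with the
 * R/W# bit clear for a write. */
static u8 example_write_byte_pec(u8 addr7, u8 command, u8 data)
{
	u8 addr = addr7 << 1;
	u8 pec = 0;

	pec = i2c_smbus_pec(pec, &addr, 1);
	pec = i2c_smbus_pec(pec, &command, 1);
	pec = i2c_smbus_pec(pec, &data, 1);

	return pec;
}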
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cb64fe649390..77f576e51652 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
if (count > 8192)
count = 8192;
- tmp = kmalloc(count, GFP_KERNEL);
+ tmp = kzalloc(count, GFP_KERNEL);
if (tmp == NULL)
return -ENOMEM;
@@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
- ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+ if (copy_to_user(buf, tmp, ret))
+ ret = -EFAULT;
kfree(tmp);
return ret;
}
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 3f2226928fe0..5b37ffe5ad5b 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1379,6 +1379,8 @@ static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
case IBIR_TYPE_MR:
WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ break;
+
default:
break;
}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 1f6ba4221817..879e5a64acaf 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -195,7 +195,7 @@ struct svc_i3c_master {
};
/**
- * struct svc_i3c_i3c_dev_data - Device specific data
+ * struct svc_i3c_i2c_dev_data - Device specific data
* @index: Index in the master tables corresponding to this device
* @ibi: IBI slot index in the master structure
* @ibi_pool: IBI pool associated to this device
@@ -1448,7 +1448,6 @@ static int svc_i3c_master_remove(struct platform_device *pdev)
if (ret)
return ret;
- free_irq(master->irq, master);
clk_disable_unprepare(master->pclk);
clk_disable_unprepare(master->fclk);
clk_disable_unprepare(master->sclk);
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 8b1723635cce..8d8b1ba42ff8 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@ config ADXL372_I2C
module will be called adxl372_i2c.
config BMA180
- tristate "Bosch BMA023/BMA1x0/BMA25x 3-Axis Accelerometer Driver"
+ tristate "Bosch BMA023/BMA1x0/BMA250 3-Axis Accelerometer Driver"
depends on I2C && INPUT_BMA150=n
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build a driver for the Bosch BMA023, BMA150
- BMA180, SMB380, or BMA25x triaxial acceleration sensor.
+ BMA180, BMA250 or SMB380 triaxial acceleration sensor.
To compile this driver as a module, choose M here: the
module will be called bma180.
@@ -143,9 +143,12 @@ config BMC150_ACCEL
select BMC150_ACCEL_SPI if SPI
help
Say yes here to build support for the following Bosch accelerometers:
- BMC150, BMI055, BMA250E, BMA222E, BMA255, BMA280.
+ BMA222, BMA222E, BMA250E, BMA253, BMA254, BMA255, BMA280, BMC150, BMI055.
+
+ Note that some of these are combo modules:
+ - BMC150: accelerometer and magnetometer
+ - BMI055: accelerometer and gyroscope
- This is a combo module with both accelerometer and magnetometer.
This driver only implements the accelerometer part, which has
its own address and register map.
@@ -226,6 +229,35 @@ config DMARD10
Choosing M will build the driver as a module. If so, the module
will be called dmard10.
+config FXLS8962AF
+ tristate
+ depends on I2C || !I2C # cannot be built-in for modular I2C
+
+config FXLS8962AF_I2C
+ tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
+ depends on I2C
+ select FXLS8962AF
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the NXP 3-axis automotive
+ accelerometer FXLS8962AF/FXLS8964AF with I2C support.
+
+ To compile this driver as a module, choose M here: the module
+ will be called fxls8962af_i2c.
+
+config FXLS8962AF_SPI
+ tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver"
+ depends on SPI
+ depends on I2C || !I2C
+ select FXLS8962AF
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the NXP 3-axis automotive
+ accelerometer FXLS8962AF/FXLS8964AF with SPI support.
+
+ To compile this driver as a module, choose M here: the module
+ will be called fxls8962af_spi.
+
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -449,6 +481,19 @@ config SCA3000
To compile this driver as a module, say M here: the module will be
called sca3000.
+config SCA3300
+ tristate "Murata SCA3300 3-Axis Accelerometer Driver"
+ depends on SPI
+ select CRC8
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Murata SCA3300 3-Axis
+ accelerometer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sca3300.
+
config STK8312
tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 32cd1342a31a..89280e823bcd 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -27,6 +27,9 @@ obj-$(CONFIG_DA311) += da311.o
obj-$(CONFIG_DMARD06) += dmard06.o
obj-$(CONFIG_DMARD09) += dmard09.o
obj-$(CONFIG_DMARD10) += dmard10.o
+obj-$(CONFIG_FXLS8962AF) += fxls8962af-core.o
+obj-$(CONFIG_FXLS8962AF_I2C) += fxls8962af-i2c.o
+obj-$(CONFIG_FXLS8962AF_SPI) += fxls8962af-spi.o
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
obj-$(CONFIG_KXSD9) += kxsd9.o
@@ -50,6 +53,7 @@ obj-$(CONFIG_MXC4005) += mxc4005.o
obj-$(CONFIG_MXC6255) += mxc6255.o
obj-$(CONFIG_SCA3000) += sca3000.o
+obj-$(CONFIG_SCA3300) += sca3300.o
obj-$(CONFIG_STK8312) += stk8312.o
obj-$(CONFIG_STK8BA50) += stk8ba50.o
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index fe225990de24..7a434e2884d4 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -8,10 +8,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/imu/adis.h>
diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index 6c2d4a967de7..ac08e866d612 100644
--- a/drivers/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -7,11 +7,8 @@
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/imu/adis.h>
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index 9c9a896a872a..fc9592407717 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -1223,14 +1223,14 @@ int adxl372_probe(struct device *dev, struct regmap *regmap,
st->dready_trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (st->dready_trig == NULL)
return -ENOMEM;
st->peak_datardy_trig = devm_iio_trigger_alloc(dev,
"%s-dev%d-peak",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!st->peak_datardy_trig)
return -ENOMEM;
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index b8a7469cdae4..2edfcb4819b7 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -10,7 +10,6 @@
* BMA023/BMA150/SMB380: 7-bit I2C slave address 0x38
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
- * BMA254: 7-bit I2C slave address 0x18 or 0x19
*/
#include <linux/module.h>
@@ -38,7 +37,6 @@ enum chip_ids {
BMA150,
BMA180,
BMA250,
- BMA254,
};
struct bma180_data;
@@ -55,11 +53,10 @@ struct bma180_part_info {
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
- u8 bw_reg, bw_mask;
+ u8 bw_reg, bw_mask, bw_offset;
u8 scale_reg, scale_mask;
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
- u8 int_map_reg, int_enable_dataready_int1_mask;
u8 softreset_reg, softreset_val;
int (*chip_config)(struct bma180_data *data);
@@ -112,7 +109,6 @@ struct bma180_part_info {
#define BMA023_ID_REG_VAL 0x02
#define BMA180_ID_REG_VAL 0x03
#define BMA250_ID_REG_VAL 0x03
-#define BMA254_ID_REG_VAL 0xfa /* 250 decimal */
/* Chip power modes */
#define BMA180_LOW_POWER 0x03
@@ -127,29 +123,13 @@ struct bma180_part_info {
#define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
#define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
+#define BMA250_BW_OFFSET 8
#define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
#define BMA250_LOWPOWER_MASK BIT(6)
#define BMA250_DATA_INTEN_MASK BIT(4)
#define BMA250_INT1_DATA_MASK BIT(0)
#define BMA250_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
-#define BMA254_RANGE_REG 0x0f
-#define BMA254_BW_REG 0x10
-#define BMA254_POWER_REG 0x11
-#define BMA254_RESET_REG 0x14
-#define BMA254_INT_ENABLE_REG 0x17
-#define BMA254_INT_MAP_REG 0x1a
-#define BMA254_INT_RESET_REG 0x21
-
-#define BMA254_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
-#define BMA254_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
-#define BMA254_SUSPEND_MASK BIT(7) /* chip will sleep */
-#define BMA254_LOWPOWER_MASK BIT(6)
-#define BMA254_DATA_INTEN_MASK BIT(4)
-#define BMA254_INT2_DATA_MASK BIT(7)
-#define BMA254_INT1_DATA_MASK BIT(0)
-#define BMA254_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
-
struct bma180_data {
struct regulator *vdd_supply;
struct regulator *vddio_supply;
@@ -162,7 +142,11 @@ struct bma180_data {
int scale;
int bw;
bool pmode;
- u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ s16 chan[4];
+ s64 timestamp __aligned(8);
+ } scan;
};
enum bma180_chan {
@@ -178,8 +162,8 @@ static int bma023_scale_table[] = { 2452, 4903, 9709, };
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
-static int bma25x_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
-static int bma25x_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
+static int bma250_bw_table[] = { 8, 16, 31, 63, 125, 250, 500, 1000 }; /* Hz */
+static int bma250_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
0, 0, 306458 };
static int bma180_get_data_reg(struct bma180_data *data, enum bma180_chan chan)
@@ -283,7 +267,8 @@ static int bma180_set_bw(struct bma180_data *data, int val)
for (i = 0; i < data->part_info->num_bw; ++i) {
if (data->part_info->bw_table[i] == val) {
ret = bma180_set_bits(data, data->part_info->bw_reg,
- data->part_info->bw_mask, i);
+ data->part_info->bw_mask,
+ i + data->part_info->bw_offset);
if (ret) {
dev_err(&data->client->dev,
"failed to set bandwidth\n");
@@ -425,7 +410,7 @@ err:
return ret;
}
-static int bma25x_chip_config(struct bma180_data *data)
+static int bma250_chip_config(struct bma180_data *data)
{
int ret = bma180_chip_init(data);
@@ -444,8 +429,7 @@ static int bma25x_chip_config(struct bma180_data *data)
* This enables dataready interrupt on the INT1 pin
* FIXME: support using the INT2 pin
*/
- ret = bma180_set_bits(data, data->part_info->int_map_reg,
- data->part_info->int_enable_dataready_int1_mask, 1);
+ ret = bma180_set_bits(data, BMA250_INT_MAP_REG, BMA250_INT1_DATA_MASK, 1);
if (ret)
goto err;
@@ -482,7 +466,7 @@ err:
dev_err(&data->client->dev, "failed to disable the chip\n");
}
-static void bma25x_chip_disable(struct bma180_data *data)
+static void bma250_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
goto err;
@@ -768,14 +752,6 @@ static const struct iio_chan_spec bma250_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(4),
};
-static const struct iio_chan_spec bma254_channels[] = {
- BMA180_ACC_CHANNEL(X, 12),
- BMA180_ACC_CHANNEL(Y, 12),
- BMA180_ACC_CHANNEL(Z, 12),
- BMA180_TEMP_CHANNEL,
- IIO_CHAN_SOFT_TIMESTAMP(4),
-};
-
static const struct bma180_part_info bma180_part_info[] = {
[BMA023] = {
.chip_id = BMA023_ID_REG_VAL,
@@ -865,10 +841,10 @@ static const struct bma180_part_info bma180_part_info[] = {
.chip_id = BMA250_ID_REG_VAL,
.channels = bma250_channels,
.num_channels = ARRAY_SIZE(bma250_channels),
- .scale_table = bma25x_scale_table,
- .num_scales = ARRAY_SIZE(bma25x_scale_table),
- .bw_table = bma25x_bw_table,
- .num_bw = ARRAY_SIZE(bma25x_bw_table),
+ .scale_table = bma250_scale_table,
+ .num_scales = ARRAY_SIZE(bma250_scale_table),
+ .bw_table = bma250_bw_table,
+ .num_bw = ARRAY_SIZE(bma250_bw_table),
.temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA250_INT_RESET_REG,
.int_reset_mask = BMA250_INT_RESET_MASK,
@@ -876,6 +852,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.sleep_mask = BMA250_SUSPEND_MASK,
.bw_reg = BMA250_BW_REG,
.bw_mask = BMA250_BW_MASK,
+ .bw_offset = BMA250_BW_OFFSET,
.scale_reg = BMA250_RANGE_REG,
.scale_mask = BMA250_RANGE_MASK,
.power_reg = BMA250_POWER_REG,
@@ -883,41 +860,10 @@ static const struct bma180_part_info bma180_part_info[] = {
.lowpower_val = 1,
.int_enable_reg = BMA250_INT_ENABLE_REG,
.int_enable_mask = BMA250_DATA_INTEN_MASK,
- .int_map_reg = BMA250_INT_MAP_REG,
- .int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
.softreset_reg = BMA250_RESET_REG,
.softreset_val = BMA180_RESET_VAL,
- .chip_config = bma25x_chip_config,
- .chip_disable = bma25x_chip_disable,
- },
- [BMA254] = {
- .chip_id = BMA254_ID_REG_VAL,
- .channels = bma254_channels,
- .num_channels = ARRAY_SIZE(bma254_channels),
- .scale_table = bma25x_scale_table,
- .num_scales = ARRAY_SIZE(bma25x_scale_table),
- .bw_table = bma25x_bw_table,
- .num_bw = ARRAY_SIZE(bma25x_bw_table),
- .temp_offset = 46, /* 0 LSB @ 23 degree C */
- .int_reset_reg = BMA254_INT_RESET_REG,
- .int_reset_mask = BMA254_INT_RESET_MASK,
- .sleep_reg = BMA254_POWER_REG,
- .sleep_mask = BMA254_SUSPEND_MASK,
- .bw_reg = BMA254_BW_REG,
- .bw_mask = BMA254_BW_MASK,
- .scale_reg = BMA254_RANGE_REG,
- .scale_mask = BMA254_RANGE_MASK,
- .power_reg = BMA254_POWER_REG,
- .power_mask = BMA254_LOWPOWER_MASK,
- .lowpower_val = 1,
- .int_enable_reg = BMA254_INT_ENABLE_REG,
- .int_enable_mask = BMA254_DATA_INTEN_MASK,
- .int_map_reg = BMA254_INT_MAP_REG,
- .int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
- .softreset_reg = BMA254_RESET_REG,
- .softreset_val = BMA180_RESET_VAL,
- .chip_config = bma25x_chip_config,
- .chip_disable = bma25x_chip_disable,
+ .chip_config = bma250_chip_config,
+ .chip_disable = bma250_chip_disable,
},
};
@@ -938,12 +884,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- ((s16 *)data->buff)[i++] = ret;
+ data->scan.chan[i++] = ret;
}
mutex_unlock(&data->mutex);
- iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -997,8 +943,7 @@ static int bma180_probe(struct i2c_client *client,
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
- ret = iio_read_mount_matrix(dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
@@ -1045,7 +990,7 @@ static int bma180_probe(struct i2c_client *client,
if (client->irq > 0) {
data->trig = iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->trig) {
ret = -ENOMEM;
goto err_chip_disable;
@@ -1158,7 +1103,6 @@ static const struct i2c_device_id bma180_ids[] = {
{ "bma150", BMA150 },
{ "bma180", BMA180 },
{ "bma250", BMA250 },
- { "bma254", BMA254 },
{ "smb380", BMA150 },
{ }
};
@@ -1183,10 +1127,6 @@ static const struct of_device_id bma180_of_match[] = {
.data = (void *)BMA250
},
{
- .compatible = "bosch,bma254",
- .data = (void *)BMA254
- },
- {
.compatible = "bosch,smb380",
.data = (void *)BMA150
},
@@ -1209,5 +1149,5 @@ module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA023/BMA1x0/BMA25x triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA023/BMA1x0/BMA250 triaxial acceleration sensor");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 36fc9876dbca..0622c7936499 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -63,7 +63,11 @@ static const int bma220_scale_table[][2] = {
struct bma220_data {
struct spi_device *spi_device;
struct mutex lock;
- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
+ struct {
+ s8 chans[3];
+ /* Ensure timestamp is naturally aligned. */
+ s64 timestamp __aligned(8);
+ } scan;
u8 tx_buf[2] ____cacheline_aligned;
};
@@ -94,12 +98,12 @@ static irqreturn_t bma220_trigger_handler(int irq, void *p)
mutex_lock(&data->lock);
data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
- ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
+ ret = spi_write_then_read(spi, data->tx_buf, 1, &data->scan.chans,
ARRAY_SIZE(bma220_channels) - 1);
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
mutex_unlock(&data->lock);
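Both accelerometer conversions above (bma180 and bma220) follow the same IIO pattern: the raw byte buffer is replaced by a scan structure whose s64 timestamp is forced onto an 8-byte boundary, because iio_push_to_buffers_with_timestamp() stores the timestamp at the first naturally aligned offset after the channel data. A stripped-down sketch of the pattern (hypothetical example_* names, three 16-bit channels):

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/types.h>

struct example_data {
	struct {
		s16 chans[3];
		/* Ensure the timestamp slot is naturally aligned */
		s64 timestamp __aligned(8);
	} scan;
};

static void example_push(struct iio_dev *indio_dev, struct example_data *data,
			 s64 time_ns)
{
	/* data->scan.chans[] is assumed to already hold the latest sample */
	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
}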
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index 7eeba80e32cb..21520e022a21 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -811,7 +811,7 @@ int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
if (ret)
return ret;
- ret = iio_read_mount_matrix(dev, "mount-matrix", &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 04d85ce34e9f..5ce384ebe6c7 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1,14 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
- * - BMC150
- * - BMI055
- * - BMA255
- * - BMA250E
- * - BMA222
- * - BMA222E
- * - BMA280
- *
+ * 3-axis accelerometer driver supporting many Bosch-Sensortec chips
* Copyright (c) 2014, Intel Corporation.
*/
@@ -157,59 +149,6 @@ struct bmc150_accel_chip_info {
const struct bmc150_scale_info scale_table[4];
};
-struct bmc150_accel_interrupt {
- const struct bmc150_accel_interrupt_info *info;
- atomic_t users;
-};
-
-struct bmc150_accel_trigger {
- struct bmc150_accel_data *data;
- struct iio_trigger *indio_trig;
- int (*setup)(struct bmc150_accel_trigger *t, bool state);
- int intr;
- bool enabled;
-};
-
-enum bmc150_accel_interrupt_id {
- BMC150_ACCEL_INT_DATA_READY,
- BMC150_ACCEL_INT_ANY_MOTION,
- BMC150_ACCEL_INT_WATERMARK,
- BMC150_ACCEL_INTERRUPTS,
-};
-
-enum bmc150_accel_trigger_id {
- BMC150_ACCEL_TRIGGER_DATA_READY,
- BMC150_ACCEL_TRIGGER_ANY_MOTION,
- BMC150_ACCEL_TRIGGERS,
-};
-
-struct bmc150_accel_data {
- struct regmap *regmap;
- struct regulator_bulk_data regulators[2];
- struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
- struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
- struct mutex mutex;
- u8 fifo_mode, watermark;
- s16 buffer[8];
- /*
- * Ensure there is sufficient space and correct alignment for
- * the timestamp if enabled
- */
- struct {
- __le16 channels[3];
- s64 ts __aligned(8);
- } scan;
- u8 bw_bits;
- u32 slope_dur;
- u32 slope_thres;
- u32 range;
- int ev_enable_state;
- int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
- const struct bmc150_accel_chip_info *chip_info;
- struct i2c_client *second_device;
- struct iio_mount_matrix orientation;
-};
-
static const struct {
int val;
int val2;
@@ -389,7 +328,7 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
} else {
pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
@@ -398,9 +337,6 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
if (ret < 0) {
dev_err(dev,
"Failed: %s for %d\n", __func__, on);
- if (on)
- pm_runtime_put_noidle(dev);
-
return ret;
}
@@ -439,8 +375,8 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
* Onda V80 plus
* Predia Basic Tablet
*/
-static bool bmc150_apply_acpi_orientation(struct device *dev,
- struct iio_mount_matrix *orientation)
+static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -450,9 +386,6 @@ static bool bmc150_apply_acpi_orientation(struct device *dev,
acpi_status status;
int i, j, val[3];
- if (!adev || !acpi_dev_hid_uid_match(adev, "BOSC0200", NULL))
- return false;
-
if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
alt_name = "ROMK";
label = "accel-base";
@@ -508,6 +441,33 @@ unknown_format:
kfree(buffer.pointer);
return false;
}
+
+static bool bmc150_apply_dual250e_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ if (strcmp(dev_name(dev), "i2c-DUAL250E:base") == 0)
+ indio_dev->label = "accel-base";
+ else
+ indio_dev->label = "accel-display";
+
+ return false; /* DUAL250E fwnodes have no mount matrix info */
+}
+
+static bool bmc150_apply_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev && acpi_dev_hid_uid_match(adev, "BOSC0200", NULL))
+ return bmc150_apply_bosc0200_acpi_orientation(dev, orientation);
+
+ if (adev && acpi_dev_hid_uid_match(adev, "DUAL250E", NULL))
+ return bmc150_apply_dual250e_acpi_orientation(dev, orientation);
+
+ return false;
+}
#else
static bool bmc150_apply_acpi_orientation(struct device *dev,
struct iio_mount_matrix *orientation)
@@ -1128,80 +1088,63 @@ static const struct iio_chan_spec bmc150_accel_channels[] =
static const struct iio_chan_spec bma280_accel_channels[] =
BMC150_ACCEL_CHANNELS(14);
+/*
+ * The range for the Bosch sensors is typically +-2g/4g/8g/16g, distributed
+ * over the number of bits (see above). The scale table can be calculated using
+ * (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
+ * e.g. for +-2g and 12 bits: (4 / 2^12) * 9.80665 m/s^2 = 0.0095768... m/s^2
+ * Multiply by 10^6 and round to get the values listed below.
+ */
static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
- [bmc150] = {
- .name = "BMC150A",
- .chip_id = 0xFA,
- .channels = bmc150_accel_channels,
- .num_channels = ARRAY_SIZE(bmc150_accel_channels),
- .scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
- {19122, BMC150_ACCEL_DEF_RANGE_4G},
- {38344, BMC150_ACCEL_DEF_RANGE_8G},
- {76590, BMC150_ACCEL_DEF_RANGE_16G} },
- },
- [bmi055] = {
- .name = "BMI055A",
- .chip_id = 0xFA,
- .channels = bmc150_accel_channels,
- .num_channels = ARRAY_SIZE(bmc150_accel_channels),
- .scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
- {19122, BMC150_ACCEL_DEF_RANGE_4G},
- {38344, BMC150_ACCEL_DEF_RANGE_8G},
- {76590, BMC150_ACCEL_DEF_RANGE_16G} },
- },
- [bma255] = {
- .name = "BMA0255",
- .chip_id = 0xFA,
- .channels = bmc150_accel_channels,
- .num_channels = ARRAY_SIZE(bmc150_accel_channels),
- .scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
- {19122, BMC150_ACCEL_DEF_RANGE_4G},
- {38344, BMC150_ACCEL_DEF_RANGE_8G},
- {76590, BMC150_ACCEL_DEF_RANGE_16G} },
- },
- [bma250e] = {
- .name = "BMA250E",
- .chip_id = 0xF9,
- .channels = bma250e_accel_channels,
- .num_channels = ARRAY_SIZE(bma250e_accel_channels),
- .scale_table = { {38344, BMC150_ACCEL_DEF_RANGE_2G},
- {76590, BMC150_ACCEL_DEF_RANGE_4G},
- {153277, BMC150_ACCEL_DEF_RANGE_8G},
- {306457, BMC150_ACCEL_DEF_RANGE_16G} },
- },
- [bma222] = {
+ {
.name = "BMA222",
.chip_id = 0x03,
.channels = bma222e_accel_channels,
.num_channels = ARRAY_SIZE(bma222e_accel_channels),
- /*
- * The datasheet page 17 says:
- * 15.6, 31.3, 62.5 and 125 mg per LSB.
- */
- .scale_table = { {156000, BMC150_ACCEL_DEF_RANGE_2G},
- {313000, BMC150_ACCEL_DEF_RANGE_4G},
- {625000, BMC150_ACCEL_DEF_RANGE_8G},
- {1250000, BMC150_ACCEL_DEF_RANGE_16G} },
+ .scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
+ {306458, BMC150_ACCEL_DEF_RANGE_4G},
+ {612916, BMC150_ACCEL_DEF_RANGE_8G},
+ {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
},
- [bma222e] = {
+ {
.name = "BMA222E",
.chip_id = 0xF8,
.channels = bma222e_accel_channels,
.num_channels = ARRAY_SIZE(bma222e_accel_channels),
- .scale_table = { {153277, BMC150_ACCEL_DEF_RANGE_2G},
- {306457, BMC150_ACCEL_DEF_RANGE_4G},
- {612915, BMC150_ACCEL_DEF_RANGE_8G},
+ .scale_table = { {153229, BMC150_ACCEL_DEF_RANGE_2G},
+ {306458, BMC150_ACCEL_DEF_RANGE_4G},
+ {612916, BMC150_ACCEL_DEF_RANGE_8G},
{1225831, BMC150_ACCEL_DEF_RANGE_16G} },
},
- [bma280] = {
- .name = "BMA0280",
+ {
+ .name = "BMA250E",
+ .chip_id = 0xF9,
+ .channels = bma250e_accel_channels,
+ .num_channels = ARRAY_SIZE(bma250e_accel_channels),
+ .scale_table = { {38307, BMC150_ACCEL_DEF_RANGE_2G},
+ {76614, BMC150_ACCEL_DEF_RANGE_4G},
+ {153229, BMC150_ACCEL_DEF_RANGE_8G},
+ {306458, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ {
+ .name = "BMA253/BMA254/BMA255/BMC150/BMI055",
+ .chip_id = 0xFA,
+ .channels = bmc150_accel_channels,
+ .num_channels = ARRAY_SIZE(bmc150_accel_channels),
+ .scale_table = { {9577, BMC150_ACCEL_DEF_RANGE_2G},
+ {19154, BMC150_ACCEL_DEF_RANGE_4G},
+ {38307, BMC150_ACCEL_DEF_RANGE_8G},
+ {76614, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ {
+ .name = "BMA280",
.chip_id = 0xFB,
.channels = bma280_accel_channels,
.num_channels = ARRAY_SIZE(bma280_accel_channels),
- .scale_table = { {2392, BMC150_ACCEL_DEF_RANGE_2G},
- {4785, BMC150_ACCEL_DEF_RANGE_4G},
- {9581, BMC150_ACCEL_DEF_RANGE_8G},
- {19152, BMC150_ACCEL_DEF_RANGE_16G} },
+ .scale_table = { {2394, BMC150_ACCEL_DEF_RANGE_2G},
+ {4788, BMC150_ACCEL_DEF_RANGE_4G},
+ {9577, BMC150_ACCEL_DEF_RANGE_8G},
+ {19154, BMC150_ACCEL_DEF_RANGE_16G} },
},
};
@@ -1470,9 +1413,9 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
struct bmc150_accel_trigger *t = &data->triggers[i];
t->indio_trig = devm_iio_trigger_alloc(dev,
- bmc150_accel_triggers[i].name,
+ bmc150_accel_triggers[i].name,
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!t->indio_trig) {
ret = -ENOMEM;
break;
@@ -1688,8 +1631,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->regmap = regmap;
if (!bmc150_apply_acpi_orientation(dev, &data->orientation)) {
- ret = iio_read_mount_matrix(dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
}
@@ -1807,26 +1749,6 @@ err_disable_regulators:
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);
-struct i2c_client *bmc150_get_second_device(struct i2c_client *client)
-{
- struct bmc150_accel_data *data = i2c_get_clientdata(client);
-
- if (!data)
- return NULL;
-
- return data->second_device;
-}
-EXPORT_SYMBOL_GPL(bmc150_get_second_device);
-
-void bmc150_set_second_device(struct i2c_client *client)
-{
- struct bmc150_accel_data *data = i2c_get_clientdata(client);
-
- if (data)
- data->second_device = client;
-}
-EXPORT_SYMBOL_GPL(bmc150_set_second_device);
-
int bmc150_accel_core_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -1836,7 +1758,6 @@ int bmc150_accel_core_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
@@ -1876,6 +1797,9 @@ static int bmc150_accel_resume(struct device *dev)
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);
+ if (data->resume_callback)
+ data->resume_callback(dev);
+
return 0;
}
#endif
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index 69f709319484..999495f0669d 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -1,14 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * 3-axis accelerometer driver supporting following I2C Bosch-Sensortec chips:
- * - BMC150
- * - BMI055
- * - BMA255
- * - BMA250E
- * - BMA222
- * - BMA222E
- * - BMA280
- *
+ * 3-axis accelerometer driver supporting many I2C Bosch-Sensortec chips
* Copyright (c) 2014, Intel Corporation.
*/
@@ -21,6 +13,164 @@
#include "bmc150-accel.h"
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bmc150_acpi_dual_accel_ids[] = {
+ {"BOSC0200"},
+ {"DUAL250E"},
+ { }
+};
+
+/*
+ * The DUAL250E ACPI device for 360° hinge type 2-in-1s with 1 accelerometer
+ * in the display and 1 in the hinge has an ACPI method (DSM) to tell the
+ * ACPI code about the angle between the 2 halves. This will make the ACPI
+ * code enable/disable the keyboard and touchpad. We need to call this to avoid
+ * the keyboard being disabled when the 2-in-1 is turned on or resumed while
+ * fully folded into tablet mode (which gets detected with a Hall sensor).
+ * If we don't call this then the keyboard won't work even when the 2-in-1 is
+ * changed to be used in laptop mode after the power-on / resume.
+ *
+ * This DSM takes 2 angles, selected by setting aux0 to 0 or 1; these presumably
+ * define the angle between the gravity vector measured by the accelerometer in
+ * the display (aux0=0) resp. the base (aux0=1) and some reference vector.
+ * The 2 angles get subtracted from each other so the reference vector does
+ * not matter and we can simply leave the second angle at 0.
+ */
+
+#define BMC150_DSM_GUID "7681541e-8827-4239-8d9d-36be7fe12542"
+#define DUAL250E_SET_ANGLE_FN_INDEX 3
+
+struct dual250e_set_angle_args {
+ u32 aux0;
+ u32 ang0;
+ u32 rawx;
+ u32 rawy;
+ u32 rawz;
+} __packed;
+
+static bool bmc150_acpi_set_angle_dsm(struct i2c_client *client, u32 aux0, u32 ang0)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ struct dual250e_set_angle_args args = {
+ .aux0 = aux0,
+ .ang0 = ang0,
+ };
+ union acpi_object args_obj, *obj;
+ guid_t guid;
+
+ if (!acpi_dev_hid_uid_match(adev, "DUAL250E", NULL))
+ return false;
+
+ guid_parse(BMC150_DSM_GUID, &guid);
+
+ if (!acpi_check_dsm(adev->handle, &guid, 0, BIT(DUAL250E_SET_ANGLE_FN_INDEX)))
+ return false;
+
+ /*
+ * Note this triggers the following warning:
+ * "ACPI Warning: \_SB.PCI0.I2C2.ACC1._DSM: Argument #4 type mismatch -
+ * Found [Buffer], ACPI requires [Package]"
+ * This is unavoidable since the _DSM implementation expects a "naked"
+ * buffer, so wrapping it in a package will _not_ work.
+ */
+ args_obj.type = ACPI_TYPE_BUFFER;
+ args_obj.buffer.length = sizeof(args);
+ args_obj.buffer.pointer = (u8 *)&args;
+
+ obj = acpi_evaluate_dsm(adev->handle, &guid, 0, DUAL250E_SET_ANGLE_FN_INDEX, &args_obj);
+ if (!obj) {
+ dev_err(&client->dev, "Failed to call DSM to enable keyboard and touchpad\n");
+ return false;
+ }
+
+ ACPI_FREE(obj);
+ return true;
+}
+
+static bool bmc150_acpi_enable_keyboard(struct i2c_client *client)
+{
+ /*
+ * The EC must see a change for it to re-enable the kbd, so first
+ * set the angle to 270° (tent/stand mode) and then change it to
+ * 90° (laptop mode).
+ */
+ if (!bmc150_acpi_set_angle_dsm(client, 0, 270))
+ return false;
+
+ /* The EC needs some time to notice the angle being changed */
+ msleep(100);
+
+ return bmc150_acpi_set_angle_dsm(client, 0, 90);
+}
+
+static void bmc150_acpi_resume_work(struct work_struct *work)
+{
+ struct bmc150_accel_data *data =
+ container_of(work, struct bmc150_accel_data, resume_work.work);
+
+ bmc150_acpi_enable_keyboard(data->second_device);
+}
+
+static void bmc150_acpi_resume_handler(struct device *dev)
+{
+ struct bmc150_accel_data *data = iio_priv(dev_get_drvdata(dev));
+
+ /*
+ * Delay the bmc150_acpi_enable_keyboard() call till after the system
+ * resume has completed, otherwise it will not work.
+ */
+ schedule_delayed_work(&data->resume_work, msecs_to_jiffies(1000));
+}
+
+/*
+ * Some acpi_devices describe 2 accelerometers in a single ACPI device;
+ * try instantiating a second i2c_client for an I2cSerialBusV2 ACPI resource
+ * with index 1.
+ */
+static void bmc150_acpi_dual_accel_probe(struct i2c_client *client)
+{
+ struct bmc150_accel_data *data = iio_priv(i2c_get_clientdata(client));
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ char dev_name[16];
+ struct i2c_board_info board_info = {
+ .type = "bmc150_accel",
+ .dev_name = dev_name,
+ .fwnode = client->dev.fwnode,
+ };
+
+ if (acpi_match_device_ids(adev, bmc150_acpi_dual_accel_ids))
+ return;
+
+ /*
+ * The 2nd accel sits in the base of 2-in-1s. The suffix is static, as
+ * there should never be more than 1 ACPI node with 2 accelerometers.
+ */
+ snprintf(dev_name, sizeof(dev_name), "%s:base", acpi_device_hid(adev));
+
+ board_info.irq = acpi_dev_gpio_irq_get(adev, 1);
+
+ data->second_device = i2c_acpi_new_device(&client->dev, 1, &board_info);
+
+ if (!IS_ERR(data->second_device) && bmc150_acpi_enable_keyboard(data->second_device)) {
+ INIT_DELAYED_WORK(&data->resume_work, bmc150_acpi_resume_work);
+ data->resume_callback = bmc150_acpi_resume_handler;
+ }
+}
+
+static void bmc150_acpi_dual_accel_remove(struct i2c_client *client)
+{
+ struct bmc150_accel_data *data = iio_priv(i2c_get_clientdata(client));
+
+ if (data->resume_callback)
+ cancel_delayed_work_sync(&data->resume_work);
+
+ i2c_unregister_device(data->second_device);
+}
+#else
+static void bmc150_acpi_dual_accel_probe(struct i2c_client *client) {}
+static void bmc150_acpi_dual_accel_remove(struct i2c_client *client) {}
+#endif
+
static int bmc150_accel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -30,7 +180,6 @@ static int bmc150_accel_probe(struct i2c_client *client,
i2c_check_functionality(client->adapter, I2C_FUNC_I2C) ||
i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK);
- struct acpi_device __maybe_unused *adev;
int ret;
regmap = devm_regmap_init_i2c(client, &bmc150_regmap_conf);
@@ -47,80 +196,62 @@ static int bmc150_accel_probe(struct i2c_client *client,
return ret;
/*
- * Some BOSC0200 acpi_devices describe 2 accelerometers in a single ACPI
- * device, try instantiating a second i2c_client for an I2cSerialBusV2
- * ACPI resource with index 1. The !id check avoids recursion when
- * bmc150_accel_probe() gets called for the second client.
+ * The !id check avoids recursion when probe() gets called
+ * for the second client.
*/
-#ifdef CONFIG_ACPI
- adev = ACPI_COMPANION(&client->dev);
- if (!id && adev && strcmp(acpi_device_hid(adev), "BOSC0200") == 0) {
- struct i2c_board_info board_info = {
- .type = "bmc150_accel",
- /*
- * The 2nd accel sits in the base of 2-in-1s. Note this
- * name is static, as there should never be more then 1
- * BOSC0200 ACPI node with 2 accelerometers in it.
- */
- .dev_name = "BOSC0200:base",
- .fwnode = client->dev.fwnode,
- .irq = -ENOENT,
- };
- struct i2c_client *second_dev;
-
- second_dev = i2c_acpi_new_device(&client->dev, 1, &board_info);
- if (!IS_ERR(second_dev))
- bmc150_set_second_device(second_dev);
- }
-#endif
+ if (!id && has_acpi_companion(&client->dev))
+ bmc150_acpi_dual_accel_probe(client);
return 0;
}
static int bmc150_accel_remove(struct i2c_client *client)
{
- struct i2c_client *second_dev = bmc150_get_second_device(client);
-
- i2c_unregister_device(second_dev);
+ bmc150_acpi_dual_accel_remove(client);
return bmc150_accel_core_remove(&client->dev);
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
- {"BSBA0150", bmc150},
- {"BMC150A", bmc150},
- {"BMI055A", bmi055},
- {"BMA0255", bma255},
- {"BMA250E", bma250e},
- {"BMA222", bma222},
- {"BMA222E", bma222e},
- {"BMA0280", bma280},
+ {"BMA0255"},
+ {"BMA0280"},
+ {"BMA222"},
+ {"BMA222E"},
+ {"BMA250E"},
+ {"BMC150A"},
+ {"BMI055A"},
{"BOSC0200"},
+ {"BSBA0150"},
+ {"DUAL250E"},
{ },
};
MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match);
static const struct i2c_device_id bmc150_accel_id[] = {
- {"bmc150_accel", bmc150},
- {"bmi055_accel", bmi055},
- {"bma255", bma255},
- {"bma250e", bma250e},
- {"bma222", bma222},
- {"bma222e", bma222e},
- {"bma280", bma280},
+ {"bma222"},
+ {"bma222e"},
+ {"bma250e"},
+ {"bma253"},
+ {"bma254"},
+ {"bma255"},
+ {"bma280"},
+ {"bmc150_accel"},
+ {"bmi055_accel"},
{}
};
MODULE_DEVICE_TABLE(i2c, bmc150_accel_id);
static const struct of_device_id bmc150_accel_of_match[] = {
- { .compatible = "bosch,bmc150_accel" },
- { .compatible = "bosch,bmi055_accel" },
- { .compatible = "bosch,bma255" },
- { .compatible = "bosch,bma250e" },
{ .compatible = "bosch,bma222" },
{ .compatible = "bosch,bma222e" },
+ { .compatible = "bosch,bma250e" },
+ { .compatible = "bosch,bma253" },
+ { .compatible = "bosch,bma254" },
+ { .compatible = "bosch,bma255" },
{ .compatible = "bosch,bma280" },
+ { .compatible = "bosch,bmc150_accel" },
+ { .compatible = "bosch,bmi055_accel" },
{ },
};
MODULE_DEVICE_TABLE(of, bmc150_accel_of_match);
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 74a8aee4f612..54b8c9c8068b 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -34,26 +34,27 @@ static int bmc150_accel_remove(struct spi_device *spi)
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
- {"BSBA0150", bmc150},
- {"BMC150A", bmc150},
- {"BMI055A", bmi055},
- {"BMA0255", bma255},
- {"BMA250E", bma250e},
- {"BMA222", bma222},
- {"BMA222E", bma222e},
- {"BMA0280", bma280},
+ {"BMA0255"},
+ {"BMA0280"},
+ {"BMA222"},
+ {"BMA222E"},
+ {"BMA250E"},
+ {"BMC150A"},
+ {"BMI055A"},
+ {"BSBA0150"},
{ },
};
MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match);
static const struct spi_device_id bmc150_accel_id[] = {
- {"bmc150_accel", bmc150},
- {"bmi055_accel", bmi055},
- {"bma255", bma255},
- {"bma250e", bma250e},
- {"bma222", bma222},
- {"bma222e", bma222e},
- {"bma280", bma280},
+ {"bma222"},
+ {"bma222e"},
+ {"bma250e"},
+ {"bma253"},
+ {"bma255"},
+ {"bma280"},
+ {"bmc150_accel"},
+ {"bmi055_accel"},
{}
};
MODULE_DEVICE_TABLE(spi, bmc150_accel_id);
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index 6024f15b9700..47121f070fe9 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -2,23 +2,75 @@
#ifndef _BMC150_ACCEL_H_
#define _BMC150_ACCEL_H_
+#include <linux/atomic.h>
+#include <linux/iio/iio.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
struct regmap;
+struct i2c_client;
+struct bmc150_accel_chip_info;
+struct bmc150_accel_interrupt_info;
+
+struct bmc150_accel_interrupt {
+ const struct bmc150_accel_interrupt_info *info;
+ atomic_t users;
+};
+
+struct bmc150_accel_trigger {
+ struct bmc150_accel_data *data;
+ struct iio_trigger *indio_trig;
+ int (*setup)(struct bmc150_accel_trigger *t, bool state);
+ int intr;
+ bool enabled;
+};
+
+enum bmc150_accel_interrupt_id {
+ BMC150_ACCEL_INT_DATA_READY,
+ BMC150_ACCEL_INT_ANY_MOTION,
+ BMC150_ACCEL_INT_WATERMARK,
+ BMC150_ACCEL_INTERRUPTS,
+};
+
+enum bmc150_accel_trigger_id {
+ BMC150_ACCEL_TRIGGER_DATA_READY,
+ BMC150_ACCEL_TRIGGER_ANY_MOTION,
+ BMC150_ACCEL_TRIGGERS,
+};
-enum {
- bmc150,
- bmi055,
- bma255,
- bma250e,
- bma222,
- bma222e,
- bma280,
+struct bmc150_accel_data {
+ struct regmap *regmap;
+ struct regulator_bulk_data regulators[2];
+ struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
+ struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
+ struct mutex mutex;
+ u8 fifo_mode, watermark;
+ s16 buffer[8];
+ /*
+ * Ensure there is sufficient space and correct alignment for
+ * the timestamp if enabled
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
+ u8 bw_bits;
+ u32 slope_dur;
+ u32 slope_thres;
+ u32 range;
+ int ev_enable_state;
+ int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
+ const struct bmc150_accel_chip_info *chip_info;
+ struct i2c_client *second_device;
+ void (*resume_callback)(struct device *dev);
+ struct delayed_work resume_work;
+ struct iio_mount_matrix orientation;
};
int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name, bool block_supported);
int bmc150_accel_core_remove(struct device *dev);
-struct i2c_client *bmc150_get_second_device(struct i2c_client *second_device);
-void bmc150_set_second_device(struct i2c_client *second_device);
extern const struct dev_pm_ops bmc150_accel_pm_ops;
extern const struct regmap_config bmc150_regmap_conf;
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index 12d00658e46f..a06dae5c971d 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -285,11 +285,17 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_TEMP:
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
ret = bmi088_accel_get_temp(data, val);
goto out_read_raw_pm_put;
case IIO_ACCEL:
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
goto out_read_raw_pm_put;
@@ -319,7 +325,10 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
*val = BMI088_ACCEL_TEMP_UNIT;
return IIO_VAL_INT;
case IIO_ACCEL:
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
ret = regmap_read(data->regmap,
BMI088_ACCEL_REG_ACC_RANGE, val);
if (ret)
@@ -334,7 +343,10 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_SAMP_FREQ:
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
ret = bmi088_accel_get_sample_freq(data, val, val2);
goto out_read_raw_pm_put;
default:
@@ -376,7 +388,10 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
ret = bmi088_accel_set_sample_freq(data, val);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -496,7 +511,6 @@ int bmi088_accel_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
- indio_dev->dev.parent = dev;
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->num_channels;
indio_dev->name = name ? name : data->chip_info->name;
@@ -531,7 +545,6 @@ int bmi088_accel_core_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
bmi088_accel_power_down(data);
return 0;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
new file mode 100644
index 000000000000..0019f1ea7df2
--- /dev/null
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -0,0 +1,968 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP FXLS8962AF/FXLS8964AF Accelerometer Core Driver
+ *
+ * Copyright 2021 Connected Cars A/S
+ *
+ * Datasheet:
+ * https://www.nxp.com/docs/en/data-sheet/FXLS8962AF.pdf
+ * https://www.nxp.com/docs/en/data-sheet/FXLS8964AF.pdf
+ *
+ * Errata:
+ * https://www.nxp.com/docs/en/errata/ES_FXLS8962AF.pdf
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
+
+#include "fxls8962af.h"
+
+#define FXLS8962AF_INT_STATUS 0x00
+#define FXLS8962AF_INT_STATUS_SRC_BOOT BIT(0)
+#define FXLS8962AF_INT_STATUS_SRC_BUF BIT(5)
+#define FXLS8962AF_INT_STATUS_SRC_DRDY BIT(7)
+#define FXLS8962AF_TEMP_OUT 0x01
+#define FXLS8962AF_VECM_LSB 0x02
+#define FXLS8962AF_OUT_X_LSB 0x04
+#define FXLS8962AF_OUT_Y_LSB 0x06
+#define FXLS8962AF_OUT_Z_LSB 0x08
+#define FXLS8962AF_BUF_STATUS 0x0b
+#define FXLS8962AF_BUF_STATUS_BUF_CNT GENMASK(5, 0)
+#define FXLS8962AF_BUF_STATUS_BUF_OVF BIT(6)
+#define FXLS8962AF_BUF_STATUS_BUF_WMRK BIT(7)
+#define FXLS8962AF_BUF_X_LSB 0x0c
+#define FXLS8962AF_BUF_Y_LSB 0x0e
+#define FXLS8962AF_BUF_Z_LSB 0x10
+
+#define FXLS8962AF_PROD_REV 0x12
+#define FXLS8962AF_WHO_AM_I 0x13
+
+#define FXLS8962AF_SYS_MODE 0x14
+#define FXLS8962AF_SENS_CONFIG1 0x15
+#define FXLS8962AF_SENS_CONFIG1_ACTIVE BIT(0)
+#define FXLS8962AF_SENS_CONFIG1_RST BIT(7)
+#define FXLS8962AF_SC1_FSR_MASK GENMASK(2, 1)
+#define FXLS8962AF_SC1_FSR_PREP(x) FIELD_PREP(FXLS8962AF_SC1_FSR_MASK, (x))
+#define FXLS8962AF_SC1_FSR_GET(x) FIELD_GET(FXLS8962AF_SC1_FSR_MASK, (x))
+
+#define FXLS8962AF_SENS_CONFIG2 0x16
+#define FXLS8962AF_SENS_CONFIG3 0x17
+#define FXLS8962AF_SC3_WAKE_ODR_MASK GENMASK(7, 4)
+#define FXLS8962AF_SC3_WAKE_ODR_PREP(x) FIELD_PREP(FXLS8962AF_SC3_WAKE_ODR_MASK, (x))
+#define FXLS8962AF_SC3_WAKE_ODR_GET(x) FIELD_GET(FXLS8962AF_SC3_WAKE_ODR_MASK, (x))
+#define FXLS8962AF_SENS_CONFIG4 0x18
+#define FXLS8962AF_SC4_INT_PP_OD_MASK BIT(1)
+#define FXLS8962AF_SC4_INT_PP_OD_PREP(x) FIELD_PREP(FXLS8962AF_SC4_INT_PP_OD_MASK, (x))
+#define FXLS8962AF_SC4_INT_POL_MASK BIT(0)
+#define FXLS8962AF_SC4_INT_POL_PREP(x) FIELD_PREP(FXLS8962AF_SC4_INT_POL_MASK, (x))
+#define FXLS8962AF_SENS_CONFIG5 0x19
+
+#define FXLS8962AF_WAKE_IDLE_LSB 0x1b
+#define FXLS8962AF_SLEEP_IDLE_LSB 0x1c
+#define FXLS8962AF_ASLP_COUNT_LSB 0x1e
+
+#define FXLS8962AF_INT_EN 0x20
+#define FXLS8962AF_INT_EN_BUF_EN BIT(6)
+#define FXLS8962AF_INT_PIN_SEL 0x21
+#define FXLS8962AF_INT_PIN_SEL_MASK GENMASK(7, 0)
+#define FXLS8962AF_INT_PIN_SEL_INT1 0x00
+#define FXLS8962AF_INT_PIN_SEL_INT2 GENMASK(7, 0)
+
+#define FXLS8962AF_OFF_X 0x22
+#define FXLS8962AF_OFF_Y 0x23
+#define FXLS8962AF_OFF_Z 0x24
+
+#define FXLS8962AF_BUF_CONFIG1 0x26
+#define FXLS8962AF_BC1_BUF_MODE_MASK GENMASK(6, 5)
+#define FXLS8962AF_BC1_BUF_MODE_PREP(x) FIELD_PREP(FXLS8962AF_BC1_BUF_MODE_MASK, (x))
+#define FXLS8962AF_BUF_CONFIG2 0x27
+#define FXLS8962AF_BUF_CONFIG2_BUF_WMRK GENMASK(5, 0)
+
+#define FXLS8962AF_ORIENT_STATUS 0x28
+#define FXLS8962AF_ORIENT_CONFIG 0x29
+#define FXLS8962AF_ORIENT_DBCOUNT 0x2a
+#define FXLS8962AF_ORIENT_BF_ZCOMP 0x2b
+#define FXLS8962AF_ORIENT_THS_REG 0x2c
+
+#define FXLS8962AF_SDCD_INT_SRC1 0x2d
+#define FXLS8962AF_SDCD_INT_SRC2 0x2e
+#define FXLS8962AF_SDCD_CONFIG1 0x2f
+#define FXLS8962AF_SDCD_CONFIG2 0x30
+#define FXLS8962AF_SDCD_OT_DBCNT 0x31
+#define FXLS8962AF_SDCD_WT_DBCNT 0x32
+#define FXLS8962AF_SDCD_LTHS_LSB 0x33
+#define FXLS8962AF_SDCD_UTHS_LSB 0x35
+
+#define FXLS8962AF_SELF_TEST_CONFIG1 0x37
+#define FXLS8962AF_SELF_TEST_CONFIG2 0x38
+
+#define FXLS8962AF_MAX_REG 0x38
+
+#define FXLS8962AF_DEVICE_ID 0x62
+#define FXLS8964AF_DEVICE_ID 0x84
+
+/* Raw temp channel offset */
+#define FXLS8962AF_TEMP_CENTER_VAL 25
+
+#define FXLS8962AF_AUTO_SUSPEND_DELAY_MS 2000
+
+#define FXLS8962AF_FIFO_LENGTH 32
+#define FXLS8962AF_SCALE_TABLE_LEN 4
+#define FXLS8962AF_SAMP_FREQ_TABLE_LEN 13
+
+static const int fxls8962af_scale_table[FXLS8962AF_SCALE_TABLE_LEN][2] = {
+ {0, IIO_G_TO_M_S_2(980000)},
+ {0, IIO_G_TO_M_S_2(1950000)},
+ {0, IIO_G_TO_M_S_2(3910000)},
+ {0, IIO_G_TO_M_S_2(7810000)},
+};
+
+static const int fxls8962af_samp_freq_table[FXLS8962AF_SAMP_FREQ_TABLE_LEN][2] = {
+ {3200, 0}, {1600, 0}, {800, 0}, {400, 0}, {200, 0}, {100, 0},
+ {50, 0}, {25, 0}, {12, 500000}, {6, 250000}, {3, 125000},
+ {1, 563000}, {0, 781000},
+};
+
+struct fxls8962af_chip_info {
+ const char *name;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ u8 chip_id;
+};
+
+struct fxls8962af_data {
+ struct regmap *regmap;
+ const struct fxls8962af_chip_info *chip_info;
+ struct regulator *vdd_reg;
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
+ int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
+ struct iio_mount_matrix orientation;
+ u8 watermark;
+};
+
+const struct regmap_config fxls8962af_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXLS8962AF_MAX_REG,
+};
+EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
+
+enum {
+ fxls8962af_idx_x,
+ fxls8962af_idx_y,
+ fxls8962af_idx_z,
+ fxls8962af_idx_ts,
+};
+
+enum fxls8962af_int_pin {
+ FXLS8962AF_PIN_INT1,
+ FXLS8962AF_PIN_INT2,
+};
+
+static int fxls8962af_power_on(struct fxls8962af_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ dev_err(dev, "failed to power on\n");
+
+ return ret;
+}
+
+static int fxls8962af_power_off(struct fxls8962af_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
+ if (ret)
+ dev_err(dev, "failed to power off\n");
+
+ return ret;
+}
+
+static int fxls8962af_standby(struct fxls8962af_data *data)
+{
+ return regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
+ FXLS8962AF_SENS_CONFIG1_ACTIVE, 0);
+}
+
+static int fxls8962af_active(struct fxls8962af_data *data)
+{
+ return regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
+ FXLS8962AF_SENS_CONFIG1_ACTIVE, 1);
+}
+
+static int fxls8962af_is_active(struct fxls8962af_data *data)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_SENS_CONFIG1, &reg);
+ if (ret)
+ return ret;
+
+ return reg & FXLS8962AF_SENS_CONFIG1_ACTIVE;
+}
+
+static int fxls8962af_get_out(struct fxls8962af_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ __le16 raw_val;
+ int is_active;
+ int ret;
+
+ is_active = fxls8962af_is_active(data);
+ if (!is_active) {
+ ret = fxls8962af_power_on(data);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &raw_val, (chan->scan_type.storagebits / 8));
+
+ if (!is_active)
+ fxls8962af_power_off(data);
+
+ if (ret) {
+ dev_err(dev, "failed to get out reg 0x%lx\n", chan->address);
+ return ret;
+ }
+
+ *val = sign_extend32(le16_to_cpu(raw_val),
+ chan->scan_type.realbits - 1);
+
+ return IIO_VAL_INT;
+}
+
+static int fxls8962af_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *vals = (int *)fxls8962af_scale_table;
+ *length = ARRAY_SIZE(fxls8962af_scale_table) * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = (int *)fxls8962af_samp_freq_table;
+ *length = ARRAY_SIZE(fxls8962af_samp_freq_table) * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxls8962af_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+}
+
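+/*
+ * Configuration registers can only be written while the device is in standby,
+ * so temporarily leave active mode around the update when necessary.
+ */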
+static int fxls8962af_update_config(struct fxls8962af_data *data, u8 reg,
+ u8 mask, u8 val)
+{
+ int ret;
+ int is_active;
+
+ is_active = fxls8962af_is_active(data);
+ if (is_active) {
+ ret = fxls8962af_standby(data);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, reg, mask, val);
+ if (ret)
+ return ret;
+
+ if (is_active) {
+ ret = fxls8962af_active(data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fxls8962af_set_full_scale(struct fxls8962af_data *data, u32 scale)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fxls8962af_scale_table); i++)
+ if (scale == fxls8962af_scale_table[i][1])
+ break;
+
+ if (i == ARRAY_SIZE(fxls8962af_scale_table))
+ return -EINVAL;
+
+ return fxls8962af_update_config(data, FXLS8962AF_SENS_CONFIG1,
+ FXLS8962AF_SC1_FSR_MASK,
+ FXLS8962AF_SC1_FSR_PREP(i));
+}
+
+static unsigned int fxls8962af_read_full_scale(struct fxls8962af_data *data,
+ int *val)
+{
+ int ret;
+ unsigned int reg;
+ u8 range_idx;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_SENS_CONFIG1, &reg);
+ if (ret)
+ return ret;
+
+ range_idx = FXLS8962AF_SC1_FSR_GET(reg);
+
+ *val = fxls8962af_scale_table[range_idx][1];
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int fxls8962af_set_samp_freq(struct fxls8962af_data *data, u32 val,
+ u32 val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fxls8962af_samp_freq_table); i++)
+ if (val == fxls8962af_samp_freq_table[i][0] &&
+ val2 == fxls8962af_samp_freq_table[i][1])
+ break;
+
+ if (i == ARRAY_SIZE(fxls8962af_samp_freq_table))
+ return -EINVAL;
+
+ return fxls8962af_update_config(data, FXLS8962AF_SENS_CONFIG3,
+ FXLS8962AF_SC3_WAKE_ODR_MASK,
+ FXLS8962AF_SC3_WAKE_ODR_PREP(i));
+}
+
+static unsigned int fxls8962af_read_samp_freq(struct fxls8962af_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ unsigned int reg;
+ u8 range_idx;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_SENS_CONFIG3, &reg);
+ if (ret)
+ return ret;
+
+ range_idx = FXLS8962AF_SC3_WAKE_ODR_GET(reg);
+
+ *val = fxls8962af_samp_freq_table[range_idx][0];
+ *val2 = fxls8962af_samp_freq_table[range_idx][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int fxls8962af_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_TEMP:
+ case IIO_ACCEL:
+ return fxls8962af_get_out(data, chan, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type != IIO_TEMP)
+ return -EINVAL;
+
+ *val = FXLS8962AF_TEMP_CENTER_VAL;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ return fxls8962af_read_full_scale(data, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return fxls8962af_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxls8962af_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val != 0)
+ return -EINVAL;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = fxls8962af_set_full_scale(data, val2);
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = fxls8962af_set_samp_freq(data, val, val2);
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxls8962af_set_watermark(struct iio_dev *indio_dev, unsigned val)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+
+ if (val > FXLS8962AF_FIFO_LENGTH)
+ val = FXLS8962AF_FIFO_LENGTH;
+
+ data->watermark = val;
+
+ return 0;
+}
+
+#define FXLS8962AF_CHANNEL(axis, reg, idx) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define FXLS8962AF_TEMP_CHANNEL { \
+ .type = IIO_TEMP, \
+ .address = FXLS8962AF_TEMP_OUT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET),\
+ .scan_index = -1, \
+ .scan_type = { \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
+}
+
+static const struct iio_chan_spec fxls8962af_channels[] = {
+ FXLS8962AF_CHANNEL(X, FXLS8962AF_OUT_X_LSB, fxls8962af_idx_x),
+ FXLS8962AF_CHANNEL(Y, FXLS8962AF_OUT_Y_LSB, fxls8962af_idx_y),
+ FXLS8962AF_CHANNEL(Z, FXLS8962AF_OUT_Z_LSB, fxls8962af_idx_z),
+ IIO_CHAN_SOFT_TIMESTAMP(fxls8962af_idx_ts),
+ FXLS8962AF_TEMP_CHANNEL,
+};
+
+static const struct fxls8962af_chip_info fxls_chip_info_table[] = {
+ [fxls8962af] = {
+ .chip_id = FXLS8962AF_DEVICE_ID,
+ .name = "fxls8962af",
+ .channels = fxls8962af_channels,
+ .num_channels = ARRAY_SIZE(fxls8962af_channels),
+ },
+ [fxls8964af] = {
+ .chip_id = FXLS8964AF_DEVICE_ID,
+ .name = "fxls8964af",
+ .channels = fxls8962af_channels,
+ .num_channels = ARRAY_SIZE(fxls8962af_channels),
+ },
+};
+
+static const struct iio_info fxls8962af_info = {
+ .read_raw = &fxls8962af_read_raw,
+ .write_raw = &fxls8962af_write_raw,
+ .write_raw_get_fmt = fxls8962af_write_raw_get_fmt,
+ .read_avail = fxls8962af_read_avail,
+ .hwfifo_set_watermark = fxls8962af_set_watermark,
+};
+
+static int fxls8962af_reset(struct fxls8962af_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG1,
+ FXLS8962AF_SENS_CONFIG1_RST,
+ FXLS8962AF_SENS_CONFIG1_RST);
+ if (ret)
+ return ret;
+
+	/* TBOOT1 and TBOOT2 specify that we have to wait between 1 ms and 17.7 ms */
+ ret = regmap_read_poll_timeout(data->regmap, FXLS8962AF_INT_STATUS, reg,
+ (reg & FXLS8962AF_INT_STATUS_SRC_BOOT),
+ 1000, 18000);
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "reset timeout, int_status = 0x%x\n", reg);
+
+ return ret;
+}
+
+static int __fxls8962af_fifo_set_mode(struct fxls8962af_data *data, bool onoff)
+{
+ int ret;
+
+	/* Set the buffer watermark level */
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_BUF_CONFIG2,
+ FXLS8962AF_BUF_CONFIG2_BUF_WMRK,
+ data->watermark);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, FXLS8962AF_BUF_CONFIG1,
+ FXLS8962AF_BC1_BUF_MODE_MASK,
+ FXLS8962AF_BC1_BUF_MODE_PREP(onoff));
+}
+
+static int fxls8962af_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return fxls8962af_power_on(iio_priv(indio_dev));
+}
+
+static int fxls8962af_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ int ret;
+
+ fxls8962af_standby(data);
+
+ /* Enable buffer interrupt */
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_INT_EN,
+ FXLS8962AF_INT_EN_BUF_EN,
+ FXLS8962AF_INT_EN_BUF_EN);
+ if (ret)
+ return ret;
+
+ ret = __fxls8962af_fifo_set_mode(data, true);
+
+ fxls8962af_active(data);
+
+ return ret;
+}
+
+static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ int ret;
+
+ fxls8962af_standby(data);
+
+ /* Disable buffer interrupt */
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_INT_EN,
+ FXLS8962AF_INT_EN_BUF_EN, 0);
+ if (ret)
+ return ret;
+
+ ret = __fxls8962af_fifo_set_mode(data, false);
+
+ fxls8962af_active(data);
+
+ return ret;
+}
+
+static int fxls8962af_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+
+ return fxls8962af_power_off(data);
+}
+
+static const struct iio_buffer_setup_ops fxls8962af_buffer_ops = {
+ .preenable = fxls8962af_buffer_preenable,
+ .postenable = fxls8962af_buffer_postenable,
+ .predisable = fxls8962af_buffer_predisable,
+ .postdisable = fxls8962af_buffer_postdisable,
+};
+
+static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data,
+ u16 *buffer, int samples,
+ int sample_length)
+{
+ int i, ret;
+
+ for (i = 0; i < samples; i++) {
+ ret = regmap_raw_read(data->regmap, FXLS8962AF_BUF_X_LSB,
+ &buffer[i * 3], sample_length);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
+ u16 *buffer, int samples)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int sample_length = 3 * sizeof(*buffer);
+ int total_length = samples * sample_length;
+ int ret;
+
+ if (i2c_verify_client(dev))
+ /*
+		 * Due to errata E3, "FIFO burst read operation error using I2C
+		 * interface", we have to avoid burst reads on I2C.
+ */
+ ret = fxls8962af_i2c_raw_read_errata3(data, buffer, samples,
+ sample_length);
+ else
+ ret = regmap_raw_read(data->regmap, FXLS8962AF_BUF_X_LSB, buffer,
+ total_length);
+
+ if (ret)
+ dev_err(dev, "Error transferring data from fifo: %d\n", ret);
+
+ return ret;
+}
+
+static int fxls8962af_fifo_flush(struct iio_dev *indio_dev)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ u16 buffer[FXLS8962AF_FIFO_LENGTH * 3];
+ uint64_t sample_period;
+ unsigned int reg;
+ int64_t tstamp;
+ int ret, i;
+ u8 count;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_BUF_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ if (reg & FXLS8962AF_BUF_STATUS_BUF_OVF) {
+ dev_err(dev, "Buffer overflow");
+ return -EOVERFLOW;
+ }
+
+ count = reg & FXLS8962AF_BUF_STATUS_BUF_CNT;
+ if (!count)
+ return 0;
+
+ data->old_timestamp = data->timestamp;
+ data->timestamp = iio_get_time_ns(indio_dev);
+
+ /*
+	 * Approximate a timestamp for each sample based on the sampling
+	 * frequency, the timestamp of the last sample and the number of samples.
+ */
+ sample_period = (data->timestamp - data->old_timestamp);
+ do_div(sample_period, count);
+ tstamp = data->timestamp - (count - 1) * sample_period;
+
+ ret = fxls8962af_fifo_transfer(data, buffer, count);
+ if (ret)
+ return ret;
+
+ /* Demux hw FIFO into kfifo. */
+ for (i = 0; i < count; i++) {
+ int j, bit;
+
+ j = 0;
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
+ sizeof(data->scan.channels[0]));
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ tstamp);
+
+ tstamp += sample_period;
+ }
+
+ return count;
+}
+
+static irqreturn_t fxls8962af_interrupt(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_INT_STATUS, &reg);
+ if (ret)
+ return IRQ_NONE;
+
+ if (reg & FXLS8962AF_INT_STATUS_SRC_BUF) {
+ ret = fxls8962af_fifo_flush(indio_dev);
+ if (ret)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void fxls8962af_regulator_disable(void *data_ptr)
+{
+ struct fxls8962af_data *data = data_ptr;
+
+ regulator_disable(data->vdd_reg);
+}
+
+static void fxls8962af_pm_disable(void *dev_ptr)
+{
+ struct device *dev = dev_ptr;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ fxls8962af_standby(iio_priv(indio_dev));
+}
+
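+/* Use INT2 if an "INT2" interrupt is described in the device tree, otherwise default to INT1 */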
+static void fxls8962af_get_irq(struct device_node *of_node,
+ enum fxls8962af_int_pin *pin)
+{
+ int irq;
+
+ irq = of_irq_get_byname(of_node, "INT2");
+ if (irq > 0) {
+ *pin = FXLS8962AF_PIN_INT2;
+ return;
+ }
+
+ *pin = FXLS8962AF_PIN_INT1;
+}
+
+static int fxls8962af_irq_setup(struct iio_dev *indio_dev, int irq)
+{
+ struct fxls8962af_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned long irq_type;
+ bool irq_active_high;
+ enum fxls8962af_int_pin int_pin;
+ u8 int_pin_sel;
+ int ret;
+
+ fxls8962af_get_irq(dev->of_node, &int_pin);
+ switch (int_pin) {
+ case FXLS8962AF_PIN_INT1:
+ int_pin_sel = FXLS8962AF_INT_PIN_SEL_INT1;
+ break;
+ case FXLS8962AF_PIN_INT2:
+ int_pin_sel = FXLS8962AF_INT_PIN_SEL_INT2;
+ break;
+ default:
+ dev_err(dev, "unsupported int pin selected\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_INT_PIN_SEL,
+ FXLS8962AF_INT_PIN_SEL_MASK, int_pin_sel);
+ if (ret)
+ return ret;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ irq_active_high = true;
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ irq_active_high = false;
+ break;
+ default:
+ dev_info(dev, "mode %lx unsupported\n", irq_type);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG4,
+ FXLS8962AF_SC4_INT_POL_MASK,
+ FXLS8962AF_SC4_INT_POL_PREP(irq_active_high));
+ if (ret)
+ return ret;
+
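+	/* An open-drain interrupt line may be shared with other devices */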
+ if (device_property_read_bool(dev, "drive-open-drain")) {
+ ret = regmap_update_bits(data->regmap, FXLS8962AF_SENS_CONFIG4,
+ FXLS8962AF_SC4_INT_PP_OD_MASK,
+ FXLS8962AF_SC4_INT_PP_OD_PREP(1));
+ if (ret)
+ return ret;
+
+ irq_type |= IRQF_SHARED;
+ }
+
+ return devm_request_threaded_irq(dev,
+ irq,
+ NULL, fxls8962af_interrupt,
+ irq_type | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+}
+
+int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
+{
+ struct fxls8962af_data *data;
+ struct iio_dev *indio_dev;
+ unsigned int reg;
+ int ret, i;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+
+ ret = iio_read_mount_matrix(dev, &data->orientation);
+ if (ret)
+ return ret;
+
+ data->vdd_reg = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->vdd_reg))
+ return dev_err_probe(dev, PTR_ERR(data->vdd_reg),
+ "Failed to get vdd regulator\n");
+
+ ret = regulator_enable(data->vdd_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable vdd regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, fxls8962af_regulator_disable, data);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, FXLS8962AF_WHO_AM_I, &reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(fxls_chip_info_table); i++) {
+ if (fxls_chip_info_table[i].chip_id == reg) {
+ data->chip_info = &fxls_chip_info_table[i];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(fxls_chip_info_table)) {
+ dev_err(dev, "failed to match device in table\n");
+ return -ENXIO;
+ }
+
+ indio_dev->channels = data->chip_info->channels;
+ indio_dev->num_channels = data->chip_info->num_channels;
+ indio_dev->name = data->chip_info->name;
+ indio_dev->info = &fxls8962af_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = fxls8962af_reset(data);
+ if (ret)
+ return ret;
+
+ if (irq) {
+ ret = fxls8962af_irq_setup(indio_dev, irq);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
+ INDIO_BUFFER_SOFTWARE,
+ &fxls8962af_buffer_ops);
+ if (ret)
+ return ret;
+ }
+
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, FXLS8962AF_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = devm_add_action_or_reset(dev, fxls8962af_pm_disable, dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(fxls8962af_core_probe);
+
+static int __maybe_unused fxls8962af_runtime_suspend(struct device *dev)
+{
+ struct fxls8962af_data *data = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ ret = fxls8962af_standby(data);
+ if (ret) {
+ dev_err(dev, "powering off device failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused fxls8962af_runtime_resume(struct device *dev)
+{
+ struct fxls8962af_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return fxls8962af_active(data);
+}
+
+const struct dev_pm_ops fxls8962af_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(fxls8962af_runtime_suspend,
+ fxls8962af_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_GPL(fxls8962af_pm_ops);
+
+MODULE_AUTHOR("Sean Nyekjaer <sean@geanix.com>");
+MODULE_DESCRIPTION("NXP FXLS8962AF/FXLS8964AF accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
new file mode 100644
index 000000000000..cfb004b20455
--- /dev/null
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver
+ *
+ * Copyright 2021 Connected Cars A/S
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "fxls8962af.h"
+
+static int fxls8962af_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to initialize i2c regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return fxls8962af_core_probe(&client->dev, regmap, client->irq);
+}
+
+static const struct i2c_device_id fxls8962af_id[] = {
+ { "fxls8962af", fxls8962af },
+ { "fxls8964af", fxls8964af },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, fxls8962af_id);
+
+static const struct of_device_id fxls8962af_of_match[] = {
+ { .compatible = "nxp,fxls8962af" },
+ { .compatible = "nxp,fxls8964af" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fxls8962af_of_match);
+
+static struct i2c_driver fxls8962af_driver = {
+ .driver = {
+ .name = "fxls8962af_i2c",
+ .of_match_table = fxls8962af_of_match,
+ .pm = &fxls8962af_pm_ops,
+ },
+ .probe_new = fxls8962af_probe,
+ .id_table = fxls8962af_id,
+};
+module_i2c_driver(fxls8962af_driver);
+
+MODULE_AUTHOR("Sean Nyekjaer <sean@geanix.com>");
+MODULE_DESCRIPTION("NXP FXLS8962AF/FXLS8964AF accelerometer i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
new file mode 100644
index 000000000000..57108d3d480b
--- /dev/null
+++ b/drivers/iio/accel/fxls8962af-spi.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver
+ *
+ * Copyright 2021 Connected Cars A/S
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+
+#include "fxls8962af.h"
+
+static int fxls8962af_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to initialize spi regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return fxls8962af_core_probe(&spi->dev, regmap, spi->irq);
+}
+
+static const struct of_device_id fxls8962af_spi_of_match[] = {
+ { .compatible = "nxp,fxls8962af" },
+ { .compatible = "nxp,fxls8964af" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fxls8962af_spi_of_match);
+
+static const struct spi_device_id fxls8962af_spi_id_table[] = {
+ { "fxls8962af", fxls8962af },
+ { "fxls8964af", fxls8964af },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, fxls8962af_spi_id_table);
+
+static struct spi_driver fxls8962af_driver = {
+ .driver = {
+ .name = "fxls8962af_spi",
+ .pm = &fxls8962af_pm_ops,
+ .of_match_table = fxls8962af_spi_of_match,
+ },
+ .probe = fxls8962af_probe,
+ .id_table = fxls8962af_spi_id_table,
+};
+module_spi_driver(fxls8962af_driver);
+
+MODULE_AUTHOR("Sean Nyekjaer <sean@geanix.com>");
+MODULE_DESCRIPTION("NXP FXLS8962AF/FXLS8964AF accelerometer spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
new file mode 100644
index 000000000000..b67572c3ef06
--- /dev/null
+++ b/drivers/iio/accel/fxls8962af.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Connected Cars A/S
+ */
+#ifndef _FXLS8962AF_H_
+#define _FXLS8962AF_H_
+
+struct regmap;
+struct device;
+
+enum {
+ fxls8962af,
+ fxls8964af,
+};
+
+int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
+int fxls8962af_core_remove(struct device *dev);
+
+extern const struct dev_pm_ops fxls8962af_pm_ops;
+extern const struct regmap_config fxls8962af_regmap_conf;
+
+#endif /* _FXLS8962AF_H_ */
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 2f9465cb382f..55cdca818b3b 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -6,13 +6,10 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -28,8 +25,11 @@ struct accel_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
- /* Reserve for 3 channels + padding + timestamp */
- u32 accel_val[ACCEL_3D_CHANNEL_MAX + 3];
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u32 accel_val[3];
+ s64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
@@ -245,8 +245,8 @@ static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
accel_state->timestamp = iio_get_time_ns(indio_dev);
hid_sensor_push_data(indio_dev,
- accel_state->accel_val,
- sizeof(accel_state->accel_val),
+ &accel_state->scan,
+ sizeof(accel_state->scan),
accel_state->timestamp);
accel_state->timestamp = 0;
@@ -271,7 +271,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
case HID_USAGE_SENSOR_ACCEL_Y_AXIS:
case HID_USAGE_SENSOR_ACCEL_Z_AXIS:
offset = usage_id - HID_USAGE_SENSOR_ACCEL_X_AXIS;
- accel_state->accel_val[CHANNEL_SCAN_INDEX_X + offset] =
+ accel_state->scan.accel_val[CHANNEL_SCAN_INDEX_X + offset] =
*(u32 *)raw_data;
ret = 0;
break;
@@ -462,3 +462,4 @@ module_platform_driver(hid_accel_3d_platform_driver);
MODULE_DESCRIPTION("HID Sensor Accel 3D");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index ff724bc17a45..a51fdd3c9b5b 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -51,13 +51,15 @@
#define KXTF9_REG_TILT_POS_CUR 0x10
#define KXTF9_REG_TILT_POS_PREV 0x11
#define KXTF9_REG_INT_SRC1 0x15
-#define KXCJK1013_REG_INT_SRC1 0x16 /* compatible, but called INT_SRC2 in KXTF9 ds */
+#define KXTF9_REG_INT_SRC2 0x16
+#define KXCJK1013_REG_INT_SRC1 0x16
#define KXCJK1013_REG_INT_SRC2 0x17
#define KXCJK1013_REG_STATUS_REG 0x18
#define KXCJK1013_REG_INT_REL 0x1A
#define KXCJK1013_REG_CTRL1 0x1B
#define KXTF9_REG_CTRL2 0x1C
-#define KXCJK1013_REG_CTRL2 0x1D /* mostly compatible, CTRL_REG3 in KTXF9 ds */
+#define KXTF9_REG_CTRL3 0x1D
+#define KXCJK1013_REG_CTRL2 0x1D
#define KXCJK1013_REG_INT_CTRL1 0x1E
#define KXCJK1013_REG_INT_CTRL2 0x1F
#define KXTF9_REG_INT_CTRL3 0x20
@@ -77,6 +79,45 @@
#define KXTF9_REG_HYST_SET 0x5F
#define KXCJK1013_REG_WAKE_THRES 0x6A
+/* Everything up to 0x11 is equal to KXCJK1013/KXTF9 above */
+#define KX023_REG_INS1 0x12
+#define KX023_REG_INS2 0x13
+#define KX023_REG_INS3 0x14
+#define KX023_REG_STAT 0x15
+#define KX023_REG_INT_REL 0x17
+#define KX023_REG_CNTL1 0x18
+#define KX023_REG_CNTL2 0x19
+#define KX023_REG_CNTL3 0x1A
+#define KX023_REG_ODCNTL 0x1B
+#define KX023_REG_INC1 0x1C
+#define KX023_REG_INC2 0x1D
+#define KX023_REG_INC3 0x1E
+#define KX023_REG_INC4 0x1F
+#define KX023_REG_INC5 0x20
+#define KX023_REG_INC6 0x21
+#define KX023_REG_TILT_TIMER 0x22
+#define KX023_REG_WUFC 0x23
+#define KX023_REG_TDTRC 0x24
+#define KX023_REG_TDTC 0x25
+#define KX023_REG_TTH 0x26
+#define KX023_REG_TTL 0x27
+#define KX023_REG_FTD 0x28
+#define KX023_REG_STD 0x29
+#define KX023_REG_TLT 0x2A
+#define KX023_REG_TWS 0x2B
+#define KX023_REG_ATH 0x30
+#define KX023_REG_TILT_ANGLE_LL 0x32
+#define KX023_REG_TILT_ANGLE_HL 0x33
+#define KX023_REG_HYST_SET 0x34
+#define KX023_REG_LP_CNTL 0x35
+#define KX023_REG_BUF_CNTL1 0x3A
+#define KX023_REG_BUF_CNTL2 0x3B
+#define KX023_REG_BUF_STATUS_1 0x3C
+#define KX023_REG_BUF_STATUS_2 0x3D
+#define KX023_REG_BUF_CLEAR 0x3E
+#define KX023_REG_BUF_READ 0x3F
+#define KX023_REG_SELF_TEST 0x60
+
#define KXCJK1013_REG_CTRL1_BIT_PC1 BIT(7)
#define KXCJK1013_REG_CTRL1_BIT_RES BIT(6)
#define KXCJK1013_REG_CTRL1_BIT_DRDY BIT(5)
@@ -117,6 +158,14 @@
#define KXCJK1013_REG_INT_SRC2_BIT_XP BIT(4)
#define KXCJK1013_REG_INT_SRC2_BIT_XN BIT(5)
+/* KX023 interrupt routing to INT1. INT2 can be configured with INC6 */
+#define KX023_REG_INC4_BFI1 BIT(6)
+#define KX023_REG_INC4_WMI1 BIT(5)
+#define KX023_REG_INC4_DRDY1 BIT(4)
+#define KX023_REG_INC4_TDTI1 BIT(2)
+#define KX023_REG_INC4_WUFI1 BIT(1)
+#define KX023_REG_INC4_TPI1 BIT(0)
+
#define KXCJK1013_DEFAULT_WAKE_THRES 1
enum kx_chipset {
@@ -124,6 +173,7 @@ enum kx_chipset {
KXCJ91008,
KXTJ21009,
KXTF9,
+ KX0231025,
KX_MAX_CHIPS /* this must be last */
};
@@ -133,6 +183,63 @@ enum kx_acpi_type {
ACPI_KIOX010A,
};
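+/* Register addresses that differ between the supported Kionix chip variants */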
+struct kx_chipset_regs {
+ u8 int_src1;
+ u8 int_src2;
+ u8 int_rel;
+ u8 ctrl1;
+ u8 wuf_ctrl;
+ u8 int_ctrl1;
+ u8 data_ctrl;
+ u8 wake_timer;
+ u8 wake_thres;
+};
+
+static const struct kx_chipset_regs kxcjk1013_regs = {
+ .int_src1 = KXCJK1013_REG_INT_SRC1,
+ .int_src2 = KXCJK1013_REG_INT_SRC2,
+ .int_rel = KXCJK1013_REG_INT_REL,
+ .ctrl1 = KXCJK1013_REG_CTRL1,
+ .wuf_ctrl = KXCJK1013_REG_CTRL2,
+ .int_ctrl1 = KXCJK1013_REG_INT_CTRL1,
+ .data_ctrl = KXCJK1013_REG_DATA_CTRL,
+ .wake_timer = KXCJK1013_REG_WAKE_TIMER,
+ .wake_thres = KXCJK1013_REG_WAKE_THRES,
+};
+
+static const struct kx_chipset_regs kxtf9_regs = {
+ /* .int_src1 was moved to INT_SRC2 on KXTF9 */
+ .int_src1 = KXTF9_REG_INT_SRC2,
+ /* .int_src2 is not available */
+ .int_rel = KXCJK1013_REG_INT_REL,
+ .ctrl1 = KXCJK1013_REG_CTRL1,
+ .wuf_ctrl = KXTF9_REG_CTRL3,
+ .int_ctrl1 = KXCJK1013_REG_INT_CTRL1,
+ .data_ctrl = KXCJK1013_REG_DATA_CTRL,
+ .wake_timer = KXCJK1013_REG_WAKE_TIMER,
+ .wake_thres = KXTF9_REG_WAKE_THRESH,
+};
+
+/* The registers have totally different names but the bits are compatible */
+static const struct kx_chipset_regs kx0231025_regs = {
+ .int_src1 = KX023_REG_INS2,
+ .int_src2 = KX023_REG_INS3,
+ .int_rel = KX023_REG_INT_REL,
+ .ctrl1 = KX023_REG_CNTL1,
+ .wuf_ctrl = KX023_REG_CNTL3,
+ .int_ctrl1 = KX023_REG_INC1,
+ .data_ctrl = KX023_REG_ODCNTL,
+ .wake_timer = KX023_REG_WUFC,
+ .wake_thres = KX023_REG_ATH,
+};
+
+enum kxcjk1013_axis {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+ AXIS_MAX
+};
+
struct kxcjk1013_data {
struct regulator_bulk_data regulators[2];
struct i2c_client *client;
@@ -140,7 +247,11 @@ struct kxcjk1013_data {
struct iio_trigger *motion_trig;
struct iio_mount_matrix orientation;
struct mutex mutex;
- s16 buffer[8];
+ /* Ensure timestamp naturally aligned */
+ struct {
+ s16 chans[AXIS_MAX];
+ s64 timestamp __aligned(8);
+ } scan;
u8 odr_bits;
u8 range;
int wake_thres;
@@ -152,13 +263,7 @@ struct kxcjk1013_data {
int64_t timestamp;
enum kx_chipset chipset;
enum kx_acpi_type acpi_type;
-};
-
-enum kxcjk1013_axis {
- AXIS_X,
- AXIS_Y,
- AXIS_Z,
- AXIS_MAX,
+ const struct kx_chipset_regs *regs;
};
enum kxcjk1013_mode {
@@ -268,6 +373,22 @@ static const struct {
{0x05, 5100},
{0x06, 2700},
},
+ /* KX023-1025 */
+ {
+		/* First 4 are not in the datasheet, taken from the KXTJ2-1009 */
+ {0x08, 1240000},
+ {0x09, 621000},
+ {0x0A, 309000},
+ {0x0B, 151000},
+ {0, 81000},
+ {0x01, 40000},
+ {0x02, 22000},
+ {0x03, 12000},
+ {0x04, 7000},
+ {0x05, 4400},
+ {0x06, 3000},
+ {0x07, 3000},
+ },
};
static const struct {
@@ -309,7 +430,7 @@ static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
{
int ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
return ret;
@@ -320,8 +441,7 @@ static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
else
ret |= KXCJK1013_REG_CTRL1_BIT_PC1;
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_CTRL1, ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
return ret;
@@ -335,7 +455,7 @@ static int kxcjk1013_get_mode(struct kxcjk1013_data *data,
{
int ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
return ret;
@@ -353,7 +473,7 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
{
int ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
return ret;
@@ -364,9 +484,7 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_CTRL1,
- ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
return ret;
@@ -400,7 +518,7 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
if (ret < 0)
return ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
return ret;
@@ -409,8 +527,7 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
/* Set 12 bit mode */
ret |= KXCJK1013_REG_CTRL1_BIT_RES;
- ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_CTRL1,
- ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl\n");
return ret;
@@ -421,7 +538,7 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
if (ret < 0)
return ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_DATA_CTRL);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->data_ctrl);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_data_ctrl\n");
return ret;
@@ -430,7 +547,7 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
data->odr_bits = ret;
/* Set up INT polarity */
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->int_ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
return ret;
@@ -441,13 +558,23 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
else
ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEA;
- ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
- ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->int_ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
return ret;
}
+ /* On KX023, route all used interrupts to INT1 for now */
+ if (data->chipset == KX0231025 && data->client->irq > 0) {
+ ret = i2c_smbus_write_byte_data(data->client, KX023_REG_INC4,
+ KX023_REG_INC4_DRDY1 |
+ KX023_REG_INC4_WUFI1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_inc4\n");
+ return ret;
+ }
+ }
+
ret = kxcjk1013_set_mode(data, OPERATION);
if (ret < 0)
return ret;
@@ -478,7 +605,7 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
int ret;
if (on)
- ret = pm_runtime_get_sync(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
else {
pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
@@ -486,8 +613,6 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: %s for %d\n", __func__, on);
- if (on)
- pm_runtime_put_noidle(&data->client->dev);
return ret;
}
#endif
@@ -497,10 +622,9 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
{
- int waketh_reg, ret;
+ int ret;
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_WAKE_TIMER,
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->wake_timer,
data->wake_dur);
if (ret < 0) {
dev_err(&data->client->dev,
@@ -508,9 +632,7 @@ static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
return ret;
}
- waketh_reg = data->chipset == KXTF9 ?
- KXTF9_REG_WAKE_THRESH : KXCJK1013_REG_WAKE_THRES;
- ret = i2c_smbus_write_byte_data(data->client, waketh_reg,
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->wake_thres,
data->wake_thres);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_wake_thres\n");
@@ -539,7 +661,7 @@ static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
if (ret < 0)
return ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->int_ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
return ret;
@@ -550,14 +672,13 @@ static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
else
ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
- ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
- ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->int_ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
return ret;
}
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
return ret;
@@ -568,8 +689,7 @@ static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
else
ret &= ~KXCJK1013_REG_CTRL1_BIT_WUFE;
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_CTRL1, ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
return ret;
@@ -599,7 +719,7 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
if (ret < 0)
return ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->int_ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
return ret;
@@ -610,14 +730,13 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
else
ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
- ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
- ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->int_ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
return ret;
}
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
return ret;
@@ -628,8 +747,7 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
else
ret &= ~KXCJK1013_REG_CTRL1_BIT_DRDY;
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_CTRL1, ret);
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->ctrl1, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
return ret;
@@ -701,7 +819,7 @@ static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_DATA_CTRL,
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->data_ctrl,
odr_setting->odr_bits);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing data_ctrl\n");
@@ -710,7 +828,7 @@ static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
data->odr_bits = odr_setting->odr_bits;
- ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_CTRL2,
+ ret = i2c_smbus_write_byte_data(data->client, data->regs->wuf_ctrl,
odr_setting->wuf_bits);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl2\n");
@@ -1094,12 +1212,12 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
KXCJK1013_REG_XOUT_L,
AXIS_MAX * 2,
- (u8 *)data->buffer);
+ (u8 *)data->scan.chans);
mutex_unlock(&data->mutex);
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
data->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -1113,7 +1231,7 @@ static void kxcjk1013_trig_reen(struct iio_trigger *trig)
struct kxcjk1013_data *data = iio_priv(indio_dev);
int ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_REL);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->int_rel);
if (ret < 0)
dev_err(&data->client->dev, "Error reading reg_int_rel\n");
}
@@ -1166,8 +1284,7 @@ static void kxcjk1013_report_motion_event(struct iio_dev *indio_dev)
{
struct kxcjk1013_data *data = iio_priv(indio_dev);
- int ret = i2c_smbus_read_byte_data(data->client,
- KXCJK1013_REG_INT_SRC2);
+ int ret = i2c_smbus_read_byte_data(data->client, data->regs->int_src2);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_int_src2\n");
return;
@@ -1234,7 +1351,7 @@ static irqreturn_t kxcjk1013_event_handler(int irq, void *private)
struct kxcjk1013_data *data = iio_priv(indio_dev);
int ret;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_SRC1);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->int_src1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_int_src1\n");
goto ack_intr;
@@ -1257,7 +1374,7 @@ ack_intr:
if (data->dready_trigger_on)
return IRQ_HANDLED;
- ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_REL);
+ ret = i2c_smbus_read_byte_data(data->client, data->regs->int_rel);
if (ret < 0)
dev_err(&data->client->dev, "Error reading reg_int_rel\n");
@@ -1338,8 +1455,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
} else {
data->active_high_intr = true; /* default polarity */
- ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
if (ret)
return ret;
}
@@ -1378,6 +1494,22 @@ static int kxcjk1013_probe(struct i2c_client *client,
} else
return -ENODEV;
+ switch (data->chipset) {
+ case KXCJK1013:
+ case KXCJ91008:
+ case KXTJ21009:
+ data->regs = &kxcjk1013_regs;
+ break;
+ case KXTF9:
+ data->regs = &kxtf9_regs;
+ break;
+ case KX0231025:
+ data->regs = &kx0231025_regs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
ret = kxcjk1013_chip_init(data);
if (ret < 0)
return ret;
@@ -1404,7 +1536,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig) {
ret = -ENOMEM;
goto err_poweroff;
@@ -1413,7 +1545,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
data->motion_trig = devm_iio_trigger_alloc(&client->dev,
"%s-any-motion-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->motion_trig) {
ret = -ENOMEM;
goto err_poweroff;
@@ -1485,7 +1617,6 @@ static int kxcjk1013_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
if (data->dready_trig) {
iio_triggered_buffer_cleanup(indio_dev);
@@ -1593,6 +1724,7 @@ static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcj91008", KXCJ91008},
{"kxtj21009", KXTJ21009},
{"kxtf9", KXTF9},
+ {"kx023-1025", KX0231025},
{"SMO8500", KXCJ91008},
{}
};
@@ -1604,6 +1736,7 @@ static const struct of_device_id kxcjk1013_of_match[] = {
{ .compatible = "kionix,kxcj91008", },
{ .compatible = "kionix,kxtj21009", },
{ .compatible = "kionix,kxtf9", },
+ { .compatible = "kionix,kx023-1025", },
{ }
};
MODULE_DEVICE_TABLE(of, kxcjk1013_of_match);
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 0e18b92e2099..bf7ed9e7d00f 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -420,7 +420,7 @@ int kxsd9_common_probe(struct device *dev,
indio_dev->available_scan_masks = kxsd9_scan_masks;
/* Read the mounting matrix, if present */
- ret = iio_read_mount_matrix(dev, "mount-matrix", &st->orientation);
+ ret = iio_read_mount_matrix(dev, &st->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 4d307dfb9169..715b8138fb71 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -221,7 +221,7 @@ static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
} else {
pm_runtime_mark_last_busy(&client->dev);
ret = pm_runtime_put_autosuspend(&client->dev);
@@ -230,8 +230,6 @@ static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
if (ret < 0) {
dev_err(&client->dev,
"failed to change power state to %d\n", on);
- if (on)
- pm_runtime_put_noidle(&client->dev);
return ret;
}
@@ -1461,7 +1459,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
trig = devm_iio_trigger_alloc(&data->client->dev, "%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!trig)
return -ENOMEM;
@@ -1711,7 +1709,6 @@ static int mma8452_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
iio_triggered_buffer_cleanup(indio_dev);
mma8452_trigger_cleanup(indio_dev);
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 08a2303cc9df..4c359fb05480 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -515,7 +515,6 @@ static int mma9551_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
mutex_lock(&data->mutex);
mma9551_set_device_state(data->client, false);
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 666e7a04a7d7..fbf2e2c45678 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -664,7 +664,7 @@ int mma9551_set_power_state(struct i2c_client *client, bool on)
int ret;
if (on)
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
else {
pm_runtime_mark_last_busy(&client->dev);
ret = pm_runtime_put_autosuspend(&client->dev);
@@ -673,8 +673,6 @@ int mma9551_set_power_state(struct i2c_client *client, bool on)
if (ret < 0) {
dev_err(&client->dev,
"failed to change power state to %d\n", on);
- if (on)
- pm_runtime_put_noidle(&client->dev);
return ret;
}
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index c15908faa381..ba3ecb3b57dc 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1154,7 +1154,6 @@ static int mma9553_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
mutex_lock(&data->mutex);
mma9551_set_device_state(data->client, false);
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index fb3cbaa62bd8..b3afbf064915 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -56,7 +56,11 @@ struct mxc4005_data {
struct mutex mutex;
struct regmap *regmap;
struct iio_trigger *dready_trig;
- __be16 buffer[8];
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ __be16 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
bool trigger_enabled;
};
@@ -135,7 +139,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
int ret;
ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
- data->buffer, sizeof(data->buffer));
+ data->scan.chans, sizeof(data->scan.chans));
if (ret < 0) {
dev_err(data->dev, "failed to read axes\n");
return ret;
@@ -301,7 +305,7 @@ static irqreturn_t mxc4005_trigger_handler(int irq, void *private)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
@@ -433,7 +437,7 @@ static int mxc4005_probe(struct i2c_client *client,
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig)
return -ENOMEM;
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
new file mode 100644
index 000000000000..f7ef8ecfd34a
--- /dev/null
+++ b/drivers/iio/accel/sca3300.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Murata SCA3300 3-axis industrial accelerometer
+ *
+ * Copyright (c) 2021 Vaisala Oyj. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define SCA3300_ALIAS "sca3300"
+
+#define SCA3300_CRC8_POLYNOMIAL 0x1d
+
+/* Device mode register */
+#define SCA3300_REG_MODE 0xd
+#define SCA3300_MODE_SW_RESET 0x20
+
+/* Last register in map */
+#define SCA3300_REG_SELBANK 0x1f
+
+/* Device status and mask */
+#define SCA3300_REG_STATUS 0x6
+#define SCA3300_STATUS_MASK GENMASK(8, 0)
+
+/* Device ID */
+#define SCA3300_REG_WHOAMI 0x10
+#define SCA3300_WHOAMI_ID 0x51
+
+/* Device return status and mask */
+#define SCA3300_VALUE_RS_ERROR 0x3
+#define SCA3300_MASK_RS_STATUS GENMASK(1, 0)
+
+enum sca3300_scan_indexes {
+ SCA3300_ACC_X = 0,
+ SCA3300_ACC_Y,
+ SCA3300_ACC_Z,
+ SCA3300_TEMP,
+ SCA3300_TIMESTAMP,
+};
+
+#define SCA3300_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_chan_spec sca3300_channels[] = {
+ SCA3300_ACCEL_CHANNEL(SCA3300_ACC_X, 0x1, X),
+ SCA3300_ACCEL_CHANNEL(SCA3300_ACC_Y, 0x2, Y),
+ SCA3300_ACCEL_CHANNEL(SCA3300_ACC_Z, 0x3, Z),
+ {
+ .type = IIO_TEMP,
+ .address = 0x5,
+ .scan_index = SCA3300_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
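+/* Low-pass 3dB frequency and scale for each operation mode, indexed by the MODE register value */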
+static const int sca3300_lp_freq[] = {70, 70, 70, 10};
+static const int sca3300_accel_scale[][2] = {{0, 370}, {0, 741}, {0, 185}, {0, 185}};
+
+static const unsigned long sca3300_scan_masks[] = {
+ BIT(SCA3300_ACC_X) | BIT(SCA3300_ACC_Y) | BIT(SCA3300_ACC_Z) |
+ BIT(SCA3300_TEMP),
+ 0
+};
+
+/**
+ * struct sca3300_data - device data
+ * @spi: SPI device structure
+ * @lock: Data buffer lock
+ * @scan: Triggered buffer: four 16-bit data channels plus a 64-bit timestamp
+ * @txbuf: Transmit buffer
+ * @rxbuf: Receive buffer
+ */
+struct sca3300_data {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct {
+ s16 channels[4];
+ s64 ts __aligned(sizeof(s64));
+ } scan;
+ u8 txbuf[4] ____cacheline_aligned;
+ u8 rxbuf[4];
+};
+
+DECLARE_CRC8_TABLE(sca3300_crc_table);
+
+static int sca3300_transfer(struct sca3300_data *sca_data, int *val)
+{
+	/* Consecutive requests need a minimum 10 us delay (datasheet section 5.1.2) */
+ struct spi_delay delay = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
+ int32_t ret;
+ int rs;
+ u8 crc;
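+	/*
+	 * The sensor replies to a command during the next SPI frame, so use two
+	 * transfers: one to send the request and one to clock out the response,
+	 * toggling the chip select in between.
+	 */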
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = sca_data->txbuf,
+ .len = ARRAY_SIZE(sca_data->txbuf),
+ .delay = delay,
+ .cs_change = 1,
+ },
+ {
+ .rx_buf = sca_data->rxbuf,
+ .len = ARRAY_SIZE(sca_data->rxbuf),
+ .delay = delay,
+ }
+ };
+
+ /* inverted crc value as described in device data sheet */
+ crc = ~crc8(sca3300_crc_table, &sca_data->txbuf[0], 3, CRC8_INIT_VALUE);
+ sca_data->txbuf[3] = crc;
+
+ ret = spi_sync_transfer(sca_data->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret) {
+ dev_err(&sca_data->spi->dev,
+ "transfer error, error: %d\n", ret);
+ return -EIO;
+ }
+
+ crc = ~crc8(sca3300_crc_table, &sca_data->rxbuf[0], 3, CRC8_INIT_VALUE);
+ if (sca_data->rxbuf[3] != crc) {
+ dev_err(&sca_data->spi->dev, "CRC checksum mismatch");
+ return -EIO;
+ }
+
+ /* get return status */
+ rs = sca_data->rxbuf[0] & SCA3300_MASK_RS_STATUS;
+ if (rs == SCA3300_VALUE_RS_ERROR)
+ ret = -EINVAL;
+
+ *val = sign_extend32(get_unaligned_be16(&sca_data->rxbuf[1]), 15);
+
+ return ret;
+}
+
+static int sca3300_error_handler(struct sca3300_data *sca_data)
+{
+ int ret;
+ int val;
+
+ mutex_lock(&sca_data->lock);
+ sca_data->txbuf[0] = SCA3300_REG_STATUS << 2;
+ ret = sca3300_transfer(sca_data, &val);
+ mutex_unlock(&sca_data->lock);
+ /*
+ * Return status error is cleared after reading status register once,
+ * expect EINVAL here.
+ */
+ if (ret != -EINVAL) {
+ dev_err(&sca_data->spi->dev,
+ "error reading device status: %d\n", ret);
+ return ret;
+ }
+
+ dev_err(&sca_data->spi->dev, "device status: 0x%lx\n",
+ val & SCA3300_STATUS_MASK);
+
+ return 0;
+}
+
+static int sca3300_read_reg(struct sca3300_data *sca_data, u8 reg, int *val)
+{
+ int ret;
+
+ mutex_lock(&sca_data->lock);
+ sca_data->txbuf[0] = reg << 2;
+ ret = sca3300_transfer(sca_data, val);
+ mutex_unlock(&sca_data->lock);
+ if (ret != -EINVAL)
+ return ret;
+
+ return sca3300_error_handler(sca_data);
+}
+
+static int sca3300_write_reg(struct sca3300_data *sca_data, u8 reg, int val)
+{
+ int reg_val = 0;
+ int ret;
+
+ mutex_lock(&sca_data->lock);
+ /* BIT(7) for write operation */
+ sca_data->txbuf[0] = BIT(7) | (reg << 2);
+ put_unaligned_be16(val, &sca_data->txbuf[1]);
+ ret = sca3300_transfer(sca_data, &reg_val);
+ mutex_unlock(&sca_data->lock);
+ if (ret != -EINVAL)
+ return ret;
+
+ return sca3300_error_handler(sca_data);
+}
+
+static int sca3300_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sca3300_data *data = iio_priv(indio_dev);
+ int reg_val;
+ int ret;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(sca3300_accel_scale); i++) {
+ if (val2 == sca3300_accel_scale[i][1])
+ return sca3300_write_reg(data, SCA3300_REG_MODE, i);
+ }
+ return -EINVAL;
+
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = sca3300_read_reg(data, SCA3300_REG_MODE, &reg_val);
+ if (ret)
+ return ret;
+		/* Frequency change is only possible for modes 3 and 4 */
+ if (reg_val == 2 && val == sca3300_lp_freq[3])
+ return sca3300_write_reg(data, SCA3300_REG_MODE, 3);
+ if (reg_val == 3 && val == sca3300_lp_freq[2])
+ return sca3300_write_reg(data, SCA3300_REG_MODE, 2);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3300_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct sca3300_data *data = iio_priv(indio_dev);
+ int ret;
+ int reg_val;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = sca3300_read_reg(data, chan->address, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = sca3300_read_reg(data, SCA3300_REG_MODE, &reg_val);
+ if (ret)
+ return ret;
+ *val = 0;
+ *val2 = sca3300_accel_scale[reg_val][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = sca3300_read_reg(data, SCA3300_REG_MODE, &reg_val);
+ if (ret)
+ return ret;
+ *val = sca3300_lp_freq[reg_val];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t sca3300_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sca3300_data *data = iio_priv(indio_dev);
+ int bit, ret, val, i = 0;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = sca3300_read_reg(data, sca3300_channels[bit].address,
+ &val);
+ if (ret) {
+ dev_err_ratelimited(&data->spi->dev,
+ "failed to read register, error: %d\n", ret);
+ /* handled, but bailing out due to errors */
+ goto out;
+ }
+ data->scan.channels[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_get_time_ns(indio_dev));
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * sca3300_init - Device init sequence. See datasheet rev 2 section
+ * 4.2 Start-Up Sequence for details.
+ */
+static int sca3300_init(struct sca3300_data *sca_data,
+ struct iio_dev *indio_dev)
+{
+ int value = 0;
+ int ret;
+
+ ret = sca3300_write_reg(sca_data, SCA3300_REG_MODE,
+ SCA3300_MODE_SW_RESET);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait 1 ms after the SW-reset command plus 15 ms for the signal
+ * paths to settle (16 ms total minimum, matching the sleep below).
+ */
+ usleep_range(16e3, 50e3);
+
+ ret = sca3300_read_reg(sca_data, SCA3300_REG_WHOAMI, &value);
+ if (ret)
+ return ret;
+
+ if (value != SCA3300_WHOAMI_ID) {
+ dev_err(&sca_data->spi->dev,
+ "device id not expected value, %d != %u\n",
+ value, SCA3300_WHOAMI_ID);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int sca3300_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct sca3300_data *data = iio_priv(indio_dev);
+ int value;
+ int ret;
+
+ if (reg > SCA3300_REG_SELBANK)
+ return -EINVAL;
+
+ if (!readval)
+ return sca3300_write_reg(data, reg, writeval);
+
+ ret = sca3300_read_reg(data, reg, &value);
+ if (ret)
+ return ret;
+
+ *readval = value;
+
+ return 0;
+}
+
+static int sca3300_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
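+ /* One {int, micro} pair per distinct scale; the -2 drops what appears to be a duplicate mode entry */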
+ *vals = (const int *)sca3300_accel_scale;
+ *length = ARRAY_SIZE(sca3300_accel_scale) * 2 - 2;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
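+ /* Only the filter frequencies of modes 3 and 4 are selectable */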
+ *vals = &sca3300_lp_freq[2];
+ *length = 2;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info sca3300_info = {
+ .read_raw = sca3300_read_raw,
+ .write_raw = sca3300_write_raw,
+ .debugfs_reg_access = &sca3300_debugfs_reg_access,
+ .read_avail = sca3300_read_avail,
+};
+
+static int sca3300_probe(struct spi_device *spi)
+{
+ struct sca3300_data *sca_data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*sca_data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ sca_data = iio_priv(indio_dev);
+ mutex_init(&sca_data->lock);
+ sca_data->spi = spi;
+
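+ /* Pre-compute the CRC8 lookup table used for SPI frame checksums */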
+ crc8_populate_msb(sca3300_crc_table, SCA3300_CRC8_POLYNOMIAL);
+
+ indio_dev->info = &sca3300_info;
+ indio_dev->name = SCA3300_ALIAS;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = sca3300_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sca3300_channels);
+ indio_dev->available_scan_masks = sca3300_scan_masks;
+
+ ret = sca3300_init(sca_data, indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to init device, error: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ iio_pollfunc_store_time,
+ sca3300_trigger_handler, NULL);
+ if (ret) {
+ dev_err(&spi->dev,
+ "iio triggered buffer setup failed, error: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "iio device register failed, error: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static const struct of_device_id sca3300_dt_ids[] = {
+ { .compatible = "murata,sca3300"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sca3300_dt_ids);
+
+static struct spi_driver sca3300_driver = {
+ .driver = {
+ .name = SCA3300_ALIAS,
+ .of_match_table = sca3300_dt_ids,
+ },
+ .probe = sca3300_probe,
+};
+module_spi_driver(sca3300_driver);
+
+MODULE_AUTHOR("Tomas Melin <tomas.melin@vaisala.com>");
+MODULE_DESCRIPTION("Murata SCA3300 SPI Accelerometer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5d356288e001..f5b0b8bbaff7 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -62,18 +62,6 @@ enum st_accel_type {
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
-/**
-* struct st_sensors_platform_data - default accel platform data
-* @drdy_int_pin: default accel DRDY is available on INT1 pin.
-*/
-static __maybe_unused const struct st_sensors_platform_data default_accel_pdata = {
- .drdy_int_pin = 1,
-};
-
-const struct st_sensor_settings *st_accel_get_settings(const char *name);
-int st_accel_common_probe(struct iio_dev *indio_dev);
-void st_accel_common_remove(struct iio_dev *indio_dev);
-
#ifdef CONFIG_IIO_BUFFER
int st_accel_allocate_ring(struct iio_dev *indio_dev);
void st_accel_deallocate_ring(struct iio_dev *indio_dev);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 43c50167d220..28fceac9f2f6 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -41,51 +41,74 @@
#define ST_ACCEL_FS_AVL_200G 200
#define ST_ACCEL_FS_AVL_400G 400
+static const struct iio_mount_matrix *
+st_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ return &adata->mount_matrix;
+}
+
+static const struct iio_chan_spec_ext_info st_accel_mount_matrix_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_accel_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec st_accel_8bit_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 8, 8,
- ST_ACCEL_DEFAULT_OUT_X_L_ADDR+1),
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_ACCEL_DEFAULT_OUT_X_L_ADDR+1,
+ st_accel_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 8, 8,
- ST_ACCEL_DEFAULT_OUT_Y_L_ADDR+1),
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_ACCEL_DEFAULT_OUT_Y_L_ADDR+1,
+ st_accel_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 8, 8,
- ST_ACCEL_DEFAULT_OUT_Z_L_ADDR+1),
+ ST_ACCEL_DEFAULT_OUT_Z_L_ADDR+1,
+ st_accel_mount_matrix_ext_info),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
static const struct iio_chan_spec st_accel_12bit_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 12, 16,
- ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_ACCEL_DEFAULT_OUT_X_L_ADDR,
+ st_accel_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 12, 16,
- ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_ACCEL_DEFAULT_OUT_Y_L_ADDR,
+ st_accel_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 12, 16,
- ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ ST_ACCEL_DEFAULT_OUT_Z_L_ADDR,
+ st_accel_mount_matrix_ext_info),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
static const struct iio_chan_spec st_accel_16bit_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
- ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_ACCEL_DEFAULT_OUT_X_L_ADDR,
+ st_accel_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
- ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ ST_ACCEL_DEFAULT_OUT_Y_L_ADDR,
+ st_accel_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
- ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ ST_ACCEL_DEFAULT_OUT_Z_L_ADDR,
+ st_accel_mount_matrix_ext_info),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
@@ -980,7 +1003,99 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = true,
.bootime = 2,
},
+ {
+ .wai = 0x49,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LSM9DS0_IMU_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = GENMASK(7, 4),
+ .odr_avl = {
+ { 3, 0x01, },
+ { 6, 0x02, },
+ { 12, 0x03, },
+ { 25, 0x04, },
+ { 50, 0x05, },
+ { 100, 0x06, },
+ { 200, 0x07, },
+ { 400, 0x08, },
+ { 800, 0x09, },
+ { 1600, 0x0a, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = GENMASK(7, 4),
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x21,
+ .mask = GENMASK(5, 3),
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(122),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_6G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(183),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
+ },
+ [4] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x04,
+ .gain = IIO_G_TO_M_S_2(732),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = BIT(3),
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = BIT(2),
+ },
+ .int2 = {
+ .addr = 0x23,
+ .mask = BIT(3),
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = GENMASK(2, 0),
+ },
+ },
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+};
+/* Default accel DRDY is available on INT1 pin */
+static const struct st_sensors_platform_data default_accel_pdata = {
+ .drdy_int_pin = 1,
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -1070,25 +1185,10 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
#endif
#ifdef CONFIG_ACPI
-static const struct iio_mount_matrix *
-get_mount_matrix(const struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct st_sensor_data *adata = iio_priv(indio_dev);
-
- return adata->mount_matrix;
-}
-
-static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = {
- IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, get_mount_matrix),
- { },
-};
-
/* Read ST-specific _ONT orientation data from ACPI and generate an
* appropriate mount matrix.
*/
-static int apply_acpi_orientation(struct iio_dev *indio_dev,
- struct iio_chan_spec *channels)
+static int apply_acpi_orientation(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -1177,14 +1277,6 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
}
/* Convert our integer matrix to a string-based iio_mount_matrix */
- adata->mount_matrix = devm_kmalloc(&indio_dev->dev,
- sizeof(*adata->mount_matrix),
- GFP_KERNEL);
- if (!adata->mount_matrix) {
- ret = -ENOMEM;
- goto out;
- }
-
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
int matrix_val = final_ont[i][j];
@@ -1203,26 +1295,25 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
default:
goto out;
}
- adata->mount_matrix->rotation[i * 3 + j] = str_value;
+ adata->mount_matrix.rotation[i * 3 + j] = str_value;
}
}
- /* Expose the mount matrix via ext_info */
- for (i = 0; i < indio_dev->num_channels; i++)
- channels[i].ext_info = mount_matrix_ext_info;
-
ret = 0;
dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n");
out:
kfree(buffer.pointer);
+ if (ret)
+ dev_dbg(&indio_dev->dev,
+ "failed to apply ACPI orientation data: %d\n", ret);
+
return ret;
}
#else /* !CONFIG_ACPI */
-static int apply_acpi_orientation(struct iio_dev *indio_dev,
- struct iio_chan_spec *channels)
+static int apply_acpi_orientation(struct iio_dev *indio_dev)
{
- return 0;
+ return -EINVAL;
}
#endif
@@ -1248,38 +1339,30 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
struct st_sensors_platform_data *pdata = dev_get_platdata(adata->dev);
- struct iio_chan_spec *channels;
- size_t channels_size;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &accel_info;
- err = st_sensors_power_enable(indio_dev);
- if (err)
- return err;
-
err = st_sensors_verify_id(indio_dev);
if (err < 0)
- goto st_accel_power_off;
+ return err;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
+ indio_dev->channels = adata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- channels_size = indio_dev->num_channels * sizeof(struct iio_chan_spec);
- channels = devm_kmemdup(&indio_dev->dev,
- adata->sensor_settings->ch,
- channels_size, GFP_KERNEL);
- if (!channels) {
- err = -ENOMEM;
- goto st_accel_power_off;
+ /*
+ * First try the ST-specific ACPI method to retrieve the orientation,
+ * then fall back to the generic mount-matrix lookup.
+ */
+ err = apply_acpi_orientation(indio_dev);
+ if (err) {
+ err = iio_read_mount_matrix(adata->dev, &adata->mount_matrix);
+ if (err)
+ return err;
}
- if (apply_acpi_orientation(indio_dev, channels))
- dev_warn(&indio_dev->dev,
- "failed to apply ACPI orientation data: %d\n", err);
-
- indio_dev->channels = channels;
adata->current_fullscale = &adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
@@ -1288,11 +1371,11 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
- goto st_accel_power_off;
+ return err;
err = st_accel_allocate_ring(indio_dev);
if (err < 0)
- goto st_accel_power_off;
+ return err;
if (adata->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -1315,9 +1398,6 @@ st_accel_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_accel_probe_trigger_error:
st_accel_deallocate_ring(indio_dev);
-st_accel_power_off:
- st_sensors_power_disable(indio_dev);
-
return err;
}
EXPORT_SYMBOL(st_accel_common_probe);
@@ -1326,8 +1406,6 @@ void st_accel_common_remove(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
- st_sensors_power_disable(indio_dev);
-
iio_device_unregister(indio_dev);
if (adata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 360e16f2cadb..95e305b88d5e 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -174,16 +174,29 @@ static int st_accel_i2c_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = st_sensors_power_enable(indio_dev);
+ if (ret)
+ return ret;
+
ret = st_accel_common_probe(indio_dev);
if (ret < 0)
- return ret;
+ goto st_accel_power_off;
return 0;
+
+st_accel_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return ret;
}
static int st_accel_i2c_remove(struct i2c_client *client)
{
- st_accel_common_remove(i2c_get_clientdata(client));
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ st_sensors_power_disable(indio_dev);
+
+ st_accel_common_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 568ff1bae0ee..83d3308ce5cc 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -123,16 +123,29 @@ static int st_accel_spi_probe(struct spi_device *spi)
if (err < 0)
return err;
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
+
err = st_accel_common_probe(indio_dev);
if (err < 0)
- return err;
+ goto st_accel_power_off;
return 0;
+
+st_accel_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
}
static int st_accel_spi_remove(struct spi_device *spi)
{
- st_accel_common_remove(spi_get_drvdata(spi));
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ st_sensors_power_disable(indio_dev);
+
+ st_accel_common_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 157d8faefb9e..43c621d0f11e 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -7,7 +7,6 @@
* IIO driver for STK8312; 7-bit I2C address: 0x3D.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -103,7 +102,11 @@ struct stk8312_data {
u8 mode;
struct iio_trigger *dready_trig;
bool dready_trigger_on;
- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ s8 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
};
static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL);
@@ -438,7 +441,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
ret = i2c_smbus_read_i2c_block_data(data->client,
STK8312_REG_XOUT,
STK8312_ALL_CHANNEL_SIZE,
- data->buffer);
+ data->scan.chans);
if (ret < STK8312_ALL_CHANNEL_SIZE) {
dev_err(&data->client->dev, "register read failed\n");
mutex_unlock(&data->lock);
@@ -452,12 +455,12 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
mutex_unlock(&data->lock);
goto err;
}
- data->buffer[i++] = ret;
+ data->scan.chans[i++] = ret;
}
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -552,7 +555,7 @@ static int stk8312_probe(struct i2c_client *client,
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig) {
ret = -ENOMEM;
goto err_power_off;
@@ -635,23 +638,17 @@ static SIMPLE_DEV_PM_OPS(stk8312_pm_ops, stk8312_suspend, stk8312_resume);
#endif
static const struct i2c_device_id stk8312_i2c_id[] = {
- {"STK8312", 0},
+ /* Deprecated in favour of lowercase form */
+ { "STK8312", 0 },
+ { "stk8312", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, stk8312_i2c_id);
-static const struct acpi_device_id stk8312_acpi_id[] = {
- {"STK8312", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(acpi, stk8312_acpi_id);
-
static struct i2c_driver stk8312_driver = {
.driver = {
.name = STK8312_DRIVER_NAME,
.pm = STK8312_PM_OPS,
- .acpi_match_table = ACPI_PTR(stk8312_acpi_id),
},
.probe = stk8312_probe,
.remove = stk8312_remove,
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 7cf9cb7e8666..e137a34b5c9a 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -91,12 +91,11 @@ struct stk8ba50_data {
u8 sample_rate_idx;
struct iio_trigger *dready_trig;
bool dready_trigger_on;
- /*
- * 3 x 16-bit channels (10-bit data, 6-bit padding) +
- * 1 x 16 padding +
- * 4 x 16 64-bit timestamp
- */
- s16 buffer[8];
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ s16 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
};
#define STK8BA50_ACCEL_CHANNEL(index, reg, axis) { \
@@ -324,7 +323,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
ret = i2c_smbus_read_i2c_block_data(data->client,
STK8BA50_REG_XOUT,
STK8BA50_ALL_CHANNEL_SIZE,
- (u8 *)data->buffer);
+ (u8 *)data->scan.chans);
if (ret < STK8BA50_ALL_CHANNEL_SIZE) {
dev_err(&data->client->dev, "register read failed\n");
goto err;
@@ -337,10 +336,10 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
- data->buffer[i++] = ret;
+ data->scan.chans[i++] = ret;
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
mutex_unlock(&data->lock);
@@ -448,7 +447,7 @@ static int stk8ba50_probe(struct i2c_client *client,
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig) {
ret = -ENOMEM;
goto err_power_off;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index c7946c439612..db0c8fb60515 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1190,6 +1190,18 @@ config TI_TLC4541
This driver can also be built as a module. If so, the module will be
called ti-tlc4541.
+config TI_TSC2046
+ tristate "Texas Instruments TSC2046 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for ADC functionality of Texas
+ Instruments TSC2046 touch screen controller.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-tsc2046.
+
config TWL4030_MADC
tristate "TWL4030 MADC (Monitoring A/D Converter)"
depends on TWL4030_CORE
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index a226657d19c0..f70d877c555a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_ADS131E08) += ti-ads131e08.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_TI_TLC4541) += ti-tlc4541.o
+obj-$(CONFIG_TI_TSC2046) += ti-tsc2046.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL6030_GPADC) += twl6030-gpadc.o
obj-$(CONFIG_VF610_ADC) += vf610_adc.o
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index a27db78ea13e..e45c600fccc0 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -862,6 +862,11 @@ static void ad7124_reg_disable(void *r)
regulator_disable(r);
}
+static void ad7124_clk_disable(void *c)
+{
+ clk_disable_unprepare(c);
+}
+
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -883,8 +888,6 @@ static int ad7124_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
- spi_set_drvdata(spi, indio_dev);
-
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7124_info;
@@ -922,48 +925,28 @@ static int ad7124_probe(struct spi_device *spi)
if (ret < 0)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7124_clk_disable, st->mclk);
+ if (ret)
+ return ret;
+
ret = ad7124_soft_reset(st);
if (ret < 0)
- goto error_clk_disable_unprepare;
+ return ret;
ret = ad7124_check_chip_id(st);
if (ret)
- goto error_clk_disable_unprepare;
+ return ret;
ret = ad7124_setup(st);
if (ret < 0)
- goto error_clk_disable_unprepare;
+ return ret;
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret < 0)
- goto error_clk_disable_unprepare;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to register iio device\n");
- goto error_remove_trigger;
- }
-
- return 0;
-
-error_remove_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_clk_disable_unprepare:
- clk_disable_unprepare(st->mclk);
-
- return ret;
-}
-
-static int ad7124_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7124_state *st = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
- clk_disable_unprepare(st->mclk);
+ return devm_iio_device_register(&spi->dev, indio_dev);
- return 0;
}
static const struct of_device_id ad7124_of_match[] = {
@@ -981,7 +964,6 @@ static struct spi_driver ad71124_driver = {
.of_match_table = ad7124_of_match,
},
.probe = ad7124_probe,
- .remove = ad7124_remove,
};
module_spi_driver(ad71124_driver);
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 1141cc13a124..ee8ed9481025 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -326,7 +326,7 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
clock_sel = AD7192_CLK_INT;
/* use internal clock */
- if (PTR_ERR(st->mclk) == -ENOENT) {
+ if (st->mclk) {
if (of_property_read_bool(np, "adi,int-clock-output-enable"))
clock_sel = AD7192_CLK_INT_CO;
} else {
@@ -908,6 +908,16 @@ static int ad7192_channels_config(struct iio_dev *indio_dev)
return 0;
}
+static void ad7192_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static void ad7192_clk_disable(void *clk)
+{
+ clk_disable_unprepare(clk);
+}
+
static int ad7192_probe(struct spi_device *spi)
{
struct ad7192_state *st;
@@ -937,33 +947,38 @@ static int ad7192_probe(struct spi_device *spi)
return ret;
}
+ ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->avdd);
+ if (ret)
+ return ret;
+
st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
- if (IS_ERR(st->dvdd)) {
- ret = PTR_ERR(st->dvdd);
- goto error_disable_avdd;
- }
+ if (IS_ERR(st->dvdd))
+ return PTR_ERR(st->dvdd);
ret = regulator_enable(st->dvdd);
if (ret) {
dev_err(&spi->dev, "Failed to enable specified DVdd supply\n");
- goto error_disable_avdd;
+ return ret;
}
+ ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->dvdd);
+ if (ret)
+ return ret;
+
ret = regulator_get_voltage(st->avdd);
if (ret < 0) {
dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
- goto error_disable_avdd;
+ return ret;
}
st->int_vref_mv = ret / 1000;
- spi_set_drvdata(spi, indio_dev);
st->chip_info = of_device_get_match_data(&spi->dev);
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = ad7192_channels_config(indio_dev);
if (ret < 0)
- goto error_disable_dvdd;
+ return ret;
if (st->chip_info->chip_id == CHIPID_AD7195)
indio_dev->info = &ad7195_info;
@@ -972,17 +987,15 @@ static int ad7192_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7192_sigma_delta_info);
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret)
- goto error_disable_dvdd;
+ return ret;
st->fclk = AD7192_INT_FREQ_MHZ;
- st->mclk = devm_clk_get(&st->sd.spi->dev, "mclk");
- if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT) {
- ret = PTR_ERR(st->mclk);
- goto error_remove_trigger;
- }
+ st->mclk = devm_clk_get_optional(&spi->dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return PTR_ERR(st->mclk);
st->clock_sel = ad7192_of_clock_select(st);
@@ -990,55 +1003,26 @@ static int ad7192_probe(struct spi_device *spi)
st->clock_sel == AD7192_CLK_EXT_MCLK2) {
ret = clk_prepare_enable(st->mclk);
if (ret < 0)
- goto error_remove_trigger;
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7192_clk_disable,
+ st->mclk);
+ if (ret)
+ return ret;
st->fclk = clk_get_rate(st->mclk);
if (!ad7192_valid_external_frequency(st->fclk)) {
- ret = -EINVAL;
dev_err(&spi->dev,
"External clock frequency out of bounds\n");
- goto error_disable_clk;
+ return -EINVAL;
}
}
ret = ad7192_setup(st, spi->dev.of_node);
if (ret)
- goto error_disable_clk;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_disable_clk;
- return 0;
-
-error_disable_clk:
- if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
- st->clock_sel == AD7192_CLK_EXT_MCLK2)
- clk_disable_unprepare(st->mclk);
-error_remove_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_dvdd:
- regulator_disable(st->dvdd);
-error_disable_avdd:
- regulator_disable(st->avdd);
-
- return ret;
-}
-
-static int ad7192_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7192_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
- st->clock_sel == AD7192_CLK_EXT_MCLK2)
- clk_disable_unprepare(st->mclk);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-
- regulator_disable(st->dvdd);
- regulator_disable(st->avdd);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id ad7192_of_match[] = {
@@ -1056,7 +1040,6 @@ static struct spi_driver ad7192_driver = {
.of_match_table = ad7192_of_match,
},
.probe = ad7192_probe,
- .remove = ad7192_remove,
};
module_spi_driver(ad7192_driver);
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index d2163cb62f4f..3f4e73f7d35a 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -13,6 +13,7 @@
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -346,6 +347,12 @@ static int ad7298_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
+static const struct acpi_device_id ad7298_acpi_ids[] = {
+ { "INT3494", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ad7298_acpi_ids);
+
static const struct spi_device_id ad7298_id[] = {
{"ad7298", 0},
{}
@@ -355,6 +362,7 @@ MODULE_DEVICE_TABLE(spi, ad7298_id);
static struct spi_driver ad7298_driver = {
.driver = {
.name = "ad7298",
+ .acpi_match_table = ad7298_acpi_ids,
},
.probe = ad7298_probe,
.id_table = ad7298_id,
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 9e9ff07cf972..a1e8b32671cf 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -32,12 +32,14 @@ struct ad7476_chip_info {
/* channels used when convst gpio is defined */
struct iio_chan_spec convst_channel[2];
void (*reset)(struct ad7476_state *);
+ bool has_vref;
+ bool has_vdrive;
};
struct ad7476_state {
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
- struct regulator *reg;
+ struct regulator *ref_reg;
struct gpio_desc *convst_gpio;
struct spi_transfer xfer;
struct spi_message msg;
@@ -52,13 +54,17 @@ struct ad7476_state {
};
enum ad7476_supported_device_ids {
+ ID_AD7091,
ID_AD7091R,
+ ID_AD7273,
+ ID_AD7274,
ID_AD7276,
ID_AD7277,
ID_AD7278,
ID_AD7466,
ID_AD7467,
ID_AD7468,
+ ID_AD7475,
ID_AD7495,
ID_AD7940,
ID_ADC081S,
@@ -145,8 +151,8 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
GENMASK(st->chip_info->channel[0].scan_type.realbits - 1, 0);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if (!st->chip_info->int_vref_uv) {
- scale_uv = regulator_get_voltage(st->reg);
+ if (st->ref_reg) {
+ scale_uv = regulator_get_voltage(st->ref_reg);
if (scale_uv < 0)
return scale_uv;
} else {
@@ -187,13 +193,32 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
BIT(IIO_CHAN_INFO_RAW))
static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
+ [ID_AD7091] = {
+ .channel[0] = AD7091R_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .convst_channel[0] = AD7091R_CONVST_CHAN(12),
+ .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .reset = ad7091_reset,
+ },
[ID_AD7091R] = {
.channel[0] = AD7091R_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.convst_channel[0] = AD7091R_CONVST_CHAN(12),
.convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .int_vref_uv = 2500000,
+ .has_vref = true,
.reset = ad7091_reset,
},
+ [ID_AD7273] = {
+ .channel[0] = AD7940_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+ },
+ [ID_AD7274] = {
+ .channel[0] = AD7940_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+ },
[ID_AD7276] = {
.channel[0] = AD7940_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
@@ -218,10 +243,17 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
.channel[0] = AD7476_CHAN(8),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
+ [ID_AD7475] = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+ .has_vdrive = true,
+ },
[ID_AD7495] = {
.channel[0] = AD7476_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.int_vref_uv = 2500000,
+ .has_vdrive = true,
},
[ID_AD7940] = {
.channel[0] = AD7940_CHAN(14),
@@ -254,6 +286,7 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_LTC2314_14] = {
.channel[0] = AD7940_CHAN(14),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
},
};
@@ -263,15 +296,16 @@ static const struct iio_info ad7476_info = {
static void ad7476_reg_disable(void *data)
{
- struct ad7476_state *st = data;
+ struct regulator *reg = data;
- regulator_disable(st->reg);
+ regulator_disable(reg);
}
static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
struct iio_dev *indio_dev;
+ struct regulator *reg;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -282,27 +316,79 @@ static int ad7476_probe(struct spi_device *spi)
st->chip_info =
&ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- st->reg = devm_regulator_get(&spi->dev, "vcc");
- if (IS_ERR(st->reg))
- return PTR_ERR(st->reg);
+ reg = devm_regulator_get(&spi->dev, "vcc");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
- ret = regulator_enable(st->reg);
+ ret = regulator_enable(reg);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable,
- st);
+ ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable, reg);
if (ret)
return ret;
+ /* If there is no internal reference, vcc doubles as the reference; a dedicated vref (below) may override this */
+ if (!st->chip_info->int_vref_uv)
+ st->ref_reg = reg;
+
+ if (st->chip_info->has_vref) {
+
+ /* If a device has an internal reference vref is optional */
+ if (st->chip_info->int_vref_uv) {
+ reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (IS_ERR(reg) && (PTR_ERR(reg) != -ENODEV))
+ return PTR_ERR(reg);
+ } else {
+ reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ }
+
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad7476_reg_disable,
+ reg);
+ if (ret)
+ return ret;
+ st->ref_reg = reg;
+ } else {
+ /*
+ * Can only get here if the device supports both an internal
+ * and an external reference, but no supply is connected to
+ * the external reference input.
+ * Set the reference regulator pointer to NULL to
+ * indicate this.
+ */
+ st->ref_reg = NULL;
+ }
+ }
+
+ if (st->chip_info->has_vdrive) {
+ reg = devm_regulator_get(&spi->dev, "vdrive");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable,
+ reg);
+ if (ret)
+ return ret;
+ }
+
st->convst_gpio = devm_gpiod_get_optional(&spi->dev,
"adi,conversion-start",
GPIOD_OUT_LOW);
if (IS_ERR(st->convst_gpio))
return PTR_ERR(st->convst_gpio);
- spi_set_drvdata(spi, indio_dev);
-
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
@@ -333,17 +419,17 @@ static int ad7476_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7476_id[] = {
- {"ad7091", ID_AD7091R},
+ {"ad7091", ID_AD7091},
{"ad7091r", ID_AD7091R},
- {"ad7273", ID_AD7277},
- {"ad7274", ID_AD7276},
+ {"ad7273", ID_AD7273},
+ {"ad7274", ID_AD7274},
{"ad7276", ID_AD7276},
{"ad7277", ID_AD7277},
{"ad7278", ID_AD7278},
{"ad7466", ID_AD7466},
{"ad7467", ID_AD7467},
{"ad7468", ID_AD7468},
- {"ad7475", ID_AD7466},
+ {"ad7475", ID_AD7475},
{"ad7476", ID_AD7466},
{"ad7476a", ID_AD7466},
{"ad7477", ID_AD7467},
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 0af0bb4d5a7f..0a60ecc69d38 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -663,7 +663,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
}
st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!st->trig)
return -ENOMEM;
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index 1e41759f3ee5..51ee9482e0df 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -248,7 +248,8 @@ static int ad7766_probe(struct spi_device *spi)
if (spi->irq > 0) {
ad7766->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!ad7766->trig)
return -ENOMEM;
@@ -272,8 +273,6 @@ static int ad7766_probe(struct spi_device *spi)
return ret;
}
- spi_set_drvdata(spi, indio_dev);
-
ad7766->spi = spi;
/* First byte always 0 */
@@ -289,10 +288,7 @@ static int ad7766_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad7766_id[] = {
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 60f21fed6dcb..2c5c8a3672b2 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -614,7 +614,6 @@ static int ad7768_probe(struct spi_device *spi)
st->mclk_freq = clk_get_rate(st->mclk);
- spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
indio_dev->channels = ad7768_channels;
@@ -630,7 +629,8 @@ static int ad7768_probe(struct spi_device *spi)
}
st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!st->trig)
return -ENOMEM;
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 42e7e8e595d1..42bb952f4738 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -300,6 +300,11 @@ static int ad7780_init_gpios(struct device *dev, struct ad7780_state *st)
return 0;
}
+static void ad7780_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7780_probe(struct spi_device *spi)
{
struct ad7780_state *st;
@@ -318,8 +323,6 @@ static int ad7780_probe(struct spi_device *spi)
st->chip_info =
&ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- spi_set_drvdata(spi, indio_dev);
-
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = &st->chip_info->channel;
@@ -340,35 +343,15 @@ static int ad7780_probe(struct spi_device *spi)
return ret;
}
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, ad7780_reg_disable, st->reg);
if (ret)
- goto error_disable_reg;
+ return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret)
- goto error_cleanup_buffer_and_trigger;
-
- return 0;
-
-error_cleanup_buffer_and_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad7780_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7780_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-
- regulator_disable(st->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad7780_id[] = {
@@ -385,7 +368,6 @@ static struct spi_driver ad7780_driver = {
.name = "ad7780",
},
.probe = ad7780_probe,
- .remove = ad7780_remove,
.id_table = ad7780_id,
};
module_spi_driver(ad7780_driver);
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index d57ad966e17c..cb579aa89f39 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -394,6 +394,11 @@ static int ad7791_setup(struct ad7791_state *st,
st->mode);
}
+static void ad7791_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7791_probe(struct spi_device *spi)
{
struct ad7791_platform_data *pdata = spi->dev.platform_data;
@@ -420,11 +425,13 @@ static int ad7791_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7791_reg_disable, st->reg);
+ if (ret)
+ return ret;
+
st->info = &ad7791_chip_infos[spi_get_device_id(spi)->driver_data];
ad_sd_init(&st->sd, indio_dev, spi, &ad7791_sigma_delta_info);
- spi_set_drvdata(spi, indio_dev);
-
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->info->channels;
@@ -434,39 +441,15 @@ static int ad7791_probe(struct spi_device *spi)
else
indio_dev->info = &ad7791_no_filter_info;
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad7791_setup(st, pdata);
if (ret)
- goto error_remove_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad7791_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7791_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-
- regulator_disable(st->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad7791_spi_ids[] = {
@@ -484,7 +467,6 @@ static struct spi_driver ad7791_driver = {
.name = "ad7791",
},
.probe = ad7791_probe,
- .remove = ad7791_remove,
.id_table = ad7791_spi_ids,
};
module_spi_driver(ad7791_driver);
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 440ef4c7be07..ef3e2d3ecb0c 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -769,6 +769,11 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
},
};
+static void ad7793_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ad7793_probe(struct spi_device *spi)
{
const struct ad7793_platform_data *pdata = spi->dev.platform_data;
@@ -803,11 +808,13 @@ static int ad7793_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7793_reg_disable, st->reg);
+ if (ret)
+ return ret;
+
vref_mv = regulator_get_voltage(st->reg);
- if (vref_mv < 0) {
- ret = vref_mv;
- goto error_disable_reg;
- }
+ if (vref_mv < 0)
+ return vref_mv;
vref_mv /= 1000;
} else {
@@ -817,50 +824,21 @@ static int ad7793_probe(struct spi_device *spi)
st->chip_info =
&ad7793_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- spi_set_drvdata(spi, indio_dev);
-
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->info = st->chip_info->iio_info;
- ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret)
- goto error_disable_reg;
+ return ret;
ret = ad7793_setup(indio_dev, pdata, vref_mv);
if (ret)
- goto error_remove_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
- if (pdata->refsel != AD7793_REFSEL_INTERNAL)
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad7793_remove(struct spi_device *spi)
-{
- const struct ad7793_platform_data *pdata = spi->dev.platform_data;
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7793_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- ad_sd_cleanup_buffer_and_trigger(indio_dev);
-
- if (pdata->refsel != AD7793_REFSEL_INTERNAL)
- regulator_disable(st->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad7793_id[] = {
@@ -882,7 +860,6 @@ static struct spi_driver ad7793_driver = {
.name = "ad7793",
},
.probe = ad7793_probe,
- .remove = ad7793_remove,
.id_table = ad7793_id,
};
module_spi_driver(ad7793_driver);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 9b3cbe1ddc6f..f64999714a4d 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -268,7 +268,6 @@ static int ad7887_probe(struct spi_device *spi)
st->chip_info =
&ad7887_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- spi_set_drvdata(spi, indio_dev);
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 19a45dd43796..dbfc8517cb8a 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -434,8 +434,6 @@ static int ad9467_probe(struct spi_device *spi)
mdelay(10);
}
- spi_set_drvdata(spi, st);
-
conv->chip_info = &info->axi_adc_info;
id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index e777ec718973..1d652d9b2f5c 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -470,91 +470,65 @@ EXPORT_SYMBOL_GPL(ad_sd_validate_trigger);
static const struct iio_trigger_ops ad_sd_trigger_ops = {
};
-static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
+static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
int ret;
- sigma_delta->trig = iio_trigger_alloc(&sigma_delta->spi->dev,
- "%s-dev%d", indio_dev->name,
- indio_dev->id);
- if (sigma_delta->trig == NULL) {
- ret = -ENOMEM;
- goto error_ret;
+ if (dev != &sigma_delta->spi->dev) {
+ dev_err(dev, "Trigger parent should be '%s', got '%s'\n",
+ dev_name(dev), dev_name(&sigma_delta->spi->dev));
+ return -EFAULT;
}
+
+ sigma_delta->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
+ if (sigma_delta->trig == NULL)
+ return -ENOMEM;
+
sigma_delta->trig->ops = &ad_sd_trigger_ops;
init_completion(&sigma_delta->completion);
sigma_delta->irq_dis = true;
- ret = request_irq(sigma_delta->spi->irq,
- ad_sd_data_rdy_trig_poll,
- sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
- indio_dev->name,
- sigma_delta);
+ ret = devm_request_irq(dev, sigma_delta->spi->irq,
+ ad_sd_data_rdy_trig_poll,
+ sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
+ indio_dev->name,
+ sigma_delta);
if (ret)
- goto error_free_trig;
+ return ret;
iio_trigger_set_drvdata(sigma_delta->trig, sigma_delta);
- ret = iio_trigger_register(sigma_delta->trig);
+ ret = devm_iio_trigger_register(dev, sigma_delta->trig);
if (ret)
- goto error_free_irq;
+ return ret;
/* select default trigger */
indio_dev->trig = iio_trigger_get(sigma_delta->trig);
return 0;
-
-error_free_irq:
- free_irq(sigma_delta->spi->irq, sigma_delta);
-error_free_trig:
- iio_trigger_free(sigma_delta->trig);
-error_ret:
- return ret;
-}
-
-static void ad_sd_remove_trigger(struct iio_dev *indio_dev)
-{
- struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
-
- iio_trigger_unregister(sigma_delta->trig);
- free_irq(sigma_delta->spi->irq, sigma_delta);
- iio_trigger_free(sigma_delta->trig);
}
/**
- * ad_sd_setup_buffer_and_trigger() -
+ * devm_ad_sd_setup_buffer_and_trigger() - Device-managed buffer & trigger setup
+ * @dev: Device object to which to bind the life-time of the resources attached
* @indio_dev: The IIO device
*/
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev)
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev)
{
int ret;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- &ad_sd_trigger_handler, &ad_sd_buffer_setup_ops);
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad_sd_trigger_handler,
+ &ad_sd_buffer_setup_ops);
if (ret)
return ret;
- ret = ad_sd_probe_trigger(indio_dev);
- if (ret) {
- iio_triggered_buffer_cleanup(indio_dev);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ad_sd_setup_buffer_and_trigger);
-
-/**
- * ad_sd_cleanup_buffer_and_trigger() -
- * @indio_dev: The IIO device
- */
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev)
-{
- ad_sd_remove_trigger(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
+ return devm_ad_sd_probe_trigger(dev, indio_dev);
}
-EXPORT_SYMBOL_GPL(ad_sd_cleanup_buffer_and_trigger);
+EXPORT_SYMBOL_GPL(devm_ad_sd_setup_buffer_and_trigger);
/**
* ad_sd_init() - Initializes a ad_sigma_delta struct
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index d5f6ffc5b5bc..a73e3c2d212f 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -202,29 +202,25 @@ static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
kfree(cl);
}
-static void devm_adi_axi_adc_conv_release(struct device *dev, void *res)
+static void devm_adi_axi_adc_conv_release(void *conv)
{
- adi_axi_adc_conv_unregister(*(struct adi_axi_adc_conv **)res);
+ adi_axi_adc_conv_unregister(conv);
}
struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
size_t sizeof_priv)
{
- struct adi_axi_adc_conv **ptr, *conv;
-
- ptr = devres_alloc(devm_adi_axi_adc_conv_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct adi_axi_adc_conv *conv;
+ int ret;
conv = adi_axi_adc_conv_register(dev, sizeof_priv);
- if (IS_ERR(conv)) {
- devres_free(ptr);
- return ERR_CAST(conv);
- }
+ if (IS_ERR(conv))
+ return conv;
- *ptr = conv;
- devres_add(dev, ptr);
+ ret = devm_add_action_or_reset(dev, devm_adi_axi_adc_conv_release,
+ conv);
+ if (ret)
+ return ERR_PTR(ret);
return conv;
}
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index a7826f097b95..ea5ca163d879 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -403,7 +403,8 @@ struct at91_adc_state {
struct at91_adc_dma dma_st;
struct at91_adc_touch touch_st;
struct iio_dev *indio_dev;
- u16 buffer[AT91_BUFFER_MAX_HWORDS];
+ /* Ensure naturally aligned timestamp */
+ u16 buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
/*
* lock to prevent concurrent 'single conversion' requests through
* sysfs.
@@ -997,7 +998,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
int ret;
trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
- indio->id, trigger_name);
+ iio_device_id(indio), trigger_name);
if (!trig)
return NULL;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 0b5f0c91d0d7..5a7d3a3a5fa8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -547,7 +547,7 @@ static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
char *name = kasprintf(GFP_KERNEL,
"%s-dev%d-%s",
idev->name,
- idev->id,
+ iio_device_id(idev),
triggers[i].name);
if (!name)
return -ENOMEM;
@@ -626,7 +626,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
int ret;
trig = iio_trigger_alloc(idev->dev.parent, "%s-dev%d-%s", idev->name,
- idev->id, trigger->name);
+ iio_device_id(idev), trigger->name);
if (trig == NULL)
return NULL;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 0d53ef18e045..16407664182c 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -649,7 +649,8 @@ static int dln2_adc_probe(struct platform_device *pdev)
indio_dev->setup_ops = &dln2_adc_buffer_setup_ops;
dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!dln2->trig) {
dev_err(dev, "failed to allocate trigger\n");
return -ENOMEM;
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index c08ab3c6dfaf..a10a4e8d94fd 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -165,10 +165,8 @@ static int ep93xx_adc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base)) {
- dev_err(&pdev->dev, "Cannot map memory resource\n");
+ if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- }
iiodev->name = dev_name(&pdev->dev);
iiodev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 8c98d8c9ab1f..3b3868aa2533 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -794,7 +794,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
struct s3c2410_ts_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = NULL;
bool has_ts = false;
- int ret = -ENODEV;
+ int ret;
int irq;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct exynos_adc));
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 074c30970465..8b353e26668e 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -483,7 +483,6 @@ static int hi8435_probe(struct spi_device *spi)
gpiod_set_value_cansleep(reset_gpio, 1);
}
- spi_set_drvdata(spi, idev);
mutex_init(&priv->lock);
idev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 6a173531d355..f7ee856a6b8b 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -86,9 +86,9 @@ struct hx711_data {
struct mutex lock;
/*
* triggered buffer
- * 2x32-bit channel + 64-bit timestamp
+ * 2x32-bit channel + 64-bit naturally aligned timestamp
*/
- u32 buffer[4];
+ u32 buffer[4] __aligned(8);
/*
* delay after a rising edge on SCK until the data is ready DOUT
* this is dependent on the hx711 where the datasheet tells a
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 2ae54258b221..a4b2ff9e0dd5 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -843,7 +843,8 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
chip->allow_async_readout);
task = kthread_create(ina2xx_capture_thread, (void *)indio_dev,
- "%s:%d-%uus", indio_dev->name, indio_dev->id,
+ "%s:%d-%uus", indio_dev->name,
+ iio_device_id(indio_dev),
sampling_us);
if (IS_ERR(task))
return PTR_ERR(task);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index e3c8ec107722..655ab02d03d8 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -430,8 +430,6 @@ static int max1027_probe(struct spi_device *spi)
return -ENOMEM;
}
- spi_set_drvdata(spi, indio_dev);
-
st = iio_priv(indio_dev);
st->spi = spi;
st->info = &max1027_chip_info_tbl[spi_get_device_id(spi)->driver_data];
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index 6cf21758ca66..eb1ce6a0315c 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
@@ -63,7 +64,7 @@ static int max11100_read_single(struct iio_dev *indio_dev, int *val)
return -EINVAL;
}
- *val = (state->buffer[1] << 8) | state->buffer[2];
+ *val = get_unaligned_be16(&state->buffer[1]);
return 0;
}
@@ -101,6 +102,11 @@ static const struct iio_info max11100_info = {
.read_raw = max11100_read_raw,
};
+static void max11100_regulator_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int max11100_probe(struct spi_device *spi)
{
int ret;
@@ -111,8 +117,6 @@ static int max11100_probe(struct spi_device *spi)
if (!indio_dev)
return -ENOMEM;
- spi_set_drvdata(spi, indio_dev);
-
state = iio_priv(indio_dev);
state->spi = spi;
@@ -130,27 +134,12 @@ static int max11100_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, max11100_regulator_disable,
+ state->vref_reg);
if (ret)
- goto disable_regulator;
-
- return 0;
-
-disable_regulator:
- regulator_disable(state->vref_reg);
-
- return ret;
-}
-
-static int max11100_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct max11100_state *state = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- regulator_disable(state->vref_reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id max11100_ids[] = {
@@ -165,7 +154,6 @@ static struct spi_driver max11100_driver = {
.of_match_table = max11100_ids,
},
.probe = max11100_probe,
- .remove = max11100_remove,
};
module_spi_driver(max11100_driver);
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 6efb0b43d938..8cec9d949083 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -66,9 +66,8 @@ static const struct iio_chan_spec max1118_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(2),
};
-static int max1118_read(struct spi_device *spi, int channel)
+static int max1118_read(struct iio_dev *indio_dev, int channel)
{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct max1118 *adc = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
/*
@@ -103,9 +102,9 @@ static int max1118_read(struct spi_device *spi, int channel)
int ret;
if (channel == 0)
- ret = spi_sync_transfer(spi, xfers + 1, 2);
+ ret = spi_sync_transfer(adc->spi, xfers + 1, 2);
else
- ret = spi_sync_transfer(spi, xfers, 3);
+ ret = spi_sync_transfer(adc->spi, xfers, 3);
if (ret)
return ret;
@@ -113,11 +112,10 @@ static int max1118_read(struct spi_device *spi, int channel)
return adc->data;
}
-static int max1118_get_vref_mV(struct spi_device *spi)
+static int max1118_get_vref_mV(struct iio_dev *indio_dev)
{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct max1118 *adc = iio_priv(indio_dev);
- const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct spi_device_id *id = spi_get_device_id(adc->spi);
int vref_uV;
switch (id->driver_data) {
@@ -144,14 +142,14 @@ static int max1118_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&adc->lock);
- *val = max1118_read(adc->spi, chan->channel);
+ *val = max1118_read(indio_dev, chan->channel);
mutex_unlock(&adc->lock);
if (*val < 0)
return *val;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = max1118_get_vref_mV(adc->spi);
+ *val = max1118_get_vref_mV(indio_dev);
if (*val < 0)
return *val;
*val2 = 8;
@@ -180,7 +178,7 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
indio_dev->masklength) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
- int ret = max1118_read(adc->spi, scan_chan->channel);
+ int ret = max1118_read(indio_dev, scan_chan->channel);
if (ret < 0) {
dev_warn(&adc->spi->dev,
@@ -201,6 +199,11 @@ out:
return IRQ_HANDLED;
}
+static void max1118_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int max1118_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -225,9 +228,13 @@ static int max1118_probe(struct spi_device *spi)
ret = regulator_enable(adc->reg);
if (ret)
return ret;
- }
- spi_set_drvdata(spi, indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, max1118_reg_disable,
+ adc->reg);
+ if (ret)
+ return ret;
+
+ }
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &max1118_info;
@@ -241,40 +248,14 @@ static int max1118_probe(struct spi_device *spi)
* a conversion has been completed, the MAX1117/MAX1118/MAX1119 will go
* into AutoShutdown mode until the next conversion is initiated.
*/
- max1118_read(spi, 0);
-
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- max1118_trigger_handler, NULL);
- if (ret)
- goto err_reg_disable;
+ max1118_read(indio_dev, 0);
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+ max1118_trigger_handler, NULL);
if (ret)
- goto err_buffer_cleanup;
-
- return 0;
-
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-err_reg_disable:
- if (id->driver_data == max1118)
- regulator_disable(adc->reg);
-
- return ret;
-}
-
-static int max1118_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct max1118 *adc = iio_priv(indio_dev);
- const struct spi_device_id *id = spi_get_device_id(spi);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- if (id->driver_data == max1118)
- return regulator_disable(adc->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id max1118_id[] = {
@@ -299,7 +280,6 @@ static struct spi_driver max1118_spi_driver = {
.of_match_table = max1118_dt_ids,
},
.probe = max1118_probe,
- .remove = max1118_remove,
.id_table = max1118_id,
};
module_spi_driver(max1118_spi_driver);
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
index 0cbbb3c56d08..b60f8448f21a 100644
--- a/drivers/iio/adc/max1241.c
+++ b/drivers/iio/adc/max1241.c
@@ -147,8 +147,6 @@ static int max1241_probe(struct spi_device *spi)
adc->spi = spi;
mutex_init(&adc->lock);
- spi_set_drvdata(spi, indio_dev);
-
adc->vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(adc->vdd)) {
dev_err(dev, "failed to get vdd regulator\n");
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 331a9a728217..aca084f1e78a 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -144,7 +144,6 @@ static int mp2629_adc_probe(struct platform_device *pdev)
}
indio_dev->name = "mp2629-adc";
- indio_dev->dev.parent = dev;
indio_dev->channels = mp2629_channels;
indio_dev->num_channels = ARRAY_SIZE(mp2629_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 6b39a139ce28..07c0e6768391 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -337,7 +337,6 @@ static int mt6360_adc_probe(struct platform_device *pdev)
}
indio_dev->name = dev_name(&pdev->dev);
- indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &mt6360_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mt6360_adc_channels;
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index 30e29f44ebd2..bca79a93cbe4 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -115,7 +115,8 @@ struct mxs_lradc_adc {
struct device *dev;
void __iomem *base;
- u32 buffer[10];
+ /* Maximum of 8 channels + 8 byte ts */
+ u32 buffer[10] __aligned(8);
struct iio_trigger *trig;
struct completion completion;
spinlock_t lock;
@@ -455,7 +456,7 @@ static int mxs_lradc_adc_trigger_init(struct iio_dev *iio)
struct mxs_lradc_adc *adc = iio_priv(iio);
trig = devm_iio_trigger_alloc(&iio->dev, "%s-dev%i", iio->name,
- iio->id);
+ iio_device_id(iio));
if (!trig)
return -ENOMEM;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 6ef09609be9f..f9c8385c72d3 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
adc_period = adc->auto_conversion_period;
for (i = 0; i < 16; ++i) {
- if (((1000 * (1 << i)) / 32) < adc_period)
- continue;
+ if (((1000 * (1 << i)) / 32) >= adc_period)
+ break;
}
if (i > 0)
i--;
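To make the palmas_gpadc change above concrete: the old loop body never terminated early (both outcomes effectively continued), so i always reached the end of the loop and the trailing i-- picked the longest period regardless of adc_period. With the fix, taking adc_period = 100 as an example, the loop stops at i = 2 because (1000 * (1 << 2)) / 32 = 125 >= 100, and the following i-- steps back to i = 1, the last setting below the requested period.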
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 9f38cf3c7dc2..a48895046408 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -162,18 +162,13 @@ static const struct iio_chan_spec rcar_gyroadc_iio_channels_3[] = {
static int rcar_gyroadc_set_power(struct rcar_gyroadc *priv, bool on)
{
struct device *dev = priv->dev;
- int ret;
if (on) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- pm_runtime_put_noidle(dev);
+ return pm_runtime_resume_and_get(dev);
} else {
pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
+ return pm_runtime_put_autosuspend(dev);
}
-
- return ret;
}
static int rcar_gyroadc_read_raw(struct iio_dev *indio_dev,
@@ -535,7 +530,10 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto err_power_up;
+
rcar_gyroadc_hw_init(priv);
rcar_gyroadc_hw_start(priv);
@@ -552,6 +550,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
err_iio_device_register:
rcar_gyroadc_hw_stop(priv);
pm_runtime_put_sync(dev);
+err_power_up:
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
clk_disable_unprepare(priv->clk);
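The pm_runtime_resume_and_get() conversions here and in the stm32-adc, stm32-dfsdm, ti-ads1015 and atlas-sensor hunks below all share one shape; a minimal sketch, with an invented foo_hw_access() name:

static int foo_hw_access(struct device *dev)
{
	int ret;

	/*
	 * pm_runtime_resume_and_get() already drops the usage count on
	 * failure, so the old pm_runtime_put_noidle() error path goes away.
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... talk to the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	return pm_runtime_put_autosuspend(dev);
}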
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index 301cf66de695..00098caf6d9e 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -549,6 +549,7 @@ static const struct of_device_id sc27xx_adc_of_match[] = {
{ .compatible = "sprd,sc2731-adc", },
{ }
};
+MODULE_DEVICE_TABLE(of, sc27xx_adc_of_match);
static struct platform_driver sc27xx_adc_driver = {
.probe = sc27xx_adc_probe,
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index b25386b19373..5088de835bb1 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -449,7 +449,7 @@ static const struct stm32_adc_regspec stm32h7_adc_regspec = {
.smp_bits = stm32h7_smp_bits,
};
-/**
+/*
* STM32 ADC registers access routines
* @adc: stm32 adc instance
* @reg: reg offset in adc instance
@@ -851,7 +851,7 @@ static int stm32h7_adc_restore_selfcalib(struct iio_dev *indio_dev)
return 0;
}
-/**
+/*
* Fixed timeout value for ADC calibration.
* worst cases:
* - low clock frequency
@@ -1158,11 +1158,9 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
adc->bufi = 0;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
/* Apply sampling time settings */
stm32_adc_writel(adc, regs->smpr[0], adc->smpr_val[0]);
@@ -1364,11 +1362,9 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
struct device *dev = indio_dev->dev.parent;
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
adc->num_conv = bitmap_weight(scan_mask, indio_dev->masklength);
@@ -1413,11 +1409,9 @@ static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
struct device *dev = indio_dev->dev.parent;
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
if (!readval)
stm32_adc_writel(adc, reg, writeval);
@@ -1537,11 +1531,9 @@ static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
struct device *dev = indio_dev->dev.parent;
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
ret = stm32_adc_set_trig(indio_dev, indio_dev->trig);
if (ret) {
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index bb925a11c8ae..a627af9a825e 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -135,11 +135,9 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
int ret;
if (atomic_inc_return(&priv->n_active_ch) == 1) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto error_ret;
- }
/* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */
clk_src = priv->aclk ? 1 : 0;
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index b64718daa201..16fc608db36a 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -146,6 +146,11 @@ out:
return IRQ_HANDLED;
}
+static void adc081c_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int adc081c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -175,6 +180,11 @@ static int adc081c_probe(struct i2c_client *client,
if (err < 0)
return err;
+ err = devm_add_action_or_reset(&client->dev, adc081c_reg_disable,
+ adc->ref);
+ if (err)
+ return err;
+
iio->name = dev_name(&client->dev);
iio->modes = INDIO_DIRECT_MODE;
iio->info = &adc081c_info;
@@ -182,38 +192,14 @@ static int adc081c_probe(struct i2c_client *client,
iio->channels = model->channels;
iio->num_channels = ADC081C_NUM_CHANNELS;
- err = iio_triggered_buffer_setup(iio, NULL, adc081c_trigger_handler, NULL);
+ err = devm_iio_triggered_buffer_setup(&client->dev, iio, NULL,
+ adc081c_trigger_handler, NULL);
if (err < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
- goto err_regulator_disable;
+ return err;
}
- err = iio_device_register(iio);
- if (err < 0)
- goto err_buffer_cleanup;
-
- i2c_set_clientdata(client, iio);
-
- return 0;
-
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(iio);
-err_regulator_disable:
- regulator_disable(adc->ref);
-
- return err;
-}
-
-static int adc081c_remove(struct i2c_client *client)
-{
- struct iio_dev *iio = i2c_get_clientdata(client);
- struct adc081c *adc = iio_priv(iio);
-
- iio_device_unregister(iio);
- iio_triggered_buffer_cleanup(iio);
- regulator_disable(adc->ref);
-
- return 0;
+ return devm_iio_device_register(&client->dev, iio);
}
static const struct i2c_device_id adc081c_id[] = {
@@ -238,7 +224,6 @@ static struct i2c_driver adc081c_driver = {
.of_match_table = adc081c_of_match,
},
.probe = adc081c_probe,
- .remove = adc081c_remove,
.id_table = adc081c_id,
};
module_i2c_driver(adc081c_driver);
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index 0261b3cfc92b..fb5e72600b96 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -236,6 +236,11 @@ out:
return IRQ_HANDLED;
}
+static void adc0832_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int adc0832_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -287,36 +292,17 @@ static int adc0832_probe(struct spi_device *spi)
if (ret)
return ret;
- spi_set_drvdata(spi, indio_dev);
-
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- adc0832_trigger_handler, NULL);
+ ret = devm_add_action_or_reset(&spi->dev, adc0832_reg_disable,
+ adc->reg);
if (ret)
- goto err_reg_disable;
+ return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+ adc0832_trigger_handler, NULL);
if (ret)
- goto err_buffer_cleanup;
-
- return 0;
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-err_reg_disable:
- regulator_disable(adc->reg);
-
- return ret;
-}
-
-static int adc0832_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adc0832 *adc = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(adc->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id adc0832_dt_ids[] = {
@@ -343,7 +329,6 @@ static struct spi_driver adc0832_driver = {
.of_match_table = adc0832_dt_ids,
},
.probe = adc0832_probe,
- .remove = adc0832_remove,
.id_table = adc0832_id,
};
module_spi_driver(adc0832_driver);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 33aea961d850..ce3f5a3814f9 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -210,9 +210,6 @@ static int adc084s021_probe(struct spi_device *spi)
adc = iio_priv(indio_dev);
adc->spi = spi;
- /* Connect the SPI device and the iio dev */
- spi_set_drvdata(spi, indio_dev);
-
/* Initiate the Industrial I/O device */
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index 183b2245e89b..db902aef2abe 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -215,6 +215,11 @@ static const struct iio_info adc108s102_info = {
.update_scan_mode = &adc108s102_update_scan_mode,
};
+static void adc108s102_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int adc108s102_probe(struct spi_device *spi)
{
struct adc108s102_state *st;
@@ -239,6 +244,10 @@ static int adc108s102_probe(struct spi_device *spi)
dev_err(&spi->dev, "Cannot enable vref regulator\n");
return ret;
}
+ ret = devm_add_action_or_reset(&spi->dev, adc108s102_reg_disable,
+ st->reg);
+ if (ret)
+ return ret;
ret = regulator_get_voltage(st->reg);
if (ret < 0) {
@@ -249,7 +258,6 @@ static int adc108s102_probe(struct spi_device *spi)
st->va_millivolt = ret / 1000;
}
- spi_set_drvdata(spi, indio_dev);
st->spi = spi;
indio_dev->name = spi->modalias;
@@ -266,40 +274,18 @@ static int adc108s102_probe(struct spi_device *spi)
spi_message_init_with_transfers(&st->scan_single_msg,
&st->scan_single_xfer, 1);
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- &adc108s102_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+ &adc108s102_trigger_handler,
+ NULL);
if (ret)
- goto error_disable_reg;
+ return ret;
- ret = iio_device_register(indio_dev);
- if (ret) {
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
dev_err(&spi->dev, "Failed to register IIO device\n");
- goto error_cleanup_triggered_buffer;
- }
- return 0;
-
-error_cleanup_triggered_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
-error_disable_reg:
- regulator_disable(st->reg);
-
return ret;
}
-static int adc108s102_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adc108s102_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- regulator_disable(st->reg);
-
- return 0;
-}
-
static const struct of_device_id adc108s102_of_match[] = {
{ .compatible = "ti,adc108s102" },
{ }
@@ -327,7 +313,6 @@ static struct spi_driver adc108s102_driver = {
.acpi_match_table = ACPI_PTR(adc108s102_acpi_ids),
},
.probe = adc108s102_probe,
- .remove = adc108s102_remove,
.id_table = adc108s102_id,
};
module_spi_driver(adc108s102_driver);
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index 607791ffe7f0..75ca7f1c8726 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -169,6 +169,11 @@ static const struct iio_info ti_adc_info = {
.read_raw = ti_adc_read_raw,
};
+static void ti_adc_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int ti_adc_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -182,7 +187,6 @@ static int ti_adc_probe(struct spi_device *spi)
indio_dev->info = &ti_adc_info;
indio_dev->name = TI_ADC_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- spi_set_drvdata(spi, indio_dev);
data = iio_priv(indio_dev);
data->spi = spi;
@@ -203,42 +207,24 @@ static int ti_adc_probe(struct spi_device *spi)
}
data->ref = devm_regulator_get(&spi->dev, "vdda");
- if (!IS_ERR(data->ref)) {
- ret = regulator_enable(data->ref);
- if (ret < 0)
- return ret;
- }
+ if (IS_ERR(data->ref))
+ return PTR_ERR(data->ref);
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- ti_adc_trigger_handler, NULL);
- if (ret)
- goto error_regulator_disable;
+ ret = regulator_enable(data->ref);
+ if (ret < 0)
+ return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, ti_adc_reg_disable,
+ data->ref);
if (ret)
- goto error_unreg_buffer;
-
- return 0;
+ return ret;
-error_unreg_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
-error_regulator_disable:
- regulator_disable(data->ref);
-
- return ret;
-}
-
-static int ti_adc_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ti_adc_data *data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(data->ref);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+ ti_adc_trigger_handler, NULL);
+ if (ret)
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id ti_adc_dt_ids[] = {
@@ -261,7 +247,6 @@ static struct spi_driver ti_adc_driver = {
.of_match_table = ti_adc_dt_ids,
},
.probe = ti_adc_probe,
- .remove = ti_adc_remove,
.id_table = ti_adc_id,
};
module_spi_driver(ti_adc_driver);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 9fef39bcf997..b0352e91ac16 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -323,9 +323,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
struct device *dev = regmap_get_device(data->regmap);
if (on) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
} else {
pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
@@ -395,10 +393,14 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ads1015_data *data = iio_priv(indio_dev);
- s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */
+ /* Ensure natural alignment of timestamp */
+ struct {
+ s16 chan;
+ s64 timestamp __aligned(8);
+ } scan;
int chan, ret, res;
- memset(buf, 0, sizeof(buf));
+ memset(&scan, 0, sizeof(scan));
mutex_lock(&data->lock);
chan = find_first_bit(indio_dev->active_scan_mask,
@@ -409,10 +411,10 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
goto err;
}
- buf[0] = res;
+ scan.chan = res;
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
err:
@@ -1066,7 +1068,6 @@ static int ads1015_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
/* power down single shot mode */
return ads1015_set_conv_mode(data, ADS1015_SINGLESHOT);
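The ads1015 change above, like the vf610 and mxs-lradc ones elsewhere in this series, exists because iio_push_to_buffers_with_timestamp() stores an s64 timestamp at an 8-byte-aligned offset inside the scan buffer, so the buffer itself must guarantee that alignment. A minimal sketch of the pattern (the foo_* names and foo_read_hw() helper are hypothetical):

struct foo_scan {
	s16 chan;
	s64 timestamp __aligned(8);	/* force 8-byte alignment of the timestamp slot */
};

/* hypothetical hardware read; a real driver would access its registers here */
static s16 foo_read_hw(struct iio_dev *indio_dev)
{
	return 0;
}

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct foo_scan scan = { };

	scan.chan = foo_read_hw(indio_dev);
	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}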
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index b4a128b19188..17d0da5877a9 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -327,8 +327,6 @@ static int ads124s_probe(struct spi_device *spi)
ads124s_priv->chip_info = &ads124s_chip_info_tbl[spi_id->driver_data];
- spi_set_drvdata(spi, indio_dev);
-
ads124s_priv->spi = spi;
indio_dev->name = spi_id->name;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 764dab087b41..0c2025a22575 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -830,7 +830,6 @@ static int ads131e08_probe(struct spi_device *spi)
return ret;
indio_dev->name = st->info->name;
- indio_dev->dev.parent = &spi->dev;
indio_dev->info = &ads131e08_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -850,7 +849,7 @@ static int ads131e08_probe(struct spi_device *spi)
}
st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name, iio_device_id(indio_dev));
if (!st->trig) {
dev_err(&spi->dev, "failed to allocate IIO trigger\n");
return -ENOMEM;
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 2383eacada87..a2b83f0bd526 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->ring_xfer.tx_buf = &st->tx_buf[0];
st->ring_xfer.rx_buf = &st->rx_buf[0];
/* len will be set later */
- st->ring_xfer.cs_change = true;
spi_message_add_tail(&st->ring_xfer, &st->ring_msg);
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 16bcb37eebb7..79c803537dc4 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -383,7 +383,8 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
+ /* Ensure naturally aligned timestamp */
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
int i, j = 0;
for (i = 0; i < indio_dev->masklength; i++) {
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
new file mode 100644
index 000000000000..170950d5dd49
--- /dev/null
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -0,0 +1,712 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments TSC2046 SPI ADC driver
+ *
+ * Copyright (c) 2021 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger.h>
+
+/*
+ * The PENIRQ of the TSC2046 controller is implemented as a level shifter
+ * attached to the X+ line. If the voltage of the X+ line reaches a specific
+ * level, the IRQ is activated or deactivated.
+ * To make this kind of IRQ reusable as a trigger, the following additions
+ * were implemented:
+ * - rate limiting:
+ *   For the typical touchscreen use case, we need to trigger about every 10 ms.
+ * - hrtimer:
+ *   Continue triggering at least once after the IRQ was deactivated, then
+ *   deactivate this trigger to stop sampling in order to reduce power
+ *   consumption.
+ */
+
+#define TI_TSC2046_NAME "tsc2046"
+
+/* This driver doesn't aim at the peak continuous sample rate */
+#define TI_TSC2046_MAX_SAMPLE_RATE 125000
+#define TI_TSC2046_SAMPLE_BITS \
+ BITS_PER_TYPE(struct tsc2046_adc_atom)
+#define TI_TSC2046_MAX_CLK_FREQ \
+ (TI_TSC2046_MAX_SAMPLE_RATE * TI_TSC2046_SAMPLE_BITS)
+
+#define TI_TSC2046_SAMPLE_INTERVAL_US 10000
+
+#define TI_TSC2046_START BIT(7)
+#define TI_TSC2046_ADDR GENMASK(6, 4)
+#define TI_TSC2046_ADDR_TEMP1 7
+#define TI_TSC2046_ADDR_AUX 6
+#define TI_TSC2046_ADDR_X 5
+#define TI_TSC2046_ADDR_Z2 4
+#define TI_TSC2046_ADDR_Z1 3
+#define TI_TSC2046_ADDR_VBAT 2
+#define TI_TSC2046_ADDR_Y 1
+#define TI_TSC2046_ADDR_TEMP0 0
+
+/*
+ * The mode bit sets the resolution of the ADC. With this bit low, the next
+ * conversion has 12-bit resolution, whereas with this bit high, the next
+ * conversion has 8-bit resolution. This driver is optimized for 12-bit mode.
+ * So, for this driver, this bit should stay zero.
+ */
+#define TI_TSC2046_8BIT_MODE BIT(3)
+
+/*
+ * SER/DFR - The SER/DFR bit controls the reference mode, either single-ended
+ * (high) or differential (low).
+ */
+#define TI_TSC2046_SER BIT(2)
+
+/*
+ * If VREF_ON and ADC_ON are both zero, then the chip operates in
+ * auto-wake/suspend mode. In most cases these bits should stay zero.
+ */
+#define TI_TSC2046_PD1_VREF_ON BIT(1)
+#define TI_TSC2046_PD0_ADC_ON BIT(0)
+
+/*
+ * All supported devices can do 8- or 12-bit resolution. This driver
+ * supports only 12-bit mode; here we have a 16-bit data transfer, where
+ * the MSB and the 3 LSBs are 0.
+ */
+#define TI_TSC2046_DATA_12BIT GENMASK(14, 3)
+
+#define TI_TSC2046_MAX_CHAN 8
+
+/* Represents a HW sample */
+struct tsc2046_adc_atom {
+ /*
+ * Command transmitted to the controller. This field is empty on the RX
+ * buffer.
+ */
+ u8 cmd;
+ /*
+ * Data received from the controller. This field is empty for the TX
+ * buffer.
+ */
+ __be16 data;
+} __packed;
+
+/* Layout of atomic buffers within big buffer */
+struct tsc2046_adc_group_layout {
+ /* Group offset within the SPI RX buffer */
+ unsigned int offset;
+ /*
+ * Number of tsc2046_adc_atom structs with the same command gathered
+ * within the same group.
+ */
+ unsigned int count;
+ /*
+ * Settling samples (tsc2046_adc_atom structs) which should be skipped
+ * before the good samples start.
+ */
+ unsigned int skip;
+};
+
+struct tsc2046_adc_dcfg {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
+
+struct tsc2046_adc_ch_cfg {
+ unsigned int settling_time_us;
+ unsigned int oversampling_ratio;
+};
+
+struct tsc2046_adc_priv {
+ struct spi_device *spi;
+ const struct tsc2046_adc_dcfg *dcfg;
+
+ struct iio_trigger *trig;
+ struct hrtimer trig_timer;
+ spinlock_t trig_lock;
+ unsigned int trig_more_count;
+
+ struct spi_transfer xfer;
+ struct spi_message msg;
+
+ struct {
+ /* Scan data for each channel */
+ u16 data[TI_TSC2046_MAX_CHAN];
+ /* Timestamp */
+ s64 ts __aligned(8);
+ } scan_buf;
+
+ /*
+ * Lock to protect the layout and the SPI transfer buffer.
+ * tsc2046_adc_group_layout can be changed within update_scan_mode();
+ * in that case the l[] and tx/rx buffers would be out of sync with each
+ * other.
+ */
+ struct mutex slock;
+ struct tsc2046_adc_group_layout l[TI_TSC2046_MAX_CHAN];
+ struct tsc2046_adc_atom *rx;
+ struct tsc2046_adc_atom *tx;
+
+ struct tsc2046_adc_atom *rx_one;
+ struct tsc2046_adc_atom *tx_one;
+
+ unsigned int count;
+ unsigned int groups;
+ u32 effective_speed_hz;
+ u32 scan_interval_us;
+ u32 time_per_scan_us;
+ u32 time_per_bit_ns;
+
+ struct tsc2046_adc_ch_cfg ch_cfg[TI_TSC2046_MAX_CHAN];
+};
+
+#define TI_TSC2046_V_CHAN(index, bits, name) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .datasheet_name = #name, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = bits, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define DECLARE_TI_TSC2046_8_CHANNELS(name, bits) \
+const struct iio_chan_spec name ## _channels[] = { \
+ TI_TSC2046_V_CHAN(0, bits, TEMP0), \
+ TI_TSC2046_V_CHAN(1, bits, Y), \
+ TI_TSC2046_V_CHAN(2, bits, VBAT), \
+ TI_TSC2046_V_CHAN(3, bits, Z1), \
+ TI_TSC2046_V_CHAN(4, bits, Z2), \
+ TI_TSC2046_V_CHAN(5, bits, X), \
+ TI_TSC2046_V_CHAN(6, bits, AUX), \
+ TI_TSC2046_V_CHAN(7, bits, TEMP1), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
+static DECLARE_TI_TSC2046_8_CHANNELS(tsc2046_adc, 12);
+
+static const struct tsc2046_adc_dcfg tsc2046_adc_dcfg_tsc2046e = {
+ .channels = tsc2046_adc_channels,
+ .num_channels = ARRAY_SIZE(tsc2046_adc_channels),
+};
+
+/*
+ * Convert time to a number of samples which can be transferred within this
+ * time.
+ */
+static unsigned int tsc2046_adc_time_to_count(struct tsc2046_adc_priv *priv,
+ unsigned long time)
+{
+ unsigned int bit_count, sample_count;
+
+ bit_count = DIV_ROUND_UP(time * NSEC_PER_USEC, priv->time_per_bit_ns);
+ sample_count = DIV_ROUND_UP(bit_count, TI_TSC2046_SAMPLE_BITS);
+
+ dev_dbg(&priv->spi->dev, "Effective speed %u, time per bit: %u, count bits: %u, count samples: %u\n",
+ priv->effective_speed_hz, priv->time_per_bit_ns,
+ bit_count, sample_count);
+
+ return sample_count;
+}
+
+static u8 tsc2046_adc_get_cmd(struct tsc2046_adc_priv *priv, int ch_idx,
+ bool keep_power)
+{
+ u32 pd;
+
+ /*
+ * If the PD bits are 0, the controller will automatically disable the ADC
+ * and VREF and enable the IRQ.
+ */
+ if (keep_power)
+ pd = TI_TSC2046_PD0_ADC_ON;
+ else
+ pd = 0;
+
+ return TI_TSC2046_START | FIELD_PREP(TI_TSC2046_ADDR, ch_idx) | pd;
+}
+
+static u16 tsc2046_adc_get_value(struct tsc2046_adc_atom *buf)
+{
+ return FIELD_GET(TI_TSC2046_DATA_12BIT, get_unaligned_be16(&buf->data));
+}
+
+static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
+ u32 *effective_speed_hz)
+{
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int ret;
+
+ memset(&xfer, 0, sizeof(xfer));
+ priv->tx_one->cmd = tsc2046_adc_get_cmd(priv, ch_idx, false);
+ priv->tx_one->data = 0;
+ xfer.tx_buf = priv->tx_one;
+ xfer.rx_buf = priv->rx_one;
+ xfer.len = sizeof(*priv->tx_one);
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ /*
+ * We aren't using spi_write_then_read() because we need to be able
+ * to get hold of the effective_speed_hz from the xfer.
+ */
+ ret = spi_sync(priv->spi, &msg);
+ if (ret) {
+ dev_err_ratelimited(&priv->spi->dev, "SPI transfer failed %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (effective_speed_hz)
+ *effective_speed_hz = xfer.effective_speed_hz;
+
+ return tsc2046_adc_get_value(priv->rx_one);
+}
+
+static size_t tsc2046_adc_group_set_layout(struct tsc2046_adc_priv *priv,
+ unsigned int group,
+ unsigned int ch_idx)
+{
+ struct tsc2046_adc_ch_cfg *ch = &priv->ch_cfg[ch_idx];
+ struct tsc2046_adc_group_layout *cur;
+ unsigned int max_count, count_skip;
+ unsigned int offset = 0;
+
+ if (group)
+ offset = priv->l[group - 1].offset + priv->l[group - 1].count;
+
+ count_skip = tsc2046_adc_time_to_count(priv, ch->settling_time_us);
+ max_count = count_skip + ch->oversampling_ratio;
+
+ cur = &priv->l[group];
+ cur->offset = offset;
+ cur->count = max_count;
+ cur->skip = count_skip;
+
+ return sizeof(*priv->tx) * max_count;
+}
+
+static void tsc2046_adc_group_set_cmd(struct tsc2046_adc_priv *priv,
+ unsigned int group, int ch_idx)
+{
+ struct tsc2046_adc_group_layout *l = &priv->l[group];
+ unsigned int i;
+ u8 cmd;
+
+ /*
+ * Do not enable automatic power down on working samples. Otherwise the
+ * plates will never be completely charged.
+ */
+ cmd = tsc2046_adc_get_cmd(priv, ch_idx, true);
+
+ for (i = 0; i < l->count - 1; i++)
+ priv->tx[l->offset + i].cmd = cmd;
+
+ /* automatically power down on last sample */
+ priv->tx[l->offset + i].cmd = tsc2046_adc_get_cmd(priv, ch_idx, false);
+}
+
+static u16 tsc2046_adc_get_val(struct tsc2046_adc_priv *priv, int group)
+{
+ struct tsc2046_adc_group_layout *l;
+ unsigned int val, val_normalized = 0;
+ int valid_count, i;
+
+ l = &priv->l[group];
+ valid_count = l->count - l->skip;
+
+ for (i = 0; i < valid_count; i++) {
+ val = tsc2046_adc_get_value(&priv->rx[l->offset + l->skip + i]);
+ val_normalized += val;
+ }
+
+ return DIV_ROUND_UP(val_normalized, valid_count);
+}
+
+static int tsc2046_adc_scan(struct iio_dev *indio_dev)
+{
+ struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->spi->dev;
+ int group;
+ int ret;
+
+ ret = spi_sync(priv->spi, &priv->msg);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "SPI transfer failed: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ for (group = 0; group < priv->groups; group++)
+ priv->scan_buf.data[group] = tsc2046_adc_get_val(priv, group);
+
+ ret = iio_push_to_buffers_with_timestamp(indio_dev, &priv->scan_buf,
+ iio_get_time_ns(indio_dev));
+ /* If the consumer is kfifo, we may get an -EBUSY here; ignore it. */
+ if (ret < 0 && ret != -EBUSY) {
+ dev_err_ratelimited(dev, "Failed to push scan buffer %pe\n",
+ ERR_PTR(ret));
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tsc2046_adc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+
+ mutex_lock(&priv->slock);
+ tsc2046_adc_scan(indio_dev);
+ mutex_unlock(&priv->slock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *active_scan_mask)
+{
+ struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+ unsigned int ch_idx, group = 0;
+ size_t size;
+
+ mutex_lock(&priv->slock);
+
+ size = 0;
+ for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
+ size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
+ tsc2046_adc_group_set_cmd(priv, group, ch_idx);
+ group++;
+ }
+
+ priv->groups = group;
+ priv->xfer.len = size;
+ priv->time_per_scan_us = size * 8 * priv->time_per_bit_ns / NSEC_PER_USEC;
+
+ if (priv->scan_interval_us > priv->time_per_scan_us)
+ dev_warn(&priv->spi->dev, "The scan interval (%d) is less than the calculated scan time (%d)\n",
+ priv->scan_interval_us, priv->time_per_scan_us);
+
+ mutex_unlock(&priv->slock);
+
+ return 0;
+}
+
+static const struct iio_info tsc2046_adc_info = {
+ .update_scan_mode = tsc2046_adc_update_scan_mode,
+};
+
+static enum hrtimer_restart tsc2046_adc_trig_more(struct hrtimer *hrtimer)
+{
+ struct tsc2046_adc_priv *priv = container_of(hrtimer,
+ struct tsc2046_adc_priv,
+ trig_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->trig_lock, flags);
+
+ disable_irq_nosync(priv->spi->irq);
+
+ priv->trig_more_count++;
+ iio_trigger_poll(priv->trig);
+
+ spin_unlock_irqrestore(&priv->trig_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t tsc2046_adc_irq(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+
+ spin_lock(&priv->trig_lock);
+
+ hrtimer_try_to_cancel(&priv->trig_timer);
+
+ priv->trig_more_count = 0;
+ disable_irq_nosync(priv->spi->irq);
+ iio_trigger_poll(priv->trig);
+
+ spin_unlock(&priv->trig_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void tsc2046_adc_reenable_trigger(struct iio_trigger *trig)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+ unsigned long flags;
+ int delta;
+
+ /*
+ * We could sample as fast as the bus allows, but usually we do not need
+ * that many samples. Reduce the sample rate for the default (touchscreen)
+ * use case.
+ * Currently we do not need a highly precise sample rate; the calculated
+ * numbers are good enough.
+ */
+ delta = priv->scan_interval_us - priv->time_per_scan_us;
+ if (delta > 0)
+ fsleep(delta);
+
+ spin_lock_irqsave(&priv->trig_lock, flags);
+
+ /*
+ * We need to trigger at least one extra sample to detect a state
+ * difference on the ADC side.
+ */
+ if (!priv->trig_more_count) {
+ int timeout_ms = DIV_ROUND_UP(priv->scan_interval_us,
+ USEC_PER_MSEC);
+
+ hrtimer_start(&priv->trig_timer, ms_to_ktime(timeout_ms),
+ HRTIMER_MODE_REL_SOFT);
+ }
+
+ enable_irq(priv->spi->irq);
+
+ spin_unlock_irqrestore(&priv->trig_lock, flags);
+}
+
+static int tsc2046_adc_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+
+ if (enable) {
+ enable_irq(priv->spi->irq);
+ } else {
+ disable_irq(priv->spi->irq);
+ hrtimer_try_to_cancel(&priv->trig_timer);
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops tsc2046_adc_trigger_ops = {
+ .set_trigger_state = tsc2046_adc_set_trigger_state,
+ .reenable = tsc2046_adc_reenable_trigger,
+};
+
+static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
+{
+ unsigned int ch_idx;
+ size_t size;
+ int ret;
+
+ priv->tx_one = devm_kzalloc(&priv->spi->dev, sizeof(*priv->tx_one),
+ GFP_KERNEL);
+ if (!priv->tx_one)
+ return -ENOMEM;
+
+ priv->rx_one = devm_kzalloc(&priv->spi->dev, sizeof(*priv->rx_one),
+ GFP_KERNEL);
+ if (!priv->rx_one)
+ return -ENOMEM;
+
+ /*
+ * Make a dummy read to set the initial power state and get the real SPI
+ * clock frequency. It does not seem to matter which channel is used for
+ * this.
+ */
+ ret = tsc2046_adc_read_one(priv, TI_TSC2046_ADDR_TEMP0,
+ &priv->effective_speed_hz);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * In case the SPI controller does not report effective_speed_hz, use the
+ * configured value and hope it matches.
+ */
+ if (!priv->effective_speed_hz)
+ priv->effective_speed_hz = priv->spi->max_speed_hz;
+
+ priv->scan_interval_us = TI_TSC2046_SAMPLE_INTERVAL_US;
+ priv->time_per_bit_ns = DIV_ROUND_UP(NSEC_PER_SEC,
+ priv->effective_speed_hz);
+
+ /*
+ * Calculate and allocate the maximum buffer size needed if all channels
+ * are enabled.
+ */
+ size = 0;
+ for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
+ size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
+
+ priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
+ if (!priv->tx)
+ return -ENOMEM;
+
+ priv->rx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
+ if (!priv->rx)
+ return -ENOMEM;
+
+ priv->xfer.tx_buf = priv->tx;
+ priv->xfer.rx_buf = priv->rx;
+ priv->xfer.len = size;
+ spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+ return 0;
+}
+
+static void tsc2046_adc_parse_fwnode(struct tsc2046_adc_priv *priv)
+{
+ struct fwnode_handle *child;
+ struct device *dev = &priv->spi->dev;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->ch_cfg); i++) {
+ priv->ch_cfg[i].settling_time_us = 1;
+ priv->ch_cfg[i].oversampling_ratio = 1;
+ }
+
+ device_for_each_child_node(dev, child) {
+ u32 stl, overs, reg;
+ int ret;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "invalid reg on %pfw, err: %pe\n", child,
+ ERR_PTR(ret));
+ continue;
+ }
+
+ if (reg >= ARRAY_SIZE(priv->ch_cfg)) {
+ dev_err(dev, "%pfw: Unsupported reg value: %i, max supported is: %zu.\n",
+ child, reg, ARRAY_SIZE(priv->ch_cfg));
+ continue;
+ }
+
+ ret = fwnode_property_read_u32(child, "settling-time-us", &stl);
+ if (!ret)
+ priv->ch_cfg[reg].settling_time_us = stl;
+
+ ret = fwnode_property_read_u32(child, "oversampling-ratio",
+ &overs);
+ if (!ret)
+ priv->ch_cfg[reg].oversampling_ratio = overs;
+ }
+}
+
+static int tsc2046_adc_probe(struct spi_device *spi)
+{
+ const struct tsc2046_adc_dcfg *dcfg;
+ struct device *dev = &spi->dev;
+ struct tsc2046_adc_priv *priv;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ int ret;
+
+ if (spi->max_speed_hz > TI_TSC2046_MAX_CLK_FREQ) {
+ dev_err(dev, "SPI max_speed_hz is too high: %d Hz. Max supported freq is %zu Hz\n",
+ spi->max_speed_hz, TI_TSC2046_MAX_CLK_FREQ);
+ return -EINVAL;
+ }
+
+ dcfg = device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ spi->bits_per_word = 8;
+ spi->mode &= ~SPI_MODE_X_MASK;
+ spi->mode |= SPI_MODE_0;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Error in SPI setup\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->dcfg = dcfg;
+
+ priv->spi = spi;
+
+ indio_dev->name = TI_TSC2046_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
+ indio_dev->channels = dcfg->channels;
+ indio_dev->num_channels = dcfg->num_channels;
+ indio_dev->info = &tsc2046_adc_info;
+
+ tsc2046_adc_parse_fwnode(priv);
+
+ ret = tsc2046_adc_setup_spi_msg(priv);
+ if (ret)
+ return ret;
+
+ mutex_init(&priv->slock);
+
+ ret = devm_request_irq(dev, spi->irq, &tsc2046_adc_irq,
+ IRQF_NO_AUTOEN, indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+
+ trig = devm_iio_trigger_alloc(dev, "touchscreen-%s", indio_dev->name);
+ if (!trig)
+ return -ENOMEM;
+
+ priv->trig = trig;
+ iio_trigger_set_drvdata(trig, indio_dev);
+ trig->ops = &tsc2046_adc_trigger_ops;
+
+ spin_lock_init(&priv->trig_lock);
+ hrtimer_init(&priv->trig_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_SOFT);
+ priv->trig_timer.function = tsc2046_adc_trig_more;
+
+ ret = devm_iio_trigger_register(dev, trig);
+ if (ret) {
+ dev_err(dev, "failed to register trigger\n");
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ &tsc2046_adc_trigger_handler, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to setup triggered buffer\n");
+ return ret;
+ }
+
+ /* set default trigger */
+ indio_dev->trig = iio_trigger_get(priv->trig);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id ads7950_of_table[] = {
+ { .compatible = "ti,tsc2046e-adc", .data = &tsc2046_adc_dcfg_tsc2046e },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ads7950_of_table);
+
+static struct spi_driver tsc2046_adc_driver = {
+ .driver = {
+ .name = "tsc2046",
+ .of_match_table = ads7950_of_table,
+ },
+ .probe = tsc2046_adc_probe,
+};
+module_spi_driver(tsc2046_adc_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("TI TSC2046 ADC");
+MODULE_LICENSE("GPL v2");
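To put numbers on the limits in the new driver above: struct tsc2046_adc_atom packs a command byte and a __be16 sample into 3 bytes, so TI_TSC2046_SAMPLE_BITS is 24 and TI_TSC2046_MAX_CLK_FREQ works out to 125000 * 24 = 3 MHz; the max_speed_hz check in probe rejects SPI buses configured above that. Likewise time_per_bit_ns is roughly 10^9 / effective_speed_hz, which is what tsc2046_adc_time_to_count() uses to turn a per-channel settling time into a number of throw-away samples.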
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 1d794cf3e3f1..fd57fc43e8e5 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -167,7 +167,11 @@ struct vf610_adc {
u32 sample_freq_avail[5];
struct completion completion;
- u16 buffer[8];
+ /* Ensure the timestamp is naturally aligned */
+ struct {
+ u16 chan;
+ s64 timestamp __aligned(8);
+ } scan;
};
static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
@@ -579,9 +583,9 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
if (coco & VF610_ADC_HS_COCO0) {
info->value = vf610_adc_read_data(info);
if (iio_buffer_enabled(indio_dev)) {
- info->buffer[0] = info->value;
+ info->scan.chan = info->value;
iio_push_to_buffers_with_timestamp(indio_dev,
- info->buffer,
+ &info->scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
} else
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 6914c1900ed0..198d2916266d 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -743,7 +743,7 @@ static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
int ret;
trig = devm_iio_trigger_alloc(dev, "%s%d-%s", indio_dev->name,
- indio_dev->id, name);
+ iio_device_id(indio_dev), name);
if (trig == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index e42ea2b1707d..774eb3044edd 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -29,6 +29,7 @@ struct rescale {
struct iio_channel *source;
struct iio_chan_spec chan;
struct iio_chan_spec_ext_info *ext_info;
+ bool chan_processed;
s32 numerator;
s32 denominator;
};
@@ -43,10 +44,27 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return iio_read_channel_raw(rescale->source, val);
+ if (rescale->chan_processed)
+ /*
+ * When only processed channels are supported, we
+ * read the processed data and scale it by 1/1
+ * augmented with whatever the rescaler has calculated.
+ */
+ return iio_read_channel_processed(rescale->source, val);
+ else
+ return iio_read_channel_raw(rescale->source, val);
case IIO_CHAN_INFO_SCALE:
- ret = iio_read_channel_scale(rescale->source, val, val2);
+ if (rescale->chan_processed) {
+ /*
+ * Processed channels are scaled 1-to-1
+ */
+ *val = 1;
+ *val2 = 1;
+ ret = IIO_VAL_FRACTIONAL;
+ } else {
+ ret = iio_read_channel_scale(rescale->source, val, val2);
+ }
switch (ret) {
case IIO_VAL_FRACTIONAL:
*val *= rescale->numerator;
@@ -130,16 +148,27 @@ static int rescale_configure_channel(struct device *dev,
chan->ext_info = rescale->ext_info;
chan->type = rescale->cfg->type;
- if (!iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) ||
- !iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
- dev_err(dev, "source channel does not support raw/scale\n");
+ if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) ||
+ iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
+ dev_info(dev, "using raw+scale source channel\n");
+ } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
+ dev_info(dev, "using processed channel\n");
+ rescale->chan_processed = true;
+ } else {
+ dev_err(dev, "source channel is not supported\n");
return -EINVAL;
}
chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE);
- if (iio_channel_has_available(schan, IIO_CHAN_INFO_RAW))
+ /*
+ * Using .read_avail() is fringe to begin with and makes no sense
+ * whatsoever for processed channels, so we make sure that this cannot
+ * be called on a processed channel.
+ */
+ if (iio_channel_has_available(schan, IIO_CHAN_INFO_RAW) &&
+ !rescale->chan_processed)
chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
return 0;
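To make the processed-channel handling above concrete: for a processed source channel the rescaler reports the processed reading as the raw value and, via the IIO_VAL_FRACTIONAL branch, a scale of numerator/denominator applied to the 1/1 set above. With illustrative values numerator = 3 and denominator = 2, a consumer computing raw * scale gets 1.5x the processed reading, which is exactly the rescaling the analog front end describes.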
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index d76179878ff9..1ac94c4e9792 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -228,9 +228,9 @@ static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_put(buffer);
}
-static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
- iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+ iio_dmaengine_buffer_free(buffer);
}
/**
@@ -247,21 +247,17 @@ static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
{
- struct iio_buffer **bufferp, *buffer;
-
- bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
- sizeof(*bufferp), GFP_KERNEL);
- if (!bufferp)
- return ERR_PTR(-ENOMEM);
+ struct iio_buffer *buffer;
+ int ret;
buffer = iio_dmaengine_buffer_alloc(dev, channel);
- if (IS_ERR(buffer)) {
- devres_free(bufferp);
+ if (IS_ERR(buffer))
return buffer;
- }
- *bufferp = buffer;
- devres_add(dev, bufferp);
+ ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+ buffer);
+ if (ret)
+ return ERR_PTR(ret);
return buffer;
}
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index f2d27788f666..87d9aabd20c7 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -137,9 +137,9 @@ void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
}
EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
-static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+static void devm_iio_hw_consumer_release(void *iio_hwc)
{
- iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+ iio_hw_consumer_free(iio_hwc);
}
/**
@@ -153,20 +153,17 @@ static void devm_iio_hw_consumer_release(struct device *dev, void *res)
*/
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
{
- struct iio_hw_consumer **ptr, *iio_hwc;
-
- ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return NULL;
+ struct iio_hw_consumer *iio_hwc;
+ int ret;
iio_hwc = iio_hw_consumer_alloc(dev);
- if (IS_ERR(iio_hwc)) {
- devres_free(ptr);
- } else {
- *ptr = iio_hwc;
- devres_add(dev, ptr);
- }
+ if (IS_ERR(iio_hwc))
+ return iio_hwc;
+
+ ret = devm_add_action_or_reset(dev, devm_iio_hw_consumer_release,
+ iio_hwc);
+ if (ret)
+ return ERR_PTR(ret);
return iio_hwc;
}
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index b2b1b7d27af4..f77c4538141e 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -56,7 +56,7 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
indio_dev,
"%s_consumer%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (indio_dev->pollfunc == NULL) {
ret = -ENOMEM;
goto error_kfifo_free;
@@ -96,9 +96,9 @@ void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(iio_triggered_buffer_cleanup);
-static void devm_iio_triggered_buffer_clean(struct device *dev, void *res)
+static void devm_iio_triggered_buffer_clean(void *indio_dev)
{
- iio_triggered_buffer_cleanup(*(struct iio_dev **)res);
+ iio_triggered_buffer_cleanup(indio_dev);
}
int devm_iio_triggered_buffer_setup_ext(struct device *dev,
@@ -108,24 +108,15 @@ int devm_iio_triggered_buffer_setup_ext(struct device *dev,
const struct iio_buffer_setup_ops *ops,
const struct attribute **buffer_attrs)
{
- struct iio_dev **ptr;
int ret;
- ptr = devres_alloc(devm_iio_triggered_buffer_clean, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- *ptr = indio_dev;
-
ret = iio_triggered_buffer_setup_ext(indio_dev, h, thread, ops,
buffer_attrs);
- if (!ret)
- devres_add(dev, ptr);
- else
- devres_free(ptr);
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action_or_reset(dev, devm_iio_triggered_buffer_clean,
+ indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup_ext);
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index 10bb431bc3ce..a4920646e9be 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -132,17 +132,32 @@ config SENSIRION_SGP30
module will be called sgp30.
config SPS30
- tristate "SPS30 particulate matter sensor"
- depends on I2C
- select CRC8
+ tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+
+config SPS30_I2C
+ tristate "SPS30 particulate matter sensor I2C driver"
+ depends on I2C
+ select SPS30
+ select CRC8
help
- Say Y here to build support for the Sensirion SPS30 particulate
- matter sensor.
+ Say Y here to build support for the Sensirion SPS30 I2C interface
+ driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sps30_i2c.
+
+config SPS30_SERIAL
+ tristate "SPS30 particulate matter sensor serial driver"
+ depends on SERIAL_DEV_BUS
+ select SPS30
+ help
+ Say Y here to build support for the Sensirion SPS30 serial interface
+ driver.
To compile this driver as a module, choose M here: the module will
- be called sps30.
+ be called sps30_serial.
config VZ89X
tristate "SGX Sensortech MiCS VZ89X VOC sensor"
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index fef63dd5bf92..4898690cc155 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -17,4 +17,6 @@ obj-$(CONFIG_SCD30_I2C) += scd30_i2c.o
obj-$(CONFIG_SCD30_SERIAL) += scd30_serial.o
obj-$(CONFIG_SENSIRION_SGP30) += sgp30.o
obj-$(CONFIG_SPS30) += sps30.o
+obj-$(CONFIG_SPS30_I2C) += sps30_i2c.o
+obj-$(CONFIG_SPS30_SERIAL) += sps30_serial.o
obj-$(CONFIG_VZ89X) += vz89x.o
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 56ba6c82b501..9cb99585b6ff 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -91,8 +91,8 @@ struct atlas_data {
struct regmap *regmap;
struct irq_work work;
unsigned int interrupt_enabled;
-
- __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
+ /* 96-bit data + 32-bit pad + 64-bit timestamp */
+ __be32 buffer[6] __aligned(8);
};
static const struct regmap_config atlas_regmap_config = {
@@ -410,11 +410,9 @@ static int atlas_buffer_postenable(struct iio_dev *indio_dev)
struct atlas_data *data = iio_priv(indio_dev);
int ret;
- ret = pm_runtime_get_sync(&data->client->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
+ if (ret)
return ret;
- }
return atlas_set_interrupt(data, true);
}
@@ -487,11 +485,9 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
int suspended = pm_runtime_suspended(dev);
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
return ret;
- }
if (suspended)
msleep(data->chip->delay);
@@ -640,7 +636,7 @@ static int atlas_probe(struct i2c_client *client,
indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name, iio_device_id(indio_dev));
if (!trig)
return -ENOMEM;
@@ -741,7 +737,6 @@ static int atlas_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
return atlas_set_powermode(data, 0);
}
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 29c0dfa4702b..74cf89c82c0a 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -11,7 +11,6 @@
* Note: SDO pin cannot be left floating otherwise I2C address
* will be undefined.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -42,12 +41,6 @@ static const struct i2c_device_id bme680_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bme680_i2c_id);
-static const struct acpi_device_id bme680_acpi_match[] = {
- {"BME0680", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
-
static const struct of_device_id bme680_of_i2c_match[] = {
{ .compatible = "bosch,bme680", },
{},
@@ -57,7 +50,6 @@ MODULE_DEVICE_TABLE(of, bme680_of_i2c_match);
static struct i2c_driver bme680_i2c_driver = {
.driver = {
.name = "bme680_i2c",
- .acpi_match_table = ACPI_PTR(bme680_acpi_match),
.of_match_table = bme680_of_i2c_match,
},
.probe = bme680_i2c_probe,
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index 6f56ad48cc40..cc579a7ac5ce 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2018 Himanshu Jha <himanshujha199640@gmail.com>
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
@@ -145,12 +144,6 @@ static const struct spi_device_id bme680_spi_id[] = {
};
MODULE_DEVICE_TABLE(spi, bme680_spi_id);
-static const struct acpi_device_id bme680_acpi_match[] = {
- {"BME0680", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
-
static const struct of_device_id bme680_of_spi_match[] = {
{ .compatible = "bosch,bme680", },
{},
@@ -160,7 +153,6 @@ MODULE_DEVICE_TABLE(of, bme680_of_spi_match);
static struct spi_driver bme680_spi_driver = {
.driver = {
.name = "bme680_spi",
- .acpi_match_table = ACPI_PTR(bme680_acpi_match),
.of_match_table = bme680_of_spi_match,
},
.probe = bme680_spi_probe,
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 886e96496dbf..847194fa1e46 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -491,7 +491,7 @@ static int ccs811_probe(struct i2c_client *client,
data->drdy_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->drdy_trig) {
ret = -ENOMEM;
goto err_poweroff;
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index d89f117dd0ef..9fe6bbe9ee04 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -640,7 +640,8 @@ static int scd30_setup_trigger(struct iio_dev *indio_dev)
struct iio_trigger *trig;
int ret;
- trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name, indio_dev->id);
+ trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
if (!trig) {
dev_err(dev, "failed to allocate trigger\n");
return -ENOMEM;
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 1029c457be15..2343d444604d 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -425,7 +425,7 @@ static int sgp_check_compat(struct sgp_data *data,
product = SGP_VERS_PRODUCT(data);
if (product != product_id) {
- dev_err(dev, "sensor reports a different product: 0x%04hx\n",
+ dev_err(dev, "sensor reports a different product: 0x%04x\n",
product);
return -ENODEV;
}
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index 2ea9a5c4d846..d51314505115 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -3,11 +3,8 @@
* Sensirion SPS30 particulate matter sensor driver
*
* Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
- *
- * I2C slave address: 0x69
*/
-#include <asm/unaligned.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -19,27 +16,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#define SPS30_CRC8_POLYNOMIAL 0x31
-/* max number of bytes needed to store PM measurements or serial string */
-#define SPS30_MAX_READ_SIZE 48
+#include "sps30.h"
+
/* sensor measures reliably up to 3000 ug / m3 */
#define SPS30_MAX_PM 3000
/* minimum and maximum self cleaning periods in seconds */
#define SPS30_AUTO_CLEANING_PERIOD_MIN 0
#define SPS30_AUTO_CLEANING_PERIOD_MAX 604800
-/* SPS30 commands */
-#define SPS30_START_MEAS 0x0010
-#define SPS30_STOP_MEAS 0x0104
-#define SPS30_RESET 0xd304
-#define SPS30_READ_DATA_READY_FLAG 0x0202
-#define SPS30_READ_DATA 0x0300
-#define SPS30_READ_SERIAL 0xd033
-#define SPS30_START_FAN_CLEANING 0x5607
-#define SPS30_AUTO_CLEANING_PERIOD 0x8004
-/* not a sensor command per se, used only to distinguish write from read */
-#define SPS30_READ_AUTO_CLEANING_PERIOD 0x8005
-
enum {
PM1,
PM2P5,
@@ -52,114 +36,9 @@ enum {
MEASURING,
};
-struct sps30_state {
- struct i2c_client *client;
- /*
- * Guards against concurrent access to sensor registers.
- * Must be held whenever sequence of commands is to be executed.
- */
- struct mutex lock;
- int state;
-};
-
-DECLARE_CRC8_TABLE(sps30_crc8_table);
-
-static int sps30_write_then_read(struct sps30_state *state, u8 *txbuf,
- int txsize, u8 *rxbuf, int rxsize)
-{
- int ret;
-
- /*
- * Sensor does not support repeated start so instead of
- * sending two i2c messages in a row we just send one by one.
- */
- ret = i2c_master_send(state->client, txbuf, txsize);
- if (ret != txsize)
- return ret < 0 ? ret : -EIO;
-
- if (!rxbuf)
- return 0;
-
- ret = i2c_master_recv(state->client, rxbuf, rxsize);
- if (ret != rxsize)
- return ret < 0 ? ret : -EIO;
-
- return 0;
-}
-
-static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size)
-{
- /*
- * Internally sensor stores measurements in a following manner:
- *
- * PM1: upper two bytes, crc8, lower two bytes, crc8
- * PM2P5: upper two bytes, crc8, lower two bytes, crc8
- * PM4: upper two bytes, crc8, lower two bytes, crc8
- * PM10: upper two bytes, crc8, lower two bytes, crc8
- *
- * What follows next are number concentration measurements and
- * typical particle size measurement which we omit.
- */
- u8 buf[SPS30_MAX_READ_SIZE] = { cmd >> 8, cmd };
- int i, ret = 0;
-
- switch (cmd) {
- case SPS30_START_MEAS:
- buf[2] = 0x03;
- buf[3] = 0x00;
- buf[4] = crc8(sps30_crc8_table, &buf[2], 2, CRC8_INIT_VALUE);
- ret = sps30_write_then_read(state, buf, 5, NULL, 0);
- break;
- case SPS30_STOP_MEAS:
- case SPS30_RESET:
- case SPS30_START_FAN_CLEANING:
- ret = sps30_write_then_read(state, buf, 2, NULL, 0);
- break;
- case SPS30_READ_AUTO_CLEANING_PERIOD:
- buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
- buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff);
- fallthrough;
- case SPS30_READ_DATA_READY_FLAG:
- case SPS30_READ_DATA:
- case SPS30_READ_SERIAL:
- /* every two data bytes are checksummed */
- size += size / 2;
- ret = sps30_write_then_read(state, buf, 2, buf, size);
- break;
- case SPS30_AUTO_CLEANING_PERIOD:
- buf[2] = data[0];
- buf[3] = data[1];
- buf[4] = crc8(sps30_crc8_table, &buf[2], 2, CRC8_INIT_VALUE);
- buf[5] = data[2];
- buf[6] = data[3];
- buf[7] = crc8(sps30_crc8_table, &buf[5], 2, CRC8_INIT_VALUE);
- ret = sps30_write_then_read(state, buf, 8, NULL, 0);
- break;
- }
-
- if (ret)
- return ret;
-
- /* validate received data and strip off crc bytes */
- for (i = 0; i < size; i += 3) {
- u8 crc = crc8(sps30_crc8_table, &buf[i], 2, CRC8_INIT_VALUE);
-
- if (crc != buf[i + 2]) {
- dev_err(&state->client->dev,
- "data integrity check failed\n");
- return -EIO;
- }
-
- *data++ = buf[i];
- *data++ = buf[i + 1];
- }
-
- return 0;
-}
-
-static s32 sps30_float_to_int_clamped(const u8 *fp)
+static s32 sps30_float_to_int_clamped(__be32 *fp)
{
- int val = get_unaligned_be32(fp);
+ int val = be32_to_cpup(fp);
int mantissa = val & GENMASK(22, 0);
/* this is fine since passed float is always non-negative */
int exp = val >> 23;
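
The reworked sps30_float_to_int_clamped() decodes a big-endian IEEE 754 value by splitting it into exponent and mantissa. Below is a stand-alone sketch of the same decoding, runnable in user space for sanity-checking raw readings; it simply truncates toward zero and clamps at the SPS30_MAX_PM limit visible in this file, so it is not a drop-in for the driver helper.

/* Stand-alone sketch: decode a big-endian IEEE 754 float (raw bytes) into
 * an integer, truncated toward zero and clamped to 3000 ug/m3.
 * Build with: cc -o fp2int fp2int.c
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PM 3000 /* mirrors SPS30_MAX_PM */

static int32_t float_to_int_clamped(const uint8_t *fp)
{
	uint32_t val = (uint32_t)fp[0] << 24 | fp[1] << 16 | fp[2] << 8 | fp[3];
	uint32_t mantissa = val & 0x7fffff;	/* bits 22..0 */
	int exp = (val >> 23) & 0xff;		/* biased exponent, sign bit assumed 0 */

	if (!exp && !mantissa)			/* +0.0 */
		return 0;

	exp -= 127;				/* remove the IEEE 754 bias */
	if (exp < 0)				/* magnitude below 1 truncates to 0 */
		return 0;
	if (exp >= 23)				/* far beyond the sensor range */
		return MAX_PM;

	/* implicit leading one plus mantissa, shifted down to an integer */
	uint32_t out = (mantissa | 0x800000) >> (23 - exp);

	return out > MAX_PM ? MAX_PM : (int32_t)out;
}

int main(void)
{
	const uint8_t sample[4] = { 0x41, 0xc8, 0x00, 0x00 };	/* 25.0f */

	printf("%d\n", (int)float_to_int_clamped(sample));
	return 0;
}
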
@@ -188,38 +67,35 @@ static s32 sps30_float_to_int_clamped(const u8 *fp)
static int sps30_do_meas(struct sps30_state *state, s32 *data, int size)
{
- int i, ret, tries = 5;
- u8 tmp[16];
+ int i, ret;
if (state->state == RESET) {
- ret = sps30_do_cmd(state, SPS30_START_MEAS, NULL, 0);
+ ret = state->ops->start_meas(state);
if (ret)
return ret;
state->state = MEASURING;
}
- while (tries--) {
- ret = sps30_do_cmd(state, SPS30_READ_DATA_READY_FLAG, tmp, 2);
- if (ret)
- return -EIO;
+ ret = state->ops->read_meas(state, (__be32 *)data, size);
+ if (ret)
+ return ret;
- /* new measurements ready to be read */
- if (tmp[1] == 1)
- break;
+ for (i = 0; i < size; i++)
+ data[i] = sps30_float_to_int_clamped((__be32 *)&data[i]);
- msleep_interruptible(300);
- }
+ return 0;
+}
- if (tries == -1)
- return -ETIMEDOUT;
+static int sps30_do_reset(struct sps30_state *state)
+{
+ int ret;
- ret = sps30_do_cmd(state, SPS30_READ_DATA, tmp, sizeof(int) * size);
+ ret = state->ops->reset(state);
if (ret)
return ret;
- for (i = 0; i < size; i++)
- data[i] = sps30_float_to_int_clamped(&tmp[4 * i]);
+ state->state = RESET;
return 0;
}
@@ -310,24 +186,6 @@ static int sps30_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int sps30_do_cmd_reset(struct sps30_state *state)
-{
- int ret;
-
- ret = sps30_do_cmd(state, SPS30_RESET, NULL, 0);
- msleep(300);
- /*
- * Power-on-reset causes sensor to produce some glitch on i2c bus and
- * some controllers end up in error state. Recover simply by placing
- * some data on the bus, for example STOP_MEAS command, which
- * is NOP in this case.
- */
- sps30_do_cmd(state, SPS30_STOP_MEAS, NULL, 0);
- state->state = RESET;
-
- return ret;
-}
-
static ssize_t start_cleaning_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -340,7 +198,7 @@ static ssize_t start_cleaning_store(struct device *dev,
return -EINVAL;
mutex_lock(&state->lock);
- ret = sps30_do_cmd(state, SPS30_START_FAN_CLEANING, NULL, 0);
+ ret = state->ops->clean_fan(state);
mutex_unlock(&state->lock);
if (ret)
return ret;
@@ -349,31 +207,29 @@ static ssize_t start_cleaning_store(struct device *dev,
}
static ssize_t cleaning_period_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct sps30_state *state = iio_priv(indio_dev);
- u8 tmp[4];
+ __be32 val;
int ret;
mutex_lock(&state->lock);
- ret = sps30_do_cmd(state, SPS30_READ_AUTO_CLEANING_PERIOD, tmp, 4);
+ ret = state->ops->read_cleaning_period(state, &val);
mutex_unlock(&state->lock);
if (ret)
return ret;
- return sprintf(buf, "%d\n", get_unaligned_be32(tmp));
+ return sprintf(buf, "%d\n", be32_to_cpu(val));
}
-static ssize_t cleaning_period_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t cleaning_period_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct sps30_state *state = iio_priv(indio_dev);
int val, ret;
- u8 tmp[4];
if (kstrtoint(buf, 0, &val))
return -EINVAL;
@@ -382,10 +238,8 @@ static ssize_t cleaning_period_store(struct device *dev,
(val > SPS30_AUTO_CLEANING_PERIOD_MAX))
return -EINVAL;
- put_unaligned_be32(val, tmp);
-
mutex_lock(&state->lock);
- ret = sps30_do_cmd(state, SPS30_AUTO_CLEANING_PERIOD, tmp, 0);
+ ret = state->ops->write_cleaning_period(state, cpu_to_be32(val));
if (ret) {
mutex_unlock(&state->lock);
return ret;
@@ -397,7 +251,7 @@ static ssize_t cleaning_period_store(struct device *dev,
* sensor requires reset in order to return up to date self cleaning
* period
*/
- ret = sps30_do_cmd_reset(state);
+ ret = sps30_do_reset(state);
if (ret)
dev_warn(dev,
"period changed but reads will return the old value\n");
@@ -411,9 +265,9 @@ static ssize_t cleaning_period_available_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "[%d %d %d]\n",
- SPS30_AUTO_CLEANING_PERIOD_MIN, 1,
- SPS30_AUTO_CLEANING_PERIOD_MAX);
+ return sysfs_emit(buf, "[%d %d %d]\n",
+ SPS30_AUTO_CLEANING_PERIOD_MIN, 1,
+ SPS30_AUTO_CLEANING_PERIOD_MAX);
}
static IIO_DEVICE_ATTR_WO(start_cleaning, 0);
@@ -460,90 +314,65 @@ static const struct iio_chan_spec sps30_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(4),
};
-static void sps30_stop_meas(void *data)
+static void sps30_devm_stop_meas(void *data)
{
struct sps30_state *state = data;
- sps30_do_cmd(state, SPS30_STOP_MEAS, NULL, 0);
+ if (state->state == MEASURING)
+ state->ops->stop_meas(state);
}
static const unsigned long sps30_scan_masks[] = { 0x0f, 0x00 };
-static int sps30_probe(struct i2c_client *client)
+int sps30_probe(struct device *dev, const char *name, void *priv, const struct sps30_ops *ops)
{
struct iio_dev *indio_dev;
struct sps30_state *state;
- u8 buf[32];
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -EOPNOTSUPP;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
+ dev_set_drvdata(dev, indio_dev);
+
state = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- state->client = client;
- state->state = RESET;
+ state->dev = dev;
+ state->priv = priv;
+ state->ops = ops;
+ mutex_init(&state->lock);
+
indio_dev->info = &sps30_info;
- indio_dev->name = client->name;
+ indio_dev->name = name;
indio_dev->channels = sps30_channels;
indio_dev->num_channels = ARRAY_SIZE(sps30_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->available_scan_masks = sps30_scan_masks;
- mutex_init(&state->lock);
- crc8_populate_msb(sps30_crc8_table, SPS30_CRC8_POLYNOMIAL);
-
- ret = sps30_do_cmd_reset(state);
+ ret = sps30_do_reset(state);
if (ret) {
- dev_err(&client->dev, "failed to reset device\n");
+ dev_err(dev, "failed to reset device\n");
return ret;
}
- ret = sps30_do_cmd(state, SPS30_READ_SERIAL, buf, sizeof(buf));
+ ret = state->ops->show_info(state);
if (ret) {
- dev_err(&client->dev, "failed to read serial number\n");
+ dev_err(dev, "failed to read device info\n");
return ret;
}
- /* returned serial number is already NUL terminated */
- dev_info(&client->dev, "serial number: %s\n", buf);
- ret = devm_add_action_or_reset(&client->dev, sps30_stop_meas, state);
+ ret = devm_add_action_or_reset(dev, sps30_devm_stop_meas, state);
if (ret)
return ret;
- ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
sps30_trigger_handler, NULL);
if (ret)
return ret;
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
-
-static const struct i2c_device_id sps30_id[] = {
- { "sps30" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, sps30_id);
-
-static const struct of_device_id sps30_of_match[] = {
- { .compatible = "sensirion,sps30" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sps30_of_match);
-
-static struct i2c_driver sps30_driver = {
- .driver = {
- .name = "sps30",
- .of_match_table = sps30_of_match,
- },
- .id_table = sps30_id,
- .probe_new = sps30_probe,
-};
-module_i2c_driver(sps30_driver);
+EXPORT_SYMBOL_GPL(sps30_probe);
MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor driver");
diff --git a/drivers/iio/chemical/sps30.h b/drivers/iio/chemical/sps30.h
new file mode 100644
index 000000000000..a58ee43cf45d
--- /dev/null
+++ b/drivers/iio/chemical/sps30.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SPS30_H
+#define _SPS30_H
+
+#include <linux/types.h>
+
+struct sps30_state;
+struct sps30_ops {
+ int (*start_meas)(struct sps30_state *state);
+ int (*stop_meas)(struct sps30_state *state);
+ int (*read_meas)(struct sps30_state *state, __be32 *meas, size_t num);
+ int (*reset)(struct sps30_state *state);
+ int (*clean_fan)(struct sps30_state *state);
+ int (*read_cleaning_period)(struct sps30_state *state, __be32 *period);
+ int (*write_cleaning_period)(struct sps30_state *state, __be32 period);
+ int (*show_info)(struct sps30_state *state);
+};
+
+struct sps30_state {
+ /* serialize access to the device */
+ struct mutex lock;
+ struct device *dev;
+ int state;
+ /*
+ * priv pointer is solely for serdev driver private data. We keep it
+ * here because driver_data inside dev has been already used for iio and
+ * struct serdev_device doesn't have one.
+ */
+ void *priv;
+ const struct sps30_ops *ops;
+};
+
+int sps30_probe(struct device *dev, const char *name, void *priv, const struct sps30_ops *ops);
+
+#endif
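
The header turns the former single-file I2C driver into a transport-agnostic core: each bus back end fills in a struct sps30_ops table and hands it to sps30_probe(). A hedged skeleton of that wiring follows, assuming only the prototypes above; the sps30_foo_* callbacks are hypothetical and assumed to be implemented by the transport.

/*
 * Illustrative skeleton, not an in-tree driver. All sps30_foo_* callbacks
 * are hypothetical transport implementations of the ops declared above.
 */
static const struct sps30_ops sps30_foo_ops = {
	.start_meas		= sps30_foo_start_meas,
	.stop_meas		= sps30_foo_stop_meas,
	.read_meas		= sps30_foo_read_meas,
	.reset			= sps30_foo_reset,
	.clean_fan		= sps30_foo_clean_fan,
	.read_cleaning_period	= sps30_foo_read_cleaning_period,
	.write_cleaning_period	= sps30_foo_write_cleaning_period,
	.show_info		= sps30_foo_show_info,
};

static int sps30_foo_probe(struct device *dev)
{
	/* the core allocates the iio_dev, stores the ops and registers it */
	return sps30_probe(dev, "sps30", NULL, &sps30_foo_ops);
}
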
diff --git a/drivers/iio/chemical/sps30_i2c.c b/drivers/iio/chemical/sps30_i2c.c
new file mode 100644
index 000000000000..d33560ed7184
--- /dev/null
+++ b/drivers/iio/chemical/sps30_i2c.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensirion SPS30 particulate matter sensor i2c driver
+ *
+ * Copyright (c) 2020 Tomasz Duszynski <tomasz.duszynski@octakon.com>
+ *
+ * I2C slave address: 0x69
+ */
+#include <asm/unaligned.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "sps30.h"
+
+#define SPS30_I2C_CRC8_POLYNOMIAL 0x31
+/* max number of bytes needed to store PM measurements or serial string */
+#define SPS30_I2C_MAX_BUF_SIZE 48
+
+DECLARE_CRC8_TABLE(sps30_i2c_crc8_table);
+
+#define SPS30_I2C_START_MEAS 0x0010
+#define SPS30_I2C_STOP_MEAS 0x0104
+#define SPS30_I2C_READ_MEAS 0x0300
+#define SPS30_I2C_MEAS_READY 0x0202
+#define SPS30_I2C_RESET 0xd304
+#define SPS30_I2C_CLEAN_FAN 0x5607
+#define SPS30_I2C_PERIOD 0x8004
+#define SPS30_I2C_READ_SERIAL 0xd033
+#define SPS30_I2C_READ_VERSION 0xd100
+
+static int sps30_i2c_xfer(struct sps30_state *state, unsigned char *txbuf, size_t txsize,
+ unsigned char *rxbuf, size_t rxsize)
+{
+ struct i2c_client *client = to_i2c_client(state->dev);
+ int ret;
+
+ /*
+ * Sensor does not support repeated start so instead of
+ * sending two i2c messages in a row we just send one by one.
+ */
+ ret = i2c_master_send(client, txbuf, txsize);
+ if (ret < 0)
+ return ret;
+ if (ret != txsize)
+ return -EIO;
+
+ if (!rxsize)
+ return 0;
+
+ ret = i2c_master_recv(client, rxbuf, rxsize);
+ if (ret < 0)
+ return ret;
+ if (ret != rxsize)
+ return -EIO;
+
+ return 0;
+}
+
+static int sps30_i2c_command(struct sps30_state *state, u16 cmd, void *arg, size_t arg_size,
+ void *rsp, size_t rsp_size)
+{
+ /*
+ * Internally the sensor stores measurements in the following manner:
+ *
+ * PM1: upper two bytes, crc8, lower two bytes, crc8
+ * PM2P5: upper two bytes, crc8, lower two bytes, crc8
+ * PM4: upper two bytes, crc8, lower two bytes, crc8
+ * PM10: upper two bytes, crc8, lower two bytes, crc8
+ *
+ * What follows next are number concentration measurements and
+ * typical particle size measurement which we omit.
+ */
+ unsigned char buf[SPS30_I2C_MAX_BUF_SIZE];
+ unsigned char *tmp;
+ unsigned char crc;
+ size_t i;
+ int ret;
+
+ put_unaligned_be16(cmd, buf);
+ i = 2;
+
+ if (rsp) {
+ /* each two bytes are followed by a crc8 */
+ rsp_size += rsp_size / 2;
+ } else {
+ tmp = arg;
+
+ while (arg_size) {
+ buf[i] = *tmp++;
+ buf[i + 1] = *tmp++;
+ buf[i + 2] = crc8(sps30_i2c_crc8_table, buf + i, 2, CRC8_INIT_VALUE);
+ arg_size -= 2;
+ i += 3;
+ }
+ }
+
+ ret = sps30_i2c_xfer(state, buf, i, buf, rsp_size);
+ if (ret)
+ return ret;
+
+ /* validate received data and strip off crc bytes */
+ tmp = rsp;
+ for (i = 0; i < rsp_size; i += 3) {
+ crc = crc8(sps30_i2c_crc8_table, buf + i, 2, CRC8_INIT_VALUE);
+ if (crc != buf[i + 2]) {
+ dev_err(state->dev, "data integrity check failed\n");
+ return -EIO;
+ }
+
+ *tmp++ = buf[i];
+ *tmp++ = buf[i + 1];
+ }
+
+ return 0;
+}
+
+static int sps30_i2c_start_meas(struct sps30_state *state)
+{
+ /* request BE IEEE754 formatted data */
+ unsigned char buf[] = { 0x03, 0x00 };
+
+ return sps30_i2c_command(state, SPS30_I2C_START_MEAS, buf, sizeof(buf), NULL, 0);
+}
+
+static int sps30_i2c_stop_meas(struct sps30_state *state)
+{
+ return sps30_i2c_command(state, SPS30_I2C_STOP_MEAS, NULL, 0, NULL, 0);
+}
+
+static int sps30_i2c_reset(struct sps30_state *state)
+{
+ int ret;
+
+ ret = sps30_i2c_command(state, SPS30_I2C_RESET, NULL, 0, NULL, 0);
+ msleep(500);
+ /*
+ * Power-on-reset causes the sensor to produce a glitch on the i2c bus and
+ * some controllers end up in an error state. Recover simply by placing
+ * some data on the bus, for example a STOP_MEAS command, which
+ * is a NOP in this case.
+ */
+ sps30_i2c_stop_meas(state);
+
+ return ret;
+}
+
+static bool sps30_i2c_meas_ready(struct sps30_state *state)
+{
+ unsigned char buf[2];
+ int ret;
+
+ ret = sps30_i2c_command(state, SPS30_I2C_MEAS_READY, NULL, 0, buf, sizeof(buf));
+ if (ret)
+ return false;
+
+ return buf[1];
+}
+
+static int sps30_i2c_read_meas(struct sps30_state *state, __be32 *meas, size_t num)
+{
+ /* measurements are ready within a second */
+ if (msleep_interruptible(1000))
+ return -EINTR;
+
+ if (!sps30_i2c_meas_ready(state))
+ return -ETIMEDOUT;
+
+ return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(num) * num);
+}
+
+static int sps30_i2c_clean_fan(struct sps30_state *state)
+{
+ return sps30_i2c_command(state, SPS30_I2C_CLEAN_FAN, NULL, 0, NULL, 0);
+}
+
+static int sps30_i2c_read_cleaning_period(struct sps30_state *state, __be32 *period)
+{
+ return sps30_i2c_command(state, SPS30_I2C_PERIOD, NULL, 0, period, sizeof(*period));
+}
+
+static int sps30_i2c_write_cleaning_period(struct sps30_state *state, __be32 period)
+{
+ return sps30_i2c_command(state, SPS30_I2C_PERIOD, &period, sizeof(period), NULL, 0);
+}
+
+static int sps30_i2c_show_info(struct sps30_state *state)
+{
+ /* extra nul just in case */
+ unsigned char buf[32 + 1] = { 0x00 };
+ int ret;
+
+ ret = sps30_i2c_command(state, SPS30_I2C_READ_SERIAL, NULL, 0, buf, sizeof(buf) - 1);
+ if (ret)
+ return ret;
+
+ dev_info(state->dev, "serial number: %s\n", buf);
+
+ ret = sps30_i2c_command(state, SPS30_I2C_READ_VERSION, NULL, 0, buf, 2);
+ if (ret)
+ return ret;
+
+ dev_info(state->dev, "fw version: %u.%u\n", buf[0], buf[1]);
+
+ return 0;
+}
+
+static const struct sps30_ops sps30_i2c_ops = {
+ .start_meas = sps30_i2c_start_meas,
+ .stop_meas = sps30_i2c_stop_meas,
+ .read_meas = sps30_i2c_read_meas,
+ .reset = sps30_i2c_reset,
+ .clean_fan = sps30_i2c_clean_fan,
+ .read_cleaning_period = sps30_i2c_read_cleaning_period,
+ .write_cleaning_period = sps30_i2c_write_cleaning_period,
+ .show_info = sps30_i2c_show_info,
+};
+
+static int sps30_i2c_probe(struct i2c_client *client)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ crc8_populate_msb(sps30_i2c_crc8_table, SPS30_I2C_CRC8_POLYNOMIAL);
+
+ return sps30_probe(&client->dev, client->name, NULL, &sps30_i2c_ops);
+}
+
+static const struct i2c_device_id sps30_i2c_id[] = {
+ { "sps30" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sps30_i2c_id);
+
+static const struct of_device_id sps30_i2c_of_match[] = {
+ { .compatible = "sensirion,sps30" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sps30_i2c_of_match);
+
+static struct i2c_driver sps30_i2c_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = sps30_i2c_of_match,
+ },
+ .id_table = sps30_i2c_id,
+ .probe_new = sps30_i2c_probe,
+};
+module_i2c_driver(sps30_i2c_driver);
+
+MODULE_AUTHOR("Tomasz Duszynski <tomasz.duszynski@octakon.com>");
+MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor i2c driver");
+MODULE_LICENSE("GPL v2");
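
On the I2C side every 16-bit word on the wire is followed by a CRC-8 (polynomial 0x31, init 0xff), in both directions. Below is a stand-alone user-space sketch of that framing for the start-measurement command and for validating a response; crc8_31() here is a plain bitwise implementation, not the kernel's <linux/crc8.h> table helper.

/* Stand-alone sketch of the SPS30 I2C framing: big-endian command word,
 * then each pair of payload bytes followed by their CRC-8.
 * Not driver code; build with: cc -o sps30_frame sps30_frame.c
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t crc8_31(const uint8_t *data, size_t len)
{
	uint8_t crc = 0xff;			/* CRC8_INIT_VALUE */

	while (len--) {
		crc ^= *data++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x31 : crc << 1;
	}

	return crc;
}

/* build "start measurement, big-endian IEEE 754 output": cmd 0x0010, arg 0x0300 */
static size_t build_start_meas(uint8_t *buf)
{
	size_t n = 0;

	buf[n++] = 0x00;			/* command word, MSB first */
	buf[n++] = 0x10;
	buf[n++] = 0x03;			/* argument word */
	buf[n++] = 0x00;
	buf[n++] = crc8_31(&buf[2], 2);		/* CRC over the argument pair */

	return n;
}

/* validate a response: every third byte is the CRC of the two before it */
static int check_response(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i + 2 < len; i += 3)
		if (crc8_31(&buf[i], 2) != buf[i + 2])
			return -1;

	return 0;
}

int main(void)
{
	uint8_t frame[8], rsp[3] = { 0x00, 0x01, 0x00 };
	size_t n = build_start_meas(frame);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");

	rsp[2] = crc8_31(rsp, 2);		/* well-formed two-byte response */
	printf("response %s\n", check_response(rsp, sizeof(rsp)) ? "bad" : "ok");

	return 0;
}
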
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
new file mode 100644
index 000000000000..3f311d50087c
--- /dev/null
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensirion SPS30 particulate matter sensor serial driver
+ *
+ * Copyright (c) 2021 Tomasz Duszynski <tomasz.duszynski@octakon.com>
+ */
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/serdev.h>
+#include <linux/types.h>
+
+#include "sps30.h"
+
+#define SPS30_SERIAL_DEV_NAME "sps30"
+
+#define SPS30_SERIAL_SOF_EOF 0x7e
+#define SPS30_SERIAL_TIMEOUT msecs_to_jiffies(20)
+#define SPS30_SERIAL_MAX_BUF_SIZE 263
+#define SPS30_SERIAL_ESCAPE_CHAR 0x7d
+
+#define SPS30_SERIAL_FRAME_MIN_SIZE 7
+#define SPS30_SERIAL_FRAME_ADR_OFFSET 1
+#define SPS30_SERIAL_FRAME_CMD_OFFSET 2
+#define SPS30_SERIAL_FRAME_MOSI_LEN_OFFSET 3
+#define SPS30_SERIAL_FRAME_MISO_STATE_OFFSET 3
+#define SPS30_SERIAL_FRAME_MISO_LEN_OFFSET 4
+#define SPS30_SERIAL_FRAME_MISO_DATA_OFFSET 5
+
+#define SPS30_SERIAL_START_MEAS 0x00
+#define SPS30_SERIAL_STOP_MEAS 0x01
+#define SPS30_SERIAL_READ_MEAS 0x03
+#define SPS30_SERIAL_RESET 0xd3
+#define SPS30_SERIAL_CLEAN_FAN 0x56
+#define SPS30_SERIAL_PERIOD 0x80
+#define SPS30_SERIAL_DEV_INFO 0xd0
+#define SPS30_SERIAL_READ_VERSION 0xd1
+
+struct sps30_serial_priv {
+ struct completion new_frame;
+ unsigned char buf[SPS30_SERIAL_MAX_BUF_SIZE];
+ size_t num;
+ bool escaped;
+ bool done;
+};
+
+static int sps30_serial_xfer(struct sps30_state *state, const unsigned char *buf, size_t size)
+{
+ struct serdev_device *serdev = to_serdev_device(state->dev);
+ struct sps30_serial_priv *priv = state->priv;
+ int ret;
+
+ priv->num = 0;
+ priv->escaped = false;
+ priv->done = false;
+
+ ret = serdev_device_write(serdev, buf, size, SPS30_SERIAL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret != size)
+ return -EIO;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->new_frame, SPS30_SERIAL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct {
+ unsigned char byte;
+ unsigned char byte2;
+} sps30_serial_bytes[] = {
+ { 0x11, 0x31 },
+ { 0x13, 0x33 },
+ { 0x7e, 0x5e },
+ { 0x7d, 0x5d },
+};
+
+static int sps30_serial_put_byte(unsigned char *buf, unsigned char byte)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sps30_serial_bytes); i++) {
+ if (sps30_serial_bytes[i].byte != byte)
+ continue;
+
+ buf[0] = SPS30_SERIAL_ESCAPE_CHAR;
+ buf[1] = sps30_serial_bytes[i].byte2;
+
+ return 2;
+ }
+
+ buf[0] = byte;
+
+ return 1;
+}
+
+static char sps30_serial_get_byte(bool escaped, unsigned char byte2)
+{
+ int i;
+
+ if (!escaped)
+ return byte2;
+
+ for (i = 0; i < ARRAY_SIZE(sps30_serial_bytes); i++) {
+ if (sps30_serial_bytes[i].byte2 != byte2)
+ continue;
+
+ return sps30_serial_bytes[i].byte;
+ }
+
+ return 0;
+}
+
+static unsigned char sps30_serial_calc_chksum(const unsigned char *buf, size_t num)
+{
+ unsigned int chksum = 0;
+ size_t i;
+
+ for (i = 0; i < num; i++)
+ chksum += buf[i];
+
+ return ~chksum;
+}
+
+static int sps30_serial_prep_frame(unsigned char *buf, unsigned char cmd,
+ const unsigned char *arg, size_t arg_size)
+{
+ unsigned char chksum;
+ int num = 0;
+ size_t i;
+
+ buf[num++] = SPS30_SERIAL_SOF_EOF;
+ buf[num++] = 0;
+ num += sps30_serial_put_byte(buf + num, cmd);
+ num += sps30_serial_put_byte(buf + num, arg_size);
+
+ for (i = 0; i < arg_size; i++)
+ num += sps30_serial_put_byte(buf + num, arg[i]);
+
+ /* SOF isn't checksummed */
+ chksum = sps30_serial_calc_chksum(buf + 1, num - 1);
+ num += sps30_serial_put_byte(buf + num, chksum);
+ buf[num++] = SPS30_SERIAL_SOF_EOF;
+
+ return num;
+}
+
+static bool sps30_serial_frame_valid(struct sps30_state *state, const unsigned char *buf)
+{
+ struct sps30_serial_priv *priv = state->priv;
+ unsigned char chksum;
+
+ if ((priv->num < SPS30_SERIAL_FRAME_MIN_SIZE) ||
+ (priv->num != SPS30_SERIAL_FRAME_MIN_SIZE +
+ priv->buf[SPS30_SERIAL_FRAME_MISO_LEN_OFFSET])) {
+ dev_err(state->dev, "frame has invalid number of bytes\n");
+ return false;
+ }
+
+ if ((priv->buf[SPS30_SERIAL_FRAME_ADR_OFFSET] != buf[SPS30_SERIAL_FRAME_ADR_OFFSET]) ||
+ (priv->buf[SPS30_SERIAL_FRAME_CMD_OFFSET] != buf[SPS30_SERIAL_FRAME_CMD_OFFSET])) {
+ dev_err(state->dev, "frame has wrong ADR and CMD bytes\n");
+ return false;
+ }
+
+ if (priv->buf[SPS30_SERIAL_FRAME_MISO_STATE_OFFSET]) {
+ dev_err(state->dev, "frame with non-zero state received (0x%02x)\n",
+ priv->buf[SPS30_SERIAL_FRAME_MISO_STATE_OFFSET]);
+ return false;
+ }
+
+ /* SOF, checksum and EOF are not checksummed */
+ chksum = sps30_serial_calc_chksum(priv->buf + 1, priv->num - 3);
+ if (priv->buf[priv->num - 2] != chksum) {
+ dev_err(state->dev, "frame integrity check failed\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int sps30_serial_command(struct sps30_state *state, unsigned char cmd,
+ const void *arg, size_t arg_size, void *rsp, size_t rsp_size)
+{
+ struct sps30_serial_priv *priv = state->priv;
+ unsigned char buf[SPS30_SERIAL_MAX_BUF_SIZE];
+ int ret, size;
+
+ size = sps30_serial_prep_frame(buf, cmd, arg, arg_size);
+ ret = sps30_serial_xfer(state, buf, size);
+ if (ret)
+ return ret;
+
+ if (!sps30_serial_frame_valid(state, buf))
+ return -EIO;
+
+ if (rsp) {
+ rsp_size = min_t(size_t, priv->buf[SPS30_SERIAL_FRAME_MISO_LEN_OFFSET], rsp_size);
+ memcpy(rsp, &priv->buf[SPS30_SERIAL_FRAME_MISO_DATA_OFFSET], rsp_size);
+ }
+
+ return rsp_size;
+}
+
+static int sps30_serial_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev);
+ struct sps30_serial_priv *priv;
+ struct sps30_state *state;
+ unsigned char byte;
+ size_t i;
+
+ if (!indio_dev)
+ return 0;
+
+ state = iio_priv(indio_dev);
+ priv = state->priv;
+
+ /* just in case the device puts some unexpected data on the bus */
+ if (priv->done)
+ return size;
+
+ /* wait for the start of frame */
+ if (!priv->num && size && buf[0] != SPS30_SERIAL_SOF_EOF)
+ return 1;
+
+ if (priv->num + size >= ARRAY_SIZE(priv->buf))
+ size = ARRAY_SIZE(priv->buf) - priv->num;
+
+ for (i = 0; i < size; i++) {
+ byte = buf[i];
+ /* remove stuffed bytes on-the-fly */
+ if (byte == SPS30_SERIAL_ESCAPE_CHAR) {
+ priv->escaped = true;
+ continue;
+ }
+
+ byte = sps30_serial_get_byte(priv->escaped, byte);
+ if (priv->escaped && !byte)
+ dev_warn(state->dev, "unrecognized escaped char (0x%02x)\n", byte);
+
+ priv->buf[priv->num++] = byte;
+
+ /* EOF received */
+ if (!priv->escaped && byte == SPS30_SERIAL_SOF_EOF) {
+ if (priv->num < SPS30_SERIAL_FRAME_MIN_SIZE)
+ continue;
+
+ priv->done = true;
+ complete(&priv->new_frame);
+ i++;
+ break;
+ }
+
+ priv->escaped = false;
+ }
+
+ return i;
+}
+
+static const struct serdev_device_ops sps30_serial_device_ops = {
+ .receive_buf = sps30_serial_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int sps30_serial_start_meas(struct sps30_state *state)
+{
+ /* request BE IEEE754 formatted data */
+ unsigned char buf[] = { 0x01, 0x03 };
+
+ return sps30_serial_command(state, SPS30_SERIAL_START_MEAS, buf, sizeof(buf), NULL, 0);
+}
+
+static int sps30_serial_stop_meas(struct sps30_state *state)
+{
+ return sps30_serial_command(state, SPS30_SERIAL_STOP_MEAS, NULL, 0, NULL, 0);
+}
+
+static int sps30_serial_reset(struct sps30_state *state)
+{
+ int ret;
+
+ ret = sps30_serial_command(state, SPS30_SERIAL_RESET, NULL, 0, NULL, 0);
+ msleep(500);
+
+ return ret;
+}
+
+static int sps30_serial_read_meas(struct sps30_state *state, __be32 *meas, size_t num)
+{
+ int ret;
+
+ /* measurements are ready within a second */
+ if (msleep_interruptible(1000))
+ return -EINTR;
+
+ ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(num));
+ if (ret < 0)
+ return ret;
+ /* if measurements aren't ready sensor returns empty frame */
+ if (ret == SPS30_SERIAL_FRAME_MIN_SIZE)
+ return -ETIMEDOUT;
+ if (ret != num * sizeof(*meas))
+ return -EIO;
+
+ return 0;
+}
+
+static int sps30_serial_clean_fan(struct sps30_state *state)
+{
+ return sps30_serial_command(state, SPS30_SERIAL_CLEAN_FAN, NULL, 0, NULL, 0);
+}
+
+static int sps30_serial_read_cleaning_period(struct sps30_state *state, __be32 *period)
+{
+ unsigned char buf[] = { 0x00 };
+ int ret;
+
+ ret = sps30_serial_command(state, SPS30_SERIAL_PERIOD, buf, sizeof(buf),
+ period, sizeof(*period));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(*period))
+ return -EIO;
+
+ return 0;
+}
+
+static int sps30_serial_write_cleaning_period(struct sps30_state *state, __be32 period)
+{
+ unsigned char buf[5] = { 0x00 };
+
+ memcpy(buf + 1, &period, sizeof(period));
+
+ return sps30_serial_command(state, SPS30_SERIAL_PERIOD, buf, sizeof(buf), NULL, 0);
+}
+
+static int sps30_serial_show_info(struct sps30_state *state)
+{
+ /*
+ * tell the device to return its serial number and add an extra nul byte just in
+ * case the serial number isn't a valid string
+ */
+ unsigned char buf[32 + 1] = { 0x03 };
+ struct device *dev = state->dev;
+ int ret;
+
+ ret = sps30_serial_command(state, SPS30_SERIAL_DEV_INFO, buf, 1, buf, sizeof(buf) - 1);
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(buf) - 1)
+ return -EIO;
+
+ dev_info(dev, "serial number: %s\n", buf);
+
+ ret = sps30_serial_command(state, SPS30_SERIAL_READ_VERSION, NULL, 0, buf, sizeof(buf) - 1);
+ if (ret < 0)
+ return ret;
+ if (ret < 2)
+ return -EIO;
+
+ dev_info(dev, "fw version: %u.%u\n", buf[0], buf[1]);
+
+ return 0;
+}
+
+static const struct sps30_ops sps30_serial_ops = {
+ .start_meas = sps30_serial_start_meas,
+ .stop_meas = sps30_serial_stop_meas,
+ .read_meas = sps30_serial_read_meas,
+ .reset = sps30_serial_reset,
+ .clean_fan = sps30_serial_clean_fan,
+ .read_cleaning_period = sps30_serial_read_cleaning_period,
+ .write_cleaning_period = sps30_serial_write_cleaning_period,
+ .show_info = sps30_serial_show_info,
+};
+
+static int sps30_serial_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct sps30_serial_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ init_completion(&priv->new_frame);
+ serdev_device_set_client_ops(serdev, &sps30_serial_device_ops);
+
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, 115200);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret)
+ return ret;
+
+ return sps30_probe(dev, SPS30_SERIAL_DEV_NAME, priv, &sps30_serial_ops);
+}
+
+static const struct of_device_id sps30_serial_of_match[] = {
+ { .compatible = "sensirion,sps30" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sps30_serial_of_match);
+
+static struct serdev_device_driver sps30_serial_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = sps30_serial_of_match,
+ },
+ .probe = sps30_serial_probe,
+};
+module_serdev_device_driver(sps30_serial_driver);
+
+MODULE_AUTHOR("Tomasz Duszynski <tomasz.duszynski@octakon.com>");
+MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor serial driver");
+MODULE_LICENSE("GPL v2");
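
The serial transport uses Sensirion's SHDLC-style framing: 0x7e frame markers, byte stuffing of 0x11/0x13/0x7d/0x7e via a 0x7d escape followed by the byte XOR 0x20 (the table above), and an inverted byte-sum checksum. Below is a stand-alone user-space sketch that builds one MOSI frame, mirroring the order of operations in sps30_serial_prep_frame(); nothing beyond what the driver above shows is assumed.

/* Stand-alone sketch of the SPS30 serial MOSI frame:
 * 0x7e | adr | cmd | len | data... | chk | 0x7e
 * Not driver code; build with: cc -o sps30_shdlc sps30_shdlc.c
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t put_byte(uint8_t *out, uint8_t byte)
{
	/* 0x11, 0x13, 0x7d and 0x7e are stuffed as 0x7d, byte ^ 0x20 */
	if (byte == 0x7e || byte == 0x7d || byte == 0x11 || byte == 0x13) {
		out[0] = 0x7d;
		out[1] = byte ^ 0x20;
		return 2;
	}

	out[0] = byte;
	return 1;
}

static uint8_t inverted_sum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *buf++;

	return ~sum;
}

static size_t build_frame(uint8_t *out, uint8_t cmd,
			  const uint8_t *arg, size_t arg_len)
{
	size_t n = 0, i;

	out[n++] = 0x7e;			/* start of frame */
	out[n++] = 0x00;			/* device address, always 0 */
	n += put_byte(out + n, cmd);
	n += put_byte(out + n, (uint8_t)arg_len);

	for (i = 0; i < arg_len; i++)
		n += put_byte(out + n, arg[i]);

	/* checksum over everything after SOF, as the driver computes it */
	n += put_byte(out + n, inverted_sum(out + 1, n - 1));
	out[n++] = 0x7e;			/* end of frame */

	return n;
}

int main(void)
{
	/* "start measurement" (cmd 0x00) requesting big-endian IEEE 754 data */
	const uint8_t arg[] = { 0x01, 0x03 };
	uint8_t frame[32];
	size_t n = build_frame(frame, 0x00, arg, sizeof(arg));

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");

	return 0;
}
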
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index cb52b4fd6bf7..043f199e7bc6 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -3,18 +3,12 @@
* HID Sensors Driver
* Copyright (c) 2012, Intel Corporation.
*/
-#include <linux/device.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/time.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#define HZ_PER_MHZ 1000000L
@@ -176,7 +170,7 @@ s32 hid_sensor_read_poll_value(struct hid_sensor_common *st)
return value;
}
-EXPORT_SYMBOL(hid_sensor_read_poll_value);
+EXPORT_SYMBOL_NS(hid_sensor_read_poll_value, IIO_HID_ATTRIBUTES);
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
int *val1, int *val2)
@@ -203,7 +197,7 @@ int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
return IIO_VAL_INT_PLUS_MICRO;
}
-EXPORT_SYMBOL(hid_sensor_read_samp_freq_value);
+EXPORT_SYMBOL_NS(hid_sensor_read_samp_freq_value, IIO_HID);
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2)
@@ -238,7 +232,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
return 0;
}
-EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
+EXPORT_SYMBOL_NS(hid_sensor_write_samp_freq_value, IIO_HID);
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2)
@@ -261,7 +255,7 @@ int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
return IIO_VAL_INT_PLUS_MICRO;
}
-EXPORT_SYMBOL(hid_sensor_read_raw_hyst_value);
+EXPORT_SYMBOL_NS(hid_sensor_read_raw_hyst_value, IIO_HID);
int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st, int *val1,
int *val2)
@@ -283,7 +277,7 @@ int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st, int *val1,
return IIO_VAL_INT_PLUS_MICRO;
}
-EXPORT_SYMBOL(hid_sensor_read_raw_hyst_rel_value);
+EXPORT_SYMBOL_NS(hid_sensor_read_raw_hyst_rel_value, IIO_HID);
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
@@ -315,7 +309,7 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
return 0;
}
-EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
+EXPORT_SYMBOL_NS(hid_sensor_write_raw_hyst_value, IIO_HID);
int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st,
int val1, int val2)
@@ -346,7 +340,7 @@ int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st,
return 0;
}
-EXPORT_SYMBOL(hid_sensor_write_raw_hyst_rel_value);
+EXPORT_SYMBOL_NS(hid_sensor_write_raw_hyst_rel_value, IIO_HID);
/*
* This function applies the unit exponent to the scale.
@@ -430,14 +424,14 @@ int hid_sensor_format_scale(u32 usage_id,
return IIO_VAL_INT_PLUS_NANO;
}
-EXPORT_SYMBOL(hid_sensor_format_scale);
+EXPORT_SYMBOL_NS(hid_sensor_format_scale, IIO_HID);
int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st,
int64_t raw_value)
{
return st->timestamp_ns_scale * raw_value;
}
-EXPORT_SYMBOL(hid_sensor_convert_timestamp);
+EXPORT_SYMBOL_NS(hid_sensor_convert_timestamp, IIO_HID);
static
int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev,
@@ -484,7 +478,7 @@ int hid_sensor_get_report_latency(struct hid_sensor_common *st)
return value;
}
-EXPORT_SYMBOL(hid_sensor_get_report_latency);
+EXPORT_SYMBOL_NS(hid_sensor_get_report_latency, IIO_HID_ATTRIBUTES);
int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency_ms)
{
@@ -492,13 +486,13 @@ int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency_ms)
st->report_latency.index,
sizeof(latency_ms), &latency_ms);
}
-EXPORT_SYMBOL(hid_sensor_set_report_latency);
+EXPORT_SYMBOL_NS(hid_sensor_set_report_latency, IIO_HID_ATTRIBUTES);
bool hid_sensor_batch_mode_supported(struct hid_sensor_common *st)
{
return st->report_latency.index > 0 && st->report_latency.report_id > 0;
}
-EXPORT_SYMBOL(hid_sensor_batch_mode_supported);
+EXPORT_SYMBOL_NS(hid_sensor_batch_mode_supported, IIO_HID_ATTRIBUTES);
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
@@ -590,7 +584,7 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
return 0;
}
-EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
+EXPORT_SYMBOL_NS(hid_sensor_parse_common_attributes, IIO_HID);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_DESCRIPTION("HID Sensor common attribute processing");
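
These helpers now live in the IIO_HID and IIO_HID_ATTRIBUTES symbol namespaces, so any module calling them has to declare the matching import, as the trigger and gyro changes below do. A minimal sketch of the provider/consumer pairing, with placeholder names:

/* Provider module: export a symbol into a namespace. */
int foo_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS(foo_helper, FOO_NS);

/* Consumer module: declare the import so modpost and the module loader
 * accept the reference to foo_helper().
 */
MODULE_IMPORT_NS(FOO_NS);
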
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 95ddccb44f1c..a4ec11a3b68a 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -6,16 +6,13 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
+#include <linux/workqueue.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include "hid-sensor-trigger.h"
@@ -150,7 +147,7 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
return 0;
}
-EXPORT_SYMBOL(hid_sensor_power_state);
+EXPORT_SYMBOL_NS(hid_sensor_power_state, IIO_HID);
int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
{
@@ -163,18 +160,15 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
if (state) {
atomic_inc(&st->user_requested_state);
- ret = pm_runtime_get_sync(&st->pdev->dev);
+ ret = pm_runtime_resume_and_get(&st->pdev->dev);
} else {
atomic_dec(&st->user_requested_state);
pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
- if (ret < 0) {
- if (state)
- pm_runtime_put_noidle(&st->pdev->dev);
+ if (ret < 0)
return ret;
- }
return 0;
#else
@@ -222,14 +216,13 @@ void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
pm_runtime_disable(&attrb->pdev->dev);
pm_runtime_set_suspended(&attrb->pdev->dev);
- pm_runtime_put_noidle(&attrb->pdev->dev);
cancel_work_sync(&attrb->work);
iio_trigger_unregister(attrb->trigger);
iio_trigger_free(attrb->trigger);
iio_triggered_buffer_cleanup(indio_dev);
}
-EXPORT_SYMBOL(hid_sensor_remove_trigger);
+EXPORT_SYMBOL_NS(hid_sensor_remove_trigger, IIO_HID);
static const struct iio_trigger_ops hid_sensor_trigger_ops = {
.set_trigger_state = &hid_sensor_data_rdy_trigger_set_state,
@@ -256,7 +249,7 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
}
trig = iio_trigger_alloc(indio_dev->dev.parent,
- "%s-dev%d", name, indio_dev->id);
+ "%s-dev%d", name, iio_device_id(indio_dev));
if (trig == NULL) {
dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
ret = -ENOMEM;
@@ -295,7 +288,7 @@ error_triggered_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
}
-EXPORT_SYMBOL(hid_sensor_setup_trigger);
+EXPORT_SYMBOL_NS(hid_sensor_setup_trigger, IIO_HID);
static int __maybe_unused hid_sensor_suspend(struct device *dev)
{
@@ -325,8 +318,9 @@ const struct dev_pm_ops hid_sensor_pm_ops = {
SET_RUNTIME_PM_OPS(hid_sensor_suspend,
hid_sensor_runtime_resume, NULL)
};
-EXPORT_SYMBOL(hid_sensor_pm_ops);
+EXPORT_SYMBOL_NS(hid_sensor_pm_ops, IIO_HID);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_DESCRIPTION("HID Sensor trigger processing");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID_ATTRIBUTES);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index bb45cc89e551..f94fca4f1edf 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -9,6 +9,9 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+struct hid_sensor_common;
+struct iio_dev;
+
extern const struct dev_pm_ops hid_sensor_pm_ops;
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
diff --git a/drivers/iio/common/scmi_sensors/Makefile b/drivers/iio/common/scmi_sensors/Makefile
index f13140a2575a..645e0fce1a73 100644
--- a/drivers/iio/common/scmi_sensors/Makefile
+++ b/drivers/iio/common/scmi_sensors/Makefile
@@ -1,4 +1,4 @@
-# SPDX - License - Identifier : GPL - 2.0 - only
+# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the IIO over SCMI
#
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 141e8aa6911e..7cf2bf282cef 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -525,7 +525,6 @@ scmi_alloc_iiodev(struct scmi_device *sdev,
return ERR_PTR(-ENOMEM);
iiodev->modes = INDIO_DIRECT_MODE;
- iiodev->dev.parent = dev;
sensor = iio_priv(iiodev);
sensor->sensor_ops = ops;
sensor->ph = ph;
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
index 79837a4b3a41..3104ec32dfac 100644
--- a/drivers/iio/dac/ad5766.c
+++ b/drivers/iio/dac/ad5766.c
@@ -597,8 +597,6 @@ static int ad5766_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->info = &ad5766_info;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index a5b0a52bf86e..dd2e306824e7 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -69,9 +69,8 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
}
if (enable) {
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
mutex_unlock(&dac->lock);
return ret;
}
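
pm_runtime_get_sync() raises the usage count even when the resume fails, which is why callers needed pm_runtime_put_noidle() on the error path; pm_runtime_resume_and_get() drops the reference itself on failure, so the error handling collapses to a plain return. A minimal sketch of the resulting pattern, with a placeholder function name:

static int foo_hw_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;		/* usage count already balanced */

	/* ... talk to the runtime-resumed hardware ... */

	pm_runtime_mark_last_busy(dev);
	return pm_runtime_put_autosuspend(dev);
}
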
diff --git a/drivers/iio/dummy/Kconfig b/drivers/iio/dummy/Kconfig
index 5c5c2f8c55f3..1f46cb9e51b7 100644
--- a/drivers/iio/dummy/Kconfig
+++ b/drivers/iio/dummy/Kconfig
@@ -34,6 +34,7 @@ config IIO_SIMPLE_DUMMY_BUFFER
select IIO_BUFFER
select IIO_TRIGGER
select IIO_KFIFO_BUF
+ select IIO_TRIGGERED_BUFFER
help
Add buffered data capture to the simple dummy driver.
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 1462a6a5bc6d..3d9eba716b69 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -563,8 +563,10 @@ static int adf4350_probe(struct spi_device *spi)
st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
GPIOD_IN);
- if (IS_ERR(st->lock_detect_gpiod))
- return PTR_ERR(st->lock_detect_gpiod);
+ if (IS_ERR(st->lock_detect_gpiod)) {
+ ret = PTR_ERR(st->lock_detect_gpiod);
+ goto error_disable_reg;
+ }
if (pdata->power_up_frequency) {
ret = adf4350_set_freq(st, pdata->power_up_frequency);
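
The adf4350 fix stops returning straight out of probe when the lock-detect GPIO lookup fails and instead unwinds through the driver's error_disable_reg label, so whatever was enabled earlier in probe (the label name points at a regulator) gets released. A minimal, hedged sketch of that unwind pattern with placeholder names:

/*
 * Illustrative fragment of the unwind-on-error pattern: once a resource
 * is enabled, later probe failures jump to a label that releases it
 * instead of returning directly. "foo" is a placeholder.
 */
static int foo_probe(struct spi_device *spi)
{
	struct gpio_desc *gpiod;
	struct regulator *reg;
	int ret;

	reg = devm_regulator_get(&spi->dev, "vcc");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);
	if (ret)
		return ret;

	gpiod = devm_gpiod_get_optional(&spi->dev, NULL, GPIOD_IN);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		goto error_disable_reg;	/* do not leak the enabled regulator */
	}

	return 0;

error_disable_reg:
	regulator_disable(reg);
	return ret;
}
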
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index a11ae9db0d11..36879f01e28c 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -6,19 +6,14 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
#include <linux/debugfs.h>
@@ -223,13 +218,12 @@ static ssize_t adis16136_read_frequency(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adis16136 *adis16136 = iio_priv(indio_dev);
- struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
int ret;
- mutex_lock(slock);
+ adis_dev_lock(&adis16136->adis);
ret = __adis16136_get_freq(adis16136, &freq);
- mutex_unlock(slock);
+ adis_dev_unlock(&adis16136->adis);
if (ret)
return ret;
@@ -254,11 +248,10 @@ static const unsigned adis16136_3db_divisors[] = {
static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
- struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
int i, ret;
- mutex_lock(slock);
+ adis_dev_lock(&adis16136->adis);
ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
goto out_unlock;
@@ -270,7 +263,7 @@ static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
ret = __adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
out_unlock:
- mutex_unlock(slock);
+ adis_dev_unlock(&adis16136->adis);
return ret;
}
@@ -278,12 +271,11 @@ out_unlock:
static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
- struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
uint16_t val16;
int ret;
- mutex_lock(slock);
+ adis_dev_lock(&adis16136->adis);
ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT,
&val16);
@@ -297,7 +289,7 @@ static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
*val = freq / adis16136_3db_divisors[val16 & 0x07];
err_unlock:
- mutex_unlock(slock);
+ adis_dev_unlock(&adis16136->adis);
return ret ? ret : IIO_VAL_INT;
}
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index e7c9a3e31c45..66b6b7bd5e1b 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -5,17 +5,12 @@
* Copyright 2010 Analog Devices Inc.
*/
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
#define ADIS16260_STARTUP_DELAY 220 /* ms */
@@ -293,7 +288,7 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
addr = adis16260_addresses[chan->scan_index][1];
return adis_write_reg_16(adis, addr, val);
case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&adis->state_lock);
+ adis_dev_lock(adis);
if (spi_get_device_id(adis->spi)->driver_data)
t = 256 / val;
else
@@ -310,7 +305,7 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
ret = __adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
- mutex_unlock(&adis->state_lock);
+ adis_dev_unlock(adis);
return ret;
}
return -EINVAL;
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
index cec5e1f17c22..3e0734ddafe3 100644
--- a/drivers/iio/gyro/adxrs290.c
+++ b/drivers/iio/gyro/adxrs290.c
@@ -589,7 +589,7 @@ static int adxrs290_probe_trigger(struct iio_dev *indio_dev)
st->dready_trig = devm_iio_trigger_alloc(&st->spi->dev, "%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!st->dready_trig)
return -ENOMEM;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index b11ebd9bb7a4..17b939a367ad 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -98,7 +98,11 @@ struct bmg160_data {
struct iio_trigger *motion_trig;
struct iio_mount_matrix orientation;
struct mutex mutex;
- s16 buffer[8];
+ /* Ensure naturally aligned timestamp */
+ struct {
+ s16 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
u32 dps_range;
int ev_enable_state;
int slope_thres;
@@ -882,12 +886,12 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
- data->buffer, AXIS_MAX * 2);
+ data->scan.chans, AXIS_MAX * 2);
mutex_unlock(&data->mutex);
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -1102,8 +1106,7 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
if (ret)
return ret;
- ret = iio_read_mount_matrix(dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
@@ -1137,14 +1140,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->dready_trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig)
return -ENOMEM;
data->motion_trig = devm_iio_trigger_alloc(dev,
"%s-any-motion-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->motion_trig)
return -ENOMEM;
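
The flat s16 buffer[8] is replaced by a struct whose s64 timestamp member is __aligned(8), so the slot that iio_push_to_buffers_with_timestamp() writes the timestamp into is naturally aligned no matter how the channel samples pack. A minimal sketch of the pattern with placeholder names:

struct foo_data {
	/* channel samples followed by a naturally aligned timestamp slot */
	struct {
		s16 chans[3];
		s64 timestamp __aligned(8);
	} scan;
};

static void foo_push(struct iio_dev *indio_dev, struct foo_data *data,
		     s64 timestamp)
{
	/* the IIO core fills in the timestamp member */
	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, timestamp);
}
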
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 645461c70454..410e5e9f2672 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -366,14 +366,7 @@ out_unlock:
static int fxas21002c_pm_get(struct fxas21002c_data *data)
{
- struct device *dev = regmap_get_device(data->regmap);
- int ret;
-
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- pm_runtime_put_noidle(dev);
-
- return ret;
+ return pm_runtime_resume_and_get(regmap_get_device(data->regmap));
}
static int fxas21002c_pm_put(struct fxas21002c_data *data)
@@ -854,7 +847,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
data->dready_trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig)
return -ENOMEM;
@@ -1004,7 +997,6 @@ int fxas21002c_core_probe(struct device *dev, struct regmap *regmap, int irq,
pm_disable:
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
return ret;
}
@@ -1018,7 +1010,6 @@ void fxas21002c_core_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(fxas21002c_core_remove);
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index dad26ee4fd1f..bc63c2a34c5e 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -6,13 +6,10 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -400,3 +397,4 @@ module_platform_driver(hid_gyro_3d_platform_driver);
MODULE_DESCRIPTION("HID Sensor Gyroscope 3D");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index af0aaa146f0c..04dd6a7969ea 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -114,7 +114,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
struct itg3200 *st = iio_priv(indio_dev);
st->trig = iio_trigger_alloc(&st->i2c->dev, "%s-dev%d", indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!st->trig)
return -ENOMEM;
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index e9804664db73..a7f1bbb5f289 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -308,8 +308,7 @@ static int itg3200_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
- ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
- &st->orientation);
+ ret = iio_read_mount_matrix(&client->dev, &st->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index f17a93519535..3225de1f023b 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -1058,7 +1058,7 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
mpu3050->trig = devm_iio_trigger_alloc(&indio_dev->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!mpu3050->trig)
return -ENOMEM;
@@ -1164,7 +1164,7 @@ int mpu3050_common_probe(struct device *dev,
mpu3050->divisor = 99;
/* Read the mounting matrix, if present */
- ret = iio_read_mount_matrix(dev, "mount-matrix", &mpu3050->orientation);
+ ret = iio_read_mount_matrix(dev, &mpu3050->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index fd9171cc3aba..6537f5cb8320 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -24,18 +24,6 @@
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
#define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro"
-/**
- * struct st_sensors_platform_data - gyro platform data
- * @drdy_int_pin: DRDY on gyros is available only on INT2 pin.
- */
-static __maybe_unused const struct st_sensors_platform_data gyro_pdata = {
- .drdy_int_pin = 2,
-};
-
-const struct st_sensor_settings *st_gyro_get_settings(const char *name);
-int st_gyro_common_probe(struct iio_dev *indio_dev);
-void st_gyro_common_remove(struct iio_dev *indio_dev);
-
#ifdef CONFIG_IIO_BUFFER
int st_gyro_allocate_ring(struct iio_dev *indio_dev);
void st_gyro_deallocate_ring(struct iio_dev *indio_dev);
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index c8aa051995d3..b86ee4d940d9 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -37,19 +37,36 @@
#define ST_GYRO_FS_AVL_500DPS 500
#define ST_GYRO_FS_AVL_2000DPS 2000
+static const struct iio_mount_matrix *
+st_gyro_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ return &gdata->mount_matrix;
+}
+
+static const struct iio_chan_spec_ext_info st_gyro_mount_matrix_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_gyro_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec st_gyro_16bit_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
- ST_GYRO_DEFAULT_OUT_X_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
+ ST_GYRO_DEFAULT_OUT_X_L_ADDR,
+ st_gyro_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
- ST_GYRO_DEFAULT_OUT_Y_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
+ ST_GYRO_DEFAULT_OUT_Y_L_ADDR,
+ st_gyro_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
- ST_GYRO_DEFAULT_OUT_Z_L_ADDR),
+ ST_GYRO_DEFAULT_OUT_Z_L_ADDR,
+ st_gyro_mount_matrix_ext_info),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
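
The gyro channels gain a mount-matrix entry in their ext_info, backed later in this file by iio_read_mount_matrix() pulling the matrix from firmware properties. A minimal sketch of that pairing with placeholder names:

struct foo_data {
	struct iio_mount_matrix orientation;
};

static const struct iio_mount_matrix *
foo_get_mount_matrix(const struct iio_dev *indio_dev,
		     const struct iio_chan_spec *chan)
{
	struct foo_data *data = iio_priv(indio_dev);

	return &data->orientation;
}

static const struct iio_chan_spec_ext_info foo_ext_info[] = {
	IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, foo_get_mount_matrix),
	{ }
};

/* in probe: the matrix is filled from firmware properties, e.g.
 * ret = iio_read_mount_matrix(dev, &data->orientation);
 */
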
@@ -357,6 +374,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
};
+/* DRDY on gyros is available only on INT2 pin */
+static const struct st_sensors_platform_data gyro_pdata = {
+ .drdy_int_pin = 2,
+};
+
static int st_gyro_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *ch, int *val,
int *val2, long mask)
@@ -466,18 +488,18 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &gyro_info;
- err = st_sensors_power_enable(indio_dev);
- if (err)
- return err;
-
err = st_sensors_verify_id(indio_dev);
if (err < 0)
- goto st_gyro_power_off;
+ return err;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+ err = iio_read_mount_matrix(gdata->dev, &gdata->mount_matrix);
+ if (err)
+ return err;
+
gdata->current_fullscale = &gdata->sensor_settings->fs.fs_avl[0];
gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
@@ -485,11 +507,11 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
- goto st_gyro_power_off;
+ return err;
err = st_gyro_allocate_ring(indio_dev);
if (err < 0)
- goto st_gyro_power_off;
+ return err;
if (gdata->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -512,9 +534,6 @@ st_gyro_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_gyro_probe_trigger_error:
st_gyro_deallocate_ring(indio_dev);
-st_gyro_power_off:
- st_sensors_power_disable(indio_dev);
-
return err;
}
EXPORT_SYMBOL(st_gyro_common_probe);
@@ -523,8 +542,6 @@ void st_gyro_common_remove(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
- st_sensors_power_disable(indio_dev);
-
iio_device_unregister(indio_dev);
if (gdata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 8190966e6ff0..a25cc0379e16 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -86,16 +86,29 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
if (err < 0)
return err;
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
+
err = st_gyro_common_probe(indio_dev);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
return 0;
+
+st_gyro_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
}
static int st_gyro_i2c_remove(struct i2c_client *client)
{
- st_gyro_common_remove(i2c_get_clientdata(client));
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ st_sensors_power_disable(indio_dev);
+
+ st_gyro_common_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index efb862763ca3..18d6a2aeda45 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -90,16 +90,29 @@ static int st_gyro_spi_probe(struct spi_device *spi)
if (err < 0)
return err;
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
+
err = st_gyro_common_probe(indio_dev);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
return 0;
+
+st_gyro_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
}
static int st_gyro_spi_remove(struct spi_device *spi)
{
- st_gyro_common_remove(spi_get_drvdata(spi));
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ st_sensors_power_disable(indio_dev);
+
+ st_gyro_common_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 1fa8d51d5080..d4921385aaf7 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -521,7 +521,7 @@ static int afe4403_probe(struct spi_device *spi)
afe->trig = devm_iio_trigger_alloc(afe->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!afe->trig) {
dev_err(afe->dev, "Unable to allocate IIO trigger\n");
ret = -ENOMEM;
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index e1476bf79fe2..d8a27dfe074a 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -528,7 +528,7 @@ static int afe4404_probe(struct i2c_client *client,
afe->trig = devm_iio_trigger_alloc(afe->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!afe->trig) {
dev_err(afe->dev, "Unable to allocate IIO trigger\n");
ret = -ENOMEM;
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 23bc9c784ef4..4a39f1019347 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -7,7 +7,6 @@
* 7-bit I2C address: 0x5C.
*/
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -33,7 +32,11 @@
struct am2315_data {
struct i2c_client *client;
struct mutex lock;
- s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ s16 chans[2];
+ s64 timestamp __aligned(8);
+ } scan;
};
struct am2315_sensor_data {
@@ -167,20 +170,20 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
mutex_lock(&data->lock);
if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
- data->buffer[0] = sensor_data.hum_data;
- data->buffer[1] = sensor_data.temp_data;
+ data->scan.chans[0] = sensor_data.hum_data;
+ data->scan.chans[1] = sensor_data.temp_data;
} else {
i = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
- data->buffer[i] = (bit ? sensor_data.temp_data :
- sensor_data.hum_data);
+ data->scan.chans[i] = (bit ? sensor_data.temp_data :
+ sensor_data.hum_data);
i++;
}
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -256,17 +259,9 @@ static const struct i2c_device_id am2315_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
-static const struct acpi_device_id am2315_acpi_id[] = {
- {"AOS2315", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(acpi, am2315_acpi_id);
-
static struct i2c_driver am2315_driver = {
.driver = {
.name = "am2315",
- .acpi_match_table = ACPI_PTR(am2315_acpi_id),
},
.probe = am2315_probe,
.id_table = am2315_i2c_id,
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 2a957f19048e..9e0fce917ce4 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -25,6 +25,8 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/time.h>
+
#define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01
@@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
struct iio_chan_spec const *chan)
{
struct i2c_client *client = data->client;
- int delay = data->adc_int_us[chan->address];
+ int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
int ret;
__be16 val;
@@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct hdc100x_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- int delay = data->adc_int_us[0] + data->adc_int_us[1];
+ int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
int ret;
/* dual read starts at temp register */
diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c
index 83f5b9f60780..1381df46187c 100644
--- a/drivers/iio/humidity/hdc2010.c
+++ b/drivers/iio/humidity/hdc2010.c
@@ -272,7 +272,6 @@ static int hdc2010_probe(struct i2c_client *client,
data->client = client;
mutex_init(&data->lock);
- indio_dev->dev.parent = &client->dev;
/*
* As DEVICE ID register does not differentiate between
* HDC2010 and HDC2080, we have the name hardcoded
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index 74383abc0d44..fa0fe404a70a 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -8,6 +8,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "hid-sensor-trigger.h"
@@ -295,3 +296,4 @@ module_platform_driver(hid_humidity_platform_driver);
MODULE_DESCRIPTION("HID Environmental humidity sensor");
MODULE_AUTHOR("Song Hongyan <hongyan.song@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index f02883b08480..001ca2c3ff95 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -94,6 +94,7 @@ config KMX61
source "drivers/iio/imu/inv_icm42600/Kconfig"
source "drivers/iio/imu/inv_mpu6050/Kconfig"
source "drivers/iio/imu/st_lsm6dsx/Kconfig"
+source "drivers/iio/imu/st_lsm9ds0/Kconfig"
endmenu
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 13e9ff442b11..c82748096c77 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -26,3 +26,4 @@ obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
obj-y += st_lsm6dsx/
+obj-y += st_lsm9ds0/
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 319b64b2fd88..b9a06ca29bee 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -12,14 +12,10 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
#define ADIS_MSC_CTRL_DATA_RDY_EN BIT(2)
@@ -415,12 +411,11 @@ int __adis_initial_startup(struct adis *adis)
int ret;
/* check if the device has rst pin low */
- gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
+ gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (gpio) {
- gpiod_set_value_cansleep(gpio, 1);
msleep(10);
/* bring device out of reset */
gpiod_set_value_cansleep(gpio, 0);
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 768aa493a1a6..b12917a7cb60 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -10,22 +10,15 @@
* Copyright (c) 2011 Analog Devices Inc.
*/
-#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/imu/adis.h>
@@ -641,28 +634,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct adis16400_state *st = iio_priv(indio_dev);
struct adis *adis = &st->adis;
- u32 old_speed_hz = st->adis.spi->max_speed_hz;
void *buffer;
int ret;
- if (!adis->buffer)
- return -ENOMEM;
-
- if (!(st->variant->flags & ADIS16400_NO_BURST) &&
- st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
- st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
- spi_setup(st->adis.spi);
- }
-
ret = spi_sync(adis->spi, &adis->msg);
if (ret)
dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
- if (!(st->variant->flags & ADIS16400_NO_BURST)) {
- st->adis.spi->max_speed_hz = old_speed_hz;
- spi_setup(st->adis.spi);
- }
-
if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
buffer = adis->buffer + sizeof(u16);
else
@@ -968,7 +946,8 @@ static const char * const adis16400_status_error_msgs[] = {
BIT(ADIS16400_DIAG_STAT_POWER_LOW), \
.timeouts = (_timeouts), \
.burst_reg_cmd = ADIS16400_GLOB_CMD, \
- .burst_len = (_burst_len) \
+ .burst_len = (_burst_len), \
+ .burst_max_speed_hz = ADIS16400_SPI_BURST \
}
static const struct adis_timeout adis16300_timeouts = {
@@ -1178,8 +1157,6 @@ static int adis16400_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
/* setup the industrialio driver allocated elements */
st->variant = &adis16400_chips[spi_get_device_id(spi)->driver_data];
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 73bf45e859b8..a6f9fba3e03f 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -5,7 +5,6 @@
* Copyright 2019 Analog Devices Inc.
*/
-#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
@@ -388,8 +387,6 @@ static int adis16460_probe(struct spi_device *spi)
if (indio_dev == NULL)
return -ENOMEM;
- spi_set_drvdata(spi, indio_dev);
-
st = iio_priv(indio_dev);
st->chip_info = &adis16460_chip_info;
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 1de62fc79e0f..eb48102f9424 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -14,7 +14,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/imu/adis.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/irq.h>
#include <linux/lcm.h>
@@ -645,7 +644,8 @@ static int adis16475_enable_irq(struct adis *adis, bool enable)
.timeouts = (_timeouts), \
.burst_reg_cmd = ADIS16475_REG_GLOB_CMD, \
.burst_len = ADIS16475_BURST_MAX_DATA, \
- .burst_max_len = ADIS16475_BURST32_MAX_DATA \
+ .burst_max_len = ADIS16475_BURST32_MAX_DATA, \
+ .burst_max_speed_hz = ADIS16475_BURST_MAX_SPEED \
}
static const struct adis16475_sync adis16475_sync_mode[] = {
@@ -1062,15 +1062,11 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
bool valid;
/* offset until the first element after gyro and accel */
const u8 offset = st->burst32 ? 13 : 7;
- const u32 cached_spi_speed_hz = adis->spi->max_speed_hz;
-
- adis->spi->max_speed_hz = ADIS16475_BURST_MAX_SPEED;
ret = spi_sync(adis->spi, &adis->msg);
if (ret)
- return ret;
+ goto check_burst32;
- adis->spi->max_speed_hz = cached_spi_speed_hz;
buffer = adis->buffer;
crc = be16_to_cpu(buffer[offset + 2]);
@@ -1332,7 +1328,6 @@ static int adis16475_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
st->info = device_get_match_data(&spi->dev);
if (!st->info)
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index f81b86690b76..a869a6e52a16 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -9,21 +9,19 @@
#include <linux/bitfield.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <linux/math.h>
-#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/lcm.h>
+#include <linux/swab.h>
+#include <linux/crc32.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
+#include <linux/iio/trigger_consumer.h>
#include <linux/debugfs.h>
@@ -103,6 +101,12 @@
* Available only for ADIS1649x devices
*/
#define ADIS16495_REG_SYNC_SCALE ADIS16480_REG(0x03, 0x10)
+#define ADIS16495_REG_BURST_CMD ADIS16480_REG(0x00, 0x7C)
+#define ADIS16495_BURST_ID 0xA5A5
+/* total number of segments in burst */
+#define ADIS16495_BURST_MAX_DATA 20
+/* spi max speed in burst mode */
+#define ADIS16495_BURST_MAX_SPEED 6000000
#define ADIS16480_REG_SERIAL_NUM ADIS16480_REG(0x04, 0x20)
@@ -163,6 +167,8 @@ struct adis16480 {
struct clk *ext_clk;
enum adis16480_clock_mode clk_mode;
unsigned int clk_freq;
+ /* Alignment needed for the timestamp */
+ __be16 data[ADIS16495_BURST_MAX_DATA] __aligned(8);
};
static const char * const adis16480_int_pin_names[4] = {
@@ -863,7 +869,7 @@ static const char * const adis16480_status_error_msgs[] = {
static int adis16480_enable_irq(struct adis *adis, bool enable);
-#define ADIS16480_DATA(_prod_id, _timeouts) \
+#define ADIS16480_DATA(_prod_id, _timeouts, _burst_len) \
{ \
.diag_stat_reg = ADIS16480_REG_DIAG_STS, \
.glob_cmd_reg = ADIS16480_REG_GLOB_CMD, \
@@ -887,6 +893,9 @@ static int adis16480_enable_irq(struct adis *adis, bool enable);
BIT(ADIS16480_DIAG_STAT_BARO_FAIL), \
.enable_irq = adis16480_enable_irq, \
.timeouts = (_timeouts), \
+ .burst_reg_cmd = ADIS16495_REG_BURST_CMD, \
+ .burst_len = (_burst_len), \
+ .burst_max_speed_hz = ADIS16495_BURST_MAX_SPEED \
}
static const struct adis_timeout adis16485_timeouts = {
@@ -931,7 +940,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts),
+ .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0),
},
[ADIS16480] = {
.channels = adis16480_channels,
@@ -944,7 +953,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts),
+ .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0),
},
[ADIS16485] = {
.channels = adis16485_channels,
@@ -957,7 +966,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts),
+ .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0),
},
[ADIS16488] = {
.channels = adis16480_channels,
@@ -970,7 +979,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts),
+ .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0),
},
[ADIS16490] = {
.channels = adis16485_channels,
@@ -984,7 +993,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16490, &adis16495_timeouts),
+ .adis_data = ADIS16480_DATA(16490, &adis16495_timeouts, 0),
},
[ADIS16495_1] = {
.channels = adis16485_channels,
@@ -998,7 +1007,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts),
+ /* 20 elements of 16 bits */
+ .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2),
},
[ADIS16495_2] = {
.channels = adis16485_channels,
@@ -1012,7 +1023,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts),
+ /* 20 elements of 16 bits */
+ .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2),
},
[ADIS16495_3] = {
.channels = adis16485_channels,
@@ -1026,7 +1039,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts),
+ /* 20 elements of 16 bits */
+ .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2),
},
[ADIS16497_1] = {
.channels = adis16485_channels,
@@ -1040,7 +1055,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts),
+ /* 20 elements of 16 bits */
+ .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2),
},
[ADIS16497_2] = {
.channels = adis16485_channels,
@@ -1054,7 +1071,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts),
+ /* 20 elements of 16 bits */
+ .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2),
},
[ADIS16497_3] = {
.channels = adis16485_channels,
@@ -1068,10 +1087,118 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts),
+ /* 20 elements of 16 bits */
+ .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts,
+ ADIS16495_BURST_MAX_DATA * 2),
},
};
+static bool adis16480_validate_crc(const u16 *buf, const u8 n_elem, const u32 crc)
+{
+ u32 crc_calc;
+ u16 crc_buf[15];
+ int j;
+
+ for (j = 0; j < n_elem; j++)
+ crc_buf[j] = swab16(buf[j]);
+
+ crc_calc = crc32(~0, crc_buf, n_elem * 2);
+ crc_calc ^= ~0;
+
+ return (crc == crc_calc);
+}
+
+static irqreturn_t adis16480_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16480 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ int ret, bit, offset, i = 0;
+ __be16 *buffer;
+ u32 crc;
+ bool valid;
+
+ adis_dev_lock(adis);
+ if (adis->current_page != 0) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = 0;
+ ret = spi_write(adis->spi, adis->tx, 2);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
+ adis_dev_unlock(adis);
+ goto irq_done;
+ }
+
+ adis->current_page = 0;
+ }
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+ adis_dev_unlock(adis);
+ goto irq_done;
+ }
+
+ adis_dev_unlock(adis);
+
+ /*
+ * After making the burst request, the response can have one or two
+ * 16-bit responses containing the BURST_ID depending on the sclk. If
+ * clk > 3.6 MHz, then we will have two BURST_IDs in a row; if clk < 3 MHz,
+ * we have only one. To handle that variation, we look for the transition from
+ * the BURST_ID to the SYS_E_FLAG register, whose value will not be 0xA5A5. If
+ * we do not find this transition within the first 4 segments, the data is
+ * considered invalid.
+ */
+ buffer = adis->buffer;
+ for (offset = 0; offset < 4; offset++) {
+ u16 curr = be16_to_cpu(buffer[offset]);
+ u16 next = be16_to_cpu(buffer[offset + 1]);
+
+ if (curr == ADIS16495_BURST_ID && next != ADIS16495_BURST_ID) {
+ offset++;
+ break;
+ }
+ }
+
+ if (offset == 4) {
+ dev_err(&adis->spi->dev, "Invalid burst data\n");
+ goto irq_done;
+ }
+
+ crc = be16_to_cpu(buffer[offset + 16]) << 16 | be16_to_cpu(buffer[offset + 15]);
+ valid = adis16480_validate_crc((u16 *)&buffer[offset], 15, crc);
+ if (!valid) {
+ dev_err(&adis->spi->dev, "Invalid crc\n");
+ goto irq_done;
+ }
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ /*
+ * When burst mode is used, temperature is the first data
+ * channel in the sequence, but the temperature scan index
+ * is 10.
+ */
+ switch (bit) {
+ case ADIS16480_SCAN_TEMP:
+ st->data[i++] = buffer[offset + 1];
+ break;
+ case ADIS16480_SCAN_GYRO_X ... ADIS16480_SCAN_ACCEL_Z:
+ /* The lower register data is sequenced first */
+ st->data[i++] = buffer[2 * bit + offset + 3];
+ st->data[i++] = buffer[2 * bit + offset + 2];
+ break;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data, pf->timestamp);
+irq_done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
@@ -1279,8 +1406,6 @@ static int adis16480_probe(struct spi_device *spi)
if (indio_dev == NULL)
return -ENOMEM;
- spi_set_drvdata(spi, indio_dev);
-
st = iio_priv(indio_dev);
st->chip_info = &adis16480_chip_info[id->driver_data];
@@ -1341,7 +1466,8 @@ static int adis16480_probe(struct spi_device *spi)
st->clk_freq = st->chip_info->int_clk;
}
- ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16480_trigger_handler);
if (ret)
return ret;
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index ac354321f63a..351c303c8a8c 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -51,9 +51,13 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
adis->xfer[0].tx_buf = tx;
adis->xfer[0].bits_per_word = 8;
adis->xfer[0].len = 2;
+ if (adis->data->burst_max_speed_hz)
+ adis->xfer[0].speed_hz = adis->data->burst_max_speed_hz;
adis->xfer[1].rx_buf = adis->buffer;
adis->xfer[1].bits_per_word = 8;
adis->xfer[1].len = burst_length;
+ if (adis->data->burst_max_speed_hz)
+ adis->xfer[1].speed_hz = adis->data->burst_max_speed_hz;
spi_message_init(&adis->msg);
spi_message_add_tail(&adis->xfer[0], &adis->msg);
@@ -129,31 +133,34 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
struct adis *adis = iio_device_get_drvdata(indio_dev);
int ret;
- if (!adis->buffer)
- return -ENOMEM;
-
if (adis->data->has_paging) {
mutex_lock(&adis->state_lock);
if (adis->current_page != 0) {
adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
adis->tx[1] = 0;
- spi_write(adis->spi, adis->tx, 2);
+ ret = spi_write(adis->spi, adis->tx, 2);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
+ mutex_unlock(&adis->state_lock);
+ goto irq_done;
+ }
+
+ adis->current_page = 0;
}
}
ret = spi_sync(adis->spi, &adis->msg);
- if (ret)
- dev_err(&adis->spi->dev, "Failed to read data: %d", ret);
-
-
- if (adis->data->has_paging) {
- adis->current_page = 0;
+ if (adis->data->has_paging)
mutex_unlock(&adis->state_lock);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to read data: %d", ret);
+ goto irq_done;
}
iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
pf->timestamp);
+irq_done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index fa5540fabacc..48eedc29b28a 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -62,7 +62,8 @@ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
int ret;
adis->trig = devm_iio_trigger_alloc(&adis->spi->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!adis->trig)
return -ENOMEM;
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 290b5ef83f77..824b5124a5f5 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -785,7 +785,8 @@ int bmi160_probe_trigger(struct iio_dev *indio_dev, int irq, u32 irq_type)
int ret;
data->trig = devm_iio_trigger_alloc(&indio_dev->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (data->trig == NULL)
return -ENOMEM;
@@ -851,8 +852,7 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- ret = iio_read_mount_matrix(dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 8bd77185ccb7..86858da9cc38 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -592,7 +592,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
st->chip = chip;
st->map = regmap;
- ret = iio_read_mount_matrix(dev, "mount-matrix", &st->orientation);
+ ret = iio_read_mount_matrix(dev, &st->orientation);
if (ret) {
dev_err(dev, "failed to retrieve mounting matrix %d\n", ret);
return ret;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 6244a07048df..8a7a920e6200 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -570,11 +570,9 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
freq_hz = INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
period_us = 1000000 / freq_hz;
- result = pm_runtime_get_sync(pdev);
- if (result < 0) {
- pm_runtime_put_noidle(pdev);
+ result = pm_runtime_resume_and_get(pdev);
+ if (result)
return result;
- }
switch (chan->type) {
case IIO_ANGL_VEL:
@@ -812,11 +810,9 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
return result;
mutex_lock(&st->lock);
- result = pm_runtime_get_sync(pdev);
- if (result < 0) {
- pm_runtime_put_noidle(pdev);
+ result = pm_runtime_resume_and_get(pdev);
+ if (result)
goto error_write_raw_unlock;
- }
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -930,11 +926,9 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
result = 0;
goto fifo_rate_fail_unlock;
}
- result = pm_runtime_get_sync(pdev);
- if (result < 0) {
- pm_runtime_put_noidle(pdev);
+ result = pm_runtime_resume_and_get(pdev);
+ if (result)
goto fifo_rate_fail_unlock;
- }
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
@@ -1314,8 +1308,7 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
for (i = 0; i < INV_NUM_PARTS; ++i) {
if (regval == hw_info[i].whoami) {
dev_warn(regmap_get_device(st->map),
- "whoami mismatch got %#02x (%s)"
- "expected %#02hhx (%s)\n",
+ "whoami mismatch got 0x%02x (%s) expected 0x%02x (%s)\n",
regval, hw_info[i].name,
st->hw->whoami, st->hw->name);
break;
@@ -1323,7 +1316,7 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
}
if (i >= INV_NUM_PARTS) {
dev_err(regmap_get_device(st->map),
- "invalid whoami %#02x expected %#02hhx (%s)\n",
+ "invalid whoami 0x%02x expected 0x%02x (%s)\n",
regval, st->hw->whoami, st->hw->name);
return -ENODEV;
}
@@ -1422,7 +1415,6 @@ static void inv_mpu_pm_disable(void *data)
{
struct device *dev = data;
- pm_runtime_put_sync_suspend(dev);
pm_runtime_disable(dev);
}
@@ -1455,8 +1447,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
pdata = dev_get_platdata(dev);
if (!pdata) {
- result = iio_read_mount_matrix(dev, "mount-matrix",
- &st->orientation);
+ result = iio_read_mount_matrix(dev, &st->orientation);
if (result) {
dev_err(dev, "Failed to retrieve mounting matrix %d\n",
result);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index de8ed1446d60..2d0e8cdd4848 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -173,11 +173,9 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (enable) {
scan = inv_scan_query(indio_dev);
- result = pm_runtime_get_sync(pdev);
- if (result < 0) {
- pm_runtime_put_noidle(pdev);
+ result = pm_runtime_resume_and_get(pdev);
+ if (result)
return result;
- }
/*
* In case autosuspend didn't trigger, turn off first not
* required sensors.
@@ -238,7 +236,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type)
st->trig = devm_iio_trigger_alloc(&indio_dev->dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!st->trig)
return -ENOMEM;
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index fc5a60fcfec0..1dabfd615dab 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -750,7 +750,7 @@ static int kmx61_set_power_state(struct kmx61_data *data, bool on, u8 device)
}
if (on) {
- ret = pm_runtime_get_sync(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
} else {
pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
@@ -759,8 +759,6 @@ static int kmx61_set_power_state(struct kmx61_data *data, bool on, u8 device)
dev_err(&data->client->dev,
"Failed: kmx61_set_power_state for %d, ret %d\n",
on, ret);
- if (on)
- pm_runtime_put_noidle(&data->client->dev);
return ret;
}
@@ -1264,7 +1262,7 @@ static struct iio_trigger *kmx61_trigger_setup(struct kmx61_data *data,
"%s-%s-dev%d",
indio_dev->name,
tag,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!trig)
return ERR_PTR(-ENOMEM);
@@ -1426,7 +1424,6 @@ static int kmx61_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
if (client->irq > 0) {
iio_triggered_buffer_cleanup(data->acc_indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 7cedaab096a7..db45f1fc0b81 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -15,19 +15,19 @@
*
* Supported sensors:
* - LSM6DS3:
- * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
+ * - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 8KB
*
* - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
- * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
+ * - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
* - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
- * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416,
+ * - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
@@ -2256,7 +2256,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- err = iio_read_mount_matrix(hw->dev, "mount-matrix", &hw->orientation);
+ err = iio_read_mount_matrix(hw->dev, &hw->orientation);
if (err)
return err;
diff --git a/drivers/iio/imu/st_lsm9ds0/Kconfig b/drivers/iio/imu/st_lsm9ds0/Kconfig
new file mode 100644
index 000000000000..53b7017014f8
--- /dev/null
+++ b/drivers/iio/imu/st_lsm9ds0/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config IIO_ST_LSM9DS0
+ tristate "STMicroelectronics LSM9DS0 IMU driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ depends on !SENSORS_LIS3_I2C
+ depends on !SENSORS_LIS3_SPI
+ select IIO_ST_LSM9DS0_I2C if I2C
+ select IIO_ST_LSM9DS0_SPI if SPI_MASTER
+ select IIO_ST_ACCEL_3AXIS
+ select IIO_ST_MAGN_3AXIS
+
+ help
+ Say yes here to build support for the STMicroelectronics LSM9DS0 IMU
+ sensor. Supported devices: the accelerometer and magnetometer of the LSM9DS0.
+
+ To compile this driver as a module, choose M here: the module
+ will be called st_lsm9ds0.
+
+config IIO_ST_LSM9DS0_I2C
+ tristate
+ depends on IIO_ST_LSM9DS0
+ select REGMAP_I2C
+
+config IIO_ST_LSM9DS0_SPI
+ tristate
+ depends on IIO_ST_LSM9DS0
+ select REGMAP_SPI
diff --git a/drivers/iio/imu/st_lsm9ds0/Makefile b/drivers/iio/imu/st_lsm9ds0/Makefile
new file mode 100644
index 000000000000..488af523f648
--- /dev/null
+++ b/drivers/iio/imu/st_lsm9ds0/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_IIO_ST_LSM9DS0) += st_lsm9ds0.o
+st_lsm9ds0-y := st_lsm9ds0_core.o
+obj-$(CONFIG_IIO_ST_LSM9DS0_I2C) += st_lsm9ds0_i2c.o
+obj-$(CONFIG_IIO_ST_LSM9DS0_SPI) += st_lsm9ds0_spi.o
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
new file mode 100644
index 000000000000..146393afd9a7
--- /dev/null
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+// STMicroelectronics LSM9DS0 IMU driver
+
+#ifndef ST_LSM9DS0_H
+#define ST_LSM9DS0_H
+
+struct iio_dev;
+struct regulator;
+
+struct st_lsm9ds0 {
+ struct device *dev;
+ const char *name;
+ int irq;
+ struct iio_dev *accel;
+ struct iio_dev *magn;
+ struct regulator *vdd;
+ struct regulator *vdd_io;
+};
+
+int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap);
+int st_lsm9ds0_remove(struct st_lsm9ds0 *lsm9ds0);
+
+#endif /* ST_LSM9DS0_H */
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
new file mode 100644
index 000000000000..8204f7303fd7
--- /dev/null
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics LSM9DS0 IMU driver
+ *
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/iio.h>
+
+#include "st_lsm9ds0.h"
+
+static int st_lsm9ds0_power_enable(struct device *dev, struct st_lsm9ds0 *lsm9ds0)
+{
+ int ret;
+
+ /* Regulators are not mandatory, but if requested we should enable them. */
+ lsm9ds0->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(lsm9ds0->vdd)) {
+ dev_err(dev, "unable to get Vdd supply\n");
+ return PTR_ERR(lsm9ds0->vdd);
+ }
+ ret = regulator_enable(lsm9ds0->vdd);
+ if (ret) {
+ dev_warn(dev, "Failed to enable specified Vdd supply\n");
+ return ret;
+ }
+
+ lsm9ds0->vdd_io = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(lsm9ds0->vdd_io)) {
+ dev_err(dev, "unable to get Vdd_IO supply\n");
+ regulator_disable(lsm9ds0->vdd);
+ return PTR_ERR(lsm9ds0->vdd_io);
+ }
+ ret = regulator_enable(lsm9ds0->vdd_io);
+ if (ret) {
+ dev_warn(dev, "Failed to enable specified Vdd_IO supply\n");
+ regulator_disable(lsm9ds0->vdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void st_lsm9ds0_power_disable(void *data)
+{
+ struct st_lsm9ds0 *lsm9ds0 = data;
+
+ regulator_disable(lsm9ds0->vdd_io);
+ regulator_disable(lsm9ds0->vdd);
+}
+
+static int devm_st_lsm9ds0_power_enable(struct st_lsm9ds0 *lsm9ds0)
+{
+ struct device *dev = lsm9ds0->dev;
+ int ret;
+
+ ret = st_lsm9ds0_power_enable(dev, lsm9ds0);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, st_lsm9ds0_power_disable, lsm9ds0);
+}
+
+static int st_lsm9ds0_probe_accel(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap)
+{
+ const struct st_sensor_settings *settings;
+ struct device *dev = lsm9ds0->dev;
+ struct st_sensor_data *data;
+
+ settings = st_accel_get_settings(lsm9ds0->name);
+ if (!settings) {
+ dev_err(dev, "device name %s not recognized.\n", lsm9ds0->name);
+ return -ENODEV;
+ }
+
+ lsm9ds0->accel = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!lsm9ds0->accel)
+ return -ENOMEM;
+
+ lsm9ds0->accel->name = lsm9ds0->name;
+
+ data = iio_priv(lsm9ds0->accel);
+ data->sensor_settings = (struct st_sensor_settings *)settings;
+ data->dev = dev;
+ data->irq = lsm9ds0->irq;
+ data->regmap = regmap;
+ data->vdd = lsm9ds0->vdd;
+ data->vdd_io = lsm9ds0->vdd_io;
+
+ return st_accel_common_probe(lsm9ds0->accel);
+}
+
+static int st_lsm9ds0_probe_magn(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap)
+{
+ const struct st_sensor_settings *settings;
+ struct device *dev = lsm9ds0->dev;
+ struct st_sensor_data *data;
+
+ settings = st_magn_get_settings(lsm9ds0->name);
+ if (!settings) {
+ dev_err(dev, "device name %s not recognized.\n", lsm9ds0->name);
+ return -ENODEV;
+ }
+
+ lsm9ds0->magn = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!lsm9ds0->magn)
+ return -ENOMEM;
+
+ lsm9ds0->magn->name = lsm9ds0->name;
+
+ data = iio_priv(lsm9ds0->magn);
+ data->sensor_settings = (struct st_sensor_settings *)settings;
+ data->dev = dev;
+ data->irq = lsm9ds0->irq;
+ data->regmap = regmap;
+ data->vdd = lsm9ds0->vdd;
+ data->vdd_io = lsm9ds0->vdd_io;
+
+ return st_magn_common_probe(lsm9ds0->magn);
+}
+
+int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap)
+{
+ int ret;
+
+ ret = devm_st_lsm9ds0_power_enable(lsm9ds0);
+ if (ret)
+ return ret;
+
+ /* Setup accelerometer device */
+ ret = st_lsm9ds0_probe_accel(lsm9ds0, regmap);
+ if (ret)
+ return ret;
+
+ /* Setup magnetometer device */
+ ret = st_lsm9ds0_probe_magn(lsm9ds0, regmap);
+ if (ret)
+ st_accel_common_remove(lsm9ds0->accel);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(st_lsm9ds0_probe);
+
+int st_lsm9ds0_remove(struct st_lsm9ds0 *lsm9ds0)
+{
+ st_magn_common_remove(lsm9ds0->magn);
+ st_accel_common_remove(lsm9ds0->accel);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(st_lsm9ds0_remove);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("STMicroelectronics LSM9DS0 IMU core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
new file mode 100644
index 000000000000..50a36ab53bc3
--- /dev/null
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics LSM9DS0 IMU driver
+ *
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/iio/common/st_sensors_i2c.h>
+
+#include "st_lsm9ds0.h"
+
+static const struct of_device_id st_lsm9ds0_of_match[] = {
+ {
+ .compatible = "st,lsm9ds0-imu",
+ .data = LSM9DS0_IMU_DEV_NAME,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, st_lsm9ds0_of_match);
+
+static const struct i2c_device_id st_lsm9ds0_id_table[] = {
+ { LSM9DS0_IMU_DEV_NAME },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, st_lsm9ds0_id_table);
+
+static const struct regmap_config st_lsm9ds0_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = 0x80,
+};
+
+static int st_lsm9ds0_i2c_probe(struct i2c_client *client)
+{
+ const struct regmap_config *config = &st_lsm9ds0_regmap_config;
+ struct device *dev = &client->dev;
+ struct st_lsm9ds0 *lsm9ds0;
+ struct regmap *regmap;
+
+ st_sensors_dev_name_probe(dev, client->name, sizeof(client->name));
+
+ lsm9ds0 = devm_kzalloc(dev, sizeof(*lsm9ds0), GFP_KERNEL);
+ if (!lsm9ds0)
+ return -ENOMEM;
+
+ lsm9ds0->dev = dev;
+ lsm9ds0->name = client->name;
+ lsm9ds0->irq = client->irq;
+
+ regmap = devm_regmap_init_i2c(client, config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ i2c_set_clientdata(client, lsm9ds0);
+
+ return st_lsm9ds0_probe(lsm9ds0, regmap);
+}
+
+static int st_lsm9ds0_i2c_remove(struct i2c_client *client)
+{
+ return st_lsm9ds0_remove(i2c_get_clientdata(client));
+}
+
+static struct i2c_driver st_lsm9ds0_driver = {
+ .driver = {
+ .name = "st-lsm9ds0-i2c",
+ .of_match_table = st_lsm9ds0_of_match,
+ },
+ .probe_new = st_lsm9ds0_i2c_probe,
+ .remove = st_lsm9ds0_i2c_remove,
+ .id_table = st_lsm9ds0_id_table,
+};
+module_i2c_driver(st_lsm9ds0_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("STMicroelectronics LSM9DS0 IMU I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
new file mode 100644
index 000000000000..272c88990dd0
--- /dev/null
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics LSM9DS0 IMU driver
+ *
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/common/st_sensors_spi.h>
+
+#include "st_lsm9ds0.h"
+
+static const struct of_device_id st_lsm9ds0_of_match[] = {
+ {
+ .compatible = "st,lsm9ds0-imu",
+ .data = LSM9DS0_IMU_DEV_NAME,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, st_lsm9ds0_of_match);
+
+static const struct spi_device_id st_lsm9ds0_id_table[] = {
+ { LSM9DS0_IMU_DEV_NAME },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, st_lsm9ds0_id_table);
+
+static const struct regmap_config st_lsm9ds0_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = 0xc0,
+};
+
+static int st_lsm9ds0_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct st_lsm9ds0 *lsm9ds0;
+ struct regmap *regmap;
+
+ st_sensors_dev_name_probe(dev, spi->modalias, sizeof(spi->modalias));
+
+ lsm9ds0 = devm_kzalloc(dev, sizeof(*lsm9ds0), GFP_KERNEL);
+ if (!lsm9ds0)
+ return -ENOMEM;
+
+ lsm9ds0->dev = dev;
+ lsm9ds0->name = spi->modalias;
+ lsm9ds0->irq = spi->irq;
+
+ regmap = devm_regmap_init_spi(spi, &st_lsm9ds0_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ spi_set_drvdata(spi, lsm9ds0);
+
+ return st_lsm9ds0_probe(lsm9ds0, regmap);
+}
+
+static int st_lsm9ds0_spi_remove(struct spi_device *spi)
+{
+ return st_lsm9ds0_remove(spi_get_drvdata(spi));
+}
+
+static struct spi_driver st_lsm9ds0_driver = {
+ .driver = {
+ .name = "st-lsm9ds0-spi",
+ .of_match_table = st_lsm9ds0_of_match,
+ },
+ .probe = st_lsm9ds0_spi_probe,
+ .remove = st_lsm9ds0_spi_remove,
+ .id_table = st_lsm9ds0_id_table,
+};
+module_spi_driver(st_lsm9ds0_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("STMicroelectronics LSM9DS0 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 9a8e16c7e9af..fdd623407b96 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -601,8 +601,10 @@ static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
return iio_storage_bytes_for_si(indio_dev,
- indio_dev->scan_index_timestamp);
+ iio_dev_opaque->scan_index_timestamp);
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
@@ -924,7 +926,6 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
if (ret)
goto error_clear_mux_table;
out_loc += length;
- in_loc += length;
}
buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
if (buffer->demux_bounce == NULL) {
@@ -1148,12 +1149,13 @@ int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret;
if (insert_buffer == remove_buffer)
return 0;
- mutex_lock(&indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
mutex_lock(&indio_dev->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer))
@@ -1176,7 +1178,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&indio_dev->mlock);
- mutex_unlock(&indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -1469,6 +1471,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
struct iio_dev *indio_dev,
int index)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_dev_attr *p;
struct attribute **attr;
int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
@@ -1495,7 +1498,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
goto error_cleanup_dynamic;
scan_el_attrcount += ret;
if (channels[i].type == IIO_TIMESTAMP)
- indio_dev->scan_index_timestamp =
+ iio_dev_opaque->scan_index_timestamp =
channels[i].scan_index;
}
if (indio_dev->masklength && buffer->scan_mask == NULL) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 59efb36db2c7..6d2175eb7af2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -169,6 +169,20 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};
+/**
+ * iio_device_id() - query the unique ID for the device
+ * @indio_dev: Device structure whose ID is being queried
+ *
+ * The IIO device ID is a unique index used, for example, for the naming
+ * of the character device /dev/iio\:device[ID].
+ */
+int iio_device_id(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ return iio_dev_opaque->id;
+}
+EXPORT_SYMBOL_GPL(iio_device_id);
/**
* iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
@@ -257,7 +271,7 @@ int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
- indio_dev->clock_id = clock_id;
+ iio_dev_opaque->clock_id = clock_id;
mutex_unlock(&indio_dev->mlock);
return 0;
@@ -265,6 +279,18 @@ int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
EXPORT_SYMBOL(iio_device_set_clock);
/**
+ * iio_device_get_clock() - Retrieve current timestamping clock for the device
+ * @indio_dev: IIO device structure containing the device
+ */
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ return iio_dev_opaque->clock_id;
+}
+EXPORT_SYMBOL(iio_device_get_clock);
+
+/**
* iio_get_time_ns() - utility function to get a time stamp for events etc
* @indio_dev: device
*/
@@ -591,7 +617,6 @@ EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
* iio_read_mount_matrix() - retrieve iio device mounting matrix from
* device "mount-matrix" property
* @dev: device the mounting matrix property is assigned to
- * @propname: device specific mounting matrix property name
* @matrix: where to store retrieved matrix
*
* If device is assigned no mounting matrix property, a default 3x3 identity
@@ -599,14 +624,12 @@ EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
*
* Return: 0 if success, or a negative error code on failure.
*/
-int iio_read_mount_matrix(struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix)
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
int err;
- err = device_property_read_string_array(dev, propname,
- matrix->rotation, len);
+ err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
if (err == len)
return 0;
@@ -1588,7 +1611,7 @@ static void iio_dev_release(struct device *device)
iio_device_detach_buffers(indio_dev);
- ida_simple_remove(&iio_ida, indio_dev->id);
+ ida_simple_remove(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
}
@@ -1628,17 +1651,17 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
device_initialize(&indio_dev->dev);
iio_device_set_drvdata(indio_dev, (void *)indio_dev);
mutex_init(&indio_dev->mlock);
- mutex_init(&indio_dev->info_exist_lock);
+ mutex_init(&iio_dev_opaque->info_exist_lock);
INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
- indio_dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
- if (indio_dev->id < 0) {
+ iio_dev_opaque->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ if (iio_dev_opaque->id < 0) {
/* cannot use a dev_err as the name isn't available */
pr_err("failed to get device id\n");
kfree(iio_dev_opaque);
return NULL;
}
- dev_set_name(&indio_dev->dev, "iio:device%d", indio_dev->id);
+ dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id);
INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
@@ -1657,9 +1680,9 @@ void iio_device_free(struct iio_dev *dev)
}
EXPORT_SYMBOL(iio_device_free);
-static void devm_iio_device_release(struct device *dev, void *res)
+static void devm_iio_device_release(void *iio_dev)
{
- iio_device_free(*(struct iio_dev **)res);
+ iio_device_free(iio_dev);
}
/**
@@ -1675,20 +1698,17 @@ static void devm_iio_device_release(struct device *dev, void *res)
*/
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
- struct iio_dev **ptr, *iio_dev;
+ struct iio_dev *iio_dev;
+ int ret;
- ptr = devres_alloc(devm_iio_device_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
+ iio_dev = iio_device_alloc(parent, sizeof_priv);
+ if (!iio_dev)
return NULL;
- iio_dev = iio_device_alloc(parent, sizeof_priv);
- if (iio_dev) {
- *ptr = iio_dev;
- devres_add(parent, ptr);
- } else {
- devres_free(ptr);
- }
+ ret = devm_add_action_or_reset(parent, devm_iio_device_release,
+ iio_dev);
+ if (ret)
+ return NULL;
return iio_dev;
}
@@ -1704,11 +1724,12 @@ EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
**/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
- struct iio_dev *indio_dev = container_of(inode->i_cdev,
- struct iio_dev, chrdev);
+ struct iio_dev_opaque *iio_dev_opaque =
+ container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
+ struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
struct iio_dev_buffer_pair *ib;
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
return -EBUSY;
iio_device_get(indio_dev);
@@ -1716,7 +1737,7 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
ib = kmalloc(sizeof(*ib), GFP_KERNEL);
if (!ib) {
iio_device_put(indio_dev);
- clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
+ clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
return -ENOMEM;
}
@@ -1738,10 +1759,11 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
- struct iio_dev *indio_dev = container_of(inode->i_cdev,
- struct iio_dev, chrdev);
+ struct iio_dev_opaque *iio_dev_opaque =
+ container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
+ struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
kfree(ib);
- clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
+ clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
iio_device_put(indio_dev);
return 0;
@@ -1768,7 +1790,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct iio_ioctl_handler *h;
int ret = -ENODEV;
- mutex_lock(&indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
/**
* The NULL check here is required to prevent crashing when a device
@@ -1788,7 +1810,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -ENODEV;
out_unlock:
- mutex_unlock(&indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -1847,7 +1869,7 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (!indio_dev->info)
return -EINVAL;
- indio_dev->driver_module = this_mod;
+ iio_dev_opaque->driver_module = this_mod;
/* If the calling driver did not initialize of_node, do it here */
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
@@ -1889,19 +1911,19 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
indio_dev->setup_ops = &noop_ring_setup_ops;
if (iio_dev_opaque->attached_buffers_cnt)
- cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
+ cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
else if (iio_dev_opaque->event_interface)
- cdev_init(&indio_dev->chrdev, &iio_event_fileops);
+ cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);
if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
- indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
- indio_dev->chrdev.owner = this_mod;
+ indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
+ iio_dev_opaque->chrdev.owner = this_mod;
}
/* assign device groups now; they should be all registered now */
indio_dev->dev.groups = iio_dev_opaque->groups;
- ret = cdev_device_add(&indio_dev->chrdev, &indio_dev->dev);
+ ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
if (ret < 0)
goto error_unreg_eventset;
@@ -1925,9 +1947,11 @@ EXPORT_SYMBOL(__iio_device_register);
**/
void iio_device_unregister(struct iio_dev *indio_dev)
{
- cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- mutex_lock(&indio_dev->info_exist_lock);
+ cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);
+
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
iio_device_unregister_debugfs(indio_dev);
@@ -1938,35 +1962,27 @@ void iio_device_unregister(struct iio_dev *indio_dev)
iio_device_wakeup_eventset(indio_dev);
iio_buffer_wakeup_poll(indio_dev);
- mutex_unlock(&indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);
-static void devm_iio_device_unreg(struct device *dev, void *res)
+static void devm_iio_device_unreg(void *indio_dev)
{
- iio_device_unregister(*(struct iio_dev **)res);
+ iio_device_unregister(indio_dev);
}
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod)
{
- struct iio_dev **ptr;
int ret;
- ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- *ptr = indio_dev;
ret = __iio_device_register(indio_dev, this_mod);
- if (!ret)
- devres_add(dev, ptr);
- else
- devres_free(ptr);
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index b2c94abbb487..b23caa2f2aa1 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/iio/iio.h>
+#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
@@ -116,14 +117,17 @@ EXPORT_SYMBOL(iio_trigger_unregister);
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
+ struct iio_dev_opaque *iio_dev_opaque;
+
if (!indio_dev || !trig)
return -EINVAL;
+ iio_dev_opaque = to_iio_dev_opaque(indio_dev);
mutex_lock(&indio_dev->mlock);
- WARN_ON(indio_dev->trig_readonly);
+ WARN_ON(iio_dev_opaque->trig_readonly);
indio_dev->trig = iio_trigger_get(trig);
- indio_dev->trig_readonly = true;
+ iio_dev_opaque->trig_readonly = true;
mutex_unlock(&indio_dev->mlock);
return 0;
@@ -240,12 +244,13 @@ static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
struct iio_poll_func *pf)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
bool notinuse =
bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
int ret = 0;
/* Prevent the module from being removed whilst attached to a trigger */
- __module_get(pf->indio_dev->driver_module);
+ __module_get(iio_dev_opaque->driver_module);
/* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
@@ -284,13 +289,14 @@ out_free_irq:
out_put_irq:
iio_trigger_put_irq(trig, pf->irq);
out_put_module:
- module_put(pf->indio_dev->driver_module);
+ module_put(iio_dev_opaque->driver_module);
return ret;
}
int iio_trigger_detach_poll_func(struct iio_trigger *trig,
struct iio_poll_func *pf)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
bool no_other_users =
bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
int ret = 0;
@@ -304,7 +310,7 @@ int iio_trigger_detach_poll_func(struct iio_trigger *trig,
trig->attached_own_device = false;
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
- module_put(pf->indio_dev->driver_module);
+ module_put(iio_dev_opaque->driver_module);
return ret;
}
@@ -399,6 +405,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_trigger *oldtrig = indio_dev->trig;
struct iio_trigger *trig;
int ret;
@@ -408,7 +415,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
- if (indio_dev->trig_readonly) {
+ if (iio_dev_opaque->trig_readonly) {
mutex_unlock(&indio_dev->mlock);
return -EPERM;
}
@@ -634,9 +641,9 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fm
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
-static void devm_iio_trigger_unreg(struct device *dev, void *res)
+static void devm_iio_trigger_unreg(void *trigger_info)
{
- iio_trigger_unregister(*(struct iio_trigger **)res);
+ iio_trigger_unregister(trigger_info);
}
/**
@@ -657,21 +664,13 @@ int __devm_iio_trigger_register(struct device *dev,
struct iio_trigger *trig_info,
struct module *this_mod)
{
- struct iio_trigger **ptr;
int ret;
- ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- *ptr = trig_info;
ret = __iio_trigger_register(trig_info, this_mod);
- if (!ret)
- devres_add(dev, ptr);
- else
- devres_free(ptr);
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
diff --git a/drivers/iio/industrialio-triggered-event.c b/drivers/iio/industrialio-triggered-event.c
index 53da9ab17a62..4bedc65c9fe3 100644
--- a/drivers/iio/industrialio-triggered-event.c
+++ b/drivers/iio/industrialio-triggered-event.c
@@ -37,7 +37,7 @@ int iio_triggered_event_setup(struct iio_dev *indio_dev,
indio_dev,
"%s_consumer%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (indio_dev->pollfunc_event == NULL)
return -ENOMEM;
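The indio_dev->id field has moved into struct iio_dev_opaque, so drivers now go through the iio_device_id() accessor when composing names such as the poll function name above. A sketch of the accessor in a hypothetical driver's trigger setup:

	#include <linux/iio/iio.h>
	#include <linux/iio/trigger.h>

	static int my_drv_setup_trigger(struct device *dev, struct iio_dev *indio_dev)
	{
		struct iio_trigger *trig;

		/* indio_dev->id is no longer visible to drivers; use the accessor. */
		trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
					      iio_device_id(indio_dev));
		if (!trig)
			return -ENOMEM;

		return devm_iio_trigger_register(dev, trig);
	}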
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 9c22697b7e83..391a3380a1d1 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -10,6 +10,7 @@
#include <linux/of.h>
#include <linux/iio/iio.h>
+#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
@@ -359,30 +360,24 @@ void iio_channel_release(struct iio_channel *channel)
}
EXPORT_SYMBOL_GPL(iio_channel_release);
-static void devm_iio_channel_free(struct device *dev, void *res)
+static void devm_iio_channel_free(void *iio_channel)
{
- struct iio_channel *channel = *(struct iio_channel **)res;
-
- iio_channel_release(channel);
+ iio_channel_release(iio_channel);
}
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *channel_name)
{
- struct iio_channel **ptr, *channel;
-
- ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct iio_channel *channel;
+ int ret;
channel = iio_channel_get(dev, channel_name);
- if (IS_ERR(channel)) {
- devres_free(ptr);
+ if (IS_ERR(channel))
return channel;
- }
- *ptr = channel;
- devres_add(dev, ptr);
+ ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
+ if (ret)
+ return ERR_PTR(ret);
return channel;
}
@@ -392,20 +387,16 @@ struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
struct device_node *np,
const char *channel_name)
{
- struct iio_channel **ptr, *channel;
-
- ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct iio_channel *channel;
+ int ret;
channel = of_iio_channel_get_by_name(np, channel_name);
- if (IS_ERR(channel)) {
- devres_free(ptr);
+ if (IS_ERR(channel))
return channel;
- }
- *ptr = channel;
- devres_add(dev, ptr);
+ ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
+ if (ret)
+ return ERR_PTR(ret);
return channel;
}
@@ -496,29 +487,24 @@ void iio_channel_release_all(struct iio_channel *channels)
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
-static void devm_iio_channel_free_all(struct device *dev, void *res)
+static void devm_iio_channel_free_all(void *iio_channels)
{
- struct iio_channel *channels = *(struct iio_channel **)res;
-
- iio_channel_release_all(channels);
+ iio_channel_release_all(iio_channels);
}
struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
- struct iio_channel **ptr, *channels;
-
- ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct iio_channel *channels;
+ int ret;
channels = iio_channel_get_all(dev);
- if (IS_ERR(channels)) {
- devres_free(ptr);
+ if (IS_ERR(channels))
return channels;
- }
- *ptr = channels;
- devres_add(dev, ptr);
+ ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
+ channels);
+ if (ret)
+ return ERR_PTR(ret);
return channels;
}
@@ -553,9 +539,10 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -563,7 +550,7 @@ int iio_read_channel_raw(struct iio_channel *chan, int *val)
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -571,9 +558,10 @@ EXPORT_SYMBOL_GPL(iio_read_channel_raw);
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -581,7 +569,7 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -646,9 +634,10 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -657,7 +646,7 @@ int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
scale);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -666,9 +655,10 @@ EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum attribute)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -676,7 +666,7 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -691,9 +681,10 @@ EXPORT_SYMBOL_GPL(iio_read_channel_offset);
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
unsigned int scale)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -714,7 +705,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
}
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -748,9 +739,10 @@ int iio_read_avail_channel_attribute(struct iio_channel *chan,
const int **vals, int *type, int *length,
enum iio_chan_info_enum attribute)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
@@ -758,7 +750,7 @@ int iio_read_avail_channel_attribute(struct iio_channel *chan,
ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -830,10 +822,11 @@ static int iio_channel_read_max(struct iio_channel *chan,
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
int type;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
@@ -841,7 +834,7 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -849,10 +842,11 @@ EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret = 0;
/* Need to verify underlying driver has not gone away */
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -860,7 +854,7 @@ int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
*type = chan->channel->type;
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
@@ -876,9 +870,10 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
enum iio_chan_info_enum attribute)
{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&chan->indio_dev->info_exist_lock);
+ mutex_lock(&iio_dev_opaque->info_exist_lock);
if (chan->indio_dev->info == NULL) {
ret = -ENODEV;
goto err_unlock;
@@ -886,7 +881,7 @@ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
- mutex_unlock(&chan->indio_dev->info_exist_lock);
+ mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
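With the devres conversion above, a consumer only has to call devm_iio_channel_get() and can forget about releasing the channel. A short sketch of a hypothetical consumer (the channel name is illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/iio/consumer.h>

	static int my_consumer_read(struct device *dev)
	{
		struct iio_channel *chan;
		int raw, ret;

		chan = devm_iio_channel_get(dev, "my-input");	/* hypothetical name */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = iio_read_channel_raw(chan, &raw);
		if (ret < 0)
			return ret;

		dev_info(dev, "raw reading: %d\n", raw);
		return 0;
	}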
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 917f9becf9c7..a62c7b4b8678 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -499,6 +499,17 @@ config TSL2583
Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
Access ALS data via iio, sysfs.
+config TSL2591
+ tristate "TAOS TSL2591 ambient light sensor"
+ depends on I2C
+ help
+ Select Y here for support of the AMS/TAOS TSL2591 ambient light sensor,
+ featuring channels for combined visible + IR intensity and lux illuminance.
+ Access data via iio and sysfs. Supports iio_events.
+
+ To compile this driver as a module, select M: the
+ module will be called tsl2591.
+
config TSL2772
tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index ea376deaca54..d10912faf964 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_ST_UVIS25_SPI) += st_uvis25_spi.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL2583) += tsl2583.o
+obj-$(CONFIG_TSL2591) += tsl2591.o
obj-$(CONFIG_TSL2772) += tsl2772.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 0a6ab5761eec..e1ff6f524f4b 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -204,7 +204,8 @@ static int acpi_als_add(struct acpi_device *device)
indio_dev->channels = acpi_als_channels;
indio_dev->num_channels = ARRAY_SIZE(acpi_als_channels);
- als->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name, indio_dev->id);
+ als->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
if (!als->trig)
return -ENOMEM;
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 85c8a05b73cb..2ff252c75c03 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -6,13 +6,10 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -392,3 +389,4 @@ module_platform_driver(hid_als_platform_driver);
MODULE_DESCRIPTION("HID Sensor ALS");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 17d167c3d595..1621530f5f61 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -6,13 +6,10 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -350,3 +347,4 @@ module_platform_driver(hid_prox_platform_driver);
MODULE_DESCRIPTION("HID Sensor Proximity");
MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 2f8b494f3e08..9de3262aa688 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -339,9 +339,7 @@ static int isl29028_set_pm_runtime_busy(struct isl29028_chip *chip, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
} else {
pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
@@ -647,7 +645,6 @@ static int isl29028_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
return isl29028_clear_configure_reg(chip);
}
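pm_runtime_resume_and_get() already drops the usage count itself when the resume fails, which is why the explicit pm_runtime_put_noidle() error handling above disappears. A sketch of a typical read path using the helper (hypothetical my_dev_* names):

	#include <linux/pm_runtime.h>

	static int my_dev_read_value(struct device *dev, unsigned int *val)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);	/* no put_noidle() needed on failure */
		if (ret < 0)
			return ret;

		ret = my_dev_hw_read(dev, val);		/* hypothetical register read */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);

		return ret;
	}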
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index b93b85dbc3a6..ba53b50d711a 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -51,7 +51,11 @@
struct isl29125_data {
struct i2c_client *client;
u8 conf1;
- u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
};
#define ISL29125_CHANNEL(_color, _si) { \
@@ -184,10 +188,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->buffer[j++] = ret;
+ data->scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
done:
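iio_push_to_buffers_with_timestamp() stores an s64 timestamp at the first 8-byte-aligned offset after the channel data, so drivers keep the scan in a struct whose timestamp member is __aligned(8) instead of a loosely aligned u16 array, as the isl29125 change above does. A sketch of the pattern (hypothetical my_sensor_* names):

	#include <linux/interrupt.h>
	#include <linux/iio/buffer.h>
	#include <linux/iio/iio.h>
	#include <linux/iio/trigger_consumer.h>

	struct my_sensor_data {
		/* Ensure the timestamp is naturally aligned for the core helper. */
		struct {
			u16 chans[3];
			s64 timestamp __aligned(8);
		} scan;
	};

	static irqreturn_t my_sensor_trigger_handler(int irq, void *p)
	{
		struct iio_poll_func *pf = p;
		struct iio_dev *indio_dev = pf->indio_dev;
		struct my_sensor_data *data = iio_priv(indio_dev);

		/* fill data->scan.chans[] from the hardware here */

		iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
						   iio_get_time_ns(indio_dev));
		iio_trigger_notify_done(indio_dev->trig);

		return IRQ_HANDLED;
	}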
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b4323d2db0b1..1830221da48d 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -32,9 +32,12 @@
#define LTR501_PART_ID 0x86
#define LTR501_MANUFAC_ID 0x87
#define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
+#define LTR501_ALS_DATA1_UPPER 0x89 /* upper 8 bits of LTR501_ALS_DATA1 */
#define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
+#define LTR501_ALS_DATA0_UPPER 0x8b /* upper 8 bits of LTR501_ALS_DATA0 */
#define LTR501_ALS_PS_STATUS 0x8c
#define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
+#define LTR501_PS_DATA_UPPER 0x8e /* upper 8 bits of LTR501_PS_DATA */
#define LTR501_INTR 0x8f /* output mode, polarity, mode */
#define LTR501_PS_THRESH_UP 0x90 /* 11 bit, ps upper threshold */
#define LTR501_PS_THRESH_LOW 0x92 /* 11 bit, ps lower threshold */
@@ -149,7 +152,7 @@ struct ltr501_chip_info {
struct ltr501_data {
struct i2c_client *client;
struct mutex lock_als, lock_ps;
- struct ltr501_chip_info *chip_info;
+ const struct ltr501_chip_info *chip_info;
u8 als_contr, ps_contr;
int als_period, ps_period; /* period in micro seconds */
struct regmap *regmap;
@@ -406,18 +409,19 @@ static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
static int ltr501_read_ps(const struct ltr501_data *data)
{
- int ret, status;
+ __le16 status;
+ int ret;
ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
if (ret < 0)
return ret;
ret = regmap_bulk_read(data->regmap, LTR501_PS_DATA,
- &status, 2);
+ &status, sizeof(status));
if (ret < 0)
return ret;
- return status;
+ return le16_to_cpu(status);
}
static int ltr501_read_intr_prst(const struct ltr501_data *data,
@@ -735,7 +739,7 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
{
struct ltr501_data *data = iio_priv(indio_dev);
int i, ret, freq_val, freq_val2;
- struct ltr501_chip_info *info = data->chip_info;
+ const struct ltr501_chip_info *info = data->chip_info;
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
@@ -1082,7 +1086,7 @@ static ssize_t ltr501_show_proximity_scale_avail(struct device *dev,
char *buf)
{
struct ltr501_data *data = iio_priv(dev_to_iio_dev(dev));
- struct ltr501_chip_info *info = data->chip_info;
+ const struct ltr501_chip_info *info = data->chip_info;
ssize_t len = 0;
int i;
@@ -1104,7 +1108,7 @@ static ssize_t ltr501_show_intensity_scale_avail(struct device *dev,
char *buf)
{
struct ltr501_data *data = iio_priv(dev_to_iio_dev(dev));
- struct ltr501_chip_info *info = data->chip_info;
+ const struct ltr501_chip_info *info = data->chip_info;
ssize_t len = 0;
int i;
@@ -1184,7 +1188,7 @@ static const struct iio_info ltr301_info = {
.write_event_config = &ltr501_write_event_config,
};
-static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
+static const struct ltr501_chip_info ltr501_chip_info_tbl[] = {
[ltr501] = {
.partid = 0x08,
.als_gain = ltr501_als_gain_tbl,
@@ -1205,7 +1209,7 @@ static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
.als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl),
.ps_gain = ltr559_ps_gain_tbl,
.ps_gain_tbl_size = ARRAY_SIZE(ltr559_ps_gain_tbl),
- .als_mode_active = BIT(1),
+ .als_mode_active = BIT(0),
.als_gain_mask = BIT(2) | BIT(3) | BIT(4),
.als_gain_shift = 2,
.info = &ltr501_info,
@@ -1354,9 +1358,12 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case LTR501_ALS_DATA1:
+ case LTR501_ALS_DATA1_UPPER:
case LTR501_ALS_DATA0:
+ case LTR501_ALS_DATA0_UPPER:
case LTR501_ALS_PS_STATUS:
case LTR501_PS_DATA:
+ case LTR501_PS_DATA_UPPER:
return true;
default:
return false;
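The ltr501_read_ps() fix above matters because the 16-bit data registers are little-endian: the bulk read must land in a correctly sized __le16 and be converted with le16_to_cpu(), otherwise the result is wrong on big-endian hosts (and the old code read two bytes into an int). A sketch of the idiom, assuming an 8-bit register map so the count equals bytes (my_read_le16 is a hypothetical helper):

	#include <linux/regmap.h>
	#include <linux/types.h>

	static int my_read_le16(struct regmap *regmap, unsigned int reg, u16 *out)
	{
		__le16 raw;
		int ret;

		/* Two consecutive 8-bit registers holding a little-endian 16-bit value. */
		ret = regmap_bulk_read(regmap, reg, &raw, sizeof(raw));
		if (ret < 0)
			return ret;

		*out = le16_to_cpu(raw);
		return 0;
	}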
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index bfade6577a38..a52b2c788540 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -186,9 +186,7 @@ static int pa12203001_set_power_state(struct pa12203001_data *data, bool on,
}
if (on) {
- ret = pm_runtime_get_sync(&data->client->dev);
- if (ret < 0)
- pm_runtime_put_noidle(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
} else {
pm_runtime_mark_last_busy(&data->client->dev);
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 033578f444e4..c2dd8a3d4217 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -360,7 +360,7 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
* both stay enabled until _suspend().
*/
if (on) {
- ret = pm_runtime_get_sync(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
} else {
pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
@@ -369,9 +369,6 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
dev_err(&data->client->dev,
"Failed: rpr0521_set_power_state for %d, ret %d\n",
on, ret);
- if (on)
- pm_runtime_put_noidle(&data->client->dev);
-
return ret;
}
@@ -985,7 +982,7 @@ static int rpr0521_probe(struct i2c_client *client,
/* Trigger0 producer setup */
data->drdy_trigger0 = devm_iio_trigger_alloc(
indio_dev->dev.parent,
- "%s-dev%d", indio_dev->name, indio_dev->id);
+ "%s-dev%d", indio_dev->name, iio_device_id(indio_dev));
if (!data->drdy_trigger0) {
ret = -ENOMEM;
goto err_pm_disable;
@@ -1038,7 +1035,6 @@ static int rpr0521_probe(struct i2c_client *client,
err_pm_disable:
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
err_poweroff:
rpr0521_poweroff(data);
@@ -1053,7 +1049,6 @@ static int rpr0521_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
rpr0521_poweroff(iio_priv(indio_dev));
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index c280b4195003..f8c9b2cc322e 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -352,22 +352,22 @@ static int si1133_parse_response_err(struct device *dev, u32 resp, u8 cmd)
switch (resp) {
case SI1133_ERR_OUTPUT_BUFFER_OVERFLOW:
- dev_warn(dev, "Output buffer overflow: %#02hhx\n", cmd);
+ dev_warn(dev, "Output buffer overflow: 0x%02x\n", cmd);
return -EOVERFLOW;
case SI1133_ERR_SATURATION_ADC_OR_OVERFLOW_ACCUMULATION:
- dev_warn(dev, "Saturation of the ADC or overflow of accumulation: %#02hhx\n",
+ dev_warn(dev, "Saturation of the ADC or overflow of accumulation: 0x%02x\n",
cmd);
return -EOVERFLOW;
case SI1133_ERR_INVALID_LOCATION_CMD:
dev_warn(dev,
- "Parameter access to an invalid location: %#02hhx\n",
+ "Parameter access to an invalid location: 0x%02x\n",
cmd);
return -EINVAL;
case SI1133_ERR_INVALID_CMD:
- dev_warn(dev, "Invalid command %#02hhx\n", cmd);
+ dev_warn(dev, "Invalid command 0x%02x\n", cmd);
return -EINVAL;
default:
- dev_warn(dev, "Unknown error %#02hhx\n", cmd);
+ dev_warn(dev, "Unknown error 0x%02x\n", cmd);
return -EINVAL;
}
}
@@ -400,7 +400,7 @@ static int si1133_command(struct si1133_data *data, u8 cmd)
err = regmap_write(data->regmap, SI1133_REG_COMMAND, cmd);
if (err) {
- dev_warn(dev, "Failed to write command %#02hhx, ret=%d\n", cmd,
+ dev_warn(dev, "Failed to write command 0x%02x, ret=%d\n", cmd,
err);
goto out;
}
@@ -425,7 +425,7 @@ static int si1133_command(struct si1133_data *data, u8 cmd)
SI1133_CMD_TIMEOUT_MS * 1000);
if (err) {
dev_warn(dev,
- "Failed to read command %#02hhx, ret=%d\n",
+ "Failed to read command 0x%02x, ret=%d\n",
cmd, err);
goto out;
}
@@ -978,11 +978,11 @@ static int si1133_validate_ids(struct iio_dev *iio_dev)
return err;
dev_info(&iio_dev->dev,
- "Device ID part %#02hhx rev %#02hhx mfr %#02hhx\n",
+ "Device ID part 0x%02x rev 0x%02x mfr 0x%02x\n",
part_id, rev_id, mfr_id);
if (part_id != SI1133_PART_ID) {
dev_err(&iio_dev->dev,
- "Part ID mismatch got %#02hhx, expected %#02x\n",
+ "Part ID mismatch got 0x%02x, expected 0x%02x\n",
part_id, SI1133_PART_ID);
return -ENODEV;
}
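The format-string changes above address a quirk of %#02hhx: the # flag makes the "0x" prefix count toward the field width of 2, so small values lose their zero padding, and the hh length modifier adds nothing once the argument has been promoted to int. A tiny userspace demonstration (illustrative only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned char cmd = 0x5;

		printf("%#02hhx\n", cmd);	/* prints "0x5": the prefix eats the width */
		printf("0x%02x\n", cmd);	/* prints "0x05": explicit prefix, padded value */

		return 0;
	}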
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 9b5c99823943..e2abad48b9f4 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -271,7 +271,7 @@ static int si1145_command(struct si1145_data *data, u8 cmd)
if ((ret & ~SI1145_RSP_COUNTER_MASK) == 0) {
if (ret == data->rsp_seq) {
if (time_after(jiffies, stop_jiffies)) {
- dev_warn(dev, "timeout on command %#02hhx\n",
+ dev_warn(dev, "timeout on command 0x%02x\n",
cmd);
ret = -ETIMEDOUT;
break;
@@ -291,12 +291,12 @@ static int si1145_command(struct si1145_data *data, u8 cmd)
ret = -EIO;
} else {
if (ret == SI1145_RSP_INVALID_SETTING) {
- dev_warn(dev, "INVALID_SETTING error on command %#02hhx\n",
+ dev_warn(dev, "INVALID_SETTING error on command 0x%02x\n",
cmd);
ret = -EINVAL;
} else {
/* All overflows are treated identically */
- dev_dbg(dev, "overflow, ret=%d, cmd=%#02hhx\n",
+ dev_dbg(dev, "overflow, ret=%d, cmd=0x%02x\n",
ret, cmd);
ret = -EOVERFLOW;
}
@@ -1243,7 +1243,7 @@ static int si1145_probe_trigger(struct iio_dev *indio_dev)
int ret;
trig = devm_iio_trigger_alloc(&client->dev,
- "%s-dev%d", indio_dev->name, indio_dev->id);
+ "%s-dev%d", indio_dev->name, iio_device_id(indio_dev));
if (!trig)
return -ENOMEM;
@@ -1299,10 +1299,10 @@ static int si1145_probe(struct i2c_client *client,
SI1145_REG_SEQ_ID);
if (ret < 0)
return ret;
- dev_info(&client->dev, "device ID part %#02hhx rev %#02hhx seq %#02hhx\n",
+ dev_info(&client->dev, "device ID part 0x%02x rev 0x%02x seq 0x%02x\n",
part_id, rev_id, seq_id);
if (part_id != data->part_info->part) {
- dev_err(&client->dev, "part ID mismatch got %#02hhx, expected %#02x\n",
+ dev_err(&client->dev, "part ID mismatch got 0x%02x, expected 0x%02x\n",
part_id, data->part_info->part);
return -ENODEV;
}
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 6fe5d46f80d4..0593abd600ec 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -53,7 +53,11 @@ struct tcs3414_data {
u8 control;
u8 gain;
u8 timing;
- u16 buffer[8]; /* 4x 16-bit + 8 bytes timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[4];
+ s64 timestamp __aligned(8);
+ } scan;
};
#define TCS3414_CHANNEL(_color, _si, _addr) { \
@@ -209,10 +213,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->buffer[j++] = ret;
+ data->scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index a0dc447aeb68..371c6a39a165 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -64,7 +64,11 @@ struct tcs3472_data {
u8 control;
u8 atime;
u8 apers;
- u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[4];
+ s64 timestamp __aligned(8);
+ } scan;
};
static const struct iio_event_spec tcs3472_events[] = {
@@ -386,10 +390,10 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->buffer[j++] = ret;
+ data->scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
done:
@@ -531,7 +535,8 @@ static int tcs3472_probe(struct i2c_client *client,
return 0;
free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
@@ -559,7 +564,8 @@ static int tcs3472_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- free_irq(client->irq, indio_dev);
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
tcs3472_powerdown(iio_priv(indio_dev));
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index c9d8f07a6fcd..7e101d5f72ee 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -644,9 +644,7 @@ static int tsl2583_set_pm_runtime_busy(struct tsl2583_chip *chip, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(&chip->client->dev);
- if (ret < 0)
- pm_runtime_put_noidle(&chip->client->dev);
+ ret = pm_runtime_resume_and_get(&chip->client->dev);
} else {
pm_runtime_mark_last_busy(&chip->client->dev);
ret = pm_runtime_put_autosuspend(&chip->client->dev);
@@ -729,8 +727,10 @@ static int tsl2583_read_raw(struct iio_dev *indio_dev,
read_done:
mutex_unlock(&chip->als_mutex);
- if (ret < 0)
+ if (ret < 0) {
+ tsl2583_set_pm_runtime_busy(chip, false);
return ret;
+ }
/*
* Preserve the ret variable if the call to
@@ -791,8 +791,10 @@ static int tsl2583_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&chip->als_mutex);
- if (ret < 0)
+ if (ret < 0) {
+ tsl2583_set_pm_runtime_busy(chip, false);
return ret;
+ }
ret = tsl2583_set_pm_runtime_busy(chip, false);
if (ret < 0)
@@ -880,7 +882,6 @@ static int tsl2583_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
return tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
}
diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c
new file mode 100644
index 000000000000..39e68d0c9d6a
--- /dev/null
+++ b/drivers/iio/light/tsl2591.c
@@ -0,0 +1,1225 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Joe Sandom <joe.g.sandom@gmail.com>
+ *
+ * Datasheet: https://ams.com/tsl25911#tab/documents
+ *
+ * Device driver for the TAOS TSL2591. This is a very-high sensitivity
+ * light-to-digital converter that transforms light intensity into a digital
+ * signal.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* ADC integration time, field value to time in ms */
+#define TSL2591_FVAL_TO_MSEC(x) (((x) + 1) * 100)
+/* ADC integration time, field value to time in seconds */
+#define TSL2591_FVAL_TO_SEC(x) ((x) + 1)
+/* ADC integration time, time in seconds to field value */
+#define TSL2591_SEC_TO_FVAL(x) ((x) - 1)
+
+/* TSL2591 register set */
+#define TSL2591_ENABLE 0x00
+#define TSL2591_CONTROL 0x01
+#define TSL2591_AILTL 0x04
+#define TSL2591_AILTH 0x05
+#define TSL2591_AIHTL 0x06
+#define TSL2591_AIHTH 0x07
+#define TSL2591_NP_AILTL 0x08
+#define TSL2591_NP_AILTH 0x09
+#define TSL2591_NP_AIHTL 0x0A
+#define TSL2591_NP_AIHTH 0x0B
+#define TSL2591_PERSIST 0x0C
+#define TSL2591_PACKAGE_ID 0x11
+#define TSL2591_DEVICE_ID 0x12
+#define TSL2591_STATUS 0x13
+#define TSL2591_C0_DATAL 0x14
+#define TSL2591_C0_DATAH 0x15
+#define TSL2591_C1_DATAL 0x16
+#define TSL2591_C1_DATAH 0x17
+
+/* TSL2591 command register definitions */
+#define TSL2591_CMD_NOP 0xA0
+#define TSL2591_CMD_SF_INTSET 0xE4
+#define TSL2591_CMD_SF_CALS_I 0xE5
+#define TSL2591_CMD_SF_CALS_NPI 0xE7
+#define TSL2591_CMD_SF_CNP_ALSI 0xEA
+
+/* TSL2591 enable register definitions */
+#define TSL2591_PWR_ON 0x01
+#define TSL2591_PWR_OFF 0x00
+#define TSL2591_ENABLE_ALS 0x02
+#define TSL2591_ENABLE_ALS_INT 0x10
+#define TSL2591_ENABLE_SLEEP_INT 0x40
+#define TSL2591_ENABLE_NP_INT 0x80
+
+/* TSL2591 control register definitions */
+#define TSL2591_CTRL_ALS_INTEGRATION_100MS 0x00
+#define TSL2591_CTRL_ALS_INTEGRATION_200MS 0x01
+#define TSL2591_CTRL_ALS_INTEGRATION_300MS 0x02
+#define TSL2591_CTRL_ALS_INTEGRATION_400MS 0x03
+#define TSL2591_CTRL_ALS_INTEGRATION_500MS 0x04
+#define TSL2591_CTRL_ALS_INTEGRATION_600MS 0x05
+#define TSL2591_CTRL_ALS_LOW_GAIN 0x00
+#define TSL2591_CTRL_ALS_MED_GAIN 0x10
+#define TSL2591_CTRL_ALS_HIGH_GAIN 0x20
+#define TSL2591_CTRL_ALS_MAX_GAIN 0x30
+#define TSL2591_CTRL_SYS_RESET 0x80
+
+/* TSL2591 persist register definitions */
+#define TSL2591_PRST_ALS_INT_CYCLE_0 0x00
+#define TSL2591_PRST_ALS_INT_CYCLE_ANY 0x01
+#define TSL2591_PRST_ALS_INT_CYCLE_2 0x02
+#define TSL2591_PRST_ALS_INT_CYCLE_3 0x03
+#define TSL2591_PRST_ALS_INT_CYCLE_5 0x04
+#define TSL2591_PRST_ALS_INT_CYCLE_10 0x05
+#define TSL2591_PRST_ALS_INT_CYCLE_15 0x06
+#define TSL2591_PRST_ALS_INT_CYCLE_20 0x07
+#define TSL2591_PRST_ALS_INT_CYCLE_25 0x08
+#define TSL2591_PRST_ALS_INT_CYCLE_30 0x09
+#define TSL2591_PRST_ALS_INT_CYCLE_35 0x0A
+#define TSL2591_PRST_ALS_INT_CYCLE_40 0x0B
+#define TSL2591_PRST_ALS_INT_CYCLE_45 0x0C
+#define TSL2591_PRST_ALS_INT_CYCLE_50 0x0D
+#define TSL2591_PRST_ALS_INT_CYCLE_55 0x0E
+#define TSL2591_PRST_ALS_INT_CYCLE_60 0x0F
+#define TSL2591_PRST_ALS_INT_CYCLE_MAX (BIT(4) - 1)
+
+/* TSL2591 PID register mask */
+#define TSL2591_PACKAGE_ID_MASK GENMASK(5, 4)
+
+/* TSL2591 ID register mask */
+#define TSL2591_DEVICE_ID_MASK GENMASK(7, 0)
+
+/* TSL2591 status register masks */
+#define TSL2591_STS_ALS_VALID_MASK BIT(0)
+#define TSL2591_STS_ALS_INT_MASK BIT(4)
+#define TSL2591_STS_NPERS_INT_MASK BIT(5)
+#define TSL2591_STS_VAL_HIGH_MASK BIT(0)
+
+/* TSL2591 constant values */
+#define TSL2591_PACKAGE_ID_VAL 0x00
+#define TSL2591_DEVICE_ID_VAL 0x50
+
+/* Power off suspend delay time, in ms */
+#define TSL2591_POWER_OFF_DELAY_MS 2000
+
+/* TSL2591 default values */
+#define TSL2591_DEFAULT_ALS_INT_TIME TSL2591_CTRL_ALS_INTEGRATION_300MS
+#define TSL2591_DEFAULT_ALS_GAIN TSL2591_CTRL_ALS_MED_GAIN
+#define TSL2591_DEFAULT_ALS_PERSIST TSL2591_PRST_ALS_INT_CYCLE_ANY
+#define TSL2591_DEFAULT_ALS_LOWER_THRESH 100
+#define TSL2591_DEFAULT_ALS_UPPER_THRESH 1500
+
+/* TSL2591 number of data registers */
+#define TSL2591_NUM_DATA_REGISTERS 4
+
+/* TSL2591 number of valid status reads on ADC complete */
+#define TSL2591_ALS_STS_VALID_COUNT 10
+
+/* TSL2591 delay period between polls when checking for ALS valid flag */
+#define TSL2591_DELAY_PERIOD_US 10000
+
+/* TSL2591 maximum values */
+#define TSL2591_MAX_ALS_INT_TIME_MS 600
+#define TSL2591_ALS_MAX_VALUE (BIT(16) - 1)
+
+/*
+ * LUX calculations:
+ * AGAIN values from Adafruit's TSL2591 Arduino library
+ * https://github.com/adafruit/Adafruit_TSL2591_Library
+ */
+#define TSL2591_CTRL_ALS_LOW_GAIN_MULTIPLIER 1
+#define TSL2591_CTRL_ALS_MED_GAIN_MULTIPLIER 25
+#define TSL2591_CTRL_ALS_HIGH_GAIN_MULTIPLIER 428
+#define TSL2591_CTRL_ALS_MAX_GAIN_MULTIPLIER 9876
+#define TSL2591_LUX_COEFFICIENT 408
+
+struct tsl2591_als_settings {
+ u16 als_lower_thresh;
+ u16 als_upper_thresh;
+ u8 als_int_time;
+ u8 als_persist;
+ u8 als_gain;
+};
+
+struct tsl2591_chip {
+ struct tsl2591_als_settings als_settings;
+ struct i2c_client *client;
+ /*
+ * Keep als_settings in sync with hardware state
+ * and ensure multiple readers are serialized.
+ */
+ struct mutex als_mutex;
+ bool events_enabled;
+};
+
+/*
+ * Period table is ALS persist cycle x integration time setting
+ * Integration times: 100ms, 200ms, 300ms, 400ms, 500ms, 600ms
+ * ALS cycles: 1, 2, 3, 5, 10, 20, 25, 30, 35, 40, 45, 50, 55, 60
+ */
+static const char * const tsl2591_als_period_list[] = {
+ "0.1 0.2 0.3 0.5 1.0 2.0 2.5 3.0 3.5 4.0 4.5 5.0 5.5 6.0",
+ "0.2 0.4 0.6 1.0 2.0 4.0 5.0 6.0 7.0 8.0 9.0 10.0 11.0 12.0",
+ "0.3 0.6 0.9 1.5 3.0 6.0 7.5 9.0 10.5 12.0 13.5 15.0 16.5 18.0",
+ "0.4 0.8 1.2 2.0 4.0 8.0 10.0 12.0 14.0 16.0 18.0 20.0 22.0 24.0",
+ "0.5 1.0 1.5 2.5 5.0 10.0 12.5 15.0 17.5 20.0 22.5 25.0 27.5 30.0",
+ "0.6 1.2 1.8 3.0 6.0 12.0 15.0 18.0 21.0 24.0 27.0 30.0 33.0 36.0",
+};
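Each string above lists the supported event periods for one integration time, i.e. persist cycles multiplied by the integration time. As a worked example for the 300 ms row: 0.3 s x {1, 2, 3, 5, 10, 20, 25, 30, 35, 40, 45, 50, 55, 60} cycles gives 0.3, 0.6, 0.9, 1.5, 3.0, 6.0, 7.5, 9.0, 10.5, 12.0, 13.5, 15.0, 16.5 and 18.0 seconds, matching the third entry of the table.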
+
+static const int tsl2591_int_time_available[] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static const int tsl2591_calibscale_available[] = {
+ 1, 25, 428, 9876,
+};
+
+static int tsl2591_set_als_lower_threshold(struct tsl2591_chip *chip,
+ u16 als_lower_threshold);
+static int tsl2591_set_als_upper_threshold(struct tsl2591_chip *chip,
+ u16 als_upper_threshold);
+
+static int tsl2591_gain_to_multiplier(const u8 als_gain)
+{
+ switch (als_gain) {
+ case TSL2591_CTRL_ALS_LOW_GAIN:
+ return TSL2591_CTRL_ALS_LOW_GAIN_MULTIPLIER;
+ case TSL2591_CTRL_ALS_MED_GAIN:
+ return TSL2591_CTRL_ALS_MED_GAIN_MULTIPLIER;
+ case TSL2591_CTRL_ALS_HIGH_GAIN:
+ return TSL2591_CTRL_ALS_HIGH_GAIN_MULTIPLIER;
+ case TSL2591_CTRL_ALS_MAX_GAIN:
+ return TSL2591_CTRL_ALS_MAX_GAIN_MULTIPLIER;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_multiplier_to_gain(const u32 multiplier)
+{
+ switch (multiplier) {
+ case TSL2591_CTRL_ALS_LOW_GAIN_MULTIPLIER:
+ return TSL2591_CTRL_ALS_LOW_GAIN;
+ case TSL2591_CTRL_ALS_MED_GAIN_MULTIPLIER:
+ return TSL2591_CTRL_ALS_MED_GAIN;
+ case TSL2591_CTRL_ALS_HIGH_GAIN_MULTIPLIER:
+ return TSL2591_CTRL_ALS_HIGH_GAIN;
+ case TSL2591_CTRL_ALS_MAX_GAIN_MULTIPLIER:
+ return TSL2591_CTRL_ALS_MAX_GAIN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_persist_cycle_to_lit(const u8 als_persist)
+{
+ switch (als_persist) {
+ case TSL2591_PRST_ALS_INT_CYCLE_ANY:
+ return 1;
+ case TSL2591_PRST_ALS_INT_CYCLE_2:
+ return 2;
+ case TSL2591_PRST_ALS_INT_CYCLE_3:
+ return 3;
+ case TSL2591_PRST_ALS_INT_CYCLE_5:
+ return 5;
+ case TSL2591_PRST_ALS_INT_CYCLE_10:
+ return 10;
+ case TSL2591_PRST_ALS_INT_CYCLE_15:
+ return 15;
+ case TSL2591_PRST_ALS_INT_CYCLE_20:
+ return 20;
+ case TSL2591_PRST_ALS_INT_CYCLE_25:
+ return 25;
+ case TSL2591_PRST_ALS_INT_CYCLE_30:
+ return 30;
+ case TSL2591_PRST_ALS_INT_CYCLE_35:
+ return 35;
+ case TSL2591_PRST_ALS_INT_CYCLE_40:
+ return 40;
+ case TSL2591_PRST_ALS_INT_CYCLE_45:
+ return 45;
+ case TSL2591_PRST_ALS_INT_CYCLE_50:
+ return 50;
+ case TSL2591_PRST_ALS_INT_CYCLE_55:
+ return 55;
+ case TSL2591_PRST_ALS_INT_CYCLE_60:
+ return 60;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_persist_lit_to_cycle(const u8 als_persist)
+{
+ switch (als_persist) {
+ case 1:
+ return TSL2591_PRST_ALS_INT_CYCLE_ANY;
+ case 2:
+ return TSL2591_PRST_ALS_INT_CYCLE_2;
+ case 3:
+ return TSL2591_PRST_ALS_INT_CYCLE_3;
+ case 5:
+ return TSL2591_PRST_ALS_INT_CYCLE_5;
+ case 10:
+ return TSL2591_PRST_ALS_INT_CYCLE_10;
+ case 15:
+ return TSL2591_PRST_ALS_INT_CYCLE_15;
+ case 20:
+ return TSL2591_PRST_ALS_INT_CYCLE_20;
+ case 25:
+ return TSL2591_PRST_ALS_INT_CYCLE_25;
+ case 30:
+ return TSL2591_PRST_ALS_INT_CYCLE_30;
+ case 35:
+ return TSL2591_PRST_ALS_INT_CYCLE_35;
+ case 40:
+ return TSL2591_PRST_ALS_INT_CYCLE_40;
+ case 45:
+ return TSL2591_PRST_ALS_INT_CYCLE_45;
+ case 50:
+ return TSL2591_PRST_ALS_INT_CYCLE_50;
+ case 55:
+ return TSL2591_PRST_ALS_INT_CYCLE_55;
+ case 60:
+ return TSL2591_PRST_ALS_INT_CYCLE_60;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_compatible_int_time(struct tsl2591_chip *chip,
+ const u32 als_integration_time)
+{
+ switch (als_integration_time) {
+ case TSL2591_CTRL_ALS_INTEGRATION_100MS:
+ case TSL2591_CTRL_ALS_INTEGRATION_200MS:
+ case TSL2591_CTRL_ALS_INTEGRATION_300MS:
+ case TSL2591_CTRL_ALS_INTEGRATION_400MS:
+ case TSL2591_CTRL_ALS_INTEGRATION_500MS:
+ case TSL2591_CTRL_ALS_INTEGRATION_600MS:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_als_time_to_fval(const u32 als_integration_time)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tsl2591_int_time_available); i++) {
+ if (als_integration_time == tsl2591_int_time_available[i])
+ return TSL2591_SEC_TO_FVAL(als_integration_time);
+ }
+
+ return -EINVAL;
+}
+
+static int tsl2591_compatible_gain(struct tsl2591_chip *chip, const u8 als_gain)
+{
+ switch (als_gain) {
+ case TSL2591_CTRL_ALS_LOW_GAIN:
+ case TSL2591_CTRL_ALS_MED_GAIN:
+ case TSL2591_CTRL_ALS_HIGH_GAIN:
+ case TSL2591_CTRL_ALS_MAX_GAIN:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_compatible_als_persist_cycle(struct tsl2591_chip *chip,
+ const u32 als_persist)
+{
+ switch (als_persist) {
+ case TSL2591_PRST_ALS_INT_CYCLE_ANY:
+ case TSL2591_PRST_ALS_INT_CYCLE_2:
+ case TSL2591_PRST_ALS_INT_CYCLE_3:
+ case TSL2591_PRST_ALS_INT_CYCLE_5:
+ case TSL2591_PRST_ALS_INT_CYCLE_10:
+ case TSL2591_PRST_ALS_INT_CYCLE_15:
+ case TSL2591_PRST_ALS_INT_CYCLE_20:
+ case TSL2591_PRST_ALS_INT_CYCLE_25:
+ case TSL2591_PRST_ALS_INT_CYCLE_30:
+ case TSL2591_PRST_ALS_INT_CYCLE_35:
+ case TSL2591_PRST_ALS_INT_CYCLE_40:
+ case TSL2591_PRST_ALS_INT_CYCLE_45:
+ case TSL2591_PRST_ALS_INT_CYCLE_50:
+ case TSL2591_PRST_ALS_INT_CYCLE_55:
+ case TSL2591_PRST_ALS_INT_CYCLE_60:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_check_als_valid(struct i2c_client *client)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, TSL2591_CMD_NOP | TSL2591_STATUS);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read register\n");
+ return -EINVAL;
+ }
+
+ return FIELD_GET(TSL2591_STS_ALS_VALID_MASK, ret);
+}
+
+static int tsl2591_wait_adc_complete(struct tsl2591_chip *chip)
+{
+ struct tsl2591_als_settings settings = chip->als_settings;
+ struct i2c_client *client = chip->client;
+ int delay;
+ int val;
+ int ret;
+
+ delay = TSL2591_FVAL_TO_MSEC(settings.als_int_time);
+ if (!delay)
+ return -EINVAL;
+
+ /*
+ * Sleep for the ALS integration time to allow enough time for an ADC read
+ * cycle to complete. Check status after the delay for ALS valid.
+ */
+ msleep(delay);
+
+ /* Poll the status register ALS valid flag for up to 100ms */
+ ret = readx_poll_timeout(tsl2591_check_als_valid, client,
+ val, val == TSL2591_STS_VAL_HIGH_MASK,
+ TSL2591_DELAY_PERIOD_US,
+ TSL2591_DELAY_PERIOD_US * TSL2591_ALS_STS_VALID_COUNT);
+ if (ret)
+ dev_err(&client->dev, "Timed out waiting for valid ALS data\n");
+
+ return ret;
+}
+
+/*
+ * tsl2591_read_channel_data - Reads raw channel data and calculates lux
+ *
+ * Formula for lux calculation:
+ * Derived from Adafruit's TSL2591 library
+ * Link: https://github.com/adafruit/Adafruit_TSL2591_Library
+ * Counts Per Lux (CPL) = (ATIME_ms * AGAIN) / LUX DF
+ * lux = ((C0DATA - C1DATA) * (1 - (C1DATA / C0DATA))) / CPL
+ *
+ * Scale values to get a more representative lux value, i.e.
+ * lux = ((C0DATA - C1DATA) * (1000 - ((C1DATA * 1000) / C0DATA))) / CPL
+ *
+ * Channel 0 = IR + Visible
+ * Channel 1 = IR only
+ */
+static int tsl2591_read_channel_data(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ struct tsl2591_als_settings *settings = &chip->als_settings;
+ struct i2c_client *client = chip->client;
+ u8 als_data[TSL2591_NUM_DATA_REGISTERS];
+ int counts_per_lux, int_time_fval, gain_multi, lux;
+ u16 als_ch0, als_ch1;
+ int ret;
+
+ ret = tsl2591_wait_adc_complete(chip);
+ if (ret < 0) {
+ dev_err(&client->dev, "No data available. Err: %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client,
+ TSL2591_CMD_NOP | TSL2591_C0_DATAL,
+ sizeof(als_data), als_data);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read data bytes");
+ return ret;
+ }
+
+ als_ch0 = get_unaligned_le16(&als_data[0]);
+ als_ch1 = get_unaligned_le16(&als_data[2]);
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ *val = als_ch0;
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
+ *val = als_ch1;
+ else
+ return -EINVAL;
+ break;
+ case IIO_LIGHT:
+ gain_multi = tsl2591_gain_to_multiplier(settings->als_gain);
+ if (gain_multi < 0) {
+ dev_err(&client->dev, "Invalid multiplier");
+ return gain_multi;
+ }
+
+ int_time_fval = TSL2591_FVAL_TO_MSEC(settings->als_int_time);
+ /* Calculate counts per lux value */
+ counts_per_lux = (int_time_fval * gain_multi) / TSL2591_LUX_COEFFICIENT;
+
+ dev_dbg(&client->dev, "Counts Per Lux: %d\n", counts_per_lux);
+
+ /* Calculate lux value */
+ lux = ((als_ch0 - als_ch1) *
+ (1000 - ((als_ch1 * 1000) / als_ch0))) / counts_per_lux;
+
+ dev_dbg(&client->dev, "Raw lux calculation: %d\n", lux);
+
+ /* Divide by 1000 to get real lux value before scaling */
+ *val = lux / 1000;
+
+ /* Get the decimal part of lux reading */
+ *val2 = (lux - (*val * 1000)) * 1000;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
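A worked example of the integer math above, using the driver defaults of 300 ms integration and medium gain (x25) and assuming made-up counts of C0DATA = 2000 and C1DATA = 500:

	CPL  = (300 * 25) / 408                               = 18      (integer division)
	lux  = ((2000 - 500) * (1000 - (500 * 1000) / 2000)) / 18
	     = (1500 * 750) / 18                              = 62500
	val  = 62500 / 1000                                   = 62
	val2 = (62500 - 62 * 1000) * 1000                     = 500000  -> reported as 62.5 lux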
+
+static int tsl2591_set_als_gain_int_time(struct tsl2591_chip *chip)
+{
+ struct tsl2591_als_settings als_settings = chip->als_settings;
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_CONTROL,
+ als_settings.als_int_time | als_settings.als_gain);
+ if (ret)
+ dev_err(&client->dev, "Failed to set als gain & int time\n");
+
+ return ret;
+}
+
+static int tsl2591_set_als_lower_threshold(struct tsl2591_chip *chip,
+ u16 als_lower_threshold)
+{
+ struct tsl2591_als_settings als_settings = chip->als_settings;
+ struct i2c_client *client = chip->client;
+ u16 als_upper_threshold;
+ u8 als_lower_l;
+ u8 als_lower_h;
+ int ret;
+
+ chip->als_settings.als_lower_thresh = als_lower_threshold;
+
+ /*
+ * The lower threshold should not be greater than or equal to the upper.
+ * If it is, raise the upper threshold to the new lower
+ * threshold + 1 to avoid ordering issues when setting thresholds.
+ */
+ if (als_lower_threshold >= als_settings.als_upper_thresh) {
+ als_upper_threshold = als_lower_threshold + 1;
+ tsl2591_set_als_upper_threshold(chip, als_upper_threshold);
+ }
+
+ als_lower_l = als_lower_threshold;
+ als_lower_h = als_lower_threshold >> 8;
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_AILTL,
+ als_lower_l);
+ if (ret) {
+ dev_err(&client->dev, "Failed to set als lower threshold\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_AILTH,
+ als_lower_h);
+ if (ret) {
+ dev_err(&client->dev, "Failed to set als lower threshold\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tsl2591_set_als_upper_threshold(struct tsl2591_chip *chip,
+ u16 als_upper_threshold)
+{
+ struct tsl2591_als_settings als_settings = chip->als_settings;
+ struct i2c_client *client = chip->client;
+ u16 als_lower_threshold;
+ u8 als_upper_l;
+ u8 als_upper_h;
+ int ret;
+
+ if (als_upper_threshold > TSL2591_ALS_MAX_VALUE)
+ return -EINVAL;
+
+ chip->als_settings.als_upper_thresh = als_upper_threshold;
+
+ /*
+ * The upper threshold should not be less than the lower. If it
+ * is, set the lower threshold to the new upper
+ * threshold - 1 to avoid ordering issues when setting thresholds.
+ */
+ if (als_upper_threshold < als_settings.als_lower_thresh) {
+ als_lower_threshold = als_upper_threshold - 1;
+ tsl2591_set_als_lower_threshold(chip, als_lower_threshold);
+ }
+
+ als_upper_l = als_upper_threshold;
+ als_upper_h = als_upper_threshold >> 8;
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_AIHTL,
+ als_upper_l);
+ if (ret) {
+ dev_err(&client->dev, "Failed to set als upper threshold\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_AIHTH,
+ als_upper_h);
+ if (ret) {
+ dev_err(&client->dev, "Failed to set als upper threshold\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tsl2591_set_als_persist_cycle(struct tsl2591_chip *chip,
+ u8 als_persist)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_PERSIST,
+ als_persist);
+ if (ret)
+ dev_err(&client->dev, "Failed to set als persist cycle\n");
+
+ chip->als_settings.als_persist = als_persist;
+
+ return ret;
+}
+
+static int tsl2591_set_power_state(struct tsl2591_chip *chip, u8 state)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_ENABLE,
+ state);
+ if (ret)
+ dev_err(&client->dev,
+ "Failed to set the power state to %#04x\n", state);
+
+ return ret;
+}
+
+static ssize_t tsl2591_in_illuminance_period_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%s\n",
+ tsl2591_als_period_list[chip->als_settings.als_int_time]);
+}
+
+static IIO_DEVICE_ATTR_RO(tsl2591_in_illuminance_period_available, 0);
+
+static struct attribute *tsl2591_event_attrs_ctrl[] = {
+ &iio_dev_attr_tsl2591_in_illuminance_period_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2591_event_attribute_group = {
+ .attrs = tsl2591_event_attrs_ctrl,
+};
+
+static const struct iio_event_spec tsl2591_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec tsl2591_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE)
+ },
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .event_spec = tsl2591_events,
+ .num_event_specs = ARRAY_SIZE(tsl2591_events),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE)
+ },
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE)
+ },
+};
+
+static int tsl2591_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ pm_runtime_get_sync(&client->dev);
+
+ mutex_lock(&chip->als_mutex);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_INTENSITY) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = tsl2591_read_channel_data(indio_dev, chan, val, val2);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type != IIO_LIGHT) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = tsl2591_read_channel_data(indio_dev, chan, val, val2);
+ if (ret < 0)
+ break;
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type != IIO_INTENSITY) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ *val = TSL2591_FVAL_TO_SEC(chip->als_settings.als_int_time);
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->type != IIO_INTENSITY) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ *val = tsl2591_gain_to_multiplier(chip->als_settings.als_gain);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+err_unlock:
+ mutex_unlock(&chip->als_mutex);
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return ret;
+}
+
+static int tsl2591_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ int int_time;
+ int gain;
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ int_time = tsl2591_als_time_to_fval(val);
+ if (int_time < 0) {
+ ret = int_time;
+ goto err_unlock;
+ }
+ ret = tsl2591_compatible_int_time(chip, int_time);
+ if (ret < 0)
+ goto err_unlock;
+
+ chip->als_settings.als_int_time = int_time;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ gain = tsl2591_multiplier_to_gain(val);
+ if (gain < 0) {
+ ret = gain;
+ goto err_unlock;
+ }
+ ret = tsl2591_compatible_gain(chip, gain);
+ if (ret < 0)
+ goto err_unlock;
+
+ chip->als_settings.als_gain = gain;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = tsl2591_set_als_gain_int_time(chip);
+
+err_unlock:
+ mutex_unlock(&chip->als_mutex);
+ return ret;
+}
+
+static int tsl2591_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(tsl2591_int_time_available);
+ *vals = tsl2591_int_time_available;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *length = ARRAY_SIZE(tsl2591_calibscale_available);
+ *vals = tsl2591_calibscale_available;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl2591_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ int als_persist, int_time, period;
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = chip->als_settings.als_upper_thresh;
+ break;
+ case IIO_EV_DIR_FALLING:
+ *val = chip->als_settings.als_lower_thresh;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ ret = i2c_smbus_read_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_PERSIST);
+ if (ret <= 0 || ret > TSL2591_PRST_ALS_INT_CYCLE_MAX)
+ goto err_unlock;
+
+ als_persist = tsl2591_persist_cycle_to_lit(ret);
+ int_time = TSL2591_FVAL_TO_MSEC(chip->als_settings.als_int_time);
+ period = als_persist * (int_time * MSEC_PER_SEC);
+
+ *val = period / USEC_PER_SEC;
+ *val2 = period % USEC_PER_SEC;
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+err_unlock:
+ mutex_unlock(&chip->als_mutex);
+ return ret;
+}
+
+static int tsl2591_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val,
+ int val2)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ int period, int_time, als_persist;
+ int ret;
+
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+
+ mutex_lock(&chip->als_mutex);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (val > TSL2591_ALS_MAX_VALUE) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = tsl2591_set_als_upper_threshold(chip, val);
+ if (ret < 0)
+ goto err_unlock;
+ break;
+ case IIO_EV_DIR_FALLING:
+ ret = tsl2591_set_als_lower_threshold(chip, val);
+ if (ret < 0)
+ goto err_unlock;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ break;
+ case IIO_EV_INFO_PERIOD:
+ int_time = TSL2591_FVAL_TO_MSEC(chip->als_settings.als_int_time);
+
+ period = ((val * MSEC_PER_SEC) +
+ (val2 / MSEC_PER_SEC)) / int_time;
+
+ als_persist = tsl2591_persist_lit_to_cycle(period);
+ if (als_persist < 0) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = tsl2591_compatible_als_persist_cycle(chip, als_persist);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = tsl2591_set_als_persist_cycle(chip, als_persist);
+ if (ret < 0)
+ goto err_unlock;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+err_unlock:
+ mutex_unlock(&chip->als_mutex);
+ return ret;
+}
+
+static int tsl2591_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+
+ return chip->events_enabled;
+}
+
+static int tsl2591_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+
+ if (state && !chip->events_enabled) {
+ chip->events_enabled = true;
+ pm_runtime_get_sync(&client->dev);
+ } else if (!state && chip->events_enabled) {
+ chip->events_enabled = false;
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+ }
+
+ return 0;
+}
+
+static const struct iio_info tsl2591_info = {
+ .event_attrs = &tsl2591_event_attribute_group,
+ .read_raw = tsl2591_read_raw,
+ .write_raw = tsl2591_write_raw,
+ .read_avail = tsl2591_read_available,
+ .read_event_value = tsl2591_read_event_value,
+ .write_event_value = tsl2591_write_event_value,
+ .read_event_config = tsl2591_read_event_config,
+ .write_event_config = tsl2591_write_event_config,
+};
+
+static const struct iio_info tsl2591_info_no_irq = {
+ .read_raw = tsl2591_read_raw,
+ .write_raw = tsl2591_write_raw,
+ .read_avail = tsl2591_read_available,
+};
+
+static int __maybe_unused tsl2591_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->als_mutex);
+ ret = tsl2591_set_power_state(chip, TSL2591_PWR_OFF);
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static int __maybe_unused tsl2591_resume(struct device *dev)
+{
+ int power_state = TSL2591_PWR_ON | TSL2591_ENABLE_ALS;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ if (chip->events_enabled)
+ power_state |= TSL2591_ENABLE_ALS_INT;
+
+ mutex_lock(&chip->als_mutex);
+ ret = tsl2591_set_power_state(chip, power_state);
+ mutex_unlock(&chip->als_mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops tsl2591_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(tsl2591_suspend, tsl2591_resume, NULL)
+};
+
+static irqreturn_t tsl2591_event_handler(int irq, void *private)
+{
+ struct iio_dev *dev_info = private;
+ struct tsl2591_chip *chip = iio_priv(dev_info);
+ struct i2c_client *client = chip->client;
+
+ if (!chip->events_enabled)
+ return IRQ_NONE;
+
+ iio_push_event(dev_info,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(dev_info));
+
+ /* Clear ALS irq */
+ i2c_smbus_write_byte(client, TSL2591_CMD_SF_CALS_NPI);
+
+ return IRQ_HANDLED;
+}
+
+static int tsl2591_load_defaults(struct tsl2591_chip *chip)
+{
+ int ret;
+
+ chip->als_settings.als_int_time = TSL2591_DEFAULT_ALS_INT_TIME;
+ chip->als_settings.als_gain = TSL2591_DEFAULT_ALS_GAIN;
+ chip->als_settings.als_lower_thresh = TSL2591_DEFAULT_ALS_LOWER_THRESH;
+ chip->als_settings.als_upper_thresh = TSL2591_DEFAULT_ALS_UPPER_THRESH;
+
+ ret = tsl2591_set_als_gain_int_time(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = tsl2591_set_als_persist_cycle(chip, TSL2591_DEFAULT_ALS_PERSIST);
+ if (ret < 0)
+ return ret;
+
+ ret = tsl2591_set_als_lower_threshold(chip, TSL2591_DEFAULT_ALS_LOWER_THRESH);
+ if (ret < 0)
+ return ret;
+
+ ret = tsl2591_set_als_upper_threshold(chip, TSL2591_DEFAULT_ALS_UPPER_THRESH);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void tsl2591_chip_off(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct tsl2591_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ tsl2591_set_power_state(chip, TSL2591_PWR_OFF);
+}
+
+static int tsl2591_probe(struct i2c_client *client)
+{
+ struct tsl2591_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev,
+ "I2C smbus byte data functionality is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, tsl2591_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tsl2591_irq", indio_dev);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "IRQ request error\n");
+ return -EINVAL;
+ }
+ indio_dev->info = &tsl2591_info;
+ } else {
+ indio_dev->info = &tsl2591_info_no_irq;
+ }
+
+ mutex_init(&chip->als_mutex);
+
+ ret = i2c_smbus_read_byte_data(client,
+ TSL2591_CMD_NOP | TSL2591_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to read the device ID register\n");
+ return ret;
+ }
+ ret = FIELD_GET(TSL2591_DEVICE_ID_MASK, ret);
+ if (ret != TSL2591_DEVICE_ID_VAL) {
+ dev_err(&client->dev, "Device ID: %#04x unknown\n", ret);
+ return -EINVAL;
+ }
+
+ indio_dev->channels = tsl2591_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl2591_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = chip->client->name;
+ chip->events_enabled = false;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ TSL2591_POWER_OFF_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ /*
+ * Add the chip-off handler to the devm-managed path so that chip
+ * power management is torn down correctly on driver remove.
+ * tsl2591_chip_off() must be registered after pm runtime is enabled
+ * and before any error exit path can be taken, so that a failed
+ * probe never leaves pm runtime enabled.
+ */
+ ret = devm_add_action_or_reset(&client->dev, tsl2591_chip_off,
+ indio_dev);
+ if (ret < 0)
+ return -EINVAL;
+
+ ret = tsl2591_load_defaults(chip);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to load sensor defaults\n");
+ return -EINVAL;
+ }
+
+ ret = i2c_smbus_write_byte(client, TSL2591_CMD_SF_CALS_NPI);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to clear als irq\n");
+ return -EINVAL;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id tsl2591_of_match[] = {
+ { .compatible = "amstaos,tsl2591"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsl2591_of_match);
+
+static struct i2c_driver tsl2591_driver = {
+ .driver = {
+ .name = "tsl2591",
+ .pm = &tsl2591_pm_ops,
+ .of_match_table = tsl2591_of_match,
+ },
+ .probe_new = tsl2591_probe
+};
+module_i2c_driver(tsl2591_driver);
+
+MODULE_AUTHOR("Joe Sandom <joe.g.sandom@gmail.com>");
+MODULE_DESCRIPTION("TAOS tsl2591 ambient light sensor driver");
+MODULE_LICENSE("GPL");
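The probe comment above spells out why tsl2591_chip_off() is registered immediately after runtime PM is enabled. A minimal sketch of that generic ordering, using hypothetical my_chip_off()/my_probe() names rather than the tsl2591 functions:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical sketch of the devm/runtime-PM ordering used above. */
static void my_chip_off(void *data)
{
	struct device *dev = data;

	/* Undo what probe set up: disable runtime PM, mark suspended. */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
}

static int my_probe(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	/*
	 * Register the undo action before any exit path that can fail,
	 * so an error return automatically disables runtime PM again.
	 */
	ret = devm_add_action_or_reset(dev, my_chip_off, dev);
	if (ret)
		return ret;

	/* Later failures also unwind through my_chip_off(). */
	return 0;
}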
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 393f27b75c75..96e4a66ddf28 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -367,9 +367,7 @@ static int us5182d_set_power_state(struct us5182d_data *data, bool on)
return 0;
if (on) {
- ret = pm_runtime_get_sync(&data->client->dev);
- if (ret < 0)
- pm_runtime_put_noidle(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
} else {
pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 2f7916f95689..e02e92bc2928 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -413,9 +413,7 @@ static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
} else {
pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
@@ -910,7 +908,7 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4000_data *data = iio_priv(indio_dev);
const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
- u16 buffer[8] = {0}; /* 1x16-bit + ts */
+ u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
bool data_read = false;
unsigned long isr;
int val = 0;
@@ -998,7 +996,8 @@ static int vcnl4010_probe_trigger(struct iio_dev *indio_dev)
struct iio_trigger *trigger;
trigger = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!trigger)
return -ENOMEM;
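Several hunks here (us5182d, vcnl4000, and more below) replace the pm_runtime_get_sync() plus pm_runtime_put_noidle() error handling with pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails. A minimal before/after sketch with hypothetical helpers:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Old pattern: the caller must drop the reference taken even on failure. */
static int my_power_up_old(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0)
		pm_runtime_put_noidle(dev);
	return ret;
}

/* New pattern: the helper releases the reference on error by itself. */
static int my_power_up(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}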
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index ae87740d9cef..0db306ee910e 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -102,7 +102,8 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4035_data *data = iio_priv(indio_dev);
- u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)];
+ /* Ensure naturally aligned timestamp */
+ u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
int ret;
ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
@@ -144,9 +145,7 @@ static int vcnl4035_set_pm_runtime_state(struct vcnl4035_data *data, bool on)
struct device *dev = &data->client->dev;
if (on) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
} else {
pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
@@ -507,7 +506,7 @@ static int vcnl4035_probe_trigger(struct iio_dev *indio_dev)
data->drdy_trigger0 = devm_iio_trigger_alloc(
indio_dev->dev.parent,
- "%s-dev%d", indio_dev->name, indio_dev->id);
+ "%s-dev%d", indio_dev->name, iio_device_id(indio_dev));
if (!data->drdy_trigger0)
return -ENOMEM;
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index de85c9b30be1..3c937c55a10d 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -128,7 +128,7 @@ static ssize_t in_illuminance_period_available_show(struct device *dev,
return -EINVAL;
}
- return snprintf(buf, PAGE_SIZE, "%s\n", period_values[x]);
+ return sysfs_emit(buf, "%s\n", period_values[x]);
}
static IIO_DEVICE_ATTR_RO(in_illuminance_period_available, 0);
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 24b2f7b1fe44..e54feacfb980 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -833,8 +833,7 @@ static int ak8974_probe(struct i2c_client *i2c,
ak8974->i2c = i2c;
mutex_init(&ak8974->lock);
- ret = iio_read_mount_matrix(&i2c->dev, "mount-matrix",
- &ak8974->orientation);
+ ret = iio_read_mount_matrix(&i2c->dev, &ak8974->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index d988b6ac3659..42b8a2680e3a 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -890,7 +890,7 @@ static int ak8975_probe(struct i2c_client *client,
data->reset_gpiod = reset_gpiod;
data->eoc_irq = 0;
- err = iio_read_mount_matrix(&client->dev, "mount-matrix", &data->orientation);
+ err = iio_read_mount_matrix(&client->dev, &data->orientation);
if (err)
return err;
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 00f9766bad5c..f96f53175349 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -138,8 +138,11 @@ struct bmc150_magn_data {
struct regmap *regmap;
struct regulator_bulk_data regulators[2];
struct iio_mount_matrix orientation;
- /* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
- s32 buffer[6];
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ s32 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
struct iio_trigger *dready_trig;
bool dready_trigger_on;
int max_odr;
@@ -262,7 +265,7 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
} else {
pm_runtime_mark_last_busy(data->dev);
ret = pm_runtime_put_autosuspend(data->dev);
@@ -271,9 +274,6 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
if (ret < 0) {
dev_err(data->dev,
"failed to change power state to %d\n", on);
- if (on)
- pm_runtime_put_noidle(data->dev);
-
return ret;
}
#endif
@@ -675,11 +675,11 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
int ret;
mutex_lock(&data->mutex);
- ret = bmc150_magn_read_xyz(data, data->buffer);
+ ret = bmc150_magn_read_xyz(data, data->scan.chans);
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
@@ -890,8 +890,7 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- ret = iio_read_mount_matrix(dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
@@ -915,7 +914,7 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
data->dready_trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->dready_trig) {
ret = -ENOMEM;
dev_err(dev, "iio trigger alloc failed\n");
@@ -963,12 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_disable_runtime_pm;
}
dev_dbg(dev, "Registered device %s\n", name);
return 0;
+err_disable_runtime_pm:
+ pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_free_irq:
@@ -992,7 +993,6 @@ int bmc150_magn_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
iio_triggered_buffer_cleanup(indio_dev);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index b78691523dd4..e85a3a8eea90 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -6,13 +6,9 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -587,3 +583,4 @@ module_platform_driver(hid_magn_3d_platform_driver);
MODULE_DESCRIPTION("HID Sensor Magnetometer 3D");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 3f6c0b662941..242f742f2643 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -33,7 +33,8 @@ enum hmc5843_ids {
* @lock: update and read regmap data
* @regmap: hardware access register maps
* @variant: describe chip variants
- * @buffer: 3x 16-bit channels + padding + 64-bit timestamp
+ * @scan: buffer to pack data for passing to
+ * iio_push_to_buffers_with_timestamp()
*/
struct hmc5843_data {
struct device *dev;
@@ -41,7 +42,10 @@ struct hmc5843_data {
struct regmap *regmap;
const struct hmc5843_chip_info *variant;
struct iio_mount_matrix orientation;
- __be16 buffer[8];
+ struct {
+ __be16 chans[3];
+ s64 timestamp __aligned(8);
+ } scan;
};
int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 780faea61d82..cf62057480cf 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -446,13 +446,13 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
}
ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
- data->buffer, 3 * sizeof(__be16));
+ data->scan.chans, sizeof(data->scan.chans));
mutex_unlock(&data->lock);
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
done:
@@ -637,8 +637,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
data->variant = &hmc5843_chip_info_tbl[id];
mutex_init(&data->lock);
- ret = iio_read_mount_matrix(dev, "mount-matrix",
- &data->orientation);
+ ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
return ret;
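The bmc150_magn and hmc5843 hunks above (and the as3935, lidar-lite and srf08 ones below) rework the scan buffer so the s64 timestamp sits at a naturally aligned offset, because iio_push_to_buffers_with_timestamp() assumes the timestamp slot at the end of the scan is 8-byte aligned. A minimal sketch of the pattern with hypothetical names:

#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

struct my_magn_state {
	/* Ensure the timestamp is naturally aligned inside the scan. */
	struct {
		__be16 chans[3];		/* x, y, z samples */
		s64 timestamp __aligned(8);
	} scan;
};

static void my_push_scan(struct iio_dev *indio_dev, struct my_magn_state *st)
{
	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
					   iio_get_time_ns(indio_dev));
}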
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index dd811da9cb6d..13914273c999 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -78,7 +78,8 @@ struct rm3100_data {
bool use_interrupt;
int conversion_time;
int scale;
- u8 buffer[RM3100_SCAN_BYTES];
+ /* Ensure naturally aligned timestamp */
+ u8 buffer[RM3100_SCAN_BYTES] __aligned(8);
struct iio_trigger *drdy_trig;
/*
@@ -575,7 +576,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
data->drdy_trig = devm_iio_trigger_alloc(dev, "%s-drdy%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->drdy_trig)
return -ENOMEM;
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 7ba6a6ba5c58..fb6c906c4c0c 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -23,10 +23,6 @@
#define LSM9DS1_MAGN_DEV_NAME "lsm9ds1_magn"
#define IIS2MDC_MAGN_DEV_NAME "iis2mdc"
-const struct st_sensor_settings *st_magn_get_settings(const char *name);
-int st_magn_common_probe(struct iio_dev *indio_dev);
-void st_magn_common_remove(struct iio_dev *indio_dev);
-
#ifdef CONFIG_IIO_BUFFER
int st_magn_allocate_ring(struct iio_dev *indio_dev);
void st_magn_deallocate_ring(struct iio_dev *indio_dev);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 71faebd07feb..0048c3cd36ee 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -33,6 +33,7 @@
/* FULLSCALE */
#define ST_MAGN_FS_AVL_1300MG 1300
#define ST_MAGN_FS_AVL_1900MG 1900
+#define ST_MAGN_FS_AVL_2000MG 2000
#define ST_MAGN_FS_AVL_2500MG 2500
#define ST_MAGN_FS_AVL_4000MG 4000
#define ST_MAGN_FS_AVL_4700MG 4700
@@ -53,51 +54,95 @@
#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a
#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c
+/* Special L addresses for sensor 4 */
+#define ST_MAGN_4_OUT_X_L_ADDR 0x08
+#define ST_MAGN_4_OUT_Y_L_ADDR 0x0a
+#define ST_MAGN_4_OUT_Z_L_ADDR 0x0c
+
+static const struct iio_mount_matrix *
+st_magn_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ return &mdata->mount_matrix;
+}
+
+static const struct iio_chan_spec_ext_info st_magn_mount_matrix_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_magn_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec st_magn_16bit_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
- ST_MAGN_DEFAULT_OUT_X_H_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ ST_MAGN_DEFAULT_OUT_X_H_ADDR,
+ st_magn_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
- ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ ST_MAGN_DEFAULT_OUT_Y_H_ADDR,
+ st_magn_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
- ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
+ ST_MAGN_DEFAULT_OUT_Z_H_ADDR,
+ st_magn_mount_matrix_ext_info),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
- ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
- ST_MAGN_2_OUT_X_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ ST_MAGN_2_OUT_X_L_ADDR,
+ st_magn_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
- ST_MAGN_2_OUT_Y_L_ADDR),
- ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ ST_MAGN_2_OUT_Y_L_ADDR,
+ st_magn_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
- ST_MAGN_2_OUT_Z_L_ADDR),
+ ST_MAGN_2_OUT_Z_L_ADDR,
+ st_magn_mount_matrix_ext_info),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
static const struct iio_chan_spec st_magn_3_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
+ ST_MAGN_3_OUT_X_L_ADDR,
+ st_magn_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
+ ST_MAGN_3_OUT_Y_L_ADDR,
+ st_magn_mount_matrix_ext_info),
+ ST_SENSORS_LSM_CHANNELS_EXT(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
+ ST_MAGN_3_OUT_Z_L_ADDR,
+ st_magn_mount_matrix_ext_info),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct iio_chan_spec st_magn_4_16bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
- ST_MAGN_3_OUT_X_L_ADDR),
+ ST_MAGN_4_OUT_X_L_ADDR),
ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
- ST_MAGN_3_OUT_Y_L_ADDR),
+ ST_MAGN_4_OUT_Y_L_ADDR),
ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
- ST_MAGN_3_OUT_Z_L_ADDR),
+ ST_MAGN_4_OUT_Z_L_ADDR),
IIO_CHAN_SOFT_TIMESTAMP(3)
};
@@ -381,6 +426,87 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.multi_read_bit = false,
.bootime = 2,
},
+ {
+ .wai = 0x49,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LSM9DS0_IMU_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_4_16bit_channels,
+ .odr = {
+ .addr = 0x24,
+ .mask = GENMASK(4, 2),
+ .odr_avl = {
+ { 3, 0x00, },
+ { 6, 0x01, },
+ { 12, 0x02, },
+ { 25, 0x03, },
+ { 50, 0x04, },
+ { 100, 0x05, },
+ },
+ },
+ .pw = {
+ .addr = 0x26,
+ .mask = GENMASK(1, 0),
+ .value_on = 0x00,
+ .value_off = 0x03,
+ },
+ .fs = {
+ .addr = 0x25,
+ .mask = GENMASK(6, 5),
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_2000MG,
+ .value = 0x00,
+ .gain = 73,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = 0x01,
+ .gain = 146,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_8000MG,
+ .value = 0x02,
+ .gain = 292,
+ },
+ [3] = {
+ .num = ST_MAGN_FS_AVL_12000MG,
+ .value = 0x03,
+ .gain = 438,
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = BIT(3),
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = BIT(1),
+ },
+ .int2 = {
+ .addr = 0x23,
+ .mask = BIT(2),
+ },
+ .stat_drdy = {
+ .addr = 0x07,
+ .mask = GENMASK(2, 0),
+ },
+ },
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+};
+
+/* Default magn DRDY is available on INT2 pin */
+static const struct st_sensors_platform_data default_magn_pdata = {
+ .drdy_int_pin = 2,
};
static int st_magn_read_raw(struct iio_dev *indio_dev,
@@ -490,33 +616,37 @@ EXPORT_SYMBOL(st_magn_get_settings);
int st_magn_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata = dev_get_platdata(mdata->dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &magn_info;
- err = st_sensors_power_enable(indio_dev);
- if (err)
- return err;
-
err = st_sensors_verify_id(indio_dev);
if (err < 0)
- goto st_magn_power_off;
+ return err;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+ err = iio_read_mount_matrix(mdata->dev, &mdata->mount_matrix);
+ if (err)
+ return err;
+
mdata->current_fullscale = &mdata->sensor_settings->fs.fs_avl[0];
mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev, NULL);
+ if (!pdata)
+ pdata = (struct st_sensors_platform_data *)&default_magn_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
- goto st_magn_power_off;
+ return err;
err = st_magn_allocate_ring(indio_dev);
if (err < 0)
- goto st_magn_power_off;
+ return err;
if (mdata->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -539,9 +669,6 @@ st_magn_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_magn_probe_trigger_error:
st_magn_deallocate_ring(indio_dev);
-st_magn_power_off:
- st_sensors_power_disable(indio_dev);
-
return err;
}
EXPORT_SYMBOL(st_magn_common_probe);
@@ -550,8 +677,6 @@ void st_magn_common_remove(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
- st_sensors_power_disable(indio_dev);
-
iio_device_unregister(indio_dev);
if (mdata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 36f4e7b53b24..3e23c117de8e 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -82,16 +82,28 @@ static int st_magn_i2c_probe(struct i2c_client *client,
if (err < 0)
return err;
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
+
err = st_magn_common_probe(indio_dev);
if (err < 0)
- return err;
+ goto st_magn_power_off;
return 0;
+
+st_magn_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
}
static int st_magn_i2c_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ st_sensors_power_disable(indio_dev);
+
st_magn_common_remove(indio_dev);
return 0;
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 0e2323dfc687..03c0a737aba6 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -76,16 +76,28 @@ static int st_magn_spi_probe(struct spi_device *spi)
if (err < 0)
return err;
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
+
err = st_magn_common_probe(indio_dev);
if (err < 0)
- return err;
+ goto st_magn_power_off;
return 0;
+
+st_magn_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
}
static int st_magn_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ st_sensors_power_disable(indio_dev);
+
st_magn_common_remove(indio_dev);
return 0;
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 2f2f8cb3c26c..9ff7b0e56cf6 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -831,7 +831,7 @@ static int yas5xx_probe(struct i2c_client *i2c,
yas5xx->dev = dev;
mutex_init(&yas5xx->lock);
- ret = iio_read_mount_matrix(dev, "mount-matrix", &yas5xx->orientation);
+ ret = iio_read_mount_matrix(dev, &yas5xx->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 7af48d336285..c0079e2c8807 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -7,13 +7,10 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -425,3 +422,4 @@ module_platform_driver(hid_incl_3d_platform_driver);
MODULE_DESCRIPTION("HID Sensor Inclinometer 3D");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index cf7f57a47681..a033699910e8 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -7,9 +7,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -373,3 +371,4 @@ module_platform_driver(hid_dev_rot_platform_driver);
MODULE_DESCRIPTION("HID Sensor Device Rotation");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/position/hid-sensor-custom-intel-hinge.c b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
index fd77e7ee87f3..07c30d217255 100644
--- a/drivers/iio/position/hid-sensor-custom-intel-hinge.c
+++ b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
@@ -7,6 +7,8 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -303,7 +305,6 @@ static int hid_hinge_probe(struct platform_device *pdev)
return ret;
}
- indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &hinge_info;
indio_dev->name = "hinge";
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -377,3 +378,4 @@ module_platform_driver(hid_hinge_platform_driver);
MODULE_DESCRIPTION("HID Sensor INTEL Hinge");
MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 8a9c576616ee..ed30bdaa10ec 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -71,8 +71,8 @@ struct lmp91000_data {
struct completion completion;
u8 chan_select;
-
- u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
+ /* 64-bit data + 64-bit naturally aligned timestamp */
+ u32 buffer[4] __aligned(8);
};
static const struct iio_chan_spec lmp91000_channels[] = {
@@ -323,7 +323,8 @@ static int lmp91000_probe(struct i2c_client *client,
}
data->trig = devm_iio_trigger_alloc(dev, "%s-mux%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!data->trig) {
dev_err(dev, "cannot allocate iio trigger.\n");
return -ENOMEM;
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index c416d261e3e3..10c52b8df2ba 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -6,13 +6,10 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -357,3 +354,4 @@ module_platform_driver(hid_press_platform_driver);
MODULE_DESCRIPTION("HID Sensor Pressure");
MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 48759fc4bf18..af4621eaa6b5 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -250,7 +250,9 @@ static int icp10100_get_measures(struct icp10100_state *st,
__be16 measures[3];
int ret;
- pm_runtime_get_sync(&st->client->dev);
+ ret = pm_runtime_resume_and_get(&st->client->dev);
+ if (ret < 0)
+ return ret;
mutex_lock(&st->lock);
cmd = &icp10100_cmd_measure[st->mode];
@@ -525,7 +527,6 @@ static void icp10100_pm_disable(void *data)
{
struct device *dev = data;
- pm_runtime_put_sync_suspend(dev);
pm_runtime_disable(dev);
}
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 5c746ff6087e..9417b3bd7513 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -41,10 +41,6 @@ static __maybe_unused const struct st_sensors_platform_data default_press_pdata
.drdy_int_pin = 1,
};
-const struct st_sensor_settings *st_press_get_settings(const char *name);
-int st_press_common_probe(struct iio_dev *indio_dev);
-void st_press_common_remove(struct iio_dev *indio_dev);
-
#ifdef CONFIG_IIO_BUFFER
int st_press_allocate_ring(struct iio_dev *indio_dev);
void st_press_deallocate_ring(struct iio_dev *indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 789a2928504a..7912b5a68395 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -689,13 +689,9 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &press_info;
- err = st_sensors_power_enable(indio_dev);
- if (err)
- return err;
-
err = st_sensors_verify_id(indio_dev);
if (err < 0)
- goto st_press_power_off;
+ return err;
/*
* Skip timestamping channel while declaring available channels to
@@ -718,11 +714,11 @@ int st_press_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
- goto st_press_power_off;
+ return err;
err = st_press_allocate_ring(indio_dev);
if (err < 0)
- goto st_press_power_off;
+ return err;
if (press_data->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -745,9 +741,6 @@ st_press_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_press_probe_trigger_error:
st_press_deallocate_ring(indio_dev);
-st_press_power_off:
- st_sensors_power_disable(indio_dev);
-
return err;
}
EXPORT_SYMBOL(st_press_common_probe);
@@ -756,8 +749,6 @@ void st_press_common_remove(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
- st_sensors_power_disable(indio_dev);
-
iio_device_unregister(indio_dev);
if (press_data->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 09c6903f99b8..f0a5af314ceb 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -98,16 +98,29 @@ static int st_press_i2c_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = st_sensors_power_enable(indio_dev);
+ if (ret)
+ return ret;
+
ret = st_press_common_probe(indio_dev);
if (ret < 0)
- return ret;
+ goto st_press_power_off;
return 0;
+
+st_press_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return ret;
}
static int st_press_i2c_remove(struct i2c_client *client)
{
- st_press_common_remove(i2c_get_clientdata(client));
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ st_sensors_power_disable(indio_dev);
+
+ st_press_common_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index b5ee3ec2764f..b48cf7d01cd7 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -82,16 +82,29 @@ static int st_press_spi_probe(struct spi_device *spi)
if (err < 0)
return err;
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
+
err = st_press_common_probe(indio_dev);
if (err < 0)
- return err;
+ goto st_press_power_off;
return 0;
+
+st_press_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
}
static int st_press_spi_remove(struct spi_device *spi)
{
- st_press_common_remove(spi_get_drvdata(spi));
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ st_sensors_power_disable(indio_dev);
+
+ st_press_common_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index a93411216aee..89295c90f801 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -1408,7 +1408,8 @@ static int zpa2326_init_managed_trigger(struct device *parent,
return 0;
trigger = devm_iio_trigger_alloc(parent, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!trigger)
return -ENOMEM;
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index edc4a35ae66d..3797a8f54276 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -59,7 +59,11 @@ struct as3935_state {
unsigned long noise_tripped;
u32 tune_cap;
u32 nflwdth_reg;
- u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u8 chan;
+ s64 timestamp __aligned(8);
+ } scan;
u8 buf[2] ____cacheline_aligned;
};
@@ -225,8 +229,8 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
if (ret)
goto err_read;
- st->buffer[0] = val & AS3935_DATA_MASK;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+ st->scan.chan = val & AS3935_DATA_MASK;
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
iio_get_time_ns(indio_dev));
err_read:
iio_trigger_notify_done(indio_dev->trig);
@@ -404,7 +408,8 @@ static int as3935_probe(struct spi_device *spi)
indio_dev->info = &as3935_info;
trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ indio_dev->name,
+ iio_device_id(indio_dev));
if (!trig)
return -ENOMEM;
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index 90e76451c972..5b6ea783795d 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -938,7 +938,7 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct isl29501_private *isl29501 = iio_priv(indio_dev);
const unsigned long *active_mask = indio_dev->active_scan_mask;
- u32 buffer[4] = {}; /* 1x16-bit + ts */
+ u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
isl29501_register_read(isl29501, REG_DISTANCE, buffer);
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index cc206bfa09c7..27026c060ab9 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -44,7 +44,11 @@ struct lidar_data {
int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len);
int i2c_enabled;
- u16 buffer[8]; /* 2 byte distance + 8 byte timestamp */
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chan;
+ s64 timestamp __aligned(8);
+ } scan;
};
static const struct iio_chan_spec lidar_channels[] = {
@@ -154,7 +158,9 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
int tries = 10;
int ret;
- pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ return ret;
/* start sample */
ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
@@ -230,9 +236,9 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
struct lidar_data *data = iio_priv(indio_dev);
int ret;
- ret = lidar_get_measurement(data, data->buffer);
+ ret = lidar_get_measurement(data, &data->scan.chan);
if (!ret) {
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
} else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 420c37c72de4..fe88b2bb60bc 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -100,9 +100,11 @@ static int srf04_read(struct srf04_data *data)
u64 dt_ns;
u32 time_ns, distance_mm;
- if (data->gpiod_power)
- pm_runtime_get_sync(data->dev);
-
+ if (data->gpiod_power) {
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
+ }
/*
* just one read-echo-cycle can take place at a time
* ==> lock against concurrent reading calls
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 70beac5c9c1d..9b0886760f76 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -63,11 +63,11 @@ struct srf08_data {
int range_mm;
struct mutex lock;
- /*
- * triggered buffer
- * 1x16-bit channel + 3x16 padding + 4x16 timestamp
- */
- s16 buffer[8];
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ s16 chan;
+ s64 timestamp __aligned(8);
+ } scan;
/* Sensor-Type */
enum srf08_sensor_type sensor_type;
@@ -190,9 +190,9 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
mutex_lock(&data->lock);
- data->buffer[0] = sensor_data;
+ data->scan.chan = sensor_data;
iio_push_to_buffers_with_timestamp(indio_dev,
- data->buffer, pf->timestamp);
+ &data->scan, pf->timestamp);
mutex_unlock(&data->lock);
err:
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index 327ebb7ddbb9..175f3b7c61d7 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -1473,7 +1473,7 @@ static int sx9310_probe(struct i2c_client *client)
data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name,
- indio_dev->id);
+ iio_device_id(indio_dev));
if (!data->trig)
return -ENOMEM;
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index a87f4a8e4327..3e4ddb2e8c2b 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -946,7 +946,7 @@ static int sx9500_probe(struct i2c_client *client,
return ret;
data->trig = devm_iio_trigger_alloc(&client->dev,
- "%s-dev%d", indio_dev->name, indio_dev->id);
+ "%s-dev%d", indio_dev->name, iio_device_id(indio_dev));
if (!data->trig)
return -ENOMEM;
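The trigger allocations in these hunks now pass iio_device_id(indio_dev) instead of reading indio_dev->id directly, so drivers no longer reach into the IIO core's private fields. A minimal sketch with a hypothetical wrapper:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

/* Hypothetical helper: allocate a per-device trigger via the accessor. */
static struct iio_trigger *my_alloc_trigger(struct device *dev,
					    struct iio_dev *indio_dev)
{
	return devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
				      iio_device_id(indio_dev));
}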
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 4df60082c1fa..f20ae3c963cb 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -96,6 +96,16 @@ config TMP007
This driver can also be built as a module. If so, the module will
be called tmp007.
+config TMP117
+ tristate "TMP117 Digital temperature sensor with integrated NV memory"
+ depends on I2C
+ help
+ If you say yes here you get support for the Texas Instruments
+ TMP117 Digital temperature sensor with integrated NV memory.
+
+ This driver can also be built as a module. If so, the module will
+ be called tmp117.
+
config TSYS01
tristate "Measurement Specialties TSYS01 temperature sensor using I2C bus connection"
depends on I2C
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index 90c113115422..e3392c4b29b4 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_MLX90614) += mlx90614.o
obj-$(CONFIG_MLX90632) += mlx90632.o
obj-$(CONFIG_TMP006) += tmp006.o
obj-$(CONFIG_TMP007) += tmp007.o
+obj-$(CONFIG_TMP117) += tmp117.o
obj-$(CONFIG_TSYS01) += tsys01.o
obj-$(CONFIG_TSYS02D) += tsys02d.o
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index dc534ed784c3..d40f235af1d4 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -8,6 +8,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
@@ -291,3 +292,4 @@ module_platform_driver(hid_temperature_platform_driver);
MODULE_DESCRIPTION("HID Environmental temperature sensor");
MODULE_AUTHOR("Song Hongyan <hongyan.song@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index ef0fec94d269..afcb10ea7c44 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -176,11 +176,14 @@ static inline s32 mlx90614_iir_search(const struct i2c_client *client,
static int mlx90614_power_get(struct mlx90614_data *data, bool startup)
{
unsigned long now;
+ int ret;
if (!data->wakeup_gpio)
return 0;
- pm_runtime_get_sync(&data->client->dev);
+ ret = pm_runtime_resume_and_get(&data->client->dev);
+ if (ret < 0)
+ return ret;
if (startup) {
now = jiffies;
@@ -267,7 +270,10 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
*val = MLX90614_CONST_SCALE;
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
- mlx90614_power_get(data, false);
+ ret = mlx90614_power_get(data, false);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&data->lock);
ret = i2c_smbus_read_word_data(data->client,
MLX90614_EMISSIVITY);
@@ -287,7 +293,10 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: /* IIR setting with
FIR = 1024 */
- mlx90614_power_get(data, false);
+ ret = mlx90614_power_get(data, false);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&data->lock);
ret = i2c_smbus_read_word_data(data->client, MLX90614_CONFIG);
mutex_unlock(&data->lock);
@@ -319,7 +328,10 @@ static int mlx90614_write_raw(struct iio_dev *indio_dev,
val = val * MLX90614_CONST_RAW_EMISSIVITY_MAX +
val2 / MLX90614_CONST_EMISSIVITY_RESOLUTION;
- mlx90614_power_get(data, false);
+ ret = mlx90614_power_get(data, false);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&data->lock);
ret = mlx90614_write_word(data->client, MLX90614_EMISSIVITY,
val);
@@ -331,7 +343,10 @@ static int mlx90614_write_raw(struct iio_dev *indio_dev,
if (val < 0 || val2 < 0)
return -EINVAL;
- mlx90614_power_get(data, false);
+ ret = mlx90614_power_get(data, false);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&data->lock);
ret = mlx90614_iir_search(data->client,
val * 100 + val2 / 10000);
diff --git a/drivers/iio/temperature/tmp117.c b/drivers/iio/temperature/tmp117.c
new file mode 100644
index 000000000000..f9b8f2b570f6
--- /dev/null
+++ b/drivers/iio/temperature/tmp117.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Digital temperature sensor with integrated Non-volatile memory
+ * Copyright (c) 2021 Puranjay Mohan <puranjay12@gmail.com>
+ *
+ * Driver for the Texas Instruments TMP117 Temperature Sensor
+ * (7-bit I2C slave address (0x48 - 0x4B), changeable via ADD pins)
+ *
+ * Note: This driver assumes that the sensor has been calibrated beforehand.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+
+#include <linux/iio/iio.h>
+
+#define TMP117_REG_TEMP 0x0
+#define TMP117_REG_CFGR 0x1
+#define TMP117_REG_HIGH_LIM 0x2
+#define TMP117_REG_LOW_LIM 0x3
+#define TMP117_REG_EEPROM_UL 0x4
+#define TMP117_REG_EEPROM1 0x5
+#define TMP117_REG_EEPROM2 0x6
+#define TMP117_REG_TEMP_OFFSET 0x7
+#define TMP117_REG_EEPROM3 0x8
+#define TMP117_REG_DEVICE_ID 0xF
+
+#define TMP117_RESOLUTION_10UC 78125
+#define TMP117_DEVICE_ID 0x0117
+#define MICRODEGREE_PER_10MILLIDEGREE 10000
+
+struct tmp117_data {
+ struct i2c_client *client;
+ s16 calibbias;
+};
+
+static int tmp117_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct tmp117_data *data = iio_priv(indio_dev);
+ s32 ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_swapped(data->client,
+ TMP117_REG_TEMP);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 15);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = i2c_smbus_read_word_swapped(data->client,
+ TMP117_REG_TEMP_OFFSET);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 15);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * Convert from units of 10s of uC to mC,
+ * since IIO reports temperature in mC.
+ */
+ *val = TMP117_RESOLUTION_10UC / MICRODEGREE_PER_10MILLIDEGREE;
+ *val2 = (TMP117_RESOLUTION_10UC %
+ MICRODEGREE_PER_10MILLIDEGREE) * 100;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tmp117_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int val,
+ int val2, long mask)
+{
+ struct tmp117_data *data = iio_priv(indio_dev);
+ s16 off;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ off = clamp_t(int, val, S16_MIN, S16_MAX);
+ if (off == data->calibbias)
+ return 0;
+ data->calibbias = off;
+ return i2c_smbus_write_word_swapped(data->client,
+ TMP117_REG_TEMP_OFFSET, off);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec tmp117_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static const struct iio_info tmp117_info = {
+ .read_raw = tmp117_read_raw,
+ .write_raw = tmp117_write_raw,
+};
+
+static int tmp117_identify(struct i2c_client *client)
+{
+ int dev_id;
+
+ dev_id = i2c_smbus_read_word_swapped(client, TMP117_REG_DEVICE_ID);
+ if (dev_id < 0)
+ return dev_id;
+ if (dev_id != TMP117_DEVICE_ID) {
+ dev_err(&client->dev, "TMP117 not found\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int tmp117_probe(struct i2c_client *client)
+{
+ struct tmp117_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ ret = tmp117_identify(client);
+ if (ret < 0)
+ return ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->calibbias = 0;
+
+ indio_dev->name = "tmp117";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tmp117_info;
+
+ indio_dev->channels = tmp117_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tmp117_channels);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id tmp117_of_match[] = {
+ { .compatible = "ti,tmp117", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tmp117_of_match);
+
+static const struct i2c_device_id tmp117_id[] = {
+ { "tmp117", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp117_id);
+
+static struct i2c_driver tmp117_driver = {
+ .driver = {
+ .name = "tmp117",
+ .of_match_table = tmp117_of_match,
+ },
+ .probe_new = tmp117_probe,
+ .id_table = tmp117_id,
+};
+module_i2c_driver(tmp117_driver);
+
+MODULE_AUTHOR("Puranjay Mohan <puranjay12@gmail.com>");
+MODULE_DESCRIPTION("TI TMP117 Temperature sensor driver");
+MODULE_LICENSE("GPL");
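For the scale path in tmp117_read_raw() above, one raw LSB is 78125 tens of microdegrees: 78125 / 10000 = 7 with remainder 8125, and 8125 * 100 = 812500, so IIO_VAL_INT_PLUS_MICRO reports 7.812500 millidegrees Celsius per LSB, matching the TMP117's 7.8125 m°C resolution.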
diff --git a/drivers/iio/test/iio-test-format.c b/drivers/iio/test/iio-test-format.c
index 55a0cfe9181d..f1e951eddb43 100644
--- a/drivers/iio/test/iio-test-format.c
+++ b/drivers/iio/test/iio-test-format.c
@@ -8,7 +8,7 @@
#include <linux/iio/iio.h>
#define IIO_TEST_FORMAT_EXPECT_EQ(_test, _buf, _ret, _val) do { \
- KUNIT_EXPECT_EQ(_test, (int)strlen(_buf), _ret); \
+ KUNIT_EXPECT_EQ(_test, strlen(_buf), _ret); \
KUNIT_EXPECT_STREQ(_test, (_buf), (_val)); \
} while (0)
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 3aa9e8bba005..33083877cd19 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -296,7 +296,7 @@ static ssize_t stm32_tt_show_master_mode(struct device *dev,
else
cr2 = (cr2 & TIM_CR2_MMS) >> TIM_CR2_MMS_SHIFT;
- return snprintf(buf, PAGE_SIZE, "%s\n", master_mode_table[cr2]);
+ return sysfs_emit(buf, "%s\n", master_mode_table[cr2]);
}
static ssize_t stm32_tt_store_master_mode(struct device *dev,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 515a7e95a421..5d3b8b8d163d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
return ret;
}
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+ if (ret)
+ return ret;
+
+ return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
struct rdma_id_private *id_priv;
struct ib_qp *qp;
- int ret = 0;
+ int ret;
id_priv = container_of(id, struct rdma_id_private, id);
if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp);
+ else
+ ret = cma_init_conn_qp(id_priv, qp);
if (ret)
goto out_destroy;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index d5674026512a..a8688a92c760 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
if (!chip_ctx)
return -ENOMEM;
chip_ctx->chip_num = bp->chip_num;
+ chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
rdev->chip_ctx = chip_ctx;
/* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
dma_addr_t dma_map,
u32 *fw_stats_ctx_id)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
struct hwrm_stat_ctx_alloc_output resp = {0};
struct hwrm_stat_ctx_alloc_input req = {0};
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
- req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+ req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 17f0701b3cee..44282a8cdd4f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -56,6 +56,7 @@
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats);
/* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
goto fail;
stats_alloc:
/* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
if (rc)
goto fail;
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
}
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
- /* 128 byte aligned context memory is required only for 57500.
- * However making this unconditional, it does not harm previous
- * generation.
- */
- stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+ stats->size = cctx->hw_stats_size;
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index c291f495ae91..91031502e8f5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
u16 chip_num;
u8 chip_rev;
u8 chip_metal;
+ u16 hw_stats_size;
struct bnxt_qplib_drv_modes modes;
};
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6c8c910f4e86..c7e8d7b3baa1 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return !err || err == -ENODATA ? npolled : err;
}
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+ if (refcount_dec_and_test(&chp->refcnt))
+ complete(&chp->cq_rel_comp);
+}
+
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
chp = to_c4iw_cq(ib_cq);
xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- refcount_dec(&chp->refcnt);
- wait_event(chp->wait, !refcount_read(&chp->refcnt));
+ c4iw_cq_rem_ref(chp);
+ wait_for_completion(&chp->cq_rel_comp);
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
spin_lock_init(&chp->lock);
spin_lock_init(&chp->comp_handler_lock);
refcount_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
+ init_completion(&chp->cq_rel_comp);
ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
if (ret)
goto err_destroy_cq;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 7798d090888b..34211a533d5c 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break;
}
done:
- if (refcount_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
+ c4iw_cq_rem_ref(chp);
c4iw_qp_rem_ref(&qhp->ibqp);
out:
return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- if (refcount_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
+ c4iw_cq_rem_ref(chp);
} else {
pr_debug("unknown cqid 0x%x\n", qid);
xa_unlock_irqrestore(&dev->cqs, flag);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 3883af3d2312..ac5f581aff4c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -428,7 +428,7 @@ struct c4iw_cq {
spinlock_t lock;
spinlock_t comp_handler_lock;
refcount_t refcnt;
- wait_queue_head_t wait;
+ struct completion cq_rel_comp;
struct c4iw_wr_wait *wr_waitp;
};
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h
index 8db2253523ff..93338988b922 100644
--- a/drivers/infiniband/hw/hfi1/trace_misc.h
+++ b/drivers/infiniband/hw/hfi1/trace_misc.h
@@ -63,7 +63,7 @@ TRACE_EVENT(hfi1_interrupt,
__array(char, buf, 64)
__field(int, src)
),
- TP_fast_assign(DD_DEV_ASSIGN(dd)
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
is_entry->is_name(__entry->buf, 64,
src - is_entry->start);
__entry->src = src;
@@ -100,7 +100,7 @@ TRACE_EVENT(hfi1_fault_opcode,
__field(u32, qpn)
__field(u8, opcode)
),
- TP_fast_assign(DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ TP_fast_assign(DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->opcode = opcode;
),
diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h
index 1ebca37862e0..5f49e1eeb211 100644
--- a/drivers/infiniband/hw/hfi1/trace_rc.h
+++ b/drivers/infiniband/hw/hfi1/trace_rc.h
@@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(hfi1_rc_template,
__field(u32, r_psn)
),
TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->psn = psn;
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(/* rc_ack */
__field(u32, lpsn)
),
TP_fast_assign(/* assign */
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->aeth = aeth;
__entry->psn = psn;
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
index 985ffa9cc958..d129b8195959 100644
--- a/drivers/infiniband/hw/hfi1/trace_tid.h
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -886,7 +886,7 @@ DECLARE_EVENT_CLASS(/* sender_info */
__field(u8, s_retry)
),
TP_fast_assign(/* assign */
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->state = qp->state;
__entry->s_cur = qp->s_cur;
@@ -1285,7 +1285,7 @@ DECLARE_EVENT_CLASS(/* rc_rcv_err */
__field(int, diff)
),
TP_fast_assign(/* assign */
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->state = qp->state;
@@ -1574,7 +1574,7 @@ DECLARE_EVENT_CLASS(/* tid_ack */
__field(u32, resync_psn)
),
TP_fast_assign(/* assign */
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->aeth = aeth;
__entry->psn = psn;
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index d44fc54858b9..f1922a7619fe 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -120,7 +120,7 @@ DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
__field(unsigned long, iow_flags)
),
TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->flags = flags;
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
@@ -868,7 +868,7 @@ TRACE_EVENT(
__field(int, send_flags)
),
TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->wqe = wqe;
__entry->wr_id = wqe->wr.wr_id;
__entry->qpn = qp->ibqp.qp_num;
@@ -904,7 +904,7 @@ DECLARE_EVENT_CLASS(
__field(bool, flag)
),
TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->flag = flag;
),
@@ -952,7 +952,7 @@ DECLARE_EVENT_CLASS(/* AIP */
__field(u8, stopped)
),
TP_fast_assign(/* assign */
- DD_DEV_ASSIGN(txq->priv->dd)
+ DD_DEV_ASSIGN(txq->priv->dd);
__entry->txq = txq;
__entry->sde = txq->sde;
__entry->head = txq->tx_ring.head;
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 8f68cc3ff193..84f3f2b5f097 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
hr_cmd->context =
kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
- if (!hr_cmd->context)
+ if (!hr_cmd->context) {
+ hr_dev->cmd_mod = 0;
return -ENOMEM;
+ }
for (i = 0; i < hr_cmd->max_cmds; ++i) {
hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_cmd->context_lock);
hr_cmd->use_events = 1;
- down(&hr_cmd->poll_sem);
return 0;
}
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
kfree(hr_cmd->context);
hr_cmd->use_events = 0;
-
- up(&hr_cmd->poll_sem);
}
struct hns_roce_cmd_mailbox *
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 078a97193f0e..cc6eab14a222 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod) {
ret = hns_roce_cmd_use_events(hr_dev);
- if (ret) {
+ if (ret)
dev_warn(dev,
"Cmd event mode failed, set back to poll!\n");
- hns_roce_cmd_use_polling(hr_dev);
- }
}
ret = hns_roce_init_hem(hr_dev);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index b1023a7d0bd1..f1e5515256e0 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
* parses fpm commit info and copy base value
* of hmc objects in hmc_info
*/
-static enum irdma_status_code
+static void
irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_obj_info *info, u32 *sd)
{
@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
else
*sd = (u32)(size >> 21);
- return 0;
}
/**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
* @dev: sc device struct
* @count: allocate count
*/
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
{
writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
- return 0;
}
/**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
&commit_fpm_mem, true, wait_type);
if (!ret_code)
- ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
- hmc_info->hmc_obj,
- &hmc_info->sd_table.sd_cnt);
+ irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
false);
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7afb8a6a0526..00de5ee9a260 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
* irdma_set_hw_rsrc - set hw memory resources.
* @rf: RDMA PCI function
*/
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
{
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
spin_lock_init(&rf->arp_lock);
spin_lock_init(&rf->qptable_lock);
spin_lock_init(&rf->qh_list_lock);
-
- return 0;
}
/**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
- ret = irdma_set_hw_rsrc(rf);
- if (ret)
- goto set_hw_rsrc_fail;
+ irdma_set_hw_rsrc(rf);
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
-set_hw_rsrc_fail:
- kfree(rf->mem_rsrc);
- rf->mem_rsrc = NULL;
mem_rsrc_kzalloc_fail:
kfree(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index ea59432351fb..51a41359e0b4 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+ struct ice_vsi *vsi)
{
struct irdma_pci_f *rf = iwdev->rf;
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
rf->cdev = pf;
rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct iidc_qos_params qos_info = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_l2params l2params = {};
int err;
+ if (!vsi)
+ return -EIO;
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
return -ENOMEM;
}
- irdma_fill_device_info(iwdev, pf);
+ irdma_fill_device_info(iwdev, pf, vsi);
rf = iwdev->rf;
if (irdma_ctrl_init_hw(rf)) {
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 7387b83e826d..874bc25a938b 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
- u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
int abi_ver);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index a6d52c20091c..5fb92de1f015 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info)
{
- u32 total_size = 0, wqe_idx, i, byte_off;
+ u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
__le64 *wqe;
u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
if (qp->max_rq_frag_cnt < info->num_sges)
return IRDMA_ERR_INVALID_FRAG_COUNT;
- for (i = 0; i < info->num_sges; i++)
- total_size += info->sg_list[i].len;
-
wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
if (!wqe)
return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9712f6902ba8..717147ed0519 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
* @iwqp: qp ptr
* @init_info: initialize info to return
*/
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *init_info)
{
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
}
-
- return 0;
}
/**
@@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
}
}
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ irdma_setup_virt_qp(iwdev, iwqp, &init_info);
} else {
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 7abeb576b3c5..b8e5e371bb19 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
u32 *cqb = NULL;
void *cqc;
int cqe_size;
- unsigned int irqn;
int eqn;
int err;
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
- err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+ err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
if (err)
goto err_cqb;
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cqb;
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
- cq->mcq.irqn = irqn;
if (udata)
cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
else
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index eb9b0a2707f8..c869b2a91a28 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
struct mlx5_ib_dev *dev;
int user_vector;
int dev_eqn;
- unsigned int irqn;
int err;
if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
- err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+ err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
if (err < 0)
return err;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3263851ea574..3f1c5a4f158b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
*/
spin_unlock_irq(&ent->lock);
need_delay = need_resched() || someone_adding(cache) ||
- time_after(jiffies,
- READ_ONCE(cache->last_add) + 300 * HZ);
+ !time_after(jiffies,
+ READ_ONCE(cache->last_add) + 300 * HZ);
spin_lock_irq(&ent->lock);
if (ent->disabled)
goto out;
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
index e3c416c6f900..91bc192cee5e 100644
--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -85,7 +85,7 @@ DECLARE_EVENT_CLASS(rvt_cq_template,
__field(int, comp_vector_cpu)
__field(u32, flags)
),
- TP_fast_assign(RDI_DEV_ASSIGN(cq->rdi)
+ TP_fast_assign(RDI_DEV_ASSIGN(cq->rdi);
__entry->ip = cq->ip;
__entry->cqe = attr->cqe;
__entry->comp_vector = attr->comp_vector;
@@ -123,7 +123,7 @@ DECLARE_EVENT_CLASS(
__field(u32, imm)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(cq->rdi)
+ RDI_DEV_ASSIGN(cq->rdi);
__entry->wr_id = wc->wr_id;
__entry->status = wc->status;
__entry->opcode = wc->opcode;
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
index 95b8a0e3b8bd..c5b675ca4fa0 100644
--- a/drivers/infiniband/sw/rdmavt/trace_mr.h
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -195,7 +195,7 @@ TRACE_EVENT(
__field(uint, sg_offset)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(to_imr(ibmr)->mr.pd->device))
+ RDI_DEV_ASSIGN(ib_to_rvt(to_imr(ibmr)->mr.pd->device));
__entry->ibmr_iova = ibmr->iova;
__entry->iova = to_imr(ibmr)->mr.iova;
__entry->user_base = to_imr(ibmr)->mr.user_base;
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index c32d21cc615e..800cec8bb3c7 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -65,7 +65,7 @@ DECLARE_EVENT_CLASS(rvt_qphash_template,
__field(u32, bucket)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->bucket = bucket;
),
@@ -97,7 +97,7 @@ DECLARE_EVENT_CLASS(
__field(u32, to)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->hrtimer = &qp->s_rnr_timer;
__entry->s_flags = qp->s_flags;
diff --git a/drivers/infiniband/sw/rdmavt/trace_rc.h b/drivers/infiniband/sw/rdmavt/trace_rc.h
index c47357af2099..9de52e138025 100644
--- a/drivers/infiniband/sw/rdmavt/trace_rc.h
+++ b/drivers/infiniband/sw/rdmavt/trace_rc.h
@@ -71,7 +71,7 @@ DECLARE_EVENT_CLASS(rvt_rc_template,
__field(u32, r_psn)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->psn = psn;
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index d963ca755828..cb96be0f8f19 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -111,7 +111,7 @@ TRACE_EVENT(
__field(int, wr_num_sge)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
__entry->wqe = wqe;
__entry->wr_id = wqe->wr.wr_id;
__entry->qpn = qp->ibqp.qp_num;
@@ -170,7 +170,7 @@ TRACE_EVENT(
__field(int, send_flags)
),
TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
__entry->wqe = wqe;
__entry->wr_id = wqe->wr.wr_id;
__entry->qpn = qp->ibqp.qp_num;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..be4bcb420fab 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
+ int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
- pr_warn("err %d from rxe_umem_get\n",
- (int)PTR_ERR(umem));
+ pr_warn("%s: Unable to pin memory region err = %d\n",
+ __func__, (int)PTR_ERR(umem));
err = PTR_ERR(umem);
- goto err1;
+ goto err_out;
}
mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
err = rxe_mr_alloc(mr, num_buf);
if (err) {
- pr_warn("err %d from rxe_mr_alloc\n", err);
- ib_umem_release(umem);
- goto err1;
+ pr_warn("%s: Unable to allocate memory for map\n",
+ __func__);
+ goto err_release_umem;
}
mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
- pr_warn("null vaddr\n");
- ib_umem_release(umem);
+ pr_warn("%s: Unable to get virtual address\n",
+ __func__);
err = -ENOMEM;
- goto err1;
+ goto err_cleanup_map;
}
buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
return 0;
-err1:
+err_cleanup_map:
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+ kfree(mr->map);
+err_release_umem:
+ ib_umem_release(umem);
+err_out:
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index dec92928a1cd..5ac27f28ace1 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
iph->version = IPVERSION;
iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->tot_len = htons(skb->len);
iph->frag_off = df;
iph->protocol = proto;
iph->tos = tos;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3743dc39b60c..360ec67cb9e1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
return RESPST_ERR_MALFORMED_WQE;
}
- size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+ size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
memcpy(&qp->resp.srq_wqe, wqe, size);
qp->resp.wqe = &qp->resp.srq_wqe.wqe;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8fcaa1136f2c..776e46ee95da 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -506,6 +506,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
iser_conn->iscsi_conn = conn;
out:
+ iscsi_put_endpoint(ep);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
@@ -1002,6 +1003,7 @@ static struct iscsi_transport iscsi_iser_transport = {
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 6ba48a09eac4..8d5cf5eb5778 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2209,7 +2209,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
* to reduce queue depth temporarily.
*/
scmnd->result = len == -ENOMEM ?
- DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
+ DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
goto err_iu;
}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index ec0e861f185f..5baebf62df33 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -4,7 +4,6 @@
#
menu "Input device support"
- depends on !UML
config INPUT
tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index c0d0b121ae7e..e47bdf92088a 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -7,9 +7,6 @@
* Input driver event debug module - dumps all events into syslog
*/
-/*
- */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
diff --git a/drivers/input/gameport/Kconfig b/drivers/input/gameport/Kconfig
index 4761795cb49f..5a2c2fb3217d 100644
--- a/drivers/input/gameport/Kconfig
+++ b/drivers/input/gameport/Kconfig
@@ -4,6 +4,7 @@
#
config GAMEPORT
tristate "Gameport support"
+ depends on !UML
help
Gameport support is for the standard 15-pin PC gameport. If you
have a joystick, gamepad, gameport card, a soundcard with a gameport
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index da8963a9f044..947d440a3be6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -499,7 +499,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
memcpy(joydev->keypam, keypam, len);
for (i = 0; i < joydev->nkey; i++)
- joydev->keymap[keypam[i] - BTN_MISC] = i;
+ joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
out:
kfree(keypam);
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 5e38899058c1..3b23078bc7b5 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig INPUT_JOYSTICK
bool "Joysticks/Gamepads"
+ depends on !UML
help
If you have a joystick, 6dof controller, gamepad, steering wheel,
weapon control system or something like that you can say Y here
@@ -372,6 +373,15 @@ config JOYSTICK_PXRC
To compile this driver as a module, choose M here: the
module will be called pxrc.
+config JOYSTICK_QWIIC
+ tristate "SparkFun Qwiic Joystick"
+ depends on I2C
+ help
+ Say Y here if you want to use the SparkFun Qwiic Joystick.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qwiic-joystick.
+
config JOYSTICK_FSIA6B
tristate "FlySky FS-iA6B RC Receiver"
select SERIO
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 31d720c9e493..5174b8aba2dd 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_JOYSTICK_MAPLE) += maplecontrol.o
obj-$(CONFIG_JOYSTICK_N64) += n64joy.o
obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o
obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o
+obj-$(CONFIG_JOYSTICK_QWIIC) += qwiic-joystick.o
obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o
obj-$(CONFIG_JOYSTICK_SPACEBALL) += spaceball.o
obj-$(CONFIG_JOYSTICK_SPACEORB) += spaceorb.o
diff --git a/drivers/input/joystick/qwiic-joystick.c b/drivers/input/joystick/qwiic-joystick.c
new file mode 100644
index 000000000000..d4da31c0616c
--- /dev/null
+++ b/drivers/input/joystick/qwiic-joystick.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Oleh Kravchenko <oleg@kaa.org.ua>
+ *
+ * SparkFun Qwiic Joystick
+ * Product page: https://www.sparkfun.com/products/15168
+ * Firmware and hardware sources: https://github.com/sparkfun/Qwiic_Joystick
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define DRV_NAME "qwiic-joystick"
+
+#define QWIIC_JSK_REG_VERS 1
+#define QWIIC_JSK_REG_DATA 3
+
+#define QWIIC_JSK_MAX_AXIS GENMASK(9, 0)
+#define QWIIC_JSK_FUZZ 2
+#define QWIIC_JSK_FLAT 2
+#define QWIIC_JSK_POLL_INTERVAL 16
+#define QWIIC_JSK_POLL_MIN 8
+#define QWIIC_JSK_POLL_MAX 32
+
+struct qwiic_jsk {
+ char phys[32];
+ struct input_dev *dev;
+ struct i2c_client *client;
+};
+
+struct qwiic_ver {
+ u8 major;
+ u8 minor;
+};
+
+struct qwiic_data {
+ __be16 x;
+ __be16 y;
+ u8 thumb;
+};
+
+static void qwiic_poll(struct input_dev *input)
+{
+ struct qwiic_jsk *priv = input_get_drvdata(input);
+ struct qwiic_data data;
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(priv->client, QWIIC_JSK_REG_DATA,
+ sizeof(data), (u8 *)&data);
+ if (err != sizeof(data))
+ return;
+
+ input_report_abs(input, ABS_X, be16_to_cpu(data.x) >> 6);
+ input_report_abs(input, ABS_Y, be16_to_cpu(data.y) >> 6);
+ input_report_key(input, BTN_THUMBL, !data.thumb);
+ input_sync(input);
+}
+
+static int qwiic_probe(struct i2c_client *client)
+{
+ struct qwiic_jsk *priv;
+ struct qwiic_ver vers;
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, QWIIC_JSK_REG_VERS,
+ sizeof(vers), (u8 *)&vers);
+ if (err < 0)
+ return err;
+ if (err != sizeof(vers))
+ return -EIO;
+
+ dev_dbg(&client->dev, "SparkFun Qwiic Joystick, FW: %u.%u\n",
+ vers.major, vers.minor);
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ snprintf(priv->phys, sizeof(priv->phys),
+ "i2c/%s", dev_name(&client->dev));
+ i2c_set_clientdata(client, priv);
+
+ priv->dev = devm_input_allocate_device(&client->dev);
+ if (!priv->dev)
+ return -ENOMEM;
+
+ priv->dev->id.bustype = BUS_I2C;
+ priv->dev->name = "SparkFun Qwiic Joystick";
+ priv->dev->phys = priv->phys;
+ input_set_drvdata(priv->dev, priv);
+
+ input_set_abs_params(priv->dev, ABS_X, 0, QWIIC_JSK_MAX_AXIS,
+ QWIIC_JSK_FUZZ, QWIIC_JSK_FLAT);
+ input_set_abs_params(priv->dev, ABS_Y, 0, QWIIC_JSK_MAX_AXIS,
+ QWIIC_JSK_FUZZ, QWIIC_JSK_FLAT);
+ input_set_capability(priv->dev, EV_KEY, BTN_THUMBL);
+
+ err = input_setup_polling(priv->dev, qwiic_poll);
+ if (err) {
+ dev_err(&client->dev, "failed to set up polling: %d\n", err);
+ return err;
+ }
+ input_set_poll_interval(priv->dev, QWIIC_JSK_POLL_INTERVAL);
+ input_set_min_poll_interval(priv->dev, QWIIC_JSK_POLL_MIN);
+ input_set_max_poll_interval(priv->dev, QWIIC_JSK_POLL_MAX);
+
+ err = input_register_device(priv->dev);
+ if (err) {
+ dev_err(&client->dev, "failed to register joystick: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_qwiic_match[] = {
+ { .compatible = "sparkfun,qwiic-joystick", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_qwiic_match);
+#endif /* CONFIG_OF */
+
+static const struct i2c_device_id qwiic_id_table[] = {
+ { KBUILD_MODNAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, qwiic_id_table);
+
+static struct i2c_driver qwiic_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(of_qwiic_match),
+ },
+ .id_table = qwiic_id_table,
+ .probe_new = qwiic_probe,
+};
+module_i2c_driver(qwiic_driver);
+
+MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
+MODULE_DESCRIPTION("SparkFun Qwiic Joystick driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index fac91ea14f17..8e9672deb1eb 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -660,6 +660,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
fallthrough;
case 45: /* Ambiguous packet length */
if (j <= 40) { /* ID length less or eq 40 -> FSP */
+ fallthrough;
case 43:
sw->type = SW_ID_FSP;
break;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d69d7657ab12..29de8412e416 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -79,6 +79,7 @@
#define MAP_DPAD_TO_BUTTONS (1 << 0)
#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
#define MAP_STICKS_TO_NULL (1 << 2)
+#define MAP_SELECT_BUTTON (1 << 3)
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -130,6 +131,7 @@ static const struct xpad_device {
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x0b12, "Microsoft Xbox One X pad", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
@@ -864,6 +866,8 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & 0x04);
input_report_key(dev, BTN_SELECT, data[4] & 0x08);
+ if (xpad->mapping & MAP_SELECT_BUTTON)
+ input_report_key(dev, KEY_RECORD, data[22] & 0x01);
/* buttons A,B,X,Y */
input_report_key(dev, BTN_A, data[4] & 0x10);
@@ -1674,6 +1678,8 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
+ if (xpad->mapping & MAP_SELECT_BUTTON)
+ input_set_capability(input_dev, EV_KEY, KEY_RECORD);
} else {
for (i = 0; xpad_btn[i] >= 0; i++)
input_set_capability(input_dev, EV_KEY, xpad_btn[i]);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index edc613efc158..fbdef95291e9 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -324,7 +324,7 @@ static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
static umode_t atkbd_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct serio *serio = to_serio_port(dev);
struct atkbd *atkbd = serio_get_drvdata(serio);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 38457d9641bd..fc02c540636e 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -644,7 +644,7 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
if (attr == &dev_attr_function_row_physmap.attr &&
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index bb29a7c9a1c0..54afb38601b9 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -512,6 +512,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
HIL_IDD_NUM_AXES_PER_SET(*idd)) {
printk(KERN_INFO PREFIX
"combo devices are not supported.\n");
+ error = -EINVAL;
goto bail1;
}
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 81de8c4e37d0..6f38aa23a1ff 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -647,8 +647,8 @@ static int __ims_pcu_execute_command(struct ims_pcu *pcu,
#define IMS_PCU_BL_DATA_OFFSET 3
static int __ims_pcu_execute_bl_command(struct ims_pcu *pcu,
- u8 command, const void *data, size_t len,
- u8 expected_response, int response_time)
+ u8 command, const void *data, size_t len,
+ u8 expected_response, int response_time)
{
int error;
@@ -1228,7 +1228,7 @@ static struct attribute *ims_pcu_attrs[] = {
static umode_t ims_pcu_is_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct usb_interface *intf = to_usb_interface(dev);
struct ims_pcu *pcu = usb_get_intfdata(intf);
umode_t mode = attr->mode;
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index cf8104454e74..10e3fc0eac6e 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2011, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2014, Sony Mobile Communications Inc.
*/
@@ -22,6 +22,8 @@
#define PON_RT_STS 0x10
#define PON_KPDPWR_N_SET BIT(0)
#define PON_RESIN_N_SET BIT(1)
+#define PON_GEN3_RESIN_N_SET BIT(6)
+#define PON_GEN3_KPDPWR_N_SET BIT(7)
#define PON_PS_HOLD_RST_CTL 0x5a
#define PON_PS_HOLD_RST_CTL2 0x5b
@@ -38,8 +40,12 @@
#define PON_DBC_DELAY_MASK 0x7
struct pm8941_data {
- unsigned int pull_up_bit;
- unsigned int status_bit;
+ unsigned int pull_up_bit;
+ unsigned int status_bit;
+ bool supports_ps_hold_poff_config;
+ bool supports_debounce_config;
+ const char *name;
+ const char *phys;
};
struct pm8941_pwrkey {
@@ -231,34 +237,40 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
input_set_capability(pwrkey->input, EV_KEY, pwrkey->code);
- pwrkey->input->name = "pm8941_pwrkey";
- pwrkey->input->phys = "pm8941_pwrkey/input0";
-
- req_delay = (req_delay << 6) / USEC_PER_SEC;
- req_delay = ilog2(req_delay);
-
- error = regmap_update_bits(pwrkey->regmap,
- pwrkey->baseaddr + PON_DBC_CTL,
- PON_DBC_DELAY_MASK,
- req_delay);
- if (error) {
- dev_err(&pdev->dev, "failed to set debounce: %d\n", error);
- return error;
+ pwrkey->input->name = pwrkey->data->name;
+ pwrkey->input->phys = pwrkey->data->phys;
+
+ if (pwrkey->data->supports_debounce_config) {
+ req_delay = (req_delay << 6) / USEC_PER_SEC;
+ req_delay = ilog2(req_delay);
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_DBC_CTL,
+ PON_DBC_DELAY_MASK,
+ req_delay);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set debounce: %d\n",
+ error);
+ return error;
+ }
}
- error = regmap_update_bits(pwrkey->regmap,
- pwrkey->baseaddr + PON_PULL_CTL,
- pwrkey->data->pull_up_bit,
- pull_up ? pwrkey->data->pull_up_bit : 0);
- if (error) {
- dev_err(&pdev->dev, "failed to set pull: %d\n", error);
- return error;
+ if (pwrkey->data->pull_up_bit) {
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_PULL_CTL,
+ pwrkey->data->pull_up_bit,
+ pull_up ? pwrkey->data->pull_up_bit :
+ 0);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set pull: %d\n", error);
+ return error;
+ }
}
error = devm_request_threaded_irq(&pdev->dev, pwrkey->irq,
NULL, pm8941_pwrkey_irq,
IRQF_ONESHOT,
- "pm8941_pwrkey", pwrkey);
+ pwrkey->data->name, pwrkey);
if (error) {
dev_err(&pdev->dev, "failed requesting IRQ: %d\n", error);
return error;
@@ -271,12 +283,14 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return error;
}
- pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify,
- error = register_reboot_notifier(&pwrkey->reboot_notifier);
- if (error) {
- dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
- error);
- return error;
+ if (pwrkey->data->supports_ps_hold_poff_config) {
+ pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify,
+ error = register_reboot_notifier(&pwrkey->reboot_notifier);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
+ error);
+ return error;
+ }
}
platform_set_drvdata(pdev, pwrkey);
@@ -289,7 +303,8 @@ static int pm8941_pwrkey_remove(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey = platform_get_drvdata(pdev);
- unregister_reboot_notifier(&pwrkey->reboot_notifier);
+ if (pwrkey->data->supports_ps_hold_poff_config)
+ unregister_reboot_notifier(&pwrkey->reboot_notifier);
return 0;
}
@@ -297,16 +312,42 @@ static int pm8941_pwrkey_remove(struct platform_device *pdev)
static const struct pm8941_data pwrkey_data = {
.pull_up_bit = PON_KPDPWR_PULL_UP,
.status_bit = PON_KPDPWR_N_SET,
+ .name = "pm8941_pwrkey",
+ .phys = "pm8941_pwrkey/input0",
+ .supports_ps_hold_poff_config = true,
+ .supports_debounce_config = true,
};
static const struct pm8941_data resin_data = {
.pull_up_bit = PON_RESIN_PULL_UP,
.status_bit = PON_RESIN_N_SET,
+ .name = "pm8941_resin",
+ .phys = "pm8941_resin/input0",
+ .supports_ps_hold_poff_config = true,
+ .supports_debounce_config = true,
+};
+
+static const struct pm8941_data pon_gen3_pwrkey_data = {
+ .status_bit = PON_GEN3_KPDPWR_N_SET,
+ .name = "pmic_pwrkey",
+ .phys = "pmic_pwrkey/input0",
+ .supports_ps_hold_poff_config = false,
+ .supports_debounce_config = false,
+};
+
+static const struct pm8941_data pon_gen3_resin_data = {
+ .status_bit = PON_GEN3_RESIN_N_SET,
+ .name = "pmic_resin",
+ .phys = "pmic_resin/input0",
+ .supports_ps_hold_poff_config = false,
+ .supports_debounce_config = false,
};
static const struct of_device_id pm8941_pwr_key_id_table[] = {
{ .compatible = "qcom,pm8941-pwrkey", .data = &pwrkey_data },
{ .compatible = "qcom,pm8941-resin", .data = &resin_data },
+ { .compatible = "qcom,pmk8350-pwrkey", .data = &pon_gen3_pwrkey_data },
+ { .compatible = "qcom,pmk8350-resin", .data = &pon_gen3_resin_data },
{ }
};
MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index ef2fa0905208..4a86b3e31d3b 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -214,7 +214,7 @@ static bool trackpoint_is_attr_available(struct psmouse *psmouse,
static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct serio *serio = to_serio_port(dev);
struct psmouse *psmouse = serio_get_drvdata(serio);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index abae23af0791..0b9f1d0a8f8b 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -139,7 +139,7 @@ static DEFINE_SPINLOCK(i8042_lock);
/*
* Writers to AUX and KBD ports as well as users issuing i8042_command
* directly should acquire i8042_mutex (by means of calling
- * i8042_lock_chip() and i8042_unlock_ship() helpers) to ensure that
+ * i8042_lock_chip() and i8042_unlock_chip() helpers) to ensure that
* they do not disturb each other (unfortunately in many i8042
* implementations write to one of the ports will immediately abort
* command that is being processed by another port).
@@ -979,7 +979,7 @@ static int i8042_controller_selftest(void)
}
/*
- * i8042_controller init initializes the i8042 controller, and,
+ * i8042_controller_init initializes the i8042 controller, and,
* most importantly, sets it into non-xlated mode if that's
* desired.
*/
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 33e9d9bfd036..7fbbe00e3553 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -114,7 +114,8 @@ static void serport_ldisc_close(struct tty_struct *tty)
* 'interrupt' routine.
*/
-static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty,
+ const unsigned char *cp, const char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
@@ -273,6 +274,7 @@ static void serport_ldisc_write_wakeup(struct tty_struct * tty)
static struct tty_ldisc_ops serport_ldisc = {
.owner = THIS_MODULE,
+ .num = N_MOUSE,
.name = "input",
.open = serport_ldisc_open,
.close = serport_ldisc_close,
@@ -293,7 +295,7 @@ static struct tty_ldisc_ops serport_ldisc = {
static int __init serport_init(void)
{
int retval;
- retval = tty_register_ldisc(N_MOUSE, &serport_ldisc);
+ retval = tty_register_ldisc(&serport_ldisc);
if (retval)
printk(KERN_ERR "serport.c: Error registering line discipline.\n");
@@ -302,7 +304,7 @@ static int __init serport_init(void)
static void __exit serport_exit(void)
{
- tty_unregister_ldisc(N_MOUSE);
+ tty_unregister_ldisc(&serport_ldisc);
}
module_init(serport_init);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index f465bae618fe..495ef156cf43 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -7,15 +7,14 @@
* Some cleanups by Alan Cox <alan@linux.intel.com>
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/input/cy8ctmg110_pdata.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
#define CY8CTMG110_DRIVER_NAME "cy8ctmg110"
@@ -45,18 +44,18 @@ struct cy8ctmg110 {
struct input_dev *input;
char phys[32];
struct i2c_client *client;
- int reset_pin;
- int irq_pin;
+ struct gpio_desc *reset_gpio;
};
/*
* cy8ctmg110_power is the routine that is called when touch hardware
- * will powered off or on.
+ * is being powered off or on. When powering on this routine de-asserts
+ * the RESET line, when powering off reset line is asserted.
*/
static void cy8ctmg110_power(struct cy8ctmg110 *ts, bool poweron)
{
- if (ts->reset_pin)
- gpio_direction_output(ts->reset_pin, 1 - poweron);
+ if (ts->reset_gpio)
+ gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
}
static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg,
@@ -112,7 +111,6 @@ static int cy8ctmg110_touch_pos(struct cy8ctmg110 *tsc)
{
struct input_dev *input = tsc->input;
unsigned char reg_p[CY8CTMG110_REG_MAX];
- int x, y;
memset(reg_p, 0, CY8CTMG110_REG_MAX);
@@ -120,16 +118,15 @@ static int cy8ctmg110_touch_pos(struct cy8ctmg110 *tsc)
if (cy8ctmg110_read_regs(tsc, reg_p, 9, CY8CTMG110_TOUCH_X1) != 0)
return -EIO;
- y = reg_p[2] << 8 | reg_p[3];
- x = reg_p[0] << 8 | reg_p[1];
-
/* Number of touch */
if (reg_p[8] == 0) {
input_report_key(input, BTN_TOUCH, 0);
} else {
input_report_key(input, BTN_TOUCH, 1);
- input_report_abs(input, ABS_X, x);
- input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_X,
+ be16_to_cpup((__be16 *)(reg_p + 0)));
+ input_report_abs(input, ABS_Y,
+ be16_to_cpup((__be16 *)(reg_p + 2)));
}
input_sync(input);
@@ -163,35 +160,35 @@ static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void cy8ctmg110_shut_off(void *_ts)
+{
+ struct cy8ctmg110 *ts = _ts;
+
+ cy8ctmg110_set_sleepmode(ts, true);
+ cy8ctmg110_power(ts, false);
+}
+
static int cy8ctmg110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct cy8ctmg110_pdata *pdata = dev_get_platdata(&client->dev);
struct cy8ctmg110 *ts;
struct input_dev *input_dev;
int err;
- /* No pdata no way forward */
- if (pdata == NULL) {
- dev_err(&client->dev, "no pdata\n");
- return -ENODEV;
- }
-
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_WORD_DATA))
return -EIO;
- ts = kzalloc(sizeof(struct cy8ctmg110), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
- }
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev)
+ return -ENOMEM;
ts->client = client;
ts->input = input_dev;
- ts->reset_pin = pdata->reset_pin;
- ts->irq_pin = pdata->irq_pin;
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
@@ -199,84 +196,46 @@ static int cy8ctmg110_probe(struct i2c_client *client,
input_dev->name = CY8CTMG110_DRIVER_NAME " Touchscreen";
input_dev->phys = ts->phys;
input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X,
CY8CTMG110_X_MIN, CY8CTMG110_X_MAX, 4, 0);
input_set_abs_params(input_dev, ABS_Y,
CY8CTMG110_Y_MIN, CY8CTMG110_Y_MAX, 4, 0);
- if (ts->reset_pin) {
- err = gpio_request(ts->reset_pin, NULL);
- if (err) {
- dev_err(&client->dev,
- "Unable to request GPIO pin %d.\n",
- ts->reset_pin);
- goto err_free_mem;
- }
+ /* Request and assert reset line */
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev, NULL,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio)) {
+ err = PTR_ERR(ts->reset_gpio);
+ dev_err(&client->dev,
+ "Unable to request reset GPIO: %d\n", err);
+ return err;
}
cy8ctmg110_power(ts, true);
cy8ctmg110_set_sleepmode(ts, false);
- err = gpio_request(ts->irq_pin, "touch_irq_key");
- if (err < 0) {
- dev_err(&client->dev,
- "Failed to request GPIO %d, error %d\n",
- ts->irq_pin, err);
- goto err_shutoff_device;
- }
-
- err = gpio_direction_input(ts->irq_pin);
- if (err < 0) {
- dev_err(&client->dev,
- "Failed to configure input direction for GPIO %d, error %d\n",
- ts->irq_pin, err);
- goto err_free_irq_gpio;
- }
-
- client->irq = gpio_to_irq(ts->irq_pin);
- if (client->irq < 0) {
- err = client->irq;
- dev_err(&client->dev,
- "Unable to get irq number for GPIO %d, error %d\n",
- ts->irq_pin, err);
- goto err_free_irq_gpio;
- }
+ err = devm_add_action_or_reset(&client->dev, cy8ctmg110_shut_off, ts);
+ if (err)
+ return err;
- err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "touch_reset_key", ts);
- if (err < 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, cy8ctmg110_irq_thread,
+ IRQF_ONESHOT, "touch_reset_key", ts);
+ if (err) {
dev_err(&client->dev,
"irq %d busy? error %d\n", client->irq, err);
- goto err_free_irq_gpio;
+ return err;
}
err = input_register_device(input_dev);
if (err)
- goto err_free_irq;
+ return err;
i2c_set_clientdata(client, ts);
- device_init_wakeup(&client->dev, 1);
- return 0;
-err_free_irq:
- free_irq(client->irq, ts);
-err_free_irq_gpio:
- gpio_free(ts->irq_pin);
-err_shutoff_device:
- cy8ctmg110_set_sleepmode(ts, true);
- cy8ctmg110_power(ts, false);
- if (ts->reset_pin)
- gpio_free(ts->reset_pin);
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
- return err;
+ return 0;
}
static int __maybe_unused cy8ctmg110_suspend(struct device *dev)
@@ -284,12 +243,11 @@ static int __maybe_unused cy8ctmg110_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
- else {
+ if (!device_may_wakeup(&client->dev)) {
cy8ctmg110_set_sleepmode(ts, true);
cy8ctmg110_power(ts, false);
}
+
return 0;
}
@@ -298,34 +256,16 @@ static int __maybe_unused cy8ctmg110_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
- else {
+ if (!device_may_wakeup(&client->dev)) {
cy8ctmg110_power(ts, true);
cy8ctmg110_set_sleepmode(ts, false);
}
+
return 0;
}
static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
-static int cy8ctmg110_remove(struct i2c_client *client)
-{
- struct cy8ctmg110 *ts = i2c_get_clientdata(client);
-
- cy8ctmg110_set_sleepmode(ts, true);
- cy8ctmg110_power(ts, false);
-
- free_irq(client->irq, ts);
- input_unregister_device(ts->input);
- gpio_free(ts->irq_pin);
- if (ts->reset_pin)
- gpio_free(ts->reset_pin);
- kfree(ts);
-
- return 0;
-}
-
static const struct i2c_device_id cy8ctmg110_idtable[] = {
{ CY8CTMG110_DRIVER_NAME, 1 },
{ }
@@ -340,7 +280,6 @@ static struct i2c_driver cy8ctmg110_driver = {
},
.id_table = cy8ctmg110_idtable,
.probe = cy8ctmg110_probe,
- .remove = cy8ctmg110_remove,
};
module_i2c_driver(cy8ctmg110_driver);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 106dd4962785..1dbd849c9613 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/property.h>
#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include "cyttsp_core.h"
@@ -45,8 +46,15 @@
#define CY_MAXZ 255
#define CY_DELAY_DFLT 20 /* ms */
#define CY_DELAY_MAX 500
-#define CY_ACT_DIST_DFLT 0xF8
+/* Active distance in pixels for a gesture to be reported */
+#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
#define CY_ACT_DIST_MASK 0x0F
+/* Active Power state scanning/processing refresh interval */
+#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
+/* Low Power state scanning/processing refresh interval */
+#define CY_LP_INTRVL_DFLT 0x0A /* ms */
+/* touch timeout for the Active power */
+#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
#define CY_HNDSHK_BIT 0x80
/* device mode bits */
#define CY_OPERATE_MODE 0x00
@@ -608,6 +616,14 @@ static int cyttsp_parse_properties(struct cyttsp *ts)
return 0;
}
+static void cyttsp_disable_regulators(void *_ts)
+{
+ struct cyttsp *ts = _ts;
+
+ regulator_bulk_disable(ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+}
+
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
@@ -628,6 +644,32 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
ts->bus_ops = bus_ops;
ts->irq = irq;
+ /*
+ * VCPIN is the analog voltage supply
+ * VDD is the digital voltage supply
+ */
+ ts->regulators[0].supply = "vcpin";
+ ts->regulators[1].supply = "vdd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+ if (error) {
+ dev_err(dev, "Failed to get regulators: %d\n", error);
+ return ERR_PTR(error);
+ }
+
+ error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+ if (error) {
+ dev_err(dev, "Cannot enable regulators: %d\n", error);
+ return ERR_PTR(error);
+ }
+
+ error = devm_add_action_or_reset(dev, cyttsp_disable_regulators, ts);
+ if (error) {
+ dev_err(dev, "failed to install chip disable handler\n");
+ return ERR_PTR(error);
+ }
+
ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ts->reset_gpio)) {
error = PTR_ERR(ts->reset_gpio);
@@ -664,8 +706,7 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
}
error = devm_request_threaded_irq(dev, ts->irq, NULL, cyttsp_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
- IRQF_NO_AUTOEN,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"cyttsp", ts);
if (error) {
dev_err(ts->dev, "failed to request IRQ %d, err: %d\n",
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 9bc4fe7e6ac5..075509e695a2 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
-#include <linux/input/cyttsp.h>
+#include <linux/regulator/consumer.h>
#define CY_NUM_RETRY 16 /* max number of retries for read ops */
@@ -122,6 +122,7 @@ struct cyttsp {
enum cyttsp_state state;
bool suspended;
+ struct regulator_bulk_data regulators[2];
struct gpio_desc *reset_gpio;
bool use_hndshk;
u8 act_dist;
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 061debf64a2b..4c8473d327ab 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -18,6 +18,8 @@
#include <linux/i2c.h>
#include <linux/input.h>
+#define CY_I2C_NAME "cyttsp-i2c"
+
#define CY_I2C_DATA_SIZE 128
static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = {
@@ -52,10 +54,18 @@ static const struct i2c_device_id cyttsp_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cyttsp_i2c_id);
+static const struct of_device_id cyttsp_of_i2c_match[] = {
+ { .compatible = "cypress,cy8ctma340", },
+ { .compatible = "cypress,cy8ctst341", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cyttsp_of_i2c_match);
+
static struct i2c_driver cyttsp_i2c_driver = {
.driver = {
.name = CY_I2C_NAME,
.pm = &cyttsp_pm_ops,
+ .of_match_table = cyttsp_of_i2c_match,
},
.probe = cyttsp_i2c_probe,
.id_table = cyttsp_i2c_id,
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 54e410921d53..30c6fbf86a86 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -20,6 +20,8 @@
#include <linux/input.h>
#include <linux/spi/spi.h>
+#define CY_SPI_NAME "cyttsp-spi"
+
#define CY_SPI_WR_OP 0x00 /* r/~w */
#define CY_SPI_RD_OP 0x01
#define CY_SPI_CMD_BYTES 4
@@ -160,10 +162,18 @@ static int cyttsp_spi_probe(struct spi_device *spi)
return 0;
}
+static const struct of_device_id cyttsp_of_spi_match[] = {
+ { .compatible = "cypress,cy8ctma340", },
+ { .compatible = "cypress,cy8ctst341", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cyttsp_of_spi_match);
+
static struct spi_driver cyttsp_spi_driver = {
.driver = {
.name = CY_SPI_NAME,
.pm = &cyttsp_pm_ops,
+ .of_match_table = cyttsp_of_spi_match,
},
.probe = cyttsp_spi_probe,
};
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 2eefbc2485bc..263de3bfb6cd 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -104,6 +104,7 @@ struct edt_ft5x06_ts_data {
u16 num_x;
u16 num_y;
struct regulator *vcc;
+ struct regulator *iovcc;
struct gpio_desc *reset_gpio;
struct gpio_desc *wake_gpio;
@@ -1062,11 +1063,12 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
}
}
-static void edt_ft5x06_disable_regulator(void *arg)
+static void edt_ft5x06_disable_regulators(void *arg)
{
struct edt_ft5x06_ts_data *data = arg;
regulator_disable(data->vcc);
+ regulator_disable(data->iovcc);
}
static int edt_ft5x06_ts_probe(struct i2c_client *client,
@@ -1107,14 +1109,33 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
+ tsdata->iovcc = devm_regulator_get(&client->dev, "iovcc");
+ if (IS_ERR(tsdata->iovcc)) {
+ error = PTR_ERR(tsdata->iovcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to request iovcc regulator: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(tsdata->iovcc);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to enable iovcc: %d\n", error);
+ return error;
+ }
+
+ /* Delay enabling VCC for > 10us (T_ivd) after IOVCC */
+ usleep_range(10, 100);
+
error = regulator_enable(tsdata->vcc);
if (error < 0) {
dev_err(&client->dev, "failed to enable vcc: %d\n", error);
+ regulator_disable(tsdata->iovcc);
return error;
}
error = devm_add_action_or_reset(&client->dev,
- edt_ft5x06_disable_regulator,
+ edt_ft5x06_disable_regulators,
tsdata);
if (error)
return error;
@@ -1289,6 +1310,9 @@ static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
ret = regulator_disable(tsdata->vcc);
if (ret)
dev_warn(dev, "Failed to disable vcc\n");
+ ret = regulator_disable(tsdata->iovcc);
+ if (ret)
+ dev_warn(dev, "Failed to disable iovcc\n");
return 0;
}
@@ -1319,9 +1343,19 @@ static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
gpiod_set_value_cansleep(reset_gpio, 1);
usleep_range(5000, 6000);
+ ret = regulator_enable(tsdata->iovcc);
+ if (ret) {
+ dev_err(dev, "Failed to enable iovcc\n");
+ return ret;
+ }
+
+ /* Delay enabling VCC for > 10us (T_ivd) after IOVCC */
+ usleep_range(10, 100);
+
ret = regulator_enable(tsdata->vcc);
if (ret) {
dev_err(dev, "Failed to enable vcc\n");
+ regulator_disable(tsdata->iovcc);
return ret;
}
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 17540bdb1eaf..68f542bb809f 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1369,8 +1369,7 @@ static bool elants_acpi_is_hid_device(struct device *dev)
}
#endif
-static int elants_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int elants_i2c_probe(struct i2c_client *client)
{
union i2c_smbus_data dummy;
struct elants_data *ts;
@@ -1396,7 +1395,7 @@ static int elants_i2c_probe(struct i2c_client *client,
init_completion(&ts->cmd_done);
ts->client = client;
- ts->chip_id = (enum elants_chip_id)id->driver_data;
+ ts->chip_id = (enum elants_chip_id)(uintptr_t)device_get_match_data(&client->dev);
i2c_set_clientdata(client, ts);
ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
@@ -1636,15 +1635,15 @@ MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
#ifdef CONFIG_OF
static const struct of_device_id elants_of_match[] = {
- { .compatible = "elan,ekth3500" },
- { .compatible = "elan,ektf3624" },
+ { .compatible = "elan,ekth3500", .data = (void *)EKTH3500 },
+ { .compatible = "elan,ektf3624", .data = (void *)EKTF3624 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, elants_of_match);
#endif
static struct i2c_driver elants_i2c_driver = {
- .probe = elants_i2c_probe,
+ .probe_new = elants_i2c_probe,
.id_table = elants_i2c_id,
.driver = {
.name = DEVICE_NAME,
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index ddad4a82a5e5..e9547ee29756 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -361,13 +361,16 @@ static int hideep_enter_pgm(struct hideep_ts *ts)
return -EIO;
}
-static void hideep_nvm_unlock(struct hideep_ts *ts)
+static int hideep_nvm_unlock(struct hideep_ts *ts)
{
u32 unmask_code;
+ int error;
hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_SFR_RPAGE);
- hideep_pgm_r_reg(ts, 0x0000000C, &unmask_code);
+ error = hideep_pgm_r_reg(ts, 0x0000000C, &unmask_code);
hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
+ if (error)
+ return error;
/* make it unprotected code */
unmask_code &= ~HIDEEP_PROT_MODE;
@@ -384,6 +387,8 @@ static void hideep_nvm_unlock(struct hideep_ts *ts)
NVM_W_SFR(HIDEEP_NVM_MASK_OFS, ts->nvm_mask);
SET_FLASH_HWCONTROL();
hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
+
+ return 0;
}
static int hideep_check_status(struct hideep_ts *ts)
@@ -462,7 +467,9 @@ static int hideep_program_nvm(struct hideep_ts *ts,
u32 addr = 0;
int error;
- hideep_nvm_unlock(ts);
+ error = hideep_nvm_unlock(ts);
+ if (error)
+ return error;
while (ucode_len > 0) {
xfer_len = min_t(size_t, ucode_len, HIDEEP_NVM_PAGE_SIZE);
diff --git a/drivers/input/touchscreen/resistive-adc-touch.c b/drivers/input/touchscreen/resistive-adc-touch.c
index e50af30183f4..744544a723b7 100644
--- a/drivers/input/touchscreen/resistive-adc-touch.c
+++ b/drivers/input/touchscreen/resistive-adc-touch.c
@@ -13,44 +13,78 @@
#include <linux/input/touchscreen.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define DRIVER_NAME "resistive-adc-touch"
#define GRTS_DEFAULT_PRESSURE_MIN 50000
+#define GRTS_DEFAULT_PRESSURE_MAX 65535
#define GRTS_MAX_POS_MASK GENMASK(11, 0)
+#define GRTS_MAX_CHANNELS 4
+
+enum grts_ch_type {
+ GRTS_CH_X,
+ GRTS_CH_Y,
+ GRTS_CH_PRESSURE,
+ GRTS_CH_Z1,
+ GRTS_CH_Z2,
+ GRTS_CH_MAX = GRTS_CH_Z2 + 1
+};
/**
* struct grts_state - generic resistive touch screen information struct
+ * @x_plate_ohms: resistance of the X plate
* @pressure_min: number representing the minimum for the pressure
* @pressure: are we getting pressure info or not
* @iio_chans: list of channels acquired
* @iio_cb: iio_callback buffer for the data
* @input: the input device structure that we register
* @prop: touchscreen properties struct
+ * @ch_map: map of channels that are defined for the touchscreen
*/
struct grts_state {
+ u32 x_plate_ohms;
u32 pressure_min;
bool pressure;
struct iio_channel *iio_chans;
struct iio_cb_buffer *iio_cb;
struct input_dev *input;
struct touchscreen_properties prop;
+ u8 ch_map[GRTS_CH_MAX];
};
static int grts_cb(const void *data, void *private)
{
const u16 *touch_info = data;
struct grts_state *st = private;
- unsigned int x, y, press = 0x0;
-
- /* channel data coming in buffer in the order below */
- x = touch_info[0];
- y = touch_info[1];
- if (st->pressure)
- press = touch_info[2];
+ unsigned int x, y, press = 0;
+
+ x = touch_info[st->ch_map[GRTS_CH_X]];
+ y = touch_info[st->ch_map[GRTS_CH_Y]];
+
+ if (st->ch_map[GRTS_CH_PRESSURE] < GRTS_MAX_CHANNELS) {
+ press = touch_info[st->ch_map[GRTS_CH_PRESSURE]];
+ } else if (st->ch_map[GRTS_CH_Z1] < GRTS_MAX_CHANNELS) {
+ unsigned int z1 = touch_info[st->ch_map[GRTS_CH_Z1]];
+ unsigned int z2 = touch_info[st->ch_map[GRTS_CH_Z2]];
+ unsigned int Rt;
+
+ Rt = z2;
+ Rt -= z1;
+ Rt *= st->x_plate_ohms;
+ Rt = DIV_ROUND_CLOSEST(Rt, 16);
+ Rt *= x;
+ Rt /= z1;
+ Rt = DIV_ROUND_CLOSEST(Rt, 256);
+ /*
+ * On increased pressure the resistance (Rt) is decreasing,
+ * so convert the values to make it look like real pressure.
+ */
+ if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
+ press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+ }
if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
/* report end of touch */
@@ -94,12 +128,77 @@ static void grts_disable(void *data)
iio_channel_release_all_cb(data);
}
+static int grts_map_channel(struct grts_state *st, struct device *dev,
+ enum grts_ch_type type, const char *name,
+ bool optional)
+{
+ int idx;
+
+ idx = device_property_match_string(dev, "io-channel-names", name);
+ if (idx < 0) {
+ if (!optional)
+ return idx;
+ idx = GRTS_MAX_CHANNELS;
+ } else if (idx >= GRTS_MAX_CHANNELS) {
+ return -EOVERFLOW;
+ }
+
+ st->ch_map[type] = idx;
+ return 0;
+}
+
+static int grts_get_properties(struct grts_state *st, struct device *dev)
+{
+ int error;
+
+ error = grts_map_channel(st, dev, GRTS_CH_X, "x", false);
+ if (error)
+ return error;
+
+ error = grts_map_channel(st, dev, GRTS_CH_Y, "y", false);
+ if (error)
+ return error;
+
+ /* pressure is optional */
+ error = grts_map_channel(st, dev, GRTS_CH_PRESSURE, "pressure", true);
+ if (error)
+ return error;
+
+ if (st->ch_map[GRTS_CH_PRESSURE] < GRTS_MAX_CHANNELS) {
+ st->pressure = true;
+ return 0;
+ }
+
+ /* if no pressure is defined, try optional z1 + z2 */
+ error = grts_map_channel(st, dev, GRTS_CH_Z1, "z1", true);
+ if (error)
+ return error;
+
+ if (st->ch_map[GRTS_CH_Z1] >= GRTS_MAX_CHANNELS)
+ return 0;
+
+ /* if z1 is provided z2 is not optional */
+ error = grts_map_channel(st, dev, GRTS_CH_Z2, "z2", true);
+ if (error)
+ return error;
+
+ error = device_property_read_u32(dev,
+ "touchscreen-x-plate-ohms",
+ &st->x_plate_ohms);
+ if (error) {
+ dev_err(dev, "can't get touchscreen-x-plate-ohms property\n");
+ return error;
+ }
+
+ st->pressure = true;
+ return 0;
+}
+
static int grts_probe(struct platform_device *pdev)
{
struct grts_state *st;
struct input_dev *input;
struct device *dev = &pdev->dev;
- struct iio_channel *chan;
int error;
st = devm_kzalloc(dev, sizeof(struct grts_state), GFP_KERNEL);
@@ -115,12 +214,13 @@ static int grts_probe(struct platform_device *pdev)
return error;
}
- chan = &st->iio_chans[0];
- st->pressure = false;
- while (chan && chan->indio_dev) {
- if (!strcmp(chan->channel->datasheet_name, "pressure"))
- st->pressure = true;
- chan++;
+ if (!device_property_present(dev, "io-channel-names"))
+ return -ENODEV;
+
+ error = grts_get_properties(st, dev);
+ if (error) {
+ dev_err(dev, "Failed to parse properties\n");
+ return error;
}
if (st->pressure) {
@@ -148,7 +248,7 @@ static int grts_probe(struct platform_device *pdev)
input_set_abs_params(input, ABS_Y, 0, GRTS_MAX_POS_MASK - 1, 0, 0);
if (st->pressure)
input_set_abs_params(input, ABS_PRESSURE, st->pressure_min,
- 0xffff, 0, 0);
+ GRTS_DEFAULT_PRESSURE_MAX, 0, 0);
input_set_capability(input, EV_KEY, BTN_TOUCH);
@@ -193,7 +293,7 @@ static struct platform_driver grts_driver = {
.probe = grts_probe,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(grts_of_match),
+ .of_match_table = grts_of_match,
},
};
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index ce2fe30d6b8a..b8d720d52013 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -338,7 +338,7 @@ static struct attribute *tsc200x_attrs[] = {
static umode_t tsc200x_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct tsc200x *ts = dev_get_drvdata(dev);
umode_t mode = attr->mode;
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index c847453a03c2..43c521f50c85 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -251,7 +251,7 @@ static int e2i_init(struct usbtouch_usb *usbtouch)
int ret;
struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x01, 0x02, 0x0000, 0x0081,
NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -531,7 +531,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
if (ret)
return ret;
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
MTOUCHUSB_RESET,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -543,7 +543,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
msleep(150);
for (i = 0; i < 3; i++) {
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
MTOUCHUSB_ASYNC_REPORT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -722,7 +722,7 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
}
/* start sending data */
- ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
TSC10_CMD_DATA1,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 8a1e70e00876..7887941730db 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -403,7 +403,7 @@ struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
struct icc_path **ptr, *path;
- ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+ ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
@@ -973,9 +973,14 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
}
node->avg_bw = node->init_avg;
node->peak_bw = node->init_peak;
+
+ if (provider->pre_aggregate)
+ provider->pre_aggregate(node);
+
if (provider->aggregate)
provider->aggregate(node, 0, node->init_avg, node->init_peak,
&node->avg_bw, &node->peak_bw);
+
provider->set(node, node);
node->avg_bw = 0;
node->peak_bw = 0;
@@ -1106,6 +1111,8 @@ void icc_sync_state(struct device *dev)
dev_dbg(p->dev, "interconnect provider is in synced state\n");
list_for_each_entry(n, &p->nodes, node_list) {
if (n->init_avg || n->init_peak) {
+ n->init_avg = 0;
+ n->init_peak = 0;
aggregate_requests(n);
p->set(n, n);
}
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index cdb3e11462c6..0d7a2500d0b8 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -74,6 +74,15 @@ config INTERCONNECT_QCOM_SC7180
This is a driver for the Qualcomm Network-on-Chip on sc7180-based
platforms.
+config INTERCONNECT_QCOM_SC7280
+ tristate "Qualcomm SC7280 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sc7280-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM660
tristate "Qualcomm SDM660 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 46fc62447156..2880129a6fe4 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -8,6 +8,7 @@ icc-osm-l3-objs := osm-l3.o
qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
+qnoc-sc7280-objs := sc7280.o
qnoc-sdm660-objs := sdm660.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index bf01d09dba6c..27cc5f03611c 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
{
size_t i;
struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
qn = node->data;
+ qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
{
size_t i;
struct qcom_icc_node *qn;
- struct qcom_icc_provider *qp;
qn = node->data;
- qp = to_qcom_provider(node->provider);
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
@@ -57,14 +60,16 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
qn->sum_avg[i] += avg_bw;
qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
}
+
+ if (node->init_avg || node->init_peak) {
+ qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
+ qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
+ }
}
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- for (i = 0; i < qn->num_bcms; i++)
- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
@@ -79,7 +84,6 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
struct icc_node *node;
if (!src)
@@ -88,12 +92,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
node = src;
qp = to_qcom_provider(node->provider);
- qn = node->data;
-
- qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
- node->avg_bw);
- qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
- node->peak_bw);
qcom_icc_bcm_voter_commit(qp->voter);
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
new file mode 100644
index 000000000000..8d1b55c3705c
--- /dev/null
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -0,0 +1,1938 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sc7280.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sc7280.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SC7280_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SC7280_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SC7280_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+ .name = "qnm_a1noc_cfg",
+ .id = SC7280_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+ .name = "xm_sdc1",
+ .id = SC7280_MASTER_SDCC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SC7280_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SC7280_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SC7280_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb2 = {
+ .name = "xm_usb2",
+ .id = SC7280_MASTER_USB2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SC7280_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SC7280_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+ .name = "qnm_a2noc_cfg",
+ .id = SC7280_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qnm_cnoc_datapath = {
+ .name = "qnm_cnoc_datapath",
+ .id = SC7280_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SC7280_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SC7280_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SC7280_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SC7280_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SC7280_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SC7280_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SC7280_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qnm_cnoc3_cnoc2 = {
+ .name = "qnm_cnoc3_cnoc2",
+ .id = SC7280_MASTER_CNOC3_CNOC2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 44,
+ .links = { SC7280_SLAVE_AHB2PHY_SOUTH, SC7280_SLAVE_AHB2PHY_NORTH,
+ SC7280_SLAVE_CAMERA_CFG, SC7280_SLAVE_CLK_CTL,
+ SC7280_SLAVE_CDSP_CFG, SC7280_SLAVE_RBCPR_CX_CFG,
+ SC7280_SLAVE_RBCPR_MX_CFG, SC7280_SLAVE_CRYPTO_0_CFG,
+ SC7280_SLAVE_CX_RDPM, SC7280_SLAVE_DCC_CFG,
+ SC7280_SLAVE_DISPLAY_CFG, SC7280_SLAVE_GFX3D_CFG,
+ SC7280_SLAVE_HWKM, SC7280_SLAVE_IMEM_CFG,
+ SC7280_SLAVE_IPA_CFG, SC7280_SLAVE_IPC_ROUTER_CFG,
+ SC7280_SLAVE_LPASS, SC7280_SLAVE_CNOC_MSS,
+ SC7280_SLAVE_MX_RDPM, SC7280_SLAVE_PCIE_0_CFG,
+ SC7280_SLAVE_PCIE_1_CFG, SC7280_SLAVE_PDM,
+ SC7280_SLAVE_PIMEM_CFG, SC7280_SLAVE_PKA_WRAPPER_CFG,
+ SC7280_SLAVE_PMU_WRAPPER_CFG, SC7280_SLAVE_QDSS_CFG,
+ SC7280_SLAVE_QSPI_0, SC7280_SLAVE_QUP_0,
+ SC7280_SLAVE_QUP_1, SC7280_SLAVE_SDCC_1,
+ SC7280_SLAVE_SDCC_2, SC7280_SLAVE_SDCC_4,
+ SC7280_SLAVE_SECURITY, SC7280_SLAVE_TCSR,
+ SC7280_SLAVE_TLMM, SC7280_SLAVE_UFS_MEM_CFG,
+ SC7280_SLAVE_USB2, SC7280_SLAVE_USB3_0,
+ SC7280_SLAVE_VENUS_CFG, SC7280_SLAVE_VSENSE_CTRL_CFG,
+ SC7280_SLAVE_A1NOC_CFG, SC7280_SLAVE_A2NOC_CFG,
+ SC7280_SLAVE_CNOC_MNOC_CFG, SC7280_SLAVE_SNOC_CFG },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SC7280_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 45,
+ .links = { SC7280_SLAVE_AHB2PHY_SOUTH, SC7280_SLAVE_AHB2PHY_NORTH,
+ SC7280_SLAVE_CAMERA_CFG, SC7280_SLAVE_CLK_CTL,
+ SC7280_SLAVE_CDSP_CFG, SC7280_SLAVE_RBCPR_CX_CFG,
+ SC7280_SLAVE_RBCPR_MX_CFG, SC7280_SLAVE_CRYPTO_0_CFG,
+ SC7280_SLAVE_CX_RDPM, SC7280_SLAVE_DCC_CFG,
+ SC7280_SLAVE_DISPLAY_CFG, SC7280_SLAVE_GFX3D_CFG,
+ SC7280_SLAVE_HWKM, SC7280_SLAVE_IMEM_CFG,
+ SC7280_SLAVE_IPA_CFG, SC7280_SLAVE_IPC_ROUTER_CFG,
+ SC7280_SLAVE_LPASS, SC7280_SLAVE_CNOC_MSS,
+ SC7280_SLAVE_MX_RDPM, SC7280_SLAVE_PCIE_0_CFG,
+ SC7280_SLAVE_PCIE_1_CFG, SC7280_SLAVE_PDM,
+ SC7280_SLAVE_PIMEM_CFG, SC7280_SLAVE_PKA_WRAPPER_CFG,
+ SC7280_SLAVE_PMU_WRAPPER_CFG, SC7280_SLAVE_QDSS_CFG,
+ SC7280_SLAVE_QSPI_0, SC7280_SLAVE_QUP_0,
+ SC7280_SLAVE_QUP_1, SC7280_SLAVE_SDCC_1,
+ SC7280_SLAVE_SDCC_2, SC7280_SLAVE_SDCC_4,
+ SC7280_SLAVE_SECURITY, SC7280_SLAVE_TCSR,
+ SC7280_SLAVE_TLMM, SC7280_SLAVE_UFS_MEM_CFG,
+ SC7280_SLAVE_USB2, SC7280_SLAVE_USB3_0,
+ SC7280_SLAVE_VENUS_CFG, SC7280_SLAVE_VSENSE_CTRL_CFG,
+ SC7280_SLAVE_A1NOC_CFG, SC7280_SLAVE_A2NOC_CFG,
+ SC7280_SLAVE_CNOC2_CNOC3, SC7280_SLAVE_CNOC_MNOC_CFG,
+ SC7280_SLAVE_SNOC_CFG },
+};
+
+static struct qcom_icc_node qnm_cnoc2_cnoc3 = {
+ .name = "qnm_cnoc2_cnoc3",
+ .id = SC7280_MASTER_CNOC2_CNOC3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 9,
+ .links = { SC7280_SLAVE_AOSS, SC7280_SLAVE_APPSS,
+ SC7280_SLAVE_CNOC_A2NOC, SC7280_SLAVE_DDRSS_CFG,
+ SC7280_SLAVE_BOOT_IMEM, SC7280_SLAVE_IMEM,
+ SC7280_SLAVE_PIMEM, SC7280_SLAVE_QDSS_STM,
+ SC7280_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SC7280_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 9,
+ .links = { SC7280_SLAVE_AOSS, SC7280_SLAVE_APPSS,
+ SC7280_SLAVE_CNOC3_CNOC2, SC7280_SLAVE_DDRSS_CFG,
+ SC7280_SLAVE_BOOT_IMEM, SC7280_SLAVE_IMEM,
+ SC7280_SLAVE_PIMEM, SC7280_SLAVE_QDSS_STM,
+ SC7280_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SC7280_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_PCIE_0, SC7280_SLAVE_PCIE_1 },
+};
+
+static struct qcom_icc_node qnm_cnoc_dc_noc = {
+ .name = "qnm_cnoc_dc_noc",
+ .id = SC7280_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_LLCC_CFG, SC7280_SLAVE_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SC7280_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SC7280_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SC7280_MASTER_APPSS_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC,
+ SC7280_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SC7280_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+ .name = "qnm_gemnoc_cfg",
+ .id = SC7280_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 5,
+ .links = { SC7280_SLAVE_MSS_PROC_MS_MPU_CFG, SC7280_SLAVE_MCDMA_MS_MPU_CFG,
+ SC7280_SLAVE_SERVICE_GEM_NOC_1, SC7280_SLAVE_SERVICE_GEM_NOC_2,
+ SC7280_SLAVE_SERVICE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SC7280_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SC7280_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SC7280_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SC7280_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SC7280_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SC7280_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC,
+ SC7280_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SC7280_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SC7280_SLAVE_LPASS_CORE_CFG, SC7280_SLAVE_LPASS_LPI_CFG,
+ SC7280_SLAVE_LPASS_MPU_CFG, SC7280_SLAVE_LPASS_TOP_CFG,
+ SC7280_SLAVE_SERVICES_LPASS_AML_NOC, SC7280_SLAVE_SERVICE_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SC7280_MASTER_LLCC,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+ .name = "qnm_mnoc_cfg",
+ .id = SC7280_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SC7280_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cpu = {
+ .name = "qnm_video_cpu",
+ .id = SC7280_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf = {
+ .name = "qxm_camnoc_hf",
+ .id = SC7280_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_icp = {
+ .name = "qxm_camnoc_icp",
+ .id = SC7280_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SC7280_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SC7280_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SC7280_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SC7280_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SC7280_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SC7280_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SC7280_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SC7280_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SC7280_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SC7280_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SC7280_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SC7280_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SC7280_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SC7280_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SC7280_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SC7280_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SC7280_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SC7280_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SC7280_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SC7280_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_compute_cfg = {
+ .name = "qhs_compute_cfg",
+ .id = SC7280_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SC7280_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SC7280_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SC7280_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SC7280_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SC7280_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SC7280_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SC7280_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+ .name = "qhs_hwkm",
+ .id = SC7280_SLAVE_HWKM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SC7280_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SC7280_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SC7280_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SC7280_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SC7280_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SC7280_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SC7280_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SC7280_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SC7280_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SC7280_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper_cfg = {
+ .name = "qhs_pka_wrapper_cfg",
+ .id = SC7280_SLAVE_PKA_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
+ .name = "qhs_pmu_wrapper_cfg",
+ .id = SC7280_SLAVE_PMU_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SC7280_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SC7280_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SC7280_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SC7280_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+ .name = "qhs_sdc1",
+ .id = SC7280_SLAVE_SDCC_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SC7280_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SC7280_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SC7280_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SC7280_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SC7280_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SC7280_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb2 = {
+ .name = "qhs_usb2",
+ .id = SC7280_SLAVE_USB2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SC7280_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SC7280_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SC7280_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+ .name = "qns_a1_noc_cfg",
+ .id = SC7280_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+ .name = "qns_a2_noc_cfg",
+ .id = SC7280_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qns_cnoc2_cnoc3 = {
+ .name = "qns_cnoc2_cnoc3",
+ .id = SC7280_SLAVE_CNOC2_CNOC3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CNOC2_CNOC3 },
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+ .name = "qns_mnoc_cfg",
+ .id = SC7280_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SC7280_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SC7280_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SC7280_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_cnoc3_cnoc2 = {
+ .name = "qns_cnoc3_cnoc2",
+ .id = SC7280_SLAVE_CNOC3_CNOC2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CNOC3_CNOC2 },
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SC7280_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SC7280_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = SC7280_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SC7280_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SC7280_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SC7280_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SC7280_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SC7280_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SC7280_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SC7280_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+ .name = "qns_gemnoc",
+ .id = SC7280_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7280_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SC7280_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_modem_ms_mpu_cfg = {
+ .name = "qhs_modem_ms_mpu_cfg",
+ .id = SC7280_SLAVE_MCDMA_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SC7280_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SC7280_SLAVE_LLCC,
+ .channels = 2,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SC7280_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+ .name = "srvc_even_gemnoc",
+ .id = SC7280_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+ .name = "srvc_odd_gemnoc",
+ .id = SC7280_SLAVE_SERVICE_GEM_NOC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+ .name = "srvc_sys_gemnoc",
+ .id = SC7280_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SC7280_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SC7280_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SC7280_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SC7280_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SC7280_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SC7280_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SC7280_SLAVE_EBI1,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SC7280_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SC7280_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SC7280_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SC7280_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7280_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SC7280_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SC7280_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7280_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SC7280_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7280_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SC7280_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 2,
+ .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 47,
+ .nodes = { &qnm_cnoc3_cnoc2, &xm_qdss_dap,
+ &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_compute_cfg, &qhs_cpr_cx,
+ &qhs_cpr_mx, &qhs_crypto0_cfg,
+ &qhs_cx_rdpm, &qhs_dcc_cfg,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_hwkm, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pimem_cfg, &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_security, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg, &qhs_usb2,
+ &qhs_usb3_0, &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg, &qns_cnoc2_cnoc3,
+ &qns_mnoc_cfg, &qns_snoc_cfg,
+ &qnm_cnoc2_cnoc3, &qhs_aoss,
+ &qhs_apss, &qns_cnoc3_cnoc2,
+ &qns_cnoc_a2noc, &qns_ddrss_cfg },
+};
+
+static struct qcom_icc_bcm bcm_cn2 = {
+ .name = "CN2",
+ .num_nodes = 6,
+ .nodes = { &qhs_lpass_cfg, &qhs_pdm,
+ &qhs_qspi, &qhs_sdc1,
+ &qhs_sdc2, &qhs_sdc4 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 1,
+ .nodes = { &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_co3 = {
+ .name = "CO3",
+ .num_nodes = 1,
+ .nodes = { &qxm_nsp },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 2,
+ .nodes = { &qxm_camnoc_hf, &qxm_mdp0 },
+};
+
+static struct qcom_icc_bcm bcm_mm4 = {
+ .name = "MM4",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_mm5 = {
+ .name = "MM5",
+ .num_nodes = 3,
+ .nodes = { &qnm_video0, &qxm_camnoc_icp,
+ &qxm_camnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .num_nodes = 2,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .num_nodes = 1,
+ .nodes = { &qnm_cmpnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .num_nodes = 1,
+ .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .num_nodes = 1,
+ .nodes = { &xm_pcie3_0 },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .num_nodes = 1,
+ .nodes = { &xm_pcie3_1 },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn14,
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB2] = &xm_usb2,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sc7280_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sc7280_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+};
+
+static struct qcom_icc_node *clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+};
+
+static struct qcom_icc_desc sc7280_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm *cnoc2_bcms[] = {
+ &bcm_cn1,
+ &bcm_cn2,
+};
+
+static struct qcom_icc_node *cnoc2_nodes[] = {
+ [MASTER_CNOC3_CNOC2] = &qnm_cnoc3_cnoc2,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_HWKM] = &qhs_hwkm,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
+ [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB2] = &qhs_usb2,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+ [SLAVE_CNOC2_CNOC3] = &qns_cnoc2_cnoc3,
+ [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+};
+
+static struct qcom_icc_desc sc7280_cnoc2 = {
+ .nodes = cnoc2_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc2_nodes),
+ .bcms = cnoc2_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc2_bcms),
+};
+
+static struct qcom_icc_bcm *cnoc3_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node *cnoc3_nodes[] = {
+ [MASTER_CNOC2_CNOC3] = &qnm_cnoc2_cnoc3,
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CNOC3_CNOC2] = &qns_cnoc3_cnoc2,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sc7280_cnoc3 = {
+ .nodes = cnoc3_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc3_nodes),
+ .bcms = cnoc3_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc3_bcms),
+};
+
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static struct qcom_icc_desc sc7280_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MCDMA_MS_MPU_CFG] = &qhs_modem_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+};
+
+static struct qcom_icc_desc sc7280_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static struct qcom_icc_desc sc7280_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static struct qcom_icc_desc sc7280_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm4,
+ &bcm_mm5,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_PROC] = &qnm_video_cpu,
+ [MASTER_CAMNOC_HF] = &qxm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static struct qcom_icc_desc sc7280_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co3,
+};
+
+static struct qcom_icc_node *nsp_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static struct qcom_icc_desc sc7280_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn7,
+ &bcm_sn8,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static struct qcom_icc_desc sc7280_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate_extended = qcom_icc_xlate_extended;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sc7280-aggre1-noc",
+ .data = &sc7280_aggre1_noc},
+ { .compatible = "qcom,sc7280-aggre2-noc",
+ .data = &sc7280_aggre2_noc},
+ { .compatible = "qcom,sc7280-clk-virt",
+ .data = &sc7280_clk_virt},
+ { .compatible = "qcom,sc7280-cnoc2",
+ .data = &sc7280_cnoc2},
+ { .compatible = "qcom,sc7280-cnoc3",
+ .data = &sc7280_cnoc3},
+ { .compatible = "qcom,sc7280-dc-noc",
+ .data = &sc7280_dc_noc},
+ { .compatible = "qcom,sc7280-gem-noc",
+ .data = &sc7280_gem_noc},
+ { .compatible = "qcom,sc7280-lpass-ag-noc",
+ .data = &sc7280_lpass_ag_noc},
+ { .compatible = "qcom,sc7280-mc-virt",
+ .data = &sc7280_mc_virt},
+ { .compatible = "qcom,sc7280-mmss-noc",
+ .data = &sc7280_mmss_noc},
+ { .compatible = "qcom,sc7280-nsp-noc",
+ .data = &sc7280_nsp_noc},
+ { .compatible = "qcom,sc7280-system-noc",
+ .data = &sc7280_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sc7280",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("SC7280 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sc7280.h b/drivers/interconnect/qcom/sc7280.h
new file mode 100644
index 000000000000..175e400305c5
--- /dev/null
+++ b/drivers/interconnect/qcom/sc7280.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SC7280 interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SC7280_H
+#define __DRIVERS_INTERCONNECT_QCOM_SC7280_H
+
+#define SC7280_MASTER_GPU_TCU 0
+#define SC7280_MASTER_SYS_TCU 1
+#define SC7280_MASTER_APPSS_PROC 2
+#define SC7280_MASTER_LLCC 3
+#define SC7280_MASTER_CNOC_LPASS_AG_NOC 4
+#define SC7280_MASTER_CDSP_NOC_CFG 5
+#define SC7280_MASTER_QDSS_BAM 6
+#define SC7280_MASTER_QSPI_0 7
+#define SC7280_MASTER_QUP_0 8
+#define SC7280_MASTER_QUP_1 9
+#define SC7280_MASTER_A1NOC_CFG 10
+#define SC7280_MASTER_A2NOC_CFG 11
+#define SC7280_MASTER_A1NOC_SNOC 12
+#define SC7280_MASTER_A2NOC_SNOC 13
+#define SC7280_MASTER_COMPUTE_NOC 14
+#define SC7280_MASTER_CNOC2_CNOC3 15
+#define SC7280_MASTER_CNOC3_CNOC2 16
+#define SC7280_MASTER_CNOC_A2NOC 17
+#define SC7280_MASTER_CNOC_DC_NOC 18
+#define SC7280_MASTER_GEM_NOC_CFG 19
+#define SC7280_MASTER_GEM_NOC_CNOC 20
+#define SC7280_MASTER_GEM_NOC_PCIE_SNOC 21
+#define SC7280_MASTER_GFX3D 22
+#define SC7280_MASTER_CNOC_MNOC_CFG 23
+#define SC7280_MASTER_MNOC_HF_MEM_NOC 24
+#define SC7280_MASTER_MNOC_SF_MEM_NOC 25
+#define SC7280_MASTER_ANOC_PCIE_GEM_NOC 26
+#define SC7280_MASTER_SNOC_CFG 27
+#define SC7280_MASTER_SNOC_GC_MEM_NOC 28
+#define SC7280_MASTER_SNOC_SF_MEM_NOC 29
+#define SC7280_MASTER_VIDEO_P0 30
+#define SC7280_MASTER_VIDEO_PROC 31
+#define SC7280_MASTER_QUP_CORE_0 32
+#define SC7280_MASTER_QUP_CORE_1 33
+#define SC7280_MASTER_CAMNOC_HF 34
+#define SC7280_MASTER_CAMNOC_ICP 35
+#define SC7280_MASTER_CAMNOC_SF 36
+#define SC7280_MASTER_CRYPTO 37
+#define SC7280_MASTER_IPA 38
+#define SC7280_MASTER_MDP0 39
+#define SC7280_MASTER_CDSP_PROC 40
+#define SC7280_MASTER_PIMEM 41
+#define SC7280_MASTER_GIC 42
+#define SC7280_MASTER_PCIE_0 43
+#define SC7280_MASTER_PCIE_1 44
+#define SC7280_MASTER_QDSS_DAP 45
+#define SC7280_MASTER_QDSS_ETR 46
+#define SC7280_MASTER_SDCC_1 47
+#define SC7280_MASTER_SDCC_2 48
+#define SC7280_MASTER_SDCC_4 49
+#define SC7280_MASTER_UFS_MEM 50
+#define SC7280_MASTER_USB2 51
+#define SC7280_MASTER_USB3_0 52
+#define SC7280_SLAVE_EBI1 53
+#define SC7280_SLAVE_AHB2PHY_SOUTH 54
+#define SC7280_SLAVE_AHB2PHY_NORTH 55
+#define SC7280_SLAVE_AOSS 56
+#define SC7280_SLAVE_APPSS 57
+#define SC7280_SLAVE_CAMERA_CFG 58
+#define SC7280_SLAVE_CLK_CTL 59
+#define SC7280_SLAVE_CDSP_CFG 60
+#define SC7280_SLAVE_RBCPR_CX_CFG 61
+#define SC7280_SLAVE_RBCPR_MX_CFG 62
+#define SC7280_SLAVE_CRYPTO_0_CFG 63
+#define SC7280_SLAVE_CX_RDPM 64
+#define SC7280_SLAVE_DCC_CFG 65
+#define SC7280_SLAVE_DISPLAY_CFG 66
+#define SC7280_SLAVE_GFX3D_CFG 67
+#define SC7280_SLAVE_HWKM 68
+#define SC7280_SLAVE_IMEM_CFG 69
+#define SC7280_SLAVE_IPA_CFG 70
+#define SC7280_SLAVE_IPC_ROUTER_CFG 71
+#define SC7280_SLAVE_LLCC_CFG 72
+#define SC7280_SLAVE_LPASS 73
+#define SC7280_SLAVE_LPASS_CORE_CFG 74
+#define SC7280_SLAVE_LPASS_LPI_CFG 75
+#define SC7280_SLAVE_LPASS_MPU_CFG 76
+#define SC7280_SLAVE_LPASS_TOP_CFG 77
+#define SC7280_SLAVE_MSS_PROC_MS_MPU_CFG 78
+#define SC7280_SLAVE_MCDMA_MS_MPU_CFG 79
+#define SC7280_SLAVE_CNOC_MSS 80
+#define SC7280_SLAVE_MX_RDPM 81
+#define SC7280_SLAVE_PCIE_0_CFG 82
+#define SC7280_SLAVE_PCIE_1_CFG 83
+#define SC7280_SLAVE_PDM 84
+#define SC7280_SLAVE_PIMEM_CFG 85
+#define SC7280_SLAVE_PKA_WRAPPER_CFG 86
+#define SC7280_SLAVE_PMU_WRAPPER_CFG 87
+#define SC7280_SLAVE_QDSS_CFG 88
+#define SC7280_SLAVE_QSPI_0 89
+#define SC7280_SLAVE_QUP_0 90
+#define SC7280_SLAVE_QUP_1 91
+#define SC7280_SLAVE_SDCC_1 92
+#define SC7280_SLAVE_SDCC_2 93
+#define SC7280_SLAVE_SDCC_4 94
+#define SC7280_SLAVE_SECURITY 95
+#define SC7280_SLAVE_TCSR 96
+#define SC7280_SLAVE_TLMM 97
+#define SC7280_SLAVE_UFS_MEM_CFG 98
+#define SC7280_SLAVE_USB2 99
+#define SC7280_SLAVE_USB3_0 100
+#define SC7280_SLAVE_VENUS_CFG 101
+#define SC7280_SLAVE_VSENSE_CTRL_CFG 102
+#define SC7280_SLAVE_A1NOC_CFG 103
+#define SC7280_SLAVE_A1NOC_SNOC 104
+#define SC7280_SLAVE_A2NOC_CFG 105
+#define SC7280_SLAVE_A2NOC_SNOC 106
+#define SC7280_SLAVE_CNOC2_CNOC3 107
+#define SC7280_SLAVE_CNOC3_CNOC2 108
+#define SC7280_SLAVE_CNOC_A2NOC 109
+#define SC7280_SLAVE_DDRSS_CFG 110
+#define SC7280_SLAVE_GEM_NOC_CNOC 111
+#define SC7280_SLAVE_GEM_NOC_CFG 112
+#define SC7280_SLAVE_SNOC_GEM_NOC_GC 113
+#define SC7280_SLAVE_SNOC_GEM_NOC_SF 114
+#define SC7280_SLAVE_LLCC 115
+#define SC7280_SLAVE_MNOC_HF_MEM_NOC 116
+#define SC7280_SLAVE_MNOC_SF_MEM_NOC 117
+#define SC7280_SLAVE_CNOC_MNOC_CFG 118
+#define SC7280_SLAVE_CDSP_MEM_NOC 119
+#define SC7280_SLAVE_MEM_NOC_PCIE_SNOC 120
+#define SC7280_SLAVE_ANOC_PCIE_GEM_NOC 121
+#define SC7280_SLAVE_SNOC_CFG 122
+#define SC7280_SLAVE_QUP_CORE_0 123
+#define SC7280_SLAVE_QUP_CORE_1 124
+#define SC7280_SLAVE_BOOT_IMEM 125
+#define SC7280_SLAVE_IMEM 126
+#define SC7280_SLAVE_PIMEM 127
+#define SC7280_SLAVE_SERVICE_NSP_NOC 128
+#define SC7280_SLAVE_SERVICE_A1NOC 129
+#define SC7280_SLAVE_SERVICE_A2NOC 130
+#define SC7280_SLAVE_SERVICE_GEM_NOC_1 131
+#define SC7280_SLAVE_SERVICE_MNOC 132
+#define SC7280_SLAVE_SERVICES_LPASS_AML_NOC 133
+#define SC7280_SLAVE_SERVICE_LPASS_AG_NOC 134
+#define SC7280_SLAVE_SERVICE_GEM_NOC_2 135
+#define SC7280_SLAVE_SERVICE_SNOC 136
+#define SC7280_SLAVE_SERVICE_GEM_NOC 137
+#define SC7280_SLAVE_PCIE_0 138
+#define SC7280_SLAVE_PCIE_1 139
+#define SC7280_SLAVE_QDSS_STM 140
+#define SC7280_SLAVE_TCU 141
+
+#endif
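
For a sense of how the paths described by this provider end up being used, a client driver goes through the generic interconnect API rather than touching these node tables directly. The sketch below is illustrative only and is not part of this patch: it assumes a hypothetical consumer whose DT node carries "interconnects"/"interconnect-names" properties; the "ufs-ddr" name and the bandwidth figures are placeholders, while of_icc_get(), icc_set_bw() and icc_put() are the real framework entry points.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

/*
 * Hypothetical consumer sketch (not part of this patch): vote bandwidth
 * on a path such as MASTER_UFS_MEM -> SLAVE_EBI1 that is described by
 * the device's "interconnects"/"interconnect-names" DT properties.
 * The "ufs-ddr" name and the bandwidth values are placeholders.
 */
static int example_vote_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "ufs-ddr");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* average and peak bandwidth are given in kBps */
	ret = icc_set_bw(path, 100000, 200000);

	/* a real driver would keep the path and release it on teardown */
	icc_put(path);

	return ret;
}
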
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 1f111b399bca..07b7c25cbed8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -400,9 +400,11 @@ config HYPERV_IOMMU
config VIRTIO_IOMMU
tristate "Virtio IOMMU driver"
depends on VIRTIO
- depends on ARM64
+ depends on (ARM64 || X86)
select IOMMU_API
+ select IOMMU_DMA
select INTERVAL_TREE
+ select ACPI_VIOT if ACPI
help
Para-virtualised IOMMU driver with virtio.
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 55dd38d814d9..416815a525d6 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,8 +11,6 @@
#include "amd_iommu_types.h"
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index d006724f4dc2..46280e6e1535 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -153,7 +153,8 @@ int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
static bool amd_iommu_detected;
-static bool __initdata amd_iommu_disabled;
+static bool amd_iommu_disabled __initdata;
+static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;
u16 amd_iommu_last_bdf; /* largest PCI device id we have
@@ -231,7 +232,6 @@ enum iommu_init_state {
IOMMU_ENABLED,
IOMMU_PCI_INIT,
IOMMU_INTERRUPTS_EN,
- IOMMU_DMA_OPS,
IOMMU_INITIALIZED,
IOMMU_NOT_FOUND,
IOMMU_INIT_ERROR,
@@ -1908,8 +1908,8 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pci_info(pdev, "Extended features (%#llx):",
- iommu->features);
+ pr_info("Extended features (%#llx):", iommu->features);
+
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
@@ -2817,7 +2817,7 @@ out:
return ret;
}
-static bool detect_ivrs(void)
+static bool __init detect_ivrs(void)
{
struct acpi_table_header *ivrs_base;
acpi_status status;
@@ -2834,6 +2834,9 @@ static bool detect_ivrs(void)
acpi_put_table(ivrs_base);
+ if (amd_iommu_force_enable)
+ goto out;
+
/* Don't use IOMMU if there is Stoney Ridge graphics */
for (i = 0; i < 32; i++) {
u32 pci_id;
@@ -2845,6 +2848,7 @@ static bool detect_ivrs(void)
}
}
+out:
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
@@ -2895,10 +2899,6 @@ static int __init state_next(void)
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
break;
case IOMMU_INTERRUPTS_EN:
- ret = amd_iommu_init_dma_ops();
- init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
- break;
- case IOMMU_DMA_OPS:
init_state = IOMMU_INITIALIZED;
break;
case IOMMU_INITIALIZED:
@@ -3100,6 +3100,8 @@ static int __init parse_amd_iommu_options(char *str)
for (; *str; ++str) {
if (strncmp(str, "fullflush", 9) == 0)
amd_iommu_unmap_flush = true;
+ if (strncmp(str, "force_enable", 12) == 0)
+ amd_iommu_force_enable = true;
if (strncmp(str, "off", 3) == 0)
amd_iommu_disabled = true;
if (strncmp(str, "force_isolation", 15) == 0)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 3ac42bbdefc6..811a49a95d04 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -30,7 +30,6 @@
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
-#include <linux/iova.h>
#include <linux/io-pgtable.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
@@ -1713,7 +1712,7 @@ static void amd_iommu_probe_finalize(struct device *dev)
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_DMA)
- iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
else
set_dma_ops(dev, NULL);
}
@@ -1773,13 +1772,22 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
+static void __init amd_iommu_init_dma_ops(void)
+{
+ swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
+
+ if (amd_iommu_unmap_flush)
+ pr_info("IO/TLB flush on unmap enabled\n");
+ else
+ pr_info("Lazy IO/TLB flushing enabled\n");
+ iommu_set_dma_strict(amd_iommu_unmap_flush);
+}
+
int __init amd_iommu_init_api(void)
{
- int ret, err = 0;
+ int err;
- ret = iova_cache_get();
- if (ret)
- return ret;
+ amd_iommu_init_dma_ops();
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
@@ -1796,19 +1804,6 @@ int __init amd_iommu_init_api(void)
return 0;
}
-int __init amd_iommu_init_dma_ops(void)
-{
- swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-
- if (amd_iommu_unmap_flush)
- pr_info("IO/TLB flush on unmap enabled\n");
- else
- pr_info("Lazy IO/TLB flushing enabled\n");
- iommu_set_dma_strict(amd_iommu_unmap_flush);
- return 0;
-
-}
-
/*****************************************************************************
*
* The following functions belong to the exported interface of AMD IOMMU
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index bb251cab61f3..ee66d1f4cb81 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -435,9 +435,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return true;
}
-static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
+bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
- return false;
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return false;
+
+ return master->stall_enabled;
}
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
@@ -445,8 +449,8 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
return false;
- /* SSID and IOPF support are mandatory for the moment */
- return master->ssid_bits && arm_smmu_iopf_supported(master);
+ /* SSID support is mandatory for the moment */
+ return master->ssid_bits;
}
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
@@ -459,13 +463,55 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
return enabled;
}
+static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
+{
+ int ret;
+ struct device *dev = master->dev;
+
+ /*
+ * Drivers for devices supporting PRI or stall should enable IOPF first.
+ * Others have device-specific fault handlers and don't need IOPF.
+ */
+ if (!arm_smmu_master_iopf_supported(master))
+ return 0;
+
+ if (!master->iopf_enabled)
+ return -EINVAL;
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
+ if (ret)
+ return ret;
+
+ ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ if (ret) {
+ iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ return ret;
+ }
+ return 0;
+}
+
+static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
+{
+ struct device *dev = master->dev;
+
+ if (!master->iopf_enabled)
+ return;
+
+ iommu_unregister_device_fault_handler(dev);
+ iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+}
+
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
+ int ret;
+
mutex_lock(&sva_lock);
- master->sva_enabled = true;
+ ret = arm_smmu_master_sva_enable_iopf(master);
+ if (!ret)
+ master->sva_enabled = true;
mutex_unlock(&sva_lock);
- return 0;
+ return ret;
}
int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
@@ -476,6 +522,7 @@ int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
mutex_unlock(&sva_lock);
return -EBUSY;
}
+ arm_smmu_master_sva_disable_iopf(master);
master->sva_enabled = false;
mutex_unlock(&sva_lock);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 54b2f27b81d4..235f9bdaeaf2 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -23,7 +23,6 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
@@ -32,6 +31,7 @@
#include <linux/amba/bus.h>
#include "arm-smmu-v3.h"
+#include "../../iommu-sva-lib.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@@ -313,6 +313,11 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
}
cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
break;
+ case CMDQ_OP_RESUME:
+ cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid);
+ cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp);
+ cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag);
+ break;
case CMDQ_OP_CMD_SYNC:
if (ent->sync.msiaddr) {
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
@@ -352,7 +357,7 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
- static const char *cerror_str[] = {
+ static const char * const cerror_str[] = {
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
[CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
[CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
@@ -374,6 +379,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
switch (idx) {
case CMDQ_ERR_CERROR_ABT_IDX:
dev_err(smmu->dev, "retrying command fetch\n");
+ return;
case CMDQ_ERR_CERROR_NONE_IDX:
return;
case CMDQ_ERR_CERROR_ATC_INV_IDX:
@@ -876,6 +882,44 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
}
+static int arm_smmu_page_response(struct device *dev,
+ struct iommu_fault_event *unused,
+ struct iommu_page_response *resp)
+{
+ struct arm_smmu_cmdq_ent cmd = {0};
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ int sid = master->streams[0].id;
+
+ if (master->stall_enabled) {
+ cmd.opcode = CMDQ_OP_RESUME;
+ cmd.resume.sid = sid;
+ cmd.resume.stag = resp->grpid;
+ switch (resp->code) {
+ case IOMMU_PAGE_RESP_INVALID:
+ case IOMMU_PAGE_RESP_FAILURE:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+ break;
+ case IOMMU_PAGE_RESP_SUCCESS:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
+ /*
+ * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP.
+ * RESUME consumption guarantees that the stalled transaction will be
+ * terminated... at some point in the future. PRI_RESP is fire and
+ * forget.
+ */
+
+ return 0;
+}
+
/* Context descriptor manipulation functions */
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
{
@@ -986,7 +1030,6 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
u64 val;
bool cd_live;
__le64 *cdptr;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
return -E2BIG;
@@ -1031,8 +1074,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
CTXDESC_CD_0_V;
- /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
- if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ if (smmu_domain->stall_enabled)
val |= CTXDESC_CD_0_S;
}
@@ -1276,7 +1318,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
FIELD_PREP(STRTAB_STE_1_STRW, strw));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
- !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+ !master->stall_enabled)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
@@ -1353,7 +1395,6 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
}
-__maybe_unused
static struct arm_smmu_master *
arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
{
@@ -1377,18 +1418,118 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
}
/* IRQ and event handlers */
+static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+{
+ int ret;
+ u32 reason;
+ u32 perm = 0;
+ struct arm_smmu_master *master;
+ bool ssid_valid = evt[0] & EVTQ_0_SSV;
+ u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
+ struct iommu_fault_event fault_evt = { };
+ struct iommu_fault *flt = &fault_evt.fault;
+
+ switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+ case EVT_ID_TRANSLATION_FAULT:
+ reason = IOMMU_FAULT_REASON_PTE_FETCH;
+ break;
+ case EVT_ID_ADDR_SIZE_FAULT:
+ reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
+ break;
+ case EVT_ID_ACCESS_FAULT:
+ reason = IOMMU_FAULT_REASON_ACCESS;
+ break;
+ case EVT_ID_PERMISSION_FAULT:
+ reason = IOMMU_FAULT_REASON_PERMISSION;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Stage-2 is always pinned at the moment */
+ if (evt[1] & EVTQ_1_S2)
+ return -EFAULT;
+
+ if (evt[1] & EVTQ_1_RnW)
+ perm |= IOMMU_FAULT_PERM_READ;
+ else
+ perm |= IOMMU_FAULT_PERM_WRITE;
+
+ if (evt[1] & EVTQ_1_InD)
+ perm |= IOMMU_FAULT_PERM_EXEC;
+
+ if (evt[1] & EVTQ_1_PnU)
+ perm |= IOMMU_FAULT_PERM_PRIV;
+
+ if (evt[1] & EVTQ_1_STALL) {
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request) {
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
+
+ if (ssid_valid) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ }
+ } else {
+ flt->type = IOMMU_FAULT_DMA_UNRECOV;
+ flt->event = (struct iommu_fault_unrecoverable) {
+ .reason = reason,
+ .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
+
+ if (ssid_valid) {
+ flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
+ flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+ }
+ }
+
+ mutex_lock(&smmu->streams_mutex);
+ master = arm_smmu_find_master(smmu, sid);
+ if (!master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = iommu_report_device_fault(master->dev, &fault_evt);
+ if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
+ /* Nobody cared, abort the access */
+ struct iommu_page_response resp = {
+ .pasid = flt->prm.pasid,
+ .grpid = flt->prm.grpid,
+ .code = IOMMU_PAGE_RESP_FAILURE,
+ };
+ arm_smmu_page_response(master->dev, &fault_evt, &resp);
+ }
+
+out_unlock:
+ mutex_unlock(&smmu->streams_mutex);
+ return ret;
+}
+
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
- int i;
+ int i, ret;
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->evtq.q;
struct arm_smmu_ll_queue *llq = &q->llq;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
u64 evt[EVTQ_ENT_DWORDS];
do {
while (!queue_remove_raw(q, evt)) {
u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
+ ret = arm_smmu_handle_evt(smmu, evt);
+ if (!ret || !__ratelimit(&rs))
+ continue;
+
dev_info(smmu->dev, "event 0x%02x received:\n", id);
for (i = 0; i < ARRAY_SIZE(evt); ++i)
dev_info(smmu->dev, "\t0x%016llx\n",
@@ -1923,6 +2064,8 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
cfg->s1cdmax = master->ssid_bits;
+ smmu_domain->stall_enabled = master->stall_enabled;
+
ret = arm_smmu_alloc_cd_tables(smmu_domain);
if (ret)
goto out_free_asid;
@@ -2270,6 +2413,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
ret = -EINVAL;
goto out_unlock;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+ smmu_domain->stall_enabled != master->stall_enabled) {
+ dev_err(dev, "cannot attach to stall-%s domain\n",
+ smmu_domain->stall_enabled ? "enabled" : "disabled");
+ ret = -EINVAL;
+ goto out_unlock;
}
master->domain = smmu_domain;
@@ -2508,6 +2657,11 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->ssid_bits = min_t(u8, master->ssid_bits,
CTXDESC_LINEAR_CDMAX);
+ if ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+ device_property_read_bool(dev, "dma-can-stall")) ||
+ smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ master->stall_enabled = true;
+
return &smmu->iommu;
err_free_master:
@@ -2525,7 +2679,8 @@ static void arm_smmu_release_device(struct device *dev)
return;
master = dev_iommu_priv_get(dev);
- WARN_ON(arm_smmu_master_sva_enabled(master));
+ if (WARN_ON(arm_smmu_master_sva_enabled(master)))
+ iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
@@ -2595,6 +2750,8 @@ static bool arm_smmu_dev_has_feature(struct device *dev,
return false;
switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ return arm_smmu_master_iopf_supported(master);
case IOMMU_DEV_FEAT_SVA:
return arm_smmu_master_sva_supported(master);
default:
@@ -2611,6 +2768,8 @@ static bool arm_smmu_dev_feature_enabled(struct device *dev,
return false;
switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ return master->iopf_enabled;
case IOMMU_DEV_FEAT_SVA:
return arm_smmu_master_sva_enabled(master);
default:
@@ -2621,6 +2780,8 @@ static bool arm_smmu_dev_feature_enabled(struct device *dev,
static int arm_smmu_dev_enable_feature(struct device *dev,
enum iommu_dev_features feat)
{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
if (!arm_smmu_dev_has_feature(dev, feat))
return -ENODEV;
@@ -2628,8 +2789,11 @@ static int arm_smmu_dev_enable_feature(struct device *dev,
return -EBUSY;
switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ master->iopf_enabled = true;
+ return 0;
case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_enable_sva(dev_iommu_priv_get(dev));
+ return arm_smmu_master_enable_sva(master);
default:
return -EINVAL;
}
@@ -2638,12 +2802,19 @@ static int arm_smmu_dev_enable_feature(struct device *dev,
static int arm_smmu_dev_disable_feature(struct device *dev,
enum iommu_dev_features feat)
{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
if (!arm_smmu_dev_feature_enabled(dev, feat))
return -EINVAL;
switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ if (master->sva_enabled)
+ return -EBUSY;
+ master->iopf_enabled = false;
+ return 0;
case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_disable_sva(dev_iommu_priv_get(dev));
+ return arm_smmu_master_disable_sva(master);
default:
return -EINVAL;
}
@@ -2673,6 +2844,7 @@ static struct iommu_ops arm_smmu_ops = {
.sva_bind = arm_smmu_sva_bind,
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
+ .page_response = arm_smmu_page_response,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
};
@@ -2771,6 +2943,13 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
if (ret)
return ret;
+ if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
+ (smmu->features & ARM_SMMU_FEAT_STALLS)) {
+ smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
+ if (!smmu->evtq.iopf)
+ return -ENOMEM;
+ }
+
/* priq */
if (!(smmu->features & ARM_SMMU_FEAT_PRI))
return 0;
@@ -2788,10 +2967,8 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
void *strtab = smmu->strtab_cfg.strtab;
cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
- if (!cfg->l1_desc) {
- dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
+ if (!cfg->l1_desc)
return -ENOMEM;
- }
for (i = 0; i < cfg->num_l1_ents; ++i) {
arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
@@ -3582,10 +3759,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
- if (!smmu) {
- dev_err(dev, "failed to allocate arm_smmu_device\n");
+ if (!smmu)
return -ENOMEM;
- }
smmu->dev = dev;
if (dev->of_node) {
@@ -3669,10 +3844,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- return ret;
+ goto err_sysfs_remove;
}
- return arm_smmu_set_bus_ops(&arm_smmu_ops);
+ ret = arm_smmu_set_bus_ops(&arm_smmu_ops);
+ if (ret)
+ goto err_unregister_device;
+
+ return 0;
+
+err_unregister_device:
+ iommu_device_unregister(&smmu->iommu);
+err_sysfs_remove:
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ret;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -3683,6 +3868,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
+ iopf_queue_free(smmu->evtq.iopf);
return 0;
}
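
The stall-based I/O page fault support added above is consumed through the generic IOMMU feature interface, and the driver-side checks require IOMMU_DEV_FEAT_IOPF to be enabled before IOMMU_DEV_FEAT_SVA (and disabled in the reverse order). A minimal sketch of that consumer-side sequence follows; it is an assumption-laden illustration, not code from this series, with simplified error handling and a caller that is assumed to hold valid struct device and mm_struct pointers.

#include <linux/err.h>
#include <linux/iommu.h>

/*
 * Illustrative consumer-side sequence (not part of this patch): enable
 * I/O page faults, then SVA, then bind the given address space.
 */
static struct iommu_sva *example_enable_sva(struct device *dev,
					    struct mm_struct *mm)
{
	struct iommu_sva *handle;
	int ret;

	/* IOPF must be enabled before SVA, as checked by the driver above */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		goto err_disable_iopf;

	handle = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto err_disable_sva;
	}

	return handle;

err_disable_sva:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
err_disable_iopf:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	return ERR_PTR(ret);
}
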
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 46e8c49214a8..4cb136f07914 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -184,6 +184,7 @@
#else
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
#endif
+#define Q_MIN_SZ_SHIFT (PAGE_SHIFT)
/*
* Stream table.
@@ -354,6 +355,13 @@
#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
+#define CMDQ_RESUME_0_RESP_TERM 0UL
+#define CMDQ_RESUME_0_RESP_RETRY 1UL
+#define CMDQ_RESUME_0_RESP_ABORT 2UL
+#define CMDQ_RESUME_0_RESP GENMASK_ULL(13, 12)
+#define CMDQ_RESUME_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_RESUME_1_STAG GENMASK_ULL(15, 0)
+
#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
#define CMDQ_SYNC_0_CS_NONE 0
#define CMDQ_SYNC_0_CS_IRQ 1
@@ -366,14 +374,33 @@
/* Event queue */
#define EVTQ_ENT_SZ_SHIFT 5
#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
-#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
+#define EVTQ_MAX_SZ_SHIFT (Q_MIN_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
+#define EVT_ID_TRANSLATION_FAULT 0x10
+#define EVT_ID_ADDR_SIZE_FAULT 0x11
+#define EVT_ID_ACCESS_FAULT 0x12
+#define EVT_ID_PERMISSION_FAULT 0x13
+
+#define EVTQ_0_SSV (1UL << 11)
+#define EVTQ_0_SSID GENMASK_ULL(31, 12)
+#define EVTQ_0_SID GENMASK_ULL(63, 32)
+#define EVTQ_1_STAG GENMASK_ULL(15, 0)
+#define EVTQ_1_STALL (1UL << 31)
+#define EVTQ_1_PnU (1UL << 33)
+#define EVTQ_1_InD (1UL << 34)
+#define EVTQ_1_RnW (1UL << 35)
+#define EVTQ_1_S2 (1UL << 39)
+#define EVTQ_1_CLASS GENMASK_ULL(41, 40)
+#define EVTQ_1_TT_READ (1UL << 44)
+#define EVTQ_2_ADDR GENMASK_ULL(63, 0)
+#define EVTQ_3_IPA GENMASK_ULL(51, 12)
+
/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
-#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
+#define PRIQ_MAX_SZ_SHIFT (Q_MIN_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
@@ -462,6 +489,13 @@ struct arm_smmu_cmdq_ent {
enum pri_resp resp;
} pri;
+ #define CMDQ_OP_RESUME 0x44
+ struct {
+ u32 sid;
+ u16 stag;
+ u8 resp;
+ } resume;
+
#define CMDQ_OP_CMD_SYNC 0x46
struct {
u64 msiaddr;
@@ -520,6 +554,7 @@ struct arm_smmu_cmdq_batch {
struct arm_smmu_evtq {
struct arm_smmu_queue q;
+ struct iopf_queue *iopf;
u32 max_stalls;
};
@@ -657,7 +692,9 @@ struct arm_smmu_master {
struct arm_smmu_stream *streams;
unsigned int num_streams;
bool ats_enabled;
+ bool stall_enabled;
bool sva_enabled;
+ bool iopf_enabled;
struct list_head bonds;
unsigned int ssid_bits;
};
@@ -675,6 +712,7 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
+ bool stall_enabled;
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
@@ -716,6 +754,7 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
+bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void arm_smmu_sva_unbind(struct iommu_sva *handle);
@@ -747,6 +786,11 @@ static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
return -ENODEV;
}
+static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
+{
+ return false;
+}
+
static inline struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 136872e77195..9f465e146799 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
- if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
+ if (of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
+ of_device_is_compatible(np, "nvidia,tegra186-smmu"))
return nvidia_smmu_impl_init(smmu);
smmu = qcom_smmu_impl_init(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 29117444e5a0..01e9b50b10a1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -7,6 +7,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <soc/tegra/mc.h>
+
#include "arm-smmu.h"
/*
@@ -15,18 +17,32 @@
* interleaved IOVA accesses across them and translates accesses from
* non-isochronous HW devices.
* Third one is used for translating accesses from isochronous HW devices.
+ *
+ * In addition, the SMMU driver needs to coordinate with the memory controller
+ * driver to ensure that the right SID override is programmed for any given
+ * memory client. This is necessary to allow for use-cases such as seamlessly
+ * handing over the display controller configuration from the firmware to the
+ * kernel.
+ *
* This implementation supports programming of the two instances that must
- * be programmed identically.
- * The third instance usage is through standard arm-smmu driver itself and
- * is out of scope of this implementation.
+ * be programmed identically and takes care of invoking the memory controller
+ * driver for SID override programming after devices have been attached to an
+ * SMMU instance.
*/
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
struct nvidia_smmu {
- struct arm_smmu_device smmu;
- void __iomem *bases[NUM_SMMU_INSTANCES];
+ struct arm_smmu_device smmu;
+ void __iomem *bases[MAX_SMMU_INSTANCES];
+ unsigned int num_instances;
+ struct tegra_mc *mc;
};
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
unsigned int inst, int page)
{
@@ -47,9 +63,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
int page, int offset, u32 val)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
writel_relaxed(val, reg);
@@ -67,9 +84,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
int page, int offset, u64 val)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
writeq_relaxed(val, reg);
@@ -79,6 +97,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
int sync, int status)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int delay;
arm_smmu_writel(smmu, page, sync, 0);
@@ -90,7 +109,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
u32 val = 0;
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg;
reg = nvidia_smmu_page(smmu, i, page) + status;
@@ -112,9 +131,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
u32 val;
void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
ARM_SMMU_GR0_sGFSR;
@@ -157,8 +177,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu = dev;
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
- for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ for (inst = 0; inst < nvidia->num_instances; inst++) {
irqreturn_t irq_ret;
irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
@@ -202,11 +223,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
struct arm_smmu_device *smmu;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain;
+ struct nvidia_smmu *nvidia;
smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
smmu = smmu_domain->smmu;
+ nvidia = to_nvidia_smmu(smmu);
- for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ for (inst = 0; inst < nvidia->num_instances; inst++) {
irqreturn_t irq_ret;
/*
@@ -224,6 +247,17 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
return ret;
}
+static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ int err;
+
+ err = tegra_mc_probe_device(nvidia->mc, dev);
+ if (err < 0)
+ dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
+ dev_name(dev), err);
+}
+
static const struct arm_smmu_impl nvidia_smmu_impl = {
.read_reg = nvidia_smmu_read_reg,
.write_reg = nvidia_smmu_write_reg,
@@ -233,6 +267,11 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
.tlb_sync = nvidia_smmu_tlb_sync,
.global_fault = nvidia_smmu_global_fault,
.context_fault = nvidia_smmu_context_fault,
+ .probe_finalize = nvidia_smmu_probe_finalize,
+};
+
+static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+ .probe_finalize = nvidia_smmu_probe_finalize,
};
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
@@ -241,23 +280,36 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
struct device *dev = smmu->dev;
struct nvidia_smmu *nvidia_smmu;
struct platform_device *pdev = to_platform_device(dev);
+ unsigned int i;
nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
if (!nvidia_smmu)
return ERR_PTR(-ENOMEM);
+ nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
+ if (IS_ERR(nvidia_smmu->mc))
+ return ERR_CAST(nvidia_smmu->mc);
+
/* Instance 0 is ioremapped by arm-smmu.c. */
nvidia_smmu->bases[0] = smmu->base;
+ nvidia_smmu->num_instances++;
+
+ for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return ERR_PTR(-ENODEV);
+ nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nvidia_smmu->bases[i]))
+ return ERR_CAST(nvidia_smmu->bases[i]);
- nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
- if (IS_ERR(nvidia_smmu->bases[1]))
- return ERR_CAST(nvidia_smmu->bases[1]);
+ nvidia_smmu->num_instances++;
+ }
- nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+ if (nvidia_smmu->num_instances == 1)
+ nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
+ else
+ nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
return &nvidia_smmu->smmu;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 61fc645c1325..9b9d13ec5a88 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
@@ -177,6 +178,16 @@ static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_doma
return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}
+static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+
+ if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
+ return false;
+
+ return true;
+}
+
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
@@ -191,7 +202,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
* be AARCH64 stage 1 but double check because the arm-smmu code assumes
* that is the case when the TTBR1 quirk is enabled
*/
- if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
+ if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
+ (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
(smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
@@ -216,6 +228,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
+ { .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
@@ -380,24 +393,48 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,msm8998-smmu-v2" },
{ .compatible = "qcom,sc7180-smmu-500" },
+ { .compatible = "qcom,sc7280-smmu-500" },
{ .compatible = "qcom,sc8180x-smmu-500" },
{ .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
+ { .compatible = "qcom,sm6125-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
{ }
};
+#ifdef CONFIG_ACPI
+static struct acpi_platform_list qcom_acpi_platlist[] = {
+ { "LENOVO", "CB-01 ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
+ { "QCOM ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
+ { }
+};
+#endif
+
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
- if (of_match_node(qcom_smmu_impl_of_match, np))
- return qcom_smmu_create(smmu, &qcom_smmu_impl);
+#ifdef CONFIG_ACPI
+ if (np == NULL) {
+ /* Match platform for ACPI boot */
+ if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
+ return qcom_smmu_create(smmu, &qcom_smmu_impl);
+ }
+#endif
+ /*
+ * Do not change this order of implementation: the adreno impl must be
+ * matched before the apss impl table, since both can implement
+ * arm,mmu-500, and matching the apss impl first would miss setting the
+ * adreno-specific features.
+ */
if (of_device_is_compatible(np, "qcom,adreno-smmu"))
return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
+ if (of_match_node(qcom_smmu_impl_of_match, np))
+ return qcom_smmu_create(smmu, &qcom_smmu_impl);
+
return smmu;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index b4b32d31fc06..f22dbeb1e510 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -31,7 +31,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -74,7 +73,7 @@ static bool using_legacy_binding, using_generic_binding;
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
if (pm_runtime_enabled(smmu->dev))
- return pm_runtime_get_sync(smmu->dev);
+ return pm_runtime_resume_and_get(smmu->dev);
return 0;
}
@@ -1276,6 +1275,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
u64 phys;
unsigned long va, flags;
int ret, idx = cfg->cbndx;
+ phys_addr_t addr = 0;
ret = arm_smmu_rpm_get(smmu);
if (ret < 0)
@@ -1295,6 +1295,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dev_err(dev,
"iova to phys timed out on %pad. Falling back to software table walk.\n",
&iova);
+ arm_smmu_rpm_put(smmu);
return ops->iova_to_phys(ops, iova);
}
@@ -1303,12 +1304,14 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
if (phys & ARM_SMMU_CB_PAR_F) {
dev_err(dev, "translation fault!\n");
dev_err(dev, "PAR = 0x%llx\n", phys);
- return 0;
+ goto out;
}
+ addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+out:
arm_smmu_rpm_put(smmu);
- return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+ return addr;
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
@@ -1455,6 +1458,18 @@ static void arm_smmu_release_device(struct device *dev)
iommu_fwspec_free(dev);
}
+static void arm_smmu_probe_finalize(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_device *smmu;
+
+ cfg = dev_iommu_priv_get(dev);
+ smmu = cfg->smmu;
+
+ if (smmu->impl && smmu->impl->probe_finalize)
+ smmu->impl->probe_finalize(smmu, dev);
+}
+
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
@@ -1574,6 +1589,7 @@ static struct iommu_ops arm_smmu_ops = {
.iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
+ .probe_finalize = arm_smmu_probe_finalize,
.device_group = arm_smmu_device_group,
.enable_nesting = arm_smmu_enable_nesting,
.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
@@ -2169,7 +2185,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
- return err;
+ goto err_sysfs_remove;
}
platform_set_drvdata(pdev, smmu);
@@ -2192,10 +2208,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
* any device which might need it, so we want the bus ops in place
* ready to handle default domain setup as soon as any SMMU exists.
*/
- if (!using_legacy_binding)
- return arm_smmu_bus_init(&arm_smmu_ops);
+ if (!using_legacy_binding) {
+ err = arm_smmu_bus_init(&arm_smmu_ops);
+ if (err)
+ goto err_unregister_device;
+ }
return 0;
+
+err_unregister_device:
+ iommu_device_unregister(&smmu->iommu);
+err_sysfs_remove:
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 84c21c4b0691..a50271595960 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -441,6 +441,7 @@ struct arm_smmu_impl {
struct device *dev, int start);
void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
+ void (*probe_finalize)(struct arm_smmu_device *smmu, struct device *dev);
};
#define INVALID_SMENDX -1
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 4294abe389b2..021cf8f65ffc 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -25,7 +25,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7bcdd1205535..6f0df629353f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -243,9 +243,11 @@ resv_iova:
lo = iova_pfn(iovad, start);
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
- } else {
+ } else if (end < start) {
/* dma_ranges list should be sorted */
- dev_err(&dev->dev, "Failed to reserve IOVA\n");
+ dev_err(&dev->dev,
+ "Failed to reserve IOVA [%pa-%pa]\n",
+ &start, &end);
return -EINVAL;
}
@@ -319,16 +321,16 @@ static bool dev_is_untrusted(struct device *dev)
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
* @base: IOVA at which the mappable address space starts
- * @size: Size of IOVA space
+ * @limit: Last address of the IOVA space
* @dev: Device the domain is being initialised for
*
- * @base and @size should be exact multiples of IOMMU page granularity to
+ * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
* avoid rounding surprises. If necessary, we reserve the page at address 0
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
* any change which could make prior IOVAs invalid will fail.
*/
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
- u64 size, struct device *dev)
+ dma_addr_t limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
@@ -346,7 +348,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* Check the domain allows at least some access to the device... */
if (domain->geometry.force_aperture) {
if (base > domain->geometry.aperture_end ||
- base + size <= domain->geometry.aperture_start) {
+ limit < domain->geometry.aperture_start) {
pr_warn("specified DMA range outside IOMMU capability\n");
return -EFAULT;
}
@@ -766,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
sg_free_table(&sh->sgt);
+ kfree(sh);
}
#endif /* CONFIG_DMA_REMAP */
@@ -1308,7 +1311,7 @@ static const struct dma_map_ops iommu_dma_ops = {
* The IOMMU core code allocates the default DMA domain, which the underlying
* IOMMU driver needs to support via the dma-iommu layer.
*/
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -1320,7 +1323,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
* underlying IOMMU driver needs to support via the dma-iommu layer.
*/
if (domain->type == IOMMU_DOMAIN_DMA) {
- if (iommu_dma_init_domain(domain, dma_base, size, dev))
+ if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
goto out_err;
dev->dma_ops = &iommu_dma_ops;
}
@@ -1330,6 +1333,7 @@ out_err:
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
dev_name(dev));
}
+EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
phys_addr_t msi_addr, struct iommu_domain *domain)
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7623d8c371f5..d0fbf1d10e18 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -17,7 +17,6 @@
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 28a3d1596c76..43ebd8af11c5 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -3,6 +3,9 @@
config DMAR_TABLE
bool
+config DMAR_PERF
+ bool
+
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64)
@@ -14,6 +17,7 @@ config INTEL_IOMMU
select SWIOTLB
select IOASID
select IOMMU_DMA
+ select PCI_ATS
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -24,6 +28,7 @@ config INTEL_IOMMU
config INTEL_IOMMU_DEBUGFS
bool "Export Intel IOMMU internals in Debugfs"
depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ select DMAR_PERF
help
!!!WARNING!!!
@@ -41,6 +46,7 @@ config INTEL_IOMMU_SVM
select PCI_PRI
select MMU_NOTIFIER
select IOASID
+ select IOMMU_SVA_LIB
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index ae236ec7d219..fa0dae16441c 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
+obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index efea7f02abd9..62e23ff3c987 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -16,6 +16,7 @@
#include <asm/irq_remapping.h>
#include "pasid.h"
+#include "perf.h"
struct tbl_walk {
u16 bus;
@@ -31,6 +32,9 @@ struct iommu_regset {
const char *regs;
};
+#define DEBUG_BUFFER_SIZE 1024
+static char debug_buf[DEBUG_BUFFER_SIZE];
+
#define IOMMU_REGSET_ENTRY(_reg_) \
{ DMAR_##_reg_##_REG, __stringify(_reg_) }
@@ -538,6 +542,111 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif
+static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
+ struct dmar_drhd_unit *drhd)
+{
+ int ret;
+
+ seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
+ iommu->name, drhd->reg_base_addr);
+
+ ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
+ if (ret < 0)
+ seq_puts(m, "Failed to get latency snapshot");
+ else
+ seq_puts(m, debug_buf);
+ seq_puts(m, "\n");
+}
+
+static int latency_show(struct seq_file *m, void *v)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd)
+ latency_show_one(m, iommu, drhd);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int dmar_perf_latency_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, latency_show, NULL);
+}
+
+static ssize_t dmar_perf_latency_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int counting;
+ char buf[64];
+
+ if (cnt > 63)
+ cnt = 63;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ if (kstrtoint(buf, 0, &counting))
+ return -EINVAL;
+
+ switch (counting) {
+ case 0:
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
+ dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
+ dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
+ dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
+ }
+ rcu_read_unlock();
+ break;
+ case 1:
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd)
+ dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
+ rcu_read_unlock();
+ break;
+ case 2:
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd)
+ dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
+ rcu_read_unlock();
+ break;
+ case 3:
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd)
+ dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
+ rcu_read_unlock();
+ break;
+ case 4:
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd)
+ dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
+ rcu_read_unlock();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations dmar_perf_latency_fops = {
+ .open = dmar_perf_latency_open,
+ .write = dmar_perf_latency_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void __init intel_iommu_debugfs_init(void)
{
struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
@@ -556,4 +665,6 @@ void __init intel_iommu_debugfs_init(void)
debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
NULL, &ir_translation_struct_fops);
#endif
+ debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
+ NULL, &dmar_perf_latency_fops);
}
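
For reference, the new dmar_perf_latency node takes a single integer on write (0 disables all counters; 1, 2, 3 and 4 enable IOTLB-invalidation, device-TLB-invalidation, IEC-invalidation and page-request latency sampling respectively) and dumps the per-IOMMU histogram on read. A minimal user-space sketch of driving it; the debugfs mount point and the need to run as root are assumptions, not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Assumes debugfs is mounted at the conventional location. */
    const char *node = "/sys/kernel/debug/iommu/intel/dmar_perf_latency";
    char buf[1024];
    ssize_t n;
    int fd;

    fd = open(node, O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* "1" enables IOTLB invalidation latency sampling on all IOMMUs. */
    if (write(fd, "1", 1) != 1)
        perror("write");

    /* ... run a DMA-heavy workload here, then read back the histogram ... */

    if (lseek(fd, 0, SEEK_SET) == 0 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
        buf[n] = '\0';
        fputs(buf, stdout);
    }

    close(fd);
    return 0;
}
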
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 84057cb9596c..d66f79acd14d 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -34,6 +34,7 @@
#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
+#include "perf.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
@@ -1342,15 +1343,33 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
unsigned int count, unsigned long options)
{
struct q_inval *qi = iommu->qi;
+ s64 devtlb_start_ktime = 0;
+ s64 iotlb_start_ktime = 0;
+ s64 iec_start_ktime = 0;
struct qi_desc wait_desc;
int wait_index, index;
unsigned long flags;
int offset, shift;
int rc, i;
+ u64 type;
if (!qi)
return 0;
+ type = desc->qw0 & GENMASK_ULL(3, 0);
+
+ if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
+ dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
+ iotlb_start_ktime = ktime_to_ns(ktime_get());
+
+ if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
+ dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
+ devtlb_start_ktime = ktime_to_ns(ktime_get());
+
+ if (type == QI_IEC_TYPE &&
+ dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
+ iec_start_ktime = ktime_to_ns(ktime_get());
+
restart:
rc = 0;
@@ -1425,6 +1444,18 @@ restart:
if (rc == -EAGAIN)
goto restart;
+ if (iotlb_start_ktime)
+ dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
+ ktime_to_ns(ktime_get()) - iotlb_start_ktime);
+
+ if (devtlb_start_ktime)
+ dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
+ ktime_to_ns(ktime_get()) - devtlb_start_ktime);
+
+ if (iec_start_ktime)
+ dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
+ ktime_to_ns(ktime_get()) - iec_start_ktime);
+
return rc;
}
@@ -1913,16 +1944,23 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
- source_id >> 8, PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr >> 48,
- fault_reason, reason);
- else
- pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
+ pr_err("[INTR-REMAP] Request device [0x%02x:0x%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
+ source_id >> 8, PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr >> 48,
+ fault_reason, reason);
+ else if (pasid == INVALID_IOASID)
+ pr_err("[%s NO_PASID] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
type ? "DMA Read" : "DMA Write",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), pasid, addr,
+ PCI_FUNC(source_id & 0xFF), addr,
fault_reason, reason);
+ else
+ pr_err("[%s PASID 0x%x] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+ type ? "DMA Read" : "DMA Write", pasid,
+ source_id >> 8, PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr,
+ fault_reason, reason);
+
return 0;
}
@@ -1989,7 +2027,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
if (!ratelimited)
/* Using pasid -1 if pasid is not present */
dmar_fault_do_one(iommu, type, fault_reason,
- pasid_present ? pasid : -1,
+ pasid_present ? pasid : INVALID_IOASID,
source_id, guest_addr);
fault_index++;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index be35284a2016..dd22fc7d5176 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -46,6 +46,7 @@
#include <asm/iommu.h>
#include "../irq_remapping.h"
+#include "../iommu-sva-lib.h"
#include "pasid.h"
#include "cap_audit.h"
@@ -564,7 +565,7 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
- int agaw = -1;
+ int agaw;
sagaw = cap_sagaw(iommu->cap);
for (agaw = width_to_agaw(max_gaw);
@@ -625,12 +626,12 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
bool found = false;
int i;
- domain->iommu_coherency = 1;
+ domain->iommu_coherency = true;
for_each_domain_iommu(i, domain) {
found = true;
if (!iommu_paging_structure_coherency(g_iommus[i])) {
- domain->iommu_coherency = 0;
+ domain->iommu_coherency = false;
break;
}
}
@@ -641,18 +642,18 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (!iommu_paging_structure_coherency(iommu)) {
- domain->iommu_coherency = 0;
+ domain->iommu_coherency = false;
break;
}
}
rcu_read_unlock();
}
-static int domain_update_iommu_snooping(struct intel_iommu *skip)
+static bool domain_update_iommu_snooping(struct intel_iommu *skip)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int ret = 1;
+ bool ret = true;
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
@@ -665,7 +666,7 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
*/
if (!sm_supported(iommu) &&
!ecap_sc_support(iommu->ecap)) {
- ret = 0;
+ ret = false;
break;
}
}
@@ -682,9 +683,8 @@ static int domain_update_iommu_superpage(struct dmar_domain *domain,
struct intel_iommu *iommu;
int mask = 0x3;
- if (!intel_iommu_superpage) {
+ if (!intel_iommu_superpage)
return 0;
- }
/* set iommu_superpage to the smallest common denominator */
rcu_read_lock();
@@ -1919,7 +1919,6 @@ static int domain_attach_iommu(struct dmar_domain *domain,
assert_spin_locked(&iommu->lock);
domain->iommu_refcnt[iommu->seq_id] += 1;
- domain->iommu_count += 1;
if (domain->iommu_refcnt[iommu->seq_id] == 1) {
ndomains = cap_ndoms(iommu->cap);
num = find_first_zero_bit(iommu->domain_ids, ndomains);
@@ -1927,7 +1926,6 @@ static int domain_attach_iommu(struct dmar_domain *domain,
if (num >= ndomains) {
pr_err("%s: No free domain ids\n", iommu->name);
domain->iommu_refcnt[iommu->seq_id] -= 1;
- domain->iommu_count -= 1;
return -ENOSPC;
}
@@ -1943,16 +1941,15 @@ static int domain_attach_iommu(struct dmar_domain *domain,
return 0;
}
-static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
+static void domain_detach_iommu(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
{
- int num, count;
+ int num;
assert_spin_locked(&device_domain_lock);
assert_spin_locked(&iommu->lock);
domain->iommu_refcnt[iommu->seq_id] -= 1;
- count = --domain->iommu_count;
if (domain->iommu_refcnt[iommu->seq_id] == 0) {
num = domain->iommu_did[iommu->seq_id];
clear_bit(num, iommu->domain_ids);
@@ -1961,8 +1958,6 @@ static int domain_detach_iommu(struct dmar_domain *domain,
domain_update_iommu_cap(domain);
domain->iommu_did[iommu->seq_id] = 0;
}
-
- return count;
}
static inline int guestwidth_to_adjustwidth(int gaw)
@@ -2434,10 +2429,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}
-static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{
- unsigned long flags;
+ struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
+ unsigned long flags;
u16 did_old;
if (!iommu)
@@ -2449,7 +2445,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
spin_unlock_irqrestore(&iommu->lock, flags);
return;
}
- did_old = context_domain_id(context);
+
+ if (sm_supported(iommu)) {
+ if (hw_pass_through && domain_type_is_si(info->domain))
+ did_old = FLPT_DEFAULT_DID;
+ else
+ did_old = info->domain->iommu_did[iommu->seq_id];
+ } else {
+ did_old = context_domain_id(context);
+ }
+
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -2467,6 +2472,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
0,
0,
DMA_TLB_DSI_FLUSH);
+
+ __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
@@ -4138,62 +4145,56 @@ static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
return container_of(iommu_dev, struct intel_iommu, iommu);
}
-static ssize_t intel_iommu_show_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
u32 ver = readl(iommu->reg + DMAR_VER_REG);
return sprintf(buf, "%d:%d\n",
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
-static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
+static DEVICE_ATTR_RO(version);
-static ssize_t intel_iommu_show_address(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sprintf(buf, "%llx\n", iommu->reg_phys);
}
-static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
+static DEVICE_ATTR_RO(address);
-static ssize_t intel_iommu_show_cap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sprintf(buf, "%llx\n", iommu->cap);
}
-static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
+static DEVICE_ATTR_RO(cap);
-static ssize_t intel_iommu_show_ecap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ecap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sprintf(buf, "%llx\n", iommu->ecap);
}
-static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
+static DEVICE_ATTR_RO(ecap);
-static ssize_t intel_iommu_show_ndoms(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t domains_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
}
-static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
+static DEVICE_ATTR_RO(domains_supported);
-static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t domains_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
cap_ndoms(iommu->cap)));
}
-static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
+static DEVICE_ATTR_RO(domains_used);
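
The renames above are required, not cosmetic: DEVICE_ATTR_RO(name) derives the read callback name as name_show(), so each show routine has to follow that convention. A stand-alone sketch of the same token-pasting idiom (plain C, mimicking rather than using the kernel macro):

#include <stdio.h>

struct attr_stub {
    const char *name;
    int (*show)(char *buf, size_t len);
};

/* Mimics DEVICE_ATTR_RO(): the callback name is derived from the token. */
#define ATTR_RO(_name) \
    static struct attr_stub attr_##_name = { #_name, _name##_show }

static int version_show(char *buf, size_t len)
{
    return snprintf(buf, len, "1:0\n");
}

ATTR_RO(version);

int main(void)
{
    char buf[16];

    attr_version.show(buf, sizeof(buf));
    printf("%s: %s", attr_version.name, buf);
    return 0;
}
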
static struct attribute *intel_iommu_attrs[] = {
&dev_attr_version.attr,
@@ -4436,9 +4437,9 @@ out_free_dmar:
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
- struct intel_iommu *iommu = opaque;
+ struct device_domain_info *info = opaque;
- domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+ domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}
@@ -4448,12 +4449,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
* devices, unbinding the driver from any one of them will possibly leave
* the others unable to operate.
*/
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+static void domain_context_clear(struct device_domain_info *info)
{
- if (!iommu || !dev || !dev_is_pci(dev))
+ if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
return;
- pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+ pci_for_each_dma_alias(to_pci_dev(info->dev),
+ &domain_context_clear_one_cb, info);
}
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
@@ -4470,14 +4472,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
iommu = info->iommu;
domain = info->domain;
- if (info->dev) {
+ if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
iommu_disable_dev_iotlb(info);
- if (!dev_is_real_dma_subdevice(info->dev))
- domain_context_clear(iommu, info->dev);
+ domain_context_clear(info);
intel_pasid_free_table(info->dev);
}
@@ -4511,13 +4512,13 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
adjust_width = guestwidth_to_adjustwidth(guest_width);
domain->agaw = width_to_agaw(adjust_width);
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
+ domain->iommu_coherency = false;
+ domain->iommu_snooping = false;
domain->iommu_superpage = 0;
domain->max_addr = 0;
/* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ domain->pgd = alloc_pgtable_page(domain->nid);
if (!domain->pgd)
return -ENOMEM;
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -4757,6 +4758,13 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
if (!iommu)
return -ENODEV;
+ if ((dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE) &&
+ !ecap_nest(iommu->ecap)) {
+ dev_err(dev, "%s: iommu not support nested translation\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
@@ -4778,8 +4786,7 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
- dmar_domain->pgd = (struct dma_pte *)
- phys_to_virt(dma_pte_addr(pte));
+ dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
free_pgtable_page(pte);
}
dmar_domain->agaw--;
@@ -5129,7 +5136,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
- return domain_update_iommu_snooping(NULL) == 1;
+ return domain_update_iommu_snooping(NULL);
if (cap == IOMMU_CAP_INTR_REMAP)
return irq_remapping_enabled == 1;
@@ -5165,13 +5172,10 @@ static void intel_iommu_release_device(struct device *dev)
static void intel_iommu_probe_finalize(struct device *dev)
{
- dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
if (domain && domain->type == IOMMU_DOMAIN_DMA)
- iommu_setup_dma_ops(dev, base,
- __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
else
set_dma_ops(dev, NULL);
}
@@ -5331,6 +5335,48 @@ static int intel_iommu_disable_auxd(struct device *dev)
return 0;
}
+static int intel_iommu_enable_sva(struct device *dev)
+{
+ struct device_domain_info *info = get_domain_info(dev);
+ struct intel_iommu *iommu;
+ int ret;
+
+ if (!info || dmar_disabled)
+ return -EINVAL;
+
+ iommu = info->iommu;
+ if (!iommu)
+ return -EINVAL;
+
+ if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
+ return -ENODEV;
+
+ if (intel_iommu_enable_pasid(iommu, dev))
+ return -ENODEV;
+
+ if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+ return -EINVAL;
+
+ ret = iopf_queue_add_device(iommu->iopf_queue, dev);
+ if (!ret)
+ ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+
+ return ret;
+}
+
+static int intel_iommu_disable_sva(struct device *dev)
+{
+ struct device_domain_info *info = get_domain_info(dev);
+ struct intel_iommu *iommu = info->iommu;
+ int ret;
+
+ ret = iommu_unregister_device_fault_handler(dev);
+ if (!ret)
+ ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
+
+ return ret;
+}
+
/*
* A PCI express designated vendor specific extended capability is defined
* in the section 3.7 of Intel scalable I/O virtualization technical spec
@@ -5392,35 +5438,37 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
static int
intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
- if (feat == IOMMU_DEV_FEAT_AUX)
+ switch (feat) {
+ case IOMMU_DEV_FEAT_AUX:
return intel_iommu_enable_auxd(dev);
- if (feat == IOMMU_DEV_FEAT_IOPF)
+ case IOMMU_DEV_FEAT_IOPF:
return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
- if (feat == IOMMU_DEV_FEAT_SVA) {
- struct device_domain_info *info = get_domain_info(dev);
-
- if (!info)
- return -EINVAL;
+ case IOMMU_DEV_FEAT_SVA:
+ return intel_iommu_enable_sva(dev);
- if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
- return -EINVAL;
-
- if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
- return 0;
+ default:
+ return -ENODEV;
}
-
- return -ENODEV;
}
static int
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
- if (feat == IOMMU_DEV_FEAT_AUX)
+ switch (feat) {
+ case IOMMU_DEV_FEAT_AUX:
return intel_iommu_disable_auxd(dev);
- return -ENODEV;
+ case IOMMU_DEV_FEAT_IOPF:
+ return 0;
+
+ case IOMMU_DEV_FEAT_SVA:
+ return intel_iommu_disable_sva(dev);
+
+ default:
+ return -ENODEV;
+ }
}
static bool
@@ -5457,7 +5505,7 @@ intel_iommu_enable_nesting(struct iommu_domain *domain)
int ret = -ENODEV;
spin_lock_irqsave(&device_domain_lock, flags);
- if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
+ if (list_empty(&dmar_domain->devices)) {
dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
ret = 0;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 72dc84821dad..9ec374e17469 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* intel-pasid.c - PASID idr, table and entry manipulation
*
* Copyright (C) 2018 Intel Corporation
@@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
u32 pasid, bool fault_ignore)
{
struct pasid_entry *pte;
- u16 did;
+ u16 did, pgtt;
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte))
@@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
return;
did = pasid_get_domain_id(pte);
+ pgtt = pasid_pte_get_pgtt(pte);
+
intel_pasid_clear_entry(dev, pasid, fault_ignore);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+ if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+ else
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 5ff61c3d401f..c11bc8b833b8 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+ return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
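
pasid_pte_get_pgtt() extracts bits 8:6 of the low qword of a PASID table entry, and intel_pasid_tear_down_entry() uses it to pick between a PASID-selective IOTLB flush (first-level or pass-through entries) and a domain-selective flush. A small stand-alone illustration of the extraction; the sample value is made up, and the 3-bit meanings follow the VT-d PGTT encoding (e.g. 001b first-level, 010b second-level, 100b pass-through):

#include <stdint.h>
#include <stdio.h>

/* Same shift/mask as pasid_pte_get_pgtt(): bits 8:6 of val[0]. */
static uint16_t pgtt_of(uint64_t val0)
{
    return (uint16_t)((val0 >> 6) & 0x7);
}

int main(void)
{
    /* Hypothetical entry: present bit set, PGTT = 010b (second-level). */
    uint64_t val0 = (UINT64_C(2) << 6) | 1;

    printf("pgtt = %u\n", pgtt_of(val0));    /* prints 2 */
    return 0;
}
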
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
new file mode 100644
index 000000000000..73b7ec705552
--- /dev/null
+++ b/drivers/iommu/intel/perf.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * perf.c - performance monitor
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/intel-iommu.h>
+
+#include "perf.h"
+
+static DEFINE_SPINLOCK(latency_lock);
+
+bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type)
+{
+ struct latency_statistic *lstat = iommu->perf_statistic;
+
+ return lstat && lstat[type].enabled;
+}
+
+int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
+{
+ struct latency_statistic *lstat;
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ if (dmar_latency_enabled(iommu, type))
+ return 0;
+
+ spin_lock_irqsave(&latency_lock, flags);
+ if (!iommu->perf_statistic) {
+ iommu->perf_statistic = kzalloc(sizeof(*lstat) * DMAR_LATENCY_NUM,
+ GFP_ATOMIC);
+ if (!iommu->perf_statistic) {
+ ret = -ENOMEM;
+ goto unlock_out;
+ }
+ }
+
+ lstat = iommu->perf_statistic;
+
+ if (!lstat[type].enabled) {
+ lstat[type].enabled = true;
+ lstat[type].counter[COUNTS_MIN] = UINT_MAX;
+ ret = 0;
+ }
+unlock_out:
+ spin_unlock_irqrestore(&latency_lock, flags);
+
+ return ret;
+}
+
+void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type)
+{
+ struct latency_statistic *lstat = iommu->perf_statistic;
+ unsigned long flags;
+
+ if (!dmar_latency_enabled(iommu, type))
+ return;
+
+ spin_lock_irqsave(&latency_lock, flags);
+ memset(&lstat[type], 0, sizeof(*lstat) * DMAR_LATENCY_NUM);
+ spin_unlock_irqrestore(&latency_lock, flags);
+}
+
+void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 latency)
+{
+ struct latency_statistic *lstat = iommu->perf_statistic;
+ unsigned long flags;
+ u64 min, max;
+
+ if (!dmar_latency_enabled(iommu, type))
+ return;
+
+ spin_lock_irqsave(&latency_lock, flags);
+ if (latency < 100)
+ lstat[type].counter[COUNTS_10e2]++;
+ else if (latency < 1000)
+ lstat[type].counter[COUNTS_10e3]++;
+ else if (latency < 10000)
+ lstat[type].counter[COUNTS_10e4]++;
+ else if (latency < 100000)
+ lstat[type].counter[COUNTS_10e5]++;
+ else if (latency < 1000000)
+ lstat[type].counter[COUNTS_10e6]++;
+ else if (latency < 10000000)
+ lstat[type].counter[COUNTS_10e7]++;
+ else
+ lstat[type].counter[COUNTS_10e8_plus]++;
+
+ min = lstat[type].counter[COUNTS_MIN];
+ max = lstat[type].counter[COUNTS_MAX];
+ lstat[type].counter[COUNTS_MIN] = min_t(u64, min, latency);
+ lstat[type].counter[COUNTS_MAX] = max_t(u64, max, latency);
+ lstat[type].counter[COUNTS_SUM] += latency;
+ lstat[type].samples++;
+ spin_unlock_irqrestore(&latency_lock, flags);
+}
+
+static char *latency_counter_names[] = {
+ " <0.1us",
+ " 0.1us-1us", " 1us-10us", " 10us-100us",
+ " 100us-1ms", " 1ms-10ms", " >=10ms",
+ " min(us)", " max(us)", " average(us)"
+};
+
+static char *latency_type_names[] = {
+ " inv_iotlb", " inv_devtlb", " inv_iec",
+ " svm_prq"
+};
+
+int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
+{
+ struct latency_statistic *lstat = iommu->perf_statistic;
+ unsigned long flags;
+ int bytes = 0, i, j;
+
+ memset(str, 0, size);
+
+ for (i = 0; i < COUNTS_NUM; i++)
+ bytes += snprintf(str + bytes, size - bytes,
+ "%s", latency_counter_names[i]);
+
+ spin_lock_irqsave(&latency_lock, flags);
+ for (i = 0; i < DMAR_LATENCY_NUM; i++) {
+ if (!dmar_latency_enabled(iommu, i))
+ continue;
+
+ bytes += snprintf(str + bytes, size - bytes,
+ "\n%s", latency_type_names[i]);
+
+ for (j = 0; j < COUNTS_NUM; j++) {
+ u64 val = lstat[i].counter[j];
+
+ switch (j) {
+ case COUNTS_MIN:
+ if (val == UINT_MAX)
+ val = 0;
+ else
+ val = div_u64(val, 1000);
+ break;
+ case COUNTS_MAX:
+ val = div_u64(val, 1000);
+ break;
+ case COUNTS_SUM:
+ if (lstat[i].samples)
+ val = div_u64(val, (lstat[i].samples * 1000));
+ else
+ val = 0;
+ break;
+ default:
+ break;
+ }
+
+ bytes += snprintf(str + bytes, size - bytes,
+ "%12lld", val);
+ }
+ }
+ spin_unlock_irqrestore(&latency_lock, flags);
+
+ return bytes;
+}
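
dmar_latency_update() sorts each nanosecond sample into decade buckets and keeps running min/max/sum, which is what dmar_latency_snapshot() later prints as counts plus min/max/average in microseconds. A stand-alone sketch of the same bucketing with made-up sample values:

#include <stdint.h>
#include <stdio.h>

enum { B_LT_100NS, B_LT_1US, B_LT_10US, B_LT_100US,
       B_LT_1MS, B_LT_10MS, B_GE_10MS, B_NUM };

static uint64_t buckets[B_NUM], sum, samples;
static uint64_t min_ns = UINT64_MAX, max_ns;

static void update(uint64_t ns)
{
    uint64_t limit = 100;
    int i = 0;

    /* Same thresholds as dmar_latency_update(): 100ns, 1us, 10us, ... */
    while (i < B_GE_10MS && ns >= limit) {
        i++;
        limit *= 10;
    }
    buckets[i]++;

    if (ns < min_ns)
        min_ns = ns;
    if (ns > max_ns)
        max_ns = ns;
    sum += ns;
    samples++;
}

int main(void)
{
    uint64_t demo[] = { 80, 950, 12000, 450000, 2500000 };

    for (unsigned int i = 0; i < sizeof(demo) / sizeof(demo[0]); i++)
        update(demo[i]);

    printf("avg %llu us, min %llu us, max %llu us\n",
           (unsigned long long)(sum / samples / 1000),
           (unsigned long long)(min_ns / 1000),
           (unsigned long long)(max_ns / 1000));
    return 0;
}
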
diff --git a/drivers/iommu/intel/perf.h b/drivers/iommu/intel/perf.h
new file mode 100644
index 000000000000..fd6db8049d1a
--- /dev/null
+++ b/drivers/iommu/intel/perf.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * perf.h - performance monitor header
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+enum latency_type {
+ DMAR_LATENCY_INV_IOTLB = 0,
+ DMAR_LATENCY_INV_DEVTLB,
+ DMAR_LATENCY_INV_IEC,
+ DMAR_LATENCY_PRQ,
+ DMAR_LATENCY_NUM
+};
+
+enum latency_count {
+ COUNTS_10e2 = 0, /* < 0.1us */
+ COUNTS_10e3, /* 0.1us ~ 1us */
+ COUNTS_10e4, /* 1us ~ 10us */
+ COUNTS_10e5, /* 10us ~ 100us */
+ COUNTS_10e6, /* 100us ~ 1ms */
+ COUNTS_10e7, /* 1ms ~ 10ms */
+ COUNTS_10e8_plus, /* 10ms and above */
+ COUNTS_MIN,
+ COUNTS_MAX,
+ COUNTS_SUM,
+ COUNTS_NUM
+};
+
+struct latency_statistic {
+ bool enabled;
+ u64 counter[COUNTS_NUM];
+ u64 samples;
+};
+
+#ifdef CONFIG_DMAR_PERF
+int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type);
+void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
+bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
+void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type,
+ u64 latency);
+int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
+#else
+static inline int
+dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
+{
+ return -EINVAL;
+}
+
+static inline void
+dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type)
+{
+}
+
+static inline bool
+dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type)
+{
+ return false;
+}
+
+static inline void
+dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 latency)
+{
+}
+
+static inline int
+dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
+{
+ return 0;
+}
+#endif /* CONFIG_DMAR_PERF */
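
The stub half of this header is what lets dmar.c, debugfs.c and svm.c call the latency helpers unconditionally: with CONFIG_DMAR_PERF disabled the calls collapse to no-ops instead of needing #ifdefs at every call site. The same pattern in miniature (the config macro here is purely illustrative):

#include <stdio.h>

/* #define HAVE_PERF 1 */    /* flip on to get the real helper */

#ifdef HAVE_PERF
static void perf_update(unsigned long ns)
{
    printf("sample: %lu ns\n", ns);
}
#else
static inline void perf_update(unsigned long ns)
{
    (void)ns;    /* no-op stub, compiled away */
}
#endif

int main(void)
{
    perf_update(1234);    /* caller needs no #ifdef */
    return 0;
}
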
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 5165cea90421..4b9b3f35ba0e 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -17,19 +17,76 @@
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
+#include <linux/xarray.h>
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
+#include <trace/events/intel_iommu.h>
#include "pasid.h"
+#include "perf.h"
+#include "../iommu-sva-lib.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
+#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
#define PRQ_ORDER 0
+static DEFINE_XARRAY_ALLOC(pasid_private_array);
+static int pasid_private_add(ioasid_t pasid, void *priv)
+{
+ return xa_alloc(&pasid_private_array, &pasid, priv,
+ XA_LIMIT(pasid, pasid), GFP_ATOMIC);
+}
+
+static void pasid_private_remove(ioasid_t pasid)
+{
+ xa_erase(&pasid_private_array, pasid);
+}
+
+static void *pasid_private_find(ioasid_t pasid)
+{
+ return xa_load(&pasid_private_array, pasid);
+}
+
+static struct intel_svm_dev *
+svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
+{
+ struct intel_svm_dev *sdev = NULL, *t;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(t, &svm->devs, list) {
+ if (t->sid == sid) {
+ sdev = t;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return sdev;
+}
+
+static struct intel_svm_dev *
+svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
+{
+ struct intel_svm_dev *sdev = NULL, *t;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(t, &svm->devs, list) {
+ if (t->dev == dev) {
+ sdev = t;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return sdev;
+}
+
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
+ struct iopf_queue *iopfq;
struct page *pages;
int irq, ret;
@@ -46,13 +103,20 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
iommu->name);
ret = -EINVAL;
- err:
- free_pages((unsigned long)iommu->prq, PRQ_ORDER);
- iommu->prq = NULL;
- return ret;
+ goto free_prq;
}
iommu->pr_irq = irq;
+ snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
+ "dmar%d-iopfq", iommu->seq_id);
+ iopfq = iopf_queue_alloc(iommu->iopfq_name);
+ if (!iopfq) {
+ pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
+ ret = -ENOMEM;
+ goto free_hwirq;
+ }
+ iommu->iopf_queue = iopfq;
+
snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
@@ -60,9 +124,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
if (ret) {
pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
iommu->name);
- dmar_free_hwirq(irq);
- iommu->pr_irq = 0;
- goto err;
+ goto free_iopfq;
}
dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
@@ -71,6 +133,18 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
init_completion(&iommu->prq_complete);
return 0;
+
+free_iopfq:
+ iopf_queue_free(iommu->iopf_queue);
+ iommu->iopf_queue = NULL;
+free_hwirq:
+ dmar_free_hwirq(irq);
+ iommu->pr_irq = 0;
+free_prq:
+ free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+ iommu->prq = NULL;
+
+ return ret;
}
int intel_svm_finish_prq(struct intel_iommu *iommu)
@@ -85,6 +159,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
iommu->pr_irq = 0;
}
+ if (iommu->iopf_queue) {
+ iopf_queue_free(iommu->iopf_queue);
+ iommu->iopf_queue = NULL;
+ }
+
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
iommu->prq = NULL;
@@ -204,17 +283,12 @@ static const struct mmu_notifier_ops intel_mmuops = {
};
static DEFINE_MUTEX(pasid_mutex);
-static LIST_HEAD(global_svm_list);
-
-#define for_each_svm_dev(sdev, svm, d) \
- list_for_each_entry((sdev), &(svm)->devs, list) \
- if ((d) != (sdev)->dev) {} else
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm **rsvm,
struct intel_svm_dev **rsdev)
{
- struct intel_svm_dev *d, *sdev = NULL;
+ struct intel_svm_dev *sdev = NULL;
struct intel_svm *svm;
/* The caller should hold the pasid_mutex lock */
@@ -224,7 +298,7 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
return -EINVAL;
- svm = ioasid_find(NULL, pasid, NULL);
+ svm = pasid_private_find(pasid);
if (IS_ERR(svm))
return PTR_ERR(svm);
@@ -237,15 +311,7 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
*/
if (WARN_ON(list_empty(&svm->devs)))
return -EINVAL;
-
- rcu_read_lock();
- list_for_each_entry_rcu(d, &svm->devs, list) {
- if (d->dev == dev) {
- sdev = d;
- break;
- }
- }
- rcu_read_unlock();
+ sdev = svm_lookup_device_by_dev(svm, dev);
out:
*rsvm = svm;
@@ -334,7 +400,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
svm->gpasid = data->gpasid;
svm->flags |= SVM_FLAG_GUEST_PASID;
}
- ioasid_set_data(data->hpasid, svm);
+ pasid_private_add(data->hpasid, svm);
INIT_LIST_HEAD_RCU(&svm->devs);
mmput(svm->mm);
}
@@ -388,7 +454,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
list_add_rcu(&sdev->list, &svm->devs);
out:
if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
- ioasid_set_data(data->hpasid, NULL);
+ pasid_private_remove(data->hpasid);
kfree(svm);
}
@@ -431,7 +497,7 @@ int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
* the unbind, IOMMU driver will get notified
* and perform cleanup.
*/
- ioasid_set_data(pasid, NULL);
+ pasid_private_remove(pasid);
kfree(svm);
}
}
@@ -459,79 +525,81 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
mutex_unlock(&mm->context.lock);
}
-/* Caller must hold pasid_mutex, mm reference */
-static int
-intel_svm_bind_mm(struct device *dev, unsigned int flags,
- struct mm_struct *mm, struct intel_svm_dev **sd)
+static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
+ unsigned int flags)
{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
- struct intel_svm *svm = NULL, *t;
- struct device_domain_info *info;
- struct intel_svm_dev *sdev;
- unsigned long iflags;
- int pasid_max;
- int ret;
+ ioasid_t max_pasid = dev_is_pci(dev) ?
+ pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id;
- if (!iommu || dmar_disabled)
- return -EINVAL;
+ return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
+}
- if (!intel_svm_capable(iommu))
- return -ENOTSUPP;
+static void intel_svm_free_pasid(struct mm_struct *mm)
+{
+ iommu_sva_free_pasid(mm);
+}
- if (dev_is_pci(dev)) {
- pasid_max = pci_max_pasids(to_pci_dev(dev));
- if (pasid_max < 0)
- return -EINVAL;
- } else
- pasid_max = 1 << 20;
+static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
+ struct device *dev,
+ struct mm_struct *mm,
+ unsigned int flags)
+{
+ struct device_domain_info *info = get_domain_info(dev);
+ unsigned long iflags, sflags;
+ struct intel_svm_dev *sdev;
+ struct intel_svm *svm;
+ int ret = 0;
- /* Bind supervisor PASID shuld have mm = NULL */
- if (flags & SVM_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap) || mm) {
- pr_err("Supervisor PASID with user provided mm.\n");
- return -EINVAL;
- }
- }
+ svm = pasid_private_find(mm->pasid);
+ if (!svm) {
+ svm = kzalloc(sizeof(*svm), GFP_KERNEL);
+ if (!svm)
+ return ERR_PTR(-ENOMEM);
- list_for_each_entry(t, &global_svm_list, list) {
- if (t->mm != mm)
- continue;
+ svm->pasid = mm->pasid;
+ svm->mm = mm;
+ svm->flags = flags;
+ INIT_LIST_HEAD_RCU(&svm->devs);
- svm = t;
- if (svm->pasid >= pasid_max) {
- dev_warn(dev,
- "Limited PASID width. Cannot use existing PASID %d\n",
- svm->pasid);
- ret = -ENOSPC;
- goto out;
+ if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) {
+ svm->notifier.ops = &intel_mmuops;
+ ret = mmu_notifier_register(&svm->notifier, mm);
+ if (ret) {
+ kfree(svm);
+ return ERR_PTR(ret);
+ }
}
- /* Find the matching device in svm list */
- for_each_svm_dev(sdev, svm, dev) {
- sdev->users++;
- goto success;
+ ret = pasid_private_add(svm->pasid, svm);
+ if (ret) {
+ if (svm->notifier.ops)
+ mmu_notifier_unregister(&svm->notifier, mm);
+ kfree(svm);
+ return ERR_PTR(ret);
}
+ }
- break;
+ /* Find the matching device in svm list */
+ sdev = svm_lookup_device_by_dev(svm, dev);
+ if (sdev) {
+ sdev->users++;
+ goto success;
}
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
ret = -ENOMEM;
- goto out;
+ goto free_svm;
}
+
sdev->dev = dev;
sdev->iommu = iommu;
-
- ret = intel_iommu_enable_pasid(iommu, dev);
- if (ret) {
- kfree(sdev);
- goto out;
- }
-
- info = get_domain_info(dev);
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->users = 1;
+ sdev->pasid = svm->pasid;
+ sdev->sva.dev = dev;
+ init_rcu_head(&sdev->rcu);
if (info->ats_enabled) {
sdev->dev_iotlb = 1;
sdev->qdep = info->ats_qdep;
@@ -539,95 +607,37 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
sdev->qdep = 0;
}
- /* Finish the setup now we know we're keeping it */
- sdev->users = 1;
- init_rcu_head(&sdev->rcu);
-
- if (!svm) {
- svm = kzalloc(sizeof(*svm), GFP_KERNEL);
- if (!svm) {
- ret = -ENOMEM;
- kfree(sdev);
- goto out;
- }
-
- if (pasid_max > intel_pasid_max_id)
- pasid_max = intel_pasid_max_id;
+ /* Setup the pasid table: */
+ sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
+ PASID_FLAG_SUPERVISOR_MODE : 0;
+ sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
+ spin_lock_irqsave(&iommu->lock, iflags);
+ ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
+ FLPT_DEFAULT_DID, sflags);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
- /* Do not use PASID 0, reserved for RID to PASID */
- svm->pasid = ioasid_alloc(NULL, PASID_MIN,
- pasid_max - 1, svm);
- if (svm->pasid == INVALID_IOASID) {
- kfree(svm);
- kfree(sdev);
- ret = -ENOSPC;
- goto out;
- }
- svm->notifier.ops = &intel_mmuops;
- svm->mm = mm;
- svm->flags = flags;
- INIT_LIST_HEAD_RCU(&svm->devs);
- INIT_LIST_HEAD(&svm->list);
- ret = -ENOMEM;
- if (mm) {
- ret = mmu_notifier_register(&svm->notifier, mm);
- if (ret) {
- ioasid_put(svm->pasid);
- kfree(svm);
- kfree(sdev);
- goto out;
- }
- }
+ if (ret)
+ goto free_sdev;
- spin_lock_irqsave(&iommu->lock, iflags);
- ret = intel_pasid_setup_first_level(iommu, dev,
- mm ? mm->pgd : init_mm.pgd,
- svm->pasid, FLPT_DEFAULT_DID,
- (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
- (cpu_feature_enabled(X86_FEATURE_LA57) ?
- PASID_FLAG_FL5LP : 0));
- spin_unlock_irqrestore(&iommu->lock, iflags);
- if (ret) {
- if (mm)
- mmu_notifier_unregister(&svm->notifier, mm);
- ioasid_put(svm->pasid);
- kfree(svm);
- kfree(sdev);
- goto out;
- }
+ /* The newly allocated pasid is loaded to the mm. */
+ if (!(flags & SVM_FLAG_SUPERVISOR_MODE) && list_empty(&svm->devs))
+ load_pasid(mm, svm->pasid);
- list_add_tail(&svm->list, &global_svm_list);
- if (mm) {
- /* The newly allocated pasid is loaded to the mm. */
- load_pasid(mm, svm->pasid);
- }
- } else {
- /*
- * Binding a new device with existing PASID, need to setup
- * the PASID entry.
- */
- spin_lock_irqsave(&iommu->lock, iflags);
- ret = intel_pasid_setup_first_level(iommu, dev,
- mm ? mm->pgd : init_mm.pgd,
- svm->pasid, FLPT_DEFAULT_DID,
- (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
- (cpu_feature_enabled(X86_FEATURE_LA57) ?
- PASID_FLAG_FL5LP : 0));
- spin_unlock_irqrestore(&iommu->lock, iflags);
- if (ret) {
- kfree(sdev);
- goto out;
- }
- }
list_add_rcu(&sdev->list, &svm->devs);
success:
- sdev->pasid = svm->pasid;
- sdev->sva.dev = dev;
- if (sd)
- *sd = sdev;
- ret = 0;
-out:
- return ret;
+ return &sdev->sva;
+
+free_sdev:
+ kfree(sdev);
+free_svm:
+ if (list_empty(&svm->devs)) {
+ if (svm->notifier.ops)
+ mmu_notifier_unregister(&svm->notifier, mm);
+ pasid_private_remove(mm->pasid);
+ kfree(svm);
+ }
+
+ return ERR_PTR(ret);
}
/* Caller must hold pasid_mutex */
@@ -636,6 +646,7 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
+ struct mm_struct *mm;
int ret = -EINVAL;
iommu = device_to_iommu(dev, NULL, NULL);
@@ -645,6 +656,7 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
if (ret)
goto out;
+ mm = svm->mm;
if (sdev) {
sdev->users--;
@@ -663,13 +675,12 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- ioasid_put(svm->pasid);
- if (svm->mm) {
- mmu_notifier_unregister(&svm->notifier, svm->mm);
+ if (svm->notifier.ops) {
+ mmu_notifier_unregister(&svm->notifier, mm);
/* Clear mm's pasid. */
- load_pasid(svm->mm, PASID_DISABLED);
+ load_pasid(mm, PASID_DISABLED);
}
- list_del(&svm->list);
+ pasid_private_remove(svm->pasid);
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
* If that is not obeyed, subtle errors will happen.
@@ -678,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
+ /* Drop a PASID reference and free it if no reference. */
+ intel_svm_free_pasid(mm);
}
out:
return ret;
@@ -714,22 +727,6 @@ struct page_req_dsc {
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
-{
- unsigned long requested = 0;
-
- if (req->exe_req)
- requested |= VM_EXEC;
-
- if (req->rd_req)
- requested |= VM_READ;
-
- if (req->wr_req)
- requested |= VM_WRITE;
-
- return (requested & ~vma->vm_flags) != 0;
-}
-
static bool is_canonical_address(u64 addr)
{
int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
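
is_canonical_address() relies on sign extension: shifting the address up to the top implemented virtual-address bit and arithmetically shifting it back must reproduce the original value. A stand-alone check assuming 48-bit virtual addresses (i.e. __VIRTUAL_MASK_SHIFT == 47, so shift == 16); like the kernel code it relies on the compiler implementing >> on signed values as an arithmetic shift:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool canonical48(uint64_t addr)
{
    int shift = 64 - 48;    /* 64 - (__VIRTUAL_MASK_SHIFT + 1) on 4-level x86-64 */

    return (uint64_t)((int64_t)(addr << shift) >> shift) == addr;
}

int main(void)
{
    printf("%d\n", canonical48(0x00007fffffffe000ULL));    /* 1: user half */
    printf("%d\n", canonical48(0xffff800000000000ULL));    /* 1: kernel half */
    printf("%d\n", canonical48(0x0000800000000000ULL));    /* 0: non-canonical hole */
    return 0;
}
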
@@ -799,6 +796,8 @@ prq_retry:
goto prq_retry;
}
+ iopf_queue_flush_dev(dev);
+
/*
* Perform steps described in VT-d spec CH7.10 to drain page
* requests and responses in hardware.
@@ -841,8 +840,8 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
return prot;
}
-static int
-intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
+static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
+ struct page_req_dsc *desc)
{
struct iommu_fault_event event;
@@ -872,159 +871,136 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
*/
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
- memcpy(event.fault.prm.private_data, desc->priv_data,
- sizeof(desc->priv_data));
+ event.fault.prm.private_data[0] = desc->priv_data[0];
+ event.fault.prm.private_data[1] = desc->priv_data[1];
+ } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
+ /*
+ * If the private data fields are not used by hardware, use them
+ * to monitor the PRQ handling latency.
+ */
+ event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
}
return iommu_report_device_fault(dev, &event);
}
+static void handle_bad_prq_event(struct intel_iommu *iommu,
+ struct page_req_dsc *req, int result)
+{
+ struct qi_desc desc;
+
+ pr_err("%s: Invalid page request: %08llx %08llx\n",
+ iommu->name, ((unsigned long long *)req)[0],
+ ((unsigned long long *)req)[1]);
+
+ /*
+ * Per VT-d spec. v3.0 ch7.7, system software must
+ * respond with page group response if private data
+ * is present (PDP) or last page in group (LPIG) bit
+ * is set. This is an additional VT-d feature beyond
+ * PCI ATS spec.
+ */
+ if (!req->lpig && !req->priv_data_present)
+ return;
+
+ desc.qw0 = QI_PGRP_PASID(req->pasid) |
+ QI_PGRP_DID(req->rid) |
+ QI_PGRP_PASID_P(req->pasid_present) |
+ QI_PGRP_PDP(req->priv_data_present) |
+ QI_PGRP_RESP_CODE(result) |
+ QI_PGRP_RESP_TYPE;
+ desc.qw1 = QI_PGRP_IDX(req->prg_index) |
+ QI_PGRP_LPIG(req->lpig);
+
+ if (req->priv_data_present) {
+ desc.qw2 = req->priv_data[0];
+ desc.qw3 = req->priv_data[1];
+ } else {
+ desc.qw2 = 0;
+ desc.qw3 = 0;
+ }
+
+ qi_submit_sync(iommu, &desc, 1, 0);
+}
+
static irqreturn_t prq_event_thread(int irq, void *d)
{
struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
- int head, tail, handled = 0;
- unsigned int flags = 0;
+ struct page_req_dsc *req;
+ int head, tail, handled;
+ u64 address;
- /* Clear PPR bit before reading head/tail registers, to
- * ensure that we get a new interrupt if needed. */
+ /*
+ * Clear PPR bit before reading head/tail registers, to ensure that
+ * we get a new interrupt if needed.
+ */
writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ handled = (head != tail);
while (head != tail) {
- struct vm_area_struct *vma;
- struct page_req_dsc *req;
- struct qi_desc resp;
- int result;
- vm_fault_t ret;
- u64 address;
-
- handled = 1;
req = &iommu->prq[head / sizeof(*req)];
- result = QI_RESP_INVALID;
address = (u64)req->addr << VTD_PAGE_SHIFT;
- if (!req->pasid_present) {
- pr_err("%s: Page request without PASID: %08llx %08llx\n",
- iommu->name, ((unsigned long long *)req)[0],
- ((unsigned long long *)req)[1]);
- goto no_pasid;
+
+ if (unlikely(!req->pasid_present)) {
+ pr_err("IOMMU: %s: Page request without PASID\n",
+ iommu->name);
+bad_req:
+ svm = NULL;
+ sdev = NULL;
+ handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
+ goto prq_advance;
}
- /* We shall not receive page request for supervisor SVM */
- if (req->pm_req && (req->rd_req | req->wr_req)) {
- pr_err("Unexpected page request in Privilege Mode");
- /* No need to find the matching sdev as for bad_req */
- goto no_pasid;
+
+ if (unlikely(!is_canonical_address(address))) {
+ pr_err("IOMMU: %s: Address is not canonical\n",
+ iommu->name);
+ goto bad_req;
}
- /* DMA read with exec requeset is not supported. */
- if (req->exe_req && req->rd_req) {
- pr_err("Execution request not supported\n");
- goto no_pasid;
+
+ if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
+ pr_err("IOMMU: %s: Page request in Privilege Mode\n",
+ iommu->name);
+ goto bad_req;
}
+
+ if (unlikely(req->exe_req && req->rd_req)) {
+ pr_err("IOMMU: %s: Execution request not supported\n",
+ iommu->name);
+ goto bad_req;
+ }
+
if (!svm || svm->pasid != req->pasid) {
- rcu_read_lock();
- svm = ioasid_find(NULL, req->pasid, NULL);
- /* It *can't* go away, because the driver is not permitted
+ /*
+ * It can't go away, because the driver is not permitted
* to unbind the mm while any page faults are outstanding.
- * So we only need RCU to protect the internal idr code. */
- rcu_read_unlock();
- if (IS_ERR_OR_NULL(svm)) {
- pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
- iommu->name, req->pasid, ((unsigned long long *)req)[0],
- ((unsigned long long *)req)[1]);
- goto no_pasid;
- }
+ */
+ svm = pasid_private_find(req->pasid);
+ if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
+ goto bad_req;
}
if (!sdev || sdev->sid != req->rid) {
- struct intel_svm_dev *t;
-
- sdev = NULL;
- rcu_read_lock();
- list_for_each_entry_rcu(t, &svm->devs, list) {
- if (t->sid == req->rid) {
- sdev = t;
- break;
- }
- }
- rcu_read_unlock();
+ sdev = svm_lookup_device_by_sid(svm, req->rid);
+ if (!sdev)
+ goto bad_req;
}
- /* Since we're using init_mm.pgd directly, we should never take
- * any faults on kernel addresses. */
- if (!svm->mm)
- goto bad_req;
-
- /* If address is not canonical, return invalid response */
- if (!is_canonical_address(address))
- goto bad_req;
+ sdev->prq_seq_number++;
/*
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (svm->flags & SVM_FLAG_GUEST_MODE) {
- if (sdev && !intel_svm_prq_report(sdev->dev, req))
- goto prq_advance;
- else
- goto bad_req;
- }
-
- /* If the mm is already defunct, don't handle faults. */
- if (!mmget_not_zero(svm->mm))
- goto bad_req;
-
- mmap_read_lock(svm->mm);
- vma = find_extend_vma(svm->mm, address);
- if (!vma || address < vma->vm_start)
- goto invalid;
-
- if (access_error(vma, req))
- goto invalid;
+ if (intel_svm_prq_report(iommu, sdev->dev, req))
+ handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- flags = FAULT_FLAG_USER | FAULT_FLAG_REMOTE;
- if (req->wr_req)
- flags |= FAULT_FLAG_WRITE;
-
- ret = handle_mm_fault(vma, address, flags, NULL);
- if (ret & VM_FAULT_ERROR)
- goto invalid;
-
- result = QI_RESP_SUCCESS;
-invalid:
- mmap_read_unlock(svm->mm);
- mmput(svm->mm);
-bad_req:
- /* We get here in the error case where the PASID lookup failed,
- and these can be NULL. Do not use them below this point! */
- sdev = NULL;
- svm = NULL;
-no_pasid:
- if (req->lpig || req->priv_data_present) {
- /*
- * Per VT-d spec. v3.0 ch7.7, system software must
- * respond with page group response if private data
- * is present (PDP) or last page in group (LPIG) bit
- * is set. This is an additional VT-d feature beyond
- * PCI ATS spec.
- */
- resp.qw0 = QI_PGRP_PASID(req->pasid) |
- QI_PGRP_DID(req->rid) |
- QI_PGRP_PASID_P(req->pasid_present) |
- QI_PGRP_PDP(req->priv_data_present) |
- QI_PGRP_RESP_CODE(result) |
- QI_PGRP_RESP_TYPE;
- resp.qw1 = QI_PGRP_IDX(req->prg_index) |
- QI_PGRP_LPIG(req->lpig);
- resp.qw2 = 0;
- resp.qw3 = 0;
-
- if (req->priv_data_present)
- memcpy(&resp.qw2, req->priv_data,
- sizeof(req->priv_data));
- qi_submit_sync(iommu, &resp, 1, 0);
- }
+ trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
+ req->priv_data[0], req->priv_data[1],
+ sdev->prq_seq_number);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -1041,6 +1017,7 @@ prq_advance:
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
if (head == tail) {
+ iopf_queue_discard_partial(iommu->iopf_queue);
writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
iommu->name);
@@ -1053,31 +1030,42 @@ prq_advance:
return IRQ_RETVAL(handled);
}
-#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
-struct iommu_sva *
-intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
- struct iommu_sva *sva = ERR_PTR(-EINVAL);
- struct intel_svm_dev *sdev = NULL;
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
unsigned int flags = 0;
+ struct iommu_sva *sva;
int ret;
- /*
- * TODO: Consolidate with generic iommu-sva bind after it is merged.
- * It will require shared SVM data structures, i.e. combine io_mm
- * and intel_svm etc.
- */
if (drvdata)
flags = *(unsigned int *)drvdata;
+
+ if (flags & SVM_FLAG_SUPERVISOR_MODE) {
+ if (!ecap_srs(iommu->ecap)) {
+ dev_err(dev, "%s: Supervisor PASID not supported\n",
+ iommu->name);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (mm) {
+ dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
+ iommu->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mm = &init_mm;
+ }
+
mutex_lock(&pasid_mutex);
- ret = intel_svm_bind_mm(dev, flags, mm, &sdev);
- if (ret)
- sva = ERR_PTR(ret);
- else if (sdev)
- sva = &sdev->sva;
- else
- WARN(!sdev, "SVM bind succeeded with no sdev!\n");
+ ret = intel_svm_alloc_pasid(dev, mm, flags);
+ if (ret) {
+ mutex_unlock(&pasid_mutex);
+ return ERR_PTR(ret);
+ }
+ sva = intel_svm_bind_mm(iommu, dev, mm, flags);
+ if (IS_ERR_OR_NULL(sva))
+ intel_svm_free_pasid(mm);
mutex_unlock(&pasid_mutex);
return sva;
@@ -1085,10 +1073,9 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
void intel_svm_unbind(struct iommu_sva *sva)
{
- struct intel_svm_dev *sdev;
+ struct intel_svm_dev *sdev = to_intel_svm_dev(sva);
mutex_lock(&pasid_mutex);
- sdev = to_intel_svm_dev(sva);
intel_svm_unbind_mm(sdev->dev, sdev->pasid);
mutex_unlock(&pasid_mutex);
}
@@ -1194,9 +1181,14 @@ int intel_svm_page_response(struct device *dev,
desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
desc.qw2 = 0;
desc.qw3 = 0;
- if (private_present)
- memcpy(&desc.qw2, prm->private_data,
- sizeof(prm->private_data));
+
+ if (private_present) {
+ desc.qw2 = prm->private_data[0];
+ desc.qw3 = prm->private_data[1];
+ } else if (prm->private_data[0]) {
+ dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
+ ktime_to_ns(ktime_get()) - prm->private_data[0]);
+ }
qi_submit_sync(iommu, &desc, 1, 0);
}
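
The report/response pair above reuses the otherwise-unused private-data words as a timestamp carrier: intel_svm_prq_report() stashes a ktime value when the fault is reported and intel_svm_page_response() computes the delta when the response is queued, which is what feeds the svm_prq row of the latency histogram. A stand-alone sketch of the same round trip, with CLOCK_MONOTONIC standing in for ktime_get() and a made-up message structure:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fault_msg {
    uint64_t private_data[2];    /* unused by "hardware" in this sketch */
};

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
    struct fault_msg msg = { { 0, 0 } };

    msg.private_data[0] = now_ns();    /* report side: stash the timestamp */

    /* ... the fault would be handled elsewhere ... */

    printf("handled in %llu ns\n",     /* response side: compute the delta */
           (unsigned long long)(now_ns() - msg.private_data[0]));
    return 0;
}
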
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 808ab70d5df5..63f0af10c403 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev)
struct iommu_group *group = dev->iommu_group;
struct group_device *tmp_device, *device = NULL;
+ if (!group)
+ return;
+
dev_info(dev, "Removing from iommu group %d\n", group->id);
/* Pre-notify listeners that a device is being removed. */
@@ -3059,9 +3062,6 @@ static int iommu_change_dev_def_domain(struct iommu_group *group,
int ret, dev_def_dom;
struct device *dev;
- if (!group)
- return -EINVAL;
-
mutex_lock(&group->mutex);
if (group->default_domain != group->domain) {
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7ecd5b08039..b6cf5f16123b 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -412,12 +412,11 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
return NULL;
}
-static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+static void remove_iova(struct iova_domain *iovad, struct iova *iova)
{
assert_spin_locked(&iovad->iova_rbtree_lock);
__cached_rbnode_delete_update(iovad, iova);
rb_erase(&iova->node, &iovad->rbroot);
- free_iova_mem(iova);
}
/**
@@ -452,8 +451,9 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
unsigned long flags;
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- private_free_iova(iovad, iova);
+ remove_iova(iovad, iova);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);
@@ -472,10 +472,13 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iova = private_find_iova(iovad, pfn);
- if (iova)
- private_free_iova(iovad, iova);
+ if (!iova) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return;
+ }
+ remove_iova(iovad, iova);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
+ free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(free_iova);
@@ -825,7 +828,8 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
if (WARN_ON(!iova))
continue;
- private_free_iova(iovad, iova);
+ remove_iova(iovad, iova);
+ free_iova_mem(iova);
}
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
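
Splitting private_free_iova() into remove_iova() plus an explicit free_iova_mem() means the rbtree manipulation and the memory release are no longer tied together, so callers such as __free_iova() and free_iova() can drop the rbtree lock before freeing the node. A stand-alone sketch of the same unlink-under-lock / free-outside-lock pattern, using a plain list and a pthread mutex purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *next;
    int val;
};

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold the lock: only unlinks, never frees. */
static struct node *remove_node(int val)
{
    struct node **pp, *n;

    for (pp = &head; (n = *pp) != NULL; pp = &n->next) {
        if (n->val == val) {
            *pp = n->next;
            return n;
        }
    }
    return NULL;
}

static void free_node(int val)
{
    struct node *n;

    pthread_mutex_lock(&lock);
    n = remove_node(val);
    pthread_mutex_unlock(&lock);
    free(n);    /* the free happens outside the lock */
}

int main(void)
{
    struct node *n = calloc(1, sizeof(*n));

    n->val = 42;
    pthread_mutex_lock(&lock);
    n->next = head;
    head = n;
    pthread_mutex_unlock(&lock);

    free_node(42);
    printf("list is %s\n", head ? "non-empty" : "empty");
    return 0;
}
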
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index aaa6a4d59057..51ea6f00db2f 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -19,7 +19,6 @@
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 7880f307cb2d..3a38352b603f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -18,7 +18,6 @@
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index e06b8a0e2b56..6f7c69688ce2 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 5915d7b38211..778e66f5f1aa 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -22,7 +22,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index a9d2df001149..5696314ae69e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -19,74 +19,6 @@
#define NO_IOMMU 1
-/**
- * of_get_dma_window - Parse *dma-window property and returns 0 if found.
- *
- * @dn: device node
- * @prefix: prefix for property name if any
- * @index: index to start to parse
- * @busno: Returns busno if supported. Otherwise pass NULL
- * @addr: Returns address that DMA starts
- * @size: Returns the range that DMA can handle
- *
- * This supports different formats flexibly. "prefix" can be
- * configured if any. "busno" and "index" are optionally
- * specified. Set 0(or NULL) if not used.
- */
-int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
- unsigned long *busno, dma_addr_t *addr, size_t *size)
-{
- const __be32 *dma_window, *end;
- int bytes, cur_index = 0;
- char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];
-
- if (!dn || !addr || !size)
- return -EINVAL;
-
- if (!prefix)
- prefix = "";
-
- snprintf(propname, sizeof(propname), "%sdma-window", prefix);
- snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
- snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);
-
- dma_window = of_get_property(dn, propname, &bytes);
- if (!dma_window)
- return -ENODEV;
- end = dma_window + bytes / sizeof(*dma_window);
-
- while (dma_window < end) {
- u32 cells;
- const void *prop;
-
- /* busno is one cell if supported */
- if (busno)
- *busno = be32_to_cpup(dma_window++);
-
- prop = of_get_property(dn, addrname, NULL);
- if (!prop)
- prop = of_get_property(dn, "#address-cells", NULL);
-
- cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
- if (!cells)
- return -EINVAL;
- *addr = of_read_number(dma_window, cells);
- dma_window += cells;
-
- prop = of_get_property(dn, sizename, NULL);
- cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
- if (!cells)
- return -EINVAL;
- *size = of_read_number(dma_window, cells);
- dma_window += cells;
-
- if (cur_index++ == index)
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_get_dma_window);
-
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 26e517eb0dd3..91749654fd49 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 7a2932772fdf..9febfb7f3025 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -21,7 +21,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -96,6 +95,15 @@ static const char * const rk_iommu_clocks[] = {
"aclk", "iface",
};
+struct rk_iommu_ops {
+ phys_addr_t (*pt_address)(u32 dte);
+ u32 (*mk_dtentries)(dma_addr_t pt_dma);
+ u32 (*mk_ptentries)(phys_addr_t page, int prot);
+ phys_addr_t (*dte_addr_phys)(u32 addr);
+ u32 (*dma_addr_dte)(dma_addr_t dt_dma);
+ u64 dma_bit_mask;
+};
+
struct rk_iommu {
struct device *dev;
void __iomem **bases;
@@ -116,6 +124,7 @@ struct rk_iommudata {
};
static struct device *dma_dev;
+static const struct rk_iommu_ops *rk_ops;
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
unsigned int count)
@@ -179,6 +188,33 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}
+/*
+ * In v2:
+ * 31:12 - PT address bit 31:0
+ * 11: 8 - PT address bit 35:32
+ * 7: 4 - PT address bit 39:36
+ * 3: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
+#define DTE_HI_MASK1 GENMASK(11, 8)
+#define DTE_HI_MASK2 GENMASK(7, 4)
+#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
+#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
+#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36)
+#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32)
+
+static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
+{
+ u64 dte_v2 = dte;
+
+ dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
+ ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
+ (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
+
+ return (phys_addr_t)dte_v2;
+}
+
static inline bool rk_dte_is_pt_valid(u32 dte)
{
return dte & RK_DTE_PT_VALID;
@@ -189,6 +225,15 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
+static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
+{
+ pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
+ ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
+ (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;
+
+ return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
+}
+
/*
* Each PTE has a Page address, some flags and a valid bit:
* +---------------------+---+-------+-+
@@ -215,11 +260,6 @@ static inline u32 rk_mk_dte(dma_addr_t pt_dma)
#define RK_PTE_PAGE_READABLE BIT(1)
#define RK_PTE_PAGE_VALID BIT(0)
-static inline phys_addr_t rk_pte_page_address(u32 pte)
-{
- return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
-}
-
static inline bool rk_pte_is_page_valid(u32 pte)
{
return pte & RK_PTE_PAGE_VALID;
@@ -235,6 +275,29 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
return page | flags | RK_PTE_PAGE_VALID;
}
+/*
+ * In v2:
+ * 31:12 - Page address bit 31:0
+ * 11:9 - Page address bit 34:32
+ * 8:4 - Page address bit 39:35
+ * 3 - Security
+ * 2 - Readable
+ * 1 - Writable
+ * 0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_READABLE_V2 BIT(2)
+#define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
+
+static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
+
+ return rk_mk_dte_v2(page) | flags;
+}
+
static u32 rk_mk_pte_invalid(u32 pte)
{
return pte & ~RK_PTE_PAGE_VALID;
@@ -448,10 +511,10 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
* and verifying that upper 5 nybbles are read back.
*/
for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+ dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
- dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
- if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
return -EFAULT;
}
@@ -470,6 +533,33 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
return 0;
}
+static inline phys_addr_t rk_dte_addr_phys(u32 addr)
+{
+ return (phys_addr_t)addr;
+}
+
+static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
+{
+ return dt_dma;
+}
+
+#define DT_HI_MASK GENMASK_ULL(39, 32)
+#define DTE_BASE_HI_MASK GENMASK(11, 4)
+#define DT_SHIFT 28
+
+static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
+{
+ u64 addr64 = addr;
+ return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+ ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
+}
+
+static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
+{
+ return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
+ ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
+}
+
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
void __iomem *base = iommu->bases[index];
@@ -489,7 +579,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
page_offset = rk_iova_page_offset(iova);
mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
- mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+ mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
@@ -498,14 +588,14 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
if (!rk_dte_is_pt_valid(dte))
goto print_it;
- pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
pte_addr = phys_to_virt(pte_addr_phys);
pte = *pte_addr;
if (!rk_pte_is_page_valid(pte))
goto print_it;
- page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_addr_phys = rk_ops->pt_address(pte) + page_offset;
page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
print_it:
@@ -601,13 +691,13 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
if (!rk_dte_is_pt_valid(dte))
goto out;
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
page_table = (u32 *)phys_to_virt(pt_phys);
pte = page_table[rk_iova_pte_index(iova)];
if (!rk_pte_is_page_valid(pte))
goto out;
- phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+ phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
@@ -679,14 +769,13 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
return ERR_PTR(-ENOMEM);
}
- dte = rk_mk_dte(pt_dma);
+ dte = rk_ops->mk_dtentries(pt_dma);
*dte_addr = dte;
- rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
rk_table_flush(rk_domain,
rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
return (u32 *)phys_to_virt(pt_phys);
}
@@ -728,7 +817,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
if (rk_pte_is_page_valid(pte))
goto unwind;
- pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+ pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
paddr += SPAGE_SIZE;
}
@@ -750,7 +839,7 @@ unwind:
pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
- page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ page_phys = rk_ops->pt_address(pte_addr[pte_count]);
pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
&iova, &page_phys, &paddr, prot);
@@ -785,7 +874,8 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
pte_index = rk_iova_pte_index(iova);
pte_addr = &page_table[pte_index];
- pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
+
+ pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
paddr, size, prot);
@@ -821,7 +911,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
return 0;
}
- pt_phys = rk_dte_pt_address(dte);
+ pt_phys = rk_ops->pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
@@ -879,7 +969,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
- rk_domain->dt_dma);
+ rk_ops->dma_addr_dte(rk_domain->dt_dma));
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -1004,8 +1094,6 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
goto err_free_dt;
}
- rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
-
spin_lock_init(&rk_domain->iommus_lock);
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
@@ -1037,7 +1125,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
for (i = 0; i < NUM_DT_ENTRIES; i++) {
u32 dte = rk_domain->dt[i];
if (rk_dte_is_pt_valid(dte)) {
- phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ phys_addr_t pt_phys = rk_ops->pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
@@ -1127,6 +1215,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ const struct rk_iommu_ops *ops;
int num_res = pdev->num_resources;
int err, i;
@@ -1138,6 +1227,17 @@ static int rk_iommu_probe(struct platform_device *pdev)
iommu->dev = dev;
iommu->num_mmu = 0;
+ ops = of_device_get_match_data(dev);
+ if (!rk_ops)
+ rk_ops = ops;
+
+ /*
+ * That should not happen unless different versions of the
+ * hardware block are embedded the same SoC
+ */
+ if (WARN_ON(rk_ops != ops))
+ return -EINVAL;
+
iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
GFP_KERNEL);
if (!iommu->bases)
@@ -1226,6 +1326,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
}
}
+ dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+
return 0;
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
@@ -1277,8 +1379,31 @@ static const struct dev_pm_ops rk_iommu_pm_ops = {
pm_runtime_force_resume)
};
+static struct rk_iommu_ops iommu_data_ops_v1 = {
+ .pt_address = &rk_dte_pt_address,
+ .mk_dtentries = &rk_mk_dte,
+ .mk_ptentries = &rk_mk_pte,
+ .dte_addr_phys = &rk_dte_addr_phys,
+ .dma_addr_dte = &rk_dma_addr_dte,
+ .dma_bit_mask = DMA_BIT_MASK(32),
+};
+
+static struct rk_iommu_ops iommu_data_ops_v2 = {
+ .pt_address = &rk_dte_pt_address_v2,
+ .mk_dtentries = &rk_mk_dte_v2,
+ .mk_ptentries = &rk_mk_pte_v2,
+ .dte_addr_phys = &rk_dte_addr_phys_v2,
+ .dma_addr_dte = &rk_dma_addr_dte_v2,
+ .dma_bit_mask = DMA_BIT_MASK(40),
+};
+
static const struct of_device_id rk_iommu_dt_ids[] = {
- { .compatible = "rockchip,iommu" },
+ { .compatible = "rockchip,iommu",
+ .data = &iommu_data_ops_v1,
+ },
+ { .compatible = "rockchip,rk3568-iommu",
+ .data = &iommu_data_ops_v2,
+ },
{ /* sentinel */ }
};
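For context on the rockchip-iommu.c rework above: the format-specific helpers now sit behind a struct rk_iommu_ops that probe() picks out of the OF match data, which is how the rk3568 (v2) descriptor layout coexists with the original one in a single driver. A generic sketch of that "ops table selected by compatible string" pattern, with invented names and a made-up v2 encoding (not the real Rockchip bit layout), could look like:

    #include <linux/dma-mapping.h>
    #include <linux/module.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct demo_ops {
        u32 (*mk_entry)(u64 addr);      /* format-specific encoder */
        u64 dma_bit_mask;
    };

    static u32 demo_mk_entry_v1(u64 addr)
    {
        return addr & 0xfffff000;       /* 32-bit only: keep bits 31:12 */
    }

    static u32 demo_mk_entry_v2(u64 addr)
    {
        /* Purely illustrative packing of a few high address bits. */
        return (addr & 0xfffff000) | ((addr >> 32) & 0xf);
    }

    static const struct demo_ops demo_ops_v1 = {
        .mk_entry     = demo_mk_entry_v1,
        .dma_bit_mask = DMA_BIT_MASK(32),
    };

    static const struct demo_ops demo_ops_v2 = {
        .mk_entry     = demo_mk_entry_v2,
        .dma_bit_mask = DMA_BIT_MASK(40),
    };

    static const struct of_device_id demo_dt_ids[] = {
        { .compatible = "vendor,demo-v1", .data = &demo_ops_v1 },
        { .compatible = "vendor,demo-v2", .data = &demo_ops_v2 },
        { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, demo_dt_ids);

    static int demo_probe(struct platform_device *pdev)
    {
        const struct demo_ops *ops = of_device_get_match_data(&pdev->dev);

        if (!ops)
            return -EINVAL;

        /* Widen the DMA mask only as far as this variant supports. */
        return dma_set_mask_and_coherent(&pdev->dev, ops->dma_bit_mask);
    }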
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1e98dc63ad13..0a281833f611 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -376,9 +376,9 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
if (client->swgroup != swgroup)
continue;
- value = smmu_readl(smmu, client->smmu.reg);
- value |= BIT(client->smmu.bit);
- smmu_writel(smmu, value, client->smmu.reg);
+ value = smmu_readl(smmu, client->regs.smmu.reg);
+ value |= BIT(client->regs.smmu.bit);
+ smmu_writel(smmu, value, client->regs.smmu.reg);
}
}
@@ -404,9 +404,9 @@ static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
if (client->swgroup != swgroup)
continue;
- value = smmu_readl(smmu, client->smmu.reg);
- value &= ~BIT(client->smmu.bit);
- smmu_writel(smmu, value, client->smmu.reg);
+ value = smmu_readl(smmu, client->regs.smmu.reg);
+ value &= ~BIT(client->regs.smmu.bit);
+ smmu_writel(smmu, value, client->regs.smmu.reg);
}
}
@@ -1042,9 +1042,9 @@ static int tegra_smmu_clients_show(struct seq_file *s, void *data)
const struct tegra_mc_client *client = &smmu->soc->clients[i];
const char *status;
- value = smmu_readl(smmu, client->smmu.reg);
+ value = smmu_readl(smmu, client->regs.smmu.reg);
- if (value & BIT(client->smmu.bit))
+ if (value & BIT(client->regs.smmu.bit))
status = "yes";
else
status = "no";
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index c6e5ee4d9cef..6abdcab7273b 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -10,11 +10,11 @@
#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -904,6 +904,15 @@ err_free_dev:
return ERR_PTR(ret);
}
+static void viommu_probe_finalize(struct device *dev)
+{
+#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
+ /* First clear the DMA ops in case we're switching from a DMA domain */
+ set_dma_ops(dev, NULL);
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
+#endif
+}
+
static void viommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -940,6 +949,7 @@ static struct iommu_ops viommu_ops = {
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.probe_device = viommu_probe_device,
+ .probe_finalize = viommu_probe_finalize,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index ec71063fff76..3461b0a7dc62 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
- * tpci200.c
- *
+/*
* driver for the TEWS TPCI-200 device
*
* Copyright (C) 2009-2012 CERN (www.cern.ch)
@@ -596,8 +594,11 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
out_err_bus_register:
tpci200_uninstall(tpci200);
+ /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
+ tpci200->info->cfg_regs = NULL;
out_err_install:
- iounmap(tpci200->info->cfg_regs);
+ if (tpci200->info->cfg_regs)
+ iounmap(tpci200->info->cfg_regs);
out_err_ioremap:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
out_err_pci_request:
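A brief note on the tpci200.c error path above: tpci200_uninstall() already unmaps cfg_regs, so clearing the pointer afterwards turns the later iounmap() in the shared unwind path into a no-op instead of a double unmap. The same "make teardown idempotent by nulling what you have released" idea in a tiny, self-contained form (plain userspace C, names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx {
        char *buf;
    };

    static void teardown_late(struct ctx *c)
    {
        free(c->buf);        /* releases the buffer ...             */
        c->buf = NULL;       /* ... and records that it is gone     */
    }

    static void teardown_early(struct ctx *c)
    {
        if (c->buf)          /* skipped if teardown_late() already ran */
            free(c->buf);
        c->buf = NULL;
    }

    int main(void)
    {
        struct ctx c = { .buf = malloc(16) };

        teardown_late(&c);   /* analogue of tpci200_uninstall()        */
        teardown_early(&c);  /* analogue of the out_err_install: label */
        puts("no double free");
        return 0;
    }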
diff --git a/drivers/ipack/carriers/tpci200.h b/drivers/ipack/carriers/tpci200.h
index 2619f827e33f..e79ac64abcff 100644
--- a/drivers/ipack/carriers/tpci200.h
+++ b/drivers/ipack/carriers/tpci200.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
- * tpci200.h
- *
+/*
* driver for the carrier TEWS TPCI-200
*
* Copyright (C) 2009-2012 CERN (www.cern.ch)
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 3940714e4397..20fa02c81070 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
- * ipoctal.c
- *
+/*
* driver for the GE IP-OCTAL boards
*
* Copyright (C) 2009-2012 CERN (www.cern.ch)
@@ -460,14 +458,14 @@ static int ipoctal_write_tty(struct tty_struct *tty,
return char_copied;
}
-static int ipoctal_write_room(struct tty_struct *tty)
+static unsigned int ipoctal_write_room(struct tty_struct *tty)
{
struct ipoctal_channel *channel = tty->driver_data;
return PAGE_SIZE - channel->nb_bytes;
}
-static int ipoctal_chars_in_buffer(struct tty_struct *tty)
+static unsigned int ipoctal_chars_in_buffer(struct tty_struct *tty)
{
struct ipoctal_channel *channel = tty->driver_data;
diff --git a/drivers/ipack/devices/ipoctal.h b/drivers/ipack/devices/ipoctal.h
index 75f83ba774a4..773dc41bd667 100644
--- a/drivers/ipack/devices/ipoctal.h
+++ b/drivers/ipack/devices/ipoctal.h
@@ -1,9 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
- * ipoctal.h
- *
+/*
* driver for the IPOCTAL boards
-
+ *
* Copyright (C) 2009-2012 CERN (www.cern.ch)
* Author: Nicolas Serafini, EIC2 SA
* Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0bbb0b2d0dd5..0c7ae71a0af0 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -127,7 +127,6 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
asmlinkage void __weak plat_irq_dispatch(void)
{
unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
- unsigned int virq;
int irq;
if (!pending) {
@@ -137,12 +136,15 @@ asmlinkage void __weak plat_irq_dispatch(void)
pending >>= CAUSEB_IP;
while (pending) {
+ struct irq_domain *d;
+
irq = fls(pending) - 1;
if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
- virq = irq_linear_revmap(ipi_domain, irq);
+ d = ipi_domain;
else
- virq = irq_linear_revmap(irq_domain, irq);
- do_IRQ(virq);
+ d = irq_domain;
+
+ do_domain_IRQ(d, irq);
pending &= ~BIT(irq);
}
}
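On the irq-mips-cpu.c conversion above (and the GIC and PIC32 ones below): instead of translating the hardware IRQ to a Linux virq with irq_linear_revmap() and then calling do_IRQ(), the dispatcher hands the domain plus hwirq straight to do_domain_IRQ(), which resolves and handles the interrupt in one step. A condensed before/after sketch of just the dispatch call, assuming only the helpers named in the patch itself:

    #include <linux/irqdomain.h>
    #include <asm/irq.h>

    /* Before: two steps - map the hwirq to a virq, then dispatch it. */
    static void demo_dispatch_old(struct irq_domain *d, unsigned int hwirq)
    {
        do_IRQ(irq_linear_revmap(d, hwirq));
    }

    /* After: one call; the domain lookup happens inside do_domain_IRQ(). */
    static void demo_dispatch_new(struct irq_domain *d, unsigned int hwirq)
    {
        do_domain_IRQ(d, hwirq);
    }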
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index b146e069bf5b..54c7092cc61d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -169,8 +169,8 @@ static void gic_handle_shared_int(bool chained)
generic_handle_domain_irq(gic_irq_domain,
GIC_SHARED_TO_HWIRQ(intr));
else
- do_IRQ(irq_find_mapping(gic_irq_domain,
- GIC_SHARED_TO_HWIRQ(intr)));
+ do_domain_IRQ(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
}
}
@@ -320,8 +320,8 @@ static void gic_handle_local_int(bool chained)
generic_handle_domain_irq(gic_irq_domain,
GIC_LOCAL_TO_HWIRQ(intr));
else
- do_IRQ(irq_find_mapping(gic_irq_domain,
- GIC_LOCAL_TO_HWIRQ(intr)));
+ do_domain_IRQ(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr));
}
}
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index 34c4b4ffacd1..1d9bb28d13e5 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -42,11 +42,10 @@ static void __iomem *evic_base;
asmlinkage void __weak plat_irq_dispatch(void)
{
- unsigned int irq, hwirq;
+ unsigned int hwirq;
hwirq = readl(evic_base + REG_INTSTAT) & 0xFF;
- irq = irq_linear_revmap(evic_irq_domain, hwirq);
- do_IRQ(irq);
+ do_domain_IRQ(evic_irq_domain, hwirq);
}
static struct evic_chip_data *irqd_to_priv(struct irq_data *data)
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index fdf87acccd06..d5f9261fa879 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1158,8 +1158,6 @@ static void capinc_tty_flush_chars(struct tty_struct *tty)
struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
- pr_debug("capinc_tty_flush_chars\n");
-
spin_lock_bh(&mp->outlock);
skb = mp->outskb;
if (skb) {
@@ -1175,18 +1173,18 @@ static void capinc_tty_flush_chars(struct tty_struct *tty)
handle_minor_recv(mp);
}
-static int capinc_tty_write_room(struct tty_struct *tty)
+static unsigned int capinc_tty_write_room(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
- int room;
+ unsigned int room;
room = CAPINC_MAX_SENDQUEUE-skb_queue_len(&mp->outqueue);
room *= CAPI_MAX_BLKSIZE;
- pr_debug("capinc_tty_write_room = %d\n", room);
+ pr_debug("capinc_tty_write_room = %u\n", room);
return room;
}
-static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int capinc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
@@ -1197,15 +1195,9 @@ static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
return mp->outbytes;
}
-static void capinc_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- pr_debug("capinc_tty_set_termios\n");
-}
-
static void capinc_tty_throttle(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
- pr_debug("capinc_tty_throttle\n");
mp->ttyinstop = 1;
}
@@ -1213,7 +1205,6 @@ static void capinc_tty_unthrottle(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
- pr_debug("capinc_tty_unthrottle\n");
mp->ttyinstop = 0;
handle_minor_recv(mp);
}
@@ -1222,7 +1213,6 @@ static void capinc_tty_stop(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
- pr_debug("capinc_tty_stop\n");
mp->ttyoutstop = 1;
}
@@ -1230,7 +1220,6 @@ static void capinc_tty_start(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
- pr_debug("capinc_tty_start\n");
mp->ttyoutstop = 0;
handle_minor_send(mp);
}
@@ -1239,26 +1228,9 @@ static void capinc_tty_hangup(struct tty_struct *tty)
{
struct capiminor *mp = tty->driver_data;
- pr_debug("capinc_tty_hangup\n");
tty_port_hangup(&mp->port);
}
-static int capinc_tty_break_ctl(struct tty_struct *tty, int state)
-{
- pr_debug("capinc_tty_break_ctl(%d)\n", state);
- return 0;
-}
-
-static void capinc_tty_flush_buffer(struct tty_struct *tty)
-{
- pr_debug("capinc_tty_flush_buffer\n");
-}
-
-static void capinc_tty_set_ldisc(struct tty_struct *tty)
-{
- pr_debug("capinc_tty_set_ldisc\n");
-}
-
static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
{
pr_debug("capinc_tty_send_xchar(%d)\n", ch);
@@ -1272,15 +1244,11 @@ static const struct tty_operations capinc_ops = {
.flush_chars = capinc_tty_flush_chars,
.write_room = capinc_tty_write_room,
.chars_in_buffer = capinc_tty_chars_in_buffer,
- .set_termios = capinc_tty_set_termios,
.throttle = capinc_tty_throttle,
.unthrottle = capinc_tty_unthrottle,
.stop = capinc_tty_stop,
.start = capinc_tty_start,
.hangup = capinc_tty_hangup,
- .break_ctl = capinc_tty_break_ctl,
- .flush_buffer = capinc_tty_flush_buffer,
- .set_ldisc = capinc_tty_set_ldisc,
.send_xchar = capinc_tty_send_xchar,
.install = capinc_tty_install,
.cleanup = capinc_tty_cleanup,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 49d99cb084db..bdf16180f5ff 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -199,6 +199,7 @@ config LEDS_LM3530
config LEDS_LM3532
tristate "LCD Backlight driver for LM3532"
+ select REGMAP_I2C
depends on LEDS_CLASS
depends on I2C
help
@@ -616,7 +617,6 @@ config LEDS_LT3593
tristate "LED driver for LT3593 controllers"
depends on LEDS_CLASS
depends on GPIOLIB || COMPILE_TEST
- depends on OF
help
This option enables support for LEDs driven by a Linear Technology
LT3593 controller. This controller uses a special one-wire pulse
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index 6a63846d10b5..7eb2f44f16be 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -7,7 +7,8 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
@@ -132,8 +133,7 @@ struct sso_led_priv {
struct regmap *mmap;
struct device *dev;
struct platform_device *pdev;
- struct clk *gclk;
- struct clk *fpid_clk;
+ struct clk_bulk_data clocks[2];
u32 fpid_clkrate;
u32 gptc_clkrate;
u32 freq[MAX_FREQ_RANK];
@@ -259,7 +259,7 @@ static void sso_led_brightness_set(struct led_classdev *led_cdev,
1 << desc->pin);
}
- if (!desc->hw_trig && led->gpiod)
+ if (!desc->hw_trig)
gpiod_set_value(led->gpiod, val);
}
@@ -423,7 +423,7 @@ static void sso_gpio_free(struct gpio_chip *chip, unsigned int offset)
static int sso_gpio_get_dir(struct gpio_chip *chip, unsigned int offset)
{
- return GPIOF_DIR_OUT;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int
@@ -763,12 +763,11 @@ static int sso_probe_gpios(struct sso_led_priv *priv)
return sso_gpio_gc_init(dev, priv);
}
-static void sso_clk_disable(void *data)
+static void sso_clock_disable_unprepare(void *data)
{
struct sso_led_priv *priv = data;
- clk_disable_unprepare(priv->fpid_clk);
- clk_disable_unprepare(priv->gclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
}
static int intel_sso_led_probe(struct platform_device *pdev)
@@ -785,36 +784,30 @@ static int intel_sso_led_probe(struct platform_device *pdev)
priv->dev = dev;
/* gate clock */
- priv->gclk = devm_clk_get(dev, "sso");
- if (IS_ERR(priv->gclk)) {
- dev_err(dev, "get sso gate clock failed!\n");
- return PTR_ERR(priv->gclk);
- }
+ priv->clocks[0].id = "sso";
+
+ /* fpid clock */
+ priv->clocks[1].id = "fpid";
- ret = clk_prepare_enable(priv->gclk);
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), priv->clocks);
if (ret) {
- dev_err(dev, "Failed to prepare/enable sso gate clock!\n");
+ dev_err(dev, "Getting clocks failed!\n");
return ret;
}
- priv->fpid_clk = devm_clk_get(dev, "fpid");
- if (IS_ERR(priv->fpid_clk)) {
- dev_err(dev, "Failed to get fpid clock!\n");
- return PTR_ERR(priv->fpid_clk);
- }
-
- ret = clk_prepare_enable(priv->fpid_clk);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
if (ret) {
- dev_err(dev, "Failed to prepare/enable fpid clock!\n");
+ dev_err(dev, "Failed to prepare and enable clocks!\n");
return ret;
}
- priv->fpid_clkrate = clk_get_rate(priv->fpid_clk);
- ret = devm_add_action_or_reset(dev, sso_clk_disable, priv);
- if (ret) {
- dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
+ ret = devm_add_action_or_reset(dev, sso_clock_disable_unprepare, priv);
+ if (ret)
return ret;
- }
+
+ priv->fpid_clkrate = clk_get_rate(priv->clocks[1].clk);
+
priv->mmap = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(priv->mmap)) {
@@ -859,8 +852,6 @@ static int intel_sso_led_remove(struct platform_device *pdev)
sso_led_shutdown(led);
}
- clk_disable_unprepare(priv->fpid_clk);
- clk_disable_unprepare(priv->gclk);
regmap_exit(priv->mmap);
return 0;
@@ -878,7 +869,7 @@ static struct platform_driver intel_sso_led_driver = {
.remove = intel_sso_led_remove,
.driver = {
.name = "lgm-ssoled",
- .of_match_table = of_match_ptr(of_sso_led_match),
+ .of_match_table = of_sso_led_match,
},
};
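The leds-lgm-sso.c probe rework above is a textbook clk_bulk conversion: the two individually managed clocks become an array of struct clk_bulk_data, fetched with devm_clk_bulk_get(), enabled with clk_bulk_prepare_enable(), and released through one devm action. A minimal sketch of that pattern with hypothetical consumer ids:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct demo_priv {
        struct clk_bulk_data clocks[2];
    };

    static void demo_clk_disable_unprepare(void *data)
    {
        struct demo_priv *priv = data;

        clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
    }

    static int demo_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        struct demo_priv *priv;
        int ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        priv->clocks[0].id = "bus";     /* hypothetical consumer ids */
        priv->clocks[1].id = "core";

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), priv->clocks);
        if (ret)
            return ret;

        ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
        if (ret)
            return ret;

        /* Undo the enable automatically on probe failure or unbind. */
        return devm_add_action_or_reset(dev, demo_clk_disable_unprepare, priv);
    }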
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 2e495ff67856..f704391d57a8 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -285,10 +285,6 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
if (!dev)
return ERR_PTR(-EINVAL);
- /* Not using device tree? */
- if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
- return ERR_PTR(-ENOTSUPP);
-
led = of_led_get(dev->of_node, index);
if (IS_ERR(led))
return led;
@@ -513,7 +509,7 @@ static int devm_led_classdev_match(struct device *dev, void *res, void *data)
/**
* devm_led_classdev_unregister() - resource managed led_classdev_unregister()
- * @parent: The device to unregister.
+ * @dev: The device to unregister.
* @led_cdev: the led_classdev structure for this device.
*/
void devm_led_classdev_unregister(struct device *dev,
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index e8922fa03379..aa3f82be0a9c 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -185,7 +185,7 @@ static int as3645a_read(struct as3645a *flash, u8 addr)
*/
/**
- * as3645a_set_config - Set flash configuration registers
+ * as3645a_set_current - Set flash configuration registers
* @flash: The flash
*
* Configure the hardware with flash, assist and indicator currents, as well as
@@ -545,6 +545,7 @@ static int as3645a_parse_node(struct as3645a *flash,
if (!flash->indicator_node) {
dev_warn(&flash->client->dev,
"can't find indicator node\n");
+ rval = -ENODEV;
goto out_err;
}
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 226d17d253ed..2d4d87957a30 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -93,7 +93,7 @@ static unsigned long bcm6328_led_read(void __iomem *reg)
#endif
}
-/**
+/*
* LEDMode 64 bits / 24 LEDs
* bits [31:0] -> LEDs 8-23
* bits [47:32] -> LEDs 0-7
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index b4e1fdff4186..bd7d0d5cf3b6 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -480,9 +480,8 @@ static int blinkm_led_blue_set(struct led_classdev *led_cdev,
static void blinkm_init_hw(struct i2c_client *client)
{
- int ret;
- ret = blinkm_transfer_hw(client, BLM_STOP_SCRIPT);
- ret = blinkm_transfer_hw(client, BLM_GO_RGB);
+ blinkm_transfer_hw(client, BLM_STOP_SCRIPT);
+ blinkm_transfer_hw(client, BLM_GO_RGB);
}
static int blinkm_test_run(struct i2c_client *client)
diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c
index 6ca47f2a2004..76b455e87574 100644
--- a/drivers/leds/leds-el15203000.c
+++ b/drivers/leds/leds-el15203000.c
@@ -68,8 +68,8 @@ enum el15203000_command {
};
struct el15203000_led {
- struct el15203000 *priv;
struct led_classdev ldev;
+ struct el15203000 *priv;
u32 reg;
};
@@ -82,6 +82,8 @@ struct el15203000 {
struct el15203000_led leds[];
};
+#define to_el15203000_led(d) container_of(d, struct el15203000_led, ldev)
+
static int el15203000_cmd(struct el15203000_led *led, u8 brightness)
{
int ret;
@@ -128,9 +130,7 @@ static int el15203000_cmd(struct el15203000_led *led, u8 brightness)
static int el15203000_set_blocking(struct led_classdev *ldev,
enum led_brightness brightness)
{
- struct el15203000_led *led = container_of(ldev,
- struct el15203000_led,
- ldev);
+ struct el15203000_led *led = to_el15203000_led(ldev);
return el15203000_cmd(led, brightness == LED_OFF ? EL_OFF : EL_ON);
}
@@ -139,9 +139,7 @@ static int el15203000_pattern_set_S(struct led_classdev *ldev,
struct led_pattern *pattern,
u32 len, int repeat)
{
- struct el15203000_led *led = container_of(ldev,
- struct el15203000_led,
- ldev);
+ struct el15203000_led *led = to_el15203000_led(ldev);
if (repeat > 0 || len != 2 ||
pattern[0].delta_t != 4000 || pattern[0].brightness != 0 ||
@@ -192,10 +190,8 @@ static int el15203000_pattern_set_P(struct led_classdev *ldev,
struct led_pattern *pattern,
u32 len, int repeat)
{
+ struct el15203000_led *led = to_el15203000_led(ldev);
u8 cmd;
- struct el15203000_led *led = container_of(ldev,
- struct el15203000_led,
- ldev);
if (repeat > 0)
return -EINVAL;
@@ -232,9 +228,7 @@ static int el15203000_pattern_set_P(struct led_classdev *ldev,
static int el15203000_pattern_clear(struct led_classdev *ldev)
{
- struct el15203000_led *led = container_of(ldev,
- struct el15203000_led,
- ldev);
+ struct el15203000_led *led = to_el15203000_led(ldev);
return el15203000_cmd(led, EL_OFF);
}
@@ -251,16 +245,13 @@ static int el15203000_probe_dt(struct el15203000 *priv)
ret = fwnode_property_read_u32(child, "reg", &led->reg);
if (ret) {
dev_err(priv->dev, "LED without ID number");
- fwnode_handle_put(child);
-
- break;
+ goto err_child_out;
}
if (led->reg > U8_MAX) {
dev_err(priv->dev, "LED value %d is invalid", led->reg);
- fwnode_handle_put(child);
-
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_child_out;
}
led->priv = priv;
@@ -282,14 +273,16 @@ static int el15203000_probe_dt(struct el15203000 *priv)
dev_err(priv->dev,
"failed to register LED device %s, err %d",
led->ldev.name, ret);
- fwnode_handle_put(child);
-
- break;
+ goto err_child_out;
}
led++;
}
+ return 0;
+
+err_child_out:
+ fwnode_handle_put(child);
return ret;
}
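Several of the LED patches in this series (el15203000 above; lm3532, lm3692x and lp50xx below) make the same correction: when a device_for_each_child_node() loop bails out early, the reference the iterator holds on the current child must be dropped exactly once, so the fwnode_handle_put() is centralised behind a single error label. A small sketch of that shape, with hypothetical properties and limits:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/property.h>

    static int demo_parse_children(struct device *dev)
    {
        struct fwnode_handle *child;
        u32 reg;
        int ret;

        device_for_each_child_node(dev, child) {
            ret = fwnode_property_read_u32(child, "reg", &reg);
            if (ret)
                goto err_child_out;

            if (reg > 7) {              /* hypothetical limit */
                ret = -EINVAL;
                goto err_child_out;
            }
        }

        return 0;

    err_child_out:
        fwnode_handle_put(child);       /* drop the reference held by the iterator */
        return ret;
    }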
diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c
index b9187e71e0cf..de3f12c2b80d 100644
--- a/drivers/leds/leds-gpio-register.c
+++ b/drivers/leds/leds-gpio-register.c
@@ -11,6 +11,7 @@
/**
* gpio_led_register_device - register a gpio-led device
* @pdata: the platform data used for the new device
+ * @id: platform ID
*
* Makes a copy of pdata and pdata->leds and registers a new leds-gpio device
* with the result. This allows to have pdata and pdata-leds in .init.rodata
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 2180255ad339..3b55af9a8c58 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -58,7 +58,8 @@ struct is31fl32xx_priv {
* @pwm_registers_reversed: : true if PWM registers count down instead of up
* @led_control_register_base : address of first LED control register (optional)
* @enable_bits_per_led_control_register: number of LEDs enable bits in each
- * @reset_func: : pointer to reset function
+ * @reset_func : pointer to reset function
+ * @sw_shutdown_func : pointer to software shutdown function
*
* For all optional register addresses, the sentinel value %IS31FL32XX_REG_NONE
* indicates that this chip has no such register.
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index 632f10db4b3f..f341da1503a4 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -256,6 +256,17 @@ static void ktd2692_setup(struct ktd2692_context *led)
| KTD2692_REG_FLASH_CURRENT_BASE);
}
+static void regulator_disable_action(void *_data)
+{
+ struct device *dev = _data;
+ struct ktd2692_context *led = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(dev, "Failed to disable supply: %d\n", ret);
+}
+
static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
struct ktd2692_led_config_data *cfg)
{
@@ -286,8 +297,14 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
if (led->regulator) {
ret = regulator_enable(led->regulator);
- if (ret)
+ if (ret) {
dev_err(dev, "Failed to enable supply: %d\n", ret);
+ } else {
+ ret = devm_add_action_or_reset(dev,
+ regulator_disable_action, dev);
+ if (ret)
+ return ret;
+ }
}
child_node = of_get_next_available_child(np, NULL);
@@ -377,17 +394,9 @@ static int ktd2692_probe(struct platform_device *pdev)
static int ktd2692_remove(struct platform_device *pdev)
{
struct ktd2692_context *led = platform_get_drvdata(pdev);
- int ret;
led_classdev_flash_unregister(&led->fled_cdev);
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&pdev->dev,
- "Failed to disable supply: %d\n", ret);
- }
-
mutex_destroy(&led->lock);
return 0;
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 2db455efd4b1..e72393534b72 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -99,7 +99,7 @@ static struct lm3530_mode_map mode_map[] = {
* @pdata: LM3530 platform data
* @mode: mode of operation - manual, ALS, PWM
* @regulator: regulator
- * @brighness: previous brightness value
+ * @brightness: previous brightness value
* @enable: regulator is enabled
*/
struct lm3530_data {
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 0bf25bdde02f..beb53040e09e 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -586,7 +586,6 @@ static int lm3532_parse_node(struct lm3532_data *priv)
ret = fwnode_property_read_u32(child, "reg", &control_bank);
if (ret) {
dev_err(&priv->client->dev, "reg property missing\n");
- fwnode_handle_put(child);
goto child_out;
}
@@ -601,7 +600,6 @@ static int lm3532_parse_node(struct lm3532_data *priv)
&led->mode);
if (ret) {
dev_err(&priv->client->dev, "ti,led-mode property missing\n");
- fwnode_handle_put(child);
goto child_out;
}
@@ -636,7 +634,6 @@ static int lm3532_parse_node(struct lm3532_data *priv)
led->num_leds);
if (ret) {
dev_err(&priv->client->dev, "led-sources property missing\n");
- fwnode_handle_put(child);
goto child_out;
}
@@ -647,7 +644,6 @@ static int lm3532_parse_node(struct lm3532_data *priv)
if (ret) {
dev_err(&priv->client->dev, "led register err: %d\n",
ret);
- fwnode_handle_put(child);
goto child_out;
}
@@ -655,14 +651,15 @@ static int lm3532_parse_node(struct lm3532_data *priv)
if (ret) {
dev_err(&priv->client->dev, "register init err: %d\n",
ret);
- fwnode_handle_put(child);
goto child_out;
}
i++;
}
+ return 0;
child_out:
+ fwnode_handle_put(child);
return ret;
}
diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
index aadb03468a40..e009b6d17915 100644
--- a/drivers/leds/leds-lm36274.c
+++ b/drivers/leds/leds-lm36274.c
@@ -7,9 +7,10 @@
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/leds-ti-lmu-common.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mfd/ti-lmu.h>
#include <linux/mfd/ti-lmu-register.h>
@@ -127,6 +128,7 @@ static int lm36274_probe(struct platform_device *pdev)
ret = lm36274_init(chip);
if (ret) {
+ fwnode_handle_put(init_data.fwnode);
dev_err(chip->dev, "Failed to init the device\n");
return ret;
}
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index e945de45388c..a02756d7ed8f 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -96,15 +96,15 @@
#define LM3692X_FAULT_FLAG_OPEN BIT(4)
/**
- * struct lm3692x_led -
- * @lock - Lock for reading/writing the device
- * @client - Pointer to the I2C client
- * @led_dev - LED class device pointer
- * @regmap - Devices register map
- * @enable_gpio - VDDIO/EN gpio to enable communication interface
- * @regulator - LED supply regulator pointer
- * @led_enable - LED sync to be enabled
- * @model_id - Current device model ID enumerated
+ * struct lm3692x_led
+ * @lock: Lock for reading/writing the device
+ * @client: Pointer to the I2C client
+ * @led_dev: LED class device pointer
+ * @regmap: Devices register map
+ * @enable_gpio: VDDIO/EN gpio to enable communication interface
+ * @regulator: LED supply regulator pointer
+ * @led_enable: LED sync to be enabled
+ * @model_id: Current device model ID enumerated
*/
struct lm3692x_led {
struct mutex lock;
@@ -435,6 +435,7 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
if (ret) {
+ fwnode_handle_put(child);
dev_err(&led->client->dev, "reg DT property missing\n");
return ret;
}
@@ -449,12 +450,11 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
ret = devm_led_classdev_register_ext(&led->client->dev, &led->led_dev,
&init_data);
- if (ret) {
+ if (ret)
dev_err(&led->client->dev, "led register err: %d\n", ret);
- return ret;
- }
- return 0;
+ fwnode_handle_put(init_data.fwnode);
+ return ret;
}
static int lm3692x_probe(struct i2c_client *client,
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index 7d216cdb91a8..970a4f34791b 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -47,6 +47,8 @@
* @lmu_data: Register and setting values for common code
* @control_bank: Control bank the LED is associated to. 0 is control bank A
* 1 is control bank B
+ * @enabled: LED brightness level (or LED_OFF)
+ * @num_leds: Number of LEDs available
*/
struct lm3697_led {
u32 hvled_strings[LM3697_MAX_LED_STRINGS];
@@ -68,6 +70,8 @@ struct lm3697_led {
* @dev: Pointer to the devices device struct
* @lock: Lock for reading/writing the device
* @leds: Array of LED strings
+ * @bank_cfg: OUTPUT_CONFIG register values
+ * @num_banks: Number of control banks
*/
struct lm3697 {
struct gpio_desc *enable_gpio;
@@ -203,11 +207,9 @@ static int lm3697_probe_dt(struct lm3697 *priv)
priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(priv->enable_gpio)) {
- ret = PTR_ERR(priv->enable_gpio);
- dev_err(dev, "Failed to get enable gpio: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->enable_gpio),
+ "Failed to get enable GPIO\n");
priv->regulator = devm_regulator_get(dev, "vled");
if (IS_ERR(priv->regulator))
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 838e6f19d37e..437c711b2a27 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -92,7 +92,7 @@ static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
}
/**
- * Set the period for DIM status
+ * lp3944_dim_set_period() - Set the period for DIM status
*
* @client: the i2c client
* @dim: either LP3944_DIM0 or LP3944_DIM1
@@ -123,7 +123,7 @@ static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
}
/**
- * Set the duty cycle for DIM status
+ * lp3944_dim_set_dutycycle - Set the duty cycle for DIM status
*
* @client: the i2c client
* @dim: either LP3944_DIM0 or LP3944_DIM1
@@ -155,7 +155,7 @@ static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
}
/**
- * Set the led status
+ * lp3944_led_set() - Set the led status
*
* @led: a lp3944_led_data structure
* @status: one of LP3944_LED_STATUS_OFF
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 06230614fdc5..401df1e2e05d 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -490,6 +490,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
ret = fwnode_property_read_u32(led_node, "color",
&color_id);
if (ret) {
+ fwnode_handle_put(led_node);
dev_err(priv->dev, "Cannot read color\n");
goto child_out;
}
@@ -512,7 +513,6 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
goto child_out;
}
i++;
- fwnode_handle_put(child);
}
return 0;
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 81de1346bf5d..d1657c46ee2f 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -694,7 +694,7 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
pdata->enable_gpiod = devm_gpiod_get_optional(dev, "enable",
- GPIOD_ASIS);
+ GPIOD_OUT_LOW);
if (IS_ERR(pdata->enable_gpiod))
return ERR_CAST(pdata->enable_gpiod);
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index f0533a337bc1..3c693d5e3b44 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -85,14 +85,14 @@
#define LP8860_NAME "lp8860"
/**
- * struct lp8860_led -
- * @lock - Lock for reading/writing the device
- * @client - Pointer to the I2C client
- * @led_dev - led class device pointer
- * @regmap - Devices register map
- * @eeprom_regmap - EEPROM register map
- * @enable_gpio - VDDIO/EN gpio to enable communication interface
- * @regulator - LED supply regulator pointer
+ * struct lp8860_led
+ * @lock: Lock for reading/writing the device
+ * @client: Pointer to the I2C client
+ * @led_dev: led class device pointer
+ * @regmap: Devices register map
+ * @eeprom_regmap: EEPROM register map
+ * @enable_gpio: VDDIO/EN gpio to enable communication interface
+ * @regulator: LED supply regulator pointer
*/
struct lp8860_led {
struct mutex lock;
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 68e06434ac08..3bb52d3165d9 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -7,8 +7,9 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#define LED_LT3593_NAME "lt3593"
@@ -68,9 +69,6 @@ static int lt3593_led_probe(struct platform_device *pdev)
struct led_init_data init_data = {};
const char *tmp;
- if (!dev_of_node(dev))
- return -ENODEV;
-
led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
if (!led_data)
return -ENOMEM;
@@ -119,7 +117,7 @@ static struct platform_driver lt3593_led_driver = {
.probe = lt3593_led_probe,
.driver = {
.name = "leds-lt3593",
- .of_match_table = of_match_ptr(of_lt3593_leds_match),
+ .of_match_table = of_lt3593_leds_match,
},
};
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
index f4721f8065f0..1355c84a2919 100644
--- a/drivers/leds/leds-mlxcpld.c
+++ b/drivers/leds/leds-mlxcpld.c
@@ -64,10 +64,10 @@
#define MLXCPLD_LED_BLINK_6HZ 83 /* ~83 msec off/on */
/**
- * mlxcpld_param - LED access parameters:
- * @offset - offset for LED access in CPLD device
- * @mask - mask for LED access in CPLD device
- * @base_color - base color code for LED
+ * struct mlxcpld_param - LED access parameters:
+ * @offset: offset for LED access in CPLD device
+ * @mask: mask for LED access in CPLD device
+ * @base_color: base color code for LED
**/
struct mlxcpld_param {
u8 offset;
@@ -76,9 +76,9 @@ struct mlxcpld_param {
};
/**
- * mlxcpld_led_priv - LED private data:
- * @cled - LED class device instance
- * @param - LED CPLD access parameters
+ * struct mlxcpld_led_priv - LED private data:
+ * @cled: LED class device instance
+ * @param: LED CPLD access parameters
**/
struct mlxcpld_led_priv {
struct led_classdev cdev;
@@ -88,12 +88,12 @@ struct mlxcpld_led_priv {
#define cdev_to_priv(c) container_of(c, struct mlxcpld_led_priv, cdev)
/**
- * mlxcpld_led_profile - system LED profile (defined per system class):
- * @offset - offset for LED access in CPLD device
- * @mask - mask for LED access in CPLD device
- * @base_color - base color code
- * @brightness - default brightness setting (on/off)
- * @name - LED name
+ * struct mlxcpld_led_profile - system LED profile (defined per system class):
+ * @offset: offset for LED access in CPLD device
+ * @mask: mask for LED access in CPLD device
+ * @base_color: base color code
+ * @brightness: default brightness setting (on/off)
+ * @name: LED name
**/
struct mlxcpld_led_profile {
u8 offset;
@@ -104,12 +104,12 @@ struct mlxcpld_led_profile {
};
/**
- * mlxcpld_led_pdata - system LED private data
- * @pdev - platform device pointer
- * @pled - LED class device instance
- * @profile - system configuration profile
- * @num_led_instances - number of LED instances
- * @lock - device access lock
+ * struct mlxcpld_led_pdata - system LED private data
+ * @pdev: platform device pointer
+ * @pled: LED class device instance
+ * @profile: system configuration profile
+ * @num_led_instances: number of LED instances
+ * @lock: device access lock
**/
struct mlxcpld_led_pdata {
struct platform_device *pdev;
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index 82aea1cd0c12..b7855c93bd72 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -28,10 +28,11 @@
* struct mlxreg_led_data - led control data:
*
* @data: led configuration data;
- * @led_classdev: led class data;
+ * @led_cdev: led class data;
* @base_color: base led color (other colors have constant offset from base);
* @led_data: led data;
* @data_parent: pointer to private device control data of parent;
+ * @led_cdev_name: class device name
*/
struct mlxreg_led_data {
struct mlxreg_core_data *data;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index f53f9309ca6c..d71e9fa5c8de 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -101,7 +101,7 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
{
struct fwnode_handle *fwnode;
struct led_pwm led;
- int ret = 0;
+ int ret;
memset(&led, 0, sizeof(led));
@@ -111,8 +111,8 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
led.name = to_of_node(fwnode)->name;
if (!led.name) {
- fwnode_handle_put(fwnode);
- return -EINVAL;
+ ret = EINVAL;
+ goto err_child_out;
}
led.active_low = fwnode_property_read_bool(fwnode,
@@ -121,12 +121,14 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
&led.max_brightness);
ret = led_pwm_add(dev, priv, &led, fwnode);
- if (ret) {
- fwnode_handle_put(fwnode);
- break;
- }
+ if (ret)
+ goto err_child_out;
}
+ return 0;
+
+err_child_out:
+ fwnode_handle_put(fwnode);
return ret;
}
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index 5b9dfdf743ec..cb7bd1353f9f 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -148,16 +148,20 @@ static int
tlc591xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct device_node *np = dev_of_node(&client->dev), *child;
+ struct device_node *np, *child;
struct device *dev = &client->dev;
const struct tlc591xx *tlc591xx;
struct tlc591xx_priv *priv;
int err, count, reg;
- tlc591xx = device_get_match_data(dev);
+ np = dev_of_node(dev);
if (!np)
return -ENODEV;
+ tlc591xx = device_get_match_data(dev);
+ if (!tlc591xx)
+ return -ENODEV;
+
count = of_get_available_child_count(np);
if (!count || count > tlc591xx->max_leds)
return -EINVAL;
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 2f9a289ab245..1adfed1c0619 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -274,6 +274,7 @@ static const struct i2c_device_id omnia_id[] = {
{ "omnia", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, omnia_id);
static struct i2c_driver omnia_leds_driver = {
.probe = omnia_leds_probe,
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 14ba7faaed9e..30bc9df03636 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -11,6 +11,7 @@
#include <linux/kernel_stat.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index fca62d503590..8af4f9bb9cde 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -43,7 +43,7 @@ static atomic_t num_active_cpus = ATOMIC_INIT(0);
/**
* ledtrig_cpu - emit a CPU event as a trigger
- * @evt: CPU event to be emitted
+ * @ledevt: CPU event to be emitted
*
* Emit a CPU event on a CPU core, which will trigger a
* bound LED to turn on or turn off.
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 36b6709afe9f..7fe0a05574d2 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/sched.h>
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
index 5751cd032f9d..64abf2e91608 100644
--- a/drivers/leds/trigger/ledtrig-panic.c
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/leds.h>
#include "../leds.h"
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index 506676754538..53decd89876e 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -105,17 +105,8 @@ out_put:
return ret;
}
-static struct resource sc24_fpga_resource = {
- .start = 0xe000e000,
- .end = 0xe000e000 + CHAM_HEADER_SIZE,
- .flags = IORESOURCE_MEM,
-};
-
-static struct resource sc31_fpga_resource = {
- .start = 0xf000e000,
- .end = 0xf000e000 + CHAM_HEADER_SIZE,
- .flags = IORESOURCE_MEM,
-};
+static struct resource sc24_fpga_resource = DEFINE_RES_MEM(0xe000e000, CHAM_HEADER_SIZE);
+static struct resource sc31_fpga_resource = DEFINE_RES_MEM(0xf000e000, CHAM_HEADER_SIZE);
static struct platform_driver mcb_lpc_driver = {
.driver = {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 51f2547c2007..3c44c4bb40fc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -474,8 +474,6 @@ static void raid1_end_write_request(struct bio *bio)
/*
* When the device is faulty, it is not necessary to
* handle write error.
- * For failfast, this is the only remaining device,
- * We need to retry the write without FailFast.
*/
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 16977e8e075d..07119d7e0fdf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -471,12 +471,12 @@ static void raid10_end_write_request(struct bio *bio)
/*
* When the device is faulty, it is not necessary to
* handle write error.
- * For failfast, this is the only remaining device,
- * We need to retry the write without FailFast.
*/
if (!test_bit(Faulty, &rdev->flags))
set_bit(R10BIO_WriteError, &r10_bio->state);
else {
+ /* Fail the request */
+ set_bit(R10BIO_Degraded, &r10_bio->state);
r10_bio->devs[slot].bio = NULL;
to_put = bio;
dec_rdev = 1;
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 02281d13505f..508ac295eb06 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -1573,6 +1573,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
struct media_request *req)
{
struct vb2_buffer *vb;
+ enum vb2_buffer_state orig_state;
int ret;
if (q->error) {
@@ -1673,6 +1674,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
* Add to the queued buffers list, a buffer will stay on it until
* dequeued in dqbuf.
*/
+ orig_state = vb->state;
list_add_tail(&vb->queued_entry, &q->queued_list);
q->queued_count++;
q->waiting_for_buffers = false;
@@ -1703,8 +1705,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
if (q->streaming && !q->start_streaming_called &&
q->queued_count >= q->min_buffers_needed) {
ret = vb2_start_streaming(q);
- if (ret)
+ if (ret) {
+ /*
+ * Since vb2_core_qbuf will return with an error,
+ * we should return it to state DEQUEUED since
+ * the error indicates that the buffer wasn't queued.
+ */
+ list_del(&vb->queued_entry);
+ q->queued_count--;
+ vb->state = orig_state;
return ret;
+ }
}
dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
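The videobuf2-core.c hunk above follows a simple remember/attempt/roll-back shape: the buffer state is saved before it is added to queued_list, and if vb2_start_streaming() fails the buffer is unlinked again and the saved state restored, so a failed qbuf leaves the queue untouched. The same shape reduced to a few lines of plain C (names invented):

    #include <stdbool.h>
    #include <stdio.h>

    enum buf_state { BUF_DEQUEUED, BUF_QUEUED };

    struct buf {
        enum buf_state state;
    };

    static bool start_streaming(void) { return false; /* pretend it fails */ }

    static int queue_buffer(struct buf *b)
    {
        enum buf_state orig_state = b->state;   /* remember */

        b->state = BUF_QUEUED;                  /* attempt */

        if (!start_streaming()) {
            b->state = orig_state;              /* roll back on failure */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct buf b = { .state = BUF_DEQUEUED };

        if (queue_buffer(&b))
            printf("qbuf failed, state restored: %s\n",
                   b.state == BUF_DEQUEUED ? "DEQUEUED" : "QUEUED");
        return 0;
    }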
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index 4657e99df033..59a36f922675 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled) {
- acpi_dev_put(adev);
+ if (!adev->status.enabled)
continue;
- }
if (bridge->n_sensors >= CIO2_NUM_PORTS) {
acpi_dev_put(adev);
@@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
}
sensor = &bridge->sensors[bridge->n_sensors];
- sensor->adev = adev;
strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
@@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
goto err_free_swnodes;
}
+ sensor->adev = acpi_dev_get(adev);
adev->fwnode.secondary = fwnode;
dev_info(&cio2->dev, "Found supported sensor %s\n",
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 07f342db6701..7481f553f959 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
com.cmd.hdr.Length = 6;
- memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
+ memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
com.in_len = 6;
com.out_len = 0;
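
This ngene-core.c change pairs with the ngene.h hunk that follows: the six buffer-length bytes are wrapped in a nested __packed struct named config, so the 6-byte memcpy() targets a member whose size equals the copy length rather than spanning several separate fields. A rough sketch of the same idea, with made-up field names:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative layout only; the names are placeholders. */
struct fw_free_buffers_cmd {
	u8 opcode;
	u8 length;
	struct {
		u8 uvi1_len;
		u8 uvi2_len;
		u8 tvo_len;
		u8 aud1_len;
		u8 aud2_len;
		u8 tva_len;
	} __packed config;
} __packed;

static void fill_free_buffers(struct fw_free_buffers_cmd *cmd, const u8 *cfg)
{
	/* sizeof(cmd->config) == 6, so the copy is exactly bounded */
	memcpy(&cmd->config, cfg, sizeof(cmd->config));
}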
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 84f04e0e0cb9..3d296f1998a1 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
struct FW_CONFIGURE_FREE_BUFFERS {
struct FW_HEADER hdr;
- u8 UVI1_BufferLength;
- u8 UVI2_BufferLength;
- u8 TVO_BufferLength;
- u8 AUD1_BufferLength;
- u8 AUD2_BufferLength;
- u8 TVA_BufferLength;
+ struct {
+ u8 UVI1_BufferLength;
+ u8 UVI2_BufferLength;
+ u8 TVO_BufferLength;
+ u8 AUD1_BufferLength;
+ u8 AUD2_BufferLength;
+ u8 TVA_BufferLength;
+ } __packed config;
} __attribute__ ((__packed__));
struct FW_CONFIGURE_UART {
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index 99b51213f871..dda2f27da317 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -8,6 +8,7 @@ config VIDEO_ATMEL_ISC
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
select V4L2_FWNODE
+ select VIDEO_ATMEL_ISC_BASE
help
This module makes the ATMEL Image Sensor Controller available
as a v4l2 device.
@@ -19,10 +20,17 @@ config VIDEO_ATMEL_XISC
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
select V4L2_FWNODE
+ select VIDEO_ATMEL_ISC_BASE
help
This module makes the ATMEL eXtended Image Sensor Controller
available as a v4l2 device.
+config VIDEO_ATMEL_ISC_BASE
+ tristate
+ default n
+ help
+ ATMEL ISC and XISC common code base.
+
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
depends on VIDEO_V4L2 && OF
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
index c5c01556c653..46d264ab7948 100644
--- a/drivers/media/platform/atmel/Makefile
+++ b/drivers/media/platform/atmel/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
-atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o
+atmel-isc-objs = atmel-sama5d2-isc.o
+atmel-xisc-objs = atmel-sama7g5-isc.o
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 19daa49bf604..136ab7cf36ed 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -378,6 +378,7 @@ int isc_clk_init(struct isc_device *isc)
return 0;
}
+EXPORT_SYMBOL_GPL(isc_clk_init);
void isc_clk_cleanup(struct isc_device *isc)
{
@@ -392,6 +393,7 @@ void isc_clk_cleanup(struct isc_device *isc)
clk_unregister(isc_clk->clk);
}
}
+EXPORT_SYMBOL_GPL(isc_clk_cleanup);
static int isc_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
@@ -1578,6 +1580,7 @@ irqreturn_t isc_interrupt(int irq, void *dev_id)
return ret;
}
+EXPORT_SYMBOL_GPL(isc_interrupt);
static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
{
@@ -2212,6 +2215,7 @@ const struct v4l2_async_notifier_operations isc_async_ops = {
.unbind = isc_async_unbind,
.complete = isc_async_complete,
};
+EXPORT_SYMBOL_GPL(isc_async_ops);
void isc_subdev_cleanup(struct isc_device *isc)
{
@@ -2224,6 +2228,7 @@ void isc_subdev_cleanup(struct isc_device *isc)
INIT_LIST_HEAD(&isc->subdev_entities);
}
+EXPORT_SYMBOL_GPL(isc_subdev_cleanup);
int isc_pipeline_init(struct isc_device *isc)
{
@@ -2264,6 +2269,7 @@ int isc_pipeline_init(struct isc_device *isc)
return 0;
}
+EXPORT_SYMBOL_GPL(isc_pipeline_init);
/* regmap configuration */
#define ATMEL_ISC_REG_MAX 0xd5c
@@ -2273,4 +2279,9 @@ const struct regmap_config isc_regmap_config = {
.val_bits = 32,
.max_register = ATMEL_ISC_REG_MAX,
};
+EXPORT_SYMBOL_GPL(isc_regmap_config);
+MODULE_AUTHOR("Songjun Wu");
+MODULE_AUTHOR("Eugen Hristev");
+MODULE_DESCRIPTION("Atmel ISC common code base");
+MODULE_LICENSE("GPL v2");
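
With atmel-isc-base.c now built as its own module (see the Kconfig and Makefile hunks above), every symbol the ISC and XISC front ends call has to be exported, and the file needs its own module metadata. The skeleton of such a shared code-base module looks roughly like this; the helper name is made up:

#include <linux/module.h>

int isc_shared_helper(void)		/* hypothetical shared helper */
{
	return 0;
}
EXPORT_SYMBOL_GPL(isc_shared_helper);	/* visible to the drivers that select this module */

MODULE_DESCRIPTION("Shared code base used by two driver front ends");
MODULE_LICENSE("GPL v2");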
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 1b24f5bfc4af..e55e411038f4 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -436,7 +436,7 @@ done:
static int fimc_is_request_firmware(struct fimc_is *is, const char *fw_name)
{
return request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG, fw_name, &is->pdev->dev,
+ FW_ACTION_UEVENT, fw_name, &is->pdev->dev,
GFP_KERNEL, is, fimc_is_load_firmware);
}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 83705730e37e..795a012d4020 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -37,7 +37,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
} else {
/* read */
requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
- pipe = usb_rcvctrlpipe(d->udev, 0);
+
+ /*
+ * Zero-length transfers must use usb_sndctrlpipe() and
+ * rtl28xxu_identify_state() uses a zero-length i2c read
+ * command to determine the chip type.
+ */
+ if (req->size)
+ pipe = usb_rcvctrlpipe(d->udev, 0);
+ else
+ pipe = usb_sndctrlpipe(d->udev, 0);
}
ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
@@ -612,9 +621,8 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d)
static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
{
struct rtl28xxu_dev *dev = d_to_priv(d);
- u8 buf[1];
int ret;
- struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf};
+ struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
dev_dbg(&d->intf->dev, "\n");
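
The rtl28xxu hunk above reflects a general rule: a control transfer with no data stage is treated as an OUT request by the USB stack, so a zero-length "read" must be submitted on the send pipe or the request can be rejected. A hedged sketch of a vendor-read helper applying the same check; the helper name, bRequest value of 0, and timeout are placeholders:

#include <linux/usb.h>

static int vendor_ctrl_read(struct usb_device *udev, u16 value, u16 index,
			    void *buf, u16 size)
{
	unsigned int pipe;

	/*
	 * A zero-length control transfer has no IN data stage and is
	 * handled as an OUT request, so it must use the send pipe.
	 */
	if (size)
		pipe = usb_rcvctrlpipe(udev, 0);
	else
		pipe = usb_sndctrlpipe(udev, 0);

	return usb_control_msg(udev, pipe, 0, USB_TYPE_VENDOR | USB_DIR_IN,
			       value, index, size ? buf : NULL, size, 1000);
}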
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 14386d0b5f57..c267283b01fd 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -600,8 +600,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
child);
ret = atmel_ebi_dev_disable(ebi, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
}
}
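
The atmel-ebi hunk above, and the stm32-fmc2-ebi hunk further down, fix the same leak: for_each_available_child_of_node() holds a reference on the current child and only drops it when the loop advances, so any early return must drop it explicitly. A minimal sketch with a hypothetical per-child setup callback:

#include <linux/of.h>

int setup_child(struct device_node *child);	/* hypothetical */

static int setup_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = setup_child(child);
		if (ret) {
			/* the iterator still holds a reference on 'child' */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}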
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index f7825eef5894..762d0c0f0716 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -41,7 +41,6 @@
* @node: node in the device list
* @base: base address of memory-mapped IO registers.
* @dev: device pointer.
- * @addressing table with addressing information from the spec
* @regs_cache: An array of 'struct emif_regs' that stores
* calculated register values for different
* frequencies, to avoid re-calculating them on
@@ -61,7 +60,6 @@ struct emif_data {
unsigned long irq_state;
void __iomem *base;
struct device *dev;
- const struct lpddr2_addressing *addressing;
struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES];
struct emif_regs *curr_regs;
struct emif_platform_data *plat_data;
@@ -72,7 +70,6 @@ struct emif_data {
static struct emif_data *emif1;
static DEFINE_SPINLOCK(emif_lock);
static unsigned long irq_state;
-static u32 t_ck; /* DDR clock period in ps */
static LIST_HEAD(device_list);
#ifdef CONFIG_DEBUG_FS
@@ -170,15 +167,6 @@ static inline void __exit emif_debugfs_exit(struct emif_data *emif)
#endif
/*
- * Calculate the period of DDR clock from frequency value
- */
-static void set_ddr_clk_period(u32 freq)
-{
- /* Divide 10^12 by frequency to get period in ps */
- t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq);
-}
-
-/*
* Get bus width used by EMIF. Note that this may be different from the
* bus width of the DDR devices used. For instance two 16-bit DDR devices
* may be connected to a given CS of EMIF. In this case bus width as far
@@ -196,19 +184,6 @@ static u32 get_emif_bus_width(struct emif_data *emif)
return width;
}
-/*
- * Get the CL from SDRAM_CONFIG register
- */
-static u32 get_cl(struct emif_data *emif)
-{
- u32 cl;
- void __iomem *base = emif->base;
-
- cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;
-
- return cl;
-}
-
static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
u32 temp;
@@ -328,203 +303,6 @@ static const struct lpddr2_addressing *get_addressing_table(
return &lpddr2_jedec_addressing_table[index];
}
-/*
- * Find the the right timing table from the array of timing
- * tables of the device using DDR clock frequency
- */
-static const struct lpddr2_timings *get_timings_table(struct emif_data *emif,
- u32 freq)
-{
- u32 i, min, max, freq_nearest;
- const struct lpddr2_timings *timings = NULL;
- const struct lpddr2_timings *timings_arr = emif->plat_data->timings;
- struct device *dev = emif->dev;
-
- /* Start with a very high frequency - 1GHz */
- freq_nearest = 1000000000;
-
- /*
- * Find the timings table such that:
- * 1. the frequency range covers the required frequency(safe) AND
- * 2. the max_freq is closest to the required frequency(optimal)
- */
- for (i = 0; i < emif->plat_data->timings_arr_size; i++) {
- max = timings_arr[i].max_freq;
- min = timings_arr[i].min_freq;
- if ((freq >= min) && (freq <= max) && (max < freq_nearest)) {
- freq_nearest = max;
- timings = &timings_arr[i];
- }
- }
-
- if (!timings)
- dev_err(dev, "%s: couldn't find timings for - %dHz\n",
- __func__, freq);
-
- dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n",
- __func__, freq, freq_nearest);
-
- return timings;
-}
-
-static u32 get_sdram_ref_ctrl_shdw(u32 freq,
- const struct lpddr2_addressing *addressing)
-{
- u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi;
-
- /* Scale down frequency and t_refi to avoid overflow */
- freq_khz = freq / 1000;
- t_refi = addressing->tREFI_ns / 100;
-
- /*
- * refresh rate to be set is 'tREFI(in us) * freq in MHz
- * division by 10000 to account for change in units
- */
- val = t_refi * freq_khz / 10000;
- ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT;
-
- return ref_ctrl_shdw;
-}
-
-static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings,
- const struct lpddr2_min_tck *min_tck,
- const struct lpddr2_addressing *addressing)
-{
- u32 tim1 = 0, val = 0;
-
- val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
- tim1 |= val << T_WTR_SHIFT;
-
- if (addressing->num_banks == B8)
- val = DIV_ROUND_UP(timings->tFAW, t_ck*4);
- else
- val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck));
- tim1 |= (val - 1) << T_RRD_SHIFT;
-
- val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1;
- tim1 |= val << T_RC_SHIFT;
-
- val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck));
- tim1 |= (val - 1) << T_RAS_SHIFT;
-
- val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
- tim1 |= val << T_WR_SHIFT;
-
- val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1;
- tim1 |= val << T_RCD_SHIFT;
-
- val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1;
- tim1 |= val << T_RP_SHIFT;
-
- return tim1;
-}
-
-static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings,
- const struct lpddr2_min_tck *min_tck,
- const struct lpddr2_addressing *addressing)
-{
- u32 tim1 = 0, val = 0;
-
- val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1;
- tim1 = val << T_WTR_SHIFT;
-
- /*
- * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps
- * to tFAW for de-rating
- */
- if (addressing->num_banks == B8) {
- val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1;
- } else {
- val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck);
- val = max(min_tck->tRRD, val) - 1;
- }
- tim1 |= val << T_RRD_SHIFT;
-
- val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck);
- tim1 |= (val - 1) << T_RC_SHIFT;
-
- val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck);
- val = max(min_tck->tRASmin, val) - 1;
- tim1 |= val << T_RAS_SHIFT;
-
- val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1;
- tim1 |= val << T_WR_SHIFT;
-
- val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck));
- tim1 |= (val - 1) << T_RCD_SHIFT;
-
- val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck));
- tim1 |= (val - 1) << T_RP_SHIFT;
-
- return tim1;
-}
-
-static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings,
- const struct lpddr2_min_tck *min_tck,
- const struct lpddr2_addressing *addressing,
- u32 type)
-{
- u32 tim2 = 0, val = 0;
-
- val = min_tck->tCKE - 1;
- tim2 |= val << T_CKE_SHIFT;
-
- val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1;
- tim2 |= val << T_RTP_SHIFT;
-
- /* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */
- val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1;
- tim2 |= val << T_XSNR_SHIFT;
-
- /* XSRD same as XSNR for LPDDR2 */
- tim2 |= val << T_XSRD_SHIFT;
-
- val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1;
- tim2 |= val << T_XP_SHIFT;
-
- return tim2;
-}
-
-static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings,
- const struct lpddr2_min_tck *min_tck,
- const struct lpddr2_addressing *addressing,
- u32 type, u32 ip_rev, u32 derated)
-{
- u32 tim3 = 0, val = 0, t_dqsck;
-
- val = timings->tRAS_max_ns / addressing->tREFI_ns - 1;
- val = val > 0xF ? 0xF : val;
- tim3 |= val << T_RAS_MAX_SHIFT;
-
- val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1;
- tim3 |= val << T_RFC_SHIFT;
-
- t_dqsck = (derated == EMIF_DERATED_TIMINGS) ?
- timings->tDQSCK_max_derated : timings->tDQSCK_max;
- if (ip_rev == EMIF_4D5)
- val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1;
- else
- val = DIV_ROUND_UP(t_dqsck, t_ck) - 1;
-
- tim3 |= val << T_TDQSCKMAX_SHIFT;
-
- val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1;
- tim3 |= val << ZQ_ZQCS_SHIFT;
-
- val = DIV_ROUND_UP(timings->tCKESR, t_ck);
- val = max(min_tck->tCKESR, val) - 1;
- tim3 |= val << T_CKESR_SHIFT;
-
- if (ip_rev == EMIF_4D5) {
- tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT;
-
- val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1;
- tim3 |= val << T_PDLL_UL_SHIFT;
- }
-
- return tim3;
-}
-
static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
bool cs1_used, bool cal_resistors_per_cs)
{
@@ -589,117 +367,6 @@ static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
return alert;
}
-static u32 get_read_idle_ctrl_shdw(u8 volt_ramp)
-{
- u32 idle = 0, val = 0;
-
- /*
- * Maximum value in normal conditions and increased frequency
- * when voltage is ramping
- */
- if (volt_ramp)
- val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1;
- else
- val = 0x1FF;
-
- /*
- * READ_IDLE_CTRL register in EMIF4D has same offset and fields
- * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts
- */
- idle |= val << DLL_CALIB_INTERVAL_SHIFT;
- idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT;
-
- return idle;
-}
-
-static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp)
-{
- u32 calib = 0, val = 0;
-
- if (volt_ramp == DDR_VOLTAGE_RAMPING)
- val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1;
- else
- val = 0; /* Disabled when voltage is stable */
-
- calib |= val << DLL_CALIB_INTERVAL_SHIFT;
- calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT;
-
- return calib;
-}
-
-static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings,
- u32 freq, u8 RL)
-{
- u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0;
-
- val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1;
- phy |= val << READ_LATENCY_SHIFT_4D;
-
- if (freq <= 100000000)
- val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY;
- else if (freq <= 200000000)
- val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY;
- else
- val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY;
-
- phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D;
-
- return phy;
-}
-
-static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
-{
- u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay;
-
- /*
- * DLL operates at 266 MHz. If DDR frequency is near 266 MHz,
- * half-delay is not needed else set half-delay
- */
- if (freq >= 265000000 && freq < 267000000)
- half_delay = 0;
- else
- half_delay = 1;
-
- phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5;
- phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
- t_ck) - 1) << READ_LATENCY_SHIFT_4D5);
-
- return phy;
-}
-
-static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
-{
- u32 fifo_we_slave_ratio;
-
- fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
-
- return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
- fifo_we_slave_ratio << 22;
-}
-
-static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
-{
- u32 fifo_we_slave_ratio;
-
- fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
-
- return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
- fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
-}
-
-static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
-{
- u32 fifo_we_slave_ratio;
-
- fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
-
- return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
- fifo_we_slave_ratio << 13;
-}
-
static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
u32 pwr_mgmt_ctrl = 0, timeout;
@@ -822,51 +489,6 @@ static void get_temperature_level(struct emif_data *emif)
}
/*
- * Program EMIF shadow registers that are not dependent on temperature
- * or voltage
- */
-static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
-{
- void __iomem *base = emif->base;
-
- writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
- writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
- writel(regs->pwr_mgmt_ctrl_shdw,
- base + EMIF_POWER_MANAGEMENT_CTRL_SHDW);
-
- /* Settings specific for EMIF4D5 */
- if (emif->plat_data->ip_rev != EMIF_4D5)
- return;
- writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
- writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
- writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
-}
-
-/*
- * When voltage ramps dll calibration and forced read idle should
- * happen more often
- */
-static void setup_volt_sensitive_regs(struct emif_data *emif,
- struct emif_regs *regs, u32 volt_state)
-{
- u32 calib_ctrl;
- void __iomem *base = emif->base;
-
- /*
- * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
- * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_*
- * is an alias of the respective read_idle_ctrl_shdw_* (members of
- * a union). So, the below code takes care of both cases
- */
- if (volt_state == DDR_VOLTAGE_RAMPING)
- calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
- else
- calib_ctrl = regs->dll_calib_ctrl_shdw_normal;
-
- writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
-}
-
-/*
* setup_temperature_sensitive_regs() - set the timings for temperature
* sensitive registers. This happens once at initialisation time based
* on the temperature at boot time and subsequently based on the temperature
@@ -1508,7 +1130,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
}
list_add(&emif->node, &device_list);
- emif->addressing = get_addressing_table(emif->plat_data->device_info);
/* Save pointers to each other in emif and device structures */
emif->dev = &pdev->dev;
@@ -1563,305 +1184,6 @@ static void emif_shutdown(struct platform_device *pdev)
disable_and_clear_all_interrupts(emif);
}
-static int get_emif_reg_values(struct emif_data *emif, u32 freq,
- struct emif_regs *regs)
-{
- u32 ip_rev, phy_type;
- u32 cl, type;
- const struct lpddr2_timings *timings;
- const struct lpddr2_min_tck *min_tck;
- const struct ddr_device_info *device_info;
- const struct lpddr2_addressing *addressing;
- struct emif_data *emif_for_calc;
- struct device *dev;
-
- dev = emif->dev;
- /*
- * If the devices on this EMIF instance is duplicate of EMIF1,
- * use EMIF1 details for the calculation
- */
- emif_for_calc = emif->duplicate ? emif1 : emif;
- timings = get_timings_table(emif_for_calc, freq);
- addressing = emif_for_calc->addressing;
- if (!timings || !addressing) {
- dev_err(dev, "%s: not enough data available for %dHz",
- __func__, freq);
- return -1;
- }
-
- device_info = emif_for_calc->plat_data->device_info;
- type = device_info->type;
- ip_rev = emif_for_calc->plat_data->ip_rev;
- phy_type = emif_for_calc->plat_data->phy_type;
-
- min_tck = emif_for_calc->plat_data->min_tck;
-
- set_ddr_clk_period(freq);
-
- regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing);
- regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck,
- addressing);
- regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck,
- addressing, type);
- regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck,
- addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);
-
- cl = get_cl(emif);
-
- if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
- regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
- timings, freq, cl);
- } else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
- regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
- regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
- regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
- regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
- } else {
- return -1;
- }
-
- /* Only timeout values in pwr_mgmt_ctrl_shdw register */
- regs->pwr_mgmt_ctrl_shdw =
- get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
- (CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);
-
- if (ip_rev & EMIF_4D) {
- regs->read_idle_ctrl_shdw_normal =
- get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);
-
- regs->read_idle_ctrl_shdw_volt_ramp =
- get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
- } else if (ip_rev & EMIF_4D5) {
- regs->dll_calib_ctrl_shdw_normal =
- get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);
-
- regs->dll_calib_ctrl_shdw_volt_ramp =
- get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
- }
-
- if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
- regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
- addressing);
-
- regs->sdram_tim1_shdw_derated =
- get_sdram_tim_1_shdw_derated(timings, min_tck,
- addressing);
-
- regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
- min_tck, addressing, type, ip_rev,
- EMIF_DERATED_TIMINGS);
- }
-
- regs->freq = freq;
-
- return 0;
-}
-
-/*
- * get_regs() - gets the cached emif_regs structure for a given EMIF instance
- * given frequency(freq):
- *
- * As an optimisation, every EMIF instance other than EMIF1 shares the
- * register cache with EMIF1 if the devices connected on this instance
- * are same as that on EMIF1(indicated by the duplicate flag)
- *
- * If we do not have an entry corresponding to the frequency given, we
- * allocate a new entry and calculate the values
- *
- * Upon finding the right reg dump, save it in curr_regs. It can be
- * directly used for thermal de-rating and voltage ramping changes.
- */
-static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
-{
- int i;
- struct emif_regs **regs_cache;
- struct emif_regs *regs = NULL;
- struct device *dev;
-
- dev = emif->dev;
- if (emif->curr_regs && emif->curr_regs->freq == freq) {
- dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
- return emif->curr_regs;
- }
-
- if (emif->duplicate)
- regs_cache = emif1->regs_cache;
- else
- regs_cache = emif->regs_cache;
-
- for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
- if (regs_cache[i]->freq == freq) {
- regs = regs_cache[i];
- dev_dbg(dev,
- "%s: reg dump found in reg cache for %u Hz\n",
- __func__, freq);
- break;
- }
- }
-
- /*
- * If we don't have an entry for this frequency in the cache create one
- * and calculate the values
- */
- if (!regs) {
- regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
- if (!regs)
- return NULL;
-
- if (get_emif_reg_values(emif, freq, regs)) {
- devm_kfree(emif->dev, regs);
- return NULL;
- }
-
- /*
- * Now look for an un-used entry in the cache and save the
- * newly created struct. If there are no free entries
- * over-write the last entry
- */
- for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
- ;
-
- if (i >= EMIF_MAX_NUM_FREQUENCIES) {
- dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
- __func__);
- i = EMIF_MAX_NUM_FREQUENCIES - 1;
- devm_kfree(emif->dev, regs_cache[i]);
- }
- regs_cache[i] = regs;
- }
-
- return regs;
-}
-
-static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
-{
- dev_dbg(emif->dev, "%s: voltage notification : %d", __func__,
- volt_state);
-
- if (!emif->curr_regs) {
- dev_err(emif->dev,
- "%s: volt-notify before registers are ready: %d\n",
- __func__, volt_state);
- return;
- }
-
- setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
-}
-
-/*
- * TODO: voltage notify handling should be hooked up to
- * regulator framework as soon as the necessary support
- * is available in mainline kernel. This function is un-used
- * right now.
- */
-static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
-{
- struct emif_data *emif;
-
- spin_lock_irqsave(&emif_lock, irq_state);
-
- list_for_each_entry(emif, &device_list, node)
- do_volt_notify_handling(emif, volt_state);
- do_freq_update();
-
- spin_unlock_irqrestore(&emif_lock, irq_state);
-}
-
-static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
-{
- struct emif_regs *regs;
-
- regs = get_regs(emif, new_freq);
- if (!regs)
- return;
-
- emif->curr_regs = regs;
-
- /*
- * Update the shadow registers:
- * Temperature and voltage-ramp sensitive settings are also configured
- * in terms of DDR cycles. So, we need to update them too when there
- * is a freq change
- */
- dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
- __func__, new_freq);
- setup_registers(emif, regs);
- setup_temperature_sensitive_regs(emif, regs);
- setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);
-
- /*
- * Part of workaround for errata i728. See do_freq_update()
- * for more details
- */
- if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
- set_lpmode(emif, EMIF_LP_MODE_DISABLE);
-}
-
-/*
- * TODO: frequency notify handling should be hooked up to
- * clock framework as soon as the necessary support is
- * available in mainline kernel. This function is un-used
- * right now.
- */
-static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq)
-{
- struct emif_data *emif;
-
- /*
- * NOTE: we are taking the spin-lock here and releases it
- * only in post-notifier. This doesn't look good and
- * Sparse complains about it, but this seems to be
- * un-avoidable. We need to lock a sequence of events
- * that is split between EMIF and clock framework.
- *
- * 1. EMIF driver updates EMIF timings in shadow registers in the
- * frequency pre-notify callback from clock framework
- * 2. clock framework sets up the registers for the new frequency
- * 3. clock framework initiates a hw-sequence that updates
- * the frequency EMIF timings synchronously.
- *
- * All these 3 steps should be performed as an atomic operation
- * vis-a-vis similar sequence in the EMIF interrupt handler
- * for temperature events. Otherwise, there could be race
- * conditions that could result in incorrect EMIF timings for
- * a given frequency
- */
- spin_lock_irqsave(&emif_lock, irq_state);
-
- list_for_each_entry(emif, &device_list, node)
- do_freq_pre_notify_handling(emif, new_freq);
-}
-
-static void do_freq_post_notify_handling(struct emif_data *emif)
-{
- /*
- * Part of workaround for errata i728. See do_freq_update()
- * for more details
- */
- if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
- set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
-}
-
-/*
- * TODO: frequency notify handling should be hooked up to
- * clock framework as soon as the necessary support is
- * available in mainline kernel. This function is un-used
- * right now.
- */
-static void __attribute__((unused)) freq_post_notify_handling(void)
-{
- struct emif_data *emif;
-
- list_for_each_entry(emif, &device_list, node)
- do_freq_post_notify_handling(emif);
-
- /*
- * Lock is done in pre-notify handler. See freq_pre_notify_handling()
- * for more details
- */
- spin_unlock_irqrestore(&emif_lock, irq_state);
-}
-
#if defined(CONFIG_OF)
static const struct of_device_id emif_of_match[] = {
{ .compatible = "ti,emif-4d" },
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 89f99b5b6450..d062c2f8250f 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -97,7 +97,6 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
iounmap(ctrl->gregs);
dev_set_drvdata(&dev->dev, NULL);
- kfree(ctrl);
return 0;
}
@@ -209,7 +208,8 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
- fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
+ fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev),
+ GFP_KERNEL);
if (!fsl_ifc_ctrl_dev)
return -ENOMEM;
@@ -219,8 +219,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
if (!fsl_ifc_ctrl_dev->gregs) {
dev_err(&dev->dev, "failed to get memory region\n");
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
@@ -295,6 +294,7 @@ err_irq:
free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
err:
+ iounmap(fsl_ifc_ctrl_dev->gregs);
return ret;
}
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index 9c0a28416777..f84b98278745 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -8,263 +8,22 @@
*/
#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/pl353-smc.h>
#include <linux/amba/bus.h>
-/* Register definitions */
-#define PL353_SMC_MEMC_STATUS_OFFS 0 /* Controller status reg, RO */
-#define PL353_SMC_CFG_CLR_OFFS 0xC /* Clear config reg, WO */
-#define PL353_SMC_DIRECT_CMD_OFFS 0x10 /* Direct command reg, WO */
-#define PL353_SMC_SET_CYCLES_OFFS 0x14 /* Set cycles register, WO */
-#define PL353_SMC_SET_OPMODE_OFFS 0x18 /* Set opmode register, WO */
-#define PL353_SMC_ECC_STATUS_OFFS 0x400 /* ECC status register */
-#define PL353_SMC_ECC_MEMCFG_OFFS 0x404 /* ECC mem config reg */
-#define PL353_SMC_ECC_MEMCMD1_OFFS 0x408 /* ECC mem cmd1 reg */
-#define PL353_SMC_ECC_MEMCMD2_OFFS 0x40C /* ECC mem cmd2 reg */
-#define PL353_SMC_ECC_VALUE0_OFFS 0x418 /* ECC value 0 reg */
-
-/* Controller status register specific constants */
-#define PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT 6
-
-/* Clear configuration register specific constants */
-#define PL353_SMC_CFG_CLR_INT_CLR_1 0x10
-#define PL353_SMC_CFG_CLR_ECC_INT_DIS_1 0x40
-#define PL353_SMC_CFG_CLR_INT_DIS_1 0x2
-#define PL353_SMC_CFG_CLR_DEFAULT_MASK (PL353_SMC_CFG_CLR_INT_CLR_1 | \
- PL353_SMC_CFG_CLR_ECC_INT_DIS_1 | \
- PL353_SMC_CFG_CLR_INT_DIS_1)
-
-/* Set cycles register specific constants */
-#define PL353_SMC_SET_CYCLES_T0_MASK 0xF
-#define PL353_SMC_SET_CYCLES_T0_SHIFT 0
-#define PL353_SMC_SET_CYCLES_T1_MASK 0xF
-#define PL353_SMC_SET_CYCLES_T1_SHIFT 4
-#define PL353_SMC_SET_CYCLES_T2_MASK 0x7
-#define PL353_SMC_SET_CYCLES_T2_SHIFT 8
-#define PL353_SMC_SET_CYCLES_T3_MASK 0x7
-#define PL353_SMC_SET_CYCLES_T3_SHIFT 11
-#define PL353_SMC_SET_CYCLES_T4_MASK 0x7
-#define PL353_SMC_SET_CYCLES_T4_SHIFT 14
-#define PL353_SMC_SET_CYCLES_T5_MASK 0x7
-#define PL353_SMC_SET_CYCLES_T5_SHIFT 17
-#define PL353_SMC_SET_CYCLES_T6_MASK 0xF
-#define PL353_SMC_SET_CYCLES_T6_SHIFT 20
-
-/* ECC status register specific constants */
-#define PL353_SMC_ECC_STATUS_BUSY BIT(6)
-#define PL353_SMC_ECC_REG_SIZE_OFFS 4
-
-/* ECC memory config register specific constants */
-#define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
-#define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2
-#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0x3
-
-#define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \
- (2 << 21)) /* UpdateRegs operation */
-
-#define PL353_NAND_ECC_CMD1 ((0x80) | /* Write command */ \
- (0 << 8) | /* Read command */ \
- (0x30 << 16) | /* Read End command */ \
- (1 << 24)) /* Read End command calid */
-
-#define PL353_NAND_ECC_CMD2 ((0x85) | /* Write col change cmd */ \
- (5 << 8) | /* Read col change cmd */ \
- (0xE0 << 16) | /* Read col change end cmd */ \
- (1 << 24)) /* Read col change end cmd valid */
-#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ)
/**
* struct pl353_smc_data - Private smc driver structure
* @memclk: Pointer to the peripheral clock
- * @aclk: Pointer to the APER clock
+ * @aclk: Pointer to the AXI peripheral clock
*/
struct pl353_smc_data {
struct clk *memclk;
struct clk *aclk;
};
-/* SMC virtual register base */
-static void __iomem *pl353_smc_base;
-
-/**
- * pl353_smc_set_buswidth - Set memory buswidth
- * @bw: Memory buswidth (8 | 16)
- * Return: 0 on success or negative errno.
- */
-int pl353_smc_set_buswidth(unsigned int bw)
-{
- if (bw != PL353_SMC_MEM_WIDTH_8 && bw != PL353_SMC_MEM_WIDTH_16)
- return -EINVAL;
-
- writel(bw, pl353_smc_base + PL353_SMC_SET_OPMODE_OFFS);
- writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base +
- PL353_SMC_DIRECT_CMD_OFFS);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pl353_smc_set_buswidth);
-
-/**
- * pl353_smc_set_cycles - Set memory timing parameters
- * @timings: NAND controller timing parameters
- *
- * Sets NAND chip specific timing parameters.
- */
-void pl353_smc_set_cycles(u32 timings[])
-{
- /*
- * Set write pulse timing. This one is easy to extract:
- *
- * NWE_PULSE = tWP
- */
- timings[0] &= PL353_SMC_SET_CYCLES_T0_MASK;
- timings[1] = (timings[1] & PL353_SMC_SET_CYCLES_T1_MASK) <<
- PL353_SMC_SET_CYCLES_T1_SHIFT;
- timings[2] = (timings[2] & PL353_SMC_SET_CYCLES_T2_MASK) <<
- PL353_SMC_SET_CYCLES_T2_SHIFT;
- timings[3] = (timings[3] & PL353_SMC_SET_CYCLES_T3_MASK) <<
- PL353_SMC_SET_CYCLES_T3_SHIFT;
- timings[4] = (timings[4] & PL353_SMC_SET_CYCLES_T4_MASK) <<
- PL353_SMC_SET_CYCLES_T4_SHIFT;
- timings[5] = (timings[5] & PL353_SMC_SET_CYCLES_T5_MASK) <<
- PL353_SMC_SET_CYCLES_T5_SHIFT;
- timings[6] = (timings[6] & PL353_SMC_SET_CYCLES_T6_MASK) <<
- PL353_SMC_SET_CYCLES_T6_SHIFT;
- timings[0] |= timings[1] | timings[2] | timings[3] |
- timings[4] | timings[5] | timings[6];
-
- writel(timings[0], pl353_smc_base + PL353_SMC_SET_CYCLES_OFFS);
- writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base +
- PL353_SMC_DIRECT_CMD_OFFS);
-}
-EXPORT_SYMBOL_GPL(pl353_smc_set_cycles);
-
-/**
- * pl353_smc_ecc_is_busy - Read ecc busy flag
- * Return: the ecc_status bit from the ecc_status register. 1 = busy, 0 = idle
- */
-bool pl353_smc_ecc_is_busy(void)
-{
- return ((readl(pl353_smc_base + PL353_SMC_ECC_STATUS_OFFS) &
- PL353_SMC_ECC_STATUS_BUSY) == PL353_SMC_ECC_STATUS_BUSY);
-}
-EXPORT_SYMBOL_GPL(pl353_smc_ecc_is_busy);
-
-/**
- * pl353_smc_get_ecc_val - Read ecc_valueN registers
- * @ecc_reg: Index of the ecc_value reg (0..3)
- * Return: the content of the requested ecc_value register.
- *
- * There are four valid ecc_value registers. The argument is truncated to stay
- * within this valid boundary.
- */
-u32 pl353_smc_get_ecc_val(int ecc_reg)
-{
- u32 addr, reg;
-
- addr = PL353_SMC_ECC_VALUE0_OFFS +
- (ecc_reg * PL353_SMC_ECC_REG_SIZE_OFFS);
- reg = readl(pl353_smc_base + addr);
-
- return reg;
-}
-EXPORT_SYMBOL_GPL(pl353_smc_get_ecc_val);
-
-/**
- * pl353_smc_get_nand_int_status_raw - Get NAND interrupt status bit
- * Return: the raw_int_status1 bit from the memc_status register
- */
-int pl353_smc_get_nand_int_status_raw(void)
-{
- u32 reg;
-
- reg = readl(pl353_smc_base + PL353_SMC_MEMC_STATUS_OFFS);
- reg >>= PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT;
- reg &= 1;
-
- return reg;
-}
-EXPORT_SYMBOL_GPL(pl353_smc_get_nand_int_status_raw);
-
-/**
- * pl353_smc_clr_nand_int - Clear NAND interrupt
- */
-void pl353_smc_clr_nand_int(void)
-{
- writel(PL353_SMC_CFG_CLR_INT_CLR_1,
- pl353_smc_base + PL353_SMC_CFG_CLR_OFFS);
-}
-EXPORT_SYMBOL_GPL(pl353_smc_clr_nand_int);
-
-/**
- * pl353_smc_set_ecc_mode - Set SMC ECC mode
- * @mode: ECC mode (BYPASS, APB, MEM)
- * Return: 0 on success or negative errno.
- */
-int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode)
-{
- u32 reg;
- int ret = 0;
-
- switch (mode) {
- case PL353_SMC_ECCMODE_BYPASS:
- case PL353_SMC_ECCMODE_APB:
- case PL353_SMC_ECCMODE_MEM:
-
- reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
- reg &= ~PL353_SMC_ECC_MEMCFG_MODE_MASK;
- reg |= mode << PL353_SMC_ECC_MEMCFG_MODE_SHIFT;
- writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
-
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_mode);
-
-/**
- * pl353_smc_set_ecc_pg_size - Set SMC ECC page size
- * @pg_sz: ECC page size
- * Return: 0 on success or negative errno.
- */
-int pl353_smc_set_ecc_pg_size(unsigned int pg_sz)
-{
- u32 reg, sz;
-
- switch (pg_sz) {
- case 0:
- sz = 0;
- break;
- case SZ_512:
- sz = 1;
- break;
- case SZ_1K:
- sz = 2;
- break;
- case SZ_2K:
- sz = 3;
- break;
- default:
- return -EINVAL;
- }
-
- reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
- reg &= ~PL353_SMC_ECC_MEMCFG_PGSIZE_MASK;
- reg |= sz;
- writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_pg_size);
-
static int __maybe_unused pl353_smc_suspend(struct device *dev)
{
struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev);
@@ -277,8 +36,8 @@ static int __maybe_unused pl353_smc_suspend(struct device *dev)
static int __maybe_unused pl353_smc_resume(struct device *dev)
{
- int ret;
struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev);
+ int ret;
ret = clk_enable(pl353_smc->aclk);
if (ret) {
@@ -296,77 +55,31 @@ static int __maybe_unused pl353_smc_resume(struct device *dev)
return ret;
}
-static struct amba_driver pl353_smc_driver;
-
static SIMPLE_DEV_PM_OPS(pl353_smc_dev_pm_ops, pl353_smc_suspend,
pl353_smc_resume);
-/**
- * pl353_smc_init_nand_interface - Initialize the NAND interface
- * @adev: Pointer to the amba_device struct
- * @nand_node: Pointer to the pl353_nand device_node struct
- */
-static void pl353_smc_init_nand_interface(struct amba_device *adev,
- struct device_node *nand_node)
-{
- unsigned long timeout;
-
- pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
- writel(PL353_SMC_CFG_CLR_INT_CLR_1,
- pl353_smc_base + PL353_SMC_CFG_CLR_OFFS);
- writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base +
- PL353_SMC_DIRECT_CMD_OFFS);
-
- timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT;
- /* Wait till the ECC operation is complete */
- do {
- if (pl353_smc_ecc_is_busy())
- cpu_relax();
- else
- break;
- } while (!time_after_eq(jiffies, timeout));
-
- if (time_after_eq(jiffies, timeout))
- return;
-
- writel(PL353_NAND_ECC_CMD1,
- pl353_smc_base + PL353_SMC_ECC_MEMCMD1_OFFS);
- writel(PL353_NAND_ECC_CMD2,
- pl353_smc_base + PL353_SMC_ECC_MEMCMD2_OFFS);
-}
-
static const struct of_device_id pl353_smc_supported_children[] = {
{
.compatible = "cfi-flash"
},
{
.compatible = "arm,pl353-nand-r2p1",
- .data = pl353_smc_init_nand_interface
},
{}
};
static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
{
+ struct device_node *of_node = adev->dev.of_node;
+ const struct of_device_id *match = NULL;
struct pl353_smc_data *pl353_smc;
struct device_node *child;
- struct resource *res;
int err;
- struct device_node *of_node = adev->dev.of_node;
- static void (*init)(struct amba_device *adev,
- struct device_node *nand_node);
- const struct of_device_id *match = NULL;
pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL);
if (!pl353_smc)
return -ENOMEM;
- /* Get the NAND controller virtual address */
- res = &adev->res;
- pl353_smc_base = devm_ioremap_resource(&adev->dev, res);
- if (IS_ERR(pl353_smc_base))
- return PTR_ERR(pl353_smc_base);
-
pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk");
if (IS_ERR(pl353_smc->aclk)) {
dev_err(&adev->dev, "aclk clock not found.\n");
@@ -388,15 +101,11 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
err = clk_prepare_enable(pl353_smc->memclk);
if (err) {
dev_err(&adev->dev, "Unable to enable memory clock.\n");
- goto out_clk_dis_aper;
+ goto disable_axi_clk;
}
amba_set_drvdata(adev, pl353_smc);
- /* clear interrupts */
- writel(PL353_SMC_CFG_CLR_DEFAULT_MASK,
- pl353_smc_base + PL353_SMC_CFG_CLR_OFFS);
-
/* Find compatible children. Only a single child is supported */
for_each_available_child_of_node(of_node, child) {
match = of_match_node(pl353_smc_supported_children, child);
@@ -407,20 +116,18 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
break;
}
if (!match) {
+ err = -ENODEV;
dev_err(&adev->dev, "no matching children\n");
- goto out_clk_disable;
+ goto disable_mem_clk;
}
- init = match->data;
- if (init)
- init(adev, child);
of_platform_device_create(child, NULL, &adev->dev);
return 0;
-out_clk_disable:
+disable_mem_clk:
clk_disable_unprepare(pl353_smc->memclk);
-out_clk_dis_aper:
+disable_axi_clk:
clk_disable_unprepare(pl353_smc->aclk);
return err;
@@ -436,8 +143,8 @@ static void pl353_smc_remove(struct amba_device *adev)
static const struct amba_id pl353_ids[] = {
{
- .id = 0x00041353,
- .mask = 0x000fffff,
+ .id = 0x00041353,
+ .mask = 0x000fffff,
},
{ 0, 0 },
};
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
index 4d5758c419c5..ffec26a99313 100644
--- a/drivers/memory/stm32-fmc2-ebi.c
+++ b/drivers/memory/stm32-fmc2-ebi.c
@@ -1048,16 +1048,19 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
if (ret) {
dev_err(dev, "could not retrieve reg property: %d\n",
ret);
+ of_node_put(child);
return ret;
}
if (bank >= FMC2_MAX_BANKS) {
dev_err(dev, "invalid reg value: %d\n", bank);
+ of_node_put(child);
return -EINVAL;
}
if (ebi->bank_assigned & BIT(bank)) {
dev_err(dev, "bank already assigned: %d\n", bank);
+ of_node_put(child);
return -EINVAL;
}
@@ -1066,6 +1069,7 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
if (ret) {
dev_err(dev, "setup chip select %d failed: %d\n",
bank, ret);
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index a70967a56e52..f9bae36c03a3 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -2,16 +2,18 @@
config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
default y
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || (COMPILE_TEST && COMMON_CLK)
select INTERCONNECT
help
This driver supports the Memory Controller (MC) hardware found on
NVIDIA Tegra SoCs.
+if TEGRA_MC
+
config TEGRA20_EMC
tristate "NVIDIA Tegra20 External Memory Controller driver"
default y
- depends on TEGRA_MC && ARCH_TEGRA_2x_SOC
+ depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ
help
@@ -23,7 +25,7 @@ config TEGRA20_EMC
config TEGRA30_EMC
tristate "NVIDIA Tegra30 External Memory Controller driver"
default y
- depends on TEGRA_MC && ARCH_TEGRA_3x_SOC
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
select PM_OPP
help
This driver is for the External Memory Controller (EMC) found on
@@ -34,8 +36,8 @@ config TEGRA30_EMC
config TEGRA124_EMC
tristate "NVIDIA Tegra124 External Memory Controller driver"
default y
- depends on TEGRA_MC && ARCH_TEGRA_124_SOC
- select TEGRA124_CLK_EMC
+ depends on ARCH_TEGRA_124_SOC || COMPILE_TEST
+ select TEGRA124_CLK_EMC if ARCH_TEGRA
select PM_OPP
help
This driver is for the External Memory Controller (EMC) found on
@@ -45,14 +47,16 @@ config TEGRA124_EMC
config TEGRA210_EMC_TABLE
bool
- depends on ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA_210_SOC || COMPILE_TEST
config TEGRA210_EMC
tristate "NVIDIA Tegra210 External Memory Controller driver"
- depends on TEGRA_MC && ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA_210_SOC || COMPILE_TEST
select TEGRA210_EMC_TABLE
help
This driver is for the External Memory Controller (EMC) found on
Tegra210 chips. The EMC controls the external DRAM on the board.
This driver is required to change memory timings / clock rate for
external memory.
+
+endif
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 6c1a2ecc6628..c992e87782d2 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -7,6 +7,8 @@ tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o
tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra194.o
obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
@@ -15,7 +17,7 @@ obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
-obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o
-obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186-emc.o
tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index e58c3e5baea0..3c5aae7abf35 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -39,7 +39,13 @@ static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_210_SOC
{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
- { }
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+ { .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+ { .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
+#endif
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
@@ -91,6 +97,15 @@ struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ if (mc->soc->ops && mc->soc->ops->probe_device)
+ return mc->soc->ops->probe_device(mc, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_probe_device);
+
static int tegra_mc_block_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
@@ -299,38 +314,6 @@ static int tegra_mc_reset_setup(struct tegra_mc *mc)
return 0;
}
-static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
-{
- unsigned long long tick;
- unsigned int i;
- u32 value;
-
- /* compute the number of MC clock cycles per tick */
- tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
- do_div(tick, NSEC_PER_SEC);
-
- value = mc_readl(mc, MC_EMEM_ARB_CFG);
- value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
- value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
- mc_writel(mc, value, MC_EMEM_ARB_CFG);
-
- /* write latency allowance defaults */
- for (i = 0; i < mc->soc->num_clients; i++) {
- const struct tegra_mc_la *la = &mc->soc->clients[i].la;
- u32 value;
-
- value = mc_readl(mc, la->reg);
- value &= ~(la->mask << la->shift);
- value |= (la->def & la->mask) << la->shift;
- mc_writel(mc, value, la->reg);
- }
-
- /* latch new values */
- mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
-
- return 0;
-}
-
int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
{
unsigned int i;
@@ -368,6 +351,43 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
}
EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
+{
+ unsigned long long tick;
+ unsigned int i;
+ u32 value;
+
+ /* compute the number of MC clock cycles per tick */
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
+ do_div(tick, NSEC_PER_SEC);
+
+ value = mc_readl(mc, MC_EMEM_ARB_CFG);
+ value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
+ value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
+ mc_writel(mc, value, MC_EMEM_ARB_CFG);
+
+ /* write latency allowance defaults */
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+ u32 value;
+
+ value = mc_readl(mc, client->regs.la.reg);
+ value &= ~(client->regs.la.mask << client->regs.la.shift);
+ value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
+ mc_writel(mc, value, client->regs.la.reg);
+ }
+
+ /* latch new values */
+ mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+
+ return 0;
+}
+
static int load_one_timing(struct tegra_mc *mc,
struct tegra_mc_timing *timing,
struct device_node *node)
@@ -459,27 +479,35 @@ static int tegra_mc_setup_timings(struct tegra_mc *mc)
return 0;
}
-static const char *const status_names[32] = {
- [ 1] = "External interrupt",
- [ 6] = "EMEM address decode error",
- [ 7] = "GART page fault",
- [ 8] = "Security violation",
- [ 9] = "EMEM arbitration error",
- [10] = "Page fault",
- [11] = "Invalid APB ASID update",
- [12] = "VPR violation",
- [13] = "Secure carveout violation",
- [16] = "MTS carveout violation",
-};
+int tegra30_mc_probe(struct tegra_mc *mc)
+{
+ int err;
-static const char *const error_names[8] = {
- [2] = "EMEM decode error",
- [3] = "TrustZone violation",
- [4] = "Carveout violation",
- [6] = "SMMU translation error",
-};
+ mc->clk = devm_clk_get_optional(mc->dev, "mc");
+ if (IS_ERR(mc->clk)) {
+ dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
+ return PTR_ERR(mc->clk);
+ }
+
+ /* ensure that debug features are disabled */
+ mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
+
+ err = tegra_mc_setup_latency_allowance(mc);
+ if (err < 0) {
+ dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
+ return err;
+ }
-static irqreturn_t tegra_mc_irq(int irq, void *data)
+ err = tegra_mc_setup_timings(mc);
+ if (err < 0) {
+ dev_err(mc->dev, "failed to setup timings: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
{
struct tegra_mc *mc = data;
unsigned long status;
@@ -491,7 +519,7 @@ static irqreturn_t tegra_mc_irq(int irq, void *data)
return IRQ_NONE;
for_each_set_bit(bit, &status, 32) {
- const char *error = status_names[bit] ?: "unknown";
+ const char *error = tegra_mc_status_names[bit] ?: "unknown";
const char *client = "unknown", *desc;
const char *direction, *secure;
phys_addr_t addr = 0;
@@ -531,7 +559,7 @@ static irqreturn_t tegra_mc_irq(int irq, void *data)
type = (value & MC_ERR_STATUS_TYPE_MASK) >>
MC_ERR_STATUS_TYPE_SHIFT;
- desc = error_names[type];
+ desc = tegra_mc_error_names[type];
switch (value & MC_ERR_STATUS_TYPE_MASK) {
case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
@@ -576,78 +604,31 @@ static irqreturn_t tegra_mc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
-{
- struct tegra_mc *mc = data;
- unsigned long status;
- unsigned int bit;
-
- /* mask all interrupts to avoid flooding */
- status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
- if (!status)
- return IRQ_NONE;
-
- for_each_set_bit(bit, &status, 32) {
- const char *direction = "read", *secure = "";
- const char *error = status_names[bit];
- const char *client, *desc;
- phys_addr_t addr;
- u32 value, reg;
- u8 id, type;
-
- switch (BIT(bit)) {
- case MC_INT_DECERR_EMEM:
- reg = MC_DECERR_EMEM_OTHERS_STATUS;
- value = mc_readl(mc, reg);
-
- id = value & mc->soc->client_id_mask;
- desc = error_names[2];
-
- if (value & BIT(31))
- direction = "write";
- break;
-
- case MC_INT_INVALID_GART_PAGE:
- reg = MC_GART_ERROR_REQ;
- value = mc_readl(mc, reg);
-
- id = (value >> 1) & mc->soc->client_id_mask;
- desc = error_names[2];
-
- if (value & BIT(0))
- direction = "write";
- break;
-
- case MC_INT_SECURITY_VIOLATION:
- reg = MC_SECURITY_VIOLATION_STATUS;
- value = mc_readl(mc, reg);
-
- id = value & mc->soc->client_id_mask;
- type = (value & BIT(30)) ? 4 : 3;
- desc = error_names[type];
- secure = "secure ";
-
- if (value & BIT(31))
- direction = "write";
- break;
-
- default:
- continue;
- }
-
- client = mc->soc->clients[id].name;
- addr = mc_readl(mc, reg + sizeof(u32));
-
- dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n",
- client, secure, direction, &addr, error,
- desc);
- }
+const struct tegra_mc_ops tegra30_mc_ops = {
+ .probe = tegra30_mc_probe,
+ .handle_irq = tegra30_mc_handle_irq,
+};
+#endif
- /* clear interrupts */
- mc_writel(mc, status, MC_INTSTATUS);
+const char *const tegra_mc_status_names[32] = {
+ [ 1] = "External interrupt",
+ [ 6] = "EMEM address decode error",
+ [ 7] = "GART page fault",
+ [ 8] = "Security violation",
+ [ 9] = "EMEM arbitration error",
+ [10] = "Page fault",
+ [11] = "Invalid APB ASID update",
+ [12] = "VPR violation",
+ [13] = "Secure carveout violation",
+ [16] = "MTS carveout violation",
+};
- return IRQ_HANDLED;
-}
+const char *const tegra_mc_error_names[8] = {
+ [2] = "EMEM decode error",
+ [3] = "TrustZone violation",
+ [4] = "Carveout violation",
+ [6] = "SMMU translation error",
+};
/*
* Memory Controller (MC) has few Memory Clients that are issuing memory
@@ -748,7 +729,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_mc *mc;
- void *isr;
u64 mask;
int err;
@@ -777,70 +757,38 @@ static int tegra_mc_probe(struct platform_device *pdev)
if (IS_ERR(mc->regs))
return PTR_ERR(mc->regs);
- mc->clk = devm_clk_get(&pdev->dev, "mc");
- if (IS_ERR(mc->clk)) {
- dev_err(&pdev->dev, "failed to get MC clock: %ld\n",
- PTR_ERR(mc->clk));
- return PTR_ERR(mc->clk);
+ mc->debugfs.root = debugfs_create_dir("mc", NULL);
+
+ if (mc->soc->ops && mc->soc->ops->probe) {
+ err = mc->soc->ops->probe(mc);
+ if (err < 0)
+ return err;
}
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
- if (mc->soc == &tegra20_mc_soc) {
- isr = tegra20_mc_irq;
- } else
-#endif
- {
- /* ensure that debug features are disabled */
- mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
+ if (mc->soc->ops && mc->soc->ops->handle_irq) {
+ mc->irq = platform_get_irq(pdev, 0);
+ if (mc->irq < 0)
+ return mc->irq;
- err = tegra_mc_setup_latency_allowance(mc);
- if (err < 0) {
- dev_err(&pdev->dev,
- "failed to setup latency allowance: %d\n",
- err);
- return err;
- }
+ WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
- isr = tegra_mc_irq;
+ mc_writel(mc, mc->soc->intmask, MC_INTMASK);
- err = tegra_mc_setup_timings(mc);
+ err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
+ dev_name(&pdev->dev), mc);
if (err < 0) {
- dev_err(&pdev->dev, "failed to setup timings: %d\n",
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
err);
return err;
}
}
- mc->irq = platform_get_irq(pdev, 0);
- if (mc->irq < 0)
- return mc->irq;
-
- WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
-
- mc_writel(mc, mc->soc->intmask, MC_INTMASK);
-
- err = devm_request_irq(&pdev->dev, mc->irq, isr, 0,
- dev_name(&pdev->dev), mc);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
- err);
- return err;
- }
-
- mc->debugfs.root = debugfs_create_dir("mc", NULL);
-
- if (mc->soc->init) {
- err = mc->soc->init(mc);
+ if (mc->soc->reset_ops) {
+ err = tegra_mc_reset_setup(mc);
if (err < 0)
- dev_err(&pdev->dev, "failed to initialize SoC driver: %d\n",
- err);
+ dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
}
- err = tegra_mc_reset_setup(mc);
- if (err < 0)
- dev_err(&pdev->dev, "failed to register reset controller: %d\n",
- err);
-
err = tegra_mc_interconnect_setup(mc);
if (err < 0)
dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
@@ -867,37 +815,28 @@ static int tegra_mc_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_mc_suspend(struct device *dev)
+static int __maybe_unused tegra_mc_suspend(struct device *dev)
{
struct tegra_mc *mc = dev_get_drvdata(dev);
- int err;
- if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
- err = tegra_gart_suspend(mc->gart);
- if (err)
- return err;
- }
+ if (mc->soc->ops && mc->soc->ops->suspend)
+ return mc->soc->ops->suspend(mc);
return 0;
}
-static int tegra_mc_resume(struct device *dev)
+static int __maybe_unused tegra_mc_resume(struct device *dev)
{
struct tegra_mc *mc = dev_get_drvdata(dev);
- int err;
- if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
- err = tegra_gart_resume(mc->gart);
- if (err)
- return err;
- }
+ if (mc->soc->ops && mc->soc->ops->resume)
+ return mc->soc->ops->resume(mc);
return 0;
}
static const struct dev_pm_ops tegra_mc_pm_ops = {
- .suspend = tegra_mc_suspend,
- .resume = tegra_mc_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
};
static struct platform_driver tegra_mc_driver = {
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 1ee34f0da4f7..1e492989c363 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -129,6 +129,31 @@ extern const struct tegra_mc_soc tegra132_mc_soc;
extern const struct tegra_mc_soc tegra210_mc_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+extern const struct tegra_mc_soc tegra186_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_mc_soc tegra194_mc_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+int tegra30_mc_probe(struct tegra_mc *mc);
+extern const struct tegra_mc_ops tegra30_mc_ops;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_mc_ops tegra186_mc_ops;
+#endif
+
+extern const char * const tegra_mc_status_names[32];
+extern const char * const tegra_mc_error_names[8];
+
/*
* These IDs are for internal use of Tegra ICC drivers. The ID numbers are
* chosen such that they don't conflict with the device-tree ICC node IDs.
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index ed376ba2d2fe..41350570c815 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -15,883 +15,1013 @@ static const struct tegra_mc_client tegra114_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
- .la = {
- .reg = 0x34c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
},
}, {
.id = 0x01,
.name = "display0a",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 1,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
}, {
.id = 0x02,
.name = "display0ab",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 2,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
}, {
.id = 0x03,
.name = "display0b",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 3,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
}, {
.id = 0x04,
.name = "display0bb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 4,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
}, {
.id = 0x05,
.name = "display0c",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 5,
- },
- .la = {
- .reg = 0x2ec,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
}, {
.id = 0x06,
.name = "display0cb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 6,
- },
- .la = {
- .reg = 0x2f8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
}, {
.id = 0x09,
.name = "eppup",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x228,
- .bit = 9,
- },
- .la = {
- .reg = 0x300,
- .shift = 0,
- .mask = 0xff,
- .def = 0x33,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x33,
+ },
},
}, {
.id = 0x0a,
.name = "g2pr",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x228,
- .bit = 10,
- },
- .la = {
- .reg = 0x308,
- .shift = 0,
- .mask = 0xff,
- .def = 0x09,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x09,
+ },
},
}, {
.id = 0x0b,
.name = "g2sr",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x228,
- .bit = 11,
- },
- .la = {
- .reg = 0x308,
- .shift = 16,
- .mask = 0xff,
- .def = 0x09,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
},
}, {
.id = 0x0f,
.name = "avpcarm7r",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x228,
- .bit = 15,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x10,
.name = "displayhc",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 16,
- },
- .la = {
- .reg = 0x2f0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x68,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x68,
+ },
},
}, {
.id = 0x11,
.name = "displayhcb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 17,
- },
- .la = {
- .reg = 0x2fc,
- .shift = 0,
- .mask = 0xff,
- .def = 0x68,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x68,
+ },
},
}, {
.id = 0x12,
.name = "fdcdrd",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x228,
- .bit = 18,
- },
- .la = {
- .reg = 0x334,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0c,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
},
}, {
.id = 0x13,
.name = "fdcdrd2",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x228,
- .bit = 19,
- },
- .la = {
- .reg = 0x33c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0c,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
},
}, {
.id = 0x14,
.name = "g2dr",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x228,
- .bit = 20,
- },
- .la = {
- .reg = 0x30c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0a,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
},
}, {
.id = 0x15,
.name = "hdar",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x228,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x16,
.name = "host1xdmar",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 22,
- },
- .la = {
- .reg = 0x310,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
}, {
.id = 0x17,
.name = "host1xr",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 23,
- },
- .la = {
- .reg = 0x310,
- .shift = 16,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x18,
.name = "idxsrd",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x228,
- .bit = 24,
- },
- .la = {
- .reg = 0x334,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0b,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0b,
+ },
},
}, {
.id = 0x1c,
.name = "msencsrd",
.swgroup = TEGRA_SWGROUP_MSENC,
- .smmu = {
- .reg = 0x228,
- .bit = 28,
- },
- .la = {
- .reg = 0x328,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 29,
- },
- .la = {
- .reg = 0x344,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 30,
- },
- .la = {
- .reg = 0x344,
- .shift = 16,
- .mask = 0xff,
- .def = 0xe8,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xe8,
+ },
},
}, {
.id = 0x20,
.name = "texl2srd",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x22c,
- .bit = 0,
- },
- .la = {
- .reg = 0x338,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
},
}, {
.id = 0x22,
.name = "vdebsevr",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 2,
- },
- .la = {
- .reg = 0x354,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x23,
.name = "vdember",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 3,
- },
- .la = {
- .reg = 0x354,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x24,
.name = "vdemcer",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 4,
- },
- .la = {
- .reg = 0x358,
- .shift = 0,
- .mask = 0xff,
- .def = 0xb8,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xb8,
+ },
},
}, {
.id = 0x25,
.name = "vdetper",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 5,
- },
- .la = {
- .reg = 0x358,
- .shift = 16,
- .mask = 0xff,
- .def = 0xee,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xee,
+ },
},
}, {
.id = 0x26,
.name = "mpcorelpr",
.swgroup = TEGRA_SWGROUP_MPCORELP,
- .la = {
- .reg = 0x324,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x27,
.name = "mpcorer",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x28,
.name = "eppu",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x22c,
- .bit = 8,
- },
- .la = {
- .reg = 0x300,
- .shift = 16,
- .mask = 0xff,
- .def = 0x33,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x33,
+ },
},
}, {
.id = 0x29,
.name = "eppv",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x22c,
- .bit = 9,
- },
- .la = {
- .reg = 0x304,
- .shift = 0,
- .mask = 0xff,
- .def = 0x6c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
},
}, {
.id = 0x2a,
.name = "eppy",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x22c,
- .bit = 10,
- },
- .la = {
- .reg = 0x304,
- .shift = 16,
- .mask = 0xff,
- .def = 0x6c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
},
}, {
.id = 0x2b,
.name = "msencswr",
.swgroup = TEGRA_SWGROUP_MSENC,
- .smmu = {
- .reg = 0x22c,
- .bit = 11,
- },
- .la = {
- .reg = 0x328,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x2c,
.name = "viwsb",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 12,
- },
- .la = {
- .reg = 0x364,
- .shift = 0,
- .mask = 0xff,
- .def = 0x47,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x47,
+ },
},
}, {
.id = 0x2d,
.name = "viwu",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 13,
- },
- .la = {
- .reg = 0x368,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x2e,
.name = "viwv",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 14,
- },
- .la = {
- .reg = 0x368,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x2f,
.name = "viwy",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 15,
- },
- .la = {
- .reg = 0x36c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x47,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x36c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x47,
+ },
},
}, {
.id = 0x30,
.name = "g2dw",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x22c,
- .bit = 16,
- },
- .la = {
- .reg = 0x30c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x9,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x9,
+ },
},
}, {
.id = 0x32,
.name = "avpcarm7w",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x22c,
- .bit = 18,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0e,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
},
}, {
.id = 0x33,
.name = "fdcdwr",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x22c,
- .bit = 19,
- },
- .la = {
- .reg = 0x338,
- .shift = 16,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
}, {
.id = 0x34,
.name = "fdcdwr2",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x22c,
- .bit = 20,
- },
- .la = {
- .reg = 0x340,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
}, {
.id = 0x35,
.name = "hdaw",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x22c,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x36,
.name = "host1xw",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x22c,
- .bit = 22,
- },
- .la = {
- .reg = 0x314,
- .shift = 0,
- .mask = 0xff,
- .def = 0x25,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x25,
+ },
},
}, {
.id = 0x37,
.name = "ispw",
.swgroup = TEGRA_SWGROUP_ISP,
- .smmu = {
- .reg = 0x22c,
- .bit = 23,
- },
- .la = {
- .reg = 0x31c,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x31c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x38,
.name = "mpcorelpw",
.swgroup = TEGRA_SWGROUP_MPCORELP,
- .la = {
- .reg = 0x324,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x39,
.name = "mpcorew",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0e,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
},
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 27,
- },
- .la = {
- .reg = 0x348,
- .shift = 0,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 28,
- },
- .la = {
- .reg = 0x348,
- .shift = 16,
- .mask = 0xff,
- .def = 0xe8,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xe8,
+ },
},
}, {
.id = 0x3e,
.name = "vdebsevw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 30,
- },
- .la = {
- .reg = 0x35c,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x3f,
.name = "vdedbgw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 31,
- },
- .la = {
- .reg = 0x35c,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x40,
.name = "vdembew",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x230,
- .bit = 0,
- },
- .la = {
- .reg = 0x360,
- .shift = 0,
- .mask = 0xff,
- .def = 0x89,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x89,
+ },
},
}, {
.id = 0x41,
.name = "vdetpmw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x230,
- .bit = 1,
- },
- .la = {
- .reg = 0x360,
- .shift = 16,
- .mask = 0xff,
- .def = 0x59,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x59,
+ },
},
}, {
.id = 0x4a,
.name = "xusb_hostr",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
- .smmu = {
- .reg = 0x230,
- .bit = 10,
- },
- .la = {
- .reg = 0x37c,
- .shift = 0,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x4b,
.name = "xusb_hostw",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
- .smmu = {
- .reg = 0x230,
- .bit = 11,
- },
- .la = {
- .reg = 0x37c,
- .shift = 16,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x4c,
.name = "xusb_devr",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
- .smmu = {
- .reg = 0x230,
- .bit = 12,
- },
- .la = {
- .reg = 0x380,
- .shift = 0,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x4d,
.name = "xusb_devw",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
- .smmu = {
- .reg = 0x230,
- .bit = 13,
- },
- .la = {
- .reg = 0x380,
- .shift = 16,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x4e,
.name = "fdcdwr3",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x230,
- .bit = 14,
- },
- .la = {
- .reg = 0x388,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
}, {
.id = 0x4f,
.name = "fdcdrd3",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x230,
- .bit = 15,
- },
- .la = {
- .reg = 0x384,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0c,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
},
}, {
.id = 0x50,
.name = "fdcwr4",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x230,
- .bit = 16,
- },
- .la = {
- .reg = 0x388,
- .shift = 16,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
}, {
.id = 0x51,
.name = "fdcrd4",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x230,
- .bit = 17,
- },
- .la = {
- .reg = 0x384,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0c,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
},
}, {
.id = 0x52,
.name = "emucifr",
.swgroup = TEGRA_SWGROUP_EMUCIF,
- .la = {
- .reg = 0x38c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x38c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x53,
.name = "emucifw",
.swgroup = TEGRA_SWGROUP_EMUCIF,
- .la = {
- .reg = 0x38c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0e,
+ .regs = {
+ .la = {
+ .reg = 0x38c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
},
}, {
.id = 0x54,
.name = "tsecsrd",
.swgroup = TEGRA_SWGROUP_TSEC,
- .smmu = {
- .reg = 0x230,
- .bit = 20,
- },
- .la = {
- .reg = 0x390,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x55,
.name = "tsecswr",
.swgroup = TEGRA_SWGROUP_TSEC,
- .smmu = {
- .reg = 0x230,
- .bit = 21,
- },
- .la = {
- .reg = 0x390,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
},
};
@@ -983,4 +1113,5 @@ const struct tegra_mc_soc tegra114_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra114_mc_resets,
.num_resets = ARRAY_SIZE(tegra114_mc_resets),
+ .ops = &tegra30_mc_ops,
};
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 5699d909abc2..908f8d5392b2 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -272,8 +272,8 @@
#define EMC_PUTERM_ADJ 0x574
#define DRAM_DEV_SEL_ALL 0
-#define DRAM_DEV_SEL_0 (2 << 30)
-#define DRAM_DEV_SEL_1 (1 << 30)
+#define DRAM_DEV_SEL_0 BIT(31)
+#define DRAM_DEV_SEL_1 BIT(30)
#define EMC_CFG_POWER_FEATURES_MASK \
(EMC_CFG_DYN_SREF | EMC_CFG_DRAM_ACPD | EMC_CFG_DRAM_CLKSTOP_SR | \
@@ -1269,10 +1269,6 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(dev, "failed to create debugfs directory\n");
- return;
- }
debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
&tegra_emc_debug_available_rates_fops);
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 459211f50c08..d780a84241fe 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -16,921 +16,1055 @@ static const struct tegra_mc_client tegra124_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
- .la = {
- .reg = 0x34c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
},
}, {
.id = 0x01,
.name = "display0a",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 1,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 0,
- .mask = 0xff,
- .def = 0xc2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc2,
+ },
},
}, {
.id = 0x02,
.name = "display0ab",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 2,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 0,
- .mask = 0xff,
- .def = 0xc6,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc6,
+ },
},
}, {
.id = 0x03,
.name = "display0b",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 3,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x04,
.name = "display0bb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 4,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x05,
.name = "display0c",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 5,
- },
- .la = {
- .reg = 0x2ec,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x06,
.name = "display0cb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 6,
- },
- .la = {
- .reg = 0x2f8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x0e,
.name = "afir",
.swgroup = TEGRA_SWGROUP_AFI,
- .smmu = {
- .reg = 0x228,
- .bit = 14,
- },
- .la = {
- .reg = 0x2e0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x13,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
},
}, {
.id = 0x0f,
.name = "avpcarm7r",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x228,
- .bit = 15,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x10,
.name = "displayhc",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 16,
- },
- .la = {
- .reg = 0x2f0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x11,
.name = "displayhcb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 17,
- },
- .la = {
- .reg = 0x2fc,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x15,
.name = "hdar",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x228,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 0,
- .mask = 0xff,
- .def = 0x24,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
},
}, {
.id = 0x16,
.name = "host1xdmar",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 22,
- },
- .la = {
- .reg = 0x310,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x17,
.name = "host1xr",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 23,
- },
- .la = {
- .reg = 0x310,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x1c,
.name = "msencsrd",
.swgroup = TEGRA_SWGROUP_MSENC,
- .smmu = {
- .reg = 0x228,
- .bit = 28,
- },
- .la = {
- .reg = 0x328,
- .shift = 0,
- .mask = 0xff,
- .def = 0x23,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
},
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 29,
- },
- .la = {
- .reg = 0x344,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 30,
- },
- .la = {
- .reg = 0x344,
- .shift = 16,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x1f,
.name = "satar",
.swgroup = TEGRA_SWGROUP_SATA,
- .smmu = {
- .reg = 0x228,
- .bit = 31,
- },
- .la = {
- .reg = 0x350,
- .shift = 0,
- .mask = 0xff,
- .def = 0x65,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
},
}, {
.id = 0x22,
.name = "vdebsevr",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 2,
- },
- .la = {
- .reg = 0x354,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4f,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4f,
+ },
},
}, {
.id = 0x23,
.name = "vdember",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 3,
- },
- .la = {
- .reg = 0x354,
- .shift = 16,
- .mask = 0xff,
- .def = 0x3d,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x3d,
+ },
},
}, {
.id = 0x24,
.name = "vdemcer",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 4,
- },
- .la = {
- .reg = 0x358,
- .shift = 0,
- .mask = 0xff,
- .def = 0x66,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x66,
+ },
},
}, {
.id = 0x25,
.name = "vdetper",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 5,
- },
- .la = {
- .reg = 0x358,
- .shift = 16,
- .mask = 0xff,
- .def = 0xa5,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
},
}, {
.id = 0x26,
.name = "mpcorelpr",
.swgroup = TEGRA_SWGROUP_MPCORELP,
- .la = {
- .reg = 0x324,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x27,
.name = "mpcorer",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x2b,
.name = "msencswr",
.swgroup = TEGRA_SWGROUP_MSENC,
- .smmu = {
- .reg = 0x22c,
- .bit = 11,
- },
- .la = {
- .reg = 0x328,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x31,
.name = "afiw",
.swgroup = TEGRA_SWGROUP_AFI,
- .smmu = {
- .reg = 0x22c,
- .bit = 17,
- },
- .la = {
- .reg = 0x2e0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x32,
.name = "avpcarm7w",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x22c,
- .bit = 18,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x35,
.name = "hdaw",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x22c,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x36,
.name = "host1xw",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x22c,
- .bit = 22,
- },
- .la = {
- .reg = 0x314,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x38,
.name = "mpcorelpw",
.swgroup = TEGRA_SWGROUP_MPCORELP,
- .la = {
- .reg = 0x324,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x39,
.name = "mpcorew",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 27,
- },
- .la = {
- .reg = 0x348,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 28,
- },
- .la = {
- .reg = 0x348,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3d,
.name = "sataw",
.swgroup = TEGRA_SWGROUP_SATA,
- .smmu = {
- .reg = 0x22c,
- .bit = 29,
- },
- .la = {
- .reg = 0x350,
- .shift = 16,
- .mask = 0xff,
- .def = 0x65,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x65,
+ },
},
}, {
.id = 0x3e,
.name = "vdebsevw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 30,
- },
- .la = {
- .reg = 0x35c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3f,
.name = "vdedbgw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 31,
- },
- .la = {
- .reg = 0x35c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x40,
.name = "vdembew",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x230,
- .bit = 0,
- },
- .la = {
- .reg = 0x360,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x41,
.name = "vdetpmw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x230,
- .bit = 1,
- },
- .la = {
- .reg = 0x360,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x44,
.name = "ispra",
.swgroup = TEGRA_SWGROUP_ISP2,
- .smmu = {
- .reg = 0x230,
- .bit = 4,
- },
- .la = {
- .reg = 0x370,
- .shift = 0,
- .mask = 0xff,
- .def = 0x18,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
},
}, {
.id = 0x46,
.name = "ispwa",
.swgroup = TEGRA_SWGROUP_ISP2,
- .smmu = {
- .reg = 0x230,
- .bit = 6,
- },
- .la = {
- .reg = 0x374,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x47,
.name = "ispwb",
.swgroup = TEGRA_SWGROUP_ISP2,
- .smmu = {
- .reg = 0x230,
- .bit = 7,
- },
- .la = {
- .reg = 0x374,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x4a,
.name = "xusb_hostr",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
- .smmu = {
- .reg = 0x230,
- .bit = 10,
- },
- .la = {
- .reg = 0x37c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x39,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
},
}, {
.id = 0x4b,
.name = "xusb_hostw",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
- .smmu = {
- .reg = 0x230,
- .bit = 11,
- },
- .la = {
- .reg = 0x37c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x4c,
.name = "xusb_devr",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
- .smmu = {
- .reg = 0x230,
- .bit = 12,
- },
- .la = {
- .reg = 0x380,
- .shift = 0,
- .mask = 0xff,
- .def = 0x39,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
},
}, {
.id = 0x4d,
.name = "xusb_devw",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
- .smmu = {
- .reg = 0x230,
- .bit = 13,
- },
- .la = {
- .reg = 0x380,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x4e,
.name = "isprab",
.swgroup = TEGRA_SWGROUP_ISP2B,
- .smmu = {
- .reg = 0x230,
- .bit = 14,
- },
- .la = {
- .reg = 0x384,
- .shift = 0,
- .mask = 0xff,
- .def = 0x18,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
},
}, {
.id = 0x50,
.name = "ispwab",
.swgroup = TEGRA_SWGROUP_ISP2B,
- .smmu = {
- .reg = 0x230,
- .bit = 16,
- },
- .la = {
- .reg = 0x388,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x51,
.name = "ispwbb",
.swgroup = TEGRA_SWGROUP_ISP2B,
- .smmu = {
- .reg = 0x230,
- .bit = 17,
- },
- .la = {
- .reg = 0x388,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x54,
.name = "tsecsrd",
.swgroup = TEGRA_SWGROUP_TSEC,
- .smmu = {
- .reg = 0x230,
- .bit = 20,
- },
- .la = {
- .reg = 0x390,
- .shift = 0,
- .mask = 0xff,
- .def = 0x9b,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
},
}, {
.id = 0x55,
.name = "tsecswr",
.swgroup = TEGRA_SWGROUP_TSEC,
- .smmu = {
- .reg = 0x230,
- .bit = 21,
- },
- .la = {
- .reg = 0x390,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x56,
.name = "a9avpscr",
.swgroup = TEGRA_SWGROUP_A9AVP,
- .smmu = {
- .reg = 0x230,
- .bit = 22,
- },
- .la = {
- .reg = 0x3a4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x57,
.name = "a9avpscw",
.swgroup = TEGRA_SWGROUP_A9AVP,
- .smmu = {
- .reg = 0x230,
- .bit = 23,
- },
- .la = {
- .reg = 0x3a4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x58,
.name = "gpusrd",
.swgroup = TEGRA_SWGROUP_GPU,
- .smmu = {
- /* read-only */
- .reg = 0x230,
- .bit = 24,
- },
- .la = {
- .reg = 0x3c8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x59,
.name = "gpuswr",
.swgroup = TEGRA_SWGROUP_GPU,
- .smmu = {
- /* read-only */
- .reg = 0x230,
- .bit = 25,
- },
- .la = {
- .reg = 0x3c8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x5a,
.name = "displayt",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x230,
- .bit = 26,
- },
- .la = {
- .reg = 0x2f0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x60,
.name = "sdmmcra",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
- .smmu = {
- .reg = 0x234,
- .bit = 0,
- },
- .la = {
- .reg = 0x3b8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x61,
.name = "sdmmcraa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
- .smmu = {
- .reg = 0x234,
- .bit = 1,
- },
- .la = {
- .reg = 0x3bc,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x62,
.name = "sdmmcr",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
- .smmu = {
- .reg = 0x234,
- .bit = 2,
- },
- .la = {
- .reg = 0x3c0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x63,
.swgroup = TEGRA_SWGROUP_SDMMC4A,
.name = "sdmmcrab",
- .smmu = {
- .reg = 0x234,
- .bit = 3,
- },
- .la = {
- .reg = 0x3c4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x64,
.name = "sdmmcwa",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
- .smmu = {
- .reg = 0x234,
- .bit = 4,
- },
- .la = {
- .reg = 0x3b8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x65,
.name = "sdmmcwaa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
- .smmu = {
- .reg = 0x234,
- .bit = 5,
- },
- .la = {
- .reg = 0x3bc,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x66,
.name = "sdmmcw",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
- .smmu = {
- .reg = 0x234,
- .bit = 6,
- },
- .la = {
- .reg = 0x3c0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x67,
.name = "sdmmcwab",
.swgroup = TEGRA_SWGROUP_SDMMC4A,
- .smmu = {
- .reg = 0x234,
- .bit = 7,
- },
- .la = {
- .reg = 0x3c4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x6c,
.name = "vicsrd",
.swgroup = TEGRA_SWGROUP_VIC,
- .smmu = {
- .reg = 0x234,
- .bit = 12,
- },
- .la = {
- .reg = 0x394,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x6d,
.name = "vicswr",
.swgroup = TEGRA_SWGROUP_VIC,
- .smmu = {
- .reg = 0x234,
- .bit = 13,
- },
- .la = {
- .reg = 0x394,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x72,
.name = "viw",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x234,
- .bit = 18,
- },
- .la = {
- .reg = 0x398,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x73,
.name = "displayd",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x234,
- .bit = 19,
- },
- .la = {
- .reg = 0x3c8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
},
};
@@ -1140,6 +1274,7 @@ const struct tegra_mc_soc tegra124_mc_soc = {
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
.icc_ops = &tegra124_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
};
#endif /* CONFIG_ARCH_TEGRA_124_SOC */
@@ -1171,5 +1306,6 @@ const struct tegra_mc_soc tegra132_mc_soc = {
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
.icc_ops = &tegra124_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
};
#endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index e25c954dde2e..e65eac5764d4 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -1,1605 +1,878 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2017-2021 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <soc/tegra/mc.h>
+
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
#include <dt-bindings/memory/tegra186-mc.h>
#endif
-#if defined(CONFIG_ARCH_TEGRA_194_SOC)
-#include <dt-bindings/memory/tegra194-mc.h>
-#endif
-
-struct tegra186_mc_client {
- const char *name;
- unsigned int sid;
- struct {
- unsigned int override;
- unsigned int security;
- } regs;
-};
-
-struct tegra186_mc_soc {
- const struct tegra186_mc_client *clients;
- unsigned int num_clients;
-};
-
-struct tegra186_mc {
- struct device *dev;
- void __iomem *regs;
-
- const struct tegra186_mc_soc *soc;
-};
+#define MC_SID_STREAMID_OVERRIDE_MASK GENMASK(7, 0)
+#define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
+#define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
-static void tegra186_mc_program_sid(struct tegra186_mc *mc)
+static void tegra186_mc_program_sid(struct tegra_mc *mc)
{
unsigned int i;
for (i = 0; i < mc->soc->num_clients; i++) {
- const struct tegra186_mc_client *client = &mc->soc->clients[i];
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
u32 override, security;
- override = readl(mc->regs + client->regs.override);
- security = readl(mc->regs + client->regs.security);
+ override = readl(mc->regs + client->regs.sid.override);
+ security = readl(mc->regs + client->regs.sid.security);
dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
client->name, override, security);
dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
client->name);
- writel(client->sid, mc->regs + client->regs.override);
+ writel(client->sid, mc->regs + client->regs.sid.override);
- override = readl(mc->regs + client->regs.override);
- security = readl(mc->regs + client->regs.security);
+ override = readl(mc->regs + client->regs.sid.override);
+ security = readl(mc->regs + client->regs.sid.security);
dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
client->name, override, security);
}
}
+static int tegra186_mc_probe(struct tegra_mc *mc)
+{
+ int err;
+
+ err = of_platform_populate(mc->dev->of_node, NULL, NULL, mc->dev);
+ if (err < 0)
+ return err;
+
+ tegra186_mc_program_sid(mc);
+
+ return 0;
+}
+
+static void tegra186_mc_remove(struct tegra_mc *mc)
+{
+ of_platform_depopulate(mc->dev);
+}
+
+static int tegra186_mc_resume(struct tegra_mc *mc)
+{
+ tegra186_mc_program_sid(mc);
+
+ return 0;
+}
+
+static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int sid)
+{
+ u32 value, old;
+
+ value = readl(mc->regs + client->regs.sid.security);
+ if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) {
+ /*
+	 * If the secure firmware has locked down the override
+ * for this memory client, there's nothing we can do here.
+ */
+ if (value & MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED)
+ return;
+
+ /*
+ * Otherwise, try to set the override itself. Typically the
+ * secure firmware will never have set this configuration.
+ * Instead, it will either have disabled write access to
+ * this field, or it will already have set an explicit
+ * override itself.
+ */
+ WARN_ON((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0);
+
+ value |= MC_SID_STREAMID_SECURITY_OVERRIDE;
+ writel(value, mc->regs + client->regs.sid.security);
+ }
+
+ value = readl(mc->regs + client->regs.sid.override);
+ old = value & MC_SID_STREAMID_OVERRIDE_MASK;
+
+ if (old != sid) {
+ dev_dbg(mc->dev, "overriding SID %x for %s with %x\n", old,
+ client->name, sid);
+ writel(sid, mc->regs + client->regs.sid.override);
+ }
+}
+
+static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct of_phandle_args args;
+ unsigned int i, index = 0;
+
+ while (!of_parse_phandle_with_args(dev->of_node, "interconnects", "#interconnect-cells",
+ index, &args)) {
+ if (args.np == mc->dev->of_node && args.args_count != 0) {
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+
+ if (client->id == args.args[0]) {
+ u32 sid = fwspec->ids[0] & MC_SID_STREAMID_OVERRIDE_MASK;
+
+ tegra186_mc_client_sid_override(mc, client, sid);
+ }
+ }
+ }
+
+ index++;
+ }
+#endif
+
+ return 0;
+}
+
+const struct tegra_mc_ops tegra186_mc_ops = {
+ .probe = tegra186_mc_probe,
+ .remove = tegra186_mc_remove,
+ .resume = tegra186_mc_resume,
+ .probe_device = tegra186_mc_probe_device,
+};
+
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
-static const struct tegra186_mc_client tegra186_mc_clients[] = {
+static const struct tegra_mc_client tegra186_mc_clients[] = {
{
+ .id = TEGRA186_MEMORY_CLIENT_PTCR,
.name = "ptcr",
.sid = TEGRA186_SID_PASSTHROUGH,
.regs = {
- .override = 0x000,
- .security = 0x004,
+ .sid = {
+ .override = 0x000,
+ .security = 0x004,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AFIR,
.name = "afir",
.sid = TEGRA186_SID_AFI,
.regs = {
- .override = 0x070,
- .security = 0x074,
+ .sid = {
+ .override = 0x070,
+ .security = 0x074,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_HDAR,
.name = "hdar",
.sid = TEGRA186_SID_HDA,
.regs = {
- .override = 0x0a8,
- .security = 0x0ac,
+ .sid = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_HOST1XDMAR,
.name = "host1xdmar",
.sid = TEGRA186_SID_HOST1X,
.regs = {
- .override = 0x0b0,
- .security = 0x0b4,
+ .sid = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVENCSRD,
.name = "nvencsrd",
.sid = TEGRA186_SID_NVENC,
.regs = {
- .override = 0x0e0,
- .security = 0x0e4,
+ .sid = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SATAR,
.name = "satar",
.sid = TEGRA186_SID_SATA,
.regs = {
- .override = 0x0f8,
- .security = 0x0fc,
+ .sid = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_MPCORER,
.name = "mpcorer",
.sid = TEGRA186_SID_PASSTHROUGH,
.regs = {
- .override = 0x138,
- .security = 0x13c,
+ .sid = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVENCSWR,
.name = "nvencswr",
.sid = TEGRA186_SID_NVENC,
.regs = {
- .override = 0x158,
- .security = 0x15c,
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AFIW,
.name = "afiw",
.sid = TEGRA186_SID_AFI,
.regs = {
- .override = 0x188,
- .security = 0x18c,
+ .sid = {
+ .override = 0x188,
+ .security = 0x18c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_HDAW,
.name = "hdaw",
.sid = TEGRA186_SID_HDA,
.regs = {
- .override = 0x1a8,
- .security = 0x1ac,
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_MPCOREW,
.name = "mpcorew",
.sid = TEGRA186_SID_PASSTHROUGH,
.regs = {
- .override = 0x1c8,
- .security = 0x1cc,
+ .sid = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SATAW,
.name = "sataw",
.sid = TEGRA186_SID_SATA,
.regs = {
- .override = 0x1e8,
- .security = 0x1ec,
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPRA,
.name = "ispra",
.sid = TEGRA186_SID_ISP,
.regs = {
- .override = 0x220,
- .security = 0x224,
+ .sid = {
+ .override = 0x220,
+ .security = 0x224,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPWA,
.name = "ispwa",
.sid = TEGRA186_SID_ISP,
.regs = {
- .override = 0x230,
- .security = 0x234,
+ .sid = {
+ .override = 0x230,
+ .security = 0x234,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPWB,
.name = "ispwb",
.sid = TEGRA186_SID_ISP,
.regs = {
- .override = 0x238,
- .security = 0x23c,
+ .sid = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_HOSTR,
.name = "xusb_hostr",
.sid = TEGRA186_SID_XUSB_HOST,
.regs = {
- .override = 0x250,
- .security = 0x254,
+ .sid = {
+ .override = 0x250,
+ .security = 0x254,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_HOSTW,
.name = "xusb_hostw",
.sid = TEGRA186_SID_XUSB_HOST,
.regs = {
- .override = 0x258,
- .security = 0x25c,
+ .sid = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_DEVR,
.name = "xusb_devr",
.sid = TEGRA186_SID_XUSB_DEV,
.regs = {
- .override = 0x260,
- .security = 0x264,
+ .sid = {
+ .override = 0x260,
+ .security = 0x264,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_DEVW,
.name = "xusb_devw",
.sid = TEGRA186_SID_XUSB_DEV,
.regs = {
- .override = 0x268,
- .security = 0x26c,
+ .sid = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSRD,
.name = "tsecsrd",
.sid = TEGRA186_SID_TSEC,
.regs = {
- .override = 0x2a0,
- .security = 0x2a4,
+ .sid = {
+ .override = 0x2a0,
+ .security = 0x2a4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSWR,
.name = "tsecswr",
.sid = TEGRA186_SID_TSEC,
.regs = {
- .override = 0x2a8,
- .security = 0x2ac,
+ .sid = {
+ .override = 0x2a8,
+ .security = 0x2ac,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSRD,
.name = "gpusrd",
.sid = TEGRA186_SID_GPU,
.regs = {
- .override = 0x2c0,
- .security = 0x2c4,
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSWR,
.name = "gpuswr",
.sid = TEGRA186_SID_GPU,
.regs = {
- .override = 0x2c8,
- .security = 0x2cc,
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRA,
.name = "sdmmcra",
.sid = TEGRA186_SID_SDMMC1,
.regs = {
- .override = 0x300,
- .security = 0x304,
+ .sid = {
+ .override = 0x300,
+ .security = 0x304,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRAA,
.name = "sdmmcraa",
.sid = TEGRA186_SID_SDMMC2,
.regs = {
- .override = 0x308,
- .security = 0x30c,
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCR,
.name = "sdmmcr",
.sid = TEGRA186_SID_SDMMC3,
.regs = {
- .override = 0x310,
- .security = 0x314,
+ .sid = {
+ .override = 0x310,
+ .security = 0x314,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRAB,
.name = "sdmmcrab",
.sid = TEGRA186_SID_SDMMC4,
.regs = {
- .override = 0x318,
- .security = 0x31c,
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWA,
.name = "sdmmcwa",
.sid = TEGRA186_SID_SDMMC1,
.regs = {
- .override = 0x320,
- .security = 0x324,
+ .sid = {
+ .override = 0x320,
+ .security = 0x324,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWAA,
.name = "sdmmcwaa",
.sid = TEGRA186_SID_SDMMC2,
.regs = {
- .override = 0x328,
- .security = 0x32c,
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCW,
.name = "sdmmcw",
.sid = TEGRA186_SID_SDMMC3,
.regs = {
- .override = 0x330,
- .security = 0x334,
+ .sid = {
+ .override = 0x330,
+ .security = 0x334,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWAB,
.name = "sdmmcwab",
.sid = TEGRA186_SID_SDMMC4,
.regs = {
- .override = 0x338,
- .security = 0x33c,
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSRD,
.name = "vicsrd",
.sid = TEGRA186_SID_VIC,
.regs = {
- .override = 0x360,
- .security = 0x364,
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSWR,
.name = "vicswr",
.sid = TEGRA186_SID_VIC,
.regs = {
- .override = 0x368,
- .security = 0x36c,
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_VIW,
.name = "viw",
.sid = TEGRA186_SID_VI,
.regs = {
- .override = 0x390,
- .security = 0x394,
+ .sid = {
+ .override = 0x390,
+ .security = 0x394,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSRD,
.name = "nvdecsrd",
.sid = TEGRA186_SID_NVDEC,
.regs = {
- .override = 0x3c0,
- .security = 0x3c4,
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSWR,
.name = "nvdecswr",
.sid = TEGRA186_SID_NVDEC,
.regs = {
- .override = 0x3c8,
- .security = 0x3cc,
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_APER,
.name = "aper",
.sid = TEGRA186_SID_APE,
.regs = {
- .override = 0x3d0,
- .security = 0x3d4,
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_APEW,
.name = "apew",
.sid = TEGRA186_SID_APE,
.regs = {
- .override = 0x3d8,
- .security = 0x3dc,
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVJPGSRD,
.name = "nvjpgsrd",
.sid = TEGRA186_SID_NVJPG,
.regs = {
- .override = 0x3f0,
- .security = 0x3f4,
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVJPGSWR,
.name = "nvjpgswr",
.sid = TEGRA186_SID_NVJPG,
.regs = {
- .override = 0x3f8,
- .security = 0x3fc,
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SESRD,
.name = "sesrd",
.sid = TEGRA186_SID_SE,
.regs = {
- .override = 0x400,
- .security = 0x404,
+ .sid = {
+ .override = 0x400,
+ .security = 0x404,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SESWR,
.name = "seswr",
.sid = TEGRA186_SID_SE,
.regs = {
- .override = 0x408,
- .security = 0x40c,
+ .sid = {
+ .override = 0x408,
+ .security = 0x40c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_ETRR,
.name = "etrr",
.sid = TEGRA186_SID_ETR,
.regs = {
- .override = 0x420,
- .security = 0x424,
+ .sid = {
+ .override = 0x420,
+ .security = 0x424,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_ETRW,
.name = "etrw",
.sid = TEGRA186_SID_ETR,
.regs = {
- .override = 0x428,
- .security = 0x42c,
+ .sid = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSRDB,
.name = "tsecsrdb",
.sid = TEGRA186_SID_TSECB,
.regs = {
- .override = 0x430,
- .security = 0x434,
+ .sid = {
+ .override = 0x430,
+ .security = 0x434,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSWRB,
.name = "tsecswrb",
.sid = TEGRA186_SID_TSECB,
.regs = {
- .override = 0x438,
- .security = 0x43c,
+ .sid = {
+ .override = 0x438,
+ .security = 0x43c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSRD2,
.name = "gpusrd2",
.sid = TEGRA186_SID_GPU,
.regs = {
- .override = 0x440,
- .security = 0x444,
+ .sid = {
+ .override = 0x440,
+ .security = 0x444,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSWR2,
.name = "gpuswr2",
.sid = TEGRA186_SID_GPU,
.regs = {
- .override = 0x448,
- .security = 0x44c,
+ .sid = {
+ .override = 0x448,
+ .security = 0x44c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AXISR,
.name = "axisr",
.sid = TEGRA186_SID_GPCDMA_0,
.regs = {
- .override = 0x460,
- .security = 0x464,
+ .sid = {
+ .override = 0x460,
+ .security = 0x464,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AXISW,
.name = "axisw",
.sid = TEGRA186_SID_GPCDMA_0,
.regs = {
- .override = 0x468,
- .security = 0x46c,
+ .sid = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_EQOSR,
.name = "eqosr",
.sid = TEGRA186_SID_EQOS,
.regs = {
- .override = 0x470,
- .security = 0x474,
+ .sid = {
+ .override = 0x470,
+ .security = 0x474,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_EQOSW,
.name = "eqosw",
.sid = TEGRA186_SID_EQOS,
.regs = {
- .override = 0x478,
- .security = 0x47c,
+ .sid = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_UFSHCR,
.name = "ufshcr",
.sid = TEGRA186_SID_UFSHC,
.regs = {
- .override = 0x480,
- .security = 0x484,
+ .sid = {
+ .override = 0x480,
+ .security = 0x484,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_UFSHCW,
.name = "ufshcw",
.sid = TEGRA186_SID_UFSHC,
.regs = {
- .override = 0x488,
- .security = 0x48c,
+ .sid = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDISPLAYR,
.name = "nvdisplayr",
.sid = TEGRA186_SID_NVDISPLAY,
.regs = {
- .override = 0x490,
- .security = 0x494,
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPR,
.name = "bpmpr",
.sid = TEGRA186_SID_BPMP,
.regs = {
- .override = 0x498,
- .security = 0x49c,
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPW,
.name = "bpmpw",
.sid = TEGRA186_SID_BPMP,
.regs = {
- .override = 0x4a0,
- .security = 0x4a4,
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPDMAR,
.name = "bpmpdmar",
.sid = TEGRA186_SID_BPMP,
.regs = {
- .override = 0x4a8,
- .security = 0x4ac,
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPDMAW,
.name = "bpmpdmaw",
.sid = TEGRA186_SID_BPMP,
.regs = {
- .override = 0x4b0,
- .security = 0x4b4,
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AONR,
.name = "aonr",
.sid = TEGRA186_SID_AON,
.regs = {
- .override = 0x4b8,
- .security = 0x4bc,
+ .sid = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AONW,
.name = "aonw",
.sid = TEGRA186_SID_AON,
.regs = {
- .override = 0x4c0,
- .security = 0x4c4,
+ .sid = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AONDMAR,
.name = "aondmar",
.sid = TEGRA186_SID_AON,
.regs = {
- .override = 0x4c8,
- .security = 0x4cc,
+ .sid = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_AONDMAW,
.name = "aondmaw",
.sid = TEGRA186_SID_AON,
.regs = {
- .override = 0x4d0,
- .security = 0x4d4,
+ .sid = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SCER,
.name = "scer",
.sid = TEGRA186_SID_SCE,
.regs = {
- .override = 0x4d8,
- .security = 0x4dc,
+ .sid = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEW,
.name = "scew",
.sid = TEGRA186_SID_SCE,
.regs = {
- .override = 0x4e0,
- .security = 0x4e4,
+ .sid = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEDMAR,
.name = "scedmar",
.sid = TEGRA186_SID_SCE,
.regs = {
- .override = 0x4e8,
- .security = 0x4ec,
+ .sid = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEDMAW,
.name = "scedmaw",
.sid = TEGRA186_SID_SCE,
.regs = {
- .override = 0x4f0,
- .security = 0x4f4,
+ .sid = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_APEDMAR,
.name = "apedmar",
.sid = TEGRA186_SID_APE,
.regs = {
- .override = 0x4f8,
- .security = 0x4fc,
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_APEDMAW,
.name = "apedmaw",
.sid = TEGRA186_SID_APE,
.regs = {
- .override = 0x500,
- .security = 0x504,
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDISPLAYR1,
.name = "nvdisplayr1",
.sid = TEGRA186_SID_NVDISPLAY,
.regs = {
- .override = 0x508,
- .security = 0x50c,
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSRD1,
.name = "vicsrd1",
.sid = TEGRA186_SID_VIC,
.regs = {
- .override = 0x510,
- .security = 0x514,
+ .sid = {
+ .override = 0x510,
+ .security = 0x514,
+ },
},
}, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSRD1,
.name = "nvdecsrd1",
.sid = TEGRA186_SID_NVDEC,
.regs = {
- .override = 0x518,
- .security = 0x51c,
+ .sid = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
},
},
};
-static const struct tegra186_mc_soc tegra186_mc_soc = {
+const struct tegra_mc_soc tegra186_mc_soc = {
.num_clients = ARRAY_SIZE(tegra186_mc_clients),
.clients = tegra186_mc_clients,
+ .num_address_bits = 40,
+ .ops = &tegra186_mc_ops,
};
#endif
-
-#if defined(CONFIG_ARCH_TEGRA_194_SOC)
-static const struct tegra186_mc_client tegra194_mc_clients[] = {
- {
- .name = "ptcr",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x000,
- .security = 0x004,
- },
- }, {
- .name = "miu7r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x008,
- .security = 0x00c,
- },
- }, {
- .name = "miu7w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x010,
- .security = 0x014,
- },
- }, {
- .name = "hdar",
- .sid = TEGRA194_SID_HDA,
- .regs = {
- .override = 0x0a8,
- .security = 0x0ac,
- },
- }, {
- .name = "host1xdmar",
- .sid = TEGRA194_SID_HOST1X,
- .regs = {
- .override = 0x0b0,
- .security = 0x0b4,
- },
- }, {
- .name = "nvencsrd",
- .sid = TEGRA194_SID_NVENC,
- .regs = {
- .override = 0x0e0,
- .security = 0x0e4,
- },
- }, {
- .name = "satar",
- .sid = TEGRA194_SID_SATA,
- .regs = {
- .override = 0x0f8,
- .security = 0x0fc,
- },
- }, {
- .name = "mpcorer",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x138,
- .security = 0x13c,
- },
- }, {
- .name = "nvencswr",
- .sid = TEGRA194_SID_NVENC,
- .regs = {
- .override = 0x158,
- .security = 0x15c,
- },
- }, {
- .name = "hdaw",
- .sid = TEGRA194_SID_HDA,
- .regs = {
- .override = 0x1a8,
- .security = 0x1ac,
- },
- }, {
- .name = "mpcorew",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x1c8,
- .security = 0x1cc,
- },
- }, {
- .name = "sataw",
- .sid = TEGRA194_SID_SATA,
- .regs = {
- .override = 0x1e8,
- .security = 0x1ec,
- },
- }, {
- .name = "ispra",
- .sid = TEGRA194_SID_ISP,
- .regs = {
- .override = 0x220,
- .security = 0x224,
- },
- }, {
- .name = "ispfalr",
- .sid = TEGRA194_SID_ISP_FALCON,
- .regs = {
- .override = 0x228,
- .security = 0x22c,
- },
- }, {
- .name = "ispwa",
- .sid = TEGRA194_SID_ISP,
- .regs = {
- .override = 0x230,
- .security = 0x234,
- },
- }, {
- .name = "ispwb",
- .sid = TEGRA194_SID_ISP,
- .regs = {
- .override = 0x238,
- .security = 0x23c,
- },
- }, {
- .name = "xusb_hostr",
- .sid = TEGRA194_SID_XUSB_HOST,
- .regs = {
- .override = 0x250,
- .security = 0x254,
- },
- }, {
- .name = "xusb_hostw",
- .sid = TEGRA194_SID_XUSB_HOST,
- .regs = {
- .override = 0x258,
- .security = 0x25c,
- },
- }, {
- .name = "xusb_devr",
- .sid = TEGRA194_SID_XUSB_DEV,
- .regs = {
- .override = 0x260,
- .security = 0x264,
- },
- }, {
- .name = "xusb_devw",
- .sid = TEGRA194_SID_XUSB_DEV,
- .regs = {
- .override = 0x268,
- .security = 0x26c,
- },
- }, {
- .name = "sdmmcra",
- .sid = TEGRA194_SID_SDMMC1,
- .regs = {
- .override = 0x300,
- .security = 0x304,
- },
- }, {
- .name = "sdmmcr",
- .sid = TEGRA194_SID_SDMMC3,
- .regs = {
- .override = 0x310,
- .security = 0x314,
- },
- }, {
- .name = "sdmmcrab",
- .sid = TEGRA194_SID_SDMMC4,
- .regs = {
- .override = 0x318,
- .security = 0x31c,
- },
- }, {
- .name = "sdmmcwa",
- .sid = TEGRA194_SID_SDMMC1,
- .regs = {
- .override = 0x320,
- .security = 0x324,
- },
- }, {
- .name = "sdmmcw",
- .sid = TEGRA194_SID_SDMMC3,
- .regs = {
- .override = 0x330,
- .security = 0x334,
- },
- }, {
- .name = "sdmmcwab",
- .sid = TEGRA194_SID_SDMMC4,
- .regs = {
- .override = 0x338,
- .security = 0x33c,
- },
- }, {
- .name = "vicsrd",
- .sid = TEGRA194_SID_VIC,
- .regs = {
- .override = 0x360,
- .security = 0x364,
- },
- }, {
- .name = "vicswr",
- .sid = TEGRA194_SID_VIC,
- .regs = {
- .override = 0x368,
- .security = 0x36c,
- },
- }, {
- .name = "viw",
- .sid = TEGRA194_SID_VI,
- .regs = {
- .override = 0x390,
- .security = 0x394,
- },
- }, {
- .name = "nvdecsrd",
- .sid = TEGRA194_SID_NVDEC,
- .regs = {
- .override = 0x3c0,
- .security = 0x3c4,
- },
- }, {
- .name = "nvdecswr",
- .sid = TEGRA194_SID_NVDEC,
- .regs = {
- .override = 0x3c8,
- .security = 0x3cc,
- },
- }, {
- .name = "aper",
- .sid = TEGRA194_SID_APE,
- .regs = {
- .override = 0x3c0,
- .security = 0x3c4,
- },
- }, {
- .name = "apew",
- .sid = TEGRA194_SID_APE,
- .regs = {
- .override = 0x3d0,
- .security = 0x3d4,
- },
- }, {
- .name = "nvjpgsrd",
- .sid = TEGRA194_SID_NVJPG,
- .regs = {
- .override = 0x3f0,
- .security = 0x3f4,
- },
- }, {
- .name = "nvjpgswr",
- .sid = TEGRA194_SID_NVJPG,
- .regs = {
- .override = 0x3f0,
- .security = 0x3f4,
- },
- }, {
- .name = "axiapr",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x410,
- .security = 0x414,
- },
- }, {
- .name = "axiapw",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x418,
- .security = 0x41c,
- },
- }, {
- .name = "etrr",
- .sid = TEGRA194_SID_ETR,
- .regs = {
- .override = 0x420,
- .security = 0x424,
- },
- }, {
- .name = "etrw",
- .sid = TEGRA194_SID_ETR,
- .regs = {
- .override = 0x428,
- .security = 0x42c,
- },
- }, {
- .name = "axisr",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x460,
- .security = 0x464,
- },
- }, {
- .name = "axisw",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x468,
- .security = 0x46c,
- },
- }, {
- .name = "eqosr",
- .sid = TEGRA194_SID_EQOS,
- .regs = {
- .override = 0x470,
- .security = 0x474,
- },
- }, {
- .name = "eqosw",
- .sid = TEGRA194_SID_EQOS,
- .regs = {
- .override = 0x478,
- .security = 0x47c,
- },
- }, {
- .name = "ufshcr",
- .sid = TEGRA194_SID_UFSHC,
- .regs = {
- .override = 0x480,
- .security = 0x484,
- },
- }, {
- .name = "ufshcw",
- .sid = TEGRA194_SID_UFSHC,
- .regs = {
- .override = 0x488,
- .security = 0x48c,
- },
- }, {
- .name = "nvdisplayr",
- .sid = TEGRA194_SID_NVDISPLAY,
- .regs = {
- .override = 0x490,
- .security = 0x494,
- },
- }, {
- .name = "bpmpr",
- .sid = TEGRA194_SID_BPMP,
- .regs = {
- .override = 0x498,
- .security = 0x49c,
- },
- }, {
- .name = "bpmpw",
- .sid = TEGRA194_SID_BPMP,
- .regs = {
- .override = 0x4a0,
- .security = 0x4a4,
- },
- }, {
- .name = "bpmpdmar",
- .sid = TEGRA194_SID_BPMP,
- .regs = {
- .override = 0x4a8,
- .security = 0x4ac,
- },
- }, {
- .name = "bpmpdmaw",
- .sid = TEGRA194_SID_BPMP,
- .regs = {
- .override = 0x4b0,
- .security = 0x4b4,
- },
- }, {
- .name = "aonr",
- .sid = TEGRA194_SID_AON,
- .regs = {
- .override = 0x4b8,
- .security = 0x4bc,
- },
- }, {
- .name = "aonw",
- .sid = TEGRA194_SID_AON,
- .regs = {
- .override = 0x4c0,
- .security = 0x4c4,
- },
- }, {
- .name = "aondmar",
- .sid = TEGRA194_SID_AON,
- .regs = {
- .override = 0x4c8,
- .security = 0x4cc,
- },
- }, {
- .name = "aondmaw",
- .sid = TEGRA194_SID_AON,
- .regs = {
- .override = 0x4d0,
- .security = 0x4d4,
- },
- }, {
- .name = "scer",
- .sid = TEGRA194_SID_SCE,
- .regs = {
- .override = 0x4d8,
- .security = 0x4dc,
- },
- }, {
- .name = "scew",
- .sid = TEGRA194_SID_SCE,
- .regs = {
- .override = 0x4e0,
- .security = 0x4e4,
- },
- }, {
- .name = "scedmar",
- .sid = TEGRA194_SID_SCE,
- .regs = {
- .override = 0x4e8,
- .security = 0x4ec,
- },
- }, {
- .name = "scedmaw",
- .sid = TEGRA194_SID_SCE,
- .regs = {
- .override = 0x4f0,
- .security = 0x4f4,
- },
- }, {
- .name = "apedmar",
- .sid = TEGRA194_SID_APE,
- .regs = {
- .override = 0x4f8,
- .security = 0x4fc,
- },
- }, {
- .name = "apedmaw",
- .sid = TEGRA194_SID_APE,
- .regs = {
- .override = 0x500,
- .security = 0x504,
- },
- }, {
- .name = "nvdisplayr1",
- .sid = TEGRA194_SID_NVDISPLAY,
- .regs = {
- .override = 0x508,
- .security = 0x50c,
- },
- }, {
- .name = "vicsrd1",
- .sid = TEGRA194_SID_VIC,
- .regs = {
- .override = 0x510,
- .security = 0x514,
- },
- }, {
- .name = "nvdecsrd1",
- .sid = TEGRA194_SID_NVDEC,
- .regs = {
- .override = 0x518,
- .security = 0x51c,
- },
- }, {
- .name = "miu0r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x530,
- .security = 0x534,
- },
- }, {
- .name = "miu0w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x538,
- .security = 0x53c,
- },
- }, {
- .name = "miu1r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x540,
- .security = 0x544,
- },
- }, {
- .name = "miu1w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x548,
- .security = 0x54c,
- },
- }, {
- .name = "miu2r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x570,
- .security = 0x574,
- },
- }, {
- .name = "miu2w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x578,
- .security = 0x57c,
- },
- }, {
- .name = "miu3r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x580,
- .security = 0x584,
- },
- }, {
- .name = "miu3w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x588,
- .security = 0x58c,
- },
- }, {
- .name = "miu4r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x590,
- .security = 0x594,
- },
- }, {
- .name = "miu4w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x598,
- .security = 0x59c,
- },
- }, {
- .name = "dpmur",
- .sid = TEGRA194_SID_PASSTHROUGH,
- .regs = {
- .override = 0x598,
- .security = 0x59c,
- },
- }, {
- .name = "vifalr",
- .sid = TEGRA194_SID_VI_FALCON,
- .regs = {
- .override = 0x5e0,
- .security = 0x5e4,
- },
- }, {
- .name = "vifalw",
- .sid = TEGRA194_SID_VI_FALCON,
- .regs = {
- .override = 0x5e8,
- .security = 0x5ec,
- },
- }, {
- .name = "dla0rda",
- .sid = TEGRA194_SID_NVDLA0,
- .regs = {
- .override = 0x5f0,
- .security = 0x5f4,
- },
- }, {
- .name = "dla0falrdb",
- .sid = TEGRA194_SID_NVDLA0,
- .regs = {
- .override = 0x5f8,
- .security = 0x5fc,
- },
- }, {
- .name = "dla0wra",
- .sid = TEGRA194_SID_NVDLA0,
- .regs = {
- .override = 0x600,
- .security = 0x604,
- },
- }, {
- .name = "dla0falwrb",
- .sid = TEGRA194_SID_NVDLA0,
- .regs = {
- .override = 0x608,
- .security = 0x60c,
- },
- }, {
- .name = "dla1rda",
- .sid = TEGRA194_SID_NVDLA1,
- .regs = {
- .override = 0x610,
- .security = 0x614,
- },
- }, {
- .name = "dla1falrdb",
- .sid = TEGRA194_SID_NVDLA1,
- .regs = {
- .override = 0x618,
- .security = 0x61c,
- },
- }, {
- .name = "dla1wra",
- .sid = TEGRA194_SID_NVDLA1,
- .regs = {
- .override = 0x620,
- .security = 0x624,
- },
- }, {
- .name = "dla1falwrb",
- .sid = TEGRA194_SID_NVDLA1,
- .regs = {
- .override = 0x628,
- .security = 0x62c,
- },
- }, {
- .name = "pva0rda",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x630,
- .security = 0x634,
- },
- }, {
- .name = "pva0rdb",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x638,
- .security = 0x63c,
- },
- }, {
- .name = "pva0rdc",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x640,
- .security = 0x644,
- },
- }, {
- .name = "pva0wra",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x648,
- .security = 0x64c,
- },
- }, {
- .name = "pva0wrb",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x650,
- .security = 0x654,
- },
- }, {
- .name = "pva0wrc",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x658,
- .security = 0x65c,
- },
- }, {
- .name = "pva1rda",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x660,
- .security = 0x664,
- },
- }, {
- .name = "pva1rdb",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x668,
- .security = 0x66c,
- },
- }, {
- .name = "pva1rdc",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x670,
- .security = 0x674,
- },
- }, {
- .name = "pva1wra",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x678,
- .security = 0x67c,
- },
- }, {
- .name = "pva1wrb",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x680,
- .security = 0x684,
- },
- }, {
- .name = "pva1wrc",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x688,
- .security = 0x68c,
- },
- }, {
- .name = "rcer",
- .sid = TEGRA194_SID_RCE,
- .regs = {
- .override = 0x690,
- .security = 0x694,
- },
- }, {
- .name = "rcew",
- .sid = TEGRA194_SID_RCE,
- .regs = {
- .override = 0x698,
- .security = 0x69c,
- },
- }, {
- .name = "rcedmar",
- .sid = TEGRA194_SID_RCE,
- .regs = {
- .override = 0x6a0,
- .security = 0x6a4,
- },
- }, {
- .name = "rcedmaw",
- .sid = TEGRA194_SID_RCE,
- .regs = {
- .override = 0x6a8,
- .security = 0x6ac,
- },
- }, {
- .name = "nvenc1srd",
- .sid = TEGRA194_SID_NVENC1,
- .regs = {
- .override = 0x6b0,
- .security = 0x6b4,
- },
- }, {
- .name = "nvenc1swr",
- .sid = TEGRA194_SID_NVENC1,
- .regs = {
- .override = 0x6b8,
- .security = 0x6bc,
- },
- }, {
- .name = "pcie0r",
- .sid = TEGRA194_SID_PCIE0,
- .regs = {
- .override = 0x6c0,
- .security = 0x6c4,
- },
- }, {
- .name = "pcie0w",
- .sid = TEGRA194_SID_PCIE0,
- .regs = {
- .override = 0x6c8,
- .security = 0x6cc,
- },
- }, {
- .name = "pcie1r",
- .sid = TEGRA194_SID_PCIE1,
- .regs = {
- .override = 0x6d0,
- .security = 0x6d4,
- },
- }, {
- .name = "pcie1w",
- .sid = TEGRA194_SID_PCIE1,
- .regs = {
- .override = 0x6d8,
- .security = 0x6dc,
- },
- }, {
- .name = "pcie2ar",
- .sid = TEGRA194_SID_PCIE2,
- .regs = {
- .override = 0x6e0,
- .security = 0x6e4,
- },
- }, {
- .name = "pcie2aw",
- .sid = TEGRA194_SID_PCIE2,
- .regs = {
- .override = 0x6e8,
- .security = 0x6ec,
- },
- }, {
- .name = "pcie3r",
- .sid = TEGRA194_SID_PCIE3,
- .regs = {
- .override = 0x6f0,
- .security = 0x6f4,
- },
- }, {
- .name = "pcie3w",
- .sid = TEGRA194_SID_PCIE3,
- .regs = {
- .override = 0x6f8,
- .security = 0x6fc,
- },
- }, {
- .name = "pcie4r",
- .sid = TEGRA194_SID_PCIE4,
- .regs = {
- .override = 0x700,
- .security = 0x704,
- },
- }, {
- .name = "pcie4w",
- .sid = TEGRA194_SID_PCIE4,
- .regs = {
- .override = 0x708,
- .security = 0x70c,
- },
- }, {
- .name = "pcie5r",
- .sid = TEGRA194_SID_PCIE5,
- .regs = {
- .override = 0x710,
- .security = 0x714,
- },
- }, {
- .name = "pcie5w",
- .sid = TEGRA194_SID_PCIE5,
- .regs = {
- .override = 0x718,
- .security = 0x71c,
- },
- }, {
- .name = "ispfalw",
- .sid = TEGRA194_SID_ISP_FALCON,
- .regs = {
- .override = 0x720,
- .security = 0x724,
- },
- }, {
- .name = "dla0rda1",
- .sid = TEGRA194_SID_NVDLA0,
- .regs = {
- .override = 0x748,
- .security = 0x74c,
- },
- }, {
- .name = "dla1rda1",
- .sid = TEGRA194_SID_NVDLA1,
- .regs = {
- .override = 0x750,
- .security = 0x754,
- },
- }, {
- .name = "pva0rda1",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x758,
- .security = 0x75c,
- },
- }, {
- .name = "pva0rdb1",
- .sid = TEGRA194_SID_PVA0,
- .regs = {
- .override = 0x760,
- .security = 0x764,
- },
- }, {
- .name = "pva1rda1",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x768,
- .security = 0x76c,
- },
- }, {
- .name = "pva1rdb1",
- .sid = TEGRA194_SID_PVA1,
- .regs = {
- .override = 0x770,
- .security = 0x774,
- },
- }, {
- .name = "pcie5r1",
- .sid = TEGRA194_SID_PCIE5,
- .regs = {
- .override = 0x778,
- .security = 0x77c,
- },
- }, {
- .name = "nvencsrd1",
- .sid = TEGRA194_SID_NVENC,
- .regs = {
- .override = 0x780,
- .security = 0x784,
- },
- }, {
- .name = "nvenc1srd1",
- .sid = TEGRA194_SID_NVENC1,
- .regs = {
- .override = 0x788,
- .security = 0x78c,
- },
- }, {
- .name = "ispra1",
- .sid = TEGRA194_SID_ISP,
- .regs = {
- .override = 0x790,
- .security = 0x794,
- },
- }, {
- .name = "pcie0r1",
- .sid = TEGRA194_SID_PCIE0,
- .regs = {
- .override = 0x798,
- .security = 0x79c,
- },
- }, {
- .name = "nvdec1srd",
- .sid = TEGRA194_SID_NVDEC1,
- .regs = {
- .override = 0x7c8,
- .security = 0x7cc,
- },
- }, {
- .name = "nvdec1srd1",
- .sid = TEGRA194_SID_NVDEC1,
- .regs = {
- .override = 0x7d0,
- .security = 0x7d4,
- },
- }, {
- .name = "nvdec1swr",
- .sid = TEGRA194_SID_NVDEC1,
- .regs = {
- .override = 0x7d8,
- .security = 0x7dc,
- },
- }, {
- .name = "miu5r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x7e0,
- .security = 0x7e4,
- },
- }, {
- .name = "miu5w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x7e8,
- .security = 0x7ec,
- },
- }, {
- .name = "miu6r",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x7f0,
- .security = 0x7f4,
- },
- }, {
- .name = "miu6w",
- .sid = TEGRA194_SID_MIU,
- .regs = {
- .override = 0x7f8,
- .security = 0x7fc,
- },
- },
-};
-
-static const struct tegra186_mc_soc tegra194_mc_soc = {
- .num_clients = ARRAY_SIZE(tegra194_mc_clients),
- .clients = tegra194_mc_clients,
-};
-#endif
-
-static int tegra186_mc_probe(struct platform_device *pdev)
-{
- struct tegra186_mc *mc;
- struct resource *res;
- int err;
-
- mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
- if (!mc)
- return -ENOMEM;
-
- mc->soc = of_device_get_match_data(&pdev->dev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mc->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mc->regs))
- return PTR_ERR(mc->regs);
-
- mc->dev = &pdev->dev;
-
- err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
- if (err < 0)
- return err;
-
- platform_set_drvdata(pdev, mc);
- tegra186_mc_program_sid(mc);
-
- return 0;
-}
-
-static int tegra186_mc_remove(struct platform_device *pdev)
-{
- struct tegra186_mc *mc = platform_get_drvdata(pdev);
-
- of_platform_depopulate(mc->dev);
-
- return 0;
-}
-
-static const struct of_device_id tegra186_mc_of_match[] = {
-#if defined(CONFIG_ARCH_TEGRA_186_SOC)
- { .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
-#endif
-#if defined(CONFIG_ARCH_TEGRA_194_SOC)
- { .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
-#endif
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, tegra186_mc_of_match);
-
-static int __maybe_unused tegra186_mc_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int __maybe_unused tegra186_mc_resume(struct device *dev)
-{
- struct tegra186_mc *mc = dev_get_drvdata(dev);
-
- tegra186_mc_program_sid(mc);
-
- return 0;
-}
-
-static const struct dev_pm_ops tegra186_mc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra186_mc_suspend, tegra186_mc_resume)
-};
-
-static struct platform_driver tegra186_mc_driver = {
- .driver = {
- .name = "tegra186-mc",
- .of_match_table = tegra186_mc_of_match,
- .pm = &tegra186_mc_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = tegra186_mc_probe,
- .remove = tegra186_mc_remove,
-};
-module_platform_driver(tegra186_mc_driver);
-
-MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra186 Memory Controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra194.c b/drivers/memory/tegra/tegra194.c
new file mode 100644
index 000000000000..cab998b8bd5c
--- /dev/null
+++ b/drivers/memory/tegra/tegra194.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/mc.h>
+
+#include <dt-bindings/memory/tegra194-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra194_mc_clients[] = {
+ {
+ .id = TEGRA194_MEMORY_CLIENT_PTCR,
+ .name = "ptcr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x000,
+ .security = 0x004,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU7R,
+ .name = "miu7r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x008,
+ .security = 0x00c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU7W,
+ .name = "miu7w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x010,
+ .security = 0x014,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HOST1XDMAR,
+ .name = "host1xdmar",
+ .sid = TEGRA194_SID_HOST1X,
+ .regs = {
+ .sid = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SATAR,
+ .name = "satar",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MPCORER,
+ .name = "mpcorer",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MPCOREW,
+ .name = "mpcorew",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SATAW,
+ .name = "sataw",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPRA,
+ .name = "ispra",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x220,
+ .security = 0x224,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPFALR,
+ .name = "ispfalr",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x228,
+ .security = 0x22c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPWA,
+ .name = "ispwa",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x230,
+ .security = 0x234,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPWB,
+ .name = "ispwb",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_HOSTR,
+ .name = "xusb_hostr",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x250,
+ .security = 0x254,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_HOSTW,
+ .name = "xusb_hostw",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_DEVR,
+ .name = "xusb_devr",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x260,
+ .security = 0x264,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_DEVW,
+ .name = "xusb_devw",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCRA,
+ .name = "sdmmcra",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x300,
+ .security = 0x304,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCR,
+ .name = "sdmmcr",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x310,
+ .security = 0x314,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCWA,
+ .name = "sdmmcwa",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x320,
+ .security = 0x324,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCW,
+ .name = "sdmmcw",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x330,
+ .security = 0x334,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIW,
+ .name = "viw",
+ .sid = TEGRA194_SID_VI,
+ .regs = {
+ .sid = {
+ .override = 0x390,
+ .security = 0x394,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .name = "axiapr",
+ .id = TEGRA194_MEMORY_CLIENT_AXIAPR,
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x410,
+ .security = 0x414,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXIAPW,
+ .name = "axiapw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x418,
+ .security = 0x41c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ETRR,
+ .name = "etrr",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x420,
+ .security = 0x424,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ETRW,
+ .name = "etrw",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXISR,
+ .name = "axisr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x460,
+ .security = 0x464,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXISW,
+ .name = "axisw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_EQOSR,
+ .name = "eqosr",
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x470,
+ .security = 0x474,
+ },
+ },
+ }, {
+ .name = "eqosw",
+ .id = TEGRA194_MEMORY_CLIENT_EQOSW,
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_UFSHCR,
+ .name = "ufshcr",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x480,
+ .security = 0x484,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_UFSHCW,
+ .name = "ufshcw",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONR,
+ .name = "aonr",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONW,
+ .name = "aonw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONDMAR,
+ .name = "aondmar",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONDMAW,
+ .name = "aondmaw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCER,
+ .name = "scer",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEW,
+ .name = "scew",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEDMAR,
+ .name = "scedmar",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEDMAW,
+ .name = "scedmaw",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSRD1,
+ .name = "vicsrd1",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x510,
+ .security = 0x514,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSRD1,
+ .name = "nvdecsrd1",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU0R,
+ .name = "miu0r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x530,
+ .security = 0x534,
+ },
+ },
+ }, {
+ .name = "miu0w",
+ .id = TEGRA194_MEMORY_CLIENT_MIU0W,
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x538,
+ .security = 0x53c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU1R,
+ .name = "miu1r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x540,
+ .security = 0x544,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU1W,
+ .name = "miu1w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x548,
+ .security = 0x54c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU2R,
+ .name = "miu2r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x570,
+ .security = 0x574,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU2W,
+ .name = "miu2w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x578,
+ .security = 0x57c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU3R,
+ .name = "miu3r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x580,
+ .security = 0x584,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU3W,
+ .name = "miu3w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x588,
+ .security = 0x58c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU4R,
+ .name = "miu4r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x590,
+ .security = 0x594,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU4W,
+ .name = "miu4w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DPMUR,
+ .name = "dpmur",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIFALR,
+ .name = "vifalr",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x5e0,
+ .security = 0x5e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIFALW,
+ .name = "vifalw",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x5e8,
+ .security = 0x5ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0RDA,
+ .name = "dla0rda",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f0,
+ .security = 0x5f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0FALRDB,
+ .name = "dla0falrdb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f8,
+ .security = 0x5fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0WRA,
+ .name = "dla0wra",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x600,
+ .security = 0x604,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0FALWRB,
+ .name = "dla0falwrb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x608,
+ .security = 0x60c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1RDA,
+ .name = "dla1rda",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x610,
+ .security = 0x614,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1FALRDB,
+ .name = "dla1falrdb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x618,
+ .security = 0x61c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1WRA,
+ .name = "dla1wra",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x620,
+ .security = 0x624,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1FALWRB,
+ .name = "dla1falwrb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x628,
+ .security = 0x62c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDA,
+ .name = "pva0rda",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x630,
+ .security = 0x634,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDB,
+ .name = "pva0rdb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x638,
+ .security = 0x63c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDC,
+ .name = "pva0rdc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x640,
+ .security = 0x644,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRA,
+ .name = "pva0wra",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x648,
+ .security = 0x64c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRB,
+ .name = "pva0wrb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x650,
+ .security = 0x654,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRC,
+ .name = "pva0wrc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x658,
+ .security = 0x65c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDA,
+ .name = "pva1rda",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x660,
+ .security = 0x664,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDB,
+ .name = "pva1rdb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x668,
+ .security = 0x66c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDC,
+ .name = "pva1rdc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x670,
+ .security = 0x674,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRA,
+ .name = "pva1wra",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x678,
+ .security = 0x67c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRB,
+ .name = "pva1wrb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x680,
+ .security = 0x684,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRC,
+ .name = "pva1wrc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x688,
+ .security = 0x68c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCER,
+ .name = "rcer",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x690,
+ .security = 0x694,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEW,
+ .name = "rcew",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x698,
+ .security = 0x69c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEDMAR,
+ .name = "rcedmar",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x6a0,
+ .security = 0x6a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEDMAW,
+ .name = "rcedmaw",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x6a8,
+ .security = 0x6ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SRD,
+ .name = "nvenc1srd",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x6b0,
+ .security = 0x6b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SWR,
+ .name = "nvenc1swr",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x6b8,
+ .security = 0x6bc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0R,
+ .name = "pcie0r",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c0,
+ .security = 0x6c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0W,
+ .name = "pcie0w",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c8,
+ .security = 0x6cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE1R,
+ .name = "pcie1r",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d0,
+ .security = 0x6d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE1W,
+ .name = "pcie1w",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d8,
+ .security = 0x6dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE2AR,
+ .name = "pcie2ar",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e0,
+ .security = 0x6e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE2AW,
+ .name = "pcie2aw",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e8,
+ .security = 0x6ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE3R,
+ .name = "pcie3r",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f0,
+ .security = 0x6f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE3W,
+ .name = "pcie3w",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f8,
+ .security = 0x6fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE4R,
+ .name = "pcie4r",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x700,
+ .security = 0x704,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE4W,
+ .name = "pcie4w",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x708,
+ .security = 0x70c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5R,
+ .name = "pcie5r",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x710,
+ .security = 0x714,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5W,
+ .name = "pcie5w",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x718,
+ .security = 0x71c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPFALW,
+ .name = "ispfalw",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x720,
+ .security = 0x724,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0RDA1,
+ .name = "dla0rda1",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x748,
+ .security = 0x74c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1RDA1,
+ .name = "dla1rda1",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x750,
+ .security = 0x754,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDA1,
+ .name = "pva0rda1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x758,
+ .security = 0x75c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDB1,
+ .name = "pva0rdb1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x760,
+ .security = 0x764,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDA1,
+ .name = "pva1rda1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x768,
+ .security = 0x76c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDB1,
+ .name = "pva1rdb1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x770,
+ .security = 0x774,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5R1,
+ .name = "pcie5r1",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x778,
+ .security = 0x77c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSRD1,
+ .name = "nvencsrd1",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x780,
+ .security = 0x784,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SRD1,
+ .name = "nvenc1srd1",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x788,
+ .security = 0x78c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPRA1,
+ .name = "ispra1",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x790,
+ .security = 0x794,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0R1,
+ .name = "pcie0r1",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x798,
+ .security = 0x79c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SRD,
+ .name = "nvdec1srd",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7c8,
+ .security = 0x7cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SRD1,
+ .name = "nvdec1srd1",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7d0,
+ .security = 0x7d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SWR,
+ .name = "nvdec1swr",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7d8,
+ .security = 0x7dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU5R,
+ .name = "miu5r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7e0,
+ .security = 0x7e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU5W,
+ .name = "miu5w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7e8,
+ .security = 0x7ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU6R,
+ .name = "miu6r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7f0,
+ .security = 0x7f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU6W,
+ .name = "miu6w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7f8,
+ .security = 0x7fc,
+ },
+ },
+ },
+};
+
+const struct tegra_mc_soc tegra194_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra194_mc_clients),
+ .clients = tegra194_mc_clients,
+ .num_address_bits = 40,
+ .ops = &tegra186_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index da8a0da8da79..c3462dbc8c22 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -776,10 +776,6 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(emc->dev, "failed to create debugfs directory\n");
- return;
- }
debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
@@ -908,49 +904,6 @@ err_msg:
return err;
}
-static int tegra_emc_opp_table_init(struct tegra_emc *emc)
-{
- u32 hw_version = BIT(tegra_sku_info.soc_process_id);
- struct opp_table *hw_opp_table;
- int err;
-
- hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- err = PTR_ERR_OR_ZERO(hw_opp_table);
- if (err) {
- dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
- return err;
- }
-
- err = dev_pm_opp_of_add_table(emc->dev);
- if (err) {
- if (err == -ENODEV)
- dev_err(emc->dev, "OPP table not found, please update your device tree\n");
- else
- dev_err(emc->dev, "failed to add OPP table: %d\n", err);
-
- goto put_hw_table;
- }
-
- dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
- hw_version, clk_get_rate(emc->clk) / 1000000);
-
- /* first dummy rate-set initializes voltage state */
- err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
- if (err) {
- dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
- goto remove_table;
- }
-
- return 0;
-
-remove_table:
- dev_pm_opp_of_remove_table(emc->dev);
-put_hw_table:
- dev_pm_opp_put_supported_hw(hw_opp_table);
-
- return err;
-}
-
static void devm_tegra_emc_unset_callback(void *data)
{
tegra20_clk_set_emc_round_callback(NULL, NULL);
@@ -1077,6 +1030,7 @@ static int tegra_emc_devfreq_init(struct tegra_emc *emc)
static int tegra_emc_probe(struct platform_device *pdev)
{
+ struct tegra_core_opp_params opp_params = {};
struct device_node *np;
struct tegra_emc *emc;
int irq, err;
@@ -1122,7 +1076,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
- err = tegra_emc_opp_table_init(emc);
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
if (err)
return err;
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index 2db68a913b7a..fcd7738fcb53 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -679,7 +679,7 @@ static int tegra20_mc_stats_show(struct seq_file *s, void *unused)
return 0;
}
-static int tegra20_mc_init(struct tegra_mc *mc)
+static int tegra20_mc_probe(struct tegra_mc *mc)
{
debugfs_create_devm_seqfile(mc->dev, "stats", mc->debugfs.root,
tegra20_mc_stats_show);
@@ -687,6 +687,112 @@ static int tegra20_mc_init(struct tegra_mc *mc)
return 0;
}
+static int tegra20_mc_suspend(struct tegra_mc *mc)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_suspend(mc->gart);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra20_mc_resume(struct tegra_mc *mc)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_resume(mc->gart);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
+{
+ struct tegra_mc *mc = data;
+ unsigned long status;
+ unsigned int bit;
+
+ /* mask all interrupts to avoid flooding */
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ for_each_set_bit(bit, &status, 32) {
+ const char *error = tegra_mc_status_names[bit];
+ const char *direction = "read", *secure = "";
+ const char *client, *desc;
+ phys_addr_t addr;
+ u32 value, reg;
+ u8 id, type;
+
+ switch (BIT(bit)) {
+ case MC_INT_DECERR_EMEM:
+ reg = MC_DECERR_EMEM_OTHERS_STATUS;
+ value = mc_readl(mc, reg);
+
+ id = value & mc->soc->client_id_mask;
+ desc = tegra_mc_error_names[2];
+
+ if (value & BIT(31))
+ direction = "write";
+ break;
+
+ case MC_INT_INVALID_GART_PAGE:
+ reg = MC_GART_ERROR_REQ;
+ value = mc_readl(mc, reg);
+
+ id = (value >> 1) & mc->soc->client_id_mask;
+ desc = tegra_mc_error_names[2];
+
+ if (value & BIT(0))
+ direction = "write";
+ break;
+
+ case MC_INT_SECURITY_VIOLATION:
+ reg = MC_SECURITY_VIOLATION_STATUS;
+ value = mc_readl(mc, reg);
+
+ id = value & mc->soc->client_id_mask;
+ type = (value & BIT(30)) ? 4 : 3;
+ desc = tegra_mc_error_names[type];
+ secure = "secure ";
+
+ if (value & BIT(31))
+ direction = "write";
+ break;
+
+ default:
+ continue;
+ }
+
+ client = mc->soc->clients[id].name;
+ addr = mc_readl(mc, reg + sizeof(u32));
+
+ dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n",
+ client, secure, direction, &addr, error,
+ desc);
+ }
+
+ /* clear interrupts */
+ mc_writel(mc, status, MC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static const struct tegra_mc_ops tegra20_mc_ops = {
+ .probe = tegra20_mc_probe,
+ .suspend = tegra20_mc_suspend,
+ .resume = tegra20_mc_resume,
+ .handle_irq = tegra20_mc_handle_irq,
+};
+
const struct tegra_mc_soc tegra20_mc_soc = {
.clients = tegra20_mc_clients,
.num_clients = ARRAY_SIZE(tegra20_mc_clients),
@@ -698,5 +804,5 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
.icc_ops = &tegra20_mc_icc_ops,
- .init = tegra20_mc_init,
+ .ops = &tegra20_mc_ops,
};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index 5f224796e32e..06c0f17fa429 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1759,10 +1759,6 @@ static void tegra210_emc_debugfs_init(struct tegra210_emc *emc)
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(dev, "failed to create debugfs directory\n");
- return;
- }
debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
&tegra210_emc_debug_available_rates_fops);
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index b3bbc5a05ba1..8ab6498dbe7d 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -16,1005 +16,1149 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.id = 0x01,
.name = "display0a",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 1,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x02,
.name = "display0ab",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 2,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x03,
.name = "display0b",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 3,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x04,
.name = "display0bb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 4,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x05,
.name = "display0c",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 5,
- },
- .la = {
- .reg = 0x2ec,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x06,
.name = "display0cb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 6,
- },
- .la = {
- .reg = 0x2f8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x0e,
.name = "afir",
.swgroup = TEGRA_SWGROUP_AFI,
- .smmu = {
- .reg = 0x228,
- .bit = 14,
- },
- .la = {
- .reg = 0x2e0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x2e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
},
}, {
.id = 0x0f,
.name = "avpcarm7r",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x228,
- .bit = 15,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x10,
.name = "displayhc",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 16,
- },
- .la = {
- .reg = 0x2f0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x11,
.name = "displayhcb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 17,
- },
- .la = {
- .reg = 0x2fc,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x15,
.name = "hdar",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x228,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 0,
- .mask = 0xff,
- .def = 0x24,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
},
}, {
.id = 0x16,
.name = "host1xdmar",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 22,
- },
- .la = {
- .reg = 0x310,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x17,
.name = "host1xr",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 23,
- },
- .la = {
- .reg = 0x310,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x1c,
.name = "nvencsrd",
.swgroup = TEGRA_SWGROUP_NVENC,
- .smmu = {
- .reg = 0x228,
- .bit = 28,
- },
- .la = {
- .reg = 0x328,
- .shift = 0,
- .mask = 0xff,
- .def = 0x23,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
},
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 29,
- },
- .la = {
- .reg = 0x344,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 30,
- },
- .la = {
- .reg = 0x344,
- .shift = 16,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x1f,
.name = "satar",
.swgroup = TEGRA_SWGROUP_SATA,
- .smmu = {
- .reg = 0x228,
- .bit = 31,
- },
- .la = {
- .reg = 0x350,
- .shift = 0,
- .mask = 0xff,
- .def = 0x65,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
},
}, {
.id = 0x27,
.name = "mpcorer",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x2b,
.name = "nvencswr",
.swgroup = TEGRA_SWGROUP_NVENC,
- .smmu = {
- .reg = 0x22c,
- .bit = 11,
- },
- .la = {
- .reg = 0x328,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x31,
.name = "afiw",
.swgroup = TEGRA_SWGROUP_AFI,
- .smmu = {
- .reg = 0x22c,
- .bit = 17,
- },
- .la = {
- .reg = 0x2e0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x32,
.name = "avpcarm7w",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x22c,
- .bit = 18,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x35,
.name = "hdaw",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x22c,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x36,
.name = "host1xw",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x22c,
- .bit = 22,
- },
- .la = {
- .reg = 0x314,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x39,
.name = "mpcorew",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 27,
- },
- .la = {
- .reg = 0x348,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 28,
- },
- .la = {
- .reg = 0x348,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x3d,
.name = "sataw",
.swgroup = TEGRA_SWGROUP_SATA,
- .smmu = {
- .reg = 0x22c,
- .bit = 29,
- },
- .la = {
- .reg = 0x350,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x44,
.name = "ispra",
.swgroup = TEGRA_SWGROUP_ISP2,
- .smmu = {
- .reg = 0x230,
- .bit = 4,
- },
- .la = {
- .reg = 0x370,
- .shift = 0,
- .mask = 0xff,
- .def = 0x18,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
},
}, {
.id = 0x46,
.name = "ispwa",
.swgroup = TEGRA_SWGROUP_ISP2,
- .smmu = {
- .reg = 0x230,
- .bit = 6,
- },
- .la = {
- .reg = 0x374,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x47,
.name = "ispwb",
.swgroup = TEGRA_SWGROUP_ISP2,
- .smmu = {
- .reg = 0x230,
- .bit = 7,
- },
- .la = {
- .reg = 0x374,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x4a,
.name = "xusb_hostr",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
- .smmu = {
- .reg = 0x230,
- .bit = 10,
- },
- .la = {
- .reg = 0x37c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x7a,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x7a,
+ },
},
}, {
.id = 0x4b,
.name = "xusb_hostw",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
- .smmu = {
- .reg = 0x230,
- .bit = 11,
- },
- .la = {
- .reg = 0x37c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x4c,
.name = "xusb_devr",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
- .smmu = {
- .reg = 0x230,
- .bit = 12,
- },
- .la = {
- .reg = 0x380,
- .shift = 0,
- .mask = 0xff,
- .def = 0x39,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
},
}, {
.id = 0x4d,
.name = "xusb_devw",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
- .smmu = {
- .reg = 0x230,
- .bit = 13,
- },
- .la = {
- .reg = 0x380,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x4e,
.name = "isprab",
.swgroup = TEGRA_SWGROUP_ISP2B,
- .smmu = {
- .reg = 0x230,
- .bit = 14,
- },
- .la = {
- .reg = 0x384,
- .shift = 0,
- .mask = 0xff,
- .def = 0x18,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
},
}, {
.id = 0x50,
.name = "ispwab",
.swgroup = TEGRA_SWGROUP_ISP2B,
- .smmu = {
- .reg = 0x230,
- .bit = 16,
- },
- .la = {
- .reg = 0x388,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x51,
.name = "ispwbb",
.swgroup = TEGRA_SWGROUP_ISP2B,
- .smmu = {
- .reg = 0x230,
- .bit = 17,
- },
- .la = {
- .reg = 0x388,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x54,
.name = "tsecsrd",
.swgroup = TEGRA_SWGROUP_TSEC,
- .smmu = {
- .reg = 0x230,
- .bit = 20,
- },
- .la = {
- .reg = 0x390,
- .shift = 0,
- .mask = 0xff,
- .def = 0x9b,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
},
}, {
.id = 0x55,
.name = "tsecswr",
.swgroup = TEGRA_SWGROUP_TSEC,
- .smmu = {
- .reg = 0x230,
- .bit = 21,
- },
- .la = {
- .reg = 0x390,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x56,
.name = "a9avpscr",
.swgroup = TEGRA_SWGROUP_A9AVP,
- .smmu = {
- .reg = 0x230,
- .bit = 22,
- },
- .la = {
- .reg = 0x3a4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
}, {
.id = 0x57,
.name = "a9avpscw",
.swgroup = TEGRA_SWGROUP_A9AVP,
- .smmu = {
- .reg = 0x230,
- .bit = 23,
- },
- .la = {
- .reg = 0x3a4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x58,
.name = "gpusrd",
.swgroup = TEGRA_SWGROUP_GPU,
- .smmu = {
- /* read-only */
- .reg = 0x230,
- .bit = 24,
- },
- .la = {
- .reg = 0x3c8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x59,
.name = "gpuswr",
.swgroup = TEGRA_SWGROUP_GPU,
- .smmu = {
- /* read-only */
- .reg = 0x230,
- .bit = 25,
- },
- .la = {
- .reg = 0x3c8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x5a,
.name = "displayt",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x230,
- .bit = 26,
- },
- .la = {
- .reg = 0x2f0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x1e,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
},
}, {
.id = 0x60,
.name = "sdmmcra",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
- .smmu = {
- .reg = 0x234,
- .bit = 0,
- },
- .la = {
- .reg = 0x3b8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x61,
.name = "sdmmcraa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
- .smmu = {
- .reg = 0x234,
- .bit = 1,
- },
- .la = {
- .reg = 0x3bc,
- .shift = 0,
- .mask = 0xff,
- .def = 0x5a,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x5a,
+ },
},
}, {
.id = 0x62,
.name = "sdmmcr",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
- .smmu = {
- .reg = 0x234,
- .bit = 2,
- },
- .la = {
- .reg = 0x3c0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x49,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
},
}, {
.id = 0x63,
.swgroup = TEGRA_SWGROUP_SDMMC4A,
.name = "sdmmcrab",
- .smmu = {
- .reg = 0x234,
- .bit = 3,
- },
- .la = {
- .reg = 0x3c4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x5a,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x5a,
+ },
},
}, {
.id = 0x64,
.name = "sdmmcwa",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
- .smmu = {
- .reg = 0x234,
- .bit = 4,
- },
- .la = {
- .reg = 0x3b8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x65,
.name = "sdmmcwaa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
- .smmu = {
- .reg = 0x234,
- .bit = 5,
- },
- .la = {
- .reg = 0x3bc,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x66,
.name = "sdmmcw",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
- .smmu = {
- .reg = 0x234,
- .bit = 6,
- },
- .la = {
- .reg = 0x3c0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x67,
.name = "sdmmcwab",
.swgroup = TEGRA_SWGROUP_SDMMC4A,
- .smmu = {
- .reg = 0x234,
- .bit = 7,
- },
- .la = {
- .reg = 0x3c4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x6c,
.name = "vicsrd",
.swgroup = TEGRA_SWGROUP_VIC,
- .smmu = {
- .reg = 0x234,
- .bit = 12,
- },
- .la = {
- .reg = 0x394,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x6d,
.name = "vicswr",
.swgroup = TEGRA_SWGROUP_VIC,
- .smmu = {
- .reg = 0x234,
- .bit = 13,
- },
- .la = {
- .reg = 0x394,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x72,
.name = "viw",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x234,
- .bit = 18,
- },
- .la = {
- .reg = 0x398,
- .shift = 0,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x73,
.name = "displayd",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x234,
- .bit = 19,
- },
- .la = {
- .reg = 0x3c8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
}, {
.id = 0x78,
.name = "nvdecsrd",
.swgroup = TEGRA_SWGROUP_NVDEC,
- .smmu = {
- .reg = 0x234,
- .bit = 24,
- },
- .la = {
- .reg = 0x3d8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x23,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
},
}, {
.id = 0x79,
.name = "nvdecswr",
.swgroup = TEGRA_SWGROUP_NVDEC,
- .smmu = {
- .reg = 0x234,
- .bit = 25,
- },
- .la = {
- .reg = 0x3d8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x7a,
.name = "aper",
.swgroup = TEGRA_SWGROUP_APE,
- .smmu = {
- .reg = 0x234,
- .bit = 26,
- },
- .la = {
- .reg = 0x3dc,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x7b,
.name = "apew",
.swgroup = TEGRA_SWGROUP_APE,
- .smmu = {
- .reg = 0x234,
- .bit = 27,
- },
- .la = {
- .reg = 0x3dc,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x7e,
.name = "nvjpgsrd",
.swgroup = TEGRA_SWGROUP_NVJPG,
- .smmu = {
- .reg = 0x234,
- .bit = 30,
- },
- .la = {
- .reg = 0x3e4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x23,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
},
}, {
.id = 0x7f,
.name = "nvjpgswr",
.swgroup = TEGRA_SWGROUP_NVJPG,
- .smmu = {
- .reg = 0x234,
- .bit = 31,
- },
- .la = {
- .reg = 0x3e4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x80,
.name = "sesrd",
.swgroup = TEGRA_SWGROUP_SE,
- .smmu = {
- .reg = 0xb98,
- .bit = 0,
- },
- .la = {
- .reg = 0x3e0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x2e,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
},
}, {
.id = 0x81,
.name = "seswr",
.swgroup = TEGRA_SWGROUP_SE,
- .smmu = {
- .reg = 0xb98,
- .bit = 1,
- },
- .la = {
- .reg = 0x3e0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x82,
.name = "axiapr",
.swgroup = TEGRA_SWGROUP_AXIAP,
- .smmu = {
- .reg = 0xb98,
- .bit = 2,
- },
- .la = {
- .reg = 0x3a0,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x83,
.name = "axiapw",
.swgroup = TEGRA_SWGROUP_AXIAP,
- .smmu = {
- .reg = 0xb98,
- .bit = 3,
- },
- .la = {
- .reg = 0x3a0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x84,
.name = "etrr",
.swgroup = TEGRA_SWGROUP_ETR,
- .smmu = {
- .reg = 0xb98,
- .bit = 4,
- },
- .la = {
- .reg = 0x3ec,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
}, {
.id = 0x85,
.name = "etrw",
.swgroup = TEGRA_SWGROUP_ETR,
- .smmu = {
- .reg = 0xb98,
- .bit = 5,
- },
- .la = {
- .reg = 0x3ec,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x86,
.name = "tsecsrdb",
.swgroup = TEGRA_SWGROUP_TSECB,
- .smmu = {
- .reg = 0xb98,
- .bit = 6,
- },
- .la = {
- .reg = 0x3f0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x9b,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
},
}, {
.id = 0x87,
.name = "tsecswrb",
.swgroup = TEGRA_SWGROUP_TSECB,
- .smmu = {
- .reg = 0xb98,
- .bit = 7,
- },
- .la = {
- .reg = 0x3f0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
}, {
.id = 0x88,
.name = "gpusrd2",
.swgroup = TEGRA_SWGROUP_GPU,
- .smmu = {
- /* read-only */
- .reg = 0xb98,
- .bit = 8,
- },
- .la = {
- .reg = 0x3e8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x1a,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
},
}, {
.id = 0x89,
.name = "gpuswr2",
.swgroup = TEGRA_SWGROUP_GPU,
- .smmu = {
- /* read-only */
- .reg = 0xb98,
- .bit = 9,
- },
- .la = {
- .reg = 0x3e8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
},
};
@@ -1142,4 +1286,5 @@ const struct tegra_mc_soc tegra210_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra210_mc_resets,
.num_resets = ARRAY_SIZE(tegra210_mc_resets),
+ .ops = &tegra30_mc_ops,
};
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 829f6d673c96..7e21a852f2e1 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -150,8 +150,8 @@
#define EMC_SELF_REF_CMD_ENABLED BIT(0)
#define DRAM_DEV_SEL_ALL (0 << 30)
-#define DRAM_DEV_SEL_0 (2 << 30)
-#define DRAM_DEV_SEL_1 (1 << 30)
+#define DRAM_DEV_SEL_0 BIT(31)
+#define DRAM_DEV_SEL_1 BIT(30)
#define DRAM_BROADCAST(num) \
((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
@@ -1354,10 +1354,6 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(emc->dev, "failed to create debugfs directory\n");
- return;
- }
debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
@@ -1480,49 +1476,6 @@ err_msg:
return err;
}
-static int tegra_emc_opp_table_init(struct tegra_emc *emc)
-{
- u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
- struct opp_table *hw_opp_table;
- int err;
-
- hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- err = PTR_ERR_OR_ZERO(hw_opp_table);
- if (err) {
- dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
- return err;
- }
-
- err = dev_pm_opp_of_add_table(emc->dev);
- if (err) {
- if (err == -ENODEV)
- dev_err(emc->dev, "OPP table not found, please update your device tree\n");
- else
- dev_err(emc->dev, "failed to add OPP table: %d\n", err);
-
- goto put_hw_table;
- }
-
- dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
- hw_version, clk_get_rate(emc->clk) / 1000000);
-
- /* first dummy rate-set initializes voltage state */
- err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
- if (err) {
- dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
- goto remove_table;
- }
-
- return 0;
-
-remove_table:
- dev_pm_opp_of_remove_table(emc->dev);
-put_hw_table:
- dev_pm_opp_put_supported_hw(hw_opp_table);
-
- return err;
-}
-
static void devm_tegra_emc_unset_callback(void *data)
{
tegra20_clk_set_emc_round_callback(NULL, NULL);
@@ -1568,6 +1521,7 @@ static int tegra_emc_init_clk(struct tegra_emc *emc)
static int tegra_emc_probe(struct platform_device *pdev)
{
+ struct tegra_core_opp_params opp_params = {};
struct device_node *np;
struct tegra_emc *emc;
int err;
@@ -1617,7 +1571,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
- err = tegra_emc_opp_table_init(emc);
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
if (err)
return err;
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index ea849003014b..84316357513d 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -37,970 +37,1102 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
- .la = {
- .reg = 0x34c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x01,
.name = "display0a",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 1,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 128,
}, {
.id = 0x02,
.name = "display0ab",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 2,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 128,
}, {
.id = 0x03,
.name = "display0b",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 3,
- },
- .la = {
- .reg = 0x2e8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x04,
.name = "display0bb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 4,
- },
- .la = {
- .reg = 0x2f4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x05,
.name = "display0c",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 5,
- },
- .la = {
- .reg = 0x2ec,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 128,
}, {
.id = 0x06,
.name = "display0cb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 6,
- },
- .la = {
- .reg = 0x2f8,
- .shift = 0,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 128,
}, {
.id = 0x07,
.name = "display1b",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 7,
- },
- .la = {
- .reg = 0x2ec,
- .shift = 16,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x08,
.name = "display1bb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 8,
- },
- .la = {
- .reg = 0x2f8,
- .shift = 16,
- .mask = 0xff,
- .def = 0x4e,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x09,
.name = "eppup",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x228,
- .bit = 9,
- },
- .la = {
- .reg = 0x300,
- .shift = 0,
- .mask = 0xff,
- .def = 0x17,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x17,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x0a,
.name = "g2pr",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x228,
- .bit = 10,
- },
- .la = {
- .reg = 0x308,
- .shift = 0,
- .mask = 0xff,
- .def = 0x09,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x09,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x0b,
.name = "g2sr",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x228,
- .bit = 11,
- },
- .la = {
- .reg = 0x308,
- .shift = 16,
- .mask = 0xff,
- .def = 0x09,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x0c,
.name = "mpeunifbr",
.swgroup = TEGRA_SWGROUP_MPE,
- .smmu = {
- .reg = 0x228,
- .bit = 12,
- },
- .la = {
- .reg = 0x328,
- .shift = 0,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x0d,
.name = "viruv",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x228,
- .bit = 13,
- },
- .la = {
- .reg = 0x364,
- .shift = 0,
- .mask = 0xff,
- .def = 0x2c,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2c,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x0e,
.name = "afir",
.swgroup = TEGRA_SWGROUP_AFI,
- .smmu = {
- .reg = 0x228,
- .bit = 14,
- },
- .la = {
- .reg = 0x2e0,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
.fifo_size = 16 * 32,
}, {
.id = 0x0f,
.name = "avpcarm7r",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x228,
- .bit = 15,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x10,
.name = "displayhc",
.swgroup = TEGRA_SWGROUP_DC,
- .smmu = {
- .reg = 0x228,
- .bit = 16,
- },
- .la = {
- .reg = 0x2f0,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x11,
.name = "displayhcb",
.swgroup = TEGRA_SWGROUP_DCB,
- .smmu = {
- .reg = 0x228,
- .bit = 17,
- },
- .la = {
- .reg = 0x2fc,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x12,
.name = "fdcdrd",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x228,
- .bit = 18,
- },
- .la = {
- .reg = 0x334,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0a,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
},
.fifo_size = 16 * 48,
}, {
.id = 0x13,
.name = "fdcdrd2",
.swgroup = TEGRA_SWGROUP_NV2,
- .smmu = {
- .reg = 0x228,
- .bit = 19,
- },
- .la = {
- .reg = 0x33c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0a,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
},
.fifo_size = 16 * 48,
}, {
.id = 0x14,
.name = "g2dr",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x228,
- .bit = 20,
- },
- .la = {
- .reg = 0x30c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x0a,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
},
.fifo_size = 16 * 48,
}, {
.id = 0x15,
.name = "hdar",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x228,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 16,
}, {
.id = 0x16,
.name = "host1xdmar",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 22,
- },
- .la = {
- .reg = 0x310,
- .shift = 0,
- .mask = 0xff,
- .def = 0x05,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x05,
+ },
},
.fifo_size = 16 * 16,
}, {
.id = 0x17,
.name = "host1xr",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x228,
- .bit = 23,
- },
- .la = {
- .reg = 0x310,
- .shift = 16,
- .mask = 0xff,
- .def = 0x50,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x18,
.name = "idxsrd",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x228,
- .bit = 24,
- },
- .la = {
- .reg = 0x334,
- .shift = 16,
- .mask = 0xff,
- .def = 0x13,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x13,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x19,
.name = "idxsrd2",
.swgroup = TEGRA_SWGROUP_NV2,
- .smmu = {
- .reg = 0x228,
- .bit = 25,
- },
- .la = {
- .reg = 0x33c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x13,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x13,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x1a,
.name = "mpe_ipred",
.swgroup = TEGRA_SWGROUP_MPE,
- .smmu = {
- .reg = 0x228,
- .bit = 26,
- },
- .la = {
- .reg = 0x328,
- .shift = 16,
- .mask = 0xff,
- .def = 0x80,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x1b,
.name = "mpeamemrd",
.swgroup = TEGRA_SWGROUP_MPE,
- .smmu = {
- .reg = 0x228,
- .bit = 27,
- },
- .la = {
- .reg = 0x32c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x42,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x32c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x42,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x1c,
.name = "mpecsrd",
.swgroup = TEGRA_SWGROUP_MPE,
- .smmu = {
- .reg = 0x228,
- .bit = 28,
- },
- .la = {
- .reg = 0x32c,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x32c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 29,
- },
- .la = {
- .reg = 0x344,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x228,
- .bit = 30,
- },
- .la = {
- .reg = 0x344,
- .shift = 16,
- .mask = 0xff,
- .def = 0x12,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x12,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x1f,
.name = "satar",
.swgroup = TEGRA_SWGROUP_SATA,
- .smmu = {
- .reg = 0x228,
- .bit = 31,
- },
- .la = {
- .reg = 0x350,
- .shift = 0,
- .mask = 0xff,
- .def = 0x33,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x33,
+ },
},
.fifo_size = 16 * 32,
}, {
.id = 0x20,
.name = "texsrd",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x22c,
- .bit = 0,
- },
- .la = {
- .reg = 0x338,
- .shift = 0,
- .mask = 0xff,
- .def = 0x13,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x21,
.name = "texsrd2",
.swgroup = TEGRA_SWGROUP_NV2,
- .smmu = {
- .reg = 0x22c,
- .bit = 1,
- },
- .la = {
- .reg = 0x340,
- .shift = 0,
- .mask = 0xff,
- .def = 0x13,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x22,
.name = "vdebsevr",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 2,
- },
- .la = {
- .reg = 0x354,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x23,
.name = "vdember",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 3,
- },
- .la = {
- .reg = 0x354,
- .shift = 16,
- .mask = 0xff,
- .def = 0xd0,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xd0,
+ },
},
.fifo_size = 16 * 4,
}, {
.id = 0x24,
.name = "vdemcer",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 4,
- },
- .la = {
- .reg = 0x358,
- .shift = 0,
- .mask = 0xff,
- .def = 0x2a,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2a,
+ },
},
.fifo_size = 16 * 16,
}, {
.id = 0x25,
.name = "vdetper",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 5,
- },
- .la = {
- .reg = 0x358,
- .shift = 16,
- .mask = 0xff,
- .def = 0x74,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x74,
+ },
},
.fifo_size = 16 * 16,
}, {
.id = 0x26,
.name = "mpcorelpr",
.swgroup = TEGRA_SWGROUP_MPCORELP,
- .la = {
- .reg = 0x324,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
.fifo_size = 16 * 14,
}, {
.id = 0x27,
.name = "mpcorer",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 0,
- .mask = 0xff,
- .def = 0x04,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
},
.fifo_size = 16 * 14,
}, {
.id = 0x28,
.name = "eppu",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x22c,
- .bit = 8,
- },
- .la = {
- .reg = 0x300,
- .shift = 16,
- .mask = 0xff,
- .def = 0x6c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x29,
.name = "eppv",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x22c,
- .bit = 9,
- },
- .la = {
- .reg = 0x304,
- .shift = 0,
- .mask = 0xff,
- .def = 0x6c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x2a,
.name = "eppy",
.swgroup = TEGRA_SWGROUP_EPP,
- .smmu = {
- .reg = 0x22c,
- .bit = 10,
- },
- .la = {
- .reg = 0x304,
- .shift = 16,
- .mask = 0xff,
- .def = 0x6c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x2b,
.name = "mpeunifbw",
.swgroup = TEGRA_SWGROUP_MPE,
- .smmu = {
- .reg = 0x22c,
- .bit = 11,
- },
- .la = {
- .reg = 0x330,
- .shift = 0,
- .mask = 0xff,
- .def = 0x13,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x330,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x2c,
.name = "viwsb",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 12,
- },
- .la = {
- .reg = 0x364,
- .shift = 16,
- .mask = 0xff,
- .def = 0x12,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x12,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x2d,
.name = "viwu",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 13,
- },
- .la = {
- .reg = 0x368,
- .shift = 0,
- .mask = 0xff,
- .def = 0xb2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xb2,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x2e,
.name = "viwv",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 14,
- },
- .la = {
- .reg = 0x368,
- .shift = 16,
- .mask = 0xff,
- .def = 0xb2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xb2,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x2f,
.name = "viwy",
.swgroup = TEGRA_SWGROUP_VI,
- .smmu = {
- .reg = 0x22c,
- .bit = 15,
- },
- .la = {
- .reg = 0x36c,
- .shift = 0,
- .mask = 0xff,
- .def = 0x12,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x36c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x12,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x30,
.name = "g2dw",
.swgroup = TEGRA_SWGROUP_G2,
- .smmu = {
- .reg = 0x22c,
- .bit = 16,
- },
- .la = {
- .reg = 0x30c,
- .shift = 16,
- .mask = 0xff,
- .def = 0x9,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x9,
+ },
},
.fifo_size = 16 * 128,
}, {
.id = 0x31,
.name = "afiw",
.swgroup = TEGRA_SWGROUP_AFI,
- .smmu = {
- .reg = 0x22c,
- .bit = 17,
- },
- .la = {
- .reg = 0x2e0,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0c,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
},
.fifo_size = 16 * 32,
}, {
.id = 0x32,
.name = "avpcarm7w",
.swgroup = TEGRA_SWGROUP_AVPC,
- .smmu = {
- .reg = 0x22c,
- .bit = 18,
- },
- .la = {
- .reg = 0x2e4,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0e,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x33,
.name = "fdcdwr",
.swgroup = TEGRA_SWGROUP_NV,
- .smmu = {
- .reg = 0x22c,
- .bit = 19,
- },
- .la = {
- .reg = 0x338,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0a,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
},
.fifo_size = 16 * 48,
}, {
.id = 0x34,
.name = "fdcdwr2",
.swgroup = TEGRA_SWGROUP_NV2,
- .smmu = {
- .reg = 0x22c,
- .bit = 20,
- },
- .la = {
- .reg = 0x340,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0a,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
},
.fifo_size = 16 * 48,
}, {
.id = 0x35,
.name = "hdaw",
.swgroup = TEGRA_SWGROUP_HDA,
- .smmu = {
- .reg = 0x22c,
- .bit = 21,
- },
- .la = {
- .reg = 0x318,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 16,
}, {
.id = 0x36,
.name = "host1xw",
.swgroup = TEGRA_SWGROUP_HC,
- .smmu = {
- .reg = 0x22c,
- .bit = 22,
- },
- .la = {
- .reg = 0x314,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
.fifo_size = 16 * 32,
}, {
.id = 0x37,
.name = "ispw",
.swgroup = TEGRA_SWGROUP_ISP,
- .smmu = {
- .reg = 0x22c,
- .bit = 23,
- },
- .la = {
- .reg = 0x31c,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x31c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 64,
}, {
.id = 0x38,
.name = "mpcorelpw",
.swgroup = TEGRA_SWGROUP_MPCORELP,
- .la = {
- .reg = 0x324,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0e,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
},
.fifo_size = 16 * 24,
}, {
.id = 0x39,
.name = "mpcorew",
.swgroup = TEGRA_SWGROUP_MPCORE,
- .la = {
- .reg = 0x320,
- .shift = 16,
- .mask = 0xff,
- .def = 0x0e,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
},
.fifo_size = 16 * 24,
}, {
.id = 0x3a,
.name = "mpecswr",
.swgroup = TEGRA_SWGROUP_MPE,
- .smmu = {
- .reg = 0x22c,
- .bit = 26,
- },
- .la = {
- .reg = 0x330,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x330,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 8,
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 27,
- },
- .la = {
- .reg = 0x348,
- .shift = 0,
- .mask = 0xff,
- .def = 0x10,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
.swgroup = TEGRA_SWGROUP_PPCS,
- .smmu = {
- .reg = 0x22c,
- .bit = 28,
- },
- .la = {
- .reg = 0x348,
- .shift = 16,
- .mask = 0xff,
- .def = 0x06,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x06,
+ },
},
.fifo_size = 16 * 4,
}, {
.id = 0x3d,
.name = "sataw",
.swgroup = TEGRA_SWGROUP_SATA,
- .smmu = {
- .reg = 0x22c,
- .bit = 29,
- },
- .la = {
- .reg = 0x350,
- .shift = 16,
- .mask = 0xff,
- .def = 0x33,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x33,
+ },
},
.fifo_size = 16 * 32,
}, {
.id = 0x3e,
.name = "vdebsevw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 30,
- },
- .la = {
- .reg = 0x35c,
- .shift = 0,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 4,
}, {
.id = 0x3f,
.name = "vdedbgw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x22c,
- .bit = 31,
- },
- .la = {
- .reg = 0x35c,
- .shift = 16,
- .mask = 0xff,
- .def = 0xff,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
},
.fifo_size = 16 * 16,
}, {
.id = 0x40,
.name = "vdembew",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x230,
- .bit = 0,
- },
- .la = {
- .reg = 0x360,
- .shift = 0,
- .mask = 0xff,
- .def = 0x42,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x42,
+ },
},
.fifo_size = 16 * 2,
}, {
.id = 0x41,
.name = "vdetpmw",
.swgroup = TEGRA_SWGROUP_VDE,
- .smmu = {
- .reg = 0x230,
- .bit = 1,
- },
- .la = {
- .reg = 0x360,
- .shift = 16,
- .mask = 0xff,
- .def = 0x2a,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x2a,
+ },
},
.fifo_size = 16 * 16,
},
@@ -1089,7 +1221,6 @@ static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
unsigned int bandwidth_mbytes_sec)
{
u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
- const struct tegra_mc_la *la = &client->la;
unsigned int fifo_size = client->fifo_size;
u32 arb_nsec, la_ticks, value;
@@ -1149,12 +1280,12 @@ static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
* request.
*/
la_ticks = arb_nsec / mc->tick;
- la_ticks = min(la_ticks, la->mask);
+ la_ticks = min(la_ticks, client->regs.la.mask);
- value = mc_readl(mc, la->reg);
- value &= ~(la->mask << la->shift);
- value |= la_ticks << la->shift;
- mc_writel(mc, value, la->reg);
+ value = mc_readl(mc, client->regs.la.reg);
+ value &= ~(client->regs.la.mask << client->regs.la.shift);
+ value |= la_ticks << client->regs.la.shift;
+ mc_writel(mc, value, client->regs.la.reg);
}
static int tegra30_mc_icc_set(struct icc_node *src, struct icc_node *dst)
@@ -1268,4 +1399,5 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.resets = tegra30_mc_resets,
.num_resets = ARRAY_SIZE(tegra30_mc_resets),
.icc_ops = &tegra30_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
};
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f4f89cf23631..7f7abc9069f7 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6993,8 +6993,6 @@ mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
ioc->ioc_reset_in_progress = 1;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- rc = -1;
-
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 0484e9c15c09..572333fadd68 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -331,8 +331,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
break;
data_sz = hdr.PageLength * 4;
- ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz,
- &page0_dma);
+ ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page0_dma, GFP_KERNEL);
rc = -ENOMEM;
if (!ppage0_alloc)
break;
@@ -367,8 +367,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
*p_p0 = *ppage0_alloc; /* save data */
*p_pp0++ = p_p0++; /* save addr */
}
- pci_free_consistent(ioc->pcidev, data_sz,
- (u8 *) ppage0_alloc, page0_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ ppage0_alloc, page0_dma);
if (rc != 0)
break;
@@ -763,7 +763,8 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page0_dma, GFP_KERNEL);
if (ppage0_alloc) {
try_again:
@@ -817,7 +818,8 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
mptfc_display_port_link_speed(ioc, portnum, pp0dest);
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz, ppage0_alloc,
+ page0_dma);
}
return rc;
@@ -904,9 +906,8 @@ start_over:
if (data_sz < sizeof(FCPortPage1_t))
data_sz = sizeof(FCPortPage1_t);
- page1_alloc = pci_alloc_consistent(ioc->pcidev,
- data_sz,
- &page1_dma);
+ page1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page1_dma, GFP_KERNEL);
if (!page1_alloc)
return -ENOMEM;
}
@@ -916,8 +917,8 @@ start_over:
data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz;
if (hdr.PageLength * 4 > data_sz) {
ioc->fc_data.fc_port_page1[portnum].data = NULL;
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
- page1_alloc, page1_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ page1_alloc, page1_dma);
goto start_over;
}
}
@@ -932,8 +933,8 @@ start_over:
}
else {
ioc->fc_data.fc_port_page1[portnum].data = NULL;
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
- page1_alloc, page1_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz, page1_alloc,
+ page1_dma);
}
return rc;
@@ -1514,10 +1515,10 @@ static void mptfc_remove(struct pci_dev *pdev)
for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
if (ioc->fc_data.fc_port_page1[ii].data) {
- pci_free_consistent(ioc->pcidev,
- ioc->fc_data.fc_port_page1[ii].pg_sz,
- (u8 *) ioc->fc_data.fc_port_page1[ii].data,
- ioc->fc_data.fc_port_page1[ii].dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ ioc->fc_data.fc_port_page1[ii].pg_sz,
+ ioc->fc_data.fc_port_page1[ii].data,
+ ioc->fc_data.fc_port_page1[ii].dma);
ioc->fc_data.fc_port_page1[ii].data = NULL;
}
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e0a65a348502..85285ba8e817 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -86,7 +86,7 @@ MODULE_PARM_DESC(mpt_pt_clear,
" Clear persistency table: enable=1 "
"(default=MPTSCSIH_PT_CLEAR=0)");
-/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPTSAS_MAX_LUN (16895)
static int max_lun = MPTSAS_MAX_LUN;
module_param(max_lun, int, 0);
@@ -420,12 +420,14 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
}
/**
- * mptsas_find_portinfo_by_sas_address -
+ * mptsas_find_portinfo_by_sas_address - find and return portinfo for
+ * this sas_address
* @ioc: Pointer to MPT_ADAPTER structure
- * @handle:
+ * @sas_address: expander sas address
*
- * This function should be called with the sas_topology_mutex already held
+ * This function should be called with the sas_topology_mutex already held.
*
+ * Return: %NULL if not found.
**/
static struct mptsas_portinfo *
mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
@@ -567,12 +569,14 @@ starget)
}
/**
- * mptsas_add_device_component -
+ * mptsas_add_device_component - adds a new device component to our lists
* @ioc: Pointer to MPT_ADAPTER structure
- * @channel: fw mapped id's
- * @id:
- * @sas_address:
- * @device_info:
+ * @channel: channel number
+ * @id: Logical Target ID for reset (if appropriate)
+ * @sas_address: expander sas address
+ * @device_info: specific bits (flags) for devices
+ * @slot: enclosure slot ID
+ * @enclosure_logical_id: enclosure WWN
*
**/
static void
@@ -634,10 +638,10 @@ mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
}
/**
- * mptsas_add_device_component_by_fw -
+ * mptsas_add_device_component_by_fw - adds a new device component by FW ID
* @ioc: Pointer to MPT_ADAPTER structure
- * @channel: fw mapped id's
- * @id:
+ * @channel: channel number
+ * @id: Logical Target ID
*
**/
static void
@@ -668,8 +672,7 @@ mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
/**
* mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
* @ioc: Pointer to MPT_ADAPTER structure
- * @channel: fw mapped id's
- * @id:
+ * @starget: SCSI target for this SCSI device
*
**/
static void
@@ -771,9 +774,9 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
}
/**
- * mptsas_add_device_component_starget -
+ * mptsas_add_device_component_starget - adds a SCSI target device component
* @ioc: Pointer to MPT_ADAPTER structure
- * @starget:
+ * @starget: SCSI target for this SCSI device
*
**/
static void
@@ -806,7 +809,7 @@ mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
* mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: os mapped id's
- * @id:
+ * @id: Logical Target ID
*
**/
static void
@@ -978,11 +981,12 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
}
/**
- * csmisas_find_vtarget
+ * mptsas_find_vtarget - find a virtual target device (FC LUN device or
+ * SCSI target device)
*
- * @ioc
- * @volume_id
- * @volume_bus
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: channel number
+ * @id: Logical Target ID
*
**/
static VirtTarget *
@@ -1047,15 +1051,14 @@ mptsas_queue_rescan(MPT_ADAPTER *ioc)
/**
- * mptsas_target_reset
- *
- * Issues TARGET_RESET to end device using handshaking method
+ * mptsas_target_reset - Issues TARGET_RESET to end device using
+ * handshaking method
*
- * @ioc
- * @channel
- * @id
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: channel number
+ * @id: Logical Target ID for reset
*
- * Returns (1) success
+ * Return: (1) success
* (0) failure
*
**/
@@ -1119,15 +1122,15 @@ mptsas_block_io_starget(struct scsi_target *starget)
}
/**
- * mptsas_target_reset_queue
+ * mptsas_target_reset_queue - queue a target reset
*
- * Receive request for TARGET_RESET after receiving an firmware
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_event_data: SAS Device Status Change Event data
+ *
+ * Receive request for TARGET_RESET after receiving a firmware
* event NOT_RESPONDING_EVENT, then put command in link list
* and queue if task_queue already in use.
*
- * @ioc
- * @sas_event_data
- *
**/
static void
mptsas_target_reset_queue(MPT_ADAPTER *ioc,
@@ -1207,9 +1210,11 @@ mptsas_schedule_target_reset(void *iocp)
/**
* mptsas_taskmgmt_complete - complete SAS task management function
* @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: MPT message frame
+ * @mr: SCSI Task Management Reply structure ptr (may be %NULL)
*
* Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
- * queue to finish off removing device from upper layers. then send next
+ * queue to finish off removing device from upper layers, then send next
* TARGET_RESET in the queue.
**/
static int
@@ -1300,10 +1305,10 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
}
/**
- * mptscsih_ioc_reset
+ * mptsas_ioc_reset - issue an IOC reset for this reset phase
*
- * @ioc
- * @reset_phase
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @reset_phase: id of phase of reset
*
**/
static int
@@ -1350,7 +1355,7 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/**
- * enum device_state -
+ * enum device_state - TUR device state
* @DEVICE_RETRY: need to retry the TUR
* @DEVICE_ERROR: TUR return error, don't add device
* @DEVICE_READY: device can be added
@@ -1941,7 +1946,7 @@ mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
}
/**
- * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
+ * mptsas_eh_timed_out - resets the scsi_cmnd timeout
* if the device in question is currently in the
* device removal delay.
* @sc: scsi command that the midlayer is about to time out
@@ -2839,14 +2844,15 @@ struct rep_manu_reply{
};
/**
- * mptsas_exp_repmanufacture_info -
+ * mptsas_exp_repmanufacture_info - sets expander manufacturer info
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
*
- * Fills in the sas_expander_device object when SMP port is created.
+ * For an edge expander or a fanout expander:
+ * fills in the sas_expander_device object when SMP port is created.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
@@ -3284,7 +3290,7 @@ static int mptsas_probe_one_phy(struct device *dev,
rphy_to_expander_device(rphy));
}
- /* If the device exists,verify it wasn't previously flagged
+ /* If the device exists, verify it wasn't previously flagged
as a missing device. If so, clear it */
vtarget = mptsas_find_vtarget(ioc,
phy_info->attached.channel,
@@ -3611,8 +3617,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
/**
* mptsas_send_expander_event - expanders events
- * @ioc: Pointer to MPT_ADAPTER structure
- * @expander_data: event data
+ * @fw_event: event data
*
*
* This function handles adding, removing, and refreshing
@@ -3657,9 +3662,9 @@ mptsas_send_expander_event(struct fw_event_work *fw_event)
/**
- * mptsas_expander_add -
+ * mptsas_expander_add - adds a newly discovered expander
* @ioc: Pointer to MPT_ADAPTER structure
- * @handle:
+ * @handle: device handle
*
*/
static struct mptsas_portinfo *
@@ -4000,9 +4005,9 @@ mptsas_probe_devices(MPT_ADAPTER *ioc)
}
/**
- * mptsas_scan_sas_topology -
+ * mptsas_scan_sas_topology - scans new SAS topology
+ * (part of probe or rescan)
* @ioc: Pointer to MPT_ADAPTER structure
- * @sas_address:
*
**/
static void
@@ -4150,11 +4155,12 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
}
/**
- * mptsas_find_phyinfo_by_phys_disk_num -
+ * mptsas_find_phyinfo_by_phys_disk_num - find phyinfo for the
+ * specified @phys_disk_num
* @ioc: Pointer to MPT_ADAPTER structure
- * @phys_disk_num:
- * @channel:
- * @id:
+ * @phys_disk_num: (hot plug) physical disk number (for RAID support)
+ * @channel: channel number
+ * @id: Logical Target ID
*
**/
static struct mptsas_phyinfo *
@@ -4773,8 +4779,9 @@ mptsas_send_raid_event(struct fw_event_work *fw_event)
* @lun: Logical unit for reset (if appropriate)
* @task_context: Context for the task to be aborted
* @timeout: timeout for task management control
+ * @issue_reset: set to 1 on return if reset is needed, else 0
*
- * return 0 on success and -1 on failure:
+ * Return: 0 on success or -1 on failure.
*
*/
static int
@@ -4847,9 +4854,9 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
/**
* mptsas_broadcast_primitive_work - Handle broadcast primitives
- * @work: work queue payload containing info describing the event
+ * @fw_event: work queue payload containing info describing the event
*
- * this will be handled in workqueue context.
+ * This will be handled in workqueue context.
*/
static void
mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index c7f964996a91..eaf9845633b4 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -122,12 +122,7 @@ static const struct i2c_device_id pm80x_id_table[] = {
MODULE_DEVICE_TABLE(i2c, pm80x_id_table);
static const struct resource rtc_resources[] = {
- {
- .name = "88pm80x-rtc",
- .start = PM800_IRQ_RTC,
- .end = PM800_IRQ_RTC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(PM800_IRQ_RTC, "88pm80x-rtc"),
};
static struct mfd_cell rtc_devs[] = {
@@ -140,12 +135,7 @@ static struct mfd_cell rtc_devs[] = {
};
static struct resource onkey_resources[] = {
- {
- .name = "88pm80x-onkey",
- .start = PM800_IRQ_ONKEY,
- .end = PM800_IRQ_ONKEY,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(PM800_IRQ_ONKEY, "88pm80x-onkey"),
};
static const struct mfd_cell onkey_devs[] = {
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 39f2302e137b..ada6c513302b 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -54,27 +54,14 @@ enum {
};
static struct resource codec_resources[] = {
- {
- /* Headset microphone insertion or removal */
- .name = "micin",
- .start = PM805_IRQ_MIC_DET,
- .end = PM805_IRQ_MIC_DET,
- .flags = IORESOURCE_IRQ,
- },
- {
- /* Audio short HP1 */
- .name = "audio-short1",
- .start = PM805_IRQ_HP1_SHRT,
- .end = PM805_IRQ_HP1_SHRT,
- .flags = IORESOURCE_IRQ,
- },
- {
- /* Audio short HP2 */
- .name = "audio-short2",
- .start = PM805_IRQ_HP2_SHRT,
- .end = PM805_IRQ_HP2_SHRT,
- .flags = IORESOURCE_IRQ,
- },
+ /* Headset microphone insertion or removal */
+ DEFINE_RES_IRQ_NAMED(PM805_IRQ_MIC_DET, "micin"),
+
+ /* Audio short HP1 */
+ DEFINE_RES_IRQ_NAMED(PM805_IRQ_HP1_SHRT, "audio-short1"),
+
+ /* Audio short HP2 */
+ DEFINE_RES_IRQ_NAMED(PM805_IRQ_HP2_SHRT, "audio-short2"),
};
static const struct mfd_cell codec_devs[] = {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 99c4e1a80ae0..6a3fd2d75f96 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -465,6 +465,7 @@ config MFD_MP2629
tristate "Monolithic Power Systems MP2629 ADC and Battery charger"
depends on I2C
select REGMAP_I2C
+ select MFD_CORE
help
Select this option to enable support for Monolithic Power Systems
battery charger. This provides ADC, thermal and battery charger power
@@ -902,6 +903,7 @@ config MFD_MT6360
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
+ select CRC8
depends on I2C
help
Say Y here to enable MT6360 PMU/PMIC/LDO functional support.
@@ -1076,6 +1078,16 @@ config MFD_RDC321X
southbridge which provides access to GPIOs and Watchdog using the
southbridge PCI device configuration space.
+config MFD_RT4831
+ tristate "Richtek RT4831 four channel WLED and Display Bias Voltage"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ This enables support for the Richtek RT4831 that includes 4 channel
+ WLED driving and Display Bias Voltage. It's commonly used to provide
+ power to the LCD display and LCD backlight.
+
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
depends on I2C
@@ -1133,6 +1145,7 @@ config MFD_RN5T618
config MFD_SEC_CORE
tristate "Samsung Electronics PMIC Series Support"
depends on I2C=y
+ depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1770,7 +1783,7 @@ config MFD_ARIZONA
select REGMAP
select REGMAP_IRQ
select MFD_CORE
- bool
+ tristate
config MFD_ARIZONA_I2C
tristate "Cirrus Logic/Wolfson Microelectronics Arizona platform with I2C"
@@ -2087,6 +2100,20 @@ config MFD_ACER_A500_EC
The controller itself is ENE KB930, it is running firmware
customized for the specific needs of the Acer A500 hardware.
+config MFD_QCOM_PM8008
+ tristate "QCOM PM8008 Power Management IC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Select this option to get support for the Qualcomm Technologies, Inc.
+ PM8008 PMIC chip. PM8008 is a dedicated camera PMIC that integrates
+ all the necessary power management, housekeeping, and interface
+ support functions into a single IC. This driver provides common
+ support for accessing the device by instantiating all the child nodes
+ under it in the device tree. Additional drivers must be enabled in
+ order to use the functionality of the device.
+
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8b322d89a0c5..8116c19d5fd4 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -41,24 +41,24 @@ obj-$(CONFIG_MFD_TQMX86) += tqmx86.o
obj-$(CONFIG_MFD_LOCHNAGAR) += lochnagar-i2c.o
-obj-$(CONFIG_MFD_ARIZONA) += arizona-core.o
-obj-$(CONFIG_MFD_ARIZONA) += arizona-irq.o
+arizona-objs := arizona-core.o arizona-irq.o
+obj-$(CONFIG_MFD_ARIZONA) += arizona.o
obj-$(CONFIG_MFD_ARIZONA_I2C) += arizona-i2c.o
obj-$(CONFIG_MFD_ARIZONA_SPI) += arizona-spi.o
ifeq ($(CONFIG_MFD_WM5102),y)
-obj-$(CONFIG_MFD_ARIZONA) += wm5102-tables.o
+arizona-objs += wm5102-tables.o
endif
ifeq ($(CONFIG_MFD_WM5110),y)
-obj-$(CONFIG_MFD_ARIZONA) += wm5110-tables.o
+arizona-objs += wm5110-tables.o
endif
ifeq ($(CONFIG_MFD_WM8997),y)
-obj-$(CONFIG_MFD_ARIZONA) += wm8997-tables.o
+arizona-objs += wm8997-tables.o
endif
ifeq ($(CONFIG_MFD_WM8998),y)
-obj-$(CONFIG_MFD_ARIZONA) += wm8998-tables.o
+arizona-objs += wm8998-tables.o
endif
ifeq ($(CONFIG_MFD_CS47L24),y)
-obj-$(CONFIG_MFD_ARIZONA) += cs47l24-tables.o
+arizona-objs += cs47l24-tables.o
endif
obj-$(CONFIG_MFD_WCD934X) += wcd934x.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
@@ -233,6 +233,7 @@ obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
+obj-$(CONFIG_MFD_RT4831) += rt4831.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
@@ -263,6 +264,7 @@ obj-$(CONFIG_MFD_ROHM_BD957XMUF) += rohm-bd9576.o
obj-$(CONFIG_MFD_STMFX) += stmfx.o
obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o
obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o
+obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index c2ba498ad302..30489670ea52 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -827,8 +827,8 @@ static const struct mfd_cell ab8540_cut2_devs[] = {
},
};
-static ssize_t show_chip_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ab8500 *ab8500;
@@ -848,8 +848,8 @@ static ssize_t show_chip_id(struct device *dev,
* 0x40 Power on key 1 pressed longer than 10 seconds
* 0x80 DB8500 thermal shutdown
*/
-static ssize_t show_switch_off_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t switch_off_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 value;
@@ -883,8 +883,8 @@ void ab8500_override_turn_on_stat(u8 mask, u8 set)
* 0x40 UsbIDDetect
* 0x80 Reserved
*/
-static ssize_t show_turn_on_status(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t turn_on_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 value;
@@ -912,8 +912,8 @@ static ssize_t show_turn_on_status(struct device *dev,
return sprintf(buf, "%#x\n", value);
}
-static ssize_t show_turn_on_status_2(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t turn_on_status_2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 value;
@@ -927,8 +927,8 @@ static ssize_t show_turn_on_status_2(struct device *dev,
return sprintf(buf, "%#x\n", (value & 0x1));
}
-static ssize_t show_ab9540_dbbrstn(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t dbbrstn_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ab8500 *ab8500;
int ret;
@@ -945,7 +945,7 @@ static ssize_t show_ab9540_dbbrstn(struct device *dev,
(value & AB9540_MODEM_CTRL2_SWDBBRSTN_BIT) ? 1 : 0);
}
-static ssize_t store_ab9540_dbbrstn(struct device *dev,
+static ssize_t dbbrstn_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ab8500 *ab8500;
@@ -980,12 +980,11 @@ exit:
return ret;
}
-static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
-static DEVICE_ATTR(switch_off_status, S_IRUGO, show_switch_off_status, NULL);
-static DEVICE_ATTR(turn_on_status, S_IRUGO, show_turn_on_status, NULL);
-static DEVICE_ATTR(turn_on_status_2, S_IRUGO, show_turn_on_status_2, NULL);
-static DEVICE_ATTR(dbbrstn, S_IRUGO | S_IWUSR,
- show_ab9540_dbbrstn, store_ab9540_dbbrstn);
+static DEVICE_ATTR_RO(chip_id);
+static DEVICE_ATTR_RO(switch_off_status);
+static DEVICE_ATTR_RO(turn_on_status);
+static DEVICE_ATTR_RO(turn_on_status_2);
+static DEVICE_ATTR_RW(dbbrstn);
static struct attribute *ab8500_sysfs_entries[] = {
&dev_attr_chip_id.attr,
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index ce6fe6de34f8..9323b1e3a69e 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1447,3 +1447,5 @@ int arizona_dev_exit(struct arizona *arizona)
return 0;
}
EXPORT_SYMBOL_GPL(arizona_dev_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 59dfeff71592..38665efae4f0 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -24,21 +24,11 @@
#define AS3722_DEVICE_ID 0x0C
static const struct resource as3722_rtc_resource[] = {
- {
- .name = "as3722-rtc-alarm",
- .start = AS3722_IRQ_RTC_ALARM,
- .end = AS3722_IRQ_RTC_ALARM,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(AS3722_IRQ_RTC_ALARM, "as3722-rtc-alarm"),
};
static const struct resource as3722_adc_resource[] = {
- {
- .name = "as3722-adc",
- .start = AS3722_IRQ_ADC,
- .end = AS3722_IRQ_ADC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(AS3722_IRQ_ADC, "as3722-adc"),
};
static const struct mfd_cell as3722_devs[] = {
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index a6bd2134cea2..8d58c8df46cf 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -723,16 +723,8 @@ static struct tmio_mmc_data asic3_mmc_data = {
};
static struct resource asic3_mmc_resources[] = {
- {
- .start = ASIC3_SD_CTRL_BASE,
- .end = ASIC3_SD_CTRL_BASE + 0x3ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_MEM(ASIC3_SD_CTRL_BASE, 0x400),
+ DEFINE_RES_IRQ(0)
};
static int asic3_mmc_enable(struct platform_device *pdev)
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 3eae04e24ac8..4145a38b3890 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -884,8 +884,13 @@ int axp20x_match_device(struct axp20x_dev *axp20x)
axp20x->regmap_irq_chip = &axp803_regmap_irq_chip;
break;
case AXP806_ID:
+ /*
+ * Don't register the power key part if in slave mode or
+ * if there is no interrupt line.
+ */
if (of_property_read_bool(axp20x->dev->of_node,
- "x-powers,self-working-mode")) {
+ "x-powers,self-working-mode") &&
+ axp20x->irq > 0) {
axp20x->nr_cells = ARRAY_SIZE(axp806_self_working_cells);
axp20x->cells = axp806_self_working_cells;
} else {
@@ -959,12 +964,17 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
AXP806_REG_ADDR_EXT_ADDR_SLAVE_MODE);
}
- ret = regmap_add_irq_chip(axp20x->regmap, axp20x->irq,
- IRQF_ONESHOT | IRQF_SHARED | axp20x->irq_flags,
- -1, axp20x->regmap_irq_chip, &axp20x->regmap_irqc);
- if (ret) {
- dev_err(axp20x->dev, "failed to add irq chip: %d\n", ret);
- return ret;
+	/* Only if there is an interrupt line connected to the CPU. */
+ if (axp20x->irq > 0) {
+ ret = regmap_add_irq_chip(axp20x->regmap, axp20x->irq,
+ IRQF_ONESHOT | IRQF_SHARED | axp20x->irq_flags,
+ -1, axp20x->regmap_irq_chip,
+ &axp20x->regmap_irqc);
+ if (ret) {
+ dev_err(axp20x->dev, "failed to add irq chip: %d\n",
+ ret);
+ return ret;
+ }
}
ret = mfd_add_devices(axp20x->dev, -1, axp20x->cells,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index d07b43d7c761..8c08d1c55726 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -5,6 +5,7 @@
* Copyright (C) 2014 Google, Inc.
*/
+#include <linux/dmi.h>
#include <linux/kconfig.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
@@ -112,8 +113,12 @@ static const struct cros_feature_to_cells cros_subdevices[] = {
static const struct mfd_cell cros_ec_platform_cells[] = {
{ .name = "cros-ec-chardev", },
{ .name = "cros-ec-debugfs", },
- { .name = "cros-ec-lightbar", },
{ .name = "cros-ec-sysfs", },
+ { .name = "cros-ec-pchg", },
+};
+
+static const struct mfd_cell cros_ec_lightbar_cells[] = {
+ { .name = "cros-ec-lightbar", }
};
static const struct mfd_cell cros_ec_vbc_cells[] = {
@@ -207,6 +212,20 @@ static int ec_device_probe(struct platform_device *pdev)
}
/*
+ * Lightbar is a special case. Newer devices support autodetection,
+ * but older ones do not.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_LIGHTBAR) ||
+ dmi_match(DMI_PRODUCT_NAME, "Link")) {
+ retval = mfd_add_hotplug_devices(ec->dev,
+ cros_ec_lightbar_cells,
+ ARRAY_SIZE(cros_ec_lightbar_cells));
+ if (retval)
+ dev_warn(ec->dev, "failed to add lightbar: %d\n",
+ retval);
+ }
+
+ /*
* The PD notifier driver cell is separate since it only needs to be
* explicitly added on platforms that don't have the PD notifier ACPI
* device entry defined.
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 47556d2d9abe..8de93db35f3a 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -113,6 +113,7 @@ static const struct i2c_device_id da9052_i2c_id[] = {
{"da9053-bc", DA9053_BC},
{}
};
+MODULE_DEVICE_TABLE(i2c, da9052_i2c_id);
#ifdef CONFIG_OF
static const struct of_device_id dialog_dt_ids[] = {
@@ -154,13 +155,8 @@ static int da9052_i2c_probe(struct i2c_client *client,
return ret;
#ifdef CONFIG_OF
- if (!id) {
- struct device_node *np = client->dev.of_node;
- const struct of_device_id *deviceid;
-
- deviceid = of_match_node(dialog_dt_ids, np);
- id = deviceid->data;
- }
+ if (!id)
+ id = of_device_get_match_data(&client->dev);
#endif
if (!id) {
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index d074d213e661..c3bcbd8905c6 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -254,41 +254,19 @@ const struct regmap_config da9055_regmap_config = {
};
EXPORT_SYMBOL_GPL(da9055_regmap_config);
-static const struct resource da9055_onkey_resource = {
- .name = "ONKEY",
- .start = DA9055_IRQ_NONKEY,
- .end = DA9055_IRQ_NONKEY,
- .flags = IORESOURCE_IRQ,
-};
+static const struct resource da9055_onkey_resource =
+ DEFINE_RES_IRQ_NAMED(DA9055_IRQ_NONKEY, "ONKEY");
static const struct resource da9055_rtc_resource[] = {
- {
- .name = "ALM",
- .start = DA9055_IRQ_ALARM,
- .end = DA9055_IRQ_ALARM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "TICK",
- .start = DA9055_IRQ_TICK,
- .end = DA9055_IRQ_TICK,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(DA9055_IRQ_ALARM, "ALM"),
+ DEFINE_RES_IRQ_NAMED(DA9055_IRQ_TICK, "TICK"),
};
-static const struct resource da9055_hwmon_resource = {
- .name = "HWMON",
- .start = DA9055_IRQ_HWMON,
- .end = DA9055_IRQ_HWMON,
- .flags = IORESOURCE_IRQ,
-};
+static const struct resource da9055_hwmon_resource =
+ DEFINE_RES_IRQ_NAMED(DA9055_IRQ_HWMON, "HWMON");
-static const struct resource da9055_ld05_6_resource = {
- .name = "REGULATOR",
- .start = DA9055_IRQ_REGULATOR,
- .end = DA9055_IRQ_REGULATOR,
- .flags = IORESOURCE_IRQ,
-};
+static const struct resource da9055_ld05_6_resource =
+ DEFINE_RES_IRQ_NAMED(DA9055_IRQ_REGULATOR, "REGULATOR");
static const struct mfd_cell da9055_devs[] = {
{
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 8d913375152d..01f8e10dfa55 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
@@ -622,7 +623,6 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct da9062 *chip;
- const struct of_device_id *match;
unsigned int irq_base;
const struct mfd_cell *cell;
const struct regmap_irq_chip *irq_chip;
@@ -635,15 +635,10 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
if (!chip)
return -ENOMEM;
- if (i2c->dev.of_node) {
- match = of_match_node(da9062_dt_ids, i2c->dev.of_node);
- if (!match)
- return -EINVAL;
-
- chip->chip_type = (uintptr_t)match->data;
- } else {
+ if (i2c->dev.of_node)
+ chip->chip_type = (uintptr_t)of_device_get_match_data(&i2c->dev);
+ else
chip->chip_type = id->driver_data;
- }
i2c_set_clientdata(i2c, chip);
chip->dev = &i2c->dev;
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 783a14af18e2..4b7f707b7952 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -448,7 +448,7 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
DA9063_TWOWIRE_TO);
if (ret < 0) {
dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
- return -EIO;
+ return ret;
}
}
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 167faac9b75b..3bde7fda755f 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -616,7 +616,7 @@ enum romcode_read prcmu_get_rc_p2a(void)
}
/**
- * prcmu_get_current_mode - Return the current XP70 power mode
+ * prcmu_get_xp70_current_state - Return the current XP70 power mode
* Returns: Returns the current AP(ARM) power mode: init,
* apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
*/
@@ -898,7 +898,7 @@ unlock_and_return:
}
/**
- * db8500_set_ape_opp - set the appropriate APE OPP
+ * db8500_prcmu_set_ape_opp - set the appropriate APE OPP
* @opp: The new APE operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
@@ -2297,7 +2297,7 @@ u16 db8500_prcmu_get_reset_code(void)
}
/**
- * db8500_prcmu_reset_modem - ask the PRCMU to reset modem
+ * db8500_prcmu_modem_reset - ask the PRCMU to reset modem
*/
void db8500_prcmu_modem_reset(void)
{
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index d3c86a7a3805..6909d075d017 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -2,7 +2,7 @@
/*
* Device driver for MFD hi655x PMIC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 79c53617489c..c54d19fb184c 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -310,6 +310,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51eb), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&bxt_info },
+ /* ADL-M */
+ { PCI_VDEVICE(INTEL, 0x54a8), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x54a9), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x54c5), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x54c6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x54c7), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x54e8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x54e9), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x54ea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x54eb), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&bxt_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 47d0d3a69a58..bc069c4daa60 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -330,14 +330,14 @@ static int regmap_ipc_byte_reg_write(void *context, unsigned int reg,
/* sysfs interfaces to r/w PMIC registers, required by initial script */
static unsigned long bxtwc_reg_addr;
-static ssize_t bxtwc_reg_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t addr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "0x%lx\n", bxtwc_reg_addr);
}
-static ssize_t bxtwc_reg_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t addr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
if (kstrtoul(buf, 0, &bxtwc_reg_addr)) {
dev_err(dev, "Invalid register address\n");
@@ -346,8 +346,8 @@ static ssize_t bxtwc_reg_store(struct device *dev,
return (ssize_t)count;
}
-static ssize_t bxtwc_val_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
unsigned int val;
@@ -362,8 +362,8 @@ static ssize_t bxtwc_val_show(struct device *dev,
return sprintf(buf, "0x%02x\n", val);
}
-static ssize_t bxtwc_val_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t val_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
int ret;
unsigned int val;
@@ -382,8 +382,8 @@ static ssize_t bxtwc_val_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(addr, S_IWUSR | S_IRUSR, bxtwc_reg_show, bxtwc_reg_store);
-static DEVICE_ATTR(val, S_IWUSR | S_IRUSR, bxtwc_val_show, bxtwc_val_store);
+static DEVICE_ATTR_ADMIN_RW(addr);
+static DEVICE_ATTR_ADMIN_RW(val);
static struct attribute *bxtwc_attrs[] = {
&dev_attr_addr.attr,
&dev_attr_val.attr,
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index d1fc38a78acb..9805cf191245 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -998,7 +998,7 @@ static int iqs62x_probe(struct i2c_client *client)
device_property_read_string(&client->dev, "firmware-name", &fw_name);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
fw_name ? : iqs62x->dev_desc->fw_name,
&client->dev, GFP_KERNEL, iqs62x,
iqs62x_firmware_load);
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 3df4e9a2998f..70eba4ce496f 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -149,15 +149,15 @@ static int cmodio_probe_submodules(struct cmodio_device *priv)
* SYSFS Attributes
*/
-static ssize_t mbus_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t modulbus_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct cmodio_device *priv = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex);
}
-static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL);
+static DEVICE_ATTR_RO(modulbus_number);
static struct attribute *cmodio_sysfs_attrs[] = {
&dev_attr_modulbus_number.attr,
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 9166075c1f32..bb26241c73bd 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -344,16 +344,16 @@ static const char *kempld_get_type_string(struct kempld_device_data *pld)
return version_type;
}
-static ssize_t kempld_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pld_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct kempld_device_data *pld = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", pld->info.version);
}
-static ssize_t kempld_specification_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pld_specification_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct kempld_device_data *pld = dev_get_drvdata(dev);
@@ -361,18 +361,17 @@ static ssize_t kempld_specification_show(struct device *dev,
pld->info.spec_minor);
}
-static ssize_t kempld_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pld_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct kempld_device_data *pld = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", kempld_get_type_string(pld));
}
-static DEVICE_ATTR(pld_version, S_IRUGO, kempld_version_show, NULL);
-static DEVICE_ATTR(pld_specification, S_IRUGO, kempld_specification_show,
- NULL);
-static DEVICE_ATTR(pld_type, S_IRUGO, kempld_type_show, NULL);
+static DEVICE_ATTR_RO(pld_version);
+static DEVICE_ATTR_RO(pld_specification);
+static DEVICE_ATTR_RO(pld_type);
static struct attribute *pld_attributes[] = {
&dev_attr_pld_version.attr,
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 9c21483d9653..a52ab76febb3 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -5,6 +5,7 @@
* Author: Keerthy <j-keerthy@ti.com>
*/
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
@@ -64,6 +65,24 @@ static int lp87565_probe(struct i2c_client *client,
return ret;
}
+ lp87565->reset_gpio = devm_gpiod_get_optional(lp87565->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lp87565->reset_gpio)) {
+ ret = PTR_ERR(lp87565->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ }
+
+ if (lp87565->reset_gpio) {
+ gpiod_set_value_cansleep(lp87565->reset_gpio, 1);
+ /* The minimum assertion time is undocumented, just guess */
+ usleep_range(2000, 4000);
+
+ gpiod_set_value_cansleep(lp87565->reset_gpio, 0);
+ /* Min 1.2 ms before first I2C transaction */
+ usleep_range(1500, 3000);
+ }
+
ret = regmap_read(lp87565->regmap, LP87565_REG_OTP_REV, &otpid);
if (ret) {
dev_err(lp87565->dev, "Failed to read OTP ID\n");
@@ -83,6 +102,13 @@ static int lp87565_probe(struct i2c_client *client,
NULL, 0, NULL);
}
+static void lp87565_shutdown(struct i2c_client *client)
+{
+ struct lp87565 *lp87565 = i2c_get_clientdata(client);
+
+ gpiod_set_value_cansleep(lp87565->reset_gpio, 1);
+}
+
static const struct i2c_device_id lp87565_id_table[] = {
{ "lp87565-q1", 0 },
{ },
@@ -95,6 +121,7 @@ static struct i2c_driver lp87565_driver = {
.of_match_table = of_lp87565_match_table,
},
.probe = lp87565_probe,
+ .shutdown = lp87565_shutdown,
.id_table = lp87565_id_table,
};
module_i2c_driver(lp87565_driver);
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index d44baafd9d14..41f566e6a096 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -228,11 +228,9 @@ static int max8907_i2c_probe(struct i2c_client *i2c,
goto err_regmap_rtc;
}
- irq_set_status_flags(max8907->i2c_gen->irq, IRQ_NOAUTOEN);
-
ret = regmap_add_irq_chip(max8907->regmap_gen, max8907->i2c_gen->irq,
- IRQF_ONESHOT | IRQF_SHARED, -1,
- &max8907_chg_irq_chip,
+ IRQF_ONESHOT | IRQF_SHARED,
+ -1, &max8907_chg_irq_chip,
&max8907->irqc_chg);
if (ret != 0) {
dev_err(&i2c->dev, "failed to add chg irq chip: %d\n", ret);
@@ -255,8 +253,6 @@ static int max8907_i2c_probe(struct i2c_client *i2c,
goto err_irqc_rtc;
}
- enable_irq(max8907->i2c_gen->irq);
-
ret = mfd_add_devices(max8907->dev, -1, max8907_cells,
ARRAY_SIZE(max8907_cells), NULL, 0, NULL);
if (ret != 0) {
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 68d8f2b95287..2141de78115d 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
@@ -145,11 +146,9 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
- return (unsigned long)match->data;
- }
+ if (i2c->dev.of_node)
+ return (unsigned long)of_device_get_match_data(&i2c->dev);
+
return id->driver_data;
}
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 785f8e9841b7..0eb15e611b67 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -12,6 +12,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
@@ -155,11 +156,8 @@ static struct max8998_platform_data *max8998_i2c_parse_dt_pdata(
static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(max8998_dt_match, i2c->dev.of_node);
- return (unsigned long)match->data;
- }
+ if (i2c->dev.of_node)
+ return (unsigned long)of_device_get_match_data(&i2c->dev);
return id->driver_data;
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 6f02b8022c6d..79f5c6a18815 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -266,18 +266,18 @@ static int mfd_add_device(struct device *parent, int id,
if (has_acpi_companion(&pdev->dev)) {
ret = acpi_check_resource_conflict(&res[r]);
if (ret)
- goto fail_of_entry;
+ goto fail_res_conflict;
}
}
}
ret = platform_device_add_resources(pdev, res, cell->num_resources);
if (ret)
- goto fail_of_entry;
+ goto fail_res_conflict;
ret = platform_device_add(pdev);
if (ret)
- goto fail_of_entry;
+ goto fail_res_conflict;
if (cell->pm_runtime_no_callbacks)
pm_runtime_no_callbacks(&pdev->dev);
@@ -286,13 +286,15 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
+fail_res_conflict:
+ if (cell->swnode)
+ device_remove_software_node(&pdev->dev);
fail_of_entry:
list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
if (of_entry->dev == &pdev->dev) {
list_del(&of_entry->list);
kfree(of_entry);
}
- device_remove_software_node(&pdev->dev);
fail_alias:
regulator_bulk_unregister_supply_alias(&pdev->dev,
cell->parent_supplies,
@@ -358,11 +360,12 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
if (level && cell->level > *level)
return 0;
+ if (cell->swnode)
+ device_remove_software_node(&pdev->dev);
+
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
- device_remove_software_node(&pdev->dev);
-
platform_device_unregister(pdev);
return 0;
}
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 30d82bfe5b02..6fb206da2729 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -327,6 +327,10 @@ static int cpcap_probe(struct spi_device *spi)
if (ret)
return ret;
+ /* Parent SPI controller uses DMA, CPCAP and child devices do not */
+ spi->dev.coherent_dma_mask = 0;
+ spi->dev.dma_mask = &spi->dev.coherent_dma_mask;
+
return devm_mfd_add_devices(&spi->dev, 0, cpcap_mfd_devices,
ARRAY_SIZE(cpcap_mfd_devices), NULL, 0, NULL);
}
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index 480722acf706..e628953548ce 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -5,121 +5,180 @@
* Author: Gene Chen <gene_chen@richtek.com>
*/
+#include <linux/crc8.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+ MT6360_SLAVE_TCPC = 0,
+ MT6360_SLAVE_PMIC,
+ MT6360_SLAVE_LDO,
+ MT6360_SLAVE_PMU,
+ MT6360_SLAVE_MAX,
+};
+
+struct mt6360_ddata {
+ struct i2c_client *i2c[MT6360_SLAVE_MAX];
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ unsigned int chip_rev;
+ u8 crc8_tbl[CRC8_TABLE_SIZE];
+};
-#include <linux/mfd/mt6360.h>
+#define MT6360_TCPC_SLAVEID 0x4E
+#define MT6360_PMIC_SLAVEID 0x1A
+#define MT6360_LDO_SLAVEID 0x64
+#define MT6360_PMU_SLAVEID 0x34
+
+#define MT6360_REG_TCPCSTART 0x00
+#define MT6360_REG_TCPCEND 0xFF
+#define MT6360_REG_PMICSTART 0x100
+#define MT6360_REG_PMICEND 0x13B
+#define MT6360_REG_LDOSTART 0x200
+#define MT6360_REG_LDOEND 0x21C
+#define MT6360_REG_PMUSTART 0x300
+#define MT6360_PMU_DEV_INFO 0x300
+#define MT6360_PMU_CHG_IRQ1 0x3D0
+#define MT6360_PMU_CHG_MASK1 0x3F0
+#define MT6360_REG_PMUEND 0x3FF
+
+#define MT6360_PMU_IRQ_REGNUM 16
+
+#define CHIP_VEN_MASK 0xF0
+#define CHIP_VEN_MT6360 0x50
+#define CHIP_REV_MASK 0x0F
+
+#define MT6360_ADDRESS_MASK 0x3F
+#define MT6360_DATA_SIZE_1_BYTE 0x00
+#define MT6360_DATA_SIZE_2_BYTES 0x40
+#define MT6360_DATA_SIZE_3_BYTES 0x80
+#define MT6360_DATA_SIZE_4_BYTES 0xC0
+
+#define MT6360_CRC8_POLYNOMIAL 0x7
+
+#define MT6360_CRC_I2C_ADDR_SIZE 1
+#define MT6360_CRC_REG_ADDR_SIZE 1
+/* preallocated read size = i2c device addr + i2c reg addr + val ... + crc8 */
+#define MT6360_ALLOC_READ_SIZE(_size) (_size + 3)
+/* preallocated write size = i2c device addr + i2c reg addr + val ... + crc8 + dummy byte */
+#define MT6360_ALLOC_WRITE_SIZE(_size) (_size + 4)
+#define MT6360_CRC_PREDATA_OFFSET (MT6360_CRC_I2C_ADDR_SIZE + MT6360_CRC_REG_ADDR_SIZE)
+#define MT6360_CRC_CRC8_SIZE 1
+#define MT6360_CRC_DUMMY_BYTE_SIZE 1
+#define MT6360_REGMAP_REG_BYTE_SIZE 2
+#define I2C_ADDR_XLATE_8BIT(_addr, _rw) (((_addr & 0x7F) << 1) + _rw)
/* reg 0 -> 0 ~ 7 */
-#define MT6360_CHG_TREG_EVT (4)
-#define MT6360_CHG_AICR_EVT (5)
-#define MT6360_CHG_MIVR_EVT (6)
-#define MT6360_PWR_RDY_EVT (7)
+#define MT6360_CHG_TREG_EVT 4
+#define MT6360_CHG_AICR_EVT 5
+#define MT6360_CHG_MIVR_EVT 6
+#define MT6360_PWR_RDY_EVT 7
/* REG 1 -> 8 ~ 15 */
-#define MT6360_CHG_BATSYSUV_EVT (9)
-#define MT6360_FLED_CHG_VINOVP_EVT (11)
-#define MT6360_CHG_VSYSUV_EVT (12)
-#define MT6360_CHG_VSYSOV_EVT (13)
-#define MT6360_CHG_VBATOV_EVT (14)
-#define MT6360_CHG_VBUSOV_EVT (15)
+#define MT6360_CHG_BATSYSUV_EVT 9
+#define MT6360_FLED_CHG_VINOVP_EVT 11
+#define MT6360_CHG_VSYSUV_EVT 12
+#define MT6360_CHG_VSYSOV_EVT 13
+#define MT6360_CHG_VBATOV_EVT 14
+#define MT6360_CHG_VBUSOV_EVT 15
/* REG 2 -> 16 ~ 23 */
/* REG 3 -> 24 ~ 31 */
-#define MT6360_WD_PMU_DET (25)
-#define MT6360_WD_PMU_DONE (26)
-#define MT6360_CHG_TMRI (27)
-#define MT6360_CHG_ADPBADI (29)
-#define MT6360_CHG_RVPI (30)
-#define MT6360_OTPI (31)
+#define MT6360_WD_PMU_DET 25
+#define MT6360_WD_PMU_DONE 26
+#define MT6360_CHG_TMRI 27
+#define MT6360_CHG_ADPBADI 29
+#define MT6360_CHG_RVPI 30
+#define MT6360_OTPI 31
/* REG 4 -> 32 ~ 39 */
-#define MT6360_CHG_AICCMEASL (32)
-#define MT6360_CHGDET_DONEI (34)
-#define MT6360_WDTMRI (35)
-#define MT6360_SSFINISHI (36)
-#define MT6360_CHG_RECHGI (37)
-#define MT6360_CHG_TERMI (38)
-#define MT6360_CHG_IEOCI (39)
+#define MT6360_CHG_AICCMEASL 32
+#define MT6360_CHGDET_DONEI 34
+#define MT6360_WDTMRI 35
+#define MT6360_SSFINISHI 36
+#define MT6360_CHG_RECHGI 37
+#define MT6360_CHG_TERMI 38
+#define MT6360_CHG_IEOCI 39
/* REG 5 -> 40 ~ 47 */
-#define MT6360_PUMPX_DONEI (40)
-#define MT6360_BAT_OVP_ADC_EVT (41)
-#define MT6360_TYPEC_OTP_EVT (42)
-#define MT6360_ADC_WAKEUP_EVT (43)
-#define MT6360_ADC_DONEI (44)
-#define MT6360_BST_BATUVI (45)
-#define MT6360_BST_VBUSOVI (46)
-#define MT6360_BST_OLPI (47)
+#define MT6360_PUMPX_DONEI 40
+#define MT6360_BAT_OVP_ADC_EVT 41
+#define MT6360_TYPEC_OTP_EVT 42
+#define MT6360_ADC_WAKEUP_EVT 43
+#define MT6360_ADC_DONEI 44
+#define MT6360_BST_BATUVI 45
+#define MT6360_BST_VBUSOVI 46
+#define MT6360_BST_OLPI 47
/* REG 6 -> 48 ~ 55 */
-#define MT6360_ATTACH_I (48)
-#define MT6360_DETACH_I (49)
-#define MT6360_QC30_STPDONE (51)
-#define MT6360_QC_VBUSDET_DONE (52)
-#define MT6360_HVDCP_DET (53)
-#define MT6360_CHGDETI (54)
-#define MT6360_DCDTI (55)
+#define MT6360_ATTACH_I 48
+#define MT6360_DETACH_I 49
+#define MT6360_QC30_STPDONE 51
+#define MT6360_QC_VBUSDET_DONE 52
+#define MT6360_HVDCP_DET 53
+#define MT6360_CHGDETI 54
+#define MT6360_DCDTI 55
/* REG 7 -> 56 ~ 63 */
-#define MT6360_FOD_DONE_EVT (56)
-#define MT6360_FOD_OV_EVT (57)
-#define MT6360_CHRDET_UVP_EVT (58)
-#define MT6360_CHRDET_OVP_EVT (59)
-#define MT6360_CHRDET_EXT_EVT (60)
-#define MT6360_FOD_LR_EVT (61)
-#define MT6360_FOD_HR_EVT (62)
-#define MT6360_FOD_DISCHG_FAIL_EVT (63)
+#define MT6360_FOD_DONE_EVT 56
+#define MT6360_FOD_OV_EVT 57
+#define MT6360_CHRDET_UVP_EVT 58
+#define MT6360_CHRDET_OVP_EVT 59
+#define MT6360_CHRDET_EXT_EVT 60
+#define MT6360_FOD_LR_EVT 61
+#define MT6360_FOD_HR_EVT 62
+#define MT6360_FOD_DISCHG_FAIL_EVT 63
/* REG 8 -> 64 ~ 71 */
-#define MT6360_USBID_EVT (64)
-#define MT6360_APWDTRST_EVT (65)
-#define MT6360_EN_EVT (66)
-#define MT6360_QONB_RST_EVT (67)
-#define MT6360_MRSTB_EVT (68)
-#define MT6360_OTP_EVT (69)
-#define MT6360_VDDAOV_EVT (70)
-#define MT6360_SYSUV_EVT (71)
+#define MT6360_USBID_EVT 64
+#define MT6360_APWDTRST_EVT 65
+#define MT6360_EN_EVT 66
+#define MT6360_QONB_RST_EVT 67
+#define MT6360_MRSTB_EVT 68
+#define MT6360_OTP_EVT 69
+#define MT6360_VDDAOV_EVT 70
+#define MT6360_SYSUV_EVT 71
/* REG 9 -> 72 ~ 79 */
-#define MT6360_FLED_STRBPIN_EVT (72)
-#define MT6360_FLED_TORPIN_EVT (73)
-#define MT6360_FLED_TX_EVT (74)
-#define MT6360_FLED_LVF_EVT (75)
-#define MT6360_FLED2_SHORT_EVT (78)
-#define MT6360_FLED1_SHORT_EVT (79)
+#define MT6360_FLED_STRBPIN_EVT 72
+#define MT6360_FLED_TORPIN_EVT 73
+#define MT6360_FLED_TX_EVT 74
+#define MT6360_FLED_LVF_EVT 75
+#define MT6360_FLED2_SHORT_EVT 78
+#define MT6360_FLED1_SHORT_EVT 79
/* REG 10 -> 80 ~ 87 */
-#define MT6360_FLED2_STRB_EVT (80)
-#define MT6360_FLED1_STRB_EVT (81)
-#define MT6360_FLED2_STRB_TO_EVT (82)
-#define MT6360_FLED1_STRB_TO_EVT (83)
-#define MT6360_FLED2_TOR_EVT (84)
-#define MT6360_FLED1_TOR_EVT (85)
+#define MT6360_FLED2_STRB_EVT 80
+#define MT6360_FLED1_STRB_EVT 81
+#define MT6360_FLED2_STRB_TO_EVT 82
+#define MT6360_FLED1_STRB_TO_EVT 83
+#define MT6360_FLED2_TOR_EVT 84
+#define MT6360_FLED1_TOR_EVT 85
/* REG 11 -> 88 ~ 95 */
/* REG 12 -> 96 ~ 103 */
-#define MT6360_BUCK1_PGB_EVT (96)
-#define MT6360_BUCK1_OC_EVT (100)
-#define MT6360_BUCK1_OV_EVT (101)
-#define MT6360_BUCK1_UV_EVT (102)
+#define MT6360_BUCK1_PGB_EVT 96
+#define MT6360_BUCK1_OC_EVT 100
+#define MT6360_BUCK1_OV_EVT 101
+#define MT6360_BUCK1_UV_EVT 102
/* REG 13 -> 104 ~ 111 */
-#define MT6360_BUCK2_PGB_EVT (104)
-#define MT6360_BUCK2_OC_EVT (108)
-#define MT6360_BUCK2_OV_EVT (109)
-#define MT6360_BUCK2_UV_EVT (110)
+#define MT6360_BUCK2_PGB_EVT 104
+#define MT6360_BUCK2_OC_EVT 108
+#define MT6360_BUCK2_OV_EVT 109
+#define MT6360_BUCK2_UV_EVT 110
/* REG 14 -> 112 ~ 119 */
-#define MT6360_LDO1_OC_EVT (113)
-#define MT6360_LDO2_OC_EVT (114)
-#define MT6360_LDO3_OC_EVT (115)
-#define MT6360_LDO5_OC_EVT (117)
-#define MT6360_LDO6_OC_EVT (118)
-#define MT6360_LDO7_OC_EVT (119)
+#define MT6360_LDO1_OC_EVT 113
+#define MT6360_LDO2_OC_EVT 114
+#define MT6360_LDO3_OC_EVT 115
+#define MT6360_LDO5_OC_EVT 117
+#define MT6360_LDO6_OC_EVT 118
+#define MT6360_LDO7_OC_EVT 119
/* REG 15 -> 120 ~ 127 */
-#define MT6360_LDO1_PGB_EVT (121)
-#define MT6360_LDO2_PGB_EVT (122)
-#define MT6360_LDO3_PGB_EVT (123)
-#define MT6360_LDO5_PGB_EVT (125)
-#define MT6360_LDO6_PGB_EVT (126)
-#define MT6360_LDO7_PGB_EVT (127)
-
-static const struct regmap_irq mt6360_pmu_irqs[] = {
+#define MT6360_LDO1_PGB_EVT 121
+#define MT6360_LDO2_PGB_EVT 122
+#define MT6360_LDO3_PGB_EVT 123
+#define MT6360_LDO5_PGB_EVT 125
+#define MT6360_LDO6_PGB_EVT 126
+#define MT6360_LDO7_PGB_EVT 127
+
+static const struct regmap_irq mt6360_irqs[] = {
REGMAP_IRQ_REG_LINE(MT6360_CHG_TREG_EVT, 8),
REGMAP_IRQ_REG_LINE(MT6360_CHG_AICR_EVT, 8),
REGMAP_IRQ_REG_LINE(MT6360_CHG_MIVR_EVT, 8),
@@ -208,30 +267,16 @@ static const struct regmap_irq mt6360_pmu_irqs[] = {
REGMAP_IRQ_REG_LINE(MT6360_LDO7_PGB_EVT, 8),
};
-static int mt6360_pmu_handle_post_irq(void *irq_drv_data)
-{
- struct mt6360_pmu_data *mpd = irq_drv_data;
-
- return regmap_update_bits(mpd->regmap,
- MT6360_PMU_IRQ_SET, MT6360_IRQ_RETRIG, MT6360_IRQ_RETRIG);
-}
-
-static struct regmap_irq_chip mt6360_pmu_irq_chip = {
- .irqs = mt6360_pmu_irqs,
- .num_irqs = ARRAY_SIZE(mt6360_pmu_irqs),
+static const struct regmap_irq_chip mt6360_irq_chip = {
+ .name = "mt6360_irqs",
+ .irqs = mt6360_irqs,
+ .num_irqs = ARRAY_SIZE(mt6360_irqs),
.num_regs = MT6360_PMU_IRQ_REGNUM,
.mask_base = MT6360_PMU_CHG_MASK1,
.status_base = MT6360_PMU_CHG_IRQ1,
.ack_base = MT6360_PMU_CHG_IRQ1,
.init_ack_masked = true,
.use_ack = true,
- .handle_post_irq = mt6360_pmu_handle_post_irq,
-};
-
-static const struct regmap_config mt6360_pmu_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = MT6360_PMU_MAXREG,
};
static const struct resource mt6360_adc_resources[] = {
@@ -265,7 +310,7 @@ static const struct resource mt6360_led_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6360_FLED1_STRB_TO_EVT, "fled1_strb_to_evt"),
};
-static const struct resource mt6360_pmic_resources[] = {
+static const struct resource mt6360_regulator_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_PGB_EVT, "buck1_pgb_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_OC_EVT, "buck1_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_OV_EVT, "buck1_ov_evt"),
@@ -278,9 +323,6 @@ static const struct resource mt6360_pmic_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6360_LDO7_OC_EVT, "ldo7_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO6_PGB_EVT, "ldo6_pgb_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO7_PGB_EVT, "ldo7_pgb_evt"),
-};
-
-static const struct resource mt6360_ldo_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6360_LDO1_OC_EVT, "ldo1_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO2_OC_EVT, "ldo2_oc_evt"),
DEFINE_RES_IRQ_NAMED(MT6360_LDO3_OC_EVT, "ldo3_oc_evt"),
@@ -292,84 +334,241 @@ static const struct resource mt6360_ldo_resources[] = {
};
static const struct mfd_cell mt6360_devs[] = {
- MFD_CELL_OF("mt6360_adc", mt6360_adc_resources,
- NULL, 0, 0, "mediatek,mt6360_adc"),
- MFD_CELL_OF("mt6360_chg", mt6360_chg_resources,
- NULL, 0, 0, "mediatek,mt6360_chg"),
- MFD_CELL_OF("mt6360_led", mt6360_led_resources,
- NULL, 0, 0, "mediatek,mt6360_led"),
- MFD_CELL_OF("mt6360_pmic", mt6360_pmic_resources,
- NULL, 0, 0, "mediatek,mt6360_pmic"),
- MFD_CELL_OF("mt6360_ldo", mt6360_ldo_resources,
- NULL, 0, 0, "mediatek,mt6360_ldo"),
- MFD_CELL_OF("mt6360_tcpc", NULL,
- NULL, 0, 0, "mediatek,mt6360_tcpc"),
+ MFD_CELL_OF("mt6360-adc", mt6360_adc_resources,
+ NULL, 0, 0, "mediatek,mt6360-adc"),
+ MFD_CELL_OF("mt6360-chg", mt6360_chg_resources,
+ NULL, 0, 0, "mediatek,mt6360-chg"),
+ MFD_CELL_OF("mt6360-led", mt6360_led_resources,
+ NULL, 0, 0, "mediatek,mt6360-led"),
+ MFD_CELL_RES("mt6360-regulator", mt6360_regulator_resources),
+ MFD_CELL_OF("mt6360-tcpc", NULL,
+ NULL, 0, 0, "mediatek,mt6360-tcpc"),
};
+static int mt6360_check_vendor_info(struct mt6360_ddata *ddata)
+{
+ u32 info;
+ int ret;
+
+ ret = regmap_read(ddata->regmap, MT6360_PMU_DEV_INFO, &info);
+ if (ret < 0)
+ return ret;
+
+ if ((info & CHIP_VEN_MASK) != CHIP_VEN_MT6360) {
+ dev_err(ddata->dev, "Device not supported\n");
+ return -ENODEV;
+ }
+
+ ddata->chip_rev = info & CHIP_REV_MASK;
+
+ return 0;
+}
+
static const unsigned short mt6360_slave_addr[MT6360_SLAVE_MAX] = {
- MT6360_PMU_SLAVEID,
+ MT6360_TCPC_SLAVEID,
MT6360_PMIC_SLAVEID,
MT6360_LDO_SLAVEID,
- MT6360_TCPC_SLAVEID,
+ MT6360_PMU_SLAVEID,
};
-static int mt6360_pmu_probe(struct i2c_client *client)
+static int mt6360_xlate_pmicldo_addr(u8 *addr, int rw_size)
{
- struct mt6360_pmu_data *mpd;
- unsigned int reg_data;
- int i, ret;
+	/* Address is already encoded in bits [5:0] */
+ *addr &= MT6360_ADDRESS_MASK;
+
+ switch (rw_size) {
+ case 1:
+ *addr |= MT6360_DATA_SIZE_1_BYTE;
+ break;
+ case 2:
+ *addr |= MT6360_DATA_SIZE_2_BYTES;
+ break;
+ case 3:
+ *addr |= MT6360_DATA_SIZE_3_BYTES;
+ break;
+ case 4:
+ *addr |= MT6360_DATA_SIZE_4_BYTES;
+ break;
+ default:
+ return -EINVAL;
+ }
- mpd = devm_kzalloc(&client->dev, sizeof(*mpd), GFP_KERNEL);
- if (!mpd)
+ return 0;
+}
+
+static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct mt6360_ddata *ddata = context;
+ u8 bank = *(u8 *)reg;
+ u8 reg_addr = *(u8 *)(reg + 1);
+ struct i2c_client *i2c = ddata->i2c[bank];
+ bool crc_needed = false;
+ u8 *buf;
+ int buf_len = MT6360_ALLOC_READ_SIZE(val_size);
+ int read_size = val_size;
+ u8 crc;
+ int ret;
+
+ if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
+ crc_needed = true;
+ ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size);
+ if (ret < 0)
+ return ret;
+ read_size += MT6360_CRC_CRC8_SIZE;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
- mpd->dev = &client->dev;
- i2c_set_clientdata(client, mpd);
+ buf[0] = I2C_ADDR_XLATE_8BIT(i2c->addr, I2C_SMBUS_READ);
+ buf[1] = reg_addr;
- mpd->regmap = devm_regmap_init_i2c(client, &mt6360_pmu_regmap_config);
- if (IS_ERR(mpd->regmap)) {
- dev_err(&client->dev, "Failed to register regmap\n");
- return PTR_ERR(mpd->regmap);
+ ret = i2c_smbus_read_i2c_block_data(i2c, reg_addr, read_size,
+ buf + MT6360_CRC_PREDATA_OFFSET);
+ if (ret < 0)
+ goto out;
+ else if (ret != read_size) {
+ ret = -EIO;
+ goto out;
}
- ret = regmap_read(mpd->regmap, MT6360_PMU_DEV_INFO, &reg_data);
- if (ret) {
- dev_err(&client->dev, "Device not found\n");
- return ret;
+ if (crc_needed) {
+ crc = crc8(ddata->crc8_tbl, buf, val_size + MT6360_CRC_PREDATA_OFFSET, 0);
+ if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET]) {
+ ret = -EIO;
+ goto out;
+ }
}
- mpd->chip_rev = reg_data & CHIP_REV_MASK;
- if (mpd->chip_rev != CHIP_VEN_MT6360) {
- dev_err(&client->dev, "Device not supported\n");
- return -ENODEV;
+ memcpy(val, buf + MT6360_CRC_PREDATA_OFFSET, val_size);
+out:
+ kfree(buf);
+ return (ret < 0) ? ret : 0;
+}
+
+static int mt6360_regmap_write(void *context, const void *val, size_t val_size)
+{
+ struct mt6360_ddata *ddata = context;
+ u8 bank = *(u8 *)val;
+ u8 reg_addr = *(u8 *)(val + 1);
+ struct i2c_client *i2c = ddata->i2c[bank];
+ bool crc_needed = false;
+ u8 *buf;
+ int buf_len = MT6360_ALLOC_WRITE_SIZE(val_size);
+ int write_size = val_size - MT6360_REGMAP_REG_BYTE_SIZE;
+ int ret;
+
+ if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
+ crc_needed = true;
+ ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size - MT6360_REGMAP_REG_BYTE_SIZE);
+ if (ret < 0)
+ return ret;
}
- mt6360_pmu_irq_chip.irq_drv_data = mpd;
- ret = devm_regmap_add_irq_chip(&client->dev, mpd->regmap, client->irq,
- IRQF_TRIGGER_FALLING, 0,
- &mt6360_pmu_irq_chip, &mpd->irq_data);
- if (ret) {
- dev_err(&client->dev, "Failed to add Regmap IRQ Chip\n");
- return ret;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = I2C_ADDR_XLATE_8BIT(i2c->addr, I2C_SMBUS_WRITE);
+ buf[1] = reg_addr;
+ memcpy(buf + MT6360_CRC_PREDATA_OFFSET, val + MT6360_REGMAP_REG_BYTE_SIZE, write_size);
+
+ if (crc_needed) {
+ buf[val_size] = crc8(ddata->crc8_tbl, buf, val_size, 0);
+ write_size += (MT6360_CRC_CRC8_SIZE + MT6360_CRC_DUMMY_BYTE_SIZE);
}
- mpd->i2c[0] = client;
- for (i = 1; i < MT6360_SLAVE_MAX; i++) {
- mpd->i2c[i] = devm_i2c_new_dummy_device(&client->dev,
- client->adapter,
- mt6360_slave_addr[i]);
- if (IS_ERR(mpd->i2c[i])) {
+ ret = i2c_smbus_write_i2c_block_data(i2c, reg_addr, write_size,
+ buf + MT6360_CRC_PREDATA_OFFSET);
+
+ kfree(buf);
+ return ret;
+}
+
+static const struct regmap_bus mt6360_regmap_bus = {
+ .read = mt6360_regmap_read,
+ .write = mt6360_regmap_write,
+
+ /* Due to PMIC and LDO CRC access size limit */
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+};
+
+static bool mt6360_is_readwrite_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MT6360_REG_TCPCSTART ... MT6360_REG_TCPCEND:
+ fallthrough;
+ case MT6360_REG_PMICSTART ... MT6360_REG_PMICEND:
+ fallthrough;
+ case MT6360_REG_LDOSTART ... MT6360_REG_LDOEND:
+ fallthrough;
+ case MT6360_REG_PMUSTART ... MT6360_REG_PMUEND:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config mt6360_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = MT6360_REG_PMUEND,
+ .writeable_reg = mt6360_is_readwrite_reg,
+ .readable_reg = mt6360_is_readwrite_reg,
+};
+
+static int mt6360_probe(struct i2c_client *client)
+{
+ struct mt6360_ddata *ddata;
+ int i, ret;
+
+ ddata = devm_kzalloc(&client->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &client->dev;
+ i2c_set_clientdata(client, ddata);
+
+ for (i = 0; i < MT6360_SLAVE_MAX - 1; i++) {
+ ddata->i2c[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ mt6360_slave_addr[i]);
+ if (IS_ERR(ddata->i2c[i])) {
dev_err(&client->dev,
"Failed to get new dummy I2C device for address 0x%x",
mt6360_slave_addr[i]);
- return PTR_ERR(mpd->i2c[i]);
+ return PTR_ERR(ddata->i2c[i]);
}
- i2c_set_clientdata(mpd->i2c[i], mpd);
+ }
+ ddata->i2c[MT6360_SLAVE_MAX - 1] = client;
+
+ crc8_populate_msb(ddata->crc8_tbl, MT6360_CRC8_POLYNOMIAL);
+ ddata->regmap = devm_regmap_init(ddata->dev, &mt6360_regmap_bus, ddata,
+ &mt6360_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ dev_err(&client->dev, "Failed to register regmap\n");
+ return PTR_ERR(ddata->regmap);
+ }
+
+ ret = mt6360_check_vendor_info(ddata);
+ if (ret)
+ return ret;
+
+ ret = devm_regmap_add_irq_chip(&client->dev, ddata->regmap, client->irq,
+ 0, 0, &mt6360_irq_chip,
+ &ddata->irq_data);
+ if (ret) {
+ dev_err(&client->dev, "Failed to add Regmap IRQ Chip\n");
+ return ret;
}
ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
mt6360_devs, ARRAY_SIZE(mt6360_devs), NULL,
- 0, regmap_irq_get_domain(mpd->irq_data));
+ 0, regmap_irq_get_domain(ddata->irq_data));
if (ret) {
dev_err(&client->dev,
"Failed to register subordinate devices\n");
@@ -379,7 +578,7 @@ static int mt6360_pmu_probe(struct i2c_client *client)
return 0;
}
-static int __maybe_unused mt6360_pmu_suspend(struct device *dev)
+static int __maybe_unused mt6360_suspend(struct device *dev)
{
struct i2c_client *i2c = to_i2c_client(dev);
@@ -389,7 +588,7 @@ static int __maybe_unused mt6360_pmu_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mt6360_pmu_resume(struct device *dev)
+static int __maybe_unused mt6360_resume(struct device *dev)
{
struct i2c_client *i2c = to_i2c_client(dev);
@@ -400,25 +599,24 @@ static int __maybe_unused mt6360_pmu_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(mt6360_pmu_pm_ops,
- mt6360_pmu_suspend, mt6360_pmu_resume);
+static SIMPLE_DEV_PM_OPS(mt6360_pm_ops, mt6360_suspend, mt6360_resume);
-static const struct of_device_id __maybe_unused mt6360_pmu_of_id[] = {
- { .compatible = "mediatek,mt6360_pmu", },
+static const struct of_device_id __maybe_unused mt6360_of_id[] = {
+ { .compatible = "mediatek,mt6360", },
{},
};
-MODULE_DEVICE_TABLE(of, mt6360_pmu_of_id);
+MODULE_DEVICE_TABLE(of, mt6360_of_id);
-static struct i2c_driver mt6360_pmu_driver = {
+static struct i2c_driver mt6360_driver = {
.driver = {
- .name = "mt6360_pmu",
- .pm = &mt6360_pmu_pm_ops,
- .of_match_table = of_match_ptr(mt6360_pmu_of_id),
+ .name = "mt6360",
+ .pm = &mt6360_pm_ops,
+ .of_match_table = of_match_ptr(mt6360_of_id),
},
- .probe_new = mt6360_pmu_probe,
+ .probe_new = mt6360_probe,
};
-module_i2c_driver(mt6360_pmu_driver);
+module_i2c_driver(mt6360_driver);
MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
-MODULE_DESCRIPTION("MT6360 PMU I2C Driver");
+MODULE_DESCRIPTION("MT6360 I2C Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 9a615f75fbde..bddb40054b9e 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -47,14 +47,21 @@ static const struct resource mt6397_rtc_resources[] = {
DEFINE_RES_IRQ(MT6397_IRQ_RTC),
};
+static const struct resource mt6358_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6358_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6358_IRQ_HOMEKEY, "homekey"),
+ DEFINE_RES_IRQ_NAMED(MT6358_IRQ_PWRKEY_R, "powerkey_r"),
+ DEFINE_RES_IRQ_NAMED(MT6358_IRQ_HOMEKEY_R, "homekey_r"),
+};
+
static const struct resource mt6323_keys_resources[] = {
- DEFINE_RES_IRQ(MT6323_IRQ_STATUS_PWRKEY),
- DEFINE_RES_IRQ(MT6323_IRQ_STATUS_FCHRKEY),
+ DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
};
static const struct resource mt6397_keys_resources[] = {
- DEFINE_RES_IRQ(MT6397_IRQ_PWRKEY),
- DEFINE_RES_IRQ(MT6397_IRQ_HOMEKEY),
+ DEFINE_RES_IRQ_NAMED(MT6397_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6397_IRQ_HOMEKEY, "homekey"),
};
static const struct resource mt6323_pwrc_resources[] = {
@@ -98,6 +105,11 @@ static const struct mfd_cell mt6358_devs[] = {
}, {
.name = "mt6358-sound",
.of_compatible = "mediatek,mt6358-sound"
+ }, {
+ .name = "mt6358-keys",
+ .num_resources = ARRAY_SIZE(mt6358_keys_resources),
+ .resources = mt6358_keys_resources,
+ .of_compatible = "mediatek,mt6358-keys"
},
};
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 2a3a240b4619..787d2ae86375 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI
*
* Copyright (C) 2011-2013 Texas Instruments Incorporated - https://www.ti.com
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 16fad79c73f1..080d7970a377 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI
*
* Copyright (C) 2012-2013 Texas Instruments Incorporated - https://www.ti.com
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 148bcd6120f4..e9c565cf0f54 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -77,8 +77,8 @@ int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 val)
EXPORT_SYMBOL_GPL(pcf50633_reg_clear_bits);
/* sysfs attributes */
-static ssize_t show_dump_regs(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pcf50633 *pcf = dev_get_drvdata(dev);
u8 dump[16];
@@ -106,10 +106,10 @@ static ssize_t show_dump_regs(struct device *dev, struct device_attribute *attr,
return buf1 - buf;
}
-static DEVICE_ATTR(dump_regs, 0400, show_dump_regs, NULL);
+static DEVICE_ATTR_ADMIN_RO(dump_regs);
-static ssize_t show_resume_reason(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t resume_reason_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pcf50633 *pcf = dev_get_drvdata(dev);
int n;
@@ -123,7 +123,7 @@ static ssize_t show_resume_reason(struct device *dev,
return n;
}
-static DEVICE_ATTR(resume_reason, 0400, show_resume_reason, NULL);
+static DEVICE_ATTR_ADMIN_RO(resume_reason);
static struct attribute *pcf_sysfs_entries[] = {
&dev_attr_dump_regs.attr,
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
new file mode 100644
index 000000000000..c472d7f8103c
--- /dev/null
+++ b/drivers/mfd/qcom-pm8008.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mfd/qcom-pm8008.h>
+
+#define I2C_INTR_STATUS_BASE 0x0550
+#define INT_RT_STS_OFFSET 0x10
+#define INT_SET_TYPE_OFFSET 0x11
+#define INT_POL_HIGH_OFFSET 0x12
+#define INT_POL_LOW_OFFSET 0x13
+#define INT_LATCHED_CLR_OFFSET 0x14
+#define INT_EN_SET_OFFSET 0x15
+#define INT_EN_CLR_OFFSET 0x16
+#define INT_LATCHED_STS_OFFSET 0x18
+
+enum {
+ PM8008_MISC,
+ PM8008_TEMP_ALARM,
+ PM8008_GPIO1,
+ PM8008_GPIO2,
+ PM8008_NUM_PERIPHS,
+};
+
+#define PM8008_PERIPH_0_BASE 0x900
+#define PM8008_PERIPH_1_BASE 0x2400
+#define PM8008_PERIPH_2_BASE 0xC000
+#define PM8008_PERIPH_3_BASE 0xC100
+
+#define PM8008_TEMP_ALARM_ADDR PM8008_PERIPH_1_BASE
+#define PM8008_GPIO1_ADDR PM8008_PERIPH_2_BASE
+#define PM8008_GPIO2_ADDR PM8008_PERIPH_3_BASE
+
+#define PM8008_STATUS_BASE (PM8008_PERIPH_0_BASE | INT_LATCHED_STS_OFFSET)
+#define PM8008_MASK_BASE (PM8008_PERIPH_0_BASE | INT_EN_SET_OFFSET)
+#define PM8008_UNMASK_BASE (PM8008_PERIPH_0_BASE | INT_EN_CLR_OFFSET)
+#define PM8008_TYPE_BASE (PM8008_PERIPH_0_BASE | INT_SET_TYPE_OFFSET)
+#define PM8008_ACK_BASE (PM8008_PERIPH_0_BASE | INT_LATCHED_CLR_OFFSET)
+#define PM8008_POLARITY_HI_BASE (PM8008_PERIPH_0_BASE | INT_POL_HIGH_OFFSET)
+#define PM8008_POLARITY_LO_BASE (PM8008_PERIPH_0_BASE | INT_POL_LOW_OFFSET)
+
+#define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE)
+
+struct pm8008_data {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)};
+static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)};
+static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)};
+static unsigned int p3_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_3_BASE)};
+
+static struct regmap_irq_sub_irq_map pm8008_sub_reg_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(p0_offs),
+ REGMAP_IRQ_MAIN_REG_OFFSET(p1_offs),
+ REGMAP_IRQ_MAIN_REG_OFFSET(p2_offs),
+ REGMAP_IRQ_MAIN_REG_OFFSET(p3_offs),
+};
+
+static unsigned int pm8008_virt_regs[] = {
+ PM8008_POLARITY_HI_BASE,
+ PM8008_POLARITY_LO_BASE,
+};
+
+enum {
+ POLARITY_HI_INDEX,
+ POLARITY_LO_INDEX,
+ PM8008_NUM_VIRT_REGS,
+};
+
+static struct regmap_irq pm8008_irqs[] = {
+ REGMAP_IRQ_REG(PM8008_IRQ_MISC_UVLO, PM8008_MISC, BIT(0)),
+ REGMAP_IRQ_REG(PM8008_IRQ_MISC_OVLO, PM8008_MISC, BIT(1)),
+ REGMAP_IRQ_REG(PM8008_IRQ_MISC_OTST2, PM8008_MISC, BIT(2)),
+ REGMAP_IRQ_REG(PM8008_IRQ_MISC_OTST3, PM8008_MISC, BIT(3)),
+ REGMAP_IRQ_REG(PM8008_IRQ_MISC_LDO_OCP, PM8008_MISC, BIT(4)),
+ REGMAP_IRQ_REG(PM8008_IRQ_TEMP_ALARM, PM8008_TEMP_ALARM, BIT(0)),
+ REGMAP_IRQ_REG(PM8008_IRQ_GPIO1, PM8008_GPIO1, BIT(0)),
+ REGMAP_IRQ_REG(PM8008_IRQ_GPIO2, PM8008_GPIO2, BIT(0)),
+};
+
+static int pm8008_set_type_virt(unsigned int **virt_buf,
+ unsigned int type, unsigned long hwirq,
+ int reg)
+{
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ virt_buf[POLARITY_HI_INDEX][reg] &= ~pm8008_irqs[hwirq].mask;
+ virt_buf[POLARITY_LO_INDEX][reg] |= pm8008_irqs[hwirq].mask;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ virt_buf[POLARITY_HI_INDEX][reg] |= pm8008_irqs[hwirq].mask;
+ virt_buf[POLARITY_LO_INDEX][reg] &= ~pm8008_irqs[hwirq].mask;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ virt_buf[POLARITY_HI_INDEX][reg] |= pm8008_irqs[hwirq].mask;
+ virt_buf[POLARITY_LO_INDEX][reg] |= pm8008_irqs[hwirq].mask;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct regmap_irq_chip pm8008_irq_chip = {
+ .name = "pm8008_irq",
+ .main_status = I2C_INTR_STATUS_BASE,
+ .num_main_regs = 1,
+ .num_virt_regs = PM8008_NUM_VIRT_REGS,
+ .irqs = pm8008_irqs,
+ .num_irqs = ARRAY_SIZE(pm8008_irqs),
+ .num_regs = PM8008_NUM_PERIPHS,
+ .not_fixed_stride = true,
+ .sub_reg_offsets = pm8008_sub_reg_offsets,
+ .set_type_virt = pm8008_set_type_virt,
+ .status_base = PM8008_STATUS_BASE,
+ .mask_base = PM8008_MASK_BASE,
+ .unmask_base = PM8008_UNMASK_BASE,
+ .type_base = PM8008_TYPE_BASE,
+ .ack_base = PM8008_ACK_BASE,
+ .virt_reg_base = pm8008_virt_regs,
+ .num_type_reg = PM8008_NUM_PERIPHS,
+};
+
+static struct regmap_config qcom_mfd_regmap_cfg = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xFFFF,
+};
+
+static int pm8008_init(struct pm8008_data *chip)
+{
+ int rc;
+
+ /*
+ * Set TEMP_ALARM peripheral's TYPE so that the regmap-irq framework
+ * reads this as the default value instead of zero, the HW default.
+ * This is required to enable the writing of TYPE registers in
+ * regmap_irq_sync_unlock().
+ */
+ rc = regmap_write(chip->regmap,
+ (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET),
+ BIT(0));
+ if (rc)
+ return rc;
+
+ /* Do the same for GPIO1 and GPIO2 peripherals */
+ rc = regmap_write(chip->regmap,
+ (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ if (rc)
+ return rc;
+
+ rc = regmap_write(chip->regmap,
+ (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+
+ return rc;
+}
+
+static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
+ int client_irq)
+{
+ int rc, i;
+ struct regmap_irq_type *type;
+ struct regmap_irq_chip_data *irq_data;
+
+ rc = pm8008_init(chip);
+ if (rc) {
+ dev_err(chip->dev, "Init failed: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pm8008_irqs); i++) {
+ type = &pm8008_irqs[i].type;
+
+ type->type_reg_offset = pm8008_irqs[i].reg_offset;
+ type->type_rising_val = pm8008_irqs[i].mask;
+ type->type_falling_val = pm8008_irqs[i].mask;
+ type->type_level_high_val = 0;
+ type->type_level_low_val = 0;
+
+ if (type->type_reg_offset == PM8008_MISC)
+ type->types_supported = IRQ_TYPE_EDGE_RISING;
+ else
+ type->types_supported = (IRQ_TYPE_EDGE_BOTH |
+ IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
+ }
+
+ rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq,
+ IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
+ if (rc) {
+ dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pm8008_probe(struct i2c_client *client)
+{
+ int rc;
+ struct pm8008_data *chip;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &client->dev;
+ chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+ if (!chip->regmap)
+ return -ENODEV;
+
+ i2c_set_clientdata(client, chip);
+
+ if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) {
+ rc = pm8008_probe_irq_peripherals(chip, client->irq);
+ if (rc)
+ dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc);
+ }
+
+ return devm_of_platform_populate(chip->dev);
+}
+
+static const struct of_device_id pm8008_match[] = {
+ { .compatible = "qcom,pm8008", },
+ { },
+};
+
+static struct i2c_driver pm8008_mfd_driver = {
+ .driver = {
+ .name = "pm8008",
+ .of_match_table = pm8008_match,
+ },
+ .probe_new = pm8008_probe,
+};
+module_i2c_driver(pm8008_mfd_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:qcom-pm8008");
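One note on the probe above: devm_regmap_init_i2c() reports failure through ERR_PTR() rather than by returning NULL, so the usual check looks like the following sketch (demo_* names are placeholders, not code from this patch):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config demo_regmap_cfg = {
	.reg_bits = 16,
	.val_bits = 8,
	.max_register = 0xffff,
};

static int demo_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &demo_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* propagate the regmap error code */

	return 0;
}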
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index ad923dd4e007..77ccd31ca1d9 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -65,6 +65,7 @@ static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case RK817_SECONDS_REG ... RK817_WEEKS_REG:
case RK817_RTC_STATUS_REG:
+ case RK817_CODEC_DTOP_LPT_SRST:
case RK817_INT_STS_REG0:
case RK817_INT_STS_REG1:
case RK817_INT_STS_REG2:
@@ -163,6 +164,7 @@ static const struct mfd_cell rk817s[] = {
.num_resources = ARRAY_SIZE(rk817_rtc_resources),
.resources = &rk817_rtc_resources[0],
},
+ { .name = "rk817-codec",},
};
static const struct mfd_cell rk818s[] = {
@@ -201,6 +203,85 @@ static const struct rk808_reg_data rk808_pre_init_reg[] = {
static const struct rk808_reg_data rk817_pre_init_reg[] = {
{RK817_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
+ /* Codec specific registers */
+ { RK817_CODEC_DTOP_VUCTL, MASK_ALL, 0x03 },
+ { RK817_CODEC_DTOP_VUCTIME, MASK_ALL, 0x00 },
+ { RK817_CODEC_DTOP_LPT_SRST, MASK_ALL, 0x00 },
+ { RK817_CODEC_DTOP_DIGEN_CLKE, MASK_ALL, 0x00 },
+ /* from vendor driver, CODEC_AREF_RTCFG0 not defined in data sheet */
+ { RK817_CODEC_AREF_RTCFG0, MASK_ALL, 0x00 },
+ { RK817_CODEC_AREF_RTCFG1, MASK_ALL, 0x06 },
+ { RK817_CODEC_AADC_CFG0, MASK_ALL, 0xc8 },
+ /* from vendor driver, CODEC_AADC_CFG1 not defined in data sheet */
+ { RK817_CODEC_AADC_CFG1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_VOLL, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_VOLR, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_SR_ACL0, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_ALC1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_ALC2, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_NG, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_HPF, MASK_ALL, 0x00 },
+ { RK817_CODEC_DADC_RVOLL, MASK_ALL, 0xff },
+ { RK817_CODEC_DADC_RVOLR, MASK_ALL, 0xff },
+ { RK817_CODEC_AMIC_CFG0, MASK_ALL, 0x70 },
+ { RK817_CODEC_AMIC_CFG1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_PGA_GAIN, MASK_ALL, 0x66 },
+ { RK817_CODEC_DMIC_LMT1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_LMT2, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_NG1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_NG2, MASK_ALL, 0x00 },
+ /* from vendor driver, CODEC_ADAC_CFG0 not defined in data sheet */
+ { RK817_CODEC_ADAC_CFG0, MASK_ALL, 0x00 },
+ { RK817_CODEC_ADAC_CFG1, MASK_ALL, 0x07 },
+ { RK817_CODEC_DDAC_POPD_DACST, MASK_ALL, 0x82 },
+ { RK817_CODEC_DDAC_VOLL, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_VOLR, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_SR_LMT0, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_LMT1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_LMT2, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_MUTE_MIXCTL, MASK_ALL, 0xa0 },
+ { RK817_CODEC_DDAC_RVOLL, MASK_ALL, 0xff },
+ { RK817_CODEC_DADC_RVOLR, MASK_ALL, 0xff },
+ { RK817_CODEC_AMIC_CFG0, MASK_ALL, 0x70 },
+ { RK817_CODEC_AMIC_CFG1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_PGA_GAIN, MASK_ALL, 0x66 },
+ { RK817_CODEC_DMIC_LMT1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_LMT2, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_NG1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DMIC_NG2, MASK_ALL, 0x00 },
+ /* from vendor driver, CODEC_ADAC_CFG0 not defined in data sheet */
+ { RK817_CODEC_ADAC_CFG0, MASK_ALL, 0x00 },
+ { RK817_CODEC_ADAC_CFG1, MASK_ALL, 0x07 },
+ { RK817_CODEC_DDAC_POPD_DACST, MASK_ALL, 0x82 },
+ { RK817_CODEC_DDAC_VOLL, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_VOLR, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_SR_LMT0, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_LMT1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_LMT2, MASK_ALL, 0x00 },
+ { RK817_CODEC_DDAC_MUTE_MIXCTL, MASK_ALL, 0xa0 },
+ { RK817_CODEC_DDAC_RVOLL, MASK_ALL, 0xff },
+ { RK817_CODEC_DDAC_RVOLR, MASK_ALL, 0xff },
+ { RK817_CODEC_AHP_ANTI0, MASK_ALL, 0x00 },
+ { RK817_CODEC_AHP_ANTI1, MASK_ALL, 0x00 },
+ { RK817_CODEC_AHP_CFG0, MASK_ALL, 0xe0 },
+ { RK817_CODEC_AHP_CFG1, MASK_ALL, 0x1f },
+ { RK817_CODEC_AHP_CP, MASK_ALL, 0x09 },
+ { RK817_CODEC_ACLASSD_CFG1, MASK_ALL, 0x69 },
+ { RK817_CODEC_ACLASSD_CFG2, MASK_ALL, 0x44 },
+ { RK817_CODEC_APLL_CFG0, MASK_ALL, 0x04 },
+ { RK817_CODEC_APLL_CFG1, MASK_ALL, 0x00 },
+ { RK817_CODEC_APLL_CFG2, MASK_ALL, 0x30 },
+ { RK817_CODEC_APLL_CFG3, MASK_ALL, 0x19 },
+ { RK817_CODEC_APLL_CFG4, MASK_ALL, 0x65 },
+ { RK817_CODEC_APLL_CFG5, MASK_ALL, 0x01 },
+ { RK817_CODEC_DI2S_CKM, MASK_ALL, 0x01 },
+ { RK817_CODEC_DI2S_RSD, MASK_ALL, 0x00 },
+ { RK817_CODEC_DI2S_RXCR1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DI2S_RXCR2, MASK_ALL, 0x17 },
+ { RK817_CODEC_DI2S_RXCMD_TSD, MASK_ALL, 0x00 },
+ { RK817_CODEC_DI2S_TXCR1, MASK_ALL, 0x00 },
+ { RK817_CODEC_DI2S_TXCR2, MASK_ALL, 0x17 },
+ { RK817_CODEC_DI2S_TXCR3_TXCMD, MASK_ALL, 0x00 },
{RK817_GPIO_INT_CFG, RK817_INT_POL_MSK, RK817_INT_POL_L},
{RK817_SYS_CFG(1), RK817_HOTDIE_TEMP_MSK | RK817_TSD_TEMP_MSK,
RK817_HOTDIE_105 | RK817_TSD_140},
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 6ed04e6dbc78..384acb459427 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -107,7 +107,7 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618)
ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap,
rn5t618->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
0, irq_chip, &rn5t618->irq_data);
if (ret)
dev_err(rn5t618->dev, "Failed to register IRQ chip\n");
diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c
new file mode 100644
index 000000000000..b169781ac675
--- /dev/null
+++ b/drivers/mfd/rt4831.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define RT4831_REG_REVISION 0x01
+#define RT4831_REG_ENABLE 0x08
+#define RT4831_REG_I2CPROT 0x15
+
+#define RICHTEK_VENDOR_ID 0x03
+#define RT4831_VID_MASK GENMASK(1, 0)
+#define RT4831_RESET_MASK BIT(7)
+#define RT4831_I2CSAFETMR_MASK BIT(0)
+
+static const struct mfd_cell rt4831_subdevs[] = {
+ MFD_CELL_OF("rt4831-backlight", NULL, NULL, 0, 0, "richtek,rt4831-backlight"),
+ MFD_CELL_NAME("rt4831-regulator")
+};
+
+static bool rt4831_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= RT4831_REG_REVISION && reg <= RT4831_REG_I2CPROT)
+ return true;
+ return false;
+}
+
+static const struct regmap_config rt4831_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT4831_REG_I2CPROT,
+
+ .readable_reg = rt4831_is_accessible_reg,
+ .writeable_reg = rt4831_is_accessible_reg,
+};
+
+static int rt4831_probe(struct i2c_client *client)
+{
+ struct gpio_desc *enable_gpio;
+ struct regmap *regmap;
+ unsigned int chip_id;
+ int ret;
+
+ enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(enable_gpio)) {
+ dev_err(&client->dev, "Failed to get 'enable' GPIO\n");
+ return PTR_ERR(enable_gpio);
+ }
+
+ regmap = devm_regmap_init_i2c(client, &rt4831_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to initialize regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_read(regmap, RT4831_REG_REVISION, &chip_id);
+ if (ret) {
+ dev_err(&client->dev, "Failed to get H/W revision\n");
+ return ret;
+ }
+
+ if ((chip_id & RT4831_VID_MASK) != RICHTEK_VENDOR_ID) {
+ dev_err(&client->dev, "Chip vendor ID 0x%02x not matched\n", chip_id);
+ return -ENODEV;
+ }
+
+ /*
+ * Enable the I2C safety timer to prevent an abnormal shutdown:
+ * if SCL/SDA are both held low for one second, the hardware resets.
+ */
+ ret = regmap_update_bits(regmap, RT4831_REG_I2CPROT, RT4831_I2CSAFETMR_MASK,
+ RT4831_I2CSAFETMR_MASK);
+ if (ret) {
+ dev_err(&client->dev, "Failed to enable I2C safety timer\n");
+ return ret;
+ }
+
+ return devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO, rt4831_subdevs,
+ ARRAY_SIZE(rt4831_subdevs), NULL, 0, NULL);
+}
+
+static int rt4831_remove(struct i2c_client *client)
+{
+ struct regmap *regmap = dev_get_regmap(&client->dev, NULL);
+
+ /* Disable WLED and DSV outputs */
+ return regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK);
+}
+
+static const struct of_device_id __maybe_unused rt4831_of_match[] = {
+ { .compatible = "richtek,rt4831", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt4831_of_match);
+
+static struct i2c_driver rt4831_driver = {
+ .driver = {
+ .name = "rt4831",
+ .of_match_table = rt4831_of_match,
+ },
+ .probe_new = rt4831_probe,
+ .remove = rt4831_remove,
+};
+module_i2c_driver(rt4831_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
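The child registration in the rt4831 probe uses the MFD_CELL_OF()/MFD_CELL_NAME() helpers together with devm_mfd_add_devices(). A minimal sketch of that pattern; the demo_* names and the "vendor,demo-backlight" compatible are invented for illustration:

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static const struct mfd_cell demo_subdevs[] = {
	/* name, resources, platform data, pdata size, id, of compatible */
	MFD_CELL_OF("demo-backlight", NULL, NULL, 0, 0, "vendor,demo-backlight"),
	MFD_CELL_NAME("demo-regulator"),
};

static int demo_register_children(struct device *dev)
{
	/* PLATFORM_DEVID_AUTO lets the core pick unique child device ids */
	return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, demo_subdevs,
				    ARRAY_SIZE(demo_subdevs), NULL, 0, NULL);
}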
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 8d55992da19e..1fb29c45f5cf 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
@@ -93,7 +94,6 @@ static const struct mfd_cell s2mpu02_devs[] = {
{ .name = "s2mpu02-regulator", },
};
-#ifdef CONFIG_OF
static const struct of_device_id sec_dt_match[] = {
{
.compatible = "samsung,s5m8767-pmic",
@@ -121,7 +121,6 @@ static const struct of_device_id sec_dt_match[] = {
},
};
MODULE_DEVICE_TABLE(of, sec_dt_match);
-#endif
static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
{
@@ -281,7 +280,6 @@ static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
}
}
-#ifdef CONFIG_OF
/*
* Only the common platform data elements for s5m8767 are parsed here from the
* device tree. Other sub-modules of s5m8767 such as pmic, rtc, charger and
@@ -300,48 +298,20 @@ sec_pmic_i2c_parse_dt_pdata(struct device *dev)
if (!pd)
return ERR_PTR(-ENOMEM);
- /*
- * ToDo: the 'wakeup' member in the platform data is more of a linux
- * specfic information. Hence, there is no binding for that yet and
- * not parsed here.
- */
-
pd->manual_poweroff = of_property_read_bool(dev->of_node,
"samsung,s2mps11-acokb-ground");
pd->disable_wrstbi = of_property_read_bool(dev->of_node,
"samsung,s2mps11-wrstbi-ground");
return pd;
}
-#else
-static struct sec_platform_data *
-sec_pmic_i2c_parse_dt_pdata(struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static inline unsigned long sec_i2c_get_driver_data(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
-#ifdef CONFIG_OF
- if (i2c->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_node(sec_dt_match, i2c->dev.of_node);
- return (unsigned long)match->data;
- }
-#endif
- return id->driver_data;
-}
static int sec_pmic_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
const struct regmap_config *regmap;
+ struct sec_platform_data *pdata;
const struct mfd_cell *sec_devs;
struct sec_pmic_dev *sec_pmic;
- unsigned long device_type;
int ret, num_sec_devs;
sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev),
@@ -353,23 +323,16 @@ static int sec_pmic_probe(struct i2c_client *i2c,
sec_pmic->dev = &i2c->dev;
sec_pmic->i2c = i2c;
sec_pmic->irq = i2c->irq;
- device_type = sec_i2c_get_driver_data(i2c, id);
-
- if (sec_pmic->dev->of_node) {
- pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- return ret;
- }
- pdata->device_type = device_type;
- }
- if (pdata) {
- sec_pmic->device_type = pdata->device_type;
- sec_pmic->irq_base = pdata->irq_base;
- sec_pmic->wakeup = pdata->wakeup;
- sec_pmic->pdata = pdata;
+
+ pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ return ret;
}
+ sec_pmic->device_type = (unsigned long)of_device_get_match_data(sec_pmic->dev);
+ sec_pmic->pdata = pdata;
+
switch (sec_pmic->device_type) {
case S2MPA01:
regmap = &s2mpa01_regmap_config;
@@ -408,9 +371,6 @@ static int sec_pmic_probe(struct i2c_client *i2c,
return ret;
}
- if (pdata && pdata->cfg_pmic_irq)
- pdata->cfg_pmic_irq();
-
sec_irq_init(sec_pmic);
pm_runtime_set_active(sec_pmic->dev);
@@ -462,7 +422,6 @@ static int sec_pmic_probe(struct i2c_client *i2c,
if (ret)
return ret;
- device_init_wakeup(sec_pmic->dev, sec_pmic->wakeup);
sec_pmic_configure(sec_pmic);
sec_pmic_dump_rev(sec_pmic);
@@ -533,21 +492,14 @@ static int sec_pmic_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
-static const struct i2c_device_id sec_pmic_id[] = {
- { "sec_pmic", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, sec_pmic_id);
-
static struct i2c_driver sec_pmic_driver = {
.driver = {
.name = "sec_pmic",
.pm = &sec_pmic_pm_ops,
- .of_match_table = of_match_ptr(sec_dt_match),
+ .of_match_table = sec_dt_match,
},
.probe = sec_pmic_probe,
.shutdown = sec_pmic_shutdown,
- .id_table = sec_pmic_id,
};
module_i2c_driver(sec_pmic_driver);
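The sec-core change above replaces the i2c_device_id lookup with of_device_get_match_data(), which hands back the .data pointer of the matching of_device_id entry. A small sketch of that idiom with placeholder demo_* names:

#include <linux/of.h>
#include <linux/of_device.h>

enum demo_variant { DEMO_V1, DEMO_V2 };

static const struct of_device_id demo_dt_match[] = {
	{ .compatible = "vendor,demo-v1", .data = (void *)DEMO_V1 },
	{ .compatible = "vendor,demo-v2", .data = (void *)DEMO_V2 },
	{ },
};

static enum demo_variant demo_get_variant(struct device *dev)
{
	/* Returns the .data of the entry that matched this device's node */
	return (enum demo_variant)(unsigned long)of_device_get_match_data(dev);
}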
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index a98c5d165039..e473c2fb42d5 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -444,7 +444,6 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
if (!sec_pmic->irq) {
dev_warn(sec_pmic->dev,
"No interrupt specified, no interrupts\n");
- sec_pmic->irq_base = 0;
return 0;
}
@@ -482,8 +481,7 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
ret = devm_regmap_add_irq_chip(sec_pmic->dev, sec_pmic->regmap_pmic,
sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- sec_pmic->irq_base, sec_irq_chip,
- &sec_pmic->irq_data);
+ 0, sec_irq_chip, &sec_pmic->irq_data);
if (ret != 0) {
dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
return ret;
diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c
index d15b3e783369..f32f1fb93e37 100644
--- a/drivers/mfd/si476x-cmd.c
+++ b/drivers/mfd/si476x-cmd.c
@@ -390,7 +390,7 @@ static int si476x_cmd_tune_seek_freq(struct si476x_core *core,
}
/**
- * si476x_cmd_func_info() - send 'FUNC_INFO' command to the device
+ * si476x_core_cmd_func_info() - send 'FUNC_INFO' command to the device
* @core: device to send the command to
* @info: struct si476x_func_info to fill all the information
* returned by the command
@@ -424,7 +424,7 @@ int si476x_core_cmd_func_info(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_cmd_func_info);
/**
- * si476x_cmd_set_property() - send 'SET_PROPERTY' command to the device
+ * si476x_core_cmd_set_property() - send 'SET_PROPERTY' command to the device
* @core: device to send the command to
* @property: property address
* @value: property value
@@ -452,7 +452,7 @@ int si476x_core_cmd_set_property(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_cmd_set_property);
/**
- * si476x_cmd_get_property() - send 'GET_PROPERTY' command to the device
+ * si476x_core_cmd_get_property() - send 'GET_PROPERTY' command to the device
* @core: device to send the command to
* @property: property address
*
@@ -481,7 +481,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property)
EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
/**
- * si476x_cmd_dig_audio_pin_cfg() - send 'DIG_AUDIO_PIN_CFG' command to
+ * si476x_core_cmd_dig_audio_pin_cfg() - send 'DIG_AUDIO_PIN_CFG' command to
* the device
* @core: device to send the command to
* @dclk: DCLK pin function configuration:
@@ -539,7 +539,7 @@ int si476x_core_cmd_dig_audio_pin_cfg(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_cmd_dig_audio_pin_cfg);
/**
- * si476x_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND'
+ * si476x_core_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND'
* @core: - device to send the command to
* @iqclk: - IQCL pin function configuration:
* SI476X_IQCLK_NOOP - do not modify the behaviour
@@ -588,7 +588,7 @@ int si476x_core_cmd_zif_pin_cfg(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_cmd_zif_pin_cfg);
/**
- * si476x_cmd_ic_link_gpo_ctl_pin_cfg - send
+ * si476x_core_cmd_ic_link_gpo_ctl_pin_cfg - send
* 'IC_LINK_GPIO_CTL_PIN_CFG' command to the device
* @core: - device to send the command to
* @icin: - ICIN pin function configuration:
@@ -645,7 +645,7 @@ int si476x_core_cmd_ic_link_gpo_ctl_pin_cfg(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_cmd_ic_link_gpo_ctl_pin_cfg);
/**
- * si476x_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the
+ * si476x_core_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the
* device
* @core: - device to send the command to
* @lrout: - LROUT pin function configuration:
@@ -674,7 +674,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ana_audio_pin_cfg);
/**
- * si476x_cmd_intb_pin_cfg - send 'INTB_PIN_CFG' command to the device
+ * si476x_core_cmd_intb_pin_cfg_a10 - send 'INTB_PIN_CFG' command to the device
* @core: - device to send the command to
* @intb: - INTB pin function configuration:
* SI476X_INTB_NOOP - do not modify the behaviour
@@ -726,12 +726,12 @@ static int si476x_core_cmd_intb_pin_cfg_a20(struct si476x_core *core,
/**
- * si476x_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the
+ * si476x_core_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the
* device
* @core: - device to send the command to
* @rsqargs: - pointer to a structure containing a group of sub-args
* relevant to sending the RSQ status command
- * @report: - all signal quality information retured by the command
+ * @report: - all signal quality information returned by the command
* (if NULL then the output of the command is ignored)
*
* Function returns 0 on success and negative error code on failure
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_am_acf_status);
/**
- * si476x_cmd_fm_seek_start - send 'FM_SEEK_START' command to the
+ * si476x_core_cmd_fm_seek_start - send 'FM_SEEK_START' command to the
* device
* @core: - device to send the command to
* @seekup: - if set the direction of the search is 'up'
@@ -884,7 +884,7 @@ int si476x_core_cmd_fm_seek_start(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start);
/**
- * si476x_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the
+ * si476x_core_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the
* device
* @core: - device to send the command to
* @status_only: - if set the data is not removed from RDSFIFO,
@@ -892,7 +892,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start);
* rest RDS data contains the last valid info received
* @mtfifo: if set the command clears RDS receive FIFO
* @intack: if set the command clears the RDSINT bit.
- * @report: - all signal quality information retured by the command
+ * @report: - all signal quality information returned by the command
* (if NULL then the output of the command is ignored)
*
* Function returns 0 on success and negative error code on failure
@@ -1032,7 +1032,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_div_status);
/**
- * si476x_cmd_am_seek_start - send 'FM_SEEK_START' command to the
+ * si476x_core_cmd_am_seek_start - send 'FM_SEEK_START' command to the
* device
* @core: - device to send the command to
* @seekup: - if set the direction of the search is 'up'
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index c1d7b845244e..a2635c2d9d1a 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -350,7 +350,7 @@ static inline void si476x_core_start_rds_drainer_once(struct si476x_core *core)
mutex_unlock(&core->rds_drainer_status_lock);
}
/**
- * si476x_drain_rds_fifo() - RDS buffer drainer.
+ * si476x_core_drain_rds_fifo() - RDS buffer drainer.
* @work: struct work_struct being passed to the function by the
* kernel.
*
@@ -454,7 +454,7 @@ int si476x_core_i2c_xfer(struct si476x_core *core,
EXPORT_SYMBOL_GPL(si476x_core_i2c_xfer);
/**
- * si476x_get_status()
+ * si476x_core_get_status()
* @core: Core device structure
*
* Get the status byte of the core device by performing one byte I2C
@@ -473,7 +473,7 @@ static int si476x_core_get_status(struct si476x_core *core)
}
/**
- * si476x_get_and_signal_status() - IRQ dispatcher
+ * si476x_core_get_and_signal_status() - IRQ dispatcher
* @core: Core device structure
*
* Dispatch the arrived interrupt request based on the value of the
@@ -532,7 +532,7 @@ static irqreturn_t si476x_core_interrupt(int irq, void *dev)
}
/**
- * si476x_firmware_version_to_revision()
+ * si476x_core_fwver_to_revision()
* @core: Core device structure
* @func: Selects the boot function of the device:
* *_BOOTLOADER - Boot loader
@@ -603,7 +603,7 @@ unknown_revision:
}
/**
- * si476x_get_revision_info()
+ * si476x_core_get_revision_info()
* @core: Core device structure
*
* Get the firmware version number of the device. It is done in
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 6d2f4a0a901d..bc0a2c38653e 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1190,13 +1190,13 @@ static int sm501_register_gpio_i2c(struct sm501_devdata *sm,
return 0;
}
-/* sm501_dbg_regs
+/* dbg_regs_show
*
* Debug attribute to attach to parent device to show core registers
*/
-static ssize_t sm501_dbg_regs(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t dbg_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
{
struct sm501_devdata *sm = dev_get_drvdata(dev) ;
unsigned int reg;
@@ -1213,7 +1213,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
}
-static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
+static DEVICE_ATTR_RO(dbg_regs);
/* sm501_init_reg
*
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 61aa020199f5..cd2f45257dc1 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -109,7 +109,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
{ "stmpe2403", STMPE2403 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, stmpe_id);
+MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id);
static struct i2c_driver stmpe_i2c_driver = {
.driver = {
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index c31927d4bbbe..ee03db0b8485 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -20,43 +20,23 @@ struct prcm_data {
};
static const struct resource sun6i_a31_ar100_clk_res[] = {
- {
- .start = 0x0,
- .end = 0x3,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0x0, 4)
};
static const struct resource sun6i_a31_apb0_clk_res[] = {
- {
- .start = 0xc,
- .end = 0xf,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0xc, 4)
};
static const struct resource sun6i_a31_apb0_gates_clk_res[] = {
- {
- .start = 0x28,
- .end = 0x2b,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0x28, 4)
};
static const struct resource sun6i_a31_ir_clk_res[] = {
- {
- .start = 0x54,
- .end = 0x57,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0x54, 4)
};
static const struct resource sun6i_a31_apb0_rstc_res[] = {
- {
- .start = 0xb0,
- .end = 0xb3,
- .flags = IORESOURCE_MEM,
- },
+ DEFINE_RES_MEM(0xb0, 4)
};
static const struct resource sun8i_codec_analog_res[] = {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index c6f139b2e0c0..765c0210cb52 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -108,6 +108,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
syscon_config.max_register = resource_size(&res) - reg_io_width;
regmap = regmap_init_mmio(NULL, base, &syscon_config);
+ kfree(syscon_config.name);
if (IS_ERR(regmap)) {
pr_err("regmap init failed\n");
ret = PTR_ERR(regmap);
@@ -144,7 +145,6 @@ err_clk:
regmap_exit(regmap);
err_regmap:
iounmap(base);
- kfree(syscon_config.name);
err_map:
kfree(syscon);
return ERR_PTR(ret);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 70da0c4ae457..5369c67e3280 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -37,16 +37,8 @@ enum {
};
static const struct resource t7l66xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_T7L66XB_MMC,
- .end = IRQ_T7L66XB_MMC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_MEM(0x800, 0x200),
+ DEFINE_RES_IRQ(IRQ_T7L66XB_MMC)
};
#define SCR_REVID 0x08 /* b Revision ID */
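DEFINE_RES_MEM() as used above takes a start address and a size, not an end address, so DEFINE_RES_MEM(0x800, 0x200) covers 0x800-0x9ff exactly like the open-coded resource it replaces. A small sketch with placeholder values:

#include <linux/ioport.h>

static const struct resource demo_resources[] = {
	DEFINE_RES_MEM(0x800, 0x200),		/* memory range 0x800-0x9ff */
	DEFINE_RES_IRQ_NAMED(5, "demo"),	/* IRQ 5, looked up by name */
};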
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index faecbca6dba3..9393ee60a656 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -623,8 +623,8 @@ static const struct mfd_cell timberdale_cells_bar2[] = {
},
};
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fw_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct timberdale_device *priv = dev_get_drvdata(dev);
@@ -632,7 +632,7 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
priv->fw.config);
}
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR_RO(fw_ver);
/*--------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index 902e33548dd0..3c4e62c3406a 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -35,11 +35,7 @@
#include <linux/slab.h>
static const struct resource tps80031_rtc_resources[] = {
- {
- .start = TPS80031_INT_RTC_ALARM,
- .end = TPS80031_INT_RTC_ALARM,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(TPS80031_INT_RTC_ALARM),
};
/* TPS80031 sub mfd devices */
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 20cf8cfe4f3b..289b556dede2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -485,7 +485,7 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
EXPORT_SYMBOL(twl_i2c_read);
/**
- * twl_regcache_bypass - Configure the regcache bypass for the regmap associated
+ * twl_set_regcache_bypass - Configure the regcache bypass for the regmap associated
* with the module
* @mod_no: module number
* @enable: Regcache bypass state
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c
index ecc9e9fc331d..6a389737c615 100644
--- a/drivers/mfd/ucb1x00-assabet.c
+++ b/drivers/mfd/ucb1x00-assabet.c
@@ -28,7 +28,7 @@ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
ucb1x00_adc_disable(ucb); \
return sprintf(buf, "%d\n", val); \
} \
-static DEVICE_ATTR(name,0444,name##_show,NULL)
+static DEVICE_ATTR_RO(name)
UCB1X00_ATTR(vbatt, UCB_ADC_INP_AD1);
UCB1X00_ATTR(vcharger, UCB_ADC_INP_AD0);
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index c274d733b656..aa19a6a4fdbf 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -17,6 +17,21 @@
#include <linux/regulator/consumer.h>
#include <linux/slimbus.h>
+#define WCD934X_REGMAP_IRQ_REG(_irq, _off, _mask) \
+ [_irq] = { \
+ .reg_offset = (_off), \
+ .mask = (_mask), \
+ .type = { \
+ .type_reg_offset = (_off), \
+ .types_supported = IRQ_TYPE_EDGE_BOTH, \
+ .type_reg_mask = (_mask), \
+ .type_level_low_val = (_mask), \
+ .type_level_high_val = (_mask), \
+ .type_falling_val = 0, \
+ .type_rising_val = 0, \
+ }, \
+ }
+
static const struct mfd_cell wcd934x_devices[] = {
{
.name = "wcd934x-codec",
@@ -30,32 +45,15 @@ static const struct mfd_cell wcd934x_devices[] = {
};
static const struct regmap_irq wcd934x_irqs[] = {
- [WCD934X_IRQ_SLIMBUS] = {
- .reg_offset = 0,
- .mask = BIT(0),
- .type = {
- .type_reg_offset = 0,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- .type_reg_mask = BIT(0),
- .type_level_low_val = BIT(0),
- .type_level_high_val = BIT(0),
- .type_falling_val = 0,
- .type_rising_val = 0,
- },
- },
- [WCD934X_IRQ_SOUNDWIRE] = {
- .reg_offset = 2,
- .mask = BIT(4),
- .type = {
- .type_reg_offset = 2,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- .type_reg_mask = BIT(4),
- .type_level_low_val = BIT(4),
- .type_level_high_val = BIT(4),
- .type_falling_val = 0,
- .type_rising_val = 0,
- },
- },
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_SLIMBUS, 0, BIT(0)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_HPH_PA_OCPL_FAULT, 0, BIT(2)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_HPH_PA_OCPR_FAULT, 0, BIT(3)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_MBHC_SW_DET, 1, BIT(0)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, 1, BIT(1)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, 1, BIT(2)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, 1, BIT(3)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, 1, BIT(4)),
+ WCD934X_REGMAP_IRQ_REG(WCD934X_IRQ_SOUNDWIRE, 2, BIT(4)),
};
static const struct regmap_irq_chip wcd934x_regmap_irq_chip = {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index bcef08f58fb3..d2f444d2ae78 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -109,7 +109,7 @@ static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
}
/**
- * wm831x_reg_unlock: Unlock user keyed registers
+ * wm831x_reg_lock: Unlock user keyed registers
*
* The WM831x has a user key preventing writes to particularly
* critical registers. This function locks those registers,
@@ -622,18 +622,8 @@ static const struct resource wm831x_dcdc1_resources[] = {
.end = WM831X_DC1_DVS_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_DC1,
- .end = WM831X_IRQ_UV_DC1,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "HC",
- .start = WM831X_IRQ_HC_DC1,
- .end = WM831X_IRQ_HC_DC1,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_DC1, "UV"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_HC_DC1, "HC"),
};
@@ -643,18 +633,8 @@ static const struct resource wm831x_dcdc2_resources[] = {
.end = WM831X_DC2_DVS_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_DC2,
- .end = WM831X_IRQ_UV_DC2,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "HC",
- .start = WM831X_IRQ_HC_DC2,
- .end = WM831X_IRQ_HC_DC2,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_DC2, "UV"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_HC_DC2, "HC"),
};
static const struct resource wm831x_dcdc3_resources[] = {
@@ -663,12 +643,7 @@ static const struct resource wm831x_dcdc3_resources[] = {
.end = WM831X_DC3_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_DC3,
- .end = WM831X_IRQ_UV_DC3,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_DC3, "UV"),
};
static const struct resource wm831x_dcdc4_resources[] = {
@@ -677,12 +652,7 @@ static const struct resource wm831x_dcdc4_resources[] = {
.end = WM831X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_DC4,
- .end = WM831X_IRQ_UV_DC4,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_DC4, "UV"),
};
static const struct resource wm8320_dcdc4_buck_resources[] = {
@@ -691,12 +661,7 @@ static const struct resource wm8320_dcdc4_buck_resources[] = {
.end = WM832X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_DC4,
- .end = WM831X_IRQ_UV_DC4,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_DC4, "UV"),
};
static const struct resource wm831x_gpio_resources[] = {
@@ -713,11 +678,7 @@ static const struct resource wm831x_isink1_resources[] = {
.end = WM831X_CURRENT_SINK_1,
.flags = IORESOURCE_REG,
},
- {
- .start = WM831X_IRQ_CS1,
- .end = WM831X_IRQ_CS1,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(WM831X_IRQ_CS1),
};
static const struct resource wm831x_isink2_resources[] = {
@@ -726,11 +687,7 @@ static const struct resource wm831x_isink2_resources[] = {
.end = WM831X_CURRENT_SINK_2,
.flags = IORESOURCE_REG,
},
- {
- .start = WM831X_IRQ_CS2,
- .end = WM831X_IRQ_CS2,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(WM831X_IRQ_CS2),
};
static const struct resource wm831x_ldo1_resources[] = {
@@ -739,12 +696,7 @@ static const struct resource wm831x_ldo1_resources[] = {
.end = WM831X_LDO1_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO1,
- .end = WM831X_IRQ_UV_LDO1,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO1, "UV"),
};
static const struct resource wm831x_ldo2_resources[] = {
@@ -753,12 +705,7 @@ static const struct resource wm831x_ldo2_resources[] = {
.end = WM831X_LDO2_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO2,
- .end = WM831X_IRQ_UV_LDO2,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO2, "UV"),
};
static const struct resource wm831x_ldo3_resources[] = {
@@ -767,12 +714,7 @@ static const struct resource wm831x_ldo3_resources[] = {
.end = WM831X_LDO3_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO3,
- .end = WM831X_IRQ_UV_LDO3,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO3, "UV"),
};
static const struct resource wm831x_ldo4_resources[] = {
@@ -781,12 +723,7 @@ static const struct resource wm831x_ldo4_resources[] = {
.end = WM831X_LDO4_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO4,
- .end = WM831X_IRQ_UV_LDO4,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO4, "UV"),
};
static const struct resource wm831x_ldo5_resources[] = {
@@ -795,12 +732,7 @@ static const struct resource wm831x_ldo5_resources[] = {
.end = WM831X_LDO5_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO5,
- .end = WM831X_IRQ_UV_LDO5,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO5, "UV"),
};
static const struct resource wm831x_ldo6_resources[] = {
@@ -809,12 +741,7 @@ static const struct resource wm831x_ldo6_resources[] = {
.end = WM831X_LDO6_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO6,
- .end = WM831X_IRQ_UV_LDO6,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO6, "UV"),
};
static const struct resource wm831x_ldo7_resources[] = {
@@ -823,12 +750,7 @@ static const struct resource wm831x_ldo7_resources[] = {
.end = WM831X_LDO7_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO7,
- .end = WM831X_IRQ_UV_LDO7,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO7, "UV"),
};
static const struct resource wm831x_ldo8_resources[] = {
@@ -837,12 +759,7 @@ static const struct resource wm831x_ldo8_resources[] = {
.end = WM831X_LDO8_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO8,
- .end = WM831X_IRQ_UV_LDO8,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO8, "UV"),
};
static const struct resource wm831x_ldo9_resources[] = {
@@ -851,12 +768,7 @@ static const struct resource wm831x_ldo9_resources[] = {
.end = WM831X_LDO9_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO9,
- .end = WM831X_IRQ_UV_LDO9,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO9, "UV"),
};
static const struct resource wm831x_ldo10_resources[] = {
@@ -865,12 +777,7 @@ static const struct resource wm831x_ldo10_resources[] = {
.end = WM831X_LDO10_SLEEP_CONTROL,
.flags = IORESOURCE_REG,
},
- {
- .name = "UV",
- .start = WM831X_IRQ_UV_LDO10,
- .end = WM831X_IRQ_UV_LDO10,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_UV_LDO10, "UV"),
};
static const struct resource wm831x_ldo11_resources[] = {
@@ -882,96 +789,27 @@ static const struct resource wm831x_ldo11_resources[] = {
};
static const struct resource wm831x_on_resources[] = {
- {
- .start = WM831X_IRQ_ON,
- .end = WM831X_IRQ_ON,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(WM831X_IRQ_ON),
};
static const struct resource wm831x_power_resources[] = {
- {
- .name = "SYSLO",
- .start = WM831X_IRQ_PPM_SYSLO,
- .end = WM831X_IRQ_PPM_SYSLO,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "PWR SRC",
- .start = WM831X_IRQ_PPM_PWR_SRC,
- .end = WM831X_IRQ_PPM_PWR_SRC,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "USB CURR",
- .start = WM831X_IRQ_PPM_USB_CURR,
- .end = WM831X_IRQ_PPM_USB_CURR,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "BATT HOT",
- .start = WM831X_IRQ_CHG_BATT_HOT,
- .end = WM831X_IRQ_CHG_BATT_HOT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "BATT COLD",
- .start = WM831X_IRQ_CHG_BATT_COLD,
- .end = WM831X_IRQ_CHG_BATT_COLD,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "BATT FAIL",
- .start = WM831X_IRQ_CHG_BATT_FAIL,
- .end = WM831X_IRQ_CHG_BATT_FAIL,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "OV",
- .start = WM831X_IRQ_CHG_OV,
- .end = WM831X_IRQ_CHG_OV,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "END",
- .start = WM831X_IRQ_CHG_END,
- .end = WM831X_IRQ_CHG_END,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "TO",
- .start = WM831X_IRQ_CHG_TO,
- .end = WM831X_IRQ_CHG_TO,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "MODE",
- .start = WM831X_IRQ_CHG_MODE,
- .end = WM831X_IRQ_CHG_MODE,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "START",
- .start = WM831X_IRQ_CHG_START,
- .end = WM831X_IRQ_CHG_START,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_PPM_SYSLO, "SYSLO"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_PPM_PWR_SRC, "PWR SRC"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_PPM_USB_CURR, "USB CURR"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_BATT_HOT, "BATT HOT"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_BATT_COLD, "BATT COLD"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_BATT_FAIL, "BATT FAIL"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_OV, "OV"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_END, "END"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_TO, "TO"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_MODE, "MODE"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_CHG_START, "START"),
};
static const struct resource wm831x_rtc_resources[] = {
- {
- .name = "PER",
- .start = WM831X_IRQ_RTC_PER,
- .end = WM831X_IRQ_RTC_PER,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "ALM",
- .start = WM831X_IRQ_RTC_ALM,
- .end = WM831X_IRQ_RTC_ALM,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_RTC_PER, "PER"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_RTC_ALM, "ALM"),
};
static const struct resource wm831x_status1_resources[] = {
@@ -991,26 +829,12 @@ static const struct resource wm831x_status2_resources[] = {
};
static const struct resource wm831x_touch_resources[] = {
- {
- .name = "TCHPD",
- .start = WM831X_IRQ_TCHPD,
- .end = WM831X_IRQ_TCHPD,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "TCHDATA",
- .start = WM831X_IRQ_TCHDATA,
- .end = WM831X_IRQ_TCHDATA,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_TCHPD, "TCHPD"),
+ DEFINE_RES_IRQ_NAMED(WM831X_IRQ_TCHDATA, "TCHDATA"),
};
static const struct resource wm831x_wdt_resources[] = {
- {
- .start = WM831X_IRQ_WDOG_TO,
- .end = WM831X_IRQ_WDOG_TO,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(WM831X_IRQ_WDOG_TO),
};
static const struct mfd_cell wm8310_devs[] = {
diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
index afe59d52dd74..25f5d9fe33a1 100644
--- a/drivers/mfd/wm831x-otp.c
+++ b/drivers/mfd/wm831x-otp.c
@@ -38,8 +38,8 @@ static int wm831x_unique_id_read(struct wm831x *wm831x, char *id)
return 0;
}
-static ssize_t wm831x_unique_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct wm831x *wm831x = dev_get_drvdata(dev);
int rval;
@@ -52,7 +52,7 @@ static ssize_t wm831x_unique_id_show(struct device *dev,
return sprintf(buf, "%*phN\n", WM831X_UNIQUE_ID_LEN, id);
}
-static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
+static DEVICE_ATTR_RO(unique_id);
int wm831x_otp_init(struct wm831x *wm831x)
{
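The sysfs conversions in this series (pcf50633, sm501, timberdale, ucb1x00, wm831x-otp) all rely on the same naming contract: DEVICE_ATTR_RO(foo) builds a 0444 attribute around a callback that must be named foo_show(), and DEVICE_ATTR_ADMIN_RO() does the same with mode 0400. A minimal sketch with placeholder names, not code from this patch:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	/* sysfs_emit() is the preferred way to format a single sysfs value */
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);		/* creates dev_attr_foo, mode 0444 */

static struct attribute *demo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* provides demo_groups for the driver */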
diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c
index 6bfea3210389..ad639ee85b2a 100644
--- a/drivers/misc/bcm-vk/bcm_vk_dev.c
+++ b/drivers/misc/bcm-vk/bcm_vk_dev.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
+#include <linux/panic_notifier.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
index f40cf08a6192..066b9ef7fcd7 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.c
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -354,8 +354,7 @@ static void bcm_vk_drain_all_pend(struct device *dev,
for (num = 0; num < chan->q_nr; num++) {
list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
- list_del(&entry->node);
- list_add_tail(&entry->node, &del_q);
+ list_move_tail(&entry->node, &del_q);
}
}
}
@@ -701,8 +700,7 @@ int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
return -EINVAL;
}
- entry = kzalloc(sizeof(*entry) +
- sizeof(struct vk_msg_blk), GFP_KERNEL);
+ entry = kzalloc(struct_size(entry, to_v_msg, 1), GFP_KERNEL);
if (!entry)
return -ENOMEM;
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h
index 4eaad84825d6..56784c8896d8 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.h
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.h
@@ -116,7 +116,7 @@ struct bcm_vk_wkent {
u32 usr_msg_id;
u32 to_v_blks;
u32 seq_num;
- struct vk_msg_blk to_v_msg[0];
+ struct vk_msg_blk to_v_msg[];
};
/* queue stats counters */
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 4d02692ecfc7..dae9eeed84a2 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -214,7 +214,7 @@ static int bcm_vk_tty_write(struct tty_struct *tty,
return count;
}
-static int bcm_vk_tty_write_room(struct tty_struct *tty)
+static unsigned int bcm_vk_tty_write_room(struct tty_struct *tty)
{
struct bcm_vk *vk = dev_get_drvdata(tty->dev);
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index cd402c89189e..de6d44a158bb 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -139,7 +139,13 @@ static void alcor_pci_init_check_aspm(struct alcor_pci_priv *priv)
u32 val32;
priv->pdev_cap_off = alcor_pci_find_cap_offset(priv, priv->pdev);
- priv->parent_cap_off = alcor_pci_find_cap_offset(priv,
+ /*
+ * A device might be attached to root complex directly and
+ * priv->parent_pdev will be NULL. In this case we don't check its
+ * capability and disable ASPM completely.
+ */
+ if (priv->parent_pdev)
+ priv->parent_cap_off = alcor_pci_find_cap_offset(priv,
priv->parent_pdev);
if ((priv->pdev_cap_off == 0) || (priv->parent_cap_off == 0)) {
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index bd3bd32333c5..3dbdce96fae0 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -569,7 +569,8 @@ static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
int rc;
cdev_init(cdev, fops);
- if ((rc = cdev_add(cdev, devt, 1))) {
+ rc = cdev_add(cdev, devt, 1);
+ if (rc) {
dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
return rc;
}
@@ -577,8 +578,8 @@ static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
dev = device_create(cxl_class, &afu->dev, devt, afu,
"afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
if (IS_ERR(dev)) {
- dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
rc = PTR_ERR(dev);
+ dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
goto err;
}
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 0f791bfdc1f5..f0a7531f354c 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -32,12 +32,13 @@ config EEPROM_AT24
will be called at24.
config EEPROM_AT25
- tristate "SPI EEPROMs from most vendors"
+ tristate "SPI EEPROMs (FRAMs) from most vendors"
depends on SPI && SYSFS
select NVMEM
select NVMEM_SYSFS
help
- Enable this driver to get read/write support to most SPI EEPROMs,
+ Enable this driver to get read/write support to most SPI EEPROMs
+ and Cypress FRAMs,
after you configure the board init code to know about each eeprom
on your target board.
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 7a6f01ace78a..305ffad131a2 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -714,23 +714,20 @@ static int at24_probe(struct i2c_client *client)
}
/*
- * If the 'label' property is not present for the AT24 EEPROM,
- * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO,
- * and this will append the 'devid' to the name of the NVMEM
- * device. This is purely legacy and the AT24 driver has always
- * defaulted to this. However, if the 'label' property is
- * present then this means that the name is specified by the
- * firmware and this name should be used verbatim and so it is
- * not necessary to append the 'devid'.
+ * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
+ * label property is set, as some platforms can have multiple eeproms
+ * with the same label and we cannot register each of those under the
+ * same name. Failing to register those eeproms would trigger a cascade
+ * of failures on such platforms.
*/
+ nvmem_config.id = NVMEM_DEVID_AUTO;
+
if (device_property_present(dev, "label")) {
- nvmem_config.id = NVMEM_DEVID_NONE;
err = device_property_read_string(dev, "label",
&nvmem_config.name);
if (err)
return err;
} else {
- nvmem_config.id = NVMEM_DEVID_AUTO;
nvmem_config.name = dev_name(dev);
}
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b76e4901b4a4..4d09b672ac3c 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
+ * and Cypress FRAMs FM25 models
*
* Copyright (C) 2006 David Brownell
*/
@@ -16,6 +17,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <linux/property.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/math.h>
/*
* NOTE: this is an *EEPROM* driver. The vagaries of product naming
@@ -27,6 +31,7 @@
* AT25M02, AT25128B
*/
+#define FM25_SN_LEN 8 /* serial number length */
struct at25_data {
struct spi_device *spi;
struct mutex lock;
@@ -34,6 +39,7 @@ struct at25_data {
unsigned addrlen;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
+ u8 sernum[FM25_SN_LEN];
};
#define AT25_WREN 0x06 /* latch the write enable */
@@ -42,6 +48,9 @@ struct at25_data {
#define AT25_WRSR 0x01 /* write status register */
#define AT25_READ 0x03 /* read byte(s) */
#define AT25_WRITE 0x02 /* write byte(s)/sector */
+#define FM25_SLEEP 0xb9 /* enter sleep mode */
+#define FM25_RDID 0x9f /* read device ID */
+#define FM25_RDSN 0xc3 /* read S/N */
#define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */
#define AT25_SR_WEN 0x02 /* write enable (latched) */
@@ -51,6 +60,8 @@ struct at25_data {
#define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */
+#define FM25_ID_LEN 9 /* ID length */
+
#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
/* Specs often allow 5 msec for a page write, sometimes 20 msec;
@@ -129,6 +140,51 @@ static int at25_ee_read(void *priv, unsigned int offset,
return status;
}
+/*
+ * read extra registers as ID or serial number
+ */
+static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
+ int len)
+{
+ int status;
+ struct spi_transfer t[2];
+ struct spi_message m;
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = &command;
+ t[0].len = 1;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = len;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&at25->lock);
+
+ status = spi_sync(at25->spi, &m);
+ dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status);
+
+ mutex_unlock(&at25->lock);
+ return status;
+}
+
+static ssize_t sernum_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct at25_data *at25;
+
+ at25 = dev_get_drvdata(dev);
+ return sysfs_emit(buf, "%*ph\n", (int)sizeof(at25->sernum), at25->sernum);
+}
+static DEVICE_ATTR_RO(sernum);
+
+static struct attribute *sernum_attrs[] = {
+ &dev_attr_sernum.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sernum);
+
static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at25_data *at25 = priv;
@@ -303,34 +359,39 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
return 0;
}
+static const struct of_device_id at25_of_match[] = {
+ { .compatible = "atmel,at25",},
+ { .compatible = "cypress,fm25",},
+ { }
+};
+MODULE_DEVICE_TABLE(of, at25_of_match);
+
static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
struct spi_eeprom chip;
int err;
int sr;
- int addrlen;
+ u8 id[FM25_ID_LEN];
+ u8 sernum[FM25_SN_LEN];
+ int i;
+ const struct of_device_id *match;
+ bool is_fram = false;
+
+ match = of_match_device(of_match_ptr(at25_of_match), &spi->dev);
+ if (match && !strcmp(match->compatible, "cypress,fm25"))
+ is_fram = true;
/* Chip description */
if (!spi->dev.platform_data) {
- err = at25_fw_to_chip(&spi->dev, &chip);
- if (err)
- return err;
+ if (!is_fram) {
+ err = at25_fw_to_chip(&spi->dev, &chip);
+ if (err)
+ return err;
+ }
} else
chip = *(struct spi_eeprom *)spi->dev.platform_data;
- /* For now we only support 8/16/24 bit addressing */
- if (chip.flags & EE_ADDR1)
- addrlen = 1;
- else if (chip.flags & EE_ADDR2)
- addrlen = 2;
- else if (chip.flags & EE_ADDR3)
- addrlen = 3;
- else {
- dev_dbg(&spi->dev, "unsupported address type\n");
- return -EINVAL;
- }
-
/* Ping the chip ... the status register is pretty portable,
* unlike probing manufacturer IDs. We do expect that system
* firmware didn't write it in the past few milliseconds!
@@ -349,9 +410,51 @@ static int at25_probe(struct spi_device *spi)
at25->chip = chip;
at25->spi = spi;
spi_set_drvdata(spi, at25);
- at25->addrlen = addrlen;
- at25->nvmem_config.type = NVMEM_TYPE_EEPROM;
+ if (is_fram) {
+ /* Get ID of chip */
+ fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
+ if (id[6] != 0xc2) {
+ dev_err(&spi->dev,
+ "Error: no Cypress FRAM (id %02x)\n", id[6]);
+ return -ENODEV;
+ }
+ /* set size found in ID */
+ if (id[7] < 0x21 || id[7] > 0x26) {
+ dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
+ return -ENODEV;
+ }
+ chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+
+ if (at25->chip.byte_len > 64 * 1024)
+ at25->chip.flags |= EE_ADDR3;
+ else
+ at25->chip.flags |= EE_ADDR2;
+
+ if (id[8]) {
+ fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
+ /* swap byte order */
+ for (i = 0; i < FM25_SN_LEN; i++)
+ at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
+ }
+
+ at25->chip.page_size = PAGE_SIZE;
+ strncpy(at25->chip.name, "fm25", sizeof(at25->chip.name));
+ }
+
+ /* For now we only support 8/16/24 bit addressing */
+ if (at25->chip.flags & EE_ADDR1)
+ at25->addrlen = 1;
+ else if (at25->chip.flags & EE_ADDR2)
+ at25->addrlen = 2;
+ else if (at25->chip.flags & EE_ADDR3)
+ at25->addrlen = 3;
+ else {
+ dev_dbg(&spi->dev, "unsupported address type\n");
+ return -EINVAL;
+ }
+
+ at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
at25->nvmem_config.read_only = chip.flags & EE_READONLY;
@@ -370,27 +473,22 @@ static int at25_probe(struct spi_device *spi)
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
- dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n",
- (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
- (chip.byte_len < 1024) ? "Byte" : "KByte",
- at25->chip.name,
- (chip.flags & EE_READONLY) ? " (readonly)" : "",
- at25->chip.page_size);
+ dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
+ (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
+ (chip.byte_len < 1024) ? "Byte" : "KByte",
+ at25->chip.name, is_fram ? "fram" : "eeprom",
+ (chip.flags & EE_READONLY) ? " (readonly)" : "",
+ at25->chip.page_size);
return 0;
}
/*-------------------------------------------------------------------------*/
-static const struct of_device_id at25_of_match[] = {
- { .compatible = "atmel,at25", },
- { }
-};
-MODULE_DEVICE_TABLE(of, at25_of_match);
-
static struct spi_driver at25_driver = {
.driver = {
.name = "at25",
.of_match_table = at25_of_match,
+ .dev_groups = sernum_groups,
},
.probe = at25_probe,
};
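
For reference, the FM25 ID decoding introduced above maps the density byte id[7] (0x21..0x26) to 2^(id[7] - 0x21 + 4) KiB, i.e. 16 KiB up to 512 KiB; for example id[7] == 0x23 yields 64 KiB, which still fits 2-byte addressing, while larger parts get EE_ADDR3. A self-contained sketch of that calculation (the helper name is illustrative, not part of the driver):

	#include <linux/math.h>
	#include <linux/types.h>

	/* Return the FM25 capacity in bytes, or 0 for an unsupported density id. */
	static unsigned int fm25_id_to_bytes(u8 density_id)
	{
		if (density_id < 0x21 || density_id > 0x26)
			return 0;

		/* 0x21 -> 16 KiB, 0x22 -> 32 KiB, ..., 0x26 -> 512 KiB */
		return int_pow(2, density_id - 0x21 + 4) * 1024;
	}
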
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 252e15ba65e1..bb9c4512c968 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -32,16 +32,17 @@
*/
#define EE1004_ADDR_SET_PAGE 0x36
-#define EE1004_EEPROM_SIZE 512
+#define EE1004_NUM_PAGES 2
#define EE1004_PAGE_SIZE 256
#define EE1004_PAGE_SHIFT 8
+#define EE1004_EEPROM_SIZE (EE1004_PAGE_SIZE * EE1004_NUM_PAGES)
/*
* Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
* from page selection to end of read.
*/
static DEFINE_MUTEX(ee1004_bus_lock);
-static struct i2c_client *ee1004_set_page[2];
+static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES];
static unsigned int ee1004_dev_count;
static int ee1004_current_page;
@@ -71,40 +72,58 @@ static int ee1004_get_current_page(void)
return 0;
}
+static int ee1004_set_current_page(struct device *dev, int page)
+{
+ int ret;
+
+ if (page == ee1004_current_page)
+ return 0;
+
+ /* Data is ignored */
+ ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00);
+ /*
+ * Don't give up just yet. Some memory modules will select the page
+ * but not ack the command. Check which page is selected now.
+ */
+ if (ret == -ENXIO && ee1004_get_current_page() == page)
+ ret = 0;
+ if (ret < 0) {
+ dev_err(dev, "Failed to select page %d (%d)\n", page, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "Selected page %d\n", page);
+ ee1004_current_page = page;
+
+ return 0;
+}
+
static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
unsigned int offset, size_t count)
{
- int status;
+ int status, page;
+
+ page = offset >> EE1004_PAGE_SHIFT;
+ offset &= (1 << EE1004_PAGE_SHIFT) - 1;
+
+ status = ee1004_set_current_page(&client->dev, page);
+ if (status)
+ return status;
- if (count > I2C_SMBUS_BLOCK_MAX)
- count = I2C_SMBUS_BLOCK_MAX;
/* Can't cross page boundaries */
- if (unlikely(offset + count > EE1004_PAGE_SIZE))
+ if (offset + count > EE1004_PAGE_SIZE)
count = EE1004_PAGE_SIZE - offset;
- status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
- count, buf);
- dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status);
-
- return status;
+ return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
-static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
size_t requested = count;
- int page;
-
- if (unlikely(!count))
- return count;
-
- page = off >> EE1004_PAGE_SHIFT;
- if (unlikely(page > 1))
- return 0;
- off &= (1 << EE1004_PAGE_SHIFT) - 1;
+ int ret = 0;
/*
* Read data from chip, protecting against concurrent access to
@@ -113,139 +132,85 @@ static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
mutex_lock(&ee1004_bus_lock);
while (count) {
- int status;
-
- /* Select page */
- if (page != ee1004_current_page) {
- /* Data is ignored */
- status = i2c_smbus_write_byte(ee1004_set_page[page],
- 0x00);
- if (status == -ENXIO) {
- /*
- * Don't give up just yet. Some memory
- * modules will select the page but not
- * ack the command. Check which page is
- * selected now.
- */
- if (ee1004_get_current_page() == page)
- status = 0;
- }
- if (status < 0) {
- dev_err(dev, "Failed to select page %d (%d)\n",
- page, status);
- mutex_unlock(&ee1004_bus_lock);
- return status;
- }
- dev_dbg(dev, "Selected page %d\n", page);
- ee1004_current_page = page;
- }
-
- status = ee1004_eeprom_read(client, buf, off, count);
- if (status < 0) {
- mutex_unlock(&ee1004_bus_lock);
- return status;
- }
- buf += status;
- off += status;
- count -= status;
+ ret = ee1004_eeprom_read(client, buf, off, count);
+ if (ret < 0)
+ goto out;
- if (off == EE1004_PAGE_SIZE) {
- page++;
- off = 0;
- }
+ buf += ret;
+ off += ret;
+ count -= ret;
}
-
+out:
mutex_unlock(&ee1004_bus_lock);
- return requested;
+ return ret < 0 ? ret : requested;
}
-static const struct bin_attribute eeprom_attr = {
- .attr = {
- .name = "eeprom",
- .mode = 0444,
- },
- .size = EE1004_EEPROM_SIZE,
- .read = ee1004_read,
+static BIN_ATTR_RO(eeprom, EE1004_EEPROM_SIZE);
+
+static struct bin_attribute *ee1004_attrs[] = {
+ &bin_attr_eeprom,
+ NULL
};
-static int ee1004_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+BIN_ATTRIBUTE_GROUPS(ee1004);
+
+static void ee1004_cleanup(int idx)
+{
+ if (--ee1004_dev_count == 0)
+ while (--idx >= 0) {
+ i2c_unregister_device(ee1004_set_page[idx]);
+ ee1004_set_page[idx] = NULL;
+ }
+}
+
+static int ee1004_probe(struct i2c_client *client)
{
int err, cnr = 0;
- const char *slow = NULL;
/* Make sure we can operate on this adapter */
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE |
- I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
- if (i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE |
- I2C_FUNC_SMBUS_READ_WORD_DATA))
- slow = "word";
- else if (i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE |
- I2C_FUNC_SMBUS_READ_BYTE_DATA))
- slow = "byte";
- else
- return -EPFNOSUPPORT;
- }
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_I2C_BLOCK) &&
+ !i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ return -EPFNOSUPPORT;
/* Use 2 dummy devices for page select command */
mutex_lock(&ee1004_bus_lock);
if (++ee1004_dev_count == 1) {
- for (cnr = 0; cnr < 2; cnr++) {
- ee1004_set_page[cnr] = i2c_new_dummy_device(client->adapter,
- EE1004_ADDR_SET_PAGE + cnr);
- if (IS_ERR(ee1004_set_page[cnr])) {
- dev_err(&client->dev,
- "address 0x%02x unavailable\n",
- EE1004_ADDR_SET_PAGE + cnr);
- err = PTR_ERR(ee1004_set_page[cnr]);
+ for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) {
+ struct i2c_client *cl;
+
+ cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr);
+ if (IS_ERR(cl)) {
+ err = PTR_ERR(cl);
goto err_clients;
}
+ ee1004_set_page[cnr] = cl;
}
- } else if (i2c_adapter_id(client->adapter) !=
- i2c_adapter_id(ee1004_set_page[0]->adapter)) {
+
+ /* Remember current page to avoid unneeded page select */
+ err = ee1004_get_current_page();
+ if (err < 0)
+ goto err_clients;
+ dev_dbg(&client->dev, "Currently selected page: %d\n", err);
+ ee1004_current_page = err;
+ } else if (client->adapter != ee1004_set_page[0]->adapter) {
dev_err(&client->dev,
"Driver only supports devices on a single I2C bus\n");
err = -EOPNOTSUPP;
goto err_clients;
}
-
- /* Remember current page to avoid unneeded page select */
- err = ee1004_get_current_page();
- if (err < 0)
- goto err_clients;
- ee1004_current_page = err;
- dev_dbg(&client->dev, "Currently selected page: %d\n",
- ee1004_current_page);
mutex_unlock(&ee1004_bus_lock);
- /* Create the sysfs eeprom file */
- err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
- if (err)
- goto err_clients_lock;
-
dev_info(&client->dev,
"%u byte EE1004-compliant SPD EEPROM, read-only\n",
EE1004_EEPROM_SIZE);
- if (slow)
- dev_notice(&client->dev,
- "Falling back to %s reads, performance will suffer\n",
- slow);
return 0;
- err_clients_lock:
- mutex_lock(&ee1004_bus_lock);
err_clients:
- if (--ee1004_dev_count == 0) {
- for (cnr--; cnr >= 0; cnr--) {
- i2c_unregister_device(ee1004_set_page[cnr]);
- ee1004_set_page[cnr] = NULL;
- }
- }
+ ee1004_cleanup(cnr);
mutex_unlock(&ee1004_bus_lock);
return err;
@@ -253,18 +218,9 @@ static int ee1004_probe(struct i2c_client *client,
static int ee1004_remove(struct i2c_client *client)
{
- int i;
-
- sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
-
/* Remove page select clients if this is the last device */
mutex_lock(&ee1004_bus_lock);
- if (--ee1004_dev_count == 0) {
- for (i = 0; i < 2; i++) {
- i2c_unregister_device(ee1004_set_page[i]);
- ee1004_set_page[i] = NULL;
- }
- }
+ ee1004_cleanup(EE1004_NUM_PAGES);
mutex_unlock(&ee1004_bus_lock);
return 0;
@@ -275,8 +231,9 @@ static int ee1004_remove(struct i2c_client *client)
static struct i2c_driver ee1004_driver = {
.driver = {
.name = "ee1004",
+ .dev_groups = ee1004_groups,
},
- .probe = ee1004_probe,
+ .probe_new = ee1004_probe,
.remove = ee1004_remove,
.id_table = ee1004_ids,
};
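
The reworked ee1004_eeprom_read() above derives the page index and in-page offset from the linear offset and clamps each read at the 256-byte page boundary (e.g. offset 300 lands in page 1 at in-page offset 44). A small stand-alone sketch of that arithmetic, with an assumed helper name:

	#define EXAMPLE_PAGE_SIZE	256
	#define EXAMPLE_PAGE_SHIFT	8

	/* Split a linear offset into (page, in-page offset) and clamp the length. */
	static size_t example_split_read(unsigned int offset, size_t count,
					 int *page, unsigned int *in_page)
	{
		*page = offset >> EXAMPLE_PAGE_SHIFT;
		*in_page = offset & ((1 << EXAMPLE_PAGE_SHIFT) - 1);

		/* Reads must not cross a page boundary. */
		if (*in_page + count > EXAMPLE_PAGE_SIZE)
			count = EXAMPLE_PAGE_SIZE - *in_page;

		return count;
	}
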
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 80114f4c80ad..29d8971ec558 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -28,14 +29,29 @@
struct eeprom_93xx46_devtype_data {
unsigned int quirks;
+ unsigned char flags;
+};
+
+static const struct eeprom_93xx46_devtype_data at93c46_data = {
+ .flags = EE_SIZE1K,
+};
+
+static const struct eeprom_93xx46_devtype_data at93c56_data = {
+ .flags = EE_SIZE2K,
+};
+
+static const struct eeprom_93xx46_devtype_data at93c66_data = {
+ .flags = EE_SIZE4K,
};
static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+ .flags = EE_SIZE1K,
.quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
};
static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+ .flags = EE_SIZE1K,
.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
};
@@ -70,6 +86,7 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
struct eeprom_93xx46_dev *edev = priv;
char *buf = val;
int err = 0;
+ int bits;
if (unlikely(off >= edev->size))
return 0;
@@ -83,21 +100,21 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
if (edev->pdata->prepare)
edev->pdata->prepare(edev);
+ /* The opcode in front of the address is three bits. */
+ bits = edev->addrlen + 3;
+
while (count) {
struct spi_message m;
struct spi_transfer t[2] = { { 0 } };
u16 cmd_addr = OP_READ << edev->addrlen;
size_t nbytes = count;
- int bits;
- if (edev->addrlen == 7) {
- cmd_addr |= off & 0x7f;
- bits = 10;
+ if (edev->pdata->flags & EE_ADDR8) {
+ cmd_addr |= off;
if (has_quirk_single_word_read(edev))
nbytes = 1;
} else {
- cmd_addr |= (off >> 1) & 0x3f;
- bits = 9;
+ cmd_addr |= (off >> 1);
if (has_quirk_single_word_read(edev))
nbytes = 2;
}
@@ -152,14 +169,14 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
int bits, ret;
u16 cmd_addr;
+ /* The opcode in front of the address is three bits. */
+ bits = edev->addrlen + 3;
+
cmd_addr = OP_START << edev->addrlen;
- if (edev->addrlen == 7) {
+ if (edev->pdata->flags & EE_ADDR8)
cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS) << 1;
- bits = 10;
- } else {
+ else
cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS);
- bits = 9;
- }
if (has_quirk_instruction_length(edev)) {
cmd_addr <<= 2;
@@ -205,15 +222,19 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
int bits, data_len, ret;
u16 cmd_addr;
+ if (unlikely(off >= edev->size))
+ return -EINVAL;
+
+ /* The opcode in front of the address is three bits. */
+ bits = edev->addrlen + 3;
+
cmd_addr = OP_WRITE << edev->addrlen;
- if (edev->addrlen == 7) {
- cmd_addr |= off & 0x7f;
- bits = 10;
+ if (edev->pdata->flags & EE_ADDR8) {
+ cmd_addr |= off;
data_len = 1;
} else {
- cmd_addr |= (off >> 1) & 0x3f;
- bits = 9;
+ cmd_addr |= (off >> 1);
data_len = 2;
}
@@ -253,7 +274,7 @@ static int eeprom_93xx46_write(void *priv, unsigned int off,
return count;
/* only write even number of bytes on 16-bit devices */
- if (edev->addrlen == 6) {
+ if (edev->pdata->flags & EE_ADDR16) {
step = 2;
count &= ~1;
}
@@ -295,14 +316,14 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
int bits, ret;
u16 cmd_addr;
+ /* The opcode in front of the address is three bits. */
+ bits = edev->addrlen + 3;
+
cmd_addr = OP_START << edev->addrlen;
- if (edev->addrlen == 7) {
+ if (edev->pdata->flags & EE_ADDR8)
cmd_addr |= ADDR_ERAL << 1;
- bits = 10;
- } else {
+ else
cmd_addr |= ADDR_ERAL;
- bits = 9;
- }
if (has_quirk_instruction_length(edev)) {
cmd_addr <<= 2;
@@ -375,8 +396,11 @@ static void select_deassert(void *context)
}
static const struct of_device_id eeprom_93xx46_of_table[] = {
- { .compatible = "eeprom-93xx46", },
+ { .compatible = "eeprom-93xx46", .data = &at93c46_data, },
+ { .compatible = "atmel,at93c46", .data = &at93c46_data, },
{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+ { .compatible = "atmel,at93c56", .data = &at93c56_data, },
+ { .compatible = "atmel,at93c66", .data = &at93c66_data, },
{ .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
{}
};
@@ -426,6 +450,7 @@ static int eeprom_93xx46_probe_dt(struct spi_device *spi)
const struct eeprom_93xx46_devtype_data *data = of_id->data;
pd->quirks = data->quirks;
+ pd->flags |= data->flags;
}
spi->dev.platform_data = pd;
@@ -455,10 +480,21 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
if (!edev)
return -ENOMEM;
+ if (pd->flags & EE_SIZE1K)
+ edev->size = 128;
+ else if (pd->flags & EE_SIZE2K)
+ edev->size = 256;
+ else if (pd->flags & EE_SIZE4K)
+ edev->size = 512;
+ else {
+ dev_err(&spi->dev, "unspecified size\n");
+ return -EINVAL;
+ }
+
if (pd->flags & EE_ADDR8)
- edev->addrlen = 7;
+ edev->addrlen = ilog2(edev->size);
else if (pd->flags & EE_ADDR16)
- edev->addrlen = 6;
+ edev->addrlen = ilog2(edev->size) - 1;
else {
dev_err(&spi->dev, "unspecified address type\n");
return -EINVAL;
@@ -469,7 +505,6 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->spi = spi;
edev->pdata = pd;
- edev->size = 128;
edev->nvmem_config.type = NVMEM_TYPE_EEPROM;
edev->nvmem_config.name = dev_name(&spi->dev);
edev->nvmem_config.dev = &spi->dev;
@@ -489,8 +524,9 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
if (IS_ERR(edev->nvmem))
return PTR_ERR(edev->nvmem);
- dev_info(&spi->dev, "%d-bit eeprom %s\n",
+ dev_info(&spi->dev, "%d-bit eeprom containing %d bytes %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
+ edev->size,
(pd->flags & EE_READONLY) ? "(readonly)" : "");
if (!(pd->flags & EE_READONLY)) {
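
With the size now coming from the EE_SIZE* flags, the address length above is derived from the device size instead of being hard-coded: an 8-bit organised 93C46 (128 bytes) needs ilog2(128) = 7 address bits, and a 16-bit organised part of the same size needs one bit fewer because it addresses 16-bit words. A hedged sketch of that derivation (helper name is illustrative):

	#include <linux/log2.h>

	/* Address bits for a 93xx46-family EEPROM of 'size' bytes. */
	static int example_93xx46_addrlen(unsigned int size, bool x16)
	{
		/* 8-bit parts address bytes; 16-bit parts address 16-bit words. */
		return x16 ? ilog2(size) - 1 : ilog2(size);
	}
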
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 81c70e5bc168..b0cff4b152da 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1,38 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This file is provided under a GPLv2 license. When using or
- * redistributing this file, you may do so under that license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, it can be found <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* IDT PCIe-switch NTB Linux driver
*
* Contact Information:
@@ -1126,11 +1095,10 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
device_for_each_child_node(dev, fwnode) {
ee_id = idt_ee_match_id(fwnode);
- if (!ee_id) {
- dev_warn(dev, "Skip unsupported EEPROM device");
- continue;
- } else
+ if (ee_id)
break;
+
+ dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode);
}
/* If there is no fwnode EEPROM device, then set zero size */
@@ -1161,6 +1129,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
else /* if (!fwnode_property_read_bool(node, "read-only")) */
pdev->eero = false;
+ fwnode_handle_put(fwnode);
dev_info(dev, "EEPROM of %d bytes found by 0x%x",
pdev->eesize, pdev->eeaddr);
}
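
The idt_89hpesx change above exits device_for_each_child_node() as soon as a supported EEPROM node is found; because breaking out of that iterator leaves a reference held on the node, the later fwnode_handle_put() is what balances it. A minimal sketch of the pattern, with a hypothetical match helper:

	#include <linux/device.h>
	#include <linux/property.h>

	static struct fwnode_handle *example_find_eeprom(struct device *dev)
	{
		struct fwnode_handle *fwnode;

		device_for_each_child_node(dev, fwnode) {
			if (example_node_is_supported(fwnode))	/* hypothetical */
				return fwnode;	/* caller must fwnode_handle_put() */

			dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode);
		}

		return NULL;
	}
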
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index af3c497defb1..80c60fb41bbc 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -556,6 +556,13 @@ out:
else if (!cs->submitted)
cs->fence->error = -EBUSY;
+ if (unlikely(cs->skip_reset_on_timeout)) {
+ dev_err(hdev->dev,
+ "Command submission %llu completed after %llu (s)\n",
+ cs->sequence,
+ div_u64(jiffies - cs->submission_time_jiffies, HZ));
+ }
+
if (cs->timestamp)
cs->fence->timestamp = ktime_get();
complete_all(&cs->fence->completion);
@@ -571,6 +578,8 @@ static void cs_timedout(struct work_struct *work)
int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
+ bool skip_reset_on_timeout = cs->skip_reset_on_timeout;
+
rc = cs_get_unless_zero(cs);
if (!rc)
return;
@@ -581,7 +590,8 @@ static void cs_timedout(struct work_struct *work)
}
/* Mark the CS is timed out so we won't try to cancel its TDR */
- cs->timedout = true;
+ if (likely(!skip_reset_on_timeout))
+ cs->timedout = true;
hdev = cs->ctx->hdev;
@@ -613,10 +623,12 @@ static void cs_timedout(struct work_struct *work)
cs_put(cs);
- if (hdev->reset_on_lockup)
- hl_device_reset(hdev, 0);
- else
- hdev->needs_reset = true;
+ if (likely(!skip_reset_on_timeout)) {
+ if (hdev->reset_on_lockup)
+ hl_device_reset(hdev, HL_RESET_TDR);
+ else
+ hdev->needs_reset = true;
+ }
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
@@ -650,6 +662,10 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->type = cs_type;
cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
cs->timeout_jiffies = timeout;
+ cs->skip_reset_on_timeout =
+ hdev->skip_reset_on_timeout ||
+ !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
+ cs->submission_time_jiffies = jiffies;
INIT_LIST_HEAD(&cs->job_list);
INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
kref_init(&cs->refcount);
@@ -1481,6 +1497,61 @@ out:
return rc;
}
+/*
+ * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
+ * If the SOB value reaches the max value, move to the other SOB reserved
+ * for the queue.
+ * Note that this function must be called while hw_queues_lock is taken.
+ */
+int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
+ struct hl_hw_sob **hw_sob, u32 count)
+{
+ struct hl_sync_stream_properties *prop;
+ struct hl_hw_sob *sob = *hw_sob, *other_sob;
+ u8 other_sob_offset;
+
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+
+ kref_get(&sob->kref);
+
+ /* check for wraparound */
+ if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
+ /*
+ * Decrement as we reached the max value.
+ * The release function won't be called here as we've
+ * just incremented the refcount right before calling this
+ * function.
+ */
+ kref_put(&sob->kref, hl_sob_reset_error);
+
+ /*
+ * Check the other SOB value; if it is still in use then fail,
+ * otherwise make the switch.
+ */
+ other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
+ other_sob = &prop->hw_sob[other_sob_offset];
+
+ if (kref_read(&other_sob->kref) != 1) {
+ dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
+ q_idx);
+ return -EINVAL;
+ }
+
+ prop->next_sob_val = 1;
+
+ /* only two SOBs are currently in use */
+ prop->curr_sob_offset = other_sob_offset;
+ *hw_sob = other_sob;
+
+ dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
+ prop->curr_sob_offset, q_idx);
+ } else {
+ prop->next_sob_val += count;
+ }
+
+ return 0;
+}
+
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
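
hl_cs_signal_sob_wraparound_handler() above ping-pongs between the two SOBs reserved per queue: when next_sob_val + count would hit HL_MAX_SOB_VAL, the extra reference taken on the current SOB is dropped again, and the switch only happens if the other SOB is completely idle, in which case counting restarts at 1. A simplified, driver-agnostic sketch of that double-buffered counter switch (all names here are illustrative, not the habanalabs API):

	#include <linux/errno.h>

	#define EXAMPLE_NR_COUNTERS	2

	struct example_counter_pair {
		unsigned int next_val;				/* next value for the active counter */
		unsigned int users[EXAMPLE_NR_COUNTERS];	/* stand-in for kref_read() */
		unsigned int curr;
		unsigned int max_val;
	};

	/* Advance by 'count'; switch to the other counter when wrapping around. */
	static int example_counter_advance(struct example_counter_pair *p,
					   unsigned int count)
	{
		unsigned int other = (p->curr + 1) % EXAMPLE_NR_COUNTERS;

		if (p->next_val + count < p->max_val) {
			p->next_val += count;
			return 0;
		}

		/* The other counter must be idle before we can switch to it. */
		if (p->users[other] != 0)
			return -EINVAL;

		p->next_val = 1;
		p->curr = other;
		return 0;
	}
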
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 62d705889ca8..19b6b045219e 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -12,7 +12,6 @@
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
- u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
int i;
/* Release all allocated pending cb's, those cb's were never
@@ -57,14 +56,6 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
/* Scrub both SRAM and DRAM */
hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
-
- if ((!hdev->pldm) && (hdev->pdev) &&
- (!hdev->asic_funcs->is_device_idle(hdev,
- idle_mask,
- HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)))
- dev_notice(hdev->dev,
- "device not idle after user context is closed (0x%llx, 0x%llx)\n",
- idle_mask[0], idle_mask[1]);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
hdev->asic_funcs->ctx_fini(ctx);
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 8381155578a0..703d79fb6f3f 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -1278,6 +1278,11 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root,
&dev_entry->blob_desc);
+ debugfs_create_x8("skip_reset_on_timeout",
+ 0644,
+ dev_entry->root,
+ &hdev->skip_reset_on_timeout);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 00e92b678828..ff4cbde289c0 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -51,6 +51,8 @@ bool hl_device_operational(struct hl_device *hdev,
static void hpriv_release(struct kref *ref)
{
+ u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
+ bool device_is_idle = true;
struct hl_fpriv *hpriv;
struct hl_device *hdev;
@@ -71,8 +73,20 @@ static void hpriv_release(struct kref *ref)
kfree(hpriv);
- if (hdev->reset_upon_device_release)
- hl_device_reset(hdev, 0);
+ if ((!hdev->pldm) && (hdev->pdev) &&
+ (!hdev->asic_funcs->is_device_idle(hdev,
+ idle_mask,
+ HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) {
+ dev_err(hdev->dev,
+ "device not idle after user context is closed (0x%llx_%llx)\n",
+ idle_mask[1], idle_mask[0]);
+
+ device_is_idle = false;
+ }
+
+ if ((hdev->reset_if_device_not_idle && !device_is_idle)
+ || hdev->reset_upon_device_release)
+ hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -118,6 +132,9 @@ static int hl_device_release(struct inode *inode, struct file *filp)
dev_warn(hdev->dev,
"Device is still in use because there are live CS and/or memory mappings\n");
+ hdev->last_open_session_duration_jif =
+ jiffies - hdev->last_successful_open_jif;
+
return 0;
}
@@ -868,7 +885,7 @@ static void device_disable_open_processes(struct hl_device *hdev)
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
- bool hard_reset, from_hard_reset_thread;
+ bool hard_reset, from_hard_reset_thread, hard_instead_soft = false;
int i, rc;
if (!hdev->init_done) {
@@ -880,11 +897,28 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
hard_reset = (flags & HL_RESET_HARD) != 0;
from_hard_reset_thread = (flags & HL_RESET_FROM_RESET_THREAD) != 0;
- if ((!hard_reset) && (!hdev->supports_soft_reset)) {
- dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
+ if (!hard_reset && !hdev->supports_soft_reset) {
+ hard_instead_soft = true;
+ hard_reset = true;
+ }
+
+ if (hdev->reset_upon_device_release &&
+ (flags & HL_RESET_DEVICE_RELEASE)) {
+ dev_dbg(hdev->dev,
+ "Perform %s-reset upon device release\n",
+ hard_reset ? "hard" : "soft");
+ goto do_reset;
+ }
+
+ if (!hard_reset && !hdev->allow_external_soft_reset) {
+ hard_instead_soft = true;
hard_reset = true;
}
+ if (hard_instead_soft)
+ dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
+
+do_reset:
/* Re-entry of reset thread */
if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
goto kill_processes;
@@ -901,6 +935,19 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
return 0;
/*
+ * 'reset cause' is updated here because getting here means it is
+ * both the first and the last time we are here for this reset
+ * ('in_reset' makes sure of it). This ensures that 'reset_cause'
+ * keeps holding the first recorded reason.
+ */
+ if (flags & HL_RESET_HEARTBEAT)
+ hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
+ else if (flags & HL_RESET_TDR)
+ hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
+ else
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+
+ /*
* if reset is due to heartbeat, device CPU is no responsive in
* which case no point sending PCI disable message to it
*/
@@ -943,9 +990,8 @@ again:
hdev->process_kill_trial_cnt = 0;
/*
- * Because the reset function can't run from interrupt or
- * from heartbeat work, we need to call the reset function
- * from a dedicated work
+ * Because the reset function can't run from heartbeat work,
+ * we need to call the reset function from a dedicated work.
*/
queue_delayed_work(hdev->device_reset_work.wq,
&hdev->device_reset_work.reset_work, 0);
@@ -1096,8 +1142,8 @@ kill_processes:
if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
dev_err(hdev->dev,
- "device is not idle (mask %#llx %#llx) after reset\n",
- idle_mask[0], idle_mask[1]);
+ "device is not idle (mask 0x%llx_%llx) after reset\n",
+ idle_mask[1], idle_mask[0]);
rc = -EIO;
goto out_err;
}
@@ -1334,8 +1380,9 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
/*
- * From this point, in case of an error, add char devices and create
- * sysfs nodes as part of the error flow, to allow debugging.
+ * From this point, override rc (=0) in case of an error to allow
+ * debugging (by adding char devices and creating sysfs nodes as part of
+ * the error flow).
*/
add_cdev_sysfs_on_err = true;
@@ -1369,7 +1416,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
hdev->asic_name,
- hdev->asic_prop.dram_size / 1024 / 1024 / 1024);
+ hdev->asic_prop.dram_size / SZ_1G);
rc = hl_vm_init(hdev);
if (rc) {
@@ -1475,6 +1522,7 @@ out_disabled:
void hl_device_fini(struct hl_device *hdev)
{
ktime_t timeout;
+ u64 reset_sec;
int i, rc;
dev_info(hdev->dev, "Removing device\n");
@@ -1482,6 +1530,11 @@ void hl_device_fini(struct hl_device *hdev)
hdev->device_fini_pending = 1;
flush_delayed_work(&hdev->device_reset_work.reset_work);
+ if (hdev->pldm)
+ reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
+ else
+ reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
+
/*
* This function is competing with the reset function, so try to
* take the reset atomic and if we are already in middle of reset,
@@ -1490,8 +1543,7 @@ void hl_device_fini(struct hl_device *hdev)
* ports, the hard reset could take between 10-30 seconds
*/
- timeout = ktime_add_us(ktime_get(),
- HL_HARD_RESET_MAX_TIMEOUT * 1000 * 1000);
+ timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
while (rc) {
usleep_range(50, 200);
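
The hl_device_reset() change above records the reset cause exactly once per reset cycle ('in_reset' guarantees re-entries skip it), mapping the caller's flags to a cause value. A compact sketch of that mapping, using the flag and cause names from the diff but an illustrative helper:

	static void example_record_reset_cause(struct hl_device *hdev, u32 flags)
	{
		if (flags & HL_RESET_HEARTBEAT)
			hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
		else if (flags & HL_RESET_TDR)
			hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
		else
			hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
	}
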
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0713b2c12d54..2e4d04ec6b53 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -9,42 +9,63 @@
#include "../include/common/hl_boot_if.h"
#include <linux/firmware.h>
+#include <linux/crc32.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
-#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
-/**
- * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
- *
- * @hdev: pointer to hl_device structure.
- * @fw_name: the firmware image name
- * @dst: IO memory mapped address space to copy firmware to
- * @src_offset: offset in src FW to copy from
- * @size: amount of bytes to copy (0 to copy the whole binary)
- *
- * Copy fw code from firmware file to device memory.
- *
- * Return: 0 on success, non-zero for failure.
- */
-int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
- void __iomem *dst, u32 src_offset, u32 size)
+#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
+
+#define FW_CPU_STATUS_POLL_INTERVAL_USEC 10000
+
+static char *extract_fw_ver_from_str(const char *fw_str)
+{
+ char *str, *fw_ver, *whitespace;
+
+ fw_ver = kmalloc(16, GFP_KERNEL);
+ if (!fw_ver)
+ return NULL;
+
+ str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
+ if (!str)
+ goto free_fw_ver;
+
+ /* Skip the fw- part */
+ str += 3;
+
+ /* Copy until the next whitespace */
+ whitespace = strnstr(str, " ", 15);
+ if (!whitespace)
+ goto free_fw_ver;
+
+ strscpy(fw_ver, str, whitespace - str + 1);
+
+ return fw_ver;
+
+free_fw_ver:
+ kfree(fw_ver);
+ return NULL;
+}
+
+static int hl_request_fw(struct hl_device *hdev,
+ const struct firmware **firmware_p,
+ const char *fw_name)
{
- const struct firmware *fw;
- const void *fw_data;
size_t fw_size;
int rc;
- rc = request_firmware(&fw, fw_name, hdev->dev);
+ rc = request_firmware(firmware_p, fw_name, hdev->dev);
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n", fw_name);
+ dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
+ fw_name, rc);
goto out;
}
- fw_size = fw->size;
+ fw_size = (*firmware_p)->size;
if ((fw_size % 4) != 0) {
dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
- fw_name, fw_size);
+ fw_name, fw_size);
rc = -EINVAL;
- goto out;
+ goto release_fw;
}
dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
@@ -54,26 +75,125 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
"FW file size %zu exceeds maximum of %u bytes\n",
fw_size, FW_FILE_MAX_SIZE);
rc = -EINVAL;
- goto out;
+ goto release_fw;
}
- if (size - src_offset > fw_size) {
+ return 0;
+
+release_fw:
+ release_firmware(*firmware_p);
+out:
+ return rc;
+}
+
+/**
+ * hl_release_firmware() - release FW
+ *
+ * @fw: fw descriptor
+ *
+ * Note: this inline function was added to serve as a comprehensive mirror of
+ * the hl_request_fw function.
+ */
+static inline void hl_release_firmware(const struct firmware *fw)
+{
+ release_firmware(fw);
+}
+
+/**
+ * hl_fw_copy_fw_to_device() - copy FW to device
+ *
+ * @hdev: pointer to hl_device structure.
+ * @fw: fw descriptor
+ * @dst: IO memory mapped address space to copy firmware to
+ * @src_offset: offset in src FW to copy from
+ * @size: amount of bytes to copy (0 to copy the whole binary)
+ *
+ * actual copy of FW binary data to device, shared by static and dynamic loaders
+ */
+static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
+ const struct firmware *fw, void __iomem *dst,
+ u32 src_offset, u32 size)
+{
+ const void *fw_data;
+
+ /* size 0 indicates to copy the whole file */
+ if (!size)
+ size = fw->size;
+
+ if (src_offset + size > fw->size) {
dev_err(hdev->dev,
"size to copy(%u) and offset(%u) are invalid\n",
size, src_offset);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (size)
- fw_size = size;
-
fw_data = (const void *) fw->data;
- memcpy_toio(dst, fw_data + src_offset, fw_size);
+ memcpy_toio(dst, fw_data + src_offset, size);
+ return 0;
+}
-out:
- release_firmware(fw);
+/**
+ * hl_fw_copy_msg_to_device() - copy message to device
+ *
+ * @hdev: pointer to hl_device structure.
+ * @msg: message
+ * @dst: IO memory mapped address space to copy firmware to
+ * @src_offset: offset in src message to copy from
+ * @size: amount of bytes to copy (0 to copy the whole message)
+ *
+ * actual copy of message data to device.
+ */
+static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
+ struct lkd_msg_comms *msg, void __iomem *dst,
+ u32 src_offset, u32 size)
+{
+ void *msg_data;
+
+ /* size 0 indicates to copy the whole message */
+ if (!size)
+ size = sizeof(struct lkd_msg_comms);
+
+ if (src_offset + size > sizeof(struct lkd_msg_comms)) {
+ dev_err(hdev->dev,
+ "size to copy(%u) and offset(%u) are invalid\n",
+ size, src_offset);
+ return -EINVAL;
+ }
+
+ msg_data = (void *) msg;
+
+ memcpy_toio(dst, msg_data + src_offset, size);
+
+ return 0;
+}
+
+/**
+ * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
+ *
+ * @hdev: pointer to hl_device structure.
+ * @fw_name: the firmware image name
+ * @dst: IO memory mapped address space to copy firmware to
+ * @src_offset: offset in src FW to copy from
+ * @size: amount of bytes to copy (0 to copy the whole binary)
+ *
+ * Copy fw code from firmware file to device memory.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
+ void __iomem *dst, u32 src_offset, u32 size)
+{
+ const struct firmware *fw;
+ int rc;
+
+ rc = hl_request_fw(hdev, &fw, fw_name);
+ if (rc)
+ return rc;
+
+ rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
+
+ hl_release_firmware(fw);
return rc;
}
@@ -91,6 +211,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result)
{
struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
u32 tmp, expected_ack_val;
@@ -117,7 +238,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
}
/* set fence to a non valid value */
- pkt->fence = UINT_MAX;
+ pkt->fence = cpu_to_le32(UINT_MAX);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
if (rc) {
@@ -125,8 +246,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto out;
}
- if (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
+ if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
else
expected_ack_val = CPUCP_PACKET_FENCE_VAL;
@@ -272,10 +392,11 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
- struct cpucp_packet hb_pkt = {};
+ struct cpucp_packet hb_pkt;
u64 result;
int rc;
+ memset(&hb_pkt, 0, sizeof(hb_pkt));
hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
@@ -284,29 +405,24 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
sizeof(hb_pkt), 0, &result);
if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
+ return -EIO;
+
+ if (le32_to_cpu(hb_pkt.status_mask) &
+ CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
+ dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
rc = -EIO;
+ }
return rc;
}
-static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
- u32 cpu_security_boot_status_reg)
+static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
+ u32 sts_val)
{
- u32 err_val, security_val;
bool err_exists = false;
- /* Some of the firmware status codes are deprecated in newer f/w
- * versions. In those versions, the errors are reported
- * in different registers. Therefore, we need to check those
- * registers and print the exact errors. Moreover, there
- * may be multiple errors, so we need to report on each error
- * separately. Some of the error codes might indicate a state
- * that is not an error per-se, but it is an error in production
- * environment
- */
- err_val = RREG32(boot_err0_reg);
if (!(err_val & CPU_BOOT_ERR0_ENABLED))
- return 0;
+ return false;
if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
dev_err(hdev->dev,
@@ -377,44 +493,120 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
err_exists = true;
}
+ if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - Failed to load preboot primary image\n");
+ /* This is a warning so we don't want it to disable the
+ * device as we have a secondary preboot image
+ */
+ err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
+ dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
+ err_exists = true;
+ }
+
if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
dev_err(hdev->dev, "Device boot error - PLL failure\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
- dev_err(hdev->dev,
- "Device boot error - device unusable\n");
- err_exists = true;
+ /* Ignore this bit, don't prevent driver loading */
+ dev_dbg(hdev->dev, "device unusable status is set\n");
+ err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
}
- security_val = RREG32(cpu_security_boot_status_reg);
- if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
- dev_dbg(hdev->dev, "Device security status %#x\n",
- security_val);
+ if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
+ dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
dev_err(hdev->dev,
- "Device boot error - unknown error 0x%08x\n",
- err_val);
+ "Device boot error - unknown ERR0 error 0x%08x\n", err_val);
err_exists = true;
}
+ /* return error only if it's in the predefined mask */
if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
lower_32_bits(hdev->boot_error_status_mask)))
+ return true;
+
+ return false;
+}
+
+/* placeholder for ERR1 as no errors defined there yet */
+static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
+ u32 sts_val)
+{
+ /*
+ * Keep this variable to preserve the logic of the function.
+ * This way, fewer modifications will be required when errors are
+ * added to DEV_ERR1.
+ */
+ bool err_exists = false;
+
+ if (!(err_val & CPU_BOOT_ERR1_ENABLED))
+ return false;
+
+ if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
+ dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
+
+ if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
+ dev_err(hdev->dev,
+ "Device boot error - unknown ERR1 error 0x%08x\n",
+ err_val);
+ err_exists = true;
+ }
+
+ /* return error only if it's in the predefined mask */
+ if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
+ upper_32_bits(hdev->boot_error_status_mask)))
+ return true;
+
+ return false;
+}
+
+static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
+ u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
+ u32 cpu_boot_dev_status1_reg)
+{
+ u32 err_val, status_val;
+ bool err_exists = false;
+
+ /* Some of the firmware status codes are deprecated in newer f/w
+ * versions. In those versions, the errors are reported
+ * in different registers. Therefore, we need to check those
+ * registers and print the exact errors. Moreover, there
+ * may be multiple errors, so we need to report on each error
+ * separately. Some of the error codes might indicate a state
+ * that is not an error per-se, but it is an error in production
+ * environment
+ */
+ err_val = RREG32(boot_err0_reg);
+ status_val = RREG32(cpu_boot_dev_status0_reg);
+ err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
+
+ err_val = RREG32(boot_err1_reg);
+ status_val = RREG32(cpu_boot_dev_status1_reg);
+ err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
+
+ if (err_exists)
return -EIO;
return 0;
}
int hl_fw_cpucp_info_get(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg,
- u32 boot_err0_reg)
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
+ u32 boot_err1_reg)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet pkt = {};
- void *cpucp_info_cpu_addr;
dma_addr_t cpucp_info_dma_addr;
+ void *cpucp_info_cpu_addr;
+ char *kernel_ver;
u64 result;
int rc;
@@ -443,7 +635,8 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
goto out;
}
- rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
+ sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
if (rc) {
dev_err(hdev->dev, "Errors in device boot\n");
goto out;
@@ -460,10 +653,27 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
goto out;
}
+ kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
+ if (kernel_ver) {
+ dev_info(hdev->dev, "Linux version %s", kernel_ver);
+ kfree(kernel_ver);
+ }
+
+ /* assume EQ code doesn't need to check eqe index */
+ hdev->event_queue.check_eqe_index = false;
+
/* Read FW application security bits again */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_app_security_map =
- RREG32(cpu_security_boot_status_reg);
+ if (hdev->asic_prop.fw_cpu_boot_dev_sts0_valid) {
+ hdev->asic_prop.fw_app_cpu_boot_dev_sts0 =
+ RREG32(sts_boot_dev_sts0_reg);
+ if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
+ hdev->event_queue.check_eqe_index = true;
+ }
+
+ if (hdev->asic_prop.fw_cpu_boot_dev_sts1_valid)
+ hdev->asic_prop.fw_app_cpu_boot_dev_sts1 =
+ RREG32(sts_boot_dev_sts1_reg);
out:
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
@@ -501,7 +711,8 @@ static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
- hdev->asic_funcs->get_msi_info((u32 *)&pkt->data);
+ memset((void *) &pkt->data, 0xFF, data_size);
+ hdev->asic_funcs->get_msi_info(pkt->data);
pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
@@ -526,13 +737,15 @@ static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
}
int hl_fw_cpucp_handshake(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg,
- u32 boot_err0_reg)
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
+ u32 boot_err1_reg)
{
int rc;
- rc = hl_fw_cpucp_info_get(hdev, cpu_security_boot_status_reg,
- boot_err0_reg);
+ rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
+ sts_boot_dev_sts1_reg, boot_err0_reg,
+ boot_err1_reg);
if (rc)
return rc;
@@ -667,8 +880,8 @@ int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
bool dynamic_pll;
int fw_pll_idx;
- dynamic_pll = prop->fw_security_status_valid &&
- (prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
+ dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_DYN_PLL_EN);
if (!dynamic_pll) {
/*
@@ -759,6 +972,47 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
return rc;
}
+void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
+{
+ struct static_fw_load_mgr *static_loader =
+ &hdev->fw_loader.static_loader;
+ int rc;
+
+ if (hdev->asic_prop.dynamic_fw_load) {
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
+ COMMS_RST_DEV, 0, false,
+ hdev->fw_loader.cpu_timeout);
+ if (rc)
+ dev_warn(hdev->dev, "Failed sending COMMS_RST_DEV\n");
+ } else {
+ WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
+ }
+}
+
+void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
+{
+ struct static_fw_load_mgr *static_loader =
+ &hdev->fw_loader.static_loader;
+ int rc;
+
+ if (hdev->device_cpu_is_halted)
+ return;
+
+ /* Stop device CPU to make sure nothing bad happens */
+ if (hdev->asic_prop.dynamic_fw_load) {
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
+ COMMS_GOTO_WFE, 0, true,
+ hdev->fw_loader.cpu_timeout);
+ if (rc)
+ dev_warn(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
+ } else {
+ WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
+ msleep(static_loader->cpu_reset_wait_msec);
+ }
+
+ hdev->device_cpu_is_halted = true;
+}
+
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
/* Some of the status codes below are deprecated in newer f/w
@@ -767,63 +1021,59 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
switch (status) {
case CPU_BOOT_STATUS_NA:
dev_err(hdev->dev,
- "Device boot error - BTL did NOT run\n");
+ "Device boot progress - BTL did NOT run\n");
break;
case CPU_BOOT_STATUS_IN_WFE:
dev_err(hdev->dev,
- "Device boot error - Stuck inside WFE loop\n");
+ "Device boot progress - Stuck inside WFE loop\n");
break;
case CPU_BOOT_STATUS_IN_BTL:
dev_err(hdev->dev,
- "Device boot error - Stuck in BTL\n");
+ "Device boot progress - Stuck in BTL\n");
break;
case CPU_BOOT_STATUS_IN_PREBOOT:
dev_err(hdev->dev,
- "Device boot error - Stuck in Preboot\n");
+ "Device boot progress - Stuck in Preboot\n");
break;
case CPU_BOOT_STATUS_IN_SPL:
dev_err(hdev->dev,
- "Device boot error - Stuck in SPL\n");
+ "Device boot progress - Stuck in SPL\n");
break;
case CPU_BOOT_STATUS_IN_UBOOT:
dev_err(hdev->dev,
- "Device boot error - Stuck in u-boot\n");
+ "Device boot progress - Stuck in u-boot\n");
break;
case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
+ "Device boot progress - DRAM initialization failed\n");
break;
case CPU_BOOT_STATUS_UBOOT_NOT_READY:
dev_err(hdev->dev,
- "Device boot error - u-boot stopped by user\n");
+ "Device boot progress - Cannot boot\n");
break;
case CPU_BOOT_STATUS_TS_INIT_FAIL:
dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
+ "Device boot progress - Thermal Sensor initialization failed\n");
break;
default:
dev_err(hdev->dev,
- "Device boot error - Invalid status code %d\n",
+ "Device boot progress - Invalid status code %d\n",
status);
break;
}
}
-int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
- u32 timeout)
+static int hl_fw_read_preboot_caps(struct hl_device *hdev,
+ u32 cpu_boot_status_reg,
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg,
+ u32 boot_err0_reg, u32 boot_err1_reg,
+ u32 timeout)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 status, security_status;
+ u32 status, reg_val;
int rc;
- /* pldm was added for cases in which we use preboot on pldm and want
- * to load boot fit, but we can't wait for preboot because it runs
- * very slowly
- */
- if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm)
- return 0;
-
/* Need to check two possible scenarios:
*
* CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
@@ -842,29 +1092,145 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_SRAM_AVAIL) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
- 10000,
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
timeout);
if (rc) {
- dev_err(hdev->dev, "Failed to read preboot version\n");
+ dev_err(hdev->dev, "CPU boot ready status timeout\n");
detect_cpu_boot_status(hdev, status);
/* If we read all FF, then something is totally wrong, no point
* of reading specific errors
*/
if (status != -1)
- fw_read_errors(hdev, boot_err0_reg,
- cpu_security_boot_status_reg);
+ fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
+ sts_boot_dev_sts0_reg,
+ sts_boot_dev_sts1_reg);
return -EIO;
}
- rc = hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
- if (rc)
- return rc;
+ /*
+ * The DEV_STS* registers contain FW capabilities/features.
+ * We can rely on these registers only if the CPU_BOOT_DEV_STS*_ENABLED
+ * bit is set.
+ * On the first read of such a register we store its value ONLY if the
+ * register is enabled (which will be propagated to the next stages)
+ * and also mark the register as valid.
+ * If it is not enabled, the stored value is left 0, i.e. all
+ * caps/features are off.
+ */
+ reg_val = RREG32(sts_boot_dev_sts0_reg);
+ if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
+ prop->fw_cpu_boot_dev_sts0_valid = true;
+ prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
+ }
+
+ reg_val = RREG32(sts_boot_dev_sts1_reg);
+ if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
+ prop->fw_cpu_boot_dev_sts1_valid = true;
+ prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
+ }
- security_status = RREG32(cpu_security_boot_status_reg);
+ prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_LD_COM_EN);
+
+ /* initialize FW loader once we know what load protocol is used */
+ hdev->asic_funcs->init_firmware_loader(hdev);
+
+ dev_dbg(hdev->dev, "Attempting %s FW load\n",
+ prop->dynamic_fw_load ? "dynamic" : "legacy");
+ return 0;
+}
- /* We read security status multiple times during boot:
+static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
+ enum hl_fw_component fwc)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct fw_load_mgr *fw_loader = &hdev->fw_loader;
+ struct static_fw_load_mgr *static_loader;
+ char *dest, *boot_ver, *preboot_ver;
+ u32 ver_off, limit;
+ const char *name;
+ char btl_ver[32];
+
+ static_loader = &hdev->fw_loader.static_loader;
+
+ switch (fwc) {
+ case FW_COMP_BOOT_FIT:
+ ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
+ dest = prop->uboot_ver;
+ name = "Boot-fit";
+ limit = static_loader->boot_fit_version_max_off;
+ break;
+ case FW_COMP_PREBOOT:
+ ver_off = RREG32(static_loader->preboot_version_offset_reg);
+ dest = prop->preboot_ver;
+ name = "Preboot";
+ limit = static_loader->preboot_version_max_off;
+ break;
+ default:
+ dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
+ return -EIO;
+ }
+
+ ver_off &= static_loader->sram_offset_mask;
+
+ if (ver_off < limit) {
+ memcpy_fromio(dest,
+ hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
+ VERSION_MAX_LEN);
+ } else {
+ dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
+ name, ver_off);
+ strscpy(dest, "unavailable", VERSION_MAX_LEN);
+ return -EIO;
+ }
+
+ if (fwc == FW_COMP_BOOT_FIT) {
+ boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
+ if (boot_ver) {
+ dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
+ kfree(boot_ver);
+ }
+ } else if (fwc == FW_COMP_PREBOOT) {
+ preboot_ver = strnstr(prop->preboot_ver, "Preboot",
+ VERSION_MAX_LEN);
+ if (preboot_ver && preboot_ver != prop->preboot_ver) {
+ strscpy(btl_ver, prop->preboot_ver,
+ min((int) (preboot_ver - prop->preboot_ver),
+ 31));
+ dev_info(hdev->dev, "%s\n", btl_ver);
+ }
+
+ preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
+ if (preboot_ver) {
+ dev_info(hdev->dev, "preboot version %s\n",
+ preboot_ver);
+ kfree(preboot_ver);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hl_fw_preboot_update_state - update internal data structures during
+ * handshake with preboot
+ *
+ * @hdev: pointer to the habanalabs device structure
+ */
+static void hl_fw_preboot_update_state(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
+
+ cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
+ cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
+
+ /* We read boot_dev_sts registers multiple times during boot:
* 1. preboot - a. Check whether the security status bits are valid
* b. Check whether fw security is enabled
* c. Check whether hard reset is done by preboot
@@ -874,48 +1240,1121 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
* b. Check whether hard reset is done by fw app
*
* Preboot:
- * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
+ * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED). If set, then-
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
+ * If set, mark the GIC controller as disabled.
*/
- if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- prop->fw_security_status_valid = 1;
+ prop->hard_reset_done_by_fw =
+ !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
- /* FW security should be derived from PCI ID, we keep this
- * check for backward compatibility
- */
- if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
- prop->fw_security_disabled = false;
+ dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
+ cpu_boot_dev_sts0);
+
+ dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
+ cpu_boot_dev_sts1);
+
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
+ dev_dbg(hdev->dev, "firmware-level security is %s\n",
+ prop->fw_security_enabled ? "enabled" : "disabled");
+
+ dev_dbg(hdev->dev, "GIC controller is %s\n",
+ prop->gic_interrupts_enable ? "enabled" : "disabled");
+}
+
+static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
+ u32 boot_err1_reg, u32 timeout)
+{
+ int rc;
+
+ /* pldm was added for cases in which we use preboot on pldm and want
+ * to load boot fit, but we can't wait for preboot because it runs
+ * very slowly
+ */
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm)
+ return 0;
+
+ /*
+ * In order to determine the boot method (static vs. dynamic) we need
+ * to read the boot caps register
+ */
+ rc = hl_fw_read_preboot_caps(hdev, cpu_boot_status_reg,
+ sts_boot_dev_sts0_reg,
+ sts_boot_dev_sts1_reg, boot_err0_reg,
+ boot_err1_reg, timeout);
+ if (rc)
+ return rc;
+
+ hl_fw_preboot_update_state(hdev);
- if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ /* no need to read preboot status in dynamic load */
+ if (hdev->asic_prop.dynamic_fw_load)
+ return 0;
+
+ return hl_fw_static_read_preboot_status(hdev);
+}
+
+/* associate string with COMMS status */
+static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
+ [COMMS_STS_NOOP] = "NOOP",
+ [COMMS_STS_ACK] = "ACK",
+ [COMMS_STS_OK] = "OK",
+ [COMMS_STS_ERR] = "ERR",
+ [COMMS_STS_VALID_ERR] = "VALID_ERR",
+ [COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
+};
+
+/**
+ * hl_fw_dynamic_report_error_status - report error status
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @status: value of FW status register
+ * @expected_status: the expected status
+ */
+static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
+ u32 status,
+ enum comms_sts expected_status)
+{
+ enum comms_sts comm_status =
+ FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
+
+ if (comm_status < COMMS_STS_INVLD_LAST)
+ dev_err(hdev->dev, "Device status %s, expected status: %s\n",
+ hl_dynamic_fw_status_str[comm_status],
+ hl_dynamic_fw_status_str[expected_status]);
+ else
+ dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
+ comm_status,
+ hl_dynamic_fw_status_str[expected_status]);
+}
+
+/**
+ * hl_fw_dynamic_send_cmd - send LKD to FW cmd
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @cmd: LKD to FW cmd code
+ * @size: size of next FW component to be loaded (0 if not necessary)
+ *
+ * The exact LKD to FW command layout is defined in struct comms_command.
+ * note: the size argument is used only when the next FW component should be
+ * loaded, otherwise it shall be 0. the size is used by the FW in later
+ * protocol stages; when the command is sent it only indicates the amount of
+ * memory the FW should allocate to receive the next boot component.
+ */
+static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ enum comms_cmd cmd, unsigned int size)
+{
+ struct cpu_dyn_regs *dyn_regs;
+ u32 val;
+
+ dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+
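+ /*
+ * pack the command code and the size into a single register value,
+ * as laid out in struct comms_command
+ */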
+ val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
+ val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
+
+ WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
+}
+
+/**
+ * hl_fw_dynamic_extract_fw_response - update the FW response
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @response: FW response
+ * @status: the status read from CPU status register
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ struct fw_response *response,
+ u32 status)
+{
+ response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
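+ /* the offset is reported in aligned units, shift back to a byte offset */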
+ response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
+ COMMS_STATUS_OFFSET_ALIGN_SHIFT;
+ response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
+
+ if ((response->ram_type != COMMS_SRAM) &&
+ (response->ram_type != COMMS_DRAM)) {
+ dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
+ response->ram_type);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @expected_status: expected status to wait for
+ * @timeout: timeout for status wait
+ *
+ * @return 0 on success, otherwise non-zero error code
+ *
+ * Waiting for status from FW involves polling the FW status register until
+ * the expected status is received or a timeout occurs (whichever comes first).
+ */
+static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ enum comms_sts expected_status,
+ u32 timeout)
+{
+ struct cpu_dyn_regs *dyn_regs;
+ u32 status;
+ int rc;
+
+ dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+
+ /* Wait for expected status */
+ rc = hl_poll_timeout(
+ hdev,
+ le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
+ status,
+ FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ timeout);
+
+ if (rc) {
+ hl_fw_dynamic_report_error_status(hdev, status,
+ expected_status);
+ return -EIO;
+ }
+
+ /*
+ * skip storing FW response for NOOP to preserve the actual desired
+ * FW status
+ */
+ if (expected_status == COMMS_STS_NOOP)
+ return 0;
+
+ rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
+ &fw_loader->dynamic_loader.response,
+ status);
+ return rc;
+}
+
+/**
+ * hl_fw_dynamic_send_clear_cmd - send clear command to FW
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ *
+ * After a command cycle between LKD and FW CPU (i.e. LKD got the expected
+ * status from FW) we need to clear the CPU status register in order to avoid
+ * garbage between command cycles.
+ * This is done by sending a clear command and polling the CPU to LKD status
+ * register until it holds the NOOP status.
+ */
+static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader)
+{
+ hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
+
+ return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
+ fw_loader->cpu_timeout);
+}
+
+/**
+ * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @cmd: LKD to FW cmd code
+ * @size: size of next FW component to be loaded (0 if not necessary)
+ * @wait_ok: if true also wait for OK response from FW
+ * @timeout: timeout for status wait
+ *
+ * @return 0 on success, otherwise non-zero error code
+ *
+ * brief:
+ * when sending protocol command we have the following steps:
+ * - send clear (clear command and verify clear status register)
+ * - send the actual protocol command
+ * - wait for ACK on the protocol command
+ * - send clear
+ * - send NOOP
+ * if, in addition, the specific protocol command should wait for OK then:
+ * - wait for OK
+ * - send clear
+ * - send NOOP
+ *
+ * NOTES:
+ * send clear: this is necessary in order to clear the status register and
+ * avoid leftovers between commands
+ * NOOP command: necessary to avoid the FW looping on the clear command
+ */
+int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ enum comms_cmd cmd, unsigned int size,
+ bool wait_ok, u32 timeout)
+{
+ int rc;
+
+ /* first send clear command to clean former commands */
+ rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
+ if (rc)
+ return rc;
+
+ /* send the actual command */
+ hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
+
+ /* wait for ACK for the command */
+ rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
+ timeout);
+ if (rc)
+ return rc;
+
+ /* clear command to prepare for NOOP command */
+ rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
+ if (rc)
+ return rc;
+
+ /* send the actual NOOP command */
+ hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
+
+ if (!wait_ok)
+ return 0;
+
+ rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
+ timeout);
+ if (rc)
+ return rc;
+
+ /* clear command to prepare for NOOP command */
+ rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
+ if (rc)
+ return rc;
+
+ /* send the actual NOOP command */
+ hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
+
+ return 0;
+}
+
+/**
+ * hl_fw_compat_crc32 - CRC compatible with FW
+ *
+ * @data: pointer to the data
+ * @size: size of the data
+ *
+ * @return the CRC32 result
+ *
+ * NOTE: the kernel's CRC32 differs from the standard CRC32 calculation.
+ * in order to be aligned we need to flip the bits of both the input
+ * initial CRC and the kernel's CRC32 result.
+ * in addition, both sides use an initial CRC of 0.
+ */
+static u32 hl_fw_compat_crc32(u8 *data, size_t size)
+{
+ return ~crc32_le(~((u32)0), data, size);
+}
+
+/**
+ * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
+ * transfer (image or descriptor) between
+ * host and FW
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @addr: device address of memory transfer
+ * @size: memory transfer size
+ * @region: PCI memory region
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
+ u64 addr, size_t size,
+ struct pci_mem_region *region)
+{
+ u64 end_addr;
+
+ /* now make sure that the memory transfer is within region's bounds */
+ end_addr = addr + size;
+ if (end_addr >= region->region_base + region->region_size) {
+ dev_err(hdev->dev,
+ "dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
+ end_addr);
+ return -EIO;
+ }
+
+ /*
+ * now make sure memory transfer is within predefined BAR bounds.
+ * this is to make sure we do not need to set the bar (e.g. for DRAM
+ * memory transfers)
+ */
+ if (end_addr >= region->region_base - region->offset_in_bar +
+ region->bar_size) {
+ dev_err(hdev->dev,
+ "FW image beyond PCI BAR bounds\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hl_fw_dynamic_validate_descriptor - validate FW descriptor
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @fw_desc: the descriptor from FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ struct lkd_fw_comms_desc *fw_desc)
+{
+ struct pci_mem_region *region;
+ enum pci_region region_id;
+ size_t data_size;
+ u32 data_crc32;
+ u8 *data_ptr;
+ u64 addr;
+ int rc;
+
+ if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC) {
+ dev_err(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
+ fw_desc->header.magic);
+ return -EIO;
+ }
+
+ if (fw_desc->header.version != HL_COMMS_DESC_VER) {
+ dev_err(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
+ fw_desc->header.version);
+ return -EIO;
+ }
+
+ /*
+ * calc CRC32 of data without header.
+ * note that no alignment/stride address issues here as all structures
+ * are 64 bit padded
+ */
+ data_size = sizeof(struct lkd_fw_comms_desc) -
+ sizeof(struct comms_desc_header);
+ data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
+
+ if (le16_to_cpu(fw_desc->header.size) != data_size) {
+ dev_err(hdev->dev,
+ "Invalid descriptor size 0x%x, expected size 0x%zx\n",
+ le16_to_cpu(fw_desc->header.size), data_size);
+ return -EIO;
+ }
+
+ data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
+
+ if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
+ dev_err(hdev->dev,
+ "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
+ data_crc32, fw_desc->header.crc32);
+ return -EIO;
+ }
+
+ /* find memory region to which to copy the image */
+ addr = le64_to_cpu(fw_desc->img_addr);
+ region_id = hl_get_pci_memory_region(hdev, addr);
+ if ((region_id != PCI_REGION_SRAM) &&
+ ((region_id != PCI_REGION_DRAM))) {
+ dev_err(hdev->dev,
+ "Invalid region to copy FW image address=%llx\n", addr);
+ return -EIO;
+ }
+
+ region = &hdev->pci_mem_region[region_id];
+
+ /* store the region for the copy stage */
+ fw_loader->dynamic_loader.image_region = region;
+
+ /*
+ * here we know that the start address is valid, now make sure that the
+ * image is within region's bounds
+ */
+ rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
+ fw_loader->dynamic_loader.fw_image_size,
+ region);
+ if (rc) {
+ dev_err(hdev->dev,
+ "invalid mem transfer request for FW image\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
+ struct fw_response *response,
+ struct pci_mem_region *region)
+{
+ u64 device_addr;
+ int rc;
+
+ device_addr = region->region_base + response->ram_offset;
+
+ /*
+ * validate that the descriptor is within region's bounds
+ * Note that as the start address was supplied according to the RAM
+ * type, testing only the end address is enough
+ */
+ rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
+ sizeof(struct lkd_fw_comms_desc),
+ region);
+ return rc;
+}
+
+/**
+ * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader)
+{
+ struct lkd_fw_comms_desc *fw_desc;
+ struct pci_mem_region *region;
+ struct fw_response *response;
+ enum pci_region region_id;
+ void __iomem *src;
+ int rc;
+
+ fw_desc = &fw_loader->dynamic_loader.comm_desc;
+ response = &fw_loader->dynamic_loader.response;
+
+ region_id = (response->ram_type == COMMS_SRAM) ?
+ PCI_REGION_SRAM : PCI_REGION_DRAM;
+
+ region = &hdev->pci_mem_region[region_id];
+
+ rc = hl_fw_dynamic_validate_response(hdev, response, region);
+ if (rc) {
+ dev_err(hdev->dev,
+ "invalid mem transfer request for FW descriptor\n");
+ return rc;
+ }
+
+ /* extract the address to copy the descriptor from */
+ src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
+ response->ram_offset;
+ memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
+
+ return hl_fw_dynamic_validate_descriptor(hdev, fw_loader, fw_desc);
+}
+
+/**
+ * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @next_image_size: size to allocate for next FW component
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ size_t next_image_size)
+{
+ int rc;
+
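+ /*
+ * Ask FW to prepare the communication descriptor (and allocate room
+ * for the next image); FW reports the descriptor location via its
+ * status register, which is read and validated below.
+ */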
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
+ next_image_size, true,
+ fw_loader->cpu_timeout);
+ if (rc)
+ return rc;
+
+ return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
+}
+
+/**
+ * hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fwc: the firmware component
+ * @fw_version: fw component's version string
+ */
+static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
+ enum hl_fw_component fwc,
+ const char *fw_version)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ char *preboot_ver, *boot_ver;
+ char btl_ver[32];
+
+ switch (fwc) {
+ case FW_COMP_BOOT_FIT:
+ strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
+ boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
+ if (boot_ver) {
+ dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
+ kfree(boot_ver);
+ }
+
+ break;
+ case FW_COMP_PREBOOT:
+ strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
+ preboot_ver = strnstr(prop->preboot_ver, "Preboot",
+ VERSION_MAX_LEN);
+ if (preboot_ver && preboot_ver != prop->preboot_ver) {
+ strscpy(btl_ver, prop->preboot_ver,
+ min((int) (preboot_ver - prop->preboot_ver),
+ 31));
+ dev_info(hdev->dev, "%s\n", btl_ver);
+ }
+
+ preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
+ if (preboot_ver) {
+ dev_info(hdev->dev, "preboot version %s\n",
+ preboot_ver);
+ kfree(preboot_ver);
+ }
+
+ break;
+ default:
+ dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
+ return;
+ }
+}
+
+/**
+ * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw: the firmware image to copy
+ * @fw_loader: managing structure for loading device's FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
+ const struct firmware *fw,
+ struct fw_load_mgr *fw_loader)
+{
+ struct lkd_fw_comms_desc *fw_desc;
+ struct pci_mem_region *region;
+ void __iomem *dest;
+ u64 addr;
+ int rc;
+
+ fw_desc = &fw_loader->dynamic_loader.comm_desc;
+ addr = le64_to_cpu(fw_desc->img_addr);
+
+ /* find memory region to which to copy the image */
+ region = fw_loader->dynamic_loader.image_region;
+
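+ /* translate the device address reported by FW into a host BAR address */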
+ dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
+ (addr - region->region_base);
+
+ rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
+ fw_loader->boot_fit_img.src_off,
+ fw_loader->boot_fit_img.copy_size);
+
+ return rc;
+}
+
+/**
+ * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @msg: message
+ * @fw_loader: managing structure for loading device's FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
+ struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
+{
+ struct lkd_fw_comms_desc *fw_desc;
+ struct pci_mem_region *region;
+ void __iomem *dest;
+ u64 addr;
+ int rc;
+
+ fw_desc = &fw_loader->dynamic_loader.comm_desc;
+ addr = le64_to_cpu(fw_desc->img_addr);
+
+ /* find memory region to which to copy the message */
+ region = fw_loader->dynamic_loader.image_region;
+
+ dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
+ (addr - region->region_base);
+
+ rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
+
+ return rc;
+}
+
+/**
+ * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
+ * is loaded
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
+ * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
+ */
+static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
+ u32 cpu_boot_dev_sts0_reg,
+ u32 cpu_boot_dev_sts1_reg)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
+ /* Read boot_cpu status bits */
+ if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
+ prop->fw_bootfit_cpu_boot_dev_sts0 =
+ RREG32(cpu_boot_dev_sts0_reg);
+
+ if (prop->fw_bootfit_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
prop->hard_reset_done_by_fw = true;
- } else {
- prop->fw_security_status_valid = 0;
+
+ dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
+ prop->fw_bootfit_cpu_boot_dev_sts0);
}
- dev_dbg(hdev->dev, "Firmware preboot security status %#x\n",
- security_status);
+ if (prop->fw_cpu_boot_dev_sts1_valid) {
+ prop->fw_bootfit_cpu_boot_dev_sts1 =
+ RREG32(cpu_boot_dev_sts1_reg);
- dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
+ prop->fw_bootfit_cpu_boot_dev_sts1);
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+}
+
+static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
+{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+
+ /* Check whether all 3 interrupt interfaces are set, if not use a
+ * single interface
+ */
+ if (!hdev->asic_prop.gic_interrupts_enable &&
+ !(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
+ dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_irq_ctrl;
+ dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_irq_ctrl;
+
+ dev_warn(hdev->dev,
+ "Using a single interrupt interface towards cpucp");
+ }
+}
+
+/**
+ * hl_fw_dynamic_load_image - load FW image using dynamic protocol
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @load_fwc: the FW component to be loaded
+ * @img_ld_timeout: image load timeout
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_load_image(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ enum hl_fw_component load_fwc,
+ u32 img_ld_timeout)
+{
+ enum hl_fw_component cur_fwc;
+ const struct firmware *fw;
+ char *fw_name;
+ int rc = 0;
+
+ /*
+ * when loading image we have one of 2 scenarios:
+ * 1. current FW component is preboot and we want to load boot-fit
+ * 2. current FW component is boot-fit and we want to load linux
+ */
+ if (load_fwc == FW_COMP_BOOT_FIT) {
+ cur_fwc = FW_COMP_PREBOOT;
+ fw_name = fw_loader->boot_fit_img.image_name;
+ } else {
+ cur_fwc = FW_COMP_BOOT_FIT;
+ fw_name = fw_loader->linux_img.image_name;
+ }
+
+ /* request FW in order to communicate to FW the size to be allocated */
+ rc = hl_request_fw(hdev, &fw, fw_name);
+ if (rc)
+ return rc;
+
+ /* store the image size for future validation */
+ fw_loader->dynamic_loader.fw_image_size = fw->size;
+
+ rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
+ if (rc)
+ goto release_fw;
+
+ /* read the version of the currently running FW component */
+ hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
+ fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
+
+ /* update state according to boot stage */
+ if (cur_fwc == FW_COMP_BOOT_FIT) {
+ struct cpu_dyn_regs *dyn_regs;
+
+ dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+ hl_fw_boot_fit_update_state(hdev,
+ le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
+ le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
+ }
+
+ /* copy boot fit to space allocated by FW */
+ rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
+ if (rc)
+ goto release_fw;
- dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
+ 0, true,
+ fw_loader->cpu_timeout);
+ if (rc)
+ goto release_fw;
+
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
+ 0, false,
+ img_ld_timeout);
+release_fw:
+ hl_release_firmware(fw);
+ return rc;
+}
+
+static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader)
+{
+ struct dynamic_fw_load_mgr *dyn_loader;
+ u32 status;
+ int rc;
+
+ dyn_loader = &fw_loader->dynamic_loader;
+
+ /* Make sure CPU boot-loader is running */
+ rc = hl_poll_timeout(
+ hdev,
+ le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
+ status,
+ (status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
+ (status == CPU_BOOT_STATUS_READY_TO_BOOT),
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ dyn_loader->wait_for_bl_timeout);
+ if (rc) {
+ dev_err(hdev->dev, "failed to wait for boot\n");
+ return rc;
+ }
+
+ dev_dbg(hdev->dev, "uboot status = %d\n", status);
return 0;
}
-int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
- u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
- bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
+static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct dynamic_fw_load_mgr *dyn_loader;
u32 status;
int rc;
+ dyn_loader = &fw_loader->dynamic_loader;
+
+ /* Make sure CPU boot-loader is running */
+
+ rc = hl_poll_timeout(
+ hdev,
+ le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
+ status,
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ fw_loader->cpu_timeout);
+ if (rc) {
+ dev_err(hdev->dev, "failed to wait for Linux\n");
+ return rc;
+ }
+
+ dev_dbg(hdev->dev, "Boot status = %d\n", status);
+ return 0;
+}
+
+/**
+ * hl_fw_linux_update_state - update internal data structures after Linux
+ * is loaded.
+ * Note: Linux initialization is comprised mainly
+ * of two stages - loading kernel (SRAM_AVAIL)
+ * & loading ARMCP.
+ * Therefore reading boot device status in any of
+ * these stages might result in different values.
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
+ * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
+ */
+static void hl_fw_linux_update_state(struct hl_device *hdev,
+ u32 cpu_boot_dev_sts0_reg,
+ u32 cpu_boot_dev_sts1_reg)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ hdev->fw_loader.linux_loaded = true;
+
+ /* Clear reset status since we need to read again from app */
+ prop->hard_reset_done_by_fw = false;
+
+ /* Read FW application security bits */
+ if (prop->fw_cpu_boot_dev_sts0_valid) {
+ prop->fw_app_cpu_boot_dev_sts0 =
+ RREG32(cpu_boot_dev_sts0_reg);
+
+ if (prop->fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+
+ if (prop->fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
+ prop->gic_interrupts_enable = false;
+
+ dev_dbg(hdev->dev,
+ "Firmware application CPU status0 %#x\n",
+ prop->fw_app_cpu_boot_dev_sts0);
+
+ dev_dbg(hdev->dev, "GIC controller is %s\n",
+ prop->gic_interrupts_enable ?
+ "enabled" : "disabled");
+ }
+
+ if (prop->fw_cpu_boot_dev_sts1_valid) {
+ prop->fw_app_cpu_boot_dev_sts1 =
+ RREG32(cpu_boot_dev_sts1_reg);
+
+ dev_dbg(hdev->dev,
+ "Firmware application CPU status1 %#x\n",
+ prop->fw_app_cpu_boot_dev_sts1);
+ }
+
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
+ dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+}
+
+/**
+ * hl_fw_dynamic_report_reset_cause - send a COMMS message with the cause
+ * of the newly triggered hard reset
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ * @reset_cause: enumerated cause for the recent hard reset
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_dynamic_report_reset_cause(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ enum comms_reset_cause reset_cause)
+{
+ struct lkd_msg_comms msg;
+ int rc;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* create message to be sent */
+ msg.header.type = HL_COMMS_RESET_CAUSE_TYPE;
+ msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
+ msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
+
+ msg.reset_cause = reset_cause;
+
+ rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
+ sizeof(struct lkd_msg_comms));
+ if (rc)
+ return rc;
+
+ /* copy message to space allocated by FW */
+ rc = hl_fw_dynamic_copy_msg(hdev, &msg, fw_loader);
+ if (rc)
+ return rc;
+
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
+ 0, true,
+ fw_loader->cpu_timeout);
+ if (rc)
+ return rc;
+
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
+ 0, true,
+ fw_loader->cpu_timeout);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ *
+ * brief: the dynamic protocol is a master (LKD) / slave (FW CPU) protocol.
+ * the communication is done using registers:
+ * - LKD command register
+ * - FW status register
+ * the protocol is race free. this goal is achieved by splitting the requests
+ * and responses into known synchronization points between the LKD and the FW.
+ * each response to an LKD request is known and bound to a predefined timeout.
+ * if the timeout expires without the desired status from FW, the protocol
+ * (and hence the boot) will fail.
+ */
+static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader)
+{
+ struct cpu_dyn_regs *dyn_regs;
+ int rc;
+
+ dev_info(hdev->dev,
+ "Loading firmware to device, may take some time...\n");
+
+ dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
+ 0, true,
+ fw_loader->cpu_timeout);
+ if (rc)
+ goto protocol_err;
+
+ if (hdev->curr_reset_cause) {
+ rc = hl_fw_dynamic_report_reset_cause(hdev, fw_loader,
+ hdev->curr_reset_cause);
+ if (rc)
+ goto protocol_err;
+
+ /* Clear current reset cause */
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ }
+
+ if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
+ rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
+ if (rc)
+ goto protocol_err;
+
+ /* read preboot version */
+ hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
+ fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
+ return 0;
+ }
+
+ /* load boot fit to FW */
+ rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
+ fw_loader->boot_fit_timeout);
+ if (rc) {
+ dev_err(hdev->dev, "failed to load boot fit\n");
+ goto protocol_err;
+ }
+
+ rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
+ if (rc)
+ goto protocol_err;
+
+ /* Enable DRAM scrambling before Linux boot and after successful
+ * UBoot
+ */
+ hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
+
+ if (!(hdev->fw_components & FW_TYPE_LINUX)) {
+ dev_info(hdev->dev, "Skip loading Linux F/W\n");
+ return 0;
+ }
+
+ if (fw_loader->skip_bmc) {
+ rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
+ COMMS_SKIP_BMC, 0,
+ true,
+ fw_loader->cpu_timeout);
+ if (rc) {
+ dev_err(hdev->dev, "failed to load boot fit\n");
+ goto protocol_err;
+ }
+ }
+
+ /* load Linux image to FW */
+ rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
+ fw_loader->cpu_timeout);
+ if (rc) {
+ dev_err(hdev->dev, "failed to load Linux\n");
+ goto protocol_err;
+ }
+
+ rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
+ if (rc)
+ goto protocol_err;
+
+ hl_fw_linux_update_state(hdev, le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
+ le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
+
+ hl_fw_dynamic_update_linux_interrupt_if(hdev);
+
+ return 0;
+
+protocol_err:
+ fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
+ le32_to_cpu(dyn_regs->cpu_boot_err1),
+ le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
+ le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
+ return rc;
+}
+
+/**
+ * hl_fw_static_init_cpu - initialize the device CPU using static protocol
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_loader: managing structure for loading device's FW
+ *
+ * @return 0 on success, otherwise non-zero error code
+ */
+static int hl_fw_static_init_cpu(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader)
+{
+ u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
+ u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
+ struct static_fw_load_mgr *static_loader;
+ u32 cpu_boot_status_reg;
+ int rc;
+
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
return 0;
+ /* init common loader parameters */
+ cpu_timeout = fw_loader->cpu_timeout;
+
+ /* init static loader parameters */
+ static_loader = &fw_loader->static_loader;
+ cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
+ msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
+ cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
+ cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
+ cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
+
dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
cpu_timeout / USEC_PER_SEC);
@@ -925,8 +2364,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
cpu_boot_status_reg,
status,
status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
- 10000,
- boot_fit_timeout);
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ fw_loader->boot_fit_timeout);
if (rc) {
dev_dbg(hdev->dev,
@@ -948,8 +2387,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
cpu_msg_status_reg,
status,
status == CPU_MSG_OK,
- 10000,
- boot_fit_timeout);
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ fw_loader->boot_fit_timeout);
if (rc) {
dev_err(hdev->dev,
@@ -970,33 +2409,17 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- 10000,
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
cpu_timeout);
dev_dbg(hdev->dev, "uboot status = %d\n", status);
/* Read U-Boot version now in case we will later fail */
- hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
-
- /* Clear reset status since we need to read it again from boot CPU */
- prop->hard_reset_done_by_fw = false;
-
- /* Read boot_cpu security bits */
- if (prop->fw_security_status_valid) {
- prop->fw_boot_cpu_security_map =
- RREG32(cpu_security_boot_status_reg);
-
- if (prop->fw_boot_cpu_security_map &
- CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- prop->hard_reset_done_by_fw = true;
-
- dev_dbg(hdev->dev,
- "Firmware boot CPU security status %#x\n",
- prop->fw_boot_cpu_security_map);
- }
+ hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
- dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
- prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+ /* update state according to boot stage */
+ hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
+ cpu_boot_dev_status1_reg);
if (rc) {
detect_cpu_boot_status(hdev, status);
@@ -1004,13 +2427,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+ /* Enable DRAM scrambling before Linux boot and after successful
+ * UBoot
+ */
+ hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
+
if (!(hdev->fw_components & FW_TYPE_LINUX)) {
dev_info(hdev->dev, "Skip loading Linux F/W\n");
+ rc = 0;
goto out;
}
- if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
+ if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
+ rc = 0;
goto out;
+ }
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
@@ -1019,7 +2450,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (rc)
goto out;
- if (skip_bmc) {
+ if (fw_loader->skip_bmc) {
WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
rc = hl_poll_timeout(
@@ -1027,7 +2458,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
- 10000,
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
cpu_timeout);
if (rc) {
@@ -1047,7 +2478,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- 10000,
+ FW_CPU_STATUS_POLL_INTERVAL_USEC,
cpu_timeout);
/* Clear message */
@@ -1066,36 +2497,43 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
- rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
+ fw_loader->static_loader.boot_err1_reg,
+ cpu_boot_dev_status0_reg,
+ cpu_boot_dev_status1_reg);
if (rc)
return rc;
- /* Clear reset status since we need to read again from app */
- prop->hard_reset_done_by_fw = false;
-
- /* Read FW application security bits */
- if (prop->fw_security_status_valid) {
- prop->fw_app_security_map =
- RREG32(cpu_security_boot_status_reg);
-
- if (prop->fw_app_security_map &
- CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- prop->hard_reset_done_by_fw = true;
-
- dev_dbg(hdev->dev,
- "Firmware application CPU security status %#x\n",
- prop->fw_app_security_map);
- }
-
- dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
- prop->hard_reset_done_by_fw ? "enabled" : "disabled");
-
- dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+ hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
+ cpu_boot_dev_status1_reg);
return 0;
out:
- fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
+ fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
+ fw_loader->static_loader.boot_err1_reg,
+ cpu_boot_dev_status0_reg,
+ cpu_boot_dev_status1_reg);
return rc;
}
+
+/**
+ * hl_fw_init_cpu - initialize the device CPU
+ *
+ * @hdev: pointer to the habanalabs device structure
+ *
+ * @return 0 on success, otherwise non-zero error code
+ *
+ * perform the necessary initializations for the device's CPU. takes into
+ * account whether the init protocol is static or dynamic.
+ */
+int hl_fw_init_cpu(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct fw_load_mgr *fw_loader = &hdev->fw_loader;
+
+ return prop->dynamic_fw_load ?
+ hl_fw_dynamic_init_cpu(hdev, fw_loader) :
+ hl_fw_static_init_cpu(hdev, fw_loader);
+}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6579f8767abd..6b3cdd7e068a 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -48,6 +48,7 @@
#define HL_PENDING_RESET_LONG_SEC 60
#define HL_HARD_RESET_MAX_TIMEOUT 120
+#define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3)
#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
@@ -115,10 +116,18 @@ enum hl_mmu_page_table_location {
*
* - HL_RESET_HEARTBEAT
* Set if reset is due to heartbeat
+ *
+ * - HL_RESET_TDR
+ * Set if reset is due to TDR
+ *
+ * - HL_RESET_DEVICE_RELEASE
+ * Set if reset is due to device release
*/
#define HL_RESET_HARD (1 << 0)
#define HL_RESET_FROM_RESET_THREAD (1 << 1)
#define HL_RESET_HEARTBEAT (1 << 2)
+#define HL_RESET_TDR (1 << 3)
+#define HL_RESET_DEVICE_RELEASE (1 << 4)
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -178,12 +187,14 @@ enum hl_pci_match_mode {
/**
* enum hl_fw_component - F/W components to read version through registers.
- * @FW_COMP_UBOOT: u-boot.
+ * @FW_COMP_BOOT_FIT: boot fit.
* @FW_COMP_PREBOOT: preboot.
+ * @FW_COMP_LINUX: linux.
*/
enum hl_fw_component {
- FW_COMP_UBOOT,
- FW_COMP_PREBOOT
+ FW_COMP_BOOT_FIT,
+ FW_COMP_PREBOOT,
+ FW_COMP_LINUX,
};
/**
@@ -420,12 +431,24 @@ struct hl_mmu_properties {
* @cb_pool_cb_size: size of each CB in the CB pool.
* @max_pending_cs: maximum of concurrent pending command submissions
* @max_queues: maximum amount of queues in the system
- * @fw_boot_cpu_security_map: bitmap representation of boot cpu security status
- * reported by FW, bit description can be found in
- * CPU_BOOT_DEV_STS*
- * @fw_app_security_map: bitmap representation of application security status
- * reported by FW, bit description can be found in
- * CPU_BOOT_DEV_STS*
+ * @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
+ * capabilities reported by FW, bit description
+ * can be found in CPU_BOOT_DEV_STS0
+ * @fw_preboot_cpu_boot_dev_sts1: bitmap representation of preboot cpu
+ * capabilities reported by FW, bit description
+ * can be found in CPU_BOOT_DEV_STS1
+ * @fw_bootfit_cpu_boot_dev_sts0: bitmap representation of boot cpu security
+ * status reported by FW, bit description can be
+ * found in CPU_BOOT_DEV_STS0
+ * @fw_bootfit_cpu_boot_dev_sts1: bitmap representation of boot cpu security
+ * status reported by FW, bit description can be
+ * found in CPU_BOOT_DEV_STS1
+ * @fw_app_cpu_boot_dev_sts0: bitmap representation of application security
+ * status reported by FW, bit description can be
+ * found in CPU_BOOT_DEV_STS0
+ * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
+ * status reported by FW, bit description can be
+ * found in CPU_BOOT_DEV_STS1
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -438,14 +461,19 @@ struct hl_mmu_properties {
* @user_interrupt_count: number of user interrupts.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
- * @fw_security_disabled: true if security measures are disabled in firmware,
- * false otherwise
- * @fw_security_status_valid: security status bits are valid and can be fetched
- * from BOOT_DEV_STS0
+ * @fw_security_enabled: true if security measures are enabled in firmware,
+ * false otherwise
+ * @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
+ * BOOT_DEV_STS0
+ * @fw_cpu_boot_dev_sts1_valid: status bits are valid and can be fetched from
+ * BOOT_DEV_STS1
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
* @num_functional_hbms: number of functional HBMs in each DCORE.
* @iatu_done_by_fw: true if iATU configuration is being done by FW.
+ * @dynamic_fw_load: is dynamic FW load supported.
+ * @gic_interrupts_enable: true if FW is not blocking GIC controller,
+ * false otherwise.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -491,8 +519,12 @@ struct asic_fixed_properties {
u32 cb_pool_cb_size;
u32 max_pending_cs;
u32 max_queues;
- u32 fw_boot_cpu_security_map;
- u32 fw_app_security_map;
+ u32 fw_preboot_cpu_boot_dev_sts0;
+ u32 fw_preboot_cpu_boot_dev_sts1;
+ u32 fw_bootfit_cpu_boot_dev_sts0;
+ u32 fw_bootfit_cpu_boot_dev_sts1;
+ u32 fw_app_cpu_boot_dev_sts0;
+ u32 fw_app_cpu_boot_dev_sts1;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -504,12 +536,15 @@ struct asic_fixed_properties {
u16 user_interrupt_count;
u8 tpc_enabled_mask;
u8 completion_queues_count;
- u8 fw_security_disabled;
- u8 fw_security_status_valid;
+ u8 fw_security_enabled;
+ u8 fw_cpu_boot_dev_sts0_valid;
+ u8 fw_cpu_boot_dev_sts1_valid;
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
u8 num_functional_hbms;
u8 iatu_done_by_fw;
+ u8 dynamic_fw_load;
+ u8 gic_interrupts_enable;
};
/**
@@ -750,12 +785,19 @@ struct hl_user_pending_interrupt {
* @kernel_address: holds the queue's kernel virtual address
* @bus_address: holds the queue's DMA address
* @ci: ci inside the queue
+ * @prev_eqe_index: the index of the previous event queue entry. The index of
+ * the current entry must be +1 of the previous one.
+ * @check_eqe_index: do we need to check the index of the current entry vs. the
+ * previous one. This is for backward compatibility with older
+ * firmwares
*/
struct hl_eq {
struct hl_device *hdev;
void *kernel_address;
dma_addr_t bus_address;
u32 ci;
+ u32 prev_eqe_index;
+ bool check_eqe_index;
};
@@ -812,6 +854,132 @@ enum div_select_defs {
DIV_SEL_DIVIDED_PLL = 3,
};
+enum pci_region {
+ PCI_REGION_CFG,
+ PCI_REGION_SRAM,
+ PCI_REGION_DRAM,
+ PCI_REGION_SP_SRAM,
+ PCI_REGION_NUMBER,
+};
+
+/**
+ * struct pci_mem_region - describe memory region in a PCI bar
+ * @region_base: region base address
+ * @region_size: region size
+ * @bar_size: size of the BAR
+ * @offset_in_bar: region offset into the bar
+ * @bar_id: bar ID of the region
+ * @used: 1 if the region is used, 0 otherwise
+ */
+struct pci_mem_region {
+ u64 region_base;
+ u64 region_size;
+ u64 bar_size;
+ u32 offset_in_bar;
+ u8 bar_id;
+ u8 used;
+};
+
+/**
+ * struct static_fw_load_mgr - static FW load manager
+ * @preboot_version_max_off: max offset to preboot version
+ * @boot_fit_version_max_off: max offset to boot fit version
+ * @kmd_msg_to_cpu_reg: register address for KMD->CPU messages
+ * @cpu_cmd_status_to_host_reg: register address for CPU command status response
+ * @cpu_boot_status_reg: boot status register
+ * @cpu_boot_dev_status0_reg: boot device status register 0
+ * @cpu_boot_dev_status1_reg: boot device status register 1
+ * @boot_err0_reg: boot error register 0
+ * @boot_err1_reg: boot error register 1
+ * @preboot_version_offset_reg: SRAM offset to preboot version register
+ * @boot_fit_version_offset_reg: SRAM offset to boot fit version register
+ * @sram_offset_mask: mask for getting offset into the SRAM
+ * @cpu_reset_wait_msec: used when setting WFE via kmd_msg_to_cpu_reg
+ */
+struct static_fw_load_mgr {
+ u64 preboot_version_max_off;
+ u64 boot_fit_version_max_off;
+ u32 kmd_msg_to_cpu_reg;
+ u32 cpu_cmd_status_to_host_reg;
+ u32 cpu_boot_status_reg;
+ u32 cpu_boot_dev_status0_reg;
+ u32 cpu_boot_dev_status1_reg;
+ u32 boot_err0_reg;
+ u32 boot_err1_reg;
+ u32 preboot_version_offset_reg;
+ u32 boot_fit_version_offset_reg;
+ u32 sram_offset_mask;
+ u32 cpu_reset_wait_msec;
+};
+
+/**
+ * struct fw_response - FW response to LKD command
+ * @ram_offset: descriptor offset into the RAM
+ * @ram_type: RAM type containing the descriptor (SRAM/DRAM)
+ * @status: command status
+ */
+struct fw_response {
+ u32 ram_offset;
+ u8 ram_type;
+ u8 status;
+};
+
+/**
+ * struct dynamic_fw_load_mgr - dynamic FW load manager
+ * @response: FW to LKD response
+ * @comm_desc: the communication descriptor with FW
+ * @image_region: region to copy the FW image to
+ * @fw_image_size: size of FW image to load
+ * @wait_for_bl_timeout: timeout for waiting for boot loader to respond
+ */
+struct dynamic_fw_load_mgr {
+ struct fw_response response;
+ struct lkd_fw_comms_desc comm_desc;
+ struct pci_mem_region *image_region;
+ size_t fw_image_size;
+ u32 wait_for_bl_timeout;
+};
+
+/**
+ * struct fw_image_props - properties of FW image
+ * @image_name: name of the image
+ * @src_off: offset in src FW to copy from
+ * @copy_size: amount of bytes to copy (0 to copy the whole binary)
+ */
+struct fw_image_props {
+ char *image_name;
+ u32 src_off;
+ u32 copy_size;
+};
+
+/**
+ * struct fw_load_mgr - manager FW loading process
+ * @dynamic_loader: specific structure for dynamic load
+ * @static_loader: specific structure for static load
+ * @boot_fit_img: boot fit image properties
+ * @linux_img: linux image properties
+ * @cpu_timeout: CPU response timeout in usec
+ * @boot_fit_timeout: Boot fit load timeout in usec
+ * @skip_bmc: should BMC be skipped
+ * @sram_bar_id: SRAM bar ID
+ * @dram_bar_id: DRAM bar ID
+ * @linux_loaded: true if linux was loaded so far
+ */
+struct fw_load_mgr {
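+ /* only one load method is used per device, so the loaders share storage */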
+ union {
+ struct dynamic_fw_load_mgr dynamic_loader;
+ struct static_fw_load_mgr static_loader;
+ };
+ struct fw_image_props boot_fit_img;
+ struct fw_image_props linux_img;
+ u32 cpu_timeout;
+ u32 boot_fit_timeout;
+ u8 skip_bmc;
+ u8 sram_bar_id;
+ u8 dram_bar_id;
+ u8 linux_loaded;
+};
+
/**
* struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
@@ -901,8 +1069,6 @@ enum div_select_defs {
* @ctx_fini: context dependent cleanup.
* @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
* @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
- * @read_device_fw_version: read the device's firmware versions that are
- * contained in registers
* @load_firmware_to_device: load the firmware to the device's memory
* @load_boot_fit_to_device: load boot fit to device's memory
* @get_signal_cb_size: Get signal CB size.
@@ -933,6 +1099,8 @@ enum div_select_defs {
* @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
* @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
* generic f/w compatible PLL Indexes
+ * @init_firmware_loader: initialize data for FW loader.
+ * @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1006,7 +1174,7 @@ struct hl_asic_funcs {
int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
u32 flags);
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
- u32 asid, u64 va, u64 size);
+ u32 flags, u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
@@ -1030,8 +1198,6 @@ struct hl_asic_funcs {
void (*ctx_fini)(struct hl_ctx *ctx);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
- int (*read_device_fw_version)(struct hl_device *hdev,
- enum hl_fw_component fwc);
int (*load_firmware_to_device)(struct hl_device *hdev);
int (*load_boot_fit_to_device)(struct hl_device *hdev);
u32 (*get_signal_cb_size)(struct hl_device *hdev);
@@ -1056,8 +1222,10 @@ struct hl_asic_funcs {
int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
- void (*get_msi_info)(u32 *table);
+ void (*get_msi_info)(__le32 *table);
int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
+ void (*init_firmware_loader)(struct hl_device *hdev);
+ void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
};
@@ -1262,6 +1430,7 @@ struct hl_userptr {
* @staged_sequence: the sequence of the staged submission this CS is part of,
* relevant only if staged_cs is set.
* @timeout_jiffies: cs timeout in jiffies.
+ * @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
@@ -1274,6 +1443,8 @@ struct hl_userptr {
* @staged_first: true if this is the first staged CS and we need to receive
* timeout for this CS.
* @staged_cs: true if this CS is part of a staged submission.
+ * @skip_reset_on_timeout: true if we shall not reset the device in case
+ * timeout occurs (debug scenario).
*/
struct hl_cs {
u16 *jobs_in_queue_cnt;
@@ -1291,6 +1462,7 @@ struct hl_cs {
u64 sequence;
u64 staged_sequence;
u64 timeout_jiffies;
+ u64 submission_time_jiffies;
enum hl_cs_type type;
u8 submitted;
u8 completed;
@@ -1301,6 +1473,7 @@ struct hl_cs {
u8 staged_last;
u8 staged_first;
u8 staged_cs;
+ u8 skip_reset_on_timeout;
};
/**
@@ -1922,7 +2095,7 @@ struct hl_mmu_funcs {
* @kernel_queues: array of hl_hw_queue.
* @cs_mirror_list: CS mirror list for TDR.
* @cs_mirror_lock: protects cs_mirror_list.
- * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
+ * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
* @event_queue: event queue for IRQ from CPU-CP.
* @dma_pool: DMA pool for small allocations.
* @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
@@ -1954,6 +2127,8 @@ struct hl_mmu_funcs {
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
+ * @fw_loader: FW loader manager.
+ * @pci_mem_region: array of memory regions in the PCI
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1968,6 +2143,11 @@ struct hl_mmu_funcs {
* the error will be ignored by the driver during
* device initialization. Mainly used to debug and
* workaround firmware bugs
+ * @last_successful_open_jif: timestamp (jiffies) of the last successful
+ * device open.
+ * @last_open_session_duration_jif: duration (jiffies) of the last device open
+ * session.
+ * @open_counter: number of successful device open operations.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @card_type: Various ASICs have several card types. This indicates the card
@@ -2007,6 +2187,8 @@ struct hl_mmu_funcs {
* @collective_mon_idx: helper index for collective initialization
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
+ * @allow_external_soft_reset: true if soft reset initiated by user or TDR is
+ * allowed.
* @supports_cb_mapping: is mapping a CB to the device's MMU supported.
* @needs_reset: true if reset_on_lockup is false and device should be reset
* due to lockup.
@@ -2015,6 +2197,14 @@ struct hl_mmu_funcs {
* @device_fini_pending: true if device_fini was called and might be
* waiting for the reset thread to finish
* @supports_staged_submission: true if staged submissions are supported
+ * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
+ * triggered, and cleared after it is shared with preboot.
+ * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
+ * complete instead.
+ * @device_cpu_is_halted: Flag to indicate whether the device CPU was already
+ * halted. We can't halt it again because the COMMS
+ * protocol will throw an error. Relevant only for
+ * cases where Linux was not loaded to device CPU
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2079,11 +2269,18 @@ struct hl_device {
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
+ struct fw_load_mgr fw_loader;
+
+ struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
+
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
u64 boot_error_status_mask;
+ u64 last_successful_open_jif;
+ u64 last_open_session_duration_jif;
+ u64 open_counter;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
enum cpucp_card_types card_type;
@@ -2116,11 +2313,15 @@ struct hl_device {
u8 collective_mon_idx;
u8 supports_coresight;
u8 supports_soft_reset;
+ u8 allow_external_soft_reset;
u8 supports_cb_mapping;
u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
u8 supports_staged_submission;
+ u8 curr_reset_cause;
+ u8 skip_reset_on_timeout;
+ u8 device_cpu_is_halted;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2138,6 +2339,7 @@ struct hl_device {
u8 rl_enable;
u8 reset_on_preboot_fail;
u8 reset_upon_device_release;
+ u8 reset_if_device_not_idle;
};
@@ -2384,11 +2586,13 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_cpucp_info_get(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg,
- u32 boot_err0_reg);
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
+ u32 boot_err1_reg);
int hl_fw_cpucp_handshake(struct hl_device *hdev,
- u32 cpu_security_boot_status_reg,
- u32 boot_err0_reg);
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
+ u32 boot_err1_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
@@ -2399,14 +2603,17 @@ int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr);
int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
-int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
- u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
- bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout);
+void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
+void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
+int hl_fw_init_cpu(struct hl_device *hdev);
int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
- u32 timeout);
-
+ u32 sts_boot_dev_sts0_reg,
+ u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
+ u32 boot_err1_reg, u32 timeout);
+int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader,
+ enum comms_cmd cmd, unsigned int size,
+ bool wait_ok, u32 timeout);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
@@ -2415,6 +2622,7 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
struct hl_inbound_pci_region *pci_region);
int hl_pci_set_outbound_region(struct hl_device *hdev,
struct hl_outbound_pci_region *pci_region);
+enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr);
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
@@ -2443,6 +2651,8 @@ int hl_set_voltage(struct hl_device *hdev,
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
+int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
+ struct hl_hw_sob **hw_sob, u32 count);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 64d1530db985..4194cda2d04c 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock);
static int timeout_locked = 30;
static int reset_on_lockup = 1;
-static int memory_scrub = 1;
+static int memory_scrub;
static ulong boot_error_status_mask = ULONG_MAX;
module_param(timeout_locked, int, 0444);
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(reset_on_lockup,
module_param(memory_scrub, int, 0444);
MODULE_PARM_DESC(memory_scrub,
- "Scrub device memory in various states (0 = no, 1 = yes, default yes)");
+ "Scrub device memory in various states (0 = no, 1 = yes, default no)");
module_param(boot_error_status_mask, ulong, 0444);
MODULE_PARM_DESC(boot_error_status_mask,
@@ -187,6 +187,9 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
+ hdev->open_counter++;
+ hdev->last_successful_open_jif = jiffies;
+
return 0;
out_err:
@@ -264,6 +267,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->bmc_enable = 1;
hdev->hard_reset_on_fw_events = 1;
hdev->reset_on_preboot_fail = 1;
+ hdev->reset_if_device_not_idle = 1;
hdev->reset_pcilink = 0;
hdev->axi_drain = 0;
@@ -308,10 +312,10 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
}
if (pdev)
- hdev->asic_prop.fw_security_disabled =
- !is_asic_secured(pdev->device);
+ hdev->asic_prop.fw_security_enabled =
+ is_asic_secured(hdev->asic_type);
else
- hdev->asic_prop.fw_security_disabled = true;
+ hdev->asic_prop.fw_security_enabled = false;
/* Assign status description string */
strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
@@ -325,11 +329,14 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->reset_on_lockup = reset_on_lockup;
hdev->memory_scrub = memory_scrub;
hdev->boot_error_status_mask = boot_error_status_mask;
+ hdev->stop_on_err = true;
hdev->pldm = 0;
set_driver_behavior_per_device(hdev);
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+
if (timeout_locked)
hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
else
@@ -464,6 +471,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
return 0;
disable_device:
+ pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
@@ -572,7 +580,11 @@ static struct pci_driver hl_pci_driver = {
.probe = hl_pci_probe,
.remove = hl_pci_remove,
.shutdown = hl_pci_remove,
- .driver.pm = &hl_pm_ops,
+ .driver = {
+ .name = HL_NAME,
+ .pm = &hl_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 33841c272eb6..f4dda7b4acdd 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -95,7 +95,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.first_available_interrupt_id =
prop->first_available_user_msix_interrupt;
return copy_to_user(out, &hw_ip,
- min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
+ min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
static int hw_events_info(struct hl_device *hdev, bool aggregate,
@@ -460,6 +460,24 @@ static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}
+static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_open_stats_info open_stats_info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ open_stats_info.last_open_period_ms = jiffies64_to_msecs(
+ hdev->last_open_session_duration_jif);
+ open_stats_info.open_counter = hdev->open_counter;
+
+ return copy_to_user(out, &open_stats_info,
+ min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -543,6 +561,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_POWER:
return power_info(hpriv, args);
+ case HL_INFO_OPEN_STATS:
+ return open_stats_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 173438461835..bcabfdbf1e01 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -410,19 +410,20 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
-static void init_signal_cs(struct hl_device *hdev,
+static int init_signal_cs(struct hl_device *hdev,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
struct hl_sync_stream_properties *prop;
struct hl_hw_sob *hw_sob;
u32 q_idx;
+ int rc = 0;
q_idx = job->hw_queue_id;
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
hw_sob = &prop->hw_sob[prop->curr_sob_offset];
cs_cmpl->hw_sob = hw_sob;
- cs_cmpl->sob_val = prop->next_sob_val++;
+ cs_cmpl->sob_val = prop->next_sob_val;
dev_dbg(hdev->dev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
@@ -434,24 +435,9 @@ static void init_signal_cs(struct hl_device *hdev,
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
cs_cmpl->hw_sob->sob_id, 0, true);
- kref_get(&hw_sob->kref);
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1);
- /* check for wraparound */
- if (prop->next_sob_val == HL_MAX_SOB_VAL) {
- /*
- * Decrement as we reached the max value.
- * The release function won't be called here as we've
- * just incremented the refcount.
- */
- kref_put(&hw_sob->kref, hl_sob_reset_error);
- prop->next_sob_val = 1;
- /* only two SOBs are currently in use */
- prop->curr_sob_offset =
- (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
-
- dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
- prop->curr_sob_offset, q_idx);
- }
+ return rc;
}
static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
@@ -504,22 +490,25 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
*
* H/W queues spinlock should be taken before calling this function
*/
-static void init_signal_wait_cs(struct hl_cs *cs)
+static int init_signal_wait_cs(struct hl_cs *cs)
{
struct hl_ctx *ctx = cs->ctx;
struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job;
struct hl_cs_compl *cs_cmpl =
container_of(cs->fence, struct hl_cs_compl, base_fence);
+ int rc = 0;
/* There is only one job in a signal/wait CS */
job = list_first_entry(&cs->job_list, struct hl_cs_job,
cs_node);
if (cs->type & CS_TYPE_SIGNAL)
- init_signal_cs(hdev, job, cs_cmpl);
+ rc = init_signal_cs(hdev, job, cs_cmpl);
else if (cs->type & CS_TYPE_WAIT)
init_wait_cs(hdev, cs, job, cs_cmpl);
+
+ return rc;
}
/*
@@ -590,11 +579,16 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
}
- if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
- init_signal_wait_cs(cs);
- else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
+ rc = init_signal_wait_cs(cs);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to submit signal cs\n");
+ goto unroll_cq_resv;
+ }
+ } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
hdev->asic_funcs->collective_wait_init_cs(cs);
+
spin_lock(&hdev->cs_mirror_lock);
/* Verify staged CS exists and add to the staged list */
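Note on the refactor above: the inline SOB wraparound logic in init_signal_cs() was replaced by a call to hl_cs_signal_sob_wraparound_handler(), whose body is not part of this hunk. A rough stand-alone sketch of the idea, following the removed inline code (the names, the maximum value and the kref bookkeeping are simplified placeholders):

#include <stdint.h>

#define DEMO_MAX_SOB_VAL	(1u << 15)	/* placeholder for HL_MAX_SOB_VAL */
#define DEMO_RSVD_SOBS		2		/* placeholder for HL_RSVD_SOBS */

struct demo_stream_props {
	uint32_t next_sob_val;		/* value the next signal will use */
	uint32_t curr_sob_offset;	/* which reserved SOB is currently active */
};

/* account for "count" signals and switch to the next reserved SOB on wrap */
static void demo_signal_sob_wraparound(struct demo_stream_props *prop, uint32_t count)
{
	prop->next_sob_val += count;

	if (prop->next_sob_val >= DEMO_MAX_SOB_VAL) {
		prop->curr_sob_offset =
			(prop->curr_sob_offset + 1) % DEMO_RSVD_SOBS;
		prop->next_sob_val = count;
	}
}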
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 27129868c711..39b14a933393 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -207,17 +207,33 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
struct hl_eq_entry *eq_entry;
struct hl_eq_entry *eq_base;
struct hl_eqe_work *handle_eqe_work;
+ bool entry_ready;
+ u32 cur_eqe;
+ u16 cur_eqe_index;
eq_base = eq->kernel_address;
while (1) {
- bool entry_ready =
- ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
- EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
+ cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
+ entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);
if (!entry_ready)
break;
+ cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
+ if ((hdev->event_queue.check_eqe_index) &&
+ (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
+ != cur_eqe_index)) {
+ dev_dbg(hdev->dev,
+ "EQE 0x%x in queue is ready but index does not match %d!=%d",
+ eq_base[eq->ci].hdr.ctl,
+ ((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
+ cur_eqe_index);
+ break;
+ }
+
+ eq->prev_eqe_index++;
+
eq_entry = &eq_base[eq->ci];
/*
@@ -341,6 +357,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
q->hdev = hdev;
q->kernel_address = p;
q->ci = 0;
+ q->prev_eqe_index = 0;
return 0;
}
@@ -365,6 +382,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
q->ci = 0;
+ q->prev_eqe_index = 0;
/*
* It's not enough to just reset the PI/CI because the H/W may have
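For reference, a stand-alone model of the EQE index continuity check added above; the field width used here is a placeholder, the real one is defined by EQ_CTL_INDEX_MASK:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_EQ_INDEX_MASK	0xFFu	/* placeholder field width */

/* the firmware increments the EQE index once per event, so the driver
 * expects (previous index + 1) modulo the field width; any other value
 * means an event was dropped or read out of order
 */
static bool demo_eqe_index_ok(uint32_t prev_index, uint32_t cur_index)
{
	return ((prev_index + 1) & DEMO_EQ_INDEX_MASK) == cur_index;
}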
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 2938cbbafbbc..af339ce1ab4f 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -570,8 +570,10 @@ static u64 get_va_block(struct hl_device *hdev,
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
(!is_align_pow_2 &&
do_div(tmp_hint_addr, va_range->page_size))) {
- dev_info(hdev->dev, "Hint address 0x%llx will be ignored\n",
- hint_addr);
+
+ dev_dbg(hdev->dev,
+ "Hint address 0x%llx will be ignored because it is not aligned\n",
+ hint_addr);
hint_addr = 0;
}
@@ -1117,7 +1119,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false,
+ *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);
@@ -1261,8 +1264,9 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
* at the loop end rather than for each iteration
*/
if (!ctx_free)
- rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
- *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true,
+ *vm_type, ctx->asid, vaddr,
+ phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);
@@ -1369,12 +1373,7 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
/* Driver only allows mapping of a complete HW block */
block_size = vma->vm_end - vma->vm_start;
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE,
- (void __user *) (uintptr_t) vma->vm_start, block_size)) {
-#else
if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
-#endif
dev_err(hdev->dev,
"user pointer is invalid - 0x%lx\n",
vma->vm_start);
@@ -1608,7 +1607,8 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc != npages) {
dev_err(hdev->dev,
- "Failed to map host memory, user ptr probably wrong\n");
+ "Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
+ rc, addr, size, npages);
if (rc < 0)
goto destroy_pages;
npages = rc;
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index b37189956b14..792d25b79ea6 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -501,12 +501,20 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
!is_power_of_2(prop->dram_page_size)) {
- u32 bit;
+ unsigned long dram_page_size = prop->dram_page_size;
u64 page_offset_mask;
u64 phys_addr_mask;
+ u32 bit;
- bit = __ffs64((u64)prop->dram_page_size);
- page_offset_mask = ((1ull << bit) - 1);
+ /*
+	 * find the last set bit in page_size to cover all bits of the page
+	 * offset. note that 1 has to be added to the bit index.
+	 * also note that the internal ulong variable is used to avoid an
+	 * alignment issue.
+ */
+ bit = find_last_bit(&dram_page_size,
+ sizeof(dram_page_size) * BITS_PER_BYTE) + 1;
+ page_offset_mask = (BIT_ULL(bit) - 1);
phys_addr_mask = ~page_offset_mask;
*phys_addr = (tmp_phys_addr & phys_addr_mask) |
(virt_addr & page_offset_mask);
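A worked example of the offset-mask computation above, outside the kernel (find_last_bit is replaced by a compiler builtin here): for a non-power-of-2 DRAM page size such as 0x30000000 (768 MB) the last set bit is 29, so the mask becomes (1ULL << 30) - 1 = 0x3FFFFFFF, which can hold any offset below the page size.

#include <stdint.h>
#include <stdio.h>

/* page_size must be non-zero */
static uint64_t demo_page_offset_mask(uint64_t page_size)
{
	unsigned int bit = 63 - __builtin_clzll(page_size);	/* last set bit */

	return (1ULL << (bit + 1)) - 1;
}

int main(void)
{
	/* prints 0x3fffffff for the 768 MB example above */
	printf("0x%llx\n",
	       (unsigned long long)demo_page_offset_mask(0x30000000ULL));
	return 0;
}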
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index e941b7eef346..d5bedf5ba011 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -10,7 +10,7 @@
#include <linux/pci.h>
-#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10)
+#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 100)
#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30)
@@ -360,6 +360,32 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
}
/**
+ * hl_get_pci_memory_region() - get PCI region for given address
+ * @hdev: Pointer to hl_device structure.
+ * @addr: device address
+ *
+ * @return region index on success, otherwise PCI_REGION_NUMBER (invalid
+ * region index)
+ */
+enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
+{
+ int i;
+
+ for (i = 0 ; i < PCI_REGION_NUMBER ; i++) {
+ struct pci_mem_region *region = &hdev->pci_mem_region[i];
+
+ if (!region->used)
+ continue;
+
+ if ((addr >= region->region_base) &&
+ (addr < region->region_base + region->region_size))
+ return i;
+ }
+
+ return PCI_REGION_NUMBER;
+}
+
+/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
*
@@ -395,6 +421,12 @@ int hl_pci_init(struct hl_device *hdev)
goto unmap_pci_bars;
}
+ /* Driver must sleep in order for FW to finish the iATU configuration */
+ if (hdev->asic_prop.iatu_done_by_fw) {
+ usleep_range(2000, 3000);
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+ }
+
rc = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(hdev->dma_mask));
if (rc) {
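The new hl_get_pci_memory_region() above is a simple linear lookup; a stand-alone model of the same idea, with a "number of regions" sentinel standing in for PCI_REGION_NUMBER:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_region {
	uint64_t base;
	uint64_t size;
	bool used;
};

/* return the index of the first used region containing addr, or n if none */
static size_t demo_find_region(const struct demo_region *regions, size_t n,
			       uint64_t addr)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!regions[i].used)
			continue;
		if (addr >= regions[i].base &&
		    addr < regions[i].base + regions[i].size)
			return i;
	}

	return n;	/* sentinel: no region contains addr */
}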
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index c9f649b31e3a..db72df282ef8 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -208,7 +208,7 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
- if (!hdev->supports_soft_reset) {
+ if (!hdev->allow_external_soft_reset) {
dev_err(hdev->dev, "Device does not support soft-reset\n");
goto out;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 9e4a6bb3acd1..aa8a0ca5aca2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -78,6 +78,7 @@
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
+#define GAUDI_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
@@ -409,7 +410,7 @@ static inline void set_default_power_values(struct hl_device *hdev)
}
}
-static int gaudi_get_fixed_properties(struct hl_device *hdev)
+static int gaudi_set_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 num_sync_stream_queues = 0;
@@ -545,8 +546,10 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
- prop->fw_security_status_valid = false;
+ prop->fw_cpu_boot_dev_sts0_valid = false;
+ prop->fw_cpu_boot_dev_sts1_valid = false;
prop->hard_reset_done_by_fw = false;
+ prop->gic_interrupts_enable = true;
return 0;
}
@@ -577,6 +580,9 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr))
return old_addr;
+ if (hdev->asic_prop.iatu_done_by_fw)
+ return U64_MAX;
+
/* Inbound Region 2 - Bar 4 - Point to HBM */
pci_region.mode = PCI_BAR_MATCH_MODE;
pci_region.bar = HBM_BAR_ID;
@@ -599,10 +605,8 @@ static int gaudi_init_iatu(struct hl_device *hdev)
struct hl_outbound_pci_region outbound_region;
int rc;
- if (hdev->asic_prop.iatu_done_by_fw) {
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+ if (hdev->asic_prop.iatu_done_by_fw)
return 0;
- }
/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
inbound_region.mode = PCI_BAR_MATCH_MODE;
@@ -651,9 +655,9 @@ static int gaudi_early_init(struct hl_device *hdev)
u32 fw_boot_status;
int rc;
- rc = gaudi_get_fixed_properties(hdev);
+ rc = gaudi_set_fixed_properties(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to get fixed properties\n");
+ dev_err(hdev->dev, "Failed setting fixed properties\n");
return rc;
}
@@ -683,8 +687,14 @@ static int gaudi_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
- if (!hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_enabled) {
hdev->asic_prop.iatu_done_by_fw = true;
+
+ /*
+ * GIC-security-bit can ONLY be set by CPUCP, so in this stage
+ * decision can only be taken based on PCI ID security.
+ */
+ hdev->asic_prop.gic_interrupts_enable = false;
goto pci_init;
}
@@ -707,8 +717,10 @@ pci_init:
* version to determine whether we run with a security-enabled firmware
*/
rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
- mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
- GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+ mmCPU_BOOT_DEV_STS0,
+ mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
+ mmCPU_BOOT_ERR1,
+ GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
hdev->asic_funcs->hw_fini(hdev, true);
@@ -751,7 +763,14 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
int rc;
- if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_enabled) {
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
+
+ if (rc)
+ return rc;
+
+ freq = pll_freq_arr[2];
+ } else {
/* Backward compatibility */
div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
@@ -779,13 +798,6 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
div_sel);
freq = 0;
}
- } else {
- rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
-
- if (rc)
- return rc;
-
- freq = pll_freq_arr[2];
}
prop->psoc_timestamp_frequency = freq;
@@ -988,9 +1000,27 @@ static void gaudi_sob_group_reset_error(struct kref *ref)
hw_sob_group->base_sob_id);
}
+static void gaudi_collective_mstr_sob_mask_set(struct gaudi_device *gaudi)
+{
+ struct gaudi_collective_properties *prop;
+ int i;
+
+ prop = &gaudi->collective_props;
+
+ memset(prop->mstr_sob_mask, 0, sizeof(prop->mstr_sob_mask));
+
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++)
+ if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
+ prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=
+ BIT(i % HL_MAX_SOBS_PER_MONITOR);
+ /* Set collective engine bit */
+ prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=
+ BIT(i % HL_MAX_SOBS_PER_MONITOR);
+}
+
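The helper above sets one bit per enabled NIC engine across several monitor mask words; a stand-alone model of that i -> (word, bit) mapping, with placeholder sizes instead of HL_MAX_SOBS_PER_MONITOR and NIC_NUMBER_OF_ENGINES:

#include <stdint.h>
#include <string.h>

#define DEMO_SOBS_PER_MONITOR	8	/* placeholder */
#define DEMO_NR_ENGINES		10	/* placeholder */
#define DEMO_NR_WORDS		(DEMO_NR_ENGINES / DEMO_SOBS_PER_MONITOR + 1)

/* engine i maps to bit (i % SOBS_PER_MONITOR) of word (i / SOBS_PER_MONITOR);
 * the extra collective-engine bit simply follows the last engine
 */
static void demo_build_mstr_sob_mask(uint32_t enabled_engines,
				     uint32_t mask[DEMO_NR_WORDS])
{
	int i;

	memset(mask, 0, DEMO_NR_WORDS * sizeof(mask[0]));

	for (i = 0; i < DEMO_NR_ENGINES; i++)
		if (enabled_engines & (1u << i))
			mask[i / DEMO_SOBS_PER_MONITOR] |=
				1u << (i % DEMO_SOBS_PER_MONITOR);

	mask[i / DEMO_SOBS_PER_MONITOR] |= 1u << (i % DEMO_SOBS_PER_MONITOR);
}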
static int gaudi_collective_init(struct hl_device *hdev)
{
- u32 i, master_monitor_sobs, sob_id, reserved_sobs_per_group;
+ u32 i, sob_id, reserved_sobs_per_group;
struct gaudi_collective_properties *prop;
struct gaudi_device *gaudi;
@@ -1016,22 +1046,7 @@ static int gaudi_collective_init(struct hl_device *hdev)
gaudi_collective_map_sobs(hdev, i);
}
- prop->mstr_sob_mask[0] = 0;
- master_monitor_sobs = HL_MAX_SOBS_PER_MONITOR;
- for (i = 0 ; i < master_monitor_sobs ; i++)
- if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
- prop->mstr_sob_mask[0] |= BIT(i);
-
- prop->mstr_sob_mask[1] = 0;
- master_monitor_sobs =
- NIC_NUMBER_OF_ENGINES - HL_MAX_SOBS_PER_MONITOR;
- for (i = 0 ; i < master_monitor_sobs; i++) {
- if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
- prop->mstr_sob_mask[1] |= BIT(i);
- }
-
- /* Set collective engine bit */
- prop->mstr_sob_mask[1] |= BIT(i);
+ gaudi_collective_mstr_sob_mask_set(gaudi);
return 0;
}
@@ -1513,7 +1528,7 @@ static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
hdev->cpu_pci_msb_addr =
GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);
- if (hdev->asic_prop.fw_security_disabled)
+ if (!hdev->asic_prop.fw_security_enabled)
GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
free_dma_mem_arr:
@@ -1590,6 +1605,48 @@ free_internal_qmans_pq_mem:
return rc;
}
+static void gaudi_set_pci_memory_regions(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_mem_region *region;
+
+ /* CFG */
+ region = &hdev->pci_mem_region[PCI_REGION_CFG];
+ region->region_base = CFG_BASE;
+ region->region_size = CFG_SIZE;
+ region->offset_in_bar = CFG_BASE - SPI_FLASH_BASE_ADDR;
+ region->bar_size = CFG_BAR_SIZE;
+ region->bar_id = CFG_BAR_ID;
+ region->used = 1;
+
+ /* SRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_SRAM];
+ region->region_base = SRAM_BASE_ADDR;
+ region->region_size = SRAM_SIZE;
+ region->offset_in_bar = 0;
+ region->bar_size = SRAM_BAR_SIZE;
+ region->bar_id = SRAM_BAR_ID;
+ region->used = 1;
+
+ /* DRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_DRAM];
+ region->region_base = DRAM_PHYS_BASE;
+ region->region_size = hdev->asic_prop.dram_size;
+ region->offset_in_bar = 0;
+ region->bar_size = prop->dram_pci_bar_size;
+ region->bar_id = HBM_BAR_ID;
+ region->used = 1;
+
+ /* SP SRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_SP_SRAM];
+ region->region_base = PSOC_SCRATCHPAD_ADDR;
+ region->region_size = PSOC_SCRATCHPAD_SIZE;
+ region->offset_in_bar = PSOC_SCRATCHPAD_ADDR - SPI_FLASH_BASE_ADDR;
+ region->bar_size = CFG_BAR_SIZE;
+ region->bar_id = CFG_BAR_ID;
+ region->used = 1;
+}
+
static int gaudi_sw_init(struct hl_device *hdev)
{
struct gaudi_device *gaudi;
@@ -1664,12 +1721,14 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->supports_coresight = true;
hdev->supports_staged_submission = true;
+ gaudi_set_pci_memory_regions(hdev);
+
return 0;
free_cpu_accessible_dma_pool:
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
- if (hdev->asic_prop.fw_security_disabled)
+ if (!hdev->asic_prop.fw_security_enabled)
GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
hdev->cpu_pci_msb_addr);
hdev->asic_funcs->asic_dma_free_coherent(hdev,
@@ -1691,7 +1750,7 @@ static int gaudi_sw_fini(struct hl_device *hdev)
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
- if (hdev->asic_prop.fw_security_disabled)
+ if (!hdev->asic_prop.fw_security_enabled)
GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
hdev->cpu_pci_msb_addr);
@@ -1879,12 +1938,11 @@ static void gaudi_init_scrambler_sram(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- if (!hdev->asic_prop.fw_security_disabled)
+ if (hdev->asic_prop.fw_security_enabled)
return;
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_SRAM_SCR_EN))
+ if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_SRAM_SCR_EN)
return;
if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
@@ -1951,12 +2009,11 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- if (!hdev->asic_prop.fw_security_disabled)
+ if (hdev->asic_prop.fw_security_enabled)
return;
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_boot_cpu_security_map &
- CPU_BOOT_DEV_STS0_DRAM_SCR_EN))
+ if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_DRAM_SCR_EN)
return;
if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
@@ -2021,12 +2078,11 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
static void gaudi_init_e2e(struct hl_device *hdev)
{
- if (!hdev->asic_prop.fw_security_disabled)
+ if (hdev->asic_prop.fw_security_enabled)
return;
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_boot_cpu_security_map &
- CPU_BOOT_DEV_STS0_E2E_CRED_EN))
+ if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_E2E_CRED_EN)
return;
WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
@@ -2396,12 +2452,11 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
{
uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
- if (!hdev->asic_prop.fw_security_disabled)
+ if (hdev->asic_prop.fw_security_enabled)
return;
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_boot_cpu_security_map &
- CPU_BOOT_DEV_STS0_HBM_CRED_EN))
+ if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_HBM_CRED_EN)
return;
hbm0_wr = 0x33333333;
@@ -2487,10 +2542,12 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
int qman_id, dma_addr_t qman_pq_addr)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
u32 q_off, dma_qm_offset;
- u32 dma_qm_err_cfg;
+ u32 dma_qm_err_cfg, irq_handler_offset;
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
@@ -2539,20 +2596,23 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
/* The following configuration is needed only once per QMAN */
if (qman_id == 0) {
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);
+
/* Configure RAZWI IRQ */
dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
- if (hdev->stop_on_err) {
+ if (hdev->stop_on_err)
dma_qm_err_cfg |=
PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
- }
WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
+
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
- lower_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
- upper_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
dma_id);
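The pattern introduced here (and repeated for the other QMANs below) picks the error-interrupt target register at run time and then programs it as a split 64-bit address; a stand-alone sketch of the lo/hi split:

#include <stdint.h>

static void demo_program_err_addr(volatile uint32_t *lo_reg,
				  volatile uint32_t *hi_reg,
				  uint64_t cfg_base, uint32_t irq_handler_offset)
{
	uint64_t addr = cfg_base + irq_handler_offset;

	*lo_reg = (uint32_t)(addr & 0xFFFFFFFFu);	/* lower_32_bits() */
	*hi_reg = (uint32_t)(addr >> 32);		/* upper_32_bits() */
}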
@@ -2573,8 +2633,11 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
{
- u32 dma_offset = dma_id * DMA_CORE_OFFSET;
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT;
+ u32 dma_offset = dma_id * DMA_CORE_OFFSET;
+ u32 irq_handler_offset;
/* Set to maximum possible according to physical size */
WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
@@ -2588,10 +2651,16 @@ static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;
WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg);
+
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl);
+
WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset,
- lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset,
- upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset,
gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id);
WREG32(mmDMA0_CORE_PROT + dma_offset,
@@ -2654,10 +2723,12 @@ static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
int qman_id, u64 qman_base_addr)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
+ u32 dma_qm_err_cfg, irq_handler_offset;
u32 q_off, dma_qm_offset;
- u32 dma_qm_err_cfg;
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
@@ -2697,6 +2768,10 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_DST_OFFSET);
} else {
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);
+
WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
@@ -2706,18 +2781,17 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
/* Configure RAZWI IRQ */
dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
- if (hdev->stop_on_err) {
+ if (hdev->stop_on_err)
dma_qm_err_cfg |=
HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
- }
+
WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
- lower_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
- upper_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
dma_id);
@@ -2792,8 +2866,11 @@ static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
int qman_id, u64 qman_base_addr)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
+ u32 irq_handler_offset;
u32 q_off, mme_id;
u32 mme_qm_err_cfg;
@@ -2825,6 +2902,10 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_DST_OFFSET);
} else {
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl);
+
WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
@@ -2834,20 +2915,20 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
/* Configure RAZWI IRQ */
mme_id = mme_offset /
- (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0);
+ (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2;
mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
- if (hdev->stop_on_err) {
+ if (hdev->stop_on_err)
mme_qm_err_cfg |=
MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
- }
+
WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg);
+
WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset,
- lower_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset,
- upper_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset,
gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id +
mme_id);
@@ -2912,10 +2993,12 @@ static void gaudi_init_mme_qmans(struct hl_device *hdev)
static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
int qman_id, u64 qman_base_addr)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
+ u32 tpc_qm_err_cfg, irq_handler_offset;
u32 q_off, tpc_id;
- u32 tpc_qm_err_cfg;
mtr_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
@@ -2956,6 +3039,10 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_DST_OFFSET);
} else {
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl);
+
WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
@@ -2965,18 +3052,17 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
/* Configure RAZWI IRQ */
tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
- if (hdev->stop_on_err) {
+ if (hdev->stop_on_err)
tpc_qm_err_cfg |=
TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
- }
WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg);
+
WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset,
- lower_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset,
- upper_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset,
gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id +
tpc_id);
@@ -3059,10 +3145,12 @@ static void gaudi_init_tpc_qmans(struct hl_device *hdev)
static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
int qman_id, u64 qman_base_addr, int nic_id)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
+ u32 nic_qm_err_cfg, irq_handler_offset;
u32 q_off;
- u32 nic_qm_err_cfg;
mtr_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
@@ -3109,20 +3197,23 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
if (qman_id == 0) {
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl);
+
/* Configure RAZWI IRQ */
nic_qm_err_cfg = NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
- if (hdev->stop_on_err) {
+ if (hdev->stop_on_err)
nic_qm_err_cfg |=
NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
- }
WREG32(mmNIC0_QM0_GLBL_ERR_CFG + nic_offset, nic_qm_err_cfg);
+
WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_LO + nic_offset,
- lower_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_HI + nic_offset,
- upper_32_bits(CFG_BASE +
- mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ upper_32_bits(CFG_BASE + irq_handler_offset));
+
WREG32(mmNIC0_QM0_GLBL_ERR_WDATA + nic_offset,
gaudi_irq_map_table[GAUDI_EVENT_NIC0_QM0].cpu_id +
nic_id);
@@ -3475,7 +3566,7 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
- if (!hdev->asic_prop.fw_security_disabled)
+ if (hdev->asic_prop.fw_security_enabled)
return;
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
@@ -3535,7 +3626,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!hdev->asic_prop.fw_security_disabled)
+ if (hdev->asic_prop.fw_security_enabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3674,9 +3765,6 @@ static int gaudi_load_firmware_to_device(struct hl_device *hdev)
{
void __iomem *dst;
- /* HBM scrambler must be initialized before pushing F/W to HBM */
- gaudi_init_scrambler_hbm(hdev);
-
dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;
return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst, 0, 0);
@@ -3691,42 +3779,71 @@ static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0);
}
-static int gaudi_read_device_fw_version(struct hl_device *hdev,
- enum hl_fw_component fwc)
+static void gaudi_init_dynamic_firmware_loader(struct hl_device *hdev)
{
- const char *name;
- u32 ver_off;
- char *dest;
+ struct dynamic_fw_load_mgr *dynamic_loader;
+ struct cpu_dyn_regs *dyn_regs;
- switch (fwc) {
- case FW_COMP_UBOOT:
- ver_off = RREG32(mmUBOOT_VER_OFFSET);
- dest = hdev->asic_prop.uboot_ver;
- name = "U-Boot";
- break;
- case FW_COMP_PREBOOT:
- ver_off = RREG32(mmPREBOOT_VER_OFFSET);
- dest = hdev->asic_prop.preboot_ver;
- name = "Preboot";
- break;
- default:
- dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
- return -EIO;
- }
+ dynamic_loader = &hdev->fw_loader.dynamic_loader;
- ver_off &= ~((u32)SRAM_BASE_ADDR);
+ /*
+	 * here we update initial values for a few specific dynamic regs (before
+	 * reading the first descriptor from FW, these values have to be
+	 * hard-coded). In later stages of the protocol these values will be
+	 * updated automatically by reading the FW descriptor, so the data
+	 * there will always be up-to-date
+ */
+ dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
+ dyn_regs->kmd_msg_to_cpu =
+ cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
+ dyn_regs->cpu_cmd_status_to_host =
+ cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
- if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
- memcpy_fromio(dest, hdev->pcie_bar[SRAM_BAR_ID] + ver_off,
- VERSION_MAX_LEN);
- } else {
- dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
- name, ver_off);
- strcpy(dest, "unavailable");
- return -EIO;
- }
+ dynamic_loader->wait_for_bl_timeout = GAUDI_WAIT_FOR_BL_TIMEOUT_USEC;
+}
- return 0;
+static void gaudi_init_static_firmware_loader(struct hl_device *hdev)
+{
+ struct static_fw_load_mgr *static_loader;
+
+ static_loader = &hdev->fw_loader.static_loader;
+
+ static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
+ static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
+ static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
+ static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
+ static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
+ static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
+ static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
+ static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
+ static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
+ static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
+ static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
+ static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
+ static_loader->cpu_reset_wait_msec = hdev->pldm ?
+ GAUDI_PLDM_RESET_WAIT_MSEC :
+ GAUDI_CPU_RESET_WAIT_MSEC;
+}
+
+static void gaudi_init_firmware_loader(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct fw_load_mgr *fw_loader = &hdev->fw_loader;
+
+ /* fill common fields */
+ fw_loader->linux_loaded = false;
+ fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE;
+ fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE;
+ fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC;
+ fw_loader->boot_fit_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;
+ fw_loader->skip_bmc = !hdev->bmc_enable;
+ fw_loader->sram_bar_id = SRAM_BAR_ID;
+ fw_loader->dram_bar_id = HBM_BAR_ID;
+
+ if (prop->dynamic_fw_load)
+ gaudi_init_dynamic_firmware_loader(hdev);
+ else
+ gaudi_init_static_firmware_loader(hdev);
}
static int gaudi_init_cpu(struct hl_device *hdev)
@@ -3744,15 +3861,10 @@ static int gaudi_init_cpu(struct hl_device *hdev)
* The device CPU works with 40 bits addresses.
* This register sets the extension to 50 bits.
*/
- if (hdev->asic_prop.fw_security_disabled)
+ if (!hdev->asic_prop.fw_security_enabled)
WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);
- rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
- mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU,
- mmCPU_CMD_STATUS_TO_HOST,
- mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
- !hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC,
- GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+ rc = hl_fw_init_cpu(hdev);
if (rc)
return rc;
@@ -3764,10 +3876,12 @@ static int gaudi_init_cpu(struct hl_device *hdev)
static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, irq_handler_offset;
struct hl_eq *eq;
- u32 status;
struct hl_hw_queue *cpu_pq =
&hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
int err;
@@ -3806,7 +3920,12 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
WREG32(mmCPU_IF_QUEUE_INIT,
PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_PI_UPDATE);
+ irq_handler_offset = prop->gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);
+
+ WREG32(irq_handler_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_PI_UPDATE].cpu_id);
err = hl_poll_timeout(
hdev,
@@ -3823,8 +3942,10 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
}
/* update FW application security bits */
- if (prop->fw_security_status_valid)
- prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+ if (prop->fw_cpu_boot_dev_sts0_valid)
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
+ if (prop->fw_cpu_boot_dev_sts1_valid)
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
@@ -3835,7 +3956,7 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
/* Perform read from the device to make sure device is up */
RREG32(mmHW_STATE);
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
/* Set the access through PCI bars (Linux driver only) as
* secured
*/
@@ -3860,13 +3981,27 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
static int gaudi_hw_init(struct hl_device *hdev)
{
+ struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
gaudi_pre_hw_init(hdev);
- gaudi_init_pci_dma_qmans(hdev);
+ /* If iATU is done by FW, the HBM bar ALWAYS points to DRAM_PHYS_BASE.
+ * So we set it here and if anyone tries to move it later to
+ * a different address, there will be an error
+ */
+ if (hdev->asic_prop.iatu_done_by_fw)
+ gaudi->hbm_bar_cur_addr = DRAM_PHYS_BASE;
- gaudi_init_hbm_dma_qmans(hdev);
+ /*
+	 * Before pushing u-boot/linux to the device, we need to set the HBM
+	 * bar to the DRAM base address
+ */
+ if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
+ dev_err(hdev->dev,
+ "failed to map HBM bar to DRAM base address\n");
+ return -EIO;
+ }
rc = gaudi_init_cpu(hdev);
if (rc) {
@@ -3895,6 +4030,10 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_security(hdev);
+ gaudi_init_pci_dma_qmans(hdev);
+
+ gaudi_init_hbm_dma_qmans(hdev);
+
gaudi_init_mme_qmans(hdev);
gaudi_init_tpc_qmans(hdev);
@@ -3934,8 +4073,11 @@ disable_queues:
static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ u32 status, reset_timeout_ms, cpu_timeout_ms, irq_handler_offset;
struct gaudi_device *gaudi = hdev->asic_specific;
- u32 status, reset_timeout_ms, cpu_timeout_ms;
+ bool driver_performs_reset;
if (!hard_reset) {
dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
@@ -3950,26 +4092,35 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
+ driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled &&
+ !hdev->asic_prop.hard_reset_done_by_fw);
+
/* Set device to handle FLR by H/W as we will put the device CPU to
* halt mode
*/
- if (hdev->asic_prop.fw_security_disabled &&
- !hdev->asic_prop.hard_reset_done_by_fw)
+ if (driver_performs_reset)
WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
- /* I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
+	/* If Linux is loaded on the device CPU we need to communicate with it
+	 * via the GIC. Otherwise, we need to use COMMS, or the MSG_TO_CPU
+	 * registers in the case of old F/Ws
*/
- if (hdev->asic_prop.hard_reset_done_by_fw)
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
- else
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->fw_loader.linux_loaded) {
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_host_halt_irq);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
+ WREG32(irq_handler_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_HALT_MACHINE].cpu_id);
+ } else {
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ hl_fw_ask_hard_reset_without_linux(hdev);
+ else
+ hl_fw_ask_halt_machine_without_linux(hdev);
+ }
- if (hdev->asic_prop.fw_security_disabled &&
- !hdev->asic_prop.hard_reset_done_by_fw) {
+ if (driver_performs_reset) {
/* Configure the reset registers. Must be done as early as
* possible in case we fail during H/W initialization
@@ -4003,8 +4154,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
/* Restart BTL/BLR upon hard-reset */
- if (hdev->asic_prop.fw_security_disabled)
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
@@ -4041,6 +4191,8 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_CLK_GATE);
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
+
+ hdev->device_cpu_is_halted = false;
}
}
@@ -4078,10 +4230,12 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ u32 db_reg_offset, db_value, dma_qm_offset, q_off, irq_handler_offset;
struct gaudi_device *gaudi = hdev->asic_specific;
- u32 db_reg_offset, db_value, dma_qm_offset, q_off;
- int dma_id;
bool invalid_queue = false;
+ int dma_id;
switch (hw_queue_id) {
case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3:
@@ -4307,164 +4461,84 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
db_reg_offset = mmTPC7_QM_PQ_PI_3;
break;
- case GAUDI_QUEUE_ID_NIC_0_0:
- db_reg_offset = mmNIC0_QM0_PQ_PI_0;
- break;
-
- case GAUDI_QUEUE_ID_NIC_0_1:
- db_reg_offset = mmNIC0_QM0_PQ_PI_1;
- break;
-
- case GAUDI_QUEUE_ID_NIC_0_2:
- db_reg_offset = mmNIC0_QM0_PQ_PI_2;
- break;
-
- case GAUDI_QUEUE_ID_NIC_0_3:
- db_reg_offset = mmNIC0_QM0_PQ_PI_3;
- break;
-
- case GAUDI_QUEUE_ID_NIC_1_0:
- db_reg_offset = mmNIC0_QM1_PQ_PI_0;
- break;
-
- case GAUDI_QUEUE_ID_NIC_1_1:
- db_reg_offset = mmNIC0_QM1_PQ_PI_1;
- break;
-
- case GAUDI_QUEUE_ID_NIC_1_2:
- db_reg_offset = mmNIC0_QM1_PQ_PI_2;
- break;
-
- case GAUDI_QUEUE_ID_NIC_1_3:
- db_reg_offset = mmNIC0_QM1_PQ_PI_3;
- break;
-
- case GAUDI_QUEUE_ID_NIC_2_0:
- db_reg_offset = mmNIC1_QM0_PQ_PI_0;
- break;
-
- case GAUDI_QUEUE_ID_NIC_2_1:
- db_reg_offset = mmNIC1_QM0_PQ_PI_1;
- break;
-
- case GAUDI_QUEUE_ID_NIC_2_2:
- db_reg_offset = mmNIC1_QM0_PQ_PI_2;
- break;
-
- case GAUDI_QUEUE_ID_NIC_2_3:
- db_reg_offset = mmNIC1_QM0_PQ_PI_3;
- break;
-
- case GAUDI_QUEUE_ID_NIC_3_0:
- db_reg_offset = mmNIC1_QM1_PQ_PI_0;
- break;
-
- case GAUDI_QUEUE_ID_NIC_3_1:
- db_reg_offset = mmNIC1_QM1_PQ_PI_1;
- break;
-
- case GAUDI_QUEUE_ID_NIC_3_2:
- db_reg_offset = mmNIC1_QM1_PQ_PI_2;
- break;
-
- case GAUDI_QUEUE_ID_NIC_3_3:
- db_reg_offset = mmNIC1_QM1_PQ_PI_3;
- break;
-
- case GAUDI_QUEUE_ID_NIC_4_0:
- db_reg_offset = mmNIC2_QM0_PQ_PI_0;
- break;
-
- case GAUDI_QUEUE_ID_NIC_4_1:
- db_reg_offset = mmNIC2_QM0_PQ_PI_1;
- break;
-
- case GAUDI_QUEUE_ID_NIC_4_2:
- db_reg_offset = mmNIC2_QM0_PQ_PI_2;
- break;
-
- case GAUDI_QUEUE_ID_NIC_4_3:
- db_reg_offset = mmNIC2_QM0_PQ_PI_3;
- break;
-
- case GAUDI_QUEUE_ID_NIC_5_0:
- db_reg_offset = mmNIC2_QM1_PQ_PI_0;
- break;
+ case GAUDI_QUEUE_ID_NIC_0_0...GAUDI_QUEUE_ID_NIC_0_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC0))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_5_1:
- db_reg_offset = mmNIC2_QM1_PQ_PI_1;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC0_QM0_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_5_2:
- db_reg_offset = mmNIC2_QM1_PQ_PI_2;
- break;
+ case GAUDI_QUEUE_ID_NIC_1_0...GAUDI_QUEUE_ID_NIC_1_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC1))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_5_3:
- db_reg_offset = mmNIC2_QM1_PQ_PI_3;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC0_QM1_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_6_0:
- db_reg_offset = mmNIC3_QM0_PQ_PI_0;
- break;
+ case GAUDI_QUEUE_ID_NIC_2_0...GAUDI_QUEUE_ID_NIC_2_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC2))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_6_1:
- db_reg_offset = mmNIC3_QM0_PQ_PI_1;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC1_QM0_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_6_2:
- db_reg_offset = mmNIC3_QM0_PQ_PI_2;
- break;
+ case GAUDI_QUEUE_ID_NIC_3_0...GAUDI_QUEUE_ID_NIC_3_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC3))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_6_3:
- db_reg_offset = mmNIC3_QM0_PQ_PI_3;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC1_QM1_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_7_0:
- db_reg_offset = mmNIC3_QM1_PQ_PI_0;
- break;
+ case GAUDI_QUEUE_ID_NIC_4_0...GAUDI_QUEUE_ID_NIC_4_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC4))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_7_1:
- db_reg_offset = mmNIC3_QM1_PQ_PI_1;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC2_QM0_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_7_2:
- db_reg_offset = mmNIC3_QM1_PQ_PI_2;
- break;
+ case GAUDI_QUEUE_ID_NIC_5_0...GAUDI_QUEUE_ID_NIC_5_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC5))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_7_3:
- db_reg_offset = mmNIC3_QM1_PQ_PI_3;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC2_QM1_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_8_0:
- db_reg_offset = mmNIC4_QM0_PQ_PI_0;
- break;
+ case GAUDI_QUEUE_ID_NIC_6_0...GAUDI_QUEUE_ID_NIC_6_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC6))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_8_1:
- db_reg_offset = mmNIC4_QM0_PQ_PI_1;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC3_QM0_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_8_2:
- db_reg_offset = mmNIC4_QM0_PQ_PI_2;
- break;
+ case GAUDI_QUEUE_ID_NIC_7_0...GAUDI_QUEUE_ID_NIC_7_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC7))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_8_3:
- db_reg_offset = mmNIC4_QM0_PQ_PI_3;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC3_QM1_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_9_0:
- db_reg_offset = mmNIC4_QM1_PQ_PI_0;
- break;
+ case GAUDI_QUEUE_ID_NIC_8_0...GAUDI_QUEUE_ID_NIC_8_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC8))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_9_1:
- db_reg_offset = mmNIC4_QM1_PQ_PI_1;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC4_QM0_PQ_PI_0 + q_off;
break;
- case GAUDI_QUEUE_ID_NIC_9_2:
- db_reg_offset = mmNIC4_QM1_PQ_PI_2;
- break;
+ case GAUDI_QUEUE_ID_NIC_9_0...GAUDI_QUEUE_ID_NIC_9_3:
+ if (!(gaudi->hw_cap_initialized & HW_CAP_NIC9))
+ invalid_queue = true;
- case GAUDI_QUEUE_ID_NIC_9_3:
- db_reg_offset = mmNIC4_QM1_PQ_PI_3;
+ q_off = ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmNIC4_QM1_PQ_PI_0 + q_off;
break;
default:
@@ -4486,8 +4560,13 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) {
/* make sure device CPU will read latest data from host */
mb();
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_PI_UPDATE);
+
+ irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);
+
+ WREG32(irq_handler_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_PI_UPDATE].cpu_id);
}
}
@@ -4934,6 +5013,7 @@ already_pinned:
return 0;
unpin_memory:
+ list_del(&userptr->job_node);
hl_unpin_host_memory(hdev, userptr);
free_userptr:
kfree(userptr);
@@ -6513,7 +6593,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC0) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC0) {
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1,
@@ -6526,7 +6606,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC1) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC1) {
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1,
@@ -6539,7 +6619,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC2) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC2) {
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1,
@@ -6552,7 +6632,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC3) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC3) {
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1,
@@ -6565,7 +6645,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC4) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC4) {
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1,
@@ -6578,7 +6658,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC5) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC5) {
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1,
@@ -6591,7 +6671,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC6) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC6) {
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1,
@@ -6604,7 +6684,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC7) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC7) {
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1,
@@ -6617,7 +6697,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC8) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC8) {
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1,
@@ -6630,7 +6710,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
- if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC9) {
+ if (gaudi->hw_cap_initialized & HW_CAP_NIC9) {
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1,
@@ -7044,14 +7124,158 @@ enable_clk_gate:
return rc;
}
+/*
+ * gaudi_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
+ *
+ * @idx: the current pi/ci value
+ * @q_len: the queue length (power of 2)
+ *
+ * @return the cyclically decremented index
+ */
+static inline u32 gaudi_queue_idx_dec(u32 idx, u32 q_len)
+{
+ u32 mask = q_len - 1;
+
+ /*
+	 * a modular decrement is equivalent to adding (q_len - 1);
+	 * we then keep only the LSBs to make sure the value is in the
+	 * range [0, q_len - 1]
+ */
+ return (idx + q_len - 1) & mask;
+}
+
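A quick sanity check of the cyclic decrement above, for a power-of-2 queue length:

#include <assert.h>
#include <stdint.h>

static uint32_t demo_queue_idx_dec(uint32_t idx, uint32_t q_len)
{
	return (idx + q_len - 1) & (q_len - 1);
}

int main(void)
{
	assert(demo_queue_idx_dec(0, 8) == 7);	/* index 0 wraps to 7 */
	assert(demo_queue_idx_dec(5, 8) == 4);
	return 0;
}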
+/**
+ * gaudi_print_sw_config_stream_data - print SW config stream data
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @stream: the QMAN's stream
+ * @qman_base: base address of QMAN registers block
+ */
+static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream,
+ u64 qman_base)
+{
+ u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr;
+ u32 cq_ptr_lo_off, size;
+
+ cq_ptr_lo_off = mmTPC0_QM_CQ_PTR_LO_1 - mmTPC0_QM_CQ_PTR_LO_0;
+
+ cq_ptr_lo = qman_base + (mmTPC0_QM_CQ_PTR_LO_0 - mmTPC0_QM_BASE) +
+ stream * cq_ptr_lo_off;
+ cq_ptr_hi = cq_ptr_lo +
+ (mmTPC0_QM_CQ_PTR_HI_0 - mmTPC0_QM_CQ_PTR_LO_0);
+ cq_tsize = cq_ptr_lo +
+ (mmTPC0_QM_CQ_TSIZE_0 - mmTPC0_QM_CQ_PTR_LO_0);
+
+ cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
+ size = RREG32(cq_tsize);
+ dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n",
+ stream, cq_ptr, size);
+}
+
+/**
+ * gaudi_print_last_pqes_on_err - print last PQEs on error
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @qid_base: first QID of the QMAN (out of 4 streams)
+ * @stream: the QMAN's stream
+ * @qman_base: base address of QMAN registers block
+ * @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE)
+ */
+static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
+ u32 stream, u64 qman_base,
+ bool pr_sw_conf)
+{
+ u32 ci, qm_ci_stream_off, queue_len;
+ struct hl_hw_queue *q;
+ u64 pq_ci;
+ int i;
+
+ q = &hdev->kernel_queues[qid_base + stream];
+
+ qm_ci_stream_off = mmTPC0_QM_PQ_CI_1 - mmTPC0_QM_PQ_CI_0;
+ pq_ci = qman_base + (mmTPC0_QM_PQ_CI_0 - mmTPC0_QM_BASE) +
+ stream * qm_ci_stream_off;
+
+ queue_len = (q->queue_type == QUEUE_TYPE_INT) ?
+ q->int_queue_len : HL_QUEUE_LENGTH;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (pr_sw_conf)
+ gaudi_print_sw_config_stream_data(hdev, stream, qman_base);
+
+ ci = RREG32(pq_ci);
+
+ /* we should start printing from ci - 1 */
+ ci = gaudi_queue_idx_dec(ci, queue_len);
+
+ for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) {
+ struct hl_bd *bd;
+ u64 addr;
+ u32 len;
+
+ bd = q->kernel_address;
+ bd += ci;
+
+ len = le32_to_cpu(bd->len);
+ /* len 0 means an uninitialized entry - break */
+ if (!len)
+ break;
+
+ addr = le64_to_cpu(bd->ptr);
+
+ dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n",
+ stream, ci, addr, len);
+
+ /* get previous ci, wrap if needed */
+ ci = gaudi_queue_idx_dec(ci, queue_len);
+ }
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+}
+
+/**
+ * print_qman_data_on_err - extract QMAN data on error
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @qid_base: first QID of the QMAN (out of 4 streams)
+ * @stream: the QMAN's stream
+ * @qman_base: base address of QMAN registers block
+ *
+ * This function attempts to extract as much data as possible on a QMAN error.
+ * For an upper CP, print the SW config stream data and the last 8 PQEs.
+ * For the lower CP, print the SW config data and the last PQEs of all 4 upper CPs.
+ */
+static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base,
+ u32 stream, u64 qman_base)
+{
+ u32 i;
+
+ if (stream != QMAN_STREAMS) {
+ gaudi_print_last_pqes_on_err(hdev, qid_base, stream, qman_base,
+ true);
+ return;
+ }
+
+ gaudi_print_sw_config_stream_data(hdev, stream, qman_base);
+
+ for (i = 0; i < QMAN_STREAMS; i++)
+ gaudi_print_last_pqes_on_err(hdev, qid_base, i, qman_base,
+ false);
+}
+
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
const char *qm_name,
- u64 glbl_sts_addr,
- u64 arb_err_addr)
+ u64 qman_base,
+ u32 qid_base)
{
u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
+ u64 glbl_sts_addr, arb_err_addr;
char reg_desc[32];
+ glbl_sts_addr = qman_base + (mmTPC0_QM_GLBL_STS1_0 - mmTPC0_QM_BASE);
+ arb_err_addr = qman_base + (mmTPC0_QM_ARB_ERR_CAUSE - mmTPC0_QM_BASE);
+
/* Iterate through all stream GLBL_STS1 registers + Lower CP */
for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
glbl_sts_clr_val = 0;
@@ -7078,6 +7302,8 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
/* Write 1 clear errors */
if (!hdev->stop_on_err)
WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
+ else
+ print_qman_data_on_err(hdev, qid_base, i, qman_base);
}
arb_err_val = RREG32(arb_err_addr);
@@ -7222,90 +7448,88 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
{
- u64 glbl_sts_addr, arb_err_addr;
- u8 index;
+ u64 qman_base;
char desc[32];
+ u32 qid_base;
+ u8 index;
switch (event_type) {
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
index = event_type - GAUDI_EVENT_TPC0_QM;
- glbl_sts_addr =
- mmTPC0_QM_GLBL_STS1_0 + index * TPC_QMAN_OFFSET;
- arb_err_addr =
- mmTPC0_QM_ARB_ERR_CAUSE + index * TPC_QMAN_OFFSET;
+ qid_base = GAUDI_QUEUE_ID_TPC_0_0 + index * QMAN_STREAMS;
+ qman_base = mmTPC0_QM_BASE + index * TPC_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
break;
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
index = event_type - GAUDI_EVENT_MME0_QM;
- glbl_sts_addr =
- mmMME0_QM_GLBL_STS1_0 + index * MME_QMAN_OFFSET;
- arb_err_addr =
- mmMME0_QM_ARB_ERR_CAUSE + index * MME_QMAN_OFFSET;
+ qid_base = GAUDI_QUEUE_ID_MME_0_0 + index * QMAN_STREAMS;
+ qman_base = mmMME0_QM_BASE + index * MME_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
break;
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
index = event_type - GAUDI_EVENT_DMA0_QM;
- glbl_sts_addr =
- mmDMA0_QM_GLBL_STS1_0 + index * DMA_QMAN_OFFSET;
- arb_err_addr =
- mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET;
+ qid_base = GAUDI_QUEUE_ID_DMA_0_0 + index * QMAN_STREAMS;
+ /* skip GAUDI_QUEUE_ID_CPU_PQ if necessary */
+ if (index > 1)
+ qid_base++;
+ qman_base = mmDMA0_QM_BASE + index * DMA_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
break;
case GAUDI_EVENT_NIC0_QM0:
- glbl_sts_addr = mmNIC0_QM0_GLBL_STS1_0;
- arb_err_addr = mmNIC0_QM0_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_0_0;
+ qman_base = mmNIC0_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM0");
break;
case GAUDI_EVENT_NIC0_QM1:
- glbl_sts_addr = mmNIC0_QM1_GLBL_STS1_0;
- arb_err_addr = mmNIC0_QM1_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_1_0;
+ qman_base = mmNIC0_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM1");
break;
case GAUDI_EVENT_NIC1_QM0:
- glbl_sts_addr = mmNIC1_QM0_GLBL_STS1_0;
- arb_err_addr = mmNIC1_QM0_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_2_0;
+ qman_base = mmNIC1_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM0");
break;
case GAUDI_EVENT_NIC1_QM1:
- glbl_sts_addr = mmNIC1_QM1_GLBL_STS1_0;
- arb_err_addr = mmNIC1_QM1_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_3_0;
+ qman_base = mmNIC1_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM1");
break;
case GAUDI_EVENT_NIC2_QM0:
- glbl_sts_addr = mmNIC2_QM0_GLBL_STS1_0;
- arb_err_addr = mmNIC2_QM0_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_4_0;
+ qman_base = mmNIC2_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM0");
break;
case GAUDI_EVENT_NIC2_QM1:
- glbl_sts_addr = mmNIC2_QM1_GLBL_STS1_0;
- arb_err_addr = mmNIC2_QM1_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_5_0;
+ qman_base = mmNIC2_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM1");
break;
case GAUDI_EVENT_NIC3_QM0:
- glbl_sts_addr = mmNIC3_QM0_GLBL_STS1_0;
- arb_err_addr = mmNIC3_QM0_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_6_0;
+ qman_base = mmNIC3_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM0");
break;
case GAUDI_EVENT_NIC3_QM1:
- glbl_sts_addr = mmNIC3_QM1_GLBL_STS1_0;
- arb_err_addr = mmNIC3_QM1_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_7_0;
+ qman_base = mmNIC3_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM1");
break;
case GAUDI_EVENT_NIC4_QM0:
- glbl_sts_addr = mmNIC4_QM0_GLBL_STS1_0;
- arb_err_addr = mmNIC4_QM0_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_8_0;
+ qman_base = mmNIC4_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM0");
break;
case GAUDI_EVENT_NIC4_QM1:
- glbl_sts_addr = mmNIC4_QM1_GLBL_STS1_0;
- arb_err_addr = mmNIC4_QM1_ARB_ERR_CAUSE;
+ qid_base = GAUDI_QUEUE_ID_NIC_9_0;
+ qman_base = mmNIC4_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM1");
break;
default:
return;
}
- gaudi_handle_qman_err_generic(hdev, desc, glbl_sts_addr, arb_err_addr);
+ gaudi_handle_qman_err_generic(hdev, desc, qman_base, qid_base);
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
@@ -7332,6 +7556,16 @@ static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
}
+static void gaudi_print_fw_alive_info(struct hl_device *hdev,
+ struct hl_eq_fw_alive *fw_alive)
+{
+ dev_err(hdev->dev,
+ "FW alive report: severity=%s, process_id=%u, thread_id=%u, uptime=%llu seconds\n",
+ (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ?
+ "Minor" : "Critical", fw_alive->process_id,
+ fw_alive->thread_id, fw_alive->uptime_seconds);
+}
+
static int gaudi_soft_reset_late_init(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -7346,11 +7580,10 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
struct hl_eq_hbm_ecc_data *hbm_ecc_data)
{
u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch;
- int err = 0;
+ int rc = 0;
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_HBM_ECC_EN)) {
+ if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_HBM_ECC_EN) {
if (!hbm_ecc_data) {
dev_err(hdev->dev, "No FW ECC data");
return 0;
@@ -7379,13 +7612,10 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
device, ch, hbm_ecc_data->first_addr, type,
hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt,
hbm_ecc_data->dec_cnt);
-
- err = 1;
-
return 0;
}
- if (!hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_enabled) {
dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n");
return 0;
}
@@ -7395,7 +7625,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
val = (val & 0xFF) | ((val >> 8) & 0xFF);
if (val) {
- err = 1;
+ rc = -EIO;
dev_err(hdev->dev,
"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
device, ch * 2, val & 0x1, (val >> 1) & 0x1,
@@ -7415,7 +7645,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF);
val = (val & 0xFF) | ((val >> 8) & 0xFF);
if (val) {
- err = 1;
+ rc = -EIO;
dev_err(hdev->dev,
"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1,
@@ -7444,7 +7674,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
val = RREG32(base + 0x8F30);
val2 = RREG32(base + 0x8F34);
if (val | val2) {
- err = 1;
+ rc = -EIO;
dev_err(hdev->dev,
"HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n",
device, val, val2);
@@ -7452,13 +7682,13 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
val = RREG32(base + 0x8F40);
val2 = RREG32(base + 0x8F44);
if (val | val2) {
- err = 1;
+ rc = -EIO;
dev_err(hdev->dev,
"HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n",
device, val, val2);
}
- return err;
+ return rc;
}
static int gaudi_hbm_event_to_dev(u16 hbm_event_type)
@@ -7604,6 +7834,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR:
+ case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
goto reset_device;
@@ -7786,6 +8017,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
goto reset_device;
+ case GAUDI_EVENT_FW_ALIVE_S:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
+ goto reset_device;
+
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
@@ -7856,52 +8092,13 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
}
static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
- bool is_hard, u32 asid, u64 va, u64 size)
+ bool is_hard, u32 flags,
+ u32 asid, u64 va, u64 size)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
- u32 status, timeout_usec;
- u32 inv_data;
- u32 pi;
- int rc;
-
- if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
- hdev->hard_reset_pending)
- return 0;
-
- if (hdev->pldm)
- timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
- else
- timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-
- /*
- * TODO: currently invalidate entire L0 & L1 as in regular hard
- * invalidation. Need to apply invalidation of specific cache
- * lines with mask of ASID & VA & size.
- * Note that L1 with be flushed entirely in any case.
+ /* Treat as invalidate all because there is no range invalidation
+ * in Gaudi
*/
-
- /* L0 & L1 invalidation */
- inv_data = RREG32(mmSTLB_CACHE_INV);
- /* PI is 8 bit */
- pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
- WREG32(mmSTLB_CACHE_INV,
- (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
-
- rc = hl_poll_timeout(
- hdev,
- mmSTLB_INV_CONSUMER_INDEX,
- status,
- status == pi,
- 1000,
- timeout_usec);
-
- if (rc) {
- dev_err_ratelimited(hdev->dev,
- "MMU cache invalidation timeout\n");
- hl_device_reset(hdev, HL_RESET_HARD);
- }
-
- return rc;
+ return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
}
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
@@ -7956,7 +8153,9 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
+ rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
+ mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
+ mmCPU_BOOT_ERR1);
if (rc)
return rc;
@@ -8077,7 +8276,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) {
offset = i * NIC_MACRO_QMAN_OFFSET;
port = 2 * i;
- if (hdev->nic_ports_mask & BIT(port)) {
+ if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) {
qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
@@ -8092,7 +8291,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
}
port = 2 * i + 1;
- if (hdev->nic_ports_mask & BIT(port)) {
+ if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) {
qm_glbl_sts0 = RREG32(mmNIC0_QM1_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmNIC0_QM1_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
@@ -8306,8 +8505,10 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ,
HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
- if (!hdev->internal_cb_va_base)
+ if (!hdev->internal_cb_va_base) {
+ rc = -ENOMEM;
goto destroy_internal_cb_pool;
+ }
mutex_lock(&ctx->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
@@ -8749,7 +8950,14 @@ static int gaudi_block_mmap(struct hl_device *hdev,
static void gaudi_enable_events_from_fw(struct hl_device *hdev)
{
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
+ struct cpu_dyn_regs *dyn_regs =
+ &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
+ u32 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
+ le32_to_cpu(dyn_regs->gic_host_ints_irq);
+
+ WREG32(irq_handler_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_INTS_REGISTER].cpu_id);
}
static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
@@ -8834,7 +9042,6 @@ static const struct hl_asic_funcs gaudi_funcs = {
.ctx_fini = gaudi_ctx_fini,
.get_clk_rate = gaudi_get_clk_rate,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
- .read_device_fw_version = gaudi_read_device_fw_version,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
.get_signal_cb_size = gaudi_get_signal_cb_size,
@@ -8853,7 +9060,9 @@ static const struct hl_asic_funcs gaudi_funcs = {
.get_hw_block_id = gaudi_get_hw_block_id,
.hw_block_mmap = gaudi_block_mmap,
.enable_events_from_fw = gaudi_enable_events_from_fw,
- .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx
+ .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
+ .init_firmware_loader = gaudi_init_firmware_loader,
+ .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 5929be81ec23..957bf3720f70 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -82,6 +82,7 @@
QMAN_STREAMS)
#define QMAN_STREAMS 4
+#define PQ_FETCHER_CACHE_SIZE 8
#define DMA_QMAN_OFFSET (mmDMA1_QM_BASE - mmDMA0_QM_BASE)
#define TPC_QMAN_OFFSET (mmTPC1_QM_BASE - mmTPC0_QM_BASE)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 6e56fa1c6c69..c2a27ed1c4d1 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -424,7 +424,7 @@ static int gaudi_config_stm(struct hl_device *hdev,
if (frequency == 0)
frequency = input->frequency;
WREG32(base_reg + 0xE8C, frequency);
- WREG32(base_reg + 0xE90, 0x7FF);
+ WREG32(base_reg + 0xE90, 0x1F00);
/* SW-2176 - SW WA for HW bug */
if ((CFG_BASE + base_reg) >= mmDMA_CH_0_CS_STM_BASE &&
@@ -434,7 +434,7 @@ static int gaudi_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE6C, 0x0);
}
- WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
+ WREG32(base_reg + 0xE80, 0x23 | (input->id << 16));
} else {
WREG32(base_reg + 0xE80, 4);
WREG32(base_reg + 0xD64, 0);
@@ -634,7 +634,7 @@ static int gaudi_config_etr(struct hl_device *hdev,
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
/* make ETR not privileged */
val = FIELD_PREP(
PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 9a706c5980ef..0d3240f1f7d7 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -1448,7 +1448,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
u32 pb_addr, mask;
u8 word_offset;
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
@@ -9135,7 +9135,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
u32 pb_addr, mask;
u8 word_offset;
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
@@ -12818,7 +12818,7 @@ static void gaudi_init_protection_bits(struct hl_device *hdev)
* secured
*/
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE);
gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE);
gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE);
@@ -13023,7 +13023,7 @@ void gaudi_init_security(struct hl_device *hdev)
* property configuration of MME SBAB and ACC to be non-privileged and
* non-secured
*/
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
WREG32(mmMME0_SBAB_PROT, 0x2);
WREG32(mmMME0_ACC_PROT, 0x2);
WREG32(mmMME1_SBAB_PROT, 0x2);
@@ -13032,11 +13032,12 @@ void gaudi_init_security(struct hl_device *hdev)
WREG32(mmMME2_ACC_PROT, 0x2);
WREG32(mmMME3_SBAB_PROT, 0x2);
WREG32(mmMME3_ACC_PROT, 0x2);
- }
- /* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */
- if (hdev->asic_prop.fw_security_disabled)
+ /*
+ * On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB
+ */
WREG32(0xC01B28, 0x1);
+ }
gaudi_init_range_registers_lbw(hdev);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index e0ad2a269779..755e08cf2ecc 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -87,6 +87,7 @@
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
+#define GOYA_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@@ -354,7 +355,7 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-int goya_get_fixed_properties(struct hl_device *hdev)
+int goya_set_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
@@ -460,8 +461,10 @@ int goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
- prop->fw_security_status_valid = false;
+ prop->fw_cpu_boot_dev_sts0_valid = false;
+ prop->fw_cpu_boot_dev_sts1_valid = false;
prop->hard_reset_done_by_fw = false;
+ prop->gic_interrupts_enable = true;
return 0;
}
@@ -531,10 +534,8 @@ static int goya_init_iatu(struct hl_device *hdev)
struct hl_outbound_pci_region outbound_region;
int rc;
- if (hdev->asic_prop.iatu_done_by_fw) {
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+ if (hdev->asic_prop.iatu_done_by_fw)
return 0;
- }
/* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
inbound_region.mode = PCI_BAR_MATCH_MODE;
@@ -586,7 +587,7 @@ static int goya_early_init(struct hl_device *hdev)
u32 fw_boot_status, val;
int rc;
- rc = goya_get_fixed_properties(hdev);
+ rc = goya_set_fixed_properties(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to get fixed properties\n");
return rc;
@@ -618,7 +619,7 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
- if (!hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_enabled) {
hdev->asic_prop.iatu_done_by_fw = true;
goto pci_init;
}
@@ -642,8 +643,10 @@ pci_init:
* version to determine whether we run with a security-enabled firmware
*/
rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
- mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
- GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
+ mmCPU_BOOT_DEV_STS0,
+ mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
+ mmCPU_BOOT_ERR1,
+ GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
hdev->asic_funcs->hw_fini(hdev, true);
@@ -723,7 +726,15 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
int rc;
- if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_enabled) {
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
+ pll_freq_arr);
+
+ if (rc)
+ return;
+
+ freq = pll_freq_arr[1];
+ } else {
div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
nr = RREG32(mmPSOC_PCI_PLL_NR);
@@ -750,14 +761,6 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
div_sel);
freq = 0;
}
- } else {
- rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
- pll_freq_arr);
-
- if (rc)
- return;
-
- freq = pll_freq_arr[1];
}
prop->psoc_timestamp_frequency = freq;
@@ -849,6 +852,39 @@ void goya_late_fini(struct hl_device *hdev)
hdev->hl_chip_info->info = NULL;
}
+static void goya_set_pci_memory_regions(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_mem_region *region;
+
+ /* CFG */
+ region = &hdev->pci_mem_region[PCI_REGION_CFG];
+ region->region_base = CFG_BASE;
+ region->region_size = CFG_SIZE;
+ region->offset_in_bar = CFG_BASE - SRAM_BASE_ADDR;
+ region->bar_size = CFG_BAR_SIZE;
+ region->bar_id = SRAM_CFG_BAR_ID;
+ region->used = 1;
+
+ /* SRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_SRAM];
+ region->region_base = SRAM_BASE_ADDR;
+ region->region_size = SRAM_SIZE;
+ region->offset_in_bar = 0;
+ region->bar_size = CFG_BAR_SIZE;
+ region->bar_id = SRAM_CFG_BAR_ID;
+ region->used = 1;
+
+ /* DRAM */
+ region = &hdev->pci_mem_region[PCI_REGION_DRAM];
+ region->region_base = DRAM_PHYS_BASE;
+ region->region_size = hdev->asic_prop.dram_size;
+ region->offset_in_bar = 0;
+ region->bar_size = prop->dram_pci_bar_size;
+ region->bar_id = DDR_BAR_ID;
+ region->used = 1;
+}
+
/*
* goya_sw_init - Goya software initialization code
*
@@ -918,6 +954,9 @@ static int goya_sw_init(struct hl_device *hdev)
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
hdev->supports_soft_reset = true;
+ hdev->allow_external_soft_reset = true;
+
+ goya_set_pci_memory_regions(hdev);
return 0;
@@ -1263,8 +1302,11 @@ int goya_init_cpu_queues(struct hl_device *hdev)
}
/* update FW application security bits */
- if (prop->fw_security_status_valid)
- prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+ if (prop->fw_cpu_boot_dev_sts0_valid)
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
+
+ if (prop->fw_cpu_boot_dev_sts1_valid)
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
goya->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
@@ -2402,47 +2444,67 @@ static int goya_load_boot_fit_to_device(struct hl_device *hdev)
return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
}
-/*
- * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
- * The version string should be located by that offset.
- */
-static int goya_read_device_fw_version(struct hl_device *hdev,
- enum hl_fw_component fwc)
-{
- const char *name;
- u32 ver_off;
- char *dest;
-
- switch (fwc) {
- case FW_COMP_UBOOT:
- ver_off = RREG32(mmUBOOT_VER_OFFSET);
- dest = hdev->asic_prop.uboot_ver;
- name = "U-Boot";
- break;
- case FW_COMP_PREBOOT:
- ver_off = RREG32(mmPREBOOT_VER_OFFSET);
- dest = hdev->asic_prop.preboot_ver;
- name = "Preboot";
- break;
- default:
- dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
- return -EIO;
- }
+static void goya_init_dynamic_firmware_loader(struct hl_device *hdev)
+{
+ struct dynamic_fw_load_mgr *dynamic_loader;
+ struct cpu_dyn_regs *dyn_regs;
- ver_off &= ~((u32)SRAM_BASE_ADDR);
+ dynamic_loader = &hdev->fw_loader.dynamic_loader;
- if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
- memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
- VERSION_MAX_LEN);
- } else {
- dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
- name, ver_off);
- strcpy(dest, "unavailable");
+ /*
+ * Here we update the initial values of a few specific dynamic regs (they
+ * have to be hard-coded before the first descriptor is read from FW).
+ * In later stages of the protocol these values are updated automatically
+ * by reading the FW descriptor, so the data there is always up-to-date.
+ */
+ dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
+ dyn_regs->kmd_msg_to_cpu =
+ cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
+ dyn_regs->cpu_cmd_status_to_host =
+ cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
- return -EIO;
- }
+ dynamic_loader->wait_for_bl_timeout = GOYA_WAIT_FOR_BL_TIMEOUT_USEC;
+}
- return 0;
+static void goya_init_static_firmware_loader(struct hl_device *hdev)
+{
+ struct static_fw_load_mgr *static_loader;
+
+ static_loader = &hdev->fw_loader.static_loader;
+
+ static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
+ static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
+ static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
+ static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
+ static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
+ static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
+ static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
+ static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
+ static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
+ static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
+ static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
+ static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
+}
+
+static void goya_init_firmware_loader(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct fw_load_mgr *fw_loader = &hdev->fw_loader;
+
+ /* fill common fields */
+ fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
+ fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
+ fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
+ fw_loader->boot_fit_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
+ fw_loader->skip_bmc = false;
+ fw_loader->sram_bar_id = SRAM_CFG_BAR_ID;
+ fw_loader->dram_bar_id = DDR_BAR_ID;
+
+ if (prop->dynamic_fw_load)
+ goya_init_dynamic_firmware_loader(hdev);
+ else
+ goya_init_static_firmware_loader(hdev);
}
static int goya_init_cpu(struct hl_device *hdev)
@@ -2466,12 +2528,7 @@ static int goya_init_cpu(struct hl_device *hdev)
return -EIO;
}
- rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
- mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
- mmCPU_CMD_STATUS_TO_HOST,
- mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
- false, GOYA_CPU_TIMEOUT_USEC,
- GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
+ rc = hl_fw_init_cpu(hdev);
if (rc)
return rc;
@@ -2881,7 +2938,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
*dma_handle = hdev->asic_prop.sram_base_address;
- base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
+ base = (__force void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
switch (queue_id) {
case GOYA_QUEUE_ID_MME:
@@ -3270,6 +3327,7 @@ already_pinned:
return 0;
unpin_memory:
+ list_del(&userptr->job_node);
hl_unpin_host_memory(hdev, userptr);
free_userptr:
kfree(userptr);
@@ -5169,54 +5227,13 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
}
static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
- bool is_hard, u32 asid, u64 va, u64 size)
+ bool is_hard, u32 flags,
+ u32 asid, u64 va, u64 size)
{
- struct goya_device *goya = hdev->asic_specific;
- u32 status, timeout_usec, inv_data, pi;
- int rc;
-
- if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
- hdev->hard_reset_pending)
- return 0;
-
- /* no need in L1 only invalidation in Goya */
- if (!is_hard)
- return 0;
-
- if (hdev->pldm)
- timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
- else
- timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-
- /*
- * TODO: currently invalidate entire L0 & L1 as in regular hard
- * invalidation. Need to apply invalidation of specific cache lines with
- * mask of ASID & VA & size.
- * Note that L1 with be flushed entirely in any case.
+ /* Treat as invalidate all because there is no range invalidation
+ * in Goya
*/
-
- /* L0 & L1 invalidation */
- inv_data = RREG32(mmSTLB_CACHE_INV);
- /* PI is 8 bit */
- pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
- WREG32(mmSTLB_CACHE_INV,
- (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
-
- rc = hl_poll_timeout(
- hdev,
- mmSTLB_INV_CONSUMER_INDEX,
- status,
- status == pi,
- 1000,
- timeout_usec);
-
- if (rc) {
- dev_err_ratelimited(hdev->dev,
- "MMU cache invalidation timeout\n");
- hl_device_reset(hdev, HL_RESET_HARD);
- }
-
- return rc;
+ return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
}
int goya_send_heartbeat(struct hl_device *hdev)
@@ -5239,7 +5256,9 @@ int goya_cpucp_info_get(struct hl_device *hdev)
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
+ rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
+ mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
+ mmCPU_BOOT_ERR1);
if (rc)
return rc;
@@ -5385,6 +5404,11 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
return hl_fw_get_eeprom_data(hdev, data, max_size);
}
+static void goya_cpu_init_scrambler_dram(struct hl_device *hdev)
+{
+
+}
+
static int goya_ctx_init(struct hl_ctx *ctx)
{
if (ctx->asid != HL_KERNEL_ASID_ID)
@@ -5565,7 +5589,6 @@ static const struct hl_asic_funcs goya_funcs = {
.ctx_fini = goya_ctx_fini,
.get_clk_rate = goya_get_clk_rate,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
- .read_device_fw_version = goya_read_device_fw_version,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
.get_signal_cb_size = goya_get_signal_cb_size,
@@ -5584,7 +5607,9 @@ static const struct hl_asic_funcs goya_funcs = {
.get_hw_block_id = goya_get_hw_block_id,
.hw_block_mmap = goya_block_mmap,
.enable_events_from_fw = goya_enable_events_from_fw,
- .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx
+ .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
+ .init_firmware_loader = goya_init_firmware_loader,
+ .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index ef8c6c8b5c8d..0b05da614729 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -168,7 +168,7 @@ struct goya_device {
u8 device_cpu_mmu_mappings_done;
};
-int goya_get_fixed_properties(struct hl_device *hdev);
+int goya_set_fixed_properties(struct hl_device *hdev);
int goya_mmu_init(struct hl_device *hdev);
void goya_init_dma_qmans(struct hl_device *hdev);
void goya_init_mme_qmans(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 6b7445cca580..c55c100fdd24 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -434,7 +434,7 @@ static int goya_config_etr(struct hl_device *hdev,
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
- if (hdev->asic_prop.fw_security_disabled) {
+ if (!hdev->asic_prop.fw_security_enabled) {
/* make ETR not privileged */
val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
/* make ETR non-secured (inverted logic) */
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 27cd0ba99aa3..80b1d5a9d9f1 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -84,6 +84,20 @@ struct hl_eq_sm_sei_data {
__u8 pad[3];
};
+enum hl_fw_alive_severity {
+ FW_ALIVE_SEVERITY_MINOR,
+ FW_ALIVE_SEVERITY_CRITICAL
+};
+
+struct hl_eq_fw_alive {
+ __le64 uptime_seconds;
+ __le32 process_id;
+ __le32 thread_id;
+ /* enum hl_fw_alive_severity */
+ __u8 severity;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
@@ -91,6 +105,7 @@ struct hl_eq_entry {
struct hl_eq_hbm_ecc_data hbm_ecc_data;
struct hl_eq_sm_sei_data sm_sei_data;
struct cpucp_pkt_sync_err pkt_sync_err;
+ struct hl_eq_fw_alive fw_alive;
__le64 data[7];
};
};
@@ -103,11 +118,16 @@ struct hl_eq_entry {
#define EQ_CTL_EVENT_TYPE_SHIFT 16
#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
+#define EQ_CTL_INDEX_SHIFT 0
+#define EQ_CTL_INDEX_MASK 0x0000FFFF
+
enum pq_init_status {
PQ_INIT_STATUS_NA = 0,
PQ_INIT_STATUS_READY_FOR_CP,
PQ_INIT_STATUS_READY_FOR_HOST,
- PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI,
+ PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR,
+ PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR
};
/*
@@ -384,6 +404,20 @@ enum cpucp_packet_id {
#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
struct cpucp_packet {
union {
__le64 value; /* For SET packets */
@@ -425,6 +459,12 @@ struct cpucp_packet {
/* For get CpuCP info/EEPROM data/NIC info */
__le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
};
__le32 reserved;
@@ -629,6 +669,8 @@ struct cpucp_security_info {
* @card_name: card name that will be displayed in HWMON subsystem on the host
* @sec_info: security information
* @pll_map: Bit map of supported PLLs for current ASIC version.
+ * @mme_binning_mask: MME binning mask
+ * (0 = functional, 1 = binned)
*/
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -651,6 +693,7 @@ struct cpucp_info {
struct cpucp_security_info sec_info;
__le32 reserved6;
__u8 pll_map[PLL_MAP_LEN];
+ __le64 mme_binning_mask;
};
struct cpucp_mac_addr {
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e0a259e0495c..fa8a5ad2d438 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -8,7 +8,7 @@
#ifndef HL_BOOT_IF_H
#define HL_BOOT_IF_H
-#define LKD_HARD_RESET_MAGIC 0xED7BD694
+#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */
#define HL_POWER9_HOST_MAGIC 0x1DA30009
#define BOOT_FIT_SRAM_OFFSET 0x200000
@@ -99,6 +99,7 @@
#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
+#define CPU_BOOT_ERR1_ENABLED (1 << 31)
/*
* BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
@@ -190,6 +191,24 @@
* PLLs.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from
+ * privileged entity. FW sets this status
+ * bit for the host. If this bit is set,
+ * the GIC cannot be accessed from the host.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running
+ * index for each new event sent to host.
+ * The host uses it to identify that the
+ * event waiting in the queue is a new
+ * one that has not been served before.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * prevent IRQs overriding each other.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -218,7 +237,11 @@
#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << 20)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << 21)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << 22)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << 31)
enum cpu_boot_status {
CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
@@ -264,46 +287,98 @@ enum cpu_msg_status {
/* communication registers mapping - consider ABI when changing */
struct cpu_dyn_regs {
- uint32_t cpu_pq_base_addr_low;
- uint32_t cpu_pq_base_addr_high;
- uint32_t cpu_pq_length;
- uint32_t cpu_pq_init_status;
- uint32_t cpu_eq_base_addr_low;
- uint32_t cpu_eq_base_addr_high;
- uint32_t cpu_eq_length;
- uint32_t cpu_eq_ci;
- uint32_t cpu_cq_base_addr_low;
- uint32_t cpu_cq_base_addr_high;
- uint32_t cpu_cq_length;
- uint32_t cpu_pf_pq_pi;
- uint32_t cpu_boot_dev_sts0;
- uint32_t cpu_boot_dev_sts1;
- uint32_t cpu_boot_err0;
- uint32_t cpu_boot_err1;
- uint32_t cpu_boot_status;
- uint32_t fw_upd_sts;
- uint32_t fw_upd_cmd;
- uint32_t fw_upd_pending_sts;
- uint32_t fuse_ver_offset;
- uint32_t preboot_ver_offset;
- uint32_t uboot_ver_offset;
- uint32_t hw_state;
- uint32_t kmd_msg_to_cpu;
- uint32_t cpu_cmd_status_to_host;
- uint32_t reserved1[32]; /* reserve for future use */
+ __le32 cpu_pq_base_addr_low;
+ __le32 cpu_pq_base_addr_high;
+ __le32 cpu_pq_length;
+ __le32 cpu_pq_init_status;
+ __le32 cpu_eq_base_addr_low;
+ __le32 cpu_eq_base_addr_high;
+ __le32 cpu_eq_length;
+ __le32 cpu_eq_ci;
+ __le32 cpu_cq_base_addr_low;
+ __le32 cpu_cq_base_addr_high;
+ __le32 cpu_cq_length;
+ __le32 cpu_pf_pq_pi;
+ __le32 cpu_boot_dev_sts0;
+ __le32 cpu_boot_dev_sts1;
+ __le32 cpu_boot_err0;
+ __le32 cpu_boot_err1;
+ __le32 cpu_boot_status;
+ __le32 fw_upd_sts;
+ __le32 fw_upd_cmd;
+ __le32 fw_upd_pending_sts;
+ __le32 fuse_ver_offset;
+ __le32 preboot_ver_offset;
+ __le32 uboot_ver_offset;
+ __le32 hw_state;
+ __le32 kmd_msg_to_cpu;
+ __le32 cpu_cmd_status_to_host;
+ union {
+ __le32 gic_host_irq_ctrl;
+ __le32 gic_host_pi_upd_irq;
+ };
+ __le32 gic_tpc_qm_irq_ctrl;
+ __le32 gic_mme_qm_irq_ctrl;
+ __le32 gic_dma_qm_irq_ctrl;
+ __le32 gic_nic_qm_irq_ctrl;
+ __le32 gic_dma_core_irq_ctrl;
+ __le32 gic_host_halt_irq;
+ __le32 gic_host_ints_irq;
+ __le32 reserved1[24]; /* reserve for future use */
};
+/* TODO: remove the desc magic after the code is updated to use message */
/* HCDM - Habana Communications Descriptor Magic */
#define HL_COMMS_DESC_MAGIC 0x4843444D
#define HL_COMMS_DESC_VER 1
+/* HCMv - Habana Communications Message + header version */
+#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
+#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00
+#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF
+
+#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
+#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V1
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
+ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
+ HL_COMMS_MSG_MAGIC_VALUE)
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \
+ (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \
+ (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \
+ HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver)))
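+
+/*
+ * For example, HL_COMMS_MSG_MAGIC_V1 evaluates to 0x48434D01, so
+ * HL_COMMS_MSG_MAGIC_VALIDATE(0x48434D01, 1) holds: the upper 24 bits
+ * match HL_COMMS_MSG_MAGIC_VALUE and the version byte (1) is >= 1.
+ */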
+
+enum comms_msg_type {
+ HL_COMMS_DESC_TYPE = 0,
+ HL_COMMS_RESET_CAUSE_TYPE = 1,
+};
+
+/* TODO: remove this struct after the code is updated to use message */
/* this is the comms descriptor header - meta data */
struct comms_desc_header {
- uint32_t magic; /* magic for validation */
- uint32_t crc32; /* CRC32 of the descriptor w/o header */
- uint16_t size; /* size of the descriptor w/o header */
- uint8_t version; /* descriptor version */
- uint8_t reserved[5]; /* pad to 64 bit */
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the descriptor w/o header */
+ __le16 size; /* size of the descriptor w/o header */
+ __u8 version; /* descriptor version */
+ __u8 reserved[5]; /* pad to 64 bit */
+};
+
+/* this is the comms message header - meta data */
+struct comms_msg_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the message w/o header */
+ __le16 size; /* size of the message w/o header */
+ __u8 version; /* message payload version */
+ __u8 type; /* message type */
+ __u8 reserved[4]; /* pad to 64 bit */
};
/* this is the main FW descriptor - consider ABI when changing */
@@ -314,7 +389,36 @@ struct lkd_fw_comms_desc {
char cur_fw_ver[VERSION_MAX_LEN];
/* can be used for 1 more version w/o ABI change */
char reserved0[VERSION_MAX_LEN];
- uint64_t img_addr; /* address for next FW component load */
+ __le64 img_addr; /* address for next FW component load */
+};
+
+enum comms_reset_cause {
+ HL_RESET_CAUSE_UNKNOWN = 0,
+ HL_RESET_CAUSE_HEARTBEAT = 1,
+ HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+ struct comms_msg_header header;
+ /* union for future expansions of new messages */
+ union {
+ struct {
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ /* address for next FW component load */
+ __le64 img_addr;
+ };
+ struct {
+ __u8 reset_cause;
+ };
+ };
};
/*
@@ -386,11 +490,11 @@ enum comms_cmd {
struct comms_command {
union { /* bit fields are only for FW use */
struct {
- unsigned int size :25; /* 32MB max. */
- unsigned int reserved :2;
+ u32 size :25; /* 32MB max. */
+ u32 reserved :2;
enum comms_cmd cmd :5; /* 32 commands */
};
- unsigned int val;
+ __le32 val;
};
};
@@ -449,11 +553,11 @@ enum comms_ram_types {
struct comms_status {
union { /* bit fields are only for FW use */
struct {
- unsigned int offset :26;
- unsigned int ram_type :2;
+ u32 offset :26;
+ enum comms_ram_types ram_type :2;
enum comms_sts status :4; /* 16 statuses */
};
- unsigned int val;
+ __le32 val;
};
};
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
index e8651abf84f2..d966bd4dfea6 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -252,10 +252,11 @@ enum gaudi_async_event_id {
GAUDI_EVENT_HBM3_SPI_0 = 407,
GAUDI_EVENT_HBM3_SPI_1 = 408,
GAUDI_EVENT_PSOC_GPIO_U16_0 = 421,
- GAUDI_EVENT_PI_UPDATE = 484,
- GAUDI_EVENT_HALT_MACHINE = 485,
- GAUDI_EVENT_INTS_REGISTER = 486,
- GAUDI_EVENT_SOFT_RESET = 487,
+ GAUDI_EVENT_NIC0_CS_DBG_DERR = 483,
+ GAUDI_EVENT_NIC1_CS_DBG_DERR = 487,
+ GAUDI_EVENT_NIC2_CS_DBG_DERR = 491,
+ GAUDI_EVENT_NIC3_CS_DBG_DERR = 495,
+ GAUDI_EVENT_NIC4_CS_DBG_DERR = 499,
GAUDI_EVENT_RAZWI_OR_ADC = 548,
GAUDI_EVENT_TPC0_QM = 572,
GAUDI_EVENT_TPC1_QM = 573,
@@ -303,6 +304,11 @@ enum gaudi_async_event_id {
GAUDI_EVENT_NIC3_QP1 = 619,
GAUDI_EVENT_NIC4_QP0 = 620,
GAUDI_EVENT_NIC4_QP1 = 621,
+ GAUDI_EVENT_PI_UPDATE = 635,
+ GAUDI_EVENT_HALT_MACHINE = 636,
+ GAUDI_EVENT_INTS_REGISTER = 637,
+ GAUDI_EVENT_SOFT_RESET = 638,
+ GAUDI_EVENT_FW_ALIVE_S = 645,
GAUDI_EVENT_DEV_RESET_REQ = 646,
GAUDI_EVENT_PKT_QUEUE_OUT_SYNC = 647,
GAUDI_EVENT_FIX_POWER_ENV_S = 658,
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
index 3dc79c131805..479b6b038254 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
@@ -507,23 +507,28 @@ static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
{ .fc_id = 480, .cpu_id = 329, .valid = 0, .name = "" },
{ .fc_id = 481, .cpu_id = 330, .valid = 0, .name = "" },
{ .fc_id = 482, .cpu_id = 331, .valid = 0, .name = "" },
- { .fc_id = 483, .cpu_id = 332, .valid = 0, .name = "" },
- { .fc_id = 484, .cpu_id = 333, .valid = 1, .name = "PI_UPDATE" },
- { .fc_id = 485, .cpu_id = 334, .valid = 1, .name = "HALT_MACHINE" },
- { .fc_id = 486, .cpu_id = 335, .valid = 1, .name = "INTS_REGISTER" },
- { .fc_id = 487, .cpu_id = 336, .valid = 1, .name = "SOFT_RESET" },
+ { .fc_id = 483, .cpu_id = 332, .valid = 1,
+ .name = "NIC0_CS_DBG_DERR" },
+ { .fc_id = 484, .cpu_id = 333, .valid = 0, .name = "" },
+ { .fc_id = 485, .cpu_id = 334, .valid = 0, .name = "" },
+ { .fc_id = 486, .cpu_id = 335, .valid = 0, .name = "" },
+ { .fc_id = 487, .cpu_id = 336, .valid = 1,
+ .name = "NIC1_CS_DBG_DERR" },
{ .fc_id = 488, .cpu_id = 337, .valid = 0, .name = "" },
{ .fc_id = 489, .cpu_id = 338, .valid = 0, .name = "" },
{ .fc_id = 490, .cpu_id = 339, .valid = 0, .name = "" },
- { .fc_id = 491, .cpu_id = 340, .valid = 0, .name = "" },
+ { .fc_id = 491, .cpu_id = 340, .valid = 1,
+ .name = "NIC2_CS_DBG_DERR" },
{ .fc_id = 492, .cpu_id = 341, .valid = 0, .name = "" },
{ .fc_id = 493, .cpu_id = 342, .valid = 0, .name = "" },
{ .fc_id = 494, .cpu_id = 343, .valid = 0, .name = "" },
- { .fc_id = 495, .cpu_id = 344, .valid = 0, .name = "" },
+ { .fc_id = 495, .cpu_id = 344, .valid = 1,
+ .name = "NIC3_CS_DBG_DERR" },
{ .fc_id = 496, .cpu_id = 345, .valid = 0, .name = "" },
{ .fc_id = 497, .cpu_id = 346, .valid = 0, .name = "" },
{ .fc_id = 498, .cpu_id = 347, .valid = 0, .name = "" },
- { .fc_id = 499, .cpu_id = 348, .valid = 0, .name = "" },
+ { .fc_id = 499, .cpu_id = 348, .valid = 1,
+ .name = "NIC4_CS_DBG_DERR" },
{ .fc_id = 500, .cpu_id = 349, .valid = 0, .name = "" },
{ .fc_id = 501, .cpu_id = 350, .valid = 0, .name = "" },
{ .fc_id = 502, .cpu_id = 351, .valid = 0, .name = "" },
@@ -659,17 +664,17 @@ static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
{ .fc_id = 632, .cpu_id = 481, .valid = 0, .name = "" },
{ .fc_id = 633, .cpu_id = 482, .valid = 0, .name = "" },
{ .fc_id = 634, .cpu_id = 483, .valid = 0, .name = "" },
- { .fc_id = 635, .cpu_id = 484, .valid = 0, .name = "" },
- { .fc_id = 636, .cpu_id = 485, .valid = 0, .name = "" },
- { .fc_id = 637, .cpu_id = 486, .valid = 0, .name = "" },
- { .fc_id = 638, .cpu_id = 487, .valid = 0, .name = "" },
+ { .fc_id = 635, .cpu_id = 484, .valid = 1, .name = "PI_UPDATE" },
+ { .fc_id = 636, .cpu_id = 485, .valid = 1, .name = "HALT_MACHINE" },
+ { .fc_id = 637, .cpu_id = 486, .valid = 1, .name = "INTS_REGISTER" },
+ { .fc_id = 638, .cpu_id = 487, .valid = 1, .name = "SOFT_RESET" },
{ .fc_id = 639, .cpu_id = 488, .valid = 0, .name = "" },
{ .fc_id = 640, .cpu_id = 489, .valid = 0, .name = "" },
{ .fc_id = 641, .cpu_id = 490, .valid = 0, .name = "" },
{ .fc_id = 642, .cpu_id = 491, .valid = 0, .name = "" },
{ .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" },
{ .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" },
- { .fc_id = 645, .cpu_id = 494, .valid = 0, .name = "" },
+ { .fc_id = 645, .cpu_id = 494, .valid = 1, .name = "FW_ALIVE_S" },
{ .fc_id = 646, .cpu_id = 495, .valid = 1, .name = "DEV_RESET_REQ" },
{ .fc_id = 647, .cpu_id = 496, .valid = 1,
.name = "PKT_QUEUE_OUT_SYNC" },
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
index a9f51f9f9e92..34ca4fe50d91 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -20,6 +20,9 @@
#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
#define LINUX_FW_OFFSET 0x800000 /* 8MB in HBM */
+/* HBM thermal delta in [Deg] added to composite (CTemp) */
+#define HBM_TEMP_ADJUST_COEFF 6
+
enum gaudi_nic_axi_error {
RXB,
RXE,
@@ -27,6 +30,7 @@ enum gaudi_nic_axi_error {
TXE,
QPC_RESP,
NON_AXI_ERR,
+ TMR,
};
/*
@@ -42,6 +46,48 @@ struct eq_nic_sei_event {
__u8 pad[6];
};
+/*
+ * struct gaudi_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g. NAK.
+ * @high_ber_reinit: link reinit count due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events
+ * @high_ber_cnt: high ber events
+ */
+struct gaudi_nic_status {
+ __u32 port;
+ __u32 bad_format_cnt;
+ __u32 responder_out_of_sequence_psn_cnt;
+ __u32 high_ber_reinit;
+ __u32 correctable_err_cnt;
+ __u32 uncorrectable_err_cnt;
+ __u32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __u32 timeout_retransmission_cnt;
+ __u32 high_ber_cnt;
+};
+
+struct gaudi_flops_2_data {
+ union {
+ struct {
+ __u32 spsram_init_done : 1;
+ __u32 reserved : 31;
+ };
+ __u32 data;
+ };
+};
+
#define GAUDI_PLL_FREQ_LOW 200000000 /* 200 MHz */
#endif /* GAUDI_FW_IF_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index b53aeda9a982..9aea7e996654 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -66,7 +66,8 @@
#define PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
(FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
(FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \
- (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF)))
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK, 0x1)))
#define HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
(FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
@@ -76,7 +77,8 @@
#define HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
(FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
(FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
- (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK, 0x1)))
#define TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
(FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
@@ -86,7 +88,8 @@
#define TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
(FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
(FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
- (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK, 0x1)))
#define MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
(FIELD_PREP(MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
@@ -96,7 +99,8 @@
#define MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
(FIELD_PREP(MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
(FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
- (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)) | \
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK, 0x1)))
#define NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
@@ -106,7 +110,8 @@
#define NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \
- (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF)))
+ (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK, 0x1)))
#define QMAN_CGM1_PWR_GATE_EN (FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA))
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index 137afedf5f15..d95d4162ae2c 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -12,6 +12,16 @@
* PSOC scratch-pad registers
*/
#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+/* TODO: remove mmGIC_HOST_IRQ_CTRL_POLL_REG */
+#define mmGIC_HOST_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmGIC_HOST_PI_UPD_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmGIC_TPC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmGIC_MME_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmGIC_DMA_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmGIC_NIC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmGIC_DMA_CR_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmGIC_HOST_HALT_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmGIC_HOST_INTS_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
#define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index fea3ae9d8686..8d00df9243c4 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -693,6 +693,8 @@ static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
int bar;
unsigned long off;
+ u8 pci_rev_id;
+ int rc;
/* map the memory mapped i/o registers */
hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
@@ -702,7 +704,13 @@ static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
}
/* map the adapter shared memory region */
- if (pdev->subsystem_device == 0x00E4) {
+ rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev_id);
+ if (rc != 0) {
+ dev_err(&pdev->dev, "Error reading PCI rev id: %d\n", rc);
+ goto out;
+ }
+
+ if (pci_rev_id >= PCI_REV_ID_NECHES) {
bar = 5;
/* Last 8k is reserved for CCBs */
off = pci_resource_len(pdev, bar) - 0x2000;
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index f69ff645cac9..d57c34680b09 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -10,6 +10,9 @@
#define ILO_NAME "hpilo"
+/* iLO ASIC PCI revision id */
+#define PCI_REV_ID_NECHES 7
+
/* max number of open channel control blocks per device, hw limited to 32 */
#define MAX_CCB 24
/* min number of open channel control blocks per device, hw limited to 32 */
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 4f5f3bdc814d..59c9a0d95659 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -9,6 +9,7 @@
*/
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include "ibmasm.h"
#include "dot_command.h"
#include "lowlevel.h"
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 4edad6c445d3..dc8a06c06c63 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -111,7 +111,7 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
result = ibmasm_init_remote_input_dev(sp);
if (result) {
dev_err(sp->dev, "Failed to initialize remote queue\n");
- goto error_send_message;
+ goto error_init_remote;
}
result = ibmasm_send_driver_vpd(sp);
@@ -131,8 +131,9 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
error_send_message:
- disable_sp_interrupts(sp->base_address);
ibmasm_free_remote_input_dev(sp);
+error_init_remote:
+ disable_sp_interrupts(sp->base_address);
free_irq(sp->irq, (void *)sp);
error_request_irq:
iounmap(sp->base_address);
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index 8d364462aeea..ec4e78ec5a67 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -43,7 +43,7 @@
#define REMOTE_BUTTON_MIDDLE 0x02
#define REMOTE_BUTTON_RIGHT 0x04
-/* size of keysym/keycode translation matricies */
+/* size of keysym/keycode translation matrices */
#define XLATE_SIZE 256
struct mouse_input {
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 5eaf74447ca1..0f54730c7ed5 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -198,7 +198,7 @@ static int lattice_ecp3_probe(struct spi_device *spi)
spi_set_drvdata(spi, data);
init_completion(&data->fw_loaded);
- err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
FIRMWARE_NAME, &spi->dev,
GFP_KERNEL, spi, firmware_load);
if (err) {
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a164896dc6d4..88c218a9f8b3 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -161,6 +161,9 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
if (*p == 0)
val = 0x87654321;
*p = val;
+
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
void lkdtm_SOFTLOCKUP(void)
@@ -300,8 +303,10 @@ void lkdtm_CORRUPT_LIST_ADD(void)
if (target[0] == NULL && target[1] == NULL)
pr_err("Overwrite did not happen, but no BUG?!\n");
- else
+ else {
pr_err("list_add() corruption not detected!\n");
+ pr_expected_config(CONFIG_DEBUG_LIST);
+ }
}
void lkdtm_CORRUPT_LIST_DEL(void)
@@ -325,8 +330,10 @@ void lkdtm_CORRUPT_LIST_DEL(void)
if (target[0] == NULL && target[1] == NULL)
pr_err("Overwrite did not happen, but no BUG?!\n");
- else
+ else {
pr_err("list_del() corruption not detected!\n");
+ pr_expected_config(CONFIG_DEBUG_LIST);
+ }
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index e73ebdbfa806..c9aeddef1044 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -38,5 +38,6 @@ void lkdtm_CFI_FORWARD_PROTO(void)
func = (void *)lkdtm_increment_int;
func(&called_count);
- pr_info("Fail: survived mismatched prototype function call!\n");
+ pr_err("FAIL: survived mismatched prototype function call!\n");
+ pr_expected_config(CONFIG_CFI_CLANG);
}
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 8024b6a5cc7f..9dda87c6b54a 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/init.h>
#define DEFAULT_COUNT 10
@@ -120,11 +121,14 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(FORTIFY_OBJECT),
CRASHTYPE(FORTIFY_SUBOBJECT),
- CRASHTYPE(OVERWRITE_ALLOCATION),
+ CRASHTYPE(SLAB_LINEAR_OVERFLOW),
+ CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_INIT_ON_ALLOC),
+ CRASHTYPE(BUDDY_INIT_ON_ALLOC),
CRASHTYPE(SLAB_FREE_DOUBLE),
CRASHTYPE(SLAB_FREE_CROSS),
CRASHTYPE(SLAB_FREE_PAGE),
@@ -177,9 +181,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(STACKLEAK_ERASING),
CRASHTYPE(CFI_FORWARD_PROTO),
CRASHTYPE(FORTIFIED_STRSCPY),
-#ifdef CONFIG_X86_32
CRASHTYPE(DOUBLE_FAULT),
-#endif
#ifdef CONFIG_PPC_BOOK3S_64
CRASHTYPE(PPC_SLB_MULTIHIT),
#endif
@@ -399,6 +401,56 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
return count;
}
+#ifndef MODULE
+/*
+ * To avoid needing to export parse_args(), just don't use this code
+ * when LKDTM is built as a module.
+ */
+struct check_cmdline_args {
+ const char *param;
+ int value;
+};
+
+static int lkdtm_parse_one(char *param, char *val,
+ const char *unused, void *arg)
+{
+ struct check_cmdline_args *args = arg;
+
+ /* short circuit if we already found a value. */
+ if (args->value != -ESRCH)
+ return 0;
+ if (strncmp(param, args->param, strlen(args->param)) == 0) {
+ bool bool_result;
+ int ret;
+
+ ret = kstrtobool(val, &bool_result);
+ if (ret == 0)
+ args->value = bool_result;
+ }
+ return 0;
+}
+
+int lkdtm_check_bool_cmdline(const char *param)
+{
+ char *command_line;
+ struct check_cmdline_args args = {
+ .param = param,
+ .value = -ESRCH,
+ };
+
+ command_line = kstrdup(saved_command_line, GFP_KERNEL);
+ if (!command_line)
+ return -ENOMEM;
+
+ parse_args("Setting sysctl args", command_line,
+ NULL, 0, -1, -1, &args, lkdtm_parse_one);
+
+ kfree(command_line);
+
+ return args.value;
+}
+#endif
+
static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
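(For context, not part of the diff above: lkdtm_check_bool_cmdline() duplicates saved_command_line and walks it with parse_args(), so a caller only ever sees three outcomes: 1 or 0 when the boolean parameter appeared on the kernel command line, -ESRCH when it was absent, and -ENOMEM if the temporary copy could not be allocated. A minimal, hypothetical caller might look like this sketch; example_report_init_on_free() is invented for illustration.)

/* Illustrative sketch only; example_report_init_on_free() is not in the patch. */
static void example_report_init_on_free(void)
{
	switch (lkdtm_check_bool_cmdline("init_on_free")) {
	case 1:
		pr_info("booted with init_on_free=1\n");
		break;
	case 0:
		pr_info("booted with init_on_free=0\n");
		break;
	case -ESRCH:
		pr_info("init_on_free not on the command line; Kconfig default applies\n");
		break;
	default:
		pr_warn("could not inspect the command line\n");
		break;
	}
}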
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index faf29cf04baa..0f51d31b57ca 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -76,7 +76,8 @@ void lkdtm_FORTIFIED_STRSCPY(void)
*/
strscpy(dst, src, strlen(src));
- pr_warn("FAIL: No overflow in above strscpy()\n");
+ pr_err("FAIL: strscpy() overflow not detected!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
}
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 1323bc16f113..3d9aae5821a0 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -5,6 +5,7 @@
*/
#include "lkdtm.h"
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/sched.h>
static struct kmem_cache *double_free_cache;
@@ -12,17 +13,36 @@ static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;
/*
+ * If there aren't guard pages, it's likely that a consecutive allocation will
+ * let us overflow into the second allocation without overwriting something real.
+ */
+void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
+{
+ char *one, *two;
+
+ one = vzalloc(PAGE_SIZE);
+ two = vzalloc(PAGE_SIZE);
+
+ pr_info("Attempting vmalloc linear overflow ...\n");
+ memset(one, 0xAA, PAGE_SIZE + 1);
+
+ vfree(two);
+ vfree(one);
+}
+
+/*
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
*/
-void lkdtm_OVERWRITE_ALLOCATION(void)
+void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
if (!data)
return;
+ pr_info("Attempting slab linear overflow ...\n");
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
@@ -89,9 +109,10 @@ void lkdtm_READ_AFTER_FREE(void)
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
- BUG();
+ } else {
+ pr_err("FAIL: Memory was not poisoned!\n");
+ pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
}
- pr_info("Memory was not poisoned\n");
kfree(val);
}
@@ -145,13 +166,79 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void)
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
- BUG();
+ } else {
+ pr_err("FAIL: Buddy page was not poisoned!\n");
+ pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
+ }
+
+ kfree(val);
+}
+
+void lkdtm_SLAB_INIT_ON_ALLOC(void)
+{
+ u8 *first;
+ u8 *val;
+
+ first = kmalloc(512, GFP_KERNEL);
+ if (!first) {
+ pr_info("Unable to allocate 512 bytes the first time.\n");
+ return;
+ }
+
+ memset(first, 0xAB, 512);
+ kfree(first);
+
+ val = kmalloc(512, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate 512 bytes the second time.\n");
+ return;
+ }
+ if (val != first) {
+ pr_warn("Reallocation missed clobbered memory.\n");
}
- pr_info("Buddy page was not poisoned\n");
+ if (memchr(val, 0xAB, 512) == NULL) {
+ pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
+ } else {
+ pr_err("FAIL: Slab was not initialized\n");
+ pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
+ }
kfree(val);
}
+void lkdtm_BUDDY_INIT_ON_ALLOC(void)
+{
+ u8 *first;
+ u8 *val;
+
+ first = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!first) {
+ pr_info("Unable to allocate first free page\n");
+ return;
+ }
+
+ memset(first, 0xAB, PAGE_SIZE);
+ free_page((unsigned long)first);
+
+ val = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate second free page\n");
+ return;
+ }
+
+ if (val != first) {
+ pr_warn("Reallocation missed clobbered memory.\n");
+ }
+
+ if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
+ pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
+ } else {
+ pr_err("FAIL: Slab was not initialized\n");
+ pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
+ }
+ free_page((unsigned long)val);
+}
+
void lkdtm_SLAB_FREE_DOUBLE(void)
{
int *val;
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 99f90d3e5e9c..6a30b60519f3 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -6,6 +6,47 @@
#include <linux/kernel.h>
+#define pr_expected_config(kconfig) \
+{ \
+ if (IS_ENABLED(kconfig)) \
+ pr_err("Unexpected! This kernel was built with " #kconfig "=y\n"); \
+ else \
+ pr_warn("This is probably expected, since this kernel was built *without* " #kconfig "=y\n"); \
+}
+
+#ifndef MODULE
+int lkdtm_check_bool_cmdline(const char *param);
+#define pr_expected_config_param(kconfig, param) \
+{ \
+ if (IS_ENABLED(kconfig)) { \
+ switch (lkdtm_check_bool_cmdline(param)) { \
+ case 0: \
+ pr_warn("This is probably expected, since this kernel was built with " #kconfig "=y but booted with '" param "=N'\n"); \
+ break; \
+ case 1: \
+ pr_err("Unexpected! This kernel was built with " #kconfig "=y and booted with '" param "=Y'\n"); \
+ break; \
+ default: \
+ pr_err("Unexpected! This kernel was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ } \
+ } else { \
+ switch (lkdtm_check_bool_cmdline(param)) { \
+ case 0: \
+ pr_warn("This is probably expected, as kernel was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \
+ break; \
+ case 1: \
+ pr_err("Unexpected! This kernel was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \
+ break; \
+ default: \
+ pr_err("This is probably expected, since this kernel was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ break; \
+ } \
+ } \
+}
+#else
+#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
+#endif
+
/* bugs.c */
void __init lkdtm_bugs_init(int *recur_param);
void lkdtm_PANIC(void);
@@ -39,11 +80,14 @@ void lkdtm_FORTIFY_SUBOBJECT(void);
/* heap.c */
void __init lkdtm_heap_init(void);
void __exit lkdtm_heap_exit(void);
-void lkdtm_OVERWRITE_ALLOCATION(void);
+void lkdtm_VMALLOC_LINEAR_OVERFLOW(void);
+void lkdtm_SLAB_LINEAR_OVERFLOW(void);
void lkdtm_WRITE_AFTER_FREE(void);
void lkdtm_READ_AFTER_FREE(void);
void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
void lkdtm_READ_BUDDY_AFTER_FREE(void);
+void lkdtm_SLAB_INIT_ON_ALLOC(void);
+void lkdtm_BUDDY_INIT_ON_ALLOC(void);
void lkdtm_SLAB_FREE_DOUBLE(void);
void lkdtm_SLAB_FREE_CROSS(void);
void lkdtm_SLAB_FREE_PAGE(void);
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
index d1a5c0705be3..00db21ff115e 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -74,8 +74,8 @@ void lkdtm_STACKLEAK_ERASING(void)
end:
if (test_failed) {
- pr_err("FAIL: the thread stack is NOT properly erased\n");
- dump_stack();
+ pr_err("FAIL: the thread stack is NOT properly erased!\n");
+ pr_expected_config(CONFIG_GCC_PLUGIN_STACKLEAK);
} else {
pr_info("OK: the rest of the thread stack is properly erased\n");
}
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 15d220ef35a5..9161ce7ed47a 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -173,6 +173,8 @@ static void do_usercopy_heap_size(bool to_user)
goto free_user;
}
}
+ pr_err("FAIL: bad usercopy not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
@@ -248,6 +250,8 @@ static void do_usercopy_heap_whitelist(bool to_user)
goto free_user;
}
}
+ pr_err("FAIL: bad usercopy not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(user_alloc, PAGE_SIZE);
@@ -319,7 +323,8 @@ void lkdtm_USERCOPY_KERNEL(void)
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
- pr_err("FAIL: survived bad copy_to_user()\n");
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index d8e760b11ae3..67844089db21 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -498,7 +498,7 @@ static struct mei_fixup {
};
/**
- * mei_cldev_fixup - run fixup handlers
+ * mei_cl_bus_dev_fixup - run fixup handlers
*
* @cldev: me client device
*/
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 2cc370adb238..96f4e59c32a5 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -326,7 +326,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
}
/**
- * mei_tx_cb_queue - queue tx callback
+ * mei_tx_cb_enqueue - queue tx callback
*
* Locking: called under "dev->device_lock" lock
*
@@ -1726,12 +1726,15 @@ nortpm:
return rets;
}
-static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
+static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
- ext->type = MEI_EXT_HDR_VTAG;
- ext->ext_payload[0] = vtag;
- ext->length = mei_data2slots(sizeof(*ext));
- return ext->length;
+ struct mei_ext_hdr_vtag *vtag_hdr = ext;
+
+ vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
+ vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
+ vtag_hdr->vtag = vtag;
+ vtag_hdr->reserved = 0;
+ return vtag_hdr->hdr.length;
}
/**
@@ -1745,7 +1748,6 @@ static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
size_t hdr_len;
struct mei_ext_meta_hdr *meta;
- struct mei_ext_hdr *ext;
struct mei_msg_hdr *mei_hdr;
bool is_ext, is_vtag;
@@ -1764,7 +1766,7 @@ static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
hdr_len += sizeof(*meta);
if (is_vtag)
- hdr_len += sizeof(*ext);
+ hdr_len += sizeof(struct mei_ext_hdr_vtag);
setup_hdr:
mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
@@ -2250,7 +2252,7 @@ static void mei_cl_dma_free(struct mei_cl *cl)
}
/**
- * mei_cl_alloc_and_map - send client dma map request
+ * mei_cl_dma_alloc_and_map - send client dma map request
*
* @cl: host client
* @fp: pointer to file structure
@@ -2349,7 +2351,7 @@ out:
}
/**
- * mei_cl_unmap_and_free - send client dma unmap request
+ * mei_cl_dma_unmap - send client dma unmap request
*
* @cl: host client
* @fp: pointer to file structure
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index d0277c7fed10..99b5c1ecc444 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -853,7 +853,7 @@ out:
}
/**
- * mei_hbm_cl_flow_control_res - flow control response from me
+ * mei_hbm_cl_tx_flow_ctrl_creds_res - flow control response from me
*
* @dev: the device structure
* @fctrl: flow control response bus message
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
index 95b2d6d37f10..54e1c9526909 100644
--- a/drivers/misc/mei/hdcp/Kconfig
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -1,4 +1,3 @@
-
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2019, Intel Corporation. All rights reserved.
#
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index cda0829ac589..d3a6c0728645 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1380,7 +1380,7 @@ static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
.quirk_probe = mei_me_fw_type_nm
/**
- * mei_me_fw_sku_sps_4() - check for sps 4.0 sku
+ * mei_me_fw_type_sps_4() - check for sps 4.0 sku
*
* Read ME FW Status register to check for SPS Firmware.
* The SPS FW is only signaled in the PCI function 0.
@@ -1405,7 +1405,7 @@ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
.quirk_probe = mei_me_fw_type_sps_4
/**
- * mei_me_fw_sku_sps() - check for sps sku
+ * mei_me_fw_type_sps() - check for sps sku
*
* Read ME FW Status register to check for SPS Firmware.
* The SPS FW is only signaled in pci function 0
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b10606550613..dfd60c916da0 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -235,9 +235,8 @@ enum mei_ext_hdr_type {
struct mei_ext_hdr {
u8 type;
u8 length;
- u8 ext_payload[2];
- u8 hdr[];
-};
+ u8 data[];
+} __packed;
/**
* struct mei_ext_meta_hdr - extend header meta data
@@ -250,8 +249,21 @@ struct mei_ext_meta_hdr {
u8 count;
u8 size;
u8 reserved[2];
- struct mei_ext_hdr hdrs[];
-};
+ u8 hdrs[];
+} __packed;
+
+/**
+ * struct mei_ext_hdr_vtag - extend header for vtag
+ *
+ * @hdr: standard extend header
+ * @vtag: virtual tag
+ * @reserved: reserved
+ */
+struct mei_ext_hdr_vtag {
+ struct mei_ext_hdr hdr;
+ u8 vtag;
+ u8 reserved;
+} __packed;
/*
* Extended header iterator functions
@@ -266,7 +278,7 @@ struct mei_ext_meta_hdr {
*/
static inline struct mei_ext_hdr *mei_ext_begin(struct mei_ext_meta_hdr *meta)
{
- return meta->hdrs;
+ return (struct mei_ext_hdr *)meta->hdrs;
}
/**
@@ -284,7 +296,7 @@ static inline bool mei_ext_last(struct mei_ext_meta_hdr *meta,
}
/**
- *mei_ext_next - following extended header on the TLV list
+ * mei_ext_next - following extended header on the TLV list
*
* @ext: current extend header
*
@@ -295,7 +307,7 @@ static inline bool mei_ext_last(struct mei_ext_meta_hdr *meta,
*/
static inline struct mei_ext_hdr *mei_ext_next(struct mei_ext_hdr *ext)
{
- return (struct mei_ext_hdr *)(ext->hdr + (ext->length * 4));
+ return (struct mei_ext_hdr *)((u8 *)ext + (ext->length * 4));
}
/**
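(Aside, not part of the patch: with the new layout, the iterators above are meant to be used in a begin/next/last loop over the TLV area, each entry advancing by ext->length 4-byte slots. A hedged sketch of such a walk follows; example_dump_ext_hdrs() is invented for illustration.)

/* Sketch of a TLV walk over the extended headers; not from the patch. */
static void example_dump_ext_hdrs(struct mei_ext_meta_hdr *meta)
{
	struct mei_ext_hdr *ext = mei_ext_begin(meta);

	do {
		if (ext->type == MEI_EXT_HDR_VTAG) {
			struct mei_ext_hdr_vtag *vtag_hdr = (struct mei_ext_hdr_vtag *)ext;

			pr_debug("vtag: %u\n", vtag_hdr->vtag);
		}
		ext = mei_ext_next(ext);
	} while (!mei_ext_last(meta, ext));
}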
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index aab3ebfa9fc4..a67f4f2d33a9 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -123,13 +123,13 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
if (mei_hdr->extended) {
struct mei_ext_hdr *ext;
- struct mei_ext_hdr *vtag = NULL;
+ struct mei_ext_hdr_vtag *vtag_hdr = NULL;
ext = mei_ext_begin(meta);
do {
switch (ext->type) {
case MEI_EXT_HDR_VTAG:
- vtag = ext;
+ vtag_hdr = (struct mei_ext_hdr_vtag *)ext;
break;
case MEI_EXT_HDR_NONE:
fallthrough;
@@ -141,20 +141,20 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
ext = mei_ext_next(ext);
} while (!mei_ext_last(meta, ext));
- if (!vtag) {
+ if (!vtag_hdr) {
cl_dbg(dev, cl, "vtag not found in extended header.\n");
cb->status = -EPROTO;
goto discard;
}
- cl_dbg(dev, cl, "vtag: %d\n", vtag->ext_payload[0]);
- if (cb->vtag && cb->vtag != vtag->ext_payload[0]) {
+ cl_dbg(dev, cl, "vtag: %d\n", vtag_hdr->vtag);
+ if (cb->vtag && cb->vtag != vtag_hdr->vtag) {
cl_err(dev, cl, "mismatched tag: %d != %d\n",
- cb->vtag, vtag->ext_payload[0]);
+ cb->vtag, vtag_hdr->vtag);
cb->status = -EPROTO;
goto discard;
}
- cb->vtag = vtag->ext_payload[0];
+ cb->vtag = vtag_hdr->vtag;
}
if (!mei_cl_is_connected(cl)) {
@@ -331,7 +331,6 @@ int mei_irq_read_handler(struct mei_device *dev,
struct mei_ext_meta_hdr *meta_hdr = NULL;
struct mei_cl *cl;
int ret;
- u32 ext_meta_hdr_u32;
u32 hdr_size_left;
u32 hdr_size_ext;
int i;
@@ -367,14 +366,12 @@ int mei_irq_read_handler(struct mei_device *dev,
if (mei_hdr->extended) {
if (!dev->rd_msg_hdr[1]) {
- ext_meta_hdr_u32 = mei_read_hdr(dev);
- dev->rd_msg_hdr[1] = ext_meta_hdr_u32;
+ dev->rd_msg_hdr[1] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
- dev_dbg(dev->dev, "extended header is %08x\n",
- ext_meta_hdr_u32);
+ dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
}
- meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1);
+ meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]);
if (check_add_overflow((u32)sizeof(*meta_hdr),
mei_slots2data(meta_hdr->size),
&hdr_size_ext)) {
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 28937b6e7e0c..786f7c8f7f61 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -50,8 +50,6 @@ static int mei_open(struct inode *inode, struct file *file)
int err;
dev = container_of(inode->i_cdev, struct mei_device, cdev);
- if (!dev)
- return -ENODEV;
mutex_lock(&dev->device_lock);
@@ -1104,7 +1102,7 @@ static ssize_t dev_state_show(struct device *device,
static DEVICE_ATTR_RO(dev_state);
/**
- * dev_set_devstate: set to new device state and notify sysfs file.
+ * mei_set_devstate: set to new device state and notify sysfs file.
*
* @dev: mei_device
* @state: new device state
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index df758033dc93..fe46ff2b9d69 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -26,7 +26,7 @@ TRACE_EVENT(mei_reg_read,
__field(u32, val)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev))
+ __assign_str(dev, dev_name(dev));
__entry->reg = reg;
__entry->offs = offs;
__entry->val = val;
@@ -45,7 +45,7 @@ TRACE_EVENT(mei_reg_write,
__field(u32, val)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev))
+ __assign_str(dev, dev_name(dev));
__entry->reg = reg;
__entry->offs = offs;
__entry->val = val;
@@ -64,7 +64,7 @@ TRACE_EVENT(mei_pci_cfg_read,
__field(u32, val)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev))
+ __assign_str(dev, dev_name(dev));
__entry->reg = reg;
__entry->offs = offs;
__entry->val = val;
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 4bf26ce61044..aec0483b8e72 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -156,7 +156,7 @@ end:
}
/**
- * mei_txe_remove - Device Shutdown Routine
+ * mei_txe_shutdown - Device Shutdown Routine
*
* @pdev: PCI device structure
*
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index 4c0841776087..be4016084979 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -93,7 +93,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
return -EINVAL;
}
- pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
+ pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
if (!pi)
return -ENOMEM;
@@ -104,19 +104,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
pi->capability &= ioread8(base);
pi->events = pi->capability;
- dev_set_drvdata(dev, pi);
-
- return pvpanic_probe(pi);
-}
-
-static int pvpanic_mmio_remove(struct platform_device *pdev)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
-
- pvpanic_remove(pi);
- kfree(pi);
-
- return 0;
+ return devm_pvpanic_probe(dev, pi);
}
static const struct of_device_id pvpanic_mmio_match[] = {
@@ -139,6 +127,5 @@ static struct platform_driver pvpanic_mmio_driver = {
.dev_groups = pvpanic_mmio_dev_groups,
},
.probe = pvpanic_mmio_probe,
- .remove = pvpanic_mmio_remove,
};
module_platform_driver(pvpanic_mmio_driver);
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 9ecc4e8559d5..a43c401017ae 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -73,20 +73,19 @@ ATTRIBUTE_GROUPS(pvpanic_pci_dev);
static int pvpanic_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct device *dev = &pdev->dev;
struct pvpanic_instance *pi;
void __iomem *base;
int ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret < 0)
return ret;
- base = pci_iomap(pdev, 0, 0);
+ base = pcim_iomap(pdev, 0, 0);
if (!base)
return -ENOMEM;
- pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
+ pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL);
if (!pi)
return -ENOMEM;
@@ -97,26 +96,13 @@ static int pvpanic_pci_probe(struct pci_dev *pdev,
pi->capability &= ioread8(base);
pi->events = pi->capability;
- dev_set_drvdata(dev, pi);
-
- return pvpanic_probe(pi);
-}
-
-static void pvpanic_pci_remove(struct pci_dev *pdev)
-{
- struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
-
- pvpanic_remove(pi);
- iounmap(pi->base);
- kfree(pi);
- pci_disable_device(pdev);
+ return devm_pvpanic_probe(&pdev->dev, pi);
}
static struct pci_driver pvpanic_pci_driver = {
.name = "pvpanic-pci",
.id_table = pvpanic_pci_id_tbl,
.probe = pvpanic_pci_probe,
- .remove = pvpanic_pci_remove,
.driver = {
.dev_groups = pvpanic_pci_dev_groups,
},
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index 65f70a4da8c0..02b807c788c9 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -13,6 +13,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/panic_notifier.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/list.h>
@@ -60,25 +61,10 @@ static struct notifier_block pvpanic_panic_nb = {
.priority = 1, /* let this be called before broken drm_fb_helper */
};
-int pvpanic_probe(struct pvpanic_instance *pi)
-{
- if (!pi || !pi->base)
- return -EINVAL;
-
- spin_lock(&pvpanic_lock);
- list_add(&pi->list, &pvpanic_list);
- spin_unlock(&pvpanic_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pvpanic_probe);
-
-void pvpanic_remove(struct pvpanic_instance *pi)
+static void pvpanic_remove(void *param)
{
struct pvpanic_instance *pi_cur, *pi_next;
-
- if (!pi)
- return;
+ struct pvpanic_instance *pi = param;
spin_lock(&pvpanic_lock);
list_for_each_entry_safe(pi_cur, pi_next, &pvpanic_list, list) {
@@ -89,7 +75,19 @@ void pvpanic_remove(struct pvpanic_instance *pi)
}
spin_unlock(&pvpanic_lock);
}
-EXPORT_SYMBOL_GPL(pvpanic_remove);
+
+int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi)
+{
+ if (!pi || !pi->base)
+ return -EINVAL;
+
+ spin_lock(&pvpanic_lock);
+ list_add(&pi->list, &pvpanic_list);
+ spin_unlock(&pvpanic_lock);
+
+ return devm_add_action_or_reset(dev, pvpanic_remove, pi);
+}
+EXPORT_SYMBOL_GPL(devm_pvpanic_probe);
static int pvpanic_init(void)
{
diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
index 1afccc2e9fec..493545951754 100644
--- a/drivers/misc/pvpanic/pvpanic.h
+++ b/drivers/misc/pvpanic/pvpanic.h
@@ -15,7 +15,6 @@ struct pvpanic_instance {
struct list_head list;
};
-int pvpanic_probe(struct pvpanic_instance *pi);
-void pvpanic_remove(struct pvpanic_instance *pi);
+int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi);
#endif /* PVPANIC_H_ */
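(A note on the conversion above, illustrative only and with made-up names: devm_pvpanic_probe() relies on devm_add_action_or_reset(), which runs the given callback automatically when the driver detaches and runs it immediately if registration fails, which is why the explicit .remove() callbacks in the MMIO and PCI front ends could be dropped. The general shape is roughly:)

/* Generic devm action pattern; example_setup()/example_teardown() are invented. */
static void example_teardown(void *data)
{
	/* undo whatever example_setup() did with 'data' */
}

static int example_setup(struct device *dev, void *data)
{
	/* ... acquire the resource tracked by 'data' ... */
	return devm_add_action_or_reset(dev, example_teardown, data);
}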
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 202bf951e909..93638ae2753a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -341,6 +341,7 @@ static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
int ret;
+ struct resource *res;
int (*init_func)(void);
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
@@ -349,10 +350,11 @@ static int sram_probe(struct platform_device *pdev)
sram->dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
- sram->virt_base = devm_platform_ioremap_resource(pdev, 0);
+ sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
else
- sram->virt_base = devm_platform_ioremap_resource_wc(pdev, 0);
+ sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
if (IS_ERR(sram->virt_base)) {
dev_err(&pdev->dev, "could not map SRAM registers\n");
return PTR_ERR(sram->virt_base);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 071844b58073..7f6976a9f508 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -52,13 +52,12 @@ static void remove_channel_from_table(struct st_data_s *st_gdata,
*/
int st_get_uart_wr_room(struct st_data_s *st_gdata)
{
- struct tty_struct *tty;
if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
pr_err("tty unavailable to perform write");
return -1;
}
- tty = st_gdata->tty;
- return tty->ops->write_room(tty);
+
+ return tty_write_room(st_gdata->tty);
}
/*
@@ -798,7 +797,7 @@ static void st_tty_close(struct tty_struct *tty)
}
static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
- char *tty_flags, int count)
+ const char *tty_flags, int count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -845,6 +844,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty)
}
static struct tty_ldisc_ops st_ldisc_ops = {
+ .num = N_TI_WL,
.name = "n_st",
.open = st_tty_open,
.close = st_tty_close,
@@ -860,7 +860,7 @@ int st_core_init(struct st_data_s **core_data)
struct st_data_s *st_gdata;
long err;
- err = tty_register_ldisc(N_TI_WL, &st_ldisc_ops);
+ err = tty_register_ldisc(&st_ldisc_ops);
if (err) {
pr_err("error registering %d line discipline %ld",
N_TI_WL, err);
@@ -871,11 +871,8 @@ int st_core_init(struct st_data_s **core_data)
st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
if (!st_gdata) {
pr_err("memory allocation failed");
- err = tty_unregister_ldisc(N_TI_WL);
- if (err)
- pr_err("unable to un-register ldisc %ld", err);
err = -ENOMEM;
- return err;
+ goto err_unreg_ldisc;
}
/* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's
@@ -890,17 +887,18 @@ int st_core_init(struct st_data_s **core_data)
err = st_ll_init(st_gdata);
if (err) {
pr_err("error during st_ll initialization(%ld)", err);
- kfree(st_gdata);
- err = tty_unregister_ldisc(N_TI_WL);
- if (err)
- pr_err("unable to un-register ldisc");
- return err;
+ goto err_free_gdata;
}
INIT_WORK(&st_gdata->work_write_wakeup, work_fn_write_wakeup);
*core_data = st_gdata;
return 0;
+err_free_gdata:
+ kfree(st_gdata);
+err_unreg_ldisc:
+ tty_unregister_ldisc(&st_ldisc_ops);
+ return err;
}
void st_core_exit(struct st_data_s *st_gdata)
@@ -918,9 +916,7 @@ void st_core_exit(struct st_data_s *st_gdata)
kfree_skb(st_gdata->rx_skb);
kfree_skb(st_gdata->tx_skb);
/* TTY ldisc cleanup */
- err = tty_unregister_ldisc(N_TI_WL);
- if (err)
- pr_err("unable to un-register ldisc %ld", err);
+ tty_unregister_ldisc(&st_ldisc_ops);
/* free the global data pointer */
kfree(st_gdata);
}
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index bae18ef03dcb..488eeb2811ae 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -387,15 +387,22 @@ static void uacce_release(struct device *dev)
static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
{
+ int ret;
+
if (!(flags & UACCE_DEV_SVA))
return flags;
flags &= ~UACCE_DEV_SVA;
- if (iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF))
+ ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
+ if (ret) {
+ dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
return flags;
+ }
- if (iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA)) {
+ ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
+ if (ret) {
+ dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
return flags;
}
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 26ff49fdf0f7..c0b5e339d5a1 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -107,7 +107,7 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
pr_warn("Failed to allocate memory for VMCI context\n");
- error = -EINVAL;
+ error = -ENOMEM;
goto err_out;
}
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 23c8448a9c3b..d6e3c650bd11 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -1013,9 +1013,6 @@ static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
- if (!xsdfec)
- return EPOLLNVAL | EPOLLHUP;
-
poll_wait(file, &xsdfec->waitq, wait);
/* XSDFEC ISR detected an error */
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 88f4c215caa6..ce8aed562929 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
+#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
@@ -111,7 +112,7 @@ struct mmc_blk_data {
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
- unsigned int usage;
+ struct kref kref;
unsigned int read_only;
unsigned int part_type;
unsigned int reset_done;
@@ -181,10 +182,8 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
mutex_lock(&open_lock);
md = disk->private_data;
- if (md && md->usage == 0)
+ if (md && !kref_get_unless_zero(&md->kref))
md = NULL;
- if (md)
- md->usage++;
mutex_unlock(&open_lock);
return md;
@@ -196,18 +195,25 @@ static inline int mmc_get_devidx(struct gendisk *disk)
return devidx;
}
-static void mmc_blk_put(struct mmc_blk_data *md)
+static void mmc_blk_kref_release(struct kref *ref)
{
+ struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
+ int devidx;
+
+ devidx = mmc_get_devidx(md->disk);
+ ida_simple_remove(&mmc_blk_ida, devidx);
+
mutex_lock(&open_lock);
- md->usage--;
- if (md->usage == 0) {
- int devidx = mmc_get_devidx(md->disk);
- blk_put_queue(md->queue.queue);
- ida_simple_remove(&mmc_blk_ida, devidx);
- put_disk(md->disk);
- kfree(md);
- }
+ md->disk->private_data = NULL;
mutex_unlock(&open_lock);
+
+ put_disk(md->disk);
+ kfree(md);
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+ kref_put(&md->kref, mmc_blk_kref_release);
}
static ssize_t power_ro_lock_show(struct device *dev,
@@ -2319,39 +2325,23 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
- md->disk = alloc_disk(perdev_minors);
- if (md->disk == NULL) {
- ret = -ENOMEM;
+ md->disk = mmc_init_queue(&md->queue, card);
+ if (IS_ERR(md->disk)) {
+ ret = PTR_ERR(md->disk);
goto err_kfree;
}
INIT_LIST_HEAD(&md->part);
INIT_LIST_HEAD(&md->rpmbs);
- md->usage = 1;
-
- ret = mmc_init_queue(&md->queue, card);
- if (ret)
- goto err_putdisk;
+ kref_init(&md->kref);
md->queue.blkdata = md;
- /*
- * Keep an extra reference to the queue so that we can shutdown the
- * queue (i.e. call blk_cleanup_queue()) while there are still
- * references to the 'md'. The corresponding blk_put_queue() is in
- * mmc_blk_put().
- */
- if (!blk_get_queue(md->queue.queue)) {
- mmc_cleanup_queue(&md->queue);
- ret = -ENODEV;
- goto err_putdisk;
- }
-
md->disk->major = MMC_BLOCK_MAJOR;
+ md->disk->minors = perdev_minors;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
- md->disk->queue = md->queue.queue;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
md->disk->flags = GENHD_FL_EXT_DEVT;
@@ -2400,8 +2390,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
return md;
- err_putdisk:
- put_disk(md->disk);
err_kfree:
kfree(md);
out:
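(For orientation, not part of the diff: the block driver change above swaps a hand-rolled 'usage' counter for a struct kref. The general pattern, sketched with invented names: kref_init() gives the creator the initial reference, kref_get_unless_zero() lets lookups race safely against the final put, and kref_put() calls the release function when the count drops to zero.)

/* Sketch of the kref lifecycle; example_obj and friends are invented names. */
struct example_obj {
	struct kref kref;
};

static void example_release(struct kref *ref)
{
	struct example_obj *obj = container_of(ref, struct example_obj, kref);

	kfree(obj);
}

static struct example_obj *example_get(struct example_obj *obj)
{
	return kref_get_unless_zero(&obj->kref) ? obj : NULL;
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_release);
}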
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b039dcff17f8..95fedcf56e4a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -937,11 +937,14 @@ int mmc_execute_tuning(struct mmc_card *card)
err = host->ops->execute_tuning(host, opcode);
- if (err)
+ if (err) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
- else
+ } else {
+ host->retune_now = 0;
+ host->need_retune = 0;
mmc_retune_enable(host);
+ }
return err;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index eda4a1892c33..0475d96047c4 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
wakeup_source_unregister(host->ws);
- ida_simple_remove(&mmc_host_ida, host->index);
+ if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
+ ida_simple_remove(&mmc_host_ida, host->index);
kfree(host);
}
@@ -502,7 +503,7 @@ static int mmc_first_nonreserved_index(void)
*/
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
- int err;
+ int index;
struct mmc_host *host;
int alias_id, min_idx, max_idx;
@@ -515,20 +516,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
alias_id = of_alias_get_id(dev->of_node, "mmc");
if (alias_id >= 0) {
- min_idx = alias_id;
- max_idx = alias_id + 1;
+ index = alias_id;
} else {
min_idx = mmc_first_nonreserved_index();
max_idx = 0;
- }
- err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
- if (err < 0) {
- kfree(host);
- return NULL;
+ index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
+ if (index < 0) {
+ kfree(host);
+ return NULL;
+ }
}
- host->index = err;
+ host->index = index;
dev_set_name(&host->class_dev, "mmc%d", host->index);
host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index d600e0a4a460..cc3261777637 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -424,9 +424,10 @@ static inline bool mmc_merge_capable(struct mmc_host *host)
*
* Initialise a MMC card request queue.
*/
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
+ struct gendisk *disk;
int ret;
mq->card = card;
@@ -464,26 +465,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
ret = blk_mq_alloc_tag_set(&mq->tag_set);
if (ret)
- return ret;
+ return ERR_PTR(ret);
+
- mq->queue = blk_mq_init_queue(&mq->tag_set);
- if (IS_ERR(mq->queue)) {
- ret = PTR_ERR(mq->queue);
- goto free_tag_set;
+ disk = blk_mq_alloc_disk(&mq->tag_set, mq);
+ if (IS_ERR(disk)) {
+ blk_mq_free_tag_set(&mq->tag_set);
+ return disk;
}
+ mq->queue = disk->queue;
if (mmc_host_is_spi(host) && host->use_spi_crc)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
-
- mq->queue->queuedata = mq;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
mmc_setup_queue(mq, card);
- return 0;
-
-free_tag_set:
- blk_mq_free_tag_set(&mq->tag_set);
- return ret;
+ return disk;
}
void mmc_queue_suspend(struct mmc_queue *mq)
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 3319d8ab57d0..9ade3bcbb714 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -94,7 +94,7 @@ struct mmc_queue {
struct work_struct complete_work;
};
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *);
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index be4067281caa..c36242b86b1d 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -439,7 +439,7 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
tty = tty_port_tty_get(&port->port);
if (tty == NULL || !kfifo_len(xmit) ||
- tty->stopped || tty->hw_stopped) {
+ tty->flow.stopped || tty->hw_stopped) {
sdio_uart_stop_tx(port);
tty_kref_put(tty);
return;
@@ -797,13 +797,13 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
return ret;
}
-static int sdio_uart_write_room(struct tty_struct *tty)
+static unsigned int sdio_uart_write_room(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
}
-static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
+static unsigned int sdio_uart_chars_in_buffer(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
return kfifo_len(&port->xmit_fifo);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d333130d1531..c3229d8c7041 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
continue;
}
- dw_mci_stop_dma(host);
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_SENDING_STOP;
break;
}
@@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0db17bcc9c16..cb1a64a5c256 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -789,6 +789,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
}
}
+ fallthrough;
+
case JZ4740_MMC_STATE_DONE:
break;
}
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 51db30acf4dc..fdaa11f92fe6 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
u32 status;
int ret = 0;
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- spin_lock_irqsave(&host->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
+ host->pwr_reg & MCI_STM32_VSWITCHEN) {
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
spin_unlock_irqrestore(&host->lock, flags);
@@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
host->base + MMCICLEAR);
+ spin_lock_irqsave(&host->lock, flags);
mmci_write_pwrreg(host, host->pwr_reg &
~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
}
+ spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index cce390fe9cf3..e7565c671998 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
return pltfm_host->clock;
}
+/*
+ * There is a known bug on BCM2711's SDHCI core integration where the
+ * controller will hang when the difference between the core clock and the bus
+ * clock is too great. Specifically, this can be reproduced under the following
+ * conditions:
+ *
+ * - No SD card plugged in, polling thread is running, probing cards at
+ * 100 kHz.
+ * - BCM2711's core clock configured at 500 MHz or more
+ *
+ * So we set 200 kHz as the minimum clock frequency available for that SoC.
+ */
+static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
+{
+ return 200000;
+}
+
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
@@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_set_power_and_bus_voltage,
.get_max_clock = sdhci_iproc_get_max_clock,
+ .get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
- .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &sdhci_iproc_bcm2711_ops,
};
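(For context, paraphrased from the sdhci core rather than taken from this patch: a host driver's .get_min_clock callback is used to seed mmc->f_min during host setup, so returning 200 kHz here raises the lowest frequency the MMC core will use when polling for cards and avoids the problematic 100 kHz probing described above. Roughly:)

	/* simplified sketch of the consumer in the sdhci core's setup path */
	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);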
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e44b7a66b73c..290a14cdc1cf 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
sdhci_cqe_disable(mmc, recovery);
}
+static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u32 count, start = 15;
+
+ __sdhci_set_timeout(host, cmd);
+ count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
+ /*
+ * Update the software timeout value if it is less than the hardware data
+ * timeout value. The Qcom SoC hardware data timeout value is calculated
+ * as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
+ */
+ if (cmd && cmd->data && host->clock > 400000 &&
+ host->clock <= 50000000 &&
+ ((1 << (count + start)) > (10 * host->clock)))
+ host->data_timeout = 22LL * NSEC_PER_SEC;
+}
+
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
@@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.irq = sdhci_msm_cqe_irq,
.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
.set_power = sdhci_set_power_noreg,
+ .set_timeout = sdhci_msm_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
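(To make the comparison in sdhci_msm_set_timeout() concrete, here is an illustrative back-of-the-envelope calculation; the count value of 14 and the 50 MHz clock are assumed for the example, not taken from the patch.)

/*
 * Worked example (illustrative arithmetic only, assuming the TIMEOUT_CONTROL
 * count reads back as 14 and host->clock is 50 MHz):
 *
 *   hardware timeout = 4 * 2^(count + 13) / clock
 *                    = 2^(14 + 15) / 50,000,000  ~ 10.7 s
 *
 * The check (1 << (count + 15)) > 10 * host->clock is the same comparison
 * scaled by the clock: 2^29 = 536,870,912 > 500,000,000, so the software
 * timeout is raised to 22 s to keep it from firing before the controller's
 * own data timeout expires.
 */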
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 839965f7c717..0e7c07ed9690 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1542,6 +1542,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
}
}
+ sdhci_get_of_property(pdev);
+
sdhci_arasan->clk_ahb = devm_clk_get(dev, "clk_ahb");
if (IS_ERR(sdhci_arasan->clk_ahb)) {
ret = dev_err_probe(dev, PTR_ERR(sdhci_arasan->clk_ahb),
@@ -1561,14 +1563,22 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
+ /* If clock-frequency property is set, use the provided value */
+ if (pltfm_host->clock &&
+ pltfm_host->clock != clk_get_rate(clk_xin)) {
+ ret = clk_set_rate(clk_xin, pltfm_host->clock);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set SD clock rate\n");
+ goto clk_dis_ahb;
+ }
+ }
+
ret = clk_prepare_enable(clk_xin);
if (ret) {
dev_err(dev, "Unable to enable SD clock.\n");
goto clk_dis_ahb;
}
- sdhci_get_of_property(pdev);
-
if (of_property_read_bool(np, "xlnx,fails-without-test-cd"))
sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_FORCE_CDTEST;
diff --git a/drivers/mmc/host/sdhci-of-aspeed-test.c b/drivers/mmc/host/sdhci-of-aspeed-test.c
index bb67d159b7d8..1ed4f86291f2 100644
--- a/drivers/mmc/host/sdhci-of-aspeed-test.c
+++ b/drivers/mmc/host/sdhci-of-aspeed-test.c
@@ -26,23 +26,23 @@ static void aspeed_sdhci_phase_ddr52(struct kunit *test)
KUNIT_EXPECT_EQ(test, 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 25));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 0,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 0,
aspeed_sdhci_phase_to_tap(NULL, rate, 180));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 0,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 0,
aspeed_sdhci_phase_to_tap(NULL, rate, 181));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
aspeed_sdhci_phase_to_tap(NULL, rate, 182));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
aspeed_sdhci_phase_to_tap(NULL, rate, 183));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 2,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 2,
aspeed_sdhci_phase_to_tap(NULL, rate, 184));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 3,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 3,
aspeed_sdhci_phase_to_tap(NULL, rate, 185));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 14,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 14,
aspeed_sdhci_phase_to_tap(NULL, rate, 203));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 204));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 205));
}
@@ -67,21 +67,21 @@ static void aspeed_sdhci_phase_hs200(struct kunit *test)
KUNIT_EXPECT_EQ(test, 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 96));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK,
aspeed_sdhci_phase_to_tap(NULL, rate, 180));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK,
aspeed_sdhci_phase_to_tap(NULL, rate, 185));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
aspeed_sdhci_phase_to_tap(NULL, rate, 186));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 1,
aspeed_sdhci_phase_to_tap(NULL, rate, 187));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 14,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 14,
aspeed_sdhci_phase_to_tap(NULL, rate, 269));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 270));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 271));
- KUNIT_EXPECT_EQ(test, (int)ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
+ KUNIT_EXPECT_EQ(test, ASPEED_SDHCI_TAP_PARAM_INVERT_CLK | 15,
aspeed_sdhci_phase_to_tap(NULL, rate, 276));
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6aaf5c3ce34c..aba6e10b8605 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1812,6 +1812,10 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
u16 preset = 0;
switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
+ break;
case MMC_TIMING_UHS_SDR12:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
break;
@@ -4072,9 +4076,13 @@ static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
bounce_size,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
- if (ret)
+ if (ret) {
+ devm_kfree(mmc_dev(mmc), host->bounce_buffer);
+ host->bounce_buffer = NULL;
/* Again fall back to max_segs == 1 */
return;
+ }
+
host->bounce_buffer_size = bounce_size;
/* Lie about this since we're bouncing */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c35ed4be75b7..074dc182b184 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -255,6 +255,7 @@
/* 60-FB reserved */
+#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64
#define SDHCI_PRESET_FOR_SDR12 0x66
#define SDHCI_PRESET_FOR_SDR25 0x68
#define SDHCI_PRESET_FOR_SDR50 0x6A
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3097e93787f7..a761134fd3be 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
- return extp->MinorVersion >= '5' &&
+ return extp && extp->MinorVersion >= '5' &&
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 99b7986002f0..6a6a2a21d2ed 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -108,8 +108,8 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
#if BITS_PER_LONG >= 64
case 8:
onecmd |= (onecmd << (chip_mode * 32));
-#endif
fallthrough;
+#endif
case 4:
onecmd |= (onecmd << (chip_mode * 16));
fallthrough;
@@ -164,8 +164,8 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
#if BITS_PER_LONG >= 64
case 8:
res |= (onestat >> (chip_mode * 32));
-#endif
fallthrough;
+#endif
case 4:
res |= (onestat >> (chip_mode * 16));
fallthrough;
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index ff86373d7d24..a05e103682a4 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -31,14 +31,11 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
{
- struct list_head *pos;
struct mtd_chip_driver *ret = NULL, *this;
spin_lock(&chip_drvs_lock);
- list_for_each(pos, &chip_drvs_list) {
- this = list_entry(pos, typeof(*this), list);
-
+ list_for_each_entry(this, &chip_drvs_list, list) {
if (!strcmp(this->name, name)) {
ret = this;
break;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 0f4c2d823de8..79cb981ececc 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -89,6 +89,12 @@ config MTD_MCHP23K256
platform data, or a device tree description if you want to
specify device partitioning
+config MTD_MCHP48L640
+ tristate "Microchip 48L640 EERAM"
+ depends on SPI_MASTER
+ help
+ This enables access to Microchip 48L640 EERAM chips, using SPI.
+
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR || COMPILE_TEST
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 991c8d12c016..0362cf6bdc67 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_MCHP23K256) += mchp23k256.o
+obj-$(CONFIG_MTD_MCHP48L640) += mchp48l640.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
new file mode 100644
index 000000000000..99400d0fb8c1
--- /dev/null
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Microchip 48L640 64 Kb SPI Serial EERAM
+ *
+ * Copyright Heiko Schocher <hs@denx.de>
+ *
+ * datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20006055B.pdf
+ *
+ * We set continuous mode, but reading/writing more bytes than the
+ * page size seems to bring the chip into a state where the values
+ * read back are wrong ... no idea why.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/of_device.h>
+
+struct mchp48_caps {
+ unsigned int size;
+ unsigned int page_size;
+};
+
+struct mchp48l640_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+ const struct mchp48_caps *caps;
+};
+
+#define MCHP48L640_CMD_WREN 0x06
+#define MCHP48L640_CMD_WRDI 0x04
+#define MCHP48L640_CMD_WRITE 0x02
+#define MCHP48L640_CMD_READ 0x03
+#define MCHP48L640_CMD_WRSR 0x01
+#define MCHP48L640_CMD_RDSR 0x05
+
+#define MCHP48L640_STATUS_RDY 0x01
+#define MCHP48L640_STATUS_WEL 0x02
+#define MCHP48L640_STATUS_BP0 0x04
+#define MCHP48L640_STATUS_BP1 0x08
+#define MCHP48L640_STATUS_SWM 0x10
+#define MCHP48L640_STATUS_PRO 0x20
+#define MCHP48L640_STATUS_ASE 0x40
+
+#define MCHP48L640_TIMEOUT 100
+
+#define MAX_CMD_SIZE 0x10
+
+#define to_mchp48l640_flash(x) container_of(x, struct mchp48l640_flash, mtd)
+
+static int mchp48l640_mkcmd(struct mchp48l640_flash *flash, u8 cmd, loff_t addr, char *buf)
+{
+ buf[0] = cmd;
+ buf[1] = addr >> 8;
+ buf[2] = addr;
+
+ return 3;
+}
+
+static int mchp48l640_read_status(struct mchp48l640_flash *flash, int *status)
+{
+ unsigned char cmd[2];
+ int ret;
+
+ cmd[0] = MCHP48L640_CMD_RDSR;
+ cmd[1] = 0x00;
+ mutex_lock(&flash->lock);
+ ret = spi_write_then_read(flash->spi, &cmd[0], 1, &cmd[1], 1);
+ mutex_unlock(&flash->lock);
+ if (!ret)
+ *status = cmd[1];
+ dev_dbg(&flash->spi->dev, "read status ret: %d status: %x", ret, *status);
+
+ return ret;
+}
+
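+/*
+ * Poll the status register until the given bit is set or cleared
+ * (depending on @set), or until the timeout expires.
+ */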
+static int mchp48l640_waitforbit(struct mchp48l640_flash *flash, int bit, bool set)
+{
+ int ret, status;
+ unsigned long deadline;
+
+ deadline = jiffies + msecs_to_jiffies(MCHP48L640_TIMEOUT);
+ do {
+ ret = mchp48l640_read_status(flash, &status);
+ dev_dbg(&flash->spi->dev, "read status ret: %d bit: %x %sset status: %x",
+ ret, bit, (set ? "" : "not"), status);
+ if (ret)
+ return ret;
+
+ if (set) {
+ if ((status & bit) == bit)
+ return 0;
+ } else {
+ if ((status & bit) == 0)
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_err(&flash->spi->dev, "Timeout waiting for bit %x %s set in status register.",
+ bit, (set ? "" : "not"));
+ return -ETIMEDOUT;
+}
+
+static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
+{
+ unsigned char cmd[2];
+ int ret;
+
+ if (enable)
+ cmd[0] = MCHP48L640_CMD_WREN;
+ else
+ cmd[0] = MCHP48L640_CMD_WRDI;
+
+ mutex_lock(&flash->lock);
+ ret = spi_write(flash->spi, cmd, 1);
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ dev_err(&flash->spi->dev, "write %sable failed ret: %d",
+ (enable ? "en" : "dis"), ret);
+
+ dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
+ (enable ? "en" : "dis"), ret);
+ if (enable)
+ return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
+
+ return ret;
+}
+
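+/* Enable writes, then set the PRO bit to select page-rollover (continuous) mode. */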
+static int mchp48l640_set_mode(struct mchp48l640_flash *flash)
+{
+ unsigned char cmd[2];
+ int ret;
+
+ ret = mchp48l640_write_prepare(flash, true);
+ if (ret)
+ return ret;
+
+ cmd[0] = MCHP48L640_CMD_WRSR;
+ cmd[1] = MCHP48L640_STATUS_PRO;
+
+ mutex_lock(&flash->lock);
+ ret = spi_write(flash->spi, cmd, 2);
+ mutex_unlock(&flash->lock);
+ if (ret)
+ dev_err(&flash->spi->dev, "Could not set continuous mode ret: %d", ret);
+
+ return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_PRO, true);
+}
+
+static int mchp48l640_wait_rdy(struct mchp48l640_flash *flash)
+{
+ return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_RDY, false);
+};
+
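+/*
+ * Write one chunk of at most one page: wait for the chip to be ready,
+ * enable writes, send the WRITE command plus data, then wait for the
+ * WEL bit to clear.
+ */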
+static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ unsigned char *cmd;
+ int ret;
+ int cmdlen;
+
+ cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = mchp48l640_wait_rdy(flash);
+ if (ret)
+ goto fail;
+
+ ret = mchp48l640_write_prepare(flash, true);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&flash->lock);
+ cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_WRITE, to, cmd);
+ memcpy(&cmd[cmdlen], buf, len);
+ ret = spi_write(flash->spi, cmd, cmdlen + len);
+ mutex_unlock(&flash->lock);
+ if (!ret)
+ *retlen += len;
+ else
+ goto fail;
+
+ ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
+ if (ret)
+ goto fail;
+
+ kfree(cmd);
+ return 0;
+fail:
+ kfree(cmd);
+ dev_err(&flash->spi->dev, "write fail with: %d", ret);
+ return ret;
+};
+
+static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ int ret;
+ size_t wlen = 0;
+ loff_t woff = to;
+ size_t ws;
+ size_t page_sz = flash->caps->page_size;
+
+ /*
+ * We set the PRO bit (page rollover), but writing more than the page
+ * size at once corrupts the data, so write in page-sized (32 byte) chunks.
+ */
+ while (wlen < len) {
+ ws = min((len - wlen), page_sz);
+ ret = mchp48l640_write_page(mtd, woff, ws, retlen, &buf[wlen]);
+ if (ret)
+ return ret;
+ wlen += ws;
+ woff += ws;
+ }
+
+ return 0;
+}
+
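+/*
+ * Read one chunk of at most one page: wait for the chip to be ready,
+ * then issue the READ command and clock in the data.
+ */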
+static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ unsigned char *cmd;
+ int ret;
+ int cmdlen;
+
+ cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = mchp48l640_wait_rdy(flash);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&flash->lock);
+ cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_READ, from, cmd);
+ ret = spi_write_then_read(flash->spi, cmd, cmdlen, buf, len);
+ mutex_unlock(&flash->lock);
+ if (!ret)
+ *retlen += len;
+
+ kfree(cmd);
+ return ret;
+
+fail:
+ kfree(cmd);
+ dev_err(&flash->spi->dev, "read fail with: %d", ret);
+ return ret;
+}
+
+static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
+ int ret;
+ size_t wlen = 0;
+ loff_t woff = from;
+ size_t ws;
+ size_t page_sz = flash->caps->page_size;
+
+ /*
+ * We set the PRO bit (page rollover), but reading more than the page
+ * size at once returns corrupted data, so read in page-sized chunks.
+ */
+ while (wlen < len) {
+ ws = min((len - wlen), page_sz);
+ ret = mchp48l640_read_page(mtd, woff, ws, retlen, &buf[wlen]);
+ if (ret)
+ return ret;
+ wlen += ws;
+ woff += ws;
+ }
+
+ return 0;
+};
+
+static const struct mchp48_caps mchp48l640_caps = {
+ .size = SZ_8K,
+ .page_size = 32,
+};
+
+static int mchp48l640_probe(struct spi_device *spi)
+{
+ struct mchp48l640_flash *flash;
+ struct flash_platform_data *data;
+ int err;
+ int status;
+
+ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ spi_set_drvdata(spi, flash);
+
+ err = mchp48l640_read_status(flash, &status);
+ if (err)
+ return err;
+
+ err = mchp48l640_set_mode(flash);
+ if (err)
+ return err;
+
+ data = dev_get_platdata(&spi->dev);
+
+ flash->caps = of_device_get_match_data(&spi->dev);
+ if (!flash->caps)
+ flash->caps = &mchp48l640_caps;
+
+ mtd_set_of_node(&flash->mtd, spi->dev.of_node);
+ flash->mtd.dev.parent = &spi->dev;
+ flash->mtd.type = MTD_RAM;
+ flash->mtd.flags = MTD_CAP_RAM;
+ flash->mtd.writesize = flash->caps->page_size;
+ flash->mtd.size = flash->caps->size;
+ flash->mtd._read = mchp48l640_read;
+ flash->mtd._write = mchp48l640_write;
+
+ err = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mchp48l640_remove(struct spi_device *spi)
+{
+ struct mchp48l640_flash *flash = spi_get_drvdata(spi);
+
+ return mtd_device_unregister(&flash->mtd);
+}
+
+static const struct of_device_id mchp48l640_of_table[] = {
+ {
+ .compatible = "microchip,48l640",
+ .data = &mchp48l640_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
+
+static struct spi_driver mchp48l640_driver = {
+ .driver = {
+ .name = "mchp48l640",
+ .of_match_table = of_match_ptr(mchp48l640_of_table),
+ },
+ .probe = mchp48l640_probe,
+ .remove = mchp48l640_remove,
+};
+
+module_spi_driver(mchp48l640_driver);
+
+MODULE_DESCRIPTION("MTD SPI driver for Microchip 48l640 EERAM chips");
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:mchp48l640");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index fb4a6aa24543..08f76ff839a7 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -286,7 +286,6 @@ static int __init ms02nv_init(void)
break;
default:
return -ENODEV;
- break;
}
for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 5b04ae6c3057..6ed6c51fac69 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -270,6 +270,7 @@ static int phram_setup(const char *val)
if (len == 0 || erasesize == 0 || erasesize > len
|| erasesize > UINT_MAX || rem) {
parse_err("illegal erasesize or len\n");
+ ret = -EINVAL;
goto error;
}
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index af16d3485de0..6276daa296da 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -259,20 +259,13 @@ static int find_boot_record(struct INFTLrecord *inftl)
/* Memory alloc */
inftl->PUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
GFP_KERNEL);
- if (!inftl->PUtable) {
- printk(KERN_WARNING "INFTL: allocation of PUtable "
- "failed (%zd bytes)\n",
- inftl->nb_blocks * sizeof(u16));
+ if (!inftl->PUtable)
return -ENOMEM;
- }
inftl->VUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
GFP_KERNEL);
if (!inftl->VUtable) {
kfree(inftl->PUtable);
- printk(KERN_WARNING "INFTL: allocation of VUtable "
- "failed (%zd bytes)\n",
- inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
@@ -330,7 +323,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
if (!buf)
- return -1;
+ return -ENOMEM;
ret = -1;
for (i = 0; i < len; i += SECTORSIZE) {
@@ -558,12 +551,8 @@ int INFTL_mount(struct INFTLrecord *s)
/* Temporary buffer to store ANAC numbers. */
ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL);
- if (!ANACtable) {
- printk(KERN_WARNING "INFTL: allocation of ANACtable "
- "failed (%zd bytes)\n",
- s->nb_blocks * sizeof(u8));
+ if (!ANACtable)
return -ENOMEM;
- }
/*
* First pass is to explore each physical unit, and construct the
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 42a95ba40f2c..281fcbaa74e7 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -189,10 +189,8 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
- }
- if (!map) {
- printk(KERN_ERR MOD_NAME ": kmalloc failed");
- goto out;
+ if (!map)
+ goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 460494212f6a..c0216bc740cc 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -217,12 +217,10 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev,
unsigned long offset;
int i;
- if (!map)
- map = kmalloc(sizeof(*map), GFP_KERNEL);
-
if (!map) {
- printk(KERN_ERR MOD_NAME ": kmalloc failed");
- goto out;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 85e14150a073..15d5b76ff504 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -277,11 +277,10 @@ static int __init esb2rom_init_one(struct pci_dev *pdev,
unsigned long offset;
int i;
- if (!map)
- map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
- printk(KERN_ERR MOD_NAME ": kmalloc failed");
- goto out;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index fda72c5fd8f9..c8b2793691db 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -213,10 +213,8 @@ static int __init ichxrom_init_one(struct pci_dev *pdev,
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
- }
- if (!map) {
- printk(KERN_ERR MOD_NAME ": kmalloc failed");
- goto out;
+ if (!map)
+ goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 0bec7c791d17..cedd8ef9a6bf 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -127,7 +127,6 @@ static int platram_probe(struct platform_device *pdev)
info->map.virt = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->map.virt)) {
err = PTR_ERR(info->map.virt);
- dev_err(&pdev->dev, "failed to ioremap() region\n");
goto exit_free;
}
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index f9cfb084c029..6c0c91bfec05 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -62,10 +62,8 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
}
up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL);
- if (!up) {
- printk(KERN_ERR PFX "Cannot allocate struct uflash_dev\n");
+ if (!up)
return -ENOMEM;
- }
/* copy defaults and tweak parameters */
memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 6ce4bc57f919..44bea3f65060 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
blk_queue_max_discard_sectors(new->rq, UINT_MAX);
+ new->rq->limits.discard_granularity = tr->blksize;
}
gd->queue = new->rq;
@@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
-
- mutex_lock(&mtd_table_mutex);
-
ret = register_blkdev(tr->major, tr->name);
if (ret < 0) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
- mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->blkshift = ffs(tr->blksize) - 1;
INIT_LIST_HEAD(&tr->devs);
- list_add(&tr->list, &blktrans_majors);
+ mutex_lock(&mtd_table_mutex);
+ list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
if (mtd->type != MTD_ABSENT)
tr->add_mtd(tr, mtd);
-
mutex_unlock(&mtd_table_mutex);
return 0;
}
@@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- unregister_blkdev(tr->major, tr->name);
mutex_unlock(&mtd_table_mutex);
+ unregister_blkdev(tr->major, tr->name);
BUG_ON(!list_empty(&tr->devs));
return 0;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9aaeadd53eb4..c8fd7f758938 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -96,6 +96,12 @@ static void mtd_release(struct device *dev)
device_destroy(&mtd_class, index + 1);
}
+#define MTD_DEVICE_ATTR_RO(name) \
+static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
+
+#define MTD_DEVICE_ATTR_RW(name) \
+static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
+
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -131,46 +137,45 @@ static ssize_t mtd_type_show(struct device *dev,
type = "unknown";
}
- return snprintf(buf, PAGE_SIZE, "%s\n", type);
+ return sysfs_emit(buf, "%s\n", type);
}
-static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
+MTD_DEVICE_ATTR_RO(type);
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
+ return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
-static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
+MTD_DEVICE_ATTR_RO(flags);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)mtd->size);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
-static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
+MTD_DEVICE_ATTR_RO(size);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
+ return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
-static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
+MTD_DEVICE_ATTR_RO(erasesize);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
+ return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
-static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
+MTD_DEVICE_ATTR_RO(writesize);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -178,55 +183,54 @@ static ssize_t mtd_subpagesize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
- return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
+ return sysfs_emit(buf, "%u\n", subpagesize);
}
-static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
+MTD_DEVICE_ATTR_RO(subpagesize);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
+ return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
-static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
+MTD_DEVICE_ATTR_RO(oobsize);
static ssize_t mtd_oobavail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
+ return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
-static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
+MTD_DEVICE_ATTR_RO(oobavail);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
+ return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
-static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
- NULL);
+MTD_DEVICE_ATTR_RO(numeraseregions);
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
+ return sysfs_emit(buf, "%s\n", mtd->name);
}
-static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+MTD_DEVICE_ATTR_RO(name);
static ssize_t mtd_ecc_strength_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+ return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
-static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+MTD_DEVICE_ATTR_RO(ecc_strength);
static ssize_t mtd_bitflip_threshold_show(struct device *dev,
struct device_attribute *attr,
@@ -234,7 +238,7 @@ static ssize_t mtd_bitflip_threshold_show(struct device *dev,
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+ return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}
static ssize_t mtd_bitflip_threshold_store(struct device *dev,
@@ -252,60 +256,57 @@ static ssize_t mtd_bitflip_threshold_store(struct device *dev,
mtd->bitflip_threshold = bitflip_threshold;
return count;
}
-static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
- mtd_bitflip_threshold_show,
- mtd_bitflip_threshold_store);
+MTD_DEVICE_ATTR_RW(bitflip_threshold);
static ssize_t mtd_ecc_step_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+ return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
-static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+MTD_DEVICE_ATTR_RO(ecc_step_size);
-static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
+static ssize_t mtd_corrected_bits_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
- return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
+ return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
-static DEVICE_ATTR(corrected_bits, S_IRUGO,
- mtd_ecc_stats_corrected_show, NULL);
+MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
-static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
+static ssize_t mtd_ecc_failures_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
- return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
+ return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
-static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
+MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
-static ssize_t mtd_badblocks_show(struct device *dev,
+static ssize_t mtd_bad_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
- return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
+ return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
-static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
+MTD_DEVICE_ATTR_RO(bad_blocks);
-static ssize_t mtd_bbtblocks_show(struct device *dev,
+static ssize_t mtd_bbt_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
- return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
+ return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
-static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
+MTD_DEVICE_ATTR_RO(bbt_blocks);
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
@@ -361,6 +362,7 @@ static struct dentry *dfs_dir_mtd;
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct device *dev = &mtd->dev;
struct dentry *root;
@@ -370,12 +372,12 @@ static void mtd_debugfs_populate(struct mtd_info *mtd)
root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
mtd->dbg.dfs_dir = root;
- if (mtd->dbg.partid)
- debugfs_create_file("partid", 0400, root, mtd,
+ if (master->dbg.partid)
+ debugfs_create_file("partid", 0400, root, master,
&mtd_partid_debug_fops);
- if (mtd->dbg.partname)
- debugfs_create_file("partname", 0400, root, mtd,
+ if (master->dbg.partname)
+ debugfs_create_file("partname", 0400, root, master,
&mtd_partname_debug_fops);
}
@@ -777,6 +779,150 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
mutex_init(&mtd->master.chrdev_lock);
}
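+/*
+ * Return the total size of the user or factory OTP area by summing the
+ * lengths of all regions reported by the device, or 0 if there is none.
+ */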
+static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
+{
+ struct otp_info *info;
+ ssize_t size = 0;
+ unsigned int i;
+ size_t retlen;
+ int ret;
+
+ info = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (is_user)
+ ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
+ else
+ ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < retlen / sizeof(*info); i++)
+ size += info[i].length;
+
+ kfree(info);
+ return size;
+
+err:
+ kfree(info);
+
+ /* ENODATA means there is no OTP region. */
+ return ret == -ENODATA ? 0 : ret;
+}
+
+static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ const char *compatible,
+ int size,
+ nvmem_reg_read_t reg_read)
+{
+ struct nvmem_device *nvmem = NULL;
+ struct nvmem_config config = {};
+ struct device_node *np;
+
+ /* DT binding is optional */
+ np = of_get_compatible_child(mtd->dev.of_node, compatible);
+
+ /* OTP nvmem will be registered on the physical device */
+ config.dev = mtd->dev.parent;
+ /* just reuse the compatible as name */
+ config.name = compatible;
+ config.id = NVMEM_DEVID_NONE;
+ config.owner = THIS_MODULE;
+ config.type = NVMEM_TYPE_OTP;
+ config.root_only = true;
+ config.reg_read = reg_read;
+ config.size = size;
+ config.of_node = np;
+ config.priv = mtd;
+
+ nvmem = nvmem_register(&config);
+ /* Just ignore if there is no NVMEM support in the kernel */
+ if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
+ nvmem = NULL;
+
+ of_node_put(np);
+
+ return nvmem;
+}
+
+static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct mtd_info *mtd = priv;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
+ if (ret)
+ return ret;
+
+ return retlen == bytes ? 0 : -EIO;
+}
+
+static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct mtd_info *mtd = priv;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
+ if (ret)
+ return ret;
+
+ return retlen == bytes ? 0 : -EIO;
+}
+
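+/*
+ * Register read-only NVMEM providers for the user and factory OTP areas
+ * when the MTD device implements the corresponding OTP callbacks.
+ */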
+static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+{
+ struct nvmem_device *nvmem;
+ ssize_t size;
+ int err;
+
+ if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
+ size = mtd_otp_size(mtd, true);
+ if (size < 0)
+ return size;
+
+ if (size > 0) {
+ nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+ mtd_nvmem_user_otp_reg_read);
+ if (IS_ERR(nvmem)) {
+ dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
+ return PTR_ERR(nvmem);
+ }
+ mtd->otp_user_nvmem = nvmem;
+ }
+ }
+
+ if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
+ size = mtd_otp_size(mtd, false);
+ if (size < 0) {
+ err = size;
+ goto err;
+ }
+
+ if (size > 0) {
+ nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
+ mtd_nvmem_fact_otp_reg_read);
+ if (IS_ERR(nvmem)) {
+ dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
+ err = PTR_ERR(nvmem);
+ goto err;
+ }
+ mtd->otp_factory_nvmem = nvmem;
+ }
+ }
+
+ return 0;
+
+err:
+ if (mtd->otp_user_nvmem)
+ nvmem_unregister(mtd->otp_user_nvmem);
+ return err;
+}
+
/**
* mtd_device_parse_register - parse partitions and register an MTD device.
*
@@ -852,6 +998,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
+ ret = mtd_otp_nvmem_add(mtd);
+
out:
if (ret && device_is_registered(&mtd->dev))
del_mtd_device(mtd);
@@ -873,6 +1021,12 @@ int mtd_device_unregister(struct mtd_info *master)
if (master->_reboot)
unregister_reboot_notifier(&master->reboot_notifier);
+ if (master->otp_user_nvmem)
+ nvmem_unregister(master->otp_user_nvmem);
+
+ if (master->otp_factory_nvmem)
+ nvmem_unregister(master->otp_factory_nvmem);
+
err = del_mtd_partitions(master);
if (err)
return err;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 862c4a889234..227df24387df 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -401,10 +401,8 @@ static int __init mtdoops_init(void)
cxt->mtd_index = mtd_index;
cxt->oops_buf = vmalloc(record_size);
- if (!cxt->oops_buf) {
- printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
+ if (!cxt->oops_buf)
return -ENOMEM;
- }
memset(cxt->oops_buf, 0xff, record_size);
cxt->oops_buf_busy = 0;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 665fd9020b76..04af12b66110 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -212,15 +212,14 @@ out_register:
return child;
}
-static ssize_t mtd_partition_offset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset);
+ return sysfs_emit(buf, "%lld\n", mtd->part.offset);
}
-
-static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
+static DEVICE_ATTR_RO(offset); /* mtd partition offset */
static const struct attribute *mtd_partition_attrs[] = {
&dev_attr_offset.attr,
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 044adf913854..64af6898131d 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -123,7 +123,7 @@ int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
pos[1] &= ~GENMASK(rbits - 1, 0);
- pos[1] |= val >> rbits;
+ pos[1] |= val >> (bits_per_block - rbits);
}
return 0;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 30f061939560..630728de4b7c 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -453,6 +453,14 @@ config MTD_NAND_ROCKCHIP
NFC v800: RK3308, RV1108
NFC v900: PX30, RK3326
+config MTD_NAND_PL35X
+ tristate "ARM PL35X NAND controller"
+ depends on OF || COMPILE_TEST
+ depends on PL353_SMC
+ help
+ Enables support for PrimeCell SMC PL351 and PL353 NAND
+ controller found on Zynq7000.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d011c6c53f8f..2f97958c3a33 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
+obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 549aac00228e..9cbcc698c64d 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -53,6 +54,7 @@
#define PROG_RST BIT(8)
#define PROG_GET_FEATURE BIT(9)
#define PROG_SET_FEATURE BIT(10)
+#define PROG_CHG_RD_COL_ENH BIT(14)
#define INTR_STS_EN_REG 0x14
#define INTR_SIG_EN_REG 0x18
@@ -70,6 +72,15 @@
#define FLASH_STS_REG 0x28
+#define TIMING_REG 0x2C
+#define TCCS_TIME_500NS 0
+#define TCCS_TIME_300NS 3
+#define TCCS_TIME_200NS 2
+#define TCCS_TIME_100NS 1
+#define FAST_TCAD BIT(2)
+#define DQS_BUFF_SEL_IN(x) FIELD_PREP(GENMASK(6, 3), (x))
+#define DQS_BUFF_SEL_OUT(x) FIELD_PREP(GENMASK(18, 15), (x))
+
#define DATA_PORT_REG 0x30
#define ECC_CONF_REG 0x34
@@ -91,7 +102,7 @@
#define DATA_INTERFACE_REG 0x6C
#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
-#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (X))
+#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
#define DIFACE_SDR 0
#define DIFACE_NVDDR BIT(9)
@@ -107,6 +118,8 @@
#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
+static struct gpio_desc *anfc_default_cs_array[2] = {NULL, NULL};
+
/**
* struct anfc_op - Defines how to execute an operation
* @pkt_reg: Packet register
@@ -137,11 +150,11 @@ struct anfc_op {
* struct anand - Defines the NAND chip related information
* @node: Used to store NAND chips into a list
* @chip: NAND chip information structure
- * @cs: Chip select line
* @rb: Ready-busy line
* @page_sz: Register value of the page_sz field to use
* @clk: Expected clock frequency to use
- * @timings: Data interface timing mode to use
+ * @data_iface: Data interface timing mode to use
+ * @timings: NV-DDR specific timings to use
* @ecc_conf: Hardware ECC configuration value
* @strength: Register value of the ECC strength
* @raddr_cycles: Row address cycle information
@@ -151,14 +164,17 @@ struct anfc_op {
* @errloc: Array of errors located with soft BCH
* @hw_ecc: Buffer to store syndromes computed by hardware
* @bch: BCH structure
+ * @cs_idx: Array of chip-selects for this device, values are indexes
+ * into the controller structure @cs_array
+ * @ncs_idx: Size of the @cs_idx array
*/
struct anand {
struct list_head node;
struct nand_chip chip;
- unsigned int cs;
unsigned int rb;
unsigned int page_sz;
unsigned long clk;
+ u32 data_iface;
u32 timings;
u32 ecc_conf;
u32 strength;
@@ -169,6 +185,8 @@ struct anand {
unsigned int *errloc;
u8 *hw_ecc;
struct bch_control *bch;
+ int *cs_idx;
+ int ncs_idx;
};
/**
@@ -179,8 +197,14 @@ struct anand {
* @bus_clk: Pointer to the flash clock
* @controller: Base controller structure
* @chips: List of all NAND chips attached to the controller
- * @assigned_cs: Bitmask describing already assigned CS lines
* @cur_clk: Current clock rate
+ * @cs_array: CS array. Native CS are left empty, the other cells are
+ * populated with their corresponding GPIO descriptor.
+ * @ncs: Size of @cs_array
+ * @cur_cs: Index in @cs_array of the currently in use CS
+ * @native_cs: Currently selected native CS
+ * @spare_cs: Native CS that is not wired (may be selected when a GPIO
+ * CS is in use)
*/
struct arasan_nfc {
struct device *dev;
@@ -189,8 +213,12 @@ struct arasan_nfc {
struct clk *bus_clk;
struct nand_controller controller;
struct list_head chips;
- unsigned long assigned_cs;
unsigned int cur_clk;
+ struct gpio_desc **cs_array;
+ unsigned int ncs;
+ int cur_cs;
+ unsigned int native_cs;
+ unsigned int spare_cs;
};
static struct anand *to_anand(struct nand_chip *nand)
@@ -273,6 +301,72 @@ static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
return 0;
}
+static bool anfc_is_gpio_cs(struct arasan_nfc *nfc, int nfc_cs)
+{
+ return nfc_cs >= 0 && nfc->cs_array[nfc_cs];
+}
+
+static int anfc_relative_to_absolute_cs(struct anand *anand, int num)
+{
+ return anand->cs_idx[num];
+}
+
+static void anfc_assert_cs(struct arasan_nfc *nfc, unsigned int nfc_cs_idx)
+{
+ /* CS did not change: do nothing */
+ if (nfc->cur_cs == nfc_cs_idx)
+ return;
+
+ /* Deassert the previous CS if it was a GPIO */
+ if (anfc_is_gpio_cs(nfc, nfc->cur_cs))
+ gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1);
+
+ /* Assert the new one */
+ if (anfc_is_gpio_cs(nfc, nfc_cs_idx)) {
+ nfc->native_cs = nfc->spare_cs;
+ gpiod_set_value_cansleep(nfc->cs_array[nfc_cs_idx], 0);
+ } else {
+ nfc->native_cs = nfc_cs_idx;
+ }
+
+ nfc->cur_cs = nfc_cs_idx;
+}
+
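+/*
+ * Select the given target: assert its chip-select, program the data
+ * interface and timing registers, and adjust the controller clock if needed.
+ */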
+static int anfc_select_target(struct nand_chip *chip, int target)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ unsigned int nfc_cs_idx = anfc_relative_to_absolute_cs(anand, target);
+ int ret;
+
+ anfc_assert_cs(nfc, nfc_cs_idx);
+
+ /* Update the controller timings and the potential ECC configuration */
+ writel_relaxed(anand->data_iface, nfc->base + DATA_INTERFACE_REG);
+ writel_relaxed(anand->timings, nfc->base + TIMING_REG);
+
+ /* Update clock frequency */
+ if (nfc->cur_clk != anand->clk) {
+ clk_disable_unprepare(nfc->controller_clk);
+ ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ if (ret) {
+ dev_err(nfc->dev, "Failed to change clock rate\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->controller_clk);
+ if (ret) {
+ dev_err(nfc->dev,
+ "Failed to re-enable the controller clock\n");
+ return ret;
+ }
+
+ nfc->cur_clk = anand->clk;
+ }
+
+ return 0;
+}
+
/*
* When using the embedded hardware ECC engine, the controller is in charge of
* feeding the engine with, first, the ECC residue present in the data array.
@@ -315,7 +409,7 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
.addr2_reg =
((page >> 16) & 0xFF) |
ADDR2_STRENGTH(anand->strength) |
- ADDR2_CS(anand->cs),
+ ADDR2_CS(nfc->native_cs),
.cmd_reg =
CMD_1(NAND_CMD_READ0) |
CMD_2(NAND_CMD_READSTART) |
@@ -401,6 +495,18 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
return 0;
}
+static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = anfc_select_target(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
+};
+
static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
@@ -420,7 +526,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
.addr2_reg =
((page >> 16) & 0xFF) |
ADDR2_STRENGTH(anand->strength) |
- ADDR2_CS(anand->cs),
+ ADDR2_CS(nfc->native_cs),
.cmd_reg =
CMD_1(NAND_CMD_SEQIN) |
CMD_2(NAND_CMD_PAGEPROG) |
@@ -461,11 +567,24 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
return ret;
}
+static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = anfc_select_target(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
+};
+
/* NAND framework ->exec_op() hooks and related helpers */
static int anfc_parse_instructions(struct nand_chip *chip,
const struct nand_subop *subop,
struct anfc_op *nfc_op)
{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
struct anand *anand = to_anand(chip);
const struct nand_op_instr *instr = NULL;
bool first_cmd = true;
@@ -473,7 +592,7 @@ static int anfc_parse_instructions(struct nand_chip *chip,
int ret, i;
memset(nfc_op, 0, sizeof(*nfc_op));
- nfc_op->addr2_reg = ADDR2_CS(anand->cs);
+ nfc_op->addr2_reg = ADDR2_CS(nfc->native_cs);
nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
@@ -622,7 +741,23 @@ static int anfc_param_read_type_exec(struct nand_chip *chip,
static int anfc_data_read_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
- return anfc_misc_data_type_exec(chip, subop, PROG_PGRD);
+ u32 prog_reg = PROG_PGRD;
+
+ /*
+ * Experience shows that while in SDR mode sending a CHANGE READ COLUMN
+ * command through the READ PAGE "type" always works fine, when in
+ * NV-DDR mode the same command simply fails. However, it was also
+ * spotted that any CHANGE READ COLUMN command sent through the CHANGE
+ * READ COLUMN ENHANCED "type" would correctly work in both cases (SDR
+ * and NV-DDR). So, for simplicity, let's program the controller with
+ * the CHANGE READ COLUMN ENHANCED "type" whenever we are requested to
+ * perform a CHANGE READ COLUMN operation.
+ */
+ if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_RNDOUT &&
+ subop->instrs[2].ctx.cmd.opcode == NAND_CMD_RNDOUTSTART)
+ prog_reg = PROG_CHG_RD_COL_ENH;
+
+ return anfc_misc_data_type_exec(chip, subop, prog_reg);
}
static int anfc_param_write_type_exec(struct nand_chip *chip,
@@ -753,37 +888,6 @@ static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
-static int anfc_select_target(struct nand_chip *chip, int target)
-{
- struct anand *anand = to_anand(chip);
- struct arasan_nfc *nfc = to_anfc(chip->controller);
- int ret;
-
- /* Update the controller timings and the potential ECC configuration */
- writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
-
- /* Update clock frequency */
- if (nfc->cur_clk != anand->clk) {
- clk_disable_unprepare(nfc->controller_clk);
- ret = clk_set_rate(nfc->controller_clk, anand->clk);
- if (ret) {
- dev_err(nfc->dev, "Failed to change clock rate\n");
- return ret;
- }
-
- ret = clk_prepare_enable(nfc->controller_clk);
- if (ret) {
- dev_err(nfc->dev,
- "Failed to re-enable the controller clock\n");
- return ret;
- }
-
- nfc->cur_clk = anand->clk;
- }
-
- return 0;
-}
-
static int anfc_check_op(struct nand_chip *chip,
const struct nand_operation *op)
{
@@ -861,21 +965,79 @@ static int anfc_setup_interface(struct nand_chip *chip, int target,
struct anand *anand = to_anand(chip);
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct device_node *np = nfc->dev->of_node;
+ const struct nand_sdr_timings *sdr;
+ const struct nand_nvddr_timings *nvddr;
+ unsigned int tccs_min, dqs_mode, fast_tcad;
+
+ if (nand_interface_is_nvddr(conf)) {
+ nvddr = nand_get_nvddr_timings(conf);
+ if (IS_ERR(nvddr))
+ return PTR_ERR(nvddr);
+ } else {
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+ }
if (target < 0)
return 0;
- anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode);
+ if (nand_interface_is_sdr(conf)) {
+ anand->data_iface = DIFACE_SDR |
+ DIFACE_SDR_MODE(conf->timings.mode);
+ anand->timings = 0;
+ } else {
+ anand->data_iface = DIFACE_NVDDR |
+ DIFACE_DDR_MODE(conf->timings.mode);
+
+ if (conf->timings.nvddr.tCCS_min <= 100000)
+ tccs_min = TCCS_TIME_100NS;
+ else if (conf->timings.nvddr.tCCS_min <= 200000)
+ tccs_min = TCCS_TIME_200NS;
+ else if (conf->timings.nvddr.tCCS_min <= 300000)
+ tccs_min = TCCS_TIME_300NS;
+ else
+ tccs_min = TCCS_TIME_500NS;
+
+ fast_tcad = 0;
+ if (conf->timings.nvddr.tCAD_min < 45000)
+ fast_tcad = FAST_TCAD;
+
+ switch (conf->timings.mode) {
+ case 5:
+ case 4:
+ dqs_mode = 2;
+ break;
+ case 3:
+ dqs_mode = 3;
+ break;
+ case 2:
+ dqs_mode = 4;
+ break;
+ case 1:
+ dqs_mode = 5;
+ break;
+ case 0:
+ default:
+ dqs_mode = 6;
+ break;
+ }
+
+ anand->timings = tccs_min | fast_tcad |
+ DQS_BUFF_SEL_IN(dqs_mode) |
+ DQS_BUFF_SEL_OUT(dqs_mode);
+ }
+
anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
* with f > 90MHz (default clock is 100MHz) but signals are unstable
* with higher modes. Hence we decrease a little bit the clock rate to
- * 80MHz when using modes 2-5 with this SoC.
+ * 80MHz when using SDR modes 2-5 with this SoC.
*/
if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
- conf->timings.mode >= 2)
+ nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
return 0;
@@ -1007,8 +1169,8 @@ static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
if (!anand->bch)
return -EINVAL;
- ecc->read_page = anfc_read_page_hw_ecc;
- ecc->write_page = anfc_write_page_hw_ecc;
+ ecc->read_page = anfc_sel_read_page_hw_ecc;
+ ecc->write_page = anfc_sel_write_page_hw_ecc;
return 0;
}
@@ -1094,37 +1256,43 @@ static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
struct anand *anand;
struct nand_chip *chip;
struct mtd_info *mtd;
- int cs, rb, ret;
+ int rb, ret, i;
anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
if (!anand)
return -ENOMEM;
- /* We do not support multiple CS per chip yet */
- if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) {
+ /* Chip-select init */
+ anand->ncs_idx = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (anand->ncs_idx <= 0 || anand->ncs_idx > nfc->ncs) {
dev_err(nfc->dev, "Invalid reg property\n");
return -EINVAL;
}
- ret = of_property_read_u32(np, "reg", &cs);
- if (ret)
- return ret;
+ anand->cs_idx = devm_kcalloc(nfc->dev, anand->ncs_idx,
+ sizeof(*anand->cs_idx), GFP_KERNEL);
+ if (!anand->cs_idx)
+ return -ENOMEM;
+ for (i = 0; i < anand->ncs_idx; i++) {
+ ret = of_property_read_u32_index(np, "reg", i,
+ &anand->cs_idx[i]);
+ if (ret) {
+ dev_err(nfc->dev, "invalid CS property: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Ready-busy init */
ret = of_property_read_u32(np, "nand-rb", &rb);
if (ret)
return ret;
- if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) {
- dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb);
- return -EINVAL;
- }
-
- if (test_and_set_bit(cs, &nfc->assigned_cs)) {
- dev_err(nfc->dev, "Already assigned CS %d\n", cs);
+ if (rb >= ANFC_MAX_CS) {
+ dev_err(nfc->dev, "Wrong RB %d\n", rb);
return -EINVAL;
}
- anand->cs = cs;
anand->rb = rb;
chip = &anand->chip;
@@ -1140,7 +1308,7 @@ static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
return -EINVAL;
}
- ret = nand_scan(chip, 1);
+ ret = nand_scan(chip, anand->ncs_idx);
if (ret) {
dev_err(nfc->dev, "Scan operation failed\n");
return ret;
@@ -1178,7 +1346,7 @@ static int anfc_chips_init(struct arasan_nfc *nfc)
int nchips = of_get_child_count(np);
int ret;
- if (!nchips || nchips > ANFC_MAX_CS) {
+ if (!nchips) {
dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
nchips);
return -EINVAL;
@@ -1203,6 +1371,47 @@ static void anfc_reset(struct arasan_nfc *nfc)
/* Enable interrupt status */
writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
+
+ nfc->cur_cs = -1;
+}
+
+static int anfc_parse_cs(struct arasan_nfc *nfc)
+{
+ int ret;
+
+ /* Check the gpio-cs property */
+ ret = rawnand_dt_parse_gpio_cs(nfc->dev, &nfc->cs_array, &nfc->ncs);
+ if (ret)
+ return ret;
+
+ /*
+ * The controller's two native CS lines cannot both be disabled at once.
+ * Hence, only one native CS can be used if GPIO CS are needed, so that
+ * the other is selected when a non-native CS must be asserted (not
+ * wired physically or configured as GPIO instead of NAND CS). In this
+ * case, the "not" chosen CS is assigned to nfc->spare_cs and selected
+ * whenever a GPIO CS must be asserted.
+ */
+ if (nfc->cs_array && nfc->ncs > 2) {
+ if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
+ dev_err(nfc->dev,
+ "Assign a single native CS when using GPIOs\n");
+ return -EINVAL;
+ }
+
+ if (nfc->cs_array[0])
+ nfc->spare_cs = 0;
+ else
+ nfc->spare_cs = 1;
+ }
+
+ if (!nfc->cs_array) {
+ nfc->cs_array = anfc_default_cs_array;
+ nfc->ncs = ANFC_MAX_CS;
+ return 0;
+ }
+
+ return 0;
}
static int anfc_probe(struct platform_device *pdev)
@@ -1241,6 +1450,14 @@ static int anfc_probe(struct platform_device *pdev)
if (ret)
goto disable_controller_clk;
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_bus_clk;
+
+ ret = anfc_parse_cs(nfc);
+ if (ret)
+ goto disable_bus_clk;
+
ret = anfc_chips_init(nfc);
if (ret)
goto disable_bus_clk;
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 8aab1017b460..f3276ee9e4fe 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1246,7 +1246,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
nc = to_nand_controller(nand->base.controller);
/* DDR interface not supported. */
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return -ENOTSUPP;
/*
@@ -1524,8 +1524,13 @@ static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct atmel_nand *nand = to_atmel_nand(chip);
+ const struct nand_sdr_timings *sdr;
struct atmel_nand_controller *nc;
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
nc = to_nand_controller(nand->base.controller);
if (csline >= nand->numcs ||
@@ -1629,10 +1634,8 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
}
nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
- if (!nand) {
- dev_err(nc->dev, "Failed to allocate NAND object\n");
+ if (!nand)
return ERR_PTR(-ENOMEM);
- }
nand->numcs = numcs;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index b46786cd53e0..7eec60ea9056 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2348,9 +2348,9 @@ cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
* for tRP and tRH timings. If it is NOT possible to sample data
* with optimal tRP/tRH settings, the parameters will be extended.
* If clk_period is 50ns (the lowest value) this condition is met
- * for asynchronous timing modes 1, 2, 3, 4 and 5.
- * If clk_period is 20ns the condition is met only
- * for asynchronous timing mode 5.
+ * for SDR timing modes 1, 2, 3, 4 and 5.
+ * If clk_period is 20ns the condition is met only for SDR timing
+ * mode 5.
*/
if (sdr->tRC_min <= clk_period &&
sdr->tRP_min <= (clk_period / 2) &&
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index fdc5ed7de083..5e1c3ddae5f8 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -79,7 +79,7 @@ enum gpmi_type {
struct gpmi_devdata {
enum gpmi_type type;
int bch_max_ecc_strength;
- int max_chain_delay; /* See the async EDO mode */
+ int max_chain_delay; /* See the SDR EDO mode */
const char * const *clks;
const int clks_count;
};
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 8b2122ce6ec3..78c4e05434e2 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -761,10 +761,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
host->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(host->mmio)) {
- dev_err(dev, "devm_ioremap_resource[1] fail\n");
+ if (IS_ERR(host->mmio))
return PTR_ERR(host->mmio);
- }
mtd->name = "hisi_nand";
mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index 012876e14317..7016e0f38398 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -90,9 +90,14 @@ void onfi_fill_interface_config(struct nand_chip *chip,
unsigned int timing_mode);
unsigned int
onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings);
+unsigned int
+onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings);
int nand_choose_best_sdr_timings(struct nand_chip *chip,
struct nand_interface_config *iface,
struct nand_sdr_timings *spec_timings);
+int nand_choose_best_nvddr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_nvddr_timings *spec_timings);
const struct nand_interface_config *nand_get_reset_interface_config(void);
int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 79da6b02e209..2455a581fd70 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -451,7 +451,7 @@ struct marvell_nfc_timings {
};
/**
- * Derives a duration in numbers of clock cycles.
+ * TO_CYCLES() - Derives a duration in numbers of clock cycles.
*
* @ps: Duration in pico-seconds
* @period_ns: Clock period in nano-seconds
@@ -3030,8 +3030,10 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev)
return ret;
ret = clk_prepare_enable(nfc->reg_clk);
- if (ret < 0)
+ if (ret < 0) {
+ clk_disable_unprepare(nfc->core_clk);
return ret;
+ }
/*
* Reset nfc->selected_chip so the next command will cause the timing
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 75f1fa3d4d35..c437d97debb8 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -515,10 +515,8 @@ static int mtk_ecc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ecc->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(ecc->regs)) {
- dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
+ if (IS_ERR(ecc->regs))
return PTR_ERR(ecc->regs);
- }
ecc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ecc->clk)) {
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index fb072c444495..3d6c6e880520 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -42,6 +42,7 @@
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include "internals.h"
@@ -647,7 +648,7 @@ static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
- const struct nand_sdr_timings *timings;
+ const struct nand_interface_config *conf;
u8 status = 0;
int ret;
@@ -655,8 +656,8 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
return -ENOTSUPP;
/* Wait tWB before polling the STATUS reg. */
- timings = nand_get_sdr_timings(nand_get_interface_config(chip));
- ndelay(PSEC_TO_NSEC(timings->tWB_max));
+ conf = nand_get_interface_config(chip);
+ ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
ret = nand_status_op(chip, NULL);
if (ret)
@@ -832,7 +833,7 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr)
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
const struct nand_controller_ops *ops = chip->controller->ops;
- u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
int ret;
if (!nand_controller_can_setup_interface(chip))
@@ -848,7 +849,12 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr)
if (!chip->best_interface_config)
return 0;
- tmode_param[0] = chip->best_interface_config->timings.mode;
+ request = chip->best_interface_config->timings.mode;
+ if (nand_interface_is_sdr(chip->best_interface_config))
+ request |= ONFI_DATA_INTERFACE_SDR;
+ else
+ request |= ONFI_DATA_INTERFACE_NVDDR;
+ tmode_param[0] = request;
/* Change the mode on the chip side (if supported by the NAND chip) */
if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
@@ -877,9 +883,13 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr)
if (ret)
goto err_reset_chip;
- if (tmode_param[0] != chip->best_interface_config->timings.mode) {
- pr_warn("timing mode %d not acknowledged by the NAND chip\n",
+ if (request != tmode_param[0]) {
+ pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
+ nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
chip->best_interface_config->timings.mode);
+ pr_debug("NAND chip would work in %s timing mode %d\n",
+ tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
+ (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
goto err_reset_chip;
}
@@ -935,7 +945,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
/* Fallback to slower modes */
best_mode = iface->timings.mode;
} else if (chip->parameters.onfi) {
- best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
+ best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
}
for (mode = best_mode; mode >= 0; mode--) {
@@ -943,13 +953,87 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
iface);
- if (!ret)
+ if (!ret) {
+ chip->best_interface_config = iface;
break;
+ }
}
- chip->best_interface_config = iface;
+ return ret;
+}
- return 0;
+/**
+ * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
+ * NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ * @spec_timings: specific timings, when not fitting the ONFI specification
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+int nand_choose_best_nvddr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_nvddr_timings *spec_timings)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int best_mode = 0, mode, ret;
+
+ iface->type = NAND_NVDDR_IFACE;
+
+ if (spec_timings) {
+ iface->timings.nvddr = *spec_timings;
+ iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);
+
+ /* Verify the controller supports the requested interface */
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ return ret;
+ }
+
+ /* Fallback to slower modes */
+ best_mode = iface->timings.mode;
+ } else if (chip->parameters.onfi) {
+ best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
+ }
+
+ for (mode = best_mode; mode >= 0; mode--) {
+ onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);
+
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
+ * NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+static int nand_choose_best_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ int ret;
+
+ /* Try the fastest timings: NV-DDR */
+ ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
+ if (!ret)
+ return 0;
+
+ /* Fallback to SDR timings otherwise */
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
}
/**
@@ -980,7 +1064,7 @@ static int nand_choose_interface_config(struct nand_chip *chip)
if (chip->ops.choose_interface_config)
ret = chip->ops.choose_interface_config(chip, iface);
else
- ret = nand_choose_best_sdr_timings(chip, iface, NULL);
+ ret = nand_choose_best_timings(chip, iface);
if (ret)
kfree(iface);
@@ -1046,15 +1130,15 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len)
{
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u8 addrs[4];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
- NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
- PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1089,15 +1173,15 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len)
{
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
u8 addrs[5];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_ADDR(4, addrs, 0),
- NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
- PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1186,13 +1270,14 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
return -EINVAL;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_PARAM, 0),
- NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
- PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_ADDR(1, &page,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1241,14 +1326,14 @@ int nand_change_read_column_op(struct nand_chip *chip,
return -ENOTSUPP;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
u8 addrs[2] = {};
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
- PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_COMMON_TIMING_NS(conf, tCCS_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1316,8 +1401,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool prog)
{
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u8 addrs[5] = {};
struct nand_op_instr instrs[] = {
@@ -1328,10 +1413,11 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
*/
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_CMD(NAND_CMD_SEQIN, 0),
- NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
NAND_OP_DATA_OUT(len, buf, 0),
- NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
@@ -1430,12 +1516,13 @@ int nand_prog_page_end_op(struct nand_chip *chip)
u8 status;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_PAGEPROG,
- PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
+ 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1548,12 +1635,12 @@ int nand_change_write_column_op(struct nand_chip *chip,
return -ENOTSUPP;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
u8 addrs[2];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RNDIN, 0),
- NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
NAND_OP_DATA_OUT(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1597,26 +1684,46 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len)
{
unsigned int i;
- u8 *id = buf;
+ u8 *id = buf, *ddrbuf = NULL;
if (len && !buf)
return -EINVAL;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READID, 0),
- NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_ADDR(1, &addr,
+ NAND_COMMON_TIMING_NS(conf, tADL_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* READ_ID data bytes are received twice in NV-DDR mode */
+ if (len && nand_interface_is_nvddr(conf)) {
+ ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ if (!ddrbuf)
+ return -ENOMEM;
+
+ instrs[2].ctx.data.len *= 2;
+ instrs[2].ctx.data.buf.in = ddrbuf;
+ }
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
- return nand_exec_op(chip, &op);
+ ret = nand_exec_op(chip, &op);
+ if (!ret && len && nand_interface_is_nvddr(conf)) {
+ for (i = 0; i < len; i++)
+ id[i] = ddrbuf[i * 2];
+ }
+
+ kfree(ddrbuf);
+
+ return ret;
}
chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
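In NV-DDR mode data is latched on both edges of the data strobe, so 8-bit parameter payloads (READ_ID here, READ_STATUS and GET_FEATURES below) are received twice: the exec_op path doubles the DATA_IN length and then keeps one byte out of two. A minimal standalone sketch of that de-duplication step (the helper name is illustrative only; the patch open-codes the loop in each caller):

static void nvddr_pick_one_byte_out_of_two(u8 *dst, const u8 *ddrbuf,
					   unsigned int len)
{
	unsigned int i;

	/* Each payload byte shows up twice in the doubled buffer */
	for (i = 0; i < len; i++)
		dst[i] = ddrbuf[i * 2];
}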
@@ -1642,19 +1749,31 @@ EXPORT_SYMBOL_GPL(nand_readid_op);
int nand_status_op(struct nand_chip *chip, u8 *status)
{
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 ddrstatus[2];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_STATUS,
- PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_COMMON_TIMING_NS(conf, tADL_min)),
NAND_OP_8BIT_DATA_IN(1, status, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ /* The status data byte will be received twice in NV-DDR mode */
+ if (status && nand_interface_is_nvddr(conf)) {
+ instrs[1].ctx.data.len *= 2;
+ instrs[1].ctx.data.buf.in = ddrstatus;
+ }
if (!status)
op.ninstrs--;
- return nand_exec_op(chip, &op);
+ ret = nand_exec_op(chip, &op);
+ if (!ret && status && nand_interface_is_nvddr(conf))
+ *status = ddrstatus[0];
+
+ return ret;
}
chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
@@ -1711,15 +1830,16 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
u8 status;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
u8 addrs[3] = { page, page >> 8, page >> 16 };
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_ERASE1, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_ERASE2,
- PSEC_TO_MSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ NAND_COMMON_TIMING_MS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
+ 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1770,14 +1890,17 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
int i, ret;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
- NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
+ tADL_min)),
NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
- PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ NAND_COMMON_TIMING_NS(conf,
+ tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
+ 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1813,23 +1936,37 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
void *data)
{
- u8 *params = data;
+ u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
int i;
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
- NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
- PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_ADDR(1, &feature,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
data, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
- return nand_exec_op(chip, &op);
+ /* GET_FEATURE data bytes are received twice in NV-DDR mode */
+ if (nand_interface_is_nvddr(conf)) {
+ instrs[3].ctx.data.len *= 2;
+ instrs[3].ctx.data.buf.in = ddrbuf;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (nand_interface_is_nvddr(conf)) {
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
+ params[i] = ddrbuf[i * 2];
+ }
+
+ return ret;
}
chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
@@ -1874,11 +2011,13 @@ static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
int nand_reset_op(struct nand_chip *chip)
{
if (nand_has_exec_op(chip)) {
- const struct nand_sdr_timings *sdr =
- nand_get_sdr_timings(nand_get_interface_config(chip));
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
- NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
- NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ NAND_OP_CMD(NAND_CMD_RESET,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
+ 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
@@ -1913,17 +2052,50 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
return -EINVAL;
if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ u8 *ddrbuf = NULL;
+ int ret, i;
instrs[0].ctx.data.force_8bit = force_8bit;
- if (check_only)
- return nand_check_op(chip, &op);
+ /*
+ * Parameter payloads (ID, status, features, etc.) do not go
+ * through the same pipeline as regular data, hence the
+ * force_8bit flag must be set. When NV-DDR timings are in use
+ * this also means that each data byte will be received twice.
+ */
+ if (force_8bit && nand_interface_is_nvddr(conf)) {
+ ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ if (!ddrbuf)
+ return -ENOMEM;
- return nand_exec_op(chip, &op);
+ instrs[0].ctx.data.len *= 2;
+ instrs[0].ctx.data.buf.in = ddrbuf;
+ }
+
+ if (check_only) {
+ ret = nand_check_op(chip, &op);
+ kfree(ddrbuf);
+ return ret;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
+ u8 *dst = buf;
+
+ for (i = 0; i < len; i++)
+ dst[i] = ddrbuf[i * 2];
+ }
+
+ kfree(ddrbuf);
+
+ return ret;
}
if (check_only)
@@ -3136,13 +3308,13 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
static void nand_wait_readrdy(struct nand_chip *chip)
{
- const struct nand_sdr_timings *sdr;
+ const struct nand_interface_config *conf;
if (!(chip->options & NAND_NEED_READRDY))
return;
- sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
- WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
+ conf = nand_get_interface_config(chip);
+ WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
}
/**
@@ -5056,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
+ struct property *prop;
int nr_elem, i, j;
- nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
- if (!nr_elem)
+ /* Only proceed if the "secure-regions" property is present in DT */
+ prop = of_find_property(dn, "secure-regions", NULL);
+ if (!prop)
return 0;
+ nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
+ if (nr_elem <= 0)
+ return nr_elem;
+
chip->nr_secure_regions = nr_elem / 2;
chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
GFP_KERNEL);
@@ -5078,6 +5256,44 @@ static int of_get_nand_secure_regions(struct nand_chip *chip)
return 0;
}
+/**
+ * rawnand_dt_parse_gpio_cs - Parse the "cs-gpios" property of a controller
+ * @dev: Device to be parsed. Also used for managed allocations.
+ * @cs_array: Array of GPIO desc pointers allocated on success.
+ * @ncs_array: Number of entries in @cs_array, updated on success.
+ *
+ * Return: 0 on success, an error otherwise.
+ */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array)
+{
+ struct device_node *np = dev->of_node;
+ struct gpio_desc **descs;
+ int ndescs, i;
+
+ ndescs = of_gpio_named_count(np, "cs-gpios");
+ if (ndescs < 0) {
+ dev_dbg(dev, "No valid cs-gpios property\n");
+ return 0;
+ }
+
+ descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+
+ for (i = 0; i < ndescs; i++) {
+ descs[i] = gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(descs[i]))
+ return PTR_ERR(descs[i]);
+ }
+
+ *ncs_array = ndescs;
+ *cs_array = descs;
+
+ return 0;
+}
+EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
+
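A hypothetical probe-time use of the new helper (the my_nfc structure and its fields are made up for illustration; only rawnand_dt_parse_gpio_cs() comes from this patch):

struct my_nfc {
	struct gpio_desc **cs_gpiods;
	unsigned int ncs;
};

static int my_nfc_parse_dt(struct device *dev, struct my_nfc *nfc)
{
	/* Fills cs_gpiods/ncs; ncs is left untouched when no "cs-gpios" exists */
	return rawnand_dt_parse_gpio_cs(dev, &nfc->cs_gpiods, &nfc->ncs);
}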
static int rawnand_dt_init(struct nand_chip *chip)
{
struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index eccc18b266d5..743792edf98d 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -369,7 +369,7 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (nand_controller_can_setup_interface(chip))
+ if (!IS_ERR(sdr) && nand_controller_can_setup_interface(chip))
ndelay(sdr->tCCS_min / 1000);
else
ndelay(500);
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 45649e03797d..7586befce7f9 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -315,7 +315,10 @@ int nand_onfi_detect(struct nand_chip *chip)
onfi->tBERS = le16_to_cpu(p->t_bers);
onfi->tR = le16_to_cpu(p->t_r);
onfi->tCCS = le16_to_cpu(p->t_ccs);
- onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->fast_tCAD = le16_to_cpu(p->nvddr_nvddr2_features) & BIT(0);
+ onfi->sdr_timing_modes = le16_to_cpu(p->sdr_timing_modes);
+ if (le16_to_cpu(p->features) & ONFI_FEATURE_NV_DDR)
+ onfi->nvddr_timing_modes = le16_to_cpu(p->nvddr_timing_modes);
onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
chip->parameters.onfi = onfi;
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index 94d832646487..7b41afc372d2 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -292,6 +292,261 @@ static const struct nand_interface_config onfi_sdr_timings[] = {
},
};
+static const struct nand_interface_config onfi_nvddr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 0,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 10000,
+ .tCALH_min = 10000,
+ .tCALS_min = 10000,
+ .tCAS_min = 10000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCK_min = 50000,
+ .tCS_min = 35000,
+ .tDH_min = 5000,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 5000,
+ .tDS_min = 5000,
+ .tDSC_min = 50000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 6000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 1,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 5000,
+ .tCALH_min = 5000,
+ .tCALS_min = 5000,
+ .tCAS_min = 5000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCK_min = 30000,
+ .tCS_min = 25000,
+ .tDH_min = 2500,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 2500,
+ .tDS_min = 3000,
+ .tDSC_min = 30000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 3000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 2,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 4000,
+ .tCALH_min = 4000,
+ .tCALS_min = 4000,
+ .tCAS_min = 4000,
+ .tCEH_min = 20000,
+ .tCH_min = 4000,
+ .tCK_min = 20000,
+ .tCS_min = 15000,
+ .tDH_min = 1700,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1700,
+ .tDS_min = 2000,
+ .tDSC_min = 20000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 2000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 3,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 3000,
+ .tCALH_min = 3000,
+ .tCALS_min = 3000,
+ .tCAS_min = 3000,
+ .tCEH_min = 20000,
+ .tCH_min = 3000,
+ .tCK_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 1300,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1300,
+ .tDS_min = 1500,
+ .tDSC_min = 15000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1500,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 4,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2500,
+ .tCALH_min = 2500,
+ .tCALS_min = 2500,
+ .tCAS_min = 2500,
+ .tCEH_min = 20000,
+ .tCH_min = 2500,
+ .tCK_min = 12000,
+ .tCS_min = 15000,
+ .tDH_min = 1100,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1000,
+ .tDS_min = 1100,
+ .tDSC_min = 12000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1200,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 5,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2000,
+ .tCALH_min = 2000,
+ .tCALS_min = 2000,
+ .tCAS_min = 2000,
+ .tCEH_min = 20000,
+ .tCH_min = 2000,
+ .tCK_min = 10000,
+ .tCS_min = 15000,
+ .tDH_min = 900,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 850,
+ .tDS_min = 900,
+ .tDSC_min = 10000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+};
+
/* All NAND chips share the same reset data interface: SDR mode 0 */
const struct nand_interface_config *nand_get_reset_interface_config(void)
{
@@ -346,23 +601,60 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
}
/**
- * onfi_fill_interface_config - Initialize an interface config from a given
- * ONFI mode
+ * onfi_find_closest_nvddr_mode - Derive the closest ONFI NVDDR timing mode
+ * given a set of timings
+ * @spec_timings: the timings to compare against the ONFI NV-DDR modes
+ */
+unsigned int
+onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings)
+{
+ const struct nand_nvddr_timings *onfi_timings;
+ int mode;
+
+ for (mode = ARRAY_SIZE(onfi_nvddr_timings) - 1; mode > 0; mode--) {
+ onfi_timings = &onfi_nvddr_timings[mode].timings.nvddr;
+
+ if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+ spec_timings->tAC_min <= onfi_timings->tAC_min &&
+ spec_timings->tADL_min <= onfi_timings->tADL_min &&
+ spec_timings->tCAD_min <= onfi_timings->tCAD_min &&
+ spec_timings->tCAH_min <= onfi_timings->tCAH_min &&
+ spec_timings->tCALH_min <= onfi_timings->tCALH_min &&
+ spec_timings->tCALS_min <= onfi_timings->tCALS_min &&
+ spec_timings->tCAS_min <= onfi_timings->tCAS_min &&
+ spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+ spec_timings->tCH_min <= onfi_timings->tCH_min &&
+ spec_timings->tCK_min <= onfi_timings->tCK_min &&
+ spec_timings->tCS_min <= onfi_timings->tCS_min &&
+ spec_timings->tDH_min <= onfi_timings->tDH_min &&
+ spec_timings->tDQSCK_min <= onfi_timings->tDQSCK_min &&
+ spec_timings->tDQSD_min <= onfi_timings->tDQSD_min &&
+ spec_timings->tDS_min <= onfi_timings->tDS_min &&
+ spec_timings->tDSC_min <= onfi_timings->tDSC_min &&
+ spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+ spec_timings->tRR_min <= onfi_timings->tRR_min &&
+ spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+ spec_timings->tWRCK_min <= onfi_timings->tWRCK_min &&
+ spec_timings->tWW_min <= onfi_timings->tWW_min)
+ return mode;
+ }
+
+ return 0;
+}
+
+/*
+ * onfi_fill_sdr_interface_config - Initialize an SDR interface config from a
+ * given ONFI mode
* @chip: The NAND chip
* @iface: The interface configuration to fill
- * @type: The interface type
* @timing_mode: The ONFI timing mode
*/
-void onfi_fill_interface_config(struct nand_chip *chip,
- struct nand_interface_config *iface,
- enum nand_interface_type type,
- unsigned int timing_mode)
+static void onfi_fill_sdr_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ unsigned int timing_mode)
{
struct onfi_params *onfi = chip->parameters.onfi;
- if (WARN_ON(type != NAND_SDR_IFACE))
- return;
-
if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
return;
@@ -385,3 +677,61 @@ void onfi_fill_interface_config(struct nand_chip *chip,
timings->tCCS_min = 1000UL * onfi->tCCS;
}
}
+
+/**
+ * onfi_fill_nvddr_interface_config - Initialize an NVDDR interface config from a
+ * given ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @timing_mode: The ONFI timing mode
+ */
+static void onfi_fill_nvddr_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ unsigned int timing_mode)
+{
+ struct onfi_params *onfi = chip->parameters.onfi;
+
+ if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_nvddr_timings)))
+ return;
+
+ *iface = onfi_nvddr_timings[timing_mode];
+
+ /*
+ * Initialize timings that cannot be deduced from timing mode:
+ * tPROG, tBERS, tR, tCCS and tCAD.
+ * This information is part of the ONFI parameter page.
+ */
+ if (onfi) {
+ struct nand_nvddr_timings *timings = &iface->timings.nvddr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+
+ if (onfi->fast_tCAD)
+ timings->tCAD_min = 25000;
+ }
+}
+
+/**
+ * onfi_fill_interface_config - Initialize an interface config from a given
+ * ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @type: The interface type
+ * @timing_mode: The ONFI timing mode
+ */
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode)
+{
+ if (type == NAND_SDR_IFACE)
+ return onfi_fill_sdr_interface_config(chip, iface, timing_mode);
+ else
+ return onfi_fill_nvddr_interface_config(chip, iface, timing_mode);
+}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index c75e7a0b101f..b1839eef5b65 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -131,7 +131,7 @@
#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
-#define BADBLOCK_MARKER_LENGTH 2
+#define BBM_LEN 2
static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
@@ -171,6 +171,10 @@ struct omap_nand_info {
struct device *elm_dev;
/* NAND ready gpio */
struct gpio_desc *ready_gpiod;
+ unsigned int neccpg;
+ unsigned int nsteps_per_eccpg;
+ unsigned int eccpg_size;
+ unsigned int eccpg_bytes;
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
@@ -1355,7 +1359,7 @@ static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct nand_ecc_ctrl *ecc = &info->nand.ecc;
- int eccsteps = info->nand.ecc.steps;
+ int eccsteps = info->nsteps_per_eccpg;
int i , j, stat = 0;
int eccflag, actual_eccbytes;
struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
@@ -1525,24 +1529,37 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
+ struct omap_nand_info *info = mtd_to_omap(mtd);
uint8_t *ecc_calc = chip->ecc.calc_buf;
+ unsigned int eccpg;
+ int ret;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- /* Enable GPMC ecc engine */
- chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+ for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
- /* Write data */
- chip->legacy.write_buf(chip, buf, mtd->writesize);
+ /* Write data */
+ chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size);
- /* Update ecc vector from GPMC result registers */
- omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
+ /* Update ecc vector from GPMC result registers */
+ ret = omap_calculate_ecc_bch_multi(mtd,
+ buf + (eccpg * info->eccpg_size),
+ ecc_calc);
+ if (ret)
+ return ret;
- ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc,
+ chip->oob_poi,
+ eccpg * info->eccpg_bytes,
+ info->eccpg_bytes);
+ if (ret)
+ return ret;
+ }
/* Write ecc vector to OOB area */
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
@@ -1566,12 +1583,13 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- int ecc_steps = chip->ecc.steps;
u32 start_step = offset / ecc_size;
u32 end_step = (offset + data_len - 1) / ecc_size;
+ unsigned int eccpg;
int step, ret = 0;
/*
@@ -1580,36 +1598,48 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
* ECC is calculated for all subpages but we choose
* only what we want.
*/
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
-
- /* Enable GPMC ECC engine */
- chip->ecc.hwctl(chip, NAND_ECC_WRITE);
-
- /* Write data */
- chip->legacy.write_buf(chip, buf, mtd->writesize);
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- for (step = 0; step < ecc_steps; step++) {
- /* mask ECC of un-touched subpages by padding 0xFF */
- if (step < start_step || step > end_step)
- memset(ecc_calc, 0xff, ecc_bytes);
- else
- ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+ for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size);
+
+ for (step = 0; step < info->nsteps_per_eccpg; step++) {
+ unsigned int base_step = eccpg * info->nsteps_per_eccpg;
+ const u8 *bufoffs = buf + (eccpg * info->eccpg_size);
+
+ /* Mask ECC of un-touched subpages with 0xFFs */
+ if ((step + base_step) < start_step ||
+ (step + base_step) > end_step)
+ memset(ecc_calc + (step * ecc_bytes), 0xff,
+ ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd,
+ bufoffs + (step * ecc_size),
+ ecc_calc + (step * ecc_bytes),
+ step);
+
+ if (ret)
+ return ret;
+ }
+ /*
+ * Copy the calculated ECC for the whole page including the
+ * masked values (0xFF) corresponding to unwritten subpages.
+ */
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
+ eccpg * info->eccpg_bytes,
+ info->eccpg_bytes);
if (ret)
return ret;
-
- buf += ecc_size;
- ecc_calc += ecc_bytes;
}
- /* copy calculated ECC for whole page to chip->buffer->oob */
- /* this include masked-value(0xFF) for unwritten subpages */
- ecc_calc = chip->ecc.calc_buf;
- ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
-
/* write OOB buffer to NAND device */
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
@@ -1634,40 +1664,60 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
uint8_t *ecc_calc = chip->ecc.calc_buf;
uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0, eccpg;
int stat, ret;
- unsigned int max_bitflips = 0;
-
- nand_read_page_op(chip, page, 0, NULL, 0);
- /* Enable GPMC ecc engine */
- chip->ecc.hwctl(chip, NAND_ECC_READ);
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- /* Read data */
- chip->legacy.read_buf(chip, buf, mtd->writesize);
+ for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
- /* Read oob bytes */
- nand_change_read_column_op(chip,
- mtd->writesize + BADBLOCK_MARKER_LENGTH,
- chip->oob_poi + BADBLOCK_MARKER_LENGTH,
- chip->ecc.total, false);
+ /* Read data */
+ ret = nand_change_read_column_op(chip, eccpg * info->eccpg_size,
+ buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
+ if (ret)
+ return ret;
- /* Calculate ecc bytes */
- omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
+ /* Read oob bytes */
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + BBM_LEN +
+ (eccpg * info->eccpg_bytes),
+ chip->oob_poi + BBM_LEN +
+ (eccpg * info->eccpg_bytes),
+ info->eccpg_bytes, false);
+ if (ret)
+ return ret;
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ /* Calculate ecc bytes */
+ ret = omap_calculate_ecc_bch_multi(mtd,
+ buf + (eccpg * info->eccpg_size),
+ ecc_calc);
+ if (ret)
+ return ret;
- stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code,
+ chip->oob_poi,
+ eccpg * info->eccpg_bytes,
+ info->eccpg_bytes);
+ if (ret)
+ return ret;
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ stat = chip->ecc.correct(chip,
+ buf + (eccpg * info->eccpg_size),
+ ecc_code, ecc_calc);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
return max_bitflips;
@@ -1820,7 +1870,7 @@ static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
{
struct omap_nand_info *info = mtd_to_omap(mtd);
struct nand_chip *chip = &info->nand;
- int off = BADBLOCK_MARKER_LENGTH;
+ int off = BBM_LEN;
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
!(chip->options & NAND_BUSWIDTH_16))
@@ -1840,7 +1890,7 @@ static int omap_ooblayout_free(struct mtd_info *mtd, int section,
{
struct omap_nand_info *info = mtd_to_omap(mtd);
struct nand_chip *chip = &info->nand;
- int off = BADBLOCK_MARKER_LENGTH;
+ int off = BBM_LEN;
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
!(chip->options & NAND_BUSWIDTH_16))
@@ -1870,7 +1920,7 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
- int off = BADBLOCK_MARKER_LENGTH;
+ int off = BBM_LEN;
if (section >= nsteps)
return -ERANGE;
@@ -1891,7 +1941,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
- int off = BADBLOCK_MARKER_LENGTH;
+ int off = BBM_LEN;
if (section)
return -ERANGE;
@@ -1920,7 +1970,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
struct device *dev = &info->pdev->dev;
- int min_oobbytes = BADBLOCK_MARKER_LENGTH;
+ int min_oobbytes = BBM_LEN;
+ int elm_bch_strength = -1;
int oobbytes_per_step;
dma_cap_mask_t mask;
int err;
@@ -2074,12 +2125,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
-
- err = elm_config(info->elm_dev, BCH4_ECC,
- mtd->writesize / chip->ecc.size,
- chip->ecc.size, chip->ecc.bytes);
- if (err < 0)
- return err;
+ elm_bch_strength = BCH4_ECC;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
@@ -2116,13 +2162,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
-
- err = elm_config(info->elm_dev, BCH8_ECC,
- mtd->writesize / chip->ecc.size,
- chip->ecc.size, chip->ecc.bytes);
- if (err < 0)
- return err;
-
+ elm_bch_strength = BCH8_ECC;
break;
case OMAP_ECC_BCH16_CODE_HW:
@@ -2138,19 +2178,32 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
-
- err = elm_config(info->elm_dev, BCH16_ECC,
- mtd->writesize / chip->ecc.size,
- chip->ecc.size, chip->ecc.bytes);
- if (err < 0)
- return err;
-
+ elm_bch_strength = BCH16_ECC;
break;
default:
dev_err(dev, "Invalid or unsupported ECC scheme\n");
return -EINVAL;
}
+ if (elm_bch_strength >= 0) {
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ info->neccpg = chip->ecc.steps / ERROR_VECTOR_MAX;
+ if (info->neccpg) {
+ info->nsteps_per_eccpg = ERROR_VECTOR_MAX;
+ } else {
+ info->neccpg = 1;
+ info->nsteps_per_eccpg = chip->ecc.steps;
+ }
+ info->eccpg_size = info->nsteps_per_eccpg * chip->ecc.size;
+ info->eccpg_bytes = info->nsteps_per_eccpg * chip->ecc.bytes;
+
+ err = elm_config(info->elm_dev, elm_bch_strength,
+ info->nsteps_per_eccpg, chip->ecc.size,
+ chip->ecc.bytes);
+ if (err < 0)
+ return err;
+ }
+
/* Check if NAND device's OOB is enough to store ECC signatures */
min_oobbytes += (oobbytes_per_step *
(mtd->writesize / chip->ecc.size));
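As an illustration of the split computed above (not part of the patch, and assuming ERROR_VECTOR_MAX is 8 as in the ELM driver and 14 ECC bytes per 512-byte step for BCH8), an 8 KiB page gives 16 ECC steps handled as two ECC pages of 8 steps each:

	unsigned int ecc_steps = 8192 / 512;	/* 16 steps */
	unsigned int neccpg = ecc_steps / 8;	/* 2 ECC "pages" */
	unsigned int nsteps_per_eccpg = 8;	/* 8 steps per ELM pass */
	unsigned int eccpg_size = 8 * 512;	/* 4096 data bytes per pass */
	unsigned int eccpg_bytes = 8 * 14;	/* 112 ECC bytes per pass */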
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 550695a4c1ab..2b21ce04b3ec 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -116,7 +116,7 @@ int elm_config(struct device *dev, enum bch_ecc bch_type,
return -EINVAL;
}
/* ELM support 8 error syndrome process */
- if (ecc_steps > ERROR_VECTOR_MAX) {
+ if (ecc_steps > ERROR_VECTOR_MAX && ecc_steps % ERROR_VECTOR_MAX) {
dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
return -EINVAL;
}
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
new file mode 100644
index 000000000000..8a91e069ee2e
--- /dev/null
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -0,0 +1,1194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL35X NAND flash controller driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc
+ * Author:
+ * Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (rewritten):
+ * Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"
+
+/* SMC controller status register (RO) */
+#define PL35X_SMC_MEMC_STATUS 0x0
+#define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6)
+/* SMC clear config register (WO) */
+#define PL35X_SMC_MEMC_CFG_CLR 0xC
+#define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1)
+#define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4)
+#define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6)
+/* SMC direct command register (WO) */
+#define PL35X_SMC_DIRECT_CMD 0x10
+#define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
+#define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)
+/* SMC set cycles register (WO) */
+#define PL35X_SMC_CYCLES 0x14
+#define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
+#define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
+#define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
+#define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
+#define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
+#define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
+#define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)
+/* SMC set opmode register (WO) */
+#define PL35X_SMC_OPMODE 0x18
+#define PL35X_SMC_OPMODE_BW_8 0
+#define PL35X_SMC_OPMODE_BW_16 1
+/* SMC ECC status register (RO) */
+#define PL35X_SMC_ECC_STATUS 0x400
+#define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)
+/* SMC ECC configuration register */
+#define PL35X_SMC_ECC_CFG 0x404
+#define PL35X_SMC_ECC_CFG_MODE_MASK 0xC
+#define PL35X_SMC_ECC_CFG_MODE_BYPASS 0
+#define PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
+#define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
+#define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3
+/* SMC ECC command 1 register */
+#define PL35X_SMC_ECC_CMD1 0x408
+#define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
+#define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
+#define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
+#define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)
+/* SMC ECC command 2 register */
+#define PL35X_SMC_ECC_CMD2 0x40C
+#define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
+#define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
+#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
+#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)
+/* SMC ECC value registers (RO) */
+#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
+#define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
+#define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
+#define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))
+
+/* NAND AXI interface */
+#define PL35X_SMC_CMD_PHASE 0
+#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
+#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
+#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
+#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
+#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
+#define PL35X_SMC_DATA_PHASE BIT(19)
+#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
+#define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)
+
+#define PL35X_NAND_MAX_CS 1
+#define PL35X_NAND_LAST_XFER_SZ 4
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
+
+#define PL35X_NAND_ECC_BITS_MASK 0xFFF
+#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
+#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7
+
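The TO_CYCLES() macro above turns a picosecond timing into a number of SMC clock cycles, rounding up. A small worked example (illustration only): a 25000 ps constraint on a 100 MHz clock (10 ns period) needs three cycles:

	unsigned int ncycles = TO_CYCLES(25000, 10);	/* DIV_ROUND_UP(25, 10) = 3 */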
+struct pl35x_nand_timings {
+ unsigned int t_rc:4;
+ unsigned int t_wc:4;
+ unsigned int t_rea:3;
+ unsigned int t_wp:3;
+ unsigned int t_clr:3;
+ unsigned int t_ar:3;
+ unsigned int t_rr:4;
+ unsigned int rsvd:8;
+};
+
+struct pl35x_nand {
+ struct list_head node;
+ struct nand_chip chip;
+ unsigned int cs;
+ unsigned int addr_cycles;
+ u32 ecc_cfg;
+ u32 timings;
+};
+
+/**
+ * struct pl35x_nandc - NAND flash controller driver structure
+ * @dev: Kernel device
+ * @conf_regs: SMC configuration registers for command phase
+ * @io_regs: NAND data registers for data phase
+ * @controller: Core NAND controller structure
+ * @chips: List of NAND chips attached to the controller
+ * @selected_chip: NAND chip currently selected by the controller
+ * @assigned_cs: Bitmask of the assigned CS lines
+ * @ecc_buf: Temporary buffer to extract ECC bytes
+ */
+struct pl35x_nandc {
+ struct device *dev;
+ void __iomem *conf_regs;
+ void __iomem *io_regs;
+ struct nand_controller controller;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ unsigned long assigned_cs;
+ u8 *ecc_buf;
+};
+
+static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct pl35x_nandc, controller);
+}
+
+static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct pl35x_nand, chip);
+}
+
+static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes);
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
+ .ecc = pl35x_ecc_ooblayout16_ecc,
+ .free = pl35x_ecc_ooblayout16_free,
+};
+
+/* Generic flash bbt descriptors */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
+{
+ writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
+ PL35X_SMC_DIRECT_CMD_UPD_REGS,
+ nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
+}
+
+static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
+{
+ if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
+ return -EINVAL;
+
+ writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
+ pl35x_smc_update_regs(nfc);
+
+ return 0;
+}
+
+static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
+{
+ writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
+ nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
+}
+
+static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
+ reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
+ 10, 1000000);
+ if (ret)
+ dev_err(nfc->dev,
+ "Timeout polling on NAND controller interrupt (0x%x)\n",
+ reg);
+
+ pl35x_smc_clear_irq(nfc);
+
+ return ret;
+}
+
+static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
+ !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
+ 10, 1000000);
+ if (ret)
+ dev_err(nfc->dev,
+ "Timeout polling on ECC controller interrupt\n");
+
+ return ret;
+}
+
+static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
+ struct nand_chip *chip,
+ unsigned int mode)
+{
+ struct pl35x_nand *plnand;
+ u32 ecc_cfg;
+
+ ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
+ ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
+ ecc_cfg |= mode;
+ writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
+
+ if (chip) {
+ plnand = to_pl35x_nand(chip);
+ plnand->ecc_cfg = ecc_cfg;
+ }
+
+ if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
+ return pl35x_smc_wait_for_ecc_done(nfc);
+
+ return 0;
+}
+
+static void pl35x_smc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ int ret;
+
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ if (force_8bit)
+ ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
+ else
+ ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);
+
+ if (ret)
+ dev_err(nfc->dev, "Error in Buswidth\n");
+}
+
+static void pl35x_nand_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+
+ if (chip == nfc->selected_chip)
+ return;
+
+ /* Setup the timings */
+ writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
+ pl35x_smc_update_regs(nfc);
+
+ /* Configure the ECC engine */
+ writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
+
+ nfc->selected_chip = chip;
+}
+
+static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
+ unsigned int len, bool force_8bit,
+ unsigned int flags, unsigned int last_flags)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ unsigned int buf_end = len / 4;
+ unsigned int in_start = round_down(len, 4);
+ unsigned int data_phase_addr;
+ u32 *buf32 = (u32 *)in;
+ u8 *buf8 = (u8 *)in;
+ int i;
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, true);
+
+ for (i = 0; i < buf_end; i++) {
+ data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
+ if (i + 1 == buf_end)
+ data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;
+
+ buf32[i] = readl(nfc->io_regs + data_phase_addr);
+ }
+
+ /* Extra flags cannot be applied to unaligned (byte) accesses */
+ for (i = in_start; i < len; i++)
+ buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, false);
+}
+
+static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
+ int len, bool force_8bit,
+ unsigned int flags,
+ unsigned int last_flags)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ unsigned int buf_end = len / 4;
+ unsigned int in_start = round_down(len, 4);
+ const u32 *buf32 = (const u32 *)out;
+ const u8 *buf8 = (const u8 *)out;
+ unsigned int data_phase_addr;
+ int i;
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, true);
+
+ for (i = 0; i < buf_end; i++) {
+ data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
+ if (i + 1 == buf_end)
+ data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;
+
+ writel(buf32[i], nfc->io_regs + data_phase_addr);
+ }
+
+ /* Extra flags cannot be applied to unaligned (byte) accesses */
+ for (i = in_start; i < len; i++)
+ writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);
+
+ if (force_8bit)
+ pl35x_smc_force_byte_access(chip, false);
+}
+
+static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
+ unsigned short calc_ecc_lower, calc_ecc_upper;
+ unsigned short byte_addr, bit_addr;
+
+ read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
+ PL35X_NAND_ECC_BITS_MASK;
+ read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
+ PL35X_NAND_ECC_BITS_MASK;
+
+ calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
+ PL35X_NAND_ECC_BITS_MASK;
+ calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
+ PL35X_NAND_ECC_BITS_MASK;
+
+ ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+ ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+ /* No error */
+ if (likely(!ecc_odd && !ecc_even))
+ return 0;
+
+ /* One error in the main data; to be corrected */
+ if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
+ /* Bits [11:3] of error code give the byte offset */
+ byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
+ /* Bits [2:0] of error code give the bit offset */
+ bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
+ /* Toggle the faulty bit */
+ buf[byte_addr] ^= (BIT(bit_addr));
+
+ return 1;
+ }
+
+ /* One error in the ECC data; no action needed */
+ if (hweight32(ecc_odd | ecc_even) == 1)
+ return 1;
+
+ return -EBADMSG;
+}
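A worked example of the syndrome decoding above (illustration only): with ecc_odd = 0x0A3 and ecc_even = 0xF5C, ecc_odd equals ~ecc_even under the 12-bit mask, so exactly one data bit flipped and its position decodes as follows:

	unsigned short ecc_odd = 0x0A3;			/* example syndrome */
	unsigned short byte_addr = (ecc_odd >> 3) & 0x1FF;	/* = 20 */
	unsigned short bit_addr = ecc_odd & 0x7;		/* = 3 */
	/* The correction is buf[20] ^= BIT(3) */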
+
+static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
+ u8 *ecc_array)
+{
+ u32 ecc_value = ~ecc_reg;
+ unsigned int ecc_byte;
+
+ for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
+ ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
+}
+
+static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
+ struct nand_chip *chip, u8 *read_ecc)
+{
+ u32 ecc_value;
+ int chunk;
+
+ for (chunk = 0; chunk < chip->ecc.steps;
+ chunk++, read_ecc += chip->ecc.bytes) {
+ ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
+ if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
+ return -EINVAL;
+
+ pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
+ }
+
+ return 0;
+}
+
+static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
+ struct nand_chip *chip, u8 *data,
+ u8 *read_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int max_bitflips = 0, chunk;
+ u8 calc_ecc[3];
+ u32 ecc_value;
+ int stats;
+
+ for (chunk = 0; chunk < chip->ecc.steps;
+ chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
+ /* Read ECC value for each chunk */
+ ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
+
+ if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
+ return -EINVAL;
+
+ if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
+ mtd->ecc_stats.failed++;
+ continue;
+ }
+
+ pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
+ stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
+ if (stats < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stats;
+ max_bitflips = max_t(unsigned int, max_bitflips, stats);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
+ unsigned int nrows = plnand->addr_cycles;
+ u32 addr1 = 0, addr2 = 0, row;
+ u32 cmd_addr;
+ int i, ret;
+
+ ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
+ if (ret)
+ return ret;
+
+ cmd_addr = PL35X_SMC_CMD_PHASE |
+ PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
+ PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);
+
+ for (i = 0, row = first_row; row < nrows; i++, row++) {
+ u8 addr = page >> ((i * 8) & 0xFF);
+
+ if (row < 4)
+ addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
+ else
+ addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
+ }
+
+ /* Send the command and address cycles */
+ writel(addr1, nfc->io_regs + cmd_addr);
+ if (plnand->addr_cycles > 4)
+ writel(addr2, nfc->io_regs + cmd_addr);
+
+ /* Write the data with the engine enabled */
+ pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
+ 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
+ ret = pl35x_smc_wait_for_ecc_done(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Copy the HW calculated ECC bytes in the OOB buffer */
+ ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
+ 0, chip->ecc.total);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Write the spare area with ECC bytes */
+ pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
+ PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
+ PL35X_SMC_CMD_PHASE_CMD1_VALID |
+ PL35X_SMC_DATA_PHASE_CLEAR_CS);
+ ret = pl35x_smc_wait_for_irq(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+disable_ecc_engine:
+ pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+
+ return ret;
+}
+
+/*
+ * This function reads data and checks data integrity by comparing the
+ * hardware-generated ECC values with the ECC values read from the spare area.
+ *
+ * There is a limitation with the SMC controller: ECC_LAST must be set on the
+ * last data access to tell the ECC engine not to expect any further data.
+ * In practice, this implies shrinking the last data transfer by e.g. 4 bytes,
+ * and doing a final 4-byte transfer with the additional bit set. The last
+ * transfer should be aligned with the end of an ECC block. Because of this
+ * limitation, it is not possible to use the core routines.
+ */
+static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
+ unsigned int nrows = plnand->addr_cycles;
+ unsigned int addr1 = 0, addr2 = 0, row;
+ u32 cmd_addr;
+ int i, ret;
+
+ ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
+ if (ret)
+ return ret;
+
+ cmd_addr = PL35X_SMC_CMD_PHASE |
+ PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
+ PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
+ PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
+ PL35X_SMC_CMD_PHASE_CMD1_VALID;
+
+ for (i = 0, row = first_row; row < nrows; i++, row++) {
+ u8 addr = page >> ((i * 8) & 0xFF);
+
+ if (row < 4)
+ addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
+ else
+ addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
+ }
+
+ /* Send the command and address cycles */
+ writel(addr1, nfc->io_regs + cmd_addr);
+ if (plnand->addr_cycles > 4)
+ writel(addr2, nfc->io_regs + cmd_addr);
+
+ /* Wait for the data to be available in the NAND cache */
+ ndelay(PSEC_TO_NSEC(sdr->tRR_min));
+ ret = pl35x_smc_wait_for_irq(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Retrieve the raw data with the engine enabled */
+ pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
+ 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
+ ret = pl35x_smc_wait_for_ecc_done(nfc);
+ if (ret)
+ goto disable_ecc_engine;
+
+ /* Retrieve the stored ECC bytes */
+ pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ 0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
+ ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ goto disable_ecc_engine;
+
+ pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+
+ /* Correct the data and report failures */
+ return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);
+
+disable_ecc_engine:
+ pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+
+ return ret;
+}
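
The ECC_LAST constraint described above is handled inside the data-op helpers rather than in the page read/write paths. As a minimal sketch only (pl35x_smc_data_xfer() is a hypothetical accessor, not part of this patch), splitting a transfer so that just the final 4-byte access carries the flag could look like this:

/* Hypothetical sketch: only the last 4 bytes of the phase set ECC_LAST. */
static void example_split_data_phase(struct pl35x_nandc *nfc, u8 *buf,
				     unsigned int len)
{
	unsigned int first = len - 4;

	/* Bulk of the transfer: the ECC engine keeps accumulating. */
	pl35x_smc_data_xfer(nfc, buf, first, 0);

	/* Final 4-byte access: no further data will follow. */
	pl35x_smc_data_xfer(nfc, buf + first, 4,
			    PL35X_SMC_DATA_PHASE_ECC_LAST);
}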
+
+static int pl35x_nand_exec_op(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ const struct nand_op_instr *instr, *data_instr = NULL;
+ unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
+ u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
+ unsigned int op_id, len, offset, rdy_del_ns;
+ int last_instr_type = -1;
+ bool cmd1_valid = false;
+ const u8 *addrs;
+ int i, ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (!cmds) {
+ cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
+ } else {
+ cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
+ if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
+ cmd1_valid = true;
+ }
+ cmds++;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);
+
+ for (i = offset; i < naddrs; i++) {
+ if (i < 4)
+ addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
+ else
+ addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ data_instr = instr;
+ len = nand_subop_get_data_len(subop, op_id);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
+ rdy_del_ns = instr->delay_ns;
+ break;
+ }
+
+ last_instr_type = instr->type;
+ }
+
+ /* Command phase */
+ cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
+ (cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
+ writel(addr1, nfc->io_regs + cmd_addr);
+ if (naddrs > 4)
+ writel(addr2, nfc->io_regs + cmd_addr);
+
+ /* Data phase */
+ if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
+ last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
+ if (cmds == 2)
+ last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;
+
+ pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
+ len, data_instr->ctx.data.force_8bit,
+ 0, last_flags);
+ }
+
+ if (rdy_tim_ms) {
+ ndelay(rdy_del_ns);
+ ret = pl35x_smc_wait_for_irq(nfc);
+ if (ret)
+ return ret;
+ }
+
+ if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
+ pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
+ len, data_instr->ctx.data.force_8bit,
+ 0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
+
+ return 0;
+}
+
+static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
+ NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
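
For orientation, the first pattern above matches the standard large-page read sequence. A rough example of an operation that would be routed through pl35x_nand_exec_op() by this parser (the buffer, the address bytes and the timeout value are illustrative only):

static int example_page_read(struct nand_chip *chip, u8 *buf, u8 addrs[5])
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(5, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, 0),
		NAND_OP_WAIT_RDY(200, 0),	/* timeout in ms, illustrative */
		NAND_OP_DATA_IN(2048, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	return nand_exec_op(chip, &op);
}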
+
+static int pl35x_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (!check_only)
+ pl35x_nand_select_target(chip, op->cs);
+
+ return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
+ op, check_only);
+}
+
+static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
+ const struct nand_interface_config *conf)
+{
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct pl35x_nand_timings tmgs = {};
+ const struct nand_sdr_timings *sdr;
+ unsigned int period_ns, val;
+ struct clk *mclk;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
+ if (IS_ERR(mclk)) {
+ dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
+ return PTR_ERR(mclk);
+ }
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles. We use the TO_CYCLES()
+ * macro to convert from one to the other.
+ */
+ period_ns = NSEC_PER_SEC / clk_get_rate(mclk);
+
+ /*
+ * PL35X SMC needs one extra read cycle in SDR Mode 5. This is not
+ * written anywhere in the datasheet but is an empirical observation.
+ */
+ val = TO_CYCLES(sdr->tRC_min, period_ns);
+ if (sdr->tRC_min <= 20000)
+ val++;
+
+ tmgs.t_rc = val;
+ if (tmgs.t_rc != val || tmgs.t_rc < 2)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tWC_min, period_ns);
+ tmgs.t_wc = val;
+ if (tmgs.t_wc != val || tmgs.t_wc < 2)
+ return -EINVAL;
+
+ /*
+ * For all SDR modes, the PL35X SMC needs tREA_max to be 1;
+ * this is also an empirical result.
+ */
+ tmgs.t_rea = 1;
+
+ val = TO_CYCLES(sdr->tWP_min, period_ns);
+ tmgs.t_wp = val;
+ if (tmgs.t_wp != val || tmgs.t_wp < 1)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tCLR_min, period_ns);
+ tmgs.t_clr = val;
+ if (tmgs.t_clr != val)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tAR_min, period_ns);
+ tmgs.t_ar = val;
+ if (tmgs.t_ar != val)
+ return -EINVAL;
+
+ val = TO_CYCLES(sdr->tRR_min, period_ns);
+ tmgs.t_rr = val;
+ if (tmgs.t_rr != val)
+ return -EINVAL;
+
+ if (cs == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
+ PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
+ PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
+ PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
+ PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
+ PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
+ PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);
+
+ return 0;
+}
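
As a concrete example of the conversion mentioned above: with a 100 MHz memclk, period_ns is 10, so a tRC_min of 30000 ps becomes DIV_ROUND_UP(30, 10) = 3 cycles. The macro itself is defined elsewhere in the driver; a plausible definition, stated here only as an assumption, is:

/* Assumed definition: picoseconds rounded up to whole controller clock cycles. */
#define TO_CYCLES(ps, period_ns)	(DIV_ROUND_UP((ps) / 1000, (period_ns)))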
+
+static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
+ struct nand_chip *chip,
+ unsigned int pg_sz)
+{
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ u32 sz;
+
+ switch (pg_sz) {
+ case SZ_512:
+ sz = 1;
+ break;
+ case SZ_1K:
+ sz = 2;
+ break;
+ case SZ_2K:
+ sz = 3;
+ break;
+ default:
+ sz = 0;
+ break;
+ }
+
+ plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
+ plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
+ plnand->ecc_cfg |= sz;
+ writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
+}
+
+static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
+ struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+
+ if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
+ dev_err(nfc->dev,
+ "The hardware ECC engine is limited to pages up to 2kiB\n");
+ return -EOPNOTSUPP;
+ }
+
+ chip->ecc.strength = 1;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = SZ_512;
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ chip->ecc.read_page = pl35x_nand_read_page_hwecc;
+ chip->ecc.write_page = pl35x_nand_write_page_hwecc;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+ pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);
+
+ nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
+ GFP_KERNEL);
+ if (!nfc->ecc_buf)
+ return -ENOMEM;
+
+ switch (mtd->oobsize) {
+ case 16:
+ /* Legacy Xilinx layout */
+ mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ break;
+ case 64:
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported OOB size\n");
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
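
With this configuration a 2 KiB page is split into four 512-byte ECC steps of 3 ECC bytes each, so chip->ecc.total comes to 12 bytes, which fits both the 16-byte legacy Xilinx OOB layout and the standard large-page layout used for 64-byte OOB areas.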
+
+static int pl35x_nand_attach_chip(struct nand_chip *chip)
+{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
+ struct pl35x_nand *plnand = to_pl35x_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!chip->ecc.size || !chip->ecc.strength)) {
+ if (requirements->step_size && requirements->strength) {
+ chip->ecc.size = requirements->step_size;
+ chip->ecc.strength = requirements->strength;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ chip->ecc.size = 512;
+ chip->ecc.strength = 1;
+ }
+ }
+
+ if (mtd->writesize <= SZ_512)
+ plnand->addr_cycles = 1;
+ else
+ plnand->addr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ plnand->addr_cycles += 3;
+ else
+ plnand->addr_cycles += 2;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ /* Keep these legacy BBT descriptors for ON_DIE situations */
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ fallthrough;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.engine_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
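
As a worked example of the cycle accounting above: a chip with 2 KiB pages contributes 2 column cycles, and a chip large enough to set NAND_ROW_ADDR_3 contributes 3 row cycles, so plnand->addr_cycles ends up as 5, the value later programmed into the NADDRS field of the command phase.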
+
+static const struct nand_controller_ops pl35x_nandc_ops = {
+ .attach_chip = pl35x_nand_attach_chip,
+ .exec_op = pl35x_nfc_exec_op,
+ .setup_interface = pl35x_nfc_setup_interface,
+};
+
+static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
+{
+ int ret;
+
+ /* Disable interrupts and clear their status */
+ writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
+ PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
+ PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
+ nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
+
+ /* Set default bus width to 8-bit */
+ ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
+ if (ret)
+ return ret;
+
+ /* Ensure the ECC controller is bypassed by default */
+ ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the commands that the ECC block uses to detect the
+ * operations it should start/end.
+ */
+ writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
+ PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
+ PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
+ PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
+ nfc->conf_regs + PL35X_SMC_ECC_CMD1);
+ writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
+ PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
+ PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
+ PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
+ nfc->conf_regs + PL35X_SMC_ECC_CMD2);
+
+ return 0;
+}
+
+static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
+ struct device_node *np)
+{
+ struct pl35x_nand *plnand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int cs, ret;
+
+ plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
+ if (!plnand)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret)
+ return ret;
+
+ if (cs >= PL35X_NAND_MAX_CS) {
+ dev_err(nfc->dev, "Wrong CS %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(nfc->dev, "Already assigned CS %d\n", cs);
+ return -EINVAL;
+ }
+
+ plnand->cs = cs;
+
+ chip = &plnand->chip;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->controller = &nfc->controller;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = nfc->dev;
+ nand_set_flash_node(chip, nfc->dev->of_node);
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s", PL35X_NANDC_DRIVER_NAME);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&plnand->node, &nfc->chips);
+
+ return ret;
+}
+
+static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
+{
+ struct pl35x_nand *plnand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
+ chip = &plnand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&plnand->node);
+ }
+}
+
+static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips || nchips > PL35X_NAND_MAX_CS) {
+ dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = pl35x_nand_chip_init(nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ pl35x_nand_chips_cleanup(nfc);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pl35x_nand_probe(struct platform_device *pdev)
+{
+ struct device *smc_dev = pdev->dev.parent;
+ struct amba_device *smc_amba = to_amba_device(smc_dev);
+ struct pl35x_nandc *nfc;
+ int ret;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &pl35x_nandc_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
+ if (IS_ERR(nfc->conf_regs))
+ return PTR_ERR(nfc->conf_regs);
+
+ nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->io_regs))
+ return PTR_ERR(nfc->io_regs);
+
+ ret = pl35x_nand_reset_state(nfc);
+ if (ret)
+ return ret;
+
+ ret = pl35x_nand_chips_init(nfc);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+}
+
+static int pl35x_nand_remove(struct platform_device *pdev)
+{
+ struct pl35x_nandc *nfc = platform_get_drvdata(pdev);
+
+ pl35x_nand_chips_cleanup(nfc);
+
+ return 0;
+}
+
+static const struct of_device_id pl35x_nand_of_match[] = {
+ { .compatible = "arm,pl353-nand-r2p1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);
+
+static struct platform_driver pl35x_nandc_driver = {
+ .probe = pl35x_nand_probe,
+ .remove = pl35x_nand_remove,
+ .driver = {
+ .name = PL35X_NANDC_DRIVER_NAME,
+ .of_match_table = pl35x_nand_of_match,
+ },
+};
+module_platform_driver(pl35x_nandc_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
+MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index a64fb6ce915d..ef0badea4f41 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -734,6 +734,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
{
struct nand_chip *chip = &host->chip;
u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (read) {
if (host->use_ecc)
@@ -762,7 +763,8 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+ if (!nandc->props->qpic_v2)
+ nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
@@ -1133,7 +1135,8 @@ static void config_nand_page_read(struct nand_chip *chip)
write_reg_dma(nandc, NAND_ADDR0, 2, 0);
write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ if (!nandc->props->qpic_v2)
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
@@ -1191,8 +1194,9 @@ static void config_nand_page_write(struct nand_chip *chip)
write_reg_dma(nandc, NAND_ADDR0, 2, 0);
write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_v2)
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1248,7 +1252,8 @@ static int nandc_param(struct qcom_nand_host *host)
| 2 << WR_RD_BSY_GAP
| 0 << WIDE_FLASH
| 1 << DEV0_CFG1_ECC_DISABLE);
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+ if (!nandc->props->qpic_v2)
+ nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
if (!nandc->props->qpic_v2) {
@@ -1850,8 +1855,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
* ERASED_CW bits are set.
*/
if (host->bch_enabled) {
- erased = (erased_cw & ERASED_CW) == ERASED_CW ?
- true : false;
+ erased = (erased_cw & ERASED_CW) == ERASED_CW;
/*
* For RS ECC, HW reports the erased CW by placing
* special characters at certain offsets in the buffer.
@@ -2689,7 +2693,8 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
| ecc_mode << ECC_MODE
| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ if (!nandc->props->qpic_v2)
+ host->ecc_buf_cfg = 0x203 << NUM_STEPS;
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
@@ -2882,7 +2887,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
return 0;
}
-static const char * const probes[] = { "qcomsmem", NULL };
+static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index ebe859ca49cb..ed0cf732d20e 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -583,8 +583,8 @@ static void r852_update_card_detect(struct r852_device *dev)
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}
-static ssize_t r852_media_type_show(struct device *sys_dev,
- struct device_attribute *attr, char *buf)
+static ssize_t media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
struct r852_device *dev = r852_get_dev(mtd);
@@ -593,8 +593,7 @@ static ssize_t r852_media_type_show(struct device *sys_dev,
strcpy(buf, data);
return strlen(data);
}
-
-static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+static DEVICE_ATTR_RO(media_type);
/* Detect properties of card in slot */
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 923a9e236fcf..ea953e31933e 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1972,10 +1972,8 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
GFP_KERNEL);
- if (!sunxi_nand) {
- dev_err(dev, "could not allocate chip\n");
+ if (!sunxi_nand)
return -ENOMEM;
- }
sunxi_nand->nsels = nsels;
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 3131fae0c715..446ba8d43fbc 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -138,20 +138,12 @@ int spinand_select_target(struct spinand_device *spinand, unsigned int target)
return 0;
}
-static int spinand_init_cfg_cache(struct spinand_device *spinand)
+static int spinand_read_cfg(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct device *dev = &spinand->spimem->spi->dev;
unsigned int target;
int ret;
- spinand->cfg_cache = devm_kcalloc(dev,
- nand->memorg.ntargets,
- sizeof(*spinand->cfg_cache),
- GFP_KERNEL);
- if (!spinand->cfg_cache)
- return -ENOMEM;
-
for (target = 0; target < nand->memorg.ntargets; target++) {
ret = spinand_select_target(spinand, target);
if (ret)
@@ -170,6 +162,21 @@ static int spinand_init_cfg_cache(struct spinand_device *spinand)
return 0;
}
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct device *dev = &spinand->spimem->spi->dev;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
bool enable = false;
@@ -290,6 +297,8 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
{
struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
struct spinand_device *spinand = nand_to_spinand(nand);
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ int ret;
if (req->mode == MTD_OPS_RAW)
return 0;
@@ -299,7 +308,13 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
return 0;
/* Finish a page write: check the status, report errors/bitflips */
- return spinand_check_ecc_status(spinand, engine_conf->status);
+ ret = spinand_check_ecc_status(spinand, engine_conf->status);
+ if (ret == -EBADMSG)
+ mtd->ecc_stats.failed++;
+ else if (ret > 0)
+ mtd->ecc_stats.corrected += ret;
+
+ return ret;
}
static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
@@ -635,13 +650,10 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG) {
+ if (ret == -EBADMSG)
ecc_failed = true;
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += ret;
+ else
max_bitflips = max_t(unsigned int, max_bitflips, ret);
- }
ret = 0;
ops->retlen += iter.req.datalen;
@@ -1093,12 +1105,71 @@ static int spinand_detect(struct spinand_device *spinand)
return 0;
}
+static int spinand_init_flash(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret, i;
+
+ ret = spinand_read_cfg(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ break;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ spinand_manufacturer_cleanup(spinand);
+
+ return ret;
+}
+
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return;
+
+ ret = spinand_init_flash(spinand);
+ if (ret)
+ return;
+
+ spinand_ecc_enable(spinand, false);
+}
+
static int spinand_init(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct nand_device *nand = mtd_to_nanddev(mtd);
- int ret, i;
+ int ret;
/*
* We need a scratch buffer because the spi_mem interface requires that
@@ -1131,22 +1202,10 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_free_bufs;
- ret = spinand_init_quad_enable(spinand);
+ ret = spinand_init_flash(spinand);
if (ret)
goto err_free_bufs;
- ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
- if (ret)
- goto err_free_bufs;
-
- ret = spinand_manufacturer_init(spinand);
- if (ret) {
- dev_err(dev,
- "Failed to initialize the SPI NAND chip (err = %d)\n",
- ret);
- goto err_free_bufs;
- }
-
ret = spinand_create_dirmaps(spinand);
if (ret) {
dev_err(dev,
@@ -1155,17 +1214,6 @@ static int spinand_init(struct spinand_device *spinand)
goto err_manuf_cleanup;
}
- /* After power up, all blocks are locked, so unlock them here. */
- for (i = 0; i < nand->memorg.ntargets; i++) {
- ret = spinand_select_target(spinand, i);
- if (ret)
- goto err_manuf_cleanup;
-
- ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
- if (ret)
- goto err_manuf_cleanup;
- }
-
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
@@ -1186,6 +1234,7 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+ mtd->_resume = spinand_mtd_resume;
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 6701aaa21a49..a9890350db02 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -186,6 +186,118 @@ static const struct spinand_info macronix_spinand_table[] = {
0 /*SPINAND_HAS_QE_BIT*/,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
+
+ SPINAND_INFO("MX35LF2G14AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF4G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF4GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2G14AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2GE4AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1G14AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1G24AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1GE4AD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF1GE4AC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+
};
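
For reference when reading the table above, the NAND_MEMORG() arguments are, in order (as defined in include/linux/mtd/nand.h): bits per cell, page size, OOB size, pages per erase block, erase blocks per LUN, maximum bad erase blocks per LUN, planes per LUN, LUNs per target and number of targets. The MX35UF4G24AD entry therefore describes an SLC device with 4 KiB pages, 256 bytes of OOB, 64 pages per block and 2048 blocks per LUN spread over two planes.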
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index bcd0094f172d..913db0dd6a8d 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -619,7 +619,6 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
return BLOCK_NIL;
}
//printk("Restarting scan\n");
- lastEUN = BLOCK_NIL;
continue;
}
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 444a77bb7692..75e86ed3e678 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -188,17 +188,14 @@ device is already correct.
/* memory alloc */
nftl->EUNtable = kmalloc_array(nftl->nb_blocks, sizeof(u16),
GFP_KERNEL);
- if (!nftl->EUNtable) {
- printk(KERN_NOTICE "NFTL: allocation of EUNtable failed\n");
+ if (!nftl->EUNtable)
return -ENOMEM;
- }
nftl->ReplUnitTable = kmalloc_array(nftl->nb_blocks,
sizeof(u16),
GFP_KERNEL);
if (!nftl->ReplUnitTable) {
kfree(nftl->EUNtable);
- printk(KERN_NOTICE "NFTL: allocation of ReplUnitTable failed\n");
return -ENOMEM;
}
@@ -269,7 +266,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
if (!buf)
- return -1;
+ return -ENOMEM;
ret = -1;
for (i = 0; i < len; i += SECTORSIZE) {
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 9babe678c41b..337ea8b9a4c3 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -115,7 +115,7 @@ config MTD_AFS_PARTS
config MTD_PARSER_TRX
tristate "Parser for TRX format partitions"
- depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST)
+ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST)
help
TRX is a firmware format used by Broadcom on their devices. It
may contain up to 3/4 partitions (depending on the version).
diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c
index 8541182134d4..4814cf218e17 100644
--- a/drivers/mtd/parsers/parser_trx.c
+++ b/drivers/mtd/parsers/parser_trx.c
@@ -51,13 +51,20 @@ static int parser_trx_parse(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ struct device_node *np = mtd_get_of_node(mtd);
struct mtd_partition *parts;
struct mtd_partition *part;
struct trx_header trx;
size_t bytes_read;
uint8_t curr_part = 0, i = 0;
+ uint32_t trx_magic = TRX_MAGIC;
int err;
+ /* Get different magic from device tree if specified */
+ err = of_property_read_u32(np, "brcm,trx-magic", &trx_magic);
+ if (err != 0 && err != -EINVAL)
+ pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err);
+
parts = kcalloc(TRX_PARSER_MAX_PARTS, sizeof(struct mtd_partition),
GFP_KERNEL);
if (!parts)
@@ -70,7 +77,7 @@ static int parser_trx_parse(struct mtd_info *mtd,
return err;
}
- if (trx.magic != TRX_MAGIC) {
+ if (trx.magic != trx_magic) {
kfree(parts);
return -ENOENT;
}
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
index d9083308f6ba..06a818cd2433 100644
--- a/drivers/mtd/parsers/qcomsmempart.c
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -159,6 +159,15 @@ out_free_parts:
return ret;
}
+static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
+ int nr_parts)
+{
+ int i;
+
+ for (i = 0; i < nr_parts; i++)
+ kfree(pparts[i].name);
+}
+
static const struct of_device_id qcomsmem_of_match_table[] = {
{ .compatible = "qcom,smem-part" },
{},
@@ -167,6 +176,7 @@ MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table);
static struct mtd_part_parser mtd_parser_qcomsmem = {
.parse_fn = parse_qcomsmem_part,
+ .cleanup = parse_qcomsmem_cleanup,
.name = "qcomsmem",
.of_match_table = qcomsmem_of_match_table,
};
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index 91146bdc4713..feb44a573d44 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -17,15 +17,15 @@
#include <linux/module.h>
struct fis_image_desc {
- unsigned char name[16]; // Null terminated name
- uint32_t flash_base; // Address within FLASH of image
- uint32_t mem_base; // Address in memory where it executes
- uint32_t size; // Length of image
- uint32_t entry_point; // Execution entry point
- uint32_t data_length; // Length of actual data
- unsigned char _pad[256-(16+7*sizeof(uint32_t))];
- uint32_t desc_cksum; // Checksum over image descriptor
- uint32_t file_cksum; // Checksum over image data
+ unsigned char name[16]; // Null terminated name
+ u32 flash_base; // Address within FLASH of image
+ u32 mem_base; // Address in memory where it executes
+ u32 size; // Length of image
+ u32 entry_point; // Execution entry point
+ u32 data_length; // Length of actual data
+ unsigned char _pad[256 - (16 + 7 * sizeof(u32))];
+ u32 desc_cksum; // Checksum over image descriptor
+ u32 file_cksum; // Checksum over image data
};
struct fis_list {
@@ -45,6 +45,7 @@ static inline int redboot_checksum(struct fis_image_desc *img)
static void parse_redboot_of(struct mtd_info *master)
{
struct device_node *np;
+ struct device_node *npart;
u32 dirblock;
int ret;
@@ -52,7 +53,11 @@ static void parse_redboot_of(struct mtd_info *master)
if (!np)
return;
- ret = of_property_read_u32(np, "fis-index-block", &dirblock);
+ npart = of_get_child_by_name(np, "partitions");
+ if (!npart)
+ return;
+
+ ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
if (ret)
return;
@@ -85,12 +90,12 @@ static int parse_redboot_partitions(struct mtd_info *master,
parse_redboot_of(master);
- if ( directory < 0 ) {
+ if (directory < 0) {
offset = master->size + directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
if (!offset) {
- nogood:
- printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
+nogood:
+ pr_notice("Failed to find a non-bad block to check for RedBoot partition table\n");
return -EIO;
}
offset -= master->erasesize;
@@ -108,8 +113,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
if (!buf)
return -ENOMEM;
- printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
- master->name, offset);
+ pr_notice("Searching for RedBoot partition table in %s at offset 0x%lx\n",
+ master->name, offset);
ret = mtd_read(master, offset, master->erasesize, &retlen,
(void *)buf);
@@ -145,14 +150,13 @@ static int parse_redboot_partitions(struct mtd_info *master,
&& swab32(buf[i].size) < master->erasesize)) {
int j;
/* Update numslots based on actual FIS directory size */
- numslots = swab32(buf[i].size) / sizeof (struct fis_image_desc);
+ numslots = swab32(buf[i].size) / sizeof(struct fis_image_desc);
for (j = 0; j < numslots; ++j) {
-
/* A single 0xff denotes a deleted entry.
* Two of them in a row is the end of the table.
*/
if (buf[j].name[0] == 0xff) {
- if (buf[j].name[1] == 0xff) {
+ if (buf[j].name[1] == 0xff) {
break;
} else {
continue;
@@ -179,8 +183,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
if (i == numslots) {
/* Didn't find it */
- printk(KERN_NOTICE "No RedBoot partition table detected in %s\n",
- master->name);
+ pr_notice("No RedBoot partition table detected in %s\n",
+ master->name);
ret = 0;
goto out;
}
@@ -199,7 +203,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
break;
new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL);
- namelen += strlen(buf[i].name)+1;
+ namelen += strlen(buf[i].name) + 1;
if (!new_fl) {
ret = -ENOMEM;
goto out;
@@ -208,13 +212,13 @@ static int parse_redboot_partitions(struct mtd_info *master,
if (data && data->origin)
buf[i].flash_base -= data->origin;
else
- buf[i].flash_base &= master->size-1;
+ buf[i].flash_base &= master->size - 1;
/* I'm sure the JFFS2 code has done me permanent damage.
* I now think the following is _normal_
*/
prev = &fl;
- while(*prev && (*prev)->img->flash_base < new_fl->img->flash_base)
+ while (*prev && (*prev)->img->flash_base < new_fl->img->flash_base)
prev = &(*prev)->next;
new_fl->next = *prev;
*prev = new_fl;
@@ -234,7 +238,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
}
#endif
- parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
+ parts = kzalloc(sizeof(*parts) * nrparts + nulllen + namelen, GFP_KERNEL);
if (!parts) {
ret = -ENOMEM;
@@ -243,23 +247,22 @@ static int parse_redboot_partitions(struct mtd_info *master,
nullname = (char *)&parts[nrparts];
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
- if (nulllen > 0) {
+ if (nulllen > 0)
strcpy(nullname, nullstring);
- }
#endif
names = nullname + nulllen;
- i=0;
+ i = 0;
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (fl->img->flash_base) {
- parts[0].name = nullname;
- parts[0].size = fl->img->flash_base;
- parts[0].offset = 0;
+ parts[0].name = nullname;
+ parts[0].size = fl->img->flash_base;
+ parts[0].offset = 0;
i++;
}
#endif
- for ( ; i<nrparts; i++) {
+ for ( ; i < nrparts; i++) {
parts[i].size = fl->img->size;
parts[i].offset = fl->img->flash_base;
parts[i].name = names;
@@ -267,17 +270,17 @@ static int parse_redboot_partitions(struct mtd_info *master,
strcpy(names, fl->img->name);
#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
if (!memcmp(names, "RedBoot", 8) ||
- !memcmp(names, "RedBoot config", 15) ||
- !memcmp(names, "FIS directory", 14)) {
+ !memcmp(names, "RedBoot config", 15) ||
+ !memcmp(names, "FIS directory", 14)) {
parts[i].mask_flags = MTD_WRITEABLE;
}
#endif
- names += strlen(names)+1;
+ names += strlen(names) + 1;
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
- if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+ if (fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
i++;
- parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].offset = parts[i - 1].size + parts[i - 1].offset;
parts[i].size = fl->next->img->flash_base - parts[i].offset;
parts[i].name = nullname;
}
@@ -291,6 +294,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
out:
while (fl) {
struct fis_list *old = fl;
+
fl = fl->next;
kfree(old);
}
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index cce3bf6f99b4..6e0d5ce9b010 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -192,11 +192,8 @@ static int scan_header(struct partition *part)
part->sector_map = vmalloc(array_size(sizeof(u_long),
part->sector_count));
- if (!part->sector_map) {
- printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
- "sector map", part->mbd.mtd->name);
+ if (!part->sector_map)
goto err;
- }
for (i=0; i<part->sector_count; i++)
part->sector_map[i] = -1;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 4d1ae25507ab..0cff2cda1b5a 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -265,7 +265,8 @@ static int sm_read_sector(struct sm_ftl *ftl,
again:
if (try++) {
/* Avoid infinite recursion on CIS reads, sm_recheck_media
- won't help anyway */
+ * won't help anyway
+ */
if (zone == 0 && block == ftl->cis_block && boffset ==
ftl->cis_boffset)
return ret;
@@ -276,7 +277,8 @@ again:
}
/* Unfortunately, oob read will _always_ succeed,
- despite card removal..... */
+ * despite card removal.....
+ */
ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
@@ -411,9 +413,10 @@ restart:
/* If write fails. try to erase the block */
/* This is safe, because we never write in blocks
- that contain valuable data.
- This is intended to repair block that are marked
- as erased, but that isn't fully erased*/
+ * that contain valuable data.
+ * This is intended to repair blocks that are marked
+ * as erased, but that aren't fully erased
+ */
if (sm_erase_block(ftl, zone, block, 0))
return -EIO;
@@ -448,7 +451,8 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
/* We aren't checking the return value, because we don't care */
/* This also fails on fake xD cards, but I guess these won't expose
- any bad blocks till fail completely */
+ * any bad blocks till they fail completely
+ */
for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
@@ -505,7 +509,8 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
/* First just check that block doesn't look fishy */
/* Only blocks that are valid or are sliced in two parts, are
- accepted */
+ * accepted
+ */
for (boffset = 0; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
@@ -554,7 +559,8 @@ static const uint8_t cis_signature[] = {
0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
- * This ideally has to be based on nand id, but for now device size is enough */
+ * This ideally has to be based on nand id, but for now device size is enough
+ */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
int i;
@@ -607,7 +613,8 @@ static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
}
/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
- sizes. SmartMedia cards exist up to 128 MiB and have same layout*/
+ * sizes. SmartMedia cards exist up to 128 MiB and have the same layout
+ */
if (size_in_megs >= 16) {
ftl->zone_count = size_in_megs / 16;
ftl->zone_size = 1024;
@@ -782,7 +789,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
}
/* Test to see if block is erased. It is enough to test
- first sector, because erase happens in one shot */
+ * first sector, because erase happens in one shot
+ */
if (sm_block_erased(&oob)) {
kfifo_in(&zone->free_sectors,
(unsigned char *)&block, 2);
@@ -792,7 +800,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
/* If block is marked as bad, skip it */
/* This assumes we can trust first sector*/
/* However the way the block valid status is defined, ensures
- very low probability of failure here */
+ * very low probability of failure here
+ */
if (!sm_block_valid(&oob)) {
dbg("PH %04d <-> <marked bad>", block);
continue;
@@ -803,7 +812,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
/* Invalid LBA means that block is damaged. */
/* We can try to erase it, or mark it as bad, but
- lets leave that to recovery application */
+ * let's leave that to a recovery application
+ */
if (lba == -2 || lba >= ftl->max_lba) {
dbg("PH %04d <-> LBA %04d(bad)", block, lba);
continue;
@@ -811,7 +821,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
/* If there is no collision,
- just put the sector in the FTL table */
+ * just put the sector in the FTL table
+ */
if (zone->lba_to_phys_table[lba] < 0) {
dbg_verbose("PH %04d <-> LBA %04d", block, lba);
zone->lba_to_phys_table[lba] = block;
@@ -834,9 +845,9 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
}
/* If both blocks are valid and share same LBA, it means that
- they hold different versions of same data. It not
- known which is more recent, thus just erase one of them
- */
+ * they hold different versions of the same data. It is not
+ * known which is more recent, thus just erase one of them
+ */
sm_printk("both blocks are valid, erasing the later");
sm_erase_block(ftl, zone_num, block, 1);
}
@@ -845,7 +856,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
zone->initialized = 1;
/* No free sectors, means that the zone is heavily damaged, write won't
- work, but it can still can be (partially) read */
+ * work, but it can still be (partially) read
+ */
if (!kfifo_len(&zone->free_sectors)) {
sm_printk("no free blocks in zone %d", zone_num);
return 0;
@@ -952,8 +964,9 @@ restart:
/* If there are no spare blocks, */
/* we could still continue by erasing/writing the current block,
- but for such worn out media it doesn't worth the trouble,
- and the dangers */
+ * but for such worn-out media it isn't worth the trouble
+ * and the dangers
+ */
if (kfifo_out(&zone->free_sectors,
(unsigned char *)&write_sector, 2) != 2) {
dbg("no free sectors for write!");
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 136f245c91dc..6b904e439372 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-spi-nor-objs := core.o sfdp.o swp.o otp.o
+spi-nor-objs := core.o sfdp.o swp.o otp.o sysfs.o
spi-nor-objs += atmel.o
spi-nor-objs += catalyst.o
spi-nor-objs += eon.o
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 825610a2e9dc..1bc53b8bb88a 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
@@ -74,6 +74,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 5703e8313980..2635c80231bb 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -326,7 +326,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
ctrl |= SPIFI_CTRL_DUAL;
}
- switch (mode & (SPI_CPHA | SPI_CPOL)) {
+ switch (mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
ctrl &= ~SPIFI_CTRL_MODE3;
break;
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index bd2c7717eb10..cc08bd707378 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1318,7 +1318,7 @@ static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
/*
* Initiate the erasure of a single sector
*/
-static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
int i;
@@ -1411,9 +1411,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
continue;
spi_nor_div_by_erase_size(erase, addr, &rem);
- if (rem)
- continue;
- else
+ if (!rem)
return erase;
}
@@ -2839,6 +2837,21 @@ static int spi_nor_init(struct spi_nor *nor)
return 0;
}
+/**
+ * spi_nor_soft_reset() - Perform a software reset
+ * @nor: pointer to 'struct spi_nor'
+ *
+ * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
+ * the device to its power-on-reset state. This is useful when the software has
+ * made some changes to device (volatile) registers and needs to reset it before
+ * shutting down, for example.
+ *
+ * Not every flash supports this sequence. The same set of opcodes might be used
+ * for some other operation on a flash that does not support this. Support for
+ * this sequence can be discovered via SFDP in the BFPT table.
+ */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
struct spi_mem_op op;
@@ -3444,6 +3457,7 @@ static struct spi_mem_driver spi_nor_driver = {
.driver = {
.name = "spi-nor",
.of_match_table = spi_nor_of_table,
+ .dev_groups = spi_nor_sysfs_groups,
},
.id_table = spi_nor_dev_ids,
},
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 28a2e0be97a3..3348e1dd1445 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -207,6 +207,7 @@ struct spi_nor_otp_organization {
* @read: read from the SPI NOR OTP area.
* @write: write to the SPI NOR OTP area.
* @lock: lock an OTP region.
+ * @erase: erase an OTP region.
* @is_locked: check if an OTP region of the SPI NOR is locked.
*/
struct spi_nor_otp_ops {
@@ -214,6 +215,7 @@ struct spi_nor_otp_ops {
int (*write)(struct spi_nor *nor, loff_t addr, size_t len,
const u8 *buf);
int (*lock)(struct spi_nor *nor, unsigned int region);
+ int (*erase)(struct spi_nor *nor, loff_t addr);
int (*is_locked)(struct spi_nor *nor, unsigned int region);
};
@@ -459,6 +461,16 @@ struct spi_nor_manufacturer {
const struct spi_nor_fixups *fixups;
};
+/**
+ * struct sfdp - SFDP data
+ * @num_dwords: number of entries in the dwords array
+ * @dwords: array of double words of the SFDP data
+ */
+struct sfdp {
+ size_t num_dwords;
+ u32 *dwords;
+};
+
/* Manufacturer drivers. */
extern const struct spi_nor_manufacturer spi_nor_atmel;
extern const struct spi_nor_manufacturer spi_nor_catalyst;
@@ -478,6 +490,8 @@ extern const struct spi_nor_manufacturer spi_nor_winbond;
extern const struct spi_nor_manufacturer spi_nor_xilinx;
extern const struct spi_nor_manufacturer spi_nor_xmc;
+extern const struct attribute_group *spi_nor_sysfs_groups[];
+
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
struct spi_mem_op *op,
const enum spi_nor_protocol proto);
@@ -503,10 +517,12 @@ ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
u8 *buf);
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
const u8 *buf);
+int spi_nor_erase_sector(struct spi_nor *nor, u32 addr);
int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf);
int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
const u8 *buf);
+int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr);
int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region);
int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region);
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 42c2cf31702e..27498ed0cc0d 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -49,7 +49,8 @@ static const struct flash_info macronix_parts[] = {
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K |
+ SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ |
@@ -72,7 +73,7 @@ static const struct flash_info macronix_parts[] = {
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index fcf38d260345..983e40b19134 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -15,14 +15,21 @@
#define spi_nor_otp_n_regions(nor) ((nor)->params->otp.org->n_regions)
/**
- * spi_nor_otp_read_secr() - read OTP data
+ * spi_nor_otp_read_secr() - read security register
* @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
+ * @addr: offset to read from
* @len: number of bytes to read
* @buf: pointer to dst buffer
*
- * Read OTP data from one region by using the SPINOR_OP_RSECR commands. This
- * method is used on GigaDevice and Winbond flashes.
+ * Read a security register by using the SPINOR_OP_RSECR commands.
+ *
+ * In Winbond/GigaDevice datasheets the term "security register" stands for
+ * a one-time-programmable memory area, consisting of multiple bytes (usually
+ * 256). Thus one "security register" maps to one OTP region.
+ *
+ * This method is used on GigaDevice and Winbond flashes.
+ *
+ * Please note, the read must not span multiple registers.
*
* Return: number of bytes read successfully, -errno otherwise
*/
@@ -40,7 +47,6 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
rdesc = nor->dirmap.rdesc;
nor->read_opcode = SPINOR_OP_RSECR;
- nor->addr_width = 3;
nor->read_dummy = 8;
nor->read_proto = SNOR_PROTO_1_1_1;
nor->dirmap.rdesc = NULL;
@@ -57,16 +63,20 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
}
/**
- * spi_nor_otp_write_secr() - write OTP data
+ * spi_nor_otp_write_secr() - write security register
* @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
+ * @addr: offset to write to
* @len: number of bytes to write
* @buf: pointer to src buffer
*
- * Write OTP data to one region by using the SPINOR_OP_PSECR commands. This
- * method is used on GigaDevice and Winbond flashes.
+ * Write a security register by using the SPINOR_OP_PSECR commands.
+ *
+ * For more information on the term "security register", see the documentation
+ * of spi_nor_otp_read_secr().
*
- * Please note, the write must not span multiple OTP regions.
+ * This method is used on GigaDevice and Winbond flashes.
+ *
+ * Please note, the write must not span multiple registers.
*
* Return: number of bytes written successfully, -errno otherwise
*/
@@ -84,13 +94,12 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
wdesc = nor->dirmap.wdesc;
nor->program_opcode = SPINOR_OP_PSECR;
- nor->addr_width = 3;
nor->write_proto = SNOR_PROTO_1_1_1;
nor->dirmap.wdesc = NULL;
/*
* We only support a write to one single page. For now all winbond
- * flashes only have one page per OTP region.
+ * flashes only have one page per security register.
*/
ret = spi_nor_write_enable(nor);
if (ret)
@@ -111,6 +120,38 @@ out:
return ret ?: written;
}
+/**
+ * spi_nor_otp_erase_secr() - erase a security register
+ * @nor: pointer to 'struct spi_nor'
+ * @addr: offset of the security register to be erased
+ *
+ * Erase a security register by using the SPINOR_OP_ESECR command.
+ *
+ * For more information on the term "security register", see the documentation
+ * of spi_nor_otp_read_secr().
+ *
+ * This method is used on GigaDevice and Winbond flashes.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr)
+{
+ u8 erase_opcode = nor->erase_opcode;
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = SPINOR_OP_ESECR;
+ ret = spi_nor_erase_sector(nor, addr);
+ nor->erase_opcode = erase_opcode;
+ if (ret)
+ return ret;
+
+ return spi_nor_wait_till_ready(nor);
+}
+
static int spi_nor_otp_lock_bit_cr(unsigned int region)
{
static const int lock_bits[] = { SR2_LB1, SR2_LB2, SR2_LB3 };
@@ -240,6 +281,29 @@ out:
return ret;
}
+static int spi_nor_mtd_otp_range_is_locked(struct spi_nor *nor, loff_t ofs,
+ size_t len)
+{
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ unsigned int region;
+ int locked;
+
+ /*
+ * If any of the affected OTP regions are locked the entire range is
+ * considered locked.
+ */
+ for (region = spi_nor_otp_offset_to_region(nor, ofs);
+ region <= spi_nor_otp_offset_to_region(nor, ofs + len - 1);
+ region++) {
+ locked = ops->is_locked(nor, region);
+ /* take the branch if it is locked or in case of an error */
+ if (locked)
+ return locked;
+ }
+
+ return 0;
+}
+
static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs,
size_t total_len, size_t *retlen,
const u8 *buf, bool is_write)
@@ -255,14 +319,26 @@ static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs,
if (ofs < 0 || ofs >= spi_nor_otp_size(nor))
return 0;
+ /* don't access beyond the end */
+ total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
+
+ if (!total_len)
+ return 0;
+
ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
- /* don't access beyond the end */
- total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
+ if (is_write) {
+ ret = spi_nor_mtd_otp_range_is_locked(nor, ofs, total_len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ ret = -EROFS;
+ goto out;
+ }
+ }
- *retlen = 0;
while (total_len) {
/*
* The OTP regions are mapped into a contiguous area starting
@@ -316,6 +392,59 @@ static int spi_nor_mtd_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
return spi_nor_mtd_otp_read_write(mtd, to, len, retlen, buf, true);
}
+static int spi_nor_mtd_otp_erase(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
+ const size_t rlen = spi_nor_otp_region_len(nor);
+ unsigned int region;
+ loff_t rstart;
+ int ret;
+
+ /* OTP erase is optional */
+ if (!ops->erase)
+ return -EOPNOTSUPP;
+
+ if (!len)
+ return 0;
+
+ if (from < 0 || (from + len) > spi_nor_otp_size(nor))
+ return -EINVAL;
+
+ /* the user has to explicitly ask for whole regions */
+ if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen))
+ return -EINVAL;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_mtd_otp_range_is_locked(nor, from, len);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ ret = -EROFS;
+ goto out;
+ }
+
+ while (len) {
+ region = spi_nor_otp_offset_to_region(nor, from);
+ rstart = spi_nor_otp_region_start(nor, region);
+
+ ret = ops->erase(nor, rstart);
+ if (ret)
+ goto out;
+
+ len -= rlen;
+ from += rlen;
+ }
+
+out:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
+
static int spi_nor_mtd_otp_lock(struct mtd_info *mtd, loff_t from, size_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
@@ -374,4 +503,5 @@ void spi_nor_otp_init(struct spi_nor *nor)
mtd->_read_user_prot_reg = spi_nor_mtd_otp_read;
mtd->_write_user_prot_reg = spi_nor_mtd_otp_write;
mtd->_lock_user_prot_reg = spi_nor_mtd_otp_lock;
+ mtd->_erase_user_prot_reg = spi_nor_mtd_otp_erase;
}
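For context, the new mtd->_erase_user_prot_reg hook registered above is reached from user space through the MTD character device. A minimal sketch of erasing one user OTP region follows, assuming the OTPERASE ioctl added by the companion MTD core patch in this series; the device node and the 256-byte region size are placeholders for the actual flash:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	/* Placeholders: adjust the device node and region size to the flash. */
	struct otp_info info = { .start = 0, .length = 256 };
	int mode = MTD_OTP_USER;
	int fd = open("/dev/mtd0", O_RDWR);

	if (fd < 0)
		return 1;
	/* Select the user OTP area, then erase exactly one whole region. */
	if (ioctl(fd, OTPSELECT, &mode) < 0 || ioctl(fd, OTPERASE, &info) < 0)
		perror("otp erase");
	close(fd);
	return 0;
}

The erase path above rejects requests that are not aligned to whole security registers, so the length passed here must match the flash's OTP region size.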
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index 23c28e91f698..c500c2118a5d 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -16,6 +16,7 @@
(((p)->parameter_table_pointer[2] << 16) | \
((p)->parameter_table_pointer[1] << 8) | \
((p)->parameter_table_pointer[0] << 0))
+#define SFDP_PARAM_HEADER_PARAM_LEN(p) ((p)->length * 4)
#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
@@ -1245,6 +1246,8 @@ int spi_nor_parse_sfdp(struct spi_nor *nor)
struct sfdp_parameter_header *param_headers = NULL;
struct sfdp_header header;
struct device *dev = nor->dev;
+ struct sfdp *sfdp;
+ size_t sfdp_size;
size_t psize;
int i, err;
@@ -1267,6 +1270,9 @@ int spi_nor_parse_sfdp(struct spi_nor *nor)
bfpt_header->major != SFDP_JESD216_MAJOR)
return -EINVAL;
+ sfdp_size = SFDP_PARAM_HEADER_PTP(bfpt_header) +
+ SFDP_PARAM_HEADER_PARAM_LEN(bfpt_header);
+
/*
* Allocate memory then read all parameter headers with a single
* Read SFDP command. These parameter headers will actually be parsed
@@ -1294,6 +1300,58 @@ int spi_nor_parse_sfdp(struct spi_nor *nor)
}
/*
+ * Cache the complete SFDP data. It is not (easily) possible to fetch
+ * SFDP after probe time and we need it for the sysfs access.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+ sfdp_size = max_t(size_t, sfdp_size,
+ SFDP_PARAM_HEADER_PTP(param_header) +
+ SFDP_PARAM_HEADER_PARAM_LEN(param_header));
+ }
+
+ /*
+ * Limit the total size to a reasonable value to avoid allocating too
+ * much memory just because the flash returned some insane values.
+ */
+ if (sfdp_size > PAGE_SIZE) {
+ dev_dbg(dev, "SFDP data (%zu) too big, truncating\n",
+ sfdp_size);
+ sfdp_size = PAGE_SIZE;
+ }
+
+ sfdp = devm_kzalloc(dev, sizeof(*sfdp), GFP_KERNEL);
+ if (!sfdp) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /*
+ * The SFDP is organized in chunks of DWORDs. Thus, in theory, the
+ * sfdp_size should be a multiple of the DWORD size. But in case a flash
+ * is not spec compliant, make sure that we have enough space to store
+ * the complete SFDP data.
+ */
+ sfdp->num_dwords = DIV_ROUND_UP(sfdp_size, sizeof(*sfdp->dwords));
+ sfdp->dwords = devm_kcalloc(dev, sfdp->num_dwords,
+ sizeof(*sfdp->dwords), GFP_KERNEL);
+ if (!sfdp->dwords) {
+ err = -ENOMEM;
+ devm_kfree(dev, sfdp);
+ goto exit;
+ }
+
+ err = spi_nor_read_sfdp(nor, 0, sfdp_size, sfdp->dwords);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP data\n");
+ devm_kfree(dev, sfdp->dwords);
+ devm_kfree(dev, sfdp);
+ goto exit;
+ }
+
+ nor->sfdp = sfdp;
+
+ /*
* Check other parameter headers to get the latest revision of
* the basic flash parameter table.
*/
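The SFDP cache used above (nor->sfdp, sfdp->num_dwords, sfdp->dwords) is declared by a companion header change that is not part of this hunk. A sketch of what that structure presumably looks like, with the field names taken from the uses above; the real declaration may differ:

/* Raw SFDP data cached at probe time for later sysfs access. */
struct sfdp {
	size_t	num_dwords;	/* number of 4-byte SFDP words cached */
	u32	*dwords;	/* the data itself, as read from the flash */
};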
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
new file mode 100644
index 000000000000..9aec9d8a98ad
--- /dev/null
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mtd/spi-nor.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/sysfs.h>
+
+#include "core.h"
+
+static ssize_t manufacturer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ return sysfs_emit(buf, "%s\n", nor->manufacturer->name);
+}
+static DEVICE_ATTR_RO(manufacturer);
+
+static ssize_t partname_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ return sysfs_emit(buf, "%s\n", nor->info->name);
+}
+static DEVICE_ATTR_RO(partname);
+
+static ssize_t jedec_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ return sysfs_emit(buf, "%*phN\n", nor->info->id_len, nor->info->id);
+}
+static DEVICE_ATTR_RO(jedec_id);
+
+static struct attribute *spi_nor_sysfs_entries[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_partname.attr,
+ &dev_attr_jedec_id.attr,
+ NULL
+};
+
+static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+ struct sfdp *sfdp = nor->sfdp;
+ size_t sfdp_size = sfdp->num_dwords * sizeof(*sfdp->dwords);
+
+ return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
+ sfdp_size);
+}
+static BIN_ATTR_RO(sfdp, 0);
+
+static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+ &bin_attr_sfdp,
+ NULL
+};
+
+static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int n)
+{
+ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
+ struct spi_mem *spimem = spi_get_drvdata(spi);
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ if (attr == &bin_attr_sfdp && nor->sfdp)
+ return 0444;
+
+ return 0;
+}
+
+static const struct attribute_group spi_nor_sysfs_group = {
+ .name = "spi-nor",
+ .is_bin_visible = spi_nor_sysfs_is_bin_visible,
+ .attrs = spi_nor_sysfs_entries,
+ .bin_attrs = spi_nor_sysfs_bin_entries,
+};
+
+const struct attribute_group *spi_nor_sysfs_groups[] = {
+ &spi_nor_sysfs_group,
+ NULL
+};
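The group above hangs off the SPI device, so the new attributes show up in a "spi-nor" subdirectory of the flash's sysfs node. A small user-space sketch that dumps the raw SFDP blob; the "spi0.0" part of the path is only an example and depends on the controller and chip select:

#include <stdio.h>

int main(void)
{
	/* Example path; adjust the SPI device name for the actual board. */
	FILE *f = fopen("/sys/bus/spi/devices/spi0.0/spi-nor/sfdp", "rb");
	unsigned char buf[256];
	size_t n;

	if (!f)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}

The attribute is only made visible when SFDP data was actually cached, which is what the is_bin_visible callback above checks via nor->sfdp.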
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 9a81c67a60c6..96573f61caf5 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -139,6 +139,7 @@ static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
static const struct spi_nor_otp_ops winbond_otp_ops = {
.read = spi_nor_otp_read_secr,
.write = spi_nor_otp_write_secr,
+ .erase = spi_nor_otp_erase_secr,
.lock = spi_nor_otp_lock_sr2,
.is_locked = spi_nor_otp_is_locked_sr2,
};
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index c71daa89bfce..532997e10e29 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -506,7 +506,6 @@ static int __init mtd_oobtest_init(void)
err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
pr_info("error occurred as expected\n");
- err = 0;
} else {
pr_err("error: can write past end of OOB\n");
errcnt += 1;
@@ -529,7 +528,6 @@ static int __init mtd_oobtest_init(void)
if (err) {
pr_info("error occurred as expected\n");
- err = 0;
} else {
pr_err("error: can read past end of OOB\n");
errcnt += 1;
@@ -553,7 +551,6 @@ static int __init mtd_oobtest_init(void)
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
- err = 0;
} else {
pr_err("error: wrote past end of device\n");
errcnt += 1;
@@ -576,7 +573,6 @@ static int __init mtd_oobtest_init(void)
if (err) {
pr_info("error occurred as expected\n");
- err = 0;
} else {
pr_err("error: read past end of device\n");
errcnt += 1;
@@ -600,7 +596,6 @@ static int __init mtd_oobtest_init(void)
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
- err = 0;
} else {
pr_err("error: wrote past end of device\n");
errcnt += 1;
@@ -623,7 +618,6 @@ static int __init mtd_oobtest_init(void)
if (err) {
pr_info("error occurred as expected\n");
- err = 0;
} else {
pr_err("error: read past end of device\n");
errcnt += 1;
@@ -701,6 +695,7 @@ static int __init mtd_oobtest_init(void)
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
+ err = -EINVAL;
pr_err("error: too many errors\n");
goto out;
}
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
index 6787ac5471a9..841689b4d86d 100644
--- a/drivers/mtd/tests/torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -230,8 +230,6 @@ static int __init tort_init(void)
if (!bad_ebs)
goto out_check_buf;
- err = 0;
-
/* Initialize patterns */
memset(patt_FF, 0xFF, mtd->erasesize);
for (i = 0; i < mtd->erasesize / pgsize; i++) {
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index ac2bdba8bb1a..3c0c8eca4d51 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -511,7 +511,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
- if (n == UBI_DFS_DIR_LEN) {
+ if (n > UBI_DFS_DIR_LEN) {
/* The array size is too small */
return -EINVAL;
}
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a7ee0af1af90..54e321a695ce 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
family = AF_INET6;
if (bareudp->ethertype == htons(ETH_P_IP)) {
- struct iphdr *iphdr;
+ __u8 ipversion;
- iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
- if (iphdr->version == 4) {
- proto = bareudp->ethertype;
- } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+ if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+ sizeof(ipversion))) {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ ipversion >>= 4;
+
+ if (ipversion == 4) {
+ proto = htons(ETH_P_IP);
+ } else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
bareudp->dev->stats.rx_dropped++;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0ff7567bd04f..31730efa7538 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
+ int err;
if (!bond_dev)
return -EINVAL;
+ rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- xs->xso.real_dev = slave->dev;
- bond->xs = xs;
+ if (!slave) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
- if (!(slave->dev->xfrmdev_ops
- && slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+ rcu_read_unlock();
return -EINVAL;
}
- return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ if (!ipsec) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+ xs->xso.real_dev = slave->dev;
+
+ err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ if (!err) {
+ ipsec->xs = xs;
+ INIT_LIST_HEAD(&ipsec->list);
+ spin_lock_bh(&bond->ipsec_lock);
+ list_add(&ipsec->list, &bond->ipsec_list);
+ spin_unlock_bh(&bond->ipsec_lock);
+ } else {
+ kfree(ipsec);
+ }
+ rcu_read_unlock();
+ return err;
+}
+
+static void bond_ipsec_add_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ goto out;
+
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(slave->dev)) {
+ spin_lock_bh(&bond->ipsec_lock);
+ if (!list_empty(&bond->ipsec_list))
+ slave_warn(bond_dev, slave->dev,
+ "%s: no slave xdo_dev_state_add\n",
+ __func__);
+ spin_unlock_bh(&bond->ipsec_lock);
+ goto out;
+ }
+
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ ipsec->xs->xso.real_dev = slave->dev;
+ if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+ slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+out:
+ rcu_read_unlock();
}
/**
@@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
if (!bond_dev)
return;
+ rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
- return;
+ goto out;
- xs->xso.real_dev = slave->dev;
+ if (!xs->xso.real_dev)
+ goto out;
+
+ WARN_ON(xs->xso.real_dev != slave->dev);
- if (!(slave->dev->xfrmdev_ops
- && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- return;
+ goto out;
}
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+out:
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+ kfree(ipsec);
+ break;
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
+}
+
+static void bond_ipsec_del_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave) {
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (!ipsec->xs->xso.real_dev)
+ continue;
+
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(slave->dev)) {
+ slave_warn(bond_dev, slave->dev,
+ "%s: no slave xdo_dev_state_delete\n",
+ __func__);
+ } else {
+ slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ }
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
}
/**
@@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
- struct net_device *slave_dev = curr_active->dev;
+ struct net_device *real_dev;
+ struct slave *curr_active;
+ struct bonding *bond;
+ int err;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
- return true;
+ bond = netdev_priv(bond_dev);
+ rcu_read_lock();
+ curr_active = rcu_dereference(bond->curr_active_slave);
+ real_dev = curr_active->dev;
- if (!(slave_dev->xfrmdev_ops
- && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
- slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
- return false;
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ err = false;
+ goto out;
}
- xs->xso.real_dev = slave_dev;
- return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ if (!xs->xso.real_dev) {
+ err = false;
+ goto out;
+ }
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+ netif_is_bond_master(real_dev)) {
+ err = false;
+ goto out;
+ }
+
+ err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+out:
+ rcu_read_unlock();
+ return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
@@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
#ifdef CONFIG_XFRM_OFFLOAD
- if (old_active && bond->xs)
- bond_ipsec_del_sa(bond->xs);
+ bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
if (new_active) {
@@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
#ifdef CONFIG_XFRM_OFFLOAD
- if (new_active && bond->xs) {
- xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
- bond_ipsec_add_sa(bond->xs);
- }
+ bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
/* resend IGMP joins since active slave has changed or
@@ -3327,6 +3450,9 @@ static int bond_master_netdev_event(unsigned long event,
return bond_event_changename(event_bond);
case NETDEV_UNREGISTER:
bond_remove_proc_entry(event_bond);
+#ifdef CONFIG_XFRM_OFFLOAD
+ xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
+#endif /* CONFIG_XFRM_OFFLOAD */
break;
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
@@ -4894,7 +5020,8 @@ void bond_setup(struct net_device *bond_dev)
#ifdef CONFIG_XFRM_OFFLOAD
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
- bond->xs = NULL;
+ INIT_LIST_HEAD(&bond->ipsec_list);
+ spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
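The per-SA bookkeeping introduced above relies on a small list-entry type and two new fields in struct bonding that come from a companion include/net/bonding.h change not shown in this hunk. Roughly, with the names taken from the uses above (the real declaration may differ):

/* One offloaded xfrm state tracked per bond, guarded by bond->ipsec_lock. */
struct bond_ipsec {
	struct list_head list;
	struct xfrm_state *xs;
};

/* ...and, inside struct bonding, under CONFIG_XFRM_OFFLOAD: */
	struct list_head ipsec_list;
	spinlock_t ipsec_lock;

With this list in place, bond_ipsec_add_sa_all() and bond_ipsec_del_sa_all() can re-offload or tear down every tracked SA whenever the active slave changes, instead of remembering only a single xfrm state as the old bond->xs pointer did.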
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index a77124bc1f4b..709660cb38f8 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -20,15 +20,6 @@ config CAIF_TTY
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
-config CAIF_HSI
- tristate "CAIF HSI transport driver"
- depends on CAIF
- default n
- help
- The CAIF low level driver for CAIF over HSI.
- Be aware that if you enable this then you also need to
- enable a low-level HSI driver.
-
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
depends on CAIF && HAS_DMA
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b1918c8c126c..97f664f8016c 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
-# HSI interface
-obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
-
# Virtio interface
obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
deleted file mode 100644
index 3d63b15bbaa1..000000000000
--- a/drivers/net/caif/caif_hsi.c
+++ /dev/null
@@ -1,1454 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/if_arp.h>
-#include <linux/timer.h>
-#include <net/rtnetlink.h>
-#include <linux/pkt_sched.h>
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_hsi.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Daniel Martensson");
-MODULE_DESCRIPTION("CAIF HSI driver");
-
-/* Returns the number of padding bytes for alignment. */
-#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
- (((pow)-((x)&((pow)-1)))))
-
-static const struct cfhsi_config hsi_default_config = {
-
- /* Inactivity timeout on HSI, ms */
- .inactivity_timeout = HZ,
-
- /* Aggregation timeout (ms) of zero means no aggregation is done*/
- .aggregation_timeout = 1,
-
- /*
- * HSI link layer flow-control thresholds.
- * Threshold values for the HSI packet queue. Flow-control will be
- * asserted when the number of packets exceeds q_high_mark. It will
- * not be de-asserted before the number of packets drops below
- * q_low_mark.
- * Warning: A high threshold value might increase throughput but it
- * will at the same time prevent channel prioritization and increase
- * the risk of flooding the modem. The high threshold should be above
- * the low.
- */
- .q_high_mark = 100,
- .q_low_mark = 50,
-
- /*
- * HSI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
- .head_align = 4,
- .tail_align = 4,
-};
-
-#define ON 1
-#define OFF 0
-
-static LIST_HEAD(cfhsi_list);
-
-static void cfhsi_inactivity_tout(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- /* Schedule power down work queue. */
- if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_down_work);
-}
-
-static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
- const struct sk_buff *skb,
- int direction)
-{
- struct caif_payload_info *info;
- int hpad, tpad, len;
-
- info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
- len = skb->len + hpad + tpad;
-
- if (direction > 0)
- cfhsi->aggregation_len += len;
- else if (direction < 0)
- cfhsi->aggregation_len -= len;
-}
-
-static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
-{
- int i;
-
- if (cfhsi->cfg.aggregation_timeout == 0)
- return true;
-
- for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
- if (cfhsi->qhead[i].qlen)
- return true;
- }
-
- /* TODO: Use aggregation_len instead */
- if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
- return true;
-
- return false;
-}
-
-static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
-{
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
- skb = skb_dequeue(&cfhsi->qhead[i]);
- if (skb)
- break;
- }
-
- return skb;
-}
-
-static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
-{
- int i, len = 0;
- for (i = 0; i < CFHSI_PRIO_LAST; ++i)
- len += skb_queue_len(&cfhsi->qhead[i]);
- return len;
-}
-
-static void cfhsi_abort_tx(struct cfhsi *cfhsi)
-{
- struct sk_buff *skb;
-
- for (;;) {
- spin_lock_bh(&cfhsi->lock);
- skb = cfhsi_dequeue(cfhsi);
- if (!skb)
- break;
-
- cfhsi->ndev->stats.tx_errors++;
- cfhsi->ndev->stats.tx_dropped++;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
- kfree_skb(skb);
- }
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
-}
-
-static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
-{
- char buffer[32]; /* Any reasonable value */
- size_t fifo_occupancy;
- int ret;
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- do {
- ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy);
- if (ret) {
- netdev_warn(cfhsi->ndev,
- "%s: can't get FIFO occupancy: %d.\n",
- __func__, ret);
- break;
- } else if (!fifo_occupancy)
- /* No more data, exitting normally */
- break;
-
- fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
- set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
- cfhsi->ops);
- if (ret) {
- clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- netdev_warn(cfhsi->ndev,
- "%s: can't read data: %d.\n",
- __func__, ret);
- break;
- }
-
- ret = 5 * HZ;
- ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
- !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
-
- if (ret < 0) {
- netdev_warn(cfhsi->ndev,
- "%s: can't wait for flush complete: %d.\n",
- __func__, ret);
- break;
- } else if (!ret) {
- ret = -ETIMEDOUT;
- netdev_warn(cfhsi->ndev,
- "%s: timeout waiting for flush complete.\n",
- __func__);
- break;
- }
- } while (1);
-
- return ret;
-}
-
-static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int nfrms = 0;
- int pld_len = 0;
- struct sk_buff *skb;
- u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
-
- skb = cfhsi_dequeue(cfhsi);
- if (!skb)
- return 0;
-
- /* Clear offset. */
- desc->offset = 0;
-
- /* Check if we can embed a CAIF frame. */
- if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
- struct caif_payload_info *info;
- int hpad;
- int tpad;
-
- /* Calculate needed head alignment and tail alignment. */
- info = (struct caif_payload_info *)&skb->cb;
-
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
- /* Check if frame still fits with added alignment. */
- if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
- u8 *pemb = desc->emb_frm;
- desc->offset = CFHSI_DESC_SHORT_SZ;
- *pemb = (u8)(hpad - 1);
- pemb += hpad;
-
- /* Update network statistics. */
- spin_lock_bh(&cfhsi->lock);
- cfhsi->ndev->stats.tx_packets++;
- cfhsi->ndev->stats.tx_bytes += skb->len;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
-
- /* Copy in embedded CAIF frame. */
- skb_copy_bits(skb, 0, pemb, skb->len);
-
- /* Consume the SKB */
- consume_skb(skb);
- skb = NULL;
- }
- }
-
- /* Create payload CAIF frames. */
- while (nfrms < CFHSI_MAX_PKTS) {
- struct caif_payload_info *info;
- int hpad;
- int tpad;
-
- if (!skb)
- skb = cfhsi_dequeue(cfhsi);
-
- if (!skb)
- break;
-
- /* Calculate needed head alignment and tail alignment. */
- info = (struct caif_payload_info *)&skb->cb;
-
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
- /* Fill in CAIF frame length in descriptor. */
- desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
-
- /* Fill head padding information. */
- *pfrm = (u8)(hpad - 1);
- pfrm += hpad;
-
- /* Update network statistics. */
- spin_lock_bh(&cfhsi->lock);
- cfhsi->ndev->stats.tx_packets++;
- cfhsi->ndev->stats.tx_bytes += skb->len;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
-
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, pfrm, skb->len);
-
- /* Update payload length. */
- pld_len += desc->cffrm_len[nfrms];
-
- /* Update frame pointer. */
- pfrm += skb->len + tpad;
-
- /* Consume the SKB */
- consume_skb(skb);
- skb = NULL;
-
- /* Update number of frames. */
- nfrms++;
- }
-
- /* Unused length fields should be zero-filled (according to SPEC). */
- while (nfrms < CFHSI_MAX_PKTS) {
- desc->cffrm_len[nfrms] = 0x0000;
- nfrms++;
- }
-
- /* Check if we can piggy-back another descriptor. */
- if (cfhsi_can_send_aggregate(cfhsi))
- desc->header |= CFHSI_PIGGY_DESC;
- else
- desc->header &= ~CFHSI_PIGGY_DESC;
-
- return CFHSI_DESC_SZ + pld_len;
-}
-
-static void cfhsi_start_tx(struct cfhsi *cfhsi)
-{
- struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
- int len, res;
-
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- do {
- /* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- if (!len) {
- spin_lock_bh(&cfhsi->lock);
- if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
- spin_unlock_bh(&cfhsi->lock);
- res = -EAGAIN;
- continue;
- }
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- /* Start inactivity timer. */
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
- break;
- }
-
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0))
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- } while (res < 0);
-}
-
-static void cfhsi_tx_done(struct cfhsi *cfhsi)
-{
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /*
- * Send flow on if flow off has been previously signalled
- * and number of packets is below low water mark.
- */
- spin_lock_bh(&cfhsi->lock);
- if (cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
- cfhsi->cfdev.flowctrl) {
-
- cfhsi->flow_off_sent = 0;
- cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
- }
-
- if (cfhsi_can_send_aggregate(cfhsi)) {
- spin_unlock_bh(&cfhsi->lock);
- cfhsi_start_tx(cfhsi);
- } else {
- mod_timer(&cfhsi->aggregation_timer,
- jiffies + cfhsi->cfg.aggregation_timeout);
- spin_unlock_bh(&cfhsi->lock);
- }
-
- return;
-}
-
-static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
- cfhsi_tx_done(cfhsi);
-}
-
-static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int xfer_sz = 0;
- int nfrms = 0;
- u16 *plen = NULL;
- u8 *pfrm = NULL;
-
- if ((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Check for embedded CAIF frame. */
- if (desc->offset) {
- struct sk_buff *skb;
- int len = 0;
- pfrm = ((u8 *)desc) + desc->offset;
-
- /* Remove offset padding. */
- pfrm += *pfrm + 1;
-
- /* Read length of CAIF frame (little endian). */
- len = *pfrm;
- len |= ((*(pfrm+1)) << 8) & 0xFF00;
- len += 2; /* Add FCS fields. */
-
- /* Sanity check length of CAIF frame. */
- if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_ATOMIC);
- if (!skb) {
- netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
- __func__);
- return -ENOMEM;
- }
- caif_assert(skb != NULL);
-
- skb_put_data(skb, pfrm, len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = cfhsi->ndev;
-
- netif_rx_any_context(skb);
-
- /* Update network statistics. */
- cfhsi->ndev->stats.rx_packets++;
- cfhsi->ndev->stats.rx_bytes += len;
- }
-
- /* Calculate transfer length. */
- plen = desc->cffrm_len;
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- xfer_sz += *plen;
- plen++;
- nfrms++;
- }
-
- /* Check for piggy-backed descriptor. */
- if (desc->header & CFHSI_PIGGY_DESC)
- xfer_sz += CFHSI_DESC_SZ;
-
- if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
- netdev_err(cfhsi->ndev,
- "%s: Invalid payload len: %d, ignored.\n",
- __func__, xfer_sz);
- return -EPROTO;
- }
- return xfer_sz;
-}
-
-static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
-{
- int xfer_sz = 0;
- int nfrms = 0;
- u16 *plen;
-
- if ((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
-
- pr_err("Invalid descriptor. %x %x\n", desc->header,
- desc->offset);
- return -EPROTO;
- }
-
- /* Calculate transfer length. */
- plen = desc->cffrm_len;
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- xfer_sz += *plen;
- plen++;
- nfrms++;
- }
-
- if (xfer_sz % 4) {
- pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
- return -EPROTO;
- }
- return xfer_sz;
-}
-
-static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int rx_sz = 0;
- int nfrms = 0;
- u16 *plen = NULL;
- u8 *pfrm = NULL;
-
- /* Sanity check header and offset. */
- if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
- netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Set frame pointer to start of payload. */
- pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
- plen = desc->cffrm_len;
-
- /* Skip already processed frames. */
- while (nfrms < cfhsi->rx_state.nfrms) {
- pfrm += *plen;
- rx_sz += *plen;
- plen++;
- nfrms++;
- }
-
- /* Parse payload. */
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- struct sk_buff *skb;
- u8 *pcffrm = NULL;
- int len;
-
- /* CAIF frame starts after head padding. */
- pcffrm = pfrm + *pfrm + 1;
-
- /* Read length of CAIF frame (little endian). */
- len = *pcffrm;
- len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
- len += 2; /* Add FCS fields. */
-
- /* Sanity check length of CAIF frames. */
- if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_ATOMIC);
- if (!skb) {
- netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
- __func__);
- cfhsi->rx_state.nfrms = nfrms;
- return -ENOMEM;
- }
- caif_assert(skb != NULL);
-
- skb_put_data(skb, pcffrm, len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = cfhsi->ndev;
-
- netif_rx_any_context(skb);
-
- /* Update network statistics. */
- cfhsi->ndev->stats.rx_packets++;
- cfhsi->ndev->stats.rx_bytes += len;
-
- pfrm += *plen;
- rx_sz += *plen;
- plen++;
- nfrms++;
- }
-
- return rx_sz;
-}
-
-static void cfhsi_rx_done(struct cfhsi *cfhsi)
-{
- int res;
- int desc_pld_len = 0, rx_len, rx_state;
- struct cfhsi_desc *desc = NULL;
- u8 *rx_ptr, *rx_buf;
- struct cfhsi_desc *piggy_desc = NULL;
-
- desc = (struct cfhsi_desc *)cfhsi->rx_buf;
-
- netdev_dbg(cfhsi->ndev, "%s\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Update inactivity timer if pending. */
- spin_lock_bh(&cfhsi->lock);
- mod_timer_pending(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
-
- if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- desc_pld_len = cfhsi_rx_desc_len(desc);
-
- if (desc_pld_len < 0)
- goto out_of_sync;
-
- rx_buf = cfhsi->rx_buf;
- rx_len = desc_pld_len;
- if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
- rx_len += CFHSI_DESC_SZ;
- if (desc_pld_len == 0)
- rx_buf = cfhsi->rx_flip_buf;
- } else {
- rx_buf = cfhsi->rx_flip_buf;
-
- rx_len = CFHSI_DESC_SZ;
- if (cfhsi->rx_state.pld_len > 0 &&
- (desc->header & CFHSI_PIGGY_DESC)) {
-
- piggy_desc = (struct cfhsi_desc *)
- (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
- cfhsi->rx_state.pld_len);
-
- cfhsi->rx_state.piggy_desc = true;
-
- /* Extract payload len from piggy-backed descriptor. */
- desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
- if (desc_pld_len < 0)
- goto out_of_sync;
-
- if (desc_pld_len > 0) {
- rx_len = desc_pld_len;
- if (piggy_desc->header & CFHSI_PIGGY_DESC)
- rx_len += CFHSI_DESC_SZ;
- }
-
- /*
- * Copy needed information from the piggy-backed
- * descriptor to the descriptor in the start.
- */
- memcpy(rx_buf, (u8 *)piggy_desc,
- CFHSI_DESC_SHORT_SZ);
- }
- }
-
- if (desc_pld_len) {
- rx_state = CFHSI_RX_STATE_PAYLOAD;
- rx_ptr = rx_buf + CFHSI_DESC_SZ;
- } else {
- rx_state = CFHSI_RX_STATE_DESC;
- rx_ptr = rx_buf;
- rx_len = CFHSI_DESC_SZ;
- }
-
- /* Initiate next read */
- if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
- /* Set up new transfer. */
- netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
- __func__);
-
- res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
- cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
- __func__, res);
- cfhsi->ndev->stats.rx_errors++;
- cfhsi->ndev->stats.rx_dropped++;
- }
- }
-
- if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- /* Extract payload from descriptor */
- if (cfhsi_rx_desc(desc, cfhsi) < 0)
- goto out_of_sync;
- } else {
- /* Extract payload */
- if (cfhsi_rx_pld(desc, cfhsi) < 0)
- goto out_of_sync;
- if (piggy_desc) {
- /* Extract any payload in piggyback descriptor. */
- if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
- goto out_of_sync;
- /* Mark no embedded frame after extracting it */
- piggy_desc->offset = 0;
- }
- }
-
- /* Update state info */
- memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
- cfhsi->rx_state.state = rx_state;
- cfhsi->rx_ptr = rx_ptr;
- cfhsi->rx_len = rx_len;
- cfhsi->rx_state.pld_len = desc_pld_len;
- cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
-
- if (rx_buf != cfhsi->rx_buf)
- swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
- return;
-
-out_of_sync:
- netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
- print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
- cfhsi->rx_buf, CFHSI_DESC_SZ);
- schedule_work(&cfhsi->out_of_sync_work);
-}
-
-static void cfhsi_rx_slowpath(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
- wake_up_interruptible(&cfhsi->flush_fifo_wait);
- else
- cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_wake_up(struct work_struct *work)
-{
- struct cfhsi *cfhsi = NULL;
- int res;
- int len;
- long ret;
-
- cfhsi = container_of(work, struct cfhsi, wake_up_work);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
- /* It happenes when wakeup is requested by
- * both ends at the same time. */
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- return;
- }
-
- /* Activate wake line. */
- cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
-
- netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
- __func__);
-
- /* Wait for acknowledge. */
- ret = CFHSI_WAKE_TOUT;
- ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
- test_and_clear_bit(CFHSI_WAKE_UP_ACK,
- &cfhsi->bits), ret);
- if (unlikely(ret < 0)) {
- /* Interrupted by signal. */
- netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
- __func__, ret);
-
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
- return;
- } else if (!ret) {
- bool ca_wake = false;
- size_t fifo_occupancy = 0;
-
- /* Wakeup timeout */
- netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
- __func__);
-
- /* Check FIFO to check if modem has sent something. */
- WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy));
-
- netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
- __func__, (unsigned) fifo_occupancy);
-
- /* Check if we misssed the interrupt. */
- WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
- &ca_wake));
-
- if (ca_wake) {
- netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
- __func__);
-
- /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
- /* Continue execution. */
- goto wake_ack;
- }
-
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
- return;
- }
-wake_ack:
- netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
- __func__);
-
- /* Clear power up bit. */
- set_bit(CFHSI_AWAKE, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-
- /* Resume read operation. */
- netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
- res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
-
- if (WARN_ON(res < 0))
- netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
-
- /* Clear power up acknowledment. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
- spin_lock_bh(&cfhsi->lock);
-
- /* Resume transmit if queues are not empty. */
- if (!cfhsi_tx_queue_len(cfhsi)) {
- netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
- __func__);
- /* Start inactivity timer. */
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
- return;
- }
-
- netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
- __func__);
-
- spin_unlock_bh(&cfhsi->lock);
-
- /* Create HSI frame. */
- len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
-
- if (likely(len > 0)) {
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- cfhsi_abort_tx(cfhsi);
- }
- } else {
- netdev_err(cfhsi->ndev,
- "%s: Failed to create HSI frame: %d.\n",
- __func__, len);
- }
-}
-
-static void cfhsi_wake_down(struct work_struct *work)
-{
- long ret;
- struct cfhsi *cfhsi = NULL;
- size_t fifo_occupancy = 0;
- int retry = CFHSI_WAKE_TOUT;
-
- cfhsi = container_of(work, struct cfhsi, wake_down_work);
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Deactivate wake line. */
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
-
- /* Wait for acknowledge. */
- ret = CFHSI_WAKE_TOUT;
- ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
- test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
- &cfhsi->bits), ret);
- if (ret < 0) {
- /* Interrupted by signal. */
- netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
- __func__, ret);
- return;
- } else if (!ret) {
- bool ca_wake = true;
-
- /* Timeout */
- netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
-
- /* Check if we misssed the interrupt. */
- WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
- &ca_wake));
- if (!ca_wake)
- netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
- __func__);
- }
-
- /* Check FIFO occupancy. */
- while (retry) {
- WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy));
-
- if (!fifo_occupancy)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- retry--;
- }
-
- if (!retry)
- netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
-
- /* Clear AWAKE condition. */
- clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
- /* Cancel pending RX requests. */
- cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-}
-
-static void cfhsi_out_of_sync(struct work_struct *work)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
-
- rtnl_lock();
- dev_close(cfhsi->ndev);
- rtnl_unlock();
-}
-
-static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- wake_up_interruptible(&cfhsi->wake_up_wait);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Schedule wake up work queue if the peer initiates. */
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
-}
-
-static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- /* Initiating low power is only permitted by the host (us). */
- set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
- wake_up_interruptible(&cfhsi->wake_down_wait);
-}
-
-static void cfhsi_aggregation_tout(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- cfhsi_start_tx(cfhsi);
-}
-
-static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct cfhsi *cfhsi = NULL;
- int start_xfer = 0;
- int timer_active;
- int prio;
-
- if (!dev)
- return -EINVAL;
-
- cfhsi = netdev_priv(dev);
-
- switch (skb->priority) {
- case TC_PRIO_BESTEFFORT:
- case TC_PRIO_FILLER:
- case TC_PRIO_BULK:
- prio = CFHSI_PRIO_BEBK;
- break;
- case TC_PRIO_INTERACTIVE_BULK:
- prio = CFHSI_PRIO_VI;
- break;
- case TC_PRIO_INTERACTIVE:
- prio = CFHSI_PRIO_VO;
- break;
- case TC_PRIO_CONTROL:
- default:
- prio = CFHSI_PRIO_CTL;
- break;
- }
-
- spin_lock_bh(&cfhsi->lock);
-
- /* Update aggregation statistics */
- cfhsi_update_aggregation_stats(cfhsi, skb, 1);
-
- /* Queue the SKB */
- skb_queue_tail(&cfhsi->qhead[prio], skb);
-
- /* Sanity check; xmit should not be called after unregister_netdev */
- if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
- spin_unlock_bh(&cfhsi->lock);
- cfhsi_abort_tx(cfhsi);
- return -EINVAL;
- }
-
- /* Send flow off if number of packets is above high water mark. */
- if (!cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
- cfhsi->cfdev.flowctrl) {
- cfhsi->flow_off_sent = 1;
- cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
- }
-
- if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
- cfhsi->tx_state = CFHSI_TX_STATE_XFER;
- start_xfer = 1;
- }
-
- if (!start_xfer) {
- /* Send aggregate if it is possible */
- bool aggregate_ready =
- cfhsi_can_send_aggregate(cfhsi) &&
- del_timer(&cfhsi->aggregation_timer) > 0;
- spin_unlock_bh(&cfhsi->lock);
- if (aggregate_ready)
- cfhsi_start_tx(cfhsi);
- return NETDEV_TX_OK;
- }
-
- /* Delete inactivity timer if started. */
- timer_active = del_timer_sync(&cfhsi->inactivity_timer);
-
- spin_unlock_bh(&cfhsi->lock);
-
- if (timer_active) {
- struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
- int len;
- int res;
-
- /* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- WARN_ON(!len);
-
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- cfhsi_abort_tx(cfhsi);
- }
- } else {
- /* Schedule wake up work queue if the we initiate. */
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
- }
-
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops cfhsi_netdevops;
-
-static void cfhsi_setup(struct net_device *dev)
-{
- int i;
- struct cfhsi *cfhsi = netdev_priv(dev);
- dev->features = 0;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
- dev->priv_flags |= IFF_NO_QUEUE;
- dev->needs_free_netdev = true;
- dev->netdev_ops = &cfhsi_netdevops;
- for (i = 0; i < CFHSI_PRIO_LAST; ++i)
- skb_queue_head_init(&cfhsi->qhead[i]);
- cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfhsi->cfdev.use_frag = false;
- cfhsi->cfdev.use_stx = false;
- cfhsi->cfdev.use_fcs = false;
- cfhsi->ndev = dev;
- cfhsi->cfg = hsi_default_config;
-}
-
-static int cfhsi_open(struct net_device *ndev)
-{
- struct cfhsi *cfhsi = netdev_priv(ndev);
- int res;
-
- clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
- /* Initialize state vaiables. */
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
-
- /* Set flow info */
- cfhsi->flow_off_sent = 0;
-
- /*
- * Allocate a TX buffer with the size of a HSI packet descriptors
- * and the necessary room for CAIF payload frames.
- */
- cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
- if (!cfhsi->tx_buf) {
- res = -ENODEV;
- goto err_alloc_tx;
- }
-
- /*
- * Allocate a RX buffer with the size of two HSI packet descriptors and
- * the necessary room for CAIF payload frames.
- */
- cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
- if (!cfhsi->rx_buf) {
- res = -ENODEV;
- goto err_alloc_rx;
- }
-
- cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
- if (!cfhsi->rx_flip_buf) {
- res = -ENODEV;
- goto err_alloc_rx_flip;
- }
-
- /* Initialize aggregation timeout */
- cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
-
- /* Initialize recieve vaiables. */
- cfhsi->rx_ptr = cfhsi->rx_buf;
- cfhsi->rx_len = CFHSI_DESC_SZ;
-
- /* Initialize spin locks. */
- spin_lock_init(&cfhsi->lock);
-
- /* Set up the driver. */
- cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
- cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
- cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
-
- /* Initialize the work queues. */
- INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
- INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
- INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
-
- /* Clear all bit fields. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
- /* Create work thread. */
- cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
- if (!cfhsi->wq) {
- netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
- __func__);
- res = -ENODEV;
- goto err_create_wq;
- }
-
- /* Initialize wait queues. */
- init_waitqueue_head(&cfhsi->wake_up_wait);
- init_waitqueue_head(&cfhsi->wake_down_wait);
- init_waitqueue_head(&cfhsi->flush_fifo_wait);
-
- /* Setup the inactivity timer. */
- timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
- /* Setup the slowpath RX timer. */
- timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
- /* Setup the aggregation timer. */
- timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
-
- /* Activate HSI interface. */
- res = cfhsi->ops->cfhsi_up(cfhsi->ops);
- if (res) {
- netdev_err(cfhsi->ndev,
- "%s: can't activate HSI interface: %d.\n",
- __func__, res);
- goto err_activate;
- }
-
- /* Flush FIFO */
- res = cfhsi_flush_fifo(cfhsi);
- if (res) {
- netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
- __func__, res);
- goto err_net_reg;
- }
- return res;
-
- err_net_reg:
- cfhsi->ops->cfhsi_down(cfhsi->ops);
- err_activate:
- destroy_workqueue(cfhsi->wq);
- err_create_wq:
- kfree(cfhsi->rx_flip_buf);
- err_alloc_rx_flip:
- kfree(cfhsi->rx_buf);
- err_alloc_rx:
- kfree(cfhsi->tx_buf);
- err_alloc_tx:
- return res;
-}
-
-static int cfhsi_close(struct net_device *ndev)
-{
- struct cfhsi *cfhsi = netdev_priv(ndev);
- u8 *tx_buf, *rx_buf, *flip_buf;
-
- /* going to shutdown driver */
- set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
- /* Delete timers if pending */
- del_timer_sync(&cfhsi->inactivity_timer);
- del_timer_sync(&cfhsi->rx_slowpath_timer);
- del_timer_sync(&cfhsi->aggregation_timer);
-
- /* Cancel pending RX request (if any) */
- cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-
- /* Destroy workqueue */
- destroy_workqueue(cfhsi->wq);
-
- /* Store bufferes: will be freed later. */
- tx_buf = cfhsi->tx_buf;
- rx_buf = cfhsi->rx_buf;
- flip_buf = cfhsi->rx_flip_buf;
- /* Flush transmit queues. */
- cfhsi_abort_tx(cfhsi);
-
- /* Deactivate interface */
- cfhsi->ops->cfhsi_down(cfhsi->ops);
-
- /* Free buffers. */
- kfree(tx_buf);
- kfree(rx_buf);
- kfree(flip_buf);
- return 0;
-}
-
-static void cfhsi_uninit(struct net_device *dev)
-{
- struct cfhsi *cfhsi = netdev_priv(dev);
- ASSERT_RTNL();
- symbol_put(cfhsi_get_device);
- list_del(&cfhsi->list);
-}
-
-static const struct net_device_ops cfhsi_netdevops = {
- .ndo_uninit = cfhsi_uninit,
- .ndo_open = cfhsi_open,
- .ndo_stop = cfhsi_close,
- .ndo_start_xmit = cfhsi_xmit
-};
-
-static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
-{
- int i;
-
- if (!data) {
- pr_debug("no params data found\n");
- return;
- }
-
- i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
- /*
- * Inactivity timeout in millisecs. Lowest possible value is 1,
- * and highest possible is NEXT_TIMER_MAX_DELTA.
- */
- if (data[i]) {
- u32 inactivity_timeout = nla_get_u32(data[i]);
- /* Pre-calculate inactivity timeout. */
- cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
- if (cfhsi->cfg.inactivity_timeout == 0)
- cfhsi->cfg.inactivity_timeout = 1;
- else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
- cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- }
-
- i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
- if (data[i])
- cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_HEAD_ALIGN;
- if (data[i])
- cfhsi->cfg.head_align = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_TAIL_ALIGN;
- if (data[i])
- cfhsi->cfg.tail_align = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
- if (data[i])
- cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
- if (data[i])
- cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
-}
-
-static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- cfhsi_netlink_parms(data, netdev_priv(dev));
- netdev_state_change(dev);
- return 0;
-}
-
-static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
- [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
-};
-
-static size_t caif_hsi_get_size(const struct net_device *dev)
-{
- int i;
- size_t s = 0;
- for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
- s += nla_total_size(caif_hsi_policy[i].len);
- return s;
-}
-
-static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct cfhsi *cfhsi = netdev_priv(dev);
-
- if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- cfhsi->cfg.inactivity_timeout) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- cfhsi->cfg.aggregation_timeout) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
- cfhsi->cfg.head_align) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
- cfhsi->cfg.tail_align) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- cfhsi->cfg.q_high_mark) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
- cfhsi->cfg.q_low_mark))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct cfhsi *cfhsi = NULL;
- struct cfhsi_ops *(*get_ops)(void);
-
- ASSERT_RTNL();
-
- cfhsi = netdev_priv(dev);
- cfhsi_netlink_parms(data, cfhsi);
-
- get_ops = symbol_get(cfhsi_get_ops);
- if (!get_ops) {
- pr_err("%s: failed to get the cfhsi_ops\n", __func__);
- return -ENODEV;
- }
-
- /* Assign the HSI device. */
- cfhsi->ops = (*get_ops)();
- if (!cfhsi->ops) {
- pr_err("%s: failed to get the cfhsi_ops\n", __func__);
- goto err;
- }
-
- /* Assign the driver to this HSI device. */
- cfhsi->ops->cb_ops = &cfhsi->cb_ops;
- if (register_netdevice(dev)) {
- pr_warn("%s: caif_hsi device registration failed\n", __func__);
- goto err;
- }
- /* Add CAIF HSI device to list. */
- list_add_tail(&cfhsi->list, &cfhsi_list);
-
- return 0;
-err:
- symbol_put(cfhsi_get_ops);
- return -ENODEV;
-}
-
-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
- .kind = "cfhsi",
- .priv_size = sizeof(struct cfhsi),
- .setup = cfhsi_setup,
- .maxtype = __IFLA_CAIF_HSI_MAX,
- .policy = caif_hsi_policy,
- .newlink = caif_hsi_newlink,
- .changelink = caif_hsi_changelink,
- .get_size = caif_hsi_get_size,
- .fill_info = caif_hsi_fill_info,
-};
-
-static void __exit cfhsi_exit_module(void)
-{
- struct list_head *list_node;
- struct list_head *n;
- struct cfhsi *cfhsi;
-
- rtnl_link_unregister(&caif_hsi_link_ops);
-
- rtnl_lock();
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdevice(cfhsi->ndev);
- }
- rtnl_unlock();
-}
-
-static int __init cfhsi_init_module(void)
-{
- return rtnl_link_register(&caif_hsi_link_ops);
-}
-
-module_init(cfhsi_init_module);
-module_exit(cfhsi_exit_module);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 4ffbfd534f18..2a7af611d43a 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -87,9 +87,9 @@ static void ldisc_tx_wakeup(struct tty_struct *tty);
static inline void update_tty_status(struct ser_device *ser)
{
ser->tty_status =
- ser->tty->stopped << 5 |
- ser->tty->flow_stopped << 3 |
- ser->tty->packet << 2;
+ ser->tty->flow.stopped << 5 |
+ ser->tty->flow.tco_stopped << 3 |
+ ser->tty->ctrl.packet << 2;
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
@@ -159,7 +159,7 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
- char *flags, int count)
+ const char *flags, int count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
@@ -380,6 +380,7 @@ static void ldisc_close(struct tty_struct *tty)
/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
.owner = THIS_MODULE,
+ .num = N_CAIF,
.name = "n_caif",
.open = ldisc_open,
.close = ldisc_close,
@@ -429,7 +430,7 @@ static int __init caif_ser_init(void)
{
int ret;
- ret = tty_register_ldisc(N_CAIF, &caif_ldisc);
+ ret = tty_register_ldisc(&caif_ldisc);
if (ret < 0)
pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, ret);
@@ -444,7 +445,7 @@ static void __exit caif_ser_exit(void)
spin_unlock(&ser_lock);
ser_release(NULL);
cancel_work_sync(&ser_release_work);
- tty_unregister_ldisc(N_CAIF);
+ tty_unregister_ldisc(&caif_ldisc);
debugfs_remove_recursive(debugfsdir);
}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index bba2a449ac70..43bca315a66c 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev)
FIELD_PREP(TDCR_TDCO_MASK, tdco));
}
- reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
- FIELD_PREP(NBTP_NSJW_MASK, sjw) |
- FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
- FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
+ reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+ FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+ FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+ FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_DBTP, reg_btp);
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 31ba6664503d..d42ec7d1bc14 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -467,7 +467,8 @@ static void slc_setup(struct net_device *dev)
*/
static void slcan_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+ const unsigned char *cp, const char *fp,
+ int count)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
@@ -697,6 +698,7 @@ static int slcan_ioctl(struct tty_struct *tty, struct file *file,
static struct tty_ldisc_ops slc_ldisc = {
.owner = THIS_MODULE,
+ .num = N_SLCAN,
.name = "slcan",
.open = slcan_open,
.close = slcan_close,
@@ -721,7 +723,7 @@ static int __init slcan_init(void)
return -ENOMEM;
/* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
+ status = tty_register_ldisc(&slc_ldisc);
if (status) {
printk(KERN_ERR "slcan: can't register line discipline\n");
kfree(slcan_devs);
@@ -782,9 +784,7 @@ static void __exit slcan_exit(void)
kfree(slcan_devs);
slcan_devs = NULL;
- i = tty_unregister_ldisc(N_SLCAN);
- if (i)
- printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
+ tty_unregister_ldisc(&slc_ldisc);
}
module_init(slcan_init);
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index dd17b8c53e1c..89d9c986a229 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
return ret;
}
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 47c3f408a799..9ae48072b6c6 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2300,6 +2300,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
err, priv->regs_status.intf);
mcp251xfd_dump(priv);
mcp251xfd_chip_interrupts_disable(priv);
+ mcp251xfd_timestamp_stop(priv);
return handled;
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 0a37af4a3fa4..2b5302e72435 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -255,6 +255,8 @@ struct ems_usb {
unsigned int free_slots; /* remember number of available slots */
struct ems_cpc_msg active_params; /* active controller parameters */
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
buf, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
@@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
usb_kill_anchored_urbs(&dev->rx_submitted);
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 65b58f8fc328..66fa8b07c2e6 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -195,6 +195,8 @@ struct esd_usb2 {
int net_count;
u32 version;
int rxinitdone;
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
struct esd_usb2_net_priv {
@@ -545,6 +547,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -554,7 +557,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
dev_warn(dev->udev->dev.parent,
"No memory left for USB buffer\n");
@@ -562,6 +565,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
goto freeurb;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
@@ -574,8 +579,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ goto freeurb;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
freeurb:
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
@@ -663,6 +672,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
for (i = 0; i < dev->net_count; i++) {
priv = dev->nets[i];
if (priv) {
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index a45865bd7254..a1a154c08b7f 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
break;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
buf, MCBA_USB_RX_BUFF_SIZE,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 1d6f77252f01..899a3d21b77f 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -117,7 +117,8 @@
#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
/* identify bus event packets with rx/tx error counters */
-#define PCAN_USB_ERR_CNT 0x80
+#define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */
+#define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */
/* private to PCAN-USB adapter */
struct pcan_usb {
@@ -608,11 +609,12 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
/* according to the content of the packet */
switch (ir) {
- case PCAN_USB_ERR_CNT:
+ case PCAN_USB_ERR_CNT_DEC:
+ case PCAN_USB_ERR_CNT_INC:
/* save rx/tx error counters in the device context */
- pdev->bec.rxerr = mc->ptr[0];
- pdev->bec.txerr = mc->ptr[1];
+ pdev->bec.rxerr = mc->ptr[1];
+ pdev->bec.txerr = mc->ptr[2];
break;
default:
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index b6e7ef0d5bc6..d1b83bd1b3cb 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -137,7 +137,8 @@ struct usb_8dev_priv {
u8 *cmd_msg_buffer;
struct mutex usb_8dev_cmd_lock;
-
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
/* tx frame */
@@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
}
buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}
+ urb->transfer_dma = buf_dma;
+
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev,
USB_8DEV_ENDP_DATA_RX),
@@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}
+ priv->rxbuf[i] = buf;
+ priv->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
usb_kill_anchored_urbs(&priv->rx_submitted);
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+ priv->rxbuf[i], priv->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_urbs, 0);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 9fdcc4bde480..5c54ae1be62c 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
{
struct hellcreek *hellcreek = ds->priv;
u16 entries;
+ int ret = 0;
size_t i;
mutex_lock(&hellcreek->reg_lock);
@@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
if (!(entry.portmask & BIT(port)))
continue;
- cb(entry.mac, 0, entry.is_static, data);
+ ret = cb(entry.mac, 0, entry.is_static, data);
+ if (ret)
+ break;
}
mutex_unlock(&hellcreek->reg_lock);
- return 0;
+ return ret;
}
static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 344374025426..d7ce281570b5 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
return 0;
}
-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
- int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+ int portmap, void *ctx);
-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
{
- int i;
+ int ret = 0, i;
mutex_lock(&chip->alr_mutex);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
LAN9303_ALR_DAT1_PORT_BITOFFS;
portmap = alrport_2_portmap[alrport];
- cb(chip, dat0, dat1, portmap, ctx);
+ ret = cb(chip, dat0, dat1, portmap, ctx);
+ if (ret)
+ break;
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_NEXT);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
}
mutex_unlock(&chip->alr_mutex);
+
+ return ret;
}
static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
};
/* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
- u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
{
struct del_port_learned_ctx *del_ctx = ctx;
int port = del_ctx->port;
if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
- return;
+ return 0;
/* learned entries have only one port, we can just delete */
dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+ return 0;
}
struct port_fdb_dump_ctx {
@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
dsa_fdb_dump_cb_t *cb;
};
-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
- u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
{
struct port_fdb_dump_ctx *dump_ctx = ctx;
u8 mac[ETH_ALEN];
bool is_static;
if ((BIT(dump_ctx->port) & portmap) == 0)
- return;
+ return 0;
alr_reg_to_mac(dat0, dat1, mac);
is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
- dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+ return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
}
/* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
};
dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
- lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
- return 0;
+ return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
}
static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 314ae78bbdd6..e78026ef6d8c 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
addr[1] = mac_bridge.key[2] & 0xff;
addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
- if (mac_bridge.val[0] & BIT(port))
- cb(addr, 0, true, data);
+ if (mac_bridge.val[0] & BIT(port)) {
+ err = cb(addr, 0, true, data);
+ if (err)
+ return err;
+ }
} else {
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
- cb(addr, 0, false, data);
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+ err = cb(addr, 0, false, data);
+ if (err)
+ return err;
+ }
}
}
return 0;
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 560f6843bb65..c5142f86a3c7 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
shifts = ksz8->shifts;
ksz8_r_table(dev, TABLE_VLAN, addr, &data);
- addr *= dev->phy_port_cnt;
- for (i = 0; i < dev->phy_port_cnt; i++) {
+ addr *= 4;
+ for (i = 0; i < 4; i++) {
dev->vlan_cache[addr + i].table[0] = (u16)data;
data >>= shifts[VLAN_TABLE];
}
@@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
u64 buf;
data = (u16 *)&buf;
- addr = vid / dev->phy_port_cnt;
+ addr = vid / 4;
index = vid & 3;
ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
*vlan = data[index];
@@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
u64 buf;
data = (u16 *)&buf;
- addr = vid / dev->phy_port_cnt;
+ addr = vid / 4;
index = vid & 3;
ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
data[index] = vlan;
@@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
+ /* Discard packets with VID not enabled on the switch */
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
+ /* Discard packets with VID not enabled on the ingress port */
+ for (port = 0; port < dev->phy_port_cnt; ++port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+ flag);
+
return 0;
}
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+ if (ksz_is_ksz88x3(dev)) {
+ ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+ 0x03 << (4 - 2 * port), state);
+ } else {
+ ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+ }
+}
+
static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
- ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+ /* If a VLAN is added with untagged flag different from the
+ * port's Remove Tag flag, we need to change the latter.
+ * Ignore VID 0, which is always untagged.
+ * Ignore CPU port, which will always be tagged.
+ */
+ if (untagged != p->remove_tag && vlan->vid != 0 &&
+ port != dev->cpu_port) {
+ unsigned int vid;
+
+ /* Reject attempts to add a VLAN that requires the
+ * Remove Tag flag to be changed, unless there are no
+ * other VLANs currently configured.
+ */
+ for (vid = 1; vid < dev->num_vlans; ++vid) {
+ /* Skip the VID we are going to add or reconfigure */
+ if (vid == vlan->vid)
+ continue;
+
+ ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+ &fid, &member, &valid);
+ if (valid && (member & BIT(port)))
+ return -EINVAL;
+ }
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+ p->remove_tag = untagged;
+ }
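/* Editorial note, not part of the patch -- a worked example of the check
 * above: assume port 1 already carries VLAN 10 with the untagged flag set
 * (so p->remove_tag is true).  Adding VLAN 20 as tagged on port 1 would
 * require flipping the per-port Remove Tag bit, which would also start
 * tagging VLAN 10, so the loop finds VID 10 valid with BIT(1) set and the
 * request is rejected with -EINVAL.  If no other VLAN had port 1 as a
 * member, the flag is simply flipped and p->remove_tag updated.
 */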
ksz8_r_vlan_table(dev, vlan->vid, &data);
ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
u16 vid;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
- vid &= 0xfff;
+ vid &= ~VLAN_VID_MASK;
vid |= new_pvid;
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+ ksz8_port_enable_pvid(dev, port, true);
}
return 0;
@@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, pvid, new_pvid = 0;
+ u16 data, pvid;
u8 fid, member, valid;
if (ksz_is_ksz88x3(dev))
@@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
pvid = pvid & 0xFFF;
- ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
-
ksz8_r_vlan_table(dev, vlan->vid, &data);
ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
valid = 0;
}
- if (pvid == vlan->vid)
- new_pvid = 1;
-
ksz8_to_vlan(dev, fid, member, valid, &data);
ksz8_w_vlan_table(dev, vlan->vid, data);
- if (new_pvid != pvid)
- ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+ if (pvid == vlan->vid)
+ ksz8_port_enable_pvid(dev, port, false);
return 0;
}
@@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+ if (!ksz_is_ksz88x3(dev))
+ ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
BROADCAST_STORM_RATE,
@@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
/* set the real number of ports */
dev->ds->num_ports = dev->port_cnt;
+ /* We rely on software untagging on the CPU port, so that we
+ * can support both tagged and untagged VLANs
+ */
+ dev->ds->untag_bridge_pvid = true;
+
+ /* VLAN filtering is partly controlled by the global VLAN
+ * Enable flag
+ */
+ dev->ds->vlan_filtering_is_global = true;
+
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index a32355624f31..6b40bc25f7ff 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -631,6 +631,10 @@
#define REG_PORT_4_OUT_RATE_3 0xEE
#define REG_PORT_5_OUT_RATE_3 0xFE
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID 0xC2
+
/* PME */
#define SW_PME_OUTPUT_ENABLE BIT(1)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a7e5ac60baef..1542bfb8b5e5 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (!(dev->port_mask & BIT(port_num)))
+ if (!(dev->port_mask & BIT(port_num))) {
+ of_node_put(port);
return -EINVAL;
+ }
of_get_phy_mode(port,
&dev->ports[port_num].interface);
}
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 2e6bfd333f50..1597c63988b4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -27,6 +27,7 @@ struct ksz_port_mib {
struct ksz_port {
u16 member;
u16 vid_member;
+ bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
int stp_state;
struct phy_device phydev;
@@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret) {
- /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
- value[0] = swab32(value[0]);
- value[1] = swab32(value[1]);
- *val = swab64((u64)*value);
- }
+ if (!ret)
+ *val = (u64)value[0] << 32 | value[1];
return ret;
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 93136f7e69f5..632f0fcc5aa7 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(2, 0x48, "TxBytes"),
MIB_DESC(1, 0x60, "RxDrop"),
MIB_DESC(1, 0x64, "RxFiltering"),
+ MIB_DESC(1, 0x68, "RxUnicast"),
MIB_DESC(1, 0x6c, "RxMulticast"),
MIB_DESC(1, 0x70, "RxBroadcast"),
MIB_DESC(1, 0x74, "RxAlignErr"),
@@ -366,6 +367,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
int i;
reg[1] |= vid & CVID_MASK;
+ if (vid > 1)
+ reg[1] |= ATA2_IVL;
reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
/* STATIC_ENT indicate that entry is static wouldn't
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 334d610a503d..b19b389ff10a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -79,6 +79,7 @@ enum mt753x_bpdu_port_fw {
#define STATIC_EMP 0
#define STATIC_ENT 3
#define MT7530_ATA2 0x78
+#define ATA2_IVL BIT(15)
/* Register for address table write data */
#define MT7530_ATWD 0x7c
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 05af632b0f59..634a48e6616b 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX
config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
- depends on PTP_1588_CLOCK
+ depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 961fa6b75cad..272b0535d946 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2155,7 +2155,7 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
int i, err;
if (!vid)
- return -EOPNOTSUPP;
+ return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
@@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index e4fbef81bc52..b1d46dd8eaab 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6390_serdes_get_lane(chip, port) < 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
@@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
struct mv88e6390_serdes_hw_stat *stat;
int i;
- if (mv88e6390_serdes_get_lane(chip, port) < 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
int lane;
int i;
- lane = mv88e6390_serdes_get_lane(chip, port);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index ca2ad77b71f1..563d8a279030 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -101,6 +101,23 @@
AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
AR9331_SW_PORT_STATUS_SPEED_M)
+#define AR9331_SW_REG_PORT_CTRL(_port) (0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED 0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING 1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING 2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING 3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD 4
+
+#define AR9331_SW_REG_PORT_VLAN(_port) (0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE 3
+#define AR9331_SW_8021Q_MODE_CHECK 2
+#define AR9331_SW_8021Q_MODE_FALLBACK 1
+#define AR9331_SW_8021Q_MODE_NONE 0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER GENMASK(25, 16)
+
/* MIB registers */
#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))
@@ -371,12 +388,60 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
return 0;
}
-static int ar9331_sw_setup(struct dsa_switch *ds)
+static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
struct regmap *regmap = priv->regmap;
+ u32 port_mask, port_ctrl, val;
int ret;
+ /* Generate default port settings */
+ port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
+ AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* CPU port should be allowed to communicate with all user
+ * ports.
+ */
+ port_mask = dsa_user_ports(ds);
+ /* Enable Atheros header on CPU port. This will allow us
+ * to communicate with each port separately
+ */
+ port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
+ } else if (dsa_is_user_port(ds, port)) {
+ /* User ports should communicate only with the CPU port.
+ */
+ port_mask = BIT(dsa_upstream_port(ds, port));
+ } else {
+ /* Other ports do not need to communicate at all */
+ port_mask = 0;
+ }
+
+ val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
+ AR9331_SW_8021Q_MODE_NONE) |
+ FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
+ if (ret)
+ goto error;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
+
+ return ret;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret, i;
+
ret = ar9331_sw_reset(priv);
if (ret)
return ret;
@@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
if (ret)
goto error;
+ for (i = 0; i < ds->num_ports; i++) {
+ ret = ar9331_sw_setup_port(ds, i);
+ if (ret)
+ goto error;
+ }
+
ds->configure_vlan_while_not_filtering = false;
return 0;
@@ -837,16 +908,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
return 0;
}
- ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+ /* In case of this switch we work with 32bit registers on top of 16bit
+ * bus. Some registers (for example access to forwarding database) have
+ * trigger bit on the first 16bit half of request, the result and
+ * configuration of request in the second half.
+ * To make it work properly, we should do the second part of transfer
+ * before the first one is done.
+ */
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+ val >> 16);
if (ret < 0)
goto error;
- ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
- val >> 16);
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
if (ret < 0)
goto error;
return 0;
+
error:
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
return ret;
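/* Editorial sketch, not part of the patch: the 32-bit write split that the
 * comment above describes, with the upper half written first.  The helper
 * mdio_write16() and the MDIO_MODE_REG constant are placeholders.
 */
static int sketch_sw_write32(struct mii_bus *sbus, u16 reg, u32 val)
{
	int ret;

	/* Upper 16 bits go to reg + 2; they carry no trigger bit. */
	ret = mdio_write16(sbus, MDIO_MODE_REG, reg + 2, val >> 16);
	if (ret < 0)
		return ret;

	/* Lower 16 bits are written last, as they may trigger the access. */
	return mdio_write16(sbus, MDIO_MODE_REG, reg, val & 0xffff);
}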
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 56fead68ea9f..147709131c13 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
hostcmd = SJA1105_HOSTCMD_INVALIDATE;
}
sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+ sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
/* Hack - The hardware takes the 'index' field within
* struct sja1105_l2_lookup_entry as the index on which this command
@@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
* such that our API doesn't need to ask for a full-blown entry
* structure when e.g. a delete is requested.
*/
- sja1105_packing(buf, &cmd->index, 15, 6,
- SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
-}
-
-static void
-sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
- enum packing_op op)
-{
- int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
-
- return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+ sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
}
static void
sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
- int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+ sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
- return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+ sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
}
/* The switch is so retarded that it makes our command/entry abstraction
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 4f0545605f6b..49eb0ac41b7d 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
for (i = 0; i < ds->num_ports; i++) {
mac[i] = default_mac;
- if (i == dsa_upstream_port(priv->ds, i)) {
- /* STP doesn't get called for CPU port, so we need to
- * set the I/O parameters statically.
- */
- mac[i].dyn_learn = true;
- mac[i].ingress = true;
- mac[i].egress = true;
- }
+
+ /* Let sja1105_bridge_stp_state_set() keep address learning
+ * enabled for the CPU port.
+ */
+ if (dsa_is_cpu_port(ds, i))
+ priv->learn_ena |= BIT(i);
}
return 0;
@@ -399,6 +397,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
list_add(&v->list, &priv->dsa_8021q_vlans);
+
+ v = kmemdup(v, sizeof(*v), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ list_add(&v->list, &priv->bridge_vlans);
}
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
@@ -1314,10 +1318,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
int sja1105et_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
- struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int last_unused = -1;
+ int start, end, i;
int bin, way, rc;
bin = sja1105et_fdb_hash(priv, addr, vid);
@@ -1329,7 +1334,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
* mask? If yes, we need to do nothing. If not, we need
* to rewrite the entry by adding this port to it.
*/
- if (l2_lookup.destports & BIT(port))
+ if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
return 0;
l2_lookup.destports |= BIT(port);
} else {
@@ -1360,6 +1365,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
index, NULL, false);
}
}
+ l2_lookup.lockeds = true;
l2_lookup.index = sja1105et_fdb_index(bin, way);
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
@@ -1368,6 +1374,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;
+ /* Invalidate a dynamically learned entry if that exists */
+ start = sja1105et_fdb_index(bin, 0);
+ end = sja1105et_fdb_index(bin, way);
+
+ for (i = start; i < end; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &tmp);
+ if (rc == -ENOENT)
+ continue;
+ if (rc)
+ return rc;
+
+ if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL, false);
+ if (rc)
+ return rc;
+
+ break;
+ }
+
return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
@@ -1409,32 +1438,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port,
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
- struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
struct sja1105_private *priv = ds->priv;
int rc, i;
/* Search for an existing entry in the FDB table */
l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.vlanid = vid;
- l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
- l2_lookup.mask_vlanid = VLAN_VID_MASK;
- l2_lookup.mask_iotag = BIT(0);
- } else {
- l2_lookup.mask_vlanid = 0;
- l2_lookup.mask_iotag = 0;
- }
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.destports = BIT(port);
+ tmp = l2_lookup;
+
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
- SJA1105_SEARCH, &l2_lookup);
- if (rc == 0) {
- /* Found and this port is already in the entry's
+ SJA1105_SEARCH, &tmp);
+ if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+ /* Found a static entry and this port is already in the entry's
* port mask => job done
*/
- if (l2_lookup.destports & BIT(port))
+ if ((tmp.destports & BIT(port)) && tmp.lockeds)
return 0;
+
+ l2_lookup = tmp;
+
/* l2_lookup.index is populated by the switch in case it
* found something.
*/
@@ -1456,16 +1483,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
dev_err(ds->dev, "FDB is full, cannot add entry.\n");
return -EINVAL;
}
- l2_lookup.lockeds = true;
l2_lookup.index = i;
skip_finding_an_index:
+ l2_lookup.lockeds = true;
+
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup.index, &l2_lookup,
true);
if (rc < 0)
return rc;
+ /* The switch learns dynamic entries and looks up the FDB left to
+ * right. It is possible that our addition was concurrent with the
+ * dynamic learning of the same address, so now that the static entry
+ * has been installed, we are certain that address learning for this
+ * particular address has been turned off, so the dynamic entry either
+ * is in the FDB at an index smaller than the static one, or isn't (it
+ * can also be at a larger index, but in that case it is inactive
+ * because the static FDB entry will match first, and the dynamic one
+ * will eventually age out). Search for a dynamically learned address
+ * prior to our static one and invalidate it.
+ */
+ tmp = l2_lookup;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &tmp);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "port %d failed to read back entry for %pM vid %d: %pe\n",
+ port, addr, vid, ERR_PTR(rc));
+ return rc;
+ }
+
+ if (tmp.index < l2_lookup.index) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ tmp.index, NULL, false);
+ if (rc < 0)
+ return rc;
+ }
+
return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
@@ -1479,15 +1536,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.vlanid = vid;
- l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
- l2_lookup.mask_vlanid = VLAN_VID_MASK;
- l2_lookup.mask_iotag = BIT(0);
- } else {
- l2_lookup.mask_vlanid = 0;
- l2_lookup.mask_iotag = 0;
- }
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.destports = BIT(port);
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1585,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
/* We need to hide the dsa_8021q VLANs from the user. */
if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
l2_lookup.vlanid = 0;
- cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+ rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -3135,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
}
sja1105_devlink_teardown(ds);
+ sja1105_mdiobus_unregister(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 19aea8fb76f6..705d3900e43a 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
struct mii_bus *bus;
int rc = 0;
- np = of_find_compatible_node(mdio_node, NULL,
- "nxp,sja1110-base-tx-mdio");
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
if (!np)
return 0;
@@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
struct mii_bus *bus;
int rc = 0;
- np = of_find_compatible_node(mdio_node, NULL,
- "nxp,sja1110-base-t1-mdio");
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
if (!np)
return 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 7dff20350865..f19370c33444 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw)
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
+ if (hw->nic_type == athr_mt) {
+ hw->phy_configured = true;
+ return 0;
+ }
+
if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
dev_err(&pdev->dev, "Error get phy ID\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1a6ec1a12d53..b5d954cb409a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
+ rc = bnx2x_alloc_fw_stats_mem(bp);
+ if (rc)
LOAD_ERROR_EXIT(bp, load_error0);
/* request pf to initialize status blocks */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f56245eeef7b..8a97640cdfe7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -72,7 +72,8 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
-#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
+ NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ u16 prod)
+{
+ bnxt_db_write(bp, &txr->tx_db, prod);
+ txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ struct netdev_queue *txq)
+{
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ netif_tx_wake_queue(txq);
+ return false;
+ }
+
+ return true;
+}
+
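/* Editorial sketch, not part of the patch: the completion-side half of the
 * stop/wake protocol that the barrier comment above refers to.  ring_space()
 * and WAKE_THRESH are placeholders; the ordering is the point.
 */
static void sketch_tx_completion(struct netdev_queue *txq,
				 struct bnxt_tx_ring_info *txr, u16 new_cons)
{
	txr->tx_cons = new_cons;	/* publish freed descriptors first */
	smp_mb();			/* pairs with smp_mb() in the helper above */
	if (netif_tx_queue_stopped(txq) && ring_space(txr) > WAKE_THRESH)
		netif_tx_wake_queue(txq);
}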
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
+ atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
- netif_tx_stop_queue(txq);
- return NETDEV_TX_BUSY;
+ /* We must have raced with NAPI cleanup */
+ if (net_ratelimit() && txr->kick_pending)
+ netif_warn(bp, tx_err, dev,
+ "bnxt: ring busy w/ flush pending!\n");
+ if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ return NETDEV_TX_BUSY;
}
length = skb->len;
@@ -426,7 +459,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+ if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+ &ptp->tx_hdr_off)) {
+ if (vlan_tag_flags)
+ ptp->tx_hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
} else {
@@ -514,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
- if (skb_pad(skb, pad)) {
+ if (skb_pad(skb, pad))
/* SKB already freed. */
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ goto tx_kick_pending;
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
- tx_buf->skb = NULL;
- return NETDEV_TX_OK;
- }
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_free;
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -615,24 +646,17 @@ normal_tx:
txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
- bnxt_db_write(bp, &txr->tx_db, prod);
+ bnxt_txr_db_kick(bp, txr, prod);
+ else
+ txr->kick_pending = 1;
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push)
- bnxt_db_write(bp, &txr->tx_db, prod);
-
- netif_tx_stop_queue(txq);
+ bnxt_txr_db_kick(bp, txr, prod);
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
- netif_tx_wake_queue(txq);
+ bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;
@@ -645,7 +669,6 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod);
@@ -659,7 +682,13 @@ tx_dma_error:
PCI_DMA_TODEVICE);
}
+tx_free:
dev_kfree_skb_any(skb);
+tx_kick_pending:
+ if (txr->kick_pending)
+ bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -729,14 +758,9 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
- txr->dev_state != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
- __netif_tx_unlock(txq);
- }
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1671,11 +1695,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
(skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
- u16 vlan_proto = tpa_info->metadata >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ __be16 vlan_proto = htons(tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
}
skb_checksum_none_assert(skb);
@@ -1759,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
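/* Editorial note, not part of the patch: the completion entry is written by
 * the NIC via DMA, and the driver uses the valid bit to tell whether the
 * whole entry has landed.  The dma_rmb() added above keeps the CPU from
 * reading the remaining fields before the valid test, i.e. check
 * RX_CMP_VALID(), issue dma_rmb(), and only then parse the rest of the
 * descriptor.
 */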
@@ -1897,9 +1930,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
(skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ __be16 vlan_proto = htons(meta_data >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT);
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ if (eth_type_vlan(vlan_proto)) {
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ } else {
+ dev_kfree_skb(skb);
+ goto next_rx;
+ }
}
skb_checksum_none_assert(skb);
@@ -1975,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2440,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
@@ -7563,8 +7610,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
- if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
__bnxt_hwrm_ptp_qcfg(bp);
+ } else {
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -9110,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
-
- napi_disable(&bp->bnapi[i]->napi);
}
}
@@ -9147,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
/* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev);
/* Stop all TX queues */
@@ -9163,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = 0;
+ WRITE_ONCE(txr->dev_state, 0);
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up)
netif_carrier_on(bp->dev);
@@ -10123,7 +10177,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- bnxt_ptp_start(bp);
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
@@ -10197,6 +10250,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
{
int rc = 0;
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
+ rc = -ENODEV;
+ goto half_open_err;
+ }
+
rc = bnxt_alloc_mem(bp, false);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -10256,9 +10315,16 @@ static int bnxt_open(struct net_device *dev)
rc = bnxt_hwrm_if_change(bp, true);
if (rc)
return rc;
+
+ if (bnxt_ptp_init(bp)) {
+ netdev_warn(dev, "PTP initialization failed.\n");
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
rc = __bnxt_open_nic(bp, true, true);
if (rc) {
bnxt_hwrm_if_change(bp, false);
+ bnxt_ptp_clear(bp);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -10349,6 +10415,7 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ bnxt_ptp_clear(bp);
bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
@@ -10737,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true;
return false;
}
+ /* 212 firmware is broken for aRFS */
+ if (BNXT_FW_MAJ(bp) == 212)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -11335,6 +11405,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
}
+ bnxt_ptp_clear(bp);
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
bnxt_clear_int_mode(bp);
@@ -11959,10 +12030,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
(bp->fw_reset_max_dsecs * HZ / 10));
}
+static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
+{
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
+ bnxt_ulp_start(bp, rc);
+ bnxt_dl_health_status_update(bp, false);
+ }
+ bp->fw_reset_state = 0;
+ dev_close(bp->dev);
+}
+
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
- int rc;
+ int rc = 0;
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
@@ -11992,6 +12074,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->fw_reset_timestamp = jiffies;
rtnl_lock();
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
+ }
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
@@ -12039,6 +12126,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (val == 0xffff) {
if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ rc = -ETIMEDOUT;
goto fw_reset_abort;
}
bnxt_queue_fw_reset_work(bp, HZ / 1000);
@@ -12048,6 +12136,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+ rc = -ENODEV;
goto fw_reset_abort;
}
pci_set_master(bp->pdev);
@@ -12074,18 +12163,18 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
rc = bnxt_open(bp->dev);
if (rc) {
- netdev_err(bp->dev, "bnxt_open_nic() failed\n");
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
}
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, rc);
- if (!rc)
- bnxt_reenable_sriov(bp);
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_dl_health_recovery_done(bp);
@@ -12103,12 +12192,8 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
- bnxt_dl_health_status_update(bp, false);
- bp->fw_reset_state = 0;
rtnl_lock();
- dev_close(bp->dev);
+ bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
}
@@ -12662,7 +12747,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
- bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -13246,11 +13330,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc);
}
- if (bnxt_ptp_init(bp)) {
- netdev_warn(dev, "PTP initialization failed.\n");
- kfree(bp->ptp_cfg);
- bp->ptp_cfg = NULL;
- }
bnxt_inv_fw_health_reg(bp);
bnxt_dl_register(bp);
@@ -13436,7 +13515,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bcf8d00b8c80..ba4e0fc38520 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
+ u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 8e90224c43a2..8a68df4d9e59 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
+ bool zero = false;
u8 max_tc = 0;
int i;
@@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
break;
case IEEE_8021QAZ_TSA_ETS:
total_ets_bw += ets->tc_tx_bw[i];
+ zero = zero || !ets->tc_tx_bw[i];
break;
default:
return -ENOTSUPP;
}
}
- if (total_ets_bw > 100)
+ if (total_ets_bw > 100) {
+ netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
return -EINVAL;
+ }
+ if (zero && total_ets_bw == 100) {
+ netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
+ return -EINVAL;
+ }
if (max_tc >= bp->max_tc)
*tc = bp->max_tc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 3fc6781c5b98..94d07a9f7034 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -368,6 +368,7 @@ struct cmd_nums {
#define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
#define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -531,8 +532,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.2.47"
+#define HWRM_VERSION_RSVD 52
+#define HWRM_VERSION_STR "1.10.2.52"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -585,6 +586,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
#define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
#define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
@@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input {
__le16 num_vnics;
__le16 num_stat_ctxs;
__le16 num_hw_ring_grps;
- u8 unused_0[4];
+ __le16 num_tx_key_ctxs;
+ __le16 num_rx_key_ctxs;
};
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:704b/88B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
- u8 unused_1;
+ __le16 max_key_ctxs_alloc;
+ u8 unused_1[7];
u8 valid;
};
@@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:832b/104B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- u8 unused_3;
+ __le16 alloc_tx_key_ctxs;
+ __le16 alloc_rx_key_ctxs;
+ u8 unused_3[5];
u8 valid;
};
-/* hwrm_func_cfg_input (size:832b/104B) */
+/* hwrm_func_cfg_input (size:896b/112B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
#define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
#define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
__le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
+ __le16 num_tx_key_ctxs;
+ __le16 num_rx_key_ctxs;
+ u8 unused_0[4];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_tx_scheduler_inputs;
__le16 flags;
#define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_tx_key_ctxs;
+ __le16 max_tx_key_ctxs;
+ __le16 min_rx_key_ctxs;
+ __le16 max_rx_key_ctxs;
u8 unused_0[5];
u8 valid;
};
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
struct hwrm_func_vf_resource_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_hw_ring_grps;
__le16 flags;
#define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_tx_key_ctxs;
+ __le16 max_tx_key_ctxs;
+ __le16 min_rx_key_ctxs;
+ __le16 max_rx_key_ctxs;
u8 unused_0[2];
};
@@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- u8 unused_0[7];
+ __le16 reserved_tx_key_ctxs;
+ __le16 reserved_rx_key_ctxs;
+ u8 unused_0[3];
u8 valid;
};
@@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
-/* hwrm_port_ts_query_input (size:256b/32B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
struct hwrm_port_ts_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input {
__le16 enables;
#define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
#define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
__le16 ts_req_timeout;
__le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
};
/* hwrm_port_ts_query_output (size:192b/24B) */
@@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input {
u8 host_idx;
u8 flags;
#define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
u8 unused_0[4];
};
@@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output {
u8 valid;
};
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
struct hwrm_nvm_write_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input {
__le16 option;
__le16 flags;
#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
__le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
__le32 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f698b6bd4ff8..81f40ab748f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -20,7 +20,7 @@
#include "bnxt.h"
#include "bnxt_ptp.h"
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
struct ptp_header *hdr;
@@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
if (!hdr)
return -EINVAL;
+ *hdr_off = (u8 *)hdr - skb->data;
*seq_id = ntohs(hdr->sequence_id);
return 0;
default:
@@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+ req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
}
mutex_lock(&bp->hwrm_cmd_lock);
@@ -353,6 +355,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
+ if (time_after_eq(now, ptp->next_overflow_check)) {
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
+ }
return HZ;
}
@@ -385,22 +393,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
return 0;
}
-void bnxt_ptp_start(struct bnxt *bp)
-{
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-
- if (!ptp)
- return;
-
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- spin_lock_bh(&ptp->ptp_lock);
- ptp->current_time = bnxt_refclk_read(bp, NULL);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
- ptp_schedule_worker(ptp->ptp_clock, 0);
- }
-}
-
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -439,6 +431,7 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp->cc.shift = 0;
ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
ptp->ptp_info = bnxt_ptp_caps;
@@ -450,7 +443,13 @@ int bnxt_ptp_init(struct bnxt *bp)
bnxt_unmap_ptp_regs(bp);
return err;
}
-
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ spin_lock_bh(&ptp->ptp_lock);
+ ptp->current_time = bnxt_refclk_read(bp, NULL);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+ spin_unlock_bh(&ptp->ptp_lock);
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 6b6245750e20..524f1c272054 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -10,8 +10,8 @@
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
-#define BNXT_PTP_GRC_WIN 5
-#define BNXT_PTP_GRC_WIN_BASE 0x5000
+#define BNXT_PTP_GRC_WIN 6
+#define BNXT_PTP_GRC_WIN_BASE 0x6000
#define BNXT_MAX_PHC_DRIFT 31000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
@@ -19,7 +19,8 @@
#define BNXT_PTP_QTS_TIMEOUT 1000
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
- PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+ PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
@@ -32,7 +33,12 @@ struct bnxt_ptp_cfg {
u64 current_time;
u64 old_time;
unsigned long next_period;
+ unsigned long next_overflow_check;
+ /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+
u16 tx_seqid;
+ u16 tx_hdr_off;
struct bnxt *bp;
atomic_t tx_avail;
#define BNXT_MAX_TX_TS 1
@@ -70,12 +76,11 @@ do { \
((dst) = READ_ONCE(src))
#endif
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-void bnxt_ptp_start(struct bnxt *bp);
int bnxt_ptp_init(struct bnxt *bp);
void bnxt_ptp_clear(struct bnxt *bp);
#endif
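The 78-hour figure in the BNXT_PHC_OVERFLOW_PERIOD comment above follows from a 48-bit nanosecond counter: 2^48 ns is roughly 281,475 seconds, about 78.2 hours, so reading the timecounter every 19 hours (roughly a quarter of the wrap interval) samples the cyclecounter well before it wraps. A quick arithmetic check as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wrap_ns = 1ULL << 48;			/* 48-bit PHC counting nanoseconds */
	double wrap_hours = (double)wrap_ns / 1e9 / 3600.0;

	printf("48-bit PHC wraps after %.1f hours\n", wrap_hours);	/* ~78.2 */
	printf("overflow check period: 19 hours (~1/4 of the wrap)\n");
	return 0;
}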
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index a918e374f3c5..187ff643ad2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
if (!edev)
return ERR_PTR(-ENOMEM);
edev->en_ops = &bnxt_en_ops_tbl;
- if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
- edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
- if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
- edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
+ edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
return bp->edev;
}
EXPORT_SYMBOL(bnxt_ulp_probe);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 41f7f078cd27..db74241935ab 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
+ EXT_ENERGY_DET_MASK);
if (GENET_IS_V5(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
@@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
+ unsigned int i;
u32 reg;
u32 dma_ctrl;
/* disable DMA */
dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
@@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
- u32 reg;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
- u32 reg;
int ret;
if (!netif_running(dev))
@@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index facde824bcaa..e31a5a397f11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- if (priv->hw_params->flags & GENET_HAS_EXT) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg &= ~EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 4cddd628d41b..9ed3d1ab2ca5 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
- reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+ reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
/* for VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9a2b166d651e..dbf9a0e6601d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_uld(adapter)) {
- detach_ulds(adapter);
- t4_uld_clean_up(adapter);
- }
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]->reg_state == NETREG_REGISTERED)
- unregister_netdev(adapter->port[i]);
-
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 743af9e654aa..17faac715882 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index f6ff1f76eacb..1876f15dd827 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
void __iomem *ioaddr;
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) return i;
pci_set_master(pdev);
@@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
if (!ioaddr)
- goto err_out_free_res;
+ goto err_out_netdev;
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_cleardev:
pci_iounmap(pdev, ioaddr);
-err_out_free_res:
- pci_release_regions(pdev);
err_out_netdev:
free_netdev (dev);
return -ENODEV;
@@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
- pci_release_regions(pdev);
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index f3d12d0714fb..98cc0133c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
if (err)
return err;
- err = dpaa2_switch_seed_bp(ethsw);
- if (err)
- goto err_free_dpbp;
-
err = dpaa2_switch_alloc_rings(ethsw);
if (err)
- goto err_drain_dpbp;
+ goto err_free_dpbp;
err = dpaa2_switch_setup_dpio(ethsw);
if (err)
goto err_destroy_rings;
+ err = dpaa2_switch_seed_bp(ethsw);
+ if (err)
+ goto err_deregister_dpio;
+
err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err) {
dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
- goto err_deregister_dpio;
+ goto err_drain_dpbp;
}
return 0;
+err_drain_dpbp:
+ dpaa2_switch_drain_bp(ethsw);
err_deregister_dpio:
dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
dpaa2_switch_destroy_rings(ethsw);
-err_drain_dpbp:
- dpaa2_switch_drain_bp(ethsw);
err_free_dpbp:
dpaa2_switch_free_dpbp(ethsw);
@@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ destroy_workqueue(ethsw->workqueue);
+
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
-static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
-{
- dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- dpaa2_switch_free_dpio(ethsw);
- dpaa2_switch_destroy_rings(ethsw);
- dpaa2_switch_drain_bp(ethsw);
- dpaa2_switch_free_dpbp(ethsw);
-}
-
static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
struct ethsw_port_priv *port_priv;
@@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
dev = &sw_dev->dev;
ethsw = dev_get_drvdata(dev);
- dpaa2_switch_ctrl_if_teardown(ethsw);
-
dpaa2_switch_teardown_irqs(sw_dev);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
kfree(ethsw->acls);
kfree(ethsw->ports);
- dpaa2_switch_takedown(sw_dev);
-
- destroy_workqueue(ethsw->workqueue);
+ dpaa2_switch_teardown(sw_dev);
fsl_mc_portal_free(ethsw->mc_io);
@@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
GFP_KERNEL);
if (!(ethsw->ports)) {
err = -ENOMEM;
- goto err_takedown;
+ goto err_teardown;
}
ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@@ -3270,8 +3270,8 @@ err_free_fdbs:
err_free_ports:
kfree(ethsw->ports);
-err_takedown:
- dpaa2_switch_takedown(sw_dev);
+err_teardown:
+ dpaa2_switch_teardown(sw_dev);
err_free_cmdport:
fsl_mc_portal_free(ethsw->mc_io);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8aea707a65a7..7e4c4980ced7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev)
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
- free_netdev(ndev);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 46ecb42f2ef8..d9fc5c456bf3 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev)
| SUPPORTED_Autoneg \
| SUPPORTED_Pause \
| SUPPORTED_Asym_Pause \
+ | SUPPORTED_FIBRE \
| SUPPORTED_MII)
static DEFINE_MUTEX(eth_lock);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 867e87af3432..099a2bc5ae67 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err)
- return -ENXIO;
+ return err;
err = pci_request_regions(pdev, "gvnic-cfg");
if (err)
@@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
goto abort_with_pci_region;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
dev_err(&pdev->dev, "could not allocate netdev\n");
+ err = -ENOMEM;
goto abort_with_db_bar;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err)
- goto abort_with_wq;
+ goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
@@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_work(priv->gve_wq, &priv->service_task);
return 0;
+abort_with_gve_init:
+ gve_teardown_priv_resources(priv);
+
abort_with_wq:
destroy_workqueue(priv->gve_wq);
@@ -1590,7 +1587,7 @@ abort_with_pci_region:
abort_with_enabled:
pci_disable_device(pdev);
- return -ENXIO;
+ return err;
}
static void gve_remove(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 77bb8227f89b..8500621b2cd4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- /* Prefetch the payload header. */
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
-#if L1_CACHE_BYTES < 128
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
- L1_CACHE_BYTES);
-#endif
-
if (eop && buf_len <= priv->rx_copybreak) {
rx->skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 12f6c2442a7a..e53512f6878a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -131,7 +131,7 @@
/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT 6
#define PPE_TX_BUF_HOLD BIT(31)
-#define CACHE_LINE_MASK 0x3F
+#define SOC_CACHE_LINE_MASK 0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
@@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#if defined(CONFIG_HI13X1_GMAC)
desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
- desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
- desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+ desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+ desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
desc->send_addr = (__force u32)cpu_to_be32(phys);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 0a6cda309b24..aa86a81c8f4a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status {
u32 origin_mbx_msg;
bool received_resp;
int resp_status;
+ u16 match_id;
u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
};
@@ -143,7 +144,8 @@ struct hclge_mbx_vf_to_pf_cmd {
u8 mbx_need_resp;
u8 rsv1[1];
u8 msg_len;
- u8 rsv2[3];
+ u8 rsv2;
+ u16 match_id;
struct hclge_vf_to_pf_msg msg;
};
@@ -153,7 +155,8 @@ struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
u8 msg_len;
- u8 rsv1[3];
+ u8 rsv1;
+ u16 match_id;
struct hclge_pf_to_vf_msg msg;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index dd3354a57c62..ebeaf12e409b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
if (ret)
return ret;
- if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
+ if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
!enable);
- else if (!vport->vport_id)
+ } else if (!vport->vport_id) {
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ enable = false;
+
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
HCLGE_FILTER_FE_INGRESS,
enable, 0);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index e10a2c36b706..c0a478ae9583 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+ resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 3b1f84502e36..befa9bcc2f2f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -5,9 +5,27 @@
#include "hclge_main.h"
#include "hnae3.h"
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+ HCLGE_PTP_CYCLE_QUO_MASK;
+ ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+ if (ptp->cycle.den == 0) {
+ dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
u64 adj_val, adj_base, diff;
unsigned long flags;
bool is_neg = false;
@@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
is_neg = true;
}
- adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+ adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
adj_val = adj_base * ppb;
diff = div_u64(adj_val, 1000000000ULL);
@@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/* This clock cycle is defined by three part: quotient, numerator
* and denominator. For example, 2.5ns, the quotient is 2,
- * denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator
- * is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+ * denominator is fixed to ptp->cycle.den, and numerator
+ * is 0.5 * ptp->cycle.den.
*/
- quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+ quo = div_u64_rem(adj_val, cycle->den, &numerator);
spin_lock_irqsave(&hdev->ptp->lock, flags);
- writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
- writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
- hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
writel(HCLGE_PTP_CYCLE_ADJ_EN,
hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
spin_unlock_irqrestore(&hdev->ptp->lock, flags);
@@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_create_clock(hdev);
if (ret)
return ret;
+
+ ret = hclge_ptp_get_cycle(hdev);
+ if (ret)
+ return ret;
}
ret = hclge_ptp_int_en(hdev, true);
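The adjfreq change above expresses the nominal cycle as quo + numer/den (now read from hardware by hclge_ptp_get_cycle()) and scales it by ppb parts per billion before splitting the result back into quotient and remainder with div_u64_rem(). A standalone numeric sketch of that split, assuming a hypothetical 2.5 ns cycle with den = 1,000,000,000; the real quo/numer/den come from the PTP registers, and the driver adds or subtracts the scaled part depending on the sign of ppb:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t quo = 2, numer = 500000000, den = 1000000000;	/* assumed 2.5 ns nominal cycle */
	uint64_t ppb = 100;					/* example adjustment: 100 ppb */

	uint64_t base = quo * den + numer;	/* cycle in units of 1/den ns */
	uint64_t diff = base * ppb / 1000000000ULL;
	uint64_t adj  = base - diff;		/* shorter cycle -> clock runs faster */

	printf("adjusted quo=%llu numer=%llu den=%llu\n",
	       (unsigned long long)(adj / den),
	       (unsigned long long)(adj % den),
	       (unsigned long long)den);
	return 0;
}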
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 5a202b775471..dbf5f4c08019 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -29,6 +29,7 @@
#define HCLGE_PTP_TIME_ADJ_REG 0x60
#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0)
#define HCLGE_PTP_CYCLE_DEN_REG 0x68
#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
#define HCLGE_PTP_CYCLE_CFG_REG 0x70
@@ -37,9 +38,7 @@
#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
-#define HCLGE_PTP_CYCLE_ADJ_BASE 2
#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
-#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
#define HCLGE_PTP_SEC_H_OFFSET 32u
#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
@@ -47,6 +46,12 @@
#define HCLGE_PTP_FLAG_TX_EN 1
#define HCLGE_PTP_FLAG_RX_EN 2
+struct hclge_ptp_cycle {
+ u32 quo;
+ u32 numer;
+ u32 den;
+};
+
struct hclge_ptp {
struct hclge_dev *hdev;
struct ptp_clock *clock;
@@ -58,6 +63,7 @@ struct hclge_ptp {
spinlock_t lock; /* protects ptp registers */
u32 ptp_cfg;
u32 last_tx_seqid;
+ struct hclge_ptp_cycle cycle;
unsigned long tx_start;
unsigned long tx_cnt;
unsigned long tx_skipped;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 52eaf82b7cd7..8784d61e833f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to enable rx vlan offload, ret = %d\n", ret);
+ return ret;
+ }
+
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 9b17735b9f4c..772b2f8acd2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
return resp_code ? -resp_code : 0;
}
+#define HCLGEVF_MBX_MATCH_ID_START 1
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
@@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
hdev->mbx_resp.resp_status = 0;
+ hdev->mbx_resp.match_id++;
+ /* Update match_id and ensure the value of match_id is not zero */
+ if (hdev->mbx_resp.match_id == 0)
+ hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
}
@@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
hclgevf_reset_mbx_resp_status(hdev);
+ req->match_id = hdev->mbx_resp.match_id;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
@@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
resp->additional_info[i] = *temp;
temp++;
}
+
+ /* If match_id is not zero, the PF supports match_id.
+ * If the match_id matches, the VF has received the
+ * right response; otherwise the response is ignored.
+ * The driver clears hdev->mbx_resp before sending the
+ * next message that needs a response.
+ */
+ if (req->match_id) {
+ if (req->match_id == resp->match_id)
+ resp->received_resp = true;
+ } else {
+ resp->received_resp = true;
+ }
break;
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
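The match_id comment above describes a request/response correlation scheme: the VF bumps a non-zero match_id before each request that expects a reply and only accepts a mailbox response carrying the same id, so a late response to an earlier, timed-out request cannot be mistaken for the current one; a zero id means the PF predates the mechanism and the response is accepted as before. A minimal standalone sketch of that idea, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mbx_wait_state {
	uint16_t match_id;	/* id of the request currently awaiting a reply */
	bool received;
};

/* Prepare the next request; 0 stays reserved for "PF without match_id". */
static uint16_t mbx_next_request(struct mbx_wait_state *s)
{
	s->received = false;
	if (++s->match_id == 0)
		s->match_id = 1;
	return s->match_id;	/* carried in the request to the PF */
}

static void mbx_handle_response(struct mbx_wait_state *s, uint16_t resp_id)
{
	if (resp_id == 0 || resp_id == s->match_id)
		s->received = true;	/* legacy PF, or the reply we are waiting for */
	/* anything else is a stale reply to an older request: drop it */
}

int main(void)
{
	struct mbx_wait_state s = { 41, false };	/* arbitrary starting id */
	uint16_t id = mbx_next_request(&s);

	mbx_handle_response(&s, (uint16_t)(id - 1));	/* stale: ignored */
	printf("after stale reply: received=%d\n", s.received);
	mbx_handle_response(&s, id);			/* matches: accepted */
	printf("after matching reply: received=%d\n", s.received);
	return 0;
}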
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 374a75d4faea..a775c69e4fd7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
ret = NETDEV_TX_OK;
goto out;
}
@@ -2420,9 +2420,10 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
- struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
bool saved_state = false;
+ struct ibmvnic_rwi *tmprwi;
+ struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
int rc = 0;
@@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work)
} else {
rc = do_reset(adapter, rwi, reset_state);
}
- kfree(rwi);
+ tmprwi = rwi;
adapter->last_reset_time = jiffies;
if (rc)
@@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
+ /*
+ * If there is another reset queued, free the previous rwi
+ * and process the new reset even if the previous reset failed
+ * (the previous reset could have failed because of a
+ * failover, for instance, so process the failover).
+ *
+ * If there are no resets queued and the previous reset failed,
+ * the adapter would be in an undefined state. So retry the
+ * previous reset as a hard reset.
+ */
+ if (rwi)
+ kfree(tmprwi);
+ else if (rc)
+ rwi = tmprwi;
+
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
- rwi->reset_reason == VNIC_RESET_MOBILITY))
+ rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
adapter->force_reset_recovery = true;
}
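The block above implements the retry policy from the comment: a newer queued reset supersedes the one that just ran (its rwi is freed), but if nothing is queued and the reset failed, the same rwi is reused so the failed reset is retried as a hard reset. A generic standalone sketch of that decision, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct reset_item { int reason; };

/* Decide what to process next after a reset attempt returned rc. */
static struct reset_item *pick_next(struct reset_item *prev, int rc,
				    struct reset_item *queued, bool *force_hard)
{
	*force_hard = false;
	if (queued) {
		free(prev);		/* a newer reset supersedes the old one */
		if (rc)
			*force_hard = true;
		return queued;
	}
	if (rc) {
		*force_hard = true;	/* nothing queued: retry the failed reset, harder */
		return prev;
	}
	free(prev);
	return NULL;
}

int main(void)
{
	bool hard;
	struct reset_item *prev = malloc(sizeof(*prev));
	struct reset_item *next = pick_next(prev, -1, NULL, &hard);

	printf("retry same item: %d, hard retry: %d\n", next == prev, hard);
	free(next);
	return 0;
}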
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d150dade06cf..757a54c39eef 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7664,6 +7664,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index dbcae92bb18d..adfa2768f024 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2227,6 +2227,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3e822bad4851..2c9e4eeb7270 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev,
- "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+ "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
hw_link_info->phy_type);
}
@@ -5294,6 +5294,10 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
+ case I40E_AQ_RC_EAGAIN:
+ dev_warn(&pf->pdev->dev,
+ "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+ return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %s, %s\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861e59a350bd..1d1f52756a93 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4454,11 +4454,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
}
/**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
@@ -4467,7 +4466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q,
- false /*is xdp*/, enable);
+ false /*is xdp*/, true);
if (ret)
break;
@@ -4476,7 +4475,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q + vsi->alloc_queue_pairs,
- true /*is xdp*/, enable);
+ true /*is xdp*/, true);
if (ret)
break;
}
@@ -4574,32 +4573,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+ ret = i40e_control_wait_rx_q(pf, pf_q, true);
if (ret) {
dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ "VSI seid %d Rx ring %d enable timeout\n",
+ vsi->seid, pf_q);
break;
}
}
- /* Due to HW errata, on Rx disable only, the register can indicate done
- * before it really is. Needs 50ms to be sure
- */
- if (!enable)
- mdelay(50);
-
return ret;
}
@@ -4612,29 +4604,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
int ret = 0;
/* do rx first for enable and last for disable */
- ret = i40e_vsi_control_rx(vsi, true);
+ ret = i40e_vsi_enable_rx(vsi);
if (ret)
return ret;
- ret = i40e_vsi_control_tx(vsi, true);
+ ret = i40e_vsi_enable_tx(vsi);
return ret;
}
+#define I40E_DISABLE_TX_GAP_MSEC 50
+
/**
* i40e_vsi_stop_rings - Stop a VSI's rings
* @vsi: the VSI being configured
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, err, q_end;
+
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- /* do rx first for enable and last for disable
- * Ignore return value, we need to shutdown whatever we can
- */
- i40e_vsi_control_tx(vsi, false);
- i40e_vsi_control_rx(vsi, false);
+ q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+ err = i40e_control_wait_rx_q(pf, pf_q, false);
+ if (err)
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d dissable timeout\n",
+ vsi->seid, pf_q);
+ }
+
+ msleep(I40E_DISABLE_TX_GAP_MSEC);
+ pf_q = vsi->base_queue;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+ i40e_vsi_wait_queues_disabled(vsi);
}
/**
@@ -7280,6 +7290,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
}
if (vsi->num_queue_pairs <
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to create traffic channel, insufficient number of queues.\n");
return -EINVAL;
}
if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -13261,6 +13273,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
+ .ndo_select_queue = i40e_lan_select_queue,
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 38eb8151ee9a..10a83e5385c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3631,6 +3631,55 @@ dma_error:
return -1;
}
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 num_tx_queues)
+{
+ u32 jhash_initval_salt = 0xd631614b;
+ u32 hash;
+
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16)skb->protocol ^ skb->hash;
+
+ hash = jhash_1word(hash, jhash_initval_salt);
+
+ return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device __always_unused *sb_dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw;
+ u16 qoffset;
+ u16 qcount;
+ u8 tclass;
+ u16 hash;
+ u8 prio;
+
+ /* is DCB enabled at all? */
+ if (vsi->tc_config.numtc == 1)
+ return netdev_pick_tx(netdev, skb, sb_dev);
+
+ prio = skb->priority;
+ hw = &vsi->back->hw;
+ tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+ /* sanity check */
+ if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+ tclass = 0;
+
+ /* select a queue assigned for the given TC */
+ qcount = vsi->tc_config.tc_info[tclass].qcount;
+ hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+ qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+ return qoffset + hash;
+}
+
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
* @xdpf: data to transmit
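The new i40e_swdcb_skb_tx_hash() above reduces a 32-bit flow hash to a queue index with ((u64)hash * num_tx_queues) >> 32 rather than a modulo, which spreads flows across [0, num_tx_queues) without a division; i40e_lan_select_queue() then adds the traffic class's queue offset. A standalone sketch of that reduction (the queue counts here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [0, n) without a modulo. */
static uint16_t hash_to_queue(uint32_t hash, uint16_t n)
{
	return (uint16_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	uint32_t hashes[] = { 0x00000000u, 0x7fffffffu, 0xffffffffu };
	uint16_t qcount = 8, qoffset = 16;	/* e.g. 8 queues starting at queue 16 */

	for (int i = 0; i < 3; i++)
		printf("hash %#010x -> queue %u\n", hashes[i],
		       qoffset + hash_to_queue(hashes[i], qcount));
	return 0;
}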
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 86fed05b4f19..bfc2845c99d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e8bd04100ecd..90793b36126e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -136,6 +136,7 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
+ bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e612c24fa384..606a01ce4073 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -1506,11 +1507,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_map_rings_to_vectors(adapter);
-
- if (RSS_AQ(adapter))
- adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
- else
- err = iavf_init_rss(adapter);
err:
return err;
}
@@ -2200,6 +2196,14 @@ continue_reset:
goto reset_err;
}
+ if (RSS_AQ(adapter)) {
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+ } else {
+ err = iavf_init_rss(adapter);
+ if (err)
+ goto reset_err;
+ }
+
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
@@ -3798,6 +3802,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 0eab3c43bdc5..3c735968e1b8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -541,6 +541,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
}
/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Submit list of filters based on PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ f->is_new_mac = false;
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+ f->remove = false;
+
+ if (f->is_new_mac) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
+ iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
@@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
switch (v_opcode) {
- case VIRTCHNL_OP_ADD_ETH_ADDR: {
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ if (!v_retval)
+ iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- }
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a450343fbb92..eadcb9958346 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -234,6 +234,7 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
+ ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ef8d1815af56..fe2ded775f25 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ /* Under some circumstances, we might receive a request to delete our
+ * own device address from our uc list. Because we store the device
+ * address in the VSI's MAC filter list, we need to ignore such
+ * requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
ICE_FWD_TO_VSI))
return -EINVAL;
@@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int i, err;
+ if (pdev->is_virtfn) {
+ dev_err(dev, "can't probe a virtual function\n");
+ return -EINVAL;
+ }
+
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/
@@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac)) {
- netdev_warn(netdev, "already using mac %pM\n", mac);
+ netdev_dbg(netdev, "already using mac %pM\n", mac);
return 0;
}
@@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
+ netif_addr_lock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
if (status && status != ICE_ERR_DOES_NOT_EXIST) {
@@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ if (status == ICE_ERR_ALREADY_EXISTS)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- return 0;
- }
-
- /* error if the new filter addition failed */
- if (status)
+ else if (status)
+ /* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
+ netif_addr_unlock_bh(netdev);
return err;
}
/* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ netif_addr_unlock_bh(netdev);
netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
netdev->dev_addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 5d5207b56ca9..9e3ddb9b8b51 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase
*/
if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+ start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
start_time -= E810_OUT_PROP_DELAY_NS;
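The hunk above changes the round-up addend from NSEC_PER_MSEC to NSEC_PER_SEC, so a periodic output whose requested start time has already passed is pushed to the next full second (plus phase) instead of being left in the past. A minimal sketch of the arithmetic with assumed numbers (not driver code, but using the same kernel helpers):

	u64 current_time = 5ULL * NSEC_PER_SEC + 123456789ULL;	/* 5.123456789 s */
	u64 phase = 0;

	/* old addend: only rounds up when within 1 ms of the next second */
	u64 start_old = div64_u64(current_time + NSEC_PER_MSEC - 1, NSEC_PER_SEC) *
			NSEC_PER_SEC + phase;	/* 5.0 s - still in the past */

	/* new addend: always rounds up to the next full second */
	u64 start_new = div64_u64(current_time + NSEC_PER_SEC - 1, NSEC_PER_SEC) *
			NSEC_PER_SEC + phase;	/* 6.0 s - next aligned edge */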
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 2826570dab51..e93430ab37f1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
unsigned int tmp, i;
+ set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+
if (!pf->vf)
return;
@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
i);
clear_bit(ICE_VF_DIS, pf->state);
+ clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
@@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
struct device *dev;
int err = 0;
+ /* if de-init is underway, don't process messages from VF */
+ if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
+ return;
+
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 7e6435dc7e80..171a7a629b20 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
- struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
+ ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -3615,6 +3623,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 9e0bbb2e55e3..5901ed9fb545 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
- return 0;
+ return -EOPNOTSUPP;
}
void igc_reinit_locked(struct igc_adapter *);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 95323095094d..e29aadbc6744 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -6054,6 +6056,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ffff69efd78a..14aea40da50f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
if (ring_uses_build_skb(rx_ring)) {
- unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+ unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+ unsigned long offset = (unsigned long)(skb->data) & mask;
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
@@ -11067,6 +11068,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
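The ixgbe_dma_sync_frag() change above matters when the ring uses receive pages larger than PAGE_SIZE (e.g. order-1, 8 KiB pages): masking skb->data with ~PAGE_MASK drops the bit that says which 4 KiB half of the compound page the data sits in, so the DMA sync could cover the wrong half. A tiny sketch with assumed values (4 KiB PAGE_SIZE, 8 KiB rx page):

	unsigned long data    = 0x1235800UL;	/* skb->data inside an 8 KiB rx page */
	unsigned long pg_size = 8192;		/* ixgbe_rx_pg_size(rx_ring)         */

	unsigned long off_old = data & ~PAGE_MASK;	/* 0x0800 - offset truncated    */
	unsigned long off_new = data & (pg_size - 1);	/* 0x1800 - full buffer offset  */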
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 96dd1a4f956a..b1d22e4d5ec9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
- if (err)
+ if (err) {
+ clear_bit(qid, adapter->af_xdp_zc_qps);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index caaea2c920a6..e3e4676af9e4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
**/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
int ret;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
xs->id.proto);
@@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
**/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 361bc4fbe20b..76a7777c746d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
+
+ /* last fragment */
+ if (len == *size) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ sinfo->nr_frags = xdp_sinfo->nr_frags;
+ memcpy(sinfo->frags, xdp_sinfo->frags,
+ sinfo->nr_frags * sizeof(skb_frag_t));
+ }
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index b9fbc9f000f2..cf8acabb90ac 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
-#define MVPP2_BM_SHORT_FRAME_SIZE 704 /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */
#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */
#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */
/* BM short pool packet size
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1a3455620b38..cc8ac36cf687 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
- rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fac6474ad694..544c96c8fe1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
return test_bit(lmac_id, &cgx->lmac_bmap);
}
+/* Helper function to get the sequential index
+ * of an enabled LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
struct mac_ops *get_mac_ops(void *cgxd)
{
if (!cgxd)
@@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ /* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
+
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64 (mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows caller to change macaddress associated with index
+ * in dmac filter table including index 0 reserved for
+ * interface mac address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64 (mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
mac_ops = cgx_dev->mac_ops;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
}
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ return err;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1243,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[lmac->lmac_id] = lmac;
- cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
continue;
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
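The cgx_lmac_addr_add/del/update helpers above all derive the absolute DMAC CAM slot the same way: the MAX_DMAC_ENTRIES_PER_CGX (32) entries of a CGX are split evenly among its enabled LMACs, an LMAC's share starts at its sequential id (its position among the enabled LMACs, not its raw lmac_id) times that share size, and slot 0 of each share is reserved for the interface MAC. An illustrative helper with assumed values, not part of the patch:

	static int dmac_cam_index(int lmac_seq_id, int enabled_lmacs, int slot)
	{
		/* per-LMAC share, i.e. lmac->mac_to_index_bmap.max */
		int per_lmac = MAX_DMAC_ENTRIES_PER_CGX / enabled_lmacs;

		return lmac_seq_id * per_lmac + slot;
	}
	/* e.g. 3 enabled LMACs -> 10 entries each; seq id 2, slot 4 -> CAM index 24 */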
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 12521262164a..237ba2b56210 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -23,6 +23,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@@ -46,10 +47,12 @@
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
@@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
unsigned long cgx_get_lmac_bmap(void *cgxd);
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 45706fd87120..a8b7b1c7a1d5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -10,17 +10,19 @@
#include "rvu.h"
#include "cgx.h"
/**
- * struct lmac
+ * struct lmac - per lmac locks and properties
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
+ * @mac_to_index_bmap: MAC address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
* @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
* @name: lmac port name
*/
struct lmac {
@@ -29,12 +31,14 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
- bool cmd_pend;
struct cgx *cgx;
+ u8 mcast_filters_count;
u8 lmac_id;
+ bool cmd_pend;
char *name;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 770d86262838..f5ec39de026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
- /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
@@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
@@ -1278,6 +1326,14 @@ struct set_vf_perm {
u64 flags;
};
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
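The new CGX_MAC_ADDR_ADD/DEL messages work on CAM slot indices: the AF picks a free slot, installs the filter and returns the slot in cgx_mac_addr_add_rsp.index, and the requester must echo that value back in cgx_mac_addr_del_req.index (or cgx_mac_addr_update_req.index) later. A hedged sketch of that round trip from the requester's side, with the actual mailbox send/receive calls elided:

	static u8 uc_filter_roundtrip_example(const u8 *uc_mac)
	{
		struct cgx_mac_addr_add_req add_req = {};
		struct cgx_mac_addr_add_rsp add_rsp = {};
		struct cgx_mac_addr_del_req del_req = {};

		ether_addr_copy(add_req.mac_addr, uc_mac);
		/* ...send CGX_MAC_ADDR_ADD; the AF fills add_rsp.index... */

		del_req.index = add_rsp.index;	/* keep the slot for later removal */
		/* ...CGX_MAC_ADDR_DEL with this index removes the filter; index 0
		 * stays reserved for the interface MAC and is only cleared via
		 * CGX_MAC_ADDR_RESET...
		 */
		return add_rsp.index;
	}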
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 19bad9a59c8f..243cf8070e77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+
enum npc_pkind_type {
+ NPC_RX_LBK_PKIND = 0ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 0b092949d7ac..5fe277e354f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
/* Get numVFs attached to this PF and first HWVF */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
- *numvfs = (cfg >> 12) & 0xFF;
- *hwvf = cfg & 0xFFF;
+ if (numvfs)
+ *numvfs = (cfg >> 12) & 0xFF;
+ if (hwvf)
+ *hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
@@ -1314,7 +1316,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr = BLKADDR_NIX0, vf;
@@ -2333,6 +2335,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
@@ -2858,6 +2861,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (!vfs)
return 0;
+ /* LBK channel number 63 is used for switching packets between
+ * CGX mapped VFs. Hence limit LBK pairs to 62.
+ */
+ if (vfs > 62)
+ vfs = 62;
+
/* Save VFs number for reference in VF interrupts handlers.
* Since interrupts might start arriving during SRIOV enablement
* ordinary API cannot be used to get number of enabled VFs.
@@ -3000,6 +3009,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize debugfs */
rvu_dbg_init(rvu);
+ mutex_init(&rvu->rswitch.switch_lock);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 9e5d9ba6f01e..91503fb2762c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -243,6 +243,7 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
unsigned long flags;
};
@@ -414,6 +415,16 @@ struct npc_kpu_profile_adapter {
size_t kpus;
};
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -444,6 +455,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -476,6 +488,9 @@ struct rvu {
struct rvu_debugfs rvu_dbg;
#endif
struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -656,6 +671,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -688,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_cn10k_aq_enq_req *aq_req,
struct nix_cn10k_aq_enq_rsp *aq_rsp,
u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -741,6 +759,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
@@ -754,6 +773,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
@@ -761,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu);
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 6e2bf4fcd29c..fe99ac4a4dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
unsigned long lmac_bmap;
int size, free_pkind;
int cgx, lmac, iter;
+ int numvfs, hwvfs;
if (!cgx_cnt_max)
return 0;
@@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
pf++;
}
}
@@ -454,6 +457,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so that has to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -557,6 +585,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from a PF that is not mapped to a CGX
+ * LMAC, or from a VF, then no DMAC filter entries are allocated
+ * at the CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -953,3 +1038,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
return 0;
}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7d9e71c6965f..8d48b64485c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -10,6 +10,206 @@
#include "cgx.h"
#include "rvu_reg.h"
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flush the AP interceptor cache so that APR_LMT_MAP_ENTRY_S
+ * changes take effect. Writing 1 triggers the flush; the read back
+ * acts as a barrier and sets up a data dependency; writing 0 after
+ * the write of 1 completes the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+ /* Store the secondary's lmt base address as it needs to be
+ * restored on FLR. Also make sure this default value doesn't
+ * get overwritten by multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ u64 lmt_addr, val;
+ u32 pri_tbl_idx;
+ int err = 0;
+
+ /* Check if the PF_FUNC wants to use its own local memory as the
+ * LMTLINE region; if so, convert that IOVA to a physical address
+ * and populate the LMT table with that address.
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+ /* Reconfigure the lmtst map table for LMT region shared mode,
+ * i.e. let multiple PF_FUNCs share one LMTLINE region. The
+ * primary/base pcifunc (passed as an argument in the mailbox) is
+ * the one whose lmt base address is shared with the secondary
+ * pcifuncs (the callers of this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ return err;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Resetting the lmtst map table to original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Revert to the original lmt base addr for the
+ * respective pcifunc.
+ */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+}
+
int rvu_set_channels_base(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
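rvu_get_lmtst_tbl_index() above lays the LMT map table out as one LMT_MAPTBL_ENTRY_SIZE (16-byte) APR_LMT_MAP_ENTRY_S slot per PF/VF function, indexed by pf * total_vfs + func; rvu_mbox_handler_lmtst_tbl_setup() then either writes the caller's translated local IOVA into its own slot, or copies the base_pcifunc's slot value into it so both share one LMTLINE region, parking the old value in pfvf->lmt_base_addr so rvu_reset_lmt_map_tbl() can restore it on FLR. A worked example of the index math with an assumed total_vfs of 256 (and assuming FUNC 0 is the PF itself, VF n being FUNC n + 1):

	u32 idx_pf2     = (2 * 256 + 0) * LMT_MAPTBL_ENTRY_SIZE;	/* byte offset 8192 */
	u32 idx_pf2_vf3 = (2 * 256 + 4) * LMT_MAPTBL_ENTRY_SIZE;	/* byte offset 8256 */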
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 3cc3c6fd1d84..9b2dfbf90e51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
}
@@ -2041,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
- /* Skip PF0 */
- if (!pcifunc)
- return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -2226,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
- if (rule->intf == NIX_INTF_TX) {
+ if (is_npc_intf_tx(rule->intf)) {
switch (rule->tx_action.op) {
case NIX_TX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 10a98bcb7c54..2688186066d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_nix_health_reporters_destroy(rvu_dl);
}
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req
static const struct devlink_ops rvu_devlink_ops = {
.info_get = rvu_devlink_info_get,
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
int rvu_register_dl(struct rvu *rvu)
@@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu)
struct devlink *dl;
int err;
- rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
- if (!rvu_dl)
- return -ENOMEM;
-
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
if (!dl) {
dev_warn(rvu->dev, "devlink_alloc failed\n");
- kfree(rvu_dl);
return -ENOMEM;
}
@@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu)
if (err) {
dev_err(rvu->dev, "devlink register failed with error %d\n", err);
devlink_free(dl);
- kfree(rvu_dl);
return err;
}
+ rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
rvu->rvu_dl = rvu_dl;
@@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu)
rvu_health_reporters_destroy(rvu);
devlink_unregister(dl);
devlink_free(dl);
- kfree(rvu_dl);
}
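With the eswitch_mode_get/set callbacks wired up above, switching the RVU AF between the two modes goes through the standard devlink eswitch interface, e.g. (hypothetical PCI address):

	devlink dev eswitch set pci/0002:01:00.0 mode switchdev

which ends up in rvu_devlink_eswitch_mode_set() and calls rvu_switch_enable(); setting mode legacy calls rvu_switch_disable() instead.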
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d6f8210652c5..4bfbbdf38770 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
- /*Sync all in flight RX packets to LLC/DRAM */
+ /* Sync all in flight RX packets to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
- dev_err(rvu->dev, "NIX RX software sync failed\n");
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all in-flight transactions are finished and pkts
+ * are written to LLC/DRAM; queues should only be torn down after a
+ * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
+ * an existing transaction might end after the SW_SYNC operation. To
+ * ensure the operation is fully done, do the SW_SYNC twice.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
@@ -346,6 +358,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
@@ -1949,6 +1964,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, struct nix_txsch *txsch)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+ * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links),
+ BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ }
+}
+
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
@@ -2037,6 +2081,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
+ rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+
return 0;
}
@@ -3177,6 +3224,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
ether_addr_copy(pfvf->default_mac, req->mac_addr);
+ rvu_switch_update_rules(rvu, pcifunc);
+
return 0;
}
@@ -3805,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
- devm_kfree(rvu->dev, vlan->entry2pfvf_map);
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
@@ -3846,6 +3894,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ rvu_switch_update_rules(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 3612e0a2cab3..52b255426c22 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -468,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
{
int bank = npc_get_bank(mcam, index);
int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
u64 cam0, cam1;
actbank = bank; /* Save bank id, to set action later on */
@@ -488,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
*/
for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
/* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* Last bit must be set and rest don't care
+ * for TX interfaces
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
- intf);
+ tx_intf);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
- ~intf & 0x3);
+ tx_intf_mask);
/* Set the match key */
npc_get_keyword(entry, kw, &cam0, &cam1);
@@ -650,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
eth_broadcast_addr((u8 *)&req.mask.dmac);
req.features = BIT_ULL(NPC_DMAC);
req.channel = chan;
+ req.chan_mask = 0xFFFU;
req.intf = pfvf->nix_rx_intf;
req.op = action.op;
req.hdr.pcifunc = 0; /* AF is requester */
@@ -799,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
eth_broadcast_addr((u8 *)&req.mask.dmac);
req.features = BIT_ULL(NPC_DMAC);
req.channel = chan;
+ req.chan_mask = 0xFFFU;
req.intf = pfvf->nix_rx_intf;
req.entry = index;
req.hdr.pcifunc = 0; /* AF is requester */
@@ -1707,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int num_pkinds, num_kpus, idx;
- struct npc_pkind *pkind;
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -1725,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
- pkind = &hw->pkind;
num_pkinds = rvu->kpu.pkinds;
- num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1745,6 +1757,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
int rsvd, err;
+ u16 index;
+ int cntr;
u64 cfg;
/* Actual number of MCAM entries vary by entry size */
@@ -1845,6 +1859,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->entry2target_pffunc)
goto free_mem;
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
mutex_init(&mcam->lock);
return 0;
@@ -1867,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
if (npc_const1 & BIT_ULL(63))
npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
- pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
hw->npc_intfs = npc_const & 0xFULL;
@@ -1978,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
+ /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0',
+ * no need to configure PKIND for all LBKs separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
@@ -2562,7 +2589,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
}
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_ALLOC_DENIED;
return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -2582,7 +2609,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* Free request from PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
@@ -2594,7 +2621,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
if (rc)
goto exit;
- mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -2679,13 +2706,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- pcifunc)) {
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
@@ -2836,7 +2864,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* If the request is from a PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Since list of allocated counter IDs needs to be sent to requester,
@@ -3081,7 +3109,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (rc) {
/* Free allocated MCAM entry */
mutex_lock(&mcam->lock);
- mcam->entry2pfvf_map[entry] = 0;
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
npc_mcam_clear_bit(mcam, entry);
mutex_unlock(&mcam->lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 68633145a8b8..5c01cf4a9c5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -910,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
- struct npc_install_flow_req *req, u16 target)
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
{
+ struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
- u64 chan_mask;
- chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
- npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
- NIX_INTF_RX);
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+ req->chan_mask = 0x0; /* Channel is a don't care */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
*(u64 *)&action = 0x00;
action.pf_func = target;
@@ -949,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing the rule then PF_FUNC in the
+ * send descriptor is a don't care
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
- 0, ~0ULL, 0, NIX_INTF_TX);
+ 0, mask, 0, NIX_INTF_TX);
*(u64 *)&action = 0x00;
action.op = req->op;
@@ -1002,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
req->intf);
if (is_npc_intf_rx(req->intf))
- npc_update_rx_entry(rvu, pfvf, entry, req, target);
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
else
npc_update_tx_entry(rvu, pfvf, entry, req, target);
@@ -1164,7 +1174,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (err)
return err;
- if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+ /* Skip channel validation if AF is installing */
+ if (!is_pffunc_af(req->hdr.pcifunc) &&
+ npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, target);
@@ -1180,6 +1192,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
eth_broadcast_addr((u8 *)&req->mask.dmac);
}
+ /* Proceed if NIXLF is attached or not for TX rules */
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 76837d5e19c6..8b01ef6e2c99 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -49,6 +49,11 @@
#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -692,4 +697,9 @@
#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 14aa8e37ea41..5bbe6727d11d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -35,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NPA0 = 0xeULL,
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
- BLK_COUNT = 0x12ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000000..820adf390b8e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+ * address and the NIX RX and TX interfaces for a pcifunc.
+ * Normally it is called when a pcifunc attaches, but it is
+ * called here because rules are pre-installed before any
+ * NIXLFs are attached.
+ */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* An MCAM RX rule for a PF/VF already exists as the default
+ * unicast rule installed by AF. Hence change the channel match
+ * in that rule to ignore the channel, so that packets with the
+ * required DMAC are accepted whether they arrive from LBK (from
+ * other PF/VFs in the system) or from the wire.
+ */
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
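Every rule in rvu_switch.c addresses a function through the pcifunc value built by the shifts in rvu_switch_install_rules(). A minimal sketch of that encoding, assuming only what those shifts imply (PF number above a 10-bit function field, where function 0 is the PF itself and function N is VF N-1); example_pcifunc() is a hypothetical helper, not an AF driver API:

/* Hypothetical helper for illustration: builds a pcifunc the same way
 * rvu_switch_install_rules() does. Pass vf = -1 for the PF itself, so
 * example_pcifunc(2, -1) == 0x800 (PF2) and example_pcifunc(2, 0) == 0x801
 * (PF2's first VF).
 */
static inline u16 example_pcifunc(int pf, int vf)
{
	return (pf << 10) | (vf < 0 ? 0 : ((vf + 1) & 0x3FF));
}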
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index e6609068e81b..64aa7d350df1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -21,7 +21,7 @@ TRACE_EVENT(otx2_msg_alloc,
__field(u16, id)
__field(u64, size)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->id = id;
__entry->size = size;
),
@@ -36,7 +36,7 @@ TRACE_EVENT(otx2_msg_send,
__field(u16, num_msgs)
__field(u64, msg_size)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
),
@@ -52,7 +52,7 @@ TRACE_EVENT(otx2_msg_check,
__field(u16, rspid)
__field(int, rc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->reqid = reqid;
__entry->rspid = rspid;
__entry->rc = rc;
@@ -69,8 +69,8 @@ TRACE_EVENT(otx2_msg_interrupt,
__string(str, msg)
__field(u64, intr)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
- __assign_str(str, msg)
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ __assign_str(str, msg);
__entry->intr = intr;
),
TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
@@ -84,7 +84,7 @@ TRACE_EVENT(otx2_msg_process,
__field(u16, id)
__field(int, err)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ TP_fast_assign(__assign_str(dev, pci_name(pdev));
__entry->id = id;
__entry->err = err;
),
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 457c94793e63..3254b02205ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 1b08896b46d2..184de9466286 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
- int size, num_lines;
- u64 base;
- if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- pf->hw_ops = &otx2_hw_ops;
+ struct lmtst_tbl_setup_req *req;
+ int qcount, err;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
- pf->hw_ops = &cn10k_hw_ops;
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- pf->hw.lmt_base = ioremap(base, size);
+ pfvf->hw_ops = &cn10k_hw_ops;
+ qcount = pfvf->hw.max_queues;
+ /* LMTST lines allocation
+ * qcount = num_online_cpus();
+ * NPA = TX + RX + XDP.
+ * NIX = TX * 32 (For Burst SQE flush).
+ */
+ pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
+ pfvf->npa_lmt_lines = qcount * 3;
+ pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
- if (!pf->hw.lmt_base) {
- dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* FIXME: Get the num of LMTST lines from LMT table */
- pf->tot_lmt_lines = size / LMT_LINE_SIZE;
- num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
- pf->hw.tx_queues;
- /* Number of LMT lines per SQ queues */
- pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-
- pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
- return 0;
-}
+ req->use_local_lmt_region = true;
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
- int size, num_lines;
-
- if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
- vf->hw_ops = &otx2_hw_ops;
- return 0;
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
}
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
- vf->hw_ops = &cn10k_hw_ops;
- size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
- vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
- PCI_MBOX_BAR_NUM),
- size);
- if (!vf->hw.lmt_base) {
- dev_err(vf->dev, "Unable to map VF LMTST region\n");
- return -ENOMEM;
- }
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
- vf->tot_lmt_lines = size / LMT_LINE_SIZE;
- /* LMTST lines per SQ */
- num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
- vf->hw.tx_queues;
- vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
- vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
+ sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
+
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
- struct otx2_nic *pfvf = dev;
- int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (lmt_id & 0x7FF);
+ val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
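To make the LMT budget in cn10k_lmtst_init() concrete, here is a worked example assuming a hypothetical pfvf->hw.max_queues of 8 (the real queue count depends on the hardware and configuration):

/* Worked example with qcount = 8 (hypothetical value):
 *   npa_lmt_lines = 8 * 3          =  24 lines (TX + RX + XDP per queue)
 *   tot_lmt_lines = 8 * 3 + 8 * 32 = 280 lines
 *   nix_lmt_size  = 32 * 128       = 4096 bytes (LMT_BURST_SIZE * LMT_LINE_SIZE)
 * cn10k_sq_aq_init() then assigns SQ 'qidx' the LMT IDs
 * 24 + qidx * 32 through 24 + qidx * 32 + 31 for burst SQE flushes,
 * which is the sq->lmt_id consumed by cn10k_sqe_flush() above.
 */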
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 71292a4cf1f3..1a1ae334477d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -12,8 +12,7 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_pf_lmtst_init(struct otx2_nic *pf);
-int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index cf7875d51d87..70fcc1fd962f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
} else {
return -EPERM;
}
@@ -921,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
- /* Enable receive CQ backpressure */
- aq->cq.bp_ena = 1;
- aq->cq.bpid = pfvf->bpid[0];
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
- /* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ /* Set backpressure level is same as cq pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ }
}
/* Fill AQ info */
@@ -1183,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt) {
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
aq->aura.bp_ena = 0;
aq->aura.nix0_bpid = pfvf->bpid[0];
/* Set backpressure level for RQ's Aura */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 234b330f3183..8fd58cd07f50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -218,8 +218,8 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
-#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
- void __iomem *lmt_base;
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
@@ -288,6 +288,9 @@ struct otx2_flow_config {
u16 tc_flower_offset;
u16 ntuple_max_flows;
u16 tc_max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
struct list_head flow_list;
};
@@ -329,6 +332,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
struct otx2_qset qset;
@@ -363,8 +367,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
+ struct qmem *dync_lmt;
u16 tot_lmt_lines;
- u16 nix_lmt_lines;
+ u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
@@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..383a6b5cb698
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store dmacindex returned by CGX/RPM driver which will
+ * be used for macaddr update/remove
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast DMAC entries.
+ * In a typical configuration the MAC block is associated with
+ * 4 LMACs, so each LMAC gets 8 DMAC filter entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
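These mailbox helpers are driven from otx2_flows.c, which tracks CGX/RPM DMAC filter usage in flow_cfg->dmacflt_bmap: bit 0 is reserved for the PF's own MAC and the remaining bits map to hardware entries through bmap_to_dmacindex[]. A hedged sketch of how a free slot is picked for a new filter, mirroring the logic in otx2_add_flow() (example_pick_dmac_slot() is hypothetical, not a driver function):

/* Hypothetical sketch: choose a free DMAC filter slot and install a MAC,
 * the way otx2_add_flow() does for ETHER_FLOW rules.
 */
static int example_pick_dmac_slot(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *cfg = pf->flow_cfg;
	u8 bit_pos;

	/* All per-LMAC entries (typically 8) already in use */
	if (bitmap_full(&cfg->dmacflt_bmap, cfg->dmacflt_max_flows))
		return -ENOSPC;

	bit_pos = find_first_zero_bit(&cfg->dmacflt_bmap,
				      cfg->dmacflt_max_flows);
	set_bit(bit_pos, &cfg->dmacflt_bmap);

	return otx2_dmacflt_add(pf, mac, bit_pos);
}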
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8df748e0677b..b906a0eb6e0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev,
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
if (err)
- goto fail;
+ return err;
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
-fail:
if (if_up)
- dev->netdev_ops->ndo_open(dev);
+ err = dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->rqe_cnt = rx_count;
if (if_up)
- netdev->netdev_ops->ndo_open(netdev);
+ return netdev->netdev_ops->ndo_open(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 8c97106bdd1c..4d9de525802d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,6 +18,12 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
+ bool dmac_filter;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
@@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->mac_table)
return -ENOMEM;
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* DMAC filters are not allocated */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
return 0;
}
@@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
return otx2_do_add_macfilter(pf, mac);
}
@@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->ntuple_max_flows;
+}
+
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
- if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
- nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block DMAC filtering is configured for white-listing,
+ * so check for an action other than DROP
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->dmac_filter = true;
+ pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
bool new = false;
+ int err = 0;
u32 ring;
- int err;
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (fsp->location >= flow_cfg->ntuple_max_flows)
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
- err = otx2_add_flow_msg(pfvf, flow);
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->dmac_filter)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->dmac_filter = true;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->ntuple_max_flows - 1);
+ err = -EINVAL;
+ } else {
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
if (err) {
if (new)
kfree(flow);
@@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
- if (location >= flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
- err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (flow->dmac_filter) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* The user is not allowed to remove a DMAC filter for the interface MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all DMAC filters are removed, delete the MAC filter with
+ * the interface MAC address and configure the CGX/RPM block in
+ * promiscuous mode
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
if (err)
return err;
@@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 59912f73417b..2c24944a4dba 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
- (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
+ (pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
@@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
@@ -1653,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
err_tx_stop_queues:
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
err_free_cints:
otx2_free_cints(pf, qidx);
vec = pci_irq_vector(pf->pdev,
@@ -1680,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
struct otx2_rss_info *rss;
int qidx, vec, wrk;
+ /* If the DOWN flag is set resources are already freed */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@@ -2526,7 +2540,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_pf_lmtst_init(pf);
+ err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@@ -2630,8 +2644,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2772,9 +2786,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
- if (pf->hw.lmt_base)
- iounmap(pf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 905fc02a7dfe..972b202b9884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps;
+ bool pps = false;
u64 rate;
int i;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 52486c1f0973..2f144e2cf436 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
+ u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 13a908f75ba0..a8bee5aefec1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_vf_lmtst_init(vf);
+ err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
-
- if (vf->hw.lmt_base)
- iounmap(vf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index d12e21db9fd6..fa7a0682ad1e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -530,6 +530,8 @@ err_trap_register:
prestera_trap = &prestera_trap_items_arr[i];
devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
}
+ devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+ groups_count);
err_groups_register:
kfree(trap_data->trap_items_arr);
err_trap_items_alloc:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 0b3e8f2db294..9a309169dbae 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -748,7 +748,7 @@ static void
prestera_fdb_offload_notify(struct prestera_port *port,
struct switchdev_notifier_fdb_info *info)
{
- struct switchdev_notifier_fdb_info send_info;
+ struct switchdev_notifier_fdb_info send_info = {};
send_info.addr = info->addr;
send_info.vid = info->vid;
@@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
static void prestera_fdb_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
struct net_device *dev = NULL;
struct prestera_port *port;
struct prestera_lag *lag;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 00c84656b2e7..28ac4693da3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3535,6 +3535,7 @@ slave_start:
if (!SRIOV_VALID_STATE(dev->flags)) {
mlx4_err(dev, "Invalid SRIOV state\n");
+ err = -EINVAL;
goto err_close;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index a99e71bc7b3c..771b92019af1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2660,6 +2660,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_XRCD:
err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
+ break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index df3e4938ecdd..360e093874d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn);
cq->uar = dev->priv.uar;
+ cq->irqn = eq->core.irqn;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ceebfc20f65e..def2156e50ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
struct auxiliary_device *adev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 01a1d02dcf15..3f8a98093f8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
mlx5_eq_notifier_register(dev, &tracer->nb);
- mlx5_fw_tracer_start(tracer);
-
+ err = mlx5_fw_tracer_start(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
+ goto err_notifier_unregister;
+ }
return 0;
+err_notifier_unregister:
+ mlx5_eq_notifier_unregister(dev, &tracer->nb);
+ mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+ cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 150c8e82c738..2cbf18c967f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+ MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+ return ro && params->lro_en ?
+ MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
+
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 778e229310a9..efef4adce086 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
params->log_sq_size = orig->log_sq_size;
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ /* RQ */
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ params->vlan_strip_disable = orig->vlan_strip_disable;
mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ }
}
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
int err;
rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
+ rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 8f79f04eccd6..1e2d117082d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
if (IS_ERR(rt))
return PTR_ERR(rt);
+ if (rt->rt_type != RTN_UNICAST) {
+ ret = -ENETUNREACH;
+ goto err_rt_release;
+ }
+
if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
ret = -ENETUNREACH;
goto err_rt_release;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 86ab4e864fe6..7f94508594fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
struct mlx5e_priv *priv = t->priv;
rq->wq_type = params->rq_wq_type;
- rq->pdev = mdev->device;
+ rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d09e65557e75..24f919ef9b8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
- int eqn_not_used;
- unsigned int irqn;
int err;
u32 i;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
- if (err)
- return err;
-
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
@@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
- mcq->irqn = irqn;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
void *in;
void *cqc;
int inlen;
- unsigned int irqn_not_used;
int eqn;
int err;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
if (err)
return err;
@@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq;
+ err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+ if (err)
+ goto err_close_sqs;
+
if (c->xdp) {
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
&c->rq_xdpsq, false);
if (err)
- goto err_close_sqs;
+ goto err_close_rq;
}
- err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
- if (err)
- goto err_close_xdp_sq;
-
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
if (err)
- goto err_close_rq;
+ goto err_close_xdp_sq;
return 0;
-err_close_rq:
- mlx5e_close_rq(&c->rq);
-
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+err_close_rq:
+ mlx5e_close_rq(&c->rq);
+
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1949,9 +1941,9 @@ err_close_async_icosq_cq:
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
mlx5e_close_xdpsq(&c->xdpsq);
- mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+ mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mlx5e_close_icosq(&c->async_icosq);
@@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel *c;
unsigned int irq;
int err;
- int eqn;
- err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+ err = mlx5_vector2irqn(priv->mdev, ix, &irq);
if (err)
return err;
@@ -3384,7 +3375,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
- int err = 0;
+ int err;
int i;
for (i = 0; i < chs->num; i++) {
@@ -3392,6 +3383,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
if (err)
return err;
}
+ if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+ return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
return 0;
}
@@ -3829,6 +3822,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_NTUPLE;
+ if (netdev->features & NETIF_F_NTUPLE)
+ netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+ return features;
+}
+
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3860,15 +3871,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
- if (mlx5e_is_uplink_rep(priv)) {
- features &= ~NETIF_F_HW_TLS_RX;
- if (netdev->features & NETIF_F_HW_TLS_RX)
- netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
- features &= ~NETIF_F_HW_TLS_TX;
- if (netdev->features & NETIF_F_HW_TLS_TX)
- netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
- }
+ if (mlx5e_is_uplink_rep(priv))
+ features = mlx5e_fix_uplink_rep_features(netdev, features);
mutex_unlock(&priv->state_lock);
@@ -4859,6 +4863,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (MLX5_CAP_ETH(mdev, scatter_fcs))
netdev->hw_features |= NETIF_F_RXFCS;
+ if (mlx5_qos_is_supported(mdev))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->features = netdev->hw_features;
/* Defaults */
@@ -4879,8 +4886,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
- if (mlx5_qos_is_supported(mdev))
- netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 629a61e8022f..d273758255c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
+ struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_priv *priv;
- netdev = __dev_get_by_index(net, ifindex);
+ netdev = dev_get_by_index(net, ifindex);
+ if (!netdev)
+ return ERR_PTR(-ENODEV);
+
priv = netdev_priv(netdev);
- return priv->mdev;
+ mdev = priv->mdev;
+ dev_put(netdev);
+
+ /* Mirred tc action holds a refcount on the ifindex net_device (see
+ * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+ * after dev_put(netdev), while we're in the context of adding a tc flow.
+ *
+ * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+ * stored in a hairpin object, which exists until all flows that refer to it are
+ * removed.
+ *
+ * On the other hand, after a hairpin object has been created, the peer net_device may
+ * be removed/unbound while there are still some hairpin flows that are using it. This
+ * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to the
+ * NETDEV_UNREGISTER event of the peer net_device.
+ */
+ return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (IS_ERR(peer_mdev)) {
+ err = PTR_ERR(peer_mdev);
+ goto create_pair_err;
+ }
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
@@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
int err;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (IS_ERR(peer_mdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+ return PTR_ERR(peer_mdev);
+ }
+
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 6e074cc457de..605c8ecc3610 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -855,8 +855,8 @@ clean:
return err;
}
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq, *n;
@@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
if (i++ == vector) {
- *eqn = eq->core.eqn;
- *irqn = eq->core.irqn;
+ if (irqn)
+ *irqn = eq->core.irqn;
+ if (eqn)
+ *eqn = eq->core.eqn;
err = 0;
break;
}
@@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
return err;
}
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+ return vector2eqnirqn(dev, vector, eqn, NULL);
+}
EXPORT_SYMBOL(mlx5_vector2eqn);
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+ return vector2eqnirqn(dev, vector, NULL, irqn);
+}
+
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
return dev->priv.eq_table->num_comp_eqs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index a6e1d4f78268..69a3630818d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -69,7 +69,7 @@ static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
unsigned long val)
{
- struct switchdev_notifier_fdb_info send_info;
+ struct switchdev_notifier_fdb_info send_info = {};
send_info.addr = addr;
send_info.vid = vid;
@@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
xa_init(&bridge->vports);
bridge->ifindex = ifindex;
bridge->refcnt = 1;
- bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+ bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
list_add(&bridge->list, &br_offloads->bridges);
return bridge;
@@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit
if (!vport->bridge)
return -EINVAL;
- vport->bridge->ageing_time = ageing_time;
+ vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
index 794012c5c476..d3ad78aa9d45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
@@ -501,6 +501,7 @@ err_sampler:
err_offload_rule:
mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
err_default_tbl:
+ kfree(sample_flow);
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 48cac5bf606d..d562edf5b0bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
};
struct mlx5_vport_tbl_attr {
- u16 chain;
+ u32 chain;
u16 prio;
u16 vport;
const struct esw_vport_tbl_namespace *vport_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7579f3402776..3bb71a186004 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -48,6 +48,7 @@
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
+#include "devlink.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -382,10 +383,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
- dest[dest_idx].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ }
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -2367,6 +2369,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
+ if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+ break;
+
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
@@ -2997,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change mode while devlink traps are active");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
err = esw_offloads_start(esw, extack);
- else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
- else
+ } else {
err = -EINVAL;
+ }
unlock:
mlx5_esw_unlock(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index bd66ab2af5b5..d5da4ab65766 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
struct mlx5_wq_param wqp;
struct mlx5_cqe64 *cqe;
int inlen, err, eqn;
- unsigned int irqn;
void *cqc, *in;
__be64 *pas;
u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d7bf0a3e4a52..c0697e1b7118 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
- struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *next_ft, *first_ft;
int err = 0;
/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
- if (list_empty(&prio->node.children)) {
+ first_ft = list_first_entry_or_null(&prio->node.children,
+ struct mlx5_flow_table, node.list);
+ if (!first_ft || first_ft->level > ft->level) {
err = connect_prev_fts(dev, ft, prio);
if (err)
return err;
- next_ft = find_next_chained_ft(prio);
+ next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
err = connect_fwd_rules(dev, ft, next_ft);
if (err)
return err;
@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
node.list) == ft))
return 0;
- next_ft = find_next_chained_ft(prio);
+ next_ft = find_next_ft(ft);
err = connect_fwd_rules(dev, next_ft, ft);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9ff163c5bcde..9abeb80ffa31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
fw_reporter_ctx.err_synd = health->synd;
fw_reporter_ctx.miss_counter = health->miss_counter;
- devlink_health_report(health->fw_fatal_reporter,
- "FW fatal error reported", &fw_reporter_ctx);
+ if (devlink_health_report(health->fw_fatal_reporter,
+ "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+ /* If recovery wasn't performed due to the grace period,
+ * unload the driver. This ensures that the driver closes
+ * all its resources and is not subjected to requests from
+ * the kernel.
+ */
+ mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+ mlx5_unload_one(dev);
+ }
}
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 624cedebb510..d3d628b862f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index eb1b316560a8..c84ad87c99bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1784,16 +1784,14 @@ static int __init init(void)
if (err)
goto err_sf;
-#ifdef CONFIG_MLX5_CORE_EN
err = mlx5e_init();
- if (err) {
- pci_unregister_driver(&mlx5_core_driver);
- goto err_debug;
- }
-#endif
+ if (err)
+ goto err_en;
return 0;
+err_en:
+ mlx5_sf_driver_unregister();
err_sf:
pci_unregister_driver(&mlx5_core_driver);
err_debug:
@@ -1803,9 +1801,7 @@ err_debug:
static void __exit cleanup(void)
{
-#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
-#endif
mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_unregister_debugfs();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 343807ac2036..da365b8f0141 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *stored_ver);
+#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
+#else
+static inline int mlx5e_init(void){ return 0; }
+static inline void mlx5e_cleanup(void){}
+#endif
static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index b25f764daa08..3465b363fc2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
err = -ENOMEM;
goto err_cpumask;
}
+ irq->pool = pool;
kref_init(&irq->kref);
irq->index = i;
err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
@@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
irq->index, err);
goto err_xa;
}
- irq->pool = pool;
return irq;
err_xa:
free_cpumask_var(irq->mask);
@@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
+ int err = 0;
+
+ err = atomic_notifier_chain_unregister(&irq->nh, nb);
irq_put(irq);
- return atomic_notifier_chain_unregister(&irq->nh, nb);
+ return err;
}
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
@@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
if (!pool)
return ERR_PTR(-ENOMEM);
pool->dev = dev;
+ mutex_init(&pool->lock);
xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
pool->xa_num_irqs.min = start;
pool->xa_num_irqs.max = start + size - 1;
@@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
name);
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
- mutex_init(&pool->lock);
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
name, size, start);
return pool;
@@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
xa_for_each(&pool->irqs, index, irq)
irq_release(&irq->kref);
xa_destroy(&pool->irqs);
+ mutex_destroy(&pool->lock);
kvfree(pool);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 12cf323a5943..9df0e73d1c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_cqe64 *cqe;
struct mlx5dr_cq *cq;
int inlen, err, eqn;
- unsigned int irqn;
void *cqc, *in;
__be64 *pas;
int vector;
@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
goto err_cqwq;
vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
- err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+ err = mlx5_vector2eqn(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
*cq->mcq.arm_db = cpu_to_be32(2 << 28);
cq->mcq.vector = 0;
- cq->mcq.irqn = irqn;
cq->mcq.uar = uar;
return cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index f1950e4968da..e4dd4eed5aee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_DECAP);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_L3_DECAP);
MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7e221ef01437..f69cbb3852d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
struct net_device *dev;
dev = br_fdb_find_port(rif->dev, mac, 0);
@@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
+ struct switchdev_notifier_fdb_info info = {};
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
- struct switchdev_notifier_fdb_info info;
struct net_device *br_dev;
struct net_device *dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c5ef9aa64efe..8f90cd323d5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
struct net_device *dev, bool offloaded)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = mac;
info.vid = vid;
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index a80419d8d4b5..7bdbb2d09a14 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -2,6 +2,8 @@ config SPARX5_SWITCH
tristate "Sparx5 switch driver"
depends on NET_SWITCHDEV
depends on HAS_IOMEM
+ depends on OF
+ depends on ARCH_SPARX5 || COMPILE_TEST
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 0443f66b5550..9a8e4f201eb1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
struct net_device *dev, bool offloaded)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = mac;
info.vid = vid;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index 9d485a9d1f1f..cb68eaaac881 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -13,19 +13,26 @@
*/
#define VSTAX 73
-static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+#define ifh_encode_bitfield(ifh, value, pos, _width) \
+ ({ \
+ u32 width = (_width); \
+ \
+ /* Max width is 5 bytes - 40 bits. In worst case this will
+ * spread over 6 bytes - 48 bits
+ */ \
+ compiletime_assert(width <= 40, \
+ "Unsupported width, must be <= 40"); \
+ __ifh_encode_bitfield((ifh), (value), (pos), width); \
+ })
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
{
u8 *ifh_hdr = ifh;
/* Calculate the Start IFH byte position of this IFH bit position */
u32 byte = (35 - (pos / 8));
/* Calculate the Start bit position in the Start IFH byte */
u32 bit = (pos % 8);
- u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
-
- /* Max width is 5 bytes - 40 bits. In worst case this will
- * spread over 6 bytes - 48 bits
- */
- compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+ u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
/* The b0-b7 goes into the start IFH byte */
if (encode & 0xFF)
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5249b64f4fc5..49def6934cad 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
- if (ret) {
- free_netdev(ndev);
+ if (ret)
goto init_fail;
- }
netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
__func__, ndev->irq, ndev->dev_addr);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index adfb9781799e..2948d731a1c1 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
mask &= ~ocelot_get_bond_mask(ocelot, bond,
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index ea4e83410fe4..7390fa3980ec 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
ocelot->map[target][reg & REG_MASK] + offset, &val);
return val;
}
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
{
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
regmap_write(ocelot->targets[target],
ocelot->map[target][reg & REG_MASK] + offset, val);
}
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
ocelot->map[target][reg & REG_MASK] + offset,
mask, val);
}
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
{
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
return val;
}
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
{
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
}
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
{
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
return 0;
}
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);
static struct regmap_config ocelot_regmap_config = {
.reg_bits = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 3e89e34f86d5..e9d260d84bf3 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
@@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_netdevice_bridge_join(dev, dev,
+ err = ocelot_netdevice_bridge_join(dev, brport_dev,
info->upper_dev,
extack);
else
- err = ocelot_netdevice_bridge_leave(dev, dev,
+ err = ocelot_netdevice_bridge_leave(dev, brport_dev,
info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
@@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
if (ocelot_port->bond != dev)
return NOTIFY_OK;
- err = ocelot_netdevice_changeupper(lower, info);
+ err = ocelot_netdevice_changeupper(lower, dev, info);
if (err)
return notifier_from_errno(err);
}
@@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
- return ocelot_netdevice_changeupper(dev, info);
+ return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_changeupper(dev, info);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 51b4b25d15ad..84f7dbe9edff 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(version);
#endif
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) return i;
/* natsemi has a non-standard PM control register
@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = ioremap(iostart, iosize);
if (!ioaddr) {
i = -ENOMEM;
- goto err_ioremap;
+ goto err_pci_request_regions;
}
/* Work around the dropped serial bit. */
@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register_netdev:
iounmap(ioaddr);
- err_ioremap:
- pci_release_regions(pdev);
-
err_pci_request_regions:
free_netdev(dev);
return i;
@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
unregister_netdev (dev);
- pci_release_regions (pdev);
iounmap(ioaddr);
free_netdev (dev);
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 82eef4c72f01..7abd13e69471 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
kfree(vdev->vpaths);
- /* we are safe to free it now */
- free_netdev(dev);
-
vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
buf);
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
__func__, __LINE__);
+
+ /* we are safe to free it now */
+ free_netdev(dev);
}
/*
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 273d529d43c2..062bb2db68bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_fl_ct_clean_flow_entry(ct_entry);
kfree(ct_map_ent);
- /* If this is the last pre_ct_rule it means that it is
- * very likely that the nft table will be cleaned up next,
- * as this happens on the removal of the last act_ct flow.
- * However we cannot deregister the callback on the removal
- * of the last nft flow as this runs into a deadlock situation.
- * So deregister the callback on removal of the last pre_ct flow
- * and remove any remaining nft flow entries. We also cannot
- * save this state and delete the callback later since the
- * nft table would already have been freed at that time.
- */
if (!zt->pre_ct_count) {
- nf_flow_table_offload_del_cb(zt->nft,
- nfp_fl_ct_handle_nft_flow,
- zt);
zt->nft = NULL;
nfp_fl_ct_clean_nft_entries(zt);
}
@@ -1172,6 +1159,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_ct_map_params);
nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
kfree(ct_map_ent);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b482446536d..8803faadd302 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index af3a5368529c..e795fa63ca12 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -29,7 +29,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
*/
};
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
+static void ionic_lif_rx_mode(struct ionic_lif *lif);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
@@ -53,7 +53,19 @@ static void ionic_dim_work(struct work_struct *work)
cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
- qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+ new_coal = new_coal ? new_coal : 1;
+
+ if (qcq->intr.dim_coal_hw != new_coal) {
+ unsigned int qi = qcq->cq.bound_q->index;
+ struct ionic_lif *lif = qcq->q.lif;
+
+ qcq->intr.dim_coal_hw = new_coal;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[qi]->intr.index,
+ qcq->intr.dim_coal_hw);
+ }
+
dim->state = DIM_START_MEASURE;
}
@@ -77,7 +89,7 @@ static void ionic_lif_deferred_work(struct work_struct *work)
switch (w->type) {
case IONIC_DW_TYPE_RX_MODE:
- ionic_lif_rx_mode(lif, w->rx_mode);
+ ionic_lif_rx_mode(lif);
break;
case IONIC_DW_TYPE_RX_ADDR_ADD:
ionic_lif_addr_add(lif, w->addr);
@@ -1301,10 +1313,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return 0;
}
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
- bool can_sleep)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
- struct ionic_deferred_work *work;
unsigned int nmfilters;
unsigned int nufilters;
@@ -1330,97 +1340,46 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
lif->nucast--;
}
- if (!can_sleep) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
- work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
- IONIC_DW_TYPE_RX_ADDR_DEL;
- memcpy(work->addr, addr, ETH_ALEN);
- netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
- add ? "add" : "del", addr);
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
- add ? "add" : "del", addr);
- if (add)
- return ionic_lif_addr_add(lif, addr);
- else
- return ionic_lif_addr_del(lif, addr);
- }
+ netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
+ add ? "add" : "del", addr);
+ if (add)
+ return ionic_lif_addr_add(lif, addr);
+ else
+ return ionic_lif_addr_del(lif, addr);
return 0;
}
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
-}
-
-static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
-{
- return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
-static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+static void ionic_lif_rx_mode(struct ionic_lif *lif)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
-}
-
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_mode_set = {
- .opcode = IONIC_CMD_RX_MODE_SET,
- .lif_index = cpu_to_le16(lif->index),
- .rx_mode = cpu_to_le16(rx_mode),
- },
- };
+ struct net_device *netdev = lif->netdev;
+ unsigned int nfilters;
+ unsigned int nd_flags;
char buf[128];
- int err;
+ u16 rx_mode;
int i;
#define REMAIN(__x) (sizeof(buf) - (__x))
- i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
- lif->rx_mode, rx_mode);
- if (rx_mode & IONIC_RX_MODE_F_UNICAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
- if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
- if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
- if (rx_mode & IONIC_RX_MODE_F_PROMISC)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
- if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
- i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
- netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
-
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
- netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
- rx_mode, err);
- else
- lif->rx_mode = rx_mode;
-}
+ mutex_lock(&lif->config_lock);
-static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_deferred_work *work;
- unsigned int nfilters;
- unsigned int rx_mode;
+ /* grab the flags once for local use */
+ nd_flags = netdev->flags;
rx_mode = IONIC_RX_MODE_F_UNICAST;
- rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
- rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
- rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
- rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
+ rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
+ rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
+ rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
+ rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
/* sync unicast addresses
* next check to see if we're in an overflow state
@@ -1429,49 +1388,83 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- if (can_sleep)
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
- else
- __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
lif->uc_overflow = true;
} else if (lif->uc_overflow) {
lif->uc_overflow = false;
- if (!(netdev->flags & IFF_PROMISC))
+ if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
}
/* same for multicast */
- if (can_sleep)
- __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
- else
- __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
lif->mc_overflow = true;
} else if (lif->mc_overflow) {
lif->mc_overflow = false;
- if (!(netdev->flags & IFF_ALLMULTI))
+ if (!(nd_flags & IFF_ALLMULTI))
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
}
+ i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+ lif->rx_mode, rx_mode);
+ if (rx_mode & IONIC_RX_MODE_F_UNICAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+ if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+ if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+ if (rx_mode & IONIC_RX_MODE_F_PROMISC)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+ if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+ if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
+ i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
+ netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
+
if (lif->rx_mode != rx_mode) {
- if (!can_sleep) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "rxmode change dropped\n");
- return;
- }
- work->type = IONIC_DW_TYPE_RX_MODE;
- work->rx_mode = rx_mode;
- netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- ionic_lif_rx_mode(lif, rx_mode);
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_mode_set = {
+ .opcode = IONIC_CMD_RX_MODE_SET,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ int err;
+
+ ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
+ rx_mode, err);
+ else
+ lif->rx_mode = rx_mode;
+ }
+
+ mutex_unlock(&lif->config_lock);
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_deferred_work *work;
+
+ if (!can_sleep) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "rxmode change dropped\n");
+ return;
}
+ work->type = IONIC_DW_TYPE_RX_MODE;
+ netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_lif_rx_mode(lif);
}
}
@@ -3058,6 +3051,7 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3185,7 +3179,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -3202,7 +3196,7 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
return 0;
}
@@ -3225,6 +3219,7 @@ int ionic_lif_init(struct ionic_lif *lif)
lif->hw_index = le16_to_cpu(comp.hw_index);
mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 346506f01715..69ab59fedb6c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -108,7 +108,6 @@ struct ionic_deferred_work {
struct list_head list;
enum ionic_deferred_work_type type;
union {
- unsigned int rx_mode;
u8 addr[ETH_ALEN];
u8 fw_status;
};
@@ -179,6 +178,7 @@ struct ionic_lif {
unsigned int index;
unsigned int hw_index;
struct mutex queue_lock; /* lock for queue structures */
+ struct mutex config_lock; /* lock for config actions */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
@@ -199,7 +199,7 @@ struct ionic_lif {
unsigned int nrxq_descs;
u32 rx_copybreak;
u64 rxq_features;
- unsigned int rx_mode;
+ u16 rx_mode;
u64 hw_features;
bool registered;
bool mc_overflow;
@@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
int ionic_lif_size(struct ionic *ionic);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
@@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif);
void ionic_lif_alloc_phc(struct ionic_lif *lif);
void ionic_lif_free_phc(struct ionic_lif *lif);
#else
-static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
-{
- return -EOPNOTSUPP;
-}
+static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index a87c87e86aef..6e2403c71608 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
+ if (!lif->phc || !lif->phc->ptp)
+ return -EOPNOTSUPP;
+
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -203,15 +206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
return 0;
}
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
{
int err;
+ if (!lif->phc || !lif->phc->ptp)
+ return;
+
err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
if (err)
netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
-
- return err;
}
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 08934888575c..08870190e4d2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -274,12 +274,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (likely(netdev->features & NETIF_F_RXCSUM)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
- stats->csum_complete++;
- }
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+ stats->csum_complete++;
} else {
stats->csum_none++;
}
@@ -451,11 +450,12 @@ void ionic_rx_empty(struct ionic_queue *q)
q->tail_idx = 0;
}
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
struct dim_sample dim_sample;
struct ionic_lif *lif;
unsigned int qi;
+ u64 pkts, bytes;
if (!qcq->intr.dim_coal_hw)
return;
@@ -463,14 +463,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
lif = qcq->q.lif;
qi = qcq->cq.bound_q->index;
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[qi]->intr.index,
- qcq->intr.dim_coal_hw);
+ switch (napi_mode) {
+ case IONIC_LIF_F_TX_DIM_INTR:
+ pkts = lif->txqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes;
+ break;
+ case IONIC_LIF_F_RX_DIM_INTR:
+ pkts = lif->rxqstats[qi].pkts;
+ bytes = lif->rxqstats[qi].bytes;
+ break;
+ default:
+ pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+ break;
+ }
dim_update_sample(qcq->cq.bound_intr->rearm_count,
- lif->txqstats[qi].pkts,
- lif->txqstats[qi].bytes,
- &dim_sample);
+ pkts, bytes, &dim_sample);
net_dim(&qcq->dim, dim_sample);
}
@@ -491,7 +500,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -530,7 +539,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -576,7 +585,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a4610d9330..c46a7f756ed5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags;
int rc = -EINVAL;
+ if (!p_ll2_conn)
+ return rc;
+
spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) {
rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0;
int rc = 0;
+ if (!p_ll2_conn)
+ return rc;
+
spin_lock_irqsave(&p_rx->lock, flags);
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return 0;
+ }
+
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
+ if (!p_ll2_conn)
+ return 0;
+
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
+ if (!p_ll2_conn)
+ return 0;
+
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
@@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn)
return -EINVAL;
p_rx = &p_ll2_conn->rx_queue;
+ if (!p_rx->set_prod_addr)
+ return -EIO;
spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index da864d12916b..4f4b79250a2b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) {
- DP_ERR(p_hwfn->cdev,
- "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
return NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 2e62a2c4eb63..5630008f38b7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -501,6 +501,7 @@ struct qede_fastpath {
#define QEDE_SP_HW_ERR 4
#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
+#define QEDE_SP_DISABLE 8
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c59b72c90293..a2e4dfb5cb44 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_vlan *vlan = NULL;
+ struct qede_vlan *vlan;
int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
@@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
if (vlan->vid == vid)
break;
- if (!vlan || (vlan->vid != vid)) {
+ if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Vlan isn't configured\n");
goto out;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 01ac1e93d27a..7c6064baeba2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ /* Disable execution of this deferred work once
+ * qede removal is in progress; this stops any future
+ * scheduling of sp_task.
+ */
+ if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+ return;
+
/* The locking scheme depends on the specific flag:
* In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
* ensure that ongoing flows are ended and new ones are not started.
@@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
if (mode != QEDE_REMOVE_RECOVERY) {
+ set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2376b2729633..c00ad57575ea 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- ssleep(1);
+ mdelay(1000);
} while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
@@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
if ((value & ISP_CONTROL_SR) == 0)
break;
- ssleep(1);
+ mdelay(1000);
} while ((--max_wait_time));
/*
@@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
ispControlStatus);
if ((value & ISP_CONTROL_FSR) == 0)
break;
- ssleep(1);
+ mdelay(1000);
} while ((--max_wait_time));
}
if (max_wait_time == 0)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d8882d0b6b49..d51bac7ba5af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err);
- if (err == -EIO)
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
return err;
+ }
word = ret;
*(u32 *)p_data = word;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 8543bf3c3484..ad655f0a4965 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev)
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
- free_netdev(netdev);
if (adpt->phy.digital)
iounmap(adpt->phy.digital);
iounmap(adpt->phy.base);
+ free_netdev(netdev);
+
return 0;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index f744557c33a3..4d8e337f5085 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ /* The default value is 0x13. Change it to 0x2f */
+ rtl_csi_access_enable(tp, 0x2f);
+
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
/* disable EEE */
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_pcie_state_l2l3_disable(tp);
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
@@ -5084,7 +5088,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
+ pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 86a1eb0634e8..80e62ca2e3d3 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -864,7 +864,7 @@ enum GECMR_BIT {
/* The Ethernet AVB descriptor definitions. */
struct ravb_desc {
- __le16 ds; /* Descriptor size */
+ __le16 ds; /* Descriptor size */
u8 cc; /* Content control MSBs (reserved) */
u8 die_dt; /* Descriptor interrupt enable and type */
__le32 dptr; /* Descriptor pointer */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 69c50f81e1cb..805397088850 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (ravb_rx(ndev, &quota, q))
goto out;
- /* Processing RX Descriptor Ring */
+ /* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index a46633606cae..1f06b92ee5bb 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2715,7 +2715,7 @@ static void
rocker_fdb_offload_notify(struct rocker_port *rocker_port,
struct switchdev_notifier_fdb_info *recv_info)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = recv_info->addr;
info.vid = recv_info->vid;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 967a634ee9ac..e33a9d283a4e 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
container_of(work, struct ofdpa_fdb_learn_work, work);
bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = lw->addr;
info.vid = lw->vid;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a3ca406a3561..e5b0d795c301 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* maximum size.
*/
tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+ tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
n_xdp_tx = num_possible_cpus();
n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
@@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
@@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
+ efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
+
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
- if (xdp_queue_number < efx->xdp_tx_queue_count)
+ if (xdp_queue_number < efx->xdp_tx_queue_count) {
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
+ xdp_queue_number++;
+ }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx)
}
}
}
- if (xdp_queue_number)
- efx->xdp_tx_queue_count = xdp_queue_number;
+ WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ca9c00b7f588..cff87de9178a 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
#endif
/* setup various bits in PCI command register */
- ret = pci_enable_device(pci_dev);
+ ret = pcim_enable_device(pci_dev);
if(ret) return ret;
i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
@@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
ret = -ENOMEM;
- goto err_out_cleardev;
+ goto err_out;
}
sis_priv = netdev_priv(net_dev);
@@ -581,8 +581,6 @@ err_unmap_tx:
sis_priv->tx_ring_dma);
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
- pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
return ret;
@@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
sis_priv->tx_ring_dma);
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
- pci_release_regions(pci_dev);
}
static int __maybe_unused sis900_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index e108b0d2bd28..4c9a37dd0d3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- bool mdio = false;
- int ret, i;
struct device_node *np;
+ int ret, i, phy_mode;
+ bool mdio = false;
np = dev_of_node(&pdev->dev);
@@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (plat->bus_id < 0)
plat->bus_id = pci_dev_id(pdev);
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
dev_err(&pdev->dev, "phy_mode not found\n");
+ plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 67ba083eb90c..b21745368983 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = {
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
@@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = {
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e735134e8487..fcdb1d20389b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8d9d6ecf8c63..7b8404a21544 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->rx_queues_to_use, false);
stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
}
priv->speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 072eff8079d0..5ca710844cc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ int phy_mode;
void *ret;
int rc;
@@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
eth_zero_addr(mac);
}
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
+ return ERR_PTR(phy_mode);
+ plat->phy_interface = phy_mode;
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
plat->interface = plat->phy_interface;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 4e86cdf2bc9f..580cc035536b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
- bool xmac;
+ bool xmac, est_rst = false;
+ int ret;
xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
@@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
sec = quotient;
nsec = reminder;
+ /* If EST is enabled, disable it before adjusting the PTP time. */
+ if (priv->plat->est && priv->plat->est->enable) {
+ est_rst = true;
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ }
+
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ /* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
+ if (est_rst) {
+ struct timespec64 current_time, time;
+ ktime_t current_time_ns, basetime;
+ u64 cycle_time;
+
+ mutex_lock(&priv->plat->est->lock);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time.tv_nsec = priv->plat->est->btr_reserve[0];
+ time.tv_sec = priv->plat->est->btr_reserve[1];
+ basetime = timespec64_to_ktime(time);
+ cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ priv->plat->est->ctr[0];
+ time = stmmac_calc_tas_basetime(basetime,
+ current_time_ns,
+ cycle_time);
+
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->plat->est->enable = true;
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ if (ret)
+ netdev_err(priv->dev, "failed to configure EST\n");
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 92dab609d4f8..4f3b6437b114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time)
+{
+ struct timespec64 time;
+
+ if (ktime_after(old_base_time, current_time)) {
+ time = ktime_to_timespec64(old_base_time);
+ } else {
+ s64 n;
+ ktime_t base_time;
+
+ n = div64_s64(ktime_sub_ns(current_time, old_base_time),
+ cycle_time);
+ base_time = ktime_add_ns(old_base_time,
+ (n + 1) * cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
+ return time;
+}
+
static int tc_setup_taprio(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time, current_time;
+ struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
@@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
GFP_KERNEL);
if (!plat->est)
return -ENOMEM;
+
+ mutex_init(&priv->plat->est->lock);
} else {
memset(plat->est, 0, sizeof(*plat->est));
}
size = qopt->num_entries;
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->enable;
+ mutex_unlock(&priv->plat->est->lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
+ mutex_lock(&priv->plat->est->lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- if (ktime_after(qopt->base_time, current_time_ns)) {
- time = ktime_to_timespec64(qopt->base_time);
- } else {
- ktime_t base_time;
- s64 n;
-
- n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
- qopt->cycle_time);
- base_time = ktime_add_ns(qopt->base_time,
- (n + 1) * qopt->cycle_time);
-
- time = ktime_to_timespec64(base_time);
- }
+ time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
+ qopt->cycle_time);
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
+ qopt_time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+
ctr = qopt->cycle_time;
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
- if (fpe && !priv->dma_cap.fpesel)
+ if (fpe && !priv->dma_cap.fpesel) {
+ mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
+ }
/* Actual FPE register configuration will be done after the FPE handshake
* is successful.
@@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return 0;
disable:
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
priv->plat->fpe_cfg->enable = false;
stmmac_fpe_configure(priv, priv->ioaddr,
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 01ea0d6f8819..50bd4e3b0af9 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -404,7 +404,7 @@ err_out_free_dev:
return err;
}
-static int vsw_port_remove(struct vio_dev *vdev)
+static void vsw_port_remove(struct vio_dev *vdev)
{
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
unsigned long flags;
@@ -430,8 +430,6 @@ static int vsw_port_remove(struct vio_dev *vdev)
free_netdev(port->dev);
}
-
- return 0;
}
static void vsw_cleanup(void)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 74e748662ec0..860644d182ab 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
err = niu_pci_vpd_scan_props(np, here, end);
if (err < 0)
return err;
+ /* err == 1 is not an error */
if (err == 1)
- return -EINVAL;
+ return 0;
}
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 96b883f965f6..58ee89223951 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -510,7 +510,7 @@ err_out_put_mdesc:
return err;
}
-static int vnet_port_remove(struct vio_dev *vdev)
+static void vnet_port_remove(struct vio_dev *vdev)
{
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
@@ -533,7 +533,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
kfree(port);
}
- return 0;
}
static const struct vio_device_id vnet_port_match[] = {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 718539cdd2f2..67a08cbba859 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm
for (i = 1; i <= common->port_num; i++) {
struct am65_cpsw_port *port = am65_common_get_port(common, i);
- struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+ struct am65_cpsw_ndev_priv *priv;
+ if (!port->ndev)
+ continue;
+
+ priv = am65_ndev_to_priv(port->ndev);
priv->offload_fwd_mark = set_val;
}
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 9c29b363e9ae..599708a3e81d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
struct switchdev_notifier_fdb_info *rcv)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = rcv->addr;
info.vid = rcv->vid;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 57d279fdcc9f..d1d02001cef6 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpdma_chan *txch;
int ret, q_idx;
- if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++;
return NET_XMIT_DROP;
@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
for (i = 0; i < n; i++) {
xdpf = frames[i];
- if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+ if (xdpf->len < READ_ONCE(priv->tx_packet_min))
break;
if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
priv->dev = dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->emac_port = i + 1;
+ priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
if (is_valid_ether_addr(slave_data->mac_addr)) {
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
priv = netdev_priv(sl_ndev);
slave->port_vlan = vlan;
+ WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
if (netif_running(sl_ndev))
cpsw_port_add_switch_def_ale_entries(priv,
slave);
@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
priv = netdev_priv(slave->ndev);
slave->port_vlan = slave->data->dual_emac_res_vlan;
+ WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index a323bea54faa..2951fb7b9dae 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -89,7 +89,8 @@ do { \
#define CPSW_POLL_WEIGHT 64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
-#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
ETH_FCS_LEN +\
CPSW_RX_VLAN_ENCAP_HDR_SIZE)
@@ -380,6 +381,7 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
int offload_fwd_mark;
+ u32 tx_packet_min;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index f7fb6e17dadd..a7d97d429e06 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
static void cpsw_fdb_offload_notify(struct net_device *ndev,
struct switchdev_notifier_fdb_info *rcv)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
info.addr = rcv->addr;
info.vid = rcv->vid;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 0b2ce4bdc2c3..e0cb713193ea 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
#endif
- free_netdev(dev);
-
cancel_work_sync(&priv->tlan_tqueue);
+ free_netdev(dev);
}
static void tlan_start(struct net_device *dev)
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 85c66af9e56d..7ae754eadf22 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -37,6 +37,8 @@
#include <linux/module.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
+#include <mach/hardware.h>
+#include <linux/soc/ixp4xx/cpu.h>
#include "ixp46x_ts.h"
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 9ecc395239e9..a6fb88fd42f7 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -12,9 +12,10 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/module.h>
-
#include <linux/ptp_clock_kernel.h>
+#include <linux/soc/ixp4xx/cpu.h>
+#include <linux/module.h>
+#include <mach/ixp4xx-regs.h>
#include "ixp46x_ts.h"
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 14f07050b6b1..0de2c4552f5e 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1504,9 +1504,8 @@ err_out_resource:
release_mem_region(start, len);
err_out_kfree:
- free_netdev(dev);
-
pr_err("%s: initialization failure, aborting!\n", fp->name);
+ free_netdev(dev);
return ret;
}
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
index 9237b69d8e21..6437ddbd7842 100644
--- a/drivers/net/fjes/fjes_trace.h
+++ b/drivers/net/fjes/fjes_trace.h
@@ -232,7 +232,7 @@ TRACE_EVENT(fjes_hw_start_debug_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err)
+ __assign_str(err, err);
),
TP_printk("%s", __get_str(err))
);
@@ -258,7 +258,7 @@ TRACE_EVENT(fjes_hw_stop_debug_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err)
+ __assign_str(err, err);
),
TP_printk("%s", __get_str(err))
);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index a15cc5e50290..8fe8887d506a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -428,7 +428,7 @@ out:
* and sent on to some IP layer for further processing.
*/
static void sixpack_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+ const unsigned char *cp, const char *fp, int count)
{
struct sixpack *sp;
int count1;
@@ -742,6 +742,7 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
static struct tty_ldisc_ops sp_ldisc = {
.owner = THIS_MODULE,
+ .num = N_6PACK,
.name = "6pack",
.open = sixpack_open,
.close = sixpack_close,
@@ -764,21 +765,16 @@ static int __init sixpack_init_driver(void)
printk(msg_banner);
/* Register the provided line protocol discipline */
- if ((status = tty_register_ldisc(N_6PACK, &sp_ldisc)) != 0)
+ status = tty_register_ldisc(&sp_ldisc);
+ if (status)
printk(msg_regfail, status);
return status;
}
-static const char msg_unregfail[] = KERN_ERR \
- "6pack: can't unregister line discipline (err = %d)\n";
-
static void __exit sixpack_exit_driver(void)
{
- int ret;
-
- if ((ret = tty_unregister_ldisc(N_6PACK)))
- printk(msg_unregfail, ret);
+ tty_unregister_ldisc(&sp_ldisc);
}
/* encode an AX.25 packet into 6pack */
@@ -831,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
return;
}
+ if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+ pr_err("6pack: cooked buffer overrun, data loss\n");
+ sp->rx_count = 0;
+ return;
+ }
+
buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index b99128669bc8..8666110bec55 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -872,7 +872,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
* and sent on to the AX.25 layer for further processing.
*/
static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
struct mkiss *ax = mkiss_get(tty);
@@ -934,6 +934,7 @@ out:
static struct tty_ldisc_ops ax_ldisc = {
.owner = THIS_MODULE,
+ .num = N_AX25,
.name = "mkiss",
.open = mkiss_open,
.close = mkiss_close,
@@ -953,22 +954,16 @@ static int __init mkiss_init_driver(void)
printk(banner);
- status = tty_register_ldisc(N_AX25, &ax_ldisc);
+ status = tty_register_ldisc(&ax_ldisc);
if (status != 0)
printk(msg_regfail, status);
return status;
}
-static const char msg_unregfail[] = KERN_ERR \
- "mkiss: can't unregister line discipline (err = %d)\n";
-
static void __exit mkiss_exit_driver(void)
{
- int ret;
-
- if ((ret = tty_unregister_ldisc(N_AX25)))
- printk(msg_unregfail, ret);
+ tty_unregister_ldisc(&ax_ldisc);
}
MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index ebc976b7fcc2..8caa61ec718f 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
struct hwsim_edge *e;
u32 v0, v1;
- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
@@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
u32 v0, v1;
u8 lqi;
- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
- if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
return -EINVAL;
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index cf709df70d28..93270e50b6b3 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index 110e4ee85785..ebd001f0eece 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -82,6 +82,17 @@ out:
static int parent_count;
+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+ struct mdio_mux_child_bus *cb = pb->children;
+
+ while (cb) {
+ mdiobus_unregister(cb->mii_bus);
+ mdiobus_free(cb->mii_bus);
+ cb = cb->next;
+ }
+}
+
int mdio_mux_init(struct device *dev,
struct device_node *mux_node,
int (*switch_fn)(int cur, int desired, void *data),
@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
ret_val = -ENOMEM;
- continue;
+ goto err_loop;
}
cb->bus_number = v;
cb->parent = pb;
@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
ret_val = -ENOMEM;
- devm_kfree(dev, cb);
- continue;
+ goto err_loop;
}
cb->mii_bus->priv = cb;
@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
+ mdiobus_free(cb->mii_bus);
+ if (r == -EPROBE_DEFER) {
+ ret_val = r;
+ goto err_loop;
+ }
+ devm_kfree(dev, cb);
dev_err(dev,
"Error: Failed to register MDIO bus for child %pOF\n",
child_bus_node);
- mdiobus_free(cb->mii_bus);
- devm_kfree(dev, cb);
} else {
cb->next = pb->children;
pb->children = cb;
@@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev,
}
dev_err(dev, "Error: No acceptable child buses found\n");
- devm_kfree(dev, pb);
+
+err_loop:
+ mdio_mux_uninit_children(pb);
+ of_node_put(child_bus_node);
err_pb_kz:
put_device(&parent_bus->dev);
err_parent_bus:
@@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
void mdio_mux_uninit(void *mux_handle)
{
struct mdio_mux_parent_bus *pb = mux_handle;
- struct mdio_mux_child_bus *cb = pb->children;
-
- while (cb) {
- mdiobus_unregister(cb->mii_bus);
- mdiobus_free(cb->mii_bus);
- cb = cb->next;
- }
+ mdio_mux_uninit_children(pb);
put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index e60e38c1f09d..11be6bcdd551 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -335,7 +335,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
u64_stats_init(&mhi_netdev->stats.tx_syncp);
/* Start MHI channels */
- err = mhi_prepare_for_transfer(mhi_dev);
+ err = mhi_prepare_for_transfer(mhi_dev, 0);
if (err)
goto out_err;
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 3811f1bde84e..b80ed2ffd45e 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
u16 sa_idx;
int ret;
- dev = xs->xso.dev;
+ dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
static void nsim_ipsec_del_sa(struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
@@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
ipsec->ok++;
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 63fda3fc40aa..4bd61339823c 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -1089,7 +1089,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
if (!xpcs)
- return NULL;
+ return ERR_PTR(-ENOMEM);
xpcs->mdiodev = mdiodev;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 7bf3011b8e77..83aea5c5cd03 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -288,7 +288,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E)
+ BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index bbbc6ac8fa82..53a433442803 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -78,6 +78,11 @@ enum {
/* Temperature read register (88E2110 only) */
MV_PCS_TEMP = 0x8042,
+ /* Number of ports on the device */
+ MV_PCS_PORT_INFO = 0xd00d,
+ MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
+ MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = {
#endif
};
+static int mv3310_get_number_of_ports(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO);
+ if (ret < 0)
+ return ret;
+
+ ret &= MV_PCS_PORT_INFO_NPORTS_MASK;
+ ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT;
+
+ return ret + 1;
+}
+
+static int mv3310_match_phy_device(struct phy_device *phydev)
+{
+ return mv3310_get_number_of_ports(phydev) == 1;
+}
+
+static int mv3340_match_phy_device(struct phy_device *phydev)
+{
+ return mv3310_get_number_of_ports(phydev) == 4;
+}
+
static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
{
int val;
@@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
- .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv3310_match_phy_device,
.name = "mv88x3310",
.driver_data = &mv3310_type,
.get_features = mv3310_get_features,
@@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = {
.set_loopback = genphy_c45_loopback,
},
{
- .phy_id = MARVELL_PHY_ID_88X3340,
- .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .phy_id = MARVELL_PHY_ID_88X3310,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv3340_match_phy_device,
.name = "mv88x3340",
.driver_data = &mv3340_type,
.get_features = mv3310_get_features,
@@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
- { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
+ { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 4d53886f7d51..5c928f827173 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
}
static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
- const u32 ksz_phy_id)
+ const bool ksz_8051)
{
int ret;
- if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
return 0;
ret = phy_read(phydev, MII_BMSR);
@@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
* the switch does not.
*/
ret &= BMSR_ERCAP;
- if (ksz_phy_id == PHY_ID_KSZ8051)
+ if (ksz_8051)
return ret;
else
return !ret;
@@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
static int ksz8051_match_phy_device(struct phy_device *phydev)
{
- return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+ return ksz8051_ksz8795_match_phy_device(phydev, true);
}
static int ksz8081_config_init(struct phy_device *phydev)
@@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev)
static int ksz8795_match_phy_device(struct phy_device *phydev)
{
- return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+ return ksz8051_ksz8795_match_phy_device(phydev, false);
}
static int ksz9021_load_values_from_of(struct phy_device *phydev,
@@ -1760,8 +1760,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
- .config_aneg = ksz8873mll_config_aneg,
- .read_status = ksz8873mll_read_status,
.match_phy_device = ksz8795_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 8b41aa3fb64e..29a93d6bfe37 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -98,7 +98,7 @@ static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
- char *flags, int count);
+ const char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
static void ppp_async_process(struct tasklet_struct *t);
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
- char *cflags, int count)
+ const char *cflags, int count)
{
struct asyncppp *ap = ap_get(tty);
unsigned long flags;
@@ -372,6 +372,7 @@ ppp_asynctty_wakeup(struct tty_struct *tty)
static struct tty_ldisc_ops ppp_ldisc = {
.owner = THIS_MODULE,
+ .num = N_PPP,
.name = "ppp",
.open = ppp_asynctty_open,
.close = ppp_asynctty_close,
@@ -389,7 +390,7 @@ ppp_async_init(void)
{
int err;
- err = tty_register_ldisc(N_PPP, &ppp_ldisc);
+ err = tty_register_ldisc(&ppp_ldisc);
if (err != 0)
printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
err);
@@ -829,7 +830,7 @@ process_input_packet(struct asyncppp *ap)
static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
- char *flags, int count)
+ const char *flags, int count)
{
struct sk_buff *skb;
int c, i, j, n, s, f;
@@ -1015,8 +1016,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
static void __exit ppp_async_cleanup(void)
{
- if (tty_unregister_ldisc(N_PPP) != 0)
- printk(KERN_ERR "failed to unregister PPP line discipline\n");
+ tty_unregister_ldisc(&ppp_ldisc);
}
module_init(ppp_async_init);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 930e49ef15f6..7a099c37527f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -284,7 +284,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
-static int unit_get(struct idr *p, void *ptr);
+static int unit_get(struct idr *p, void *ptr, int min);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
@@ -1155,9 +1155,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
- ret = unit_get(&pn->units_idr, ppp);
+ ret = unit_get(&pn->units_idr, ppp, 0);
if (ret < 0)
goto err;
+ if (!ifname_is_set) {
+ while (1) {
+ snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
+ if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+ break;
+ unit_put(&pn->units_idr, ret);
+ ret = unit_get(&pn->units_idr, ppp, ret + 1);
+ if (ret < 0)
+ goto err;
+ }
+ }
} else {
/* Caller asked for a specific unit number. Fail with -EEXIST
* if unavailable. For backward compatibility, return -EEXIST
@@ -1306,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
* the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
* userspace to infer the device name using the PPPIOCGUNIT ioctl.
*/
- if (!tb[IFLA_IFNAME])
+ if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;
err = ppp_dev_configure(src_net, dev, &conf);
@@ -3552,9 +3563,9 @@ static int unit_set(struct idr *p, void *ptr, int n)
}
/* get new free unit number and associate pointer with it */
-static int unit_get(struct idr *p, void *ptr)
+static int unit_get(struct idr *p, void *ptr, int min)
{
- return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
+ return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
}
/* put unit number back to a pool */
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 576b6a93bf23..af3e048695b6 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -94,7 +94,7 @@ static void ppp_sync_process(struct tasklet_struct *t);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
- char *flags, int count);
+ const char *flags, int count);
static const struct ppp_channel_ops sync_ops = {
.start_xmit = ppp_sync_send,
@@ -333,7 +333,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
- char *cflags, int count)
+ const char *cflags, int count)
{
struct syncppp *ap = sp_get(tty);
unsigned long flags;
@@ -365,6 +365,7 @@ ppp_sync_wakeup(struct tty_struct *tty)
static struct tty_ldisc_ops ppp_sync_ldisc = {
.owner = THIS_MODULE,
+ .num = N_SYNC_PPP,
.name = "pppsync",
.open = ppp_sync_open,
.close = ppp_sync_close,
@@ -382,7 +383,7 @@ ppp_sync_init(void)
{
int err;
- err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
+ err = tty_register_ldisc(&ppp_sync_ldisc);
if (err != 0)
printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
err);
@@ -665,7 +666,7 @@ ppp_sync_flush_output(struct syncppp *ap)
*/
static void
ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
- char *flags, int count)
+ const char *flags, int count)
{
struct sk_buff *skb;
unsigned char *p;
@@ -726,8 +727,7 @@ err:
static void __exit
ppp_sync_cleanup(void)
{
- if (tty_unregister_ldisc(N_SYNC_PPP) != 0)
- printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
+ tty_unregister_ldisc(&ppp_sync_ldisc);
}
module_init(ppp_sync_init);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 1ab124eba8eb..dc84cb844319 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -685,7 +685,7 @@ static void sl_setup(struct net_device *dev)
*/
static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
struct slip *sl = tty->disc_data;
@@ -1263,6 +1263,7 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static struct tty_ldisc_ops sl_ldisc = {
.owner = THIS_MODULE,
+ .num = N_SLIP,
.name = "slip",
.open = slip_open,
.close = slip_close,
@@ -1298,7 +1299,7 @@ static int __init slip_init(void)
return -ENOMEM;
/* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(N_SLIP, &sl_ldisc);
+ status = tty_register_ldisc(&sl_ldisc);
if (status != 0) {
printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
kfree(slip_devs);
@@ -1359,9 +1360,7 @@ static void __exit slip_exit(void)
kfree(slip_devs);
slip_devs = NULL;
- i = tty_unregister_ldisc(N_SLIP);
- if (i != 0)
- printk(KERN_ERR "SLIP: can't unregister line discipline (err = %d)\n", i);
+ tty_unregister_ldisc(&sl_ldisc);
}
module_init(slip_init);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index ac92bc52a85e..38cda590895c 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
+static int asix_check_host_enable(struct usbnet *dev, int in_pm)
+{
+ int i, ret;
+ u8 smsr;
+
+ for (i = 0; i < 30; ++i) {
+ ret = asix_set_sw_mii(dev, in_pm);
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
+ break;
+ usleep_range(1000, 1100);
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+ 0, 0, 1, &smsr, in_pm);
+ if (ret == -ENODEV)
+ break;
+ else if (ret < 0)
+ continue;
+ else if (smsr & AX_HOST_EN)
+ break;
+ }
+
+ return ret;
+}
+
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
{
/* Reset the variables that have a lifetime outside of
@@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
- u8 smsr;
- int i = 0;
int ret;
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV || ret == -ETIMEDOUT)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 0);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+ ret = asix_check_host_enable(dev, 0);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
@@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
- u8 smsr;
- int i = 0;
int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
phy_id, loc, val);
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 0);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+ ret = asix_check_host_enable(dev, 0);
if (ret == -ENODEV)
goto out;
@@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
- u8 smsr;
- int i = 0;
int ret;
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV || ret == -ETIMEDOUT)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 1);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+ ret = asix_check_host_enable(dev, 1);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
@@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
- u8 smsr;
- int i = 0;
int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
phy_id, loc, val);
mutex_lock(&dev->phy_mutex);
- do {
- ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
- break;
- usleep_range(1000, 1100);
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
- 0, 0, 1, &smsr, 1);
- } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+ ret = asix_check_host_enable(dev, 1);
if (ret == -ENODEV) {
mutex_unlock(&dev->phy_mutex);
return;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index aec97b021a73..2c115216420a 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return ret;
}
+ phy_suspend(priv->phydev);
priv->phydev->mac_managed_pm = 1;
phy_attached_info(priv->phydev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 54ef8492ca01..dec96e8ab567 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1356,10 +1356,10 @@ out:
}
/* how much room is there for writing */
-static int hso_serial_write_room(struct tty_struct *tty)
+static unsigned int hso_serial_write_room(struct tty_struct *tty)
{
struct hso_serial *serial = tty->driver_data;
- int room;
+ unsigned int room;
unsigned long flags;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1403,11 +1403,11 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/* how many characters in the buffer */
-static int hso_serial_chars_in_buffer(struct tty_struct *tty)
+static unsigned int hso_serial_chars_in_buffer(struct tty_struct *tty)
{
struct hso_serial *serial = tty->driver_data;
- int chars;
unsigned long flags;
+ unsigned int chars;
/* sanity check */
if (serial == NULL)
@@ -2495,7 +2495,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
hso_net_init);
if (!net) {
dev_err(&interface->dev, "Unable to create ethernet device\n");
- goto exit;
+ goto err_hso_dev;
}
hso_net = netdev_priv(net);
@@ -2508,13 +2508,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
USB_DIR_IN);
if (!hso_net->in_endp) {
dev_err(&interface->dev, "Can't find BULK IN endpoint\n");
- goto exit;
+ goto err_net;
}
hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
USB_DIR_OUT);
if (!hso_net->out_endp) {
dev_err(&interface->dev, "Can't find BULK OUT endpoint\n");
- goto exit;
+ goto err_net;
}
SET_NETDEV_DEV(net, &interface->dev);
SET_NETDEV_DEVTYPE(net, &hso_type);
@@ -2523,18 +2523,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!hso_net->mux_bulk_rx_urb_pool[i])
- goto exit;
+ goto err_mux_bulk_rx;
hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
GFP_KERNEL);
if (!hso_net->mux_bulk_rx_buf_pool[i])
- goto exit;
+ goto err_mux_bulk_rx;
}
hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hso_net->mux_bulk_tx_urb)
- goto exit;
+ goto err_mux_bulk_rx;
hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
if (!hso_net->mux_bulk_tx_buf)
- goto exit;
+ goto err_free_tx_urb;
add_net_device(hso_dev);
@@ -2542,7 +2542,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
result = register_netdev(net);
if (result) {
dev_err(&interface->dev, "Failed to register device\n");
- goto exit;
+ goto err_free_tx_buf;
}
hso_log_port(hso_dev);
@@ -2550,8 +2550,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
hso_create_rfkill(hso_dev, interface);
return hso_dev;
-exit:
- hso_free_net_device(hso_dev, true);
+
+err_free_tx_buf:
+ remove_net_device(hso_dev);
+ kfree(hso_net->mux_bulk_tx_buf);
+err_free_tx_urb:
+ usb_free_urb(hso_net->mux_bulk_tx_urb);
+err_mux_bulk_rx:
+ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
+ usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]);
+ kfree(hso_net->mux_bulk_rx_buf_pool[i]);
+ }
+err_net:
+ free_netdev(net);
+err_hso_dev:
+ kfree(hso_dev);
return NULL;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 25489389ea49..6d092d78e0cb 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
- int ladv, radv, ret;
+ int ladv, radv, ret, link;
u32 buf;
/* clear LAN78xx interrupt status */
@@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (unlikely(ret < 0))
return -EIO;
+ mutex_lock(&phydev->lock);
phy_read_status(phydev);
+ link = phydev->link;
+ mutex_unlock(&phydev->lock);
- if (!phydev->link && dev->link_on) {
+ if (!link && dev->link_on) {
dev->link_on = false;
/* reset MAC */
@@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
return -EIO;
del_timer(&dev->stat_monitor);
- } else if (phydev->link && !dev->link_on) {
+ } else if (link && !dev->link_on) {
dev->link_on = true;
phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
static u32 lan78xx_get_link(struct net_device *net)
{
+ u32 link;
+
+ mutex_lock(&net->phydev->lock);
phy_read_status(net->phydev);
+ link = net->phydev->link;
+ mutex_unlock(&net->phydev->lock);
- return net->phydev->link;
+ return link;
}
static void lan78xx_get_drvinfo(struct net_device *net,
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 9a907182569c..652e9fcf0b77 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1,31 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
+ * Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com)
*
- * ChangeLog:
- * .... Most of the time spent on reading sources & docs.
- * v0.2.x First official release for the Linux kernel.
- * v0.3.0 Beutified and structured, some bugs fixed.
- * v0.3.x URBifying bulk requests and bugfixing. First relatively
- * stable release. Still can touch device's registers only
- * from top-halves.
- * v0.4.0 Control messages remained unurbified are now URBs.
- * Now we can touch the HW at any time.
- * v0.4.9 Control urbs again use process context to wait. Argh...
- * Some long standing bugs (enable_net_traffic) fixed.
- * Also nasty trick about resubmiting control urb from
- * interrupt context used. Please let me know how it
- * behaves. Pegasus II support added since this version.
- * TODO: suppressing HCD warnings spewage on disconnect.
- * v0.4.13 Ethernet address is now set at probe(), not at open()
- * time as this seems to break dhcpd.
- * v0.5.0 branch to 2.5.x kernels
- * v0.5.1 ethtool support added
- * v0.5.5 rx socket buffers are in a pool and the their allocation
- * is out of the interrupt routine.
- * ...
- * v0.9.3 simplified [get|set]_register(s), async update registers
- * logic revisited, receive skb_pool removed.
*/
#include <linux/sched.h>
@@ -45,7 +21,6 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.9.3 (2013/04/25)"
#define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
#define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
@@ -132,9 +107,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
const void *data)
{
- return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
+ int ret;
+
+ ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
PEGASUS_REQT_WRITE, 0, indx, data, size,
1000, GFP_NOIO);
+ if (ret < 0)
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+ return ret;
}
/*
@@ -145,10 +126,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
void *buf = &data;
+ int ret;
- return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
+ ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
PEGASUS_REQT_WRITE, data, indx, buf, 1,
1000, GFP_NOIO);
+ if (ret < 0)
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+
+ return ret;
}
static int update_eth_regs_async(pegasus_t *pegasus)
@@ -188,10 +174,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
{
- int i;
- __u8 data[4] = { phy, 0, 0, indx };
+ int i, ret;
__le16 regdi;
- int ret = -ETIMEDOUT;
+ __u8 data[4] = { phy, 0, 0, indx };
if (cmd & PHY_WRITE) {
__le16 *t = (__le16 *) & data[1];
@@ -207,12 +192,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
if (data[0] & PHY_DONE)
break;
}
- if (i >= REG_TIMEOUT)
+ if (i >= REG_TIMEOUT) {
+ ret = -ETIMEDOUT;
goto fail;
+ }
if (cmd & PHY_READ) {
ret = get_registers(p, PhyData, 2, &regdi);
+ if (ret < 0)
+ goto fail;
*regd = le16_to_cpu(regdi);
- return ret;
}
return 0;
fail:
@@ -235,9 +223,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
pegasus_t *pegasus = netdev_priv(dev);
+ int ret;
u16 res;
- read_mii_word(pegasus, phy_id, loc, &res);
+ ret = read_mii_word(pegasus, phy_id, loc, &res);
+ if (ret < 0)
+ return ret;
+
return (int)res;
}
@@ -251,10 +243,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
- int i;
- __u8 tmp = 0;
+ int ret, i;
__le16 retdatai;
- int ret;
+ __u8 tmp = 0;
set_register(pegasus, EpromCtrl, 0);
set_register(pegasus, EpromOffset, index);
@@ -262,21 +253,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
+ if (ret < 0)
+ goto fail;
if (tmp & EPROM_DONE)
break;
- if (ret == -ESHUTDOWN)
- goto fail;
}
- if (i >= REG_TIMEOUT)
+ if (i >= REG_TIMEOUT) {
+ ret = -ETIMEDOUT;
goto fail;
+ }
ret = get_registers(pegasus, EpromData, 2, &retdatai);
+ if (ret < 0)
+ goto fail;
*retdata = le16_to_cpu(retdatai);
return ret;
fail:
- netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
- return -ETIMEDOUT;
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+ return ret;
}
#ifdef PEGASUS_WRITE_EEPROM
@@ -324,10 +319,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
return ret;
fail:
- netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
-#endif /* PEGASUS_WRITE_EEPROM */
+#endif /* PEGASUS_WRITE_EEPROM */
static inline int get_node_id(pegasus_t *pegasus, u8 *id)
{
@@ -367,19 +362,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
return;
err:
eth_hw_addr_random(pegasus->net);
- dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
+ netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
return;
}
static inline int reset_mac(pegasus_t *pegasus)
{
+ int ret, i;
__u8 data = 0x8;
- int i;
set_register(pegasus, EthCtrl1, data);
for (i = 0; i < REG_TIMEOUT; i++) {
- get_registers(pegasus, EthCtrl1, 1, &data);
+ ret = get_registers(pegasus, EthCtrl1, 1, &data);
+ if (ret < 0)
+ goto fail;
if (~data & 0x08) {
if (loopback)
break;
@@ -402,22 +399,29 @@ static inline int reset_mac(pegasus_t *pegasus)
}
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
__u16 auxmode;
- read_mii_word(pegasus, 3, 0x1b, &auxmode);
+ ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
+ if (ret < 0)
+ goto fail;
auxmode |= 4;
write_mii_word(pegasus, 3, 0x1b, &auxmode);
}
return 0;
+fail:
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+ return ret;
}
static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
{
- __u16 linkpart;
- __u8 data[4];
pegasus_t *pegasus = netdev_priv(dev);
int ret;
+ __u16 linkpart;
+ __u8 data[4];
- read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+ ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+ if (ret < 0)
+ goto fail;
data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@@ -435,11 +439,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
u16 auxmode;
- read_mii_word(pegasus, 0, 0x1b, &auxmode);
+ ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
+ if (ret < 0)
+ goto fail;
auxmode |= 4;
write_mii_word(pegasus, 0, 0x1b, &auxmode);
}
+ return 0;
+fail:
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
@@ -447,9 +456,9 @@ static void read_bulk_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
struct net_device *net;
+ u8 *buf = urb->transfer_buffer;
int rx_status, count = urb->actual_length;
int status = urb->status;
- u8 *buf = urb->transfer_buffer;
__u16 pkt_len;
if (!pegasus)
@@ -735,12 +744,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus)
set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
-static inline void get_interrupt_interval(pegasus_t *pegasus)
+static inline int get_interrupt_interval(pegasus_t *pegasus)
{
u16 data;
u8 interval;
+ int ret;
+
+ ret = read_eprom_word(pegasus, 4, &data);
+ if (ret < 0)
+ return ret;
- read_eprom_word(pegasus, 4, &data);
interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
if (interval < 0x80) {
@@ -755,6 +768,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus)
}
}
pegasus->intr_interval = interval;
+
+ return 0;
}
static void set_carrier(struct net_device *net)
@@ -880,7 +895,6 @@ static void pegasus_get_drvinfo(struct net_device *dev,
pegasus_t *pegasus = netdev_priv(dev);
strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
@@ -998,8 +1012,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
data[0] = pegasus->phy;
fallthrough;
case SIOCDEVPRIVATE + 1:
- read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
- res = 0;
+ res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
break;
case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
@@ -1033,22 +1046,25 @@ static void pegasus_set_multicast(struct net_device *net)
static __u8 mii_phy_probe(pegasus_t *pegasus)
{
- int i;
+ int i, ret;
__u16 tmp;
for (i = 0; i < 32; i++) {
- read_mii_word(pegasus, i, MII_BMSR, &tmp);
+ ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
+ if (ret < 0)
+ goto fail;
if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
continue;
else
return i;
}
-
+fail:
return 0xff;
}
static inline void setup_pegasus_II(pegasus_t *pegasus)
{
+ int ret;
__u8 data = 0xa5;
set_register(pegasus, Reg1d, 0);
@@ -1060,7 +1076,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg7b, 2);
set_register(pegasus, 0x83, data);
- get_registers(pegasus, 0x83, 1, &data);
+ ret = get_registers(pegasus, 0x83, 1, &data);
+ if (ret < 0)
+ goto fail;
if (data == 0xa5)
pegasus->chip = 0x8513;
@@ -1075,6 +1093,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 6);
else
set_register(pegasus, Reg81, 2);
+
+ return;
+fail:
+ netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
}
static void check_carrier(struct work_struct *work)
@@ -1149,7 +1171,9 @@ static int pegasus_probe(struct usb_interface *intf,
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
pegasus->features = usb_dev_id[dev_index].private;
- get_interrupt_interval(pegasus);
+ res = get_interrupt_interval(pegasus);
+ if (res)
+ goto out2;
if (reset_mac(pegasus)) {
dev_err(&intf->dev, "can't reset MAC\n");
res = -EIO;
@@ -1296,7 +1320,7 @@ static void __init parse_id(char *id)
static int __init pegasus_init(void)
{
- pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
+ pr_info("%s: " DRIVER_DESC "\n", driver_name);
if (devid)
parse_id(devid);
return usb_register(&pegasus_driver);
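
The pegasus changes follow one rule throughout: a helper that can fail returns a negative errno, and every caller either propagates it or jumps to a single fail label that logs once at debug level. A small sketch of that call-and-propagate discipline, with a hypothetical read_reg()/init_device() rather than the pegasus accessors:

#include <errno.h>
#include <stdio.h>

/* Stand-in for a register read that can fail. */
static int read_reg(int reg, unsigned int *val)
{
	if (reg < 0)
		return -EINVAL;
	*val = 0xa5;
	return 0;
}

static int init_device(void)
{
	unsigned int val;
	int ret;

	ret = read_reg(0, &val);
	if (ret < 0)
		goto fail;

	ret = read_reg(1, &val);
	if (ret < 0)
		goto fail;

	return 0;
fail:
	fprintf(stderr, "%s failed: %d\n", __func__, ret);
	return ret;	/* propagate the original errno, don't invent a new one */
}

int main(void)
{
	return init_device() ? 1 : 0;
}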
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 1692d3b1b6e1..79832374f78d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1552,7 +1552,8 @@ static int
rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
u32 advertising);
-static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
+ bool in_resume)
{
struct r8152 *tp = netdev_priv(netdev);
struct sockaddr *addr = p;
@@ -1561,9 +1562,11 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
goto out1;
- ret = usb_autopm_get_interface(tp->intf);
- if (ret < 0)
- goto out1;
+ if (!in_resume) {
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out1;
+ }
mutex_lock(&tp->control);
@@ -1575,11 +1578,17 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
mutex_unlock(&tp->control);
- usb_autopm_put_interface(tp->intf);
+ if (!in_resume)
+ usb_autopm_put_interface(tp->intf);
out1:
return ret;
}
+static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+{
+ return __rtl8152_set_mac_address(netdev, p, false);
+}
+
/* Devices containing proper chips can support a persistent
* host system provided MAC address.
* Examples of this are Dell TB15 and Dell WD15 docks
@@ -1698,7 +1707,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
return ret;
}
-static int set_ethernet_addr(struct r8152 *tp)
+static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
{
struct net_device *dev = tp->netdev;
struct sockaddr sa;
@@ -1711,7 +1720,7 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01)
ether_addr_copy(dev->dev_addr, sa.sa_data);
else
- ret = rtl8152_set_mac_address(dev, &sa);
+ ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
return ret;
}
@@ -3946,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
case RTL_VER_06:
ocp_write_byte(tp, type, PLA_BP_EN, 0);
break;
+ case RTL_VER_14:
+ ocp_write_word(tp, type, USB_BP2_EN, 0);
+
+ ocp_write_word(tp, type, USB_BP_8, 0);
+ ocp_write_word(tp, type, USB_BP_9, 0);
+ ocp_write_word(tp, type, USB_BP_10, 0);
+ ocp_write_word(tp, type, USB_BP_11, 0);
+ ocp_write_word(tp, type, USB_BP_12, 0);
+ ocp_write_word(tp, type, USB_BP_13, 0);
+ ocp_write_word(tp, type, USB_BP_14, 0);
+ ocp_write_word(tp, type, USB_BP_15, 0);
+ break;
case RTL_VER_08:
case RTL_VER_09:
case RTL_VER_10:
case RTL_VER_11:
case RTL_VER_12:
case RTL_VER_13:
- case RTL_VER_14:
case RTL_VER_15:
default:
if (type == MCU_TYPE_USB) {
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@@ -4322,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
case RTL_VER_11:
case RTL_VER_12:
case RTL_VER_13:
- case RTL_VER_14:
case RTL_VER_15:
fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA;
@@ -4330,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
bp_start = PLA_BP_0;
max_bp = 8;
break;
+ case RTL_VER_14:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = USB_BP2_EN;
+ bp_start = PLA_BP_0;
+ max_bp = 16;
+ break;
default:
goto out;
}
@@ -6763,9 +6789,10 @@ static int rtl8152_close(struct net_device *netdev)
tp->rtl_ops.down(tp);
mutex_unlock(&tp->control);
+ }
+ if (!res)
usb_autopm_put_interface(tp->intf);
- }
free_all_mem(tp);
@@ -8443,7 +8470,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
- set_ethernet_addr(tp);
+ set_ethernet_addr(tp, true);
return rtl8152_resume(intf);
}
@@ -9644,7 +9671,7 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->rtl_fw.retry = true;
#endif
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
- set_ethernet_addr(tp);
+ set_ethernet_addr(tp, false);
usb_set_intfdata(intf, tp);
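
The r8152 change splits the MAC setter into an internal helper that takes an in_resume flag plus a thin public wrapper, so the resume path can reuse the logic without taking a runtime-PM reference it must not take there. A sketch of that wrapper-plus-flag structure, with hypothetical pm_get()/pm_put() stand-ins for the USB autopm calls:

#include <stdbool.h>
#include <stdio.h>

static int pm_get(void) { puts("pm_get"); return 0; }
static void pm_put(void) { puts("pm_put"); }

static int __set_addr(const char *addr, bool in_resume)
{
	int ret = 0;

	if (!in_resume) {
		ret = pm_get();		/* only when not already resumed */
		if (ret < 0)
			return ret;
	}

	printf("programming address %s\n", addr);

	if (!in_resume)
		pm_put();
	return ret;
}

/* Normal entry point, e.g. from the netdev address callback. */
static int set_addr(const char *addr)
{
	return __set_addr(addr, false);
}

int main(void)
{
	set_addr("00:11:22:33:44:55");		/* takes and drops the PM reference */
	__set_addr("00:11:22:33:44:55", true);	/* resume path: skips it */
	return 0;
}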
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0b81458ca94..eee493685aad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM
};
-#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_NET_F_GUEST_UFO))
@@ -1516,12 +1516,16 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq, true);
+ do {
+ virtqueue_disable_cb(sq->vq);
+ free_old_xmit_skbs(sq, true);
+ } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+
__netif_tx_unlock(txq);
}
-
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
- netif_tx_wake_queue(txq);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -1592,6 +1596,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct virtnet_info *vi = sq->vq->vdev->priv;
unsigned int index = vq2txq(sq->vq);
struct netdev_queue *txq;
+ int opaque;
+ bool done;
if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
/* We don't need to enable cb for XDP */
@@ -1601,14 +1607,32 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
- __netif_tx_unlock(txq);
-
- virtqueue_napi_complete(napi, sq->vq, 0);
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
netif_tx_wake_queue(txq);
+ opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+ done = napi_complete_done(napi, 0);
+
+ if (!done)
+ virtqueue_disable_cb(sq->vq);
+
+ __netif_tx_unlock(txq);
+
+ if (done) {
+ if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+ if (napi_schedule_prep(napi)) {
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+ __netif_tx_unlock(txq);
+ __napi_schedule(napi);
+ }
+ }
+ }
+
return 0;
}
@@ -1670,10 +1694,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq, false);
+ do {
+ if (use_napi)
+ virtqueue_disable_cb(sq->vq);
+
+ free_old_xmit_skbs(sq, false);
- if (use_napi && kick)
- virtqueue_enable_cb_delayed(sq->vq);
+ } while (use_napi && kick &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -1743,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
{
struct scatterlist *sgs[4], hdr, stat;
unsigned out_num = 0, tmp;
+ int ret;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -1762,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
- virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_warn(&vi->vdev->dev,
+ "Failed to add sgs for command vq: %d\n.", ret);
+ return false;
+ }
if (unlikely(!virtqueue_kick(vi->cvq)))
return vi->ctrl->status == VIRTIO_NET_OK;
@@ -2481,7 +2515,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
- NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
+ NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
return -EOPNOTSUPP;
}
@@ -2612,15 +2646,15 @@ static int virtnet_set_features(struct net_device *dev,
u64 offloads;
int err;
- if ((dev->features ^ features) & NETIF_F_LRO) {
+ if ((dev->features ^ features) & NETIF_F_GRO_HW) {
if (vi->xdp_enabled)
return -EBUSY;
- if (features & NETIF_F_LRO)
+ if (features & NETIF_F_GRO_HW)
offloads = vi->guest_offloads_capable;
else
offloads = vi->guest_offloads_capable &
- ~GUEST_OFFLOAD_LRO_MASK;
+ ~GUEST_OFFLOAD_GRO_HW_MASK;
err = virtnet_set_guest_offloads(vi, offloads);
if (err)
@@ -3100,9 +3134,9 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_RXCSUM;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
- dev->features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_GRO_HW;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
- dev->hw_features |= NETIF_F_LRO;
+ dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
@@ -3310,8 +3344,11 @@ static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
virtnet_set_queues(vi, vi->curr_queue_pairs);
err = virtnet_cpu_notif_add(vi);
- if (err)
+ if (err) {
+ virtnet_freeze_down(vdev);
+ remove_vq_common(vi);
return err;
+ }
return 0;
}
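
The virtqueue hunks all use the same loop: disable callbacks, reap completed buffers, then try to re-enable callbacks, repeating if new completions raced in before the enable took effect. A compilable sketch of that disable/drain/re-enable loop against a hypothetical queue, not the virtio API:

#include <stdbool.h>
#include <stdio.h>

struct fake_vq {
	int pending;		/* completions waiting to be reaped */
	bool cb_enabled;
};

static void vq_disable_cb(struct fake_vq *vq) { vq->cb_enabled = false; }

static void vq_reap(struct fake_vq *vq)
{
	while (vq->pending) {
		vq->pending--;
		puts("freed one completed buffer");
	}
}

/* Returns false if more work arrived before callbacks were re-armed. */
static bool vq_enable_cb(struct fake_vq *vq)
{
	vq->cb_enabled = true;
	return vq->pending == 0;
}

static void clean_tx(struct fake_vq *vq)
{
	do {
		vq_disable_cb(vq);
		vq_reap(vq);
	} while (!vq_enable_cb(vq));	/* loop closes the re-arm race */
}

int main(void)
{
	struct fake_vq vq = { .pending = 3 };

	clean_tx(&vq);
	return 0;
}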
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c0bd9cbc43b1..1b483cf2b1ca 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,6 +26,10 @@
#include "vmxnet3_int.h"
+#include <net/vxlan.h>
+#include <net/geneve.h>
+
+#define VXLAN_UDP_PORT 8472
struct vmxnet3_stat_desc {
char desc[ETH_GSTRING_LEN];
@@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_proto = 0;
+ u16 port;
+ struct udphdr *udph;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
@@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
- if (l4_proto != IPPROTO_UDP)
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ udph = udp_hdr(skb);
+ port = be16_to_cpu(udph->dest);
+ /* Check if offloaded port is supported */
+ if (port != GENEVE_UDP_PORT &&
+ port != IANA_VXLAN_UDP_PORT &&
+ port != VXLAN_UDP_PORT) {
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+ break;
+ default:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
}
return features;
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 2b1b944d4b28..8bbe2a7bb141 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
+ nf_reset_ct(skb);
+
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
+ nf_reset_ct(skb);
+
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 349ca18088e8..c54fdae950fb 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_cisco_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_cisco_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_cisco_init);
+module_exit(hdlc_cisco_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 72250fe0a1df..25e3564ce118 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_fr_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_fr_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_fr_init);
+module_exit(hdlc_fr_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 834be2ae3e9e..b81ecf432a0c 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_ppp_init(void)
{
skb_queue_head_init(&tx_queue);
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_ppp_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_ppp_init);
+module_exit(hdlc_ppp_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 388fcc09b4dd..54d28496fefd 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_raw_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -98,14 +98,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_raw_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_raw_init);
+module_exit(hdlc_raw_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index c70a518b8b47..927596276a07 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_eth_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -118,14 +118,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_eth_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_eth_init);
+module_exit(hdlc_eth_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index d2bf72bf3bd7..9b7ebf8bd85c 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_x25_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_x25_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_x25_init);
+module_exit(hdlc_x25_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index e97521138f7e..3c51ab239fb2 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
+#include <linux/soc/ixp4xx/cpu.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index 89a25aefb327..efa98444e3fb 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -643,24 +643,14 @@ bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
return enabled;
}
-static void b43_remove_dynamic_debug(struct b43_wldev *dev)
-{
- struct b43_dfsentry *e = dev->dfsentry;
- int i;
-
- for (i = 0; i < __B43_NR_DYNDBG; i++)
- debugfs_remove(e->dyn_debug_dentries[i]);
-}
-
static void b43_add_dynamic_debug(struct b43_wldev *dev)
{
struct b43_dfsentry *e = dev->dfsentry;
#define add_dyn_dbg(name, id, initstate) do { \
e->dyn_debug[id] = (initstate); \
- e->dyn_debug_dentries[id] = \
- debugfs_create_bool(name, 0600, e->subdir, \
- &(e->dyn_debug[id])); \
+ debugfs_create_bool(name, 0600, e->subdir, \
+ &(e->dyn_debug[id])); \
} while (0)
add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false);
@@ -713,10 +703,9 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- e->file_##name.dentry = \
- debugfs_create_file(__stringify(name), \
- mode, e->subdir, dev, \
- &fops_##name.fops); \
+ debugfs_create_file(__stringify(name), \
+ mode, e->subdir, dev, \
+ &fops_##name.fops); \
} while (0)
@@ -746,19 +735,6 @@ void b43_debugfs_remove_device(struct b43_wldev *dev)
e = dev->dfsentry;
if (!e)
return;
- b43_remove_dynamic_debug(dev);
-
- debugfs_remove(e->file_shm16read.dentry);
- debugfs_remove(e->file_shm16write.dentry);
- debugfs_remove(e->file_shm32read.dentry);
- debugfs_remove(e->file_shm32write.dentry);
- debugfs_remove(e->file_mmio16read.dentry);
- debugfs_remove(e->file_mmio16write.dentry);
- debugfs_remove(e->file_mmio32read.dentry);
- debugfs_remove(e->file_mmio32write.dentry);
- debugfs_remove(e->file_txstat.dentry);
- debugfs_remove(e->file_restart.dentry);
- debugfs_remove(e->file_loctls.dentry);
debugfs_remove(e->subdir);
kfree(e->txstatlog.log);
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.h b/drivers/net/wireless/broadcom/b43/debugfs.h
index 0bf437c86c67..6f6b500b8881 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.h
+++ b/drivers/net/wireless/broadcom/b43/debugfs.h
@@ -32,7 +32,6 @@ struct b43_txstatus_log {
};
struct b43_dfs_file {
- struct dentry *dentry;
char *buffer;
size_t data_len;
};
@@ -70,8 +69,6 @@ struct b43_dfsentry {
/* Enabled/Disabled list for the dynamic debugging features. */
bool dyn_debug[__B43_NR_DYNDBG];
- /* Dentries for the dynamic debugging entries. */
- struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG];
};
bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature);
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index e7e4293c01f2..6b0e8d117061 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -336,24 +336,14 @@ int b43legacy_debug(struct b43legacy_wldev *dev, enum b43legacy_dyndbg feature)
return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]);
}
-static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev)
-{
- struct b43legacy_dfsentry *e = dev->dfsentry;
- int i;
-
- for (i = 0; i < __B43legacy_NR_DYNDBG; i++)
- debugfs_remove(e->dyn_debug_dentries[i]);
-}
-
static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev)
{
struct b43legacy_dfsentry *e = dev->dfsentry;
#define add_dyn_dbg(name, id, initstate) do { \
e->dyn_debug[id] = (initstate); \
- e->dyn_debug_dentries[id] = \
- debugfs_create_bool(name, 0600, e->subdir, \
- &(e->dyn_debug[id])); \
+ debugfs_create_bool(name, 0600, e->subdir, \
+ &(e->dyn_debug[id])); \
} while (0)
add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false);
@@ -396,11 +386,9 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- e->file_##name.dentry = \
- debugfs_create_file(__stringify(name), \
- mode, e->subdir, dev, \
- &fops_##name.fops); \
- e->file_##name.dentry = NULL; \
+ debugfs_create_file(__stringify(name), mode, \
+ e->subdir, dev, \
+ &fops_##name.fops); \
} while (0)
@@ -424,13 +412,6 @@ void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev)
e = dev->dfsentry;
if (!e)
return;
- b43legacy_remove_dynamic_debug(dev);
-
- debugfs_remove(e->file_tsf.dentry);
- debugfs_remove(e->file_ucode_regs.dentry);
- debugfs_remove(e->file_shm.dentry);
- debugfs_remove(e->file_txstat.dentry);
- debugfs_remove(e->file_restart.dentry);
debugfs_remove(e->subdir);
kfree(e->txstatlog.log);
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.h b/drivers/net/wireless/broadcom/b43legacy/debugfs.h
index 7a37764406b1..924130880dfe 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.h
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.h
@@ -28,7 +28,6 @@ struct b43legacy_txstatus_log {
};
struct b43legacy_dfs_file {
- struct dentry *dentry;
char *buffer;
size_t data_len;
};
@@ -49,8 +48,6 @@ struct b43legacy_dfsentry {
/* Enabled/Disabled list for the dynamic debugging features. */
bool dyn_debug[__B43legacy_NR_DYNDBG];
- /* Dentries for the dynamic debugging entries. */
- struct dentry *dyn_debug_dentries[__B43legacy_NR_DYNDBG];
};
int b43legacy_debug(struct b43legacy_wldev *dev,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 2403490cbc26..b4b1f75b9c2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
+ bool hw_match = false;
u32 size = 0;
int ret;
@@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break;
}
+ if (hw_match)
+ break;
+
mac_type = le16_to_cpup((__le16 *)data);
rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
@@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
mac_type, rf_id);
- if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
- rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
- IWL_DEBUG_FW(trans,
- "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
- CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
- ret = -ENOENT;
- goto out;
- }
-
+ if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+ rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+ hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
struct iwl_pnvm_section *section = (void *)data;
@@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
}
done:
+ if (!hw_match) {
+ IWL_DEBUG_FW(trans,
+ "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+ CSR_HW_REV_TYPE(trans->hw_rev),
+ CSR_HW_RFID_TYPE(trans->hw_rf_id));
+ ret = -ENOENT;
+ goto out;
+ }
+
if (!size) {
IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
ret = -ENOENT;
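
The PNVM change turns a fail-on-first-mismatch check into a flag that accumulates across the whole TLV walk and is only evaluated once every section has been parsed. A self-contained sketch of that accumulate-then-decide parsing shape, with hypothetical TLV types rather than the iwlwifi firmware format:

#include <stdbool.h>
#include <stdio.h>

struct tlv {
	int type;
	int value;
};

#define TLV_HW_TYPE	1
#define TLV_DATA	2

/* Parse every TLV; remember whether any HW_TYPE entry matched our hw. */
static int parse_section(const struct tlv *tlvs, int n, int our_hw)
{
	bool hw_match = false;
	int i, size = 0;

	for (i = 0; i < n; i++) {
		switch (tlvs[i].type) {
		case TLV_HW_TYPE:
			if (hw_match)
				break;		/* already matched, skip further checks */
			if (tlvs[i].value == our_hw)
				hw_match = true;
			break;
		case TLV_DATA:
			size += tlvs[i].value;
			break;
		}
	}

	if (!hw_match)
		return -1;	/* decide only after the whole section is seen */
	if (!size)
		return -1;
	printf("loaded %d bytes\n", size);
	return 0;
}

int main(void)
{
	const struct tlv tlvs[] = {
		{ TLV_HW_TYPE, 7 }, { TLV_DATA, 128 }, { TLV_HW_TYPE, 9 },
	};

	return parse_section(tlvs, 3, 7) ? 1 : 0;
}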
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 16baee3d52ae..0b8a0cd3b652 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
+/* SoF with JF2 */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* SoF with JF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+ iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+
+/* So with JF2 */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* So with JF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 94228b316df1..46517515ba72 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1231,7 +1231,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- u32 tmp;
+ u32 *cookie;
card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
sizeof(u32),
@@ -1242,13 +1242,11 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
"dma_alloc_coherent failed!\n");
return -ENOMEM;
}
+ cookie = (u32 *)card->sleep_cookie_vbase;
/* Init val of Sleep Cookie */
- tmp = FW_AWAKE_COOKIE;
- put_unaligned(tmp, card->sleep_cookie_vbase);
+ *cookie = FW_AWAKE_COOKIE;
- mwifiex_dbg(adapter, INFO,
- "alloc_scook: sleep cookie=0x%x\n",
- get_unaligned(card->sleep_cookie_vbase));
+ mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", *cookie);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 863aa18b3024..43960770a9af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
- return MT_CIPHER_NONE;
+ return MCU_CIPHER_NONE;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index edd3ba3a0c2d..e68a562cc5b4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -1073,7 +1073,8 @@ enum {
};
enum mcu_cipher_type {
- MCU_CIPHER_WEP40 = 1,
+ MCU_CIPHER_NONE = 0,
+ MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 7fd21049ff5a..63ec140c9c37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -389,6 +389,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_WEP104:
if (!mvif->wep_sta)
return -EOPNOTSUPP;
+ break;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index c2c4dc196802..9fbaacc67cfa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
- return MT_CIPHER_NONE;
+ return MCU_CIPHER_NONE;
}
}
@@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
dev_dbg(dev->mt76.dev, "Firmware is already download\n");
- return -EIO;
+ goto fw_loaded;
}
ret = mt7921_load_patch(dev);
@@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
return -EIO;
}
+fw_loaded:
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index d76cf8f8dfdf..de3c091f6736 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -199,7 +199,8 @@ struct sta_rec_sec {
} __packed;
enum mcu_cipher_type {
- MCU_CIPHER_WEP40 = 1,
+ MCU_CIPHER_NONE = 0,
+ MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e500b8405f8f..5669f17b395f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6784,7 +6784,7 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
if (pdev_data->family && pdev_data->family->nvs_name) {
nvs_name = pdev_data->family->nvs_name;
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
nvs_name, &pdev->dev, GFP_KERNEL,
wl, wlcore_nvs_cb);
if (ret < 0) {
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 1df959532c7d..514f2c1124b6 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -136,6 +136,29 @@ static struct ieee80211_supported_band band_5ghz = {
/* Assigned at module init. Guaranteed locally-administered and unicast. */
static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+static void virt_wifi_inform_bss(struct wiphy *wiphy)
+{
+ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+ struct cfg80211_bss *informed_bss;
+ static const struct {
+ u8 tag;
+ u8 len;
+ u8 ssid[8];
+ } __packed ssid = {
+ .tag = WLAN_EID_SSID,
+ .len = 8,
+ .ssid = "VirtWifi",
+ };
+
+ informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+ CFG80211_BSS_FTYPE_PRESP,
+ fake_router_bssid, tsf,
+ WLAN_CAPABILITY_ESS, 0,
+ (void *)&ssid, sizeof(ssid),
+ DBM_TO_MBM(-50), GFP_KERNEL);
+ cfg80211_put_bss(wiphy, informed_bss);
+}
+
/* Called with the rtnl lock held. */
static int virt_wifi_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
@@ -156,28 +179,13 @@ static int virt_wifi_scan(struct wiphy *wiphy,
/* Acquires and releases the rdev BSS lock. */
static void virt_wifi_scan_result(struct work_struct *work)
{
- struct {
- u8 tag;
- u8 len;
- u8 ssid[8];
- } __packed ssid = {
- .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
- };
- struct cfg80211_bss *informed_bss;
struct virt_wifi_wiphy_priv *priv =
container_of(work, struct virt_wifi_wiphy_priv,
scan_result.work);
struct wiphy *wiphy = priv_to_wiphy(priv);
struct cfg80211_scan_info scan_info = { .aborted = false };
- u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
- informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
- CFG80211_BSS_FTYPE_PRESP,
- fake_router_bssid, tsf,
- WLAN_CAPABILITY_ESS, 0,
- (void *)&ssid, sizeof(ssid),
- DBM_TO_MBM(-50), GFP_KERNEL);
- cfg80211_put_bss(wiphy, informed_bss);
+ virt_wifi_inform_bss(wiphy);
/* Schedules work which acquires and releases the rtnl lock. */
cfg80211_scan_done(priv->scan_request, &scan_info);
@@ -225,10 +233,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
if (!could_schedule)
return -EBUSY;
- if (sme->bssid)
+ if (sme->bssid) {
ether_addr_copy(priv->connect_requested_bss, sme->bssid);
- else
+ } else {
+ virt_wifi_inform_bss(wiphy);
eth_zero_addr(priv->connect_requested_bss);
+ }
wiphy_debug(wiphy, "connect\n");
@@ -241,11 +251,13 @@ static void virt_wifi_connect_complete(struct work_struct *work)
struct virt_wifi_netdev_priv *priv =
container_of(work, struct virt_wifi_netdev_priv, connect.work);
u8 *requested_bss = priv->connect_requested_bss;
- bool has_addr = !is_zero_ether_addr(requested_bss);
bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
u16 status = WLAN_STATUS_SUCCESS;
- if (!priv->is_up || (has_addr && !right_addr))
+ if (is_zero_ether_addr(requested_bss))
+ requested_bss = NULL;
+
+ if (!priv->is_up || (requested_bss && !right_addr))
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
else
priv->is_connected = true;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
index 804e6c4f2c78..519361ec40df 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = {
int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
{
- int array_size = ARRAY_SIZE(modem_cfg);
-
- if (index >= array_size) {
- pr_err("index: %d and array_size %d", index, array_size);
+ if (index >= ARRAY_SIZE(modem_cfg)) {
+ pr_err("index: %d and array size %zu", index,
+ ARRAY_SIZE(modem_cfg));
return -ECHRNG;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 46f76e8aae92..0a472ce77370 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
return -EIO;
}
- /* check for the interafce id
- * if if_id 1 to 8 then create IP MUX channel sessions.
- * To start MUX session from 0 as network interface id would start
- * from 1 so map it to if_id = if_id - 1
- */
- if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
- return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
-
- return -EINVAL;
+ return ipc_mux_open_session(ipc_imem->mux, if_id);
}
/* Release a net link to CP. */
@@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
{
if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
if_id <= IP_MUX_SESSION_END)
- ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+ ipc_mux_close_session(ipc_imem->mux, if_id);
}
/* Tasklet call to do uplink transfer. */
@@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
goto out;
}
- if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
- /* Route the UL packet through IP MUX Layer */
- ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
- if_id - 1, skb);
- else
- dev_err(ipc_imem->dev,
- "invalid if_id %d: ", if_id);
+ /* Route the UL packet through IP MUX Layer */
+ ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
return ret;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index fd356dafbdd6..2007fe23e9a5 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -27,11 +27,11 @@
#define BOOT_CHECK_DEFAULT_TIMEOUT 400
/* IP MUX channel range */
-#define IP_MUX_SESSION_START 1
-#define IP_MUX_SESSION_END 8
+#define IP_MUX_SESSION_START 0
+#define IP_MUX_SESSION_END 7
/* Default IP MUX channel */
-#define IP_MUX_SESSION_DEFAULT 1
+#define IP_MUX_SESSION_DEFAULT 0
/**
* ipc_imem_sys_port_open - Open a port link to CP.
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
index 45e6923da78f..f861994a6d90 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -10,10 +10,10 @@
#define IOSM_CP_VERSION 0x0100UL
/* DL dir Aggregation support mask */
-#define DL_AGGR BIT(23)
+#define DL_AGGR BIT(9)
/* UL dir Aggregation support mask */
-#define UL_AGGR BIT(22)
+#define UL_AGGR BIT(8)
/* UL flow credit support mask */
#define UL_FLOW_CREDIT BIT(21)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index e634ffc6ec08..bdb2d32cdb6d 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
/* Pass the packet to the netif layer. */
dest_skb->priority = service_class;
- return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+ return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}
/* Decode Flow Credit Table in the block */
@@ -320,7 +320,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
return;
}
- ul_credits = fct->vfl.nr_of_bytes;
+ ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
@@ -586,7 +586,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
qlt->reserved[0] = 0;
qlt->reserved[1] = 0;
- qlt->vfl.nr_of_bytes = session->ul_list.qlen;
+ qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
/* Add QLT to the transfer list. */
skb_queue_tail(&ipc_mux->channel->ul_list,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
index 4a74e3c9457f..aae83db5cbb8 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -106,7 +106,7 @@ struct mux_lite_cmdh {
* @nr_of_bytes: Number of bytes available to transmit in the queue.
*/
struct mux_lite_vfl {
- u32 nr_of_bytes;
+ __le32 nr_of_bytes;
};
/**
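
Changing nr_of_bytes to __le32 and wrapping both sides in cpu_to_le32()/le32_to_cpu() keeps the on-wire field little-endian regardless of host byte order. A self-contained sketch of that fixed-endianness field handling, with plain C stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	uint32_t out;

	memcpy(&out, b, 4);	/* bytes now sit in little-endian order */
	return out;
}

static uint32_t from_le32(uint32_t v)
{
	uint8_t b[4];

	memcpy(b, &v, 4);
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

struct wire_msg {
	uint32_t nr_of_bytes;	/* stored little-endian, like __le32 */
};

int main(void)
{
	struct wire_msg msg;
	uint32_t qlen = 1500;

	msg.nr_of_bytes = to_le32(qlen);		/* sender side */
	printf("receiver sees %u\n", from_le32(msg.nr_of_bytes));
	return 0;
}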
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
index 91109e27efd3..35d590743d3a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
@@ -412,8 +412,8 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
}
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
- dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p",
- (void *)p_td->buffer.address, skb->data);
+ dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
+ (unsigned long long)p_td->buffer.address, skb->data);
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
skb = NULL;
goto ret;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
index 2229d752926c..d12188ffed7e 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
@@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent)
/* Store the device and event information */
info->dev = dev;
- snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+ snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
/* Schedule uevent in process context using work queue */
schedule_work(&info->work);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index c999c64001f4..b571d9cedba4 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ unsigned int len = skb->len;
int if_id = priv->if_id;
int ret;
@@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
/* Return code of zero is success */
if (ret == 0) {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += len;
ret = NETDEV_TX_OK;
} else if (ret == -EBUSY) {
ret = NETDEV_TX_BUSY;
@@ -140,7 +143,8 @@ exit:
ret);
dev_kfree_skb_any(skb);
- return ret;
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
/* Ops structure for wwan net link */
@@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
iosm_dev->priv_flags |= IFF_NO_QUEUE;
iosm_dev->type = ARPHRD_NONE;
+ iosm_dev->mtu = ETH_DATA_LEN;
iosm_dev->min_mtu = ETH_MIN_MTU;
iosm_dev->max_mtu = ETH_MAX_MTU;
@@ -223,7 +228,7 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
/* unregistering includes synchronize_net() */
- unregister_netdevice(dev);
+ unregister_netdevice_queue(dev, head);
unlock:
mutex_unlock(&ipc_wwan->if_mutex);
@@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
skb->pkt_type = PACKET_HOST;
- if (if_id < (IP_MUX_SESSION_START - 1) ||
- if_id > (IP_MUX_SESSION_END - 1)) {
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id > IP_MUX_SESSION_END) {
ret = -EINVAL;
goto free;
}
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
index 1bc6b69aa530..d0a98f34c54d 100644
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -41,14 +41,14 @@ struct mhi_wwan_dev {
/* Increment RX budget and schedule RX refill if necessary */
static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
{
- spin_lock(&mhiwwan->rx_lock);
+ spin_lock_bh(&mhiwwan->rx_lock);
mhiwwan->rx_budget++;
if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
schedule_work(&mhiwwan->rx_refill);
- spin_unlock(&mhiwwan->rx_lock);
+ spin_unlock_bh(&mhiwwan->rx_lock);
}
/* Decrement RX budget if non-zero and return true on success */
@@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
{
bool ret = false;
- spin_lock(&mhiwwan->rx_lock);
+ spin_lock_bh(&mhiwwan->rx_lock);
if (mhiwwan->rx_budget) {
mhiwwan->rx_budget--;
@@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
ret = true;
}
- spin_unlock(&mhiwwan->rx_lock);
+ spin_unlock_bh(&mhiwwan->rx_lock);
return ret;
}
@@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port)
int ret;
/* Start mhi device's channel(s) */
- ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
+ ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0);
if (ret)
return ret;
@@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port)
{
struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
- spin_lock(&mhiwwan->rx_lock);
+ spin_lock_bh(&mhiwwan->rx_lock);
clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
- spin_unlock(&mhiwwan->rx_lock);
+ spin_unlock_bh(&mhiwwan->rx_lock);
cancel_work_sync(&mhiwwan->rx_refill);
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 3e16c318e705..35ece98134c0 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -164,11 +164,14 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
goto done_unlock;
id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ wwandev = ERR_PTR(id);
goto done_unlock;
+ }
wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
if (!wwandev) {
+ wwandev = ERR_PTR(-ENOMEM);
ida_free(&wwan_dev_ids, id);
goto done_unlock;
}
@@ -182,7 +185,8 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
err = device_register(&wwandev->dev);
if (err) {
put_device(&wwandev->dev);
- wwandev = NULL;
+ wwandev = ERR_PTR(err);
+ goto done_unlock;
}
done_unlock:
@@ -984,6 +988,8 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
goto unlock;
}
+ rtnl_configure_link(dev, NULL); /* Link initialized, notify new link */
+
unlock:
rtnl_unlock();
@@ -1012,8 +1018,8 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
return -EINVAL;
wwandev = wwan_create_dev(parent);
- if (!wwandev)
- return -ENOMEM;
+ if (IS_ERR(wwandev))
+ return PTR_ERR(wwandev);
if (WARN_ON(wwandev->ops)) {
wwan_remove_dev(wwandev);
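
wwan_create_dev() now encodes the exact errno in the returned pointer instead of collapsing every failure to NULL, and the caller tests it with IS_ERR()/PTR_ERR(). A small userspace sketch of that error-pointer convention, with simplified macros rather than the kernel's err.h:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

static void *create_dev(int fail_with)
{
	void *dev;

	if (fail_with)
		return ERR_PTR(-fail_with);	/* report *why* it failed */

	dev = malloc(64);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	return dev;
}

int main(void)
{
	void *dev = create_dev(EBUSY);

	if (IS_ERR(dev)) {
		printf("create_dev failed: %ld\n", PTR_ERR(dev));
		return 1;
	}
	free(dev);
	return 0;
}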
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index a9864fcdfba6..dd27c85190d3 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
if (!IS_ERR(skb))
dev_kfree_skb(skb);
-
- skb = ERR_PTR(-ENODEV);
+ return;
}
dev->cb(dev->nfc_digital_dev, dev->arg, skb);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index eb5d7a5beac7..e3e72b8a29e3 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -423,7 +423,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot allocate shash (code=%d)\n", ret);
+ "Cannot allocate shash (code=%pe)\n", tfm);
goto out;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 3a777d0073b7..e6aa87043a95 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -363,8 +363,13 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
nvdimm_bus->dev.groups = nd_desc->attr_groups;
nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
- dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
- rc = device_register(&nvdimm_bus->dev);
+ device_initialize(&nvdimm_bus->dev);
+ device_set_pm_not_required(&nvdimm_bus->dev);
+ rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(&nvdimm_bus->dev);
if (rc) {
dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
goto err;
@@ -396,21 +401,10 @@ static int child_unregister(struct device *dev, void *data)
if (dev->class)
return 0;
- if (is_nvdimm(dev)) {
- struct nvdimm *nvdimm = to_nvdimm(dev);
- bool dev_put = false;
-
- /* We are shutting down. Make state frozen artificially. */
- nvdimm_bus_lock(dev);
- set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
- if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
- dev_put = true;
- nvdimm_bus_unlock(dev);
- cancel_delayed_work_sync(&nvdimm->dwork);
- if (dev_put)
- put_device(dev);
- }
- nd_device_unregister(dev, ND_SYNC);
+ if (is_nvdimm(dev))
+ nvdimm_delete(to_nvdimm(dev));
+ else
+ nd_device_unregister(dev, ND_SYNC);
return 0;
}
@@ -536,6 +530,7 @@ void __nd_device_register(struct device *dev)
set_dev_node(dev, to_nd_region(dev)->numa_node);
dev->bus = &nvdimm_bus_type;
+ device_set_pm_not_required(dev);
if (dev->parent) {
get_device(dev->parent);
if (dev_to_node(dev) == NUMA_NO_NODE)
@@ -728,18 +723,41 @@ const struct attribute_group nd_numa_attribute_group = {
.is_visible = nd_numa_attr_visible,
};
+static void ndctl_release(struct device *dev)
+{
+ kfree(dev);
+}
+
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
struct device *dev;
+ int rc;
- dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
- "ndctl%d", nvdimm_bus->id);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->class = nd_class;
+ dev->parent = &nvdimm_bus->dev;
+ dev->devt = devt;
+ dev->release = ndctl_release;
+ rc = dev_set_name(dev, "ndctl%d", nvdimm_bus->id);
+ if (rc)
+ goto err;
- if (IS_ERR(dev))
- dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
- nvdimm_bus->id, PTR_ERR(dev));
- return PTR_ERR_OR_ZERO(dev);
+ rc = device_add(dev);
+ if (rc) {
+ dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %d\n",
+ nvdimm_bus->id, rc);
+ goto err;
+ }
+ return 0;
+
+err:
+ put_device(dev);
+ return rc;
}
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
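The bus.c hunks above replace device_create()/device_register() with the open-coded device_initialize() + dev_set_name() + device_add() sequence, so that a failing name allocation can be unwound with put_device() and the release callback frees the memory. A hypothetical sketch of that two-step registration pattern, under assumed names:

#include <linux/device.h>
#include <linux/slab.h>

static void bar_release(struct device *dev)
{
	kfree(dev);
}

static int bar_add_device(struct device *parent)
{
	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	device_initialize(dev);		/* refcount is live from here on */
	dev->parent = parent;
	dev->release = bar_release;

	rc = dev_set_name(dev, "bar%d", 0);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;
	return 0;

err:
	put_device(dev);		/* drops the ref; bar_release() frees dev */
	return rc;
}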
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 9d208570d059..dc7449a40003 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -642,6 +642,24 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(__nvdimm_create);
+void nvdimm_delete(struct nvdimm *nvdimm)
+{
+ struct device *dev = &nvdimm->dev;
+ bool dev_put = false;
+
+ /* We are shutting down. Make state frozen artificially. */
+ nvdimm_bus_lock(dev);
+ set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
+ if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
+ dev_put = true;
+ nvdimm_bus_unlock(dev);
+ cancel_delayed_work_sync(&nvdimm->dwork);
+ if (dev_put)
+ put_device(dev);
+ nd_device_unregister(dev, ND_SYNC);
+}
+EXPORT_SYMBOL_GPL(nvdimm_delete);
+
static void shutdown_security_notify(void *data)
{
struct nvdimm *nvdimm = data;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2403b71b601e..745478213ff2 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2527,7 +2527,7 @@ static void deactivate_labels(void *region)
static int init_active_labels(struct nd_region *nd_region)
{
- int i;
+ int i, rc = 0;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
@@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region)
else if (test_bit(NDD_LABELING, &nvdimm->flags))
/* fail, labels needed to disambiguate dpa */;
else
- return 0;
+ continue;
dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
dev_name(&nd_mapping->nvdimm->dev),
test_bit(NDD_LOCKED, &nvdimm->flags)
? "locked" : "disabled");
- return -ENXIO;
+ rc = -ENXIO;
+ goto out;
}
nd_mapping->ndd = ndd;
atomic_inc(&nvdimm->busy);
@@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region)
break;
}
- if (i < nd_region->ndr_mappings) {
+ if (i < nd_region->ndr_mappings)
+ rc = -ENOMEM;
+
+out:
+ if (rc) {
deactivate_labels(nd_region);
- return -ENOMEM;
+ return rc;
}
return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
- nd_region);
+ nd_region);
}
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 80c656dcbbac..dfd9dec0c1f6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -609,6 +609,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
+ nvme_req(req)->status = 0;
nvme_req(req)->retries = 0;
nvme_req(req)->flags = 0;
req->rq_flags |= RQF_DONTPREP;
@@ -631,6 +632,8 @@ static inline void nvme_init_request(struct request *req,
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ if (req->mq_hctx->type == HCTX_TYPE_POLL)
+ req->cmd_flags |= REQ_HIPRI;
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
@@ -897,7 +900,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- cmnd->write_zeroes.control = 0;
+ if (nvme_ns_has_pi(ns))
+ cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+ else
+ cmnd->write_zeroes.control = 0;
return BLK_STS_OK;
}
@@ -1029,29 +1035,23 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
-static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
-{
- struct completion *waiting = rq->end_io_data;
-
- rq->end_io_data = NULL;
- complete(waiting);
-}
-
-static void nvme_execute_rq_polled(struct request_queue *q,
- struct gendisk *bd_disk, struct request *rq, int at_head)
+/*
+ * Return values:
+ * 0: success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
+ bool at_head)
{
- DECLARE_COMPLETION_ONSTACK(wait);
-
- WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
+ blk_status_t status;
- rq->cmd_flags |= REQ_HIPRI;
- rq->end_io_data = &wait;
- blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
-
- while (!completion_done(&wait)) {
- blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
- cond_resched();
- }
+ status = blk_execute_rq(disk, rq, at_head);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ return -EINTR;
+ if (nvme_req(rq)->status)
+ return nvme_req(rq)->status;
+ return blk_status_to_errno(status);
}
/*
@@ -1061,7 +1061,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags, bool poll)
+ blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1082,16 +1082,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- if (poll)
- nvme_execute_rq_polled(req->q, NULL, req, at_head);
- else
- blk_execute_rq(NULL, req, at_head);
- if (result)
+ ret = nvme_execute_rq(NULL, req, at_head);
+ if (result && ret >= 0)
*result = nvme_req(req)->result;
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = nvme_req(req)->status;
out:
blk_mq_free_request(req);
return ret;
@@ -1102,7 +1095,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1179,18 +1172,21 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
}
}
-void nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq)
{
struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
u32 effects;
+ int ret;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- blk_execute_rq(disk, rq, 0);
+ ret = nvme_execute_rq(disk, rq, false);
if (effects) /* nothing to be done for zero cmd effects */
nvme_passthru_end(ctrl, effects);
+
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1465,7 +1461,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
+ buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2047,7 +2043,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
- NVME_QID_ANY, 1, 0, false);
+ NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
@@ -3814,6 +3810,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
static void nvme_ns_remove(struct nvme_ns *ns)
{
+ bool last_path = false;
+
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
@@ -3822,8 +3820,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
- if (list_empty(&ns->head->list))
- list_del_init(&ns->head->entry);
mutex_unlock(&ns->ctrl->subsys->lock);
synchronize_rcu(); /* guarantee not available in head->list */
@@ -3843,7 +3839,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- nvme_mpath_check_last_path(ns);
+ /* Synchronize with nvme_init_ns_head() */
+ mutex_lock(&ns->head->subsys->lock);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
+ mutex_unlock(&ns->head->subsys->lock);
+ if (last_path)
+ nvme_mpath_shutdown_disk(ns->head);
nvme_put_ns(ns);
}
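The core.c hunks above give nvme_execute_rq() and nvme_execute_passthru_rq() a tri-state return: 0 on success, a positive NVMe CQE status code, or a negative kernel errno (for example -EINTR when the request was cancelled). A hypothetical caller-side sketch of interpreting that convention, not taken from this patch:

#include <linux/printk.h>

static int handle_passthru_result(int ret)
{
	if (ret == 0)
		return 0;			/* command completed successfully */
	if (ret < 0)
		return ret;			/* kernel error in lieu of a controller response */
	pr_warn("nvme status 0x%x\n", ret);	/* controller reported an error status */
	return -EIO;
}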
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 1e6a7cc056ca..a5469fd9d4c3 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -154,7 +154,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -200,7 +200,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -245,7 +245,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -391,7 +391,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data), 0, NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -415,7 +415,6 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* @qid: NVMe I/O queue number for the new I/O connection between
* host and target (note qid == 0 is illegal as this is
* the Admin queue, per NVMe standard).
- * @poll: Whether or not to poll for the completion of the connect cmd.
*
* This function issues a fabrics-protocol connection
* of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
@@ -427,7 +426,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* > 0: NVMe error status code
* < 0: Linux errno error code
*/
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
struct nvme_command cmd = { };
struct nvmf_connect_data *data;
@@ -453,7 +452,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), 0, qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c31dad69a773..a146cb903869 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -182,7 +182,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7600863f7752..b08a61ca283f 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -9,7 +9,7 @@
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
-
+#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
@@ -2346,7 +2346,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
(qsize / 5));
if (ret)
break;
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
break;
@@ -3808,10 +3808,80 @@ process_local_list:
return count;
}
+
+/* Parse the cgroup id from a buf and return the length of cgrpid */
+static int fc_parse_cgrpid(const char *buf, u64 *id)
+{
+ char cgrp_id[16+1];
+ int cgrpid_len, j;
+
+ memset(cgrp_id, 0x0, sizeof(cgrp_id));
+ for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
+ if (buf[cgrpid_len] != ':')
+ cgrp_id[cgrpid_len] = buf[cgrpid_len];
+ else {
+ j = 1;
+ break;
+ }
+ }
+ if (!j)
+ return -EINVAL;
+ if (kstrtou64(cgrp_id, 16, id) < 0)
+ return -EINVAL;
+ return cgrpid_len;
+}
+
+/*
+ * fc_update_appid: Parse and update the appid in the blkcg associated with
+ * cgroupid.
+ * @buf: buffer containing both the cgrpid and the appid
+ * @count: size of the buffer
+ */
+static int fc_update_appid(const char *buf, size_t count)
+{
+ u64 cgrp_id;
+ int appid_len = 0;
+ int cgrpid_len = 0;
+ char app_id[FC_APPID_LEN];
+ int ret = 0;
+
+ if (buf[count-1] == '\n')
+ count--;
+
+ if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
+ return -EINVAL;
+
+ cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
+ if (cgrpid_len < 0)
+ return -EINVAL;
+ appid_len = count - cgrpid_len - 1;
+ if (appid_len > FC_APPID_LEN)
+ return -EINVAL;
+
+ memset(app_id, 0x0, sizeof(app_id));
+ memcpy(app_id, &buf[cgrpid_len+1], appid_len);
+ ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t fc_appid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+
+ ret = fc_update_appid(buf, count);
+ if (ret < 0)
+ return -EINVAL;
+ return count;
+}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
static struct attribute *nvme_fc_attrs[] = {
&dev_attr_nvme_discovery.attr,
+ &dev_attr_appid_store.attr,
NULL
};
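The new fc_appid_store() attribute above expects a buffer of the form "<cgroup id in hex>:<application id>". A standalone sketch of the same split, using strchr() instead of the manual scan, only to illustrate the expected input format (hypothetical helper, not part of the patch):

#include <linux/kernel.h>
#include <linux/string.h>

static int demo_parse_appid(const char *buf, u64 *cgrp_id,
			    char *app_id, size_t app_len)
{
	const char *sep = strchr(buf, ':');
	char tmp[17] = { };

	if (!sep || sep - buf > 16)
		return -EINVAL;
	memcpy(tmp, buf, sep - buf);		/* hex cgroup id before the ':' */
	if (kstrtou64(tmp, 16, cgrp_id))
		return -EINVAL;
	if (strlen(sep + 1) >= app_len)
		return -EINVAL;
	strscpy(app_id, sep + 1, app_len);	/* application id after the ':' */
	return 0;
}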
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d93928d1e5bd..305ddd415e45 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -93,11 +93,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
}
}
- nvme_execute_passthru_rq(req);
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = nvme_req(req)->status;
+ ret = nvme_execute_passthru_rq(req);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0ea5298469c3..3f32c5e86bfc 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -760,14 +760,21 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
#endif
}
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
+ kblockd_schedule_work(&head->requeue_work);
if (head->disk->flags & GENHD_FL_UP) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 75420ceacc10..5cd1fa3b8464 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -658,7 +658,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags, bool poll);
+ blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -716,14 +716,7 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
- struct nvme_ns_head *head = ns->head;
-
- if (head->disk && list_empty(&head->list))
- kblockd_schedule_work(&head->requeue_work);
-}
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
static inline void nvme_trace_bio_complete(struct request *req)
{
@@ -772,7 +765,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
@@ -876,7 +869,7 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
-void nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d3c5086673bc..51852085239e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
wmb(); /* ensure the first interrupt sees the initialization */
}
+/*
+ * Try getting shutdown_lock while setting up IO queues.
+ */
+static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+{
+ /*
+ * Give up if the lock is being held by nvme_dev_disable.
+ */
+ if (!mutex_trylock(&dev->shutdown_lock))
+ return -ENODEV;
+
+ /*
+ * Controller is in wrong state, fail early.
+ */
+ if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+ mutex_unlock(&dev->shutdown_lock);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
@@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
goto release_cq;
nvmeq->cq_vector = vector;
- nvme_init_queue(nvmeq, qid);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ nvme_init_queue(nvmeq, qid);
if (!polled) {
result = queue_request_irq(nvmeq);
if (result < 0)
@@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
}
set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+ mutex_unlock(&dev->shutdown_lock);
return result;
release_sq:
dev->online_queues--;
+ mutex_unlock(&dev->shutdown_lock);
adapter_delete_sq(dev, qid);
release_cq:
adapter_delete_cq(dev, qid);
@@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- clear_bit(NVMEQ_ENABLED, &adminq->flags);
+ /*
+ * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
+ * from set to unset. If there is a window before it is truly freed,
+ * pci_free_irq_vectors() jumping into this window will crash.
+ * Also take the lock to avoid racing with pci_free_irq_vectors() in
+ * the nvme_dev_disable() path.
+ */
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
result = nvme_remap_bar(dev, size);
if (!result)
break;
- if (!--nr_io_queues)
- return -ENOMEM;
+ if (!--nr_io_queues) {
+ result = -ENOMEM;
+ goto out_unlock;
+ }
} while (1);
adminq->q_db = dev->dbs;
retry:
/* Deregister the admin queue's interrupt */
- pci_free_irq(pdev, 0, adminq);
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
/*
* If we enable msix early due to not intx, disable it again before
@@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
pci_free_irq_vectors(pdev);
result = nvme_setup_irqs(dev, nr_io_queues);
- if (result <= 0)
- return -EIO;
+ if (result <= 0) {
+ result = -EIO;
+ goto out_unlock;
+ }
dev->num_vecs = result;
result = max(result - 1, 1);
@@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
result = queue_request_irq(adminq);
if (result)
- return result;
+ goto out_unlock;
set_bit(NVMEQ_ENABLED, &adminq->flags);
+ mutex_unlock(&dev->shutdown_lock);
result = nvme_create_io_queues(dev);
if (result || dev->online_queues < 2)
@@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (dev->online_queues - 1 < dev->max_qid) {
nr_io_queues = dev->online_queues - 1;
nvme_disable_io_queues(dev);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
nvme_suspend_io_queues(dev);
goto retry;
}
@@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->io_queues[HCTX_TYPE_READ],
dev->io_queues[HCTX_TYPE_POLL]);
return 0;
+out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ return result;
}
static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2581,7 +2631,9 @@ static void nvme_reset_work(struct work_struct *work)
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+ if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+ dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+ dev->ctrl.state);
result = -ENODEV;
goto out;
}
@@ -2962,7 +3014,6 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true);
- nvme_dev_remove_admin(dev);
}
flush_work(&dev->ctrl.reset_work);
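The pci.c hunks above guard I/O queue setup with nvme_setup_io_queues_trylock(): rather than blocking on shutdown_lock, setup simply gives up when teardown already owns it, and re-checks the controller state once the lock is held. A minimal sketch of that trylock-guarded pattern, with hypothetical names:

#include <linux/mutex.h>

static int setup_with_trylock(struct mutex *lock, bool still_valid)
{
	if (!mutex_trylock(lock))
		return -ENODEV;		/* teardown holds the lock, bail out */
	if (!still_valid) {
		mutex_unlock(lock);
		return -ENODEV;		/* state changed before we got the lock */
	}
	/* ... do the setup work while holding the lock ... */
	mutex_unlock(lock);
	return 0;
}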
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a9e70cefd7ed..7f6b3a991501 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -680,11 +680,10 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{
struct nvme_rdma_queue *queue = &ctrl->queues[idx];
- bool poll = nvme_rdma_poll_queue(queue);
int ret;
if (idx)
- ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c7bd37103cf4..8cb15ee5b249 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -123,7 +123,6 @@ struct nvme_tcp_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct sockaddr_storage addr;
struct sockaddr_storage src_addr;
- struct net_device *ndev;
struct nvme_ctrl ctrl;
struct work_struct err_work;
@@ -1574,7 +1573,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
int ret;
if (idx)
- ret = nvmf_connect_io_queue(nctrl, idx, false);
+ ret = nvmf_connect_io_queue(nctrl, idx);
else
ret = nvmf_connect_admin_queue(nctrl);
@@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
}
if (opts->mask & NVMF_OPT_HOST_IFACE) {
- ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface);
- if (!ctrl->ndev) {
+ if (!__dev_get_by_name(&init_net, opts->host_iface)) {
pr_err("invalid interface passed: %s\n",
opts->host_iface);
ret = -ENODEV;
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index daaf700eae79..35bac7a25422 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -56,7 +56,7 @@ TRACE_EVENT(nvme_setup_cmd,
__field(u8, fctype)
__field(u16, cid)
__field(u32, nsid)
- __field(u64, metadata)
+ __field(bool, metadata)
__array(u8, cdw10, 24)
),
TP_fast_assign(
@@ -66,13 +66,13 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->flags = cmd->common.flags;
__entry->cid = cmd->common.command_id;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ __entry->metadata = !!blk_integrity_rq(req);
__entry->fctype = cmd->fabrics.fctype;
__assign_disk_name(__entry->disk, req->rq_disk);
memcpy(__entry->cdw10, &cmd->common.cdw10,
sizeof(__entry->cdw10));
),
- TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a5c4a1865026..3a17a7e26bbf 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -337,7 +337,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
return ret;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index fced52de33ce..225cd1ffbe45 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -153,11 +153,10 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
- u16 status;
+ int status;
- nvme_execute_passthru_rq(rq);
+ status = nvme_execute_passthru_rq(rq);
- status = nvme_req(rq)->status;
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
@@ -168,7 +167,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
nvmet_passthru_override_id_ns(req);
break;
}
- }
+ } else if (status < 0)
+ status = NVME_SC_INTERNAL;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d8aceef83284..07ee347ea3f3 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1497,7 +1497,6 @@ static void nvmet_tcp_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
/* FALLTHRU */
- sk->sk_user_data = NULL;
nvmet_tcp_schedule_release_queue(queue);
break;
default:
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index bca671ff4e54..b3bc30a04ed7 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -180,6 +180,7 @@ static const char * const nvmem_type_str[] = {
[NVMEM_TYPE_EEPROM] = "EEPROM",
[NVMEM_TYPE_OTP] = "OTP",
[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
+ [NVMEM_TYPE_FRAM] = "FRAM",
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -359,6 +360,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
+ if (config->type == NVMEM_TYPE_FRAM)
+ bin_attr_nvmem_eeprom_compat.attr.name = "fram";
+
nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
@@ -686,15 +690,17 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
continue;
if (len < 2 * sizeof(u32)) {
dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ of_node_put(child);
return -EINVAL;
}
cell = kzalloc(sizeof(*cell), GFP_KERNEL);
- if (!cell)
+ if (!cell) {
+ of_node_put(child);
return -ENOMEM;
+ }
cell->nvmem = nvmem;
- cell->np = of_node_get(child);
cell->offset = be32_to_cpup(addr++);
cell->bytes = be32_to_cpup(addr);
cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -715,11 +721,12 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
cell->name, nvmem->stride);
/* Cells already added will be freed later. */
kfree_const(cell->name);
- of_node_put(cell->np);
kfree(cell);
+ of_node_put(child);
return -EINVAL;
}
+ cell->np = of_node_get(child);
nvmem_cell_add(cell);
}
@@ -789,7 +796,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->reg_write = config->reg_write;
nvmem->keepout = config->keepout;
nvmem->nkeepout = config->nkeepout;
- if (!config->no_of_node)
+ if (config->of_node)
+ nvmem->dev.of_node = config->of_node;
+ else if (!config->no_of_node)
nvmem->dev.of_node = config->dev->of_node;
switch (config->id) {
@@ -1606,9 +1615,9 @@ int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
-static void *nvmem_cell_read_variable_common(struct device *dev,
- const char *cell_id,
- size_t max_len, size_t *len)
+static const void *nvmem_cell_read_variable_common(struct device *dev,
+ const char *cell_id,
+ size_t max_len, size_t *len)
{
struct nvmem_cell *cell;
int nbits;
@@ -1652,7 +1661,7 @@ int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
u32 *val)
{
size_t len;
- u8 *buf;
+ const u8 *buf;
int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
@@ -1683,7 +1692,7 @@ int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
u64 *val)
{
size_t len;
- u8 *buf;
+ const u8 *buf;
int i;
buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
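The nvmem_add_cells_from_of() hunks above add the of_node_put() calls that were missing on the early-return paths of the child-node loop. A hypothetical sketch of the general rule those hunks follow — every early exit from for_each_child_of_node() must drop the reference the iterator holds on the current child:

#include <linux/of.h>

static int demo_scan_children(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (!of_device_is_available(child)) {
			of_node_put(child);	/* balance the iterator's implicit get */
			return -ENODEV;
		}
	}
	return 0;	/* normal loop exit: child is NULL, nothing to put */
}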
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index d6d3f24685a8..81fbad5e939d 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -122,6 +122,7 @@ static const struct qfprom_soc_compatible_data sc7280_qfprom = {
.keepout = sc7280_qfprom_keepout,
.nkeepout = ARRAY_SIZE(sc7280_qfprom_keepout)
};
+
/**
* qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
* @priv: Our driver data.
@@ -195,9 +196,9 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
}
/*
- * Hardware requires 1.8V min for fuse blowing; this may be
- * a rail shared do don't specify a max--regulator constraints
- * will handle.
+ * Hardware requires a minimum voltage for fuse blowing.
+ * This may be a shared rail so don't specify a maximum.
+ * Regulator constraints will cap to the actual maximum.
*/
ret = regulator_set_voltage(priv->vcc, qfprom_blow_uV, INT_MAX);
if (ret) {
@@ -399,7 +400,7 @@ static int qfprom_probe(struct platform_device *pdev)
if (major_version == 7 && minor_version == 8)
priv->soc_data = &qfprom_7_8_data;
- if (major_version == 7 && minor_version == 15)
+ else if (major_version == 7 && minor_version == 15)
priv->soc_data = &qfprom_7_15_data;
priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 59523245db8a..4f1fcbfec394 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -234,7 +234,7 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
if (status) {
dev_err(efuse->dev,
- "write error status %d of block %d\n", ret, blk);
+ "write error status %u of block %d\n", status, blk);
writel(SPRD_EFUSE_ERR_CLR_MASK,
efuse->base + SPRD_EFUSE_ERR_CLR);
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index e26ef1bbf198..275b9155e473 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -142,6 +142,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
nvmem_cfg->dev = dev;
nvmem_cfg->name = "sunxi-sid";
+ nvmem_cfg->type = NVMEM_TYPE_OTP;
nvmem_cfg->read_only = true;
nvmem_cfg->size = cfg->size;
nvmem_cfg->word_size = 1;
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 18450437d5d5..3dfeae8912df 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -75,9 +75,7 @@ config OF_NET
def_bool y
config OF_RESERVED_MEM
- bool
- depends on OF_EARLY_FLATTREE
- default y if DMA_DECLARE_COHERENT || DMA_CMA
+ def_bool OF_EARLY_FLATTREE
config OF_RESOLVE
bool
diff --git a/drivers/of/address.c b/drivers/of/address.c
index aca94c348bd4..94f017d808c4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -23,9 +23,8 @@
#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
static struct of_bus *of_match_bus(struct device_node *np);
-static int __of_address_to_resource(struct device_node *dev,
- const __be32 *addrp, u64 size, unsigned int flags,
- const char *name, struct resource *r);
+static int __of_address_to_resource(struct device_node *dev, int index,
+ int bar_no, struct resource *r);
static bool of_mmio_is_nonposted(struct device_node *np);
/* Debug utility */
@@ -77,9 +76,7 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr, na);
- pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
- (unsigned long long)cp, (unsigned long long)s,
- (unsigned long long)da);
+ pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
@@ -185,9 +182,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr + 1, na - 1);
- pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
- (unsigned long long)cp, (unsigned long long)s,
- (unsigned long long)da);
+ pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
@@ -198,62 +193,16 @@ static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
return of_bus_default_translate(addr + 1, offset, na - 1);
}
-
-const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
- unsigned int *flags)
-{
- const __be32 *prop;
- unsigned int psize;
- struct device_node *parent;
- struct of_bus *bus;
- int onesize, i, na, ns;
-
- /* Get parent & match bus type */
- parent = of_get_parent(dev);
- if (parent == NULL)
- return NULL;
- bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci")) {
- of_node_put(parent);
- return NULL;
- }
- bus->count_cells(dev, &na, &ns);
- of_node_put(parent);
- if (!OF_CHECK_ADDR_COUNT(na))
- return NULL;
-
- /* Get "reg" or "assigned-addresses" property */
- prop = of_get_property(dev, bus->addresses, &psize);
- if (prop == NULL)
- return NULL;
- psize /= 4;
-
- onesize = na + ns;
- for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
- u32 val = be32_to_cpu(prop[0]);
- if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
- if (size)
- *size = of_read_number(prop + na, ns);
- if (flags)
- *flags = bus->get_flags(prop);
- return prop;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(of_get_pci_address);
+#endif /* CONFIG_PCI */
int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r)
{
- const __be32 *addrp;
- u64 size;
- unsigned int flags;
- addrp = of_get_pci_address(dev, bar, &size, &flags);
- if (addrp == NULL)
- return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
+ if (!IS_ENABLED(CONFIG_PCI))
+ return -ENOSYS;
+
+ return __of_address_to_resource(dev, -1, bar, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
@@ -280,6 +229,9 @@ int of_pci_range_to_resource(struct of_pci_range *range,
res->parent = res->child = res->sibling = NULL;
res->name = np->full_name;
+ if (!IS_ENABLED(CONFIG_PCI))
+ return -ENOSYS;
+
if (res->flags & IORESOURCE_IO) {
unsigned long port;
err = pci_register_io_range(&np->fwnode, range->cpu_addr,
@@ -310,7 +262,6 @@ invalid_range:
return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
-#endif /* CONFIG_PCI */
/*
* ISA bus specific translator
@@ -344,9 +295,7 @@ static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr + 1, na - 1);
- pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
- (unsigned long long)cp, (unsigned long long)s,
- (unsigned long long)da);
+ pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
@@ -501,7 +450,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
finish:
of_dump_addr("parent translation for:", addr, pna);
- pr_debug("with offset: %llx\n", (unsigned long long)offset);
+ pr_debug("with offset: %llx\n", offset);
/* Translate it into parent bus space */
return pbus->translate(addr, offset, pna);
@@ -675,8 +624,8 @@ u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_dma_address);
-const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
- unsigned int *flags)
+const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags)
{
const __be32 *prop;
unsigned int psize;
@@ -689,6 +638,10 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
if (parent == NULL)
return NULL;
bus = of_match_bus(parent);
+ if (strcmp(bus->name, "pci") && (bar_no >= 0)) {
+ of_node_put(parent);
+ return NULL;
+ }
bus->count_cells(dev, &na, &ns);
of_node_put(parent);
if (!OF_CHECK_ADDR_COUNT(na))
@@ -701,17 +654,21 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
psize /= 4;
onesize = na + ns;
- for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
- if (i == index) {
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
+ u32 val = be32_to_cpu(prop[0]);
+ /* PCI bus matches on BAR number instead of index */
+ if (((bar_no >= 0) && ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0))) ||
+ ((index >= 0) && (i == index))) {
if (size)
*size = of_read_number(prop + na, ns);
if (flags)
*flags = bus->get_flags(prop);
return prop;
}
+ }
return NULL;
}
-EXPORT_SYMBOL(of_get_address);
+EXPORT_SYMBOL(__of_get_address);
static int parser_init(struct of_pci_range_parser *parser,
struct device_node *node, const char *name)
@@ -834,11 +791,22 @@ static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
return port;
}
-static int __of_address_to_resource(struct device_node *dev,
- const __be32 *addrp, u64 size, unsigned int flags,
- const char *name, struct resource *r)
+static int __of_address_to_resource(struct device_node *dev, int index, int bar_no,
+ struct resource *r)
{
u64 taddr;
+ const __be32 *addrp;
+ u64 size;
+ unsigned int flags;
+ const char *name = NULL;
+
+ addrp = __of_get_address(dev, index, bar_no, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+
+ /* Get optional "reg-names" property to add a name to a resource */
+ if (index >= 0)
+ of_property_read_string_index(dev, "reg-names", index, &name);
if (flags & IORESOURCE_MEM)
taddr = of_translate_address(dev, addrp);
@@ -876,19 +844,7 @@ static int __of_address_to_resource(struct device_node *dev,
int of_address_to_resource(struct device_node *dev, int index,
struct resource *r)
{
- const __be32 *addrp;
- u64 size;
- unsigned int flags;
- const char *name = NULL;
-
- addrp = of_get_address(dev, index, &size, &flags);
- if (addrp == NULL)
- return -EINVAL;
-
- /* Get optional "reg-names" property to add a name to a resource */
- of_property_read_string_index(dev, "reg-names", index, &name);
-
- return __of_address_to_resource(dev, addrp, size, flags, name, r);
+ return __of_address_to_resource(dev, index, -1, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
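After the address.c rework above, both public wrappers funnel into __of_address_to_resource(): of_address_to_resource() passes a "reg" index with bar_no = -1, while of_pci_address_to_resource() passes a BAR number with index = -1. A small usage sketch of the two wrappers, assuming a device node is already at hand:

#include <linux/of_address.h>

static int demo_get_resources(struct device_node *np)
{
	struct resource res;
	int ret;

	/* lookup by "reg" index; internally bar_no == -1 */
	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/* lookup by PCI BAR number; internally index == -1 */
	return of_pci_address_to_resource(np, 0, &res);
}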
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ba17a80b8c79..e0f96e3ef1da 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -510,11 +510,11 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
if (size &&
early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
- pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
- uname, &base, (unsigned long)size / SZ_1M);
+ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
else
- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
- uname, &base, (unsigned long)size / SZ_1M);
+ pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
len -= t_len;
if (first) {
@@ -900,8 +900,7 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
phys_initrd_start = start;
phys_initrd_size = end - start;
- pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
- (unsigned long long)start, (unsigned long long)end);
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
@@ -1027,8 +1026,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
if (size == 0)
continue;
- pr_debug(" - %llx , %llx\n", (unsigned long long)base,
- (unsigned long long)size);
+ pr_debug(" - %llx, %llx\n", base, size);
early_init_dt_add_memory_arch(base, size);
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index d717efbd637d..631489f7f8c0 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -171,4 +171,8 @@ static inline int of_dma_get_range(struct device_node *np,
}
#endif
+void fdt_init_reserved_mem(void);
+void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size);
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 15e2417974d6..fd3964d24224 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -22,6 +22,8 @@
#include <linux/slab.h>
#include <linux/memblock.h>
+#include "of_private.h"
+
#define MAX_RESERVED_REGIONS 64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
@@ -40,7 +42,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
*res_base = base;
if (nomap)
- return memblock_remove(base, size);
+ return memblock_mark_nomap(base, size);
return memblock_reserve(base, size);
}
@@ -134,9 +136,9 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
ret = early_init_dt_alloc_reserved_memory_arch(size,
align, start, end, nomap, &base);
if (ret == 0) {
- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
uname, &base,
- (unsigned long)size / SZ_1M);
+ (unsigned long)(size / SZ_1M));
break;
}
len -= t_len;
@@ -146,8 +148,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
0, 0, nomap, &base);
if (ret == 0)
- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
- uname, &base, (unsigned long)size / SZ_1M);
+ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
}
if (base == 0) {
@@ -273,9 +275,10 @@ void __init fdt_init_reserved_mem(void)
if (err != 0 && err != -ENOENT) {
pr_info("node %s compatible matching fail\n",
rmem->name);
- memblock_free(rmem->base, rmem->size);
if (nomap)
- memblock_add(rmem->base, rmem->size);
+ memblock_clear_nomap(rmem->base, rmem->size);
+ else
+ memblock_free(rmem->base, rmem->size);
}
}
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 25d448f5af91..74afbb7a4f5e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 819a20acaa93..8c056972a6dd 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1209,11 +1209,7 @@ static void __init of_unittest_match_node(void)
}
}
-static struct resource test_bus_res = {
- .start = 0xfffffff8,
- .end = 0xfffffff9,
- .flags = IORESOURCE_MEM,
-};
+static struct resource test_bus_res = DEFINE_RES_MEM(0xfffffff8, 2);
static const struct platform_device_info test_bus_info = {
.name = "unittest-bus",
};
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index b335c077f215..5543c54dacc5 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
kfree(opp_table->supported_hw);
opp_table->supported_hw = NULL;
opp_table->supported_hw_count = 0;
@@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
kfree(opp_table->prop_name);
opp_table->prop_name = NULL;
@@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
if (!opp_table->regulators)
goto put_opp_table;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_disable(opp_table->regulators[i]);
@@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
clk_put(opp_table->clk);
opp_table->clk = ERR_PTR(-EINVAL);
@@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
opp_table->set_opp = NULL;
mutex_lock(&opp_table->lock);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index d298e38aaf7e..67f2e0710e79 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -964,8 +964,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
}
}
- /* There should be one of more OPP defined */
- if (WARN_ON(!count)) {
+ /* There should be one or more OPPs defined */
+ if (!count) {
+ dev_err(dev, "%s: no supported OPPs", __func__);
ret = -ENOENT;
goto remove_static_opp;
}
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
index ebaf6867b457..456776bd8ee6 100644
--- a/drivers/parisc/power.c
+++ b/drivers/parisc/power.c
@@ -38,6 +38,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index 7e6d713fa5ac..5d1b9aacb130 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -8,8 +8,8 @@
#include <linux/module.h>
#include <linux/parport.h>
-#include <linux/ctype.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -74,11 +74,7 @@ static void parse_data(struct parport *port, int device, char *str)
u = sep + strlen (sep) - 1;
while (u >= p && *u == ' ')
*u-- = '\0';
- u = p;
- while (*u) {
- *u = toupper(*u);
- u++;
- }
+ string_upper(p, p);
if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
kfree(info->mfr);
info->mfr = kstrdup(sep, GFP_KERNEL);
@@ -90,8 +86,7 @@ static void parse_data(struct parport *port, int device, char *str)
kfree(info->class_name);
info->class_name = kstrdup(sep, GFP_KERNEL);
- for (u = sep; *u; u++)
- *u = toupper(*u);
+ string_upper(sep, sep);
for (i = 0; classes[i].token; i++) {
if (!strcmp(classes[i].token, sep)) {
info->class = i;
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 2f2c8a1729f9..5e1e3796efa4 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -37,6 +37,14 @@ config PCI_FTPCI100
depends on OF
default ARCH_GEMINI
+config PCI_IXP4XX
+ bool "Intel IXP4xx PCI controller"
+ depends on ARM && OF
+ default ARCH_IXP4XX
+ help
+ Say Y here if you want support for the PCI host controller found
+ in the Intel IXP4xx XScale-based network processor SoC.
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 63e3880a3e87..aaf30b3dcc14 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_CADENCE) += cadence/
obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
+obj-$(CONFIG_PCI_IXP4XX) += pci-ixp4xx.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 254d2570f8c9..30db2d68c17a 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -263,9 +263,12 @@ struct cdns_pcie_ops {
* struct cdns_pcie - private data for Cadence PCIe controller drivers
* @reg_base: IO mapped register base
* @mem_res: start/end offsets in the physical system memory to map PCI accesses
+ * @dev: PCIe controller
* @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
- * @bus: In Root Complex mode, the bus number
- * @ops: Platform specific ops to control various inputs from Cadence PCIe
+ * @phy_count: number of supported PHY devices
+ * @phy: list of pointers to specific PHY control blocks
+ * @link: list of pointers to corresponding device link representations
+ * @ops: Platform-specific ops to control various inputs from Cadence PCIe
* wrapper
*/
struct cdns_pcie {
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 0cf1333c0440..80fc98acf097 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -37,6 +37,7 @@
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
+#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
@@ -80,6 +81,7 @@ struct imx6_pcie {
u32 tx_swing_full;
u32 tx_swing_low;
struct regulator *vpcie;
+ struct regulator *vph;
void __iomem *phy_base;
/* power domain for pcie */
@@ -621,6 +623,17 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
imx6_pcie_grp_offset(imx6_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * According to the datasheet, PCIE_VPH is suggested
+ * to be 1.8V. If PCIE_VPH is supplied with 3.3V, the
+ * VREG_BYPASS bit should be cleared to zero.
+ */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
break;
case IMX7D:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -1002,10 +1015,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return ret;
}
imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base)) {
- dev_err(dev, "Unable to map PCIe PHY\n");
+ if (IS_ERR(imx6_pcie->phy_base))
return PTR_ERR(imx6_pcie->phy_base);
- }
}
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1130,6 +1141,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->vpcie = NULL;
}
+ imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx6_pcie->vph)) {
+ if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx6_pcie->vph);
+ imx6_pcie->vph = NULL;
+ }
+
platform_set_drvdata(pdev, imx6_pcie);
ret = imx6_pcie_attach_pd(dev);
@@ -1175,6 +1193,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.variant = IMX6QP,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .dbi_length = 0x200,
},
[IMX7D] = {
.variant = IMX7D,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index f89a7d24ba28..d15cf35fa7f2 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -39,6 +39,10 @@
#define PCIE_APP_IRN_PM_TO_ACK BIT(9)
#define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11)
#define PCIE_APP_IRN_BW_MGT BIT(12)
+#define PCIE_APP_IRN_INTA BIT(13)
+#define PCIE_APP_IRN_INTB BIT(14)
+#define PCIE_APP_IRN_INTC BIT(15)
+#define PCIE_APP_IRN_INTD BIT(16)
#define PCIE_APP_IRN_MSG_LTR BIT(18)
#define PCIE_APP_IRN_SYS_ERR_RC BIT(29)
#define PCIE_APP_INTX_OFST 12
@@ -48,10 +52,8 @@
PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
- (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTA) | \
- (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTB) | \
- (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTC) | \
- (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTD))
+ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
+ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 504669e3afe0..3ec7b29d5dc7 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1826,7 +1826,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
if (unlikely(irq > 31))
return -EINVAL;
- appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);
+ appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
return 0;
}
@@ -2214,6 +2214,8 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
goto fail_host_init;
}
+ dw_pcie_setup_rc(&pcie->pci.pp);
+
ret = tegra_pcie_dw_start_link(&pcie->pci);
if (ret < 0)
goto fail_host_init;
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index ee0156921ebc..306950272fd6 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -42,17 +42,6 @@ struct ls_pcie_g4 {
int irq;
};
-static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
-{
- return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
-}
-
-static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
- u32 off, u32 val)
-{
- iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
-}
-
static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
{
return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index e3f5e7ab7606..c95ebe808f92 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -57,7 +57,7 @@
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
-#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -125,6 +125,7 @@
#define LTSSM_MASK 0x3f
#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300
+#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)
/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
@@ -385,6 +386,16 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= (IS_RC_MSK << IS_RC_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+ /*
+ * Replace the incorrect PCI vendor id value 0x1b4b with the correct value 0x11ab.
+ * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+ * id in high 16 bits. Updating this register changes readback value of
+ * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+ * for erratum 4.1: "The value of device and vendor ID is incorrect".
+ */
+ reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+ advk_writel(pcie, reg, VENDOR_ID_REG);
+
/* Set Advanced Error Capabilities and Control PF0 register */
reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index da3cd216da00..aefef1986201 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -34,12 +34,12 @@
* Special configuration registers directly in the first few words
* in I/O space.
*/
-#define PCI_IOSIZE 0x00
-#define PCI_PROT 0x04 /* AHB protection */
-#define PCI_CTRL 0x08 /* PCI control signal */
-#define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */
-#define PCI_CONFIG 0x28 /* PCI configuration command register */
-#define PCI_DATA 0x2C
+#define FTPCI_IOSIZE 0x00
+#define FTPCI_PROT 0x04 /* AHB protection */
+#define FTPCI_CTRL 0x08 /* PCI control signal */
+#define FTPCI_SOFTRST 0x10 /* Soft reset counter and response error enable */
+#define FTPCI_CONFIG 0x28 /* PCI configuration command register */
+#define FTPCI_DATA 0x2C
#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */
#define FARADAY_PCI_PMC 0x40 /* Power management control */
@@ -195,9 +195,9 @@ static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
PCI_CONF_WHERE(config) |
PCI_CONF_ENABLE,
- p->base + PCI_CONFIG);
+ p->base + FTPCI_CONFIG);
- *value = readl(p->base + PCI_DATA);
+ *value = readl(p->base + FTPCI_DATA);
if (size == 1)
*value = (*value >> (8 * (config & 3))) & 0xFF;
@@ -230,17 +230,17 @@ static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
PCI_CONF_WHERE(config) |
PCI_CONF_ENABLE,
- p->base + PCI_CONFIG);
+ p->base + FTPCI_CONFIG);
switch (size) {
case 4:
- writel(value, p->base + PCI_DATA);
+ writel(value, p->base + FTPCI_DATA);
break;
case 2:
- writew(value, p->base + PCI_DATA + (config & 3));
+ writew(value, p->base + FTPCI_DATA + (config & 3));
break;
case 1:
- writeb(value, p->base + PCI_DATA + (config & 3));
+ writeb(value, p->base + FTPCI_DATA + (config & 3));
break;
default:
ret = PCIBIOS_BAD_REGISTER_NUMBER;
@@ -469,7 +469,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (!faraday_res_to_memcfg(io->start - win->offset,
resource_size(io), &val)) {
/* setup I/O space size */
- writel(val, p->base + PCI_IOSIZE);
+ writel(val, p->base + FTPCI_IOSIZE);
} else {
dev_err(dev, "illegal IO mem size\n");
return -EINVAL;
@@ -477,11 +477,11 @@ static int faraday_pci_probe(struct platform_device *pdev)
}
/* Setup hostbridge */
- val = readl(p->base + PCI_CTRL);
+ val = readl(p->base + FTPCI_CTRL);
val |= PCI_COMMAND_IO;
val |= PCI_COMMAND_MEMORY;
val |= PCI_COMMAND_MASTER;
- writel(val, p->base + PCI_CTRL);
+ writel(val, p->base + FTPCI_CTRL);
/* Mask and clear all interrupts */
faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
if (variant->cascaded_irq) {
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index bebe3eeebc4e..a53bd8728d0d 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -444,7 +444,6 @@ enum hv_pcibus_state {
hv_pcibus_probed,
hv_pcibus_installed,
hv_pcibus_removing,
- hv_pcibus_removed,
hv_pcibus_maximum
};
@@ -453,7 +452,6 @@ struct hv_pcibus_device {
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
- refcount_t remove_lock;
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -461,7 +459,6 @@ struct hv_pcibus_device {
struct resource *low_mmio_res;
struct resource *high_mmio_res;
struct completion *survey_event;
- struct completion remove_event;
struct pci_bus *pci_bus;
spinlock_t config_lock; /* Avoid two threads writing index page */
spinlock_t device_list_lock; /* Protect lists below */
@@ -593,9 +590,6 @@ static void put_pcichild(struct hv_pci_dev *hpdev)
kfree(hpdev);
}
-static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
-static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
-
/*
* There is no good way to get notified from vmbus_onoffer_rescind(),
* so let's use polling here, since this is not a hot path.
@@ -2064,10 +2058,8 @@ static void pci_devices_present_work(struct work_struct *work)
}
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
- if (!dr) {
- put_hvpcibus(hbus);
+ if (!dr)
return;
- }
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
@@ -2150,7 +2142,6 @@ static void pci_devices_present_work(struct work_struct *work)
break;
}
- put_hvpcibus(hbus);
kfree(dr);
}
@@ -2191,12 +2182,10 @@ static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
list_add_tail(&dr->list_entry, &hbus->dr_list);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
- if (pending_dr) {
+ if (pending_dr)
kfree(dr_wrk);
- } else {
- get_hvpcibus(hbus);
+ else
queue_work(hbus->wq, &dr_wrk->wrk);
- }
return 0;
}
@@ -2339,8 +2328,6 @@ static void hv_eject_device_work(struct work_struct *work)
put_pcichild(hpdev);
put_pcichild(hpdev);
/* hpdev has been freed. Do not use it any more. */
-
- put_hvpcibus(hbus);
}
/**
@@ -2364,7 +2351,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
- get_hvpcibus(hbus);
queue_work(hbus->wq, &hpdev->wrk);
}
@@ -2964,17 +2950,6 @@ static int hv_send_resources_released(struct hv_device *hdev)
return 0;
}
-static void get_hvpcibus(struct hv_pcibus_device *hbus)
-{
- refcount_inc(&hbus->remove_lock);
-}
-
-static void put_hvpcibus(struct hv_pcibus_device *hbus)
-{
- if (refcount_dec_and_test(&hbus->remove_lock))
- complete(&hbus->remove_event);
-}
-
#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
@@ -3094,14 +3069,12 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus->sysdata.domain = dom;
hbus->hdev = hdev;
- refcount_set(&hbus->remove_lock, 1);
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
spin_lock_init(&hbus->retarget_msi_interrupt_lock);
- init_completion(&hbus->remove_event);
hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
hbus->sysdata.domain);
if (!hbus->wq) {
@@ -3243,8 +3216,9 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
- struct hv_dr_state *dr;
struct hv_pci_compl comp_pkt;
+ struct hv_pci_dev *hpdev, *tmp;
+ unsigned long flags;
int ret;
/*
@@ -3256,9 +3230,16 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
if (!keep_devs) {
/* Delete any children which might still exist. */
- dr = kzalloc(sizeof(*dr), GFP_KERNEL);
- if (dr && hv_pci_start_relations_work(hbus, dr))
- kfree(dr);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
+ list_del(&hpdev->list_entry);
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+			/* For the two refs taken in new_pcichild_device() */
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
ret = hv_send_resources_released(hdev);
@@ -3301,13 +3282,23 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
+ tasklet_disable(&hdev->channel->callback_event);
+ hbus->state = hv_pcibus_removing;
+ tasklet_enable(&hdev->channel->callback_event);
+ destroy_workqueue(hbus->wq);
+ hbus->wq = NULL;
+ /*
+ * At this point, no work is running or can be scheduled
+		 * on hbus->wq. We can't race with hv_pci_devices_present()
+		 * or hv_pci_eject_device(), so it's safe to proceed.
+ */
+
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
pci_stop_root_bus(hbus->pci_bus);
hv_pci_remove_slots(hbus);
pci_remove_root_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
- hbus->state = hv_pcibus_removed;
}
ret = hv_pci_bus_exit(hdev, false);
@@ -3320,9 +3311,6 @@ static int hv_pci_remove(struct hv_device *hdev)
hv_pci_free_bridge_windows(hbus);
irq_domain_remove(hbus->irq_domain);
irq_domain_free_fwnode(hbus->sysdata.fwnode);
- put_hvpcibus(hbus);
- wait_for_completion(&hbus->remove_event);
- destroy_workqueue(hbus->wq);
hv_put_dom_num(hbus->sysdata.domain);
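
The pci-hyperv.c hunks above drop the remove_lock refcount and remove_event
completion in favour of quiescing the channel and draining the workqueue before
teardown. A minimal sketch of that ordering, using the driver's own types but
an illustrative helper name that is not part of the patch:

	/* Sketch only: refuse new offers, then drain all queued work. */
	static void hv_pci_quiesce_and_drain(struct hv_pcibus_device *hbus)
	{
		struct hv_device *hdev = hbus->hdev;

		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;	/* new relations/eject work is now refused */
		tasklet_enable(&hdev->channel->callback_event);

		destroy_workqueue(hbus->wq);		/* waits for all queued work to finish */
		hbus->wq = NULL;
	}

Once nothing can queue onto hbus->wq and destroy_workqueue() has returned,
hv_pci_devices_present() and hv_pci_eject_device() can no longer run, so the
per-work get_hvpcibus()/put_hvpcibus() pairs become unnecessary.
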
diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c
new file mode 100644
index 000000000000..654ac4a82beb
--- /dev/null
+++ b/drivers/pci/controller/pci-ixp4xx.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel IXP4xx PCI host controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the IXP4xx arch/arm/mach-ixp4xx/common-pci.c driver
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003 Greg Ungerer <gerg@linux-m68k.org>
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ * Copyright (C) 2005 Deepak Saxena <dsaxena@plexity.net>
+ * Copyright (C) 2005 Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * TODO:
+ * - Test IO-space access
+ * - DMA support
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bits.h>
+
+/* Register offsets */
+#define IXP4XX_PCI_NP_AD 0x00
+#define IXP4XX_PCI_NP_CBE 0x04
+#define IXP4XX_PCI_NP_WDATA 0x08
+#define IXP4XX_PCI_NP_RDATA 0x0c
+#define IXP4XX_PCI_CRP_AD_CBE 0x10
+#define IXP4XX_PCI_CRP_WDATA 0x14
+#define IXP4XX_PCI_CRP_RDATA 0x18
+#define IXP4XX_PCI_CSR 0x1c
+#define IXP4XX_PCI_ISR 0x20
+#define IXP4XX_PCI_INTEN 0x24
+#define IXP4XX_PCI_DMACTRL 0x28
+#define IXP4XX_PCI_AHBMEMBASE 0x2c
+#define IXP4XX_PCI_AHBIOBASE 0x30
+#define IXP4XX_PCI_PCIMEMBASE 0x34
+#define IXP4XX_PCI_AHBDOORBELL 0x38
+#define IXP4XX_PCI_PCIDOORBELL 0x3c
+#define IXP4XX_PCI_ATPDMA0_AHBADDR 0x40
+#define IXP4XX_PCI_ATPDMA0_PCIADDR 0x44
+#define IXP4XX_PCI_ATPDMA0_LENADDR 0x48
+#define IXP4XX_PCI_ATPDMA1_AHBADDR 0x4c
+#define IXP4XX_PCI_ATPDMA1_PCIADDR 0x50
+#define IXP4XX_PCI_ATPDMA1_LENADDR 0x54
+
+/* CSR bit definitions */
+#define IXP4XX_PCI_CSR_HOST BIT(0)
+#define IXP4XX_PCI_CSR_ARBEN BIT(1)
+#define IXP4XX_PCI_CSR_ADS BIT(2)
+#define IXP4XX_PCI_CSR_PDS BIT(3)
+#define IXP4XX_PCI_CSR_ABE BIT(4)
+#define IXP4XX_PCI_CSR_DBT BIT(5)
+#define IXP4XX_PCI_CSR_ASE BIT(8)
+#define IXP4XX_PCI_CSR_IC BIT(15)
+#define IXP4XX_PCI_CSR_PRST BIT(16)
+
+/* ISR (Interrupt status) Register bit definitions */
+#define IXP4XX_PCI_ISR_PSE BIT(0)
+#define IXP4XX_PCI_ISR_PFE BIT(1)
+#define IXP4XX_PCI_ISR_PPE BIT(2)
+#define IXP4XX_PCI_ISR_AHBE BIT(3)
+#define IXP4XX_PCI_ISR_APDC BIT(4)
+#define IXP4XX_PCI_ISR_PADC BIT(5)
+#define IXP4XX_PCI_ISR_ADB BIT(6)
+#define IXP4XX_PCI_ISR_PDB BIT(7)
+
+/* INTEN (Interrupt Enable) Register bit definitions */
+#define IXP4XX_PCI_INTEN_PSE BIT(0)
+#define IXP4XX_PCI_INTEN_PFE BIT(1)
+#define IXP4XX_PCI_INTEN_PPE BIT(2)
+#define IXP4XX_PCI_INTEN_AHBE BIT(3)
+#define IXP4XX_PCI_INTEN_APDC BIT(4)
+#define IXP4XX_PCI_INTEN_PADC BIT(5)
+#define IXP4XX_PCI_INTEN_ADB BIT(6)
+#define IXP4XX_PCI_INTEN_PDB BIT(7)
+
+/* Shift value for byte enable on NP cmd/byte enable register */
+#define IXP4XX_PCI_NP_CBE_BESL 4
+
+/* PCI commands supported by NP access unit */
+#define NP_CMD_IOREAD 0x2
+#define NP_CMD_IOWRITE 0x3
+#define NP_CMD_CONFIGREAD 0xa
+#define NP_CMD_CONFIGWRITE 0xb
+#define NP_CMD_MEMREAD 0x6
+#define NP_CMD_MEMWRITE 0x7
+
+/* Constants for CRP access into local config space */
+#define CRP_AD_CBE_BESL 20
+#define CRP_AD_CBE_WRITE 0x00010000
+
+/* Special PCI configuration space registers for this controller */
+#define IXP4XX_PCI_RTOTTO 0x40
+
+struct ixp4xx_pci {
+ struct device *dev;
+ void __iomem *base;
+ bool errata_hammer;
+ bool host_mode;
+};
+
+/*
+ * The IXP4xx has a peculiar address bus that will change the
+ * byte order on SoC peripherals depending on whether the device
+ * operates in big-endian or little-endian mode. That means that
+ * readl() and writel() that always use little-endian access
+ * will not work for SoC peripherals such as the PCI controller
+ * when used in big-endian mode. The accesses to the individual
+ * PCI devices on the other hand, are always little-endian and
+ * can use readl() and writel().
+ *
+ * For local AHB bus access we need to use __raw_[readl|writel]()
+ * to make sure that we access the SoC devices in the CPU native
+ * endianness.
+ */
+static inline u32 ixp4xx_readl(struct ixp4xx_pci *p, u32 reg)
+{
+ return __raw_readl(p->base + reg);
+}
+
+static inline void ixp4xx_writel(struct ixp4xx_pci *p, u32 reg, u32 val)
+{
+ __raw_writel(val, p->base + reg);
+}
+
+static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p)
+{
+ u32 isr = ixp4xx_readl(p, IXP4XX_PCI_ISR);
+
+ if (isr & IXP4XX_PCI_ISR_PFE) {
+ /* Make sure the master abort bit is reset */
+ ixp4xx_writel(p, IXP4XX_PCI_ISR, IXP4XX_PCI_ISR_PFE);
+ dev_dbg(p->dev, "master abort detected\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
+{
+ ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
+
+ if (p->errata_hammer) {
+ int i;
+
+ /*
+ * PCI workaround - only works if NP PCI space reads have
+		 * no side effects. Hammer the register and read it twice,
+		 * 8 times over; the last read will be good.
+ */
+ for (i = 0; i < 8; i++) {
+ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd);
+ *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA);
+ *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA);
+ }
+ } else {
+ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd);
+ *data = ixp4xx_readl(p, IXP4XX_PCI_NP_RDATA);
+ }
+
+ return ixp4xx_pci_check_master_abort(p);
+}
+
+static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
+{
+ ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
+
+ /* Set up the write */
+ ixp4xx_writel(p, IXP4XX_PCI_NP_CBE, cmd);
+
+ /* Execute the write by writing to NP_WDATA */
+ ixp4xx_writel(p, IXP4XX_PCI_NP_WDATA, data);
+
+ return ixp4xx_pci_check_master_abort(p);
+}
+
+static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where)
+{
+ /* Root bus is always 0 in this hardware */
+ if (bus_num == 0) {
+ /* type 0 */
+ return BIT(32-PCI_SLOT(devfn)) | ((PCI_FUNC(devfn)) << 8) |
+ (where & ~3);
+ } else {
+ /* type 1 */
+ return (bus_num << 16) | ((PCI_SLOT(devfn)) << 11) |
+ ((PCI_FUNC(devfn)) << 8) | (where & ~3) | 1;
+ }
+}
+
+/*
+ * CRP functions are "Controller Configuration Port" accesses
+ * initiated from within this driver itself to read/write PCI
+ * control information in the config space.
+ */
+static u32 ixp4xx_crp_byte_lane_enable_bits(u32 n, int size)
+{
+ if (size == 1)
+ return (0xf & ~BIT(n)) << CRP_AD_CBE_BESL;
+ if (size == 2)
+ return (0xf & ~(BIT(n) | BIT(n+1))) << CRP_AD_CBE_BESL;
+ if (size == 4)
+ return 0;
+ return 0xffffffff;
+}
+
+static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size,
+ u32 *value)
+{
+ u32 n, cmd, val;
+
+ n = where % 4;
+ cmd = where & ~3;
+
+ dev_dbg(p->dev, "%s from %d size %d cmd %08x\n",
+ __func__, where, size, cmd);
+
+ ixp4xx_writel(p, IXP4XX_PCI_CRP_AD_CBE, cmd);
+ val = ixp4xx_readl(p, IXP4XX_PCI_CRP_RDATA);
+
+ val >>= (8*n);
+ switch (size) {
+ case 1:
+ val &= U8_MAX;
+ dev_dbg(p->dev, "%s read byte %02x\n", __func__, val);
+ break;
+ case 2:
+ val &= U16_MAX;
+ dev_dbg(p->dev, "%s read word %04x\n", __func__, val);
+ break;
+ case 4:
+ val &= U32_MAX;
+ dev_dbg(p->dev, "%s read long %08x\n", __func__, val);
+ break;
+ default:
+ /* Should not happen */
+ dev_err(p->dev, "%s illegal size\n", __func__);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *value = val;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ixp4xx_crp_write_config(struct ixp4xx_pci *p, int where, int size,
+ u32 value)
+{
+ u32 n, cmd, val;
+
+ n = where % 4;
+ cmd = ixp4xx_crp_byte_lane_enable_bits(n, size);
+ if (cmd == 0xffffffff)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ cmd |= where & ~3;
+ cmd |= CRP_AD_CBE_WRITE;
+
+ val = value << (8*n);
+
+ dev_dbg(p->dev, "%s to %d size %d cmd %08x val %08x\n",
+ __func__, where, size, cmd, val);
+
+ ixp4xx_writel(p, IXP4XX_PCI_CRP_AD_CBE, cmd);
+ ixp4xx_writel(p, IXP4XX_PCI_CRP_WDATA, val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Then follows the functions that read and write from the common PCI
+ * configuration space.
+ */
+static u32 ixp4xx_byte_lane_enable_bits(u32 n, int size)
+{
+ if (size == 1)
+ return (0xf & ~BIT(n)) << 4;
+ if (size == 2)
+ return (0xf & ~(BIT(n) | BIT(n+1))) << 4;
+ if (size == 4)
+ return 0;
+ return 0xffffffff;
+}
+
+static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct ixp4xx_pci *p = bus->sysdata;
+ u32 n, addr, val, cmd;
+ u8 bus_num = bus->number;
+ int ret;
+
+ *value = 0xffffffff;
+ n = where % 4;
+ cmd = ixp4xx_byte_lane_enable_bits(n, size);
+ if (cmd == 0xffffffff)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ addr = ixp4xx_config_addr(bus_num, devfn, where);
+ cmd |= NP_CMD_CONFIGREAD;
+ dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n",
+ where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
+
+ ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val);
+ if (ret)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ val >>= (8*n);
+ switch (size) {
+ case 1:
+ val &= U8_MAX;
+ dev_dbg(p->dev, "%s read byte %02x\n", __func__, val);
+ break;
+ case 2:
+ val &= U16_MAX;
+ dev_dbg(p->dev, "%s read word %04x\n", __func__, val);
+ break;
+ case 4:
+ val &= U32_MAX;
+ dev_dbg(p->dev, "%s read long %08x\n", __func__, val);
+ break;
+ default:
+ /* Should not happen */
+ dev_err(p->dev, "%s illegal size\n", __func__);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ *value = val;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct ixp4xx_pci *p = bus->sysdata;
+ u32 n, addr, val, cmd;
+ u8 bus_num = bus->number;
+ int ret;
+
+ n = where % 4;
+ cmd = ixp4xx_byte_lane_enable_bits(n, size);
+ if (cmd == 0xffffffff)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ addr = ixp4xx_config_addr(bus_num, devfn, where);
+ cmd |= NP_CMD_CONFIGWRITE;
+ val = value << (8*n);
+
+ dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n",
+ value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
+
+ ret = ixp4xx_pci_write_indirect(p, addr, cmd, val);
+ if (ret)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ixp4xx_pci_ops = {
+ .read = ixp4xx_pci_read_config,
+ .write = ixp4xx_pci_write_config,
+};
+
+static u32 ixp4xx_pci_addr_to_64mconf(phys_addr_t addr)
+{
+ u8 base;
+
+ base = ((addr & 0xff000000) >> 24);
+ return (base << 24) | ((base + 1) << 16)
+ | ((base + 2) << 8) | (base + 3);
+}
+
+static int ixp4xx_pci_parse_map_ranges(struct ixp4xx_pci *p)
+{
+ struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *win;
+ struct resource *res;
+ phys_addr_t addr;
+
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ u32 pcimembase;
+
+ res = win->res;
+ addr = res->start - win->offset;
+
+ if (res->flags & IORESOURCE_PREFETCH)
+ res->name = "IXP4xx PCI PRE-MEM";
+ else
+ res->name = "IXP4xx PCI NON-PRE-MEM";
+
+ dev_dbg(dev, "%s window %pR, bus addr %pa\n",
+ res->name, res, &addr);
+ if (resource_size(res) != SZ_64M) {
+ dev_err(dev, "memory range is not 64MB\n");
+ return -EINVAL;
+ }
+
+ pcimembase = ixp4xx_pci_addr_to_64mconf(addr);
+ /* Commit configuration */
+ ixp4xx_writel(p, IXP4XX_PCI_PCIMEMBASE, pcimembase);
+ } else {
+ dev_err(dev, "no AHB memory mapping defined\n");
+ }
+
+ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (win) {
+ res = win->res;
+
+ addr = pci_pio_to_address(res->start);
+ if (addr & 0xff) {
+ dev_err(dev, "IO mem at uneven address: %pa\n", &addr);
+ return -EINVAL;
+ }
+
+ res->name = "IXP4xx PCI IO MEM";
+ /*
+		 * Set up the I/O space location for PCI->AHB access: the
+		 * upper 24 bits of the address go into the lower
+ * 24 bits of this register.
+ */
+ ixp4xx_writel(p, IXP4XX_PCI_AHBIOBASE, (addr >> 8));
+ } else {
+ dev_info(dev, "no IO space AHB memory mapping defined\n");
+ }
+
+ return 0;
+}
+
+static int ixp4xx_pci_parse_map_dma_ranges(struct ixp4xx_pci *p)
+{
+ struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *win;
+ struct resource *res;
+ phys_addr_t addr;
+ u32 ahbmembase;
+
+ win = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
+ if (win) {
+ res = win->res;
+ addr = res->start - win->offset;
+
+ if (resource_size(res) != SZ_64M) {
+ dev_err(dev, "DMA memory range is not 64MB\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "DMA MEM BASE: %pa\n", &addr);
+ /*
+		 * There are 4 PCI-to-AHB windows of 16 MB each; write the
+		 * high 8 bits of each window's base address into each byte
+		 * of the PCI_AHBMEMBASE register.
+ */
+ ahbmembase = ixp4xx_pci_addr_to_64mconf(addr);
+ /* Commit AHB membase */
+ ixp4xx_writel(p, IXP4XX_PCI_AHBMEMBASE, ahbmembase);
+ } else {
+ dev_err(dev, "no DMA memory range defined\n");
+ }
+
+ return 0;
+}
+
+/* Only used to get context for abort handling */
+static struct ixp4xx_pci *ixp4xx_pci_abort_singleton;
+
+static int ixp4xx_pci_abort_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ struct ixp4xx_pci *p = ixp4xx_pci_abort_singleton;
+ u32 isr, status;
+ int ret;
+
+ isr = ixp4xx_readl(p, IXP4XX_PCI_ISR);
+ ret = ixp4xx_crp_read_config(p, PCI_STATUS, 2, &status);
+ if (ret) {
+ dev_err(p->dev, "unable to read abort status\n");
+ return -EINVAL;
+ }
+
+ dev_err(p->dev,
+ "PCI: abort_handler addr = %#lx, isr = %#x, status = %#x\n",
+ addr, isr, status);
+
+ /* Make sure the Master Abort bit is reset */
+ ixp4xx_writel(p, IXP4XX_PCI_ISR, IXP4XX_PCI_ISR_PFE);
+ status |= PCI_STATUS_REC_MASTER_ABORT;
+ ret = ixp4xx_crp_write_config(p, PCI_STATUS, 2, status);
+ if (ret)
+ dev_err(p->dev, "unable to clear abort status bit\n");
+
+ /*
+ * If it was an imprecise abort, then we need to correct the
+ * return address to be _after_ the instruction.
+ */
+ if (fsr & (1 << 10)) {
+ dev_err(p->dev, "imprecise abort\n");
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+
+static int __init ixp4xx_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ixp4xx_pci *p;
+ struct pci_host_bridge *host;
+ int ret;
+ u32 val;
+ phys_addr_t addr;
+ u32 basereg[4] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ };
+ int i;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
+ if (!host)
+ return -ENOMEM;
+
+ host->ops = &ixp4xx_pci_ops;
+ p = pci_host_bridge_priv(host);
+ host->sysdata = p;
+ p->dev = dev;
+ dev_set_drvdata(dev, p);
+
+ /*
+ * Set up quirk for erratic behaviour in the 42x variant
+ * when accessing config space.
+ */
+ if (of_device_is_compatible(np, "intel,ixp42x-pci")) {
+ p->errata_hammer = true;
+ dev_info(dev, "activate hammering errata\n");
+ }
+
+ p->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
+
+ val = ixp4xx_readl(p, IXP4XX_PCI_CSR);
+ p->host_mode = !!(val & IXP4XX_PCI_CSR_HOST);
+ dev_info(dev, "controller is in %s mode\n",
+ p->host_mode ? "host" : "option");
+
+ /* Hook in our fault handler for PCI errors */
+ ixp4xx_pci_abort_singleton = p;
+ hook_fault_code(16+6, ixp4xx_pci_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ ret = ixp4xx_pci_parse_map_ranges(p);
+ if (ret)
+ return ret;
+
+ ret = ixp4xx_pci_parse_map_dma_ranges(p);
+ if (ret)
+ return ret;
+
+ /* This is only configured in host mode */
+ if (p->host_mode) {
+ addr = __pa(PAGE_OFFSET);
+ /* This is a noop (0x00) but explains what is going on */
+ addr |= PCI_BASE_ADDRESS_SPACE_MEMORY;
+
+ for (i = 0; i < 4; i++) {
+ /* Write this directly into the config space */
+ ret = ixp4xx_crp_write_config(p, basereg[i], 4, addr);
+ if (ret)
+ dev_err(dev, "failed to set up PCI_BASE_ADDRESS_%d\n", i);
+ else
+ dev_info(dev, "set PCI_BASE_ADDR_%d to %pa\n", i, &addr);
+ addr += SZ_16M;
+ }
+
+ /*
+ * Enable CSR window at 64 MiB to allow PCI masters to continue
+ * prefetching past the 64 MiB boundary, if all AHB to PCI
+ * windows are consecutive.
+ */
+ ret = ixp4xx_crp_write_config(p, PCI_BASE_ADDRESS_4, 4, addr);
+ if (ret)
+ dev_err(dev, "failed to set up PCI_BASE_ADDRESS_4\n");
+ else
+ dev_info(dev, "set PCI_BASE_ADDR_4 to %pa\n", &addr);
+
+ /*
+ * Put the IO memory window at the very end of physical memory
+		 * at 0xfffffc00. This window is used when the system accesses
+		 * IO memory over AHB.
+ */
+ addr = 0xfffffc00;
+ addr |= PCI_BASE_ADDRESS_SPACE_IO;
+ ret = ixp4xx_crp_write_config(p, PCI_BASE_ADDRESS_5, 4, addr);
+ if (ret)
+ dev_err(dev, "failed to set up PCI_BASE_ADDRESS_5\n");
+ else
+ dev_info(dev, "set PCI_BASE_ADDR_5 to %pa\n", &addr);
+
+ /*
+ * Retry timeout to 0x80
+ * Transfer ready timeout to 0xff
+ */
+ ret = ixp4xx_crp_write_config(p, IXP4XX_PCI_RTOTTO, 4,
+ 0x000080ff);
+ if (ret)
+ dev_err(dev, "failed to set up TRDY limit\n");
+ else
+ dev_info(dev, "set TRDY limit to 0x80ff\n");
+ }
+
+ /* Clear interrupts */
+ val = IXP4XX_PCI_ISR_PSE | IXP4XX_PCI_ISR_PFE | IXP4XX_PCI_ISR_PPE | IXP4XX_PCI_ISR_AHBE;
+ ixp4xx_writel(p, IXP4XX_PCI_ISR, val);
+
+ /*
+ * Set Initialize Complete in PCI Control Register: allow IXP4XX to
+ * generate PCI configuration cycles. Specify that the AHB bus is
+ * operating in big-endian mode. Set up byte lane swapping between
+ * little-endian PCI and the big-endian AHB bus.
+ */
+ val = IXP4XX_PCI_CSR_IC | IXP4XX_PCI_CSR_ABE;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ val |= (IXP4XX_PCI_CSR_PDS | IXP4XX_PCI_CSR_ADS);
+ ixp4xx_writel(p, IXP4XX_PCI_CSR, val);
+
+ ret = ixp4xx_crp_write_config(p, PCI_COMMAND, 2, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
+ if (ret)
+ dev_err(dev, "unable to initialize master and command memory\n");
+ else
+ dev_info(dev, "initialized as master\n");
+
+ pci_host_probe(host);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_pci_of_match[] = {
+ {
+ .compatible = "intel,ixp42x-pci",
+ },
+ {
+ .compatible = "intel,ixp43x-pci",
+ },
+ {},
+};
+
+/*
+ * This driver needs to be a built-in module with suppressed bind
+ * attributes, since probe() installs a hard exception handler and
+ * this can only be done from __init-tagged code sections. This
+ * module cannot be removed or re-inserted at all.
+ */
+static struct platform_driver ixp4xx_pci_driver = {
+ .driver = {
+ .name = "ixp4xx-pci",
+ .suppress_bind_attrs = true,
+ .of_match_table = ixp4xx_pci_of_match,
+ },
+};
+builtin_platform_driver_probe(ixp4xx_pci_driver, ixp4xx_pci_probe);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 8069bd9232d4..c979229a6d0d 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2539,6 +2539,7 @@ static const struct of_device_id tegra_pcie_of_match[] = {
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 7f503dd4ff81..e64536047b65 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
-/**
+/*
* APM X-Gene PCIe Driver
*
* Copyright (c) 2014 Applied Micro Circuits Corporation.
@@ -485,7 +485,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
- void *bar_addr;
+ void __iomem *bar_addr;
u32 pim_reg;
u64 cpu_addr = entry->res->start;
u64 pci_addr = cpu_addr - entry->offset;
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index eede4e8f3f75..35a82124a126 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -49,7 +49,7 @@ enum iproc_msi_reg {
struct iproc_msi;
/**
- * iProc MSI group
+ * struct iproc_msi_grp - iProc MSI group
*
* One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
* event queue.
@@ -65,7 +65,7 @@ struct iproc_msi_grp {
};
/**
- * iProc event queue based MSI
+ * struct iproc_msi - iProc event queue based MSI
*
* Only meant to be used on platforms without MSI support integrated into the
* GIC.
@@ -171,7 +171,7 @@ static struct irq_chip iproc_msi_irq_chip = {
static struct msi_domain_info iproc_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_PCI_MSIX,
.chip = &iproc_msi_irq_chip,
};
@@ -250,20 +250,23 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
struct iproc_msi *msi = domain->host_data;
int hwirq, i;
+ if (msi->nr_cpus > 1 && nr_irqs > 1)
+ return -EINVAL;
+
mutex_lock(&msi->bitmap_lock);
- /* Allocate 'nr_cpus' number of MSI vectors each time */
- hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
- msi->nr_cpus, 0);
- if (hwirq < msi->nr_msi_vecs) {
- bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
- } else {
- mutex_unlock(&msi->bitmap_lock);
- return -ENOSPC;
- }
+ /*
+ * Allocate 'nr_irqs' multiplied by 'nr_cpus' number of MSI vectors
+ * each time
+ */
+ hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
+ order_base_2(msi->nr_cpus * nr_irqs));
mutex_unlock(&msi->bitmap_lock);
+ if (hwirq < 0)
+ return -ENOSPC;
+
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
&iproc_msi_bottom_irq_chip,
@@ -284,7 +287,8 @@ static void iproc_msi_irq_domain_free(struct irq_domain *domain,
mutex_lock(&msi->bitmap_lock);
hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
- bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);
+ bitmap_release_region(msi->bitmap, hwirq,
+ order_base_2(msi->nr_cpus * nr_irqs));
mutex_unlock(&msi->bitmap_lock);
@@ -539,6 +543,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
mutex_init(&msi->bitmap_lock);
msi->nr_cpus = num_possible_cpus();
+ if (msi->nr_cpus == 1)
+ iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+
msi->nr_irqs = of_irq_count(node);
if (!msi->nr_irqs) {
dev_err(pcie->dev, "found no MSI GIC interrupt\n");
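
The allocator change above replaces bitmap_find_next_zero_area() with the
bitmap_find_free_region()/bitmap_release_region() pair, which hands out
naturally aligned power-of-two regions; that alignment is what multi-MSI base
vectors require. A minimal stand-alone sketch of the pairing, with illustrative
helper names not taken from the patch:

	#include <linux/bitmap.h>
	#include <linux/log2.h>

	/* Sketch: allocate a naturally aligned block of MSI hwirqs from a bitmap. */
	static int msi_hwirq_alloc(unsigned long *bitmap, unsigned int nbits,
				   unsigned int nr_cpus, unsigned int nr_irqs)
	{
		int order = order_base_2(nr_cpus * nr_irqs);

		/* Returns the first bit of the region or a negative error code. */
		return bitmap_find_free_region(bitmap, nbits, order);
	}

	static void msi_hwirq_free(unsigned long *bitmap, int hwirq,
				   unsigned int nr_cpus, unsigned int nr_irqs)
	{
		/* The release order must match the allocation order. */
		bitmap_release_region(bitmap, hwirq, order_base_2(nr_cpus * nr_irqs));
	}

Since the region size is rounded up to a power of two with order_base_2(),
freeing must pass the identical order, as iproc_msi_irq_domain_free() now does.
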
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 02e52f698eeb..30ac5fbefbbf 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -89,8 +89,8 @@
#define IPROC_PCIE_REG_INVALID 0xffff
/**
- * iProc PCIe outbound mapping controller specific parameters
- *
+ * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific
+ * parameters
* @window_sizes: list of supported outbound mapping window sizes in MB
* @nr_sizes: number of supported outbound mapping window sizes
*/
@@ -136,22 +136,20 @@ static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
};
/**
- * iProc PCIe inbound mapping type
+ * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type
+ * @IPROC_PCIE_IB_MAP_MEM: DDR memory
+ * @IPROC_PCIE_IB_MAP_IO: device I/O memory
+ * @IPROC_PCIE_IB_MAP_INVALID: invalid or unused
*/
enum iproc_pcie_ib_map_type {
- /* for DDR memory */
IPROC_PCIE_IB_MAP_MEM = 0,
-
- /* for device I/O memory */
IPROC_PCIE_IB_MAP_IO,
-
- /* invalid or unused */
IPROC_PCIE_IB_MAP_INVALID
};
/**
- * iProc PCIe inbound mapping controller specific parameters
- *
+ * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific
+ * parameters
* @type: inbound mapping region type
* @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
* SZ_1G
@@ -437,7 +435,7 @@ static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
writel(val, pcie->base + offset);
}
-/**
+/*
* APB error forwarding can be disabled during access of configuration
* registers of the endpoint device, to prevent unsupported requests
* (typically seen during enumeration with multi-function devices) from
@@ -619,7 +617,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-/**
+/*
* Note access to the configuration registers are protected at the higher layer
* by 'pci_lock' in drivers/pci/access.c
*/
@@ -897,7 +895,7 @@ static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
return 0;
}
-/**
+/*
* Some iProc SoCs require the SW to configure the outbound address mapping
*
* Outbound address translation:
diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h
index c2676e442f55..dcca315897c8 100644
--- a/drivers/pci/controller/pcie-iproc.h
+++ b/drivers/pci/controller/pcie-iproc.h
@@ -7,7 +7,13 @@
#define _PCIE_IPROC_H
/**
- * iProc PCIe interface type
+ * enum iproc_pcie_type - iProc PCIe interface type
+ * @IPROC_PCIE_PAXB_BCMA: BCMA-based host controllers
+ * @IPROC_PCIE_PAXB: PAXB-based host controllers for
+ * NS, NSP, Cygnus, NS2, and Pegasus SOCs
+ * @IPROC_PCIE_PAXB_V2: PAXB-based host controllers for Stingray SoCs
+ * @IPROC_PCIE_PAXC: PAXC-based host controllers
+ * @IPROC_PCIE_PAXC_V2: PAXC-based host controllers (second generation)
*
* PAXB is the wrapper used in root complex that can be connected to an
* external endpoint device.
@@ -24,7 +30,7 @@ enum iproc_pcie_type {
};
/**
- * iProc PCIe outbound mapping
+ * struct iproc_pcie_ob - iProc PCIe outbound mapping
* @axi_offset: offset from the AXI address to the internal address used by
* the iProc PCIe core
* @nr_windows: total number of supported outbound mapping windows
@@ -35,7 +41,7 @@ struct iproc_pcie_ob {
};
/**
- * iProc PCIe inbound mapping
+ * struct iproc_pcie_ib - iProc PCIe inbound mapping
* @nr_regions: total number of supported inbound mapping regions
*/
struct iproc_pcie_ib {
@@ -47,13 +53,13 @@ struct iproc_pcie_ib_map;
struct iproc_msi;
/**
- * iProc PCIe device
- *
+ * struct iproc_pcie - iProc PCIe device
* @dev: pointer to device data structure
* @type: iProc PCIe interface type
* @reg_offsets: register offsets
* @base: PCIe host controller I/O register base
* @base_addr: PCIe host controller register base physical address
+ * @mem: host bridge memory window resource
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
* @ep_is_internal: indicates an internal emulated endpoint device is connected
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 3c5b97716d40..f3aeb8d4eaca 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -1012,6 +1012,7 @@ static const struct of_device_id mtk_pcie_of_match[] = {
{ .compatible = "mediatek,mt8192-pcie" },
{},
};
+MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 62a042e75d9a..25bee693834f 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -991,10 +991,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
if (regs) {
pcie->base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(pcie->base)) {
- dev_err(dev, "failed to map shared register\n");
+ if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- }
}
pcie->free_ck = devm_clk_get(dev, "free_ck");
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 89c68c56d93b..fdab8202ae5d 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -341,7 +341,7 @@ static struct event_map local_status_to_event[] = {
LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
};
-struct {
+static struct {
u32 base;
u32 offset;
u32 mask;
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index f1d08a1b1591..78d04ac29cd5 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -592,10 +592,6 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
if (err)
return err;
- err = rockchip_pcie_setup_irq(rockchip);
- if (err)
- return err;
-
rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
if (IS_ERR(rockchip->vpcie12v)) {
if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
@@ -973,8 +969,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err)
goto err_vpcie;
- rockchip_pcie_enable_interrupts(rockchip);
-
err = rockchip_pcie_init_irq_domain(rockchip);
if (err < 0)
goto err_deinit_port;
@@ -992,6 +986,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
bridge->sysdata = rockchip;
bridge->ops = &rockchip_pcie_ops;
+ err = rockchip_pcie_setup_irq(rockchip);
+ if (err)
+ goto err_remove_irq_domain;
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
err = pci_host_probe(bridge);
if (err < 0)
goto err_remove_irq_domain;
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index d2a1920bb055..1c40d2506aef 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -32,7 +32,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
struct pci_config_window *cfg;
unsigned int bus_range, bus_range_max, bsz;
struct resource *conflict;
- int i, err;
+ int err;
if (busr->start > busr->end)
return ERR_PTR(-EINVAL);
@@ -50,6 +50,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
cfg->busr.start = busr->start;
cfg->busr.end = busr->end;
cfg->busr.flags = IORESOURCE_BUS;
+ cfg->bus_shift = bus_shift;
bus_range = resource_size(&cfg->busr);
bus_range_max = resource_size(cfgres) >> bus_shift;
if (bus_range > bus_range_max) {
@@ -77,13 +78,6 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL);
if (!cfg->winp)
goto err_exit_malloc;
- for (i = 0; i < bus_range; i++) {
- cfg->winp[i] =
- pci_remap_cfgspace(cfgres->start + i * bsz,
- bsz);
- if (!cfg->winp[i])
- goto err_exit_iomap;
- }
} else {
cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
if (!cfg->win)
@@ -129,6 +123,44 @@ void pci_ecam_free(struct pci_config_window *cfg)
}
EXPORT_SYMBOL_GPL(pci_ecam_free);
+static int pci_ecam_add_bus(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ unsigned int bsz = 1 << cfg->bus_shift;
+ unsigned int busn = bus->number;
+ phys_addr_t start;
+
+ if (!per_bus_mapping)
+ return 0;
+
+ if (busn < cfg->busr.start || busn > cfg->busr.end)
+ return -EINVAL;
+
+ busn -= cfg->busr.start;
+ start = cfg->res.start + busn * bsz;
+
+ cfg->winp[busn] = pci_remap_cfgspace(start, bsz);
+ if (!cfg->winp[busn])
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void pci_ecam_remove_bus(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ unsigned int busn = bus->number;
+
+ if (!per_bus_mapping || busn < cfg->busr.start || busn > cfg->busr.end)
+ return;
+
+ busn -= cfg->busr.start;
+ if (cfg->winp[busn]) {
+ iounmap(cfg->winp[busn]);
+ cfg->winp[busn] = NULL;
+ }
+}
+
/*
* Function to implement the pci_ops ->map_bus method
*/
@@ -167,6 +199,8 @@ EXPORT_SYMBOL_GPL(pci_ecam_map_bus);
/* ECAM ops */
const struct pci_ecam_ops pci_generic_ecam_ops = {
.pci_ops = {
+ .add_bus = pci_ecam_add_bus,
+ .remove_bus = pci_ecam_remove_bus,
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
@@ -178,6 +212,8 @@ EXPORT_SYMBOL_GPL(pci_generic_ecam_ops);
/* ECAM ops for 32-bit access only (non-compliant) */
const struct pci_ecam_ops pci_32b_ops = {
.pci_ops = {
+ .add_bus = pci_ecam_add_bus,
+ .remove_bus = pci_ecam_remove_bus,
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read32,
.write = pci_generic_config_write32,
@@ -187,6 +223,8 @@ const struct pci_ecam_ops pci_32b_ops = {
/* ECAM ops for 32-bit read only (non-compliant) */
const struct pci_ecam_ops pci_32b_read_ops = {
.pci_ops = {
+ .add_bus = pci_ecam_add_bus,
+ .remove_bus = pci_ecam_remove_bus,
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read32,
.write = pci_generic_config_write,
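
With the ecam.c hunks above, per-bus config windows are no longer mapped
eagerly in pci_ecam_create(); they are mapped on demand in the new ->add_bus()
callback and unmapped in ->remove_bus(). A minimal sketch of the address
arithmetic those callbacks rely on (the helper name is illustrative; for
standard ECAM, bus_shift is 20, i.e. 1 MiB of config space per bus):

	#include <linux/pci-ecam.h>

	/* Sketch: physical base of config space for bus 'busn' in an ECAM window. */
	static phys_addr_t ecam_bus_base(const struct pci_config_window *cfg,
					 unsigned int busn)
	{
		unsigned int bsz = 1 << cfg->bus_shift;	/* bytes of config space per bus */

		return cfg->res.start + (busn - cfg->busr.start) * bsz;
	}

pci_ecam_add_bus() hands this physical base and 1 << bus_shift to
pci_remap_cfgspace() for each bus as it is added.
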
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index f33ff2bca414..3fdd1b9bd8c3 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -75,6 +75,9 @@ int cpci_hp_unregister_bus(struct pci_bus *bus);
int cpci_hp_start(void);
int cpci_hp_stop(void);
+/* Global variables */
+extern int cpci_debug;
+
/*
* Internal function prototypes, these functions should not be used by
* board/chassis drivers.
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 2c16adb7f4ec..6c48066acb44 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -19,8 +19,6 @@
#define MY_NAME "cpci_hotplug"
-extern int cpci_debug;
-
#define dbg(format, arg...) \
do { \
if (cpci_debug) \
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index b8aacb41a83c..f99a7927e5a8 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -296,9 +296,10 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
*
* Won't work for more than one PCI-PCI bridge in a slot.
*
- * @bus_num - bus number of PCI device
- * @dev_num - device number of PCI device
- * @slot - Pointer to u8 where slot number will be returned
+ * @bus: pointer to the PCI bus structure
+ * @bus_num: bus number of PCI device
+ * @dev_num: device number of PCI device
+ * @slot: Pointer to u8 where slot number will be returned
*
* Output: SUCCESS or FAILURE
*/
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 68de958a9be8..1b26ca0b3701 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1877,7 +1877,7 @@ static void interrupt_event_handler(struct controller *ctrl)
/**
* cpqhp_pushbutton_thread - handle pushbutton events
- * @slot: target slot (struct)
+ * @t: pointer to struct timer_list which holds all timer-related callbacks
*
* Scheduled procedure to handle blocking stuff for the pushbuttons.
* Handles all pending events and exits.
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5ac31f683b85..058d5937d8a9 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -73,7 +73,7 @@ static ssize_t power_read_file(struct pci_slot *pci_slot, char *buf)
if (retval)
return retval;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
@@ -130,7 +130,7 @@ static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
if (retval)
return retval;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
@@ -175,7 +175,7 @@ static ssize_t latch_read_file(struct pci_slot *pci_slot, char *buf)
if (retval)
return retval;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static struct pci_slot_attribute hotplug_slot_attr_latch = {
@@ -192,7 +192,7 @@ static ssize_t presence_read_file(struct pci_slot *pci_slot, char *buf)
if (retval)
return retval;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static struct pci_slot_attribute hotplug_slot_attr_presence = {
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4fd200d8b0a9..d4a930881054 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -47,6 +47,9 @@ extern int pciehp_poll_time;
* struct controller - PCIe hotplug controller
* @pcie: pointer to the controller's PCIe port service device
* @slot_cap: cached copy of the Slot Capabilities register
+ * @inband_presence_disabled: In-Band Presence Detect Disable supported by
+ * controller and disabled per spec recommendation (PCIe r5.0, appendix I
+ * implementation note)
* @slot_ctrl: cached copy of the Slot Control register
* @ctrl_lock: serializes writes to the Slot Control register
* @cmd_started: jiffies when the Slot Control register was last written;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index fb3840e222ad..9d06939736c0 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -563,6 +563,32 @@ void pciehp_power_off_slot(struct controller *ctrl)
PCI_EXP_SLTCTL_PWR_OFF);
}
+static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
+ struct pci_dev *pdev, int irq)
+{
+ /*
+ * Ignore link changes which occurred while waiting for DPC recovery.
+ * Could be several if DPC triggered multiple times consecutively.
+ */
+ synchronize_hardirq(irq);
+ atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
+ if (pciehp_poll_mode)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_DLLSC);
+ ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
+ slot_name(ctrl));
+
+ /*
+ * If the link is unexpectedly down after successful recovery,
+ * the corresponding link change may have been ignored above.
+ * Synthesize it to ensure that it is acted on.
+ */
+ down_read(&ctrl->reset_lock);
+ if (!pciehp_check_link_active(ctrl))
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
+ up_read(&ctrl->reset_lock);
+}
+
static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
@@ -707,6 +733,16 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
}
/*
+ * Ignore Link Down/Up events caused by Downstream Port Containment
+ * if recovery from the error succeeded.
+ */
+ if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
+ ctrl->state == ON_STATE) {
+ events &= ~PCI_EXP_SLTSTA_DLLSC;
+ pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
+ }
+
+ /*
* Disable requests have higher priority than Presence Detect Changed
* or Data Link Layer State Changed events.
*/
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index dbfa0b55d31a..068b7810a574 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -50,7 +50,7 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
static ssize_t add_slot_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static ssize_t remove_slot_store(struct kobject *kobj,
@@ -80,7 +80,7 @@ static ssize_t remove_slot_store(struct kobject *kobj,
static ssize_t remove_slot_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static struct kobj_attribute add_slot_attr =
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index 45658bb5c554..64beed7a26be 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -24,50 +24,54 @@
static ssize_t show_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
- char *out = buf;
int index, busnr;
struct resource *res;
struct pci_bus *bus;
+ size_t len = 0;
pdev = to_pci_dev(dev);
bus = pdev->subordinate;
- out += sprintf(buf, "Free resources: memory\n");
+ len += sysfs_emit_at(buf, len, "Free resources: memory\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
!(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)resource_size(res));
+ len += sysfs_emit_at(buf, len,
+ "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
- out += sprintf(out, "Free resources: prefetchable memory\n");
+ len += sysfs_emit_at(buf, len, "Free resources: prefetchable memory\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)resource_size(res));
+ len += sysfs_emit_at(buf, len,
+ "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
- out += sprintf(out, "Free resources: IO\n");
+ len += sysfs_emit_at(buf, len, "Free resources: IO\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_IO)) {
- out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)resource_size(res));
+ len += sysfs_emit_at(buf, len,
+ "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
- out += sprintf(out, "Free resources: bus numbers\n");
+ len += sysfs_emit_at(buf, len, "Free resources: bus numbers\n");
for (busnr = bus->busn_res.start; busnr <= bus->busn_res.end; busnr++) {
if (!pci_find_bus(pci_domain_nr(bus), busnr))
break;
}
if (busnr < bus->busn_res.end)
- out += sprintf(out, "start = %8.8x, length = %8.8x\n",
- busnr, (int)(bus->busn_res.end - busnr));
+ len += sysfs_emit_at(buf, len,
+ "start = %8.8x, length = %8.8x\n",
+ busnr, (int)(bus->busn_res.end - busnr));
- return out - buf;
+ return len;
}
static DEVICE_ATTR(ctrl, S_IRUGO, show_ctrl, NULL);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index afc06e6ce115..dafdc652fcd0 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -346,7 +346,7 @@ static ssize_t sriov_totalvfs_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
+ return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}
static ssize_t sriov_numvfs_show(struct device *dev,
@@ -361,7 +361,7 @@ static ssize_t sriov_numvfs_show(struct device *dev,
num_vfs = pdev->sriov->num_VFs;
device_unlock(&pdev->dev);
- return sprintf(buf, "%u\n", num_vfs);
+ return sysfs_emit(buf, "%u\n", num_vfs);
}
/*
@@ -391,9 +391,16 @@ static ssize_t sriov_numvfs_store(struct device *dev,
if (num_vfs == pdev->sriov->num_VFs)
goto exit;
+ /* is PF driver loaded */
+ if (!pdev->driver) {
+ pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
+ ret = -ENOENT;
+ goto exit;
+ }
+
/* is PF driver loaded w/callback */
- if (!pdev->driver || !pdev->driver->sriov_configure) {
- pci_info(pdev, "Driver does not support SRIOV configuration via sysfs\n");
+ if (!pdev->driver->sriov_configure) {
+ pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
ret = -ENOENT;
goto exit;
}
@@ -435,7 +442,7 @@ static ssize_t sriov_offset_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->sriov->offset);
+ return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
}
static ssize_t sriov_stride_show(struct device *dev,
@@ -444,7 +451,7 @@ static ssize_t sriov_stride_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->sriov->stride);
+ return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
}
static ssize_t sriov_vf_device_show(struct device *dev,
@@ -453,7 +460,7 @@ static ssize_t sriov_vf_device_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%x\n", pdev->sriov->vf_device);
+ return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
}
static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
@@ -462,7 +469,7 @@ static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
+ return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}
static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 217dc9f0231f..e5e75331b415 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -143,24 +143,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- u32 mask_bits = desc->masked;
+ raw_spinlock_t *lock = &desc->dev->msi_lock;
+ unsigned long flags;
if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
- return 0;
+ return;
- mask_bits &= ~mask;
- mask_bits |= flag;
+ raw_spin_lock_irqsave(lock, flags);
+ desc->masked &= ~mask;
+ desc->masked |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
- mask_bits);
-
- return mask_bits;
+ desc->masked);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+ __pci_msi_desc_mask_irq(desc, mask, flag);
}
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
@@ -289,13 +290,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
+ bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
if (!base)
goto skip;
+ /*
+ * The specification mandates that the entry is masked
+ * when the message is modified:
+ *
+ * "If software changes the Address or Data value of an
+ * entry while the entry is unmasked, the result is
+ * undefined."
+ */
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, 0);
+
+ /* Ensure that the writes are visible in the device */
+ readl(base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 msgctl;
@@ -316,6 +335,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data);
}
+ /* Ensure that the writes are visible in the device */
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}
skip:
@@ -464,11 +485,11 @@ static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
return retval;
entry = irq_get_msi_desc(irq);
- if (entry)
- return sprintf(buf, "%s\n",
- entry->msi_attrib.is_msix ? "msix" : "msi");
+ if (!entry)
+ return -ENODEV;
- return -ENODEV;
+ return sysfs_emit(buf, "%s\n",
+ entry->msi_attrib.is_msix ? "msix" : "msi");
}
static int populate_msi_sysfs(struct pci_dev *pdev)
@@ -636,21 +657,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
ret = msi_verify_entries(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
ret = populate_msi_sysfs(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
@@ -691,6 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
{
struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
+ void __iomem *addr;
int ret, i;
int vec_count = pci_msix_vec_count(dev);
@@ -711,6 +733,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
+
if (entries)
entry->msi_attrib.entry_nr = entries[i].entry;
else
@@ -722,6 +745,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
+ addr = pci_msix_desc_addr(entry);
+ if (addr)
+ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks)
curmsk++;
@@ -732,26 +759,25 @@ out:
return ret;
}
-static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
struct msi_desc *entry;
- int i = 0;
- void __iomem *desc_addr;
for_each_pci_msi_entry(entry, dev) {
- if (entries)
- entries[i++].vector = entry->irq;
+ if (entries) {
+ entries->vector = entry->irq;
+ entries++;
+ }
+ }
+}
- desc_addr = pci_msix_desc_addr(entry);
- if (desc_addr)
- entry->masked = readl(desc_addr +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- else
- entry->masked = 0;
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ int i;
- msix_mask_irq(entry, 1);
- }
+ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
/**
@@ -768,22 +794,33 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
- int ret;
- u16 control;
void __iomem *base;
+ int ret, tsize;
+ u16 control;
- /* Ensure MSI-X is disabled while it is set up */
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ /*
+ * Some devices require MSI-X to be enabled before the MSI-X
+ * registers can be accessed. Mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+ PCI_MSIX_FLAGS_ENABLE);
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, msix_table_size(control));
- if (!base)
- return -ENOMEM;
+ tsize = msix_table_size(control);
+ base = msix_map_region(dev, tsize);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_disable;
+ }
+
+ /* Ensure that all table entries are masked. */
+ msix_mask_all(base, tsize);
ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
- return ret;
+ goto out_disable;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
@@ -794,15 +831,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (ret)
goto out_free;
- /*
- * Some devices require MSI-X to be enabled before we can touch the
- * MSI-X registers. We need to mask all the vectors to prevent
- * interrupts coming in before they're fully set up.
- */
- pci_msix_clear_and_set_ctrl(dev, 0,
- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
- msix_program_entries(dev, entries);
+ msix_update_entries(dev, entries);
ret = populate_msi_sysfs(dev);
if (ret)
@@ -836,6 +865,9 @@ out_avail:
out_free:
free_msi_irqs(dev);
+out_disable:
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
return ret;
}
@@ -930,8 +962,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
/* Return the device with MSI unmasked as initial states */
mask = msi_mask(desc->msi_attrib.multi_cap);
- /* Keep cached state to be restored */
- __pci_msi_desc_mask_irq(desc, mask, ~mask);
+ msi_mask_irq(desc, mask, 0);
/* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->msi_attrib.default_irq;
@@ -1016,10 +1047,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}
/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev) {
- /* Keep cached states to be restored */
+ for_each_pci_msi_entry(entry, dev)
__pci_msix_desc_mask_irq(entry, 1);
- }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
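
The msi.c hunks above enforce the MSI-X rule quoted from the spec (an entry's Address/Data fields may only be changed while the entry is masked) and flush the posted writes with a readback so the device sees the new message before the next interrupt can fire. A minimal sketch of that sequence for one mapped table entry, reusing the PCI_MSIX_ENTRY_* constants from the patch; the helper name is illustrative, not an existing kernel function:

/* Sketch only: update one MSI-X entry the way __pci_write_msi_msg() now does. */
static void msix_write_entry(void __iomem *entry_base, struct msi_msg *msg)
{
	u32 ctrl = readl(entry_base + PCI_MSIX_ENTRY_VECTOR_CTRL);
	bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);

	if (unmasked)		/* mask while the message is modified */
		writel(ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT,
		       entry_base + PCI_MSIX_ENTRY_VECTOR_CTRL);

	writel(msg->address_lo, entry_base + PCI_MSIX_ENTRY_LOWER_ADDR);
	writel(msg->address_hi, entry_base + PCI_MSIX_ENTRY_UPPER_ADDR);
	writel(msg->data, entry_base + PCI_MSIX_ENTRY_DATA);

	if (unmasked)		/* restore the previous mask state */
		writel(ctrl, entry_base + PCI_MSIX_ENTRY_VECTOR_CTRL);

	/* Non-posted read so the device sees the update before the IRQ fires */
	readl(entry_base + PCI_MSIX_ENTRY_DATA);
}
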
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 196382630363..50cdde3e9a8b 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -48,12 +48,16 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_p2pdma *p2pdma;
size_t size = 0;
- if (pdev->p2pdma->pool)
- size = gen_pool_size(pdev->p2pdma->pool);
+ rcu_read_lock();
+ p2pdma = rcu_dereference(pdev->p2pdma);
+ if (p2pdma && p2pdma->pool)
+ size = gen_pool_size(p2pdma->pool);
+ rcu_read_unlock();
- return scnprintf(buf, PAGE_SIZE, "%zd\n", size);
+ return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);
@@ -61,12 +65,16 @@ static ssize_t available_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_p2pdma *p2pdma;
size_t avail = 0;
- if (pdev->p2pdma->pool)
- avail = gen_pool_avail(pdev->p2pdma->pool);
+ rcu_read_lock();
+ p2pdma = rcu_dereference(pdev->p2pdma);
+ if (p2pdma && p2pdma->pool)
+ avail = gen_pool_avail(p2pdma->pool);
+ rcu_read_unlock();
- return scnprintf(buf, PAGE_SIZE, "%zd\n", avail);
+ return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);
@@ -74,9 +82,16 @@ static ssize_t published_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_p2pdma *p2pdma;
+ bool published = false;
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- pdev->p2pdma->p2pmem_published);
+ rcu_read_lock();
+ p2pdma = rcu_dereference(pdev->p2pdma);
+ if (p2pdma)
+ published = p2pdma->p2pmem_published;
+ rcu_read_unlock();
+
+ return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);
@@ -95,8 +110,9 @@ static const struct attribute_group p2pmem_group = {
static void pci_p2pdma_release(void *data)
{
struct pci_dev *pdev = data;
- struct pci_p2pdma *p2pdma = pdev->p2pdma;
+ struct pci_p2pdma *p2pdma;
+ p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
if (!p2pdma)
return;
@@ -128,16 +144,14 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
if (error)
goto out_pool_destroy;
- pdev->p2pdma = p2p;
-
error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
if (error)
goto out_pool_destroy;
+ rcu_assign_pointer(pdev->p2pdma, p2p);
return 0;
out_pool_destroy:
- pdev->p2pdma = NULL;
gen_pool_destroy(p2p->pool);
out:
devm_kfree(&pdev->dev, p2p);
@@ -159,6 +173,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
{
struct pci_p2pdma_pagemap *p2p_pgmap;
struct dev_pagemap *pgmap;
+ struct pci_p2pdma *p2pdma;
void *addr;
int error;
@@ -200,7 +215,8 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
goto pgmap_free;
}
- error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
+ p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
+ error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
range_len(&pgmap->range), dev_to_node(&pdev->dev),
pgmap->ref);
@@ -308,10 +324,41 @@ static const struct pci_p2pdma_whitelist_entry {
{}
};
+/*
+ * This lookup function tries to find the PCI device corresponding to a given
+ * host bridge.
+ *
+ * It assumes the host bridge device is the first PCI device in the
+ * bus->devices list and that the devfn is 00.0. These assumptions should hold
+ * for all the devices in the whitelist above.
+ *
+ * This function is equivalent to pci_get_slot(host->bus, 0), however it does
+ * not take the pci_bus_sem lock, because __host_bridge_whitelist() must not
+ * sleep.
+ *
+ * For this to be safe, the caller should hold a reference to a device on the
+ * bridge, which should ensure the host_bridge device will not be freed
+ * or removed from the head of the devices list.
+ */
+static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
+{
+ struct pci_dev *root;
+
+ root = list_first_entry_or_null(&host->bus->devices,
+ struct pci_dev, bus_list);
+
+ if (!root)
+ return NULL;
+ if (root->devfn != PCI_DEVFN(0, 0))
+ return NULL;
+
+ return root;
+}
+
static bool __host_bridge_whitelist(struct pci_host_bridge *host,
- bool same_host_bridge)
+ bool same_host_bridge, bool warn)
{
- struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
+ struct pci_dev *root = pci_host_bridge_dev(host);
const struct pci_p2pdma_whitelist_entry *entry;
unsigned short vendor, device;
@@ -320,7 +367,6 @@ static bool __host_bridge_whitelist(struct pci_host_bridge *host,
vendor = root->vendor;
device = root->device;
- pci_dev_put(root);
for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
if (vendor != entry->vendor || device != entry->device)
@@ -331,6 +377,10 @@ static bool __host_bridge_whitelist(struct pci_host_bridge *host,
return true;
}
+ if (warn)
+ pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n",
+ vendor, device);
+
return false;
}
@@ -338,44 +388,90 @@ static bool __host_bridge_whitelist(struct pci_host_bridge *host,
* If we can't find a common upstream bridge take a look at the root
* complex and compare it to a whitelist of known good hardware.
*/
-static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b)
+static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
+ bool warn)
{
struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);
if (host_a == host_b)
- return __host_bridge_whitelist(host_a, true);
+ return __host_bridge_whitelist(host_a, true, warn);
- if (__host_bridge_whitelist(host_a, false) &&
- __host_bridge_whitelist(host_b, false))
+ if (__host_bridge_whitelist(host_a, false, warn) &&
+ __host_bridge_whitelist(host_b, false, warn))
return true;
return false;
}
+static unsigned long map_types_idx(struct pci_dev *client)
+{
+ return (pci_domain_nr(client->bus) << 16) |
+ (client->bus->number << 8) | client->devfn;
+}
+
+/*
+ * Calculate the P2PDMA mapping type and distance between two PCI devices.
+ *
+ * If the two devices are the same PCI function, return
+ * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0.
+ *
+ * If they are two functions of the same device, return
+ * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge,
+ * then one hop back down to another function of the same device).
+ *
+ * In the case where two devices are connected to the same PCIe switch,
+ * return a distance of 4. This corresponds to the following PCI tree:
+ *
+ *     -+ Root Port
+ *      \+ Switch Upstream Port
+ *       +-+ Switch Downstream Port 0
+ *       + \- Device A
+ *       \-+ Switch Downstream Port 1
+ *         \- Device B
+ *
+ * The distance is 4 because we traverse from Device A to Downstream Port 0
+ * to the common Switch Upstream Port, back down to Downstream Port 1 and
+ * then to Device B. The mapping type returned depends on the ACS
+ * redirection setting of the ports along the path.
+ *
+ * If ACS redirect is set on any port in the path, traffic between the
+ * devices will go through the host bridge, so return
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; otherwise return
+ * PCI_P2PDMA_MAP_BUS_ADDR.
+ *
+ * Any two devices that have a data path that goes through the host bridge
+ * will consult a whitelist. If the host bridge is in the whitelist, return
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with the distance set to the number of
+ * ports per above. If the device is not in the whitelist, return
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED.
+ */
static enum pci_p2pdma_map_type
-__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
- int *dist, bool *acs_redirects, struct seq_buf *acs_list)
+calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
+ int *dist, bool verbose)
{
+ enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
struct pci_dev *a = provider, *b = client, *bb;
+ bool acs_redirects = false;
+ struct pci_p2pdma *p2pdma;
+ struct seq_buf acs_list;
+ int acs_cnt = 0;
int dist_a = 0;
int dist_b = 0;
- int acs_cnt = 0;
+ char buf[128];
- if (acs_redirects)
- *acs_redirects = false;
+ seq_buf_init(&acs_list, buf, sizeof(buf));
/*
* Note, we don't need to take references to devices returned by
* pci_upstream_bridge() seeing we hold a reference to a child
* device which will already hold a reference to the upstream bridge.
*/
-
while (a) {
dist_b = 0;
if (pci_bridge_has_acs_redir(a)) {
- seq_buf_print_bus_devfn(acs_list, a);
+ seq_buf_print_bus_devfn(&acs_list, a);
acs_cnt++;
}
@@ -393,10 +489,8 @@ __upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
dist_a++;
}
- if (dist)
- *dist = dist_a + dist_b;
-
- return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
+ *dist = dist_a + dist_b;
+ goto map_through_host_bridge;
check_b_path_acs:
bb = b;
@@ -406,124 +500,45 @@ check_b_path_acs:
break;
if (pci_bridge_has_acs_redir(bb)) {
- seq_buf_print_bus_devfn(acs_list, bb);
+ seq_buf_print_bus_devfn(&acs_list, bb);
acs_cnt++;
}
bb = pci_upstream_bridge(bb);
}
- if (dist)
- *dist = dist_a + dist_b;
-
- if (acs_cnt) {
- if (acs_redirects)
- *acs_redirects = true;
-
- return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
- }
-
- return PCI_P2PDMA_MAP_BUS_ADDR;
-}
-
-static unsigned long map_types_idx(struct pci_dev *client)
-{
- return (pci_domain_nr(client->bus) << 16) |
- (client->bus->number << 8) | client->devfn;
-}
-
-/*
- * Find the distance through the nearest common upstream bridge between
- * two PCI devices.
- *
- * If the two devices are the same device then 0 will be returned.
- *
- * If there are two virtual functions of the same device behind the same
- * bridge port then 2 will be returned (one step down to the PCIe switch,
- * then one step back to the same device).
- *
- * In the case where two devices are connected to the same PCIe switch, the
- * value 4 will be returned. This corresponds to the following PCI tree:
- *
- * -+ Root Port
- * \+ Switch Upstream Port
- * +-+ Switch Downstream Port
- * + \- Device A
- * \-+ Switch Downstream Port
- * \- Device B
- *
- * The distance is 4 because we traverse from Device A through the downstream
- * port of the switch, to the common upstream port, back up to the second
- * downstream port and then to Device B.
- *
- * Any two devices that cannot communicate using p2pdma will return
- * PCI_P2PDMA_MAP_NOT_SUPPORTED.
- *
- * Any two devices that have a data path that goes through the host bridge
- * will consult a whitelist. If the host bridges are on the whitelist,
- * this function will return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE.
- *
- * If either bridge is not on the whitelist this function returns
- * PCI_P2PDMA_MAP_NOT_SUPPORTED.
- *
- * If a bridge which has any ACS redirection bits set is in the path,
- * acs_redirects will be set to true. In this case, a list of all infringing
- * bridge addresses will be populated in acs_list (assuming it's non-null)
- * for printk purposes.
- */
-static enum pci_p2pdma_map_type
-upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
- int *dist, bool *acs_redirects, struct seq_buf *acs_list)
-{
- enum pci_p2pdma_map_type map_type;
-
- map_type = __upstream_bridge_distance(provider, client, dist,
- acs_redirects, acs_list);
+ *dist = dist_a + dist_b;
- if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) {
- if (!cpu_supports_p2pdma() &&
- !host_bridge_whitelist(provider, client))
- map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
+ if (!acs_cnt) {
+ map_type = PCI_P2PDMA_MAP_BUS_ADDR;
+ goto done;
}
- if (provider->p2pdma)
- xa_store(&provider->p2pdma->map_types, map_types_idx(client),
- xa_mk_value(map_type), GFP_KERNEL);
-
- return map_type;
-}
-
-static enum pci_p2pdma_map_type
-upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
- int *dist)
-{
- struct seq_buf acs_list;
- bool acs_redirects;
- int ret;
-
- seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
- if (!acs_list.buffer)
- return -ENOMEM;
-
- ret = upstream_bridge_distance(provider, client, dist, &acs_redirects,
- &acs_list);
- if (acs_redirects) {
+ if (verbose) {
+ acs_list.buffer[acs_list.len-1] = 0; /* drop final semicolon */
pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
pci_name(provider));
- /* Drop final semicolon */
- acs_list.buffer[acs_list.len-1] = 0;
pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
acs_list.buffer);
}
+ acs_redirects = true;
- if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) {
- pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
- pci_name(provider));
+map_through_host_bridge:
+ if (!cpu_supports_p2pdma() &&
+ !host_bridge_whitelist(provider, client, acs_redirects)) {
+ if (verbose)
+ pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
+ pci_name(provider));
+ map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
-
- kfree(acs_list.buffer);
-
- return ret;
+done:
+ rcu_read_lock();
+ p2pdma = rcu_dereference(provider->p2pdma);
+ if (p2pdma)
+ xa_store(&p2pdma->map_types, map_types_idx(client),
+ xa_mk_value(map_type), GFP_KERNEL);
+ rcu_read_unlock();
+ return map_type;
}
/**
@@ -546,11 +561,11 @@ upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
int num_clients, bool verbose)
{
+ enum pci_p2pdma_map_type map;
bool not_supported = false;
struct pci_dev *pci_client;
int total_dist = 0;
- int distance;
- int i, ret;
+ int i, distance;
if (num_clients == 0)
return -1;
@@ -564,16 +579,12 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
return -1;
}
- if (verbose)
- ret = upstream_bridge_distance_warn(provider,
- pci_client, &distance);
- else
- ret = upstream_bridge_distance(provider, pci_client,
- &distance, NULL, NULL);
+ map = calc_map_type_and_dist(provider, pci_client, &distance,
+ verbose);
pci_dev_put(pci_client);
- if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED)
+ if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
not_supported = true;
if (not_supported && !verbose)
@@ -595,7 +606,15 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
*/
bool pci_has_p2pmem(struct pci_dev *pdev)
{
- return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
+ struct pci_p2pdma *p2pdma;
+ bool res;
+
+ rcu_read_lock();
+ p2pdma = rcu_dereference(pdev->p2pdma);
+ res = p2pdma && p2pdma->p2pmem_published;
+ rcu_read_unlock();
+
+ return res;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);
@@ -675,6 +694,7 @@ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
void *ret = NULL;
struct percpu_ref *ref;
+ struct pci_p2pdma *p2pdma;
/*
* Pairs with synchronize_rcu() in pci_p2pdma_release() to
@@ -682,16 +702,16 @@ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
* read-lock.
*/
rcu_read_lock();
- if (unlikely(!pdev->p2pdma))
+ p2pdma = rcu_dereference(pdev->p2pdma);
+ if (unlikely(!p2pdma))
goto out;
- ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
- (void **) &ref);
+ ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **) &ref);
if (!ret)
goto out;
if (unlikely(!percpu_ref_tryget_live(ref))) {
- gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
+ gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
ret = NULL;
goto out;
}
@@ -710,8 +730,9 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
struct percpu_ref *ref;
+ struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
- gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
+ gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size,
(void **) &ref);
percpu_ref_put(ref);
}
@@ -725,9 +746,13 @@ EXPORT_SYMBOL_GPL(pci_free_p2pmem);
*/
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
+ struct pci_p2pdma *p2pdma;
+
if (!addr)
return 0;
- if (!pdev->p2pdma)
+
+ p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
+ if (!p2pdma)
return 0;
/*
@@ -735,7 +760,7 @@ pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
* bus address as the physical address. So gen_pool_virt_to_phys()
* actually returns the bus address despite the misleading name.
*/
- return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
+ return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
@@ -806,19 +831,40 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
*/
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
- if (pdev->p2pdma)
- pdev->p2pdma->p2pmem_published = publish;
+ struct pci_p2pdma *p2pdma;
+
+ rcu_read_lock();
+ p2pdma = rcu_dereference(pdev->p2pdma);
+ if (p2pdma)
+ p2pdma->p2pmem_published = publish;
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
-static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider,
- struct pci_dev *client)
+static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
+ struct device *dev)
{
+ enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
+ struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
+ struct pci_dev *client;
+ struct pci_p2pdma *p2pdma;
+
if (!provider->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
- return xa_to_value(xa_load(&provider->p2pdma->map_types,
- map_types_idx(client)));
+ if (!dev_is_pci(dev))
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+
+ client = to_pci_dev(dev);
+
+ rcu_read_lock();
+ p2pdma = rcu_dereference(provider->p2pdma);
+
+ if (p2pdma)
+ type = xa_to_value(xa_load(&p2pdma->map_types,
+ map_types_idx(client)));
+ rcu_read_unlock();
+ return type;
}
static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
@@ -853,14 +899,8 @@ int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
{
struct pci_p2pdma_pagemap *p2p_pgmap =
to_p2p_pgmap(sg_page(sg)->pgmap);
- struct pci_dev *client;
-
- if (WARN_ON_ONCE(!dev_is_pci(dev)))
- return 0;
- client = to_pci_dev(dev);
-
- switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) {
+ switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -884,17 +924,9 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- struct pci_p2pdma_pagemap *p2p_pgmap =
- to_p2p_pgmap(sg_page(sg)->pgmap);
enum pci_p2pdma_map_type map_type;
- struct pci_dev *client;
-
- if (WARN_ON_ONCE(!dev_is_pci(dev)))
- return;
-
- client = to_pci_dev(dev);
- map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client);
+ map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);
if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
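
The p2pdma.c changes turn pdev->p2pdma into an RCU-protected pointer that is only published with rcu_assign_pointer() after the sysfs group exists, so every reader now follows the pattern below. A minimal sketch of the read side, assuming the same struct pci_p2pdma fields used above; the helper name is illustrative, not an existing kernel function:

/* Sketch: read an RCU-protected pdev->p2pdma field, as the sysfs show
 * functions above now do.
 */
static bool p2pdma_read_published(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return published;
}

The cached mapping type is keyed by map_types_idx(), which packs (domain << 16) | (bus << 8) | devfn; for example a client at 0000:03:00.1 (devfn 0x01) indexes the xarray at 0x0301.
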
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index c32f3b7540e8..0c6446519640 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -139,14 +139,17 @@ enum acpi_attr_enum {
ACPI_ATTR_INDEX_SHOW,
};
-static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
+static int dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
{
int len;
+
len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
obj->buffer.length,
UTF16_LITTLE_ENDIAN,
- buf, PAGE_SIZE);
- buf[len] = '\n';
+ buf, PAGE_SIZE - 1);
+ buf[len++] = '\n';
+
+ return len;
}
static int dsm_get_label(struct device *dev, char *buf,
@@ -154,7 +157,7 @@ static int dsm_get_label(struct device *dev, char *buf,
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object *obj, *tmp;
- int len = -1;
+ int len = 0;
if (!handle)
return -1;
@@ -175,20 +178,19 @@ static int dsm_get_label(struct device *dev, char *buf,
* this entry must return a null string.
*/
if (attr == ACPI_ATTR_INDEX_SHOW) {
- scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
+ len = sysfs_emit(buf, "%llu\n", tmp->integer.value);
} else if (attr == ACPI_ATTR_LABEL_SHOW) {
if (tmp[1].type == ACPI_TYPE_STRING)
- scnprintf(buf, PAGE_SIZE, "%s\n",
- tmp[1].string.pointer);
+ len = sysfs_emit(buf, "%s\n",
+ tmp[1].string.pointer);
else if (tmp[1].type == ACPI_TYPE_BUFFER)
- dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+ len = dsm_label_utf16s_to_utf8s(tmp + 1, buf);
}
- len = strlen(buf) > 0 ? strlen(buf) : -1;
}
ACPI_FREE(obj);
- return len;
+ return len > 0 ? len : -1;
}
static ssize_t label_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index beb8d1f4fafe..7bbf2673c7f2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -537,7 +537,7 @@ static ssize_t devspec_show(struct device *dev,
if (np == NULL)
return 0;
- return sysfs_emit(buf, "%pOF", np);
+ return sysfs_emit(buf, "%pOF\n", np);
}
static DEVICE_ATTR_RO(devspec);
#endif
@@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
- b->legacy_io->mapping = iomem_get_mapping();
+ b->legacy_mem->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
if (error)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8d4ebe095d0c..aacf575c15cf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5030,6 +5030,16 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
+static int pci_reset_bus_function(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ rc = pci_dev_reset_slot_function(dev, probe);
+ if (rc != -ENOTTY)
+ return rc;
+ return pci_parent_bus_reset(dev, probe);
+}
+
static void pci_dev_lock(struct pci_dev *dev)
{
pci_cfg_access_lock(dev);
@@ -5038,7 +5048,7 @@ static void pci_dev_lock(struct pci_dev *dev)
}
/* Return 1 on successful lock, 0 on contention */
-static int pci_dev_trylock(struct pci_dev *dev)
+int pci_dev_trylock(struct pci_dev *dev)
{
if (pci_cfg_access_trylock(dev)) {
if (device_trylock(&dev->dev))
@@ -5048,12 +5058,14 @@ static int pci_dev_trylock(struct pci_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_dev_trylock);
-static void pci_dev_unlock(struct pci_dev *dev)
+void pci_dev_unlock(struct pci_dev *dev)
{
device_unlock(&dev->dev);
pci_cfg_access_unlock(dev);
}
+EXPORT_SYMBOL_GPL(pci_dev_unlock);
static void pci_dev_save_and_disable(struct pci_dev *dev)
{
@@ -5150,10 +5162,7 @@ int __pci_reset_function_locked(struct pci_dev *dev)
rc = pci_pm_reset(dev, 0);
if (rc != -ENOTTY)
return rc;
- rc = pci_dev_reset_slot_function(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- return pci_parent_bus_reset(dev, 0);
+ return pci_reset_bus_function(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
@@ -5185,11 +5194,8 @@ int pci_probe_reset_function(struct pci_dev *dev)
rc = pci_pm_reset(dev, 1);
if (rc != -ENOTTY)
return rc;
- rc = pci_dev_reset_slot_function(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- return pci_parent_bus_reset(dev, 1);
+ return pci_reset_bus_function(dev, 1);
}
/**
@@ -6449,34 +6455,40 @@ static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
spin_lock(&resource_alignment_lock);
if (resource_alignment_param)
- count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
+ count = sysfs_emit(buf, "%s\n", resource_alignment_param);
spin_unlock(&resource_alignment_lock);
- /*
- * When set by the command line, resource_alignment_param will not
- * have a trailing line feed, which is ugly. So conditionally add
- * it here.
- */
- if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
- buf[count - 1] = '\n';
- buf[count++] = 0;
- }
-
return count;
}
static ssize_t resource_alignment_store(struct bus_type *bus,
const char *buf, size_t count)
{
- char *param = kstrndup(buf, count, GFP_KERNEL);
+ char *param, *old, *end;
+
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+ param = kstrndup(buf, count, GFP_KERNEL);
if (!param)
return -ENOMEM;
+ end = strchr(param, '\n');
+ if (end)
+ *end = '\0';
+
spin_lock(&resource_alignment_lock);
- kfree(resource_alignment_param);
- resource_alignment_param = param;
+ old = resource_alignment_param;
+ if (strlen(param)) {
+ resource_alignment_param = param;
+ } else {
+ kfree(param);
+ resource_alignment_param = NULL;
+ }
spin_unlock(&resource_alignment_lock);
+
+ kfree(old);
+
return count;
}
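
pci.c now exports pci_dev_trylock() and pci_dev_unlock(), so code built as a module can hold the combined config-access and device lock around a device-state change. A sketch of a caller; the function name and the work done under the lock are illustrative:

/* Sketch: take the combined config-access + device lock before poking
 * device state, as the newly exported helpers allow.
 */
static int example_update_dev_state(struct pci_dev *pdev)
{
	if (!pci_dev_trylock(pdev))	/* returns 1 on success, 0 on contention */
		return -EAGAIN;

	/* ... modify device state while config access is blocked ... */

	pci_dev_unlock(pdev);
	return 0;
}
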
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 37c913bbc6e1..93dcdd431072 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -324,8 +324,8 @@ struct pci_sriov {
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
- * @dev - pci device to set new error_state
- * @new - the state we want dev to be in
+ * @dev: PCI device to set new error_state
+ * @new: the state we want dev to be in
*
* Must be called with device_lock held.
*
@@ -385,6 +385,8 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0
+#define PCI_DPC_RECOVERED 1
+#define PCI_DPC_RECOVERING 2
static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
{
@@ -439,10 +441,12 @@ void pci_restore_dpc_state(struct pci_dev *dev);
void pci_dpc_init(struct pci_dev *pdev);
void dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
+bool pci_dpc_recovered(struct pci_dev *pdev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) {}
static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
static inline void pci_dpc_init(struct pci_dev *pdev) {}
+static inline bool pci_dpc_recovered(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_PCIEPORTBUS
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index ec943cee5ecc..df4ba9b384c2 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -529,21 +529,23 @@ static const char *aer_agent_string[] = {
char *buf) \
{ \
unsigned int i; \
- char *str = buf; \
struct pci_dev *pdev = to_pci_dev(dev); \
u64 *stats = pdev->aer_stats->stats_array; \
+ size_t len = 0; \
\
for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
if (strings_array[i]) \
- str += sprintf(str, "%s %llu\n", \
- strings_array[i], stats[i]); \
+ len += sysfs_emit_at(buf, len, "%s %llu\n", \
+ strings_array[i], \
+ stats[i]); \
else if (stats[i]) \
- str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
- i, stats[i]); \
+ len += sysfs_emit_at(buf, len, \
+ #stats_array "_bit[%d] %llu\n",\
+ i, stats[i]); \
} \
- str += sprintf(str, "TOTAL_%s %llu\n", total_string, \
- pdev->aer_stats->total_field); \
- return str-buf; \
+ len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
+ pdev->aer_stats->total_field); \
+ return len; \
} \
static DEVICE_ATTR_RO(name)
@@ -563,7 +565,7 @@ aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
- return sprintf(buf, "%llu\n", pdev->aer_stats->field); \
+ return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
} \
static DEVICE_ATTR_RO(name)
@@ -983,7 +985,7 @@ static void aer_recover_work_func(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
entry.devfn);
if (!pdev) {
- pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
entry.domain, entry.bus,
PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
continue;
@@ -1022,7 +1024,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
&aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
- pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
+ pr_err("buffer overflow in recovery for %04x:%02x:%02x.%x\n",
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
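
The aer.c attribute macros above replace open-coded sprintf() pointer arithmetic with sysfs_emit_at(), which keeps every write inside the PAGE_SIZE sysfs buffer. A minimal sketch of the same accumulation pattern; the attribute and counter names are illustrative:

/* Sketch: build a multi-line sysfs attribute safely with sysfs_emit_at(). */
static ssize_t example_stats_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	static const u64 stats[] = { 3, 0, 7 };	/* illustrative counters */
	size_t len = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(stats); i++)
		len += sysfs_emit_at(buf, len, "counter_%u %llu\n", i, stats[i]);

	return len;
}
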
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index ac0557a305af..013a47f587ce 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1208,7 +1208,7 @@ static ssize_t aspm_attr_show_common(struct device *dev,
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}
static ssize_t aspm_attr_store_common(struct device *dev,
@@ -1265,7 +1265,7 @@ static ssize_t clkpm_show(struct device *dev,
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link->clkpm_enabled);
+ return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
}
static ssize_t clkpm_store(struct device *dev,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index e05aba86a317..c556e7beafe3 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -71,6 +71,58 @@ void pci_restore_dpc_state(struct pci_dev *dev)
pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}
+static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);
+
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+static bool dpc_completed(struct pci_dev *pdev)
+{
+ u16 status;
+
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
+ if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
+ return false;
+
+ if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
+ return false;
+
+ return true;
+}
+
+/**
+ * pci_dpc_recovered - whether DPC triggered and has recovered successfully
+ * @pdev: PCI device
+ *
+ * Return true if DPC was triggered for @pdev and has recovered successfully.
+ * Wait for recovery if it hasn't completed yet. Called from the PCIe hotplug
+ * driver to recognize and ignore Link Down/Up events caused by DPC.
+ */
+bool pci_dpc_recovered(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *host;
+
+ if (!pdev->dpc_cap)
+ return false;
+
+ /*
+ * Synchronization between hotplug and DPC is not supported
+ * if DPC is owned by firmware and EDR is not enabled.
+ */
+ host = pci_find_host_bridge(pdev->bus);
+ if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
+ return false;
+
+ /*
+ * Need a timeout in case DPC never completes due to failure of
+ * dpc_wait_rp_inactive(). The spec doesn't mandate a time limit,
+ * but reports indicate that DPC completes within 4 seconds.
+ */
+ wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
+ msecs_to_jiffies(4000));
+
+ return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+}
+#endif /* CONFIG_HOTPLUG_PCI_PCIE */
+
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
unsigned long timeout = jiffies + HZ;
@@ -91,8 +143,11 @@ static int dpc_wait_rp_inactive(struct pci_dev *pdev)
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
+ pci_ers_result_t ret;
u16 cap;
+ set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
+
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
@@ -106,18 +161,27 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
if (!pcie_wait_for_link(pdev, false))
pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
- if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
- return PCI_ERS_RESULT_DISCONNECT;
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
+ clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ goto out;
+ }
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
if (!pcie_wait_for_link(pdev, true)) {
pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
- return PCI_ERS_RESULT_DISCONNECT;
+ clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ ret = PCI_ERS_RESULT_RECOVERED;
}
-
- return PCI_ERS_RESULT_RECOVERED;
+out:
+ clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
+ wake_up_all(&dpc_completed_waitqueue);
+ return ret;
}
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
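
pci_dpc_recovered() exists so the PCIe hotplug driver can tell a Link Down/Up caused by DPC recovery apart from a real surprise removal; the call blocks until recovery completes or the 4 second timeout expires. A sketch of the consumer side; the handler name and surrounding logic are illustrative, and the actual pciehp hookup is not part of the hunks shown here:

/* Sketch: ignore link events that were caused by a successful DPC recovery. */
static void example_handle_link_change(struct pci_dev *port)
{
	/* Blocks up to ~4s until DPC recovery has finished or timed out */
	if (pci_dpc_recovered(port)) {
		pci_info(port, "link change was DPC recovery, ignoring\n");
		return;
	}

	/* ... otherwise treat it as a genuine hot-unplug/hot-add event ... */
}
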
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 275204646c68..79177ac37880 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1576,6 +1576,26 @@ static void set_pcie_untrusted(struct pci_dev *dev)
dev->untrusted = true;
}
+static void pci_set_removable(struct pci_dev *dev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(dev);
+
+ /*
+ * We (only) consider everything downstream from an external_facing
+ * device to be removable by the user. We're mainly concerned with
+ * consumer platforms with user-accessible Thunderbolt ports that are
+ * vulnerable to DMA attacks, and we expect those ports to be marked by
+ * the firmware as external_facing. Devices in traditional hotplug
+ * slots can technically be removed, but the expectation is that unless
+ * the port is marked with external_facing, such devices are less
+ * accessible to user / may not be removed by end user, and thus not
+ * exposed as "removable" to userspace.
+ */
+ if (parent &&
+ (parent->external_facing || dev_is_removable(&parent->dev)))
+ dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+}
+
/**
* pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
* @dev: PCI device
@@ -1823,6 +1843,8 @@ int pci_setup_device(struct pci_dev *dev)
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
+ pci_set_removable(dev);
+
pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
@@ -2227,6 +2249,7 @@ static void pci_release_dev(struct device *dev)
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
bitmap_free(pci_dev->dma_alias_mask);
+ dev_dbg(dev, "device released\n");
kfree(pci_dev);
}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9bab07302bbf..d32fbfc93ea9 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -230,8 +230,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
break;
}
/* If arch decided it can't, fall through... */
-#endif /* HAVE_PCI_MMAP */
fallthrough;
+#endif /* HAVE_PCI_MMAP */
default:
ret = -EINVAL;
break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 22b2bb1109c9..ab3de1551b50 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -27,6 +27,7 @@
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -1899,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
@@ -3656,6 +3658,16 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
return;
+
+ /*
+ * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller.
+ * We don't know how to turn it back on again, but firmware does,
+ * so we can only use SXIO/SXFP/SXLF if we're suspending via
+ * firmware.
+ */
+ if (!pm_suspend_via_firmware())
+ return;
+
bridge = ACPI_HANDLE(&dev->dev);
if (!bridge)
return;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index d627dd9179b4..751a26668e3a 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -39,19 +39,19 @@ static const struct sysfs_ops pci_slot_sysfs_ops = {
static ssize_t address_read_file(struct pci_slot *slot, char *buf)
{
if (slot->number == 0xff)
- return sprintf(buf, "%04x:%02x\n",
- pci_domain_nr(slot->bus),
- slot->bus->number);
- else
- return sprintf(buf, "%04x:%02x:%02x\n",
- pci_domain_nr(slot->bus),
- slot->bus->number,
- slot->number);
+ return sysfs_emit(buf, "%04x:%02x\n",
+ pci_domain_nr(slot->bus),
+ slot->bus->number);
+
+ return sysfs_emit(buf, "%04x:%02x:%02x\n",
+ pci_domain_nr(slot->bus),
+ slot->bus->number,
+ slot->number);
}
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
{
- return sprintf(buf, "%s\n", pci_speed_string(speed));
+ return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index ba52459928f7..0b301f8be9ed 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -280,7 +280,7 @@ static ssize_t device_version_show(struct device *dev,
ver = ioread32(&stdev->mmio_sys_info->device_version);
- return sprintf(buf, "%x\n", ver);
+ return sysfs_emit(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);
@@ -292,7 +292,7 @@ static ssize_t fw_version_show(struct device *dev,
ver = ioread32(&stdev->mmio_sys_info->firmware_version);
- return sprintf(buf, "%08x\n", ver);
+ return sysfs_emit(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);
@@ -344,7 +344,7 @@ static ssize_t component_vendor_show(struct device *dev,
/* component_vendor field not supported after gen3 */
if (stdev->gen != SWITCHTEC_GEN3)
- return sprintf(buf, "none\n");
+ return sysfs_emit(buf, "none\n");
return io_string_show(buf, &si->gen3.component_vendor,
sizeof(si->gen3.component_vendor));
@@ -359,9 +359,9 @@ static ssize_t component_id_show(struct device *dev,
/* component_id field not supported after gen3 */
if (stdev->gen != SWITCHTEC_GEN3)
- return sprintf(buf, "none\n");
+ return sysfs_emit(buf, "none\n");
- return sprintf(buf, "PM%04X\n", id);
+ return sysfs_emit(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);
@@ -373,9 +373,9 @@ static ssize_t component_revision_show(struct device *dev,
/* component_revision field not supported after gen3 */
if (stdev->gen != SWITCHTEC_GEN3)
- return sprintf(buf, "255\n");
+ return sysfs_emit(buf, "255\n");
- return sprintf(buf, "%d\n", rev);
+ return sysfs_emit(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);
@@ -384,7 +384,7 @@ static ssize_t partition_show(struct device *dev,
{
struct switchtec_dev *stdev = to_stdev(dev);
- return sprintf(buf, "%d\n", stdev->partition);
+ return sysfs_emit(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);
@@ -393,7 +393,7 @@ static ssize_t partition_count_show(struct device *dev,
{
struct switchtec_dev *stdev = to_stdev(dev);
- return sprintf(buf, "%d\n", stdev->partition_count);
+ return sysfs_emit(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 85887d885b5f..192c9049d654 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -112,6 +112,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev,
for (i = 0; i < socket_count; i++) {
sockets[i].card_state = 1; /* 1 = present but empty */
sockets[i].io_base = pci_resource_start(dev, 0);
+ sockets[i].dev = dev;
sockets[i].socket.features |= SS_CAP_PCCARD;
sockets[i].socket.map_size = 0x1000;
sockets[i].socket.irq_mask = 0;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 54c1f2f0985f..7dd35f1b9cc5 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -61,6 +61,15 @@ config USB_LGM_PHY
interface to interact with USB GEN-II and USB 3.x PHY that is part
of the Intel network SOC.
+config PHY_CAN_TRANSCEIVER
+ tristate "CAN transceiver PHY"
+ select GENERIC_PHY
+ help
+ This option enables support for CAN transceivers as a PHY. The
+ driver provides functions for putting the transceivers into their
+ various functional modes via GPIOs and exposes the maximum link
+ rate attribute to CAN drivers.
+
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index adac1b1a39d1..01e9efffc726 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o
+obj-$(CONFIG_PHY_CAN_TRANSCEIVER) += phy-can-transceiver.o
obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index eb10ffa13a62..b1adaecc26f8 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -215,10 +215,8 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
return err;
usb3->dmp = devm_ioremap_resource(dev, &res);
- if (IS_ERR(usb3->dmp)) {
- dev_err(dev, "Failed to map DMP regs\n");
+ if (IS_ERR(usb3->dmp))
return PTR_ERR(usb3->dmp);
- }
usb3->phy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(usb3->phy)) {
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index 1c73053bcc98..4d008cfc279c 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -23,6 +23,16 @@ config PHY_HI3660_USB
To compile this driver as a module, choose M here.
+config PHY_HI3670_USB
+ tristate "hi3670 USB PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the HISILICON HI3670 USB PHY.
+
+ To compile this driver as a module, choose M here.
+
config PHY_HISTB_COMBPHY
tristate "HiSilicon STB SoCs COMBPHY support"
depends on (ARCH_HISI && ARM64) || COMPILE_TEST
diff --git a/drivers/phy/hisilicon/Makefile b/drivers/phy/hisilicon/Makefile
index 92e874ae9c74..51729868145b 100644
--- a/drivers/phy/hisilicon/Makefile
+++ b/drivers/phy/hisilicon/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PHY_HI6220_USB) += phy-hi6220-usb.o
obj-$(CONFIG_PHY_HI3660_USB) += phy-hi3660-usb3.o
+obj-$(CONFIG_PHY_HI3670_USB) += phy-hi3670-usb3.o
obj-$(CONFIG_PHY_HISTB_COMBPHY) += phy-histb-combphy.o
obj-$(CONFIG_PHY_HISI_INNO_USB2) += phy-hisi-inno-usb2.o
obj-$(CONFIG_PHY_HIX5HD2_SATA) += phy-hix5hd2-sata.o
diff --git a/drivers/staging/hikey9xx/phy-hi3670-usb3.c b/drivers/phy/hisilicon/phy-hi3670-usb3.c
index e7e579ce0302..b9ffe08abaab 100644
--- a/drivers/staging/hikey9xx/phy-hi3670-usb3.c
+++ b/drivers/phy/hisilicon/phy-hi3670-usb3.c
@@ -148,10 +148,8 @@ static int hi3670_phy_cr_clk(struct regmap *usb31misc)
return ret;
/* Clock down */
- ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
- CFG54_USB31PHY_CR_CLK, 0);
-
- return ret;
+ return regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_CLK, 0);
}
static int hi3670_phy_cr_set_sel(struct regmap *usb31misc)
@@ -215,17 +213,14 @@ static int hi3670_phy_cr_set_addr(struct regmap *usb31misc, u32 addr)
return ret;
reg = FIELD_PREP(CFG54_USB31PHY_CR_ADDR_MASK, addr);
- ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
- CFG54_USB31PHY_CR_ADDR_MASK, reg);
- return ret;
+ return regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_ADDR_MASK, reg);
}
static int hi3670_phy_cr_read(struct regmap *usb31misc, u32 addr, u32 *val)
{
- int reg;
- int i;
- int ret;
+ int reg, i, ret;
for (i = 0; i < 100; i++) {
ret = hi3670_phy_cr_clk(usb31misc);
@@ -286,9 +281,7 @@ static int hi3670_phy_cr_write(struct regmap *usb31misc, u32 addr, u32 val)
if (ret)
return ret;
- ret = hi3670_phy_cr_wait_ack(usb31misc);
-
- return ret;
+ return hi3670_phy_cr_wait_ack(usb31misc);
}
static int hi3670_phy_set_params(struct hi3670_priv *priv)
diff --git a/drivers/phy/intel/phy-intel-keembay-emmc.c b/drivers/phy/intel/phy-intel-keembay-emmc.c
index eb7c635ed89a..0eb11ac7c2e2 100644
--- a/drivers/phy/intel/phy-intel-keembay-emmc.c
+++ b/drivers/phy/intel/phy-intel-keembay-emmc.c
@@ -95,7 +95,8 @@ static int keembay_emmc_phy_power(struct phy *phy, bool on_off)
else
freqsel = 0x0;
- if (mhz < 50 || mhz > 200)
+ /* Check for eMMC clock rate */
+ if (mhz > 175)
dev_warn(&phy->dev, "Unsupported rate: %d MHz\n", mhz);
/*
diff --git a/drivers/phy/marvell/phy-mmp3-hsic.c b/drivers/phy/marvell/phy-mmp3-hsic.c
index 47c1e8894939..7cccf01848d8 100644
--- a/drivers/phy/marvell/phy-mmp3-hsic.c
+++ b/drivers/phy/marvell/phy-mmp3-hsic.c
@@ -47,10 +47,8 @@ static int mmp3_hsic_phy_probe(struct platform_device *pdev)
resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, resource);
- if (IS_ERR(base)) {
- dev_err(dev, "failed to remap PHY regs\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
phy = devm_phy_create(dev, NULL, &mmp3_hsic_phy_ops);
if (IS_ERR(phy)) {
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index 8313bd517e4c..8ad8f717ef43 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -119,9 +119,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi_phy->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(hdmi_phy->regs)) {
- ret = PTR_ERR(hdmi_phy->regs);
- dev_err(dev, "Failed to get memory resource: %d\n", ret);
- return ret;
+ return PTR_ERR(hdmi_phy->regs);
}
ref_clk = devm_clk_get(dev, "pll_ref");
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index c51114d8e437..01cf31633019 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -151,9 +151,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
- ret = PTR_ERR(mipi_tx->regs);
- dev_err(dev, "Failed to get memory resource: %d\n", ret);
- return ret;
+ return PTR_ERR(mipi_tx->regs);
}
ref_clk = devm_clk_get(dev, NULL);
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
new file mode 100644
index 000000000000..c2cb93b4df71
--- /dev/null
+++ b/drivers/phy/phy-can-transceiver.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-can-transceiver.c - phy driver for CAN transceivers
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ *
+ */
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+
+struct can_transceiver_data {
+ u32 flags;
+#define CAN_TRANSCEIVER_STB_PRESENT BIT(0)
+#define CAN_TRANSCEIVER_EN_PRESENT BIT(1)
+};
+
+struct can_transceiver_phy {
+ struct phy *generic_phy;
+ struct gpio_desc *standby_gpio;
+ struct gpio_desc *enable_gpio;
+};
+
+/* Power on function */
+static int can_transceiver_phy_power_on(struct phy *phy)
+{
+ struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
+
+ if (can_transceiver_phy->standby_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 0);
+ if (can_transceiver_phy->enable_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 1);
+
+ return 0;
+}
+
+/* Power off function */
+static int can_transceiver_phy_power_off(struct phy *phy)
+{
+ struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
+
+ if (can_transceiver_phy->standby_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 1);
+ if (can_transceiver_phy->enable_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 0);
+
+ return 0;
+}
+
+static const struct phy_ops can_transceiver_phy_ops = {
+ .power_on = can_transceiver_phy_power_on,
+ .power_off = can_transceiver_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct can_transceiver_data tcan1042_drvdata = {
+ .flags = CAN_TRANSCEIVER_STB_PRESENT,
+};
+
+static const struct can_transceiver_data tcan1043_drvdata = {
+ .flags = CAN_TRANSCEIVER_STB_PRESENT | CAN_TRANSCEIVER_EN_PRESENT,
+};
+
+static const struct of_device_id can_transceiver_phy_ids[] = {
+ {
+ .compatible = "ti,tcan1042",
+ .data = &tcan1042_drvdata
+ },
+ {
+ .compatible = "ti,tcan1043",
+ .data = &tcan1043_drvdata
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, can_transceiver_phy_ids);
+
+static int can_transceiver_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct can_transceiver_phy *can_transceiver_phy;
+ const struct can_transceiver_data *drvdata;
+ const struct of_device_id *match;
+ struct phy *phy;
+ struct gpio_desc *standby_gpio;
+ struct gpio_desc *enable_gpio;
+ u32 max_bitrate = 0;
+
+ can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
+ if (!can_transceiver_phy)
+ return -ENOMEM;
+
+ match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
+ drvdata = match->data;
+
+ phy = devm_phy_create(dev, dev->of_node,
+ &can_transceiver_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create can transceiver phy\n");
+ return PTR_ERR(phy);
+ }
+
+ device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+ if (!max_bitrate)
+ dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
+ phy->attrs.max_link_rate = max_bitrate;
+
+ can_transceiver_phy->generic_phy = phy;
+
+ if (drvdata->flags & CAN_TRANSCEIVER_STB_PRESENT) {
+ standby_gpio = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH);
+ if (IS_ERR(standby_gpio))
+ return PTR_ERR(standby_gpio);
+ can_transceiver_phy->standby_gpio = standby_gpio;
+ }
+
+ if (drvdata->flags & CAN_TRANSCEIVER_EN_PRESENT) {
+ enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(enable_gpio))
+ return PTR_ERR(enable_gpio);
+ can_transceiver_phy->enable_gpio = enable_gpio;
+ }
+
+ phy_set_drvdata(can_transceiver_phy->generic_phy, can_transceiver_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver can_transceiver_phy_driver = {
+ .probe = can_transceiver_phy_probe,
+ .driver = {
+ .name = "can-transceiver-phy",
+ .of_match_table = can_transceiver_phy_ids,
+ },
+};
+
+module_platform_driver(can_transceiver_phy_driver);
+
+MODULE_AUTHOR("Faiz Abbas <faiz_abbas@ti.com>");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_DESCRIPTION("CAN TRANSCEIVER PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 77fe65367ce5..288c9c67aa74 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -15,7 +15,7 @@
/*
* Minimum D-PHY timings based on MIPI D-PHY specification. Derived
* from the valid ranges specified in Section 6.9, Table 14, Page 41
- * of the D-PHY specification (v2.1).
+ * of the D-PHY specification (v1.2).
*/
int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
unsigned int bpp,
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index ccb575b13777..91e28d6ce450 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -697,16 +697,18 @@ struct phy *phy_get(struct device *dev, const char *string)
struct phy *phy;
struct device_link *link;
- if (string == NULL) {
- dev_WARN(dev, "missing string\n");
- return ERR_PTR(-EINVAL);
- }
-
if (dev->of_node) {
- index = of_property_match_string(dev->of_node, "phy-names",
- string);
+ if (string)
+ index = of_property_match_string(dev->of_node, "phy-names",
+ string);
+ else
+ index = 0;
phy = _of_phy_get(dev->of_node, index);
} else {
+ if (string == NULL) {
+ dev_WARN(dev, "missing string\n");
+ return ERR_PTR(-EINVAL);
+ }
phy = phy_find(dev, string);
}
if (IS_ERR(phy))
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index f4cd590fbde7..d0f4546648f0 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -961,7 +961,8 @@ static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx)
serdes_wr(ctx, lane, RXTX_REG1, val);
/* Latch VTT value based on the termination to ground and
- enable TX FIFO */
+ * enable TX FIFO
+ */
serdes_rd(ctx, lane, RXTX_REG2, &val);
val = RXTX_REG2_VTT_ENA_SET(val, 0x1);
val = RXTX_REG2_VTT_SEL_SET(val, 0x1);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 7877f70cf86f..cfe359488f5c 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -35,6 +35,7 @@
#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
+#define PHYSTATUS_4_20 BIT(7)
/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
@@ -143,6 +144,13 @@ static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_READY_STATUS] = 0x168,
};
+static const unsigned int ipq_pciephy_gen3_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+};
+
static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_SW_RESET] = 0x400,
[QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
@@ -614,6 +622,113 @@ static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
};
+static const struct qmp_phy_init_tbl ipq6018_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_TX0_LANE_MODE_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_TX0_RCV_DETECT_LVL_2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_SO_GAIN, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_UCDR_PI_CONTROLS, 0x70),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x61),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x73),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_LOW, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH2, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH3, 0xd3),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_00_HIGH4, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_LOW, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_RX_MODE_10_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_RX0_DFE_EN_TIMER, 0x04),
+};
+
+static const struct qmp_phy_init_tbl ipq6018_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(PCS_COM_FLL_CNTRL1, 0x01),
+ QMP_PHY_INIT_CFG(PCS_COM_REFGEN_REQ_CONFIG1, 0x0d),
+ QMP_PHY_INIT_CFG(PCS_COM_G12S1_TXDEEMPH_M3P5DB, 0x10),
+ QMP_PHY_INIT_CFG(PCS_COM_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(PCS_COM_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(PCS_COM_RX_DCC_CAL_CONFIG, 0x01),
+ QMP_PHY_INIT_CFG(PCS_COM_EQ_CONFIG5, 0x01),
+ QMP_PHY_INIT_CFG(PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+ QMP_PHY_INIT_CFG(PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+ QMP_PHY_INIT_CFG(PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(PCS_PCIE_EQ_CONFIG1, 0x11),
+ QMP_PHY_INIT_CFG(PCS_PCIE_PRESET_P10_PRE, 0x00),
+ QMP_PHY_INIT_CFG(PCS_PCIE_PRESET_P10_POST, 0x58),
+};
+
static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -2110,6 +2225,101 @@ static const struct qmp_phy_init_tbl sdx55_usb3_uniphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
};
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTERNAL_DIG_CORECLK_DIV, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_MODE, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_DC_LEVEL_CTRL, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x56),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x22),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_LANE_MODE_3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_VMODE_CTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_TX_PI_QEC_CTRL, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_3, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_DAC_ENABLE2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_VGA_CAL_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B0, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B1, 0xf9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B3, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE2_B4, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B0, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B1, 0x7d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B3, 0xcf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_RX_MODE_RATE3_B4, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_PHPRE_CTRL, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_20_RX_MARG_COARSE_CTRL2, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_EQ_CONFIG5, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_EQ_CONFIG1, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
+};
+
static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
@@ -2411,6 +2621,8 @@ struct qmp_phy_cfg {
unsigned int start_ctrl;
unsigned int pwrdn_ctrl;
unsigned int mask_com_pcs_ready;
+ /* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ unsigned int phy_status;
/* true, if PHY has a separate PHY_COM control block */
bool has_phy_com_ctrl;
@@ -2624,6 +2836,7 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
};
static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
@@ -2649,6 +2862,7 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.start_ctrl = PCS_START | PLL_READY_GATE_EN,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.mask_com_pcs_ready = PCS_READY,
+ .phy_status = PHYSTATUS,
.has_phy_com_ctrl = true,
.has_lane_rst = true,
@@ -2678,6 +2892,7 @@ static const struct qmp_phy_cfg msm8996_ufs_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.no_pcs_sw_reset = true,
};
@@ -2704,6 +2919,7 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
};
static const char * const ipq8074_pciephy_clk_l[] = {
@@ -2736,6 +2952,37 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_phy_com_ctrl = false,
+ .has_lane_rst = false,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq6018_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
+ .tx_tbl = ipq6018_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(ipq6018_pcie_tx_tbl),
+ .rx_tbl = ipq6018_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq6018_pcie_rx_tbl),
+ .pcs_tbl = ipq6018_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ .reset_list = ipq8074_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
+ .vreg_list = NULL,
+ .num_vregs = 0,
+ .regs = ipq_pciephy_gen3_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.has_phy_com_ctrl = false,
.has_lane_rst = false,
@@ -2768,6 +3015,7 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.start_ctrl = PCS_START | SERDES_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
@@ -2796,6 +3044,7 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
.start_ctrl = PCS_START | SERDES_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
@@ -2834,6 +3083,7 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
.start_ctrl = PCS_START | SERDES_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
@@ -2872,6 +3122,7 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.start_ctrl = PCS_START | SERDES_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
.is_dual_lane_phy = true,
.has_pwrdn_delay = true,
@@ -2901,6 +3152,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -2932,6 +3184,7 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3003,6 +3256,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3029,6 +3283,7 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.is_dual_lane_phy = true,
.no_pcs_sw_reset = true,
@@ -3056,6 +3311,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
};
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
@@ -3080,6 +3336,7 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.is_dual_lane_phy = true,
};
@@ -3104,6 +3361,7 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.is_dual_lane_phy = true,
};
@@ -3130,6 +3388,8 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3161,6 +3421,7 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3189,6 +3450,7 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3220,6 +3482,7 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3288,12 +3551,45 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
+ .tx_tbl = sdx55_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
+ .rx_tbl = sdx55_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
+ .pcs_tbl = sdx55_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
+ .pcs_misc_tbl = sdx55_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS_4_20,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.type = PHY_TYPE_UFS,
.nlanes = 2,
@@ -3314,6 +3610,7 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.is_dual_lane_phy = true,
};
@@ -3340,6 +3637,7 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3371,6 +3669,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -3993,10 +4292,8 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
}
ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
- if (ret) {
- dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ if (ret)
goto err_rst;
- }
if (cfg->has_phy_dp_com_ctrl) {
qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
@@ -4238,7 +4535,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
ready = PCS_READY;
} else {
status = pcs + cfg->regs[QPHY_PCS_STATUS];
- mask = PHYSTATUS;
+ mask = cfg->phy_status;
ready = 0;
}
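
Note: with the new cfg->phy_status field the ready polling no longer hard-codes BIT(6); gen4-capable QMP blocks (the sdx55 PCIe PHY) report ready on BIT(7) via PHYSTATUS_4_20. A rough sketch of the resulting wait, assuming the driver's usual readl_poll_timeout() pattern from <linux/iopoll.h> (the timeout value is illustrative):

	/* Hedged sketch: poll until the configured PHYSTATUS bit clears. */
	static int example_wait_phy_ready(void __iomem *status, u32 mask)
	{
		u32 val;

		return readl_poll_timeout(status, val, !(val & mask), 10, 10000);
	}
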
@@ -4430,10 +4727,8 @@ static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
}
ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
- if (ret) {
- dev_err(qmp->dev, "failed to enable clks, err=%d\n", ret);
+ if (ret)
return ret;
- }
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -4928,6 +5223,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,ipq8074-qmp-pcie-phy",
.data = &ipq8074_pciephy_cfg,
}, {
+ .compatible = "qcom,ipq6018-qmp-pcie-phy",
+ .data = &ipq6018_pciephy_cfg,
+ }, {
.compatible = "qcom,sc7180-qmp-usb3-phy",
.data = &sc7180_usb3phy_cfg,
}, {
@@ -4991,6 +5289,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8250-qmp-modem-pcie-phy",
.data = &sm8250_qmp_gen3x2_pciephy_cfg,
}, {
+ .compatible = "qcom,sdx55-qmp-pcie-phy",
+ .data = &sdx55_qmp_pciephy_cfg,
+ }, {
.compatible = "qcom,sdx55-qmp-usb3-uni-phy",
.data = &sdx55_usb3_uniphy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 67bd2dd0d8c5..6592b58b13f6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -6,6 +6,138 @@
#ifndef QCOM_PHY_QMP_H_
#define QCOM_PHY_QMP_H_
+/* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
+
+#define QSERDES_PLL_BG_TIMER 0x00c
+#define QSERDES_PLL_SSC_PER1 0x01c
+#define QSERDES_PLL_SSC_PER2 0x020
+#define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_PLL_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_PLL_SSC_STEP_SIZE1_MODE1 0x02c
+#define QSERDES_PLL_SSC_STEP_SIZE2_MODE1 0x030
+#define QSERDES_PLL_BIAS_EN_CLKBUFLR_EN 0x03c
+#define QSERDES_PLL_CLK_ENABLE1 0x040
+#define QSERDES_PLL_SYS_CLK_CTRL 0x044
+#define QSERDES_PLL_SYSCLK_BUF_ENABLE 0x048
+#define QSERDES_PLL_PLL_IVCO 0x050
+#define QSERDES_PLL_LOCK_CMP1_MODE0 0x054
+#define QSERDES_PLL_LOCK_CMP2_MODE0 0x058
+#define QSERDES_PLL_LOCK_CMP1_MODE1 0x060
+#define QSERDES_PLL_LOCK_CMP2_MODE1 0x064
+#define QSERDES_PLL_BG_TRIM 0x074
+#define QSERDES_PLL_CLK_EP_DIV_MODE0 0x078
+#define QSERDES_PLL_CLK_EP_DIV_MODE1 0x07c
+#define QSERDES_PLL_CP_CTRL_MODE0 0x080
+#define QSERDES_PLL_CP_CTRL_MODE1 0x084
+#define QSERDES_PLL_PLL_RCTRL_MODE0 0x088
+#define QSERDES_PLL_PLL_RCTRL_MODE1 0x08C
+#define QSERDES_PLL_PLL_CCTRL_MODE0 0x090
+#define QSERDES_PLL_PLL_CCTRL_MODE1 0x094
+#define QSERDES_PLL_BIAS_EN_CTRL_BY_PSM 0x0a4
+#define QSERDES_PLL_SYSCLK_EN_SEL 0x0a8
+#define QSERDES_PLL_RESETSM_CNTRL 0x0b0
+#define QSERDES_PLL_LOCK_CMP_EN 0x0c4
+#define QSERDES_PLL_DEC_START_MODE0 0x0cc
+#define QSERDES_PLL_DEC_START_MODE1 0x0d0
+#define QSERDES_PLL_DIV_FRAC_START1_MODE0 0x0d8
+#define QSERDES_PLL_DIV_FRAC_START2_MODE0 0x0dc
+#define QSERDES_PLL_DIV_FRAC_START3_MODE0 0x0e0
+#define QSERDES_PLL_DIV_FRAC_START1_MODE1 0x0e4
+#define QSERDES_PLL_DIV_FRAC_START2_MODE1 0x0e8
+#define QSERDES_PLL_DIV_FRAC_START3_MODE1 0x0eC
+#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE0 0x100
+#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE0 0x104
+#define QSERDES_PLL_INTEGLOOP_GAIN0_MODE1 0x108
+#define QSERDES_PLL_INTEGLOOP_GAIN1_MODE1 0x10c
+#define QSERDES_PLL_VCO_TUNE_MAP 0x120
+#define QSERDES_PLL_VCO_TUNE1_MODE0 0x124
+#define QSERDES_PLL_VCO_TUNE2_MODE0 0x128
+#define QSERDES_PLL_VCO_TUNE1_MODE1 0x12c
+#define QSERDES_PLL_VCO_TUNE2_MODE1 0x130
+#define QSERDES_PLL_VCO_TUNE_TIMER1 0x13c
+#define QSERDES_PLL_VCO_TUNE_TIMER2 0x140
+#define QSERDES_PLL_CLK_SELECT 0x16c
+#define QSERDES_PLL_HSCLK_SEL 0x170
+#define QSERDES_PLL_CORECLK_DIV 0x17c
+#define QSERDES_PLL_CORE_CLK_EN 0x184
+#define QSERDES_PLL_CMN_CONFIG 0x18c
+#define QSERDES_PLL_SVS_MODE_CLK_SEL 0x194
+#define QSERDES_PLL_CORECLK_DIV_MODE1 0x1b4
+
+/* QMP V2 PHY for PCIE gen3 ports - QSERDES TX registers */
+
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_TX 0x03c
+#define QSERDES_TX0_HIGHZ_DRVR_EN 0x058
+#define QSERDES_TX0_LANE_MODE_1 0x084
+#define QSERDES_TX0_RCV_DETECT_LVL_2 0x09c
+
+/* QMP V2 PHY for PCIE gen3 ports - QSERDES RX registers */
+
+#define QSERDES_RX0_UCDR_FO_GAIN 0x008
+#define QSERDES_RX0_UCDR_SO_GAIN 0x014
+#define QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_RX0_UCDR_PI_CONTROLS 0x044
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_RX0_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_RX0_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_RX0_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_RX0_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_RX0_SIGDET_ENABLES 0x118
+#define QSERDES_RX0_SIGDET_CNTRL 0x11c
+#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_RX0_RX_MODE_00_LOW 0x170
+#define QSERDES_RX0_RX_MODE_00_HIGH 0x174
+#define QSERDES_RX0_RX_MODE_00_HIGH2 0x178
+#define QSERDES_RX0_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_RX0_RX_MODE_00_HIGH4 0x180
+#define QSERDES_RX0_RX_MODE_01_LOW 0x184
+#define QSERDES_RX0_RX_MODE_01_HIGH 0x188
+#define QSERDES_RX0_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_RX0_RX_MODE_01_HIGH3 0x190
+#define QSERDES_RX0_RX_MODE_01_HIGH4 0x194
+#define QSERDES_RX0_RX_MODE_10_LOW 0x198
+#define QSERDES_RX0_RX_MODE_10_HIGH 0x19c
+#define QSERDES_RX0_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_RX0_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_RX0_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_RX0_DFE_EN_TIMER 0x1b4
+
+/* QMP V2 PHY for PCIE gen3 ports - PCS registers */
+
+#define PCS_COM_FLL_CNTRL1 0x098
+#define PCS_COM_FLL_CNTRL2 0x09c
+#define PCS_COM_FLL_CNT_VAL_L 0x0a0
+#define PCS_COM_FLL_CNT_VAL_H_TOL 0x0a4
+#define PCS_COM_FLL_MAN_CODE 0x0a8
+#define PCS_COM_REFGEN_REQ_CONFIG1 0x0dc
+#define PCS_COM_G12S1_TXDEEMPH_M3P5DB 0x16c
+#define PCS_COM_RX_SIGDET_LVL 0x188
+#define PCS_COM_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4
+#define PCS_COM_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8
+#define PCS_COM_RX_DCC_CAL_CONFIG 0x1d8
+#define PCS_COM_EQ_CONFIG5 0x1ec
+
+/* QMP V2 PHY for PCIE gen3 ports - PCS Misc registers */
+
+#define PCS_PCIE_POWER_STATE_CONFIG2 0x40c
+#define PCS_PCIE_POWER_STATE_CONFIG4 0x414
+#define PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x41c
+#define PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x440
+#define PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x444
+#define PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x448
+#define PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x44c
+#define PCS_PCIE_OSC_DTCT_CONFIG2 0x45c
+#define PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x478
+#define PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x480
+#define PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x484
+#define PCS_PCIE_OSC_DTCT_ACTIONS 0x490
+#define PCS_PCIE_EQ_CONFIG1 0x4a0
+#define PCS_PCIE_EQ_CONFIG2 0x4a4
+#define PCS_PCIE_PRESET_P10_PRE 0x4bc
+#define PCS_PCIE_PRESET_P10_POST 0x4e0
+
/* Only for QMP V2 PHY - QSERDES COM registers */
#define QSERDES_COM_BG_TIMER 0x00c
#define QSERDES_COM_SSC_EN_CENTER 0x010
@@ -420,6 +552,7 @@
#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
#define QSERDES_V4_COM_RESETSM_CNTRL 0x09c
#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP_CFG 0x0a8
#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
@@ -434,6 +567,8 @@
#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0 0x0ec
#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0 0x0f0
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE1 0x0f4
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE1 0x0f8
#define QSERDES_V4_COM_VCO_TUNE_CTRL 0x108
#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
@@ -451,11 +586,15 @@
#define QSERDES_V4_COM_C_READY_STATUS 0x178
#define QSERDES_V4_COM_CMN_CONFIG 0x17c
#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
+#define QSERDES_V4_COM_CMN_MISC1 0x19c
+#define QSERDES_V4_COM_INTERNAL_DIG_CORECLK_DIV 0x1a0
+#define QSERDES_V4_COM_CMN_MODE 0x1a4
+#define QSERDES_V4_COM_VCO_DC_LEVEL_CTRL 0x1a8
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
-#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
/* Only for QMP V4 PHY - TX registers */
#define QSERDES_V4_TX_CLKBUF_ENABLE 0x08
@@ -485,6 +624,13 @@
#define QSERDES_V4_TX_VMODE_CTRL1 0xe8
#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
+/* Only for QMP V4_20 PHY - TX registers */
+#define QSERDES_V4_20_TX_LANE_MODE_1 0x88
+#define QSERDES_V4_20_TX_LANE_MODE_2 0x8c
+#define QSERDES_V4_20_TX_LANE_MODE_3 0x90
+#define QSERDES_V4_20_TX_VMODE_CTRL1 0xc4
+#define QSERDES_V4_20_TX_PI_QEC_CTRL 0xe0
+
/* Only for QMP V4 PHY - RX registers */
#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
@@ -551,6 +697,33 @@
#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V4_DP_PHY_STATUS 0x0dc
+/* Only for QMP V4_20 PHY - RX registers */
+#define QSERDES_V4_20_RX_FO_GAIN_RATE2 0x008
+#define QSERDES_V4_20_RX_UCDR_PI_CONTROLS 0x058
+#define QSERDES_V4_20_RX_AUX_DATA_TCOARSE_TFINE 0x0ac
+#define QSERDES_V4_20_RX_DFE_3 0x110
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE1 0x134
+#define QSERDES_V4_20_RX_DFE_DAC_ENABLE2 0x138
+#define QSERDES_V4_20_RX_VGA_CAL_CNTRL2 0x150
+#define QSERDES_V4_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x178
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B1 0x1c8
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B2 0x1cc
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B3 0x1d0
+#define QSERDES_V4_20_RX_RX_MODE_RATE_0_1_B4 0x1d4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B0 0x1d8
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B1 0x1dc
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B2 0x1e0
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B3 0x1e4
+#define QSERDES_V4_20_RX_RX_MODE_RATE2_B4 0x1e8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B0 0x1ec
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B1 0x1f0
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B2 0x1f4
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B3 0x1f8
+#define QSERDES_V4_20_RX_RX_MODE_RATE3_B4 0x1fc
+#define QSERDES_V4_20_RX_PHPRE_CTRL 0x200
+#define QSERDES_V4_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x20c
+#define QSERDES_V4_20_RX_MARG_COARSE_CTRL2 0x23c
+
/* Only for QMP V4 PHY - UFS PCS registers */
#define QPHY_V4_PCS_UFS_PHY_START 0x000
#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
@@ -836,6 +1009,12 @@
#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
+/* Only for QMP V4_20 PHY - USB/PCIe PCS registers */
+#define QPHY_V4_20_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V4_20_PCS_EQ_CONFIG2 0x1d8
+#define QPHY_V4_20_PCS_EQ_CONFIG4 0x1e0
+#define QPHY_V4_20_PCS_EQ_CONFIG5 0x1e4
+
/* Only for QMP V4 PHY - UNI has 0x300 offset for PCS_USB3 regs */
#define QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x618
#define QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x638
@@ -861,6 +1040,14 @@
#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc
#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0
+#define QPHY_V4_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V4_20_PCS_PCIE_G3_RXEQEVAL_TIME 0x0f0
+#define QPHY_V4_20_PCS_PCIE_G4_RXEQEVAL_TIME 0x0f4
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
+#define QPHY_V4_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V4_20_PCS_LANE1_INSIG_SW_CTRL2 0x824
+#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
+
/* Only for QMP V5 PHY - QSERDES COM registers */
#define QSERDES_V5_COM_PLL_IVCO 0x058
#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 8f1bf7e2186b..3c1d3b71c825 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -219,6 +219,22 @@ static const struct qusb2_phy_init_tbl msm8998_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_DIGITAL_TIMERS_TWO, 0x19),
};
+static const struct qusb2_phy_init_tbl sm6115_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xf8),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0x53),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE3, 0x81),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0x17),
+
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
+
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+};
+
static const unsigned int qusb2_v2_regs_layout[] = {
[QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8,
[QUSB2PHY_PLL_STATUS] = 0x1a0,
@@ -342,6 +358,18 @@ static const struct qusb2_phy_cfg sdm660_phy_cfg = {
.autoresume_en = BIT(3),
};
+static const struct qusb2_phy_cfg sm6115_phy_cfg = {
+ .tbl = sm6115_init_tbl,
+ .tbl_num = ARRAY_SIZE(sm6115_init_tbl),
+ .regs = msm8996_regs_layout,
+
+ .has_pll_test = true,
+ .se_clk_scheme_default = true,
+ .disable_ctrl = (CLAMP_N_EN | FREEZIO_N | POWER_DOWN),
+ .mask_core_ready = PLL_LOCKED,
+ .autoresume_en = BIT(3),
+};
+
static const char * const qusb2_phy_vreg_names[] = {
"vdda-pll", "vdda-phy-dpdm",
};
@@ -889,6 +917,12 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,sdm660-qusb2-phy",
.data = &sdm660_phy_cfg,
}, {
+ .compatible = "qcom,sm4250-qusb2-phy",
+ .data = &sm6115_phy_cfg,
+ }, {
+ .compatible = "qcom,sm6115-qusb2-phy",
+ .data = &sm6115_phy_cfg,
+ }, {
/*
* Deprecated. Only here to support legacy device
* trees that didn't include "qcom,qusb2-v2-phy"
diff --git a/drivers/phy/ralink/Kconfig b/drivers/phy/ralink/Kconfig
index ecc309ba9fee..c2373b30b8a6 100644
--- a/drivers/phy/ralink/Kconfig
+++ b/drivers/phy/ralink/Kconfig
@@ -4,7 +4,7 @@
#
config PHY_MT7621_PCI
tristate "MediaTek MT7621 PCI PHY Driver"
- depends on RALINK && OF
+ depends on (RALINK && OF) || COMPILE_TEST
select GENERIC_PHY
select REGMAP_MMIO
help
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 2a9465f4bb3a..5e6530f545b5 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -5,6 +5,7 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -14,8 +15,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sys_soc.h>
-#include <mt7621.h>
-#include <ralink_regs.h>
#define RG_PE1_PIPE_REG 0x02c
#define RG_PE1_PIPE_RST BIT(12)
@@ -62,8 +61,6 @@
#define RG_PE1_FRC_MSTCKDIV BIT(5)
-#define XTAL_MASK GENMASK(8, 6)
-
#define MAX_PHYS 2
/**
@@ -71,6 +68,7 @@
* @dev: pointer to device
* @regmap: kernel regmap pointer
* @phy: pointer to the kernel PHY device
+ * @sys_clk: pointer to the system XTAL clock
* @port_base: base register
* @has_dual_port: if the phy has dual ports.
* @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst'
@@ -80,6 +78,7 @@ struct mt7621_pci_phy {
struct device *dev;
struct regmap *regmap;
struct phy *phy;
+ struct clk *sys_clk;
void __iomem *port_base;
bool has_dual_port;
bool bypass_pipe_rst;
@@ -116,12 +115,14 @@ static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy)
}
}
-static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
+static int mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
{
struct device *dev = phy->dev;
- u32 xtal_mode;
+ unsigned long clk_rate;
- xtal_mode = FIELD_GET(XTAL_MASK, rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0));
+ clk_rate = clk_get_rate(phy->sys_clk);
+ if (!clk_rate)
+ return -EINVAL;
/* Set PCIe Port PHY to disable SSC */
/* Debug Xtal Type */
@@ -139,13 +140,13 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
}
- if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */
+ if (clk_rate == 40000000) { /* 40MHz Xtal */
/* Set Pre-divider ratio (for host mode) */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x01));
dev_dbg(dev, "Xtal is 40MHz\n");
- } else if (xtal_mode >= 6) { /* 25MHz Xal */
+ } else if (clk_rate == 25000000) { /* 25MHz Xtal */
mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00));
@@ -196,13 +197,15 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG, RG_PE1_H_PLL_BR,
FIELD_PREP(RG_PE1_H_PLL_BR, 0x00));
- if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */
+ if (clk_rate == 40000000) { /* 40MHz Xtal */
/* set force mode enable of da_pe1_mstckdiv */
mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG,
RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV,
FIELD_PREP(RG_PE1_MSTCKDIV, 0x01) |
RG_PE1_FRC_MSTCKDIV);
}
+
+ return 0;
}
static int mt7621_pci_phy_init(struct phy *phy)
@@ -212,9 +215,7 @@ static int mt7621_pci_phy_init(struct phy *phy)
if (mphy->bypass_pipe_rst)
mt7621_bypass_pipe_rst(mphy);
- mt7621_set_phy_for_ssc(mphy);
-
- return 0;
+ return mt7621_set_phy_for_ssc(mphy);
}
static int mt7621_pci_phy_power_on(struct phy *phy)
@@ -272,8 +273,8 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
mt7621_phy->has_dual_port = args->args[0];
- dev_info(dev, "PHY for 0x%08x (dual port = %d)\n",
- (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port);
+ dev_dbg(dev, "PHY for 0x%px (dual port = %d)\n",
+ mt7621_phy->port_base, mt7621_phy->has_dual_port);
return mt7621_phy->phy;
}
@@ -324,6 +325,12 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy->phy);
}
+ phy->sys_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(phy->sys_clk)) {
+ dev_err(dev, "failed to get phy clock\n");
+ return PTR_ERR(phy->sys_clk);
+ }
+
phy_set_drvdata(phy->phy, phy);
provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate);
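
Note: with the ralink_regs.h access gone, the XTAL frequency now comes from the clock bound to the PHY node, so probe must resolve an unnamed clock and clk_get_rate() must report a non-zero rate before the SSC setup runs. A compressed, hedged sketch of the new flow (names as in the driver):

	unsigned long clk_rate;

	phy->sys_clk = devm_clk_get(dev, NULL);		/* unnamed clock from the PHY node */
	if (IS_ERR(phy->sys_clk))
		return PTR_ERR(phy->sys_clk);

	clk_rate = clk_get_rate(phy->sys_clk);		/* 0 means the rate is unknown */
	if (!clk_rate)
		return -EINVAL;
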
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 159285f42e5c..e812adad7242 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -48,6 +48,15 @@ config PHY_ROCKCHIP_INNO_USB2
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+config PHY_ROCKCHIP_INNO_CSIDPHY
+ tristate "Rockchip Innosilicon MIPI CSI PHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Enable this to support the Rockchip MIPI CSI PHY with
+ Innosilicon IP block.
+
config PHY_ROCKCHIP_INNO_DSIDPHY
tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index c3cfc7f0af5c..f0eec212b2aa 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_CSIDPHY) += phy-rockchip-inno-csidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
new file mode 100644
index 000000000000..ca13a604ab4f
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip MIPI RX Innosilicon DPHY driver
+ *
+ * Copyright (C) 2021 Fuzhou Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* GRF */
+#define RK1808_GRF_PD_VI_CON_OFFSET 0x0430
+
+#define RK3326_GRF_PD_VI_CON_OFFSET 0x0430
+
+#define RK3368_GRF_SOC_CON6_OFFSET 0x0418
+
+/* PHY */
+#define CSIDPHY_CTRL_LANE_ENABLE 0x00
+#define CSIDPHY_CTRL_LANE_ENABLE_CK BIT(6)
+#define CSIDPHY_CTRL_LANE_ENABLE_MASK GENMASK(5, 2)
+#define CSIDPHY_CTRL_LANE_ENABLE_UNDEFINED BIT(0)
+
+/* not present on all variants */
+#define CSIDPHY_CTRL_PWRCTL 0x04
+#define CSIDPHY_CTRL_PWRCTL_UNDEFINED GENMASK(7, 5)
+#define CSIDPHY_CTRL_PWRCTL_SYNCRST BIT(2)
+#define CSIDPHY_CTRL_PWRCTL_LDO_PD BIT(1)
+#define CSIDPHY_CTRL_PWRCTL_PLL_PD BIT(0)
+
+#define CSIDPHY_CTRL_DIG_RST 0x80
+#define CSIDPHY_CTRL_DIG_RST_UNDEFINED 0x1e
+#define CSIDPHY_CTRL_DIG_RST_RESET BIT(0)
+
+/* offset after ths_settle_offset */
+#define CSIDPHY_CLK_THS_SETTLE 0
+#define CSIDPHY_LANE_THS_SETTLE(n) (((n) + 1) * 0x80)
+#define CSIDPHY_THS_SETTLE_MASK GENMASK(6, 0)
+
+/* offset after calib_offset */
+#define CSIDPHY_CLK_CALIB_EN 0
+#define CSIDPHY_LANE_CALIB_EN(n) (((n) + 1) * 0x80)
+#define CSIDPHY_CALIB_EN BIT(7)
+
+/* Configure the count time of the THS-SETTLE by protocol. */
+#define RK1808_CSIDPHY_CLK_WR_THS_SETTLE 0x160
+#define RK3326_CSIDPHY_CLK_WR_THS_SETTLE 0x100
+#define RK3368_CSIDPHY_CLK_WR_THS_SETTLE 0x100
+
+/* Calibration reception enable */
+#define RK1808_CSIDPHY_CLK_CALIB_EN 0x168
+
+/*
+ * The higher 16 bits of this register are used for write protection:
+ * BIT(x) can only be written when the corresponding BIT(x + 16) is set to 1.
+ */
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+#define HZ_TO_MHZ(freq) div_u64(freq, 1000 * 1000)
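
Note: HIWORD_UPDATE() builds the usual Rockchip GRF write: the value sits in the low half-word and the same mask, shifted into the high half-word, unlocks only those bits. A worked example (the field chosen is illustrative):

	/*
	 * Enable a 1-bit field at shift 8:
	 * HIWORD_UPDATE(0x1, 0x1, 8) == (0x1 << 8) | (0x1 << (8 + 16))
	 *                            == 0x01000100
	 * Writing this to the GRF register changes bit 8 only; every other
	 * bit keeps its value because its write-enable bit is 0.
	 */
	regmap_write(priv->grf, reg->offset, HIWORD_UPDATE(0x1, 0x1, 8));
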
+
+enum dphy_reg_id {
+ /* rk1808 & rk3326 */
+ GRF_DPHY_CSIPHY_FORCERXMODE,
+ GRF_DPHY_CSIPHY_CLKLANE_EN,
+ GRF_DPHY_CSIPHY_DATALANE_EN,
+};
+
+struct dphy_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+};
+
+#define PHY_REG(_offset, _width, _shift) \
+ { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, }
+
+static const struct dphy_reg rk1808_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK1808_GRF_PD_VI_CON_OFFSET, 4, 0),
+ [GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK1808_GRF_PD_VI_CON_OFFSET, 1, 8),
+ [GRF_DPHY_CSIPHY_DATALANE_EN] = PHY_REG(RK1808_GRF_PD_VI_CON_OFFSET, 4, 4),
+};
+
+static const struct dphy_reg rk3326_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3326_GRF_PD_VI_CON_OFFSET, 4, 0),
+ [GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3326_GRF_PD_VI_CON_OFFSET, 1, 8),
+ [GRF_DPHY_CSIPHY_DATALANE_EN] = PHY_REG(RK3326_GRF_PD_VI_CON_OFFSET, 4, 4),
+};
+
+static const struct dphy_reg rk3368_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3368_GRF_SOC_CON6_OFFSET, 4, 8),
+};
+
+struct hsfreq_range {
+ u32 range_h;
+ u8 cfg_bit;
+};
+
+struct dphy_drv_data {
+ int pwrctl_offset;
+ int ths_settle_offset;
+ int calib_offset;
+ const struct hsfreq_range *hsfreq_ranges;
+ int num_hsfreq_ranges;
+ const struct dphy_reg *grf_regs;
+};
+
+struct rockchip_inno_csidphy {
+ struct device *dev;
+ void __iomem *phy_base;
+ struct clk *pclk;
+ struct regmap *grf;
+ struct reset_control *rst;
+ const struct dphy_drv_data *drv_data;
+ struct phy_configure_opts_mipi_dphy config;
+ u8 hsfreq;
+};
+
+static inline void write_grf_reg(struct rockchip_inno_csidphy *priv,
+ int index, u8 value)
+{
+ const struct dphy_drv_data *drv_data = priv->drv_data;
+ const struct dphy_reg *reg = &drv_data->grf_regs[index];
+
+ if (reg->offset)
+ regmap_write(priv->grf, reg->offset,
+ HIWORD_UPDATE(value, reg->mask, reg->shift));
+}
+
+/* These tables must be sorted by .range_h ascending. */
+static const struct hsfreq_range rk1808_mipidphy_hsfreq_ranges[] = {
+ { 109, 0x02}, { 149, 0x03}, { 199, 0x06}, { 249, 0x06},
+ { 299, 0x06}, { 399, 0x08}, { 499, 0x0b}, { 599, 0x0e},
+ { 699, 0x10}, { 799, 0x12}, { 999, 0x16}, {1199, 0x1e},
+ {1399, 0x23}, {1599, 0x2d}, {1799, 0x32}, {1999, 0x37},
+ {2199, 0x3c}, {2399, 0x41}, {2499, 0x46}
+};
+
+static const struct hsfreq_range rk3326_mipidphy_hsfreq_ranges[] = {
+ { 109, 0x00}, { 149, 0x01}, { 199, 0x02}, { 249, 0x03},
+ { 299, 0x04}, { 399, 0x05}, { 499, 0x06}, { 599, 0x07},
+ { 699, 0x08}, { 799, 0x09}, { 899, 0x0a}, {1099, 0x0b},
+ {1249, 0x0c}, {1349, 0x0d}, {1500, 0x0e}
+};
+
+static const struct hsfreq_range rk3368_mipidphy_hsfreq_ranges[] = {
+ { 109, 0x00}, { 149, 0x01}, { 199, 0x02}, { 249, 0x03},
+ { 299, 0x04}, { 399, 0x05}, { 499, 0x06}, { 599, 0x07},
+ { 699, 0x08}, { 799, 0x09}, { 899, 0x0a}, {1099, 0x0b},
+ {1249, 0x0c}, {1349, 0x0d}, {1500, 0x0e}
+};
+
+static void rockchip_inno_csidphy_ths_settle(struct rockchip_inno_csidphy *priv,
+ int hsfreq, int offset)
+{
+ const struct dphy_drv_data *drv_data = priv->drv_data;
+ u32 val;
+
+ val = readl(priv->phy_base + drv_data->ths_settle_offset + offset);
+ val &= ~CSIDPHY_THS_SETTLE_MASK;
+ val |= hsfreq;
+ writel(val, priv->phy_base + drv_data->ths_settle_offset + offset);
+}
+
+static int rockchip_inno_csidphy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct rockchip_inno_csidphy *priv = phy_get_drvdata(phy);
+ const struct dphy_drv_data *drv_data = priv->drv_data;
+ struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy;
+ unsigned int hsfreq = 0;
+ unsigned int i;
+ u64 data_rate_mbps;
+ int ret;
+
+ /* pass with phy_mipi_dphy_get_default_config (with pixel rate?) */
+ ret = phy_mipi_dphy_config_validate(config);
+ if (ret)
+ return ret;
+
+ data_rate_mbps = HZ_TO_MHZ(config->hs_clk_rate);
+
+ dev_dbg(priv->dev, "lanes %d - data_rate_mbps %llu\n",
+ config->lanes, data_rate_mbps);
+ for (i = 0; i < drv_data->num_hsfreq_ranges; i++) {
+ if (drv_data->hsfreq_ranges[i].range_h >= data_rate_mbps) {
+ hsfreq = drv_data->hsfreq_ranges[i].cfg_bit;
+ break;
+ }
+ }
+ if (!hsfreq)
+ return -EINVAL;
+
+ priv->hsfreq = hsfreq;
+ priv->config = *config;
+ return 0;
+}
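
Note: .configure() picks the first table entry whose range_h covers the requested HS bit rate, which is why the tables above must stay sorted ascending. As a worked example, on rk1808 an hs_clk_rate of roughly 912 MHz (about 912 Mbit/s per lane) lands in the { 999, 0x16 } entry, so hsfreq becomes 0x16; a rate above the last entry (2499) leaves hsfreq at 0 and the call fails with -EINVAL.
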
+
+static int rockchip_inno_csidphy_power_on(struct phy *phy)
+{
+ struct rockchip_inno_csidphy *priv = phy_get_drvdata(phy);
+ const struct dphy_drv_data *drv_data = priv->drv_data;
+ u64 data_rate_mbps = HZ_TO_MHZ(priv->config.hs_clk_rate);
+ u32 val;
+ int ret, i;
+
+ ret = clk_enable(priv->pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0) {
+ clk_disable(priv->pclk);
+ return ret;
+ }
+
+ /* phy start */
+ if (drv_data->pwrctl_offset >= 0)
+ writel(CSIDPHY_CTRL_PWRCTL_UNDEFINED |
+ CSIDPHY_CTRL_PWRCTL_SYNCRST,
+ priv->phy_base + drv_data->pwrctl_offset);
+
+ /* set data lane num and enable clock lane */
+ val = FIELD_PREP(CSIDPHY_CTRL_LANE_ENABLE_MASK, GENMASK(priv->config.lanes - 1, 0)) |
+ FIELD_PREP(CSIDPHY_CTRL_LANE_ENABLE_CK, 1) |
+ FIELD_PREP(CSIDPHY_CTRL_LANE_ENABLE_UNDEFINED, 1);
+ writel(val, priv->phy_base + CSIDPHY_CTRL_LANE_ENABLE);
+
+ /* Reset dphy analog part */
+ if (drv_data->pwrctl_offset >= 0)
+ writel(CSIDPHY_CTRL_PWRCTL_UNDEFINED,
+ priv->phy_base + drv_data->pwrctl_offset);
+ usleep_range(500, 1000);
+
+ /* Reset dphy digital part */
+ writel(CSIDPHY_CTRL_DIG_RST_UNDEFINED,
+ priv->phy_base + CSIDPHY_CTRL_DIG_RST);
+ writel(CSIDPHY_CTRL_DIG_RST_UNDEFINED + CSIDPHY_CTRL_DIG_RST_RESET,
+ priv->phy_base + CSIDPHY_CTRL_DIG_RST);
+
+ /* not into receive mode/wait stopstate */
+ write_grf_reg(priv, GRF_DPHY_CSIPHY_FORCERXMODE, 0x0);
+
+ /* enable calibration */
+ if (data_rate_mbps > 1500 && drv_data->calib_offset >= 0) {
+ writel(CSIDPHY_CALIB_EN,
+ priv->phy_base + drv_data->calib_offset +
+ CSIDPHY_CLK_CALIB_EN);
+ for (i = 0; i < priv->config.lanes; i++)
+ writel(CSIDPHY_CALIB_EN,
+ priv->phy_base + drv_data->calib_offset +
+ CSIDPHY_LANE_CALIB_EN(i));
+ }
+
+ rockchip_inno_csidphy_ths_settle(priv, priv->hsfreq,
+ CSIDPHY_CLK_THS_SETTLE);
+ for (i = 0; i < priv->config.lanes; i++)
+ rockchip_inno_csidphy_ths_settle(priv, priv->hsfreq,
+ CSIDPHY_LANE_THS_SETTLE(i));
+
+ write_grf_reg(priv, GRF_DPHY_CSIPHY_CLKLANE_EN, 0x1);
+ write_grf_reg(priv, GRF_DPHY_CSIPHY_DATALANE_EN,
+ GENMASK(priv->config.lanes - 1, 0));
+
+ return 0;
+}
+
+static int rockchip_inno_csidphy_power_off(struct phy *phy)
+{
+ struct rockchip_inno_csidphy *priv = phy_get_drvdata(phy);
+ const struct dphy_drv_data *drv_data = priv->drv_data;
+
+ /* disable all lanes */
+ writel(CSIDPHY_CTRL_LANE_ENABLE_UNDEFINED,
+ priv->phy_base + CSIDPHY_CTRL_LANE_ENABLE);
+
+ /* disable pll and ldo */
+ if (drv_data->pwrctl_offset >= 0)
+ writel(CSIDPHY_CTRL_PWRCTL_UNDEFINED |
+ CSIDPHY_CTRL_PWRCTL_LDO_PD |
+ CSIDPHY_CTRL_PWRCTL_PLL_PD,
+ priv->phy_base + drv_data->pwrctl_offset);
+ usleep_range(500, 1000);
+
+ pm_runtime_put(priv->dev);
+ clk_disable(priv->pclk);
+
+ return 0;
+}
+
+static int rockchip_inno_csidphy_init(struct phy *phy)
+{
+ struct rockchip_inno_csidphy *priv = phy_get_drvdata(phy);
+
+ return clk_prepare(priv->pclk);
+}
+
+static int rockchip_inno_csidphy_exit(struct phy *phy)
+{
+ struct rockchip_inno_csidphy *priv = phy_get_drvdata(phy);
+
+ clk_unprepare(priv->pclk);
+
+ return 0;
+}
+
+static const struct phy_ops rockchip_inno_csidphy_ops = {
+ .power_on = rockchip_inno_csidphy_power_on,
+ .power_off = rockchip_inno_csidphy_power_off,
+ .init = rockchip_inno_csidphy_init,
+ .exit = rockchip_inno_csidphy_exit,
+ .configure = rockchip_inno_csidphy_configure,
+ .owner = THIS_MODULE,
+};
+
+static const struct dphy_drv_data rk1808_mipidphy_drv_data = {
+ .pwrctl_offset = -1,
+ .ths_settle_offset = RK1808_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = RK1808_CSIDPHY_CLK_CALIB_EN,
+ .hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
+ .grf_regs = rk1808_grf_dphy_regs,
+};
+
+static const struct dphy_drv_data rk3326_mipidphy_drv_data = {
+ .pwrctl_offset = CSIDPHY_CTRL_PWRCTL,
+ .ths_settle_offset = RK3326_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = -1,
+ .hsfreq_ranges = rk3326_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk3326_mipidphy_hsfreq_ranges),
+ .grf_regs = rk3326_grf_dphy_regs,
+};
+
+static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
+ .pwrctl_offset = CSIDPHY_CTRL_PWRCTL,
+ .ths_settle_offset = RK3368_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = -1,
+ .hsfreq_ranges = rk3368_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk3368_mipidphy_hsfreq_ranges),
+ .grf_regs = rk3368_grf_dphy_regs,
+};
+
+static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
+ {
+ .compatible = "rockchip,px30-csi-dphy",
+ .data = &rk3326_mipidphy_drv_data,
+ },
+ {
+ .compatible = "rockchip,rk1808-csi-dphy",
+ .data = &rk1808_mipidphy_drv_data,
+ },
+ {
+ .compatible = "rockchip,rk3326-csi-dphy",
+ .data = &rk3326_mipidphy_drv_data,
+ },
+ {
+ .compatible = "rockchip,rk3368-csi-dphy",
+ .data = &rk3368_mipidphy_drv_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_inno_csidphy_match_id);
+
+static int rockchip_inno_csidphy_probe(struct platform_device *pdev)
+{
+ struct rockchip_inno_csidphy *priv;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ platform_set_drvdata(pdev, priv);
+
+ priv->drv_data = of_device_get_match_data(dev);
+ if (!priv->drv_data) {
+ dev_err(dev, "Can't find device data\n");
+ return -ENODEV;
+ }
+
+ priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(priv->grf)) {
+ dev_err(dev, "Can't find GRF syscon\n");
+ return PTR_ERR(priv->grf);
+ }
+
+ priv->phy_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->phy_base))
+ return PTR_ERR(priv->phy_base);
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ dev_err(dev, "failed to get pclk\n");
+ return PTR_ERR(priv->pclk);
+ }
+
+ priv->rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev, "failed to get system reset control\n");
+ return PTR_ERR(priv->rst);
+ }
+
+ phy = devm_phy_create(dev, NULL, &rockchip_inno_csidphy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, priv);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int rockchip_inno_csidphy_remove(struct platform_device *pdev)
+{
+ struct rockchip_inno_csidphy *priv = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(priv->dev);
+
+ return 0;
+}
+
+static struct platform_driver rockchip_inno_csidphy_driver = {
+ .driver = {
+ .name = "rockchip-inno-csidphy",
+ .of_match_table = rockchip_inno_csidphy_match_id,
+ },
+ .probe = rockchip_inno_csidphy_probe,
+ .remove = rockchip_inno_csidphy_remove,
+};
+
+module_platform_driver(rockchip_inno_csidphy_driver);
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("Rockchip MIPI Innosilicon CSI-DPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index a37f3f342642..80acca4e9e14 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -620,7 +620,7 @@ static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
- const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ const struct pre_pll_config *cfg;
unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
u32 v;
int ret;
@@ -774,7 +774,7 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
- const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ const struct pre_pll_config *cfg;
unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
u32 val;
int ret;
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 46ebdb1460a3..beacac1dd253 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1256,6 +1256,49 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
{ /* sentinel */ }
};
+static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = {
+ {
+ .reg = 0x100,
+ .num_ports = 2,
+ .clkout_ctl = { 0x108, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0100, 8, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x3020, 2, 2, 0, 1 },
+ .bvalid_det_st = { 0x3024, 2, 2, 0, 1 },
+ .bvalid_det_clr = { 0x3028, 2, 2, 0, 1 },
+ .ls_det_en = { 0x3020, 0, 0, 0, 1 },
+ .ls_det_st = { 0x3024, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x3028, 0, 0, 0, 1 },
+ .utmi_avalid = { 0x0120, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_ls = { 0x0120, 5, 4, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0104, 8, 0, 0, 0x1d1 },
+ .ls_det_en = { 0x3020, 1, 1, 0, 1 },
+ .ls_det_st = { 0x3024, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x3028, 1, 1, 0, 1 },
+ .utmi_ls = { 0x0120, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x0120, 19, 19, 0, 1 }
+ }
+ },
+ .chg_det = {
+ .opmode = { 0x0100, 3, 0, 5, 1 },
+ .cp_det = { 0x0120, 24, 24, 0, 1 },
+ .dcp_det = { 0x0120, 23, 23, 0, 1 },
+ .dp_det = { 0x0120, 25, 25, 0, 1 },
+ .idm_sink_en = { 0x0108, 8, 8, 0, 1 },
+ .idp_sink_en = { 0x0108, 7, 7, 0, 1 },
+ .idp_src_en = { 0x0108, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0x0108, 10, 10, 0, 1 },
+ .vdm_src_en = { 0x0108, 12, 12, 0, 1 },
+ .vdp_src_en = { 0x0108, 11, 11, 0, 1 },
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
{
.reg = 0x100,
@@ -1425,6 +1468,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
+ { .compatible = "rockchip,rk3308-usb2phy", .data = &rk3308_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
index e4adab375c73..6bdbd1f214dd 100644
--- a/drivers/phy/socionext/phy-uniphier-pcie.c
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -24,11 +24,13 @@
#define PORT_SEL_1 FIELD_PREP(PORT_SEL_MASK, 1)
#define PCL_PHY_TEST_I 0x2000
-#define PCL_PHY_TEST_O 0x2004
#define TESTI_DAT_MASK GENMASK(13, 6)
#define TESTI_ADR_MASK GENMASK(5, 1)
#define TESTI_WR_EN BIT(0)
+#define PCL_PHY_TEST_O 0x2004
+#define TESTO_DAT_MASK GENMASK(7, 0)
+
#define PCL_PHY_RESET 0x200c
#define PCL_PHY_RESET_N_MNMODE BIT(8) /* =1:manual */
#define PCL_PHY_RESET_N BIT(0) /* =1:deassert */
@@ -77,11 +79,12 @@ static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
uniphier_pciephy_testio_write(priv, val);
- val = readl(priv->base + PCL_PHY_TEST_O);
+ val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK;
/* update value */
- val &= ~FIELD_PREP(TESTI_DAT_MASK, mask);
- val = FIELD_PREP(TESTI_DAT_MASK, mask & param);
+ val &= ~mask;
+ val |= mask & param;
+ val = FIELD_PREP(TESTI_DAT_MASK, val);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
uniphier_pciephy_testio_write(priv, val);
uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index c184f4e34584..3e491dfb2525 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -57,6 +57,7 @@ struct pll_params {
struct stm32_usbphyc_phy {
struct phy *phy;
struct stm32_usbphyc *usbphyc;
+ struct regulator *vbus;
u32 index;
bool active;
};
@@ -291,9 +292,31 @@ static int stm32_usbphyc_phy_exit(struct phy *phy)
return stm32_usbphyc_pll_disable(usbphyc);
}
+static int stm32_usbphyc_phy_power_on(struct phy *phy)
+{
+ struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
+
+ if (usbphyc_phy->vbus)
+ return regulator_enable(usbphyc_phy->vbus);
+
+ return 0;
+}
+
+static int stm32_usbphyc_phy_power_off(struct phy *phy)
+{
+ struct stm32_usbphyc_phy *usbphyc_phy = phy_get_drvdata(phy);
+
+ if (usbphyc_phy->vbus)
+ return regulator_disable(usbphyc_phy->vbus);
+
+ return 0;
+}
+
static const struct phy_ops stm32_usbphyc_phy_ops = {
.init = stm32_usbphyc_phy_init,
.exit = stm32_usbphyc_phy_exit,
+ .power_on = stm32_usbphyc_phy_power_on,
+ .power_off = stm32_usbphyc_phy_power_off,
.owner = THIS_MODULE,
};
@@ -519,6 +542,14 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
usbphyc->phys[port]->index = index;
usbphyc->phys[port]->active = false;
+ usbphyc->phys[port]->vbus = devm_regulator_get_optional(&phy->dev, "vbus");
+ if (IS_ERR(usbphyc->phys[port]->vbus)) {
+ ret = PTR_ERR(usbphyc->phys[port]->vbus);
+ if (ret == -EPROBE_DEFER)
+ goto put_child;
+ usbphyc->phys[port]->vbus = NULL;
+ }
+
port++;
}
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 5d64f69b39a9..ae3915ed9fef 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -113,6 +113,117 @@
#define ID_OVERRIDE_FLOATING ID_OVERRIDE(8)
#define ID_OVERRIDE_GROUNDED ID_OVERRIDE(0)
+/* XUSB AO registers */
+#define XUSB_AO_USB_DEBOUNCE_DEL (0x4)
+#define UHSIC_LINE_DEB_CNT(x) (((x) & 0xf) << 4)
+#define UTMIP_LINE_DEB_CNT(x) ((x) & 0xf)
+
+#define XUSB_AO_UTMIP_TRIGGERS(x) (0x40 + (x) * 4)
+#define CLR_WALK_PTR BIT(0)
+#define CAP_CFG BIT(1)
+#define CLR_WAKE_ALARM BIT(3)
+
+#define XUSB_AO_UHSIC_TRIGGERS(x) (0x60 + (x) * 4)
+#define HSIC_CLR_WALK_PTR BIT(0)
+#define HSIC_CLR_WAKE_ALARM BIT(3)
+#define HSIC_CAP_CFG BIT(4)
+
+#define XUSB_AO_UTMIP_SAVED_STATE(x) (0x70 + (x) * 4)
+#define SPEED(x) ((x) & 0x3)
+#define UTMI_HS SPEED(0)
+#define UTMI_FS SPEED(1)
+#define UTMI_LS SPEED(2)
+#define UTMI_RST SPEED(3)
+
+#define XUSB_AO_UHSIC_SAVED_STATE(x) (0x90 + (x) * 4)
+#define MODE(x) ((x) & 0x1)
+#define MODE_HS MODE(0)
+#define MODE_RST MODE(1)
+
+#define XUSB_AO_UTMIP_SLEEPWALK_CFG(x) (0xd0 + (x) * 4)
+#define XUSB_AO_UHSIC_SLEEPWALK_CFG(x) (0xf0 + (x) * 4)
+#define FAKE_USBOP_VAL BIT(0)
+#define FAKE_USBON_VAL BIT(1)
+#define FAKE_USBOP_EN BIT(2)
+#define FAKE_USBON_EN BIT(3)
+#define FAKE_STROBE_VAL BIT(0)
+#define FAKE_DATA_VAL BIT(1)
+#define FAKE_STROBE_EN BIT(2)
+#define FAKE_DATA_EN BIT(3)
+#define WAKE_WALK_EN BIT(14)
+#define MASTER_ENABLE BIT(15)
+#define LINEVAL_WALK_EN BIT(16)
+#define WAKE_VAL(x) (((x) & 0xf) << 17)
+#define WAKE_VAL_NONE WAKE_VAL(12)
+#define WAKE_VAL_ANY WAKE_VAL(15)
+#define WAKE_VAL_DS10 WAKE_VAL(2)
+#define LINE_WAKEUP_EN BIT(21)
+#define MASTER_CFG_SEL BIT(22)
+
+#define XUSB_AO_UTMIP_SLEEPWALK(x) (0x100 + (x) * 4)
+/* phase A */
+#define USBOP_RPD_A BIT(0)
+#define USBON_RPD_A BIT(1)
+#define AP_A BIT(4)
+#define AN_A BIT(5)
+#define HIGHZ_A BIT(6)
+/* phase B */
+#define USBOP_RPD_B BIT(8)
+#define USBON_RPD_B BIT(9)
+#define AP_B BIT(12)
+#define AN_B BIT(13)
+#define HIGHZ_B BIT(14)
+/* phase C */
+#define USBOP_RPD_C BIT(16)
+#define USBON_RPD_C BIT(17)
+#define AP_C BIT(20)
+#define AN_C BIT(21)
+#define HIGHZ_C BIT(22)
+/* phase D */
+#define USBOP_RPD_D BIT(24)
+#define USBON_RPD_D BIT(25)
+#define AP_D BIT(28)
+#define AN_D BIT(29)
+#define HIGHZ_D BIT(30)
+
+#define XUSB_AO_UHSIC_SLEEPWALK(x) (0x120 + (x) * 4)
+/* phase A */
+#define RPD_STROBE_A BIT(0)
+#define RPD_DATA0_A BIT(1)
+#define RPU_STROBE_A BIT(2)
+#define RPU_DATA0_A BIT(3)
+/* phase B */
+#define RPD_STROBE_B BIT(8)
+#define RPD_DATA0_B BIT(9)
+#define RPU_STROBE_B BIT(10)
+#define RPU_DATA0_B BIT(11)
+/* phase C */
+#define RPD_STROBE_C BIT(16)
+#define RPD_DATA0_C BIT(17)
+#define RPU_STROBE_C BIT(18)
+#define RPU_DATA0_C BIT(19)
+/* phase D */
+#define RPD_STROBE_D BIT(24)
+#define RPD_DATA0_D BIT(25)
+#define RPU_STROBE_D BIT(26)
+#define RPU_DATA0_D BIT(27)
+
+#define XUSB_AO_UTMIP_PAD_CFG(x) (0x130 + (x) * 4)
+#define FSLS_USE_XUSB_AO BIT(3)
+#define TRK_CTRL_USE_XUSB_AO BIT(4)
+#define RPD_CTRL_USE_XUSB_AO BIT(5)
+#define RPU_USE_XUSB_AO BIT(6)
+#define VREG_USE_XUSB_AO BIT(7)
+#define USBOP_VAL_PD BIT(8)
+#define USBON_VAL_PD BIT(9)
+#define E_DPD_OVRD_EN BIT(10)
+#define E_DPD_OVRD_VAL BIT(11)
+
+#define XUSB_AO_UHSIC_PAD_CFG(x) (0x150 + (x) * 4)
+#define STROBE_VAL_PD BIT(0)
+#define DATA0_VAL_PD BIT(1)
+#define USE_XUSB_AO BIT(4)
+
#define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
@@ -130,16 +241,37 @@ struct tegra_xusb_fuse_calibration {
u32 rpd_ctrl;
};
+struct tegra186_xusb_padctl_context {
+ u32 vbus_id;
+ u32 usb2_pad_mux;
+ u32 usb2_port_cap;
+ u32 ss_port_cap;
+};
+
struct tegra186_xusb_padctl {
struct tegra_xusb_padctl base;
+ void __iomem *ao_regs;
struct tegra_xusb_fuse_calibration calib;
/* UTMI bias and tracking */
struct clk *usb2_trk_clk;
unsigned int bias_pad_enable;
+
+ /* padctl context */
+ struct tegra186_xusb_padctl_context context;
};
+static inline void ao_writel(struct tegra186_xusb_padctl *priv, u32 value, unsigned int offset)
+{
+ writel(value, priv->ao_regs + offset);
+}
+
+static inline u32 ao_readl(struct tegra186_xusb_padctl *priv, unsigned int offset)
+{
+ return readl(priv->ao_regs + offset);
+}
+
static inline struct tegra186_xusb_padctl *
to_tegra186_xusb_padctl(struct tegra_xusb_padctl *padctl)
{
@@ -180,9 +312,264 @@ static void tegra186_usb2_lane_remove(struct tegra_xusb_lane *lane)
kfree(usb2);
}
+static int tegra186_utmi_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
+ enum usb_device_speed speed)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ /* ensure sleepwalk logic is disabled */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~MASTER_ENABLE;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* ensure sleepwalk logic is in low power mode */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value |= MASTER_CFG_SEL;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* set debounce time */
+ value = ao_readl(priv, XUSB_AO_USB_DEBOUNCE_DEL);
+ value &= ~UTMIP_LINE_DEB_CNT(~0);
+ value |= UTMIP_LINE_DEB_CNT(1);
+ ao_writel(priv, value, XUSB_AO_USB_DEBOUNCE_DEL);
+
+ /* ensure fake events of sleepwalk logic are disabled */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~(FAKE_USBOP_VAL | FAKE_USBON_VAL |
+ FAKE_USBOP_EN | FAKE_USBON_EN);
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* ensure wake events of sleepwalk logic are not latched */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~LINE_WAKEUP_EN;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* disable wake event triggers of sleepwalk logic */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~WAKE_VAL(~0);
+ value |= WAKE_VAL_NONE;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* power down the line state detectors of the pad */
+ value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
+ value |= (USBOP_VAL_PD | USBON_VAL_PD);
+ ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
+
+ /* save state per speed */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SAVED_STATE(index));
+ value &= ~SPEED(~0);
+
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ value |= UTMI_HS;
+ break;
+
+ case USB_SPEED_FULL:
+ value |= UTMI_FS;
+ break;
+
+ case USB_SPEED_LOW:
+ value |= UTMI_LS;
+ break;
+
+ default:
+ value |= UTMI_RST;
+ break;
+ }
+
+ ao_writel(priv, value, XUSB_AO_UTMIP_SAVED_STATE(index));
+
+ /* enable the trigger of the sleepwalk logic */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value |= LINEVAL_WALK_EN;
+ value &= ~WAKE_WALK_EN;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* reset the walk pointer and clear the alarm of the sleepwalk logic,
+ * as well as capture the configuration of the USB2.0 pad
+ */
+ value = ao_readl(priv, XUSB_AO_UTMIP_TRIGGERS(index));
+ value |= (CLR_WALK_PTR | CLR_WAKE_ALARM | CAP_CFG);
+ ao_writel(priv, value, XUSB_AO_UTMIP_TRIGGERS(index));
+
+ /* set up the pull-ups and pull-downs of the signals during the four
+ * stages of sleepwalk.
+ * if a device is connected, program sleepwalk logic to maintain a J and
+ * keep driving K upon seeing remote wake.
+ */
+ value = USBOP_RPD_A | USBOP_RPD_B | USBOP_RPD_C | USBOP_RPD_D;
+ value |= USBON_RPD_A | USBON_RPD_B | USBON_RPD_C | USBON_RPD_D;
+
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ /* J state: D+/D- = high/low, K state: D+/D- = low/high */
+ value |= HIGHZ_A;
+ value |= AP_A;
+ value |= AN_B | AN_C | AN_D;
+ break;
+
+ case USB_SPEED_LOW:
+ /* J state: D+/D- = low/high, K state: D+/D- = high/low */
+ value |= HIGHZ_A;
+ value |= AN_A;
+ value |= AP_B | AP_C | AP_D;
+ break;
+
+ default:
+ value |= HIGHZ_A | HIGHZ_B | HIGHZ_C | HIGHZ_D;
+ break;
+ }
+
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK(index));
+
+ /* power up the line state detectors of the pad */
+ value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
+ value &= ~(USBOP_VAL_PD | USBON_VAL_PD);
+ ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
+
+ usleep_range(150, 200);
+
+ /* switch the electrical control of the USB2.0 pad to XUSB_AO */
+ value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
+ value |= FSLS_USE_XUSB_AO | TRK_CTRL_USE_XUSB_AO | RPD_CTRL_USE_XUSB_AO |
+ RPU_USE_XUSB_AO | VREG_USE_XUSB_AO;
+ ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
+
+ /* set the wake signaling trigger events */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~WAKE_VAL(~0);
+ value |= WAKE_VAL_ANY;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* enable the wake detection */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value |= MASTER_ENABLE | LINE_WAKEUP_EN;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_utmi_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ /* disable the wake detection */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~(MASTER_ENABLE | LINE_WAKEUP_EN);
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* switch the electrical control of the USB2.0 pad to XUSB vcore logic */
+ value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
+ value &= ~(FSLS_USE_XUSB_AO | TRK_CTRL_USE_XUSB_AO | RPD_CTRL_USE_XUSB_AO |
+ RPU_USE_XUSB_AO | VREG_USE_XUSB_AO);
+ ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
+
+ /* disable wake event triggers of sleepwalk logic */
+ value = ao_readl(priv, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+ value &= ~WAKE_VAL(~0);
+ value |= WAKE_VAL_NONE;
+ ao_writel(priv, value, XUSB_AO_UTMIP_SLEEPWALK_CFG(index));
+
+ /* power down the line state detectors of the port */
+ value = ao_readl(priv, XUSB_AO_UTMIP_PAD_CFG(index));
+ value |= USBOP_VAL_PD | USBON_VAL_PD;
+ ao_writel(priv, value, XUSB_AO_UTMIP_PAD_CFG(index));
+
+ /* clear alarm of the sleepwalk logic */
+ value = ao_readl(priv, XUSB_AO_UTMIP_TRIGGERS(index));
+ value |= CLR_WAKE_ALARM;
+ ao_writel(priv, value, XUSB_AO_UTMIP_TRIGGERS(index));
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_utmi_enable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_utmi_disable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value &= ~USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static bool tegra186_utmi_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ if ((value & USB2_PORT_WAKE_INTERRUPT_ENABLE(index)) &&
+ (value & USB2_PORT_WAKEUP_EVENT(index)))
+ return true;
+
+ return false;
+}
+
static const struct tegra_xusb_lane_ops tegra186_usb2_lane_ops = {
.probe = tegra186_usb2_lane_probe,
.remove = tegra186_usb2_lane_remove,
+ .enable_phy_sleepwalk = tegra186_utmi_enable_phy_sleepwalk,
+ .disable_phy_sleepwalk = tegra186_utmi_disable_phy_sleepwalk,
+ .enable_phy_wake = tegra186_utmi_enable_phy_wake,
+ .disable_phy_wake = tegra186_utmi_disable_phy_wake,
+ .remote_wake_detected = tegra186_utmi_phy_remote_wake_detected,
};
static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
@@ -656,10 +1043,128 @@ static void tegra186_usb3_lane_remove(struct tegra_xusb_lane *lane)
kfree(usb3);
}
+static int tegra186_usb3_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
+ enum usb_device_speed speed)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value |= SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value |= SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(250, 350);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_usb3_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value &= ~SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
+ value &= ~SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_usb3_enable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= SS_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= SS_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra186_usb3_disable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value &= ~SS_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= SS_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static bool tegra186_usb3_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ if ((value & SS_PORT_WAKE_INTERRUPT_ENABLE(index)) && (value & SS_PORT_WAKEUP_EVENT(index)))
+ return true;
+
+ return false;
+}
+
static const struct tegra_xusb_lane_ops tegra186_usb3_lane_ops = {
.probe = tegra186_usb3_lane_probe,
.remove = tegra186_usb3_lane_remove,
+ .enable_phy_sleepwalk = tegra186_usb3_enable_phy_sleepwalk,
+ .disable_phy_sleepwalk = tegra186_usb3_disable_phy_sleepwalk,
+ .enable_phy_wake = tegra186_usb3_enable_phy_wake,
+ .disable_phy_wake = tegra186_usb3_disable_phy_wake,
+ .remote_wake_detected = tegra186_usb3_phy_remote_wake_detected,
};
+
static int tegra186_usb3_port_enable(struct tegra_xusb_port *port)
{
return 0;
@@ -913,7 +1418,9 @@ static struct tegra_xusb_padctl *
tegra186_xusb_padctl_probe(struct device *dev,
const struct tegra_xusb_padctl_soc *soc)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct tegra186_xusb_padctl *priv;
+ struct resource *res;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -923,6 +1430,11 @@ tegra186_xusb_padctl_probe(struct device *dev,
priv->base.dev = dev;
priv->base.soc = soc;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ao");
+ priv->ao_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ao_regs))
+ return ERR_CAST(priv->ao_regs);
+
err = tegra186_xusb_read_fuse_calibration(priv);
if (err < 0)
return ERR_PTR(err);
@@ -930,6 +1442,40 @@ tegra186_xusb_padctl_probe(struct device *dev,
return &priv->base;
}
+static void tegra186_xusb_padctl_save(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+
+ priv->context.vbus_id = padctl_readl(padctl, USB2_VBUS_ID);
+ priv->context.usb2_pad_mux = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+ priv->context.usb2_port_cap = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+ priv->context.ss_port_cap = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CAP);
+}
+
+static void tegra186_xusb_padctl_restore(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
+
+ padctl_writel(padctl, priv->context.usb2_pad_mux, XUSB_PADCTL_USB2_PAD_MUX);
+ padctl_writel(padctl, priv->context.usb2_port_cap, XUSB_PADCTL_USB2_PORT_CAP);
+ padctl_writel(padctl, priv->context.ss_port_cap, XUSB_PADCTL_SS_PORT_CAP);
+ padctl_writel(padctl, priv->context.vbus_id, USB2_VBUS_ID);
+}
+
+static int tegra186_xusb_padctl_suspend_noirq(struct tegra_xusb_padctl *padctl)
+{
+ tegra186_xusb_padctl_save(padctl);
+
+ return 0;
+}
+
+static int tegra186_xusb_padctl_resume_noirq(struct tegra_xusb_padctl *padctl)
+{
+ tegra186_xusb_padctl_restore(padctl);
+
+ return 0;
+}
+
static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
@@ -937,6 +1483,8 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
+ .suspend_noirq = tegra186_xusb_padctl_suspend_noirq,
+ .resume_noirq = tegra186_xusb_padctl_resume_noirq,
.vbus_override = tegra186_xusb_padctl_vbus_override,
};
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index 66bd4613835b..eedfc7c2cc05 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2015 Google, Inc.
*/
@@ -11,8 +11,10 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -52,6 +54,20 @@
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED 0x7
+#define XUSB_PADCTL_ELPG_PROGRAM_0 0x20
+#define USB2_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x))
+#define USB2_PORT_WAKEUP_EVENT(x) BIT((x) + 7)
+#define SS_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 14)
+#define SS_PORT_WAKEUP_EVENT(x) BIT((x) + 21)
+#define USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(x) BIT((x) + 28)
+#define USB2_HSIC_PORT_WAKEUP_EVENT(x) BIT((x) + 30)
+#define ALL_WAKE_EVENTS ( \
+ USB2_PORT_WAKEUP_EVENT(0) | USB2_PORT_WAKEUP_EVENT(1) | \
+ USB2_PORT_WAKEUP_EVENT(2) | USB2_PORT_WAKEUP_EVENT(3) | \
+ SS_PORT_WAKEUP_EVENT(0) | SS_PORT_WAKEUP_EVENT(1) | \
+ SS_PORT_WAKEUP_EVENT(2) | SS_PORT_WAKEUP_EVENT(3) | \
+ USB2_HSIC_PORT_WAKEUP_EVENT(0))
+
#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 30)
@@ -90,6 +106,8 @@
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD (1 << 1)
#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD (1 << 0)
+#define RPD_CTRL(x) (((x) & 0x1f) << 26)
+#define RPD_CTRL_VALUE(x) (((x) >> 26) & 0x1f)
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 11)
@@ -108,6 +126,8 @@
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT 12
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK 0x7f
#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL 0x1e
+#define TCTRL_VALUE(x) (((x) & 0x3f) >> 0)
+#define PCTRL_VALUE(x) (((x) >> 6) & 0x3f)
#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE (1 << 18)
@@ -198,6 +218,18 @@
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN BIT(18)
#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD BIT(13)
+#define XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(x) (0x464 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ BIT(0)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ_OVRD BIT(1)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_MASK GENMASK(5, 4)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_VAL GENMASK(5, 4)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_PWR_OVRD BIT(24)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ BIT(8)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ_OVRD BIT(9)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_MASK GENMASK(13, 12)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_VAL GENMASK(13, 12)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_PWR_OVRD BIT(25)
+
#define XUSB_PADCTL_UPHY_PLL_S0_CTL1 0x860
#define XUSB_PADCTL_UPHY_PLL_S0_CTL2 0x864
@@ -209,6 +241,7 @@
#define XUSB_PADCTL_UPHY_PLL_S0_CTL8 0x87c
#define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1 0x960
+#define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL2 0x964
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(x) (0xa60 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT 16
@@ -238,16 +271,161 @@
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED 0
+/* USB2 SLEEPWALK registers */
+#define UTMIP(_port, _offset1, _offset2) \
+ (((_port) <= 2) ? (_offset1) : (_offset2))
+
+#define PMC_UTMIP_UHSIC_SLEEP_CFG(x) UTMIP(x, 0x1fc, 0x4d0)
+#define UTMIP_MASTER_ENABLE(x) UTMIP(x, BIT(8 * (x)), BIT(0))
+#define UTMIP_FSLS_USE_PMC(x) UTMIP(x, BIT(8 * (x) + 1), \
+ BIT(1))
+#define UTMIP_PCTRL_USE_PMC(x) UTMIP(x, BIT(8 * (x) + 2), \
+ BIT(2))
+#define UTMIP_TCTRL_USE_PMC(x) UTMIP(x, BIT(8 * (x) + 3), \
+ BIT(3))
+#define UTMIP_WAKE_VAL(_port, _value) (((_value) & 0xf) << \
+ (UTMIP(_port, 8 * (_port) + 4, 4)))
+#define UTMIP_WAKE_VAL_NONE(_port) UTMIP_WAKE_VAL(_port, 12)
+#define UTMIP_WAKE_VAL_ANY(_port) UTMIP_WAKE_VAL(_port, 15)
+
+#define PMC_UTMIP_UHSIC_SLEEP_CFG1 (0x4d0)
+#define UTMIP_RPU_SWITC_LOW_USE_PMC_PX(x) BIT((x) + 8)
+#define UTMIP_RPD_CTRL_USE_PMC_PX(x) BIT((x) + 16)
+
+#define PMC_UTMIP_MASTER_CONFIG (0x274)
+#define UTMIP_PWR(x) UTMIP(x, BIT(x), BIT(4))
+#define UHSIC_PWR BIT(3)
+
+#define PMC_USB_DEBOUNCE_DEL (0xec)
+#define DEBOUNCE_VAL(x) (((x) & 0xffff) << 0)
+#define UTMIP_LINE_DEB_CNT(x) (((x) & 0xf) << 16)
+#define UHSIC_LINE_DEB_CNT(x) (((x) & 0xf) << 20)
+
+#define PMC_UTMIP_UHSIC_FAKE(x) UTMIP(x, 0x218, 0x294)
+#define UTMIP_FAKE_USBOP_VAL(x) UTMIP(x, BIT(4 * (x)), BIT(8))
+#define UTMIP_FAKE_USBON_VAL(x) UTMIP(x, BIT(4 * (x) + 1), \
+ BIT(9))
+#define UTMIP_FAKE_USBOP_EN(x) UTMIP(x, BIT(4 * (x) + 2), \
+ BIT(10))
+#define UTMIP_FAKE_USBON_EN(x) UTMIP(x, BIT(4 * (x) + 3), \
+ BIT(11))
+
+#define PMC_UTMIP_UHSIC_SLEEPWALK_CFG(x) UTMIP(x, 0x200, 0x288)
+#define UTMIP_LINEVAL_WALK_EN(x) UTMIP(x, BIT(8 * (x) + 7), \
+ BIT(15))
+
+#define PMC_USB_AO (0xf0)
+#define USBOP_VAL_PD(x) UTMIP(x, BIT(4 * (x)), BIT(20))
+#define USBON_VAL_PD(x) UTMIP(x, BIT(4 * (x) + 1), \
+ BIT(21))
+#define STROBE_VAL_PD BIT(12)
+#define DATA0_VAL_PD BIT(13)
+#define DATA1_VAL_PD BIT(24)
+
+#define PMC_UTMIP_UHSIC_SAVED_STATE(x) UTMIP(x, 0x1f0, 0x280)
+#define SPEED(_port, _value) (((_value) & 0x3) << \
+ (UTMIP(_port, 8 * (_port), 8)))
+#define UTMI_HS(_port) SPEED(_port, 0)
+#define UTMI_FS(_port) SPEED(_port, 1)
+#define UTMI_LS(_port) SPEED(_port, 2)
+#define UTMI_RST(_port) SPEED(_port, 3)
+
+#define PMC_UTMIP_UHSIC_TRIGGERS (0x1ec)
+#define UTMIP_CLR_WALK_PTR(x) UTMIP(x, BIT(x), BIT(16))
+#define UTMIP_CAP_CFG(x) UTMIP(x, BIT((x) + 4), BIT(17))
+#define UTMIP_CLR_WAKE_ALARM(x) UTMIP(x, BIT((x) + 12), \
+ BIT(19))
+#define UHSIC_CLR_WALK_PTR BIT(3)
+#define UHSIC_CLR_WAKE_ALARM BIT(15)
+
+#define PMC_UTMIP_SLEEPWALK_PX(x) UTMIP(x, 0x204 + (4 * (x)), \
+ 0x4e0)
+/* phase A */
+#define UTMIP_USBOP_RPD_A BIT(0)
+#define UTMIP_USBON_RPD_A BIT(1)
+#define UTMIP_AP_A BIT(4)
+#define UTMIP_AN_A BIT(5)
+#define UTMIP_HIGHZ_A BIT(6)
+/* phase B */
+#define UTMIP_USBOP_RPD_B BIT(8)
+#define UTMIP_USBON_RPD_B BIT(9)
+#define UTMIP_AP_B BIT(12)
+#define UTMIP_AN_B BIT(13)
+#define UTMIP_HIGHZ_B BIT(14)
+/* phase C */
+#define UTMIP_USBOP_RPD_C BIT(16)
+#define UTMIP_USBON_RPD_C BIT(17)
+#define UTMIP_AP_C BIT(20)
+#define UTMIP_AN_C BIT(21)
+#define UTMIP_HIGHZ_C BIT(22)
+/* phase D */
+#define UTMIP_USBOP_RPD_D BIT(24)
+#define UTMIP_USBON_RPD_D BIT(25)
+#define UTMIP_AP_D BIT(28)
+#define UTMIP_AN_D BIT(29)
+#define UTMIP_HIGHZ_D BIT(30)
+
+#define PMC_UTMIP_UHSIC_LINE_WAKEUP (0x26c)
+#define UTMIP_LINE_WAKEUP_EN(x) UTMIP(x, BIT(x), BIT(4))
+#define UHSIC_LINE_WAKEUP_EN BIT(3)
+
+#define PMC_UTMIP_TERM_PAD_CFG (0x1f8)
+#define PCTRL_VAL(x) (((x) & 0x3f) << 1)
+#define TCTRL_VAL(x) (((x) & 0x3f) << 7)
+
+#define PMC_UTMIP_PAD_CFGX(x) (0x4c0 + (4 * (x)))
+#define RPD_CTRL_PX(x) (((x) & 0x1f) << 22)
+
+#define PMC_UHSIC_SLEEP_CFG PMC_UTMIP_UHSIC_SLEEP_CFG(0)
+#define UHSIC_MASTER_ENABLE BIT(24)
+#define UHSIC_WAKE_VAL(_value) (((_value) & 0xf) << 28)
+#define UHSIC_WAKE_VAL_SD10 UHSIC_WAKE_VAL(2)
+#define UHSIC_WAKE_VAL_NONE UHSIC_WAKE_VAL(12)
+
+#define PMC_UHSIC_FAKE PMC_UTMIP_UHSIC_FAKE(0)
+#define UHSIC_FAKE_STROBE_VAL BIT(12)
+#define UHSIC_FAKE_DATA_VAL BIT(13)
+#define UHSIC_FAKE_STROBE_EN BIT(14)
+#define UHSIC_FAKE_DATA_EN BIT(15)
+
+#define PMC_UHSIC_SAVED_STATE PMC_UTMIP_UHSIC_SAVED_STATE(0)
+#define UHSIC_MODE(_value) (((_value) & 0x1) << 24)
+#define UHSIC_HS UHSIC_MODE(0)
+#define UHSIC_RST UHSIC_MODE(1)
+
+#define PMC_UHSIC_SLEEPWALK_CFG PMC_UTMIP_UHSIC_SLEEPWALK_CFG(0)
+#define UHSIC_WAKE_WALK_EN BIT(30)
+#define UHSIC_LINEVAL_WALK_EN BIT(31)
+
+#define PMC_UHSIC_SLEEPWALK_P0 (0x210)
+#define UHSIC_DATA0_RPD_A BIT(1)
+#define UHSIC_DATA0_RPU_B BIT(11)
+#define UHSIC_DATA0_RPU_C BIT(19)
+#define UHSIC_DATA0_RPU_D BIT(27)
+#define UHSIC_STROBE_RPU_A BIT(2)
+#define UHSIC_STROBE_RPD_B BIT(8)
+#define UHSIC_STROBE_RPD_C BIT(16)
+#define UHSIC_STROBE_RPD_D BIT(24)
+
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
u32 hs_term_range_adj;
u32 rpd_ctrl;
};
+struct tegra210_xusb_padctl_context {
+ u32 usb2_pad_mux;
+ u32 usb2_port_cap;
+ u32 ss_port_map;
+ u32 usb3_pad_mux;
+};
+
struct tegra210_xusb_padctl {
struct tegra_xusb_padctl base;
+ struct regmap *regmap;
struct tegra210_xusb_fuse_calibration fuse;
+ struct tegra210_xusb_padctl_context context;
};
static inline struct tegra210_xusb_padctl *
@@ -256,23 +434,51 @@ to_tegra210_xusb_padctl(struct tegra_xusb_padctl *padctl)
return container_of(padctl, struct tegra210_xusb_padctl, base);
}
+static const struct tegra_xusb_lane_map tegra210_usb3_map[] = {
+ { 0, "pcie", 6 },
+ { 1, "pcie", 5 },
+ { 2, "pcie", 0 },
+ { 2, "pcie", 3 },
+ { 3, "pcie", 4 },
+ { 3, "sata", 0 },
+ { 0, NULL, 0 }
+};
+
+static int tegra210_usb3_lane_map(struct tegra_xusb_lane *lane)
+{
+ const struct tegra_xusb_lane_map *map;
+
+ for (map = tegra210_usb3_map; map->type; map++) {
+ if (map->index == lane->index &&
+ strcmp(map->type, lane->pad->soc->name) == 0) {
+ dev_dbg(lane->pad->padctl->dev, "lane = %s maps to port = usb3-%d\n",
+ lane->pad->soc->lanes[lane->index].name, map->port);
+ return map->port;
+ }
+ }
+
+ return -EINVAL;
+}
+
/* must be called under padctl->lock */
static int tegra210_pex_uphy_enable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
unsigned long timeout;
u32 value;
+ unsigned int i;
int err;
- if (pcie->enable > 0) {
- pcie->enable++;
+ if (pcie->enable)
return 0;
- }
err = clk_prepare_enable(pcie->pll);
if (err < 0)
return err;
+ if (tegra210_plle_hw_sequence_is_enabled())
+ goto skip_pll_init;
+
err = reset_control_deassert(pcie->rst);
if (err < 0)
goto disable;
@@ -455,7 +661,14 @@ static int tegra210_pex_uphy_enable(struct tegra_xusb_padctl *padctl)
tegra210_xusb_pll_hw_sequence_start();
- pcie->enable++;
+skip_pll_init:
+ pcie->enable = true;
+
+ for (i = 0; i < padctl->pcie->soc->num_lanes; i++) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(i);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ }
return 0;
@@ -469,34 +682,44 @@ disable:
static void tegra210_pex_uphy_disable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
+ u32 value;
+ unsigned int i;
- mutex_lock(&padctl->lock);
+ if (WARN_ON(!pcie->enable))
+ return;
- if (WARN_ON(pcie->enable == 0))
- goto unlock;
+ pcie->enable = false;
- if (--pcie->enable > 0)
- goto unlock;
+ for (i = 0; i < padctl->pcie->soc->num_lanes; i++) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(i);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ }
- reset_control_assert(pcie->rst);
clk_disable_unprepare(pcie->pll);
-
-unlock:
- mutex_unlock(&padctl->lock);
}
/* must be called under padctl->lock */
-static int tegra210_sata_uphy_enable(struct tegra_xusb_padctl *padctl, bool usb)
+static int tegra210_sata_uphy_enable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
+ struct tegra_xusb_lane *lane = tegra_xusb_find_lane(padctl, "sata", 0);
unsigned long timeout;
u32 value;
+ unsigned int i;
int err;
+ bool usb;
- if (sata->enable > 0) {
- sata->enable++;
+ if (sata->enable)
return 0;
- }
+
+ if (IS_ERR(lane))
+ return 0;
+
+ if (tegra210_plle_hw_sequence_is_enabled())
+ goto skip_pll_init;
+
+ usb = tegra_xusb_lane_check(lane, "usb3-ss");
err = clk_prepare_enable(sata->pll);
if (err < 0)
@@ -697,7 +920,14 @@ static int tegra210_sata_uphy_enable(struct tegra_xusb_padctl *padctl, bool usb)
tegra210_sata_pll_hw_sequence_start();
- sata->enable++;
+skip_pll_init:
+ sata->enable = true;
+
+ for (i = 0; i < padctl->sata->soc->num_lanes; i++) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(i);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ }
return 0;
@@ -711,31 +941,27 @@ disable:
static void tegra210_sata_uphy_disable(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
+ u32 value;
+ unsigned int i;
- mutex_lock(&padctl->lock);
+ if (WARN_ON(!sata->enable))
+ return;
- if (WARN_ON(sata->enable == 0))
- goto unlock;
+ sata->enable = false;
- if (--sata->enable > 0)
- goto unlock;
+ for (i = 0; i < padctl->sata->soc->num_lanes; i++) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(i);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ }
- reset_control_assert(sata->rst);
clk_disable_unprepare(sata->pll);
-
-unlock:
- mutex_unlock(&padctl->lock);
}
-static int tegra210_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
+static void tegra210_aux_mux_lp0_clamp_disable(struct tegra_xusb_padctl *padctl)
{
u32 value;
- mutex_lock(&padctl->lock);
-
- if (padctl->enable++ > 0)
- goto out;
-
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
@@ -751,24 +977,12 @@ static int tegra210_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
-out:
- mutex_unlock(&padctl->lock);
- return 0;
}
-static int tegra210_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
+static void tegra210_aux_mux_lp0_clamp_enable(struct tegra_xusb_padctl *padctl)
{
u32 value;
- mutex_lock(&padctl->lock);
-
- if (WARN_ON(padctl->enable == 0))
- goto out;
-
- if (--padctl->enable > 0)
- goto out;
-
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
@@ -784,12 +998,38 @@ static int tegra210_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+}
+
+static int tegra210_uphy_init(struct tegra_xusb_padctl *padctl)
+{
+ if (padctl->pcie)
+ tegra210_pex_uphy_enable(padctl);
+
+ if (padctl->sata)
+ tegra210_sata_uphy_enable(padctl);
+
+ if (!tegra210_plle_hw_sequence_is_enabled())
+ tegra210_plle_hw_sequence_start();
+ else
+ dev_dbg(padctl->dev, "PLLE is already in HW control\n");
+
+ tegra210_aux_mux_lp0_clamp_disable(padctl);
-out:
- mutex_unlock(&padctl->lock);
return 0;
}
+static void __maybe_unused
+tegra210_uphy_deinit(struct tegra_xusb_padctl *padctl)
+{
+ tegra210_aux_mux_lp0_clamp_enable(padctl);
+
+ if (padctl->sata)
+ tegra210_sata_uphy_disable(padctl);
+
+ if (padctl->pcie)
+ tegra210_pex_uphy_disable(padctl);
+}
+
static int tegra210_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int index, bool idle)
{
@@ -815,6 +1055,643 @@ static int tegra210_hsic_set_idle(struct tegra_xusb_padctl *padctl,
return 0;
}
+static int tegra210_usb3_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
+ enum usb_device_speed speed)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ int port = tegra210_usb3_lane_map(lane);
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ if (port < 0) {
+ dev_err(dev, "invalid usb3 port number\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra210_usb3_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ int port = tegra210_usb3_lane_map(lane);
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ if (port < 0) {
+ dev_err(dev, "invalid usb3 port number\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra210_usb3_enable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ int port = tegra210_usb3_lane_map(lane);
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ if (port < 0) {
+ dev_err(dev, "invalid usb3 port number\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= SS_PORT_WAKEUP_EVENT(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= SS_PORT_WAKE_INTERRUPT_ENABLE(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra210_usb3_disable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ int port = tegra210_usb3_lane_map(lane);
+ struct device *dev = padctl->dev;
+ u32 value;
+
+ if (port < 0) {
+ dev_err(dev, "invalid usb3 port number\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value &= ~SS_PORT_WAKE_INTERRUPT_ENABLE(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= SS_PORT_WAKEUP_EVENT(port);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static bool tegra210_usb3_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ int index = tegra210_usb3_lane_map(lane);
+ u32 value;
+
+ if (index < 0)
+ return false;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ if ((value & SS_PORT_WAKE_INTERRUPT_ENABLE(index)) && (value & SS_PORT_WAKEUP_EVENT(index)))
+ return true;
+
+ return false;
+}
+
+static int tegra210_utmi_enable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra210_utmi_disable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value &= ~USB2_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static bool tegra210_utmi_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ if ((value & USB2_PORT_WAKE_INTERRUPT_ENABLE(index)) &&
+ (value & USB2_PORT_WAKEUP_EVENT(index)))
+ return true;
+
+ return false;
+}
+
+static int tegra210_hsic_enable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_HSIC_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static int tegra210_hsic_disable_phy_wake(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value &= ~USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ value &= ~ALL_WAKE_EVENTS;
+ value |= USB2_HSIC_PORT_WAKEUP_EVENT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_0);
+
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+}
+
+static bool tegra210_hsic_phy_remote_wake_detected(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_0);
+ if ((value & USB2_HSIC_PORT_WAKE_INTERRUPT_ENABLE(index)) &&
+ (value & USB2_HSIC_PORT_WAKEUP_EVENT(index)))
+ return true;
+
+ return false;
+}
+
+#define padctl_pmc_readl(_priv, _offset) \
+({ \
+ u32 value; \
+ WARN(regmap_read(_priv->regmap, _offset, &value), "read %s failed\n", #_offset);\
+ value; \
+})
+
+#define padctl_pmc_writel(_priv, _value, _offset) \
+ WARN(regmap_write(_priv->regmap, _offset, _value), "write %s failed\n", #_offset)
+
+static int tegra210_pmc_utmi_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
+ enum usb_device_speed speed)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
+ unsigned int port = lane->index;
+ u32 value, tctrl, pctrl, rpd_ctrl;
+
+ if (!priv->regmap)
+ return -EOPNOTSUPP;
+
+ if (speed > USB_SPEED_HIGH)
+ return -EINVAL;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ tctrl = TCTRL_VALUE(value);
+ pctrl = PCTRL_VALUE(value);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(port));
+ rpd_ctrl = RPD_CTRL_VALUE(value);
+
+ /* ensure sleepwalk logic is disabled */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value &= ~UTMIP_MASTER_ENABLE(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ /* ensure sleepwalk logic is in low power mode */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_MASTER_CONFIG);
+ value |= UTMIP_PWR(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_MASTER_CONFIG);
+
+ /* set debounce time */
+ value = padctl_pmc_readl(priv, PMC_USB_DEBOUNCE_DEL);
+ value &= ~UTMIP_LINE_DEB_CNT(~0);
+ value |= UTMIP_LINE_DEB_CNT(0x1);
+ padctl_pmc_writel(priv, value, PMC_USB_DEBOUNCE_DEL);
+
+ /* ensure fake events of sleepwalk logic are disabled */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_FAKE(port));
+ value &= ~(UTMIP_FAKE_USBOP_VAL(port) | UTMIP_FAKE_USBON_VAL(port) |
+ UTMIP_FAKE_USBOP_EN(port) | UTMIP_FAKE_USBON_EN(port));
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_FAKE(port));
+
+ /* ensure wake events of sleepwalk logic are not latched */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+ value &= ~UTMIP_LINE_WAKEUP_EN(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+
+ /* disable wake event triggers of sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value &= ~UTMIP_WAKE_VAL(port, ~0);
+ value |= UTMIP_WAKE_VAL_NONE(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ /* power down the line state detectors of the pad */
+ value = padctl_pmc_readl(priv, PMC_USB_AO);
+ value |= (USBOP_VAL_PD(port) | USBON_VAL_PD(port));
+ padctl_pmc_writel(priv, value, PMC_USB_AO);
+
+ /* save state per speed */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SAVED_STATE(port));
+ value &= ~SPEED(port, ~0);
+
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ value |= UTMI_HS(port);
+ break;
+
+ case USB_SPEED_FULL:
+ value |= UTMI_FS(port);
+ break;
+
+ case USB_SPEED_LOW:
+ value |= UTMI_LS(port);
+ break;
+
+ default:
+ value |= UTMI_RST(port);
+ break;
+ }
+
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SAVED_STATE(port));
+
+ /* enable the trigger of the sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEPWALK_CFG(port));
+ value |= UTMIP_LINEVAL_WALK_EN(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEPWALK_CFG(port));
+
+ /*
+ * Reset the walk pointer and clear the alarm of the sleepwalk logic,
+ * as well as capture the configuration of the USB2.0 pad.
+ */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
+ value |= UTMIP_CLR_WALK_PTR(port) | UTMIP_CLR_WAKE_ALARM(port) | UTMIP_CAP_CFG(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
+
+ /* program electrical parameters read from XUSB PADCTL */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_TERM_PAD_CFG);
+ value &= ~(TCTRL_VAL(~0) | PCTRL_VAL(~0));
+ value |= (TCTRL_VAL(tctrl) | PCTRL_VAL(pctrl));
+ padctl_pmc_writel(priv, value, PMC_UTMIP_TERM_PAD_CFG);
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_PAD_CFGX(port));
+ value &= ~RPD_CTRL_PX(~0);
+ value |= RPD_CTRL_PX(rpd_ctrl);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_PAD_CFGX(port));
+
+ /*
+ * Set up the pull-ups and pull-downs of the signals during the four
+ * stages of sleepwalk. If a device is connected, program sleepwalk
+ * logic to maintain a J and keep driving K upon seeing remote wake.
+ */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_SLEEPWALK_PX(port));
+ value = UTMIP_USBOP_RPD_A | UTMIP_USBOP_RPD_B | UTMIP_USBOP_RPD_C | UTMIP_USBOP_RPD_D;
+ value |= UTMIP_USBON_RPD_A | UTMIP_USBON_RPD_B | UTMIP_USBON_RPD_C | UTMIP_USBON_RPD_D;
+
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ /* J state: D+/D- = high/low, K state: D+/D- = low/high */
+ value |= UTMIP_HIGHZ_A;
+ value |= UTMIP_AP_A;
+ value |= UTMIP_AN_B | UTMIP_AN_C | UTMIP_AN_D;
+ break;
+
+ case USB_SPEED_LOW:
+ /* J state: D+/D- = low/high, K state: D+/D- = high/low */
+ value |= UTMIP_HIGHZ_A;
+ value |= UTMIP_AN_A;
+ value |= UTMIP_AP_B | UTMIP_AP_C | UTMIP_AP_D;
+ break;
+
+ default:
+ value |= UTMIP_HIGHZ_A | UTMIP_HIGHZ_B | UTMIP_HIGHZ_C | UTMIP_HIGHZ_D;
+ break;
+ }
+
+ padctl_pmc_writel(priv, value, PMC_UTMIP_SLEEPWALK_PX(port));
+
+ /* power up the line state detectors of the pad */
+ value = padctl_pmc_readl(priv, PMC_USB_AO);
+ value &= ~(USBOP_VAL_PD(port) | USBON_VAL_PD(port));
+ padctl_pmc_writel(priv, value, PMC_USB_AO);
+
+ usleep_range(50, 100);
+
+ /* switch the electrical control of the USB2.0 pad to PMC */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value |= UTMIP_FSLS_USE_PMC(port) | UTMIP_PCTRL_USE_PMC(port) | UTMIP_TCTRL_USE_PMC(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG1);
+ value |= UTMIP_RPD_CTRL_USE_PMC_PX(port) | UTMIP_RPU_SWITC_LOW_USE_PMC_PX(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG1);
+
+ /* set the wake signaling trigger events */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value &= ~UTMIP_WAKE_VAL(port, ~0);
+ value |= UTMIP_WAKE_VAL_ANY(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ /* enable the wake detection */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value |= UTMIP_MASTER_ENABLE(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+ value |= UTMIP_LINE_WAKEUP_EN(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+
+ return 0;
+}
+
+static int tegra210_pmc_utmi_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
+ unsigned int port = lane->index;
+ u32 value;
+
+ if (!priv->regmap)
+ return -EOPNOTSUPP;
+
+ /* disable the wake detection */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value &= ~UTMIP_MASTER_ENABLE(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+ value &= ~UTMIP_LINE_WAKEUP_EN(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+
+ /* switch the electrical control of the USB2.0 pad to XUSB or USB2 */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value &= ~(UTMIP_FSLS_USE_PMC(port) | UTMIP_PCTRL_USE_PMC(port) |
+ UTMIP_TCTRL_USE_PMC(port));
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG1);
+ value &= ~(UTMIP_RPD_CTRL_USE_PMC_PX(port) | UTMIP_RPU_SWITC_LOW_USE_PMC_PX(port));
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG1);
+
+ /* disable wake event triggers of sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+ value &= ~UTMIP_WAKE_VAL(port, ~0);
+ value |= UTMIP_WAKE_VAL_NONE(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_SLEEP_CFG(port));
+
+ /* power down the line state detectors of the port */
+ value = padctl_pmc_readl(priv, PMC_USB_AO);
+ value |= (USBOP_VAL_PD(port) | USBON_VAL_PD(port));
+ padctl_pmc_writel(priv, value, PMC_USB_AO);
+
+ /* clear alarm of the sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
+ value |= UTMIP_CLR_WAKE_ALARM(port);
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
+
+ return 0;
+}
+
+static int tegra210_pmc_hsic_enable_phy_sleepwalk(struct tegra_xusb_lane *lane,
+ enum usb_device_speed speed)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
+ u32 value;
+
+ if (!priv->regmap)
+ return -EOPNOTSUPP;
+
+ /* ensure sleepwalk logic is disabled */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
+ value &= ~UHSIC_MASTER_ENABLE;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
+
+ /* ensure sleepwalk logic is in low power mode */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_MASTER_CONFIG);
+ value |= UHSIC_PWR;
+ padctl_pmc_writel(priv, value, PMC_UTMIP_MASTER_CONFIG);
+
+ /* set debounce time */
+ value = padctl_pmc_readl(priv, PMC_USB_DEBOUNCE_DEL);
+ value &= ~UHSIC_LINE_DEB_CNT(~0);
+ value |= UHSIC_LINE_DEB_CNT(0x1);
+ padctl_pmc_writel(priv, value, PMC_USB_DEBOUNCE_DEL);
+
+ /* ensure fake events of sleepwalk logic are disabled */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_FAKE);
+ value &= ~(UHSIC_FAKE_STROBE_VAL | UHSIC_FAKE_DATA_VAL |
+ UHSIC_FAKE_STROBE_EN | UHSIC_FAKE_DATA_EN);
+ padctl_pmc_writel(priv, value, PMC_UHSIC_FAKE);
+
+ /* ensure wake events of sleepwalk logic are not latched */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+ value &= ~UHSIC_LINE_WAKEUP_EN;
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+
+ /* disable wake event triggers of sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
+ value &= ~UHSIC_WAKE_VAL(~0);
+ value |= UHSIC_WAKE_VAL_NONE;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
+
+ /* power down the line state detectors of the port */
+ value = padctl_pmc_readl(priv, PMC_USB_AO);
+ value |= STROBE_VAL_PD | DATA0_VAL_PD | DATA1_VAL_PD;
+ padctl_pmc_writel(priv, value, PMC_USB_AO);
+
+ /* save state, HSIC always comes up as HS */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SAVED_STATE);
+ value &= ~UHSIC_MODE(~0);
+ value |= UHSIC_HS;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SAVED_STATE);
+
+ /* enable the trigger of the sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEPWALK_CFG);
+ value |= UHSIC_WAKE_WALK_EN | UHSIC_LINEVAL_WALK_EN;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEPWALK_CFG);
+
+ /*
+ * Reset the walk pointer and clear the alarm of the sleepwalk logic,
+ * as well as capture the configuration of the USB2.0 port.
+ */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
+ value |= UHSIC_CLR_WALK_PTR | UHSIC_CLR_WAKE_ALARM;
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
+
+ /*
+ * Set up the pull-ups and pull-downs of the signals during the four
+ * stages of sleepwalk. Maintain a HSIC IDLE and keep driving HSIC
+ * RESUME upon remote wake.
+ */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEPWALK_P0);
+ value = UHSIC_DATA0_RPD_A | UHSIC_DATA0_RPU_B | UHSIC_DATA0_RPU_C | UHSIC_DATA0_RPU_D |
+ UHSIC_STROBE_RPU_A | UHSIC_STROBE_RPD_B | UHSIC_STROBE_RPD_C | UHSIC_STROBE_RPD_D;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEPWALK_P0);
+
+ /* power up the line state detectors of the port */
+ value = padctl_pmc_readl(priv, PMC_USB_AO);
+ value &= ~(STROBE_VAL_PD | DATA0_VAL_PD | DATA1_VAL_PD);
+ padctl_pmc_writel(priv, value, PMC_USB_AO);
+
+ usleep_range(50, 100);
+
+ /* set the wake signaling trigger events */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
+ value &= ~UHSIC_WAKE_VAL(~0);
+ value |= UHSIC_WAKE_VAL_SD10;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
+
+ /* enable the wake detection */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
+ value |= UHSIC_MASTER_ENABLE;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+ value |= UHSIC_LINE_WAKEUP_EN;
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+
+ return 0;
+}
+
+static int tegra210_pmc_hsic_disable_phy_sleepwalk(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
+ u32 value;
+
+ if (!priv->regmap)
+ return -EOPNOTSUPP;
+
+ /* disable the wake detection */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
+ value &= ~UHSIC_MASTER_ENABLE;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
+
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+ value &= ~UHSIC_LINE_WAKEUP_EN;
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_LINE_WAKEUP);
+
+ /* disable wake event triggers of sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UHSIC_SLEEP_CFG);
+ value &= ~UHSIC_WAKE_VAL(~0);
+ value |= UHSIC_WAKE_VAL_NONE;
+ padctl_pmc_writel(priv, value, PMC_UHSIC_SLEEP_CFG);
+
+ /* power down the line state detectors of the port */
+ value = padctl_pmc_readl(priv, PMC_USB_AO);
+ value |= STROBE_VAL_PD | DATA0_VAL_PD | DATA1_VAL_PD;
+ padctl_pmc_writel(priv, value, PMC_USB_AO);
+
+ /* clear alarm of the sleepwalk logic */
+ value = padctl_pmc_readl(priv, PMC_UTMIP_UHSIC_TRIGGERS);
+ value |= UHSIC_CLR_WAKE_ALARM;
+ padctl_pmc_writel(priv, value, PMC_UTMIP_UHSIC_TRIGGERS);
+
+ return 0;
+}
+
static int tegra210_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int index, bool enable)
{
@@ -911,14 +1788,36 @@ static void tegra210_usb2_lane_remove(struct tegra_xusb_lane *lane)
static const struct tegra_xusb_lane_ops tegra210_usb2_lane_ops = {
.probe = tegra210_usb2_lane_probe,
.remove = tegra210_usb2_lane_remove,
+ .enable_phy_sleepwalk = tegra210_pmc_utmi_enable_phy_sleepwalk,
+ .disable_phy_sleepwalk = tegra210_pmc_utmi_disable_phy_sleepwalk,
+ .enable_phy_wake = tegra210_utmi_enable_phy_wake,
+ .disable_phy_wake = tegra210_utmi_disable_phy_wake,
+ .remote_wake_detected = tegra210_utmi_phy_remote_wake_detected,
};
static int tegra210_usb2_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ struct tegra_xusb_usb2_port *port;
+ int err;
u32 value;
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ err = regulator_enable(port->supply);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&padctl->lock);
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
value &= ~(XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK <<
XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT);
@@ -926,14 +1825,31 @@ static int tegra210_usb2_phy_init(struct phy *phy)
XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
- return tegra210_xusb_padctl_enable(padctl);
+ mutex_unlock(&padctl->lock);
+
+ return 0;
}
static int tegra210_usb2_phy_exit(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port;
+ int err;
- return tegra210_xusb_padctl_disable(lane->pad->padctl);
+ port = tegra_xusb_find_usb2_port(padctl, lane->index);
+ if (!port) {
+ dev_err(&phy->dev, "no port found for USB2 lane %u\n", lane->index);
+ return -ENODEV;
+ }
+
+ if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ err = regulator_disable(port->supply);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
@@ -1053,6 +1969,8 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
priv = to_tegra210_xusb_padctl(padctl);
+ mutex_lock(&padctl->lock);
+
if (port->usb3_port_fake != -1) {
value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(
@@ -1146,14 +2064,6 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
- if (port->supply && port->mode == USB_DR_MODE_HOST) {
- err = regulator_enable(port->supply);
- if (err)
- return err;
- }
-
- mutex_lock(&padctl->lock);
-
if (pad->enable > 0) {
pad->enable++;
mutex_unlock(&padctl->lock);
@@ -1162,7 +2072,7 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
err = clk_prepare_enable(pad->clk);
if (err)
- goto disable_regulator;
+ goto out;
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK <<
@@ -1194,8 +2104,7 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
return 0;
-disable_regulator:
- regulator_disable(port->supply);
+out:
mutex_unlock(&padctl->lock);
return err;
}
@@ -1254,7 +2163,6 @@ static int tegra210_usb2_phy_power_off(struct phy *phy)
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
out:
- regulator_disable(port->supply);
mutex_unlock(&padctl->lock);
return 0;
}
@@ -1376,6 +2284,11 @@ static void tegra210_hsic_lane_remove(struct tegra_xusb_lane *lane)
static const struct tegra_xusb_lane_ops tegra210_hsic_lane_ops = {
.probe = tegra210_hsic_lane_probe,
.remove = tegra210_hsic_lane_remove,
+ .enable_phy_sleepwalk = tegra210_pmc_hsic_enable_phy_sleepwalk,
+ .disable_phy_sleepwalk = tegra210_pmc_hsic_disable_phy_sleepwalk,
+ .enable_phy_wake = tegra210_hsic_enable_phy_wake,
+ .disable_phy_wake = tegra210_hsic_disable_phy_wake,
+ .remote_wake_detected = tegra210_hsic_phy_remote_wake_detected,
};
static int tegra210_hsic_phy_init(struct phy *phy)
@@ -1391,14 +2304,12 @@ static int tegra210_hsic_phy_init(struct phy *phy)
XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
- return tegra210_xusb_padctl_enable(padctl);
+ return 0;
}
static int tegra210_hsic_phy_exit(struct phy *phy)
{
- struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
-
- return tegra210_xusb_padctl_disable(lane->pad->padctl);
+ return 0;
}
static int tegra210_hsic_phy_power_on(struct phy *phy)
@@ -1582,6 +2493,55 @@ static const struct tegra_xusb_pad_soc tegra210_hsic_pad = {
.ops = &tegra210_hsic_ops,
};
+static void tegra210_uphy_lane_iddq_enable(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, lane->soc->regs.misc_ctl2);
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ_OVRD;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ_OVRD;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_PWR_OVRD;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_PWR_OVRD;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_MASK;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_VAL;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_MASK;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_VAL;
+ padctl_writel(padctl, value, lane->soc->regs.misc_ctl2);
+}
+
+static void tegra210_uphy_lane_iddq_disable(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, lane->soc->regs.misc_ctl2);
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ_OVRD;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ_OVRD;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_PWR_OVRD;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_PWR_OVRD;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_IDDQ;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_MASK;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_TX_SLEEP_VAL;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_IDDQ;
+ value &= ~XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_MASK;
+ value |= XUSB_PADCTL_UPHY_MISC_PAD_CTL2_RX_SLEEP_VAL;
+ padctl_writel(padctl, value, lane->soc->regs.misc_ctl2);
+}
+
+#define TEGRA210_UPHY_LANE(_name, _offset, _shift, _mask, _type, _misc) \
+ { \
+ .name = _name, \
+ .offset = _offset, \
+ .shift = _shift, \
+ .mask = _mask, \
+ .num_funcs = ARRAY_SIZE(tegra210_##_type##_functions), \
+ .funcs = tegra210_##_type##_functions, \
+ .regs.misc_ctl2 = _misc, \
+ }
+
static const char *tegra210_pcie_functions[] = {
"pcie-x1",
"usb3-ss",
@@ -1590,15 +2550,137 @@ static const char *tegra210_pcie_functions[] = {
};
static const struct tegra_xusb_lane_soc tegra210_pcie_lanes[] = {
- TEGRA210_LANE("pcie-0", 0x028, 12, 0x3, pcie),
- TEGRA210_LANE("pcie-1", 0x028, 14, 0x3, pcie),
- TEGRA210_LANE("pcie-2", 0x028, 16, 0x3, pcie),
- TEGRA210_LANE("pcie-3", 0x028, 18, 0x3, pcie),
- TEGRA210_LANE("pcie-4", 0x028, 20, 0x3, pcie),
- TEGRA210_LANE("pcie-5", 0x028, 22, 0x3, pcie),
- TEGRA210_LANE("pcie-6", 0x028, 24, 0x3, pcie),
+ TEGRA210_UPHY_LANE("pcie-0", 0x028, 12, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(0)),
+ TEGRA210_UPHY_LANE("pcie-1", 0x028, 14, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(1)),
+ TEGRA210_UPHY_LANE("pcie-2", 0x028, 16, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(2)),
+ TEGRA210_UPHY_LANE("pcie-3", 0x028, 18, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(3)),
+ TEGRA210_UPHY_LANE("pcie-4", 0x028, 20, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(4)),
+ TEGRA210_UPHY_LANE("pcie-5", 0x028, 22, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(5)),
+ TEGRA210_UPHY_LANE("pcie-6", 0x028, 24, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL2(6)),
};
+static struct tegra_xusb_usb3_port *
+tegra210_lane_to_usb3_port(struct tegra_xusb_lane *lane)
+{
+ int port;
+
+ if (!lane || !lane->pad || !lane->pad->padctl)
+ return NULL;
+
+ port = tegra210_usb3_lane_map(lane);
+ if (port < 0)
+ return NULL;
+
+ return tegra_xusb_find_usb3_port(lane->pad->padctl, port);
+}
+
+static int tegra210_usb3_phy_power_on(struct phy *phy)
+{
+ struct device *dev = &phy->dev;
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb3_port *usb3 = tegra210_lane_to_usb3_port(lane);
+ unsigned int index;
+ u32 value;
+
+ if (!usb3) {
+ dev_err(dev, "no USB3 port found for lane %u\n", lane->index);
+ return -ENODEV;
+ }
+
+ index = usb3->base.index;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+
+ if (!usb3->internal)
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+ else
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
+ value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
+ value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT);
+ value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
+
+ padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL,
+ XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
+ value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
+
+ padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL,
+ XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ return 0;
+}
+
+static int tegra210_usb3_phy_power_off(struct phy *phy)
+{
+ struct device *dev = &phy->dev;
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb3_port *usb3 = tegra210_lane_to_usb3_port(lane);
+ unsigned int index;
+ u32 value;
+
+ if (!usb3) {
+ dev_err(dev, "no USB3 port found for lane %u\n", lane->index);
+ return -ENODEV;
+ }
+
+ index = usb3->base.index;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ return 0;
+}
static struct tegra_xusb_lane *
tegra210_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
unsigned int index)
@@ -1635,40 +2717,40 @@ static void tegra210_pcie_lane_remove(struct tegra_xusb_lane *lane)
static const struct tegra_xusb_lane_ops tegra210_pcie_lane_ops = {
.probe = tegra210_pcie_lane_probe,
.remove = tegra210_pcie_lane_remove,
+ .iddq_enable = tegra210_uphy_lane_iddq_enable,
+ .iddq_disable = tegra210_uphy_lane_iddq_disable,
+ .enable_phy_sleepwalk = tegra210_usb3_enable_phy_sleepwalk,
+ .disable_phy_sleepwalk = tegra210_usb3_disable_phy_sleepwalk,
+ .enable_phy_wake = tegra210_usb3_enable_phy_wake,
+ .disable_phy_wake = tegra210_usb3_disable_phy_wake,
+ .remote_wake_detected = tegra210_usb3_phy_remote_wake_detected,
};
static int tegra210_pcie_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- return tegra210_xusb_padctl_enable(lane->pad->padctl);
-}
+ mutex_lock(&padctl->lock);
-static int tegra210_pcie_phy_exit(struct phy *phy)
-{
- struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ tegra210_uphy_init(padctl);
+
+ mutex_unlock(&padctl->lock);
- return tegra210_xusb_padctl_disable(lane->pad->padctl);
+ return 0;
}
static int tegra210_pcie_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- u32 value;
- int err;
+ int err = 0;
mutex_lock(&padctl->lock);
- err = tegra210_pex_uphy_enable(padctl);
- if (err < 0)
- goto unlock;
-
- value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
- value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
- padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ if (tegra_xusb_lane_check(lane, "usb3-ss"))
+ err = tegra210_usb3_phy_power_on(phy);
-unlock:
mutex_unlock(&padctl->lock);
return err;
}
@@ -1677,20 +2759,19 @@ static int tegra210_pcie_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- u32 value;
+ int err = 0;
- value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
- value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
- padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ mutex_lock(&padctl->lock);
- tegra210_pex_uphy_disable(padctl);
+ if (tegra_xusb_lane_check(lane, "usb3-ss"))
+ err = tegra210_usb3_phy_power_off(phy);
- return 0;
+ mutex_unlock(&padctl->lock);
+ return err;
}
static const struct phy_ops tegra210_pcie_phy_ops = {
.init = tegra210_pcie_phy_init,
- .exit = tegra210_pcie_phy_exit,
.power_on = tegra210_pcie_phy_power_on,
.power_off = tegra210_pcie_phy_power_off,
.owner = THIS_MODULE,
@@ -1767,7 +2848,7 @@ static const struct tegra_xusb_pad_soc tegra210_pcie_pad = {
};
static const struct tegra_xusb_lane_soc tegra210_sata_lanes[] = {
- TEGRA210_LANE("sata-0", 0x028, 30, 0x3, pcie),
+ TEGRA210_UPHY_LANE("sata-0", 0x028, 30, 0x3, pcie, XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL2),
};
static struct tegra_xusb_lane *
@@ -1806,40 +2887,39 @@ static void tegra210_sata_lane_remove(struct tegra_xusb_lane *lane)
static const struct tegra_xusb_lane_ops tegra210_sata_lane_ops = {
.probe = tegra210_sata_lane_probe,
.remove = tegra210_sata_lane_remove,
+ .iddq_enable = tegra210_uphy_lane_iddq_enable,
+ .iddq_disable = tegra210_uphy_lane_iddq_disable,
+ .enable_phy_sleepwalk = tegra210_usb3_enable_phy_sleepwalk,
+ .disable_phy_sleepwalk = tegra210_usb3_disable_phy_sleepwalk,
+ .enable_phy_wake = tegra210_usb3_enable_phy_wake,
+ .disable_phy_wake = tegra210_usb3_disable_phy_wake,
+ .remote_wake_detected = tegra210_usb3_phy_remote_wake_detected,
};
static int tegra210_sata_phy_init(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- return tegra210_xusb_padctl_enable(lane->pad->padctl);
-}
+ mutex_lock(&padctl->lock);
-static int tegra210_sata_phy_exit(struct phy *phy)
-{
- struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ tegra210_uphy_init(padctl);
- return tegra210_xusb_padctl_disable(lane->pad->padctl);
+ mutex_unlock(&padctl->lock);
+ return 0;
}
static int tegra210_sata_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- u32 value;
- int err;
+ int err = 0;
mutex_lock(&padctl->lock);
- err = tegra210_sata_uphy_enable(padctl, false);
- if (err < 0)
- goto unlock;
-
- value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
- value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
- padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ if (tegra_xusb_lane_check(lane, "usb3-ss"))
+ err = tegra210_usb3_phy_power_on(phy);
-unlock:
mutex_unlock(&padctl->lock);
return err;
}
@@ -1848,20 +2928,19 @@ static int tegra210_sata_phy_power_off(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- u32 value;
+ int err = 0;
- value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
- value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
- padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+ mutex_lock(&padctl->lock);
- tegra210_sata_uphy_disable(lane->pad->padctl);
+ if (tegra_xusb_lane_check(lane, "usb3-ss"))
+ err = tegra210_usb3_phy_power_off(phy);
- return 0;
+ mutex_unlock(&padctl->lock);
+ return err;
}
static const struct phy_ops tegra210_sata_phy_ops = {
.init = tegra210_sata_phy_init,
- .exit = tegra210_sata_phy_exit,
.power_on = tegra210_sata_phy_power_on,
.power_off = tegra210_sata_phy_power_off,
.owner = THIS_MODULE,
@@ -1984,137 +3063,13 @@ static const struct tegra_xusb_port_ops tegra210_hsic_port_ops = {
static int tegra210_usb3_port_enable(struct tegra_xusb_port *port)
{
- struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
- struct tegra_xusb_padctl *padctl = port->padctl;
- struct tegra_xusb_lane *lane = usb3->base.lane;
- unsigned int index = port->index;
- u32 value;
- int err;
-
- value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
-
- if (!usb3->internal)
- value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
- else
- value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
-
- value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
- value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
- padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
-
- /*
- * TODO: move this code into the PCIe/SATA PHY ->power_on() callbacks
- * and conditionalize based on mux function? This seems to work, but
- * might not be the exact proper sequence.
- */
- err = regulator_enable(usb3->supply);
- if (err < 0)
- return err;
-
- value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
- value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK <<
- XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT);
- value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL <<
- XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT;
- padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
-
- value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
- value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK <<
- XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT);
- value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL <<
- XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT;
- padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
-
- padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL,
- XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(index));
-
- value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
- value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK <<
- XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT);
- value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL <<
- XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT;
- padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
-
- padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL,
- XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(index));
-
- if (lane->pad == padctl->sata)
- err = tegra210_sata_uphy_enable(padctl, true);
- else
- err = tegra210_pex_uphy_enable(padctl);
-
- if (err) {
- dev_err(&port->dev, "%s: failed to enable UPHY: %d\n",
- __func__, err);
- return err;
- }
-
- value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
- value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
- padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
- usleep_range(100, 200);
-
- value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
- value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
- padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
- usleep_range(100, 200);
-
- value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
- value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
- padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
return 0;
}
static void tegra210_usb3_port_disable(struct tegra_xusb_port *port)
{
- struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
- struct tegra_xusb_padctl *padctl = port->padctl;
- struct tegra_xusb_lane *lane = port->lane;
- unsigned int index = port->index;
- u32 value;
-
- value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
- value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
- padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
- usleep_range(100, 200);
-
- value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
- value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
- padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
- usleep_range(250, 350);
-
- value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
- value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
- padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
-
- if (lane->pad == padctl->sata)
- tegra210_sata_uphy_disable(padctl);
- else
- tegra210_pex_uphy_disable(padctl);
-
- regulator_disable(usb3->supply);
-
- value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
- value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
- value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, 0x7);
- padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
}
-static const struct tegra_xusb_lane_map tegra210_usb3_map[] = {
- { 0, "pcie", 6 },
- { 1, "pcie", 5 },
- { 2, "pcie", 0 },
- { 2, "pcie", 3 },
- { 3, "pcie", 4 },
- { 3, "pcie", 4 },
- { 0, NULL, 0 }
-};
-
static struct tegra_xusb_lane *
tegra210_usb3_port_map(struct tegra_xusb_port *port)
{
@@ -2188,6 +3143,8 @@ tegra210_xusb_padctl_probe(struct device *dev,
const struct tegra_xusb_padctl_soc *soc)
{
struct tegra210_xusb_padctl *padctl;
+ struct platform_device *pdev;
+ struct device_node *np;
int err;
padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
@@ -2201,6 +3158,26 @@ tegra210_xusb_padctl_probe(struct device *dev,
if (err < 0)
return ERR_PTR(err);
+ np = of_parse_phandle(dev->of_node, "nvidia,pmc", 0);
+ if (!np) {
+ dev_warn(dev, "nvidia,pmc property is missing\n");
+ goto out;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_warn(dev, "PMC device is not available\n");
+ goto out;
+ }
+
+ if (!platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
+ if (!padctl->regmap)
+ dev_info(dev, "failed to find PMC regmap\n");
+
+out:
return &padctl->base;
}
@@ -2208,9 +3185,75 @@ static void tegra210_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
+static void tegra210_xusb_padctl_save(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
+
+ priv->context.usb2_pad_mux =
+ padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+ priv->context.usb2_port_cap =
+ padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+ priv->context.ss_port_map =
+ padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ priv->context.usb3_pad_mux =
+ padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+}
+
+static void tegra210_xusb_padctl_restore(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);
+ struct tegra_xusb_lane *lane;
+
+ padctl_writel(padctl, priv->context.usb2_pad_mux,
+ XUSB_PADCTL_USB2_PAD_MUX);
+ padctl_writel(padctl, priv->context.usb2_port_cap,
+ XUSB_PADCTL_USB2_PORT_CAP);
+ padctl_writel(padctl, priv->context.ss_port_map,
+ XUSB_PADCTL_SS_PORT_MAP);
+
+ list_for_each_entry(lane, &padctl->lanes, list) {
+ if (lane->pad->ops->iddq_enable)
+ tegra210_uphy_lane_iddq_enable(lane);
+ }
+
+ padctl_writel(padctl, priv->context.usb3_pad_mux,
+ XUSB_PADCTL_USB3_PAD_MUX);
+
+ list_for_each_entry(lane, &padctl->lanes, list) {
+ if (lane->pad->ops->iddq_disable)
+ tegra210_uphy_lane_iddq_disable(lane);
+ }
+}
+
+static int tegra210_xusb_padctl_suspend_noirq(struct tegra_xusb_padctl *padctl)
+{
+ mutex_lock(&padctl->lock);
+
+ tegra210_uphy_deinit(padctl);
+
+ tegra210_xusb_padctl_save(padctl);
+
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static int tegra210_xusb_padctl_resume_noirq(struct tegra_xusb_padctl *padctl)
+{
+ mutex_lock(&padctl->lock);
+
+ tegra210_xusb_padctl_restore(padctl);
+
+ tegra210_uphy_init(padctl);
+
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
.probe = tegra210_xusb_padctl_probe,
.remove = tegra210_xusb_padctl_remove,
+ .suspend_noirq = tegra210_xusb_padctl_suspend_noirq,
+ .resume_noirq = tegra210_xusb_padctl_resume_noirq,
.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
.hsic_set_idle = tegra210_hsic_set_idle,
.vbus_override = tegra210_xusb_padctl_vbus_override,
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 941006f503e4..0aadac678191 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -321,11 +321,17 @@ static void tegra_xusb_lane_program(struct tegra_xusb_lane *lane)
if (soc->num_funcs < 2)
return;
+ if (lane->pad->ops->iddq_enable)
+ lane->pad->ops->iddq_enable(lane);
+
/* choose function */
value = padctl_readl(padctl, soc->offset);
value &= ~(soc->mask << soc->shift);
value |= lane->function << soc->shift;
padctl_writel(padctl, value, soc->offset);
+
+ if (lane->pad->ops->iddq_disable)
+ lane->pad->ops->iddq_disable(lane);
}
static void tegra_xusb_pad_program(struct tegra_xusb_pad *pad)
@@ -376,7 +382,7 @@ static int tegra_xusb_setup_pads(struct tegra_xusb_padctl *padctl)
return 0;
}
-static bool tegra_xusb_lane_check(struct tegra_xusb_lane *lane,
+bool tegra_xusb_lane_check(struct tegra_xusb_lane *lane,
const char *function)
{
const char *func = lane->soc->funcs[lane->function];
@@ -1267,10 +1273,36 @@ static int tegra_xusb_padctl_remove(struct platform_device *pdev)
return err;
}
+static int tegra_xusb_padctl_suspend_noirq(struct device *dev)
+{
+ struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev);
+
+ if (padctl->soc && padctl->soc->ops && padctl->soc->ops->suspend_noirq)
+ return padctl->soc->ops->suspend_noirq(padctl);
+
+ return 0;
+}
+
+static int tegra_xusb_padctl_resume_noirq(struct device *dev)
+{
+ struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev);
+
+ if (padctl->soc && padctl->soc->ops && padctl->soc->ops->resume_noirq)
+ return padctl->soc->ops->resume_noirq(padctl);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_xusb_padctl_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_xusb_padctl_suspend_noirq,
+ tegra_xusb_padctl_resume_noirq)
+};
+
static struct platform_driver tegra_xusb_padctl_driver = {
.driver = {
.name = "tegra-xusb-padctl",
.of_match_table = tegra_xusb_padctl_of_match,
+ .pm = &tegra_xusb_padctl_pm_ops,
},
.probe = tegra_xusb_padctl_probe,
.remove = tegra_xusb_padctl_remove,
@@ -1337,6 +1369,62 @@ int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_hsic_set_idle);
+int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ if (lane->pad->ops->enable_phy_sleepwalk)
+ return lane->pad->ops->enable_phy_sleepwalk(lane, speed);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_enable_phy_sleepwalk);
+
+int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ if (lane->pad->ops->disable_phy_sleepwalk)
+ return lane->pad->ops->disable_phy_sleepwalk(lane);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_disable_phy_sleepwalk);
+
+int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ if (lane->pad->ops->enable_phy_wake)
+ return lane->pad->ops->enable_phy_wake(lane);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_enable_phy_wake);
+
+int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ if (lane->pad->ops->disable_phy_wake)
+ return lane->pad->ops->disable_phy_wake(lane);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_disable_phy_wake);
+
+bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ if (lane->pad->ops->remote_wake_detected)
+ return lane->pad->ops->remote_wake_detected(lane);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_remote_wake_detected);
+
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable)
{
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index ea35af747066..034f7a2c28d6 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, Google Inc.
*/
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/usb/ch9.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
@@ -35,6 +36,10 @@ struct tegra_xusb_lane_soc {
const char * const *funcs;
unsigned int num_funcs;
+
+ struct {
+ unsigned int misc_ctl2;
+ } regs;
};
struct tegra_xusb_lane {
@@ -126,8 +131,17 @@ struct tegra_xusb_lane_ops {
struct device_node *np,
unsigned int index);
void (*remove)(struct tegra_xusb_lane *lane);
+ void (*iddq_enable)(struct tegra_xusb_lane *lane);
+ void (*iddq_disable)(struct tegra_xusb_lane *lane);
+ int (*enable_phy_sleepwalk)(struct tegra_xusb_lane *lane, enum usb_device_speed speed);
+ int (*disable_phy_sleepwalk)(struct tegra_xusb_lane *lane);
+ int (*enable_phy_wake)(struct tegra_xusb_lane *lane);
+ int (*disable_phy_wake)(struct tegra_xusb_lane *lane);
+ bool (*remote_wake_detected)(struct tegra_xusb_lane *lane);
};
+bool tegra_xusb_lane_check(struct tegra_xusb_lane *lane, const char *function);
+
/*
* pads
*/
@@ -230,7 +244,7 @@ struct tegra_xusb_pcie_pad {
struct reset_control *rst;
struct clk *pll;
- unsigned int enable;
+ bool enable;
};
static inline struct tegra_xusb_pcie_pad *
@@ -245,7 +259,7 @@ struct tegra_xusb_sata_pad {
struct reset_control *rst;
struct clk *pll;
- unsigned int enable;
+ bool enable;
};
static inline struct tegra_xusb_sata_pad *
@@ -388,6 +402,8 @@ struct tegra_xusb_padctl_ops {
const struct tegra_xusb_padctl_soc *soc);
void (*remove)(struct tegra_xusb_padctl *padctl);
+ int (*suspend_noirq)(struct tegra_xusb_padctl *padctl);
+ int (*resume_noirq)(struct tegra_xusb_padctl *padctl);
int (*usb3_save_context)(struct tegra_xusb_padctl *padctl,
unsigned int index);
int (*hsic_set_idle)(struct tegra_xusb_padctl *padctl,
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index 57adc08a89b2..9fe6ea6fdae5 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -242,19 +242,28 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
pm_runtime_enable(phy->dev);
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
- if (IS_ERR(generic_phy))
- return PTR_ERR(generic_phy);
+ if (IS_ERR(generic_phy)) {
+ error = PTR_ERR(generic_phy);
+ goto clk_unprepare;
+ }
phy_set_drvdata(generic_phy, phy);
phy_provider = devm_of_phy_provider_register(phy->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto clk_unprepare;
+ }
usb_add_phy_dev(&phy->phy);
return 0;
+
+clk_unprepare:
+ pm_runtime_disable(phy->dev);
+ clk_unprepare(phy->refclk);
+ return error;
}
static int dm816x_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index 812e5409d359..5771e2486a3b 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -544,8 +544,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
return 0;
}
-static ssize_t twl4030_usb_vbus_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t vbus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
int ret = -EINVAL;
@@ -557,7 +557,7 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(vbus, 0444, twl4030_usb_vbus_show, NULL);
+static DEVICE_ATTR_RO(vbus);
static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
{
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 3e4ef2b87526..0bcd19597e4a 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = {
static const struct intel_padgroup tglh_community0_gpps[] = {
TGL_GPP(0, 0, 24, 0), /* GPP_A */
- TGL_GPP(1, 25, 44, 128), /* GPP_R */
- TGL_GPP(2, 45, 70, 32), /* GPP_B */
- TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP), /* vGPIO_0 */
+ TGL_GPP(1, 25, 44, 32), /* GPP_R */
+ TGL_GPP(2, 45, 70, 64), /* GPP_B */
+ TGL_GPP(3, 71, 78, 96), /* vGPIO_0 */
};
static const struct intel_padgroup tglh_community1_gpps[] = {
- TGL_GPP(0, 79, 104, 96), /* GPP_D */
- TGL_GPP(1, 105, 128, 64), /* GPP_C */
- TGL_GPP(2, 129, 136, 160), /* GPP_S */
- TGL_GPP(3, 137, 153, 192), /* GPP_G */
- TGL_GPP(4, 154, 180, 224), /* vGPIO */
+ TGL_GPP(0, 79, 104, 128), /* GPP_D */
+ TGL_GPP(1, 105, 128, 160), /* GPP_C */
+ TGL_GPP(2, 129, 136, 192), /* GPP_S */
+ TGL_GPP(3, 137, 153, 224), /* GPP_G */
+ TGL_GPP(4, 154, 180, 256), /* vGPIO */
};
static const struct intel_padgroup tglh_community3_gpps[] = {
- TGL_GPP(0, 181, 193, 256), /* GPP_E */
- TGL_GPP(1, 194, 217, 288), /* GPP_F */
+ TGL_GPP(0, 181, 193, 288), /* GPP_E */
+ TGL_GPP(1, 194, 217, 320), /* GPP_F */
};
static const struct intel_padgroup tglh_community4_gpps[] = {
- TGL_GPP(0, 218, 241, 320), /* GPP_H */
+ TGL_GPP(0, 218, 241, 352), /* GPP_H */
TGL_GPP(1, 242, 251, 384), /* GPP_J */
- TGL_GPP(2, 252, 266, 352), /* GPP_K */
+ TGL_GPP(2, 252, 266, 416), /* GPP_K */
};
static const struct intel_padgroup tglh_community5_gpps[] = {
- TGL_GPP(0, 267, 281, 416), /* GPP_I */
+ TGL_GPP(0, 267, 281, 448), /* GPP_I */
TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 5b3b048725cc..45ebdeba985a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
- } else if (hw->soc->bias_set_combo) {
- err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
- if (err)
- return err;
} else {
- return -ENOTSUPP;
+ err = mtk_pinconf_bias_set_rev1(hw, desc, pullup);
+ if (err)
+ err = mtk_pinconf_bias_set(hw, desc, pullup);
}
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index a76be6cc26ee..5b764740b829 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -444,8 +444,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
- BIT(WAKE_CNTRL_OFF_S4);
+ u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index f831526d06ff..49e32684dbb2 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -950,23 +950,37 @@ static int k210_fpioa_probe(struct platform_device *pdev)
return ret;
pdata->pclk = devm_clk_get_optional(dev, "pclk");
- if (!IS_ERR(pdata->pclk))
- clk_prepare_enable(pdata->pclk);
+ if (!IS_ERR(pdata->pclk)) {
+ ret = clk_prepare_enable(pdata->pclk);
+ if (ret)
+ goto disable_clk;
+ }
pdata->sysctl_map =
syscon_regmap_lookup_by_phandle_args(np,
"canaan,k210-sysctl-power",
1, &pdata->power_offset);
- if (IS_ERR(pdata->sysctl_map))
- return PTR_ERR(pdata->sysctl_map);
+ if (IS_ERR(pdata->sysctl_map)) {
+ ret = PTR_ERR(pdata->sysctl_map);
+ goto disable_pclk;
+ }
k210_fpioa_init_ties(pdata);
pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
- if (IS_ERR(pdata->pctl))
- return PTR_ERR(pdata->pctl);
+ if (IS_ERR(pdata->pctl)) {
+ ret = PTR_ERR(pdata->pctl);
+ goto disable_pclk;
+ }
return 0;
+
+disable_pclk:
+ clk_disable_unprepare(pdata->pclk);
+disable_clk:
+ clk_disable_unprepare(pdata->clk);
+
+ return ret;
}
static const struct of_device_id k210_fpioa_dt_ids[] = {
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 2f51b4f99393..cad4e60df618 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -13,7 +13,7 @@ config PINCTRL_MSM
config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -21,7 +21,7 @@ config PINCTRL_APQ8064
config PINCTRL_APQ8084
tristate "Qualcomm APQ8084 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -29,7 +29,7 @@ config PINCTRL_APQ8084
config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -37,7 +37,7 @@ config PINCTRL_IPQ4019
config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -45,7 +45,7 @@ config PINCTRL_IPQ8064
config PINCTRL_IPQ8074
tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -55,7 +55,7 @@ config PINCTRL_IPQ8074
config PINCTRL_IPQ6018
tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -65,7 +65,7 @@ config PINCTRL_IPQ6018
config PINCTRL_MSM8226
tristate "Qualcomm 8226 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -74,7 +74,7 @@ config PINCTRL_MSM8226
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -82,7 +82,7 @@ config PINCTRL_MSM8660
config PINCTRL_MSM8960
tristate "Qualcomm 8960 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -90,7 +90,7 @@ config PINCTRL_MSM8960
config PINCTRL_MDM9615
tristate "Qualcomm 9615 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -98,7 +98,7 @@ config PINCTRL_MDM9615
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -106,7 +106,7 @@ config PINCTRL_MSM8X74
config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -114,7 +114,7 @@ config PINCTRL_MSM8916
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -124,7 +124,7 @@ config PINCTRL_MSM8953
config PINCTRL_MSM8976
tristate "Qualcomm 8976 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -134,7 +134,7 @@ config PINCTRL_MSM8976
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -143,7 +143,7 @@ config PINCTRL_MSM8994
config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -151,7 +151,7 @@ config PINCTRL_MSM8996
config PINCTRL_MSM8998
tristate "Qualcomm MSM8998 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -159,7 +159,7 @@ config PINCTRL_MSM8998
config PINCTRL_QCS404
tristate "Qualcomm QCS404 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -167,7 +167,7 @@ config PINCTRL_QCS404
config PINCTRL_QDF2XXX
tristate "Qualcomm Technologies QDF2xxx pin controller driver"
- depends on GPIOLIB && ACPI
+ depends on ACPI
depends on PINCTRL_MSM
help
This is the GPIO driver for the TLMM block found on the
@@ -175,7 +175,7 @@ config PINCTRL_QDF2XXX
config PINCTRL_QCOM_SPMI_PMIC
tristate "Qualcomm SPMI PMIC pin controller driver"
- depends on GPIOLIB && OF && SPMI
+ depends on OF && SPMI
select REGMAP_SPMI
select PINMUX
select PINCONF
@@ -190,7 +190,7 @@ config PINCTRL_QCOM_SPMI_PMIC
config PINCTRL_QCOM_SSBI_PMIC
tristate "Qualcomm SSBI PMIC pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -204,7 +204,7 @@ config PINCTRL_QCOM_SSBI_PMIC
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -213,7 +213,7 @@ config PINCTRL_SC7180
config PINCTRL_SC7280
tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -222,7 +222,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
- depends on GPIOLIB && (OF || ACPI)
+ depends on (OF || ACPI)
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -231,7 +231,7 @@ config PINCTRL_SC8180X
config PINCTRL_SDM660
tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -240,7 +240,7 @@ config PINCTRL_SDM660
config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
- depends on GPIOLIB && (OF || ACPI)
+ depends on (OF || ACPI)
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -249,7 +249,7 @@ config PINCTRL_SDM845
config PINCTRL_SDX55
tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -258,7 +258,7 @@ config PINCTRL_SDX55
config PINCTRL_SM6125
tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -267,7 +267,7 @@ config PINCTRL_SM6125
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -276,7 +276,7 @@ config PINCTRL_SM8150
config PINCTRL_SM8250
tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
- depends on GPIOLIB && OF
+ depends on OF
depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -285,8 +285,7 @@ config PINCTRL_SM8250
config PINCTRL_SM8350
tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dc8d39ae045b..9c7679c06dca 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1219,10 +1219,12 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
}
/*
- * We suppose that we won't have any more functions than pins,
- * we'll reallocate that later anyway
+ * Find an upper bound for the maximum number of functions: in
+ * the worst case we have gpio_in, gpio_out, irq and up to four
+ * special functions per pin, plus one entry for the sentinel.
+ * We'll reallocate that later anyway.
*/
- pctl->functions = kcalloc(pctl->ngroups,
+ pctl->functions = kcalloc(4 * pctl->ngroups + 4,
sizeof(*pctl->functions),
GFP_KERNEL);
if (!pctl->functions)
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7d385c3b2239..d12db6c316ea 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -508,6 +508,7 @@ config THINKPAD_ACPI
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
+ depends on I2C
select ACPI_PLATFORM_PROFILE
select HWMON
select NVRAM
@@ -691,6 +692,7 @@ config INTEL_HID_EVENT
tristate "INTEL HID Event"
depends on ACPI
depends on INPUT
+ depends on I2C
select INPUT_SPARSEKMAP
help
This driver provides support for the Intel HID Event hotkey interface.
@@ -742,6 +744,7 @@ config INTEL_VBTN
tristate "INTEL VIRTUAL BUTTON"
depends on ACPI
depends on INPUT
+ depends on I2C
select INPUT_SPARSEKMAP
help
This driver provides support for the Intel Virtual Button interface.
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b9da58ee9b1e..3481479a2942 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -46,34 +46,79 @@
#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMC_RESULT_FAILED 0xFF
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
/* List of supported CPU ids */
#define AMD_CPU_ID_RV 0x15D0
#define AMD_CPU_ID_RN 0x1630
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
-#define AMD_SMU_FW_VERSION 0x0
#define PMC_MSG_DELAY_MIN_US 100
#define RESPONSE_REGISTER_LOOP_MAX 200
+#define SOC_SUBSYSTEM_IP_MAX 12
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
enum amd_pmc_def {
MSG_TEST = 0x01,
MSG_OS_HINT_PCO,
MSG_OS_HINT_RN,
};
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"ACP", BIT(4)},
+ {"VCN", BIT(5)},
+ {"ISP", BIT(6)},
+ {"NBIO", BIT(7)},
+ {"DF", BIT(8)},
+ {"USB0", BIT(9)},
+ {"USB1", BIT(10)},
+ {"LAPIC", BIT(11)},
+ {}
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
- void __iomem *smu_base;
+ void __iomem *smu_virt_addr;
+ void __iomem *fch_virt_addr;
u32 base_addr;
u32 cpu_id;
+ u32 active_ips;
struct device *dev;
+ struct mutex lock; /* serializes SMU command submission */
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
};
static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_cyclecount;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
- u32 value;
+ struct smu_metrics table;
+ int idx;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -EINVAL;
+
+ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+ seq_puts(s, "\n=== SMU Statistics ===\n");
+ seq_printf(s, "Table Version: %d\n", table.table_version);
+ seq_printf(s, "Hint Count: %d\n", table.hint_count);
+ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+ seq_puts(s, "\n=== Active time (in us) ===\n");
+ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ table.timecondition_notmet_lastcapture[idx]);
+ }
- value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
- seq_printf(s, "SMU FW Info: %x\n", value);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ u64 entry_time, exit_time, residency;
+
+ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+ /* The FCH timestamps are in 48 MHz ticks; convert to microseconds */
+ residency = exit_time - entry_time;
+ do_div(residency, 48);
+
+ seq_puts(s, "=== S0ix statistics ===\n");
+ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+ seq_printf(s, "Residency Time: %lld\n", residency);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
&smu_fw_info_fops);
+ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+ &s0ix_stats_fops);
}
#else
static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
}
#endif /* CONFIG_DEBUG_FS */
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 smu_phys_addr;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -EINVAL;
+
+ /* Get the list of active devices from the SMU */
+ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+ /* Get the DRAM address of the SMU metrics table */
+ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+ if (!dev->smu_virt_addr)
+ return -ENOMEM;
+
+ /* Start the logging */
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+ return 0;
+}
+
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
u32 value;
@@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
{
int rc;
- u8 msg;
u32 val;
+ mutex_lock(&dev->lock);
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
- val, val > 0, PMC_MSG_DELAY_MIN_US,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
if (rc) {
dev_err(dev->dev, "failed to talk to SMU\n");
- return rc;
+ goto out_unlock;
}
/* Write zero to response register */
@@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
/* Write message ID to message ID register */
- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
- return 0;
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMC_RESULT_OK:
+ if (ret) {
+ /* PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+ }
+ break;
+ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMC_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmc_dump_registers(dev);
+ return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PCO:
+ return MSG_OS_HINT_PCO;
+ case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_YC:
+ return MSG_OS_HINT_RN;
+ }
+ return -EINVAL;
}
static int __maybe_unused amd_pmc_suspend(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
+ u8 msg;
+
+ /* Reset and start SMU logging to monitor the s0i3 stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
- rc = amd_pmc_send_cmd(pdev, 1);
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "suspend failed\n");
- amd_pmc_dump_registers(pdev);
- return 0;
+ return rc;
}
static int __maybe_unused amd_pmc_resume(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
+ u8 msg;
+
+ /* Let SMU know that we are looking for stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
- rc = amd_pmc_send_cmd(pdev, 0);
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "resume failed\n");
- amd_pmc_dump_registers(pdev);
return 0;
}
@@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
};
static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
struct pci_dev *rdev;
- u32 base_addr_lo;
- u32 base_addr_hi;
- u64 base_addr;
+ u32 base_addr_lo, base_addr_hi;
+ u64 base_addr, fch_phys_addr;
int err;
u32 val;
@@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
pci_dev_put(rdev);
base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
- dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
- if (!dev->smu_base)
- return -ENOMEM;
-
dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
AMD_PMC_MAPPING_SIZE);
if (!dev->regbase)
return -ENOMEM;
- amd_pmc_dump_registers(dev);
+ mutex_init(&dev->lock);
+
+ /* Use FCH registers to get the S0ix stats */
+ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+ if (!dev->fch_virt_addr)
+ return -ENOMEM;
+
+ /* Use SMU to get the s0i3 debug stats */
+ err = amd_pmc_setup_smu_logging(dev);
+ if (err)
+ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
@@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
amd_pmc_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
return 0;
}
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0005", 0},
+ {"AMDI0006", 0},
+ {"AMDI0007", 0},
{"AMD0004", 0},
{ }
};
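
The s0ix_stats debugfs file added above reads the S0i3 entry and exit timestamps as two 32-bit halves from the FCH MMIO window and divides the delta by 48, since the counter runs at 48 MHz (48 ticks per microsecond). A minimal userspace-style sketch of the same arithmetic; the raw sample values below are hypothetical, not taken from real hardware:

#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit counter from its 32-bit halves, mirroring the two
 * ioread32() calls on the _L/_H FCH offsets in s0ix_stats_show(). */
static uint64_t fch_counter(uint32_t lo, uint32_t hi)
{
	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	/* Hypothetical raw samples; the driver reads these from the FCH registers. */
	uint64_t entry = fch_counter(48000000u, 0);	/* 1 s after counter start */
	uint64_t exit  = fch_counter(144000000u, 0);	/* 3 s after counter start */

	/* 48 MHz counter: 48 ticks per microsecond, as in do_div(residency, 48). */
	uint64_t residency_us = (exit - entry) / 48;

	printf("S0ix residency: %llu us\n", (unsigned long long)residency_us);
	return 0;
}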
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0cb927f0f301..a81dc4b191b7 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -41,6 +41,10 @@ static int wapf = -1;
module_param(wapf, uint, 0444);
MODULE_PARM_DESC(wapf, "WAPF value");
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+
static struct quirk_entry *quirks;
static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_use_lid_flip_devid,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS TP200s / E205SA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+ },
+ .driver_data = &quirk_asus_use_lid_flip_devid,
+ },
{},
};
@@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
else
wapf = quirks->wapf;
+ switch (tablet_mode_sw) {
+ case 0:
+ quirks->use_kbd_dock_devid = false;
+ quirks->use_lid_flip_devid = false;
+ break;
+ case 1:
+ quirks->use_kbd_dock_devid = true;
+ quirks->use_lid_flip_devid = false;
+ break;
+ case 2:
+ quirks->use_kbd_dock_devid = false;
+ quirks->use_lid_flip_devid = true;
+ break;
+ }
+
if (quirks->i8042_filter) {
ret = i8042_install_filter(quirks->i8042_filter);
if (ret) {
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 085ad0a0d22e..e9f4b30dcafa 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -573,7 +573,7 @@ static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
if (!rbu_data.entry_created) {
spin_unlock(&rbu_data.lock);
req_firm_rc = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_NOHOTPLUG, "dell_rbu",
+ FW_ACTION_NOUEVENT, "dell_rbu",
&rbu_device->dev, GFP_KERNEL, &context,
callbackfn_rbu);
if (req_firm_rc) {
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
new file mode 100644
index 000000000000..a9eae17cc43d
--- /dev/null
+++ b/drivers/platform/x86/dual_accel_detect.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinge (yoga) style 2-in-1 devices using 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_i2c_serialbus *sb;
+ int *count = data;
+
+ if (i2c_acpi_get_i2c_resource(ares, &sb))
+ *count = *count + 1;
+
+ return 1;
+}
+
+static int dual_accel_i2c_client_count(struct acpi_device *adev)
+{
+ int ret, count = 0;
+ LIST_HEAD(r);
+
+ ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+ return count;
+}
+
+static bool dual_accel_detect_bosc0200(void)
+{
+ struct acpi_device *adev;
+ int count;
+
+ adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+ if (!adev)
+ return false;
+
+ count = dual_accel_i2c_client_count(adev);
+
+ acpi_dev_put(adev);
+
+ return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+ /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+ if (acpi_dev_present("KIOX010A", NULL, -1) &&
+ acpi_dev_present("KIOX020A", NULL, -1))
+ return true;
+
+ /* Systems which use a single DUAL250E ACPI device to model 2 accels */
+ if (acpi_dev_present("DUAL250E", NULL, -1))
+ return true;
+
+ /* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+ if (dual_accel_detect_bosc0200())
+ return true;
+
+ return false;
+}
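
The header above only answers "does this machine have a dual-accelerometer setup?"; each consumer (intel-hid, intel-vbtn and thinkpad_acpi in the hunks below) caches the result once at probe time and uses it to suppress SW_TABLET_MODE reporting. A stripped-down sketch of that consumer pattern, with a hypothetical driver and private struct:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dual_accel_detect.h"

/* Hypothetical private data for a tablet-mode-switch driver. */
struct demo_priv {
	bool dual_accel;
	bool switches_registered;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Cache the detection result once at probe time. */
	priv->dual_accel = dual_accel_detect();
	platform_set_drvdata(pdev, priv);
	return 0;
}

static void demo_handle_tablet_event(struct demo_priv *priv)
{
	/*
	 * With two accelerometers the firmware tablet-mode events are not
	 * reliable, so skip registering a SW_TABLET_MODE switch; userspace
	 * derives the state directly from the accelerometers instead.
	 */
	if (priv->dual_accel)
		return;

	priv->switches_registered = true;
}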
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 5529d7b0abea..7f3a03f937f6 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -140,12 +140,15 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
}}
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
{ }
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 078648a9201b..2e4e97a626a5 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
+#include "dual_accel_detect.h"
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
@@ -25,6 +26,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INT33D5", 0},
{"INTC1051", 0},
{"INTC1054", 0},
+ {"INTC1070", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -121,6 +123,7 @@ struct intel_hid_priv {
struct input_dev *array;
struct input_dev *switches;
bool wakeup_mode;
+ bool dual_accel;
};
#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -450,22 +453,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
* SW_TABLET_MODE report, in these cases we enable support when receiving
* the first event instead of during driver setup.
*
- * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers
- * to allow the OS to determine the angle between the display and the base
- * of the device. On Windows these are read by a special HingeAngleService
- * process which calls an ACPI DSM (Device Specific Method) on the
- * ACPI KIOX010A device node for the sensor in the display, to let the
- * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can
- * disable the kbd and touchpad to avoid spurious input in tablet-mode.
- *
- * The linux kxcjk1013 driver calls the DSM for this once at probe time
- * to ensure that the builtin kbd and touchpad work. On some devices this
- * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case
- * there is not a functional tablet-mode switch, so we should not register
- * the tablet-mode switch device.
+ * See dual_accel_detect.h for more info on the dual_accel check.
*/
- if (!priv->switches && (event == 0xcc || event == 0xcd) &&
- !acpi_dev_present("KIOX010A", NULL, -1)) {
+ if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
dev_info(&device->dev, "switch event received, enable switches supports\n");
err = intel_hid_switches_setup(device);
if (err)
@@ -606,6 +596,8 @@ static int intel_hid_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
+ priv->dual_accel = dual_accel_detect();
+
err = intel_hid_input_setup(device);
if (err) {
pr_err("Failed to setup Intel HID hotkeys\n");
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 888a764efad1..309166431063 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
+#include "dual_accel_detect.h"
/* Returned when NOT in tablet mode on some HP Stream x360 11 models */
#define VGBS_TABLET_MODE_FLAG_ALT 0x10
@@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
struct intel_vbtn_priv {
struct input_dev *buttons_dev;
struct input_dev *switches_dev;
+ bool dual_accel;
bool has_buttons;
bool has_switches;
bool wakeup_mode;
@@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
input_dev = priv->buttons_dev;
} else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
if (!priv->has_switches) {
+ /* See dual_accel_detect.h for more info */
+ if (priv->dual_accel)
+ return;
+
dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
ret = input_register_device(priv->switches_dev);
if (ret)
@@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
{} /* Array terminator */
};
-static bool intel_vbtn_has_switches(acpi_handle handle)
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
{
unsigned long long vgbs;
acpi_status status;
+ /* See dual_accel_detect.h for more info */
+ if (dual_accel)
+ return false;
+
if (!dmi_check_system(dmi_switches_allow_list))
return false;
@@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle)
static int intel_vbtn_probe(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
- bool has_buttons, has_switches;
+ bool dual_accel, has_buttons, has_switches;
struct intel_vbtn_priv *priv;
acpi_status status;
int err;
+ dual_accel = dual_accel_detect();
has_buttons = acpi_has_method(handle, "VBDL");
- has_switches = intel_vbtn_has_switches(handle);
+ has_switches = intel_vbtn_has_switches(handle, dual_accel);
if (!has_buttons && !has_switches) {
dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
@@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
+ priv->dual_accel = dual_accel;
priv->has_buttons = has_buttons;
priv->has_switches = has_switches;
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index c37349f97bb8..d063d91db9bc 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -94,6 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = {
NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
NULL, 2, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
}
};
@@ -123,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = {
.table = {
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
NULL, 0, GPIO_ACTIVE_LOW),
+ {} /* Terminating entry */
}
};
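
The two pcengines-apuv2 hunks above apply the same fix: the GPIO core walks a gpiod_lookup_table's .table array until it hits an empty sentinel entry, so a table without a terminator lets the lookup run past the end of the array. A minimal sketch of a correctly terminated table; the chip and consumer names are hypothetical:

#include <linux/gpio/machine.h>

/* Hypothetical lookup: one LED line on a made-up GPIO chip. */
static struct gpiod_lookup_table demo_led_table = {
	.dev_id = "leds-gpio",
	.table = {
		GPIO_LOOKUP_IDX("demo-gpiochip", 7, NULL, 0, GPIO_ACTIVE_LOW),
		{} /* Terminating entry - the lookup loop stops here. */
	},
};

The table is handed to the GPIO core with gpiod_add_lookup_table(&demo_led_table) before the consumer probes, which is exactly what pcengines-apuv2 does for its LED and key tables.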
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3671b5d20613..6cfed4427fb0 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj,
else
ret = tlmi_save_bios_settings("");
+ if (!ret && !tlmi_priv.pending_changes) {
+ tlmi_priv.pending_changes = true;
+ /* Let userspace know it may need to re-check the pending reboot state */
+ kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
out:
kfree(auth_str);
kfree(set_str);
@@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = {
.sysfs_ops = &tlmi_kobj_sysfs_ops,
};
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
/* ---- Initialisation --------------------------------------------------------- */
static void tlmi_release_attr(void)
{
@@ -659,6 +672,7 @@ static void tlmi_release_attr(void)
kobject_put(&tlmi_priv.setting[i]->kobj);
}
}
+ sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
kset_unregister(tlmi_priv.attribute_kset);
/* Authentication structures */
@@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void)
/* Build attribute */
tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
- ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
- NULL, "%s", tlmi_priv.setting[i]->display_name);
+ ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+ "%s", tlmi_priv.setting[i]->display_name);
if (ret)
goto fail_create_attr;
@@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
}
+ ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+ if (ret)
+ goto fail_create_attr;
+
/* Create authentication entries */
tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
&tlmi_priv.class_dev->kobj);
@@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
}
tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
- ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
- NULL, "%s", "Admin");
+ ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
if (ret)
goto fail_create_attr;
@@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
- ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
- NULL, "%s", "System");
+ ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
if (ret)
goto fail_create_attr;
@@ -818,6 +834,7 @@ static int tlmi_analyze(void)
pr_info("Error retrieving possible values for %d : %s\n",
i, setting->display_name);
}
+ kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
tlmi_priv.setting[i] = setting;
tlmi_priv.settings_count++;
kfree(item);
@@ -844,10 +861,12 @@ static int tlmi_analyze(void)
if (pwdcfg.password_state & TLMI_PAP_PWD)
tlmi_priv.pwd_admin->valid = true;
+ kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+
tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
if (!tlmi_priv.pwd_power) {
ret = -ENOMEM;
- goto fail_clear_attr;
+ goto fail_free_pwd_admin;
}
strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
@@ -859,11 +878,19 @@ static int tlmi_analyze(void)
if (pwdcfg.password_state & TLMI_POP_PWD)
tlmi_priv.pwd_power->valid = true;
+ kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+
return 0;
+fail_free_pwd_admin:
+ kfree(tlmi_priv.pwd_admin);
fail_clear_attr:
- for (i = 0; i < TLMI_SETTINGS_COUNT; ++i)
- kfree(tlmi_priv.setting[i]);
+ for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+ if (tlmi_priv.setting[i]) {
+ kfree(tlmi_priv.setting[i]->possible_values);
+ kfree(tlmi_priv.setting[i]);
+ }
+ }
return ret;
}
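
The pending_reboot attribute added above follows a common sysfs pattern: a read-only kobj_attribute backed by a driver flag, plus a KOBJ_CHANGE uevent emitted the first time the flag flips so userspace knows to re-read it. A condensed sketch of that pattern with hypothetical names (the real driver hangs the file off its attribute kset):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static bool demo_pending_changes;
static struct kobject *demo_kobj;	/* created elsewhere, e.g. kobject_create_and_add() */

static ssize_t pending_reboot_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", demo_pending_changes);
}
/* __ATTR_RO() wires the attribute name to pending_reboot_show(). */
static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);

/* Called whenever a setting is successfully written. */
static void demo_note_change(void)
{
	if (!demo_pending_changes) {
		demo_pending_changes = true;
		/* Tell userspace the attribute is worth re-reading. */
		kobject_uevent(demo_kobj, KOBJ_CHANGE);
	}
}

The file itself is published with sysfs_create_file(demo_kobj, &pending_reboot.attr) and removed again in the teardown path, mirroring tlmi_sysfs_init() and tlmi_release_attr() above.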
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 6fa8da7af6c7..eb598846628a 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -60,6 +60,7 @@ struct think_lmi {
bool can_get_bios_selections;
bool can_set_bios_password;
bool can_get_password_settings;
+ bool pending_changes;
struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
struct device *class_dev;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 603156a6e3ed..50ff04c84650 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -73,6 +73,7 @@
#include <linux/uaccess.h>
#include <acpi/battery.h>
#include <acpi/video.h>
+#include "dual_accel_detect.h"
/* ThinkPad CMOS commands */
#define TP_CMOS_VOLUME_DOWN 0
@@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void)
* the laptop/tent/tablet mode to the EC. The bmc150 iio driver
* does not support this, so skip the hotkey on these models.
*/
- if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+ if (has_tablet_mode && !dual_accel_detect())
tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
type = "GMMS";
} else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index b010e4ca3383..11c60a273446 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device)
err = wireless_input_setup();
if (err)
- pr_err("Failed to setup hp wireless hotkeys\n");
+ pr_err("Failed to setup wireless hotkeys\n");
return err;
}
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index d40ed8621571..9610a9f08ff4 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -369,6 +369,7 @@ err_out:
dev->card_link = NULL;
return NULL;
}
+EXPORT_SYMBOL(pnp_request_card_device);
/**
* pnp_release_card_device - call this when the driver no longer needs the device
@@ -382,6 +383,7 @@ void pnp_release_card_device(struct pnp_dev *dev)
device_release_driver(&dev->dev);
drv->link.remove = &card_remove_first;
}
+EXPORT_SYMBOL(pnp_release_card_device);
/*
* suspend/resume callbacks
@@ -439,6 +441,7 @@ int pnp_register_card_driver(struct pnp_card_driver *drv)
}
return 0;
}
+EXPORT_SYMBOL(pnp_register_card_driver);
/**
* pnp_unregister_card_driver - unregisters a PnP card driver from the PnP Layer
@@ -451,8 +454,4 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv)
mutex_unlock(&pnp_lock);
pnp_unregister_driver(&drv->link);
}
-
-EXPORT_SYMBOL(pnp_request_card_device);
-EXPORT_SYMBOL(pnp_release_card_device);
-EXPORT_SYMBOL(pnp_register_card_driver);
EXPORT_SYMBOL(pnp_unregister_card_driver);
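
The PnP hunks here and in the files below are purely mechanical: each EXPORT_SYMBOL() moves from a collected block at the end of the file to directly after the function it exports, which is the usual kernel style and keeps the export visible next to the definition. The resulting pattern, with a hypothetical function:

#include <linux/export.h>

int demo_do_something(int arg)
{
	return arg * 2;
}
EXPORT_SYMBOL(demo_do_something);	/* export declared right after the definition */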
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 93a30a8f88d1..c29d590c5e4f 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -68,6 +68,7 @@ int pnp_device_attach(struct pnp_dev *pnp_dev)
mutex_unlock(&pnp_lock);
return 0;
}
+EXPORT_SYMBOL(pnp_device_attach);
void pnp_device_detach(struct pnp_dev *pnp_dev)
{
@@ -76,6 +77,7 @@ void pnp_device_detach(struct pnp_dev *pnp_dev)
pnp_dev->status = PNP_READY;
mutex_unlock(&pnp_lock);
}
+EXPORT_SYMBOL(pnp_device_detach);
static int pnp_device_probe(struct device *dev)
{
@@ -271,11 +273,13 @@ int pnp_register_driver(struct pnp_driver *drv)
return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(pnp_register_driver);
void pnp_unregister_driver(struct pnp_driver *drv)
{
driver_unregister(&drv->driver);
}
+EXPORT_SYMBOL(pnp_unregister_driver);
/**
* pnp_add_id - adds an EISA id to the specified device
@@ -310,8 +314,3 @@ struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id)
return dev_id;
}
-
-EXPORT_SYMBOL(pnp_register_driver);
-EXPORT_SYMBOL(pnp_unregister_driver);
-EXPORT_SYMBOL(pnp_device_attach);
-EXPORT_SYMBOL(pnp_device_detach);
diff --git a/drivers/pnp/isapnp/compat.c b/drivers/pnp/isapnp/compat.c
index 035e95092489..d60d9e377da5 100644
--- a/drivers/pnp/isapnp/compat.c
+++ b/drivers/pnp/isapnp/compat.c
@@ -63,5 +63,4 @@ struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
}
return NULL;
}
-
EXPORT_SYMBOL(pnp_find_dev);
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 144055593ec8..1765d6e60a8a 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -350,6 +350,7 @@ int pnp_start_dev(struct pnp_dev *dev)
dev_info(&dev->dev, "activated\n");
return 0;
}
+EXPORT_SYMBOL(pnp_start_dev);
/**
* pnp_stop_dev - low-level disable of the PnP device
@@ -371,6 +372,7 @@ int pnp_stop_dev(struct pnp_dev *dev)
dev_info(&dev->dev, "disabled\n");
return 0;
}
+EXPORT_SYMBOL(pnp_stop_dev);
/**
* pnp_activate_dev - activates a PnP device for use
@@ -396,6 +398,7 @@ int pnp_activate_dev(struct pnp_dev *dev)
dev->active = 1;
return 0;
}
+EXPORT_SYMBOL(pnp_activate_dev);
/**
* pnp_disable_dev - disables device
@@ -423,8 +426,4 @@ int pnp_disable_dev(struct pnp_dev *dev)
return 0;
}
-
-EXPORT_SYMBOL(pnp_start_dev);
-EXPORT_SYMBOL(pnp_stop_dev);
-EXPORT_SYMBOL(pnp_activate_dev);
EXPORT_SYMBOL(pnp_disable_dev);
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index e4f53d31191d..a6073db10ec6 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -30,7 +30,6 @@ int pnp_is_active(struct pnp_dev *dev)
else
return 1;
}
-
EXPORT_SYMBOL(pnp_is_active);
/*
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 125e592af445..d8ecffe72f16 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -351,10 +351,8 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
at91_shdwc->shdwc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(at91_shdwc->shdwc_base)) {
- dev_err(&pdev->dev, "Could not map reset controller address\n");
+ if (IS_ERR(at91_shdwc->shdwc_base))
return PTR_ERR(at91_shdwc->shdwc_base);
- }
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
at91_shdwc->rcfg = match->data;
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index c5067eb75370..1c5af2fef142 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -90,6 +90,7 @@ static const struct of_device_id of_gpio_poweroff_match[] = {
{ .compatible = "gpio-poweroff", },
{},
};
+MODULE_DEVICE_TABLE(of, of_gpio_poweroff_match);
static struct platform_driver gpio_poweroff_driver = {
.probe = gpio_poweroff_probe,
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index 211eeef0c81a..c720112db704 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -71,6 +71,7 @@ static const struct of_device_id rsctrl_of_match[] = {
{.compatible = "ti,keystone-reset", },
{},
};
+MODULE_DEVICE_TABLE(of, rsctrl_of_match);
static int rsctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index d1495af30081..8688c8ba8894 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -52,6 +52,7 @@
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
index f697088e0ad1..20701203935f 100644
--- a/drivers/power/reset/regulator-poweroff.c
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -64,6 +64,7 @@ static const struct of_device_id of_regulator_poweroff_match[] = {
{ .compatible = "regulator-poweroff", },
{},
};
+MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
static struct platform_driver regulator_poweroff_driver = {
.probe = regulator_poweroff_probe,
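
The three reset-driver hunks above each add a MODULE_DEVICE_TABLE(of, ...) line. Without it the OF match table is only used for binding once the module is already loaded; modprobe never sees the compatible strings, so the module is not auto-loaded from the device tree. A minimal sketch of the pattern, with a hypothetical driver and compatible string:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-device", },
	{ /* sentinel */ },
};
/* Emits the alias table that modprobe uses for DT-based autoloading. */
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-device",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");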
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index e696364126f1..11f5368e810e 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -712,7 +712,8 @@ config BATTERY_GOLDFISH
config BATTERY_RT5033
tristate "RT5033 fuel gauge support"
- depends on MFD_RT5033
+ depends on I2C
+ select REGMAP_I2C
help
This adds support for battery fuel gauge in Richtek RT5033 PMIC.
The fuelgauge calculates and determines the battery state of charge
@@ -760,15 +761,6 @@ config CHARGER_UCS1002
Say Y to enable support for Microchip UCS1002 Programmable
USB Port Power Controller with Charger Emulation.
-config CHARGER_BD70528
- tristate "ROHM bd70528 charger driver"
- depends on MFD_ROHM_BD70528
- select LINEAR_RANGES
- help
- Say Y here to enable support for getting battery status
- information and altering charger configurations from charger
- block of the ROHM BD70528 Power Management IC.
-
config CHARGER_BD99954
tristate "ROHM bd99954 charger driver"
depends on I2C
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a7309a3d1a47..33059a91f60c 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -60,7 +60,7 @@ obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
-obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o pm2301_charger.o
+obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
obj-$(CONFIG_CHARGER_CPCAP) += cpcap-charger.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
@@ -96,7 +96,6 @@ obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
-obj-$(CONFIG_CHARGER_BD70528) += bd70528-charger.o
obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o
obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o
obj-$(CONFIG_RN5T618_POWER) += rn5t618_power.o
diff --git a/drivers/power/supply/ab8500-bm.h b/drivers/power/supply/ab8500-bm.h
index 41c69a4f2a1f..0c940571e5b0 100644
--- a/drivers/power/supply/ab8500-bm.h
+++ b/drivers/power/supply/ab8500-bm.h
@@ -506,9 +506,6 @@ struct abx500_bm_data {
int usb_safety_tmr_h;
int bkup_bat_v;
int bkup_bat_i;
- bool autopower_cfg;
- bool ac_enabled;
- bool usb_enabled;
bool no_maintenance;
bool capacity_scaling;
bool chg_unknown_bat;
@@ -730,4 +727,8 @@ int ab8500_bm_of_probe(struct device *dev,
struct device_node *np,
struct abx500_bm_data *bm);
+extern struct platform_driver ab8500_fg_driver;
+extern struct platform_driver ab8500_btemp_driver;
+extern struct platform_driver abx500_chargalg_driver;
+
#endif /* _AB8500_CHARGER_H_ */
diff --git a/drivers/power/supply/ab8500-chargalg.h b/drivers/power/supply/ab8500-chargalg.h
index 94a6f9068bc5..07e6ff50084f 100644
--- a/drivers/power/supply/ab8500-chargalg.h
+++ b/drivers/power/supply/ab8500-chargalg.h
@@ -15,7 +15,7 @@
* - POWER_SUPPLY_TYPE_USB,
* because only them store as drv_data pointer to struct ux500_charger.
*/
-#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy)
+#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)
/* Forward declaration */
struct ux500_charger;
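
The one-line fix above is subtle: the old macro body referenced a variable literally named psy instead of its parameter x, so it only worked because every existing caller happened to pass a local called psy; any other argument name would fail to build or silently pick up the wrong variable. A tiny userspace-C sketch of the failure mode, with types reduced to ints and a stand-in for power_supply_get_drvdata():

#include <stdio.h>

static int lookup_drvdata(int handle)
{
	return handle + 1000;		/* stand-in for power_supply_get_drvdata() */
}

/* Buggy: the body ignores the parameter and captures a caller-scope 'psy'. */
#define buggy_to_charger(x)	lookup_drvdata(psy)
/* Fixed: the body actually uses its argument. */
#define fixed_to_charger(x)	lookup_drvdata(x)

int main(void)
{
	int external_psy = 7;

	/* buggy_to_charger(external_psy) would not even compile here,
	 * since no variable named 'psy' is in scope. */
	printf("%d\n", fixed_to_charger(external_psy));
	return 0;
}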
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index fdfcd59fc43e..dbdcff32f353 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/component.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -932,26 +933,6 @@ static int __maybe_unused ab8500_btemp_suspend(struct device *dev)
return 0;
}
-static int ab8500_btemp_remove(struct platform_device *pdev)
-{
- struct ab8500_btemp *di = platform_get_drvdata(pdev);
- int i, irq;
-
- /* Disable interrupts */
- for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
- irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
- free_irq(irq, di);
- }
-
- /* Delete the work queue */
- destroy_workqueue(di->btemp_wq);
-
- flush_scheduled_work();
- power_supply_unregister(di->btemp_psy);
-
- return 0;
-}
-
static char *supply_interface[] = {
"ab8500_chargalg",
"ab8500_fg",
@@ -966,9 +947,42 @@ static const struct power_supply_desc ab8500_btemp_desc = {
.external_power_changed = ab8500_btemp_external_power_changed,
};
+static int ab8500_btemp_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct ab8500_btemp *di = dev_get_drvdata(dev);
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
+ if (di->btemp_wq == NULL) {
+ dev_err(dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
+ /* Kick off periodic temperature measurements */
+ ab8500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static void ab8500_btemp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct ab8500_btemp *di = dev_get_drvdata(dev);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+ flush_scheduled_work();
+}
+
+static const struct component_ops ab8500_btemp_component_ops = {
+ .bind = ab8500_btemp_bind,
+ .unbind = ab8500_btemp_unbind,
+};
+
static int ab8500_btemp_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct power_supply_config psy_cfg = {};
struct device *dev = &pdev->dev;
struct ab8500_btemp *di;
@@ -981,12 +995,6 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
di->bm = &ab8500_bm_data;
- ret = ab8500_bm_of_probe(dev, np, di->bm);
- if (ret) {
- dev_err(dev, "failed to get battery information\n");
- return ret;
- }
-
/* get parent data */
di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
@@ -1011,14 +1019,6 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
psy_cfg.drv_data = di;
- /* Create a work queue for the btemp */
- di->btemp_wq =
- alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
- if (di->btemp_wq == NULL) {
- dev_err(dev, "failed to create work queue\n");
- return -ENOMEM;
- }
-
/* Init work for measuring temperature periodically */
INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
ab8500_btemp_periodic_work);
@@ -1031,7 +1031,7 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
AB8500_BTEMP_HIGH_TH, &val);
if (ret < 0) {
dev_err(dev, "%s ab8500 read failed\n", __func__);
- goto free_btemp_wq;
+ return ret;
}
switch (val) {
case BTEMP_HIGH_TH_57_0:
@@ -1050,30 +1050,28 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
}
/* Register BTEMP power supply class */
- di->btemp_psy = power_supply_register(dev, &ab8500_btemp_desc,
- &psy_cfg);
+ di->btemp_psy = devm_power_supply_register(dev, &ab8500_btemp_desc,
+ &psy_cfg);
if (IS_ERR(di->btemp_psy)) {
dev_err(dev, "failed to register BTEMP psy\n");
- ret = PTR_ERR(di->btemp_psy);
- goto free_btemp_wq;
+ return PTR_ERR(di->btemp_psy);
}
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
- if (irq < 0) {
- ret = irq;
- goto free_irq;
- }
+ if (irq < 0)
+ return irq;
- ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ab8500_btemp_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_btemp_irq[i].name, di);
if (ret) {
dev_err(dev, "failed to request %s IRQ %d: %d\n"
, ab8500_btemp_irq[i].name, irq, ret);
- goto free_irq;
+ return ret;
}
dev_dbg(dev, "Requested %s IRQ %d: %d\n",
ab8500_btemp_irq[i].name, irq, ret);
@@ -1081,23 +1079,16 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, di);
- /* Kick off periodic temperature measurements */
- ab8500_btemp_periodic(di, true);
list_add_tail(&di->node, &ab8500_btemp_list);
- return ret;
+ return component_add(dev, &ab8500_btemp_component_ops);
+}
-free_irq:
- /* We also have to free all successfully registered irqs */
- for (i = i - 1; i >= 0; i--) {
- irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
- free_irq(irq, di);
- }
+static int ab8500_btemp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &ab8500_btemp_component_ops);
- power_supply_unregister(di->btemp_psy);
-free_btemp_wq:
- destroy_workqueue(di->btemp_wq);
- return ret;
+ return 0;
}
static SIMPLE_DEV_PM_OPS(ab8500_btemp_pm_ops, ab8500_btemp_suspend, ab8500_btemp_resume);
@@ -1106,8 +1097,9 @@ static const struct of_device_id ab8500_btemp_match[] = {
{ .compatible = "stericsson,ab8500-btemp", },
{ },
};
+MODULE_DEVICE_TABLE(of, ab8500_btemp_match);
-static struct platform_driver ab8500_btemp_driver = {
+struct platform_driver ab8500_btemp_driver = {
.probe = ab8500_btemp_probe,
.remove = ab8500_btemp_remove,
.driver = {
@@ -1116,20 +1108,6 @@ static struct platform_driver ab8500_btemp_driver = {
.pm = &ab8500_btemp_pm_ops,
},
};
-
-static int __init ab8500_btemp_init(void)
-{
- return platform_driver_register(&ab8500_btemp_driver);
-}
-
-static void __exit ab8500_btemp_exit(void)
-{
- platform_driver_unregister(&ab8500_btemp_driver);
-}
-
-device_initcall(ab8500_btemp_init);
-module_exit(ab8500_btemp_exit);
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
MODULE_ALIAS("platform:ab8500-btemp");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index a9be10eb2c22..fa49e12e5a60 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/component.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/notifier.h>
@@ -414,6 +415,14 @@ disable_otp:
static void ab8500_power_supply_changed(struct ab8500_charger *di,
struct power_supply *psy)
{
+ /*
+ * This happens if we get notifications or interrupts and
+ * the platform has been configured not to support one or
+ * other type of charging.
+ */
+ if (!psy)
+ return;
+
if (di->autopower_cfg) {
if (!di->usb.charger_connected &&
!di->ac.charger_connected &&
@@ -440,7 +449,15 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
if (!connected)
di->flags.vbus_drop_end = false;
- sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL, "present");
+ /*
+ * Sometimes the platform is configured not to support
+ * USB charging and no psy has been created, but we still
+ * will get these notifications.
+ */
+ if (di->usb_chg.psy) {
+ sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL,
+ "present");
+ }
if (connected) {
mutex_lock(&di->charger_attached_mutex);
@@ -3171,9 +3188,6 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
enum ab8500_usb_state bm_usb_state;
unsigned mA = *((unsigned *)power);
- if (!di)
- return NOTIFY_DONE;
-
if (event != USB_EVENT_VBUS) {
dev_dbg(di->dev, "not a standard host, returning\n");
return NOTIFY_DONE;
@@ -3276,10 +3290,74 @@ static struct notifier_block charger_nb = {
.notifier_call = ab8500_external_charger_prepare,
};
-static int ab8500_charger_remove(struct platform_device *pdev)
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+ "ab8500_btemp",
+};
+
+static const struct power_supply_desc ab8500_ac_chg_desc = {
+ .name = "ab8500_ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = ab8500_charger_ac_props,
+ .num_properties = ARRAY_SIZE(ab8500_charger_ac_props),
+ .get_property = ab8500_charger_ac_get_property,
+};
+
+static const struct power_supply_desc ab8500_usb_chg_desc = {
+ .name = "ab8500_usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = ab8500_charger_usb_props,
+ .num_properties = ARRAY_SIZE(ab8500_charger_usb_props),
+ .get_property = ab8500_charger_usb_get_property,
+};
+
+static int ab8500_charger_bind(struct device *dev)
{
- struct ab8500_charger *di = platform_get_drvdata(pdev);
- int i, irq, ret;
+ struct ab8500_charger *di = dev_get_drvdata(dev);
+ int ch_stat;
+ int ret;
+
+ /* Create a work queue for the charger */
+ di->charger_wq = alloc_ordered_workqueue("ab8500_charger_wq",
+ WQ_MEM_RECLAIM);
+ if (di->charger_wq == NULL) {
+ dev_err(dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
+ ch_stat = ab8500_charger_detect_chargers(di, false);
+
+ if (ch_stat & AC_PW_CONN) {
+ if (is_ab8500(di->parent))
+ queue_delayed_work(di->charger_wq,
+ &di->ac_charger_attached_work,
+ HZ);
+ }
+ if (ch_stat & USB_PW_CONN) {
+ if (is_ab8500(di->parent))
+ queue_delayed_work(di->charger_wq,
+ &di->usb_charger_attached_work,
+ HZ);
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->detect_usb_type_work);
+ }
+
+ ret = component_bind_all(dev, di);
+ if (ret) {
+ dev_err(dev, "can't bind component devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ab8500_charger_unbind(struct device *dev)
+{
+ struct ab8500_charger *di = dev_get_drvdata(dev);
+ int ret;
/* Disable AC charging */
ab8500_charger_ac_en(&di->ac_chg, false, 0, 0);
@@ -3287,68 +3365,47 @@ static int ab8500_charger_remove(struct platform_device *pdev)
/* Disable USB charging */
ab8500_charger_usb_en(&di->usb_chg, false, 0, 0);
- /* Disable interrupts */
- for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
- irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
- free_irq(irq, di);
- }
-
/* Backup battery voltage and current disable */
ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
if (ret < 0)
dev_err(di->dev, "%s mask and set failed\n", __func__);
- usb_unregister_notifier(di->usb_phy, &di->nb);
- usb_put_phy(di->usb_phy);
-
/* Delete the work queue */
destroy_workqueue(di->charger_wq);
- /* Unregister external charger enable notifier */
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
-
flush_scheduled_work();
- if (di->usb_chg.enabled)
- power_supply_unregister(di->usb_chg.psy);
- if (di->ac_chg.enabled && !di->ac_chg.external)
- power_supply_unregister(di->ac_chg.psy);
-
- return 0;
+ /* Unbind fg, btemp, algorithm */
+ component_unbind_all(dev, di);
}
-static char *supply_interface[] = {
- "ab8500_chargalg",
- "ab8500_fg",
- "ab8500_btemp",
+static const struct component_master_ops ab8500_charger_comp_ops = {
+ .bind = ab8500_charger_bind,
+ .unbind = ab8500_charger_unbind,
};
-static const struct power_supply_desc ab8500_ac_chg_desc = {
- .name = "ab8500_ac",
- .type = POWER_SUPPLY_TYPE_MAINS,
- .properties = ab8500_charger_ac_props,
- .num_properties = ARRAY_SIZE(ab8500_charger_ac_props),
- .get_property = ab8500_charger_ac_get_property,
+static struct platform_driver *const ab8500_charger_component_drivers[] = {
+ &ab8500_fg_driver,
+ &ab8500_btemp_driver,
+ &abx500_chargalg_driver,
};
-static const struct power_supply_desc ab8500_usb_chg_desc = {
- .name = "ab8500_usb",
- .type = POWER_SUPPLY_TYPE_USB,
- .properties = ab8500_charger_usb_props,
- .num_properties = ARRAY_SIZE(ab8500_charger_usb_props),
- .get_property = ab8500_charger_usb_get_property,
-};
+static int ab8500_charger_compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
static int ab8500_charger_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct component_match *match = NULL;
struct power_supply_config ac_psy_cfg = {}, usb_psy_cfg = {};
struct ab8500_charger *di;
- int irq, i, charger_status, ret = 0, ch_stat;
- struct device *dev = &pdev->dev;
+ int charger_status;
+ int i, irq;
+ int ret;
di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
if (!di)
@@ -3393,6 +3450,38 @@ static int ab8500_charger_probe(struct platform_device *pdev)
return ret;
}
+ /*
+ * VDD ADC supply needs to be enabled from this driver when there
+ * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+ * interrupts during charging
+ */
+ di->regu = devm_regulator_get(dev, "vddadc");
+ if (IS_ERR(di->regu)) {
+ ret = PTR_ERR(di->regu);
+ dev_err(dev, "failed to get vddadc regulator\n");
+ return ret;
+ }
+
+ /* Request interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev,
+ irq, NULL, ab8500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ ab8500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_charger_irq[i].name, irq, ret);
+ return ret;
+ }
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
+ ab8500_charger_irq[i].name, irq, ret);
+ }
+
/* initialize lock */
spin_lock_init(&di->usb_state.usb_lock);
mutex_init(&di->usb_ipt_crnt_lock);
@@ -3419,14 +3508,16 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->ac_chg.max_out_curr =
di->bm->chg_output_curr[di->bm->n_chg_out_curr - 1];
di->ac_chg.wdt_refresh = CHG_WD_INTERVAL;
- di->ac_chg.enabled = di->bm->ac_enabled;
+ /*
+ * The AB8505 only supports USB charging. If we are not the
+ * AB8505, register an AC charger.
+ *
+ * TODO: if this should be opt-in, add DT properties for this.
+ */
+ if (!is_ab8505(di->parent))
+ di->ac_chg.enabled = true;
di->ac_chg.external = false;
- /*notifier for external charger enabling*/
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_register(
- &charger_notifier_list, &charger_nb);
-
/* USB supply */
/* ux500_charger sub-class */
di->usb_chg.ops.enable = &ab8500_charger_usb_en;
@@ -3438,18 +3529,9 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.max_out_curr =
di->bm->chg_output_curr[di->bm->n_chg_out_curr - 1];
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
- di->usb_chg.enabled = di->bm->usb_enabled;
di->usb_chg.external = false;
di->usb_state.usb_current = -1;
- /* Create a work queue for the charger */
- di->charger_wq = alloc_ordered_workqueue("ab8500_charger_wq",
- WQ_MEM_RECLAIM);
- if (di->charger_wq == NULL) {
- dev_err(dev, "failed to create work queue\n");
- return -ENOMEM;
- }
-
mutex_init(&di->charger_attached_mutex);
/* Init work for HW failure check */
@@ -3500,61 +3582,32 @@ static int ab8500_charger_probe(struct platform_device *pdev)
INIT_WORK(&di->check_usb_thermal_prot_work,
ab8500_charger_check_usb_thermal_prot_work);
- /*
- * VDD ADC supply needs to be enabled from this driver when there
- * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
- * interrupts during charging
- */
- di->regu = devm_regulator_get(dev, "vddadc");
- if (IS_ERR(di->regu)) {
- ret = PTR_ERR(di->regu);
- dev_err(dev, "failed to get vddadc regulator\n");
- goto free_charger_wq;
- }
-
/* Initialize OVV, and other registers */
ret = ab8500_charger_init_hw_registers(di);
if (ret) {
dev_err(dev, "failed to initialize ABB registers\n");
- goto free_charger_wq;
+ return ret;
}
/* Register AC charger class */
if (di->ac_chg.enabled) {
- di->ac_chg.psy = power_supply_register(dev,
+ di->ac_chg.psy = devm_power_supply_register(dev,
&ab8500_ac_chg_desc,
&ac_psy_cfg);
if (IS_ERR(di->ac_chg.psy)) {
dev_err(dev, "failed to register AC charger\n");
- ret = PTR_ERR(di->ac_chg.psy);
- goto free_charger_wq;
+ return PTR_ERR(di->ac_chg.psy);
}
}
/* Register USB charger class */
- if (di->usb_chg.enabled) {
- di->usb_chg.psy = power_supply_register(dev,
- &ab8500_usb_chg_desc,
- &usb_psy_cfg);
- if (IS_ERR(di->usb_chg.psy)) {
- dev_err(dev, "failed to register USB charger\n");
- ret = PTR_ERR(di->usb_chg.psy);
- goto free_ac;
- }
- }
-
- di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR_OR_NULL(di->usb_phy)) {
- dev_err(dev, "failed to get usb transceiver\n");
- ret = -EINVAL;
- goto free_usb;
- }
- di->nb.notifier_call = ab8500_charger_usb_notifier_call;
- ret = usb_register_notifier(di->usb_phy, &di->nb);
- if (ret) {
- dev_err(dev, "failed to register usb notifier\n");
- goto put_usb_phy;
+ di->usb_chg.psy = devm_power_supply_register(dev,
+ &ab8500_usb_chg_desc,
+ &usb_psy_cfg);
+ if (IS_ERR(di->usb_chg.psy)) {
+ dev_err(dev, "failed to register USB charger\n");
+ return PTR_ERR(di->usb_chg.psy);
}
/* Identify the connected charger types during startup */
@@ -3566,84 +3619,93 @@ static int ab8500_charger_probe(struct platform_device *pdev)
sysfs_notify(&di->ac_chg.psy->dev.kobj, NULL, "present");
}
- if (charger_status & USB_PW_CONN) {
- di->vbus_detected = true;
- di->vbus_detected_start = true;
- queue_work(di->charger_wq,
- &di->detect_usb_type_work);
- }
-
- /* Register interrupts */
- for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
- irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
- if (irq < 0) {
- ret = irq;
- goto free_irq;
- }
+ platform_set_drvdata(pdev, di);
- ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
- ab8500_charger_irq[i].name, di);
+ /* Build a component match for every device bound to one of the subdrivers */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_component_drivers); i++) {
+ struct device_driver *drv = &ab8500_charger_component_drivers[i]->driver;
+ struct device *p = NULL, *d;
- if (ret != 0) {
- dev_err(dev, "failed to request %s IRQ %d: %d\n"
- , ab8500_charger_irq[i].name, irq, ret);
- goto free_irq;
+ while ((d = platform_find_device_by_driver(p, drv))) {
+ put_device(p);
+ component_match_add(dev, &match,
+ ab8500_charger_compare_dev, d);
+ p = d;
}
- dev_dbg(dev, "Requested %s IRQ %d: %d\n",
- ab8500_charger_irq[i].name, irq, ret);
+ put_device(p);
+ }
+ if (!match) {
+ dev_err(dev, "no matching components\n");
+ return -ENODEV;
+ }
+ if (IS_ERR(match)) {
+ dev_err(dev, "could not create component match\n");
+ return PTR_ERR(match);
}
- platform_set_drvdata(pdev, di);
-
- mutex_lock(&di->charger_attached_mutex);
+ /* Notifier for external charger enabling */
+ if (!di->ac_chg.enabled)
+ blocking_notifier_chain_register(
+ &charger_notifier_list, &charger_nb);
- ch_stat = ab8500_charger_detect_chargers(di, false);
- if ((ch_stat & AC_PW_CONN) == AC_PW_CONN) {
- if (is_ab8500(di->parent))
- queue_delayed_work(di->charger_wq,
- &di->ac_charger_attached_work,
- HZ);
+ di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(di->usb_phy)) {
+ dev_err(dev, "failed to get usb transceiver\n");
+ ret = -EINVAL;
+ goto out_charger_notifier;
}
- if ((ch_stat & USB_PW_CONN) == USB_PW_CONN) {
- if (is_ab8500(di->parent))
- queue_delayed_work(di->charger_wq,
- &di->usb_charger_attached_work,
- HZ);
+ di->nb.notifier_call = ab8500_charger_usb_notifier_call;
+ ret = usb_register_notifier(di->usb_phy, &di->nb);
+ if (ret) {
+ dev_err(dev, "failed to register usb notifier\n");
+ goto put_usb_phy;
}
- mutex_unlock(&di->charger_attached_mutex);
- return ret;
+ ret = component_master_add_with_match(&pdev->dev,
+ &ab8500_charger_comp_ops,
+ match);
+ if (ret) {
+ dev_err(dev, "failed to add component master\n");
+ goto free_notifier;
+ }
-free_irq:
- usb_unregister_notifier(di->usb_phy, &di->nb);
+ return 0;
- /* We also have to free all successfully registered irqs */
- for (i = i - 1; i >= 0; i--) {
- irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
- free_irq(irq, di);
- }
+free_notifier:
+ usb_unregister_notifier(di->usb_phy, &di->nb);
put_usb_phy:
usb_put_phy(di->usb_phy);
-free_usb:
- if (di->usb_chg.enabled)
- power_supply_unregister(di->usb_chg.psy);
-free_ac:
- if (di->ac_chg.enabled)
- power_supply_unregister(di->ac_chg.psy);
-free_charger_wq:
- destroy_workqueue(di->charger_wq);
+out_charger_notifier:
+ if (!di->ac_chg.enabled)
+ blocking_notifier_chain_unregister(
+ &charger_notifier_list, &charger_nb);
return ret;
}
+static int ab8500_charger_remove(struct platform_device *pdev)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ component_master_del(&pdev->dev, &ab8500_charger_comp_ops);
+
+ usb_unregister_notifier(di->usb_phy, &di->nb);
+ usb_put_phy(di->usb_phy);
+ if (!di->ac_chg.enabled)
+ blocking_notifier_chain_unregister(
+ &charger_notifier_list, &charger_nb);
+
+ return 0;
+}
+
static SIMPLE_DEV_PM_OPS(ab8500_charger_pm_ops, ab8500_charger_suspend, ab8500_charger_resume);
static const struct of_device_id ab8500_charger_match[] = {
{ .compatible = "stericsson,ab8500-charger", },
{ },
};
+MODULE_DEVICE_TABLE(of, ab8500_charger_match);
static struct platform_driver ab8500_charger_driver = {
.probe = ab8500_charger_probe,
@@ -3657,15 +3719,24 @@ static struct platform_driver ab8500_charger_driver = {
static int __init ab8500_charger_init(void)
{
+ int ret;
+
+ ret = platform_register_drivers(ab8500_charger_component_drivers,
+ ARRAY_SIZE(ab8500_charger_component_drivers));
+ if (ret)
+ return ret;
+
return platform_driver_register(&ab8500_charger_driver);
}
static void __exit ab8500_charger_exit(void)
{
+ platform_unregister_drivers(ab8500_charger_component_drivers,
+ ARRAY_SIZE(ab8500_charger_component_drivers));
platform_driver_unregister(&ab8500_charger_driver);
}
-subsys_initcall_sync(ab8500_charger_init);
+module_init(ab8500_charger_init);
module_exit(ab8500_charger_exit);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 0c7c01a0d979..a6ebdb269fdd 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/component.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -59,7 +60,7 @@
((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))));
/**
- * struct ab8500_fg_interrupts - ab8500 fg interupts
+ * struct ab8500_fg_interrupts - ab8500 fg interrupts
* @name: name of the interrupt
* @isr function pointer to the isr
*/
@@ -1727,6 +1728,7 @@ static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
break;
case AB8500_FG_CALIB_WAIT:
dev_dbg(di->dev, "Calibration WFI\n");
+ break;
default:
break;
}
@@ -2223,6 +2225,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
queue_work(di->fg_wq, &di->fg_work);
break;
}
+ break;
default:
break;
}
@@ -2980,27 +2983,6 @@ static int __maybe_unused ab8500_fg_suspend(struct device *dev)
return 0;
}
-static int ab8500_fg_remove(struct platform_device *pdev)
-{
- int ret = 0;
- struct ab8500_fg *di = platform_get_drvdata(pdev);
-
- list_del(&di->node);
-
- /* Disable coulomb counter */
- ret = ab8500_fg_coulomb_counter(di, false);
- if (ret)
- dev_err(di->dev, "failed to disable coulomb counter\n");
-
- destroy_workqueue(di->fg_wq);
- ab8500_fg_sysfs_exit(di);
-
- flush_scheduled_work();
- ab8500_fg_sysfs_psy_remove_attrs(di);
- power_supply_unregister(di->fg_psy);
- return ret;
-}
-
/* ab8500 fg driver interrupts and their respective isr */
static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
{"NCONV_ACCU", ab8500_fg_cc_convend_handler},
@@ -3024,11 +3006,50 @@ static const struct power_supply_desc ab8500_fg_desc = {
.external_power_changed = ab8500_fg_external_power_changed,
};
+static int ab8500_fg_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct ab8500_fg *di = dev_get_drvdata(dev);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
+ if (di->fg_wq == NULL) {
+ dev_err(dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
+ /* Start the coulomb counter */
+ ab8500_fg_coulomb_counter(di, true);
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ return 0;
+}
+
+static void ab8500_fg_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct ab8500_fg *di = dev_get_drvdata(dev);
+ int ret;
+
+ /* Disable coulomb counter */
+ ret = ab8500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(dev, "failed to disable coulomb counter\n");
+
+ destroy_workqueue(di->fg_wq);
+ flush_scheduled_work();
+}
+
+static const struct component_ops ab8500_fg_component_ops = {
+ .bind = ab8500_fg_bind,
+ .unbind = ab8500_fg_unbind,
+};
+
static int ab8500_fg_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct power_supply_config psy_cfg = {};
struct device *dev = &pdev->dev;
+ struct power_supply_config psy_cfg = {};
struct ab8500_fg *di;
int i, irq;
int ret = 0;
@@ -3039,12 +3060,6 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->bm = &ab8500_bm_data;
- ret = ab8500_bm_of_probe(dev, np, di->bm);
- if (ret) {
- dev_err(dev, "failed to get battery information\n");
- return ret;
- }
-
mutex_init(&di->cc_lock);
/* get parent data */
@@ -3074,13 +3089,6 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
- /* Create a work queue for running the FG algorithm */
- di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
- if (di->fg_wq == NULL) {
- dev_err(dev, "failed to create work queue\n");
- return -ENOMEM;
- }
-
/* Init work for running the fg algorithm instantly */
INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
@@ -3113,7 +3121,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_init_hw_registers(di);
if (ret) {
dev_err(dev, "failed to initialize registers\n");
- goto free_inst_curr_wq;
+ return ret;
}
/* Consider battery unknown until we're informed otherwise */
@@ -3121,15 +3129,13 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->flags.batt_id_received = false;
/* Register FG power supply class */
- di->fg_psy = power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
+ di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
if (IS_ERR(di->fg_psy)) {
dev_err(dev, "failed to register FG psy\n");
- ret = PTR_ERR(di->fg_psy);
- goto free_inst_curr_wq;
+ return PTR_ERR(di->fg_psy);
}
di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
- ab8500_fg_coulomb_counter(di, true);
/*
* Initialize completion used to notify completion and start
@@ -3141,19 +3147,18 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
- if (irq < 0) {
- ret = irq;
- goto free_irq;
- }
+ if (irq < 0)
+ return irq;
- ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ab8500_fg_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_fg_irq[i].name, di);
if (ret != 0) {
dev_err(dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq[i].name, irq, ret);
- goto free_irq;
+ return ret;
}
dev_dbg(dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq[i].name, irq, ret);
@@ -3168,14 +3173,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_sysfs_init(di);
if (ret) {
dev_err(dev, "failed to create sysfs entry\n");
- goto free_irq;
+ return ret;
}
ret = ab8500_fg_sysfs_psy_create_attrs(di);
if (ret) {
dev_err(dev, "failed to create FG psy\n");
ab8500_fg_sysfs_exit(di);
- goto free_irq;
+ return ret;
}
/* Calibrate the fg first time */
@@ -3185,24 +3190,21 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Use room temp as default value until we get an update from driver. */
di->bat_temp = 210;
- /* Run the FG algorithm */
- queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
-
list_add_tail(&di->node, &ab8500_fg_list);
- return ret;
+ return component_add(dev, &ab8500_fg_component_ops);
+}
-free_irq:
- /* We also have to free all registered irqs */
- while (--i >= 0) {
- /* Last assignment of i from primary interrupt handlers */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
- free_irq(irq, di);
- }
+static int ab8500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &ab8500_fg_component_ops);
+ list_del(&di->node);
+ ab8500_fg_sysfs_exit(di);
+ ab8500_fg_sysfs_psy_remove_attrs(di);
- power_supply_unregister(di->fg_psy);
-free_inst_curr_wq:
- destroy_workqueue(di->fg_wq);
return ret;
}
@@ -3212,8 +3214,9 @@ static const struct of_device_id ab8500_fg_match[] = {
{ .compatible = "stericsson,ab8500-fg", },
{ },
};
+MODULE_DEVICE_TABLE(of, ab8500_fg_match);
-static struct platform_driver ab8500_fg_driver = {
+struct platform_driver ab8500_fg_driver = {
.probe = ab8500_fg_probe,
.remove = ab8500_fg_remove,
.driver = {
@@ -3222,20 +3225,6 @@ static struct platform_driver ab8500_fg_driver = {
.pm = &ab8500_fg_pm_ops,
},
};
-
-static int __init ab8500_fg_init(void)
-{
- return platform_driver_register(&ab8500_fg_driver);
-}
-
-static void __exit ab8500_fg_exit(void)
-{
- platform_driver_unregister(&ab8500_fg_driver);
-}
-
-subsys_initcall_sync(ab8500_fg_init);
-module_exit(ab8500_fg_exit);
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
MODULE_ALIAS("platform:ab8500-fg");
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index f5b792243727..b72826cf6794 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/component.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -1149,6 +1150,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
default:
break;
}
+ break;
default:
break;
}
@@ -1943,13 +1945,44 @@ static int __maybe_unused abx500_chargalg_suspend(struct device *dev)
return 0;
}
-static int abx500_chargalg_remove(struct platform_device *pdev)
+static char *supply_interface[] = {
+ "ab8500_fg",
+};
+
+static const struct power_supply_desc abx500_chargalg_desc = {
+ .name = "abx500_chargalg",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = abx500_chargalg_props,
+ .num_properties = ARRAY_SIZE(abx500_chargalg_props),
+ .get_property = abx500_chargalg_get_property,
+ .external_power_changed = abx500_chargalg_external_power_changed,
+};
+
+static int abx500_chargalg_bind(struct device *dev, struct device *master,
+ void *data)
{
- struct abx500_chargalg *di = platform_get_drvdata(pdev);
+ struct abx500_chargalg *di = dev_get_drvdata(dev);
- /* sysfs interface to enable/disbale charging from user space */
- abx500_chargalg_sysfs_exit(di);
+ /* Create a work queue for the chargalg */
+ di->chargalg_wq = alloc_ordered_workqueue("abx500_chargalg_wq",
+ WQ_MEM_RECLAIM);
+ if (di->chargalg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
+ /* Run the charging algorithm */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ return 0;
+}
+static void abx500_chargalg_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct abx500_chargalg *di = dev_get_drvdata(dev);
+
+ /* Stop all timers and work */
hrtimer_cancel(&di->safety_timer);
hrtimer_cancel(&di->maintenance_timer);
@@ -1959,48 +1992,29 @@ static int abx500_chargalg_remove(struct platform_device *pdev)
/* Delete the work queue */
destroy_workqueue(di->chargalg_wq);
-
- power_supply_unregister(di->chargalg_psy);
-
- return 0;
+ flush_scheduled_work();
}
-static char *supply_interface[] = {
- "ab8500_fg",
-};
-
-static const struct power_supply_desc abx500_chargalg_desc = {
- .name = "abx500_chargalg",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = abx500_chargalg_props,
- .num_properties = ARRAY_SIZE(abx500_chargalg_props),
- .get_property = abx500_chargalg_get_property,
- .external_power_changed = abx500_chargalg_external_power_changed,
+static const struct component_ops abx500_chargalg_component_ops = {
+ .bind = abx500_chargalg_bind,
+ .unbind = abx500_chargalg_unbind,
};
static int abx500_chargalg_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct power_supply_config psy_cfg = {};
struct abx500_chargalg *di;
int ret = 0;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
di->bm = &ab8500_bm_data;
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
- if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
- return ret;
- }
-
/* get device struct and parent */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
psy_cfg.supplied_to = supply_interface;
@@ -2016,14 +2030,6 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
di->maintenance_timer.function =
abx500_chargalg_maintenance_timer_expired;
- /* Create a work queue for the chargalg */
- di->chargalg_wq = alloc_ordered_workqueue("abx500_chargalg_wq",
- WQ_MEM_RECLAIM);
- if (di->chargalg_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
- return -ENOMEM;
- }
-
/* Init work for chargalg */
INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
abx500_chargalg_periodic_work);
@@ -2037,12 +2043,12 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
di->chg_info.prev_conn_chg = -1;
/* Register chargalg power supply class */
- di->chargalg_psy = power_supply_register(di->dev, &abx500_chargalg_desc,
+ di->chargalg_psy = devm_power_supply_register(di->dev,
+ &abx500_chargalg_desc,
&psy_cfg);
if (IS_ERR(di->chargalg_psy)) {
dev_err(di->dev, "failed to register chargalg psy\n");
- ret = PTR_ERR(di->chargalg_psy);
- goto free_chargalg_wq;
+ return PTR_ERR(di->chargalg_psy);
}
platform_set_drvdata(pdev, di);
@@ -2051,21 +2057,24 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
ret = abx500_chargalg_sysfs_init(di);
if (ret) {
dev_err(di->dev, "failed to create sysfs entry\n");
- goto free_psy;
+ return ret;
}
di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
- /* Run the charging algorithm */
- queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
-
dev_info(di->dev, "probe success\n");
- return ret;
+ return component_add(dev, &abx500_chargalg_component_ops);
+}
-free_psy:
- power_supply_unregister(di->chargalg_psy);
-free_chargalg_wq:
- destroy_workqueue(di->chargalg_wq);
- return ret;
+static int abx500_chargalg_remove(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &abx500_chargalg_component_ops);
+
+ /* sysfs interface to enable/disable charging from user space */
+ abx500_chargalg_sysfs_exit(di);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(abx500_chargalg_pm_ops, abx500_chargalg_suspend, abx500_chargalg_resume);
@@ -2075,7 +2084,7 @@ static const struct of_device_id ab8500_chargalg_match[] = {
{ },
};
-static struct platform_driver abx500_chargalg_driver = {
+struct platform_driver abx500_chargalg_driver = {
.probe = abx500_chargalg_probe,
.remove = abx500_chargalg_remove,
.driver = {
@@ -2084,9 +2093,6 @@ static struct platform_driver abx500_chargalg_driver = {
.pm = &abx500_chargalg_pm_ops,
},
};
-
-module_platform_driver(abx500_chargalg_driver);
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
MODULE_ALIAS("platform:abx500-chargalg");
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index e84b6e4da14a..18a9db0df4b1 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -40,6 +40,7 @@
#define AXP209_FG_PERCENT GENMASK(6, 0)
#define AXP22X_FG_VALID BIT(7)
+#define AXP20X_CHRG_CTRL1_ENABLE BIT(7)
#define AXP20X_CHRG_CTRL1_TGT_VOLT GENMASK(6, 5)
#define AXP20X_CHRG_CTRL1_TGT_4_1V (0 << 5)
#define AXP20X_CHRG_CTRL1_TGT_4_15V (1 << 5)
@@ -468,7 +469,18 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
return axp20x_set_max_constant_charge_current(axp20x_batt,
val->intval);
-
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (val->intval) {
+ case POWER_SUPPLY_STATUS_CHARGING:
+ return regmap_update_bits(axp20x_batt->regmap, AXP20X_CHRG_CTRL1,
+ AXP20X_CHRG_CTRL1_ENABLE, AXP20X_CHRG_CTRL1_ENABLE);
+
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ return regmap_update_bits(axp20x_batt->regmap, AXP20X_CHRG_CTRL1,
+ AXP20X_CHRG_CTRL1_ENABLE, 0);
+ }
+ fallthrough;
default:
return -EINVAL;
}
@@ -491,7 +503,8 @@ static enum power_supply_property axp20x_battery_props[] = {
static int axp20x_battery_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN ||
+ return psp == POWER_SUPPLY_PROP_STATUS ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN ||
psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN ||
psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT ||
psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
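
A brief usage sketch (not from this patch) of the newly writable STATUS property as seen by another kernel consumer; psy is assumed to be a reference to this supply obtained earlier, e.g. via power_supply_get_by_name():

/* Stop charging through the writable STATUS property (illustrative). */
union power_supply_propval val = {
	.intval = POWER_SUPPLY_STATUS_NOT_CHARGING,
};
int err = power_supply_set_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
/* The driver clears AXP20X_CHRG_CTRL1_ENABLE; writing
 * POWER_SUPPLY_STATUS_CHARGING sets the bit again. */

Because axp20x_battery_prop_writeable() now reports STATUS as writable, the same toggle is also exposed to user space through the supply's sysfs status attribute.
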
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 39e16ecb7638..2ba2d8d6b8e6 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -142,9 +142,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
for (i = 0; i < NR_RETRY_CNT; i++) {
ret = regmap_read(info->regmap, reg, &val);
- if (ret == -EBUSY)
- continue;
- else
+ if (ret != -EBUSY)
break;
}
@@ -676,7 +674,7 @@ intr_failed:
* detection reports one despite it not being there.
* Please keep this listed sorted alphabetically.
*/
-static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+static const struct dmi_system_id axp288_no_battery_list[] = {
{
/* ACEPC T8 Cherry Trail Z8350 mini PC */
.matches = {
@@ -723,15 +721,6 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"),
},
},
- {
- /* Meegopad T08 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
- DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."),
- DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
- DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
- },
- },
{ /* Mele PCG03 Mini PC */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
@@ -745,6 +734,15 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
}
},
+ {
+ /* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.11"),
+ },
+ },
{}
};
@@ -764,7 +762,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
};
unsigned int val;
- if (dmi_check_system(axp288_fuel_gauge_blacklist))
+ if (dmi_check_system(axp288_no_battery_list))
return -ENODEV;
/*
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
deleted file mode 100644
index 7c1f0b99c71b..000000000000
--- a/drivers/power/supply/bd70528-charger.c
+++ /dev/null
@@ -1,710 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-//
-// Copyright (C) 2018 ROHM Semiconductors
-//
-// power-supply driver for ROHM BD70528 PMIC
-
-/*
- * BD70528 charger HW state machine.
- *
- * The thermal shutdown state is not drawn. From any other state but
- * battery error and suspend it is possible to go to TSD/TMP states
- * if temperature is out of bounds.
- *
- * CHG_RST = H
- * or CHG_EN=L
- * or (DCIN2_UVLO=L && DCIN1_UVLO=L)
- * or (DCIN2_OVLO=H & DCIN1_UVKLO=L)
- *
- * +--------------+ +--------------+
- * | | | |
- * | Any state +-------> | Suspend |
- * | | | |
- * +--------------+ +------+-------+
- * |
- * CHG_EN = H && BAT_DET = H && |
- * No errors (temp, bat_ov, UVLO, |
- * OVLO...) |
- * |
- * BAT_OV or +---------v----------+
- * (DBAT && TTRI) | |
- * +-----------------+ Trickle Charge | <---------------+
- * | | | |
- * | +-------+------------+ |
- * | | |
- * | | ^ |
- * | V_BAT > VTRI_TH | | VBAT < VTRI_TH - 50mV |
- * | | | |
- * | v | |
- * | | |
- * | BAT_OV or +----------+----+ |
- * | (DBAT && TFST) | | |
- * | +----------------+ Fast Charge | |
- * | | | | |
- * v v +----+----------+ |
- * | |
- *+----------------+ ILIM_DET=L | ^ ILIM_DET |
- *| | & CV_DET=H | | or CV_DET=L |
- *| Battery Error | & VBAT > | | or VBAT < VRECHG_TH |
- *| | VRECHG_TH | | or IBAT > IFST/x |
- *+----------------+ & IBAT < | | |
- * IFST/x v | |
- * ^ | |
- * | +---------+-+ |
- * | | | |
- * +-------------------+ Top OFF | |
- * BAT_OV = H or | | |
- * (DBAT && TFST) +-----+-----+ |
- * | |
- * Stay top-off for 15s | |
- * v |
- * |
- * +--------+ |
- * | | |
- * | Done +-------------------------+
- * | |
- * +--------+ VBAT < VRECHG_TH
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/rohm-bd70528.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/linear_range.h>
-
-#define CHG_STAT_SUSPEND 0x0
-#define CHG_STAT_TRICKLE 0x1
-#define CHG_STAT_FAST 0x3
-#define CHG_STAT_TOPOFF 0xe
-#define CHG_STAT_DONE 0xf
-#define CHG_STAT_OTP_TRICKLE 0x10
-#define CHG_STAT_OTP_FAST 0x11
-#define CHG_STAT_OTP_DONE 0x12
-#define CHG_STAT_TSD_TRICKLE 0x20
-#define CHG_STAT_TSD_FAST 0x21
-#define CHG_STAT_TSD_TOPOFF 0x22
-#define CHG_STAT_BAT_ERR 0x7f
-
-static const char *bd70528_charger_model = "BD70528";
-static const char *bd70528_charger_manufacturer = "ROHM Semiconductors";
-
-#define BD_ERR_IRQ_HND(_name_, _wrn_) \
-static irqreturn_t bd0528_##_name_##_interrupt(int irq, void *arg) \
-{ \
- struct power_supply *psy = (struct power_supply *)arg; \
- \
- power_supply_changed(psy); \
- dev_err(&psy->dev, (_wrn_)); \
- \
- return IRQ_HANDLED; \
-}
-
-#define BD_INFO_IRQ_HND(_name_, _wrn_) \
-static irqreturn_t bd0528_##_name_##_interrupt(int irq, void *arg) \
-{ \
- struct power_supply *psy = (struct power_supply *)arg; \
- \
- power_supply_changed(psy); \
- dev_dbg(&psy->dev, (_wrn_)); \
- \
- return IRQ_HANDLED; \
-}
-
-#define BD_IRQ_HND(_name_) bd0528_##_name_##_interrupt
-
-struct bd70528_psy {
- struct regmap *regmap;
- struct device *dev;
- struct power_supply *psy;
-};
-
-BD_ERR_IRQ_HND(BAT_OV_DET, "Battery overvoltage detected\n");
-BD_ERR_IRQ_HND(DBAT_DET, "Dead battery detected\n");
-BD_ERR_IRQ_HND(COLD_DET, "Battery cold\n");
-BD_ERR_IRQ_HND(HOT_DET, "Battery hot\n");
-BD_ERR_IRQ_HND(CHG_TSD, "Charger thermal shutdown\n");
-BD_ERR_IRQ_HND(DCIN2_OV_DET, "DCIN2 overvoltage detected\n");
-
-BD_INFO_IRQ_HND(BAT_OV_RES, "Battery voltage back to normal\n");
-BD_INFO_IRQ_HND(COLD_RES, "Battery temperature back to normal\n");
-BD_INFO_IRQ_HND(HOT_RES, "Battery temperature back to normal\n");
-BD_INFO_IRQ_HND(BAT_RMV, "Battery removed\n");
-BD_INFO_IRQ_HND(BAT_DET, "Battery detected\n");
-BD_INFO_IRQ_HND(DCIN2_OV_RES, "DCIN2 voltage back to normal\n");
-BD_INFO_IRQ_HND(DCIN2_RMV, "DCIN2 removed\n");
-BD_INFO_IRQ_HND(DCIN2_DET, "DCIN2 detected\n");
-BD_INFO_IRQ_HND(DCIN1_RMV, "DCIN1 removed\n");
-BD_INFO_IRQ_HND(DCIN1_DET, "DCIN1 detected\n");
-
-struct irq_name_pair {
- const char *n;
- irqreturn_t (*h)(int irq, void *arg);
-};
-
-static int bd70528_get_irqs(struct platform_device *pdev,
- struct bd70528_psy *bdpsy)
-{
- int irq, i, ret;
- unsigned int mask;
- static const struct irq_name_pair bd70528_chg_irqs[] = {
- { .n = "bd70528-bat-ov-res", .h = BD_IRQ_HND(BAT_OV_RES) },
- { .n = "bd70528-bat-ov-det", .h = BD_IRQ_HND(BAT_OV_DET) },
- { .n = "bd70528-bat-dead", .h = BD_IRQ_HND(DBAT_DET) },
- { .n = "bd70528-bat-warmed", .h = BD_IRQ_HND(COLD_RES) },
- { .n = "bd70528-bat-cold", .h = BD_IRQ_HND(COLD_DET) },
- { .n = "bd70528-bat-cooled", .h = BD_IRQ_HND(HOT_RES) },
- { .n = "bd70528-bat-hot", .h = BD_IRQ_HND(HOT_DET) },
- { .n = "bd70528-chg-tshd", .h = BD_IRQ_HND(CHG_TSD) },
- { .n = "bd70528-bat-removed", .h = BD_IRQ_HND(BAT_RMV) },
- { .n = "bd70528-bat-detected", .h = BD_IRQ_HND(BAT_DET) },
- { .n = "bd70528-dcin2-ov-res", .h = BD_IRQ_HND(DCIN2_OV_RES) },
- { .n = "bd70528-dcin2-ov-det", .h = BD_IRQ_HND(DCIN2_OV_DET) },
- { .n = "bd70528-dcin2-removed", .h = BD_IRQ_HND(DCIN2_RMV) },
- { .n = "bd70528-dcin2-detected", .h = BD_IRQ_HND(DCIN2_DET) },
- { .n = "bd70528-dcin1-removed", .h = BD_IRQ_HND(DCIN1_RMV) },
- { .n = "bd70528-dcin1-detected", .h = BD_IRQ_HND(DCIN1_DET) },
- };
-
- for (i = 0; i < ARRAY_SIZE(bd70528_chg_irqs); i++) {
- irq = platform_get_irq_byname(pdev, bd70528_chg_irqs[i].n);
- if (irq < 0) {
- dev_err(&pdev->dev, "Bad IRQ information for %s (%d)\n",
- bd70528_chg_irqs[i].n, irq);
- return irq;
- }
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- bd70528_chg_irqs[i].h,
- IRQF_ONESHOT,
- bd70528_chg_irqs[i].n,
- bdpsy->psy);
-
- if (ret)
- return ret;
- }
- /*
- * BD70528 irq controller is not touching the main mask register.
- * So enable the charger block interrupts at main level. We can just
- * leave them enabled as irq-controller should disable irqs
- * from sub-registers when IRQ is disabled or freed.
- */
- mask = BD70528_REG_INT_BAT1_MASK | BD70528_REG_INT_BAT2_MASK;
- ret = regmap_update_bits(bdpsy->regmap,
- BD70528_REG_INT_MAIN_MASK, mask, 0);
- if (ret)
- dev_err(&pdev->dev, "Failed to enable charger IRQs\n");
-
- return ret;
-}
-
-static int bd70528_get_charger_status(struct bd70528_psy *bdpsy, int *val)
-{
- int ret;
- unsigned int v;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_CURR_STAT, &v);
- if (ret) {
- dev_err(bdpsy->dev, "Charger state read failure %d\n",
- ret);
- return ret;
- }
-
- switch (v & BD70528_MASK_CHG_STAT) {
- case CHG_STAT_SUSPEND:
- /* Maybe we should check the CHG_TTRI_EN? */
- case CHG_STAT_OTP_TRICKLE:
- case CHG_STAT_OTP_FAST:
- case CHG_STAT_OTP_DONE:
- case CHG_STAT_TSD_TRICKLE:
- case CHG_STAT_TSD_FAST:
- case CHG_STAT_TSD_TOPOFF:
- case CHG_STAT_BAT_ERR:
- *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
- break;
- case CHG_STAT_DONE:
- *val = POWER_SUPPLY_STATUS_FULL;
- break;
- case CHG_STAT_TRICKLE:
- case CHG_STAT_FAST:
- case CHG_STAT_TOPOFF:
- *val = POWER_SUPPLY_STATUS_CHARGING;
- break;
- default:
- *val = POWER_SUPPLY_STATUS_UNKNOWN;
- break;
- }
-
- return 0;
-}
-
-static int bd70528_get_charge_type(struct bd70528_psy *bdpsy, int *val)
-{
- int ret;
- unsigned int v;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_CURR_STAT, &v);
- if (ret) {
- dev_err(bdpsy->dev, "Charger state read failure %d\n",
- ret);
- return ret;
- }
-
- switch (v & BD70528_MASK_CHG_STAT) {
- case CHG_STAT_TRICKLE:
- *val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
- break;
- case CHG_STAT_FAST:
- case CHG_STAT_TOPOFF:
- *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
- break;
- case CHG_STAT_DONE:
- case CHG_STAT_SUSPEND:
- /* Maybe we should check the CHG_TTRI_EN? */
- case CHG_STAT_OTP_TRICKLE:
- case CHG_STAT_OTP_FAST:
- case CHG_STAT_OTP_DONE:
- case CHG_STAT_TSD_TRICKLE:
- case CHG_STAT_TSD_FAST:
- case CHG_STAT_TSD_TOPOFF:
- case CHG_STAT_BAT_ERR:
- *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
- break;
- default:
- *val = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
- break;
- }
-
- return 0;
-}
-
-static int bd70528_get_battery_health(struct bd70528_psy *bdpsy, int *val)
-{
- int ret;
- unsigned int v;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_BAT_STAT, &v);
- if (ret) {
- dev_err(bdpsy->dev, "Battery state read failure %d\n",
- ret);
- return ret;
- }
- /* No battery? */
- if (!(v & BD70528_MASK_CHG_BAT_DETECT))
- *val = POWER_SUPPLY_HEALTH_DEAD;
- else if (v & BD70528_MASK_CHG_BAT_OVERVOLT)
- *val = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- else if (v & BD70528_MASK_CHG_BAT_TIMER)
- *val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
- else
- *val = POWER_SUPPLY_HEALTH_GOOD;
-
- return 0;
-}
-
-static int bd70528_get_online(struct bd70528_psy *bdpsy, int *val)
-{
- int ret;
- unsigned int v;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_IN_STAT, &v);
- if (ret) {
- dev_err(bdpsy->dev, "DC1 IN state read failure %d\n",
- ret);
- return ret;
- }
-
- *val = (v & BD70528_MASK_CHG_DCIN1_UVLO) ? 1 : 0;
-
- return 0;
-}
-
-static int bd70528_get_present(struct bd70528_psy *bdpsy, int *val)
-{
- int ret;
- unsigned int v;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_BAT_STAT, &v);
- if (ret) {
- dev_err(bdpsy->dev, "Battery state read failure %d\n",
- ret);
- return ret;
- }
-
- *val = (v & BD70528_MASK_CHG_BAT_DETECT) ? 1 : 0;
-
- return 0;
-}
-
-static const struct linear_range current_limit_ranges[] = {
- {
- .min = 5,
- .step = 1,
- .min_sel = 0,
- .max_sel = 0x22,
- },
- {
- .min = 40,
- .step = 5,
- .min_sel = 0x23,
- .max_sel = 0x26,
- },
- {
- .min = 60,
- .step = 20,
- .min_sel = 0x27,
- .max_sel = 0x2d,
- },
- {
- .min = 200,
- .step = 50,
- .min_sel = 0x2e,
- .max_sel = 0x34,
- },
- {
- .min = 500,
- .step = 0,
- .min_sel = 0x35,
- .max_sel = 0x3f,
- },
-};
-
-/*
- * BD70528 would support setting and getting own charge current/
- * voltage for low temperatures. The driver currently only reads
- * the charge current at room temperature. We do set both though.
- */
-static const struct linear_range warm_charge_curr[] = {
- {
- .min = 10,
- .step = 10,
- .min_sel = 0,
- .max_sel = 0x12,
- },
- {
- .min = 200,
- .step = 25,
- .min_sel = 0x13,
- .max_sel = 0x1f,
- },
-};
-
-/*
- * Cold charge current selectors are identical to warm charge current
- * selectors. The difference is that only smaller currents are available
- * at cold charge range.
- */
-#define MAX_COLD_CHG_CURR_SEL 0x15
-#define MAX_WARM_CHG_CURR_SEL 0x1f
-#define MIN_CHG_CURR_SEL 0x0
-
-static int get_charge_current(struct bd70528_psy *bdpsy, int *ma)
-{
- unsigned int sel;
- int ret;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_CHG_CURR_WARM,
- &sel);
- if (ret) {
- dev_err(bdpsy->dev,
- "Charge current reading failed (%d)\n", ret);
- return ret;
- }
-
- sel &= BD70528_MASK_CHG_CHG_CURR;
-
- ret = linear_range_get_value_array(&warm_charge_curr[0],
- ARRAY_SIZE(warm_charge_curr),
- sel, ma);
- if (ret) {
- dev_err(bdpsy->dev,
- "Unknown charge current value 0x%x\n",
- sel);
- }
-
- return ret;
-}
-
-static int get_current_limit(struct bd70528_psy *bdpsy, int *ma)
-{
- unsigned int sel;
- int ret;
-
- ret = regmap_read(bdpsy->regmap, BD70528_REG_CHG_DCIN_ILIM,
- &sel);
-
- if (ret) {
- dev_err(bdpsy->dev,
- "Input current limit reading failed (%d)\n", ret);
- return ret;
- }
-
- sel &= BD70528_MASK_CHG_DCIN_ILIM;
-
- ret = linear_range_get_value_array(&current_limit_ranges[0],
- ARRAY_SIZE(current_limit_ranges),
- sel, ma);
- if (ret) {
- /* Unspecified values mean 500 mA */
- *ma = 500;
- }
- return 0;
-}
-
-static enum power_supply_property bd70528_charger_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_CHARGE_TYPE,
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
- POWER_SUPPLY_PROP_MODEL_NAME,
- POWER_SUPPLY_PROP_MANUFACTURER,
-};
-
-static int bd70528_charger_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct bd70528_psy *bdpsy = power_supply_get_drvdata(psy);
- int ret = 0;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- return bd70528_get_charger_status(bdpsy, &val->intval);
- case POWER_SUPPLY_PROP_CHARGE_TYPE:
- return bd70528_get_charge_type(bdpsy, &val->intval);
- case POWER_SUPPLY_PROP_HEALTH:
- return bd70528_get_battery_health(bdpsy, &val->intval);
- case POWER_SUPPLY_PROP_PRESENT:
- return bd70528_get_present(bdpsy, &val->intval);
- case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- ret = get_current_limit(bdpsy, &val->intval);
- val->intval *= 1000;
- return ret;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- ret = get_charge_current(bdpsy, &val->intval);
- val->intval *= 1000;
- return ret;
- case POWER_SUPPLY_PROP_ONLINE:
- return bd70528_get_online(bdpsy, &val->intval);
- case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = bd70528_charger_model;
- return 0;
- case POWER_SUPPLY_PROP_MANUFACTURER:
- val->strval = bd70528_charger_manufacturer;
- return 0;
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int bd70528_prop_is_writable(struct power_supply *psy,
- enum power_supply_property psp)
-{
- switch (psp) {
- case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- return 1;
- default:
- break;
- }
- return 0;
-}
-
-static int set_charge_current(struct bd70528_psy *bdpsy, int ma)
-{
- unsigned int reg;
- int ret = 0, tmpret;
- bool found;
-
- if (ma > 500) {
- dev_warn(bdpsy->dev,
- "Requested charge current %u exceed maximum (500mA)\n",
- ma);
- reg = MAX_WARM_CHG_CURR_SEL;
- goto set;
- }
- if (ma < 10) {
- dev_err(bdpsy->dev,
- "Requested charge current %u smaller than min (10mA)\n",
- ma);
- reg = MIN_CHG_CURR_SEL;
- ret = -EINVAL;
- goto set;
- }
-
-/*
- * For BD70528 voltage/current limits we happily accept any value which
- * belongs the range. We could check if value matching the selector is
- * desired by computing the range min + (sel - sel_low) * range step - but
- * I guess it is enough if we use voltage/current which is closest (below)
- * the requested?
- */
-
- ret = linear_range_get_selector_low_array(warm_charge_curr,
- ARRAY_SIZE(warm_charge_curr),
- ma, &reg, &found);
- if (ret) {
- dev_err(bdpsy->dev,
- "Unsupported charge current %u mA\n", ma);
- reg = MIN_CHG_CURR_SEL;
- goto set;
- }
- if (!found) {
- /*
- * There was a gap in supported values and we hit it.
- * Yet a smaller value was found so we use it.
- */
- dev_warn(bdpsy->dev,
- "Unsupported charge current %u mA\n", ma);
- }
-set:
-
- tmpret = regmap_update_bits(bdpsy->regmap,
- BD70528_REG_CHG_CHG_CURR_WARM,
- BD70528_MASK_CHG_CHG_CURR, reg);
- if (tmpret)
- dev_err(bdpsy->dev,
- "Charge current write failure (%d)\n", tmpret);
-
- if (reg > MAX_COLD_CHG_CURR_SEL)
- reg = MAX_COLD_CHG_CURR_SEL;
-
- if (!tmpret)
- tmpret = regmap_update_bits(bdpsy->regmap,
- BD70528_REG_CHG_CHG_CURR_COLD,
- BD70528_MASK_CHG_CHG_CURR, reg);
-
- if (!ret)
- ret = tmpret;
-
- return ret;
-}
-
-#define MAX_CURR_LIMIT_SEL 0x34
-#define MIN_CURR_LIMIT_SEL 0x0
-
-static int set_current_limit(struct bd70528_psy *bdpsy, int ma)
-{
- unsigned int reg;
- int ret = 0, tmpret;
- bool found;
-
- if (ma > 500) {
- dev_warn(bdpsy->dev,
- "Requested current limit %u exceed maximum (500mA)\n",
- ma);
- reg = MAX_CURR_LIMIT_SEL;
- goto set;
- }
- if (ma < 5) {
- dev_err(bdpsy->dev,
- "Requested current limit %u smaller than min (5mA)\n",
- ma);
- reg = MIN_CURR_LIMIT_SEL;
- ret = -EINVAL;
- goto set;
- }
-
- ret = linear_range_get_selector_low_array(current_limit_ranges,
- ARRAY_SIZE(current_limit_ranges),
- ma, &reg, &found);
- if (ret) {
- dev_err(bdpsy->dev, "Unsupported current limit %umA\n", ma);
- reg = MIN_CURR_LIMIT_SEL;
- goto set;
- }
- if (!found) {
- /*
- * There was a gap in supported values and we hit it.
- * We found a smaller value from ranges and use it.
- * Warn user though.
- */
- dev_warn(bdpsy->dev, "Unsupported current limit %umA\n", ma);
- }
-
-set:
- tmpret = regmap_update_bits(bdpsy->regmap,
- BD70528_REG_CHG_DCIN_ILIM,
- BD70528_MASK_CHG_DCIN_ILIM, reg);
-
- if (!ret)
- ret = tmpret;
-
- return ret;
-}
-
-static int bd70528_charger_set_property(struct power_supply *psy,
- enum power_supply_property psp,
- const union power_supply_propval *val)
-{
- struct bd70528_psy *bdpsy = power_supply_get_drvdata(psy);
-
- switch (psp) {
- case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- return set_current_limit(bdpsy, val->intval / 1000);
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- return set_charge_current(bdpsy, val->intval / 1000);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static const struct power_supply_desc bd70528_charger_desc = {
- .name = "bd70528-charger",
- .type = POWER_SUPPLY_TYPE_MAINS,
- .properties = bd70528_charger_props,
- .num_properties = ARRAY_SIZE(bd70528_charger_props),
- .get_property = bd70528_charger_get_property,
- .set_property = bd70528_charger_set_property,
- .property_is_writeable = bd70528_prop_is_writable,
-};
-
-static int bd70528_power_probe(struct platform_device *pdev)
-{
- struct bd70528_psy *bdpsy;
- struct power_supply_config cfg = {};
-
- bdpsy = devm_kzalloc(&pdev->dev, sizeof(*bdpsy), GFP_KERNEL);
- if (!bdpsy)
- return -ENOMEM;
-
- bdpsy->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!bdpsy->regmap) {
- dev_err(&pdev->dev, "No regmap found for chip\n");
- return -EINVAL;
- }
- bdpsy->dev = &pdev->dev;
-
- platform_set_drvdata(pdev, bdpsy);
- cfg.drv_data = bdpsy;
- cfg.of_node = pdev->dev.parent->of_node;
-
- bdpsy->psy = devm_power_supply_register(&pdev->dev,
- &bd70528_charger_desc, &cfg);
- if (IS_ERR(bdpsy->psy)) {
- dev_err(&pdev->dev, "failed: power supply register\n");
- return PTR_ERR(bdpsy->psy);
- }
-
- return bd70528_get_irqs(pdev, bdpsy);
-}
-
-static struct platform_driver bd70528_power = {
- .driver = {
- .name = "bd70528-power"
- },
- .probe = bd70528_power_probe,
-};
-
-module_platform_driver(bd70528_power);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 power-supply driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bd70528-power");
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 852e86bfe2fb..35ff0c8fe96f 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -5,11 +5,10 @@
* Author: Mark A. Greer <mgreer@animalcreek.com>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/of_irq.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/power/bq24190_charger.h>
@@ -1959,7 +1958,6 @@ static const struct i2c_device_id bq24190_i2c_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
-#ifdef CONFIG_OF
static const struct of_device_id bq24190_of_match[] = {
{ .compatible = "ti,bq24190", },
{ .compatible = "ti,bq24192", },
@@ -1968,11 +1966,6 @@ static const struct of_device_id bq24190_of_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, bq24190_of_match);
-#else
-static const struct of_device_id bq24190_of_match[] = {
- { },
-};
-#endif
static struct i2c_driver bq24190_driver = {
.probe = bq24190_probe,
@@ -1981,7 +1974,7 @@ static struct i2c_driver bq24190_driver = {
.driver = {
.name = "bq24190-charger",
.pm = &bq24190_pm_ops,
- .of_match_table = of_match_ptr(bq24190_of_match),
+ .of_match_table = bq24190_of_match,
},
};
module_i2c_driver(bq24190_driver);
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 45da870aecca..d67edb760c94 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1279,6 +1279,7 @@ static const struct of_device_id charger_manager_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, charger_manager_match);
static struct charger_desc *of_cm_parse_desc(struct device *dev)
{
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index a3fc0084cda0..8d62d4241da3 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -667,10 +667,23 @@ static int cpcap_battery_get_property(struct power_supply *psy,
if (!empty->voltage)
return -ENODATA;
val->intval = empty->counter_uah - latest->counter_uah;
- if (val->intval < 0)
+ if (val->intval < 0) {
+ /* Assume invalid config if CHARGE_NOW is -20% */
+ if (ddata->charge_full && abs(val->intval) > ddata->charge_full/5) {
+ empty->voltage = 0;
+ ddata->charge_full = 0;
+ return -ENODATA;
+ }
val->intval = 0;
- else if (ddata->charge_full && ddata->charge_full < val->intval)
+ } else if (ddata->charge_full && ddata->charge_full < val->intval) {
+ /* Assume invalid config if CHARGE_NOW exceeds CHARGE_FULL by 20% */
+ if (val->intval > (6*ddata->charge_full)/5) {
+ empty->voltage = 0;
+ ddata->charge_full = 0;
+ return -ENODATA;
+ }
val->intval = ddata->charge_full;
+ }
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
if (!ddata->charge_full)
@@ -747,7 +760,7 @@ static int cpcap_battery_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL:
if (val->intval < 0)
return -EINVAL;
- if (val->intval > ddata->config.info.charge_full_design)
+ if (val->intval > (6*ddata->config.info.charge_full_design)/5)
return -EINVAL;
ddata->charge_full = val->intval;
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index df01abc49ce8..60e0ce105a29 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -173,23 +173,6 @@ static enum power_supply_property cpcap_charger_props[] = {
POWER_SUPPLY_PROP_CURRENT_NOW,
};
-/* No battery always shows temperature of -40000 */
-static bool cpcap_charger_battery_found(struct cpcap_charger_ddata *ddata)
-{
- struct iio_channel *channel;
- int error, temperature;
-
- channel = ddata->channels[CPCAP_CHARGER_IIO_BATTDET];
- error = iio_read_channel_processed(channel, &temperature);
- if (error < 0) {
- dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
-
- return false;
- }
-
- return temperature > -20000 && temperature < 60000;
-}
-
static int cpcap_charger_get_charge_voltage(struct cpcap_charger_ddata *ddata)
{
struct iio_channel *channel;
@@ -700,11 +683,29 @@ static void cpcap_usb_detect(struct work_struct *work)
if (!ddata->feeding_vbus && cpcap_charger_vbus_valid(ddata) &&
s.chrgcurr1) {
- int max_current = 532000;
+ int max_current;
int vchrg, ichrg;
+ union power_supply_propval val;
+ struct power_supply *battery;
- if (cpcap_charger_battery_found(ddata))
+ battery = power_supply_get_by_name("battery");
+ if (IS_ERR_OR_NULL(battery)) {
+ dev_err(ddata->dev, "battery power_supply not available %li\n",
+ PTR_ERR(battery));
+ return;
+ }
+
+ error = power_supply_get_property(battery, POWER_SUPPLY_PROP_PRESENT, &val);
+ power_supply_put(battery);
+ if (error)
+ goto out_err;
+
+ if (val.intval) {
max_current = 1596000;
+ } else {
+ dev_info(ddata->dev, "battery not inserted, charging disabled\n");
+ max_current = 0;
+ }
if (max_current > ddata->limit_current)
max_current = ddata->limit_current;
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 1aab868adabf..3cea92e28dc3 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/of_device.h>
-#include <linux/max17040_battery.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -142,13 +141,10 @@ struct max17040_chip {
struct regmap *regmap;
struct delayed_work work;
struct power_supply *battery;
- struct max17040_platform_data *pdata;
struct chip_data data;
/* battery capacity */
int soc;
- /* State Of Charge */
- int status;
/* Low alert threshold from 32% to 1% of the State of Charge */
u32 low_soc_alert;
/* some devices return twice the capacity */
@@ -221,26 +217,7 @@ static int max17040_get_version(struct max17040_chip *chip)
static int max17040_get_online(struct max17040_chip *chip)
{
- return chip->pdata && chip->pdata->battery_online ?
- chip->pdata->battery_online() : 1;
-}
-
-static int max17040_get_status(struct max17040_chip *chip)
-{
- if (!chip->pdata || !chip->pdata->charger_online
- || !chip->pdata->charger_enable)
- return POWER_SUPPLY_STATUS_UNKNOWN;
-
- if (max17040_get_soc(chip) > MAX17040_BATTERY_FULL)
- return POWER_SUPPLY_STATUS_FULL;
-
- if (chip->pdata->charger_online())
- if (chip->pdata->charger_enable())
- return POWER_SUPPLY_STATUS_CHARGING;
- else
- return POWER_SUPPLY_STATUS_NOT_CHARGING;
- else
- return POWER_SUPPLY_STATUS_DISCHARGING;
+ return 1;
}
static int max17040_get_of_data(struct max17040_chip *chip)
@@ -283,7 +260,6 @@ static int max17040_get_of_data(struct max17040_chip *chip)
static void max17040_check_changes(struct max17040_chip *chip)
{
chip->soc = max17040_get_soc(chip);
- chip->status = max17040_get_status(chip);
}
static void max17040_queue_work(struct max17040_chip *chip)
@@ -302,17 +278,16 @@ static void max17040_stop_work(void *data)
static void max17040_work(struct work_struct *work)
{
struct max17040_chip *chip;
- int last_soc, last_status;
+ int last_soc;
chip = container_of(work, struct max17040_chip, work.work);
- /* store SOC and status to check changes */
+ /* store SOC to check changes */
last_soc = chip->soc;
- last_status = chip->status;
max17040_check_changes(chip);
/* check changes and send uevent */
- if (last_soc != chip->soc || last_status != chip->status)
+ if (last_soc != chip->soc)
power_supply_changed(chip->battery);
max17040_queue_work(chip);
@@ -361,12 +336,10 @@ static irqreturn_t max17040_thread_handler(int id, void *dev)
static int max17040_enable_alert_irq(struct max17040_chip *chip)
{
struct i2c_client *client = chip->client;
- unsigned int flags;
int ret;
- flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- max17040_thread_handler, flags,
+ max17040_thread_handler, IRQF_ONESHOT,
chip->battery->desc->name, chip);
return ret;
@@ -415,9 +388,6 @@ static int max17040_get_property(struct power_supply *psy,
struct max17040_chip *chip = power_supply_get_drvdata(psy);
switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = max17040_get_status(chip);
- break;
case POWER_SUPPLY_PROP_ONLINE:
val->intval = max17040_get_online(chip);
break;
@@ -444,7 +414,6 @@ static const struct regmap_config max17040_regmap = {
};
static enum power_supply_property max17040_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
@@ -480,7 +449,6 @@ static int max17040_probe(struct i2c_client *client,
chip->client = client;
chip->regmap = devm_regmap_init_i2c(client, &max17040_regmap);
- chip->pdata = client->dev.platform_data;
chip_id = (enum chip_id) id->driver_data;
if (client->dev.of_node) {
ret = max17040_get_of_data(chip);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 1d7326cd8fc6..ce2041b30a06 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -1104,7 +1104,7 @@ static int max17042_probe(struct i2c_client *client,
}
if (client->irq) {
- unsigned int flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+ unsigned int flags = IRQF_ONESHOT;
/*
* On ACPI systems the IRQ may be handled by ACPI-event code,
diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
deleted file mode 100644
index f86bbbeaff6c..000000000000
--- a/drivers/power/supply/pm2301_charger.c
+++ /dev/null
@@ -1,1249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2012 ST Ericsson.
- *
- * Power supply driver for ST Ericsson pm2xxx_charger charger
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/pm2301_charger.h>
-#include <linux/gpio.h>
-#include <linux/pm_runtime.h>
-#include <linux/pm.h>
-
-#include "ab8500-bm.h"
-#include "ab8500-chargalg.h"
-#include "pm2301_charger.h"
-
-#define to_pm2xxx_charger_ac_device_info(x) container_of((x), \
- struct pm2xxx_charger, ac_chg)
-#define SLEEP_MIN 50
-#define SLEEP_MAX 100
-#define PM2XXX_AUTOSUSPEND_DELAY 500
-
-static int pm2xxx_interrupt_registers[] = {
- PM2XXX_REG_INT1,
- PM2XXX_REG_INT2,
- PM2XXX_REG_INT3,
- PM2XXX_REG_INT4,
- PM2XXX_REG_INT5,
- PM2XXX_REG_INT6,
-};
-
-static enum power_supply_property pm2xxx_charger_ac_props[] = {
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_VOLTAGE_AVG,
-};
-
-static int pm2xxx_charger_voltage_map[] = {
- 3500,
- 3525,
- 3550,
- 3575,
- 3600,
- 3625,
- 3650,
- 3675,
- 3700,
- 3725,
- 3750,
- 3775,
- 3800,
- 3825,
- 3850,
- 3875,
- 3900,
- 3925,
- 3950,
- 3975,
- 4000,
- 4025,
- 4050,
- 4075,
- 4100,
- 4125,
- 4150,
- 4175,
- 4200,
- 4225,
- 4250,
- 4275,
- 4300,
-};
-
-static int pm2xxx_charger_current_map[] = {
- 200,
- 200,
- 400,
- 600,
- 800,
- 1000,
- 1200,
- 1400,
- 1600,
- 1800,
- 2000,
- 2200,
- 2400,
- 2600,
- 2800,
- 3000,
-};
-
-static void set_lpn_pin(struct pm2xxx_charger *pm2)
-{
- if (!pm2->ac.charger_connected && gpio_is_valid(pm2->lpn_pin)) {
- gpio_set_value(pm2->lpn_pin, 1);
- usleep_range(SLEEP_MIN, SLEEP_MAX);
- }
-}
-
-static void clear_lpn_pin(struct pm2xxx_charger *pm2)
-{
- if (!pm2->ac.charger_connected && gpio_is_valid(pm2->lpn_pin))
- gpio_set_value(pm2->lpn_pin, 0);
-}
-
-static int pm2xxx_reg_read(struct pm2xxx_charger *pm2, int reg, u8 *val)
-{
- int ret;
-
- /* wake up the device */
- pm_runtime_get_sync(pm2->dev);
-
- ret = i2c_smbus_read_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
- 1, val);
- if (ret < 0)
- dev_err(pm2->dev, "Error reading register at 0x%x\n", reg);
- else
- ret = 0;
-
- pm_runtime_put_sync(pm2->dev);
-
- return ret;
-}
-
-static int pm2xxx_reg_write(struct pm2xxx_charger *pm2, int reg, u8 val)
-{
- int ret;
-
- /* wake up the device */
- pm_runtime_get_sync(pm2->dev);
-
- ret = i2c_smbus_write_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
- 1, &val);
- if (ret < 0)
- dev_err(pm2->dev, "Error writing register at 0x%x\n", reg);
- else
- ret = 0;
-
- pm_runtime_put_sync(pm2->dev);
-
- return ret;
-}
-
-static int pm2xxx_charging_enable_mngt(struct pm2xxx_charger *pm2)
-{
- int ret;
-
- /* Enable charging */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
- (PM2XXX_CH_AUTO_RESUME_EN | PM2XXX_CHARGER_ENA));
-
- return ret;
-}
-
-static int pm2xxx_charging_disable_mngt(struct pm2xxx_charger *pm2)
-{
- int ret;
-
- /* Disable SW EOC ctrl */
- ret = pm2xxx_reg_write(pm2, PM2XXX_SW_CTRL_REG, PM2XXX_SWCTRL_HW);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
- return ret;
- }
-
- /* Disable charging */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
- (PM2XXX_CH_AUTO_RESUME_DIS | PM2XXX_CHARGER_DIS));
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
- return ret;
- }
-
- return 0;
-}
-
-static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
-{
- queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
-
- return 0;
-}
-
-
-static int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
-{
- queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
-
- return 0;
-}
-
-static int pm2xxx_charger_ovv_mngt(struct pm2xxx_charger *pm2, int val)
-{
- dev_err(pm2->dev, "Overvoltage detected\n");
- pm2->flags.ovv = true;
- power_supply_changed(pm2->ac_chg.psy);
-
- /* Schedule a new HW failure check */
- queue_delayed_work(pm2->charger_wq, &pm2->check_hw_failure_work, 0);
-
- return 0;
-}
-
-static int pm2xxx_charger_wd_exp_mngt(struct pm2xxx_charger *pm2, int val)
-{
- dev_dbg(pm2->dev , "20 minutes watchdog expired\n");
-
- pm2->ac.wd_expired = true;
- power_supply_changed(pm2->ac_chg.psy);
-
- return 0;
-}
-
-static int pm2xxx_charger_vbat_lsig_mngt(struct pm2xxx_charger *pm2, int val)
-{
- int ret;
-
- switch (val) {
- case PM2XXX_INT1_ITVBATLOWR:
- dev_dbg(pm2->dev, "VBAT grows above VBAT_LOW level\n");
- /* Enable SW EOC ctrl */
- ret = pm2xxx_reg_write(pm2, PM2XXX_SW_CTRL_REG,
- PM2XXX_SWCTRL_SW);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
- return ret;
- }
- break;
-
- case PM2XXX_INT1_ITVBATLOWF:
- dev_dbg(pm2->dev, "VBAT drops below VBAT_LOW level\n");
- /* Disable SW EOC ctrl */
- ret = pm2xxx_reg_write(pm2, PM2XXX_SW_CTRL_REG,
- PM2XXX_SWCTRL_HW);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
- return ret;
- }
- break;
-
- default:
- dev_err(pm2->dev, "Unknown VBAT level\n");
- }
-
- return 0;
-}
-
-static int pm2xxx_charger_bat_disc_mngt(struct pm2xxx_charger *pm2, int val)
-{
- dev_dbg(pm2->dev, "battery disconnected\n");
-
- return 0;
-}
-
-static int pm2xxx_charger_detection(struct pm2xxx_charger *pm2, u8 *val)
-{
- int ret;
-
- ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT2, val);
-
- if (ret < 0) {
- dev_err(pm2->dev, "Charger detection failed\n");
- goto out;
- }
-
- *val &= (PM2XXX_INT2_S_ITVPWR1PLUG | PM2XXX_INT2_S_ITVPWR2PLUG);
-
-out:
- return ret;
-}
-
-static int pm2xxx_charger_itv_pwr_plug_mngt(struct pm2xxx_charger *pm2, int val)
-{
-
- int ret;
- u8 read_val;
-
- /*
- * Since we can't be sure that the events are received
- * synchronously, we have the check if the main charger is
- * connected by reading the interrupt source register.
- */
- ret = pm2xxx_charger_detection(pm2, &read_val);
-
- if ((ret == 0) && read_val) {
- pm2->ac.charger_connected = 1;
- pm2->ac_conn = true;
- queue_work(pm2->charger_wq, &pm2->ac_work);
- }
-
-
- return ret;
-}
-
-static int pm2xxx_charger_itv_pwr_unplug_mngt(struct pm2xxx_charger *pm2,
- int val)
-{
- pm2->ac.charger_connected = 0;
- queue_work(pm2->charger_wq, &pm2->ac_work);
-
- return 0;
-}
-
-static int pm2_int_reg0(void *pm2_data, int val)
-{
- struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
-
- if (val & PM2XXX_INT1_ITVBATLOWR) {
- ret = pm2xxx_charger_vbat_lsig_mngt(pm2,
- PM2XXX_INT1_ITVBATLOWR);
- if (ret < 0)
- goto out;
- }
-
- if (val & PM2XXX_INT1_ITVBATLOWF) {
- ret = pm2xxx_charger_vbat_lsig_mngt(pm2,
- PM2XXX_INT1_ITVBATLOWF);
- if (ret < 0)
- goto out;
- }
-
- if (val & PM2XXX_INT1_ITVBATDISCONNECT) {
- ret = pm2xxx_charger_bat_disc_mngt(pm2,
- PM2XXX_INT1_ITVBATDISCONNECT);
- if (ret < 0)
- goto out;
- }
-out:
- return ret;
-}
-
-static int pm2_int_reg1(void *pm2_data, int val)
-{
- struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
-
- if (val & (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG)) {
- dev_dbg(pm2->dev , "Main charger plugged\n");
- ret = pm2xxx_charger_itv_pwr_plug_mngt(pm2, val &
- (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG));
- }
-
- if (val &
- (PM2XXX_INT2_ITVPWR1UNPLUG | PM2XXX_INT2_ITVPWR2UNPLUG)) {
- dev_dbg(pm2->dev , "Main charger unplugged\n");
- ret = pm2xxx_charger_itv_pwr_unplug_mngt(pm2, val &
- (PM2XXX_INT2_ITVPWR1UNPLUG |
- PM2XXX_INT2_ITVPWR2UNPLUG));
- }
-
- return ret;
-}
-
-static int pm2_int_reg2(void *pm2_data, int val)
-{
- struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
-
- if (val & PM2XXX_INT3_ITAUTOTIMEOUTWD)
- ret = pm2xxx_charger_wd_exp_mngt(pm2, val);
-
- if (val & (PM2XXX_INT3_ITCHPRECHARGEWD |
- PM2XXX_INT3_ITCHCCWD | PM2XXX_INT3_ITCHCVWD)) {
- dev_dbg(pm2->dev,
- "Watchdog occurred for precharge, CC and CV charge\n");
- }
-
- return ret;
-}
-
-static int pm2_int_reg3(void *pm2_data, int val)
-{
- struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
-
- if (val & (PM2XXX_INT4_ITCHARGINGON)) {
- dev_dbg(pm2->dev ,
- "charging operation has started\n");
- }
-
- if (val & (PM2XXX_INT4_ITVRESUME)) {
- dev_dbg(pm2->dev,
- "battery discharged down to VResume threshold\n");
- }
-
- if (val & (PM2XXX_INT4_ITBATTFULL)) {
- dev_dbg(pm2->dev , "battery fully detected\n");
- }
-
- if (val & (PM2XXX_INT4_ITCVPHASE)) {
- dev_dbg(pm2->dev, "CV phase enter with 0.5C charging\n");
- }
-
- if (val & (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV)) {
- pm2->failure_case = VPWR_OVV;
- ret = pm2xxx_charger_ovv_mngt(pm2, val &
- (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV));
- dev_dbg(pm2->dev, "VPWR/VSYSTEM overvoltage detected\n");
- }
-
- if (val & (PM2XXX_INT4_S_ITBATTEMPCOLD |
- PM2XXX_INT4_S_ITBATTEMPHOT)) {
- ret = pm2xxx_charger_batt_therm_mngt(pm2, val &
- (PM2XXX_INT4_S_ITBATTEMPCOLD |
- PM2XXX_INT4_S_ITBATTEMPHOT));
- dev_dbg(pm2->dev, "BTEMP is too Low/High\n");
- }
-
- return ret;
-}
-
-static int pm2_int_reg4(void *pm2_data, int val)
-{
- struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
-
- if (val & PM2XXX_INT5_ITVSYSTEMOVV) {
- pm2->failure_case = VSYSTEM_OVV;
- ret = pm2xxx_charger_ovv_mngt(pm2, val &
- PM2XXX_INT5_ITVSYSTEMOVV);
- dev_dbg(pm2->dev, "VSYSTEM overvoltage detected\n");
- }
-
- if (val & (PM2XXX_INT5_ITTHERMALWARNINGFALL |
- PM2XXX_INT5_ITTHERMALWARNINGRISE |
- PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
- PM2XXX_INT5_ITTHERMALSHUTDOWNRISE)) {
- dev_dbg(pm2->dev, "BTEMP die temperature is too Low/High\n");
- ret = pm2xxx_charger_die_therm_mngt(pm2, val &
- (PM2XXX_INT5_ITTHERMALWARNINGFALL |
- PM2XXX_INT5_ITTHERMALWARNINGRISE |
- PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
- PM2XXX_INT5_ITTHERMALSHUTDOWNRISE));
- }
-
- return ret;
-}
-
-static int pm2_int_reg5(void *pm2_data, int val)
-{
- struct pm2xxx_charger *pm2 = pm2_data;
-
- if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
- dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
- }
-
- if (val & (PM2XXX_INT6_ITVPWR2VALIDRISE |
- PM2XXX_INT6_ITVPWR1VALIDRISE |
- PM2XXX_INT6_ITVPWR2VALIDFALL |
- PM2XXX_INT6_ITVPWR1VALIDFALL)) {
- dev_dbg(pm2->dev, "Falling/Rising edge on WPWR1/2\n");
- }
-
- return 0;
-}
-
-static irqreturn_t pm2xxx_irq_int(int irq, void *data)
-{
- struct pm2xxx_charger *pm2 = data;
- struct pm2xxx_interrupts *interrupt = pm2->pm2_int;
- int i;
-
- /* wake up the device */
- pm_runtime_get_sync(pm2->dev);
-
- do {
- for (i = 0; i < PM2XXX_NUM_INT_REG; i++) {
- pm2xxx_reg_read(pm2,
- pm2xxx_interrupt_registers[i],
- &(interrupt->reg[i]));
-
- if (interrupt->reg[i] > 0)
- interrupt->handler[i](pm2, interrupt->reg[i]);
- }
- } while (gpio_get_value(pm2->pdata->gpio_irq_number) == 0);
-
- pm_runtime_mark_last_busy(pm2->dev);
- pm_runtime_put_autosuspend(pm2->dev);
-
- return IRQ_HANDLED;
-}
-
-static int pm2xxx_charger_get_ac_cv(struct pm2xxx_charger *pm2)
-{
- int ret = 0;
- u8 val;
-
- if (pm2->ac.charger_connected && pm2->ac.charger_online) {
-
- ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT4, &val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
- goto out;
- }
-
- if (val & PM2XXX_INT4_S_ITCVPHASE)
- ret = PM2XXX_CONST_VOLT;
- else
- ret = PM2XXX_CONST_CURR;
- }
-out:
- return ret;
-}
-
-static int pm2xxx_current_to_regval(int curr)
-{
- int i;
-
- if (curr < pm2xxx_charger_current_map[0])
- return 0;
-
- for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_current_map); i++) {
- if (curr < pm2xxx_charger_current_map[i])
- return (i - 1);
- }
-
- i = ARRAY_SIZE(pm2xxx_charger_current_map) - 1;
- if (curr == pm2xxx_charger_current_map[i])
- return i;
- else
- return -EINVAL;
-}
-
-static int pm2xxx_voltage_to_regval(int curr)
-{
- int i;
-
- if (curr < pm2xxx_charger_voltage_map[0])
- return 0;
-
- for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_voltage_map); i++) {
- if (curr < pm2xxx_charger_voltage_map[i])
- return i - 1;
- }
-
- i = ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1;
- if (curr == pm2xxx_charger_voltage_map[i])
- return i;
- else
- return -EINVAL;
-}
-
-static int pm2xxx_charger_update_charger_current(struct ux500_charger *charger,
- int ich_out)
-{
- int ret;
- int curr_index;
- struct pm2xxx_charger *pm2;
- u8 val;
-
- if (charger->psy->desc->type == POWER_SUPPLY_TYPE_MAINS)
- pm2 = to_pm2xxx_charger_ac_device_info(charger);
- else
- return -ENXIO;
-
- curr_index = pm2xxx_current_to_regval(ich_out);
- if (curr_index < 0) {
- dev_err(pm2->dev,
- "Charger current too high, charging not started\n");
- return -ENXIO;
- }
-
- ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
- if (ret >= 0) {
- val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
- val |= curr_index;
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
- if (ret < 0) {
- dev_err(pm2->dev,
- "%s write failed\n", __func__);
- }
- }
- else
- dev_err(pm2->dev, "%s read failed\n", __func__);
-
- return ret;
-}
-
-static int pm2xxx_charger_ac_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct pm2xxx_charger *pm2;
-
- pm2 = to_pm2xxx_charger_ac_device_info(psy_to_ux500_charger(psy));
-
- switch (psp) {
- case POWER_SUPPLY_PROP_HEALTH:
- if (pm2->flags.mainextchnotok)
- val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
- else if (pm2->ac.wd_expired)
- val->intval = POWER_SUPPLY_HEALTH_DEAD;
- else if (pm2->flags.main_thermal_prot)
- val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
- else if (pm2->flags.ovv)
- val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- else
- val->intval = POWER_SUPPLY_HEALTH_GOOD;
- break;
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = pm2->ac.charger_online;
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = pm2->ac.charger_connected;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- pm2->ac.cv_active = pm2xxx_charger_get_ac_cv(pm2);
- val->intval = pm2->ac.cv_active;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int pm2xxx_charging_init(struct pm2xxx_charger *pm2)
-{
- int ret = 0;
-
- /* enable CC and CV watchdog */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG3,
- (PM2XXX_CH_WD_CV_PHASE_60MIN | PM2XXX_CH_WD_CC_PHASE_60MIN));
- if( ret < 0)
- return ret;
-
- /* enable precharge watchdog */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG4,
- PM2XXX_CH_WD_PRECH_PHASE_60MIN);
-
- /* Disable auto timeout */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG5,
- PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN);
-
- /*
- * EOC current level = 100mA
- * Precharge current level = 100mA
- * CC current level = 1000mA
- */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6,
- (PM2XXX_DIR_CH_CC_CURRENT_1000MA |
- PM2XXX_CH_PRECH_CURRENT_100MA |
- PM2XXX_CH_EOC_CURRENT_100MA));
-
- /*
- * recharge threshold = 3.8V
- * Precharge to CC threshold = 2.9V
- */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG7,
- (PM2XXX_CH_PRECH_VOL_2_9 | PM2XXX_CH_VRESUME_VOL_3_8));
-
- /* float voltage charger level = 4.2V */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8,
- PM2XXX_CH_VOLT_4_2);
-
- /* Voltage drop between VBAT and VSYS in HW charging = 300mV */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG9,
- (PM2XXX_CH_150MV_DROP_300MV | PM2XXX_CHARCHING_INFO_DIS |
- PM2XXX_CH_CC_REDUCED_CURRENT_IDENT |
- PM2XXX_CH_CC_MODEDROP_DIS));
-
- /* Input charger level of over voltage = 10V */
- ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR2,
- PM2XXX_VPWR2_OVV_10);
- ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR1,
- PM2XXX_VPWR1_OVV_10);
-
- /* Input charger drop */
- ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR2,
- (PM2XXX_VPWR2_HW_OPT_DIS | PM2XXX_VPWR2_VALID_DIS |
- PM2XXX_VPWR2_DROP_DIS));
- ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR1,
- (PM2XXX_VPWR1_HW_OPT_DIS | PM2XXX_VPWR1_VALID_DIS |
- PM2XXX_VPWR1_DROP_DIS));
-
- /* Disable battery low monitoring */
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_LOW_LEV_COMP_REG,
- PM2XXX_VBAT_LOW_MONITORING_ENA);
-
- return ret;
-}
-
-static int pm2xxx_charger_ac_en(struct ux500_charger *charger,
- int enable, int vset, int iset)
-{
- int ret;
- int volt_index;
- int curr_index;
- u8 val;
-
- struct pm2xxx_charger *pm2 = to_pm2xxx_charger_ac_device_info(charger);
-
- if (enable) {
- if (!pm2->ac.charger_connected) {
- dev_dbg(pm2->dev, "AC charger not connected\n");
- return -ENXIO;
- }
-
- dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset);
- if (!pm2->vddadc_en_ac) {
- ret = regulator_enable(pm2->regu);
- if (ret)
- dev_warn(pm2->dev,
- "Failed to enable vddadc regulator\n");
- else
- pm2->vddadc_en_ac = true;
- }
-
- ret = pm2xxx_charging_init(pm2);
- if (ret < 0) {
- dev_err(pm2->dev, "%s charging init failed\n",
- __func__);
- goto error_occured;
- }
-
- volt_index = pm2xxx_voltage_to_regval(vset);
- curr_index = pm2xxx_current_to_regval(iset);
-
- if (volt_index < 0 || curr_index < 0) {
- dev_err(pm2->dev,
- "Charger voltage or current too high, "
- "charging not started\n");
- return -ENXIO;
- }
-
- ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG8, &val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
- goto error_occured;
- }
- val &= ~PM2XXX_CH_VOLT_MASK;
- val |= volt_index;
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8, val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
- goto error_occured;
- }
-
- ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
- goto error_occured;
- }
- val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
- val |= curr_index;
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
- goto error_occured;
- }
-
- if (!pm2->bat->enable_overshoot) {
- ret = pm2xxx_reg_read(pm2, PM2XXX_LED_CTRL_REG, &val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx read failed\n",
- __func__);
- goto error_occured;
- }
- val |= PM2XXX_ANTI_OVERSHOOT_EN;
- ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG, val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx write failed\n",
- __func__);
- goto error_occured;
- }
- }
-
- ret = pm2xxx_charging_enable_mngt(pm2);
- if (ret < 0) {
- dev_err(pm2->dev, "Failed to enable"
- "pm2xxx ac charger\n");
- goto error_occured;
- }
-
- pm2->ac.charger_online = 1;
- } else {
- pm2->ac.charger_online = 0;
- pm2->ac.wd_expired = false;
-
- /* Disable regulator if enabled */
- if (pm2->vddadc_en_ac) {
- regulator_disable(pm2->regu);
- pm2->vddadc_en_ac = false;
- }
-
- ret = pm2xxx_charging_disable_mngt(pm2);
- if (ret < 0) {
- dev_err(pm2->dev, "failed to disable"
- "pm2xxx ac charger\n");
- goto error_occured;
- }
-
- dev_dbg(pm2->dev, "PM2301: " "Disabled AC charging\n");
- }
- power_supply_changed(pm2->ac_chg.psy);
-
-error_occured:
- return ret;
-}
-
-static int pm2xxx_charger_watchdog_kick(struct ux500_charger *charger)
-{
- int ret;
- struct pm2xxx_charger *pm2;
-
- if (charger->psy->desc->type == POWER_SUPPLY_TYPE_MAINS)
- pm2 = to_pm2xxx_charger_ac_device_info(charger);
- else
- return -ENXIO;
-
- ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_WD_KICK, WD_TIMER);
- if (ret)
- dev_err(pm2->dev, "Failed to kick WD!\n");
-
- return ret;
-}
-
-static void pm2xxx_charger_ac_work(struct work_struct *work)
-{
- struct pm2xxx_charger *pm2 = container_of(work,
- struct pm2xxx_charger, ac_work);
-
-
- power_supply_changed(pm2->ac_chg.psy);
- sysfs_notify(&pm2->ac_chg.psy->dev.kobj, NULL, "present");
-};
-
-static void pm2xxx_charger_check_hw_failure_work(struct work_struct *work)
-{
- u8 reg_value;
-
- struct pm2xxx_charger *pm2 = container_of(work,
- struct pm2xxx_charger, check_hw_failure_work.work);
-
- if (pm2->flags.ovv) {
- pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT4, &reg_value);
-
- if (!(reg_value & (PM2XXX_INT4_S_ITVPWR1OVV |
- PM2XXX_INT4_S_ITVPWR2OVV))) {
- pm2->flags.ovv = false;
- power_supply_changed(pm2->ac_chg.psy);
- }
- }
-
- /* If we still have a failure, schedule a new check */
- if (pm2->flags.ovv) {
- queue_delayed_work(pm2->charger_wq,
- &pm2->check_hw_failure_work, round_jiffies(HZ));
- }
-}
-
-static void pm2xxx_charger_check_main_thermal_prot_work(
- struct work_struct *work)
-{
- int ret;
- u8 val;
-
- struct pm2xxx_charger *pm2 = container_of(work, struct pm2xxx_charger,
- check_main_thermal_prot_work);
-
- /* Check if die temp warning is still active */
- ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT5, &val);
- if (ret < 0) {
- dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
- return;
- }
- if (val & (PM2XXX_INT5_S_ITTHERMALWARNINGRISE
- | PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE))
- pm2->flags.main_thermal_prot = true;
- else if (val & (PM2XXX_INT5_S_ITTHERMALWARNINGFALL
- | PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL))
- pm2->flags.main_thermal_prot = false;
-
- power_supply_changed(pm2->ac_chg.psy);
-}
-
-static struct pm2xxx_interrupts pm2xxx_int = {
- .handler[0] = pm2_int_reg0,
- .handler[1] = pm2_int_reg1,
- .handler[2] = pm2_int_reg2,
- .handler[3] = pm2_int_reg3,
- .handler[4] = pm2_int_reg4,
- .handler[5] = pm2_int_reg5,
-};
-
-static struct pm2xxx_irq pm2xxx_charger_irq[] = {
- {"PM2XXX_IRQ_INT", pm2xxx_irq_int},
-};
-
-static int __maybe_unused pm2xxx_wall_charger_resume(struct device *dev)
-{
- struct i2c_client *i2c_client = to_i2c_client(dev);
- struct pm2xxx_charger *pm2;
-
- pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(i2c_client);
- set_lpn_pin(pm2);
-
- /* If we still have a HW failure, schedule a new check */
- if (pm2->flags.ovv)
- queue_delayed_work(pm2->charger_wq,
- &pm2->check_hw_failure_work, 0);
-
- return 0;
-}
-
-static int __maybe_unused pm2xxx_wall_charger_suspend(struct device *dev)
-{
- struct i2c_client *i2c_client = to_i2c_client(dev);
- struct pm2xxx_charger *pm2;
-
- pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(i2c_client);
- clear_lpn_pin(pm2);
-
- /* Cancel any pending HW failure check */
- if (delayed_work_pending(&pm2->check_hw_failure_work))
- cancel_delayed_work(&pm2->check_hw_failure_work);
-
- flush_work(&pm2->ac_work);
- flush_work(&pm2->check_main_thermal_prot_work);
-
- return 0;
-}
-
-static int __maybe_unused pm2xxx_runtime_suspend(struct device *dev)
-{
- struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
- struct pm2xxx_charger *pm2;
-
- pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client);
- clear_lpn_pin(pm2);
-
- return 0;
-}
-
-static int __maybe_unused pm2xxx_runtime_resume(struct device *dev)
-{
- struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
- struct pm2xxx_charger *pm2;
-
- pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client);
-
- if (gpio_is_valid(pm2->lpn_pin) && gpio_get_value(pm2->lpn_pin) == 0)
- set_lpn_pin(pm2);
-
- return 0;
-}
-
-static const struct dev_pm_ops pm2xxx_pm_ops __maybe_unused = {
- SET_SYSTEM_SLEEP_PM_OPS(pm2xxx_wall_charger_suspend,
- pm2xxx_wall_charger_resume)
- SET_RUNTIME_PM_OPS(pm2xxx_runtime_suspend, pm2xxx_runtime_resume, NULL)
-};
-
-static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
- const struct i2c_device_id *id)
-{
- struct pm2xxx_platform_data *pl_data = i2c_client->dev.platform_data;
- struct power_supply_config psy_cfg = {};
- struct pm2xxx_charger *pm2;
- int ret = 0;
- u8 val;
- int i;
-
- if (!pl_data) {
- dev_err(&i2c_client->dev, "No platform data supplied\n");
- return -EINVAL;
- }
-
- pm2 = kzalloc(sizeof(struct pm2xxx_charger), GFP_KERNEL);
- if (!pm2) {
- dev_err(&i2c_client->dev, "pm2xxx_charger allocation failed\n");
- return -ENOMEM;
- }
-
- /* get parent data */
- pm2->dev = &i2c_client->dev;
-
- pm2->pm2_int = &pm2xxx_int;
-
- /* get charger spcific platform data */
- if (!pl_data->wall_charger) {
- dev_err(pm2->dev, "no charger platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- pm2->pdata = pl_data->wall_charger;
-
- /* get battery specific platform data */
- if (!pl_data->battery) {
- dev_err(pm2->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- pm2->bat = pl_data->battery;
-
- if (!i2c_check_functionality(i2c_client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_READ_WORD_DATA)) {
- ret = -ENODEV;
- dev_info(pm2->dev, "pm2301 i2c_check_functionality failed\n");
- goto free_device_info;
- }
-
- pm2->config.pm2xxx_i2c = i2c_client;
- pm2->config.pm2xxx_id = (struct i2c_device_id *) id;
- i2c_set_clientdata(i2c_client, pm2);
-
- /* AC supply */
- /* power_supply base class */
- pm2->ac_chg_desc.name = pm2->pdata->label;
- pm2->ac_chg_desc.type = POWER_SUPPLY_TYPE_MAINS;
- pm2->ac_chg_desc.properties = pm2xxx_charger_ac_props;
- pm2->ac_chg_desc.num_properties = ARRAY_SIZE(pm2xxx_charger_ac_props);
- pm2->ac_chg_desc.get_property = pm2xxx_charger_ac_get_property;
-
- psy_cfg.supplied_to = pm2->pdata->supplied_to;
- psy_cfg.num_supplicants = pm2->pdata->num_supplicants;
- /* pm2xxx_charger sub-class */
- pm2->ac_chg.ops.enable = &pm2xxx_charger_ac_en;
- pm2->ac_chg.ops.kick_wd = &pm2xxx_charger_watchdog_kick;
- pm2->ac_chg.ops.update_curr = &pm2xxx_charger_update_charger_current;
- pm2->ac_chg.max_out_volt = pm2xxx_charger_voltage_map[
- ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1];
- pm2->ac_chg.max_out_curr = pm2xxx_charger_current_map[
- ARRAY_SIZE(pm2xxx_charger_current_map) - 1];
- pm2->ac_chg.wdt_refresh = WD_KICK_INTERVAL;
- pm2->ac_chg.enabled = true;
- pm2->ac_chg.external = true;
-
- /* Create a work queue for the charger */
- pm2->charger_wq = alloc_ordered_workqueue("pm2xxx_charger_wq",
- WQ_MEM_RECLAIM);
- if (pm2->charger_wq == NULL) {
- ret = -ENOMEM;
- dev_err(pm2->dev, "failed to create work queue\n");
- goto free_device_info;
- }
-
- /* Init work for charger detection */
- INIT_WORK(&pm2->ac_work, pm2xxx_charger_ac_work);
-
- /* Init work for checking HW status */
- INIT_WORK(&pm2->check_main_thermal_prot_work,
- pm2xxx_charger_check_main_thermal_prot_work);
-
- /* Init work for HW failure check */
- INIT_DEFERRABLE_WORK(&pm2->check_hw_failure_work,
- pm2xxx_charger_check_hw_failure_work);
-
- /*
- * VDD ADC supply needs to be enabled from this driver when there
- * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
- * interrupts during charging
- */
- pm2->regu = regulator_get(pm2->dev, "vddadc");
- if (IS_ERR(pm2->regu)) {
- ret = PTR_ERR(pm2->regu);
- dev_err(pm2->dev, "failed to get vddadc regulator\n");
- goto free_charger_wq;
- }
-
- /* Register AC charger class */
- pm2->ac_chg.psy = power_supply_register(pm2->dev, &pm2->ac_chg_desc,
- &psy_cfg);
- if (IS_ERR(pm2->ac_chg.psy)) {
- dev_err(pm2->dev, "failed to register AC charger\n");
- ret = PTR_ERR(pm2->ac_chg.psy);
- goto free_regulator;
- }
-
- /* Register interrupts */
- ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
- NULL,
- pm2xxx_charger_irq[0].isr,
- pm2->pdata->irq_type | IRQF_ONESHOT,
- pm2xxx_charger_irq[0].name, pm2);
-
- if (ret != 0) {
- dev_err(pm2->dev, "failed to request %s IRQ %d: %d\n",
- pm2xxx_charger_irq[0].name,
- gpio_to_irq(pm2->pdata->gpio_irq_number), ret);
- goto unregister_pm2xxx_charger;
- }
-
- ret = pm_runtime_set_active(pm2->dev);
- if (ret)
- dev_err(pm2->dev, "set active Error\n");
-
- pm_runtime_enable(pm2->dev);
- pm_runtime_set_autosuspend_delay(pm2->dev, PM2XXX_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(pm2->dev);
- pm_runtime_resume(pm2->dev);
-
- /* pm interrupt can wake up system */
- ret = enable_irq_wake(gpio_to_irq(pm2->pdata->gpio_irq_number));
- if (ret) {
- dev_err(pm2->dev, "failed to set irq wake\n");
- goto unregister_pm2xxx_interrupt;
- }
-
- mutex_init(&pm2->lock);
-
- if (gpio_is_valid(pm2->pdata->lpn_gpio)) {
- /* get lpn GPIO from platform data */
- pm2->lpn_pin = pm2->pdata->lpn_gpio;
-
- /*
- * Charger detection mechanism requires pulling up the LPN pin
- * while i2c communication if Charger is not connected
- * LPN pin of PM2301 is GPIO60 of AB9540
- */
- ret = gpio_request(pm2->lpn_pin, "pm2301_lpm_gpio");
-
- if (ret < 0) {
- dev_err(pm2->dev, "pm2301_lpm_gpio request failed\n");
- goto disable_pm2_irq_wake;
- }
- ret = gpio_direction_output(pm2->lpn_pin, 0);
- if (ret < 0) {
- dev_err(pm2->dev, "pm2301_lpm_gpio direction failed\n");
- goto free_gpio;
- }
- set_lpn_pin(pm2);
- }
-
- /* read interrupt registers */
- for (i = 0; i < PM2XXX_NUM_INT_REG; i++)
- pm2xxx_reg_read(pm2,
- pm2xxx_interrupt_registers[i],
- &val);
-
- ret = pm2xxx_charger_detection(pm2, &val);
-
- if ((ret == 0) && val) {
- pm2->ac.charger_connected = 1;
- ab8500_override_turn_on_stat(~AB8500_POW_KEY_1_ON,
- AB8500_MAIN_CH_DET);
- pm2->ac_conn = true;
- power_supply_changed(pm2->ac_chg.psy);
- sysfs_notify(&pm2->ac_chg.psy->dev.kobj, NULL, "present");
- }
-
- return 0;
-
-free_gpio:
- if (gpio_is_valid(pm2->lpn_pin))
- gpio_free(pm2->lpn_pin);
-disable_pm2_irq_wake:
- disable_irq_wake(gpio_to_irq(pm2->pdata->gpio_irq_number));
-unregister_pm2xxx_interrupt:
- /* disable interrupt */
- free_irq(gpio_to_irq(pm2->pdata->gpio_irq_number), pm2);
-unregister_pm2xxx_charger:
- /* unregister power supply */
- power_supply_unregister(pm2->ac_chg.psy);
-free_regulator:
- /* disable the regulator */
- regulator_put(pm2->regu);
-free_charger_wq:
- destroy_workqueue(pm2->charger_wq);
-free_device_info:
- kfree(pm2);
-
- return ret;
-}
-
-static int pm2xxx_wall_charger_remove(struct i2c_client *i2c_client)
-{
- struct pm2xxx_charger *pm2 = i2c_get_clientdata(i2c_client);
-
- /* Disable pm_runtime */
- pm_runtime_disable(pm2->dev);
- /* Disable AC charging */
- pm2xxx_charger_ac_en(&pm2->ac_chg, false, 0, 0);
-
- /* Disable wake by pm interrupt */
- disable_irq_wake(gpio_to_irq(pm2->pdata->gpio_irq_number));
-
- /* Disable interrupts */
- free_irq(gpio_to_irq(pm2->pdata->gpio_irq_number), pm2);
-
- /* Delete the work queue */
- destroy_workqueue(pm2->charger_wq);
-
- flush_scheduled_work();
-
- /* disable the regulator */
- regulator_put(pm2->regu);
-
- power_supply_unregister(pm2->ac_chg.psy);
-
- if (gpio_is_valid(pm2->lpn_pin))
- gpio_free(pm2->lpn_pin);
-
- kfree(pm2);
-
- return 0;
-}
-
-static const struct i2c_device_id pm2xxx_id[] = {
- { "pm2301", 0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(i2c, pm2xxx_id);
-
-static struct i2c_driver pm2xxx_charger_driver = {
- .probe = pm2xxx_wall_charger_probe,
- .remove = pm2xxx_wall_charger_remove,
- .driver = {
- .name = "pm2xxx-wall_charger",
- .pm = IS_ENABLED(CONFIG_PM) ? &pm2xxx_pm_ops : NULL,
- },
- .id_table = pm2xxx_id,
-};
-
-static int __init pm2xxx_charger_init(void)
-{
- return i2c_add_driver(&pm2xxx_charger_driver);
-}
-
-static void __exit pm2xxx_charger_exit(void)
-{
- i2c_del_driver(&pm2xxx_charger_driver);
-}
-
-device_initcall_sync(pm2xxx_charger_init);
-module_exit(pm2xxx_charger_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
-MODULE_DESCRIPTION("PM2xxx charger management driver");
diff --git a/drivers/power/supply/rn5t618_power.c b/drivers/power/supply/rn5t618_power.c
index dee520f0fdf5..819061918b2a 100644
--- a/drivers/power/supply/rn5t618_power.c
+++ b/drivers/power/supply/rn5t618_power.c
@@ -37,8 +37,27 @@
#define CHG_STATE_NO_BAT2 13
#define CHG_STATE_CHG_READY_VUSB 14
+#define GCHGDET_TYPE_MASK 0x30
+#define GCHGDET_TYPE_SDP 0x00
+#define GCHGDET_TYPE_CDP 0x10
+#define GCHGDET_TYPE_DCP 0x20
+
#define FG_ENABLE 1
+/*
+ * The formula seems accurate for the battery current, but for the USB current
+ * around 70 mA per step was observed on a Kobo Clara HD. Since all sources
+ * show the same formula for the USB current as well, we stick to it to avoid
+ * accidentally programming unwanted high currents.
+ */
+#define TO_CUR_REG(x) ((x) / 100000 - 1)
+#define FROM_CUR_REG(x) ((((x) & 0x1f) + 1) * 100000)
+#define CHG_MIN_CUR 100000
+#define CHG_MAX_CUR 1800000
+#define ADP_MAX_CUR 2500000
+#define USB_MAX_CUR 1400000
+
+
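As a quick aside on the encoding above: TO_CUR_REG()/FROM_CUR_REG() map a current in microamps to a 5-bit register step of 100 mA and back, so 900000 uA encodes to 8 and decodes to 900000 uA again. A minimal standalone sketch of that arithmetic (plain userspace C, not part of the patch):

    /* Standalone check of the 100 mA-per-step current encoding used above. */
    #include <stdio.h>

    #define TO_CUR_REG(x)   ((x) / 100000 - 1)
    #define FROM_CUR_REG(x) ((((x) & 0x1f) + 1) * 100000)

    int main(void)
    {
            int ua = 900000;                      /* request 900 mA */
            int reg = TO_CUR_REG(ua);             /* -> 8 */

            printf("reg=0x%02x back=%d uA\n", reg, FROM_CUR_REG(reg));
            return 0;
    }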
struct rn5t618_power_info {
struct rn5t618 *rn5t618;
struct platform_device *pdev;
@@ -48,12 +67,24 @@ struct rn5t618_power_info {
int irq;
};
+static enum power_supply_usb_type rn5t618_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN
+};
+
static enum power_supply_property rn5t618_usb_props[] = {
+ /* input current limit is not very accurate */
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_ONLINE,
};
static enum power_supply_property rn5t618_adp_props[] = {
+ /* input current limit is not very accurate */
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
};
@@ -69,6 +100,7 @@ static enum power_supply_property rn5t618_battery_props[] = {
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
};
@@ -258,6 +290,36 @@ static int rn5t618_battery_ttf(struct rn5t618_power_info *info,
return 0;
}
+static int rn5t618_battery_set_current_limit(struct rn5t618_power_info *info,
+ const union power_supply_propval *val)
+{
+ if (val->intval < CHG_MIN_CUR)
+ return -EINVAL;
+
+ if (val->intval >= CHG_MAX_CUR)
+ return -EINVAL;
+
+ return regmap_update_bits(info->rn5t618->regmap,
+ RN5T618_CHGISET,
+ 0x1F, TO_CUR_REG(val->intval));
+}
+
+static int rn5t618_battery_get_current_limit(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CHGISET,
+ &regval);
+ if (ret < 0)
+ return ret;
+
+ val->intval = FROM_CUR_REG(regval);
+
+ return 0;
+}
+
static int rn5t618_battery_charge_full(struct rn5t618_power_info *info,
union power_supply_propval *val)
{
@@ -323,6 +385,9 @@ static int rn5t618_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ ret = rn5t618_battery_get_current_limit(info, val);
+ break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = rn5t618_battery_charge_full(info, val);
break;
@@ -336,12 +401,38 @@ static int rn5t618_battery_get_property(struct power_supply *psy,
return ret;
}
+static int rn5t618_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ return rn5t618_battery_set_current_limit(info, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rn5t618_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
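With CHARGE_CONTROL_LIMIT now writeable, userspace can cap the charge current through the standard power-supply sysfs node. A hedged sketch (the path assumes the supply is exposed as rn5t618-battery and that the value is in microamps, as the CHG_MIN_CUR/CHG_MAX_CUR bounds above suggest):

    /* Userspace sketch: limit the charge current to 500 mA. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/power_supply/rn5t618-battery/charge_control_limit", "w");

            if (!f)
                    return 1;
            fprintf(f, "500000\n");   /* 500000 uA, within [CHG_MIN_CUR, CHG_MAX_CUR) */
            fclose(f);
            return 0;
    }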
static int rn5t618_adp_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
unsigned int chgstate;
+ unsigned int regval;
bool online;
int ret;
@@ -365,6 +456,14 @@ static int rn5t618_adp_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = regmap_read(info->rn5t618->regmap,
+ RN5T618_REGISET1, &regval);
+ if (ret < 0)
+ return ret;
+
+ val->intval = FROM_CUR_REG(regval);
+ break;
default:
return -EINVAL;
}
@@ -372,12 +471,79 @@ static int rn5t618_adp_get_property(struct power_supply *psy,
return 0;
}
+static int rn5t618_adp_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (val->intval > ADP_MAX_CUR)
+ return -EINVAL;
+
+ if (val->intval < CHG_MIN_CUR)
+ return -EINVAL;
+
+ ret = regmap_write(info->rn5t618->regmap, RN5T618_REGISET1,
+ TO_CUR_REG(val->intval));
+ if (ret < 0)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rn5t618_adp_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int rc5t619_usb_get_type(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_GCHGDET, &regval);
+ if (ret < 0)
+ return ret;
+
+ switch (regval & GCHGDET_TYPE_MASK) {
+ case GCHGDET_TYPE_SDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case GCHGDET_TYPE_CDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case GCHGDET_TYPE_DCP:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ }
+
+ return 0;
+}
+
static int rn5t618_usb_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
unsigned int chgstate;
+ unsigned int regval;
bool online;
int ret;
@@ -401,6 +567,28 @@ static int rn5t618_usb_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ if (!online || (info->rn5t618->variant != RC5T619))
+ return -ENODATA;
+
+ return rc5t619_usb_get_type(info, val);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CHGCTL1,
+ &regval);
+ if (ret < 0)
+ return ret;
+
+ val->intval = 0;
+ if (regval & 2) {
+ ret = regmap_read(info->rn5t618->regmap,
+ RN5T618_REGISET2,
+ &regval);
+ if (ret < 0)
+ return ret;
+
+ val->intval = FROM_CUR_REG(regval);
+ }
+ break;
default:
return -EINVAL;
}
@@ -408,12 +596,53 @@ static int rn5t618_usb_get_property(struct power_supply *psy,
return 0;
}
+static int rn5t618_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (val->intval > USB_MAX_CUR)
+ return -EINVAL;
+
+ if (val->intval < CHG_MIN_CUR)
+ return -EINVAL;
+
+ ret = regmap_write(info->rn5t618->regmap, RN5T618_REGISET2,
+ 0xE0 | TO_CUR_REG(val->intval));
+ if (ret < 0)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rn5t618_usb_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct power_supply_desc rn5t618_battery_desc = {
.name = "rn5t618-battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = rn5t618_battery_props,
.num_properties = ARRAY_SIZE(rn5t618_battery_props),
.get_property = rn5t618_battery_get_property,
+ .set_property = rn5t618_battery_set_property,
+ .property_is_writeable = rn5t618_battery_property_is_writeable,
};
static const struct power_supply_desc rn5t618_adp_desc = {
@@ -422,14 +651,20 @@ static const struct power_supply_desc rn5t618_adp_desc = {
.properties = rn5t618_adp_props,
.num_properties = ARRAY_SIZE(rn5t618_adp_props),
.get_property = rn5t618_adp_get_property,
+ .set_property = rn5t618_adp_set_property,
+ .property_is_writeable = rn5t618_adp_property_is_writeable,
};
static const struct power_supply_desc rn5t618_usb_desc = {
.name = "rn5t618-usb",
.type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = rn5t618_usb_types,
+ .num_usb_types = ARRAY_SIZE(rn5t618_usb_types),
.properties = rn5t618_usb_props,
.num_properties = ARRAY_SIZE(rn5t618_usb_props),
.get_property = rn5t618_usb_get_property,
+ .set_property = rn5t618_usb_set_property,
+ .property_is_writeable = rn5t618_usb_property_is_writeable,
};
static irqreturn_t rn5t618_charger_irq(int irq, void *data)
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index f330452341f0..9ad0afe83d1b 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -164,9 +164,16 @@ static const struct i2c_device_id rt5033_battery_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5033_battery_id);
+static const struct of_device_id rt5033_battery_of_match[] = {
+ { .compatible = "richtek,rt5033-battery", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rt5033_battery_of_match);
+
static struct i2c_driver rt5033_battery_driver = {
.driver = {
.name = "rt5033-battery",
+ .of_match_table = rt5033_battery_of_match,
},
.probe = rt5033_battery_probe,
.remove = rt5033_battery_remove,
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 8d7a10730e43..f84dbaab283a 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -189,6 +189,14 @@ static const enum power_supply_property sbs_properties[] = {
/* Supports special manufacturer commands from TI BQ20Z65 and BQ20Z75 IC. */
#define SBS_FLAGS_TI_BQ20ZX5 BIT(0)
+static const enum power_supply_property string_properties[] = {
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+#define NR_STRING_BUFFERS ARRAY_SIZE(string_properties)
+
struct sbs_info {
struct i2c_client *client;
struct power_supply *power_supply;
@@ -202,11 +210,32 @@ struct sbs_info {
struct delayed_work work;
struct mutex mode_lock;
u32 flags;
+ int technology;
+ char strings[NR_STRING_BUFFERS][I2C_SMBUS_BLOCK_MAX + 1];
};
-static char model_name[I2C_SMBUS_BLOCK_MAX + 1];
-static char manufacturer[I2C_SMBUS_BLOCK_MAX + 1];
-static char chemistry[I2C_SMBUS_BLOCK_MAX + 1];
+static char *sbs_get_string_buf(struct sbs_info *chip,
+ enum power_supply_property psp)
+{
+ int i = 0;
+
+ for (i = 0; i < NR_STRING_BUFFERS; i++)
+ if (string_properties[i] == psp)
+ return chip->strings[i];
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void sbs_invalidate_cached_props(struct sbs_info *chip)
+{
+ int i = 0;
+
+ chip->technology = -1;
+
+ for (i = 0; i < NR_STRING_BUFFERS; i++)
+ chip->strings[i][0] = 0;
+}
+
static bool force_load;
static int sbs_read_word_data(struct i2c_client *client, u8 address);
@@ -244,6 +273,7 @@ static int sbs_update_presence(struct sbs_info *chip, bool is_present)
chip->is_present = false;
/* Disable PEC when no device is present */
client->flags &= ~I2C_CLIENT_PEC;
+ sbs_invalidate_cached_props(chip);
return 0;
}
@@ -640,17 +670,45 @@ static int sbs_get_battery_property(struct i2c_client *client,
return 0;
}
-static int sbs_get_battery_string_property(struct i2c_client *client,
- int reg_offset, enum power_supply_property psp, char *val)
+static int sbs_get_property_index(struct i2c_client *client,
+ enum power_supply_property psp)
{
- s32 ret;
+ int count;
- ret = sbs_read_string_data(client, sbs_data[reg_offset].addr, val);
+ for (count = 0; count < ARRAY_SIZE(sbs_data); count++)
+ if (psp == sbs_data[count].psp)
+ return count;
- if (ret < 0)
- return ret;
+ dev_warn(&client->dev,
+ "%s: Invalid Property - %d\n", __func__, psp);
- return 0;
+ return -EINVAL;
+}
+
+static const char *sbs_get_constant_string(struct sbs_info *chip,
+ enum power_supply_property psp)
+{
+ int ret;
+ char *buf;
+ u8 addr;
+
+ buf = sbs_get_string_buf(chip, psp);
+ if (IS_ERR(buf))
+ return buf;
+
+ if (!buf[0]) {
+ ret = sbs_get_property_index(chip->client, psp);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ addr = sbs_data[ret].addr;
+
+ ret = sbs_read_string_data(chip->client, addr, buf);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ return buf;
}
static void sbs_unit_adjustment(struct i2c_client *client,
@@ -773,48 +831,36 @@ static int sbs_get_battery_serial_number(struct i2c_client *client,
return 0;
}
-static int sbs_get_property_index(struct i2c_client *client,
- enum power_supply_property psp)
-{
- int count;
- for (count = 0; count < ARRAY_SIZE(sbs_data); count++)
- if (psp == sbs_data[count].psp)
- return count;
-
- dev_warn(&client->dev,
- "%s: Invalid Property - %d\n", __func__, psp);
-
- return -EINVAL;
-}
-
-static int sbs_get_chemistry(struct i2c_client *client,
+static int sbs_get_chemistry(struct sbs_info *chip,
union power_supply_propval *val)
{
- enum power_supply_property psp = POWER_SUPPLY_PROP_TECHNOLOGY;
- int ret;
+ const char *chemistry;
- ret = sbs_get_property_index(client, psp);
- if (ret < 0)
- return ret;
+ if (chip->technology != -1) {
+ val->intval = chip->technology;
+ return 0;
+ }
- ret = sbs_get_battery_string_property(client, ret, psp,
- chemistry);
- if (ret < 0)
- return ret;
+ chemistry = sbs_get_constant_string(chip, POWER_SUPPLY_PROP_TECHNOLOGY);
+
+ if (IS_ERR(chemistry))
+ return PTR_ERR(chemistry);
if (!strncasecmp(chemistry, "LION", 4))
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ chip->technology = POWER_SUPPLY_TECHNOLOGY_LION;
else if (!strncasecmp(chemistry, "LiP", 3))
- val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ chip->technology = POWER_SUPPLY_TECHNOLOGY_LIPO;
else if (!strncasecmp(chemistry, "NiCd", 4))
- val->intval = POWER_SUPPLY_TECHNOLOGY_NiCd;
+ chip->technology = POWER_SUPPLY_TECHNOLOGY_NiCd;
else if (!strncasecmp(chemistry, "NiMH", 4))
- val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
+ chip->technology = POWER_SUPPLY_TECHNOLOGY_NiMH;
else
- val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ chip->technology = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+
+ if (chip->technology == POWER_SUPPLY_TECHNOLOGY_UNKNOWN)
+ dev_warn(&chip->client->dev, "Unknown chemistry: %s\n", chemistry);
- if (val->intval == POWER_SUPPLY_TECHNOLOGY_UNKNOWN)
- dev_warn(&client->dev, "Unknown chemistry: %s\n", chemistry);
+ val->intval = chip->technology;
return 0;
}
@@ -858,6 +904,7 @@ static int sbs_get_property(struct power_supply *psy,
int ret = 0;
struct sbs_info *chip = power_supply_get_drvdata(psy);
struct i2c_client *client = chip->client;
+ const char *str;
if (chip->gpio_detect) {
ret = gpiod_get_value_cansleep(chip->gpio_detect);
@@ -883,7 +930,7 @@ static int sbs_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- ret = sbs_get_chemistry(client, val);
+ ret = sbs_get_chemistry(chip, val);
if (ret < 0)
break;
@@ -935,23 +982,12 @@ static int sbs_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- ret = sbs_get_property_index(client, psp);
- if (ret < 0)
- break;
-
- ret = sbs_get_battery_string_property(client, ret, psp,
- model_name);
- val->strval = model_name;
- break;
-
case POWER_SUPPLY_PROP_MANUFACTURER:
- ret = sbs_get_property_index(client, psp);
- if (ret < 0)
- break;
-
- ret = sbs_get_battery_string_property(client, ret, psp,
- manufacturer);
- val->strval = manufacturer;
+ str = sbs_get_constant_string(chip, psp);
+ if (IS_ERR(str))
+ ret = PTR_ERR(str);
+ else
+ val->strval = str;
break;
case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
@@ -1098,6 +1134,7 @@ static int sbs_probe(struct i2c_client *client)
psy_cfg.of_node = client->dev.of_node;
psy_cfg.drv_data = chip;
chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ sbs_invalidate_cached_props(chip);
mutex_init(&chip->mode_lock);
/* use pdata if available, fall back to DT properties,
diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
index 335cb857ef30..288b79836c13 100644
--- a/drivers/power/supply/sc2731_charger.c
+++ b/drivers/power/supply/sc2731_charger.c
@@ -524,6 +524,7 @@ static const struct of_device_id sc2731_charger_of_match[] = {
{ .compatible = "sprd,sc2731-charger", },
{ }
};
+MODULE_DEVICE_TABLE(of, sc2731_charger_of_match);
static struct platform_driver sc2731_charger_driver = {
.driver = {
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index 9c627618c224..1ae8374e1ceb 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -1342,6 +1342,7 @@ static const struct of_device_id sc27xx_fgu_of_match[] = {
{ .compatible = "sprd,sc2731-fgu", },
{ }
};
+MODULE_DEVICE_TABLE(of, sc27xx_fgu_of_match);
static struct platform_driver sc27xx_fgu_driver = {
.probe = sc27xx_fgu_probe,
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index 3376f42d46c3..df240420f2de 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index 7efa431a62b2..5ec2e6bb2465 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -345,6 +345,16 @@ static u32 spwr_notify_bat(struct ssam_event_notifier *nf, const struct ssam_eve
struct spwr_battery_device *bat = container_of(nf, struct spwr_battery_device, notif);
int status;
+ /*
+ * We cannot use strict matching when registering the notifier as the
+ * EC expects us to register it against instance ID 0. Strict matching
+ * would thus drop events, as those may have non-zero instance IDs in
+ * this subsystem. So we need to check the instance ID of the event
+ * here manually.
+ */
+ if (event->instance_id != bat->sdev->uid.instance)
+ return 0;
+
dev_dbg(&bat->sdev->dev, "power event (cid = %#04x, iid = %#04x, tid = %#04x)\n",
event->command_id, event->instance_id, event->target_id);
@@ -720,8 +730,8 @@ static void spwr_battery_init(struct spwr_battery_device *bat, struct ssam_devic
bat->notif.base.fn = spwr_notify_bat;
bat->notif.event.reg = registry;
bat->notif.event.id.target_category = sdev->uid.category;
- bat->notif.event.id.instance = 0;
- bat->notif.event.mask = SSAM_EVENT_MASK_STRICT;
+ bat->notif.event.id.instance = 0; /* need to register with instance 0 */
+ bat->notif.event.mask = SSAM_EVENT_MASK_TARGET;
bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
bat->psy_desc.name = bat->name;
diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
index 81a5b79822c9..a060c36c7766 100644
--- a/drivers/power/supply/surface_charger.c
+++ b/drivers/power/supply/surface_charger.c
@@ -66,7 +66,7 @@ struct spwr_ac_device {
static int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
{
- u32 old = ac->state;
+ __le32 old = ac->state;
int status;
lockdep_assert_held(&ac->lock);
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index bf26cc56b863..d73c4c2ed4e1 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -112,12 +112,13 @@ static int __init pps_tty_init(void)
/* Init PPS_TTY data */
pps_ldisc_ops.owner = THIS_MODULE;
+ pps_ldisc_ops.num = N_PPS;
pps_ldisc_ops.name = "pps_tty";
pps_ldisc_ops.dcd_change = pps_tty_dcd_change;
pps_ldisc_ops.open = pps_tty_open;
pps_ldisc_ops.close = pps_tty_close;
- err = tty_register_ldisc(N_PPS, &pps_ldisc_ops);
+ err = tty_register_ldisc(&pps_ldisc_ops);
if (err)
pr_err("can't register PPS line discipline\n");
else
@@ -128,13 +129,7 @@ static int __init pps_tty_init(void)
static void __exit pps_tty_cleanup(void)
{
- int err;
-
- err = tty_unregister_ldisc(N_PPS);
- if (err)
- pr_err("can't unregister PPS line discipline\n");
- else
- pr_info("PPS line discipline removed\n");
+ tty_unregister_ldisc(&pps_ldisc_ops);
}
module_init(pps_tty_init);
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index e34ae6a442c7..6328abd51ffa 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -358,7 +358,7 @@ static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);
if (result) {
- dev_dbg(&dev->core, "%s:%d: lv1_write_virtual_uart failed: "
+ dev_warn(&dev->core, "%s:%d: lv1_write_virtual_uart failed: "
"%s\n", __func__, __LINE__, ps3_result(result));
return result;
}
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 9d66257e1da5..516e6d14d32e 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -217,9 +217,9 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
/* send pkt */
res = ps3av_vuart_write(ps3av->dev, send_buf, write_len);
if (res < 0) {
- dev_dbg(&ps3av->dev->core,
- "%s: ps3av_vuart_write() failed (result=%d)\n",
- __func__, res);
+ dev_warn(&ps3av->dev->core,
+ "%s:%d: ps3av_vuart_write() failed: %s\n", __func__,
+ __LINE__, ps3_result(res));
return res;
}
@@ -230,9 +230,9 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
res = ps3av_vuart_read(ps3av->dev, recv_buf, PS3AV_HDR_SIZE,
timeout);
if (res != PS3AV_HDR_SIZE) {
- dev_dbg(&ps3av->dev->core,
- "%s: ps3av_vuart_read() failed (result=%d)\n",
- __func__, res);
+ dev_warn(&ps3av->dev->core,
+ "%s:%d: ps3av_vuart_read() failed: %s\n", __func__,
+ __LINE__, ps3_result(res));
return res;
}
@@ -240,9 +240,9 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
res = ps3av_vuart_read(ps3av->dev, &recv_buf->cid,
recv_buf->size, timeout);
if (res < 0) {
- dev_dbg(&ps3av->dev->core,
- "%s: ps3av_vuart_read() failed (result=%d)\n",
- __func__, res);
+ dev_warn(&ps3av->dev->core,
+ "%s:%d: ps3av_vuart_read() failed: %s\n", __func__,
+ __LINE__, ps3_result(res));
return res;
}
res += PS3AV_HDR_SIZE; /* total len */
@@ -251,8 +251,8 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
} while (event);
if ((cmd | PS3AV_REPLY_BIT) != recv_buf->cid) {
- dev_dbg(&ps3av->dev->core, "%s: reply err (result=%x)\n",
- __func__, recv_buf->cid);
+ dev_warn(&ps3av->dev->core, "%s:%d: reply err: %x\n", __func__,
+ __LINE__, recv_buf->cid);
return -EINVAL;
}
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 8c20e524e9ad..e085c255da0c 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
- depends on HAS_IOMEM && NET
+ depends on HAS_IOMEM && PCI
+ depends on NET
imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 8673d1743faa..28a6fe342d3e 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -3,7 +3,7 @@
# Makefile for PTP 1588 clock support.
#
-ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o
ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o
ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index a23a37a4d5dc..4dfc52e06704 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -24,10 +24,11 @@
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
+struct class *ptp_class;
+
/* private globals */
static dev_t ptp_devt;
-static struct class *ptp_class;
static DEFINE_IDA(ptp_clocks_map);
@@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ if (ptp_vclock_in_use(ptp)) {
+ pr_err("ptp: virtual clock in use\n");
+ return -EBUSY;
+ }
+
return ptp->info->settime64(ptp->info, tp);
}
@@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
struct ptp_clock_info *ops;
int err = -EOPNOTSUPP;
+ if (ptp_vclock_in_use(ptp)) {
+ pr_err("ptp: virtual clock in use\n");
+ return -EBUSY;
+ }
+
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
@@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev)
ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
+ mutex_destroy(&ptp->n_vclocks_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
{
struct ptp_clock *ptp;
int err = 0, index, major = MAJOR(ptp_devt);
+ size_t size;
if (info->n_alarm > PTP_MAX_ALARMS)
return ERR_PTR(-EINVAL);
@@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
spin_lock_init(&ptp->tsevq.lock);
mutex_init(&ptp->tsevq_mux);
mutex_init(&ptp->pincfg_mux);
+ mutex_init(&ptp->n_vclocks_mux);
init_waitqueue_head(&ptp->tsev_wq);
if (ptp->info->do_aux_work) {
@@ -218,7 +232,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to create ptp aux_worker %d\n", err);
goto kworker_err;
}
- ptp->pps_source->lookup_cookie = ptp;
+ }
+
+ /* PTP virtual clock is being registered under physical clock */
+ if (parent && parent->class && parent->class->name &&
+ strcmp(parent->class->name, "ptp") == 0)
+ ptp->is_virtual_clock = true;
+
+ if (!ptp->is_virtual_clock) {
+ ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;
+
+ size = sizeof(int) * ptp->max_vclocks;
+ ptp->vclock_index = kzalloc(size, GFP_KERNEL);
+ if (!ptp->vclock_index) {
+ err = -ENOMEM;
+ goto no_mem_for_vclocks;
+ }
}
err = ptp_populate_pin_groups(ptp);
@@ -238,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to register pps source\n");
goto no_pps;
}
+ ptp->pps_source->lookup_cookie = ptp;
}
/* Initialize a new device of our class in our clock structure. */
@@ -265,11 +295,14 @@ no_clock:
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
+ kfree(ptp->vclock_index);
+no_mem_for_vclocks:
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
+ mutex_destroy(&ptp->n_vclocks_mux);
ida_simple_remove(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
@@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register);
int ptp_clock_unregister(struct ptp_clock *ptp)
{
+ if (ptp_vclock_in_use(ptp)) {
+ pr_err("ptp: virtual clock in use\n");
+ return -EBUSY;
+ }
+
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ kfree(ptp->vclock_index);
+
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 6b97155148f1..dba6be477067 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -18,6 +18,7 @@
#define PTP_MAX_TIMESTAMPS 128
#define PTP_BUF_TIMESTAMPS 30
+#define PTP_DEFAULT_MAX_VCLOCKS 20
struct timestamp_event_queue {
struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
@@ -46,6 +47,24 @@ struct ptp_clock {
const struct attribute_group *pin_attr_groups[2];
struct kthread_worker *kworker;
struct kthread_delayed_work aux_work;
+ unsigned int max_vclocks;
+ unsigned int n_vclocks;
+ int *vclock_index;
+ struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */
+ bool is_virtual_clock;
+};
+
+#define info_to_vclock(d) container_of((d), struct ptp_vclock, info)
+#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc)
+#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work)
+
+struct ptp_vclock {
+ struct ptp_clock *pclock;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t lock; /* protects tc/cc */
};
/*
@@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
}
+/* Check if ptp virtual clock is in use */
+static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
+{
+ bool in_use = false;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return true;
+
+ if (!ptp->is_virtual_clock && ptp->n_vclocks)
+ in_use = true;
+
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return in_use;
+}
+
+extern struct class *ptp_class;
+
/*
* see ptp_chardev.c
*/
@@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[];
int ptp_populate_pin_groups(struct ptp_clock *ptp);
void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
+struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock);
+void ptp_vclock_unregister(struct ptp_vclock *vclock);
#endif
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index be076a91e20e..41b92dc2f011 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -3,6 +3,7 @@
* PTP 1588 clock support - sysfs interface.
*
* Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright 2021 NXP
*/
#include <linux/capability.h>
#include <linux/slab.h>
@@ -148,6 +149,159 @@ out:
}
static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
+static int unregister_vclock(struct device *dev, void *data)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_clock_info *info = ptp->info;
+ struct ptp_vclock *vclock;
+ u32 *num = data;
+
+ vclock = info_to_vclock(info);
+ dev_info(dev->parent, "delete virtual clock ptp%d\n",
+ vclock->clock->index);
+
+ ptp_vclock_unregister(vclock);
+ (*num)--;
+
+	/* Returning nonzero stops the iteration here; it is not an error. */
+ if (*num == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t n_vclocks_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ ssize_t size;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return -ERESTARTSYS;
+
+ size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return size;
+}
+
+static ssize_t n_vclocks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ struct ptp_vclock *vclock;
+ int err = -EINVAL;
+ u32 num, i;
+
+ if (kstrtou32(buf, 0, &num))
+ return err;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return -ERESTARTSYS;
+
+ if (num > ptp->max_vclocks) {
+ dev_err(dev, "max value is %d\n", ptp->max_vclocks);
+ goto out;
+ }
+
+ /* Need to create more vclocks */
+ if (num > ptp->n_vclocks) {
+ for (i = 0; i < num - ptp->n_vclocks; i++) {
+ vclock = ptp_vclock_register(ptp);
+ if (!vclock)
+ goto out;
+
+ *(ptp->vclock_index + ptp->n_vclocks + i) =
+ vclock->clock->index;
+
+ dev_info(dev, "new virtual clock ptp%d\n",
+ vclock->clock->index);
+ }
+ }
+
+ /* Need to delete vclocks */
+ if (num < ptp->n_vclocks) {
+ i = ptp->n_vclocks - num;
+ device_for_each_child_reverse(dev, &i,
+ unregister_vclock);
+
+ for (i = 1; i <= ptp->n_vclocks - num; i++)
+ *(ptp->vclock_index + ptp->n_vclocks - i) = -1;
+ }
+
+ if (num == 0)
+ dev_info(dev, "only physical clock in use now\n");
+ else
+ dev_info(dev, "guarantee physical clock free running\n");
+
+ ptp->n_vclocks = num;
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return count;
+out:
+ mutex_unlock(&ptp->n_vclocks_mux);
+ return err;
+}
+static DEVICE_ATTR_RW(n_vclocks);
+
+static ssize_t max_vclocks_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ ssize_t size;
+
+ size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+
+ return size;
+}
+
+static ssize_t max_vclocks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int *vclock_index;
+ int err = -EINVAL;
+ size_t size;
+ u32 max;
+
+ if (kstrtou32(buf, 0, &max) || max == 0)
+ return -EINVAL;
+
+ if (max == ptp->max_vclocks)
+ return count;
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+ return -ERESTARTSYS;
+
+ if (max < ptp->n_vclocks)
+ goto out;
+
+ size = sizeof(int) * max;
+ vclock_index = kzalloc(size, GFP_KERNEL);
+ if (!vclock_index) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ size = sizeof(int) * ptp->n_vclocks;
+ memcpy(vclock_index, ptp->vclock_index, size);
+
+ kfree(ptp->vclock_index);
+ ptp->vclock_index = vclock_index;
+ ptp->max_vclocks = max;
+
+ mutex_unlock(&ptp->n_vclocks_mux);
+
+ return count;
+out:
+ mutex_unlock(&ptp->n_vclocks_mux);
+ return err;
+}
+static DEVICE_ATTR_RW(max_vclocks);
+
static struct attribute *ptp_attrs[] = {
&dev_attr_clock_name.attr,
@@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = {
&dev_attr_fifo.attr,
&dev_attr_period.attr,
&dev_attr_pps_enable.attr,
+ &dev_attr_n_vclocks.attr,
+ &dev_attr_max_vclocks.attr,
NULL
};
@@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj,
} else if (attr == &dev_attr_pps_enable.attr) {
if (!info->pps)
mode = 0;
+ } else if (attr == &dev_attr_n_vclocks.attr ||
+ attr == &dev_attr_max_vclocks.attr) {
+ if (ptp->is_virtual_clock)
+ mode = 0;
}
return mode;
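In use, the two new attributes drive virtual clock creation from userspace: writing N to n_vclocks registers (or deletes down to) N virtual clocks under the physical clock, bounded by max_vclocks. A minimal sketch (assumes the physical clock is ptp0; the indices of the created ptpN_virt clocks vary):

    /* Userspace sketch: create two virtual clocks under ptp0. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/ptp/ptp0/n_vclocks", "w");

            if (!f)
                    return 1;
            fprintf(f, "2\n");        /* kernel logs "new virtual clock ptpN" per clock */
            fclose(f);
            return 0;
    }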
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
new file mode 100644
index 000000000000..e0f87c57749a
--- /dev/null
+++ b/drivers/ptp/ptp_vclock.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP virtual clock driver
+ *
+ * Copyright 2021 NXP
+ */
+#include <linux/slab.h>
+#include "ptp_private.h"
+
+#define PTP_VCLOCK_CC_SHIFT 31
+#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT)
+#define PTP_VCLOCK_FADJ_SHIFT 9
+#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL
+#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2)
+
+static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ unsigned long flags;
+ s64 adj;
+
+ adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
+ adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ timecounter_read(&vclock->tc);
+ vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ return 0;
+}
+
+static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ timecounter_adjtime(&vclock->tc, delta);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ return 0;
+}
+
+static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ ns = timecounter_read(&vclock->tc);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int ptp_vclock_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ timecounter_init(&vclock->tc, &vclock->cc, ns);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ return 0;
+}
+
+static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
+{
+ struct ptp_vclock *vclock = info_to_vclock(ptp);
+ struct timespec64 ts;
+
+ ptp_vclock_gettime(&vclock->info, &ts);
+
+ return PTP_VCLOCK_REFRESH_INTERVAL;
+}
+
+static const struct ptp_clock_info ptp_vclock_info = {
+ .owner = THIS_MODULE,
+ .name = "ptp virtual clock",
+ /* The maximum ppb value that long scaled_ppm can support */
+ .max_adj = 32767999,
+ .adjfine = ptp_vclock_adjfine,
+ .adjtime = ptp_vclock_adjtime,
+ .gettime64 = ptp_vclock_gettime,
+ .settime64 = ptp_vclock_settime,
+ .do_aux_work = ptp_vclock_refresh,
+};
+
+static u64 ptp_vclock_read(const struct cyclecounter *cc)
+{
+ struct ptp_vclock *vclock = cc_to_vclock(cc);
+ struct ptp_clock *ptp = vclock->pclock;
+ struct timespec64 ts = {};
+
+ if (ptp->info->gettimex64)
+ ptp->info->gettimex64(ptp->info, &ts, NULL);
+ else
+ ptp->info->gettime64(ptp->info, &ts);
+
+ return timespec64_to_ns(&ts);
+}
+
+static const struct cyclecounter ptp_vclock_cc = {
+ .read = ptp_vclock_read,
+ .mask = CYCLECOUNTER_MASK(32),
+ .mult = PTP_VCLOCK_CC_MULT,
+ .shift = PTP_VCLOCK_CC_SHIFT,
+};
+
+struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
+{
+ struct ptp_vclock *vclock;
+
+ vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
+ if (!vclock)
+ return NULL;
+
+ vclock->pclock = pclock;
+ vclock->info = ptp_vclock_info;
+ vclock->cc = ptp_vclock_cc;
+
+ snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
+ pclock->index);
+
+ spin_lock_init(&vclock->lock);
+
+ vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
+ if (IS_ERR_OR_NULL(vclock->clock)) {
+ kfree(vclock);
+ return NULL;
+ }
+
+ timecounter_init(&vclock->tc, &vclock->cc, 0);
+ ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
+
+ return vclock;
+}
+
+void ptp_vclock_unregister(struct ptp_vclock *vclock)
+{
+ ptp_clock_unregister(vclock->clock);
+ kfree(vclock);
+}
+
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{
+ char name[PTP_CLOCK_NAME_LEN] = "";
+ struct ptp_clock *ptp;
+ struct device *dev;
+ int num = 0;
+
+ if (pclock_index < 0)
+ return num;
+
+ snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
+ dev = class_find_device_by_name(ptp_class, name);
+ if (!dev)
+ return num;
+
+ ptp = dev_get_drvdata(dev);
+
+ if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
+ put_device(dev);
+ return num;
+ }
+
+ *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
+ if (!(*vclock_index))
+ goto out;
+
+ memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
+ num = ptp->n_vclocks;
+out:
+ mutex_unlock(&ptp->n_vclocks_mux);
+ put_device(dev);
+ return num;
+}
+EXPORT_SYMBOL(ptp_get_vclocks_index);
+
+void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index)
+{
+ char name[PTP_CLOCK_NAME_LEN] = "";
+ struct ptp_vclock *vclock;
+ struct ptp_clock *ptp;
+ unsigned long flags;
+ struct device *dev;
+ u64 ns;
+
+ snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index);
+ dev = class_find_device_by_name(ptp_class, name);
+ if (!dev)
+ return;
+
+ ptp = dev_get_drvdata(dev);
+ if (!ptp->is_virtual_clock) {
+ put_device(dev);
+ return;
+ }
+
+ vclock = info_to_vclock(ptp->info);
+
+ ns = ktime_to_ns(hwtstamps->hwtstamp);
+
+ spin_lock_irqsave(&vclock->lock, flags);
+ ns = timecounter_cyc2time(&vclock->tc, ns);
+ spin_unlock_irqrestore(&vclock->lock, flags);
+
+ put_device(dev);
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL(ptp_convert_timestamp);
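The adjfine() scaling in this file can be sanity-checked by hand: adj = (scaled_ppm << 9) / 15625, and since 15625 * 2^22 = 65536 * 10^6, the relative change adj / 2^31 works out to scaled_ppm / 2^16 parts per million, which is exactly the unit of scaled_ppm. A standalone arithmetic check (not kernel code):

    /* Verify that the vclock adjfine scaling yields scaled_ppm/65536 ppm. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            long scaled_ppm = 65536;                     /* exactly 1 ppm */
            int64_t adj = ((int64_t)scaled_ppm << 9) / 15625;
            double rel = (double)adj / (1u << 31);       /* relative change of cc.mult */

            printf("adj=%lld rel=%.9f (expect ~1e-06)\n", (long long)adj, rel);
            return 0;
    }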
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index c4d5c0667137..35e894f4a379 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -126,8 +126,7 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- /* check, whether the driver supports a third cell for flags */
- if (pc->of_pwm_n_cells < 3)
+ if (pc->of_pwm_n_cells < 2)
return ERR_PTR(-EINVAL);
/* flags in the third cell are optional */
@@ -144,46 +143,29 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
pwm->args.period = args->args[1];
pwm->args.polarity = PWM_POLARITY_NORMAL;
- if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
- pwm->args.polarity = PWM_POLARITY_INVERSED;
+ if (pc->of_pwm_n_cells >= 3) {
+ if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+ }
return pwm;
}
EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
-static struct pwm_device *
-of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- /* sanity check driver support */
- if (pc->of_pwm_n_cells < 2)
- return ERR_PTR(-EINVAL);
-
- /* all cells are required */
- if (args->args_count != pc->of_pwm_n_cells)
- return ERR_PTR(-EINVAL);
-
- if (args->args[0] >= pc->npwm)
- return ERR_PTR(-EINVAL);
-
- pwm = pwm_request_from_chip(pc, args->args[0], NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- pwm->args.period = args->args[1];
-
- return pwm;
-}
-
static void of_pwmchip_add(struct pwm_chip *chip)
{
if (!chip->dev || !chip->dev->of_node)
return;
if (!chip->of_xlate) {
- chip->of_xlate = of_pwm_simple_xlate;
- chip->of_pwm_n_cells = 2;
+ u32 pwm_cells;
+
+ if (of_property_read_u32(chip->dev->of_node, "#pwm-cells",
+ &pwm_cells))
+ pwm_cells = 2;
+
+ chip->of_xlate = of_pwm_xlate_with_flags;
+ chip->of_pwm_n_cells = pwm_cells;
}
of_node_get(chip->dev->of_node);
@@ -324,22 +306,10 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
*/
int pwmchip_remove(struct pwm_chip *chip)
{
- unsigned int i;
- int ret = 0;
-
pwmchip_sysfs_unexport(chip);
mutex_lock(&pwm_lock);
- for (i = 0; i < chip->npwm; i++) {
- struct pwm_device *pwm = &chip->pwms[i];
-
- if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
- ret = -EBUSY;
- goto out;
- }
- }
-
list_del_init(&chip->list);
if (IS_ENABLED(CONFIG_OF))
@@ -347,12 +317,31 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
-out:
mutex_unlock(&pwm_lock);
- return ret;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pwmchip_remove);
+static void devm_pwmchip_remove(void *data)
+{
+ struct pwm_chip *chip = data;
+
+ pwmchip_remove(chip);
+}
+
+int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
+{
+ int ret;
+
+ ret = pwmchip_add(chip);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
+}
+EXPORT_SYMBOL_GPL(devm_pwmchip_add);
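For drivers converting to the new helper, probe code typically reduces to the sketch below (the foo_* names are hypothetical, shown only to illustrate that the devres action removes the chip on unbind, so no .remove() counterpart is needed):

    /* Hypothetical consumer of devm_pwmchip_add(). */
    static int foo_pwm_probe(struct platform_device *pdev)
    {
            struct foo_pwm *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->chip.dev = &pdev->dev;
            priv->chip.ops = &foo_pwm_ops;
            priv->chip.npwm = 1;

            /* Chip is unregistered automatically when the device unbinds. */
            return devm_pwmchip_add(&pdev->dev, &priv->chip);
    }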
+
/**
* pwm_request() - request a PWM device
* @pwm: global PWM device index
@@ -554,7 +543,8 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
if (state->period == pwm->state.period &&
state->duty_cycle == pwm->state.duty_cycle &&
state->polarity == pwm->state.polarity &&
- state->enabled == pwm->state.enabled)
+ state->enabled == pwm->state.enabled &&
+ state->usage_power == pwm->state.usage_power)
return 0;
if (chip->ops->apply) {
@@ -709,14 +699,14 @@ int pwm_adjust_config(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_adjust_config);
-static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
+static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
{
struct pwm_chip *chip;
mutex_lock(&pwm_lock);
list_for_each_entry(chip, &pwm_chips, list)
- if (chip->dev && chip->dev->of_node == np) {
+ if (chip->dev && dev_fwnode(chip->dev) == fwnode) {
mutex_unlock(&pwm_lock);
return chip;
}
@@ -795,7 +785,7 @@ struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
return ERR_PTR(err);
}
- pc = of_node_to_pwmchip(args.np);
+ pc = fwnode_to_pwmchip(of_fwnode_handle(args.np));
if (IS_ERR(pc)) {
if (PTR_ERR(pc) != -EPROBE_DEFER)
pr_err("%s(): PWM chip not found\n", __func__);
@@ -837,31 +827,9 @@ put:
}
EXPORT_SYMBOL_GPL(of_pwm_get);
-#if IS_ENABLED(CONFIG_ACPI)
-static struct pwm_chip *device_to_pwmchip(struct device *dev)
-{
- struct pwm_chip *chip;
-
- mutex_lock(&pwm_lock);
-
- list_for_each_entry(chip, &pwm_chips, list) {
- struct acpi_device *adev = ACPI_COMPANION(chip->dev);
-
- if ((chip->dev == dev) || (adev && &adev->dev == dev)) {
- mutex_unlock(&pwm_lock);
- return chip;
- }
- }
-
- mutex_unlock(&pwm_lock);
-
- return ERR_PTR(-EPROBE_DEFER);
-}
-#endif
-
/**
* acpi_pwm_get() - request a PWM via parsing "pwms" property in ACPI
- * @fwnode: firmware node to get the "pwm" property from
+ * @fwnode: firmware node to get the "pwms" property from
*
* Returns the PWM device parsed from the fwnode and index specified in the
* "pwms" property or a negative error-code on failure.
@@ -876,12 +844,10 @@ static struct pwm_chip *device_to_pwmchip(struct device *dev)
* Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
* error code on failure.
*/
-static struct pwm_device *acpi_pwm_get(struct fwnode_handle *fwnode)
+static struct pwm_device *acpi_pwm_get(const struct fwnode_handle *fwnode)
{
- struct pwm_device *pwm = ERR_PTR(-ENODEV);
-#if IS_ENABLED(CONFIG_ACPI)
+ struct pwm_device *pwm;
struct fwnode_reference_args args;
- struct acpi_device *acpi;
struct pwm_chip *chip;
int ret;
@@ -891,14 +857,10 @@ static struct pwm_device *acpi_pwm_get(struct fwnode_handle *fwnode)
if (ret < 0)
return ERR_PTR(ret);
- acpi = to_acpi_device_node(args.fwnode);
- if (!acpi)
- return ERR_PTR(-EINVAL);
-
if (args.nargs < 2)
return ERR_PTR(-EPROTO);
- chip = device_to_pwmchip(&acpi->dev);
+ chip = fwnode_to_pwmchip(args.fwnode);
if (IS_ERR(chip))
return ERR_CAST(chip);
@@ -911,7 +873,6 @@ static struct pwm_device *acpi_pwm_get(struct fwnode_handle *fwnode)
if (args.nargs > 2 && args.args[2] & PWM_POLARITY_INVERTED)
pwm->args.polarity = PWM_POLARITY_INVERSED;
-#endif
return pwm;
}
@@ -967,6 +928,7 @@ void pwm_remove_table(struct pwm_lookup *table, size_t num)
*/
struct pwm_device *pwm_get(struct device *dev, const char *con_id)
{
+ const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
const char *dev_id = dev ? dev_name(dev) : NULL;
struct pwm_device *pwm;
struct pwm_chip *chip;
@@ -977,12 +939,12 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
int err;
/* look up via DT first */
- if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
- return of_pwm_get(dev, dev->of_node, con_id);
+
/* then lookup via ACPI */
- if (dev && is_acpi_node(dev->fwnode)) {
- pwm = acpi_pwm_get(dev->fwnode);
+ if (is_acpi_node(fwnode)) {
+ pwm = acpi_pwm_get(fwnode);
if (!IS_ERR(pwm) || PTR_ERR(pwm) != -ENOENT)
return pwm;
}
@@ -1103,9 +1065,9 @@ out:
}
EXPORT_SYMBOL_GPL(pwm_put);
-static void devm_pwm_release(struct device *dev, void *res)
+static void devm_pwm_release(void *pwm)
{
- pwm_put(*(struct pwm_device **)res);
+ pwm_put(pwm);
}
/**
@@ -1121,19 +1083,16 @@ static void devm_pwm_release(struct device *dev, void *res)
*/
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
{
- struct pwm_device **ptr, *pwm;
-
- ptr = devres_alloc(devm_pwm_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct pwm_device *pwm;
+ int ret;
pwm = pwm_get(dev, con_id);
- if (!IS_ERR(pwm)) {
- *ptr = pwm;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+ if (IS_ERR(pwm))
+ return pwm;
+
+ ret = devm_add_action_or_reset(dev, devm_pwm_release, pwm);
+ if (ret)
+ return ERR_PTR(ret);
return pwm;
}
@@ -1154,19 +1113,16 @@ EXPORT_SYMBOL_GPL(devm_pwm_get);
struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
const char *con_id)
{
- struct pwm_device **ptr, *pwm;
-
- ptr = devres_alloc(devm_pwm_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct pwm_device *pwm;
+ int ret;
pwm = of_pwm_get(dev, np, con_id);
- if (!IS_ERR(pwm)) {
- *ptr = pwm;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+ if (IS_ERR(pwm))
+ return pwm;
+
+ ret = devm_add_action_or_reset(dev, devm_pwm_release, pwm);
+ if (ret)
+ return ERR_PTR(ret);
return pwm;
}
@@ -1188,53 +1144,24 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id)
{
- struct pwm_device **ptr, *pwm = ERR_PTR(-ENODEV);
-
- ptr = devres_alloc(devm_pwm_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
+ struct pwm_device *pwm = ERR_PTR(-ENODEV);
+ int ret;
if (is_of_node(fwnode))
pwm = of_pwm_get(dev, to_of_node(fwnode), con_id);
else if (is_acpi_node(fwnode))
pwm = acpi_pwm_get(fwnode);
+ if (IS_ERR(pwm))
+ return pwm;
- if (!IS_ERR(pwm)) {
- *ptr = pwm;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+ ret = devm_add_action_or_reset(dev, devm_pwm_release, pwm);
+ if (ret)
+ return ERR_PTR(ret);
return pwm;
}
EXPORT_SYMBOL_GPL(devm_fwnode_pwm_get);
-static int devm_pwm_match(struct device *dev, void *res, void *data)
-{
- struct pwm_device **p = res;
-
- if (WARN_ON(!p || !*p))
- return 0;
-
- return *p == data;
-}
-
-/**
- * devm_pwm_put() - resource managed pwm_put()
- * @dev: device for PWM consumer
- * @pwm: PWM device
- *
- * Release a PWM previously allocated using devm_pwm_get(). Calling this
- * function is usually not needed because devm-allocated resources are
- * automatically released on driver detach.
- */
-void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
-{
- WARN_ON(devres_release(dev, devm_pwm_release, devm_pwm_match, pwm));
-}
-EXPORT_SYMBOL_GPL(devm_pwm_put);
-
#ifdef CONFIG_DEBUG_FS
static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
{
@@ -1259,6 +1186,9 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
seq_printf(s, " polarity: %s",
state.polarity ? "inverse" : "normal");
+ if (state.usage_power)
+ seq_puts(s, " usage_power");
+
seq_puts(s, "\n");
}
}
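Editor's note on the core changes above: a platform driver can now register with devm_pwmchip_add() and leave of_xlate/of_pwm_n_cells unset, since of_pwmchip_add() falls back to of_pwm_xlate_with_flags and reads #pwm-cells from the device tree node. A minimal probe sketch of that pattern, which the clps711x, crc and imx1 hunks below adopt; the foo_* names are made up and foo_pwm_ops is assumed to be defined elsewhere.

/* Illustrative sketch, not part of the patch. */
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm {
	struct pwm_chip chip;
	/* driver-private state would go here */
};

extern const struct pwm_ops foo_pwm_ops;	/* assumed, defined elsewhere */

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct foo_pwm *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->chip.dev = &pdev->dev;
	foo->chip.ops = &foo_pwm_ops;
	foo->chip.npwm = 1;

	/* No .remove needed: the chip is unregistered on driver unbind. */
	return devm_pwmchip_add(&pdev->dev, &foo->chip);
}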
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 6ab597e54005..4459325d3650 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -266,8 +266,6 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
chip->chip.ops = &atmel_hlcdc_pwm_ops;
chip->chip.dev = dev;
chip->chip.npwm = 1;
- chip->chip.of_xlate = of_pwm_xlate_with_flags;
- chip->chip.of_pwm_n_cells = 3;
ret = pwmchip_add(&chip->chip);
if (ret) {
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 8451d3e846be..bf398f21484d 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -469,8 +469,6 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm->chip.dev = &pdev->dev;
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
- tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags;
- tcbpwm->chip.of_pwm_n_cells = 3;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
tcbpwm->regmap = regmap;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 29b5ad03f715..a8162bae3e8a 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -436,8 +436,6 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.dev = &pdev->dev;
atmel_pwm->chip.ops = &atmel_pwm_ops;
- atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
- atmel_pwm->chip.of_pwm_n_cells = 3;
atmel_pwm->chip.npwm = 4;
ret = pwmchip_add(&atmel_pwm->chip);
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index edd2ce1760ab..0226bf697f09 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -210,8 +210,6 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
ip->chip.dev = &pdev->dev;
ip->chip.ops = &iproc_pwm_ops;
ip->chip.npwm = 4;
- ip->chip.of_xlate = of_pwm_xlate_with_flags;
- ip->chip.of_pwm_n_cells = 3;
ip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ip->base))
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 800b9edf2e71..8c85c66ea5c9 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -272,8 +272,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
kp->chip.npwm = 6;
- kp->chip.of_xlate = of_pwm_xlate_with_flags;
- kp->chip.of_pwm_n_cells = 3;
kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index fc240d5b8121..50b8594be31d 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -159,8 +159,6 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
pc->chip.npwm = 2;
- pc->chip.of_xlate = of_pwm_xlate_with_flags;
- pc->chip.of_pwm_n_cells = 3;
platform_set_drvdata(pdev, pc);
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index acb6fbc3cc32..e157273fd2f7 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -56,17 +56,17 @@ static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct berlin_pwm_chip, chip);
}
-static inline u32 berlin_pwm_readl(struct berlin_pwm_chip *chip,
+static inline u32 berlin_pwm_readl(struct berlin_pwm_chip *bpc,
unsigned int channel, unsigned long offset)
{
- return readl_relaxed(chip->base + channel * 0x10 + offset);
+ return readl_relaxed(bpc->base + channel * 0x10 + offset);
}
-static inline void berlin_pwm_writel(struct berlin_pwm_chip *chip,
+static inline void berlin_pwm_writel(struct berlin_pwm_chip *bpc,
unsigned int channel, u32 value,
unsigned long offset)
{
- writel_relaxed(value, chip->base + channel * 0x10 + offset);
+ writel_relaxed(value, bpc->base + channel * 0x10 + offset);
}
static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -87,15 +87,15 @@ static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
kfree(channel);
}
-static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
- int duty_ns, int period_ns)
+static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ u64 duty_ns, u64 period_ns)
{
- struct berlin_pwm_chip *pwm = to_berlin_pwm_chip(chip);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
bool prescale_4096 = false;
u32 value, duty, period;
u64 cycles;
- cycles = clk_get_rate(pwm->clk);
+ cycles = clk_get_rate(bpc->clk);
cycles *= period_ns;
do_div(cycles, NSEC_PER_SEC);
@@ -112,68 +112,98 @@ static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
do_div(cycles, period_ns);
duty = cycles;
- value = berlin_pwm_readl(pwm, pwm_dev->hwpwm, BERLIN_PWM_CONTROL);
+ value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_CONTROL);
if (prescale_4096)
value |= BERLIN_PWM_PRESCALE_4096;
else
value &= ~BERLIN_PWM_PRESCALE_4096;
- berlin_pwm_writel(pwm, pwm_dev->hwpwm, value, BERLIN_PWM_CONTROL);
+ berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_CONTROL);
- berlin_pwm_writel(pwm, pwm_dev->hwpwm, duty, BERLIN_PWM_DUTY);
- berlin_pwm_writel(pwm, pwm_dev->hwpwm, period, BERLIN_PWM_TCNT);
+ berlin_pwm_writel(bpc, pwm->hwpwm, duty, BERLIN_PWM_DUTY);
+ berlin_pwm_writel(bpc, pwm->hwpwm, period, BERLIN_PWM_TCNT);
return 0;
}
static int berlin_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm_dev,
+ struct pwm_device *pwm,
enum pwm_polarity polarity)
{
- struct berlin_pwm_chip *pwm = to_berlin_pwm_chip(chip);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
u32 value;
- value = berlin_pwm_readl(pwm, pwm_dev->hwpwm, BERLIN_PWM_CONTROL);
+ value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_CONTROL);
if (polarity == PWM_POLARITY_NORMAL)
value &= ~BERLIN_PWM_INVERT_POLARITY;
else
value |= BERLIN_PWM_INVERT_POLARITY;
- berlin_pwm_writel(pwm, pwm_dev->hwpwm, value, BERLIN_PWM_CONTROL);
+ berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_CONTROL);
return 0;
}
-static int berlin_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm_dev)
+static int berlin_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct berlin_pwm_chip *pwm = to_berlin_pwm_chip(chip);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
u32 value;
- value = berlin_pwm_readl(pwm, pwm_dev->hwpwm, BERLIN_PWM_EN);
+ value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_EN);
value |= BERLIN_PWM_ENABLE;
- berlin_pwm_writel(pwm, pwm_dev->hwpwm, value, BERLIN_PWM_EN);
+ berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_EN);
return 0;
}
static void berlin_pwm_disable(struct pwm_chip *chip,
- struct pwm_device *pwm_dev)
+ struct pwm_device *pwm)
{
- struct berlin_pwm_chip *pwm = to_berlin_pwm_chip(chip);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
u32 value;
- value = berlin_pwm_readl(pwm, pwm_dev->hwpwm, BERLIN_PWM_EN);
+ value = berlin_pwm_readl(bpc, pwm->hwpwm, BERLIN_PWM_EN);
value &= ~BERLIN_PWM_ENABLE;
- berlin_pwm_writel(pwm, pwm_dev->hwpwm, value, BERLIN_PWM_EN);
+ berlin_pwm_writel(bpc, pwm->hwpwm, value, BERLIN_PWM_EN);
+}
+
+static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity) {
+ if (enabled) {
+ berlin_pwm_disable(chip, pwm);
+ enabled = false;
+ }
+
+ err = berlin_pwm_set_polarity(chip, pwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ berlin_pwm_disable(chip, pwm);
+ return 0;
+ }
+
+ err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!enabled)
+ return berlin_pwm_enable(chip, pwm);
+
+ return 0;
}
static const struct pwm_ops berlin_pwm_ops = {
.request = berlin_pwm_request,
.free = berlin_pwm_free,
- .config = berlin_pwm_config,
- .set_polarity = berlin_pwm_set_polarity,
- .enable = berlin_pwm_enable,
- .disable = berlin_pwm_disable,
+ .apply = berlin_pwm_apply,
.owner = THIS_MODULE,
};
@@ -185,99 +215,97 @@ MODULE_DEVICE_TABLE(of, berlin_pwm_match);
static int berlin_pwm_probe(struct platform_device *pdev)
{
- struct berlin_pwm_chip *pwm;
+ struct berlin_pwm_chip *bpc;
int ret;
- pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (!pwm)
+ bpc = devm_kzalloc(&pdev->dev, sizeof(*bpc), GFP_KERNEL);
+ if (!bpc)
return -ENOMEM;
- pwm->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pwm->base))
- return PTR_ERR(pwm->base);
+ bpc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bpc->base))
+ return PTR_ERR(bpc->base);
- pwm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm->clk))
- return PTR_ERR(pwm->clk);
+ bpc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bpc->clk))
+ return PTR_ERR(bpc->clk);
- ret = clk_prepare_enable(pwm->clk);
+ ret = clk_prepare_enable(bpc->clk);
if (ret)
return ret;
- pwm->chip.dev = &pdev->dev;
- pwm->chip.ops = &berlin_pwm_ops;
- pwm->chip.npwm = 4;
- pwm->chip.of_xlate = of_pwm_xlate_with_flags;
- pwm->chip.of_pwm_n_cells = 3;
+ bpc->chip.dev = &pdev->dev;
+ bpc->chip.ops = &berlin_pwm_ops;
+ bpc->chip.npwm = 4;
- ret = pwmchip_add(&pwm->chip);
+ ret = pwmchip_add(&bpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- clk_disable_unprepare(pwm->clk);
+ clk_disable_unprepare(bpc->clk);
return ret;
}
- platform_set_drvdata(pdev, pwm);
+ platform_set_drvdata(pdev, bpc);
return 0;
}
static int berlin_pwm_remove(struct platform_device *pdev)
{
- struct berlin_pwm_chip *pwm = platform_get_drvdata(pdev);
- int ret;
+ struct berlin_pwm_chip *bpc = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&bpc->chip);
- ret = pwmchip_remove(&pwm->chip);
- clk_disable_unprepare(pwm->clk);
+ clk_disable_unprepare(bpc->clk);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
static int berlin_pwm_suspend(struct device *dev)
{
- struct berlin_pwm_chip *pwm = dev_get_drvdata(dev);
+ struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
unsigned int i;
- for (i = 0; i < pwm->chip.npwm; i++) {
+ for (i = 0; i < bpc->chip.npwm; i++) {
struct berlin_pwm_channel *channel;
- channel = pwm_get_chip_data(&pwm->chip.pwms[i]);
+ channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
if (!channel)
continue;
- channel->enable = berlin_pwm_readl(pwm, i, BERLIN_PWM_ENABLE);
- channel->ctrl = berlin_pwm_readl(pwm, i, BERLIN_PWM_CONTROL);
- channel->duty = berlin_pwm_readl(pwm, i, BERLIN_PWM_DUTY);
- channel->tcnt = berlin_pwm_readl(pwm, i, BERLIN_PWM_TCNT);
+ channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
+ channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
+ channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY);
+ channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT);
}
- clk_disable_unprepare(pwm->clk);
+ clk_disable_unprepare(bpc->clk);
return 0;
}
static int berlin_pwm_resume(struct device *dev)
{
- struct berlin_pwm_chip *pwm = dev_get_drvdata(dev);
+ struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
unsigned int i;
int ret;
- ret = clk_prepare_enable(pwm->clk);
+ ret = clk_prepare_enable(bpc->clk);
if (ret)
return ret;
- for (i = 0; i < pwm->chip.npwm; i++) {
+ for (i = 0; i < bpc->chip.npwm; i++) {
struct berlin_pwm_channel *channel;
- channel = pwm_get_chip_data(&pwm->chip.pwms[i]);
+ channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
if (!channel)
continue;
- berlin_pwm_writel(pwm, i, channel->ctrl, BERLIN_PWM_CONTROL);
- berlin_pwm_writel(pwm, i, channel->duty, BERLIN_PWM_DUTY);
- berlin_pwm_writel(pwm, i, channel->tcnt, BERLIN_PWM_TCNT);
- berlin_pwm_writel(pwm, i, channel->enable, BERLIN_PWM_ENABLE);
+ berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
+ berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
+ berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT);
+ berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE);
}
return 0;
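Editor's note: the berlin conversion above, like the ep93xx, spear and tiecap conversions further down, follows the same ordering when implementing .apply(): change polarity only while the output is off, short-circuit the disable case, then configure and finally enable. A condensed sketch of that ordering, written against hypothetical foo_* helpers standing in for the driver-specific register accessors.

/* Illustrative sketch, not part of the patch. */
#include <linux/pwm.h>

/* Hypothetical driver-specific helpers, assumed to exist. */
static int foo_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
				enum pwm_polarity polarity);
static int foo_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
			  u64 duty_ns, u64 period_ns);
static int foo_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm);
static void foo_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm);

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	bool enabled = pwm->state.enabled;
	int err;

	if (state->polarity != pwm->state.polarity) {
		/* Polarity may only change while the hardware is disabled. */
		if (enabled) {
			foo_pwm_disable(chip, pwm);
			enabled = false;
		}

		err = foo_pwm_set_polarity(chip, pwm, state->polarity);
		if (err)
			return err;
	}

	if (!state->enabled) {
		if (enabled)
			foo_pwm_disable(chip, pwm);
		return 0;
	}

	err = foo_pwm_config(chip, pwm, state->duty_cycle, state->period);
	if (err)
		return err;

	if (!enabled)
		return foo_pwm_enable(chip, pwm);

	return 0;
}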
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index f3d17a590305..d7ad88685830 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -134,16 +134,7 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
- platform_set_drvdata(pdev, priv);
-
- return pwmchip_add(&priv->chip);
-}
-
-static int clps711x_pwm_remove(struct platform_device *pdev)
-{
- struct clps711x_chip *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->chip);
+ return devm_pwmchip_add(&pdev->dev, &priv->chip);
}
static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
@@ -158,7 +149,6 @@ static struct platform_driver clps711x_pwm_driver = {
.of_match_table = of_match_ptr(clps711x_pwm_dt_ids),
},
.probe = clps711x_pwm_probe,
- .remove = clps711x_pwm_remove,
};
module_platform_driver(clps711x_pwm_driver);
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 02522a9a3073..7b357d1cf642 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -173,21 +173,11 @@ static int crystalcove_pwm_probe(struct platform_device *pdev)
/* get the PMIC regmap */
pwm->regmap = pmic->regmap;
- platform_set_drvdata(pdev, pwm);
-
- return pwmchip_add(&pwm->chip);
-}
-
-static int crystalcove_pwm_remove(struct platform_device *pdev)
-{
- struct crystalcove_pwm *pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, &pwm->chip);
}
static struct platform_driver crystalcove_pwm_driver = {
.probe = crystalcove_pwm_probe,
- .remove = crystalcove_pwm_remove,
.driver = {
.name = "crystal_cove_pwm",
},
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 4ca70794ad96..fc3cb7d669c6 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -58,35 +58,68 @@ static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
ep93xx_pwm_release_gpio(pdev);
}
-static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
+ int ret;
struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
+ bool enabled = state->enabled;
void __iomem *base = ep93xx_pwm->base;
unsigned long long c;
unsigned long period_cycles;
unsigned long duty_cycles;
unsigned long term;
- int ret = 0;
+
+ if (state->polarity != pwm->state.polarity) {
+ if (enabled) {
+ writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+ clk_disable_unprepare(ep93xx_pwm->clk);
+ enabled = false;
+ }
+
+ /*
+ * The clock needs to be enabled to access the PWM registers.
+ * Polarity can only be changed when the PWM is disabled.
+ */
+ ret = clk_prepare_enable(ep93xx_pwm->clk);
+ if (ret)
+ return ret;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
+ else
+ writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
+
+ clk_disable_unprepare(ep93xx_pwm->clk);
+ }
+
+ if (!state->enabled) {
+ if (enabled) {
+ writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+ clk_disable_unprepare(ep93xx_pwm->clk);
+ }
+
+ return 0;
+ }
/*
* The clock needs to be enabled to access the PWM registers.
* Configuration can be changed at any time.
*/
if (!pwm_is_enabled(pwm)) {
- ret = clk_enable(ep93xx_pwm->clk);
+ ret = clk_prepare_enable(ep93xx_pwm->clk);
if (ret)
return ret;
}
c = clk_get_rate(ep93xx_pwm->clk);
- c *= period_ns;
+ c *= state->period;
do_div(c, 1000000000);
period_cycles = c;
c = period_cycles;
- c *= duty_ns;
- do_div(c, period_ns);
+ c *= state->duty_cycle;
+ do_div(c, state->period);
duty_cycles = c;
if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
@@ -100,69 +133,32 @@ static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
}
+ ret = 0;
} else {
ret = -EINVAL;
}
if (!pwm_is_enabled(pwm))
- clk_disable(ep93xx_pwm->clk);
-
- return ret;
-}
-
-static int ep93xx_pwm_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
- int ret;
+ clk_disable_unprepare(ep93xx_pwm->clk);
- /*
- * The clock needs to be enabled to access the PWM registers.
- * Polarity can only be changed when the PWM is disabled.
- */
- ret = clk_enable(ep93xx_pwm->clk);
if (ret)
return ret;
- if (polarity == PWM_POLARITY_INVERSED)
- writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
- else
- writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_INVERT);
-
- clk_disable(ep93xx_pwm->clk);
-
- return 0;
-}
-
-static int ep93xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
- int ret;
-
- ret = clk_enable(ep93xx_pwm->clk);
- if (ret)
- return ret;
+ if (!enabled) {
+ ret = clk_prepare_enable(ep93xx_pwm->clk);
+ if (ret)
+ return ret;
- writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+ writew(0x1, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
+ }
return 0;
}
-static void ep93xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
-
- writew(0x0, ep93xx_pwm->base + EP93XX_PWMx_ENABLE);
- clk_disable(ep93xx_pwm->clk);
-}
-
static const struct pwm_ops ep93xx_pwm_ops = {
.request = ep93xx_pwm_request,
.free = ep93xx_pwm_free,
- .config = ep93xx_pwm_config,
- .set_polarity = ep93xx_pwm_polarity,
- .enable = ep93xx_pwm_enable,
- .disable = ep93xx_pwm_disable,
+ .apply = ep93xx_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 0e1ae9469eda..96ccd772280c 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -451,8 +451,6 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.ops = &fsl_pwm_ops;
- fpc->chip.of_xlate = of_pwm_xlate_with_flags;
- fpc->chip.of_pwm_n_cells = 3;
fpc->chip.npwm = 8;
ret = pwmchip_add(&fpc->chip);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 82d17fc75c21..4a6e9ad3c0ff 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -206,8 +206,6 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.ops = &hibvt_pwm_ops;
pwm_chip->chip.dev = &pdev->dev;
pwm_chip->chip.npwm = soc->num_pwms;
- pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
- pwm_chip->chip.of_pwm_n_cells = 3;
pwm_chip->soc = soc;
pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index cc37054589cc..11b16ecc4f96 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -156,7 +156,7 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
int ret;
- ret = pm_runtime_get_sync(chip->dev);
+ ret = pm_runtime_resume_and_get(chip->dev);
if (ret < 0)
return ret;
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 97c9133b6876..dbb50493abdd 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -363,8 +363,6 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
tpm->chip.dev = &pdev->dev;
tpm->chip.ops = &imx_tpm_pwm_ops;
- tpm->chip.of_xlate = of_pwm_xlate_with_flags;
- tpm->chip.of_pwm_n_cells = 3;
/* get number of channels */
val = readl(tpm->base + PWM_IMX_TPM_PARAM);
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index c957b365448e..bcd849496f8d 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -141,8 +141,6 @@ static int pwm_imx1_probe(struct platform_device *pdev)
if (!imx)
return -ENOMEM;
- platform_set_drvdata(pdev, imx);
-
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
@@ -161,16 +159,7 @@ static int pwm_imx1_probe(struct platform_device *pdev)
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
- return pwmchip_add(&imx->chip);
-}
-
-static int pwm_imx1_remove(struct platform_device *pdev)
-{
- struct pwm_imx1_chip *imx = platform_get_drvdata(pdev);
-
- pwm_imx1_clk_disable_unprepare(&imx->chip);
-
- return pwmchip_remove(&imx->chip);
+ return devm_pwmchip_add(&pdev->dev, &imx->chip);
}
static struct platform_driver pwm_imx1_driver = {
@@ -179,7 +168,6 @@ static struct platform_driver pwm_imx1_driver = {
.of_match_table = pwm_imx1_dt_ids,
},
.probe = pwm_imx1_probe,
- .remove = pwm_imx1_remove,
};
module_platform_driver(pwm_imx1_driver);
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index ba695115c160..f6588a96fbd9 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -329,9 +329,6 @@ static int pwm_imx27_probe(struct platform_device *pdev)
imx->chip.dev = &pdev->dev;
imx->chip.npwm = 1;
- imx->chip.of_xlate = of_pwm_xlate_with_flags;
- imx->chip.of_pwm_n_cells = 3;
-
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 5b6bdcdcecf5..990e7904c7f1 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -244,8 +244,6 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.dev = dev;
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = info->num_pwms;
- jz4740->chip.of_xlate = of_pwm_xlate_with_flags;
- jz4740->chip.of_pwm_n_cells = 3;
platform_set_drvdata(pdev, jz4740);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index b643ac61a2e7..8e461f3baa05 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -371,8 +371,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->chip.dev = &pdev->dev;
lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
lpc18xx_pwm->chip.npwm = 16;
- lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
- lpc18xx_pwm->chip.of_pwm_n_cells = 3;
/* SCT counter must be in unify (32 bit) mode */
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index cf749ea0de9f..c893ec3d2fb4 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -69,12 +69,8 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
static void pwm_lpss_remove_pci(struct pci_dev *pdev)
{
- struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);
-
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
-
- pwm_lpss_remove(lpwm);
}
#ifdef CONFIG_PM
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 986786be1e49..928570430cef 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -85,10 +85,8 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
static int pwm_lpss_remove_platform(struct platform_device *pdev)
{
- struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
-
pm_runtime_disable(&pdev->dev);
- return pwm_lpss_remove(lpwm);
+ return 0;
}
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 58b4031524af..36d4e83e6b79 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -236,7 +236,7 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
lpwm->chip.ops = &pwm_lpss_ops;
lpwm->chip.npwm = info->npwm;
- ret = pwmchip_add(&lpwm->chip);
+ ret = devm_pwmchip_add(dev, &lpwm->chip);
if (ret) {
dev_err(dev, "failed to add PWM chip: %d\n", ret);
return ERR_PTR(ret);
@@ -252,12 +252,6 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
}
EXPORT_SYMBOL_GPL(pwm_lpss_probe);
-int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
-{
- return pwmchip_remove(&lpwm->chip);
-}
-EXPORT_SYMBOL_GPL(pwm_lpss_remove);
-
MODULE_DESCRIPTION("PWM driver for Intel LPSS");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index 70db7e389d66..8b3476f25e06 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -35,6 +35,5 @@ struct pwm_lpss_boardinfo {
struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
const struct pwm_lpss_boardinfo *info);
-int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
#endif /* __PWM_LPSS_H */
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 9eb060613cb4..3cf3bcf5ddfc 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -551,8 +551,6 @@ static int meson_pwm_probe(struct platform_device *pdev)
meson->chip.dev = &pdev->dev;
meson->chip.ops = &meson_pwm_ops;
meson->chip.npwm = MESON_NUM_PWMS;
- meson->chip.of_xlate = of_pwm_xlate_with_flags;
- meson->chip.of_pwm_n_cells = 3;
meson->data = of_device_get_match_data(&pdev->dev);
@@ -560,31 +558,21 @@ static int meson_pwm_probe(struct platform_device *pdev)
if (err < 0)
return err;
- err = pwmchip_add(&meson->chip);
+ err = devm_pwmchip_add(&pdev->dev, &meson->chip);
if (err < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err);
return err;
}
- platform_set_drvdata(pdev, meson);
-
return 0;
}
-static int meson_pwm_remove(struct platform_device *pdev)
-{
- struct meson_pwm *meson = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&meson->chip);
-}
-
static struct platform_driver meson_pwm_driver = {
.driver = {
.name = "meson-pwm",
.of_match_table = meson_pwm_matches,
},
.probe = meson_pwm_probe,
- .remove = meson_pwm_remove,
};
module_platform_driver(meson_pwm_driver);
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 0266e84e982c..a22180803bd7 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -138,8 +138,6 @@ static int mxs_pwm_probe(struct platform_device *pdev)
mxs->chip.dev = &pdev->dev;
mxs->chip.ops = &mxs_pwm_ops;
- mxs->chip.of_xlate = of_pwm_xlate_with_flags;
- mxs->chip.of_pwm_n_cells = 3;
ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 612b3c859295..507a2d945b90 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -404,8 +404,6 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
omap->chip.dev = &pdev->dev;
omap->chip.ops = &pwm_omap_dmtimer_ops;
omap->chip.npwm = 1;
- omap->chip.of_xlate = of_pwm_xlate_with_flags;
- omap->chip.of_pwm_n_cells = 3;
mutex_init(&omap->mutex);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 7c9f174de64e..42ed770b432c 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -23,11 +23,11 @@
#include <linux/bitmap.h>
/*
- * Because the PCA9685 has only one prescaler per chip, changing the period of
- * one channel affects the period of all 16 PWM outputs!
- * However, the ratio between each configured duty cycle and the chip-wide
- * period remains constant, because the OFF time is set in proportion to the
- * counter range.
+ * Because the PCA9685 has only one prescaler per chip, only the first channel
+ * that is enabled is allowed to change the prescale register.
+ * PWM channels requested afterwards must use a period that results in the same
+ * prescale setting as the one set by the first requested channel.
+ * GPIOs do not count as enabled PWMs as they are not using the prescaler.
*/
#define PCA9685_MODE1 0x00
@@ -78,8 +78,9 @@
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
-#if IS_ENABLED(CONFIG_GPIOLIB)
struct mutex lock;
+ DECLARE_BITMAP(pwms_enabled, PCA9685_MAXCHAN + 1);
+#if IS_ENABLED(CONFIG_GPIOLIB)
struct gpio_chip gpio;
DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
#endif
@@ -90,51 +91,120 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
return container_of(chip, struct pca9685, chip);
}
+/* This function is supposed to be called with the lock mutex held */
+static bool pca9685_prescaler_can_change(struct pca9685 *pca, int channel)
+{
+ /* No PWM enabled: Change allowed */
+ if (bitmap_empty(pca->pwms_enabled, PCA9685_MAXCHAN + 1))
+ return true;
+ /* More than one PWM enabled: Change not allowed */
+ if (bitmap_weight(pca->pwms_enabled, PCA9685_MAXCHAN + 1) > 1)
+ return false;
+ /*
+ * Only one PWM enabled: Change allowed if the PWM about to
+ * be changed is the one that is already enabled
+ */
+ return test_bit(channel, pca->pwms_enabled);
+}
+
+static int pca9685_read_reg(struct pca9685 *pca, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = pca->chip.dev;
+ int err;
+
+ err = regmap_read(pca->regmap, reg, val);
+ if (err)
+ dev_err(dev, "regmap_read of register 0x%x failed: %pe\n", reg, ERR_PTR(err));
+
+ return err;
+}
+
+static int pca9685_write_reg(struct pca9685 *pca, unsigned int reg, unsigned int val)
+{
+ struct device *dev = pca->chip.dev;
+ int err;
+
+ err = regmap_write(pca->regmap, reg, val);
+ if (err)
+ dev_err(dev, "regmap_write to register 0x%x failed: %pe\n", reg, ERR_PTR(err));
+
+ return err;
+}
+
/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty)
{
+ struct pwm_device *pwm = &pca->chip.pwms[channel];
+ unsigned int on, off;
+
if (duty == 0) {
/* Set the full OFF bit, which has the highest precedence */
- regmap_write(pca->regmap, REG_OFF_H(channel), LED_FULL);
+ pca9685_write_reg(pca, REG_OFF_H(channel), LED_FULL);
+ return;
} else if (duty >= PCA9685_COUNTER_RANGE) {
/* Set the full ON bit and clear the full OFF bit */
- regmap_write(pca->regmap, REG_ON_H(channel), LED_FULL);
- regmap_write(pca->regmap, REG_OFF_H(channel), 0);
- } else {
- /* Set OFF time (clears the full OFF bit) */
- regmap_write(pca->regmap, REG_OFF_L(channel), duty & 0xff);
- regmap_write(pca->regmap, REG_OFF_H(channel), (duty >> 8) & 0xf);
- /* Clear the full ON bit */
- regmap_write(pca->regmap, REG_ON_H(channel), 0);
+ pca9685_write_reg(pca, REG_ON_H(channel), LED_FULL);
+ pca9685_write_reg(pca, REG_OFF_H(channel), 0);
+ return;
}
+
+
+ if (pwm->state.usage_power && channel < PCA9685_MAXCHAN) {
+ /*
+ * If usage_power is set, the pca9685 driver will phase shift
+ * the individual channels relative to their channel number.
+ * This improves EMI because the enabled channels no longer
+ * turn on at the same time, while still maintaining the
+ * configured duty cycle / power output.
+ */
+ on = channel * PCA9685_COUNTER_RANGE / PCA9685_MAXCHAN;
+ } else
+ on = 0;
+
+ off = (on + duty) % PCA9685_COUNTER_RANGE;
+
+ /* Set ON time (clears full ON bit) */
+ pca9685_write_reg(pca, REG_ON_L(channel), on & 0xff);
+ pca9685_write_reg(pca, REG_ON_H(channel), (on >> 8) & 0xf);
+ /* Set OFF time (clears full OFF bit) */
+ pca9685_write_reg(pca, REG_OFF_L(channel), off & 0xff);
+ pca9685_write_reg(pca, REG_OFF_H(channel), (off >> 8) & 0xf);
}
static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
{
- unsigned int off_h = 0, val = 0;
+ struct pwm_device *pwm = &pca->chip.pwms[channel];
+ unsigned int off = 0, on = 0, val = 0;
if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
/* HW does not support reading state of "all LEDs" channel */
return 0;
}
- regmap_read(pca->regmap, LED_N_OFF_H(channel), &off_h);
- if (off_h & LED_FULL) {
+ pca9685_read_reg(pca, LED_N_OFF_H(channel), &off);
+ if (off & LED_FULL) {
/* Full OFF bit is set */
return 0;
}
- regmap_read(pca->regmap, LED_N_ON_H(channel), &val);
- if (val & LED_FULL) {
+ pca9685_read_reg(pca, LED_N_ON_H(channel), &on);
+ if (on & LED_FULL) {
/* Full ON bit is set */
return PCA9685_COUNTER_RANGE;
}
- if (regmap_read(pca->regmap, LED_N_OFF_L(channel), &val)) {
- /* Reset val to 0 in case reading LED_N_OFF_L failed */
+ pca9685_read_reg(pca, LED_N_OFF_L(channel), &val);
+ off = ((off & 0xf) << 8) | (val & 0xff);
+ if (!pwm->state.usage_power)
+ return off;
+
+ /* Read ON register to calculate duty cycle of staggered output */
+ if (pca9685_read_reg(pca, LED_N_ON_L(channel), &val)) {
+ /* Reset val to 0 in case reading LED_N_ON_L failed */
val = 0;
}
- return ((off_h & 0xf) << 8) | (val & 0xff);
+ on = ((on & 0xf) << 8) | (val & 0xff);
+ return (off - on) & (PCA9685_COUNTER_RANGE - 1);
}
#if IS_ENABLED(CONFIG_GPIOLIB)
@@ -240,8 +310,6 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
{
struct device *dev = pca->chip.dev;
- mutex_init(&pca->lock);
-
pca->gpio.label = dev_name(dev);
pca->gpio.parent = dev;
pca->gpio.request = pca9685_pwm_gpio_request;
@@ -277,16 +345,23 @@ static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable)
{
- regmap_update_bits(pca->regmap, PCA9685_MODE1,
- MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
+ struct device *dev = pca->chip.dev;
+ int err = regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
+ if (err) {
+ dev_err(dev, "regmap_update_bits of register 0x%x failed: %pe\n",
+ PCA9685_MODE1, ERR_PTR(err));
+ return;
+ }
+
if (!enable) {
/* Wait 500us for the oscillator to be back up */
udelay(500);
}
}
-static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- const struct pwm_state *state)
+static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct pca9685 *pca = to_pca(chip);
unsigned long long duty, prescale;
@@ -307,8 +382,14 @@ static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- regmap_read(pca->regmap, PCA9685_PRESCALE, &val);
+ pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
if (prescale != val) {
+ if (!pca9685_prescaler_can_change(pca, pwm->hwpwm)) {
+ dev_err(chip->dev,
+ "pwm not changed: periods of enabled pwms must match!\n");
+ return -EBUSY;
+ }
+
/*
* Putting the chip briefly into SLEEP mode
* at this point won't interfere with the
@@ -319,7 +400,7 @@ static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
pca9685_set_sleep_mode(pca, true);
/* Change the chip-wide output frequency */
- regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
+ pca9685_write_reg(pca, PCA9685_PRESCALE, prescale);
/* Wake the chip up */
pca9685_set_sleep_mode(pca, false);
@@ -331,6 +412,25 @@ static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
+static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct pca9685 *pca = to_pca(chip);
+ int ret;
+
+ mutex_lock(&pca->lock);
+ ret = __pca9685_pwm_apply(chip, pwm, state);
+ if (ret == 0) {
+ if (state->enabled)
+ set_bit(pwm->hwpwm, pca->pwms_enabled);
+ else
+ clear_bit(pwm->hwpwm, pca->pwms_enabled);
+ }
+ mutex_unlock(&pca->lock);
+
+ return ret;
+}
+
static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
@@ -339,7 +439,7 @@ static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned int val = 0;
/* Calculate (chip-wide) period from prescale value */
- regmap_read(pca->regmap, PCA9685_PRESCALE, &val);
+ pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
/*
* PCA9685_OSC_CLOCK_MHZ is 25, i.e. an integer divider of 1000.
* The following calculation is therefore only a multiplication
@@ -372,6 +472,14 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
return -EBUSY;
+
+ if (pwm->hwpwm < PCA9685_MAXCHAN) {
+ /* PWMs - except the "all LEDs" channel - default to enabled */
+ mutex_lock(&pca->lock);
+ set_bit(pwm->hwpwm, pca->pwms_enabled);
+ mutex_unlock(&pca->lock);
+ }
+
pm_runtime_get_sync(chip->dev);
return 0;
@@ -381,7 +489,11 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
+ mutex_lock(&pca->lock);
pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
+ clear_bit(pwm->hwpwm, pca->pwms_enabled);
+ mutex_unlock(&pca->lock);
+
pm_runtime_put(chip->dev);
pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
@@ -422,7 +534,11 @@ static int pca9685_pwm_probe(struct i2c_client *client,
i2c_set_clientdata(client, pca);
- regmap_read(pca->regmap, PCA9685_MODE2, &reg);
+ mutex_init(&pca->lock);
+
+ ret = pca9685_read_reg(pca, PCA9685_MODE2, &reg);
+ if (ret)
+ return ret;
if (device_property_read_bool(&client->dev, "invert"))
reg |= MODE2_INVRT;
@@ -434,16 +550,20 @@ static int pca9685_pwm_probe(struct i2c_client *client,
else
reg |= MODE2_OUTDRV;
- regmap_write(pca->regmap, PCA9685_MODE2, reg);
+ ret = pca9685_write_reg(pca, PCA9685_MODE2, reg);
+ if (ret)
+ return ret;
/* Disable all LED ALLCALL and SUBx addresses to avoid bus collisions */
- regmap_read(pca->regmap, PCA9685_MODE1, &reg);
+ pca9685_read_reg(pca, PCA9685_MODE1, &reg);
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
- regmap_write(pca->regmap, PCA9685_MODE1, reg);
+ pca9685_write_reg(pca, PCA9685_MODE1, reg);
- /* Reset OFF registers to POR default */
- regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, LED_FULL);
- regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, LED_FULL);
+ /* Reset OFF/ON registers to POR default */
+ pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_L, LED_FULL);
+ pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_H, LED_FULL);
+ pca9685_write_reg(pca, PCA9685_ALL_LED_ON_L, 0);
+ pca9685_write_reg(pca, PCA9685_ALL_LED_ON_H, 0);
pca->chip.ops = &pca9685_pwm_ops;
/* Add an extra channel for ALL_LED */
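Editor's note: the usage_power handling added above is driven from the consumer side through pwm_state, and pwm_apply_state() in core.c now also treats a change of usage_power as a state change. A minimal consumer sketch; the device, con_id, period and duty cycle are made up for illustration.

/* Illustrative sketch, not part of the patch. */
#include <linux/err.h>
#include <linux/pwm.h>

static int example_enable_output(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(dev, "backlight");	/* released on unbind */
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);
	state.period = 5000000;			/* 5 ms */
	state.duty_cycle = state.period / 2;	/* 50% */
	state.enabled = true;
	/*
	 * Only the delivered power matters here, so allow the pca9685
	 * driver to phase-shift the channel's ON time to reduce EMI.
	 */
	state.usage_power = true;

	return pwm_apply_state(pwm, &state);
}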
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index cfb683827d32..e091a528e33c 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -165,7 +165,7 @@ pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
- struct pxa_pwm_chip *pwm;
+ struct pxa_pwm_chip *pc;
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
@@ -174,46 +174,44 @@ static int pwm_probe(struct platform_device *pdev)
if (id == NULL)
return -EINVAL;
- pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (pwm == NULL)
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (pc == NULL)
return -ENOMEM;
- pwm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm->clk))
- return PTR_ERR(pwm->clk);
+ pc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pc->clk))
+ return PTR_ERR(pc->clk);
- pwm->chip.dev = &pdev->dev;
- pwm->chip.ops = &pxa_pwm_ops;
- pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &pxa_pwm_ops;
+ pc->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
if (IS_ENABLED(CONFIG_OF)) {
- pwm->chip.of_xlate = pxa_pwm_of_xlate;
- pwm->chip.of_pwm_n_cells = 1;
+ pc->chip.of_xlate = pxa_pwm_of_xlate;
+ pc->chip.of_pwm_n_cells = 1;
}
- pwm->mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pwm->mmio_base))
- return PTR_ERR(pwm->mmio_base);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pc->mmio_base))
+ return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pwm->chip);
+ ret = pwmchip_add(&pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, pwm);
+ platform_set_drvdata(pdev, pc);
return 0;
}
static int pwm_remove(struct platform_device *pdev)
{
- struct pxa_pwm_chip *chip;
+ struct pxa_pwm_chip *pc;
- chip = platform_get_drvdata(pdev);
- if (chip == NULL)
- return -ENODEV;
+ pc = platform_get_drvdata(pdev);
- return pwmchip_remove(&chip->chip);
+ return pwmchip_remove(&pc->chip);
}
static struct platform_driver pwm_driver = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index e2959fae0969..b853e7942605 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -408,8 +408,6 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.dev = &pdev->dev;
tpu->chip.ops = &tpu_pwm_ops;
- tpu->chip.of_xlate = of_pwm_xlate_with_flags;
- tpu->chip.of_pwm_n_cells = 3;
tpu->chip.npwm = TPU_CHANNEL_MAX;
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 301785fa293e..cbe900877724 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -354,11 +354,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->chip.ops = &rockchip_pwm_ops;
pc->chip.npwm = 1;
- if (pc->data->supports_polarity) {
- pc->chip.of_xlate = of_pwm_xlate_with_flags;
- pc->chip.of_pwm_n_cells = 3;
- }
-
enable_conf = pc->data->enable_conf;
ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
enabled = (ctrl & enable_conf) == enable_conf;
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 515489fa4f6d..f6c528f02d43 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -526,9 +526,6 @@ static int pwm_samsung_probe(struct platform_device *pdev)
ret = pwm_samsung_parse_dt(chip);
if (ret)
return ret;
-
- chip->chip.of_xlate = of_pwm_xlate_with_flags;
- chip->chip.of_pwm_n_cells = 3;
} else {
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "no platform data specified\n");
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 688737f091ac..420edc4aa94a 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -242,8 +242,6 @@ static int pwm_sifive_probe(struct platform_device *pdev)
chip = &ddata->chip;
chip->dev = dev;
chip->ops = &pwm_sifive_ops;
- chip->of_xlate = of_pwm_xlate_with_flags;
- chip->of_pwm_n_cells = 3;
chip->npwm = 4;
ddata->regs = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 1a1cedfd11ce..54c7990967dd 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -75,7 +75,7 @@ static inline void spear_pwm_writel(struct spear_pwm_chip *chip,
}
static int spear_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
u64 val, div, clk_rate;
@@ -163,10 +163,32 @@ static void spear_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable(pc->clk);
}
+static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ spear_pwm_disable(chip, pwm);
+ return 0;
+ }
+
+ err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ return spear_pwm_enable(chip, pwm);
+
+ return 0;
+}
+
static const struct pwm_ops spear_pwm_ops = {
- .config = spear_pwm_config,
- .enable = spear_pwm_enable,
- .disable = spear_pwm_disable,
+ .apply = spear_pwm_apply,
.owner = THIS_MODULE,
};
@@ -228,14 +250,13 @@ static int spear_pwm_probe(struct platform_device *pdev)
static int spear_pwm_remove(struct platform_device *pdev)
{
struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < NUM_PWM; i++)
- pwm_disable(&pc->chip.pwms[i]);
+ pwmchip_remove(&pc->chip);
/* clk was prepared in probe, hence unprepare it here */
clk_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+
+ return 0;
}
static const struct of_device_id spear_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 98c479dfae31..7004f55bbf11 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
}
- if (state->period != cstate->period ||
- state->duty_cycle != cstate->duty_cycle) {
- ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
- state->period);
- if (ret)
- return ret;
- }
+ ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
+ state->period);
+ if (ret)
+ return ret;
sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1);
} else if (cstate->enabled) {
@@ -284,7 +281,9 @@ static int sprd_pwm_remove(struct platform_device *pdev)
{
struct sprd_pwm_chip *spc = platform_get_drvdata(pdev);
- return pwmchip_remove(&spc->chip);
+ pwmchip_remove(&spc->chip);
+
+ return 0;
}
static const struct of_device_id sprd_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index af08f564ef1d..93dd03618465 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -208,8 +208,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->chip.dev = &pdev->dev;
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
- priv->chip.of_xlate = of_pwm_xlate_with_flags;
- priv->chip.of_pwm_n_cells = 3;
ret = pwmchip_add(&priv->chip);
if (ret < 0)
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index c46fb90036ab..794ca5b02968 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -621,8 +621,6 @@ static int stm32_pwm_probe(struct platform_device *pdev)
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
- priv->chip.of_xlate = of_pwm_xlate_with_flags;
- priv->chip.of_pwm_n_cells = 3;
if (!priv->regmap || !priv->clk)
return -EINVAL;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index e01becd102c0..c952604e91f3 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -460,8 +460,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
pwm->chip.npwm = pwm->data->npwm;
- pwm->chip.of_xlate = of_pwm_xlate_with_flags;
- pwm->chip.of_pwm_n_cells = 3;
spin_lock_init(&pwm->ctrl_lock);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index c529a170bcdd..11a10b575ace 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -300,32 +300,12 @@ static int tegra_pwm_probe(struct platform_device *pdev)
static int tegra_pwm_remove(struct platform_device *pdev)
{
struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
- unsigned int i;
- int err;
-
- if (WARN_ON(!pc))
- return -ENODEV;
-
- err = clk_prepare_enable(pc->clk);
- if (err < 0)
- return err;
-
- for (i = 0; i < pc->chip.npwm; i++) {
- struct pwm_device *pwm = &pc->chip.pwms[i];
-
- if (!pwm_is_enabled(pwm))
- if (clk_prepare_enable(pc->clk) < 0)
- continue;
- pwm_writel(pc, i, 0);
-
- clk_disable_unprepare(pc->clk);
- }
+ pwmchip_remove(&pc->chip);
reset_control_assert(pc->rst);
- clk_disable_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index b9a17ab0c202..35eb19a5a0d1 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -48,16 +48,13 @@ static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip)
* duty_ns = 10^9 * duty_cycles / PWM_CLK_RATE
*/
static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ int duty_ns, int period_ns, int enabled)
{
struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
u32 period_cycles, duty_cycles;
unsigned long long c;
u16 value;
- if (period_ns > NSEC_PER_SEC)
- return -ERANGE;
-
c = pc->clk_rate;
c = c * period_ns;
do_div(c, NSEC_PER_SEC);
@@ -82,7 +79,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writew(value, pc->mmio_base + ECCTL2);
- if (!pwm_is_enabled(pwm)) {
+ if (!enabled) {
/* Update active registers if not running */
writel(duty_cycles, pc->mmio_base + CAP2);
writel(period_cycles, pc->mmio_base + CAP1);
@@ -96,7 +93,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writel(period_cycles, pc->mmio_base + CAP3);
}
- if (!pwm_is_enabled(pwm)) {
+ if (!enabled) {
value = readw(pc->mmio_base + ECCTL2);
/* Disable APWM mode to put APWM output Low */
value &= ~ECCTL2_APWM_MODE;
@@ -168,20 +165,46 @@ static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pm_runtime_put_sync(pc->chip.dev);
}
-static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
- if (pwm_is_enabled(pwm)) {
- dev_warn(chip->dev, "Removing PWM device without disabling\n");
- pm_runtime_put_sync(chip->dev);
+ int err;
+ int enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity) {
+
+ if (enabled) {
+ ecap_pwm_disable(chip, pwm);
+ enabled = false;
+ }
+
+ err = ecap_pwm_set_polarity(chip, pwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ ecap_pwm_disable(chip, pwm);
+ return 0;
}
+
+ if (state->period > NSEC_PER_SEC)
+ return -ERANGE;
+
+ err = ecap_pwm_config(chip, pwm, state->duty_cycle,
+ state->period, enabled);
+ if (err)
+ return err;
+
+ if (!enabled)
+ return ecap_pwm_enable(chip, pwm);
+
+ return 0;
}
static const struct pwm_ops ecap_pwm_ops = {
- .free = ecap_pwm_free,
- .config = ecap_pwm_config,
- .set_polarity = ecap_pwm_set_polarity,
- .enable = ecap_pwm_enable,
- .disable = ecap_pwm_disable,
+ .apply = ecap_pwm_apply,
.owner = THIS_MODULE,
};
@@ -224,8 +247,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &ecap_pwm_ops;
- pc->chip.of_xlate = of_pwm_xlate_with_flags;
- pc->chip.of_pwm_n_cells = 3;
pc->chip.npwm = 1;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 90095a19bf2d..17909fa53211 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -447,8 +447,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &ehrpwm_pwm_ops;
- pc->chip.of_xlate = of_pwm_xlate_with_flags;
- pc->chip.of_pwm_n_cells = 3;
pc->chip.npwm = NUM_PWM_CHANNEL;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index 46d903786366..af4e37d3e3a6 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -82,17 +82,14 @@ static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return -ERANGE;
/*
- * PWMC controls a divider that divides the input clk by a
- * power of two between 1 and 8. As a smaller divider yields
- * higher precision, pick the smallest possible one.
+ * PWMC controls a divider that divides the input clk by a power of two
+ * between 1 and 8. As a smaller divider yields higher precision, pick
+ * the smallest possible one. As period is at most 0xffff << 3, pwmc0 is
+ * in the intended range [0..3].
*/
- if (period > 0xffff) {
- pwmc0 = ilog2(period >> 16);
- if (WARN_ON(pwmc0 > 3))
- return -EINVAL;
- } else {
- pwmc0 = 0;
- }
+ pwmc0 = fls(period >> 16);
+ if (WARN_ON(pwmc0 > 3))
+ return -EINVAL;
period >>= pwmc0;
duty_cycle >>= pwmc0;
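/*
 * Worked example, not part of the patch: for a requested period of 0x2ffff
 * clock cycles, period >> 16 == 2 and fls(2) == 2, so the input clock is
 * divided by 1 << 2 and the programmed period becomes 0x2ffff >> 2 == 0xbfff,
 * which fits the 16-bit register field. Periods up to 0xffff give fls(0) == 0,
 * i.e. no division, matching the old behaviour.
 */
pwmc0 = fls(period >> 16);  /* e.g. 0x2ffff >> 16 = 2, fls(2) = 2 */
period >>= pwmc0;           /* 0x2ffff >> 2 = 0xbfff */
duty_cycle >>= pwmc0;       /* duty cycle is scaled by the same divider */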
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 52fe5d19473a..ea2aa151080a 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -207,8 +207,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
chip->chip.dev = &pdev->dev;
chip->chip.ops = &vt8500_pwm_ops;
- chip->chip.of_xlate = of_pwm_xlate_with_flags;
- chip->chip.of_pwm_n_cells = 3;
chip->chip.npwm = VT8500_NR_PWMS;
chip->clk = devm_clk_get(&pdev->dev, NULL);
@@ -240,15 +238,13 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
static int vt8500_pwm_remove(struct platform_device *pdev)
{
- struct vt8500_chip *chip;
+ struct vt8500_chip *chip = platform_get_drvdata(pdev);
- chip = platform_get_drvdata(pdev);
- if (chip == NULL)
- return -ENODEV;
+ pwmchip_remove(&chip->chip);
clk_unprepare(chip->clk);
- return pwmchip_remove(&chip->chip);
+ return 0;
}
static struct platform_driver vt8500_pwm_driver = {
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
index e16c3727db7a..aa42da4d141e 100644
--- a/drivers/regulator/bd9576-regulator.c
+++ b/drivers/regulator/bd9576-regulator.c
@@ -294,9 +294,9 @@ static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity,
struct bd957x_regulator_data *r)
{
if ((severity == REGULATOR_SEVERITY_ERR &&
- r->ovd_notif != REGULATOR_EVENT_OVER_TEMP) ||
+ r->temp_notif != REGULATOR_EVENT_OVER_TEMP) ||
(severity == REGULATOR_SEVERITY_WARN &&
- r->ovd_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
+ r->temp_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
dev_warn(rdev_get_dev(rdev),
"Can't support both thermal WARN and ERR\n");
if (severity == REGULATOR_SEVERITY_WARN)
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index bff8c515dcde..d144a4bdb76d 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -366,9 +366,8 @@ static struct hi6421_regulator_info
static int hi6421_regulator_enable(struct regulator_dev *rdev)
{
- struct hi6421_regulator_pdata *pdata;
+ struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
- pdata = dev_get_drvdata(rdev->dev.parent);
/* hi6421 spec requires that regulator enablement be serialized:
* - Because when BUCK, LDO switching from off to on, it will have
* a huge instantaneous current; so you can not turn on two or
@@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
- struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ struct hi6421_regulator_info *info;
unsigned int reg_val;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
return REGULATOR_MODE_IDLE;
@@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
- struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ struct hi6421_regulator_info *info;
unsigned int reg_val;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
return REGULATOR_MODE_STANDBY;
@@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ struct hi6421_regulator_info *info;
unsigned int new_mode;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
switch (mode) {
case REGULATOR_MODE_NORMAL:
new_mode = 0;
@@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ struct hi6421_regulator_info *info;
unsigned int new_mode;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
switch (mode) {
case REGULATOR_MODE_NORMAL:
new_mode = 0;
@@ -459,7 +462,9 @@ static unsigned int
hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV, int load_uA)
{
- struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+ struct hi6421_regulator_info *info;
+
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
if (load_uA > info->eco_microamp)
return REGULATOR_MODE_NORMAL;
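/*
 * Sketch of the idiom introduced above, not part of the patch: driver_data now
 * carries the shared pdata (needed for the enable lock), so the per-regulator
 * info is recovered from the regulator_desc embedded in it. The
 * to_hi6421_info() helper is hypothetical and only illustrates the pattern
 * that the ops above open-code.
 */
static struct hi6421_regulator_info *to_hi6421_info(struct regulator_dev *rdev)
{
	return container_of(rdev->desc, struct hi6421_regulator_info, desc);
}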
@@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
mutex_init(&pdata->lock);
- platform_set_drvdata(pdev, pdata);
for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
/* assign per-regulator data */
info = &hi6421_regulator_info[i];
config.dev = pdev->dev.parent;
- config.driver_data = info;
+ config.driver_data = pdata;
config.regmap = pmic->regmap;
rdev = devm_regulator_register(&pdev->dev, &info->desc,
diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c
index 9b162c0555c3..845bc3b4026d 100644
--- a/drivers/regulator/hi6421v600-regulator.c
+++ b/drivers/regulator/hi6421v600-regulator.c
@@ -98,10 +98,9 @@ static const unsigned int ldo34_voltages[] = {
static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
{
- struct hi6421_spmi_reg_priv *priv;
+ struct hi6421_spmi_reg_priv *priv = rdev_get_drvdata(rdev);
int ret;
- priv = dev_get_drvdata(rdev->dev.parent);
/* cannot enable more than one regulator at one time */
mutex_lock(&priv->enable_mutex);
@@ -119,9 +118,10 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg;
unsigned int reg_val;
+ sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & sreg->eco_mode_mask)
@@ -133,9 +133,10 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg;
unsigned int val;
+ sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
switch (mode) {
case REGULATOR_MODE_NORMAL:
val = 0;
@@ -159,7 +160,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV,
int load_uA)
{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_reg_info *sreg;
+
+ sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA))
return REGULATOR_MODE_NORMAL;
@@ -252,13 +255,12 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
return -ENOMEM;
mutex_init(&priv->enable_mutex);
- platform_set_drvdata(pdev, priv);
for (i = 0; i < ARRAY_SIZE(regulator_info); i++) {
info = &regulator_info[i];
config.dev = pdev->dev.parent;
- config.driver_data = info;
+ config.driver_data = priv;
config.regmap = pmic->regmap;
rdev = devm_regulator_register(dev, &info->desc, &config);
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index eeab9d3c824b..d059ae85047a 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -11,6 +11,17 @@
#include <linux/mfd/lp87565.h>
+enum LP87565_regulator_id {
+ /* BUCK's */
+ LP87565_BUCK_0,
+ LP87565_BUCK_1,
+ LP87565_BUCK_2,
+ LP87565_BUCK_3,
+ LP87565_BUCK_10,
+ LP87565_BUCK_23,
+ LP87565_BUCK_3210,
+};
+
#define LP87565_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, \
_er, _em, _ev, _delay, _lr, _cr) \
[_id] = { \
diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c
index d3d876198d6e..234af3a66c77 100644
--- a/drivers/regulator/mtk-dvfsrc-regulator.c
+++ b/drivers/regulator/mtk-dvfsrc-regulator.c
@@ -179,8 +179,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev)
for (i = 0; i < regulator_init_data->size; i++) {
config.dev = dev->parent;
config.driver_data = (mt_regulators + i);
- rdev = devm_regulator_register(dev->parent,
- &(mt_regulators + i)->desc,
+ rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc,
&config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n",
diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
index 4bca64de0f67..2ee334174e2b 100644
--- a/drivers/regulator/rtmv20-regulator.c
+++ b/drivers/regulator/rtmv20-regulator.c
@@ -37,7 +37,7 @@
#define RTMV20_WIDTH2_MASK GENMASK(7, 0)
#define RTMV20_LBPLVL_MASK GENMASK(3, 0)
#define RTMV20_LBPEN_MASK BIT(7)
-#define RTMV20_STROBEPOL_MASK BIT(1)
+#define RTMV20_STROBEPOL_MASK BIT(0)
#define RTMV20_VSYNPOL_MASK BIT(1)
#define RTMV20_FSINEN_MASK BIT(7)
#define RTMV20_ESEN_MASK BIT(6)
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index e68fcedc999c..9a6eedc3994a 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -26,6 +26,7 @@ config REMOTEPROC_CDEV
config IMX_REMOTEPROC
tristate "i.MX remoteproc support"
depends on ARCH_MXC
+ depends on HAVE_ARM_SMCCC
select MAILBOX
help
Say y here to support i.MX's remote processors via the remote
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index d6338872c6db..d88f76f5305e 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -50,6 +51,11 @@
#define IMX_RPROC_MEM_MAX 32
+#define IMX_SIP_RPROC 0xC2000005
+#define IMX_SIP_RPROC_START 0x00
+#define IMX_SIP_RPROC_STARTED 0x01
+#define IMX_SIP_RPROC_STOP 0x02
+
/**
* struct imx_rproc_mem - slim internal memory structure
* @cpu_addr: MPU virtual address of the memory region
@@ -74,6 +80,15 @@ struct imx_rproc_att {
int flags;
};
+/* Remote core start/stop method */
+enum imx_rproc_method {
+ IMX_RPROC_NONE,
+ /* Through syscon regmap */
+ IMX_RPROC_MMIO,
+ /* Through ARM SMCCC */
+ IMX_RPROC_SMC,
+};
+
struct imx_rproc_dcfg {
u32 src_reg;
u32 src_mask;
@@ -81,6 +96,7 @@ struct imx_rproc_dcfg {
u32 src_stop;
const struct imx_rproc_att *att;
size_t att_size;
+ enum imx_rproc_method method;
};
struct imx_rproc {
@@ -98,6 +114,36 @@ struct imx_rproc {
void __iomem *rsc_table;
};
+static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
+ /* dev addr , sys addr , size , flags */
+ /* ITCM */
+ { 0x00000000, 0x007E0000, 0x00020000, ATT_OWN },
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00009000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00940000, 0x00940000, 0x00050000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* DTCM */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN },
+ /* OCRAM_S - alias */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20240000, 0x00940000, 0x00040000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
+};
+
static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
/* dev addr , sys addr , size , flags */
/* TCML - alias */
@@ -126,6 +172,20 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
{ 0x40000000, 0x40000000, 0x80000000, 0 },
};
+static const struct imx_rproc_att imx_rproc_att_imx8ulp[] = {
+ {0x1FFC0000, 0x1FFC0000, 0xC0000, ATT_OWN},
+ {0x21000000, 0x21000000, 0x10000, ATT_OWN},
+ {0x80000000, 0x80000000, 0x60000000, 0}
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx7ulp[] = {
+ {0x1FFD0000, 0x1FFD0000, 0x30000, ATT_OWN},
+ {0x20000000, 0x20000000, 0x10000, ATT_OWN},
+ {0x2F000000, 0x2F000000, 0x20000, ATT_OWN},
+ {0x2F020000, 0x2F020000, 0x20000, ATT_OWN},
+ {0x60000000, 0x60000000, 0x40000000, 0}
+};
+
static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
/* dev addr , sys addr , size , flags */
/* OCRAM_S (M4 Boot code) - alias */
@@ -176,6 +236,12 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_SMC,
+};
+
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
@@ -183,6 +249,19 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx8mq,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
+ .att = imx_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
+ .method = IMX_RPROC_NONE,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
+ .att = imx_rproc_att_imx7ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
+ .method = IMX_RPROC_NONE,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
@@ -192,6 +271,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx7d,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
+ .method = IMX_RPROC_MMIO,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
@@ -201,6 +281,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
.src_stop = IMX6SX_M4_STOP,
.att = imx_rproc_att_imx6sx,
.att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
+ .method = IMX_RPROC_MMIO,
};
static int imx_rproc_start(struct rproc *rproc)
@@ -208,12 +289,24 @@ static int imx_rproc_start(struct rproc *rproc)
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
+ struct arm_smccc_res res;
int ret;
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
- dcfg->src_mask, dcfg->src_start);
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_start);
+ break;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (ret)
- dev_err(dev, "Failed to enable M4!\n");
+ dev_err(dev, "Failed to enable remote core!\n");
return ret;
}
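/*
 * Illustrative sketch, not part of the patch: with IMX_RPROC_SMC the start and
 * stop requests are forwarded to the secure firmware (TF-A) through the i.MX
 * SiP service; the firmware's status comes back in res.a0 (0 on success).
 * The error handling below is hypothetical.
 */
struct arm_smccc_res res;

arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
if (res.a0)
	dev_err(dev, "SIP start request failed: %lu\n", res.a0);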
@@ -223,12 +316,26 @@ static int imx_rproc_stop(struct rproc *rproc)
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
+ struct arm_smccc_res res;
int ret;
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
- dcfg->src_mask, dcfg->src_stop);
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_stop);
+ break;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ if (res.a1)
+ dev_info(dev, "Not in wfi, force stopped\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (ret)
- dev_err(dev, "Failed to stop M4!\n");
+ dev_err(dev, "Failed to stop remote core\n");
return ret;
}
@@ -560,12 +667,37 @@ static void imx_rproc_free_mbox(struct rproc *rproc)
static int imx_rproc_detect_mode(struct imx_rproc *priv)
{
+ struct regmap_config config = { .name = "imx-rproc" };
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
+ struct regmap *regmap;
+ struct arm_smccc_res res;
int ret;
u32 val;
- ret = regmap_read(priv->regmap, dcfg->src_reg, &val);
+ switch (dcfg->method) {
+ case IMX_RPROC_NONE:
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ default:
+ break;
+ }
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->regmap = regmap;
+ regmap_attach_dev(dev, regmap, &config);
+
+ ret = regmap_read(regmap, dcfg->src_reg, &val);
if (ret) {
dev_err(dev, "Failed to read src\n");
return ret;
@@ -577,24 +709,44 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
return 0;
}
+static int imx_rproc_clk_enable(struct imx_rproc *priv)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ int ret;
+
+ /* Remote core is not under control of Linux */
+ if (dcfg->method == IMX_RPROC_NONE)
+ return 0;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ /*
+ * clk for M4 block including memory. Should be
+ * enabled before .start for FW transfer.
+ */
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_rproc *priv;
struct rproc *rproc;
- struct regmap_config config = { .name = "imx-rproc" };
const struct imx_rproc_dcfg *dcfg;
- struct regmap *regmap;
int ret;
- regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
- if (IS_ERR(regmap)) {
- dev_err(dev, "failed to find syscon\n");
- return PTR_ERR(regmap);
- }
- regmap_attach_dev(dev, regmap, &config);
-
/* set some other name than imx */
rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
NULL, sizeof(*priv));
@@ -609,7 +761,6 @@ static int imx_rproc_probe(struct platform_device *pdev)
priv = rproc->priv;
priv->rproc = rproc;
- priv->regmap = regmap;
priv->dcfg = dcfg;
priv->dev = dev;
@@ -635,25 +786,15 @@ static int imx_rproc_probe(struct platform_device *pdev)
if (ret)
goto err_put_mbox;
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "Failed to get clock\n");
- ret = PTR_ERR(priv->clk);
- goto err_put_mbox;
- }
-
- /*
- * clk for M4 block including memory. Should be
- * enabled before .start for FW transfer.
- */
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&rproc->dev, "Failed to enable clock\n");
+ ret = imx_rproc_clk_enable(priv);
+ if (ret)
goto err_put_mbox;
- }
INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+ if (rproc->state != RPROC_DETACHED)
+ rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
+
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
@@ -688,10 +829,14 @@ static int imx_rproc_remove(struct platform_device *pdev)
}
static const struct of_device_id imx_rproc_of_match[] = {
+ { .compatible = "fsl,imx7ulp-cm4", .data = &imx_rproc_cfg_imx7ulp },
{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
{ .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
{ .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
{ .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
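/*
 * Sketch, not part of the patch: the per-SoC imx_rproc_dcfg (including the new
 * .method field) is picked from the table above at probe time, roughly:
 */
dcfg = of_device_get_match_data(dev);
if (!dcfg)
	return -EINVAL;
priv->dcfg = dcfg;  /* later consulted by imx_rproc_start/stop/detect_mode */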
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index e5778e476245..1777a01fa84e 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -887,6 +887,9 @@ static const struct of_device_id pru_rproc_match[] = {
{ .compatible = "ti,am3356-pru", .data = &pru_data },
{ .compatible = "ti,am4376-pru", .data = &pru_data },
{ .compatible = "ti,am5728-pru", .data = &pru_data },
+ { .compatible = "ti,am642-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am642-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am642-tx-pru", .data = &k3_tx_pru_data },
{ .compatible = "ti,k2g-pru", .data = &pru_data },
{ .compatible = "ti,am654-pru", .data = &k3_pru_data },
{ .compatible = "ti,am654-rtu", .data = &k3_rtu_data },
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 9627a950928e..7e9244c748da 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -280,7 +280,7 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
return ret;
}
- q6v5->state = qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
+ q6v5->state = devm_qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
if (IS_ERR(q6v5->state)) {
dev_err(&pdev->dev, "failed to acquire stop state\n");
return PTR_ERR(q6v5->state);
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index b921fc26cd04..a79bee901e9b 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -689,6 +689,25 @@ static const struct adsp_data mpss_resource_init = {
.ssctl_id = 0x12,
};
+static const struct adsp_data sc8180x_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct adsp_data slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
@@ -811,6 +830,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
+ { .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 5f3455aa7e0e..f1cbc6b2edbb 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -624,8 +624,8 @@ static int wcnss_probe(struct platform_device *pdev)
wcnss->stop_ack_irq = ret;
if (wcnss->stop_ack_irq) {
- wcnss->state = qcom_smem_state_get(&pdev->dev, "stop",
- &wcnss->stop_bit);
+ wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
+ &wcnss->stop_bit);
if (IS_ERR(wcnss->state)) {
ret = PTR_ERR(wcnss->state);
goto detach_pds;
@@ -659,7 +659,6 @@ static int wcnss_remove(struct platform_device *pdev)
of_platform_depopulate(&pdev->dev);
- qcom_smem_state_put(wcnss->state);
rproc_del(wcnss->rproc);
qcom_remove_sysmon_subdev(wcnss->sysmon);
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
index 0b8a84c04f76..4ad98b0b8caa 100644
--- a/drivers/remoteproc/remoteproc_cdev.c
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -124,7 +124,7 @@ int rproc_char_device_add(struct rproc *rproc)
void rproc_char_device_remove(struct rproc *rproc)
{
- __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc");
+ cdev_del(&rproc->cdev);
}
void __init rproc_init_cdev(void)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 626a6b90fba2..7de5905d276a 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-map-ops.h>
@@ -165,6 +166,7 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* @rproc: handle of a remote processor
* @da: remoteproc device address to translate
* @len: length of the memory region @da is pointing to
+ * @is_iomem: optional pointer filled in to indicate if @da is iomapped memory
*
* Some remote processors will ask us to allocate them physically contiguous
* memory regions (which we call "carveouts"), and map them to specific
@@ -182,12 +184,12 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* translations on the internal remoteproc memory regions through a platform
* implementation specific da_to_va ops, if present.
*
- * The function returns a valid kernel address on success or NULL on failure.
- *
* Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
* but only on kernel direct mapped RAM memory. Instead, we're just using
* here the output of the DMA API for the carveouts, which should be more
* correct.
+ *
+ * Return: a valid kernel address on success or NULL on failure
*/
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
@@ -508,7 +510,7 @@ static int copy_dma_range_map(struct device *to, struct device *from)
* use RSC_DEVMEM resource entries to map their required @da to the physical
* address of their base CMA region (ouch, hacky!).
*
- * Returns 0 on success, or an appropriate error code otherwise
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
@@ -643,7 +645,7 @@ void rproc_vdev_release(struct kref *ref)
* support dynamically allocating this address using the generic
* DMA API (but currently there isn't a use case for that).
*
- * Returns 0 on success, or an appropriate error code otherwise
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
@@ -720,6 +722,8 @@ static int rproc_handle_trace(struct rproc *rproc, void *ptr,
* tell us ranges of physical addresses the firmware is allowed to request,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
@@ -782,6 +786,8 @@ out:
*
* This function allocates the specified memory entry @mem using
* dma_alloc_coherent() as default allocator
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_alloc_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem)
@@ -888,6 +894,8 @@ dma_free:
*
* This function releases specified memory entry @mem allocated via
* rproc_alloc_carveout() function by @rproc.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_release_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem)
@@ -917,6 +925,8 @@ static int rproc_release_carveout(struct rproc *rproc,
* (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
* needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
* pressure is important; it may have a substantial impact on performance.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_carveout(struct rproc *rproc,
void *ptr, int offset, int avail)
@@ -1005,6 +1015,8 @@ EXPORT_SYMBOL(rproc_add_carveout);
*
* This function allocates a rproc_mem_entry struct and fills it with parameters
* provided by client.
+ *
+ * Return: a valid pointer on success, or NULL on failure
*/
__printf(8, 9)
struct rproc_mem_entry *
@@ -1049,6 +1061,8 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
*
* This function allocates a rproc_mem_entry struct and fills it with parameters
* provided by client.
+ *
+ * Return: a valid pointer on success, or NULL on failure
*/
__printf(5, 6)
struct rproc_mem_entry *
@@ -1788,7 +1802,7 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
* We're initiating an asynchronous firmware loading, so we can
* be built-in kernel code, without hanging the boot process.
*/
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
rproc->firmware, &rproc->dev, GFP_KERNEL,
rproc, rproc_auto_boot_callback);
if (ret < 0)
@@ -1880,6 +1894,8 @@ static int __rproc_detach(struct rproc *rproc)
* remoteproc functional again.
*
* This function can sleep, so it cannot be called from atomic context.
+ *
+ * Return: 0 on success or a negative value upon failure
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
@@ -1964,7 +1980,7 @@ static void rproc_crash_handler_work(struct work_struct *work)
* If the remote processor is already powered on, this function immediately
* returns (successfully).
*
- * Returns 0 on success, and an appropriate error value otherwise.
+ * Return: 0 on success, and an appropriate error value otherwise
*/
int rproc_boot(struct rproc *rproc)
{
@@ -2099,6 +2115,8 @@ EXPORT_SYMBOL(rproc_shutdown);
* no longer available. From there it should be possible to remove the
* platform driver and even power cycle the application processor (if the HW
* supports it) without needing to switch off the remote processor.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
*/
int rproc_detach(struct rproc *rproc)
{
@@ -2151,7 +2169,7 @@ EXPORT_SYMBOL(rproc_detach);
* This function increments the remote processor's refcount, so always
* use rproc_put() to decrement it back once rproc isn't needed anymore.
*
- * Returns the rproc handle on success, and NULL on failure.
+ * Return: rproc handle on success, and NULL on failure
*/
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
@@ -2301,8 +2319,6 @@ static int rproc_validate(struct rproc *rproc)
* This is called by the platform-specific rproc implementation, whenever
* a new remote processor device is probed.
*
- * Returns 0 on success and an appropriate error code otherwise.
- *
* Note: this function initiates an asynchronous firmware loading
* context, which will look for virtio devices supported by the rproc's
* firmware.
@@ -2310,35 +2326,39 @@ static int rproc_validate(struct rproc *rproc)
* If found, those virtio devices will be created and added, so as a result
* of registering this remote processor, additional virtio drivers might be
* probed.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
*/
int rproc_add(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
- ret = device_add(dev);
+ ret = rproc_validate(rproc);
if (ret < 0)
return ret;
- ret = rproc_validate(rproc);
+ /* add char device for this remoteproc */
+ ret = rproc_char_device_add(rproc);
if (ret < 0)
return ret;
+ ret = device_add(dev);
+ if (ret < 0) {
+ put_device(dev);
+ goto rproc_remove_cdev;
+ }
+
dev_info(dev, "%s is available\n", rproc->name);
/* create debugfs entries */
rproc_create_debug_dir(rproc);
- /* add char device for this remoteproc */
- ret = rproc_char_device_add(rproc);
- if (ret < 0)
- return ret;
-
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
if (ret < 0)
- return ret;
+ goto rproc_remove_dev;
}
/* expose to rproc_get_by_phandle users */
@@ -2347,6 +2367,13 @@ int rproc_add(struct rproc *rproc)
mutex_unlock(&rproc_list_mutex);
return 0;
+
+rproc_remove_dev:
+ rproc_delete_debug_dir(rproc);
+ device_del(dev);
+rproc_remove_cdev:
+ rproc_char_device_remove(rproc);
+ return ret;
}
EXPORT_SYMBOL(rproc_add);
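/*
 * Caller-side sketch, not part of the patch: with the reordering above,
 * rproc_add() validates the rproc and registers the character device before
 * device_add(), and rolls all of it back on failure, so a platform driver can
 * simply propagate the error. The names below are placeholders.
 */
rproc = devm_rproc_alloc(dev, dev_name(dev), &my_rproc_ops, fw_name, sizeof(*priv));
if (!rproc)
	return -ENOMEM;

ret = rproc_add(rproc);
if (ret)
	return ret;  /* cdev and device registration already rolled back */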
@@ -2363,7 +2390,7 @@ static void devm_rproc_remove(void *rproc)
* This function performs like rproc_add() but the registered rproc device will
* automatically be removed on driver detach.
*
- * Returns: 0 on success, negative errno on failure
+ * Return: 0 on success, negative errno on failure
*/
int devm_rproc_add(struct device *dev, struct rproc *rproc)
{
@@ -2471,10 +2498,10 @@ static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
* implementations should then call rproc_add() to complete
* the registration of the remote processor.
*
- * On success the new rproc is returned, and on failure, NULL.
- *
* Note: _never_ directly deallocate @rproc, even if it was not registered
* yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
+ *
+ * Return: new rproc pointer on success, and NULL on failure
*/
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
@@ -2587,7 +2614,7 @@ EXPORT_SYMBOL(rproc_put);
* of the outstanding reference created by rproc_alloc. To decrement that
* one last refcount, one still needs to call rproc_free().
*
- * Returns 0 on success and -EINVAL if @rproc isn't valid.
+ * Return: 0 on success and -EINVAL if @rproc isn't valid
*/
int rproc_del(struct rproc *rproc)
{
@@ -2602,7 +2629,6 @@ int rproc_del(struct rproc *rproc)
mutex_unlock(&rproc->lock);
rproc_delete_debug_dir(rproc);
- rproc_char_device_remove(rproc);
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
@@ -2613,6 +2639,7 @@ int rproc_del(struct rproc *rproc)
synchronize_rcu();
device_del(&rproc->dev);
+ rproc_char_device_remove(rproc);
return 0;
}
@@ -2634,7 +2661,7 @@ static void devm_rproc_free(struct device *dev, void *res)
* This function performs like rproc_alloc() but the acquired rproc device will
* automatically be released on driver detach.
*
- * Returns: new rproc instance, or NULL on failure
+ * Return: new rproc instance, or NULL on failure
*/
struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
@@ -2686,7 +2713,7 @@ EXPORT_SYMBOL(rproc_remove_subdev);
* rproc_get_by_child() - acquire rproc handle of @dev's ancestor
* @dev: child device to find ancestor of
*
- * Returns the ancestor rproc instance, or NULL if not found.
+ * Return: the ancestor rproc instance, or NULL if not found
*/
struct rproc *rproc_get_by_child(struct device *dev)
{
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index 11423588965a..469c52e62faf 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -31,6 +31,8 @@
* @fw: the ELF firmware image
*
* Make sure this fw image is sane (ie a correct ELF32/ELF64 file).
+ *
+ * Return: 0 on success and -EINVAL upon any failure
*/
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
@@ -117,11 +119,11 @@ EXPORT_SYMBOL(rproc_elf_sanity_check);
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
- * This function returns the entry point address of the ELF
- * image.
- *
* Note that the boot address is not a configurable property of all remote
* processors. Some will always boot at a specific hard-coded address.
+ *
+ * Return: entry point address of the ELF image
+ *
*/
u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
@@ -152,6 +154,8 @@ EXPORT_SYMBOL(rproc_elf_get_boot_addr);
* might be different: they might not have iommus, and would prefer to
* directly allocate memory for every segment/resource. This is not yet
* supported, though.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
*/
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
@@ -362,7 +366,7 @@ EXPORT_SYMBOL(rproc_elf_load_rsc_table);
* This function finds the location of the loaded resource table. Don't
* call this function if the table wasn't loaded yet - it's a bug if you do.
*
- * Returns the pointer to the resource table if it is found or NULL otherwise.
+ * Return: pointer to the resource table if it is found or NULL otherwise.
* If the table wasn't loaded yet the result is unspecified.
*/
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 0cc617f76068..cf4d54e98e6a 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -45,7 +45,7 @@ static bool rproc_virtio_notify(struct virtqueue *vq)
* when the remote processor signals that a specific virtqueue has pending
* messages available.
*
- * Returns IRQ_NONE if no message was found in the @notifyid virtqueue,
+ * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
* and otherwise returns IRQ_HANDLED.
*/
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
@@ -325,7 +325,7 @@ static void rproc_virtio_dev_release(struct device *dev)
* This function registers a virtio device. This vdev's parent is
* the rproc device.
*
- * Returns 0 on success or an appropriate error value otherwise.
+ * Return: 0 on success or an appropriate error value otherwise
*/
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
@@ -432,6 +432,8 @@ out:
* @data: must be null
*
* This function unregisters an existing virtio device.
+ *
+ * Return: 0
*/
int rproc_remove_virtio_dev(struct device *dev, void *data)
{
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 7353f9e7e7af..b643efcf995a 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -474,14 +474,12 @@ static int stm32_rproc_attach(struct rproc *rproc)
static int stm32_rproc_detach(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
- int err, dummy_data, idx;
+ int err, idx;
/* Inform the remote processor of the detach */
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH);
if (idx >= 0 && ddata->mb[idx].chan) {
- /* A dummy data is sent to allow to block on transmit */
- err = mbox_send_message(ddata->mb[idx].chan,
- &dummy_data);
+ err = mbox_send_message(ddata->mb[idx].chan, "stop");
if (err < 0)
dev_warn(&rproc->dev, "warning: remote FW detach without ack\n");
}
@@ -493,15 +491,13 @@ static int stm32_rproc_detach(struct rproc *rproc)
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
- int err, dummy_data, idx;
+ int err, idx;
/* request shutdown of the remote processor */
if (rproc->state != RPROC_OFFLINE) {
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
if (idx >= 0 && ddata->mb[idx].chan) {
- /* a dummy data is sent to allow to block on transmit */
- err = mbox_send_message(ddata->mb[idx].chan,
- &dummy_data);
+ err = mbox_send_message(ddata->mb[idx].chan, "detach");
if (err < 0)
dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
}
@@ -556,7 +552,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
continue;
if (!ddata->mb[i].chan)
return;
- err = mbox_send_message(ddata->mb[i].chan, (void *)(long)vqid);
+ err = mbox_send_message(ddata->mb[i].chan, "kick");
if (err < 0)
dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n",
__func__, ddata->mb[i].name, err);
@@ -580,7 +576,7 @@ static int stm32_rproc_da_to_pa(struct rproc *rproc,
continue;
*pa = da - p_mem->dev_addr + p_mem->bus_addr;
- dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
+ dev_dbg(dev, "da %llx to pa %pap\n", da, pa);
return 0;
}
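/*
 * Sketch, not part of the patch: the IPCC mailbox is used as a plain doorbell
 * here, so the payload pointer is never dereferenced by the receiver. Passing
 * a string literal (static storage) instead of the address of a local variable
 * avoids handing the mailbox framework a pointer into a stack frame that may
 * be gone before the transmit completes. The warning text is hypothetical.
 */
err = mbox_send_message(ddata->mb[idx].chan, "stop");
if (err < 0)
	dev_warn(&rproc->dev, "detach request sent without ack\n");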
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 5cf8d030a1f0..71615210df3e 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -40,6 +40,8 @@
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
/* Available from J7200 SoCs onwards */
#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
@@ -49,6 +51,8 @@
#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
/**
* struct k3_r5_mem - internal memory structure
@@ -64,19 +68,29 @@ struct k3_r5_mem {
size_t size;
};
+/*
+ * Not all cluster mode values are applicable on all SoCs. The following
+ * are the modes supported on various SoCs:
+ * Split mode : AM65x, J721E, J7200 and AM64x SoCs
+ * LockStep mode : AM65x, J721E and J7200 SoCs
+ * Single-CPU mode : AM64x SoCs only
+ */
enum cluster_mode {
CLUSTER_MODE_SPLIT = 0,
CLUSTER_MODE_LOCKSTEP,
+ CLUSTER_MODE_SINGLECPU,
};
/**
* struct k3_r5_soc_data - match data to handle SoC variations
* @tcm_is_double: flag to denote the larger unified TCMs in certain modes
* @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
+ * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
*/
struct k3_r5_soc_data {
bool tcm_is_double;
bool tcm_ecc_autoinit;
+ bool single_cpu_mode;
};
/**
@@ -369,6 +383,13 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
* applicable cores to allow loading into the TCMs. The .prepare() ops is
* invoked by remoteproc core before any firmware loading, and is followed
* by the .start() ops after loading to actually let the R5 cores run.
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to
+ * execute code, but combines the TCMs from both cores. The resets for both
+ * cores need to be released to make this possible, as the TCMs are in general
+ * private to each core. Only Core0 needs to be unhalted for running the
+ * cluster in this mode. The function uses the same reset logic as LockStep
+ * mode for this (though the behavior is agnostic of the reset release order).
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
@@ -386,7 +407,9 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
return ret;
mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
- ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
if (ret) {
dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
@@ -427,6 +450,12 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* cores. The cores themselves are only halted in the .stop() ops, and the
* .unprepare() ops is invoked by the remoteproc core after the remoteproc is
* stopped.
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from
+ * both cores. The access is made possible only with releasing the resets for
+ * both cores, but with only Core0 unhalted. This function re-uses the same
+ * reset assert logic as LockStep mode for this mode (though the behavior is
+ * agnostic of the reset assert order).
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
@@ -436,7 +465,9 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
struct device *dev = kproc->dev;
int ret;
- ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
if (ret)
dev_err(dev, "unable to disable cores, ret = %d\n", ret);
@@ -455,6 +486,10 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
* first followed by Core0. The Split-mode requires that Core0 be maintained
* always in a higher power state than Core1 (implying Core1 needs to be started
* always only after Core0 is started).
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be unhalted. The function uses the same logic
+ * flow as Split-mode for this.
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
@@ -539,6 +574,10 @@ put_mbox:
* Core0 to be maintained always in a higher power state than Core1 (implying
* Core1 needs to be stopped first before Core0).
*
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be halted. The function uses the same logic
+ * flow as Split-mode for this.
+ *
* Note that the R5F halt operation in general is not effective when the R5F
* core is running, but is needed to make sure the core won't run after
* deasserting the reset the subsequent time. The asserting of reset can
@@ -665,7 +704,9 @@ static const struct rproc_ops k3_r5_rproc_ops = {
*
* Each R5FSS has a cluster-level setting for configuring the processor
* subsystem either in a safety/fault-tolerant LockStep mode or a performance
- * oriented Split mode. Each R5F core has a number of settings to either
+ * oriented Split mode on most SoCs. A few SoCs support a non-safety
+ * alternative to LockStep mode, called Single-CPU mode, that exercises only a
+ * single R5F core. Each R5F core has a number of settings to either
* enable/disable each of the TCMs, control which TCM appears at the R5F core's
* address 0x0. These settings need to be configured before the resets for the
* corresponding core are released. These settings are all protected and managed
@@ -677,11 +718,13 @@ static const struct rproc_ops k3_r5_rproc_ops = {
* the cores are halted before the .prepare() step.
*
* The function is called from k3_r5_cluster_rproc_init() and is invoked either
- * once (in LockStep mode) or twice (in Split mode). Support for LockStep-mode
- * is dictated by an eFUSE register bit, and the config settings retrieved from
- * DT are adjusted accordingly as per the permitted cluster mode. All cluster
- * level settings like Cluster mode and TEINIT (exception handling state
- * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
+ * once (in LockStep or Single-CPU mode) or twice (in Split mode). Support
+ * for LockStep-mode is dictated by an eFUSE register bit, and the config
+ * settings retrieved from DT are adjusted accordingly as per the permitted
+ * cluster mode. Another eFUSE register bit dictates if the R5F cluster only
+ * supports a Single-CPU mode. All cluster level settings like Cluster mode and
+ * TEINIT (exception handling state dictating ARM or Thumb mode) can only be set
+ * and retrieved using Core0.
*
* The function behavior is different based on the cluster mode. The R5F cores
* are configured independently as per their individual settings in Split mode.
@@ -700,10 +743,16 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
u32 set_cfg = 0, clr_cfg = 0;
u64 boot_vec = 0;
bool lockstep_en;
+ bool single_cpu;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
- core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) {
+ core = core0;
+ } else {
+ core = kproc->core;
+ }
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
&stat);
@@ -713,23 +762,48 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
boot_vec, cfg, ctrl, stat);
+ /* check if only Single-CPU mode is supported on applicable SoCs */
+ if (cluster->soc_data->single_cpu_mode) {
+ single_cpu =
+ !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
+ if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
+ dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
+ cluster->mode = CLUSTER_MODE_SINGLECPU;
+ }
+ goto config;
+ }
+
+ /* check conventional LockStep vs Split mode configuration */
lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
cluster->mode = CLUSTER_MODE_SPLIT;
}
+config:
/* always enable ARM mode and set boot vector to 0 */
boot_vec = 0x0;
if (core == core0) {
clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
- /*
- * LockStep configuration bit is Read-only on Split-mode _only_
- * devices and system firmware will NACK any requests with the
- * bit configured, so program it only on permitted devices
- */
- if (lockstep_en)
- clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ if (cluster->soc_data->single_cpu_mode) {
+ /*
+ * Single-CPU configuration bit can only be configured
+ * on Core0 and system firmware will NACK any requests
+ * with the bit configured, so program it only on
+ * permitted cores
+ */
+ if (cluster->mode == CLUSTER_MODE_SINGLECPU)
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
+ } else {
+ /*
+ * LockStep configuration bit is Read-only on Split-mode
+ * _only_ devices and system firmware will NACK any
+ * requests with the bit configured, so program it only
+ * on permitted devices
+ */
+ if (lockstep_en)
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ }
}
if (core->atcm_enable)
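/*
 * Sketch, not part of the patch: the accumulated set/clear masks end up in a
 * single TI-SCI processor-config request, roughly (helper name assumed):
 */
ret = ti_sci_proc_set_config(core->tsp, boot_vec, set_cfg, clr_cfg);
if (ret)
	dev_err(dev, "r5f cluster config failed, ret = %d\n", ret);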
@@ -894,12 +968,12 @@ static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
* cores are usable in Split-mode, but only the Core0 TCMs can be used in
* LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
* leveraging the Core1 TCMs as well in certain modes where they would have
- * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
- * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
- * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
- * dts representation reflects this increased size on supported SoCs. The Core0
- * TCM sizes therefore have to be adjusted to only half the original size in
- * Split mode.
+ * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs, Single-CPU mode on
+ * AM64x SoCs). This is done by making a Core1 TCM visible immediately after the
+ * corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for
+ * the Core0 TCMs, and the dts representation reflects this increased size on
+ * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
+ * half the original size in Split mode.
*/
static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
{
@@ -909,6 +983,7 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
struct k3_r5_core *core0;
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU ||
!cluster->soc_data->tcm_is_double)
return;
@@ -987,8 +1062,9 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto err_add;
}
- /* create only one rproc in lockstep mode */
- if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
+ /* create only one rproc in lockstep mode or single-cpu mode */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU)
break;
}
@@ -1020,11 +1096,12 @@ static void k3_r5_cluster_rproc_exit(void *data)
struct rproc *rproc;
/*
- * lockstep mode has only one rproc associated with first core, whereas
- * split-mode has two rprocs associated with each core, and requires
- * that core1 be powered down first
+ * lockstep mode and single-cpu modes have only one rproc associated
+ * with first core, whereas split-mode has two rprocs associated with
+ * each core, and requires that core1 be powered down first
*/
- core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
list_last_entry(&cluster->cores, struct k3_r5_core, elem);
@@ -1272,9 +1349,9 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
if (IS_ERR(core->tsp)) {
+ ret = PTR_ERR(core->tsp);
dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
ret);
- ret = PTR_ERR(core->tsp);
goto err;
}
@@ -1396,7 +1473,12 @@ static int k3_r5_probe(struct platform_device *pdev)
return -ENOMEM;
cluster->dev = dev;
- cluster->mode = CLUSTER_MODE_LOCKSTEP;
+ /*
+ * default to the most common eFUSE configurations - Split-mode on AM64x
+ * and LockStep-mode on all others
+ */
+ cluster->mode = data->single_cpu_mode ?
+ CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
cluster->soc_data = data;
INIT_LIST_HEAD(&cluster->cores);
@@ -1450,17 +1532,26 @@ static int k3_r5_probe(struct platform_device *pdev)
static const struct k3_r5_soc_data am65_j721e_soc_data = {
.tcm_is_double = false,
.tcm_ecc_autoinit = false,
+ .single_cpu_mode = false,
};
static const struct k3_r5_soc_data j7200_soc_data = {
.tcm_is_double = true,
.tcm_ecc_autoinit = true,
+ .single_cpu_mode = false,
+};
+
+static const struct k3_r5_soc_data am64_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = true,
};
static const struct of_device_id k3_r5_of_match[] = {
{ .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
{ .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
{ .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, },
+ { .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 29fb33c0e26d..328f70f633eb 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -43,8 +43,9 @@ config RESET_BCM6345
This enables the reset controller driver for BCM6345 SoCs.
config RESET_BERLIN
- bool "Berlin Reset Driver" if COMPILE_TEST
- default ARCH_BERLIN
+ tristate "Berlin Reset Driver"
+ depends on ARCH_BERLIN || COMPILE_TEST
+ default m if ARCH_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
@@ -59,7 +60,8 @@ config RESET_BRCMSTB
config RESET_BRCMSTB_RESCAL
bool "Broadcom STB RESCAL reset controller"
depends on HAS_IOMEM
- default ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
help
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
BCM7216.
@@ -82,6 +84,7 @@ config RESET_IMX7
config RESET_INTEL_GW
bool "Intel Reset Controller Driver"
+ depends on X86 || COMPILE_TEST
depends on OF && HAS_IOMEM
select REGMAP_MMIO
help
@@ -111,6 +114,14 @@ config RESET_LPC18XX
help
This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
+config RESET_MCHP_SPARX5
+ bool "Microchip Sparx5 reset driver"
+ depends on HAS_IOMEM || COMPILE_TEST
+ default y if SPARX5_SWITCH
+ select MFD_SYSCON
+ help
+ This driver supports switch core reset for the Microchip Sparx5 SoC.
+
config RESET_MESON
tristate "Meson Reset Driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index ac3e612ad953..ea8b8d9ca565 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
obj-$(CONFIG_RESET_K210) += reset-k210.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
+obj-$(CONFIG_RESET_MCHP_SPARX5) += reset-microchip-sparx5.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
obj-$(CONFIG_RESET_NPCM) += reset-npcm.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 71c1c8264b2d..61e688882643 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -84,7 +84,7 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
* without gaps.
*/
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
- const struct of_phandle_args *reset_spec)
+ const struct of_phandle_args *reset_spec)
{
if (reset_spec->args[0] >= rcdev->nr_resets)
return -EINVAL;
@@ -744,9 +744,9 @@ void reset_control_bulk_release(int num_rstcs,
}
EXPORT_SYMBOL_GPL(reset_control_bulk_release);
-static struct reset_control *__reset_control_get_internal(
- struct reset_controller_dev *rcdev,
- unsigned int index, bool shared, bool acquired)
+static struct reset_control *
+__reset_control_get_internal(struct reset_controller_dev *rcdev,
+ unsigned int index, bool shared, bool acquired)
{
struct reset_control *rstc;
@@ -774,7 +774,10 @@ static struct reset_control *__reset_control_get_internal(
if (!rstc)
return ERR_PTR(-ENOMEM);
- try_module_get(rcdev->owner);
+ if (!try_module_get(rcdev->owner)) {
+ kfree(rstc);
+ return ERR_PTR(-ENODEV);
+ }
rstc->rcdev = rcdev;
list_add(&rstc->list, &rcdev->reset_control_head);
@@ -806,9 +809,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
kref_put(&rstc->refcnt, __reset_control_release);
}
-struct reset_control *__of_reset_control_get(struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired)
+struct reset_control *
+__of_reset_control_get(struct device_node *node, const char *id, int index,
+ bool shared, bool optional, bool acquired)
{
struct reset_control *rstc;
struct reset_controller_dev *r, *rcdev;
@@ -1027,9 +1030,9 @@ static void devm_reset_control_release(struct device *dev, void *res)
reset_control_put(*(struct reset_control **)res);
}
-struct reset_control *__devm_reset_control_get(struct device *dev,
- const char *id, int index, bool shared,
- bool optional, bool acquired)
+struct reset_control *
+__devm_reset_control_get(struct device *dev, const char *id, int index,
+ bool shared, bool optional, bool acquired)
{
struct reset_control **ptr, *rstc;
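
The try_module_get() check added above means a provider whose module is going
away now makes the lookup fail instead of handing out a reset control with a
dangling owner. A minimal, hypothetical consumer sketch (not part of this
patch; driver and function names are illustrative) showing how that error
simply propagates out of probe:

	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct reset_control *rst;

		/* May now return -ENODEV if the provider module is unloading. */
		rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Assert and deassert the line via the provider's ops. */
		return reset_control_reset(rst);
	}
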
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 19926506d033..5ca145b64e63 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -3,7 +3,7 @@
* Hisilicon Hi6220 reset controller driver
*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2015-2016 Hisilicon Limited.
+ * Copyright (c) 2015-2016 HiSilicon Limited.
*
* Author: Feng Chen <puck.chen@hisilicon.com>
*/
diff --git a/drivers/reset/reset-a10sr.c b/drivers/reset/reset-a10sr.c
index 7eacc89382f8..99b3bc8382f3 100644
--- a/drivers/reset/reset-a10sr.c
+++ b/drivers/reset/reset-a10sr.c
@@ -118,6 +118,7 @@ static struct platform_driver a10sr_reset_driver = {
.probe = a10sr_reset_probe,
.driver = {
.name = "altr_a10sr_reset",
+ .of_match_table = a10sr_reset_of_match,
},
};
module_platform_driver(a10sr_reset_driver);
diff --git a/drivers/reset/reset-bcm6345.c b/drivers/reset/reset-bcm6345.c
index 737e4e81f6b7..ac6c7ad1deda 100644
--- a/drivers/reset/reset-bcm6345.c
+++ b/drivers/reset/reset-bcm6345.c
@@ -86,7 +86,7 @@ static int bcm6345_reset_status(struct reset_controller_dev *rcdev,
return !(__raw_readl(bcm6345_reset->base) & BIT(id));
}
-static struct reset_control_ops bcm6345_reset_ops = {
+static const struct reset_control_ops bcm6345_reset_ops = {
.assert = bcm6345_reset_assert,
.deassert = bcm6345_reset_deassert,
.reset = bcm6345_reset_reset,
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 371197bbd055..2537ec05ecee 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -55,7 +55,7 @@ static const struct reset_control_ops berlin_reset_ops = {
static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
- unsigned offset, bit;
+ unsigned int offset, bit;
offset = reset_spec->args[0];
bit = reset_spec->args[1];
@@ -93,6 +93,7 @@ static const struct of_device_id berlin_reset_dt_match[] = {
{ .compatible = "marvell,berlin2-reset" },
{ },
};
+MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
static struct platform_driver berlin_reset_driver = {
.probe = berlin2_reset_probe,
@@ -101,4 +102,9 @@ static struct platform_driver berlin_reset_driver = {
.of_match_table = berlin_reset_dt_match,
},
};
-builtin_platform_driver(berlin_reset_driver);
+module_platform_driver(berlin_reset_driver);
+
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
+MODULE_DESCRIPTION("Synaptics Berlin reset controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
index f213264c8567..42c9d5241c53 100644
--- a/drivers/reset/reset-brcmstb.c
+++ b/drivers/reset/reset-brcmstb.c
@@ -111,6 +111,7 @@ static const struct of_device_id brcmstb_reset_of_match[] = {
{ .compatible = "brcm,brcmstb-reset" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, brcmstb_reset_of_match);
static struct platform_driver brcmstb_reset_driver = {
.probe = brcmstb_reset_probe,
diff --git a/drivers/reset/reset-lantiq.c b/drivers/reset/reset-lantiq.c
index ac41d093de13..b936cfe85641 100644
--- a/drivers/reset/reset-lantiq.c
+++ b/drivers/reset/reset-lantiq.c
@@ -186,7 +186,7 @@ static int lantiq_rcu_reset_probe(struct platform_device *pdev)
priv->rcdev.of_xlate = lantiq_rcu_reset_xlate;
priv->rcdev.of_reset_n_cells = 2;
- return reset_controller_register(&priv->rcdev);
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id lantiq_rcu_reset_dt_ids[] = {
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
new file mode 100644
index 000000000000..f01e7db8e83b
--- /dev/null
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch Reset driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define PROTECT_REG 0x84
+#define PROTECT_BIT BIT(10)
+#define SOFT_RESET_REG 0x00
+#define SOFT_RESET_BIT BIT(1)
+
+struct mchp_reset_context {
+ struct regmap *cpu_ctrl;
+ struct regmap *gcb_ctrl;
+ struct reset_controller_dev rcdev;
+};
+
+static struct regmap_config sparx5_reset_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct mchp_reset_context *ctx =
+ container_of(rcdev, struct mchp_reset_context, rcdev);
+ u32 val;
+
+ /* Make sure the core is PROTECTED from reset */
+ regmap_update_bits(ctx->cpu_ctrl, PROTECT_REG, PROTECT_BIT, PROTECT_BIT);
+
+ /* Start soft reset */
+ regmap_write(ctx->gcb_ctrl, SOFT_RESET_REG, SOFT_RESET_BIT);
+
+ /* Wait for soft reset done */
+ return regmap_read_poll_timeout(ctx->gcb_ctrl, SOFT_RESET_REG, val,
+ (val & SOFT_RESET_BIT) == 0,
+ 1, 100);
+}
+
+static const struct reset_control_ops sparx5_reset_ops = {
+ .reset = sparx5_switch_reset,
+};
+
+static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
+ struct regmap **target)
+{
+ struct device_node *syscon_np;
+ struct regmap *regmap;
+ int err;
+
+ syscon_np = of_parse_phandle(pdev->dev.of_node, name, 0);
+ if (!syscon_np)
+ return -ENODEV;
+ regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
+ if (IS_ERR(regmap)) {
+ err = PTR_ERR(regmap);
+ dev_err(&pdev->dev, "No '%s' map: %d\n", name, err);
+ return err;
+ }
+ *target = regmap;
+ return 0;
+}
+
+static int mchp_sparx5_map_io(struct platform_device *pdev, int index,
+ struct regmap **target)
+{
+ struct resource *res;
+ struct regmap *map;
+ void __iomem *mem;
+
+ mem = devm_platform_get_and_ioremap_resource(pdev, index, &res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "Could not map resource %d\n", index);
+ return PTR_ERR(mem);
+ }
+ sparx5_reset_regmap_config.name = res->name;
+ map = devm_regmap_init_mmio(&pdev->dev, mem, &sparx5_reset_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ *target = map;
+ return 0;
+}
+
+static int mchp_sparx5_reset_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct mchp_reset_context *ctx;
+ int err;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ err = mchp_sparx5_map_syscon(pdev, "cpu-syscon", &ctx->cpu_ctrl);
+ if (err)
+ return err;
+ err = mchp_sparx5_map_io(pdev, 0, &ctx->gcb_ctrl);
+ if (err)
+ return err;
+
+ ctx->rcdev.owner = THIS_MODULE;
+ ctx->rcdev.nr_resets = 1;
+ ctx->rcdev.ops = &sparx5_reset_ops;
+ ctx->rcdev.of_node = dn;
+
+ return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
+}
+
+static const struct of_device_id mchp_sparx5_reset_of_match[] = {
+ {
+ .compatible = "microchip,sparx5-switch-reset",
+ },
+ { }
+};
+
+static struct platform_driver mchp_sparx5_reset_driver = {
+ .probe = mchp_sparx5_reset_probe,
+ .driver = {
+ .name = "sparx5-switch-reset",
+ .of_match_table = mchp_sparx5_reset_of_match,
+ },
+};
+
+static int __init mchp_sparx5_reset_init(void)
+{
+ return platform_driver_register(&mchp_sparx5_reset_driver);
+}
+
+postcore_initcall(mchp_sparx5_reset_init);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
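
A hedged illustration (not part of this patch; the node pointer and function
name are hypothetical) of how a consumer referencing this controller through a
"resets" phandle would trigger the single exposed line; the call ends up in
sparx5_switch_reset(), which first protects the CPU core and then polls
SOFT_RESET_BIT until the switch core comes out of reset:

	#include <linux/of.h>
	#include <linux/reset.h>

	static int sparx5_consumer_reset(struct device_node *np)
	{
		struct reset_control *rst;
		int err;

		rst = of_reset_control_get_exclusive(np, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Returns -ETIMEDOUT if SOFT_RESET_BIT never clears. */
		err = reset_control_reset(rst);
		reset_control_put(rst);
		return err;
	}
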
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index c4013165bdda..8209f922dc16 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * drivers/reset/reset-oxnas.c
+ * Oxford Semiconductor Reset Controller driver
*
* Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
* Copyright (C) 2014 Ma Haijun <mahaijuns@gmail.com>
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index 218370faf37b..2b92775d58f0 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -58,8 +58,8 @@ struct ti_syscon_reset_data {
unsigned int nr_controls;
};
-#define to_ti_syscon_reset_data(rcdev) \
- container_of(rcdev, struct ti_syscon_reset_data, rcdev)
+#define to_ti_syscon_reset_data(_rcdev) \
+ container_of(_rcdev, struct ti_syscon_reset_data, rcdev)
/**
* ti_syscon_reset_assert() - assert device reset
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 279e535bf5d8..5f75783f9397 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -20,7 +20,7 @@ struct uniphier_reset_data {
#define UNIPHIER_RESET_ACTIVE_LOW BIT(0)
};
-#define UNIPHIER_RESET_ID_END (unsigned int)(-1)
+#define UNIPHIER_RESET_ID_END ((unsigned int)(-1))
#define UNIPHIER_RESET_END \
{ .id = UNIPHIER_RESET_ID_END }
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index ebd433fa09dd..daa425e74c96 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -83,8 +83,8 @@ static const struct zynqmp_reset_soc_data zynqmp_reset_data = {
};
static const struct zynqmp_reset_soc_data versal_reset_data = {
- .reset_id = 0,
- .num_resets = VERSAL_NR_RESETS,
+ .reset_id = 0,
+ .num_resets = VERSAL_NR_RESETS,
};
static const struct reset_control_ops zynqmp_reset_ops = {
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
index 99b63035fe72..b4b46e0f207e 100644
--- a/drivers/reset/sti/reset-syscfg.c
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -153,7 +153,7 @@ static int syscfg_reset_controller_register(struct device *dev,
if (!rc->channels)
return -ENOMEM;
- rc->rst.ops = &syscfg_reset_ops,
+ rc->rst.ops = &syscfg_reset_ops;
rc->rst.of_node = dev->of_node;
rc->rst.nr_resets = data->nr_channels;
rc->active_low = data->active_low;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d8c13fded164..12153d5801ce 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -501,11 +501,11 @@ config RTC_DRV_M41T80_WDT
watchdog timer in the ST M41T60 and M41T80 RTC chips series.
config RTC_DRV_BD70528
- tristate "ROHM BD70528, BD71815 and BD71828 PMIC RTC"
- depends on MFD_ROHM_BD71828 || MFD_ROHM_BD70528 && (BD70528_WATCHDOG || !BD70528_WATCHDOG)
+ tristate "ROHM BD71815 and BD71828 PMIC RTC"
+ depends on MFD_ROHM_BD71828
help
If you say Y here you will get support for the RTC
- block on ROHM BD70528, BD71815 and BD71828 Power Management IC.
+ block on ROHM BD71815 and BD71828 Power Management IC.
This driver can also be built as a module. If so, the module
will be called rtc-bd70528.
diff --git a/drivers/rtc/proc.c b/drivers/rtc/proc.c
index 73344598fc1b..cbcdbb19d848 100644
--- a/drivers/rtc/proc.c
+++ b/drivers/rtc/proc.c
@@ -23,8 +23,8 @@ static bool is_rtc_hctosys(struct rtc_device *rtc)
int size;
char name[NAME_SIZE];
- size = scnprintf(name, NAME_SIZE, "rtc%d", rtc->id);
- if (size > NAME_SIZE)
+ size = snprintf(name, NAME_SIZE, "rtc%d", rtc->id);
+ if (size >= NAME_SIZE)
return false;
return !strncmp(name, CONFIG_RTC_HCTOSYS_DEVICE, NAME_SIZE);
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 2216be429ab7..b7b5ea1a4e67 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -184,7 +184,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
return -EILSEQ;
memset(alrm, 0, sizeof(*alrm));
- if (alarm != ALARM_DISABLED && offset != 0) {
+ if (alarm != ALARM_DISABLED) {
rtc_time64_to_tm(offset + alarm, tm);
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index e6428b27b5d4..630ea5de6871 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Au1xxx counter0 (aka Time-Of-Year counter) RTC interface driver.
*
* Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
/* All current Au1xxx SoCs have 2 counters fed by an external 32.768 kHz
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 6454afca02a6..59b627fc1ecf 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -2,10 +2,9 @@
//
// Copyright (C) 2018 ROHM Semiconductors
//
-// RTC driver for ROHM BD70528 PMIC
+// RTC driver for ROHM BD71828 and BD71815 PMIC
#include <linux/bcd.h>
-#include <linux/mfd/rohm-bd70528.h>
#include <linux/mfd/rohm-bd71815.h>
#include <linux/mfd/rohm-bd71828.h>
#include <linux/module.h>
@@ -39,11 +38,6 @@ struct bd70528_rtc_data {
u8 year;
} __packed;
-struct bd70528_rtc_wake {
- struct bd70528_rtc_day time;
- u8 ctrl;
-} __packed;
-
struct bd71828_rtc_alm {
struct bd70528_rtc_data alm0;
struct bd70528_rtc_data alm1;
@@ -51,141 +45,14 @@ struct bd71828_rtc_alm {
u8 alm1_mask;
} __packed;
-struct bd70528_rtc_alm {
- struct bd70528_rtc_data data;
- u8 alm_mask;
- u8 alm_repeat;
-} __packed;
-
struct bd70528_rtc {
struct rohm_regmap_dev *parent;
struct regmap *regmap;
struct device *dev;
u8 reg_time_start;
u8 bd718xx_alm_block_start;
- bool has_rtc_timers;
};
-static int bd70528_set_wake(struct rohm_regmap_dev *bd70528,
- int enable, int *old_state)
-{
- int ret;
- unsigned int ctrl_reg;
-
- ret = regmap_read(bd70528->regmap, BD70528_REG_WAKE_EN, &ctrl_reg);
- if (ret)
- return ret;
-
- if (old_state) {
- if (ctrl_reg & BD70528_MASK_WAKE_EN)
- *old_state |= BD70528_WAKE_STATE_BIT;
- else
- *old_state &= ~BD70528_WAKE_STATE_BIT;
-
- if (!enable == !(*old_state & BD70528_WAKE_STATE_BIT))
- return 0;
- }
-
- if (enable)
- ctrl_reg |= BD70528_MASK_WAKE_EN;
- else
- ctrl_reg &= ~BD70528_MASK_WAKE_EN;
-
- return regmap_write(bd70528->regmap, BD70528_REG_WAKE_EN,
- ctrl_reg);
-}
-
-static int bd70528_set_elapsed_tmr(struct rohm_regmap_dev *bd70528,
- int enable, int *old_state)
-{
- int ret;
- unsigned int ctrl_reg;
-
- /*
- * TBD
- * What is the purpose of elapsed timer ?
- * Is the timeout registers counting down, or is the disable - re-enable
- * going to restart the elapsed-time counting? If counting is restarted
- * the timeout should be decreased by the amount of time that has
- * elapsed since starting the timer. Maybe we should store the monotonic
- * clock value when timer is started so that if RTC is set while timer
- * is armed we could do the compensation. This is a hack if RTC/system
- * clk are drifting. OTOH, RTC controlled via I2C is in any case
- * inaccurate...
- */
- ret = regmap_read(bd70528->regmap, BD70528_REG_ELAPSED_TIMER_EN,
- &ctrl_reg);
- if (ret)
- return ret;
-
- if (old_state) {
- if (ctrl_reg & BD70528_MASK_ELAPSED_TIMER_EN)
- *old_state |= BD70528_ELAPSED_STATE_BIT;
- else
- *old_state &= ~BD70528_ELAPSED_STATE_BIT;
-
- if ((!enable) == (!(*old_state & BD70528_ELAPSED_STATE_BIT)))
- return 0;
- }
-
- if (enable)
- ctrl_reg |= BD70528_MASK_ELAPSED_TIMER_EN;
- else
- ctrl_reg &= ~BD70528_MASK_ELAPSED_TIMER_EN;
-
- return regmap_write(bd70528->regmap, BD70528_REG_ELAPSED_TIMER_EN,
- ctrl_reg);
-}
-
-static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
- int *old_state)
-{
- int ret;
-
- ret = bd70528_wdt_set(r->parent, new_state & BD70528_WDT_STATE_BIT,
- old_state);
- if (ret) {
- dev_err(r->dev,
- "Failed to disable WDG for RTC setting (%d)\n", ret);
- return ret;
- }
- ret = bd70528_set_elapsed_tmr(r->parent,
- new_state & BD70528_ELAPSED_STATE_BIT,
- old_state);
- if (ret) {
- dev_err(r->dev,
- "Failed to disable 'elapsed timer' for RTC setting\n");
- return ret;
- }
- ret = bd70528_set_wake(r->parent, new_state & BD70528_WAKE_STATE_BIT,
- old_state);
- if (ret) {
- dev_err(r->dev,
- "Failed to disable 'wake timer' for RTC setting\n");
- return ret;
- }
-
- return ret;
-}
-
-static int bd70528_re_enable_rtc_based_timers(struct bd70528_rtc *r,
- int old_state)
-{
- if (!r->has_rtc_timers)
- return 0;
-
- return bd70528_set_rtc_based_timers(r, old_state, NULL);
-}
-
-static int bd70528_disable_rtc_based_timers(struct bd70528_rtc *r,
- int *old_state)
-{
- if (!r->has_rtc_timers)
- return 0;
-
- return bd70528_set_rtc_based_timers(r, 0, old_state);
-}
-
static inline void tmday2rtc(struct rtc_time *t, struct bd70528_rtc_day *d)
{
d->sec &= ~BD70528_MASK_RTC_SEC;
@@ -267,52 +134,6 @@ static int bd71828_set_alarm(struct device *dev, struct rtc_wkalrm *a)
}
-static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
-{
- struct bd70528_rtc_wake wake;
- struct bd70528_rtc_alm alm;
- int ret;
- struct bd70528_rtc *r = dev_get_drvdata(dev);
-
- ret = regmap_bulk_read(r->regmap, BD70528_REG_RTC_WAKE_START, &wake,
- sizeof(wake));
- if (ret) {
- dev_err(dev, "Failed to read wake regs\n");
- return ret;
- }
-
- ret = regmap_bulk_read(r->regmap, BD70528_REG_RTC_ALM_START, &alm,
- sizeof(alm));
- if (ret) {
- dev_err(dev, "Failed to read alarm regs\n");
- return ret;
- }
-
- tm2rtc(&a->time, &alm.data);
- tmday2rtc(&a->time, &wake.time);
-
- if (a->enabled) {
- alm.alm_mask &= ~BD70528_MASK_ALM_EN;
- wake.ctrl |= BD70528_MASK_WAKE_EN;
- } else {
- alm.alm_mask |= BD70528_MASK_ALM_EN;
- wake.ctrl &= ~BD70528_MASK_WAKE_EN;
- }
-
- ret = regmap_bulk_write(r->regmap, BD70528_REG_RTC_WAKE_START, &wake,
- sizeof(wake));
- if (ret) {
- dev_err(dev, "Failed to set wake time\n");
- return ret;
- }
- ret = regmap_bulk_write(r->regmap, BD70528_REG_RTC_ALM_START, &alm,
- sizeof(alm));
- if (ret)
- dev_err(dev, "Failed to set alarm time\n");
-
- return ret;
-}
-
static int bd71828_read_alarm(struct device *dev, struct rtc_wkalrm *a)
{
int ret;
@@ -336,78 +157,28 @@ static int bd71828_read_alarm(struct device *dev, struct rtc_wkalrm *a)
return 0;
}
-static int bd70528_read_alarm(struct device *dev, struct rtc_wkalrm *a)
+static int bd71828_set_time(struct device *dev, struct rtc_time *t)
{
- struct bd70528_rtc_alm alm;
int ret;
- struct bd70528_rtc *r = dev_get_drvdata(dev);
-
- ret = regmap_bulk_read(r->regmap, BD70528_REG_RTC_ALM_START, &alm,
- sizeof(alm));
- if (ret) {
- dev_err(dev, "Failed to read alarm regs\n");
- return ret;
- }
-
- rtc2tm(&alm.data, &a->time);
- a->time.tm_mday = -1;
- a->time.tm_mon = -1;
- a->time.tm_year = -1;
- a->enabled = !(alm.alm_mask & BD70528_MASK_ALM_EN);
- a->pending = 0;
-
- return 0;
-}
-
-static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
-{
- int ret, tmpret, old_states;
struct bd70528_rtc_data rtc_data;
struct bd70528_rtc *r = dev_get_drvdata(dev);
- ret = bd70528_disable_rtc_based_timers(r, &old_states);
- if (ret)
- return ret;
-
- tmpret = regmap_bulk_read(r->regmap, r->reg_time_start, &rtc_data,
- sizeof(rtc_data));
- if (tmpret) {
+ ret = regmap_bulk_read(r->regmap, r->reg_time_start, &rtc_data,
+ sizeof(rtc_data));
+ if (ret) {
dev_err(dev, "Failed to read RTC time registers\n");
- goto renable_out;
+ return ret;
}
tm2rtc(t, &rtc_data);
- tmpret = regmap_bulk_write(r->regmap, r->reg_time_start, &rtc_data,
- sizeof(rtc_data));
- if (tmpret) {
+ ret = regmap_bulk_write(r->regmap, r->reg_time_start, &rtc_data,
+ sizeof(rtc_data));
+ if (ret)
dev_err(dev, "Failed to set RTC time\n");
- goto renable_out;
- }
-
-renable_out:
- ret = bd70528_re_enable_rtc_based_timers(r, old_states);
- if (tmpret)
- ret = tmpret;
return ret;
}
-static int bd71828_set_time(struct device *dev, struct rtc_time *t)
-{
- return bd70528_set_time_locked(dev, t);
-}
-
-static int bd70528_set_time(struct device *dev, struct rtc_time *t)
-{
- int ret;
- struct bd70528_rtc *r = dev_get_drvdata(dev);
-
- bd70528_wdt_lock(r->parent);
- ret = bd70528_set_time_locked(dev, t);
- bd70528_wdt_unlock(r->parent);
- return ret;
-}
-
static int bd70528_get_time(struct device *dev, struct rtc_time *t)
{
struct bd70528_rtc *r = dev_get_drvdata(dev);
@@ -427,31 +198,6 @@ static int bd70528_get_time(struct device *dev, struct rtc_time *t)
return 0;
}
-static int bd70528_alm_enable(struct device *dev, unsigned int enabled)
-{
- int ret;
- unsigned int enableval = BD70528_MASK_ALM_EN;
- struct bd70528_rtc *r = dev_get_drvdata(dev);
-
- if (enabled)
- enableval = 0;
-
- bd70528_wdt_lock(r->parent);
- ret = bd70528_set_wake(r->parent, enabled, NULL);
- if (ret) {
- dev_err(dev, "Failed to change wake state\n");
- goto out_unlock;
- }
- ret = regmap_update_bits(r->regmap, BD70528_REG_RTC_ALM_MASK,
- BD70528_MASK_ALM_EN, enableval);
- if (ret)
- dev_err(dev, "Failed to change alarm state\n");
-
-out_unlock:
- bd70528_wdt_unlock(r->parent);
- return ret;
-}
-
static int bd71828_alm_enable(struct device *dev, unsigned int enabled)
{
int ret;
@@ -470,14 +216,6 @@ static int bd71828_alm_enable(struct device *dev, unsigned int enabled)
return ret;
}
-static const struct rtc_class_ops bd70528_rtc_ops = {
- .read_time = bd70528_get_time,
- .set_time = bd70528_set_time,
- .read_alarm = bd70528_read_alarm,
- .set_alarm = bd70528_set_alarm,
- .alarm_irq_enable = bd70528_alm_enable,
-};
-
static const struct rtc_class_ops bd71828_rtc_ops = {
.read_time = bd70528_get_time,
.set_time = bd71828_set_time,
@@ -503,7 +241,6 @@ static int bd70528_probe(struct platform_device *pdev)
struct rtc_device *rtc;
int irq;
unsigned int hr;
- bool enable_main_irq = false;
u8 hour_reg;
enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
@@ -518,21 +255,9 @@ static int bd70528_probe(struct platform_device *pdev)
}
bd_rtc->dev = &pdev->dev;
+ rtc_ops = &bd71828_rtc_ops;
switch (chip) {
- case ROHM_CHIP_TYPE_BD70528:
- bd_rtc->parent = dev_get_drvdata(pdev->dev.parent);
- if (!bd_rtc->parent) {
- dev_err(&pdev->dev, "No MFD data\n");
- return -EINVAL;
- }
- irq_name = "bd70528-rtc-alm";
- bd_rtc->has_rtc_timers = true;
- bd_rtc->reg_time_start = BD70528_REG_RTC_START;
- hour_reg = BD70528_REG_RTC_HOUR;
- enable_main_irq = true;
- rtc_ops = &bd70528_rtc_ops;
- break;
case ROHM_CHIP_TYPE_BD71815:
irq_name = "bd71815-rtc-alm-0";
bd_rtc->reg_time_start = BD71815_REG_RTC_START;
@@ -549,14 +274,12 @@ static int bd70528_probe(struct platform_device *pdev)
*/
bd_rtc->bd718xx_alm_block_start = BD71815_REG_RTC_ALM_START;
hour_reg = BD71815_REG_HOUR;
- rtc_ops = &bd71828_rtc_ops;
break;
case ROHM_CHIP_TYPE_BD71828:
irq_name = "bd71828-rtc-alm-0";
bd_rtc->reg_time_start = BD71828_REG_RTC_START;
bd_rtc->bd718xx_alm_block_start = BD71828_REG_RTC_ALM_START;
hour_reg = BD71828_REG_RTC_HOUR;
- rtc_ops = &bd71828_rtc_ops;
break;
default:
dev_err(&pdev->dev, "Unknown chip\n");
@@ -611,27 +334,10 @@ static int bd70528_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * BD70528 irq controller is not touching the main mask register.
- * So enable the RTC block interrupts at main level. We can just
- * leave them enabled as irq-controller should disable irqs
- * from sub-registers when IRQ is disabled or freed.
- */
- if (enable_main_irq) {
- ret = regmap_update_bits(bd_rtc->regmap,
- BD70528_REG_INT_MAIN_MASK,
- BD70528_INT_RTC_MASK, 0);
- if (ret) {
- dev_err(&pdev->dev, "Failed to enable RTC interrupts\n");
- return ret;
- }
- }
-
return devm_rtc_register_device(rtc);
}
static const struct platform_device_id bd718x7_rtc_id[] = {
- { "bd70528-rtc", ROHM_CHIP_TYPE_BD70528 },
{ "bd71828-rtc", ROHM_CHIP_TYPE_BD71828 },
{ "bd71815-rtc", ROHM_CHIP_TYPE_BD71815 },
{ },
@@ -649,6 +355,6 @@ static struct platform_driver bd70528_rtc = {
module_platform_driver(bd70528_rtc);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("ROHM BD70528 and BD71828 PMIC RTC driver");
+MODULE_DESCRIPTION("ROHM BD71828 and BD71815 PMIC RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bd70528-rtc");
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index fab79921a712..8db5a631bca8 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* RTC client/driver for the Maxim/Dallas DS1374 Real-Time Clock over I2C
*
@@ -6,11 +7,7 @@
*
* Copyright (C) 2014 Rose Technology
* Copyright (C) 2006-2007 Freescale Semiconductor
- *
- * 2005 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * Copyright (c) 2005 MontaVista Software, Inc.
*/
/*
* It would be more efficient to use i2c msgs/i2c_transfer directly but, as
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index edb64debd173..138c5e0046c8 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -280,7 +280,6 @@ static struct platform_driver efi_rtc_driver = {
module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe);
-MODULE_ALIAS("platform:rtc-efi");
MODULE_AUTHOR("dann frazier <dannf@dannf.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("EFI RTC driver");
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 47cd12db2356..16fdefafec5d 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -328,3 +328,4 @@ module_platform_driver(hid_time_platform_driver);
MODULE_DESCRIPTION("HID Sensor Time");
MODULE_AUTHOR("Alexander Holler <holler@ahsoftware.de>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HID);
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index c1806f4d68e7..4b712e5ab08a 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -811,6 +812,9 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxdi);
+ device_init_wakeup(&pdev->dev, true);
+ dev_pm_set_wake_irq(&pdev->dev, norm_irq);
+
imxdi->rtc->ops = &dryice_rtc_ops;
imxdi->rtc->range_max = U32_MAX;
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 89128fc29ccc..f736f8c22e96 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -544,10 +544,22 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
{
struct i2c_client *client = m41t80->client;
struct device_node *node = client->dev.of_node;
+ struct device_node *fixed_clock;
struct clk *clk;
struct clk_init_data init;
int ret;
+ fixed_clock = of_get_child_by_name(node, "clock");
+ if (fixed_clock) {
+ /*
+ * skip registering square wave clock when a fixed
+ * clock has been registered. The fixed clock is
+ * registered automatically when being referenced.
+ */
+ of_node_put(fixed_clock);
+ return 0;
+ }
+
/* First disable the clock */
ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (ret < 0)
@@ -599,10 +611,8 @@ static unsigned long wdt_is_open;
static int boot_flag;
/**
- * wdt_ping:
- *
- * Reload counter one with the watchdog timeout. We don't bother reloading
- * the cascade counter.
+ * wdt_ping - Reload counter one with the watchdog timeout.
+ * We don't bother reloading the cascade counter.
*/
static void wdt_ping(void)
{
@@ -638,9 +648,7 @@ static void wdt_ping(void)
}
/**
- * wdt_disable:
- *
- * disables watchdog.
+ * wdt_disable - disables watchdog.
*/
static void wdt_disable(void)
{
@@ -677,7 +685,7 @@ static void wdt_disable(void)
}
/**
- * wdt_write:
+ * wdt_write - write to watchdog.
* @file: file handle to the watchdog
* @buf: buffer to write (unused as data does not matter here
* @count: count of bytes
@@ -703,7 +711,7 @@ static ssize_t wdt_read(struct file *file, char __user *buf,
}
/**
- * wdt_ioctl:
+ * wdt_ioctl - ioctl handler to set watchdog.
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
@@ -778,7 +786,7 @@ static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
}
/**
- * wdt_open:
+ * wdt_open - open a watchdog.
* @inode: inode of device
* @file: file handle to device
*
@@ -802,7 +810,7 @@ static int wdt_open(struct inode *inode, struct file *file)
}
/**
- * wdt_close:
+ * wdt_release - release a watchdog.
* @inode: inode to board
* @file: file handle to board
*
@@ -815,7 +823,7 @@ static int wdt_release(struct inode *inode, struct file *file)
}
/**
- * notify_sys:
+ * wdt_notify_sys - notify to watchdog.
* @this: our notifier block
* @code: the event being reported
* @unused: unused
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index ab60f13fa3ef..4beadfa41644 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -1,14 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc class driver for the Maxim MAX6900 chip
*
+ * Copyright (c) 2007 MontaVista, Software, Inc.
+ *
* Author: Dale Farnsworth <dale@farnsworth.org>
*
* based on previously existing rtc class drivers
- *
- * 2007 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
*/
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index d51cc12114cb..eae7cb9faf1e 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -717,8 +717,8 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
add_rtc_irq:
ret = regmap_add_irq_chip(info->rtc_regmap, info->rtc_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
- IRQF_SHARED, 0, info->drv_data->rtc_irq_chip,
+ IRQF_ONESHOT | IRQF_SHARED,
+ 0, info->drv_data->rtc_irq_chip,
&info->rtc_irq_data);
if (ret < 0) {
dev_err(info->dev, "Failed to add RTC irq chip: %d\n", ret);
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index a577a74aaf75..5e0383401629 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -372,6 +372,7 @@ static const struct of_device_id mxc_ids[] = {
{ .compatible = "fsl,imx53-rtc", },
{}
};
+MODULE_DEVICE_TABLE(of, mxc_ids);
static struct platform_driver mxc_rtc_driver = {
.driver = {
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 4bcfb88674d3..67571f7f0bbc 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-palmas.c -- Palmas Real Time Clock driver.
@@ -7,20 +8,6 @@
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/bcd.h>
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index d13c20a2adf7..56c58b055dff 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -10,7 +10,7 @@
*
* based on the other drivers in this same directory.
*
- * Datasheet: http://cache.nxp.com/documents/data_sheet/PCF2127.pdf
+ * Datasheet: https://www.nxp.com/docs/en/data-sheet/PCF2127.pdf
*/
#include <linux/i2c.h>
@@ -94,10 +94,20 @@
#define PCF2127_WD_VAL_MAX 255
#define PCF2127_WD_VAL_DEFAULT 60
+/* Mask for currently enabled interrupts */
+#define PCF2127_CTRL1_IRQ_MASK (PCF2127_BIT_CTRL1_TSF1)
+#define PCF2127_CTRL2_IRQ_MASK ( \
+ PCF2127_BIT_CTRL2_AF | \
+ PCF2127_BIT_CTRL2_WDTF | \
+ PCF2127_BIT_CTRL2_TSF2)
+
struct pcf2127 {
struct rtc_device *rtc;
struct watchdog_device wdd;
struct regmap *regmap;
+ time64_t ts;
+ bool ts_valid;
+ bool irq_enabled;
};
/*
@@ -434,23 +444,96 @@ static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return pcf2127_rtc_alarm_irq_enable(dev, alrm->enabled);
}
+/*
+ * This function reads ctrl2 register, caller is responsible for calling
+ * pcf2127_wdt_active_ping()
+ */
+static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ struct rtc_time tm;
+ int ret;
+ unsigned char data[25];
+
+ ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, data,
+ sizeof(data));
+ if (ret) {
+ dev_err(dev, "%s: read error ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ dev_dbg(dev,
+ "%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, ts_sc=%02x, ts_mn=%02x, ts_hr=%02x, ts_dm=%02x, ts_mo=%02x, ts_yr=%02x\n",
+ __func__, data[PCF2127_REG_CTRL1], data[PCF2127_REG_CTRL2],
+ data[PCF2127_REG_CTRL3], data[PCF2127_REG_TS_SC],
+ data[PCF2127_REG_TS_MN], data[PCF2127_REG_TS_HR],
+ data[PCF2127_REG_TS_DM], data[PCF2127_REG_TS_MO],
+ data[PCF2127_REG_TS_YR]);
+
+ tm.tm_sec = bcd2bin(data[PCF2127_REG_TS_SC] & 0x7F);
+ tm.tm_min = bcd2bin(data[PCF2127_REG_TS_MN] & 0x7F);
+ tm.tm_hour = bcd2bin(data[PCF2127_REG_TS_HR] & 0x3F);
+ tm.tm_mday = bcd2bin(data[PCF2127_REG_TS_DM] & 0x3F);
+ /* TS_MO register (month) value range: 1-12 */
+ tm.tm_mon = bcd2bin(data[PCF2127_REG_TS_MO] & 0x1F) - 1;
+ tm.tm_year = bcd2bin(data[PCF2127_REG_TS_YR]);
+ if (tm.tm_year < 70)
+ tm.tm_year += 100; /* assume we are in 1970...2069 */
+
+ ret = rtc_valid_tm(&tm);
+ if (ret) {
+ dev_err(dev, "Invalid timestamp. ret=%d\n", ret);
+ return ret;
+ }
+
+ *ts = rtc_tm_to_time64(&tm);
+ return 0;
+};
+
+static void pcf2127_rtc_ts_snapshot(struct device *dev)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ int ret;
+
+ /* Let userspace read the first timestamp */
+ if (pcf2127->ts_valid)
+ return;
+
+ ret = pcf2127_rtc_ts_read(dev, &pcf2127->ts);
+ if (!ret)
+ pcf2127->ts_valid = true;
+}
+
static irqreturn_t pcf2127_rtc_irq(int irq, void *dev)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned int ctrl2 = 0;
+ unsigned int ctrl1, ctrl2;
int ret = 0;
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
+ if (ret)
+ return IRQ_NONE;
+
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
if (ret)
return IRQ_NONE;
- if (!(ctrl2 & PCF2127_BIT_CTRL2_AF))
+ if (!(ctrl1 & PCF2127_CTRL1_IRQ_MASK || ctrl2 & PCF2127_CTRL2_IRQ_MASK))
return IRQ_NONE;
- regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
- ctrl2 & ~(PCF2127_BIT_CTRL2_AF | PCF2127_BIT_CTRL2_WDTF));
+ if (ctrl1 & PCF2127_BIT_CTRL1_TSF1 || ctrl2 & PCF2127_BIT_CTRL2_TSF2)
+ pcf2127_rtc_ts_snapshot(dev);
+
+ if (ctrl1 & PCF2127_CTRL1_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL1,
+ ctrl1 & ~PCF2127_CTRL1_IRQ_MASK);
+
+ if (ctrl2 & PCF2127_CTRL2_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+ ctrl2 & ~PCF2127_CTRL2_IRQ_MASK);
- rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF);
+ if (ctrl2 & PCF2127_BIT_CTRL2_AF)
+ rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF);
pcf2127_wdt_active_ping(&pcf2127->wdd);
@@ -475,23 +558,27 @@ static ssize_t timestamp0_store(struct device *dev,
struct pcf2127 *pcf2127 = dev_get_drvdata(dev->parent);
int ret;
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
- PCF2127_BIT_CTRL1_TSF1, 0);
- if (ret) {
- dev_err(dev, "%s: update ctrl1 ret=%d\n", __func__, ret);
- return ret;
- }
+ if (pcf2127->irq_enabled) {
+ pcf2127->ts_valid = false;
+ } else {
+ ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ PCF2127_BIT_CTRL1_TSF1, 0);
+ if (ret) {
+ dev_err(dev, "%s: update ctrl1 ret=%d\n", __func__, ret);
+ return ret;
+ }
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
- PCF2127_BIT_CTRL2_TSF2, 0);
- if (ret) {
- dev_err(dev, "%s: update ctrl2 ret=%d\n", __func__, ret);
- return ret;
- }
+ ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
+ PCF2127_BIT_CTRL2_TSF2, 0);
+ if (ret) {
+ dev_err(dev, "%s: update ctrl2 ret=%d\n", __func__, ret);
+ return ret;
+ }
- ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
- if (ret)
- return ret;
+ ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
+ if (ret)
+ return ret;
+ }
return count;
};
@@ -500,50 +587,36 @@ static ssize_t timestamp0_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev->parent);
- struct rtc_time tm;
+ unsigned int ctrl1, ctrl2;
int ret;
- unsigned char data[25];
-
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, data,
- sizeof(data));
- if (ret) {
- dev_err(dev, "%s: read error ret=%d\n", __func__, ret);
- return ret;
- }
-
- dev_dbg(dev,
- "%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, ts_sc=%02x, "
- "ts_mn=%02x, ts_hr=%02x, ts_dm=%02x, ts_mo=%02x, ts_yr=%02x\n",
- __func__, data[PCF2127_REG_CTRL1], data[PCF2127_REG_CTRL2],
- data[PCF2127_REG_CTRL3], data[PCF2127_REG_TS_SC],
- data[PCF2127_REG_TS_MN], data[PCF2127_REG_TS_HR],
- data[PCF2127_REG_TS_DM], data[PCF2127_REG_TS_MO],
- data[PCF2127_REG_TS_YR]);
+ time64_t ts;
+
+ if (pcf2127->irq_enabled) {
+ if (!pcf2127->ts_valid)
+ return 0;
+ ts = pcf2127->ts;
+ } else {
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
+ if (ret)
+ return 0;
- ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
- if (ret)
- return ret;
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+ if (ret)
+ return 0;
- if (!(data[PCF2127_REG_CTRL1] & PCF2127_BIT_CTRL1_TSF1) &&
- !(data[PCF2127_REG_CTRL2] & PCF2127_BIT_CTRL2_TSF2))
- return 0;
+ if (!(ctrl1 & PCF2127_BIT_CTRL1_TSF1) &&
+ !(ctrl2 & PCF2127_BIT_CTRL2_TSF2))
+ return 0;
- tm.tm_sec = bcd2bin(data[PCF2127_REG_TS_SC] & 0x7F);
- tm.tm_min = bcd2bin(data[PCF2127_REG_TS_MN] & 0x7F);
- tm.tm_hour = bcd2bin(data[PCF2127_REG_TS_HR] & 0x3F);
- tm.tm_mday = bcd2bin(data[PCF2127_REG_TS_DM] & 0x3F);
- /* TS_MO register (month) value range: 1-12 */
- tm.tm_mon = bcd2bin(data[PCF2127_REG_TS_MO] & 0x1F) - 1;
- tm.tm_year = bcd2bin(data[PCF2127_REG_TS_YR]);
- if (tm.tm_year < 70)
- tm.tm_year += 100; /* assume we are in 1970...2069 */
-
- ret = rtc_valid_tm(&tm);
- if (ret)
- return ret;
+ ret = pcf2127_rtc_ts_read(dev->parent, &ts);
+ if (ret)
+ return 0;
- return sprintf(buf, "%llu\n",
- (unsigned long long)rtc_tm_to_time64(&tm));
+ ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
+ if (ret)
+ return ret;
+ }
+ return sprintf(buf, "%llu\n", (unsigned long long)ts);
};
static DEVICE_ATTR_RW(timestamp0);
@@ -594,6 +667,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
dev_err(dev, "failed to request alarm irq\n");
return ret;
}
+ pcf2127->irq_enabled = true;
}
if (alarm_irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 82becae14229..14da4ab30104 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -21,10 +21,10 @@
/*
* Information for this driver was pulled from the following datasheets.
*
- * https://www.nxp.com/documents/data_sheet/PCF85063A.pdf
- * https://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
+ * https://www.nxp.com/docs/en/data-sheet/PCF85063A.pdf
+ * https://www.nxp.com/docs/en/data-sheet/PCF85063TP.pdf
*
- * PCF85063A -- Rev. 6 — 18 November 2015
+ * PCF85063A -- Rev. 7 — 30 March 2018
* PCF85063TP -- Rev. 4 — 6 May 2015
*
* https://www.microcrystal.com/fileadmin/Media/Products/RTC/App.Manual/RV-8263-C7_App-Manual.pdf
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 740e2136ca98..8b6fb20774bf 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -10,41 +10,41 @@
#include <linux/of.h>
#include <linux/pm_wakeirq.h>
-#define REG_CONTROL1 0x00
-#define REG_CONTROL1_CAP_SEL BIT(7)
-#define REG_CONTROL1_STOP BIT(5)
-#define REG_CONTROL1_AIE BIT(1)
-
-#define REG_CONTROL2 0x01
-#define REG_CONTROL2_AF BIT(3)
-
-#define REG_CONTROL3 0x02
-#define REG_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
-#define REG_CONTROL3_PM_VDD BIT(6) /* switch-over disabled */
-#define REG_CONTROL3_PM_DSM BIT(5) /* direct switching mode */
-#define REG_CONTROL3_PM_MASK 0xe0
-#define REG_CONTROL3_BLF BIT(2) /* battery low bit, read-only */
-
-#define REG_SECONDS 0x03
-#define REG_SECONDS_OS BIT(7)
-
-#define REG_MINUTES 0x04
-#define REG_HOURS 0x05
-#define REG_DAYS 0x06
-#define REG_WEEKDAYS 0x07
-#define REG_MONTHS 0x08
-#define REG_YEARS 0x09
-
-#define REG_MINUTE_ALARM 0x0a
-#define REG_HOUR_ALARM 0x0b
-#define REG_DAY_ALARM 0x0c
-#define REG_WEEKDAY_ALARM 0x0d
+#define PCF8523_REG_CONTROL1 0x00
+#define PCF8523_CONTROL1_CAP_SEL BIT(7)
+#define PCF8523_CONTROL1_STOP BIT(5)
+#define PCF8523_CONTROL1_AIE BIT(1)
+
+#define PCF8523_REG_CONTROL2 0x01
+#define PCF8523_CONTROL2_AF BIT(3)
+
+#define PCF8523_REG_CONTROL3 0x02
+#define PCF8523_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
+#define PCF8523_CONTROL3_PM_VDD BIT(6) /* switch-over disabled */
+#define PCF8523_CONTROL3_PM_DSM BIT(5) /* direct switching mode */
+#define PCF8523_CONTROL3_PM_MASK 0xe0
+#define PCF8523_CONTROL3_BLF BIT(2) /* battery low bit, read-only */
+
+#define PCF8523_REG_SECONDS 0x03
+#define PCF8523_SECONDS_OS BIT(7)
+
+#define PCF8523_REG_MINUTES 0x04
+#define PCF8523_REG_HOURS 0x05
+#define PCF8523_REG_DAYS 0x06
+#define PCF8523_REG_WEEKDAYS 0x07
+#define PCF8523_REG_MONTHS 0x08
+#define PCF8523_REG_YEARS 0x09
+
+#define PCF8523_REG_MINUTE_ALARM 0x0a
+#define PCF8523_REG_HOUR_ALARM 0x0b
+#define PCF8523_REG_DAY_ALARM 0x0c
+#define PCF8523_REG_WEEKDAY_ALARM 0x0d
#define ALARM_DIS BIT(7)
-#define REG_OFFSET 0x0e
-#define REG_OFFSET_MODE BIT(7)
+#define PCF8523_REG_OFFSET 0x0e
+#define PCF8523_OFFSET_MODE BIT(7)
-#define REG_TMR_CLKOUT_CTRL 0x0f
+#define PCF8523_TMR_CLKOUT_CTRL 0x0f
struct pcf8523 {
struct rtc_device *rtc;
@@ -99,11 +99,11 @@ static int pcf8523_voltage_low(struct i2c_client *client)
u8 value;
int err;
- err = pcf8523_read(client, REG_CONTROL3, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL3, &value);
if (err < 0)
return err;
- return !!(value & REG_CONTROL3_BLF);
+ return !!(value & PCF8523_CONTROL3_BLF);
}
static int pcf8523_load_capacitance(struct i2c_client *client)
@@ -112,7 +112,7 @@ static int pcf8523_load_capacitance(struct i2c_client *client)
u8 value;
int err;
- err = pcf8523_read(client, REG_CONTROL1, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
@@ -126,14 +126,14 @@ static int pcf8523_load_capacitance(struct i2c_client *client)
load);
fallthrough;
case 12500:
- value |= REG_CONTROL1_CAP_SEL;
+ value |= PCF8523_CONTROL1_CAP_SEL;
break;
case 7000:
- value &= ~REG_CONTROL1_CAP_SEL;
+ value &= ~PCF8523_CONTROL1_CAP_SEL;
break;
}
- err = pcf8523_write(client, REG_CONTROL1, value);
+ err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
return err;
}
@@ -143,13 +143,13 @@ static int pcf8523_set_pm(struct i2c_client *client, u8 pm)
u8 value;
int err;
- err = pcf8523_read(client, REG_CONTROL3, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL3, &value);
if (err < 0)
return err;
- value = (value & ~REG_CONTROL3_PM_MASK) | pm;
+ value = (value & ~PCF8523_CONTROL3_PM_MASK) | pm;
- err = pcf8523_write(client, REG_CONTROL3, value);
+ err = pcf8523_write(client, PCF8523_REG_CONTROL3, value);
if (err < 0)
return err;
@@ -162,13 +162,13 @@ static irqreturn_t pcf8523_irq(int irq, void *dev_id)
u8 value;
int err;
- err = pcf8523_read(pcf8523->client, REG_CONTROL2, &value);
+ err = pcf8523_read(pcf8523->client, PCF8523_REG_CONTROL2, &value);
if (err < 0)
return IRQ_HANDLED;
- if (value & REG_CONTROL2_AF) {
- value &= ~REG_CONTROL2_AF;
- pcf8523_write(pcf8523->client, REG_CONTROL2, value);
+ if (value & PCF8523_CONTROL2_AF) {
+ value &= ~PCF8523_CONTROL2_AF;
+ pcf8523_write(pcf8523->client, PCF8523_REG_CONTROL2, value);
rtc_update_irq(pcf8523->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
@@ -182,13 +182,13 @@ static int pcf8523_stop_rtc(struct i2c_client *client)
u8 value;
int err;
- err = pcf8523_read(client, REG_CONTROL1, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
- value |= REG_CONTROL1_STOP;
+ value |= PCF8523_CONTROL1_STOP;
- err = pcf8523_write(client, REG_CONTROL1, value);
+ err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
if (err < 0)
return err;
@@ -200,13 +200,13 @@ static int pcf8523_start_rtc(struct i2c_client *client)
u8 value;
int err;
- err = pcf8523_read(client, REG_CONTROL1, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
- value &= ~REG_CONTROL1_STOP;
+ value &= ~PCF8523_CONTROL1_STOP;
- err = pcf8523_write(client, REG_CONTROL1, value);
+ err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
if (err < 0)
return err;
@@ -216,7 +216,7 @@ static int pcf8523_start_rtc(struct i2c_client *client)
static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
- u8 start = REG_SECONDS, regs[7];
+ u8 start = PCF8523_REG_SECONDS, regs[7];
struct i2c_msg msgs[2];
int err;
@@ -242,7 +242,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (err < 0)
return err;
- if (regs[0] & REG_SECONDS_OS)
+ if (regs[0] & PCF8523_SECONDS_OS)
return -EINVAL;
tm->tm_sec = bcd2bin(regs[0] & 0x7f);
@@ -267,8 +267,8 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (err < 0)
return err;
- regs[0] = REG_SECONDS;
- /* This will purposely overwrite REG_SECONDS_OS */
+ regs[0] = PCF8523_REG_SECONDS;
+ /* This will purposely overwrite PCF8523_SECONDS_OS */
regs[1] = bin2bcd(tm->tm_sec);
regs[2] = bin2bcd(tm->tm_min);
regs[3] = bin2bcd(tm->tm_hour);
@@ -299,7 +299,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct i2c_client *client = to_i2c_client(dev);
- u8 start = REG_MINUTE_ALARM, regs[4];
+ u8 start = PCF8523_REG_MINUTE_ALARM, regs[4];
struct i2c_msg msgs[2];
u8 value;
int err;
@@ -324,15 +324,15 @@ static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
tm->time.tm_mday = bcd2bin(regs[2] & 0x3F);
tm->time.tm_wday = bcd2bin(regs[3] & 0x7);
- err = pcf8523_read(client, REG_CONTROL1, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
- tm->enabled = !!(value & REG_CONTROL1_AIE);
+ tm->enabled = !!(value & PCF8523_CONTROL1_AIE);
- err = pcf8523_read(client, REG_CONTROL2, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL2, &value);
if (err < 0)
return err;
- tm->pending = !!(value & REG_CONTROL2_AF);
+ tm->pending = !!(value & PCF8523_CONTROL2_AF);
return 0;
}
@@ -343,16 +343,16 @@ static int pcf8523_irq_enable(struct device *dev, unsigned int enabled)
u8 value;
int err;
- err = pcf8523_read(client, REG_CONTROL1, &value);
+ err = pcf8523_read(client, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
- value &= REG_CONTROL1_AIE;
+ value &= PCF8523_CONTROL1_AIE;
if (enabled)
- value |= REG_CONTROL1_AIE;
+ value |= PCF8523_CONTROL1_AIE;
- err = pcf8523_write(client, REG_CONTROL1, value);
+ err = pcf8523_write(client, PCF8523_REG_CONTROL1, value);
if (err < 0)
return err;
@@ -370,7 +370,7 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err)
return err;
- err = pcf8523_write(client, REG_CONTROL2, 0);
+ err = pcf8523_write(client, PCF8523_REG_CONTROL2, 0);
if (err < 0)
return err;
@@ -382,7 +382,7 @@ static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
rtc_time64_to_tm(alarm_time, &tm->time);
}
- regs[0] = REG_MINUTE_ALARM;
+ regs[0] = PCF8523_REG_MINUTE_ALARM;
regs[1] = bin2bcd(tm->time.tm_min);
regs[2] = bin2bcd(tm->time.tm_hour);
regs[3] = bin2bcd(tm->time.tm_mday);
@@ -418,11 +418,11 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
if (ret)
flags |= RTC_VL_BACKUP_LOW;
- ret = pcf8523_read(client, REG_SECONDS, &value);
+ ret = pcf8523_read(client, PCF8523_REG_SECONDS, &value);
if (ret < 0)
return ret;
- if (value & REG_SECONDS_OS)
+ if (value & PCF8523_SECONDS_OS)
flags |= RTC_VL_DATA_INVALID;
return put_user(flags, (unsigned int __user *)arg);
@@ -442,13 +442,13 @@ static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
u8 value;
s8 val;
- err = pcf8523_read(client, REG_OFFSET, &value);
+ err = pcf8523_read(client, PCF8523_REG_OFFSET, &value);
if (err < 0)
return err;
/* sign extend the 7-bit offset value */
val = value << 1;
- *offset = (value & REG_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
+ *offset = (value & PCF8523_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
return 0;
}
@@ -465,9 +465,9 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
value = reg_m0 & 0x7f;
else
- value = (reg_m1 & 0x7f) | REG_OFFSET_MODE;
+ value = (reg_m1 & 0x7f) | PCF8523_OFFSET_MODE;
- return pcf8523_write(client, REG_OFFSET, value);
+ return pcf8523_write(client, PCF8523_REG_OFFSET, value);
}
static const struct rtc_class_ops pcf8523_rtc_ops = {
@@ -519,7 +519,7 @@ static int pcf8523_probe(struct i2c_client *client,
rtc->uie_unsupported = 1;
if (client->irq > 0) {
- err = pcf8523_write(client, REG_TMR_CLKOUT_CTRL, 0x38);
+ err = pcf8523_write(client, PCF8523_TMR_CLKOUT_CTRL, 0x38);
if (err < 0)
return err;
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 18f12f36eb2b..c8bddfb94129 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -8,7 +8,7 @@
*
* based on the other drivers in this same directory.
*
- * http://www.semiconductors.philips.com/acrobat/datasheets/PCF8563-04.pdf
+ * https://www.nxp.com/docs/en/data-sheet/PCF8563.pdf
*/
#include <linux/clk-provider.h>
diff --git a/drivers/rtc/rtc-rtd119x.c b/drivers/rtc/rtc-rtd119x.c
index bb98f2d574a5..8f9abd65846c 100644
--- a/drivers/rtc/rtc-rtd119x.c
+++ b/drivers/rtc/rtc-rtd119x.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Realtek RTD129x RTC
*
* Copyright (c) 2017 Andreas Färber
- *
- * SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/clk.h>
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 038269a6b08c..6b56f8eacba6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -488,9 +488,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(dev, "%s: %ptR(%d)\n", __func__, &alrm->time, alrm->time.tm_wday);
- ret = s5m_check_peding_alarm_interrupt(info, alrm);
-
- return 0;
+ return s5m_check_peding_alarm_interrupt(info, alrm);
}
static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index 187aa955b79c..ce7a2ddbbc16 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/bitops.h>
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index ee721e53c155..b4a520056b1a 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/rtc/rtc-spear.c
*
* Copyright (C) 2010 ST Microelectronics
* Rajeev Kumar<rajeev-dlh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/bcd.h>
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 75a8924ba12b..ac9e228b56d0 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -754,7 +754,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(rtc->rtc_ck);
if (ret)
- goto err;
+ goto err_no_rtc_ck;
if (rtc->data->need_dbp)
regmap_update_bits(rtc->dbp, rtc->dbp_reg,
@@ -830,10 +830,12 @@ static int stm32_rtc_probe(struct platform_device *pdev)
}
return 0;
+
err:
+ clk_disable_unprepare(rtc->rtc_ck);
+err_no_rtc_ck:
if (rtc->data->has_pclk)
clk_disable_unprepare(rtc->pclk);
- clk_disable_unprepare(rtc->rtc_ck);
if (rtc->data->need_dbp)
regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index a980337c3065..52093e7ba22d 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-tps6586x.c: RTC driver for TI PMIC TPS6586X
*
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/device.h>
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
index 737f26eb284a..c77b8eab94a0 100644
--- a/drivers/rtc/rtc-tps80031.c
+++ b/drivers/rtc/rtc-tps80031.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-tps80031.c -- TI TPS80031/TPS80032 RTC driver
*
@@ -7,20 +8,6 @@
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#include <linux/bcd.h>
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index d2da92187d56..4e8341c49f51 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -282,7 +282,7 @@ static int rtc_probe(struct platform_device *pdev)
{
struct v3020_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct v3020 *chip;
- int retval = -EBUSY;
+ int retval;
int i;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index 74026f67fdfb..00f1945bcb7e 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -102,7 +102,7 @@ max_user_freq_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(max_user_freq);
/**
- * rtc_sysfs_show_hctosys - indicate if the given RTC set the system time
+ * hctosys_show - indicate if the given RTC set the system time
* @dev: The device that the attribute belongs to.
* @attr: The attribute being read.
* @buf: The result buffer.
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index c8df75e99f4c..e34c6cc61983 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -621,7 +621,6 @@ void dasd_set_target_state(struct dasd_device *device, int target)
mutex_unlock(&device->state_mutex);
dasd_put_device(device);
}
-EXPORT_SYMBOL(dasd_set_target_state);
/*
* Enable devices with device numbers in [from..to].
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index fd42a5fffaed..6bb775236c16 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -69,25 +69,24 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
* resulting condition code and DIAG return code. */
static inline int __dia250(void *iob, int cmd)
{
- register unsigned long reg2 asm ("2") = (unsigned long) iob;
+ union register_pair rx = { .even = (unsigned long)iob, };
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
- int rc;
+ int cc;
- rc = 3;
+ cc = 3;
asm volatile(
- " diag 2,%2,0x250\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- " or %0,3\n"
+ " diag %[rx],%[cmd],0x250\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
"1:\n"
EX_TABLE(0b,1b)
- : "+d" (rc), "=m" (*(addr_type *) iob)
- : "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
- : "3", "cc");
- return rc;
+ : [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
+ : [cmd] "d" (cmd)
+ : "cc");
+ return cc | rx.odd;
}
static inline int dia250(void *iob, int cmd)
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a6ac505cbdd7..fb5d8152652d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -746,7 +746,7 @@ static void create_uid(struct dasd_eckd_private *private)
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
- memcpy(uid->serial, private->ned->HDA_location,
+ memcpy(uid->serial, &private->ned->serial,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
uid->ssid = private->gneq->subsystemID;
@@ -1004,15 +1004,23 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
static void dasd_eckd_store_conf_data(struct dasd_device *device,
struct dasd_conf_data *conf_data, int chp)
{
+ struct dasd_eckd_private *private = device->private;
struct channel_path_desc_fmt0 *chp_desc;
struct subchannel_id sch_id;
+ void *cdp;
- ccw_device_get_schid(device->cdev, &sch_id);
/*
* path handling and read_conf allocate data
* free it before replacing the pointer
+ * also replace the old private->conf_data pointer
+ * with the new one if this points to the same data
*/
- kfree(device->path[chp].conf_data);
+ cdp = device->path[chp].conf_data;
+ if (private->conf_data == cdp) {
+ private->conf_data = (void *)conf_data;
+ dasd_eckd_identify_conf_parts(private);
+ }
+ ccw_device_get_schid(device->cdev, &sch_id);
device->path[chp].conf_data = conf_data;
device->path[chp].cssid = sch_id.cssid;
device->path[chp].ssid = sch_id.ssid;
@@ -1020,6 +1028,7 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
if (chp_desc)
device->path[chp].chpid = chp_desc->chpid;
kfree(chp_desc);
+ kfree(cdp);
}
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 73651211789f..65e4630ad2ae 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -332,8 +332,10 @@ struct dasd_ned {
__u8 dev_type[6];
__u8 dev_model[3];
__u8 HDA_manufacturer[3];
- __u8 HDA_location[2];
- __u8 HDA_seqno[12];
+ struct {
+ __u8 HDA_location[2];
+ __u8 HDA_seqno[12];
+ } serial;
__u8 ID;
__u8 unit_addr;
} __attribute__ ((packed));
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 7faa56399999..29180bdf0977 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -17,7 +17,6 @@
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
@@ -984,94 +983,11 @@ dcssblk_check_params(void)
}
/*
- * Suspend / Resume
- */
-static int dcssblk_freeze(struct device *dev)
-{
- struct dcssblk_dev_info *dev_info;
- int rc = 0;
-
- list_for_each_entry(dev_info, &dcssblk_devices, lh) {
- switch (dev_info->segment_type) {
- case SEG_TYPE_SR:
- case SEG_TYPE_ER:
- case SEG_TYPE_SC:
- if (!dev_info->is_shared)
- rc = -EINVAL;
- break;
- default:
- rc = -EINVAL;
- break;
- }
- if (rc)
- break;
- }
- if (rc)
- pr_err("Suspending the system failed because DCSS device %s "
- "is writable\n",
- dev_info->segment_name);
- return rc;
-}
-
-static int dcssblk_restore(struct device *dev)
-{
- struct dcssblk_dev_info *dev_info;
- struct segment_info *entry;
- unsigned long start, end;
- int rc = 0;
-
- list_for_each_entry(dev_info, &dcssblk_devices, lh) {
- list_for_each_entry(entry, &dev_info->seg_list, lh) {
- segment_unload(entry->segment_name);
- rc = segment_load(entry->segment_name, SEGMENT_SHARED,
- &start, &end);
- if (rc < 0) {
-// TODO in_use check ?
- segment_warning(rc, entry->segment_name);
- goto out_panic;
- }
- if (start != entry->start || end != entry->end) {
- pr_err("The address range of DCSS %s changed "
- "while the system was suspended\n",
- entry->segment_name);
- goto out_panic;
- }
- }
- }
- return 0;
-out_panic:
- panic("fatal dcssblk resume error\n");
-}
-
-static int dcssblk_thaw(struct device *dev)
-{
- return 0;
-}
-
-static const struct dev_pm_ops dcssblk_pm_ops = {
- .freeze = dcssblk_freeze,
- .thaw = dcssblk_thaw,
- .restore = dcssblk_restore,
-};
-
-static struct platform_driver dcssblk_pdrv = {
- .driver = {
- .name = "dcssblk",
- .pm = &dcssblk_pm_ops,
- },
-};
-
-static struct platform_device *dcssblk_pdev;
-
-
-/*
* The init/exit functions.
*/
static void __exit
dcssblk_exit(void)
{
- platform_device_unregister(dcssblk_pdev);
- platform_driver_unregister(&dcssblk_pdrv);
root_device_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}
@@ -1081,22 +997,9 @@ dcssblk_init(void)
{
int rc;
- rc = platform_driver_register(&dcssblk_pdrv);
- if (rc)
- return rc;
-
- dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
- 0);
- if (IS_ERR(dcssblk_pdev)) {
- rc = PTR_ERR(dcssblk_pdev);
- goto out_pdrv;
- }
-
dcssblk_root_dev = root_device_register("dcssblk");
- if (IS_ERR(dcssblk_root_dev)) {
- rc = PTR_ERR(dcssblk_root_dev);
- goto out_pdev;
- }
+ if (IS_ERR(dcssblk_root_dev))
+ return PTR_ERR(dcssblk_root_dev);
rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc)
goto out_root;
@@ -1114,10 +1017,7 @@ dcssblk_init(void)
out_root:
root_device_unregister(dcssblk_root_dev);
-out_pdev:
- platform_device_unregister(dcssblk_pdev);
-out_pdrv:
- platform_driver_unregister(&dcssblk_pdrv);
+
return rc;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 91ef710edfd2..ce98fab4d43c 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -39,8 +39,6 @@
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/device.h>
#include <linux/bio.h>
-#include <linux/suspend.h>
-#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
@@ -140,7 +138,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
/*
* Check if xpram is available.
*/
-static int xpram_present(void)
+static int __init xpram_present(void)
{
unsigned long mem_page;
int rc;
@@ -156,7 +154,7 @@ static int xpram_present(void)
/*
* Return index of the last available xpram page.
*/
-static unsigned long xpram_highest_page_index(void)
+static unsigned long __init xpram_highest_page_index(void)
{
unsigned int page_index, add_bit;
unsigned long mem_page;
@@ -384,42 +382,6 @@ out:
}
/*
- * Resume failed: Print error message and call panic.
- */
-static void xpram_resume_error(const char *message)
-{
- pr_err("Resuming the system failed: %s\n", message);
- panic("xpram resume error\n");
-}
-
-/*
- * Check if xpram setup changed between suspend and resume.
- */
-static int xpram_restore(struct device *dev)
-{
- if (!xpram_pages)
- return 0;
- if (xpram_present() != 0)
- xpram_resume_error("xpram disappeared");
- if (xpram_pages != xpram_highest_page_index() + 1)
- xpram_resume_error("Size of xpram changed");
- return 0;
-}
-
-static const struct dev_pm_ops xpram_pm_ops = {
- .restore = xpram_restore,
-};
-
-static struct platform_driver xpram_pdrv = {
- .driver = {
- .name = XPRAM_NAME,
- .pm = &xpram_pm_ops,
- },
-};
-
-static struct platform_device *xpram_pdev;
-
-/*
* Finally, the init/exit functions.
*/
static void __exit xpram_exit(void)
@@ -430,8 +392,6 @@ static void __exit xpram_exit(void)
blk_cleanup_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
- platform_device_unregister(xpram_pdev);
- platform_driver_unregister(&xpram_pdrv);
}
static int __init xpram_init(void)
@@ -449,24 +409,7 @@ static int __init xpram_init(void)
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
- rc = platform_driver_register(&xpram_pdrv);
- if (rc)
- return rc;
- xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
- if (IS_ERR(xpram_pdev)) {
- rc = PTR_ERR(xpram_pdev);
- goto fail_platform_driver_unregister;
- }
- rc = xpram_setup_blkdev();
- if (rc)
- goto fail_platform_device_unregister;
- return 0;
-
-fail_platform_device_unregister:
- platform_device_unregister(xpram_pdev);
-fail_platform_driver_unregister:
- platform_driver_unregister(&xpram_pdrv);
- return rc;
+ return xpram_setup_blkdev();
}
module_init(xpram_init);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 1fd5bca9fa20..67c0009ca545 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -19,6 +19,7 @@
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/serial.h> /* ASYNC_* flags */
#include <linux/slab.h>
@@ -924,7 +925,7 @@ static void tty3215_close(struct tty_struct *tty, struct file * filp)
/*
* Returns the amount of free space in the output buffer.
*/
-static int tty3215_write_room(struct tty_struct *tty)
+static unsigned int tty3215_write_room(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
@@ -980,7 +981,7 @@ static void tty3215_flush_chars(struct tty_struct *tty)
/*
* Returns the number of characters in the output buffer
*/
-static int tty3215_chars_in_buffer(struct tty_struct *tty)
+static unsigned int tty3215_chars_in_buffer(struct tty_struct *tty)
{
struct raw3215_info *raw = tty->driver_data;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index e21962c0fd94..87cdbace1453 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/panic_notifier.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/err.h>
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 7bc616b253f1..9fa92e45e0ee 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
-#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <linux/uaccess.h>
@@ -79,8 +78,6 @@ static u8 user_data_sever[16] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
-static struct device *monreader_device;
-
/******************************************************************************
* helper functions *
*****************************************************************************/
@@ -319,7 +316,6 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path;
}
filp->private_data = monpriv;
- dev_set_drvdata(monreader_device, monpriv);
return nonseekable_open(inode, filp);
out_path:
@@ -354,7 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp)
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
- dev_set_drvdata(monreader_device, NULL);
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
@@ -456,94 +451,6 @@ static struct miscdevice mon_dev = {
.minor = MISC_DYNAMIC_MINOR,
};
-
-/******************************************************************************
- * suspend / resume *
- *****************************************************************************/
-static int monreader_freeze(struct device *dev)
-{
- struct mon_private *monpriv = dev_get_drvdata(dev);
- int rc;
-
- if (!monpriv)
- return 0;
- if (monpriv->path) {
- rc = iucv_path_sever(monpriv->path, user_data_sever);
- if (rc)
- pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
- rc);
- iucv_path_free(monpriv->path);
- }
- atomic_set(&monpriv->iucv_severed, 0);
- atomic_set(&monpriv->iucv_connected, 0);
- atomic_set(&monpriv->read_ready, 0);
- atomic_set(&monpriv->msglim_count, 0);
- monpriv->write_index = 0;
- monpriv->read_index = 0;
- monpriv->path = NULL;
- return 0;
-}
-
-static int monreader_thaw(struct device *dev)
-{
- struct mon_private *monpriv = dev_get_drvdata(dev);
- int rc;
-
- if (!monpriv)
- return 0;
- rc = -ENOMEM;
- monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
- if (!monpriv->path)
- goto out;
- rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
- MON_SERVICE, NULL, user_data_connect, monpriv);
- if (rc) {
- pr_err("Connecting to the z/VM *MONITOR system service "
- "failed with rc=%i\n", rc);
- goto out_path;
- }
- wait_event(mon_conn_wait_queue,
- atomic_read(&monpriv->iucv_connected) ||
- atomic_read(&monpriv->iucv_severed));
- if (atomic_read(&monpriv->iucv_severed))
- goto out_path;
- return 0;
-out_path:
- rc = -EIO;
- iucv_path_free(monpriv->path);
- monpriv->path = NULL;
-out:
- atomic_set(&monpriv->iucv_severed, 1);
- return rc;
-}
-
-static int monreader_restore(struct device *dev)
-{
- int rc;
-
- segment_unload(mon_dcss_name);
- rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
- &mon_dcss_start, &mon_dcss_end);
- if (rc < 0) {
- segment_warning(rc, mon_dcss_name);
- panic("fatal monreader resume error: no monitor dcss\n");
- }
- return monreader_thaw(dev);
-}
-
-static const struct dev_pm_ops monreader_pm_ops = {
- .freeze = monreader_freeze,
- .thaw = monreader_thaw,
- .restore = monreader_restore,
-};
-
-static struct device_driver monreader_driver = {
- .name = "monreader",
- .bus = &iucv_bus,
- .pm = &monreader_pm_ops,
-};
-
-
/******************************************************************************
* module init/exit *
*****************************************************************************/
@@ -567,36 +474,16 @@ static int __init mon_init(void)
return rc;
}
- rc = driver_register(&monreader_driver);
- if (rc)
- goto out_iucv;
- monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!monreader_device) {
- rc = -ENOMEM;
- goto out_driver;
- }
-
- dev_set_name(monreader_device, "monreader-dev");
- monreader_device->bus = &iucv_bus;
- monreader_device->parent = iucv_root;
- monreader_device->driver = &monreader_driver;
- monreader_device->release = (void (*)(struct device *))kfree;
- rc = device_register(monreader_device);
- if (rc) {
- put_device(monreader_device);
- goto out_driver;
- }
-
rc = segment_type(mon_dcss_name);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
- goto out_device;
+ goto out_iucv;
}
if (rc != SEG_TYPE_SC) {
pr_err("The specified *MONITOR DCSS %s does not have the "
"required type SC\n", mon_dcss_name);
rc = -EINVAL;
- goto out_device;
+ goto out_iucv;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
@@ -604,7 +491,7 @@ static int __init mon_init(void)
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
- goto out_device;
+ goto out_iucv;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
@@ -619,10 +506,6 @@ static int __init mon_init(void)
out:
segment_unload(mon_dcss_name);
-out_device:
- device_unregister(monreader_device);
-out_driver:
- driver_unregister(&monreader_driver);
out_iucv:
iucv_unregister(&monreader_iucv_handler, 1);
return rc;
@@ -632,8 +515,6 @@ static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
misc_deregister(&mon_dev);
- device_unregister(monreader_device);
- driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
return;
}
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index fdc0c0b7a6f5..9cd1ea92d619 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -20,7 +20,6 @@
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>
@@ -40,10 +39,7 @@ struct mon_buf {
char *data;
};
-static LIST_HEAD(mon_priv_list);
-
struct mon_private {
- struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
@@ -199,7 +195,6 @@ static int monwrite_open(struct inode *inode, struct file *filp)
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
- list_add_tail(&monpriv->priv_list, &mon_priv_list);
return nonseekable_open(inode, filp);
}
@@ -217,7 +212,6 @@ static int monwrite_close(struct inode *inode, struct file *filp)
kfree(entry->data);
kfree(entry);
}
- list_del(&monpriv->priv_list);
kfree(monpriv);
return 0;
}
@@ -294,105 +288,23 @@ static struct miscdevice mon_dev = {
};
/*
- * suspend/resume
- */
-
-static int monwriter_freeze(struct device *dev)
-{
- struct mon_private *monpriv;
- struct mon_buf *monbuf;
-
- list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
- list_for_each_entry(monbuf, &monpriv->list, list) {
- if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
- monwrite_diag(&monbuf->hdr, monbuf->data,
- APPLDATA_STOP_REC);
- }
- }
- return 0;
-}
-
-static int monwriter_restore(struct device *dev)
-{
- struct mon_private *monpriv;
- struct mon_buf *monbuf;
-
- list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
- list_for_each_entry(monbuf, &monpriv->list, list) {
- if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
- monwrite_diag(&monbuf->hdr, monbuf->data,
- APPLDATA_START_INTERVAL_REC);
- if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
- monwrite_diag(&monbuf->hdr, monbuf->data,
- APPLDATA_START_CONFIG_REC);
- }
- }
- return 0;
-}
-
-static int monwriter_thaw(struct device *dev)
-{
- return monwriter_restore(dev);
-}
-
-static const struct dev_pm_ops monwriter_pm_ops = {
- .freeze = monwriter_freeze,
- .thaw = monwriter_thaw,
- .restore = monwriter_restore,
-};
-
-static struct platform_driver monwriter_pdrv = {
- .driver = {
- .name = "monwriter",
- .pm = &monwriter_pm_ops,
- },
-};
-
-static struct platform_device *monwriter_pdev;
-
-/*
* module init/exit
*/
static int __init mon_init(void)
{
- int rc;
-
if (!MACHINE_IS_VM)
return -ENODEV;
-
- rc = platform_driver_register(&monwriter_pdrv);
- if (rc)
- return rc;
-
- monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
- 0);
- if (IS_ERR(monwriter_pdev)) {
- rc = PTR_ERR(monwriter_pdev);
- goto out_driver;
- }
-
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
- rc = misc_register(&mon_dev);
- if (rc)
- goto out_device;
- return 0;
-
-out_device:
- platform_device_unregister(monwriter_pdev);
-out_driver:
- platform_driver_unregister(&monwriter_pdrv);
- return rc;
+ return misc_register(&mon_dev);
}
static void __exit mon_exit(void)
{
misc_deregister(&mon_dev);
- platform_device_unregister(monwriter_pdev);
- platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 986bbbc23d0a..792b4bfa6d9a 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -11,14 +11,13 @@
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
-#include <linux/suspend.h>
-#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
@@ -48,9 +47,6 @@ static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;
-/* Suspend request */
-static DECLARE_COMPLETION(sclp_request_queue_flushed);
-
/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
@@ -58,11 +54,6 @@ int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;
-static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
-{
- complete(&sclp_request_queue_flushed);
-}
-
static int __init sclp_setup_console_pages(char *str)
{
int pages, rc;
@@ -87,8 +78,6 @@ static int __init sclp_setup_console_drop(char *str)
__setup("sclp_con_drop=", sclp_setup_console_drop);
-static struct sclp_req sclp_suspend_req;
-
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
@@ -122,12 +111,6 @@ static volatile enum sclp_mask_state_t {
sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
-/* Internal state: is the driver suspended? */
-static enum sclp_suspend_state_t {
- sclp_suspend_state_running,
- sclp_suspend_state_suspended,
-} sclp_suspend_state = sclp_suspend_state_running;
-
/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3
@@ -313,8 +296,6 @@ sclp_process_queue(void)
del_timer(&sclp_request_timer);
while (!list_empty(&sclp_req_queue)) {
req = list_entry(sclp_req_queue.next, struct sclp_req, list);
- if (!req->sccb)
- goto do_post;
rc = __sclp_start_request(req);
if (rc == 0)
break;
@@ -326,7 +307,6 @@ sclp_process_queue(void)
sclp_request_timeout_normal);
break;
}
-do_post:
/* Post-processing for aborted request */
list_del(&req->list);
if (req->callback) {
@@ -340,10 +320,8 @@ do_post:
static int __sclp_can_add_request(struct sclp_req *req)
{
- if (req == &sclp_suspend_req || req == &sclp_init_req)
+ if (req == &sclp_init_req)
return 1;
- if (sclp_suspend_state != sclp_suspend_state_running)
- return 0;
if (sclp_init_state != sclp_init_state_initialized)
return 0;
if (sclp_activation_state != sclp_activation_state_active)
@@ -377,16 +355,10 @@ sclp_add_request(struct sclp_req *req)
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
- if (!req->sccb) {
- list_del(&req->list);
- rc = -ENODATA;
- goto out;
- }
rc = __sclp_start_request(req);
if (rc)
list_del(&req->list);
}
-out:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -692,7 +664,6 @@ sclp_register(struct sclp_register *reg)
/* Trigger initial state change callback */
reg->sclp_receive_mask = 0;
reg->sclp_send_mask = 0;
- reg->pm_event_posted = 0;
list_add(&reg->list, &sclp_reg_list);
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(1);
@@ -1010,112 +981,6 @@ static struct notifier_block sclp_reboot_notifier = {
.notifier_call = sclp_reboot_event
};
-/*
- * Suspend/resume SCLP notifier implementation
- */
-
-static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
-{
- struct sclp_register *reg;
- unsigned long flags;
-
- if (!rollback) {
- spin_lock_irqsave(&sclp_lock, flags);
- list_for_each_entry(reg, &sclp_reg_list, list)
- reg->pm_event_posted = 0;
- spin_unlock_irqrestore(&sclp_lock, flags);
- }
- do {
- spin_lock_irqsave(&sclp_lock, flags);
- list_for_each_entry(reg, &sclp_reg_list, list) {
- if (rollback && reg->pm_event_posted)
- goto found;
- if (!rollback && !reg->pm_event_posted)
- goto found;
- }
- spin_unlock_irqrestore(&sclp_lock, flags);
- return;
-found:
- spin_unlock_irqrestore(&sclp_lock, flags);
- if (reg->pm_event_fn)
- reg->pm_event_fn(reg, sclp_pm_event);
- reg->pm_event_posted = rollback ? 0 : 1;
- } while (1);
-}
-
-/*
- * Susend/resume callbacks for platform device
- */
-
-static int sclp_freeze(struct device *dev)
-{
- unsigned long flags;
- int rc;
-
- sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
-
- spin_lock_irqsave(&sclp_lock, flags);
- sclp_suspend_state = sclp_suspend_state_suspended;
- spin_unlock_irqrestore(&sclp_lock, flags);
-
- /* Init supend data */
- memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
- sclp_suspend_req.callback = sclp_suspend_req_cb;
- sclp_suspend_req.status = SCLP_REQ_FILLED;
- init_completion(&sclp_request_queue_flushed);
-
- rc = sclp_add_request(&sclp_suspend_req);
- if (rc == 0)
- wait_for_completion(&sclp_request_queue_flushed);
- else if (rc != -ENODATA)
- goto fail_thaw;
-
- rc = sclp_deactivate();
- if (rc)
- goto fail_thaw;
- return 0;
-
-fail_thaw:
- spin_lock_irqsave(&sclp_lock, flags);
- sclp_suspend_state = sclp_suspend_state_running;
- spin_unlock_irqrestore(&sclp_lock, flags);
- sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
- return rc;
-}
-
-static int sclp_undo_suspend(enum sclp_pm_event event)
-{
- unsigned long flags;
- int rc;
-
- rc = sclp_reactivate();
- if (rc)
- return rc;
-
- spin_lock_irqsave(&sclp_lock, flags);
- sclp_suspend_state = sclp_suspend_state_running;
- spin_unlock_irqrestore(&sclp_lock, flags);
-
- sclp_pm_event(event, 0);
- return 0;
-}
-
-static int sclp_thaw(struct device *dev)
-{
- return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
-}
-
-static int sclp_restore(struct device *dev)
-{
- return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
-}
-
-static const struct dev_pm_ops sclp_pm_ops = {
- .freeze = sclp_freeze,
- .thaw = sclp_thaw,
- .restore = sclp_restore,
-};
-
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
return sprintf(buf, "%i\n", sclp_console_pages);
@@ -1154,13 +1019,10 @@ static const struct attribute_group *sclp_drv_attr_groups[] = {
static struct platform_driver sclp_pdrv = {
.driver = {
.name = "sclp",
- .pm = &sclp_pm_ops,
.groups = sclp_drv_attr_groups,
},
};
-static struct platform_device *sclp_pdev;
-
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
static int
@@ -1214,23 +1076,6 @@ fail_unlock:
return rc;
}
-/*
- * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
- * to print the panic message.
- */
-static int sclp_panic_notify(struct notifier_block *self,
- unsigned long event, void *data)
-{
- if (sclp_suspend_state == sclp_suspend_state_suspended)
- sclp_undo_suspend(SCLP_PM_EVENT_THAW);
- return NOTIFY_OK;
-}
-
-static struct notifier_block sclp_on_panic_nb = {
- .notifier_call = sclp_panic_notify,
- .priority = SCLP_PANIC_PRIO,
-};
-
static __init int sclp_initcall(void)
{
int rc;
@@ -1239,23 +1084,7 @@ static __init int sclp_initcall(void)
if (rc)
return rc;
- sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
- rc = PTR_ERR_OR_ZERO(sclp_pdev);
- if (rc)
- goto fail_platform_driver_unregister;
-
- rc = atomic_notifier_chain_register(&panic_notifier_list,
- &sclp_on_panic_nb);
- if (rc)
- goto fail_platform_device_unregister;
-
return sclp_init();
-
-fail_platform_device_unregister:
- platform_device_unregister(sclp_pdev);
-fail_platform_driver_unregister:
- platform_driver_unregister(&sclp_pdrv);
- return rc;
}
arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6de919944a39..8dd8ad83b78b 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -81,15 +81,6 @@ typedef unsigned int sclp_cmdw_t;
#define GDS_KEY_SELFDEFTEXTMSG 0x31
-enum sclp_pm_event {
- SCLP_PM_EVENT_FREEZE,
- SCLP_PM_EVENT_THAW,
- SCLP_PM_EVENT_RESTORE,
-};
-
-#define SCLP_PANIC_PRIO 1
-#define SCLP_PANIC_PRIO_CLIENT 0
-
typedef u64 sccb_mask_t;
struct sccb_header {
@@ -293,10 +284,6 @@ struct sclp_register {
void (*state_change_fn)(struct sclp_register *);
/* called for events in cp_receive_mask/sclp_receive_mask */
void (*receiver_fn)(struct evbuf_header *);
- /* called for power management events */
- void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
- /* pm event posted flag */
- int pm_event_posted;
};
/* externals from sclp.c */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index d41bc144c183..ab0518cfdcfe 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -20,7 +20,6 @@
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
@@ -168,7 +167,6 @@ static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
-static int sclp_mem_state_changed;
struct memory_increment {
struct list_head list;
@@ -359,8 +357,6 @@ static int sclp_mem_notifier(struct notifier_block *nb,
rc = -EINVAL;
break;
}
- if (!rc)
- sclp_mem_state_changed = 1;
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
@@ -456,28 +452,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
list_add(&new_incr->list, prev);
}
-static int sclp_mem_freeze(struct device *dev)
-{
- if (!sclp_mem_state_changed)
- return 0;
- pr_err("Memory hotplug state changed, suspend refused.\n");
- return -EPERM;
-}
-
-static const struct dev_pm_ops sclp_mem_pm_ops = {
- .freeze = sclp_mem_freeze,
-};
-
-static struct platform_driver sclp_mem_pdrv = {
- .driver = {
- .name = "sclp_mem",
- .pm = &sclp_mem_pm_ops,
- },
-};
-
static int __init sclp_detect_standby_memory(void)
{
- struct platform_device *sclp_pdev;
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
@@ -530,17 +506,7 @@ static int __init sclp_detect_standby_memory(void)
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
- rc = platform_driver_register(&sclp_mem_pdrv);
- if (rc)
- goto out;
- sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
- rc = PTR_ERR_OR_ZERO(sclp_pdev);
- if (rc)
- goto out_driver;
sclp_add_standby_memory();
- goto out;
-out_driver:
- platform_driver_unregister(&sclp_mem_pdrv);
out:
free_page((unsigned long) sccb);
return rc;
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 9b852a47ccc1..de028868c6f4 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -10,6 +10,7 @@
#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/init.h>
+#include <linux/panic_notifier.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/termios.h>
@@ -35,8 +36,6 @@ static LIST_HEAD(sclp_con_outqueue);
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
-/* Suspend mode flag */
-static int sclp_con_suspended;
/* Flag that output queue is currently running */
static int sclp_con_queue_running;
@@ -63,7 +62,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
if (!list_empty(&sclp_con_outqueue))
buffer = list_first_entry(&sclp_con_outqueue,
struct sclp_buffer, list);
- if (!buffer || sclp_con_suspended) {
+ if (!buffer) {
sclp_con_queue_running = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
break;
@@ -85,7 +84,7 @@ static void sclp_conbuf_emit(void)
if (sclp_conbuf)
list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
sclp_conbuf = NULL;
- if (sclp_con_queue_running || sclp_con_suspended)
+ if (sclp_con_queue_running)
goto out_unlock;
if (list_empty(&sclp_con_outqueue))
goto out_unlock;
@@ -179,8 +178,6 @@ sclp_console_write(struct console *console, const char *message,
if (list_empty(&sclp_con_pages))
sclp_console_full++;
while (list_empty(&sclp_con_pages)) {
- if (sclp_con_suspended)
- goto out;
if (sclp_console_drop_buffer())
break;
spin_unlock_irqrestore(&sclp_con_lock, flags);
@@ -213,7 +210,6 @@ sclp_console_write(struct console *console, const char *message,
!timer_pending(&sclp_con_timer)) {
mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
-out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
@@ -234,32 +230,6 @@ sclp_console_flush(void)
sclp_console_sync_queue();
}
-/*
- * Resume console: If there are cached messages, emit them.
- */
-static void sclp_console_resume(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sclp_con_lock, flags);
- sclp_con_suspended = 0;
- spin_unlock_irqrestore(&sclp_con_lock, flags);
- sclp_conbuf_emit();
-}
-
-/*
- * Suspend console: Set suspend flag and flush console
- */
-static void sclp_console_suspend(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sclp_con_lock, flags);
- sclp_con_suspended = 1;
- spin_unlock_irqrestore(&sclp_con_lock, flags);
- sclp_console_flush();
-}
-
static int sclp_console_notify(struct notifier_block *self,
unsigned long event, void *data)
{
@@ -269,7 +239,7 @@ static int sclp_console_notify(struct notifier_block *self,
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
- .priority = SCLP_PANIC_PRIO_CLIENT,
+ .priority = 1,
};
static struct notifier_block on_reboot_nb = {
@@ -291,22 +261,6 @@ static struct console sclp_console =
};
/*
- * This function is called for SCLP suspend and resume events.
- */
-void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
-{
- switch (sclp_pm_event) {
- case SCLP_PM_EVENT_FREEZE:
- sclp_console_suspend();
- break;
- case SCLP_PM_EVENT_RESTORE:
- case SCLP_PM_EVENT_THAW:
- sclp_console_resume();
- break;
- }
-}
-
-/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/
static int __init
diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c
index dfdd6c8fd17e..1e9de99dcd02 100644
--- a/drivers/s390/char/sclp_ftp.c
+++ b/drivers/s390/char/sclp_ftp.c
@@ -231,7 +231,6 @@ static struct sclp_register sclp_ftp_event = {
.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
.receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */
.state_change_fn = NULL,
- .pm_event_fn = NULL,
};
/**
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 76956c2131cd..ade721467804 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -18,10 +18,6 @@
#include "sclp.h"
-static void (*old_machine_restart)(char *);
-static void (*old_machine_halt)(void);
-static void (*old_machine_power_off)(void);
-
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void do_machine_quiesce(void)
{
@@ -37,42 +33,15 @@ static void do_machine_quiesce(void)
/* Handler for quiesce event. Start shutdown procedure. */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
- if (_machine_restart != (void *) do_machine_quiesce) {
- old_machine_restart = _machine_restart;
- old_machine_halt = _machine_halt;
- old_machine_power_off = _machine_power_off;
- _machine_restart = (void *) do_machine_quiesce;
- _machine_halt = do_machine_quiesce;
- _machine_power_off = do_machine_quiesce;
- }
+ _machine_restart = (void *) do_machine_quiesce;
+ _machine_halt = do_machine_quiesce;
+ _machine_power_off = do_machine_quiesce;
ctrl_alt_del();
}
-/* Undo machine restart/halt/power_off modification on resume */
-static void sclp_quiesce_pm_event(struct sclp_register *reg,
- enum sclp_pm_event sclp_pm_event)
-{
- switch (sclp_pm_event) {
- case SCLP_PM_EVENT_RESTORE:
- if (old_machine_restart) {
- _machine_restart = old_machine_restart;
- _machine_halt = old_machine_halt;
- _machine_power_off = old_machine_power_off;
- old_machine_restart = NULL;
- old_machine_halt = NULL;
- old_machine_power_off = NULL;
- }
- break;
- case SCLP_PM_EVENT_FREEZE:
- case SCLP_PM_EVENT_THAW:
- break;
- }
-}
-
static struct sclp_register sclp_quiesce_event = {
.receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler,
- .pm_event_fn = sclp_quiesce_pm_event
};
/* Initialize quiesce driver. */
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index d6c84e354df5..1690326553b1 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -26,16 +26,9 @@
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
-static void sclp_rw_pm_event(struct sclp_register *reg,
- enum sclp_pm_event sclp_pm_event)
-{
- sclp_console_pm_event(sclp_pm_event);
-}
-
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
.send_mask = EVTYP_MSG_MASK,
- .pm_event_fn = sclp_rw_pm_event,
};
/*
@@ -325,10 +318,10 @@ sclp_buffer_space(struct sclp_buffer *buffer)
/*
* Return number of characters in buffer
*/
-int
+unsigned int
sclp_chars_in_buffer(struct sclp_buffer *buffer)
{
- int count;
+ unsigned int count;
count = buffer->char_sum;
if (buffer->current_line != NULL)
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index 93d706e4935c..9b779ee4f979 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -86,12 +86,6 @@ void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
-int sclp_chars_in_buffer(struct sclp_buffer *);
-
-#ifdef CONFIG_SCLP_CONSOLE
-void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
-#else
-static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
-#endif
+unsigned int sclp_chars_in_buffer(struct sclp_buffer *);
#endif /* __SCLP_RW_H__ */
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 4456ceb23bd2..6be9de8ed37d 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -86,12 +86,12 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
* a string of newlines. Every newline creates a new message which
* needs 82 bytes.
*/
-static int
+static unsigned int
sclp_tty_write_room (struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
- int count;
+ unsigned int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
@@ -280,20 +280,17 @@ sclp_tty_flush_chars(struct tty_struct *tty)
* characters in the write buffer (will not be written as long as there is a
* final line feed missing).
*/
-static int
+static unsigned int
sclp_tty_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
- struct list_head *l;
struct sclp_buffer *t;
- int count;
+ unsigned int count = 0;
spin_lock_irqsave(&sclp_tty_lock, flags);
- count = 0;
if (sclp_ttybuf != NULL)
count = sclp_chars_in_buffer(sclp_ttybuf);
- list_for_each(l, &sclp_tty_outqueue) {
- t = list_entry(l, struct sclp_buffer, list);
+ list_for_each_entry(t, &sclp_tty_outqueue, list) {
count += sclp_chars_in_buffer(t);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 7f4445b0f819..da2496306c04 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/panic_notifier.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
@@ -35,8 +36,8 @@
#define SCLP_VT220_MINOR 65
#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
#define SCLP_VT220_DEVICE_NAME "ttysclp"
-#define SCLP_VT220_CONSOLE_NAME "ttyS"
-#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
+#define SCLP_VT220_CONSOLE_NAME "ttysclp"
+#define SCLP_VT220_CONSOLE_INDEX 0 /* console=ttysclp0 */
/* Representation of a single write request */
struct sclp_vt220_request {
@@ -69,9 +70,6 @@ static LIST_HEAD(sclp_vt220_empty);
/* List of pending requests */
static LIST_HEAD(sclp_vt220_outqueue);
-/* Suspend mode flag */
-static int sclp_vt220_suspended;
-
/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;
@@ -95,15 +93,12 @@ static int __initdata sclp_vt220_init_count;
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
-static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
- enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
- .pm_event_fn = sclp_vt220_pm_event_fn,
};
/* Registration structure for SCLP input event buffers */
@@ -135,7 +130,7 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
if (!list_empty(&sclp_vt220_outqueue))
request = list_entry(sclp_vt220_outqueue.next,
struct sclp_vt220_request, list);
- if (!request || sclp_vt220_suspended) {
+ if (!request) {
sclp_vt220_queue_running = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
break;
@@ -241,7 +236,7 @@ sclp_vt220_emit_current(void)
}
sclp_vt220_flush_later = 0;
}
- if (sclp_vt220_queue_running || sclp_vt220_suspended)
+ if (sclp_vt220_queue_running)
goto out_unlock;
if (list_empty(&sclp_vt220_outqueue))
goto out_unlock;
@@ -420,7 +415,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
if (list_empty(&sclp_vt220_empty))
sclp_console_full++;
while (list_empty(&sclp_vt220_empty)) {
- if (may_fail || sclp_vt220_suspended)
+ if (may_fail)
goto out;
if (sclp_vt220_drop_buffer())
break;
@@ -609,12 +604,12 @@ sclp_vt220_flush_chars(struct tty_struct *tty)
* to change as output buffers get emptied, or if the output flow
* control is acted.
*/
-static int
+static unsigned int
sclp_vt220_write_room(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
- int count;
+ unsigned int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
@@ -629,16 +624,15 @@ sclp_vt220_write_room(struct tty_struct *tty)
/*
* Return number of buffered chars.
*/
-static int
+static unsigned int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_vt220_request *r;
- int count;
+ unsigned int count = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags);
- count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_chars_stored(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_outqueue) {
@@ -791,46 +785,6 @@ static void __sclp_vt220_flush_buffer(void)
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
-/*
- * Resume console: If there are cached messages, emit them.
- */
-static void sclp_vt220_resume(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- sclp_vt220_suspended = 0;
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- sclp_vt220_emit_current();
-}
-
-/*
- * Suspend console: Set suspend flag and flush console
- */
-static void sclp_vt220_suspend(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- sclp_vt220_suspended = 1;
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- __sclp_vt220_flush_buffer();
-}
-
-static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
- enum sclp_pm_event sclp_pm_event)
-{
- switch (sclp_pm_event) {
- case SCLP_PM_EVENT_FREEZE:
- sclp_vt220_suspend();
- break;
- case SCLP_PM_EVENT_RESTORE:
- case SCLP_PM_EVENT_THAW:
- sclp_vt220_resume();
- break;
- }
-}
-
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 8abb42923307..cc8237afeffa 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device,
case MTSEEK:
if (device->required_tapemarks)
tape_std_terminate_write(device);
- default:
- ;
}
rc = tape_mtop(device, op.mt_op, op.mt_count);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 307a80f85c07..adc33846bf8e 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1071,7 +1071,7 @@ static void tty3270_cleanup(struct tty_struct *tty)
/*
* We always have room.
*/
-static int
+static unsigned int
tty3270_write_room(struct tty_struct *tty)
{
return INT_MAX;
@@ -1640,7 +1640,7 @@ tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
int i_msg, i;
spin_lock_bh(&tp->view.lock);
- for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) {
+ for (i_msg = 0; !tty->flow.stopped && i_msg < count; i_msg++) {
if (tp->esc_state != 0) {
/* Continue escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
@@ -1757,22 +1757,6 @@ tty3270_flush_chars(struct tty_struct *tty)
}
/*
- * Returns the number of characters in the output buffer. This is
- * used in tty_wait_until_sent to wait until all characters have
- * appeared on the screen.
- */
-static int
-tty3270_chars_in_buffer(struct tty_struct *tty)
-{
- return 0;
-}
-
-static void
-tty3270_flush_buffer(struct tty_struct *tty)
-{
-}
-
-/*
* Check for visible/invisible input switches
*/
static void
@@ -1892,8 +1876,6 @@ static const struct tty_operations tty3270_ops = {
.put_char = tty3270_put_char,
.flush_chars = tty3270_flush_chars,
.write_room = tty3270_write_room,
- .chars_in_buffer = tty3270_chars_in_buffer,
- .flush_buffer = tty3270_flush_buffer,
.throttle = tty3270_throttle,
.unthrottle = tty3270_unthrottle,
.hangup = tty3270_hangup,
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 58333cb4503f..ed970ecfafdf 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -679,34 +679,10 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = {
NULL,
};
-static int vmlogrdr_pm_prepare(struct device *dev)
-{
- int rc;
- struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
-
- rc = 0;
- if (priv) {
- spin_lock_bh(&priv->priv_lock);
- if (priv->dev_in_use)
- rc = -EBUSY;
- spin_unlock_bh(&priv->priv_lock);
- }
- if (rc)
- pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
- dev_name(dev));
- return rc;
-}
-
-
-static const struct dev_pm_ops vmlogrdr_pm_ops = {
- .prepare = vmlogrdr_pm_prepare,
-};
-
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
- .pm = &vmlogrdr_pm_ops,
.groups = vmlogrdr_drv_attr_groups,
};
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bd3c724bf695..b5b0848da93b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <asm/asm-offsets.h>
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index cb466ed7eb5e..e56535c99888 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -93,7 +93,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
struct hlist_head *head;
set_cpu_flag(CIF_NOHZ_DELAY);
- tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ tpi_info = &get_irq_regs()->tpi_info;
trace_s390_cio_adapter_int(tpi_info);
head = &airq_lists[tpi_info->isc];
rcu_read_lock();
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 444385da5792..9748165e08e9 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -45,27 +45,6 @@ static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
}
}
-/*
- * Remove references from ccw devices to ccw group device and from
- * ccw group device to ccw devices.
- */
-static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
-{
- struct ccw_device *cdev;
- int i;
-
- for (i = 0; i < gdev->count; i++) {
- cdev = gdev->cdev[i];
- if (!cdev)
- continue;
- spin_lock_irq(cdev->ccwlock);
- dev_set_drvdata(&cdev->dev, NULL);
- spin_unlock_irq(cdev->ccwlock);
- gdev->cdev[i] = NULL;
- put_device(&cdev->dev);
- }
-}
-
/**
* ccwgroup_set_online() - enable a ccwgroup device
* @gdev: target ccwgroup device
@@ -175,7 +154,6 @@ static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
- __ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
}
@@ -228,7 +206,23 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work)
static void ccwgroup_release(struct device *dev)
{
- kfree(to_ccwgroupdev(dev));
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ unsigned int i;
+
+ for (i = 0; i < gdev->count; i++) {
+ struct ccw_device *cdev = gdev->cdev[i];
+ unsigned long flags;
+
+ if (cdev) {
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (dev_get_drvdata(&cdev->dev) == gdev)
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ put_device(&cdev->dev);
+ }
+ }
+
+ kfree(gdev);
}
static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
@@ -396,15 +390,6 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
mutex_unlock(&gdev->reg_mutex);
return 0;
error:
- for (i = 0; i < num_devices; i++)
- if (gdev->cdev[i]) {
- spin_lock_irq(gdev->cdev[i]->ccwlock);
- if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
- dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
- spin_unlock_irq(gdev->cdev[i]->ccwlock);
- put_device(&gdev->cdev[i]->dev);
- gdev->cdev[i] = NULL;
- }
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return rc;
@@ -416,7 +401,7 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
{
struct ccwgroup_device *gdev = to_ccwgroupdev(data);
- if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+ if (action == BUS_NOTIFY_UNBOUND_DRIVER) {
get_device(&gdev->dev);
schedule_work(&gdev->ungroup_work);
}
@@ -514,15 +499,6 @@ EXPORT_SYMBOL(ccwgroup_driver_register);
*/
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
- struct device *dev;
-
- /* We don't want ccwgroup devices to live longer than their driver. */
- while ((dev = driver_find_next_device(&cdriver->driver, NULL))) {
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-
- ccwgroup_ungroup(gdev);
- put_device(dev);
- }
driver_unregister(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index e42113825415..1097e76982a5 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -255,6 +255,9 @@ static ssize_t chp_status_write(struct device *dev,
if (!num_args)
return count;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+
if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c22d9ee27ba1..297fb399363c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -801,8 +801,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- /* Wait until previous actions have settled. */
- css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6d716db2a46a..923f5ca4f5e6 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -536,7 +536,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
struct irb *irb;
set_cpu_flag(CIF_NOHZ_DELAY);
- tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ tpi_info = &get_irq_regs()->tpi_info;
trace_s390_cio_interrupt(tpi_info);
irb = this_cpu_ptr(&cio_irb);
sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index dcdaba689b20..1cb9daf9c645 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -9,6 +9,7 @@
#include <asm/cio.h>
#include <asm/fcx.h>
#include <asm/schid.h>
+#include <asm/tpi.h>
#include "chsc.h"
/*
@@ -46,18 +47,6 @@ struct pmcw {
/* ... in an operand exception. */
} __attribute__ ((packed));
-/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
-struct tpi_info {
- struct subchannel_id schid;
- u32 intparm;
- u32 adapter_IO:1;
- u32 directed_irq:1;
- u32 isc:3;
- u32 :27;
- u32 type:3;
- u32 :12;
-} __packed __aligned(4);
-
/* Target SCHIB configuration. */
struct schib_config {
u64 mba;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b7b590646d58..5584aa46c94e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -163,13 +163,14 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
*/
static inline void cmf_activate(void *area, unsigned int onoff)
{
- register void * __gpr2 asm("2");
- register long __gpr1 asm("1");
-
- __gpr2 = area;
- __gpr1 = onoff;
/* activate channel measurement */
- asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
+ asm volatile(
+ " lgr 1,%[r1]\n"
+ " lgr 2,%[mbo]\n"
+ " schm\n"
+ :
+ : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
+ : "1", "2");
}
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 4c5244d6052b..180913007824 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -16,18 +16,19 @@
static inline int __stsch(struct subchannel_id schid, struct schib *addr)
{
- register struct subchannel_id reg1 asm ("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
- " stsch 0(%3)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ " lgr 1,%[r1]\n"
+ " stsch %[addr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (ccode), "=m" (*addr)
- : "d" (reg1), "a" (addr)
- : "cc");
+ : [cc] "+&d" (ccode), [addr] "=Q" (*addr)
+ : [r1] "d" (r1)
+ : "cc", "1");
return ccode;
}
@@ -44,18 +45,19 @@ EXPORT_SYMBOL(stsch);
static inline int __msch(struct subchannel_id schid, struct schib *addr)
{
- register struct subchannel_id reg1 asm ("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
- " msch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ " lgr 1,%[r1]\n"
+ " msch %[addr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (ccode)
- : "d" (reg1), "a" (addr), "m" (*addr)
- : "cc");
+ : [cc] "+&d" (ccode)
+ : [r1] "d" (r1), [addr] "Q" (*addr)
+ : "cc", "1");
return ccode;
}
@@ -71,16 +73,17 @@ int msch(struct subchannel_id schid, struct schib *addr)
static inline int __tsch(struct subchannel_id schid, struct irb *addr)
{
- register struct subchannel_id reg1 asm ("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
- " tsch 0(%3)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode), "=m" (*addr)
- : "d" (reg1), "a" (addr)
- : "cc");
+ " lgr 1,%[r1]\n"
+ " tsch %[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28"
+ : [cc] "=&d" (ccode), [addr] "=Q" (*addr)
+ : [r1] "d" (r1)
+ : "cc", "1");
return ccode;
}
@@ -96,18 +99,19 @@ int tsch(struct subchannel_id schid, struct irb *addr)
static inline int __ssch(struct subchannel_id schid, union orb *addr)
{
- register struct subchannel_id reg1 asm("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
- " ssch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ " lgr 1,%[r1]\n"
+ " ssch %[addr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (ccode)
- : "d" (reg1), "a" (addr), "m" (*addr)
- : "cc", "memory");
+ : [cc] "+&d" (ccode)
+ : [r1] "d" (r1), [addr] "Q" (*addr)
+ : "cc", "memory", "1");
return ccode;
}
@@ -124,16 +128,17 @@ EXPORT_SYMBOL(ssch);
static inline int __csch(struct subchannel_id schid)
{
- register struct subchannel_id reg1 asm("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
+ " lgr 1,%[r1]\n"
" csch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (ccode)
+ : [r1] "d" (r1)
+ : "cc", "1");
return ccode;
}
@@ -153,11 +158,11 @@ int tpi(struct tpi_info *addr)
int ccode;
asm volatile(
- " tpi 0(%2)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode), "=m" (*addr)
- : "a" (addr)
+ " tpi %[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28"
+ : [cc] "=&d" (ccode), [addr] "=Q" (*addr)
+ :
: "cc");
trace_s390_cio_tpi(addr, ccode);
@@ -170,13 +175,13 @@ int chsc(void *chsc_area)
int cc = -EIO;
asm volatile(
- " .insn rre,0xb25f0000,%2,0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
+ " .insn rre,0xb25f0000,%[chsc_area],0\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (cc), "=m" (*(addr_type *) chsc_area)
- : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+ : [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area)
+ : [chsc_area] "d" (chsc_area)
: "cc");
trace_s390_cio_chsc(chsc_area, cc);
@@ -186,17 +191,17 @@ EXPORT_SYMBOL(chsc);
static inline int __rsch(struct subchannel_id schid)
{
- register struct subchannel_id reg1 asm("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
+ " lgr 1,%[r1]\n"
" rsch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc", "memory");
-
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (ccode)
+ : [r1] "d" (r1)
+ : "cc", "memory", "1");
return ccode;
}
@@ -212,16 +217,17 @@ int rsch(struct subchannel_id schid)
static inline int __hsch(struct subchannel_id schid)
{
- register struct subchannel_id reg1 asm("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
+ " lgr 1,%[r1]\n"
" hsch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (ccode)
+ : [r1] "d" (r1)
+ : "cc", "1");
return ccode;
}
@@ -238,16 +244,17 @@ EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid)
{
- register struct subchannel_id reg1 asm("1") = schid;
+ unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
+ " lgr 1,%[r1]\n"
" xsch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (ccode)
+ : [r1] "d" (r1)
+ : "cc", "1");
return ccode;
}
@@ -266,11 +273,11 @@ static inline int __stcrw(struct crw *crw)
int ccode;
asm volatile(
- " stcrw 0(%2)\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (ccode), "=m" (*crw)
- : "a" (crw)
+ " stcrw %[crw]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (ccode), [crw] "=Q" (*crw)
+ :
: "cc");
return ccode;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 0e0044d70844..f69ffbb8edc9 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -88,15 +88,15 @@ enum qdio_irq_states {
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count)
{
- register unsigned long _ccq asm ("0") = *count;
- register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _ccq = *count;
asm volatile(
- " .insn rsy,0xeb000000008A,%1,0,0(%2)"
- : "+d" (_ccq), "+d" (_queuestart)
- : "d" ((unsigned long)state), "d" (_token)
- : "memory", "cc");
+ " lgr 1,%[token]\n"
+ " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
+ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
+ : [state] "d" ((unsigned long)state), [token] "d" (token)
+ : "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
@@ -106,16 +106,17 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
int *start, int *count, int ack)
{
- register unsigned long _ccq asm ("0") = *count;
- register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = (unsigned long)ack << 63;
+ unsigned long _ccq = *count;
asm volatile(
- " .insn rrf,0xB99c0000,%1,%2,0,0"
- : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
- : "d" (_token)
- : "memory", "cc");
+ " lgr 1,%[token]\n"
+ " .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0"
+ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart),
+ [state] "+&d" (_state)
+ : [token] "d" (token)
+ : "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 307ce7ff5ca4..3052fab00597 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -31,38 +31,41 @@ MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
- unsigned int out_mask, unsigned int in_mask,
+ unsigned long out_mask, unsigned long in_mask,
unsigned int fc)
{
- register unsigned long __fc asm ("0") = fc;
- register unsigned long __schid asm ("1") = schid;
- register unsigned long out asm ("2") = out_mask;
- register unsigned long in asm ("3") = in_mask;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[out]\n"
+ " lgr 3,%[in]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid),
+ [out] "d" (out_mask), [in] "d" (in_mask)
+ : "cc", "0", "1", "2", "3");
return cc;
}
-static inline int do_siga_input(unsigned long schid, unsigned int mask,
- unsigned int fc)
+static inline int do_siga_input(unsigned long schid, unsigned long mask,
+ unsigned long fc)
{
- register unsigned long __fc asm ("0") = fc;
- register unsigned long __schid asm ("1") = schid;
- register unsigned long __mask asm ("2") = mask;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
+ : "cc", "0", "1", "2");
return cc;
}
@@ -78,23 +81,24 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- unsigned int *bb, unsigned int fc,
+ unsigned int *bb, unsigned long fc,
unsigned long aob)
{
- register unsigned long __fc asm("0") = fc;
- register unsigned long __schid asm("1") = schid;
- register unsigned long __mask asm("2") = mask;
- register unsigned long __aob asm("3") = aob;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
+ " lgr 3,%[aob]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "+d" (__fc), "+d" (__aob)
- : "d" (__schid), "d" (__mask)
- : "cc");
- *bb = __fc >> 31;
+ " lgr %[fc],0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [fc] "+&d" (fc)
+ : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
+ : "cc", "0", "1", "2", "3");
+ *bb = fc >> 31;
return cc;
}
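
Unlike the other SIGA wrappers, do_siga_output needs a value back out of a hardcoded register: after the instruction, register 0 holds the updated function code carrying the busy indication, so the rewritten asm copies it into the C variable with "lgr %[fc],0" before the busy bit is extracted. A tiny illustrative helper (not a kernel API) showing the extraction:

#include <stdio.h>

/* fc as copied back out of %r0 after SIGA; the kernel code stores fc >> 31
 * into *bb, i.e. bit 31 of the 32-bit function code is the busy bit. */
static unsigned int siga_busy(unsigned long fc_after_siga)
{
	return (unsigned int)(fc_after_siga >> 31);
}

int main(void)
{
	printf("%u\n", siga_busy(0x80000000UL));	/* 1: busy */
	printf("%u\n", siga_busy(0x00000002UL));	/* 0: not busy */
	return 0;
}
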
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 4803139bce14..86993de25345 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -168,10 +168,8 @@ TRACE_EVENT(s390_cio_tpi,
memset(&__entry->tpi_info, 0, sizeof(struct tpi_info));
else if (addr)
__entry->tpi_info = *addr;
- else {
- memcpy(&__entry->tpi_info, &S390_lowcore.subchannel_id,
- sizeof(struct tpi_info));
- }
+ else
+ __entry->tpi_info = S390_lowcore.tpi_info;
__entry->cssid = __entry->tpi_info.schid.cssid;
__entry->ssid = __entry->tpi_info.schid.ssid;
__entry->schno = __entry->tpi_info.schid.sch_no;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 2758d05a802d..8d3a1d84a757 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2006, 2020
+ * Copyright IBM Corp. 2006, 2021
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -61,6 +61,9 @@ static char *aqm_str;
module_param_named(aqmask, aqm_str, charp, 0440);
MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
+atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
+EXPORT_SYMBOL(ap_max_msg_size);
+
static struct device *ap_root_device;
/* Hashtable of all queue devices on the AP bus */
@@ -77,6 +80,9 @@ EXPORT_SYMBOL(ap_perms_mutex);
/* # of bus scans since init */
static atomic64_t ap_scan_bus_count;
+/* # of bindings complete since init */
+static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
+
/* completion for initial APQN bindings complete */
static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
@@ -313,11 +319,24 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
* Returns true if TAPQ succeeded and the info is filled or
* false otherwise.
*/
-static bool ap_queue_info(ap_qid_t qid, int *q_type,
- unsigned int *q_fac, int *q_depth, bool *q_decfg)
+static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
+ int *q_depth, int *q_ml, bool *q_decfg)
{
struct ap_queue_status status;
- unsigned long info = 0;
+ union {
+ unsigned long value;
+ struct {
+ unsigned int fac : 32; /* facility bits */
+ unsigned int at : 8; /* ap type */
+ unsigned int _res1 : 8;
+ unsigned int _res2 : 4;
+ unsigned int ml : 4; /* apxl ml */
+ unsigned int _res3 : 4;
+ unsigned int qd : 4; /* queue depth */
+ } tapq_gr2;
+ } tapq_info;
+
+ tapq_info.value = 0;
	/* make sure we don't run into a specification exception */
if (AP_QID_CARD(qid) > ap_max_adapter_id ||
@@ -325,7 +344,7 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type,
return false;
/* call TAPQ on this APQN */
- status = ap_test_queue(qid, ap_apft_available(), &info);
+ status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
@@ -337,11 +356,12 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type,
* info should be filled. All bits 0 is not possible as
* there is at least one of the mode bits set.
*/
- if (WARN_ON_ONCE(!info))
+ if (WARN_ON_ONCE(!tapq_info.value))
return false;
- *q_type = (int)((info >> 24) & 0xff);
- *q_fac = (unsigned int)(info >> 32);
- *q_depth = (int)(info & 0xff);
+ *q_type = tapq_info.tapq_gr2.at;
+ *q_fac = tapq_info.tapq_gr2.fac;
+ *q_depth = tapq_info.tapq_gr2.qd;
+ *q_ml = tapq_info.tapq_gr2.ml;
*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
switch (*q_type) {
/* For CEX2 and CEX3 the available functions
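
The union above decodes the TAPQ GR2 result with big-endian bitfields; the older code did the same with shifts and masks, except that it took the full low byte as the queue depth where the new qd field is only 4 bits wide. An equivalent standalone decode with explicit shifts (bit positions taken from the bitfield layout above, shown here purely as an illustration):

#include <stdio.h>

struct tapq_gr2_info {
	unsigned int fac;	/* facility bits, value bits 63..32 */
	unsigned int at;	/* AP type,       value bits 31..24 */
	unsigned int ml;	/* msg limit,     value bits 11..8  */
	unsigned int qd;	/* queue depth,   value bits 3..0   */
};

static void decode_tapq_gr2(unsigned long gr2, struct tapq_gr2_info *i)
{
	i->fac = (unsigned int)(gr2 >> 32);
	i->at  = (unsigned int)(gr2 >> 24) & 0xff;
	i->ml  = (unsigned int)(gr2 >>  8) & 0x0f;
	i->qd  = (unsigned int)(gr2      ) & 0x0f;
}

int main(void)
{
	struct tapq_gr2_info info;

	decode_tapq_gr2(0x000040060b000508UL, &info);	/* made-up value */
	printf("fac=%#x type=%u ml=%u depth=%u\n",
	       info.fac, info.at, info.ml, info.qd);
	return 0;
}
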
@@ -584,22 +604,47 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
*/
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- int rc;
+ int rc = 0;
struct ap_device *ap_dev = to_ap_dev(dev);
/* Uevents from ap bus core don't need extensions to the env */
if (dev == ap_root_device)
return 0;
- /* Set up DEV_TYPE environment variable. */
- rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
- if (rc)
- return rc;
+ if (is_card_dev(dev)) {
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
- /* Add MODALIAS= */
- rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
- if (rc)
- return rc;
+ /* Set up DEV_TYPE environment variable. */
+ rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
+ if (rc)
+ return rc;
+ /* Add MODALIAS= */
+ rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
+ if (rc)
+ return rc;
+
+ /* Add MODE=<accel|cca|ep11> */
+ if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
+ rc = add_uevent_var(env, "MODE=accel");
+ else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ rc = add_uevent_var(env, "MODE=cca");
+ else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ rc = add_uevent_var(env, "MODE=ep11");
+ if (rc)
+ return rc;
+ } else {
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+
+ /* Add MODE=<accel|cca|ep11> */
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
+ rc = add_uevent_var(env, "MODE=accel");
+ else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ rc = add_uevent_var(env, "MODE=cca");
+ else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ rc = add_uevent_var(env, "MODE=ep11");
+ if (rc)
+ return rc;
+ }
return 0;
}
@@ -613,11 +658,36 @@ static void ap_send_init_scan_done_uevent(void)
static void ap_send_bindings_complete_uevent(void)
{
- char *envp[] = { "BINDINGS=complete", NULL };
+ char buf[32];
+ char *envp[] = { "BINDINGS=complete", buf, NULL };
+ snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
+ atomic64_inc_return(&ap_bindings_complete_count));
kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
+void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
+{
+ char buf[16];
+ char *envp[] = { buf, NULL };
+
+ snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
+
+ kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(ap_send_config_uevent);
+
+void ap_send_online_uevent(struct ap_device *ap_dev, int online)
+{
+ char buf[16];
+ char *envp[] = { buf, NULL };
+
+ snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);
+
+ kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(ap_send_online_uevent);
+
/*
* calc # of bound APQNs
*/
@@ -885,8 +955,6 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
struct device_driver *drv = &ap_drv->driver;
drv->bus = &ap_bus_type;
- drv->probe = ap_device_probe;
- drv->remove = ap_device_remove;
drv->owner = owner;
drv->name = name;
return driver_register(drv);
@@ -1319,6 +1387,8 @@ static struct bus_type ap_bus_type = {
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
.uevent = &ap_uevent,
+ .probe = ap_device_probe,
+ .remove = ap_device_remove,
};
/**
@@ -1463,7 +1533,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
unsigned int func;
struct device *dev;
struct ap_queue *aq;
- int rc, dom, depth, type;
+ int rc, dom, depth, type, ml;
/*
* Go through the configuration for the domains and compare them
@@ -1487,7 +1557,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
continue;
}
/* domain is valid, get info from this APQN */
- if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) {
+ if (!ap_queue_info(qid, &type, &func, &depth, &ml, &decfg)) {
if (aq) {
AP_DBF_INFO(
"%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
@@ -1540,6 +1610,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
spin_unlock_bh(&aq->lock);
AP_DBF_INFO("%s(%d,%d) queue device config off\n",
__func__, ac->id, dom);
+ ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1554,6 +1625,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
spin_unlock_bh(&aq->lock);
AP_DBF_INFO("%s(%d,%d) queue device config on\n",
__func__, ac->id, dom);
+ ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
/* handle other error states */
@@ -1584,7 +1656,7 @@ static inline void ap_scan_adapter(int ap)
unsigned int func;
struct device *dev;
struct ap_card *ac;
- int rc, dom, depth, type, comp_type;
+ int rc, dom, depth, type, comp_type, ml;
/* Is there currently a card device for this adapter ? */
dev = bus_find_device(&ap_bus_type, NULL,
@@ -1613,7 +1685,8 @@ static inline void ap_scan_adapter(int ap)
for (dom = 0; dom <= ap_max_domain_id; dom++)
if (ap_test_config_usage_domain(dom)) {
qid = AP_MKQID(ap, dom);
- if (ap_queue_info(qid, &type, &func, &depth, &decfg))
+ if (ap_queue_info(qid, &type, &func,
+ &depth, &ml, &decfg))
break;
}
if (dom > ap_max_domain_id) {
@@ -1663,12 +1736,13 @@ static inline void ap_scan_adapter(int ap)
ac->config = false;
AP_DBF_INFO("%s(%d) card device config off\n",
__func__, ap);
-
+ ap_send_config_uevent(&ac->ap_dev, ac->config);
}
if (!decfg && !ac->config) {
ac->config = true;
AP_DBF_INFO("%s(%d) card device config on\n",
__func__, ap);
+ ap_send_config_uevent(&ac->ap_dev, ac->config);
}
}
}
@@ -1681,7 +1755,7 @@ static inline void ap_scan_adapter(int ap)
__func__, ap, type);
return;
}
- ac = ap_card_create(ap, depth, type, comp_type, func);
+ ac = ap_card_create(ap, depth, type, comp_type, func, ml);
if (!ac) {
AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
__func__, ap);
@@ -1692,6 +1766,12 @@ static inline void ap_scan_adapter(int ap)
dev->bus = &ap_bus_type;
dev->parent = ap_root_device;
dev_set_name(dev, "card%02x", ap);
+ /* maybe enlarge ap_max_msg_size to support this card */
+ if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
+ atomic_set(&ap_max_msg_size, ac->maxmsgsize);
+		AP_DBF_INFO("%s(%d) ap_max_msg_size updated to %d bytes\n",
+ __func__, ap, atomic_read(&ap_max_msg_size));
+ }
/* Register the new card device with AP bus */
rc = device_register(dev);
if (rc) {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 472efd3a755c..8f18abdbbc2b 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -25,8 +25,11 @@
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
+#define AP_DEFAULT_MAX_MSG_SIZE (12 * 1024)
+#define AP_TAPQ_ML_FIELD_CHUNK_SIZE (4096)
extern int ap_domain_index;
+extern atomic_t ap_max_msg_size;
extern DECLARE_HASHTABLE(ap_queues, 8);
extern spinlock_t ap_queues_lock;
@@ -167,6 +170,7 @@ struct ap_card {
unsigned int functions; /* AP device function bitfield. */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
+ unsigned int maxmsgsize; /* AP msg limit for this card */
bool config; /* configured state */
atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
@@ -228,7 +232,8 @@ struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long long psmid; /* Message id. */
void *msg; /* Pointer to message buffer. */
- unsigned int len; /* Message length. */
+ unsigned int len; /* actual msg len in msg buffer */
+ unsigned int bufsize; /* allocated msg buffer size */
u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
struct ap_fi fi; /* Failure Injection cmd */
int rc; /* Return code for this message */
@@ -290,8 +295,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_init_state(struct ap_queue *aq);
-struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
- int comp_device_type, unsigned int functions);
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions, int ml);
struct ap_perms {
unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
@@ -362,4 +367,7 @@ int ap_parse_mask_str(const char *str,
*/
int ap_wait_init_apqn_bindings_complete(unsigned long timeout);
+void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
+void ap_send_online_uevent(struct ap_device *ap_dev, int online);
+
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index d98bdd28d23e..196325a66662 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -167,11 +167,23 @@ static ssize_t config_store(struct device *dev,
ac->config = cfg ? true : false;
+ ap_send_config_uevent(&ac->ap_dev, ac->config);
+
return count;
}
static DEVICE_ATTR_RW(config);
+static ssize_t max_msg_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ac->maxmsgsize);
+}
+
+static DEVICE_ATTR_RO(max_msg_size);
+
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
@@ -182,6 +194,7 @@ static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
&dev_attr_config.attr,
+ &dev_attr_max_msg_size.attr,
NULL
};
@@ -207,7 +220,7 @@ static void ap_card_device_release(struct device *dev)
}
struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
- int comp_type, unsigned int functions)
+ int comp_type, unsigned int functions, int ml)
{
struct ap_card *ac;
@@ -221,5 +234,8 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
+ ac->maxmsgsize = ml > 0 ?
+ ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE;
+
return ac;
}
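
The new ml parameter comes straight from the TAPQ GR2 field decoded earlier: it reports the card's message limit in 4 KiB units (AP_TAPQ_ML_FIELD_CHUNK_SIZE), and 0 means the card gives no indication, in which case the 12 KiB default that all existing message formats fit into is used. A compact sketch of the calculation, using the macros as defined in ap_bus.h:

#include <stdio.h>

#define AP_DEFAULT_MAX_MSG_SIZE		(12 * 1024)
#define AP_TAPQ_ML_FIELD_CHUNK_SIZE	(4096)

static unsigned int ap_card_max_msg_size(int ml)
{
	return ml > 0 ? ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE
		      : AP_DEFAULT_MAX_MSG_SIZE;
}

int main(void)
{
	printf("%u\n", ap_card_max_msg_size(0));	/* 12288 (default) */
	printf("%u\n", ap_card_max_msg_size(5));	/* 20480 */
	return 0;
}
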
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 337353c9655e..669f96fddad6 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -101,7 +101,7 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
if (msg == NULL)
return -EINVAL;
- status = ap_dqap(qid, psmid, msg, length);
+ status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
@@ -136,9 +136,24 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
struct ap_queue_status status;
struct ap_message *ap_msg;
bool found = false;
+ size_t reslen;
+ unsigned long resgr0 = 0;
+ int parts = 0;
+
+	/*
+	 * Loop over DQAP until the response code and resgr0 indicate
+	 * that the message has been received completely. Since the very
+	 * same buffer is reused for every invocation, a multi-part
+	 * message overwrites its earlier parts. That is intended; in
+	 * such a case the receiver of the message is informed via a
+	 * message rc code of EMSGSIZE.
+	 */
+ do {
+ status = ap_dqap(aq->qid, &aq->reply->psmid,
+ aq->reply->msg, aq->reply->bufsize,
+ &reslen, &resgr0);
+ parts++;
+ } while (status.response_code == 0xFF && resgr0 != 0);
- status = ap_dqap(aq->qid, &aq->reply->psmid,
- aq->reply->msg, aq->reply->len);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count = max_t(int, 0, aq->queue_count - 1);
@@ -150,7 +165,12 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
continue;
list_del_init(&ap_msg->list);
aq->pendingq_count--;
- ap_msg->receive(aq, ap_msg, aq->reply);
+ if (parts > 1) {
+ ap_msg->rc = -EMSGSIZE;
+ ap_msg->receive(aq, ap_msg, NULL);
+ } else {
+ ap_msg->receive(aq, ap_msg, aq->reply);
+ }
found = true;
break;
}
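
Since the reply buffer is now sized per card rather than per message type, a reply can in principle be larger than the buffer. The loop above therefore keeps issuing DQAP until the response code and resgr0 signal that the whole message has been drained, and any message that arrived in more than one part is reported to its receiver as -EMSGSIZE instead of being handed over truncated. A userspace model of that control flow, with ap_dqap mocked out:

#include <errno.h>
#include <stdio.h>

struct mock_reply { int parts_left; };

/* Mock: returns 0xFF with a non-zero continuation token while further
 * parts are pending, 0 on the final part. */
static int mock_dqap(struct mock_reply *r, unsigned long *resgr0)
{
	if (--r->parts_left > 0) {
		*resgr0 = 1;
		return 0xFF;
	}
	*resgr0 = 0;
	return 0;
}

int main(void)
{
	struct mock_reply reply = { .parts_left = 3 };
	unsigned long resgr0 = 0;
	int rc, parts = 0;

	do {
		rc = mock_dqap(&reply, &resgr0);
		parts++;
	} while (rc == 0xFF && resgr0 != 0);

	printf("parts=%d msg_rc=%d\n", parts, parts > 1 ? -EMSGSIZE : 0);
	return 0;
}
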
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 7dc72cb718b0..4d2556bc7fe5 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -22,8 +22,6 @@ MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
MODULE_LICENSE("GPL v2");
-static struct ap_driver vfio_ap_drv;
-
struct ap_matrix_dev *matrix_dev;
/* Only type 10 adapters (CEX4 and later) are supported
@@ -80,6 +78,12 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
mutex_unlock(&matrix_dev->lock);
}
+static struct ap_driver vfio_ap_drv = {
+ .probe = vfio_ap_queue_dev_probe,
+ .remove = vfio_ap_queue_dev_remove,
+ .ids = ap_queue_ids,
+};
+
static void vfio_ap_matrix_dev_release(struct device *dev)
{
struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
@@ -181,11 +185,6 @@ static int __init vfio_ap_init(void)
if (ret)
return ret;
- memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
- vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
- vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
- vfio_ap_drv.ids = ap_queue_ids;
-
ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
if (ret) {
vfio_ap_matrix_dev_destroy();
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 52eaf51c9bb6..529ffe26ea9d 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -59,7 +59,6 @@ MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
-int zcrypt_device_count;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
@@ -901,6 +900,9 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (xcRB->user_defined != AUTOSELECT &&
xcRB->user_defined != zc->card->id)
continue;
+ /* check if request size exceeds card max msg size */
+ if (ap_msg.len > zc->card->maxmsgsize)
+ continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
@@ -1069,6 +1071,9 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (targets &&
!is_desired_ep11_card(zc->card->id, target_num, targets))
continue;
+ /* check if request size exceeds card max msg size */
+ if (ap_msg.len > zc->card->maxmsgsize)
+ continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 16219efb2f61..93e77e83ad14 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -124,7 +124,6 @@ struct zcrypt_queue {
extern atomic_t zcrypt_rescan_req;
extern spinlock_t zcrypt_list_lock;
-extern int zcrypt_device_count;
extern struct list_head zcrypt_card_list;
#define for_each_zcrypt_card(_zc) \
@@ -146,7 +145,7 @@ void zcrypt_queue_get(struct zcrypt_queue *);
int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
-void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online);
int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 09fe6bb8880b..40fd5d37d26a 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -64,7 +64,8 @@ static ssize_t online_store(struct device *dev,
struct ap_card *ac = to_ap_card(dev);
struct zcrypt_card *zc = ac->private;
struct zcrypt_queue *zq;
- int online, id;
+ int online, id, i = 0, maxzqs = 0;
+ struct zcrypt_queue **zq_uelist = NULL;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
@@ -77,10 +78,35 @@ static ssize_t online_store(struct device *dev,
ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+ ap_send_online_uevent(&ac->ap_dev, online);
+
spin_lock(&zcrypt_list_lock);
+	/*
+	 * Sending uevents directly is not possible in atomic context.
+	 * So collect the affected zqueues in a dynamically allocated
+	 * array and process them after zcrypt_list_lock has been
+	 * released. Taking a reference (get/put) on each zqueue ensures
+	 * the objects still exist once the lock is dropped.
+	 */
+ list_for_each_entry(zq, &zc->zqueues, list)
+ maxzqs++;
+ if (maxzqs > 0)
+ zq_uelist = kcalloc(maxzqs + 1, sizeof(zq), GFP_ATOMIC);
list_for_each_entry(zq, &zc->zqueues, list)
- zcrypt_queue_force_online(zq, online);
+ if (zcrypt_queue_force_online(zq, online))
+ if (zq_uelist) {
+ zcrypt_queue_get(zq);
+ zq_uelist[i++] = zq;
+ }
spin_unlock(&zcrypt_list_lock);
+ if (zq_uelist) {
+ for (i = 0; zq_uelist[i]; i++) {
+ zq = zq_uelist[i];
+ ap_send_online_uevent(&zq->queue->ap_dev, online);
+ zcrypt_queue_put(zq);
+ }
+ kfree(zq_uelist);
+ }
+
return count;
}
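
The hunk above has to defer the per-queue uevents because kobject_uevent_env() can sleep while zcrypt_list_lock is a spinlock. The general shape, collect references under the lock and notify after it has been dropped, modelled in userspace with a pthread mutex standing in for the spinlock (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

struct item { int id; int changed; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item items[4] = { { 1, 1 }, { 2, 0 }, { 3, 1 }, { 4, 1 } };

static void notify(struct item *it)	/* may sleep, never call under the lock */
{
	printf("uevent for item %d\n", it->id);
}

int main(void)
{
	struct item *pending[4];
	int i, n = 0;

	pthread_mutex_lock(&list_lock);
	for (i = 0; i < 4; i++)
		if (items[i].changed)
			pending[n++] = &items[i];	/* kernel code takes a reference here */
	pthread_mutex_unlock(&list_lock);

	for (i = 0; i < n; i++)
		notify(pending[i]);
	return 0;
}
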
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index d68c0ed5e0dd..bc34bedf9db8 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -295,7 +295,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb,
* Generate (random) CCA AES DATA secure key.
*/
int cca_genseckey(u16 cardnr, u16 domain,
- u32 keybitsize, u8 seckey[SECKEYBLOBSIZE])
+ u32 keybitsize, u8 *seckey)
{
int i, rc, keysize;
int seckeysize;
@@ -330,7 +330,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
struct {
u16 toklen;
u16 tokattr;
- u8 tok[0];
+ u8 tok[];
/* ... some more data ... */
} keyblock;
} lv3;
@@ -438,7 +438,7 @@ EXPORT_SYMBOL(cca_genseckey);
 * Generate a CCA AES DATA secure key with a given key value.
*/
int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
- const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
+ const u8 *clrkey, u8 *seckey)
{
int rc, keysize, seckeysize;
u8 *mem, *ptr;
@@ -471,7 +471,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
struct {
u16 toklen;
u16 tokattr;
- u8 tok[0];
+ u8 tok[];
/* ... some more data ... */
} keyblock;
} lv3;
@@ -577,8 +577,8 @@ EXPORT_SYMBOL(cca_clr2seckey);
 * Derive a protected key from a CCA AES DATA secure key.
*/
int cca_sec2protkey(u16 cardnr, u16 domain,
- const u8 seckey[SECKEYBLOBSIZE],
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ const u8 *seckey, u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
{
int rc;
u8 *mem, *ptr;
@@ -596,7 +596,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
u16 len;
u16 attr_len;
u16 attr_flags;
- u8 token[0]; /* cca secure key token */
+ u8 token[]; /* cca secure key token */
} lv2;
} __packed * preqparm;
struct uskrepparm {
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index e7105443d5cb..3513cd8ab9bc 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -171,8 +171,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
 * Derive a protected key from a CCA AES DATA secure key.
*/
int cca_sec2protkey(u16 cardnr, u16 domain,
- const u8 seckey[SECKEYBLOBSIZE],
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ const u8 *seckey, u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype);
/*
* Generate (random) CCA AES CIPHER secure key.
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f4a6d3744241..f518b5fc7e5d 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -28,9 +28,6 @@
#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
-#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
-
/* Waiting time for requests to be processed.
* Currently there are some types of request which are not deterministic.
* But the maximum time limit managed by the stomper code is set to 60sec.
@@ -605,19 +602,19 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
int rc;
if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
- zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+ zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
- zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
- zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index bf14ee445f89..99405472824d 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -375,6 +375,7 @@ static int convert_type80(struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
t80h->code);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
@@ -412,6 +413,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) rtype);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
@@ -440,11 +442,13 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq,
goto out; /* ap_msg->rc indicates the error */
t80h = reply->msg;
if (t80h->type == TYPE80_RSP_CODE) {
- if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
- len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len);
- else
- len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len);
- memcpy(msg->msg, reply->msg, len);
+ len = t80h->len;
+ if (len > reply->bufsize || len > msg->bufsize) {
+ msg->rc = -EMSGSIZE;
+ } else {
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
+ }
} else
memcpy(msg->msg, reply->msg, sizeof(error_reply));
out:
@@ -467,10 +471,9 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
struct completion work;
int rc;
- if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
- else
- ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ?
+ MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE;
+ ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_cex2a_receive;
@@ -513,10 +516,9 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
struct completion work;
int rc;
- if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
- else
- ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ?
+ MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE;
+ ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_cex2a_receive;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 307f90657d1d..752c6398fcd6 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -403,7 +403,7 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
} __packed * msg = ap_msg->msg;
int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen, req_sumlen, resp_sumlen;
+ int req_sumlen, resp_sumlen;
char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
char *function_code;
@@ -415,7 +415,7 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
ap_msg->len = sizeof(struct type6_hdr) +
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
- if (ap_msg->len > MSGTYPE06_MAX_MSG_SIZE)
+ if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
/*
@@ -435,12 +435,6 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
xcRB->reply_control_blk_length)
return -EINVAL; /* overflow after alignment*/
- replylen = sizeof(struct type86_fmt2_msg) +
- CEIL4(xcRB->reply_control_blk_length) +
- xcRB->reply_data_length;
- if (replylen > MSGTYPE06_MAX_MSG_SIZE)
- return -EINVAL;
-
/*
* Overflow check
* sum must be greater (or equal) than the largest operand
@@ -530,18 +524,13 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
return -EINVAL; /* overflow after alignment*/
/* length checks */
- ap_msg->len = sizeof(struct type6_hdr) + xcRB->req_len;
- if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
- (sizeof(struct type6_hdr)))
+ ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->req_len);
+ if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
return -EINVAL; /* overflow after alignment*/
- if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
- (sizeof(struct type86_fmt2_msg)))
- return -EINVAL;
-
/* prepare type6 header */
msg->hdr = static_type6_ep11_hdr;
msg->hdr.ToCardLen1 = xcRB->req_len;
@@ -675,6 +664,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) service_rc, (int) service_rs);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
data = msg->text;
@@ -820,6 +810,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
@@ -854,6 +845,7 @@ static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
@@ -883,6 +875,7 @@ static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
@@ -913,6 +906,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
@@ -947,13 +941,21 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_ICA:
len = sizeof(struct type86x_reply) + t86r->length - 2;
- len = min_t(int, CEXXC_MAX_ICA_RESPONSE_SIZE, len);
- memcpy(msg->msg, reply->msg, len);
+ if (len > reply->bufsize || len > msg->bufsize) {
+ msg->rc = -EMSGSIZE;
+ } else {
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
+ }
break;
case CEXXC_RESPONSE_TYPE_XCRB:
len = t86r->fmt2.offset2 + t86r->fmt2.count2;
- len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
- memcpy(msg->msg, reply->msg, len);
+ if (len > reply->bufsize || len > msg->bufsize) {
+ msg->rc = -EMSGSIZE;
+ } else {
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
+ }
break;
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
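
Both response paths above now apply the same rule: a reply is copied only if it fits into both the device reply buffer and the caller's message buffer, and the message is failed with -EMSGSIZE otherwise, where the old code silently truncated with min_t(). A small sketch of that bounded-copy rule (helper name and signature are illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_reply(void *dst, size_t dst_bufsize,
		      const void *src, size_t src_bufsize,
		      size_t len, size_t *out_len)
{
	if (len > src_bufsize || len > dst_bufsize)
		return -EMSGSIZE;
	memcpy(dst, src, len);
	*out_len = len;
	return 0;
}

int main(void)
{
	char reply[64] = "reply data", msg[32];
	size_t msglen = 0;

	printf("%d\n", copy_reply(msg, sizeof(msg), reply, sizeof(reply),
				  11, &msglen));	/* 0: fits */
	printf("%d\n", copy_reply(msg, sizeof(msg), reply, sizeof(reply),
				  48, &msglen));	/* -EMSGSIZE: too big for msg */
	return 0;
}
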
@@ -994,8 +996,12 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_EP11:
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
- len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
- memcpy(msg->msg, reply->msg, len);
+ if (len > reply->bufsize || len > msg->bufsize) {
+ msg->rc = -EMSGSIZE;
+ } else {
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
+ }
break;
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
@@ -1028,6 +1034,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
+ ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
@@ -1075,6 +1082,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
+ ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
@@ -1119,7 +1127,8 @@ unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
.type = CEXXC_RESPONSE_TYPE_XCRB,
};
- ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->bufsize = atomic_read(&ap_max_msg_size);
+ ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
@@ -1176,7 +1185,8 @@ unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
.type = CEXXC_RESPONSE_TYPE_EP11,
};
- ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->bufsize = atomic_read(&ap_max_msg_size);
+ ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
@@ -1272,7 +1282,8 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
.type = CEXXC_RESPONSE_TYPE_XCRB,
};
- ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
+ ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 0a0bf074206b..155c73514bac 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -19,8 +19,6 @@
#define MSGTYPE06_VARIANT_NORNG 1
#define MSGTYPE06_VARIANT_EP11 2
-#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
-
/**
* The type 6 message family is associated with CEXxC/CEXxP cards.
*
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index c3ffbd26b73f..20f12288a8c1 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -70,6 +70,8 @@ static ssize_t online_store(struct device *dev,
AP_QID_QUEUE(zq->queue->qid),
online);
+ ap_send_online_uevent(&aq->ap_dev, online);
+
if (!online)
ap_flush_queue(zq->queue);
return count;
@@ -98,24 +100,28 @@ static const struct attribute_group zcrypt_queue_attr_group = {
.attrs = zcrypt_queue_attrs,
};
-void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
{
- zq->online = online;
- if (!online)
- ap_flush_queue(zq->queue);
+ if (!!zq->online != !!online) {
+ zq->online = online;
+ if (!online)
+ ap_flush_queue(zq->queue);
+ return true;
+ }
+ return false;
}
-struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+struct zcrypt_queue *zcrypt_queue_alloc(size_t reply_buf_size)
{
struct zcrypt_queue *zq;
zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
if (!zq)
return NULL;
- zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL);
+ zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL);
if (!zq->reply.msg)
goto out_free;
- zq->reply.len = max_response_size;
+ zq->reply.bufsize = reply_buf_size;
INIT_LIST_HEAD(&zq->list);
kref_init(&zq->refcount);
return zq;
@@ -173,7 +179,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
list_add_tail(&zq->list, &zc->zqueues);
- zcrypt_device_count++;
spin_unlock(&zcrypt_list_lock);
rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
@@ -216,7 +221,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
zc = zq->zcard;
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
- zcrypt_device_count--;
spin_unlock(&zcrypt_list_lock);
if (zq->ops->rng)
zcrypt_rng_device_remove();
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index b341075397d9..377e3689d1d4 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1454,6 +1454,7 @@ again:
get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc, "normal RX");
+ break;
default:
break;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2abf86c104d5..d7cdd9cfe485 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
QETH_CARD_TEXT(card, 2, "fdbflush");
@@ -679,7 +679,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
struct net_if_token *token,
struct mac_addr_lnid *addr_lnid)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
u8 ntfy_mac[ETH_ALEN];
ether_addr_copy(ntfy_mac, addr_lnid->mac);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d308ff744a29..f0d6f205c53c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card,
if (qeth_is_ipafunc_supported(card, prot,
IPA_OSA_MC_ROUTER))
return 0;
+ goto out_inval;
default:
goto out_inval;
}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index d58bf79892f2..9da9b2b2a580 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -856,10 +856,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
*/
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
- scsi_build_sense_buffer(1, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x10, ascq);
- set_driver_byte(scmd, DRIVER_SENSE);
- scmd->result |= SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 1, ILLEGAL_REQUEST, 0x10, ascq);
set_host_byte(scmd, DID_SOFT_ERROR);
}
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 544efd4c42f0..b8cd75a872ee 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
+ 0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
i = sprintf(buf, "unknown\n");
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 54e686dca6de..d35e7a3f7067 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -388,31 +388,6 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
-static inline long __do_kvm_notify(struct subchannel_id schid,
- unsigned long queue_index,
- long cookie)
-{
- register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
- register struct subchannel_id __schid asm("2") = schid;
- register unsigned long __index asm("3") = queue_index;
- register long __rc asm("2");
- register long __cookie asm("4") = cookie;
-
- asm volatile ("diag 2,4,0x500\n"
- : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
- "d"(__cookie)
- : "memory", "cc");
- return __rc;
-}
-
-static inline long do_kvm_notify(struct subchannel_id schid,
- unsigned long queue_index,
- long cookie)
-{
- diag_stat_inc(DIAG_STAT_X500);
- return __do_kvm_notify(schid, queue_index, cookie);
-}
-
static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
struct virtio_ccw_vq_info *info = vq->priv;
@@ -421,7 +396,10 @@ static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
vcdev = to_vc_device(info->vq->vdev);
ccw_device_get_schid(vcdev->cdev, &schid);
- info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
+ BUILD_BUG_ON(sizeof(struct subchannel_id) != sizeof(unsigned int));
+ info->cookie = kvm_hypercall3(KVM_S390_VIRTIO_CCW_NOTIFY,
+ *((unsigned int *)&schid),
+ vq->index, info->cookie);
if (info->cookie < 0)
return false;
return true;
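
The open-coded diag 0x500 hypercall with its register asm variables is gone in favor of the generic kvm_hypercall3() helper; the subchannel id, a 32-bit structure, is passed as the first scalar argument, and BUILD_BUG_ON() makes that size assumption explicit at compile time. A userspace approximation of the guard (the macro below is a simplified stand-in for the kernel's BUILD_BUG_ON):

#include <stdio.h>

struct subchannel_id { unsigned int bits; };	/* stand-in, 32 bits wide */

/* evaluates to a negative array size, i.e. a compile error, if cond is true */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	struct subchannel_id schid = { .bits = 0x00010003 };

	BUILD_BUG_ON(sizeof(struct subchannel_id) != sizeof(unsigned int));
	printf("hypercall arg0 = %#x\n", *(unsigned int *)&schid);
	return 0;
}
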
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 47028f5e57ab..e41cc354cc8a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -303,10 +303,10 @@ static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
/* Initialize sglist */
memset(&sglist, 0, sizeof(TW_SG_Entry));
- sglist[0].length = TW_SECTOR_SIZE;
- sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+ sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
- if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
+ if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
goto out;
}
@@ -440,8 +440,8 @@ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
/* Initialize sglist */
memset(&sglist, 0, sizeof(TW_SG_Entry));
- sglist[0].length = TW_SECTOR_SIZE;
- sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+ sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
/* Mark internal command */
tw_dev->srb[request_id] = NULL;
@@ -501,9 +501,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
Sunday 12:00AM */
local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
- schedulertime = cpu_to_le32(schedulertime % 604800);
- memcpy(param->data, &schedulertime, sizeof(u32));
+ memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
/* Mark internal command */
tw_dev->srb[request_id] = NULL;
@@ -676,7 +675,9 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+ &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
retval = TW_IOCTL_ERROR_OS_ENOMEM;
goto out2;
@@ -685,7 +686,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
goto out3;
/* See which ioctl we are doing */
@@ -867,11 +868,13 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+ cpu_addr, dma_handle);
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
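
The ioctl buffer changes in this file go together with the TW_Ioctl_Buf_Apache structure further down losing its one-byte data_buffer[1] placeholder in favor of a flexible array member, so every "+ sizeof(...) - 1" size calculation becomes a plain "sizeof(...) + length". A minimal sketch of the allocation pattern with a flexible array member:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ioctl_buf {
	unsigned int length;
	char data[];		/* flexible array member, contributes 0 to sizeof */
};

int main(void)
{
	size_t payload = 512;
	struct ioctl_buf *buf = malloc(sizeof(*buf) + payload);

	if (!buf)
		return 1;
	buf->length = payload;
	memset(buf->data, 0, payload);
	printf("allocated %zu bytes\n", sizeof(*buf) + payload);
	free(buf);
	return 0;
}
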
@@ -1000,19 +1003,13 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
if (print_host)
printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
tw_dev->host->host_no,
- TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
- full_command_packet->header.status_block.error,
- error_str[0] == '\0' ?
- twa_string_lookup(twa_error_table,
- full_command_packet->header.status_block.error) : error_str,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+ error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
full_command_packet->header.err_specific_desc);
else
printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
- TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
- full_command_packet->header.status_block.error,
- error_str[0] == '\0' ?
- twa_string_lookup(twa_error_table,
- full_command_packet->header.status_block.error) : error_str,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+ error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
full_command_packet->header.err_specific_desc);
}
@@ -1129,12 +1126,11 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
tw_initconnect->request_id = request_id;
tw_initconnect->message_credits = cpu_to_le16(message_credits);
- tw_initconnect->features = set_features;
/* Turn on 64-bit sgl support if we need to */
- tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+ set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
- tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+ tw_initconnect->features = cpu_to_le32(set_features);
if (set_features & TW_EXTENDED_INIT_CONNECT) {
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
@@ -1342,13 +1338,15 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
}
/* Report residual bytes for single sgl */
if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
- if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
- scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+ u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
+
+ if (length < scsi_bufflen(cmd))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
}
/* Now complete the io */
@@ -1390,13 +1388,13 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
newcommand = &full_command_packet->command.newcommand;
newcommand->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+ TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
if (length) {
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
newcommand->sg_list[0].length = cpu_to_le32(length);
}
newcommand->sgl_entries__lunh =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+ TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
} else {
oldcommand = &full_command_packet->command.oldcommand;
oldcommand->request_id = request_id;
@@ -1407,7 +1405,7 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
else
sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
- sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
sgl->length = cpu_to_le32(length);
oldcommand->size += pae;
@@ -1831,10 +1829,10 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (srb) {
command_packet->unit = srb->device->id;
command_packet->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+ TW_REQ_LUN_IN(srb->device->lun, request_id);
} else {
command_packet->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+ TW_REQ_LUN_IN(0, request_id);
command_packet->unit = 0;
}
@@ -1866,19 +1864,19 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
}
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+ command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
}
} else {
/* Internal cdb post */
for (i = 0; i < use_sg; i++) {
- command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
- command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
+ command_packet->sg_list[i].address = sglistarg[i].address;
+ command_packet->sg_list[i].length = sglistarg[i].length;
if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
goto out;
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+ command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
}
if (srb) {
@@ -2103,7 +2101,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
(char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
- le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
+ le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
/* Try to enable MSI */
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d3f479324527..0b23b0422e88 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -50,7 +50,7 @@
/* AEN string type */
typedef struct TAG_twa_message_type {
unsigned int code;
- char* text;
+ char *text;
} twa_message_type;
/* AEN strings */
@@ -435,8 +435,8 @@ static twa_message_type twa_error_table[] = {
/* request_id: 12, lun: 4 */
#define TW_REQ_LUN_IN(lun, request_id) \
- (((lun << 12) & 0xf000) | (request_id & 0xfff))
-#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
+ cpu_to_le16(((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((le16_to_cpu(lun) >> 12) & 0xf)
/* Macros */
#define TW_CONTROL_REG_ADDR(x) (x->base_addr)
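
Moving the cpu_to_le16()/le16_to_cpu() conversions into TW_REQ_LUN_IN and TW_LUN_OUT themselves is why later hunks can drop the explicit cpu_to_le16() around every caller: the packed request_id__lunl / sgl_entries__lunh fields are now stored little-endian by construction. A host-endian sketch of the packing itself (the kernel macros additionally apply the byte swap for the on-wire field):

#include <stdint.h>
#include <stdio.h>

/* lun in the top nibble, request id in the low 12 bits */
static uint16_t req_lun_in(unsigned int lun, unsigned int request_id)
{
	return (uint16_t)(((lun << 12) & 0xf000) | (request_id & 0x0fff));
}

static unsigned int lun_out(uint16_t packed)
{
	return (packed >> 12) & 0xf;
}

int main(void)
{
	uint16_t v = req_lun_in(3, 0x145);

	printf("packed=%#06x lun=%u req=%#x\n", v, lun_out(v), v & 0x0fff);
	return 0;
}
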
@@ -483,70 +483,75 @@ printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
#define TW_APACHE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 72 : 109)
#define TW_ESCALADE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 41 : 62)
#define TW_PADDING_LENGTH (sizeof(dma_addr_t) > 4 ? 8 : 0)
-#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
-#pragma pack(1)
+#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+typedef __le64 twa_addr_t;
+#define TW_CPU_TO_SGL(x) cpu_to_le64(x)
+#else
+typedef __le32 twa_addr_t;
+#define TW_CPU_TO_SGL(x) cpu_to_le32(x)
+#endif
/* Scatter Gather List Entry */
typedef struct TAG_TW_SG_Entry {
- dma_addr_t address;
- u32 length;
-} TW_SG_Entry;
+ twa_addr_t address;
+ __le32 length;
+} __packed TW_SG_Entry;
/* Command Packet */
typedef struct TW_Command {
- unsigned char opcode__sgloffset;
- unsigned char size;
- unsigned char request_id;
- unsigned char unit__hostid;
+ u8 opcode__sgloffset;
+ u8 size;
+ u8 request_id;
+ u8 unit__hostid;
/* Second DWORD */
- unsigned char status;
- unsigned char flags;
+ u8 status;
+ u8 flags;
union {
- unsigned short block_count;
- unsigned short parameter_count;
+ __le16 block_count;
+ __le16 parameter_count;
} byte6_offset;
union {
struct {
- u32 lba;
- TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
- dma_addr_t padding;
+ __le32 lba;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ twa_addr_t padding;
} io;
struct {
- TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
- u32 padding;
- dma_addr_t padding2;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ __le32 padding;
+ twa_addr_t padding2;
} param;
} byte8_offset;
} TW_Command;
/* Command Packet for 9000+ controllers */
typedef struct TAG_TW_Command_Apache {
- unsigned char opcode__reserved;
- unsigned char unit;
- unsigned short request_id__lunl;
- unsigned char status;
- unsigned char sgl_offset;
- unsigned short sgl_entries__lunh;
- unsigned char cdb[16];
- TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
- unsigned char padding[TW_PADDING_LENGTH];
+ u8 opcode__reserved;
+ u8 unit;
+ __le16 request_id__lunl;
+ u8 status;
+ u8 sgl_offset;
+ __le16 sgl_entries__lunh;
+ u8 cdb[16];
+ TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
+ u8 padding[TW_PADDING_LENGTH];
} TW_Command_Apache;
/* New command packet header */
typedef struct TAG_TW_Command_Apache_Header {
unsigned char sense_data[TW_SENSE_DATA_LENGTH];
struct {
- char reserved[4];
- unsigned short error;
- unsigned char padding;
- unsigned char severity__reserved;
+ u8 reserved[4];
+ __le16 error;
+ u8 padding;
+ u8 severity__reserved;
} status_block;
unsigned char err_specific_desc[98];
struct {
- unsigned char size_header;
- unsigned short reserved;
- unsigned char size_sense;
+ u8 size_header;
+ u8 reserved[2];
+ u8 size_sense;
} header_desc;
} TW_Command_Apache_Header;
@@ -561,19 +566,19 @@ typedef struct TAG_TW_Command_Full {
/* Initconnection structure */
typedef struct TAG_TW_Initconnect {
- unsigned char opcode__reserved;
- unsigned char size;
- unsigned char request_id;
- unsigned char res2;
- unsigned char status;
- unsigned char flags;
- unsigned short message_credits;
- u32 features;
- unsigned short fw_srl;
- unsigned short fw_arch_id;
- unsigned short fw_branch;
- unsigned short fw_build;
- u32 result;
+ u8 opcode__reserved;
+ u8 size;
+ u8 request_id;
+ u8 res2;
+ u8 status;
+ u8 flags;
+ __le16 message_credits;
+ __le32 features;
+ __le16 fw_srl;
+ __le16 fw_arch_id;
+ __le16 fw_branch;
+ __le16 fw_build;
+ __le32 result;
} TW_Initconnect;
/* Event info structure */
@@ -602,7 +607,7 @@ typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
char padding[488];
TW_Command_Full firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_Ioctl_Buf_Apache;
/* Lock structure for ioctl get/release lock */
@@ -614,11 +619,11 @@ typedef struct TAG_TW_Lock {
/* GetParam descriptor */
typedef struct {
- unsigned short table_id;
- unsigned short parameter_id;
- unsigned short parameter_size_bytes;
- unsigned short actual_parameter_size_bytes;
- unsigned char data[1];
+ __le16 table_id;
+ __le16 parameter_id;
+ __le16 parameter_size_bytes;
+ __le16 actual_parameter_size_bytes;
+ u8 data[];
} TW_Param_Apache, *PTW_Param_Apache;
/* Response queue */
@@ -645,8 +650,6 @@ typedef struct TAG_TW_Compatibility_Info
unsigned short fw_on_ctlr_build;
} TW_Compatibility_Info;
-#pragma pack()
-
typedef struct TAG_TW_Device_Extension {
u32 __iomem *base_addr;
unsigned long *generic_buffer_virt[TW_Q_LENGTH];
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a7292883b72b..4ee485ab2714 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -429,7 +429,7 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
/* Additional sense code qualifier */
tw_dev->srb[request_id]->sense_buffer[13] = tw_sense_table[i][3];
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
return TW_ISR_DONT_RESULT; /* Special case for isr to not over-write result */
}
}
@@ -1977,7 +1977,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
- scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
+ scsi_build_sense(SCpnt, 1, ILLEGAL_REQUEST, 0x20, 0);
done(SCpnt);
retval = 0;
}
@@ -2159,7 +2159,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
}
/* Now complete the io */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 77ccb96e5ed4..1c6b4e672687 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -978,10 +978,10 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
NCR_700_set_tag_neg_state(SCp->device,
NCR_700_FINISHED_TAG_NEGOTIATION);
-
+
/* check for contingent allegiance conditions */
- if (hostdata->status[0] >> 1 == CHECK_CONDITION ||
- hostdata->status[0] >> 1 == COMMAND_TERMINATED) {
+ if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION ||
+ hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) {
struct NCR_700_command_slot *slot =
(struct NCR_700_command_slot *)SCp->host_scribble;
if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 0464e37c806a..90253208a72f 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -40,7 +40,7 @@ struct sccb_mgr_info {
u16 si_per_targ_ultra_nego;
u16 si_per_targ_no_disc;
u16 si_per_targ_wide_nego;
- u16 si_flags;
+ u16 si_mflags;
unsigned char si_card_family;
unsigned char si_bustype;
unsigned char si_card_model[3];
@@ -304,40 +304,12 @@ typedef struct SCCBscam_info {
} SCCBSCAM_INFO;
-#define SCSI_REQUEST_SENSE 0x03
-#define SCSI_READ 0x08
-#define SCSI_WRITE 0x0A
-#define SCSI_START_STOP_UNIT 0x1B
-#define SCSI_READ_EXTENDED 0x28
-#define SCSI_WRITE_EXTENDED 0x2A
-#define SCSI_WRITE_AND_VERIFY 0x2E
-
-#define SSGOOD 0x00
-#define SSCHECK 0x02
-#define SSQ_FULL 0x28
-
-#define SMCMD_COMP 0x00
-#define SMEXT 0x01
-#define SMSAVE_DATA_PTR 0x02
-#define SMREST_DATA_PTR 0x03
-#define SMDISC 0x04
-#define SMABORT 0x06
-#define SMREJECT 0x07
-#define SMNO_OP 0x08
-#define SMPARITY 0x09
-#define SMDEV_RESET 0x0C
-#define SMABORT_TAG 0x0D
-#define SMINIT_RECOVERY 0x0F
-#define SMREL_RECOVERY 0x10
#define SMIDENT 0x80
#define DISC_PRIV 0x40
-#define SMSYNC 0x01
-#define SMWDTR 0x03
#define SM8BIT 0x00
#define SM16BIT 0x01
-#define SMIGNORWR 0x23 /* Ignore Wide Residue */
#define SIX_BYTE_CMD 0x06
#define TWELVE_BYTE_CMD 0x0C
@@ -1073,22 +1045,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
ScamFlg =
(unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
- pCardInfo->si_flags = 0x0000;
+ pCardInfo->si_mflags = 0x0000;
if (i & 0x01)
- pCardInfo->si_flags |= SCSI_PARITY_ENA;
+ pCardInfo->si_mflags |= SCSI_PARITY_ENA;
if (!(i & 0x02))
- pCardInfo->si_flags |= SOFT_RESET;
+ pCardInfo->si_mflags |= SOFT_RESET;
if (i & 0x10)
- pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+ pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
if (ScamFlg & SCAM_ENABLED)
- pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+ pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
if (ScamFlg & SCAM_LEVEL2)
- pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+ pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
if (i & 0x04) {
@@ -1104,7 +1076,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
- pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+ pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
pCardInfo->si_card_family = HARPOON_FAMILY;
pCardInfo->si_bustype = BUSTYPE_PCI;
@@ -1140,15 +1112,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
if (pCardInfo->si_card_model[1] == '3') {
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
} else if (pCardInfo->si_card_model[2] == '0') {
temp = RD_HARPOON(ioport + hp_xfer_pad);
WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
WR_HARPOON(ioport + hp_xfer_pad, temp);
} else {
temp = RD_HARPOON(ioport + hp_ee_ctrl);
@@ -1166,9 +1138,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
WR_HARPOON(ioport + hp_ee_ctrl, temp);
WR_HARPOON(ioport + hp_xfer_pad, temp2);
if (!(temp3 & BIT(7)))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
if (!(temp3 & BIT(6)))
- pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
}
ARAM_ACCESS(ioport);
@@ -1275,7 +1247,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
CurrCard->ourId = pCardInfo->si_id;
- i = (unsigned char)pCardInfo->si_flags;
+ i = (unsigned char)pCardInfo->si_mflags;
if (i & SCSI_PARITY_ENA)
WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
@@ -1289,14 +1261,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
j |= SCSI_TERM_ENA_H;
WR_HARPOON(ioport + hp_ee_ctrl, j);
- if (!(pCardInfo->si_flags & SOFT_RESET)) {
+ if (!(pCardInfo->si_mflags & SOFT_RESET)) {
FPT_sresb(ioport, thisCard);
FPT_scini(thisCard, pCardInfo->si_id, 0);
}
- if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+ if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
CurrCard->globalFlags |= F_NO_FILTER;
if (pCurrNvRam) {
@@ -1660,7 +1632,7 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
p_Sccb->Sccb_scsistat =
ABORT_ST;
p_Sccb->Sccb_scsimsg =
- SMABORT_TAG;
+ ABORT_TASK;
if (((struct sccb_card *)
pCurrCard)->currentSCCB ==
@@ -1812,7 +1784,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
FPT_phaseChkFifo(ioport, thisCard);
if (RD_HARPOON(ioport + hp_gp_reg_1) ==
- SMSAVE_DATA_PTR) {
+ SAVE_POINTERS) {
WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
currSCCB->Sccb_XferState |= F_NO_DATA_YET;
@@ -1865,7 +1837,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
FPT_phaseChkFifo(ioport, thisCard);
if (RD_HARPOON(ioport + hp_gp_reg_1) ==
- SMSAVE_DATA_PTR) {
+ SAVE_POINTERS) {
WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
currSCCB->Sccb_XferState |=
F_NO_DATA_YET;
@@ -2258,7 +2230,7 @@ static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB)
WR_HARPOON(port + hp_fiforead, 0);
WR_HARPOON(port + hp_fifowrite, 0);
if (pCurrSCCB != NULL) {
- pCurrSCCB->Sccb_scsimsg = SMPARITY;
+ pCurrSCCB->Sccb_scsimsg = MSG_PARITY_ERROR;
}
message = 0x00;
do {
@@ -2411,7 +2383,7 @@ static void FPT_ssel(u32 port, unsigned char p_card)
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP);
- currSCCB->Sccb_scsimsg = SMDEV_RESET;
+ currSCCB->Sccb_scsimsg = TARGET_RESET;
WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
auto_loaded = 1;
@@ -2758,9 +2730,9 @@ static void FPT_sres(u32 port, unsigned char p_card,
if (message == 0) {
msgRetryCount++;
if (msgRetryCount == 1) {
- FPT_SendMsg(port, SMPARITY);
+ FPT_SendMsg(port, MSG_PARITY_ERROR);
} else {
- FPT_SendMsg(port, SMDEV_RESET);
+ FPT_SendMsg(port, TARGET_RESET);
FPT_sssyncv(port, our_target, NARROW_SCSI,
currTar_Info);
@@ -2860,8 +2832,8 @@ static void FPT_SendMsg(u32 port, unsigned char message)
WR_HARPOON(port + hp_portctrl_0, 0x00);
- if ((message == SMABORT) || (message == SMDEV_RESET) ||
- (message == SMABORT_TAG)) {
+ if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+ (message == ABORT_TASK)) {
while (!
(RDW_HARPOON((port + hp_intstat)) &
(BUS_FREE | PHASE))) {
@@ -2893,7 +2865,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
- if (message == SMREST_DATA_PTR) {
+ if (message == RESTORE_POINTERS) {
if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) {
currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
@@ -2905,7 +2877,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
(AUTO_IMMED + DISCONNECT_START));
}
- else if (message == SMCMD_COMP) {
+ else if (message == COMMAND_COMPLETE) {
if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
currTar_Info->TarStatus &=
@@ -2917,15 +2889,16 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
- else if ((message == SMNO_OP) || (message >= SMIDENT)
- || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY)) {
+ else if ((message == NOP) || (message >= IDENTIFY_BASE) ||
+ (message == INITIATE_RECOVERY) ||
+ (message == RELEASE_RECOVERY)) {
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
- else if (message == SMREJECT) {
+ else if (message == MESSAGE_REJECT) {
if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
(currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
@@ -3026,19 +2999,19 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
}
- else if (message == SMEXT) {
+ else if (message == EXTENDED_MESSAGE) {
ACCEPT_MSG(port);
FPT_shandem(port, p_card, currSCCB);
}
- else if (message == SMIGNORWR) {
+ else if (message == IGNORE_WIDE_RESIDUE) {
ACCEPT_MSG(port); /* ACK the RESIDUE MSG */
message = FPT_sfm(port, currSCCB);
- if (currSCCB->Sccb_scsimsg != SMPARITY)
+ if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
@@ -3047,7 +3020,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
else {
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
- currSCCB->Sccb_scsimsg = SMREJECT;
+ currSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
@@ -3073,7 +3046,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
message = FPT_sfm(port, pCurrSCCB);
if (message) {
- if (message == SMSYNC) {
+ if (message == EXTENDED_SDTR) {
if (length == 0x03) {
@@ -3081,10 +3054,10 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
FPT_stsyncn(port, p_card);
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
}
- } else if (message == SMWDTR) {
+ } else if (message == EXTENDED_WDTR) {
if (length == 0x02) {
@@ -3092,7 +3065,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
FPT_stwidn(port, p_card);
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
@@ -3101,20 +3074,20 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
}
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
} else {
- if (pCurrSCCB->Sccb_scsimsg != SMPARITY)
+ if (pCurrSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
} else {
- if (pCurrSCCB->Sccb_scsimsg == SMPARITY)
+ if (pCurrSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
@@ -3148,10 +3121,10 @@ static unsigned char FPT_sisyncn(u32 port, unsigned char p_card,
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
WRW_HARPOON((port + SYNC_MSGS + 0),
- (MPM_OP + AMSG_OUT + SMEXT));
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
WRW_HARPOON((port + SYNC_MSGS + 4),
- (MPM_OP + AMSG_OUT + SMSYNC));
+ (MPM_OP + AMSG_OUT + EXTENDED_SDTR));
if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
@@ -3221,7 +3194,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
sync_msg = FPT_sfm(port, currSCCB);
- if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3231,7 +3204,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
offset = FPT_sfm(port, currSCCB);
- if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3343,9 +3316,11 @@ static void FPT_sisyncr(u32 port, unsigned char sync_pulse,
unsigned char offset)
{
ARAM_ACCESS(port);
- WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
- WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMSYNC));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + EXTENDED_SDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse));
WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset));
@@ -3388,10 +3363,10 @@ static unsigned char FPT_siwidn(u32 port, unsigned char p_card)
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
WRW_HARPOON((port + SYNC_MSGS + 0),
- (MPM_OP + AMSG_OUT + SMEXT));
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
WRW_HARPOON((port + SYNC_MSGS + 4),
- (MPM_OP + AMSG_OUT + SMWDTR));
+ (MPM_OP + AMSG_OUT + EXTENDED_WDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 8),
(MPM_OP + AMSG_OUT + SM16BIT));
@@ -3436,7 +3411,7 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
width = FPT_sfm(port, currSCCB);
- if ((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((width == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3499,9 +3474,11 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
static void FPT_siwidr(u32 port, unsigned char width)
{
ARAM_ACCESS(port);
- WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
- WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMWDTR));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + EXTENDED_WDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width));
WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
@@ -3682,7 +3659,7 @@ static void FPT_ssenss(struct sccb_card *pCurrCard)
}
currSCCB->CdbLength = SIX_BYTE_CMD;
- currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
+ currSCCB->Cdb[0] = REQUEST_SENSE;
currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */
currSCCB->Cdb[2] = 0x00;
currSCCB->Cdb[3] = 0x00;
@@ -3939,13 +3916,9 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
*/
if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
(currTar_Info->TarStatus & TAG_Q_TRYING)) {
- p_sccb->Sccb_idmsg =
- (unsigned char)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
- }
-
- else {
-
- p_sccb->Sccb_idmsg = (unsigned char)SMIDENT | p_sccb->Lun;
+ p_sccb->Sccb_idmsg = IDENTIFY(true, p_sccb->Lun);
+ } else {
+ p_sccb->Sccb_idmsg = IDENTIFY(false, p_sccb->Lun);
}
p_sccb->HostStatus = 0x00;
@@ -3962,7 +3935,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
*/
p_sccb->Sccb_scsistat = BUS_FREE_ST;
p_sccb->SccbStatus = SCCB_IN_PROCESS;
- p_sccb->Sccb_scsimsg = SMNO_OP;
+ p_sccb->Sccb_scsimsg = NOP;
}
@@ -4167,7 +4140,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
message = currSCCB->Sccb_scsimsg;
scsiID = currSCCB->TargID;
- if (message == SMDEV_RESET) {
+ if (message == TARGET_RESET) {
currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
currTar_Info->TarSyncCtrl = 0;
@@ -4203,7 +4176,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
else if (currSCCB->Sccb_scsistat < COMMAND_ST) {
- if (message == SMNO_OP) {
+ if (message == NOP) {
currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
FPT_ssel(port, p_card);
@@ -4211,13 +4184,13 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
}
} else {
- if (message == SMABORT)
+ if (message == ABORT_TASK_SET)
FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
}
} else {
- message = SMABORT;
+ message = ABORT_TASK_SET;
}
WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
@@ -4232,8 +4205,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
WR_HARPOON(port + hp_portctrl_0, 0x00);
- if ((message == SMABORT) || (message == SMDEV_RESET) ||
- (message == SMABORT_TAG)) {
+ if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+ (message == ABORT_TASK)) {
while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
}
@@ -4275,8 +4248,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
else {
- if (message == SMPARITY) {
- currSCCB->Sccb_scsimsg = SMNO_OP;
+ if (message == MSG_PARITY_ERROR) {
+ currSCCB->Sccb_scsimsg = NOP;
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
} else {
@@ -4306,7 +4279,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
}
message = RD_HARPOON(port + hp_scsidata_0);
- if ((message == SMDISC) || (message == SMSAVE_DATA_PTR)) {
+ if ((message == DISCONNECT) || (message == SAVE_POINTERS)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + END_DATA_START));
@@ -4321,7 +4294,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
FPT_sdecm(message, port, p_card);
} else {
- if (currSCCB->Sccb_scsimsg != SMPARITY)
+ if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
@@ -4351,7 +4324,7 @@ static void FPT_phaseIllegal(u32 port, unsigned char p_card)
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
currSCCB->Sccb_scsistat = ABORT_ST;
- currSCCB->Sccb_scsimsg = SMABORT;
+ currSCCB->Sccb_scsimsg = ABORT_TASK_SET;
}
ACCEPT_MSG_ATN(port);
@@ -4650,9 +4623,9 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0;
- if (status_byte != SSGOOD) {
+ if (status_byte != SAM_STAT_GOOD) {
- if (status_byte == SSQ_FULL) {
+ if (status_byte == SAM_STAT_TASK_SET_FULL) {
if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
@@ -4784,7 +4757,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
}
- if (status_byte == SSCHECK) {
+ if (status_byte == SAM_STAT_CHECK_CONDITION) {
if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) {
if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
TarEEValue & EE_SYNC_MASK) {
@@ -4806,7 +4779,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
currSCCB->SccbStatus = SCCB_ERROR;
currSCCB->TargetStatus = status_byte;
- if (status_byte == SSCHECK) {
+ if (status_byte == SAM_STAT_CHECK_CONDITION) {
FPT_sccbMgrTbl[p_card][currSCCB->TargID].
TarLUN_CA = 1;
@@ -6868,14 +6841,14 @@ static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
if ((p_sccb->
ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN))
&& (p_sccb->HostStatus == SCCB_COMPLETE)
- && (p_sccb->TargetStatus != SSCHECK))
-
- if ((SCSIcmd == SCSI_READ) ||
- (SCSIcmd == SCSI_WRITE) ||
- (SCSIcmd == SCSI_READ_EXTENDED) ||
- (SCSIcmd == SCSI_WRITE_EXTENDED) ||
- (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
- (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ && (p_sccb->TargetStatus != SAM_STAT_CHECK_CONDITION))
+
+ if ((SCSIcmd == READ_6) ||
+ (SCSIcmd == WRITE_6) ||
+ (SCSIcmd == READ_10) ||
+ (SCSIcmd == WRITE_10) ||
+ (SCSIcmd == WRITE_VERIFY) ||
+ (SCSIcmd == START_STOP) ||
(pCurrCard->globalFlags & F_NO_FILTER)
)
p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3d114be5b662..8f44d433e06e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -311,7 +311,7 @@ source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_HAS_WD93 && SCSI
- help
+ help
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.
@@ -482,6 +482,7 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
@@ -1157,6 +1158,8 @@ config SCSI_LPFC_DEBUG_FS
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
+source "drivers/scsi/elx/Kconfig"
+
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on EISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bc3882f5cc69..1748d1ec1338 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
+obj-$(CONFIG_SCSI_EFCT) += elx/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 2ddbcaa667d1..3baadd068768 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -538,11 +538,11 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if (status_byte(cmd->result) != GOOD) {
+ if (get_status_byte(cmd) != SAM_STAT_GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
} else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_driver_byte(cmd, DRIVER_SENSE);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
}
hostdata->sensing = NULL;
}
@@ -1815,6 +1815,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
switch (tmp) {
case ABORT:
+ set_host_byte(cmd, DID_ABORT);
+ fallthrough;
case COMMAND_COMPLETE:
/* Accept message by clearing ACK */
sink = 1;
@@ -1826,9 +1828,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
- cmd->result &= ~0xffff;
- cmd->result |= cmd->SCp.Status;
- cmd->result |= cmd->SCp.Message << 8;
+ set_status_byte(cmd, cmd->SCp.Status);
set_resid_from_SCp(cmd);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index f1f62b5da8b7..46b8dffce2dd 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1235,8 +1235,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
if (ret < 0)
return ret;
command = ContainerRawIo2;
- fibsize = sizeof(struct aac_raw_io2) +
- ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ fibsize = struct_size(readcmd2, sge,
+ le32_to_cpu(readcmd2->sgeCnt));
} else {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(fib);
@@ -1366,8 +1366,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
if (ret < 0)
return ret;
command = ContainerRawIo2;
- fibsize = sizeof(struct aac_raw_io2) +
- ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ fibsize = struct_size(writecmd2, sge,
+ le32_to_cpu(writecmd2->sgeCnt));
} else {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(fib);
@@ -3998,7 +3998,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
if (aac_convert_sgl == 0)
return 0;
- sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ sge = kmalloc_array(nseg_new, sizeof(*sge), GFP_ATOMIC);
if (sge == NULL)
return -ENOMEM;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e3e4ecbea726..3733df77bc65 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1929,7 +1929,7 @@ struct aac_raw_io2 {
u8 bpComplete; /* reserved for F/W use */
u8 sgeFirstIndex; /* reserved for F/W use */
u8 unused[4];
- struct sge_ieee1212 sge[1];
+ struct sge_ieee1212 sge[];
};
#define CT_FLUSH_CACHE 129
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 800052f10699..f3377e2ef5fb 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5964,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- set_driver_byte(scp, DRIVER_SENSE);
}
break;
@@ -6715,7 +6714,6 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- set_driver_byte(scp, DRIVER_SENSE);
}
break;
@@ -6730,14 +6728,12 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
set_status_byte(scp, qdonep->d3.scsi_stat);
- set_msg_byte(scp, qdonep->d3.scsi_msg);
set_host_byte(scp, DID_ABORT);
break;
default:
ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
set_status_byte(scp, qdonep->d3.scsi_stat);
- set_msg_byte(scp, qdonep->d3.scsi_msg);
set_host_byte(scp, DID_ERROR);
break;
}
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index d8e19afa7a14..b13b5c85f3de 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -619,7 +619,8 @@ static struct {
static irqreturn_t intr(int irq, void *dev_id);
static void reset_ports(struct Scsi_Host *shpnt);
static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
-static void done(struct Scsi_Host *shpnt, int error);
+static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
+ unsigned char host_byte);
/* diagnostics */
static void show_command(struct scsi_cmnd * ptr);
@@ -1271,7 +1272,8 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
* Internal done function
*
*/
-static void done(struct Scsi_Host *shpnt, int error)
+static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
+ unsigned char host_byte)
{
if (CURRENT_SC) {
if(DONE_SC)
@@ -1281,7 +1283,8 @@ static void done(struct Scsi_Host *shpnt, int error)
DONE_SC = CURRENT_SC;
CURRENT_SC = NULL;
- DONE_SC->result = error;
+ set_status_byte(DONE_SC, status_byte);
+ set_host_byte(DONE_SC, host_byte);
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
}
@@ -1376,13 +1379,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
if(CURRENT_SC->SCp.phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
+ done(shpnt, CURRENT_SC->SCp.Status, DID_OK);
} else if(CURRENT_SC->SCp.phase & aborted) {
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16));
+ done(shpnt, CURRENT_SC->SCp.Status, DID_ABORT);
} else if(CURRENT_SC->SCp.phase & resetted) {
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16));
+ done(shpnt, CURRENT_SC->SCp.Status, DID_RESET);
} else if(CURRENT_SC->SCp.phase & disconnected) {
/* target sent DISCONNECT */
@@ -1394,7 +1397,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
CURRENT_SC = NULL;
} else {
- done(shpnt, DID_ERROR << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ERROR);
}
#if defined(AHA152X_STAT)
} else {
@@ -1515,7 +1518,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SELDO)) {
scmd_printk(KERN_ERR, CURRENT_SC,
"aha152x: passing bus free condition\n");
- done(shpnt, DID_NO_CONNECT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
return;
}
@@ -1552,12 +1555,12 @@ static void selto_run(struct Scsi_Host *shpnt)
CURRENT_SC->SCp.phase &= ~selecting;
if (CURRENT_SC->SCp.phase & aborted)
- done(shpnt, DID_ABORT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
- done(shpnt, DID_BUS_BUSY << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
else
/* ARBITRATION won, but SELECTION failed */
- done(shpnt, DID_NO_CONNECT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
}
/*
@@ -1891,7 +1894,7 @@ static void cmd_init(struct Scsi_Host *shpnt)
if (CURRENT_SC->SCp.sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
- done(shpnt, DID_ERROR << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ERROR);
return;
}
@@ -2231,7 +2234,7 @@ static int update_state(struct Scsi_Host *shpnt)
static void parerr_run(struct Scsi_Host *shpnt)
{
scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n");
- done(shpnt, DID_PARITY << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_PARITY);
}
/*
@@ -2254,7 +2257,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
- ptr->result = DID_RESET << 16;
+ set_host_byte(ptr, DID_RESET);
ptr->scsi_done(ptr);
}
@@ -2262,7 +2265,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
}
if(CURRENT_SC && !CURRENT_SC->device->soft_reset)
- done(shpnt, DID_RESET << 16 );
+ done(shpnt, SAM_STAT_GOOD, DID_RESET);
}
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 0dc831026e9e..39d8759fe558 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -267,8 +267,11 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
guarantee that we will still have it in the
cdb when we come back */
if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
- memcpy(SCtmp->sense_buffer, ecbptr->sense,
- SCSI_SENSE_BUFFERSIZE);
+ memcpy_and_pad(SCtmp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ ecbptr->sense,
+ sizeof(ecbptr->sense),
+ 0);
errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
} else
errstatus = 0;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 4f7102f8eeb0..92ea24a075b8 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1928,7 +1928,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
memcpy(cmd->sense_buffer,
ahd_get_sense_buf(ahd, scb)
+ sense_offset, sense_size);
- cmd->result |= (DRIVER_SENSE << 24);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
#ifdef AHD_DEBUG
if (ahd_debug & AHD_SHOW_SENSE) {
@@ -2018,6 +2018,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
int new_status = DID_OK;
int do_fallback = 0;
int scsi_status;
+ struct scsi_sense_data *sense;
/*
* Map CAM error codes into Linux Error codes. We
@@ -2041,18 +2042,12 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
switch(scsi_status) {
case SAM_STAT_COMMAND_TERMINATED:
case SAM_STAT_CHECK_CONDITION:
- if ((cmd->result >> 24) != DRIVER_SENSE) {
+ sense = (struct scsi_sense_data *)
+ cmd->sense_buffer;
+ if (sense->extra_len >= 5 &&
+ (sense->add_sense_code == 0x47
+ || sense->add_sense_code == 0x48))
do_fallback = 1;
- } else {
- struct scsi_sense_data *sense;
-
- sense = (struct scsi_sense_data *)
- cmd->sense_buffer;
- if (sense->extra_len >= 5 &&
- (sense->add_sense_code == 0x47
- || sense->add_sense_code == 0x48))
- do_fallback = 1;
- }
break;
default:
break;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 4b04ab8908f8..a396f048a031 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -493,7 +493,7 @@ ahc_inq(struct ahc_softc *ahc, u_int port)
return ((ahc_inb(ahc, port))
| (ahc_inb(ahc, port+1) << 8)
| (ahc_inb(ahc, port+2) << 16)
- | (ahc_inb(ahc, port+3) << 24)
+ | (((uint64_t)ahc_inb(ahc, port+3)) << 24)
| (((uint64_t)ahc_inb(ahc, port+4)) << 32)
| (((uint64_t)ahc_inb(ahc, port+5)) << 40)
| (((uint64_t)ahc_inb(ahc, port+6)) << 48)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d33f5a00bf0b..8b3d472aa3cc 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1838,7 +1838,6 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
if (sense_size < SCSI_SENSE_BUFFERSIZE)
memset(&cmd->sense_buffer[sense_size], 0,
SCSI_SENSE_BUFFERSIZE - sense_size);
- cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
if (ahc_debug & AHC_SHOW_SENSE) {
int i;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index a195bfe9eccc..7a78606598c4 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -53,6 +53,7 @@ static struct scsi_host_template aic94xx_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 297a66770260..46815e65f7a4 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -718,10 +718,12 @@ static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
do {
switch (id1) {
default:
- if (el->id1 == id1)
+ if (el->id1 == id1) {
+ fallthrough;
case 0xFF:
if (el->id0 == id0)
return el;
+ }
}
el = start + le16_to_cpu(el->next);
} while (el != start);
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 71d18f607dae..c6b63eae28f5 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -205,7 +205,7 @@ Again:
switch (opcode) {
case TC_NO_ERROR:
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
break;
case TC_UNDERRUN:
ts->resp = SAS_TASK_COMPLETE;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 0f6abd233614..6ce57f031df5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.50.00.02-20200819"
+#define ARCMSR_DRIVER_VERSION "v1.50.00.05-20210429"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4b79661275c9..ec1a834c922d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1323,19 +1323,21 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
-
struct scsi_cmnd *pcmd = ccb->pcmd;
- struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (sensebuffer) {
- int sense_data_length =
- sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
- ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
- memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
- memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
+
+ pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ if (pcmd->sense_buffer) {
+ struct SENSE_DATA *sensebuffer;
+
+ memcpy_and_pad(pcmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ ccb->arcmsr_cdb.SenseData,
+ sizeof(ccb->arcmsr_cdb.SenseData),
+ 0);
+
+ sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
- pcmd->result |= (DRIVER_SENSE << 24);
}
}
@@ -1923,8 +1925,12 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
if (ccb->arc_cdb_size <= 0x300)
arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
- else
- arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1;
+ else {
+ arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
+ if (arc_cdb_size > 0xF)
+ arc_cdb_size = 0xF;
+ arc_cdb_size = (arc_cdb_size << 1) | 1;
+ }
ccb_post_stamp = (ccb->smid | arc_cdb_size);
writel(0, &pmu->inbound_queueport_high);
writel(ccb_post_stamp, &pmu->inbound_queueport_low);
@@ -2415,10 +2421,17 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
- uint32_t outbound_doorbell, in_doorbell, tmp;
+ uint32_t outbound_doorbell, in_doorbell, tmp, i;
struct MessageUnit_E __iomem *reg = pACB->pmuE;
- in_doorbell = readl(&reg->iobound_doorbell);
+ if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
+ for (i = 0; i < 5; i++) {
+ in_doorbell = readl(&reg->iobound_doorbell);
+ if (in_doorbell != 0)
+ break;
+ }
+ } else
+ in_doorbell = readl(&reg->iobound_doorbell);
outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
do {
writel(0, &reg->host_int_status); /* clear interrupt */
@@ -3243,7 +3256,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
- cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
+ cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_done(cmd);
return 0;
}
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 248a5bfad153..4a84599ff491 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -794,7 +794,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
acornscsi_dma_cleanup(host);
- SCpnt->result = result << 16 | host->scsi.SCp.Message << 8 | host->scsi.SCp.Status;
+ set_host_byte(SCpnt, result);
+ if (result == DID_OK)
+ scsi_msg_to_host_byte(SCpnt, host->scsi.SCp.Message);
+ set_status_byte(SCpnt, host->scsi.SCp.Status);
/*
* In theory, this should not happen. In practice, it seems to.
@@ -833,12 +836,12 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
xfer_warn = 0;
if (xfer_warn) {
- switch (status_byte(SCpnt->result)) {
- case CHECK_CONDITION:
- case COMMAND_TERMINATED:
- case BUSY:
- case QUEUE_FULL:
- case RESERVATION_CONFLICT:
+ switch (get_status_byte(SCpnt)) {
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_RESERVATION_CONFLICT:
break;
default:
@@ -2470,7 +2473,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
host->host->host_no, '0' + SCpnt->device->id);
- SCpnt->result = DID_NO_CONNECT << 16;
+ set_host_byte(SCpnt, DID_NO_CONNECT);
done(SCpnt);
return 0;
}
@@ -2492,7 +2495,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
unsigned long flags;
if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) {
- SCpnt->result = DID_ERROR << 16;
+ set_host_byte(SCpnt, DID_ERROR);
done(SCpnt);
return 0;
}
@@ -2506,31 +2509,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
DEF_SCSI_QCMD(acornscsi_queuecmd)
-/*
- * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
- * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
- * Params : SCpntp1 - pointer to command to return
- * SCpntp2 - pointer to command to check
- * result - result to pass back to mid-level done function
- * Returns : *SCpntp2 = NULL if *SCpntp1 is the same command structure as *SCpntp2.
- */
-static inline void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1,
- struct scsi_cmnd **SCpntp2,
- int result)
-{
- struct scsi_cmnd *SCpnt = *SCpntp1;
-
- if (SCpnt) {
- *SCpntp1 = NULL;
-
- SCpnt->result = result;
- SCpnt->scsi_done(SCpnt);
- }
-
- if (SCpnt == *SCpntp2)
- *SCpntp2 = NULL;
-}
-
enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
/*
@@ -2664,6 +2642,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
//#endif
clear_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), host->busyluns);
+ fallthrough;
/*
* We found the command, and cleared it out. Either
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 2e687ce60753..9c4458a99025 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -1375,6 +1375,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case IS_COMPLETE:
break;
}
+ break;
default:
break;
@@ -1479,7 +1480,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
if (msgqueue_msglength(&info->scsi.msgs) > 1)
fas216_cmd(info, CMD_SETATN);
- /*FALLTHROUGH*/
+ fallthrough;
/*
* Any -> Message Out
@@ -2010,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
"request sense complete, result=0x%04x%02x%02x",
result, SCpnt->SCp.Message, SCpnt->SCp.Status);
- if (result != DID_OK || SCpnt->SCp.Status != GOOD)
+ if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD)
/*
* Something went wrong. Make sure that we don't
* have valid data in the sense buffer that could
@@ -2042,8 +2043,10 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
{
info->stats.fins += 1;
- SCpnt->result = result << 16 | info->scsi.SCp.Message << 8 |
- info->scsi.SCp.Status;
+ set_host_byte(SCpnt, result);
+ if (result == DID_OK)
+ scsi_msg_to_host_byte(SCpnt, info->scsi.SCp.Message);
+ set_status_byte(SCpnt, info->scsi.SCp.Status);
fas216_log_command(info, LOG_CONNECT, SCpnt,
"command complete, result=0x%08x", SCpnt->result);
@@ -2051,23 +2054,22 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
/*
* If the driver detected an error, we're all done.
*/
- if (host_byte(SCpnt->result) != DID_OK ||
- msg_byte(SCpnt->result) != COMMAND_COMPLETE)
+ if (get_host_byte(SCpnt) != DID_OK)
goto done;
/*
* If the command returned CHECK_CONDITION or COMMAND_TERMINATED
* status, request the sense information.
*/
- if (status_byte(SCpnt->result) == CHECK_CONDITION ||
- status_byte(SCpnt->result) == COMMAND_TERMINATED)
+ if (get_status_byte(SCpnt) == SAM_STAT_CHECK_CONDITION ||
+ get_status_byte(SCpnt) == SAM_STAT_COMMAND_TERMINATED)
goto request_sense;
/*
* If the command did not complete with GOOD status,
* we are all done here.
*/
- if (status_byte(SCpnt->result) != GOOD)
+ if (get_status_byte(SCpnt) != SAM_STAT_GOOD)
goto done;
/*
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 0e935c49b57b..8aeaddc93b16 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
uint16_t cri_index;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep = ep->dd_data;
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
if (beiscsi_ep->phba != phba) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
-
- return -EEXIST;
+ rc = -EEXIST;
+ goto put_ep;
}
cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
if (phba->conn_table[cri_index]) {
@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep->ep_cid,
beiscsi_conn,
phba->conn_table[cri_index]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto put_ep;
}
}
@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
"BS_%d : cid %d phba->conn_table[%u]=%p\n",
beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
phba->conn_table[cri_index] = beiscsi_conn;
- return 0;
+
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
@@ -1293,7 +1300,6 @@ static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep)
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct beiscsi_endpoint *beiscsi_ep;
- struct beiscsi_conn *beiscsi_conn;
struct beiscsi_hba *phba;
uint16_t cri_index;
@@ -1312,11 +1318,6 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
return;
}
- if (beiscsi_ep->conn) {
- beiscsi_conn = beiscsi_ep->conn;
- iscsi_suspend_queue(beiscsi_conn->conn);
- }
-
if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 22cf7f4b8d8c..e70f69f791db 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -143,8 +143,7 @@ DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
beiscsi_##_name##_disp, beiscsi_##_name##_store)
/*
- * When new log level added update the
- * the MAX allowed value for log_enable
+ * When new log level added update MAX allowed value for log_enable
*/
BEISCSI_RW_ATTR(log_enable, 0x00,
0xFF, 0x00, "Enable logging Bit Mask\n"
@@ -416,7 +415,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
return NULL;
}
- shost->max_id = BE2_MAX_SESSIONS;
+ shost->max_id = BE2_MAX_SESSIONS - 1;
shost->max_channel = 0;
shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
shost->max_lun = BEISCSI_NUM_MAX_LUN;
@@ -825,9 +824,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
&phwi_context->be_eq[i]);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_init_irqs-Failed to"
- "register msix for i = %d\n",
- i);
+ "BM_%d : %s-Failed to register msix for i = %d\n",
+ __func__, i);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -841,9 +839,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
- "BM_%d : beiscsi_init_irqs-"
- "Failed to register beiscsi_msix_mcc\n");
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
+ __func__);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@@ -853,8 +851,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
"beiscsi", phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_init_irqs-"
- "Failed to register irq\\n");
+ "BM_%d : %s-Failed to register irq\n",
+ __func__);
return ret;
}
}
@@ -1030,7 +1028,7 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
phba->params.wrbs_per_cxn);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
+ "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
"wrb_handles_available=%d\n",
pwrb_handle, pwrb_context->free_index,
pwrb_context->wrb_handles_available);
@@ -1374,7 +1372,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
- " hwi_complete_cmd- Solicited path\n");
+ " %s- Solicited path\n", __func__);
break;
case HWH_TYPE_NOP:
@@ -1384,8 +1382,8 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
default:
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : In hwi_complete_cmd, unknown type = %d"
- "wrb_index 0x%x CID 0x%x\n", type,
+ "BM_%d : In %s, unknown type = %d "
+ "wrb_index 0x%x CID 0x%x\n", __func__, type,
csol_cqe.wrb_index,
csol_cqe.cid);
break;
@@ -1883,9 +1881,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
cid = AMAP_GET_BITS(
struct amap_i_t_dpdu_cqe_v2,
cid, sol);
- else
- cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
- cid, sol);
+ else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, sol);
}
cri_index = BE_GET_CRI_FROM_CID(cid);
@@ -2010,8 +2008,7 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Invalid CQE Event Received Code : %d"
- "CID 0x%x...\n",
+ "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
code, cid);
break;
}
@@ -3001,7 +2998,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
void *eq_vaddress;
dma_addr_t paddr;
- num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
+ num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
sizeof(struct be_eq_entry));
if (phba->pcidev->msix_enabled)
@@ -3034,8 +3031,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
BEISCSI_EQ_DELAY_DEF);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_cmd_eq_create"
- "Failed for EQ\n");
+ "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
goto create_eq_error;
}
@@ -3068,7 +3064,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
int ret = -ENOMEM;
dma_addr_t paddr;
- num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
+ num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
sizeof(struct sol_cqe));
for (i = 0; i < phba->num_cpus; i++) {
@@ -3090,8 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_fill_queue Failed "
- "for ISCSI CQ\n");
+ "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
goto create_cq_error;
}
@@ -3100,8 +3095,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
false, 0);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_cmd_eq_create"
- "Failed for ISCSI CQ\n");
+ "BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n");
goto create_cq_error;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
@@ -3226,8 +3220,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
phwi_context->be_def_dataq[ulp_num].id);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : DEFAULT PDU DATA RING CREATED"
- "on ULP : %d\n", ulp_num);
+ "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
+ ulp_num);
return 0;
}
@@ -3253,13 +3247,13 @@ beiscsi_post_template_hdr(struct beiscsi_hba *phba)
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Post Template HDR Failed for"
+ "BM_%d : Post Template HDR Failed for "
"ULP_%d\n", ulp_num);
return status;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : Template HDR Pages Posted for"
+ "BM_%d : Template HDR Pages Posted for "
"ULP_%d\n", ulp_num);
}
}
@@ -3374,18 +3368,17 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
} else {
idx++;
wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
- pa_addr_lo = mem_descr->mem_array[idx].\
+ pa_addr_lo = mem_descr->mem_array[idx].
bus_address.u.a64.address;
num_wrb_rings = mem_descr->mem_array[idx].size /
(phba->params.wrbs_per_cxn *
sizeof(struct iscsi_wrb));
pwrb_arr[num].virtual_address = wrb_vaddr;
- pwrb_arr[num].bus_address.u.a64.address\
- = pa_addr_lo;
+ pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
pwrb_arr[num].size = phba->params.wrbs_per_cxn *
sizeof(struct iscsi_wrb);
wrb_vaddr += pwrb_arr[num].size;
- pa_addr_lo += pwrb_arr[num].size;
+ pa_addr_lo += pwrb_arr[num].size;
num_wrb_rings--;
}
}
@@ -3858,8 +3851,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
int i, j;
mem_descr = phba->init_mem;
- i = 0;
- j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
dma_free_coherent(&phba->pcidev->dev,
@@ -3939,7 +3930,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
idx++;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : phba->io_sgl_hndl_avbl=%d"
+ "BM_%d : phba->io_sgl_hndl_avbl=%d "
"phba->eh_sgl_hndl_avbl=%d\n",
phba->io_sgl_hndl_avbl,
phba->eh_sgl_hndl_avbl);
@@ -3997,13 +3988,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
GFP_KERNEL);
if (!ptr_cid_info) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory"
- "for ULP_CID_INFO for ULP : %d\n",
- ulp_num);
ret = -ENOMEM;
goto free_memory;
-
}
/* Allocate memory for CID array */
@@ -4012,10 +3998,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(*ptr_cid_info->cid_array),
GFP_KERNEL);
if (!ptr_cid_info->cid_array) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory"
- "for CID_ARRAY for ULP : %d\n",
- ulp_num);
kfree(ptr_cid_info);
ptr_cid_info = NULL;
ret = -ENOMEM;
@@ -4033,9 +4015,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(struct iscsi_endpoint *),
GFP_KERNEL);
if (!phba->ep_array) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory in "
- "hba_setup_cid_tbls\n");
ret = -ENOMEM;
goto free_memory;
@@ -4045,10 +4024,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(struct beiscsi_conn *),
GFP_KERNEL);
if (!phba->conn_table) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory in"
- "hba_setup_cid_tbls\n");
-
kfree(phba->ep_array);
phba->ep_array = NULL;
ret = -ENOMEM;
@@ -4401,7 +4376,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of IO_SGL_ICD Failed"
+ "BM_%d : Alloc of IO_SGL_ICD Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
@@ -4412,7 +4387,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of WRB_HANDLE Failed"
+ "BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_io_hndls;
@@ -4428,10 +4403,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed "
"for the CID : %d\n",
- beiscsi_conn->
- beiscsi_conn_cid);
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
}
@@ -4446,10 +4420,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of WRB_HANDLE Failed"
+ "BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
- beiscsi_conn->
- beiscsi_conn_cid);
+ beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
}
beiscsi_conn->plogin_wrb_handle =
@@ -4467,10 +4440,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed "
"for the CID : %d\n",
- beiscsi_conn->
- beiscsi_conn_cid);
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
}
io_task->pwrb_handle =
@@ -4480,7 +4452,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of WRB_HANDLE Failed"
+ "BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
@@ -5318,7 +5290,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);
- phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
@@ -5745,6 +5717,7 @@ free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_disable_pcie_error_reporting(pcidev);
pci_set_drvdata(pcidev, NULL);
disable_pci:
pci_release_regions(pcidev);
@@ -5809,6 +5782,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = beiscsi_attr_is_visible,
.set_iface_param = beiscsi_iface_set_param,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 8439951d95ac..f2c49f0e5c8b 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -871,7 +871,7 @@ enum bfa_port_linkstate_rsn {
/*
* Initially flash content may be fff. On making LUN mask enable and disable
- * state chnage. when report lun command is being processed it goes from
+ * state change. When report lun command is being processed it goes from
* BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
* BFA_LUN_MASK_ACTIVE.
*/
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 11c0c3e6f014..4e3cef02f10f 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -369,13 +369,10 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event,
u16 misc, struct fchs_s *fchdr)
{
- struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
-
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];
@@ -389,13 +386,10 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
u32 pld_w0)
{
- struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
-
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];
@@ -3173,7 +3167,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
m->port_cfg = fcport->cfg;
m->msgtag = fcport->msgtag;
m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
- m->use_flash_cfg = fcport->use_flash_cfg;
+ m->use_flash_cfg = fcport->use_flash_cfg;
bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed300a279a38..f2996a9b2f63 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1213,7 +1213,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
* cleanup the command and return that I/O was successfully
* aborted.
*/
- rc = bnx2fc_abts_cleanup(io_req);
+ bnx2fc_abts_cleanup(io_req);
/* This only occurs when a task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
RRQ process in the case when the fw generated SCSI_CMD cmpl
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1e6d8f62ea3c..1b5f3e143f07 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -791,7 +791,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
return NULL;
shost->dma_boundary = cnic->pcidev->dma_mask;
shost->transportt = bnx2i_scsi_xport_template;
- shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+ shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = 512;
shost->max_cmd_len = 16;
@@ -1420,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
* Forcefully terminate all in progress connection recovery at the
* earliest, either in bind(), send_pdu(LOGIN), or conn_start()
*/
- if (bnx2i_adapter_ready(hba))
- return -EIO;
+ if (bnx2i_adapter_ready(hba)) {
+ ret_code = -EIO;
+ goto put_ep;
+ }
bnx2i_ep = ep->dd_data;
if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
/* Peer disconnect via FIN or RST */
- return -EINVAL;
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
if (bnx2i_ep->hba != hba) {
/* Error - TCP connection does not belong to this device
@@ -1441,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
"belong to hba (%s)\n",
hba->netdev->name);
- return -EEXIST;
+ ret_code = -EEXIST;
+ goto put_ep;
}
bnx2i_ep->conn = bnx2i_conn;
bnx2i_conn->ep = bnx2i_ep;
@@ -1458,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
bnx2i_put_rq_buf(bnx2i_conn, 0);
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+put_ep:
+ iscsi_put_endpoint(ep);
return ret_code;
}
@@ -2113,7 +2122,6 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
struct bnx2i_endpoint *bnx2i_ep;
struct bnx2i_conn *bnx2i_conn = NULL;
- struct iscsi_conn *conn = NULL;
struct bnx2i_hba *hba;
bnx2i_ep = ep->dd_data;
@@ -2126,11 +2134,8 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
msleep(250);
- if (bnx2i_ep->conn) {
+ if (bnx2i_ep->conn)
bnx2i_conn = bnx2i_ep->conn;
- conn = bnx2i_conn->cls_conn->dd_data;
- iscsi_suspend_queue(conn);
- }
hba = bnx2i_ep->hba;
mutex_lock(&hba->net_dev_lock);
@@ -2276,6 +2281,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = bnx2i_conn_destroy,
.attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 9b89c26ccfdb..fc7197abfcdf 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -198,8 +198,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
-
- if (driver_byte(result) == DRIVER_SENSE) {
+ if (result < 0)
+ return result;
+ if (scsi_sense_valid(&sshdr)) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 84d73f57292b..340785536998 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -406,14 +406,10 @@ static const char * const hostbyte_table[]={
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };
-static const char * const driverbyte_table[]={
-"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
-"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
-
const char *scsi_hostbyte_string(int result)
{
+ enum scsi_host_status hb = host_byte(result);
const char *hb_string = NULL;
- int hb = host_byte(result);
if (hb < ARRAY_SIZE(hostbyte_table))
hb_string = hostbyte_table[hb];
@@ -421,17 +417,6 @@ const char *scsi_hostbyte_string(int result)
}
EXPORT_SYMBOL(scsi_hostbyte_string);
-const char *scsi_driverbyte_string(int result)
-{
- const char *db_string = NULL;
- int db = driver_byte(result);
-
- if (db < ARRAY_SIZE(driverbyte_table))
- db_string = driverbyte_table[db];
- return db_string;
-}
-EXPORT_SYMBOL(scsi_driverbyte_string);
-
#define scsi_mlreturn_name(result) { result, #result }
static const struct value_name_pair scsi_mlreturn_arr[] = {
scsi_mlreturn_name(NEEDS_RETRY),
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 203f938fca7e..f949a4e00783 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 2c3491528d42..efb3e2b3398e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f078b3c4e083..8c7d4dda4cf2 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -337,7 +337,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
- unsigned int max_id, struct scsi_host_template *sht,
+ unsigned int max_conns, struct scsi_host_template *sht,
struct scsi_transport_template *stt)
{
struct cxgbi_hba *chba;
@@ -357,7 +357,7 @@ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
shost->transportt = stt;
shost->max_lun = max_lun;
- shost->max_id = max_id;
+ shost->max_id = max_conns - 1;
shost->max_channel = 0;
shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
- return err;
+ goto put_ep;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
- if (err)
- return -EINVAL;
+ if (err) {
+ err = -EINVAL;
+ goto put_ep;
+ }
/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
@@ -2968,7 +2972,6 @@ void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
ep, cep, cconn, csk, csk->state, csk->flags);
if (cconn && cconn->iconn) {
- iscsi_suspend_tx(cconn->iconn);
write_lock_bh(&csk->callback_lock);
cep->csk->user_data = NULL;
cconn->cep = NULL;
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ee11ec340654..df0ebabbf387 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -369,8 +369,7 @@ retry:
goto out;
}
- if (driver_byte(result) == DRIVER_SENSE) {
- result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+ if (result > 0 && scsi_sense_valid(&sshdr)) {
if (result & SAM_STAT_CHECK_CONDITION) {
switch (sshdr.sense_key) {
case NO_SENSE:
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index be87d5a7583d..24c7cefb0b78 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -160,22 +160,6 @@
#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
-/* cmd->result */
-#define RES_TARGET 0x000000FF /* Target State */
-#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
-#define RES_ENDMSG 0x0000FF00 /* End Message */
-#define RES_DID 0x00FF0000 /* DID_ codes */
-#define RES_DRV 0xFF000000 /* DRIVER_ codes */
-
-#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
-#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
-
-#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
-#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
-#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
-#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
-#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
-
#define TAG_NONE 255
/*
@@ -986,7 +970,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
/* Assume BAD_TARGET; will be cleared later */
- cmd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmd, DID_BAD_TARGET);
/* ignore invalid targets */
if (cmd->device->id >= acb->scsi_host->max_id ||
@@ -1013,7 +997,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
/* set callback and clear result in the command */
cmd->scsi_done = done;
- cmd->result = 0;
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
srb = list_first_entry_or_null(&acb->srb_free_list,
struct ScsiReqBlk, list);
@@ -1250,7 +1235,7 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
- cmd->result = DID_ABORT << 16;
+ set_host_byte(cmd, DID_ABORT);
return SUCCESS;
}
srb = find_cmd(cmd, &dcb->srb_going_list);
@@ -3178,6 +3163,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
scsi_sgtalbe(cmd));
status = srb->target_status;
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
if (srb->flag & AUTO_REQSENSE) {
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
pci_unmap_srb_sense(acb, srb);
@@ -3186,7 +3173,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*/
srb->flag &= ~AUTO_REQSENSE;
srb->adapter_status = 0;
- srb->target_status = CHECK_CONDITION << 1;
+ srb->target_status = SAM_STAT_CHECK_CONDITION;
if (debug_enabled(DBG_1)) {
switch (cmd->sense_buffer[2] & 0x0f) {
case NOT_READY:
@@ -3233,22 +3220,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*((unsigned int *)(cmd->sense_buffer + 3)));
}
- if (status == (CHECK_CONDITION << 1)) {
- cmd->result = DID_BAD_TARGET << 16;
+ if (status == SAM_STAT_CHECK_CONDITION) {
+ set_host_byte(cmd, DID_BAD_TARGET);
goto ckc_e;
}
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
- if (srb->total_xfer_length
- && srb->total_xfer_length >= cmd->underflow)
- cmd->result =
- MK_RES_LNX(DRIVER_SENSE, DID_OK,
- srb->end_message, CHECK_CONDITION);
- /*SET_RES_DID(cmd->result,DID_OK) */
- else
- cmd->result =
- MK_RES_LNX(DRIVER_SENSE, DID_OK,
- srb->end_message, CHECK_CONDITION);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
goto ckc_e;
}
@@ -3258,10 +3236,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*
* target status..........................
*/
- if (status >> 1 == CHECK_CONDITION) {
+ if (status == SAM_STAT_CHECK_CONDITION) {
request_sense(acb, dcb, srb);
return;
- } else if (status >> 1 == QUEUE_FULL) {
+ } else if (status == SAM_STAT_TASK_SET_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
dcb->target_id, dcb->target_lun, tempcnt);
@@ -3277,13 +3255,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} else if (status == SCSI_STAT_SEL_TIMEOUT) {
srb->adapter_status = H_SEL_TIMEOUT;
srb->target_status = 0;
- cmd->result = DID_NO_CONNECT << 16;
+ set_host_byte(cmd, DID_NO_CONNECT);
} else {
srb->adapter_status = 0;
- SET_RES_DID(cmd->result, DID_ERROR);
- SET_RES_MSG(cmd->result, srb->end_message);
- SET_RES_TARGET(cmd->result, status);
-
+ set_host_byte(cmd, DID_ERROR);
+ set_status_byte(cmd, status);
}
} else {
/*
@@ -3292,16 +3268,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
status = srb->adapter_status;
if (status & H_OVER_UNDER_RUN) {
srb->target_status = 0;
- SET_RES_DID(cmd->result, DID_OK);
- SET_RES_MSG(cmd->result, srb->end_message);
+ scsi_msg_to_host_byte(cmd, srb->end_message);
} else if (srb->status & PARITY_ERROR) {
- SET_RES_DID(cmd->result, DID_PARITY);
- SET_RES_MSG(cmd->result, srb->end_message);
+ set_host_byte(cmd, DID_PARITY);
} else { /* No error */
srb->adapter_status = 0;
srb->target_status = 0;
- SET_RES_DID(cmd->result, DID_OK);
}
}
@@ -3322,15 +3295,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
ptr = (struct ScsiInqData *)(base + offset);
- if (!ckc_only && (cmd->result & RES_DID) == 0
+ if (!ckc_only && get_host_byte(cmd) == DID_OK
&& cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
&& dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
- if ((cmd->result == (DID_OK << 16) ||
- status_byte(cmd->result) == CHECK_CONDITION)) {
+ if ((get_host_byte(cmd) == DID_OK) ||
+ (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;
@@ -3357,7 +3330,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (srb != acb->tmp_srb) {
/* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
- cmd, cmd->result);
+ cmd, cmd->result);
list_move_tail(&srb->list, &acb->srb_free_list);
} else {
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
@@ -3381,16 +3354,14 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *p;
list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
- int result;
-
p = srb->cmd;
- result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
list_del(&srb->list);
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
- p->result = result;
+ set_host_byte(p, did_flag);
+ set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
@@ -3411,14 +3382,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
/* Waiting queue */
list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
- int result;
p = srb->cmd;
- result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
list_move_tail(&srb->list, &acb->srb_free_list);
- p->result = result;
+ set_host_byte(p, did_flag);
+ set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index efa8c0381476..37d06f993b76 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -88,6 +88,7 @@ struct alua_dh_data {
struct scsi_device *sdev;
int init_error;
struct mutex init_mutex;
+ bool disabled;
};
struct alua_queue_data {
@@ -517,7 +518,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
int len, k, off, bufflen = ALUA_RTPG_SIZE;
int group_id_old, state_old, pref_old, valid_states_old;
unsigned char *desc, *buff;
- unsigned err, retval;
+ unsigned err;
+ int retval;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
@@ -562,13 +564,15 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
kfree(buff);
return SCSI_DH_OK;
}
- if (!scsi_sense_valid(&sense_hdr)) {
+ if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
ALUA_DH_NAME, retval);
kfree(buff);
- if (driver_byte(retval) == DRIVER_ERROR)
+ if (retval < 0)
return SCSI_DH_DEV_TEMP_BUSY;
+ if (host_byte(retval) == DID_NO_CONNECT)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
return SCSI_DH_IO;
}
@@ -791,11 +795,11 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
if (retval) {
- if (!scsi_sense_valid(&sense_hdr)) {
+ if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: stpg failed, result %d",
ALUA_DH_NAME, retval);
- if (driver_byte(retval) == DRIVER_ERROR)
+ if (retval < 0)
return SCSI_DH_DEV_TEMP_BUSY;
} else {
sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
@@ -807,6 +811,51 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
return SCSI_DH_RETRY;
}
+static bool alua_rtpg_select_sdev(struct alua_port_group *pg)
+{
+ struct alua_dh_data *h;
+ struct scsi_device *sdev = NULL;
+
+ lockdep_assert_held(&pg->lock);
+ if (WARN_ON(!pg->rtpg_sdev))
+ return false;
+
+ /*
+ * RCU protection isn't necessary for dh_list here
+ * as we hold pg->lock, but it is needed for access to h->pg.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &pg->dh_list, node) {
+ if (!h->sdev)
+ continue;
+ if (h->sdev == pg->rtpg_sdev) {
+ h->disabled = true;
+ continue;
+ }
+ if (rcu_dereference(h->pg) == pg &&
+ !h->disabled &&
+ !scsi_device_get(h->sdev)) {
+ sdev = h->sdev;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sdev) {
+ pr_warn("%s: no device found for rtpg\n",
+ (pg->device_id_len ?
+ (char *)pg->device_id_str : "(nameless PG)"));
+ return false;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");
+
+ scsi_device_put(pg->rtpg_sdev);
+ pg->rtpg_sdev = sdev;
+
+ return true;
+}
+
static void alua_rtpg_work(struct work_struct *work)
{
struct alua_port_group *pg =
@@ -815,6 +864,7 @@ static void alua_rtpg_work(struct work_struct *work)
LIST_HEAD(qdata_list);
int err = SCSI_DH_OK;
struct alua_queue_data *qdata, *tmp;
+ struct alua_dh_data *h;
unsigned long flags;
spin_lock_irqsave(&pg->lock, flags);
@@ -848,9 +898,18 @@ static void alua_rtpg_work(struct work_struct *work)
}
err = alua_rtpg(sdev, pg);
spin_lock_irqsave(&pg->lock, flags);
- if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+
+ /* If RTPG failed on the current device, try using another */
+ if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
+ alua_rtpg_select_sdev(pg))
+ err = SCSI_DH_IMM_RETRY;
+
+ if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY ||
+ pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
- if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ if (err == SCSI_DH_IMM_RETRY)
+ pg->interval = 0;
+ else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
@@ -878,6 +937,12 @@ static void alua_rtpg_work(struct work_struct *work)
}
list_splice_init(&pg->rtpg_list, &qdata_list);
+ /*
+ * We went through an RTPG, for good or bad.
+ * Re-enable all devices for the next attempt.
+ */
+ list_for_each_entry(h, &pg->dh_list, node)
+ h->disabled = false;
pg->rtpg_sdev = NULL;
spin_unlock_irqrestore(&pg->lock, flags);
@@ -962,6 +1027,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
int err = SCSI_DH_DEV_UNSUPP, tpgs;
mutex_lock(&h->init_mutex);
+ h->disabled = false;
tpgs = alua_check_tpgs(sdev);
if (tpgs != TPGS_MODE_NONE)
err = alua_check_vpd(sdev, h, tpgs);
@@ -1080,7 +1146,6 @@ static void alua_check(struct scsi_device *sdev, bool force)
return;
}
rcu_read_unlock();
-
alua_rtpg_queue(pg, sdev, NULL, force);
kref_put(&pg->kref, release_port_group);
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 25f6e1ac9e7b..66652ab409cc 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
else {
- list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev;
+ list_add_rcu(&h->node, &h->ctlr->dh_list);
}
spin_unlock(&list_lock);
err = SCSI_DH_OK;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock);
if (h->ctlr) {
list_del_rcu(&h->node);
- h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
}
spin_unlock(&list_lock);
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}
diff --git a/drivers/scsi/elx/Kconfig b/drivers/scsi/elx/Kconfig
new file mode 100644
index 000000000000..831daea7a951
--- /dev/null
+++ b/drivers/scsi/elx/Kconfig
@@ -0,0 +1,9 @@
+config SCSI_EFCT
+ tristate "Emulex Fibre Channel Target"
+ depends on PCI && SCSI
+ depends on TARGET_CORE
+ depends on SCSI_FC_ATTRS
+ select CRC_T10DIF
+ help
+ The efct driver provides enhanced SCSI Target Mode
+ support for specific SLI-4 adapters.
diff --git a/drivers/scsi/elx/Makefile b/drivers/scsi/elx/Makefile
new file mode 100644
index 000000000000..a8537d7a2a6e
--- /dev/null
+++ b/drivers/scsi/elx/Makefile
@@ -0,0 +1,18 @@
+#// SPDX-License-Identifier: GPL-2.0
+#/*
+# * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+# */
+
+
+obj-$(CONFIG_SCSI_EFCT) := efct.o
+
+efct-objs := efct/efct_driver.o efct/efct_io.o efct/efct_scsi.o \
+ efct/efct_xport.o efct/efct_hw.o efct/efct_hw_queues.o \
+ efct/efct_lio.o efct/efct_unsol.o
+
+efct-objs += libefc/efc_cmds.o libefc/efc_domain.o libefc/efc_fabric.o \
+ libefc/efc_node.o libefc/efc_nport.o libefc/efc_device.o \
+ libefc/efclib.o libefc/efc_sm.o libefc/efc_els.o
+
+efct-objs += libefc_sli/sli4.o
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
new file mode 100644
index 000000000000..eab68fd9337a
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+
+#include "efct_hw.h"
+#include "efct_unsol.h"
+#include "efct_scsi.h"
+
+LIST_HEAD(efct_devices);
+
+static int logmask;
+module_param(logmask, int, 0444);
+MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
+
+static struct libefc_function_template efct_libefc_templ = {
+ .issue_mbox_rqst = efct_issue_mbox_rqst,
+ .send_els = efct_els_hw_srrs_send,
+ .send_bls = efct_efc_bls_send,
+
+ .new_nport = efct_scsi_tgt_new_nport,
+ .del_nport = efct_scsi_tgt_del_nport,
+ .scsi_new_node = efct_scsi_new_initiator,
+ .scsi_del_node = efct_scsi_del_initiator,
+ .hw_seq_free = efct_efc_hw_sequence_free,
+};
+
+static int
+efct_device_init(void)
+{
+ int rc;
+
+ /* driver-wide init for target-server */
+ rc = efct_scsi_tgt_driver_init();
+ if (rc) {
+ pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = efct_scsi_reg_fc_transport();
+ if (rc) {
+ pr_err("failed to register to FC host\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void
+efct_device_shutdown(void)
+{
+ efct_scsi_release_fc_transport();
+
+ efct_scsi_tgt_driver_exit();
+}
+
+static void *
+efct_device_alloc(u32 nid)
+{
+ struct efct *efct = NULL;
+
+ efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
+ if (!efct)
+ return efct;
+
+ INIT_LIST_HEAD(&efct->list_entry);
+ list_add_tail(&efct->list_entry, &efct_devices);
+
+ return efct;
+}
+
+static void
+efct_teardown_msix(struct efct *efct)
+{
+ u32 i;
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+ }
+
+ pci_free_irq_vectors(efct->pci);
+}
+
+static int
+efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
+{
+ struct efc *efc;
+ struct sli4 *sli;
+ int rc = 0;
+
+ efc = kzalloc(sizeof(*efc), GFP_KERNEL);
+ if (!efc)
+ return -ENOMEM;
+
+ efct->efcport = efc;
+
+ memcpy(&efc->tt, tt, sizeof(*tt));
+ efc->base = efct;
+ efc->pci = efct->pci;
+
+ efc->def_wwnn = efct_get_wwnn(&efct->hw);
+ efc->def_wwpn = efct_get_wwpn(&efct->hw);
+ efc->enable_tgt = 1;
+ efc->log_level = EFC_LOG_LIB;
+
+ sli = &efct->hw.sli;
+ efc->max_xfer_size = sli->sge_supported_length *
+ sli_get_max_sgl(&efct->hw.sli);
+ efc->sli = sli;
+ efc->fcfi = efct->hw.fcf_indicator;
+
+ rc = efcport_init(efc);
+ if (rc)
+ efc_log_err(efc, "efcport_init failed\n");
+
+ return rc;
+}
+
+static int efct_request_firmware_update(struct efct *efct);
+
+static const char*
+efct_pci_model(u16 device)
+{
+ switch (device) {
+ case EFCT_DEVICE_LANCER_G6: return "LPE31004";
+ case EFCT_DEVICE_LANCER_G7: return "LPE36000";
+ default: return "unknown";
+ }
+}
+
+static int
+efct_device_attach(struct efct *efct)
+{
+ u32 rc = 0, i = 0;
+
+ if (efct->attached) {
+ efc_log_err(efct, "Device is already attached\n");
+ return -EIO;
+ }
+
+ snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
+ efct->instance_index);
+
+ efct->logmask = logmask;
+ efct->filter_def = EFCT_DEFAULT_FILTER;
+ efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
+
+ efct->model = efct_pci_model(efct->pci->device);
+
+ efct->efct_req_fw_upgrade = true;
+
+ /* Allocate transport object and bring online */
+ efct->xport = efct_xport_alloc(efct);
+ if (!efct->xport) {
+ efc_log_err(efct, "failed to allocate transport object\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = efct_xport_attach(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to attach transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_xport_initialize(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_efclib_config(efct, &efct_libefc_templ);
+ if (rc) {
+ efc_log_err(efct, "failed to init efclib\n");
+ goto efclib_out;
+ }
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d enabled\n", i);
+ enable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ efct->attached = true;
+
+ if (efct->efct_req_fw_upgrade)
+ efct_request_firmware_update(efct);
+
+ return rc;
+
+efclib_out:
+ efct_xport_detach(efct->xport);
+xport_out:
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+out:
+ return rc;
+}
+
+static int
+efct_device_detach(struct efct *efct)
+{
+ int i;
+
+ if (!efct || !efct->attached) {
+ pr_err("Device is not attached\n");
+ return -EIO;
+ }
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
+ efc_log_err(efct, "Transport Shutdown timed out\n");
+
+ for (i = 0; i < efct->n_msix_vec; i++)
+ disable_irq(pci_irq_vector(efct->pci, i));
+
+ efct_xport_detach(efct->xport);
+
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+
+ efcport_destroy(efct->efcport);
+ kfree(efct->efcport);
+
+ efct->attached = false;
+
+ return 0;
+}
+
+static void
+efct_fw_write_cb(int status, u32 actual_write_length,
+ u32 change_status, void *arg)
+{
+ struct efct_fw_write_result *result = arg;
+
+ result->status = status;
+ result->actual_xfer = actual_write_length;
+ result->change_status = change_status;
+
+ complete(&result->done);
+}
+
+static int
+efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
+ u8 *change_status)
+{
+ int rc = 0;
+ u32 bytes_left;
+ u32 xfer_size;
+ u32 offset;
+ struct efc_dma dma;
+ int last = 0;
+ struct efct_fw_write_result result;
+
+ init_completion(&result.done);
+
+ bytes_left = buf_len;
+ offset = 0;
+
+ dma.size = FW_WRITE_BUFSIZE;
+ dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ dma.size, &dma.phys, GFP_DMA);
+ if (!dma.virt)
+ return -ENOMEM;
+
+ while (bytes_left > 0) {
+ if (bytes_left > FW_WRITE_BUFSIZE)
+ xfer_size = FW_WRITE_BUFSIZE;
+ else
+ xfer_size = bytes_left;
+
+ memcpy(dma.virt, buf + offset, xfer_size);
+
+ if (bytes_left == xfer_size)
+ last = 1;
+
+ efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
+ last, efct_fw_write_cb, &result);
+
+ if (wait_for_completion_interruptible(&result.done) != 0) {
+ rc = -ENXIO;
+ break;
+ }
+
+ if (result.actual_xfer == 0 || result.status != 0) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (last)
+ *change_status = result.change_status;
+
+ bytes_left -= result.actual_xfer;
+ offset += result.actual_xfer;
+ }
+
+ dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
+ return rc;
+}
+
+static int
+efct_fw_reset(struct efct *efct)
+{
+ /*
+ * Firmware reset to activate the new firmware.
+ * Function 0 will update and load the new firmware
+ * during attach.
+ */
+ if (timer_pending(&efct->xport->stats_timer))
+ del_timer(&efct->xport->stats_timer);
+
+ if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
+ efc_log_info(efct, "failed to reset firmware\n");
+ return -EIO;
+ }
+
+ efc_log_info(efct, "successfully reset firmware. Now resetting port\n");
+
+ efct_device_detach(efct);
+ return efct_device_attach(efct);
+}
+
+static int
+efct_request_firmware_update(struct efct *efct)
+{
+ int rc = 0;
+ u8 file_name[256], fw_change_status = 0;
+ const struct firmware *fw;
+ struct efct_hw_grp_hdr *fw_image;
+
+ snprintf(file_name, 256, "%s.grp", efct->model);
+
+ rc = request_firmware(&fw, file_name, &efct->pci->dev);
+ if (rc) {
+ efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
+ return rc;
+ }
+
+ fw_image = (struct efct_hw_grp_hdr *)fw->data;
+
+ if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
+ strnlen(fw_image->revision, 16))) {
+ efc_log_debug(efct,
+ "Skip update. Firmware is already up to date.\n");
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
+ efct->hw.sli.fw_name[0], fw_image->revision);
+
+ rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
+ if (rc) {
+ efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware updated successfully\n");
+ switch (fw_change_status) {
+ case 0x00:
+ efc_log_info(efct, "New firmware is active.\n");
+ break;
+ case 0x01:
+ efc_log_info(efct,
+ "System reboot needed to activate the new firmware\n");
+ break;
+ case 0x02:
+ case 0x03:
+ efc_log_info(efct,
+ "firmware reset to activate the new firmware\n");
+ efct_fw_reset(efct);
+ break;
+ default:
+ efc_log_info(efct, "Unexpected value change_status:%d\n",
+ fw_change_status);
+ break;
+ }
+
+exit:
+ release_firmware(fw);
+
+ return rc;
+}
+
+static void
+efct_device_free(struct efct *efct)
+{
+ if (efct) {
+ list_del(&efct->list_entry);
+ kfree(efct);
+ }
+}
+
+static int
+efct_device_interrupts_required(struct efct *efct)
+{
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc < 0)
+ return rc;
+
+ return efct->hw.config.n_eq;
+}
+
+static irqreturn_t
+efct_intr_thread(int irq, void *handle)
+{
+ struct efct_intr_context *intr_ctx = handle;
+ struct efct *efct = intr_ctx->efct;
+
+ efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+efct_intr_msix(int irq, void *handle)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static int
+efct_setup_msix(struct efct *efct, u32 num_intrs)
+{
+ int rc = 0, i;
+
+ if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
+ dev_err(&efct->pci->dev,
+ "%s : MSI-X not available\n", __func__);
+ return -EIO;
+ }
+
+ efct->n_msix_vec = num_intrs;
+
+ rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+
+ if (rc < 0) {
+ dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < num_intrs; i++) {
+ struct efct_intr_context *intr_ctx = NULL;
+
+ intr_ctx = &efct->intr_context[i];
+ intr_ctx->efct = efct;
+ intr_ctx->index = i;
+
+ rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
+ efct_intr_msix, efct_intr_thread, 0,
+ EFCT_DRIVER_NAME, intr_ctx);
+ if (rc) {
+ dev_err(&efct->pci->dev,
+ "Failed to register %d vector: %d\n", i, rc);
+ goto out;
+ }
+ }
+
+ return rc;
+
+out:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+
+ pci_free_irq_vectors(efct->pci);
+ return rc;
+}
+
+static struct pci_device_id efct_pci_table[] = {
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
+ {} /* terminate list */
+};
+
+static int
+efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct efct *efct = NULL;
+ int rc;
+ u32 i, r;
+ int num_interrupts = 0;
+ int nid;
+
+ dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = pci_set_mwi(pdev);
+ if (rc) {
+ dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
+ goto mwi_out;
+ }
+
+ rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
+ goto req_regions_out;
+ }
+
+ /* Fetch the Numa node id for this device */
+ nid = dev_to_node(&pdev->dev);
+ if (nid < 0) {
+ dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
+ nid = 0;
+ }
+
+ /* Allocate efct */
+ efct = efct_device_alloc(nid);
+ if (!efct) {
+ dev_err(&pdev->dev, "Failed to allocate efct\n");
+ rc = -ENOMEM;
+ goto alloc_out;
+ }
+
+ efct->pci = pdev;
+ efct->numa_node = nid;
+
+ /* Map all memory BARs */
+ for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ efct->reg[r] = ioremap(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ r++;
+ }
+
+ /*
+ * If the 64-bit attribute is set, both this BAR and the
+ * next form the complete address. Skip processing the
+ * next BAR.
+ */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
+ i++;
+ }
+
+ pci_set_drvdata(pdev, efct);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ rc = -1;
+ goto dma_mask_out;
+ }
+ }
+
+ num_interrupts = efct_device_interrupts_required(efct);
+ if (num_interrupts < 0) {
+ efc_log_err(efct, "efct_device_interrupts_required failed\n");
+ rc = -1;
+ goto dma_mask_out;
+ }
+
+ /*
+ * Initialize MSIX interrupts, note,
+ * efct_setup_msix() enables the interrupt
+ */
+ rc = efct_setup_msix(efct, num_interrupts);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't setup msix\n");
+ goto dma_mask_out;
+ }
+ /* Disable interrupt for now */
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d disabled\n", i);
+ disable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ rc = efct_device_attach(efct);
+ if (rc)
+ goto attach_out;
+
+ return 0;
+
+attach_out:
+ efct_teardown_msix(efct);
+dma_mask_out:
+ pci_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+ efct_device_free(efct);
+alloc_out:
+ pci_release_regions(pdev);
+req_regions_out:
+ pci_clear_mwi(pdev);
+mwi_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void
+efct_pci_remove(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ u32 i;
+
+ if (!efct)
+ return;
+
+ efct_device_detach(efct);
+
+ efct_teardown_msix(efct);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+
+ efct_device_free(efct);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
+{
+ if (efct) {
+ efc_log_debug(efct,
+ "PCI channel disable preparing for reset\n");
+ efct_device_detach(efct);
+ /* Disable interrupt and pci device */
+ efct_teardown_msix(efct);
+ }
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_recover(struct efct *efct)
+{
+ if (efct) {
+ efc_log_debug(efct, "PCI channel preparing for recovery\n");
+ efct_hw_io_abort_all(&efct->hw);
+ }
+}
+
+/**
+ * efct_pci_io_error_detected - method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the device error handling routine, which will perform the proper
+ * error recovery action.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ pci_ers_result_t rc;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ efct_device_prep_for_recover(efct);
+ rc = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
+ case pci_channel_io_frozen:
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ case pci_channel_io_perm_failure:
+ efct_device_detach(efct);
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ }
+
+ return rc;
+}
+
+static pci_ers_result_t
+efct_pci_io_slot_reset(struct pci_dev *pdev)
+{
+ int rc;
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ efc_log_err(efct, "failed to enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /*
+ * Since pci_restore_state() clears the device's saved_state flag,
+ * save the restored state again.
+ */
+
+ pci_save_state(pdev);
+
+ pci_set_master(pdev);
+
+ rc = efct_setup_msix(efct, efct->n_msix_vec);
+ if (rc)
+ efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
+ rc);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+ /* Bring device to online*/
+ efct_device_attach(efct);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void
+efct_pci_io_resume(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+ /* Bring device to online*/
+ efct_device_attach(efct);
+}
+
+MODULE_DEVICE_TABLE(pci, efct_pci_table);
+
+static struct pci_error_handlers efct_pci_err_handler = {
+ .error_detected = efct_pci_io_error_detected,
+ .slot_reset = efct_pci_io_slot_reset,
+ .resume = efct_pci_io_resume,
+};
+
+static struct pci_driver efct_pci_driver = {
+ .name = EFCT_DRIVER_NAME,
+ .id_table = efct_pci_table,
+ .probe = efct_pci_probe,
+ .remove = efct_pci_remove,
+ .err_handler = &efct_pci_err_handler,
+};
+
+static
+int __init efct_init(void)
+{
+ int rc;
+
+ rc = efct_device_init();
+ if (rc) {
+ pr_err("efct_device_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = pci_register_driver(&efct_pci_driver);
+ if (rc) {
+ pr_err("pci_register_driver failed rc=%d\n", rc);
+ efct_device_shutdown();
+ }
+
+ return rc;
+}
+
+static void __exit efct_exit(void)
+{
+ pci_unregister_driver(&efct_pci_driver);
+ efct_device_shutdown();
+}
+
+module_init(efct_init);
+module_exit(efct_exit);
+MODULE_VERSION(EFCT_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h
new file mode 100644
index 000000000000..dab8eac4f243
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_DRIVER_H__)
+#define __EFCT_DRIVER_H__
+
+/***************************************************************************
+ * OS specific includes
+ */
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include "../include/efc_common.h"
+#include "../libefc/efclib.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+#include "efct_xport.h"
+
+#define EFCT_DRIVER_NAME "efct"
+#define EFCT_DRIVER_VERSION "1.0.0.0"
+
+/* EFCT_DEFAULT_FILTER-
+ * MRQ filter to segregate the IO flow.
+ */
+#define EFCT_DEFAULT_FILTER "0x01ff22ff,0,0,0"
+
+/* EFCT_OS_MAX_ISR_TIME_MSEC -
+ * maximum time driver code should spend in an interrupt
+ * or kernel thread context without yielding
+ */
+#define EFCT_OS_MAX_ISR_TIME_MSEC 1000
+
+#define EFCT_FC_MAX_SGL 64
+#define EFCT_FC_DIF_SEED 0
+
+/* Watermark */
+#define EFCT_WATERMARK_HIGH_PCT 90
+#define EFCT_WATERMARK_LOW_PCT 80
+#define EFCT_IO_WATERMARK_PER_INITIATOR 8
+
+#define EFCT_PCI_MAX_REGS 6
+#define MAX_PCI_INTERRUPTS 16
+
+struct efct_intr_context {
+ struct efct *efct;
+ u32 index;
+};
+
+struct efct {
+ struct pci_dev *pci;
+ void __iomem *reg[EFCT_PCI_MAX_REGS];
+
+ u32 n_msix_vec;
+ bool attached;
+ bool soft_wwn_enable;
+ u8 efct_req_fw_upgrade;
+ struct efct_intr_context intr_context[MAX_PCI_INTERRUPTS];
+ u32 numa_node;
+
+ char name[EFC_NAME_LENGTH];
+ u32 instance_index;
+ struct list_head list_entry;
+ struct efct_scsi_tgt tgt_efct;
+ struct efct_xport *xport;
+ struct efc *efcport;
+ struct Scsi_Host *shost;
+ int logmask;
+ u32 max_isr_time_msec;
+
+ const char *desc;
+
+ const char *model;
+
+ struct efct_hw hw;
+
+ u32 rq_selection_policy;
+ char *filter_def;
+ int topology;
+
+ /* Look up for target node */
+ struct xarray lookup;
+
+ /*
+ * Target IO timer value:
+ * Zero: target command timeout disabled.
+ * Non-zero: Timeout value, in seconds, for target commands
+ */
+ u32 target_io_timer_sec;
+
+ int speed;
+ struct dentry *sess_debugfs_dir;
+};
+
+#define FW_WRITE_BUFSIZE (64 * 1024)
+
+struct efct_fw_write_result {
+ struct completion done;
+ int status;
+ u32 actual_xfer;
+ u32 change_status;
+};
+
+extern struct list_head efct_devices;
+
+#endif /* __EFCT_DRIVER_H__ */
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
new file mode 100644
index 000000000000..ba8256b4c782
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -0,0 +1,3581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+struct efct_hw_link_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_host_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_fw_wr_cb_arg {
+ void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
+ void *arg;
+};
+
+struct efct_mbox_rqst_ctx {
+ int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
+ void *arg;
+};
+
+static int
+efct_hw_link_event_init(struct efct_hw *hw)
+{
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ hw->link.topology = SLI4_LINK_TOPO_NONE;
+ hw->link.medium = SLI4_LINK_MEDIUM_MAX;
+ hw->link.speed = 0;
+ hw->link.loop_map = NULL;
+ hw->link.fc_id = U32_MAX;
+
+ return 0;
+}
+
+static int
+efct_hw_read_max_dump_size(struct efct_hw *hw)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ struct efct *efct = hw->os;
+ int rc = 0;
+ struct sli4_rsp_cmn_set_dump_location *rsp;
+
+ /* attempt to determine the dump size for function 0 only. */
+ if (PCI_FUNC(efct->pci->devfn) != 0)
+ return rc;
+
+ if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
+ return -EIO;
+
+ rsp = (struct sli4_rsp_cmn_set_dump_location *)
+ (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc != 0) {
+ efc_log_debug(hw->os, "set dump location cmd failed\n");
+ return rc;
+ }
+
+ hw->dump_size =
+ le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+
+ efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
+
+ return rc;
+}
+
+static int
+__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_topology *read_topo =
+ (struct sli4_cmd_read_topology *)mqe;
+ u8 speed;
+ struct efc_domain_record drec = {0};
+ struct efct *efct = hw->os;
+
+ if (status || le16_to_cpu(read_topo->hdr.status)) {
+ efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(read_topo->hdr.status));
+ return -EIO;
+ }
+
+ switch (le32_to_cpu(read_topo->dw2_attentype) &
+ SLI4_READTOPO_ATTEN_TYPE) {
+ case SLI4_READ_TOPOLOGY_LINK_UP:
+ hw->link.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_DOWN:
+ hw->link.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
+ hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ break;
+ }
+
+ switch (read_topo->topology) {
+ case SLI4_READ_TOPO_NON_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_READ_TOPO_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_FC_AL;
+ if (hw->link.status == SLI4_LINK_STATUS_UP)
+ hw->link.loop_map = hw->loop_map.virt;
+ hw->link.fc_id = read_topo->acquired_al_pa;
+ break;
+ default:
+ hw->link.topology = SLI4_LINK_TOPO_MAX;
+ break;
+ }
+
+ hw->link.medium = SLI4_LINK_MEDIUM_FC;
+
+ speed = (le32_to_cpu(read_topo->currlink_state) &
+ SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
+ switch (speed) {
+ case SLI4_READ_TOPOLOGY_SPEED_1G:
+ hw->link.speed = 1 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_2G:
+ hw->link.speed = 2 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_4G:
+ hw->link.speed = 4 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_8G:
+ hw->link.speed = 8 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_16G:
+ hw->link.speed = 16 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_32G:
+ hw->link.speed = 32 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_64G:
+ hw->link.speed = 64 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_128G:
+ hw->link.speed = 128 * 1000;
+ break;
+ }
+
+ drec.speed = hw->link.speed;
+ drec.fc_id = hw->link.fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link(void *ctx, void *e)
+{
+ struct efct_hw *hw = ctx;
+ struct sli4_link_event *event = e;
+ struct efc_domain *d = NULL;
+ int rc = 0;
+ struct efct *efct = hw->os;
+
+ efct_hw_link_event_init(hw);
+
+ switch (event->status) {
+ case SLI4_LINK_STATUS_UP:
+
+ hw->link = *event;
+ efct->efcport->link_status = EFC_LINK_STATUS_UP;
+
+ if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
+ struct efc_domain_record drec = {0};
+
+ efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
+ event->speed);
+ drec.speed = event->speed;
+ drec.fc_id = event->fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
+ &drec);
+ } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
+ event->speed);
+
+ if (!sli_cmd_read_topology(&hw->sli, buf,
+ &hw->loop_map)) {
+ rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
+ __efct_read_topology_cb, NULL);
+ }
+
+ if (rc)
+ efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
+ } else {
+ efc_log_info(hw->os, "%s(%#x), speed is %d\n",
+ "Link Up, unsupported topology ",
+ event->topology, event->speed);
+ }
+ break;
+ case SLI4_LINK_STATUS_DOWN:
+ efc_log_info(hw->os, "Link down\n");
+
+ hw->link.status = event->status;
+ efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
+
+ d = efct->efcport->domain;
+ if (d)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
+ break;
+ default:
+ efc_log_debug(hw->os, "unhandled link status %#x\n",
+ event->status);
+ break;
+ }
+
+ return 0;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
+{
+ u32 i, max_sgl, cpus;
+
+ if (hw->hw_setup_called)
+ return 0;
+
+ /*
+ * efct_hw_init() relies on NULL pointers indicating that a structure
+ * needs allocation. If a structure is non-NULL, efct_hw_init() won't
+ * free/realloc that memory
+ */
+ memset(hw, 0, sizeof(struct efct_hw));
+
+ hw->hw_setup_called = true;
+
+ hw->os = os;
+
+ mutex_init(&hw->bmbx_lock);
+ spin_lock_init(&hw->cmd_lock);
+ INIT_LIST_HEAD(&hw->cmd_head);
+ INIT_LIST_HEAD(&hw->cmd_pending);
+ hw->cmd_head_count = 0;
+
+ /* Create mailbox command ctx pool */
+ hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_command_ctx));
+ if (!hw->cmd_ctx_pool) {
+ efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
+ return -EIO;
+ }
+
+ /* Create mailbox request ctx pool for library callback */
+ hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_mbox_rqst_ctx));
+ if (!hw->mbox_rqst_pool) {
+ efc_log_err(hw->os, "failed to allocate mbox request pool\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&hw->io_lock);
+ INIT_LIST_HEAD(&hw->io_inuse);
+ INIT_LIST_HEAD(&hw->io_free);
+ INIT_LIST_HEAD(&hw->io_wait_free);
+
+ atomic_set(&hw->io_alloc_failed_count, 0);
+
+ hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
+ if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
+ efc_log_err(hw->os, "SLI setup failed\n");
+ return -EIO;
+ }
+
+ efct_hw_link_event_init(hw);
+
+ sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
+
+ /*
+ * Set all the queue sizes to the maximum allowed.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
+ hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
+ /*
+ * Adjust the size of the WQs so that the CQ is twice as big as
+ * the WQ to allow for 2 completions per IO. This allows us to
+ * handle multi-phase as well as aborts.
+ */
+ hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
+
+ /*
+ * The RQ assignment for RQ pair mode.
+ */
+
+ hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
+ hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
+
+ cpus = num_possible_cpus();
+ hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
+
+ max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
+ max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
+ hw->config.n_sgl = max_sgl;
+
+ (void)efct_hw_read_max_dump_size(hw);
+
+ return 0;
+}
+
+static void
+efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
+{
+ efc_log_info(hw->os,
+ "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
+ j, hw->config.filter_def[j], i, id);
+}
+
+static inline void
+efct_hw_init_free_io(struct efct_hw_io *io)
+{
+ /*
+ * Set io->done to NULL, to avoid any callbacks, should
+ * a completion be received for one of these IOs
+ */
+ io->done = NULL;
+ io->abort_done = NULL;
+ io->status_saved = false;
+ io->abort_in_progress = false;
+ io->type = 0xFFFF;
+ io->wq = NULL;
+}
+
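+/*
+ * Only ELS and CT requests are exchanges originated by this port; the
+ * WCQE processing below uses this to decide whether an internal abort
+ * should be issued when a completion returns with the XB flag set.
+ */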
+static bool efct_hw_iotype_is_originator(u16 io_type)
+{
+ switch (io_type) {
+ case EFCT_HW_FC_CT:
+ case EFCT_HW_ELS_REQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* Restore the default */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+}
+
+static void
+efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 len = 0;
+ u32 ext = 0;
+
+ /* clear xbusy flag if WCQE[XB] is clear */
+ if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
+ io->xbusy = false;
+
+ /* get extended CQE status */
+ switch (io->type) {
+ case EFCT_HW_BLS_ACC:
+ case EFCT_HW_BLS_RJT:
+ break;
+ case EFCT_HW_ELS_REQ:
+ sli_fc_els_did(&hw->sli, cqe, &ext);
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_ELS_RSP:
+ case EFCT_HW_FC_CT_RSP:
+ break;
+ case EFCT_HW_FC_CT:
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_WRITE:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ break;
+ case EFCT_HW_IO_DNRX_REQUEUE:
+ /* release the count for re-posting the buffer */
+ /* efct_hw_io_free(hw, io); */
+ break;
+ default:
+ efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
+ io->type, io->indicator);
+ break;
+ }
+ if (status) {
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ /*
+ * If this is an originator IO and XB is set, then issue an
+ * abort for the IO from within the HW
+ */
+ if (efct_hw_iotype_is_originator(io->type) &&
+ wcqe->flags & SLI4_WCQE_XB) {
+ int rc;
+
+ efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+
+ /*
+ * Because targets may send a response when the IO
+ * completes using the same XRI, we must wait for the
+ * XRI_ABORTED CQE to issue the IO callback
+ */
+ rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
+ if (rc == 0) {
+ /*
+ * latch status to return after abort is
+ * complete
+ */
+ io->status_saved = true;
+ io->saved_status = status;
+ io->saved_ext = ext;
+ io->saved_len = len;
+ goto exit_efct_hw_wq_process_io;
+ } else if (rc == -EINPROGRESS) {
+ /*
+ * Already being aborted by someone else (ABTS
+ * perhaps). Just return original
+ * error.
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x\n",
+ "abort in progress xri=",
+ io->indicator, io->reqtag);
+
+ } else {
+ /* Failed to abort for some other reason, log
+ * error
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
+ "Failed to abort xri=",
+ io->indicator, io->reqtag, rc);
+ }
+ }
+ }
+
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ if (io->status_saved) {
+ /* use latched status if exists */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ }
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+ done(io, len, status, ext, io->arg);
+ }
+
+exit_efct_hw_wq_process_io:
+ return;
+}
+
+static int
+efct_hw_setup_io(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct efct_hw_io *io = NULL;
+ uintptr_t xfer_virt = 0;
+ uintptr_t xfer_phys = 0;
+ u32 index;
+ bool new_alloc = true;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ if (!hw->io) {
+ hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
+ if (!hw->io)
+ return -ENOMEM;
+
+ memset(hw->io, 0, hw->config.n_io * sizeof(io));
+
+ for (i = 0; i < hw->config.n_io; i++) {
+ hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!hw->io[i])
+ goto error;
+ }
+
+ /* Create WQE buffs for IO */
+ hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
+ GFP_KERNEL);
+ if (!hw->wqe_buffs) {
+ kfree(hw->io);
+ return -ENOMEM;
+ }
+
+ } else {
+ /* re-use existing IOs, including SGLs */
+ new_alloc = false;
+ }
+
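+ /*
+ * On the first allocation, a single coherent DMA region sized for
+ * one XFER_RDY buffer per IO is allocated; the per-IO loop below
+ * carves it into sizeof(struct fcp_txrdy) slices.
+ */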
+ if (new_alloc) {
+ dma = &hw->xfer_rdy;
+ dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys, GFP_DMA);
+ if (!dma->virt)
+ return -ENOMEM;
+ }
+ xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
+ xfer_phys = hw->xfer_rdy.phys;
+
+ /* Initialize the pool of HW IO objects */
+ for (i = 0; i < hw->config.n_io; i++) {
+ struct hw_wq_callback *wqcb;
+
+ io = hw->io[i];
+
+ /* initialize IO fields */
+ io->hw = hw;
+
+ /* Assign a WQE buff */
+ io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
+
+ /* Allocate the request tag for this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+ io->reqtag = wqcb->instance_index;
+
+ /* Now for the fields that are initialized on each free */
+ efct_hw_init_free_io(io);
+
+ /* The XB flag isn't cleared on IO free, so init to zero */
+ io->xbusy = 0;
+
+ if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
+ &io->indicator, &index)) {
+ efc_log_err(hw->os,
+ "sli_resource_alloc failed @ %d\n", i);
+ return -ENOMEM;
+ }
+
+ if (new_alloc) {
+ dma = &io->def_sgl;
+ dma->size = hw->config.n_sgl *
+ sizeof(struct sli4_sge);
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys,
+ GFP_DMA);
+ if (!dma->virt) {
+ efc_log_err(hw->os, "dma_alloc fail %d\n", i);
+ memset(&io->def_sgl, 0,
+ sizeof(struct efc_dma));
+ return -ENOMEM;
+ }
+ }
+ io->def_sgl_count = hw->config.n_sgl;
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+
+ if (hw->xfer_rdy.size) {
+ io->xfer_rdy.virt = (void *)xfer_virt;
+ io->xfer_rdy.phys = xfer_phys;
+ io->xfer_rdy.size = sizeof(struct fcp_txrdy);
+
+ xfer_virt += sizeof(struct fcp_txrdy);
+ xfer_phys += sizeof(struct fcp_txrdy);
+ }
+ }
+
+ return 0;
+error:
+ for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+
+ kfree(hw->io);
+ hw->io = NULL;
+
+ return -ENOMEM;
+}
+
+static int
+efct_hw_init_prereg_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ struct efct_hw_io *io = NULL;
+ u8 cmd[SLI4_BMBX_SIZE];
+ int rc = 0;
+ u32 n_rem;
+ u32 n = 0;
+ u32 sgls_per_request = 256;
+ struct efc_dma **sgls = NULL;
+ struct efc_dma req;
+ struct efct *efct = hw->os;
+
+ sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
+ if (!sgls)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(struct efc_dma));
+ req.size = 32 + sgls_per_request * 16;
+ req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
+ GFP_DMA);
+ if (!req.virt) {
+ kfree(sgls);
+ return -ENOMEM;
+ }
+
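+ /*
+ * Post the SGLs to the hardware in batches of up to sgls_per_request
+ * XRIs. Each POST_SGL_PAGES command covers a run of contiguous XRI
+ * values, and IOs are moved to the free list only once their SGLs
+ * have been posted successfully.
+ */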
+ for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
+ /* Copy addresses of the SGLs into the local sgls[] array;
+ * break out if the XRI values are not contiguous.
+ */
+ u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
+
+ for (n = 0; n < min; n++) {
+ /* Check that we have contiguous xri values */
+ if (n > 0) {
+ if (hw->io[idx + n]->indicator !=
+ hw->io[idx + n - 1]->indicator + 1)
+ break;
+ }
+
+ sgls[n] = hw->io[idx + n]->sgl;
+ }
+
+ if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
+ hw->io[idx]->indicator, n, sgls, NULL, &req)) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
+ if (rc) {
+ efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
+ break;
+ }
+
+ /* Add to tail if successful */
+ for (i = 0; i < n; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+ }
+
+ dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
+ memset(&req, 0, sizeof(struct efc_dma));
+ kfree(sgls);
+
+ return rc;
+}
+
+static int
+efct_hw_init_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ bool prereg = false;
+ struct efct_hw_io *io = NULL;
+ int rc = 0;
+
+ prereg = hw->sli.params.sgl_pre_registered;
+
+ if (prereg)
+ return efct_hw_init_prereg_io(hw);
+
+ for (i = 0; i < hw->config.n_io; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
+
+ memset(&param, 0, sizeof(param));
+ param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
+ fdt_xfer_hint, rc);
+ else
+ efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
+ le32_to_cpu(param.fdt_xfer_hint));
+
+ return rc;
+}
+
+static int
+efct_hw_config_rq(struct efct_hw *hw)
+{
+ u32 min_rq_count, i, rc;
+ struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "using REG_FCFI standard\n");
+
+ /*
+ * Set the filter match/mask values from hw's
+ * filter_def values
+ */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_cfg[i].rq_id = cpu_to_le16(0xffff);
+ rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
+ rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ /*
+ * Update the rq_ids of the FCF configuration
+ * (don't update more than the number of rq_cfg
+ * elements)
+ */
+ min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
+ hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
+ for (i = 0; i < min_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+ u32 j;
+
+ for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
+ u32 mask = (rq->filter_mask != 0) ?
+ rq->filter_mask : 1;
+
+ if (!(mask & (1U << j)))
+ continue;
+
+ rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
+ efct_logfcfi(hw, j, i, rq->hdr->id);
+ }
+ }
+
+ rc = -EIO;
+ if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "FCFI registration failed\n");
+ return rc;
+ }
+ hw->fcf_indicator =
+ le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
+
+ return rc;
+}
+
+static int
+efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
+{
+ u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
+ struct hw_rq *rq;
+ struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
+ struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ u32 rc, i;
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ goto issue_cmd;
+
+ /* Set the filter match/mask values from hw's filter_def values */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_filter[i].rq_id = cpu_to_le16(0xffff);
+ rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
+ rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ rq = hw->hw_rq[0];
+ rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
+ rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
+
+ mrq_bitmask = 0x2;
+issue_cmd:
+ efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
+ hw->hw_rq_count, hw->config.rq_selection_policy, mode);
+ /* Invoke REG_FCFI_MRQ */
+ rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
+ hw->config.rq_selection_policy, mrq_bitmask,
+ hw->hw_mrq_count, rq_filter);
+ if (rc) {
+ efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
+
+ if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
+ efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
+ rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
+ return -EIO;
+ }
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
+
+ return 0;
+}
+
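+/*
+ * The queue hashes use open addressing with linear probing; the
+ * id & (EFCT_HW_Q_HASH_SIZE - 1) masking relies on the hash size
+ * being a power of two.
+ */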
+static void
+efct_hw_queue_hash_add(struct efct_queue_hash *hash,
+ u16 id, u16 index)
+{
+ u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the number of queues, we never
+ * have to worry about an infinite loop.
+ */
+ while (hash[hash_index].in_use)
+ hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /* not used, claim the entry */
+ hash[hash_index].id = id;
+ hash[hash_index].in_use = true;
+ hash[hash_index].index = index;
+}
+
+static int
+efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_health_check param;
+ u32 health_check_flag = 0;
+
+ memset(&param, 0, sizeof(param));
+
+ if (enable)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
+
+ if (query)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
+
+ param.health_check_dword = cpu_to_le32(health_check_flag);
+
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
+ else
+ efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
+
+ return rc;
+}
+
+int
+efct_hw_init(struct efct_hw *hw)
+{
+ int rc;
+ u32 i = 0;
+ int rem_count;
+ unsigned long flags = 0;
+ struct efct_hw_io *temp;
+ struct efc_dma *dma;
+
+ /*
+ * Make sure the command lists are empty. If this is start-of-day,
+ * they'll be empty since they were just initialized in efct_hw_setup.
+ * If we've just gone through a reset, the command and command pending
+ * lists should have been cleaned up as part of the reset
+ * (efct_hw_reset()).
+ */
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on cmd list\n");
+ return -EIO;
+ }
+ if (!list_empty(&hw->cmd_pending)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on pending list\n");
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ /* Free RQ buffers if previously allocated */
+ efct_hw_rx_free(hw);
+
+ /*
+ * The IO queues must be initialized here for the reset case. The
+ * efct_hw_init_io() function will re-add the IOs to the free list.
+ * The cmd_head list should be OK since we free all entries in
+ * efct_hw_command_cancel() that is called in the efct_hw_reset().
+ */
+
+ /* If we are in this function due to a reset, there may be stale items
+ * on lists that need to be removed. Clean them up.
+ */
+ rem_count = 0;
+ while ((!list_empty(&hw->io_wait_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_inuse))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
+ rem_count);
+
+ /* If MRQ is not required, make sure we don't request the feature. */
+ if (hw->config.n_rq == 1)
+ hw->sli.features &= (~SLI4_REQFEAT_MRQP);
+
+ if (sli_init(&hw->sli)) {
+ efc_log_err(hw->os, "SLI failed to initialize\n");
+ return -EIO;
+ }
+
+ if (hw->sliport_healthcheck) {
+ rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
+ if (rc != 0) {
+ efc_log_err(hw->os, "Enable port Health check fail\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Set FDT transfer hint, only works on Lancer
+ */
+ if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
+ /*
+ * Non-fatal error. In particular, we can disregard failure to
+ * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
+ * that do not support the EFCT_HW_FDT_XFER_HINT feature.
+ */
+ efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
+ }
+
+ /* zero the hashes */
+ memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
+ efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
+ efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
+ efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
+
+ rc = efct_hw_init_queues(hw);
+ if (rc)
+ return rc;
+
+ rc = efct_hw_map_wq_cpu(hw);
+ if (rc)
+ return rc;
+
+ /* Allocate and post RQ buffers */
+ rc = efct_hw_rx_allocate(hw);
+ if (rc) {
+ efc_log_err(hw->os, "rx_allocate failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_rx_post(hw);
+ if (rc) {
+ efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
+ return rc;
+ }
+
+ if (hw->config.n_eq == 1) {
+ rc = efct_hw_config_rq(hw);
+ if (rc) {
+ efc_log_err(hw->os, "config rq failed %d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Allocate the WQ request tag pool, if not previously allocated
+ * (the request tag value is 16 bits, thus the pool allocation size
+ * of 64k)
+ */
+ hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
+ if (!hw->wq_reqtag_pool) {
+ efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = efct_hw_setup_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO allocation failure\n");
+ return rc;
+ }
+
+ rc = efct_hw_init_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO initialization failure\n");
+ return rc;
+ }
+
+ dma = &hw->loop_map;
+ dma->size = SLI4_MIN_LOOP_MAP_BYTES;
+ dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
+ GFP_DMA);
+ if (!dma->virt)
+ return -EIO;
+
+ /*
+ * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
+ * entries
+ */
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_arm(&hw->sli, &hw->eq[i], true);
+
+ /*
+ * Initialize RQ hash
+ */
+ for (i = 0; i < hw->rq_count; i++)
+ efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
+
+ /*
+ * Initialize WQ hash
+ */
+ for (i = 0; i < hw->wq_count; i++)
+ efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
+
+ /*
+ * Arming the CQ allows (e.g.) MQ completions to write CQ entries
+ */
+ for (i = 0; i < hw->cq_count; i++) {
+ efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
+ sli_queue_arm(&hw->sli, &hw->cq[i], true);
+ }
+
+ /* Set RQ process limit*/
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
+ }
+
+ /* record the fact that the queues are functional */
+ hw->state = EFCT_HW_STATE_ACTIVE;
+ /*
+ * Allocate a HW IO for send frame.
+ */
+ hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
+ if (!hw->hw_wq[0]->send_frame_io)
+ efc_log_err(hw->os, "alloc for send_frame_io failed\n");
+
+ /* Initialize send frame sequence id */
+ atomic_set(&hw->send_frame_seq_id, 0);
+
+ return 0;
+}
+
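+/*
+ * Parse a comma separated list of up to ARRAY_SIZE(filter_def) numeric
+ * values (any base accepted by kstrtou32) into hw->config.filter_def[].
+ */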
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value)
+{
+ int rc = 0;
+ char *p = NULL;
+ char *token;
+ u32 idx = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
+ hw->config.filter_def[idx] = 0;
+
+ p = kstrdup(value, GFP_KERNEL);
+ if (!p || !*p) {
+ efc_log_err(hw->os, "p is NULL\n");
+ return -ENOMEM;
+ }
+
+ idx = 0;
+ while ((token = strsep(&p, ",")) && *token) {
+ if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
+ efc_log_err(hw->os, "kstrtoint failed\n");
+
+ if (!p || !*p)
+ break;
+
+ if (idx == ARRAY_SIZE(hw->config.filter_def))
+ break;
+ }
+ kfree(p);
+
+ return rc;
+}
+
+u64
+efct_get_wwnn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwnn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+u64
+efct_get_wwpn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwpn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+static struct efc_hw_rq_buffer *
+efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
+ u32 size)
+{
+ struct efct *efct = hw->os;
+ struct efc_hw_rq_buffer *rq_buf = NULL;
+ struct efc_hw_rq_buffer *prq;
+ u32 i;
+
+ if (!count)
+ return NULL;
+
+ rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
+ if (!rq_buf)
+ return NULL;
+ memset(rq_buf, 0, sizeof(*rq_buf) * count);
+
+ for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
+ prq->rqindex = rqindex;
+ prq->dma.size = size;
+ prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ prq->dma.size,
+ &prq->dma.phys,
+ GFP_DMA);
+ if (!prq->dma.virt) {
+ efc_log_err(hw->os, "DMA allocation failed\n");
+ kfree(rq_buf);
+ return NULL;
+ }
+ }
+ return rq_buf;
+}
+
+static void
+efct_hw_rx_buffer_free(struct efct_hw *hw,
+ struct efc_hw_rq_buffer *rq_buf,
+ u32 count)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ struct efc_hw_rq_buffer *prq;
+
+ if (rq_buf) {
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ dma_free_coherent(&efct->pci->dev,
+ prq->dma.size, prq->dma.virt,
+ prq->dma.phys);
+ memset(&prq->dma, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(rq_buf);
+ }
+}
+
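+/*
+ * Allocate receive buffers for each hardware RQ pair: one set of header
+ * buffers (EFCT_HW_RQ_SIZE_HDR bytes each) and one set of payload
+ * buffers (rq_default_buffer_size bytes each), with a distinct rqindex
+ * assigned to the header and payload sets.
+ */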
+int
+efct_hw_rx_allocate(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ int rc = 0;
+ u32 rqindex = 0;
+ u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
+ u32 payload_size = hw->config.rq_default_buffer_size;
+
+ rqindex = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ /* Allocate header buffers */
+ rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ hdr_size);
+ if (!rq->hdr_buf) {
+ efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
+ i, rq->hdr->id, rq->entry_count, hdr_size);
+
+ rqindex++;
+
+ /* Allocate payload buffers */
+ rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ payload_size);
+ if (!rq->payload_buf) {
+ efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
+ i, rq->data->id, rq->entry_count, payload_size);
+ rqindex++;
+ }
+
+ return rc ? -EIO : 0;
+}
+
+int
+efct_hw_rx_post(struct efct_hw *hw)
+{
+ u32 i;
+ u32 idx;
+ u32 rq_idx;
+ int rc = 0;
+
+ if (!hw->seq_pool) {
+ u32 count = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++)
+ count += hw->hw_rq[i]->entry_count;
+
+ hw->seq_pool = kmalloc_array(count,
+ sizeof(struct efc_hw_sequence), GFP_KERNEL);
+ if (!hw->seq_pool)
+ return -ENOMEM;
+ }
+
+ /*
+ * In RQ pair mode, we MUST post the header and payload buffer at the
+ * same time.
+ */
+ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
+ struct hw_rq *rq = hw->hw_rq[rq_idx];
+
+ for (i = 0; i < rq->entry_count - 1; i++) {
+ struct efc_hw_sequence *seq;
+
+ seq = hw->seq_pool + idx;
+ idx++;
+ seq->header = &rq->hdr_buf[i];
+ seq->payload = &rq->payload_buf[i];
+ rc = efct_hw_sequence_free(hw, seq);
+ if (rc)
+ break;
+ }
+ if (rc)
+ break;
+ }
+
+ if (rc && hw->seq_pool)
+ kfree(hw->seq_pool);
+
+ return rc;
+}
+
+void
+efct_hw_rx_free(struct efct_hw *hw)
+{
+ u32 i;
+
+ /* Free hw_rq buffers */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ if (rq) {
+ efct_hw_rx_buffer_free(hw, rq->hdr_buf,
+ rq->entry_count);
+ rq->hdr_buf = NULL;
+ efct_hw_rx_buffer_free(hw, rq->payload_buf,
+ rq->entry_count);
+ rq->payload_buf = NULL;
+ }
+ }
+}
+
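+/*
+ * Submit as many queued mailbox commands as the MQ has room for, moving
+ * them from cmd_pending to cmd_head. Called with cmd_lock held; each
+ * completion processed by efct_hw_command_process() frees a slot and
+ * triggers another submission pass.
+ */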
+static int
+efct_hw_cmd_submit_pending(struct efct_hw *hw)
+{
+ int rc = 0;
+
+ /* Assumes lock held */
+
+ /* Only submit MQE if there's room */
+ while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
+ !list_empty(&hw->cmd_pending)) {
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_pending,
+ struct efct_command_ctx, list_entry);
+ if (!ctx)
+ break;
+
+ list_del_init(&ctx->list_entry);
+
+ list_add_tail(&ctx->list_entry, &hw->cmd_head);
+ hw->cmd_head_count++;
+ if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
+ efc_log_debug(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ rc = -EIO;
+ break;
+ }
+ }
+ return rc;
+}
+
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
+{
+ int rc = -EIO;
+ unsigned long flags = 0;
+ void *bmbx = NULL;
+
+ /*
+ * If the chip is in an error state (UE'd) then reject this mailbox
+ * command.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
+ efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(&hw->sli),
+ sli_reg_read_err1(&hw->sli),
+ sli_reg_read_err2(&hw->sli));
+
+ return -EIO;
+ }
+
+ /*
+ * Send a mailbox command to the hardware, and either wait for
+ * a completion (EFCT_CMD_POLL) or get an optional asynchronous
+ * completion (EFCT_CMD_NOWAIT).
+ */
+
+ if (opts == EFCT_CMD_POLL) {
+ mutex_lock(&hw->bmbx_lock);
+ bmbx = hw->sli.bmbx.virt;
+
+ memset(bmbx, 0, SLI4_BMBX_SIZE);
+ memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
+
+ if (sli_bmbx_command(&hw->sli) == 0) {
+ rc = 0;
+ memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
+ }
+ mutex_unlock(&hw->bmbx_lock);
+ } else if (opts == EFCT_CMD_NOWAIT) {
+ struct efct_command_ctx *ctx = NULL;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "Can't send command, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -ENOSPC;
+
+ memset(ctx, 0, sizeof(struct efct_command_ctx));
+
+ if (cb) {
+ ctx->cb = cb;
+ ctx->arg = arg;
+ }
+
+ memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
+ ctx->ctx = hw;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /* Add to pending list */
+ INIT_LIST_HEAD(&ctx->list_entry);
+ list_add_tail(&ctx->list_entry, &hw->cmd_pending);
+
+ /* Submit as much of the pending list as we can */
+ rc = efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
+ size_t size)
+{
+ struct efct_command_ctx *ctx = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+ list_del_init(&ctx->list_entry);
+ }
+ if (!ctx) {
+ efc_log_err(hw->os, "no command context\n");
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ return -EIO;
+ }
+
+ hw->cmd_head_count--;
+
+ /* Post any pending requests */
+ efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ if (ctx->cb) {
+ memcpy(ctx->buf, mqe, size);
+ ctx->cb(hw, status, ctx->buf, ctx->arg);
+ }
+
+ mempool_free(ctx, hw->cmd_ctx_pool);
+
+ return 0;
+}
+
+static int
+efct_hw_mq_process(struct efct_hw *hw,
+ int status, struct sli4_queue *mq)
+{
+ u8 mqe[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_mq_read(&hw->sli, mq, mqe);
+ if (!rc)
+ rc = efct_hw_command_process(hw, status, mqe, mq->size);
+
+ return rc;
+}
+
+static int
+efct_hw_command_cancel(struct efct_hw *hw)
+{
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /*
+ * Manually clean up remaining commands. Note: since this calls
+ * efct_hw_command_process(), we'll also process the cmd_pending
+ * list, so no need to manually clean that out.
+ */
+ while (!list_empty(&hw->cmd_head)) {
+ u8 mqe[SLI4_BMBX_SIZE] = { 0 };
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+
+ efc_log_debug(hw->os, "hung command %08x\n",
+ !ctx ? U32_MAX : *((u32 *)ctx->buf));
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ return rc;
+}
+
+static void
+efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw->os->efcport, status, mqe,
+ ctx->arg);
+
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ }
+}
+
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx;
+ struct efct *efct = base;
+ struct efct_hw *hw = &efct->hw;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer),
+ * we need this to be persistent as the mbox cmd submission may be
+ * queued and executed later.
+ */
+ ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -EIO;
+
+ ctx->callback = cb;
+ ctx->arg = arg;
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
+ if (rc) {
+ efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline struct efct_hw_io *
+_efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+
+ if (!list_empty(&hw->io_free)) {
+ io = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del(&io->list_entry);
+ }
+ if (io) {
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_inuse);
+ io->state = EFCT_HW_IO_STATE_INUSE;
+ io->abort_reqtag = U32_MAX;
+ io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
+ if (!io->wq) {
+ efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
+ raw_smp_processor_id());
+ io->wq = hw->hw_wq[0];
+ }
+ kref_init(&io->ref);
+ io->release = efct_hw_io_free_internal;
+ } else {
+ atomic_add(1, &hw->io_alloc_failed_count);
+ }
+
+ return io;
+}
+
+struct efct_hw_io *
+efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ io = _efct_hw_io_alloc(hw);
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+
+ return io;
+}
+
+static void
+efct_hw_io_free_move_correct_list(struct efct_hw *hw,
+ struct efct_hw_io *io)
+{
+ /*
+ * When an IO is freed, depending on the exchange busy flag,
+ * move it to the correct list.
+ */
+ if (io->xbusy) {
+ /*
+ * add to wait_free list and wait for XRI_ABORTED CQEs to clean
+ * up
+ */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_wait_free);
+ io->state = EFCT_HW_IO_STATE_WAIT_FREE;
+ } else {
+ /* IO not busy, add to free list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ io->state = EFCT_HW_IO_STATE_FREE;
+ }
+}
+
+static inline void
+efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* initialize IO fields */
+ efct_hw_init_free_io(io);
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+}
+
+void
+efct_hw_io_free_internal(struct kref *arg)
+{
+ unsigned long flags = 0;
+ struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
+ struct efct_hw *hw = io->hw;
+
+ /* perform common cleanup */
+ efct_hw_io_free_common(hw, io);
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ /* remove from in-use list */
+ if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+int
+efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ return kref_put(&io->ref, io->release);
+}
+
+struct efct_hw_io *
+efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
+{
+ u32 ioindex;
+
+ ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
+ return hw->io[ioindex];
+}
+
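+/*
+ * Build the fixed leading SGEs required by each IO type: TARGET_WRITE
+ * places the host-resident XFER_RDY buffer first followed by skip
+ * entries, TARGET_READ starts with skip entries only, and TARGET_RSP
+ * needs no special entries. Data SGEs are appended afterwards with
+ * efct_hw_io_add_sge().
+ */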
+int
+efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
+ enum efct_hw_io_type type)
+{
+ struct sli4_sge *data = NULL;
+ u32 i = 0;
+ u32 skips = 0;
+ u32 sge_flags = 0;
+
+ if (!io) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ /* Clear / reset the scatter-gather list */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+ io->first_data_sge = 0;
+
+ memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+ io->n_sge = 0;
+ io->sge_offset = 0;
+
+ io->type = type;
+
+ data = io->sgl->virt;
+
+ /*
+ * Some IO types have underlying hardware requirements on the order
+ * of SGEs. Process all special entries here.
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE:
+
+ /* populate host resident XFER_RDY buffer */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ data->buffer_address_high =
+ cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
+ data->buffer_address_low =
+ cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
+ data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+
+ skips = EFCT_TARGET_WRITE_SKIPS;
+
+ io->n_sge = 1;
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ /*
+ * For FCP_TSEND64, the first 2 entries are SKIP SGE's
+ */
+ skips = EFCT_TARGET_READ_SKIPS;
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ /*
+ * No skips, etc. for FCP_TRSP64
+ */
+ break;
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ return -EIO;
+ }
+
+ /*
+ * Write skip entries
+ */
+ for (i = 0; i < skips; i++) {
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+ }
+
+ io->n_sge += skips;
+
+ /*
+ * Set last
+ */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ return 0;
+}
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length)
+{
+ struct sli4_sge *data = NULL;
+ u32 sge_flags = 0;
+
+ if (!io || !addr || !length) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p addr=%lx length=%u\n",
+ hw, io, addr, length);
+ return -EIO;
+ }
+
+ if (length > hw->sli.sge_supported_length) {
+ efc_log_err(hw->os,
+ "length of SGE %d bigger than allowed %d\n",
+ length, hw->sli.sge_supported_length);
+ return -EIO;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
+ sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
+ sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
+
+ data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
+ data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
+ data->buffer_length = cpu_to_le32(length);
+
+ /*
+ * Always assume this is the last entry and mark as such.
+ * If this is not the first entry, unset the "last SGE"
+ * indication for the previous entry.
+ */
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ if (io->n_sge) {
+ sge_flags = le32_to_cpu(data[-1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_LAST;
+ data[-1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ /* Set first_data_bde if not previously set */
+ if (io->first_data_sge == 0)
+ io->first_data_sge = io->n_sge;
+
+ io->sge_offset += length;
+ io->n_sge++;
+
+ return 0;
+}
+
+void
+efct_hw_io_abort_all(struct efct_hw *hw)
+{
+ struct efct_hw_io *io_to_abort = NULL;
+ struct efct_hw_io *next_io = NULL;
+
+ list_for_each_entry_safe(io_to_abort, next_io,
+ &hw->io_inuse, list_entry) {
+ efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
+ }
+}
+
+static void
+efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ u32 ext = 0;
+ u32 len = 0;
+ struct hw_wq_callback *wqcb;
+
+ /*
+ * For IOs that were aborted internally, we may need to issue the
+ * callback here depending on whether an XRI_ABORTED CQE is expected
+ * or not. If the status is Local Reject/No XRI, then issue the
+ * callback now.
+ */
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
+ ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ /*
+ * Use the latched status as this is always saved for an internal
+ * abort. Note: we won't have both a done and an abort_done
+ * function, so don't worry about clobbering the len, status and
+ * ext fields.
+ */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ done(io, len, status, ext, io->arg);
+ }
+
+ if (io->abort_done) {
+ efct_hw_done_t done = io->abort_done;
+
+ io->abort_done = NULL;
+ done(io, len, status, ext, io->abort_arg);
+ }
+
+ /* clear abort bit to indicate abort is complete */
+ io->abort_in_progress = false;
+
+ /* Free the WQ callback */
+ if (io->abort_reqtag == U32_MAX) {
+ efc_log_err(hw->os, "HW IO already freed\n");
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
+ efct_hw_reqtag_free(hw, wqcb);
+
+ /*
+ * Call efct_hw_io_free() because this releases the WQ reservation as
+ * well as doing the refcount put. Don't duplicate the code here.
+ */
+ (void)efct_hw_io_free(hw, io);
+}
+
+static void
+efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
+{
+ struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
+
+ memset(abort, 0, hw->sli.wqe_size);
+
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_tag = cpu_to_le32(wqe->id);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+}
+
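+/*
+ * Abort a HW IO: take an extra reference on the IO, mark the abort as
+ * in progress, and either defer the ABORT_WQE until the IO's own WQE
+ * leaves the WQ pending list or write it immediately. Completion is
+ * handled by efct_hw_wq_process_abort().
+ */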
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg)
+{
+ struct hw_wq_callback *wqcb;
+ unsigned long flags = 0;
+
+ if (!io_to_abort) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
+ hw, io_to_abort);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
+ /* command no longer active */
+ efc_log_debug(hw->os,
+ "io not active xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -ENOENT;
+ }
+
+ /* Must have a valid WQ reference */
+ if (!io_to_abort->wq) {
+ efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
+ io_to_abort->indicator);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -ENOENT;
+ }
+
+ /*
+ * Validation checks complete; now check to see if already being
+ * aborted, if not set the flag.
+ */
+ if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ efc_log_debug(hw->os,
+ "io already being aborted xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -EINPROGRESS;
+ }
+
+ /*
+ * If we got here, the possibilities are:
+ * - host owned xri
+ * - io_to_abort->wq_index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - port owned xri:
+ * - rxri: io_to_abort->wq_index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ * - non-rxri
+ * - io_to_abort->index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - io_to_abort->index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ */
+ io_to_abort->abort_done = cb;
+ io_to_abort->abort_arg = arg;
+
+ /* Allocate a request tag for the abort portion of this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ io_to_abort->abort_reqtag = wqcb->instance_index;
+ io_to_abort->wqe.send_abts = send_abts;
+ io_to_abort->wqe.id = io_to_abort->indicator;
+ io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
+
+ /*
+ * If the wqe is on the pending list, then set this wqe to be
+ * aborted when the IO's wqe is removed from the list.
+ */
+ if (io_to_abort->wq) {
+ spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
+ if (io_to_abort->wqe.list_entry.next) {
+ io_to_abort->wqe.abort_wqe_submit_needed = true;
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
+ }
+
+ efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
+
+ /* ABORT_WQE does not actually utilize an XRI on the Port;
+ * therefore, keep xbusy as-is to track the exchange's state,
+ * not the ABORT_WQE's state.
+ */
+ if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
+ io_to_abort->abort_in_progress = false;
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_hw_reqtag_pool_free(struct efct_hw *hw)
+{
+ u32 i;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ struct hw_wq_callback *wqcb = NULL;
+
+ if (reqtag_pool) {
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = reqtag_pool->tags[i];
+ if (!wqcb)
+ continue;
+
+ kfree(wqcb);
+ }
+ kfree(reqtag_pool);
+ hw->wq_reqtag_pool = NULL;
+ }
+}
+
+struct reqtag_pool *
+efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct reqtag_pool *reqtag_pool;
+ struct hw_wq_callback *wqcb;
+
+ reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
+ if (!reqtag_pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&reqtag_pool->freelist);
+ /* initialize reqtag pool lock */
+ spin_lock_init(&reqtag_pool->lock);
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
+ if (!wqcb)
+ break;
+
+ reqtag_pool->tags[i] = wqcb;
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
+ }
+
+ return reqtag_pool;
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ struct hw_wq_callback *wqcb = NULL;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ unsigned long flags = 0;
+
+ if (!callback)
+ return wqcb;
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+
+ if (!list_empty(&reqtag_pool->freelist)) {
+ wqcb = list_first_entry(&reqtag_pool->freelist,
+ struct hw_wq_callback, list_entry);
+ }
+
+ if (wqcb) {
+ list_del_init(&wqcb->list_entry);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ wqcb->callback = callback;
+ wqcb->arg = arg;
+ } else {
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ }
+
+ return wqcb;
+}
+
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
+{
+ unsigned long flags = 0;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+
+ if (!wqcb->callback)
+ efc_log_err(hw->os, "WQCB is already freed\n");
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
+{
+ struct hw_wq_callback *wqcb;
+
+ wqcb = hw->wq_reqtag_pool->tags[instance_index];
+ if (!wqcb)
+ efc_log_err(hw->os, "wqcb for instance %d is null\n",
+ instance_index);
+
+ return wqcb;
+}
+
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
+{
+ int index = -1;
+ int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the maximum number of Qs, we
+ * never have to worry about an infinite loop. We will always find
+ * an unused entry.
+ */
+ do {
+ if (hash[i].in_use && hash[i].id == id)
+ index = hash[i].index;
+ else
+ i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+ } while (index == -1 && hash[i].in_use);
+
+ return index;
+}
+
+int
+efct_hw_process(struct efct_hw *hw, u32 vector,
+ u32 max_isr_time_msec)
+{
+ struct hw_eq *eq;
+
+ /*
+ * The caller should disable interrupts if they wish to prevent us
+ * from processing during a shutdown. The following states are defined:
+ * EFCT_HW_STATE_UNINITIALIZED - No queues allocated
+ * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
+ * queues are cleared.
+ * EFCT_HW_STATE_ACTIVE - Chip and queues are operational
+ * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
+ * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
+ * completions.
+ */
+ if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
+ return 0;
+
+ /* Get pointer to struct hw_eq */
+ eq = hw->hw_eq[vector];
+ if (!eq)
+ return 0;
+
+ eq->use_count++;
+
+ return efct_hw_eq_process(hw, eq, max_isr_time_msec);
+}
+
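+/*
+ * Drain the EQ, dispatching each entry to its CQ, and stop once
+ * max_isr_time_msec has elapsed (checked every
+ * EFCT_HW_TIMECHECK_ITERATIONS entries) so a busy queue cannot
+ * monopolize the CPU. The EQ is re-armed on exit.
+ */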
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec)
+{
+ u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
+ u32 tcheck_count;
+ u64 tstart;
+ u64 telapsed;
+ bool done = false;
+
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
+ u16 cq_id = 0;
+ int rc;
+
+ rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
+ if (unlikely(rc)) {
+ if (rc == SLI4_EQE_STATUS_EQ_FULL) {
+ u32 i;
+
+ /*
+ * Received a sentinel EQE indicating the
+ * EQ is full. Process all CQs
+ */
+ for (i = 0; i < hw->cq_count; i++)
+ efct_hw_cq_process(hw, hw->hw_cq[i]);
+ continue;
+ } else {
+ return rc;
+ }
+ } else {
+ int index;
+
+ index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
+
+ if (likely(index >= 0))
+ efct_hw_cq_process(hw, hw->hw_cq[index]);
+ else
+ efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
+ }
+
+ if (eq->queue->n_posted > eq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, eq->queue, false);
+
+ if (tcheck_count && (--tcheck_count == 0)) {
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed >= max_isr_time_msec)
+ done = true;
+ }
+ }
+ sli_queue_eq_arm(&hw->sli, eq->queue, true);
+
+ return 0;
+}
+
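+/*
+ * Write a single WQE to the work queue. The wqec bit is set once every
+ * wqec_set_count entries so the hardware reports consumed entries; the
+ * resulting SLI4_QENTRY_WQ_RELEASE completions are used by
+ * hw_wq_submit_pending() to replenish the WQ free count and drain the
+ * pending list.
+ */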
+static int
+_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int queue_rc;
+
+ /* Every so often, set the wqec bit to generate consumed completions */
+ if (wq->wqec_count)
+ wq->wqec_count--;
+
+ if (wq->wqec_count == 0) {
+ struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
+
+ genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
+ wq->wqec_count = wq->wqec_set_count;
+ }
+
+ /* Decrement WQ free count */
+ wq->free_count--;
+
+ queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
+
+ return (queue_rc < 0) ? -EIO : 0;
+}
+
+static void
+hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
+{
+ struct efct_hw_wqe *wqe;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+
+ /* Update free count with value passed in */
+ wq->free_count += update_free_count;
+
+ while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
+ wqe = list_first_entry(&wq->pending_list,
+ struct efct_hw_wqe, list_entry);
+ list_del_init(&wqe->list_entry);
+ _efct_hw_wq_write(wq, wqe);
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+}
+
+void
+efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
+{
+ u8 cqe[sizeof(struct sli4_mcqe)];
+ u16 rid = U16_MAX;
+ /* completion type */
+ enum sli4_qentry ctype;
+ u32 n_processed = 0;
+ u32 tstart, telapsed;
+
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
+ int status;
+
+ status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
+ /*
+ * The sign of status is significant. If status is:
+ * == 0 : call completed correctly and
+ * the CQE indicated success
+ * > 0 : call completed correctly and
+ * the CQE indicated an error
+ * < 0 : call failed and no information is available about the
+ * CQE
+ */
+ if (status < 0) {
+ if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
+ /*
+ * Notification that an entry was consumed,
+ * but not completed
+ */
+ continue;
+
+ break;
+ }
+
+ switch (ctype) {
+ case SLI4_QENTRY_ASYNC:
+ sli_cqe_async(&hw->sli, cqe);
+ break;
+ case SLI4_QENTRY_MQ:
+ /*
+ * Process MQ entry. Note there is no way to determine
+ * the MQ_ID from the completion entry.
+ */
+ efct_hw_mq_process(hw, status, hw->mq);
+ break;
+ case SLI4_QENTRY_WQ:
+ efct_hw_wq_process(hw, cq, cqe, status, rid);
+ break;
+ case SLI4_QENTRY_WQ_RELEASE: {
+ u32 wq_id = rid;
+ int index;
+ struct hw_wq *wq = NULL;
+
+ index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
+
+ if (likely(index >= 0)) {
+ wq = hw->hw_wq[index];
+ } else {
+ efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
+ break;
+ }
+ /* Submit any HW IOs that are on the WQ pending list */
+ hw_wq_submit_pending(wq, wq->wqec_set_count);
+
+ break;
+ }
+
+ case SLI4_QENTRY_RQ:
+ efct_hw_rqpair_process_rq(hw, cq, cqe);
+ break;
+ case SLI4_QENTRY_XABT: {
+ efct_hw_xabt_process(hw, cq, cqe, rid);
+ break;
+ }
+ default:
+ efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
+ ctype, rid);
+ break;
+ }
+
+ n_processed++;
+ if (n_processed == cq->queue->proc_limit)
+ break;
+
+ if (cq->queue->n_posted >= cq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, cq->queue, false);
+ }
+
+ sli_queue_arm(&hw->sli, cq->queue, true);
+
+ if (n_processed > cq->queue->max_num_processed)
+ cq->queue->max_num_processed = n_processed;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed > cq->queue->max_process_time)
+ cq->queue->max_process_time = telapsed;
+}
+
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid)
+{
+ struct hw_wq_callback *wqcb;
+
+ if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
+ if (status)
+ efc_log_err(hw->os, "reque xri failed, status = %d\n",
+ status);
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, rid);
+ if (!wqcb) {
+ efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
+ return;
+ }
+
+ if (!wqcb->callback) {
+ efc_log_err(hw->os, "wqcb callback is NULL\n");
+ return;
+ }
+
+ (*wqcb->callback)(wqcb->arg, cqe, status);
+}
+
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid)
+{
+ /* search IOs wait free list */
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ io = efct_hw_io_lookup(hw, rid);
+ if (!io) {
+ /* IO lookup failure should never happen */
+ efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
+ return;
+ }
+
+ if (!io->xbusy)
+ efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
+ else
+ /* mark IO as no longer busy */
+ io->xbusy = false;
+
+ /*
+ * For IOs that were aborted internally, we need to issue any pending
+ * callback here.
+ */
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ /*
+ * Use latched status as this is always saved for an internal
+ * abort
+ */
+ int status = io->saved_status;
+ u32 len = io->saved_len;
+ u32 ext = io->saved_ext;
+
+ io->done = NULL;
+ io->status_saved = false;
+
+ done(io, len, status, ext, arg);
+ }
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ if (io->state == EFCT_HW_IO_STATE_INUSE ||
+ io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ /* if on wait_free list, caller has already freed IO;
+ * remove from wait_free list and add to free list.
+ * if on in-use list, already marked as no longer busy;
+ * just leave there and wait for caller to free.
+ */
+ if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ io->state = EFCT_HW_IO_STATE_FREE;
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+static int
+efct_hw_flush(struct efct_hw *hw)
+{
+ u32 i = 0;
+
+ /* Process any remaining completions */
+ for (i = 0; i < hw->eq_count; i++)
+ efct_hw_process(hw, i, ~0);
+
+ return 0;
+}
+
+int
+efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int rc = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+ if (list_empty(&wq->pending_list)) {
+ if (wq->free_count > 0) {
+ rc = _efct_hw_wq_write(wq, wqe);
+ } else {
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ while (wq->free_count > 0) {
+ wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
+ list_entry);
+ if (!wqe)
+ break;
+
+ list_del_init(&wqe->list_entry);
+ rc = _efct_hw_wq_write(wq, wqe);
+ if (rc)
+ break;
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+
+ return rc;
+}
+
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_bls_send(efct, type, bls, NULL, NULL);
+}
+
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg)
+{
+ struct efct_hw *hw = &efct->hw;
+ struct efct_hw_io *hio;
+ struct sli_bls_payload bls;
+ int rc;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os,
+ "cannot send BLS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ efc_log_err(hw->os, "HIO allocation failed\n");
+ return -EIO;
+ }
+
+ hio->done = cb;
+ hio->arg = arg;
+
+ bls_params->xri = hio->indicator;
+ bls_params->tag = hio->reqtag;
+
+ if (type == FC_RCTL_BA_ACC) {
+ hio->type = EFCT_HW_BLS_ACC;
+ bls.type = SLI4_SLI_BLS_ACC;
+ memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
+ } else {
+ hio->type = EFCT_HW_BLS_RJT;
+ bls.type = SLI4_SLI_BLS_RJT;
+ memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
+ }
+
+ bls.ox_id = cpu_to_le16(bls_params->ox_id);
+ bls.rx_id = cpu_to_le16(bls_params->rx_id);
+
+ if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
+ &bls, bls_params)) {
+ efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
+ return -EIO;
+ }
+
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+
+ return rc;
+}
+
+static int
+efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *arg)
+{
+ struct efc_disc_io *io = arg;
+
+ efc_disc_io_complete(io, length, status, ext_status);
+ return 0;
+}
+
+static inline void
+efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
+{
+ u8 *cmd = io->req.virt;
+
+ params->cmd = *cmd;
+ params->s_id = io->s_id;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.els.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.els.timeout;
+}
+
+static inline void
+efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
+{
+ params->r_ctl = io->iparam.ct.r_ctl;
+ params->type = io->iparam.ct.type;
+ params->df_ctl = io->iparam.ct.df_ctl;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.ct.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.ct.timeout;
+}
+
+/**
+ * efct_els_hw_srrs_send() - Send a single request and response cmd.
+ * @efc: efc library structure
+ * @io: Discovery IO used to hold els and ct cmd context.
+ *
+ * This routine supports communication sequences consisting of a single
+ * request and single response between two endpoints. Examples include:
+ * - Sending an ELS request.
+ * - Sending an ELS response - To send an ELS response, the caller must provide
+ * the OX_ID from the received request.
+ * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
+ * the caller must provide the R_CTL, TYPE, and DF_CTL
+ * values to place in the FC frame header.
+ *
+ * Return: Status of the request.
+ */
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
+{
+ struct efct *efct = efc->base;
+ struct efct_hw_io *hio;
+ struct efct_hw *hw = &efct->hw;
+ struct efc_dma *send = &io->req;
+ struct efc_dma *receive = &io->rsp;
+ struct sli4_sge *sge = NULL;
+ int rc = 0;
+ u32 len = io->xmit_len;
+ u32 sge0_flags;
+ u32 sge1_flags;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_debug(hw->os,
+ "cannot send SRRS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ pr_err("HIO alloc failed\n");
+ return -EIO;
+ }
+
+ hio->done = efct_els_srrs_send_cb;
+ hio->arg = io;
+
+ sge = hio->sgl->virt;
+
+ /* clear both SGE */
+ memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+
+ sge0_flags = le32_to_cpu(sge[0].dw2_flags);
+ sge1_flags = le32_to_cpu(sge[1].dw2_flags);
+ if (send->size) {
+ sge[0].buffer_address_high =
+ cpu_to_le32(upper_32_bits(send->phys));
+ sge[0].buffer_address_low =
+ cpu_to_le32(lower_32_bits(send->phys));
+
+ sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+
+ sge[0].buffer_length = cpu_to_le32(len);
+ }
+
+ if (io->io_type == EFC_DISC_IO_ELS_REQ ||
+ io->io_type == EFC_DISC_IO_CT_REQ) {
+ sge[1].buffer_address_high =
+ cpu_to_le32(upper_32_bits(receive->phys));
+ sge[1].buffer_address_low =
+ cpu_to_le32(lower_32_bits(receive->phys));
+
+ sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ sge1_flags |= SLI4_SGE_LAST;
+
+ sge[1].buffer_length = cpu_to_le32(receive->size);
+ } else {
+ sge0_flags |= SLI4_SGE_LAST;
+ }
+
+ sge[0].dw2_flags = cpu_to_le32(sge0_flags);
+ sge[1].dw2_flags = cpu_to_le32(sge1_flags);
+
+ switch (io->io_type) {
+ case EFC_DISC_IO_ELS_REQ: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_REQ;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+
+ if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &els_params)) {
+ efc_log_err(hw->os, "REQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_ELS_RESP: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_RSP;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+ if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
+ &els_params)){
+ efc_log_err(hw->os, "RSP WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_REQ: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)){
+ efc_log_err(hw->os, "GEN WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_RESP: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT_RSP;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)){
+ efc_log_err(hw->os, "XMIT SEQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
+ rc = -EIO;
+ }
+
+ if (rc == 0) {
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+ }
+
+ return rc;
+}
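+
+/*
+ * Example usage (illustrative sketch only, not part of the driver flow):
+ * issuing an ELS request through efct_els_hw_srrs_send(). The caller is
+ * assumed to own the request/response DMA buffers and to keep the
+ * efc_disc_io alive until efc_disc_io_complete() runs; els_req_dma,
+ * els_rsp_dma and the FC ids below are placeholders.
+ *
+ *	io->io_type = EFC_DISC_IO_ELS_REQ;
+ *	io->req = els_req_dma;
+ *	io->rsp = els_rsp_dma;
+ *	io->xmit_len = els_req_len;
+ *	io->rsp_len = els_rsp_dma.size;
+ *	io->s_id = local_fc_id;
+ *	io->d_id = remote_fc_id;
+ *	io->rpi = rpi;
+ *	io->vpi = vpi;
+ *	io->rpi_registered = true;
+ *	io->iparam.els.timeout = 30;
+ *
+ *	if (efct_els_hw_srrs_send(efc, io))
+ *		;	// handle submission failure
+ */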
+
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg)
+{
+ int rc = 0;
+ bool send_wqe = true;
+
+ if (!io) {
+ pr_err("bad parm hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ /*
+ * Save state needed during later stages
+ */
+ io->type = type;
+ io->done = cb;
+ io->arg = arg;
+
+ /*
+ * Format the work queue entry used to send the IO
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+ struct fcp_txrdy *xfer = io->xfer_rdy.virt;
+
+ /*
+ * Fill in the XFER_RDY for IF_TYPE 0 devices
+ */
+ xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
+ xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRECEIVE WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_READ: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TSEND WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_RSP: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, SLI4_CQ_DEFAULT,
+ 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRSP WQE error\n");
+ rc = -EIO;
+ }
+
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ rc = -EIO;
+ }
+
+ if (send_wqe && rc == 0) {
+ io->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hw->tcmd_wq_submit[io->wq->instance]++;
+ io->wq->use_count++;
+ rc = efct_hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ io->xbusy = false;
+ }
+ }
+
+ return rc;
+}
+
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ int rc;
+ struct efct_hw_wqe *wqe;
+ u32 xri;
+ struct hw_wq *wq;
+
+ wqe = &ctx->wqe;
+
+ /* populate the callback object */
+ ctx->hw = hw;
+
+ /* Fetch and populate request tag */
+ ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
+ if (!ctx->wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ wq = hw->hw_wq[0];
+
+ /* Set XRI and RX_ID in the header based on which WQ, and which
+ * send_frame_io we are using
+ */
+ xri = wq->send_frame_io->indicator;
+
+ /* Build the send frame WQE */
+ rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
+ sof, eof, (u32 *)hdr, payload, payload->len,
+ EFCT_HW_SEND_FRAME_TIMEOUT, xri,
+ ctx->wqcb->instance_index);
+ if (rc) {
+ efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Write to WQ */
+ rc = efct_hw_wq_write(wq, wqe);
+ if (rc) {
+ efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
+ return -EIO;
+ }
+
+ wq->use_count++;
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link_stat(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_link_stats *mbox_rsp;
+ struct efct_hw_link_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
+ u32 num_counters, i;
+ u32 mbox_rsp_flags = 0;
+
+ mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
+ mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
+ num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
+ memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
+ EFCT_HW_LINK_STAT_MAX);
+
+ /* Fill overflow counts, mask starts from SLI4_READ_LNKSTAT_W02OF */
+ for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
+ counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
+
+ counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->linkfail_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssync_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssignal_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_errcnt);
+ counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
+ le32_to_cpu(mbox_rsp->inval_txword_errcnt);
+ counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->crc_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
+ le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
+ counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofa_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofni_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_soff_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
+ u8 clear_overflow_flags, u8 clear_all_counters,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_link_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command */
+ if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
+ clear_overflow_flags, clear_all_counters))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_link_stat, cb_arg);
+
+ if (rc)
+ kfree(cb_arg);
+
+ return rc;
+}
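+
+/*
+ * Example usage (illustrative sketch only): requesting the link statistics
+ * with a completion handler. The handler is invoked from the mailbox
+ * completion path; link_stats_done and its argument are placeholders.
+ *
+ *	static void
+ *	link_stats_done(int status, u32 num_counters,
+ *			struct efct_hw_link_stat_counts *counters, void *arg)
+ *	{
+ *		if (!status)
+ *			pr_info("link failures: %u\n",
+ *				counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter);
+ *	}
+ *
+ *	efct_hw_get_link_stats(hw, 0, 0, 0, link_stats_done, NULL);
+ */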
+
+static int
+efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_status *mbox_rsp =
+ (struct sli4_cmd_read_status *)mqe;
+ struct efct_hw_host_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
+ u32 num_counters = EFCT_HW_HOST_STAT_MAX;
+
+ memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
+ EFCT_HW_HOST_STAT_MAX);
+
+ counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_orig);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_resp);
+ counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_host_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command to get the host stats */
+ if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_host_stat, cb_arg);
+
+ if (rc) {
+ efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+struct efct_hw_async_call_ctx {
+ efct_hw_async_cb_t callback;
+ void *arg;
+ u8 cmd[SLI4_BMBX_SIZE];
+};
+
+static void
+efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw, status, mqe, ctx->arg);
+
+ kfree(ctx);
+ }
+}
+
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer),
+ * we need this to be persistent as the mbox cmd submission may be
+ * queued and executed later.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->callback = callback;
+ ctx->arg = arg;
+
+ /* Build and send a NOP mailbox command */
+ if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
+ efc_log_err(hw->os, "COMMON_NOP format failure\n");
+ kfree(ctx);
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
+ ctx);
+ if (rc) {
+ efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
+ kfree(ctx);
+ return -EIO;
+ }
+ return 0;
+}
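+
+/*
+ * Example usage (illustrative sketch only): deferring work until the NOP
+ * mailbox command completes. my_async_done and my_ctx are placeholders for
+ * the caller's handler and context.
+ *
+ *	static void
+ *	my_async_done(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+ *	{
+ *		// continue processing from the mailbox completion path
+ *	}
+ *
+ *	if (efct_hw_async_call(hw, my_async_done, my_ctx))
+ *		;	// handle submission failure
+ */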
+
+static int
+efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_sli_config *mbox_rsp =
+ (struct sli4_cmd_sli_config *)mqe;
+ struct sli4_rsp_cmn_write_object *wr_obj_rsp;
+ struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
+ u32 bytes_written;
+ u16 mbox_status;
+ u32 change_status;
+
+ wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
+ &mbox_rsp->payload.embed;
+ bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
+ mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
+ change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
+ RSP_CHANGE_STATUS);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (!status && mbox_status)
+ status = mbox_status;
+ cb_arg->cb(status, bytes_written, change_status,
+ cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
+ u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+ struct efct_hw_fw_wr_cb_arg *cb_arg;
+ int noc = 0;
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Write a portion of a firmware image to the device */
+ if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
+ noc, last, size, offset, "/prg/",
+ dma))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_fw_write, cb_arg);
+
+ if (rc != 0) {
+ efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
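+
+/*
+ * Example usage (illustrative sketch only): writing one chunk of a firmware
+ * image. A caller would typically loop over the image, advancing offset and
+ * setting last only for the final chunk; chunk_dma, chunk_len, offset,
+ * is_last, fw_write_done and ctx are the caller's own bookkeeping.
+ *
+ *	rc = efct_hw_firmware_write(hw, &chunk_dma, chunk_len, offset,
+ *				    is_last, fw_write_done, ctx);
+ */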
+
+static int
+efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
+ void *arg)
+{
+ return 0;
+}
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 link[SLI4_BMBX_SIZE];
+ u32 speed = 0;
+ u8 reset_alpa = 0;
+
+ switch (ctrl) {
+ case EFCT_HW_PORT_INIT:
+ if (!sli_cmd_config_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "CONFIG_LINK failed\n");
+ break;
+ }
+ speed = hw->config.speed;
+ reset_alpa = (u8)(value & 0xff);
+
+ rc = -EIO;
+ if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "INIT_LINK failed\n");
+ break;
+
+ case EFCT_HW_PORT_SHUTDOWN:
+ if (!sli_cmd_down_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "DOWN_LINK failed\n");
+ break;
+
+ default:
+ efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
+ break;
+ }
+
+ return rc;
+}
+
+void
+efct_hw_teardown(struct efct_hw *hw)
+{
+ u32 i = 0;
+ u32 destroy_queues;
+ u32 free_memory;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
+ free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
+
+ /* Cancel Sliport Healthcheck */
+ if (hw->sliport_healthcheck) {
+ hw->sliport_healthcheck = 0;
+ efct_hw_config_sli_port_health_check(hw, 0, 0);
+ }
+
+ if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_debug(hw->os,
+ "Some cmds still pending on MQ queue\n");
+
+ /* Cancel any remaining commands */
+ efct_hw_command_cancel(hw);
+ } else {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+ }
+
+ dma_free_coherent(&efct->pci->dev,
+ hw->rnode_mem.size, hw->rnode_mem.virt,
+ hw->rnode_mem.phys);
+ memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
+
+ if (hw->io) {
+ for (i = 0; i < hw->config.n_io; i++) {
+ if (hw->io[i] && hw->io[i]->sgl &&
+ hw->io[i]->sgl->virt) {
+ dma_free_coherent(&efct->pci->dev,
+ hw->io[i]->sgl->size,
+ hw->io[i]->sgl->virt,
+ hw->io[i]->sgl->phys);
+ }
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+ kfree(hw->io);
+ hw->io = NULL;
+ kfree(hw->wqe_buffs);
+ hw->wqe_buffs = NULL;
+ }
+
+ dma = &hw->xfer_rdy;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ dma = &hw->loop_map;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ for (i = 0; i < hw->wq_count; i++)
+ sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->rq_count; i++)
+ sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->mq_count; i++)
+ sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->cq_count; i++)
+ sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
+ free_memory);
+
+ /* Free rq buffers */
+ efct_hw_rx_free(hw);
+
+ efct_hw_queue_teardown(hw);
+
+ kfree(hw->wq_cpu_array);
+
+ sli_teardown(&hw->sli);
+
+ /* record the fact that the queues are non-functional */
+ hw->state = EFCT_HW_STATE_UNINITIALIZED;
+
+ /* free sequence free pool */
+ kfree(hw->seq_pool);
+ hw->seq_pool = NULL;
+
+ /* free hw_wq_callback pool */
+ efct_hw_reqtag_pool_free(hw);
+
+ mempool_destroy(hw->cmd_ctx_pool);
+ mempool_destroy(hw->mbox_rqst_pool);
+
+ /* Mark HW setup as not having been called */
+ hw->hw_setup_called = false;
+}
+
+static int
+efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
+ enum efct_hw_state prev_state)
+{
+ int rc = 0;
+
+ switch (reset) {
+ case EFCT_HW_RESET_FUNCTION:
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ case EFCT_HW_RESET_FIRMWARE:
+ efc_log_debug(hw->os, "issuing firmware reset\n");
+ if (sli_fw_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_soft_reset failed\n");
+ rc = -EIO;
+ }
+ /*
+ * Because the FW reset leaves the FW in a non-running state,
+ * follow that with a regular reset.
+ */
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ default:
+ efc_log_err(hw->os, "unknown type - no reset performed\n");
+ hw->state = prev_state;
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
+{
+ int rc = 0;
+ enum efct_hw_state prev_state = hw->state;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE)
+ efc_log_debug(hw->os,
+ "HW state %d is not active\n", hw->state);
+
+ hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
+
+ /*
+ * If the prev_state is already reset/teardown in progress,
+ * don't continue further
+ */
+ if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
+ prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
+ return efct_hw_sli_reset(hw, reset, prev_state);
+
+ if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_err(hw->os,
+ "Some commands still pending on MQ queue\n");
+ }
+
+ /* Reset the chip */
+ rc = efct_hw_sli_reset(hw, reset, prev_state);
+ if (rc == -EINVAL)
+ return -EIO;
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
new file mode 100644
index 000000000000..f3f4aa78dce9
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -0,0 +1,764 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef _EFCT_HW_H
+#define _EFCT_HW_H
+
+#include "../libefc_sli/sli4.h"
+
+/*
+ * EFCT PCI IDs
+ */
+#define EFCT_VENDOR_ID 0x10df
+/* LightPulse 16Gb x 4 FC (lancer-g6) */
+#define EFCT_DEVICE_LANCER_G6 0xe307
+/* LightPulse 32Gb x 4 FC (lancer-g7) */
+#define EFCT_DEVICE_LANCER_G7 0xf407
+
+/* Default number of RQ entries used by the driver */
+#define EFCT_HW_RQ_ENTRIES_MIN 512
+#define EFCT_HW_RQ_ENTRIES_DEF 1024
+#define EFCT_HW_RQ_ENTRIES_MAX 4096
+
+/* Defines the size of the RQ buffers used for each RQ */
+#define EFCT_HW_RQ_SIZE_HDR 128
+#define EFCT_HW_RQ_SIZE_PAYLOAD 1024
+
+/* Define the maximum number of multi-receive queues */
+#define EFCT_HW_MAX_MRQS 8
+
+/*
+ * Define count of when to set the WQEC bit in a submitted
+ * WQE, causing a consumed/released completion to be posted.
+ */
+#define EFCT_HW_WQEC_SET_COUNT 32
+
+/* Send frame timeout in seconds */
+#define EFCT_HW_SEND_FRAME_TIMEOUT 10
+
+/*
+ * FDT Transfer Hint value, reads greater than this value
+ * will be segmented to implement fairness. A value of zero disables
+ * the feature.
+ */
+#define EFCT_HW_FDT_XFER_HINT 8192
+
+#define EFCT_HW_TIMECHECK_ITERATIONS 100
+#define EFCT_HW_MAX_NUM_MQ 1
+#define EFCT_HW_MAX_NUM_RQ 32
+#define EFCT_HW_MAX_NUM_EQ 16
+#define EFCT_HW_MAX_NUM_WQ 32
+#define EFCT_HW_DEF_NUM_EQ 1
+
+#define OCE_HW_MAX_NUM_MRQ_PAIRS 16
+
+#define EFCT_HW_MQ_DEPTH 128
+#define EFCT_HW_EQ_DEPTH 1024
+
+/*
+ * A CQ will be assigned to each WQ
+ * (CQ must have 2X entries of the WQ for abort
+ * processing), plus a separate one for each RQ PAIR and one for MQ
+ */
+#define EFCT_HW_MAX_NUM_CQ \
+ ((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))
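+/*
+ * With the limits above this evaluates to (32 * 2) + 1 + (16 * 2) = 97
+ * completion queues at most.
+ */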
+
+#define EFCT_HW_Q_HASH_SIZE 128
+#define EFCT_HW_RQ_HEADER_SIZE 128
+#define EFCT_HW_RQ_HEADER_INDEX 0
+
+#define EFCT_HW_REQUE_XRI_REGTAG 65534
+
+/* Options for efct_hw_command() */
+enum efct_cmd_opts {
+ /* command executes synchronously and busy-waits for completion */
+ EFCT_CMD_POLL,
+ /* command executes asynchronously. Uses callback */
+ EFCT_CMD_NOWAIT,
+};
+
+enum efct_hw_reset {
+ EFCT_HW_RESET_FUNCTION,
+ EFCT_HW_RESET_FIRMWARE,
+ EFCT_HW_RESET_MAX
+};
+
+enum efct_hw_topo {
+ EFCT_HW_TOPOLOGY_AUTO,
+ EFCT_HW_TOPOLOGY_NPORT,
+ EFCT_HW_TOPOLOGY_LOOP,
+ EFCT_HW_TOPOLOGY_NONE,
+ EFCT_HW_TOPOLOGY_MAX
+};
+
+/* pack fw revision values into a single uint64_t */
+#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \
+ | ((uint64_t)(c) << 16) | ((uint64_t)(d)))
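+/* For example, HW_FWREV(11, 4, 204, 25) packs to 0x000b000400cc0019 */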
+
+#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)
+
+enum efct_hw_io_type {
+ EFCT_HW_ELS_REQ,
+ EFCT_HW_ELS_RSP,
+ EFCT_HW_FC_CT,
+ EFCT_HW_FC_CT_RSP,
+ EFCT_HW_BLS_ACC,
+ EFCT_HW_BLS_RJT,
+ EFCT_HW_IO_TARGET_READ,
+ EFCT_HW_IO_TARGET_WRITE,
+ EFCT_HW_IO_TARGET_RSP,
+ EFCT_HW_IO_DNRX_REQUEUE,
+ EFCT_HW_IO_MAX,
+};
+
+enum efct_hw_io_state {
+ EFCT_HW_IO_STATE_FREE,
+ EFCT_HW_IO_STATE_INUSE,
+ EFCT_HW_IO_STATE_WAIT_FREE,
+ EFCT_HW_IO_STATE_WAIT_SEC_HIO,
+};
+
+#define EFCT_TARGET_WRITE_SKIPS 1
+#define EFCT_TARGET_READ_SKIPS 2
+
+struct efct_hw;
+struct efct_io;
+
+#define EFCT_CMD_CTX_POOL_SZ 32
+/**
+ * HW command context.
+ * Stores the state for the asynchronous commands sent to the hardware.
+ */
+struct efct_command_ctx {
+ struct list_head list_entry;
+ int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
+ void *arg; /* Argument for callback */
+ /* buffer holding command / results */
+ u8 buf[SLI4_BMBX_SIZE];
+ void *ctx; /* upper layer context */
+};
+
+struct efct_hw_sgl {
+ uintptr_t addr;
+ size_t len;
+};
+
+union efct_hw_io_param_u {
+ struct sli_bls_params bls;
+ struct sli_els_params els;
+ struct sli_ct_params fc_ct;
+ struct sli_fcp_tgt_params fcp_tgt;
+};
+
+/* WQ steering mode */
+enum efct_hw_wq_steering {
+ EFCT_HW_WQ_STEERING_CLASS,
+ EFCT_HW_WQ_STEERING_REQUEST,
+ EFCT_HW_WQ_STEERING_CPU,
+};
+
+/* HW wqe object */
+struct efct_hw_wqe {
+ struct list_head list_entry;
+ bool abort_wqe_submit_needed;
+ bool send_abts;
+ u32 id;
+ u32 abort_reqtag;
+ u8 *wqebuf;
+};
+
+struct efct_hw_io;
+/* Typedef for HW "done" callback */
+typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status,
+ u32 ext, void *ul_arg);
+
+/**
+ * HW IO object.
+ *
+ * Stores the per-IO information necessary
+ * for both SLI and efct.
+ * @ref: reference counter for hw io object
+ * @state: state of IO: free, busy, wait_free
+ * @list_entry: used for busy, wait_free, free lists
+ * @wqe: Work queue object, with link for pending
+ * @hw: pointer back to hardware context
+ * @xfer_rdy: transfer ready data
+ * @type: IO type
+ * @xbusy: Exchange is active in FW
+ * @abort_in_progress: if TRUE, abort is in progress
+ * @status_saved: if TRUE, latched status should be returned
+ * @wq_class: WQ class if steering mode is Class
+ * @reqtag: request tag for this HW IO
+ * @wq: WQ assigned to the exchange
+ * @done: Function called on IO completion
+ * @arg: argument passed to IO done callback
+ * @abort_done: Function called on abort completion
+ * @abort_arg: argument passed to abort done callback
+ * @wq_steering: WQ steering mode request
+ * @saved_status: Saved status
+ * @saved_len: Status length
+ * @saved_ext: Saved extended status
+ * @eq: EQ on which this HIO came up
+ * @sge_offset: SGE data offset
+ * @def_sgl_count: Count of SGEs in default SGL
+ * @abort_reqtag: request tag for an abort of this HW IO
+ * @indicator: Exchange indicator
+ * @def_sgl: default SGL
+ * @sgl: pointer to current active SGL
+ * @sgl_count: count of SGEs in io->sgl
+ * @first_data_sge: index of first data SGE
+ * @n_sge: number of active SGEs
+ */
+struct efct_hw_io {
+ struct kref ref;
+ enum efct_hw_io_state state;
+ void (*release)(struct kref *arg);
+ struct list_head list_entry;
+ struct efct_hw_wqe wqe;
+
+ struct efct_hw *hw;
+ struct efc_dma xfer_rdy;
+ u16 type;
+ bool xbusy;
+ int abort_in_progress;
+ bool status_saved;
+ u8 wq_class;
+ u16 reqtag;
+
+ struct hw_wq *wq;
+ efct_hw_done_t done;
+ void *arg;
+ efct_hw_done_t abort_done;
+ void *abort_arg;
+
+ enum efct_hw_wq_steering wq_steering;
+
+ u32 saved_status;
+ u32 saved_len;
+ u32 saved_ext;
+
+ struct hw_eq *eq;
+ u32 sge_offset;
+ u32 def_sgl_count;
+ u32 abort_reqtag;
+ u32 indicator;
+ struct efc_dma def_sgl;
+ struct efc_dma *sgl;
+ u32 sgl_count;
+ u32 first_data_sge;
+ u32 n_sge;
+};
+
+enum efct_hw_port {
+ EFCT_HW_PORT_INIT,
+ EFCT_HW_PORT_SHUTDOWN,
+};
+
+/* Node group rpi reference */
+struct efct_hw_rpi_ref {
+ atomic_t rpi_count;
+ atomic_t rpi_attached;
+};
+
+enum efct_hw_link_stat {
+ EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
+ EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
+ EFCT_HW_LINK_STAT_CRC_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
+ EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
+ EFCT_HW_LINK_STAT_MAX,
+};
+
+enum efct_hw_host_stat {
+ EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
+ EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT,
+ EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
+ EFCT_HW_HOST_STAT_MAX,
+};
+
+enum efct_hw_state {
+ EFCT_HW_STATE_UNINITIALIZED,
+ EFCT_HW_STATE_QUEUES_ALLOCATED,
+ EFCT_HW_STATE_ACTIVE,
+ EFCT_HW_STATE_RESET_IN_PROGRESS,
+ EFCT_HW_STATE_TEARDOWN_IN_PROGRESS,
+};
+
+struct efct_hw_link_stat_counts {
+ u8 overflow;
+ u32 counter;
+};
+
+struct efct_hw_host_stat_counts {
+ u32 counter;
+};
+
+/* Structure used for the hash lookup of queue IDs */
+struct efct_queue_hash {
+ bool in_use;
+ u16 id;
+ u16 index;
+};
+
+/* WQ callback object */
+struct hw_wq_callback {
+ u16 instance_index; /* use for request tag */
+ void (*callback)(void *arg, u8 *cqe, int status);
+ void *arg;
+ struct list_head list_entry;
+};
+
+struct reqtag_pool {
+ spinlock_t lock; /* pool lock */
+ struct hw_wq_callback *tags[U16_MAX];
+ struct list_head freelist;
+};
+
+struct efct_hw_config {
+ u32 n_eq;
+ u32 n_cq;
+ u32 n_mq;
+ u32 n_rq;
+ u32 n_wq;
+ u32 n_io;
+ u32 n_sgl;
+ u32 speed;
+ u32 topology;
+ /* size of the buffers for first burst */
+ u32 rq_default_buffer_size;
+ u8 esoc;
+ /* MRQ RQ selection policy */
+ u8 rq_selection_policy;
+ /* RQ quanta if rq_selection_policy == 2 */
+ u8 rr_quanta;
+ u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+};
+
+struct efct_hw {
+ struct efct *os;
+ struct sli4 sli;
+ u16 ulp_start;
+ u16 ulp_max;
+ u32 dump_size;
+ enum efct_hw_state state;
+ bool hw_setup_called;
+ u8 sliport_healthcheck;
+ u16 fcf_indicator;
+
+ /* HW configuration */
+ struct efct_hw_config config;
+
+ /* calculated queue sizes for each type */
+ u32 num_qentries[SLI4_QTYPE_MAX];
+
+ /* Storage for SLI queue objects */
+ struct sli4_queue wq[EFCT_HW_MAX_NUM_WQ];
+ struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ];
+ u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
+ struct sli4_queue mq[EFCT_HW_MAX_NUM_MQ];
+ struct sli4_queue cq[EFCT_HW_MAX_NUM_CQ];
+ struct sli4_queue eq[EFCT_HW_MAX_NUM_EQ];
+
+ /* HW queue */
+ u32 eq_count;
+ u32 cq_count;
+ u32 mq_count;
+ u32 wq_count;
+ u32 rq_count;
+ u32 cmd_head_count;
+ struct list_head eq_list;
+
+ struct efct_queue_hash cq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash rq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash wq_hash[EFCT_HW_Q_HASH_SIZE];
+
+ /* Storage for HW queue objects */
+ struct hw_wq *hw_wq[EFCT_HW_MAX_NUM_WQ];
+ struct hw_rq *hw_rq[EFCT_HW_MAX_NUM_RQ];
+ struct hw_mq *hw_mq[EFCT_HW_MAX_NUM_MQ];
+ struct hw_cq *hw_cq[EFCT_HW_MAX_NUM_CQ];
+ struct hw_eq *hw_eq[EFCT_HW_MAX_NUM_EQ];
+ /* count of hw_rq[] entries */
+ u32 hw_rq_count;
+ /* count of multirq RQs */
+ u32 hw_mrq_count;
+
+ struct hw_wq **wq_cpu_array;
+
+ /* Sequence objects used in incoming frame processing */
+ struct efc_hw_sequence *seq_pool;
+
+ /* Maintain an ordered, linked list of outstanding HW commands. */
+ struct mutex bmbx_lock;
+ spinlock_t cmd_lock;
+ struct list_head cmd_head;
+ struct list_head cmd_pending;
+ mempool_t *cmd_ctx_pool;
+ mempool_t *mbox_rqst_pool;
+
+ struct sli4_link_event link;
+
+ /* pointer array of IO objects */
+ struct efct_hw_io **io;
+ /* array of WQE buffs mapped to IO objects */
+ u8 *wqe_buffs;
+
+ /* IO lock to synchronize list access */
+ spinlock_t io_lock;
+ /* List of IO objects in use */
+ struct list_head io_inuse;
+ /* List of IO objects waiting to be freed */
+ struct list_head io_wait_free;
+ /* List of IO objects available for allocation */
+ struct list_head io_free;
+
+ struct efc_dma loop_map;
+
+ struct efc_dma xfer_rdy;
+
+ struct efc_dma rnode_mem;
+
+ atomic_t io_alloc_failed_count;
+
+ /* stat: wq submit count */
+ u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
+ /* stat: wq complete count */
+ u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];
+
+ atomic_t send_frame_seq_id;
+ struct reqtag_pool *wq_reqtag_pool;
+};
+
+enum efct_hw_io_count_type {
+ EFCT_HW_IO_INUSE_COUNT,
+ EFCT_HW_IO_FREE_COUNT,
+ EFCT_HW_IO_WAIT_FREE_COUNT,
+ EFCT_HW_IO_N_TOTAL_IO_COUNT,
+};
+
+/* HW queue data structures */
+struct hw_eq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct efct_hw *hw;
+ struct sli4_queue *queue;
+ struct list_head cq_list;
+ u32 use_count;
+};
+
+struct hw_cq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_eq *eq;
+ struct sli4_queue *queue;
+ struct list_head q_list;
+ u32 use_count;
+};
+
+struct hw_q {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+};
+
+struct hw_mq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+
+ u32 use_count;
+};
+
+struct hw_wq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ struct efct_hw *hw;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+ u32 class;
+
+ /* WQ consumed */
+ u32 wqec_set_count;
+ u32 wqec_count;
+ u32 free_count;
+ u32 total_submit_count;
+ struct list_head pending_list;
+
+ /* HW IO allocated for use with Send Frame */
+ struct efct_hw_io *send_frame_io;
+
+ /* Stats */
+ u32 use_count;
+ u32 wq_pending_count;
+};
+
+struct hw_rq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 use_count;
+ u32 hdr_entry_size;
+ u32 first_burst_entry_size;
+ u32 data_entry_size;
+ bool is_mrq;
+ u32 base_mrq_id;
+
+ struct hw_cq *cq;
+
+ u8 filter_mask;
+ struct sli4_queue *hdr;
+ struct sli4_queue *first_burst;
+ struct sli4_queue *data;
+
+ struct efc_hw_rq_buffer *hdr_buf;
+ struct efc_hw_rq_buffer *fb_buf;
+ struct efc_hw_rq_buffer *payload_buf;
+ /* RQ tracker for this RQ */
+ struct efc_hw_sequence **rq_tracker;
+};
+
+struct efct_hw_send_frame_context {
+ struct efct_hw *hw;
+ struct hw_wq_callback *wqcb;
+ struct efct_hw_wqe wqe;
+ void (*callback)(int status, void *arg);
+ void *arg;
+
+ /* General purpose elements */
+ struct efc_hw_sequence *seq;
+ struct efc_dma payload;
+};
+
+struct efct_hw_grp_hdr {
+ u32 size;
+ __be32 magic_number;
+ u32 word2;
+ u8 rev_name[128];
+ u8 date[12];
+ u8 revision[32];
+};
+
+static inline int
+efct_hw_get_link_speed(struct efct_hw *hw)
+{
+ return hw->link.speed;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
+int efct_hw_init(struct efct_hw *hw);
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value);
+int
+efct_hw_init_queues(struct efct_hw *hw);
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw);
+uint64_t
+efct_get_wwnn(struct efct_hw *hw);
+uint64_t
+efct_get_wwpn(struct efct_hw *hw);
+
+int efct_hw_rx_allocate(struct efct_hw *hw);
+int efct_hw_rx_post(struct efct_hw *hw);
+void efct_hw_rx_free(struct efct_hw *hw);
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
+ void *arg);
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);
+
+struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
+int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
+u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg);
+int
+efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
+ struct efc_dma *sgl,
+ u32 sgl_count);
+int
+efct_hw_io_init_sges(struct efct_hw *hw,
+ struct efct_hw_io *io, enum efct_hw_io_type type);
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length);
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg);
+u32
+efct_hw_io_get_count(struct efct_hw *hw,
+ enum efct_hw_io_count_type io_count_type);
+struct efct_hw_io
+*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
+void efct_hw_io_abort_all(struct efct_hw *hw);
+void efct_hw_io_free_internal(struct kref *arg);
+
+/* HW WQ request tag API */
+struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
+void efct_hw_reqtag_pool_free(struct efct_hw *hw);
+struct hw_wq_callback
+*efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe,
+ int status), void *arg);
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
+struct hw_wq_callback
+*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);
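+
+/*
+ * Typical request-tag flow (illustrative sketch only): allocate a tag with
+ * the completion handler, carry the returned instance_index in the WQE so
+ * the completion can be routed back, then release the tag after the handler
+ * has run. my_done_cb and my_arg are placeholders.
+ *
+ *	struct hw_wq_callback *wqcb;
+ *
+ *	wqcb = efct_hw_reqtag_alloc(hw, my_done_cb, my_arg);
+ *	if (!wqcb)
+ *		return -ENOSPC;
+ *	// place wqcb->instance_index in the WQE request tag field
+ *	...
+ *	efct_hw_reqtag_free(hw, wqcb);
+ */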
+
+/* RQ completion handlers for RQ pair mode */
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw,
+ struct hw_cq *cq, u8 *cqe);
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq);
+static inline void
+efct_hw_sequence_copy(struct efc_hw_sequence *dst,
+ struct efc_hw_sequence *src)
+{
+ /* Copy src to dst, then zero out the linked list link */
+ *dst = *src;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq);
+
+static inline int
+efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ /* Only RQ pair mode is supported */
+ return efct_hw_rqpair_sequence_free(hw, seq);
+}
+
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec);
+void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq);
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid);
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid);
+int
+efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
+int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg);
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io);
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls);
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg);
+
+/* Function for retrieving link statistics */
+int
+efct_hw_get_link_stats(struct efct_hw *hw,
+ u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters,
+ void (*efct_hw_link_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg),
+ void *arg);
+/* Function for retrieving host statistics */
+int
+efct_hw_get_host_stats(struct efct_hw *hw,
+ u8 cc,
+ void (*efct_hw_host_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg),
+ void *arg);
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma,
+ u32 size, u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg);
+typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg);
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg);
+
+struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count);
+struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count);
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count);
+struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count);
+struct hw_wq
+*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count);
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count);
+void efct_hw_del_eq(struct hw_eq *eq);
+void efct_hw_del_cq(struct hw_cq *cq);
+void efct_hw_del_mq(struct hw_mq *mq);
+void efct_hw_del_wq(struct hw_wq *wq);
+void efct_hw_del_rq(struct hw_rq *rq);
+void efct_hw_queue_teardown(struct efct_hw *hw);
+void efct_hw_teardown(struct efct_hw *hw);
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset);
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg);
+
+#endif /* _EFCT_HW_H */
diff --git a/drivers/scsi/elx/efct/efct_hw_queues.c b/drivers/scsi/elx/efct/efct_hw_queues.c
new file mode 100644
index 000000000000..3a1d1a5864a3
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw_queues.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+int
+efct_hw_init_queues(struct efct_hw *hw)
+{
+ struct hw_eq *eq = NULL;
+ struct hw_cq *cq = NULL;
+ struct hw_wq *wq = NULL;
+ struct hw_mq *mq = NULL;
+
+ struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
+ u32 i = 0, j;
+
+ hw->eq_count = 0;
+ hw->cq_count = 0;
+ hw->mq_count = 0;
+ hw->wq_count = 0;
+ hw->rq_count = 0;
+ hw->hw_rq_count = 0;
+ INIT_LIST_HEAD(&hw->eq_list);
+
+ for (i = 0; i < hw->config.n_eq; i++) {
+ /* Create EQ */
+ eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
+ if (!eq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ eqs[i] = eq;
+
+ /* Create one MQ */
+ if (!i) {
+ cq = efct_hw_new_cq(eq,
+ hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
+ if (!mq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create WQ */
+ cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
+ if (!wq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create CQ set */
+ if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ /* Create RQ set */
+ if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ for (j = 0; j < i ; j++) {
+ rqs[j]->filter_mask = 0;
+ rqs[j]->is_mrq = true;
+ rqs[j]->base_mrq_id = rqs[0]->hdr->id;
+ }
+
+ hw->hw_mrq_count = i;
+
+ return 0;
+}
+
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 cpu = 0, i;
+
+ /* Init cpu_map array */
+ hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
+ GFP_KERNEL);
+ if (!hw->wq_cpu_array)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->config.n_eq; i++) {
+ const struct cpumask *maskp;
+
+ /* Get a CPU mask for all CPUs affinitized to this vector */
+ maskp = pci_irq_get_affinity(efct->pci, i);
+ if (!maskp) {
+ efc_log_debug(efct, "maskp null for vector:%d\n", i);
+ continue;
+ }
+
+ /* Loop through all CPUs associated with vector idx */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
+ hw->wq_cpu_array[cpu] = hw->hw_wq[i];
+ }
+ }
+
+ return 0;
+}
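+
+/*
+ * Illustrative sketch (an assumption, not taken from this file): once the
+ * per-CPU map is built, a submitter could pick the WQ affine to the current
+ * CPU and fall back to the first WQ when no vector covers that CPU.
+ *
+ *	struct hw_wq *wq = hw->wq_cpu_array[raw_smp_processor_id()];
+ *
+ *	if (!wq)
+ *		wq = hw->hw_wq[0];
+ */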
+
+struct hw_eq *
+efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
+{
+ struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+
+ if (!eq)
+ return NULL;
+
+ eq->type = SLI4_QTYPE_EQ;
+ eq->hw = hw;
+ eq->entry_count = entry_count;
+ eq->instance = hw->eq_count++;
+ eq->queue = &hw->eq[eq->instance];
+ INIT_LIST_HEAD(&eq->cq_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
+ NULL)) {
+ efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
+ kfree(eq);
+ return NULL;
+ }
+
+ sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
+ hw->hw_eq[eq->instance] = eq;
+ INIT_LIST_HEAD(&eq->list_entry);
+ list_add_tail(&eq->list_entry, &hw->eq_list);
+ efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
+ eq->queue->id, eq->entry_count);
+ return eq;
+}
+
+struct hw_cq *
+efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
+{
+ struct efct_hw *hw = eq->hw;
+ struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+
+ if (!cq)
+ return NULL;
+
+ cq->eq = eq;
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = eq->hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+
+ INIT_LIST_HEAD(&cq->q_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
+ cq->entry_count, eq->queue)) {
+ efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
+ eq->instance, eq->entry_count);
+ kfree(cq);
+ return NULL;
+ }
+
+ hw->hw_cq[cq->instance] = cq;
+ INIT_LIST_HEAD(&cq->list_entry);
+ list_add_tail(&cq->list_entry, &eq->cq_list);
+ efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
+ cq->queue->id, cq->entry_count);
+ return cq;
+}
+
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count)
+{
+ u32 i;
+ struct efct_hw *hw = eqs[0]->hw;
+ struct sli4 *sli4 = &hw->sli;
+ struct hw_cq *cq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
+ struct sli4_queue *assoc[SLI4_MAX_CQ_SET_COUNT];
+
+ /* Initialise CQS pointers to NULL */
+ for (i = 0; i < num_cqs; i++)
+ cqs[i] = NULL;
+
+ for (i = 0; i < num_cqs; i++) {
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ goto error;
+
+ cqs[i] = cq;
+ cq->eq = eqs[i];
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+ qs[i] = cq->queue;
+ assoc[i] = eqs[i]->queue;
+ INIT_LIST_HEAD(&cq->q_list);
+ }
+
+ if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assoc)) {
+ efc_log_err(hw->os, "Failed to create CQ Set.\n");
+ goto error;
+ }
+
+ for (i = 0; i < num_cqs; i++) {
+ hw->hw_cq[cqs[i]->instance] = cqs[i];
+ INIT_LIST_HEAD(&cqs[i]->list_entry);
+ list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++) {
+ kfree(cqs[i]);
+ cqs[i] = NULL;
+ }
+ return -EIO;
+}
+
+struct hw_mq *
+efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ mq->cq = cq;
+ mq->type = SLI4_QTYPE_MQ;
+ mq->instance = cq->eq->hw->mq_count++;
+ mq->entry_count = entry_count;
+ mq->entry_size = EFCT_HW_MQ_DEPTH;
+ mq->queue = &hw->mq[mq->instance];
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
+ cq->queue)) {
+ efc_log_err(hw->os, "MQ allocation failure\n");
+ kfree(mq);
+ return NULL;
+ }
+
+ hw->hw_mq[mq->instance] = mq;
+ INIT_LIST_HEAD(&mq->list_entry);
+ list_add_tail(&mq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
+ mq->queue->id, mq->entry_count);
+ return mq;
+}
+
+struct hw_wq *
+efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+
+ if (!wq)
+ return NULL;
+
+ wq->hw = cq->eq->hw;
+ wq->cq = cq;
+ wq->type = SLI4_QTYPE_WQ;
+ wq->instance = cq->eq->hw->wq_count++;
+ wq->entry_count = entry_count;
+ wq->queue = &hw->wq[wq->instance];
+ wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
+ wq->wqec_count = wq->wqec_set_count;
+ wq->free_count = wq->entry_count - 1;
+ INIT_LIST_HEAD(&wq->pending_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
+ wq->entry_count, cq->queue)) {
+ efc_log_err(hw->os, "WQ allocation failure\n");
+ kfree(wq);
+ return NULL;
+ }
+
+ hw->hw_wq[wq->instance] = wq;
+ INIT_LIST_HEAD(&wq->list_entry);
+ list_add_tail(&wq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
+ wq->instance, wq->queue->id, wq->entry_count, wq->class);
+ return wq;
+}
+
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count)
+{
+ struct efct_hw *hw = cqs[0]->eq->hw;
+ struct hw_rq *rq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
+ u32 i, q_count, size;
+
+ /* Initialise RQS pointers */
+ for (i = 0; i < num_rq_pairs; i++)
+ rqs[i] = NULL;
+
+ /*
+ * Allocate an RQ object SET, where each element in set
+ * encapsulates 2 SLI queues (for rq pair)
+ */
+ for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ goto error;
+
+ rqs[i] = rq;
+ rq->instance = hw->hw_rq_count++;
+ rq->cq = cqs[i];
+ rq->type = SLI4_QTYPE_RQ;
+ rq->entry_count = entry_count;
+
+ /* Header RQ */
+ rq->hdr = &hw->rq[hw->rq_count];
+ rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count] = rq->hdr;
+
+ /* Data RQ */
+ rq->data = &hw->rq[hw->rq_count];
+ rq->data_entry_size = hw->config.rq_default_buffer_size;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count + 1] = rq->data;
+
+ rq->rq_tracker = NULL;
+ }
+
+ if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
+ cqs[0]->queue->id,
+ rqs[0]->entry_count,
+ rqs[0]->hdr_entry_size,
+ rqs[0]->data_entry_size)) {
+ efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
+ cqs[0]->queue->id);
+ goto error;
+ }
+
+ for (i = 0; i < num_rq_pairs; i++) {
+ hw->hw_rq[rqs[i]->instance] = rqs[i];
+ INIT_LIST_HEAD(&rqs[i]->list_entry);
+ list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
+ size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
+ rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
+ if (!rqs[i]->rq_tracker)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rq_pairs; i++) {
+ if (rqs[i]) {
+ kfree(rqs[i]->rq_tracker);
+ kfree(rqs[i]);
+ }
+ }
+
+ return -EIO;
+}
+
+void
+efct_hw_del_eq(struct hw_eq *eq)
+{
+ struct hw_cq *cq;
+ struct hw_cq *cq_next;
+
+ if (!eq)
+ return;
+
+ list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
+ efct_hw_del_cq(cq);
+ list_del(&eq->list_entry);
+ eq->hw->hw_eq[eq->instance] = NULL;
+ kfree(eq);
+}
+
+void
+efct_hw_del_cq(struct hw_cq *cq)
+{
+ struct hw_q *q;
+ struct hw_q *q_next;
+
+ if (!cq)
+ return;
+
+ list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
+ switch (q->type) {
+ case SLI4_QTYPE_MQ:
+ efct_hw_del_mq((struct hw_mq *)q);
+ break;
+ case SLI4_QTYPE_WQ:
+ efct_hw_del_wq((struct hw_wq *)q);
+ break;
+ case SLI4_QTYPE_RQ:
+ efct_hw_del_rq((struct hw_rq *)q);
+ break;
+ default:
+ break;
+ }
+ }
+ list_del(&cq->list_entry);
+ cq->eq->hw->hw_cq[cq->instance] = NULL;
+ kfree(cq);
+}
+
+void
+efct_hw_del_mq(struct hw_mq *mq)
+{
+ if (!mq)
+ return;
+
+ list_del(&mq->list_entry);
+ mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
+ kfree(mq);
+}
+
+void
+efct_hw_del_wq(struct hw_wq *wq)
+{
+ if (!wq)
+ return;
+
+ list_del(&wq->list_entry);
+ wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
+ kfree(wq);
+}
+
+void
+efct_hw_del_rq(struct hw_rq *rq)
+{
+ struct efct_hw *hw = NULL;
+
+ if (!rq)
+ return;
+ /* Free RQ tracker */
+ kfree(rq->rq_tracker);
+ rq->rq_tracker = NULL;
+ list_del(&rq->list_entry);
+ hw = rq->cq->eq->hw;
+ hw->hw_rq[rq->instance] = NULL;
+ kfree(rq);
+}
+
+void
+efct_hw_queue_teardown(struct efct_hw *hw)
+{
+ struct hw_eq *eq;
+ struct hw_eq *eq_next;
+
+ if (!hw->eq_list.next)
+ return;
+
+ list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
+ efct_hw_del_eq(eq);
+}
+
+static inline int
+efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
+{
+ return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
+}
+
+static struct efc_hw_sequence *
+efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[rqindex];
+ struct efc_hw_sequence *seq = NULL;
+ struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ unsigned long flags = 0;
+
+ if (bufindex >= rq_hdr->length) {
+ efc_log_err(hw->os,
+ "RQidx %d bufidx %d exceed ring len %d for id %d\n",
+ rqindex, bufindex, rq_hdr->length, rq_hdr->id);
+ return NULL;
+ }
+
+ /* rq_hdr lock also covers rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ seq = rq->rq_tracker[bufindex];
+ rq->rq_tracker[bufindex] = NULL;
+
+ if (!seq) {
+ efc_log_err(hw->os,
+ "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
+ rqindex, bufindex, rq_hdr->index);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return seq;
+}
+
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe)
+{
+ u16 rq_id;
+ u32 index;
+ int rqindex;
+ int rq_status;
+ u32 h_len;
+ u32 p_len;
+ struct efc_hw_sequence *seq;
+ struct hw_rq *rq;
+
+ rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
+ &rq_id, &index);
+ if (rq_status != 0) {
+ switch (rq_status) {
+ case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
+ case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
+ /* just get RQ buffer then return to chip */
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os,
+ "status=%#x: lookup fail id=%#x\n",
+ rq_status, rq_id);
+ break;
+ }
+
+ /* get RQ buffer */
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+
+ /* return to chip */
+ if (efct_hw_rqpair_sequence_free(hw, seq)) {
+ efc_log_debug(hw->os,
+ "status=%#x,fail rtrn buf to RQ\n",
+ rq_status);
+ break;
+ }
+ break;
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
+			/*
+			 * Since the RQ buffers were not consumed, they
+			 * cannot be returned to the chip.
+			 */
+ efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
+ rq_status);
+ fallthrough;
+ default:
+ break;
+ }
+ return -EIO;
+ }
+
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
+ rq_id);
+ return -EIO;
+ }
+
+ rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ rq->use_count++;
+
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+ if (WARN_ON(!seq))
+ return -EIO;
+
+ seq->hw = hw;
+
+ sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
+ seq->header->dma.len = h_len;
+ seq->payload->dma.len = p_len;
+ seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
+ seq->hw_priv = cq->eq;
+
+ efct_unsolicited_cb(hw->os, seq);
+
+ return 0;
+}
+
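+/*
+ * Re-post a header/payload buffer pair to the RQ pair and record the
+ * sequence in the tracker slot for the returned queue index.
+ */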
+static int
+efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
+ struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
+ u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
+ struct hw_rq *rq = hw->hw_rq[hw_rq_index];
+ u32 phys_hdr[2];
+ u32 phys_payload[2];
+ int qindex_hdr;
+ int qindex_payload;
+ unsigned long flags = 0;
+
+ /* Update the RQ verification lookup tables */
+ phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
+ phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
+ phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
+ phys_payload[1] = lower_32_bits(seq->payload->dma.phys);
+
+ /* rq_hdr lock also covers payload / header->rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ /*
+ * Note: The header must be posted last for buffer pair mode because
+ * posting on the header queue posts the payload queue as well.
+ * We do not ring the payload queue independently in RQ pair mode.
+ */
+ qindex_payload = sli_rq_write(&hw->sli, rq_payload,
+ (void *)phys_payload);
+ qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
+ if (qindex_hdr < 0 ||
+ qindex_payload < 0) {
+ efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return -EIO;
+ }
+
+ /* ensure the indexes are the same */
+ WARN_ON(qindex_hdr != qindex_payload);
+
+ /* Update the lookup table */
+ if (!rq->rq_tracker[qindex_hdr]) {
+ rq->rq_tracker[qindex_hdr] = seq;
+ } else {
+ efc_log_debug(hw->os,
+ "expected rq_tracker[%d][%d] buffer to be NULL\n",
+ hw_rq_index, qindex_hdr);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return 0;
+}
+
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ int rc = 0;
+
+	/*
+	 * Post the data buffer first: in RQ pair mode, ringing the doorbell
+	 * of the header ring posts the data buffer as well.
+	 */
+ if (efct_hw_rqpair_put(hw, seq)) {
+ efc_log_err(hw->os, "error writing buffers\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_rqpair_sequence_free(&efct->hw, seq);
+}
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
new file mode 100644
index 000000000000..71e21655916a
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+
+struct efct_io_pool {
+ struct efct *efct;
+ spinlock_t lock; /* IO pool lock */
+ u32 io_num_ios; /* Total IOs allocated */
+ struct efct_io *ios[EFCT_NUM_SCSI_IOS];
+ struct list_head freelist;
+
+};
+
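+/*
+ * Pre-allocate EFCT_NUM_SCSI_IOS IO objects, each with a DMA response buffer
+ * and an SGL of num_sgl entries, and park them all on the pool free list.
+ */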
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl)
+{
+ u32 i = 0;
+ struct efct_io_pool *io_pool;
+ struct efct_io *io;
+
+ /* Allocate the IO pool */
+ io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
+ if (!io_pool)
+ return NULL;
+
+ io_pool->efct = efct;
+ INIT_LIST_HEAD(&io_pool->freelist);
+ /* initialize IO pool lock */
+ spin_lock_init(&io_pool->lock);
+
+ for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io)
+ break;
+
+ io_pool->io_num_ios++;
+ io_pool->ios[i] = io;
+ io->tag = i;
+ io->instance_index = i;
+
+ /* Allocate a response buffer */
+ io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
+ io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
+ io->rspbuf.size,
+ &io->rspbuf.phys, GFP_DMA);
+ if (!io->rspbuf.virt) {
+ efc_log_err(efct, "dma_alloc rspbuf failed\n");
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ /* Allocate SGL */
+		io->sgl = kcalloc(num_sgl, sizeof(*io->sgl), GFP_KERNEL);
+		if (!io->sgl) {
+			efct_io_pool_free(io_pool);
+			return NULL;
+		}
+
+ io->sgl_allocated = num_sgl;
+ io->sgl_count = 0;
+
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &io_pool->freelist);
+ }
+
+ return io_pool;
+}
+
+int
+efct_io_pool_free(struct efct_io_pool *io_pool)
+{
+ struct efct *efct;
+ u32 i;
+ struct efct_io *io;
+
+ if (io_pool) {
+ efct = io_pool->efct;
+
+ for (i = 0; i < io_pool->io_num_ios; i++) {
+ io = io_pool->ios[i];
+ if (!io)
+ continue;
+
+ kfree(io->sgl);
+ dma_free_coherent(&efct->pci->dev,
+ io->rspbuf.size, io->rspbuf.virt,
+ io->rspbuf.phys);
+ memset(&io->rspbuf, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(io_pool);
+ efct->xport->io_pool = NULL;
+ }
+
+ return 0;
+}
+
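+/* Take an IO object off the pool free list and reset its per-command state */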
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
+{
+ struct efct_io *io = NULL;
+ struct efct *efct;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+
+ if (!list_empty(&io_pool->freelist)) {
+ io = list_first_entry(&io_pool->freelist, struct efct_io,
+ list_entry);
+ list_del_init(&io->list_entry);
+ }
+
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (!io)
+ return NULL;
+
+ io->io_type = EFCT_IO_TYPE_MAX;
+ io->hio_type = EFCT_HW_IO_MAX;
+ io->hio = NULL;
+ io->transferred = 0;
+ io->efct = efct;
+ io->timeout = 0;
+ io->sgl_count = 0;
+ io->tgt_task_tag = 0;
+ io->init_task_tag = 0;
+ io->hw_tag = 0;
+ io->display_name = "pending";
+ io->seq_init = 0;
+ io->io_free = 0;
+ io->release = NULL;
+ atomic_add_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_alloc);
+ return io;
+}
+
+/* Free an object used to track an IO */
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
+{
+ struct efct *efct;
+ struct efct_hw_io *hio = NULL;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+ hio = io->hio;
+ io->hio = NULL;
+ io->io_free = 1;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add(&io->list_entry, &io_pool->freelist);
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (hio)
+ efct_hw_io_free(&efct->hw, hio);
+
+ atomic_sub_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_free);
+}
+
+/* Find an I/O given its node, OX_ID and (optional) RX_ID */
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id)
+{
+ struct efct_io *io = NULL;
+ unsigned long flags = 0;
+ u8 found = false;
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_for_each_entry(io, &node->active_ios, list_entry) {
+ if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
+ (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
+ if (kref_get_unless_zero(&io->ref))
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return found ? io : NULL;
+}
diff --git a/drivers/scsi/elx/efct/efct_io.h b/drivers/scsi/elx/efct/efct_io.h
new file mode 100644
index 000000000000..bb0f51811a7c
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_IO_H__)
+#define __EFCT_IO_H__
+
+#include "efct_lio.h"
+
+#define EFCT_LOG_ENABLE_IO_ERRORS(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 6)) != 0) : 0)
+
+#define io_error_log(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_IO_ERRORS(io->efct)) \
+ efc_log_warn(io->efct, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define SCSI_CMD_BUF_LENGTH 48
+#define SCSI_RSP_BUF_LENGTH (FCP_RESP_WITH_EXT + SCSI_SENSE_BUFFERSIZE)
+#define EFCT_NUM_SCSI_IOS 8192
+
+enum efct_io_type {
+ EFCT_IO_TYPE_IO = 0,
+ EFCT_IO_TYPE_ELS,
+ EFCT_IO_TYPE_CT,
+ EFCT_IO_TYPE_CT_RESP,
+ EFCT_IO_TYPE_BLS_RESP,
+ EFCT_IO_TYPE_ABORT,
+
+ EFCT_IO_TYPE_MAX,
+};
+
+enum efct_els_state {
+ EFCT_ELS_REQUEST = 0,
+ EFCT_ELS_REQUEST_DELAYED,
+ EFCT_ELS_REQUEST_DELAY_ABORT,
+ EFCT_ELS_REQ_ABORT,
+ EFCT_ELS_REQ_ABORTED,
+ EFCT_ELS_ABORT_IO_COMPL,
+};
+
+/**
+ * struct efct_io - SCSI target IO object
+ * @efct: pointer back to efct
+ * @instance_index: unique instance index value
+ * @display_name: IO display name
+ * @node: pointer to node
+ * @list_entry: io list entry
+ * @io_pending_link: io pending list entry
+ * @ref: reference counter
+ * @release: release callback function
+ * @init_task_tag: initiator task tag (OX_ID) for back-end and SCSI logging
+ * @tgt_task_tag: target task tag (RX_ID) for back-end and SCSI logging
+ * @hw_tag: HW layer unique IO id
+ * @tag: unique IO identifier
+ * @sgl: SGL
+ * @sgl_allocated: Number of allocated SGEs
+ * @sgl_count: Number of SGEs in this SGL
+ * @tgt_io: backend target private IO data
+ * @exp_xfer_len: expected data transfer length, based on FC header
+ * @hw_priv: Declarations private to HW/SLI
+ * @io_type: indicates what this struct efct_io structure is used for
+ * @hio: hw io object
+ * @transferred: Number of bytes transferred
+ * @auto_resp: set if auto_trsp was set
+ * @low_latency: set if low latency request
+ * @wq_steering: selected WQ steering request
+ * @wq_class: selected WQ class if steering is class
+ * @xfer_req: transfer size for current request
+ * @scsi_tgt_cb: target callback function
+ * @scsi_tgt_cb_arg: target callback function argument
+ * @abort_cb: abort callback function
+ * @abort_cb_arg: abort callback function argument
+ * @bls_cb: BLS callback function
+ * @bls_cb_arg: BLS callback function argument
+ * @tmf_cmd: TMF command being processed
+ * @abort_rx_id: rx_id from the ABTS that initiated the command abort
+ * @cmd_tgt: True if this is a Target command
+ * @send_abts: when aborting, indicates ABTS is to be sent
+ * @cmd_ini: True if this is an Initiator command
+ * @seq_init: True if local node has sequence initiative
+ * @iparam: iparams for hw io send call
+ * @hio_type: HW IO type
+ * @wire_len: wire length
+ * @hw_cb: saved HW callback
+ * @io_to_abort: for abort handling, pointer to IO to abort
+ * @rspbuf: SCSI Response buffer
+ * @timeout: Timeout value in seconds for this IO
+ * @cs_ctl: CS_CTL priority for this IO
+ * @io_free: Is io object in freelist
+ * @app_id: application id
+ */
+struct efct_io {
+ struct efct *efct;
+ u32 instance_index;
+ const char *display_name;
+ struct efct_node *node;
+
+ struct list_head list_entry;
+ struct list_head io_pending_link;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ u32 init_task_tag;
+ u32 tgt_task_tag;
+ u32 hw_tag;
+ u32 tag;
+ struct efct_scsi_sgl *sgl;
+ u32 sgl_allocated;
+ u32 sgl_count;
+ struct efct_scsi_tgt_io tgt_io;
+ u32 exp_xfer_len;
+
+ void *hw_priv;
+
+ enum efct_io_type io_type;
+ struct efct_hw_io *hio;
+ size_t transferred;
+
+ bool auto_resp;
+ bool low_latency;
+ u8 wq_steering;
+ u8 wq_class;
+ u64 xfer_req;
+ efct_scsi_io_cb_t scsi_tgt_cb;
+ void *scsi_tgt_cb_arg;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+ efct_scsi_io_cb_t bls_cb;
+ void *bls_cb_arg;
+ enum efct_scsi_tmf_cmd tmf_cmd;
+ u16 abort_rx_id;
+
+ bool cmd_tgt;
+ bool send_abts;
+ bool cmd_ini;
+ bool seq_init;
+ union efct_hw_io_param_u iparam;
+ enum efct_hw_io_type hio_type;
+ u64 wire_len;
+ void *hw_cb;
+
+ struct efct_io *io_to_abort;
+
+ struct efc_dma rspbuf;
+ u32 timeout;
+ u8 cs_ctl;
+ u8 io_free;
+ u32 app_id;
+};
+
+struct efct_io_cb_arg {
+ int status;
+ int ext_status;
+ void *app;
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl);
+int
+efct_io_pool_free(struct efct_io_pool *io_pool);
+u32
+efct_io_pool_allocated(struct efct_io_pool *io_pool);
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool);
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io);
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id);
+#endif /* __EFCT_IO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
new file mode 100644
index 000000000000..e0d798d6baee
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -0,0 +1,1698 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "efct_driver.h"
+#include "efct_lio.h"
+
+/*
+ * lio_wq is used to call into the LIO backend during creation or deletion of
+ * sessions. Because it is a single-threaded workqueue, it serializes session
+ * management.
+ */
+static struct workqueue_struct *lio_wq;
+
+static int
+efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
+{
+ u8 a[8];
+
+ put_unaligned_be64(wwn, a);
+ return snprintf(str, len, "%s%8phC", pre, a);
+}
+
+static int
+efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
+{
+ int num;
+ u8 b[8];
+
+ if (npiv) {
+ num = sscanf(name,
+ "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ } else {
+ num = sscanf(name,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ }
+
+ if (num != 8)
+ return -EINVAL;
+
+ *wwp = get_unaligned_be64(b);
+ return 0;
+}
+
+static int
+efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
+{
+ unsigned int cnt = size;
+ int rc;
+
+ *wwpn = *wwnn = 0;
+ if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
+ if (rc)
+ return rc;
+
+ rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static ssize_t
+efct_lio_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (!tpg->nport || !tpg->nport->efct) {
+ pr_err("%s: Unable to find EFCT device\n", __func__);
+ return -EINVAL;
+ }
+
+ efct = tpg->nport->efct;
+ efc = efct->efcport;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (op == 1) {
+ int ret;
+
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
+ if (ret) {
+ efct->tgt_efct.lio_nport = NULL;
+ efc_log_debug(efct, "cannot bring port online\n");
+ return ret;
+ }
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain && efc->domain->nport)
+ efct_scsi_tgt_del_nport(efc, efc->domain->nport);
+
+ tpg->enabled = false;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct_lio_vport *lio_vport = tpg->vport;
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (!lio_vport) {
+ pr_err("Unable to find vport\n");
+ return -EINVAL;
+ }
+
+ efct = lio_vport->efct;
+ efc = efct->efcport;
+
+ if (op == 1) {
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain) {
+ int ret;
+
+ ret = efc_nport_vport_new(efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn,
+ U32_MAX, false, true,
+ NULL, NULL);
+ if (ret != 0) {
+ efc_log_err(efct, "Failed to create Vport\n");
+ return ret;
+ }
+ return count;
+ }
+
+ if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
+ lio_vport->npiv_wwpn, U32_MAX,
+ false, true, NULL, NULL)))
+ return -ENOMEM;
+
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ tpg->enabled = false;
+		/* if a domain exists, remove the NPIV port from it */
+ if (efc->domain) {
+ efc_nport_vport_del(efct->efcport, efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn);
+ return count;
+ }
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->nport->wwpn_str;
+}
+
+static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->vport->wwpn_str;
+}
+
+static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
+ return target_put_sess_cmd(se_cmd);
+}
+
+static int
+efct_lio_abort_tgt_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
+ return 0;
+}
+
+static void
+efct_lio_aborted_task(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);
+
+ if (ocp->rsp_sent)
+ return;
+
+ /* command has been aborted, cleanup here */
+ ocp->aborting = true;
+ ocp->err = EFCT_SCSI_STATUS_ABORTED;
+ /* terminate the exchange */
+ efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
+}
+
+static void efct_lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct *efct = io->efct;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
+ efct_scsi_io_complete(io);
+ atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
+}
+
+static void efct_lio_close_session(struct se_session *se_sess)
+{
+ struct efc_node *node = se_sess->fabric_sess_ptr;
+
+ pr_debug("se_sess=%p node=%p", se_sess, node);
+
+ if (!node) {
+ pr_debug("node is NULL");
+ return;
+ }
+
+ efc_node_post_shutdown(node, NULL);
+}
+
+static u32 efct_lio_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static int efct_lio_get_cmd_state(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ if (!io)
+ return 0;
+
+ return io->tgt_io.state;
+}
+
+static int
+efct_lio_sg_map(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ ocp->seg_map_cnt = pci_map_sg(io->efct->pci, cmd->t_data_sg,
+ cmd->t_data_nents, cmd->data_direction);
+ if (ocp->seg_map_cnt == 0)
+ return -EFAULT;
+ return 0;
+}
+
+static void
+efct_lio_sg_unmap(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
+ return;
+
+ pci_unmap_sg(io->efct->pci, cmd->t_data_sg,
+ ocp->seg_map_cnt, cmd->data_direction);
+ ocp->seg_map_cnt = 0;
+}
+
+static int
+efct_lio_status_done(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ if (ocp->seg_map_cnt)
+ efct_lio_sg_unmap(io);
+
+ efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
+ scsi_status, ocp->err, flags, ocp->ddir);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg);
+
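+/*
+ * LIO write_pending callback: map the command scatterlist on first entry and
+ * request write data from the initiator in chunks of up to io->sgl_allocated
+ * segments; efct_lio_datamove_done() continues the transfer as needed.
+ */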
+static int
+efct_lio_write_pending(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg;
+ u32 flags = 0, cnt, curcnt;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
+ efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
+ cmd->transport_state, cmd->se_cmd_flags);
+
+ if (ocp->seg_cnt == 0) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
+ return -EFAULT;
+ }
+ }
+ curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
+ curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
+ /* find current sg */
+ for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
+ sg = sg_next(sg))
+ ;/* do nothing */
+
+ for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ sgl[cnt].len = sg_dma_len(sg);
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ }
+
+ if (ocp->cur_seg == ocp->seg_cnt)
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+
+ return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
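+/*
+ * LIO queue_data_in callback: for a zero-length command just send status;
+ * otherwise stream read data to the initiator in chunks of up to
+ * io->sgl_allocated segments, clamped to cmd->data_length.
+ */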
+static int
+efct_lio_queue_data_in(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg = NULL;
+ uint flags = 0, cnt = 0, curcnt = 0;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);
+
+ if (ocp->seg_cnt == 0) {
+ if (cmd->data_length) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io,
+ "efct_lio_sg_map failed\n");
+ return -EAGAIN;
+ }
+ } else {
+ /* If command length is 0, send the response status */
+ struct efct_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ efct_lio_io_printf(io,
+ "cmd : %p length 0, send status\n",
+ cmd);
+ return efct_scsi_send_resp(io, 0, &rsp,
+ efct_lio_status_done, NULL);
+ }
+ }
+ curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);
+
+ while (cnt < curcnt) {
+ sg = &cmd->t_data_sg[ocp->cur_seg];
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
+ sgl[cnt].len = cmd->data_length - ocp->transferred_len;
+ else
+ sgl[cnt].len = sg_dma_len(sg);
+
+ ocp->transferred_len += sgl[cnt].len;
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ cnt++;
+ if (ocp->transferred_len == cmd->data_length)
+ break;
+ }
+
+ if (ocp->transferred_len == cmd->data_length) {
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+ ocp->seg_cnt = ocp->cur_seg;
+ }
+
+ /* If there is residual, disable Auto Good Response */
+ if (cmd->residual_count)
+ flags |= EFCT_SCSI_NO_AUTO_RESPONSE;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);
+
+ return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
+static void
+efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &io->tgt_io.cmd;
+ int rc;
+
+ if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
+ ocp->rsp_sent = true;
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return;
+ }
+
+ /* send check condition if an error occurred */
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc != 0) {
+ efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ } else {
+ ocp->rsp_sent = true;
+ }
+}
+
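+/*
+ * Data-phase completion: continue a multi-chunk transfer if segments remain;
+ * otherwise hand a finished write to the target core for execution, or send
+ * the SCSI response for a finished read.
+ */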
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
+ if (ocp->seg_map_cnt) {
+ if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
+ ocp->cur_seg < ocp->seg_cnt) {
+ int rc;
+
+ efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
+ ocp->cur_seg);
+ if (ocp->ddir == DMA_TO_DEVICE)
+ rc = efct_lio_write_pending(&ocp->cmd);
+ else
+ rc = efct_lio_queue_data_in(&ocp->cmd);
+ if (!rc)
+ return 0;
+
+ ocp->err = EFCT_SCSI_STATUS_ERROR;
+ efct_lio_io_printf(io, "could not continue command\n");
+ }
+ efct_lio_sg_unmap(io);
+ }
+
+ if (io->tgt_io.aborting) {
+ efct_lio_io_printf(io, "IO done aborted\n");
+ return 0;
+ }
+
+ if (ocp->ddir == DMA_TO_DEVICE) {
+ efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
+ io->tgt_io.cmd.transport_state);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ transport_generic_request_failure(&io->tgt_io.cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
+ } else {
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_EXECUTE_CMD);
+ target_execute_cmd(&io->tgt_io.cmd);
+ }
+ } else {
+ efct_lio_send_resp(io, scsi_status, flags);
+ }
+ return 0;
+}
+
+static int
+efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
+ &io->tgt_io.cmd, scsi_status, flags);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_null_tmf_done(struct efct_io *tmfio,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
+ &tmfio->tgt_io.cmd, scsi_status, flags);
+
+ /* free struct efct_io only, no active se_cmd */
+ efct_scsi_io_complete(tmfio);
+ return 0;
+}
+
+static int
+efct_lio_queue_status(struct se_cmd *cmd)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ int rc = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
+ efct_lio_io_printf(io,
+ "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
+ cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
+ cmd->scsi_sense_length);
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+	/* Check for residual underrun or overrun; a negative residual marks
+	 * an overrun so the HW layer can tell the two apart.
+	 */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc == 0)
+ ocp->rsp_sent = true;
+ return rc;
+}
+
+static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
+ struct se_tmr_req *se_tmr = cmd->se_tmr_req;
+ u8 rspcode;
+
+ efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
+ cmd, se_tmr->function, se_tmr->response);
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
+ break;
+ }
+ efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
+}
+
+static struct efct *efct_find_wwpn(u64 wwpn)
+{
+ struct efct *efct;
+
+ /* Search for the HBA that has this WWPN */
+	list_for_each_entry(efct, &efct_devices, list_entry) {
+		if (wwpn == efct_get_wwpn(&efct->hw))
+ return efct;
+ }
+
+ return NULL;
+}
+
+static struct se_wwn *
+efct_lio_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_nport *lio_nport;
+ struct efct *efct;
+ int ret;
+ u64 wwpn;
+
+ ret = efct_lio_parse_wwn(name, &wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
+ if (!lio_nport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_nport->efct = efct;
+ lio_nport->wwpn = wwpn;
+ efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
+ "naa.", wwpn);
+ efct->tgt_efct.lio_nport = lio_nport;
+
+ return &lio_nport->nport_wwn;
+}
+
+static struct se_wwn *
+efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_vport *lio_vport;
+ struct efct *efct;
+ int ret = -1;
+ u64 p_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, *pbuf, tmp[128];
+ struct efct_lio_vport_list_t *vport_list;
+ struct fc_vport *new_fc_vport;
+ struct fc_vport_identifiers vport_id;
+ unsigned long flags = 0;
+
+ snprintf(tmp, sizeof(tmp), "%s", name);
+ pbuf = &tmp[0];
+
+ p = strsep(&pbuf, "@");
+
+ if (!p || !pbuf) {
+ pr_err("Unable to find separator operator(@)\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
+ &npiv_wwnn);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(p_wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
+ if (!lio_vport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_vport->efct = efct;
+ lio_vport->wwpn = p_wwpn;
+ lio_vport->npiv_wwpn = npiv_wwpn;
+ lio_vport->npiv_wwnn = npiv_wwnn;
+
+ efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
+ "naa.", npiv_wwpn);
+
+ vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
+ if (!vport_list) {
+ kfree(lio_vport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vport_list->lio_vport = lio_vport;
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
+ if (!new_fc_vport) {
+ efc_log_err(efct, "fc_vport_create failed\n");
+ kfree(lio_vport);
+ kfree(vport_list);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lio_vport->fc_vport = new_fc_vport;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ INIT_LIST_HEAD(&vport_list->list_entry);
+ list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+
+ return &lio_vport->vport_wwn;
+}
+
+static void
+efct_lio_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct *efct = lio_nport->efct;
+
+ /* only physical nport should exist, free lio_nport allocated
+ * in efct_lio_make_nport.
+ */
+ kfree(efct->tgt_efct.lio_nport);
+ efct->tgt_efct.lio_nport = NULL;
+}
+
+static void
+efct_lio_npiv_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_vport_list_t *vport, *next_vport;
+ struct efct *efct = lio_vport->efct;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
+ if (lio_vport->fc_vport)
+ fc_vport_terminate(lio_vport->fc_vport);
+
+ list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
+ list_entry) {
+ if (vport->lio_vport == lio_vport) {
+ list_del(&vport->list_entry);
+ kfree(vport->lio_vport);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+}
+
+static struct se_portal_group *
+efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->nport = lio_nport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ efct = lio_nport->efct;
+ efct->tgt_efct.tpg = tpg;
+ efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
+
+ xa_init(&efct->lookup);
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ struct efct *efct = tpg->nport->efct;
+
+ efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
+ tpg->nport->efct->tgt_efct.tpg = NULL;
+ core_tpg_deregister(se_tpg);
+ xa_destroy(&efct->lookup);
+ kfree(tpg);
+}
+
+static struct se_portal_group *
+efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ efct = lio_vport->efct;
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (n != 1) {
+		efc_log_err(efct, "Invalid tpgt index: %lu provided\n", n);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->vport = lio_vport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ lio_vport->tpg = tpg;
+ efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
+
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
+ tpg->tpgt);
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static int
+efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
+{
+ struct efct_lio_nacl *nacl;
+ u64 wwnn;
+
+ if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
+ return -EINVAL;
+
+ nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+
+ efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
+ return 0;
+}
+
+static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static int
+efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
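+/* Find the vport TPG whose NPIV WWPN matches the WWPN of the node's nport */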
+static struct efct_lio_tpg *
+efct_get_vport_tpg(struct efc_node *node)
+{
+ struct efct *efct;
+ u64 wwpn = node->nport->wwpn;
+ struct efct_lio_vport_list_t *vport, *next;
+ struct efct_lio_vport *lio_vport = NULL;
+ struct efct_lio_tpg *tpg = NULL;
+ unsigned long flags = 0;
+
+ efct = node->efc->base;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
+ list_entry) {
+ lio_vport = vport->lio_vport;
+ if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
+ efc_log_debug(efct, "found tpg on vport\n");
+ tpg = lio_vport->tpg;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+ return tpg;
+}
+
+static void
+_efct_tgt_node_free(struct kref *arg)
+{
+ struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
+ struct efc_node *node = tgt_node->node;
+
+ efc_scsi_del_initiator_complete(node->efc, node);
+ kfree(tgt_node);
+}
+
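+/*
+ * Called back from target_setup_session(): allocate the efct_node that links
+ * the LIO session to the remote FC node and cache its FC IDs and indicators.
+ */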
+static int efct_session_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *private)
+{
+ struct efc_node *node = private;
+ struct efct_node *tgt_node;
+ struct efct *efct = node->efc->base;
+
+ tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
+ if (!tgt_node)
+ return -ENOMEM;
+
+ kref_init(&tgt_node->ref);
+ tgt_node->release = _efct_tgt_node_free;
+
+ tgt_node->session = se_sess;
+ node->tgt_node = tgt_node;
+ tgt_node->efct = efct;
+
+ tgt_node->node = node;
+
+ tgt_node->node_fc_id = node->rnode.fc_id;
+ tgt_node->port_fc_id = node->nport->fc_id;
+ tgt_node->vpi = node->nport->indicator;
+ tgt_node->rpi = node->rnode.indicator;
+
+ spin_lock_init(&tgt_node->active_ios_lock);
+ INIT_LIST_HEAD(&tgt_node->active_ios);
+
+ return 0;
+}
+
+int efct_scsi_tgt_new_device(struct efct *efct)
+{
+ u32 total_ios;
+
+ /* Get the max settings */
+ efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
+ efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
+
+ /* initialize IO watermark fields */
+ atomic_set(&efct->tgt_efct.ios_in_use, 0);
+ total_ios = efct->hw.config.n_io;
+ efc_log_debug(efct, "total_ios=%d\n", total_ios);
+ efct->tgt_efct.watermark_min =
+ (total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
+ efct->tgt_efct.watermark_max =
+ (total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
+ atomic_set(&efct->tgt_efct.io_high_watermark,
+ efct->tgt_efct.watermark_max);
+ atomic_set(&efct->tgt_efct.watermark_hit, 0);
+ atomic_set(&efct->tgt_efct.initiator_count, 0);
+
+ lio_wq = create_singlethread_workqueue("efct_lio_worker");
+ if (!lio_wq) {
+ efc_log_err(efct, "workqueue create failed\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&efct->tgt_efct.efct_lio_lock);
+ INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
+
+ return 0;
+}
+
+int efct_scsi_tgt_del_device(struct efct *efct)
+{
+ flush_workqueue(lio_wq);
+
+ return 0;
+}
+
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
+{
+ struct efct *efct = nport->efc->base;
+
+ efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
+ efct->tgt_efct.lio_nport->wwpn_str);
+
+ return 0;
+}
+
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
+{
+ efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
+}
+
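+/*
+ * Worker run on lio_wq: pick the TPG (vport or physical), register an LIO
+ * session for the remote node, add the node to the FC_ID lookup xarray and
+ * lower the IO watermark to account for the new initiator.
+ */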
+static void efct_lio_setup_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ char wwpn[WWN_NAME_LEN];
+ struct efct_lio_tpg *tpg;
+ struct efct_node *tgt_node;
+ struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+	/* Check to see if the node belongs to a vport;
+	 * if not, use the physical port.
+	 */
+ tpg = efct_get_vport_tpg(node);
+ if (tpg) {
+ se_tpg = &tpg->tpg;
+ } else if (efct->tgt_efct.tpg) {
+ tpg = efct->tgt_efct.tpg;
+ se_tpg = &tpg->tpg;
+ } else {
+ efc_log_err(efct, "failed to init session\n");
+ return;
+ }
+
+	/*
+	 * Format the FCP Initiator port_name into colon-separated values to
+	 * match the format used by our explicit ConfigFS NodeACLs.
+	 */
+ efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));
+
+ se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
+ node, efct_session_cb);
+ if (IS_ERR(se_sess)) {
+ efc_log_err(efct, "failed to setup session\n");
+ kfree(wq_data);
+ efc_scsi_sess_reg_complete(node, -EIO);
+ return;
+ }
+
+ tgt_node = node->tgt_node;
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+
+ efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
+ se_sess, node, id);
+
+ if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
+ efc_log_err(efct, "Node lookup store failed\n");
+
+ efc_scsi_sess_reg_complete(node, 0);
+
+ /* update IO watermark: increment initiator count */
+ ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ kfree(wq_data);
+}
+
+int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_lio_wq_data *wq_data;
+
+ /*
+ * Since LIO only supports initiator validation at thread level,
+	 * we are open-minded and accept all callers.
+ */
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_setup_session);
+ queue_work(lio_wq, &wq_data->work);
+ return EFC_SCSI_CALL_ASYNC;
+}
+
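+/*
+ * Worker run on lio_wq: stop the LIO session, wait for its outstanding
+ * commands to complete, then drop the efct_node reference taken at setup.
+ */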
+static void efct_lio_remove_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+
+ tgt_node = node->tgt_node;
+ if (!tgt_node) {
+ /* base driver has sent back-to-back requests
+ * to unreg session with no intervening
+ * register
+ */
+ efc_log_err(efct, "unreg session for NULL session\n");
+ efc_scsi_del_initiator_complete(node->efc, node);
+ return;
+ }
+
+ se_sess = tgt_node->session;
+ efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
+ se_sess, node);
+
+ /* first flag all session commands to complete */
+ target_stop_session(se_sess);
+
+ /* now wait for session commands to complete */
+ target_wait_for_sess_cmds(se_sess);
+ target_remove_session(se_sess);
+ tgt_node->session = NULL;
+ node->tgt_node = NULL;
+ kref_put(&tgt_node->ref, tgt_node->release);
+
+ kfree(wq_data);
+}
+
+int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_node *tgt_node = node->tgt_node;
+ struct efct_lio_wq_data *wq_data;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ if (reason == EFCT_SCSI_INITIATOR_MISSING)
+ return EFC_SCSI_CALL_COMPLETE;
+
+ if (!tgt_node) {
+ efc_log_err(efct, "tgt_node is NULL\n");
+ return -EIO;
+ }
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+ xa_erase(&efct->lookup, id);
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_remove_session);
+ queue_work(lio_wq, &wq_data->work);
+
+ /*
+ * update IO watermark: decrement initiator count
+ */
+ ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
+
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ return EFC_SCSI_CALL_ASYNC;
+}
+
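+/*
+ * Build an LIO se_cmd from a received FCP command and submit it to the
+ * target core; the IO is freed on any submission failure.
+ */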
+void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
+ u32 cdb_len, u32 flags)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *se_cmd = &io->tgt_io.cmd;
+ struct efct *efct = io->efct;
+ char *ddir;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc = 0;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+
+ /* set target timeout */
+ io->timeout = efct->target_io_timer_sec;
+
+ if (flags & EFCT_SCSI_CMD_SIMPLE)
+ ocp->task_attr = TCM_SIMPLE_TAG;
+ else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
+ ocp->task_attr = TCM_HEAD_TAG;
+ else if (flags & EFCT_SCSI_CMD_ORDERED)
+ ocp->task_attr = TCM_ORDERED_TAG;
+ else if (flags & EFCT_SCSI_CMD_ACA)
+ ocp->task_attr = TCM_ACA_TAG;
+
+ switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
+ case EFCT_SCSI_CMD_DIR_IN:
+ ddir = "FROM_INITIATOR";
+ ocp->ddir = DMA_TO_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "TO_INITIATOR";
+ ocp->ddir = DMA_FROM_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "BIDIR";
+ ocp->ddir = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ ddir = "NONE";
+ ocp->ddir = DMA_NONE;
+ break;
+ }
+
+ ocp->lun = lun;
+ efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
+ cdb[0], ddir, io->exp_xfer_len);
+
+ tgt_node = io->node;
+ se_sess = tgt_node->session;
+ if (!se_sess) {
+ efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
+ &ocp->cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
+ rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
+ ocp->lun, io->exp_xfer_len, ocp->task_attr,
+ ocp->ddir, TARGET_SCF_ACK_KREF);
+ if (rc) {
+ efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_ATOMIC))
+ return;
+
+ target_submit(se_cmd);
+}
+
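+/*
+ * Translate a received FCP task management request into an LIO TMR and
+ * submit it; unsupported functions are rejected immediately.
+ */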
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *io_to_abort, u32 flags)
+{
+ unsigned char tmr_func;
+ struct efct *efct = tmfio->efct;
+ struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+ efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
+ tmfio->display_name, cmd, lun);
+
+ switch (cmd) {
+ case EFCT_SCSI_TMF_ABORT_TASK:
+ tmr_func = TMR_ABORT_TASK;
+ break;
+ case EFCT_SCSI_TMF_ABORT_TASK_SET:
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_TASK_SET:
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
+ tmr_func = TMR_LUN_RESET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_ACA:
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+ case EFCT_SCSI_TMF_TARGET_RESET:
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+ case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
+ case EFCT_SCSI_TMF_QUERY_TASK_SET:
+ default:
+ goto tmf_fail;
+ }
+
+ tmfio->tgt_io.tmf = tmr_func;
+ tmfio->tgt_io.lun = lun;
+ tmfio->tgt_io.io_to_abort = io_to_abort;
+
+ tgt_node = tmfio->node;
+
+ se_sess = tgt_node->session;
+ if (!se_sess)
+ return 0;
+
+ rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
+ GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);
+
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
+ if (rc)
+ goto tmf_fail;
+
+ return 0;
+
+tmf_fail:
+ efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_lio_null_tmf_done, NULL);
+ return 0;
+}
+
+/* Start items for efct_lio_tpg_attrib_cit */
+
+#define DEF_EFCT_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)
+
+DEF_EFCT_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
+ &efct_lio_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+#define DEF_EFCT_NPIV_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)
+
+DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
+ &efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+CONFIGFS_ATTR(efct_lio_tpg_, enable);
+static struct configfs_attribute *efct_lio_tpg_attrs[] = {
+ &efct_lio_tpg_attr_enable, NULL };
+CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
+static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
+ &efct_lio_npiv_tpg_attr_enable, NULL };
+
+static const struct target_core_fabric_ops efct_lio_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_make_nport,
+ .fabric_drop_wwn = efct_lio_drop_nport,
+ .fabric_make_tpg = efct_lio_make_tpg,
+ .fabric_drop_tpg = efct_lio_drop_tpg,
+ .tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
+};
+
+static const struct target_core_fabric_ops efct_lio_npiv_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct_npiv",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_npiv_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_npiv_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ efct_lio_npiv_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ efct_lio_npiv_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_npiv_make_nport,
+ .fabric_drop_wwn = efct_lio_npiv_drop_nport,
+ .fabric_make_tpg = efct_lio_npiv_make_tpg,
+ .fabric_drop_tpg = efct_lio_npiv_drop_tpg,
+ .tpg_check_demo_mode_login_only =
+ efct_lio_npiv_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
+};
+
+int efct_scsi_tgt_driver_init(void)
+{
+ int rc;
+
+ /* Register the top level struct config_item_type with TCM core */
+ rc = target_register_template(&efct_lio_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ return rc;
+ }
+ rc = target_register_template(&efct_lio_npiv_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ target_unregister_template(&efct_lio_ops);
+ return rc;
+ }
+ return 0;
+}
+
+int efct_scsi_tgt_driver_exit(void)
+{
+ target_unregister_template(&efct_lio_ops);
+ target_unregister_template(&efct_lio_npiv_ops);
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_lio.h b/drivers/scsi/elx/efct/efct_lio.h
new file mode 100644
index 000000000000..569a0d4b1894
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCT_LIO_H__
+#define __EFCT_LIO_H__
+
+#include "efct_scsi.h"
+#include <target/target_core_base.h>
+
+#define efct_lio_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ ##__VA_ARGS__)
+
+#define efct_lio_tmfio_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x][f:%02x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ io->tgt_io.tmf, ##__VA_ARGS__)
+
+#define efct_set_lio_io_state(io, value) (io->tgt_io.state |= value)
+
+struct efct_lio_wq_data {
+ struct efct *efct;
+ void *ptr;
+ struct work_struct work;
+};
+
+/* Target private efct structure */
+struct efct_scsi_tgt {
+ u32 max_sge;
+ u32 max_sgl;
+
+ /*
+ * Variables used to send task set full. We use a high watermark
+ * method: a fixed number of IOs per initiator plus a fudge factor
+ * is reserved; once that number is reached, the target starts
+ * sending task set full/busy responses.
+ */
+ atomic_t initiator_count;
+ atomic_t ios_in_use;
+ atomic_t io_high_watermark;
+
+ atomic_t watermark_hit;
+ int watermark_min;
+ int watermark_max;
+
+ struct efct_lio_nport *lio_nport;
+ struct efct_lio_tpg *tpg;
+
+ struct list_head vport_list;
+ /* Protects vport_list */
+ spinlock_t efct_lio_lock;
+
+ u64 wwnn;
+};
+
+struct efct_scsi_tgt_nport {
+ struct efct_lio_nport *lio_nport;
+};
+
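+/*
+ * Remote (initiator) node context used by the target backend; holds the
+ * LIO session and tracks the node's active IOs.
+ */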
+struct efct_node {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efct *efct;
+ struct efc_node *node;
+ struct se_session *session;
+ spinlock_t active_ios_lock;
+ struct list_head active_ios;
+ char display_name[EFC_NAME_LENGTH];
+ u32 port_fc_id;
+ u32 node_fc_id;
+ u32 vpi;
+ u32 rpi;
+ u32 abort_cnt;
+};
+
+#define EFCT_LIO_STATE_SCSI_RECV_CMD (1 << 0)
+#define EFCT_LIO_STATE_TGT_SUBMIT_CMD (1 << 1)
+#define EFCT_LIO_STATE_TFO_QUEUE_DATA_IN (1 << 2)
+#define EFCT_LIO_STATE_TFO_WRITE_PENDING (1 << 3)
+#define EFCT_LIO_STATE_TGT_EXECUTE_CMD (1 << 4)
+#define EFCT_LIO_STATE_SCSI_SEND_RD_DATA (1 << 5)
+#define EFCT_LIO_STATE_TFO_CHK_STOP_FREE (1 << 6)
+#define EFCT_LIO_STATE_SCSI_DATA_DONE (1 << 7)
+#define EFCT_LIO_STATE_TFO_QUEUE_STATUS (1 << 8)
+#define EFCT_LIO_STATE_SCSI_SEND_RSP (1 << 9)
+#define EFCT_LIO_STATE_SCSI_RSP_DONE (1 << 10)
+#define EFCT_LIO_STATE_TGT_GENERIC_FREE (1 << 11)
+#define EFCT_LIO_STATE_SCSI_RECV_TMF (1 << 12)
+#define EFCT_LIO_STATE_TGT_SUBMIT_TMR (1 << 13)
+#define EFCT_LIO_STATE_TFO_WRITE_PEND_STATUS (1 << 14)
+#define EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE (1 << 15)
+
+#define EFCT_LIO_STATE_TFO_ABORTED_TASK (1 << 29)
+#define EFCT_LIO_STATE_TFO_RELEASE_CMD (1 << 30)
+#define EFCT_LIO_STATE_SCSI_CMPL_CMD (1u << 31)
+
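+/*
+ * Per-command target-mode state embedded in struct efct_io; wraps the
+ * LIO se_cmd and tracks data-phase progress.
+ */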
+struct efct_scsi_tgt_io {
+ struct se_cmd cmd;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ enum dma_data_direction ddir;
+ int task_attr;
+ u64 lun;
+
+ u32 state;
+ u8 tmf;
+ struct efct_io *io_to_abort;
+ u32 seg_map_cnt;
+ u32 seg_cnt;
+ u32 cur_seg;
+ enum efct_scsi_io_status err;
+ bool aborting;
+ bool rsp_sent;
+ u32 transferred_len;
+};
+
+/* Handler return codes */
+enum {
+ SCSI_HANDLER_DATAPHASE_STARTED = 1,
+ SCSI_HANDLER_RESP_STARTED,
+ SCSI_HANDLER_VALIDATED_DATAPHASE_STARTED,
+ SCSI_CMD_NOT_SUPPORTED,
+};
+
+#define WWN_NAME_LEN 32
+struct efct_lio_vport {
+ u64 wwpn;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn vport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ atomic_t enable;
+};
+
+struct efct_lio_nport {
+ u64 wwpn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn nport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ atomic_t enable;
+};
+
+struct efct_lio_tpg_attrib {
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ u32 demo_mode_login_only;
+ bool session_deletion_wait;
+};
+
+struct efct_lio_tpg {
+ struct se_portal_group tpg;
+ struct efct_lio_nport *nport;
+ struct efct_lio_vport *vport;
+ struct efct_lio_tpg_attrib tpg_attrib;
+ unsigned short tpgt;
+ bool enabled;
+};
+
+struct efct_lio_nacl {
+ u64 nport_wwnn;
+ char nport_name[WWN_NAME_LEN];
+ struct se_session *session;
+ struct se_node_acl se_node_acl;
+};
+
+struct efct_lio_vport_list_t {
+ struct list_head list_entry;
+ struct efct_lio_vport *lio_vport;
+};
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+
+#endif /*__EFCT_LIO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
new file mode 100644
index 000000000000..40fb3a724c76
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+
+#define enable_tsend_auto_resp(efct) 1
+#define enable_treceive_auto_resp(efct) 0
+
+#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
+ io->node->display_name, io->instance_index,\
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)
+
+#define scsi_io_trace(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
+ scsi_io_printf(io, fmt, ##__VA_ARGS__); \
+ } while (0)
+
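+/*
+ * Allocate a target SCSI IO from the transport IO pool, take a reference
+ * on the owning node and add the IO to the node's active_ios list.
+ * Returns NULL if the pool is exhausted.
+ */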
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ struct efct_io *io;
+ unsigned long flags = 0;
+
+ efct = node->efct;
+
+ xport = efct->xport;
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+
+ io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!io) {
+ efc_log_err(efct, "IO alloc Failed\n");
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&io->ref);
+ io->release = _efct_scsi_io_free;
+
+ /* set generic fields */
+ io->efct = efct;
+ io->node = node;
+ kref_get(&node->ref);
+
+ /* set type and name */
+ io->io_type = EFCT_IO_TYPE_IO;
+ io->display_name = "scsi_io";
+
+ io->cmd_ini = false;
+ io->cmd_tgt = true;
+
+ /* Add to node's active_ios list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add(&io->list_entry, &node->active_ios);
+
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ return io;
+}
+
+void
+_efct_scsi_io_free(struct kref *arg)
+{
+ struct efct_io *io = container_of(arg, struct efct_io, ref);
+ struct efct *efct = io->efct;
+ struct efct_node *node = io->node;
+ unsigned long flags = 0;
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+
+ if (io->io_free) {
+ efc_log_err(efct, "IO already freed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_del_init(&io->list_entry);
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ kref_put(&node->ref, node->release);
+ io->node = NULL;
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+}
+
+void
+efct_scsi_io_free(struct efct_io *io)
+{
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ WARN_ON(!refcount_read(&io->ref.refcount));
+ kref_put(&io->ref, io->release);
+}
+
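+/*
+ * HW completion callback for target data and response phases: translate
+ * the SLI4 WCQE status into an efct_scsi_io_status, invoke the target
+ * server callback (if any) and then re-check the pending IO list.
+ */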
+static void
+efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ u32 flags = 0;
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ efct_scsi_io_cb_t cb;
+
+ if (!io || !io->efct) {
+ pr_err("%s: IO cannot be NULL\n", __func__);
+ return;
+ }
+
+ scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
+
+ efct = io->efct;
+
+ io->transferred += length;
+
+ if (!io->scsi_tgt_cb) {
+ efct_scsi_check_pending(efct);
+ return;
+ }
+
+ /* Call target server completion */
+ cb = io->scsi_tgt_cb;
+
+ /* Clear the callback before invoking the callback */
+ io->scsi_tgt_cb = NULL;
+
+ /* if status was good, and auto-good-response was set,
+ * then callback target-server with IO_CMPL_RSP_SENT,
+ * otherwise send IO_CMPL
+ */
+ if (status == 0 && io->auto_resp)
+ flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
+ else
+ flags |= EFCT_SCSI_IO_CMPL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ if (ext_status & SLI4_FC_DI_ERROR_GE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
+ else if (ext_status & SLI4_FC_DI_ERROR_AE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ else if (ext_status & SLI4_FC_DI_ERROR_RE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ else
+ scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
+ scsi_stat = EFCT_SCSI_STATUS_ABORTED;
+ break;
+ case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
+ scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
+ break;
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_stat = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ default:
+ /* we have seen 0x0d (TX_DMA_FAILED err) */
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
+ /* target IO timed out */
+ scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
+ break;
+
+ case SLI4_FC_WCQE_STATUS_SHUTDOWN:
+ /* Target IO cancelled by HW */
+ scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
+ break;
+
+ default:
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);
+
+ efct_scsi_check_pending(efct);
+}
+
+static int
+efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
+ struct efct_scsi_sgl *sgl, u32 sgl_count,
+ enum efct_hw_io_type type)
+{
+ int rc;
+ u32 i;
+ struct efct *efct = hw->os;
+
+ /* Initialize HW SGL */
+ rc = efct_hw_io_init_sges(hw, hio, type);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
+ return -EIO;
+ }
+
+ for (i = 0; i < sgl_count; i++) {
+ /* Add data SGE */
+ rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
+ if (rc) {
+ efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
+ sgl_count, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void efc_log_sgl(struct efct_io *io)
+{
+ struct efct_hw_io *hio = io->hio;
+ struct sli4_sge *data = NULL;
+ u32 *dword = NULL;
+ u32 i;
+ u32 n_sge;
+
+ scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
+ upper_32_bits(hio->def_sgl.phys),
+ lower_32_bits(hio->def_sgl.phys));
+ n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
+ for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
+ dword = (u32 *)data;
+
+ scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, dword[0], dword[1], dword[2], dword[3]);
+
+ if (dword[2] & (1U << 31))
+ break;
+ }
+}
+
+static void
+efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct efct_io *io = arg;
+
+ if (io) {
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (!io->hw_cb)
+ return;
+
+ io->hw_cb = NULL;
+ (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
+ }
+}
+
+static int
+efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
+{
+ int rc = 0;
+ struct efct *efct = io->efct;
+
+ /* Got a HW IO;
+ * update ini/tgt_task_tag with HW IO info and dispatch
+ */
+ io->hio = hio;
+ if (io->cmd_tgt)
+ io->tgt_task_tag = hio->indicator;
+ else if (io->cmd_ini)
+ io->init_task_tag = hio->indicator;
+ io->hw_tag = hio->reqtag;
+
+ hio->eq = io->hw_priv;
+
+ /* Copy WQ steering */
+ switch (io->wq_steering) {
+ case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
+ break;
+ case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
+ break;
+ case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
+ break;
+ }
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_IO:
+ rc = efct_scsi_build_sgls(&efct->hw, io->hio,
+ io->sgl, io->sgl_count, io->hio_type);
+ if (rc)
+ break;
+
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
+ efc_log_sgl(io);
+
+ if (io->app_id)
+ io->iparam.fcp_tgt.app_id = io->app_id;
+
+ io->iparam.fcp_tgt.vpi = io->node->vpi;
+ io->iparam.fcp_tgt.rpi = io->node->rpi;
+ io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
+ io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
+ io->iparam.fcp_tgt.xmit_len = io->wire_len;
+
+ rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
+ &io->iparam, io->hw_cb, io);
+ break;
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static int
+efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
+{
+ int rc;
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_ABORT: {
+ struct efct_hw_io *hio_to_abort = NULL;
+
+ hio_to_abort = io->io_to_abort->hio;
+
+ if (!hio_to_abort) {
+ /*
+ * If "IO to abort" does not have an
+ * associated HW IO, immediately make callback with
+ * success. The command must have been sent to
+ * the backend, but the data phase has not yet
+ * started, so we don't have a HW IO.
+ *
+ * Note: since the backend shims should be
+ * taking a reference on io_to_abort, it should not
+ * be possible for it to have been completed and freed
+ * by the backend before the abort got here.
+ */
+ scsi_io_printf(io, "IO: not active\n");
+ ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
+ SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
+ rc = 0;
+ break;
+ }
+
+ /* HW IO is valid, abort it */
+ scsi_io_printf(io, "aborting\n");
+ rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
+ io->send_abts, io->hw_cb, io);
+ if (rc) {
+ int status = SLI4_FC_WCQE_STATUS_SUCCESS;
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (rc != -ENOENT && rc != -EINPROGRESS) {
+ status = -1;
+ scsi_io_printf(io, "Failed to abort IO rc=%d\n",
+ rc);
+ }
+ cb(io->hio, 0, status, 0, io);
+ rc = 0;
+ }
+
+ break;
+ }
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
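+/*
+ * Pull one IO off the transport pending list and dispatch it. Aborts are
+ * sent without a HW IO; all other IOs need a HW IO allocated first. If no
+ * HW IO is available, the IO is put back and NULL is returned.
+ */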
+static struct efct_io *
+efct_scsi_dispatch_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ struct efct_hw_io *hio;
+ unsigned long flags = 0;
+ int status;
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ if (!list_empty(&xport->io_pending_list)) {
+ io = list_first_entry(&xport->io_pending_list, struct efct_io,
+ io_pending_link);
+ list_del_init(&io->io_pending_link);
+ }
+
+ if (!io) {
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ return NULL;
+ }
+
+ if (io->io_type == EFCT_IO_TYPE_ABORT) {
+ hio = NULL;
+ } else {
+ hio = efct_hw_io_alloc(&efct->hw);
+ if (!hio) {
+ /*
+ * No HW IO available. Put the IO back on
+ * the front of the pending list.
+ */
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ io = NULL;
+ } else {
+ hio->eq = io->hw_priv;
+ }
+ }
+
+ /* Must drop the lock before dispatching the IO */
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (!io)
+ return NULL;
+
+ /*
+ * We pulled an IO off the pending list,
+ * and either got an HW IO or don't need one
+ */
+ atomic_sub_return(1, &xport->io_pending_count);
+ if (!hio)
+ status = efct_scsi_io_dispatch_no_hw_io(io);
+ else
+ status = efct_scsi_io_dispatch_hw_io(io, hio);
+ if (status) {
+ /*
+ * Invoke the HW callback, but do so in a separate
+ * execution context provided by the NOP mailbox
+ * completion processing context, by using
+ * efct_hw_async_call().
+ */
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "call hw async failed\n");
+ }
+ }
+
+ return io;
+}
+
+void
+efct_scsi_check_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ int count = 0;
+ unsigned long flags = 0;
+ int dispatch = 0;
+
+ /* Guard against recursion */
+ if (atomic_add_return(1, &xport->io_pending_recursing) != 1) {
+ /* This function is already running; decrement and return. */
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ while (efct_scsi_dispatch_pending(efct))
+ count++;
+
+ if (count) {
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ /*
+ * If nothing was removed from the list,
+ * we might be in a case where we need to abort an
+ * active IO and the abort is on the pending list.
+ * Look for an abort we can dispatch.
+ */
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
+ if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
+ /* This IO has a HW IO, so it is
+ * active. Dispatch the abort.
+ */
+ dispatch = 1;
+ list_del_init(&io->io_pending_link);
+ atomic_sub_return(1, &xport->io_pending_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (dispatch) {
+ if (efct_scsi_io_dispatch_no_hw_io(io)) {
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "hw async failed\n");
+ }
+ }
+ }
+
+ atomic_sub_return(1, &xport->io_pending_recursing);
+}
+
+int
+efct_scsi_io_dispatch(struct efct_io *io, void *cb)
+{
+ struct efct_hw_io *hio;
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * If this IO already has a HW IO, then this is not the first
+ * phase of the IO. Send it to the HW.
+ */
+ if (io->hio)
+ return efct_scsi_io_dispatch_hw_io(io, io->hio);
+
+ /*
+ * We don't already have a HW IO associated with the IO. First check
+ * the pending list. If not empty, add IO to the tail and process the
+ * pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ /*
+ * If this is a low latency request, then put it at
+ * the front of the IO pending queue; otherwise put
+ * it at the end of the queue.
+ */
+ if (io->low_latency) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ } else {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link,
+ &xport->io_pending_list);
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /*
+ * We don't have a HW IO associated with the IO and there's nothing
+ * on the pending list. Attempt to allocate a HW IO and dispatch it.
+ */
+ hio = efct_hw_io_alloc(&io->efct->hw);
+ if (!hio) {
+ /* Couldn't get a HW IO. Save this IO on the pending list */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ atomic_add_return(1, &xport->io_total_pending);
+ atomic_add_return(1, &xport->io_pending_count);
+ return 0;
+ }
+
+ /* We successfully allocated a HW IO; dispatch to HW */
+ return efct_scsi_io_dispatch_hw_io(io, hio);
+}
+
+int
+efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
+{
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * For aborts, we don't need a HW IO, but we still want
+ * to pass through the pending list to preserve ordering.
+ * Thus, if the pending list is not empty, add this abort
+ * to the pending list and process the pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /* nothing on pending list, dispatch abort */
+ return efct_scsi_io_dispatch_no_hw_io(io);
+}
+
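+/*
+ * Common data-phase setup used by efct_scsi_send_rd_data() and
+ * efct_scsi_recv_wr_data(): clamp the wire length to the remaining
+ * expected transfer, trim the SGL on overrun, decide whether the HW may
+ * send an automatic good response, then dispatch the IO.
+ */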
+static inline int
+efct_scsi_xfer_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
+ enum efct_hw_io_type type, int enable_ar,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ size_t residual = 0;
+
+ io->sgl_count = sgl_count;
+
+ efct = io->efct;
+
+ scsi_io_trace(io, "%s wire_len %llu\n",
+ (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
+ xwire_len);
+
+ io->hio_type = type;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ residual = io->exp_xfer_len - io->transferred;
+ io->wire_len = (xwire_len < residual) ? xwire_len : residual;
+ residual = (xwire_len - io->wire_len);
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = io->transferred;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* if this is the last data phase and there is no residual, enable
+ * auto-good-response
+ */
+ if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
+ ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
+ (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+ io->auto_resp = true;
+ } else {
+ io->auto_resp = false;
+ }
+
+ /* save this transfer length */
+ io->xfer_req = io->wire_len;
+
+ /* Adjust the transferred count to account for overrun
+ * when the residual is calculated in efct_scsi_send_resp
+ */
+ io->transferred += residual;
+
+ /* Adjust the SGL size if there is overrun */
+
+ if (residual) {
+ struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];
+
+ while (residual) {
+ size_t len = sgl_ptr->len;
+
+ if (len > residual) {
+ sgl_ptr->len = len - residual;
+ residual = 0;
+ } else {
+ sgl_ptr->len = 0;
+ residual -= len;
+ io->sgl_count--;
+ }
+ sgl_ptr--;
+ }
+ }
+
+ /* Set latency and WQ steering */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (efct->xport) {
+ struct efct_xport *xport = efct->xport;
+
+ if (type == EFCT_HW_IO_TARGET_READ) {
+ xport->fcp_stats.input_requests++;
+ xport->fcp_stats.input_bytes += xwire_len;
+ } else if (type == EFCT_HW_IO_TARGET_WRITE) {
+ xport->fcp_stats.output_requests++;
+ xport->fcp_stats.output_bytes += xwire_len;
+ }
+ }
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
+
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
+ len, EFCT_HW_IO_TARGET_READ,
+ enable_tsend_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
+ EFCT_HW_IO_TARGET_WRITE,
+ enable_treceive_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ int residual;
+ /* Always try auto resp */
+ bool auto_resp = true;
+ u8 scsi_status = 0;
+ u16 scsi_status_qualifier = 0;
+ u8 *sense_data = NULL;
+ u32 sense_data_length = 0;
+
+ efct = io->efct;
+
+ if (rsp) {
+ scsi_status = rsp->scsi_status;
+ scsi_status_qualifier = rsp->scsi_status_qualifier;
+ sense_data = rsp->sense_data;
+ sense_data_length = rsp->sense_data_length;
+ residual = rsp->residual;
+ } else {
+ residual = io->exp_xfer_len - io->transferred;
+ }
+
+ io->wire_len = 0;
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* Set low latency queueing request */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (scsi_status != 0 || residual || sense_data_length) {
+ struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
+ u8 *sns_data;
+
+ if (!fcprsp) {
+ efc_log_err(efct, "NULL response buffer\n");
+ return -EIO;
+ }
+
+ sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);
+
+ auto_resp = false;
+
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ io->wire_len += sizeof(*fcprsp);
+
+ fcprsp->resp.fr_status = scsi_status;
+ fcprsp->resp.fr_retry_delay =
+ cpu_to_be16(scsi_status_qualifier);
+
+ /* set residual status if necessary */
+ if (residual != 0) {
+ /* FCP: if data transferred is less than the
+ * amount expected, then this is an underflow.
+ * If data transferred would have been greater
+ * than the amount expected, this is an overflow.
+ */
+ if (residual > 0) {
+ fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
+ fcprsp->ext.fr_resid = cpu_to_be32(residual);
+ } else {
+ fcprsp->resp.fr_flags |= FCP_RESID_OVER;
+ fcprsp->ext.fr_resid = cpu_to_be32(-residual);
+ }
+ }
+
+ if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
+ if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
+ efc_log_err(efct, "Sense exceeds max size.\n");
+ return -EIO;
+ }
+
+ fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
+ memcpy(sns_data, sense_data, sense_data_length);
+ fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
+ io->wire_len += sense_data_length;
+ }
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+ }
+
+ if (auto_resp)
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
+
+static int
+efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status bls_status;
+
+ efct = io->efct;
+
+ /* BLS isn't really a "SCSI" concept, but use SCSI status */
+ if (status) {
+ io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
+ bls_status = EFCT_SCSI_STATUS_ERROR;
+ } else {
+ bls_status = EFCT_SCSI_STATUS_GOOD;
+ }
+
+ if (io->bls_cb) {
+ efct_scsi_io_cb_t bls_cb = io->bls_cb;
+ void *bls_cb_arg = io->bls_cb_arg;
+
+ io->bls_cb = NULL;
+ io->bls_cb_arg = NULL;
+
+ /* invoke callback */
+ bls_cb(io, bls_status, 0, bls_cb_arg);
+ }
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+static int
+efct_target_send_bls_resp(struct efct_io *io,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_acc *acc;
+ int rc;
+
+ /* fill out IO structure with everything needed to send BA_ACC */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = io->init_task_tag;
+ bls->rx_id = io->abort_rx_id;
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->ba_ox_id = cpu_to_be16(bls->ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls->rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ /* generic io fields have already been populated */
+
+ /* set type and BLS-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "bls_rsp";
+ io->hio_type = EFCT_HW_BLS_ACC;
+ io->bls_cb = cb;
+ io->bls_cb_arg = arg;
+
+ /* dispatch IO */
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
+ efct_target_bls_resp_cb, io);
+ return rc;
+}
+
+static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+
+ efct_scsi_io_free(io);
+ return 0;
+}
+
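+/* Send a BA_RJT in response to an ABTS; frees the IO on failure. */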
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_rjt *acc;
+ int rc;
+
+ /* fill out BLS Response-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "ba_rjt";
+ io->hio_type = EFCT_HW_BLS_RJT;
+ io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);
+
+ /* fill out iparam fields */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->br_reason = ELS_RJT_UNAB;
+ acc->br_explan = ELS_EXPL_NONE;
+
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
+ io);
+ if (rc) {
+ efc_log_err(efct, "efct_scsi_io_dispatch() failed: %d\n", rc);
+ efct_scsi_io_free(io);
+ io = NULL;
+ }
+ return io;
+}
+
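+/*
+ * Send a task management response. ABORT_TASK is answered with a BLS
+ * BA_ACC; all other TMFs are answered with an FCP response IU carrying
+ * the mapped FCP TMF completion code.
+ */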
+int
+efct_scsi_send_tmf_resp(struct efct_io *io,
+ enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3],
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ int rc;
+ struct {
+ struct fcp_resp_with_ext rsp_ext;
+ struct fcp_resp_rsp_info info;
+ } *fcprsp;
+ u8 fcp_rspcode;
+
+ io->wire_len = 0;
+
+ switch (rspcode) {
+ case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
+ case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_REJECTED:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
+ fcp_rspcode = FCP_TMF_INVALID_LUN;
+ break;
+ case EFCT_SCSI_TMF_SERVICE_DELIVERY:
+ fcp_rspcode = FCP_TMF_FAILED;
+ break;
+ default:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ }
+
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
+ rc = efct_target_send_bls_resp(io, cb, arg);
+ return rc;
+ }
+
+ /* populate the FCP TMF response */
+ fcprsp = io->rspbuf.virt;
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ fcprsp->rsp_ext.resp.fr_flags |= FCP_RSP_LEN_VAL;
+
+ if (addl_rsp_info) {
+ memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
+ sizeof(fcprsp->info._fr_resvd));
+ }
+ fcprsp->info.rsp_code = fcp_rspcode;
+
+ io->wire_len = sizeof(*fcprsp);
+
+ fcprsp->rsp_ext.ext.fr_rsp_len =
+ cpu_to_be32(sizeof(struct fcp_resp_rsp_info));
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ rc = efct_scsi_io_dispatch(io, efct_target_io_cb);
+
+ return rc;
+}
+
+static int
+efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_status;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+
+ efct = io->efct;
+
+ if (!io->abort_cb)
+ goto done;
+
+ abort_cb = io->abort_cb;
+ abort_cb_arg = io->abort_cb_arg;
+
+ io->abort_cb = NULL;
+ io->abort_cb_arg = NULL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_status = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
+ scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
+ break;
+ default:
+ /* we have seen 0x15 (abort in progress) */
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
+ break;
+ default:
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ /* invoke callback */
+ abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
+
+done:
+ /* done with IO to abort; ref taken in efct_scsi_tgt_abort_io() */
+ kref_put(&io->io_to_abort->ref, io->io_to_abort->release);
+
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ int rc;
+ struct efct_io *abort_io = NULL;
+
+ efct = io->efct;
+ xport = efct->xport;
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io->ref) == 0) {
+ /* command no longer active */
+ scsi_io_printf(io, "command no longer active\n");
+ return -EIO;
+ }
+
+ /*
+ * allocate a new IO to send the abort request. Use efct_io_alloc()
+ * directly, as we need an IO object that will not fail allocation
+ * due to allocations being disabled (in efct_scsi_io_alloc())
+ */
+ abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!abort_io) {
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ kref_put(&io->ref, io->release);
+ return -EIO;
+ }
+
+ /* Save the target server callback and argument */
+ /* set generic fields */
+ abort_io->cmd_tgt = true;
+ abort_io->node = io->node;
+
+ /* set type and abort-specific fields */
+ abort_io->io_type = EFCT_IO_TYPE_ABORT;
+ abort_io->display_name = "tgt_abort";
+ abort_io->io_to_abort = io;
+ abort_io->send_abts = false;
+ abort_io->abort_cb = cb;
+ abort_io->abort_cb_arg = arg;
+
+ /* now dispatch IO */
+ rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
+ if (rc)
+ kref_put(&io->ref, io->release);
+ return rc;
+}
+
+void
+efct_scsi_io_complete(struct efct_io *io)
+{
+ if (io->io_free) {
+ efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
+ io->tag);
+ return;
+ }
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ kref_put(&io->ref, io->release);
+}
diff --git a/drivers/scsi/elx/efct/efct_scsi.h b/drivers/scsi/elx/efct/efct_scsi.h
new file mode 100644
index 000000000000..b04faffa3984
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_SCSI_H__)
+#define __EFCT_SCSI_H__
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+/* efct_scsi_rcv_cmd() efct_scsi_rcv_tmf() flags */
+#define EFCT_SCSI_CMD_DIR_IN (1 << 0)
+#define EFCT_SCSI_CMD_DIR_OUT (1 << 1)
+#define EFCT_SCSI_CMD_SIMPLE (1 << 2)
+#define EFCT_SCSI_CMD_HEAD_OF_QUEUE (1 << 3)
+#define EFCT_SCSI_CMD_ORDERED (1 << 4)
+#define EFCT_SCSI_CMD_UNTAGGED (1 << 5)
+#define EFCT_SCSI_CMD_ACA (1 << 6)
+#define EFCT_SCSI_FIRST_BURST_ERR (1 << 7)
+#define EFCT_SCSI_FIRST_BURST_ABORTED (1 << 8)
+
+/* efct_scsi_send_rd_data/recv_wr_data/send_resp flags */
+#define EFCT_SCSI_LAST_DATAPHASE (1 << 0)
+#define EFCT_SCSI_NO_AUTO_RESPONSE (1 << 1)
+#define EFCT_SCSI_LOW_LATENCY (1 << 2)
+
+#define EFCT_SCSI_SNS_BUF_VALID(sense) ((sense) && \
+ (0x70 == (((const u8 *)(sense))[0] & 0x70)))
+
+#define EFCT_SCSI_WQ_STEERING_SHIFT 16
+#define EFCT_SCSI_WQ_STEERING_MASK (0xf << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CLASS (0 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_REQUEST (1 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CPU (2 << EFCT_SCSI_WQ_STEERING_SHIFT)
+
+#define EFCT_SCSI_WQ_CLASS_SHIFT (20)
+#define EFCT_SCSI_WQ_CLASS_MASK (0xf << EFCT_SCSI_WQ_CLASS_SHIFT)
+#define EFCT_SCSI_WQ_CLASS(x) (((x) << EFCT_SCSI_WQ_CLASS_SHIFT) & \
+ EFCT_SCSI_WQ_CLASS_MASK)
+
+#define EFCT_SCSI_WQ_CLASS_LOW_LATENCY 1
+
+struct efct_scsi_cmd_resp {
+ u8 scsi_status;
+ u16 scsi_status_qualifier;
+ u8 *response_data;
+ u32 response_data_length;
+ u8 *sense_data;
+ u32 sense_data_length;
+ int residual;
+ u32 response_wire_length;
+};
+
+struct efct_vport {
+ struct efct *efct;
+ bool is_vport;
+ struct fc_host_statistics fc_host_stats;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+};
+
+/* Status values returned by IO callbacks */
+enum efct_scsi_io_status {
+ EFCT_SCSI_STATUS_GOOD = 0,
+ EFCT_SCSI_STATUS_ABORTED,
+ EFCT_SCSI_STATUS_ERROR,
+ EFCT_SCSI_STATUS_DIF_GUARD_ERR,
+ EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR,
+ EFCT_SCSI_STATUS_PROTOCOL_CRC_ERROR,
+ EFCT_SCSI_STATUS_NO_IO,
+ EFCT_SCSI_STATUS_ABORT_IN_PROGRESS,
+ EFCT_SCSI_STATUS_CHECK_RESPONSE,
+ EFCT_SCSI_STATUS_COMMAND_TIMEOUT,
+ EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED,
+ EFCT_SCSI_STATUS_SHUTDOWN,
+ EFCT_SCSI_STATUS_NEXUS_LOST,
+};
+
+struct efct_node;
+struct efct_io;
+struct efc_node;
+struct efc_nport;
+
+/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */
+typedef int (*efct_scsi_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ u32 flags, void *arg);
+
+/* Callback used by send_rd_io(), send_wr_io() */
+typedef int (*efct_scsi_rsp_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ struct efct_scsi_cmd_resp *rsp,
+ u32 flags, void *arg);
+
+/* efct_scsi_cb_t flags */
+#define EFCT_SCSI_IO_CMPL (1 << 0)
+/* IO completed, response sent */
+#define EFCT_SCSI_IO_CMPL_RSP_SENT (1 << 1)
+#define EFCT_SCSI_IO_ABORTED (1 << 2)
+
+/* efct_scsi_recv_tmf() request values */
+enum efct_scsi_tmf_cmd {
+ EFCT_SCSI_TMF_ABORT_TASK = 1,
+ EFCT_SCSI_TMF_QUERY_TASK_SET,
+ EFCT_SCSI_TMF_ABORT_TASK_SET,
+ EFCT_SCSI_TMF_CLEAR_TASK_SET,
+ EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT,
+ EFCT_SCSI_TMF_LOGICAL_UNIT_RESET,
+ EFCT_SCSI_TMF_CLEAR_ACA,
+ EFCT_SCSI_TMF_TARGET_RESET,
+};
+
+/* efct_scsi_send_tmf_resp() response values */
+enum efct_scsi_tmf_resp {
+ EFCT_SCSI_TMF_FUNCTION_COMPLETE = 1,
+ EFCT_SCSI_TMF_FUNCTION_SUCCEEDED,
+ EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND,
+ EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER,
+ EFCT_SCSI_TMF_SERVICE_DELIVERY,
+};
+
+struct efct_scsi_sgl {
+ uintptr_t addr;
+ uintptr_t dif_addr;
+ size_t len;
+};
+
+enum efct_scsi_io_role {
+ EFCT_SCSI_IO_ROLE_ORIGINATOR,
+ EFCT_SCSI_IO_ROLE_RESPONDER,
+};
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node);
+void efct_scsi_io_free(struct efct_io *io);
+struct efct_io *efct_io_get_instance(struct efct *efct, u32 index);
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+int efct_scsi_tgt_new_device(struct efct *efct);
+int efct_scsi_tgt_del_device(struct efct *efct);
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport);
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport);
+
+int
+efct_scsi_new_initiator(struct efc *efc, struct efc_node *node);
+
+enum efct_scsi_del_initiator_reason {
+ EFCT_SCSI_INITIATOR_DELETED,
+ EFCT_SCSI_INITIATOR_MISSING,
+};
+
+int
+efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason);
+void
+efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, u32 cdb_len,
+ u32 flags);
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *abortio, u32 flags);
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_tmf_resp(struct efct_io *io, enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg);
+
+void efct_scsi_io_complete(struct efct_io *io);
+
+int efct_scsi_reg_fc_transport(void);
+void efct_scsi_release_fc_transport(void);
+int efct_scsi_new_device(struct efct *efct);
+void efct_scsi_del_device(struct efct *efct);
+void _efct_scsi_io_free(struct kref *arg);
+
+int
+efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost);
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev);
+
+int efct_scsi_io_dispatch(struct efct_io *io, void *cb);
+int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb);
+void efct_scsi_check_pending(struct efct *efct);
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr);
+
+#endif /* __EFCT_SCSI_H__ */
diff --git a/drivers/scsi/elx/efct/efct_unsol.c b/drivers/scsi/elx/efct/efct_unsol.c
new file mode 100644
index 000000000000..e6addab66a60
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+#define frame_printf(efct, hdr, fmt, ...) \
+ do { \
+ char s_id_text[16]; \
+ efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
+ s_id_text, sizeof(s_id_text)); \
+ efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
+ ntoh24((hdr)->fh_d_id), s_id_text, \
+ (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
+ be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
+ } while (0)
+
+static struct efct_node *
+efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
+{
+ struct efct_node *node;
+ u64 id = (u64)port_id << 32 | node_id;
+
+ /*
+ * During node shutdown the lookup entry is removed first,
+ * before the backend is notified, so no new IOs will be allowed.
+ */
+ /* Find a target node, given s_id and d_id */
+ node = xa_load(&efct->lookup, id);
+ if (node)
+ kref_get(&node->ref);
+
+ return node;
+}
+
+static int
+efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ struct efct_node *node;
+ struct fc_frame_header *hdr;
+ u32 s_id, d_id;
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
+ return -EIO;
+
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct,
+ "Node not found, drop cmd d_id:%x s_id:%x\n",
+ d_id, s_id);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+ }
+
+ efct_dispatch_fcp_cmd(node, seq);
+ } else {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
+ d_id, s_id);
+ return -EIO;
+ }
+
+ efc_log_err(efct, "Received ABTS for Node:%p\n", node);
+ efct_node_recv_abts_frame(node, seq);
+ }
+
+ kref_put(&node->ref, node->release);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+}
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = arg;
+
+ /* Process FCP command */
+ if (!efct_dispatch_frame(efct, seq))
+ return 0;
+
+ /* Forward frame to discovery lib */
+ efc_dispatch_frame(efct->efcport, seq);
+ return 0;
+}
+
+static int
+efct_fc_tmf_rejected_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_scsi_io_free(io);
+ return 0;
+}
+
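+/*
+ * Map the FCP_CMND task management flag bits onto efct TMF commands and
+ * hand the request to the target server; unhandled flags are rejected.
+ */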
+static void
+efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
+{
+ u32 i;
+ struct {
+ u32 mask;
+ enum efct_scsi_tmf_cmd cmd;
+ } tmflist[] = {
+ {FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
+ {FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
+ {FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
+ {FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
+ {FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };
+
+ io->exp_xfer_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
+ if (tmflist[i].mask & tm_flags) {
+ io->tmf_cmd = tmflist[i].cmd;
+ efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(tmflist)) {
+ /* Not handled */
+ efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
+ efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_fc_tmf_rejected_cb, NULL);
+ }
+}
+
+static int
+efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ /*
+ * If we received less than FCP_CMND_IU bytes, assume that the frame is
+ * corrupted in some way and drop it.
+ * This was seen when jamming the FCTL fill bytes field.
+ */
+ if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+
+ efc_log_debug(efct,
+ "drop ox_id %04x payload (%zd) less than (%zd)\n",
+ be16_to_cpu(fchdr->fh_ox_id),
+ seq->payload->dma.len, sizeof(struct fcp_cmnd));
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
+ struct fc_frame_header *fchdr, bool sit)
+{
+ io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
+ /* note, tgt_task_tag, hw_tag set when HW io is allocated */
+ io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
+ io->transferred = 0;
+
+ /* The upper 7 bits of CS_CTL are the frame priority through the SAN.
+ * Our assertion here is that the priority given to the frame carrying
+ * the FCP command should be the priority given to ALL frames of
+ * that IO. Thus we need to save the incoming CS_CTL here.
+ */
+ if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
+ io->cs_ctl = fchdr->fh_cs_ctl;
+ else
+ io->cs_ctl = 0;
+
+ io->seq_init = sit;
+}
+
+static u32
+efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
+{
+ u32 flags = 0;
+
+ switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_SIMPLE:
+ flags |= EFCT_SCSI_CMD_SIMPLE;
+ break;
+ case FCP_PTA_HEADQ:
+ flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
+ break;
+ case FCP_PTA_ORDERED:
+ flags |= EFCT_SCSI_CMD_ORDERED;
+ break;
+ case FCP_PTA_ACA:
+ flags |= EFCT_SCSI_CMD_ACA;
+ break;
+ }
+ if (cmnd->fc_flags & FCP_CFL_WRDATA)
+ flags |= EFCT_SCSI_CMD_DIR_IN;
+ if (cmnd->fc_flags & FCP_CFL_RDDATA)
+ flags |= EFCT_SCSI_CMD_DIR_OUT;
+
+ return flags;
+}
+
+static void
+efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_send_frame_context *ctx = arg;
+ struct efct_hw *hw = ctx->hw;
+
+ /* Free WQ completion callback */
+ efct_hw_reqtag_free(hw, ctx->wqcb);
+
+ /* Free sequence */
+ efct_hw_sequence_free(hw, ctx->seq);
+}
+
+static int
+efct_sframe_common_send(struct efct_node *node,
+ struct efc_hw_sequence *seq,
+ enum fc_rctl r_ctl, u32 f_ctl,
+ u8 type, void *payload, u32 payload_len)
+{
+ struct efct *efct = node->efct;
+ struct efct_hw *hw = &efct->hw;
+ int rc = 0;
+ struct fc_frame_header *req_hdr = seq->header->dma.virt;
+ struct fc_frame_header hdr;
+ struct efct_hw_send_frame_context *ctx;
+
+ u32 heap_size = seq->payload->dma.size;
+ uintptr_t heap_phys_base = seq->payload->dma.phys;
+ u8 *heap_virt_base = seq->payload->dma.virt;
+ u32 heap_offset = 0;
+
+ /* Build the FC header reusing the RQ header DMA buffer */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.fh_r_ctl = r_ctl;
+ /* send it back to whoever sent it to us */
+ memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
+ memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
+ hdr.fh_type = type;
+ hton24(hdr.fh_f_ctl, f_ctl);
+ hdr.fh_ox_id = req_hdr->fh_ox_id;
+ hdr.fh_rx_id = req_hdr->fh_rx_id;
+ hdr.fh_cs_ctl = 0;
+ hdr.fh_df_ctl = 0;
+ hdr.fh_seq_cnt = 0;
+ hdr.fh_parm_offset = 0;
+
+ /*
+ * send_frame_seq_id is an atomic, we just let it increment,
+ * while storing only the low 8 bits to hdr->seq_id
+ */
+ hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
+ hdr.fh_seq_id--;
+
+ /* Allocate and fill in the send frame request context */
+ ctx = (void *)(heap_virt_base + heap_offset);
+ heap_offset += sizeof(*ctx);
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Save sequence */
+ ctx->seq = seq;
+
+ /* Allocate a response payload DMA buffer from the heap */
+ ctx->payload.phys = heap_phys_base + heap_offset;
+ ctx->payload.virt = heap_virt_base + heap_offset;
+ ctx->payload.size = payload_len;
+ ctx->payload.len = payload_len;
+ heap_offset += payload_len;
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ /* Copy the payload in */
+ memcpy(ctx->payload.virt, payload, payload_len);
+
+ /* Send */
+ rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
+ FC_EOF_T, &ctx->payload, ctx,
+ efct_sframe_common_send_cb, ctx);
+ if (rc)
+ efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);
+
+ return rc;
+}
+
+static int
+efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
+ void *rsp, u32 rsp_len)
+{
+ return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
+ FC_FC_EX_CTX |
+ FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT,
+ FC_TYPE_FCP,
+ rsp, rsp_len);
+}
+
+static int
+efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fcp_resp_with_ext fcprsp;
+ struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
+ int rc = 0;
+ unsigned long flags = 0;
+ struct efct *efct = node->efct;
+
+ /* construct task set full or busy response */
+ memset(&fcprsp, 0, sizeof(fcprsp));
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
+ SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ fcprsp.ext.fr_resid = fcpcmd->fc_dl;
+
+ /* send it using send_frame */
+ rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
+ if (rc)
+ efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);
+
+ return rc;
+}
+
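+/*
+ * Dispatch an unsolicited FCP_CMND frame: validate the payload, allocate
+ * a SCSI IO (answering with task set full/busy via SEND_FRAME when the
+ * pool is empty) and pass the command or TMF to the target server.
+ */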
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+ struct fcp_cmnd *cmnd = NULL;
+ struct efct_io *io = NULL;
+ u32 lun;
+
+ if (!seq->payload) {
+ efc_log_err(efct, "Sequence payload is NULL.\n");
+ return -EIO;
+ }
+
+ cmnd = seq->payload->dma.virt;
+
+ /* perform FCP_CMND validation check(s) */
+ if (efct_validate_fcp_cmd(efct, seq))
+ return -EIO;
+
+ lun = scsilun_to_int(&cmnd->fc_lun);
+ if (lun == U32_MAX)
+ return -EIO;
+
+ io = efct_scsi_io_alloc(node);
+ if (!io) {
+ int rc;
+
+ /* Use SEND_FRAME to send task set full or busy */
+ rc = efct_sframe_send_task_set_full_or_busy(node, seq);
+ if (rc)
+ efc_log_err(efct, "Failed to send busy task: %d\n", rc);
+
+ return rc;
+ }
+
+ io->hw_priv = seq->hw_priv;
+
+ io->app_id = 0;
+
+ /* RQ pair, if we got here, SIT=1 */
+ efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);
+
+ if (cmnd->fc_tm_flags) {
+ efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
+ } else {
+ u32 flags = efct_get_flags_fcp_cmd(cmnd);
+
+ if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
+ efc_log_err(efct, "Additional CDB not supported\n");
+ return -EIO;
+ }
+ /*
+ * Can return failure for things like task set full and UAs,
+ * no need to treat as a dropped frame if rc != 0
+ */
+ efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
+ sizeof(cmnd->fc_cdb), flags);
+ }
+
+ return 0;
+}
+
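+/* Handle a received ABTS: abort the matching target IO or send a BA_RJT. */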
+static int
+efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct efct *efct = io->efct;
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+ u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
+ struct efct_io *abortio;
+
+ /* Find IO and attempt to take a reference on it */
+ abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);
+
+ if (abortio) {
+ /* Got a reference on the IO. Hold it until backend
+ * is notified below
+ */
+ efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
+ ox_id, rx_id);
+
+ /*
+ * Save the ox_id for the ABTS as the init_task_tag in our
+ * manufactured TMF IO object.
+ */
+ io->display_name = "abts";
+ io->init_task_tag = ox_id;
+ /* don't set tgt_task_tag, don't want to confuse with XRI */
+
+ /*
+ * Save the rx_id from the ABTS as it is needed for the
+ * BLS response, regardless of the IO context's rx_id.
+ */
+ io->abort_rx_id = rx_id;
+
+ /* Call target server command abort */
+ io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
+ efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
+ EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);
+
+ /*
+ * Backend will have taken an additional
+ * reference on the IO if needed;
+ * done with current reference.
+ */
+ kref_put(&abortio->ref, abortio->release);
+ } else {
+ /*
+ * Either the IO was not found or it was freed between
+ * finding it and attempting to take the reference.
+ */
+ efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
+ ox_id);
+
+ /* Send a BA_RJT */
+ efct_bls_send_rjt(io, hdr);
+ }
+ return 0;
+}
+
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efct_io *io = NULL;
+
+ node->abort_cnt++;
+ io = efct_scsi_io_alloc(node);
+ if (io) {
+ io->hw_priv = seq->hw_priv;
+ /* If we got this far, SIT=1 */
+ io->seq_init = 1;
+
+ /* fill out generic fields */
+ io->efct = efct;
+ io->node = node;
+ io->cmd_tgt = true;
+
+ efct_process_abts(io, seq->header->dma.virt);
+ } else {
+ efc_log_err(efct,
+ "SCSI IO allocation failed for ABTS received ");
+ efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
+ be16_to_cpu(hdr->fh_ox_id),
+ be16_to_cpu(hdr->fh_rx_id));
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_unsol.h b/drivers/scsi/elx/efct/efct_unsol.h
new file mode 100644
index 000000000000..16d1e3ba1833
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__OSC_UNSOL_H__)
+#define __OSC_UNSOL_H__
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq);
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq);
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __OSC_UNSOL_H__ */
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
new file mode 100644
index 000000000000..9495cedcc0b9
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+static struct dentry *efct_debugfs_root;
+static atomic_t efct_debugfs_count;
+
+static struct scsi_host_template efct_template = {
+ .module = THIS_MODULE,
+ .name = EFCT_DRIVER_NAME,
+ .supported_mode = MODE_TARGET,
+};
+
+/* globals */
+static struct fc_function_template efct_xport_functions;
+static struct fc_function_template efct_vport_functions;
+
+static struct scsi_transport_template *efct_xport_fc_tt;
+static struct scsi_transport_template *efct_vport_fc_tt;
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct)
+{
+ struct efct_xport *xport;
+
+ xport = kzalloc(sizeof(*xport), GFP_KERNEL);
+ if (!xport)
+ return xport;
+
+ xport->efct = efct;
+ return xport;
+}
+
+static int
+efct_xport_init_debugfs(struct efct *efct)
+{
+ /* Setup efct debugfs root directory */
+ if (!efct_debugfs_root) {
+ efct_debugfs_root = debugfs_create_dir("efct", NULL);
+ atomic_set(&efct_debugfs_count, 0);
+ }
+
+ /* Create a directory for sessions in root */
+ if (!efct->sess_debugfs_dir) {
+ efct->sess_debugfs_dir = debugfs_create_dir("sessions",
+ efct_debugfs_root);
+ if (IS_ERR(efct->sess_debugfs_dir)) {
+ efc_log_err(efct,
+ "failed to create debugfs entry for sessions\n");
+ goto debugfs_fail;
+ }
+ atomic_inc(&efct_debugfs_count);
+ }
+
+ return 0;
+
+debugfs_fail:
+ return -EIO;
+}
+
+static void efct_xport_delete_debugfs(struct efct *efct)
+{
+ /* Remove session debugfs directory */
+ debugfs_remove(efct->sess_debugfs_dir);
+ efct->sess_debugfs_dir = NULL;
+ atomic_dec(&efct_debugfs_count);
+
+ if (atomic_read(&efct_debugfs_count) == 0) {
+ /* remove root debugfs directory */
+ debugfs_remove(efct_debugfs_root);
+ efct_debugfs_root = NULL;
+ }
+}
+
+int
+efct_xport_attach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc) {
+ efc_log_err(efct, "%s: Can't setup hardware\n", efct->desc);
+ return rc;
+ }
+
+ efct_hw_parse_filter(&efct->hw, (void *)efct->filter_def);
+
+ xport->io_pool = efct_io_pool_create(efct, efct->hw.config.n_sgl);
+ if (!xport->io_pool) {
+ efc_log_err(efct, "Can't allocate IO pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+efct_xport_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_async_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+}
+
+static void
+efct_xport_async_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct);
+
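+/*
+ * Statistics timer callback: kick off another round of link/host counter
+ * collection and re-arm the timer via efct_xport_config_stats_timer().
+ */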
+static void
+efct_xport_stats_timer_cb(struct timer_list *t)
+{
+ struct efct_xport *xport = from_timer(xport, t, stats_timer);
+ struct efct *efct = xport->efct;
+
+ efct_xport_config_stats_timer(efct);
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct)
+{
+ u32 timeout = 3 * 1000;
+ struct efct_xport *xport = NULL;
+
+ if (!efct) {
+ pr_err("%s: failed to locate EFCT device\n", __func__);
+ return;
+ }
+
+ xport = efct->xport;
+ efct_hw_get_link_stats(&efct->hw, 0, 0, 0,
+ efct_xport_async_link_stats_cb,
+ &xport->fc_xport_stats);
+ efct_hw_get_host_stats(&efct->hw, 0, efct_xport_async_host_stats_cb,
+ &xport->fc_xport_stats);
+
+ timer_setup(&xport->stats_timer,
+ &efct_xport_stats_timer_cb, 0);
+ mod_timer(&xport->stats_timer,
+ jiffies + msecs_to_jiffies(timeout));
+}
+
+int
+efct_xport_initialize(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc = 0;
+
+ /* Initialize io lists */
+ spin_lock_init(&xport->io_pending_lock);
+ INIT_LIST_HEAD(&xport->io_pending_list);
+ atomic_set(&xport->io_active_count, 0);
+ atomic_set(&xport->io_pending_count, 0);
+ atomic_set(&xport->io_total_free, 0);
+ atomic_set(&xport->io_total_pending, 0);
+ atomic_set(&xport->io_alloc_failed_count, 0);
+ atomic_set(&xport->io_pending_recursing, 0);
+
+ rc = efct_hw_init(&efct->hw);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_init failure\n");
+ goto out;
+ }
+
+ rc = efct_scsi_tgt_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize target\n");
+ goto hw_init_out;
+ }
+
+ rc = efct_scsi_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize initiator\n");
+ goto tgt_dev_out;
+ }
+
+	/* Get FC link and host statistics periodically */
+ efct_xport_config_stats_timer(efct);
+
+ efct_xport_init_debugfs(efct);
+
+ return rc;
+
+tgt_dev_out:
+ efct_scsi_tgt_del_device(efct);
+
+hw_init_out:
+ efct_hw_teardown(&efct->hw);
+out:
+ return rc;
+}
+
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result)
+{
+ int rc = 0;
+ struct efct *efct = NULL;
+ union efct_xport_stats_u value;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_CONFIG_PORT_STATUS:
+ if (xport->configured_link_state == 0) {
+ /*
+ * Initial state is offline. configured_link_state is
+ * set to online explicitly when port is brought online
+ */
+ xport->configured_link_state = EFCT_XPORT_PORT_OFFLINE;
+ }
+ result->value = xport->configured_link_state;
+ break;
+
+ case EFCT_XPORT_PORT_STATUS:
+ /* Determine port status based on link speed. */
+ value.value = efct_hw_get_link_speed(&efct->hw);
+ if (value.value == 0)
+ result->value = EFCT_XPORT_PORT_OFFLINE;
+ else
+ result->value = EFCT_XPORT_PORT_ONLINE;
+ break;
+
+ case EFCT_XPORT_LINK_SPEED:
+ result->value = efct_hw_get_link_speed(&efct->hw);
+ break;
+
+ case EFCT_XPORT_LINK_STATISTICS:
+ memcpy((void *)result, &efct->xport->fc_xport_stats,
+ sizeof(union efct_xport_stats_u));
+ break;
+ case EFCT_XPORT_LINK_STAT_RESET: {
+ /* Create a completion to synchronize the stat reset process */
+ init_completion(&result->stats.done);
+
+ /* First reset the link stats */
+ rc = efct_hw_get_link_stats(&efct->hw, 0, 1, 1,
+ efct_xport_link_stats_cb, result);
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Undefined failure */
+ efc_log_debug(efct, "sem wait failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ /* Next reset the host stats */
+ rc = efct_hw_get_host_stats(&efct->hw, 1,
+ efct_xport_host_stats_cb, result);
+
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Undefined failure */
+ efc_log_debug(efct, "sem wait failed\n");
+ rc = -EIO;
+ break;
+ }
+ break;
+ }
+ default:
+ rc = -EIO;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+efct_get_link_supported_speeds(struct efct *efct)
+{
+ u32 supported_speeds = 0;
+ u32 link_module_type, i;
+ struct {
+ u32 lmt_speed;
+ u32 speed;
+ } supported_speed_list[] = {
+ {SLI4_LINK_MODULE_TYPE_1GB, FC_PORTSPEED_1GBIT},
+ {SLI4_LINK_MODULE_TYPE_2GB, FC_PORTSPEED_2GBIT},
+ {SLI4_LINK_MODULE_TYPE_4GB, FC_PORTSPEED_4GBIT},
+ {SLI4_LINK_MODULE_TYPE_8GB, FC_PORTSPEED_8GBIT},
+ {SLI4_LINK_MODULE_TYPE_16GB, FC_PORTSPEED_16GBIT},
+ {SLI4_LINK_MODULE_TYPE_32GB, FC_PORTSPEED_32GBIT},
+ {SLI4_LINK_MODULE_TYPE_64GB, FC_PORTSPEED_64GBIT},
+ {SLI4_LINK_MODULE_TYPE_128GB, FC_PORTSPEED_128GBIT},
+ };
+
+ link_module_type = sli_get_lmt(&efct->hw.sli);
+
+ /* populate link supported speeds */
+ for (i = 0; i < ARRAY_SIZE(supported_speed_list); i++) {
+ if (link_module_type & supported_speed_list[i].lmt_speed)
+ supported_speeds |= supported_speed_list[i].speed;
+ }
+
+ return supported_speeds;
+}
+
+int
+efct_scsi_new_device(struct efct *efct)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return -ENOMEM;
+ }
+
+ /* save shost to initiator-client context */
+ efct->shost = shost;
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+
+	/*
+	 * Set the initial can_queue value to the max SCSI IOs. This is the
+	 * maximum global queue depth (as opposed to the per-LUN queue depth,
+	 * .cmd_per_lun). This may need to be adjusted for I+T mode.
+	 */
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+ /*
+ * can only accept (from mid-layer) as many SGEs as we've
+ * pre-registered
+ */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_xport_fc_tt;
+ efc_log_debug(efct, "transport template=%p\n", efct_xport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, &efct->pci->dev,
+ &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return -EIO;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model,
+ efct->hw.sli.fw_name[0], EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+
+ fc_host_node_name(shost) = efct_get_wwnn(&efct->hw);
+ fc_host_port_name(shost) = efct_get_wwpn(&efct->hw);
+ fc_host_max_npiv_vports(shost) = 128;
+
+ return 0;
+}
+
+struct scsi_transport_template *
+efct_attach_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_xport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+struct scsi_transport_template *
+efct_attach_vport_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_vport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+int
+efct_scsi_reg_fc_transport(void)
+{
+ /* attach to appropriate scsi_tranport_* module */
+ efct_xport_fc_tt = efct_attach_fc_transport();
+ if (!efct_xport_fc_tt) {
+		pr_err("%s: failed to attach to scsi_transport_*\n", __func__);
+ return -EIO;
+ }
+
+ efct_vport_fc_tt = efct_attach_vport_fc_transport();
+ if (!efct_vport_fc_tt) {
+		pr_err("%s: failed to attach to scsi_transport_*\n", __func__);
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_scsi_release_fc_transport(void)
+{
+ /* detach from scsi_transport_* */
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ if (efct_vport_fc_tt)
+ efct_release_fc_transport(efct_vport_fc_tt);
+
+ efct_vport_fc_tt = NULL;
+}
+
+void
+efct_xport_detach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+
+ /* free resources associated with target-server and initiator-client */
+ efct_scsi_tgt_del_device(efct);
+
+ efct_scsi_del_device(efct);
+
+	/* Shutdown FC statistics timer */
+ if (timer_pending(&xport->stats_timer))
+ del_timer(&xport->stats_timer);
+
+ efct_hw_teardown(&efct->hw);
+
+ efct_xport_delete_debugfs(efct);
+}
+
+static void
+efct_xport_domain_free_cb(struct efc *efc, void *arg)
+{
+ struct completion *done = arg;
+
+ complete(done);
+}
+
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...)
+{
+ u32 rc = 0;
+ struct efct *efct = NULL;
+ va_list argp;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_PORT_ONLINE: {
+ /* Bring the port on-line */
+ rc = efct_hw_port_control(&efct->hw, EFCT_HW_PORT_INIT, 0,
+ NULL, NULL);
+ if (rc)
+ efc_log_err(efct,
+ "%s: Can't init port\n", efct->desc);
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+ case EFCT_XPORT_PORT_OFFLINE: {
+ if (efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN, 0,
+ NULL, NULL))
+ efc_log_err(efct, "port shutdown failed\n");
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+
+ case EFCT_XPORT_SHUTDOWN: {
+ struct completion done;
+ unsigned long timeout;
+
+		/* If a PHYSDEV reset was performed (e.g. hw dump), it affects
+		 * all PCI functions; an orderly shutdown won't work, so just
+		 * force-free the domain
+		 */
+ if (sli_reset_required(&efct->hw.sli)) {
+ struct efc_domain *domain = efct->efcport->domain;
+
+ if (domain)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST,
+ domain);
+ } else {
+ efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN,
+ 0, NULL, NULL);
+ }
+
+ init_completion(&done);
+
+ efc_register_domain_free_cb(efct->efcport,
+ efct_xport_domain_free_cb, &done);
+
+ efc_log_debug(efct, "Waiting %d seconds for domain shutdown\n",
+ (EFC_SHUTDOWN_TIMEOUT_USEC / 1000000));
+
+ timeout = usecs_to_jiffies(EFC_SHUTDOWN_TIMEOUT_USEC);
+ if (!wait_for_completion_timeout(&done, timeout)) {
+ efc_log_err(efct, "Domain shutdown timed out!!\n");
+ WARN_ON(1);
+ }
+
+ efc_register_domain_free_cb(efct->efcport, NULL, NULL);
+
+ /* Free up any saved virtual ports */
+ efc_vport_del_all(efct->efcport);
+ break;
+ }
+
+ /*
+ * Set wwnn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWNN_SET: {
+ u64 wwnn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwnn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWNN %016llx\n", wwnn);
+ xport->req_wwnn = wwnn;
+
+ break;
+ }
+ /*
+ * Set wwpn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWPN_SET: {
+ u64 wwpn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwpn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWPN %016llx\n", wwpn);
+ xport->req_wwpn = wwpn;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ return rc;
+}
+
+void
+efct_xport_free(struct efct_xport *xport)
+{
+ if (xport) {
+ efct_io_pool_free(xport->io_pool);
+
+ kfree(xport);
+ }
+}
+
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template)
+{
+ if (transport_template)
+ pr_err("releasing transport layer\n");
+
+ /* Releasing FC transport */
+ fc_release_transport(transport_template);
+}
+
+static void
+efct_xport_remove_host(struct Scsi_Host *shost)
+{
+ fc_remove_host(shost);
+}
+
+void
+efct_scsi_del_device(struct efct *efct)
+{
+ if (!efct->shost)
+ return;
+
+ efc_log_debug(efct, "Unregistering with Transport Layer\n");
+ efct_xport_remove_host(efct->shost);
+ efc_log_debug(efct, "Unregistering with SCSI Midlayer\n");
+ scsi_remove_host(efct->shost);
+ scsi_host_put(efct->shost);
+ efct->shost = NULL;
+}
+
+static void
+efct_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ struct efc_nport *nport;
+
+ if (efc->domain && efc->domain->nport) {
+ nport = efc->domain->nport;
+ fc_host_port_id(shost) = nport->fc_id;
+ }
+}
+
+static void
+efct_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ int type = FC_PORTTYPE_UNKNOWN;
+
+ if (efc->domain && efc->domain->nport) {
+ if (efc->domain->is_loop) {
+ type = FC_PORTTYPE_LPORT;
+ } else {
+ struct efc_nport *nport = efc->domain->nport;
+
+ if (nport->is_vport)
+ type = FC_PORTTYPE_NPIV;
+ else if (nport->topology == EFC_NPORT_TOPO_P2P)
+ type = FC_PORTTYPE_PTP;
+ else if (nport->topology == EFC_NPORT_TOPO_UNKNOWN)
+ type = FC_PORTTYPE_UNKNOWN;
+ else
+ type = FC_PORTTYPE_NPORT;
+ }
+ }
+ fc_host_port_type(shost) = type;
+}
+
+static void
+efct_get_host_vport_type(struct Scsi_Host *shost)
+{
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+}
+
+static void
+efct_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u status;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_PORT_STATUS, &status);
+ if ((!rc) && (status.value == EFCT_XPORT_PORT_ONLINE))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+}
+
+static void
+efct_get_host_speed(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ union efct_xport_stats_u speed;
+ u32 fc_speed = FC_PORTSPEED_UNKNOWN;
+ int rc;
+
+ if (!efc->domain || !efc->domain->nport) {
+ fc_host_speed(shost) = fc_speed;
+ return;
+ }
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_SPEED, &speed);
+ if (!rc) {
+ switch (speed.value) {
+ case 1000:
+ fc_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case 2000:
+ fc_speed = FC_PORTSPEED_2GBIT;
+ break;
+ case 4000:
+ fc_speed = FC_PORTSPEED_4GBIT;
+ break;
+ case 8000:
+ fc_speed = FC_PORTSPEED_8GBIT;
+ break;
+ case 10000:
+ fc_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 16000:
+ fc_speed = FC_PORTSPEED_16GBIT;
+ break;
+ case 32000:
+ fc_speed = FC_PORTSPEED_32GBIT;
+ break;
+ case 64000:
+ fc_speed = FC_PORTSPEED_64GBIT;
+ break;
+ case 128000:
+ fc_speed = FC_PORTSPEED_128GBIT;
+ break;
+ }
+ }
+
+ fc_host_speed(shost) = fc_speed;
+}
+
+static void
+efct_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+
+ if (efc->domain) {
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)
+ efc->domain->flogi_service_params;
+
+ fc_host_fabric_name(shost) = be64_to_cpu(sp->fl_wwnn);
+ }
+}
+
+static struct fc_host_statistics *
+efct_get_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u stats;
+ struct efct_xport *xport = efct->xport;
+ int rc = 0;
+
+ rc = efct_xport_status(xport, EFCT_XPORT_LINK_STATISTICS, &stats);
+ if (rc) {
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+ return NULL;
+ }
+
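+	/*
+	 * Copy the cached HW counters (kept current by the periodic stats
+	 * timer) into the fc_host statistics structure.
+	 */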
+ vport->fc_host_stats.loss_of_sync_count =
+ stats.stats.link_stats.loss_of_sync_error_count;
+ vport->fc_host_stats.link_failure_count =
+ stats.stats.link_stats.link_failure_error_count;
+ vport->fc_host_stats.prim_seq_protocol_err_count =
+ stats.stats.link_stats.primitive_sequence_error_count;
+ vport->fc_host_stats.invalid_tx_word_count =
+ stats.stats.link_stats.invalid_transmission_word_error_count;
+ vport->fc_host_stats.invalid_crc_count =
+ stats.stats.link_stats.crc_error_count;
+	/* mbox returns kbytes; convert to 4-byte FC words (1 KB = 256 words) */
+	vport->fc_host_stats.tx_words =
+		stats.stats.host_stats.transmit_kbyte_count * 256;
+	/* mbox returns kbytes; convert to 4-byte FC words (1 KB = 256 words) */
+	vport->fc_host_stats.rx_words =
+		stats.stats.host_stats.receive_kbyte_count * 256;
+ vport->fc_host_stats.tx_frames =
+ stats.stats.host_stats.transmit_frame_count;
+ vport->fc_host_stats.rx_frames =
+ stats.stats.host_stats.receive_frame_count;
+
+ vport->fc_host_stats.fcp_input_requests =
+ xport->fcp_stats.input_requests;
+ vport->fc_host_stats.fcp_output_requests =
+ xport->fcp_stats.output_requests;
+ vport->fc_host_stats.fcp_output_megabytes =
+ xport->fcp_stats.output_bytes >> 20;
+ vport->fc_host_stats.fcp_input_megabytes =
+ xport->fcp_stats.input_bytes >> 20;
+ vport->fc_host_stats.fcp_control_requests =
+ xport->fcp_stats.control_requests;
+
+ return &vport->fc_host_stats;
+}
+
+static void
+efct_reset_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ /* argument has no purpose for this action */
+ union efct_xport_stats_u dummy;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_STAT_RESET, &dummy);
+ if (rc)
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+}
+
+static int
+efct_issue_lip(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport =
+ shost ? (struct efct_vport *)shost->hostdata : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+
+ if (!shost || !vport || !efct) {
+ pr_err("%s: shost=%p vport=%p efct=%p\n", __func__,
+ shost, vport, efct);
+ return -EPERM;
+ }
+
+ /*
+ * Bring the link down gracefully then re-init the link.
+ * The firmware will re-initialize the Fibre Channel interface as
+ * required. It does not issue a LIP.
+ */
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_OFFLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_OFFLINE failed\n");
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_ONLINE failed\n");
+
+ return 0;
+}
+
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return NULL;
+ }
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+ vport->is_vport = true;
+
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+	/* can only accept (from mid-layer) as many SGEs as we've pre-registered */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_vport_fc_tt;
+ efc_log_debug(efct, "vport transport template=%p\n",
+ efct_vport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, dev, &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return NULL;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model, efct->hw.sli.fw_name[0],
+ EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+ vport->shost = shost;
+
+ return vport;
+}
+
+int efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost)
+{
+ if (shost) {
+ efc_log_debug(efct,
+ "Unregistering vport with Transport Layer\n");
+ efct_xport_remove_host(shost);
+ efc_log_debug(efct, "Unregistering vport with SCSI Midlayer\n");
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+ return 0;
+ }
+ return -EIO;
+}
+
+static int
+efct_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport ? fc_vport->shost : NULL;
+ struct efct_vport *pport = shost ?
+ (struct efct_vport *)shost->hostdata :
+ NULL;
+ struct efct *efct = pport ? pport->efct : NULL;
+ struct efct_vport *vport = NULL;
+
+ if (!fc_vport || !shost || !efct)
+ goto fail;
+
+ vport = efct_scsi_new_vport(efct, &fc_vport->dev);
+ if (!vport) {
+ efc_log_err(efct, "failed to create vport\n");
+ goto fail;
+ }
+
+ vport->fc_vport = fc_vport;
+ vport->npiv_wwpn = fc_vport->port_name;
+ vport->npiv_wwnn = fc_vport->node_name;
+ fc_host_node_name(vport->shost) = vport->npiv_wwnn;
+ fc_host_port_name(vport->shost) = vport->npiv_wwpn;
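+	/* stash the vport in the fc_vport driver data for efct_vport_delete() */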
+ *(struct efct_vport **)fc_vport->dd_data = vport;
+
+ return 0;
+
+fail:
+ return -EIO;
+}
+
+static int
+efct_vport_delete(struct fc_vport *fc_vport)
+{
+ struct efct_vport *vport = *(struct efct_vport **)fc_vport->dd_data;
+ struct Scsi_Host *shost = vport ? vport->shost : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+ int rc;
+
+ rc = efct_scsi_del_vport(efct, shost);
+
+ if (rc)
+ pr_err("%s: vport delete failed\n", __func__);
+
+ return rc;
+}
+
+static int
+efct_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ return 0;
+}
+
+static struct fc_function_template efct_xport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_port_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ .vport_disable = efct_vport_disable,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+ .vport_create = efct_vport_create,
+ .vport_delete = efct_vport_delete,
+};
+
+static struct fc_function_template efct_vport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_vport_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+};
diff --git a/drivers/scsi/elx/efct/efct_xport.h b/drivers/scsi/elx/efct/efct_xport.h
new file mode 100644
index 000000000000..89f3c20ecb59
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_XPORT_H__)
+#define __EFCT_XPORT_H__
+
+enum efct_xport_ctrl {
+ EFCT_XPORT_PORT_ONLINE = 1,
+ EFCT_XPORT_PORT_OFFLINE,
+ EFCT_XPORT_SHUTDOWN,
+ EFCT_XPORT_POST_NODE_EVENT,
+ EFCT_XPORT_WWNN_SET,
+ EFCT_XPORT_WWPN_SET,
+};
+
+enum efct_xport_status {
+ EFCT_XPORT_PORT_STATUS,
+ EFCT_XPORT_CONFIG_PORT_STATUS,
+ EFCT_XPORT_LINK_SPEED,
+ EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
+ EFCT_XPORT_LINK_STATISTICS,
+ EFCT_XPORT_LINK_STAT_RESET,
+ EFCT_XPORT_IS_QUIESCED
+};
+
+struct efct_xport_link_stats {
+ bool rec;
+ bool gec;
+ bool w02of;
+ bool w03of;
+ bool w04of;
+ bool w05of;
+ bool w06of;
+ bool w07of;
+ bool w08of;
+ bool w09of;
+ bool w10of;
+ bool w11of;
+ bool w12of;
+ bool w13of;
+ bool w14of;
+ bool w15of;
+ bool w16of;
+ bool w17of;
+ bool w18of;
+ bool w19of;
+ bool w20of;
+ bool w21of;
+ bool clrc;
+ bool clof1;
+ u32 link_failure_error_count;
+ u32 loss_of_sync_error_count;
+ u32 loss_of_signal_error_count;
+ u32 primitive_sequence_error_count;
+ u32 invalid_transmission_word_error_count;
+ u32 crc_error_count;
+ u32 primitive_sequence_event_timeout_count;
+ u32 elastic_buffer_overrun_error_count;
+ u32 arbitration_fc_al_timeout_count;
+ u32 advertised_receive_bufftor_to_buffer_credit;
+ u32 current_receive_buffer_to_buffer_credit;
+ u32 advertised_transmit_buffer_to_buffer_credit;
+ u32 current_transmit_buffer_to_buffer_credit;
+ u32 received_eofa_count;
+ u32 received_eofdti_count;
+ u32 received_eofni_count;
+ u32 received_soff_count;
+ u32 received_dropped_no_aer_count;
+ u32 received_dropped_no_available_rpi_resources_count;
+ u32 received_dropped_no_available_xri_resources_count;
+};
+
+struct efct_xport_host_stats {
+ bool cc;
+ u32 transmit_kbyte_count;
+ u32 receive_kbyte_count;
+ u32 transmit_frame_count;
+ u32 receive_frame_count;
+ u32 transmit_sequence_count;
+ u32 receive_sequence_count;
+ u32 total_exchanges_originator;
+ u32 total_exchanges_responder;
+ u32 receive_p_bsy_count;
+ u32 receive_f_bsy_count;
+ u32 dropped_frames_due_to_no_rq_buffer_count;
+ u32 empty_rq_timeout_count;
+ u32 dropped_frames_due_to_no_xri_count;
+ u32 empty_xri_pool_count;
+};
+
+struct efct_xport_host_statistics {
+ struct completion done;
+ struct efct_xport_link_stats link_stats;
+ struct efct_xport_host_stats host_stats;
+};
+
+union efct_xport_stats_u {
+ u32 value;
+ struct efct_xport_host_statistics stats;
+};
+
+struct efct_xport_fcp_stats {
+ u64 input_bytes;
+ u64 output_bytes;
+ u64 input_requests;
+ u64 output_requests;
+ u64 control_requests;
+};
+
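+/*
+ * Per-adapter transport state: requested WWNs, node tracking, IO pool,
+ * pending-IO bookkeeping and the periodic statistics timer.
+ */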
+struct efct_xport {
+ struct efct *efct;
+ /* wwpn requested by user for primary nport */
+ u64 req_wwpn;
+ /* wwnn requested by user for primary nport */
+ u64 req_wwnn;
+
+ /* Nodes */
+ /* number of allocated nodes */
+ u32 nodes_count;
+ /* used to track how often IO pool is empty */
+ atomic_t io_alloc_failed_count;
+ /* array of pointers to nodes */
+ struct efc_node **nodes;
+
+ /* Io pool and counts */
+ /* pointer to IO pool */
+ struct efct_io_pool *io_pool;
+ /* lock for io_pending_list */
+ spinlock_t io_pending_lock;
+	/* list of IOs waiting for HW resources
+	 * lock: xport->io_pending_lock
+	 * link: efct_io->io_pending_link
+	 */
+	struct list_head io_pending_list;
+	/* count of total IOs allocated */
+	atomic_t io_total_alloc;
+	/* count of total IOs freed */
+	atomic_t io_total_free;
+	/* count of total IOs that were pended */
+	atomic_t io_total_pending;
+	/* count of active IOs */
+	atomic_t io_active_count;
+	/* count of pending IOs */
+	atomic_t io_pending_count;
+ /* non-zero if efct_scsi_check_pending is executing */
+ atomic_t io_pending_recursing;
+
+ /* Port */
+ /* requested link state */
+ u32 configured_link_state;
+
+ /* Timer for Statistics */
+ struct timer_list stats_timer;
+ union efct_xport_stats_u fc_xport_stats;
+ struct efct_xport_fcp_stats fcp_stats;
+};
+
+struct efct_rport_data {
+ struct efc_node *node;
+};
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct);
+int
+efct_xport_attach(struct efct_xport *xport);
+int
+efct_xport_initialize(struct efct_xport *xport);
+void
+efct_xport_detach(struct efct_xport *xport);
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result);
+void
+efct_xport_free(struct efct_xport *xport);
+
+struct scsi_transport_template *efct_attach_fc_transport(void);
+struct scsi_transport_template *efct_attach_vport_fc_transport(void);
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template);
+
+#endif /* __EFCT_XPORT_H__ */
diff --git a/drivers/scsi/elx/include/efc_common.h b/drivers/scsi/elx/include/efc_common.h
new file mode 100644
index 000000000000..8d57f69ace0a
--- /dev/null
+++ b/drivers/scsi/elx/include/efc_common.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_COMMON_H__
+#define __EFC_COMMON_H__
+
+#include <linux/pci.h>
+
+struct efc_dma {
+ void *virt;
+ void *alloc;
+ dma_addr_t phys;
+
+ size_t size;
+ size_t len;
+ struct pci_dev *pdev;
+};
+
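+/* Logging helpers: route messages through the adapter's PCI device */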
+#define efc_log_crit(efc, fmt, args...) \
+ dev_crit(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_err(efc, fmt, args...) \
+ dev_err(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_warn(efc, fmt, args...) \
+ dev_warn(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_info(efc, fmt, args...) \
+ dev_info(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_debug(efc, fmt, args...) \
+ dev_dbg(&((efc)->pci)->dev, fmt, ##args)
+
+#endif /* __EFC_COMMON_H__ */
diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h
new file mode 100644
index 000000000000..927016283f41
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_H__
+#define __EFC_H__
+
+#include "../include/efc_common.h"
+#include "efclib.h"
+#include "efc_sm.h"
+#include "efc_cmds.h"
+#include "efc_domain.h"
+#include "efc_nport.h"
+#include "efc_node.h"
+#include "efc_fabric.h"
+#include "efc_device.h"
+#include "efc_els.h"
+
+#define EFC_MAX_REMOTE_NODES 2048
+#define NODE_SPARAMS_SIZE 256
+
+enum efc_scsi_del_initiator_reason {
+ EFC_SCSI_INITIATOR_DELETED,
+ EFC_SCSI_INITIATOR_MISSING,
+};
+
+enum efc_scsi_del_target_reason {
+ EFC_SCSI_TARGET_DELETED,
+ EFC_SCSI_TARGET_MISSING,
+};
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
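+/* Tracing helpers for the domain/node/nport state machines */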
+#define domain_sm_trace(domain) \
+ efc_log_debug(domain->efc, "[domain:%s] %-20s %-20s\n", \
+ domain->display_name, __func__, efc_sm_event_name(evt)) \
+
+#define domain_trace(domain, fmt, ...) \
+ efc_log_debug(domain->efc, \
+ "[%s]" fmt, domain->display_name, ##__VA_ARGS__) \
+
+#define node_sm_trace() \
+ efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \
+ node->display_name, __func__, efc_sm_event_name(evt)) \
+
+#define nport_sm_trace(nport) \
+ efc_log_debug(nport->efc, \
+ "[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \
+
+#endif /* __EFC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
new file mode 100644
index 000000000000..37e6697d86b8
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efclib.h"
+#include "../libefc_sli/sli4.h"
+#include "efc_cmds.h"
+#include "efc_sm.h"
+
+static void
+efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Clear the nport attached flag */
+ nport->attached = false;
+
+ /* Free the service parameters buffer */
+ if (nport->dma.virt) {
+ dma_free_coherent(&efc->pci->dev, nport->dma.size,
+ nport->dma.virt, nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+
+ efc_nport_cb(efc, evt, nport);
+}
+
+static int
+efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
+{
+ struct efc *efc = nport->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
+ nport->indicator, status, le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ int evt = EFC_EVT_NPORT_FREE_OK;
+ int rc;
+
+ rc = efc_nport_get_mbox_status(nport, mqe, status);
+ if (rc)
+ evt = EFC_EVT_NPORT_FREE_FAIL;
+
+ efc_nport_free_resources(nport, evt, mqe);
+ return rc;
+}
+
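+/*
+ * Free an nport by issuing UNREG_VPI; resource cleanup and event delivery
+ * happen in the mailbox completion callback.
+ */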
+static void
+efc_nport_free_unreg_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
+ SLI4_UNREG_TYPE_PORT);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_free_unreg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ }
+}
+
+static void
+efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Now inform the registered callbacks */
+ efc_nport_cb(efc, evt, nport);
+
+ /* Set the nport attached flag */
+ if (evt == EFC_EVT_NPORT_ATTACH_OK)
+ nport->attached = true;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending)
+ efc_nport_free_unreg_vpi(nport);
+}
+
+static int
+efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_nport_alloc_init_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
+ return;
+ }
+
+ rc = sli_cmd_init_vpi(efc->sli, data,
+ nport->indicator, nport->domain->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_init_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ u8 *payload = NULL;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ payload = nport->dma.virt;
+
+ memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
+ sizeof(nport->sli_wwpn));
+ memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
+ sizeof(nport->sli_wwnn));
+
+ dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
+ nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ efc_nport_alloc_init_vpi(nport);
+ return 0;
+}
+
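+/*
+ * Read the default service parameters into a temporary DMA buffer; the
+ * completion callback saves the WWPN/WWNN and continues with INIT_VPI.
+ */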
+static void
+efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
+{
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* Allocate memory for the service parameters */
+ nport->dma.size = EFC_SPARAM_DMA_SZ;
+ nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ nport->dma.size, &nport->dma.phys,
+ GFP_DMA);
+ if (!nport->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = sli_cmd_read_sparm64(efc->sli, data,
+ &nport->dma, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_read_sparm64_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn)
+{
+ u32 index;
+
+ nport->indicator = U32_MAX;
+ nport->free_req_pending = false;
+
+ if (wwpn)
+ memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));
+
+	/*
+	 * Allocate a VPI object for the port and store it in the
+	 * indicator field of the port object.
+	 */
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
+ &nport->indicator, &index)) {
+ efc_log_err(efc, "VPI allocation failure\n");
+ return -EIO;
+ }
+
+ if (domain) {
+ /*
+ * If the WWPN is NULL, fetch the default
+ * WWPN and WWNN before initializing the VPI
+ */
+ if (!wwpn)
+ efc_nport_alloc_read_sparm64(efc, nport);
+ else
+ efc_nport_alloc_init_vpi(nport);
+ } else if (!wwpn) {
+		/* domain NULL and wwpn NULL */
+ efc_log_err(efc, "need WWN for physical port\n");
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!nport) {
+ efc_log_err(efc, "bad param(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ nport->fc_id = fc_id;
+
+ /* register previously-allocated VPI with the device */
+ rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
+ nport->sli_wwpn, nport->indicator,
+ nport->domain->indicator, false);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ return rc;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_nport_attach_reg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ }
+
+ return rc;
+}
+
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
+{
+ if (!nport) {
+ efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ /* Issue the UNREG_VPI command to free the assigned VPI context */
+ if (nport->attached)
+ efc_nport_free_unreg_vpi(nport);
+ else
+ nport->free_req_pending = true;
+
+ return 0;
+}
+
+static int
+efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
+{
+ struct efc *efc = domain->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
+ domain->indicator, status,
+ le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Free the service parameters buffer */
+ if (domain->dma.virt) {
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);
+
+ efc_domain_cb(efc, evt, domain);
+}
+
+static void
+efc_domain_send_nport_evt(struct efc_domain *domain,
+ int port_evt, int domain_evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Send alloc/attach ok to the physical nport */
+ efc_nport_send_evt(domain->nport, port_evt, NULL);
+
+ /* Now inform the registered callbacks */
+ efc_domain_cb(efc, domain_evt, domain);
+}
+
+static int
+efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_domain_alloc_read_sparm64(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_read_sparm64_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_alloc_read_sparm64(domain);
+ return 0;
+}
+
+static void
+efc_domain_alloc_init_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_nport *nport = domain->nport;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /*
+	 * For FC, the HW already registered an FCFI.
+ * Copy FCF information into the domain and jump to INIT_VFI.
+ */
+ domain->fcf_indicator = efc->fcfi;
+ rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
+ domain->fcf_indicator, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ efc_log_err(efc, "%s issue mbox\n", __func__);
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_init_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
+{
+ u32 index;
+
+ if (!domain || !domain->nport) {
+ efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
+ domain, domain ? domain->nport : NULL);
+ return -EIO;
+ }
+
+ /* allocate memory for the service parameters */
+ domain->dma.size = EFC_SPARAM_DMA_SZ;
+ domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ domain->dma.size,
+ &domain->dma.phys, GFP_DMA);
+ if (!domain->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ return -EIO;
+ }
+
+ domain->fcf = fcf;
+ domain->fcf_indicator = U32_MAX;
+ domain->indicator = U32_MAX;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
+ &index)) {
+ efc_log_err(efc, "VFI allocation failure\n");
+
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+
+ return -EIO;
+ }
+
+ efc_domain_alloc_init_vfi(domain);
+ return 0;
+}
+
+static int
+efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!domain) {
+ efc_log_err(efc, "bad param(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ domain->nport->fc_id = fc_id;
+
+ rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
+ domain->fcf_indicator, domain->dma,
+ domain->nport->indicator, domain->nport->sli_wwpn,
+ domain->nport->fc_id);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_domain_attach_reg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return rc;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);
+
+ return rc;
+}
+
+static int
+efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_domain *domain = arg;
+ int evt = EFC_HW_DOMAIN_FREE_OK;
+ int rc;
+
+ rc = efc_domain_get_mbox_status(domain, mqe, status);
+ if (rc) {
+ evt = EFC_HW_DOMAIN_FREE_FAIL;
+ rc = -EIO;
+ }
+
+ efc_domain_free_resources(domain, evt, mqe);
+ return rc;
+}
+
+static void
+efc_domain_free_unreg_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
+ SLI4_UNREG_TYPE_DOMAIN);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_free_unreg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
+}
+
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
+{
+ if (!domain) {
+ efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ efc_domain_free_unreg_vfi(domain);
+ return 0;
+}
+
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport)
+{
+ /* Check for invalid indicator */
+ if (rnode->indicator != U32_MAX) {
+ efc_log_err(efc,
+ "RPI allocation failure addr=%#x rpi=%#x\n",
+ fc_addr, rnode->indicator);
+ return -EIO;
+ }
+
+ /* NULL SLI port indicates an unallocated remote node */
+ rnode->nport = NULL;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
+ &rnode->indicator, &rnode->index)) {
+ efc_log_err(efc, "RPI allocation failure addr=%#x\n",
+ fc_addr);
+ return -EIO;
+ }
+
+ rnode->fc_id = fc_addr;
+ rnode->nport = nport;
+
+ return 0;
+}
+
+static int
+efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_ATTACH_FAIL;
+ } else {
+ rnode->attached = true;
+ evt = EFC_EVT_NODE_ATTACH_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return 0;
+}
+
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms)
+{
+ int rc = -EIO;
+ u8 buf[SLI4_BMBX_SIZE];
+
+ if (!rnode || !sparms) {
+ efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
+ rnode, sparms);
+ return -EIO;
+ }
+
+	/*
+	 * Check that an RPI resource was actually allocated for this node
+	 * before attempting to register it.
+	 */
+ if (rnode->index == U32_MAX) {
+ efc_log_err(efc, "bad parameter rnode->index invalid\n");
+ return -EIO;
+ }
+
+ /* Update a remote node object with the remote port's service params */
+ if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
+ rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_attach_cb, rnode);
+
+ return rc;
+}
+
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
+{
+ int rc = 0;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (rnode->attached) {
+ efc_log_err(efc, "rnode is still attached\n");
+ return -EIO;
+ }
+ if (rnode->indicator != U32_MAX) {
+ if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
+ rnode->indicator)) {
+ efc_log_err(efc,
+ "RPI free fail RPI %d addr=%#x\n",
+ rnode->indicator, rnode->fc_id);
+ rc = -EIO;
+ } else {
+ rnode->indicator = U32_MAX;
+ rnode->index = U32_MAX;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int
+efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = EFC_EVT_NODE_FREE_FAIL;
+ int rc = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+
+ /*
+ * In certain cases, a non-zero MQE status is OK (all must be
+ * true):
+ * - node is attached
+ * - status is 0x1400
+ */
+ if (!rnode->attached ||
+ (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
+ rc = -EIO;
+ }
+
+ if (!rc) {
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_FREE_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return rc;
+}
+
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = -EIO;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (!rnode->attached)
+ return -EIO;
+
+ rc = -EIO;
+
+ if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
+ SLI4_RSRC_RPI, U32_MAX))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_free_cb, rnode);
+
+ if (rc != 0) {
+ efc_log_err(efc, "UNREG_RPI failed\n");
+ rc = -EIO;
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/libefc/efc_cmds.h b/drivers/scsi/elx/libefc/efc_cmds.h
new file mode 100644
index 000000000000..4d353ab04dc3
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_CMDS_H__
+#define __EFC_CMDS_H__
+
+#define EFC_SPARAM_DMA_SZ 112
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn);
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id);
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport);
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf);
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id);
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain);
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms);
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport);
+
+#endif /* __EFC_CMDS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
new file mode 100644
index 000000000000..725ca2a23fb2
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.c
@@ -0,0 +1,1603 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * device_sm Node State Machine: Remote Device States
+ */
+
+#include "efc.h"
+#include "efc_device.h"
+#include "efc_fabric.h"
+
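+/*
+ * Prepare to send a PRLI LS_ACC: save the OX_ID and, when the remote node
+ * is an initiator on a target-enabled nport, wait for backend session
+ * registration (scsi_new_node) to complete before the response is sent.
+ */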
+void
+efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
+{
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ struct efc *efc = node->efc;
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;
+
+ /*
+ * Wait for backend session registration
+ * to complete before sending PRLI resp
+ */
+
+ if (node->init) {
+		efc_log_info(efc, "[%s] found (initiator) WWPN:%s WWNN:%s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_new_node(efc, node);
+ }
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
+}
+
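+/*
+ * Default event handler for the device node states: shutdown events record
+ * the shutdown reason and transition to __efc_d_initiate_shutdown; all other
+ * events are deferred to the common node handler.
+ */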
+static void
+__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_node(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ /*
+ * State is entered when a node sends a delete initiator/target call
+ * to the target-server/initiator-client and needs to wait for that
+ * work to complete.
+ */
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ /*
+ * node has either been detached or is in the process
+ * of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+		/* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ efc_node_transition(node, __efc_d_wait_del_node, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+		/* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
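+/*
+ * Start node shutdown: issue the backend delete upcall(s) for the node's
+ * initiator, target, or initiator+target role, then detach the HW node and,
+ * if neither role is active, proceed directly to node cleanup.
+ */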
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+
+ /* assume no wait needed */
+ node->els_io_enabled = false;
+
+ /* make necessary delete upcall(s) */
+ if (node->init && !node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (initiator) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+
+ } else if (node->targ && !node->init) {
+ efc_log_info(node->efc,
+ "[%s] delete (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+
+ } else if (node->init && node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (I+T) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ efc_node_transition(node, __efc_d_wait_del_ini_tgt,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ /* assume no wait needed */
+ rc = EFC_SCSI_CALL_COMPLETE;
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ }
+
+ /* we've initiated the upcalls as needed, now kick off the node
+ * detach to precipitate the aborting of outstanding exchanges
+ * associated with said node
+ *
+ * Beware: if we've made upcall(s), we've already transitioned
+ * to a new state by the time we execute this.
+ * consider doing this before the upcalls?
+ */
+ if (node->attached) {
+ /* issue hw node free; don't care if succeeds right
+ * away or sometime later, will check node->attached
+ * later in shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0)
+ node_printf(node,
+ "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+
+ /* if neither initiator nor target, proceed to cleanup */
+ if (!node->init && !node->targ) {
+ /*
+ * node has either been detached or is in
+ * the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ }
+ break;
+ }
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* Ignore, this can happen if an ELS is
+ * aborted while in a delay/retry state
+ */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ /* send PLOGI automatically if initiator */
+ efc_node_init_device(node, true);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls)
+{
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+
+ /* Save the OX_ID for sending LS_ACC sometime later */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = ls;
+ node->ls_acc_did = ntoh24(hdr->fh_d_id);
+}
+
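+/*
+ * Parse the received PRLI service parameter page and record whether the
+ * remote node supports the initiator and/or target function.
+ */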
+void
+efc_process_prli_payload(struct efc_node *node, void *prli)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = prli;
+ node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
+ node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
+}
+
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_d_port_logged_in, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+		/* LOGO response received, send shutdown */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node_printf(node,
+ "LOGO sent (evt=%s), shutdown node\n",
+ efc_sm_event_name(evt));
+ /* sm: / post explicit logout */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
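+/*
+ * Start the device node state machine in __efc_d_init, optionally sending a
+ * PLOGI; new nodes (except the domain manager node) may be parked in
+ * __efc_node_paused when EFC_NODEDB_PAUSE_NEW_NODES is set in the node
+ * database mask.
+ */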
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi)
+{
+ node->send_plogi = send_plogi;
+ if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
+ (node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ node->nodedb_state = __efc_d_init;
+ efc_node_transition(node, __efc_node_paused, NULL);
+ } else {
+ efc_node_transition(node, __efc_d_init, NULL);
+ }
+}
+
+static void
+efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
+{
+ switch (node->nport->topology) {
+ case EFC_NPORT_TOPO_P2P:
+ /* we're not attached and nport is p2p,
+ * need to attach
+ */
+ efc_domain_attach(node->nport->domain, d_id);
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_FABRIC:
+ /* we're not attached and nport is fabric, domain
+ * attach should have already been requested as part
+ * of the fabric state machine, wait for it
+ */
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_UNKNOWN:
+ /* Two possibilities:
+ * 1. received a PLOGI before our FLOGI has completed
+ * (possible since completion comes in on another
+ * CQ), thus we don't know what we're connected to
+ * yet; transition to a state to wait for the
+ * fabric node to tell us;
+ * 2. PLOGI received before link went down and we
+ * haven't performed domain attach yet.
+ * Note: we cannot distinguish between 1. and 2.
+ * so have to assume PLOGI
+ * was received after link back up.
+ */
+ node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
+ d_id);
+ efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
+ break;
+ default:
+ node_printf(node, "received PLOGI, unexpected topology %d\n",
+ node->nport->topology);
+ }
+}
+
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * This state is entered when a node is instantiated,
+ * either having been discovered from a name services query,
+ * or having received a PLOGI/FLOGI.
+ */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->send_plogi)
+ break;
+ /* only send if we have initiator capability,
+ * and domain is attached
+ */
+ if (node->nport->enable_ini &&
+ node->nport->domain->attached) {
+ efc_send_plogi(node);
+
+ efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
+ } else {
+ node_printf(node, "not sending plogi nport.ini=%d,",
+ node->nport->enable_ini);
+ node_printf(node, "domain attached=%d\n",
+ node->nport->domain->attached);
+ }
+ break;
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ int rc;
+
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* domain not attached; several possibilities: */
+ if (!node->nport->domain->attached) {
+ efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
+ break;
+ }
+
+ /* domain already attached */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_FDISC_RCVD: {
+ __efc_d_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ case EFC_EVT_FLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ u32 d_id = ntoh24(hdr->fh_d_id);
+
+ /* sm: / save sparams, send FLOGI acc */
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->payload->dma.virt,
+ sizeof(struct fc_els_flogi));
+
+ /* send FC LS_ACC response, override s_id */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+
+ efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);
+
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node, "p2p failed, shutting down node\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and shut node down w/ "explicit logout"
+ * so pending frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* note: problem, we're now expecting an ELS REQ completion
+ * from both the LOGO and PLOGI
+ */
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a
+ * link down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ /* Send LOGO */
+ node_printf(node, "FCP_CMND received, send LOGO\n");
+ if (efc_send_logo(node)) {
+ /*
+ * failed to send LOGO, go ahead and cleanup node
+ * anyways
+ */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ } else {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node,
+ __efc_d_wait_logo_rsp, NULL);
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
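+/*
+ * Wait for the response to a transmitted PLOGI; a crossing PLOGI or PRLI
+ * from the remote node is also handled here, since WCQE/RCQE ordering
+ * cannot be assumed.
+ */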
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ /* received PLOGI with svc parms, go ahead and attach node
+ * when PLOGI that was sent ultimately completes, it'll be a
+ * no-op
+ *
+ * If there is an outstanding PLOGI sent, can we set a flag
+ * to indicate that we don't want to retry it if it times out?
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+ /* sm: domain->attached / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* sent PLOGI and before completion was seen, received the
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues and order of processing cannot be assumed)
+ * Save OXID so PRLI can be sent after the attach and continue
+ * to wait for PLOGI response
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* Our PLOGI was rejected, this is ok in some cases */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* not logged in yet and outstanding PLOGI so don't send LOGO,
+ * just drop
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+		/*
+		 * Since we've received a PRLI, we have a port login and just
+		 * need to wait for the PLOGI response to do the node attach;
+		 * then we can send the LS_ACC for the PRLI. During this time
+		 * we may receive FCP_CMNDs (possible since we've already sent
+		 * a PRLI and our peer may have accepted it). We are not
+		 * waiting on any other unsolicited frames to continue with
+		 * the login process, so it will not hurt to hold frames here.
+		 */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
+ enum efc_nport_topology topology =
+ (enum efc_nport_topology)arg;
+
+ WARN_ON(node->nport->domain->attached);
+
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ node_printf(node, "topology notification, topology=%d\n",
+ topology);
+
+ /* At the time the PLOGI was received, the topology was unknown,
+ * so we didn't know which node would perform the domain attach:
+ * 1. The node from which the PLOGI was sent (p2p) or
+ * 2. The node to which the FLOGI was sent (fabric).
+ */
+ if (topology == EFC_NPORT_TOPO_P2P) {
+ /* if this is p2p, need to attach to the domain using
+ * the d_id from the PLOGI received
+ */
+ efc_domain_attach(node->nport->domain,
+ node->ls_acc_did);
+ }
+ /* else, if this is fabric, the domain attach
+ * should be performed by the fabric node (node sending FLOGI);
+ * just wait for attach to complete
+ */
+
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ node_printf(node, "domain attach ok\n");
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
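+/*
+ * Wait for the HW node attach to complete, then send the deferred PLOGI or
+ * PRLI LS_ACC (if one is pending) and move to the port-logged-in state; an
+ * attach failure shuts the node down.
+ */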
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PLOGI: {
+ /* sm: send_plogi_acc is set / send PLOGI acc */
+ /* Normal case for T, or I+T */
+ efc_send_plogi_acc(node, node->ls_acc_oxid);
+ efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
+ NULL);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node, node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ efc_node_transition(node,
+ __efc_d_port_logged_in, NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node,
+ __efc_d_wait_attach_evt_shutdown, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* Normal case for I or I+T */
+ if (node->nport->enable_ini &&
+ !(node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ /* sm: if enable_ini / send PRLI */
+ efc_send_prli(node);
+ /* can now expect ELS_REQ_OK/FAIL/RJT */
+ }
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* Normal case for T or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+			/* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_NODE_SESS_REG_OK:
+ if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
+ efc_send_prli_acc(node, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_NODE_SESS_REG_FAIL:
+ efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
+ ELS_EXPL_UNSUPR, 0);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
+ /* Normal case for I or I+T */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process PRLI payload */
+ efc_process_prli_payload(node, cbdata->els_rsp.virt);
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
+ /* I, I+T, assume some link failure, shutdown node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT: {
+ /* PRLI rejected by remote
+ * Normal for I, I+T (connected to an I)
+ * Node doesn't want to be a target, stay here and wait for a
+ * PRLI from the remote node
+ * if it really wants to connect to us as target
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: {
+ /* Normal T, I+T, target-server rejected the process login */
+ /* This would be received only in the case where we sent
+ * LS_RJT for the PRLI, so
+ * do nothing. (note: as T only we could shutdown the node)
+ */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+		/* sm: / save sparams, set send_plogi_acc, post implicit
+		 * logout
+		 * Save plogi parameters
+		 */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt),
+ node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* sm: / post explicit logout */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
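+/*
+ * Device ready state: FCP is enabled and the node services PLOGI, PRLI,
+ * PRLO, LOGO and ADISC requests; an EFC_EVT_NODE_MISSING event moves the
+ * node to __efc_d_device_gone when RSCN handling is enabled on the nport.
+ */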
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ if (evt != EFC_EVT_FCP_CMD_RCVD)
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node->fcp_enabled = true;
+ if (node->targ) {
+ efc_log_info(efc,
+ "[%s] found (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ if (node->nport->enable_ini)
+ efc->tt.scsi_new_node(efc, node);
+ }
+ break;
+
+ case EFC_EVT_EXIT:
+ node->fcp_enabled = false;
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* T, I+T: remote initiator is slow to get started */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+			/* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_PRLO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send PRLO acc */
+ efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ /* need implicit logout? */
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_ADISC_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send ADISC acc */
+ efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: / process ABTS */
+ efc_log_err(efc, "Unexpected event:%s\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ break;
+
+ case EFC_EVT_NODE_MISSING:
+ if (node->nport->enable_rscn)
+ efc_node_transition(node, __efc_d_device_gone, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ /* T, or I+T, PRLI accept completed ok */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* T, or I+T, PRLI accept failed to complete */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node_printf(node, "Failed to send PRLI LS_ACC\n");
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
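+/*
+ * Node reported missing: issue the appropriate backend delete upcall(s) and
+ * shut the node down, or re-authenticate with ADISC if the node is found
+ * again.
+ */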
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ int rc_2 = EFC_SCSI_CALL_COMPLETE;
+ static const char * const labels[] = {
+ "none", "initiator", "target", "initiator+target"
+ };
+
+ efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n",
+ node->display_name,
+ labels[(node->targ << 1) | (node->init)],
+ node->wwpn, node->wwnn);
+
+ switch (efc_node_get_enable(node)) {
+ case EFC_NODE_ENABLE_T_TO_T:
+ case EFC_NODE_ENABLE_I_TO_T:
+ case EFC_NODE_ENABLE_IT_TO_T:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_I:
+ case EFC_NODE_ENABLE_I_TO_I:
+ case EFC_NODE_ENABLE_IT_TO_I:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_I_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_IT_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ rc_2 = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ default:
+ rc = EFC_SCSI_CALL_COMPLETE;
+ break;
+ }
+
+ if (rc == EFC_SCSI_CALL_COMPLETE &&
+ rc_2 == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+
+ break;
+ }
+ case EFC_EVT_NODE_REFOUND:
+ /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
+
+ /* reauthenticate with PLOGI/PRLI */
+ /* efc_node_transition(node, __efc_d_discovered, NULL); */
+
+ /* reauthenticate with ADISC */
+ /* sm: / send ADISC */
+ efc_send_adisc(node);
+ efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters, and send
+ * ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* most likely a stale frame (received prior to link down),
+ * if attempt to send LOGO, will probably timeout and eat
+ * up 20s; thus, drop FCP_CMND
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* received an LS_RJT, in this case, send shutdown
+ * (explicit logo) event which will unregister the node,
+ * and start over with PLOGI
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / post explicit logout */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* In this case, we have the equivalent of an LS_RJT for
+ * the ADISC, so we need to abort the ADISC, and re-login
+ * with PLOGI
+ */
+ /* sm: / request abort, send LOGO acc */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_device.h b/drivers/scsi/elx/libefc/efc_device.h
new file mode 100644
index 000000000000..3cf1d8c6698f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Node state machine functions for remote device node sm
+ */
+
+#ifndef __EFCT_DEVICE_H__
+#define __EFCT_DEVICE_H__
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi);
+void
+efc_process_prli_payload(struct efc_node *node,
+ void *prli);
+void
+efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id);
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls);
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+#endif /* __EFCT_DEVICE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_domain.c b/drivers/scsi/elx/libefc/efc_domain.c
new file mode 100644
index 000000000000..ca9d7ff2c0d2
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * domain_sm Domain State Machine: States
+ */
+
+#include "efc.h"
+
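+/*
+ * Handle EFC_HW_DOMAIN_* callback events from the user driver: translate
+ * them into domain state machine events under the efc lock, allocating a
+ * new domain on EFC_HW_DOMAIN_FOUND if one does not already exist.
+ */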
+int
+efc_domain_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_domain *domain = NULL;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (event != EFC_HW_DOMAIN_FOUND)
+ domain = data;
+
+ /* Accept domain callback events from the user driver */
+ spin_lock_irqsave(&efc->lock, flags);
+ switch (event) {
+ case EFC_HW_DOMAIN_FOUND: {
+ u64 fcf_wwn = 0;
+ struct efc_domain_record *drec = data;
+
+ /* extract the fcf_wwn */
+ fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn));
+
+ efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn);
+
+ /* lookup domain, or allocate a new one */
+ domain = efc->domain;
+ if (!domain) {
+ domain = efc_domain_alloc(efc, fcf_wwn);
+ if (!domain) {
+ efc_log_err(efc, "efc_domain_alloc() failed\n");
+ rc = -1;
+ break;
+ }
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ }
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec);
+ break;
+ }
+
+ case EFC_HW_DOMAIN_LOST:
+ domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n");
+ efc->hold_frames = true;
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL,
+ NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n");
+ efc_domain_post_event(domain,
+ EFC_EVT_DOMAIN_ATTACH_FAIL, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL);
+ break;
+
+ default:
+ efc_log_warn(efc, "unsupported event %#x\n", event);
+ }
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ if (efc->domain && domain->req_accept_frames) {
+ domain->req_accept_frames = false;
+ efc->hold_frames = false;
+ }
+
+ return rc;
+}
+
+static void
+_efc_domain_free(struct kref *arg)
+{
+ struct efc_domain *domain = container_of(arg, struct efc_domain, ref);
+ struct efc *efc = domain->efc;
+
+ if (efc->domain_free_cb)
+ (*efc->domain_free_cb)(efc, efc->domain_free_cb_arg);
+
+ kfree(domain);
+}
+
+void
+efc_domain_free(struct efc_domain *domain)
+{
+ struct efc *efc;
+
+ efc = domain->efc;
+
+	/* Stop holding frames; the domain pointer is being cleared from the xport lookup */
+ efc->hold_frames = false;
+
+ efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn);
+
+ xa_destroy(&domain->lookup);
+ efc->domain = NULL;
+ kref_put(&domain->ref, domain->release);
+}
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn)
+{
+ struct efc_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_ATOMIC);
+ if (!domain)
+ return NULL;
+
+ domain->efc = efc;
+ domain->drvsm.app = domain;
+
+ /* initialize refcount */
+ kref_init(&domain->ref);
+ domain->release = _efc_domain_free;
+
+ xa_init(&domain->lookup);
+
+ INIT_LIST_HEAD(&domain->nport_list);
+ efc->domain = domain;
+ domain->fcf_wwn = fcf_wwn;
+ efc_log_debug(efc, "Domain allocated: wwn %016llX\n", domain->fcf_wwn);
+
+ return domain;
+}
+
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg)
+{
+ /* Register a callback to be called when the domain is freed */
+ efc->domain_free_cb = callback;
+ efc->domain_free_cb_arg = arg;
+ if (!efc->domain && callback)
+ (*callback)(efc, arg);
+}
+
+static void
+__efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /*
+ * this can arise if an FLOGI fails on the NPORT,
+ * and the NPORT is shutdown
+ */
+ break;
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+static void
+__efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ break;
+ case EFC_EVT_DOMAIN_FOUND:
+ /* save drec, mark domain_found_pending */
+ memcpy(&domain->pending_drec, arg,
+ sizeof(domain->pending_drec));
+ domain->domain_found_pending = true;
+ break;
+ case EFC_EVT_DOMAIN_LOST:
+ /* unmark domain_found_pending */
+ domain->domain_found_pending = false;
+ break;
+
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
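+/*
+ * Common declarations for the domain state machine handlers: recover the
+ * domain and efc pointers from the state machine context.
+ */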
+#define std_domain_state_decl(...)\
+ struct efc_domain *domain = NULL;\
+ struct efc *efc = NULL;\
+ \
+ WARN_ON(!ctx || !ctx->app);\
+ domain = ctx->app;\
+ WARN_ON(!domain->efc);\
+ efc = domain->efc
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ domain->attached = false;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND: {
+ u32 i;
+ struct efc_domain_record *drec = arg;
+ struct efc_nport *nport;
+
+ u64 my_wwnn = efc->req_wwnn;
+ u64 my_wwpn = efc->req_wwpn;
+ __be64 bewwpn;
+
+ if (my_wwpn == 0 || my_wwnn == 0) {
+ efc_log_debug(efc, "using default hardware WWN config\n");
+ my_wwpn = efc->def_wwpn;
+ my_wwnn = efc->def_wwnn;
+ }
+
+ efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n",
+ my_wwpn, my_wwnn);
+
+ /* Allocate a nport and transition to __efc_nport_allocated */
+ nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX,
+ efc->enable_ini, efc->enable_tgt);
+
+ if (!nport) {
+ efc_log_err(efc, "efc_nport_alloc() failed\n");
+ break;
+ }
+ efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL);
+
+ bewwpn = cpu_to_be64(nport->wwpn);
+
+ /* allocate struct efc_nport object for local port
+ * Note: drec->fc_id is ALPA from read_topology only if loop
+ */
+ if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ efc_nport_free(nport);
+ break;
+ }
+
+ domain->is_loop = drec->is_loop;
+
+ /*
+ * If the loop position map includes ALPA == 0,
+ * then we are in a public loop (NL_PORT)
+ * Note that the first element of the loopmap[]
+ * contains the count of elements, and if
+ * ALPA == 0 is present, it will occupy the first
+ * location after the count.
+ */
+ domain->is_nlport = drec->map.loop[1] == 0x00;
+
+ if (!domain->is_loop) {
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+
+ efc_log_debug(efc, "%s fc_id=%#x speed=%d\n",
+ drec->is_loop ?
+ (domain->is_nlport ?
+ "public-loop" : "loop") : "other",
+ drec->fc_id, drec->speed);
+
+ nport->fc_id = drec->fc_id;
+ nport->topology = EFC_NPORT_TOPO_FC_AL;
+ snprintf(nport->display_name, sizeof(nport->display_name),
+ "s%06x", drec->fc_id);
+
+ if (efc->enable_ini) {
+ u32 count = drec->map.loop[0];
+
+ efc_log_debug(efc, "%d position map entries\n",
+ count);
+ for (i = 1; i <= count; i++) {
+ if (drec->map.loop[i] != drec->fc_id) {
+ struct efc_node *node;
+
+ efc_log_debug(efc, "%#x -> %#x\n",
+ drec->fc_id,
+ drec->map.loop[i]);
+ node = efc_node_alloc(nport,
+ drec->map.loop[i],
+ false, true);
+ if (!node) {
+ efc_log_err(efc,
+ "efc_node_alloc() failed\n");
+ break;
+ }
+ efc_node_transition(node,
+ __efc_d_wait_loop,
+ NULL);
+ }
+ }
+ }
+
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+ struct efc_nport *nport;
+
+ nport = domain->nport;
+ if (WARN_ON(!nport))
+ return;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ /* Save the domain service parameters */
+ memcpy(domain->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+ memcpy(nport->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+
+ /*
+ * Update the nport's service parameters,
+ * user might have specified non-default names
+ */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * Take the loop topology path,
+ * unless we are an NL_PORT (public loop)
+ */
+ if (domain->is_loop && !domain->is_nlport) {
+ /*
+ * For loop, we already have our FC ID
+ * and don't need fabric login.
+ * Transition to the allocated state and
+ * post an event to attach to
+ * the domain. Note that this breaks the
+ * normal action/transition
+ * pattern here to avoid a race with the
+ * domain attach callback.
+ */
+ /* sm: is_loop / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ __efc_domain_attach_internal(domain, nport->fc_id);
+ break;
+ }
+ {
+ struct efc_node *node;
+
+ /* alloc fabric node, send FLOGI */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (node) {
+ efc_log_err(efc,
+ "Fabric Controller node already exists\n");
+ break;
+ }
+ node = efc_node_alloc(nport, FC_FID_FLOGI,
+ false, false);
+ if (!node) {
+ efc_log_err(efc,
+ "Error: efc_node_alloc() failed\n");
+ } else {
+ efc_node_transition(node,
+ __efc_fabric_init, NULL);
+ }
+ /* Accept frames */
+ domain->req_accept_frames = true;
+ }
+ /* sm: / start fabric logins */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;",
+ efc_sm_event_name(evt));
+ efc_log_err(efc, "shutting down domain\n");
+ domain->req_domain_free = true;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ efc_log_debug(efc,
+ "%s received while waiting for hw_domain_alloc()\n",
+ efc_sm_event_name(evt));
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ int rc = 0;
+ u32 fc_id;
+
+ if (WARN_ON(!arg))
+ return;
+
+ fc_id = *((u32 *)arg);
+ efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n",
+ fc_id);
+ /* Update nport lookup */
+ rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport,
+ GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return;
+ }
+
+ /* Update display name for the nport */
+ efc_node_fcid_display(fc_id, domain->nport->display_name,
+ sizeof(domain->nport->display_name));
+
+ /* Issue domain attach call */
+ rc = efc_cmd_domain_attach(efc, domain, fc_id);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_attach failed: %d\n",
+ rc);
+ return;
+ }
+ /* sm: / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_wait_attach, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST: {
+ efc_log_debug(efc,
+ "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n",
+ efc_sm_event_name(evt));
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each
+ * nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
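+/*
+ * Domain state: wait for the HW domain attach to complete.
+ * On EFC_EVT_DOMAIN_ATTACH_OK the domain is marked attached, frames
+ * are accepted, and the attach completion is forwarded to every node
+ * on every nport in the domain.
+ */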
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_node *node = NULL;
+ struct efc_nport *nport, *next_nport;
+ unsigned long index;
+
+ /*
+ * Set domain notify pending state to avoid
+ * duplicate domain event post
+ */
+ domain->domain_notify_pend = true;
+
+ /* Mark as attached */
+ domain->attached = true;
+
+ /* Transition to ready */
+ /* sm: / forward event to all nports and nodes */
+ efc_sm_transition(ctx, __efc_domain_ready, NULL);
+
+ /* We have an FCFI, so we can accept frames */
+ domain->req_accept_frames = true;
+
+ /*
+ * Notify all nodes that the domain attach request
+ * has completed
+ * Note: nport will have already received notification
+ * of nport attached as a result of the HW's port attach.
+ */
+ list_for_each_entry_safe(nport, next_nport,
+ &domain->nport_list, list_entry) {
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ }
+ domain->domain_notify_pend = false;
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_debug(efc,
+ "%s received while waiting for hw attach\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ /*
+ * Domain lost while waiting for an attach to complete,
+ * go to a state that waits for the domain attach to
+ * complete, then handle domain lost
+ */
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH:
+ /*
+ * In P2P we can get an attach request from
+ * the other FLOGI path, so drop this one
+ */
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ /* start any pending vports */
+ if (efc_vport_start(domain)) {
+ efc_log_debug(domain->efc,
+ "efc_vport_start didn't start vports\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_LOST: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to wait state
+ * and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ /* can happen during p2p */
+ u32 fc_id;
+
+ fc_id = *((u32 *)arg);
+
+ /* Assume that the domain is attached */
+ WARN_ON(!domain->attached);
+
+ /*
+ * Verify that the requested FC_ID
+ * is the same as the one we're working with
+ */
+ WARN_ON(domain->nport->fc_id != fc_id);
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /* Wait for nodes to free prior to the domain shutdown */
+ switch (evt) {
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ int rc;
+
+ /* sm: / efc_hw_domain_free */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL);
+
+ /* Request efc_hw_domain_free and wait for completion */
+ rc = efc_cmd_domain_free(efc, domain);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_free() failed: %d\n",
+ rc);
+ }
+ break;
+ }
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_FREE_OK:
+ /* sm: / domain_free */
+ if (domain->domain_found_pending) {
+ /*
+ * Save the fcf_wwn and drec from this domain, free the current
+ * domain, and allocate a new one with the same fcf_wwn. An SLI-4
+ * "re-register VPI" operation could possibly be used here instead.
+ */
+ u64 fcf_wwn = domain->fcf_wwn;
+ struct efc_domain_record drec = domain->pending_drec;
+
+ efc_log_debug(efc, "Reallocating domain\n");
+ domain->req_domain_free = true;
+ domain = efc_domain_alloc(efc, fcf_wwn);
+
+ if (!domain) {
+ efc_log_err(efc,
+ "efc_domain_alloc() failed\n");
+ return;
+ }
+ /*
+ * Got a new domain; at this point there are at least two domains.
+ * Once the req_domain_free flag is processed, the associated
+ * domain will be removed.
+ */
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ efc_sm_post_event(&domain->drvsm,
+ EFC_EVT_DOMAIN_FOUND, &drec);
+ } else {
+ domain->req_domain_free = true;
+ }
+ break;
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /*
+ * Wait for the domain alloc/attach completion
+ * after receiving a domain lost.
+ */
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK:
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free() failed\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_err(efc, "[domain] %-20s: failed\n",
+ efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id)
+{
+ memcpy(domain->dma.virt,
+ ((uint8_t *)domain->flogi_service_params) + 4,
+ sizeof(struct fc_els_flogi) - 4);
+ (void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH,
+ &s_id);
+}
+
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id)
+{
+ __efc_domain_attach_internal(domain, s_id);
+}
+
+int
+efc_domain_post_event(struct efc_domain *domain,
+ enum efc_sm_event event, void *arg)
+{
+ int rc;
+ bool req_domain_free;
+
+ rc = efc_sm_post_event(&domain->drvsm, event, arg);
+
+ req_domain_free = domain->req_domain_free;
+ domain->req_domain_free = false;
+
+ if (req_domain_free)
+ efc_domain_free(domain);
+
+ return rc;
+}
+
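+/*
+ * Drain the pending-frames list, dispatching one sequence at a time.
+ * Draining stops as soon as hold_frames is set, since any dispatched
+ * frame may drive the state machine into a state that holds frames.
+ */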
+static void
+efct_domain_process_pending(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* Check for the hold-frames condition after each frame is
+ * processed, because any given frame could cause a transition
+ * to a state that holds frames.
+ */
+ if (efc->hold_frames)
+ break;
+
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ if (!list_empty(&efc->pend_frames)) {
+ seq = list_first_entry(&efc->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+
+ if (!seq) {
+ processed = efc->pend_frames_processed;
+ efc->pend_frames_processed = 0;
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+ break;
+ }
+ efc->pend_frames_processed++;
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ /* now dispatch frame(s) to dispatch function */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+
+ seq = NULL;
+ }
+
+ if (processed != 0)
+ efc_log_debug(efc, "%u domain frames held and processed\n",
+ processed);
+}
+
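+/*
+ * Entry point for received frames. A frame is queued on the pending
+ * list when the domain is not yet registered, frames are being held,
+ * or earlier frames are still pending; otherwise it is dispatched to
+ * the domain handler directly. When a domain exists, queued frames
+ * are processed immediately after being added.
+ */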
+void
+efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = efc->domain;
+
+ /*
+ * If we are holding frames, the domain is not yet registered, or
+ * there are already frames on the pending list, then add the new
+ * frame to the pending list.
+ */
+ if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &efc->pend_frames);
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ if (domain) {
+ /* immediately process pending frames */
+ efct_domain_process_pending(domain);
+ }
+ } else {
+ /*
+ * We are not holding frames and pending list is empty,
+ * just process frame. A non-zero return means the frame
+ * was not handled - so cleanup
+ */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+ }
+}
+
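+/*
+ * Dispatch a single sequence to the node that owns the remote S_ID,
+ * allocating a new node if necessary. Returns EFC_HW_SEQ_HOLD when
+ * the frame was placed on the node's pending list; otherwise returns
+ * EFC_HW_SEQ_FREE so the caller frees the sequence.
+ */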
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = (struct efc_domain *)arg;
+ struct efc *efc = domain->efc;
+ struct fc_frame_header *hdr;
+ struct efc_node *node = NULL;
+ struct efc_nport *nport = NULL;
+ unsigned long flags = 0;
+ u32 s_id, d_id, rc = EFC_HW_SEQ_FREE;
+
+ if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) {
+ efc_log_err(efc, "Sequence header or payload is null\n");
+ return rc;
+ }
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ spin_lock_irqsave(&efc->lock, flags);
+
+ nport = efc_nport_find(domain, d_id);
+ if (!nport) {
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ /* Drop frame */
+ efc_log_warn(efc, "FCP frame with invalid d_id x%x\n",
+ d_id);
+ goto out;
+ }
+
+ /* p2p will use this case */
+ nport = domain->nport;
+ if (!nport || !kref_get_unless_zero(&nport->ref)) {
+ efc_log_err(efc, "Physical nport is NULL\n");
+ goto out;
+ }
+ }
+
+ /* Lookup the node given the remote s_id */
+ node = efc_node_find(nport, s_id);
+
+ /* If not found, then create a new node */
+ if (!node) {
+ /*
+ * If this is solicited data or control based on R_CTL and
+ * there is no node context, then we can drop the frame
+ */
+ if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) ||
+ (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) {
+ efc_log_debug(efc, "sol data/ctrl frame without node\n");
+ goto out_release;
+ }
+
+ node = efc_node_alloc(nport, s_id, false, false);
+ if (!node) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ goto out_release;
+ }
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+
+ if (node->hold_frames || !list_empty(&node->pend_frames)) {
+ /* add frame to node's pending list */
+ spin_lock(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &node->pend_frames);
+ spin_unlock(&node->pend_frames_lock);
+ rc = EFC_HW_SEQ_HOLD;
+ goto out_release;
+ }
+
+ /* now dispatch frame to the node frame handler */
+ efc_node_dispatch_frame(node, seq);
+
+out_release:
+ kref_put(&nport->ref, nport->release);
+out:
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return rc;
+}
+
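+/*
+ * Node-level frame dispatch. Frames that do not carry both END_SEQ
+ * and SEQ_INIT are dropped; the rest are routed by R_CTL: ELS
+ * requests/responses to the ELS handler, unsolicited FCP and CT
+ * frames to their handlers, while BLS frames are only logged.
+ */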
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ u32 port_id;
+ struct efc_node *node = (struct efc_node *)arg;
+ struct efc *efc = node->efc;
+
+ port_id = ntoh24(hdr->fh_s_id);
+
+ if (WARN_ON(port_id != node->rnode.fc_id))
+ return;
+
+ if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) ||
+ !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) {
+ node_printf(node,
+ "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n",
+ cpu_to_be32(((u32 *)hdr)[0]),
+ cpu_to_be32(((u32 *)hdr)[1]),
+ cpu_to_be32(((u32 *)hdr)[2]),
+ cpu_to_be32(((u32 *)hdr)[3]),
+ cpu_to_be32(((u32 *)hdr)[4]),
+ cpu_to_be32(((u32 *)hdr)[5]));
+ return;
+ }
+
+ switch (hdr->fh_r_ctl) {
+ case FC_RCTL_ELS_REQ:
+ case FC_RCTL_ELS_REP:
+ efc_node_recv_els_frame(node, seq);
+ break;
+
+ case FC_RCTL_BA_ABTS:
+ case FC_RCTL_BA_ACC:
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_NOP:
+ efc_log_err(efc, "Received ABTS:\n");
+ break;
+
+ case FC_RCTL_DD_UNSOL_CMD:
+ case FC_RCTL_DD_UNSOL_CTL:
+ switch (hdr->fh_type) {
+ case FC_TYPE_FCP:
+ if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) {
+ if (!node->fcp_enabled) {
+ efc_node_recv_fcp_cmd(node, seq);
+ break;
+ }
+ efc_log_err(efc, "Recvd FCP CMD. Drop IO\n");
+ } else if ((hdr->fh_r_ctl & 0xf) ==
+ FC_RCTL_DD_SOL_DATA) {
+ node_printf(node,
+ "solicited data recvd. Drop IO\n");
+ }
+ break;
+
+ case FC_TYPE_CT:
+ efc_node_recv_ct_frame(node, seq);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_domain.h b/drivers/scsi/elx/libefc/efc_domain.h
new file mode 100644
index 000000000000..5468ea7ab19b
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declare driver's domain handler exported interface
+ */
+
+#ifndef __EFCT_DOMAIN_H__
+#define __EFCT_DOMAIN_H__
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn);
+void
+efc_domain_free(struct efc_domain *domain);
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id);
+int
+efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event,
+ void *arg);
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id);
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+
+#endif /* __EFCT_DOMAIN_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
new file mode 100644
index 000000000000..24db0accb256
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Functions to build and send ELS/CT/BLS commands and responses.
+ */
+
+#include "efc.h"
+#include "efc_els.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_LOG_ENABLE_ELS_TRACE(efc) \
+ (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0)
+
+#define node_els_trace() \
+ do { \
+ if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \
+ efc_log_info(efc, "[%s] %-20s\n", \
+ node->display_name, __func__); \
+ } while (0)
+
+#define els_io_printf(els, fmt, ...) \
+ efc_log_err((struct efc *)els->node->efc,\
+ "[%s] %-8s " fmt, \
+ els->node->display_name,\
+ els->display_name, ##__VA_ARGS__)
+
+#define EFC_ELS_RSP_LEN 1024
+#define EFC_ELS_GID_PT_RSP_LEN 8096
+
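+/*
+ * Allocate an ELS IO request from els_io_pool and set up DMA buffers
+ * for the request and response payloads. The object is refcounted:
+ * efc_els_io_free() drops the reference and _efc_els_io_free()
+ * releases the DMA memory and returns the object to the pool.
+ */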
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen)
+{
+ return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
+}
+
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
+{
+ struct efc *efc;
+ struct efc_els_io_req *els;
+ unsigned long flags = 0;
+
+ efc = node->efc;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+
+ if (!node->els_io_enabled) {
+ efc_log_err(efc, "els io alloc disabled\n");
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return NULL;
+ }
+
+ els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
+ if (!els) {
+ atomic_add_return(1, &efc->els_io_alloc_failed_count);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&els->ref);
+ els->release = _efc_els_io_free;
+
+ /* populate generic io fields */
+ els->node = node;
+
+ /* now allocate DMA for request and response */
+ els->io.req.size = reqlen;
+ els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
+ &els->io.req.phys, GFP_DMA);
+ if (!els->io.req.virt) {
+ mempool_free(els, efc->els_io_pool);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return NULL;
+ }
+
+ els->io.rsp.size = rsplen;
+ els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
+ &els->io.rsp.phys, GFP_DMA);
+ if (!els->io.rsp.virt) {
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+ mempool_free(els, efc->els_io_pool);
+ els = NULL;
+ }
+
+ if (els) {
+ /* initialize fields */
+ els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES;
+
+ /* add els structure to ELS IO list */
+ INIT_LIST_HEAD(&els->list_entry);
+ list_add_tail(&els->list_entry, &node->els_ios_list);
+ }
+
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return els;
+}
+
+void
+efc_els_io_free(struct efc_els_io_req *els)
+{
+ kref_put(&els->ref, els->release);
+}
+
+void
+_efc_els_io_free(struct kref *arg)
+{
+ struct efc_els_io_req *els =
+ container_of(arg, struct efc_els_io_req, ref);
+ struct efc *efc;
+ struct efc_node *node;
+ int send_empty_event = false;
+ unsigned long flags = 0;
+
+ node = els->node;
+ efc = node->efc;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+
+ list_del(&els->list_entry);
+ /* Send the list-empty event only if the IO allocator is disabled
+ * and the list is empty. Without the node->els_io_enabled check,
+ * the event would be posted continually.
+ */
+ send_empty_event = (!node->els_io_enabled &&
+ list_empty(&node->els_ios_list));
+
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+
+ /* free ELS request and response buffers */
+ dma_free_coherent(&efc->pci->dev, els->io.rsp.size,
+ els->io.rsp.virt, els->io.rsp.phys);
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+
+ mempool_free(els, efc->els_io_pool);
+
+ if (send_empty_event)
+ efc_scsi_io_list_empty(node->efc, node);
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els);
+
+static void
+efc_els_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_els_io_req *els = from_timer(els, t, delay_timer);
+
+ /* Retry delay timer expired, retry the ELS request */
+ efc_els_retry(els);
+}
+
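+/*
+ * Completion callback for ELS requests. A successful WCQE completes
+ * the request; an LS_RJT with reason "logical busy" arms a 5 second
+ * delay timer before retrying; a local-reject sequence timeout
+ * retries immediately; any other status fails the request.
+ */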
+static int
+efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 reason_code;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ if (status)
+ els_io_printf(els, "status x%x ext x%x\n", status, ext_status);
+
+ /* set the response len element of els->rsp */
+ els->io.rsp.len = length;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+ /* set the response length in the callback data */
+ cbdata.rsp_len = length;
+
+ /* FW returns the number of bytes received on the link in
+ * the WCQE, not the amount placed in the buffer; use this info to
+ * check if there was an overrun.
+ */
+ if (length > els->io.rsp.size) {
+ efc_log_warn(efc,
+ "ELS response returned len=%d > buflen=%zu\n",
+ length, els->io.rsp.size);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ return 0;
+ }
+
+ /* Post event to ELS IO object */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata);
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ reason_code = (ext_status >> 16) & 0xff;
+
+ /* delay and retry if reason code is Logical Busy */
+ switch (reason_code) {
+ case ELS_RJT_BUSY:
+ els->node->els_req_cnt--;
+ els_io_printf(els,
+ "LS_RJT Logical Busy, delay and retry\n");
+ timer_setup(&els->delay_timer,
+ efc_els_delay_timer_cb, 0);
+ mod_timer(&els->delay_timer,
+ jiffies + msecs_to_jiffies(5000));
+ break;
+ default:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT,
+ &cbdata);
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
+ efc_els_retry(els);
+ break;
+ default:
+ efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n",
+ ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL,
+ &cbdata);
+ break;
+ }
+ break;
+ default: /* Other error */
+ efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n",
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status)
+{
+ struct efc_els_io_req *els =
+ container_of(io, struct efc_els_io_req, io);
+
+ WARN_ON_ONCE(!els->cb);
+
+ ((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status);
+}
+
+static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
+ enum efc_disc_io_type io_type)
+{
+ int rc = 0;
+ struct efc *efc = node->efc;
+ struct efc_node_cb cbdata;
+
+ /* update ELS request counter */
+ els->node->els_req_cnt++;
+
+ /* Prepare the IO request details */
+ els->io.io_type = io_type;
+ els->io.xmit_len = els->io.req.size;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ els->io.s_id = node->nport->fc_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->rnode.attached)
+ els->io.rpi_registered = true;
+
+ els->cb = efc_els_req_cb;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_log_err(efc, "efc_els_send failed: %d\n", rc);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+
+ return rc;
+}
+
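+/*
+ * Resend an ELS request while retries remain. Once the retry count
+ * is exhausted, or the resend itself fails, the request is completed
+ * with EFC_EVT_SRRS_ELS_REQ_FAIL.
+ */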
+static void
+efc_els_retry(struct efc_els_io_req *els)
+{
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 rc;
+
+ efc = els->node->efc;
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+
+ if (els->els_retries_remaining) {
+ els->els_retries_remaining--;
+ rc = efc->tt.send_els(efc, &els->io);
+ } else {
+ rc = -EIO;
+ }
+
+ if (rc) {
+ efc_log_err(efc, "ELS retries exhausted\n");
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ }
+}
+
+static int
+efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+ /* Post node event */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata);
+ break;
+
+ default: /* Other error */
+ efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n",
+ node->display_name, els->display_name,
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen)
+{
+ int rc = 0;
+ struct efc_node_cb cbdata;
+ struct efc_node *node = els->node;
+ struct efc *efc = node->efc;
+
+ /* increment ELS completion counter */
+ node->els_cmpl_cnt++;
+
+ els->io.io_type = EFC_DISC_IO_ELS_RESP;
+ els->cb = efc_els_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.xmit_len = rsplen;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ if (node->nport->fc_id != U32_MAX)
+ els->io.s_id = node->nport->fc_id;
+ else
+ els->io.s_id = els->io.iparam.els.s_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->attached)
+ els->io.rpi_registered = true;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+
+ return rc;
+}
+
+int
+efc_send_plogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_flogi *plogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+ els->display_name = "plogi";
+
+ /* Build PLOGI request */
+ plogi = els->io.req.virt;
+
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+
+ plogi->fl_cmd = ELS_PLOGI;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_flogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *flogi;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi";
+
+ /* Build FLOGI request */
+ flogi = els->io.req.virt;
+
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_FLOGI;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_fdisc(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *fdisc;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*fdisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "fdisc";
+
+ /* Build FDISC request */
+ fdisc = els->io.req.virt;
+
+ memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
+ fdisc->fl_cmd = ELS_FDISC;
+ memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_prli(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli";
+
+ /* Build PRLI request */
+ pp = els->io.req.virt;
+
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = 16;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_logo(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_logo *logo;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo";
+
+ /* Build LOGO request */
+
+ logo = els->io.req.virt;
+
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
+ logo->fl_n_port_wwn = sparams->fl_wwpn;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_adisc(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+ struct efc_nport *nport = node->nport;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc";
+
+ /* Build ADISC request */
+
+ adisc = els->io.req.virt;
+
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ hton24(adisc->adisc_hard_addr, nport->fc_id);
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_scr(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_scr *req;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*req));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "scr";
+
+ req = els->io.req.virt;
+
+ memset(req, 0, sizeof(*req));
+ req->scr_cmd = ELS_SCR;
+ req->scr_reg_func = ELS_SCRF_FULL;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_rjt *rjt;
+
+ els = efc_els_io_alloc(node, sizeof(*rjt));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ node_els_trace();
+
+ els->display_name = "ls_rjt";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ rjt = els->io.req.virt;
+ memset(rjt, 0, sizeof(*rjt));
+
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason_code;
+ rjt->er_explan = reason_code_expl;
+
+ return efc_els_send_rsp(els, sizeof(*rjt));
+}
+
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *plogi;
+ struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "plogi_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ plogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+ plogi->fl_cmd = ELS_LS_ACC;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+ /* Mirror the broadcast support bit if the requester set it */
+ if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST))
+ plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST);
+
+ return efc_els_send_rsp(els, sizeof(*plogi));
+}
+
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *flogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi_p2p_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+ els->io.iparam.els.s_id = s_id;
+
+ flogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_LS_ACC;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp));
+
+ return efc_els_send_rsp(els, sizeof(*flogi));
+}
+
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = 0x10;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK;
+
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prlo prlo;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prlo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+ pp->prlo.prlo_cmd = ELS_LS_ACC;
+ pp->prlo.prlo_obs = 0x10;
+ pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp));
+
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_RESP_ACK;
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_acc *acc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*acc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "ls_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ acc = els->io.req.virt;
+ memset(acc, 0, sizeof(*acc));
+
+ acc->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*acc));
+}
+
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct fc_els_ls_acc *logo;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ logo = els->io.req.virt;
+ memset(logo, 0, sizeof(*logo));
+
+ logo->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*logo));
+}
+
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc_acc";
+
+ /* Go ahead and send the ELS_ACC */
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+ adisc = els->io.req.virt;
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_LS_ACC;
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_rsp(els, sizeof(*adisc));
+}
+
+static inline void
+fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size)
+{
+ hdr->ct_rev = FC_CT_REV;
+ hdr->ct_fs_type = FC_FST_DIR;
+ hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ hdr->ct_options = 0;
+ hdr->ct_cmd = cpu_to_be16(cmd);
+ /* words */
+ hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32)));
+ hdr->ct_reason = 0;
+ hdr->ct_explan = 0;
+ hdr->ct_vendor = 0;
+}
+
+int
+efc_ns_send_rftid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rft_id rftid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rftid";
+
+ ct = els->io.req.virt;
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID,
+ sizeof(struct fc_ns_rft_id));
+
+ hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
+ ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] =
+ cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_rffid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rff_id rffid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rffid";
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID,
+ sizeof(struct fc_ns_rff_id));
+
+ hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
+ if (node->nport->enable_ini)
+ ct->rffid.fr_feat |= FCP_FEAT_INIT;
+ if (node->nport->enable_tgt)
+ ct->rffid.fr_feat |= FCP_FEAT_TARG;
+ ct->rffid.fr_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_gidpt(struct efc_node *node)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_gid_pt gidpt;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "gidpt";
+
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_GID_PT,
+ sizeof(struct fc_ns_gid_pt));
+
+ ct->gidpt.fn_pt_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg)
+{
+ /* We don't want any further events (e.g. abort requests from the
+ * node state machine), so disable the state machine.
+ */
+ els->els_req_free = true;
+ efc_node_post_els_resp(els->node, evt, arg);
+
+ efc_els_io_free(els);
+}
+
+static int
+efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els = arg;
+
+ efc_els_io_free(els);
+
+ return 0;
+}
+
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code,
+ u32 reason_code, u32 reason_code_explanation)
+{
+ struct efc_els_io_req *els = NULL;
+ struct fc_ct_hdr *rsp = NULL;
+
+ els = efc_els_io_alloc(node, 256);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ rsp = els->io.rsp.virt;
+
+ *rsp = *ct_hdr;
+
+ fcct_build_req_header(rsp, cmd_rsp_code, 0);
+ rsp->ct_reason = reason_code;
+ rsp->ct_explan = reason_code_explanation;
+
+ els->display_name = "ct_rsp";
+ els->cb = efc_ct_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.io_type = EFC_DISC_IO_CT_RESP;
+ els->io.xmit_len = sizeof(*rsp);
+
+ els->io.rpi = node->rnode.indicator;
+ els->io.d_id = node->rnode.fc_id;
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+
+ els->io.iparam.ct.ox_id = ox_id;
+ els->io.iparam.ct.r_ctl = 3;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = 5;
+
+ if (efc->tt.send_els(efc, &els->io)) {
+ efc_els_io_free(els);
+ return -EIO;
+ }
+ return 0;
+}
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
+{
+ struct sli_bls_params bls;
+ struct fc_ba_acc *acc;
+ struct efc *efc = node->efc;
+
+ memset(&bls, 0, sizeof(bls));
+ bls.ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls.rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls.s_id = ntoh24(hdr->fh_d_id);
+ bls.d_id = node->rnode.fc_id;
+ bls.rpi = node->rnode.indicator;
+ bls.vpi = node->nport->indicator;
+
+ acc = (void *)bls.payload;
+ acc->ba_ox_id = cpu_to_be16(bls.ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls.rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls);
+}
diff --git a/drivers/scsi/elx/libefc/efc_els.h b/drivers/scsi/elx/libefc/efc_els.h
new file mode 100644
index 000000000000..3c4f820f602e
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_ELS_H__
+#define __EFC_ELS_H__
+
+#define EFC_STATUS_INVALID INT_MAX
+#define EFC_ELS_IO_POOL_SZ 1024
+
+struct efc_els_io_req {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc_node *node;
+ void *cb;
+ u32 els_retries_remaining;
+ bool els_req_free;
+ struct timer_list delay_timer;
+
+ const char *display_name;
+
+ struct efc_disc_io io;
+};
+
+typedef int(*efc_hw_srrs_cb_t)(void *arg, u32 length, int status,
+ u32 ext_status);
+
+void _efc_els_io_free(struct kref *arg);
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen);
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen);
+void efc_els_io_free(struct efc_els_io_req *els);
+
+/* ELS command send */
+typedef void (*els_cb_t)(struct efc_node *node,
+ struct efc_node_cb *cbdata, void *arg);
+int
+efc_send_plogi(struct efc_node *node);
+int
+efc_send_flogi(struct efc_node *node);
+int
+efc_send_fdisc(struct efc_node *node);
+int
+efc_send_prli(struct efc_node *node);
+int
+efc_send_prlo(struct efc_node *node);
+int
+efc_send_logo(struct efc_node *node);
+int
+efc_send_adisc(struct efc_node *node);
+int
+efc_send_pdisc(struct efc_node *node);
+int
+efc_send_scr(struct efc_node *node);
+int
+efc_ns_send_rftid(struct efc_node *node);
+int
+efc_ns_send_rffid(struct efc_node *node);
+int
+efc_ns_send_gidpt(struct efc_node *node);
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg);
+
+/* ELS acc send */
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique);
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id);
+int
+efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport);
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id);
+
+int
+efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node,
+ struct fc_frame_header *hdr);
+int
+efc_bls_send_rjt_hdr(struct efc_els_io_req *io, struct fc_frame_header *hdr);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+/* CT */
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code,
+ u32 reason_code_explanation);
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr);
+
+#endif /* __EFC_ELS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
new file mode 100644
index 000000000000..d397220d9e54
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -0,0 +1,1564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * This file implements remote node state machines for:
+ * - Fabric logins.
+ * - Fabric controller events.
+ * - Name/directory services interaction.
+ * - Point-to-point logins.
+ */
+
+/*
+ * fabric_sm Node State Machine: Fabric States
+ * ns_sm Node State Machine: Name/Directory Services States
+ * p2p_sm Node State Machine: Point-to-Point Node States
+ */
+
+#include "efc.h"
+
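+/*
+ * Begin shutdown of a fabric node: disable further ELS IO allocation,
+ * request a HW node detach if the node is attached, and hand off to
+ * the common node cleanup path.
+ */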
+static void
+efc_fabric_initiate_shutdown(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+
+ node->els_io_enabled = false;
+
+ if (node->attached) {
+ int rc;
+
+ /* issue hw node free; don't care if succeeds right away
+ * or sometime later, will check node->attached later in
+ * shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0) {
+ node_printf(node, "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+ }
+ /*
+ * The node has either been detached or is in the process of being
+ * detached; call the common node initiate-cleanup function.
+ */
+ efc_node_initiate_cleanup(node);
+}
+
+static void
+__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+
+ node = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ case EFC_EVT_SHUTDOWN:
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_REENTER:
+ efc_log_debug(efc, ">>> reenter !!\n");
+ fallthrough;
+
+ case EFC_EVT_ENTER:
+ /* send FLOGI */
+ efc_send_flogi(node);
+ efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology)
+{
+ node->nport->topology = topology;
+}
+
+void
+efc_fabric_notify_topology(struct efc_node *node)
+{
+ struct efc_node *tmp_node;
+ enum efc_nport_topology topology = node->nport->topology;
+ unsigned long index;
+
+ /*
+ * now loop through the nodes in the nport
+ * and send topology notification
+ */
+ xa_for_each(&node->nport->lookup, index, tmp_node) {
+ if (tmp_node != node) {
+ efc_node_post_event(tmp_node,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ (void *)topology);
+ }
+ }
+}
+
+static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
+{
+ return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
+}
+
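+/*
+ * Node state: wait for the FLOGI response. A fabric (F_PORT) response
+ * attaches the domain using the FC ID returned in ext_status; an
+ * N_PORT response switches to point-to-point topology, where only the
+ * p2p winner keeps this node waiting for the domain attach and the
+ * loser shuts the node down.
+ */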
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->els_rsp.virt,
+ sizeof(struct fc_els_flogi));
+
+ /* Check to see if the fabric is an F_PORT or an N_PORT */
+ if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
+ /* sm: if not nport / efc_domain_attach */
+ /* ext_status has the fc_id, attach domain */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
+ efc_fabric_notify_topology(node);
+ WARN_ON(node->nport->domain->attached);
+ efc_domain_attach(node->nport->domain,
+ cbdata->ext_status);
+ efc_node_transition(node,
+ __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ /* sm: if nport and p2p_winner / efc_domain_attach */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node,
+ "p2p setup failed, shutting down node\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (node->nport->domain->attached &&
+ !node->nport->domain->domain_notify_pend) {
+ /*
+ * already attached,
+ * just send ATTACH_OK
+ */
+ node_printf(node,
+ "p2p winner, domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /*
+ * peer is p2p winner;
+ * PLOGI will be received on the
+ * remote SID=1 node;
+ * this node has served its purpose
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+
+ break;
+ }
+
+ case EFC_EVT_ELS_REQ_ABORTED:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ struct efc_nport *nport = node->nport;
+ /*
+ * With these errors we have no recovery, so shut down the nport
+ * and leave the link up and the domain ready.
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node,
+ "FLOGI failed evt=%s, shutting down nport [%s]\n",
+ efc_sm_event_name(evt), nport->display_name);
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send FDISC */
+ efc_send_fdisc(node);
+ efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ /* fc_id is in ext_status */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / efc_nport_attach */
+ efc_nport_attach(node->nport, cbdata->ext_status);
+ efc_node_transition(node, __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
+ /* sm: / shutdown nport */
+ efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_start_ns_node(struct efc_nport *nport)
+{
+ struct efc_node *ns;
+
+ /* Instantiate a name services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (!ns) {
+ ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
+ if (!ns)
+ return -EIO;
+ }
+ /*
+ * For a found ns node, should we be transitioning from here?
+ * This breaks the convention of transitioning only
+ * 1. from within the state machine, or
+ * 2. just after alloc
+ */
+ if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
+ efc_node_pause(ns, __efc_ns_init);
+ else
+ efc_node_transition(ns, __efc_ns_init, NULL);
+ return 0;
+}
+
+static int
+efc_start_fabctl_node(struct efc_nport *nport)
+{
+ struct efc_node *fabctl;
+
+ fabctl = efc_node_find(nport, FC_FID_FCTRL);
+ if (!fabctl) {
+ fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
+ false, false);
+ if (!fabctl)
+ return -EIO;
+ }
+ /*
+ * For a found fabctl node, should we be transitioning from here?
+ * This breaks the convention of transitioning only
+ * 1. from within the state machine, or
+ * 2. just after alloc
+ */
+ efc_node_transition(fabctl, __efc_fabctl_init, NULL);
+ return 0;
+}
+
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ int rc;
+
+ rc = efc_start_ns_node(node->nport);
+ if (rc)
+ return;
+
+ /* sm: if enable_ini / start fabctl node */
+ /* Instantiate the fabric controller (sends SCR) */
+ if (node->nport->enable_rscn) {
+ rc = efc_start_fabctl_node(node->nport);
+ if (rc)
+ return;
+ }
+ efc_node_transition(node, __efc_fabric_idle, NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ /* Save service parameters */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ /* sm: / send RFTID */
+ efc_ns_send_rftid(node);
+ efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ /* ignore shutdown event as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / send RFFID */
+ efc_ns_send_rffid(node);
+ efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
+ break;
+
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Waits for an RFFID response event;
+ * if rscn enabled, a GIDPT name services request is issued.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ if (node->nport->enable_rscn) {
+ /* sm: if enable_rscn / send GIDPT */
+ efc_ns_send_gidpt(node);
+
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ } else {
+ /* if 'T' only, we're done, go to idle */
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ }
+ break;
+ }
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
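+/*
+ * Process a GID_PT response payload from the name server: validate the
+ * CT header, build a list of currently active nodes, clear from that
+ * list any FC_ID still reported by the fabric, post EFC_EVT_NODE_MISSING
+ * for the nodes that remain, and allocate or refound nodes for newly
+ * reported FC_IDs.
+ */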
+static int
+efc_process_gidpt_payload(struct efc_node *node,
+ void *data, u32 gidpt_len)
+{
+ u32 i, j;
+ struct efc_node *newnode;
+ struct efc_nport *nport = node->nport;
+ struct efc *efc = node->efc;
+ u32 port_id = 0, port_count, plist_count;
+ struct efc_node *n;
+ struct efc_node **active_nodes;
+ int residual;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_gid_pn_resp pn_rsp;
+ } *rsp;
+ struct fc_gid_pn_resp *gidpt;
+ unsigned long index;
+
+ rsp = data;
+ gidpt = &rsp->pn_rsp;
+ residual = be16_to_cpu(rsp->hdr.ct_mr_size);
+
+ if (residual != 0)
+ efc_log_debug(node->efc, "residual is %u words\n", residual);
+
+ if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
+ node_printf(node,
+ "GIDPT request failed: rsn x%x rsn_expl x%x\n",
+ rsp->hdr.ct_reason, rsp->hdr.ct_explan);
+ return -EIO;
+ }
+
+ plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);
+
+ /* Count the number of nodes */
+ port_count = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_count++;
+ }
+
+ /* Allocate a buffer for all nodes */
+ active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
+ if (!active_nodes) {
+ node_printf(node, "active_nodes allocation failed\n");
+ return -EIO;
+ }
+
+ /* Fill buffer with fc_id of active nodes */
+ i = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_id = n->rnode.fc_id;
+ switch (port_id) {
+ case FC_FID_FLOGI:
+ case FC_FID_FCTRL:
+ case FC_FID_DIR_SERV:
+ break;
+ default:
+ if (port_id != FC_FID_DOM_MGR)
+ active_nodes[i++] = n;
+ break;
+ }
+ }
+
+ /* update the active nodes buffer */
+ for (i = 0; i < plist_count; i++) {
+ port_id = ntoh24(gidpt[i].fp_fid);
+
+ for (j = 0; j < port_count; j++) {
+ if (active_nodes[j] &&
+ port_id == active_nodes[j]->rnode.fc_id) {
+ active_nodes[j] = NULL;
+ }
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+
+ /* Those remaining in active_nodes[] are now gone! */
+ for (i = 0; i < port_count; i++) {
+ /*
+ * if we're an initiator and the remote node
+ * is a target, then post the node missing event.
+ * if we're target and we have enabled
+ * target RSCN, then post the node missing event.
+ */
+ if (!active_nodes[i])
+ continue;
+
+ if ((node->nport->enable_ini && active_nodes[i]->targ) ||
+ (node->nport->enable_tgt && enable_target_rscn(efc))) {
+ efc_node_post_event(active_nodes[i],
+ EFC_EVT_NODE_MISSING, NULL);
+ } else {
+ node_printf(node,
+ "GID_PT: skipping non-tgt port_id x%06x\n",
+ active_nodes[i]->rnode.fc_id);
+ }
+ }
+ kfree(active_nodes);
+
+ for (i = 0; i < plist_count; i++) {
+ port_id = ntoh24(gidpt[i].fp_fid);
+
+ /* Don't create node for ourselves */
+ if (port_id == node->rnode.nport->fc_id) {
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ continue;
+ }
+
+ newnode = efc_node_find(nport, port_id);
+ if (!newnode) {
+ if (!node->nport->enable_ini)
+ continue;
+
+ newnode = efc_node_alloc(nport, port_id, false, false);
+ if (!newnode) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return -EIO;
+ }
+ /*
+ * send PLOGI automatically
+ * if initiator
+ */
+ efc_node_init_device(newnode, true);
+ }
+
+ if (node->nport->enable_ini && newnode->targ) {
+ efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
+ NULL);
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+ return 0;
+}
+
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /*
+ * Wait for a GIDPT response from the name server. Process the FC_IDs
+ * that are reported by creating new remote ports, as needed.
+ */
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process GIDPT payload */
+ efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
+ cbdata->els_rsp.len);
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ /* not much we can do; will retry with the next RSCN */
+ node_printf(node, "GID_PT failed to complete\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ /* if receive RSCN here, queue up another discovery processing */
+ case EFC_EVT_RSCN_RCVD: {
+ node_printf(node, "RSCN received during GID_PT processing\n");
+ node->rscn_pending = true;
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Wait for RSCN received events (posted from the fabric controller)
+ * and restart the GIDPT name services query and processing.
+ */
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->rscn_pending)
+ break;
+
+ node_printf(node, "RSCN pending, restart discovery\n");
+ node->rscn_pending = false;
+ fallthrough;
+
+ case EFC_EVT_RSCN_RCVD: {
+ /* sm: / send GIDPT */
+ /*
+ * If target RSCN processing is enabled,
+ * and this is target only (not initiator),
+ * and tgt_rscn_delay is non-zero,
+ * then we delay issuing the GID_PT
+ */
+ if (efc->tgt_rscn_delay_msec != 0 &&
+ !node->nport->enable_ini && node->nport->enable_tgt &&
+ enable_target_rscn(efc)) {
+ efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
+ } else {
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+gidpt_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
+
+ del_timer(&node->gidpt_delay_timer);
+
+ efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
+}
+
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ u64 delay_msec, tmp;
+
+ /*
+ * Compute the delay time. Default to tgt_rscn_delay; if the
+ * time since the last GID_PT is less than tgt_rscn_period,
+ * use tgt_rscn_period instead.
+ */
+ delay_msec = efc->tgt_rscn_delay_msec;
+ tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
+ if (tmp < efc->tgt_rscn_period_msec)
+ delay_msec = efc->tgt_rscn_period_msec;
+
+ timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
+ 0);
+ mod_timer(&node->gidpt_delay_timer,
+ jiffies + msecs_to_jiffies(delay_msec));
+
+ break;
+ }
+
+ case EFC_EVT_GIDPT_DELAY_EXPIRED:
+ node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);
+
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_RSCN_RCVD: {
+ efc_log_debug(efc,
+ "RSCN received while in GIDPT delay - no action\n");
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* no need to login to fabric controller, just send SCR */
+ efc_send_scr(node);
+ efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine:
+ * Wait for an SCR response from the fabric controller.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
+{
+ struct efc *efc = node->efc;
+ struct efc_nport *nport = node->nport;
+ struct efc_node *ns;
+
+ /* Forward this event to the name-services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (ns)
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
+ else
+ efc_log_warn(efc, "can't find name server node\n");
+}
+
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine: Ready.
+ * In this state, the fabric controller sends an RSCN, which is received
+ * by this node and forwarded to the name services node object; the
+ * RSCN LS_ACC is then sent.
+ */
+ switch (evt) {
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * sm: / process RSCN (forward to name services node),
+ * send LS_ACC
+ */
+ efc_process_rscn(node, cbdata);
+ efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
+ NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static uint64_t
+efc_get_wwpn(struct fc_els_flogi *sp)
+{
+ return be64_to_cpu(sp->fl_wwpn);
+}
+
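+/*
+ * Determine the point-to-point "winner" by comparing the remote port's
+ * WWPN (from the saved FLOGI service parameters) with the local WWPN.
+ * Returns 1 if the remote node wins, 0 if the local nport wins, and -1
+ * if the WWPNs match (no winner).
+ */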
+static int
+efc_rnode_is_winner(struct efc_nport *nport)
+{
+ struct fc_els_flogi *remote_sp;
+ u64 remote_wwpn;
+ u64 local_wwpn = nport->wwpn;
+ u64 wwn_bump = 0;
+
+ remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
+ remote_wwpn = efc_get_wwpn(remote_sp);
+
+ local_wwpn ^= wwn_bump;
+
+ efc_log_debug(nport->efc, "r: %llx\n",
+ be64_to_cpu(remote_sp->fl_wwpn));
+ efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);
+
+ if (remote_wwpn == local_wwpn) {
+ efc_log_warn(nport->efc,
+ "WWPN of remote node [%08x %08x] matches local WWPN\n",
+ (u32)(local_wwpn >> 32ll),
+ (u32)local_wwpn);
+ return -1;
+ }
+
+ return (remote_wwpn > local_wwpn);
+}
+
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_nport *nport = node->nport;
+ struct efc_node *rnode;
+
+ /*
+ * This transient node (SID=0 if we received FLOGI, or
+ * DID=fabric if we sent FLOGI) is the p2p winner; a
+ * separate node will be used to send PLOGI to the peer.
+ */
+ WARN_ON(!node->nport->p2p_winner);
+
+ rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
+ if (rnode) {
+ /*
+ * the "other" transient p2p node has
+ * already kicked off the
+ * new node from which PLOGI is sent
+ */
+ node_printf(node,
+ "Node with fc_id x%x already exists\n",
+ rnode->rnode.fc_id);
+ } else {
+ /*
+ * create new node (SID=1, DID=2)
+ * from which to send PLOGI
+ */
+ rnode = efc_node_alloc(nport,
+ nport->p2p_remote_port_id,
+ false, false);
+ if (!rnode) {
+ efc_log_err(efc, "node alloc failed\n");
+ return;
+ }
+
+ efc_fabric_notify_topology(node);
+ /* sm: / allocate p2p remote node */
+ efc_node_transition(rnode, __efc_p2p_rnode_init,
+ NULL);
+ }
+
+ /*
+ * the transient node (SID=0 or DID=fabric)
+ * has served its purpose
+ */
+ if (node->rnode.fc_id == 0) {
+ /*
+ * if this is the SID=0 node,
+ * move to the init state in case peer
+ * has restarted FLOGI discovery and FLOGI is pending
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ } else {
+ /*
+ * if this is the DID=fabric node
+ * (we initiated FLOGI), shut it down
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
+ break;
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+
+ /* sm: if p2p_winner / domain_attach */
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (!node->nport->domain->attached) {
+ node_printf(node, "Domain not attached\n");
+ efc_domain_attach(node->nport->domain,
+ node->nport->p2p_port_id);
+ } else {
+ node_printf(node, "Domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /* this node has served its purpose;
+ * we'll expect a PLOGI on a separate
+ * node (remote SID=0x1); return this node
+ * to init state in case peer
+ * restarts discovery -- it may already
+ * have (pending frames may exist).
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /*
+ * LS_ACC failed, possibly due to link down;
+ * shutdown node and wait
+ * for FLOGI discovery to restart
+ */
+ node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_ABTS_RCVD: {
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node, "PLOGI failed, shutting down\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* if we're in external loopback mode, just send LS_ACC */
+ if (node->efc->external_loopback) {
+ efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ } else {
+ /*
+ * if this isn't external loopback,
+ * pass to default handler
+ */
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+ break;
+ }
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /*
+ * We sent a PLOGI and, before its completion was seen, received a
+ * PRLI from the remote node (WCQEs and RCQEs come in on different
+ * queues and order of processing cannot be assumed). Save the OXID
+ * so the PRLI LS_ACC can be sent after the node attach, and continue
+ * to wait for the PLOGI response.
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /*
+ * Since we've received a PRLI, we have a port login and will
+ * just need to wait for the PLOGI response to do the node
+ * attach; then we can send the LS_ACC for the PRLI. During
+ * this time we may receive FCP_CMNDs (possible since we've
+ * already sent a PRLI and our peer may have accepted it);
+ * holding frames keeps them pending until the attach is done.
+ * We are not waiting on any other unsolicited frames to
+ * continue with the login process, so it does not hurt to
+ * hold frames here.
+ */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
+ int rc;
+
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node->ls_acc_io,
+ node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ efc_node_transition(node, __efc_d_port_logged_in,
+ NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_PRLI_RCVD:
+ node_printf(node, "%s: PRLI received before node is attached\n",
+ efc_sm_event_name(evt));
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
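+/*
+ * Set up point-to-point addressing for the nport. If the local port is
+ * the p2p winner it takes port ID 1 and assigns ID 2 to the peer; if the
+ * remote port wins, the IDs are left at 0 for the peer to assign. In
+ * external loopback mode both local and remote use port ID 1.
+ */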
+int
+efc_p2p_setup(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rnode_winner;
+
+ rnode_winner = efc_rnode_is_winner(nport);
+
+ /* set nport flags to indicate p2p "winner" */
+ if (rnode_winner == 1) {
+ nport->p2p_remote_port_id = 0;
+ nport->p2p_port_id = 0;
+ nport->p2p_winner = false;
+ } else if (rnode_winner == 0) {
+ nport->p2p_remote_port_id = 2;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ /* no winner; only okay if external loopback enabled */
+ if (nport->efc->external_loopback) {
+ /*
+ * External loopback mode enabled;
+ * local nport and remote node
+ * will be registered with an NPortID = 1;
+ */
+ efc_log_debug(efc,
+ "External loopback mode enabled\n");
+ nport->p2p_remote_port_id = 1;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ efc_log_warn(efc,
+ "failed to determine p2p winner\n");
+ return rnode_winner;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/scsi/elx/libefc/efc_fabric.h b/drivers/scsi/elx/libefc/efc_fabric.h
new file mode 100644
index 000000000000..b0947ae6fdca
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declarations for the interface exported by efc_fabric
+ */
+
+#ifndef __EFCT_FABRIC_H__
+#define __EFCT_FABRIC_H__
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_nport_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_logo_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_p2p_setup(struct efc_nport *nport);
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology);
+void efc_fabric_notify_topology(struct efc_node *node);
+
+#endif /* __EFCT_FABRIC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c
new file mode 100644
index 000000000000..a1b4ce6a27b4
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efc.h"
+
+int
+efc_remote_node_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_remote_node *rnode = data;
+ struct efc_node *node = rnode->node;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return 0;
+}
+
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 port_id)
+{
+ /* Find an FC node structure given the FC port ID */
+ return xa_load(&nport->lookup, port_id);
+}
+
+static void
+_efc_node_free(struct kref *arg)
+{
+ struct efc_node *node = container_of(arg, struct efc_node, ref);
+ struct efc *efc = node->efc;
+ struct efc_dma *dma;
+
+ dma = &node->sparm_dma_buf;
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+ mempool_free(node, efc->node_pool);
+}
+
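+/*
+ * Allocate and initialize an FC node: node memory comes from the efc
+ * node mempool and the service-parameters buffer from the node DMA pool,
+ * a HW remote node is allocated, and the node is added to the nport
+ * lookup xarray keyed by its port ID.
+ */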
+struct efc_node *efc_node_alloc(struct efc_nport *nport,
+ u32 port_id, bool init, bool targ)
+{
+ int rc;
+ struct efc_node *node = NULL;
+ struct efc *efc = nport->efc;
+ struct efc_dma *dma;
+
+ if (nport->shutting_down) {
+ efc_log_debug(efc, "node allocation when shutting down %06x",
+ port_id);
+ return NULL;
+ }
+
+ node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
+ if (!node) {
+ efc_log_err(efc, "node allocation failed %06x", port_id);
+ return NULL;
+ }
+ memset(node, 0, sizeof(*node));
+
+ dma = &node->sparm_dma_buf;
+ dma->size = NODE_SPARAMS_SIZE;
+ dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
+ if (!dma->virt) {
+ efc_log_err(efc, "node dma alloc failed\n");
+ goto dma_fail;
+ }
+ node->rnode.indicator = U32_MAX;
+ node->nport = nport;
+
+ node->efc = efc;
+ node->init = init;
+ node->targ = targ;
+
+ spin_lock_init(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&node->pend_frames);
+ spin_lock_init(&node->els_ios_lock);
+ INIT_LIST_HEAD(&node->els_ios_list);
+ node->els_io_enabled = true;
+
+ rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
+ goto hw_alloc_fail;
+ }
+
+ node->rnode.node = node;
+ node->sm.app = node;
+ node->evtdepth = 0;
+
+ efc_node_update_display_name(node);
+
+ rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Node lookup store failed: %d\n", rc);
+ goto xa_fail;
+ }
+
+ /* initialize refcount */
+ kref_init(&node->ref);
+ node->release = _efc_node_free;
+ kref_get(&nport->ref);
+
+ return node;
+
+xa_fail:
+ efc_node_free_resources(efc, &node->rnode);
+hw_alloc_fail:
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+dma_fail:
+ mempool_free(node, efc->node_pool);
+ return NULL;
+}
+
+void
+efc_node_free(struct efc_node *node)
+{
+ struct efc_nport *nport;
+ struct efc *efc;
+ int rc = 0;
+ struct efc_node *ns = NULL;
+
+ nport = node->nport;
+ efc = node->efc;
+
+ node_printf(node, "Free'd\n");
+
+ if (!node->nport) {
+ efc_log_err(efc, "Node already freed\n");
+ return;
+ }
+
+ if (node->refound) {
+ /*
+ * Save the name server node. We will send a fake RSCN event at
+ * the end to handle the RSCN ignored during node deletion.
+ */
+ ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
+ }
+
+ /* Free HW resources */
+ rc = efc_node_free_resources(efc, &node->rnode);
+ if (rc < 0)
+ efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);
+
+ /* if the gidpt_delay_timer is still running, then delete it */
+ if (timer_pending(&node->gidpt_delay_timer))
+ del_timer(&node->gidpt_delay_timer);
+
+ xa_erase(&nport->lookup, node->rnode.fc_id);
+
+ /*
+ * If the node_list is empty,
+ * then post a ALL_CHILD_NODES_FREE event to the nport,
+ * after the lock is released.
+ * The nport may be free'd as a result of the event.
+ */
+ if (xa_empty(&nport->lookup))
+ efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ node->nport = NULL;
+ node->sm.current_state = NULL;
+
+ kref_put(&nport->ref, nport->release);
+ kref_put(&node->ref, node->release);
+
+ if (ns) {
+ /* sending fake RSCN event to name server node */
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
+ }
+}
+
+static void
+efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
+{
+ if (!dma || !buffer || !buffer_length)
+ return;
+
+ if (buffer_length > dma->size)
+ buffer_length = dma->size;
+
+ memcpy(dma->virt, buffer, buffer_length);
+ dma->len = buffer_length;
+}
+
+int
+efc_node_attach(struct efc_node *node)
+{
+ int rc = 0;
+ struct efc_nport *nport = node->nport;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = node->efc;
+
+ if (!domain->attached) {
+ efc_log_err(efc, "Warning: unattached domain\n");
+ return -EIO;
+ }
+ /* Update node->wwpn/wwnn */
+
+ efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
+ efc_node_get_wwpn(node));
+ efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
+ efc_node_get_wwnn(node));
+
+ efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
+ sizeof(node->service_params) - 4);
+
+ /* take lock to protect node->rnode.attached */
+ rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
+ if (rc < 0)
+ efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);
+
+ return rc;
+}
+
+void
+efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
+{
+ switch (fc_id) {
+ case FC_FID_FLOGI:
+ snprintf(buffer, buffer_length, "fabric");
+ break;
+ case FC_FID_FCTRL:
+ snprintf(buffer, buffer_length, "fabctl");
+ break;
+ case FC_FID_DIR_SERV:
+ snprintf(buffer, buffer_length, "nserve");
+ break;
+ default:
+ if (fc_id == FC_FID_DOM_MGR) {
+ snprintf(buffer, buffer_length, "dctl%02x",
+ (fc_id & 0x0000ff));
+ } else {
+ snprintf(buffer, buffer_length, "%06x", fc_id);
+ }
+ break;
+ }
+}
+
+void
+efc_node_update_display_name(struct efc_node *node)
+{
+ u32 port_id = node->rnode.fc_id;
+ struct efc_nport *nport = node->nport;
+ char portid_display[16];
+
+ efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));
+
+ snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
+ nport->display_name, portid_display);
+}
+
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node)
+{
+ if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
+ efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
+ node->display_name, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ }
+}
+
+static void efc_node_handle_implicit_logo(struct efc_node *node)
+{
+ int rc;
+
+ /*
+ * currently, only case for implicit logo is PLOGI
+ * recvd. Thus, node's ELS IO pending list won't be
+ * empty (PLOGI will be on it)
+ */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+ node_printf(node, "Reason: implicit logout, re-authenticate\n");
+
+ /* Re-attach node with the same HW node resources */
+ node->req_free = false;
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ node->els_io_enabled = true;
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+}
+
+static void efc_node_handle_explicit_logo(struct efc_node *node)
+{
+ s8 pend_frames_empty;
+ unsigned long flags = 0;
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+ pend_frames_empty = list_empty(&node->pend_frames);
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ /*
+ * there are two scenarios where we want to keep
+ * this node alive:
+ * 1. there are pending frames that need to be
+ * processed or
+ * 2. we're an initiator and the remote node is
+ * a target and we need to re-authenticate
+ */
+ node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty);
+ node_printf(node, "nport.ini=%d node.tgt=%d\n",
+ node->nport->enable_ini, node->targ);
+ if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
+ u8 send_plogi = false;
+
+ if (node->nport->enable_ini && node->targ) {
+ /*
+ * we're an initiator and
+ * node shutting down is a target;
+ * we'll need to re-authenticate in
+ * initial state
+ */
+ send_plogi = true;
+ }
+
+ /*
+ * transition to __efc_d_init
+ * (will retain HW node resources)
+ */
+ node->els_io_enabled = true;
+ node->req_free = false;
+
+ /*
+ * either pending frames exist or we are re-authenticating
+ * with PLOGI (or both); in either case, return to initial
+ * state
+ */
+ efc_node_init_device(node, send_plogi);
+ }
+ /* else: let node shutdown occur */
+}
+
+static void
+efc_node_purge_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+}
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ efc_node_hold_frames(node);
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ /* by default, we will be freeing node after we unwind */
+ node->req_free = true;
+
+ switch (node->shutdown_reason) {
+ case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
+ /* Node shutdown b/c of PLOGI received when node
+ * already logged in. We have PLOGI service
+ * parameters, so submit node attach; we won't be
+ * freeing this node
+ */
+
+ efc_node_handle_implicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
+ efc_node_handle_explicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_DEFAULT:
+ default: {
+ /*
+ * shutdown due to link down,
+ * node going away (xport event) or
+ * nport shutdown, purge pending and
+ * proceed to cleanup node
+ */
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ node_printf(node,
+ "Shutdown reason: default, purge pending\n");
+ efc_node_purge_pending(node);
+ break;
+ }
+ }
+
+ break;
+ }
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+static bool
+efc_node_check_els_quiesced(struct efc_node *node)
+{
+ /* check to see if ELS requests, completions are quiesced */
+ if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
+ efc_els_io_list_empty(node, &node->els_ios_list)) {
+ if (!node->attached) {
+ /* hw node detach already completed, proceed */
+ node_printf(node, "HW node not attached\n");
+ efc_node_transition(node,
+ __efc_node_wait_ios_shutdown,
+ NULL);
+ } else {
+ /*
+ * hw node detach hasn't completed,
+ * transition and wait
+ */
+ node_printf(node, "HW node still attached\n");
+ efc_node_transition(node, __efc_node_wait_node_free,
+ NULL);
+ }
+ return true;
+ }
+ return false;
+}
+
+void
+efc_node_initiate_cleanup(struct efc_node *node)
+{
+ /*
+ * if ELSes have already been quiesced, move to the next state;
+ * if they have not been quiesced, abort them
+ */
+ if (!efc_node_check_els_quiesced(node)) {
+ efc_node_hold_frames(node);
+ efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
+ }
+}
+
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ bool check_quiesce = false;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /* Node state machine: Wait for all ELSs to complete */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ if (efc_els_io_list_empty(node, &node->els_ios_list)) {
+ node_printf(node, "All ELS IOs complete\n");
+ check_quiesce = true;
+ }
+ break;
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* all ELS IOs complete */
+ node_printf(node, "All ELS IOs complete\n");
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+
+ if (check_quiesce)
+ efc_node_check_els_quiesced(node);
+}
+
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ /* node is officially no longer attached */
+ node->attached = false;
+ efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ /* As IOs and ELS IOs complete we expect to get these events */
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+
+ /*
+ * first check to see if no ELS IOs are outstanding; if so,
+ * proceed with shutdown, which frees any remaining active IOs
+ */
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
+ efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+ struct efc_node_cb *cbdata = arg;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
+ case EFC_EVT_NODE_MISSING:
+ case EFC_EVT_FCP_CMD_RCVD:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ node->refound = true;
+ break;
+
+ /*
+ * node->attached must be set appropriately
+ * for all node attach/detach events
+ */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ break;
+
+ /*
+ * handle any ELS completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ break;
+
+ /*
+ * handle any ELS request completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_ELS_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * Unsupported ELS was received,
+ * send LS_RJT, command not supported
+ */
+ efc_log_debug(efc,
+ "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
+ node->display_name, funcname,
+ ((u8 *)cbdata->payload->dma.virt)[0]);
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD:
+ case EFC_EVT_FLOGI_RCVD:
+ case EFC_EVT_LOGO_RCVD:
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /* sm: / send ELS_RJT */
+ efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ /* if we didn't catch this in a state, send generic LS_RJT */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
+ break;
+ }
+ case EFC_EVT_ABTS_RCVD: {
+ efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+efc_node_save_sparms(struct efc_node *node, void *payload)
+{
+ memcpy(node->service_params, payload, sizeof(node->service_params));
+}
+
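+/*
+ * Post an event to the node state machine. The event call depth is
+ * tracked so pending frames are only dispatched from the outermost call,
+ * and a requested node free is deferred until the depth returns to zero.
+ */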
+void
+efc_node_post_event(struct efc_node *node,
+ enum efc_sm_event evt, void *arg)
+{
+ bool free_node = false;
+
+ node->evtdepth++;
+
+ efc_sm_post_event(&node->sm, evt, arg);
+
+ /* If our event call depth is one and
+ * we're not holding frames
+ * then we can dispatch any pending frames.
+ * We don't want to allow the efc_process_node_pending()
+ * call to recurse.
+ */
+ if (!node->hold_frames && node->evtdepth == 1)
+ efc_process_node_pending(node);
+
+ node->evtdepth--;
+
+ /*
+ * Free the node object if so requested,
+ * and we're at an event call depth of zero
+ */
+ if (node->evtdepth == 0 && node->req_free)
+ free_node = true;
+
+ if (free_node)
+ efc_node_free(node);
+}
+
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+{
+ struct efc_sm_ctx *ctx = &node->sm;
+
+ if (ctx->current_state == state) {
+ efc_node_post_event(node, EFC_EVT_REENTER, data);
+ } else {
+ efc_node_post_event(node, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_node_post_event(node, EFC_EVT_ENTER, data);
+ }
+}
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
+{
+ memset(buf, 0, buf_len);
+
+ snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
+}
+
+u64
+efc_node_get_wwpn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwpn);
+}
+
+u64
+efc_node_get_wwnn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwnn);
+}
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
+{
+ int empty;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+ empty = list_empty(list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return empty;
+}
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *))
+
+{
+ node->nodedb_state = state;
+ efc_node_transition(node, __efc_node_paused, NULL);
+}
+
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * This state is entered when a state is "paused". When resumed, the
+ * node is transitioned to the previously saved state (node->nodedb_state).
+ */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node_printf(node, "Paused\n");
+ break;
+
+ case EFC_EVT_RESUME: {
+ void (*pf)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+ pf = node->nodedb_state;
+
+ node->nodedb_state = NULL;
+ efc_node_transition(node, pf, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node->req_free = true;
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_node_recv_els_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
+ struct {
+ u32 cmd;
+ enum efc_sm_event evt;
+ u32 payload_size;
+ } els_cmd_list[] = {
+ {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
+ {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
+ {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
+ {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
+ {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ };
+ struct efc_node_cb cbdata;
+ u8 *buf = seq->payload->dma.virt;
+ enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
+ u32 i;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ /* find a matching event for the ELS command */
+ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
+ if (els_cmd_list[i].cmd == buf[0]) {
+ evt = els_cmd_list[i].evt;
+ break;
+ }
+ }
+
+ efc_node_post_event(node, evt, &cbdata);
+}
+
+void
+efc_node_recv_ct_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fc_ct_hdr *iu = seq->payload->dma.virt;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efc *efc = node->efc;
+ u16 gscmd = be16_to_cpu(iu->ct_cmd);
+
+ efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n",
+ node->display_name, gscmd);
+ efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
+ FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
+}
+
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
+{
+ struct efc_node_cb cbdata;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
+}
+
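+/*
+ * Dispatch frames from the node pending frame list until the list is
+ * empty or the node starts holding frames again.
+ */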
+void
+efc_process_node_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 pend_frames_processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame
+ * processed because any given frame could cause a transition
+ * to a state that holds frames
+ */
+ if (node->hold_frames)
+ break;
+
+ seq = NULL;
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ if (!list_empty(&node->pend_frames)) {
+ seq = list_first_entry(&node->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ if (!seq) {
+ pend_frames_processed = node->pend_frames_processed;
+ node->pend_frames_processed = 0;
+ break;
+ }
+ node->pend_frames_processed++;
+
+ /* now dispatch frame(s) to dispatch function */
+ efc_node_dispatch_frame(node, seq);
+ efc->tt.hw_seq_free(efc, seq);
+ }
+
+ if (pend_frames_processed != 0)
+ efc_log_debug(efc, "%u node frames held and processed\n",
+ pend_frames_processed);
+}
+
+void
+efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
+{
+ unsigned long flags = 0;
+ enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
+ struct efc *efc = node->efc;
+
+ if (status)
+ evt = EFC_EVT_NODE_SESS_REG_FAIL;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, evt, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
+{
+ struct efc *efc = node->efc;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, evt, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_shutdown(struct efc_node *node, void *arg)
+{
+ unsigned long flags = 0;
+ struct efc *efc = node->efc;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
diff --git a/drivers/scsi/elx/libefc/efc_node.h b/drivers/scsi/elx/libefc/efc_node.h
new file mode 100644
index 000000000000..e9c600ac45d5
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFC_NODE_H__)
+#define __EFC_NODE_H__
+#include "scsi/fc/fc_ns.h"
+
+#define EFC_NODEDB_PAUSE_FABRIC_LOGIN (1 << 0)
+#define EFC_NODEDB_PAUSE_NAMESERVER (1 << 1)
+#define EFC_NODEDB_PAUSE_NEW_NODES (1 << 2)
+
+#define MAX_ACC_REJECT_PAYLOAD sizeof(struct fc_els_ls_rjt)
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efc, "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \
+ io->node->display_name, io->instance_index, io->init_task_tag, \
+ io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+static inline void
+efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ const char *handler)
+{
+ struct efc_node *node = ctx->app;
+
+ if (evt == EFC_EVT_ENTER) {
+ strncpy(node->current_state_name, handler,
+ sizeof(node->current_state_name));
+ } else if (evt == EFC_EVT_EXIT) {
+ strncpy(node->prev_state_name, node->current_state_name,
+ sizeof(node->prev_state_name));
+ strncpy(node->current_state_name, "invalid",
+ sizeof(node->current_state_name));
+ }
+ node->prev_evt = node->current_evt;
+ node->current_evt = evt;
+}
+
+/**
+ * hold frames in pending frame list
+ *
+ * Unsolicited receive frames are held on the node pending frame list,
+ * rather than being processed.
+ */
+
+static inline void
+efc_node_hold_frames(struct efc_node *node)
+{
+ node->hold_frames = true;
+}
+
+/**
+ * accept frames
+ *
+ * Unsolicited receive frames are processed rather than being held on the
+ * node pending frame list.
+ */
+
+static inline void
+efc_node_accept_frames(struct efc_node *node)
+{
+ node->hold_frames = false;
+}
+
+/*
+ * Node initiator/target enable defines
+ * All combinations of the SLI port (nport) initiator/target enable,
+ * and remote node initiator/target enable are enumerated.
+ * e.g. EFC_NODE_ENABLE_T_TO_IT decodes to: target mode is enabled on the SLI
+ * port and I+T is enabled on the remote node.
+ */
+enum efc_node_enable {
+ EFC_NODE_ENABLE_x_TO_x,
+ EFC_NODE_ENABLE_x_TO_T,
+ EFC_NODE_ENABLE_x_TO_I,
+ EFC_NODE_ENABLE_x_TO_IT,
+ EFC_NODE_ENABLE_T_TO_x,
+ EFC_NODE_ENABLE_T_TO_T,
+ EFC_NODE_ENABLE_T_TO_I,
+ EFC_NODE_ENABLE_T_TO_IT,
+ EFC_NODE_ENABLE_I_TO_x,
+ EFC_NODE_ENABLE_I_TO_T,
+ EFC_NODE_ENABLE_I_TO_I,
+ EFC_NODE_ENABLE_I_TO_IT,
+ EFC_NODE_ENABLE_IT_TO_x,
+ EFC_NODE_ENABLE_IT_TO_T,
+ EFC_NODE_ENABLE_IT_TO_I,
+ EFC_NODE_ENABLE_IT_TO_IT,
+};
+
+static inline enum efc_node_enable
+efc_node_get_enable(struct efc_node *node)
+{
+ u32 retval = 0;
+
+ if (node->nport->enable_ini)
+ retval |= (1U << 3);
+ if (node->nport->enable_tgt)
+ retval |= (1U << 2);
+ if (node->init)
+ retval |= (1U << 1);
+ if (node->targ)
+ retval |= (1U << 0);
+ return (enum efc_node_enable)retval;
+}
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_attach(struct efc_node *node);
+struct efc_node *
+efc_node_alloc(struct efc_nport *nport, u32 port_id,
+ bool init, bool targ);
+void
+efc_node_free(struct efc_node *efc);
+void
+efc_node_update_display_name(struct efc_node *node);
+void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt,
+ void *arg);
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+efc_node_save_sparms(struct efc_node *node, void *payload);
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *, enum efc_sm_event,
+ void *), void *data);
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+efc_node_initiate_cleanup(struct efc_node *node);
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name);
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg));
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+int
+efc_node_active_ios_empty(struct efc_node *node);
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+void
+efc_process_node_pending(struct efc_node *domain);
+
+u64 efc_node_get_wwnn(struct efc_node *node);
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 id);
+void
+efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg);
+void
+efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s);
+void
+efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq);
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __EFC_NODE_H__ */
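As a worked example of the enable encoding declared in this header (illustrative only, not part of the patch): with target mode enabled on the nport and an I+T-capable remote node, efc_node_get_enable() sets bits 2, 1 and 0 (0b0111), which is EFC_NODE_ENABLE_T_TO_IT. The helper below is a hypothetical sketch and assumes it is compiled alongside the driver headers, with "efc.h" pulling in the node definitions as the .c files in this series do.

/* Illustrative sketch only -- not part of the submitted driver. */
#include "efc.h"

static bool efc_example_remote_is_it_capable(struct efc_node *node)
{
	/*
	 * nport with target mode only: bit 2; remote node advertising
	 * initiator and target: bits 1 and 0 => 0b0111 == T_TO_IT.
	 */
	return efc_node_get_enable(node) == EFC_NODE_ENABLE_T_TO_IT;
}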
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
new file mode 100644
index 000000000000..2e83a667901f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * NPORT
+ *
+ * Port object for physical port and NPIV ports.
+ */
+
+/*
+ * NPORT REFERENCE COUNTING
+ *
+ * A nport reference should be taken when:
+ * - an nport is allocated
+ * - a vport populates associated nport
+ * - a remote node is allocated
+ * - an unsolicited frame is processed
+ * The reference should be dropped when:
+ * - the unsolicited frame processing is done
+ * - the remote node is removed
+ * - the vport is removed
+ * - the nport is removed
+ */
+
+#include "efc.h"
+
+void
+efc_nport_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_nport *nport = data;
+ unsigned long flags = 0;
+
+ efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_sm_post_event(&nport->sm, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+static struct efc_nport *
+efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
+{
+ struct efc_nport *nport = NULL;
+
+ /* Find a nport, given the WWNN and WWPN */
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwnn == wwnn && nport->wwpn == wwpn)
+ return nport;
+ }
+ return NULL;
+}
+
+static void
+_efc_nport_free(struct kref *arg)
+{
+ struct efc_nport *nport = container_of(arg, struct efc_nport, ref);
+
+ kfree(nport);
+}
+
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt)
+{
+ struct efc_nport *nport;
+
+ if (domain->efc->enable_ini)
+ enable_ini = 0;
+
+ /* Return a failure if this nport has already been allocated */
+ if ((wwpn != 0) || (wwnn != 0)) {
+ nport = efc_nport_find_wwn(domain, wwnn, wwpn);
+ if (nport) {
+ efc_log_err(domain->efc,
+ "NPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ return NULL;
+ }
+ }
+
+ nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
+ if (!nport)
+ return nport;
+
+ /* initialize refcount */
+ kref_init(&nport->ref);
+ nport->release = _efc_nport_free;
+
+ nport->efc = domain->efc;
+ snprintf(nport->display_name, sizeof(nport->display_name), "------");
+ nport->domain = domain;
+ xa_init(&nport->lookup);
+ nport->instance_index = domain->nport_count++;
+ nport->sm.app = nport;
+ nport->enable_ini = enable_ini;
+ nport->enable_tgt = enable_tgt;
+ nport->enable_rscn = (nport->enable_ini ||
+ (nport->enable_tgt && enable_target_rscn(nport->efc)));
+
+ /* Copy service parameters from domain */
+ memcpy(nport->service_params, domain->service_params,
+ sizeof(struct fc_els_flogi));
+
+ /* Update requested fc_id */
+ nport->fc_id = fc_id;
+
+ /* Update the nport's service parameters for the new wwn's */
+ nport->wwpn = wwpn;
+ nport->wwnn = wwnn;
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
+ (unsigned long long)wwnn);
+
+ /*
+ * if this is the "first" nport of the domain,
+ * then make it the "phys" nport
+ */
+ if (list_empty(&domain->nport_list))
+ domain->nport = nport;
+
+ INIT_LIST_HEAD(&nport->list_entry);
+ list_add_tail(&nport->list_entry, &domain->nport_list);
+
+ kref_get(&domain->ref);
+
+ efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);
+
+ return nport;
+}
+
+void
+efc_nport_free(struct efc_nport *nport)
+{
+ struct efc_domain *domain;
+
+ if (!nport)
+ return;
+
+ domain = nport->domain;
+ efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
+ list_del(&nport->list_entry);
+ /*
+ * if this is the physical nport,
+ * then clear it out of the domain
+ */
+ if (nport == domain->nport)
+ domain->nport = NULL;
+
+ xa_destroy(&nport->lookup);
+ xa_erase(&domain->lookup, nport->fc_id);
+
+ if (list_empty(&domain->nport_list))
+ efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ kref_put(&domain->ref, domain->release);
+ kref_put(&nport->ref, nport->release);
+}
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id)
+{
+ struct efc_nport *nport;
+
+ /* Find a nport object, given an FC_ID */
+ nport = xa_load(&domain->lookup, d_id);
+ if (!nport || !kref_get_unless_zero(&nport->ref))
+ return NULL;
+
+ return nport;
+}
+
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id)
+{
+ int rc;
+ struct efc_node *node;
+ struct efc *efc = nport->efc;
+ unsigned long index;
+
+ /* Set our lookup */
+ rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update our display_name */
+ efc_node_fcid_display(fc_id, nport->display_name,
+ sizeof(nport->display_name));
+
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_update_display_name(node);
+ }
+
+ efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
+ nport->display_name, fc_id);
+
+ /* Register a nport, given an FC_ID */
+ rc = efc_cmd_nport_attach(efc, nport, fc_id);
+ if (rc < 0) {
+ efc_log_err(nport->efc,
+ "efc_hw_port_attach failed: %d\n", rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efc_nport_shutdown(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_node *node;
+ unsigned long index;
+
+ xa_for_each(&nport->lookup, index, node) {
+ if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ /*
+ * If this is a vport, logout of the fabric
+ * controller so that it deletes the vport
+ * on the switch.
+ */
+ /* if link is down, don't send logo */
+ if (efc->link_status == EFC_LINK_STATUS_DOWN) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
+ node->display_name);
+
+ if (!efc_send_logo(node)) {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
+ continue;
+ }
+
+ /*
+ * failed to send LOGO,
+ * go ahead and clean up the node anyway
+ */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ }
+}
+
+static void
+efc_vport_link_down(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+
+ /* Clear the nport reference in the vport specification */
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ kref_put(&nport->ref, nport->release);
+ vport->nport = NULL;
+ break;
+ }
+ }
+}
+
+static void
+__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ break;
+ case EFC_EVT_NPORT_ATTACH_OK:
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN:
+ /* Flag this nport as shutting down */
+ nport->shutting_down = true;
+
+ if (nport->is_vport)
+ efc_vport_link_down(nport);
+
+ if (xa_empty(&nport->lookup)) {
+ /* Remove the nport from the domain's lookup table */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free,
+ NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_debug(nport->efc,
+ "efc_hw_port_free failed\n");
+ /* Not much we can do, free the nport anyway */
+ efc_nport_free(nport);
+ }
+ } else {
+ /* sm: node list is not empty / shutdown nodes */
+ efc_sm_transition(ctx,
+ __efc_nport_wait_shutdown, NULL);
+ efc_nport_shutdown(nport);
+ }
+ break;
+ default:
+ efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
+ nport->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ /* the physical nport is attached */
+ case EFC_EVT_NPORT_ATTACH_OK:
+ WARN_ON(nport != domain->nport);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+
+ case EFC_EVT_NPORT_ALLOC_OK:
+ /* ignore */
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ __be64 be_wwpn = cpu_to_be64(nport->wwpn);
+
+ if (nport->wwpn == 0)
+ efc_log_debug(efc, "vport: letting f/w select WWN\n");
+
+ if (nport->fc_id != U32_MAX) {
+ efc_log_debug(efc, "vport: hard coding port id: %x\n",
+ nport->fc_id);
+ }
+
+ efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
+ /* If wwpn is zero, then we'll let the f/w assign wwpn */
+ if (efc_cmd_nport_alloc(efc, nport, nport->domain,
+ nport->wwpn == 0 ? NULL :
+ (uint8_t *)&be_wwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ break;
+ }
+
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ if (nport->wwnn == 0) {
+ nport->wwnn = be64_to_cpu(nport->sli_wwnn);
+ nport->wwpn = be64_to_cpu(nport->sli_wwpn);
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ }
+
+ /* Update the nport's service parameters */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * if nport->fc_id is uninitialized,
+ * then request that the fabric node use FDISC
+ * to find an fc_id.
+ * Otherwise we're restoring vports, or we're in
+ * fabric emulation mode, so attach the fc_id
+ */
+ if (nport->fc_id == U32_MAX) {
+ struct efc_node *fabric;
+
+ fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
+ false);
+ if (!fabric) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return;
+ }
+ efc_node_transition(fabric, __efc_vport_fabric_init,
+ NULL);
+ } else {
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ efc_nport_attach(nport, nport->fc_id);
+ }
+ efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ /*
+ * This state is entered after the nport is allocated;
+ * it then waits for a fabric node
+ * FDISC to complete, which requests a nport attach.
+ * The nport attach complete is handled in this state.
+ */
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ struct efc_node *node;
+
+ /* Find our fabric node, and forward this event */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (!node) {
+ efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
+ break;
+ }
+ /* sm: / forward nport attach to fabric node */
+ efc_node_post_event(node, evt, NULL);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_vport_update_spec(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ vport->wwnn = nport->wwnn;
+ vport->wwpn = nport->wwpn;
+ vport->tgt_data = nport->tgt_data;
+ vport->ini_data = nport->ini_data;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ struct efc_node *node;
+ unsigned long index;
+
+ efc_log_debug(efc,
+ "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ xa_for_each(&nport->lookup, index, node)
+ efc_node_update_display_name(node);
+
+ efc->tt.new_nport(efc, nport);
+
+ /*
+ * Update the vport (if it's not the physical nport)
+ * parameters
+ */
+ if (nport->is_vport)
+ efc_vport_update_spec(nport);
+ break;
+ }
+
+ case EFC_EVT_EXIT:
+ efc_log_debug(efc,
+ "[%s] NPORT deattached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ efc->tt.del_nport(efc, nport);
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK:
+ case EFC_EVT_NPORT_ALLOC_FAIL:
+ case EFC_EVT_NPORT_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_FAIL:
+ /* ignore these events - just wait for the all free event */
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ /*
+ * Remove the nport from the domain's
+ * sparse vector lookup table
+ */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_err(nport->efc, "efc_hw_port_free failed\n");
+ /* Not much we can do, free the nport anyway */
+ efc_nport_free(nport);
+ }
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK:
+ /* Ignore as we are waiting for the free CB */
+ break;
+ case EFC_EVT_NPORT_FREE_OK: {
+ /* All done, free myself */
+ efc_nport_free(nport);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
+{
+ struct efc_nport *nport;
+
+ lockdep_assert_held(&domain->efc->lock);
+
+ nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
+ vport->enable_ini, vport->enable_tgt);
+ vport->nport = nport;
+ if (!nport)
+ return -EIO;
+
+ kref_get(&nport->ref);
+ nport->is_vport = true;
+ nport->tgt_data = vport->tgt_data;
+ nport->ini_data = vport->ini_data;
+
+ efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);
+
+ return 0;
+}
+
+int
+efc_vport_start(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ /* Use the vport spec to find the associated vports and start them */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (!vport->nport) {
+ if (efc_vport_nport_alloc(domain, vport))
+ rc = -EIO;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool ini, bool tgt, void *tgt_data,
+ void *ini_data)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (ini && domain->efc->enable_ini == 0) {
+ efc_log_debug(efc, "driver initiator mode not enabled\n");
+ return -EIO;
+ }
+
+ if (tgt && domain->efc->enable_tgt == 0) {
+ efc_log_debug(efc, "driver target mode not enabled\n");
+ return -EIO;
+ }
+
+ /*
+ * Create a vport spec if we need to recreate
+ * this vport after a link up event
+ */
+ vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
+ tgt_data, ini_data);
+ if (!vport) {
+ efc_log_err(efc, "failed to create vport object entry\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ rc = efc_vport_nport_alloc(domain, vport);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, uint64_t wwnn)
+{
+ struct efc_nport *nport;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ /* walk the efc_vport_list and remove from there */
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ if (!domain) {
+ /* No domain means no nport to look for */
+ return 0;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
+ kref_put(&nport->ref, nport->release);
+ /* Shutdown this NPORT */
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return 0;
+}
+
+void
+efc_vport_del_all(struct efc *efc)
+{
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
+ u32 fc_id, bool enable_ini,
+ bool enable_tgt, void *tgt_data, void *ini_data)
+{
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+ /*
+ * walk the efc_vport_list and return failure
+ * if a valid vport entry (one with a non-zero WWPN and WWNN)
+ * has already been created
+ */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if ((wwpn && vport->wwpn == wwpn) &&
+ (wwnn && vport->wwnn == wwnn)) {
+ efc_log_err(efc,
+ "VPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+ }
+
+ vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
+ if (!vport) {
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+
+ vport->wwnn = wwnn;
+ vport->wwpn = wwpn;
+ vport->fc_id = fc_id;
+ vport->enable_tgt = enable_tgt;
+ vport->enable_ini = enable_ini;
+ vport->tgt_data = tgt_data;
+ vport->ini_data = ini_data;
+
+ INIT_LIST_HEAD(&vport->list_entry);
+ list_add_tail(&vport->list_entry, &efc->vport_list);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return vport;
+}
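A minimal sketch (not part of the patch) of how a backend might drive the NPIV helpers defined in this file. The WWN values and function name are placeholders; passing U32_MAX as fc_id asks the state machine above to obtain an address via FDISC.

/* Illustrative sketch only, assuming "efc.h" as included by this file. */
#include "efc.h"

static int efc_example_add_and_remove_vport(struct efc *efc,
					     struct efc_domain *domain)
{
	u64 wwpn = 0x10000090fa000001ULL;	/* hypothetical port name */
	u64 wwnn = 0x20000090fa000001ULL;	/* hypothetical node name */
	int rc;

	/* create the vport spec and allocate/start the nport */
	rc = efc_nport_vport_new(domain, wwpn, wwnn, U32_MAX,
				 false, true, NULL, NULL);
	if (rc)
		return rc;

	/* remove the spec and post EFC_EVT_SHUTDOWN to the matching nport */
	return efc_nport_vport_del(efc, domain, wwpn, wwnn);
}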
diff --git a/drivers/scsi/elx/libefc/efc_nport.h b/drivers/scsi/elx/libefc/efc_nport.h
new file mode 100644
index 000000000000..b575ea205bbf
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/**
+ * EFC FC port (NPORT) exported declarations
+ *
+ */
+
+#ifndef __EFC_NPORT_H__
+#define __EFC_NPORT_H__
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id);
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt);
+void
+efc_nport_free(struct efc_nport *nport);
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id);
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_vport_start(struct efc_domain *domain);
+
+#endif /* __EFC_NPORT_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_sm.c b/drivers/scsi/elx/libefc/efc_sm.c
new file mode 100644
index 000000000000..afd963782c1c
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Generic state machine framework.
+ */
+#include "efc.h"
+#include "efc_sm.h"
+
+/**
+ * efc_sm_post_event() - Post an event to a context.
+ *
+ * @ctx: State machine context
+ * @evt: Event to post
+ * @data: Event-specific data (if any)
+ */
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data)
+{
+ if (!ctx->current_state)
+ return -EIO;
+
+ ctx->current_state(ctx, evt, data);
+ return 0;
+}
+
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+
+{
+ if (ctx->current_state == state) {
+ efc_sm_post_event(ctx, EFC_EVT_REENTER, data);
+ } else {
+ efc_sm_post_event(ctx, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_sm_post_event(ctx, EFC_EVT_ENTER, data);
+ }
+}
+
+static char *event_name[] = EFC_SM_EVENT_NAME;
+
+const char *efc_sm_event_name(enum efc_sm_event evt)
+{
+ if (evt > EFC_EVT_LAST)
+ return "unknown";
+
+ return event_name[evt];
+}
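For context, a hypothetical consumer of this state machine framework could look like the sketch below; the handler and start function names are assumptions, not part of the patch, and the ctx is assumed to be an already-initialized struct efc_sm_ctx.

/* Illustrative sketch only, assuming the efc_sm.h declarations. */
#include "efc.h"
#include "efc_sm.h"

static void example_state(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			  void *arg)
{
	switch (evt) {
	case EFC_EVT_ENTER:
		/* posted by efc_sm_transition() when this state is entered */
		break;
	case EFC_EVT_EXIT:
		/* posted to the old state just before a transition away */
		break;
	default:
		break;
	}
}

static void example_start(struct efc_sm_ctx *ctx)
{
	/* posts EXIT to the previous state (if any), then ENTER here */
	efc_sm_transition(ctx, example_state, NULL);

	/* later events are delivered to whatever the current state is */
	efc_sm_post_event(ctx, EFC_EVT_SHUTDOWN, NULL);
}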
diff --git a/drivers/scsi/elx/libefc/efc_sm.h b/drivers/scsi/elx/libefc/efc_sm.h
new file mode 100644
index 000000000000..e26867b4db24
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/**
+ * Generic state machine framework declarations.
+ */
+
+#ifndef _EFC_SM_H
+#define _EFC_SM_H
+
+struct efc_sm_ctx;
+
+/* State Machine events */
+enum efc_sm_event {
+ /* Common Events */
+ EFC_EVT_ENTER,
+ EFC_EVT_REENTER,
+ EFC_EVT_EXIT,
+ EFC_EVT_SHUTDOWN,
+ EFC_EVT_ALL_CHILD_NODES_FREE,
+ EFC_EVT_RESUME,
+ EFC_EVT_TIMER_EXPIRED,
+
+ /* Domain Events */
+ EFC_EVT_RESPONSE,
+ EFC_EVT_ERROR,
+
+ EFC_EVT_DOMAIN_FOUND,
+ EFC_EVT_DOMAIN_ALLOC_OK,
+ EFC_EVT_DOMAIN_ALLOC_FAIL,
+ EFC_EVT_DOMAIN_REQ_ATTACH,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ EFC_EVT_DOMAIN_ATTACH_FAIL,
+ EFC_EVT_DOMAIN_LOST,
+ EFC_EVT_DOMAIN_FREE_OK,
+ EFC_EVT_DOMAIN_FREE_FAIL,
+ EFC_EVT_HW_DOMAIN_REQ_ATTACH,
+ EFC_EVT_HW_DOMAIN_REQ_FREE,
+
+ /* Sport Events */
+ EFC_EVT_NPORT_ALLOC_OK,
+ EFC_EVT_NPORT_ALLOC_FAIL,
+ EFC_EVT_NPORT_ATTACH_OK,
+ EFC_EVT_NPORT_ATTACH_FAIL,
+ EFC_EVT_NPORT_FREE_OK,
+ EFC_EVT_NPORT_FREE_FAIL,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ EFC_EVT_HW_PORT_ALLOC_OK,
+ EFC_EVT_HW_PORT_ALLOC_FAIL,
+ EFC_EVT_HW_PORT_ATTACH_OK,
+ EFC_EVT_HW_PORT_REQ_ATTACH,
+ EFC_EVT_HW_PORT_REQ_FREE,
+ EFC_EVT_HW_PORT_FREE_OK,
+
+ /* Login Events */
+ EFC_EVT_SRRS_ELS_REQ_OK,
+ EFC_EVT_SRRS_ELS_CMPL_OK,
+ EFC_EVT_SRRS_ELS_REQ_FAIL,
+ EFC_EVT_SRRS_ELS_CMPL_FAIL,
+ EFC_EVT_SRRS_ELS_REQ_RJT,
+ EFC_EVT_NODE_ATTACH_OK,
+ EFC_EVT_NODE_ATTACH_FAIL,
+ EFC_EVT_NODE_FREE_OK,
+ EFC_EVT_NODE_FREE_FAIL,
+ EFC_EVT_ELS_FRAME,
+ EFC_EVT_ELS_REQ_TIMEOUT,
+ EFC_EVT_ELS_REQ_ABORTED,
+ /* request an ELS IO be aborted */
+ EFC_EVT_ABORT_ELS,
+ /* ELS abort process complete */
+ EFC_EVT_ELS_ABORT_CMPL,
+
+ EFC_EVT_ABTS_RCVD,
+
+ /* node is not in the GID_PT payload */
+ EFC_EVT_NODE_MISSING,
+ /* node is allocated and in the GID_PT payload */
+ EFC_EVT_NODE_REFOUND,
+ /* node shutting down due to PLOGI recvd (implicit logo) */
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ /* node shutting down due to LOGO recvd/sent (explicit logo) */
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+
+ EFC_EVT_PLOGI_RCVD,
+ EFC_EVT_FLOGI_RCVD,
+ EFC_EVT_LOGO_RCVD,
+ EFC_EVT_PRLI_RCVD,
+ EFC_EVT_PRLO_RCVD,
+ EFC_EVT_PDISC_RCVD,
+ EFC_EVT_FDISC_RCVD,
+ EFC_EVT_ADISC_RCVD,
+ EFC_EVT_RSCN_RCVD,
+ EFC_EVT_SCR_RCVD,
+ EFC_EVT_ELS_RCVD,
+
+ EFC_EVT_FCP_CMD_RCVD,
+
+ EFC_EVT_GIDPT_DELAY_EXPIRED,
+
+ /* SCSI Target Server events */
+ EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY,
+ EFC_EVT_NODE_DEL_INI_COMPLETE,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE,
+ EFC_EVT_NODE_SESS_REG_OK,
+ EFC_EVT_NODE_SESS_REG_FAIL,
+
+ /* Must be last */
+ EFC_EVT_LAST
+};
+
+/* State Machine event name lookup array */
+#define EFC_SM_EVENT_NAME { \
+ [EFC_EVT_ENTER] = "EFC_EVT_ENTER", \
+ [EFC_EVT_REENTER] = "EFC_EVT_REENTER", \
+ [EFC_EVT_EXIT] = "EFC_EVT_EXIT", \
+ [EFC_EVT_SHUTDOWN] = "EFC_EVT_SHUTDOWN", \
+ [EFC_EVT_ALL_CHILD_NODES_FREE] = "EFC_EVT_ALL_CHILD_NODES_FREE",\
+ [EFC_EVT_RESUME] = "EFC_EVT_RESUME", \
+ [EFC_EVT_TIMER_EXPIRED] = "EFC_EVT_TIMER_EXPIRED", \
+ [EFC_EVT_RESPONSE] = "EFC_EVT_RESPONSE", \
+ [EFC_EVT_ERROR] = "EFC_EVT_ERROR", \
+ [EFC_EVT_DOMAIN_FOUND] = "EFC_EVT_DOMAIN_FOUND", \
+ [EFC_EVT_DOMAIN_ALLOC_OK] = "EFC_EVT_DOMAIN_ALLOC_OK", \
+ [EFC_EVT_DOMAIN_ALLOC_FAIL] = "EFC_EVT_DOMAIN_ALLOC_FAIL", \
+ [EFC_EVT_DOMAIN_REQ_ATTACH] = "EFC_EVT_DOMAIN_REQ_ATTACH", \
+ [EFC_EVT_DOMAIN_ATTACH_OK] = "EFC_EVT_DOMAIN_ATTACH_OK", \
+ [EFC_EVT_DOMAIN_ATTACH_FAIL] = "EFC_EVT_DOMAIN_ATTACH_FAIL", \
+ [EFC_EVT_DOMAIN_LOST] = "EFC_EVT_DOMAIN_LOST", \
+ [EFC_EVT_DOMAIN_FREE_OK] = "EFC_EVT_DOMAIN_FREE_OK", \
+ [EFC_EVT_DOMAIN_FREE_FAIL] = "EFC_EVT_DOMAIN_FREE_FAIL", \
+ [EFC_EVT_HW_DOMAIN_REQ_ATTACH] = "EFC_EVT_HW_DOMAIN_REQ_ATTACH",\
+ [EFC_EVT_HW_DOMAIN_REQ_FREE] = "EFC_EVT_HW_DOMAIN_REQ_FREE", \
+ [EFC_EVT_NPORT_ALLOC_OK] = "EFC_EVT_NPORT_ALLOC_OK", \
+ [EFC_EVT_NPORT_ALLOC_FAIL] = "EFC_EVT_NPORT_ALLOC_FAIL", \
+ [EFC_EVT_NPORT_ATTACH_OK] = "EFC_EVT_NPORT_ATTACH_OK", \
+ [EFC_EVT_NPORT_ATTACH_FAIL] = "EFC_EVT_NPORT_ATTACH_FAIL", \
+ [EFC_EVT_NPORT_FREE_OK] = "EFC_EVT_NPORT_FREE_OK", \
+ [EFC_EVT_NPORT_FREE_FAIL] = "EFC_EVT_NPORT_FREE_FAIL", \
+ [EFC_EVT_NPORT_TOPOLOGY_NOTIFY] = "EFC_EVT_NPORT_TOPOLOGY_NOTIFY",\
+ [EFC_EVT_HW_PORT_ALLOC_OK] = "EFC_EVT_HW_PORT_ALLOC_OK", \
+ [EFC_EVT_HW_PORT_ALLOC_FAIL] = "EFC_EVT_HW_PORT_ALLOC_FAIL", \
+ [EFC_EVT_HW_PORT_ATTACH_OK] = "EFC_EVT_HW_PORT_ATTACH_OK", \
+ [EFC_EVT_HW_PORT_REQ_ATTACH] = "EFC_EVT_HW_PORT_REQ_ATTACH", \
+ [EFC_EVT_HW_PORT_REQ_FREE] = "EFC_EVT_HW_PORT_REQ_FREE", \
+ [EFC_EVT_HW_PORT_FREE_OK] = "EFC_EVT_HW_PORT_FREE_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_OK] = "EFC_EVT_SRRS_ELS_REQ_OK", \
+ [EFC_EVT_SRRS_ELS_CMPL_OK] = "EFC_EVT_SRRS_ELS_CMPL_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_FAIL] = "EFC_EVT_SRRS_ELS_REQ_FAIL", \
+ [EFC_EVT_SRRS_ELS_CMPL_FAIL] = "EFC_EVT_SRRS_ELS_CMPL_FAIL", \
+ [EFC_EVT_SRRS_ELS_REQ_RJT] = "EFC_EVT_SRRS_ELS_REQ_RJT", \
+ [EFC_EVT_NODE_ATTACH_OK] = "EFC_EVT_NODE_ATTACH_OK", \
+ [EFC_EVT_NODE_ATTACH_FAIL] = "EFC_EVT_NODE_ATTACH_FAIL", \
+ [EFC_EVT_NODE_FREE_OK] = "EFC_EVT_NODE_FREE_OK", \
+ [EFC_EVT_NODE_FREE_FAIL] = "EFC_EVT_NODE_FREE_FAIL", \
+ [EFC_EVT_ELS_FRAME] = "EFC_EVT_ELS_FRAME", \
+ [EFC_EVT_ELS_REQ_TIMEOUT] = "EFC_EVT_ELS_REQ_TIMEOUT", \
+ [EFC_EVT_ELS_REQ_ABORTED] = "EFC_EVT_ELS_REQ_ABORTED", \
+ [EFC_EVT_ABORT_ELS] = "EFC_EVT_ABORT_ELS", \
+ [EFC_EVT_ELS_ABORT_CMPL] = "EFC_EVT_ELS_ABORT_CMPL", \
+ [EFC_EVT_ABTS_RCVD] = "EFC_EVT_ABTS_RCVD", \
+ [EFC_EVT_NODE_MISSING] = "EFC_EVT_NODE_MISSING", \
+ [EFC_EVT_NODE_REFOUND] = "EFC_EVT_NODE_REFOUND", \
+ [EFC_EVT_SHUTDOWN_IMPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_IMPLICIT_LOGO",\
+ [EFC_EVT_SHUTDOWN_EXPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_EXPLICIT_LOGO",\
+ [EFC_EVT_PLOGI_RCVD] = "EFC_EVT_PLOGI_RCVD", \
+ [EFC_EVT_FLOGI_RCVD] = "EFC_EVT_FLOGI_RCVD", \
+ [EFC_EVT_LOGO_RCVD] = "EFC_EVT_LOGO_RCVD", \
+ [EFC_EVT_PRLI_RCVD] = "EFC_EVT_PRLI_RCVD", \
+ [EFC_EVT_PRLO_RCVD] = "EFC_EVT_PRLO_RCVD", \
+ [EFC_EVT_PDISC_RCVD] = "EFC_EVT_PDISC_RCVD", \
+ [EFC_EVT_FDISC_RCVD] = "EFC_EVT_FDISC_RCVD", \
+ [EFC_EVT_ADISC_RCVD] = "EFC_EVT_ADISC_RCVD", \
+ [EFC_EVT_RSCN_RCVD] = "EFC_EVT_RSCN_RCVD", \
+ [EFC_EVT_SCR_RCVD] = "EFC_EVT_SCR_RCVD", \
+ [EFC_EVT_ELS_RCVD] = "EFC_EVT_ELS_RCVD", \
+ [EFC_EVT_FCP_CMD_RCVD] = "EFC_EVT_FCP_CMD_RCVD", \
+ [EFC_EVT_NODE_DEL_INI_COMPLETE] = "EFC_EVT_NODE_DEL_INI_COMPLETE",\
+ [EFC_EVT_NODE_DEL_TGT_COMPLETE] = "EFC_EVT_NODE_DEL_TGT_COMPLETE",\
+ [EFC_EVT_LAST] = "EFC_EVT_LAST", \
+}
+
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data);
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg),
+ void *data);
+void efc_sm_disable(struct efc_sm_ctx *ctx);
+const char *efc_sm_event_name(enum efc_sm_event evt);
+
+#endif /* ! _EFC_SM_H */
diff --git a/drivers/scsi/elx/libefc/efclib.c b/drivers/scsi/elx/libefc/efclib.c
new file mode 100644
index 000000000000..dd3e3d0a4761
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * LIBEFC LOCKING
+ *
+ * The critical sections protected by the efc's spinlock are quite broad and
+ * may be improved upon in the future. The libefc code and its locking doesn't
+ * influence the I/O path, so excessive locking doesn't impact I/O performance.
+ *
+ * The strategy is to lock whenever processing a request from user driver. This
+ * means that the entry points into the libefc library are protected by efc
+ * lock. So all the state machine transitions are protected.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "efc.h"
+
+int efcport_init(struct efc *efc)
+{
+ u32 rc = 0;
+
+ spin_lock_init(&efc->lock);
+ INIT_LIST_HEAD(&efc->vport_list);
+ efc->hold_frames = false;
+ spin_lock_init(&efc->pend_frames_lock);
+ INIT_LIST_HEAD(&efc->pend_frames);
+
+ /* Create Node pool */
+ efc->node_pool = mempool_create_kmalloc_pool(EFC_MAX_REMOTE_NODES,
+ sizeof(struct efc_node));
+ if (!efc->node_pool) {
+ efc_log_err(efc, "Can't allocate node pool\n");
+ return -ENOMEM;
+ }
+
+ efc->node_dma_pool = dma_pool_create("node_dma_pool", &efc->pci->dev,
+ NODE_SPARAMS_SIZE, 0, 0);
+ if (!efc->node_dma_pool) {
+ efc_log_err(efc, "Can't allocate node dma pool\n");
+ mempool_destroy(efc->node_pool);
+ return -ENOMEM;
+ }
+
+ efc->els_io_pool = mempool_create_kmalloc_pool(EFC_ELS_IO_POOL_SZ,
+ sizeof(struct efc_els_io_req));
+ if (!efc->els_io_pool) {
+ efc_log_err(efc, "Can't allocate els io pool\n");
+ return -ENOMEM;
+ }
+
+ return rc;
+}
+
+static void
+efc_purge_pending(struct efc *efc)
+{
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &efc->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+}
+
+void efcport_destroy(struct efc *efc)
+{
+ efc_purge_pending(efc);
+ mempool_destroy(efc->els_io_pool);
+ mempool_destroy(efc->node_pool);
+ dma_pool_destroy(efc->node_dma_pool);
+}
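A rough sketch (not part of the patch) of how a base driver would be expected to pair these entry points; the example_* names are hypothetical and error handling is reduced to the minimum.

/* Illustrative sketch only, assuming a struct efc with efc->pci populated. */
static int example_attach(struct efc *efc)
{
	int rc;

	rc = efcport_init(efc);	/* locks, node pools, ELS IO pool */
	if (rc)
		return rc;

	/* ... fill in efc->tt callbacks, bring the port up, etc. ... */
	return 0;
}

static void example_detach(struct efc *efc)
{
	efcport_destroy(efc);	/* purge pending frames, destroy pools */
}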
diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
new file mode 100644
index 000000000000..ee291cabf7e0
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.h
@@ -0,0 +1,620 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCLIB_H__
+#define __EFCLIB_H__
+
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+#include "scsi/fc/fc_gs.h"
+#include "scsi/fc_frame.h"
+#include "../include/efc_common.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_SERVICE_PARMS_LENGTH 120
+#define EFC_NAME_LENGTH 32
+#define EFC_SM_NAME_LENGTH 64
+#define EFC_DISPLAY_BUS_INFO_LENGTH 16
+
+#define EFC_WWN_LENGTH 32
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+/* Timeouts */
+#define EFC_FC_ELS_SEND_DEFAULT_TIMEOUT 0
+#define EFC_FC_FLOGI_TIMEOUT_SEC 5
+#define EFC_SHUTDOWN_TIMEOUT_USEC 30000000
+
+/* Return values for calls from base driver to libefc */
+#define EFC_SCSI_CALL_COMPLETE 0
+#define EFC_SCSI_CALL_ASYNC 1
+
+/* Local port topology */
+enum efc_nport_topology {
+ EFC_NPORT_TOPO_UNKNOWN = 0,
+ EFC_NPORT_TOPO_FABRIC,
+ EFC_NPORT_TOPO_P2P,
+ EFC_NPORT_TOPO_FC_AL,
+};
+
+#define enable_target_rscn(efc) 1
+
+enum efc_node_shutd_rsn {
+ EFC_NODE_SHUTDOWN_DEFAULT = 0,
+ EFC_NODE_SHUTDOWN_EXPLICIT_LOGO,
+ EFC_NODE_SHUTDOWN_IMPLICIT_LOGO,
+};
+
+enum efc_node_send_ls_acc {
+ EFC_NODE_SEND_LS_ACC_NONE = 0,
+ EFC_NODE_SEND_LS_ACC_PLOGI,
+ EFC_NODE_SEND_LS_ACC_PRLI,
+};
+
+#define EFC_LINK_STATUS_UP 0
+#define EFC_LINK_STATUS_DOWN 1
+
+/* State machine context header */
+struct efc_sm_ctx {
+ void (*current_state)(struct efc_sm_ctx *ctx,
+ u32 evt, void *arg);
+
+ const char *description;
+ void *app;
+};
+
+/* Description of discovered Fabric Domain */
+struct efc_domain_record {
+ u32 index;
+ u32 priority;
+ u8 address[6];
+ u8 wwn[8];
+ union {
+ u8 vlan[512];
+ u8 loop[128];
+ } map;
+ u32 speed;
+ u32 fc_id;
+ bool is_loop;
+ bool is_nport;
+};
+
+/* Domain events */
+enum efc_hw_domain_event {
+ EFC_HW_DOMAIN_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_FAIL,
+ EFC_HW_DOMAIN_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_FAIL,
+ EFC_HW_DOMAIN_FREE_OK,
+ EFC_HW_DOMAIN_FREE_FAIL,
+ EFC_HW_DOMAIN_LOST,
+ EFC_HW_DOMAIN_FOUND,
+ EFC_HW_DOMAIN_CHANGED,
+};
+
+/**
+ * Fibre Channel port object
+ *
+ * @list_entry: nport list entry
+ * @ref: reference count, each node takes a reference
+ * @release: function to free nport object
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: port display name
+ * @is_vport: Is NPIV port
+ * @free_req_pending: pending request to free resources
+ * @attached: mark attached if reg VPI succeeds
+ * @p2p_winner: TRUE if we're the point-to-point winner
+ * @domain: pointer back to domain
+ * @wwpn: port wwpn
+ * @wwnn: port wwnn
+ * @tgt_data: target backend private port data
+ * @ini_data: initiator backend private port data
+ * @indicator: VPI
+ * @fc_id: port FC address
+ * @dma: memory for Service Parameters
+ * @wwnn_str: wwpn string
+ * @sli_wwpn: SLI provided wwpn
+ * @sli_wwnn: SLI provided wwnn
+ * @sm: nport state machine context
+ * @lookup: fc_id to node lookup object
+ * @enable_ini: SCSI initiator enabled for this port
+ * @enable_tgt: SCSI target enabled for this port
+ * @enable_rscn: port will be expecting RSCN
+ * @shutting_down: nport in process of shutting down
+ * @p2p_port_id: our port id for point-to-point
+ * @topology: topology: fabric/p2p/unknown
+ * @service_params: login parameters
+ * @p2p_remote_port_id: remote node's port id for point-to-point
+ */
+
+struct efc_nport {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc *efc;
+ u32 instance_index;
+ char display_name[EFC_NAME_LENGTH];
+ bool is_vport;
+ bool free_req_pending;
+ bool attached;
+ bool p2p_winner;
+ struct efc_domain *domain;
+ u64 wwpn;
+ u64 wwnn;
+ void *tgt_data;
+ void *ini_data;
+
+ u32 indicator;
+ u32 fc_id;
+ struct efc_dma dma;
+
+ u8 wwnn_str[EFC_WWN_LENGTH];
+ __be64 sli_wwpn;
+ __be64 sli_wwnn;
+
+ struct efc_sm_ctx sm;
+ struct xarray lookup;
+ bool enable_ini;
+ bool enable_tgt;
+ bool enable_rscn;
+ bool shutting_down;
+ u32 p2p_port_id;
+ enum efc_nport_topology topology;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u32 p2p_remote_port_id;
+};
+
+/**
+ * Fibre Channel domain object
+ *
+ * This object is a container for the various SLI components needed
+ * to connect to the domain of a FC or FCoE switch
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: Node display name
+ * @nport_list: linked list of nports associated with this domain
+ * @ref: Reference count, each nport takes a reference
+ * @release: Function to free domain object
+ * @ini_domain: initiator backend private domain data
+ * @tgt_domain: target backend private domain data
+ * @sm: state machine context
+ * @fcf: FC Forwarder table index
+ * @fcf_indicator: FCFI
+ * @indicator: VFI
+ * @nport_count: Number of nports allocated
+ * @dma: memory for Service Parameters
+ * @fcf_wwn: WWN for FCF/switch
+ * @drvsm: driver domain sm context
+ * @attached: set true after attach completes
+ * @is_fc: is FC
+ * @is_loop: is loop topology
+ * @is_nlport: is public loop
+ * @domain_found_pending: A domain found is pending, drec is updated
+ * @req_domain_free: True if domain object should be free'd
+ * @req_accept_frames: set in domain state machine to enable frames
+ * @domain_notify_pend: Set in domain SM to avoid duplicate node event post
+ * @pending_drec: Pending drec if a domain found is pending
+ * @service_params: any nports service parameters
+ * @flogi_service_params: Fabric/P2P service parameters from FLOGI
+ * @lookup: d_id to node lookup object
+ * @nport: Pointer to first (physical) SLI port
+ */
+struct efc_domain {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct list_head nport_list;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ void *ini_domain;
+ void *tgt_domain;
+
+ /* Declarations private to HW/SLI */
+ u32 fcf;
+ u32 fcf_indicator;
+ u32 indicator;
+ u32 nport_count;
+ struct efc_dma dma;
+
+ /* Declarations private to FC transport */
+ u64 fcf_wwn;
+ struct efc_sm_ctx drvsm;
+ bool attached;
+ bool is_fc;
+ bool is_loop;
+ bool is_nlport;
+ bool domain_found_pending;
+ bool req_domain_free;
+ bool req_accept_frames;
+ bool domain_notify_pend;
+
+ struct efc_domain_record pending_drec;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u8 flogi_service_params[EFC_SERVICE_PARMS_LENGTH];
+
+ struct xarray lookup;
+
+ struct efc_nport *nport;
+};
+
+/**
+ * Remote Node object
+ *
+ * This object represents a connection between the SLI port and another
+ * Nx_Port on the fabric. Note this can be either a well known port such
+ * as an F_Port (i.e. ff:ff:fe) or another N_Port.
+ * @indicator: RPI
+ * @fc_id: FC address
+ * @attached: true if attached
+ * @nport: associated SLI port
+ * @node: associated node
+ */
+struct efc_remote_node {
+ u32 indicator;
+ u32 index;
+ u32 fc_id;
+
+ bool attached;
+
+ struct efc_nport *nport;
+ void *node;
+};
+
+/**
+ * FC Node object
+ * @efc: pointer back to efc structure
+ * @display_name: Node display name
+ * @nport: Associated nport pointer.
+ * @hold_frames: hold incoming frames if true
+ * @els_io_enabled: Enable allocating els ios for this node
+ * @els_ios_lock: lock to protect the els ios list
+ * @els_ios_list: ELS I/O's for this node
+ * @ini_node: backend initiator private node data
+ * @tgt_node: backend target private node data
+ * @rnode: Remote node
+ * @sm: state machine context
+ * @evtdepth: current event posting nesting depth
+ * @req_free: this node is to be free'd
+ * @attached: node is attached (REGLOGIN complete)
+ * @fcp_enabled: node is enabled to handle FCP
+ * @rscn_pending: for name server node RSCN is pending
+ * @send_plogi: send PLOGI accept, upon completion of node attach
+ * @send_plogi_acc: TRUE if io_alloc() is enabled.
+ * @send_ls_acc: type of LS acc to send
+ * @ls_acc_io: SCSI IO for LS acc
+ * @ls_acc_oxid: OX_ID for pending accept
+ * @ls_acc_did: D_ID for pending accept
+ * @shutdown_reason: reason for node shutdown
+ * @sparm_dma_buf: service parameters buffer
+ * @service_params: plogi/acc frame from remote device
+ * @pend_frames_lock: lock for inbound pending frames list
+ * @pend_frames: inbound pending frames list
+ * @pend_frames_processed: count of frames processed in hold frames interval
+ * @ox_id_in_use: used to verify one-at-a-time use of ox_id
+ * @els_retries_remaining: for ELS, number of retries remaining
+ * @els_req_cnt: number of outstanding ELS requests
+ * @els_cmpl_cnt: number of outstanding ELS completions
+ * @abort_cnt: Abort counter for debugging purposes
+ * @current_state_name: current node state
+ * @prev_state_name: previous node state
+ * @current_evt: current event
+ * @prev_evt: previous event
+ * @targ: node is target capable
+ * @init: node is init capable
+ * @refound: Handle node refound case when node is being deleted
+ * @els_io_pend_list: list of pending (not yet processed) ELS IOs
+ * @els_io_active_list: list of active (processed) ELS IOs
+ * @nodedb_state: Node debugging, saved state
+ * @gidpt_delay_timer: GIDPT delay timer
+ * @time_last_gidpt_msec: Start time of last target RSCN GIDPT
+ * @wwnn: remote port WWNN
+ * @wwpn: remote port WWPN
+ */
+struct efc_node {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct efc_nport *nport;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ bool hold_frames;
+ bool els_io_enabled;
+ bool send_plogi_acc;
+ bool send_plogi;
+ bool rscn_pending;
+ bool fcp_enabled;
+ bool attached;
+ bool req_free;
+
+ spinlock_t els_ios_lock;
+ struct list_head els_ios_list;
+ void *ini_node;
+ void *tgt_node;
+
+ struct efc_remote_node rnode;
+ /* Declarations private to FC transport */
+ struct efc_sm_ctx sm;
+ u32 evtdepth;
+
+ enum efc_node_send_ls_acc send_ls_acc;
+ void *ls_acc_io;
+ u32 ls_acc_oxid;
+ u32 ls_acc_did;
+ enum efc_node_shutd_rsn shutdown_reason;
+ bool targ;
+ bool init;
+ bool refound;
+ struct efc_dma sparm_dma_buf;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ u32 pend_frames_processed;
+ u32 ox_id_in_use;
+ u32 els_retries_remaining;
+ u32 els_req_cnt;
+ u32 els_cmpl_cnt;
+ u32 abort_cnt;
+
+ char current_state_name[EFC_SM_NAME_LENGTH];
+ char prev_state_name[EFC_SM_NAME_LENGTH];
+ int current_evt;
+ int prev_evt;
+
+ void (*nodedb_state)(struct efc_sm_ctx *ctx,
+ u32 evt, void *arg);
+ struct timer_list gidpt_delay_timer;
+ u64 time_last_gidpt_msec;
+
+ char wwnn[EFC_WWN_LENGTH];
+ char wwpn[EFC_WWN_LENGTH];
+};
+
+/**
+ * NPIV port
+ *
+ * Collection of the information required to restore a virtual port across
+ * link events
+ * @wwnn: node name
+ * @wwpn: port name
+ * @fc_id: port id
+ * @tgt_data: target backend pointer
+ * @ini_data: initiator backend pointer
+ * @nport: Used to match record after attaching for update
+ *
+ */
+
+struct efc_vport {
+ struct list_head list_entry;
+ u64 wwnn;
+ u64 wwpn;
+ u32 fc_id;
+ bool enable_tgt;
+ bool enable_ini;
+ void *tgt_data;
+ void *ini_data;
+ struct efc_nport *nport;
+};
+
+#define node_printf(node, fmt, args...) \
+ efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args)
+
+/* Node SM IO Context Callback structure */
+struct efc_node_cb {
+ int status;
+ int ext_status;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ struct efc_dma els_rsp;
+
+ /* Actual length of data received */
+ int rsp_len;
+};
+
+struct efc_hw_rq_buffer {
+ u16 rqindex;
+ struct efc_dma dma;
+};
+
+/**
+ * FC sequence object
+ *
+ * Defines a general FC sequence object
+ * @hw: HW that owns this sequence
+ * @fcfi: FCFI associated with sequence
+ * @header: Received frame header
+ * @payload: Received frame payload
+ * @hw_priv: HW private context
+ */
+struct efc_hw_sequence {
+ struct list_head list_entry;
+ void *hw;
+ u8 fcfi;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ void *hw_priv;
+};
+
+enum efc_disc_io_type {
+ EFC_DISC_IO_ELS_REQ,
+ EFC_DISC_IO_ELS_RESP,
+ EFC_DISC_IO_CT_REQ,
+ EFC_DISC_IO_CT_RESP
+};
+
+struct efc_io_els_params {
+ u32 s_id;
+ u16 ox_id;
+ u8 timeout;
+};
+
+struct efc_io_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+};
+
+union efc_disc_io_param {
+ struct efc_io_els_params els;
+ struct efc_io_ct_params ct;
+};
+
+struct efc_disc_io {
+ struct efc_dma req; /* send buffer */
+ struct efc_dma rsp; /* receive buffer */
+ enum efc_disc_io_type io_type; /* EFC_DISC_IO_TYPE enum*/
+ u16 xmit_len; /* Length of els request*/
+ u16 rsp_len; /* Max length of rsps to be rcvd */
+ u32 rpi; /* Registered RPI */
+ u32 vpi; /* VPI for this nport */
+ u32 s_id;
+ u32 d_id;
+ bool rpi_registered; /* if false, use tmp RPI */
+ union efc_disc_io_param iparam;
+};
+
+/* Return value indicating the sequence cannot be freed */
+#define EFC_HW_SEQ_HOLD 0
+/* Return value indicating the sequence can be freed */
+#define EFC_HW_SEQ_FREE 1
+
+struct libefc_function_template {
+ /*Sport*/
+ int (*new_nport)(struct efc *efc, struct efc_nport *sp);
+ void (*del_nport)(struct efc *efc, struct efc_nport *sp);
+
+ /*Scsi Node*/
+ int (*scsi_new_node)(struct efc *efc, struct efc_node *n);
+ int (*scsi_del_node)(struct efc *efc, struct efc_node *n, int reason);
+
+ int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg);
+ /*Send ELS IO*/
+ int (*send_els)(struct efc *efc, struct efc_disc_io *io);
+ /*Send BLS IO*/
+ int (*send_bls)(struct efc *efc, u32 type, struct sli_bls_params *bls);
+ /*Free HW frame*/
+ int (*hw_seq_free)(struct efc *efc, struct efc_hw_sequence *seq);
+};
+
+#define EFC_LOG_LIB 0x01
+#define EFC_LOG_NODE 0x02
+#define EFC_LOG_PORT 0x04
+#define EFC_LOG_DOMAIN 0x08
+#define EFC_LOG_ELS 0x10
+#define EFC_LOG_DOMAIN_SM 0x20
+#define EFC_LOG_SM 0x40
+
+/* efc library port structure */
+struct efc {
+ void *base;
+ struct pci_dev *pci;
+ struct sli4 *sli;
+ u32 fcfi;
+ u64 req_wwpn;
+ u64 req_wwnn;
+
+ u64 def_wwpn;
+ u64 def_wwnn;
+ u64 max_xfer_size;
+ mempool_t *node_pool;
+ struct dma_pool *node_dma_pool;
+ u32 nodes_count;
+
+ u32 link_status;
+
+ struct list_head vport_list;
+ /* lock to protect the vport list */
+ spinlock_t vport_lock;
+
+ struct libefc_function_template tt;
+ /* lock to protect the discovery library.
+ * Refer to efclib.c for more details.
+ */
+ spinlock_t lock;
+
+ bool enable_ini;
+ bool enable_tgt;
+
+ u32 log_level;
+
+ struct efc_domain *domain;
+ void (*domain_free_cb)(struct efc *efc, void *arg);
+ void *domain_free_cb_arg;
+
+ u64 tgt_rscn_delay_msec;
+ u64 tgt_rscn_period_msec;
+
+ bool external_loopback;
+ u32 nodedb_mask;
+ u32 logmask;
+ mempool_t *els_io_pool;
+ atomic_t els_io_alloc_failed_count;
+
+ /* hold pending frames */
+ bool hold_frames;
+ /* lock to protect pending frames list access */
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ /* count of pending frames that were processed */
+ u32 pend_frames_processed;
+
+};
+
+/*
+ * EFC library registration
+ * **********************************/
+int efcport_init(struct efc *efc);
+void efcport_destroy(struct efc *efc);
+/*
+ * EFC Domain
+ * **********************************/
+int efc_domain_cb(void *arg, int event, void *data);
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg);
+
+/*
+ * EFC nport
+ * **********************************/
+void efc_nport_cb(void *arg, int event, void *data);
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, u64 wwnn, u64 wwpn, u32 fc_id,
+ bool enable_ini, bool enable_tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_new(struct efc_domain *domain, u64 wwpn,
+ u64 wwnn, u32 fc_id, bool ini, bool tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, u64 wwnn);
+
+void efc_vport_del_all(struct efc *efc);
+
+/*
+ * EFC Node
+ * **********************************/
+int efc_remote_node_cb(void *arg, int event, void *data);
+void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buf_len);
+void efc_node_post_shutdown(struct efc_node *node, void *arg);
+u64 efc_node_get_wwpn(struct efc_node *node);
+
+/*
+ * EFC FCP/ELS/CT interface
+ * **********************************/
+void efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq);
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status);
+
+/*
+ * EFC SCSI INTERACTION LAYER
+ * **********************************/
+void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status);
+void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node);
+
+#endif /* __EFCLIB_H__ */
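To show how the pieces of this header fit together, here is a hedged sketch of a backend wiring up struct libefc_function_template before calling efcport_init(); it is not part of the patch, and the example_* callbacks are hypothetical stubs.

/* Illustrative sketch only -- not part of the submitted driver. */
#include "efclib.h"

static int example_new_nport(struct efc *efc, struct efc_nport *nport)
{
	/* backend would create its local port representation here */
	return 0;
}

static void example_del_nport(struct efc *efc, struct efc_nport *nport)
{
	/* backend tears its local port representation down here */
}

static int example_register_backend(struct efc *efc)
{
	efc->tt.new_nport = example_new_nport;
	efc->tt.del_nport = example_del_nport;
	/* scsi_new_node, send_els, hw_seq_free, ... would be set likewise */

	return efcport_init(efc);
}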
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
new file mode 100644
index 000000000000..6c6c04e1b74d
--- /dev/null
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -0,0 +1,5160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/**
+ * All common (i.e. transport-independent) SLI-4 functions are implemented
+ * in this file.
+ */
+#include "sli4.h"
+
+static struct sli4_asic_entry_t sli4_asic_table[] = {
+ { SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5},
+ { SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5},
+ { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
+ { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
+};
+
+/* Convert queue type enum (SLI_QTYPE_*) into a string */
+static char *SLI4_QNAME[] = {
+ "Event Queue",
+ "Completion Queue",
+ "Mailbox Queue",
+ "Work Queue",
+ "Receive Queue",
+ "Undefined"
+};
+
+/**
+ * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @length: Length in bytes of attached command.
+ * @dma: DMA buffer for non-embedded commands.
+ * Return: Command payload buffer.
+ */
+static void *
+sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
+ struct efc_dma *dma)
+{
+ struct sli4_cmd_sli_config *config;
+ u32 flags;
+
+ if (length > sizeof(config->payload.embed) && !dma) {
+ efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
+ length);
+ return NULL;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ config = buf;
+
+ config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
+ if (!dma) {
+ flags = SLI4_SLICONF_EMB;
+ config->dw1_flags = cpu_to_le32(flags);
+ config->payload_len = cpu_to_le32(length);
+ return config->payload.embed;
+ }
+
+ flags = SLI4_SLICONF_PMDCMD_VAL_1;
+ flags &= ~SLI4_SLICONF_EMB;
+ config->dw1_flags = cpu_to_le32(flags);
+
+ config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
+ config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
+ config->payload.mem.length =
+ cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
+ config->payload_len = cpu_to_le32(dma->size);
+ /* save pointer to DMA for BMBX dumping purposes */
+ sli4->bmbx_non_emb_pmd = dma;
+ return dma->virt;
+}
+
+/**
+ * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @qmem: DMA memory for queue.
+ * @eq_id: EQ id associated with this cq.
+ * Return: status -EIO/0.
+ */
+static int
+sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 eq_id)
+{
+ struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages = 0;
+ size_t cmd_size = 0;
+ u32 page_size = 0;
+ u32 n_cqe = 0;
+ u32 dw5_flags = 0;
+ u16 dw6w1_arm = 0;
+ __le32 len;
+
+ /* First calculate number of pages and the mailbox cmd length */
+ n_cqe = qmem->size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = SZ_4K;
+ break;
+ case 4096:
+ page_size = SZ_8K;
+ break;
+ default:
+ return -EIO;
+ }
+ num_pages = sli_page_count(qmem->size, page_size);
+
+ cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2)
+ + SZ_DMAADDR * num_pages;
+
+ cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL);
+ if (!cqv2)
+ return -EIO;
+
+ len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages);
+ sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON,
+ CMD_V2, len);
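+	/* the page_size field is expressed in units of SLI_PAGE_SIZE */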
+ cqv2->page_size = page_size / SLI_PAGE_SIZE;
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
+ cqv2->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES)
+ return -EIO;
+
+ switch (num_pages) {
+ case 1:
+ dw5_flags |= SLI4_CQ_CNT_VAL(256);
+ break;
+ case 2:
+ dw5_flags |= SLI4_CQ_CNT_VAL(512);
+ break;
+ case 4:
+ dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+ break;
+ case 8:
+ dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+ cqv2->cqe_count = cpu_to_le16(n_cqe);
+ break;
+ default:
+ efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID;
+
+ dw5_flags |= SLI4_CREATE_CQV2_EVT;
+ dw5_flags |= SLI4_CREATE_CQV2_VALID;
+
+ cqv2->dw5_flags = cpu_to_le32(dw5_flags);
+ cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm);
+ cqv2->eq_id = cpu_to_le16(eq_id);
+
+ for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+ cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem)
+{
+ struct sli4_rqst_cmn_create_eq *eq;
+ u32 p;
+ uintptr_t addr;
+ u16 num_pages;
+ u32 dw5_flags = 0;
+ u32 dw6_flags = 0, ver;
+
+ eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq),
+ NULL);
+ if (!eq)
+ return -EIO;
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ ver = CMD_V2;
+ else
+ ver = CMD_V0;
+
+ sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON,
+ ver, SLI4_RQST_PYLD_LEN(cmn_create_eq));
+
+ /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
+ num_pages = qmem->size / SLI_PAGE_SIZE;
+ eq->num_pages = cpu_to_le16(num_pages);
+
+ switch (num_pages) {
+ case 1:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(1024);
+ break;
+ case 2:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(2048);
+ break;
+ case 4:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(4096);
+ break;
+ default:
+ efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_EQ_AUTOVALID;
+
+ dw5_flags |= SLI4_CREATE_EQ_VALID;
+ dw6_flags &= (~SLI4_CREATE_EQ_ARM);
+ eq->dw5_flags = cpu_to_le32(dw5_flags);
+ eq->dw6_flags = cpu_to_le32(dw6_flags);
+ eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI);
+
+ for (p = 0, addr = qmem->phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr));
+ eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 cq_id)
+{
+ struct sli4_rqst_cmn_create_mq_ext *mq;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages;
+ u16 dw6w1_flags = 0;
+
+ mq = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL);
+ if (!mq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_create_mq_ext));
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
+ num_pages = qmem->size / SLI_PAGE_SIZE;
+ mq->num_pages = cpu_to_le16(num_pages);
+ switch (num_pages) {
+ case 1:
+ dw6w1_flags |= SLI4_MQE_SIZE_16;
+ break;
+ case 2:
+ dw6w1_flags |= SLI4_MQE_SIZE_32;
+ break;
+ case 4:
+ dw6w1_flags |= SLI4_MQE_SIZE_64;
+ break;
+ case 8:
+ dw6w1_flags |= SLI4_MQE_SIZE_128;
+ break;
+ default:
+ efc_log_info(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL);
+
+ if (sli4->params.mq_create_version) {
+ mq->cq_id_v1 = cpu_to_le16(cq_id);
+ mq->hdr.dw3_version = cpu_to_le32(CMD_V1);
+ } else {
+ dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT);
+ }
+ mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL);
+
+ mq->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+ for (p = 0, addr = qmem->phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id)
+{
+ struct sli4_rqst_wq_create *wq;
+ u32 p;
+ uintptr_t addr;
+ u32 page_size = 0;
+ u32 n_wqe = 0;
+ u16 num_pages;
+
+ wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create),
+ NULL);
+ if (!wq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V1, SLI4_RQST_PYLD_LEN(wq_create));
+ n_wqe = qmem->size / sli4->wqe_size;
+
+ switch (qmem->size) {
+ case 4096:
+ case 8192:
+ case 16384:
+ case 32768:
+ page_size = SZ_4K;
+ break;
+ case 65536:
+ page_size = SZ_8K;
+ break;
+ case 131072:
+ page_size = SZ_16K;
+ break;
+ case 262144:
+ page_size = SZ_32K;
+ break;
+ case 524288:
+ page_size = SZ_64K;
+ break;
+ default:
+ return -EIO;
+ }
+
+	/* valid values for number of pages (num_pages): 1-8 */
+ num_pages = sli_page_count(qmem->size, page_size);
+ wq->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES)
+ return -EIO;
+
+ wq->cq_id = cpu_to_le16(cq_id);
+
+ wq->page_size = page_size / SLI_PAGE_SIZE;
+
+ if (sli4->wqe_size == SLI4_WQE_EXT_BYTES)
+ wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE;
+ else
+ wq->wqe_size_byte |= SLI4_WQE_SIZE;
+
+ wq->wqe_count = cpu_to_le16(n_wqe);
+
+ for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+ wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 cq_id, u16 buffer_size)
+{
+ struct sli4_rqst_rq_create_v1 *rq;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages;
+
+ rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
+ NULL);
+ if (!rq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1));
+ /* Disable "no buffer warnings" to avoid Lancer bug */
+ rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
+
+ /* valid values for number of pages: 1-8 (sec 4.5.6) */
+ num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+ rq->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages ||
+ num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) {
+ efc_log_info(sli4, "num_pages %d not valid, max %d\n",
+ num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES);
+ return -EIO;
+ }
+
+	/* RQE count is the total number of entries (not log2 of the count) */
+ rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
+
+ rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
+
+ rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
+
+ if (buffer_size < sli4->rq_min_buf_size ||
+ buffer_size > sli4->rq_max_buf_size) {
+ efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n",
+ buffer_size, sli4->rq_min_buf_size,
+ sli4->rq_max_buf_size);
+ return -EIO;
+ }
+ rq->buffer_size = cpu_to_le32(buffer_size);
+
+ rq->cq_id = cpu_to_le16(cq_id);
+
+ for (p = 0, addr = qmem->phys;
+ p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
+ struct sli4_queue *qs[], u32 base_cq_id,
+ u32 header_buffer_size,
+ u32 payload_buffer_size, struct efc_dma *dma)
+{
+ struct sli4_rqst_rq_create_v2 *req = NULL;
+ u32 i, p, offset = 0;
+ u32 payload_size, page_count;
+ uintptr_t addr;
+ u32 num_pages;
+ __le32 len;
+
+ page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) +
+ SZ_DMAADDR * page_count,
+ sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+ dma->size = payload_size;
+ dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+ &dma->phys, GFP_DMA);
+ if (!dma->virt)
+ return -EIO;
+
+ memset(dma->virt, 0, payload_size);
+
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+ if (!req)
+ return -EIO;
+
+ len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count);
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V2, len);
+ /* Fill Payload fields */
+ req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB;
+ num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
+ req->num_pages = cpu_to_le16(num_pages);
+ req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
+ req->rqe_size_byte |= SLI4_RQE_SIZE_8;
+ req->page_size = SLI4_RQ_PAGE_SIZE_4096;
+ req->rq_count = num_rqs;
+ req->base_cq_id = cpu_to_le16(base_cq_id);
+ req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
+ req->payload_buffer_size = cpu_to_le16(payload_buffer_size);
+
+ for (i = 0; i < num_rqs; i++) {
+ for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ req->page_phys_addr[offset].low =
+ cpu_to_le32(lower_32_bits(addr));
+ req->page_phys_addr[offset].high =
+ cpu_to_le32(upper_32_bits(addr));
+ offset++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
+{
+ if (!q->dma.size)
+ return;
+
+ dma_free_coherent(&sli4->pci->dev, q->dma.size,
+ q->dma.virt, q->dma.phys);
+ memset(&q->dma, 0, sizeof(struct efc_dma));
+}
+
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+ size_t size, u32 n_entries, u32 align)
+{
+ if (q->dma.virt) {
+ efc_log_err(sli4, "%s failed\n", __func__);
+ return -EIO;
+ }
+
+ memset(q, 0, sizeof(struct sli4_queue));
+
+ q->dma.size = size * n_entries;
+ q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
+ &q->dma.phys, GFP_DMA);
+ if (!q->dma.virt) {
+ memset(&q->dma, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
+ return -EIO;
+ }
+
+ memset(q->dma.virt, 0, size * n_entries);
+
+ spin_lock_init(&q->lock);
+
+ q->type = qtype;
+ q->size = size;
+ q->length = n_entries;
+
+ if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
+		/* For Prism, the valid-bit phase is flipped after each
+		 * complete sweep through the EQ/CQ
+		 */
+ q->phase = 1;
+ }
+
+	/* Limit to half the queue size per interrupt */
+ q->proc_limit = n_entries / 2;
+
+ if (q->type == SLI4_QTYPE_EQ)
+ q->posted_limit = q->length / 2;
+ else
+ q->posted_limit = 64;
+
+ return 0;
+}
+
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
+ u32 n_entries, u32 buffer_size,
+ struct sli4_queue *cq, bool is_hdr)
+{
+ if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
+ n_entries, SLI_PAGE_SIZE))
+ return -EIO;
+
+ if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
+ buffer_size))
+ goto error;
+
+ if (__sli_create_queue(sli4, q))
+ goto error;
+
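+	/*
+	 * In an RQ pair the header RQ is assigned an even queue id and the
+	 * payload RQ an odd id; reject an allocation that breaks the pairing.
+	 */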
+ if (is_hdr && q->id & 1) {
+ efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
+ goto error;
+ } else if (!is_hdr && (q->id & 1) == 0) {
+ efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
+ goto error;
+ }
+
+ if (is_hdr)
+ q->u.flag |= SLI4_QUEUE_FLAG_HDR;
+ else
+ q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+ return 0;
+
+error:
+ __sli_queue_destroy(sli4, q);
+ return -EIO;
+}
+
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
+ struct sli4_queue *qs[], u32 base_cq_id,
+ u32 n_entries, u32 header_buffer_size,
+ u32 payload_buffer_size)
+{
+ u32 i;
+ struct efc_dma dma = {0};
+ struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
+ void __iomem *db_regaddr = NULL;
+ u32 num_rqs = num_rq_pairs * 2;
+
+ for (i = 0; i < num_rqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
+ SLI4_RQE_SIZE, n_entries,
+ SLI_PAGE_SIZE)) {
+ goto error;
+ }
+ }
+
+ if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
+ header_buffer_size, payload_buffer_size,
+ &dma)) {
+ goto error;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
+ goto error;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+ else
+ db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+
+ rsp = dma.virt;
+ if (rsp->hdr.status) {
+ efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n",
+ rsp->hdr.status, rsp->hdr.additional_status);
+ goto error;
+ }
+
+ for (i = 0; i < num_rqs; i++) {
+ qs[i]->id = i + le16_to_cpu(rsp->q_id);
+ if ((qs[i]->id & 1) == 0)
+ qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR;
+ else
+ qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+ qs[i]->db_regaddr = db_regaddr;
+ }
+
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rqs; i++)
+ __sli_queue_destroy(sli4, qs[i]);
+
+ if (dma.virt)
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+ dma.phys);
+
+ return -EIO;
+}
+
+static int
+sli_res_sli_config(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_sli_config *sli_config = buf;
+
+ /* sanity check */
+ if (!buf || sli_config->hdr.command !=
+ SLI4_MBX_CMD_SLI_CONFIG) {
+ efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf,
+ buf ? sli_config->hdr.command : -1);
+ return -EIO;
+ }
+
+ if (le16_to_cpu(sli_config->hdr.status))
+ return le16_to_cpu(sli_config->hdr.status);
+
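+	/* for an embedded command, payload byte 4 is the response header status */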
+ if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
+ return sli_config->payload.embed[4];
+
+ efc_log_info(sli4, "external buffers not supported\n");
+ return -EIO;
+}
+
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
+{
+ struct sli4_rsp_cmn_create_queue *res_q = NULL;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail %s\n",
+ SLI4_QNAME[q->type]);
+ return -EIO;
+ }
+ if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad status create %s\n",
+ SLI4_QNAME[q->type]);
+ return -EIO;
+ }
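+
+	/* the queue-create response is embedded in the SLI_CONFIG payload */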
+ res_q = (void *)((u8 *)sli4->bmbx.virt +
+ offsetof(struct sli4_cmd_sli_config, payload));
+
+ if (res_q->hdr.status) {
+ efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n",
+ SLI4_QNAME[q->type], res_q->hdr.status,
+ res_q->hdr.additional_status);
+ return -EIO;
+ }
+ q->id = le16_to_cpu(res_q->q_id);
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+ break;
+ case SLI4_QTYPE_CQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+ break;
+ case SLI4_QTYPE_MQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG;
+ break;
+ case SLI4_QTYPE_RQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+ break;
+ case SLI4_QTYPE_WQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype)
+{
+ u32 size = 0;
+
+ switch (qtype) {
+ case SLI4_QTYPE_EQ:
+ size = sizeof(u32);
+ break;
+ case SLI4_QTYPE_CQ:
+ size = 16;
+ break;
+ case SLI4_QTYPE_MQ:
+ size = 256;
+ break;
+ case SLI4_QTYPE_WQ:
+ size = sli4->wqe_size;
+ break;
+ case SLI4_QTYPE_RQ:
+ size = SLI4_RQE_SIZE;
+ break;
+ default:
+ efc_log_info(sli4, "unknown queue type %d\n", qtype);
+ return -1;
+ }
+ return size;
+}
+
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype,
+ struct sli4_queue *q, u32 n_entries,
+ struct sli4_queue *assoc)
+{
+ int size;
+ u32 align = 0;
+
+ /* get queue size */
+ size = sli_get_queue_entry_size(sli4, qtype);
+ if (size < 0)
+ return -EIO;
+ align = SLI_PAGE_SIZE;
+
+ if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
+ return -EIO;
+
+ switch (qtype) {
+ case SLI4_QTYPE_EQ:
+ if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_CQ:
+ if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
+ assoc ? assoc->id : 0) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_MQ:
+ assoc->u.flag |= SLI4_QUEUE_FLAG_MQ;
+ if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt,
+ &q->dma, assoc->id) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_WQ:
+ if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
+ assoc ? assoc->id : 0) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ default:
+ efc_log_info(sli4, "unknown queue type %d\n", qtype);
+ }
+
+ __sli_queue_destroy(sli4, q);
+ return -EIO;
+}
+
+static int sli_cmd_cq_set_create(struct sli4 *sli4,
+ struct sli4_queue *qs[], u32 num_cqs,
+ struct sli4_queue *eqs[],
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL;
+ uintptr_t addr;
+ u32 i, offset = 0, page_bytes = 0, payload_size;
+ u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
+ u32 dw5_flags = 0;
+ u16 dw6w1_flags = 0;
+ __le32 req_len;
+
+ n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = 1;
+ break;
+ case 4096:
+ page_size = 2;
+ break;
+ default:
+ return -EIO;
+ }
+
+ page_bytes = page_size * SLI_PAGE_SIZE;
+ num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
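+	/* payload must be large enough for both the request and the response */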
+ payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) +
+ (SZ_DMAADDR * num_pages_cq * num_cqs),
+ sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+ dma->size = payload_size;
+ dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+ &dma->phys, GFP_DMA);
+ if (!dma->virt)
+ return -EIO;
+
+ memset(dma->virt, 0, payload_size);
+
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+ if (!req)
+ return -EIO;
+
+ req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0,
+ SZ_DMAADDR * num_pages_cq * num_cqs);
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC,
+ CMD_V0, req_len);
+ req->page_size = page_size;
+
+ req->num_pages = cpu_to_le16(num_pages_cq);
+ switch (num_pages_cq) {
+ case 1:
+ dw5_flags |= SLI4_CQ_CNT_VAL(256);
+ break;
+ case 2:
+ dw5_flags |= SLI4_CQ_CNT_VAL(512);
+ break;
+ case 4:
+ dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+ break;
+ case 8:
+ dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+ dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT);
+ break;
+ default:
+ efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq);
+ return -EIO;
+ }
+
+ dw5_flags |= SLI4_CREATE_CQSETV0_EVT;
+ dw5_flags |= SLI4_CREATE_CQSETV0_VALID;
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID;
+
+ dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM;
+
+ req->dw5_flags = cpu_to_le32(dw5_flags);
+ req->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+
+ req->num_cq_req = cpu_to_le16(num_cqs);
+
+ /* Fill page addresses of all the CQs. */
+ for (i = 0; i < num_cqs; i++) {
+ req->eq_id[i] = cpu_to_le16(eqs[i]->id);
+ for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
+ p++, addr += page_bytes) {
+ req->page_phys_addr[offset].low =
+ cpu_to_le32(lower_32_bits(addr));
+ req->page_phys_addr[offset].high =
+ cpu_to_le32(upper_32_bits(addr));
+ offset++;
+ }
+ }
+
+ return 0;
+}
+
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[],
+ u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])
+{
+ u32 i;
+ struct efc_dma dma = {0};
+ struct sli4_rsp_cmn_create_queue_set *res;
+ void __iomem *db_regaddr;
+
+ /* Align the queue DMA memory */
+ for (i = 0; i < num_cqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES,
+ n_entries, SLI_PAGE_SIZE))
+ goto error;
+ }
+
+ if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
+ goto error;
+
+ if (sli_bmbx_command(sli4))
+ goto error;
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+ else
+ db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+
+ res = dma.virt;
+ if (res->hdr.status) {
+ efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n",
+ res->hdr.status, res->hdr.additional_status);
+ goto error;
+ }
+
+ /* Check if we got all requested CQs. */
+ if (le16_to_cpu(res->num_q_allocated) != num_cqs) {
+		efc_log_crit(sli4, "Requested CQ count doesn't match allocated count\n");
+ goto error;
+ }
+ /* Fill the resp cq ids. */
+ for (i = 0; i < num_cqs; i++) {
+ qs[i]->id = le16_to_cpu(res->q_id) + i;
+ qs[i]->db_regaddr = db_regaddr;
+ }
+
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++)
+ __sli_queue_destroy(sli4, qs[i]);
+
+ if (dma.virt)
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+ dma.phys);
+
+ return -EIO;
+}
+
+static int
+sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id)
+{
+ struct sli4_rqst_cmn_destroy_q *req;
+
+ /* Payload length must accommodate both request and response */
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
+ SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, opc, subsystem,
+ CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q));
+ req->q_id = cpu_to_le16(q_id);
+
+ return 0;
+}
+
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
+ u32 destroy_queues, u32 free_memory)
+{
+ int rc = 0;
+ u8 opcode, subsystem;
+ struct sli4_rsp_hdr *res;
+
+ if (!q) {
+ efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
+ return -EIO;
+ }
+
+ if (!destroy_queues)
+ goto free_mem;
+
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ opcode = SLI4_CMN_DESTROY_EQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_CQ:
+ opcode = SLI4_CMN_DESTROY_CQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_MQ:
+ opcode = SLI4_CMN_DESTROY_MQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_WQ:
+ opcode = SLI4_OPC_WQ_DESTROY;
+ subsystem = SLI4_SUBSYSTEM_FC;
+ break;
+ case SLI4_QTYPE_RQ:
+ opcode = SLI4_OPC_RQ_DESTROY;
+ subsystem = SLI4_SUBSYSTEM_FC;
+ break;
+ default:
+ efc_log_info(sli4, "bad queue type %d\n", q->type);
+ rc = -EIO;
+ goto free_mem;
+ }
+
+ rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
+ if (rc)
+ goto free_mem;
+
+ rc = sli_bmbx_command(sli4);
+ if (rc)
+ goto free_mem;
+
+ rc = sli_res_sli_config(sli4, sli4->bmbx.virt);
+ if (rc)
+ goto free_mem;
+
+ res = (void *)((u8 *)sli4->bmbx.virt +
+ offsetof(struct sli4_cmd_sli_config, payload));
+ if (res->status) {
+ efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n",
+ SLI4_QNAME[q->type], res->status,
+ res->additional_status);
+ rc = -EIO;
+ goto free_mem;
+ }
+
+free_mem:
+ if (free_memory)
+ __sli_queue_destroy(sli4, q);
+
+ return rc;
+}
+
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+ u32 val;
+ unsigned long flags = 0;
+ u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+ u32 val = 0;
+ unsigned long flags = 0;
+ u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ break;
+ case SLI4_QTYPE_CQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_cq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ break;
+ default:
+ efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n",
+ SLI4_QNAME[q->type]);
+ }
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ if (sli4->params.perf_wq_id_association)
+ sli_set_wq_id_association(entry, q->id);
+
+ memcpy(qe, entry, q->size);
+ val = sli_format_wq_db_data(q->id);
+
+ writel(val, q->db_regaddr);
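+	/* queue length is a power of two, so the mask wraps the index */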
+ q->index = (q->index + 1) & (q->length - 1);
+
+ return qindex;
+}
+
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ memcpy(qe, entry, q->size);
+ val = sli_format_mq_db_data(q->id);
+ writel(val, q->db_regaddr);
+ q->index = (q->index + 1) & (q->length - 1);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return qindex;
+}
+
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ memcpy(qe, entry, q->size);
+
+	/*
+	 * In an RQ pair, an RQ contains either the FC header
+	 * (i.e. is_hdr == true) or the payload.
+	 *
+	 * Don't ring the doorbell for the payload RQ.
+	 */
+ if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
+ goto skip;
+
+ val = sli_format_rq_db_data(q->id);
+ writel(val, q->db_regaddr);
+skip:
+ q->index = (q->index + 1) & (q->length - 1);
+
+ return qindex;
+}
+
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+ u16 wflags = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->index * q->size;
+
+ /* Check if eqe is valid */
+ wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags);
+
+ if ((wflags & SLI4_EQE_VALID) != q->phase) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+ wflags &= ~SLI4_EQE_VALID;
+ ((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags);
+ }
+
+ memcpy(entry, qe, q->size);
+ q->index = (q->index + 1) & (q->length - 1);
+ q->n_posted++;
+ /*
+ * For prism, the phase value will be used
+ * to check the validity of eq/cq entries.
+ * The value toggles after a complete sweep
+ * through the queue.
+ */
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+ q->phase ^= (u16)0x1;
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+ u32 dwflags = 0;
+ bool valid_bit_set;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->index * q->size;
+
+ /* Check if cqe is valid */
+ dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags);
+ valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0;
+
+ if (valid_bit_set != q->phase) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+ dwflags &= ~SLI4_MCQE_VALID;
+ ((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags);
+ }
+
+ memcpy(entry, qe, q->size);
+ q->index = (q->index + 1) & (q->length - 1);
+ q->n_posted++;
+ /*
+ * For prism, the phase value will be used
+ * to check the validity of eq/cq entries.
+ * The value toggles after a complete sweep
+ * through the queue.
+ */
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+ q->phase ^= (u16)0x1;
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->u.r_idx * q->size;
+
+ /* Check if mqe is valid */
+ if (q->index == q->u.r_idx) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ memcpy(entry, qe, q->size);
+ q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id)
+{
+ struct sli4_eqe *eqe = (void *)buf;
+ int rc = 0;
+ u16 flags = 0;
+ u16 majorcode;
+ u16 minorcode;
+
+ if (!buf || !cq_id) {
+ efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n",
+ sli4, buf, cq_id);
+ return -EIO;
+ }
+
+ flags = le16_to_cpu(eqe->dw0w0_flags);
+ majorcode = (flags & SLI4_EQE_MJCODE) >> 1;
+ minorcode = (flags & SLI4_EQE_MNCODE) >> 4;
+ switch (majorcode) {
+ case SLI4_MAJOR_CODE_STANDARD:
+ *cq_id = le16_to_cpu(eqe->resource_id);
+ break;
+ case SLI4_MAJOR_CODE_SENTINEL:
+ efc_log_info(sli4, "sentinel EQE\n");
+ rc = SLI4_EQE_STATUS_EQ_FULL;
+ break;
+ default:
+ efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n",
+ majorcode, minorcode);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+ enum sli4_qentry *etype, u16 *q_id)
+{
+ int rc = 0;
+
+ if (!cq || !cqe || !etype) {
+ efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
+ sli4, cq, cqe, etype, q_id);
+ return -EINVAL;
+ }
+
+ /* Parse a CQ entry to retrieve the event type and the queue id */
+ if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) {
+ struct sli4_mcqe *mcqe = (void *)cqe;
+
+ if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) {
+ *etype = SLI4_QENTRY_ASYNC;
+ } else {
+ *etype = SLI4_QENTRY_MQ;
+ rc = sli_cqe_mq(sli4, mcqe);
+ }
+ *q_id = -1;
+ } else {
+ rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
+ }
+
+ return rc;
+}
+
+int
+sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type,
+ bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id)
+{
+ struct sli4_abort_wqe *abort = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ switch (type) {
+ case SLI4_ABORT_XRI:
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ if (mask) {
+ efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask",
+ mask, ids);
+ mask = 0;
+ }
+ break;
+ case SLI4_ABORT_ABORT_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
+ break;
+ case SLI4_ABORT_REQUEST_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
+ break;
+ default:
+ efc_log_info(sli, "unsupported type %#x\n", type);
+ return -EIO;
+ }
+
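+	/* when send_abts is false, set IA to inhibit sending an ABTS */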
+ abort->ia_ir_byte |= send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_mask = cpu_to_le32(mask);
+ abort->t_tag = cpu_to_le32(ids);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(tag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(cq_id);
+ abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE;
+
+ return 0;
+}
+
+int
+sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ struct sli_els_params *params)
+{
+ struct sli4_els_request64_wqe *els = buf;
+ struct sli4_sge *sge = sgl->virt;
+ bool is_fabric = false;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli->wqe_size);
+
+ bptr = &els->els_request_payload;
+ if (sli->params.sgl_pre_registered) {
+ els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL;
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->xmit_len & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ ((2 * sizeof(struct sli4_sge)) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ els->els_request_payload_length = cpu_to_le32(params->xmit_len);
+ els->max_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ els->xri_tag = cpu_to_le16(params->xri);
+ els->timer = params->timeout;
+ els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_REQUEST64;
+
+ els->request_tag = cpu_to_le16(params->tag);
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD;
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD;
+
+ /* figure out the ELS_ID value from the request buffer */
+
+ switch (params->cmd) {
+ case ELS_LOGO:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT;
+ if (params->rpi_registered) {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ }
+ if (params->d_id == FC_FID_FLOGI)
+ is_fabric = true;
+ break;
+ case ELS_FDISC:
+ if (params->d_id == FC_FID_FLOGI)
+ is_fabric = true;
+ if (params->s_id == 0) {
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT;
+ is_fabric = true;
+ } else {
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ }
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
+ break;
+ case ELS_FLOGI:
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+		/*
+		 * Set SP here since REG_VPI has not been done yet; this may
+		 * need to be skipped once VFI/VPI registration has completed.
+		 *
+		 * Use the FC_ID of the SPORT if it has been allocated,
+		 * otherwise use an S_ID of zero.
+		 */
+ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
+ if (params->s_id != U32_MAX)
+ els->sid_sp_dword |= cpu_to_le32(params->s_id);
+ break;
+ case ELS_PLOGI:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT;
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ break;
+ case ELS_SCR:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ break;
+ default:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ if (params->rpi_registered) {
+ els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI <<
+ SLI4_REQ_WQE_CT_SHFT);
+ els->context_tag = cpu_to_le16(params->vpi);
+ } else {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ }
+ break;
+ }
+
+ if (is_fabric)
+ els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC;
+ else
+ els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
+
+ els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) !=
+ SLI4_GENERIC_CONTEXT_RPI)
+ els->remote_id_dword = cpu_to_le32(params->d_id);
+
+ if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) ==
+ SLI4_GENERIC_CONTEXT_VPI)
+ els->temporary_rpi = cpu_to_le16(params->rpi);
+
+ return 0;
+}
+
+int
+sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout)
+{
+ struct sli4_fcp_icmnd64_wqe *icmnd = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &icmnd->bde;
+ if (sli->params.sgl_pre_registered) {
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL;
+
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
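+	/* payload length spans the FCP_CMND (sge[0]) and FCP_RSP (sge[1]) */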
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ icmnd->payload_offset_length = cpu_to_le16(len);
+ icmnd->xri_tag = cpu_to_le16(xri);
+ icmnd->context_tag = cpu_to_le16(rpi);
+ icmnd->timer = timeout;
+
+ /* WQE word 4 contains read transfer length */
+ icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT;
+ icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ icmnd->command = SLI4_WQE_FCP_ICMND64;
+ icmnd->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT;
+
+ icmnd->abort_tag = cpu_to_le32(xri);
+
+ icmnd->request_tag = cpu_to_le16(tag);
+ icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1;
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2;
+ icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE;
+ icmnd->cq_id = cpu_to_le16(cq_id);
+
+ return 0;
+}
+
+int
+sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag,
+ u16 cq_id, u32 rpi, u32 rnode_fcid,
+ u8 dif, u8 bs, u8 timeout)
+{
+ struct sli4_fcp_iread64_wqe *iread = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 sge_flags, len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+
+ sge = sgl->virt;
+ bptr = &iread->bde;
+ if (sli->params.sgl_pre_registered) {
+ iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL;
+
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = sge[0].buffer_address_low;
+ bptr->u.blp.high = sge[0].buffer_address_high;
+ } else {
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+
+ /*
+ * fill out fcp_cmnd buffer len and change resp buffer to be of
+ * type "skip" (note: response will still be written to sge[1]
+ * if necessary)
+ */
+ len = le32_to_cpu(sge[0].buffer_length);
+ iread->fcp_cmd_buffer_length = cpu_to_le16(len);
+
+ sge_flags = le32_to_cpu(sge[1].dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ sge[1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ iread->payload_offset_length = cpu_to_le16(len);
+ iread->total_transfer_length = cpu_to_le32(xfer_len);
+
+ iread->xri_tag = cpu_to_le16(xri);
+ iread->context_tag = cpu_to_le16(rpi);
+
+ iread->timer = timeout;
+
+ /* WQE word 4 contains read transfer length */
+ iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT;
+ iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ iread->command = SLI4_WQE_FCP_IREAD64;
+ iread->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT;
+ iread->dif_ct_bs_byte |= dif;
+ iread->dif_ct_bs_byte |= bs << SLI4_IR_WQE_BS_SHFT;
+
+ iread->abort_tag = cpu_to_le32(xri);
+
+ iread->request_tag = cpu_to_le16(tag);
+ iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1;
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2;
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD;
+ iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE;
+ iread->cq_id = cpu_to_le16(cq_id);
+
+ if (sli->params.perf_hint) {
+ bptr = &iread->first_data_bde;
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low =
+ sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high =
+ sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
+int
+sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len,
+ u32 first_burst, u16 xri, u16 tag,
+ u16 cq_id, u32 rpi,
+ u32 rnode_fcid,
+ u8 dif, u8 bs, u8 timeout)
+{
+ struct sli4_fcp_iwrite64_wqe *iwrite = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 sge_flags, min, len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &iwrite->bde;
+ if (sli->params.sgl_pre_registered) {
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL;
+
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE;
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL;
+
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+
+ /*
+ * fill out fcp_cmnd buffer len and change resp buffer to be of
+ * type "skip" (note: response will still be written to sge[1]
+ * if necessary)
+ */
+ len = le32_to_cpu(sge[0].buffer_length);
+ iwrite->fcp_cmd_buffer_length = cpu_to_le16(len);
+ sge_flags = le32_to_cpu(sge[1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ sge[1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ iwrite->payload_offset_length = cpu_to_le16(len);
+ iwrite->total_transfer_length = cpu_to_le16(xfer_len);
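+	/* the first burst cannot exceed the total transfer length */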
+ min = (xfer_len < first_burst) ? xfer_len : first_burst;
+ iwrite->initial_transfer_length = cpu_to_le16(min);
+
+ iwrite->xri_tag = cpu_to_le16(xri);
+ iwrite->context_tag = cpu_to_le16(rpi);
+
+ iwrite->timer = timeout;
+ /* WQE word 4 contains read transfer length */
+ iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT;
+ iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ iwrite->command = SLI4_WQE_FCP_IWRITE64;
+ iwrite->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT;
+ iwrite->dif_ct_bs_byte |= dif;
+ iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT;
+
+ iwrite->abort_tag = cpu_to_le32(xri);
+
+ iwrite->request_tag = cpu_to_le16(tag);
+ iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1;
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2;
+ iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE;
+ iwrite->cq_id = cpu_to_le16(cq_id);
+
+ if (sli->params.perf_hint) {
+ bptr = &iwrite->first_data_bde;
+
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high = sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
+int
+sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_treceive64_wqe *trecv = buf;
+ struct sli4_fcp_128byte_wqe *trecv_128 = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &trecv->bde;
+ if (sli->params.sgl_pre_registered) {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL;
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length)
+ & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+
+ trecv->payload_offset_length = sge[0].buffer_length;
+ } else {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif &&
+ params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length)
+ & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[2].buffer_address_low;
+ bptr->u.data.high = sge[2].buffer_address_high;
+ } else {
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+ }
+
+ trecv->relative_offset = cpu_to_le32(params->offset);
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC;
+
+ trecv->xri_tag = cpu_to_le16(params->xri);
+
+ trecv->context_tag = cpu_to_le16(params->rpi);
+
+ /* WQE uses relative offset */
+ trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT;
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
+ trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR;
+
+ trecv->command = SLI4_WQE_FCP_TRECEIVE64;
+ trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ trecv->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT;
+ trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT;
+
+ trecv->remote_xid = cpu_to_le16(params->ox_id);
+
+ trecv->request_tag = cpu_to_le16(params->tag);
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD;
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2;
+
+ trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE;
+
+ trecv->cq_id = cpu_to_le16(cq_id);
+
+ trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len);
+
+ if (sli->params.perf_hint) {
+ bptr = &trecv->first_data_bde;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high = sge[first_data_sge].buffer_address_high;
+ }
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE;
+ trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
+ trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID;
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES;
+ trecv_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
+int
+sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf,
+ struct efc_dma *sgl, u32 first_data_sge,
+ u16 sec_xri, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ int rc;
+
+ rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge,
+ cq_id, dif, bs, params);
+ if (!rc) {
+ struct sli4_fcp_treceive64_wqe *trecv = buf;
+
+ trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
+ trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri);
+ }
+ return rc;
+}
+
+int
+sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_trsp64_wqe *trsp = buf;
+ struct sli4_fcp_128byte_wqe *trsp_128 = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
+ trsp->class_ag_byte |= SLI4_TRSP_WQE_AG;
+ } else {
+ struct sli4_sge *sge = sgl->virt;
+ struct sli4_bde *bptr;
+
+ if (sli4->params.sgl_pre_registered || port_owned)
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE;
+ else
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL;
+ bptr = &trsp->bde;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+
+ trsp->fcp_response_length = cpu_to_le32(params->xmit_len);
+ }
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC;
+
+ trsp->xri_tag = cpu_to_le16(params->xri);
+ trsp->rpi = cpu_to_le16(params->rpi);
+
+ trsp->command = SLI4_WQE_FCP_TRSP64;
+ trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ trsp->remote_xid = cpu_to_le16(params->ox_id);
+ trsp->request_tag = cpu_to_le16(params->tag);
+ if (params->flags & SLI4_IO_DNRX)
+ trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX;
+ else
+ trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX;
+
+ trsp->lloc1_appid |= 0x1;
+ trsp->cq_id = cpu_to_le16(cq_id);
+ trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE;
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE;
+ trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
+ trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID;
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES;
+ trsp_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
+int
+sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_tsend64_wqe *tsend = buf;
+ struct sli4_fcp_128byte_wqe *tsend_128 = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+
+ bptr = &tsend->bde;
+ if (sli4->params.sgl_pre_registered) {
+ tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL;
+
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ /* TSEND64_WQE specifies first two SGE are skipped (3rd is
+ * valid)
+ */
+ bptr->u.data.low = sge[2].buffer_address_low;
+ bptr->u.data.high = sge[2].buffer_address_high;
+ } else {
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif &&
+ params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ /*
+ * TSEND64_WQE specifies first two SGE are skipped
+ * (i.e. 3rd is valid)
+ */
+ bptr->u.data.low =
+ sge[2].buffer_address_low;
+ bptr->u.data.high =
+ sge[2].buffer_address_high;
+ } else {
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+ }
+
+ tsend->relative_offset = cpu_to_le32(params->offset);
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ tsend->dw10byte2 |= SLI4_TSEND_XC;
+
+ tsend->xri_tag = cpu_to_le16(params->xri);
+
+ tsend->rpi = cpu_to_le16(params->rpi);
+ /* WQE uses relative offset */
+ tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT;
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
+ tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR;
+
+ tsend->command = SLI4_WQE_FCP_TSEND64;
+ tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT;
+ tsend->ct_byte |= dif;
+ tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT;
+
+ tsend->remote_xid = cpu_to_le16(params->ox_id);
+
+ tsend->request_tag = cpu_to_le16(params->tag);
+
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2;
+
+ tsend->cq_id = cpu_to_le16(cq_id);
+
+ tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE;
+
+ tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len);
+
+ if (sli4->params.perf_hint) {
+ bptr = &tsend->first_data_bde;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low =
+ sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high =
+ sge[first_data_sge].buffer_address_high;
+ }
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ tsend->dw10byte2 |= SLI4_TSEND_CCPE;
+ tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(tsend->dw10byte2 & SLI4_TSEND_EAT)) {
+ tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID;
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES;
+ tsend_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
+int
+sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_ct_params *params)
+{
+ struct sli4_gen_request64_wqe *gen = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &gen->bde;
+
+ if (sli4->params.sgl_pre_registered) {
+ gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL;
+
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->xmit_len & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ ((2 * sizeof(struct sli4_sge)) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ gen->request_payload_length = cpu_to_le32(params->xmit_len);
+ gen->max_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ gen->df_ctl = params->df_ctl;
+ gen->type = params->type;
+ gen->r_ctl = params->r_ctl;
+
+ gen->xri_tag = cpu_to_le16(params->xri);
+
+ gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT;
+ gen->context_tag = cpu_to_le16(params->rpi);
+
+ gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3;
+
+ gen->command = SLI4_WQE_GEN_REQUEST64;
+
+ gen->timer = params->timeout;
+
+ gen->request_tag = cpu_to_le16(params->tag);
+
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD;
+
+ gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD;
+
+ gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE;
+
+ gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ return 0;
+}
+
+int
+sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr,
+ struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri,
+ u16 req_tag)
+{
+ struct sli4_send_frame_wqe *sf = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ sf->dw10flags1 |= SLI4_SF_WQE_DBDE;
+ sf->bde.bde_type_buflen = cpu_to_le32(req_len &
+ SLI4_BDE_LEN_MASK);
+ sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys));
+ sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys));
+
+ /* Copy FC header */
+ sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]);
+ sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]);
+ sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]);
+ sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]);
+ sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]);
+ sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]);
+
+ sf->frame_length = cpu_to_le32(req_len);
+
+ sf->xri_tag = cpu_to_le16(xri);
+ sf->dw7flags0 &= ~SLI4_SF_PU;
+ sf->context_tag = 0;
+
+ sf->ct_byte &= ~SLI4_SF_CT;
+ sf->command = SLI4_WQE_SEND_FRAME;
+ sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3;
+ sf->timer = timeout;
+
+ sf->request_tag = cpu_to_le16(req_tag);
+ sf->eof = eof;
+ sf->sof = sof;
+
+ sf->dw10flags1 &= ~SLI4_SF_QOSD;
+ sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1;
+ sf->dw10flags2 &= ~SLI4_SF_XC;
+
+ sf->dw10flags1 |= SLI4_SF_XBL;
+
+ sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE;
+ sf->cq_id = cpu_to_le16(0xffff);
+
+ return 0;
+}
+
+int
+sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
+ struct sli_bls_payload *payload,
+ struct sli_bls_params *params)
+{
+ struct sli4_xmit_bls_rsp_wqe *bls = buf;
+ u32 dw_ridflags = 0;
+
+ /*
+ * Callers can either specify RPI or S_ID, but not both
+ */
+ if (params->rpi_registered && params->s_id != U32_MAX) {
+ efc_log_info(sli, "S_ID specified for attached remote node %d\n",
+ params->rpi);
+ return -EIO;
+ }
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (payload->type == SLI4_SLI_BLS_ACC) {
+ bls->payload_word0 =
+ cpu_to_le32((payload->u.acc.seq_id_last << 16) |
+ (payload->u.acc.seq_id_validity << 24));
+ bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
+ bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
+ } else if (payload->type == SLI4_SLI_BLS_RJT) {
+ bls->payload_word0 =
+ cpu_to_le32(*((u32 *)&payload->u.rjt));
+ dw_ridflags |= SLI4_BLS_RSP_WQE_AR;
+ } else {
+ efc_log_info(sli, "bad BLS type %#x\n", payload->type);
+ return -EIO;
+ }
+
+ bls->ox_id = payload->ox_id;
+ bls->rx_id = payload->rx_id;
+
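+ /*
+ * Use the RPI context when the remote node is registered; otherwise
+ * fall back to the VPI context and carry the local/remote N_Port IDs
+ * and a temporary RPI in the WQE.
+ */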
+ if (params->rpi_registered) {
+ bls->dw8flags0 |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT;
+ bls->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ bls->dw8flags0 |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
+ bls->context_tag = cpu_to_le16(params->vpi);
+
+ bls->local_n_port_id_dword |=
+ cpu_to_le32(params->s_id & 0x00ffffff);
+
+ dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
+ (params->d_id & SLI4_BLS_RSP_RID);
+
+ bls->temporary_rpi = cpu_to_le16(params->rpi);
+ }
+
+ bls->xri_tag = cpu_to_le16(params->xri);
+
+ bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ bls->command = SLI4_WQE_XMIT_BLS_RSP;
+
+ bls->request_tag = cpu_to_le16(params->tag);
+
+ bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD;
+
+ bls->remote_id_dword = cpu_to_le32(dw_ridflags);
+ bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE;
+
+ return 0;
+}
+
+int
+sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
+ struct sli_els_params *params)
+{
+ struct sli4_xmit_els_rsp64_wqe *els = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (sli->params.sgl_pre_registered)
+ els->flags2 |= SLI4_ELS_DBDE;
+ else
+ els->flags2 |= SLI4_ELS_XBL;
+
+ els->els_response_payload.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->rsp_len & SLI4_BDE_LEN_MASK));
+ els->els_response_payload.u.data.low =
+ cpu_to_le32(lower_32_bits(rsp->phys));
+ els->els_response_payload.u.data.high =
+ cpu_to_le32(upper_32_bits(rsp->phys));
+
+ els->els_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ els->xri_tag = cpu_to_le16(params->xri);
+
+ els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_RSP64;
+
+ els->request_tag = cpu_to_le16(params->tag);
+
+ els->ox_id = cpu_to_le16(params->ox_id);
+
+ els->flags2 |= SLI4_ELS_QOSD;
+
+ els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;
+
+ els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
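+ /* RPI context for a registered remote node, otherwise VPI context */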
+ if (params->rpi_registered) {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET;
+ els->context_tag = cpu_to_le16(params->rpi);
+ return 0;
+ }
+
+ els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET;
+ els->context_tag = cpu_to_le16(params->vpi);
+ els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID);
+ els->temporary_rpi = cpu_to_le16(params->rpi);
+ if (params->s_id != U32_MAX) {
+ els->sid_dw |=
+ cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID));
+ }
+
+ return 0;
+}
+
+int
+sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
+ struct sli_ct_params *params)
+{
+ struct sli4_xmit_sequence64_wqe *xmit = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!payload || !payload->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ payload, payload ? payload->virt : NULL);
+ return -EIO;
+ }
+
+ if (sli4->params.sgl_pre_registered)
+ xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE);
+ else
+ xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL);
+
+ xmit->bde.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->rsp_len & SLI4_BDE_LEN_MASK));
+ xmit->bde.u.data.low =
+ cpu_to_le32(lower_32_bits(payload->phys));
+ xmit->bde.u.data.high =
+ cpu_to_le32(upper_32_bits(payload->phys));
+ xmit->sequence_payload_len = cpu_to_le32(params->rsp_len);
+
+ xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff);
+
+ xmit->relative_offset = 0;
+
+ /* sequence initiative - this matches what is seen from
+ * FC switches in response to FCGS commands
+ */
+ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI);
+ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT); /* force transmit */
+ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO); /* exchange responder */
+ xmit->dw5flags0 |= SLI4_SEQ_WQE_LS; /* last in sequence */
+ xmit->df_ctl = params->df_ctl;
+ xmit->type = params->type;
+ xmit->r_ctl = params->r_ctl;
+
+ xmit->xri_tag = cpu_to_le16(params->xri);
+ xmit->context_tag = cpu_to_le16(params->rpi);
+
+ xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF;
+ xmit->dw7flags0 |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT;
+ xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS;
+
+ xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
+ xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
+ xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU;
+ xmit->timer = params->timeout;
+
+ xmit->abort_tag = 0;
+ xmit->request_tag = cpu_to_le16(params->tag);
+ xmit->remote_xid = cpu_to_le16(params->ox_id);
+
+ xmit->dw10w0 |=
+ cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT);
+
+ xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE;
+
+ xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT);
+
+ xmit->cq_id = cpu_to_le16(0xFFFF);
+
+ return 0;
+}
+
+int
+sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id)
+{
+ struct sli4_requeue_xri_wqe *requeue = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ requeue->command = SLI4_WQE_REQUEUE_XRI;
+ requeue->xri_tag = cpu_to_le16(xri);
+ requeue->request_tag = cpu_to_le16(tag);
+ requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC);
+ requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD);
+ requeue->cq_id = cpu_to_le16(cq_id);
+ requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE;
+ return 0;
+}
+
+int
+sli_fc_process_link_attention(struct sli4 *sli4, void *acqe)
+{
+ struct sli4_link_attention *link_attn = acqe;
+ struct sli4_link_event event = { 0 };
+
+ efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n",
+ link_attn->link_number, link_attn->attn_type,
+ link_attn->topology, link_attn->port_speed,
+ link_attn->port_fault);
+ efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n",
+ link_attn->shared_link_status,
+ le16_to_cpu(link_attn->logical_link_speed),
+ le32_to_cpu(link_attn->event_tag));
+
+ if (!sli4->link)
+ return -EIO;
+
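+ /*
+ * Translate the link attention ACQE into a generic link event and
+ * hand it to the registered link callback.
+ */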
+ event.medium = SLI4_LINK_MEDIUM_FC;
+
+ switch (link_attn->attn_type) {
+ case SLI4_LNK_ATTN_TYPE_LINK_UP:
+ event.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_LNK_ATTN_TYPE_LINK_DOWN:
+ event.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA:
+ efc_log_info(sli4, "attn_type: no hard alpa\n");
+ event.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ efc_log_info(sli4, "attn_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->event_type) {
+ case SLI4_EVENT_LINK_ATTENTION:
+ break;
+ case SLI4_EVENT_SHARED_LINK_ATTENTION:
+ efc_log_info(sli4, "event_type: FC shared link event\n");
+ break;
+ default:
+ efc_log_info(sli4, "event_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->topology) {
+ case SLI4_LNK_ATTN_P2P:
+ event.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_LNK_ATTN_FC_AL:
+ event.topology = SLI4_LINK_TOPO_FC_AL;
+ break;
+ case SLI4_LNK_ATTN_INTERNAL_LOOPBACK:
+ efc_log_info(sli4, "topology Internal loopback\n");
+ event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL;
+ break;
+ case SLI4_LNK_ATTN_SERDES_LOOPBACK:
+ efc_log_info(sli4, "topology serdes loopback\n");
+ event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL;
+ break;
+ default:
+ efc_log_info(sli4, "topology: unknown\n");
+ break;
+ }
+
+ event.speed = link_attn->port_speed * 1000;
+
+ sli4->link(sli4->link_arg, (void *)&event);
+
+ return 0;
+}
+
+int
+sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
+ u8 *cqe, enum sli4_qentry *etype, u16 *r_id)
+{
+ u8 code = cqe[SLI4_CQE_CODE_OFFSET];
+ int rc;
+
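+ /*
+ * Decode the CQE by its code byte, returning the queue entry type
+ * and the request tag / RQ id / XRI that identifies the work.
+ */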
+ switch (code) {
+ case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
+ {
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_WQ;
+ *r_id = le16_to_cpu(wcqe->request_tag);
+ rc = wcqe->status;
+
+ /* Flag errors except for FCP_RSP_FAILURE */
+ if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) {
+ efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n",
+ wcqe->status, wcqe->hw_status,
+ le16_to_cpu(wcqe->request_tag));
+ efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n",
+ le32_to_cpu(wcqe->wqe_specific_1),
+ le32_to_cpu(wcqe->wqe_specific_2),
+ (wcqe->flags & SLI4_WCQE_XB));
+ efc_log_info(sli4, " %08X %08X %08X %08X\n",
+ ((u32 *)cqe)[0], ((u32 *)cqe)[1],
+ ((u32 *)cqe)[2], ((u32 *)cqe)[3]);
+ }
+
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC:
+ {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1:
+ {
+ struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->rq_id);
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
+ {
+ struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_OPT_WRITE_CMD;
+ *r_id = le16_to_cpu(optcqe->rq_id);
+ rc = optcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
+ {
+ struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_OPT_WRITE_DATA;
+ *r_id = le16_to_cpu(dcqe->xri);
+ rc = dcqe->status;
+
+ /* Flag errors */
+ if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
+ efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n",
+ dcqe->status);
+ efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
+ dcqe->hw_status, le16_to_cpu(dcqe->xri),
+ le32_to_cpu(dcqe->total_data_placed),
+ ((u32 *)cqe)[3],
+ (dcqe->flags & SLI4_OCQE_XB));
+ }
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_COALESCING:
+ {
+ struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->rq_id);
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_XRI_ABORTED:
+ {
+ struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;
+
+ *etype = SLI4_QENTRY_XABT;
+ *r_id = le16_to_cpu(xa->xri);
+ rc = 0;
+ break;
+ }
+ case SLI4_CQE_CODE_RELEASE_WQE:
+ {
+ struct sli4_fc_wqec *wqec = (void *)cqe;
+
+ *etype = SLI4_QENTRY_WQ_RELEASE;
+ *r_id = le16_to_cpu(wqec->wq_id);
+ rc = 0;
+ break;
+ }
+ default:
+ efc_log_info(sli4, "CQE completion code %d not handled\n",
+ code);
+ *etype = SLI4_QENTRY_MAX;
+ *r_id = U16_MAX;
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+u32
+sli_fc_response_length(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ return le32_to_cpu(wcqe->wqe_specific_1);
+}
+
+u32
+sli_fc_io_length(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ return le32_to_cpu(wcqe->wqe_specific_1);
+}
+
+int
+sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ *d_id = 0;
+
+ if (wcqe->status)
+ return -EIO;
+ *d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff;
+ return 0;
+}
+
+u32
+sli_fc_ext_status(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 mask;
+
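+ /*
+ * How much of wqe_specific_2 is meaningful depends on the status:
+ * local and command rejects expose only the low byte, the other
+ * handled statuses expose the full dword, anything else returns 0.
+ */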
+ switch (wcqe->status) {
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ mask = U32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ case SLI4_FC_WCQE_STATUS_CMD_REJECT:
+ mask = 0xff;
+ break;
+ case SLI4_FC_WCQE_STATUS_NPORT_RJT:
+ case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
+ case SLI4_FC_WCQE_STATUS_NPORT_BSY:
+ case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ mask = U32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ mask = U32_MAX;
+ break;
+ default:
+ mask = 0;
+ }
+
+ return le32_to_cpu(wcqe->wqe_specific_2) & mask;
+}
+
+int
+sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index)
+{
+ int rc = -EIO;
+ u8 code = 0;
+ u16 rq_element_index;
+
+ *rq_id = 0;
+ *index = U32_MAX;
+
+ code = cqe[SLI4_CQE_CODE_OFFSET];
+
+ /* Retrieve the RQ index from the completion */
+ if (code == SLI4_CQE_CODE_RQ_ASYNC) {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
+ rq_element_index =
+ le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
+ *index = rq_element_index;
+ if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = rcqe->status;
+ efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n",
+ rcqe->status,
+ sli_fc_get_status_string(rcqe->status),
+ le16_to_cpu(rcqe->fcfi_rq_id_word) &
+ SLI4_RACQE_RQ_ID);
+
+ efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ le16_to_cpu(rcqe->data_placement_length),
+ rcqe->sof_byte, rcqe->eof_byte,
+ rcqe->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
+ struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe;
+
+ *rq_id = le16_to_cpu(rcqe_v1->rq_id);
+ rq_element_index =
+ (le16_to_cpu(rcqe_v1->rq_elmt_indx_word) &
+ SLI4_RACQE_RQ_EL_INDX);
+ *index = rq_element_index;
+ if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = rcqe_v1->status;
+ efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n",
+ rcqe_v1->status,
+ sli_fc_get_status_string(rcqe_v1->status),
+ le16_to_cpu(rcqe_v1->rq_id), rq_element_index);
+
+ efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ le16_to_cpu(rcqe_v1->data_placement_length),
+ rcqe_v1->sof_byte, rcqe_v1->eof_byte,
+ rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+ } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
+ struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
+
+ *rq_id = le16_to_cpu(optcqe->rq_id);
+ *index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX;
+ if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = optcqe->status;
+ efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n",
+ optcqe->status,
+ sli_fc_get_status_string(optcqe->status),
+ le16_to_cpu(optcqe->rq_id), *index,
+ le16_to_cpu(optcqe->data_placement_length));
+
+ efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n",
+ (optcqe->hdpl_vld & SLI4_OCQE_HDPL),
+ (optcqe->flags1 & SLI4_OCQE_OOX),
+ (optcqe->flags1 & SLI4_OCQE_AGXR),
+ optcqe->xri, le16_to_cpu(optcqe->rpi));
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
+ struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
+
+ rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) &
+ SLI4_RCQE_RQ_EL_INDX);
+
+ *rq_id = le16_to_cpu(rcqe->rq_id);
+ if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) {
+ *index = rq_element_index;
+ rc = 0;
+ } else {
+ *index = U32_MAX;
+ rc = rcqe->status;
+
+ efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n",
+ rcqe->status,
+ sli_fc_get_status_string(rcqe->status),
+ le16_to_cpu(rcqe->rq_id), rq_element_index);
+ efc_log_info(sli4, "rq_id=%#x sdpl=%x\n",
+ le16_to_cpu(rcqe->rq_id),
+ le16_to_cpu(rcqe->seq_placement_length));
+ }
+ } else {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *index = U32_MAX;
+ rc = rcqe->status;
+
+ efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n",
+ rcqe->status,
+ le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID,
+ (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX),
+ le16_to_cpu(rcqe->data_placement_length));
+ efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n",
+ rcqe->sof_byte, rcqe->eof_byte,
+ rcqe->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+
+ return rc;
+}
+
+static int
+sli_bmbx_wait(struct sli4 *sli4, u32 msec)
+{
+ u32 val;
+ unsigned long end;
+
+ /* Wait for the bootstrap mailbox to report "ready" */
+ end = jiffies + msecs_to_jiffies(msec);
+ do {
+ val = readl(sli4->reg[0] + SLI4_BMBX_REG);
+ if (val & SLI4_BMBX_RDY)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ return -EIO;
+}
+
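+ /*
+ * Bootstrap mailbox writes are a two-step handshake: post the upper
+ * 32 bits of the mailbox physical address, wait for the ready bit,
+ * then post the lower 32 bits and wait for ready again.
+ */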
+static int
+sli_bmbx_write(struct sli4 *sli4)
+{
+ u32 val;
+
+ /* write buffer location to bootstrap mailbox register */
+ val = sli_bmbx_write_hi(sli4->bmbx.phys);
+ writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
+
+ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
+ efc_log_crit(sli4, "BMBX WRITE_HI failed\n");
+ return -EIO;
+ }
+ val = sli_bmbx_write_lo(sli4->bmbx.phys);
+ writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
+
+ /* wait for SLI Port to set ready bit */
+ return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC);
+}
+
+int
+sli_bmbx_command(struct sli4 *sli4)
+{
+ void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
+
+ if (sli_fw_error_status(sli4) > 0) {
+ efc_log_crit(sli4, "Chip is in an error state -Mailbox command rejected");
+ efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(sli4),
+ sli_reg_read_err1(sli4),
+ sli_reg_read_err2(sli4));
+ return -EIO;
+ }
+
+ /* Submit a command to the bootstrap mailbox and check the status */
+ if (sli_bmbx_write(sli4)) {
+ efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n",
+ &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG));
+ return -EIO;
+ }
+
+ /* check completion queue entry status */
+ if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) &
+ SLI4_MCQE_VALID) {
+ return sli_cqe_mq(sli4, cqe);
+ }
+ efc_log_crit(sli4, "invalid or wrong type\n");
+ return -EIO;
+}
+
+int
+sli_cmd_config_link(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_config_link *config_link = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return 0;
+}
+
+int
+sli_cmd_down_link(struct sli4 *sli4, void *buf)
+{
+ struct sli4_mbox_command_header *hdr = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ hdr->command = SLI4_MBX_CMD_DOWN_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return 0;
+}
+
+int
+sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki)
+{
+ struct sli4_cmd_dump4 *cmd = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_DUMP;
+ cmd->type_dword = cpu_to_le32(0x4);
+ cmd->wki_selection = cpu_to_le16(wki);
+ return 0;
+}
+
+int
+sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_read_transceiver_data *req = NULL;
+ u32 psize;
+
+ if (!dma)
+ psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data);
+ else
+ psize = dma->size;
+
+ req = sli_config_cmd_init(sli4, buf, psize, dma);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data));
+
+ req->page_number = cpu_to_le32(page_num);
+ req->port = cpu_to_le32(sli4->port_number);
+
+ return 0;
+}
+
+int
+sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters)
+{
+ struct sli4_cmd_read_link_stats *cmd = buf;
+ u32 flags;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT;
+
+ flags = 0;
+ if (req_ext_counters)
+ flags |= SLI4_READ_LNKSTAT_REC;
+ if (clear_all_counters)
+ flags |= SLI4_READ_LNKSTAT_CLRC;
+ if (clear_overflow_flags)
+ flags |= SLI4_READ_LNKSTAT_CLOF;
+
+ cmd->dw1_flags = cpu_to_le32(flags);
+ return 0;
+}
+
+int
+sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters)
+{
+ struct sli4_cmd_read_status *cmd = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS;
+ if (clear_counters)
+ flags |= SLI4_READSTATUS_CLEAR_COUNTERS;
+ else
+ flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS;
+
+ cmd->dw1_flags = cpu_to_le32(flags);
+ return 0;
+}
+
+int
+sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa)
+{
+ struct sli4_cmd_init_link *init_link = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK;
+
+ init_link->sel_reset_al_pa_dword =
+ cpu_to_le32(reset_alpa);
+ flags &= ~SLI4_INIT_LINK_F_LOOPBACK;
+
+ init_link->link_speed_sel_code = cpu_to_le32(speed);
+ switch (speed) {
+ case SLI4_LINK_SPEED_1G:
+ case SLI4_LINK_SPEED_2G:
+ case SLI4_LINK_SPEED_4G:
+ case SLI4_LINK_SPEED_8G:
+ case SLI4_LINK_SPEED_16G:
+ case SLI4_LINK_SPEED_32G:
+ case SLI4_LINK_SPEED_64G:
+ flags |= SLI4_INIT_LINK_F_FIXED_SPEED;
+ break;
+ case SLI4_LINK_SPEED_10G:
+ efc_log_info(sli4, "unsupported FC speed %d\n", speed);
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+
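+ /*
+ * Topology-specific flags: attempt P2P with FC-AL fail-over,
+ * FC-AL only, or P2P only.
+ */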
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ /* Attempt P2P but failover to FC-AL */
+ flags |= SLI4_INIT_LINK_F_FAIL_OVER;
+ flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER;
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ flags |= SLI4_INIT_LINK_F_FCAL_ONLY;
+ if (speed == SLI4_LINK_SPEED_16G ||
+ speed == SLI4_LINK_SPEED_32G) {
+ efc_log_info(sli4, "unsupported FC-AL speed %d\n",
+ speed);
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+ break;
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ flags |= SLI4_INIT_LINK_F_P2P_ONLY;
+ break;
+ default:
+ efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology);
+
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+
+ flags &= ~SLI4_INIT_LINK_F_UNFAIR;
+ flags &= ~SLI4_INIT_LINK_F_NO_LIRP;
+ flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK;
+ flags &= ~SLI4_INIT_LINK_F_NO_LISA;
+ flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA;
+ init_link->flags0 = cpu_to_le32(flags);
+
+ return 0;
+}
+
+int
+sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi)
+{
+ struct sli4_cmd_init_vfi *init_vfi = buf;
+ u16 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI;
+ init_vfi->vfi = cpu_to_le16(vfi);
+ init_vfi->fcfi = cpu_to_le16(fcfi);
+
+ /*
+ * If the VPI is valid, initialize it at the same time as
+ * the VFI
+ */
+ if (vpi != U16_MAX) {
+ flags |= SLI4_INIT_VFI_FLAG_VP;
+ init_vfi->flags0_word = cpu_to_le16(flags);
+ init_vfi->vpi = cpu_to_le16(vpi);
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi)
+{
+ struct sli4_cmd_init_vpi *init_vpi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI;
+ init_vpi->vpi = cpu_to_le16(vpi);
+ init_vpi->vfi = cpu_to_le16(vfi);
+
+ return 0;
+}
+
+int
+sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count)
+{
+ struct sli4_cmd_post_xri *post_xri = buf;
+ u16 xri_count_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI;
+ post_xri->xri_base = cpu_to_le16(xri_base);
+ xri_count_flags = xri_count & SLI4_POST_XRI_COUNT;
+ xri_count_flags |= SLI4_POST_XRI_FLAG_ENX;
+ xri_count_flags |= SLI4_POST_XRI_FLAG_VAL;
+ post_xri->xri_count_flags = cpu_to_le16(xri_count_flags);
+
+ return 0;
+}
+
+int
+sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri)
+{
+ struct sli4_cmd_release_xri *release_xri = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI;
+ release_xri->xri_count_word = cpu_to_le16(num_xri &
+ SLI4_RELEASE_XRI_COUNT);
+
+ return 0;
+}
+
+static int
+sli_cmd_read_config(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_read_config *read_config = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG;
+
+ return 0;
+}
+
+int
+sli_cmd_read_nvparms(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_read_nvparms *read_nvparms = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS;
+
+ return 0;
+}
+
+int
+sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn,
+ u8 hard_alpa, u32 preferred_d_id)
+{
+ struct sli4_cmd_write_nvparms *write_nvparms = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS;
+ memcpy(write_nvparms->wwpn, wwpn, 8);
+ memcpy(write_nvparms->wwnn, wwnn, 8);
+
+ write_nvparms->hard_alpa_d_id =
+ cpu_to_le32((preferred_d_id << 8) | hard_alpa);
+ return 0;
+}
+
+static int
+sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd)
+{
+ struct sli4_cmd_read_rev *read_rev = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_rev->hdr.command = SLI4_MBX_CMD_READ_REV;
+
+ if (vpd && vpd->size) {
+ read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD);
+
+ read_rev->available_length_dword =
+ cpu_to_le32(vpd->size &
+ SLI4_READ_REV_AVAILABLE_LENGTH);
+
+ read_rev->hostbuf.low =
+ cpu_to_le32(lower_32_bits(vpd->phys));
+ read_rev->hostbuf.high =
+ cpu_to_le32(upper_32_bits(vpd->phys));
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi)
+{
+ struct sli4_cmd_read_sparm64 *read_sparm64 = buf;
+
+ if (vpi == U16_MAX) {
+ efc_log_err(sli4, "special VPI not supported!!!\n");
+ return -EIO;
+ }
+
+ if (!dma || !dma->phys) {
+ efc_log_err(sli4, "bad DMA buffer\n");
+ return -EIO;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64;
+
+ read_sparm64->bde_64.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (dma->size & SLI4_BDE_LEN_MASK));
+ read_sparm64->bde_64.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ read_sparm64->bde_64.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ read_sparm64->vpi = cpu_to_le16(vpi);
+
+ return 0;
+}
+
+int
+sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma)
+{
+ struct sli4_cmd_read_topology *read_topo = buf;
+
+ if (!dma || !dma->size)
+ return -EIO;
+
+ if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
+ efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size);
+ return -EIO;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY;
+
+ memset(dma->virt, 0, dma->size);
+
+ read_topo->bde_loop_map.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (dma->size & SLI4_BDE_LEN_MASK));
+ read_topo->bde_loop_map.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ read_topo->bde_loop_map.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ return 0;
+}
+
+int
+sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
+ struct sli4_cmd_rq_cfg *rq_cfg)
+{
+ struct sli4_cmd_reg_fcfi *reg_fcfi = buf;
+ u32 i;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI;
+
+ reg_fcfi->fcf_index = cpu_to_le16(index);
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ switch (i) {
+ case 0:
+ reg_fcfi->rqid0 = rq_cfg[0].rq_id;
+ break;
+ case 1:
+ reg_fcfi->rqid1 = rq_cfg[1].rq_id;
+ break;
+ case 2:
+ reg_fcfi->rqid2 = rq_cfg[2].rq_id;
+ break;
+ case 3:
+ reg_fcfi->rqid3 = rq_cfg[3].rq_id;
+ break;
+ }
+ reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index,
+ u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
+ struct sli4_cmd_rq_cfg *rq_cfg)
+{
+ struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf;
+ u32 i;
+ u32 mrq_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ;
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
+ reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index);
+ goto done;
+ }
+
+ reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE);
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
+
+ switch (i) {
+ case 3:
+ reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id;
+ break;
+ case 2:
+ reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id;
+ break;
+ case 1:
+ reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id;
+ break;
+ case 0:
+ reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id;
+ break;
+ }
+ }
+
+ mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS;
+ mrq_flags |= (mrq_bit_mask << 8);
+ mrq_flags |= (rq_selection_policy << 12);
+ reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags);
+done:
+ return 0;
+}
+
+int
+sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
+ struct efc_dma *dma, u8 update, u8 enable_t10_pi)
+{
+ struct sli4_cmd_reg_rpi *reg_rpi = buf;
+ u32 rportid_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI;
+
+ reg_rpi->rpi = cpu_to_le16(rpi);
+
+ rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID;
+
+ if (update)
+ rportid_flags |= SLI4_REGRPI_UPD;
+ else
+ rportid_flags &= ~SLI4_REGRPI_UPD;
+
+ if (enable_t10_pi)
+ rportid_flags |= SLI4_REGRPI_ETOW;
+ else
+ rportid_flags &= ~SLI4_REGRPI_ETOW;
+
+ reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags);
+
+ reg_rpi->bde_64.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
+ reg_rpi->bde_64.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ reg_rpi->bde_64.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ reg_rpi->vpi = cpu_to_le16(vpi);
+
+ return 0;
+}
+
+int
+sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
+ u16 vfi, u16 fcfi, struct efc_dma dma,
+ u16 vpi, __be64 sli_wwpn, u32 fc_id)
+{
+ struct sli4_cmd_reg_vfi *reg_vfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI;
+
+ reg_vfi->vfi = cpu_to_le16(vfi);
+
+ reg_vfi->fcfi = cpu_to_le16(fcfi);
+
+ reg_vfi->sparm.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
+ reg_vfi->sparm.u.data.low =
+ cpu_to_le32(lower_32_bits(dma.phys));
+ reg_vfi->sparm.u.data.high =
+ cpu_to_le32(upper_32_bits(dma.phys));
+
+ reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov);
+ reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov);
+
+ reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP);
+ reg_vfi->vpi = cpu_to_le16(vpi);
+ memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn));
+ reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id);
+
+ return 0;
+}
+
+int
+sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn,
+ u16 vpi, u16 vfi, bool update)
+{
+ struct sli4_cmd_reg_vpi *reg_vpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI;
+
+ flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID);
+ if (update)
+ flags |= SLI4_REGVPI_UPD;
+ else
+ flags &= ~SLI4_REGVPI_UPD;
+
+ reg_vpi->dw2_lportid_flags = cpu_to_le32(flags);
+ memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn));
+ reg_vpi->vpi = cpu_to_le16(vpi);
+ reg_vpi->vfi = cpu_to_le16(vfi);
+
+ return 0;
+}
+
+static int
+sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask,
+ bool query)
+{
+ struct sli4_cmd_request_features *req_features = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES;
+
+ if (query)
+ req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY);
+
+ req_features->cmd = cpu_to_le32(features_mask);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator)
+{
+ struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI;
+ unreg_fcfi->fcfi = cpu_to_le16(indicator);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
+ enum sli4_resource which, u32 fc_id)
+{
+ struct sli4_cmd_unreg_rpi *unreg_rpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI;
+ switch (which) {
+ case SLI4_RSRC_RPI:
+ flags |= SLI4_UNREG_RPI_II_RPI;
+ if (fc_id == U32_MAX)
+ break;
+
+ flags |= SLI4_UNREG_RPI_DP;
+ unreg_rpi->dw2_dest_n_portid =
+ cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK);
+ break;
+ case SLI4_RSRC_VPI:
+ flags |= SLI4_UNREG_RPI_II_VPI;
+ break;
+ case SLI4_RSRC_VFI:
+ flags |= SLI4_UNREG_RPI_II_VFI;
+ break;
+ case SLI4_RSRC_FCFI:
+ flags |= SLI4_UNREG_RPI_II_FCFI;
+ break;
+ default:
+ efc_log_info(sli4, "unknown type %#x\n", which);
+ return -EIO;
+ }
+
+ unreg_rpi->dw1w1_flags = cpu_to_le16(flags);
+ unreg_rpi->index = cpu_to_le16(indicator);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which)
+{
+ struct sli4_cmd_unreg_vfi *unreg_vfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI;
+ switch (which) {
+ case SLI4_UNREG_TYPE_DOMAIN:
+ unreg_vfi->index = cpu_to_le16(index);
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ unreg_vfi->index = cpu_to_le16(index);
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ unreg_vfi->index = cpu_to_le16(U16_MAX);
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (which != SLI4_UNREG_TYPE_DOMAIN)
+ unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which)
+{
+ struct sli4_cmd_unreg_vpi *unreg_vpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI;
+ unreg_vpi->index = cpu_to_le16(indicator);
+ switch (which) {
+ case SLI4_UNREG_TYPE_PORT:
+ flags |= SLI4_UNREG_VPI_II_VPI;
+ break;
+ case SLI4_UNREG_TYPE_DOMAIN:
+ flags |= SLI4_UNREG_VPI_II_VFI;
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ flags |= SLI4_UNREG_VPI_II_FCFI;
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ /* override indicator */
+ unreg_vpi->index = cpu_to_le16(U16_MAX);
+ flags |= SLI4_UNREG_VPI_II_FCFI;
+ break;
+ default:
+ return -EIO;
+ }
+
+ unreg_vpi->dw2w0_flags = cpu_to_le16(flags);
+ return 0;
+}
+
+static int
+sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf,
+ struct sli4_queue *q, int num_q, u32 shift,
+ u32 delay_mult)
+{
+ struct sli4_rqst_cmn_modify_eq_delay *req = NULL;
+ int i;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay));
+ req->num_eq = cpu_to_le32(num_q);
+
+ for (i = 0; i < num_q; i++) {
+ req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id);
+ req->eq_delay_record[i].phase = cpu_to_le32(shift);
+ req->eq_delay_record[i].delay_multiplier =
+ cpu_to_le32(delay_mult);
+ }
+
+ return 0;
+}
+
+void
+sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf,
+ size_t size, u16 timeout)
+{
+ struct sli4_rqst_lowlevel_set_watchdog *req = NULL;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL);
+ if (!req)
+ return;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG,
+ SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0,
+ SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog));
+ req->watchdog_timeout = cpu_to_le16(timeout);
+}
+
+static int
+sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = cpu_to_le32(dma->size);
+
+ return 0;
+}
+
+static int
+sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = cpu_to_le32(dma->size);
+
+ return 0;
+}
+
+int
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context)
+{
+ struct sli4_rqst_cmn_nop *nop = NULL;
+
+ nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop),
+ NULL);
+ if (!nop)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON,
+ CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop));
+
+ memcpy(&nop->context, &context, sizeof(context));
+
+ return 0;
+}
+
+int
+sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype)
+{
+ struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL;
+
+ ext = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL);
+ if (!ext)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info));
+
+ ext->resource_type = cpu_to_le16(rtype);
+
+ return 0;
+}
+
+int
+sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params);
+
+ return 0;
+}
+
+static int
+sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf)
+{
+ struct sli4_rqst_cmn_get_port_name *pname;
+
+ pname = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL);
+ if (!pname)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME,
+ SLI4_SUBSYSTEM_COMMON, CMD_V1,
+ SLI4_RQST_PYLD_LEN(cmn_get_port_name));
+
+ /* Set the port type value (ethernet=0, FC=1) for V1 commands */
+ pname->port_type = SLI4_PORT_TYPE_FC;
+
+ return 0;
+}
+
+int
+sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
+ u16 eof, u32 desired_write_length,
+ u32 offset, char *obj_name,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_write_object *wr_obj = NULL;
+ struct sli4_bde *bde;
+ u32 dwflags = 0;
+
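+ /*
+ * The object data itself is described by a single host buffer
+ * descriptor (BDE) appended to the embedded request.
+ */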
+ wr_obj = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL);
+ if (!wr_obj)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde)));
+
+ if (noc)
+ dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC;
+ if (eof)
+ dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF;
+ dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN);
+
+ wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
+
+ wr_obj->write_offset = cpu_to_le32(offset);
+ strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
+ wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
+
+ bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
+
+ /* Set up to transfer desired_write_length bytes to the device */
+ bde->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (desired_write_length & SLI4_BDE_LEN_MASK));
+ bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
+ bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
+
+ return 0;
+}
+
+int
+sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
+{
+ struct sli4_rqst_cmn_delete_object *req = NULL;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_delete_object), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_delete_object));
+
+ strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
+ return 0;
+}
+
+int
+sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
+ u32 offset, char *obj_name, struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_read_object *rd_obj = NULL;
+ struct sli4_bde *bde;
+
+ rd_obj = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL);
+ if (!rd_obj)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde)));
+ rd_obj->desired_read_length_dword =
+ cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
+
+ rd_obj->read_offset = cpu_to_le32(offset);
+ strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
+ rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
+
+ bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
+
+ /* Set up to transfer desired_read_len bytes from the device */
+ bde->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (desired_read_len & SLI4_BDE_LEN_MASK));
+ if (dma) {
+ bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
+ bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
+ } else {
+ bde->u.data.low = 0;
+ bde->u.data.high = 0;
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd,
+ struct efc_dma *resp)
+{
+ struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL;
+
+ clp_cmd = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL);
+ if (!clp_cmd)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF,
+ CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd));
+
+ clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size);
+ clp_cmd->cmd_buf_addr_low = cpu_to_le32(lower_32_bits(cmd->phys));
+ clp_cmd->cmd_buf_addr_high = cpu_to_le32(upper_32_bits(cmd->phys));
+ clp_cmd->resp_buf_length = cpu_to_le32(resp->size);
+ clp_cmd->resp_buf_addr_low = cpu_to_le32(lower_32_bits(resp->phys));
+ clp_cmd->resp_buf_addr_high = cpu_to_le32(upper_32_bits(resp->phys));
+ return 0;
+}
+
+int
+sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query,
+ bool is_buffer_list,
+ struct efc_dma *buffer, u8 fdb)
+{
+ struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL;
+ u32 buffer_length_flag = 0;
+
+ set_dump_loc = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL);
+ if (!set_dump_loc)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_set_dump_location));
+
+ if (is_buffer_list)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP;
+
+ if (query)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY;
+
+ if (fdb)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB;
+
+ if (buffer) {
+ set_dump_loc->buf_addr_low =
+ cpu_to_le32(lower_32_bits(buffer->phys));
+ set_dump_loc->buf_addr_high =
+ cpu_to_le32(upper_32_bits(buffer->phys));
+
+ buffer_length_flag |=
+ buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+ } else {
+ set_dump_loc->buf_addr_low = 0;
+ set_dump_loc->buf_addr_high = 0;
+ set_dump_loc->buffer_length_dword = 0;
+ }
+ set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag);
+ return 0;
+}
+
+int
+sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature,
+ u32 param_len, void *parameter)
+{
+ struct sli4_rqst_cmn_set_features *cmd = NULL;
+
+ cmd = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_set_features), NULL);
+ if (!cmd)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_set_features));
+
+ cmd->feature = cpu_to_le32(feature);
+ cmd->param_len = cpu_to_le32(param_len);
+ memcpy(cmd->params, parameter, param_len);
+
+ return 0;
+}
+
+int
+sli_cqe_mq(struct sli4 *sli4, void *buf)
+{
+ struct sli4_mcqe *mcqe = buf;
+ u32 dwflags = le32_to_cpu(mcqe->dw3_flags);
+ /*
+ * Firmware can split mbx completions into two MCQEs: first with only
+ * the "consumed" bit set and a second with the "complete" bit set.
+ * Thus, ignore MCQE unless "complete" is set.
+ */
+ if (!(dwflags & SLI4_MCQE_COMPLETED))
+ return SLI4_MCQE_STATUS_NOT_COMPLETED;
+
+ if (le16_to_cpu(mcqe->completion_status)) {
+ efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
+ le16_to_cpu(mcqe->completion_status),
+ le16_to_cpu(mcqe->extended_status),
+ (dwflags & SLI4_MCQE_CONSUMED),
+ (dwflags & SLI4_MCQE_COMPLETED),
+ (dwflags & SLI4_MCQE_AE),
+ (dwflags & SLI4_MCQE_VALID));
+ }
+
+ return le16_to_cpu(mcqe->completion_status);
+}
+
+int
+sli_cqe_async(struct sli4 *sli4, void *buf)
+{
+ struct sli4_acqe *acqe = buf;
+ int rc = -EIO;
+
+ if (!buf) {
+ efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf);
+ return -EIO;
+ }
+
+ switch (acqe->event_code) {
+ case SLI4_ACQE_EVENT_CODE_LINK_STATE:
+ efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n",
+ acqe->event_code);
+ break;
+ case SLI4_ACQE_EVENT_CODE_GRP_5:
+ efc_log_info(sli4, "ACQE GRP5\n");
+ break;
+ case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
+ efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
+ acqe->event_type,
+ le32_to_cpu(acqe->event_data[0]),
+ le32_to_cpu(acqe->event_data[1]));
+ break;
+ case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
+ rc = sli_fc_process_link_attention(sli4, buf);
+ break;
+ default:
+ efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code);
+ }
+
+ return rc;
+}
+
+bool
+sli_fw_ready(struct sli4 *sli4)
+{
+ u32 val;
+
+ /* Determine if the chip FW is in a ready state */
+ val = sli_reg_read_status(sli4);
+ return (val & SLI4_PORT_STATUS_RDY) ? 1 : 0;
+}
+
+static bool
+sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(timeout_ms);
+
+ do {
+ if (sli_fw_ready(sli4))
+ return true;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ return false;
+}
+
+static bool
+sli_sliport_reset(struct sli4 *sli4)
+{
+ bool rc;
+ u32 val;
+
+ val = SLI4_PORT_CTRL_IP;
+ /* Initialize port, endian */
+ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
+
+ rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
+ if (!rc)
+ efc_log_crit(sli4, "port failed to become ready after initialization\n");
+
+ return rc;
+}
+
+static bool
+sli_fw_init(struct sli4 *sli4)
+{
+ /*
+ * Is firmware ready for operation?
+ */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "FW status is NOT ready\n");
+ return false;
+ }
+
+ /*
+ * Reset port to a known state
+ */
+ return sli_sliport_reset(sli4);
+}
+
+static int
+sli_request_features(struct sli4 *sli4, u32 *features, bool query)
+{
+ struct sli4_cmd_request_features *req_features = sli4->bmbx.virt;
+
+ if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) {
+ efc_log_err(sli4, "bad REQUEST_FEATURES write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(req_features->hdr.status)) {
+ efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n",
+ le16_to_cpu(req_features->hdr.status));
+ return -EIO;
+ }
+
+ *features = le32_to_cpu(req_features->resp);
+ return 0;
+}
+
+void
+sli_calc_max_qentries(struct sli4 *sli4)
+{
+ enum sli4_qtype q;
+ u32 qentries;
+
+ for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
+ sli4->qinfo.max_qentries[q] =
+ sli_convert_mask_to_count(sli4->qinfo.count_method[q],
+ sli4->qinfo.count_mask[q]);
+ }
+
+ /* single, contiguous DMA allocations will be made for each queue
+ * of size (max_qentries * queue entry size); since these can be large,
+ * check against the OS max DMA allocation size
+ */
+ for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
+ qentries = sli4->qinfo.max_qentries[q];
+
+ efc_log_info(sli4, "[%s]: max_qentries from %d to %d\n",
+ SLI4_QNAME[q],
+ sli4->qinfo.max_qentries[q], qentries);
+ sli4->qinfo.max_qentries[q] = qentries;
+ }
+}
+
+static int
+sli_get_read_config(struct sli4 *sli4)
+{
+ struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
+ u32 i, total, total_size;
+ u32 *base;
+
+ if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad READ_CONFIG write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(conf->hdr.status)) {
+ efc_log_err(sli4, "READ_CONFIG bad status %#x\n",
+ le16_to_cpu(conf->hdr.status));
+ return -EIO;
+ }
+
+ sli4->params.has_extents =
+ le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT;
+ if (sli4->params.has_extents) {
+ efc_log_err(sli4, "extents not supported\n");
+ return -EIO;
+ }
+
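+ /*
+ * Without extent support, each resource type is a single contiguous
+ * range; record its base and size from the READ_CONFIG response and
+ * allocate a use bitmap for each.
+ */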
+ base = sli4->ext[0].base;
+ if (!base) {
+ int size = SLI4_RSRC_MAX * sizeof(u32);
+
+ base = kzalloc(size, GFP_KERNEL);
+ if (!base)
+ return -EIO;
+ }
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ sli4->ext[i].number = 1;
+ sli4->ext[i].n_alloc = 0;
+ sli4->ext[i].base = &base[i];
+ }
+
+ sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base);
+ sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count);
+
+ sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base);
+ sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count);
+
+ sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base);
+ sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count);
+
+ sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base);
+ sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count);
+
+ sli4->ext[SLI4_RSRC_FCFI].base[0] = 0;
+ sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count);
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ total = sli4->ext[i].number * sli4->ext[i].size;
+ total_size = BITS_TO_LONGS(total) * sizeof(long);
+ sli4->ext[i].use_map = kzalloc(total_size, GFP_KERNEL);
+ if (!sli4->ext[i].use_map) {
+ efc_log_err(sli4, "bitmap memory allocation failed %d\n",
+ i);
+ return -EIO;
+ }
+ sli4->ext[i].map_size = total;
+ }
+
+ sli4->topology = (le32_to_cpu(conf->topology_dword) &
+ SLI4_READ_CFG_RESP_TOPOLOGY) >> 24;
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ efc_log_info(sli4, "FC (unknown)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ efc_log_info(sli4, "FC (direct attach)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ efc_log_info(sli4, "FC (arbitrated loop)\n");
+ break;
+ default:
+ efc_log_info(sli4, "bad topology %#x\n", sli4->topology);
+ }
+
+ sli4->e_d_tov = le16_to_cpu(conf->e_d_tov);
+ sli4->r_a_tov = le16_to_cpu(conf->r_a_tov);
+
+ sli4->link_module_type = le16_to_cpu(conf->lmt);
+
+ sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] = le16_to_cpu(conf->eq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] = le16_to_cpu(conf->cq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] = le16_to_cpu(conf->wq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] = le16_to_cpu(conf->rq_count);
+
+ /*
+ * READ_CONFIG doesn't give the max number of MQ. Applications
+ * will typically want 1, but we may need another at some future
+ * date. Dummy up a "max" MQ count here.
+ */
+ sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT;
+ return 0;
+}
+
+static int
+sli_get_sli4_parameters(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_sli4_params *parms;
+ u32 dw_loopback;
+ u32 dw_eq_pg_cnt;
+ u32 dw_cq_pg_cnt;
+ u32 dw_mq_pg_cnt;
+ u32 dw_wq_pg_cnt;
+ u32 dw_rq_pg_cnt;
+ u32 dw_sgl_pg_cnt;
+
+ if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt))
+ return -EIO;
+
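+ /*
+ * The response is returned in the embedded SLI_CONFIG payload of the
+ * bootstrap mailbox buffer.
+ */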
+ parms = (struct sli4_rsp_cmn_get_sli4_params *)
+ (((u8 *)sli4->bmbx.virt) +
+ offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (parms->hdr.status) {
+ efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x",
+ parms->hdr.status);
+ efc_log_err(sli4, "additional status %#x\n",
+ parms->hdr.additional_status);
+ return -EIO;
+ }
+
+ dw_loopback = le32_to_cpu(parms->dw16_loopback_scope);
+ dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt);
+ dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt);
+ dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt);
+ dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt);
+ dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt);
+
+ sli4->params.auto_reg = (dw_loopback & SLI4_PARAM_AREG);
+ sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF);
+ sli4->params.hdr_template_req = (dw_loopback & SLI4_PARAM_HDRR);
+ sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM);
+ sli4->params.t10_dif_separate_capable = (dw_loopback & SLI4_PARAM_TSMM);
+
+ sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt);
+ sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt);
+
+ sli4->rq_min_buf_size = le16_to_cpu(parms->min_rq_buffer_size);
+ sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size);
+
+ sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] =
+ (dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] =
+ (dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] =
+ (dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] =
+ (dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] =
+ (dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK);
+
+ /* save count methods and masks for each queue type */
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_EQ] =
+ le16_to_cpu(parms->eqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_EQ] =
+ GET_Q_CNT_METHOD(dw_eq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_CQ] =
+ le16_to_cpu(parms->cqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_CQ] =
+ GET_Q_CNT_METHOD(dw_cq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_MQ] =
+ le16_to_cpu(parms->mqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_MQ] =
+ GET_Q_CNT_METHOD(dw_mq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_WQ] =
+ le16_to_cpu(parms->wqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_WQ] =
+ GET_Q_CNT_METHOD(dw_wq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_RQ] =
+ le16_to_cpu(parms->rqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_RQ] =
+ GET_Q_CNT_METHOD(dw_rq_pg_cnt);
+
+ /* now calculate max queue entries */
+ sli_calc_max_qentries(sli4);
+
+ dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt);
+
+ /* max # of pages */
+ sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK);
+
+ /* bit map of available sizes */
+ sli4->sgl_page_sizes = (dw_sgl_pg_cnt &
+ SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8;
+ /* ignore HLM here. Use value from REQUEST_FEATURES */
+ sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length);
+ sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR);
+ /* default to using pre-registered SGLs */
+ sli4->params.sgl_pre_registered = true;
+
+ sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON;
+ sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ);
+
+ sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) &
+ SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12;
+
+ /* Use the highest available WQE size. */
+ if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) &
+ SLI4_128BYTE_WQE_SUPPORT)
+ sli4->wqe_size = SLI4_WQE_EXT_BYTES;
+ else
+ sli4->wqe_size = SLI4_WQE_BYTES;
+
+ return 0;
+}
+
+static int
+sli_get_ctrl_attributes(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_cntl_attributes *attr;
+ struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr;
+ struct efc_dma data;
+ u32 psize;
+
+ /*
+ * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
+ * uses VPD DMA buffer as the response won't fit in the embedded
+ * buffer.
+ */
+ memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size);
+ if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt,
+ &sli4->vpd_data)) {
+ efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
+ return -EIO;
+ }
+
+ attr = sli4->vpd_data.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (attr->hdr.status) {
+ efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x",
+ attr->hdr.status);
+ efc_log_err(sli4, "additional status %#x\n",
+ attr->hdr.additional_status);
+ return -EIO;
+ }
+
+ sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM;
+
+ memcpy(sli4->bios_version_string, attr->bios_version_str,
+ sizeof(sli4->bios_version_string));
+
+ /* get additional attributes */
+ psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
+ data.size = psize;
+ data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
+ &data.phys, GFP_DMA);
+ if (!data.virt) {
+ memset(&data, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
+ return -EIO;
+ }
+
+ if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt,
+ &data)) {
+ efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n");
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n");
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ add_attr = data.virt;
+ if (add_attr->hdr.status) {
+ efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n",
+ add_attr->hdr.status);
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name));
+
+ efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name);
+
+ dma_free_coherent(&sli4->pci->dev, data.size, data.virt,
+ data.phys);
+ memset(&data, 0, sizeof(struct efc_dma));
+ return 0;
+}
+
+static int
+sli_get_fw_rev(struct sli4 *sli4)
+{
+ struct sli4_cmd_read_rev *read_rev = sli4->bmbx.virt;
+
+ if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data))
+ return -EIO;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(read_rev->hdr.status)) {
+ efc_log_err(sli4, "READ_REV bad status %#x\n",
+ le16_to_cpu(read_rev->hdr.status));
+ return -EIO;
+ }
+
+ sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id);
+ memcpy(sli4->fw_name[0], read_rev->first_fw_name,
+ sizeof(sli4->fw_name[0]));
+
+ sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id);
+ memcpy(sli4->fw_name[1], read_rev->second_fw_name,
+ sizeof(sli4->fw_name[1]));
+
+ sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev);
+ sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev);
+ sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev);
+
+ efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n",
+ read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id),
+ read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id));
+
+ efc_log_info(sli4, "HW1: %08x / HW2: %08x\n",
+ le32_to_cpu(read_rev->first_hw_rev),
+ le32_to_cpu(read_rev->second_hw_rev));
+
+ /* Check that all VPD data was returned */
+ if (le32_to_cpu(read_rev->returned_vpd_length) !=
+ le32_to_cpu(read_rev->actual_vpd_length)) {
+ efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n",
+ le32_to_cpu(read_rev->available_length_dword) &
+ SLI4_READ_REV_AVAILABLE_LENGTH,
+ le32_to_cpu(read_rev->returned_vpd_length),
+ le32_to_cpu(read_rev->actual_vpd_length));
+ }
+ sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length);
+ return 0;
+}
+
+static int
+sli_get_config(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_port_name *port_name;
+ struct sli4_cmd_read_nvparms *read_nvparms;
+
+ /*
+ * Read the device configuration
+ */
+ if (sli_get_read_config(sli4))
+ return -EIO;
+
+ if (sli_get_sli4_parameters(sli4))
+ return -EIO;
+
+ if (sli_get_ctrl_attributes(sli4))
+ return -EIO;
+
+ if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt))
+ return -EIO;
+
+ port_name = (struct sli4_rsp_cmn_get_port_name *)
+ (((u8 *)sli4->bmbx.virt) +
+ offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n");
+ return -EIO;
+ }
+
+ sli4->port_name[0] = port_name->port_name[sli4->port_number];
+ sli4->port_name[1] = '\0';
+
+ if (sli_get_fw_rev(sli4))
+ return -EIO;
+
+ if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad READ_NVPARMS write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n");
+ return -EIO;
+ }
+
+ read_nvparms = sli4->bmbx.virt;
+ if (le16_to_cpu(read_nvparms->hdr.status)) {
+ efc_log_err(sli4, "READ_NVPARMS bad status %#x\n",
+ le16_to_cpu(read_nvparms->hdr.status));
+ return -EIO;
+ }
+
+ memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn));
+ memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn));
+
+ efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3],
+ sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]);
+ efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3],
+ sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]);
+
+ return 0;
+}
+
+int
+sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
+ void __iomem *reg[])
+{
+ u32 intf = U32_MAX;
+ u32 pci_class_rev = 0;
+ u32 rev_id = 0;
+ u32 family = 0;
+ u32 asic_id = 0;
+ u32 i;
+ struct sli4_asic_entry_t *asic;
+
+ memset(sli4, 0, sizeof(struct sli4));
+
+ sli4->os = os;
+ sli4->pci = pdev;
+
+ for (i = 0; i < 6; i++)
+ sli4->reg[i] = reg[i];
+ /*
+ * Read the SLI_INTF register to discover the register layout
+ * and other capability information
+ */
+ if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf))
+ return -EIO;
+
+ if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) {
+ efc_log_err(sli4, "SLI_INTF is not valid\n");
+ return -EIO;
+ }
+
+ /* driver only supports SLI-4 */
+ if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) {
+ efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf);
+ return -EIO;
+ }
+
+ sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK;
+
+ sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK;
+ efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(sli4),
+ sli_reg_read_err1(sli4),
+ sli_reg_read_err2(sli4));
+
+ /*
+ * set the ASIC type and revision
+ */
+ if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev))
+ return -EIO;
+
+ rev_id = pci_class_rev & 0xff;
+ family = sli4->sli_family;
+ if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
+ if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id))
+ family = asic_id & SLI4_ASIC_GEN_MASK;
+ }
+
+ for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table);
+ i++, asic++) {
+ if (rev_id == asic->rev_id && family == asic->family) {
+ sli4->asic_type = family;
+ sli4->asic_rev = rev_id;
+ break;
+ }
+ }
+ /* Fail if no matching asic type/rev was found */
+ if (!sli4->asic_type) {
+ efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n",
+ family, rev_id);
+ return -EIO;
+ }
+
+ /*
+ * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
+ * entry, a CQ with a single 16 byte entry, and no event queue.
+ * Alignment must be 16 bytes as the low order address bits in the
+ * address register are also control / status.
+ */
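+ /* Illustrative: 256-byte mailbox entry + 16-byte MCQE = 272 bytes below. */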
+ sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
+ sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
+ &sli4->bmbx.phys, GFP_DMA);
+ if (!sli4->bmbx.virt) {
+ memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
+ return -EIO;
+ }
+
+ if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
+ efc_log_err(sli4, "bad alignment for bootstrap mailbox\n");
+ return -EIO;
+ }
+
+ efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
+ upper_32_bits(sli4->bmbx.phys),
+ lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size);
+
+ /* 4096 is arbitrary. What should this value actually be? */
+ sli4->vpd_data.size = 4096;
+ sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
+ sli4->vpd_data.size,
+ &sli4->vpd_data.phys,
+ GFP_DMA);
+ if (!sli4->vpd_data.virt) {
+ memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
+ /* Note that failure isn't fatal in this specific case */
+ efc_log_info(sli4, "VPD buffer allocation failed\n");
+ }
+
+ if (!sli_fw_init(sli4)) {
+ efc_log_err(sli4, "FW initialization failed\n");
+ return -EIO;
+ }
+
+ /*
+ * Set one of fcpi (initiator), fcpt (target), or fcpc (combined) to
+ * true, in addition to any other desired features.
+ */
+ sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV |
+ SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF |
+ SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR |
+ SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH |
+ SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI |
+ SLI4_REQFEAT_MRQP);
+
+ /* use performance hints if available */
+ if (sli4->params.perf_hint)
+ sli4->features |= SLI4_REQFEAT_PERFH;
+
+ if (sli_request_features(sli4, &sli4->features, true))
+ return -EIO;
+
+ if (sli_get_config(sli4))
+ return -EIO;
+
+ return 0;
+}
+
+int
+sli_init(struct sli4 *sli4)
+{
+ if (sli4->params.has_extents) {
+ efc_log_info(sli4, "extend allocation not supported\n");
+ return -EIO;
+ }
+
+ sli4->features &= (~SLI4_REQFEAT_HLM);
+ sli4->features &= (~SLI4_REQFEAT_RXSEQ);
+ sli4->features &= (~SLI4_REQFEAT_RXRI);
+
+ if (sli_request_features(sli4, &sli4->features, false))
+ return -EIO;
+
+ return 0;
+}
+
+int
+sli_reset(struct sli4 *sli4)
+{
+ u32 i;
+
+ if (!sli_fw_init(sli4)) {
+ efc_log_crit(sli4, "FW initialization failed\n");
+ return -EIO;
+ }
+
+ kfree(sli4->ext[0].base);
+ sli4->ext[0].base = NULL;
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ kfree(sli4->ext[i].use_map);
+ sli4->ext[i].use_map = NULL;
+ sli4->ext[i].base = NULL;
+ }
+
+ return sli_get_config(sli4);
+}
+
+int
+sli_fw_reset(struct sli4 *sli4)
+{
+ /*
+ * Firmware must be ready before issuing the reset.
+ */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "FW status is NOT ready\n");
+ return -EIO;
+ }
+
+ /* Lancer uses PHYDEV_CONTROL */
+ writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
+
+ /* wait for the FW to become ready after the reset */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "Failed to be ready after firmware reset\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+sli_teardown(struct sli4 *sli4)
+{
+ u32 i;
+
+ kfree(sli4->ext[0].base);
+ sli4->ext[0].base = NULL;
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ sli4->ext[i].base = NULL;
+
+ kfree(sli4->ext[i].use_map);
+ sli4->ext[i].use_map = NULL;
+ }
+
+ if (!sli_sliport_reset(sli4))
+ efc_log_err(sli4, "FW deinitialization failed\n");
+
+ dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size,
+ sli4->vpd_data.virt, sli4->vpd_data.phys);
+ memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
+
+ dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size,
+ sli4->bmbx.virt, sli4->bmbx.phys);
+ memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
+}
+
+int
+sli_callback(struct sli4 *sli4, enum sli4_callback which,
+ void *func, void *arg)
+{
+ if (!func) {
+ efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n",
+ sli4, which, func);
+ return -EIO;
+ }
+
+ switch (which) {
+ case SLI4_CB_LINK:
+ sli4->link = func;
+ sli4->link_arg = arg;
+ break;
+ default:
+ efc_log_info(sli4, "unknown callback %#x\n", which);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq,
+ u32 num_eq, u32 shift, u32 delay_mult)
+{
+ sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq,
+ shift, delay_mult);
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
+ return -EIO;
+ }
+ if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad status MODIFY EQ DELAY\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype,
+ u32 *rid, u32 *index)
+{
+ int rc = 0;
+ u32 size;
+ u32 ext_idx;
+ u32 item_idx;
+ u32 position;
+
+ *rid = U32_MAX;
+ *index = U32_MAX;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ position =
+ find_first_zero_bit(sli4->ext[rtype].use_map,
+ sli4->ext[rtype].map_size);
+ if (position >= sli4->ext[rtype].map_size) {
+ efc_log_err(sli4, "out of resource %d (alloc=%d)\n",
+ rtype, sli4->ext[rtype].n_alloc);
+ rc = -EIO;
+ break;
+ }
+ set_bit(position, sli4->ext[rtype].use_map);
+ *index = position;
+
+ size = sli4->ext[rtype].size;
+
+ ext_idx = *index / size;
+ item_idx = *index % size;
+
+ *rid = sli4->ext[rtype].base[ext_idx] + item_idx;
+
+ sli4->ext[rtype].n_alloc++;
+ break;
+ default:
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int
+sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid)
+{
+ int rc = -EIO;
+ u32 x;
+ u32 size, *base;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ /*
+ * Figure out which extent contains the resource ID. I.e. find
+ * the extent such that
+ * extent->base <= resource ID < extent->base + extent->size
+ */
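+ /*
+ * Illustrative example with made-up values: two extents of size 64
+ * based at 128 and 512; rid 530 lands in the second extent, so the
+ * bit cleared below is (1 * 64) + (530 - 512) = 82.
+ */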
+ base = sli4->ext[rtype].base;
+ size = sli4->ext[rtype].size;
+
+ /*
+ * In the case of FW reset, this may be cleared
+ * but the force_free path will still attempt to
+ * free the resource. Prevent a NULL pointer access.
+ */
+ if (!base)
+ break;
+
+ for (x = 0; x < sli4->ext[rtype].number; x++) {
+ if ((rid < base[x] || (rid >= (base[x] + size))))
+ continue;
+
+ rid -= base[x];
+ clear_bit((x * size) + rid, sli4->ext[rtype].use_map);
+ rc = 0;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int
+sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype)
+{
+ int rc = -EIO;
+ u32 i;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ for (i = 0; i < sli4->ext[rtype].map_size; i++)
+ clear_bit(i, sli4->ext[rtype].use_map);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int sli_raise_ue(struct sli4 *sli4, u8 dump)
+{
+ u32 val = 0;
+
+ if (dump == SLI4_FUNC_DESC_DUMP) {
+ val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP;
+ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
+ } else {
+ val = SLI4_PHYDEV_CTRL_FRST;
+
+ if (dump == SLI4_CHIP_LEVEL_DUMP)
+ val |= SLI4_PHYDEV_CTRL_DD;
+ writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
+ }
+
+ return 0;
+}
+
+int sli_dump_is_ready(struct sli4 *sli4)
+{
+ int rc = SLI4_DUMP_READY_STATUS_NOT_READY;
+ u32 port_val;
+ u32 bmbx_val;
+
+ /*
+ * Ensure that the port is ready AND the mailbox is
+ * ready before signaling that the dump is ready to go.
+ */
+ port_val = sli_reg_read_status(sli4);
+ bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG);
+
+ if ((bmbx_val & SLI4_BMBX_RDY) &&
+ (port_val & SLI4_PORT_STATUS_RDY)) {
+ if (port_val & SLI4_PORT_STATUS_DIP)
+ rc = SLI4_DUMP_READY_STATUS_DD_PRESENT;
+ else if (port_val & SLI4_PORT_STATUS_FDP)
+ rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT;
+ }
+
+ return rc;
+}
+
+bool sli_reset_required(struct sli4 *sli4)
+{
+ u32 val;
+
+ val = sli_reg_read_status(sli4);
+ return (val & SLI4_PORT_STATUS_RN);
+}
+
+int
+sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
+ u32 xri_count, struct efc_dma *page0[],
+ struct efc_dma *page1[], struct efc_dma *dma)
+{
+ struct sli4_rqst_post_sgl_pages *post = NULL;
+ u32 i;
+ __le32 req_len;
+
+ post = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma);
+ if (!post)
+ return -EIO;
+
+ /* payload size calculation */
+ /* 4 = xri_start + xri_count */
+ /* xri_count = # of XRIs registered */
+ /* sizeof(uint64_t) = physical address size */
+ /* 2 = # of physical addresses per page set */
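+ /* e.g. (illustrative): xri_count = 2 gives req_len = 4 + 2 * (8 * 2) = 36 */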
+ req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2)));
+ sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC,
+ CMD_V0, req_len);
+ post->xri_start = cpu_to_le16(xri);
+ post->xri_count = cpu_to_le16(xri_count);
+
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page0_low =
+ cpu_to_le32(lower_32_bits(page0[i]->phys));
+ post->page_set[i].page0_high =
+ cpu_to_le32(upper_32_bits(page0[i]->phys));
+ }
+
+ if (page1) {
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page1_low =
+ cpu_to_le32(lower_32_bits(page1[i]->phys));
+ post->page_set[i].page1_high =
+ cpu_to_le32(upper_32_bits(page1[i]->phys));
+ }
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
+ u16 rpi, struct efc_dma *payload_dma)
+{
+ struct sli4_rqst_post_hdr_templates *req = NULL;
+ uintptr_t phys = 0;
+ u32 i = 0;
+ u32 page_count, payload_size;
+
+ page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
+
+ payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) +
+ (page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr));
+
+ if (page_count > 16) {
+ /*
+ * We can't fit more than 16 descriptors into an embedded mbox
+ * command, it has to be non-embedded
+ */
+ payload_dma->size = payload_size;
+ payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
+ payload_dma->size,
+ &payload_dma->phys, GFP_DMA);
+ if (!payload_dma->virt) {
+ memset(payload_dma, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "mbox payload memory allocation fail\n");
+ return -EIO;
+ }
+ req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma);
+ } else {
+ req = sli_config_cmd_init(sli4, buf, payload_size, NULL);
+ }
+
+ if (!req)
+ return -EIO;
+
+ if (rpi == U16_MAX)
+ rpi = sli4->ext[SLI4_RSRC_RPI].base[0];
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES,
+ SLI4_SUBSYSTEM_FC, CMD_V0,
+ SLI4_RQST_PYLD_LEN(post_hdr_templates));
+
+ req->rpi_offset = cpu_to_le16(rpi);
+ req->page_count = cpu_to_le16(page_count);
+ phys = dma->phys;
+ for (i = 0; i < page_count; i++) {
+ req->page_descriptor[i].low = cpu_to_le32(lower_32_bits(phys));
+ req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys));
+
+ phys += SLI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+u32
+sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi)
+{
+ u32 bytes = 0;
+
+ /* Check if header templates needed */
+ if (sli4->params.hdr_template_req)
+ /* round up to a page */
+ bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE);
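+ /*
+ * e.g. (illustrative, assuming 64-byte templates and 4 KiB pages):
+ * n_rpi = 100 gives round_up(100 * 64, 4096) = 8192 bytes
+ */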
+
+ return bytes;
+}
+
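+/*
+ * Translate a WCQE completion status into a printable label; intended
+ * for diagnostic logging, e.g. (illustrative)
+ * efc_log_info(sli4, "WCQE: %s\n", sli_fc_get_status_string(status));
+ */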
+const char *
+sli_fc_get_status_string(u32 status)
+{
+ static struct {
+ u32 code;
+ const char *label;
+ } lookup[] = {
+ {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"},
+ {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"},
+ {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"},
+ {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"},
+ {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"},
+ {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
+ "RQ_INSUFF_BUF_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DISC"},
+ {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"},
+ {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"},
+ {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
+ "RQ_INSUFF_XRI_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
+ {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"},
+ {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"},
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(lookup); i++) {
+ if (status == lookup[i].code)
+ return lookup[i].label;
+ }
+ return "unknown";
+}
diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h
new file mode 100644
index 000000000000..ee2a9e65a88d
--- /dev/null
+++ b/drivers/scsi/elx/libefc_sli/sli4.h
@@ -0,0 +1,4132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/*
+ * All common SLI-4 structures and function prototypes.
+ */
+
+#ifndef _SLI4_H
+#define _SLI4_H
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "../include/efc_common.h"
+
+/*************************************************************************
+ * Common SLI-4 register offsets and field definitions
+ */
+
+/* SLI_INTF - SLI Interface Definition Register */
+#define SLI4_INTF_REG 0x0058
+enum sli4_intf {
+ SLI4_INTF_REV_SHIFT = 4,
+ SLI4_INTF_REV_MASK = 0xf0,
+
+ SLI4_INTF_REV_S3 = 0x30,
+ SLI4_INTF_REV_S4 = 0x40,
+
+ SLI4_INTF_FAMILY_SHIFT = 8,
+ SLI4_INTF_FAMILY_MASK = 0x0f00,
+
+ SLI4_FAMILY_CHECK_ASIC_TYPE = 0x0f00,
+
+ SLI4_INTF_IF_TYPE_SHIFT = 12,
+ SLI4_INTF_IF_TYPE_MASK = 0xf000,
+
+ SLI4_INTF_IF_TYPE_2 = 0x2000,
+ SLI4_INTF_IF_TYPE_6 = 0x6000,
+
+ SLI4_INTF_VALID_SHIFT = 29,
+ SLI4_INTF_VALID_MASK = 0xe0000000,
+
+ SLI4_INTF_VALID_VALUE = 0xc0000000,
+};
+
+/* ASIC_ID - SLI ASIC Type and Revision Register */
+#define SLI4_ASIC_ID_REG 0x009c
+enum sli4_asic {
+ SLI4_ASIC_GEN_SHIFT = 8,
+ SLI4_ASIC_GEN_MASK = 0xff00,
+ SLI4_ASIC_GEN_5 = 0x0b00,
+ SLI4_ASIC_GEN_6 = 0x0c00,
+ SLI4_ASIC_GEN_7 = 0x0d00,
+};
+
+enum sli4_acic_revisions {
+ SLI4_ASIC_REV_A0 = 0x00,
+ SLI4_ASIC_REV_A1 = 0x01,
+ SLI4_ASIC_REV_A2 = 0x02,
+ SLI4_ASIC_REV_A3 = 0x03,
+ SLI4_ASIC_REV_B0 = 0x10,
+ SLI4_ASIC_REV_B1 = 0x11,
+ SLI4_ASIC_REV_B2 = 0x12,
+ SLI4_ASIC_REV_C0 = 0x20,
+ SLI4_ASIC_REV_C1 = 0x21,
+ SLI4_ASIC_REV_C2 = 0x22,
+ SLI4_ASIC_REV_D0 = 0x30,
+};
+
+struct sli4_asic_entry_t {
+ u32 rev_id;
+ u32 family;
+};
+
+/* BMBX - Bootstrap Mailbox Register */
+#define SLI4_BMBX_REG 0x0160
+enum sli4_bmbx {
+ SLI4_BMBX_MASK_HI = 0x3,
+ SLI4_BMBX_MASK_LO = 0xf,
+ SLI4_BMBX_RDY = 1 << 0,
+ SLI4_BMBX_HI = 1 << 1,
+ SLI4_BMBX_SIZE = 256,
+};
+
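+/*
+ * The two helpers below split the 16-byte-aligned bootstrap mailbox
+ * physical address across the two BMBX doorbell writes: the "hi" write
+ * carries the upper address bits and sets SLI4_BMBX_HI, while the "lo"
+ * write carries the remaining address bits shifted down by two so the
+ * low-order control/status bits stay clear.
+ */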
+static inline u32
+sli_bmbx_write_hi(u64 addr) {
+ u32 val;
+
+ val = upper_32_bits(addr) & ~SLI4_BMBX_MASK_HI;
+ val |= SLI4_BMBX_HI;
+
+ return val;
+}
+
+static inline u32
+sli_bmbx_write_lo(u64 addr) {
+ u32 val;
+
+ val = (upper_32_bits(addr) & SLI4_BMBX_MASK_HI) << 30;
+ val |= ((addr) & ~SLI4_BMBX_MASK_LO) >> 2;
+
+ return val;
+}
+
+/* SLIPORT_CONTROL - SLI Port Control Register */
+#define SLI4_PORT_CTRL_REG 0x0408
+enum sli4_port_ctrl {
+ SLI4_PORT_CTRL_IP = 1u << 27,
+ SLI4_PORT_CTRL_IDIS = 1u << 22,
+ SLI4_PORT_CTRL_FDD = 1u << 31,
+};
+
+/* SLI4_SLIPORT_ERROR - SLI Port Error Register */
+#define SLI4_PORT_ERROR1 0x040c
+#define SLI4_PORT_ERROR2 0x0410
+
+/* EQCQ_DOORBELL - EQ and CQ Doorbell Register */
+#define SLI4_EQCQ_DB_REG 0x120
+enum sli4_eqcq_e {
+ SLI4_EQ_ID_LO_MASK = 0x01ff,
+
+ SLI4_CQ_ID_LO_MASK = 0x03ff,
+
+ SLI4_EQCQ_CI_EQ = 0x0200,
+
+ SLI4_EQCQ_QT_EQ = 0x00000400,
+ SLI4_EQCQ_QT_CQ = 0x00000000,
+
+ SLI4_EQCQ_ID_HI_SHIFT = 11,
+ SLI4_EQCQ_ID_HI_MASK = 0xf800,
+
+ SLI4_EQCQ_NUM_SHIFT = 16,
+ SLI4_EQCQ_NUM_MASK = 0x1fff0000,
+
+ SLI4_EQCQ_ARM = 0x20000000,
+ SLI4_EQCQ_UNARM = 0x00000000,
+};
+
+static inline u32
+sli_format_eq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = (id & SLI4_EQ_ID_LO_MASK) | SLI4_EQCQ_QT_EQ;
+ reg |= (((id) >> 9) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK;
+ reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK;
+ reg |= arm | SLI4_EQCQ_CI_EQ;
+
+ return reg;
+}
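+
+/*
+ * Example (illustrative): sli_format_eq_db_data(3, 0x21, SLI4_EQCQ_ARM)
+ * returns 0x20030621: EQ id 0x21 in the low bits, the EQ qt/ci bits,
+ * a popped count of 3, and the arm bit.
+ */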
+
+static inline u32
+sli_format_cq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = ((id) & SLI4_CQ_ID_LO_MASK) | SLI4_EQCQ_QT_CQ;
+ reg |= (((id) >> 10) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK;
+ reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* EQ_DOORBELL - EQ Doorbell Register for IF_TYPE = 6*/
+#define SLI4_IF6_EQ_DB_REG 0x120
+enum sli4_eq_e {
+ SLI4_IF6_EQ_ID_MASK = 0x0fff,
+
+ SLI4_IF6_EQ_NUM_SHIFT = 16,
+ SLI4_IF6_EQ_NUM_MASK = 0x1fff0000,
+};
+
+static inline u32
+sli_format_if6_eq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = id & SLI4_IF6_EQ_ID_MASK;
+ reg |= (num_popped << SLI4_IF6_EQ_NUM_SHIFT) & SLI4_IF6_EQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* CQ_DOORBELL - CQ Doorbell Register for IF_TYPE = 6 */
+#define SLI4_IF6_CQ_DB_REG 0xc0
+enum sli4_cq_e {
+ SLI4_IF6_CQ_ID_MASK = 0xffff,
+
+ SLI4_IF6_CQ_NUM_SHIFT = 16,
+ SLI4_IF6_CQ_NUM_MASK = 0x1fff0000,
+};
+
+static inline u32
+sli_format_if6_cq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = id & SLI4_IF6_CQ_ID_MASK;
+ reg |= ((num_popped) << SLI4_IF6_CQ_NUM_SHIFT) & SLI4_IF6_CQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* MQ_DOORBELL - MQ Doorbell Register */
+#define SLI4_MQ_DB_REG 0x0140
+#define SLI4_IF6_MQ_DB_REG 0x0160
+enum sli4_mq_e {
+ SLI4_MQ_ID_MASK = 0xffff,
+
+ SLI4_MQ_NUM_SHIFT = 16,
+ SLI4_MQ_NUM_MASK = 0x3fff0000,
+};
+
+static inline u32
+sli_format_mq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_MQ_ID_MASK;
+ reg |= (1 << SLI4_MQ_NUM_SHIFT) & SLI4_MQ_NUM_MASK;
+
+ return reg;
+}
+
+/* RQ_DOORBELL - RQ Doorbell Register */
+#define SLI4_RQ_DB_REG 0x0a0
+#define SLI4_IF6_RQ_DB_REG 0x0080
+enum sli4_rq_e {
+ SLI4_RQ_DB_ID_MASK = 0xffff,
+
+ SLI4_RQ_DB_NUM_SHIFT = 16,
+ SLI4_RQ_DB_NUM_MASK = 0x3fff0000,
+};
+
+static inline u32
+sli_format_rq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_RQ_DB_ID_MASK;
+ reg |= (1 << SLI4_RQ_DB_NUM_SHIFT) & SLI4_RQ_DB_NUM_MASK;
+
+ return reg;
+}
+
+/* WQ_DOORBELL - WQ Doorbell Register */
+#define SLI4_IO_WQ_DB_REG 0x040
+#define SLI4_IF6_WQ_DB_REG 0x040
+enum sli4_wq_e {
+ SLI4_WQ_ID_MASK = 0xffff,
+
+ SLI4_WQ_IDX_SHIFT = 16,
+ SLI4_WQ_IDX_MASK = 0xff0000,
+
+ SLI4_WQ_NUM_SHIFT = 24,
+ SLI4_WQ_NUM_MASK = 0x0ff00000,
+};
+
+static inline u32
+sli_format_wq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_WQ_ID_MASK;
+ reg |= (1 << SLI4_WQ_NUM_SHIFT) & SLI4_WQ_NUM_MASK;
+
+ return reg;
+}
+
+/* SLIPORT_STATUS - SLI Port Status Register */
+#define SLI4_PORT_STATUS_REGOFF 0x0404
+enum sli4_port_status {
+ SLI4_PORT_STATUS_FDP = 1u << 21,
+ SLI4_PORT_STATUS_RDY = 1u << 23,
+ SLI4_PORT_STATUS_RN = 1u << 24,
+ SLI4_PORT_STATUS_DIP = 1u << 25,
+ SLI4_PORT_STATUS_OTI = 1u << 29,
+ SLI4_PORT_STATUS_ERR = 1u << 31,
+};
+
+#define SLI4_PHYDEV_CTRL_REG 0x0414
+#define SLI4_PHYDEV_CTRL_FRST (1 << 1)
+#define SLI4_PHYDEV_CTRL_DD (1 << 2)
+
+/* Register name enums */
+enum sli4_regname_en {
+ SLI4_REG_BMBX,
+ SLI4_REG_EQ_DOORBELL,
+ SLI4_REG_CQ_DOORBELL,
+ SLI4_REG_RQ_DOORBELL,
+ SLI4_REG_IO_WQ_DOORBELL,
+ SLI4_REG_MQ_DOORBELL,
+ SLI4_REG_PHYSDEV_CONTROL,
+ SLI4_REG_PORT_CONTROL,
+ SLI4_REG_PORT_ERROR1,
+ SLI4_REG_PORT_ERROR2,
+ SLI4_REG_PORT_SEMAPHORE,
+ SLI4_REG_PORT_STATUS,
+ SLI4_REG_UNKWOWN /* must be last */
+};
+
+struct sli4_reg {
+ u32 rset;
+ u32 off;
+};
+
+struct sli4_dmaaddr {
+ __le32 low;
+ __le32 high;
+};
+
+/*
+ * A 3-word Buffer Descriptor Entry: the address occupies the first
+ * two words and the length the last word.
+ */
+struct sli4_bufptr {
+ struct sli4_dmaaddr addr;
+ __le32 length;
+};
+
+/* Buffer Descriptor Entry (BDE) */
+enum sli4_bde_e {
+ SLI4_BDE_LEN_MASK = 0x00ffffff,
+ SLI4_BDE_TYPE_MASK = 0xff000000,
+};
+
+struct sli4_bde {
+ __le32 bde_type_buflen;
+ union {
+ struct sli4_dmaaddr data;
+ struct {
+ __le32 offset;
+ __le32 rsvd2;
+ } imm;
+ struct sli4_dmaaddr blp;
+ } u;
+};
+
+/* Buffer Descriptors */
+enum sli4_bde_type {
+ SLI4_BDE_TYPE_SHIFT = 24,
+ SLI4_BDE_TYPE_64 = 0x00, /* Generic 64-bit data */
+ SLI4_BDE_TYPE_IMM = 0x01, /* Immediate data */
+ SLI4_BDE_TYPE_BLP = 0x40, /* Buffer List Pointer */
+};
+
+#define SLI4_BDE_TYPE_VAL(type) \
+ (SLI4_BDE_TYPE_##type << SLI4_BDE_TYPE_SHIFT)
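+/*
+ * e.g. SLI4_BDE_TYPE_VAL(BLP) expands to (SLI4_BDE_TYPE_BLP << 24) and
+ * can be OR-ed with a length into sli4_bde.bde_type_buflen.
+ */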
+
+/* Scatter-Gather Entry (SGE) */
+#define SLI4_SGE_MAX_RESERVED 3
+
+enum sli4_sge_type {
+ /* DW2 */
+ SLI4_SGE_DATA_OFFSET_MASK = 0x07ffffff,
+ /*DW2W1*/
+ SLI4_SGE_TYPE_SHIFT = 27,
+ SLI4_SGE_TYPE_MASK = 0x78000000,
+ /*SGE Types*/
+ SLI4_SGE_TYPE_DATA = 0x00,
+ SLI4_SGE_TYPE_DIF = 0x04, /* Data Integrity Field */
+ SLI4_SGE_TYPE_LSP = 0x05, /* List Segment Pointer */
+ SLI4_SGE_TYPE_PEDIF = 0x06, /* Post Encryption Engine DIF */
+ SLI4_SGE_TYPE_PESEED = 0x07, /* Post Encryption DIF Seed */
+ SLI4_SGE_TYPE_DISEED = 0x08, /* DIF Seed */
+ SLI4_SGE_TYPE_ENC = 0x09, /* Encryption */
+ SLI4_SGE_TYPE_ATM = 0x0a, /* DIF Application Tag Mask */
+ SLI4_SGE_TYPE_SKIP = 0x0c, /* SKIP */
+
+ SLI4_SGE_LAST = 1u << 31,
+};
+
+struct sli4_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 buffer_length;
+};
+
+/* T10 DIF Scatter-Gather Entry (SGE) */
+struct sli4_dif_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 rsvd12;
+};
+
+/* Data Integrity Seed (DISEED) SGE */
+enum sli4_diseed_sge_flags {
+ /* DW2W1 */
+ SLI4_DISEED_SGE_HS = 1 << 2,
+ SLI4_DISEED_SGE_WS = 1 << 3,
+ SLI4_DISEED_SGE_IC = 1 << 4,
+ SLI4_DISEED_SGE_ICS = 1 << 5,
+ SLI4_DISEED_SGE_ATRT = 1 << 6,
+ SLI4_DISEED_SGE_AT = 1 << 7,
+ SLI4_DISEED_SGE_FAT = 1 << 8,
+ SLI4_DISEED_SGE_NA = 1 << 9,
+ SLI4_DISEED_SGE_HI = 1 << 10,
+
+ /* DW3W1 */
+ SLI4_DISEED_SGE_BS_MASK = 0x0007,
+ SLI4_DISEED_SGE_AI = 1 << 3,
+ SLI4_DISEED_SGE_ME = 1 << 4,
+ SLI4_DISEED_SGE_RE = 1 << 5,
+ SLI4_DISEED_SGE_CE = 1 << 6,
+ SLI4_DISEED_SGE_NR = 1 << 7,
+
+ SLI4_DISEED_SGE_OP_RX_SHIFT = 8,
+ SLI4_DISEED_SGE_OP_RX_MASK = 0x0f00,
+ SLI4_DISEED_SGE_OP_TX_SHIFT = 12,
+ SLI4_DISEED_SGE_OP_TX_MASK = 0xf000,
+};
+
+/* Opcode values */
+enum sli4_diseed_sge_opcodes {
+ SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_NODIF,
+ SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_NODIF,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_RAW_OUT_RAW,
+};
+
+#define SLI4_DISEED_SGE_OP_RX_VALUE(stype) \
+ (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_RX_SHIFT)
+#define SLI4_DISEED_SGE_OP_TX_VALUE(stype) \
+ (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_TX_SHIFT)
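+/*
+ * e.g. SLI4_DISEED_SGE_OP_RX_VALUE(IN_CRC_OUT_CRC) places opcode 4 in
+ * the receive opcode field of dw3w1_flags (bits 11:8).
+ */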
+
+struct sli4_diseed_sge {
+ __le32 ref_tag_cmp;
+ __le32 ref_tag_repl;
+ __le16 app_tag_repl;
+ __le16 dw2w1_flags;
+ __le16 app_tag_cmp;
+ __le16 dw3w1_flags;
+};
+
+/* List Segment Pointer Scatter-Gather Entry (SGE) */
+#define SLI4_LSP_SGE_SEGLEN 0x00ffffff
+
+struct sli4_lsp_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 dw3_seglen;
+};
+
+enum sli4_eqe_e {
+ SLI4_EQE_VALID = 1,
+ SLI4_EQE_MJCODE = 0xe,
+ SLI4_EQE_MNCODE = 0xfff0,
+};
+
+struct sli4_eqe {
+ __le16 dw0w0_flags;
+ __le16 resource_id;
+};
+
+#define SLI4_MAJOR_CODE_STANDARD 0
+#define SLI4_MAJOR_CODE_SENTINEL 1
+
+/* Sentinel EQE indicating the EQ is full */
+#define SLI4_EQE_STATUS_EQ_FULL 2
+
+enum sli4_mcqe_e {
+ SLI4_MCQE_CONSUMED = 1u << 27,
+ SLI4_MCQE_COMPLETED = 1u << 28,
+ SLI4_MCQE_AE = 1u << 30,
+ SLI4_MCQE_VALID = 1u << 31,
+};
+
+/* Entry was consumed but not completed */
+#define SLI4_MCQE_STATUS_NOT_COMPLETED -2
+
+struct sli4_mcqe {
+ __le16 completion_status;
+ __le16 extended_status;
+ __le32 mqe_tag_low;
+ __le32 mqe_tag_high;
+ __le32 dw3_flags;
+};
+
+enum sli4_acqe_e {
+ SLI4_ACQE_AE = 1 << 6, /* async event - this is an ACQE */
+ SLI4_ACQE_VAL = 1 << 7, /* valid - contents of CQE are valid */
+};
+
+struct sli4_acqe {
+ __le32 event_data[3];
+ u8 rsvd12;
+ u8 event_code;
+ u8 event_type;
+ u8 ae_val;
+};
+
+enum sli4_acqe_event_code {
+ SLI4_ACQE_EVENT_CODE_LINK_STATE = 0x01,
+ SLI4_ACQE_EVENT_CODE_FIP = 0x02,
+ SLI4_ACQE_EVENT_CODE_DCBX = 0x03,
+ SLI4_ACQE_EVENT_CODE_ISCSI = 0x04,
+ SLI4_ACQE_EVENT_CODE_GRP_5 = 0x05,
+ SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT = 0x10,
+ SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT = 0x11,
+ SLI4_ACQE_EVENT_CODE_VF_EVENT = 0x12,
+ SLI4_ACQE_EVENT_CODE_MR_EVENT = 0x13,
+};
+
+enum sli4_qtype {
+ SLI4_QTYPE_EQ,
+ SLI4_QTYPE_CQ,
+ SLI4_QTYPE_MQ,
+ SLI4_QTYPE_WQ,
+ SLI4_QTYPE_RQ,
+ SLI4_QTYPE_MAX, /* must be last */
+};
+
+#define SLI4_USER_MQ_COUNT 1
+#define SLI4_MAX_CQ_SET_COUNT 16
+#define SLI4_MAX_RQ_SET_COUNT 16
+
+enum sli4_qentry {
+ SLI4_QENTRY_ASYNC,
+ SLI4_QENTRY_MQ,
+ SLI4_QENTRY_RQ,
+ SLI4_QENTRY_WQ,
+ SLI4_QENTRY_WQ_RELEASE,
+ SLI4_QENTRY_OPT_WRITE_CMD,
+ SLI4_QENTRY_OPT_WRITE_DATA,
+ SLI4_QENTRY_XABT,
+ SLI4_QENTRY_MAX /* must be last */
+};
+
+enum sli4_queue_flags {
+ SLI4_QUEUE_FLAG_MQ = 1 << 0, /* CQ has MQ/Async completion */
+ SLI4_QUEUE_FLAG_HDR = 1 << 1, /* RQ for packet headers */
+ SLI4_QUEUE_FLAG_RQBATCH = 1 << 2, /* RQ index increment by 8 */
+};
+
+/* Generic Command Request header */
+enum sli4_cmd_version {
+ CMD_V0,
+ CMD_V1,
+ CMD_V2,
+};
+
+struct sli4_rqst_hdr {
+ u8 opcode;
+ u8 subsystem;
+ __le16 rsvd2;
+ __le32 timeout;
+ __le32 request_length;
+ __le32 dw3_version;
+};
+
+/* Generic Command Response header */
+struct sli4_rsp_hdr {
+ u8 opcode;
+ u8 subsystem;
+ __le16 rsvd2;
+ u8 status;
+ u8 additional_status;
+ __le16 rsvd6;
+ __le32 response_length;
+ __le32 actual_response_length;
+};
+
+#define SLI4_QUEUE_RQ_BATCH 8
+
+#define SZ_DMAADDR sizeof(struct sli4_dmaaddr)
+#define SLI4_RQST_CMDSZ(stype) sizeof(struct sli4_rqst_##stype)
+
+#define SLI4_RQST_PYLD_LEN(stype) \
+ cpu_to_le32(sizeof(struct sli4_rqst_##stype) - \
+ sizeof(struct sli4_rqst_hdr))
+
+#define SLI4_RQST_PYLD_LEN_VAR(stype, varpyld) \
+ cpu_to_le32((sizeof(struct sli4_rqst_##stype) + \
+ varpyld) - sizeof(struct sli4_rqst_hdr))
+
+#define SLI4_CFG_PYLD_LENGTH(stype) \
+ max(sizeof(struct sli4_rqst_##stype), \
+ sizeof(struct sli4_rsp_##stype))
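+/*
+ * For instance, SLI4_RQST_PYLD_LEN(cmn_destroy_q) is the size of
+ * struct sli4_rqst_cmn_destroy_q minus the common request header, and
+ * SLI4_CFG_PYLD_LENGTH(cmn_destroy_q) is the larger of the request and
+ * response structure sizes (illustrative; any stype defining both a
+ * rqst and a rsp structure works).
+ */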
+
+enum sli4_create_cqv2_e {
+ /* DW5_flags values*/
+ SLI4_CREATE_CQV2_CLSWM_MASK = 0x00003000,
+ SLI4_CREATE_CQV2_NODELAY = 0x00004000,
+ SLI4_CREATE_CQV2_AUTOVALID = 0x00008000,
+ SLI4_CREATE_CQV2_CQECNT_MASK = 0x18000000,
+ SLI4_CREATE_CQV2_VALID = 0x20000000,
+ SLI4_CREATE_CQV2_EVT = 0x80000000,
+ /* DW6W1_flags values*/
+ SLI4_CREATE_CQV2_ARM = 0x8000,
+};
+
+struct sli4_rqst_cmn_create_cq_v2 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 page_size;
+ u8 rsvd19;
+ __le32 dw5_flags;
+ __le16 eq_id;
+ __le16 dw6w1_arm;
+ __le16 cqe_count;
+ __le16 rsvd30;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_phys_addr[0];
+};
+
+enum sli4_create_cqset_e {
+ /* DW5_flags values*/
+ SLI4_CREATE_CQSETV0_CLSWM_MASK = 0x00003000,
+ SLI4_CREATE_CQSETV0_NODELAY = 0x00004000,
+ SLI4_CREATE_CQSETV0_AUTOVALID = 0x00008000,
+ SLI4_CREATE_CQSETV0_CQECNT_MASK = 0x18000000,
+ SLI4_CREATE_CQSETV0_VALID = 0x20000000,
+ SLI4_CREATE_CQSETV0_EVT = 0x80000000,
+ /* DW5W1_flags values */
+ SLI4_CREATE_CQSETV0_CQE_COUNT = 0x7fff,
+ SLI4_CREATE_CQSETV0_ARM = 0x8000,
+};
+
+struct sli4_rqst_cmn_create_cq_set_v0 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 page_size;
+ u8 rsvd19;
+ __le32 dw5_flags;
+ __le16 num_cq_req;
+ __le16 dw6w1_flags;
+ __le16 eq_id[16];
+ struct sli4_dmaaddr page_phys_addr[0];
+};
+
+/* CQE count */
+enum sli4_cq_cnt {
+ SLI4_CQ_CNT_256,
+ SLI4_CQ_CNT_512,
+ SLI4_CQ_CNT_1024,
+ SLI4_CQ_CNT_LARGE,
+};
+
+#define SLI4_CQ_CNT_SHIFT 27
+#define SLI4_CQ_CNT_VAL(type) (SLI4_CQ_CNT_##type << SLI4_CQ_CNT_SHIFT)
+
+#define SLI4_CQE_BYTES (4 * sizeof(u32))
+
+#define SLI4_CREATE_CQV2_MAX_PAGES 8
+
+/* Generic Common Create EQ/CQ/MQ/WQ/RQ Queue completion */
+struct sli4_rsp_cmn_create_queue {
+ struct sli4_rsp_hdr hdr;
+ __le16 q_id;
+ u8 rsvd18;
+ u8 ulp;
+ __le32 db_offset;
+ __le16 db_rs;
+ __le16 db_fmt;
+};
+
+struct sli4_rsp_cmn_create_queue_set {
+ struct sli4_rsp_hdr hdr;
+ __le16 q_id;
+ __le16 num_q_allocated;
+};
+
+/* Common Destroy Queue */
+struct sli4_rqst_cmn_destroy_q {
+ struct sli4_rqst_hdr hdr;
+ __le16 q_id;
+ __le16 rsvd;
+};
+
+struct sli4_rsp_cmn_destroy_q {
+ struct sli4_rsp_hdr hdr;
+};
+
+/* Modify the delay multiplier for EQs */
+struct sli4_eqdelay_rec {
+ __le32 eq_id;
+ __le32 phase;
+ __le32 delay_multiplier;
+};
+
+struct sli4_rqst_cmn_modify_eq_delay {
+ struct sli4_rqst_hdr hdr;
+ __le32 num_eq;
+ struct sli4_eqdelay_rec eq_delay_record[8];
+};
+
+struct sli4_rsp_cmn_modify_eq_delay {
+ struct sli4_rsp_hdr hdr;
+};
+
+enum sli4_create_cq_e {
+ /* DW5 */
+ SLI4_CREATE_EQ_AUTOVALID = 1u << 28,
+ SLI4_CREATE_EQ_VALID = 1u << 29,
+ SLI4_CREATE_EQ_EQESZ = 1u << 31,
+ /* DW6 */
+ SLI4_CREATE_EQ_COUNT = 7 << 26,
+ SLI4_CREATE_EQ_ARM = 1u << 31,
+ /* DW7 */
+ SLI4_CREATE_EQ_DELAYMULTI_SHIFT = 13,
+ SLI4_CREATE_EQ_DELAYMULTI_MASK = 0x007fe000,
+ SLI4_CREATE_EQ_DELAYMULTI = 0x00040000,
+};
+
+struct sli4_rqst_cmn_create_eq {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 rsvd18;
+ __le32 dw5_flags;
+ __le32 dw6_flags;
+ __le32 dw7_delaymulti;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_address[8];
+};
+
+struct sli4_rsp_cmn_create_eq {
+ struct sli4_rsp_cmn_create_queue q_rsp;
+};
+
+/* EQ count */
+enum sli4_eq_cnt {
+ SLI4_EQ_CNT_256,
+ SLI4_EQ_CNT_512,
+ SLI4_EQ_CNT_1024,
+ SLI4_EQ_CNT_2048,
+ SLI4_EQ_CNT_4096 = 3,
+};
+
+#define SLI4_EQ_CNT_SHIFT 26
+#define SLI4_EQ_CNT_VAL(type) (SLI4_EQ_CNT_##type << SLI4_EQ_CNT_SHIFT)
+
+#define SLI4_EQE_SIZE_4 0
+#define SLI4_EQE_SIZE_16 1
+
+/* Create a Mailbox Queue; accommodate v0 and v1 forms. */
+enum sli4_create_mq_flags {
+ /* DW6W1 */
+ SLI4_CREATE_MQEXT_RINGSIZE = 0xf,
+ SLI4_CREATE_MQEXT_CQID_SHIFT = 6,
+ SLI4_CREATE_MQEXT_CQIDV0_MASK = 0xffc0,
+ /* DW7 */
+ SLI4_CREATE_MQEXT_VAL = 1u << 31,
+ /* DW8 */
+ SLI4_CREATE_MQEXT_ACQV = 1u << 0,
+ SLI4_CREATE_MQEXT_ASYNC_CQIDV0 = 0x7fe,
+};
+
+struct sli4_rqst_cmn_create_mq_ext {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 cq_id_v1;
+ __le32 async_event_bitmap;
+ __le16 async_cq_id_v1;
+ __le16 dw6w1_flags;
+ __le32 dw7_val;
+ __le32 dw8_flags;
+ __le32 rsvd36;
+ struct sli4_dmaaddr page_phys_addr[0];
+};
+
+struct sli4_rsp_cmn_create_mq_ext {
+ struct sli4_rsp_cmn_create_queue q_rsp;
+};
+
+enum sli4_mqe_size {
+ SLI4_MQE_SIZE_16 = 0x05,
+ SLI4_MQE_SIZE_32,
+ SLI4_MQE_SIZE_64,
+ SLI4_MQE_SIZE_128,
+};
+
+enum sli4_async_evt {
+ SLI4_ASYNC_EVT_LINK_STATE = 1 << 1,
+ SLI4_ASYNC_EVT_FIP = 1 << 2,
+ SLI4_ASYNC_EVT_GRP5 = 1 << 5,
+ SLI4_ASYNC_EVT_FC = 1 << 16,
+ SLI4_ASYNC_EVT_SLI_PORT = 1 << 17,
+};
+
+#define SLI4_ASYNC_EVT_FC_ALL \
+ (SLI4_ASYNC_EVT_LINK_STATE | \
+ SLI4_ASYNC_EVT_FIP | \
+ SLI4_ASYNC_EVT_GRP5 | \
+ SLI4_ASYNC_EVT_FC | \
+ SLI4_ASYNC_EVT_SLI_PORT)
+
+/* Create a Completion Queue. */
+struct sli4_rqst_cmn_create_cq_v0 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 rsvd18;
+ __le32 dw5_flags;
+ __le32 dw6_flags;
+ __le32 rsvd28;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_phys_addr[0];
+};
+
+enum sli4_create_rq_e {
+ SLI4_RQ_CREATE_DUA = 0x1,
+ SLI4_RQ_CREATE_BQU = 0x2,
+
+ SLI4_RQE_SIZE = 8,
+ SLI4_RQE_SIZE_8 = 0x2,
+ SLI4_RQE_SIZE_16 = 0x3,
+ SLI4_RQE_SIZE_32 = 0x4,
+ SLI4_RQE_SIZE_64 = 0x5,
+ SLI4_RQE_SIZE_128 = 0x6,
+
+ SLI4_RQ_PAGE_SIZE_4096 = 0x1,
+ SLI4_RQ_PAGE_SIZE_8192 = 0x2,
+ SLI4_RQ_PAGE_SIZE_16384 = 0x4,
+ SLI4_RQ_PAGE_SIZE_32768 = 0x8,
+ SLI4_RQ_PAGE_SIZE_64536 = 0x10,
+
+ SLI4_RQ_CREATE_V0_MAX_PAGES = 8,
+ SLI4_RQ_CREATE_V0_MIN_BUF_SIZE = 128,
+ SLI4_RQ_CREATE_V0_MAX_BUF_SIZE = 2048,
+};
+
+struct sli4_rqst_rq_create {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 dua_bqu_byte;
+ u8 ulp;
+ __le16 rsvd16;
+ u8 rqe_count_byte;
+ u8 rsvd19;
+ __le32 rsvd20;
+ __le16 buffer_size;
+ __le16 cq_id;
+ __le32 rsvd28;
+ struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V0_MAX_PAGES];
+};
+
+struct sli4_rsp_rq_create {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+enum sli4_create_rqv1_e {
+ SLI4_RQ_CREATE_V1_DNB = 0x80,
+ SLI4_RQ_CREATE_V1_MAX_PAGES = 8,
+ SLI4_RQ_CREATE_V1_MIN_BUF_SIZE = 64,
+ SLI4_RQ_CREATE_V1_MAX_BUF_SIZE = 2048,
+};
+
+struct sli4_rqst_rq_create_v1 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 rsvd14;
+ u8 dim_dfd_dnb;
+ u8 page_size;
+ u8 rqe_size_byte;
+ __le16 rqe_count;
+ __le32 rsvd20;
+ __le16 rsvd24;
+ __le16 cq_id;
+ __le32 buffer_size;
+ struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V1_MAX_PAGES];
+};
+
+struct sli4_rsp_rq_create_v1 {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+#define SLI4_RQCREATEV2_DNB 0x80
+
+struct sli4_rqst_rq_create_v2 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 rq_count;
+ u8 dim_dfd_dnb;
+ u8 page_size;
+ u8 rqe_size_byte;
+ __le16 rqe_count;
+ __le16 hdr_buffer_size;
+ __le16 payload_buffer_size;
+ __le16 base_cq_id;
+ __le16 rsvd26;
+ __le32 rsvd42;
+ struct sli4_dmaaddr page_phys_addr[0];
+};
+
+struct sli4_rsp_rq_create_v2 {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+#define SLI4_CQE_CODE_OFFSET 14
+
+enum sli4_cqe_code {
+ SLI4_CQE_CODE_WORK_REQUEST_COMPLETION = 0x01,
+ SLI4_CQE_CODE_RELEASE_WQE,
+ SLI4_CQE_CODE_RSVD,
+ SLI4_CQE_CODE_RQ_ASYNC,
+ SLI4_CQE_CODE_XRI_ABORTED,
+ SLI4_CQE_CODE_RQ_COALESCING,
+ SLI4_CQE_CODE_RQ_CONSUMPTION,
+ SLI4_CQE_CODE_MEASUREMENT_REPORTING,
+ SLI4_CQE_CODE_RQ_ASYNC_V1,
+ SLI4_CQE_CODE_RQ_COALESCING_V1,
+ SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD,
+ SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA,
+};
+
+#define SLI4_WQ_CREATE_MAX_PAGES 8
+
+struct sli4_rqst_wq_create {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 cq_id;
+ u8 page_size;
+ u8 wqe_size_byte;
+ __le16 wqe_count;
+ __le32 rsvd;
+ struct sli4_dmaaddr page_phys_addr[SLI4_WQ_CREATE_MAX_PAGES];
+};
+
+struct sli4_rsp_wq_create {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+enum sli4_link_attention_flags {
+ SLI4_LNK_ATTN_TYPE_LINK_UP = 0x01,
+ SLI4_LNK_ATTN_TYPE_LINK_DOWN = 0x02,
+ SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA = 0x03,
+
+ SLI4_LNK_ATTN_P2P = 0x01,
+ SLI4_LNK_ATTN_FC_AL = 0x02,
+ SLI4_LNK_ATTN_INTERNAL_LOOPBACK = 0x03,
+ SLI4_LNK_ATTN_SERDES_LOOPBACK = 0x04,
+};
+
+struct sli4_link_attention {
+ u8 link_number;
+ u8 attn_type;
+ u8 topology;
+ u8 port_speed;
+ u8 port_fault;
+ u8 shared_link_status;
+ __le16 logical_link_speed;
+ __le32 event_tag;
+ u8 rsvd12;
+ u8 event_code;
+ u8 event_type;
+ u8 flags;
+};
+
+enum sli4_link_event_type {
+ SLI4_EVENT_LINK_ATTENTION = 0x01,
+ SLI4_EVENT_SHARED_LINK_ATTENTION = 0x02,
+};
+
+enum sli4_wcqe_flags {
+ SLI4_WCQE_XB = 0x10,
+ SLI4_WCQE_QX = 0x80,
+};
+
+struct sli4_fc_wcqe {
+ u8 hw_status;
+ u8 status;
+ __le16 request_tag;
+ __le32 wqe_specific_1;
+ __le32 wqe_specific_2;
+ u8 rsvd12;
+ u8 qx_byte;
+ u8 code;
+ u8 flags;
+};
+
+/* FC WQ consumed CQ queue entry */
+struct sli4_fc_wqec {
+ __le32 rsvd0;
+ __le32 rsvd1;
+ __le16 wqe_index;
+ __le16 wq_id;
+ __le16 rsvd12;
+ u8 code;
+ u8 vld_byte;
+};
+
+/* FC Completion Status Codes. */
+enum sli4_wcqe_status {
+ SLI4_FC_WCQE_STATUS_SUCCESS,
+ SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE,
+ SLI4_FC_WCQE_STATUS_REMOTE_STOP,
+ SLI4_FC_WCQE_STATUS_LOCAL_REJECT,
+ SLI4_FC_WCQE_STATUS_NPORT_RJT,
+ SLI4_FC_WCQE_STATUS_FABRIC_RJT,
+ SLI4_FC_WCQE_STATUS_NPORT_BSY,
+ SLI4_FC_WCQE_STATUS_FABRIC_BSY,
+ SLI4_FC_WCQE_STATUS_RSVD,
+ SLI4_FC_WCQE_STATUS_LS_RJT,
+ SLI4_FC_WCQE_STATUS_RX_BUF_OVERRUN,
+ SLI4_FC_WCQE_STATUS_CMD_REJECT,
+ SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK,
+ SLI4_FC_WCQE_STATUS_RSVD1,
+ SLI4_FC_WCQE_STATUS_ELS_CMPLT_NO_AUTOREG,
+ SLI4_FC_WCQE_STATUS_RSVD2,
+ SLI4_FC_WCQE_STATUS_RQ_SUCCESS,
+ SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC,
+ SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE,
+ SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE,
+ SLI4_FC_WCQE_STATUS_DI_ERROR,
+ SLI4_FC_WCQE_STATUS_BA_RJT,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC,
+ SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT,
+ SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST,
+
+ /* driver generated status codes */
+ SLI4_FC_WCQE_STATUS_DISPATCH_ERROR = 0xfd,
+ SLI4_FC_WCQE_STATUS_SHUTDOWN = 0xfe,
+ SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT = 0xff,
+};
+
+/* DI_ERROR Extended Status */
+enum sli4_fc_di_error_status {
+ SLI4_FC_DI_ERROR_GE = 1 << 0,
+ SLI4_FC_DI_ERROR_AE = 1 << 1,
+ SLI4_FC_DI_ERROR_RE = 1 << 2,
+ SLI4_FC_DI_ERROR_TDPV = 1 << 3,
+ SLI4_FC_DI_ERROR_UDB = 1 << 4,
+ SLI4_FC_DI_ERROR_EDIR = 1 << 5,
+};
+
+/* WQE DIF field contents */
+enum sli4_dif_fields {
+ SLI4_DIF_DISABLED,
+ SLI4_DIF_PASS_THROUGH,
+ SLI4_DIF_STRIP,
+ SLI4_DIF_INSERT,
+};
+
+/* Work Queue Entry (WQE) types */
+enum sli4_wqe_types {
+ SLI4_WQE_ABORT = 0x0f,
+ SLI4_WQE_ELS_REQUEST64 = 0x8a,
+ SLI4_WQE_FCP_IBIDIR64 = 0xac,
+ SLI4_WQE_FCP_IREAD64 = 0x9a,
+ SLI4_WQE_FCP_IWRITE64 = 0x98,
+ SLI4_WQE_FCP_ICMND64 = 0x9c,
+ SLI4_WQE_FCP_TRECEIVE64 = 0xa1,
+ SLI4_WQE_FCP_CONT_TRECEIVE64 = 0xe5,
+ SLI4_WQE_FCP_TRSP64 = 0xa3,
+ SLI4_WQE_FCP_TSEND64 = 0x9f,
+ SLI4_WQE_GEN_REQUEST64 = 0xc2,
+ SLI4_WQE_SEND_FRAME = 0xe1,
+ SLI4_WQE_XMIT_BCAST64 = 0x84,
+ SLI4_WQE_XMIT_BLS_RSP = 0x97,
+ SLI4_WQE_ELS_RSP64 = 0x95,
+ SLI4_WQE_XMIT_SEQUENCE64 = 0x82,
+ SLI4_WQE_REQUEUE_XRI = 0x93,
+};
+
+/* WQE command types */
+enum sli4_wqe_cmds {
+ SLI4_CMD_FCP_IREAD64_WQE = 0x00,
+ SLI4_CMD_FCP_ICMND64_WQE = 0x00,
+ SLI4_CMD_FCP_IWRITE64_WQE = 0x01,
+ SLI4_CMD_FCP_TRECEIVE64_WQE = 0x02,
+ SLI4_CMD_FCP_TRSP64_WQE = 0x03,
+ SLI4_CMD_FCP_TSEND64_WQE = 0x07,
+ SLI4_CMD_GEN_REQUEST64_WQE = 0x08,
+ SLI4_CMD_XMIT_BCAST64_WQE = 0x08,
+ SLI4_CMD_XMIT_BLS_RSP64_WQE = 0x08,
+ SLI4_CMD_ABORT_WQE = 0x08,
+ SLI4_CMD_XMIT_SEQUENCE64_WQE = 0x08,
+ SLI4_CMD_REQUEUE_XRI_WQE = 0x0a,
+ SLI4_CMD_SEND_FRAME_WQE = 0x0a,
+};
+
+#define SLI4_WQE_SIZE 0x05
+#define SLI4_WQE_EXT_SIZE 0x06
+
+#define SLI4_WQE_BYTES (16 * sizeof(u32))
+#define SLI4_WQE_EXT_BYTES (32 * sizeof(u32))
+
+/* Mask for ccp (CS_CTL) */
+#define SLI4_MASK_CCP 0xfe
+
+/* Generic WQE */
+enum sli4_gen_wqe_flags {
+ SLI4_GEN_WQE_EBDECNT = 0xf,
+ SLI4_GEN_WQE_LEN_LOC = 0x3 << 7,
+ SLI4_GEN_WQE_QOSD = 1 << 9,
+ SLI4_GEN_WQE_XBL = 1 << 11,
+ SLI4_GEN_WQE_HLM = 1 << 12,
+ SLI4_GEN_WQE_IOD = 1 << 13,
+ SLI4_GEN_WQE_DBDE = 1 << 14,
+ SLI4_GEN_WQE_WQES = 1 << 15,
+
+ SLI4_GEN_WQE_PRI = 0x7,
+ SLI4_GEN_WQE_PV = 1 << 3,
+ SLI4_GEN_WQE_EAT = 1 << 4,
+ SLI4_GEN_WQE_XC = 1 << 5,
+ SLI4_GEN_WQE_CCPE = 1 << 7,
+
+ SLI4_GEN_WQE_CMDTYPE = 0xf,
+ SLI4_GEN_WQE_WQEC = 1 << 7,
+};
+
+struct sli4_generic_wqe {
+ __le32 cmd_spec0_5[6];
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 dw10w0_flags;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_wqec_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+};
+
+/* WQE used to abort exchanges. */
+enum sli4_abort_wqe_flags {
+ SLI4_ABRT_WQE_IR = 0x02,
+
+ SLI4_ABRT_WQE_EBDECNT = 0xf,
+ SLI4_ABRT_WQE_LEN_LOC = 0x3 << 7,
+ SLI4_ABRT_WQE_QOSD = 1 << 9,
+ SLI4_ABRT_WQE_XBL = 1 << 11,
+ SLI4_ABRT_WQE_IOD = 1 << 13,
+ SLI4_ABRT_WQE_DBDE = 1 << 14,
+ SLI4_ABRT_WQE_WQES = 1 << 15,
+
+ SLI4_ABRT_WQE_PRI = 0x7,
+ SLI4_ABRT_WQE_PV = 1 << 3,
+ SLI4_ABRT_WQE_EAT = 1 << 4,
+ SLI4_ABRT_WQE_XC = 1 << 5,
+ SLI4_ABRT_WQE_CCPE = 1 << 7,
+
+ SLI4_ABRT_WQE_CMDTYPE = 0xf,
+ SLI4_ABRT_WQE_WQEC = 1 << 7,
+};
+
+struct sli4_abort_wqe {
+ __le32 rsvd0;
+ __le32 rsvd4;
+ __le32 ext_t_tag;
+ u8 ia_ir_byte;
+ u8 criteria;
+ __le16 rsvd10;
+ __le32 ext_t_mask;
+ __le32 t_mask;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 t_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 dw10w0_flags;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_wqec_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+};
+
+enum sli4_abort_criteria {
+ SLI4_ABORT_CRITERIA_XRI_TAG = 0x01,
+ SLI4_ABORT_CRITERIA_ABORT_TAG,
+ SLI4_ABORT_CRITERIA_REQUEST_TAG,
+ SLI4_ABORT_CRITERIA_EXT_ABORT_TAG,
+};
+
+enum sli4_abort_type {
+ SLI4_ABORT_XRI,
+ SLI4_ABORT_ABORT_ID,
+ SLI4_ABORT_REQUEST_ID,
+ SLI4_ABORT_MAX, /* must be last */
+};
+
+/* WQE used to create an ELS request. */
+enum sli4_els_req_wqe_flags {
+ SLI4_REQ_WQE_QOSD = 0x2,
+ SLI4_REQ_WQE_DBDE = 0x40,
+ SLI4_REQ_WQE_XBL = 0x8,
+ SLI4_REQ_WQE_XC = 0x20,
+ SLI4_REQ_WQE_IOD = 0x20,
+ SLI4_REQ_WQE_HLM = 0x10,
+ SLI4_REQ_WQE_CCPE = 0x80,
+ SLI4_REQ_WQE_EAT = 0x10,
+ SLI4_REQ_WQE_WQES = 0x80,
+ SLI4_REQ_WQE_PU_SHFT = 4,
+ SLI4_REQ_WQE_CT_SHFT = 2,
+ SLI4_REQ_WQE_CT = 0xc,
+ SLI4_REQ_WQE_ELSID_SHFT = 4,
+ SLI4_REQ_WQE_SP_SHFT = 24,
+ SLI4_REQ_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_REQ_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_els_request64_wqe {
+ struct sli4_bde els_request_payload;
+ __le32 els_request_payload_length;
+ __le32 sid_sp_dword;
+ __le32 remote_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 temporary_rpi;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_elsid_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ struct sli4_bde els_response_payload_bde;
+ __le32 max_response_payload_length;
+};
+
+/* WQE used to create an FCP initiator no data command. */
+enum sli4_icmd_wqe_flags {
+ SLI4_ICMD_WQE_DBDE = 0x40,
+ SLI4_ICMD_WQE_XBL = 0x8,
+ SLI4_ICMD_WQE_XC = 0x20,
+ SLI4_ICMD_WQE_IOD = 0x20,
+ SLI4_ICMD_WQE_HLM = 0x10,
+ SLI4_ICMD_WQE_CCPE = 0x80,
+ SLI4_ICMD_WQE_EAT = 0x10,
+ SLI4_ICMD_WQE_APPID = 0x10,
+ SLI4_ICMD_WQE_WQES = 0x80,
+ SLI4_ICMD_WQE_PU_SHFT = 4,
+ SLI4_ICMD_WQE_CT_SHFT = 2,
+ SLI4_ICMD_WQE_BS_SHFT = 4,
+ SLI4_ICMD_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_ICMD_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_icmnd64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+ __le32 rsvd12;
+ __le32 remote_n_port_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create an FCP initiator read. */
+enum sli4_ir_wqe_flags {
+ SLI4_IR_WQE_DBDE = 0x40,
+ SLI4_IR_WQE_XBL = 0x8,
+ SLI4_IR_WQE_XC = 0x20,
+ SLI4_IR_WQE_IOD = 0x20,
+ SLI4_IR_WQE_HLM = 0x10,
+ SLI4_IR_WQE_CCPE = 0x80,
+ SLI4_IR_WQE_EAT = 0x10,
+ SLI4_IR_WQE_APPID = 0x10,
+ SLI4_IR_WQE_WQES = 0x80,
+ SLI4_IR_WQE_PU_SHFT = 4,
+ SLI4_IR_WQE_CT_SHFT = 2,
+ SLI4_IR_WQE_BS_SHFT = 4,
+ SLI4_IR_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_IR_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_iread64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+
+ __le32 total_transfer_length;
+
+ __le32 remote_n_port_id_dword;
+
+ __le16 xri_tag;
+ __le16 context_tag;
+
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+
+ __le32 abort_tag;
+
+ __le16 request_tag;
+ __le16 rsvd34;
+
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+
+ __le32 rsvd44;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create an FCP initiator write. */
+enum sli4_iwr_wqe_flags {
+ SLI4_IWR_WQE_DBDE = 0x40,
+ SLI4_IWR_WQE_XBL = 0x8,
+ SLI4_IWR_WQE_XC = 0x20,
+ SLI4_IWR_WQE_IOD = 0x20,
+ SLI4_IWR_WQE_HLM = 0x10,
+ SLI4_IWR_WQE_DNRX = 0x10,
+ SLI4_IWR_WQE_CCPE = 0x80,
+ SLI4_IWR_WQE_EAT = 0x10,
+ SLI4_IWR_WQE_APPID = 0x10,
+ SLI4_IWR_WQE_WQES = 0x80,
+ SLI4_IWR_WQE_PU_SHFT = 4,
+ SLI4_IWR_WQE_CT_SHFT = 2,
+ SLI4_IWR_WQE_BS_SHFT = 4,
+ SLI4_IWR_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_IWR_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_iwrite64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+ __le16 total_transfer_length;
+ __le16 initial_transfer_length;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 remote_n_port_id_dword;
+ struct sli4_bde first_data_bde;
+};
+
+struct sli4_fcp_128byte_wqe {
+ u32 dw[32];
+};
+
+/* WQE used to create an FCP target receive */
+enum sli4_trcv_wqe_flags {
+ SLI4_TRCV_WQE_DBDE = 0x40,
+ SLI4_TRCV_WQE_XBL = 0x8,
+ SLI4_TRCV_WQE_AR = 0x8,
+ SLI4_TRCV_WQE_XC = 0x20,
+ SLI4_TRCV_WQE_IOD = 0x20,
+ SLI4_TRCV_WQE_HLM = 0x10,
+ SLI4_TRCV_WQE_DNRX = 0x10,
+ SLI4_TRCV_WQE_CCPE = 0x80,
+ SLI4_TRCV_WQE_EAT = 0x10,
+ SLI4_TRCV_WQE_APPID = 0x10,
+ SLI4_TRCV_WQE_WQES = 0x80,
+ SLI4_TRCV_WQE_PU_SHFT = 4,
+ SLI4_TRCV_WQE_CT_SHFT = 2,
+ SLI4_TRCV_WQE_BS_SHFT = 4,
+ SLI4_TRCV_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_treceive64_wqe {
+ struct sli4_bde bde;
+ __le32 payload_offset_length;
+ __le32 relative_offset;
+ union {
+ __le16 sec_xri_tag;
+ __le16 rsvd;
+ __le32 dword;
+ } dword5;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_ar_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 lloc1_appid;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 fcp_data_receive_length;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create an FCP target response */
+enum sli4_trsp_wqe_flags {
+ SLI4_TRSP_WQE_AG = 0x8,
+ SLI4_TRSP_WQE_DBDE = 0x40,
+ SLI4_TRSP_WQE_XBL = 0x8,
+ SLI4_TRSP_WQE_XC = 0x20,
+ SLI4_TRSP_WQE_HLM = 0x10,
+ SLI4_TRSP_WQE_DNRX = 0x10,
+ SLI4_TRSP_WQE_CCPE = 0x80,
+ SLI4_TRSP_WQE_EAT = 0x10,
+ SLI4_TRSP_WQE_APPID = 0x10,
+ SLI4_TRSP_WQE_WQES = 0x80,
+};
+
+struct sli4_fcp_trsp64_wqe {
+ struct sli4_bde bde;
+ __le32 fcp_response_length;
+ __le32 rsvd12;
+ __le32 dword5;
+ __le16 xri_tag;
+ __le16 rpi;
+ u8 ct_dnrx_byte;
+ u8 command;
+ u8 class_ag_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 lloc1_appid;
+ u8 qosd_xbl_hlm_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create an FCP target send (DATA IN). */
+enum sli4_tsend_wqe_flags {
+ SLI4_TSEND_WQE_XBL = 0x8,
+ SLI4_TSEND_WQE_DBDE = 0x40,
+ SLI4_TSEND_WQE_IOD = 0x20,
+ SLI4_TSEND_WQE_QOSD = 0x2,
+ SLI4_TSEND_WQE_HLM = 0x10,
+ SLI4_TSEND_WQE_PU_SHFT = 4,
+ SLI4_TSEND_WQE_AR = 0x8,
+ SLI4_TSEND_CT_SHFT = 2,
+ SLI4_TSEND_BS_SHFT = 4,
+ SLI4_TSEND_LEN_LOC_BIT2 = 0x1,
+ SLI4_TSEND_CCPE = 0x80,
+ SLI4_TSEND_APPID_VALID = 0x20,
+ SLI4_TSEND_WQES = 0x80,
+ SLI4_TSEND_XC = 0x20,
+ SLI4_TSEND_EAT = 0x10,
+};
+
+struct sli4_fcp_tsend64_wqe {
+ struct sli4_bde bde;
+ __le32 payload_offset_length;
+ __le32 relative_offset;
+ __le32 dword5;
+ __le16 xri_tag;
+ __le16 rpi;
+ u8 ct_byte;
+ u8 command;
+ u8 class_pu_ar_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 dw10byte0;
+ u8 ll_qd_xbl_hlm_iod_dbde;
+ u8 dw10byte2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le32 fcp_data_transmit_length;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create a general request. */
+enum sli4_gen_req_wqe_flags {
+ SLI4_GEN_REQ64_WQE_XBL = 0x8,
+ SLI4_GEN_REQ64_WQE_DBDE = 0x40,
+ SLI4_GEN_REQ64_WQE_IOD = 0x20,
+ SLI4_GEN_REQ64_WQE_QOSD = 0x2,
+ SLI4_GEN_REQ64_WQE_HLM = 0x10,
+ SLI4_GEN_REQ64_CT_SHFT = 2,
+};
+
+struct sli4_gen_request64_wqe {
+ struct sli4_bde bde;
+ __le32 request_payload_length;
+ __le32 relative_offset;
+ u8 rsvd17;
+ u8 df_ctl;
+ u8 type;
+ u8 r_ctl;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 dw10flags0;
+ u8 dw10flags1;
+ u8 dw10flags2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 remote_n_port_id_dword;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 max_response_payload_length;
+};
+
+/* WQE used to create a send frame request */
+enum sli4_sf_wqe_flags {
+ SLI4_SF_WQE_DBDE = 0x40,
+ SLI4_SF_PU = 0x30,
+ SLI4_SF_CT = 0xc,
+ SLI4_SF_QOSD = 0x2,
+ SLI4_SF_LEN_LOC_BIT1 = 0x80,
+ SLI4_SF_LEN_LOC_BIT2 = 0x1,
+ SLI4_SF_XC = 0x20,
+ SLI4_SF_XBL = 0x8,
+};
+
+struct sli4_send_frame_wqe {
+ struct sli4_bde bde;
+ __le32 frame_length;
+ __le32 fc_header_0_1[2];
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 dw7flags0;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ u8 eof;
+ u8 sof;
+ u8 dw10flags0;
+ u8 dw10flags1;
+ u8 dw10flags2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 fc_header_2_5[4];
+};
+
+/* WQE used to create a transmit sequence */
+enum sli4_seq_wqe_flags {
+ SLI4_SEQ_WQE_DBDE = 0x4000,
+ SLI4_SEQ_WQE_XBL = 0x800,
+ SLI4_SEQ_WQE_SI = 0x4,
+ SLI4_SEQ_WQE_FT = 0x8,
+ SLI4_SEQ_WQE_XO = 0x40,
+ SLI4_SEQ_WQE_LS = 0x80,
+ SLI4_SEQ_WQE_DIF = 0x3,
+ SLI4_SEQ_WQE_BS = 0x70,
+ SLI4_SEQ_WQE_PU = 0x30,
+ SLI4_SEQ_WQE_HLM = 0x1000,
+ SLI4_SEQ_WQE_IOD_SHIFT = 13,
+ SLI4_SEQ_WQE_CT_SHIFT = 2,
+ SLI4_SEQ_WQE_LEN_LOC_SHIFT = 7,
+};
+
+struct sli4_xmit_sequence64_wqe {
+ struct sli4_bde bde;
+ __le32 remote_n_port_id_dword;
+ __le32 relative_offset;
+ u8 dw5flags0;
+ u8 df_ctl;
+ u8 type;
+ u8 r_ctl;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dw7flags0;
+ u8 command;
+ u8 dw7flags1;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ __le16 dw10w0;
+ u8 dw10flags0;
+ u8 ccp;
+ u8 cmd_type_wqec_byte;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le32 sequence_payload_len;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/*
+ * WQE used to unblock the specified XRI and to release
+ * it to the SLI Port's free pool.
+ */
+enum sli4_requeue_wqe_flags {
+ SLI4_REQU_XRI_WQE_XC = 0x20,
+ SLI4_REQU_XRI_WQE_QOSD = 0x2,
+};
+
+struct sli4_requeue_xri_wqe {
+ __le32 rsvd0;
+ __le32 rsvd4;
+ __le32 rsvd8;
+ __le32 rsvd12;
+ __le32 rsvd16;
+ __le32 rsvd20;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 rsvd32;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 flags0;
+ __le16 flags1;
+ __le16 flags2;
+ u8 ccp;
+ u8 cmd_type_wqec_byte;
+ u8 rsvd42;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create a BLS response */
+enum sli4_bls_rsp_wqe_flags {
+ SLI4_BLS_RSP_RID = 0xffffff,
+ SLI4_BLS_RSP_WQE_AR = 0x40000000,
+ SLI4_BLS_RSP_WQE_CT_SHFT = 2,
+ SLI4_BLS_RSP_WQE_QOSD = 0x2,
+ SLI4_BLS_RSP_WQE_HLM = 0x10,
+};
+
+struct sli4_xmit_bls_rsp_wqe {
+ __le32 payload_word0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
+ __le32 rsvd12;
+ __le32 local_n_port_id_dword;
+ __le32 remote_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dw8flags0;
+ u8 command;
+ u8 dw8flags1;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd38;
+ u8 dw11flags0;
+ u8 dw11flags1;
+ u8 dw11flags2;
+ u8 ccp;
+ u8 dw12flags0;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le16 temporary_rpi;
+ u8 rsvd50;
+ u8 rsvd51;
+ __le32 rsvd52;
+ __le32 rsvd56;
+ __le32 rsvd60;
+};
+
+enum sli_bls_type {
+ SLI4_SLI_BLS_ACC,
+ SLI4_SLI_BLS_RJT,
+ SLI4_SLI_BLS_MAX
+};
+
+struct sli_bls_payload {
+ enum sli_bls_type type;
+ __le16 ox_id;
+ __le16 rx_id;
+ union {
+ struct {
+ u8 seq_id_validity;
+ u8 seq_id_last;
+ u8 rsvd2;
+ u8 rsvd3;
+ u16 ox_id;
+ u16 rx_id;
+ __le16 low_seq_cnt;
+ __le16 high_seq_cnt;
+ } acc;
+ struct {
+ u8 vendor_unique;
+ u8 reason_explanation;
+ u8 reason_code;
+ u8 rsvd3;
+ } rjt;
+ } u;
+};
+
+/* WQE used to create an ELS response */
+
+enum sli4_els_rsp_flags {
+ SLI4_ELS_SID = 0xffffff,
+ SLI4_ELS_RID = 0xffffff,
+ SLI4_ELS_DBDE = 0x40,
+ SLI4_ELS_XBL = 0x8,
+ SLI4_ELS_IOD = 0x20,
+ SLI4_ELS_QOSD = 0x2,
+ SLI4_ELS_XC = 0x20,
+ SLI4_ELS_CT_OFFSET = 0x2,
+ SLI4_ELS_SP = 0x1000000,
+ SLI4_ELS_HLM = 0x10,
+};
+
+struct sli4_xmit_els_rsp64_wqe {
+ struct sli4_bde els_response_payload;
+ __le32 els_response_payload_length;
+ __le32 sid_dw;
+ __le32 rid_dw;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 ox_id;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ u8 flags4;
+ u8 cmd_type_wqec;
+ u8 rsvd34;
+ __le16 cq_id;
+ __le16 temporary_rpi;
+ __le16 rsvd38;
+ u32 rsvd40;
+ u32 rsvd44;
+ u32 rsvd48;
+};
+
+/* Local Reject Reason Codes */
+enum sli4_fc_local_rej_codes {
+ SLI4_FC_LOCAL_REJECT_UNKNOWN,
+ SLI4_FC_LOCAL_REJECT_MISSING_CONTINUE,
+ SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT,
+ SLI4_FC_LOCAL_REJECT_INTERNAL_ERROR,
+ SLI4_FC_LOCAL_REJECT_INVALID_RPI,
+ SLI4_FC_LOCAL_REJECT_NO_XRI,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_COMMAND,
+ SLI4_FC_LOCAL_REJECT_XCHG_DROPPED,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_FIELD,
+ SLI4_FC_LOCAL_REJECT_RPI_SUSPENDED,
+ SLI4_FC_LOCAL_REJECT_RSVD,
+ SLI4_FC_LOCAL_REJECT_RSVD1,
+ SLI4_FC_LOCAL_REJECT_NO_ABORT_MATCH,
+ SLI4_FC_LOCAL_REJECT_TX_DMA_FAILED,
+ SLI4_FC_LOCAL_REJECT_RX_DMA_FAILED,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_FRAME,
+ SLI4_FC_LOCAL_REJECT_RSVD2,
+ SLI4_FC_LOCAL_REJECT_NO_RESOURCES, /* 0x11 */
+ SLI4_FC_LOCAL_REJECT_FCP_CONF_FAILURE,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_LENGTH,
+ SLI4_FC_LOCAL_REJECT_UNSUPPORTED_FEATURE,
+ SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS,
+ SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED,
+ SLI4_FC_LOCAL_REJECT_RCV_BUFFER_TIMEOUT,
+ SLI4_FC_LOCAL_REJECT_LOOP_OPEN_FAILURE,
+ SLI4_FC_LOCAL_REJECT_RSVD3,
+ SLI4_FC_LOCAL_REJECT_LINK_DOWN,
+ SLI4_FC_LOCAL_REJECT_CORRUPTED_DATA,
+ SLI4_FC_LOCAL_REJECT_CORRUPTED_RPI,
+ SLI4_FC_LOCAL_REJECT_OUTOFORDER_DATA,
+ SLI4_FC_LOCAL_REJECT_OUTOFORDER_ACK,
+ SLI4_FC_LOCAL_REJECT_DUP_FRAME,
+ SLI4_FC_LOCAL_REJECT_LINK_CONTROL_FRAME, /* 0x20 */
+ SLI4_FC_LOCAL_REJECT_BAD_HOST_ADDRESS,
+ SLI4_FC_LOCAL_REJECT_RSVD4,
+ SLI4_FC_LOCAL_REJECT_MISSING_HDR_BUFFER,
+ SLI4_FC_LOCAL_REJECT_MSEQ_CHAIN_CORRUPTED,
+ SLI4_FC_LOCAL_REJECT_ABORTMULT_REQUESTED,
+ SLI4_FC_LOCAL_REJECT_BUFFER_SHORTAGE = 0x28,
+ SLI4_FC_LOCAL_REJECT_RCV_XRIBUF_WAITING,
+ SLI4_FC_LOCAL_REJECT_INVALID_VPI = 0x2e,
+ SLI4_FC_LOCAL_REJECT_NO_FPORT_DETECTED,
+ SLI4_FC_LOCAL_REJECT_MISSING_XRIBUF,
+ SLI4_FC_LOCAL_REJECT_RSVD5,
+ SLI4_FC_LOCAL_REJECT_INVALID_XRI,
+ SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET = 0x40,
+ SLI4_FC_LOCAL_REJECT_MISSING_RELOFFSET,
+ SLI4_FC_LOCAL_REJECT_INSUFF_BUFFERSPACE,
+ SLI4_FC_LOCAL_REJECT_MISSING_SI,
+ SLI4_FC_LOCAL_REJECT_MISSING_ES,
+ SLI4_FC_LOCAL_REJECT_INCOMPLETE_XFER,
+ SLI4_FC_LOCAL_REJECT_SLER_FAILURE,
+ SLI4_FC_LOCAL_REJECT_SLER_CMD_RCV_FAILURE,
+ SLI4_FC_LOCAL_REJECT_SLER_REC_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_REC_SRR_RETRY_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_SRR_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_RSVD6,
+ SLI4_FC_LOCAL_REJECT_SLER_RRQ_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_RRQ_RETRY_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_ABTS_ERR,
+};
+
+enum sli4_async_rcqe_flags {
+ SLI4_RACQE_RQ_EL_INDX = 0xfff,
+ SLI4_RACQE_FCFI = 0x3f,
+ SLI4_RACQE_HDPL = 0x3f,
+ SLI4_RACQE_RQ_ID = 0xffc0,
+};
+
+struct sli4_fc_async_rcqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ __le32 rsvd4;
+ __le16 fcfi_rq_id_word;
+ __le16 data_placement_length;
+ u8 sof_byte;
+ u8 eof_byte;
+ u8 code;
+ u8 hdpl_byte;
+};
+
+struct sli4_fc_async_rcqe_v1 {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ u8 fcfi_byte;
+ u8 rsvd5;
+ __le16 rsvd6;
+ __le16 rq_id;
+ __le16 data_placement_length;
+ u8 sof_byte;
+ u8 eof_byte;
+ u8 code;
+ u8 hdpl_byte;
+};
+
+enum sli4_fc_async_rq_status {
+ SLI4_FC_ASYNC_RQ_SUCCESS = 0x10,
+ SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED,
+ SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED,
+ SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC,
+ SLI4_FC_ASYNC_RQ_DMA_FAILURE,
+};
+
+#define SLI4_RCQE_RQ_EL_INDX 0xfff
+
+struct sli4_fc_coalescing_rcqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ __le32 rsvd4;
+ __le16 rq_id;
+ __le16 seq_placement_length;
+ __le16 rsvd14;
+ u8 code;
+ u8 vld_byte;
+};
+
+#define SLI4_FC_COALESCE_RQ_SUCCESS 0x10
+#define SLI4_FC_COALESCE_RQ_INSUFF_XRI_NEEDED 0x18
+
+enum sli4_optimized_write_cmd_cqe_flags {
+ SLI4_OCQE_RQ_EL_INDX = 0x7f, /* DW0 bits 16:30 */
+ SLI4_OCQE_FCFI = 0x3f, /* DW1 bits 0:6 */
+ SLI4_OCQE_OOX = 1 << 6, /* DW1 bit 15 */
+ SLI4_OCQE_AGXR = 1 << 7, /* DW1 bit 16 */
+ SLI4_OCQE_HDPL = 0x3f, /* DW3 bits 24:29 */
+};
+
+struct sli4_fc_optimized_write_cmd_cqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 w1;
+ u8 flags0;
+ u8 flags1;
+ __le16 xri;
+ __le16 rq_id;
+ __le16 data_placement_length;
+ __le16 rpi;
+ u8 code;
+ u8 hdpl_vld;
+};
+
+#define SLI4_OCQE_XB 0x10
+
+struct sli4_fc_optimized_write_data_cqe {
+ u8 hw_status;
+ u8 status;
+ __le16 xri;
+ __le32 total_data_placed;
+ __le32 extended_status;
+ __le16 rsvd12;
+ u8 code;
+ u8 flags;
+};
+
+struct sli4_fc_xri_aborted_cqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rsvd2;
+ __le32 extended_status;
+ __le16 xri;
+ __le16 remote_xid;
+ __le16 rsvd12;
+ u8 code;
+ u8 flags;
+};
+
+enum sli4_generic_ctx {
+ SLI4_GENERIC_CONTEXT_RPI,
+ SLI4_GENERIC_CONTEXT_VPI,
+ SLI4_GENERIC_CONTEXT_VFI,
+ SLI4_GENERIC_CONTEXT_FCFI,
+};
+
+#define SLI4_GENERIC_CLASS_CLASS_2 0x1
+#define SLI4_GENERIC_CLASS_CLASS_3 0x2
+
+#define SLI4_ELS_REQUEST64_DIR_WRITE 0x0
+#define SLI4_ELS_REQUEST64_DIR_READ 0x1
+
+enum sli4_els_request {
+ SLI4_ELS_REQUEST64_OTHER,
+ SLI4_ELS_REQUEST64_LOGO,
+ SLI4_ELS_REQUEST64_FDISC,
+ SLI4_ELS_REQUEST64_FLOGIN,
+ SLI4_ELS_REQUEST64_PLOGI,
+};
+
+enum sli4_els_cmd_type {
+ SLI4_ELS_REQUEST64_CMD_GEN = 0x08,
+ SLI4_ELS_REQUEST64_CMD_NON_FABRIC = 0x0c,
+ SLI4_ELS_REQUEST64_CMD_FABRIC = 0x0d,
+};
+
+#define SLI_PAGE_SIZE SZ_4K
+
+#define SLI4_BMBX_TIMEOUT_MSEC 30000
+#define SLI4_FW_READY_TIMEOUT_MSEC 30000
+
+#define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
+#define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */
+
+static inline u32
+sli_page_count(size_t bytes, u32 page_size)
+{
+ if (!page_size)
+ return 0;
+
+ return (bytes + (page_size - 1)) >> __ffs(page_size);
+}
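+
+/*
+ * Worked example (illustrative only): sli_page_count() rounds a byte count
+ * up to whole pages of the given power-of-two size, so with 4 KiB pages
+ * sli_page_count(0, SZ_4K) == 0, sli_page_count(1, SZ_4K) == 1,
+ * sli_page_count(4096, SZ_4K) == 1 and sli_page_count(4097, SZ_4K) == 2.
+ */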
+
+/*************************************************************************
+ * SLI-4 mailbox command formats and definitions
+ */
+
+struct sli4_mbox_command_header {
+ u8 resvd0;
+ u8 command;
+ __le16 status; /* Port writes to indicate success/fail */
+};
+
+enum sli4_mbx_cmd_value {
+ SLI4_MBX_CMD_CONFIG_LINK = 0x07,
+ SLI4_MBX_CMD_DUMP = 0x17,
+ SLI4_MBX_CMD_DOWN_LINK = 0x06,
+ SLI4_MBX_CMD_INIT_LINK = 0x05,
+ SLI4_MBX_CMD_INIT_VFI = 0xa3,
+ SLI4_MBX_CMD_INIT_VPI = 0xa4,
+ SLI4_MBX_CMD_POST_XRI = 0xa7,
+ SLI4_MBX_CMD_RELEASE_XRI = 0xac,
+ SLI4_MBX_CMD_READ_CONFIG = 0x0b,
+ SLI4_MBX_CMD_READ_STATUS = 0x0e,
+ SLI4_MBX_CMD_READ_NVPARMS = 0x02,
+ SLI4_MBX_CMD_READ_REV = 0x11,
+ SLI4_MBX_CMD_READ_LNK_STAT = 0x12,
+ SLI4_MBX_CMD_READ_SPARM64 = 0x8d,
+ SLI4_MBX_CMD_READ_TOPOLOGY = 0x95,
+ SLI4_MBX_CMD_REG_FCFI = 0xa0,
+ SLI4_MBX_CMD_REG_FCFI_MRQ = 0xaf,
+ SLI4_MBX_CMD_REG_RPI = 0x93,
+ SLI4_MBX_CMD_REG_RX_RQ = 0xa6,
+ SLI4_MBX_CMD_REG_VFI = 0x9f,
+ SLI4_MBX_CMD_REG_VPI = 0x96,
+ SLI4_MBX_CMD_RQST_FEATURES = 0x9d,
+ SLI4_MBX_CMD_SLI_CONFIG = 0x9b,
+ SLI4_MBX_CMD_UNREG_FCFI = 0xa2,
+ SLI4_MBX_CMD_UNREG_RPI = 0x14,
+ SLI4_MBX_CMD_UNREG_VFI = 0xa1,
+ SLI4_MBX_CMD_UNREG_VPI = 0x97,
+ SLI4_MBX_CMD_WRITE_NVPARMS = 0x03,
+ SLI4_MBX_CMD_CFG_AUTO_XFER_RDY = 0xad,
+};
+
+enum sli4_mbx_status {
+ SLI4_MBX_STATUS_SUCCESS = 0x0000,
+ SLI4_MBX_STATUS_FAILURE = 0x0001,
+ SLI4_MBX_STATUS_RPI_NOT_REG = 0x1400,
+};
+
+/* CONFIG_LINK - configure link-oriented parameters,
+ * such as default N_Port_ID address and various timers
+ */
+enum sli4_cmd_config_link_flags {
+ SLI4_CFG_LINK_BBSCN = 0xf00,
+ SLI4_CFG_LINK_CSCN = 0x1000,
+};
+
+struct sli4_cmd_config_link {
+ struct sli4_mbox_command_header hdr;
+ u8 maxbbc;
+ u8 rsvd5;
+ u8 rsvd6;
+ u8 rsvd7;
+ u8 alpa;
+ __le16 n_port_id;
+ u8 rsvd11;
+ __le32 rsvd12;
+ __le32 e_d_tov;
+ __le32 lp_tov;
+ __le32 r_a_tov;
+ __le32 r_t_tov;
+ __le32 al_tov;
+ __le32 rsvd36;
+ __le32 bbscn_dword;
+};
+
+#define SLI4_DUMP4_TYPE 0xf
+
+#define SLI4_WKI_TAG_SAT_TEM 0x1040
+
+struct sli4_cmd_dump4 {
+ struct sli4_mbox_command_header hdr;
+ __le32 type_dword;
+ __le16 wki_selection;
+ __le16 rsvd10;
+ __le32 rsvd12;
+ __le32 returned_byte_cnt;
+ __le32 resp_data[59];
+};
+
+/* INIT_LINK - initialize the link for a FC port */
+enum sli4_init_link_flags {
+ SLI4_INIT_LINK_F_LOOPBACK = 1 << 0,
+
+ SLI4_INIT_LINK_F_P2P_ONLY = 1 << 1,
+ SLI4_INIT_LINK_F_FCAL_ONLY = 2 << 1,
+ SLI4_INIT_LINK_F_FCAL_FAIL_OVER = 0 << 1,
+ SLI4_INIT_LINK_F_P2P_FAIL_OVER = 1 << 1,
+
+ SLI4_INIT_LINK_F_UNFAIR = 1 << 6,
+ SLI4_INIT_LINK_F_NO_LIRP = 1 << 7,
+ SLI4_INIT_LINK_F_LOOP_VALID_CHK = 1 << 8,
+ SLI4_INIT_LINK_F_NO_LISA = 1 << 9,
+ SLI4_INIT_LINK_F_FAIL_OVER = 1 << 10,
+ SLI4_INIT_LINK_F_FIXED_SPEED = 1 << 11,
+ SLI4_INIT_LINK_F_PICK_HI_ALPA = 1 << 15,
+
+};
+
+enum sli4_fc_link_speed {
+ SLI4_LINK_SPEED_1G = 1,
+ SLI4_LINK_SPEED_2G,
+ SLI4_LINK_SPEED_AUTO_1_2,
+ SLI4_LINK_SPEED_4G,
+ SLI4_LINK_SPEED_AUTO_4_1,
+ SLI4_LINK_SPEED_AUTO_4_2,
+ SLI4_LINK_SPEED_AUTO_4_2_1,
+ SLI4_LINK_SPEED_8G,
+ SLI4_LINK_SPEED_AUTO_8_1,
+ SLI4_LINK_SPEED_AUTO_8_2,
+ SLI4_LINK_SPEED_AUTO_8_2_1,
+ SLI4_LINK_SPEED_AUTO_8_4,
+ SLI4_LINK_SPEED_AUTO_8_4_1,
+ SLI4_LINK_SPEED_AUTO_8_4_2,
+ SLI4_LINK_SPEED_10G,
+ SLI4_LINK_SPEED_16G,
+ SLI4_LINK_SPEED_AUTO_16_8_4,
+ SLI4_LINK_SPEED_AUTO_16_8,
+ SLI4_LINK_SPEED_32G,
+ SLI4_LINK_SPEED_AUTO_32_16_8,
+ SLI4_LINK_SPEED_AUTO_32_16,
+ SLI4_LINK_SPEED_64G,
+ SLI4_LINK_SPEED_AUTO_64_32_16,
+ SLI4_LINK_SPEED_AUTO_64_32,
+ SLI4_LINK_SPEED_128G,
+ SLI4_LINK_SPEED_AUTO_128_64_32,
+ SLI4_LINK_SPEED_AUTO_128_64,
+};
+
+struct sli4_cmd_init_link {
+ struct sli4_mbox_command_header hdr;
+ __le32 sel_reset_al_pa_dword;
+ __le32 flags0;
+ __le32 link_speed_sel_code;
+};
+
+/* INIT_VFI - initialize the VFI resource */
+enum sli4_init_vfi_flags {
+ SLI4_INIT_VFI_FLAG_VP = 0x1000,
+ SLI4_INIT_VFI_FLAG_VF = 0x2000,
+ SLI4_INIT_VFI_FLAG_VT = 0x4000,
+ SLI4_INIT_VFI_FLAG_VR = 0x8000,
+
+ SLI4_INIT_VFI_VFID = 0x1fff,
+ SLI4_INIT_VFI_PRI = 0xe000,
+
+ SLI4_INIT_VFI_HOP_COUNT = 0xff000000,
+};
+
+struct sli4_cmd_init_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vfi;
+ __le16 flags0_word;
+ __le16 fcfi;
+ __le16 vpi;
+ __le32 vf_id_pri_dword;
+ __le32 hop_cnt_dword;
+};
+
+/* INIT_VPI - initialize the VPI resource */
+struct sli4_cmd_init_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vpi;
+ __le16 vfi;
+};
+
+/* POST_XRI - post XRI resources to the SLI Port */
+enum sli4_post_xri_flags {
+ SLI4_POST_XRI_COUNT = 0xfff,
+ SLI4_POST_XRI_FLAG_ENX = 0x1000,
+ SLI4_POST_XRI_FLAG_DL = 0x2000,
+ SLI4_POST_XRI_FLAG_DI = 0x4000,
+ SLI4_POST_XRI_FLAG_VAL = 0x8000,
+};
+
+struct sli4_cmd_post_xri {
+ struct sli4_mbox_command_header hdr;
+ __le16 xri_base;
+ __le16 xri_count_flags;
+};
+
+/* RELEASE_XRI - Release XRI resources from the SLI Port */
+enum sli4_release_xri_flags {
+ SLI4_RELEASE_XRI_REL_XRI_CNT = 0x1f,
+ SLI4_RELEASE_XRI_COUNT = 0x1f,
+};
+
+struct sli4_cmd_release_xri {
+ struct sli4_mbox_command_header hdr;
+ __le16 rel_xri_count_word;
+ __le16 xri_count_word;
+
+ struct {
+ __le16 xri_tag0;
+ __le16 xri_tag1;
+ } xri_tbl[62];
+};
+
+/* READ_CONFIG - read SLI port configuration parameters */
+struct sli4_cmd_read_config {
+ struct sli4_mbox_command_header hdr;
+};
+
+enum sli4_read_cfg_resp_flags {
+ SLI4_READ_CFG_RESP_RESOURCE_EXT = 0x80000000, /* DW1 */
+ SLI4_READ_CFG_RESP_TOPOLOGY = 0xff000000, /* DW2 */
+};
+
+enum sli4_read_cfg_topo {
+ SLI4_READ_CFG_TOPO_FC = 0x1, /* FC topology unknown */
+ SLI4_READ_CFG_TOPO_NON_FC_AL = 0x2, /* FC point-to-point or fabric */
+ SLI4_READ_CFG_TOPO_FC_AL = 0x3, /* FC-AL topology */
+};
+
+/* Link Module Type */
+enum sli4_read_cfg_lmt {
+ SLI4_LINK_MODULE_TYPE_1GB = 0x0004,
+ SLI4_LINK_MODULE_TYPE_2GB = 0x0008,
+ SLI4_LINK_MODULE_TYPE_4GB = 0x0040,
+ SLI4_LINK_MODULE_TYPE_8GB = 0x0080,
+ SLI4_LINK_MODULE_TYPE_16GB = 0x0200,
+ SLI4_LINK_MODULE_TYPE_32GB = 0x0400,
+ SLI4_LINK_MODULE_TYPE_64GB = 0x0800,
+ SLI4_LINK_MODULE_TYPE_128GB = 0x1000,
+};
+
+struct sli4_rsp_read_config {
+ struct sli4_mbox_command_header hdr;
+ __le32 ext_dword;
+ __le32 topology_dword;
+ __le32 resvd8;
+ __le16 e_d_tov;
+ __le16 resvd14;
+ __le32 resvd16;
+ __le16 r_a_tov;
+ __le16 resvd22;
+ __le32 resvd24;
+ __le32 resvd28;
+ __le16 lmt;
+ __le16 resvd34;
+ __le32 resvd36;
+ __le32 resvd40;
+ __le16 xri_base;
+ __le16 xri_count;
+ __le16 rpi_base;
+ __le16 rpi_count;
+ __le16 vpi_base;
+ __le16 vpi_count;
+ __le16 vfi_base;
+ __le16 vfi_count;
+ __le16 resvd60;
+ __le16 fcfi_count;
+ __le16 rq_count;
+ __le16 eq_count;
+ __le16 wq_count;
+ __le16 cq_count;
+ __le32 pad[45];
+};
+
+/* READ_NVPARMS - read SLI port configuration parameters */
+enum sli4_read_nvparms_flags {
+ SLI4_READ_NVPARAMS_HARD_ALPA = 0xff,
+ SLI4_READ_NVPARAMS_PREFERRED_D_ID = 0xffffff00,
+};
+
+struct sli4_cmd_read_nvparms {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ __le32 resvd8;
+ __le32 resvd12;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ __le32 hard_alpa_d_id;
+};
+
+/* WRITE_NVPARMS - write SLI port configuration parameters */
+struct sli4_cmd_write_nvparms {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ __le32 resvd8;
+ __le32 resvd12;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ __le32 hard_alpa_d_id;
+};
+
+/* READ_REV - read the Port revision levels */
+enum {
+ SLI4_READ_REV_FLAG_SLI_LEVEL = 0xf,
+ SLI4_READ_REV_FLAG_FCOEM = 0x10,
+ SLI4_READ_REV_FLAG_CEEV = 0x60,
+ SLI4_READ_REV_FLAG_VPD = 0x2000,
+
+ SLI4_READ_REV_AVAILABLE_LENGTH = 0xffffff,
+};
+
+struct sli4_cmd_read_rev {
+ struct sli4_mbox_command_header hdr;
+ __le16 resvd0;
+ __le16 flags0_word;
+ __le32 first_hw_rev;
+ __le32 second_hw_rev;
+ __le32 resvd12;
+ __le32 third_hw_rev;
+ u8 fc_ph_low;
+ u8 fc_ph_high;
+ u8 feature_level_low;
+ u8 feature_level_high;
+ __le32 resvd24;
+ __le32 first_fw_id;
+ u8 first_fw_name[16];
+ __le32 second_fw_id;
+ u8 second_fw_name[16];
+ __le32 rsvd18[30];
+ __le32 available_length_dword;
+ struct sli4_dmaaddr hostbuf;
+ __le32 returned_vpd_length;
+ __le32 actual_vpd_length;
+};
+
+/* READ_SPARM64 - read the Port service parameters */
+#define SLI4_READ_SPARM64_WWPN_OFFSET (4 * sizeof(u32))
+#define SLI4_READ_SPARM64_WWNN_OFFSET (6 * sizeof(u32))
+
+struct sli4_cmd_read_sparm64 {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ struct sli4_bde bde_64;
+ __le16 vpi;
+ __le16 resvd22;
+ __le16 port_name_start;
+ __le16 port_name_len;
+ __le16 node_name_start;
+ __le16 node_name_len;
+};
+
+/* READ_TOPOLOGY - read the link event information */
+enum sli4_read_topo_e {
+ SLI4_READTOPO_ATTEN_TYPE = 0xff,
+ SLI4_READTOPO_FLAG_IL = 0x100,
+ SLI4_READTOPO_FLAG_PB_RECVD = 0x200,
+
+ SLI4_READTOPO_LINKSTATE_RECV = 0x3,
+ SLI4_READTOPO_LINKSTATE_TRANS = 0xc,
+ SLI4_READTOPO_LINKSTATE_MACHINE = 0xf0,
+ SLI4_READTOPO_LINKSTATE_SPEED = 0xff00,
+ SLI4_READTOPO_LINKSTATE_TF = 0x40000000,
+ SLI4_READTOPO_LINKSTATE_LU = 0x80000000,
+
+ SLI4_READTOPO_SCN_BBSCN = 0xf,
+ SLI4_READTOPO_SCN_CBBSCN = 0xf0,
+
+ SLI4_READTOPO_R_T_TOV = 0x1ff,
+ SLI4_READTOPO_AL_TOV = 0xf000,
+
+ SLI4_READTOPO_PB_FLAG = 0x80,
+
+ SLI4_READTOPO_INIT_N_PORTID = 0xffffff,
+};
+
+#define SLI4_MIN_LOOP_MAP_BYTES 128
+
+struct sli4_cmd_read_topology {
+ struct sli4_mbox_command_header hdr;
+ __le32 event_tag;
+ __le32 dw2_attentype;
+ u8 topology;
+ u8 lip_type;
+ u8 lip_al_ps;
+ u8 al_pa_granted;
+ struct sli4_bde bde_loop_map;
+ __le32 linkdown_state;
+ __le32 currlink_state;
+ u8 max_bbc;
+ u8 init_bbc;
+ u8 scn_flags;
+ u8 rsvd39;
+ __le16 dw10w0_al_rt_tov;
+ __le16 lp_tov;
+ u8 acquired_al_pa;
+ u8 pb_flags;
+ __le16 specified_al_pa;
+ __le32 dw12_init_n_port_id;
+};
+
+enum sli4_read_topo_link {
+ SLI4_READ_TOPOLOGY_LINK_UP = 0x1,
+ SLI4_READ_TOPOLOGY_LINK_DOWN,
+ SLI4_READ_TOPOLOGY_LINK_NO_ALPA,
+};
+
+enum sli4_read_topo {
+ SLI4_READ_TOPO_UNKNOWN = 0x0,
+ SLI4_READ_TOPO_NON_FC_AL,
+ SLI4_READ_TOPO_FC_AL,
+};
+
+enum sli4_read_topo_speed {
+ SLI4_READ_TOPOLOGY_SPEED_NONE = 0x00,
+ SLI4_READ_TOPOLOGY_SPEED_1G = 0x04,
+ SLI4_READ_TOPOLOGY_SPEED_2G = 0x08,
+ SLI4_READ_TOPOLOGY_SPEED_4G = 0x10,
+ SLI4_READ_TOPOLOGY_SPEED_8G = 0x20,
+ SLI4_READ_TOPOLOGY_SPEED_10G = 0x40,
+ SLI4_READ_TOPOLOGY_SPEED_16G = 0x80,
+ SLI4_READ_TOPOLOGY_SPEED_32G = 0x90,
+ SLI4_READ_TOPOLOGY_SPEED_64G = 0xa0,
+ SLI4_READ_TOPOLOGY_SPEED_128G = 0xb0,
+};
+
+/* REG_FCFI - activate a FC Forwarder */
+struct sli4_cmd_reg_fcfi_rq_cfg {
+ u8 r_ctl_mask;
+ u8 r_ctl_match;
+ u8 type_mask;
+ u8 type_match;
+};
+
+enum sli4_regfcfi_tag {
+ SLI4_REGFCFI_VLAN_TAG = 0xfff,
+ SLI4_REGFCFI_VLANTAG_VALID = 0x1000,
+};
+
+#define SLI4_CMD_REG_FCFI_NUM_RQ_CFG 4
+struct sli4_cmd_reg_fcfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 fcf_index;
+ __le16 fcfi;
+ __le16 rqid1;
+ __le16 rqid0;
+ __le16 rqid3;
+ __le16 rqid2;
+ struct sli4_cmd_reg_fcfi_rq_cfg
+ rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ __le32 dw8_vlan;
+};
+
+#define SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG 4
+#define SLI4_CMD_REG_FCFI_MRQ_MAX_NUM_RQ 32
+#define SLI4_CMD_REG_FCFI_SET_FCFI_MODE 0
+#define SLI4_CMD_REG_FCFI_SET_MRQ_MODE 1
+
+enum sli4_reg_fcfi_mrq {
+ SLI4_REGFCFI_MRQ_VLAN_TAG = 0xfff,
+ SLI4_REGFCFI_MRQ_VLANTAG_VALID = 0x1000,
+ SLI4_REGFCFI_MRQ_MODE = 0x2000,
+
+ SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS = 0xff,
+ SLI4_REGFCFI_MRQ_FILTER_BITMASK = 0xf00,
+ SLI4_REGFCFI_MRQ_RQ_SEL_POLICY = 0xf000,
+};
+
+struct sli4_cmd_reg_fcfi_mrq {
+ struct sli4_mbox_command_header hdr;
+ __le16 fcf_index;
+ __le16 fcfi;
+ __le16 rqid1;
+ __le16 rqid0;
+ __le16 rqid3;
+ __le16 rqid2;
+ struct sli4_cmd_reg_fcfi_rq_cfg
+ rq_cfg[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ __le32 dw8_vlan;
+ __le32 dw9_mrqflags;
+};
+
+struct sli4_cmd_rq_cfg {
+ __le16 rq_id;
+ u8 r_ctl_mask;
+ u8 r_ctl_match;
+ u8 type_mask;
+ u8 type_match;
+};
+
+/* REG_RPI - register a Remote Port Indicator */
+enum sli4_reg_rpi {
+ SLI4_REGRPI_REMOTE_N_PORTID = 0xffffff, /* DW2 */
+ SLI4_REGRPI_UPD = 0x1000000,
+ SLI4_REGRPI_ETOW = 0x8000000,
+ SLI4_REGRPI_TERP = 0x20000000,
+ SLI4_REGRPI_CI = 0x80000000,
+};
+
+struct sli4_cmd_reg_rpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 rpi;
+ __le16 rsvd2;
+ __le32 dw2_rportid_flags;
+ struct sli4_bde bde_64;
+ __le16 vpi;
+ __le16 rsvd26;
+};
+
+#define SLI4_REG_RPI_BUF_LEN 0x70
+
+/* REG_VFI - register a Virtual Fabric Indicator */
+enum sli_reg_vfi {
+ SLI4_REGVFI_VP = 0x1000, /* DW1 */
+ SLI4_REGVFI_UPD = 0x2000,
+
+ SLI4_REGVFI_LOCAL_N_PORTID = 0xffffff, /* DW10 */
+};
+
+struct sli4_cmd_reg_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vfi;
+ __le16 dw0w1_flags;
+ __le16 fcfi;
+ __le16 vpi;
+ u8 wwpn[8];
+ struct sli4_bde sparm;
+ __le32 e_d_tov;
+ __le32 r_a_tov;
+ __le32 dw10_lportid_flags;
+};
+
+/* REG_VPI - register a Virtual Port Indicator */
+enum sli4_reg_vpi {
+ SLI4_REGVPI_LOCAL_N_PORTID = 0xffffff,
+ SLI4_REGVPI_UPD = 0x1000000,
+};
+
+struct sli4_cmd_reg_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 dw2_lportid_flags;
+ u8 wwpn[8];
+ __le32 rsvd12;
+ __le16 vpi;
+ __le16 vfi;
+};
+
+/* REQUEST_FEATURES - request / query SLI features */
+enum sli4_req_features_flags {
+ SLI4_REQFEAT_QRY = 0x1, /* DW1 */
+
+ SLI4_REQFEAT_IAAB = 1 << 0, /* DW2 & DW3 */
+ SLI4_REQFEAT_NPIV = 1 << 1,
+ SLI4_REQFEAT_DIF = 1 << 2,
+ SLI4_REQFEAT_VF = 1 << 3,
+ SLI4_REQFEAT_FCPI = 1 << 4,
+ SLI4_REQFEAT_FCPT = 1 << 5,
+ SLI4_REQFEAT_FCPC = 1 << 6,
+ SLI4_REQFEAT_RSVD = 1 << 7,
+ SLI4_REQFEAT_RQD = 1 << 8,
+ SLI4_REQFEAT_IAAR = 1 << 9,
+ SLI4_REQFEAT_HLM = 1 << 10,
+ SLI4_REQFEAT_PERFH = 1 << 11,
+ SLI4_REQFEAT_RXSEQ = 1 << 12,
+ SLI4_REQFEAT_RXRI = 1 << 13,
+ SLI4_REQFEAT_DCL2 = 1 << 14,
+ SLI4_REQFEAT_RSCO = 1 << 15,
+ SLI4_REQFEAT_MRQP = 1 << 16,
+};
+
+struct sli4_cmd_request_features {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_qry;
+ __le32 cmd;
+ __le32 resp;
+};
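+
+/*
+ * Usage sketch (illustrative assumption, not a helper defined by this
+ * header): the cmd word carries the feature bits being requested and,
+ * once the mailbox completes, resp reports which of them the port granted:
+ *
+ * struct sli4_cmd_request_features *req = buf;
+ *
+ * req->hdr.command = SLI4_MBX_CMD_RQST_FEATURES;
+ * req->cmd = cpu_to_le32(SLI4_REQFEAT_NPIV | SLI4_REQFEAT_HLM);
+ * ... issue the bootstrap mailbox command ...
+ * granted = le32_to_cpu(req->resp);
+ */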
+
+/*
+ * SLI_CONFIG - submit a configuration command to the Port
+ *
+ * The command is either embedded as part of the payload (embed) or located
+ * in a separate memory buffer (mem).
+ */
+enum sli4_sli_config {
+ SLI4_SLICONF_EMB = 0x1, /* DW1 */
+ SLI4_SLICONF_PMDCMD_SHIFT = 3,
+ SLI4_SLICONF_PMDCMD_MASK = 0xf8,
+ SLI4_SLICONF_PMDCMD_VAL_1 = 8,
+ SLI4_SLICONF_PMDCNT = 0xf8,
+
+ SLI4_SLICONF_PMD_LEN = 0x00ffffff,
+};
+
+struct sli4_cmd_sli_config {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 payload_len;
+ __le32 rsvd12[3];
+ union {
+ u8 embed[58 * sizeof(u32)];
+ struct sli4_bufptr mem;
+ } payload;
+};
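+
+/*
+ * Usage sketch (illustrative assumption): a small request is built directly
+ * in the embed[] area with the EMB bit set; a larger request instead leaves
+ * EMB clear and points payload.mem at an external buffer:
+ *
+ * struct sli4_cmd_sli_config *cfg = buf;
+ * struct sli4_rqst_hdr *rqst = (struct sli4_rqst_hdr *)cfg->payload.embed;
+ *
+ * cfg->dw1_flags = cpu_to_le32(SLI4_SLICONF_EMB);
+ * cfg->payload_len = cpu_to_le32(pyld_len);
+ * ... fill *rqst with the subsystem/opcode specific request ...
+ */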
+
+/* READ_STATUS - read tx/rx status of a particular port */
+#define SLI4_READSTATUS_CLEAR_COUNTERS 0x1
+
+struct sli4_cmd_read_status {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 rsvd4;
+ __le32 trans_kbyte_cnt;
+ __le32 recv_kbyte_cnt;
+ __le32 trans_frame_cnt;
+ __le32 recv_frame_cnt;
+ __le32 trans_seq_cnt;
+ __le32 recv_seq_cnt;
+ __le32 tot_exchanges_orig;
+ __le32 tot_exchanges_resp;
+ __le32 recv_p_bsy_cnt;
+ __le32 recv_f_bsy_cnt;
+ __le32 no_rq_buf_dropped_frames_cnt;
+ __le32 empty_rq_timeout_cnt;
+ __le32 no_xri_dropped_frames_cnt;
+ __le32 empty_xri_pool_cnt;
+};
+
+/* READ_LNK_STAT - read link status of a particular port */
+enum sli4_read_link_stats_flags {
+ SLI4_READ_LNKSTAT_REC = 1u << 0,
+ SLI4_READ_LNKSTAT_GEC = 1u << 1,
+ SLI4_READ_LNKSTAT_W02OF = 1u << 2,
+ SLI4_READ_LNKSTAT_W03OF = 1u << 3,
+ SLI4_READ_LNKSTAT_W04OF = 1u << 4,
+ SLI4_READ_LNKSTAT_W05OF = 1u << 5,
+ SLI4_READ_LNKSTAT_W06OF = 1u << 6,
+ SLI4_READ_LNKSTAT_W07OF = 1u << 7,
+ SLI4_READ_LNKSTAT_W08OF = 1u << 8,
+ SLI4_READ_LNKSTAT_W09OF = 1u << 9,
+ SLI4_READ_LNKSTAT_W10OF = 1u << 10,
+ SLI4_READ_LNKSTAT_W11OF = 1u << 11,
+ SLI4_READ_LNKSTAT_W12OF = 1u << 12,
+ SLI4_READ_LNKSTAT_W13OF = 1u << 13,
+ SLI4_READ_LNKSTAT_W14OF = 1u << 14,
+ SLI4_READ_LNKSTAT_W15OF = 1u << 15,
+ SLI4_READ_LNKSTAT_W16OF = 1u << 16,
+ SLI4_READ_LNKSTAT_W17OF = 1u << 17,
+ SLI4_READ_LNKSTAT_W18OF = 1u << 18,
+ SLI4_READ_LNKSTAT_W19OF = 1u << 19,
+ SLI4_READ_LNKSTAT_W20OF = 1u << 20,
+ SLI4_READ_LNKSTAT_W21OF = 1u << 21,
+ SLI4_READ_LNKSTAT_CLRC = 1u << 30,
+ SLI4_READ_LNKSTAT_CLOF = 1u << 31,
+};
+
+struct sli4_cmd_read_link_stats {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 linkfail_errcnt;
+ __le32 losssync_errcnt;
+ __le32 losssignal_errcnt;
+ __le32 primseq_errcnt;
+ __le32 inval_txword_errcnt;
+ __le32 crc_errcnt;
+ __le32 primseq_eventtimeout_cnt;
+ __le32 elastic_bufoverrun_errcnt;
+ __le32 arbit_fc_al_timeout_cnt;
+ __le32 adv_rx_buftor_to_buf_credit;
+ __le32 curr_rx_buf_to_buf_credit;
+ __le32 adv_tx_buf_to_buf_credit;
+ __le32 curr_tx_buf_to_buf_credit;
+ __le32 rx_eofa_cnt;
+ __le32 rx_eofdti_cnt;
+ __le32 rx_eofni_cnt;
+ __le32 rx_soff_cnt;
+ __le32 rx_dropped_no_aer_cnt;
+ __le32 rx_dropped_no_avail_rpi_rescnt;
+ __le32 rx_dropped_no_avail_xri_rescnt;
+};
+
+/* Format a WQE with WQ_ID Association performance hint */
+static inline void
+sli_set_wq_id_association(void *entry, u16 q_id)
+{
+ u32 *wqe = entry;
+
+ /*
+ * Set Word 10, bit 0 to zero
+ * Set Word 10, bits 15:1 to the WQ ID
+ */
+ wqe[10] &= ~0xffff;
+ wqe[10] |= q_id << 1;
+}
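+
+/*
+ * Worked example (illustrative values): sli_set_wq_id_association(wqe, 5)
+ * clears bit 0 of word 10 and writes the WQ ID into bits 15:1, leaving
+ * (wqe[10] & 0xffff) == (5 << 1) == 0xa.
+ */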
+
+/* UNREG_FCFI - unregister a FCFI */
+struct sli4_cmd_unreg_fcfi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 fcfi;
+ __le16 rsvd6;
+};
+
+/* UNREG_RPI - unregister one or more RPI */
+enum sli4_unreg_rpi {
+ SLI4_UNREG_RPI_DP = 0x2000,
+ SLI4_UNREG_RPI_II_SHIFT = 14,
+ SLI4_UNREG_RPI_II_MASK = 0xc000,
+ SLI4_UNREG_RPI_II_RPI = 0x0000,
+ SLI4_UNREG_RPI_II_VPI = 0x4000,
+ SLI4_UNREG_RPI_II_VFI = 0x8000,
+ SLI4_UNREG_RPI_II_FCFI = 0xc000,
+
+ SLI4_UNREG_RPI_DEST_N_PORTID_MASK = 0x00ffffff,
+};
+
+struct sli4_cmd_unreg_rpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 index;
+ __le16 dw1w1_flags;
+ __le32 dw2_dest_n_portid;
+};
+
+/* UNREG_VFI - unregister one or more VFI */
+enum sli4_unreg_vfi {
+ SLI4_UNREG_VFI_II_SHIFT = 14,
+ SLI4_UNREG_VFI_II_MASK = 0xc000,
+ SLI4_UNREG_VFI_II_VFI = 0x0000,
+ SLI4_UNREG_VFI_II_FCFI = 0xc000,
+};
+
+struct sli4_cmd_unreg_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 index;
+ __le16 dw2_flags;
+};
+
+enum sli4_unreg_type {
+ SLI4_UNREG_TYPE_PORT,
+ SLI4_UNREG_TYPE_DOMAIN,
+ SLI4_UNREG_TYPE_FCF,
+ SLI4_UNREG_TYPE_ALL
+};
+
+/* UNREG_VPI - unregister one or more VPI */
+enum sli4_unreg_vpi {
+ SLI4_UNREG_VPI_II_SHIFT = 14,
+ SLI4_UNREG_VPI_II_MASK = 0xc000,
+ SLI4_UNREG_VPI_II_VPI = 0x0000,
+ SLI4_UNREG_VPI_II_VFI = 0x8000,
+ SLI4_UNREG_VPI_II_FCFI = 0xc000,
+};
+
+struct sli4_cmd_unreg_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 index;
+ __le16 dw2w0_flags;
+};
+
+/* AUTO_XFER_RDY - Configure the auto-generate XFER-RDY feature */
+struct sli4_cmd_config_auto_xfer_rdy {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 max_burst_len;
+};
+
+#define SLI4_CONFIG_AUTO_XFERRDY_BLKSIZE 0xffff
+
+struct sli4_cmd_config_auto_xfer_rdy_hp {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 max_burst_len;
+ __le32 dw3_esoc_flags;
+ __le16 block_size;
+ __le16 rsvd14;
+};
+
+/*************************************************************************
+ * SLI-4 common configuration command formats and definitions
+ */
+
+/*
+ * Subsystem values.
+ */
+enum sli4_subsystem {
+ SLI4_SUBSYSTEM_COMMON = 0x01,
+ SLI4_SUBSYSTEM_LOWLEVEL = 0x0b,
+ SLI4_SUBSYSTEM_FC = 0x0c,
+ SLI4_SUBSYSTEM_DMTF = 0x11,
+};
+
+#define SLI4_OPC_LOWLEVEL_SET_WATCHDOG 0x36
+
+/*
+ * Common opcode (OPC) values.
+ */
+enum sli4_cmn_opcode {
+ SLI4_CMN_FUNCTION_RESET = 0x3d,
+ SLI4_CMN_CREATE_CQ = 0x0c,
+ SLI4_CMN_CREATE_CQ_SET = 0x1d,
+ SLI4_CMN_DESTROY_CQ = 0x36,
+ SLI4_CMN_MODIFY_EQ_DELAY = 0x29,
+ SLI4_CMN_CREATE_EQ = 0x0d,
+ SLI4_CMN_DESTROY_EQ = 0x37,
+ SLI4_CMN_CREATE_MQ_EXT = 0x5a,
+ SLI4_CMN_DESTROY_MQ = 0x35,
+ SLI4_CMN_GET_CNTL_ATTRIBUTES = 0x20,
+ SLI4_CMN_NOP = 0x21,
+ SLI4_CMN_GET_RSC_EXTENT_INFO = 0x9a,
+ SLI4_CMN_GET_SLI4_PARAMS = 0xb5,
+ SLI4_CMN_QUERY_FW_CONFIG = 0x3a,
+ SLI4_CMN_GET_PORT_NAME = 0x4d,
+
+ SLI4_CMN_WRITE_FLASHROM = 0x07,
+ /* TRANSCEIVER Data */
+ SLI4_CMN_READ_TRANS_DATA = 0x49,
+ SLI4_CMN_GET_CNTL_ADDL_ATTRS = 0x79,
+ SLI4_CMN_GET_FUNCTION_CFG = 0xa0,
+ SLI4_CMN_GET_PROFILE_CFG = 0xa4,
+ SLI4_CMN_SET_PROFILE_CFG = 0xa5,
+ SLI4_CMN_GET_PROFILE_LIST = 0xa6,
+ SLI4_CMN_GET_ACTIVE_PROFILE = 0xa7,
+ SLI4_CMN_SET_ACTIVE_PROFILE = 0xa8,
+ SLI4_CMN_READ_OBJECT = 0xab,
+ SLI4_CMN_WRITE_OBJECT = 0xac,
+ SLI4_CMN_DELETE_OBJECT = 0xae,
+ SLI4_CMN_READ_OBJECT_LIST = 0xad,
+ SLI4_CMN_SET_DUMP_LOCATION = 0xb8,
+ SLI4_CMN_SET_FEATURES = 0xbf,
+ SLI4_CMN_GET_RECFG_LINK_INFO = 0xc9,
+ SLI4_CMN_SET_RECNG_LINK_ID = 0xca,
+};
+
+/* DMTF opcode (OPC) values */
+#define DMTF_EXEC_CLP_CMD 0x01
+
+/*
+ * COMMON_FUNCTION_RESET
+ *
+ * Resets the Port, returning it to a power-on state. This configuration
+ * command does not have a payload and should set/expect the lengths to
+ * be zero.
+ */
+struct sli4_rqst_cmn_function_reset {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_function_reset {
+ struct sli4_rsp_hdr hdr;
+};
+
+/*
+ * COMMON_GET_CNTL_ATTRIBUTES
+ *
+ * Query for information about the SLI Port
+ */
+enum sli4_cntrl_attr_flags {
+ SLI4_CNTL_ATTR_PORTNUM = 0x3f,
+ SLI4_CNTL_ATTR_PORTTYPE = 0xc0,
+};
+
+struct sli4_rsp_cmn_get_cntl_attributes {
+ struct sli4_rsp_hdr hdr;
+ u8 version_str[32];
+ u8 manufacturer_name[32];
+ __le32 supported_modes;
+ u8 eprom_version_lo;
+ u8 eprom_version_hi;
+ __le16 rsvd17;
+ __le32 mbx_ds_version;
+ __le32 ep_fw_ds_version;
+ u8 ncsi_version_str[12];
+ __le32 def_extended_timeout;
+ u8 model_number[32];
+ u8 description[64];
+ u8 serial_number[32];
+ u8 ip_version_str[32];
+ u8 fw_version_str[32];
+ u8 bios_version_str[32];
+ u8 redboot_version_str[32];
+ u8 driver_version_str[32];
+ u8 fw_on_flash_version_str[32];
+ __le32 functionalities_supported;
+ __le16 max_cdb_length;
+ u8 asic_revision;
+ u8 generational_guid0;
+ __le32 generational_guid1_12[3];
+ __le16 generational_guid13_14;
+ u8 generational_guid15;
+ u8 hba_port_count;
+ __le16 default_link_down_timeout;
+ u8 iscsi_version_min_max;
+ u8 multifunctional_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 port_num_type_flags;
+ __le32 firmware_post_status;
+ __le32 hba_mtu;
+ u8 iscsi_features;
+ u8 rsvd121[3];
+ __le16 pci_vendor_id;
+ __le16 pci_device_id;
+ __le16 pci_sub_vendor_id;
+ __le16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ __le64 unique_identifier;
+ u8 number_of_netfilters;
+ u8 rsvd122[3];
+};
+
+/*
+ * COMMON_GET_CNTL_ADDL_ATTRIBUTES
+ *
+ * This command queries additional controller information from the Flash ROM.
+ */
+struct sli4_rqst_cmn_get_cntl_addl_attributes {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_cntl_addl_attributes {
+ struct sli4_rsp_hdr hdr;
+ __le16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd4;
+ u8 on_die_temperature;
+ u8 rsvd5[3];
+ __le32 driver_advanced_features_supported;
+ __le32 rsvd7[4];
+ char universal_bios_version[32];
+ char x86_bios_version[32];
+ char efi_bios_version[32];
+ char fcode_version[32];
+ char uefi_bios_version[32];
+ char uefi_nic_version[32];
+ char uefi_fcode_version[32];
+ char uefi_iscsi_version[32];
+ char iscsi_x86_bios_version[32];
+ char pxe_x86_bios_version[32];
+ u8 default_wwpn[8];
+ u8 ext_phy_version[32];
+ u8 fc_universal_bios_version[32];
+ u8 fc_x86_bios_version[32];
+ u8 fc_efi_bios_version[32];
+ u8 fc_fcode_version[32];
+ u8 ext_phy_crc_label[8];
+ u8 ipl_file_name[16];
+ u8 rsvd139[72];
+};
+
+/*
+ * COMMON_NOP
+ *
+ * This command does not do anything; it only returns
+ * the payload in the completion.
+ */
+struct sli4_rqst_cmn_nop {
+ struct sli4_rqst_hdr hdr;
+ __le32 context[2];
+};
+
+struct sli4_rsp_cmn_nop {
+ struct sli4_rsp_hdr hdr;
+ __le32 context[2];
+};
+
+struct sli4_rqst_cmn_get_resource_extent_info {
+ struct sli4_rqst_hdr hdr;
+ __le16 resource_type;
+ __le16 rsvd16;
+};
+
+enum sli4_rsc_type {
+ SLI4_RSC_TYPE_VFI = 0x20,
+ SLI4_RSC_TYPE_VPI = 0x21,
+ SLI4_RSC_TYPE_RPI = 0x22,
+ SLI4_RSC_TYPE_XRI = 0x23,
+};
+
+struct sli4_rsp_cmn_get_resource_extent_info {
+ struct sli4_rsp_hdr hdr;
+ __le16 resource_extent_count;
+ __le16 resource_extent_size;
+};
+
+#define SLI4_128BYTE_WQE_SUPPORT 0x02
+
+#define GET_Q_CNT_METHOD(m) \
+ (((m) & SLI4_PARAM_Q_CNT_MTHD_MASK) >> SLI4_PARAM_Q_CNT_MTHD_SHFT)
+#define GET_Q_CREATE_VERSION(v) \
+ (((v) & SLI4_PARAM_QV_MASK) >> SLI4_PARAM_QV_SHIFT)
+
+enum sli4_rsp_get_params_e {
+ /* GENERIC */
+ SLI4_PARAM_Q_CNT_MTHD_SHFT = 24,
+ SLI4_PARAM_Q_CNT_MTHD_MASK = 0xf << 24,
+ SLI4_PARAM_QV_SHIFT = 14,
+ SLI4_PARAM_QV_MASK = 3 << 14,
+
+ /* DW4 */
+ SLI4_PARAM_PROTO_TYPE_MASK = 0xff,
+ /* DW5 */
+ SLI4_PARAM_FT = 1 << 0,
+ SLI4_PARAM_SLI_REV_MASK = 0xf << 4,
+ SLI4_PARAM_SLI_FAM_MASK = 0xf << 8,
+ SLI4_PARAM_IF_TYPE_MASK = 0xf << 12,
+ SLI4_PARAM_SLI_HINT1_MASK = 0xff << 16,
+ SLI4_PARAM_SLI_HINT2_MASK = 0x1f << 24,
+ /* DW6 */
+ SLI4_PARAM_EQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_EQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_EQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW8 */
+ SLI4_PARAM_CQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_CQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_CQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW10 */
+ SLI4_PARAM_MQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_MQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW12 */
+ SLI4_PARAM_WQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_WQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_WQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW14 */
+ SLI4_PARAM_RQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_RQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_RQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW15W1 */
+ SLI4_PARAM_RQ_DB_WINDOW_MASK = 0xf000,
+ /* DW16 */
+ SLI4_PARAM_FC = 1 << 0,
+ SLI4_PARAM_EXT = 1 << 1,
+ SLI4_PARAM_HDRR = 1 << 2,
+ SLI4_PARAM_SGLR = 1 << 3,
+ SLI4_PARAM_FBRR = 1 << 4,
+ SLI4_PARAM_AREG = 1 << 5,
+ SLI4_PARAM_TGT = 1 << 6,
+ SLI4_PARAM_TERP = 1 << 7,
+ SLI4_PARAM_ASSI = 1 << 8,
+ SLI4_PARAM_WCHN = 1 << 9,
+ SLI4_PARAM_TCCA = 1 << 10,
+ SLI4_PARAM_TRTY = 1 << 11,
+ SLI4_PARAM_TRIR = 1 << 12,
+ SLI4_PARAM_PHOFF = 1 << 13,
+ SLI4_PARAM_PHON = 1 << 14,
+ SLI4_PARAM_PHWQ = 1 << 15,
+ SLI4_PARAM_BOUND_4GA = 1 << 16,
+ SLI4_PARAM_RXC = 1 << 17,
+ SLI4_PARAM_HLM = 1 << 18,
+ SLI4_PARAM_IPR = 1 << 19,
+ SLI4_PARAM_RXRI = 1 << 20,
+ SLI4_PARAM_SGLC = 1 << 21,
+ SLI4_PARAM_TIMM = 1 << 22,
+ SLI4_PARAM_TSMM = 1 << 23,
+ SLI4_PARAM_OAS = 1 << 25,
+ SLI4_PARAM_LC = 1 << 26,
+ SLI4_PARAM_AGXF = 1 << 27,
+ SLI4_PARAM_LOOPBACK_MASK = 0xf << 28,
+ /* DW18 */
+ SLI4_PARAM_SGL_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_SGL_PAGE_SZS_MASK = 0xff << 8,
+ SLI4_PARAM_SGL_PP_ALIGN_MASK = 0xff << 16,
+};
+
+struct sli4_rqst_cmn_get_sli4_params {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_sli4_params {
+ struct sli4_rsp_hdr hdr;
+ __le32 dw4_protocol_type;
+ __le32 dw5_sli;
+ __le32 dw6_eq_page_cnt;
+ __le16 eqe_count_mask;
+ __le16 rsvd26;
+ __le32 dw8_cq_page_cnt;
+ __le16 cqe_count_mask;
+ __le16 rsvd34;
+ __le32 dw10_mq_page_cnt;
+ __le16 mqe_count_mask;
+ __le16 rsvd42;
+ __le32 dw12_wq_page_cnt;
+ __le16 wqe_count_mask;
+ __le16 rsvd50;
+ __le32 dw14_rq_page_cnt;
+ __le16 rqe_count_mask;
+ __le16 dw15w1_rq_db_window;
+ __le32 dw16_loopback_scope;
+ __le32 sge_supported_length;
+ __le32 dw18_sgl_page_cnt;
+ __le16 min_rq_buffer_size;
+ __le16 rsvd75;
+ __le32 max_rq_buffer_size;
+ __le16 physical_xri_max;
+ __le16 physical_rpi_max;
+ __le16 physical_vpi_max;
+ __le16 physical_vfi_max;
+ __le32 rsvd88;
+ __le16 frag_num_field_offset;
+ __le16 frag_num_field_size;
+ __le16 sgl_index_field_offset;
+ __le16 sgl_index_field_size;
+ __le32 chain_sge_initial_value_lo;
+ __le32 chain_sge_initial_value_hi;
+};
+
+/* Port Types */
+enum sli4_port_types {
+ SLI4_PORT_TYPE_ETH = 0,
+ SLI4_PORT_TYPE_FC = 1,
+};
+
+struct sli4_rqst_cmn_get_port_name {
+ struct sli4_rqst_hdr hdr;
+ u8 port_type;
+ u8 rsvd4[3];
+};
+
+struct sli4_rsp_cmn_get_port_name {
+ struct sli4_rsp_hdr hdr;
+ char port_name[4];
+};
+
+struct sli4_rqst_cmn_write_flashrom {
+ struct sli4_rqst_hdr hdr;
+ __le32 flash_rom_access_opcode;
+ __le32 flash_rom_access_operation_type;
+ __le32 data_buffer_size;
+ __le32 offset;
+ u8 data_buffer[4];
+};
+
+/*
+ * COMMON_READ_TRANSCEIVER_DATA
+ *
+ * This command reads SFF transceiver data (the format is defined
+ * by the SFF-8472 specification).
+ */
+struct sli4_rqst_cmn_read_transceiver_data {
+ struct sli4_rqst_hdr hdr;
+ __le32 page_number;
+ __le32 port;
+};
+
+struct sli4_rsp_cmn_read_transceiver_data {
+ struct sli4_rsp_hdr hdr;
+ __le32 page_number;
+ __le32 port;
+ u8 page_data[128];
+ u8 page_data_2[128];
+};
+
+#define SLI4_REQ_DESIRE_READLEN 0xffffff
+
+struct sli4_rqst_cmn_read_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_read_length_dword;
+ __le32 read_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+#define RSP_COM_READ_OBJ_EOF 0x80000000
+
+struct sli4_rsp_cmn_read_object {
+ struct sli4_rsp_hdr hdr;
+ __le32 actual_read_length;
+ __le32 eof_dword;
+};
+
+enum sli4_rqst_write_object_flags {
+ SLI4_RQ_DES_WRITE_LEN = 0xffffff,
+ SLI4_RQ_DES_WRITE_LEN_NOC = 0x40000000,
+ SLI4_RQ_DES_WRITE_LEN_EOF = 0x80000000,
+};
+
+struct sli4_rqst_cmn_write_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_write_len_dword;
+ __le32 write_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+#define RSP_CHANGE_STATUS 0xff
+
+struct sli4_rsp_cmn_write_object {
+ struct sli4_rsp_hdr hdr;
+ __le32 actual_write_length;
+ __le32 change_status_dword;
+};
+
+struct sli4_rqst_cmn_delete_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ u8 object_name[104];
+};
+
+#define SLI4_RQ_OBJ_LIST_READ_LEN 0xffffff
+
+struct sli4_rqst_cmn_read_object_list {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_read_length_dword;
+ __le32 read_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+enum sli4_rqst_set_dump_flags {
+ SLI4_CMN_SET_DUMP_BUFFER_LEN = 0xffffff,
+ SLI4_CMN_SET_DUMP_FDB = 0x20000000,
+ SLI4_CMN_SET_DUMP_BLP = 0x40000000,
+ SLI4_CMN_SET_DUMP_QRY = 0x80000000,
+};
+
+struct sli4_rqst_cmn_set_dump_location {
+ struct sli4_rqst_hdr hdr;
+ __le32 buffer_length_dword;
+ __le32 buf_addr_low;
+ __le32 buf_addr_high;
+};
+
+struct sli4_rsp_cmn_set_dump_location {
+ struct sli4_rsp_hdr hdr;
+ __le32 buffer_length_dword;
+};
+
+enum sli4_dump_level {
+ SLI4_DUMP_LEVEL_NONE,
+ SLI4_CHIP_LEVEL_DUMP,
+ SLI4_FUNC_DESC_DUMP,
+};
+
+enum sli4_dump_state {
+ SLI4_DUMP_STATE_NONE,
+ SLI4_CHIP_DUMP_STATE_VALID,
+ SLI4_FUNC_DUMP_STATE_VALID,
+};
+
+enum sli4_dump_status {
+ SLI4_DUMP_READY_STATUS_NOT_READY,
+ SLI4_DUMP_READY_STATUS_DD_PRESENT,
+ SLI4_DUMP_READY_STATUS_FDB_PRESENT,
+ SLI4_DUMP_READY_STATUS_SKIP_DUMP,
+ SLI4_DUMP_READY_STATUS_FAILED = -1,
+};
+
+enum sli4_set_features {
+ SLI4_SET_FEATURES_DIF_SEED = 0x01,
+ SLI4_SET_FEATURES_XRI_TIMER = 0x03,
+ SLI4_SET_FEATURES_MAX_PCIE_SPEED = 0x04,
+ SLI4_SET_FEATURES_FCTL_CHECK = 0x05,
+ SLI4_SET_FEATURES_FEC = 0x06,
+ SLI4_SET_FEATURES_PCIE_RECV_DETECT = 0x07,
+ SLI4_SET_FEATURES_DIF_MEMORY_MODE = 0x08,
+ SLI4_SET_FEATURES_DISABLE_SLI_PORT_PAUSE_STATE = 0x09,
+ SLI4_SET_FEATURES_ENABLE_PCIE_OPTIONS = 0x0a,
+ SLI4_SET_FEAT_CFG_AUTO_XFER_RDY_T10PI = 0x0c,
+ SLI4_SET_FEATURES_ENABLE_MULTI_RECEIVE_QUEUE = 0x0d,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT = 0x0f,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK = 0x11,
+};
+
+struct sli4_rqst_cmn_set_features {
+ struct sli4_rqst_hdr hdr;
+ __le32 feature;
+ __le32 param_len;
+ __le32 params[8];
+};
+
+struct sli4_rqst_cmn_set_features_dif_seed {
+ __le16 seed;
+ __le16 rsvd16;
+};
+
+enum sli4_rqst_set_mrq_features {
+ SLI4_RQ_MULTIRQ_ISR = 0x1,
+ SLI4_RQ_MULTIRQ_AUTOGEN_XFER_RDY = 0x2,
+
+ SLI4_RQ_MULTIRQ_NUM_RQS = 0xff,
+ SLI4_RQ_MULTIRQ_RQ_SELECT = 0xf00,
+};
+
+struct sli4_rqst_cmn_set_features_multirq {
+ __le32 auto_gen_xfer_dword;
+ __le32 num_rqs_dword;
+};
+
+enum sli4_rqst_health_check_flags {
+ SLI4_RQ_HEALTH_CHECK_ENABLE = 0x1,
+ SLI4_RQ_HEALTH_CHECK_QUERY = 0x2,
+};
+
+struct sli4_rqst_cmn_set_features_health_check {
+ __le32 health_check_dword;
+};
+
+struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint {
+ __le32 fdt_xfer_hint;
+};
+
+struct sli4_rqst_dmtf_exec_clp_cmd {
+ struct sli4_rqst_hdr hdr;
+ __le32 cmd_buf_length;
+ __le32 resp_buf_length;
+ __le32 cmd_buf_addr_low;
+ __le32 cmd_buf_addr_high;
+ __le32 resp_buf_addr_low;
+ __le32 resp_buf_addr_high;
+};
+
+struct sli4_rsp_dmtf_exec_clp_cmd {
+ struct sli4_rsp_hdr hdr;
+ __le32 rsvd4;
+ __le32 resp_length;
+ __le32 rsvd6;
+ __le32 rsvd7;
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 clp_status;
+ __le32 clp_detailed_status;
+};
+
+#define SLI4_PROTOCOL_FC 0x10
+#define SLI4_PROTOCOL_DEFAULT 0xff
+
+struct sli4_rspource_descriptor_v1 {
+ u8 descriptor_type;
+ u8 descriptor_length;
+ __le16 rsvd16;
+ __le32 type_specific[];
+};
+
+enum sli4_pcie_desc_flags {
+ SLI4_PCIE_DESC_IMM = 0x4000,
+ SLI4_PCIE_DESC_NOSV = 0x8000,
+
+ SLI4_PCIE_DESC_PF_NO = 0x3ff0000,
+
+ SLI4_PCIE_DESC_MISSN_ROLE = 0xff,
+ SLI4_PCIE_DESC_PCHG = 0x8000000,
+ SLI4_PCIE_DESC_SCHG = 0x10000000,
+ SLI4_PCIE_DESC_XCHG = 0x20000000,
+ SLI4_PCIE_DESC_XROM = 0xc0000000
+};
+
+struct sli4_pcie_resource_descriptor_v1 {
+ u8 descriptor_type;
+ u8 descriptor_length;
+ __le16 imm_nosv_dword;
+ __le32 pf_number_dword;
+ __le32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ __le16 number_of_vfs;
+ __le16 rsvd5;
+ __le32 mission_roles_dword;
+ __le32 rsvd7[16];
+};
+
+struct sli4_rqst_cmn_get_function_config {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_function_config {
+ struct sli4_rsp_hdr hdr;
+ __le32 desc_count;
+ __le32 desc[54];
+};
+
+/* Link Config Descriptor for link config functions */
+struct sli4_link_config_descriptor {
+ u8 link_config_id;
+ u8 rsvd1[3];
+ __le32 config_description[8];
+};
+
+#define MAX_LINK_DES 10
+
+struct sli4_rqst_cmn_get_reconfig_link_info {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_reconfig_link_info {
+ struct sli4_rsp_hdr hdr;
+ u8 active_link_config_id;
+ u8 rsvd17;
+ u8 next_link_config_id;
+ u8 rsvd19;
+ __le32 link_configuration_descriptor_count;
+ struct sli4_link_config_descriptor
+ desc[MAX_LINK_DES];
+};
+
+enum sli4_set_reconfig_link_flags {
+ SLI4_SET_RECONFIG_LINKID_NEXT = 0xff,
+ SLI4_SET_RECONFIG_LINKID_FD = 1u << 31,
+};
+
+struct sli4_rqst_cmn_set_reconfig_link_id {
+ struct sli4_rqst_hdr hdr;
+ __le32 dw4_flags;
+};
+
+struct sli4_rsp_cmn_set_reconfig_link_id {
+ struct sli4_rsp_hdr hdr;
+};
+
+struct sli4_rqst_lowlevel_set_watchdog {
+ struct sli4_rqst_hdr hdr;
+ __le16 watchdog_timeout;
+ __le16 rsvd18;
+};
+
+struct sli4_rsp_lowlevel_set_watchdog {
+ struct sli4_rsp_hdr hdr;
+ __le32 rsvd;
+};
+
+/* FC opcode (OPC) values */
+enum sli4_fc_opcodes {
+ SLI4_OPC_WQ_CREATE = 0x1,
+ SLI4_OPC_WQ_DESTROY = 0x2,
+ SLI4_OPC_POST_SGL_PAGES = 0x3,
+ SLI4_OPC_RQ_CREATE = 0x5,
+ SLI4_OPC_RQ_DESTROY = 0x6,
+ SLI4_OPC_READ_FCF_TABLE = 0x8,
+ SLI4_OPC_POST_HDR_TEMPLATES = 0xb,
+ SLI4_OPC_REDISCOVER_FCF = 0x10,
+};
+
+/* Use the default CQ associated with the WQ */
+#define SLI4_CQ_DEFAULT 0xffff
+
+/*
+ * POST_SGL_PAGES
+ *
+ * Register the scatter gather list (SGL) memory and
+ * associate it with an XRI.
+ */
+struct sli4_rqst_post_sgl_pages {
+ struct sli4_rqst_hdr hdr;
+ __le16 xri_start;
+ __le16 xri_count;
+ struct {
+ __le32 page0_low;
+ __le32 page0_high;
+ __le32 page1_low;
+ __le32 page1_high;
+ } page_set[10];
+};
+
+struct sli4_rsp_post_sgl_pages {
+ struct sli4_rsp_hdr hdr;
+};
+
+struct sli4_rqst_post_hdr_templates {
+ struct sli4_rqst_hdr hdr;
+ __le16 rpi_offset;
+ __le16 page_count;
+ struct sli4_dmaaddr page_descriptor[];
+};
+
+#define SLI4_HDR_TEMPLATE_SIZE 64
+
+enum sli4_io_flags {
+/* The XRI associated with this IO is already active */
+ SLI4_IO_CONTINUATION = 1 << 0,
+/* Automatically generate a good RSP frame */
+ SLI4_IO_AUTO_GOOD_RESPONSE = 1 << 1,
+ SLI4_IO_NO_ABORT = 1 << 2,
+/* Set the DNRX bit because no auto xfer rdy buffer is posted */
+ SLI4_IO_DNRX = 1 << 3,
+};
+
+enum sli4_callback {
+ SLI4_CB_LINK,
+ SLI4_CB_MAX,
+};
+
+enum sli4_link_status {
+ SLI4_LINK_STATUS_UP,
+ SLI4_LINK_STATUS_DOWN,
+ SLI4_LINK_STATUS_NO_ALPA,
+ SLI4_LINK_STATUS_MAX,
+};
+
+enum sli4_link_topology {
+ SLI4_LINK_TOPO_NON_FC_AL = 1,
+ SLI4_LINK_TOPO_FC_AL,
+ SLI4_LINK_TOPO_LOOPBACK_INTERNAL,
+ SLI4_LINK_TOPO_LOOPBACK_EXTERNAL,
+ SLI4_LINK_TOPO_NONE,
+ SLI4_LINK_TOPO_MAX,
+};
+
+enum sli4_link_medium {
+ SLI4_LINK_MEDIUM_ETHERNET,
+ SLI4_LINK_MEDIUM_FC,
+ SLI4_LINK_MEDIUM_MAX,
+};
+/****** Driver specific structures ******/
+
+struct sli4_queue {
+ /* Common to all queue types */
+ struct efc_dma dma;
+ spinlock_t lock; /* Lock to protect the doorbell register
+ * writes and queue reads
+ */
+ u32 index; /* current host entry index */
+ u16 size; /* entry size */
+ u16 length; /* number of entries */
+ u16 n_posted; /* number of entries posted for CQ, EQ */
+ u16 id; /* Port assigned xQ_ID */
+ u8 type; /* queue type, i.e. EQ, CQ, ... */
+ void __iomem *db_regaddr; /* register address for the doorbell */
+ u16 phase; /* For if_type = 6, this value toggles
+ * for each pass through the queue; a
+ * queue entry is valid when its CQE valid
+ * bit matches this value
+ */
+ u32 proc_limit; /* limit CQE processed per iteration */
+ u32 posted_limit; /* CQE/EQE process before ring db */
+ u32 max_num_processed;
+ u64 max_process_time;
+ union {
+ u32 r_idx; /* "read" index (MQ only) */
+ u32 flag;
+ } u;
+};
+
+/* Parameters used to populate WQEs */
+struct sli_bls_params {
+ u32 s_id;
+ u32 d_id;
+ u16 ox_id;
+ u16 rx_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u8 payload[12];
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_els_params {
+ u32 s_id;
+ u32 d_id;
+ u16 ox_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u32 xmit_len;
+ u32 rsp_len;
+ u8 timeout;
+ u8 cmd;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+ u32 d_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u32 xmit_len;
+ u32 rsp_len;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_fcp_tgt_params {
+ u32 s_id;
+ u32 d_id;
+ u32 rpi;
+ u32 vpi;
+ u32 offset;
+ u16 ox_id;
+ u16 flags;
+ u8 cs_ctl;
+ u8 timeout;
+ u32 app_id;
+ u32 xmit_len;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli4_link_event {
+ enum sli4_link_status status;
+ enum sli4_link_topology topology;
+ enum sli4_link_medium medium;
+ u32 speed;
+ u8 *loop_map;
+ u32 fc_id;
+};
+
+enum sli4_resource {
+ SLI4_RSRC_VFI,
+ SLI4_RSRC_VPI,
+ SLI4_RSRC_RPI,
+ SLI4_RSRC_XRI,
+ SLI4_RSRC_FCFI,
+ SLI4_RSRC_MAX,
+};
+
+struct sli4_extent {
+ u32 number;
+ u32 size;
+ u32 n_alloc;
+ u32 *base;
+ unsigned long *use_map;
+ u32 map_size;
+};
+
+struct sli4_queue_info {
+ u16 max_qcount[SLI4_QTYPE_MAX];
+ u32 max_qentries[SLI4_QTYPE_MAX];
+ u16 count_mask[SLI4_QTYPE_MAX];
+ u16 count_method[SLI4_QTYPE_MAX];
+ u32 qpage_count[SLI4_QTYPE_MAX];
+};
+
+struct sli4_params {
+ u8 has_extents;
+ u8 auto_reg;
+ u8 auto_xfer_rdy;
+ u8 hdr_template_req;
+ u8 perf_hint;
+ u8 perf_wq_id_association;
+ u8 cq_create_version;
+ u8 mq_create_version;
+ u8 high_login_mode;
+ u8 sgl_pre_registered;
+ u8 sgl_pre_reg_required;
+ u8 t10_dif_inline_capable;
+ u8 t10_dif_separate_capable;
+};
+
+struct sli4 {
+ void *os;
+ struct pci_dev *pci;
+ void __iomem *reg[PCI_STD_NUM_BARS];
+
+ u32 sli_rev;
+ u32 sli_family;
+ u32 if_type;
+
+ u16 asic_type;
+ u16 asic_rev;
+
+ u16 e_d_tov;
+ u16 r_a_tov;
+ struct sli4_queue_info qinfo;
+ u16 link_module_type;
+ u8 rq_batch;
+ u8 port_number;
+ char port_name[2];
+ u16 rq_min_buf_size;
+ u32 rq_max_buf_size;
+ u8 topology;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ u32 fw_rev[2];
+ u8 fw_name[2][16];
+ char ipl_name[16];
+ u32 hw_rev[3];
+ char modeldesc[64];
+ char bios_version_string[32];
+ u32 wqe_size;
+ u32 vpd_length;
+ /*
+ * Tracks the port resources using the extents metaphor. For
+ * devices that don't implement extents (i.e.
+ * has_extents == FALSE), the code models each resource as
+ * a single large extent.
+ */
+ struct sli4_extent ext[SLI4_RSRC_MAX];
+ u32 features;
+ struct sli4_params params;
+ u32 sge_supported_length;
+ u32 sgl_page_sizes;
+ u32 max_sgl_pages;
+
+ /*
+ * Callback functions
+ */
+ int (*link)(void *ctx, void *event);
+ void *link_arg;
+
+ struct efc_dma bmbx;
+
+ /* Save pointer to physical memory descriptor for non-embedded
+ * SLI_CONFIG commands for BMBX dumping purposes
+ */
+ struct efc_dma *bmbx_non_emb_pmd;
+
+ struct efc_dma vpd_data;
+};
+
+static inline void
+sli_cmd_fill_hdr(struct sli4_rqst_hdr *hdr, u8 opc, u8 sub, u32 ver, __le32 len)
+{
+ hdr->opcode = opc;
+ hdr->subsystem = sub;
+ hdr->dw3_version = cpu_to_le32(ver);
+ hdr->request_length = len;
+}
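+
+/*
+ * Usage sketch (illustrative assumption): preparing the header of a
+ * COMMON_NOP request, whose payload is just the two context words that
+ * follow the header (version 0 is assumed here):
+ *
+ * struct sli4_rqst_cmn_nop *nop = rqst;
+ *
+ * sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON,
+ *                  0, cpu_to_le32(sizeof(nop->context)));
+ */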
+
+/*
+ * Get / set parameter functions
+ */
+
+static inline u32
+sli_get_max_sge(struct sli4 *sli4)
+{
+ return sli4->sge_supported_length;
+}
+
+static inline u32
+sli_get_max_sgl(struct sli4 *sli4)
+{
+ if (sli4->sgl_page_sizes != 1) {
+ efc_log_err(sli4, "unsupported SGL page sizes %#x\n",
+ sli4->sgl_page_sizes);
+ return 0;
+ }
+
+ return (sli4->max_sgl_pages * SLI_PAGE_SIZE) / sizeof(struct sli4_sge);
+}
+
+static inline enum sli4_link_medium
+sli_get_medium(struct sli4 *sli4)
+{
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ return SLI4_LINK_MEDIUM_FC;
+ default:
+ return SLI4_LINK_MEDIUM_MAX;
+ }
+}
+
+static inline u32
+sli_get_lmt(struct sli4 *sli4)
+{
+ return sli4->link_module_type;
+}
+
+static inline int
+sli_set_topology(struct sli4 *sli4, u32 value)
+{
+ int rc = 0;
+
+ switch (value) {
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ sli4->topology = value;
+ break;
+ default:
+ efc_log_err(sli4, "unsupported topology %#x\n", value);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static inline u32
+sli_convert_mask_to_count(u32 method, u32 mask)
+{
+ u32 count = 0;
+
+ if (method) {
+ count = 1 << (31 - __builtin_clz(mask));
+ count *= 16;
+ } else {
+ count = mask;
+ }
+
+ return count;
+}
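+
+/*
+ * Worked example (illustrative): with method 0 the mask already is the
+ * entry count, so sli_convert_mask_to_count(0, 0x100) == 0x100; with a
+ * non-zero method the mask encodes the highest set bit in units of 16
+ * entries, so sli_convert_mask_to_count(1, 0x100) == (1 << 8) * 16 == 4096.
+ */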
+
+static inline u32
+sli_reg_read_status(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_STATUS_REGOFF);
+}
+
+static inline int
+sli_fw_error_status(struct sli4 *sli4)
+{
+ return (sli_reg_read_status(sli4) & SLI4_PORT_STATUS_ERR) ? 1 : 0;
+}
+
+static inline u32
+sli_reg_read_err1(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_ERROR1);
+}
+
+static inline u32
+sli_reg_read_err2(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_ERROR2);
+}
+
+static inline int
+sli_fc_rqe_length(struct sli4 *sli4, void *cqe, u32 *len_hdr,
+ u32 *len_data)
+{
+ struct sli4_fc_async_rcqe *rcqe = cqe;
+
+ *len_hdr = *len_data = 0;
+
+ if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ *len_hdr = rcqe->hdpl_byte & SLI4_RACQE_HDPL;
+ *len_data = le16_to_cpu(rcqe->data_placement_length);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static inline u8
+sli_fc_rqe_fcfi(struct sli4 *sli4, void *cqe)
+{
+ u8 code = ((u8 *)cqe)[SLI4_CQE_CODE_OFFSET];
+ u8 fcfi = U8_MAX;
+
+ switch (code) {
+ case SLI4_CQE_CODE_RQ_ASYNC: {
+ struct sli4_fc_async_rcqe *rcqe = cqe;
+
+ fcfi = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_FCFI;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1: {
+ struct sli4_fc_async_rcqe_v1 *rcqev1 = cqe;
+
+ fcfi = rcqev1->fcfi_byte & SLI4_RACQE_FCFI;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: {
+ struct sli4_fc_optimized_write_cmd_cqe *opt_wr = cqe;
+
+ fcfi = opt_wr->flags0 & SLI4_OCQE_FCFI;
+ break;
+ }
+ }
+
+ return fcfi;
+}
+
+/****************************************************************************
+ * Function prototypes
+ */
+int
+sli_cmd_config_link(struct sli4 *sli4, void *buf);
+int
+sli_cmd_down_link(struct sli4 *sli4, void *buf);
+int
+sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki);
+int
+sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf,
+ u32 page_num, struct efc_dma *dma);
+int
+sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_stats,
+ u8 clear_overflow_flags, u8 clear_all_counters);
+int
+sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear);
+int
+sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed,
+ u8 reset_alpa);
+int
+sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi,
+ u16 vpi);
+int
+sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi);
+int
+sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 base, u16 cnt);
+int
+sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri);
+int
+sli_cmd_read_sparm64(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma, u16 vpi);
+int
+sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma);
+int
+sli_cmd_read_nvparms(struct sli4 *sli4, void *buf);
+int
+sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn,
+ u8 *wwnn, u8 hard_alpa, u32 preferred_d_id);
+int
+sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
+ struct sli4_cmd_rq_cfg *rq_cfg);
+int
+sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 index,
+ u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
+ struct sli4_cmd_rq_cfg *rq_cfg);
+int
+sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
+ struct efc_dma *dma, u8 update, u8 enable_t10_pi);
+int
+sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator);
+int
+sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
+ enum sli4_resource which, u32 fc_id);
+int
+sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id,
+ __be64 sli_wwpn, u16 vpi, u16 vfi, bool update);
+int
+sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
+ u16 vfi, u16 fcfi, struct efc_dma dma,
+ u16 vpi, __be64 sli_wwpn, u32 fc_id);
+int
+sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 id, u32 type);
+int
+sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 idx, u32 type);
+int
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context);
+int
+sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf,
+ u16 rtype);
+int
+sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf);
+int
+sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
+ u16 eof, u32 len, u32 offset, char *name, struct efc_dma *dma);
+int
+sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *object_name);
+int
+sli_cmd_common_read_object(struct sli4 *sli4, void *buf,
+ u32 length, u32 offset, char *name, struct efc_dma *dma);
+int
+sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf,
+ struct efc_dma *cmd, struct efc_dma *resp);
+int
+sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf,
+ bool query, bool is_buffer_list, struct efc_dma *dma, u8 fdb);
+int
+sli_cmd_common_set_features(struct sli4 *sli4, void *buf,
+ u32 feature, u32 param_len, void *parameter);
+
+int sli_cqe_mq(struct sli4 *sli4, void *buf);
+int sli_cqe_async(struct sli4 *sli4, void *buf);
+
+int
+sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, void __iomem *r[]);
+void sli_calc_max_qentries(struct sli4 *sli4);
+int sli_init(struct sli4 *sli4);
+int sli_reset(struct sli4 *sli4);
+int sli_fw_reset(struct sli4 *sli4);
+void sli_teardown(struct sli4 *sli4);
+int
+sli_callback(struct sli4 *sli4, enum sli4_callback cb, void *func, void *arg);
+int
+sli_bmbx_command(struct sli4 *sli4);
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+ size_t size, u32 n_entries, u32 align);
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q);
+int
+sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq, u32 num_eq,
+ u32 shift, u32 delay_mult);
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype, struct sli4_queue *q,
+ u32 n_entries, struct sli4_queue *assoc);
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], u32 num_cqs,
+ u32 n_entries, struct sli4_queue *eqs[]);
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype);
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, u32 destroy_queues,
+ u32 free_memory);
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype, u32 *rid,
+ u32 *index);
+int
+sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid);
+int
+sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype);
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id);
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+ enum sli4_qentry *etype, u16 *q_id);
+
+int sli_raise_ue(struct sli4 *sli4, u8 dump);
+int sli_dump_is_ready(struct sli4 *sli4);
+bool sli_reset_required(struct sli4 *sli4);
+bool sli_fw_ready(struct sli4 *sli4);
+
+int
+sli_fc_process_link_attention(struct sli4 *sli4, void *acqe);
+int
+sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
+ u8 *cqe, enum sli4_qentry *etype,
+ u16 *rid);
+u32 sli_fc_response_length(struct sli4 *sli4, u8 *cqe);
+u32 sli_fc_io_length(struct sli4 *sli4, u8 *cqe);
+int sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id);
+u32 sli_fc_ext_status(struct sli4 *sli4, u8 *cqe);
+int
+sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index);
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf,
+ struct efc_dma *qmem, u16 cq_id);
+int sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
+ u32 xri_count, struct efc_dma *page0[], struct efc_dma *page1[],
+ struct efc_dma *dma);
+int
+sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma, u16 rpi, struct efc_dma *payload_dma);
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, u32 n_entries,
+ u32 buffer_size, struct sli4_queue *cq, bool is_hdr);
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, struct sli4_queue *q[],
+ u32 base_cq_id, u32 num, u32 hdr_buf_size, u32 data_buf_size);
+u32 sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi);
+int
+sli_abort_wqe(struct sli4 *sli4, void *buf, enum sli4_abort_type type,
+ bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id);
+
+int
+sli_send_frame_wqe(struct sli4 *sli4, void *buf, u8 sof, u8 eof,
+ u32 *hdr, struct efc_dma *payload, u32 req_len, u8 timeout,
+ u16 xri, u16 req_tag);
+
+int
+sli_xmit_els_rsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *rsp,
+ struct sli_els_params *params);
+
+int
+sli_els_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_els_params *params);
+
+int
+sli_fcp_icmnd64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout);
+
+int
+sli_fcp_iread64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 dif, u8 bs,
+ u8 timeout);
+
+int
+sli_fcp_iwrite64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len,
+ u32 first_burst, u16 xri, u16 tag, u16 cq_id, u32 rpi,
+ u32 rnode_fcid, u8 dif, u8 bs, u8 timeout);
+
+int
+sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params);
+int
+sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 sec_xri, u16 cq_id, u8 dif,
+ u8 bs, struct sli_fcp_tgt_params *params);
+
+int
+sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params);
+
+int
+sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params);
+int
+sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_ct_params *params);
+
+int
+sli_xmit_bls_rsp64_wqe(struct sli4 *sli4, void *buf,
+ struct sli_bls_payload *payload, struct sli_bls_params *params);
+
+int
+sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
+ struct sli_ct_params *params);
+
+int
+sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id);
+void
+sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf, size_t size,
+ u16 timeout);
+
+const char *sli_fc_get_status_string(u32 status);
+
+#endif /* !_SLI4_H */
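A minimal usage sketch, not part of the patch, showing how a caller might drive the queue interface declared above. The 0-on-success return convention for sli_wq_write()/sli_cq_read() and the 16-byte CQE buffer are assumptions made for illustration only.

static int demo_post_and_reap(struct sli4 *sli4, struct sli4_queue *wq,
			      struct sli4_queue *cq, u8 *wqe)
{
	enum sli4_qentry etype;
	u8 cqe[16];	/* assumed CQE size */
	u16 rid;
	int rc;

	rc = sli_wq_write(sli4, wq, wqe);		/* post the work entry */
	if (rc < 0)
		return rc;

	while (sli_cq_read(sli4, cq, cqe) == 0) {	/* drain completions */
		rc = sli_fc_cqe_parse(sli4, cq, cqe, &etype, &rid);
		if (rc < 0)
			break;
	}

	sli_queue_arm(sli4, cq, true);			/* re-arm the CQ */
	return rc;
}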
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
index 4aca3d52c851..ff2ad9b38575 100644
--- a/drivers/scsi/esas2r/atioctl.h
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -1141,7 +1141,7 @@ struct __packed atto_ioctl_vda_gsv_cmd {
u8 rsp_len;
u8 reserved[7];
- u8 version_info[1];
+ u8 version_info[];
#define ATTO_VDA_VER_UNSUPPORTED 0xFF
};
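The hunk above converts a one-element trailing array into a C99 flexible array member. A generic sketch of the allocation pattern that pairs with it (demo names, not the driver's):

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct demo_gsv {
	u8 rsp_len;
	u8 reserved[7];
	u8 version_info[];	/* flexible array member */
};

static struct demo_gsv *demo_alloc(int count)
{
	struct demo_gsv *v;

	/* struct_size() adds count * sizeof(element) with overflow checks,
	 * which the old [1]-sized array made easy to get wrong.
	 */
	v = kzalloc(struct_size(v, version_info, count), GFP_KERNEL);
	return v;
}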
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 45ec9f16c085..647f82898b6e 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -1525,7 +1525,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,
rq->cmd->result =
((esas2r_req_status_to_error(rq->req_stat) << 16)
- | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+ | rq->func_rsp.scsi_rsp.scsi_stat);
if (rq->req_stat == RS_UNDERRUN)
scsi_set_resid(rq->cmd,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 342535ac0570..9a8c037a2f21 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -922,9 +922,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
* saw originally. Also, report that we are providing
* the sense data.
*/
- cmd->result = ((DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (SAM_STAT_CHECK_CONDITION << 0));
+ cmd->result = SAM_STAT_CHECK_CONDITION;
ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
if (esp_debug & ESP_DEBUG_AUTOSENSE) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 89ec735929c3..5ae6c207d3ac 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -293,7 +293,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
- u8 flogi_maddr[ETH_ALEN];
+ static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
const struct net_device_ops *ops;
fcoe->netdev = netdev;
@@ -336,7 +336,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* or enter promiscuous mode if not capable of listening
* for multiple unicast MACs.
*/
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_uc_add(netdev, flogi_maddr);
if (fip->spma)
dev_uc_add(netdev, fip->ctl_src_addr);
@@ -442,7 +441,7 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
- u8 flogi_maddr[ETH_ALEN];
+ static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
const struct net_device_ops *ops;
/*
@@ -458,7 +457,6 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
synchronize_net();
/* Delete secondary MAC addresses */
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_uc_del(netdev, flogi_maddr);
if (fip->spma)
dev_uc_del(netdev, fip->ctl_src_addr);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 772bdc93930a..eda2be534aa7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -202,11 +202,10 @@ static int fdomain_select(struct Scsi_Host *sh, int target)
return 1;
}
-static void fdomain_finish_cmd(struct fdomain *fd, int result)
+static void fdomain_finish_cmd(struct fdomain *fd)
{
outb(0, fd->base + REG_ICTL);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->result = result;
fd->cur_cmd->scsi_done(fd->cur_cmd);
fd->cur_cmd = NULL;
}
@@ -273,7 +272,8 @@ static void fdomain_work(struct work_struct *work)
if (cmd->SCp.phase & in_arbitration) {
status = inb(fd->base + REG_ASTAT);
if (!(status & ASTAT_ARB)) {
- fdomain_finish_cmd(fd, DID_BUS_BUSY << 16);
+ set_host_byte(cmd, DID_BUS_BUSY);
+ fdomain_finish_cmd(fd);
goto out;
}
cmd->SCp.phase = in_selection;
@@ -290,7 +290,8 @@ static void fdomain_work(struct work_struct *work)
if (!(status & BSTAT_BSY)) {
/* Try again, for slow devices */
if (fdomain_select(cmd->device->host, scmd_id(cmd))) {
- fdomain_finish_cmd(fd, DID_NO_CONNECT << 16);
+ set_host_byte(cmd, DID_NO_CONNECT);
+ fdomain_finish_cmd(fd);
goto out;
}
/* Stop arbitration and enable parity */
@@ -333,7 +334,7 @@ static void fdomain_work(struct work_struct *work)
break;
case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
- if (!cmd->SCp.Message)
+ if (cmd->SCp.Message == COMMAND_COMPLETE)
++done;
break;
}
@@ -359,9 +360,10 @@ static void fdomain_work(struct work_struct *work)
fdomain_read_data(cmd);
if (done) {
- fdomain_finish_cmd(fd, (cmd->SCp.Status & 0xff) |
- ((cmd->SCp.Message & 0xff) << 8) |
- (DID_OK << 16));
+ set_status_byte(cmd, cmd->SCp.Status);
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ fdomain_finish_cmd(fd);
} else {
if (cmd->SCp.phase & disconnect) {
outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
@@ -439,10 +441,10 @@ static int fdomain_abort(struct scsi_cmnd *cmd)
fdomain_make_bus_idle(fd);
fd->cur_cmd->SCp.phase |= aborted;
- fd->cur_cmd->result = DID_ABORT << 16;
/* Aborts are not done well. . . */
- fdomain_finish_cmd(fd, DID_ABORT << 16);
+ set_host_byte(fd->cur_cmd, DID_ABORT);
+ fdomain_finish_cmd(fd);
spin_unlock_irqrestore(sh->host_lock, flags);
return SUCCESS;
}
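The esp and fdomain conversions above drop open-coded (DID_xxx << 16) | status arithmetic in favour of the midlayer accessors. A simplified sketch of what those accessors do to cmd->result (illustrative, not driver code): the status byte lives in bits 0-7 and the host byte in bits 16-23.

#include <scsi/scsi_cmnd.h>

static void demo_complete(struct scsi_cmnd *cmd,
			  enum scsi_host_status host, u8 status)
{
	cmd->result = 0;
	set_host_byte(cmd, host);	/* e.g. DID_OK, DID_ABORT */
	set_status_byte(cmd, status);	/* e.g. SAM_STAT_GOOD */
	cmd->scsi_done(cmd);
}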
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index cf879cc59e4c..436d174f2194 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,6 +8,7 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/async.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
@@ -37,6 +38,7 @@
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
#define HISI_SAS_PM_BIT 2
+#define HISI_SAS_HW_FAULT_BIT 3
#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
#define HISI_SAS_RESERVED_IPTT 96
#define HISI_SAS_UNRESERVED_IPTT \
@@ -90,8 +92,8 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
-#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
-#define CLEAR_ITCT_TIMEOUT 20
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT (20 * HZ)
+#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
struct hisi_hba;
@@ -185,6 +187,7 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ int wait_phyup_cnt;
atomic_t down_cnt;
/* Trace FIFO */
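With HZ folded into the macros above, the timeout values are already in jiffies, so later call sites add them to jiffies or pass them straight to wait_for_completion_timeout() instead of multiplying by HZ each time. Illustrative fragment only:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define DEMO_PHYUP_TIMEOUT	(20 * HZ)	/* 20 seconds, in jiffies */

static void demo_start_phyup_timer(struct timer_list *t)
{
	t->expires = jiffies + DEMO_PHYUP_TIMEOUT;
	add_timer(t);
}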
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5a204074099c..3a903e8e0384 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -15,7 +15,7 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
- int abort_flag, int tag);
+ int abort_flag, int tag, bool rst_to_recover);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
@@ -857,6 +857,7 @@ static void hisi_sas_phyup_work(struct work_struct *work)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
+ phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
@@ -899,6 +900,8 @@ static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
+#define HISI_SAS_WAIT_PHYUP_RETRIES 10
+
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
@@ -909,8 +912,16 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
return;
if (!timer_pending(&phy->timer)) {
- phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
- add_timer(&phy->timer);
+ if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
+ phy->wait_phyup_cnt++;
+ phy->timer.expires = jiffies +
+ HISI_SAS_WAIT_PHYUP_TIMEOUT;
+ add_timer(&phy->timer);
+ } else {
+ dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+ phy_no, phy->wait_phyup_cnt);
+ phy->wait_phyup_cnt = 0;
+ }
}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
@@ -1063,7 +1074,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
down(&hisi_hba->sem);
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, true);
hisi_sas_dereg_device(hisi_hba, device);
@@ -1182,9 +1193,9 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
complete(&task->slow_task->completion);
}
-#define TASK_TIMEOUT 20
-#define TASK_RETRY 3
-#define INTERNAL_ABORT_TIMEOUT 6
+#define TASK_TIMEOUT (20 * HZ)
+#define TASK_RETRY 3
+#define INTERNAL_ABORT_TIMEOUT (6 * HZ)
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
void *parameter, u32 para_len,
struct hisi_sas_tmf_task *tmf)
@@ -1212,7 +1223,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
@@ -1505,7 +1516,8 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
continue;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0,
+ false);
if (rc < 0)
dev_err(dev, "STP reject: abort dev failed %d\n", rc);
}
@@ -1604,6 +1616,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
}
hisi_sas_controller_reset_done(hisi_hba);
+ clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
dev_info(dev, "controller reset complete\n");
return 0;
@@ -1660,7 +1673,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
&tmf_task);
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ HISI_SAS_INT_ABT_CMD, tag,
+ false);
if (rc2 < 0) {
dev_err(dev, "abort task: internal abort (%d)\n", rc2);
return TMF_RESP_FUNC_FAILED;
@@ -1682,7 +1696,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (task->dev->dev_type == SAS_SATA_DEV) {
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV,
- 0);
+ 0, false);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
@@ -1697,7 +1711,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ HISI_SAS_INT_ABT_CMD, tag,
+ false);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
@@ -1723,7 +1738,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
int rc;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, false);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
@@ -1750,6 +1765,8 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
return rc;
}
+#define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ)
+
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
struct sas_phy *local_phy = sas_get_local_phy(device);
@@ -1784,7 +1801,8 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
- int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
+ int ret = wait_for_completion_timeout(&phyreset,
+ I_T_NEXUS_RESET_PHYUP_TIMEOUT);
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
@@ -1814,7 +1832,7 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
int rc;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, false);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
@@ -1844,7 +1862,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then lu reset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, false);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
@@ -1875,12 +1893,24 @@ out:
return rc;
}
+static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
+{
+ struct domain_device *device = data;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ int rc;
+
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
+ SAS_ADDR(device->sas_addr), rc);
+}
+
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
- struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- int rc, i;
+ ASYNC_DOMAIN_EXCLUSIVE(async);
+ int i;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
@@ -1895,12 +1925,11 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
dev_is_expander(device->dev_type))
continue;
- rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
- sas_dev->device_id, rc);
+ async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
+ device, &async);
}
+ async_synchronize_full_domain(&async);
hisi_sas_release_tasks(hisi_hba);
return TMF_RESP_FUNC_COMPLETE;
@@ -2029,11 +2058,13 @@ err_out:
* @tag: tag of IO to be aborted (only relevant to single
* IO mode)
* @dq: delivery queue for this internal abort command
+ * @rst_to_recover: If rst_to_recover set, queue a controller
+ * reset if an internal abort times out.
*/
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device, int abort_flag,
- int tag, struct hisi_sas_dq *dq)
+ int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -2049,6 +2080,9 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!hisi_hba->hw->prep_abort)
return TMF_RESP_FUNC_FAILED;
+ if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
+ return -EIO;
+
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -2057,7 +2091,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
+ task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
@@ -2079,6 +2113,8 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
+ set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
+
if (slot) {
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
@@ -2089,7 +2125,13 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
synchronize_irq(cq->irq_no);
slot->task = NULL;
}
- dev_err(dev, "internal task abort: timeout and not done.\n");
+
+ if (rst_to_recover) {
+ dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ } else {
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ }
res = -EIO;
goto exit;
@@ -2122,7 +2164,7 @@ exit:
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
- int abort_flag, int tag)
+ int abort_flag, int tag, bool rst_to_recover)
{
struct hisi_sas_slot *slot;
struct device *dev = hisi_hba->dev;
@@ -2134,7 +2176,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
slot = &hisi_hba->slot_info[tag];
dq = &hisi_hba->dq[slot->dlvry_queue];
return _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag, dq);
+ abort_flag, tag, dq,
+ rst_to_recover);
case HISI_SAS_INT_ABT_DEV:
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
@@ -2145,7 +2188,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
dq = &hisi_hba->dq[i];
rc = _hisi_sas_internal_task_abort(hisi_hba, device,
abort_flag, tag,
- dq);
+ dq, rst_to_recover);
if (rc)
return rc;
}
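The clear-nexus path above now schedules each device reset through an async domain and waits for the whole batch, instead of resetting devices one at a time. A stripped-down sketch of that pattern (demo names only):

#include <linux/async.h>

static void demo_reset_one(void *data, async_cookie_t cookie)
{
	/* per-device I_T nexus reset would run here */
}

static void demo_reset_all(void **devices, int ndev)
{
	ASYNC_DOMAIN_EXCLUSIVE(domain);
	int i;

	for (i = 0; i < ndev; i++)
		async_schedule_domain(demo_reset_one, devices[i], &domain);

	/* wait for every reset scheduled in this domain to finish */
	async_synchronize_full_domain(&domain);
}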
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3e359ac752fd..afe639994f3d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1152,14 +1152,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
default:
{
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
}
}
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SATA:
@@ -1281,7 +1281,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
@@ -1298,7 +1298,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
break;
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -1649,7 +1649,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (irq < 0) {
dev_err(dev, "irq init: fail map phy interrupt %d\n",
idx);
- return -ENOENT;
+ return irq;
}
rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
@@ -1657,7 +1657,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
}
@@ -1668,7 +1668,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (irq < 0) {
dev_err(dev, "irq init: could not map cq interrupt %d\n",
idx);
- return -ENOENT;
+ return irq;
}
rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,
@@ -1676,7 +1676,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
@@ -1686,7 +1686,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (irq < 0) {
dev_err(dev, "irq init: could not map fatal interrupt %d\n",
idx);
- return -ENOENT;
+ return irq;
}
rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
@@ -1694,7 +1694,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
@@ -1771,6 +1771,7 @@ static struct scsi_host_template sht_v1_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
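The v1 interrupt-init hunks stop flattening every failure to -ENOENT and instead return the real error, which keeps codes such as -EPROBE_DEFER intact. A generic sketch of the pattern (not the driver's actual probe path):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int demo_init_irq(struct platform_device *pdev, int idx,
			 irq_handler_t handler, void *arg)
{
	int irq = platform_get_irq(pdev, idx);
	int rc;

	if (irq < 0)
		return irq;	/* propagate the mapping error as-is */

	rc = devm_request_irq(&pdev->dev, irq, handler, 0,
			      dev_name(&pdev->dev), arg);
	if (rc)
		return rc;	/* propagate the request error as-is */

	return 0;
}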
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 46f60fc2a069..b0b2361e63fe 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -994,7 +994,7 @@ static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
if (!wait_for_completion_timeout(sas_dev->completion,
- CLEAR_ITCT_TIMEOUT * HZ)) {
+ HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
dev_warn(dev, "failed to clear ITCT\n");
return -ETIMEDOUT;
}
@@ -2168,7 +2168,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SATA:
@@ -2427,7 +2427,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
@@ -2441,12 +2441,12 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
hisi_sas_sata_done(task, slot);
break;
}
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -3584,6 +3584,7 @@ static struct scsi_host_template sht_v2_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e95408314078..a4885d03afe2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -843,7 +843,7 @@ static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
if (!wait_for_completion_timeout(sas_dev->completion,
- CLEAR_ITCT_TIMEOUT * HZ)) {
+ HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
dev_warn(dev, "failed to clear ITCT\n");
return -ETIMEDOUT;
}
@@ -2178,7 +2178,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_sata_done(task, slot);
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
break;
@@ -2285,7 +2285,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
@@ -2298,11 +2298,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
hisi_sas_sata_done(task, slot);
break;
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -3155,6 +3155,7 @@ static struct scsi_host_template sht_v3_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cd52664920e1..3f6f14f0cafb 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -220,6 +220,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+ shost->can_queue);
+
error = scsi_init_sense_cache(shost);
if (error)
goto fail;
@@ -319,7 +322,7 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
- /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
if (shost->tmf_work_q)
@@ -485,6 +488,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
+ shost->ehandler = NULL;
goto fail;
}
@@ -657,10 +661,11 @@ EXPORT_SYMBOL_GPL(scsi_flush_work);
static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
- int status = *(int *)data;
+ enum scsi_host_status status = *(enum scsi_host_status *)data;
scsi_dma_unmap(scmd);
- scmd->result = status << 16;
+ scmd->result = 0;
+ set_host_byte(scmd, status);
scmd->scsi_done(scmd);
return true;
}
@@ -675,7 +680,8 @@ static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
* caller to ensure that concurrent I/O submission and/or
* completion is stopped when calling this function.
*/
-void scsi_host_complete_all_commands(struct Scsi_Host *shost, int status)
+void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ enum scsi_host_status status)
{
blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
&status);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index db4c7a7ff4dd..61cda7b7624f 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -760,7 +760,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
goto skip_resid;
default:
- scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
+ scp->result = DID_ABORT << 16;
break;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 715c34904e3e..935b01ee44b7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -655,8 +655,10 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
**/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
- if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+ tgt->init_retries = 0;
+ }
wake_up(&tgt->vhost->work_wait_q);
}
@@ -805,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
for (i = 0; i < size; ++i) {
struct ibmvfc_event *evt = &pool->events[i];
+ /*
+ * evt->active states
+ * 1 = in flight
+ * 0 = being completed
+ * -1 = free/freed
+ */
+ atomic_set(&evt->active, -1);
atomic_set(&evt->free, 1);
evt->crq.valid = 0x80;
evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
@@ -1015,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1);
+ BUG_ON(atomic_dec_and_test(&evt->active));
spin_lock_irqsave(&evt->queue->l_lock, flags);
list_add_tail(&evt->queue_list, &evt->queue->free);
@@ -1070,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list)
**/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
+ /*
+ * Anything we are failing should still be active. Otherwise, it
+ * implies we already got a response for the command and are doing
+ * something bad like double completing it.
+ */
+ BUG_ON(!atomic_dec_and_test(&evt->active));
if (evt->cmnd) {
evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done;
@@ -1721,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->done(evt);
} else {
+ atomic_set(&evt->active, 1);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt);
}
@@ -3249,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
return;
}
- if (unlikely(atomic_read(&evt->free))) {
+ if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba);
return;
@@ -3776,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
return;
}
- if (unlikely(atomic_read(&evt->free))) {
+ if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba);
return;
@@ -4300,9 +4317,10 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
- tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+ tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->scsi_id = tgt->new_scsi_id;
tgt->ids.port_id = tgt->scsi_id;
memcpy(&tgt->service_parms, &rsp->service_parms,
sizeof(tgt->service_parms));
@@ -4320,8 +4338,8 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
tgt_log(tgt, level,
- "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
- tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+ "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+ tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
status);
break;
}
@@ -4358,8 +4376,8 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
move->common.length = cpu_to_be16(sizeof(*move));
- move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
- move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
move->wwpn = cpu_to_be64(tgt->wwpn);
move->node_name = cpu_to_be64(tgt->ids.node_name);
@@ -4368,7 +4386,7 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
- tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+ tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
}
/**
@@ -4728,20 +4746,25 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
* and it failed for some reason, such as there being I/O
* pending to the target. In this case, we will have already
* deleted the rport from the FC transport so we do a move
- * login, which works even with I/O pending, as it will cancel
- * any active commands.
+ * login, which works even with I/O pending. However, if
+ * there is still I/O pending, it will stay outstanding, so
+ * we only do this if fast fail is disabled for the rport;
+ * otherwise we let terminate_rport_io clean up the port
+ * before we log in at the new location.
*/
if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
- /*
- * Do a move login here. The old target is no longer
- * known to the transport layer We don't use the
- * normal ibmvfc_set_tgt_action to set this, as we
- * don't normally want to allow this state change.
- */
- wtgt->old_scsi_id = wtgt->scsi_id;
- wtgt->scsi_id = scsi_id;
- wtgt->action = IBMVFC_TGT_ACTION_INIT;
- ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+ if (wtgt->move_login) {
+ /*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+ wtgt->new_scsi_id = scsi_id;
+ wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ wtgt->init_retries = 0;
+ ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+ }
goto unlock_out;
} else {
tgt_err(wtgt, "Unexpected target state: %d, %p\n",
@@ -5332,6 +5355,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
tgt->rport = NULL;
+ tgt->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
return;
@@ -5486,7 +5510,20 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
rport = tgt->rport;
tgt->rport = NULL;
+ tgt->init_retries = 0;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+
+ /*
+ * If fast fail is enabled, we wait for it to fire and then clean up
+ * the old port, since we expect the fast fail timer to clean up the
+ * outstanding I/O faster than waiting for normal command timeouts.
+ * However, if fast fail is disabled, any I/O outstanding to the
+ * rport LUNs will stay outstanding indefinitely, since the EH handlers
+ * won't get invoked for I/Os timing out. If this is an NPIV failover
+ * scenario, the better alternative is to use the move login.
+ */
+ if (rport && rport->fast_io_fail_tmo == -1)
+ tgt->move_login = 1;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 19dcec3ae9ba..92fb889d7eb0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -718,7 +718,7 @@ struct ibmvfc_target {
struct ibmvfc_host *vhost;
u64 scsi_id;
u64 wwpn;
- u64 old_scsi_id;
+ u64 new_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
@@ -726,6 +726,7 @@ struct ibmvfc_target {
int add_rport;
int init_retries;
int logo_rcvd;
+ int move_login;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
@@ -744,6 +745,7 @@ struct ibmvfc_event {
struct ibmvfc_target *tgt;
struct scsi_cmnd *cmnd;
atomic_t free;
+ atomic_t active;
union ibmvfc_iu *xfer_iu;
void (*done)(struct ibmvfc_event *evt);
void (*_done)(struct ibmvfc_event *evt);
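The evt->active field added above turns "is this event still expecting a response?" into a single atomic transition: -1 free, 1 in flight, 0 being completed. A simplified sketch of the claim logic, with demo types rather than the driver's:

#include <linux/atomic.h>

struct demo_event {
	atomic_t active;	/* -1 = free, 0 = completing, 1 = in flight */
};

static bool demo_claim_completion(struct demo_event *evt)
{
	/*
	 * atomic_dec_if_positive() returns (old value - 1) and only stores
	 * the decrement when that result is non-negative, so it returns 0
	 * exactly when the event was in flight.  Any other value means a
	 * duplicate or stale response.
	 */
	return atomic_dec_if_positive(&evt->active) == 0;
}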
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e75b0068ad84..e6a3eaaa57d9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1005,7 +1005,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (cmnd) {
cmnd->result |= rsp->status;
- if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
+ if (scsi_status_is_check_condition(cmnd->result))
memcpy(cmnd->sense_buffer,
rsp->data,
be32_to_cpu(rsp->sense_data_len));
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 862d35a098cf..943c9102a7eb 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1283,19 +1283,6 @@ static struct parport_driver imm_driver = {
.detach = imm_detach,
.devmodel = true,
};
-
-static int __init imm_driver_init(void)
-{
- printk("imm: Version %s\n", IMM_VERSION);
- return parport_register_driver(&imm_driver);
-}
-
-static void __exit imm_driver_exit(void)
-{
- parport_unregister_driver(&imm_driver);
-}
-
-module_init(imm_driver_init);
-module_exit(imm_driver_exit);
+module_parport_driver(imm_driver);
MODULE_LICENSE("GPL");
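module_parport_driver(imm_driver) expands to roughly the register/unregister boilerplate the hunk above deletes (minus the version banner):

static int __init imm_driver_init(void)
{
	return parport_register_driver(&imm_driver);
}
module_init(imm_driver_init);

static void __exit imm_driver_exit(void)
{
	parport_unregister_driver(&imm_driver);
}
module_exit(imm_driver_exit);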
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 30c30a1db5b1..5d78f7e939a3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1300,7 +1300,7 @@ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
*p = '\0';
p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
- for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
+ for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
return buffer;
@@ -1323,7 +1323,7 @@ static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
*p = '\0';
p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
- __ipr_format_res_path(res_path, p, len - (buffer - p));
+ __ipr_format_res_path(res_path, p, len - (p - buffer));
return buffer;
}
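The second ipr hunk fixes inverted pointer arithmetic: once p has advanced into buffer, the space left is len - (p - buffer), not len - (buffer - p). A generic sketch of the accumulation pattern:

#include <linux/kernel.h>	/* scnprintf() */

static void demo_format_path(char *buffer, int len, const u8 *path, int n)
{
	char *p = buffer;
	int i;

	p += scnprintf(p, len, "%02X", path[0]);
	for (i = 1; i < n; i++)
		p += scnprintf(p, len - (p - buffer), "-%02X", path[i]);
}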
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 783ee03ad9ea..69444d21fca1 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -428,6 +428,7 @@ struct ipr_config_table_entry64 {
__be64 lun;
__be64 lun_wwn[2];
#define IPR_MAX_RES_PATH_LENGTH 48
+#define IPR_RES_PATH_BYTES 8
__be64 res_path;
struct ipr_std_inq_data std_inq_data;
u8 reserved2[4];
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc33d54a4011..8b33c9871484 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3344,13 +3344,15 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
IPS_CMD_EXTENDED_DCDB_SG)) {
tapeDCDB =
(IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
- memcpy(scb->scsi_cmd->sense_buffer,
+ memcpy_and_pad(scb->scsi_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
tapeDCDB->sense_info,
- SCSI_SENSE_BUFFERSIZE);
+ sizeof(tapeDCDB->sense_info), 0);
} else {
- memcpy(scb->scsi_cmd->sense_buffer,
+ memcpy_and_pad(scb->scsi_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
scb->dcdb.sense_info,
- SCSI_SENSE_BUFFERSIZE);
+ sizeof(scb->dcdb.sense_info), 0);
}
device_error = 2; /* check condition */
}
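memcpy_and_pad() copies count bytes (capped at the destination size) and fills any remainder with the pad byte, so copying the short sense_info array into the full-size sense buffer can neither over-read the source nor leave stale bytes behind. Minimal sketch with illustrative names:

#include <linux/string.h>

static void demo_copy_sense(u8 *dst, size_t dst_len,
			    const u8 *src, size_t src_len)
{
	/* src_len bytes copied, dst_len - src_len bytes zero-padded */
	memcpy_and_pad(dst, dst_len, src, src_len, 0);
}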
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index c452849e7bb4..ffd33e5decae 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -167,6 +167,7 @@ static struct scsi_host_template isci_sht = {
.eh_abort_handler = sas_eh_abort_handler,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index e7c6cb4c1556..e1ff79464131 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2566,7 +2566,7 @@ static void isci_request_handle_controller_specific_errors(
if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
- *status_ptr = SAM_STAT_TASK_ABORTED;
+ *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
}
@@ -2696,7 +2696,7 @@ static void isci_request_handle_controller_specific_errors(
default:
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
- *status_ptr = SAM_STAT_TASK_ABORTED;
+ *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
if (task->task_proto == SAS_PROTOCOL_SMP)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
@@ -2719,7 +2719,7 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
if (ac_err_mask(fis->status))
ts->stat = SAS_PROTO_RESPONSE;
else
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
ts->resp = SAS_TASK_COMPLETE;
}
@@ -2782,7 +2782,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
case SCI_IO_SUCCESS_IO_DONE_EARLY:
response = SAS_TASK_COMPLETE;
- status = SAM_STAT_GOOD;
+ status = SAS_SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
@@ -2852,7 +2852,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
/* Fail the I/O. */
response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
+ status = SAS_SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
break;
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 62062ed6cd9a..3fd88d72a0c0 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -160,7 +160,7 @@ int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
isci_task_refuse(ihost, task,
SAS_TASK_UNDELIVERED,
- SAM_STAT_TASK_ABORTED);
+ SAS_SAM_STAT_TASK_ABORTED);
} else {
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -709,8 +709,8 @@ isci_task_request_complete(struct isci_host *ihost,
tmf->status = completion_status;
if (tmf->proto == SAS_PROTOCOL_SSP) {
- memcpy(&tmf->resp.resp_iu,
- &ireq->ssp.rsp,
+ memcpy(tmf->resp.rsp_buf,
+ ireq->ssp.rsp_buf,
SSP_RESP_IU_MAX_SIZE);
} else if (tmf->proto == SAS_PROTOCOL_SATA) {
memcpy(&tmf->resp.d2h_fis,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index dd33ce0e3737..1bc37593c88f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -600,6 +600,12 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
if (!sock)
return;
+ /*
+ * Make sure we start socket shutdown now in case userspace is up
+ * but delayed in releasing the socket.
+ */
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+
sock_hold(sock->sk);
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
@@ -689,6 +695,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
sk_set_memalloc(sk);
+ sock_no_linger(sk);
iscsi_sw_tcp_conn_set_callbacks(conn);
tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 602c97a651bc..74ae7fd15d8d 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -166,9 +166,11 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
const char *in, size_t len)
{
- int copied = strscpy(entry->value, in, len);
- if (copied > 0)
- memset(entry->value, copied, len - copied);
+ int copied;
+
+ copied = strscpy(entry->value, in, len);
+ if (copied > 0 && copied + 1 < len)
+ memset(entry->value + copied + 1, 0, len - copied - 1);
}
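The old helper passed 'copied' as the memset fill byte and wrote it over the start of the value it had just copied; the fix zero-fills only the bytes after the terminating NUL. Stand-alone sketch of the corrected logic:

#include <linux/string.h>

static inline void demo_copy_and_zero_pad(char *dst, const char *src,
					  size_t len)
{
	ssize_t copied = strscpy(dst, src, len);

	/* pad with zeroes after the NUL terminator, touch nothing else */
	if (copied > 0 && copied + 1 < len)
		memset(dst + copied + 1, 0, len - copied - 1);
}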
/**
@@ -190,10 +192,11 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
struct fc_fdmi_attr_entry *entry;
struct fs_fdmi_attrs *hba_attrs;
int numattrs = 0;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
switch (op) {
case FC_FDMI_RHBA:
- numattrs = 10;
+ numattrs = 11;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
@@ -207,8 +210,21 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
+ FC_FDMI_SUBTYPE);
/* HBA Identifier */
put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
@@ -313,7 +329,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
&entry->type);
put_unaligned_be16(len, &entry->len);
fc_ct_ms_fill_attr(entry,
- fc_host_optionrom_version(lport->host),
+ "unknown",
FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
/* Firmware Version */
@@ -341,6 +357,100 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
"%s v%s",
init_utsname()->sysname,
init_utsname()->release);
+
+ /* Max CT payload */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_max_ct_payload(lport->host),
+ &entry->value);
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ /* Node symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODESYMBLNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+
+ /* Vendor specific info */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(0,
+ &entry->value);
+
+ /* Number of ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NUMBEROFPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_ports(lport->host),
+ &entry->value);
+
+ /* Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* BIOS version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_bootbios_version(lport->host),
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+
+ /* BIOS state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_bootbios_state(lport->host),
+ &entry->value);
+
+ /* Vendor identifier */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORIDENTIFIER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_vendor_identifier(lport->host),
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN);
+ }
+
break;
case FC_FDMI_RPA:
numattrs = 6;
@@ -353,6 +463,24 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+
+ }
+
ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
FC_FDMI_SUBTYPE);
@@ -441,6 +569,122 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
fc_ct_ms_fill_attr(entry,
init_utsname()->nodename,
FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+
+ /* Node name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_node_name(lport->host),
+ &entry->value);
+
+ /* Port name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwpn,
+ &entry->value);
+
+ /* Port symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SYMBOLICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+
+ /* Port type */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTTYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_type(lport->host),
+ &entry->value);
+
+ /* Supported class of service */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTTYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_supported_classes(lport->host),
+ &entry->value);
+
+ /* Port Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* Port active FC-4 */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTFC4TYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_active_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+
+ /* Port state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_state(lport->host),
+ &entry->value);
+
+ /* Discovered ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_DISCOVEREDPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_discovered_ports(lport->host),
+ &entry->value);
+
+ /* Port ID */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTID,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_id(lport->host),
+ &entry->value);
+ }
+
break;
case FC_FDMI_DPRT:
len = sizeof(struct fc_fdmi_dprt);
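Every attribute in the RHBA and RPA hunks above follows one TLV pattern: step past the previous value, store a big-endian type and length, then the value. A hypothetical helper, not in the patch, that captures the stepping:

#include <asm/unaligned.h>

static struct fc_fdmi_attr_entry *
demo_next_attr(struct fc_fdmi_attr_entry *prev, size_t prev_val_len,
	       u16 type, size_t val_len)
{
	struct fc_fdmi_attr_entry *entry;

	entry = (struct fc_fdmi_attr_entry *)((char *)prev->value +
					      prev_val_len);
	put_unaligned_be16(type, &entry->type);
	put_unaligned_be16(FC_FDMI_ATTR_ENTRY_HEADER_LEN + val_len,
			   &entry->len);
	return entry;	/* caller fills in entry->value */
}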
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index cf36c8cb5493..19cd4a95d354 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -93,7 +93,10 @@
#define FC_LOCAL_PTP_FID_LO 0x010101
#define FC_LOCAL_PTP_FID_HI 0x010102
-#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
static void fc_lport_error(struct fc_lport *, struct fc_frame *);
@@ -1185,7 +1188,7 @@ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
-
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
@@ -1219,7 +1222,13 @@ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
switch (lport->state) {
case LPORT_ST_RHBA:
- if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+ if ((ntohs(ct->ct_cmd) == FC_FS_RJT) && fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "Error for FDMI-V2, fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+
+ } else if (ntohs(ct->ct_cmd) == FC_FS_ACC)
fc_lport_enter_ms(lport, LPORT_ST_RPA);
else /* Error Skip RPA */
fc_lport_enter_scr(lport);
@@ -1433,7 +1442,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;
int numattrs;
-
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
@@ -1446,10 +1455,10 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
case LPORT_ST_RHBA:
cmd = FC_FDMI_RHBA;
/* Number of HBA Attributes */
- numattrs = 10;
+ numattrs = 11;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
@@ -1460,6 +1469,21 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
size += len;
break;
@@ -1469,7 +1493,6 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
numattrs = 6;
len = sizeof(struct fc_fdmi_rpa);
len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
@@ -1477,6 +1500,22 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
size += len;
break;
case LPORT_ST_DPRT:
@@ -1546,6 +1585,7 @@ static void fc_lport_timeout(struct work_struct *work)
struct fc_lport *lport =
container_of(work, struct fc_lport,
retry_work.work);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
mutex_lock(&lport->lp_mutex);
@@ -1573,6 +1613,13 @@ static void fc_lport_timeout(struct work_struct *work)
fc_lport_enter_fdmi(lport);
break;
case LPORT_ST_RHBA:
+ if (fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "timeout for FDMI-V2 RHBA, fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ }
+ fallthrough;
case LPORT_ST_RPA:
case LPORT_ST_DHBA:
case LPORT_ST_DPRT:
@@ -1839,6 +1886,13 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
+ struct fc_host_attrs *fc_host;
+
+ fc_host = shost_to_fc_host(lport->host);
+
+ /* Set FDMI version to FDMI-2 specification */
+ fc_host->fdmi_version = FDMI_V2;
+
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1847,6 +1901,7 @@ int fc_lport_init(struct fc_lport *lport)
sizeof(fc_host_supported_fc4s(lport->host)));
fc_host_supported_fc4s(lport->host)[2] = 1;
fc_host_supported_fc4s(lport->host)[7] = 1;
+ fc_host_num_discovered_ports(lport->host) = 4;
/* This value is also unchanging */
memset(fc_host_active_fc4s(lport->host), 0,
@@ -1859,8 +1914,27 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_40GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_40GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_25GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_25GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_50GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_50GBIT;
+
fc_fc4_add_lport(lport);
+ fc_host_num_discovered_ports(lport->host) = DISCOVERED_PORTS;
+ fc_host_port_state(lport->host) = FC_PORTSTATE_ONLINE;
+ fc_host_max_ct_payload(lport->host) = MAX_CT_PAYLOAD;
+ fc_host_num_ports(lport->host) = NUMBER_OF_PORTS;
+ fc_host_bootbios_state(lport->host) = 0X00000000;
+ snprintf(fc_host_bootbios_version(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "Unknown");
+
return 0;
}
EXPORT_SYMBOL(fc_lport_init);
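The RHBA handling above folds the FDMI-2 to FDMI-1 fallback into two places: the FS_RJT response path and the retry-timeout path. A minimal standalone model of that decision, using illustrative names only and not the libfc code itself, may help when reading the hunks:

	/* Model of the fallback decision above: a rejected (or timed-out) RHBA
	 * while the port advertises FDMI-2 drops it back to FDMI-1 and retries
	 * RHBA; otherwise the usual ACC/error paths apply.
	 */
	#include <stdbool.h>

	enum fdmi_ver { FDMI_V1 = 1, FDMI_V2 = 2 };
	enum next_step { RETRY_RHBA, SEND_RPA, SKIP_TO_SCR };

	static enum next_step rhba_next_step(bool rejected, bool accepted,
					     enum fdmi_ver *ver)
	{
		if (rejected && *ver == FDMI_V2) {
			*ver = FDMI_V1;		/* fall back, reissue RHBA */
			return RETRY_RHBA;
		}
		if (accepted)
			return SEND_RPA;	/* continue with port attributes */
		return SKIP_TO_SCR;		/* skip FDMI, register for RSCN */
	}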
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index cd0fb8ca2425..33da3c1085f0 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1162,6 +1162,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
pp->spp.spp_flags, pp->spp.spp_type);
+
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
@@ -1184,11 +1185,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
/*
* Call prli provider if we should act as a target
*/
- prov = fc_passive_prov[rdata->spp_type];
- if (prov) {
- memset(&temp_spp, 0, sizeof(temp_spp));
- prov->prli(rdata, pp->prli.prli_spp_len,
- &pp->spp, &temp_spp);
+ if (rdata->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_passive_prov[rdata->spp_type];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
}
/*
* Check if the image pair could be established
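The fc_rport.c hunk above guards the fc_passive_prov[] lookup with an index taken from the peer's PRLI service-parameter page, so an out-of-range spp_type can no longer index past the table. A small standalone sketch of the same defensive pattern, with illustrative names rather than the libfc symbols:

	/* Validate an externally supplied index before using it to look up a
	 * handler table; mirrors the FC_FC4_PROV_SIZE check added above.
	 */
	#define HANDLER_TABLE_SIZE 8

	typedef void (*handler_fn)(void *ctx);

	static void dispatch(handler_fn table[HANDLER_TABLE_SIZE],
			     unsigned int type, void *ctx)
	{
		if (type >= HANDLER_TABLE_SIZE)
			return;			/* untrusted index: ignore */
		if (table[type])
			table[type](ctx);	/* only call registered handlers */
	}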
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4834219497ee..4683c183e9d4 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -230,11 +230,11 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
*/
static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
{
- struct iscsi_conn *conn = task->conn;
- struct iscsi_tm *tmf = &conn->tmhdr;
+ struct iscsi_session *session = task->conn->session;
+ struct iscsi_tm *tmf = &session->tmhdr;
u64 hdr_lun;
- if (conn->tmf_state == TMF_INITIAL)
+ if (session->tmf_state == TMF_INITIAL)
return 0;
if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
@@ -254,24 +254,19 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
* Fail all SCSI cmd PDUs
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
- iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x itt "
- "0x%x/0x%x] "
- "rejected.\n",
- opcode, task->itt,
- task->hdr_itt);
+ iscsi_session_printk(KERN_INFO, session,
+ "task [op %x itt 0x%x/0x%x] rejected.\n",
+ opcode, task->itt, task->hdr_itt);
return -EACCES;
}
/*
* And also all data-out PDUs in response to R2T
* if fast_abort is set.
*/
- if (conn->session->fast_abort) {
- iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x itt "
- "0x%x/0x%x] fast abort.\n",
- opcode, task->itt,
- task->hdr_itt);
+ if (session->fast_abort) {
+ iscsi_session_printk(KERN_INFO, session,
+ "task [op %x itt 0x%x/0x%x] fast abort.\n",
+ opcode, task->itt, task->hdr_itt);
return -EACCES;
}
break;
@@ -284,7 +279,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
task->hdr_itt == tmf->rtt) {
- ISCSI_DBG_SESSION(conn->session,
+ ISCSI_DBG_SESSION(session,
"Preventing task %x/%x from sending "
"data-out due to abort task in "
"progress\n", task->itt,
@@ -578,6 +573,11 @@ static bool cleanup_queued_task(struct iscsi_task *task)
__iscsi_put_task(task);
}
+ if (conn->session->running_aborted_task == task) {
+ conn->session->running_aborted_task = NULL;
+ __iscsi_put_task(task);
+ }
+
if (conn->task == task) {
conn->task = NULL;
__iscsi_put_task(task);
@@ -829,10 +829,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
ascq = session->tt->check_protection(task, &sector);
if (ascq) {
- sc->result = DRIVER_SENSE << 24 |
- SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(1, sc->sense_buffer,
- ILLEGAL_REQUEST, 0x10, ascq);
+ scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
scsi_set_sense_information(sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
sector);
@@ -936,20 +933,21 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
conn->tmfrsp_pdus_cnt++;
- if (conn->tmf_state != TMF_QUEUED)
+ if (session->tmf_state != TMF_QUEUED)
return;
if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
- conn->tmf_state = TMF_SUCCESS;
+ session->tmf_state = TMF_SUCCESS;
else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
- conn->tmf_state = TMF_NOT_FOUND;
+ session->tmf_state = TMF_NOT_FOUND;
else
- conn->tmf_state = TMF_FAILED;
- wake_up(&conn->ehwait);
+ session->tmf_state = TMF_FAILED;
+ wake_up(&session->ehwait);
}
static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
@@ -1361,7 +1359,6 @@ void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err)
{
struct iscsi_conn *conn;
- struct device *dev;
spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
@@ -1370,10 +1367,8 @@ void iscsi_session_failure(struct iscsi_session *session,
return;
}
- dev = get_device(&conn->cls_conn->dev);
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- if (!dev)
- return;
/*
* if the host is being removed bypass the connection
* recovery initialization because we are going to kill
@@ -1383,27 +1378,36 @@ void iscsi_session_failure(struct iscsi_session *session,
iscsi_conn_error_event(conn->cls_conn, err);
else
iscsi_conn_failure(conn, err);
- put_device(dev);
+ iscsi_put_conn(conn->cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);
-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
- spin_lock_bh(&session->frwd_lock);
- if (session->state == ISCSI_STATE_FAILED) {
- spin_unlock_bh(&session->frwd_lock);
- return;
- }
+ if (session->state == ISCSI_STATE_FAILED)
+ return false;
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- spin_unlock_bh(&session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- iscsi_conn_error_event(conn->cls_conn, err);
+ return true;
+}
+
+void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+{
+ struct iscsi_session *session = conn->session;
+ bool needs_evt;
+
+ spin_lock_bh(&session->frwd_lock);
+ needs_evt = iscsi_set_conn_failed(conn);
+ spin_unlock_bh(&session->frwd_lock);
+
+ if (needs_evt)
+ iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -1820,15 +1824,14 @@ EXPORT_SYMBOL_GPL(iscsi_target_alloc);
static void iscsi_tmf_timedout(struct timer_list *t)
{
- struct iscsi_conn *conn = from_timer(conn, t, tmf_timer);
- struct iscsi_session *session = conn->session;
+ struct iscsi_session *session = from_timer(session, t, tmf_timer);
spin_lock(&session->frwd_lock);
- if (conn->tmf_state == TMF_QUEUED) {
- conn->tmf_state = TMF_TIMEDOUT;
+ if (session->tmf_state == TMF_QUEUED) {
+ session->tmf_state = TMF_TIMEDOUT;
ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock(&session->frwd_lock);
}
@@ -1851,8 +1854,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
- conn->tmf_timer.expires = timeout * HZ + jiffies;
- add_timer(&conn->tmf_timer);
+ session->tmf_timer.expires = timeout * HZ + jiffies;
+ add_timer(&session->tmf_timer);
ISCSI_DBG_EH(session, "tmf set timeout\n");
spin_unlock_bh(&session->frwd_lock);
@@ -1866,12 +1869,12 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* 3) session is terminated or restarted or userspace has
* given up on recovery
*/
- wait_event_interruptible(conn->ehwait, age != session->age ||
+ wait_event_interruptible(session->ehwait, age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN ||
- conn->tmf_state != TMF_QUEUED);
+ session->tmf_state != TMF_QUEUED);
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&conn->tmf_timer);
+ del_timer_sync(&session->tmf_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -2180,6 +2183,51 @@ done:
spin_unlock(&session->frwd_lock);
}
+/**
+ * iscsi_conn_unbind - prevent queueing to conn.
+ * @cls_conn: iscsi conn ep is bound to.
+ * @is_active: is the conn in use for boot or is this for EH/termination
+ *
+ * This must be called by drivers implementing the ep_disconnect callout.
+ * It disables queueing to the connection from libiscsi in preparation for
+ * an ep_disconnect call.
+ */
+void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
+{
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+
+ if (!cls_conn)
+ return;
+
+ conn = cls_conn->dd_data;
+ session = conn->session;
+ /*
+ * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
+ * complete or timeout. The caller just wants to know that whatever is
+ * still running is everything that needs to be cleaned up, and that no
+ * new cmds will be queued.
+ */
+ mutex_lock(&session->eh_mutex);
+
+ iscsi_suspend_queue(conn);
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ if (!is_active) {
+ /*
+ * if logout timed out before userspace could even send a PDU
+ * the state might still be in ISCSI_STATE_LOGGED_IN and
+ * allowing new cmds and TMFs.
+ */
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+ iscsi_set_conn_failed(conn);
+ }
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
+
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
struct iscsi_tm *hdr)
{
@@ -2234,6 +2282,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
@@ -2244,9 +2293,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
spin_unlock(&session->back_lock);
- spin_unlock_bh(&session->frwd_lock);
- mutex_unlock(&session->eh_mutex);
- return SUCCESS;
+ goto success;
}
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
__iscsi_get_task(task);
@@ -2258,17 +2305,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto failed;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_abort_task_pdu(task, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
goto failed;
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
spin_unlock_bh(&session->frwd_lock);
/*
@@ -2283,18 +2330,19 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
*/
spin_lock_bh(&session->frwd_lock);
fail_scsi_task(task, DID_ABORT);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto success_unlocked;
case TMF_TIMEDOUT:
+ session->running_aborted_task = task;
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto failed_unlocked;
case TMF_NOT_FOUND:
- if (!sc->SCp.ptr) {
- conn->tmf_state = TMF_INITIAL;
+ if (iscsi_task_is_completed(task)) {
+ session->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
/* task completed before tmf abort response */
ISCSI_DBG_EH(session, "sc completed while abort in "
@@ -2303,7 +2351,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
fallthrough;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto failed;
}
@@ -2313,6 +2361,7 @@ success_unlocked:
ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
sc, task->itt);
iscsi_put_task(task);
+ iscsi_put_conn(conn->cls_conn);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
@@ -2321,7 +2370,15 @@ failed:
failed_unlocked:
ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
task ? task->itt : 0);
- iscsi_put_task(task);
+ /*
+ * The driver might be accessing the task so hold the ref. The conn
+ * stop cleanup will drop the ref after ep_disconnect so we know the
+ * driver's no longer touching the task.
+ */
+ if (!session->running_aborted_task)
+ iscsi_put_task(task);
+
+ iscsi_put_conn(conn->cls_conn);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
@@ -2362,11 +2419,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto unlock;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_lun_reset_pdu(sc, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
@@ -2375,7 +2432,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
goto unlock;
}
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
@@ -2383,7 +2440,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto unlock;
}
@@ -2395,7 +2452,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
@@ -2418,8 +2475,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
spin_lock_bh(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
session->state = ISCSI_STATE_RECOVERY_FAILED;
- if (session->leadconn)
- wake_up(&session->leadconn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock_bh(&session->frwd_lock);
}
@@ -2440,7 +2496,6 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- conn = session->leadconn;
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -2455,16 +2510,17 @@ failed:
return FAILED;
}
+ conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
+
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
- /*
- * we drop the lock here but the leadconn cannot be destoyed while
- * we are in the scsi eh
- */
+
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ iscsi_put_conn(conn->cls_conn);
ISCSI_DBG_EH(session, "wait for relogin\n");
- wait_event_interruptible(conn->ehwait,
+ wait_event_interruptible(session->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
session->state == ISCSI_STATE_RECOVERY_FAILED);
@@ -2525,11 +2581,11 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto unlock;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_tgt_reset_pdu(sc, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
@@ -2538,7 +2594,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
goto unlock;
}
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
@@ -2546,7 +2602,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto unlock;
}
@@ -2558,7 +2614,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, -1, DID_ERROR);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
@@ -2888,7 +2944,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->tt = iscsit;
session->dd_data = cls_session->dd_data + sizeof(*session);
+ session->tmf_state = TMF_INITIAL;
+ timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
mutex_init(&session->eh_mutex);
+
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
@@ -2939,10 +2998,9 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
- iscsi_pool_free(&session->cmdpool);
-
iscsi_remove_session(cls_session);
+ iscsi_pool_free(&session->cmdpool);
kfree(session->password);
kfree(session->password_in);
kfree(session->username);
@@ -2992,7 +3050,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
conn->id = conn_idx;
conn->exp_statsn = 0;
- conn->tmf_state = TMF_INITIAL;
timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
@@ -3017,8 +3074,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
- timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0);
- init_waitqueue_head(&conn->ehwait);
+ init_waitqueue_head(&session->ehwait);
return cls_conn;
@@ -3053,7 +3109,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
* leading connection? then give up on recovery.
*/
session->state = ISCSI_STATE_TERMINATE;
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock_bh(&session->frwd_lock);
@@ -3128,7 +3184,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
* commands after successful recovery
*/
conn->stop_stage = 0;
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
session->age++;
if (session->age == 16)
session->age = 0;
@@ -3142,7 +3198,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
spin_unlock_bh(&session->frwd_lock);
iscsi_unblock_session(session->cls_session);
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_start);
@@ -3236,7 +3292,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
spin_lock_bh(&session->frwd_lock);
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
fail_mgmt_tasks(session, conn);
- memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
+ memset(&session->tmhdr, 0, sizeof(session->tmhdr));
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
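The libiscsi changes above move tmf_state, tmhdr, tmf_timer and ehwait from the connection to the session, so error handling is serialized per session rather than per connection. A minimal standalone model of the "only one TMF outstanding per session" gate, illustrative only and not the libiscsi locking itself:

	/* The session lock here stands in for frwd_lock; a new TMF may only be
	 * queued once the session has returned to TMF_INITIAL.
	 */
	#include <pthread.h>
	#include <stdbool.h>

	enum tmf_state { TMF_INITIAL, TMF_QUEUED, TMF_SUCCESS, TMF_FAILED };

	struct session_model {
		pthread_mutex_t lock;
		enum tmf_state tmf_state;
	};

	static struct session_model session = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.tmf_state = TMF_INITIAL,
	};

	static bool try_queue_tmf(struct session_model *s)
	{
		bool queued = false;

		pthread_mutex_lock(&s->lock);
		if (s->tmf_state == TMF_INITIAL) {	/* one TMF at a time */
			s->tmf_state = TMF_QUEUED;
			queued = true;
		}
		pthread_mutex_unlock(&s->lock);
		return queued;
	}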
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e9a86128f1f1..4aa1fda95f35 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -116,9 +116,10 @@ static void sas_ata_task_done(struct sas_task *task)
}
}
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
- ((stat->stat == SAM_STAT_CHECK_CONDITION &&
- dev->sata_dev.class == ATA_DEV_ATAPI))) {
+ if (stat->stat == SAS_PROTO_RESPONSE ||
+ stat->stat == SAS_SAM_STAT_GOOD ||
+ (stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.class == ATA_DEV_ATAPI)) {
memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 9f5068f3bcfb..dd205414e505 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work)
break;
#else
pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
- /* Fall through */
+ fallthrough;
#endif
/* Fall through - only for the #else condition above. */
default:
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 6d583e8c403a..e00688540219 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -101,7 +101,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = 0;
break;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 1bf939818c98..ee44a0d7730b 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -911,6 +911,14 @@ void sas_task_abort(struct sas_task *task)
blk_abort_request(sc->request);
}
+int sas_slave_alloc(struct scsi_device *sdev)
+{
+ if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
+ return -ENXIO;
+
+ return 0;
+}
+
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = starget->hostdata;
@@ -957,5 +965,6 @@ EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
+EXPORT_SYMBOL_GPL(sas_slave_alloc);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);
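The new sas_slave_alloc() above rejects non-zero LUNs on SATA devices at slave allocation time. A hedged sketch of how a libsas LLDD would plausibly wire it into its SCSI host template; the driver name and the exact set of other callbacks are placeholders, not a specific driver's template:

	#include <linux/module.h>
	#include <scsi/scsi_host.h>
	#include <scsi/libsas.h>

	/* Hypothetical LLDD host template: the .slave_alloc hookup is the point
	 * of this sketch, the remaining fields are routine libsas helpers.
	 */
	static struct scsi_host_template example_sas_sht = {
		.module			 = THIS_MODULE,
		.name			 = "example_sas",
		.queuecommand		 = sas_queuecommand,
		.target_alloc		 = sas_target_alloc,
		.slave_alloc		 = sas_slave_alloc,	/* reject LUN != 0 on SATA */
		.slave_configure	 = sas_slave_configure,
		.eh_device_reset_handler = sas_eh_device_reset_handler,
		.eh_target_reset_handler = sas_eh_target_reset_handler,
		.target_destroy		 = sas_target_destroy,
		.this_id		 = -1,
	};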
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index e2d42593ce52..2966ead1d421 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -20,7 +20,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
else if (iu->datapres == 1)
tstat->stat = iu->resp_data[3];
else if (iu->datapres == 2) {
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
tstat->buf_valid_size =
min_t(int, SAS_STATUS_BUF_SIZE,
be32_to_cpu(iu->sense_data_len));
@@ -32,7 +32,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
}
else
/* when datapres contains corrupt/unknown value... */
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
}
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f8de0d10620b..17028861234b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -266,6 +266,7 @@ struct lpfc_stats {
uint32_t elsRcvECHO;
uint32_t elsRcvLCB;
uint32_t elsRcvRDP;
+ uint32_t elsRcvRDF;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
@@ -303,6 +304,64 @@ struct lpfc_stats {
struct lpfc_hba;
+#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
+
+#define LPFC_MAX_VMID_SIZE 256
+#define LPFC_COMPRESS_VMID_SIZE 16
+
+union lpfc_vmid_io_tag {
+ u32 app_id; /* App Id vmid */
+ u8 cs_ctl_vmid; /* Priority tag vmid */
+};
+
+#define JIFFIES_PER_HR (HZ * 60 * 60)
+
+struct lpfc_vmid {
+ u8 flag;
+#define LPFC_VMID_SLOT_FREE 0x0
+#define LPFC_VMID_SLOT_USED 0x1
+#define LPFC_VMID_REQ_REGISTER 0x2
+#define LPFC_VMID_REGISTERED 0x4
+#define LPFC_VMID_DE_REGISTER 0x8
+ char host_vmid[LPFC_MAX_VMID_SIZE];
+ union lpfc_vmid_io_tag un;
+ struct hlist_node hnode;
+ u64 io_rd_cnt;
+ u64 io_wr_cnt;
+ u8 vmid_len;
+ u8 delete_inactive; /* Delete if inactive flag 0 = no, 1 = yes */
+ u32 hash_index;
+ u64 __percpu *last_io_time;
+};
+
+#define lpfc_vmid_is_type_priority_tag(vport)\
+ (vport->vmid_priority_tagging ? 1 : 0)
+
+#define LPFC_VMID_HASH_SIZE 256
+#define LPFC_VMID_HASH_MASK 255
+#define LPFC_VMID_HASH_SHIFT 6
+
+struct lpfc_vmid_context {
+ struct lpfc_vmid *vmp;
+ struct lpfc_nodelist *nlp;
+ bool instantiated;
+};
+
+struct lpfc_vmid_priority_range {
+ u8 low;
+ u8 high;
+ u8 qos;
+};
+
+struct lpfc_vmid_priority_info {
+ u32 num_descriptors;
+ struct lpfc_vmid_priority_range *vmid_range;
+};
+
+#define QFPA_EVEN_ONLY 0x01
+#define QFPA_ODD_ONLY 0x02
+#define QFPA_EVEN_ODD 0x03
+
enum discovery_state {
LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
LPFC_VPORT_FAILED = 1, /* vport has failed */
@@ -442,6 +501,9 @@ struct lpfc_vport {
#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */
+#define WORKER_CHECK_INACTIVE_VMID 0x4000 /* hba: check inactive vmids */
+#define WORKER_CHECK_VMID_ISSUE_QFPA 0x8000 /* vport: Check if qfpa needs
+ * to be issued */
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
@@ -452,6 +514,8 @@ struct lpfc_vport {
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */
+#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */
+#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
@@ -470,9 +534,36 @@ struct lpfc_vport {
uint32_t cfg_tgt_queue_depth;
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
+ /* VMID parameters */
+ u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
+ u32 max_vmid; /* maximum VMIDs allowed per port */
+ u32 cur_vmid_cnt; /* Current VMID count */
+#define LPFC_MIN_VMID 4
+#define LPFC_MAX_VMID 255
+ u32 vmid_inactivity_timeout; /* Time after which the VMID */
+ /* deregisters from switch */
+ u32 vmid_priority_tagging;
+#define LPFC_VMID_PRIO_TAG_DISABLE 0 /* Disable */
+#define LPFC_VMID_PRIO_TAG_SUP_TARGETS 1 /* Allow supported targets only */
+#define LPFC_VMID_PRIO_TAG_ALL_TARGETS 2 /* Allow all targets */
+ unsigned long *vmid_priority_range;
+#define LPFC_VMID_MAX_PRIORITY_RANGE 256
+#define LPFC_VMID_PRIORITY_BITMAP_SIZE 32
+ u8 vmid_flag;
+#define LPFC_VMID_IN_USE 0x1
+#define LPFC_VMID_ISSUE_QFPA 0x2
+#define LPFC_VMID_QFPA_CMPL 0x4
+#define LPFC_VMID_QOS_ENABLED 0x8
+#define LPFC_VMID_TIMER_ENBLD 0x10
+ struct fc_qfpa_res *qfpa_res;
struct fc_vport *fc_vport;
+ struct lpfc_vmid *vmid;
+ DECLARE_HASHTABLE(hash_table, 8);
+ rwlock_t vmid_lock;
+ struct lpfc_vmid_priority_info vmid_priority;
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
@@ -915,6 +1006,7 @@ struct lpfc_hba {
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
+ u32 cfg_fcp_wait_abts_rsp;
uint32_t cfg_delay_discovery;
uint32_t cfg_sli_mode;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -938,6 +1030,13 @@ struct lpfc_hba {
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
+ u32 cfg_max_vmid; /* maximum VMIDs allowed per port */
+ u32 cfg_vmid_app_header;
+#define LPFC_VMID_APP_HEADER_DISABLE 0
+#define LPFC_VMID_APP_HEADER_ENABLE 1
+ u32 cfg_vmid_priority_tagging;
+ u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */
+ /* deregisters from switch */
struct pci_dev *pcidev;
struct list_head work_list;
uint32_t work_ha; /* Host Attention Bits for WT */
@@ -1178,6 +1277,7 @@ struct lpfc_hba {
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
+ struct timer_list inactive_vmid_poll;
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
@@ -1419,3 +1519,27 @@ static const char *routine(enum enum_name table_key) \
} \
return name; \
}
+
+/**
+ * lpfc_is_vmid_enabled - returns whether VMID is enabled for either switch type
+ * @phba: Pointer to HBA context object.
+ *
+ * Relationship between the enable flag, target support, and whether a
+ * vmid tag is required for the particular combination:
+ * ---------------------------------------------------
+ * Switch    Enable Flag    Target Support    VMID Needed
+ * ---------------------------------------------------
+ * App Id        0               NA                N
+ * App Id        1                0                N
+ * App Id        1                1                Y
+ * Pr Tag        0               NA                N
+ * Pr Tag        1                0                N
+ * Pr Tag        1                1                Y
+ * Pr Tag        2                *                Y
+ * ---------------------------------------------------
+ *
+ **/
+static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
+{
+ return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
+}
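The VMID bookkeeping introduced above keys struct lpfc_vmid entries by the VM's identifier string through a small hash table (DECLARE_HASHTABLE(hash_table, 8), LPFC_VMID_HASH_SIZE buckets). A standalone sketch of that style of lookup key; the hash function here is purely illustrative and is not the driver's lpfc_vmid_hash_fn:

	/* Illustrative string hash in the spirit of the VMID table above: hash
	 * the VM identifier and mask it into 256 buckets, then compare the
	 * stored identifier on any candidate found in that bucket.
	 */
	#define VMID_HASH_SIZE 256
	#define VMID_HASH_MASK (VMID_HASH_SIZE - 1)

	static unsigned int vmid_hash(const char *id, int len)
	{
		unsigned int h = 0;
		int i;

		for (i = 0; i < len; i++)
			h = (h * 31) + (unsigned char)id[i];
		return h & VMID_HASH_MASK;	/* bucket index, 0..255 */
	}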
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 0975a8b252a0..eb88aaaf36eb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3449,6 +3449,15 @@ LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
/*
+ * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of
+ * aborted IO.
+ * The range is [0,1]. Default value is 0
+ * 0, IO completes after ABTS issued (default).
+ * 1, IO completes after receipt of ABTS response or timeout.
+ */
+LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
+
+/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
@@ -6153,6 +6162,45 @@ LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
*/
LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
+/*
+ * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if
+ * either vmid_app_header or vmid_priority_tagging is enabled.
+ * 4 - 255 = vmid support enabled for 4-255 VMs
+ * Value range is [4,255].
+ */
+LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
+ "Maximum number of VMs supported");
+
+/*
+ * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours
+ * 0 = Timeout is disabled
+ * Value range is [0,24].
+ */
+LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
+ "Inactivity timeout in hours");
+
+/*
+ * lpfc_vmid_app_header: Enable App Header VMID support
+ * 0 = Support is disabled (default)
+ * 1 = Support is enabled
+ * Value range is [0,1].
+ */
+LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
+ LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
+ "Enable App Header VMID support");
+
+/*
+ * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support
+ * 0 = Support is disabled (default)
+ * 1 = Allow supported targets only
+ * 2 = Allow all targets
+ * Value range is [0,2].
+ */
+LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
+ LPFC_VMID_PRIO_TAG_DISABLE,
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS,
+ "Enable Priority Tagging VMID support");
+
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
&dev_attr_scsi_stat,
@@ -6205,6 +6253,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_enable_npiv,
&dev_attr_lpfc_fcf_failover_policy,
&dev_attr_lpfc_enable_rrq,
+ &dev_attr_lpfc_fcp_wait_abts_rsp,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
&dev_attr_max_vpi,
@@ -6271,6 +6320,10 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
&dev_attr_lpfc_enable_mi,
+ &dev_attr_lpfc_max_vmid,
+ &dev_attr_lpfc_vmid_inactivity_timeout,
+ &dev_attr_lpfc_vmid_app_header,
+ &dev_attr_lpfc_vmid_priority_tagging,
NULL,
};
@@ -7332,6 +7385,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+ lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
@@ -7346,6 +7400,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ /* VMID Inits */
+ lpfc_max_vmid_init(phba, lpfc_max_vmid);
+ lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
+ lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
+ lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
if (phba->sli_rev != LPFC_SLI_REV4)
phba->cfg_EnableXLane = 0;
lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
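The attribute descriptions above document a value range and default for each new parameter (fcp_wait_abts_rsp, max_vmid, vmid_inactivity_timeout, vmid_app_header, vmid_priority_tagging). A minimal model of the range-checked initialization such parameters imply; this is only a sketch of the behaviour described in the comments, not the LPFC_ATTR_* macro expansion itself, and the helper name is hypothetical:

	/* Clamp a module parameter to its documented range, falling back to the
	 * default when the requested value is out of range.
	 */
	static int init_ranged_param(unsigned int val, unsigned int min,
				     unsigned int max, unsigned int def,
				     unsigned int *out)
	{
		if (val >= min && val <= max) {
			*out = val;
			return 0;
		}
		*out = def;	/* out of range: keep the documented default */
		return -EINVAL;
	}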
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 383abf46fd29..737483c3f01d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -80,6 +80,7 @@ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -607,3 +608,14 @@ extern unsigned long lpfc_no_hba_reset[];
extern union lpfc_wqe128 lpfc_iread_cmd_template;
extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
+
+/* vmid interface */
+int lpfc_vmid_uvem(struct lpfc_vport *vport, struct lpfc_vmid *vmid, bool ins);
+uint32_t lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport);
+int lpfc_vmid_cmd(struct lpfc_vport *vport,
+ int cmdcode, struct lpfc_vmid *vmid);
+int lpfc_vmid_hash_fn(const char *vmid, int len);
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ uint32_t hash, uint8_t *buf);
+void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
+int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3bbefa225484..610b6dabb3b5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -75,6 +75,9 @@
static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+static void
+lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
static void
lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
@@ -587,7 +590,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
- struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+ struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
@@ -608,15 +611,14 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- if (usr_flg)
- geniocb->context3 = NULL;
- else
- geniocb->context3 = (uint8_t *) bmp;
+ geniocb->context3 = (uint8_t *) bmp;
/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
+ geniocb->event_tag = event_tag;
+
/* Fill in payload, bp points to frame payload */
icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@@ -707,8 +709,8 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
* lpfc_alloc_ct_rsp.
*/
cnt += 1;
- status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
- cnt, 0, retry);
+ status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
+ phba->fc_eventTag, cnt, 0, retry);
if (status) {
lpfc_free_ct_rsp(phba, outmp);
return -ENOMEM;
@@ -957,6 +959,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9043 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
if (vport->fc_flag & FC_RSCN_MODE)
@@ -1167,6 +1176,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
vport->fc_ns_retry);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9044 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
if (vport->fc_flag & FC_RSCN_MODE)
@@ -1366,6 +1382,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GFF_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9045 Event tag mismatch. Ignoring NS rsp\n");
+ goto iocb_free;
+ }
+
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
@@ -1479,6 +1502,7 @@ out:
lpfc_disc_start(vport);
}
+iocb_free:
free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
@@ -1506,6 +1530,13 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ /* Ignore response if link flipped after this request was made */
+ if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9046 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Preserve the nameserver node to release the reference. */
ns_ndlp = cmdiocb->context_un.ndlp;
@@ -1572,6 +1603,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+out:
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ns_ndlp);
}
@@ -3748,3 +3780,255 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
}
return;
}
+
+static void
+lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_sli_ct_request *ctcmd = inp->virt;
+ struct lpfc_sli_ct_request *ctrsp = outp->virt;
+ u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
+ struct app_id_object *app;
+ u32 cmd, hash, bucket;
+ struct lpfc_vmid *vmp, *cur;
+ u8 *data = outp->virt;
+ int i;
+
+ cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
+ if (cmd == SLI_CTAS_DALLAPP_ID)
+ lpfc_ct_free_iocb(phba, cmdiocb);
+
+ if (lpfc_els_chk_latt(vport) || rspiocb->iocb.ulpStatus) {
+ if (cmd != SLI_CTAS_DALLAPP_ID)
+ return;
+ }
+ /* Check for a CT LS_RJT response */
+ if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ if (cmd != SLI_CTAS_DALLAPP_ID)
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "3306 VMID FS_RJT Data: x%x x%x x%x\n",
+ cmd, ctrsp->ReasonCode,
+ ctrsp->Explanation);
+ if ((cmd != SLI_CTAS_DALLAPP_ID) ||
+ (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
+ (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
+ /* If DALLAPP_ID failed retry later */
+ if (cmd == SLI_CTAS_DALLAPP_ID)
+ vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
+ return;
+ }
+ }
+
+ switch (cmd) {
+ case SLI_CTAS_RAPP_IDENT:
+ app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6712 RAPP_IDENT app id %d port id x%x id "
+ "len %d\n", be32_to_cpu(app->app_id),
+ be32_to_cpu(app->port_id),
+ app->obj.entity_id_len);
+
+ if (app->obj.entity_id_len == 0 || app->port_id == 0)
+ return;
+
+ hash = lpfc_vmid_hash_fn(app->obj.entity_id,
+ app->obj.entity_id_len);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash,
+ app->obj.entity_id);
+ if (vmp) {
+ write_lock(&vport->vmid_lock);
+ vmp->un.app_id = be32_to_cpu(app->app_id);
+ vmp->flag |= LPFC_VMID_REGISTERED;
+ vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ /* Set IN USE flag */
+ vport->vmid_flag |= LPFC_VMID_IN_USE;
+ } else {
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6901 No entry found %s hash %d\n",
+ app->obj.entity_id, hash);
+ }
+ break;
+ case SLI_CTAS_DAPP_IDENT:
+ app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6713 DAPP_IDENT app id %d port id x%x\n",
+ be32_to_cpu(app->app_id),
+ be32_to_cpu(app->port_id));
+ break;
+ case SLI_CTAS_DALLAPP_ID:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "8856 Deregistered all app ids\n");
+ read_lock(&vport->vmid_lock);
+ for (i = 0; i < phba->cfg_max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ if (vmp->flag != LPFC_VMID_SLOT_FREE)
+ memset(vmp, 0, sizeof(struct lpfc_vmid));
+ }
+ read_unlock(&vport->vmid_lock);
+ /* for all elements in the hash table */
+ if (!hash_empty(vport->hash_table))
+ hash_for_each(vport->hash_table, bucket, cur, hnode)
+ hash_del(&cur->hnode);
+ vport->load_flag |= FC_ALLOW_VMID;
+ break;
+ default:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "8857 Invalid command code\n");
+ }
+}
+
+/**
+ * lpfc_vmid_cmd - Build and send a VMID CT command to the management server
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdcode: application-services CT command to send (e.g. RAPP_IDENT).
+ * @vmid: pointer to the VMID entry the command operates on.
+ *
+ * Builds and sends a VMID application-services command using the CT subsystem.
+ */
+int
+lpfc_vmid_cmd(struct lpfc_vport *vport,
+ int cmdcode, struct lpfc_vmid *vmid)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *ctreq;
+ struct ulp_bde64 *bpl;
+ u32 size;
+ u32 rsp_size;
+ u8 *data;
+ struct lpfc_vmid_rapp_ident_list *rap;
+ struct lpfc_vmid_dapp_ident_list *dap;
+ u8 retry = 0;
+ struct lpfc_nodelist *ndlp;
+
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return 0;
+
+ cmpl = lpfc_cmpl_ct_cmd_vmid;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ goto vmid_free_mp_exit;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt)
+ goto vmid_free_mp_virt_exit;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp)
+ goto vmid_free_bmp_exit;
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt)
+ goto vmid_free_bmp_virt_exit;
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3275 VMID Request Data: x%x x%x x%x\n",
+ vport->fc_flag, vport->port_state, cmdcode);
+ ctreq = (struct lpfc_sli_ct_request *)mp->virt;
+ data = mp->virt;
+ /* First populate the CT_IU preamble */
+ memset(data, 0, LPFC_BPL_SIZE);
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+
+ ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+ ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;
+
+ ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+ rsp_size = LPFC_BPL_SIZE;
+ size = 0;
+
+ switch (cmdcode) {
+ case SLI_CTAS_RAPP_IDENT:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ rap = (struct lpfc_vmid_rapp_ident_list *)
+ (DAPP_IDENT_OFFSET + data);
+ rap->no_of_objects = cpu_to_be32(1);
+ rap->obj[0].entity_id_len = vmid->vmid_len;
+ memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
+ size = RAPP_IDENT_OFFSET +
+ sizeof(struct lpfc_vmid_rapp_ident_list);
+ retry = 1;
+ break;
+
+ case SLI_CTAS_GALLAPPIA_ID:
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ size = GALLAPPIA_ID_SIZE;
+ break;
+
+ case SLI_CTAS_DAPP_IDENT:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ dap = (struct lpfc_vmid_dapp_ident_list *)
+ (DAPP_IDENT_OFFSET + data);
+ dap->no_of_objects = cpu_to_be32(1);
+ dap->obj[0].entity_id_len = vmid->vmid_len;
+ memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
+ size = DAPP_IDENT_OFFSET +
+ sizeof(struct lpfc_vmid_dapp_ident_list);
+ write_lock(&vport->vmid_lock);
+ vmid->flag &= ~LPFC_VMID_REGISTERED;
+ write_unlock(&vport->vmid_lock);
+ retry = 1;
+ break;
+
+ case SLI_CTAS_DALLAPP_ID:
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ size = DALLAPP_ID_SIZE;
+ break;
+
+ default:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "7062 VMID cmdcode x%x not supported\n",
+ cmdcode);
+ goto vmid_free_all_mem;
+ }
+
+ ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = putPaddrHigh(mp->phys);
+ bpl->addrLow = putPaddrLow(mp->phys);
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = size;
+
+ /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ * to hold ndlp reference for the corresponding callback function.
+ */
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
+ return 0;
+
+ vmid_free_all_mem:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ vmid_free_bmp_virt_exit:
+ kfree(bmp);
+ vmid_free_bmp_exit:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ vmid_free_mp_virt_exit:
+ kfree(mp);
+ vmid_free_mp_exit:
+
+ /* Issue CT request failed */
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "3276 VMID CT request failed Data: x%x\n", cmdcode);
+ return -EIO;
+}
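As a usage note for the builder above: registering a new VM identifier is expected to fill a struct lpfc_vmid and then issue SLI_CTAS_RAPP_IDENT through lpfc_vmid_cmd(). The sketch below shows that call shape only; the wrapper function is hypothetical and the surrounding setup is assumed to have been done by the caller:

	/* Sketch: register one VM application identifier with the fabric's
	 * management server. Assumes vmp was taken from vport->vmid[] and its
	 * host_vmid/vmid_len fields were already filled in.
	 */
	static int example_register_vmid(struct lpfc_vport *vport,
					 struct lpfc_vmid *vmp)
	{
		int rc;

		rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
		if (rc)
			return rc;	/* CT request could not be issued */

		/* The completion handler lpfc_cmpl_ct_cmd_vmid() marks the
		 * entry LPFC_VMID_REGISTERED once the fabric assigns app_id.
		 */
		return 0;
	}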
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 658a962832b3..6ff85ae57e79 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -863,16 +863,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
statep, ndlp->nlp_DID);
len += scnprintf(buf+len, size-len,
- "WWPN x%llx ",
+ "WWPN x%016llx ",
wwn_to_u64(ndlp->nlp_portname.u.wwn));
len += scnprintf(buf+len, size-len,
- "WWNN x%llx ",
+ "WWNN x%016llx ",
wwn_to_u64(ndlp->nlp_nodename.u.wwn));
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- len += scnprintf(buf+len, size-len, "RPI:%04d ",
- ndlp->nlp_rpi);
- else
- len += scnprintf(buf+len, size-len, "RPI:none ");
+ len += scnprintf(buf+len, size-len, "RPI:x%04x ",
+ ndlp->nlp_rpi);
len += scnprintf(buf+len, size-len, "flag:x%08x ",
ndlp->nlp_flag);
if (!ndlp->nlp_type)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 08999aad6a10..131374a61d7e 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -86,6 +86,7 @@ enum lpfc_fc4_xpt_flags {
struct lpfc_nodelist {
struct list_head nlp_listp;
+ struct serv_parm fc_sparam; /* buffer for service params */
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
@@ -124,6 +125,7 @@ struct lpfc_nodelist {
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
u8 nlp_nvme_info; /* NVME NSLER Support */
+ uint8_t vmid_support; /* destination VMID support */
#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 21108f322c99..e481f5fe29d7 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -55,9 +56,15 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb);
+static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
static int lpfc_max_els_tries = 3;
+static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
+static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
+static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
+
/**
* lpfc_els_chk_latt - Check host link attention event for a vport
* @vport: pointer to a host virtual N_Port data structure.
@@ -314,10 +321,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x\n",
+ "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
- vport->fc_flag);
+ vport->fc_flag, ndlp->nlp_flag, vport);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1112,11 +1119,15 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x, "
- "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
+ "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
- vport->port_state, vport->fc_flag);
+ vport->port_state, vport->fc_flag,
+ sp->cmn.priority_tagging);
+
+ if (sp->cmn.priority_tagging)
+ vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
if (vport->port_state == LPFC_FLOGI) {
/*
@@ -1175,6 +1186,15 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
+ } else if (vport->port_state > LPFC_FLOGI &&
+ vport->fc_flag & FC_PT2PT) {
+ /*
+ * In a p2p topology, it is possible that discovery has
+ * already progressed, and this completion can be ignored.
+ * Recheck the indicated topology.
+ */
+ if (!sp->cmn.fPort)
+ goto out;
}
flogifail:
@@ -1299,6 +1319,18 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ /* Determine if switch supports priority tagging */
+ if (phba->cfg_vmid_priority_tagging) {
+ sp->cmn.priority_tagging = 1;
+ /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
+ if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
+ memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
+ sizeof(phba->wwpn));
+ memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
+ sizeof(phba->wwnn));
+ }
+ }
+
if (phba->sli_rev == LPFC_SLI_REV4) {
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
@@ -1925,6 +1957,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
int disc;
+ struct serv_parm *sp = NULL;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1998,9 +2031,20 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
- /* As long as this node is not registered with the scsi or nvme
- * transport, it is no longer an active node. Otherwise
- * devloss handles the final cleanup.
+ /* If a PLOGI collision occurred, the node needs to continue
+ * with the reglogin process.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
+ ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ spin_unlock_irq(&ndlp->lock);
+ goto out;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ /* No PLOGI collision and the node is not registered with the
+ * scsi or nvme transport. It is no longer an active node. Just
+ * start the device remove process.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
@@ -2015,6 +2059,23 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context2)->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+
+ sp = (struct serv_parm *)((u8 *)prsp->virt +
+ sizeof(u32));
+
+ ndlp->vmid_support = 0;
+ if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
+ (phba->cfg_vmid_priority_tagging &&
+ sp->cmn.priority_tagging)) {
+ lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
+ "4018 app_hdr_support %d tagging %d DID x%x\n",
+ sp->cmn.app_hdr_support,
+ sp->cmn.priority_tagging,
+ ndlp->nlp_DID);
+ /* if the dest port supports VMID, mark it in ndlp */
+ ndlp->vmid_support = 1;
+ }
+
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
@@ -2137,6 +2198,14 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
sp->cmn.bbRcvSizeMsb &= 0xF;
+ /* Check if the destination port supports VMID */
+ ndlp->vmid_support = 0;
+ if (vport->vmid_priority_tagging)
+ sp->cmn.priority_tagging = 1;
+ else if (phba->cfg_vmid_app_header &&
+ bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
+ sp->cmn.app_hdr_support = 1;
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
@@ -2869,6 +2938,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* log into the remote port.
*/
if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ spin_lock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
lpfc_els_free_iocb(phba, cmdiocb);
@@ -3061,6 +3135,95 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
+ * @vport: pointer to lpfc_vport data structure.
+ * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
+ *
+ * This routine registers the rpi assigned to the fabric controller
+ * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
+ * state triggering a registration with the SCSI transport.
+ *
+ * This routine is singled out because the fabric controller node
+ * does not receive a PLOGI. This routine is consumed by the
+ * SCR and RDF ELS commands. Callers are expected to qualify
+ * with SLI4 first.
+ **/
+static int
+lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+{
+ int rc = 0;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ns_ndlp;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *mp;
+
+ if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ return rc;
+
+ ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ns_ndlp)
+ return -ENODEV;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
+ __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
+ ns_ndlp->nlp_state);
+ if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENODEV;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0936 %s: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n", __func__,
+ fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ return -ENOMEM;
+ }
+ rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
+ (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+ if (rc) {
+ rc = -EACCES;
+ goto out;
+ }
+
+ fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
+ mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
+ if (!mbox->ctx_ndlp) {
+ rc = -ENOMEM;
+ goto out_mem;
+ }
+
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = -ENODEV;
+ lpfc_nlp_put(fc_ndlp);
+ goto out_mem;
+ }
+ /* Success path. Exit. */
+ lpfc_nlp_set_state(vport, fc_ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ return 0;
+
+ out_mem:
+ fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+
+ out:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0938 %s: failed to format reg_login "
+ "Data: x%x x%x x%x x%x\n", __func__,
+ fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ return rc;
+}
+
+/**
* lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
* @phba: pointer to lpfc hba data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3206,10 +3369,18 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
-
if (!elsiocb)
return 1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
+ if (rc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0937 %s: Failed to reg fc node, rc %d\n",
+ __func__, rc);
+ return 1;
+ }
+ }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
@@ -3497,6 +3668,17 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
if (!elsiocb)
return -ENOMEM;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0939 %s: FC_NODE x%x RPI x%x flag x%x "
+ "ste x%x type x%x Not registered\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_type);
+ return -ENODEV;
+ }
+
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
@@ -3537,6 +3719,43 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
return 0;
}
+/**
+ * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * A received RDF implies a possible change to fabric supported diagnostic
+ * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
+ * RDF request to reregister for supported diagnostic functions.
+ *
+ * Return code
+ * 0 - Success
+ * -EIO - Failed to process received RDF
+ **/
+static int
+lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ /* Send LS_ACC */
+ if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1623 Failed to RDF_ACC from x%x for x%x\n",
+ ndlp->nlp_DID, vport->fc_myDID);
+ return -EIO;
+ }
+
+ /* Issue new RDF for reregistering */
+ if (lpfc_issue_els_rdf(vport, 0)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2623 Failed to re register RDF for x%x\n",
+ vport->fc_myDID);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
* @vport: pointer to a host virtual N_Port data structure.
@@ -4383,12 +4602,27 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
+ /* This clause allows the LOGO ACC to complete and free resources
+ * for the Fabric Domain Controller. It deliberately skips the
+ * unreg_rpi and rpi release because some fabrics send RDP
+ * requests after logging out from the initiator.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
+ goto out;
+
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
- /* If the ndlp is being used by another discovery
- * thread, just unregister the RPI.
+ /* A LOGO is completing and the node is in NPR state.
+ * If this is a fabric node that cleared its transport
+ * registration, release the rpi.
*/
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
@@ -4397,7 +4631,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context1 = NULL;
}
}
-
+ out:
/*
* The driver received a LOGO from the rport and has ACK'd it.
* At this point, the driver is done so release the IOCB
@@ -4424,28 +4658,37 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ u32 mbx_flag = pmb->mbox_flag;
+ u32 mbx_cmd = pmb->u.mb.mbxCommand;
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi x%x DID:%x flg:%x %d x%px\n",
+ "0006 rpi x%x DID:%x flg:%x %d x%px "
+ "mbx_cmd x%x mbx_flag x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp);
- /* This is the end of the default RPI cleanup logic for
- * this ndlp and it could get released. Clear the nlp_flags to
- * prevent any further processing.
+ kref_read(&ndlp->kref), ndlp, mbx_cmd,
+ mbx_flag, pmb);
+
+ /* This ends the default/temporary RPI cleanup logic for this
+ * ndlp and the node and rpi need to be released. Free the rpi
+ * first on an UNREG_LOGIN and then release the final
+ * references.
*/
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ if (mbx_cmd == MBX_UNREG_LOGIN)
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_put(ndlp);
- lpfc_nlp_not_used(ndlp);
+ lpfc_drop_node(ndlp->vport, ndlp);
}
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
@@ -4503,11 +4746,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
- "Data: x%x x%x x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0) &&
(ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
@@ -4587,6 +4830,20 @@ out:
spin_unlock_irq(&ndlp->lock);
}
+ /* An SLI4 NPIV instance wants to drop the node at this point under
+ * these conditions and release the RPI.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (vport && vport->port_type == LPFC_NPIV_PORT) &&
+ ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_drop_node(vport, ndlp);
+ }
+
/* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -4632,6 +4889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
+ struct fc_els_rdf_resp *rdf_resp;
oldcmd = &oldiocb->iocb;
@@ -4743,6 +5001,29 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
"Issue ACC PRLO: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
+ case ELS_CMD_RDF:
+ cmdsize = sizeof(*rdf_resp);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ rdf_resp = (struct fc_els_rdf_resp *)pcmd;
+ memset(rdf_resp, 0, sizeof(*rdf_resp));
+ rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
+
+ /* FC-LS-5 specifies desc_list_len shall be set to 12 */
+ rdf_resp->desc_list_len = cpu_to_be32(12);
+
+ /* FC-LS-5 specifies LS REQ Information descriptor */
+ rdf_resp->lsri.desc_tag = cpu_to_be32(1);
+ rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
+ rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
+ break;
default:
return 1;
}
@@ -4775,10 +5056,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
"XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
+ "RPI: x%x, fc_flag x%x refcnt %d\n",
rc, elsiocb->iotag, elsiocb->sli4_xritag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
+ ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
return 0;
}
@@ -4856,6 +5137,17 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
return 1;
}
+ /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
+ * node's assigned RPI gets released as this node will get
+ * freed.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_NPIV_PORT) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -8845,6 +9137,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* There are no replies, so no rjt codes */
break;
+ case ELS_CMD_RDF:
+ phba->fc_stat.elsRcvRDF++;
+ /* Accept RDF only from fabric controller */
+ if (did != Fabric_Cntl_DID) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1115 Received RDF from invalid DID "
+ "x%x\n", did);
+ rjt_err = LSRJT_PROTOCOL_ERR;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ goto lsrjt;
+ }
+
+ lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -10208,3 +10514,312 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
lpfc_unreg_rpi(vport, ndlp);
}
+static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
+{
+ bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
+}
+
+static void
+lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
+{
+ u32 i;
+
+ if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
+ return;
+
+ for (i = min; i <= max; i++)
+ set_bit(i, vport->vmid_priority_range);
+}
+
+static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
+{
+ set_bit(ctcl_vmid, vport->vmid_priority_range);
+}
+
+u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
+{
+ u32 i;
+
+ i = find_first_bit(vport->vmid_priority_range,
+ LPFC_VMID_MAX_PRIORITY_RANGE);
+
+ if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
+ return 0;
+
+ clear_bit(i, vport->vmid_priority_range);
+ return i;
+}
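+
+/* Note: lpfc_vmid_get_cs_ctl() pairs with lpfc_vmid_put_cs_ctl(): a priority
+ * value pulled from the bitmap for a VE is assumed to be returned via the
+ * put routine when the VE is deinstantiated (see lpfc_vmid_uvem()).
+ */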
+
+#define MAX_PRIORITY_DESC 255
+
+static void
+lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct priority_range_desc *desc;
+ struct lpfc_dmabuf *prsp = NULL;
+ struct lpfc_vmid_priority_range *vmid_range = NULL;
+ u32 *data;
+ struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
+ IOCB_t *irsp = &rspiocb->iocb;
+ u8 *pcmd, max_desc;
+ u32 len, i;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+
+ prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ pcmd = prsp->virt;
+ data = (u32 *)pcmd;
+ if (data[0] == ELS_CMD_LS_RJT) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "3277 QFPA LS_RJT x%x x%x\n",
+ data[0], data[1]);
+ goto out;
+ }
+ if (irsp->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "6529 QFPA failed with status x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto out;
+ }
+
+ if (!vport->qfpa_res) {
+ max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
+ vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
+ GFP_KERNEL);
+ if (!vport->qfpa_res)
+ goto out;
+ }
+
+ len = *((u32 *)(pcmd + 4));
+ len = be32_to_cpu(len);
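+	/* The length word is assumed to count only the descriptor bytes, so
+	 * len + 8 below copies the LS_ACC header words (cmd + len) along
+	 * with the descriptors.
+	 */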
+ memcpy(vport->qfpa_res, pcmd, len + 8);
+ len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
+
+ desc = (struct priority_range_desc *)(pcmd + 8);
+ vmid_range = vport->vmid_priority.vmid_range;
+ if (!vmid_range) {
+ vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
+ GFP_KERNEL);
+ if (!vmid_range) {
+ kfree(vport->qfpa_res);
+ goto out;
+ }
+ vport->vmid_priority.vmid_range = vmid_range;
+ }
+ vport->vmid_priority.num_descriptors = len;
+
+ for (i = 0; i < len; i++, vmid_range++, desc++) {
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
+ "6539 vmid values low=%d, high=%d, qos=%d, "
+ "local ve id=%d\n", desc->lo_range,
+ desc->hi_range, desc->qos_priority,
+ desc->local_ve_id);
+
+ vmid_range->low = desc->lo_range << 1;
+ if (desc->local_ve_id == QFPA_ODD_ONLY)
+ vmid_range->low++;
+ if (desc->qos_priority)
+ vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
+ vmid_range->qos = desc->qos_priority;
+
+ vmid_range->high = desc->hi_range << 1;
+ if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
+ (desc->local_ve_id == QFPA_EVEN_ODD))
+ vmid_range->high++;
+ }
+ lpfc_init_cs_ctl_bitmap(vport);
+ for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
+ lpfc_vmid_set_cs_ctl_range(vport,
+ vport->vmid_priority.vmid_range[i].low,
+ vport->vmid_priority.vmid_range[i].high);
+ }
+
+ vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
+ out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+}
+
+int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ u8 *pcmd;
+ int ret;
+
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENXIO;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
+ ndlp->nlp_DID, ELS_CMD_QFPA);
+ if (!elsiocb)
+ return -ENOMEM;
+
+ pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+ *((u32 *)(pcmd)) = ELS_CMD_QFPA;
+ pcmd += 4;
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ return -ENXIO;
+ }
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return -EIO;
+ }
+ vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
+ return 0;
+}
+
+int
+lpfc_vmid_uvem(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid, bool instantiated)
+{
+ struct lpfc_vem_id_desc *vem_id_desc;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ struct instantiated_ve_desc *inst_desc;
+ struct lpfc_vmid_context *vmid_context;
+ u8 *pcmd;
+ u32 *len;
+ int ret = 0;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENXIO;
+
+ vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
+ if (!vmid_context)
+ return -ENOMEM;
+ elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
+ ndlp, Fabric_DID, ELS_CMD_UVEM);
+ if (!elsiocb)
+ goto out;
+
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
+ "3427 Host vmid %s %d\n",
+ vmid->host_vmid, instantiated);
+ vmid_context->vmp = vmid;
+ vmid_context->nlp = ndlp;
+ vmid_context->instantiated = instantiated;
+ elsiocb->vmid_tag.vmid_context = vmid_context;
+ pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+ if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
+ memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ *((u32 *)(pcmd)) = ELS_CMD_UVEM;
+ len = (u32 *)(pcmd + 4);
+ *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
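+	/* The descriptor list length excludes the two ELS header words
+	 * (command + length), hence LPFC_UVEM_SIZE - 8.
+	 */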
+
+ vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
+ vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
+ vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
+ memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
+ inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
+ inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
+ memcpy(inst_desc->global_vem_id, vmid->host_vmid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
+ bf_set(lpfc_instantiated_local_id, inst_desc,
+ vmid->un.cs_ctl_vmid);
+ if (instantiated) {
+ inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
+ } else {
+ inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
+ lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
+ }
+ inst_desc->word6 = cpu_to_be32(inst_desc->word6);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ goto out;
+ }
+
+ ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto out;
+ }
+
+ return 0;
+ out:
+ kfree(vmid_context);
+ return -EIO;
+}
+
+static void
+lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = icmdiocb->vport;
+ struct lpfc_dmabuf *prsp = NULL;
+ struct lpfc_vmid_context *vmid_context =
+ icmdiocb->vmid_tag.vmid_context;
+ struct lpfc_nodelist *ndlp = icmdiocb->context1;
+ u8 *pcmd;
+ u32 *data;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
+ struct lpfc_vmid *vmid;
+
+ vmid = vmid_context->vmp;
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ ndlp = NULL;
+
+ prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+ pcmd = prsp->virt;
+ data = (u32 *)pcmd;
+ if (data[0] == ELS_CMD_LS_RJT) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
+ goto out;
+ }
+ if (irsp->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "4533 UVEM error status %x: %x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto out;
+ }
+ spin_lock(&phba->hbalock);
+ /* Set IN USE flag */
+ vport->vmid_flag |= LPFC_VMID_IN_USE;
+ phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
+ spin_unlock(&phba->hbalock);
+
+ if (vmid_context->instantiated) {
+ write_lock(&vport->vmid_lock);
+ vmid->flag |= LPFC_VMID_REGISTERED;
+ vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ }
+
+ out:
+ kfree(vmid_context);
+ lpfc_els_free_iocb(phba, icmdiocb);
+ lpfc_nlp_put(ndlp);
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f5a898c2c904..7cc5920979f8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -72,14 +72,14 @@ static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
if (ndlp->nlp_fc4_type ||
- ndlp->nlp_DID == Fabric_DID ||
- ndlp->nlp_DID == NameServer_DID ||
- ndlp->nlp_DID == FDMI_DID)
+ ndlp->nlp_type & NLP_FABRIC)
return 1;
return 0;
}
@@ -237,6 +237,110 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
}
/**
+ * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
+ * @vport: Pointer to vport context object.
+ *
+ * This function checks for idle VMID entries related to a particular vport.
+ * Any entries found unused/idle are freed.
+ **/
+static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
+{
+ u16 keep;
+ u32 difftime = 0, r, bucket;
+ u64 *lta;
+ int cpu;
+ struct lpfc_vmid *vmp;
+
+ write_lock(&vport->vmid_lock);
+
+ if (!vport->cur_vmid_cnt)
+ goto out;
+
+ /* iterate through the table */
+ hash_for_each(vport->hash_table, bucket, vmp, hnode) {
+ keep = 0;
+ if (vmp->flag & LPFC_VMID_REGISTERED) {
+ /* check if the particular VMID is in use */
+ /* for all available per cpu variable */
+ for_each_possible_cpu(cpu) {
+ /* if last access time is less than timeout */
+ lta = per_cpu_ptr(vmp->last_io_time, cpu);
+ if (!lta)
+ continue;
+ difftime = (jiffies) - (*lta);
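+				/* vmid_inactivity_timeout is presumably
+				 * configured in hours, hence the
+				 * JIFFIES_PER_HR scaling below.
+				 */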
+ if ((vport->vmid_inactivity_timeout *
+ JIFFIES_PER_HR) > difftime) {
+ keep = 1;
+ break;
+ }
+ }
+
+ /* if none of the cpus have been used by the vm, */
+ /* remove the entry if already registered */
+ if (!keep) {
+ /* mark the entry for deregistration */
+ vmp->flag = LPFC_VMID_DE_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ if (vport->vmid_priority_tagging)
+ r = lpfc_vmid_uvem(vport, vmp, false);
+ else
+ r = lpfc_vmid_cmd(vport,
+ SLI_CTAS_DAPP_IDENT,
+ vmp);
+
+ /* decrement number of active vms and mark */
+ /* entry in slot as free */
+ write_lock(&vport->vmid_lock);
+ if (!r) {
+ struct lpfc_vmid *ht = vmp;
+
+ vport->cur_vmid_cnt--;
+ ht->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(ht->last_io_time);
+ ht->last_io_time = NULL;
+ hash_del(&ht->hnode);
+ }
+ }
+ }
+ }
+ out:
+ write_unlock(&vport->vmid_lock);
+}
+
+/**
+ * lpfc_check_inactive_vmid - VMID inactivity checker
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the worker thread to determine if an entry in
+ * the VMID table can be released since there was no I/O activity seen from that
+ * particular VM for the specified time. When this happens, the entry in the
+ * table is released and the resources on the switch are also cleared.
+ **/
+
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
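+	/* Slot 0 may be unset when only the physical port is active; the loop
+	 * below falls back to phba->pport in that case, presumably so the
+	 * base port is always checked.
+	 */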
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ lpfc_check_inactive_vmid_one(vport);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
* lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
* @ndlp: Pointer to remote node object.
*
@@ -325,6 +429,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
+ if (!lpfc_issue_els_qfpa(vport))
+ vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
/**
* lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
* @phba: Pointer to hba context object.
@@ -645,6 +775,22 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+ /* Handle VMID Events */
+ if (lpfc_is_vmid_enabled(phba)) {
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_VMID_ISSUE_QFPA) {
+ lpfc_check_vmid_qfpa_issue(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_INACTIVE_VMID) {
+ lpfc_check_inactive_vmid(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_INACTIVE_VMID;
+ }
+ }
+
/* Process SLI4 events */
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
if (phba->hba_flag & HBA_RRQ_ACTIVE)
@@ -826,7 +972,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
((ndlp->nlp_DID == NameServer_DID) ||
- (ndlp->nlp_DID == FDMI_DID))))
+ (ndlp->nlp_DID == FDMI_DID) ||
+ (ndlp->nlp_DID == Fabric_Cntl_DID))))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
@@ -4160,6 +4307,53 @@ out:
return;
}
+/*
+ * This routine handles processing a Fabric Controller REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ pmb->ctx_ndlp = NULL;
+ pmb->ctx_buf = NULL;
+
+ if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0933 %s: Register FC login error: 0x%x\n",
+ __func__, mb->mbxStatus);
+ goto out;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->nlp_state);
+
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+ out:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Drop the reference count from the mbox at the end after
+ * all the current references to the ndlp have been handled.
+ */
+ lpfc_nlp_put(ndlp);
+}
+
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4789,12 +4983,17 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
+ /* NLP_RELEASE_RPI is only set for SLI4 ports. */
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irq(&ndlp->lock);
}
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ spin_unlock_irq(&ndlp->lock);
}
}
@@ -5129,8 +5328,10 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->dev_loss_evt.evt_listp);
list_del_init(&ndlp->recovery_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
+
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
+
return 0;
}
@@ -6176,8 +6377,23 @@ lpfc_nlp_release(struct kref *kref)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
lpfc_cleanup_node(vport, ndlp);
- /* Clear Node key fields to give other threads notice
- * that this node memory is not valid anymore.
+ /* Not all ELS transactions have registered the RPI with the port.
+ * In these cases the rpi usage is temporary and the node is
+ * released when the WQE is completed. Catch this case to free the
+ * RPI to the pool. Because this node is in the release path, a lock
+ * is unnecessary. All references are gone and the node has been
+ * dequeued.
+ */
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
+ !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
+ }
+
+	/* The node is not freed back to memory; it is released to a pool, so
+ * the node fields need to be cleaned up.
*/
ndlp->vport = NULL;
ndlp->nlp_state = NLP_STE_FREED_NODE;
@@ -6257,6 +6473,7 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
+
if (kref_read(&ndlp->kref) == 1)
if (lpfc_nlp_put(ndlp))
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 42682d95af52..4a5a85ed42ec 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -275,6 +275,7 @@ struct lpfc_sli_ct_request {
#define SLI_CT_ACCESS_DENIED 0x10
#define SLI_CT_INVALID_PORT_ID 0x11
#define SLI_CT_DATABASE_EMPTY 0x12
+#define SLI_CT_APP_ID_NOT_AVAILABLE 0x40
/*
* Name Server Command Codes
@@ -400,16 +401,16 @@ struct csp {
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t multicast:1; /* FC Word 1, bit 25 */
- uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */
- uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t priority_tagging:1; /* FC Word 1, bit 23 */
uint16_t simplex:1; /* FC Word 1, bit 22 */
uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
uint16_t dhd:1; /* FC Word 1, bit 18 */
uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
uint16_t payloadlength:1; /* FC Word 1, bit 16 */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */
uint16_t multicast:1; /* FC Word 1, bit 25 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
@@ -423,7 +424,7 @@ struct csp {
uint16_t dhd:1; /* FC Word 1, bit 18 */
uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
uint16_t simplex:1; /* FC Word 1, bit 22 */
- uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t priority_tagging:1; /* FC Word 1, bit 23 */
#endif
uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
@@ -607,6 +608,8 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A000000
#define ELS_CMD_LCB 0x81000000
#define ELS_CMD_FPIN 0x16000000
+#define ELS_CMD_QFPA 0xB0000000
+#define ELS_CMD_UVEM 0xB1000000
#else /* __LITTLE_ENDIAN_BITFIELD */
#define ELS_CMD_MASK 0xffff
#define ELS_RSP_MASK 0xff
@@ -649,6 +652,8 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A
#define ELS_CMD_LCB 0x81
#define ELS_CMD_FPIN ELS_FPIN
+#define ELS_CMD_QFPA 0xB0
+#define ELS_CMD_UVEM 0xB1
#endif
/*
@@ -1317,6 +1322,117 @@ struct fc_rdp_res_frame {
};
+/* UVEM */
+
+#define LPFC_UVEM_SIZE 60
+#define LPFC_UVEM_VEM_ID_DESC_SIZE 16
+#define LPFC_UVEM_VE_MAP_DESC_SIZE 20
+
+#define VEM_ID_DESC_TAG 0x0001000A
+struct lpfc_vem_id_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vem_id[16];
+};
+
+#define LPFC_QFPA_SIZE 4
+
+#define INSTANTIATED_VE_DESC_TAG 0x0001000B
+struct instantiated_ve_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t global_vem_id[16];
+ uint32_t word6;
+#define lpfc_instantiated_local_id_SHIFT 0
+#define lpfc_instantiated_local_id_MASK 0x000000ff
+#define lpfc_instantiated_local_id_WORD word6
+#define lpfc_instantiated_nport_id_SHIFT 8
+#define lpfc_instantiated_nport_id_MASK 0x00ffffff
+#define lpfc_instantiated_nport_id_WORD word6
+};
+
+#define DEINSTANTIATED_VE_DESC_TAG 0x0001000C
+struct deinstantiated_ve_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t global_vem_id[16];
+ uint32_t word6;
+#define lpfc_deinstantiated_nport_id_SHIFT 0
+#define lpfc_deinstantiated_nport_id_MASK 0x000000ff
+#define lpfc_deinstantiated_nport_id_WORD word6
+#define lpfc_deinstantiated_local_id_SHIFT 24
+#define lpfc_deinstantiated_local_id_MASK 0x00ffffff
+#define lpfc_deinstantiated_local_id_WORD word6
+};
+
+/* Query Fabric Priority Allocation Response */
+#define LPFC_PRIORITY_RANGE_DESC_SIZE 12
+
+struct priority_range_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t lo_range;
+ uint8_t hi_range;
+ uint8_t qos_priority;
+ uint8_t local_ve_id;
+};
+
+struct fc_qfpa_res {
+ uint32_t reply_sequence; /* LS_ACC or LS_RJT */
+ uint32_t length; /* FC Word 1 */
+ struct priority_range_desc desc[1];
+};
+
+/* Application Server command code */
+/* VMID */
+
+#define SLI_CT_APP_SEV_Subtypes 0x20 /* Application Server subtype */
+
+#define SLI_CTAS_GAPPIA_ENT 0x0100 /* Get Application Identifier */
+#define SLI_CTAS_GALLAPPIA 0x0101 /* Get All Application Identifier */
+#define SLI_CTAS_GALLAPPIA_ID 0x0102 /* Get All Application Identifier */
+ /* for Nport */
+#define SLI_CTAS_GAPPIA_IDAPP 0x0103 /* Get Application Identifier */
+ /* for Nport */
+#define SLI_CTAS_RAPP_IDENT 0x0200 /* Register Application Identifier */
+#define SLI_CTAS_DAPP_IDENT 0x0300 /* Deregister Application */
+ /* Identifier */
+#define SLI_CTAS_DALLAPP_ID 0x0301 /* Deregister All Application */
+ /* Identifier */
+
+struct entity_id_object {
+ uint8_t entity_id_len;
+ uint8_t entity_id[255]; /* VM UUID */
+};
+
+struct app_id_object {
+ uint32_t port_id;
+ uint32_t app_id;
+ struct entity_id_object obj;
+};
+
+struct lpfc_vmid_rapp_ident_list {
+ uint32_t no_of_objects;
+ struct entity_id_object obj[1];
+};
+
+struct lpfc_vmid_dapp_ident_list {
+ uint32_t no_of_objects;
+ struct entity_id_object obj[1];
+};
+
+#define GALLAPPIA_ID_LAST 0x80
+struct lpfc_vmid_gallapp_ident_list {
+ uint8_t control;
+ uint8_t reserved[3];
+ struct app_id_object app_id;
+};
+
+#define RAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define DAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define GALLAPPIA_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define DALLAPP_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4)
+
/******** FDMI ********/
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f77e71e6dbbd..eb8c735a243b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -273,6 +273,9 @@ struct lpfc_sli4_flags {
#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
#define lpfc_vfi_rsrc_rdy_WORD word0
#define LPFC_VFI_RSRC_RDY 1
+#define lpfc_ftr_ashdr_SHIFT 4
+#define lpfc_ftr_ashdr_MASK 0x00000001
+#define lpfc_ftr_ashdr_WORD word0
};
struct sli4_bls_rsp {
@@ -2944,6 +2947,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2
+#define lpfc_mbx_rq_ftr_rq_ashdr_SHIFT 17
+#define lpfc_mbx_rq_ftr_rq_ashdr_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_ashdr_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -2975,6 +2981,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_ashdr_SHIFT 17
+#define lpfc_mbx_rq_ftr_rsp_ashdr_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ashdr_WORD word3
};
struct lpfc_mbx_memory_dump_type3 {
@@ -4219,6 +4228,9 @@ struct wqe_common {
#define wqe_xchg_WORD word10
#define LPFC_SCSI_XCHG 0x0
#define LPFC_NVME_XCHG 0x1
+#define wqe_appid_SHIFT 5
+#define wqe_appid_MASK 0x00000001
+#define wqe_appid_WORD word10
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5f018d02bf56..e29523a1b530 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -98,6 +98,7 @@ static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
+static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
/**
* lpfc_config_port_prep - Perform lpfc initialization prior to config port
@@ -2888,6 +2889,10 @@ lpfc_cleanup(struct lpfc_vport *vport)
if (phba->link_state > LPFC_LINK_DOWN)
lpfc_port_link_failure(vport);
+ /* Clean up VMID resources */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_vmid_vport_cleanup(vport);
+
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
@@ -3532,13 +3537,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* Driver must assume RPI is invalid for
- * any unused or inactive node.
- */
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- continue;
- }
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -4315,6 +4313,55 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
}
/**
+ * lpfc_vmid_res_alloc - Allocates resources for VMID
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to vport data structure
+ *
+ * This routine allocates the resources needed for VMID.
+ *
+ * Return codes
+ * 0 on Success
+ * Non-0 on Failure
+ */
+static int
+lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ /* VMID feature is supported only on SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ phba->cfg_vmid_app_header = 0;
+ phba->cfg_vmid_priority_tagging = 0;
+ }
+
+ if (lpfc_is_vmid_enabled(phba)) {
+ vport->vmid =
+ kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
+ GFP_KERNEL);
+ if (!vport->vmid)
+ return -ENOMEM;
+
+ rwlock_init(&vport->vmid_lock);
+
+ /* Set the VMID parameters for the vport */
+ vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
+ vport->vmid_inactivity_timeout =
+ phba->cfg_vmid_inactivity_timeout;
+ vport->max_vmid = phba->cfg_max_vmid;
+ vport->cur_vmid_cnt = 0;
+
+ vport->vmid_priority_range = bitmap_zalloc
+ (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
+
+ if (!vport->vmid_priority_range) {
+ kfree(vport->vmid);
+ return -ENOMEM;
+ }
+
+ hash_init(vport->hash_table);
+ }
+ return 0;
+}
+
+/**
* lpfc_create_port - Create an FC port
* @phba: pointer to lpfc hba data structure.
* @instance: a unique integer ID to this FC port.
@@ -4466,6 +4513,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->port_type, shost->sg_tablesize,
phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
+ /* Allocate the resources for VMID */
+ rc = lpfc_vmid_res_alloc(phba, vport);
+
+ if (rc)
+ goto out;
+
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@@ -4490,6 +4543,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
return vport;
out_put_shost:
+ kfree(vport->vmid);
+ bitmap_free(vport->vmid_priority_range);
scsi_host_put(shost);
out:
return NULL;
@@ -4789,6 +4844,42 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
}
/**
+ * lpfc_vmid_poll - VMID timeout detection
+ * @t: timer context; used to derive the lpfc_hba data structure pointer.
+ *
+ * This routine is invoked when there is no I/O by a VM for the specified
+ * amount of time. When this situation is detected, the VMID has to be
+ * deregistered from the switch and all the local resources freed. The VMID
+ * will be reassigned to the VM once the I/O begins.
+ **/
+static void
+lpfc_vmid_poll(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
+ u32 wake_up = 0;
+
+ /* check if there is a need to issue QFPA */
+ if (phba->pport->vmid_priority_tagging) {
+ wake_up = 1;
+ phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+
+ /* Is the vmid inactivity timer enabled */
+ if (phba->pport->vmid_inactivity_timeout ||
+ phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
+ wake_up = 1;
+ phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
+ }
+
+ if (wake_up)
+ lpfc_worker_wake_up(phba);
+
+ /* restart the timer for the next iteration */
+ mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
+ LPFC_VMID_TIMER));
+}
+
+/**
* lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async link completion queue entry.
@@ -6636,6 +6727,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+ /* for VMID idle timeout if VMID is enabled */
+ if (lpfc_is_vmid_enabled(phba))
+ timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
+
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
@@ -13051,7 +13146,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
if (fw_upgrade == INT_FW_UPGRADE) {
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
file_name, &phba->pcidev->dev,
GFP_KERNEL, (void *)phba,
lpfc_write_firmware);
@@ -13098,6 +13193,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
if (!phba)
return -ENOMEM;
+ INIT_LIST_HEAD(&phba->poll_list);
+
/* Perform generic PCI device enabling operation */
error = lpfc_enable_pci_dev(phba);
if (error)
@@ -13232,7 +13329,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
- INIT_LIST_HEAD(&phba->poll_list);
timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1b40a3bbd1cd..84bc373190d8 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -522,7 +522,8 @@ lpfc_init_link(struct lpfc_hba * phba,
}
/* Enable asynchronous ABTS responses from firmware */
- mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+ if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp)
+ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
/* NEW_FEATURE
* Setting up the link speed
@@ -2100,6 +2101,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
}
+
+ /* Enable Application Services Header for appheader VMID */
+ if (phba->cfg_vmid_app_header) {
+ bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1);
+ }
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bb4e65a32ecc..e12f83fb795c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -567,15 +567,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* no deferred ACC */
kfree(save_iocb);
- /* In order to preserve RPIs, we want to cleanup
- * the default RPI the firmware created to rcv
- * this ELS request. The only way to do this is
- * to register, then unregister the RPI.
+ /* This is an NPIV SLI4 instance that does not need to register
+ * a default RPI.
*/
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
- NLP_RCV_PLOGI);
- spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ login_mbox = NULL;
+ } else {
+ /* In order to preserve RPIs, we want to cleanup
+ * the default RPI the firmware created to rcv
+ * this ELS request. The only way to do this is
+ * to register, then unregister the RPI.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+ NLP_RCV_PLOGI);
+ spin_unlock_irq(&ndlp->lock);
+ }
+
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
@@ -653,6 +662,10 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
ndlp, NULL);
}
+
+ /* This nlp_put pairs with lpfc_sli4_resume_rpi */
+ lpfc_nlp_put(ndlp);
+
kfree(elsiocb);
mempool_free(mboxq, phba->mbox_mem_pool);
}
@@ -772,6 +785,15 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ /* This clause allows the initiator to ACC the LOGO back to the
+ * Fabric Domain Controller. It deliberately skips all other
+ * steps because some fabrics send RDP requests after logging out
+ * from the initiator.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
+ return 0;
+
/* Notify transport of connectivity loss to trigger cleanup. */
if (phba->nvmet_support &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
@@ -1410,6 +1432,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
+ /* Fabric Controller Node needs these parameters. */
+ memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm));
break;
case FDMI_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 41e49f61fac2..bcc804cefd30 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1049,9 +1049,19 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->transferred_length = wcqe->total_data_placed;
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
- /* Sanity check */
- if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
+
+ /* Check if this is really an ERSP */
+ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ lpfc_ncmd->result = 0;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6084 NVME Completion ERSP: "
+ "xri %x placed x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ wcqe->total_data_placed);
break;
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index eefbb9b22798..1b248c237be1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -28,6 +28,7 @@
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
+#include <linux/blk-cgroup.h>
#include <net/checksum.h>
#include <scsi/scsi.h>
@@ -86,6 +87,14 @@ static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp);
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+ struct scsi_cmnd *cmd, struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag);
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid);
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
@@ -518,6 +527,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
int rrq_empty = 0;
struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
+ struct scsi_cmnd *cmd;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
@@ -553,6 +563,31 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
psb->cur_iocbq.sli4_lxritag, rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
+
+ if (phba->cfg_fcp_wait_abts_rsp) {
+ spin_lock_irqsave(&psb->buf_lock, iflag);
+ cmd = psb->pCmd;
+ psb->pCmd = NULL;
+ spin_unlock_irqrestore(&psb->buf_lock, iflag);
+
+ /* The sdev is not guaranteed to be valid post
+ * scsi_done upcall.
+ */
+ if (cmd)
+ cmd->scsi_done(cmd);
+
+ /*
+ * We expect there is an abort thread waiting
+ * for command completion; wake up the thread.
+ */
+ spin_lock_irqsave(&psb->buf_lock, iflag);
+ psb->cur_iocbq.iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ if (psb->waitq)
+ wake_up(psb->waitq);
+ spin_unlock_irqrestore(&psb->buf_lock, iflag);
+ }
+
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
@@ -780,7 +815,8 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq;
if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
- psb->pCmd = NULL;
+ if (!phba->cfg_fcp_wait_abts_rsp)
+ psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
@@ -2869,10 +2905,8 @@ skipit:
}
out:
if (err_type == BGS_GUARD_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
@@ -2880,10 +2914,8 @@ out:
sum, guard_tag);
} else if (err_type == BGS_REFTAG_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2892,10 +2924,8 @@ out:
ref_tag, start_ref_tag);
} else if (err_type == BGS_APPTAG_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2954,10 +2984,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9059 BLKGRD: Guard Tag error in cmd"
@@ -2970,10 +2998,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2987,10 +3013,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3100,10 +3124,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9055 BLKGRD: Guard Tag error in cmd "
@@ -3116,10 +3138,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3133,10 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -4045,6 +4063,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
u32 logit = LOG_FCP;
u32 status, idx;
unsigned long iflags = 0;
+ u8 wait_xb_clr = 0;
/* Sanity check on return of outstanding command */
if (!lpfc_cmd) {
@@ -4096,8 +4115,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ if (phba->cfg_fcp_wait_abts_rsp)
+ wait_xb_clr = 1;
+ }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -4329,6 +4351,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_io_ktime(phba, lpfc_cmd);
}
#endif
+ if (wait_xb_clr)
+ goto out;
lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4343,8 +4367,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
+out:
spin_unlock(&lpfc_cmd->buf_lock);
-
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
@@ -4398,11 +4422,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
- /* pick up SLI4 exhange busy status from HBA */
+ /* pick up SLI4 exchange busy status from HBA */
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
- else
- lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -4601,6 +4624,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_io_ktime(phba, lpfc_cmd);
}
#endif
+
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
@@ -5145,6 +5169,269 @@ void lpfc_poll_timeout(struct timer_list *t)
}
}
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Returns the VMID entry associated with the UUID, or NULL if not found.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
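
lpfc_vmid_hash_fn() folds ASCII uppercase to lowercase, mixes each byte into the accumulator, and masks the result down to a bucket index. A standalone sketch of the same arithmetic, assuming LPFC_VMID_HASH_SHIFT is 8 and LPFC_VMID_HASH_MASK is 0xff (both are defined elsewhere in the driver, so the values here are assumptions); the kernel version uses a signed accumulator and relies on the kernel's wrap-on-overflow build flags, while this demo makes the wraparound explicit with unsigned arithmetic and produces the same masked bucket:

#include <stdio.h>
#include <string.h>

#define DEMO_VMID_HASH_SHIFT 8     /* assumed value of LPFC_VMID_HASH_SHIFT */
#define DEMO_VMID_HASH_MASK  0xff  /* assumed value of LPFC_VMID_HASH_MASK */

static unsigned int demo_vmid_hash(const char *vmid, int len)
{
	unsigned int hash = 0;
	int c;

	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';		/* fold ASCII uppercase to lowercase */
		hash = (hash + ((unsigned int)c << DEMO_VMID_HASH_SHIFT) +
			((unsigned int)c >> DEMO_VMID_HASH_SHIFT)) * 19;
	}
	return hash & DEMO_VMID_HASH_MASK;	/* bucket index into the VMID hash table */
}

int main(void)
{
	const char *uuid = "6f3c9e2a-0000-4000-8000-5254001a2b3c"; /* example UUID string */

	printf("bucket = %u\n", demo_vmid_hash(uuid, (int)strlen(uuid)));
	return 0;
}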
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @cmd: address of scsi cmd descriptor
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
+ *cmd, struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->vmid_priority_tagging)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else
+ tag->app_id = vmp->un.app_id;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @cmd: address of scsi_cmd descriptor
+ * @tag: VMID tag
+ * Returns 0 on success, or a negative errno (-EAGAIN, -EBUSY, -ENOMEM, -EIO)
+ */
+static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
+ scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
+ LPFC_VMID_QFPA_CMPL)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* search if the UUID has already been mapped to the VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if it is already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, cmd, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* a register or deregister request has already been sent; */
+ /* hence, no VMID tag will be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. At this point, */
+ /* drop the read lock first before proceeding further */
+ read_unlock(&vport->vmid_lock);
+ /* start the process of obtaining one, based on */
+ /* the configured VMID type */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* re-check in case the entry was added by another context */
+ /* while the read lock was dropped, or is being added now */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, cmd, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (lpfc_vmid_is_type_priority_tag(vport))
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per-CPU variable that holds the last */
+ /* access timestamp, if it is not already allocated */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = __alloc_percpu(sizeof(u64),
+ __alignof__(struct
+ lpfc_vmid));
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete transaction with switch */
+ if (lpfc_vmid_is_type_priority_tag(vport))
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
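
Note the locking shape in lpfc_vmid_get_appid(): vmid_lock cannot be upgraded from reader to writer, so after dropping the read lock the function re-runs the lookup under the write lock, because another CPU may have registered the same UUID in that window. A userspace analogue of this find-or-create pattern, using a pthread rwlock and a single slot in place of the driver's hash table (a sketch, not the driver's code):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;
static char *registered_uuid;	/* single-slot stand-in for the driver's hash table */

/* find-or-create: returns 0 once the UUID is registered, -1 on allocation failure */
static int demo_find_or_create(const char *uuid)
{
	int ok;

	pthread_rwlock_rdlock(&demo_lock);
	ok = registered_uuid && !strcmp(registered_uuid, uuid);
	pthread_rwlock_unlock(&demo_lock);
	if (ok)
		return 0;

	/* re-check under the write lock: the slot may have been filled while
	 * the read lock was dropped, just as the driver re-runs its lookup
	 */
	pthread_rwlock_wrlock(&demo_lock);
	if (!registered_uuid || strcmp(registered_uuid, uuid)) {
		registered_uuid = malloc(strlen(uuid) + 1);
		if (registered_uuid)
			strcpy(registered_uuid, uuid);
	}
	ok = registered_uuid != NULL;
	pthread_rwlock_unlock(&demo_lock);
	return ok ? 0 : -1;
}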
+
+/*
+ * lpfc_is_command_vm_io - get the UUID from blk cgroup
+ * @cmd: Pointer to scsi_cmnd data structure
+ * Returns UUID if present, otherwise NULL
+ */
+static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
+{
+ char *uuid = NULL;
+
+ if (cmd->request) {
+ if (cmd->request->bio)
+ uuid = blkcg_get_fc_appid(cmd->request->bio);
+ }
+ return uuid;
+}
+
/**
* lpfc_queuecommand - scsi_host_template queuecommand entry point
* @shost: kernel scsi host pointer.
@@ -5168,6 +5455,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct lpfc_io_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
+ u8 *uuid = NULL;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0L;
@@ -5297,6 +5585,25 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* if the I/O was generated by a VM, get the associated */
+ /* virtual entity ID */
+ uuid = lpfc_is_command_vm_io(cmnd);
+
+ if (uuid) {
+ err = lpfc_vmid_get_appid(vport, uuid, cmnd,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_cmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
+ }
+ }
+
+ atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
@@ -5384,6 +5691,31 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
return 0;
}
+/*
+ * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
+ * @vport: The virtual port for which this call is being executed.
+ */
+void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
+{
+ u32 bucket;
+ struct lpfc_vmid *cur;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ del_timer_sync(&vport->phba->inactive_vmid_poll);
+
+ kfree(vport->qfpa_res);
+ kfree(vport->vmid_priority.vmid_range);
+ kfree(vport->vmid);
+
+ if (!hash_empty(vport->hash_table))
+ hash_for_each(vport->hash_table, bucket, cur, hnode)
+ hash_del(&cur->hnode);
+
+ vport->qfpa_res = NULL;
+ vport->vmid_priority.vmid_range = NULL;
+ vport->vmid = NULL;
+ vport->cur_vmid_cnt = 0;
+}
/**
* lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fc3682f15f50..f530d8fe7a8c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2679,6 +2679,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
}
+ /* This nlp_put pairs with lpfc_sli4_resume_rpi */
+ if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ lpfc_nlp_put(ndlp);
+ }
+
/* Check security permission status on INIT_LINK mailbox command */
if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
(pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
@@ -2749,7 +2755,6 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
-
lpfc_nlp_put(ndlp);
}
}
@@ -7694,6 +7699,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ /* Disable VMID if app header is not supported */
+ if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
+ &mqe->un.req_ftrs))) {
+ bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
+ phba->cfg_vmid_app_header = 0;
+ lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
+ "1242 vmid feature not supported\n");
+ }
+
/*
* The port must support FCP initiator mode as this is the
* only mode running in the host.
@@ -7959,7 +7973,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0393 Error %d during rpi post operation\n",
rc);
rc = -ENODEV;
- goto out_destroy_queue;
+ goto out_free_iocblist;
}
lpfc_sli4_node_prep(phba);
@@ -8125,8 +8139,9 @@ out_io_buff_free:
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
lpfc_sli4_queue_unset(phba);
-out_destroy_queue:
+out_free_iocblist:
lpfc_free_iocb_list(phba);
+out_destroy_queue:
lpfc_sli4_queue_destroy(phba);
out_stop_timers:
lpfc_stop_hba_timers(phba);
@@ -9751,6 +9766,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
*pcmd == ELS_CMD_RSCN_XMT ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_QFPA ||
+ *pcmd == ELS_CMD_UVEM ||
*pcmd == ELS_CMD_PLOGI)) {
bf_set(els_req64_sp, &wqe->els_req, 1);
bf_set(els_req64_sid, &wqe->els_req,
@@ -10313,6 +10330,18 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
}
+ /* add the VMID tags as per switch response */
+ if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (piocb->vmid_tag.cs_ctl_vmid));
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = piocb->vmid_tag.app_id;
+ }
+ }
rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
return rc;
}
@@ -13625,9 +13654,15 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- /* Reg_LOGIN of dflt RPI was successful. Now lets get
- * RID of the PPI using the same mbox buffer.
+
+ /* Reg_LOGIN of dflt RPI was successful. Mark the
+ * node as having an UNREG_LOGIN in progress to stop
+ * an unsolicited PLOGI from the same NPortId from
+ * starting another mailbox transaction.
*/
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_unreg_login(phba, vport->vpi,
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
@@ -17943,7 +17978,6 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
seq_dmabuf->time_stamp = jiffies;
lpfc_update_rcv_time_stamp(vport);
if (list_empty(&seq_dmabuf->dbuf.list)) {
- temp_hdr = dmabuf->hbuf.virt;
list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
return seq_dmabuf;
}
@@ -19032,14 +19066,28 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
if (!mboxq)
return -ENOMEM;
+ /* If cmpl assigned, then this nlp_get pairs with
+ * lpfc_mbx_cmpl_resume_rpi.
+ *
+ * Else cmpl is NULL, then this nlp_get pairs with
+ * lpfc_sli_def_mbox_cmpl.
+ */
+ if (!lpfc_nlp_get(ndlp)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2122 %s: Failed to get nlp ref\n",
+ __func__);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
/* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
mboxq->ctx_buf = arg;
- mboxq->ctx_ndlp = ndlp;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mboxq->ctx_ndlp = ndlp;
mboxq->vport = ndlp->vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
@@ -19047,6 +19095,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
"2010 Resume RPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ lpfc_nlp_put(ndlp);
mempool_free(mboxq, phba->mbox_mem_pool);
return -EIO;
}
@@ -20136,8 +20185,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand != MBX_REG_VPI))
continue;
- list_del(&mb->list);
- list_add_tail(&mb->list, &mbox_cmd_list);
+ list_move_tail(&mb->list, &mbox_cmd_list);
}
/* Clean up active mailbox command with the vport */
mb = phba->sli.mbox_active;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 4f6936014ff5..dde8eb9d796d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,6 +35,12 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
+union lpfc_vmid_iocb_tag {
+ uint32_t app_id;
+ uint8_t cs_ctl_vmid;
+ struct lpfc_vmid_context *vmid_context; /* UVEM context information */
+};
+
struct lpfc_cq_event {
struct list_head list;
uint16_t hdwq;
@@ -100,12 +106,14 @@ struct lpfc_iocbq {
#define LPFC_IO_NVME 0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */
#define LPFC_IO_NVMET 0x800000 /* NVMET command */
+#define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void *context3; /* caller context information */
+ uint32_t event_tag; /* LA Event tag */
union {
wait_queue_head_t *wait_queue;
struct lpfc_iocbq *rsp_iocb;
@@ -114,6 +122,7 @@ struct lpfc_iocbq {
struct lpfc_node_rrq *rrq;
} context_un;
+ union lpfc_vmid_iocb_tag vmid_tag;
void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4b8e89375644..2d62fd2a9824 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.9"
+#define LPFC_DRIVER_VERSION "12.8.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 80f546976c7e..56910e94dbf2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1583,9 +1583,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
memcpy(cmd->sense_buffer, pthru->reqsensearea,
14);
- cmd->result = (DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (CHECK_CONDITION << 1);
+ cmd->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
@@ -1593,14 +1591,10 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
memcpy(cmd->sense_buffer,
epthru->reqsensearea, 14);
- cmd->result = (DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (CHECK_CONDITION << 1);
- } else {
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = ABORTED_COMMAND;
- cmd->result |= (CHECK_CONDITION << 1);
- }
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+ } else
+ scsi_build_sense(cmd, 0,
+ ABORTED_COMMAND, 0, 0);
}
break;
@@ -1617,7 +1611,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if( cmd->cmnd[0] == TEST_UNIT_READY ) {
cmd->result |= (DID_ERROR << 16) |
- (RESERVATION_CONFLICT << 1);
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
@@ -1629,7 +1623,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmd->cmnd[0] == RELEASE) ) {
cmd->result |= (DID_ERROR << 16) |
- (RESERVATION_CONFLICT << 1);
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
#endif
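
The removed megaraid code built a fixed-format sense block by hand (0x70 in byte 0, the sense key in byte 2, the ASC in byte 12); scsi_build_sense() now fills the same buffer and sets CHECK CONDITION status, with the second argument 0 at these call sites selecting the fixed format. A driver-independent sketch of that fixed-format layout, with field offsets per SPC:

#include <stdint.h>
#include <string.h>

/* fill an SPC fixed-format sense buffer: response code 0x70, current error */
static void demo_fill_fixed_sense(uint8_t *buf, size_t len,
				  uint8_t key, uint8_t asc, uint8_t ascq)
{
	memset(buf, 0, len);
	buf[0]  = 0x70;		/* fixed format, current error */
	buf[2]  = key & 0x0f;	/* sense key, e.g. ABORTED COMMAND = 0x0b */
	buf[7]  = 0x0a;		/* additional sense length for an 18-byte buffer */
	buf[12] = asc;		/* additional sense code */
	buf[13] = ascq;		/* additional sense code qualifier */
}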
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 145fde302d7d..d20c2e4ee793 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -121,8 +121,8 @@ static irqreturn_t megaraid_isr(int, void *);
static void megaraid_mbox_dpc(unsigned long);
-static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
-static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_mbox_app_hndl_show(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_mbox_ld_show(struct device *, struct device_attribute *attr, char *);
static int megaraid_cmm_register(adapter_t *);
static int megaraid_cmm_unregister(adapter_t *);
@@ -302,8 +302,7 @@ static struct pci_driver megaraid_pci_driver = {
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)
-static DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
- NULL);
+static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_shost_attrs[] = {
@@ -312,7 +311,7 @@ static struct device_attribute *megaraid_shost_attrs[] = {
};
-static DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
@@ -1574,10 +1573,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
}
if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = ILLEGAL_REQUEST;
- scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
- scp->result = CHECK_CONDITION << 1;
+ scsi_build_sense(scp, 0, ILLEGAL_REQUEST,
+ MEGA_INVALID_FIELD_IN_CDB, 0);
return NULL;
}
@@ -2301,8 +2298,7 @@ megaraid_mbox_dpc(unsigned long devp)
memcpy(scp->sense_buffer, pthru->reqsensearea,
14);
- scp->result = DRIVER_SENSE << 24 |
- DID_OK << 16 | CHECK_CONDITION << 1;
+ scp->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->cmd == MBOXCMD_EXTPTHRU) {
@@ -2310,14 +2306,10 @@ megaraid_mbox_dpc(unsigned long devp)
memcpy(scp->sense_buffer,
epthru->reqsensearea, 14);
- scp->result = DRIVER_SENSE << 24 |
- DID_OK << 16 |
- CHECK_CONDITION << 1;
- } else {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = ABORTED_COMMAND;
- scp->result = CHECK_CONDITION << 1;
- }
+ scp->result = SAM_STAT_CHECK_CONDITION;
+ } else
+ scsi_build_sense(scp, 0,
+ ABORTED_COMMAND, 0, 0);
}
break;
@@ -2334,7 +2326,7 @@ megaraid_mbox_dpc(unsigned long devp)
*/
if (scp->cmnd[0] == TEST_UNIT_READY) {
scp->result = DID_ERROR << 16 |
- RESERVATION_CONFLICT << 1;
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
@@ -2345,7 +2337,7 @@ megaraid_mbox_dpc(unsigned long devp)
scp->cmnd[0] == RELEASE)) {
scp->result = DID_ERROR << 16 |
- RESERVATION_CONFLICT << 1;
+ SAM_STAT_RESERVATION_CONFLICT;
}
else {
scp->result = DID_BAD_TARGET << 16 | status;
@@ -3240,8 +3232,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
int i;
uint32_t dword;
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = 0xFF;
@@ -3970,7 +3960,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/**
- * megaraid_sysfs_show_app_hndl - display application handle for this adapter
+ * megaraid_mbox_app_hndl_show - display application handle for this adapter
* @dev : class device object representation for the host
* @attr : device attribute (unused)
* @buf : buffer to send data to
@@ -3980,8 +3970,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
* handle, since we do not interface with applications directly.
*/
static ssize_t
-megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
- char *buf)
+megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
@@ -3994,7 +3983,7 @@ megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
/**
- * megaraid_sysfs_show_ldnum - display the logical drive number for this device
+ * megaraid_mbox_ld_show - display the logical drive number for this device
* @dev : device object representation for the scsi device
* @attr : device attribute to show
* @buf : buffer to send data to
@@ -4009,7 +3998,7 @@ megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
* <int> <int> <int> <int>
*/
static ssize_t
-megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
+megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index abf7b401f5b9..c509440bd161 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd;
uint32_t adapno;
int iterator;
-
+ bool is_found;
if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT;
@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
adapter = NULL;
iterator = 0;
+ is_found = false;
list_for_each_entry(adapter, &adapters_list_g, list) {
- if (iterator++ == adapno) break;
+ if (iterator++ == adapno) {
+ is_found = true;
+ break;
+ }
}
- if (!adapter) {
+ if (!is_found) {
*rval = -ENODEV;
return NULL;
}
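
The is_found flag is needed because list_for_each_entry() never leaves its cursor NULL: when the list is exhausted, the cursor holds a container_of() of the list head itself, so the old "if (!adapter)" test could never fire. A minimal sketch of the found-flag pattern in kernel idiom, with illustrative types:

#include <linux/list.h>
#include <linux/types.h>

struct demo_adapter {
	int unique_id;
	struct list_head list;
};

static LIST_HEAD(demo_adapters);

static struct demo_adapter *demo_lookup(int unique_id)
{
	struct demo_adapter *a;
	bool found = false;

	list_for_each_entry(a, &demo_adapters, list) {
		if (a->unique_id == unique_id) {
			found = true;
			break;
		}
	}
	/* after the loop 'a' is never NULL; only the flag says whether it is valid */
	return found ? a : NULL;
}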
@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno;
int iterator;
mraid_mmadp_t* adapter;
+ bool is_found;
/*
* When the kioc returns from driver, make sure it still doesn't
@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0;
adapter = NULL;
adapno = kioc->adapno;
+ is_found = false;
con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n"));
list_for_each_entry(adapter, &adapters_list_g, list) {
- if (iterator++ == adapno) break;
+ if (iterator++ == adapno) {
+ is_found = true;
+ break;
+ }
}
kioc->timedout = 0;
- if (adapter) {
+ if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc );
- }
+
}
else {
wake_up(&wait_q);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index b5a765b73c76..7af2c23652b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.714.04.00-rc1"
-#define MEGASAS_RELDATE "Apr 14, 2020"
+#define MEGASAS_VERSION "07.717.02.00-rc1"
+#define MEGASAS_RELDATE "May 19, 2021"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -2262,6 +2262,15 @@ enum MR_PERF_MODE {
(mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
"Unknown")
+enum MEGASAS_LD_TARGET_ID_STATUS {
+ LD_TARGET_ID_INITIAL,
+ LD_TARGET_ID_ACTIVE,
+ LD_TARGET_ID_DELETED,
+};
+
+#define MEGASAS_TARGET_ID(sdev) \
+ (((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id)
+
struct megasas_instance {
unsigned int *reply_map;
@@ -2326,6 +2335,9 @@ struct megasas_instance {
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
+ u8 ld_tgtid_status[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids_prev[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids_from_raidmap[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 4d4e9dbe5193..ec10b2497310 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -141,6 +141,8 @@ static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
struct scsi_device *sdev);
+static void
+megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
/*
* PCI ID table for all supported controllers
@@ -213,7 +215,7 @@ static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;
/* define lock for aen poll */
-static spinlock_t poll_aen_lock;
+static DEFINE_SPINLOCK(poll_aen_lock);
extern struct dentry *megasas_debugfs_root;
extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
@@ -436,6 +438,12 @@ megasas_decode_evt(struct megasas_instance *instance)
(class_locale.members.locale),
format_class(class_locale.members.class),
evt_detail->description);
+
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "evt_detail.args.ld.target_id/index %d/%d\n",
+ evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
+
}
/*
@@ -1779,6 +1787,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
struct MR_PRIV_DEVICE *mr_device_priv_data;
+ u32 ld_tgt_id;
instance = (struct megasas_instance *)
scmd->device->host->hostdata;
@@ -1805,17 +1814,21 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
}
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ mr_device_priv_data = scmd->device->hostdata;
+ if (!mr_device_priv_data ||
+ (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- mr_device_priv_data = scmd->device->hostdata;
- if (!mr_device_priv_data) {
- scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
- return 0;
+ if (MEGASAS_IS_LOGICAL(scmd->device)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
+ if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
}
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
@@ -2095,7 +2108,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
static int megasas_slave_alloc(struct scsi_device *sdev)
{
- u16 pd_index = 0;
+ u16 pd_index = 0, ld_tgt_id;
struct megasas_instance *instance ;
struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -2120,6 +2133,14 @@ scan_target:
GFP_KERNEL);
if (!mr_device_priv_data)
return -ENOMEM;
+
+ if (MEGASAS_IS_LOGICAL(sdev)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
+ }
+
sdev->hostdata = mr_device_priv_data;
atomic_set(&mr_device_priv_data->r1_ldio_hint,
@@ -2129,6 +2150,19 @@ scan_target:
static void megasas_slave_destroy(struct scsi_device *sdev)
{
+ u16 ld_tgt_id;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ if (MEGASAS_IS_LOGICAL(sdev)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ sdev_printk(KERN_INFO, sdev,
+ "LD target ID %d removed from OS stack\n", ld_tgt_id);
+ }
+
kfree(sdev->hostdata);
sdev->hostdata = NULL;
}
@@ -3525,6 +3559,22 @@ megasas_complete_abort(struct megasas_instance *instance,
}
}
+static void
+megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
+{
+ uint i;
+
+ for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
+ if (instance->ld_ids_prev[i] != 0xff &&
+ instance->ld_ids_from_raidmap[i] == 0xff) {
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "LD target ID %d removed from RAID map\n", i);
+ instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
+ }
+ }
+}
+
/**
* megasas_complete_cmd - Completes a command
* @instance: Adapter soft state
@@ -3617,8 +3667,6 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->scmd->sense_buffer, cmd->sense,
hdr->sense_len);
-
- cmd->scmd->result |= DRIVER_SENSE << 24;
}
break;
@@ -3687,9 +3735,13 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
fusion->fast_path_io = 0;
}
+ if (instance->adapter_type >= INVADER_SERIES)
+ megasas_set_ld_removed_by_fw(instance);
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
+
break;
}
if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
@@ -7545,11 +7597,16 @@ static int megasas_probe_one(struct pci_dev *pdev,
return 0;
fail_start_aen:
+ instance->unload = 1;
+ scsi_remove_host(instance->host);
fail_io_attach:
megasas_mgmt_info.count--;
megasas_mgmt_info.max_index--;
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
@@ -7557,8 +7614,16 @@ fail_io_attach:
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
+
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
+ instance->msix_vectors = 0;
+
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
+
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
fail_init_mfi:
scsi_host_put(host);
fail_alloc_instance:
@@ -8818,8 +8883,10 @@ megasas_aen_polling(struct work_struct *work)
union megasas_evt_class_locale class_locale;
int event_type = 0;
u32 seq_num;
+ u16 ld_target_id;
int error;
u8 dcmd_ret = DCMD_SUCCESS;
+ struct scsi_device *sdev1;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -8842,12 +8909,23 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
- case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
+ ld_target_id = instance->evt_detail->args.ld.target_id;
+ sdev1 = scsi_device_lookup(instance->host,
+ MEGASAS_MAX_PD_CHANNELS +
+ (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
+ (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
+ 0);
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
+
+ event_type = SCAN_VD_CHANNEL;
+ break;
case MR_EVT_LD_CREATED:
event_type = SCAN_VD_CHANNEL;
break;
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
@@ -8934,8 +9012,6 @@ static int __init megasas_init(void)
*/
pr_info("megasas: %s\n", MEGASAS_VERSION);
- spin_lock_init(&poll_aen_lock);
-
support_poll_for_event = 2;
support_device_change = 1;
support_nvme_encapsulation = true;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index b6c08d620033..83f69c33b01a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -349,6 +349,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+ memcpy(instance->ld_ids_prev,
+ instance->ld_ids_from_raidmap,
+ sizeof(instance->ld_ids_from_raidmap));
+ memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS);
/*Convert Raid capability values to CPU arch */
for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
ld = MR_TargetIdToLdGet(i, drv_map);
@@ -359,7 +363,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
raid = MR_LdRaidGet(ld, drv_map);
le32_to_cpus((u32 *)&raid->capability);
-
+ instance->ld_ids_from_raidmap[i] = i;
num_lds--;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 2221175ae051..06399c026a8d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2051,7 +2051,6 @@ map_cmd_status(struct fusion_context *fusion,
SCSI_SENSE_BUFFERSIZE);
memcpy(scmd->sense_buffer, sense,
SCSI_SENSE_BUFFERSIZE);
- scmd->result |= DRIVER_SENSE << 24;
}
/*
@@ -3203,6 +3202,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
{
int sge_count;
u8 cmd_type;
+ u16 pd_index = 0;
+ u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scp->device->hostdata;
@@ -3237,8 +3238,12 @@ megasas_build_io_fusion(struct megasas_instance *instance,
megasas_build_syspd_fusion(instance, scp, cmd, true);
break;
case NON_READ_WRITE_SYSPDIO:
- if (instance->secure_jbod_support ||
- mr_device_priv_data->is_tm_capable)
+ pd_index = MEGASAS_PD_INDEX(scp);
+ drive_type = instance->pd_list[pd_index].driveType;
+ if ((instance->secure_jbod_support ||
+ mr_device_priv_data->is_tm_capable) ||
+ (instance->adapter_type >= VENTURA_SERIES &&
+ drive_type == TYPE_ENCLOSURE))
megasas_build_syspd_fusion(instance, scp, cmd, false);
else
megasas_build_syspd_fusion(instance, scp, cmd, true);
@@ -3739,6 +3744,7 @@ static void megasas_sync_irqs(unsigned long instance_addr)
if (irq_ctx->irq_poll_scheduled) {
irq_ctx->irq_poll_scheduled = false;
enable_irq(irq_ctx->os_irq);
+ complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
}
}
}
@@ -3770,6 +3776,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
irq_poll_complete(irqpoll);
irq_ctx->irq_poll_scheduled = false;
enable_irq(irq_ctx->os_irq);
+ complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
}
return num_entries;
@@ -3786,6 +3793,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
+ struct megasas_irq_context *irq_ctx = NULL;
u32 count, MSIxIndex;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
@@ -3794,8 +3802,10 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
- for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
- complete_cmd_fusion(instance, MSIxIndex, NULL);
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) {
+ irq_ctx = &instance->irq_context[MSIxIndex];
+ complete_cmd_fusion(instance, MSIxIndex, irq_ctx);
+ }
}
/**
@@ -5266,6 +5276,7 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
+ kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 0a9f4e44ab2c..78b72bcf58fe 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -595,9 +595,10 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
- cmd->result = (ms->stat << 16) | cmd->SCp.Status;
+ set_host_byte(cmd, ms->stat);
+ set_status_byte(cmd, cmd->SCp.Status);
if (ms->stat == DID_OK)
- cmd->result |= cmd->SCp.Message << 8;
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
@@ -993,7 +994,7 @@ static void handle_reset(struct mesh_state *ms)
for (tgt = 0; tgt < 8; ++tgt) {
tp = &ms->tgts[tgt];
if ((cmd = tp->current_req) != NULL) {
- cmd->result = DID_RESET << 16;
+ set_host_byte(cmd, DID_RESET);
tp->current_req = NULL;
mesh_completed(ms, cmd);
}
@@ -1003,7 +1004,7 @@ static void handle_reset(struct mesh_state *ms)
ms->current_req = NULL;
while ((cmd = ms->request_q) != NULL) {
ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
- cmd->result = DID_RESET << 16;
+ set_host_byte(cmd, DID_RESET);
mesh_completed(ms, cmd);
}
ms->phase = idle;
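
mesh.c previously composed cmd->result by hand; set_host_byte() and set_status_byte() write the same fields, with the host byte in bits 16-23 of result and the SCSI status byte in bits 0-7. A sketch of the packing, with example constants assumed from the host-byte and SAM status definitions (DID_RESET = 0x08, SAM_STAT_CHECK_CONDITION = 0x02):

#include <stdint.h>
#include <assert.h>

#define DEMO_DID_RESET			0x08	/* assumed value of DID_RESET */
#define DEMO_SAM_STAT_CHECK_CONDITION	0x02	/* assumed value of SAM_STAT_CHECK_CONDITION */

static inline uint32_t demo_set_host_byte(uint32_t result, uint8_t host)
{
	return (result & 0xff00ffffu) | ((uint32_t)host << 16);
}

static inline uint32_t demo_set_status_byte(uint32_t result, uint8_t status)
{
	return (result & 0xffffff00u) | status;
}

int main(void)
{
	uint32_t result = 0;

	result = demo_set_host_byte(result, DEMO_DID_RESET);
	result = demo_set_status_byte(result, DEMO_SAM_STAT_CHECK_CONDITION);
	assert(result == 0x00080002u);
	return 0;
}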
diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig
new file mode 100644
index 000000000000..f7882375e74f
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_MPI3MR
+ tristate "Broadcom MPI3 Storage Controller Device Driver"
+ depends on PCI && SCSI
+ help
+ MPI3 based Storage & RAID Controllers Driver.
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
new file mode 100644
index 000000000000..7c2063e04c81
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -0,0 +1,4 @@
+# mpi3mr makefile
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o
+mpi3mr-y += mpi3mr_os.o \
+ mpi3mr_fw.o
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
new file mode 100644
index 000000000000..d43bbecef651
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -0,0 +1,1880 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2017-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_CNFG_H
+#define MPI30_CNFG_H 1
+#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
+#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
+#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03)
+#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
+#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
+#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
+#define MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT (0x20)
+#define MPI3_CONFIG_PAGETYPE_SAS_EXPANDER (0x21)
+#define MPI3_CONFIG_PAGETYPE_SAS_PHY (0x23)
+#define MPI3_CONFIG_PAGETYPE_SAS_PORT (0x24)
+#define MPI3_CONFIG_PAGETYPE_PCIE_IO_UNIT (0x30)
+#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
+#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
+#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI3_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI3_CONFIG_ACTION_READ_DEFAULT (0x01)
+#define MPI3_CONFIG_ACTION_READ_CURRENT (0x02)
+#define MPI3_CONFIG_ACTION_WRITE_CURRENT (0x03)
+#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
+#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
+#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00ff0000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+#define MPI3_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_PHY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000ff)
+#define MPI3_SASPORT_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI3_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+#define MPI3_SASPORT_PGAD_PORT_NUMBER_MASK (0x000000ff)
+#define MPI3_ENCLOS_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+#define MPI3_ENCLOS_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE_PORT_NUM (0x10000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00ff0000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+#define MPI3_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_LINK_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000ff)
+#define MPI3_SECURITY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000)
+#define MPI3_SECURITY_PGAD_FORM_SOT_NUM (0x10000000)
+#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
+#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
+struct mpi3_config_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 page_version;
+ u8 page_number;
+ u8 page_type;
+ u8 action;
+ __le32 page_address;
+ __le16 page_length;
+ __le16 reserved16;
+ __le32 reserved18[2];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_config_page_header {
+ u8 page_version;
+ u8 reserved01;
+ u8 page_number;
+ u8 page_attribute;
+ __le16 page_length;
+ u8 page_type;
+ u8 reserved07;
+};
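
The MPI3_*_PGAD_* values above describe how page_address in struct mpi3_config_request addresses a particular object: the top nibble selects the form and the low bits carry the handle, phy number, slot, and so on. A sketch (hypothetical helper name, not part of this header) composing an address for a DEVICE config page looked up by device handle:

#include <stdint.h>

/* hypothetical helper: address a DEVICE config page by device handle */
static inline uint32_t demo_device_pgad_by_handle(uint16_t handle)
{
	return 0x20000000u /* MPI3_DEVICE_PGAD_FORM_HANDLE */ |
	       ((uint32_t)handle & 0x0000ffffu /* MPI3_DEVICE_PGAD_HANDLE_MASK */);
}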
+
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI3_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI3_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI3_SAS_NEG_LINK_RATE_6_0 (0x0a)
+#define MPI3_SAS_NEG_LINK_RATE_12_0 (0x0b)
+#define MPI3_SAS_NEG_LINK_RATE_22_5 (0x0c)
+#define MPI3_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI3_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI3_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+#define MPI3_SAS_APHYINFO_REASON_MASK (0x0000000f)
+#define MPI3_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI3_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI3_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI3_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI3_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI3_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI3_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI3_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+#define MPI3_SAS_APHYINFO_REASON_EXP_REDUCED_FUNC (0x00000009)
+#define MPI3_SAS_PHYINFO_STATUS_MASK (0xc0000000)
+#define MPI3_SAS_PHYINFO_STATUS_SHIFT (30)
+#define MPI3_SAS_PHYINFO_STATUS_ACCESSIBLE (0x00000000)
+#define MPI3_SAS_PHYINFO_STATUS_NOT_EXIST (0x40000000)
+#define MPI3_SAS_PHYINFO_STATUS_VACANT (0x80000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
+#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI3_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI3_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI3_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI3_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI3_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI3_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI3_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+#define MPI3_SAS_PHYINFO_REASON_EXP_REDUCED_FUNC (0x00090000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI3_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_MASK (0x00000f00)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_SHIFT (8)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_MASK (0x000000f0)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_DIRECT (0x00000000)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SUBTRACTIVE (0x00000010)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_TABLE (0x00000020)
+#define MPI3_SAS_PRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_PRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_PRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_PRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_PRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_PRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_PRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_PRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SAS_HWRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_HWRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_HWRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_HWRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_HWRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+struct mpi3_man_page0 {
+ struct mpi3_config_page_header header;
+ u8 chip_revision[8];
+ u8 chip_name[32];
+ u8 board_name[32];
+ u8 board_assembly[32];
+ u8 board_tracer_number[32];
+ __le32 board_power;
+ __le32 reserved94;
+ __le32 reserved98;
+ u8 oem;
+ u8 sub_oem;
+ __le16 reserved9e;
+ u8 board_mfg_day;
+ u8 board_mfg_month;
+ __le16 board_mfg_year;
+ u8 board_rework_day;
+ u8 board_rework_month;
+ __le16 board_rework_year;
+ __le64 board_revision;
+ u8 e_pack_fru[16];
+ u8 product_name[256];
+};
+
+#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN1_VPD_SIZE (512)
+struct mpi3_man_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ u8 vpd[MPI3_MAN1_VPD_SIZE];
+};
+
+#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man5_phy_entry {
+ __le64 ioc_wwid;
+ __le64 device_name;
+ __le64 sata_wwid;
+};
+
+#ifndef MPI3_MAN5_PHY_MAX
+#define MPI3_MAN5_PHY_MAX (1)
+#endif
+struct mpi3_man_page5 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man5_phy_entry phy[MPI3_MAN5_PHY_MAX];
+};
+
+#define MPI3_MAN5_PAGEVERSION (0x00)
+struct mpi3_man6_gpio_entry {
+ u8 function_code;
+ u8 reserved01;
+ __le16 flags;
+ u8 param1;
+ u8 param2;
+ __le16 reserved06;
+ __le32 param3;
+};
+
+#define MPI3_MAN6_GPIO_FUNCTION_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_FUNCTION_ALTERNATE (0x01)
+#define MPI3_MAN6_GPIO_FUNCTION_EXT_INTERRUPT (0x02)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_ACTIVITY (0x03)
+#define MPI3_MAN6_GPIO_FUNCTION_OVER_TEMPERATURE (0x04)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_GREEN (0x05)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
+#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
+#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
+#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09)
+#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ATTN (0x0d)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f)
+#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
+#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
+#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
+#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01)
+#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SLOW_EDGE (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_MASK (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_100OHM (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_66OHM (0x0040)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_50OHM (0x0080)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_33OHM (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_MASK (0x0030)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_SHIFT (4)
+#define MPI3_MAN6_GPIO_FLAGS_ACTIVE_HIGH (0x0008)
+#define MPI3_MAN6_GPIO_FLAGS_BI_DIR_ENABLED (0x0004)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_MASK (0x0003)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_INPUT (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_DRAIN_OUTPUT (0x0001)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_SOURCE_OUTPUT (0x0002)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_PUSH_PULL_OUTPUT (0x0003)
+#ifndef MPI3_MAN6_GPIO_MAX
+#define MPI3_MAN6_GPIO_MAX (1)
+#endif
+struct mpi3_man_page6 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_gpio;
+ u8 reserved0d[3];
+ struct mpi3_man6_gpio_entry gpio[MPI3_MAN6_GPIO_MAX];
+};
+
+#define MPI3_MAN6_PAGEVERSION (0x00)
+#define MPI3_MAN6_FLAGS_HEARTBEAT_LED_DISABLED (0x0001)
+struct mpi3_man7_receptacle_info {
+ __le32 name[4];
+ u8 location;
+ u8 connector_type;
+ u8 ped_clk;
+ u8 connector_id;
+ __le32 reserved14;
+};
+
+#define MPI3_MAN7_LOCATION_UNKNOWN (0x00)
+#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
+#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
+#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
+#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
+#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
+#define MPI3_MAN7_PEDCLK_ID_MASK (0x0f)
+#ifndef MPI3_MAN7_RECEPTACLE_INFO_MAX
+#define MPI3_MAN7_RECEPTACLE_INFO_MAX (1)
+#endif
+struct mpi3_man_page7 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 num_receptacles;
+ u8 reserved0d[3];
+ __le32 enclosure_name[4];
+ struct mpi3_man7_receptacle_info receptacle_info[MPI3_MAN7_RECEPTACLE_INFO_MAX];
+};
+
+#define MPI3_MAN7_PAGEVERSION (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_MASK (0x01)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_0 (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_1 (0x01)
+struct mpi3_man8_phy_info {
+ u8 receptacle_id;
+ u8 connector_lane;
+ __le16 reserved02;
+ __le16 slotx1;
+ __le16 slotx2;
+ __le16 slotx4;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#ifndef MPI3_MAN8_PHY_INFO_MAX
+#define MPI3_MAN8_PHY_INFO_MAX (1)
+#endif
+struct mpi3_man_page8 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 reserved0d[3];
+ struct mpi3_man8_phy_info phy_info[MPI3_MAN8_PHY_INFO_MAX];
+};
+
+#define MPI3_MAN8_PAGEVERSION (0x00)
+struct mpi3_man9_rsrc_entry {
+ __le32 maximum;
+ __le32 decrement;
+ __le32 minimum;
+ __le32 actual;
+};
+
+enum mpi3_man9_resources {
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_SAS_TARGETS = 2,
+ MPI3_MAN9_RSRC_PCIE_TARGETS = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_PDS = 10,
+ MPI3_MAN9_RSRC_HOST_PDS = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PDS = 12,
+ MPI3_MAN9_RSRC_RAID_PDS = 13,
+ MPI3_MAN9_RSRC_NUM_RESOURCES
+};
+
+#define MPI3_MAN9_MIN_OUTSTANDING_REQS (1)
+#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000)
+#define MPI3_MAN9_MIN_TARGET_CMDS (0)
+#define MPI3_MAN9_MAX_TARGET_CMDS (65535)
+#define MPI3_MAN9_MIN_SAS_TARGETS (0)
+#define MPI3_MAN9_MAX_SAS_TARGETS (65535)
+#define MPI3_MAN9_MIN_PCIE_TARGETS (0)
+#define MPI3_MAN9_MIN_INITIATORS (0)
+#define MPI3_MAN9_MAX_INITIATORS (65535)
+#define MPI3_MAN9_MIN_ENCLOSURES (0)
+#define MPI3_MAN9_MAX_ENCLOSURES (65535)
+#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
+#define MPI3_MAN9_MIN_EXPANDERS (0)
+#define MPI3_MAN9_MAX_EXPANDERS (65535)
+#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
+struct mpi3_man_page9 {
+ struct mpi3_config_page_header header;
+ u8 num_resources;
+ u8 reserved09;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+ struct mpi3_man9_rsrc_entry resource[MPI3_MAN9_RSRC_NUM_RESOURCES];
+};
+
+#define MPI3_MAN9_PAGEVERSION (0x00)
+struct mpi3_man10_istwi_ctrlr_entry {
+ __le16 slave_address;
+ __le16 flags;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
+#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
+#define MPI3_MAN10_ISTWI_CTRLR_MAX (1)
+#endif
+struct mpi3_man_page10 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_ctrl;
+ u8 reserved0d[3];
+ struct mpi3_man10_istwi_ctrlr_entry istwi_controller[MPI3_MAN10_ISTWI_CTRLR_MAX];
+};
+
+#define MPI3_MAN10_PAGEVERSION (0x00)
+struct mpi3_man11_mux_device_format {
+ u8 max_channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_temp_sensor_device_format {
+ u8 type;
+ u8 reserved01[3];
+ u8 temp_channel[4];
+};
+
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+struct mpi3_man11_seeprom_device_format {
+ u8 size;
+ u8 page_write_size;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_SEEPROM_SIZE_1KBITS (0x01)
+#define MPI3_MAN11_SEEPROM_SIZE_2KBITS (0x02)
+#define MPI3_MAN11_SEEPROM_SIZE_4KBITS (0x03)
+#define MPI3_MAN11_SEEPROM_SIZE_8KBITS (0x04)
+#define MPI3_MAN11_SEEPROM_SIZE_16KBITS (0x05)
+#define MPI3_MAN11_SEEPROM_SIZE_32KBITS (0x06)
+#define MPI3_MAN11_SEEPROM_SIZE_64KBITS (0x07)
+#define MPI3_MAN11_SEEPROM_SIZE_128KBITS (0x08)
+struct mpi3_man11_ddr_spd_device_format {
+ u8 channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_cable_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_CABLE_MGMT_TYPE_SFF_8636 (0x00)
+struct mpi3_man11_bkplane_spec_ubm_format {
+ __le16 flags;
+ __le16 reserved02;
+};
+
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_FORCE_POLLING (0x0100)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_MASK (0x00f0)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+struct mpi3_man11_bkplane_spec_vpp_format {
+ __le16 flags;
+ __le16 reserved02;
+};
+
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0)
+union mpi3_man11_bkplane_spec_format {
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_vpp_format vpp;
+};
+
+struct mpi3_man11_bkplane_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ __le16 reserved02;
+ union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
+};
+
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01)
+struct mpi3_man11_gas_gauge_device_format {
+ u8 type;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+union mpi3_man11_device_specific_format {
+ struct mpi3_man11_mux_device_format mux;
+ struct mpi3_man11_temp_sensor_device_format temp_sensor;
+ struct mpi3_man11_seeprom_device_format seeprom;
+ struct mpi3_man11_ddr_spd_device_format ddr_spd;
+ struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
+ struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
+ struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ __le32 words[2];
+};
+
+struct mpi3_man11_istwi_device_format {
+ u8 device_type;
+ u8 controller;
+ u8 reserved02;
+ u8 flags;
+ __le16 device_address;
+ u8 mux_channel;
+ u8 mux_index;
+ union mpi3_man11_device_specific_format device_specific;
+};
+
+#define MPI3_MAN11_ISTWI_DEVTYPE_MUX (0x00)
+#define MPI3_MAN11_ISTWI_DEVTYPE_TEMP_SENSOR (0x01)
+#define MPI3_MAN11_ISTWI_DEVTYPE_SEEPROM (0x02)
+#define MPI3_MAN11_ISTWI_DEVTYPE_DDR_SPD (0x03)
+#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
+#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
+#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06)
+#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00)
+#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02)
+#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
+#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
+#endif
+struct mpi3_man_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_dev;
+ u8 reserved0d[3];
+ struct mpi3_man11_istwi_device_format istwi_device[MPI3_MAN11_ISTWI_DEVICE_MAX];
+};
+
+#define MPI3_MAN11_PAGEVERSION (0x00)
+#ifndef MPI3_MAN12_NUM_SGPIO_MAX
+#define MPI3_MAN12_NUM_SGPIO_MAX (1)
+#endif
+struct mpi3_man12_sgpio_info {
+ u8 slot_count;
+ u8 reserved01[3];
+ __le32 reserved04;
+ u8 phy_order[32];
+};
+
+struct mpi3_man_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 s_clock_freq;
+ __le32 activity_modulation;
+ u8 num_sgpio;
+ u8 reserved15[3];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le32 pattern[8];
+ struct mpi3_man12_sgpio_info sgpio_info[MPI3_MAN12_NUM_SGPIO_MAX];
+};
+
+#define MPI3_MAN12_PAGEVERSION (0x00)
+#define MPI3_MAN12_FLAGS_ERROR_PRESENCE_ENABLED (0x0400)
+#define MPI3_MAN12_FLAGS_ACTIVITY_INVERT_ENABLED (0x0200)
+#define MPI3_MAN12_FLAGS_GROUP_ID_DISABLED (0x0100)
+#define MPI3_MAN12_FLAGS_SIO_CLK_FILTER_ENABLED (0x0004)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_MASK (0x0002)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_OPEN_DRAIN (0x0002)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_MASK (0x0001)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_OPEN_DRAIN (0x0001)
+#define MPI3_MAN12_SIO_CLK_FREQ_MIN (32)
+#define MPI3_MAN12_SIO_CLK_FREQ_MAX (100000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_MASK (0x0000f000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_SHIFT (12)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_MASK (0x00000f00)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_SHIFT (8)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_MASK (0x000000f0)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_SHIFT (4)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_MASK (0x0000000f)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_SHIFT (0)
+#define MPI3_MAN12_PATTERN_RATE_MASK (0xe0000000)
+#define MPI3_MAN12_PATTERN_RATE_2_HZ (0x00000000)
+#define MPI3_MAN12_PATTERN_RATE_4_HZ (0x20000000)
+#define MPI3_MAN12_PATTERN_RATE_8_HZ (0x40000000)
+#define MPI3_MAN12_PATTERN_RATE_16_HZ (0x60000000)
+#define MPI3_MAN12_PATTERN_RATE_10_HZ (0x80000000)
+#define MPI3_MAN12_PATTERN_RATE_20_HZ (0xa0000000)
+#define MPI3_MAN12_PATTERN_RATE_40_HZ (0xc0000000)
+#define MPI3_MAN12_PATTERN_LENGTH_MASK (0x1f000000)
+#define MPI3_MAN12_PATTERN_LENGTH_SHIFT (24)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_MASK (0x00ffffff)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_SHIFT (0)
+#ifndef MPI3_MAN13_NUM_TRANSLATION_MAX
+#define MPI3_MAN13_NUM_TRANSLATION_MAX (1)
+#endif
+struct mpi3_man13_translation_info {
+ __le32 slot_status;
+ __le32 mask;
+ u8 activity;
+ u8 locate;
+ u8 error;
+ u8 reserved0b;
+};
+
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_FAULT (0x20000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_OFF (0x10000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_ACTIVITY (0x00800000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_MISSING (0x00100000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_INSERT (0x00080000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REMOVAL (0x00040000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IDENTIFY (0x00020000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_OK (0x00008000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_HOT_SPARE (0x00002000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000800)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_FAILED_ARRAY (0x00000400)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP (0x00000200)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP_ABORT (0x00000100)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_PREDICTED_FAILURE (0x00000040)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_OFF (0x00)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_ON (0x01)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_0 (0x02)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_1 (0x03)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_2 (0x04)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_3 (0x05)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_4 (0x06)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_5 (0x07)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_6 (0x08)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_7 (0x09)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY (0x0a)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY_TRAIL (0x0b)
+struct mpi3_man_page13 {
+ struct mpi3_config_page_header header;
+ u8 num_trans;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man13_translation_info translation[MPI3_MAN13_NUM_TRANSLATION_MAX];
+};
+
+#define MPI3_MAN13_PAGEVERSION (0x00)
+struct mpi3_man_page14 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_slot_groups;
+ u8 num_slots;
+ __le16 max_cert_chain_length;
+ __le32 sealed_slots;
+};
+
+#define MPI3_MAN14_PAGEVERSION (0x00)
+#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
+#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
+#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
+#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02)
+#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04)
+#ifndef MPI3_MAN15_VERSION_RECORD_MAX
+#define MPI3_MAN15_VERSION_RECORD_MAX 1
+#endif
+struct mpi3_man15_version_record {
+ __le16 spdm_version;
+ __le16 reserved02;
+};
+
+struct mpi3_man_page15 {
+ struct mpi3_config_page_header header;
+ u8 num_version_records;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man15_version_record version_record[MPI3_MAN15_VERSION_RECORD_MAX];
+};
+
+#define MPI3_MAN15_PAGEVERSION (0x00)
+#ifndef MPI3_MAN16_CERT_ALGO_MAX
+#define MPI3_MAN16_CERT_ALGO_MAX 1
+#endif
+struct mpi3_man16_certificate_algorithm {
+ u8 slot_group;
+ u8 reserved01[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved0c[3];
+};
+
+struct mpi3_man_page16 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_cert_algos;
+ u8 reserved0d[3];
+ struct mpi3_man16_certificate_algorithm certificate_algorithm[MPI3_MAN16_CERT_ALGO_MAX];
+};
+
+#define MPI3_MAN16_PAGEVERSION (0x00)
+#ifndef MPI3_MAN17_HASH_ALGORITHM_MAX
+#define MPI3_MAN17_HASH_ALGORITHM_MAX 1
+#endif
+struct mpi3_man17_hash_algorithm {
+ u8 meas_specification;
+ u8 reserved01[3];
+ __le32 measurement_hash_algo;
+ __le32 reserved08[2];
+};
+
+struct mpi3_man_page17 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_hash_algos;
+ u8 reserved0d[3];
+ struct mpi3_man17_hash_algorithm hash_algorithm[MPI3_MAN17_HASH_ALGORITHM_MAX];
+};
+
+#define MPI3_MAN17_PAGEVERSION (0x00)
+struct mpi3_man_page20 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 nonpremium_features;
+ u8 allowed_personalities;
+ u8 reserved11[3];
+};
+
+#define MPI3_MAN20_PAGEVERSION (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_MASK (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_ALLOWED (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_MASK (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_ALLOWED (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_MASK (0x01)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_ENABLED (0x00)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_DISABLED (0x01)
+struct mpi3_man_page21 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 flags;
+};
+
+#define MPI3_MAN21_PAGEVERSION (0x00)
+#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_MASK (0x80)
+#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_ENABLED (0x80)
+#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_DISABLED (0x00)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x60)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x20)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x40)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x08)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x08)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x01)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x01)
+#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
+#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
+#endif
+struct mpi3_man_page_product_specific {
+ struct mpi3_config_page_header header;
+ __le32 product_specific_info[MPI3_MAN_PROD_SPECIFIC_MAX];
+};
+
+struct mpi3_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le64 unique_value;
+ __le32 nvdata_version_default;
+ __le32 nvdata_version_persistent;
+};
+
+#define MPI3_IOUNIT0_PAGEVERSION (0x00)
+struct mpi3_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 dmd_io_delay;
+ u8 dmd_report_pc_ie;
+ u8 dmd_report_sata;
+ u8 dmd_report_sas;
+};
+
+#define MPI3_IOUNIT1_PAGEVERSION (0x00)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_MASK (0x00000030)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_DISABLE (0x00000010)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_NO_MODIFY (0x00000020)
+#define MPI3_IOUNIT1_FLAGS_ATA_SECURITY_FREEZE_LOCK (0x00000008)
+#define MPI3_IOUNIT1_FLAGS_WRITE_SAME_BUFFER (0x00000004)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK (0x00000003)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_DISABLE (0x00000001)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_UNCHANGED (0x00000002)
+#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK (0x7f)
+#define MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC (0x80)
+#ifndef MPI3_IO_UNIT2_GPIO_VAL_MAX
+#define MPI3_IO_UNIT2_GPIO_VAL_MAX (1)
+#endif
+struct mpi3_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 gpio_count;
+ u8 reserved09[3];
+ __le16 gpio_val[MPI3_IO_UNIT2_GPIO_VAL_MAX];
+};
+
+#define MPI3_IOUNIT2_PAGEVERSION (0x00)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_MASK (0xfffc)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT (2)
+#define MPI3_IOUNIT2_GPIO_SETTING_MASK (0x0001)
+#define MPI3_IOUNIT2_GPIO_SETTING_OFF (0x0000)
+#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
+struct mpi3_io_unit3_sensor {
+ __le16 flags;
+ __le16 reserved02;
+ __le16 threshold[4];
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+};
+
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001)
+#ifndef MPI3_IO_UNIT3_SENSOR_MAX
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 polling_interval;
+ __le16 reserved0e;
+ struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT3_PAGEVERSION (0x00)
+struct mpi3_io_unit4_sensor {
+ __le16 current_temperature;
+ __le16 reserved02;
+ u8 flags;
+ u8 reserved05[3];
+ __le32 reserved08;
+ __le32 reserved0c;
+};
+
+#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#ifndef MPI3_IO_UNIT4_SENSOR_MAX
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page4 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 reserved0d[3];
+ struct mpi3_io_unit4_sensor sensor[MPI3_IO_UNIT4_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT4_PAGEVERSION (0x00)
+struct mpi3_io_unit5_spinup_group {
+ u8 max_target_spinup;
+ u8 spinup_delay;
+ u8 spinup_flags;
+ u8 reserved03;
+};
+
+#define MPI3_IOUNIT5_SPINUP_FLAGS_DISABLE (0x01)
+#ifndef MPI3_IO_UNIT5_PHY_MAX
+#define MPI3_IO_UNIT5_PHY_MAX (4)
+#endif
+struct mpi3_io_unit_page5 {
+ struct mpi3_config_page_header header;
+ struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le32 reserved20;
+ u8 reserved24;
+ u8 sata_device_wait_time;
+ u8 spinup_encl_drive_count;
+ u8 spinup_encl_delay;
+ u8 num_phys;
+ u8 pe_initial_spinup_delay;
+ u8 topology_stable_time;
+ u8 flags;
+ u8 phy[MPI3_IO_UNIT5_PHY_MAX];
+};
+
+#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
+#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
+#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
+struct mpi3_io_unit_page6 {
+ struct mpi3_config_page_header header;
+ __le32 board_power_requirement;
+ __le32 pci_slot_power_allocation;
+ u8 flags;
+ u8 reserved11[3];
+};
+
+#define MPI3_IOUNIT6_PAGEVERSION (0x00)
+#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01)
+struct mpi3_io_unit_page7 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+};
+
+#define MPI3_IOUNIT7_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT8_DIGEST_MAX
+#define MPI3_IOUNIT8_DIGEST_MAX (1)
+#endif
+union mpi3_iounit8_digest {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+struct mpi3_io_unit_page8 {
+ struct mpi3_config_page_header header;
+ u8 sb_mode;
+ u8 sb_state;
+ __le16 reserved0a;
+ u8 num_slots;
+ u8 slots_available;
+ u8 current_key_encryption_algo;
+ u8 key_digest_hash_algo;
+ __le32 reserved10[2];
+ __le32 current_key[128];
+ union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
+};
+
+#define MPI3_IOUNIT8_PAGEVERSION (0x00)
+#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
+#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
+#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+struct mpi3_io_unit_page9 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le16 first_device;
+ __le16 reserved0e;
+};
+
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x01)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+struct mpi3_ioc_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 vendor_id;
+ __le16 device_id;
+ u8 revision_id;
+ u8 reserved11[3];
+ __le32 class_code;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+};
+
+#define MPI3_IOC0_PAGEVERSION (0x00)
+struct mpi3_ioc_page1 {
+ struct mpi3_config_page_header header;
+ __le32 coalescing_timeout;
+ u8 coalescing_depth;
+ u8 pci_slot_num;
+ __le16 reserved0e;
+};
+
+#define MPI3_IOC1_PAGEVERSION (0x00)
+#define MPI3_IOC1_PCISLOTNUM_UNKNOWN (0xff)
+#ifndef MPI3_IOC2_EVENTMASK_WORDS
+#define MPI3_IOC2_EVENTMASK_WORDS (4)
+#endif
+struct mpi3_ioc_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_IOC2_EVENTMASK_WORDS];
+};
+
+#define MPI3_IOC2_PAGEVERSION (0x00)
+struct mpi3_uefibsd_page0 {
+ struct mpi3_config_page_header header;
+ __le32 bsd_options;
+ u8 ssu_timeout;
+ u8 io_timeout;
+ u8 tur_retries;
+ u8 tur_interval;
+ u8 reserved10;
+ u8 security_key_timeout;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+};
+
+#define MPI3_UEFIBSD_PAGEVERSION (0x00)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002)
+#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+union mpi3_security_mac {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security_nonce {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security0_cert_chain {
+ __le32 dword[1024];
+ __le16 word[2048];
+ u8 byte[4096];
+};
+
+struct mpi3_security_page0 {
+ struct mpi3_config_page_header header;
+ u8 slot_num_group;
+ u8 slot_num;
+ __le16 cert_chain_length;
+ u8 cert_chain_flags;
+ u8 reserved0d[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved18[4];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ union mpi3_security0_cert_chain certificate_chain;
+};
+
+#define MPI3_SECURITY0_PAGEVERSION (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_MASK (0x0e)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_UNUSED (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SPDM (0x04)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_SEALED (0x01)
+#ifndef MPI3_SECURITY1_KEY_RECORD_MAX
+#define MPI3_SECURITY1_KEY_RECORD_MAX 1
+#endif
+#ifndef MPI3_SECURITY1_PAD_MAX
+#define MPI3_SECURITY1_PAD_MAX 1
+#endif
+union mpi3_security1_key_data {
+ __le32 dword[128];
+ __le16 word[256];
+ u8 byte[512];
+};
+
+struct mpi3_security1_key_record {
+ u8 flags;
+ u8 consumer;
+ __le16 key_data_size;
+ __le32 additional_key_data;
+ __le32 reserved08[2];
+ union mpi3_security1_key_data key_data;
+};
+
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_MASK (0x1f)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_HMAC (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_AES (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PRIVATE (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PUBLIC (0x04)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
+struct mpi3_security_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ u8 num_keys;
+ u8 reserved91[3];
+ __le32 reserved94[3];
+ struct mpi3_security1_key_record key_record[MPI3_SECURITY1_KEY_RECORD_MAX];
+ u8 pad[MPI3_SECURITY1_PAD_MAX];
+};
+
+#define MPI3_SECURITY1_PAGEVERSION (0x00)
+struct mpi3_sas_io_unit0_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 reserved06;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 discovery_status;
+ __le32 reserved10;
+};
+
+#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
+#define MPI3_SAS_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 reserved0d[3];
+ struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+struct mpi3_sas_io_unit1_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 max_target_port_connect_time;
+ __le32 reserved08;
+};
+
+#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
+#define MPI3_SAS_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le16 control_flags;
+ __le16 sas_narrow_max_queue_depth;
+ __le16 additional_control_flags;
+ __le16 sas_wide_max_queue_depth;
+ u8 num_phys;
+ u8 sata_max_q_depth;
+ __le16 reserved12;
+ struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT1_CONTROL_CONTROLLER_DEVICE_SELF_TEST (0x8000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI3_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI3_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI3_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_MASK (0x0001)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_DEVICE_NAME (0x0000)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SAS_ADDRESS (0x0001)
+#define MPI3_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
+#define MPI3_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI3_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI3_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI3_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI3_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI3_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI3_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI3_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+#define MPI3_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_6_0 (0xa0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_12_0 (0xb0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_22_5 (0xc0)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK (0x0f)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_6_0 (0x0a)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_12_0 (0x0b)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_22_5 (0x0c)
+struct mpi3_sas_io_unit2_phy_pm_settings {
+ u8 control_flags;
+ u8 reserved01;
+ __le16 inactivity_timer_exponent;
+ u8 sata_partial_timeout;
+ u8 reserved05;
+ u8 sata_slumber_timeout;
+ u8 reserved07;
+ u8 sas_partial_timeout;
+ u8 reserved09;
+ u8 sas_slumber_timeout;
+ u8 reserved0b;
+};
+
+#ifndef MPI3_SAS_IO_UNIT2_PHY_MAX
+#define MPI3_SAS_IO_UNIT2_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_sas_io_unit2_phy_pm_settings sas_phy_power_management_settings[MPI3_SAS_IO_UNIT2_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_MASK (0x7000)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_SHIFT (12)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_MASK (0x0700)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_SHIFT (8)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_MASK (0x0070)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_SHIFT (4)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_MASK (0x0007)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_SHIFT (0)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_SECONDS (7)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_SECOND (6)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MILLISECONDS (5)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MILLISECONDS (4)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MILLISECOND (3)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MICROSECONDS (2)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MICROSECONDS (1)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MICROSECOND (0)
+struct mpi3_sas_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 power_management_capabilities;
+};
+
+#define MPI3_SASIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+struct mpi3_sas_expander_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 report_gen_length;
+ __le16 enclosure_handle;
+ __le32 reserved0c;
+ __le64 sas_address;
+ __le32 discovery_status;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 expander_change_count;
+ __le16 expander_route_indexes;
+ u8 num_phys;
+ u8 sas_level;
+ __le16 flags;
+ __le16 stp_bus_inactivity_time_limit;
+ __le16 stp_max_connect_time_limit;
+ __le16 stp_smp_nexus_loss_time;
+ __le16 max_num_routed_sas_addresses;
+ __le64 active_zone_manager_sas_address;
+ __le16 zone_lock_inactivity_limit;
+ __le16 reserved3a;
+ u8 time_to_reduced_func;
+ u8 initial_time_to_reduced_func;
+ u8 max_reduced_func_time;
+ u8 exp_status;
+};
+
+#define MPI3_SASEXPANDER0_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI3_SASEXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI3_SASEXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI3_SASEXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI3_SASEXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI3_SASEXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI3_SASEXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI3_SASEXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI3_SASEXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI3_SASEXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI3_SASEXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+#define MPI3_SASEXPANDER0_ES_NOT_RESPONDING (0x02)
+#define MPI3_SASEXPANDER0_ES_RESPONDING (0x03)
+#define MPI3_SASEXPANDER0_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_sas_expander_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 reserved09[3];
+ u8 num_phys;
+ u8 phy;
+ __le16 num_table_entries_programmed;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_handle;
+ __le32 phy_info;
+ __le16 attached_device_info;
+ __le16 reserved1a;
+ __le16 expander_dev_handle;
+ u8 change_count;
+ u8 negotiated_link_rate;
+ u8 phy_identifier;
+ u8 attached_phy_identifier;
+ u8 reserved22;
+ u8 discovery_info;
+ __le32 attached_phy_info;
+ u8 zone_group;
+ u8 self_config_status;
+ __le16 reserved2a;
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASEXPANDER1_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+struct mpi3_sas_port_page0 {
+ struct mpi3_config_page_header header;
+ u8 port_number;
+ u8 reserved09;
+ u8 port_width;
+ u8 reserved0b;
+ u8 zone_group;
+ u8 reserved0d[3];
+ __le64 sas_address;
+ __le16 device_info;
+ __le16 reserved1a;
+ __le32 reserved1c;
+};
+
+#define MPI3_SASPORT0_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page0 {
+ struct mpi3_config_page_header header;
+ __le16 owner_dev_handle;
+ __le16 reserved0a;
+ __le16 attached_dev_handle;
+ u8 attached_phy_identifier;
+ u8 reserved0f;
+ __le32 attached_phy_info;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ u8 change_count;
+ u8 flags;
+ __le32 phy_info;
+ u8 negotiated_link_rate;
+ u8 reserved1d[3];
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASPHY0_PAGEVERSION (0x00)
+#define MPI3_SASPHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+struct mpi3_sas_phy_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 invalid_dword_count;
+ __le32 running_disparity_error_count;
+ __le32 loss_dword_synch_count;
+ __le32 phy_reset_problem_count;
+};
+
+#define MPI3_SASPHY1_PAGEVERSION (0x00)
+struct mpi3_sas_phy2_phy_event {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ __le32 phy_event_info;
+};
+
+#ifndef MPI3_SAS_PHY2_PHY_EVENT_MAX
+#define MPI3_SAS_PHY2_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy2_phy_event phy_event[MPI3_SAS_PHY2_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY2_PAGEVERSION (0x00)
+struct mpi3_sas_phy3_phy_event_config {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved07;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved0e;
+};
+
+#define MPI3_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI3_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI3_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI3_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI3_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI3_SASPHY3_EVENT_CODE_INV_SPL_PACKETS (0x07)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_SPL_PACKET_SYNC (0x08)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI3_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI3_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI3_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI3_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI3_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI3_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI3_SASPHY3_EVENT_CODE_CONNECTION (0x2a)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2b)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2c)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2d)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2e)
+#define MPI3_SASPHY3_EVENT_CODE_PERSIST_CONN (0x2f)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI3_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI3_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI3_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xd0)
+#define MPI3_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xd1)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP (0xd2)
+#define MPI3_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xd3)
+#define MPI3_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xd4)
+#define MPI3_SASPHY3_EVENT_CODE_LCCONN_TIME (0xd5)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xd6)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_TX_START (0xd7)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xd8)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xd9)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xda)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xdb)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xdc)
+#define MPI3_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI3_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI3_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI3_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI3_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+#define MPI3_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI3_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+#ifndef MPI3_SAS_PHY3_PHY_EVENT_MAX
+#define MPI3_SAS_PHY3_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy3_phy_event_config phy_event_config[MPI3_SAS_PHY3_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY3_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page4 {
+ struct mpi3_config_page_header header;
+ u8 reserved08[3];
+ u8 flags;
+ u8 initial_frame[28];
+};
+
+#define MPI3_SASPHY4_PAGEVERSION (0x00)
+#define MPI3_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI3_SASPHY4_FLAGS_SATA_FRAME (0x01)
+#define MPI3_PCIE_LINK_RETIMERS_MASK (0x30)
+#define MPI3_PCIE_LINK_RETIMERS_SHIFT (4)
+#define MPI3_PCIE_NEG_LINK_RATE_MASK (0x0f)
+#define MPI3_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+#define MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_PCIE_NEG_LINK_RATE_2_5 (0x02)
+#define MPI3_PCIE_NEG_LINK_RATE_5_0 (0x03)
+#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
+#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+struct mpi3_pcie_io_unit0_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 enumeration_status;
+ u8 io_unit_port;
+ u8 reserved0d[3];
+};
+
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_MASK (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_IOUNIT1 (0x00)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_BKPLANE (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_ENUM_IN_PROGRESS (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCH_DEPTH_EXCEEDED (0x80000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_ENDPOINTS_EXCEEDED (0x20000000)
+#define MPI3_PCIEIOUNIT0_ES_INSUFFICIENT_RESOURCES (0x10000000)
+#ifndef MPI3_PCIE_IO_UNIT0_PHY_MAX
+#define MPI3_PCIE_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 init_status;
+ __le16 reserved0e;
+ struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_RESOURCE_ALLOC_FAILED (0x03)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_HOST_PORT_MISMATCH (0x06)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PHYS_NOT_CONSECUTIVE (0x07)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+struct mpi3_pcie_io_unit1_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le32 reserved04;
+ __le32 reserved08;
+};
+
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 control_flags;
+ __le32 reserved0c;
+ u8 num_phys;
+ u8 reserved11;
+ __le16 reserved12;
+ struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+struct mpi3_pcie_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ __le16 nv_me_max_queue_depth;
+ __le16 reserved0a;
+ u8 nv_me_abort_to;
+ u8 reserved0d;
+ __le16 reserved0e;
+};
+
+#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+struct mpi3_pcie_switch_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 switch_status;
+ u8 reserved0a[2];
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ u8 num_ports;
+ u8 pc_ie_level;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+};
+
+#define MPI3_PCIESWITCH0_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH0_SS_NOT_RESPONDING (0x02)
+#define MPI3_PCIESWITCH0_SS_RESPONDING (0x03)
+#define MPI3_PCIESWITCH0_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_pcie_switch_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 reserved09[3];
+ u8 num_ports;
+ u8 port_num;
+ __le16 attached_dev_handle;
+ __le16 switch_dev_handle;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ __le16 slot;
+ __le16 slot_index;
+ __le32 reserved18;
+};
+
+#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+struct mpi3_pcie_link_page0 {
+ struct mpi3_config_page_header header;
+ u8 link;
+ u8 reserved09[3];
+ __le32 correctable_error_count;
+ __le16 n_fatal_error_count;
+ __le16 reserved12;
+ __le16 fatal_error_count;
+ __le16 reserved16;
+};
+
+#define MPI3_PCIELINK0_PAGEVERSION (0x00)
+struct mpi3_enclosure_page0 {
+ struct mpi3_config_page_header header;
+ __le64 enclosure_logical_id;
+ __le16 flags;
+ __le16 enclosure_handle;
+ __le16 num_slots;
+ __le16 start_slot;
+ u8 io_unit_port;
+ u8 enclosure_level;
+ __le16 sep_dev_handle;
+ __le32 reserved1c;
+};
+
+#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_MASK (0xc000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
+#define MPI3_ENCLS0_FLAGS_MNG_MASK (0x000f)
+#define MPI3_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI3_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI3_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0002)
+#define MPI3_DEVICE_DEVFORM_SAS_SATA (0x00)
+#define MPI3_DEVICE_DEVFORM_PCIE (0x01)
+#define MPI3_DEVICE_DEVFORM_VD (0x02)
+struct mpi3_device0_sas_sata_format {
+ __le64 sas_address;
+ __le16 flags;
+ __le16 device_info;
+ u8 phy_num;
+ u8 attached_phy_identifier;
+ u8 max_port_connections;
+ u8 zone_group;
+};
+
+#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
+#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SW_PRESERVE (0x0040)
+#define MPI3_DEVICE0_SASSATA_FLAGS_UNSUPP_DEV (0x0020)
+#define MPI3_DEVICE0_SASSATA_FLAGS_48BIT_LBA (0x0010)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SMART_SUPP (0x0008)
+#define MPI3_DEVICE0_SASSATA_FLAGS_NCQ_SUPP (0x0004)
+#define MPI3_DEVICE0_SASSATA_FLAGS_FUA_SUPP (0x0002)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PERSIST_CAP (0x0001)
+struct mpi3_device0_pcie_format {
+ u8 supported_link_rates;
+ u8 max_port_width;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ u8 port_num;
+ u8 controller_reset_to;
+ __le16 device_info;
+ __le32 maximum_data_transfer_size;
+ __le32 capabilities;
+ __le16 noiob;
+ u8 nv_me_abort_to;
+ u8 page_size;
+ __le16 shutdown_latency;
+ __le16 reserved16;
+};
+
+#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_16_0_SUPP (0x08)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
+#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+struct mpi3_device0_vd_format {
+ u8 vd_state;
+ u8 raid_level;
+ __le16 device_info;
+ __le16 flags;
+ __le16 reserved06;
+ __le32 reserved08[2];
+};
+
+#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
+#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
+#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02)
+#define MPI3_DEVICE0_VD_STATE_OPTIMAL (0x03)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_0 (0)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_1 (1)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_5 (5)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_6 (6)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_10 (10)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_50 (50)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_60 (60)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_HDD (0x0010)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SSD (0x0008)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_MASK (0x0003)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_NONE (0x0000)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_HOST (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_IOC (0x0002)
+union mpi3_device0_dev_spec_format {
+ struct mpi3_device0_sas_sata_format sas_sata_format;
+ struct mpi3_device0_pcie_format pcie_format;
+ struct mpi3_device0_vd_format vd_format;
+};
+
+struct mpi3_device_page0 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 slot;
+ __le16 enclosure_handle;
+ __le64 wwid;
+ __le16 persistent_id;
+ u8 io_unit_port;
+ u8 access_status;
+ __le16 flags;
+ __le16 reserved1e;
+ __le16 slot_index;
+ __le16 queue_depth;
+ u8 reserved24[3];
+ u8 device_form;
+ union mpi3_device0_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
+#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
+#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
+#define MPI3_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_DEVICE0_ASTATUS_CAP_UNSUPPORTED (0x02)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
+#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
+#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
+#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
+#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
+#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
+#define MPI3_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x23)
+#define MPI3_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x24)
+#define MPI3_DEVICE0_ASTATUS_SIF_PIO_SN (0x25)
+#define MPI3_DEVICE0_ASTATUS_SIF_MDMA_SN (0x26)
+#define MPI3_DEVICE0_ASTATUS_SIF_UDMA_SN (0x27)
+#define MPI3_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x28)
+#define MPI3_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x29)
+#define MPI3_DEVICE0_ASTATUS_SIF_MAX (0x2f)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNKNOWN (0x30)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
+#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
+#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDENTIFY_FAILED (0x43)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCONFIG_FAILED (0x44)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCREATION_FAILED (0x45)
+#define MPI3_DEVICE0_ASTATUS_NVME_EVENTCFG_FAILED (0x46)
+#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
+#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50)
+#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
+#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_NOT_DIR_ATTACHED (0x0000)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004)
+#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000)
+struct mpi3_device1_sas_sata_format {
+ __le32 reserved00;
+};
+
+struct mpi3_device1_pcie_format {
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved08;
+ u8 revision_id;
+ u8 reserved0d;
+ __le16 pci_parameters;
+};
+
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_128B (0x0)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_256B (0x1)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_512B (0x2)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_1024B (0x3)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_2048B (0x4)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_4096B (0x5)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_MASK (0x01c0)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_SHIFT (6)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK (0x0038)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT (3)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_MASK (0x0007)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_SHIFT (0)
+struct mpi3_device1_vd_format {
+ __le32 reserved00;
+};
+
+union mpi3_device1_dev_spec_format {
+ struct mpi3_device1_sas_sata_format sas_sata_format;
+ struct mpi3_device1_pcie_format pcie_format;
+ struct mpi3_device1_vd_format vd_format;
+};
+
+struct mpi3_device_page1 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 reserved0a;
+ __le32 reserved0c[12];
+ u8 reserved3c[3];
+ u8 device_form;
+ union mpi3_device1_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
new file mode 100644
index 000000000000..169e4f9b7b7c
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2018-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_IMAGE_H
+#define MPI30_IMAGE_H 1
+struct mpi3_comp_image_version {
+ __le16 build_num;
+ __le16 customer_id;
+ u8 phase_minor;
+ u8 phase_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+struct mpi3_hash_exclusion_format {
+ __le32 offset;
+ __le32 size;
+};
+
+#define MPI3_IMAGE_HASH_EXCUSION_NUM (4)
+struct mpi3_component_image_header {
+ __le32 signature0;
+ __le32 load_address;
+ __le32 data_size;
+ __le32 start_offset;
+ __le32 signature1;
+ __le32 flash_offset;
+ __le32 image_size;
+ __le32 version_string_offset;
+ __le32 build_date_string_offset;
+ __le32 build_time_string_offset;
+ __le32 environment_variable_offset;
+ __le32 application_specific;
+ __le32 signature2;
+ __le32 header_size;
+ __le32 crc;
+ __le32 flags;
+ __le32 secondary_flash_offset;
+ __le32 etp_offset;
+ __le32 etp_size;
+ union mpi3_version_union rmc_interface_version;
+ union mpi3_version_union etp_interface_version;
+ struct mpi3_comp_image_version component_image_version;
+ struct mpi3_hash_exclusion_format hash_exclusion[MPI3_IMAGE_HASH_EXCUSION_NUM];
+ __le32 next_image_header_offset;
+ union mpi3_version_union security_version;
+ __le32 reserved84[31];
+};
+
+#define MPI3_IMAGE_HEADER_SIGNATURE0_MPI3 (0xeb00003e)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_INVALID (0x00000000)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_FIRST_MUTABLE (0x20434d46)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_BSP (0x20505342)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_ROM_BIOS (0x534f4942)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_X64 (0x4d494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_ARM (0x41494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_CPLD (0x444c5043)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
+#define MPI3_IMAGE_HEADER_FLAGS_REQUIRES_ACTIVATION (0x00000004)
+#define MPI3_IMAGE_HEADER_FLAGS_COMPRESSED (0x00000002)
+#define MPI3_IMAGE_HEADER_FLAGS_FLASH (0x00000001)
+#define MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04)
+#define MPI3_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08)
+#define MPI3_IMAGE_HEADER_START_OFFSET_OFFSET (0x0c)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10)
+#define MPI3_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14)
+#define MPI3_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18)
+#define MPI3_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1c)
+#define MPI3_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20)
+#define MPI3_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24)
+#define MPI3_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28)
+#define MPI3_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2c)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30)
+#define MPI3_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34)
+#define MPI3_IMAGE_HEADER_CRC_OFFSET (0x38)
+#define MPI3_IMAGE_HEADER_FLAGS_OFFSET (0x3c)
+#define MPI3_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40)
+#define MPI3_IMAGE_HEADER_ETP_OFFSET_OFFSET (0x44)
+#define MPI3_IMAGE_HEADER_ETP_SIZE_OFFSET (0x48)
+#define MPI3_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4c)
+#define MPI3_IMAGE_HEADER_ETP_INTERFACE_VER_OFFSET (0x50)
+#define MPI3_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54)
+#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
+#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
+#define MPI3_IMAGE_HEADER_SIZE (0x100)
+struct mpi3_extended_image_header {
+ u8 image_type;
+ u8 reserved01[3];
+ __le32 checksum;
+ __le32 image_size;
+ __le32 next_image_header_offset;
+ __le32 reserved10[4];
+ __le32 identify_string[8];
+};
+
+#define MPI3_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI3_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI3_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0c)
+#define MPI3_EXT_IMAGE_HEADER_SIZE (0x40)
+#define MPI3_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI3_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI3_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI3_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI3_EXT_IMAGE_TYPE_RDE (0x0a)
+#define MPI3_EXT_IMAGE_TYPE_AUXILIARY_PROCESSOR (0x0b)
+#define MPI3_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI3_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xff)
+struct mpi3_supported_device {
+ __le16 device_id;
+ __le16 vendor_id;
+ __le16 device_id_mask;
+ __le16 reserved06;
+ u8 low_pci_rev;
+ u8 high_pci_rev;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#ifndef MPI3_SUPPORTED_DEVICE_MAX
+#define MPI3_SUPPORTED_DEVICE_MAX (1)
+#endif
+struct mpi3_supported_devices_data {
+ u8 image_version;
+ u8 reserved01;
+ u8 num_devices;
+ u8 reserved03;
+ __le32 reserved04;
+ struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_MAX
+#define MPI3_ENCRYPTED_HASH_MAX (1)
+#endif
+struct mpi3_encrypted_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+};
+
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
+#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA1024 (0x03)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+#ifndef MPI3_PUBLIC_KEY_MAX
+#define MPI3_PUBLIC_KEY_MAX (1)
+#endif
+struct mpi3_encrypted_key_with_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 public_key[MPI3_PUBLIC_KEY_MAX];
+ __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
+#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
+#endif
+struct mpi3_encrypted_hash_data {
+ u8 image_version;
+ u8 num_hash;
+ __le16 reserved02;
+ __le32 reserved04;
+ struct mpi3_encrypted_hash_entry encrypted_hash_entry[MPI3_ENCRYPTED_HASH_ENTRY_MAX];
+};
+
+#ifndef MPI3_AUX_PROC_DATA_MAX
+#define MPI3_AUX_PROC_DATA_MAX (1)
+#endif
+struct mpi3_aux_processor_data {
+ u8 boot_method;
+ u8 num_load_addr;
+ u8 reserved02;
+ u8 type;
+ __le32 version;
+ __le32 load_address[8];
+ __le32 reserved28[22];
+ __le32 aux_processor_data[MPI3_AUX_PROC_DATA_MAX];
+};
+
+#define MPI3_AUX_PROC_DATA_OFFSET (0x80)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_MSG (0x00)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_DOORBELL (0x01)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_COMPONENT (0x02)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_A15 (0x00)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_M0 (0x01)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_R4 (0x02)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
new file mode 100644
index 000000000000..e02b6d3cfba2
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_INIT_H
+#define MPI30_INIT_H 1
+struct mpi3_scsi_io_cdb_eedp32 {
+ u8 cdb[20];
+ __be32 primary_reference_tag;
+ __le16 primary_application_tag;
+ __le16 primary_application_tag_mask;
+ __le32 transfer_length;
+};
+
+union mpi3_scso_io_cdb_union {
+ u8 cdb32[32];
+ struct mpi3_scsi_io_cdb_eedp32 eedp32;
+ struct mpi3_sge_common sge;
+};
+
+struct mpi3_scsi_io_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le32 flags;
+ __le32 skip_count;
+ __le32 data_length;
+ u8 lun[8];
+ union mpi3_scso_io_cdb_union cdb;
+ union mpi3_sge_union sgl[4];
+};
+
+#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
+#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
+#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
+#define MPI3_SCSIIO_METASGL_INDEX (3)
+struct mpi3_scsi_io_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 dev_handle;
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 response_data;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 eedp_error_offset;
+ __le16 eedp_observed_app_tag;
+ __le16 eedp_observed_guard;
+ __le32 eedp_observed_ref_tag;
+ __le64 sense_data_buffer_address;
+};
+
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x02)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x04)
+#define MPI3_SCSI_STATUS_GOOD (0x00)
+#define MPI3_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI3_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI3_SCSI_STATUS_BUSY (0x08)
+#define MPI3_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI3_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI3_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI3_SCSI_STATUS_COMMAND_TERMINATED (0x22)
+#define MPI3_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
+#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
+#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
+#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
+#define MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE (0x03)
+#define MPI3_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI3_SCSI_STATE_TERMINATED (0x08)
+#define MPI3_SCSI_STATE_RESPONSE_DATA_VALID (0x10)
+#define MPI3_SCSI_RSP_RESPONSECODE_MASK (0x000000ff)
+#define MPI3_SCSI_RSP_RESPONSECODE_SHIFT (0)
+#define MPI3_SCSI_RSP_ARI2_MASK (0x0000ff00)
+#define MPI3_SCSI_RSP_ARI2_SHIFT (8)
+#define MPI3_SCSI_RSP_ARI1_MASK (0x00ff0000)
+#define MPI3_SCSI_RSP_ARI1_SHIFT (16)
+#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
+#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
+#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
+struct mpi3_scsi_task_mgmt_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 task_host_tag;
+ u8 task_type;
+ u8 reserved0f;
+ __le16 task_request_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ u8 lun[8];
+};
+
+#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
+#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
+#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
+struct mpi3_scsi_task_mgmt_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 termination_count;
+ __le32 response_data;
+ __le32 reserved18;
+};
+
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#endif
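
A minimal usage sketch (not part of the patch; helper names are illustrative): composing the little-endian flags word of struct mpi3_scsi_io_request from the task-attribute and data-direction encodings above, and testing a reply's scsi_state for valid sense data.

/* Illustrative helpers only -- assume the mpi30_init.h definitions above. */
static void mpi3_scsiio_set_write_simpleq(struct mpi3_scsi_io_request *req)
{
	/* flags is little-endian on the wire */
	req->flags = cpu_to_le32(MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ |
				 MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE);
}

static bool mpi3_scsiio_sense_valid(const struct mpi3_scsi_io_reply *reply)
{
	return (reply->scsi_state & MPI3_SCSI_STATE_SENSE_MASK) ==
	       MPI3_SCSI_STATE_SENSE_VALID;
}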
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
new file mode 100644
index 000000000000..1af99a5382d5
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -0,0 +1,1004 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_IOC_H
+#define MPI30_IOC_H 1
+struct mpi3_ioc_init_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ union mpi3_version_union mpi_version;
+ __le64 time_stamp;
+ u8 reserved18;
+ u8 who_init;
+ __le16 reserved1a;
+ __le16 reply_free_queue_depth;
+ __le16 reserved1e;
+ __le64 reply_free_queue_address;
+ __le32 reserved28;
+ __le16 sense_buffer_free_queue_depth;
+ __le16 sense_buffer_length;
+ __le64 sense_buffer_free_queue_address;
+ __le64 driver_information_address;
+};
+
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
+struct mpi3_driver_info_layout {
+ __le32 information_length;
+ u8 driver_signature[12];
+ u8 os_name[16];
+ u8 os_version[12];
+ u8 driver_name[20];
+ u8 driver_version[32];
+ u8 driver_release_date[20];
+ __le32 driver_capabilities;
+};
+
+struct mpi3_ioc_facts_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_ioc_facts_data {
+ __le16 ioc_facts_data_length;
+ __le16 reserved02;
+ union mpi3_version_union mpi_version;
+ struct mpi3_comp_image_version fw_version;
+ __le32 ioc_capabilities;
+ u8 ioc_number;
+ u8 who_init;
+ __le16 max_msix_vectors;
+ __le16 max_outstanding_request;
+ __le16 product_id;
+ __le16 ioc_request_frame_size;
+ __le16 reply_frame_size;
+ __le16 ioc_exceptions;
+ __le16 max_persistent_id;
+ u8 sge_modifier_mask;
+ u8 sge_modifier_value;
+ u8 sge_modifier_shift;
+ u8 protocol_flags;
+ __le16 max_sas_initiators;
+ __le16 max_sas_targets;
+ __le16 max_sas_expanders;
+ __le16 max_enclosures;
+ __le16 min_dev_handle;
+ __le16 max_dev_handle;
+ __le16 max_pc_ie_switches;
+ __le16 max_nvme;
+ __le16 max_pds;
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_advanced_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_posted_cmd_buffers;
+ __le32 flags;
+ __le16 max_operational_request_queues;
+ __le16 max_operational_reply_queues;
+ __le16 shutdown_timeout;
+ __le16 reserved4e;
+ __le32 diag_trace_size;
+ __le32 diag_fw_size;
+};
+
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001)
+#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
+#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
+#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
+#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
+#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
+#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
+#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
+#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
+#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
+#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
+#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
+#define MPI3_IOCFACTS_PROTOCOL_SATA (0x0008)
+#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
+struct mpi3_mgmt_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c[5];
+ union mpi3_sge_union command_sgl;
+ union mpi3_sge_union response_sgl;
+};
+
+struct mpi3_create_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 burst;
+ __le16 size;
+ __le16 queue_id;
+ __le16 reply_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+struct mpi3_delete_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_create_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 reserved0b;
+ __le16 size;
+ __le16 queue_id;
+ __le16 msix_index;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+struct mpi3_delete_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_port_enable_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+};
+
+#define MPI3_EVENT_LOG_DATA (0x01)
+#define MPI3_EVENT_CHANGE (0x02)
+#define MPI3_EVENT_GPIO_INTERRUPT (0x04)
+#define MPI3_EVENT_TEMP_THRESHOLD (0x05)
+#define MPI3_EVENT_CABLE_MGMT (0x06)
+#define MPI3_EVENT_DEVICE_ADDED (0x07)
+#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08)
+#define MPI3_EVENT_PREPARE_FOR_RESET (0x09)
+#define MPI3_EVENT_COMP_IMAGE_ACT_START (0x0a)
+#define MPI3_EVENT_ENCL_DEVICE_ADDED (0x0b)
+#define MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x0c)
+#define MPI3_EVENT_DEVICE_STATUS_CHANGE (0x0d)
+#define MPI3_EVENT_ENERGY_PACK_CHANGE (0x0e)
+#define MPI3_EVENT_SAS_DISCOVERY (0x11)
+#define MPI3_EVENT_SAS_BROADCAST_PRIMITIVE (0x12)
+#define MPI3_EVENT_SAS_NOTIFY_PRIMITIVE (0x13)
+#define MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x14)
+#define MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW (0x15)
+#define MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x16)
+#define MPI3_EVENT_SAS_PHY_COUNTER (0x18)
+#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
+#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
+#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
+#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
+#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+struct mpi3_event_notification_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+struct mpi3_event_notification_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 event_data_length;
+ u8 event;
+ __le16 ioc_change_count;
+ __le32 event_context;
+ __le32 event_data[1];
+};
+
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
+struct mpi3_event_data_gpio_interrupt {
+ u8 gpio_num;
+ u8 reserved01[3];
+};
+
+struct mpi3_event_data_temp_threshold {
+ __le16 status;
+ u8 sensor_num;
+ u8 reserved03;
+ __le16 current_temperature;
+ __le16 reserved06;
+ __le32 reserved08;
+ __le32 reserved0c;
+};
+
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001)
+struct mpi3_event_data_cable_management {
+ __le32 active_cable_power_requirement;
+ u8 status;
+ u8 receptacle_id;
+ __le16 reserved06;
+};
+
+#define MPI3_EVENT_CABLE_MGMT_ACT_CABLE_PWR_INVALID (0xffffffff)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER (0x00)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_PRESENT (0x01)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED (0x02)
+struct mpi3_event_ack_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 event;
+ u8 reserved0d[3];
+ __le32 event_context;
+};
+
+struct mpi3_event_data_prepare_for_reset {
+ u8 reason_code;
+ u8 reserved01;
+ __le16 reserved02;
+};
+
+#define MPI3_EVENT_PREPARE_RESET_RC_START (0x01)
+#define MPI3_EVENT_PREPARE_RESET_RC_ABORT (0x02)
+struct mpi3_event_data_comp_image_activation {
+ __le32 reserved00;
+};
+
+struct mpi3_event_data_device_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 parent_dev_handle;
+ __le16 dev_handle;
+ __le64 wwid;
+ u8 lun[8];
+};
+
+#define MPI3_EVENT_DEV_STAT_RC_MOVED (0x01)
+#define MPI3_EVENT_DEV_STAT_RC_HIDDEN (0x02)
+#define MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN (0x03)
+#define MPI3_EVENT_DEV_STAT_RC_ASYNC_NOTIFICATION (0x04)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT (0x20)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP (0x21)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_STRT (0x22)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_CMP (0x23)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT (0x24)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP (0x25)
+#define MPI3_EVENT_DEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x30)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_STRT (0x40)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_CMP (0x41)
+#define MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING (0x50)
+struct mpi3_event_data_energy_pack_change {
+ __le32 reserved00;
+ __le16 shutdown_timeout;
+ __le16 reserved06;
+};
+
+struct mpi3_event_data_sas_discovery {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 discovery_status;
+};
+
+#define MPI3_EVENT_SAS_DISC_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_SAS_DISC_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+#define MPI3_SAS_DISC_STATUS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
+#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
+#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
+#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
+#define MPI3_SAS_DISC_STATUS_TABLE_TO_SUBTRACTIVE_LINK (0x00000200)
+#define MPI3_SAS_DISC_STATUS_UNSUPPORTED_DEVICE (0x00000100)
+#define MPI3_SAS_DISC_STATUS_TABLE_LINK (0x00000080)
+#define MPI3_SAS_DISC_STATUS_SUBTRACTIVE_LINK (0x00000040)
+#define MPI3_SAS_DISC_STATUS_SMP_CRC_ERROR (0x00000020)
+#define MPI3_SAS_DISC_STATUS_SMP_FUNCTION_FAILED (0x00000010)
+#define MPI3_SAS_DISC_STATUS_SMP_TIMEOUT (0x00000008)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_PORTS (0x00000004)
+#define MPI3_SAS_DISC_STATUS_INVALID_SAS_ADDRESS (0x00000002)
+#define MPI3_SAS_DISC_STATUS_LOOP_DETECTED (0x00000001)
+struct mpi3_event_data_sas_broadcast_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 port_width;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE (0x01)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_SES (0x02)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_EXPANDER (0x03)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED3 (0x05)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED4 (0x06)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE1_RESERVED (0x08)
+struct mpi3_event_data_sas_notify_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 reserved02;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_ENABLE_SPINUP (0x01)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04)
+#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+struct mpi3_event_sas_topo_phy_entry {
+ __le16 attached_dev_handle;
+ u8 link_rate;
+ u8 status;
+};
+
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xf0)
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+#define MPI3_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI3_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI3_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI3_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI3_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0a)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0b)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0c)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_MASK (0xc0)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_SHIFT (6)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_ACCESSIBLE (0x00)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING (0x06)
+struct mpi3_event_data_sas_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 expander_dev_handle;
+ u8 num_phys;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 io_unit_port;
+ struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+};
+
+#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_sas_phy_counter {
+ __le64 time_stamp;
+ __le32 reserved08;
+ u8 phy_event_code;
+ u8 phy_num;
+ __le16 reserved0e;
+ __le32 phy_event_info;
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved17;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved1e;
+};
+
+struct mpi3_event_data_sas_device_disc_err {
+ __le16 dev_handle;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_FAILED (0x01)
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_TIMEOUT (0x02)
+struct mpi3_event_data_pcie_enumeration {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 enumeration_status;
+};
+
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCH_DEPTH_EXCEED (0x80000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
+#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1)
+#endif
+struct mpi3_event_pcie_topo_port_entry {
+ __le16 attached_dev_handle;
+ u8 port_status;
+ u8 reserved03;
+ u8 current_port_info;
+ u8 reserved05;
+ u8 previous_port_info;
+ u8 reserved07;
+};
+
+#define MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_4 (0x30)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_32_0 (0x06)
+struct mpi3_event_data_pcie_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 switch_dev_handle;
+ u8 num_ports;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_port_num;
+ u8 switch_status;
+ u8 io_unit_port;
+ __le32 reserved0c;
+ struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+};
+
+#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_sas_init_dev_status_change {
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 dev_handle;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI3_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+struct mpi3_event_data_sas_init_table_overflow {
+ __le16 max_init;
+ __le16 current_init;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+struct mpi3_event_data_hard_reset_received {
+ u8 reserved00;
+ u8 io_unit_port;
+ __le16 reserved02;
+};
+
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
+#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
+#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
+#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)
+#define MPI3_PEL_ACTION_MARK_CLEAR (0x02)
+#define MPI3_PEL_ACTION_GET_LOG (0x03)
+#define MPI3_PEL_ACTION_GET_COUNT (0x04)
+#define MPI3_PEL_ACTION_WAIT (0x05)
+#define MPI3_PEL_ACTION_ABORT (0x06)
+#define MPI3_PEL_ACTION_GET_PRINT_STRINGS (0x07)
+#define MPI3_PEL_ACTION_ACKNOWLEDGE (0x08)
+#define MPI3_PEL_STATUS_SUCCESS (0x00)
+#define MPI3_PEL_STATUS_NOT_FOUND (0x01)
+#define MPI3_PEL_STATUS_ABORTED (0x02)
+#define MPI3_PEL_STATUS_NOT_READY (0x03)
+struct mpi3_pel_seq {
+ __le32 newest;
+ __le32 oldest;
+ __le32 clear;
+ __le32 shutdown;
+ __le32 boot;
+ __le32 last_acknowledged;
+};
+
+struct mpi3_pel_entry {
+ __le32 sequence_number;
+ __le32 time_stamp[2];
+ __le16 log_code;
+ __le16 arg_type;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ u8 ext_num;
+ u8 num_exts;
+ u8 arg_data_size;
+ u8 fixed_format_size;
+ __le32 reserved18[2];
+ __le32 pel_info[24];
+};
+
+struct mpi3_pel_list {
+ __le32 log_count;
+ __le32 reserved04;
+ struct mpi3_pel_entry entry[1];
+};
+
+struct mpi3_pel_arg_map {
+ u8 arg_type;
+ u8 length;
+ __le16 start_location;
+};
+
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_APPEND_STRING (0x00)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_INTEGER (0x01)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_STRING (0x02)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_BIT_FIELD (0x03)
+struct mpi3_pel_print_string {
+ __le16 log_code;
+ __le16 string_length;
+ u8 num_arg_map;
+ u8 reserved05[3];
+ struct mpi3_pel_arg_map arg_map[1];
+};
+
+struct mpi3_pel_print_string_list {
+ __le32 num_print_strings;
+ __le32 residual_bytes_remain;
+ __le32 reserved08[2];
+ struct mpi3_pel_print_string print_string[1];
+};
+
+#ifndef MPI3_PEL_ACTION_SPECIFIC_MAX
+#define MPI3_PEL_ACTION_SPECIFIC_MAX (1)
+#endif
+struct mpi3_pel_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 action_specific[MPI3_PEL_ACTION_SPECIFIC_MAX];
+};
+
+struct mpi3_pel_req_action_get_sequence_numbers {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c[5];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_clear_log_marker {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ u8 clear_type;
+ u8 reserved0d[3];
+};
+
+struct mpi3_pel_req_action_get_log {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_get_count {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_wait {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le16 wait_time;
+ __le16 reserved16;
+ __le32 reserved18[2];
+};
+
+struct mpi3_pel_req_action_abort {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 abort_host_tag;
+ __le16 reserved12;
+ __le32 reserved14;
+};
+
+struct mpi3_pel_req_action_get_print_strings {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 start_log_code;
+ __le16 reserved12;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_acknowledge {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 sequence_number;
+ __le32 reserved10;
+};
+
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01)
+struct mpi3_pel_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 action;
+ u8 reserved11;
+ __le16 reserved12;
+ __le16 pe_log_status;
+ __le16 reserved16;
+ __le32 transfer_length;
+};
+
+struct mpi3_ci_download_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 signature1;
+ __le32 total_image_size;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_LAST_SEGMENT (0x80)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_DOWNLOAD (0x01)
+#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
+#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+struct mpi3_ci_download_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 flags;
+ u8 cache_dirty;
+ u8 pending_count;
+ u8 reserved13;
+};
+
+#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_OFFLINE_PENDING (0x06)
+#define MPI3_CI_DOWNLOAD_FLAGS_COMPATIBLE (0x01)
+struct mpi3_ci_upload_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 signature1;
+ __le32 reserved10;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
+#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04)
+#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05)
+#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
+#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
+#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
+#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
+#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21)
+#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
+#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
+#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
+#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTENT_ID (0x04)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM64_WWID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM16_SLOTNUM_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM64_ENCLOSURELID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM64_DEVNAME_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
+#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
+#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
+#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
+#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+struct mpi3_iounit_control_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 operation;
+ __le32 reserved0c;
+ __le64 param64[2];
+ __le32 param32[4];
+ __le16 param16[4];
+ u8 param8[8];
+};
+
+struct mpi3_iounit_control_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le64 value64[2];
+ __le32 value32[4];
+ __le16 value16[4];
+ u8 value8[8];
+};
+#endif
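
A minimal usage sketch (not part of the patch; the helper name is illustrative): clearing the mask bit for one event code in the event_masks[] words of an event notification request. A set bit is typically used to mask an event off, so clearing it asks the IOC to report that event.

/* Illustrative helper only -- assumes the mpi30_ioc.h definitions above. */
static void mpi3_unmask_event(struct mpi3_event_notification_request *req, u8 event)
{
	u32 word = event / 32;
	u32 bit = event % 32;

	if (word < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS)
		req->event_masks[word] &= cpu_to_le32(~(1u << bit));
}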
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
new file mode 100644
index 000000000000..ba5018702960
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_SAS_H
+#define MPI30_SAS_H 1
+#define MPI3_SAS_DEVICE_INFO_SSP_TARGET (0x00000100)
+#define MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET (0x00000080)
+#define MPI3_SAS_DEVICE_INFO_SMP_TARGET (0x00000040)
+#define MPI3_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000020)
+#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
+#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
+struct mpi3_smp_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 io_unit_port;
+ __le32 reserved0c[3];
+ __le64 sas_address;
+ struct mpi3_sge_common request_sge;
+ struct mpi3_sge_common response_sge;
+};
+#endif
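
A minimal usage sketch (not part of the patch; the helper name is illustrative): testing a SAS device_info word, as reported elsewhere in the MPI3 configuration pages, against the bit definitions above.

/* Illustrative helper only -- assumes the mpi30_sas.h definitions above. */
static bool mpi3_dev_is_ssp_end_device(u32 device_info)
{
	return (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET) &&
	       (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) ==
	       MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE;
}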
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
new file mode 100644
index 000000000000..63e4e81d5397
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_TRANSPORT_H
+#define MPI30_TRANSPORT_H 1
+struct mpi3_version_struct {
+ u8 dev;
+ u8 unit;
+ u8 minor;
+ u8 major;
+};
+
+union mpi3_version_union {
+ struct mpi3_version_struct mpi3_version;
+ __le32 word;
+};
+
+#define MPI3_VERSION_MAJOR (3)
+#define MPI3_VERSION_MINOR (0)
+#define MPI3_VERSION_UNIT (0)
+#define MPI3_VERSION_DEV (18)
+struct mpi3_sysif_oper_queue_indexes {
+ __le16 producer_index;
+ __le16 reserved02;
+ __le16 consumer_index;
+ __le16 reserved06;
+};
+
+struct mpi3_sysif_registers {
+ __le64 ioc_information;
+ union mpi3_version_union version;
+ __le32 reserved0c[2];
+ __le32 ioc_configuration;
+ __le32 reserved18;
+ __le32 ioc_status;
+ __le32 reserved20;
+ __le32 admin_queue_num_entries;
+ __le64 admin_request_queue_address;
+ __le64 admin_reply_queue_address;
+ __le32 reserved38[2];
+ __le32 coalesce_control;
+ __le32 reserved44[1007];
+ __le16 admin_request_queue_pi;
+ __le16 reserved1002;
+ __le16 admin_reply_queue_ci;
+ __le16 reserved1006;
+ struct mpi3_sysif_oper_queue_indexes oper_queue_indexes[383];
+ __le32 reserved1c00;
+ __le32 write_sequence;
+ __le32 host_diagnostic;
+ __le32 reserved1c0c;
+ __le32 fault;
+ __le32 fault_info[3];
+ __le32 reserved1c20[4];
+ __le64 hcb_address;
+ __le32 hcb_size;
+ __le32 reserved1c3c;
+ __le32 reply_free_host_index;
+ __le32 sense_buffer_free_host_index;
+ __le32 reserved1c48[2];
+ __le64 diag_rw_data;
+ __le64 diag_rw_address;
+ __le16 diag_rw_control;
+ __le16 diag_rw_status;
+ __le32 reserved1c64[35];
+ __le32 scratchpad[4];
+ __le32 reserved1d00[192];
+ __le32 device_assigned_registers[2048];
+};
+
+#define MPI3_SYSIF_IOC_INFO_LOW_OFFSET (0x00000000)
+#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
+#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
+#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
+#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
+#define MPI3_SYSIF_IOC_STATUS_FAULT (0x00000002)
+#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET (0x00000028)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_HIGH_OFFSET (0x0000002c)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET (0x00000030)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
+#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_SHIFT (0)
+#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
+#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD (0xb)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH (0x2)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH (0x7)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
+#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
+#define MPI3_SYSIF_HOST_DIAG_SECURE_BOOT (0x00000040)
+#define MPI3_SYSIF_HOST_DIAG_CLEAR_INVALID_FW_IMAGE (0x00000020)
+#define MPI3_SYSIF_HOST_DIAG_INVALID_FW_IMAGE (0x00000010)
+#define MPI3_SYSIF_HOST_DIAG_HCBENABLE (0x00000008)
+#define MPI3_SYSIF_HOST_DIAG_HCBMODE (0x00000004)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_RW_ENABLE (0x00000002)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE (0x00000001)
+#define MPI3_SYSIF_FAULT_OFFSET (0x00001c10)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MASK (0xff000000)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
+#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
+#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
+#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
+#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005)
+#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
+#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
+#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
+#define MPI3_SYSIF_HCB_ADDRESS_LOW_OFFSET (0x00001c30)
+#define MPI3_SYSIF_HCB_ADDRESS_HIGH_OFFSET (0x00001c34)
+#define MPI3_SYSIF_HCB_SIZE_OFFSET (0x00001c38)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_MASK (0xfffff000)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_SHIFT (12)
+#define MPI3_SYSIF_HCB_SIZE_HCDW_ENABLE (0x00000001)
+#define MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET (0x00001c40)
+#define MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET (0x00001c44)
+#define MPI3_SYSIF_DIAG_RW_DATA_LOW_OFFSET (0x00001c50)
+#define MPI3_SYSIF_DIAG_RW_DATA_HIGH_OFFSET (0x00001c54)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_LOW_OFFSET (0x00001c58)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
+#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_PAR_ERR (0x00000006)
+#define MPI3_SYSIF_DIAG_RW_STATUS_BUSY (0x00000001)
+#define MPI3_SYSIF_SCRATCHPAD0_OFFSET (0x00001cf0)
+#define MPI3_SYSIF_SCRATCHPAD1_OFFSET (0x00001cf4)
+#define MPI3_SYSIF_SCRATCHPAD2_OFFSET (0x00001cf8)
+#define MPI3_SYSIF_SCRATCHPAD3_OFFSET (0x00001cfc)
+#define MPI3_SYSIF_DEVICE_ASSIGNED_REGS_OFFSET (0x00002000)
+#define MPI3_SYSIF_DIAG_SAVE_TIMEOUT (60)
+struct mpi3_default_reply_descriptor {
+ __le32 descriptor_type_dependent1[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 descriptor_type_dependent2;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+struct mpi3_address_reply_descriptor {
+ __le64 reply_frame_address;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 reserved0c;
+ __le16 reply_flags;
+};
+
+struct mpi3_success_reply_descriptor {
+ __le32 reserved00[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+struct mpi3_target_command_buffer_reply_descriptor {
+ __le32 reserved00;
+ __le16 initiator_dev_handle;
+ u8 phy_num;
+ u8 reserved07;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 io_index;
+ __le16 reply_flags;
+};
+
+struct mpi3_status_reply_descriptor {
+ __le16 ioc_status;
+ __le16 reserved02;
+ __le32 ioc_log_info;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL (0x8000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_NO_INFO (0x00000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_SAS (0x30000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_DATA_MASK (0x0fffffff)
+union mpi3_reply_descriptors_union {
+ struct mpi3_default_reply_descriptor default_reply;
+ struct mpi3_address_reply_descriptor address_reply;
+ struct mpi3_success_reply_descriptor success;
+ struct mpi3_target_command_buffer_reply_descriptor target_command_buffer;
+ struct mpi3_status_reply_descriptor status;
+ __le32 words[4];
+};
+
+struct mpi3_sge_common {
+ __le64 address;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_bit_bucket {
+ __le64 reserved00;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_extended_eedp {
+ u8 user_data_size;
+ u8 reserved01;
+ __le16 eedp_flags;
+ __le32 secondary_reference_tag;
+ __le16 secondary_application_tag;
+ __le16 application_tag_translation_mask;
+ __le16 reserved0c;
+ u8 extended_operation;
+ u8 flags;
+};
+
+union mpi3_sge_union {
+ struct mpi3_sge_common simple;
+ struct mpi3_sge_common chain;
+ struct mpi3_sge_common last_chain;
+ struct mpi3_sge_bit_bucket bit_bucket;
+ struct mpi3_sge_extended_eedp eedp;
+ __le32 words[4];
+};
+
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN (0x30)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED (0xf0)
+#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
+#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
+#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
+#define MPI3_SGE_FLAGS_DLAS_IOC_DDR (0x01)
+#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
+#define MPI3_SGE_EXT_OPER_EEDP (0x00)
+#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
+#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000)
+#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000)
+#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000)
+#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800)
+#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400)
+#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
+#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
+#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
+#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
+#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
+#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
+#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
+#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_NOOP (0x0000)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
+#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
+#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004)
+#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007)
+#define MPI3_EEDP_UDS_512 (0x01)
+#define MPI3_EEDP_UDS_520 (0x02)
+#define MPI3_EEDP_UDS_4080 (0x03)
+#define MPI3_EEDP_UDS_4088 (0x04)
+#define MPI3_EEDP_UDS_4096 (0x05)
+#define MPI3_EEDP_UDS_4104 (0x06)
+#define MPI3_EEDP_UDS_4160 (0x07)
+struct mpi3_request_header {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 function_dependent;
+};
+
+struct mpi3_default_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+};
+
+#define MPI3_HOST_TAG_INVALID (0xffff)
+#define MPI3_FUNCTION_IOC_FACTS (0x01)
+#define MPI3_FUNCTION_IOC_INIT (0x02)
+#define MPI3_FUNCTION_PORT_ENABLE (0x03)
+#define MPI3_FUNCTION_EVENT_NOTIFICATION (0x04)
+#define MPI3_FUNCTION_EVENT_ACK (0x05)
+#define MPI3_FUNCTION_CI_DOWNLOAD (0x06)
+#define MPI3_FUNCTION_CI_UPLOAD (0x07)
+#define MPI3_FUNCTION_IO_UNIT_CONTROL (0x08)
+#define MPI3_FUNCTION_PERSISTENT_EVENT_LOG (0x09)
+#define MPI3_FUNCTION_MGMT_PASSTHROUGH (0x0a)
+#define MPI3_FUNCTION_CONFIG (0x10)
+#define MPI3_FUNCTION_SCSI_IO (0x20)
+#define MPI3_FUNCTION_SCSI_TASK_MGMT (0x21)
+#define MPI3_FUNCTION_SMP_PASSTHROUGH (0x22)
+#define MPI3_FUNCTION_NVME_ENCAPSULATED (0x24)
+#define MPI3_FUNCTION_TARGET_ASSIST (0x30)
+#define MPI3_FUNCTION_TARGET_STATUS_SEND (0x31)
+#define MPI3_FUNCTION_TARGET_MODE_ABORT (0x32)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_BASE (0x33)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_LIST (0x34)
+#define MPI3_FUNCTION_CREATE_REQUEST_QUEUE (0x70)
+#define MPI3_FUNCTION_DELETE_REQUEST_QUEUE (0x71)
+#define MPI3_FUNCTION_CREATE_REPLY_QUEUE (0x72)
+#define MPI3_FUNCTION_DELETE_REPLY_QUEUE (0x73)
+#define MPI3_FUNCTION_TOOLBOX (0x80)
+#define MPI3_FUNCTION_DIAG_BUFFER_POST (0x81)
+#define MPI3_FUNCTION_DIAG_BUFFER_MANAGE (0x82)
+#define MPI3_FUNCTION_DIAG_BUFFER_UPLOAD (0x83)
+#define MPI3_FUNCTION_MIN_IOC_USE_ONLY (0xc0)
+#define MPI3_FUNCTION_MAX_IOC_USE_ONLY (0xef)
+#define MPI3_FUNCTION_MIN_PRODUCT_SPECIFIC (0xf0)
+#define MPI3_FUNCTION_MAX_PRODUCT_SPECIFIC (0xff)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
+#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_SUCCESS (0x0000)
+#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI3_IOCSTATUS_BUSY (0x0002)
+#define MPI3_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI3_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
+#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_FAILURE (0x001f)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI3_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI3_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+#define MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI3_IOCSTATUS_SCSI_TM_NOT_SUPPORTED (0x0041)
+#define MPI3_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI3_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI3_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004a)
+#define MPI3_IOCSTATUS_SCSI_IOC_TERMINATED (0x004b)
+#define MPI3_IOCSTATUS_SCSI_EXT_TERMINATED (0x004c)
+#define MPI3_IOCSTATUS_EEDP_GUARD_ERROR (0x004d)
+#define MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004e)
+#define MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004f)
+#define MPI3_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI3_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI3_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI3_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI3_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006a)
+#define MPI3_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006d)
+#define MPI3_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006e)
+#define MPI3_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006f)
+#define MPI3_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI3_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+#define MPI3_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI3_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+#define MPI3_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00a0)
+#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
+#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
+#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
+#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
+#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
+#define MPI3_IOCSTATUS_INVALID_REPLY_QUEUE_ID (0x0f03)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_DELETION (0x0f04)
+#define MPI3_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#endif
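
A minimal usage sketch (not part of the patch; helper names are illustrative): extracting the descriptor type from a posted reply descriptor and the status portion of an ioc_status word, using the masks defined above.

/* Illustrative helpers only -- assume the mpi30_transport.h definitions above. */
static u16 mpi3_reply_desc_type(const struct mpi3_default_reply_descriptor *desc)
{
	return le16_to_cpu(desc->reply_flags) & MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
}

static u16 mpi3_ioc_status(const struct mpi3_default_reply *reply)
{
	return le16_to_cpu(reply->ioc_status) & MPI3_IOCSTATUS_STATUS_MASK;
}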
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
new file mode 100644
index 000000000000..6f5dc9e78553
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -0,0 +1,901 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3MR_H_INCLUDED
+#define MPI3MR_H_INCLUDED
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "mpi/mpi30_transport.h"
+#include "mpi/mpi30_cnfg.h"
+#include "mpi/mpi30_image.h"
+#include "mpi/mpi30_init.h"
+#include "mpi/mpi30_ioc.h"
+#include "mpi/mpi30_sas.h"
+#include "mpi3mr_debug.h"
+
+/* Global list and lock for storing multiple adapters managed by the driver */
+extern spinlock_t mrioc_list_lock;
+extern struct list_head mrioc_list;
+extern int prot_mask;
+
+#define MPI3MR_DRIVER_VERSION "00.255.45.01"
+#define MPI3MR_DRIVER_RELDATE "12-December-2020"
+
+#define MPI3MR_DRIVER_NAME "mpi3mr"
+#define MPI3MR_DRIVER_LICENSE "GPL"
+#define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. <mpi3mr-linuxdrv.pdl@broadcom.com>"
+#define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver"
+
+#define MPI3MR_NAME_LENGTH 32
+#define IOCNAME "%s: "
+
+/* Definitions for internal SGL and Chain SGL buffers */
+#define MPI3MR_PAGE_SIZE_4K 4096
+#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
+
+/* Definitions for MAX values for shost */
+#define MPI3MR_MAX_CMDS_LUN 7
+#define MPI3MR_MAX_CDB_LENGTH 32
+
+/* Admin queue management definitions */
+#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
+#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
+
+/* Operational queue management definitions */
+#define MPI3MR_OP_REQ_Q_QD 512
+#define MPI3MR_OP_REP_Q_QD 4096
+#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
+#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
+#define MPI3MR_MAX_SEG_LIST_SIZE 4096
+
+/* Reserved Host Tag definitions */
+#define MPI3MR_HOSTTAG_INVALID 0xFFFF
+#define MPI3MR_HOSTTAG_INITCMDS 1
+#define MPI3MR_HOSTTAG_IOCTLCMDS 2
+#define MPI3MR_HOSTTAG_BLK_TMS 5
+
+#define MPI3MR_NUM_DEVRMCMD 1
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
+ MPI3MR_NUM_DEVRMCMD - 1)
+
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+
+/* Reduced resource count definition for crash kernel */
+#define MPI3MR_HOST_IOS_KDUMP 128
+
+/* command/controller interaction timeout definitions in seconds */
+#define MPI3MR_INTADMCMD_TIMEOUT 10
+#define MPI3MR_PORTENABLE_TIMEOUT 300
+#define MPI3MR_ABORTTM_TIMEOUT 30
+#define MPI3MR_RESETTM_TIMEOUT 30
+#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
+#define MPI3MR_TSUPDATE_INTERVAL 900
+#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
+#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+
+#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
+
+/* Internal admin command state definitions*/
+#define MPI3MR_CMD_NOTUSED 0x8000
+#define MPI3MR_CMD_COMPLETE 0x0001
+#define MPI3MR_CMD_PENDING 0x0002
+#define MPI3MR_CMD_REPLY_VALID 0x0004
+#define MPI3MR_CMD_RESET 0x0008
+
+/* Definitions for Event replies and sense buffer allocated per controller */
+#define MPI3MR_NUM_EVT_REPLIES 64
+#define MPI3MR_SENSEBUF_SZ 256
+#define MPI3MR_SENSEBUF_FACTOR 3
+#define MPI3MR_CHAINBUF_FACTOR 3
+#define MPI3MR_CHAINBUFDIX_FACTOR 2
+
+/* Invalid target device handle */
+#define MPI3MR_INVALID_DEV_HANDLE 0xFFFF
+
+/* Controller Reset related definitions */
+#define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5
+#define MPI3MR_MAX_RESET_RETRY_COUNT 3
+
+/* ResponseCode definitions */
+#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
+#define MPI3MR_RSP_TM_COMPLETE 0x00
+#define MPI3MR_RSP_INVALID_FRAME 0x02
+#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04
+#define MPI3MR_RSP_TM_FAILED 0x05
+#define MPI3MR_RSP_TM_SUCCEEDED 0x08
+#define MPI3MR_RSP_TM_INVALID_LUN 0x09
+#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A
+#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
+ MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
+
+#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+/* Command retry count definitions */
+#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
+
+/* Default target device queue depth */
+#define MPI3MR_DEFAULT_SDEV_QD 32
+
+/* Definitions for Threaded IRQ poll*/
+#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
+
+/* Definitions for the controller security status*/
+#define MPI3MR_CTLR_SECURITY_STATUS_MASK 0x0C
+#define MPI3MR_CTLR_SECURE_DBG_STATUS_MASK 0x02
+
+#define MPI3MR_INVALID_DEVICE 0x00
+#define MPI3MR_CONFIG_SECURE_DEVICE 0x04
+#define MPI3MR_HARD_SECURE_DEVICE 0x08
+#define MPI3MR_TAMPERED_DEVICE 0x0C
+
+/* SGE Flag definition */
+#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
+ (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
+ MPI3_SGE_FLAGS_END_OF_LIST)
+
+/* MSI Index from Reply Queue Index */
+#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) (qidx + offset)
+
+/* IOC State definitions */
+enum mpi3mr_iocstate {
+ MRIOC_STATE_READY = 1,
+ MRIOC_STATE_RESET,
+ MRIOC_STATE_FAULT,
+ MRIOC_STATE_BECOMING_READY,
+ MRIOC_STATE_RESET_REQUESTED,
+ MRIOC_STATE_UNRECOVERABLE,
+};
+
+/* Reset reason code definitions*/
+enum mpi3mr_reset_reason {
+ MPI3MR_RESET_FROM_BRINGUP = 1,
+ MPI3MR_RESET_FROM_FAULT_WATCH = 2,
+ MPI3MR_RESET_FROM_IOCTL = 3,
+ MPI3MR_RESET_FROM_EH_HOS = 4,
+ MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
+ MPI3MR_RESET_FROM_IOCTL_TIMEOUT = 6,
+ MPI3MR_RESET_FROM_MUR_FAILURE = 7,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
+ MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,
+ MPI3MR_RESET_FROM_PE_TIMEOUT = 10,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT = 11,
+ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT = 12,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT = 13,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT = 14,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT = 15,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT = 16,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT = 17,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT = 18,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT = 19,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER = 20,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
+ MPI3MR_RESET_FROM_SYSFS = 23,
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24
+};
+
+/**
+ * struct mpi3mr_compimg_ver - replica of component image
+ * version defined in mpi30_image.h in host endianness
+ *
+ */
+struct mpi3mr_compimg_ver {
+ u16 build_num;
+ u16 cust_id;
+ u8 ph_minor;
+ u8 ph_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+/**
+ * struct mpi3mr_ioc_facts - replica of IOC facts data defined
+ * in mpi30_ioc.h in host endianness
+ *
+ */
+struct mpi3mr_ioc_facts {
+ u32 ioc_capabilities;
+ struct mpi3mr_compimg_ver fw_ver;
+ u32 mpi_version;
+ u16 max_reqs;
+ u16 product_id;
+ u16 op_req_sz;
+ u16 reply_sz;
+ u16 exceptions;
+ u16 max_perids;
+ u16 max_pds;
+ u16 max_sasexpanders;
+ u16 max_sasinitiators;
+ u16 max_enclosures;
+ u16 max_pcie_switches;
+ u16 max_nvme;
+ u16 max_vds;
+ u16 max_hpds;
+ u16 max_advhpds;
+ u16 max_raidpds;
+ u16 min_devhandle;
+ u16 max_devhandle;
+ u16 max_op_req_q;
+ u16 max_op_reply_q;
+ u16 shutdown_timeout;
+ u8 ioc_num;
+ u8 who_init;
+ u16 max_msix_vectors;
+ u8 personality;
+ u8 dma_mask;
+ u8 protocol_flags;
+ u8 sge_mod_mask;
+ u8 sge_mod_value;
+ u8 sge_mod_shift;
+};
+
+/**
+ * struct segments - memory descriptor structure to store
+ * virtual and dma addresses for operational queue segments.
+ *
+ * @segment: virtual address
+ * @segment_dma: dma address
+ */
+struct segments {
+ void *segment;
+ dma_addr_t segment_dma;
+};
+
+/**
+ * struct op_req_qinfo - Operational Request Queue Information
+ *
+ * @ci: consumer index
+ * @pi: producer index
+ * @num_requests: Maximum number of entries in the queue
+ * @qid: Queue Id starting from 1
+ * @reply_qid: Associated reply queue Id
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_lock: Concurrent queue access lock
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ */
+struct op_req_qinfo {
+ u16 ci;
+ u16 pi;
+ u16 num_requests;
+ u16 qid;
+ u16 reply_qid;
+ u16 num_segments;
+ u16 segment_qd;
+ spinlock_t q_lock;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+};
+
+/**
+ * struct op_reply_qinfo - Operational Reply Queue Information
+ *
+ * @ci: consumer index
+ * @qid: Queue Id starting from 1
+ * @num_replies: Maximum number of entries in the queue
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ * @ephase: Expected phase identifier for the reply queue
+ * @pend_ios: Number of IOs pending in HW for this queue
+ * @enable_irq_poll: Flag to indicate polling is enabled
+ * @in_use: Queue is handled by poll/ISR
+ */
+struct op_reply_qinfo {
+ u16 ci;
+ u16 qid;
+ u16 num_replies;
+ u16 num_segments;
+ u16 segment_qd;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+ u8 ephase;
+ atomic_t pend_ios;
+ bool enable_irq_poll;
+ atomic_t in_use;
+};
+
+/**
+ * struct mpi3mr_intr_info - Interrupt cookie information
+ *
+ * @mrioc: Adapter instance reference
+ * @msix_index: MSIx index
+ * @op_reply_q: Associated operational reply queue
+ * @name: Dev name for the irq claiming device
+ */
+struct mpi3mr_intr_info {
+ struct mpi3mr_ioc *mrioc;
+ u16 msix_index;
+ struct op_reply_qinfo *op_reply_q;
+ char name[MPI3MR_NAME_LENGTH];
+};
+
+/**
+ * struct tgt_dev_sas_sata - SAS/SATA device specific
+ * information cached from firmware given data
+ *
+ * @sas_address: World wide unique SAS address
+ * @dev_info: Device information bits
+ */
+struct tgt_dev_sas_sata {
+ u64 sas_address;
+ u16 dev_info;
+};
+
+/**
+ * struct tgt_dev_pcie - PCIe device specific information cached
+ * from firmware given data
+ *
+ * @mdts: Maximum data transfer size
+ * @capb: Device capabilities
+ * @pgsz: Device page size
+ * @abort_to: Timeout for abort TM
+ * @reset_to: Timeout for Target/LUN reset TM
+ */
+struct tgt_dev_pcie {
+ u32 mdts;
+ u16 capb;
+ u8 pgsz;
+ u8 abort_to;
+ u8 reset_to;
+};
+
+/**
+ * struct tgt_dev_volume - virtual device specific information
+ * cached from firmware given data
+ *
+ * @state: State of the VD
+ */
+struct tgt_dev_volume {
+ u8 state;
+};
+
+/**
+ * union _form_spec_inf - union of device specific information
+ */
+union _form_spec_inf {
+ struct tgt_dev_sas_sata sas_sata_inf;
+ struct tgt_dev_pcie pcie_inf;
+ struct tgt_dev_volume vol_inf;
+};
+
+
+
+/**
+ * struct mpi3mr_tgt_dev - target device data structure
+ *
+ * @list: List pointer
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @parent_handle: FW parent device handle
+ * @slot: Slot number
+ * @encl_handle: FW enclosure handle
+ * @perst_id: FW assigned Persistent ID
+ * @dev_type: SAS/SATA/PCIE device type
+ * @is_hidden: Whether the device is hidden from upper layers
+ * @host_exposed: Already exposed to host or not
+ * @q_depth: Device specific Queue Depth
+ * @wwid: World wide ID
+ * @dev_spec: Device type specific information
+ * @ref_count: Reference count
+ */
+struct mpi3mr_tgt_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 parent_handle;
+ u16 slot;
+ u16 encl_handle;
+ u16 perst_id;
+ u8 dev_type;
+ u8 is_hidden;
+ u8 host_exposed;
+ u16 q_depth;
+ u64 wwid;
+ union _form_spec_inf dev_spec;
+ struct kref ref_count;
+};
+
+/**
+ * mpi3mr_tgtdev_get - k reference incrementor
+ * @s: Target device reference
+ *
+ * Increment target device reference count.
+ */
+static inline void mpi3mr_tgtdev_get(struct mpi3mr_tgt_dev *s)
+{
+ kref_get(&s->ref_count);
+}
+
+/**
+ * mpi3mr_free_tgtdev - target device memory deallocator
+ * @r: k reference pointer of the target device
+ *
+ * Free target device memory when no reference.
+ */
+static inline void mpi3mr_free_tgtdev(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_tgt_dev, ref_count));
+}
+
+/**
+ * mpi3mr_tgtdev_put - k reference decrementor
+ * @s: Target device reference
+ *
+ * Decrement target device reference count.
+ */
+static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
+{
+ kref_put(&s->ref_count, mpi3mr_free_tgtdev);
+}
+
+
+/**
+ * struct mpi3mr_stgt_priv_data - SCSI target private structure
+ *
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @perst_id: FW assigned Persistent ID
+ * @num_luns: Number of Logical Units
+ * @block_io: I/O blocked to the device or not
+ * @dev_removed: Device removed in the Firmware
+ * @dev_removedelay: Device is waiting to be removed in FW
+ * @dev_type: Device type
+ * @tgt_dev: Internal target device pointer
+ */
+struct mpi3mr_stgt_priv_data {
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 perst_id;
+ u32 num_luns;
+ atomic_t block_io;
+ u8 dev_removed;
+ u8 dev_removedelay;
+ u8 dev_type;
+ struct mpi3mr_tgt_dev *tgt_dev;
+};
+
+/**
+ * struct mpi3mr_sdev_priv_data - SCSI device private structure
+ *
+ * @tgt_priv_data: Scsi_target private data pointer
+ * @lun_id: LUN ID of the device
+ * @ncq_prio_enable: NCQ priority enable for SATA device
+ */
+struct mpi3mr_sdev_priv_data {
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ u32 lun_id;
+ u8 ncq_prio_enable;
+};
+
+/**
+ * struct mpi3mr_drv_cmd - Internal command tracker
+ *
+ * @mutex: Command mutex
+ * @done: Completion for wakeup
+ * @reply: Firmware reply for internal commands
+ * @sensebuf: Sensebuf for SCSI IO commands
+ * @iou_rc: IO Unit control reason code
+ * @state: Command State
+ * @dev_handle: Firmware handle for device specific commands
+ * @ioc_status: IOC status from the firmware
+ * @ioc_loginfo: IOC log info from the firmware
+ * @is_waiting: Is the command issued in block mode
+ * @retry_count: Retry count for retriable commands
+ * @host_tag: Host tag used by the command
+ * @callback: Callback for non blocking commands
+ */
+struct mpi3mr_drv_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ u8 *sensebuf;
+ u8 iou_rc;
+ u16 state;
+ u16 dev_handle;
+ u16 ioc_status;
+ u32 ioc_loginfo;
+ u8 is_waiting;
+ u8 retry_count;
+ u16 host_tag;
+
+ void (*callback)(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+};
+
+
+/**
+ * struct chain_element - memory descriptor structure to store
+ * virtual and dma addresses for chain elements.
+ *
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct chain_element {
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct scmd_priv - SCSI command private data
+ *
+ * @host_tag: Host tag specific to operational queue
+ * @in_lld_scope: Command in LLD scope or not
+ * @meta_sg_valid: DIX command with meta data SGL or not
+ * @scmd: SCSI Command pointer
+ * @req_q_idx: Operational request queue index
+ * @chain_idx: Chain frame index
+ * @meta_chain_idx: Chain frame index of meta data SGL
+ * @mpi3mr_scsiio_req: MPI SCSI IO request
+ */
+struct scmd_priv {
+ u16 host_tag;
+ u8 in_lld_scope;
+ u8 meta_sg_valid;
+ struct scsi_cmnd *scmd;
+ u16 req_q_idx;
+ int chain_idx;
+ int meta_chain_idx;
+ u8 mpi3mr_scsiio_req[MPI3MR_ADMIN_REQ_FRAME_SZ];
+};
+
+/**
+ * struct mpi3mr_ioc - Adapter anchor structure stored in shost
+ * private data
+ *
+ * @list: List pointer
+ * @pdev: PCI device pointer
+ * @shost: Scsi_Host pointer
+ * @id: Controller ID
+ * @cpu_count: Number of online CPUs
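+ * @enable_segqueue: Flag to indicate whether segmented queues are used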
+ * @irqpoll_sleep: usleep unit used in threaded isr irqpoll
+ * @name: Controller ASCII name
+ * @driver_name: Driver ASCII name
+ * @sysif_regs: System interface registers virtual address
+ * @sysif_regs_phys: System interface registers physical address
+ * @bars: PCI BARS
+ * @dma_mask: DMA mask
+ * @msix_count: Number of MSIX vectors used
+ * @intr_enabled: Is interrupts enabled
+ * @num_admin_req: Number of admin requests
+ * @admin_req_q_sz: Admin request queue size
+ * @admin_req_pi: Admin request queue producer index
+ * @admin_req_ci: Admin request queue consumer index
+ * @admin_req_base: Admin request queue base virtual address
+ * @admin_req_dma: Admin request queue base dma address
+ * @admin_req_lock: Admin queue access lock
+ * @num_admin_replies: Number of admin replies
+ * @admin_reply_q_sz: Admin reply queue size
+ * @admin_reply_ci: Admin reply queue consumer index
+ * @admin_reply_ephase: Admin reply queue expected phase
+ * @admin_reply_base: Admin reply queue base virtual address
+ * @admin_reply_dma: Admin reply queue base dma address
+ * @ready_timeout: Controller ready timeout
+ * @intr_info: Interrupt cookie pointer
+ * @intr_info_count: Number of interrupt cookies
+ * @num_queues: Number of operational queues
+ * @num_op_req_q: Number of operational request queues
+ * @req_qinfo: Operational request queue info pointer
+ * @num_op_reply_q: Number of operational reply queues
+ * @op_reply_qinfo: Operational reply queue info pointer
+ * @init_cmds: Command tracker for initialization commands
+ * @facts: Cached IOC facts data
+ * @op_reply_desc_sz: Operational reply descriptor size
+ * @num_reply_bufs: Number of reply buffers allocated
+ * @reply_buf_pool: Reply buffer pool
+ * @reply_buf: Reply buffer base virtual address
+ * @reply_buf_dma: Reply buffer DMA address
+ * @reply_buf_dma_max_address: Reply DMA address max limit
+ * @reply_free_qsz: Reply free queue size
+ * @reply_free_q_pool: Reply free queue pool
+ * @reply_free_q: Reply free queue base virtual address
+ * @reply_free_q_dma: Reply free queue base DMA address
+ * @reply_free_queue_lock: Reply free queue lock
+ * @reply_free_queue_host_index: Reply free queue host index
+ * @num_sense_bufs: Number of sense buffers
+ * @sense_buf_pool: Sense buffer pool
+ * @sense_buf: Sense buffer base virtual address
+ * @sense_buf_dma: Sense buffer base DMA address
+ * @sense_buf_q_sz: Sense buffer queue size
+ * @sense_buf_q_pool: Sense buffer queue pool
+ * @sense_buf_q: Sense buffer queue virtual address
+ * @sense_buf_q_dma: Sense buffer queue DMA address
+ * @sbq_lock: Sense buffer queue lock
+ * @sbq_host_index: Sense buffer queue host index
+ * @event_masks: Event mask bitmap
+ * @fwevt_worker_name: Firmware event worker thread name
+ * @fwevt_worker_thread: Firmware event worker thread
+ * @fwevt_lock: Firmware event lock
+ * @fwevt_list: Firmware event list
+ * @watchdog_work_q_name: Fault watchdog worker thread name
+ * @watchdog_work_q: Fault watchdog worker thread
+ * @watchdog_work: Fault watchdog work
+ * @watchdog_lock: Fault watchdog lock
+ * @is_driver_loading: Is driver still loading
+ * @scan_started: Async scan started
+ * @scan_failed: Async scan failed
+ * @stop_drv_processing: Stop all command processing
+ * @max_host_ios: Maximum host I/O count
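+ * @tgtdev_lock: Target device list lock
+ * @tgtdev_list: List of target device objects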
+ * @chain_buf_count: Chain buffer count
+ * @chain_buf_pool: Chain buffer pool
+ * @chain_sgl_list: Chain SGL list
+ * @chain_bitmap_sz: Chain buffer allocator bitmap size
+ * @chain_bitmap: Chain buffer allocator bitmap
+ * @chain_buf_lock: Chain buffer list lock
+ * @host_tm_cmds: Command tracker for task management commands
+ * @dev_rmhs_cmds: Command tracker for device removal commands
+ * @devrem_bitmap_sz: Device removal bitmap size
+ * @devrem_bitmap: Device removal bitmap
+ * @dev_handle_bitmap_sz: Device handle bitmap size
+ * @removepend_bitmap: Remove pending bitmap
+ * @delayed_rmhs_list: Delayed device removal list
+ * @ts_update_counter: Timestamp update counter
+ * @fault_dbg: Fault debug flag
+ * @reset_in_progress: Reset in progress flag
+ * @unrecoverable: Controller unrecoverable flag
+ * @reset_mutex: Controller reset mutex
+ * @reset_waitq: Controller reset wait queue
+ * @diagsave_timeout: Diagnostic information save timeout
+ * @logging_level: Controller debug logging level
+ * @flush_io_count: I/O count to flush after reset
+ * @current_event: Firmware event currently in process
+ * @driver_info: Driver, Kernel, OS information to firmware
+ * @change_count: Topology change count
+ * @op_reply_q_offset: Operational reply queue offset with MSIx
+ */
+struct mpi3mr_ioc {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ bool enable_segqueue;
+ u32 irqpoll_sleep;
+
+ char name[MPI3MR_NAME_LENGTH];
+ char driver_name[MPI3MR_NAME_LENGTH];
+
+ volatile struct mpi3_sysif_registers __iomem *sysif_regs;
+ resource_size_t sysif_regs_phys;
+ int bars;
+ u64 dma_mask;
+
+ u16 msix_count;
+ u8 intr_enabled;
+
+ u16 num_admin_req;
+ u32 admin_req_q_sz;
+ u16 admin_req_pi;
+ u16 admin_req_ci;
+ void *admin_req_base;
+ dma_addr_t admin_req_dma;
+ spinlock_t admin_req_lock;
+
+ u16 num_admin_replies;
+ u32 admin_reply_q_sz;
+ u16 admin_reply_ci;
+ u8 admin_reply_ephase;
+ void *admin_reply_base;
+ dma_addr_t admin_reply_dma;
+
+ u32 ready_timeout;
+
+ struct mpi3mr_intr_info *intr_info;
+ u16 intr_info_count;
+
+ u16 num_queues;
+ u16 num_op_req_q;
+ struct op_req_qinfo *req_qinfo;
+
+ u16 num_op_reply_q;
+ struct op_reply_qinfo *op_reply_qinfo;
+
+ struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_ioc_facts facts;
+ u16 op_reply_desc_sz;
+
+ u32 num_reply_bufs;
+ struct dma_pool *reply_buf_pool;
+ u8 *reply_buf;
+ dma_addr_t reply_buf_dma;
+ dma_addr_t reply_buf_dma_max_address;
+
+ u16 reply_free_qsz;
+ struct dma_pool *reply_free_q_pool;
+ __le64 *reply_free_q;
+ dma_addr_t reply_free_q_dma;
+ spinlock_t reply_free_queue_lock;
+ u32 reply_free_queue_host_index;
+
+ u32 num_sense_bufs;
+ struct dma_pool *sense_buf_pool;
+ u8 *sense_buf;
+ dma_addr_t sense_buf_dma;
+
+ u16 sense_buf_q_sz;
+ struct dma_pool *sense_buf_q_pool;
+ __le64 *sense_buf_q;
+ dma_addr_t sense_buf_q_dma;
+ spinlock_t sbq_lock;
+ u32 sbq_host_index;
+ u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ char fwevt_worker_name[MPI3MR_NAME_LENGTH];
+ struct workqueue_struct *fwevt_worker_thread;
+ spinlock_t fwevt_lock;
+ struct list_head fwevt_list;
+
+ char watchdog_work_q_name[20];
+ struct workqueue_struct *watchdog_work_q;
+ struct delayed_work watchdog_work;
+ spinlock_t watchdog_lock;
+
+ u8 is_driver_loading;
+ u8 scan_started;
+ u16 scan_failed;
+ u8 stop_drv_processing;
+
+ u16 max_host_ios;
+ spinlock_t tgtdev_lock;
+ struct list_head tgtdev_list;
+
+ u32 chain_buf_count;
+ struct dma_pool *chain_buf_pool;
+ struct chain_element *chain_sgl_list;
+ u16 chain_bitmap_sz;
+ void *chain_bitmap;
+ spinlock_t chain_buf_lock;
+
+ struct mpi3mr_drv_cmd host_tm_cmds;
+ struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ u16 devrem_bitmap_sz;
+ void *devrem_bitmap;
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+ struct list_head delayed_rmhs_list;
+
+ u32 ts_update_counter;
+ u8 fault_dbg;
+ u8 reset_in_progress;
+ u8 unrecoverable;
+ struct mutex reset_mutex;
+ wait_queue_head_t reset_waitq;
+
+ u16 diagsave_timeout;
+ int logging_level;
+ u16 flush_io_count;
+
+ struct mpi3mr_fwevt *current_event;
+ struct mpi3_driver_info_layout driver_info;
+ u16 change_count;
+ u16 op_reply_q_offset;
+};
+
+/**
+ * struct mpi3mr_fwevt - Firmware event structure.
+ *
+ * @list: list head
+ * @work: Work structure
+ * @mrioc: Adapter instance reference
+ * @event_id: MPI3 firmware event ID
+ * @send_ack: Event acknowledgment required or not
+ * @process_evt: Bottom half processing required or not
+ * @evt_ctx: Event context to send in Ack
+ * @ref_count: kref count
+ * @event_data: Actual MPI3 event data
+ */
+struct mpi3mr_fwevt {
+ struct list_head list;
+ struct work_struct work;
+ struct mpi3mr_ioc *mrioc;
+ u16 event_id;
+ bool send_ack;
+ bool process_evt;
+ u32 evt_ctx;
+ struct kref ref_count;
+ char event_data[0] __aligned(4);
+};
+
+
+/**
+ * struct delayed_dev_rmhs_node - Delayed device removal node
+ *
+ * @list: list head
+ * @handle: Device handle
+ * @iou_rc: IO Unit Control Reason Code
+ */
+struct delayed_dev_rmhs_node {
+ struct list_head list;
+ u16 handle;
+ u8 iou_rc;
+};
+
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset);
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *opreqq, u8 *req);
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr);
+void mpi3mr_build_zero_len_sge(void *paddr);
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma);
+
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply);
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc,
+ u64 *reply_dma, u16 qidx);
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc);
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
+
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump);
+int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason);
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
+
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
+int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx);
+
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc);
+
+#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
new file mode 100644
index 000000000000..c085bb048d41
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3SAS_DEBUG_H_INCLUDED
+
+#define MPI3SAS_DEBUG_H_INCLUDED
+
+/*
+ * debug levels
+ */
+#define MPI3_DEBUG 0x00000001
+#define MPI3_DEBUG_MSG_FRAME 0x00000002
+#define MPI3_DEBUG_SG 0x00000004
+#define MPI3_DEBUG_EVENTS 0x00000008
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPI3_DEBUG_INIT 0x00000020
+#define MPI3_DEBUG_EXIT 0x00000040
+#define MPI3_DEBUG_FAIL 0x00000080
+#define MPI3_DEBUG_TM 0x00000100
+#define MPI3_DEBUG_REPLY 0x00000200
+#define MPI3_DEBUG_HANDSHAKE 0x00000400
+#define MPI3_DEBUG_CONFIG 0x00000800
+#define MPI3_DEBUG_DL 0x00001000
+#define MPI3_DEBUG_RESET 0x00002000
+#define MPI3_DEBUG_SCSI 0x00004000
+#define MPI3_DEBUG_IOCTL 0x00008000
+#define MPI3_DEBUG_CSMISAS 0x00010000
+#define MPI3_DEBUG_SAS 0x00020000
+#define MPI3_DEBUG_TRANSPORT 0x00040000
+#define MPI3_DEBUG_TASK_SET_FULL 0x00080000
+#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000
+
+
+/*
+ * debug macros
+ */
+
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+
+
+#define dbgprint(IOC, FMT, ...) \
+ do { \
+ if (IOC->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#endif /* MPI3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
new file mode 100644
index 000000000000..2dba2b0af166
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -0,0 +1,3957 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/io-64-nonatomic-lo-hi.h>
+
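+/*
+ * mpi3mr_writeq - 64-bit MMIO write helper
+ *
+ * Use the native writeq() when it is available; otherwise fall back to
+ * two 32-bit writes, low dword first, for builds without writeq().
+ */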
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ writeq(b, addr);
+}
+#else
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ __u64 data_out = b;
+
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+}
+#endif
+
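+/**
+ * mpi3mr_check_req_qfull - Check request queue is full or not
+ * @op_req_q: Operational request queue info
+ *
+ * Return: true when the queue is full, false otherwise.
+ */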
+static inline bool
+mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
+{
+ u16 pi, ci, max_entries;
+ bool is_qfull = false;
+
+ pi = op_req_q->pi;
+ ci = READ_ONCE(op_req_q->ci);
+ max_entries = op_req_q->num_requests;
+
+ if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
+ is_qfull = true;
+
+ return is_qfull;
+}
+
+static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
+{
+ u16 i, max_vectors;
+
+ max_vectors = mrioc->intr_info_count;
+
+ for (i = 0; i < max_vectors; i++)
+ synchronize_irq(pci_irq_vector(mrioc->pdev, i));
+}
+
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 0;
+ mpi3mr_sync_irqs(mrioc);
+}
+
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 1;
+}
+
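+/**
+ * mpi3mr_cleanup_isr - Free IRQs and interrupt cookies
+ * @mrioc: Adapter instance reference
+ *
+ * Disable interrupt processing, free the registered IRQs and the
+ * per-vector interrupt cookies, and release the PCI IRQ vectors.
+ *
+ * Return: Nothing.
+ */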
+static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ if (!mrioc->intr_info)
+ return;
+
+ for (i = 0; i < mrioc->intr_info_count; i++)
+ free_irq(pci_irq_vector(mrioc->pdev, i),
+ (mrioc->intr_info + i));
+
+ kfree(mrioc->intr_info);
+ mrioc->intr_info = NULL;
+ mrioc->intr_info_count = 0;
+ pci_free_irq_vectors(mrioc->pdev);
+}
+
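+/**
+ * mpi3mr_add_sg_single - Build a simple SGE
+ * @paddr: Virtual address of the SGE to populate
+ * @flags: SGE flags
+ * @length: Data length
+ * @dma_addr: DMA address of the data buffer
+ *
+ * Populate one MPI3 simple scatter-gather element at the given
+ * location.
+ *
+ * Return: Nothing.
+ */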
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr)
+{
+ struct mpi3_sge_common *sgel = paddr;
+
+ sgel->flags = flags;
+ sgel->length = cpu_to_le32(length);
+ sgel->address = cpu_to_le64(dma_addr);
+}
+
+void mpi3mr_build_zero_len_sge(void *paddr)
+{
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
+}
+
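+/**
+ * mpi3mr_get_reply_virt_addr - Map reply DMA address to virtual address
+ * @mrioc: Adapter instance reference
+ * @phys_addr: DMA address of the reply frame
+ *
+ * Return: Virtual address of the reply frame, or NULL when the DMA
+ * address is zero or outside the reply buffer pool.
+ */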
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ if ((phys_addr < mrioc->reply_buf_dma) ||
+ (phys_addr > mrioc->reply_buf_dma_max_address))
+ return NULL;
+
+ return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
+}
+
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
+}
+
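+/**
+ * mpi3mr_repost_reply_buf - Return a reply frame to the free queue
+ * @mrioc: Adapter instance reference
+ * @reply_dma: DMA address of the processed reply frame
+ *
+ * Place the reply frame back on the reply free queue and update the
+ * reply free host index register.
+ *
+ * Return: Nothing.
+ */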
+static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
+ u64 reply_dma)
+{
+ u32 old_idx = 0;
+
+ spin_lock(&mrioc->reply_free_queue_lock);
+ old_idx = mrioc->reply_free_queue_host_index;
+ mrioc->reply_free_queue_host_index = (
+ (mrioc->reply_free_queue_host_index ==
+ (mrioc->reply_free_qsz - 1)) ? 0 :
+ (mrioc->reply_free_queue_host_index + 1));
+ mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+ spin_unlock(&mrioc->reply_free_queue_lock);
+}
+
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma)
+{
+ u32 old_idx = 0;
+
+ spin_lock(&mrioc->sbq_lock);
+ old_idx = mrioc->sbq_host_index;
+ mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
+ (mrioc->sense_buf_q_sz - 1)) ? 0 :
+ (mrioc->sbq_host_index + 1));
+ mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+ spin_unlock(&mrioc->sbq_lock);
+}
+
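+/**
+ * mpi3mr_print_event_data - Display firmware event details
+ * @mrioc: Adapter instance reference
+ * @event_reply: Event notification reply frame
+ *
+ * Log a human readable description of the received firmware event.
+ *
+ * Return: Nothing.
+ */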
+static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ event = event_reply->event;
+
+ switch (event) {
+ case MPI3_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI3_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI3_EVENT_GPIO_INTERRUPT:
+ desc = "GPIO Interrupt";
+ break;
+ case MPI3_EVENT_TEMP_THRESHOLD:
+ desc = "Temperature Threshold";
+ break;
+ case MPI3_EVENT_CABLE_MGMT:
+ desc = "Cable Management";
+ break;
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ desc = "Energy Pack Change";
+ break;
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ struct mpi3_event_data_device_status_change *event_data =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+ ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
+ event_data->dev_handle, event_data->reason_code);
+ return;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
+ {
+ struct mpi3_event_data_sas_discovery *event_data =
+ (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
+ ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
+ (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop",
+ le32_to_cpu(event_data->discovery_status));
+ return;
+ }
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
+ desc = "SAS Notify Primitive";
+ break;
+ case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "Enclosure Device Status Change";
+ break;
+ case MPI3_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI3_EVENT_SAS_PHY_COUNTER:
+ desc = "SAS PHY Counter";
+ break;
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ desc = "SAS Device Discovery Error";
+ break;
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ desc = "PCIE Topology Change List";
+ break;
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ {
+ struct mpi3_event_data_pcie_enumeration *event_data =
+ (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
+ ioc_info(mrioc, "PCIE Enumeration: (%s)",
+ (event_data->reason_code ==
+ MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
+ if (event_data->enumeration_status)
+ ioc_info(mrioc, "enumeration_status(0x%08x)\n",
+ le32_to_cpu(event_data->enumeration_status));
+ return;
+ }
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ desc = "Prepare For Reset";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ ioc_info(mrioc, "%s\n", desc);
+}
+
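+/**
+ * mpi3mr_handle_events - Top level event handler
+ * @mrioc: Adapter instance reference
+ * @def_reply: Event notification reply frame
+ *
+ * Cache the IOC change count, log the event and hand it over to the
+ * OS level event handling.
+ *
+ * Return: Nothing.
+ */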
+static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply *def_reply)
+{
+ struct mpi3_event_notification_reply *event_reply =
+ (struct mpi3_event_notification_reply *)def_reply;
+
+ mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
+ mpi3mr_print_event_data(mrioc, event_reply);
+ mpi3mr_os_handle_events(mrioc, event_reply);
+}
+
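+/**
+ * mpi3mr_get_drv_cmd - Get internal command tracker from host tag
+ * @mrioc: Adapter instance reference
+ * @host_tag: Host tag from the reply
+ * @def_reply: Default reply frame, used to dispatch event notifications
+ *
+ * Return: Pointer to the matching internal command tracker, or NULL
+ * when the host tag does not map to a driver command.
+ */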
+static struct mpi3mr_drv_cmd *
+mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
+ struct mpi3_default_reply *def_reply)
+{
+ u16 idx;
+
+ switch (host_tag) {
+ case MPI3MR_HOSTTAG_INITCMDS:
+ return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_BLK_TMS:
+ return &mrioc->host_tm_cmds;
+ case MPI3MR_HOSTTAG_INVALID:
+ if (def_reply && def_reply->function ==
+ MPI3_FUNCTION_EVENT_NOTIFICATION)
+ mpi3mr_handle_events(mrioc, def_reply);
+ return NULL;
+ default:
+ break;
+ }
+ if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ return &mrioc->dev_rmhs_cmds[idx];
+ }
+
+ return NULL;
+}
+
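+/**
+ * mpi3mr_process_admin_reply_desc - Process an admin reply descriptor
+ * @mrioc: Adapter instance reference
+ * @reply_desc: Admin reply descriptor
+ * @reply_dma: Place holder for the reply frame DMA address, if any
+ *
+ * Decode the reply descriptor, update the matching internal command
+ * tracker with the IOC status/log info and wake up or call back the
+ * waiter, and repost any sense buffer used by a SCSI IO reply.
+ *
+ * Return: Nothing.
+ */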
+static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_address_reply_descriptor *addr_desc;
+ struct mpi3_success_reply_descriptor *success_desc;
+ struct mpi3_default_reply *def_reply = NULL;
+ struct mpi3mr_drv_cmd *cmdptr = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply;
+ u8 *sense_buf = NULL;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
+ if (!def_reply)
+ goto out;
+ host_tag = le16_to_cpu(def_reply->host_tag);
+ ioc_status = le16_to_cpu(def_reply->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
+ scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+
+ cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
+ if (cmdptr) {
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_COMPLETE;
+ cmdptr->ioc_loginfo = ioc_loginfo;
+ cmdptr->ioc_status = ioc_status;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (def_reply) {
+ cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
+ memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
+ mrioc->facts.reply_sz);
+ }
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+ }
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
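+/**
+ * mpi3mr_process_admin_reply_q - Process the admin reply queue
+ * @mrioc: Adapter instance reference
+ *
+ * Process admin reply descriptors until the expected phase bit no
+ * longer matches, reposting reply frames and updating the consumer
+ * index register.
+ *
+ * Return: Number of admin replies processed.
+ */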
+static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+{
+ u32 exp_phase = mrioc->admin_reply_ephase;
+ u32 admin_reply_ci = mrioc->admin_reply_ci;
+ u32 num_admin_replies = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+
+ reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ return 0;
+
+ do {
+ mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
+ mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_admin_replies++;
+ if (++admin_reply_ci == mrioc->num_admin_replies) {
+ admin_reply_ci = 0;
+ exp_phase ^= 1;
+ }
+ reply_desc =
+ (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+ } while (1);
+
+ writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ mrioc->admin_reply_ci = admin_reply_ci;
+ mrioc->admin_reply_ephase = exp_phase;
+
+ return num_admin_replies;
+}
+
+/**
+ * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
+ * queue's consumer index from operational reply descriptor queue.
+ * @op_reply_q: op_reply_qinfo object
+ * @reply_ci: operational reply descriptor's queue consumer index
+ *
+ * Returns reply descriptor frame address
+ */
+static inline struct mpi3_default_reply_descriptor *
+mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
+{
+ void *segment_base_addr;
+ struct segments *segments = op_reply_q->q_segments;
+ struct mpi3_default_reply_descriptor *reply_desc = NULL;
+
+ segment_base_addr =
+ segments[reply_ci / op_reply_q->segment_qd].segment;
+ reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
+ (reply_ci % op_reply_q->segment_qd);
+ return reply_desc;
+}
+
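+/**
+ * mpi3mr_process_op_reply_q - Process an operational reply queue
+ * @mrioc: Adapter instance reference
+ * @intr_info: Interrupt cookie owning the reply queue
+ *
+ * Process operational reply descriptors until the expected phase bit
+ * no longer matches, then update the queue's consumer index register.
+ * Enables IRQ polling when the completion count exceeds the host I/O
+ * limit.
+ *
+ * Return: Number of operational replies processed.
+ */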
+static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_intr_info *intr_info)
+{
+ struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
+ struct op_req_qinfo *op_req_q;
+ u32 exp_phase;
+ u32 reply_ci;
+ u32 num_op_reply = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+ u16 req_q_idx = 0, reply_qidx;
+
+ reply_qidx = op_reply_q->qid - 1;
+
+ if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
+ return 0;
+
+ exp_phase = op_reply_q->ephase;
+ reply_ci = op_reply_q->ci;
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&op_reply_q->in_use);
+ return 0;
+ }
+
+ do {
+ req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
+ op_req_q = &mrioc->req_qinfo[req_q_idx];
+
+ WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
+ mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
+ reply_qidx);
+ atomic_dec(&op_reply_q->pend_ios);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_op_reply++;
+
+ if (++reply_ci == op_reply_q->num_replies) {
+ reply_ci = 0;
+ exp_phase ^= 1;
+ }
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+ /*
+ * Exit completion loop to avoid CPU lockup
+ * Ensure remaining completion happens from threaded ISR.
+ */
+ if (num_op_reply > mrioc->max_host_ios) {
+ intr_info->op_reply_q->enable_irq_poll = true;
+ break;
+ }
+
+ } while (1);
+
+ writel(reply_ci,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+ op_reply_q->ci = reply_ci;
+ op_reply_q->ephase = exp_phase;
+
+ atomic_dec(&op_reply_q->in_use);
+ return num_op_reply;
+}
+
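+/**
+ * mpi3mr_isr_primary - Primary interrupt handler
+ * @irq: IRQ number
+ * @privdata: Interrupt cookie
+ *
+ * Process the admin reply queue (on MSI-x index 0) and the associated
+ * operational reply queue, if any.
+ *
+ * Return: IRQ_HANDLED when replies were processed, IRQ_NONE otherwise.
+ */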
+static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_admin_replies = 0, num_op_reply = 0;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+
+ if (!mrioc->intr_enabled)
+ return IRQ_NONE;
+
+ midx = intr_info->msix_index;
+
+ if (!midx)
+ num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
+
+ if (num_admin_replies || num_op_reply)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
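+/**
+ * mpi3mr_isr - Hard interrupt handler
+ * @irq: IRQ number
+ * @privdata: Interrupt cookie
+ *
+ * Invoke the primary ISR and, when IRQ polling is enabled and I/Os
+ * are still pending, disable the vector and wake the threaded
+ * handler.
+ *
+ * Return: Result of the primary ISR or IRQ_WAKE_THREAD.
+ */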
+static irqreturn_t mpi3mr_isr(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ int ret;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+ /* Call primary ISR routine */
+ ret = mpi3mr_isr_primary(irq, privdata);
+
+ /*
+ * If more IOs are expected, schedule IRQ polling thread.
+ * Otherwise exit from ISR.
+ */
+ if (!intr_info->op_reply_q)
+ return ret;
+
+ if (!intr_info->op_reply_q->enable_irq_poll ||
+ !atomic_read(&intr_info->op_reply_q->pend_ios))
+ return ret;
+
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mpi3mr_isr_poll - Reply queue polling routine
+ * @irq: IRQ
+ * @privdata: Interrupt info
+ *
+ * Poll for pending I/O completions in a loop until no pending I/Os
+ * remain or until a controller queue depth worth of I/Os has been
+ * processed.
+ *
+ * Return: IRQ_NONE or IRQ_HANDLED
+ */
+static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_op_reply = 0;
+
+ if (!intr_info || !intr_info->op_reply_q)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+
+ /* Poll for pending IOs completions */
+ do {
+ if (!mrioc->intr_enabled)
+ break;
+
+ if (!midx)
+ mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply +=
+ mpi3mr_process_op_reply_q(mrioc, intr_info);
+
+ usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+
+ } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
+ (num_op_reply < mrioc->max_host_ios));
+
+ intr_info->op_reply_q->enable_irq_poll = false;
+ enable_irq(pci_irq_vector(mrioc->pdev, midx));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpi3mr_request_irq - Request IRQ and register ISR
+ * @mrioc: Adapter instance reference
+ * @index: IRQ vector index
+ *
+ * Request a threaded IRQ with mpi3mr_isr as the primary handler and
+ * mpi3mr_isr_poll as the threaded (secondary) handler.
+ *
+ * Return: 0 on success and non zero on failures.
+ */
+static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
+ int retval = 0;
+
+ intr_info->mrioc = mrioc;
+ intr_info->msix_index = index;
+ intr_info->op_reply_q = NULL;
+
+ snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
+ mrioc->driver_name, mrioc->id, index);
+
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
+ mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+ if (retval) {
+ ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
+ intr_info->name, pci_irq_vector(pdev, index));
+ return retval;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_setup_isr - Setup ISR for the controller
+ * @mrioc: Adapter instance reference
+ * @setup_one: Request one IRQ or more
+ *
+ * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
+ *
+ * Return: 0 on success and non zero on failures.
+ */
+static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
+{
+ unsigned int irq_flags = PCI_IRQ_MSIX;
+ int max_vectors;
+ int retval;
+ int i;
+ struct irq_affinity desc = { .pre_vectors = 1};
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (setup_one || reset_devices)
+ max_vectors = 1;
+ else {
+ max_vectors =
+ min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
+
+ ioc_info(mrioc,
+ "MSI-X vectors supported: %d, no of cores: %d,",
+ mrioc->msix_count, mrioc->cpu_count);
+ ioc_info(mrioc,
+ "MSI-x vectors requested: %d\n", max_vectors);
+ }
+
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ 1, max_vectors, irq_flags, &desc);
+ if (retval < 0) {
+ ioc_err(mrioc, "Cannot alloc irq vectors\n");
+ goto out_failed;
+ }
+ if (retval != max_vectors) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ /*
+ * If only one MSI-x is allocated, then MSI-x 0 will be shared
+ * between Admin queue and operational queue
+ */
+ if (retval == 1)
+ mrioc->op_reply_q_offset = 0;
+
+ max_vectors = retval;
+ }
+ mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
+ GFP_KERNEL);
+ if (!mrioc->intr_info) {
+ retval = -ENOMEM;
+ pci_free_irq_vectors(mrioc->pdev);
+ goto out_failed;
+ }
+ for (i = 0; i < max_vectors; i++) {
+ retval = mpi3mr_request_irq(mrioc, i);
+ if (retval) {
+ mrioc->intr_info_count = i;
+ goto out_failed;
+ }
+ }
+ mrioc->intr_info_count = max_vectors;
+ mpi3mr_ioc_enable_intr(mrioc);
+ return 0;
+
+out_failed:
+ mpi3mr_cleanup_isr(mrioc);
+
+ return retval;
+}
+
+static const struct {
+ enum mpi3mr_iocstate value;
+ char *name;
+} mrioc_states[] = {
+ { MRIOC_STATE_READY, "ready" },
+ { MRIOC_STATE_FAULT, "fault" },
+ { MRIOC_STATE_RESET, "reset" },
+ { MRIOC_STATE_BECOMING_READY, "becoming ready" },
+ { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
+ { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
+};
+
+static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
+ if (mrioc_states[i].value == mrioc_state) {
+ name = mrioc_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset reason to name mapper structure*/
+static const struct {
+ enum mpi3mr_reset_reason value;
+ char *name;
+} mpi3mr_reset_reason_codes[] = {
+ { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
+ { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
+ { MPI3MR_RESET_FROM_IOCTL, "application invocation" },
+ { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
+ { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
+ { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
+ { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
+ { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
+ { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
+ { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
+ { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
+ { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
+ { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
+ {
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
+ "create request queue timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
+ "create reply queue timeout"
+ },
+ { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
+ { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
+ { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
+ { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
+ {
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER,
+ "component image activation timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
+ "get package version timeout"
+ },
+ { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
+ { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+};
+
+/**
+ * mpi3mr_reset_rc_name - get reset reason code name
+ * @reason_code: reset reason code value
+ *
+ * Map reset reason to a NULL terminated ASCII string
+ *
+ * Return: name corresponding to reset reason value or NULL.
+ */
+static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
+ if (mpi3mr_reset_reason_codes[i].value == reason_code) {
+ name = mpi3mr_reset_reason_codes[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset type to name mapper structure*/
+static const struct {
+ u16 reset_type;
+ char *name;
+} mpi3mr_reset_types[] = {
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
+};
+
+/**
+ * mpi3mr_reset_type_name - get reset type name
+ * @reset_type: reset type value
+ *
+ * Map reset type to a NULL terminated ASCII string
+ *
+ * Return: name corresponding to reset type value or NULL.
+ */
+static const char *mpi3mr_reset_type_name(u16 reset_type)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
+ if (mpi3mr_reset_types[i].reset_type == reset_type) {
+ name = mpi3mr_reset_types[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/**
+ * mpi3mr_print_fault_info - Display fault information
+ * @mrioc: Adapter instance reference
+ *
+ * Display the controller fault information if there is a
+ * controller fault.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, code, code1, code2, code3;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ code = readl(&mrioc->sysif_regs->fault);
+ code1 = readl(&mrioc->sysif_regs->fault_info[0]);
+ code2 = readl(&mrioc->sysif_regs->fault_info[1]);
+ code3 = readl(&mrioc->sysif_regs->fault_info[2]);
+
+ ioc_info(mrioc,
+ "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
+ code, code1, code2, code3);
+ }
+}
+
+/**
+ * mpi3mr_get_iocstate - Get IOC State
+ * @mrioc: Adapter instance reference
+ *
+ * Return a proper IOC state enum based on the IOC status and
+ * IOC configuration and unrecoverable state of the controller.
+ *
+ * Return: Current IOC state.
+ */
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, ioc_config;
+ u8 ready, enabled;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->unrecoverable)
+ return MRIOC_STATE_UNRECOVERABLE;
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
+ return MRIOC_STATE_FAULT;
+
+ ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
+ enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
+
+ if (ready && enabled)
+ return MRIOC_STATE_READY;
+ if ((!ready) && (!enabled))
+ return MRIOC_STATE_RESET;
+ if ((!ready) && (enabled))
+ return MRIOC_STATE_BECOMING_READY;
+
+ return MRIOC_STATE_RESET_REQUESTED;
+}
+
+/**
+ * mpi3mr_clear_reset_history - clear reset history
+ * @mrioc: Adapter instance reference
+ *
+ * Write the reset history bit in IOC status to clear the bit,
+ * if it is already set.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ writel(ioc_status, &mrioc->sysif_regs->ioc_status);
+}
+
+/**
+ * mpi3mr_issue_and_process_mur - Message unit Reset handler
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ *
+ * Issue Message unit Reset to the controller and wait for it to
+ * be complete.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason)
+{
+ u32 ioc_config, timeout, ioc_status;
+ int retval = -1;
+
+ ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
+ if (mrioc->unrecoverable) {
+ ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
+ return retval;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
+ mpi3mr_clear_reset_history(mrioc);
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
+ retval = 0;
+ break;
+ }
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status, ioc_config);
+ return retval;
+}
+
+/**
+ * mpi3mr_bring_ioc_ready - Bring controller to ready state
+ * @mrioc: Adapter instance reference
+ *
+ * Set Enable IOC bit in IOC configuration register and wait for
+ * the controller to become ready.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, timeout;
+ enum mpi3mr_iocstate current_state;
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ current_state = mpi3mr_get_iocstate(mrioc);
+ if (current_state == MRIOC_STATE_READY)
+ return 0;
+ msleep(100);
+ } while (--timeout);
+
+ return -1;
+}
+
+/**
+ * mpi3mr_soft_reset_success - Check softreset is success or not
+ * @ioc_status: IOC status register value
+ * @ioc_config: IOC config register value
+ *
+ * Check whether the soft reset is successful or not based on
+ * IOC status and IOC config register values.
+ *
+ * Return: True when the soft reset is successful, false otherwise.
+ */
+static inline bool
+mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
+{
+ if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ return true;
+ return false;
+}
+
+/**
+ * mpi3mr_diagfault_success - Check if diag fault was successful
+ * @mrioc: Adapter reference
+ * @ioc_status: IOC status register value
+ *
+ * Check whether the controller hit diag reset fault code.
+ *
+ * Return: True when there is diag fault, false otherwise.
+ */
+static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
+ u32 ioc_status)
+{
+ u32 fault;
+
+ if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
+ return false;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
+ return true;
+ return false;
+}
+
+/**
+ * mpi3mr_set_diagsave - Set diag save bit for snapdump
+ * @mrioc: Adapter reference
+ *
+ * Set diag save bit in IOC configuration register to enable
+ * snapdump.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config;
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+}
+
+/**
+ * mpi3mr_issue_reset - Issue reset to the controller
+ * @mrioc: Adapter reference
+ * @reset_type: Reset type
+ * @reset_reason: Reset reason code
+ *
+ * Unlock the host diagnostic registers, write the specified
+ * reset type to them, and wait for reset acknowledgment from
+ * the controller. If the reset is not successful, retry up to
+ * the predefined number of times.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
+ u32 reset_reason)
+{
+ int retval = -1;
+ u8 unlock_retry_count, reset_retry_count = 0;
+ u32 host_diagnostic, timeout, ioc_status, ioc_config;
+
+ pci_cfg_access_lock(mrioc->pdev);
+ if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
+ (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
+ goto out;
+ if (mrioc->unrecoverable)
+ goto out;
+retry_reset:
+ unlock_retry_count = 0;
+ mpi3mr_clear_reset_history(mrioc);
+ do {
+ ioc_info(mrioc,
+ "Write magic sequence to unlock host diag register (retry=%d)\n",
+ ++unlock_retry_count);
+ if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ mrioc->unrecoverable = 1;
+ goto out;
+ }
+
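+ /* Write the flush key followed by the six-key magic sequence to unlock the host diagnostic register */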
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
+ &mrioc->sysif_regs->write_sequence);
+ usleep_range(1000, 1100);
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ ioc_info(mrioc,
+ "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
+ unlock_retry_count, host_diagnostic);
+ } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
+
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+ writel(host_diagnostic | reset_type,
+ &mrioc->sysif_regs->host_diagnostic);
+ timeout = mrioc->ready_timeout * 10;
+ if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status &
+ MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_clear_reset_history(mrioc);
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if (mpi3mr_soft_reset_success(ioc_status,
+ ioc_config)) {
+ retval = 0;
+ break;
+ }
+ }
+ msleep(100);
+ } while (--timeout);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+ mpi3mr_clear_reset_history(mrioc);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ }
+ if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
+ reset_retry_count, ioc_status, ioc_config);
+ goto retry_reset;
+ }
+
+out:
+ pci_cfg_access_unlock(mrioc->pdev);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+ return retval;
+}
+
+/**
+ * mpi3mr_admin_request_post - Post request to admin queue
+ * @mrioc: Adapter reference
+ * @admin_req: MPI3 request
+ * @admin_req_sz: Request size
+ * @ignore_reset: Ignore reset in progress
+ *
+ * Post the MPI3 request into the admin request queue and
+ * inform the controller. If the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset)
+{
+ u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
+ int retval = 0;
+ unsigned long flags;
+ u8 *areq_entry;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&mrioc->admin_req_lock, flags);
+ areq_pi = mrioc->admin_req_pi;
+ areq_ci = mrioc->admin_req_ci;
+ max_entries = mrioc->num_admin_req;
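+ /* The circular queue is full when the producer index is one behind the consumer index (modulo queue depth) */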
+ if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
+ (areq_pi == (max_entries - 1)))) {
+ ioc_err(mrioc, "AdminReqQ full condition detected\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ if (!ignore_reset && mrioc->reset_in_progress) {
+ ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ areq_entry = (u8 *)mrioc->admin_req_base +
+ (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
+
+ if (++areq_pi == max_entries)
+ areq_pi = 0;
+ mrioc->admin_req_pi = areq_pi;
+
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+
+out:
+ spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_free_op_req_q_segments - free request memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational request queue index
+ *
+ * Free memory segments allocated for operational request queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->req_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+ if (mrioc->req_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->req_qinfo[q_idx].q_segment_list,
+ mrioc->req_qinfo[q_idx].q_segment_list_dma);
+ mrioc->req_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->req_qinfo[q_idx].num_requests *
+ mrioc->facts.op_req_sz;
+
+ for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+ kfree(mrioc->req_qinfo[q_idx].q_segments);
+ mrioc->req_qinfo[q_idx].q_segments = NULL;
+ mrioc->req_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_free_op_reply_q_segments - free reply memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational reply queue index
+ *
+ * Free memory segments allocated for operational reply queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->op_reply_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+ if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
+ mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->op_reply_qinfo[q_idx].segment_qd *
+ mrioc->op_reply_desc_sz;
+
+ for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+
+ kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
+ mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
+ mrioc->op_reply_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_delete_op_reply_q - delete operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Delete operational reply queue by issuing MPI request
+ * through admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_delete_reply_queue_request delq_req;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = mrioc->op_reply_qinfo[qidx].qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (!reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
+ goto out;
+ }
+
+ memset(&delq_req, 0, sizeof(delq_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
+ delq_req.queue_id = cpu_to_le16(reply_qid);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
+ 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
+ mrioc->unrecoverable = 1;
+
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ mrioc->intr_info[midx].op_reply_q = NULL;
+
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Allocate segmented memory pools for operational reply
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
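+ /* Segmented queues: each fixed-size segment holds segment_qd replies and a list of segment DMA addresses is handed to the firmware; otherwise one contiguous buffer is used */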
+ if (mrioc->enable_segqueue) {
+ op_reply_q->segment_qd =
+ MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
+
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+
+ op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_reply_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
+ } else {
+ op_reply_q->segment_qd = op_reply_q->num_replies;
+ size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
+ }
+
+ op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
+ op_reply_q->segment_qd);
+
+ op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_reply_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
+ * @mrioc: Adapter instance reference
+ * @qidx: request queue index
+ *
+ * Allocate segmented memory pools for operational request
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
+ if (mrioc->enable_segqueue) {
+ op_req_q->segment_qd =
+ MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
+
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+
+ op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_req_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
+
+ } else {
+ op_req_q->segment_qd = op_req_q->num_requests;
+ size = op_req_q->num_requests * mrioc->facts.op_req_sz;
+ }
+
+ op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
+ op_req_q->segment_qd);
+
+ op_req_q->q_segments = kcalloc(op_req_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_req_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_create_op_reply_q - create operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Create operational reply queue by issuing MPI request
+ * through admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_create_reply_queue_request create_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = op_reply_q->qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
+ reply_qid);
+
+ return retval;
+ }
+
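+ /* Operational queue IDs are 1-based; qid 0 denotes an unused queue */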
+ reply_qid = qidx + 1;
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ op_reply_q->ci = 0;
+ op_reply_q->ephase = 1;
+ atomic_set(&op_reply_q->pend_ios, 0);
+ atomic_set(&op_reply_q->in_use, 0);
+ op_reply_q->enable_irq_poll = false;
+
+ if (!op_reply_q->q_segments) {
+ retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
+ if (retval) {
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
+ create_req.queue_id = cpu_to_le16(reply_qid);
+ create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ if (mrioc->enable_segqueue) {
+ create_req.flags |=
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segments[0].segment_dma);
+
+ create_req.size = cpu_to_le16(op_reply_q->num_replies);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "CreateRepQ: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_reply_q->qid = reply_qid;
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_req_q - create operational request queue
+ * @mrioc: Adapter instance reference
+ * @idx: operational request queue index
+ * @reply_qid: Reply queue ID
+ *
+ * Create operational request queue by issuing MPI request
+ * through admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
+ u16 reply_qid)
+{
+ struct mpi3_create_request_queue_request create_req;
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
+ int retval = 0;
+ u16 req_qid = 0;
+
+ req_qid = op_req_q->qid;
+
+ if (req_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
+ req_qid);
+
+ return retval;
+ }
+ req_qid = idx + 1;
+
+ op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
+ op_req_q->ci = 0;
+ op_req_q->pi = 0;
+ op_req_q->reply_qid = reply_qid;
+ spin_lock_init(&op_req_q->q_lock);
+
+ if (!op_req_q->q_segments) {
+ retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
+ if (retval) {
+ mpi3mr_free_op_req_q_segments(mrioc, idx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
+ create_req.queue_id = cpu_to_le16(req_qid);
+ if (mrioc->enable_segqueue) {
+ create_req.flags =
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segments[0].segment_dma);
+ create_req.reply_queue_id = cpu_to_le16(reply_qid);
+ create_req.size = cpu_to_le16(op_req_q->num_requests);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "CreateReqQ: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ if (mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT))
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_req_q->qid = req_qid;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_queues - create operational queue pairs
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for operational queue metadata and call
+ * create request and reply queue functions.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u16 num_queues = 0, i = 0, msix_count_op_q = 1;
+
+ num_queues = min_t(int, mrioc->facts.max_op_reply_q,
+ mrioc->facts.max_op_req_q);
+
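+ /* Cap the queue pair count by the MSI-X vectors available for operational reply queues */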
+ msix_count_op_q =
+ mrioc->intr_info_count - mrioc->op_reply_q_offset;
+ if (!mrioc->num_queues)
+ mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
+ num_queues = mrioc->num_queues;
+ ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
+ num_queues);
+
+ if (!mrioc->req_qinfo) {
+ mrioc->req_qinfo = kcalloc(num_queues,
+ sizeof(struct op_req_qinfo), GFP_KERNEL);
+ if (!mrioc->req_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->op_reply_qinfo = kcalloc(num_queues,
+ sizeof(struct op_reply_qinfo), GFP_KERNEL);
+ if (!mrioc->op_reply_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
+ if (mrioc->enable_segqueue)
+ ioc_info(mrioc,
+ "allocating operational queues through segmented queues\n");
+
+ for (i = 0; i < num_queues; i++) {
+ if (mpi3mr_create_op_reply_q(mrioc, i)) {
+ ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
+ break;
+ }
+ if (mpi3mr_create_op_req_q(mrioc, i,
+ mrioc->op_reply_qinfo[i].qid)) {
+ ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
+ mpi3mr_delete_op_reply_q(mrioc, i);
+ break;
+ }
+ }
+
+ if (i == 0) {
+ /* Not even one queue is created successfully */
+ retval = -1;
+ goto out_failed;
+ }
+ mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
+ ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
+ mrioc->num_op_reply_q);
+
+ return retval;
+out_failed:
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_op_request_post - Post request to operational queue
+ * @mrioc: Adapter reference
+ * @op_req_q: Operational request queue info
+ * @req: MPI3 request
+ *
+ * Post the MPI3 request into the operational request queue and
+ * inform the controller. If the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *op_req_q, u8 *req)
+{
+ u16 pi = 0, max_entries, reply_qidx = 0, midx;
+ int retval = 0;
+ unsigned long flags;
+ u8 *req_entry;
+ void *segment_base_addr;
+ u16 req_sz = mrioc->facts.op_req_sz;
+ struct segments *segments = op_req_q->q_segments;
+
+ reply_qidx = op_req_q->reply_qid - 1;
+
+ if (mrioc->unrecoverable)
+ return -EFAULT;
+
+ spin_lock_irqsave(&op_req_q->q_lock, flags);
+ pi = op_req_q->pi;
+ max_entries = op_req_q->num_requests;
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
+ reply_qidx, mrioc->op_reply_q_offset);
+ mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "OpReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+
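+ /* Locate the request slot: segment index is pi / segment_qd, offset within the segment is pi % segment_qd */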
+ segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
+ req_entry = (u8 *)segment_base_addr +
+ ((pi % op_req_q->segment_qd) * req_sz);
+
+ memset(req_entry, 0, req_sz);
+ memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
+
+ if (++pi == max_entries)
+ pi = 0;
+ op_req_q->pi = pi;
+
+ if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
+ > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
+ mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+
+ writel(op_req_q->pi,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
+
+out:
+ spin_unlock_irqrestore(&op_req_q->q_lock, flags);
+ return retval;
+}
+
+/**
+ * mpi3mr_sync_timestamp - Issue time stamp sync request
+ * @mrioc: Adapter reference
+ *
+ * Issue IO unit control MPI request to synchronize the firmware
+ * timestamp with the host time.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
+{
+ ktime_t current_time;
+ struct mpi3_iounit_control_request iou_ctrl;
+ int retval = 0;
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
+ current_time = ktime_get_real();
+ iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
+ sizeof(iou_ctrl), 0);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
+ mrioc->init_cmds.is_waiting = 0;
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_watchdog_work - watchdog thread to monitor faults
+ * @work: work struct
+ *
+ * Watchdog work executed periodically (1 second interval) to
+ * monitor firmware faults and to issue a periodic timestamp
+ * sync to the firmware.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_watchdog_work(struct work_struct *work)
+{
+ struct mpi3mr_ioc *mrioc =
+ container_of(work, struct mpi3mr_ioc, watchdog_work.work);
+ unsigned long flags;
+ enum mpi3mr_iocstate ioc_state;
+ u32 fault, host_diagnostic;
+
+ if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
+ mrioc->ts_update_counter = 0;
+ mpi3mr_sync_timestamp(mrioc);
+ }
+
+ /*Check for fault state every one second and issue Soft reset*/
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_FAULT) {
+ fault = readl(&mrioc->sysif_regs->fault) &
+ MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "Diag save in progress\n");
+ }
+ if ((mrioc->diagsave_timeout++) <=
+ MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ } else
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
+ if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
+ ioc_info(mrioc,
+ "Factory Reset fault occurred marking controller as unrecoverable"
+ );
+ mrioc->unrecoverable = 1;
+ goto out;
+ }
+
+ if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
+ (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
+ (mrioc->reset_in_progress))
+ goto out;
+ if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
+ else
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_FAULT_WATCH, 0);
+ }
+
+schedule_work:
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+out:
+ return;
+}
+
+/**
+ * mpi3mr_start_watchdog - Start watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Create and start the watchdog thread to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ if (mrioc->watchdog_work_q)
+ return;
+
+ INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
+ snprintf(mrioc->watchdog_work_q_name,
+ sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
+ mrioc->id);
+ mrioc->watchdog_work_q =
+ create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+ if (!mrioc->watchdog_work_q) {
+ ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
+ return;
+ }
+
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+}
+
+/**
+ * mpi3mr_stop_watchdog - Stop watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Stop the watchdog thread created to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ wq = mrioc->watchdog_work_q;
+ mrioc->watchdog_work_q = NULL;
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpi3mr_kill_ioc - Kill the controller
+ * @mrioc: Adapter instance reference
+ * @reason: reason for the failure.
+ *
+ * If fault debug is enabled, display the fault info or issue a
+ * diag fault, then freeze the system for controller debug
+ * purposes.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
+{
+ enum mpi3mr_iocstate ioc_state;
+
+ if (!mrioc->fault_dbg)
+ return;
+
+ dump_stack();
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_FAULT)
+ mpi3mr_print_fault_info(mrioc);
+ else {
+ ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
+ reason);
+ mpi3mr_diagfault_reset_handler(mrioc, reason);
+ }
+ if (mrioc->fault_dbg == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
+
+/**
+ * mpi3mr_setup_admin_qpair - Setup admin queue pair
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for admin queue pair if required and register
+ * the admin queue with the controller.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 num_admin_entries = 0;
+
+ mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
+ mrioc->num_admin_req = mrioc->admin_req_q_sz /
+ MPI3MR_ADMIN_REQ_FRAME_SZ;
+ mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
+ mrioc->admin_req_base = NULL;
+
+ mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
+ mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
+ MPI3MR_ADMIN_REPLY_FRAME_SZ;
+ mrioc->admin_reply_ci = 0;
+ mrioc->admin_reply_ephase = 1;
+ mrioc->admin_reply_base = NULL;
+
+ if (!mrioc->admin_req_base) {
+ mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
+
+ if (!mrioc->admin_req_base) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
+ GFP_KERNEL);
+
+ if (!mrioc->admin_reply_base) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
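+ /* Pack the admin queue depths: reply entries in the upper 16 bits, request entries in the lower 16 bits */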
+ num_admin_entries = (mrioc->num_admin_replies << 16) |
+ (mrioc->num_admin_req);
+ writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
+ mpi3mr_writeq(mrioc->admin_req_dma,
+ &mrioc->sysif_regs->admin_request_queue_address);
+ mpi3mr_writeq(mrioc->admin_reply_dma,
+ &mrioc->sysif_regs->admin_reply_queue_address);
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+ writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ return retval;
+
+out_failed:
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_issue_iocfacts - Send IOC Facts
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Issue IOC Facts MPI request through admin queue and wait for
+ * the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ struct mpi3_ioc_facts_request iocfacts_req;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*facts_data);
+ int retval = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+
+ if (!data) {
+ retval = -1;
+ goto out;
+ }
+
+ memset(&iocfacts_req, 0, sizeof(iocfacts_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
+
+ mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
+ data_dma);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
+ sizeof(iocfacts_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ memcpy(facts_data, (u8 *)data, data_len);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_reset_dma_mask - Check and set DMA mask
+ * @mrioc: Adapter instance reference
+ *
+ * Check whether the new DMA mask requested through IOCFacts by
+ * the firmware needs to be set; if so, set it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ int r;
+ u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
+
+ if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
+ return 0;
+
+ ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
+ mrioc->dma_mask, facts_dma_mask);
+
+ r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
+ if (r) {
+ ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
+ facts_dma_mask, r);
+ return r;
+ }
+ mrioc->dma_mask = facts_dma_mask;
+ return r;
+}
+
+/**
+ * mpi3mr_process_factsdata - Process IOC facts data
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Convert IOC facts data into CPU endianness and cache it in
+ * the driver.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ u32 ioc_config, req_sz, facts_flags;
+
+ if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
+ (sizeof(*facts_data) / 4)) {
+ ioc_warn(mrioc,
+ "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
+ sizeof(*facts_data),
+ le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
+ }
+
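+ /* Operational request frame size is encoded as a power-of-two exponent in the IOC configuration register */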
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
+ if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
+ ioc_err(mrioc,
+ "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
+ req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
+ }
+
+ memset(&mrioc->facts, 0, sizeof(mrioc->facts));
+
+ facts_flags = le32_to_cpu(facts_data->flags);
+ mrioc->facts.op_req_sz = req_sz;
+ mrioc->op_reply_desc_sz = 1 << ((ioc_config &
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
+
+ mrioc->facts.ioc_num = facts_data->ioc_number;
+ mrioc->facts.who_init = facts_data->who_init;
+ mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
+ mrioc->facts.personality = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.protocol_flags = facts_data->protocol_flags;
+ mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
+ mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
+ mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
+ mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
+ mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
+ mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
+ mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
+ mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
+ mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
+ mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
+ mrioc->facts.max_pcie_switches =
+ le16_to_cpu(facts_data->max_pc_ie_switches);
+ mrioc->facts.max_sasexpanders =
+ le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_sasinitiators =
+ le16_to_cpu(facts_data->max_sas_initiators);
+ mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
+ mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
+ mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
+ mrioc->facts.max_op_req_q =
+ le16_to_cpu(facts_data->max_operational_request_queues);
+ mrioc->facts.max_op_reply_q =
+ le16_to_cpu(facts_data->max_operational_reply_queues);
+ mrioc->facts.ioc_capabilities =
+ le32_to_cpu(facts_data->ioc_capabilities);
+ mrioc->facts.fw_ver.build_num =
+ le16_to_cpu(facts_data->fw_version.build_num);
+ mrioc->facts.fw_ver.cust_id =
+ le16_to_cpu(facts_data->fw_version.customer_id);
+ mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
+ mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
+ mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
+ mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
+ mrioc->msix_count = min_t(int, mrioc->msix_count,
+ mrioc->facts.max_msix_vectors);
+ mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
+ mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
+ mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
+ mrioc->facts.shutdown_timeout =
+ le16_to_cpu(facts_data->shutdown_timeout);
+
+ ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
+ mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
+ mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
+ ioc_info(mrioc,
+ "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
+ mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
+ mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
+ mrioc->facts.max_perids);
+ ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
+ mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
+ mrioc->facts.sge_mod_shift);
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ mrioc->facts.dma_mask, (facts_flags &
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
+}
+
+/**
+ * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate and initialize the reply free buffers, sense
+ * buffers, reply free queue and sense buffer queue.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ if (mrioc->init_cmds.reply)
+ goto post_reply_sbuf;
+
+ mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ if (!mrioc->init_cmds.reply)
+ goto out_failed;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->dev_rmhs_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ if (!mrioc->host_tm_cmds.reply)
+ goto out_failed;
+
+ mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ mrioc->dev_handle_bitmap_sz++;
+ mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->removepend_bitmap)
+ goto out_failed;
+
+ mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
+ if (MPI3MR_NUM_DEVRMCMD % 8)
+ mrioc->devrem_bitmap_sz++;
+ mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->devrem_bitmap)
+ goto out_failed;
+
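+ /* Reply free queue and sense buffer queue hold one extra slot beyond the buffer counts; the final slot is left zeroed at init */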
+ mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
+ mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
+ mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
+ mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
+
+ /* reply buffer pool, 16 byte align */
+ sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->reply_buf_pool) {
+ ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
+ &mrioc->reply_buf_dma);
+ if (!mrioc->reply_buf)
+ goto out_failed;
+
+ mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
+
+ /* reply free queue, 8 byte align */
+ sz = mrioc->reply_free_qsz * 8;
+ mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->reply_free_q_pool) {
+ ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
+ GFP_KERNEL, &mrioc->reply_free_q_dma);
+ if (!mrioc->reply_free_q)
+ goto out_failed;
+
+ /* sense buffer pool, 4 byte align */
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
+ &mrioc->pdev->dev, sz, 4, 0);
+ if (!mrioc->sense_buf_pool) {
+ ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
+ &mrioc->sense_buf_dma);
+ if (!mrioc->sense_buf)
+ goto out_failed;
+
+ /* sense buffer queue, 8 byte align */
+ sz = mrioc->sense_buf_q_sz * 8;
+ mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->sense_buf_q_pool) {
+ ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
+ GFP_KERNEL, &mrioc->sense_buf_q_dma);
+ if (!mrioc->sense_buf_q)
+ goto out_failed;
+
+post_reply_sbuf:
+ sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ ioc_info(mrioc,
+ "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
+ (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
+ sz = mrioc->reply_free_qsz * 8;
+ ioc_info(mrioc,
+ "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
+ (unsigned long long)mrioc->reply_free_q_dma);
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ ioc_info(mrioc,
+ "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
+ (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
+ sz = mrioc->sense_buf_q_sz * 8;
+ ioc_info(mrioc,
+ "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
+ (unsigned long long)mrioc->sense_buf_q_dma);
+
+ /* initialize Reply buffer Queue */
+ for (i = 0, phy_addr = mrioc->reply_buf_dma;
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
+ mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
+ mrioc->reply_free_q[i] = cpu_to_le64(0);
+
+ /* initialize Sense Buffer Queue */
+ for (i = 0, phy_addr = mrioc->sense_buf_dma;
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
+ mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
+ mrioc->sense_buf_q[i] = cpu_to_le64(0);
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpi3mr_issue_iocinit - Send IOC Init
+ * @mrioc: Adapter instance reference
+ *
+ * Issue IOC Init MPI request through admin queue and wait for
+ * the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ioc_init_request iocinit_req;
+ struct mpi3_driver_info_layout *drv_info;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*drv_info);
+ int retval = 0;
+ ktime_t current_time;
+
+ drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!drv_info) {
+ retval = -1;
+ goto out;
+ }
+ drv_info->information_length = cpu_to_le32(data_len);
+ strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
+ strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
+ strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
+ strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
+ strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
+ strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
+ sizeof(drv_info->driver_release_date));
+ drv_info->driver_capabilities = 0;
+ memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
+ sizeof(mrioc->driver_info));
+
+ memset(&iocinit_req, 0, sizeof(iocinit_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
+ iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
+ iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
+ iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
+ iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
+ iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
+ iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
+ iocinit_req.reply_free_queue_address =
+ cpu_to_le64(mrioc->reply_free_q_dma);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ);
+ iocinit_req.sense_buffer_free_queue_depth =
+ cpu_to_le16(mrioc->sense_buf_q_sz);
+ iocinit_req.sense_buffer_free_queue_address =
+ cpu_to_le64(mrioc->sense_buf_q_dma);
+ iocinit_req.driver_information_address = cpu_to_le64(data_dma);
+
+ current_time = ktime_get_real();
+ iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
+ sizeof(iocinit_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "Issue IOCInit: command timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (drv_info)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
+ data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_unmask_events - Unmask events in event mask bitmap
+ * @mrioc: Adapter instance reference
+ * @event: MPI event ID
+ *
+ * Unmask the specific event by resetting the event_mask
+ * bitmap.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
+{
+ u32 desired_event;
+ u8 word;
+
+ if (event >= 128)
+ return;
+
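+ /* Each 32-bit word of event_masks covers 32 events; clearing the event's bit unmasks it */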
+ desired_event = (1 << (event % 32));
+ word = event / 32;
+
+ mrioc->event_masks[word] &= ~desired_event;
+}
+
+/**
+ * mpi3mr_issue_event_notification - Send event notification
+ * @mrioc: Adapter instance reference
+ *
+ * Issue event notification MPI request through admin queue and
+ * wait for the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_event_notification_request evtnotify_req;
+ int retval = 0;
+ u8 i;
+
+ memset(&evtnotify_req, 0, sizeof(evtnotify_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ evtnotify_req.event_masks[i] =
+ cpu_to_le32(mrioc->event_masks[i]);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
+ sizeof(evtnotify_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_send_event_ack - Send event acknowledgment
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event ID
+ * @event_ctx: Event context
+ *
+ * Send event acknowledgment through admin queue and wait for
+ * it to complete.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_chain_bufs - Allocate chain buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate chain buffers and set a bitmap to indicate free
+ * chain buffers. Chain buffers are used to pass the SGE
+ * information along with MPI3 SCSI IO requests for host I/O.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+ u16 num_chains;
+
+ num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
+
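+ /* reserve extra chain frames when DIX protection is enabled */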
+ if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION))
+ num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
+
+ mrioc->chain_buf_count = num_chains;
+ sz = sizeof(struct chain_element) * num_chains;
+ mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->chain_sgl_list)
+ goto out_failed;
+
+ sz = MPI3MR_PAGE_SIZE_4K;
+ mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->chain_buf_pool) {
+ ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ mrioc->chain_sgl_list[i].addr =
+ dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
+ &mrioc->chain_sgl_list[i].dma_addr);
+
+ if (!mrioc->chain_sgl_list[i].addr)
+ goto out_failed;
+ }
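+ /* one bit per chain buffer, rounded up to whole bytes */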
+ mrioc->chain_bitmap_sz = num_chains / 8;
+ if (num_chains % 8)
+ mrioc->chain_bitmap_sz++;
+ mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
+ if (!mrioc->chain_bitmap)
+ goto out_failed;
+ return retval;
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpi3mr_port_enable_complete - Mark port enable complete
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Callback for the asynchronous port enable request; sets the
+ * driver command state to indicate the port enable request is
+ * complete.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ mrioc->scan_started = 0;
+}
+
+/**
+ * mpi3mr_issue_port_enable - Issue Port Enable
+ * @mrioc: Adapter instance reference
+ * @async: Flag to wait for completion or not
+ *
+ * Issue Port Enable MPI request through admin queue and if the
+ * async flag is not set wait for the completion of the port
+ * enable or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
+{
+ struct mpi3_port_enable_request pe_req;
+ int retval = 0;
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+
+ memset(&pe_req, 0, sizeof(pe_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ if (async) {
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
+ } else {
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ init_completion(&mrioc->init_cmds.done);
+ }
+ pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
+
+ retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
+ goto out_unlock;
+ }
+ if (!async) {
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue PortEnable: command timed out\n");
+ retval = -1;
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ goto out_unlock;
+ }
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+ }
+out_unlock:
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/* Protocol type to name mapper structure*/
+static const struct {
+ u8 protocol;
+ char *name;
+} mpi3mr_protocols[] = {
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
+ { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
+};
+
+/* Capability to name mapper structure*/
+static const struct {
+ u32 capability;
+ char *name;
+} mpi3mr_capabilities[] = {
+ { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+};
+
+/**
+ * mpi3mr_print_ioc_info - Display controller information
+ * @mrioc: Adapter instance reference
+ *
+ * Display controller personality, capability, supported
+ * protocols etc.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
+{
+ int i = 0, bytes_wrote = 0;
+ char personality[16];
+ char protocol[50] = {0};
+ char capabilities[100] = {0};
+ bool is_string_nonempty = false;
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ switch (mrioc->facts.personality) {
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
+ strncpy(personality, "Enhanced HBA", sizeof(personality));
+ break;
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
+ strncpy(personality, "RAID", sizeof(personality));
+ break;
+ default:
+ strncpy(personality, "Unknown", sizeof(personality));
+ break;
+ }
+
+ ioc_info(mrioc, "Running in %s Personality", personality);
+
+ ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
+ if (mrioc->facts.protocol_flags &
+ mpi3mr_protocols[i].protocol) {
+ if (is_string_nonempty &&
+ (bytes_wrote < sizeof(protocol)))
+ bytes_wrote += snprintf(protocol + bytes_wrote,
+ (sizeof(protocol) - bytes_wrote), ",");
+
+ if (bytes_wrote < sizeof(protocol))
+ bytes_wrote += snprintf(protocol + bytes_wrote,
+ (sizeof(protocol) - bytes_wrote), "%s",
+ mpi3mr_protocols[i].name);
+ is_string_nonempty = true;
+ }
+ }
+
+ bytes_wrote = 0;
+ is_string_nonempty = false;
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
+ if (mrioc->facts.protocol_flags &
+ mpi3mr_capabilities[i].capability) {
+ if (is_string_nonempty &&
+ (bytes_wrote < sizeof(capabilities)))
+ bytes_wrote += snprintf(capabilities + bytes_wrote,
+ (sizeof(capabilities) - bytes_wrote), ",");
+
+ if (bytes_wrote < sizeof(capabilities))
+ bytes_wrote += snprintf(capabilities + bytes_wrote,
+ (sizeof(capabilities) - bytes_wrote), "%s",
+ mpi3mr_capabilities[i].name);
+ is_string_nonempty = true;
+ }
+ }
+
+ ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
+ protocol, capabilities);
+}
+
+/**
+ * mpi3mr_cleanup_resources - Free PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Unmap PCI device memory and disable PCI device.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (mrioc->sysif_regs) {
+ iounmap((void __iomem *)mrioc->sysif_regs);
+ mrioc->sysif_regs = NULL;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ if (mrioc->bars)
+ pci_release_selected_regions(pdev, mrioc->bars);
+ pci_disable_device(pdev);
+ }
+}
+
+/**
+ * mpi3mr_setup_resources - Enable PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Enable PCI device memory, MSI-x registers and set DMA mask.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ u32 memap_sz = 0;
+ int i, retval = 0, capb = 0;
+ u16 message_control;
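+ /* prefer a 64-bit DMA mask when required and supported, else 32-bit */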
+ u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
+ (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
+ (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+
+ if (pci_enable_device_mem(pdev)) {
+ ioc_err(mrioc, "pci_enable_device_mem: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!capb) {
+ ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+ mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_request_selected_regions(pdev, mrioc->bars,
+ mrioc->driver_name)) {
+ ioc_err(mrioc, "pci_request_selected_regions: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
+ memap_sz = pci_resource_len(pdev, i);
+ mrioc->sysif_regs =
+ ioremap(mrioc->sysif_regs_phys, memap_sz);
+ break;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (retval) {
+ if (dma_mask != DMA_BIT_MASK(32)) {
+ ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
+ dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_mask_and_coherent(&pdev->dev,
+ dma_mask);
+ }
+ if (retval) {
+ mrioc->dma_mask = 0;
+ ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
+ goto out_failed;
+ }
+ }
+ mrioc->dma_mask = dma_mask;
+
+ if (!mrioc->sysif_regs) {
+ ioc_err(mrioc,
+ "Unable to map adapter memory or resource not found\n");
+ retval = -EINVAL;
+ goto out_failed;
+ }
+
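+ /* MSI-X vector count is encoded as N-1 in the message control register */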
+ pci_read_config_word(pdev, capb + 2, &message_control);
+ mrioc->msix_count = (message_control & 0x3FF) + 1;
+
+ pci_save_state(pdev);
+
+ pci_set_drvdata(pdev, mrioc->shost);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ (unsigned long long)mrioc->sysif_regs_phys,
+ mrioc->sysif_regs, memap_sz);
+ ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
+ mrioc->msix_count);
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_resources(mrioc);
+ return retval;
+}
+
+/**
+ * mpi3mr_init_ioc - Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @re_init: Flag to indicate whether this is a fresh init or a re-init
+ *
+ * This is the controller initialization routine, executed either
+ * after soft reset or from pci probe callback.
+ * Setup the required resources, memory map the controller
+ * registers, create admin and operational reply queue pairs,
+ * allocate required memory for reply pool, sense buffer pool,
+ * issue IOC init request to the firmware, unmask the events and
+ * issue port enable to discover SAS/SATA/NVMe devices and RAID
+ * volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+{
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+ u32 timeout;
+ u32 ioc_status, ioc_config, i;
+ struct mpi3_ioc_facts_data facts_data;
+
+ mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
+ mrioc->change_count = 0;
+ if (!re_init) {
+ mrioc->cpu_count = num_online_cpus();
+ retval = mpi3mr_setup_resources(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup resources:error %d\n",
+ retval);
+ goto out_nocleanup;
+ }
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ ioc_info(mrioc, "SOD status %x configuration %x\n",
+ ioc_status, ioc_config);
+
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "SOD base_info %llx\n", base_info);
+
+ /* The timeout value is in 2 second units, convert it to seconds */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "IOC in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
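+ /* wait out the full ready_timeout (in seconds) before rechecking */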
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "IOC in %s state after waiting for reset time\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
+ if (ioc_state == MRIOC_STATE_READY) {
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
+ retval);
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to soft reset IOC error %d\n",
+ __func__, retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ retval = -1;
+ ioc_err(mrioc, "Cannot bring IOC to reset state\n");
+ goto out_failed;
+ }
+
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else
+ mpi3mr_ioc_enable_intr(mrioc);
+
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ mpi3mr_process_factsdata(mrioc, &facts_data);
+ if (!re_init) {
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ mpi3mr_print_ioc_info(mrioc);
+
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
+ goto out_failed;
+ }
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+
+ if (!re_init) {
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (re_init &&
+ (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
+ retval = -1;
+ ioc_err(mrioc,
+ "Cannot create minimum number of OpQueues expected:%d created:%d\n",
+ mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+ goto out_failed;
+ }
+
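+ /* mask all events by default, then unmask only the ones handled below */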
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue event notification %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (re_init) {
+ ioc_info(mrioc, "Issuing Port Enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue port enable %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_ioc(mrioc, re_init);
+out_nocleanup:
+ return retval;
+}
+
+/**
+ * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational reply queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_reply_q->q_segments)
+ return;
+
+ size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational request queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_req_q->q_segments)
+ return;
+
+ size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_buffers - memset memory for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * clear all the memory allocated for a controller, typically
+ * called post reset to reuse the memory allocated during the
+ * controller init.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ mrioc->op_reply_qinfo[i].qid = 0;
+ mrioc->op_reply_qinfo[i].ci = 0;
+ mrioc->op_reply_qinfo[i].num_replies = 0;
+ mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
+ mpi3mr_memset_op_reply_q_buffers(mrioc, i);
+
+ mrioc->req_qinfo[i].ci = 0;
+ mrioc->req_qinfo[i].pi = 0;
+ mrioc->req_qinfo[i].num_requests = 0;
+ mrioc->req_qinfo[i].qid = 0;
+ mrioc->req_qinfo[i].reply_qid = 0;
+ spin_lock_init(&mrioc->req_qinfo[i].q_lock);
+ mpi3mr_memset_op_req_q_buffers(mrioc, i);
+ }
+}
+
+/**
+ * mpi3mr_free_mem - Free memory allocated for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Free all the memory allocated for a controller.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ struct mpi3mr_intr_info *intr_info;
+
+ if (mrioc->sense_buf_pool) {
+ if (mrioc->sense_buf)
+ dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
+ mrioc->sense_buf_dma);
+ dma_pool_destroy(mrioc->sense_buf_pool);
+ mrioc->sense_buf = NULL;
+ mrioc->sense_buf_pool = NULL;
+ }
+ if (mrioc->sense_buf_q_pool) {
+ if (mrioc->sense_buf_q)
+ dma_pool_free(mrioc->sense_buf_q_pool,
+ mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
+ dma_pool_destroy(mrioc->sense_buf_q_pool);
+ mrioc->sense_buf_q = NULL;
+ mrioc->sense_buf_q_pool = NULL;
+ }
+
+ if (mrioc->reply_buf_pool) {
+ if (mrioc->reply_buf)
+ dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
+ mrioc->reply_buf_dma);
+ dma_pool_destroy(mrioc->reply_buf_pool);
+ mrioc->reply_buf = NULL;
+ mrioc->reply_buf_pool = NULL;
+ }
+ if (mrioc->reply_free_q_pool) {
+ if (mrioc->reply_free_q)
+ dma_pool_free(mrioc->reply_free_q_pool,
+ mrioc->reply_free_q, mrioc->reply_free_q_dma);
+ dma_pool_destroy(mrioc->reply_free_q_pool);
+ mrioc->reply_free_q = NULL;
+ mrioc->reply_free_q_pool = NULL;
+ }
+
+ for (i = 0; i < mrioc->num_op_req_q; i++)
+ mpi3mr_free_op_req_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ mpi3mr_free_op_reply_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->intr_info_count; i++) {
+ intr_info = mrioc->intr_info + i;
+ intr_info->op_reply_q = NULL;
+ }
+
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+ mrioc->num_op_req_q = 0;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+ mrioc->num_op_reply_q = 0;
+
+ kfree(mrioc->init_cmds.reply);
+ mrioc->init_cmds.reply = NULL;
+
+ kfree(mrioc->host_tm_cmds.reply);
+ mrioc->host_tm_cmds.reply = NULL;
+
+ kfree(mrioc->removepend_bitmap);
+ mrioc->removepend_bitmap = NULL;
+
+ kfree(mrioc->devrem_bitmap);
+ mrioc->devrem_bitmap = NULL;
+
+ kfree(mrioc->chain_bitmap);
+ mrioc->chain_bitmap = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ kfree(mrioc->dev_rmhs_cmds[i].reply);
+ mrioc->dev_rmhs_cmds[i].reply = NULL;
+ }
+
+ if (mrioc->chain_buf_pool) {
+ for (i = 0; i < mrioc->chain_buf_count; i++) {
+ if (mrioc->chain_sgl_list[i].addr) {
+ dma_pool_free(mrioc->chain_buf_pool,
+ mrioc->chain_sgl_list[i].addr,
+ mrioc->chain_sgl_list[i].dma_addr);
+ mrioc->chain_sgl_list[i].addr = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->chain_buf_pool);
+ mrioc->chain_buf_pool = NULL;
+ }
+
+ kfree(mrioc->chain_sgl_list);
+ mrioc->chain_sgl_list = NULL;
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+}
+
+/**
+ * mpi3mr_issue_ioc_shutdown - shutdown controller
+ * @mrioc: Adapter instance reference
+ *
+ * Send a shutdown notification to the controller and wait up to
+ * the shutdown_timeout for it to complete.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, ioc_status;
+ u8 retval = 1;
+ u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
+
+ ioc_info(mrioc, "Issuing shutdown Notification\n");
+ if (mrioc->unrecoverable) {
+ ioc_warn(mrioc,
+ "IOC is unrecoverable shutdown is not issued\n");
+ return;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
+ ioc_info(mrioc, "shutdown already in progress\n");
+ return;
+ }
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
+
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->facts.shutdown_timeout)
+ timeout = mrioc->facts.shutdown_timeout * 10;
+
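+ /* poll the IOC status for shutdown completion in 100ms steps */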
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (retval) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
+ ioc_warn(mrioc,
+ "shutdown still in progress after timeout\n");
+ }
+
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+}
+
+/**
+ * mpi3mr_cleanup_ioc - Cleanup controller
+ * @mrioc: Adapter instance reference
+ * @re_init: Cleanup due to a reinit or not
+ *
+ * Controller cleanup handler; a message unit reset or soft reset
+ * and a shutdown notification are issued to the controller, and
+ * the associated memory resources are freed.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+{
+ enum mpi3mr_iocstate ioc_state;
+
+ if (!re_init)
+ mpi3mr_stop_watchdog(mrioc);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+
+ if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
+ (ioc_state == MRIOC_STATE_READY)) {
+ if (mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP))
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_MUR_FAILURE);
+
+ if (!re_init)
+ mpi3mr_issue_ioc_shutdown(mrioc);
+ }
+
+ if (!re_init) {
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+ }
+}
+
+/**
+ * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
+ * @mrioc: Adapter instance reference
+ * @cmdptr: Internal command tracker
+ *
+ * Complete an internal driver command with a state indicating it
+ * is completed due to reset.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *cmdptr)
+{
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_RESET;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+}
+
+/**
+ * mpi3mr_flush_drv_cmds - Flush internal driver commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all internal driver commands post reset
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_drv_cmd *cmdptr;
+ u8 i;
+
+ cmdptr = &mrioc->init_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ cmdptr = &mrioc->host_tm_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ cmdptr = &mrioc->dev_rmhs_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+}
+
+/**
+ * mpi3mr_diagfault_reset_handler - Diag fault reset handler
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ *
+ * This is a handler for issuing a diag fault reset from the
+ * applications through the IOCTL path to stop the execution of
+ * the controller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason)
+{
+ int retval = 0;
+
+ ioc_info(mrioc, "Entry: reason code: %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ mrioc->reset_in_progress = 1;
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+
+ if (retval) {
+ ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
+ reset_reason);
+ mpi3mr_ioc_enable_intr(mrioc);
+ }
+ ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ mrioc->reset_in_progress = 0;
+ return retval;
+}
+
+/**
+ * mpi3mr_soft_reset_handler - Reset the controller
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ * @snapdump: Flag to generate snapdump in firmware or not
+ *
+ * This is a handler for recovering the controller by issuing a
+ * soft reset or a diag fault reset. This is a blocking function;
+ * while one reset is executing, any further reset requests are
+ * blocked, and all IOCTLs/IO are blocked during the reset. If the
+ * controller reset is successful the controller is reinitialized,
+ * otherwise the controller is marked as not recoverable.
+ *
+ * If the snapdump bit is set, the controller is issued a diag
+ * fault reset so that the firmware can create a snap dump; after
+ * that the firmware raises an F000 fault and the driver issues a
+ * soft reset to recover from it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump)
+{
+ int retval = 0, i;
+ unsigned long flags;
+ u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+
+ if (mrioc->fault_dbg) {
+ if (snapdump)
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_kill_ioc(mrioc, reset_reason);
+ }
+
+ /*
+ * Block new resets until the currently executing one is finished and
+ * return the status of the existing reset for all blocked resets
+ */
+ if (!mutex_trylock(&mrioc->reset_mutex)) {
+ ioc_info(mrioc, "Another reset in progress\n");
+ return -1;
+ }
+ mrioc->reset_in_progress = 1;
+
+ if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+
+ if (retval) {
+ ioc_err(mrioc,
+ "Failed to turn off events prior to reset %d\n",
+ retval);
+ }
+ }
+
+ mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
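+ /*
+ * For a snapdump, first issue a diag fault reset and wait for the
+ * firmware's diag save to complete before the soft reset below.
+ */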
+ if (snapdump) {
+ mpi3mr_set_diagsave(mrioc);
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ if (!retval) {
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+ }
+ }
+
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
+ goto out;
+ }
+
+ mpi3mr_flush_delayed_rmhs_list(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ mpi3mr_flush_host_io(mrioc);
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_memset_buffers(mrioc);
+ retval = mpi3mr_init_ioc(mrioc, 1);
+ if (retval) {
+ pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
+ mrioc->name, reset_reason);
+ goto out;
+ }
+ ssleep(10);
+
+out:
+ if (!retval) {
+ mrioc->reset_in_progress = 0;
+ scsi_unblock_requests(mrioc->shost);
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ mrioc->ts_update_counter = 0;
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ } else {
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->unrecoverable = 1;
+ mrioc->reset_in_progress = 0;
+ retval = -1;
+ }
+
+ mutex_unlock(&mrioc->reset_mutex);
+ ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ return retval;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
new file mode 100644
index 000000000000..24ac7ddec749
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -0,0 +1,4046 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+/* global driver scope variables */
+LIST_HEAD(mrioc_list);
+DEFINE_SPINLOCK(mrioc_list_lock);
+static int mrioc_ids;
+static int warn_non_secure_ctlr;
+
+MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
+MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
+MODULE_VERSION(MPI3MR_DRIVER_VERSION);
+
+/* Module parameters*/
+int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
+
+static int prot_guard_mask = 3;
+module_param(prot_guard_mask, int, 0);
+MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
+static int logging_level;
+module_param(logging_level, int, 0);
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+/* Forward declarations*/
+/**
+ * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Calculate the host tag based on block tag for a given scmd.
+ *
+ * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
+ */
+static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag;
+ u16 host_tag, hw_queue;
+
+ unique_tag = blk_mq_unique_tag(scmd->request);
+
+ hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
+ if (hw_queue >= mrioc->num_op_reply_q)
+ return MPI3MR_HOSTTAG_INVALID;
+ host_tag = blk_mq_unique_tag_to_tag(unique_tag);
+
+ if (WARN_ON(host_tag >= mrioc->max_host_ios))
+ return MPI3MR_HOSTTAG_INVALID;
+
+ priv = scsi_cmd_priv(scmd);
+ /*host_tag 0 is invalid hence incrementing by 1*/
+ priv->host_tag = host_tag + 1;
+ priv->scmd = scmd;
+ priv->in_lld_scope = 1;
+ priv->req_q_idx = hw_queue;
+ priv->meta_chain_idx = -1;
+ priv->chain_idx = -1;
+ priv->meta_sg_valid = 0;
+ return priv->host_tag;
+}
+
+/**
+ * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
+ * @mrioc: Adapter instance reference
+ * @host_tag: Host tag
+ * @qidx: Operational queue index
+ *
+ * Identify the block tag from the host tag and queue index and
+ * retrieve associated scsi command using scsi_host_find_tag().
+ *
+ * Return: SCSI command reference or NULL.
+ */
+static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
+ struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
+{
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag = host_tag - 1;
+
+ if (WARN_ON(host_tag > mrioc->max_host_ios))
+ goto out;
+
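+ /* rebuild the block layer unique tag from the queue index and tag */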
+ unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
+
+ scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ scmd = NULL;
+ }
+out:
+ return scmd;
+}
+
+/**
+ * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Invalidate the SCSI command private data to mark the command
+ * is not in LLD scope anymore.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+
+ priv = scsi_cmd_priv(scmd);
+
+ if (WARN_ON(priv->in_lld_scope == 0))
+ return;
+ priv->host_tag = MPI3MR_HOSTTAG_INVALID;
+ priv->req_q_idx = 0xFFFF;
+ priv->scmd = NULL;
+ priv->in_lld_scope = 0;
+ priv->meta_sg_valid = 0;
+ if (priv->chain_idx >= 0) {
+ clear_bit(priv->chain_idx, mrioc->chain_bitmap);
+ priv->chain_idx = -1;
+ }
+ if (priv->meta_chain_idx >= 0) {
+ clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
+ priv->meta_chain_idx = -1;
+ }
+}
+
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
+static void mpi3mr_fwevt_worker(struct work_struct *work);
+
+/**
+ * mpi3mr_fwevt_free - firmware event memory deallocator
+ * @r: k reference pointer of the firmware event
+ *
+ * Free firmware event memory when no reference.
+ */
+static void mpi3mr_fwevt_free(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
+}
+
+/**
+ * mpi3mr_fwevt_get - k reference incrementor
+ * @fwevt: Firmware event reference
+ *
+ * Increment firmware event reference count.
+ */
+static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
+{
+ kref_get(&fwevt->ref_count);
+}
+
+/**
+ * mpi3mr_fwevt_put - k reference decrementor
+ * @fwevt: Firmware event reference
+ *
+ * decrement firmware event reference count.
+ */
+static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
+{
+ kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
+}
+
+/**
+ * mpi3mr_alloc_fwevt - Allocate firmware event
+ * @len: length of firmware event data to allocate
+ *
+ * Allocate firmware event with required length and initialize
+ * the reference counter.
+ *
+ * Return: firmware event reference.
+ */
+static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
+{
+ struct mpi3mr_fwevt *fwevt;
+
+ fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
+ if (!fwevt)
+ return NULL;
+
+ kref_init(&fwevt->ref_count);
+ return fwevt;
+}
+
+/**
+ * mpi3mr_fwevt_add_to_list - Add firmware event to the list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Add the given firmware event to the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ if (!mrioc->fwevt_worker_thread)
+ return;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ /* get fwevt reference count while adding it to fwevt_list */
+ mpi3mr_fwevt_get(fwevt);
+ INIT_LIST_HEAD(&fwevt->list);
+ list_add_tail(&fwevt->list, &mrioc->fwevt_list);
+ INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
+ /* get fwevt reference count while enqueueing it to worker queue */
+ mpi3mr_fwevt_get(fwevt);
+ queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_fwevt_del_from_list - Delete firmware event from list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Delete the given firmware event from the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&fwevt->list)) {
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
+ * @mrioc: Adapter instance reference
+ *
+ * Dequeue a firmware event from the firmware event list.
+ *
+ * Return: firmware event.
+ */
+static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
+ struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&mrioc->fwevt_list)) {
+ fwevt = list_first_entry(&mrioc->fwevt_list,
+ struct mpi3mr_fwevt, list);
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+
+ return fwevt;
+}
+
+/**
+ * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all pending firmware events from the firmware event
+ * list.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
+ !mrioc->fwevt_worker_thread)
+ return;
+
+ while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
+ (fwevt = mrioc->current_event)) {
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed, and we need a put for the
+ * reference the work had on the fwevt.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from mpi3mr_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ }
+}
+
+/**
+ * mpi3mr_invalidate_devhandles - Invalidate device handles
+ * @mrioc: Adapter instance reference
+ *
+ * Invalidate the device handles in the target device structures.
+ * Called post reset prior to reinitializing the controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ }
+ }
+}
+
+/**
+ * mpi3mr_print_scmd - print individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ * @reserved: N/A. Currently not used
+ *
+ * Print the SCSI command details if it is in LLD scope.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_print_scmd(struct request *rq,
+ void *data, bool reserved)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
+ __func__, priv->host_tag, priv->req_q_idx + 1);
+ scsi_print_command(scmd);
+ }
+
+out:
+ return(true);
+}
+
+/**
+ * mpi3mr_flush_scmd - Flush individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ * @reserved: N/A. Currently not used
+ *
+ * Return the SCSI command to the upper layers if it is in LLD
+ * scope.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_flush_scmd(struct request *rq,
+ void *data, bool reserved)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ if (priv->meta_sg_valid)
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scmd->result = DID_RESET << 16;
+ scsi_print_command(scmd);
+ scmd->scsi_done(scmd);
+ mrioc->flush_io_count++;
+ }
+
+out:
+ return(true);
+}
+
+/**
+ * mpi3mr_flush_host_io - Flush host I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all of the pending I/Os by calling
+ * blk_mq_tagset_busy_iter() for each possible tag. This is
+ * executed post controller reset
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ mrioc->flush_io_count = 0;
+ ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
+ mrioc->flush_io_count);
+}
+
+/**
+ * mpi3mr_alloc_tgtdev - target device allocator
+ *
+ * Allocate target device instance and initialize the reference
+ * count
+ *
+ * Return: target device instance.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
+ if (!tgtdev)
+ return NULL;
+ kref_init(&tgtdev->ref_count);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Add the target device to the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ mpi3mr_tgtdev_get(tgtdev);
+ INIT_LIST_HEAD(&tgtdev->list);
+ list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Remove the target device from the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve target device from the device handle.
+ * Non Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->dev_handle == handle)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve target device from the device handle.
+ * Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve target device from the Persistent ID.
+ * Non Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->perst_id == persist_id)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve target device from the Persistent ID.
+ * Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private
+ * @mrioc: Adapter instance reference
+ * @tgt_priv: Target private data
+ *
+ * Accessor to return target device from the target private
+ * data. Non Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
+ struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ tgtdev = tgt_priv->tgt_dev;
+ if (tgtdev)
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device structure
+ *
+ * Checks whether the device is exposed to upper layers and if it
+ * is, then removes the device from upper layers by calling
+ * scsi_remove_target().
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ }
+
+ if (tgtdev->starget) {
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ }
+ ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
+ * @mrioc: Adapter instance reference
+ * @perst_id: Persistent ID of the device
+ *
+ * Checks whether the device can be exposed to upper layers and
+ * if it is not already exposed, then exposes the device to upper
+ * layers by calling scsi_scan_target().
+ *
+ * Return: 0 on success, non zero on failure.
+ */
+static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
+ u16 perst_id)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (!tgtdev) {
+ retval = -1;
+ goto out;
+ }
+ if (tgtdev->is_hidden) {
+ retval = -1;
+ goto out;
+ }
+ if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
+ tgtdev->host_exposed = 1;
+ scsi_scan_target(&mrioc->shost->shost_gendev, 0,
+ tgtdev->perst_id,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ if (!tgtdev->starget)
+ tgtdev->host_exposed = 0;
+ }
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_change_queue_depth - Change QD callback handler
+ * @sdev: SCSI device reference
+ * @q_depth: Queue depth
+ *
+ * Validate and limit QD and call scsi_change_queue_depth.
+ *
+ * Return: return value of scsi_change_queue_depth
+ */
+static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
+ int q_depth)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ int retval = 0;
+
+ if (!sdev->tagged_supported)
+ q_depth = 1;
+ if (q_depth > shost->can_queue)
+ q_depth = shost->can_queue;
+ else if (!q_depth)
+ q_depth = MPI3MR_DEFAULT_SDEV_QD;
+ retval = scsi_change_queue_depth(sdev, q_depth);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_update_sdev - Update SCSI device information
+ * @sdev: SCSI device reference
+ * @data: target device reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the target specific information into each
+ * SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = (struct mpi3mr_tgt_dev *)data;
+ if (!tgtdev)
+ return;
+
+ mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ /*The block layer hw sector size = 512*/
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any devices
+ * that went missing during the reset and remove them from the
+ * upper layers, or to expose any newly detected device to the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
+ tgtdev->host_exposed) {
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ tgtdev = NULL;
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+ !tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+}
+
+/**
+ * mpi3mr_update_tgtdev - Update cached target device information
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device internal structure
+ * @dev_pg0: New device page0
+ *
+ * Update the information from the device page0 into the driver
+ * cached target device structure.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
+{
+ u16 flags = 0;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ u8 prot_mask = 0;
+
+ tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
+ tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
+ tgtdev->slot = le16_to_cpu(dev_pg0->slot);
+ tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
+ tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+
+ flags = le16_to_cpu(dev_pg0->flags);
+ tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
+ scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
+ scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ }
+
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_SAS_SATA:
+ {
+ struct mpi3_device0_sas_sata_format *sasinf =
+ &dev_pg0->device_specific.sas_sata_format;
+ u16 dev_info = le16_to_cpu(sasinf->device_info);
+
+ tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
+ tgtdev->dev_spec.sas_sata_inf.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
+ tgtdev->is_hidden = 1;
+ else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
+ MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
+ tgtdev->is_hidden = 1;
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ {
+ struct mpi3_device0_pcie_format *pcieinf =
+ &dev_pg0->device_specific.pcie_format;
+ u16 dev_info = le16_to_cpu(pcieinf->device_info);
+
+ tgtdev->dev_spec.pcie_inf.capb =
+ le32_to_cpu(pcieinf->capabilities);
+ tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
+ /* 2^12 = 4096 */
+ tgtdev->dev_spec.pcie_inf.pgsz = 12;
+ if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
+ tgtdev->dev_spec.pcie_inf.mdts =
+ le32_to_cpu(pcieinf->maximum_data_transfer_size);
+ tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
+ tgtdev->dev_spec.pcie_inf.reset_to =
+ pcieinf->controller_reset_to;
+ tgtdev->dev_spec.pcie_inf.abort_to =
+ pcieinf->nv_me_abort_to;
+ }
+ if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
+ tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
+ if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
+ tgtdev->is_hidden = 1;
+ if (!mrioc->shost)
+ break;
+ prot_mask = scsi_host_get_prot(mrioc->shost);
+ if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
+ scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
+ ioc_info(mrioc,
+ "%s : Disabling DIX0 prot capability\n", __func__);
+ ioc_info(mrioc,
+ "because HBA does not support DIX0 operation on NVME drives\n");
+ }
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_VD:
+ {
+ struct mpi3_device0_vd_format *vdinf =
+ &dev_pg0->device_specific.vd_format;
+
+ tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
+ if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
+ tgtdev->is_hidden = 1;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event information.
+ *
+ * Process Device status Change event and based on device's new
+ * information, either expose the device to the upper layers, or
+ * remove the device from upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ u16 dev_handle = 0;
+ u8 uhide = 0, delete = 0, cleanup = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)fwevt->event_data;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+ ioc_info(mrioc,
+ "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
+ __func__, dev_handle, evtdata->reason_code);
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
+ uhide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ cleanup = 1;
+ break;
+ default:
+ ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
+ evtdata->reason_code);
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (uhide) {
+ tgtdev->is_hidden = 0;
+ if (!tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ if (delete)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ }
+ if (cleanup) {
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: New device page0
+ *
+ * Process the Device Info Change event and, based on the
+ * device's new information, either expose the device to the
+ * upper layers, remove it from the upper layers, or update
+ * the details of the device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 dev_handle = 0, perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ ioc_info(mrioc,
+ "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
+ __func__, dev_handle, perst_id);
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ if (!tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ if (tgtdev->is_hidden && tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
+ starget_for_each_device(tgtdev->starget, (void *)tgtdev,
+ mpi3mr_update_sdev);
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_sastopochg_evt_debug - SASTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: SAS topology change list event data
+ *
+ * Prints information about the SAS topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_sas_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u8 reason_code, phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->exp_status) {
+ case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :sas topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->expander_dev_handle),
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_phy_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ phy_number = event_data->start_phy_num + i;
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ ioc_info(mrioc,
+ "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, phy_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the SAS topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_sas_topology_change_list *event_data =
+ (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+
+ mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: PCIe topology change list event data
+ *
+ * Prints information about the PCIe topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_pcie_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->switch_status) {
+ case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->switch_dev_handle),
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_port_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ port_number = event_data->start_port_num + i;
+ reason_code = event_data->port_entry[i].port_status;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->port_entry[i].current_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->port_entry[i].previous_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ ioc_info(mrioc,
+ "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, port_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the PCIe topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_pcie_topology_change_list *event_data =
+ (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+
+ mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->port_entry[i].port_status;
+
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Identifies the firmware event and calls the corresponding
+ * bottom half handler and sends event acknowledgment if required.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ mrioc->current_event = fwevt;
+ mpi3mr_fwevt_del_from_list(mrioc, fwevt);
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ if (!fwevt->process_evt)
+ goto evt_ack;
+
+ switch (fwevt->event_id) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *dev_pg0 =
+ (struct mpi3_device_page0 *)fwevt->event_data;
+ mpi3mr_report_tgtdev_to_host(mrioc,
+ le16_to_cpu(dev_pg0->persistent_id));
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ mpi3mr_devinfochg_evt_bh(mrioc,
+ (struct mpi3_device_page0 *)fwevt->event_data);
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ default:
+ break;
+ }
+
+evt_ack:
+ if (fwevt->send_ack)
+ mpi3mr_send_event_ack(mrioc, fwevt->event_id,
+ fwevt->evt_ctx);
+out:
+ /* Put fwevt reference count to neutralize kref_init increment */
+ mpi3mr_fwevt_put(fwevt);
+ mrioc->current_event = NULL;
+}
+
+/**
+ * mpi3mr_fwevt_worker - Firmware event worker
+ * @work: Work struct containing firmware event
+ *
+ * Extracts the firmware event and calls mpi3mr_fwevt_bh.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_worker(struct work_struct *work)
+{
+ struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
+ work);
+ mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+}
+
+/**
+ * mpi3mr_create_tgtdev - Create and add a target device
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: Device Page 0 data
+ *
+ * If the device specified by the device page 0 data is not
+ * present in the driver's internal list, allocate the memory
+ * for the device, populate the data and add to the list, else
+ * update the device data. The key is persistent ID.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (tgtdev) {
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ tgtdev = mpi3mr_alloc_tgtdev();
+ if (!tgtdev)
+ return -ENOMEM;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_flush_delayed_rmhs_list - Flush pending commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush pending commands in the delayed removal handshake list
+ * due to a controller reset or driver removal as a cleanup.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc)
+{
+ struct delayed_dev_rmhs_node *_rmhs_node;
+
+ while (!list_empty(&mrioc->delayed_rmhs_list)) {
+ _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ list_del(&_rmhs_node->list);
+ kfree(_rmhs_node);
+ }
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list or retries the removal handshake sequence
+ * based on the IOU control request IOC status.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo);
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
+ __func__, drv_cmd->dev_handle,
+ drv_cmd->retry_count);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
+ drv_cmd, drv_cmd->iou_rc);
+ return;
+ }
+ ioc_err(mrioc,
+ "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ } else {
+ ioc_info(mrioc,
+ "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
+ }
+
+ if (!list_empty(&mrioc->delayed_rmhs_list)) {
+ delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ drv_cmd->dev_handle = delayed_dev_rmhs->handle;
+ drv_cmd->retry_count = 0;
+ drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
+ drv_cmd->iou_rc);
+ list_del(&delayed_dev_rmhs->list);
+ kfree(delayed_dev_rmhs);
+ return;
+ }
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list or issues an IO unit control request as
+ * part of the device removal or hidden acknowledgment handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_iounit_control_request iou_ctrl;
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval;
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ if (tm_reply)
+ pr_info(IOCNAME
+ "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count));
+
+ pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, cmd_idx);
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
+ iou_ctrl.operation = drv_cmd->iou_rc;
+ iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
+ iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
+ 1);
+ if (retval) {
+ pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
+ mrioc->name);
+ goto out_failed;
+ }
+
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ * @cmdparam: Internal command tracker
+ * @iou_rc: IO unit reason code
+ *
+ * Issues a target reset TM to the firmware or adds it to a
+ * pending list as part of the device removal or hidden
+ * acknowledgment handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ int retval = 0;
+ u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ u8 retrycount = 5;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ if (drv_cmd)
+ goto issue_cmd;
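+	/*
+	 * Reserve a free device-removal command slot from the bitmap,
+	 * retrying a few times on contention.
+	 */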
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
+ MPI3MR_NUM_DEVRMCMD);
+ if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
+ if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
+ delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
+ GFP_ATOMIC);
+ if (!delayed_dev_rmhs)
+ return;
+ INIT_LIST_HEAD(&delayed_dev_rmhs->list);
+ delayed_dev_rmhs->handle = handle;
+ delayed_dev_rmhs->iou_rc = iou_rc;
+ list_add_tail(&delayed_dev_rmhs->list,
+ &mrioc->delayed_rmhs_list);
+ ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
+ __func__, handle);
+ return;
+ }
+ drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ ioc_info(mrioc,
+ "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
+ __func__, handle, cmd_idx);
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
+ drv_cmd->dev_handle = handle;
+ drv_cmd->iou_rc = iou_rc;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ set_bit(handle, mrioc->removepend_bitmap);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
+ __func__);
+ goto out_failed;
+ }
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware with reason as
+ * remove, for PCIe devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_pcie_topology_change_list *topo_evt =
+ (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->port_entry[i].port_status;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware with reason as
+ * remove, for SAS/SATA devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_sas_topology_change_list *topo_evt =
+ (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware with reason as
+ * remove or hide acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 dev_handle = 0;
+ u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
+ block = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ hide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ remove = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
+ ublock = 1;
+ break;
+ default:
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (hide)
+ tgtdev->is_hidden = hide;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ if (block)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ if (delete)
+ scsi_tgt_priv_data->dev_removed = 1;
+ if (ublock)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ }
+ if (remove)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ if (hide)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_HIDDEN_ACK);
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies the new shutdown timeout value and updates it.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_energy_pack_change *evtdata =
+ (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
+ u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
+
+ if (shutdown_timeout <= 0) {
+ ioc_warn(mrioc,
+ "%s :Invalid Shutdown Timeout received = %d\n",
+ __func__, shutdown_timeout);
+ return;
+ }
+
+ ioc_info(mrioc,
+ "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
+ __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
+ mrioc->facts.shutdown_timeout = shutdown_timeout;
+}
+
+/**
+ * mpi3mr_os_handle_events - Firmware event handler
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies whether the event has to be handled and acknowledged,
+ * and either processes the event in the top half and/or schedules
+ * a bottom half through mpi3mr_fwevt_worker.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 evt_type, sz;
+ struct mpi3mr_fwevt *fwevt = NULL;
+ bool ack_req = 0, process_evt_bh = 0;
+
+ if (mrioc->stop_drv_processing)
+ return;
+
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ ack_req = 1;
+
+ evt_type = event_reply->event;
+
+ switch (evt_type) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *dev_pg0 =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
+ ioc_err(mrioc,
+ "%s :Failed to add device in the device add event\n",
+ __func__);
+ else
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ process_evt_bh = 1;
+ mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_sastopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ {
+ mpi3mr_energypackchg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_SAS_DISCOVERY:
+ case MPI3_EVENT_CABLE_MGMT:
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ break;
+ default:
+ ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
+ __func__, evt_type);
+ break;
+ }
+ if (process_evt_bh || ack_req) {
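+		/* event_data_length is in dwords, so convert it to bytes */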
+ sz = event_reply->event_data_length * 4;
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
+ __func__, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fwevt->event_data, event_reply->event_data, sz);
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = evt_type;
+ fwevt->send_ack = ack_req;
+ fwevt->process_evt = process_evt_bh;
+ fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+ }
+}
+
+/**
+ * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * Identifies the protection information flags from the SCSI
+ * command and sets the appropriate flags in the MPI3 SCSI IO
+ * request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ u16 eedp_flags = 0;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+
+ switch (prot_op) {
+ case SCSI_PROT_NORMAL:
+ return;
+ case SCSI_PROT_READ_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_READ_PASS:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
+ MPI3_EEDPFLAGS_CHK_REF_TAG | MPI3_EEDPFLAGS_CHK_APP_TAG |
+ MPI3_EEDPFLAGS_CHK_GUARD;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ if (scsi_host_get_guard(scmd->device->host)
+ & SHOST_DIX_GUARD_IP) {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN |
+ MPI3_EEDPFLAGS_CHK_APP_TAG |
+ MPI3_EEDPFLAGS_CHK_GUARD |
+ MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ scsiio_req->sgl[0].eedp.application_tag_translation_mask =
+ 0xffff;
+ } else {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
+ MPI3_EEDPFLAGS_CHK_REF_TAG |
+ MPI3_EEDPFLAGS_CHK_APP_TAG |
+ MPI3_EEDPFLAGS_CHK_GUARD;
+ }
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ default:
+ return;
+ }
+
+ if (scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP)
+ eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE0:
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ scsiio_req->cdb.eedp32.primary_reference_tag =
+ cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ break;
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
+ MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE |
+ MPI3_EEDPFLAGS_CHK_GUARD;
+ scsiio_req->cdb.eedp32.primary_reference_tag =
+ cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ break;
+ case SCSI_PROT_DIF_TYPE3:
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD |
+ MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+ break;
+
+ default:
+ scsiio_req->msg_flags &= ~(MPI3_SCSIIO_MSGFLAGS_METASGL_VALID);
+ return;
+ }
+
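+	/*
+	 * Translate the device's logical block size into the
+	 * corresponding EEDP user data size encoding.
+	 */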
+ switch (scmd->device->sector_size) {
+ case 512:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
+ break;
+ case 520:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
+ break;
+ case 4080:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
+ break;
+ case 4088:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
+ break;
+ case 4096:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
+ break;
+ case 4104:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
+ break;
+ case 4160:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
+ break;
+ default:
+ break;
+ }
+
+ scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
+ scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
+}
+
+/**
+ * mpi3mr_build_sense_buffer - Map sense information
+ * @desc: Sense type
+ * @buf: Sense buffer to populate
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ * Maps the given sense information into either descriptor or
+ * fixed format sense data.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
+ u8 asc, u8 ascq)
+{
+ if (desc) {
+ buf[0] = 0x72; /* descriptor, current */
+ buf[1] = key;
+ buf[2] = asc;
+ buf[3] = ascq;
+ buf[7] = 0;
+ } else {
+ buf[0] = 0x70; /* fixed, current */
+ buf[2] = key;
+ buf[7] = 0xa;
+ buf[12] = asc;
+ buf[13] = ascq;
+ }
+}
+
+/**
+ * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
+ * @scmd: SCSI command reference
+ * @ioc_status: status of MPI3 request
+ *
+ * Maps the EEDP error status of the SCSI IO request to sense
+ * data.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
+ u16 ioc_status)
+{
+ u8 ascq = 0;
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+
+ mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, ascq);
+ scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * mpi3mr_process_op_reply_desc - reply descriptor handler
+ * @mrioc: Adapter instance reference
+ * @reply_desc: Operational reply descriptor
+ * @reply_dma: placeholder for reply DMA address
+ * @qidx: Operational queue index
+ *
+ * Processes the operational reply descriptor, identifies the
+ * descriptor type and, based on the descriptor, maps the MPI3
+ * request status to a SCSI command status and calls the
+ * scsi_done callback.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc = NULL;
+ struct mpi3_address_reply_descriptor *addr_desc = NULL;
+ struct mpi3_success_reply_descriptor *success_desc = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u8 *sense_buf = NULL;
+ u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
+ u32 xfer_count = 0, sense_count = 0, resp_data = 0;
+ u16 dev_handle = 0xFFFF;
+ struct scsi_sense_hdr sshdr;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
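+	/*
+	 * The reply is either a status descriptor, an address descriptor
+	 * pointing to a full SCSI IO reply frame, or a success descriptor.
+	 */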
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
+ *reply_dma);
+ if (!scsi_reply) {
+ panic("%s: scsi_reply is NULL, this shouldn't happen\n",
+ mrioc->name);
+ goto out;
+ }
+ host_tag = le16_to_cpu(scsi_reply->host_tag);
+ ioc_status = le16_to_cpu(scsi_reply->ioc_status);
+ scsi_status = scsi_reply->scsi_status;
+ scsi_state = scsi_reply->scsi_state;
+ dev_handle = le16_to_cpu(scsi_reply->dev_handle);
+ sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
+ xfer_count = le32_to_cpu(scsi_reply->transfer_count);
+ sense_count = le32_to_cpu(scsi_reply->sense_count);
+ resp_data = le32_to_cpu(scsi_reply->response_data);
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
+ panic("%s: Ran out of sense buffers\n", mrioc->name);
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+ scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
+ if (!scmd) {
+ panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
+ mrioc->name, host_tag);
+ goto out;
+ }
+ priv = scsi_cmd_priv(scmd);
+ if (success_desc) {
+ scmd->result = DID_OK << 16;
+ goto out_success;
+ }
+ if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
+ xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
+ scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
+ ioc_status = MPI3_IOCSTATUS_SUCCESS;
+
+ if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
+ sense_buf) {
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
+
+ memcpy(scmd->sense_buffer, sense_buf, sz);
+ }
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_BUSY:
+ case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_count == 0) || (scmd->underflow > xfer_count))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
+ break;
+ if (xfer_count < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ fallthrough;
+ case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI3_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ mpi3mr_map_eedp_error(scmd, ioc_status);
+ break;
+ case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FUNCTION:
+ case MPI3_IOCSTATUS_INVALID_SGL:
+ case MPI3_IOCSTATUS_INTERNAL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FIELD:
+ case MPI3_IOCSTATUS_INVALID_STATE:
+ case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
+
+ if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
+ (scmd->cmnd[0] != ATA_16)) {
+ ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
+ scmd->result);
+ scsi_print_command(scmd);
+ ioc_info(mrioc,
+ "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
+ __func__, dev_handle, ioc_status, ioc_loginfo,
+ priv->req_q_idx + 1);
+ ioc_info(mrioc,
+ " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
+ host_tag, scsi_state, scsi_status, xfer_count, resp_data);
+ if (sense_buf) {
+ scsi_normalize_sense(sense_buf, sense_count, &sshdr);
+ ioc_info(mrioc,
+ "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
+ __func__, sense_count, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
+ }
+ }
+out_success:
+ if (priv->meta_sg_valid) {
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ }
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scmd->scsi_done(scmd);
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
+/**
+ * mpi3mr_get_chain_idx - get free chain buffer index
+ * @mrioc: Adapter instance reference
+ *
+ * Try to get a free chain buffer index from the free pool.
+ *
+ * Return: -1 on failure or the free chain buffer index
+ */
+static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
+{
+ u8 retry_count = 5;
+ int cmd_idx = -1;
+
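+	/* Find and reserve a free chain buffer index under the chain_buf_lock */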
+ do {
+ spin_lock(&mrioc->chain_buf_lock);
+ cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
+ mrioc->chain_buf_count);
+ if (cmd_idx < mrioc->chain_buf_count) {
+ set_bit(cmd_idx, mrioc->chain_bitmap);
+ spin_unlock(&mrioc->chain_buf_lock);
+ break;
+ }
+ spin_unlock(&mrioc->chain_buf_lock);
+ cmd_idx = -1;
+ } while (retry_count--);
+ return cmd_idx;
+}
+
+/**
+ * mpi3mr_prepare_sg_scmd - build scatter gather list
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function maps the SCSI command's data and protection SGEs
+ * to MPI request SGEs. If required, an additional 4K chain buffer
+ * is used to send the SGEs.
+ *
+ * Return: 0 on success, -ENOMEM on dma_map_sg failure
+ */
+static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_length;
+ int sges_left, chain_idx;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 last_chain_sgl_flags;
+ struct chain_element *chain_req;
+ struct scmd_priv *priv = NULL;
+ u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
+ MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
+
+ priv = scsi_cmd_priv(scmd);
+
+ simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI3_SGE_FLAGS_END_OF_LIST;
+ last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+
+ if (meta_sg)
+ sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
+ else
+ sg_local = &scsiio_req->sgl;
+
+ if (!scsiio_req->data_length && !meta_sg) {
+ mpi3mr_build_zero_len_sge(sg_local);
+ return 0;
+ }
+
+ if (meta_sg) {
+ sg_scmd = scsi_prot_sglist(scmd);
+ sges_left = dma_map_sg(&mrioc->pdev->dev,
+ scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd),
+ scmd->sc_data_direction);
+ priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
+ } else {
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ }
+
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+ if (sges_left > MPI3MR_SG_DEPTH) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map returned unsupported sge count %d!\n",
+ sges_left);
+ return -ENOMEM;
+ }
+
+ sges_in_segment = (mrioc->facts.op_req_sz -
+ offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
+
+ if (scsiio_req->sgl[0].eedp.flags ==
+ MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_in_segment--;
+ /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
+ }
+
+ if (scsiio_req->msg_flags ==
+ MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
+ sges_in_segment--;
+ /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
+ }
+
+ if (meta_sg)
+ sges_in_segment = 1;
+
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ sges_in_segment--;
+ }
+
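+	/*
+	 * The remaining SGEs do not fit in the request frame, so place
+	 * them in a chain buffer and point the last request SGE at it.
+	 */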
+ chain_idx = mpi3mr_get_chain_idx(mrioc);
+ if (chain_idx < 0)
+ return -1;
+ chain_req = &mrioc->chain_sgl_list[chain_idx];
+ if (meta_sg)
+ priv->meta_chain_idx = chain_idx;
+ else
+ priv->chain_idx = chain_idx;
+
+ chain = chain_req->addr;
+ chain_dma = chain_req->dma_addr;
+ sges_in_segment = sges_left;
+ chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
+
+ mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
+ chain_length, chain_dma);
+
+ sg_local = chain;
+
+fill_in_last_segment:
+ while (sges_left > 0) {
+ if (sges_left == 1)
+ mpi3mr_add_sg_single(sg_local,
+ simple_sgl_flags_last, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function calls mpi3mr_prepare_sg_scmd for constructing
+ * both data SGEs and protection information SGEs in the MPI
+ * format from the SCSI command as appropriate.
+ *
+ * Return: return value of mpi3mr_prepare_sg_scmd.
+ */
+static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ int ret;
+
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ if (ret)
+ return ret;
+
+ if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
+ /* There is a valid meta sg */
+ scsiio_req->flags |=
+ cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ }
+
+ return ret;
+}
+
+/**
+ * mpi3mr_print_response_code - print TM response as a string
+ * @mrioc: Adapter instance reference
+ * @resp_code: TM response code
+ *
+ * Print TM response code as a readable string.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
+{
+ char *desc;
+
+ switch (resp_code) {
+ case MPI3MR_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI3MR_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI3MR_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI3MR_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI3MR_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI3MR_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case MPI3MR_RSP_TM_OVERLAPPED_TAG:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__,
+ resp_code, desc);
+}
+
+/**
+ * mpi3mr_issue_tm - Issue Task Management request
+ * @mrioc: Adapter instance reference
+ * @tm_type: Task Management type
+ * @handle: Device handle
+ * @lun: lun ID
+ * @htag: Host tag of the TM request
+ * @drv_cmd: Internal command tracker
+ * @resp_code: Response code placeholder
+ * @cmd_priv: SCSI command private data
+ *
+ * Issues a Task Management Request to the controller for a
+ * specified target, lun and command, waits for its completion,
+ * and checks the TM response. Recovers the TM if it timed out
+ * by issuing a controller reset.
+ *
+ * Return: 0 on success, non-zero on errors
+ */
+static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scmd_priv *cmd_priv)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct op_req_qinfo *op_req_q = NULL;
+
+ ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
+ __func__, tm_type, handle);
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
+ __func__);
+ goto out;
+ }
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ mutex_lock(&drv_cmd->mutex);
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 1;
+ drv_cmd->callback = NULL;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = tm_type;
+ tm_req.host_tag = cpu_to_le16(htag);
+
+ int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ if (cmd_priv) {
+ op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
+ tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
+ tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid);
+ }
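+	/*
+	 * For PCIe (NVMe) devices, prefer the device-reported abort
+	 * timeout for command-specific TMs and the reset timeout
+	 * otherwise, when available.
+	 */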
+ if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
+ if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
+ timeout = tgtdev->dev_spec.pcie_inf.abort_to;
+ else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
+ timeout = tgtdev->dev_spec.pcie_inf.reset_to;
+ }
+
+ init_completion(&drv_cmd->done);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
+
+ if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__);
+ drv_cmd->is_waiting = 0;
+ retval = -1;
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ goto out_unlock;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ __func__, handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ if (!tm_reply) {
+ ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ switch (*resp_code) {
+ case MPI3MR_RSP_TM_SUCCEEDED:
+ case MPI3MR_RSP_TM_COMPLETE:
+ break;
+ case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ ioc_info(mrioc,
+ "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ",
+ __func__, tm_type, handle);
+ ioc_info(mrioc,
+ "with ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count));
+ mpi3mr_print_response_code(mrioc, *resp_code);
+
+out_unlock:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&drv_cmd->mutex);
+ if (scsi_tgt_priv_data)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ if (!retval) {
+ /*
+ * Flush all IRQ handlers by calling synchronize_irq().
+ * mpi3mr_ioc_disable_intr() takes care of it.
+ */
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ }
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_bios_param - BIOS param callback
+ * @sdev: SCSI device reference
+ * @bdev: Block device reference
+ * @capacity: Capacity in logical sectors
+ * @params: Parameter array
+ *
+ * Just sets the parameters with heads/sectors/cylinders.
+ *
+ * Return: 0 always
+ */
+static int mpi3mr_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
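+	/*
+	 * For capacities of 1GB (0x200000 512-byte sectors) and above,
+	 * use the extended 255 heads / 63 sectors geometry.
+	 */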
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+ return 0;
+}
+
+/**
+ * mpi3mr_map_queues - Map queues callback handler
+ * @shost: SCSI host reference
+ *
+ * Calls blk_mq_pci_map_queues with the offset of the operational
+ * queue from which the mapping has to be done.
+ *
+ * Return: return of blk_mq_pci_map_queues
+ */
+static int mpi3mr_map_queues(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ mrioc->pdev, mrioc->op_reply_q_offset);
+}
+
+/**
+ * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
+ * @mrioc: Adapter instance reference
+ *
+ * Calculate the pending I/Os for the controller and return.
+ *
+ * Return: Number of pending I/Os
+ */
+static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ uint pend_ios = 0;
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
+ return pend_ios;
+}
+
+/**
+ * mpi3mr_print_pending_host_io - print pending I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Print number of pending I/Os and each I/O details prior to
+ * reset for debug purpose.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
+ __func__, mpi3mr_get_fw_pending_ios(mrioc));
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_print_scmd, (void *)mrioc);
+}
+
+/**
+ * mpi3mr_wait_for_host_io - block for I/Os to complete
+ * @mrioc: Adapter instance reference
+ * @timeout: timeout in seconds
+ *
+ * Waits for pending I/Os for the given adapter to complete or
+ * to hit the timeout.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
+{
+ enum mpi3mr_iocstate iocstate;
+ int i = 0;
+
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ return;
+
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ return;
+ ioc_info(mrioc,
+ "%s :Waiting for %d seconds prior to reset for %d I/O\n",
+ __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
+
+ for (i = 0; i < timeout; i++) {
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ break;
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ break;
+ msleep(1000);
+ }
+
+ ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
+ mpi3mr_get_fw_pending_ios(mrioc));
+}
+
+/**
+ * mpi3mr_eh_host_reset - Host reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a controller reset if the scmd is for a physical device;
+ * if the scmd is for a RAID volume, then waits for
+ * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checks whether any I/Os
+ * are pending prior to issuing a reset to the controller.
+ *
+ * Return: SUCCESS on successful reset, else FAILED
+ */
+static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
+ int retval = FAILED, ret;
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_type = stgt_priv_data->dev_type;
+ }
+
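+ /* For RAID volumes, give outstanding I/Os a chance to complete before escalating to a controller reset */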
+ if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
+ mpi3mr_wait_for_host_io(mrioc,
+ MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
+ if (!mpi3mr_get_fw_pending_ios(mrioc)) {
+ retval = SUCCESS;
+ goto out;
+ }
+ }
+
+ mpi3mr_print_pending_host_io(mrioc);
+ ret = mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EH_HOS, 1);
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Host reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_target_reset - Target reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issue Target reset Task Management and verify the scmd is
+ * terminated successfully and return status accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd else
+ * FAILED
+ */
+static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Target Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ sdev_printk(KERN_INFO, scmd->device,
+ "Target Reset is issued to handle(0x%04x)\n",
+ dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Target reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_dev_reset - Device reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issue lun reset Task Management and verify the scmd is
+ * terminated successfully and return status accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd else
+ * FAILED
+ */
+static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ sdev_printk(KERN_INFO, scmd->device,
+ "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Device(lun) reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_scan_start - Scan start callback handler
+ * @shost: SCSI host reference
+ *
+ * Issue port enable request asynchronously.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_scan_start(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ mrioc->scan_started = 1;
+ ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
+ if (mpi3mr_issue_port_enable(mrioc, 1)) {
+ ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
+ mrioc->scan_started = 0;
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ }
+}
+
+/**
+ * mpi3mr_scan_finished - Scan finished callback handler
+ * @shost: SCSI host reference
+ * @time: Jiffies from the scan start
+ *
+ * Checks whether the port enable is completed, timed out or
+ * failed and sets the scan status accordingly after taking any
+ * recovery action if required.
+ *
+ * Return: 1 on scan finished or timed out, 0 for in progress
+ */
+static int mpi3mr_scan_finished(struct Scsi_Host *shost,
+ unsigned long time)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+
+ if (time >= (pe_timeout * HZ)) {
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ ioc_err(mrioc, "%s :port enable request timed out\n", __func__);
+ mrioc->is_driver_loading = 0;
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT, 1);
+ }
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "%s :port enable failed with (ioc_status=0x%08x)\n",
+ __func__, mrioc->scan_failed);
+ mrioc->is_driver_loading = 0;
+ mrioc->stop_drv_processing = 1;
+ return 1;
+ }
+
+ if (mrioc->scan_started)
+ return 0;
+ ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__);
+ mpi3mr_start_watchdog(mrioc);
+ mrioc->is_driver_loading = 0;
+
+ return 1;
+}
+
+/**
+ * mpi3mr_slave_destroy - Slave destroy callback handler
+ * @sdev: SCSI device reference
+ *
+ * Cleanup and free per device(lun) private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ struct scsi_target *starget;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ scsi_tgt_priv_data->num_luns--;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
+ tgt_dev->starget = NULL;
+ if (tgt_dev)
+ mpi3mr_tgtdev_put(tgt_dev);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_target_destroy - Target destroy callback handler
+ * @starget: SCSI target reference
+ *
+ * Cleanup and free per target private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+
+ if (!starget->hostdata)
+ return;
+
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
+ if (tgt_dev && (tgt_dev->starget == starget) &&
+ (tgt_dev->perst_id == starget->id))
+ tgt_dev->starget = NULL;
+ if (tgt_dev) {
+ scsi_tgt_priv_data->tgt_dev = NULL;
+ scsi_tgt_priv_data->perst_id = 0;
+ mpi3mr_tgtdev_put(tgt_dev);
+ mpi3mr_tgtdev_put(tgt_dev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_slave_configure - Slave configure callback handler
+ * @sdev: SCSI device reference
+ *
+ * Configure queue depth, max hardware sectors and virt boundary
+ * as required
+ *
+ * Return: 0 always.
+ */
+static int mpi3mr_slave_configure(struct scsi_device *sdev)
+{
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ int retval = 0;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (!tgt_dev)
+ return -ENXIO;
+
+ mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
+ switch (tgt_dev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ /* The block layer hw sector size = 512 */
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
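+ /* Limit SG elements so they do not cross the device page size boundary, as needed for NVMe PRP lists */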
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ break;
+ default:
+ break;
+ }
+
+ mpi3mr_tgtdev_put(tgt_dev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_slave_alloc - Slave alloc callback handler
+ * @sdev: SCSI device reference
+ *
+ * Allocate per device(lun) private data and initialize it.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
+ unsigned long flags;
+ struct scsi_target *starget;
+ int retval = 0;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+
+ if (tgt_dev) {
+ if (tgt_dev->starget == NULL)
+ tgt_dev->starget = starget;
+ mpi3mr_tgtdev_put(tgt_dev);
+ retval = 0;
+ } else {
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return -ENXIO;
+ }
+
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
+ if (!scsi_dev_priv_data)
+ return -ENOMEM;
+
+ scsi_dev_priv_data->lun_id = sdev->lun;
+ scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
+ sdev->hostdata = scsi_dev_priv_data;
+
+ scsi_tgt_priv_data->num_luns++;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_target_alloc - Target alloc callback handler
+ * @starget: SCSI target reference
+ *
+ * Allocate per target private data and initialize it.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ int retval = 0;
+
+ scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
+ if (!scsi_tgt_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = scsi_tgt_priv_data;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ } else
+ retval = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_return_unmap - Whether an unmap is allowed
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI Command reference
+ *
+ * The controller hardware cannot handle certain unmap commands
+ * for NVMe drives, so this routine checks for those, completes the
+ * SCSI command with proper status and sense data, and returns
+ * true.
+ *
+ * Return: TRUE for not allowed unmap, FALSE otherwise.
+ */
+static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char *buf;
+ u16 param_len, desc_len;
+
+ param_len = get_unaligned_be16(scmd->cmnd + 7);
+
+ if (!param_len) {
+ ioc_warn(mrioc,
+ "%s: cdb received with zero parameter length\n",
+ __func__);
+ scsi_print_command(scmd);
+ scmd->result = DID_OK << 16;
+ scmd->scsi_done(scmd);
+ return true;
+ }
+
+ if (param_len < 24) {
+ ioc_warn(mrioc,
+ "%s: cdb received with invalid param_len: %d\n",
+ __func__, param_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scmd->scsi_done(scmd);
+ return true;
+ }
+ if (param_len != scsi_bufflen(scmd)) {
+ ioc_warn(mrioc,
+ "%s: cdb received with param_len: %d bufflen: %d\n",
+ __func__, param_len, scsi_bufflen(scmd));
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scmd->scsi_done(scmd);
+ return true;
+ }
+ buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
+ if (!buf) {
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x55, 0x03);
+ scmd->scsi_done(scmd);
+ return true;
+ }
+ scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
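+ /* Bytes 2-3 of the UNMAP parameter list hold the block descriptor data length */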
+ desc_len = get_unaligned_be16(&buf[2]);
+
+ if (desc_len < 16) {
+ ioc_warn(mrioc,
+ "%s: Invalid descriptor length in param list: %d\n",
+ __func__, desc_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x26, 0);
+ scmd->scsi_done(scmd);
+ kfree(buf);
+ return true;
+ }
+
+ if (param_len > (desc_len + 8)) {
+ scsi_print_command(scmd);
+ ioc_warn(mrioc,
+ "%s: Truncating param_len(%d) to desc_len+8(%d)\n",
+ __func__, param_len, (desc_len + 8));
+ param_len = desc_len + 8;
+ put_unaligned_be16(param_len, scmd->cmnd + 7);
+ scsi_print_command(scmd);
+ }
+
+ kfree(buf);
+ return false;
+}
+
+/**
+ * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
+ * @scmd: SCSI Command reference
+ *
+ * Checks whether a cdb is allowed during shutdown or not.
+ *
+ * Return: TRUE for allowed commands, FALSE otherwise.
+ */
+inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * mpi3mr_qcmd - I/O request dispatcher
+ * @shost: SCSI Host reference
+ * @scmd: SCSI Command reference
+ *
+ * Issues the SCSI Command as an MPI3 request.
+ *
+ * Return: 0 on successful queueing of the request or if the
+ * request is completed with failure.
+ * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
+ * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
+ */
+static int mpi3mr_qcmd(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *scmd_priv_data = NULL;
+ struct mpi3_scsi_io_request *scsiio_req = NULL;
+ struct op_req_qinfo *op_req_q = NULL;
+ int retval = 0;
+ u16 dev_handle;
+ u16 host_tag;
+ u32 scsiio_flags = 0;
+ struct request *rq = scmd->request;
+ int iprio_class;
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->stop_drv_processing &&
+ !(mpi3mr_allow_scmd_to_fw(scmd))) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->reset_in_progress) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+
+ dev_handle = stgt_priv_data->dev_handle;
+ if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+ if (stgt_priv_data->dev_removed) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ if ((scmd->cmnd[0] == UNMAP) &&
+ (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ mpi3mr_check_return_unmap(mrioc, scmd))
+ goto out;
+
+ host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
+ if (host_tag == MPI3MR_HOSTTAG_INVALID) {
+ scmd->result = DID_ERROR << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
+ else
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
+
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
+
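+ /* Elevate MPI command priority for requests in the real-time I/O priority class */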
+ if (sdev_priv_data->ncq_prio_enable) {
+ iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (iprio_class == IOPRIO_CLASS_RT)
+ scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
+ }
+
+ if (scmd->cmd_len > 16)
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
+
+ scmd_priv_data = scsi_cmd_priv(scmd);
+ memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
+ scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
+ scsiio_req->host_tag = cpu_to_le16(host_tag);
+
+ mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->dev_handle = cpu_to_le16(dev_handle);
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
+ int_to_scsilun(sdev_priv_data->lun_id,
+ (struct scsi_lun *)scsiio_req->lun);
+
+ if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
+
+ if (mpi3mr_op_request_post(mrioc, op_req_q,
+ scmd_priv_data->mpi3mr_scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+static struct scsi_host_template mpi3mr_driver_template = {
+ .module = THIS_MODULE,
+ .name = "MPI3 Storage Controller",
+ .proc_name = MPI3MR_DRIVER_NAME,
+ .queuecommand = mpi3mr_qcmd,
+ .target_alloc = mpi3mr_target_alloc,
+ .slave_alloc = mpi3mr_slave_alloc,
+ .slave_configure = mpi3mr_slave_configure,
+ .target_destroy = mpi3mr_target_destroy,
+ .slave_destroy = mpi3mr_slave_destroy,
+ .scan_finished = mpi3mr_scan_finished,
+ .scan_start = mpi3mr_scan_start,
+ .change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_device_reset_handler = mpi3mr_eh_dev_reset,
+ .eh_target_reset_handler = mpi3mr_eh_target_reset,
+ .eh_host_reset_handler = mpi3mr_eh_host_reset,
+ .bios_param = mpi3mr_bios_param,
+ .map_queues = mpi3mr_map_queues,
+ .no_write_same = 1,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPI3MR_SG_DEPTH,
+ /* max xfer supported is 1M (2K in 512 byte sized sectors)
+ */
+ .max_sectors = 2048,
+ .cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct scmd_priv),
+};
+
+/**
+ * mpi3mr_init_drv_cmd - Initialize internal command tracker
+ * @cmdptr: Internal command tracker
+ * @host_tag: Host tag used for the specific command
+ *
+ * Initialize the internal command tracker structure with
+ * specified host tag.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
+ u16 host_tag)
+{
+ mutex_init(&cmdptr->mutex);
+ cmdptr->reply = NULL;
+ cmdptr->state = MPI3MR_CMD_NOTUSED;
+ cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ cmdptr->host_tag = host_tag;
+}
+
+/**
+ * osintfc_mrioc_security_status - Check controller secure status
+ * @pdev: PCI device instance
+ *
+ * Read the Device Serial Number capability from PCI config
+ * space and decide whether the controller is secure or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+osintfc_mrioc_security_status(struct pci_dev *pdev)
+{
+ u32 cap_data;
+ int base;
+ u32 ctlr_status;
+ u32 debug_status;
+ int retval = 0;
+
+ base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (!base) {
+ dev_err(&pdev->dev,
+ "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
+ return -1;
+ }
+
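+ /* The second dword of the DSN capability carries the controller security and secure debug status */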
+ pci_read_config_dword(pdev, base + 4, &cap_data);
+
+ debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
+ ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
+
+ switch (ctlr_status) {
+ case MPI3MR_INVALID_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ case MPI3MR_CONFIG_SECURE_DEVICE:
+ if (!debug_status)
+ dev_info(&pdev->dev,
+ "%s: Config secure ctlr is detected\n",
+ __func__);
+ break;
+ case MPI3MR_HARD_SECURE_DEVICE:
+ break;
+ case MPI3MR_TAMPERED_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ if (!retval && debug_status) {
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_probe - PCI probe callback
+ * @pdev: PCI device instance
+ * @id: PCI device ID details
+ *
+ * Controller initialization routine. Checks the security status
+ * of the controller and if it is invalid or tampered, returns from
+ * the probe without initializing the controller. Otherwise,
+ * allocates the per adapter instance through shost_priv,
+ * initializes the controller specific data structures, initializes
+ * the controller hardware and adds the shost to the SCSI subsystem.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct Scsi_Host *shost = NULL;
+ int retval = 0, i;
+
+ if (osintfc_mrioc_security_status(pdev)) {
+ warn_non_secure_ctlr = 1;
+ return 1; /* For Invalid and Tampered device */
+ }
+
+ shost = scsi_host_alloc(&mpi3mr_driver_template,
+ sizeof(struct mpi3mr_ioc));
+ if (!shost) {
+ retval = -ENODEV;
+ goto shost_failed;
+ }
+
+ mrioc = shost_priv(shost);
+ mrioc->id = mrioc_ids++;
+ sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
+ sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
+ INIT_LIST_HEAD(&mrioc->list);
+ spin_lock(&mrioc_list_lock);
+ list_add_tail(&mrioc->list, &mrioc_list);
+ spin_unlock(&mrioc_list_lock);
+
+ spin_lock_init(&mrioc->admin_req_lock);
+ spin_lock_init(&mrioc->reply_free_queue_lock);
+ spin_lock_init(&mrioc->sbq_lock);
+ spin_lock_init(&mrioc->fwevt_lock);
+ spin_lock_init(&mrioc->tgtdev_lock);
+ spin_lock_init(&mrioc->watchdog_lock);
+ spin_lock_init(&mrioc->chain_buf_lock);
+
+ INIT_LIST_HEAD(&mrioc->fwevt_list);
+ INIT_LIST_HEAD(&mrioc->tgtdev_list);
+ INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+
+ mutex_init(&mrioc->reset_mutex);
+ mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
+ mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
+ MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+
+ if (pdev->revision)
+ mrioc->enable_segqueue = true;
+
+ init_waitqueue_head(&mrioc->reset_waitq);
+ mrioc->logging_level = logging_level;
+ mrioc->shost = shost;
+ mrioc->pdev = pdev;
+
+ /* init shost parameters */
+ shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
+ shost->max_lun = -1;
+ shost->unique_id = mrioc->id;
+
+ shost->max_channel = 1;
+ shost->max_id = 0xFFFFFFFF;
+
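+ /* Enable DIF type 1/2/3 protection by default when no prot_mask module parameter is specified */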
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else {
+ prot_mask = SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION;
+ scsi_host_set_prot(shost, prot_mask);
+ }
+
+ ioc_info(mrioc,
+ "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
+ __func__,
+ (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+ (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+ (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+ (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+ (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+ (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+ (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+
+ if (prot_guard_mask)
+ scsi_host_set_guard(shost, (prot_guard_mask & 3));
+ else
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
+ "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
+ mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
+ mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
+ if (!mrioc->fwevt_worker_thread) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_fwevtthread_failed;
+ }
+
+ mrioc->is_driver_loading = 1;
+ if (mpi3mr_init_ioc(mrioc, 0)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_iocinit_failed;
+ }
+
+ shost->nr_hw_queues = mrioc->num_op_reply_q;
+ shost->can_queue = mrioc->max_host_ios;
+ shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->max_id = mrioc->facts.max_perids;
+
+ retval = scsi_add_host(shost, &pdev->dev);
+ if (retval) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto addhost_failed;
+ }
+
+ scsi_scan_host(shost);
+ return retval;
+
+addhost_failed:
+ mpi3mr_cleanup_ioc(mrioc, 0);
+out_iocinit_failed:
+ destroy_workqueue(mrioc->fwevt_worker_thread);
+out_fwevtthread_failed:
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+ scsi_host_put(shost);
+shost_failed:
+ return retval;
+}
+
+/**
+ * mpi3mr_remove - PCI remove callback
+ * @pdev: PCI device instance
+ *
+ * Free up all memory and resources associated with the
+ * controller and target devices, and unregister the shost.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
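+ /* Wait for any in-progress reset or initial driver load to finish before tearing down */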
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+ scsi_remove_host(shost);
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ mpi3mr_cleanup_ioc(mrioc, 0);
+
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+
+ scsi_host_put(shost);
+}
+
+/**
+ * mpi3mr_shutdown - PCI shutdown callback
+ * @pdev: PCI device instance
+ *
+ * Free up all memory and resources associated with the
+ * controller
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+ mpi3mr_cleanup_ioc(mrioc, 0);
+}
+
+#ifdef CONFIG_PM
+/**
+ * mpi3mr_suspend - PCI power management suspend callback
+ * @pdev: PCI device instance
+ * @state: New power state
+ *
+ * Change the power state to the given value and clean up the IOC
+ * by issuing MUR and shutdown notification.
+ *
+ * Return: 0 always.
+ */
+static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ pci_power_t device_state;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ scsi_block_requests(shost);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc, 1);
+
+ device_state = pci_choose_state(pdev, state);
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, device_state);
+ mpi3mr_cleanup_resources(mrioc);
+
+ return 0;
+}
+
+/**
+ * mpi3mr_resume - PCI power management resume callback
+ * @pdev: PCI device instance
+ *
+ * Restore the power state to D0, reinitialize the controller
+ * and resume I/O operations to the target devices.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int mpi3mr_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ mrioc->pdev = pdev;
+ mrioc->cpu_count = num_online_cpus();
+ r = mpi3mr_setup_resources(mrioc);
+ if (r) {
+ ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
+ __func__, r);
+ return r;
+ }
+
+ mrioc->stop_drv_processing = 0;
+ mpi3mr_init_ioc(mrioc, 1);
+ scsi_unblock_requests(shost);
+ mpi3mr_start_watchdog(mrioc);
+
+ return 0;
+}
+#endif
+
+static const struct pci_device_id mpi3mr_pci_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5,
+ PCI_ANY_ID, PCI_ANY_ID)
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+
+static struct pci_driver mpi3mr_pci_driver = {
+ .name = MPI3MR_DRIVER_NAME,
+ .id_table = mpi3mr_pci_id_table,
+ .probe = mpi3mr_probe,
+ .remove = mpi3mr_remove,
+ .shutdown = mpi3mr_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mpi3mr_suspend,
+ .resume = mpi3mr_resume,
+#endif
+};
+
+static int __init mpi3mr_init(void)
+{
+ int ret_val;
+
+ pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ ret_val = pci_register_driver(&mpi3mr_pci_driver);
+
+ return ret_val;
+}
+
+static void __exit mpi3mr_exit(void)
+{
+ if (warn_non_secure_ctlr)
+ pr_warn(
+ "Unloading %s version %s while managing a non secure controller\n",
+ MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
+ else
+ pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ pci_unregister_driver(&mpi3mr_pci_driver);
+}
+
+module_init(mpi3mr_init);
+module_exit(mpi3mr_exit);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5779f313f6f8..cf4a3a2c22ad 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -141,7 +141,7 @@ _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
* @mpi_request:mf request pointer.
* @sz: size of buffer.
*
- * @Returns - 1/0 Reset to be done or Not
+ * Return: 1/0 Reset to be done or Not
*/
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
@@ -440,7 +440,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
return;
/* From smid we can get scsi_cmd, once we have sg_scmd,
- * we just need to get sg_virt and sg_next to get virual
+ * we just need to get sg_virt and sg_next to get virtual
* address associated with sgel->Address.
*/
@@ -600,7 +600,7 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
* _base_sync_drv_fw_timestamp - Sync Drive-Fw TimeStamp.
* @ioc: Per Adapter Object
*
- * Return nothing.
+ * Return: nothing.
*/
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
@@ -704,7 +704,7 @@ _base_fault_reset_work(struct work_struct *work)
/*
* Call _scsih_flush_pending_cmds callback so that we flush all
- * pending commands back to OS. This call is required to aovid
+ * pending commands back to OS. This call is required to avoid
* deadlock at block layer. Dead IOC will fail to do diag reset,
* and this call is safe since dead ioc will never return any
* command back from HW.
@@ -873,7 +873,7 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
* @ioc: per adapter object
* @fault_code: fault code
*
- * Return nothing.
+ * Return: nothing.
*/
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
@@ -887,7 +887,7 @@ mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
* @ioc: per adapter object
* @caller: caller function name
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
@@ -1359,11 +1359,11 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
}
/**
- * _base_display_reply_info -
+ * _base_display_reply_info - handle reply descriptors depending on IOC Status
* @ioc: per adapter object
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
- * @reply: reply message frame(lower 32bit addr)
+ * @reply: reply message frame (lower 32bit addr)
*/
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1804,7 +1804,7 @@ _base_interrupt(int irq, void *bus_id)
* @irqpoll: irq_poll object
* @budget: irq poll weight
*
- * returns number of reply descriptors processed
+ * Return: number of reply descriptors processed
*/
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
@@ -1826,7 +1826,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
enable_irq(reply_q->os_irq);
/*
* Go for one more round of processing the
- * reply descriptor post queue incase if HBA
+ * reply descriptor post queue in case the HBA
* Firmware has posted some reply descriptors
* while reenabling the IRQ.
*/
@@ -1840,7 +1840,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
* _base_init_irqpolls - initliaze IRQ polls
* @ioc: per adapter object
*
- * returns nothing
+ * Return: nothing
*/
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
@@ -1878,7 +1878,7 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @poll: poll over reply descriptor pools incase interrupt for
* timed-out SCSI command got delayed
- * Context: non ISR conext
+ * Context: non-ISR context
*
* Called when a Task Management request has completed.
*/
@@ -2104,7 +2104,16 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
/**
* _base_build_nvme_prp - This function is called for NVMe end devices to build
- * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
+ * a native SGL (NVMe PRP).
+ * @ioc: per adapter object
+ * @smid: system request message index for getting associated SGL
+ * @nvme_encap_request: the NVMe request msg frame pointer
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * The native SGL is built starting in the first PRP
* entry of the NVMe message (PRP1). If the data buffer is small enough to be
* described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
* used to describe a larger data buffer. If the data buffer is too large to
@@ -2133,7 +2142,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* Each 64-bit PRP entry comprises an address and an offset field. The address
* always points at the beginning of a 4KB physical memory page, and the offset
* describes where within that 4KB page the memory segment begins. Only the
- * first element in a PRP list may contain a non-zero offest, implying that all
+ * first element in a PRP list may contain a non-zero offset, implying that all
* memory segments following the first begin at the start of a 4KB page.
*
* Each PRP element normally describes 4KB of physical memory, with exceptions
@@ -2147,14 +2156,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* Since PRP entries lack any indication of size, the overall data buffer length
* is used to determine where the end of the data memory buffer is located, and
* how many PRP entries are required to describe it.
- *
- * @ioc: per adapter object
- * @smid: system request message index for getting asscociated SGL
- * @nvme_encap_request: the NVMe request msg frame pointer
- * @data_out_dma: physical address for WRITES
- * @data_out_sz: data xfer size for WRITES
- * @data_in_dma: physical address for READS
- * @data_in_sz: data xfer size for READS
*/
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -2311,8 +2312,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * base_make_prp_nvme -
- * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
+ * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
+ * SGLs specific to NVMe drives only
*
* @ioc: per adapter object
* @scmd: SCSI command from the mid-layer
@@ -2982,13 +2983,13 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_free_irq - free irq
+ * mpt3sas_base_free_irq - free irq
* @ioc: per adapter object
*
* Freeing respective reply_queue from the list.
*/
-static void
-_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
struct adapter_reply_queue *reply_q, *next;
@@ -3155,7 +3156,7 @@ fall_back:
* - loaded driver with default max_msix_vectors module parameter and
* - system booted in non kdump mode
*
- * returns nothing.
+ * Return: nothing.
*/
static void
_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
@@ -3190,12 +3191,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _base_disable_msix - disables msix
+ * mpt3sas_base_disable_msix - disables msix
* @ioc: per adapter object
*
*/
-static void
-_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
if (!ioc->msix_enable)
return;
@@ -3303,8 +3304,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->reply_queue_count; i++) {
r = _base_request_irq(ioc, i);
if (r) {
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
goto try_ioapic;
}
}
@@ -3341,8 +3342,8 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
kfree(ioc->replyPostRegisterIndex);
ioc->replyPostRegisterIndex = NULL;
@@ -3364,14 +3365,14 @@ static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
/**
- * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
+ * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
* and if it is in fault state then issue diag reset.
* @ioc: per adapter object
*
- * Returns: 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
-static int
-_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
+int
+mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
int rc = -EFAULT;
@@ -3385,12 +3386,14 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_mask_interrupts(ioc);
rc = _base_diag_reset(ioc);
} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_COREDUMP) {
mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ mpt3sas_base_mask_interrupts(ioc);
rc = _base_diag_reset(ioc);
}
@@ -3472,7 +3475,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
r = _base_get_ioc_facts(ioc);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_ioc_facts(ioc)))
goto out_fail;
}
@@ -3633,7 +3636,7 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
* @ioc: per adapter object
* @scmd: scsi_cmnd object
*
- * returns msix index of general reply queues,
+ * Return: msix index of general reply queues,
* i.e. reply queue on which IO request's reply
* should be posted by the HBA firmware.
*/
@@ -3663,7 +3666,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @scmd: scsi_cmnd object
*
- * Returns: msix index of high iops reply queues.
+ * Return: msix index of high iops reply queues.
* i.e. high iops reply queue on which IO request's
* reply should be posted by the HBA firmware.
*/
@@ -3910,7 +3913,7 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
* @ioc: per adapter object
* @smid: system request message index
*
- * returns msix index.
+ * Return: msix index.
*/
static u8
_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4005,7 +4008,7 @@ _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
*/
static void
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4109,7 +4112,7 @@ _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @smid: system request message index
* @handle: device handle, unused in this function, for function type match
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4131,7 +4134,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle, unused in this function, for function type match
- * Return nothing
+ * Return: nothing
*/
static void
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4152,9 +4155,9 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* firmware using Atomic Request Descriptor
* @ioc: per adapter object
* @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4176,7 +4179,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4434,6 +4437,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
default:
break;
}
@@ -4453,7 +4457,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
- int r = 0;
+ int r = 0, issue_diag_reset = 0;
u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
@@ -4503,7 +4507,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
- r = -ETIME;
+ issue_diag_reset = 1;
} else {
memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
@@ -4543,11 +4547,18 @@ out:
if (fwpkg_data)
dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
fwpkg_data_dma);
+ if (issue_diag_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
+ return -EFAULT;
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
return r;
}
/**
- * _base_display_ioc_capabilities - Disply IOC's capabilities.
+ * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*/
static void
@@ -4750,15 +4761,19 @@ out:
* according to performance mode.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: zero on success; otherwise return EAGAIN error code asking the
+ * caller to retry.
*/
-static void
+static int
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2IOCPage1_t ioc_pg1;
Mpi2ConfigReply_t mpi_reply;
+ int rc;
- mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ if (rc)
+ return rc;
memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
switch (perf_mode) {
@@ -4780,9 +4795,11 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
*/
ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
ioc_info(ioc, "performance mode: balanced\n");
- return;
+ return 0;
}
fallthrough;
case MPT_PERF_MODE_LATENCY:
@@ -4793,7 +4810,9 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
ioc_pg1.ProductSpecific = 0;
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
ioc_info(ioc, "performance mode: latency\n");
break;
case MPT_PERF_MODE_IOPS:
@@ -4805,9 +4824,12 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
le32_to_cpu(ioc_pg1.CoalescingTimeout));
ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
ioc_pg1.ProductSpecific = 0;
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
break;
}
+ return 0;
}
/**
@@ -4815,9 +4837,9 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage2_t trigger_pg2;
@@ -4831,7 +4853,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
&trigger_pg2);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4840,7 +4862,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
@@ -4859,6 +4881,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
mpi_event_tg++;
}
}
+ return 0;
}
/**
@@ -4866,9 +4889,9 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage3_t trigger_pg3;
@@ -4882,7 +4905,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
&trigger_pg3);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4891,7 +4914,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
@@ -4910,6 +4933,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
mpi_scsi_tg++;
}
}
+ return 0;
}
/**
@@ -4917,9 +4941,9 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage4_t trigger_pg4;
@@ -4933,7 +4957,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
&trigger_pg4);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4942,7 +4966,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
@@ -4963,6 +4987,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
mpi_status_tg++;
}
}
+ return 0;
}
/**
@@ -4970,9 +4995,9 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage1_t trigger_pg1;
@@ -4983,7 +5008,7 @@ _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
&trigger_pg1);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4992,25 +5017,30 @@ _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
ioc->diag_trigger_master.MasterData |=
le32_to_cpu(
trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
+ return 0;
}
/**
* _base_check_for_trigger_pages_support - checks whether HBA FW supports
* driver trigger pages or not
* @ioc : per adapter object
+ * @trigger_flags : address where trigger page0's TriggerFlags value is copied
+ *
+ * Return: 0 and the trigger flags mask in @trigger_flags if the HBA FW
+ * supports driver trigger pages; otherwise returns %-EFAULT if driver
+ * trigger pages are not supported by the FW, or %-EAGAIN if a diag reset
+ * occurred due to a FW fault, asking the caller to retry the command.
*
- * Returns trigger flags mask if HBA FW supports driver trigger pages,
- * otherwise returns EFAULT.
*/
static int
-_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
+_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
{
Mpi26DriverTriggerPage0_t trigger_pg0;
int r = 0;
@@ -5020,14 +5050,15 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
&trigger_pg0);
if (r)
- return -EFAULT;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
return -EFAULT;
- return le16_to_cpu(trigger_pg0.TriggerFlags);
+ *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
+ return 0;
}
/**
@@ -5035,12 +5066,14 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
* persistent pages.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: zero on success; otherwise return EAGAIN error codes
+ * asking the caller to retry.
*/
-static void
+static int
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
int trigger_flags;
+ int r;
/*
* Default setting of master trigger.
@@ -5048,9 +5081,16 @@ _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
- trigger_flags = _base_check_for_trigger_pages_support(ioc);
- if (trigger_flags < 0)
- return;
+ r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
+ if (r) {
+ if (r == -EAGAIN)
+ return r;
+ /*
+ * Don't go for error handling when FW doesn't support
+ * driver trigger pages.
+ */
+ return 0;
+ }
ioc->supports_trigger_pages = 1;
@@ -5059,40 +5099,53 @@ _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* if master trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID)
- _base_get_master_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
+ r = _base_get_master_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
/*
* Retrieve event diag trigger values from driver trigger pg2
* if event trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID)
- _base_get_event_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
+ r = _base_get_event_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
/*
* Retrieve scsi diag trigger values from driver trigger pg3
* if scsi trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID)
- _base_get_scsi_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
+ r = _base_get_scsi_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
/*
* Retrieve mpi error diag trigger values from driver trigger pg4
* if loginfo trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID)
- _base_get_mpi_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
+ r = _base_get_mpi_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
+ return 0;
}
/**
* _base_update_diag_trigger_pages - Update the driver trigger pages after
- * online FW update, incase updated FW supports driver
+ * online FW update, in case updated FW supports driver
* trigger pages.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
@@ -5119,23 +5172,33 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
-static void
+static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
int tg_flags = 0;
+ int rc;
ioc->nvme_abort_timeout = 30;
- mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
- if (ioc->ir_firmware)
- mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
- &ioc->manu_pg10);
+ rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
+ &ioc->manu_pg0);
+ if (rc)
+ return rc;
+ if (ioc->ir_firmware) {
+ rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+ if (rc)
+ return rc;
+ }
/*
* Ensure correct T10 PI operation if vendor left EEDPTagMode
* flag unset in NVDATA.
*/
- mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ if (rc)
+ return rc;
if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
ioc->name);
@@ -5174,12 +5237,24 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc_warn(ioc,
"TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
}
- mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
- mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
- mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
- mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ if (rc)
+ return rc;
_base_display_ioc_capabilities(ioc);
/*
@@ -5195,16 +5270,23 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
iounit_pg1_flags |=
MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
- mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (rc)
+ return rc;
if (ioc->iounit_pg8.NumSensors)
ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
- if (ioc->is_aero_ioc)
- _base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (ioc->is_aero_ioc) {
+ rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (rc)
+ return rc;
+ }
if (ioc->is_gen35_ioc) {
- if (ioc->is_driver_loading)
- _base_get_diag_triggers(ioc);
- else {
+ if (ioc->is_driver_loading) {
+ rc = _base_get_diag_triggers(ioc);
+ if (rc)
+ return rc;
+ } else {
/*
* In case of online HBA FW update operation,
* check whether updated FW supports the driver trigger
@@ -5216,7 +5298,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* and new FW doesn't support them then disable
* support_trigger_pages flag.
*/
- tg_flags = _base_check_for_trigger_pages_support(ioc);
+ _base_check_for_trigger_pages_support(ioc, &tg_flags);
if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
_base_update_diag_trigger_pages(ioc);
else if (ioc->supports_trigger_pages &&
@@ -5224,6 +5306,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc->supports_trigger_pages = 0;
}
}
+ return 0;
}
/**
@@ -6233,7 +6316,7 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
* _base_dump_reg_set - This function will print hexdump of register set.
* @ioc: per adapter object
*
- * Returns nothing.
+ * Return: nothing.
*/
static inline void
_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
@@ -6467,7 +6550,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
*
* Return: Waits up to timeout seconds for the IOC to
* become operational. Returns 0 if IOC is present
- * and operational; otherwise returns -EFAULT.
+ * and operational; otherwise returns %-EFAULT.
*/
int
@@ -6480,6 +6563,17 @@ mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
break;
+
+ /*
+ * The watchdog thread will be started after IOC initialization,
+ * so there is no need to wait here for the IOC state to become
+ * operational while IOC initialization is in progress. Instead,
+ * the driver will return -ETIME status so that the calling
+ * function can issue a diag reset operation and retry the command.
+ */
+ if (ioc->is_driver_loading)
+ return -ETIME;
+
ssleep(1);
ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
__func__, ++wait_state_count);
@@ -7112,7 +7206,8 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
ioc->port_enable_failed = 1;
- if (ioc->is_driver_loading) {
+ if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
mpt3sas_port_enable_complete(ioc);
return 1;
@@ -7213,8 +7308,9 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
-
+ ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->port_enable_cmds.smid = smid;
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
@@ -7311,7 +7407,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
Mpi2EventNotificationRequest_t *mpi_request;
u16 smid;
int r = 0;
- int i;
+ int i, issue_diag_reset = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -7345,10 +7441,19 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
if (ioc->base_cmds.status & MPT3_CMD_RESET)
r = -EFAULT;
else
- r = -ETIME;
+ issue_diag_reset = 1;
+
} else
dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (issue_diag_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
+ return -EFAULT;
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
return r;
}
@@ -7508,14 +7613,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_make_ioc_ready - put controller in READY state
+ * mpt3sas_base_make_ioc_ready - put controller in READY state
* @ioc: per adapter object
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
* Return: 0 for success, non-zero for failure.
*/
-static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
u32 ioc_state;
int rc;
@@ -7712,7 +7817,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_driver_loading)
return r;
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_send_ioc_init(ioc)))
return r;
}
@@ -7746,12 +7851,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
return r;
}
- _base_static_config_pages(ioc);
+ r = _base_static_config_pages(ioc);
+ if (r)
+ return r;
+
r = _base_event_notification(ioc);
if (r)
return r;
- if (ioc->is_driver_loading) {
+ if (!ioc->shost_recovery) {
if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
== 0x80) {
@@ -7789,7 +7897,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
if (ioc->chip_phys && ioc->chip) {
mpt3sas_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, SOFT_RESET);
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
}
@@ -7851,7 +7959,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_ioc_facts(ioc)))
goto out_free_resources;
}
@@ -7868,7 +7976,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
/*
* In SAS3.0,
* SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
- * Target Status - all require the IEEE formated scatter gather
+ * Target Status - all require the IEEE formatted scatter gather
* elements.
*/
ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
@@ -7909,7 +8017,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg_mpi = &_base_build_sg;
ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
- r = _base_make_ioc_ready(ioc, SOFT_RESET);
+ r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
if (r)
goto out_free_resources;
@@ -7923,7 +8031,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_port_facts(ioc, i)))
goto out_free_resources;
}
@@ -8049,8 +8157,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
}
}
r = _base_make_ioc_operational(ioc);
- if (r)
- goto out_free_resources;
+ if (r == -EAGAIN) {
+ r = _base_make_ioc_operational(ioc);
+ if (r)
+ goto out_free_resources;
+ }
/*
* Copy current copy of IOCFacts in prev_fw_facts
@@ -8168,8 +8279,6 @@ _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
ioc->start_scan_failed =
MPI2_IOCSTATUS_INTERNAL_ERROR;
ioc->start_scan = 0;
- ioc->port_enable_cmds.status =
- MPT3_CMD_NOT_USED;
} else {
complete(&ioc->port_enable_cmds.done);
}
@@ -8362,7 +8471,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
mpt3sas_base_mask_interrupts(ioc);
- r = _base_make_ioc_ready(ioc, type);
+ r = mpt3sas_base_make_ioc_ready(ioc, type);
if (r)
goto out;
_base_clear_outstanding_commands(ioc);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 98558d9c8c2d..0c6c3df0038d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -500,6 +500,7 @@ struct MPT3SAS_DEVICE {
#define MPT3_CMD_PENDING 0x0002 /* pending */
#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+#define MPT3_CMD_COMPLETE_ASYNC 0x0010 /* tells whether the cmd completes in the same thread or not */
/**
* struct _internal_cmd - internal commands struct
@@ -1175,6 +1176,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
+ * @drv_internal_flags: Bit map internal to driver
* @drv_support_bitmap: driver's supported feature bit map
* @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands
@@ -1370,6 +1372,7 @@ struct MPT3SAS_ADAPTER {
bool msix_load_balance;
u16 thresh_hold;
u8 high_iops_queues;
+ u32 drv_internal_flags;
u32 drv_support_bitmap;
u32 dma_mask;
bool enable_sdev_max_qd;
@@ -1615,6 +1618,8 @@ struct mpt3sas_debugfs_buffer {
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002
+#define MPT_DRV_INTERNAL_FIRST_PE_ISSUED 0x00000001
+
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
@@ -1709,6 +1714,9 @@ void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
u16 device_missing_delay, u8 io_missing_delay);
+int mpt3sas_base_check_for_fault_and_issue_reset(
+ struct MPT3SAS_ADAPTER *ioc);
+
int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
void
@@ -1722,6 +1730,10 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \
status, mpi_request, sz); } while (0)
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 55cd32908924..83a5c2172ad4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -359,8 +359,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
}
r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
- if (r)
+ if (r) {
+ if (r == -ETIME)
+ issue_host_reset = 1;
goto free_mem;
+ }
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
@@ -395,7 +398,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
MPT3_CMD_RESET) || ioc->pci_error_recovery)
goto retry_config;
issue_host_reset = 1;
- r = -EFAULT;
goto free_mem;
}
@@ -486,8 +488,16 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
mutex_unlock(&ioc->config_cmds.mutex);
- if (issue_host_reset)
- mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ if (issue_host_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) {
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ r = -EFAULT;
+ } else {
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
+ }
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d00aca3c77ce..8e64a6f14542 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -78,6 +78,7 @@ static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
@@ -3631,8 +3632,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (ioc->is_driver_loading)
- return;
fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
@@ -3693,10 +3692,53 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
!ioc->firmware_event_thread)
return;
+ /*
+ * Mark the currently running event as ignored so that it
+ * exits quickly.
+ * As a diag reset has occurred, there is no point in
+ * processing the remaining stale event data entries.
+ */
+ if (ioc->shost_recovery && ioc->current_event)
+ ioc->current_event->ignore = 1;
ioc->fw_events_cleanup = 1;
while ((fw_event = dequeue_next_fw_event(ioc)) ||
(fw_event = ioc->current_event)) {
+
+ /*
+ * Don't call cancel_work_sync() for the current_event
+ * unless it is MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ * otherwise we may observe a deadlock if a hard reset
+ * is issued as part of processing the current_event.
+ *
+ * The original logic of cleaning the current_event was added
+ * to handle back-to-back host resets issued by the user,
+ * i.e. during back-to-back host resets the driver used to
+ * process two instances of the MPT3SAS_REMOVE_UNRESPONDING_DEVICES
+ * event back to back, and this made the driver unregister
+ * the devices from the SML.
+ */
+
+ if (fw_event == ioc->current_event &&
+ ioc->current_event->event !=
+ MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
+ ioc->current_event = NULL;
+ continue;
+ }
+
+ /*
+ * The driver has to clear the ioc->start_scan flag when
+ * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
+ * otherwise the scsi_scan_host() API waits for the
+ * 5 minute timer to expire. If we exit from
+ * scsi_scan_host() early then we can issue a new
+ * port enable request as part of the current diag reset.
+ */
+ if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ ioc->start_scan = 0;
+ }
+
/*
* Wait on the fw_event to complete. If this returns 1, then
* the event was never executed, and we need a put for the
@@ -5077,10 +5119,8 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
ascq = 0x00;
break;
}
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
- ascq);
- scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
+ set_host_byte(scmd, DID_ABORT);
}
/**
@@ -5837,12 +5877,8 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->sense_buffer[0] = 0x70;
- scmd->sense_buffer[2] = ILLEGAL_REQUEST;
- scmd->sense_buffer[12] = 0x20;
- scmd->sense_buffer[13] = 0;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
+ 0x20, 0);
}
break;
@@ -6884,8 +6920,10 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
handle, parent_handle,
(u64)sas_expander->sas_address, sas_expander->num_phys);
- if (!sas_expander->num_phys)
+ if (!sas_expander->num_phys) {
+ rc = -1;
goto out_fail;
+ }
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
@@ -10118,6 +10156,17 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
* owner for the reference the list had on any object we prune.
*/
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+ /*
+ * Clean up the sas_device_init_list as the driver goes
+ * for a fresh scan as part of the diag reset.
+ */
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_init_list, list) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
if (!sas_device->responding)
@@ -10139,6 +10188,16 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
INIT_LIST_HEAD(&head);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ /*
+ * Clean up the pcie_device_init_list as the driver goes
+ * for a fresh scan as part of the diag reset.
+ */
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_init_list, list) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+
list_for_each_entry_safe(pcie_device, pcie_device_next,
&ioc->pcie_device_list, list) {
if (!pcie_device->responding)
@@ -10541,8 +10600,7 @@ void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
+ if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
if (ioc->multipath_on_hba) {
_scsih_sas_port_refresh(ioc);
_scsih_update_vphys_after_reset(ioc);
@@ -10597,6 +10655,18 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
_scsih_del_dirty_vphy(ioc);
_scsih_del_dirty_port_entries(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ /*
+ * If a diag reset has occurred during driver load, then the
+ * driver has to complete the driver load operation by
+ * executing the following items:
+ * - register the devices from sas_device_init_list with the SML,
+ * - clear the is_driver_loading flag,
+ * - start the watchdog thread.
+ * In the normal driver load path, the above are taken care of
+ * when the driver executes scsih_scan_finished().
+ */
+ if (ioc->is_driver_loading)
+ _scsih_complete_devices_scanning(ioc);
_scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
@@ -10742,11 +10812,23 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
_scsih_check_topo_delete_events(ioc,
(Mpi2EventDataSasTopologyChangeList_t *)
mpi_reply->EventData);
+ /*
+ * No need to add the topology change list
+ * event to the fw event work queue while a
+ * diag reset is going on, since during a diag
+ * reset the driver scans the devices by reading
+ * SAS device Page 0, not by processing the
+ * events.
+ */
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_check_pcie_topo_remove_events(ioc,
(Mpi26EventDataPCIeTopologyChangeList_t *)
mpi_reply->EventData);
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
_scsih_check_ir_config_unhide_events(ioc,
@@ -11213,7 +11295,12 @@ scsih_shutdown(struct pci_dev *pdev)
_scsih_ir_shutdown(ioc);
_scsih_nvme_shutdown(ioc);
- mpt3sas_base_detach(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
}
@@ -11262,13 +11349,27 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
if (channel == RAID_CHANNEL) {
raid_device = device;
+ /*
+ * If this boot VD is already registered with SML then
+ * no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (raid_device->starget)
+ return;
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
} else if (channel == PCIE_CHANNEL) {
- spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device = device;
+ /*
+ * If this boot NVMe device is already registered with SML then
+ * no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (pcie_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
tid = pcie_device->id;
list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
@@ -11276,8 +11377,15 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
if (rc)
_scsih_pcie_device_remove(ioc, pcie_device);
} else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
+ /*
+ * If this boot sas/sata device is already registered with SML
+ * then no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (sas_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
handle = sas_device->handle;
sas_address_parent = sas_device->sas_address_parent;
sas_address = sas_device->sas_address;
@@ -11576,6 +11684,25 @@ scsih_scan_start(struct Scsi_Host *shost)
}
/**
+ * _scsih_complete_devices_scanning - add the devices to SML and
+ * complete ioc initialization.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
+{
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+}
+
+/**
* scsih_scan_finished - scsi lld callback for .scan_finished
* @shost: SCSI host pointer
* @time: elapsed time of the scan in jiffies
@@ -11588,6 +11715,8 @@ static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 ioc_state;
+ int issue_hard_reset = 0;
if (disable_discovery > 0) {
ioc->is_driver_loading = 0;
@@ -11602,9 +11731,30 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 1;
}
- if (ioc->start_scan)
+ if (ioc->start_scan) {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ issue_hard_reset = 1;
+ goto out;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_base_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ issue_hard_reset = 1;
+ goto out;
+ }
return 0;
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
+ ioc_info(ioc,
+ "port enable: aborted due to diag reset\n");
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
if (ioc->start_scan_failed) {
ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
ioc->start_scan_failed);
@@ -11616,13 +11766,14 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
ioc_info(ioc, "port enable: SUCCESS\n");
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ _scsih_complete_devices_scanning(ioc);
- if (ioc->wait_for_discovery_to_complete) {
- ioc->wait_for_discovery_to_complete = 0;
- _scsih_probe_devices(ioc);
+out:
+ if (issue_hard_reset) {
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
+ ioc->is_driver_loading = 0;
}
- mpt3sas_base_start_watchdog(ioc);
- ioc->is_driver_loading = 0;
return 1;
}
@@ -11932,6 +12083,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->multipath_on_hba = 1;
else
ioc->multipath_on_hba = 0;
+ break;
default:
break;
}
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 6aa2697c4a15..f18dd9703595 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -46,6 +46,7 @@ static struct scsi_host_template mvs_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
@@ -692,22 +693,17 @@ static struct pci_driver mvs_pci_driver = {
.remove = mvs_pci_remove,
};
-static ssize_t
-mvs_show_driver_version(struct device *cdev,
- struct device_attribute *attr, char *buffer)
+static ssize_t driver_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
{
return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
}
-static DEVICE_ATTR(driver_version,
- S_IRUGO,
- mvs_show_driver_version,
- NULL);
+static DEVICE_ATTR_RO(driver_version);
-static ssize_t
-mvs_store_interrupt_coalescing(struct device *cdev,
- struct device_attribute *attr,
- const char *buffer, size_t size)
+static ssize_t interrupt_coalescing_store(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buffer, size_t size)
{
unsigned int val = 0;
struct mvs_info *mvi = NULL;
@@ -745,16 +741,13 @@ mvs_store_interrupt_coalescing(struct device *cdev,
return strlen(buffer);
}
-static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
- struct device_attribute *attr, char *buffer)
+static ssize_t interrupt_coalescing_show(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
{
return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
}
-static DEVICE_ATTR(interrupt_coalescing,
- S_IRUGO|S_IWUSR,
- mvs_show_interrupt_coalescing,
- mvs_store_interrupt_coalescing);
+static DEVICE_ATTR_RW(interrupt_coalescing);
static int __init mvs_init(void)
{
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 1acea528f27f..31d1ea5a5dd2 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1314,7 +1314,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
@@ -1764,7 +1764,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
case SAS_PROTOCOL_SSP:
/* hw says status == 0, datapres == 0 */
if (rx_desc & RXQ_GOOD) {
- tstat->stat = SAM_STAT_GOOD;
+ tstat->stat = SAS_SAM_STAT_GOOD;
tstat->resp = SAS_TASK_COMPLETE;
}
/* response frame present */
@@ -1773,12 +1773,12 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
sizeof(struct mvs_err_info);
sas_ssp_task_response(mvi->dev, task, iu);
} else
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SMP: {
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
- tstat->stat = SAM_STAT_GOOD;
+ tstat->stat = SAS_SAM_STAT_GOOD;
to = kmap_atomic(sg_page(sg_resp));
memcpy(to + sg_resp->offset,
slot->response + sizeof(struct mvs_err_info),
@@ -1795,7 +1795,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
}
default:
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
if (!slot->port->port_attached) {
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 9d5743627604..6bb03d7a254d 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1317,11 +1317,10 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
sizeof(struct mvumi_sense_data));
- scmd->result |= (DRIVER_SENSE << 24);
}
break;
default:
- scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->result |= (DID_ABORT << 16);
break;
}
@@ -2068,10 +2067,7 @@ static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
return 0;
error:
- scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
- 0);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
return -1;
}
@@ -2131,7 +2127,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
else
atomic_dec(&mhba->fw_outstanding);
- scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->result = (DID_ABORT << 16);
scmd->SCp.ptr = NULL;
if (scsi_bufflen(scmd)) {
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index d9c82e211ae7..542ed88ef90d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1397,8 +1397,7 @@ myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
static void myrb_request_sense(struct myrb_hba *cb,
struct scsi_cmnd *scmd)
{
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- NO_SENSE, 0, 0);
+ scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
}
@@ -1447,10 +1446,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case INQUIRY:
if (scmd->cmnd[1] & 1) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrb_inquiry(cb, scmd);
scmd->result = (DID_OK << 16);
@@ -1465,10 +1461,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
(scmd->cmnd[2] & 0x3F) != 0x08) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrb_mode_sense(cb, scmd, ldev_info);
scmd->result = (DID_OK << 16);
@@ -1479,20 +1472,14 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[1] & 1) ||
(scmd->cmnd[8] & 1)) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
scmd->scsi_done(scmd);
return 0;
}
lba = get_unaligned_be32(&scmd->cmnd[2]);
if (lba) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
scmd->scsi_done(scmd);
return 0;
}
@@ -1506,10 +1493,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case SEND_DIAGNOSTIC:
if (scmd->cmnd[1] != 0x04) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
/* Assume good status */
scmd->result = (DID_OK << 16);
@@ -1519,10 +1503,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_6:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
scmd->scsi_done(scmd);
return 0;
}
@@ -1536,10 +1517,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_10:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
scmd->scsi_done(scmd);
return 0;
}
@@ -1553,10 +1531,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_12:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
scmd->scsi_done(scmd);
return 0;
}
@@ -1569,9 +1544,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
break;
default:
/* Illegal request, invalid opcode */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x20, 0);
- scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
scmd->scsi_done(scmd);
return 0;
}
@@ -2352,25 +2325,19 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
"Bad Data Encountered\n");
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
/* Unrecovered read error */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x11, 0);
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
else
/* Write error */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x0C, 0);
- scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
break;
case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
/* Unrecovered read error, auto-reallocation failed */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x11, 0x04);
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
else
/* Write error, auto-reallocation failed */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x0C, 0x02);
- scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
break;
case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
dev_dbg(&scmd->device->sdev_gendev,
@@ -2381,8 +2348,7 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
dev_dbg(&scmd->device->sdev_gendev,
"Attempt to Access Beyond End of Logical Drive");
/* Logical block address out of range */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- NOT_READY, 0x21, 0);
+ scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
break;
case MYRB_STATUS_DEVICE_NONRESPONSIVE:
dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 3b68c68d1716..26326af23dbc 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1600,9 +1600,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
switch (scmd->cmnd[0]) {
case REPORT_LUNS:
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
- 0x20, 0x0);
- scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
scmd->scsi_done(scmd);
return 0;
case MODE_SENSE:
@@ -1612,10 +1610,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
(scmd->cmnd[2] & 0x3F) != 0x08) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrs_mode_sense(cs, scmd, ldev_info);
scmd->result = (DID_OK << 16);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 134bbd2d8b66..bc9d29e5fdba 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -176,38 +176,40 @@ static nsp32_sync_table nsp32_sync_table_pci[] = {
* function declaration
*/
/* module entry point */
-static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
-static void nsp32_remove(struct pci_dev *);
+static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
+static void nsp32_remove(struct pci_dev *);
static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
-static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
+static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
-static int nsp32_detect (struct pci_dev *pdev);
-static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static const char *nsp32_info (struct Scsi_Host *);
-static int nsp32_release (struct Scsi_Host *);
+static int nsp32_detect (struct pci_dev *pdev);
+static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static const char *nsp32_info (struct Scsi_Host *);
+static int nsp32_release (struct Scsi_Host *);
/* SCSI error handler */
-static int nsp32_eh_abort (struct scsi_cmnd *);
-static int nsp32_eh_host_reset(struct scsi_cmnd *);
+static int nsp32_eh_abort (struct scsi_cmnd *);
+static int nsp32_eh_host_reset(struct scsi_cmnd *);
/* generate SCSI message */
static void nsp32_build_identify(struct scsi_cmnd *);
static void nsp32_build_nop (struct scsi_cmnd *);
static void nsp32_build_reject (struct scsi_cmnd *);
-static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
+static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char,
+ unsigned char);
/* SCSI message handler */
static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
static void nsp32_msgout_occur (struct scsi_cmnd *);
-static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
+static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long,
+ unsigned short);
static int nsp32_setup_sg_table (struct scsi_cmnd *);
static int nsp32_selection_autopara(struct scsi_cmnd *);
static int nsp32_selection_autoscsi(struct scsi_cmnd *);
-static void nsp32_scsi_done (struct scsi_cmnd *);
+static void nsp32_scsi_done (struct scsi_cmnd *);
static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
@@ -215,10 +217,13 @@ static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
/* SCSI SDTR */
static void nsp32_analyze_sdtr (struct scsi_cmnd *);
-static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
-static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
-static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
-static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
+static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *,
+ unsigned char);
+static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
+static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *,
+ unsigned char *, unsigned char *);
+static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *,
+ int, unsigned char);
/* SCSI bus status handler */
static void nsp32_wait_req (nsp32_hw_data *, int);
@@ -234,16 +239,16 @@ static irqreturn_t do_nsp32_isr(int, void *);
static int nsp32hw_init(nsp32_hw_data *);
/* EEPROM handler */
-static int nsp32_getprom_param (nsp32_hw_data *);
-static int nsp32_getprom_at24 (nsp32_hw_data *);
-static int nsp32_getprom_c16 (nsp32_hw_data *);
-static void nsp32_prom_start (nsp32_hw_data *);
-static void nsp32_prom_stop (nsp32_hw_data *);
-static int nsp32_prom_read (nsp32_hw_data *, int);
-static int nsp32_prom_read_bit (nsp32_hw_data *);
-static void nsp32_prom_write_bit(nsp32_hw_data *, int);
-static void nsp32_prom_set (nsp32_hw_data *, int, int);
-static int nsp32_prom_get (nsp32_hw_data *, int);
+static int nsp32_getprom_param (nsp32_hw_data *);
+static int nsp32_getprom_at24 (nsp32_hw_data *);
+static int nsp32_getprom_c16 (nsp32_hw_data *);
+static void nsp32_prom_start (nsp32_hw_data *);
+static void nsp32_prom_stop (nsp32_hw_data *);
+static int nsp32_prom_read (nsp32_hw_data *, int);
+static int nsp32_prom_read_bit (nsp32_hw_data *);
+static void nsp32_prom_write_bit(nsp32_hw_data *, int);
+static void nsp32_prom_set (nsp32_hw_data *, int, int);
+static int nsp32_prom_get (nsp32_hw_data *, int);
/* debug/warning/info message */
static void nsp32_message (const char *, int, char *, char *, ...);
@@ -356,8 +361,8 @@ static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
- int mode = FALSE;
+ int pos = data->msgout_len;
+ int mode = FALSE;
/* XXX: Auto DiscPriv detection is progressing... */
if (disc_priv == 0) {
@@ -377,13 +382,13 @@ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
unsigned char offset)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR; pos++;
- data->msgoutbuf[pos] = period; pos++;
- data->msgoutbuf[pos] = offset; pos++;
+ data->msgoutbuf[pos] = period; pos++;
+ data->msgoutbuf[pos] = offset; pos++;
data->msgout_len = pos;
}
@@ -394,7 +399,7 @@ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
if (pos != 0) {
nsp32_msg(KERN_WARNING,
@@ -412,12 +417,12 @@ static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
data->msgoutbuf[pos] = MESSAGE_REJECT; pos++;
data->msgout_len = pos;
}
-
+
/*
* timer
*/
@@ -450,7 +455,7 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
unsigned char phase;
int i, ret;
unsigned int msgout;
- u16_le s;
+ u16_le s;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
@@ -482,7 +487,7 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
- * MCNT 1: MSG#2
+ * MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
@@ -494,7 +499,8 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
msgout = 0;
}
- // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
+ // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n",
+ // nsp32_read2(base, SEL_TIME_OUT));
// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
@@ -520,10 +526,10 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
/* command control */
param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
- AUTOSCSI_START |
- AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 |
- AUTO_ATN );
+ AUTOSCSI_START |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 |
+ AUTO_ATN );
/* transfer control */
@@ -555,9 +561,9 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
/*
* transfer parameter to ASIC
*/
- nsp32_write4(base, SGT_ADR, data->auto_paddr);
- nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
- AUTO_PARAMETER );
+ nsp32_write4(base, SGT_ADR, data->auto_paddr);
+ nsp32_write2(base, COMMAND_CONTROL,
+ CLEAR_CDB_FIFO_POINTER | AUTO_PARAMETER );
/*
* Check arbitration
@@ -599,7 +605,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
goto out;
- }
+ }
/*
* clear execph
@@ -616,13 +622,14 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
*/
for (i = 0; i < SCpnt->cmd_len; i++) {
nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
- }
+ }
nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);
/*
* set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
*/
- nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));
+ nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID,
+ BIT(host_id) | BIT(target));
/*
* set SCSI MSGOUT REG
@@ -642,7 +649,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
- * MCNT 1: MSG#2
+ * MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
@@ -662,7 +669,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* set SREQ hazard killer sampling rate
- *
+ *
* TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
* check other internal clock!
*/
@@ -687,7 +694,8 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
"syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
- nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
+ nsp32_read4(base, SGT_ADR),
+ nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
data->msgout_len, msgout);
@@ -716,10 +724,10 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* start AUTO SCSI, kick off arbitration
*/
command = (CLEAR_CDB_FIFO_POINTER |
- AUTOSCSI_START |
+ AUTOSCSI_START |
AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 |
- AUTO_ATN );
+ AUTO_MSGIN_02 |
+ AUTO_ATN);
nsp32_write2(base, COMMAND_CONTROL, command);
/*
@@ -739,9 +747,9 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* Arbitration Status Check
- *
+ *
* Note: Arbitration counter is waited during ARBIT_GO is not lifting.
- * Using udelay(1) consumes CPU time and system time, but
+ * Using udelay(1) consumes CPU time and system time, but
* arbitration delay time is defined minimal 2.4us in SCSI
* specification, thus udelay works as coarse grained wait timer.
*/
@@ -776,7 +784,7 @@ static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
SCpnt->result = DID_NO_CONNECT << 16;
status = FALSE;
- }
+ }
/*
* clear Arbit
@@ -822,7 +830,8 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
* or current nexus is not existed, unexpected
* reselection is occurred. Send reject message.
*/
- if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
+ if (newid >= ARRAY_SIZE(data->lunt) ||
+ newlun >= ARRAY_SIZE(data->lunt[0])) {
nsp32_msg(KERN_WARNING, "unknown id/lun");
return FALSE;
} else if(data->lunt[newid][newlun].SCpnt == NULL) {
@@ -876,7 +885,8 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
- "can't transfer over 64KB at a time, size=0x%x", le32_to_cpu(sgt[i].len));
+ "can't transfer over 64KB at a time, "
+ "size=0x%x", le32_to_cpu(sgt[i].len));
return FALSE;
}
nsp32_dbg(NSP32_DEBUG_SGLIST,
@@ -894,7 +904,8 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
return TRUE;
}
-static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target;
@@ -904,8 +915,9 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
- SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
- scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
+ SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0],
+ SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt),
+ scsi_bufflen(SCpnt));
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -936,7 +948,6 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
SCpnt->scsi_done = done;
data->CurrentSC = SCpnt;
SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
- SCpnt->SCp.Message = 0;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
@@ -966,7 +977,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
/* Build IDENTIFY */
nsp32_build_identify(SCpnt);
- /*
+ /*
* If target is the first time to transfer after the reset
* (target don't have SDTR_DONE and SDTR_INITIATOR), sync
* message SDTR is needed to do synchronous transfer.
@@ -1051,9 +1062,9 @@ static int nsp32hw_init(nsp32_hw_data *data)
nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
}
- nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
- nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
do {
@@ -1081,12 +1092,13 @@ static int nsp32hw_init(nsp32_hw_data *data)
nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));
nsp32_index_write1(base, CLOCK_DIV, data->clock);
- nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
+ nsp32_index_write1(base, BM_CYCLE,
+ MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */
/*
* initialize MISC_WRRD register
- *
+ *
* Note: Designated parameters is obeyed as following:
* MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
* MISC_MASTER_TERMINATION_SELECT: It must be set.
@@ -1101,10 +1113,10 @@ static int nsp32hw_init(nsp32_hw_data *data)
*/
nsp32_index_write2(base, MISC_WR,
(SCSI_DIRECTION_DETECTOR_SELECT |
- DELAYED_BMSTART |
- MASTER_TERMINATION_SELECT |
- BMREQ_NEGATE_TIMING_SEL |
- AUTOSEL_TIMING_SEL |
+ DELAYED_BMSTART |
+ MASTER_TERMINATION_SELECT |
+ BMREQ_NEGATE_TIMING_SEL |
+ AUTOSEL_TIMING_SEL |
BMSTOP_CHANGE2_NONDATA_PHASE));
nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
@@ -1125,15 +1137,16 @@ static int nsp32hw_init(nsp32_hw_data *data)
* enable to select designated IRQ (except for
* IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
*/
- nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ |
- IRQSELECT_SCSIRESET_IRQ |
- IRQSELECT_FIFO_SHLD_IRQ |
- IRQSELECT_RESELECT_IRQ |
- IRQSELECT_PHASE_CHANGE_IRQ |
- IRQSELECT_AUTO_SCSI_SEQ_IRQ |
- // IRQSELECT_BMCNTERR_IRQ |
- IRQSELECT_TARGET_ABORT_IRQ |
- IRQSELECT_MASTER_ABORT_IRQ );
+ nsp32_index_write2(base, IRQ_SELECT,
+ IRQSELECT_TIMER_IRQ |
+ IRQSELECT_SCSIRESET_IRQ |
+ IRQSELECT_FIFO_SHLD_IRQ |
+ IRQSELECT_RESELECT_IRQ |
+ IRQSELECT_PHASE_CHANGE_IRQ |
+ IRQSELECT_AUTO_SCSI_SEQ_IRQ |
+ // IRQSELECT_BMCNTERR_IRQ |
+ IRQSELECT_TARGET_ABORT_IRQ |
+ IRQSELECT_MASTER_ABORT_IRQ );
nsp32_write2(base, IRQ_CONTROL, 0);
/* PCI LED off */
@@ -1163,11 +1176,12 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* IRQ check, then enable IRQ mask
*/
irq_stat = nsp32_read2(base, IRQ_STATUS);
- nsp32_dbg(NSP32_DEBUG_INTR,
+ nsp32_dbg(NSP32_DEBUG_INTR,
"enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
/* is this interrupt comes from Ninja asic? */
if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
- nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "shared interrupt: irq other 0x%x", irq_stat);
goto out2;
}
handled = 1;
@@ -1207,7 +1221,8 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
if (SCpnt == NULL) {
nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
- nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x",
+ irq_stat, trans_stat);
goto out;
}
@@ -1265,13 +1280,13 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
"Data in/out phase processed");
/* read BMCNT, SGT pointer addr */
- nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
nsp32_read4(base, BM_CNT));
- nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
nsp32_read4(base, SGT_ADR));
- nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
nsp32_read4(base, SACK_CNT));
- nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
nsp32_read4(base, SAVED_SACK_CNT));
scsi_set_resid(SCpnt, 0); /* all data transferred! */
@@ -1306,7 +1321,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* Read CSB and substitute CSB for SCpnt->result
* to save status phase stutas byte.
* scsi error handler checks host_byte (DID_*:
- * low level driver to indicate status), then checks
+ * low level driver to indicate status), then checks
* status_byte (SCSI status byte).
*/
SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
@@ -1314,7 +1329,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
if (auto_stat & ILLEGAL_PHASE) {
/* Illegal phase is detected. SACK is not back. */
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"AUTO SCSI ILLEGAL PHASE OCCUR!!!!");
/* TODO: currently we don't have any action... bus reset? */
@@ -1367,7 +1382,8 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
break;
default:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
- nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x",
+ irq_stat, trans_stat);
show_busphase(busphase);
break;
}
@@ -1433,32 +1449,39 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
unsigned long flags;
nsp32_hw_data *data;
- int hostno;
+ int hostno;
unsigned int base;
unsigned char mode_reg;
- int id, speed;
- long model;
+ int id, speed;
+ long model;
hostno = host->host_no;
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
seq_puts(m, "NinjaSCSI-32 status\n\n");
- seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
- seq_printf(m, "SCSI host No.: %d\n", hostno);
- seq_printf(m, "IRQ: %d\n", host->irq);
- seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
- seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
- seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
- seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
+ seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n",
+ nsp32_release_version);
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n",
+ host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n",
+ host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n",
+ host->sg_tablesize);
+ seq_printf(m, "Chip revision: 0x%x\n",
+ (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
mode_reg = nsp32_index_read1(base, CHIP_MODE);
model = data->pci_devid->driver_data;
#ifdef CONFIG_PM
- seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
+ seq_printf(m, "Power Management: %s\n",
+ (mode_reg & OPTF) ? "yes" : "no");
#endif
- seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
+ seq_printf(m, "OEM: %ld, %s\n",
+ (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
spin_lock_irqsave(&(data->Lock), flags);
seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
@@ -1476,7 +1499,7 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
}
if (data->target[id].sync_flag == SDTR_DONE) {
- if (data->target[id].period == 0 &&
+ if (data->target[id].period == 0 &&
data->target[id].offset == ASYNC_OFFSET ) {
seq_puts(m, "async");
} else {
@@ -1518,7 +1541,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* clear TRANSFERCONTROL_BM_START
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, BM_CNT, 0);
/*
* call scsi_done
@@ -1528,10 +1551,10 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
/*
* reset parameters
*/
- data->cur_lunt->SCpnt = NULL;
- data->cur_lunt = NULL;
- data->cur_target = NULL;
- data->CurrentSC = NULL;
+ data->cur_lunt->SCpnt = NULL;
+ data->cur_lunt = NULL;
+ data->cur_target = NULL;
+ data->CurrentSC = NULL;
}
@@ -1553,7 +1576,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
show_autophase(execph);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, TRANSFER_CONTROL, 0);
/*
@@ -1561,7 +1584,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
*
* VALID:
* Save Data Pointer is received. Adjust pointer.
- *
+ *
* NO-VALID:
* SCSI-3 says if Save Data Pointer is not received, then we restart
* processing and we can't adjust any SCSI data pointer in next data
@@ -1574,7 +1597,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* Check sack_cnt/saved_sack_cnt, then adjust sg table if
* needed.
*/
- if (!(execph & MSGIN_00_VALID) &&
+ if (!(execph & MSGIN_00_VALID) &&
((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
unsigned int sacklen, s_sacklen;
@@ -1617,7 +1640,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* no processing.
*/
}
-
+
if (execph & MSGIN_03_VALID) {
/* MsgIn03 was valid to be processed. No need processing. */
}
@@ -1639,7 +1662,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* negotiating.
*/
if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
- /*
+ /*
* If valid message is received, then
* negotiation is succeeded.
*/
@@ -1666,21 +1689,18 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
- SCpnt->SCp.Message = 0;
- nsp32_dbg(NSP32_DEBUG_BUSFREE,
+ nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
SCpnt->SCp.Status, scsi_get_resid(SCpnt));
- SCpnt->result = (DID_OK << 16) |
- (SCpnt->SCp.Message << 8) |
- (SCpnt->SCp.Status << 0);
+ SCpnt->result = (DID_OK << 16) |
+ (SCpnt->SCp.Status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
- SCpnt->SCp.Message = 4;
-
+
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
} else {
@@ -1688,7 +1708,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
/* DID_ERROR? */
- //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
+ //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
@@ -1706,12 +1726,12 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int old_entry = data->cur_entry;
- int new_entry;
- int sg_num = data->cur_lunt->sg_num;
- nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
- unsigned int restlen, sentlen;
- u32_le len, addr;
+ int old_entry = data->cur_entry;
+ int new_entry;
+ int sg_num = data->cur_lunt->sg_num;
+ nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
+ unsigned int restlen, sentlen;
+ u32_le len, addr;
nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
@@ -1719,7 +1739,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
/*
- * calculate new_entry from sack count and each sgt[].len
+ * calculate new_entry from sack count and each sgt[].len
* calculate the byte which is intent to send
*/
sentlen = 0;
@@ -1737,8 +1757,10 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
if (sentlen == s_sacklen) {
/* XXX: confirm it's ok or not */
- /* In this case, it's ok because we are at
- the head element of the sg. restlen is correctly calculated. */
+ /* In this case, it's ok because we are at
+ * the head element of the sg. restlen is correctly
+ * calculated.
+ */
}
/* calculate the rest length for transferring */
@@ -1753,7 +1775,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
/* set cur_entry with new_entry */
data->cur_entry = new_entry;
-
+
return;
last:
@@ -1781,7 +1803,7 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
int i;
-
+
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
"enter: msgout_len: 0x%x", data->msgout_len);
@@ -1815,10 +1837,10 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
//nsp32_restart_autoscsi(SCpnt, command);
nsp32_write2(base, COMMAND_CONTROL,
(CLEAR_CDB_FIFO_POINTER |
- AUTO_COMMAND_PHASE |
- AUTOSCSI_RESTART |
- AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 ));
+ AUTO_COMMAND_PHASE |
+ AUTOSCSI_RESTART |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 ));
}
/*
* Write data with SACK, then wait sack is
@@ -1918,9 +1940,9 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
unsigned char msgtype;
unsigned char newlun;
unsigned short command = 0;
- int msgclear = TRUE;
- long new_sgtp;
- int ret;
+ int msgclear = TRUE;
+ long new_sgtp;
+ int ret;
/*
* read first message
@@ -1960,7 +1982,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
goto reject;
}
}
-
+
/*
* processing messages except for IDENTIFY
*
@@ -1976,10 +1998,10 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* These messages should not be occurred.
* They should be processed on AutoSCSI sequencer.
*/
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: 0x%x", msg);
break;
-
+
case RESTORE_POINTERS:
/*
* AutoMsgIn03 is disabled, and HBA gets this message.
@@ -2005,7 +2027,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
/*
* set new sg pointer
*/
- new_sgtp = data->cur_lunt->sglun_paddr +
+ new_sgtp = data->cur_lunt->sglun_paddr +
(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
nsp32_write4(base, SGT_ADR, new_sgtp);
@@ -2016,13 +2038,13 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* These messages should not be occurred.
* They should be processed on AutoSCSI sequencer.
*/
- nsp32_msg (KERN_WARNING,
+ nsp32_msg (KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
-
+
break;
-
+
case MESSAGE_REJECT:
- /* If previous message_out is sending SDTR, and get
+ /* If previous message_out is sending SDTR, and get
message_reject from target, SDTR negotiation is failed */
if (data->cur_target->sync_flag &
(SDTR_INITIATOR | SDTR_TARGET)) {
@@ -2041,7 +2063,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
case LINKED_CMD_COMPLETE:
case LINKED_FLG_CMD_COMPLETE:
/* queue tag is not supported currently */
- nsp32_msg (KERN_WARNING,
+ nsp32_msg (KERN_WARNING,
"unsupported message: 0x%x", msgtype);
break;
@@ -2094,7 +2116,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
}
/*
- * Reach here means regular length of each type of
+ * Reach here means regular length of each type of
* extended messages.
*/
switch (data->msginbuf[2]) {
@@ -2129,12 +2151,12 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
goto reject; /* not implemented yet */
break;
-
+
default:
goto reject;
}
break;
-
+
default:
goto reject;
}
@@ -2150,7 +2172,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* AutoSCSI restart, at the same time MsgOutOccur should be
* happened (however, such situation is really possible...?).
*/
- if (data->msgout_len > 0) {
+ if (data->msgout_len > 0) {
nsp32_write4(base, SCSI_MSG_OUT, 0);
command |= AUTO_ATN;
}
@@ -2192,7 +2214,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
return;
reject:
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"invalid or unsupported MessageIn, rejected. "
"current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
msg, data->msgin_len, msgtype);
@@ -2203,15 +2225,15 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
}
/*
- *
+ *
*/
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- nsp32_target *target = data->cur_target;
- unsigned char get_period = data->msginbuf[3];
- unsigned char get_offset = data->msginbuf[4];
- int entry;
+ nsp32_target *target = data->cur_target;
+ unsigned char get_period = data->msginbuf[3];
+ unsigned char get_offset = data->msginbuf[4];
+ int entry;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
@@ -2219,16 +2241,16 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
* If this inititor sent the SDTR message, then target responds SDTR,
* initiator SYNCREG, ACKWIDTH from SDTR parameter.
* Messages are not appropriate, then send back reject message.
- * If initiator did not send the SDTR, but target sends SDTR,
+ * If initiator did not send the SDTR, but target sends SDTR,
* initiator calculator the appropriate parameter and send back SDTR.
- */
+ */
if (target->sync_flag & SDTR_INITIATOR) {
/*
* Initiator sent SDTR, the target responds and
* send back negotiation SDTR.
*/
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");
-
+
target->sync_flag &= ~SDTR_INITIATOR;
target->sync_flag |= SDTR_DONE;
@@ -2242,7 +2264,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
*/
goto reject;
}
-
+
if (get_offset == ASYNC_OFFSET) {
/*
* Negotiation is succeeded, the target want
@@ -2273,7 +2295,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
if (entry < 0) {
/*
- * Target want to use long period which is not
+ * Target want to use long period which is not
* acceptable NinjaSCSI-32Bi/UDE.
*/
goto reject;
@@ -2286,7 +2308,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
} else {
/* Target send SDTR to initiator. */
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");
-
+
target->sync_flag |= SDTR_INITIATOR;
/* offset: */
@@ -2409,7 +2431,7 @@ static void nsp32_set_max_sync(nsp32_hw_data *data,
*/
static void nsp32_set_sync_entry(nsp32_hw_data *data,
nsp32_target *target,
- int entry,
+ int entry,
unsigned char offset)
{
unsigned char period, ackwidth, sample_rate;
@@ -2438,7 +2460,7 @@ static void nsp32_set_sync_entry(nsp32_hw_data *data,
static void nsp32_wait_req(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
- int wait_time = 0;
+ int wait_time = 0;
unsigned char bus, req_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
@@ -2450,7 +2472,7 @@ static void nsp32_wait_req(nsp32_hw_data *data, int state)
do {
bus = nsp32_read1(base, SCSI_BUS_MONITOR);
if ((bus & BUSMON_REQ) == req_bit) {
- nsp32_dbg(NSP32_DEBUG_WAIT,
+ nsp32_dbg(NSP32_DEBUG_WAIT,
"wait_time: %d", wait_time);
return;
}
@@ -2467,7 +2489,7 @@ static void nsp32_wait_req(nsp32_hw_data *data, int state)
static void nsp32_wait_sack(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
- int wait_time = 0;
+ int wait_time = 0;
unsigned char bus, ack_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
@@ -2532,8 +2554,8 @@ static int nsp32_detect(struct pci_dev *pdev)
struct Scsi_Host *host; /* registered host structure */
struct resource *res;
nsp32_hw_data *data;
- int ret;
- int i, j;
+ int ret;
+ int i, j;
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
@@ -2610,7 +2632,7 @@ static int nsp32_detect(struct pci_dev *pdev)
*/
/*
- * setup DMA
+ * setup DMA
*/
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
@@ -2710,16 +2732,16 @@ static int nsp32_detect(struct pci_dev *pdev)
goto free_sg_list;
}
- /*
+ /*
* PCI IO register
*/
res = request_region(host->io_port, host->n_io_port, "nsp32");
if (res == NULL) {
- nsp32_msg(KERN_ERR,
+ nsp32_msg(KERN_ERR,
"I/O region 0x%x+0x%x is already used",
data->BaseAddress, data->NumAddress);
goto free_irq;
- }
+ }
ret = scsi_add_host(host, &pdev->dev);
if (ret) {
@@ -2743,7 +2765,7 @@ static int nsp32_detect(struct pci_dev *pdev)
free_autoparam:
dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
-
+
scsi_unregister:
scsi_host_put(host);
@@ -2810,7 +2832,7 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
}
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write2(base, BM_CNT, 0);
+ nsp32_write2(base, BM_CNT, 0);
SCpnt->result = DID_ABORT << 16;
nsp32_scsi_done(SCpnt);
@@ -2833,8 +2855,8 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
* clear counter
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
- nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
+ nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
/*
* fall back to asynchronous transfer mode
@@ -2856,7 +2878,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
for(i = 0; i < 5; i++) {
intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
- }
+ }
data->CurrentSC = NULL;
}
@@ -2867,7 +2889,7 @@ static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
unsigned int base = SCpnt->device->host->io_port;
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
- nsp32_msg(KERN_INFO, "Host Reset");
+ nsp32_msg(KERN_INFO, "Host Reset");
nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
spin_lock_irq(SCpnt->device->host->host_lock);
@@ -2942,13 +2964,13 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
* AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
*
* ROMADDR
- * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
* 0x07 : HBA Synchronous Transfer Period
* Value 0: AutoSync, 1: Manual Setting
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Bus Termination
- * Value 0: Auto[ON], 1: ON, 2: OFF
+ * Value 0: Auto[ON], 1: ON, 2: OFF
* 0x11 : Not Used? (0)
* 0x12 : Bus Reset Delay Time (0x03)
* 0x13 : Bootable CD Support
@@ -2956,7 +2978,7 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
* 0x14 : Device Scan
* Bit 7 6 5 4 3 2 1 0
* | <----------------->
- * | SCSI ID: Value 0: Skip, 1: YES
+ * | SCSI ID: Value 0: Skip, 1: YES
* |-> Value 0: ALL scan, Value 1: Manual
* 0x15 - 0x1b : Not Used? (0)
* 0x1c : Constant? (0x01) (clock div?)
@@ -2967,10 +2989,10 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
*/
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
- int ret, i;
- int auto_sync;
+ int ret, i;
+ int auto_sync;
nsp32_target *target;
- int entry;
+ int entry;
/*
* Reset time which is designated by EEPROM.
@@ -3036,7 +3058,7 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
* C16 110 (I-O Data: SC-NBD) data map:
*
* ROMADDR
- * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
* 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
* 0x08 - 0x0f : Not Used? (0x0)
@@ -3044,7 +3066,7 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
* Value 0: PIO, 1: Busmater
* 0x11 : Bus Reset Delay Time (0x00-0x20)
* 0x12 : Bus Termination
- * Value 0: Disable, 1: Enable
+ * Value 0: Disable, 1: Enable
* 0x13 - 0x19 : Disconnection
* Value 0: Disable, 1: Enable
* 0x1a - 0x7c : Not Used? (0)
@@ -3054,9 +3076,9 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
*/
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
- int ret, i;
+ int ret, i;
nsp32_target *target;
- int entry, val;
+ int entry, val;
/*
* Reset time which is designated by EEPROM.
@@ -3156,7 +3178,7 @@ static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
for (i = 7; i >= 0; i--) {
val += (nsp32_prom_read_bit(data) << i);
}
-
+
/* no ack */
nsp32_prom_write_bit(data, 1);
@@ -3281,7 +3303,8 @@ static int nsp32_resume(struct pci_dev *pdev)
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
unsigned short reg;
- nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);
+ nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p",
+ pdev, pci_name(pdev), host);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake (pdev, PCI_D0, 0);
@@ -3316,13 +3339,13 @@ static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
- ret = pci_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to enable pci device");
return ret;
}
- data->Pci = pdev;
+ data->Pci = pdev;
data->pci_devid = id;
data->IrqNumber = pdev->irq;
data->BaseAddress = pci_resource_start(pdev, 0);
@@ -3351,7 +3374,7 @@ static void nsp32_remove(struct pci_dev *pdev)
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
- scsi_remove_host(host);
+ scsi_remove_host(host);
nsp32_release(host);
@@ -3364,8 +3387,8 @@ static struct pci_driver nsp32_driver = {
.probe = nsp32_probe,
.remove = nsp32_remove,
#ifdef CONFIG_PM
- .suspend = nsp32_suspend,
- .resume = nsp32_resume,
+ .suspend = nsp32_suspend,
+ .resume = nsp32_resume,
#endif
};
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index ac89002646a3..7c0f931e55e8 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -221,7 +221,7 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
SCpnt->SCp.Message = 0;
SCpnt->SCp.have_data_in = IO_UNKNOWN;
SCpnt->SCp.sent_command = 0;
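The change just above stores the SAM-defined status byte rather than the legacy masked constant. For context only (a sketch based on the historical <scsi/scsi.h> definitions, not code from this patch): CHECK_CONDITION was 0x01 in the old "masked" encoding, i.e. the SAM status shifted right by one, whereas SAM_STAT_CHECK_CONDITION is the raw SAM status byte 0x02.

#include <linux/types.h>

/* Illustration only: assumed relation between the legacy masked status
 * constants (CHECK_CONDITION == 0x01) and the SAM status bytes
 * (SAM_STAT_CHECK_CONDITION == 0x02) that the conversion above relies on. */
static inline u8 masked_to_sam_status(u8 masked)
{
	return masked << 1;	/* e.g. 0x01 (CHECK_CONDITION) -> 0x02 */
}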
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 0b8802beb7ce..ec05c42e8ee6 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -77,7 +77,7 @@ DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
* @attr: device attribute (unused)
* @buf: the buffer returned
*
- * A sysfs 'read only' shost attribute.
+ * A sysfs 'read-only' shost attribute.
*/
static ssize_t controller_fatal_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
@@ -149,7 +149,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
/**
- * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number
+ * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
@@ -396,6 +396,7 @@ static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
* @cdev:pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
+ *
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
@@ -430,6 +431,7 @@ static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
* @cdev:pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
+ *
* A sysfs 'read-only' shost attribute.
*/
@@ -464,6 +466,7 @@ static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
* @cdev:pointer to embedded class device
* @attr: device attribute (unused)
* @buf:the buffer returned
+ *
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
@@ -555,13 +558,13 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
/**
- ** pm8001_ctl_fatal_log_show - fatal error logging
- ** @cdev:pointer to embedded class device
- ** @attr: device attribute
- ** @buf: the buffer returned
- **
- ** A sysfs 'read-only' shost attribute.
- **/
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
@@ -575,13 +578,13 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
/**
- ** non_fatal_log_show - non fatal error logging
- ** @cdev:pointer to embedded class device
- ** @attr: device attribute
- ** @buf: the buffer returned
- **
- ** A sysfs 'read-only' shost attribute.
- **/
+ * non_fatal_log_show - non fatal error logging
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t non_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -620,12 +623,13 @@ static ssize_t non_fatal_count_store(struct device *cdev,
static DEVICE_ATTR_RW(non_fatal_count);
/**
- ** pm8001_ctl_gsm_log_show - gsm dump collection
- ** @cdev:pointer to embedded class device
- ** @attr: device attribute (unused)
- ** @buf: the buffer returned
- ** A sysfs 'read-only' shost attribute.
- **/
+ * pm8001_ctl_gsm_log_show - gsm dump collection
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
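The comment rewrites in this file turn the double-asterisk blocks into well-formed kernel-doc. As a general template (the function name and value below are hypothetical, not part of this patch), a read-only sysfs show callback is documented along these lines:

#include <linux/device.h>
#include <linux/sysfs.h>

/**
 * example_attr_show - one-line summary of the attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 *
 * Return: number of bytes written to @buf.
 */
static ssize_t example_attr_show(struct device *cdev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 0);	/* placeholder value */
}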
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 71aa6af08340..17c0f26e683a 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -384,7 +384,7 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
/**
* pm8001_bar4_shift - function is called to shift BAR base address
- * @pm8001_ha : our hba card infomation
+ * @pm8001_ha : our hba card information
* @shiftValue : shifting value in memory bar.
*/
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
@@ -1151,7 +1151,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_iounmap - which maped when initialized.
+ * pm8001_chip_iounmap - which mapped when initialized.
* @pm8001_ha: our hba card information
*/
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
@@ -1187,10 +1187,10 @@ pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
}
- /**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
+/**
+ * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
static void
pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
@@ -1876,8 +1876,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
* @piomb: the message contents of this outbound message.
*
* When FW has completed a ssp request for example a IO request, after it has
- * filled the SG data with the data, it will trigger this event represent
- * that he has finished the job,please check the coresponding buffer.
+ * filled the SG data with the data, it will trigger this event representing
+ * that he has finished the job; please check the corresponding buffer.
* So we will tell the caller who maybe waiting the result to tell upper layer
* that the task has been finished.
*/
@@ -1930,7 +1930,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
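Throughout this file (and pm80xx_hwi.c below) the SAM_STAT_* values stored in ts->stat are replaced by libsas-specific aliases. A hedged sketch of what those aliases are assumed to be: members of the libsas exec_status enum that keep the SAM numeric values, so ts->stat always carries an exec_status value (see include/scsi/libsas.h for the authoritative definitions; the enum below is an illustration, not the real one):

#include <scsi/scsi_proto.h>	/* SAM_STAT_* status bytes */

/* Illustration only of the assumed aliasing. */
enum example_exec_status {
	EXAMPLE_SAS_SAM_STAT_GOOD  = SAM_STAT_GOOD,		/* 0x00 */
	EXAMPLE_SAS_SAM_STAT_BUSY  = SAM_STAT_BUSY,		/* 0x08 */
	EXAMPLE_SAS_SAM_STAT_CHECK = SAM_STAT_CHECK_CONDITION,	/* 0x02 */
};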
@@ -2390,7 +2390,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
@@ -2912,7 +2912,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
@@ -2939,17 +2939,17 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_ERROR_HW_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
@@ -3522,7 +3522,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when sas layer find a device it will notify LLDD, then the driver register
* the domain device to FW, this event is the return device ID which the FW
- * has assigned, from now,inter-communication with FW is no longer using the
+ * has assigned, from now, inter-communication with FW is no longer using the
* SAS address, use device ID which FW assigned.
*/
int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3710,7 +3710,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
@@ -4357,7 +4357,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
spin_lock_irqsave(&task->task_state_lock, flags);
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index af09bd282cb9..47db7e0beae6 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -101,6 +101,7 @@ static struct scsi_host_template pm8001_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
+ .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
@@ -232,7 +233,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
/**
* pm8001_interrupt_handler_intx - main INTx interrupt handler.
* @irq: interrupt number
- * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
+ * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure.
*/
static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
@@ -438,9 +439,9 @@ err_out:
}
/**
- * pm8001_ioremap - remap the pci high physical address to kernal virtual
+ * pm8001_ioremap - remap the pci high physical address to kernel virtual
* address so that we can access them.
- * @pm8001_ha:our hba structure.
+ * @pm8001_ha: our hba structure.
*/
static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
{
@@ -651,7 +652,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
* pm8001_init_sas_add - initialize sas address
* @pm8001_ha: our ha struct.
*
- * Currently we just set the fixed SAS address to our HBA,for manufacture,
+ * Currently we just set the fixed SAS address to our HBA, for manufacture,
* it should read from the EEPROM
*/
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
@@ -789,7 +790,7 @@ struct pm8001_mpi3_phy_pg_trx_config {
};
/**
- * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings
+ * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings
* @pm8001_ha : our adapter
* @phycfg : PHY config page to populate
*/
@@ -809,7 +810,7 @@ void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha,
}
/**
- * pm8001_get_external_phy_settings : Retrieves the external PHY settings
+ * pm8001_get_external_phy_settings - Retrieves the external PHY settings
* @pm8001_ha : our adapter
* @phycfg : PHY config page to populate
*/
@@ -829,7 +830,7 @@ void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha,
}
/**
- * pm8001_get_phy_mask : Retrieves the mask that denotes if a PHY is int/ext
+ * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext
* @pm8001_ha : our adapter
* @phymask : The PHY mask
*/
@@ -867,7 +868,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
}
/**
- * pm8001_set_phy_settings_ven_117c_12G() : Configure ATTO 12Gb PHY settings
+ * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings
* @pm8001_ha : our adapter
*/
static
@@ -902,7 +903,7 @@ int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID.
+ * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID.
* @pm8001_ha : our hba.
*/
static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
@@ -1052,8 +1053,8 @@ intx:
* @ent: pci device id
*
* This function is the main initialization function, when register a new
- * pci driver it is invoked, all struct an hardware initilization should be done
- * here, also, register interrupt
+ * pci driver it is invoked, all struct and hardware initialization should be
+ * done here, also, register interrupt.
*/
static int pm8001_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -1171,10 +1172,11 @@ err_out_enable:
return rc;
}
-/*
+/**
* pm8001_init_ccb_tag - allocate memory to CCB and tag.
* @pm8001_ha: our hba card information.
* @shost: scsi host which has been allocated outside.
+ * @pdev: pci device.
*/
static int
pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
@@ -1269,7 +1271,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
* pm8001_pci_suspend - power management suspend main entry point
* @dev: Device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 on success, anything else on error.
*/
static int __maybe_unused pm8001_pci_suspend(struct device *dev)
{
@@ -1314,7 +1316,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev)
* pm8001_pci_resume - power management resume main entry point
* @dev: Device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 on success, anything else on error.
*/
static int __maybe_unused pm8001_pci_resume(struct device *dev)
{
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 335cf37e6cb9..32e60f0c3b14 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -98,14 +98,16 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
pm8001_tag_free(pm8001_ha, i);
}
- /**
- * pm8001_mem_alloc - allocate memory for pm8001.
- * @pdev: pci device.
- * @virt_addr: the allocated virtual address
- * @pphys_addr_hi: the physical address high byte address.
- * @pphys_addr_lo: the physical address low byte address.
- * @mem_size: memory size.
- */
+/**
+ * pm8001_mem_alloc - allocate memory for pm8001.
+ * @pdev: pci device.
+ * @virt_addr: the allocated virtual address
+ * @pphys_addr: DMA address for this device
+ * @pphys_addr_hi: the physical address high byte address.
+ * @pphys_addr_lo: the physical address low byte address.
+ * @mem_size: memory size.
+ * @align: requested byte alignment
+ */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
u32 *pphys_addr_lo, u32 mem_size, u32 align)
@@ -118,10 +120,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
align_offset = (dma_addr_t)align - 1;
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
- if (!mem_virt_alloc) {
- pr_err("pm80xx: memory allocation error\n");
- return -1;
- }
+ if (!mem_virt_alloc)
+ return -ENOMEM;
*pphys_addr = mem_dma_handle;
phys_align = (*pphys_addr + align_offset) & ~align_offset;
*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
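The second hunk above is the logic the new @align kernel-doc line refers to: allocate mem_size + align bytes, round the DMA handle up to the requested boundary, and move the returned virtual pointer by the same delta so both addresses stay in sync. A minimal userspace sketch of that rounding arithmetic, with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t align        = 64;		/* requested byte alignment */
	uint64_t align_offset = align - 1;
	uint64_t dma_handle   = 0x12345;	/* example unaligned bus address */

	/* Round the bus address up to the next multiple of "align"; the CPU
	 * virtual pointer is advanced by the same delta in the driver. */
	uint64_t phys_align = (dma_handle + align_offset) & ~align_offset;

	printf("raw 0x%llx -> aligned 0x%llx (delta %llu)\n",
	       (unsigned long long)dma_handle,
	       (unsigned long long)phys_align,
	       (unsigned long long)(phys_align - dma_handle));
	return 0;
}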
@@ -341,7 +341,7 @@ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
}
/**
- * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task
+ * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
* @pm8001_ha: our hba card information
* @ccb: the ccb which attached to ssp task
*/
@@ -556,10 +556,10 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
pm8001_tag_free(pm8001_ha, ccb_idx);
}
- /**
- * pm8001_alloc_dev - find a empty pm8001_device
- * @pm8001_ha: our hba card information
- */
+/**
+ * pm8001_alloc_dev - find a empty pm8001_device
+ * @pm8001_ha: our hba card information
+ */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
u32 dev;
@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
void pm8001_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
{
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
+ unsigned long flags;
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
}
#define PM8001_TASK_TIMEOUT 20
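The rewritten timeout handler takes task_state_lock and only completes the slow task when SAS_TASK_STATE_DONE is not already set, so the timeout path and the regular completion path can no longer both signal the completion; the waiters changed later in this patch can then treat SAS_TASK_STATE_ABORTED alone as "timed out". A reduced sketch of both sides of that hand-off (hypothetical helper names, not the driver's code):

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <scsi/libsas.h>

/* Timeout side: claim the task only if nobody has completed it yet. */
static void example_tmf_timedout(struct sas_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		complete(&task->slow_task->completion);
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
}

/* Waiter side: after wait_for_completion(), ABORTED alone now means timeout. */
static bool example_tmf_timed_out(struct sas_task *task)
{
	return task->task_state_flags & SAS_TASK_STATE_ABORTED;
}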
@@ -707,7 +711,7 @@ static void pm8001_tmf_timedout(struct timer_list *t)
* @parameter: ssp task parameter.
*
* when errors or exception happened, we may want to do something, for example
- * abort the issued task which result in this execption, it is done by calling
+ * abort the issued task which result in this exception, it is done by calling
* this function, note it is also with the task execute interface.
*/
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
@@ -748,17 +752,14 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
}
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- pm8001_dbg(pm8001_ha, FAIL,
- "TMF task[%x]timeout.\n",
- tmf->tmf);
- goto ex_err;
- }
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
+ tmf->tmf);
+ goto ex_err;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
@@ -834,16 +835,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- pm8001_dbg(pm8001_ha, FAIL,
- "TMF task timeout.\n");
- goto ex_err;
- }
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
+ goto ex_err;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
@@ -986,11 +984,12 @@ void pm8001_open_reject_retry(
}
/**
- * pm8001_I_T_nexus_reset()
- * Standard mandates link reset for ATA (type 0) and hard reset for
- * SSP (type 1) , only for RECOVERY
- * @dev: the device structure for the device to reset.
- */
+ * pm8001_I_T_nexus_reset() - reset the initiator/target connection
+ * @dev: the device structure for the device to reset.
+ *
+ * Standard mandates link reset for ATA (type 0) and hard reset for
+ * SSP (type 1), only for RECOVERY
+ */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
int rc = TMF_RESP_FUNC_FAILED;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 700530e969ac..6ffe17b849ae 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -140,7 +140,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
pm8001_ha->fatal_bar_loc = 0;
}
- /* Read until accum_len is retrived */
+ /* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
/* Determine length of data between previously stored transfer length
@@ -1011,7 +1011,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
value);
return -EBUSY;
}
- /* check the MPI-State for initialization upto 100ms*/
+ /* check the MPI-State for initialization up to 100ms*/
max_wait_count = 5;/* 100 msec */
do {
msleep(FW_READY_INTERVAL);
@@ -1093,7 +1093,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
- /**
+ /*
* lower 26 bits of SCRATCHPAD0 register describes offset within the
* PCIe BAR where the MPI configuration table is present
*/
@@ -1101,7 +1101,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value);
- /**
+ /*
* Upper 6 bits describe the offset within PCI config space where BAR
* is located.
*/
@@ -1109,7 +1109,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pcibar = get_pci_bar_index(pcilogic);
pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
- /**
+ /*
* Make sure the offset falls inside the ioremapped PCI BAR
*/
if (offset > pm8001_ha->io_mem[pcibar].memsize) {
@@ -1121,7 +1121,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
- /**
+ /*
* Validate main configuration table address: first DWord should read
* "PMCS"
*/
@@ -1385,7 +1385,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm80xx_encrypt_update - update flash with encryption informtion
+ * pm80xx_encrypt_update - update flash with encryption information
* @pm8001_ha: our hba card information.
*/
static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
@@ -1422,7 +1422,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm80xx_chip_init - the main init function that initialize whole PM8001 chip.
+ * pm80xx_chip_init - the main init function that initializes whole PM8001 chip.
* @pm8001_ha: our hba card information
*/
static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
@@ -1541,7 +1541,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm80xx_fatal_errors - returns non zero *ONLY* when fatal errors
+ * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors
* @pm8001_ha: our hba card information
*
* Fatal errors are recoverable only after a host reboot.
@@ -1576,8 +1576,8 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
- * the FW register status to the originated status.
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all
+ * FW register status are reset to the originated status.
* @pm8001_ha: our hba card information
*/
@@ -1895,13 +1895,13 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
}
/**
- * mpi_ssp_completion- process the event that FW response to the SSP request.
+ * mpi_ssp_completion - process the event that FW response to the SSP request.
* @pm8001_ha: our hba card information
* @piomb: the message contents of this outbound message.
*
* When FW has completed a ssp request for example a IO request, after it has
- * filled the SG data with the data, it will trigger this event represent
- * that he has finished the job,please check the coresponding buffer.
+ * filled the SG data with the data, it will trigger this event representing
+ * that he has finished the job; please check the corresponding buffer.
* So we will tell the caller who maybe waiting the result to tell upper layer
* that the task has been finished.
*/
@@ -1952,7 +1952,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
@@ -2487,7 +2487,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
@@ -3042,7 +3042,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
@@ -3084,17 +3084,17 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_ERROR_HW_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
@@ -3217,7 +3217,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
+ * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW.
* @pm8001_ha: our hba card information
* @Qnum: the outbound queue message number.
* @SEA: source of event to ack
@@ -3275,7 +3275,7 @@ static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
}
/**
- * hw_event_sas_phy_up -FW tells me a SAS phy up event.
+ * hw_event_sas_phy_up - FW tells me a SAS phy up event.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3353,7 +3353,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * hw_event_sata_phy_up -FW tells me a SATA phy up event.
+ * hw_event_sata_phy_up - FW tells me a SATA phy up event.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3400,7 +3400,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * hw_event_phy_down -we should notify the libsas the phy is down.
+ * hw_event_phy_down - we should notify the libsas the phy is down.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3500,7 +3500,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * mpi_thermal_hw_event -The hw event has come.
+ * mpi_thermal_hw_event - a thermal hw event has come.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3530,7 +3530,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * mpi_hw_event -The hw event has come.
+ * mpi_hw_event - The hw event has come.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -4025,7 +4025,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
case OPC_OUB_SET_DEV_INFO:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
break;
- /* spcv specifc commands */
+ /* spcv specific commands */
case OPC_OUB_PHY_START_RESP:
pm8001_dbg(pm8001_ha, MSG,
"OPC_OUB_PHY_START_RESP opcode:%x\n", opc);
@@ -4186,7 +4186,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag,
}
/**
- * pm80xx_chip_smp_req - send a SMP task to FW
+ * pm80xx_chip_smp_req - send an SMP task to FW
* @pm8001_ha: our hba card information.
* @ccb: the ccb information this request used.
*/
@@ -4346,7 +4346,7 @@ static int check_enc_sat_cmd(struct sas_task *task)
}
/**
- * pm80xx_chip_ssp_io_req - send a SSP task to FW
+ * pm80xx_chip_ssp_io_req - send an SSP task to FW
* @pm8001_ha: our hba card information.
* @ccb: the ccb information this request used.
*/
@@ -4699,7 +4699,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
spin_lock_irqsave(&task->task_state_lock, flags);
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
@@ -4750,13 +4750,13 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
/* SSC Disable and SAS Analog ST configuration */
- /**
+ /*
payload.ase_sh_lm_slr_phyid =
cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
phy_id);
Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
- **/
+ */
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 6d36debde18e..bbb75318f1e7 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -47,8 +47,8 @@
/*
* MAX_CMD : maximum commands that can be outstanding with IOA
* MAX_IO_CMD : command blocks available for IO commands
- * MAX_HCAM_CMD : command blocks avaibale for HCAMS
- * MAX_INTERNAL_CMD : command blocks avaible for internal commands like reset
+ * MAX_HCAM_CMD : command blocks available for HCAMS
+ * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
*/
#define PMCRAID_MAX_CMD 1024
#define PMCRAID_MAX_IO_CMD 1020
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index aa41f7ac91cb..977315fdc254 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -1148,18 +1148,6 @@ static struct parport_driver ppa_driver = {
.detach = ppa_detach,
.devmodel = true,
};
+module_parport_driver(ppa_driver);
-static int __init ppa_driver_init(void)
-{
- printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
- return parport_register_driver(&ppa_driver);
-}
-
-static void __exit ppa_driver_exit(void)
-{
- parport_unregister_driver(&ppa_driver);
-}
-
-module_init(ppa_driver_init);
-module_exit(ppa_driver_exit);
MODULE_LICENSE("GPL");
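module_parport_driver() comes from include/linux/parport.h and is built on the generic module_driver() helper; assuming that definition, the deleted init/exit pair is roughly what the macro generates for ppa_driver, except that the informational version printk is dropped:

/* Rough sketch of what module_parport_driver(ppa_driver) expands to. */
static int __init ppa_driver_init(void)
{
	return parport_register_driver(&ppa_driver);
}
module_init(ppa_driver_init);

static void __exit ppa_driver_exit(void)
{
	parport_unregister_driver(&ppa_driver);
}
module_exit(ppa_driver_exit);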
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index ccb5771f1cb7..0f4b99d92f12 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -234,10 +234,8 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
}
if (res) {
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0);
cmd->result = res;
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = ILLEGAL_REQUEST;
priv->curr_cmd = NULL;
cmd->scsi_done(cmd);
}
@@ -319,8 +317,7 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data)
goto done;
}
- scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
- cmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 0, sense_key, asc, ascq);
done:
priv->curr_cmd = NULL;
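Both ps3rom hunks replace an open-coded sense buffer setup with scsi_build_sense(), which writes the sense data (fixed format when desc == 0) and records SAM_STAT_CHECK_CONDITION in cmd->result in one step. A hedged sketch of a typical call; the 0x24/0x00 (INVALID FIELD IN CDB) pair is an arbitrary example, not taken from this driver:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_proto.h>

/* Sketch only: fail a command with ILLEGAL REQUEST using fixed-format sense. */
static void example_fail_with_sense(struct scsi_cmnd *cmd)
{
	scsi_build_sense(cmd, 0 /* fixed format */, ILLEGAL_REQUEST, 0x24, 0x00);
	/* cmd->result now carries SAM_STAT_CHECK_CONDITION; complete as usual. */
}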
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index d995f72a6759..461c0c9180c4 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -24,9 +24,8 @@ static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
return lport_priv(base_lport);
}
-static ssize_t
-qedf_fcoe_mac_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fcoe_mac_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
u32 port_id;
@@ -42,9 +41,8 @@ qedf_fcoe_mac_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
-static ssize_t
-qedf_fka_period_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fka_period_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
struct qedf_ctx *qedf = lport_priv(lport);
@@ -59,8 +57,8 @@ qedf_fka_period_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
}
-static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
-static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL);
+static DEVICE_ATTR_RO(fcoe_mac);
+static DEVICE_ATTR_RO(fka_period);
struct device_attribute *qedf_host_attrs[] = {
&dev_attr_fcoe_mac,
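DEVICE_ATTR_RO(name) requires the show callback to be named name_show, which is why the two functions above lose their qedf_ prefixes before the macro swap. Roughly, the macro expands to the open-coded form that is being removed:

/* Sketch: DEVICE_ATTR_RO(fcoe_mac) is approximately equivalent to the
 * declaration below -- 0444 permissions, no store callback. */
static DEVICE_ATTR(fcoe_mac, 0444, fcoe_mac_show, NULL);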
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index e0387e495261..0d2aed82882a 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -106,11 +106,10 @@ ret:
int
qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
{
- *buf = vmalloc(len);
+ *buf = vzalloc(len);
if (!(*buf))
return -ENOMEM;
- memset(*buf, 0, len);
return 0;
}
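vzalloc() is the zeroing variant of vmalloc(), so the explicit memset() above becomes redundant. A small sketch of the equivalence, with hypothetical helper names:

#include <linux/string.h>
#include <linux/vmalloc.h>

/* Sketch only: both helpers return a zero-filled buffer of len bytes. */
static void *grc_buf_alloc_old(unsigned long len)
{
	void *buf = vmalloc(len);

	if (buf)
		memset(buf, 0, len);	/* allocate, then clear */
	return buf;
}

static void *grc_buf_alloc_new(unsigned long len)
{
	return vzalloc(len);		/* one call does both */
}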
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 4869ef813dc4..6b5b6a75ac88 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -23,11 +23,6 @@ static void qedf_cmd_timeout(struct work_struct *work)
struct qedf_ctx *qedf;
struct qedf_rport *fcport;
- if (io_req == NULL) {
- QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
- return;
- }
-
fcport = io_req->fcport;
if (io_req->fcport == NULL) {
QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
@@ -1520,9 +1515,19 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
{
int rval;
+ if (io_req == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
+ return;
+ }
+
+ if (io_req->fcport == NULL) {
+ QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
+ return;
+ }
+
if (!cqe) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
- "cqe is NULL for io_req %p\n", io_req);
+ "cqe is NULL for io_req %p\n", io_req);
return;
}
@@ -1538,6 +1543,16 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
le32_to_cpu(cqe->cqe_info.err_info.rx_id));
+ /* When flush is active, let the cmds be flushed out from the cleanup context */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
+ (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
+ io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Dropping EQE for xid=0x%x as fcport is flushing",
+ io_req->xid);
+ return;
+ }
+
if (qedf->stop_io_on_error) {
qedf_stop_all_io(qedf);
return;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b92570a7c309..85f41abcb56c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1717,6 +1717,9 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
FW_ENGINEERING_VERSION);
+ snprintf(fc_host_vendor_identifier(lport->host),
+ FC_VENDOR_IDENTIFIER, "%s", "Marvell");
+
}
static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1877,6 +1880,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vn_port->host->max_lun = qedf_max_lun;
vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ vn_port->host->max_id = QEDF_MAX_SESSIONS;
rc = scsi_add_host(vn_port->host, &vport->dev);
if (rc) {
@@ -3528,6 +3532,7 @@ retry_probe:
host->transportt = qedf_fc_transport_template;
host->max_lun = qedf_max_lun;
host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ host->max_id = QEDF_MAX_SESSIONS;
host->can_queue = FCOE_PARAMS_NUM_TASKS;
rc = scsi_add_host(host, &pdev->dev);
if (rc) {
@@ -3971,10 +3976,6 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
- if (!qedf) {
- QEDF_ERR(NULL, "qedf is NULL");
- return;
- }
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
qedf_ctx_soft_reset(qedf->lport);
}
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index c342defc3f52..ce199a7a16b8 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -284,6 +284,7 @@ struct qedi_ctx {
#define QEDI_IN_RECOVERY 5
#define QEDI_IN_OFFLINE 6
#define QEDI_IN_SHUTDOWN 7
+#define QEDI_BLOCK_IO 8
u8 mac[ETH_ALEN];
u32 src_ip[4];
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 440ddd2309f1..71333d3c5c86 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,8 +14,8 @@
#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"
-static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask);
+static int send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask, struct iscsi_task *ctask);
void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
{
@@ -73,7 +73,6 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
spin_unlock(&session->back_lock);
@@ -138,7 +137,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
qedi_conn->gen_pdu.resp_buf,
@@ -158,19 +156,11 @@ static void qedi_tmf_resp_work(struct work_struct *work)
struct iscsi_tm_rsp *resp_hdr_ptr;
int rval = 0;
- set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
- iscsi_block_session(session->cls_session);
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
- if (rval) {
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
- iscsi_unblock_session(session->cls_session);
+ if (rval)
goto exit_tmf_resp;
- }
-
- iscsi_unblock_session(session->cls_session);
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
spin_lock(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
@@ -178,7 +168,10 @@ static void qedi_tmf_resp_work(struct work_struct *work)
exit_tmf_resp:
kfree(resp_hdr_ptr);
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works--;
+ spin_unlock(&qedi_conn->tmf_work_lock);
}
static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
@@ -234,18 +227,25 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
}
spin_unlock(&qedi_conn->list_lock);
- if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ spin_lock(&qedi_conn->tmf_work_lock);
+ switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ if (qedi_conn->ep_disconnect_starting) {
+ /* Session is down so ep_disconnect will clean up */
+ spin_unlock(&qedi_conn->tmf_work_lock);
+ goto unblock_sess;
+ }
+
+ qedi_conn->fw_cleanup_works++;
+ spin_unlock(&qedi_conn->tmf_work_lock);
+
INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
goto unblock_sess;
}
-
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ spin_unlock(&qedi_conn->tmf_work_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
kfree(resp_hdr_ptr);
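Instead of a single QEDI_CONN_FW_CLEANUP flag, the connection now counts outstanding TMF cleanup works under tmf_work_lock: the completion path above bumps fw_cleanup_works before queueing qedi_tmf_resp_work(), skips the queueing entirely when ep_disconnect is already starting, and the work decrements the counter when it finishes. A reduced sketch of that counting pattern, with hypothetical names rather than the driver's own structures:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Sketch of the counting scheme assumed above (not the driver's code). */
struct example_conn {
	spinlock_t	work_lock;
	int		cleanup_works;		/* TMF cleanups still queued/running */
	bool		disconnect_starting;
};

static bool example_queue_cleanup(struct example_conn *c, struct work_struct *w,
				  struct workqueue_struct *wq)
{
	spin_lock(&c->work_lock);
	if (c->disconnect_starting) {		/* teardown will clean up instead */
		spin_unlock(&c->work_lock);
		return false;
	}
	c->cleanup_works++;
	spin_unlock(&c->work_lock);

	queue_work(wq, w);
	return true;
}

static void example_cleanup_done(struct example_conn *c)
{
	spin_lock(&c->work_lock);
	c->cleanup_works--;			/* paired with the increment above */
	spin_unlock(&c->work_lock);
}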
@@ -314,7 +314,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
}
static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
@@ -468,7 +467,6 @@ static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
}
spin_unlock(&qedi_conn->list_lock);
- qedi_clear_task_idx(qedi, cmd->task_id);
}
done:
@@ -673,7 +671,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
if (qedi_io_tracing)
qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
error:
@@ -730,7 +727,6 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
cqe->itid, cmd->task_id);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
@@ -739,20 +735,17 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
struct iscsi_cqe_solicited *cqe,
- struct iscsi_task *task,
struct iscsi_conn *conn)
{
struct qedi_work_map *work, *work_tmp;
u32 proto_itt = cqe->itid;
- u32 ptmp_itt = 0;
itt_t protoitt = 0;
int found = 0;
struct qedi_cmd *qedi_cmd = NULL;
- u32 rtid = 0;
u32 iscsi_cid;
struct qedi_conn *qedi_conn;
struct qedi_cmd *dbg_cmd;
- struct iscsi_task *mtask;
+ struct iscsi_task *mtask, *task;
struct iscsi_tm *tmf_hdr = NULL;
iscsi_cid = cqe->conn_id;
@@ -778,93 +771,64 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
}
found = 1;
mtask = qedi_cmd->task;
+ task = work->ctask;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- rtid = work->rtid;
list_del_init(&work->list);
kfree(work);
qedi_cmd->list_tmf_work = NULL;
}
}
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
-
- if (found) {
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
- proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
-
- if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_ABORT_TASK) {
- spin_lock_bh(&conn->session->back_lock);
-
- protoitt = build_itt(get_itt(tmf_hdr->rtt),
- conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
-
- spin_unlock_bh(&conn->session->back_lock);
-
- if (!task) {
- QEDI_NOTICE(&qedi->dbg_ctx,
- "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
- get_itt(tmf_hdr->rtt),
- qedi_conn->iscsi_conn_id);
- return;
- }
-
- dbg_cmd = task->dd_data;
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
- get_itt(tmf_hdr->rtt), get_itt(task->itt),
- dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+ if (!found) {
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ goto check_cleanup_reqs;
+ }
- if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
- qedi_cmd->state = CLEANUP_RECV;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ spin_lock_bh(&conn->session->back_lock);
+ if (iscsi_task_is_completed(task)) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id);
+ goto unlock;
+ }
- qedi_clear_task_idx(qedi_conn->qedi, rtid);
+ dbg_cmd = task->dd_data;
- spin_lock(&qedi_conn->list_lock);
- if (likely(dbg_cmd->io_cmd_in_list)) {
- dbg_cmd->io_cmd_in_list = false;
- list_del_init(&dbg_cmd->io_cmd);
- qedi_conn->active_cmd_count--;
- }
- spin_unlock(&qedi_conn->list_lock);
- qedi_cmd->state = CLEANUP_RECV;
- wake_up_interruptible(&qedi_conn->wait_queue);
- }
- } else if (qedi_conn->cmd_cleanup_req > 0) {
- spin_lock_bh(&conn->session->back_lock);
- qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
- protoitt = build_itt(ptmp_itt, conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
- cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
- qedi_conn->iscsi_conn_id);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id,
+ qedi_conn->iscsi_conn_id);
- spin_unlock_bh(&conn->session->back_lock);
- if (!task) {
- QEDI_NOTICE(&qedi->dbg_ctx,
- "task is null, itid=0x%x, cid=0x%x\n",
- cqe->itid, qedi_conn->iscsi_conn_id);
- return;
- }
- qedi_conn->cmd_cleanup_cmpl++;
- wake_up(&qedi_conn->wait_queue);
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(dbg_cmd->io_cmd_in_list)) {
+ dbg_cmd->io_cmd_in_list = false;
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+unlock:
+ spin_unlock_bh(&conn->session->back_lock);
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ return;
+check_cleanup_reqs:
+ if (qedi_conn->cmd_cleanup_req > 0) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
cqe->itid, qedi_conn->iscsi_conn_id);
- qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
-
+ qedi_conn->cmd_cleanup_cmpl++;
+ wake_up(&qedi_conn->wait_queue);
} else {
- qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
- protoitt = build_itt(ptmp_itt, conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
QEDI_ERR(&qedi->dbg_ctx,
- "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
- protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
+ protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
}
}
@@ -959,8 +923,7 @@ void qedi_fp_process_cqes(struct qedi_work *work)
goto exit_fp_process;
case ISCSI_CQE_TYPE_TASK_CLEANUP:
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
- qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
- conn);
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, conn);
goto exit_fp_process;
default:
QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
@@ -1368,7 +1331,7 @@ static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
return 0;
}
-static void qedi_tmf_work(struct work_struct *work)
+static void qedi_abort_work(struct work_struct *work)
{
struct qedi_cmd *qedi_cmd =
container_of(work, struct qedi_cmd, tmf_work);
@@ -1381,17 +1344,29 @@ static void qedi_tmf_work(struct work_struct *work)
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
s16 rval = 0;
- s16 tid = 0;
mtask = qedi_cmd->task;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
- ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
- if (!ctask || !ctask->sc) {
- QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
- goto abort_ret;
+ spin_lock_bh(&conn->session->back_lock);
+ ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt);
+ if (!ctask) {
+ spin_unlock_bh(&conn->session->back_lock);
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n");
+ goto clear_cleanup;
+ }
+
+ if (iscsi_task_is_completed(ctask)) {
+ spin_unlock_bh(&conn->session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Task already completed\n");
+ /*
+ * We still have to send the TMF because libiscsi needs the
+ * response to avoid a timeout.
+ */
+ goto send_tmf;
}
+ spin_unlock_bh(&conn->session->back_lock);
cmd = (struct qedi_cmd *)ctask->dd_data;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
@@ -1402,19 +1377,21 @@ static void qedi_tmf_work(struct work_struct *work)
if (qedi_do_not_recover) {
QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
qedi_do_not_recover);
- goto abort_ret;
+ goto clear_cleanup;
}
- list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ list_work = kzalloc(sizeof(*list_work), GFP_NOIO);
if (!list_work) {
QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
- goto abort_ret;
+ goto clear_cleanup;
}
qedi_cmd->type = TYPEIO;
+ qedi_cmd->state = CLEANUP_WAIT;
list_work->qedi_cmd = qedi_cmd;
list_work->rtid = cmd->task_id;
list_work->state = QEDI_WORK_SCHEDULED;
+ list_work->ctask = ctask;
qedi_cmd->list_tmf_work = list_work;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
@@ -1437,23 +1414,13 @@ static void qedi_tmf_work(struct work_struct *work)
goto ldel_exit;
}
- tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
- qedi_conn->iscsi_conn_id);
- goto ldel_exit;
- }
-
- qedi_cmd->task_id = tid;
- qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
-
-abort_ret:
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
- return;
+send_tmf:
+ send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask);
+ goto clear_cleanup;
ldel_exit:
spin_lock_bh(&qedi_conn->tmf_work_lock);
- if (!qedi_cmd->list_tmf_work) {
+ if (qedi_cmd->list_tmf_work) {
list_del_init(&list_work->list);
qedi_cmd->list_tmf_work = NULL;
kfree(list_work);
@@ -1468,18 +1435,19 @@ ldel_exit:
}
spin_unlock(&qedi_conn->list_lock);
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+clear_cleanup:
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works--;
+ spin_unlock(&qedi_conn->tmf_work_lock);
}
-static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask)
+static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
+ struct iscsi_task *ctask)
{
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct e4_iscsi_task_context *fw_task_ctx;
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
- struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd;
struct qedi_cmd *cmd;
@@ -1487,7 +1455,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
u32 scsi_lun[2];
s16 tid = 0;
u16 sq_idx = 0;
- int rval = 0;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
@@ -1520,12 +1487,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_ABORT_TASK) {
- ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
- if (!ctask || !ctask->sc) {
- QEDI_ERR(&qedi->dbg_ctx,
- "Could not get reference task\n");
- return 0;
- }
cmd = (struct qedi_cmd *)ctask->dd_data;
tmf_pdu_header.rtt =
qedi_set_itt(cmd->task_id,
@@ -1551,10 +1512,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
task_params.sqe = &ep->sq[sq_idx];
memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
- rval = init_initiator_tmf_request_task(&task_params,
- &tmf_pdu_header);
- if (rval)
- return -1;
+ init_initiator_tmf_request_task(&task_params, &tmf_pdu_header);
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1566,47 +1524,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return 0;
}
-int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask)
+int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask)
{
+ struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ struct qedi_cmd *qedi_cmd = mtask->dd_data;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_tm *tmf_hdr;
- struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
- s16 tid = 0;
+ int rc = 0;
- tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- qedi_cmd->task = mtask;
+ switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works++;
+ spin_unlock(&qedi_conn->tmf_work_lock);
- /* If abort task then schedule the work and return */
- if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_ABORT_TASK) {
- qedi_cmd->state = CLEANUP_WAIT;
- INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work);
queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
-
- } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
- tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
- qedi_conn->iscsi_conn_id);
- return -1;
- }
- qedi_cmd->task_id = tid;
-
- qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
-
- } else {
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ rc = send_iscsi_tmf(qedi_conn, mtask, NULL);
+ break;
+ default:
QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
qedi_conn->iscsi_conn_id);
- return -1;
+ return -EINVAL;
}
- return 0;
+ return rc;
}
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 116645c08c71..9f8e8ef405a1 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -31,8 +31,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
-int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask);
+int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask);
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
@@ -73,6 +72,5 @@ void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
-void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess);
#endif
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 08c05403cd72..97f83760da88 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -330,12 +330,22 @@ free_conn:
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
{
- iscsi_block_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+ struct qedi_conn *qedi_conn = session->leadconn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
+ spin_unlock_bh(&session->frwd_lock);
}
void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
{
- iscsi_unblock_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+ struct qedi_conn *qedi_conn = session->leadconn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
+ spin_unlock_bh(&session->frwd_lock);
}
static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
@@ -377,6 +387,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct qedi_endpoint *qedi_ep;
struct iscsi_endpoint *ep;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -384,11 +395,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_ep = ep->dd_data;
if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
- return -EINVAL;
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
@@ -398,13 +414,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_conn->cmd_cleanup_req = 0;
qedi_conn->cmd_cleanup_cmpl = 0;
- if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
- return -EINVAL;
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
spin_lock_init(&qedi_conn->tmf_work_lock);
INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
init_waitqueue_head(&qedi_conn->wait_queue);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
@@ -582,7 +603,11 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
goto start_err;
}
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works = 0;
+ qedi_conn->ep_disconnect_starting = false;
+ spin_unlock(&qedi_conn->tmf_work_lock);
+
qedi_conn->abrt_conn = 0;
rval = iscsi_conn_start(cls_conn);
@@ -742,7 +767,7 @@ static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
rc = qedi_send_iscsi_logout(qedi_conn, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- rc = qedi_iscsi_abort_work(qedi_conn, task);
+ rc = qedi_send_iscsi_tmf(qedi_conn, task);
break;
case ISCSI_OP_TEXT:
rc = qedi_send_iscsi_text(qedi_conn, task);
@@ -772,7 +797,6 @@ static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
}
cmd->conn = conn->dd_data;
- cmd->scsi_cmd = NULL;
return qedi_iscsi_send_generic_request(task);
}
@@ -783,9 +807,16 @@ static int qedi_task_xmit(struct iscsi_task *task)
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ /* Clear now so cleanup_task knows the cmd never made it to the fw */
+ cmd->scsi_cmd = NULL;
+ cmd->task_id = U16_MAX;
+
if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
return -ENODEV;
+ if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags))
+ return -EACCES;
+
cmd->state = 0;
cmd->task = NULL;
cmd->use_slowpath = false;
@@ -988,12 +1019,10 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct qedi_endpoint *qedi_ep;
struct qedi_conn *qedi_conn = NULL;
- struct iscsi_conn *conn = NULL;
struct qedi_ctx *qedi;
int ret = 0;
int wait_delay;
int abrt_conn = 0;
- int count = 10;
wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
qedi_ep = ep->dd_data;
@@ -1007,17 +1036,21 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
- conn = qedi_conn->cls_conn->dd_data;
- iscsi_suspend_queue(conn);
abrt_conn = qedi_conn->abrt_conn;
- while (count--) {
- if (!test_bit(QEDI_CONN_FW_CLEANUP,
- &qedi_conn->flags)) {
- break;
- }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x qedi_ep=%p waiting for %d tmfs\n",
+ qedi_ep->iscsi_cid, qedi_ep,
+ qedi_conn->fw_cleanup_works);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->ep_disconnect_starting = true;
+ while (qedi_conn->fw_cleanup_works > 0) {
+ spin_unlock(&qedi_conn->tmf_work_lock);
msleep(1000);
+ spin_lock(&qedi_conn->tmf_work_lock);
}
+ spin_unlock(&qedi_conn->tmf_work_lock);
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
if (qedi_do_not_recover) {
@@ -1383,13 +1416,24 @@ static umode_t qedi_attr_is_visible(int param_type, int param)
static void qedi_cleanup_task(struct iscsi_task *task)
{
- if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ struct qedi_cmd *cmd;
+
+ if (task->state == ISCSI_TASK_PENDING) {
QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
refcount_read(&task->refcount));
return;
}
- qedi_iscsi_unmap_sg_list(task->dd_data);
+ if (task->sc)
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+
+ cmd = task->dd_data;
+ if (cmd->task_id != U16_MAX)
+ qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
+ cmd->task_id);
+
+ cmd->task_id = U16_MAX;
+ cmd->scsi_cmd = NULL;
}
struct iscsi_transport qedi_iscsi_transport = {
@@ -1401,6 +1445,7 @@ struct iscsi_transport qedi_iscsi_transport = {
.destroy_session = qedi_session_destroy,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.start_conn = qedi_conn_start,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qedi_conn_destroy,
@@ -1614,20 +1659,6 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep,
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
-void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess)
-{
- struct iscsi_session *session = cls_sess->dd_data;
- struct iscsi_conn *conn = session->leadconn;
- struct qedi_conn *qedi_conn = conn->dd_data;
-
- if (iscsi_is_session_online(cls_sess))
- qedi_ep_disconnect(qedi_conn->iscsi_ep);
-
- qedi_conn_destroy(qedi_conn->cls_conn);
-
- qedi_session_destroy(cls_sess);
-}
-
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 39dc27c85e3c..758735209e15 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -169,8 +169,8 @@ struct qedi_conn {
struct list_head tmf_work_list;
wait_queue_head_t wait_queue;
spinlock_t tmf_work_lock; /* tmf work lock */
- unsigned long flags;
-#define QEDI_CONN_FW_CLEANUP 1
+ bool ep_disconnect_starting;
+ int fw_cleanup_works;
};
struct qedi_cmd {
@@ -212,6 +212,7 @@ struct qedi_cmd {
struct qedi_work_map {
struct list_head list;
struct qedi_cmd *qedi_cmd;
+ struct iscsi_task *ctask;
int rtid;
int state;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 2455d1448a7e..0b0acb827071 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -640,7 +640,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
goto exit_setup_shost;
}
- shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = ~0;
shost->max_cmd_len = 16;
@@ -2417,11 +2417,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
int rval;
u16 retry = 10;
- if (mode == QEDI_MODE_SHUTDOWN)
- iscsi_host_for_each_session(qedi->shost,
- qedi_clear_session_ctx);
-
if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
+ iscsi_host_remove(qedi->shost);
+
if (qedi->tmf_thread) {
flush_workqueue(qedi->tmf_thread);
destroy_workqueue(qedi->tmf_thread);
@@ -2482,7 +2480,6 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (qedi->boot_kset)
iscsi_boot_destroy_kset(qedi->boot_kset);
- iscsi_host_remove(qedi->shost);
iscsi_host_free(qedi->shost);
}
}
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
index 04ee68e6499c..be174d30eb7c 100644
--- a/drivers/scsi/qedi/qedi_sysfs.c
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -16,9 +16,9 @@ static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
return iscsi_host_priv(shost);
}
-static ssize_t qedi_show_port_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t port_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
@@ -28,8 +28,8 @@ static ssize_t qedi_show_port_state(struct device *dev,
return sprintf(buf, "Linkdown\n");
}
-static ssize_t qedi_show_speed(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
struct qed_link_output if_link;
@@ -39,8 +39,8 @@ static ssize_t qedi_show_speed(struct device *dev,
return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
}
-static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
-static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+static DEVICE_ATTR_RO(port_state);
+static DEVICE_ATTR_RO(speed);
struct device_attribute *qedi_shost_attrs[] = {
&dev_attr_port_state,
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index def4d99f80e9..2f67ec1df3e6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3660,6 +3660,8 @@ struct qla_qpair {
struct qla_tgt_counters tgt_counters;
uint16_t cpuid;
struct qla_fw_resources fwres ____cacheline_aligned;
+ u32 cmd_cnt;
+ u32 cmd_completion_cnt;
};
/* Place holder for FW buffer parameters */
@@ -4616,6 +4618,7 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
+ u64 prev_cmd_cnt;
};
struct active_regions {
@@ -4743,6 +4746,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 32
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
+#define HEARTBEAT_CHK 38
#define PROCESS_PUREX_IOCB 63
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fae5cae6f0a8..2f867da822ae 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -173,7 +173,6 @@ extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
-extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
extern int ql2xenforce_iocb_limit;
@@ -220,7 +219,6 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
-extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
@@ -551,6 +549,7 @@ extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t,
uint32_t *);
extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t,
uint32_t);
+void qla_no_op_mb(struct scsi_qla_host *vha);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -687,8 +686,6 @@ extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern void qla2x00_free_fcport(fc_port_t *);
-
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0de250570e39..f8f471157109 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4356,8 +4356,6 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
if (IS_QLAFX00(vha->hw))
return qlafx00_fw_ready(vha);
- rval = QLA_SUCCESS;
-
/* Time to wait for loop down */
if (IS_P3P_TYPE(ha))
min_wait = 30;
@@ -6872,10 +6870,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->flags.fw_init_done = 0;
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
+ ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
+ if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
+ ha->queue_pair_map[i]->cmd_cnt =
+ ha->queue_pair_map[i]->cmd_completion_cnt = 0;
+ }
}
/* purge MBox commands */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 38b5bdde2405..d0ee843f6b04 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1710,6 +1710,7 @@ qla24xx_start_scsi(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
@@ -1912,6 +1913,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
@@ -2068,6 +2070,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
@@ -2284,6 +2287,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
} else
req->ring_ptr++;
+ sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6e8f737a4af3..d9fb093a60a1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2322,6 +2322,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (unlikely(iocb->u.nvme.aen_op))
atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
+ else
+ sp->qpair->cmd_completion_cnt++;
if (unlikely(comp_status != CS_COMPLETE))
logit = 1;
@@ -2694,31 +2696,22 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
/* check guard */
if (e_guard != a_guard) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
/* check ref tag */
if (e_ref_tag != a_ref_tag) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
/* check appl tag */
if (e_app_tag != a_app_tag) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2976,6 +2969,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ sp->qpair->cmd_completion_cnt++;
+
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0bcd8afdc0ff..9f3ad8aa649c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -6939,3 +6939,30 @@ ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
return rval;
}
+
+/**
+ * qla_no_op_mb() - Send a no-op mailbox command to check if FW is still
+ * alive and able to generate an interrupt. Otherwise, a timeout will
+ * trigger a FW dump + reset.
+ * @vha: host adapter pointer
+ *
+ * Return: None
+ */
+void qla_no_op_mb(struct scsi_qla_host *vha)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ memset(&mc, 0, sizeof(mc));
+ mcp->mb[0] = 0; /* noop cmd = 0 */
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 5;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval) {
+ ql_dbg(ql_dbg_async, vha, 0x7071,
+ "Failed %s %x\n", __func__, rval);
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 0cacb667a88b..3e5c70a1d969 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -536,6 +536,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->ring_ptr++;
}
+ /* ignore nvme async cmd due to long timeout */
+ if (!nvme->u.nvme.aen_op)
+ sp->qpair->cmd_cnt++;
+
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
@@ -671,7 +675,7 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport)
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
- ql_log(ql_log_warn, NULL, 0x2112,
+ ql_log(ql_log_warn, fcport->vha, 0x2112,
"%s: unregister remoteport on %p %8phN\n",
__func__, fcport, fcport->port_name);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4eab564ea6a0..cedd558f65eb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6969,6 +6969,17 @@ intr_on_check:
qla2x00_lip_reset(base_vha);
}
+ if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) {
+ /*
+ * If there is a mailbox command in progress, that is
+ * enough of a check to see if the fw is still ticking.
+ */
+ if (!ha->flags.mbox_busy && base_vha->flags.init_done)
+ qla_no_op_mb(base_vha);
+
+ clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags);
+ }
+
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
@@ -7025,6 +7036,61 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
+static bool qla_do_heartbeat(struct scsi_qla_host *vha)
+{
+ u64 cmd_cnt, prev_cmd_cnt;
+ bool do_hb = false;
+ struct qla_hw_data *ha = vha->hw;
+ int i;
+
+ /* If cmds are still pending in the fw, do a hb check */
+ if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) {
+ do_hb = true;
+ goto skip;
+ }
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i] &&
+ ha->queue_pair_map[i]->cmd_cnt !=
+ ha->queue_pair_map[i]->cmd_completion_cnt) {
+ do_hb = true;
+ break;
+ }
+ }
+
+skip:
+ prev_cmd_cnt = ha->prev_cmd_cnt;
+ cmd_cnt = ha->base_qpair->cmd_cnt;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ cmd_cnt += ha->queue_pair_map[i]->cmd_cnt;
+ }
+ ha->prev_cmd_cnt = cmd_cnt;
+
+ if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50))
+ /*
+ * IOs are completing before periodic hb check.
+ * IOs seem to be running, do hb for sanity check.
+ */
+ do_hb = true;
+
+ return do_hb;
+}
+
+static void qla_heart_beat(struct scsi_qla_host *vha)
+{
+ if (vha->vp_idx)
+ return;
+
+ if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
+ return;
+
+ if (qla_do_heartbeat(vha)) {
+ set_bit(HEARTBEAT_CHK, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+}
+
/**************************************************************************
* qla2x00_timer
*
@@ -7243,6 +7309,8 @@ qla2x00_timer(struct timer_list *t)
qla2xxx_wake_dpc(vha);
}
+ qla_heart_beat(vha);
+
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 12a6848ade43..eb47140a899f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5481,8 +5481,7 @@ qlt_free_qfull_cmds(struct qla_qpair *qpair)
"%s: Unexpected cmd in QFull list %p\n", __func__,
cmd);
- list_del(&cmd->cmd_list);
- list_add_tail(&cmd->cmd_list, &free_list);
+ list_move_tail(&cmd->cmd_list, &free_list);
/* piggy back on hardware_lock for protection */
vha->hw->tgt.num_qfull_cmds_alloc--;
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5f56122f6664..db41d90a5b6e 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -472,8 +472,7 @@ int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
} else if (device_map[i].device_type == ISCSI_CLASS) {
if (drv_active & (1 << device_map[i].func_num)) {
if (!iscsi_present ||
- (iscsi_present &&
- (iscsi_func_low > device_map[i].func_num)))
+ iscsi_func_low > device_map[i].func_num)
iscsi_func_low = device_map[i].func_num;
iscsi_present++;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ad3afe30f617..6ee7ea4c27e0 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.start_conn = qla4xxx_conn_start,
.create_conn = qla4xxx_conn_create,
.bind_conn = qla4xxx_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
@@ -814,8 +815,6 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
valid_chap_entries++;
if (valid_chap_entries == *num_entries)
break;
- else
- continue;
}
mutex_unlock(&ha->chap_sem);
@@ -3234,6 +3233,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
+ iscsi_put_endpoint(ep);
return 0;
}
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 136681ad18a5..3bbe0b5545d9 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -4,9 +4,9 @@
Use at your own risk. Support Tort Reform so you won't have to read all
these silly disclaimers.
- Copyright 1994, Tom Zerucha.
+ Copyright 1994, Tom Zerucha.
tz@execpc.com
-
+
Additional Code, and much appreciated help by
Michael A. Griffith
grif@cs.ucr.edu
@@ -22,12 +22,12 @@
Functions as standalone, loadable, and PCMCIA driver, the latter from
Dave Hinds' PCMCIA package.
-
+
Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
SCSI driver cleanup and audit. This driver still needs work on the
following
- - Non terminating hardware waits
- - Some layering violations with its pcmcia stub
+ - Non terminating hardware waits
+ - Some layering violations with its pcmcia stub
Redistributable under terms of the GNU General Public License
@@ -92,8 +92,9 @@ static void ql_zap(struct qlogicfas408_priv *priv)
/*
* Do a pseudo-dma tranfer
*/
-
-static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
+
+static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request,
+ int reqlen)
{
int j;
int qbase = priv->qbase;
@@ -108,7 +109,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */
- if ((j = inb(qbase + 8)) & 4)
+ if ((j = inb(qbase + 8)) & 4)
{
insl(qbase + 4, request, 21);
reqlen -= 84;
@@ -123,11 +124,11 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
/* until both empty and int (or until reclen is 0) */
rtrc(7)
j = 0;
- while (reqlen && !((j & 0x10) && (j & 0xc0)))
+ while (reqlen && !((j & 0x10) && (j & 0xc0)))
{
/* while bytes to receive and not empty */
j &= 0xc0;
- while (reqlen && !((j = inb(qbase + 8)) & 0x10))
+ while (reqlen && !((j = inb(qbase + 8)) & 0x10))
{
*request++ = inb(qbase + 4);
reqlen--;
@@ -161,7 +162,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
j = 0;
while (reqlen && !((j & 2) && (j & 0xc0))) {
/* while bytes to send and not full */
- while (reqlen && !((j = inb(qbase + 8)) & 2))
+ while (reqlen && !((j = inb(qbase + 8)) & 2))
{
outb(*request++, qbase + 4);
reqlen--;
@@ -175,7 +176,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
}
/*
- * Wait for interrupt flag (polled - not real hardware interrupt)
+ * Wait for interrupt flag (polled - not real hardware interrupt)
*/
static int ql_wai(struct qlogicfas408_priv *priv)
@@ -205,14 +206,14 @@ static int ql_wai(struct qlogicfas408_priv *priv)
}
/*
- * Initiate scsi command - queueing handler
+ * Initiate scsi command - queueing handler
* caller must hold host lock
*/
static void ql_icmd(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
- int qbase = priv->qbase;
+ int qbase = priv->qbase;
int int_type = priv->int_type;
unsigned int i;
@@ -253,14 +254,13 @@ static void ql_icmd(struct scsi_cmnd *cmd)
}
/*
- * Process scsi command - usually after interrupt
+ * Process scsi command - usually after interrupt
*/
-static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
+static void ql_pcmd(struct scsi_cmnd *cmd)
{
unsigned int i, j;
unsigned long k;
- unsigned int result; /* ultimate return result */
unsigned int status; /* scsi returned status */
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
@@ -274,13 +274,15 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
j = inb(qbase + 6);
i = inb(qbase + 5);
if (i == 0x20) {
- return (DID_NO_CONNECT << 16);
+ set_host_byte(cmd, DID_NO_CONNECT);
+ return;
}
i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
if (i != 0x18) {
printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
ql_zap(priv);
- return (DID_BAD_INTR << 16);
+ set_host_byte(cmd, DID_BAD_INTR);
+ return;
}
j &= 7; /* j = inb( qbase + 7 ) >> 5; */
@@ -293,9 +295,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
j, i, inb(qbase + 7) & 0x1f);
ql_zap(priv);
- return (DID_ERROR << 16);
+ set_host_byte(cmd, DID_ERROR);
+ return;
}
- result = DID_OK;
+
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
@@ -314,28 +317,31 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
if (priv->qabort) {
REG0;
- return ((priv->qabort == 1 ?
- DID_ABORT : DID_RESET) << 16);
+ set_host_byte(cmd,
+ priv->qabort == 1 ?
+ DID_ABORT : DID_RESET);
}
buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break;
}
REG0;
- rtrc(2)
+ rtrc(2);
/*
* Wait for irq (split into second state of irq handler
- * if this can take time)
+ * if this can take time)
*/
- if ((k = ql_wai(priv)))
- return (k << 16);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
k = inb(qbase + 5); /* should be 0x10, bus service */
}
/*
- * Enter Status (and Message In) Phase
+ * Enter Status (and Message In) Phase
*/
-
+
k = jiffies + WATCHDOG;
while (time_before(jiffies, k) && !priv->qabort &&
@@ -344,57 +350,72 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
if (time_after_eq(jiffies, k)) {
ql_zap(priv);
- return (DID_TIME_OUT << 16);
+ set_host_byte(cmd, DID_TIME_OUT);
+ return;
}
/* FIXME: timeout ?? */
while (inb(qbase + 5))
cpu_relax(); /* clear pending ints */
- if (priv->qabort)
- return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ if (priv->qabort) {
+ set_host_byte(cmd,
+ priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ return;
+ }
outb(0x11, qbase + 3); /* get status and message */
- if ((k = ql_wai(priv)))
- return (k << 16);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
i = inb(qbase + 5); /* get chip irq stat */
j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
status = inb(qbase + 2);
message = inb(qbase + 2);
/*
- * Should get function complete int if Status and message, else
- * bus serv if only status
+ * Should get function complete int if Status and message, else
+ * bus serv if only status
*/
if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
- result = DID_ERROR;
+ set_host_byte(cmd, DID_ERROR);
}
outb(0x12, qbase + 3); /* done, disconnect */
- rtrc(1)
- if ((k = ql_wai(priv)))
- return (k << 16);
+ rtrc(1);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
/*
- * Should get bus service interrupt and disconnect interrupt
+ * Should get bus service interrupt and disconnect interrupt
*/
-
+
i = inb(qbase + 5); /* should be bus service */
while (!priv->qabort && ((i & 0x20) != 0x20)) {
barrier();
cpu_relax();
i |= inb(qbase + 5);
}
- rtrc(0)
+ rtrc(0);
- if (priv->qabort)
- return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
-
- return (result << 16) | (message << 8) | (status & STATUS_MASK);
+ if (priv->qabort) {
+ set_host_byte(cmd,
+ priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ return;
+ }
+
+ set_host_byte(cmd, DID_OK);
+ if (message != COMMAND_COMPLETE)
+ scsi_msg_to_host_byte(cmd, message);
+ set_status_byte(cmd, status);
+ return;
}
/*
- * Interrupt handler
+ * Interrupt handler
*/
static void ql_ihandl(void *dev_id)
@@ -415,11 +436,11 @@ static void ql_ihandl(void *dev_id)
return;
}
icmd = priv->qlcmd;
- icmd->result = ql_pcmd(icmd);
+ ql_pcmd(icmd);
priv->qlcmd = NULL;
/*
- * If result is CHECK CONDITION done calls qcommand to request
- * sense
+ * If result is CHECK CONDITION done calls qcommand to request
+ * sense
*/
(icmd->scsi_done) (icmd);
}
@@ -443,8 +464,11 @@ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *))
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
if (scmd_id(cmd) == priv->qinitid) {
- cmd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmd, DID_BAD_TARGET);
done(cmd);
return 0;
}
@@ -461,8 +485,8 @@ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
DEF_SCSI_QCMD(qlogicfas408_queuecommand)
-/*
- * Return bios parameters
+/*
+ * Return bios parameters
*/
int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
@@ -487,7 +511,7 @@ int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
/*
* Abort a command in progress
*/
-
+
int qlogicfas408_abort(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
@@ -566,9 +590,9 @@ void qlogicfas408_setup(int qbase, int id, int int_type)
int qlogicfas408_detect(int qbase, int int_type)
{
- REG1;
+ REG1;
return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
- ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
+ ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
}
/*
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e9e2f0e15ac8..d26025cf5de3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -144,7 +144,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
(level > 1)) {
scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
- if (status_byte(cmd->result) == CHECK_CONDITION)
+ if (scsi_status_is_check_condition(cmd->result))
scsi_print_sense(cmd);
if (level > 3)
scmd_printk(KERN_INFO, cmd,
@@ -185,13 +185,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
if (atomic_read(&sdev->device_blocked))
atomic_set(&sdev->device_blocked, 0);
- /*
- * If we have valid sense information, then some kind of recovery
- * must have taken place. Make a note of this.
- */
- if (SCSI_SENSE_VALID(cmd))
- cmd->result |= (DRIVER_SENSE << 24);
-
SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
"Notifying upper driver of completion "
"(result %x)\n", cmd->result));
@@ -508,6 +501,8 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
&sshdr, 30 * HZ, 3, NULL);
+ if (result < 0)
+ return result;
if (result && scsi_sense_valid(&sshdr) &&
sshdr.sense_key == ILLEGAL_REQUEST &&
(sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a5d1633b5bd8..5b3a20a140f9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -851,10 +851,10 @@ static struct device_driver sdebug_driverfs_driver = {
};
static const int check_condition_result =
- (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ SAM_STAT_CHECK_CONDITION;
static const int illegal_condition_result =
- (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+ (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static const int device_qfull_result =
(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
@@ -931,7 +931,7 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
}
asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
+ scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
memset(sks, 0, sizeof(sks));
sks[0] = 0x80;
if (c_d)
@@ -957,17 +957,14 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
- unsigned char *sbuff;
-
- sbuff = scp->sense_buffer;
- if (!sbuff) {
+ if (!scp->sense_buffer) {
sdev_printk(KERN_ERR, scp->device,
"%s: sense_buffer is NULL\n", __func__);
return;
}
- memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
+ scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
if (sdebug_verbose)
sdev_printk(KERN_INFO, scp->device,
@@ -7684,11 +7681,6 @@ static int sdebug_driver_remove(struct device *dev)
sdbg_host = to_sdebug_host(dev);
- if (!sdbg_host) {
- pr_err("Unable to locate host info\n");
- return -ENODEV;
- }
-
scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d8fafe77dbbe..58a252c38992 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -742,41 +742,35 @@ static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
return FAILED;
/*
- * next, check the message byte.
- */
- if (msg_byte(scmd->result) != COMMAND_COMPLETE)
- return FAILED;
-
- /*
* now, check the status byte to see if this indicates
* anything special.
*/
- switch (status_byte(scmd->result)) {
- case GOOD:
+ switch (get_status_byte(scmd)) {
+ case SAM_STAT_GOOD:
scsi_handle_queue_ramp_up(scmd->device);
fallthrough;
- case COMMAND_TERMINATED:
+ case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;
- case CHECK_CONDITION:
+ case SAM_STAT_CHECK_CONDITION:
return scsi_check_sense(scmd);
- case CONDITION_GOOD:
- case INTERMEDIATE_GOOD:
- case INTERMEDIATE_C_GOOD:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_INTERMEDIATE:
+ case SAM_STAT_INTERMEDIATE_CONDITION_MET:
/*
* who knows? FIXME(eric)
*/
return SUCCESS;
- case RESERVATION_CONFLICT:
+ case SAM_STAT_RESERVATION_CONFLICT:
if (scmd->cmnd[0] == TEST_UNIT_READY)
/* it is a success, we probed the device and
* found it */
return SUCCESS;
/* otherwise, we failed to send the command */
return FAILED;
- case QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
scsi_handle_queue_full(scmd->device);
fallthrough;
- case BUSY:
+ case SAM_STAT_BUSY:
return NEEDS_RETRY;
default:
return FAILED;
@@ -1258,7 +1252,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
current->comm));
break;
}
- if (status_byte(scmd->result) != CHECK_CONDITION)
+ if (!scsi_status_is_check_condition(scmd->result))
/*
* don't request sense if there's no check condition
* status because the error we're processing isn't one
@@ -1766,15 +1760,14 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
case DID_PARITY:
return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
- if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
- status_byte(scmd->result) == RESERVATION_CONFLICT)
+ if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
return 0;
fallthrough;
case DID_SOFT_ERROR:
return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
}
- if (status_byte(scmd->result) != CHECK_CONDITION)
+ if (!scsi_status_is_check_condition(scmd->result))
return 0;
check_type:
@@ -1883,8 +1876,7 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
*/
return SUCCESS;
case DID_ERROR:
- if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
- status_byte(scmd->result) == RESERVATION_CONFLICT)
+ if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
/*
* execute reservation conflict processing code
* lower down
@@ -1913,23 +1905,17 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
}
/*
- * next, check the message byte.
- */
- if (msg_byte(scmd->result) != COMMAND_COMPLETE)
- return FAILED;
-
- /*
* check the status byte to see if this indicates anything special.
*/
- switch (status_byte(scmd->result)) {
- case QUEUE_FULL:
+ switch (get_status_byte(scmd)) {
+ case SAM_STAT_TASK_SET_FULL:
scsi_handle_queue_full(scmd->device);
/*
* the case of trying to send too many commands to a
* tagged queueing device.
*/
fallthrough;
- case BUSY:
+ case SAM_STAT_BUSY:
/*
* device can't talk to us at the moment. Should only
* occur (SAM-3) when the task queue is empty, so will cause
@@ -1937,16 +1923,16 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
* device.
*/
return ADD_TO_MLQUEUE;
- case GOOD:
+ case SAM_STAT_GOOD:
if (scmd->cmnd[0] == REPORT_LUNS)
scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
fallthrough;
- case COMMAND_TERMINATED:
+ case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;
- case TASK_ABORTED:
+ case SAM_STAT_TASK_ABORTED:
goto maybe_retry;
- case CHECK_CONDITION:
+ case SAM_STAT_CHECK_CONDITION:
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
@@ -1955,16 +1941,16 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
* to collect the sense and redo the decide
* disposition */
return rtn;
- case CONDITION_GOOD:
- case INTERMEDIATE_GOOD:
- case INTERMEDIATE_C_GOOD:
- case ACA_ACTIVE:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_INTERMEDIATE:
+ case SAM_STAT_INTERMEDIATE_CONDITION_MET:
+ case SAM_STAT_ACA_ACTIVE:
/*
* who knows? FIXME(eric)
*/
return SUCCESS;
- case RESERVATION_CONFLICT:
+ case SAM_STAT_RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
set_host_byte(scmd, DID_NEXUS_FAILURE);
@@ -2011,7 +1997,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
struct request *req;
struct scsi_request *rq;
- req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
+ req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return;
rq = scsi_req(req);
@@ -2137,10 +2123,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
/*
* If just we got sense for the device (called
* scsi_eh_get_sense), scmd->result is already
- * set, do not set DRIVER_TIMEOUT.
+ * set, do not set DID_TIME_OUT.
*/
if (!scmd->result)
- scmd->result |= (DRIVER_TIMEOUT << 24);
+ scmd->result |= (DID_TIME_OUT << 16);
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush finish cmd\n",
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 14872c9dc78c..0d13610cd6bf 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -101,8 +101,9 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr)) {
+ if (result < 0)
+ goto out;
+ if (scsi_sense_valid(&sshdr)) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
@@ -133,7 +134,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
break;
}
}
-
+out:
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"IOCTL Releasing command\n"));
return result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 532304d42f00..7456a26aef51 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -194,7 +194,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @bufflen: len of buffer
* @sense: optional sense buffer
* @sshdr: optional decoded sense header
- * @timeout: request timeout in seconds
+ * @timeout: request timeout in HZ
* @retries: number of times to retry request
* @flags: flags for ->cmd_flags
* @rq_flags: flags for ->rq_flags
@@ -211,20 +211,23 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
{
struct request *req;
struct scsi_request *rq;
- int ret = DRIVER_ERROR << 24;
+ int ret;
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
- return ret;
- rq = scsi_req(req);
+ return PTR_ERR(req);
- if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
- buffer, bufflen, GFP_NOIO))
- goto out;
+ rq = scsi_req(req);
+ if (bufflen) {
+ ret = blk_rq_map_kern(sdev->request_queue, req,
+ buffer, bufflen, GFP_NOIO);
+ if (ret)
+ goto out;
+ }
rq->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(rq->cmd, cmd, rq->cmd_len);
rq->retries = retries;
@@ -540,7 +543,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->rq_disk);
- if (!blk_rq_is_scsi(req)) {
+ if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
}
@@ -588,12 +591,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
switch (host_byte(result)) {
case DID_OK:
- /*
- * Also check the other bytes than the status byte in result
- * to handle the case when a SCSI LLD sets result to
- * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
- */
- if (scsi_status_is_good(result) && (result & ~0xff) == 0)
+ if (scsi_status_is_good(result))
return BLK_STS_OK;
return BLK_STS_IOERR;
case DID_TRANSPORT_FAILFAST:
@@ -728,6 +726,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
+ case 0x11: /* notify (enable spinup) required */
case 0x14: /* space allocation in progress */
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
@@ -787,7 +786,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
*/
if (!level && __ratelimit(&_rs)) {
scsi_print_result(cmd, NULL, FAILED);
- if (driver_byte(result) == DRIVER_SENSE)
+ if (sense_valid)
scsi_print_sense(cmd);
scsi_print_command(cmd);
}
@@ -875,7 +874,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
* intermediate statuses (both obsolete in SAM-4) as good.
*/
- if (status_byte(result) && scsi_status_is_good(result)) {
+ if ((result & 0xff) && scsi_status_is_good(result)) {
result = 0;
*blk_statp = BLK_STS_OK;
}
@@ -1115,7 +1114,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
bool in_flight;
int budget_token = cmd->budget_token;
- if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
+ if (!blk_rq_is_passthrough(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
scsi_initialize_rq(rq);
}
@@ -1556,7 +1555,7 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
* Special handling for passthrough commands, which don't go to the ULP
* at all:
*/
- if (blk_rq_is_scsi(req))
+ if (blk_rq_is_passthrough(req))
return scsi_setup_scsi_cmnd(sdev, req);
if (sdev->handler && sdev->handler->prep_fn) {
@@ -1899,18 +1898,6 @@ static const struct blk_mq_ops scsi_mq_ops = {
.get_rq_budget_token = scsi_mq_get_rq_budget_token,
};
-struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
-{
- sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
- if (IS_ERR(sdev->request_queue))
- return NULL;
-
- sdev->request_queue->queuedata = sdev;
- __scsi_init_queue(sdev->host, sdev->request_queue);
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
- return sdev->request_queue;
-}
-
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
@@ -2093,9 +2080,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
* @sshdr: place to put sense data (or NULL if no sense to be collected).
* must be SCSI_SENSE_BUFFERSIZE big.
*
- * Returns zero if unsuccessful, or the header offset (either 4
- * or 8 depending on whether a six or ten byte command was
- * issued) if successful.
+ * Returns zero if successful, or a negative error number on failure
*/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
@@ -2142,58 +2127,60 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
sshdr, timeout, retries, NULL);
+ if (result < 0)
+ return result;
/* This code looks awful: what it's doing is making sure an
* ILLEGAL REQUEST sense return identifies the actual command
* byte as the problem. MODE_SENSE commands can return
* ILLEGAL REQUEST if the code page isn't supported */
- if (use_10_for_ms && !scsi_status_is_good(result) &&
- driver_byte(result) == DRIVER_SENSE) {
+ if (!scsi_status_is_good(result)) {
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
/*
* Invalid command operation code
*/
- sdev->use_10_for_ms = 0;
+ if (use_10_for_ms) {
+ sdev->use_10_for_ms = 0;
+ goto retry;
+ }
+ }
+ if (scsi_status_is_check_condition(result) &&
+ sshdr->sense_key == UNIT_ATTENTION &&
+ retry_count) {
+ retry_count--;
goto retry;
}
}
+ return -EIO;
+ }
+ if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
+ (modepage == 6 || modepage == 8))) {
+ /* Initio breakage? */
+ header_length = 0;
+ data->length = 13;
+ data->medium_type = 0;
+ data->device_specific = 0;
+ data->longlba = 0;
+ data->block_descriptor_length = 0;
+ } else if (use_10_for_ms) {
+ data->length = buffer[0]*256 + buffer[1] + 2;
+ data->medium_type = buffer[2];
+ data->device_specific = buffer[3];
+ data->longlba = buffer[4] & 0x01;
+ data->block_descriptor_length = buffer[6]*256
+ + buffer[7];
+ } else {
+ data->length = buffer[0] + 1;
+ data->medium_type = buffer[1];
+ data->device_specific = buffer[2];
+ data->block_descriptor_length = buffer[3];
}
+ data->header_length = header_length;
- if (scsi_status_is_good(result)) {
- if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
- (modepage == 6 || modepage == 8))) {
- /* Initio breakage? */
- header_length = 0;
- data->length = 13;
- data->medium_type = 0;
- data->device_specific = 0;
- data->longlba = 0;
- data->block_descriptor_length = 0;
- } else if (use_10_for_ms) {
- data->length = buffer[0]*256 + buffer[1] + 2;
- data->medium_type = buffer[2];
- data->device_specific = buffer[3];
- data->longlba = buffer[4] & 0x01;
- data->block_descriptor_length = buffer[6]*256
- + buffer[7];
- } else {
- data->length = buffer[0] + 1;
- data->medium_type = buffer[1];
- data->device_specific = buffer[2];
- data->block_descriptor_length = buffer[3];
- }
- data->header_length = header_length;
- } else if ((status_byte(result) == CHECK_CONDITION) &&
- scsi_sense_valid(sshdr) &&
- sshdr->sense_key == UNIT_ATTENTION && retry_count) {
- retry_count--;
- goto retry;
- }
-
- return result;
+ return 0;
}
EXPORT_SYMBOL(scsi_mode_sense);
@@ -3218,3 +3205,20 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
+
+/**
+ * scsi_build_sense - build sense data for a command
+ * @scmd: scsi command for which the sense should be formatted
+ * @desc: Sense format (non-zero == descriptor format,
+ * 0 == fixed format)
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
+{
+ scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+}
+EXPORT_SYMBOL_GPL(scsi_build_sense);
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index 8ea44c6595ef..2317717935e9 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -385,7 +385,6 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
size_t off, logbuf_len;
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
- const char *db_string = scsi_driverbyte_string(cmd->result);
unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
@@ -426,13 +425,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (WARN_ON(off >= logbuf_len))
goto out_printk;
- if (db_string)
- off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s ", db_string);
- else
- off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x ",
- driver_byte(cmd->result));
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=DRIVER_OK ");
off += scnprintf(logbuf + off, logbuf_len - off,
"cmd_age=%lus", cmd_age);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 75d6f23e4fff..eae2235f79b5 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -91,7 +91,6 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 12f54571b83e..5b6996a2401b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -217,6 +217,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
{
unsigned int depth;
struct scsi_device *sdev;
+ struct request_queue *q;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -266,16 +267,19 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
- sdev->request_queue = scsi_mq_alloc_queue(sdev);
- if (!sdev->request_queue) {
+ q = blk_mq_init_queue(&sdev->host->tag_set);
+ if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
put_device(&starget->dev);
kfree(sdev);
goto out;
}
- WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
- sdev->request_queue->queuedata = sdev;
+ sdev->request_queue = q;
+ q->queuedata = sdev;
+ __scsi_init_queue(sdev->host, q);
+ blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+ WARN_ON_ONCE(!blk_get_queue(q));
depth = sdev->host->cmd_per_lun ?: 1;
@@ -471,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget);
if(error) {
- dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+ if (error != -ENXIO)
+ dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
scsi_target_destroy(starget);
@@ -616,14 +621,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
"scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
- if (result) {
+ if (result > 0) {
/*
* not-ready to ready transition [asc/ascq=0x28/0x0]
* or power-on, reset [asc/ascq=0x29/0x0], continue.
* INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway.
*/
- if (driver_byte(result) == DRIVER_SENSE &&
+ if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) ||
@@ -631,7 +636,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
(sshdr.ascq == 0))
continue;
}
- } else {
+ } else if (result == 0) {
/*
* if nothing was transferred, we try
* again. It's a workaround for some USB
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 32489d25158f..ae9bfc658203 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
/*
- * If the device state changes to SDEV_RUNNING, we need to run
- * the queue to avoid I/O hang.
+ * If the device state changes to SDEV_RUNNING, we need to
+ * rescan the device to revalidate it, and run the queue to
+ * avoid I/O hang.
*/
- if (ret == 0 && state == SDEV_RUNNING)
+ if (ret == 0 && state == SDEV_RUNNING) {
+ scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true);
+ }
mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index da5b503dc7a1..49748cd817a5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1686,7 +1686,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return -EBUSY;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 441f0152193f..d8b05d8b5470 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,16 +86,10 @@ struct iscsi_internal {
struct transport_container session_cont;
};
-/* Worker to perform connection failure on unresponsive connections
- * completely in kernel space.
- */
-static void stop_conn_work_fn(struct work_struct *work);
-static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
-
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
-static struct workqueue_struct *iscsi_destroy_workq;
+static struct workqueue_struct *iscsi_conn_cleanup_workq;
static DEFINE_IDA(iscsi_sess_ida);
/*
@@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+void iscsi_put_endpoint(struct iscsi_endpoint *ep)
+{
+ put_device(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
+
+/**
+ * iscsi_lookup_endpoint - get ep from handle
+ * @handle: endpoint handle
+ *
+ * Caller must do an iscsi_put_endpoint.
+ */
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct iscsi_endpoint *ep;
struct device *dev;
dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
@@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
if (!dev)
return NULL;
- ep = iscsi_dev_to_endpoint(dev);
- /*
- * we can drop this now because the interface will prevent
- * removals and lookups from racing.
- */
- put_device(dev);
- return ep;
+ return iscsi_dev_to_endpoint(dev);
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
@@ -440,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
struct iscsi_transport *t = iface->transport;
- int param;
- int param_type;
+ int param = -1;
if (attr == &dev_attr_iface_enabled.attr)
param = ISCSI_NET_PARAM_IFACE_ENABLE;
- else if (attr == &dev_attr_iface_vlan_id.attr)
- param = ISCSI_NET_PARAM_VLAN_ID;
- else if (attr == &dev_attr_iface_vlan_priority.attr)
- param = ISCSI_NET_PARAM_VLAN_PRIORITY;
- else if (attr == &dev_attr_iface_vlan_enabled.attr)
- param = ISCSI_NET_PARAM_VLAN_ENABLED;
- else if (attr == &dev_attr_iface_mtu.attr)
- param = ISCSI_NET_PARAM_MTU;
- else if (attr == &dev_attr_iface_port.attr)
- param = ISCSI_NET_PARAM_PORT;
- else if (attr == &dev_attr_iface_ipaddress_state.attr)
- param = ISCSI_NET_PARAM_IPADDR_STATE;
- else if (attr == &dev_attr_iface_delayed_ack_en.attr)
- param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
- else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
- param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
- else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
- param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
- else if (attr == &dev_attr_iface_tcp_wsf.attr)
- param = ISCSI_NET_PARAM_TCP_WSF;
- else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
- param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
- else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
- param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
- else if (attr == &dev_attr_iface_cache_id.attr)
- param = ISCSI_NET_PARAM_CACHE_ID;
- else if (attr == &dev_attr_iface_redirect_en.attr)
- param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr)
@@ -509,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
else if (attr == &dev_attr_iface_initiator_name.attr)
param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+
+ if (param != -1)
+ return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+ if (attr == &dev_attr_iface_vlan_id.attr)
+ param = ISCSI_NET_PARAM_VLAN_ID;
+ else if (attr == &dev_attr_iface_vlan_priority.attr)
+ param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+ else if (attr == &dev_attr_iface_vlan_enabled.attr)
+ param = ISCSI_NET_PARAM_VLAN_ENABLED;
+ else if (attr == &dev_attr_iface_mtu.attr)
+ param = ISCSI_NET_PARAM_MTU;
+ else if (attr == &dev_attr_iface_port.attr)
+ param = ISCSI_NET_PARAM_PORT;
+ else if (attr == &dev_attr_iface_ipaddress_state.attr)
+ param = ISCSI_NET_PARAM_IPADDR_STATE;
+ else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+ param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_iface_tcp_wsf.attr)
+ param = ISCSI_NET_PARAM_TCP_WSF;
+ else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_iface_cache_id.attr)
+ param = ISCSI_NET_PARAM_CACHE_ID;
+ else if (attr == &dev_attr_iface_redirect_en.attr)
+ param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -599,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
return 0;
}
- switch (param) {
- case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
- case ISCSI_IFACE_PARAM_HDRDGST_EN:
- case ISCSI_IFACE_PARAM_DATADGST_EN:
- case ISCSI_IFACE_PARAM_IMM_DATA_EN:
- case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
- case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
- case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
- case ISCSI_IFACE_PARAM_ERL:
- case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
- case ISCSI_IFACE_PARAM_FIRST_BURST:
- case ISCSI_IFACE_PARAM_MAX_R2T:
- case ISCSI_IFACE_PARAM_MAX_BURST:
- case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
- case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
- case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
- case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
- case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
- case ISCSI_IFACE_PARAM_INITIATOR_NAME:
- param_type = ISCSI_IFACE_PARAM;
- break;
- default:
- param_type = ISCSI_NET_PARAM;
- }
-
- return t->attr_is_visible(param_type, param);
+ return t->attr_is_visible(ISCSI_NET_PARAM, param);
}
static struct attribute *iscsi_iface_attrs[] = {
@@ -1620,12 +1597,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
-/*
- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
- * against the kernel stop_connection recovery mechanism
- */
-static DEFINE_MUTEX(conn_mutex);
-
static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
@@ -1976,6 +1947,8 @@ static void __iscsi_unblock_session(struct work_struct *work)
*/
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
+ flush_work(&session->block_work);
+
queue_work(iscsi_eh_timer_workq, &session->unblock_work);
/*
* Blocking the session can be done from any context so we only
@@ -2242,6 +2215,123 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);
+static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
+
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ conn->state = ISCSI_CONN_FAILED;
+ break;
+ case STOP_CONN_TERM:
+ conn->state = ISCSI_CONN_DOWN;
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
+ flag);
+ return;
+ }
+
+ conn->transport->stop_conn(conn, flag);
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
+}
+
+static int iscsi_if_stop_conn(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ int flag = ev->u.stop_conn.flag;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
+ /*
+ * If this is a termination, we have to call stop_conn with that flag
+ * so the correct states get set. If we haven't run the work yet, try to
+ * avoid the extra run.
+ */
+ if (flag == STOP_CONN_TERM) {
+ cancel_work_sync(&conn->cleanup_work);
+ iscsi_stop_conn(conn, flag);
+ } else {
+ /*
+ * Figure out if it was the kernel or userspace initiating this.
+ */
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ iscsi_stop_conn(conn, flag);
+ } else {
+ ISCSI_DBG_TRANS_CONN(conn,
+ "flush kernel conn cleanup.\n");
+ flush_work(&conn->cleanup_work);
+ }
+ /*
+ * Only clear for recovery to avoid extra cleanup runs during
+ * termination.
+ */
+ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ }
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
+ return 0;
+}
+
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ conn->state = ISCSI_CONN_FAILED;
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
+{
+ struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
+ cleanup_work);
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+
+ mutex_lock(&conn->ep_mutex);
+ /*
+ * If we are not at least bound, there is nothing for us to do. Userspace
+ * will do an ep_disconnect call if offload is used, but will not be
+ * doing a stop since there is nothing to clean up, so we have to clear
+ * the cleanup bit here.
+ */
+ if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
+ ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
+ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ mutex_unlock(&conn->ep_mutex);
+ return;
+ }
+
+ iscsi_ep_disconnect(conn, false);
+
+ if (system_state != SYSTEM_RUNNING) {
+ /*
+ * If the user has set up for the session to never time out,
+ * then hang like they wanted. For all other cases, fail right
+ * away since userspace is not going to relogin.
+ */
+ if (session->recovery_tmo > 0)
+ session->recovery_tmo = 0;
+ }
+
+ iscsi_stop_conn(conn, STOP_CONN_RECOVER);
+ mutex_unlock(&conn->ep_mutex);
+ ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
+}
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -2281,7 +2371,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
- INIT_LIST_HEAD(&conn->conn_list_err);
+ INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
conn->state = ISCSI_CONN_DOWN;
@@ -2338,7 +2428,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_lock_irqsave(&connlock, flags);
list_del(&conn->conn_list);
- list_del(&conn->conn_list_err);
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
@@ -2348,6 +2437,18 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+void iscsi_put_conn(struct iscsi_cls_conn *conn)
+{
+ put_device(&conn->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_conn);
+
+void iscsi_get_conn(struct iscsi_cls_conn *conn)
+{
+ get_device(&conn->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_get_conn);
+
/*
* iscsi interface functions
*/
@@ -2453,77 +2554,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
-/*
- * This can be called without the rx_queue_mutex, if invoked by the kernel
- * stop work. But, in that case, it is guaranteed not to race with
- * iscsi_destroy by conn_mutex.
- */
-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
-{
- /*
- * It is important that this path doesn't rely on
- * rx_queue_mutex, otherwise, a thread doing allocation on a
- * start_session/start_connection could sleep waiting on a
- * writeback to a failed iscsi device, that cannot be recovered
- * because the lock is held. If we don't hold it here, the
- * kernel stop_conn_work_fn has a chance to stop the broken
- * session and resolve the allocation.
- *
- * Still, the user invoked .stop_conn() needs to be serialized
- * with stop_conn_work_fn by a private mutex. Not pretty, but
- * it works.
- */
- mutex_lock(&conn_mutex);
- switch (flag) {
- case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
- break;
- case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
- break;
- default:
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- goto unlock;
- }
-
- conn->transport->stop_conn(conn, flag);
-unlock:
- mutex_unlock(&conn_mutex);
-}
-
-static void stop_conn_work_fn(struct work_struct *work)
-{
- struct iscsi_cls_conn *conn, *tmp;
- unsigned long flags;
- LIST_HEAD(recovery_list);
-
- spin_lock_irqsave(&connlock, flags);
- if (list_empty(&connlist_err)) {
- spin_unlock_irqrestore(&connlock, flags);
- return;
- }
- list_splice_init(&connlist_err, &recovery_list);
- spin_unlock_irqrestore(&connlock, flags);
-
- list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
- uint32_t sid = iscsi_conn_get_sid(conn);
- struct iscsi_cls_session *session;
-
- session = iscsi_session_lookup(sid);
- if (session) {
- if (system_state != SYSTEM_RUNNING) {
- session->recovery_tmo = 0;
- iscsi_if_stop_conn(conn, STOP_CONN_TERM);
- } else {
- iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
- }
- }
-
- list_del_init(&conn->conn_list_err);
- }
-}
-
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -2531,12 +2561,9 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
- unsigned long flags;
- spin_lock_irqsave(&connlock, flags);
- list_add(&conn->conn_list_err, &connlist_err);
- spin_unlock_irqrestore(&connlock, flags);
- queue_work(system_unbound_wq, &stop_conn_work);
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
+ queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2866,26 +2893,17 @@ static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
- unsigned long flags;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
- spin_lock_irqsave(&connlock, flags);
- if (!list_empty(&conn->conn_list_err)) {
- spin_unlock_irqrestore(&connlock, flags);
- return -EAGAIN;
- }
- spin_unlock_irqrestore(&connlock, flags);
-
+ ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
+ flush_work(&conn->cleanup_work);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
- mutex_lock(&conn_mutex);
if (transport->destroy_conn)
transport->destroy_conn(conn);
- mutex_unlock(&conn_mutex);
-
return 0;
}
@@ -2975,15 +2993,31 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
return -EINVAL;
+
conn = ep->conn;
- if (conn) {
- mutex_lock(&conn->ep_mutex);
- conn->ep = NULL;
+ if (!conn) {
+ /*
+ * conn was not even bound yet, so we can't get iscsi conn
+ * failures yet.
+ */
+ transport->ep_disconnect(ep);
+ goto put_ep;
+ }
+
+ mutex_lock(&conn->ep_mutex);
+ /* Check if this was a conn error and the kernel took ownership */
+ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
mutex_unlock(&conn->ep_mutex);
- conn->state = ISCSI_CONN_FAILED;
+
+ flush_work(&conn->cleanup_work);
+ goto put_ep;
}
- transport->ep_disconnect(ep);
+ iscsi_ep_disconnect(conn, false);
+ mutex_unlock(&conn->ep_mutex);
+put_ep:
+ iscsi_put_endpoint(ep);
return 0;
}
@@ -3009,6 +3043,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
rc = iscsi_if_ep_disconnect(transport,
@@ -3639,18 +3674,129 @@ exit_host_stats:
return err;
}
+static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn = NULL;
+ struct iscsi_endpoint *ep;
+ uint32_t pdu_len;
+ int err = 0;
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_CONN:
+ return iscsi_if_create_conn(transport, ev);
+ case ISCSI_UEVENT_DESTROY_CONN:
+ return iscsi_if_destroy_conn(transport, ev);
+ case ISCSI_UEVENT_STOP_CONN:
+ return iscsi_if_stop_conn(transport, ev);
+ }
+
+ /*
+ * The following cmds need to be run under the ep_mutex so in-kernel
+ * conn cleanup (ep_disconnect + unbind and conn) is not done while
+ * these are running. They also must not run if we have just run a conn
+ * cleanup because they would set the state in a way that might allow
+ * IO or send IO themselves.
+ */
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_START_CONN:
+ conn = iscsi_conn_lookup(ev->u.start_conn.sid,
+ ev->u.start_conn.cid);
+ break;
+ case ISCSI_UEVENT_BIND_CONN:
+ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+ break;
+ }
+
+ if (!conn)
+ return -EINVAL;
+
+ mutex_lock(&conn->ep_mutex);
+ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ mutex_unlock(&conn->ep_mutex);
+ ev->r.retcode = -ENOTCONN;
+ return 0;
+ }
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_BIND_CONN:
+ if (conn->ep) {
+ /*
+ * For offload boot support where iscsid is restarted
+ * during the pivot root stage, the ep will be intact
+ * here when the new iscsid instance starts up and
+ * reconnects.
+ */
+ iscsi_ep_disconnect(conn, true);
+ }
+
+ session = iscsi_session_lookup(ev->u.b_conn.sid);
+ if (!session) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_BOUND;
+
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+ conn->ep = ep;
+ iscsi_put_endpoint(ep);
+ } else {
+ err = -ENOTCONN;
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn binding\n");
+ }
+ break;
+ case ISCSI_UEVENT_START_CONN:
+ ev->r.retcode = transport->start_conn(conn);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_UP;
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+
+ if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->send_pdu(conn,
+ (struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
+ (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+ ev->u.send_pdu.data_size);
+ break;
+ default:
+ err = -ENOSYS;
+ }
+
+ mutex_unlock(&conn->ep_mutex);
+ return err;
+}
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
u32 portid;
- u32 pdu_len;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
- struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -3691,6 +3837,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3715,7 +3862,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- queue_work(iscsi_destroy_workq, &session->destroy_work);
+ queue_work(system_unbound_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
@@ -3726,89 +3873,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
else
err = -EINVAL;
break;
- case ISCSI_UEVENT_CREATE_CONN:
- err = iscsi_if_create_conn(transport, ev);
- break;
- case ISCSI_UEVENT_DESTROY_CONN:
- err = iscsi_if_destroy_conn(transport, ev);
- break;
- case ISCSI_UEVENT_BIND_CONN:
- session = iscsi_session_lookup(ev->u.b_conn.sid);
- conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
-
- if (conn && conn->ep)
- iscsi_if_ep_disconnect(transport, conn->ep->id);
-
- if (!session || !conn) {
- err = -EINVAL;
- break;
- }
-
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
- mutex_unlock(&conn_mutex);
-
- if (ev->r.retcode || !transport->ep_connect)
- break;
-
- ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
- if (ep) {
- ep->conn = conn;
-
- mutex_lock(&conn->ep_mutex);
- conn->ep = ep;
- mutex_unlock(&conn->ep_mutex);
- } else
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "Could not set ep conn "
- "binding\n");
- break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
break;
- case ISCSI_UEVENT_START_CONN:
- conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
- if (conn) {
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->start_conn(conn);
- if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
- mutex_unlock(&conn_mutex);
- }
- else
- err = -EINVAL;
- break;
+ case ISCSI_UEVENT_CREATE_CONN:
+ case ISCSI_UEVENT_DESTROY_CONN:
case ISCSI_UEVENT_STOP_CONN:
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (conn)
- iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
- else
- err = -EINVAL;
- break;
+ case ISCSI_UEVENT_START_CONN:
+ case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-
- if ((ev->u.send_pdu.hdr_size > pdu_len) ||
- (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
- err = -EINVAL;
- break;
- }
-
- conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn) {
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->send_pdu(conn,
- (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
- (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
- ev->u.send_pdu.data_size);
- mutex_unlock(&conn_mutex);
- }
- else
- err = -EINVAL;
+ err = iscsi_if_transport_conn(transport, nlh);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -4656,6 +4730,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
int err;
BUG_ON(!tt);
+ WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
priv = iscsi_if_transport_lookup(tt);
if (priv)
@@ -4810,10 +4885,10 @@ static __init int iscsi_transport_init(void)
goto release_nls;
}
- iscsi_destroy_workq = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, "iscsi_destroy");
- if (!iscsi_destroy_workq) {
+ iscsi_conn_cleanup_workq = alloc_workqueue("%s",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ "iscsi_conn_cleanup");
+ if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
goto destroy_wq;
}
@@ -4843,7 +4918,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
- destroy_workqueue(iscsi_destroy_workq);
+ destroy_workqueue(iscsi_conn_cleanup_workq);
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index c9abed8429c9..4a96fb05731d 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1229,16 +1229,15 @@ int sas_read_port_mode_page(struct scsi_device *sdev)
char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
struct scsi_mode_data mode_data;
- int res, error;
+ int error;
if (!buffer)
return -ENOMEM;
- res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
- &mode_data, NULL);
+ error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
+ &mode_data, NULL);
- error = -EINVAL;
- if (!scsi_status_is_good(res))
+ if (error)
goto out;
msdata = buffer + mode_data.header_length +
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index c37dd15d16d2..5af7a10e9514 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -127,7 +127,7 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
RQF_PM, NULL);
- if (driver_byte(result) != DRIVER_SENSE ||
+ if (result < 0 || !scsi_sense_valid(sshdr) ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 746a7def2825..b8d55af763f9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -98,11 +98,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS 16
-#else
-#define SD_MINORS 0
-#endif
static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
@@ -1672,7 +1668,7 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
&sshdr);
/* failed to execute TUR, assume media not present */
- if (host_byte(retval)) {
+ if (retval < 0 || host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
@@ -1733,16 +1729,20 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
- if (driver_byte(res) == DRIVER_SENSE)
+ if (res < 0)
+ return res;
+
+ if (scsi_status_is_check_condition(res) &&
+ scsi_sense_valid(sshdr)) {
sd_print_sense_hdr(sdkp, sshdr);
- /* we need to evaluate the error return */
- if (scsi_sense_valid(sshdr) &&
- (sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20 || /* invalid command */
- (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
+ /* we need to evaluate the error return */
+ if (sshdr->asc == 0x3a || /* medium not present */
+ sshdr->asc == 0x20 || /* invalid command */
+ (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
/* this is no error here */
return 0;
+ }
switch (host_byte(res)) {
/* ignore errors due to racing a disconnection */
@@ -1839,7 +1839,7 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
- if (driver_byte(result) == DRIVER_SENSE &&
+ if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
scsi_print_sense_hdr(sdev, NULL, &sshdr);
@@ -2083,7 +2083,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
}
sdkp->medium_access_timed_out = 0;
- if (driver_byte(result) != DRIVER_SENSE &&
+ if (!scsi_status_is_check_condition(result) &&
(!sense_valid || sense_deferred))
goto out;
@@ -2186,12 +2186,12 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
retries++;
- } while (retries < 3 &&
+ } while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
- ((driver_byte(the_result) == DRIVER_SENSE) &&
+ (scsi_status_is_check_condition(the_result) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
- if (driver_byte(the_result) != DRIVER_SENSE) {
+ if (!scsi_status_is_check_condition(the_result)) {
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result)) {
@@ -2319,7 +2319,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr *sshdr, int sense_valid,
int the_result)
{
- if (driver_byte(the_result) == DRIVER_SENSE)
+ if (sense_valid)
sd_print_sense_hdr(sdkp, sshdr);
else
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
@@ -2376,7 +2376,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result) {
+ if (the_result > 0) {
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == ILLEGAL_REQUEST &&
@@ -2461,7 +2461,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result) {
+ if (the_result > 0) {
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == UNIT_ATTENTION &&
@@ -2684,18 +2684,18 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
* 5: Illegal Request, Sense Code 24: Invalid field in
* CDB.
*/
- if (!scsi_status_is_good(res))
+ if (res < 0)
res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
/*
* Third attempt: ask 255 bytes, as we did earlier.
*/
- if (!scsi_status_is_good(res))
+ if (res < 0)
res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
&data, NULL);
}
- if (!scsi_status_is_good(res)) {
+ if (res < 0) {
sd_first_printk(KERN_WARNING, sdkp,
"Test WP failed, assume Write Enabled\n");
} else {
@@ -2756,7 +2756,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
&data, &sshdr);
- if (!scsi_status_is_good(res))
+ if (res < 0)
goto bad_sense;
if (!data.header_length) {
@@ -2788,7 +2788,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
&data, &sshdr);
- if (scsi_status_is_good(res)) {
+ if (!res) {
int offset = data.header_length + data.block_descriptor_length;
while (offset < len) {
@@ -2906,7 +2906,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr);
- if (!scsi_status_is_good(res) || !data.header_length ||
+ if (res < 0 || !data.header_length ||
data.length < 6) {
sd_first_printk(KERN_WARNING, sdkp,
"getting Control mode page failed, assume no ATO\n");
@@ -3605,12 +3605,12 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (driver_byte(res) == DRIVER_SENSE)
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
sd_print_sense_hdr(sdkp, &sshdr);
- if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
- sshdr.asc == 0x3a)
- res = 0;
+ if (sshdr.asc == 0x3a)
+ res = 0;
+ }
}
/* SCSI error codes must not go to the generic layer */
@@ -3820,15 +3820,14 @@ void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
{
const char *hb_string = scsi_hostbyte_string(result);
- const char *db_string = scsi_driverbyte_string(result);
- if (hb_string || db_string)
+ if (hb_string)
sd_printk(KERN_INFO, sdkp,
"%s: Result: hostbyte=%s driverbyte=%s\n", msg,
hb_string ? hb_string : "invalid",
- db_string ? db_string : "invalid");
+ "DRIVER_OK");
else
sd_printk(KERN_INFO, sdkp,
- "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
- msg, host_byte(result), driver_byte(result));
+ "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
+ msg, host_byte(result), "DRIVER_OK");
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e45d8d94574c..186b5ff52c3a 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -116,8 +116,7 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
sd_printk(KERN_ERR, sdkp,
"REPORT ZONES start lba %llu failed\n", lba);
sd_print_result(sdkp, "REPORT ZONES", result);
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr))
+ if (result > 0 && scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EIO;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index def7ec3bbaf9..91e2221bbb0d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -498,9 +498,11 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
- (DRIVER_SENSE & hp->driver_status))
+ (srp->sense_b[0] & 0x70) == 0x70) {
+ old_hdr->driver_status = DRIVER_SENSE;
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
+ }
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
@@ -574,7 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
- (DRIVER_SENSE & hp->driver_status)) {
+ (srp->sense_b[0] & 0x70) == 0x70) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
@@ -583,6 +585,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
err = -EFAULT;
goto err_out;
}
+ hp->driver_status = DRIVER_SENSE;
hp->sb_len_wr = len;
}
}
@@ -1373,7 +1376,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
- srp->header.msg_status = msg_byte(result);
+ srp->header.msg_status = COMMAND_COMPLETE;
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
@@ -1756,7 +1759,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
* not expect an EWOULDBLOCK from this condition.
*/
rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 5db16509b6e1..dcc0b9618a64 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -3087,8 +3087,7 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
}
if (device_offline && sense_data_length == 0)
- scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
- 0x3e, 0x1);
+ scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 4cd86115cfb2..703f229862fc 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -114,10 +114,7 @@ snic_queue_exch_ver_req(struct snic *snic)
rqi = snic_req_init(snic, 0);
if (!rqi) {
- SNIC_HOST_ERR(snic->shost,
- "Queuing Exch Ver Req failed, err = %d\n",
- ret);
-
+ SNIC_HOST_ERR(snic->shost, "Init Exch Ver Req failed\n");
ret = -ENOMEM;
goto error;
}
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 3aeee856d5c7..5e0faeba516e 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -430,21 +430,19 @@ static const struct seq_operations snic_trc_sops = {
DEFINE_SEQ_ATTRIBUTE(snic_trc);
+#define TRC_ENABLE_FILE "tracing_enable"
+#define TRC_FILE "trace"
/*
* snic_trc_debugfs_init : creates trace/tracing_enable files for trace
* under debugfs
*/
void snic_trc_debugfs_init(void)
{
- snic_glob->trc.trc_enable = debugfs_create_bool("tracing_enable",
- S_IFREG | S_IRUGO | S_IWUSR,
- snic_glob->trc_root,
- &snic_glob->trc.enable);
-
- snic_glob->trc.trc_file = debugfs_create_file("trace",
- S_IFREG | S_IRUGO | S_IWUSR,
- snic_glob->trc_root, NULL,
- &snic_trc_fops);
+ debugfs_create_bool(TRC_ENABLE_FILE, S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root, &snic_glob->trc.enable);
+
+ debugfs_create_file(TRC_FILE, S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root, NULL, &snic_trc_fops);
}
/*
@@ -453,9 +451,6 @@ void snic_trc_debugfs_init(void)
void
snic_trc_debugfs_term(void)
{
- debugfs_remove(snic_glob->trc.trc_file);
- snic_glob->trc.trc_file = NULL;
-
- debugfs_remove(snic_glob->trc.trc_enable);
- snic_glob->trc.trc_enable = NULL;
+ debugfs_remove(debugfs_lookup(TRC_FILE, snic_glob->trc_root));
+ debugfs_remove(debugfs_lookup(TRC_ENABLE_FILE, snic_glob->trc_root));
}
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
index 87dcc7457d15..ce305b4b8fa2 100644
--- a/drivers/scsi/snic/snic_trc.h
+++ b/drivers/scsi/snic/snic_trc.h
@@ -46,9 +46,6 @@ struct snic_trc {
u32 rd_idx;
u32 wr_idx;
bool enable; /* Control Variable for Tracing */
-
- struct dentry *trc_enable; /* debugfs file object */
- struct dentry *trc_file;
};
int snic_trc_init(void);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 7815ed642d43..a6d3ac0a6cbc 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -221,7 +221,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE;
else if (med->media_event_code == 3)
- return DISK_EVENT_EJECT_REQUEST;
+ return DISK_EVENT_MEDIA_CHANGE;
return 0;
}
@@ -340,7 +340,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
* care is taken to avoid unnecessary additional work such as
* memcpy's that could be avoided.
*/
- if (driver_byte(result) != 0 && /* An error occurred */
+ if (scsi_status_is_check_condition(result) &&
(SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */
switch (SCpnt->sense_buffer[2]) {
case MEDIUM_ERROR:
@@ -913,7 +913,7 @@ static void get_capabilities(struct scsi_cd *cd)
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ if (rc < 0 || data.length > ms_len ||
data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 15c305283b6c..79d9aa2df528 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -201,7 +201,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
- if (driver_byte(result) != 0) {
+ if (result < 0) {
+ err = result;
+ goto out;
+ }
+ if (scsi_status_is_check_condition(result)) {
switch (sshdr->sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3b1afe1d5b27..c6f14540ae03 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -390,8 +390,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (!debugging) { /* Abnormal conditions for tape */
if (!cmdstatp->have_sense)
st_printk(KERN_WARNING, STp,
- "Error %x (driver bt 0x%x, host bt 0x%x).\n",
- result, driver_byte(result), host_byte(result));
+ "Error %x (driver bt 0, host bt 0x%x).\n",
+ result, host_byte(result));
else if (cmdstatp->have_sense &&
scode != NO_SENSE &&
scode != RECOVERED_ERROR &&
@@ -549,9 +549,9 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
req = blk_get_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
- return DRIVER_ERROR << 24;
+ return PTR_ERR(req);
rq = scsi_req(req);
req->rq_flags |= RQF_QUIET;
@@ -562,7 +562,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
GFP_KERNEL);
if (err) {
blk_put_request(req);
- return DRIVER_ERROR << 24;
+ return err;
}
}
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 12471208c7a8..491b435273a6 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -398,11 +398,8 @@ static struct status_msg *stex_get_status(struct st_hba *hba)
static void stex_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
/* "Invalid field in cdb" */
- scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
- 0x0);
+ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
done(cmd);
}
@@ -740,7 +737,7 @@ static void stex_scsi_done(struct st_ccb *ccb)
result |= DID_OK << 16;
break;
case SAM_STAT_CHECK_CONDITION:
- result |= DRIVER_SENSE << 24;
+ result |= DID_OK << 16;
break;
case SAM_STAT_BUSY:
result |= DID_BUS_BUSY << 16;
@@ -751,7 +748,7 @@ static void stex_scsi_done(struct st_ccb *ccb)
}
}
else if (ccb->srb_status & SRB_SEE_SENSE)
- result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
+ result = SAM_STAT_CHECK_CONDITION;
else switch (ccb->srb_status) {
case SRB_STATUS_SELECTION_TIMEOUT:
result = DID_NO_CONNECT << 16;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 403753929320..37506b3fe5a9 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1031,17 +1031,40 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
struct storvsc_scan_work *wrk;
void (*process_err_fn)(struct work_struct *work);
struct hv_host_device *host_dev = shost_priv(host);
- bool do_work = false;
- switch (SRB_STATUS(vm_srb->srb_status)) {
- case SRB_STATUS_ERROR:
+ /*
+ * In some situations, Hyper-V sets multiple bits in the
+ * srb_status, such as ABORTED and ERROR. So process them
+ * individually, with the most specific bits first.
+ */
+
+ if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
+ set_host_byte(scmnd, DID_NO_CONNECT);
+ process_err_fn = storvsc_remove_lun;
+ goto do_work;
+ }
+
+ if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ /* Capacity data has changed */
+ (asc == 0x2a) && (ascq == 0x9)) {
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
+ }
+
+ if (vm_srb->srb_status & SRB_STATUS_ERROR) {
/*
* Let upper layer deal with error when
* sense message is present.
*/
-
if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
- break;
+ return;
+
/*
* If there is an error; offline the device since all
* error recovery strategies would have already been
@@ -1054,37 +1077,19 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
set_host_byte(scmnd, DID_PASSTHROUGH);
break;
/*
- * On Some Windows hosts TEST_UNIT_READY command can return
- * SRB_STATUS_ERROR, let the upper level code deal with it
- * based on the sense information.
+ * On some Hyper-V hosts the TEST_UNIT_READY command can
+ * return SRB_STATUS_ERROR. Let the upper level code
+ * deal with it based on the sense information.
*/
case TEST_UNIT_READY:
break;
default:
set_host_byte(scmnd, DID_ERROR);
}
- break;
- case SRB_STATUS_INVALID_LUN:
- set_host_byte(scmnd, DID_NO_CONNECT);
- do_work = true;
- process_err_fn = storvsc_remove_lun;
- break;
- case SRB_STATUS_ABORTED:
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
- (asc == 0x2a) && (ascq == 0x9)) {
- do_work = true;
- process_err_fn = storvsc_device_scan;
- /*
- * Retry the I/O that triggered this.
- */
- set_host_byte(scmnd, DID_REQUEUE);
- }
- break;
}
+ return;
- if (!do_work)
- return;
-
+do_work:
/*
* We need to schedule work to process this error; schedule it.
*/
@@ -1112,6 +1117,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
struct Scsi_Host *host;
u32 payload_sz = cmd_request->payload_sz;
void *payload = cmd_request->payload;
+ bool sense_ok;
host = stor_dev->host;
@@ -1121,11 +1127,10 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
scmnd->result = vm_srb->scsi_status;
if (scmnd->result) {
- if (scsi_normalize_sense(scmnd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr) &&
- !(sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x03A) &&
- do_logging(STORVSC_LOGGING_ERROR))
+ sense_ok = scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+
+ if (sense_ok && do_logging(STORVSC_LOGGING_WARN))
scsi_print_sense_hdr(scmnd->device, "storvsc",
&sense_hdr);
}
@@ -1182,53 +1187,51 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
}
-
/* Copy over the status...etc */
stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
- /* Validate sense_info_length (from Hyper-V) */
- if (vstor_packet->vm_srb.sense_info_length > sense_buffer_size)
- vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
-
- stor_pkt->vm_srb.sense_info_length =
- vstor_packet->vm_srb.sense_info_length;
+ /*
+ * Copy over the sense_info_length, but limit to the known max
+ * size if Hyper-V returns a bad value.
+ */
+ stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size,
+ vstor_packet->vm_srb.sense_info_length);
if (vstor_packet->vm_srb.scsi_status != 0 ||
- vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
- storvsc_log(device, STORVSC_LOGGING_WARN,
- "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
- stor_pkt->vm_srb.cdb[0],
- vstor_packet->vm_srb.scsi_status,
- vstor_packet->vm_srb.srb_status);
+ vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
- if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
- /* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status &
- SRB_STATUS_AUTOSENSE_VALID) {
- /* autosense data available */
-
- storvsc_log(device, STORVSC_LOGGING_WARN,
- "stor pkt %p autosense data valid - len %d\n",
- request, vstor_packet->vm_srb.sense_info_length);
-
- memcpy(request->cmd->sense_buffer,
- vstor_packet->vm_srb.sense_data,
- vstor_packet->vm_srb.sense_info_length);
+ /*
+ * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
+ * return errors when detecting devices using TEST_UNIT_READY,
+ * and logging these as errors produces unhelpful noise.
+ */
+ int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+ STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
- }
+ storvsc_log(device, loglevel,
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ request->cmd->request->tag,
+ stor_pkt->vm_srb.cdb[0],
+ vstor_packet->vm_srb.scsi_status,
+ vstor_packet->vm_srb.srb_status,
+ vstor_packet->status);
}
+ if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
+ (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
+ memcpy(request->cmd->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ stor_pkt->vm_srb.sense_info_length);
+
stor_pkt->vm_srb.data_transfer_length =
- vstor_packet->vm_srb.data_transfer_length;
+ vstor_packet->vm_srb.data_transfer_length;
storvsc_command_completion(request, stor_device);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
wake_up(&stor_device->waiting_to_drain);
-
-
}
static void storvsc_on_receive(struct storvsc_device *stor_device,
@@ -1717,7 +1720,7 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
* this. So, don't send it.
*/
case SET_WINDOW:
- scmnd->result = DID_ERROR << 16;
+ set_host_byte(scmnd, DID_ERROR);
allowed = false;
break;
default:
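The storvsc hunks above clamp the Hyper-V-reported sense_info_length before any sense data is copied, and route host-byte errors through set_host_byte() instead of an open-coded shift. A minimal sketch of the clamping idea, using a hypothetical helper name:

/*
 * Hedged sketch: never trust a length reported by the host side; clamp
 * it to the local buffer size before it reaches memcpy(), exactly as
 * the min_t() in storvsc_on_io_completion() does.
 */
static inline u8 clamp_reported_sense_len(u8 reported_len, u8 local_buf_size)
{
        return min_t(u8, reported_len, local_buf_size);
}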
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d9a045f9858c..16b65fc4405c 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -170,9 +170,8 @@ static int sym_xerr_cam_status(int cam_status, int x_status)
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
{
struct scsi_cmnd *cmd = cp->cmd;
- u_int cam_status, scsi_status, drv_status;
+ u_int cam_status, scsi_status;
- drv_status = 0;
cam_status = DID_OK;
scsi_status = cp->ssss_status;
@@ -186,7 +185,6 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cp->xerr_status == 0) {
cam_status = sym_xerr_cam_status(DID_OK,
cp->sv_xerr_status);
- drv_status = DRIVER_SENSE;
/*
* Bounce back the sense data to user.
*/
@@ -235,7 +233,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
scsi_set_resid(cmd, resid);
- cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
+ cmd->result = (cam_status << 16) | scsi_status;
}
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
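With DRIVER_SENSE gone, the sym53c8xx result word carries only the host (CAM) byte and the SCSI status byte. A sketch of that layout, assuming the conventional placement of the host byte in bits 16-23:

/*
 * Hedged sketch of the result composition used above: SCSI status in
 * bits 0-7, host (CAM) status in bits 16-23, no driver byte anymore.
 */
static inline u32 sym_compose_result(u8 cam_status, u8 scsi_status)
{
        return ((u32)cam_status << 16) | scsi_status;
}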
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 07cf415367b4..2d137953e7b4 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -115,6 +115,7 @@ config SCSI_UFS_MEDIATEK
tristate "Mediatek specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
select PHY_MTK_UFS
+ select RESET_TI_SYSCON
help
This selects the Mediatek specific additions to UFSHCD platform driver.
UFS host on Mediatek needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 13d92043e13b..908ff39c4856 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -323,6 +323,8 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver cdns_ufs_pltfrm_driver = {
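The same pair of callbacks is added to every UFS host glue driver touched by this series (tc-dwc-g210, exynos, hisi, mediatek, qcom below), so each driver's dev_pm_ops ends up with the shape sketched here; the runtime callbacks themselves are unchanged.

static const struct dev_pm_ops example_ufs_glue_pm_ops = {
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
};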
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index 67a6a61154b7..ec4589afbc13 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -148,6 +148,8 @@ static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
.runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
.runtime_resume = tc_dwc_g210_pci_runtime_resume,
.runtime_idle = tc_dwc_g210_pci_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index ced9ef4d7c78..4e1ff209b933 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -13,7 +13,7 @@ void __init ufs_debugfs_init(void)
ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}
-void __exit ufs_debugfs_exit(void)
+void ufs_debugfs_exit(void)
{
debugfs_remove_recursive(ufs_debugfs_root);
}
@@ -60,14 +60,14 @@ __acquires(&hba->host_sem)
up(&hba->host_sem);
return -EBUSY;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
return 0;
}
static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
__releases(&hba->host_sem)
{
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
up(&hba->host_sem);
}
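Throughout this series, pm_runtime_get_sync(hba->dev)/pm_runtime_put_sync(hba->dev) calls are replaced with ufshcd_rpm_get_sync()/ufshcd_rpm_put_sync(). Judging from the later hunks that check the UFS device W-LUN rather than the hba device, these are presumably thin wrappers along the lines of the sketch below (an assumption; the actual definitions are not part of this excerpt):

/* Assumed shape of the new runtime-PM helpers (not shown in this diff). */
static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
{
        return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
}

static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
{
        return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
}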
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
index 3ca29d30460a..97548a3f90eb 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -9,7 +9,7 @@ struct ufs_hba;
#ifdef CONFIG_DEBUG_FS
void __init ufs_debugfs_init(void);
-void __exit ufs_debugfs_exit(void);
+void ufs_debugfs_exit(void);
void ufs_debugfs_hba_init(struct ufs_hba *hba);
void ufs_debugfs_hba_exit(struct ufs_hba *hba);
void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 70647eacf195..cf46d6f86e0e 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -107,6 +107,7 @@ enum {
#define CNTR_DIV_VAL 40
+static struct exynos_ufs_drv_data exynos_ufs_drvs;
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
@@ -1048,7 +1049,7 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
exynos_ufs_ungate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
- const unsigned int granularity_tbl[] = {
+ static const unsigned int granularity_tbl[] = {
1, 4, 8, 16, 32, 100
};
int h8_time = attr->pa_hibern8time *
@@ -1231,8 +1232,32 @@ static int exynos_ufs_remove(struct platform_device *pdev)
return 0;
}
-struct exynos_ufs_drv_data exynos_ufs_drvs = {
+static struct exynos_ufs_uic_attr exynos7_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x30103,
+};
+static struct exynos_ufs_drv_data exynos_ufs_drvs = {
.compatible = "samsung,exynos7-ufs",
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
@@ -1267,6 +1292,8 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver exynos_ufs_pltform = {
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index 06ee565f7eb0..67505fe32ebf 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -245,30 +245,4 @@ static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE);
}
-struct exynos_ufs_drv_data exynos_ufs_drvs;
-
-struct exynos_ufs_uic_attr exynos7_uic_attr = {
- .tx_trailingclks = 0x10,
- .tx_dif_p_nsec = 3000000, /* unit: ns */
- .tx_dif_n_nsec = 1000000, /* unit: ns */
- .tx_high_z_cnt_nsec = 20000, /* unit: ns */
- .tx_base_unit_nsec = 100000, /* unit: ns */
- .tx_gran_unit_nsec = 4000, /* unit: ns */
- .tx_sleep_cnt = 1000, /* unit: ns */
- .tx_min_activatetime = 0xa,
- .rx_filler_enable = 0x2,
- .rx_dif_p_nsec = 1000000, /* unit: ns */
- .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
- .rx_base_unit_nsec = 100000, /* unit: ns */
- .rx_gran_unit_nsec = 4000, /* unit: ns */
- .rx_sleep_cnt = 1280, /* unit: ns */
- .rx_stall_cnt = 320, /* unit: ns */
- .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
- .pa_dbg_option_suite = 0x30103,
-};
#endif /* _UFS_EXYNOS_H_ */
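The exynos hunks also move the exynos7_uic_attr and exynos_ufs_drvs definitions out of the header and mark them static: defining (rather than declaring) objects in a header creates one copy per includer and leads to multiple-definition link errors as soon as a second file includes it. A sketch of the before/after pattern, with hypothetical names:

/* before: ufs-foo.h, included by several .c files */
struct foo_tbl foo_tbl = { /* ... */ };          /* duplicate definitions */

/* after: ufs-foo.c only */
static struct foo_tbl foo_tbl = { /* ... */ };   /* one private definition */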
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index d0626773eb38..5b147a48161b 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -400,7 +400,7 @@ static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- if (ufshcd_is_runtime_pm(pm_op))
+ if (pm_op == UFS_RUNTIME_PM)
return 0;
if (host->in_suspend) {
@@ -577,6 +577,8 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_hisi_pltform = {
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0a84ec9e7cea..d2c251628a05 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -822,12 +822,10 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
- /* configure auto-hibern8 timer to 10ms */
- if (ufshcd_is_auto_hibern8_supported(hba)) {
- ufshcd_auto_hibern8_update(hba,
- FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
- }
+ /* will be configured during hba probe */

+ if (ufshcd_is_auto_hibern8_supported(hba))
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
ufs_mtk_setup_clk_gating(hba);
@@ -858,6 +856,9 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
+ /* disable hba before device reset */
+ ufshcd_hba_stop(hba);
+
ufs_mtk_device_reset_ctrl(0, res);
/*
@@ -1084,12 +1085,42 @@ static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
+ struct device_node *reset_node;
+ struct platform_device *reset_pdev;
+ struct device_link *link;
+
+ reset_node = of_find_compatible_node(NULL, NULL,
+ "ti,syscon-reset");
+ if (!reset_node) {
+ dev_notice(dev, "find ti,syscon-reset fail\n");
+ goto skip_reset;
+ }
+ reset_pdev = of_find_device_by_node(reset_node);
+ if (!reset_pdev) {
+ dev_notice(dev, "find reset_pdev fail\n");
+ goto skip_reset;
+ }
+ link = device_link_add(dev, &reset_pdev->dev,
+ DL_FLAG_AUTOPROBE_CONSUMER);
+ if (!link) {
+ dev_notice(dev, "add reset device_link fail\n");
+ goto skip_reset;
+ }
+ /* supplier is not probed */
+ if (link->status == DL_STATE_DORMANT) {
+ err = -EPROBE_DEFER;
+ goto out;
+ }
+skip_reset:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+
+out:
if (err)
dev_info(dev, "probe failed %d\n", err);
+ of_node_put(reset_node);
return err;
}
@@ -1114,6 +1145,8 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_mtk_pltform = {
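The mediatek probe path above creates a device link to the ti,syscon-reset provider and defers its own probe while that link is still dormant. The same ordering idiom, reduced to a hypothetical helper (requires linux/device.h):

/*
 * Hedged sketch: bind consumer probe order to a supplier via a device
 * link; a DORMANT link means the supplier has not probed yet.
 */
static int link_to_supplier(struct device *consumer, struct device *supplier)
{
        struct device_link *link;

        link = device_link_add(consumer, supplier, DL_FLAG_AUTOPROBE_CONSUMER);
        if (!link)
                return -ENODEV;
        if (link->status == DL_STATE_DORMANT)
                return -EPROBE_DEFER;
        return 0;
}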
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2a3dd21da6a6..9b1d18d7c9bb 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1551,6 +1551,8 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_qcom_pltform = {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 5d0e98a05ada..52bd807f7940 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -245,9 +245,9 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
res = ufshcd_wb_toggle(hba, wb_enable);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return res < 0 ? res : count;
@@ -278,6 +278,242 @@ static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
+static ssize_t monitor_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
+}
+
+static ssize_t monitor_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value, flags;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (value == hba->monitor.enabled)
+ goto out_unlock;
+
+ if (!value) {
+ memset(&hba->monitor, 0, sizeof(hba->monitor));
+ } else {
+ hba->monitor.enabled = true;
+ hba->monitor.enabled_ts = ktime_get();
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t monitor_chunk_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
+}
+
+static ssize_t monitor_chunk_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value, flags;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* Only allow chunk size change when monitor is disabled */
+ if (!hba->monitor.enabled)
+ hba->monitor.chunk_size = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t read_total_sectors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
+}
+
+static ssize_t read_total_busy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.total_busy[READ]));
+}
+
+static ssize_t read_nr_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
+}
+
+static ssize_t read_req_latency_avg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
+ m->nr_req[READ]));
+}
+
+static ssize_t read_req_latency_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_max[READ]));
+}
+
+static ssize_t read_req_latency_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_min[READ]));
+}
+
+static ssize_t read_req_latency_sum_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_sum[READ]));
+}
+
+static ssize_t write_total_sectors_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
+}
+
+static ssize_t write_total_busy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.total_busy[WRITE]));
+}
+
+static ssize_t write_nr_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
+}
+
+static ssize_t write_req_latency_avg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
+ m->nr_req[WRITE]));
+}
+
+static ssize_t write_req_latency_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_max[WRITE]));
+}
+
+static ssize_t write_req_latency_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_min[WRITE]));
+}
+
+static ssize_t write_req_latency_sum_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_sum[WRITE]));
+}
+
+static DEVICE_ATTR_RW(monitor_enable);
+static DEVICE_ATTR_RW(monitor_chunk_size);
+static DEVICE_ATTR_RO(read_total_sectors);
+static DEVICE_ATTR_RO(read_total_busy);
+static DEVICE_ATTR_RO(read_nr_requests);
+static DEVICE_ATTR_RO(read_req_latency_avg);
+static DEVICE_ATTR_RO(read_req_latency_max);
+static DEVICE_ATTR_RO(read_req_latency_min);
+static DEVICE_ATTR_RO(read_req_latency_sum);
+static DEVICE_ATTR_RO(write_total_sectors);
+static DEVICE_ATTR_RO(write_total_busy);
+static DEVICE_ATTR_RO(write_nr_requests);
+static DEVICE_ATTR_RO(write_req_latency_avg);
+static DEVICE_ATTR_RO(write_req_latency_max);
+static DEVICE_ATTR_RO(write_req_latency_min);
+static DEVICE_ATTR_RO(write_req_latency_sum);
+
+static struct attribute *ufs_sysfs_monitor_attrs[] = {
+ &dev_attr_monitor_enable.attr,
+ &dev_attr_monitor_chunk_size.attr,
+ &dev_attr_read_total_sectors.attr,
+ &dev_attr_read_total_busy.attr,
+ &dev_attr_read_nr_requests.attr,
+ &dev_attr_read_req_latency_avg.attr,
+ &dev_attr_read_req_latency_max.attr,
+ &dev_attr_read_req_latency_min.attr,
+ &dev_attr_read_req_latency_sum.attr,
+ &dev_attr_write_total_sectors.attr,
+ &dev_attr_write_total_busy.attr,
+ &dev_attr_write_nr_requests.attr,
+ &dev_attr_write_req_latency_avg.attr,
+ &dev_attr_write_req_latency_max.attr,
+ &dev_attr_write_req_latency_min.attr,
+ &dev_attr_write_req_latency_sum.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_monitor_group = {
+ .name = "monitor",
+ .attrs = ufs_sysfs_monitor_attrs,
+};
+
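One caveat in the monitor group above: read_req_latency_avg_show() and its write counterpart divide lat_sum by nr_req without checking for zero, so reading the average before any request of that direction has completed divides by zero. A hedged hardening sketch (this guard is an addition, not part of the patch):

/* Assumed hardening: guard the latency average against nr_req == 0. */
static u64 monitor_avg_lat_us(const struct ufs_hba_monitor *m, int dir)
{
        if (!m->nr_req[dir])
                return 0;
        return div_u64(ktime_to_us(m->lat_sum[dir]), m->nr_req[dir]);
}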
static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
u8 desc_index,
@@ -297,10 +533,10 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
@@ -678,7 +914,7 @@ static ssize_t _name##_show(struct device *dev, \
up(&hba->host_sem); \
return -ENOMEM; \
} \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
@@ -695,7 +931,7 @@ static ssize_t _name##_show(struct device *dev, \
goto out; \
ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
kfree(desc_buf); \
up(&hba->host_sem); \
return ret; \
@@ -724,8 +960,8 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
- return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
- (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+ return idn >= QUERY_FLAG_IDN_WB_EN &&
+ idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}
#define UFS_FLAG(_name, _uname) \
@@ -744,10 +980,10 @@ static ssize_t _name##_show(struct device *dev, \
} \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
@@ -793,8 +1029,8 @@ static const struct attribute_group ufs_sysfs_flags_group = {
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
- return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
- (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+ return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
+ idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
#define UFS_ATTRIBUTE(_name, _uname) \
@@ -813,10 +1049,10 @@ static ssize_t _name##_show(struct device *dev, \
} \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
@@ -881,6 +1117,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
+ &ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
&ufs_sysfs_geometry_descriptor_group,
@@ -964,10 +1201,10 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 5b2bc1a6f922..39bf204c6ec3 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -97,7 +97,7 @@ static int ufs_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
msgcode = bsg_request->msgcode;
switch (msgcode) {
@@ -106,7 +106,7 @@ static int ufs_bsg_request(struct bsg_job *job)
ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
&desc_len, desc_op);
if (ret) {
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
goto out;
}
@@ -138,7 +138,7 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (!desc_buff)
goto out;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 23ee828747e2..e6c334bfb4c2 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -410,29 +410,6 @@ static int ufshcd_pci_resume(struct device *dev)
return ufshcd_system_resume(dev_get_drvdata(dev));
}
-/**
- * ufshcd_pci_poweroff - suspend-to-disk poweroff function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_poweroff(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int spm_lvl = hba->spm_lvl;
- int ret;
-
- /*
- * For poweroff we need to set the UFS device to PowerDown mode.
- * Force spm_lvl to ensure that.
- */
- hba->spm_lvl = 5;
- ret = ufshcd_system_suspend(hba);
- hba->spm_lvl = spm_lvl;
- return ret;
-}
-
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -533,17 +510,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
- .freeze = ufshcd_pci_suspend,
- .thaw = ufshcd_pci_resume,
- .poweroff = ufshcd_pci_poweroff,
- .restore = ufshcd_pci_resume,
-#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+#endif
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
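The PCI driver drops its open-coded sleep callbacks, including the special poweroff handler that forced spm_lvl to 5, in favour of SET_SYSTEM_SLEEP_PM_OPS(). Under CONFIG_PM_SLEEP that macro assigns the same suspend/resume pair to all six system-sleep callbacks, roughly as sketched below, so the poweroff distinction is presumably handled elsewhere in the common UFS PM code.

/* Approximate expansion of SET_SYSTEM_SLEEP_PM_OPS() under CONFIG_PM_SLEEP: */
#define EXAMPLE_SLEEP_PM_OPS(suspend_fn, resume_fn) \
        .suspend  = suspend_fn, \
        .resume   = resume_fn,  \
        .freeze   = suspend_fn, \
        .thaw     = resume_fn,  \
        .poweroff = suspend_fn, \
        .restore  = resume_fn,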
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 72fd41bfbd54..708b3b62fc4d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -16,6 +16,7 @@
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
+#include <scsi/scsi_driver.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -24,6 +25,7 @@
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
+#include "../sd.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -77,6 +79,8 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -157,17 +161,17 @@ enum {
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+ [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
/*
* For DeepSleep, the link is first put in hibern8 and then off.
* Leaving the link in hibern8 is not supported.
*/
- {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
+ [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
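Indexing ufs_pm_lvl_states with designated initializers ties each entry to its UFS_PM_LVL_* value explicitly, so reordering or inserting enum values can no longer silently shift the table. The general idiom:

/* Designated array initializers keep entries bound to their enum index. */
enum example_lvl { EX_LVL_0, EX_LVL_1, EX_NR_LVLS };

static const int example_tbl[EX_NR_LVLS] = {
        [EX_LVL_0] = 10,
        [EX_LVL_1] = 20,
};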
static inline enum ufs_dev_pwr_mode
@@ -298,11 +302,17 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+ if (str_t == UFS_CMD_SEND)
+ header = &rq->header;
+ else
+ header = &hba->lrb[tag].ucd_rsp_ptr->header;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -361,41 +371,40 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- sector_t lba = -1;
+ u64 lba = -1;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
+ if (!cmd)
+ return;
+
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
return;
}
- if (cmd) { /* data phase exists */
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- opcode = cmd->cmnd[0];
- if ((opcode == READ_10) || (opcode == WRITE_10)) {
- /*
- * Currently we only fully trace read(10) and write(10)
- * commands
- */
- if (cmd->request && cmd->request->bio)
- lba = cmd->request->bio->bi_iter.bi_sector;
- transfer_len = be32_to_cpu(
- lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
- if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
- } else if (opcode == UNMAP) {
- if (cmd->request) {
- lba = scsi_get_lba(cmd);
- transfer_len = blk_rq_bytes(cmd->request);
- }
- }
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ opcode = cmd->cmnd[0];
+ lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+
+ if (opcode == READ_10 || opcode == WRITE_10) {
+ /*
+ * Currently we only fully trace read(10) and write(10) commands
+ */
+ transfer_len =
+ be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ if (opcode == WRITE_10)
+ group_id = lrbp->cmd->cmnd[6];
+ } else if (opcode == UNMAP) {
+ /*
+ * The number of bytes to be unmapped, beginning at the LBA.
+ */
+ transfer_len = blk_rq_bytes(cmd->request);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -755,7 +764,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
- __clear_bit(tag, &hba->outstanding_reqs);
+ clear_bit(tag, &hba->outstanding_reqs);
}
/**
@@ -1551,7 +1560,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value == hba->clk_scaling.is_enabled)
goto out;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
hba->clk_scaling.is_enabled = value;
@@ -1567,7 +1576,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
}
ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return err ? err : count;
@@ -1981,15 +1990,19 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
+ unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
+ }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2005,22 +2018,91 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.active_reqs--;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static inline int ufshcd_monitor_opcode2dir(u8 opcode)
+{
+ if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
+ return READ;
+ else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
+ return WRITE;
+ else
+ return -EINVAL;
+}
+
+static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return (m->enabled && lrbp && lrbp->cmd &&
+ (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
+}
+
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
+ hba->monitor.busy_start_ts[dir] = ktime_get();
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
+ struct request *req = lrbp->cmd->request;
+ struct ufs_hba_monitor *m = &hba->monitor;
+ ktime_t now, inc, lat;
+
+ now = lrbp->compl_time_stamp;
+ inc = ktime_sub(now, m->busy_start_ts[dir]);
+ m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
+ m->nr_sec_rw[dir] += blk_rq_sectors(req);
+
+ /* Update latencies */
+ m->nr_req[dir]++;
+ lat = ktime_sub(now, lrbp->issue_time_stamp);
+ m->lat_sum[dir] += lat;
+ if (m->lat_max[dir] < lat || !m->lat_max[dir])
+ m->lat_max[dir] = lat;
+ if (m->lat_min[dir] > lat || !m->lat_min[dir])
+ m->lat_min[dir] = lat;
+
+ m->nr_queued[dir]--;
+ /* Push forward the busy start of monitor */
+ m->busy_start_ts[dir] = now;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
@@ -2036,8 +2118,21 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_start_monitor(hba, lrbp);
+ if (ufshcd_has_utrlcnr(hba)) {
+ set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
/* Make sure that doorbell is committed immediately */
wmb();
}
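Why ufshcd_send_command() branches: without a completion-notification register, the completion path derives the finished tags by comparing the doorbell with outstanding_reqs, so setting the tag bit and ringing the doorbell must stay atomic with respect to that read (hence the host lock); with UTRLCNR, completions are reported directly and the lock can be skipped. The legacy derivation, as a standalone expression:

/*
 * Legacy (no UTRLCNR) completion derivation: bits still set in
 * outstanding_reqs but already cleared in the doorbell have completed.
 */
static inline unsigned long legacy_completed_reqs(u32 tr_doorbell,
                                                  unsigned long outstanding_reqs)
{
        return outstanding_reqs ^ tr_doorbell;
}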
@@ -2565,6 +2660,17 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
+static inline bool is_rpmb_wlun(struct scsi_device *sdev)
+{
+ return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
+}
+
+static inline bool is_device_wlun(struct scsi_device *sdev)
+{
+ return sdev->lun ==
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
+}
+
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@@ -2597,7 +2703,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufshcd_lrb *lrbp;
struct ufs_hba *hba;
- unsigned long flags;
int tag;
int err = 0;
@@ -2614,6 +2719,43 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
+ /*
+ * pm_runtime_get_sync() is used at error handling preparation
+ * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
+ * PM ops, it can never be finished if we let SCSI layer keep
+ * retrying it, which gets err handler stuck forever. Neither
+ * can we let the scsi cmd pass through, because UFS is in bad
+ * state, the scsi cmd may eventually time out, which will get
+ * err handler blocked for too long. So, just fail the scsi cmd
+ * sent from PM ops, err handler can recover PM error anyways.
+ */
+ if (hba->pm_op_in_progress) {
+ hba->force_reset = true;
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out;
+ }
+ fallthrough;
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out;
+ }
+
hba->req_abort_count = 0;
err = ufshcd_hold(hba, true);
@@ -2624,8 +2766,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
if (hba->pm_op_in_progress)
set_host_byte(cmd, DID_BAD_TARGET);
else
@@ -2634,6 +2775,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = UFS_SENSE_SIZE;
@@ -2657,51 +2799,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED_FATAL:
- /*
- * pm_runtime_get_sync() is used at error handling preparation
- * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
- * PM ops, it can never be finished if we let SCSI layer keep
- * retrying it, which gets err handler stuck forever. Neither
- * can we let the scsi cmd pass through, because UFS is in bad
- * state, the scsi cmd may eventually time out, which will get
- * err handler blocked for too long. So, just fail the scsi cmd
- * sent from PM ops, err handler can recover PM error anyways.
- */
- if (hba->pm_op_in_progress) {
- hba->force_reset = true;
- set_host_byte(cmd, DID_BAD_TARGET);
- goto out_compl_cmd;
- }
- fallthrough;
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out_compl_cmd;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- goto out_compl_cmd;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- goto out_compl_cmd;
- }
ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto out;
-
-out_compl_cmd:
- scsi_dma_unmap(lrbp->cmd);
- lrbp->cmd = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
- if (!err)
- cmd->scsi_done(cmd);
out:
up_read(&hba->clk_scaling_lock);
return err;
@@ -2735,7 +2833,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
- * wait for for h/w to clear corresponding bit in door-bell.
+ * wait for h/w to clear corresponding bit in door-bell.
* max. wait is 1 sec.
*/
err = ufshcd_wait_for_register(hba,
@@ -2856,7 +2954,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
int err;
int tag;
struct completion wait;
- unsigned long flags;
down_read(&hba->clk_scaling_lock);
@@ -2876,34 +2973,30 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
- goto out_put_tag;
+ goto out;
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
-
-out:
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-out_put_tag:
+out:
blk_put_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
@@ -4101,12 +4194,13 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (update && !pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
+ if (update &&
+ !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
+ ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
- pm_runtime_put(hba->dev);
+ ufshcd_rpm_put_sync(hba);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
@@ -4420,7 +4514,7 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+void ufshcd_hba_stop(struct ufs_hba *hba)
{
unsigned long flags;
int err;
@@ -4439,6 +4533,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
+EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
* ufshcd_hba_execute_hce - initialize the controller
@@ -4804,6 +4899,43 @@ static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
}
/**
+ * ufshcd_setup_links - associate link b/w device wlun and other luns
+ * @sdev: pointer to SCSI device
+ * @hba: pointer to ufs hba
+ */
+static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct device_link *link;
+
+ /*
+ * Device wlun is the supplier & rest of the luns are consumers.
+ * This ensures that device wlun suspends after all other luns.
+ */
+ if (hba->sdev_ufs_device) {
+ link = device_link_add(&sdev->sdev_gendev,
+ &hba->sdev_ufs_device->sdev_gendev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
+ dev_name(&hba->sdev_ufs_device->sdev_gendev));
+ return;
+ }
+ hba->luns_avail--;
+ /* Ignore REPORT_LUN wlun probing */
+ if (hba->luns_avail == 1) {
+ ufshcd_rpm_put(hba);
+ return;
+ }
+ } else {
+ /*
+ * Device wlun is probed. The assumption is that WLUNs are
+ * scanned before other LUNs.
+ */
+ hba->luns_avail--;
+ }
+}
+
+/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
@@ -4834,6 +4966,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
ufshcd_get_lu_power_on_wp_status(hba, sdev);
+ ufshcd_setup_links(hba, sdev);
+
return 0;
}
@@ -4865,8 +4999,13 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
-
- if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ /*
+ * Block runtime-pm until all consumers are added.
+ * See ufshcd_setup_links().
+ */
+ if (is_device_wlun(sdev))
+ pm_runtime_get_noresume(&sdev->sdev_gendev);
+ else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
@@ -4982,15 +5121,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
*/
if (!hba->pm_op_in_progress &&
!ufshcd_eh_in_progress(hba) &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
- schedule_work(&hba->eeh_work)) {
- /*
- * Prevent suspend once eeh_work is scheduled
- * to avoid deadlock between ufshcd_suspend
- * and exception event handler.
- */
- pm_runtime_get_noresume(hba->dev);
- }
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ /* Flushed in suspend */
+ schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5037,6 +5170,24 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return result;
}
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+ u32 intr_mask)
+{
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_auto_hibern8_enabled(hba))
+ return false;
+
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+ return false;
+
+ if (hba->active_uic_cmd &&
+ (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
+ hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
+ return false;
+
+ return true;
+}
+
/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
@@ -5050,6 +5201,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
+ spin_lock(hba->host->host_lock);
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+ hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
@@ -5070,6 +5225,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
UFS_CMD_COMP);
+ spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5088,11 +5244,14 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ if (!test_and_clear_bit(index, &hba->outstanding_reqs))
+ continue;
lrbp = &hba->lrb[index];
- lrbp->in_use = false;
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
@@ -5101,7 +5260,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->cmd = NULL;
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
- __ufshcd_release(hba);
+ ufshcd_release(hba);
update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
@@ -5112,28 +5271,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
update_scaling = true;
}
}
- if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
- hba->clk_scaling.active_reqs--;
+ if (update_scaling)
+ ufshcd_clk_scaling_update_busy(hba);
}
-
- /* clear corresponding bits of completed commands */
- hba->outstanding_reqs ^= completed_reqs;
-
- ufshcd_clk_scaling_update_busy(hba);
}
/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_trc_handler - handle transfer requests completion
* @hba: per adapter instance
+ * @use_utrlcnr: get completed requests from UTRLCNR
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
{
- unsigned long completed_reqs;
- u32 tr_doorbell;
+ unsigned long completed_reqs = 0;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5146,8 +5300,24 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+ if (use_utrlcnr) {
+ u32 utrlcnr;
+
+ utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
+ if (utrlcnr) {
+ ufshcd_writel(hba, utrlcnr,
+ REG_UTP_TRANSFER_REQ_LIST_COMPL);
+ completed_reqs = utrlcnr;
+ }
+ } else {
+ unsigned long flags;
+ u32 tr_doorbell;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
if (completed_reqs) {
__ufshcd_transfer_req_compl(hba, completed_reqs);
@@ -5589,8 +5759,8 @@ static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
* after a certain delay to recheck the threshold by next runtime
* suspend.
*/
- pm_runtime_get_sync(hba->dev);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
+ ufshcd_rpm_put_sync(hba);
}
/**
@@ -5607,7 +5777,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
- pm_runtime_get_sync(hba->dev);
ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
@@ -5624,21 +5793,13 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
- /*
- * pm_runtime_get_noresume is called while scheduling
- * eeh_work to avoid suspend racing with exception work.
- * Hence decrement usage counter using pm_runtime_put_noidle
- * to allow suspend on completion of exception event handler.
- */
- pm_runtime_put_noidle(hba->dev);
- pm_runtime_put(hba->dev);
return;
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba);
+ ufshcd_trc_handler(hba, false);
ufshcd_tmc_handler(hba);
}
@@ -5756,12 +5917,13 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
- pm_runtime_get_sync(hba->dev);
- if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
+ ufshcd_rpm_get_sync(hba);
+ if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
+ hba->is_sys_suspended) {
enum ufs_pm_op pm_op;
/*
- * Don't assume anything of pm_runtime_get_sync(), if
+ * Don't assume anything about resume; if
* resume fails, irq and clocks can be OFF, and powers
* can be OFF or in LPM.
*/
@@ -5797,12 +5959,13 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
ufshcd_clear_ua_wluns(hba);
- pm_runtime_put(hba->dev);
+ ufshcd_rpm_put(hba);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
return (!hba->is_powered || hba->shutting_down ||
+ !hba->sdev_ufs_device ||
hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
@@ -5818,14 +5981,18 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
hba->is_sys_suspended = false;
/*
- * Set RPM status of hba device to RPM_ACTIVE,
+ * Set RPM status of wlun device to RPM_ACTIVE,
* this also clears its runtime error.
*/
- ret = pm_runtime_set_active(hba->dev);
+ ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
+
+ /* hba device might have a runtime error otherwise */
+ if (ret)
+ ret = pm_runtime_set_active(hba->dev);
/*
- * If hba device had runtime error, we also need to resume those
- * scsi devices under hba in case any of them has failed to be
- * resumed due to hba runtime resume failure. This is to unblock
+ * If the wlun device had a runtime error, we also need to resume those
+ * consumer scsi devices in case any of them has failed to be
+ * resumed due to supplier runtime resume failure. This is to unblock
* blk_queue_enter in case there are bios waiting inside it.
*/
if (!ret) {
@@ -5887,13 +6054,11 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_RESET;
-
- /* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
-
/*
* A full reset and restore might have happened after preparation
* is finished, double check whether we should stop.
@@ -5976,12 +6141,11 @@ static void ufshcd_err_handler(struct work_struct *work)
}
lock_skip_pending_xfer_clear:
- spin_lock_irqsave(hba->host->host_lock, flags);
-
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
- hba->silence_err_logs = false;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = false;
if (err_xfer || err_tm) {
needs_reset = true;
goto do_reset;
@@ -6014,19 +6178,6 @@ lock_skip_pending_xfer_clear:
do_reset:
/* Fatal errors need reset */
if (needs_reset) {
- unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
-
- /*
- * ufshcd_reset_and_restore() does the link reinitialization
- * which will need atleast one empty doorbell slot to send the
- * device management commands (NOP and query commands).
- * If there is no slot empty at this moment then free up last
- * slot forcefully.
- */
- if (hba->outstanding_reqs == max_doorbells)
- __ufshcd_transfer_req_compl(hba,
- (1UL << (hba->nutrs - 1)));
-
hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
@@ -6144,37 +6295,23 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
return retval;
}
-static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
- u32 intr_mask)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba) ||
- !ufshcd_is_auto_hibern8_enabled(hba))
- return false;
-
- if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
- return false;
-
- if (hba->active_uic_cmd &&
- (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
- hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
- return false;
-
- return true;
-}
-
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ * @intr_status: interrupt status generated by the controller
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(hba->host->host_lock);
+ hba->errors |= UFSHCD_ERROR_MASK & intr_status;
+
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
hba->errors);
@@ -6229,6 +6366,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ hba->errors = 0;
+ hba->uic_error = 0;
+ spin_unlock(hba->host->host_lock);
return retval;
}
@@ -6263,13 +6403,17 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
+ unsigned long flags;
struct request_queue *q = hba->tmf_queue;
struct ctm_info ci = {
.hba = hba,
- .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
};
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
@@ -6286,22 +6430,17 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
- hba->errors = UFSHCD_ERROR_MASK & intr_status;
-
- if (ufshcd_is_auto_hibern8_error(hba, intr_status))
- hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
-
- if (hba->errors)
- retval |= ufshcd_check_errors(hba);
-
if (intr_status & UFSHCD_UIC_MASK)
retval |= ufshcd_uic_cmd_compl(hba, intr_status);
+ if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
+ retval |= ufshcd_check_errors(hba, intr_status);
+
if (intr_status & UTP_TASK_REQ_COMPL)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
return retval;
}
@@ -6322,7 +6461,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
hba->ufs_stats.last_intr_ts = ktime_get();
@@ -6344,7 +6482,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
}
if (enabled_intr_status && retval == IRQ_NONE &&
- !ufshcd_eh_in_progress(hba)) {
+ (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
+ hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
@@ -6353,7 +6492,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
- spin_unlock(hba->host->host_lock);
return retval;
}
@@ -6530,7 +6668,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
int err = 0;
int tag;
struct completion wait;
- unsigned long flags;
u8 upiu_flags;
down_read(&hba->clk_scaling_lock);
@@ -6543,13 +6680,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
@@ -6585,12 +6722,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6616,6 +6752,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
blk_put_request(req);
@@ -6709,7 +6847,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
u32 pos;
int err;
u8 resp = 0xF, lun;
- unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
@@ -6728,11 +6865,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
+ __ufshcd_transfer_req_compl(hba, pos);
}
}
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- spin_unlock_irqrestore(host->host_lock, flags);
out:
hba->req_abort_count = 0;
@@ -6909,20 +7044,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will fail, due to spec violation, scsi err handling next step
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
- * the lrb taken by this cmd and mark the lrb as in_use, then
- * queue the eh_work and bail.
+ * the lrb taken by this cmd and re-set it in outstanding_reqs,
+ * then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ set_bit(tag, &hba->outstanding_reqs);
spin_lock_irqsave(host->host_lock, flags);
- if (lrbp->cmd) {
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
- __set_bit(tag, &hba->outstanding_reqs);
- lrbp->in_use = true;
- hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
- }
-
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
goto out;
}
@@ -6935,9 +7066,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
if (!err) {
cleanup:
- spin_lock_irqsave(host->host_lock, flags);
__ufshcd_transfer_req_compl(hba, (1UL << tag));
- spin_unlock_irqrestore(host->host_lock, flags);
out:
err = SUCCESS;
} else {
@@ -6967,19 +7096,15 @@ out:
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
int err;
- unsigned long flags;
/*
* Stop the host controller and complete the requests
* cleared by h/w
*/
ufshcd_hba_stop(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
ufshcd_set_clk_freq(hba, true);
@@ -7249,7 +7374,6 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
hba->sdev_ufs_device = NULL;
goto out;
}
- ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
@@ -7413,6 +7537,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
+ desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
+
ufs_fixup_device_setup(hba);
ufshcd_wb_probe(hba, desc_buf);
@@ -7890,6 +8017,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
hba->wlun_dev_clr_ua = true;
+ hba->wlun_rpmb_clr_ua = true;
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -8475,7 +8603,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- ufshcd_clear_ua_wluns(hba);
+ if (hba->wlun_dev_clr_ua)
+ ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
cmd[4] = pwr_mode << 4;
@@ -8490,7 +8619,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) == DRIVER_SENSE)
+ if (ret > 0 && scsi_sense_valid(&sshdr))
scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
@@ -8650,23 +8779,7 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
ufshcd_setup_hba_vreg(hba, true);
}
-/**
- * ufshcd_suspend - helper function for suspend operations
- * @hba: per adapter instance
- * @pm_op: desired low power operation type
- *
- * This function will try to put the UFS device and link into low power
- * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
- * (System PM level).
- *
- * If this function is called during shutdown, it will make sure that
- * both UFS device and UFS link is powered off.
- *
- * NOTE: UFS device & link must be active before we enter in this function.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret = 0;
int check_for_bkops;
@@ -8674,9 +8787,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
- hba->pm_op_in_progress = 1;
- if (!ufshcd_is_shutdown_pm(pm_op)) {
- pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->pm_op_in_progress = true;
+ if (pm_op != UFS_SHUTDOWN_PM) {
+ pm_lvl = pm_op == UFS_RUNTIME_PM ?
hba->rpm_lvl : hba->spm_lvl;
req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
@@ -8697,20 +8810,20 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
- goto disable_clks;
+ goto vops_suspend;
}
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto enable_gating;
+ goto enable_scaling;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto enable_gating;
+ goto enable_scaling;
}
- if (ufshcd_is_runtime_pm(pm_op)) {
+ if (pm_op == UFS_RUNTIME_PM) {
if (ufshcd_can_autobkops_during_suspend(hba)) {
/*
* The device is idle with no requests in the queue,
@@ -8719,7 +8832,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_urgent_bkops(hba);
if (ret)
- goto enable_gating;
+ goto enable_scaling;
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
@@ -8740,14 +8853,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
flush_work(&hba->eeh_work);
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
- if (!ufshcd_is_runtime_pm(pm_op))
+ if (pm_op != UFS_RUNTIME_PM)
/* ensure that bkops is disabled */
ufshcd_disable_auto_bkops(hba);
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
if (ret)
- goto enable_gating;
+ goto enable_scaling;
}
}
@@ -8760,7 +8873,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
-disable_clks:
+vops_suspend:
/*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
@@ -8769,28 +8882,9 @@ disable_clks:
ret = ufshcd_vops_suspend(hba, pm_op);
if (ret)
goto set_link_active;
- /*
- * Disable the host irq as host controller as there won't be any
- * host controller transaction expected till resume.
- */
- ufshcd_disable_irq(hba);
-
- ufshcd_setup_clocks(hba, false);
-
- if (ufshcd_is_clkgating_allowed(hba)) {
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- }
-
- ufshcd_vreg_set_lpm(hba);
-
- /* Put the host controller in low power mode if possible */
- ufshcd_hba_vreg_set_lpm(hba);
goto out;
set_link_active:
- ufshcd_vreg_set_hpm(hba);
/*
* Device hardware reset is required to exit DeepSleep. Also, for
* DeepSleep, the link is off so host reset and restore will be done
@@ -8812,57 +8906,33 @@ set_dev_active:
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
-enable_gating:
+enable_scaling:
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
- ufshcd_clear_ua_wluns(hba);
- ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
}
- hba->pm_op_in_progress = 0;
-
- if (ret)
- ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
+ if (ret) {
+ ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+ }
+ hba->pm_op_in_progress = false;
return ret;
}
-/**
- * ufshcd_resume - helper function for resume operations
- * @hba: per adapter instance
- * @pm_op: runtime PM or system PM
- *
- * This function basically brings the UFS device, UniPro link and controller
- * to active state.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+#ifdef CONFIG_PM
+static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
- enum uic_link_state old_link_state;
+ enum uic_link_state old_link_state = hba->uic_link_state;
- hba->pm_op_in_progress = 1;
- old_link_state = hba->uic_link_state;
-
- ufshcd_hba_vreg_set_hpm(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto out;
-
- /* Make sure clocks are enabled before accessing controller */
- ret = ufshcd_setup_clocks(hba, true);
- if (ret)
- goto disable_vreg;
-
- /* enable the host irq as host controller would be active soon */
- ufshcd_enable_irq(hba);
+ hba->pm_op_in_progress = true;
/*
* Call vendor specific resume callback. As these callbacks may access
@@ -8871,7 +8941,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
- goto disable_irq_and_vops_clks;
+ goto out;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8919,42 +8989,218 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
- hba->clk_gating.is_suspended = false;
-
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
if (hba->dev_info.b_rpm_dev_flush_capable) {
hba->dev_info.b_rpm_dev_flush_capable = false;
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
- ufshcd_clear_ua_wluns(hba);
-
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
goto out;
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_irq_and_vops_clks:
+out:
+ if (ret)
+ ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+ hba->pm_op_in_progress = false;
+ return ret;
+}
+
+static int ufshcd_wl_runtime_suspend(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+
+ trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_runtime_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+
+ trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_wl_suspend(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+ down(&hba->host_sem);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
+ if (ret) {
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+ up(&hba->host_sem);
+ }
+
+out:
+ if (!ret)
+ hba->is_sys_suspended = true;
+ trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+out:
+ trace_ufshcd_wl_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
+ up(&hba->host_sem);
+ return ret;
+}
+#endif
+
+static void ufshcd_wl_shutdown(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
+ /* Turn on everything while shutting down */
+ ufshcd_rpm_get_sync(hba);
+ scsi_device_quiesce(sdev);
+ shost_for_each_device(sdev, hba->host) {
+ if (sdev == hba->sdev_ufs_device)
+ continue;
+ scsi_device_quiesce(sdev);
+ }
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ *
+ * This function disables irqs, turns off clocks and puts
+ * vreg and hba-vreg into LPM mode.
+ */
+static int ufshcd_suspend(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transactions expected until resume.
+ */
ufshcd_disable_irq(hba);
- ufshcd_setup_clocks(hba, false);
+ ret = ufshcd_setup_clocks(hba, false);
+ if (ret) {
+ ufshcd_enable_irq(hba);
+ return ret;
+ }
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
+
+ ufshcd_vreg_set_lpm(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ *
+ * This function basically turns on the regulators, clocks and
+ * irqs of the hba.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto disable_vreg;
+
+ /* enable the host irq as host controller would be active soon */
+ ufshcd_enable_irq(hba);
+ goto out;
+
disable_vreg:
ufshcd_vreg_set_lpm(hba);
out:
- hba->pm_op_in_progress = 0;
if (ret)
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
@@ -8973,44 +9219,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- down(&hba->host_sem);
-
- if (!hba->is_powered)
- return 0;
-
- cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
-
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) &&
- (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state) &&
- pm_runtime_suspended(hba->dev) &&
- !hba->dev_info.b_rpm_dev_flush_capable)
+ if (pm_runtime_suspended(hba->dev))
goto out;
- if (pm_runtime_suspended(hba->dev)) {
- /*
- * UFS device and/or UFS link low power states during runtime
- * suspend seems to be different than what is expected during
- * system suspend. Hence runtime resume the devic & link and
- * let the system suspend low power states to take effect.
- * TODO: If resume takes longer time, we might have optimize
- * it in future by not resuming everything if possible.
- */
- ret = ufshcd_runtime_resume(hba);
- if (ret)
- goto out;
- }
-
- ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+ ret = ufshcd_suspend(hba);
out:
trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = true;
- else
- up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -9027,21 +9243,16 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba->is_powered || pm_runtime_suspended(hba->dev))
- /*
- * Let the runtime resume take care of resuming
- * if runtime suspended.
- */
+ if (pm_runtime_suspended(hba->dev))
goto out;
- else
- ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+
+ ret = ufshcd_resume(hba);
+
out:
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = false;
- up(&hba->host_sem);
+
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9056,14 +9267,11 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
ktime_t start = ktime_get();
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
-out:
+ ret = ufshcd_suspend(hba);
+
trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9075,33 +9283,19 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
* ufshcd_runtime_resume - runtime resume routine
* @hba: per adapter instance
*
- * This function basically brings the UFS device, UniPro link and controller
+ * This function basically brings the controller
* to active state. Following operations are done in this function:
*
* 1. Turn on all the controller related clocks
- * 2. Bring the UniPro link out of Hibernate state
- * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
- * to active state.
- * 4. If auto-bkops is enabled on the device, disable it.
- *
- * So following would be the possible power state after this function return
- * successfully:
- * S1: UFS device in Active state with VCC rail ON
- * UniPro link in Active state
- * All the UFS/UniPro controller clocks are ON
- *
- * Returns 0 for success and non-zero for failure
+ * 2. Turn ON VCC rail
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
ktime_t start = ktime_get();
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
-out:
+ ret = ufshcd_resume(hba);
+
trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9119,30 +9313,20 @@ EXPORT_SYMBOL(ufshcd_runtime_idle);
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
*
- * This function would power off both UFS device and UFS link.
+ * This function turns off both the UFS device and UFS HBA
+ * regulators. It also disables the clocks.
*
* Returns 0 always to allow force shutdown even in case of errors.
*/
int ufshcd_shutdown(struct ufs_hba *hba)
{
- int ret = 0;
-
- down(&hba->host_sem);
- hba->shutting_down = true;
- up(&hba->host_sem);
-
- if (!hba->is_powered)
- goto out;
-
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
pm_runtime_get_sync(hba->dev);
- ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+ ufshcd_suspend(hba);
out:
- if (ret)
- dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
@@ -9156,6 +9340,8 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ if (hba->sdev_ufs_device)
+ ufshcd_rpm_get_sync(hba);
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
@@ -9459,15 +9645,182 @@ out_error:
}
EXPORT_SYMBOL_GPL(ufshcd_init);
+void ufshcd_resume_complete(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->complete_put) {
+ ufshcd_rpm_put(hba);
+ hba->complete_put = false;
+ }
+ if (hba->rpmb_complete_put) {
+ ufshcd_rpmb_rpm_put(hba);
+ hba->rpmb_complete_put = false;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
+
+int ufshcd_suspend_prepare(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * SCSI assumes that runtime PM and system PM for SCSI drivers are
+ * the same, so it does not wake up the device for a system suspend
+ * if the device is already runtime suspended. UFS does not follow
+ * that model; see ufshcd_resume_complete().
+ */
+ if (hba->sdev_ufs_device) {
+ ret = ufshcd_rpm_get_sync(hba);
+ if (ret < 0 && ret != -EACCES) {
+ ufshcd_rpm_put(hba);
+ return ret;
+ }
+ hba->complete_put = true;
+ }
+ if (hba->sdev_rpmb) {
+ ufshcd_rpmb_rpm_get_sync(hba);
+ hba->rpmb_complete_put = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
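The comment in ufshcd_suspend_prepare() explains why the host driver has to pin the device WLUN with a runtime PM reference before a system suspend and drop it again in the resume ->complete callback. A plausible sketch of how a bus glue driver might wire these newly exported helpers into its dev_pm_ops is shown below; the ufs_foo_* names and the empty system sleep callbacks are illustrative assumptions, not part of this patch:

/* Hypothetical glue-driver PM ops using the helpers exported above. */
#include <linux/pm.h>

static int ufs_foo_suspend(struct device *dev)	/* illustrative placeholder */
{
	return 0;
}

static int ufs_foo_resume(struct device *dev)	/* illustrative placeholder */
{
	return 0;
}

static const struct dev_pm_ops ufs_foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_foo_suspend, ufs_foo_resume)
	/*
	 * ->prepare runs before the SCSI devices are suspended, so the
	 * device WLUN is runtime-resumed and pinned there; ->complete
	 * drops that reference once the system resume has finished.
	 */
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
};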
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_wl_poweroff(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+ return 0;
+}
+#endif
+
+static int ufshcd_wl_probe(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!is_device_wlun(sdev))
+ return -ENODEV;
+
+ blk_pm_runtime_init(sdev->request_queue, dev);
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_allow(dev);
+
+ return 0;
+}
+
+static int ufshcd_wl_remove(struct device *dev)
+{
+ pm_runtime_forbid(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops ufshcd_wl_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_wl_suspend,
+ .resume = ufshcd_wl_resume,
+ .freeze = ufshcd_wl_suspend,
+ .thaw = ufshcd_wl_resume,
+ .poweroff = ufshcd_wl_poweroff,
+ .restore = ufshcd_wl_resume,
+#endif
+ SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
+};
+
+/*
+ * ufs_dev_wlun_template - describes ufs device wlun
+ * ufs-device wlun - used to send pm commands
+ * All luns are consumers of ufs-device wlun.
+ *
+ * Currently, no sd driver is present for WLUNs,
+ * hence no specific PM operations are performed for them.
+ * Per the UFS design, SSU should be sent to the ufs-device WLUN,
+ * hence a SCSI driver is registered for UFS WLUNs only.
+ */
+static struct scsi_driver ufs_dev_wlun_template = {
+ .gendrv = {
+ .name = "ufs_device_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_wl_probe,
+ .remove = ufshcd_wl_remove,
+ .pm = &ufshcd_wl_pm_ops,
+ .shutdown = ufshcd_wl_shutdown,
+ },
+};
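Because every LUN is a consumer of the ufs-device WLUN, code that needs the device active can take a runtime PM reference on that WLUN through the ufshcd_rpm_* wrappers added to ufshcd.h later in this patch. A minimal, hypothetical usage sketch (ufs_foo_do_device_work() is an illustrative placeholder, not a real helper):

/* Hypothetical pattern: pin the device WLUN while talking to the device. */
static int ufs_foo_do_device_work(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_rpm_get_sync(hba);	/* runtime-resume the device WLUN */
	if (ret < 0) {
		ufshcd_rpm_put(hba);	/* drop the usage count taken above */
		return ret;
	}

	/* ... issue query/device management commands here ... */

	ufshcd_rpm_put(hba);		/* allow the WLUN to suspend again */
	return 0;
}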
+
+static int ufshcd_rpmb_probe(struct device *dev)
+{
+ return is_rpmb_wlun(to_scsi_device(dev)) ? 0 : -ENODEV;
+}
+
+static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->wlun_rpmb_clr_ua)
+ return 0;
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
+ if (!ret)
+ hba->wlun_rpmb_clr_ua = 0;
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int ufshcd_rpmb_resume(struct device *dev)
+{
+ struct ufs_hba *hba = wlun_dev_to_hba(dev);
+
+ if (hba->sdev_rpmb)
+ ufshcd_clear_rpmb_uac(hba);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops ufs_rpmb_pm_ops = {
+ SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume)
+};
+
+/* ufs_rpmb_wlun_template - Describes UFS RPMB WLUN. Used only to send UAC. */
+static struct scsi_driver ufs_rpmb_wlun_template = {
+ .gendrv = {
+ .name = "ufs_rpmb_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_rpmb_probe,
+ .pm = &ufs_rpmb_pm_ops,
+ },
+};
+
static int __init ufshcd_core_init(void)
{
+ int ret;
+
ufs_debugfs_init();
- return 0;
+
+ ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
+ if (ret)
+ goto debugfs_exit;
+
+ ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv);
+ if (ret)
+ goto unregister;
+
+ return ret;
+unregister:
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
+debugfs_exit:
+ ufs_debugfs_exit();
+ return ret;
}
static void __exit ufshcd_core_exit(void)
{
ufs_debugfs_exit();
+ scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv);
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}
module_init(ufshcd_core_init);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 5eb66a8debc7..194755c9ddfe 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -72,6 +72,8 @@ enum ufs_event_type {
UFS_EVT_LINK_STARTUP_FAIL,
UFS_EVT_RESUME_ERR,
UFS_EVT_SUSPEND_ERR,
+ UFS_EVT_WL_SUSP_ERR,
+ UFS_EVT_WL_RES_ERR,
/* abnormal events */
UFS_EVT_DEV_RESET,
@@ -106,10 +108,6 @@ enum ufs_pm_op {
UFS_SHUTDOWN_PM,
};
-#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
-#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
-#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
-
/* Host <-> Device UniPro Link state */
enum uic_link_state {
UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
@@ -157,13 +155,13 @@ enum uic_link_state {
* power off.
*/
enum ufs_pm_level {
- UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
- UFS_PM_LVL_6, /* UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE */
+ UFS_PM_LVL_0,
+ UFS_PM_LVL_1,
+ UFS_PM_LVL_2,
+ UFS_PM_LVL_3,
+ UFS_PM_LVL_4,
+ UFS_PM_LVL_5,
+ UFS_PM_LVL_6,
UFS_PM_LVL_MAX
};
@@ -195,7 +193,6 @@ struct ufs_pm_lvl_states {
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
- * @in_use: indicates that this lrb is still in use
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -225,7 +222,6 @@ struct ufshcd_lrb {
#endif
bool req_abort_skip;
- bool in_use;
};
/**
@@ -645,6 +641,25 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
+struct ufs_hba_monitor {
+ unsigned long chunk_size;
+
+ unsigned long nr_sec_rw[2];
+ ktime_t total_busy[2];
+
+ unsigned long nr_req[2];
+ /* latencies */
+ ktime_t lat_sum[2];
+ ktime_t lat_max[2];
+ ktime_t lat_min[2];
+
+ u32 nr_queued[2];
+ ktime_t busy_start_ts[2];
+
+ ktime_t enabled_ts;
+ bool enabled;
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -807,6 +822,7 @@ struct ufs_hba {
struct list_head clk_list_head;
bool wlun_dev_clr_ua;
+ bool wlun_rpmb_clr_ua;
/* Number of requests aborts */
int req_abort_count;
@@ -835,6 +851,8 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
+ struct ufs_hba_monitor monitor;
+
#ifdef CONFIG_SCSI_UFS_CRYPTO
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
@@ -846,6 +864,9 @@ struct ufs_hba {
struct delayed_work debugfs_ee_work;
u32 debugfs_ee_rate_limit_ms;
#endif
+ u32 luns_avail;
+ bool complete_put;
+ bool rpmb_complete_put;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -936,7 +957,7 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
-int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
@@ -947,6 +968,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
+void ufshcd_hba_stop(struct ufs_hba *hba);
static inline void check_upiu_size(void)
{
@@ -1105,6 +1127,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_suspend_prepare(struct device *dev);
+void ufshcd_resume_complete(struct device *dev);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -1136,6 +1160,11 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
+static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
+{
+ return (hba->ufs_version >= ufshci_version(3, 0));
+}
+
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
{
@@ -1200,8 +1229,13 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
bool is_scsi_cmd)
{
- if (hba->vops && hba->vops->setup_xfer_req)
- return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+ if (hba->vops && hba->vops->setup_xfer_req) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
}
static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
@@ -1309,4 +1343,29 @@ static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
&hba->ee_drv_mask, set, clr);
}
+static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put(struct ufs_hba *hba)
+{
+ return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev);
+}
+
+static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
+{
+ return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
+}
+
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index de95be5d11d4..5affb1fce5ad 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,6 +39,7 @@ enum {
REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index b9c86a7e3b97..b0deaf4af5a3 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -97,7 +97,7 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
if (resid)
- scsi_set_resid(sc, resid);
+ scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}
/*
@@ -156,13 +156,11 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
VIRTIO_SCSI_SENSE_SIZE);
- if (sc->sense_buffer) {
+ if (resp->sense_len) {
memcpy(sc->sense_buffer, resp->sense,
min_t(u32,
virtio32_to_cpu(vscsi->vdev, resp->sense_len),
VIRTIO_SCSI_SENSE_SIZE));
- if (resp->sense_len)
- set_driver_byte(sc, DRIVER_SENSE);
}
sc->scsi_done(sc);
@@ -355,7 +353,7 @@ static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
if (result == 0 && inq_result[0] >> 5) {
/* PQ indicates the LUN is not attached */
scsi_remove_device(sdev);
- } else if (host_byte(result) == DID_BAD_TARGET) {
+ } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
/*
* If all LUNs of a virtio-scsi device are unplugged
* it will respond with BAD TARGET on any INQUIRY
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index b9969fce6b4d..ce1ba1b93629 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -576,9 +576,6 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
cmd->result = (DID_RESET << 16);
} else {
cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION &&
- cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
}
} else
switch (btstat) {
@@ -610,9 +607,6 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
case BTSTAT_LUNMISMATCH:
case BTSTAT_TAGREJECT:
case BTSTAT_BADMSG:
- cmd->result = (DRIVER_INVALID << 24);
- fallthrough;
-
case BTSTAT_HAHARDWARE:
case BTSTAT_INVPHASE:
case BTSTAT_HATIMEOUT:
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index a23277bb870e..4468bc45aaa4 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1176,13 +1176,13 @@ wd33c93_intr(struct Scsi_Host *instance)
if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
cmd->SCp.Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD)
- cmd->result =
- (cmd->
- result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result =
- cmd->SCp.Status | (cmd->SCp.Message << 8);
+ && cmd->SCp.Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ set_status_byte(cmd, cmd->SCp.Status);
+ }
cmd->scsi_done(cmd);
/* We are no longer connected to a target - check to see if
@@ -1262,11 +1262,14 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != SAM_STAT_GOOD)
- cmd->result =
- (cmd->result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ if (cmd->cmnd[0] == REQUEST_SENSE &&
+ cmd->SCp.Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ set_status_byte(cmd, cmd->SCp.Status);
+ }
cmd->scsi_done(cmd);
/* We are no longer connected to a target - check to see if
@@ -1295,14 +1298,14 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
DB(DB_INTR, printk(":%d", cmd->SCp.Status))
- if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD)
- cmd->result =
- (cmd->
- result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result =
- cmd->SCp.Status | (cmd->SCp.Message << 8);
+ if (cmd->cmnd[0] == REQUEST_SENSE
+ && cmd->SCp.Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ set_status_byte(cmd, cmd->SCp.Status);
+ }
cmd->scsi_done(cmd);
break;
case S_PRE_TMP_DISC:
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 259fc248d06c..ec9d399fbbd8 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -251,6 +251,7 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
+ int result;
id = ring_rsp->rqid;
shadow = info->shadow[id];
@@ -261,7 +262,12 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
- sc->result = ring_rsp->rslt;
+ result = ring_rsp->rslt;
+ if (result >> 24)
+ set_host_byte(sc, DID_ERROR);
+ else
+ set_host_byte(sc, host_byte(result));
+ set_status_byte(sc, result & 0xff);
scsi_set_resid(sc, ring_rsp->residual_len);
sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index 46b4cda36bac..aeefeb725524 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -102,29 +102,29 @@ static int siox_gpio_probe(struct platform_device *pdev)
ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
if (IS_ERR(ddata->din)) {
- ret = PTR_ERR(ddata->din);
- dev_err(dev, "Failed to get %s GPIO: %d\n", "din", ret);
+ ret = dev_err_probe(dev, PTR_ERR(ddata->din),
+ "Failed to get din GPIO\n");
goto err;
}
ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
if (IS_ERR(ddata->dout)) {
- ret = PTR_ERR(ddata->dout);
- dev_err(dev, "Failed to get %s GPIO: %d\n", "dout", ret);
+ ret = dev_err_probe(dev, PTR_ERR(ddata->dout),
+ "Failed to get dout GPIO\n");
goto err;
}
ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
if (IS_ERR(ddata->dclk)) {
- ret = PTR_ERR(ddata->dclk);
- dev_err(dev, "Failed to get %s GPIO: %d\n", "dclk", ret);
+ ret = dev_err_probe(dev, PTR_ERR(ddata->dclk),
+ "Failed to get dclk GPIO\n");
goto err;
}
ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
if (IS_ERR(ddata->dld)) {
- ret = PTR_ERR(ddata->dld);
- dev_err(dev, "Failed to get %s GPIO: %d\n", "dld", ret);
+ ret = dev_err_probe(dev, PTR_ERR(ddata->dld),
+ "Failed to get dld GPIO\n");
goto err;
}
@@ -134,7 +134,8 @@ static int siox_gpio_probe(struct platform_device *pdev)
ret = siox_master_register(smaster);
if (ret) {
- dev_err(dev, "Failed to register siox master: %d\n", ret);
+ dev_err_probe(dev, ret,
+ "Failed to register siox master\n");
err:
siox_master_put(smaster);
}
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index f678e4d9e585..a05e9fbcd3e0 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-y += imx/
-obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
+obj-y += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += mediatek/
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 50bf5d2b828b..2be3afe6c2e3 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -68,7 +68,7 @@ struct meson_ee_pwrc_domain_desc {
struct meson_ee_pwrc_top_domain *top_pd;
unsigned int mem_pd_count;
struct meson_ee_pwrc_mem_domain *mem_pd;
- bool (*get_power)(struct meson_ee_pwrc_domain *pwrc_domain);
+ bool (*is_powered_off)(struct meson_ee_pwrc_domain *pwrc_domain);
};
struct meson_ee_pwrc_domain_data {
@@ -217,7 +217,7 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) },
};
-#define VPU_PD(__name, __top_pd, __mem, __get_power, __resets, __clks) \
+#define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
{ \
.name = __name, \
.reset_names_count = __resets, \
@@ -225,46 +225,46 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
.top_pd = __top_pd, \
.mem_pd_count = ARRAY_SIZE(__mem), \
.mem_pd = __mem, \
- .get_power = __get_power, \
+ .is_powered_off = __is_pwr_off, \
}
-#define TOP_PD(__name, __top_pd, __mem, __get_power) \
+#define TOP_PD(__name, __top_pd, __mem, __is_pwr_off) \
{ \
.name = __name, \
.top_pd = __top_pd, \
.mem_pd_count = ARRAY_SIZE(__mem), \
.mem_pd = __mem, \
- .get_power = __get_power, \
+ .is_powered_off = __is_pwr_off, \
}
#define MEM_PD(__name, __mem) \
TOP_PD(__name, NULL, __mem, NULL)
-static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain);
+static bool pwrc_ee_is_powered_off(struct meson_ee_pwrc_domain *pwrc_domain);
static struct meson_ee_pwrc_domain_desc axg_pwrc_domains[] = {
[PWRC_AXG_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, axg_pwrc_mem_vpu,
- pwrc_ee_get_power, 5, 2),
+ pwrc_ee_is_powered_off, 5, 2),
[PWRC_AXG_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
[PWRC_AXG_AUDIO_ID] = MEM_PD("AUDIO", axg_pwrc_mem_audio),
};
static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
[PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
- pwrc_ee_get_power, 11, 2),
+ pwrc_ee_is_powered_off, 11, 2),
[PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
[PWRC_GXBB_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, gxbb_pwrc_mem_vpu,
- pwrc_ee_get_power, 12, 2),
+ pwrc_ee_is_powered_off, 12, 2),
[PWRC_GXBB_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
[PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
- meson8_pwrc_mem_vpu, pwrc_ee_get_power,
- 0, 1),
+ meson8_pwrc_mem_vpu,
+ pwrc_ee_is_powered_off, 0, 1),
[PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
meson_pwrc_mem_eth),
[PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
@@ -273,8 +273,8 @@ static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
[PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
- meson8_pwrc_mem_vpu, pwrc_ee_get_power,
- 11, 1),
+ meson8_pwrc_mem_vpu,
+ pwrc_ee_is_powered_off, 11, 1),
[PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
meson_pwrc_mem_eth),
[PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
@@ -283,15 +283,15 @@ static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
[PWRC_SM1_VPU_ID] = VPU_PD("VPU", &sm1_pwrc_vpu, sm1_pwrc_mem_vpu,
- pwrc_ee_get_power, 11, 2),
+ pwrc_ee_is_powered_off, 11, 2),
[PWRC_SM1_NNA_ID] = TOP_PD("NNA", &sm1_pwrc_nna, sm1_pwrc_mem_nna,
- pwrc_ee_get_power),
+ pwrc_ee_is_powered_off),
[PWRC_SM1_USB_ID] = TOP_PD("USB", &sm1_pwrc_usb, sm1_pwrc_mem_usb,
- pwrc_ee_get_power),
+ pwrc_ee_is_powered_off),
[PWRC_SM1_PCIE_ID] = TOP_PD("PCI", &sm1_pwrc_pci, sm1_pwrc_mem_pcie,
- pwrc_ee_get_power),
+ pwrc_ee_is_powered_off),
[PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d,
- pwrc_ee_get_power),
+ pwrc_ee_is_powered_off),
[PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio),
[PWRC_SM1_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
@@ -314,7 +314,7 @@ struct meson_ee_pwrc {
struct genpd_onecell_data xlate;
};
-static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain)
+static bool pwrc_ee_is_powered_off(struct meson_ee_pwrc_domain *pwrc_domain)
{
u32 reg;
@@ -445,7 +445,7 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
* we need to power the domain off, otherwise the internal clocks
* prepare/enable counters won't be in sync.
*/
- if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
+ if (dom->num_clks && dom->desc.is_powered_off && !dom->desc.is_powered_off(dom)) {
ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
if (ret)
return ret;
@@ -456,8 +456,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
return ret;
} else {
ret = pm_genpd_init(&dom->base, NULL,
- (dom->desc.get_power ?
- dom->desc.get_power(dom) : true));
+ (dom->desc.is_powered_off ?
+ dom->desc.is_powered_off(dom) : true));
if (ret)
return ret;
}
@@ -536,7 +536,7 @@ static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
for (i = 0 ; i < pwrc->xlate.num_domains ; ++i) {
struct meson_ee_pwrc_domain *dom = &pwrc->domains[i];
- if (dom->desc.get_power && !dom->desc.get_power(dom))
+ if (dom->desc.is_powered_off && !dom->desc.is_powered_off(dom))
meson_ee_pwrc_off(&dom->base);
}
}
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index e87dfc6660f3..2a010881f4b6 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -14,11 +14,6 @@
static u32 family_id;
static u32 product_id;
-static const struct of_device_id brcmstb_machine_match[] = {
- { .compatible = "brcm,brcmstb", },
- { }
-};
-
u32 brcmstb_get_family_id(void)
{
return family_id;
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index a673fdffe216..3cbb165d6e30 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -28,6 +28,7 @@
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 3f711c1a0996..bbae3d39c7be 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
@@ -53,8 +54,8 @@ struct qe_ic {
struct irq_chip hc_irq;
/* VIRQ numbers of QE high/low irqs */
- unsigned int virq_high;
- unsigned int virq_low;
+ int virq_high;
+ int virq_low;
};
/*
@@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-static void __init qe_ic_init(struct device_node *node)
+static int qe_ic_init(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
void (*low_handler)(struct irq_desc *desc);
void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
- struct resource res;
- u32 ret;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
- ret = of_address_to_resource(node, 0, &res);
- if (ret)
- return;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
- qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+ qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
if (qe_ic == NULL)
- return;
+ return -ENOMEM;
- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
- &qe_ic_host_ops, qe_ic);
- if (qe_ic->irqhost == NULL) {
- kfree(qe_ic);
- return;
+ qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (qe_ic->regs == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
}
- qe_ic->regs = ioremap(res.start, resource_size(&res));
-
qe_ic->hc_irq = qe_ic_irq_chip;
- qe_ic->virq_high = irq_of_parse_and_map(node, 0);
- qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+ qe_ic->virq_high = platform_get_irq(pdev, 0);
+ qe_ic->virq_low = platform_get_irq(pdev, 1);
- if (!qe_ic->virq_low) {
- printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
- kfree(qe_ic);
- return;
- }
- if (qe_ic->virq_high != qe_ic->virq_low) {
+ if (qe_ic->virq_low <= 0)
+ return -ENODEV;
+
+ if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
low_handler = qe_ic_cascade_low;
high_handler = qe_ic_cascade_high;
} else {
@@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node)
high_handler = NULL;
}
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+ dev_err(dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
irq_set_handler_data(qe_ic->virq_low, qe_ic);
irq_set_chained_handler(qe_ic->virq_low, low_handler);
- if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
+ if (high_handler) {
irq_set_handler_data(qe_ic->virq_high, qe_ic);
irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
+ return 0;
}
+static const struct of_device_id qe_ic_ids[] = {
+ { .compatible = "fsl,qe-ic"},
+ { .type = "qeic"},
+ {},
+};
-static int __init qe_ic_of_init(void)
+static struct platform_driver qe_ic_driver =
{
- struct device_node *np;
+ .driver = {
+ .name = "qe-ic",
+ .of_match_table = qe_ic_ids,
+ },
+ .probe = qe_ic_init,
+};
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np) {
- np = of_find_node_by_type(NULL, "qeic");
- if (!np)
- return -ENODEV;
- }
- qe_ic_init(np);
- of_node_put(np);
+static int __init qe_ic_of_init(void)
+{
+ platform_driver_register(&qe_ic_driver);
return 0;
}
subsys_initcall(qe_ic_of_init);
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index db7e7fc321b1..34a9ac1f2b9b 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -12,11 +12,15 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
+#include <dt-bindings/power/imx8mm-power.h>
+#include <dt-bindings/power/imx8mn-power.h>
#define GPC_LPCR_A_CORE_BSC 0x000
@@ -42,6 +46,25 @@
#define IMX8M_PCIE1_A53_DOMAIN BIT(3)
#define IMX8M_MIPI_A53_DOMAIN BIT(2)
+#define IMX8MM_VPUH1_A53_DOMAIN BIT(15)
+#define IMX8MM_VPUG2_A53_DOMAIN BIT(14)
+#define IMX8MM_VPUG1_A53_DOMAIN BIT(13)
+#define IMX8MM_DISPMIX_A53_DOMAIN BIT(12)
+#define IMX8MM_VPUMIX_A53_DOMAIN BIT(10)
+#define IMX8MM_GPUMIX_A53_DOMAIN BIT(9)
+#define IMX8MM_GPU_A53_DOMAIN (BIT(8) | BIT(11))
+#define IMX8MM_DDR1_A53_DOMAIN BIT(7)
+#define IMX8MM_OTG2_A53_DOMAIN BIT(5)
+#define IMX8MM_OTG1_A53_DOMAIN BIT(4)
+#define IMX8MM_PCIE_A53_DOMAIN BIT(3)
+#define IMX8MM_MIPI_A53_DOMAIN BIT(2)
+
+#define IMX8MN_DISPMIX_A53_DOMAIN BIT(12)
+#define IMX8MN_GPUMIX_A53_DOMAIN BIT(9)
+#define IMX8MN_DDR1_A53_DOMAIN BIT(7)
+#define IMX8MN_OTG1_A53_DOMAIN BIT(4)
+#define IMX8MN_MIPI_A53_DOMAIN BIT(2)
+
#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
#define GPC_PU_PGC_SW_PDN_REQ 0x104
@@ -65,14 +88,55 @@
#define IMX8M_PCIE1_SW_Pxx_REQ BIT(1)
#define IMX8M_MIPI_SW_Pxx_REQ BIT(0)
+#define IMX8MM_VPUH1_SW_Pxx_REQ BIT(13)
+#define IMX8MM_VPUG2_SW_Pxx_REQ BIT(12)
+#define IMX8MM_VPUG1_SW_Pxx_REQ BIT(11)
+#define IMX8MM_DISPMIX_SW_Pxx_REQ BIT(10)
+#define IMX8MM_VPUMIX_SW_Pxx_REQ BIT(8)
+#define IMX8MM_GPUMIX_SW_Pxx_REQ BIT(7)
+#define IMX8MM_GPU_SW_Pxx_REQ (BIT(6) | BIT(9))
+#define IMX8MM_DDR1_SW_Pxx_REQ BIT(5)
+#define IMX8MM_OTG2_SW_Pxx_REQ BIT(3)
+#define IMX8MM_OTG1_SW_Pxx_REQ BIT(2)
+#define IMX8MM_PCIE_SW_Pxx_REQ BIT(1)
+#define IMX8MM_MIPI_SW_Pxx_REQ BIT(0)
+
+#define IMX8MN_DISPMIX_SW_Pxx_REQ BIT(10)
+#define IMX8MN_GPUMIX_SW_Pxx_REQ BIT(7)
+#define IMX8MN_DDR1_SW_Pxx_REQ BIT(5)
+#define IMX8MN_OTG1_SW_Pxx_REQ BIT(2)
+#define IMX8MN_MIPI_SW_Pxx_REQ BIT(0)
+
#define GPC_M4_PU_PDN_FLG 0x1bc
#define GPC_PU_PWRHSK 0x1fc
+#define IMX8M_GPU_HSK_PWRDNACKN BIT(26)
+#define IMX8M_VPU_HSK_PWRDNACKN BIT(25)
+#define IMX8M_DISP_HSK_PWRDNACKN BIT(24)
#define IMX8M_GPU_HSK_PWRDNREQN BIT(6)
#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
+
+#define IMX8MM_GPUMIX_HSK_PWRDNACKN BIT(29)
+#define IMX8MM_GPU_HSK_PWRDNACKN (BIT(27) | BIT(28))
+#define IMX8MM_VPUMIX_HSK_PWRDNACKN BIT(26)
+#define IMX8MM_DISPMIX_HSK_PWRDNACKN BIT(25)
+#define IMX8MM_HSIO_HSK_PWRDNACKN (BIT(23) | BIT(24))
+#define IMX8MM_GPUMIX_HSK_PWRDNREQN BIT(11)
+#define IMX8MM_GPU_HSK_PWRDNREQN (BIT(9) | BIT(10))
+#define IMX8MM_VPUMIX_HSK_PWRDNREQN BIT(8)
+#define IMX8MM_DISPMIX_HSK_PWRDNREQN BIT(7)
+#define IMX8MM_HSIO_HSK_PWRDNREQN (BIT(5) | BIT(6))
+
+#define IMX8MN_GPUMIX_HSK_PWRDNACKN (BIT(29) | BIT(27))
+#define IMX8MN_DISPMIX_HSK_PWRDNACKN BIT(25)
+#define IMX8MN_HSIO_HSK_PWRDNACKN BIT(23)
+#define IMX8MN_GPUMIX_HSK_PWRDNREQN (BIT(11) | BIT(9))
+#define IMX8MN_DISPMIX_HSK_PWRDNREQN BIT(7)
+#define IMX8MN_HSIO_HSK_PWRDNREQN BIT(5)
+
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -95,18 +159,37 @@
#define IMX8M_PGC_MIPI_CSI2 28
#define IMX8M_PGC_PCIE2 29
+#define IMX8MM_PGC_MIPI 16
+#define IMX8MM_PGC_PCIE 17
+#define IMX8MM_PGC_OTG1 18
+#define IMX8MM_PGC_OTG2 19
+#define IMX8MM_PGC_DDR1 21
+#define IMX8MM_PGC_GPU2D 22
+#define IMX8MM_PGC_GPUMIX 23
+#define IMX8MM_PGC_VPUMIX 24
+#define IMX8MM_PGC_GPU3D 25
+#define IMX8MM_PGC_DISPMIX 26
+#define IMX8MM_PGC_VPUG1 27
+#define IMX8MM_PGC_VPUG2 28
+#define IMX8MM_PGC_VPUH1 29
+
+#define IMX8MN_PGC_MIPI 16
+#define IMX8MN_PGC_OTG1 18
+#define IMX8MN_PGC_DDR1 21
+#define IMX8MN_PGC_GPUMIX 23
+#define IMX8MN_PGC_DISPMIX 26
+
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
#define GPC_PGC_CTRL_PCR BIT(0)
-#define GPC_CLK_MAX 6
-
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
struct regulator *regulator;
- struct clk *clk[GPC_CLK_MAX];
+ struct reset_control *reset;
+ struct clk_bulk_data *clks;
int num_clks;
unsigned int pgc;
@@ -114,7 +197,8 @@ struct imx_pgc_domain {
const struct {
u32 pxx;
u32 map;
- u32 hsk;
+ u32 hskreq;
+ u32 hskack;
} bits;
const int voltage;
@@ -127,96 +211,172 @@ struct imx_pgc_domain_data {
const struct regmap_access_table *reg_access_table;
};
-static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
- bool on)
+static inline struct imx_pgc_domain *
+to_imx_pgc_domain(struct generic_pm_domain *genpd)
{
- struct imx_pgc_domain *domain = container_of(genpd,
- struct imx_pgc_domain,
- genpd);
- unsigned int offset = on ?
- GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ;
- const bool enable_power_control = !on;
- const bool has_regulator = !IS_ERR(domain->regulator);
- int i, ret = 0;
- u32 pxx_req;
-
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
- domain->bits.map, domain->bits.map);
-
- if (has_regulator && on) {
+ return container_of(genpd, struct imx_pgc_domain, genpd);
+}
+
+static int imx_pgc_power_up(struct generic_pm_domain *genpd)
+{
+ struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
+ u32 reg_val;
+ int ret;
+
+ ret = pm_runtime_get_sync(domain->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(domain->dev);
+ return ret;
+ }
+
+ if (!IS_ERR(domain->regulator)) {
ret = regulator_enable(domain->regulator);
if (ret) {
dev_err(domain->dev, "failed to enable regulator\n");
- goto unmap;
+ goto out_put_pm;
}
}
/* Enable reset clocks for all devices in the domain */
- for (i = 0; i < domain->num_clks; i++)
- clk_prepare_enable(domain->clk[i]);
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable reset clocks\n");
+ goto out_regulator_disable;
+ }
- if (enable_power_control)
- regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ if (domain->bits.pxx) {
+ /* request the domain to power up */
+ regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PUP_REQ,
+ domain->bits.pxx, domain->bits.pxx);
+ /*
+ * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
+ * for PUP_REQ/PDN_REQ bit to be cleared
+ */
+ ret = regmap_read_poll_timeout(domain->regmap,
+ GPC_PU_PGC_SW_PUP_REQ, reg_val,
+ !(reg_val & domain->bits.pxx),
+ 0, USEC_PER_MSEC);
+ if (ret) {
+ dev_err(domain->dev, "failed to command PGC\n");
+ goto out_clk_disable;
+ }
+
+ /* disable power control */
+ regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR);
+ }
- if (domain->bits.hsk)
+ reset_control_assert(domain->reset);
+
+ /* delay for reset to propagate */
+ udelay(5);
+
+ reset_control_deassert(domain->reset);
+
+ /* request the ADB400 to power up */
+ if (domain->bits.hskreq) {
regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
- domain->bits.hsk, on ? domain->bits.hsk : 0);
-
- regmap_update_bits(domain->regmap, offset,
- domain->bits.pxx, domain->bits.pxx);
-
- /*
- * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
- * for PUP_REQ/PDN_REQ bit to be cleared
- */
- ret = regmap_read_poll_timeout(domain->regmap, offset, pxx_req,
- !(pxx_req & domain->bits.pxx),
- 0, USEC_PER_MSEC);
- if (ret) {
- dev_err(domain->dev, "failed to command PGC\n");
+ domain->bits.hskreq, domain->bits.hskreq);
+
/*
- * If we were in a process of enabling a
- * domain and failed we might as well disable
- * the regulator we just enabled. And if it
- * was the opposite situation and we failed to
- * power down -- keep the regulator on
+ * ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK, reg_val,
+ * (reg_val & domain->bits.hskack), 0,
+ * USEC_PER_MSEC);
+ * Technically we need the commented-out code above to wait for the
+ * handshake, but that requires the BLK-CTL module's BUS clk-en bit to
+ * be set first.
+ *
+ * There is a separate BLK-CTL module; its driver will set the BUS
+ * clk-en bit, and the handshake will then be triggered automatically.
+ * Just add a delay and assume the handshake has finished by then.
*/
- on = !on;
}
- if (enable_power_control)
- regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR, 0);
-
/* Disable reset clocks for all devices in the domain */
- for (i = 0; i < domain->num_clks; i++)
- clk_disable_unprepare(domain->clk[i]);
-
- if (has_regulator && !on) {
- int err;
-
- err = regulator_disable(domain->regulator);
- if (err)
- dev_err(domain->dev,
- "failed to disable regulator: %d\n", err);
- /* Preserve earlier error code */
- ret = ret ?: err;
- }
-unmap:
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
- domain->bits.map, 0);
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ return 0;
+
+out_clk_disable:
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+out_regulator_disable:
+ if (!IS_ERR(domain->regulator))
+ regulator_disable(domain->regulator);
+out_put_pm:
+ pm_runtime_put(domain->dev);
+
return ret;
}
-static int imx_gpc_pu_pgc_sw_pup_req(struct generic_pm_domain *genpd)
+static int imx_pgc_power_down(struct generic_pm_domain *genpd)
{
- return imx_gpc_pu_pgc_sw_pxx_req(genpd, true);
-}
+ struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
+ u32 reg_val;
+ int ret;
-static int imx_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd)
-{
- return imx_gpc_pu_pgc_sw_pxx_req(genpd, false);
+ /* Enable reset clocks for all devices in the domain */
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable reset clocks\n");
+ return ret;
+ }
+
+ /* request the ADB400 to power down */
+ if (domain->bits.hskreq) {
+ regmap_clear_bits(domain->regmap, GPC_PU_PWRHSK,
+ domain->bits.hskreq);
+
+ ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK,
+ reg_val,
+ !(reg_val & domain->bits.hskack),
+ 0, USEC_PER_MSEC);
+ if (ret) {
+ dev_err(domain->dev, "failed to power down ADB400\n");
+ goto out_clk_disable;
+ }
+ }
+
+ if (domain->bits.pxx) {
+ /* enable power control */
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+
+ /* request the domain to power down */
+ regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PDN_REQ,
+ domain->bits.pxx, domain->bits.pxx);
+ /*
+ * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf, wait for the
+ * PUP_REQ/PDN_REQ bit to be cleared
+ */
+ ret = regmap_read_poll_timeout(domain->regmap,
+ GPC_PU_PGC_SW_PDN_REQ, reg_val,
+ !(reg_val & domain->bits.pxx),
+ 0, USEC_PER_MSEC);
+ if (ret) {
+ dev_err(domain->dev, "failed to command PGC\n");
+ goto out_clk_disable;
+ }
+ }
+
+ /* Disable reset clocks for all devices in the domain */
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ if (!IS_ERR(domain->regulator)) {
+ ret = regulator_disable(domain->regulator);
+ if (ret) {
+ dev_err(domain->dev, "failed to disable regulator\n");
+ return ret;
+ }
+ }
+
+ pm_runtime_put(domain->dev);
+
+ return 0;
+
+out_clk_disable:
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ return ret;
}
static const struct imx_pgc_domain imx7_pgc_domains[] = {
@@ -342,7 +502,8 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_GPU_SW_Pxx_REQ,
.map = IMX8M_GPU_A53_DOMAIN,
- .hsk = IMX8M_GPU_HSK_PWRDNREQN,
+ .hskreq = IMX8M_GPU_HSK_PWRDNREQN,
+ .hskack = IMX8M_GPU_HSK_PWRDNACKN,
},
.pgc = IMX8M_PGC_GPU,
},
@@ -354,7 +515,8 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_VPU_SW_Pxx_REQ,
.map = IMX8M_VPU_A53_DOMAIN,
- .hsk = IMX8M_VPU_HSK_PWRDNREQN,
+ .hskreq = IMX8M_VPU_HSK_PWRDNREQN,
+ .hskack = IMX8M_VPU_HSK_PWRDNACKN,
},
.pgc = IMX8M_PGC_VPU,
},
@@ -366,7 +528,8 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_DISP_SW_Pxx_REQ,
.map = IMX8M_DISP_A53_DOMAIN,
- .hsk = IMX8M_DISP_HSK_PWRDNREQN,
+ .hskreq = IMX8M_DISP_HSK_PWRDNREQN,
+ .hskack = IMX8M_DISP_HSK_PWRDNACKN,
},
.pgc = IMX8M_PGC_DISP,
},
@@ -443,40 +606,254 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.reg_access_table = &imx8m_access_table,
};
-static int imx_pgc_get_clocks(struct imx_pgc_domain *domain)
-{
- int i, ret;
-
- for (i = 0; ; i++) {
- struct clk *clk = of_clk_get(domain->dev->of_node, i);
- if (IS_ERR(clk))
- break;
- if (i >= GPC_CLK_MAX) {
- dev_err(domain->dev, "more than %d clocks\n",
- GPC_CLK_MAX);
- ret = -EINVAL;
- goto clk_err;
- }
- domain->clk[i] = clk;
- }
- domain->num_clks = i;
+static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
+ [IMX8MM_POWER_DOMAIN_HSIOMIX] = {
+ .genpd = {
+ .name = "hsiomix",
+ },
+ .bits = {
+ .pxx = 0, /* no power sequence control */
+ .map = 0, /* no power sequence control */
+ .hskreq = IMX8MM_HSIO_HSK_PWRDNREQN,
+ .hskack = IMX8MM_HSIO_HSK_PWRDNACKN,
+ },
+ },
- return 0;
+ [IMX8MM_POWER_DOMAIN_PCIE] = {
+ .genpd = {
+ .name = "pcie",
+ },
+ .bits = {
+ .pxx = IMX8MM_PCIE_SW_Pxx_REQ,
+ .map = IMX8MM_PCIE_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_PCIE,
+ },
-clk_err:
- while (i--)
- clk_put(domain->clk[i]);
+ [IMX8MM_POWER_DOMAIN_OTG1] = {
+ .genpd = {
+ .name = "usb-otg1",
+ },
+ .bits = {
+ .pxx = IMX8MM_OTG1_SW_Pxx_REQ,
+ .map = IMX8MM_OTG1_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_OTG1,
+ },
- return ret;
-}
+ [IMX8MM_POWER_DOMAIN_OTG2] = {
+ .genpd = {
+ .name = "usb-otg2",
+ },
+ .bits = {
+ .pxx = IMX8MM_OTG2_SW_Pxx_REQ,
+ .map = IMX8MM_OTG2_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_OTG2,
+ },
-static void imx_pgc_put_clocks(struct imx_pgc_domain *domain)
-{
- int i;
+ [IMX8MM_POWER_DOMAIN_GPUMIX] = {
+ .genpd = {
+ .name = "gpumix",
+ },
+ .bits = {
+ .pxx = IMX8MM_GPUMIX_SW_Pxx_REQ,
+ .map = IMX8MM_GPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MM_GPUMIX_HSK_PWRDNREQN,
+ .hskack = IMX8MM_GPUMIX_HSK_PWRDNACKN,
+ },
+ .pgc = IMX8MM_PGC_GPUMIX,
+ },
- for (i = domain->num_clks - 1; i >= 0; i--)
- clk_put(domain->clk[i]);
-}
+ [IMX8MM_POWER_DOMAIN_GPU] = {
+ .genpd = {
+ .name = "gpu",
+ },
+ .bits = {
+ .pxx = IMX8MM_GPU_SW_Pxx_REQ,
+ .map = IMX8MM_GPU_A53_DOMAIN,
+ .hskreq = IMX8MM_GPU_HSK_PWRDNREQN,
+ .hskack = IMX8MM_GPU_HSK_PWRDNACKN,
+ },
+ .pgc = IMX8MM_PGC_GPU2D,
+ },
+
+ [IMX8MM_POWER_DOMAIN_VPUMIX] = {
+ .genpd = {
+ .name = "vpumix",
+ },
+ .bits = {
+ .pxx = IMX8MM_VPUMIX_SW_Pxx_REQ,
+ .map = IMX8MM_VPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MM_VPUMIX_HSK_PWRDNREQN,
+ .hskack = IMX8MM_VPUMIX_HSK_PWRDNACKN,
+ },
+ .pgc = IMX8MM_PGC_VPUMIX,
+ },
+
+ [IMX8MM_POWER_DOMAIN_VPUG1] = {
+ .genpd = {
+ .name = "vpu-g1",
+ },
+ .bits = {
+ .pxx = IMX8MM_VPUG1_SW_Pxx_REQ,
+ .map = IMX8MM_VPUG1_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_VPUG1,
+ },
+
+ [IMX8MM_POWER_DOMAIN_VPUG2] = {
+ .genpd = {
+ .name = "vpu-g2",
+ },
+ .bits = {
+ .pxx = IMX8MM_VPUG2_SW_Pxx_REQ,
+ .map = IMX8MM_VPUG2_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_VPUG2,
+ },
+
+ [IMX8MM_POWER_DOMAIN_VPUH1] = {
+ .genpd = {
+ .name = "vpu-h1",
+ },
+ .bits = {
+ .pxx = IMX8MM_VPUH1_SW_Pxx_REQ,
+ .map = IMX8MM_VPUH1_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_VPUH1,
+ },
+
+ [IMX8MM_POWER_DOMAIN_DISPMIX] = {
+ .genpd = {
+ .name = "dispmix",
+ },
+ .bits = {
+ .pxx = IMX8MM_DISPMIX_SW_Pxx_REQ,
+ .map = IMX8MM_DISPMIX_A53_DOMAIN,
+ .hskreq = IMX8MM_DISPMIX_HSK_PWRDNREQN,
+ .hskack = IMX8MM_DISPMIX_HSK_PWRDNACKN,
+ },
+ .pgc = IMX8MM_PGC_DISPMIX,
+ },
+
+ [IMX8MM_POWER_DOMAIN_MIPI] = {
+ .genpd = {
+ .name = "mipi",
+ },
+ .bits = {
+ .pxx = IMX8MM_MIPI_SW_Pxx_REQ,
+ .map = IMX8MM_MIPI_A53_DOMAIN,
+ },
+ .pgc = IMX8MM_PGC_MIPI,
+ },
+};
+
+static const struct regmap_range imx8mm_yes_ranges[] = {
+ regmap_reg_range(GPC_LPCR_A_CORE_BSC,
+ GPC_PU_PWRHSK),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_MIPI),
+ GPC_PGC_SR(IMX8MM_PGC_MIPI)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_PCIE),
+ GPC_PGC_SR(IMX8MM_PGC_PCIE)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_OTG1),
+ GPC_PGC_SR(IMX8MM_PGC_OTG1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_OTG2),
+ GPC_PGC_SR(IMX8MM_PGC_OTG2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_DDR1),
+ GPC_PGC_SR(IMX8MM_PGC_DDR1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPU2D),
+ GPC_PGC_SR(IMX8MM_PGC_GPU2D)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPUMIX),
+ GPC_PGC_SR(IMX8MM_PGC_GPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUMIX),
+ GPC_PGC_SR(IMX8MM_PGC_VPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPU3D),
+ GPC_PGC_SR(IMX8MM_PGC_GPU3D)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_DISPMIX),
+ GPC_PGC_SR(IMX8MM_PGC_DISPMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUG1),
+ GPC_PGC_SR(IMX8MM_PGC_VPUG1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUG2),
+ GPC_PGC_SR(IMX8MM_PGC_VPUG2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUH1),
+ GPC_PGC_SR(IMX8MM_PGC_VPUH1)),
+};
+
+static const struct regmap_access_table imx8mm_access_table = {
+ .yes_ranges = imx8mm_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx8mm_yes_ranges),
+};
+
+static const struct imx_pgc_domain_data imx8mm_pgc_domain_data = {
+ .domains = imx8mm_pgc_domains,
+ .domains_num = ARRAY_SIZE(imx8mm_pgc_domains),
+ .reg_access_table = &imx8mm_access_table,
+};
+
+static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
+ [IMX8MN_POWER_DOMAIN_HSIOMIX] = {
+ .genpd = {
+ .name = "hsiomix",
+ },
+ .bits = {
+ .pxx = 0, /* no power sequence control */
+ .map = 0, /* no power sequence control */
+ .hskreq = IMX8MN_HSIO_HSK_PWRDNREQN,
+ .hskack = IMX8MN_HSIO_HSK_PWRDNACKN,
+ },
+ },
+
+ [IMX8MN_POWER_DOMAIN_OTG1] = {
+ .genpd = {
+ .name = "usb-otg1",
+ },
+ .bits = {
+ .pxx = IMX8MN_OTG1_SW_Pxx_REQ,
+ .map = IMX8MN_OTG1_A53_DOMAIN,
+ },
+ .pgc = IMX8MN_PGC_OTG1,
+ },
+
+ [IMX8MN_POWER_DOMAIN_GPUMIX] = {
+ .genpd = {
+ .name = "gpumix",
+ },
+ .bits = {
+ .pxx = IMX8MN_GPUMIX_SW_Pxx_REQ,
+ .map = IMX8MN_GPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MN_GPUMIX_HSK_PWRDNREQN,
+ .hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
+ },
+ .pgc = IMX8MN_PGC_GPUMIX,
+ },
+};
+
+static const struct regmap_range imx8mn_yes_ranges[] = {
+ regmap_reg_range(GPC_LPCR_A_CORE_BSC,
+ GPC_PU_PWRHSK),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_MIPI),
+ GPC_PGC_SR(IMX8MN_PGC_MIPI)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_OTG1),
+ GPC_PGC_SR(IMX8MN_PGC_OTG1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_DDR1),
+ GPC_PGC_SR(IMX8MN_PGC_DDR1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_GPUMIX),
+ GPC_PGC_SR(IMX8MN_PGC_GPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_DISPMIX),
+ GPC_PGC_SR(IMX8MN_PGC_DISPMIX)),
+};
+
+static const struct regmap_access_table imx8mn_access_table = {
+ .yes_ranges = imx8mn_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx8mn_yes_ranges),
+};
+
+static const struct imx_pgc_domain_data imx8mn_pgc_domain_data = {
+ .domains = imx8mn_pgc_domains,
+ .domains_num = ARRAY_SIZE(imx8mn_pgc_domains),
+ .reg_access_table = &imx8mn_access_table,
+};
static int imx_pgc_domain_probe(struct platform_device *pdev)
{
@@ -495,25 +872,45 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
domain->voltage, domain->voltage);
}
- ret = imx_pgc_get_clocks(domain);
- if (ret)
- return dev_err_probe(domain->dev, ret, "Failed to get domain's clocks\n");
+ domain->num_clks = devm_clk_bulk_get_all(domain->dev, &domain->clks);
+ if (domain->num_clks < 0)
+ return dev_err_probe(domain->dev, domain->num_clks,
+ "Failed to get domain's clocks\n");
+
+ domain->reset = devm_reset_control_array_get_optional_exclusive(domain->dev);
+ if (IS_ERR(domain->reset))
+ return dev_err_probe(domain->dev, PTR_ERR(domain->reset),
+ "Failed to get domain's resets\n");
+
+ pm_runtime_enable(domain->dev);
+
+ if (domain->bits.map)
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, domain->bits.map);
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err(domain->dev, "Failed to init power domain\n");
- imx_pgc_put_clocks(domain);
- return ret;
+ goto out_domain_unmap;
}
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
dev_err(domain->dev, "Failed to add genpd provider\n");
- pm_genpd_remove(&domain->genpd);
- imx_pgc_put_clocks(domain);
+ goto out_genpd_remove;
}
+ return 0;
+
+out_genpd_remove:
+ pm_genpd_remove(&domain->genpd);
+out_domain_unmap:
+ if (domain->bits.map)
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, 0);
+ pm_runtime_disable(domain->dev);
+
return ret;
}
@@ -523,7 +920,12 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
of_genpd_del_provider(domain->dev->of_node);
pm_genpd_remove(&domain->genpd);
- imx_pgc_put_clocks(domain);
+
+ if (domain->bits.map)
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, 0);
+
+ pm_runtime_disable(domain->dev);
return 0;
}
@@ -617,8 +1019,8 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain = pd_pdev->dev.platform_data;
domain->regmap = regmap;
- domain->genpd.power_on = imx_gpc_pu_pgc_sw_pup_req;
- domain->genpd.power_off = imx_gpc_pu_pgc_sw_pdn_req;
+ domain->genpd.power_on = imx_pgc_power_up;
+ domain->genpd.power_off = imx_pgc_power_down;
pd_pdev->dev.parent = dev;
pd_pdev->dev.of_node = np;
@@ -636,6 +1038,8 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
static const struct of_device_id imx_gpcv2_dt_ids[] = {
{ .compatible = "fsl,imx7d-gpc", .data = &imx7_pgc_domain_data, },
+ { .compatible = "fsl,imx8mm-gpc", .data = &imx8mm_pgc_domain_data, },
+ { .compatible = "fsl,imx8mn-gpc", .data = &imx8mn_pgc_domain_data, },
{ .compatible = "fsl,imx8mq-gpc", .data = &imx8m_pgc_domain_data, },
{ }
};
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index 0738c0f36792..ac6d856ba228 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -70,6 +70,9 @@ static int __init imx_soc_device_init(void)
case MXC_CPU_MX35:
soc_id = "i.MX35";
break;
+ case MXC_CPU_MX50:
+ soc_id = "i.MX50";
+ break;
case MXC_CPU_MX51:
ocotp_compat = "fsl,imx51-iim";
soc_id = "i.MX51";
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 071e14496e4b..cc57a384d74d 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -5,8 +5,6 @@
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/nvmem-consumer.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -31,7 +29,7 @@
struct imx8_soc_data {
char *name;
- u32 (*soc_revision)(struct device *dev);
+ u32 (*soc_revision)(void);
};
static u64 soc_uid;
@@ -52,7 +50,7 @@ static u32 imx8mq_soc_revision_from_atf(void)
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
-static u32 __init imx8mq_soc_revision(struct device *dev)
+static u32 __init imx8mq_soc_revision(void)
{
struct device_node *np;
void __iomem *ocotp_base;
@@ -77,20 +75,9 @@ static u32 __init imx8mq_soc_revision(struct device *dev)
rev = REV_B1;
}
- if (dev) {
- int ret;
-
- ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
- if (ret) {
- iounmap(ocotp_base);
- of_node_put(np);
- return ret;
- }
- } else {
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
- soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
- }
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid <<= 32;
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
iounmap(ocotp_base);
of_node_put(np);
@@ -120,7 +107,7 @@ static void __init imx8mm_soc_uid(void)
of_node_put(np);
}
-static u32 __init imx8mm_soc_revision(struct device *dev)
+static u32 __init imx8mm_soc_revision(void)
{
struct device_node *np;
void __iomem *anatop_base;
@@ -138,15 +125,7 @@ static u32 __init imx8mm_soc_revision(struct device *dev)
iounmap(anatop_base);
of_node_put(np);
- if (dev) {
- int ret;
-
- ret = nvmem_cell_read_u64(dev, "soc_unique_id", &soc_uid);
- if (ret)
- return ret;
- } else {
- imx8mm_soc_uid();
- }
+ imx8mm_soc_uid();
return rev;
}
@@ -171,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
.soc_revision = imx8mm_soc_revision,
};
-static __maybe_unused const struct of_device_id imx8_machine_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
@@ -179,20 +158,12 @@ static __maybe_unused const struct of_device_id imx8_machine_match[] = {
{ }
};
-static __maybe_unused const struct of_device_id imx8_soc_match[] = {
- { .compatible = "fsl,imx8mq-soc", .data = &imx8mq_soc_data, },
- { .compatible = "fsl,imx8mm-soc", .data = &imx8mm_soc_data, },
- { .compatible = "fsl,imx8mn-soc", .data = &imx8mn_soc_data, },
- { .compatible = "fsl,imx8mp-soc", .data = &imx8mp_soc_data, },
- { }
-};
-
#define imx8_revision(soc_rev) \
soc_rev ? \
kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
"unknown"
-static int imx8_soc_info(struct platform_device *pdev)
+static int __init imx8_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
@@ -211,10 +182,7 @@ static int imx8_soc_info(struct platform_device *pdev)
if (ret)
goto free_soc;
- if (pdev)
- id = of_match_node(imx8_soc_match, pdev->dev.of_node);
- else
- id = of_match_node(imx8_machine_match, of_root);
+ id = of_match_node(imx8_soc_match, of_root);
if (!id) {
ret = -ENODEV;
goto free_soc;
@@ -223,16 +191,8 @@ static int imx8_soc_info(struct platform_device *pdev)
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
- if (data->soc_revision) {
- if (pdev) {
- soc_rev = data->soc_revision(&pdev->dev);
- ret = soc_rev;
- if (ret < 0)
- goto free_soc;
- } else {
- soc_rev = data->soc_revision(NULL);
- }
- }
+ if (data->soc_revision)
+ soc_rev = data->soc_revision();
}
soc_dev_attr->revision = imx8_revision(soc_rev);
@@ -270,24 +230,4 @@ free_soc:
kfree(soc_dev_attr);
return ret;
}
-
-/* Retain device_initcall is for backward compatibility with DTS. */
-static int __init imx8_soc_init(void)
-{
- if (of_find_matching_node_and_match(NULL, imx8_soc_match, NULL))
- return 0;
-
- return imx8_soc_info(NULL);
-}
device_initcall(imx8_soc_init);
-
-static struct platform_driver imx8_soc_info_driver = {
- .probe = imx8_soc_info,
- .driver = {
- .name = "imx8_soc_info",
- .of_match_table = imx8_soc_match,
- },
-};
-
-module_platform_driver(imx8_soc_info_driver);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index 3c158251a58b..f490c4ca51f5 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/cpu.h>
#define DEBUG_MSG 0
#define DEBUG_FW 0
@@ -692,8 +693,8 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
if (!(ixp4xx_read_feature_bits() &
(IXP4XX_FEATURE_RESET_NPEA << i))) {
- dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
- i, res->start, res->end);
+ dev_info(dev, "NPE%d at %pR not available\n",
+ i, res);
continue; /* NPE already disabled or not present */
}
npe->regs = devm_ioremap_resource(dev, res);
@@ -701,13 +702,12 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
return PTR_ERR(npe->regs);
if (npe_reset(npe)) {
- dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
- i, res->start, res->end);
+ dev_info(dev, "NPE%d at %pR does not reset\n",
+ i, res);
continue;
}
npe->valid = 1;
- dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
- i, res->start, res->end);
+ dev_info(dev, "NPE%d at %pR registered\n", i, res);
found++;
}
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
index 8c968382cea7..9154c7029b05 100644
--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/qmgr.h>
+#include <linux/soc/ixp4xx/cpu.h>
static struct qmgr_regs __iomem *qmgr_regs;
static int qmgr_irq_1;
@@ -145,12 +146,12 @@ static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
/* ACK - it may clear any bits so don't rely on it */
__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
- en_bitmap = qmgr_regs->irqen[0];
+ en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
while (en_bitmap) {
i = __fls(en_bitmap); /* number of the last "low" queue */
en_bitmap &= ~BIT(i);
- src = qmgr_regs->irqsrc[i >> 3];
- stat = qmgr_regs->stat1[i >> 3];
+ src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
+ stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
if (src & 4) /* the IRQ condition is inverted */
stat = ~stat;
if (stat & BIT(src & 3)) {
@@ -170,7 +171,8 @@ static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
/* ACK - it may clear any bits so don't rely on it */
__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
- req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+ req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
+ __raw_readl(&qmgr_regs->statne_h);
while (req_bitmap) {
i = __fls(req_bitmap); /* number of the last "high" queue */
req_bitmap &= ~BIT(i);
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
index e7011d665b15..e6ba3573a772 100644
--- a/drivers/soc/litex/Kconfig
+++ b/drivers/soc/litex/Kconfig
@@ -17,16 +17,4 @@ config LITEX_SOC_CONTROLLER
All drivers that use functions from litex.h must depend on
LITEX.
-config LITEX_SUBREG_SIZE
- int "Size of a LiteX CSR subregister, in bytes"
- depends on LITEX
- range 1 4
- default 4
- help
- LiteX MMIO registers (referred to as Configuration and Status
- registers, or CSRs) are spread across adjacent 8- or 32-bit
- subregisters, located at 32-bit aligned MMIO addresses. Use
- this to select the appropriate size (1 or 4 bytes) matching
- your particular LiteX build.
-
endmenu
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index c3e379a990f2..f75790091d38 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -62,8 +62,7 @@ static int litex_check_csr_access(void __iomem *reg_addr)
/* restore original value of the SCRATCH register */
litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_REG_VALUE);
- pr_info("LiteX SoC Controller driver initialized: subreg:%d, align:%d",
- LITEX_SUBREG_SIZE, LITEX_SUBREG_ALIGN);
+ pr_info("LiteX SoC Controller driver initialized");
return 0;
}
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
index f1cea041dc5a..7c65ad3d1f8a 100644
--- a/drivers/soc/mediatek/mtk-devapc.c
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -234,6 +234,7 @@ static const struct of_device_id mtk_devapc_dt_match[] = {
}, {
},
};
+MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match);
static int mtk_devapc_probe(struct platform_device *pdev)
{
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 0af00efa0ef8..b762bc40f56b 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -211,7 +211,7 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
if (ret)
return ret;
- ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
if (ret)
goto err_reg;
@@ -229,7 +229,7 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
- ret = clk_bulk_enable(pd->num_subsys_clks, pd->subsys_clks);
+ ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
if (ret)
goto err_pwr_ack;
@@ -246,9 +246,9 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
err_disable_sram:
scpsys_sram_disable(pd);
err_disable_subsys_clks:
- clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+ clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
err_pwr_ack:
- clk_bulk_disable(pd->num_clks, pd->clks);
+ clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
err_reg:
scpsys_regulator_disable(pd->supply);
return ret;
@@ -269,7 +269,7 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
if (ret < 0)
return ret;
- clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+ clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
/* subsys power off */
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
@@ -284,7 +284,7 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
if (ret < 0)
return ret;
- clk_bulk_disable(pd->num_clks, pd->clks);
+ clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
scpsys_regulator_disable(pd->supply);
@@ -297,6 +297,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
const struct scpsys_domain_data *domain_data;
struct scpsys_domain *pd;
struct device_node *root_node = scpsys->dev->of_node;
+ struct device_node *smi_node;
struct property *prop;
const char *clk_name;
int i, ret, num_clks;
@@ -352,9 +353,13 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
if (IS_ERR(pd->infracfg))
return ERR_CAST(pd->infracfg);
- pd->smi = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,smi");
- if (IS_ERR(pd->smi))
- return ERR_CAST(pd->smi);
+ smi_node = of_parse_phandle(node, "mediatek,smi", 0);
+ if (smi_node) {
+ pd->smi = device_node_to_regmap(smi_node);
+ of_node_put(smi_node);
+ if (IS_ERR(pd->smi))
+ return ERR_CAST(pd->smi);
+ }
num_clks = of_clk_get_parent_count(node);
if (num_clks > 0) {
@@ -405,14 +410,6 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
pd->subsys_clks[i].clk = clk;
}
- ret = clk_bulk_prepare(pd->num_clks, pd->clks);
- if (ret)
- goto err_put_subsys_clocks;
-
- ret = clk_bulk_prepare(pd->num_subsys_clks, pd->subsys_clks);
- if (ret)
- goto err_unprepare_clocks;
-
/*
* Initially turn on all domains to make the domains usable
* with !CONFIG_PM and to get the hardware in sync with the
@@ -427,7 +424,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
ret = scpsys_power_on(&pd->genpd);
if (ret < 0) {
dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
- goto err_unprepare_clocks;
+ goto err_put_subsys_clocks;
}
}
@@ -435,7 +432,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
ret = -EINVAL;
dev_err(scpsys->dev,
"power domain with id %d already exists, check your device-tree\n", id);
- goto err_unprepare_subsys_clocks;
+ goto err_put_subsys_clocks;
}
if (!pd->data->name)
@@ -455,10 +452,6 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
return scpsys->pd_data.domains[id];
-err_unprepare_subsys_clocks:
- clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
-err_unprepare_clocks:
- clk_bulk_unprepare(pd->num_clks, pd->clks);
err_put_subsys_clocks:
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
err_put_clocks:
@@ -537,10 +530,7 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
"failed to remove domain '%s' : %d - state may be inconsistent\n",
pd->genpd.name, ret);
- clk_bulk_unprepare(pd->num_clks, pd->clks);
clk_bulk_put(pd->num_clks, pd->clks);
-
- clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
}
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index e4de75f35c33..952bc554f443 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -961,6 +961,23 @@ static int mt8183_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
+static int mt8195_regs[] = {
+ [PWRAP_INIT_DONE2] = 0x0,
+ [PWRAP_STAUPD_CTRL] = 0x4C,
+ [PWRAP_TIMER_EN] = 0x3E4,
+ [PWRAP_INT_EN] = 0x420,
+ [PWRAP_INT_FLG] = 0x428,
+ [PWRAP_INT_CLR] = 0x42C,
+ [PWRAP_INT1_EN] = 0x450,
+ [PWRAP_INT1_FLG] = 0x458,
+ [PWRAP_INT1_CLR] = 0x45C,
+ [PWRAP_WACS2_CMD] = 0x880,
+ [PWRAP_SWINF_2_WDATA_31_0] = 0x884,
+ [PWRAP_SWINF_2_RDATA_31_0] = 0x894,
+ [PWRAP_WACS2_VLDCLR] = 0x8A4,
+ [PWRAP_WACS2_RDATA] = 0x8A8,
+};
+
static int mt8516_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -1066,6 +1083,7 @@ enum pwrap_type {
PWRAP_MT8135,
PWRAP_MT8173,
PWRAP_MT8183,
+ PWRAP_MT8195,
PWRAP_MT8516,
};
@@ -1525,6 +1543,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
break;
case PWRAP_MT6873:
case PWRAP_MT8183:
+ case PWRAP_MT8195:
break;
}
@@ -2025,6 +2044,19 @@ static const struct pmic_wrapper_type pwrap_mt8183 = {
.init_soc_specific = pwrap_mt8183_init_soc_specific,
};
+static struct pmic_wrapper_type pwrap_mt8195 = {
+ .regs = mt8195_regs,
+ .type = PWRAP_MT8195,
+ .arb_en_all = 0x777f, /* NEED CONFIRM */
+ .int_en_all = 0x180000, /* NEED CONFIRM */
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_ARB,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static struct pmic_wrapper_type pwrap_mt8516 = {
.regs = mt8516_regs,
.type = PWRAP_MT8516,
@@ -2066,6 +2098,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt8183-pwrap",
.data = &pwrap_mt8183,
}, {
+ .compatible = "mediatek,mt8195-pwrap",
+ .data = &pwrap_mt8195,
+ }, {
.compatible = "mediatek,mt8516-pwrap",
.data = &pwrap_mt8516,
}, {
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index bb21c4f1c0c4..2daa17ba54a3 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -271,9 +271,30 @@ static const struct rpmhpd_desc sc7280_desc = {
.num_pds = ARRAY_SIZE(sc7280_rpmhpds),
};
+/* SC8180x RPMH powerdomains */
+static struct rpmhpd *sc8180x_rpmhpds[] = {
+ [SC8180X_CX] = &sdm845_cx,
+ [SC8180X_CX_AO] = &sdm845_cx_ao,
+ [SC8180X_EBI] = &sdm845_ebi,
+ [SC8180X_GFX] = &sdm845_gfx,
+ [SC8180X_LCX] = &sdm845_lcx,
+ [SC8180X_LMX] = &sdm845_lmx,
+ [SC8180X_MMCX] = &sm8150_mmcx,
+ [SC8180X_MMCX_AO] = &sm8150_mmcx_ao,
+ [SC8180X_MSS] = &sdm845_mss,
+ [SC8180X_MX] = &sdm845_mx,
+ [SC8180X_MX_AO] = &sdm845_mx_ao,
+};
+
+static const struct rpmhpd_desc sc8180x_desc = {
+ .rpmhpds = sc8180x_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc8180x_rpmhpds),
+};
+
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
+ { .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 27733b0e7fca..0b532a892d60 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -118,6 +118,27 @@ struct rpmpd_desc {
static DEFINE_MUTEX(rpmpd_lock);
+/* mdm9607 RPM Power Domains */
+DEFINE_RPMPD_PAIR(mdm9607, vddcx, vddcx_ao, SMPA, LEVEL, 3);
+DEFINE_RPMPD_VFL(mdm9607, vddcx_vfl, SMPA, 3);
+
+DEFINE_RPMPD_PAIR(mdm9607, vddmx, vddmx_ao, LDOA, LEVEL, 12);
+DEFINE_RPMPD_VFL(mdm9607, vddmx_vfl, LDOA, 12);
+static struct rpmpd *mdm9607_rpmpds[] = {
+ [MDM9607_VDDCX] = &mdm9607_vddcx,
+ [MDM9607_VDDCX_AO] = &mdm9607_vddcx_ao,
+ [MDM9607_VDDCX_VFL] = &mdm9607_vddcx_vfl,
+ [MDM9607_VDDMX] = &mdm9607_vddmx,
+ [MDM9607_VDDMX_AO] = &mdm9607_vddmx_ao,
+ [MDM9607_VDDMX_VFL] = &mdm9607_vddmx_vfl,
+};
+
+static const struct rpmpd_desc mdm9607_desc = {
+ .rpmpds = mdm9607_rpmpds,
+ .num_pds = ARRAY_SIZE(mdm9607_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
/* msm8939 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
@@ -326,6 +347,7 @@ static const struct rpmpd_desc sdm660_desc = {
};
static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b93218cb50b5..bc0be1d4be5f 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -233,6 +233,7 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
{ .compatible = "qcom,rpm-ipq6018" },
+ { .compatible = "qcom,rpm-msm8226" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8936" },
{ .compatible = "qcom,rpm-msm8974" },
@@ -241,6 +242,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
{ .compatible = "qcom,rpm-sdm660" },
+ { .compatible = "qcom,rpm-sm6125" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index d2b558438deb..31faf4aa868e 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -151,6 +151,42 @@ void qcom_smem_state_put(struct qcom_smem_state *state)
}
EXPORT_SYMBOL_GPL(qcom_smem_state_put);
+static void devm_qcom_smem_state_release(struct device *dev, void *res)
+{
+ qcom_smem_state_put(*(struct qcom_smem_state **)res);
+}
+
+/**
+ * devm_qcom_smem_state_get() - acquire handle to a devres managed state
+ * @dev: client device pointer
+ * @con_id: name of the state to lookup
+ * @bit: flags from the state reference, indicating which bit's affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() is called
+ * automatically when @dev is removed.
+ */
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ struct qcom_smem_state **ptr, *state;
+
+ ptr = devres_alloc(devm_qcom_smem_state_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ state = qcom_smem_state_get(dev, con_id, bit);
+ if (!IS_ERR(state)) {
+ *ptr = state;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(devm_qcom_smem_state_get);
+
/**
* qcom_smem_state_register() - register a new state
* @of_node: of_node used for matching client lookups
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index f6cfb79338f0..b2f049faa3df 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -70,21 +70,33 @@ static const char *const socinfo_image_names[] = {
static const char *const pmic_models[] = {
[0] = "Unknown PMIC model",
+ [1] = "PM8941",
+ [2] = "PM8841",
+ [3] = "PM8019",
+ [4] = "PM8226",
+ [5] = "PM8110",
+ [6] = "PMA8084",
+ [7] = "PMI8962",
+ [8] = "PMD9635",
[9] = "PM8994",
+ [10] = "PMI8994",
[11] = "PM8916",
- [13] = "PM8058",
+ [12] = "PM8004",
+ [13] = "PM8909/PM8058",
[14] = "PM8028",
[15] = "PM8901",
- [16] = "PM8027",
- [17] = "ISL9519",
+ [16] = "PM8950/PM8027",
+ [17] = "PMI8950/ISL9519",
[18] = "PM8921",
[19] = "PM8018",
- [20] = "PM8015",
- [21] = "PM8014",
+ [20] = "PM8998/PM8015",
+ [21] = "PMI8998/PM8014",
[22] = "PM8821",
[23] = "PM8038",
- [24] = "PM8922",
+ [24] = "PM8005/PM8922",
[25] = "PM8917",
+ [26] = "PM660L",
+ [27] = "PM660",
[30] = "PM8150",
[31] = "PM8150L",
[32] = "PM8150B",
@@ -195,11 +207,30 @@ static const struct soc_id soc_id[] = {
{ 139, "APQ8060AB" },
{ 140, "MSM8260AB" },
{ 141, "MSM8660AB" },
+ { 145, "MSM8626" },
+ { 147, "MSM8610" },
+ { 153, "APQ8064AB" },
+ { 158, "MSM8226" },
+ { 159, "MSM8526" },
+ { 161, "MSM8110" },
+ { 162, "MSM8210" },
+ { 163, "MSM8810" },
+ { 164, "MSM8212" },
+ { 165, "MSM8612" },
+ { 166, "MSM8112" },
+ { 168, "MSM8225Q" },
+ { 169, "MSM8625Q" },
+ { 170, "MSM8125Q" },
+ { 172, "APQ8064AA" },
{ 178, "APQ8084" },
{ 184, "APQ8074" },
{ 185, "MSM8274" },
{ 186, "MSM8674" },
{ 194, "MSM8974PRO" },
+ { 198, "MSM8126" },
+ { 199, "APQ8026" },
+ { 200, "MSM8926" },
+ { 205, "MSM8326" },
{ 206, "MSM8916" },
{ 207, "MSM8994" },
{ 208, "APQ8074-AA" },
@@ -213,6 +244,14 @@ static const struct soc_id soc_id[] = {
{ 216, "MSM8674PRO" },
{ 217, "MSM8974-AA" },
{ 218, "MSM8974-AB" },
+ { 219, "APQ8028" },
+ { 220, "MSM8128" },
+ { 221, "MSM8228" },
+ { 222, "MSM8528" },
+ { 223, "MSM8628" },
+ { 224, "MSM8928" },
+ { 225, "MSM8510" },
+ { 226, "MSM8512" },
{ 233, "MSM8936" },
{ 239, "MSM8939" },
{ 240, "APQ8036" },
@@ -254,8 +293,13 @@ static const struct soc_id soc_id[] = {
{ 350, "SDA632" },
{ 351, "SDA450" },
{ 356, "SM8250" },
+ { 394, "SM6125" },
{ 402, "IPQ6018" },
+ { 403, "IPQ6028" },
+ { 421, "IPQ6000" },
+ { 422, "IPQ6010" },
{ 425, "SC7180" },
+ { 453, "IPQ6005" },
{ 455, "QRB5165" },
};
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index b70bbc38efc6..71b44c31b012 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -279,6 +279,11 @@ config ARCH_R8A774B1
help
This enables support for the Renesas RZ/G2N SoC.
+config ARCH_R9A07G044
+ bool "ARM64 Platform support for RZ/G2L"
+ help
+ This enables support for the Renesas RZ/G2L SoC variants.
+
endif # ARM64
config RST_RCAR
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 0f8eff4a641a..8310fce7714e 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -56,6 +56,10 @@ static const struct renesas_family fam_rzg2 __initconst __maybe_unused = {
.reg = 0xfff00044, /* PRR (Product Register) */
};
+static const struct renesas_family fam_rzg2l __initconst __maybe_unused = {
+ .name = "RZ/G2L",
+};
+
static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
.name = "SH-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -64,7 +68,7 @@ static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
struct renesas_soc {
const struct renesas_family *family;
- u8 id;
+ u32 id;
};
static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
@@ -131,6 +135,11 @@ static const struct renesas_soc soc_rz_g2h __initconst __maybe_unused = {
.id = 0x4f,
};
+static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = {
+ .family = &fam_rzg2l,
+ .id = 0x841c447,
+};
+
static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
.family = &fam_rcar_gen1,
};
@@ -299,6 +308,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A779A0
{ .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
#endif
+#if defined(CONFIG_ARCH_R9A07G044)
+ { .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
+#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@@ -348,6 +360,25 @@ static int __init renesas_soc_init(void)
goto done;
}
+ np = of_find_compatible_node(NULL, NULL, "renesas,r9a07g044-sysc");
+ if (np) {
+ chipid = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (chipid) {
+ product = readl(chipid + 0x0a04);
+ iounmap(chipid);
+
+ if (soc->id && (product & 0xfffffff) != soc->id) {
+ pr_warn("SoC mismatch (product = 0x%x)\n",
+ product);
+ return -ENODEV;
+ }
+ }
+
+ goto done;
+ }
+
/* Try PRR first, then hardcoded fallback */
np = of_find_compatible_node(NULL, NULL, "renesas,prr");
if (np) {
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 54eb6cfc5d5b..0868b7d406fb 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -27,8 +27,10 @@
#include <dt-bindings/power/rk3366-power.h>
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
+#include <dt-bindings/power/rk3568-power.h>
struct rockchip_domain_info {
+ const char *name;
int pwr_mask;
int status_mask;
int req_mask;
@@ -85,8 +87,9 @@ struct rockchip_pmu {
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
-#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
+#define DOMAIN(_name, pwr, status, req, idle, ack, wakeup) \
{ \
+ .name = _name, \
.pwr_mask = (pwr), \
.status_mask = (status), \
.req_mask = (req), \
@@ -95,8 +98,9 @@ struct rockchip_pmu {
.active_wakeup = (wakeup), \
}
-#define DOMAIN_M(pwr, status, req, idle, ack, wakeup) \
+#define DOMAIN_M(_name, pwr, status, req, idle, ack, wakeup) \
{ \
+ .name = _name, \
.pwr_w_mask = (pwr) << 16, \
.pwr_mask = (pwr), \
.status_mask = (status), \
@@ -107,8 +111,9 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
-#define DOMAIN_RK3036(req, ack, idle, wakeup) \
+#define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
{ \
+ .name = _name, \
.req_mask = (req), \
.req_w_mask = (req) << 16, \
.ack_mask = (ack), \
@@ -116,20 +121,23 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
-#define DOMAIN_PX30(pwr, status, req, wakeup) \
- DOMAIN_M(pwr, status, req, (req) << 16, req, wakeup)
+#define DOMAIN_PX30(name, pwr, status, req, wakeup) \
+ DOMAIN_M(name, pwr, status, req, (req) << 16, req, wakeup)
-#define DOMAIN_RK3288(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, req, (req) << 16, wakeup)
+#define DOMAIN_RK3288(name, pwr, status, req, wakeup) \
+ DOMAIN(name, pwr, status, req, req, (req) << 16, wakeup)
-#define DOMAIN_RK3328(pwr, status, req, wakeup) \
- DOMAIN_M(pwr, pwr, req, (req) << 10, req, wakeup)
+#define DOMAIN_RK3328(name, pwr, status, req, wakeup) \
+ DOMAIN_M(name, pwr, pwr, req, (req) << 10, req, wakeup)
-#define DOMAIN_RK3368(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, (req) << 16, req, wakeup)
+#define DOMAIN_RK3368(name, pwr, status, req, wakeup) \
+ DOMAIN(name, pwr, status, req, (req) << 16, req, wakeup)
-#define DOMAIN_RK3399(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, req, req, wakeup)
+#define DOMAIN_RK3399(name, pwr, status, req, wakeup) \
+ DOMAIN(name, pwr, status, req, req, req, wakeup)
+
+#define DOMAIN_RK3568(name, pwr, req, wakeup) \
+ DOMAIN_M(name, pwr, pwr, req, req, req, wakeup)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -490,7 +498,10 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
goto err_unprepare_clocks;
}
- pd->genpd.name = node->name;
+ if (pd->info->name)
+ pd->genpd.name = pd->info->name;
+ else
+ pd->genpd.name = kbasename(node->full_name);
pd->genpd.power_off = rockchip_pd_power_off;
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
@@ -716,129 +727,141 @@ err_out:
}
static const struct rockchip_domain_info px30_pm_domains[] = {
- [PX30_PD_USB] = DOMAIN_PX30(BIT(5), BIT(5), BIT(10), false),
- [PX30_PD_SDCARD] = DOMAIN_PX30(BIT(8), BIT(8), BIT(9), false),
- [PX30_PD_GMAC] = DOMAIN_PX30(BIT(10), BIT(10), BIT(6), false),
- [PX30_PD_MMC_NAND] = DOMAIN_PX30(BIT(11), BIT(11), BIT(5), false),
- [PX30_PD_VPU] = DOMAIN_PX30(BIT(12), BIT(12), BIT(14), false),
- [PX30_PD_VO] = DOMAIN_PX30(BIT(13), BIT(13), BIT(7), false),
- [PX30_PD_VI] = DOMAIN_PX30(BIT(14), BIT(14), BIT(8), false),
- [PX30_PD_GPU] = DOMAIN_PX30(BIT(15), BIT(15), BIT(2), false),
+ [PX30_PD_USB] = DOMAIN_PX30("usb", BIT(5), BIT(5), BIT(10), false),
+ [PX30_PD_SDCARD] = DOMAIN_PX30("sdcard", BIT(8), BIT(8), BIT(9), false),
+ [PX30_PD_GMAC] = DOMAIN_PX30("gmac", BIT(10), BIT(10), BIT(6), false),
+ [PX30_PD_MMC_NAND] = DOMAIN_PX30("mmc_nand", BIT(11), BIT(11), BIT(5), false),
+ [PX30_PD_VPU] = DOMAIN_PX30("vpu", BIT(12), BIT(12), BIT(14), false),
+ [PX30_PD_VO] = DOMAIN_PX30("vo", BIT(13), BIT(13), BIT(7), false),
+ [PX30_PD_VI] = DOMAIN_PX30("vi", BIT(14), BIT(14), BIT(8), false),
+ [PX30_PD_GPU] = DOMAIN_PX30("gpu", BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3036_pm_domains[] = {
- [RK3036_PD_MSCH] = DOMAIN_RK3036(BIT(14), BIT(23), BIT(30), true),
- [RK3036_PD_CORE] = DOMAIN_RK3036(BIT(13), BIT(17), BIT(24), false),
- [RK3036_PD_PERI] = DOMAIN_RK3036(BIT(12), BIT(18), BIT(25), false),
- [RK3036_PD_VIO] = DOMAIN_RK3036(BIT(11), BIT(19), BIT(26), false),
- [RK3036_PD_VPU] = DOMAIN_RK3036(BIT(10), BIT(20), BIT(27), false),
- [RK3036_PD_GPU] = DOMAIN_RK3036(BIT(9), BIT(21), BIT(28), false),
- [RK3036_PD_SYS] = DOMAIN_RK3036(BIT(8), BIT(22), BIT(29), false),
+ [RK3036_PD_MSCH] = DOMAIN_RK3036("msch", BIT(14), BIT(23), BIT(30), true),
+ [RK3036_PD_CORE] = DOMAIN_RK3036("core", BIT(13), BIT(17), BIT(24), false),
+ [RK3036_PD_PERI] = DOMAIN_RK3036("peri", BIT(12), BIT(18), BIT(25), false),
+ [RK3036_PD_VIO] = DOMAIN_RK3036("vio", BIT(11), BIT(19), BIT(26), false),
+ [RK3036_PD_VPU] = DOMAIN_RK3036("vpu", BIT(10), BIT(20), BIT(27), false),
+ [RK3036_PD_GPU] = DOMAIN_RK3036("gpu", BIT(9), BIT(21), BIT(28), false),
+ [RK3036_PD_SYS] = DOMAIN_RK3036("sys", BIT(8), BIT(22), BIT(29), false),
};
static const struct rockchip_domain_info rk3066_pm_domains[] = {
- [RK3066_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
- [RK3066_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
- [RK3066_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
- [RK3066_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
- [RK3066_PD_CPU] = DOMAIN(0, BIT(5), BIT(1), BIT(26), BIT(31), false),
+ [RK3066_PD_GPU] = DOMAIN("gpu", BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
+ [RK3066_PD_VIDEO] = DOMAIN("video", BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
+ [RK3066_PD_VIO] = DOMAIN("vio", BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
+ [RK3066_PD_PERI] = DOMAIN("peri", BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
+ [RK3066_PD_CPU] = DOMAIN("cpu", 0, BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3128_pm_domains[] = {
- [RK3128_PD_CORE] = DOMAIN_RK3288(BIT(0), BIT(0), BIT(4), false),
- [RK3128_PD_MSCH] = DOMAIN_RK3288(0, 0, BIT(6), true),
- [RK3128_PD_VIO] = DOMAIN_RK3288(BIT(3), BIT(3), BIT(2), false),
- [RK3128_PD_VIDEO] = DOMAIN_RK3288(BIT(2), BIT(2), BIT(1), false),
- [RK3128_PD_GPU] = DOMAIN_RK3288(BIT(1), BIT(1), BIT(3), false),
+ [RK3128_PD_CORE] = DOMAIN_RK3288("core", BIT(0), BIT(0), BIT(4), false),
+ [RK3128_PD_MSCH] = DOMAIN_RK3288("msch", 0, 0, BIT(6), true),
+ [RK3128_PD_VIO] = DOMAIN_RK3288("vio", BIT(3), BIT(3), BIT(2), false),
+ [RK3128_PD_VIDEO] = DOMAIN_RK3288("video", BIT(2), BIT(2), BIT(1), false),
+ [RK3128_PD_GPU] = DOMAIN_RK3288("gpu", BIT(1), BIT(1), BIT(3), false),
};
static const struct rockchip_domain_info rk3188_pm_domains[] = {
- [RK3188_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
- [RK3188_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
- [RK3188_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
- [RK3188_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
- [RK3188_PD_CPU] = DOMAIN(BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
+ [RK3188_PD_GPU] = DOMAIN("gpu", BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
+ [RK3188_PD_VIDEO] = DOMAIN("video", BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
+ [RK3188_PD_VIO] = DOMAIN("vio", BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
+ [RK3188_PD_PERI] = DOMAIN("peri", BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
+ [RK3188_PD_CPU] = DOMAIN("cpu", BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3228_pm_domains[] = {
- [RK3228_PD_CORE] = DOMAIN_RK3036(BIT(0), BIT(0), BIT(16), true),
- [RK3228_PD_MSCH] = DOMAIN_RK3036(BIT(1), BIT(1), BIT(17), true),
- [RK3228_PD_BUS] = DOMAIN_RK3036(BIT(2), BIT(2), BIT(18), true),
- [RK3228_PD_SYS] = DOMAIN_RK3036(BIT(3), BIT(3), BIT(19), true),
- [RK3228_PD_VIO] = DOMAIN_RK3036(BIT(4), BIT(4), BIT(20), false),
- [RK3228_PD_VOP] = DOMAIN_RK3036(BIT(5), BIT(5), BIT(21), false),
- [RK3228_PD_VPU] = DOMAIN_RK3036(BIT(6), BIT(6), BIT(22), false),
- [RK3228_PD_RKVDEC] = DOMAIN_RK3036(BIT(7), BIT(7), BIT(23), false),
- [RK3228_PD_GPU] = DOMAIN_RK3036(BIT(8), BIT(8), BIT(24), false),
- [RK3228_PD_PERI] = DOMAIN_RK3036(BIT(9), BIT(9), BIT(25), true),
- [RK3228_PD_GMAC] = DOMAIN_RK3036(BIT(10), BIT(10), BIT(26), false),
+ [RK3228_PD_CORE] = DOMAIN_RK3036("core", BIT(0), BIT(0), BIT(16), true),
+ [RK3228_PD_MSCH] = DOMAIN_RK3036("msch", BIT(1), BIT(1), BIT(17), true),
+ [RK3228_PD_BUS] = DOMAIN_RK3036("bus", BIT(2), BIT(2), BIT(18), true),
+ [RK3228_PD_SYS] = DOMAIN_RK3036("sys", BIT(3), BIT(3), BIT(19), true),
+ [RK3228_PD_VIO] = DOMAIN_RK3036("vio", BIT(4), BIT(4), BIT(20), false),
+ [RK3228_PD_VOP] = DOMAIN_RK3036("vop", BIT(5), BIT(5), BIT(21), false),
+ [RK3228_PD_VPU] = DOMAIN_RK3036("vpu", BIT(6), BIT(6), BIT(22), false),
+ [RK3228_PD_RKVDEC] = DOMAIN_RK3036("vdec", BIT(7), BIT(7), BIT(23), false),
+ [RK3228_PD_GPU] = DOMAIN_RK3036("gpu", BIT(8), BIT(8), BIT(24), false),
+ [RK3228_PD_PERI] = DOMAIN_RK3036("peri", BIT(9), BIT(9), BIT(25), true),
+ [RK3228_PD_GMAC] = DOMAIN_RK3036("gmac", BIT(10), BIT(10), BIT(26), false),
};
static const struct rockchip_domain_info rk3288_pm_domains[] = {
- [RK3288_PD_VIO] = DOMAIN_RK3288(BIT(7), BIT(7), BIT(4), false),
- [RK3288_PD_HEVC] = DOMAIN_RK3288(BIT(14), BIT(10), BIT(9), false),
- [RK3288_PD_VIDEO] = DOMAIN_RK3288(BIT(8), BIT(8), BIT(3), false),
- [RK3288_PD_GPU] = DOMAIN_RK3288(BIT(9), BIT(9), BIT(2), false),
+ [RK3288_PD_VIO] = DOMAIN_RK3288("vio", BIT(7), BIT(7), BIT(4), false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288("hevc", BIT(14), BIT(10), BIT(9), false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288("video", BIT(8), BIT(8), BIT(3), false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288("gpu", BIT(9), BIT(9), BIT(2), false),
};
static const struct rockchip_domain_info rk3328_pm_domains[] = {
- [RK3328_PD_CORE] = DOMAIN_RK3328(0, BIT(0), BIT(0), false),
- [RK3328_PD_GPU] = DOMAIN_RK3328(0, BIT(1), BIT(1), false),
- [RK3328_PD_BUS] = DOMAIN_RK3328(0, BIT(2), BIT(2), true),
- [RK3328_PD_MSCH] = DOMAIN_RK3328(0, BIT(3), BIT(3), true),
- [RK3328_PD_PERI] = DOMAIN_RK3328(0, BIT(4), BIT(4), true),
- [RK3328_PD_VIDEO] = DOMAIN_RK3328(0, BIT(5), BIT(5), false),
- [RK3328_PD_HEVC] = DOMAIN_RK3328(0, BIT(6), BIT(6), false),
- [RK3328_PD_VIO] = DOMAIN_RK3328(0, BIT(8), BIT(8), false),
- [RK3328_PD_VPU] = DOMAIN_RK3328(0, BIT(9), BIT(9), false),
+ [RK3328_PD_CORE] = DOMAIN_RK3328("core", 0, BIT(0), BIT(0), false),
+ [RK3328_PD_GPU] = DOMAIN_RK3328("gpu", 0, BIT(1), BIT(1), false),
+ [RK3328_PD_BUS] = DOMAIN_RK3328("bus", 0, BIT(2), BIT(2), true),
+ [RK3328_PD_MSCH] = DOMAIN_RK3328("msch", 0, BIT(3), BIT(3), true),
+ [RK3328_PD_PERI] = DOMAIN_RK3328("peri", 0, BIT(4), BIT(4), true),
+ [RK3328_PD_VIDEO] = DOMAIN_RK3328("video", 0, BIT(5), BIT(5), false),
+ [RK3328_PD_HEVC] = DOMAIN_RK3328("hevc", 0, BIT(6), BIT(6), false),
+ [RK3328_PD_VIO] = DOMAIN_RK3328("vio", 0, BIT(8), BIT(8), false),
+ [RK3328_PD_VPU] = DOMAIN_RK3328("vpu", 0, BIT(9), BIT(9), false),
};
static const struct rockchip_domain_info rk3366_pm_domains[] = {
- [RK3366_PD_PERI] = DOMAIN_RK3368(BIT(10), BIT(10), BIT(6), true),
- [RK3366_PD_VIO] = DOMAIN_RK3368(BIT(14), BIT(14), BIT(8), false),
- [RK3366_PD_VIDEO] = DOMAIN_RK3368(BIT(13), BIT(13), BIT(7), false),
- [RK3366_PD_RKVDEC] = DOMAIN_RK3368(BIT(11), BIT(11), BIT(7), false),
- [RK3366_PD_WIFIBT] = DOMAIN_RK3368(BIT(8), BIT(8), BIT(9), false),
- [RK3366_PD_VPU] = DOMAIN_RK3368(BIT(12), BIT(12), BIT(7), false),
- [RK3366_PD_GPU] = DOMAIN_RK3368(BIT(15), BIT(15), BIT(2), false),
+ [RK3366_PD_PERI] = DOMAIN_RK3368("peri", BIT(10), BIT(10), BIT(6), true),
+ [RK3366_PD_VIO] = DOMAIN_RK3368("vio", BIT(14), BIT(14), BIT(8), false),
+ [RK3366_PD_VIDEO] = DOMAIN_RK3368("video", BIT(13), BIT(13), BIT(7), false),
+ [RK3366_PD_RKVDEC] = DOMAIN_RK3368("vdec", BIT(11), BIT(11), BIT(7), false),
+ [RK3366_PD_WIFIBT] = DOMAIN_RK3368("wifibt", BIT(8), BIT(8), BIT(9), false),
+ [RK3366_PD_VPU] = DOMAIN_RK3368("vpu", BIT(12), BIT(12), BIT(7), false),
+ [RK3366_PD_GPU] = DOMAIN_RK3368("gpu", BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
- [RK3368_PD_PERI] = DOMAIN_RK3368(BIT(13), BIT(12), BIT(6), true),
- [RK3368_PD_VIO] = DOMAIN_RK3368(BIT(15), BIT(14), BIT(8), false),
- [RK3368_PD_VIDEO] = DOMAIN_RK3368(BIT(14), BIT(13), BIT(7), false),
- [RK3368_PD_GPU_0] = DOMAIN_RK3368(BIT(16), BIT(15), BIT(2), false),
- [RK3368_PD_GPU_1] = DOMAIN_RK3368(BIT(17), BIT(16), BIT(2), false),
+ [RK3368_PD_PERI] = DOMAIN_RK3368("peri", BIT(13), BIT(12), BIT(6), true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368("vio", BIT(15), BIT(14), BIT(8), false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368("video", BIT(14), BIT(13), BIT(7), false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368("gpu_0", BIT(16), BIT(15), BIT(2), false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368("gpu_1", BIT(17), BIT(16), BIT(2), false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
- [RK3399_PD_TCPD0] = DOMAIN_RK3399(BIT(8), BIT(8), 0, false),
- [RK3399_PD_TCPD1] = DOMAIN_RK3399(BIT(9), BIT(9), 0, false),
- [RK3399_PD_CCI] = DOMAIN_RK3399(BIT(10), BIT(10), 0, true),
- [RK3399_PD_CCI0] = DOMAIN_RK3399(0, 0, BIT(15), true),
- [RK3399_PD_CCI1] = DOMAIN_RK3399(0, 0, BIT(16), true),
- [RK3399_PD_PERILP] = DOMAIN_RK3399(BIT(11), BIT(11), BIT(1), true),
- [RK3399_PD_PERIHP] = DOMAIN_RK3399(BIT(12), BIT(12), BIT(2), true),
- [RK3399_PD_CENTER] = DOMAIN_RK3399(BIT(13), BIT(13), BIT(14), true),
- [RK3399_PD_VIO] = DOMAIN_RK3399(BIT(14), BIT(14), BIT(17), false),
- [RK3399_PD_GPU] = DOMAIN_RK3399(BIT(15), BIT(15), BIT(0), false),
- [RK3399_PD_VCODEC] = DOMAIN_RK3399(BIT(16), BIT(16), BIT(3), false),
- [RK3399_PD_VDU] = DOMAIN_RK3399(BIT(17), BIT(17), BIT(4), false),
- [RK3399_PD_RGA] = DOMAIN_RK3399(BIT(18), BIT(18), BIT(5), false),
- [RK3399_PD_IEP] = DOMAIN_RK3399(BIT(19), BIT(19), BIT(6), false),
- [RK3399_PD_VO] = DOMAIN_RK3399(BIT(20), BIT(20), 0, false),
- [RK3399_PD_VOPB] = DOMAIN_RK3399(0, 0, BIT(7), false),
- [RK3399_PD_VOPL] = DOMAIN_RK3399(0, 0, BIT(8), false),
- [RK3399_PD_ISP0] = DOMAIN_RK3399(BIT(22), BIT(22), BIT(9), false),
- [RK3399_PD_ISP1] = DOMAIN_RK3399(BIT(23), BIT(23), BIT(10), false),
- [RK3399_PD_HDCP] = DOMAIN_RK3399(BIT(24), BIT(24), BIT(11), false),
- [RK3399_PD_GMAC] = DOMAIN_RK3399(BIT(25), BIT(25), BIT(23), true),
- [RK3399_PD_EMMC] = DOMAIN_RK3399(BIT(26), BIT(26), BIT(24), true),
- [RK3399_PD_USB3] = DOMAIN_RK3399(BIT(27), BIT(27), BIT(12), true),
- [RK3399_PD_EDP] = DOMAIN_RK3399(BIT(28), BIT(28), BIT(22), false),
- [RK3399_PD_GIC] = DOMAIN_RK3399(BIT(29), BIT(29), BIT(27), true),
- [RK3399_PD_SD] = DOMAIN_RK3399(BIT(30), BIT(30), BIT(28), true),
- [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(BIT(31), BIT(31), BIT(29), true),
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399("tcpd0", BIT(8), BIT(8), 0, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399("tcpd1", BIT(9), BIT(9), 0, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399("cci", BIT(10), BIT(10), 0, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399("cci0", 0, 0, BIT(15), true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399("cci1", 0, 0, BIT(16), true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399("perilp", BIT(11), BIT(11), BIT(1), true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399("perihp", BIT(12), BIT(12), BIT(2), true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399("center", BIT(13), BIT(13), BIT(14), true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399("vio", BIT(14), BIT(14), BIT(17), false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399("gpu", BIT(15), BIT(15), BIT(0), false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399("vcodec", BIT(16), BIT(16), BIT(3), false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399("vdu", BIT(17), BIT(17), BIT(4), false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399("rga", BIT(18), BIT(18), BIT(5), false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399("iep", BIT(19), BIT(19), BIT(6), false),
+ [RK3399_PD_VO] = DOMAIN_RK3399("vo", BIT(20), BIT(20), 0, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399("vopb", 0, 0, BIT(7), false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399("vopl", 0, 0, BIT(8), false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399("isp0", BIT(22), BIT(22), BIT(9), false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399("isp1", BIT(23), BIT(23), BIT(10), false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399("hdcp", BIT(24), BIT(24), BIT(11), false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399("gmac", BIT(25), BIT(25), BIT(23), true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399("emmc", BIT(26), BIT(26), BIT(24), true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399("usb3", BIT(27), BIT(27), BIT(12), true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399("edp", BIT(28), BIT(28), BIT(22), false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399("gic", BIT(29), BIT(29), BIT(27), true),
+ [RK3399_PD_SD] = DOMAIN_RK3399("sd", BIT(30), BIT(30), BIT(28), true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399("sdioaudio", BIT(31), BIT(31), BIT(29), true),
+};
+
+static const struct rockchip_domain_info rk3568_pm_domains[] = {
+ [RK3568_PD_NPU] = DOMAIN_RK3568("npu", BIT(1), BIT(2), false),
+ [RK3568_PD_GPU] = DOMAIN_RK3568("gpu", BIT(0), BIT(1), false),
+ [RK3568_PD_VI] = DOMAIN_RK3568("vi", BIT(6), BIT(3), false),
+ [RK3568_PD_VO] = DOMAIN_RK3568("vo", BIT(7), BIT(4), false),
+ [RK3568_PD_RGA] = DOMAIN_RK3568("rga", BIT(5), BIT(5), false),
+ [RK3568_PD_VPU] = DOMAIN_RK3568("vpu", BIT(2), BIT(6), false),
+ [RK3568_PD_RKVDEC] = DOMAIN_RK3568("vdec", BIT(4), BIT(8), false),
+ [RK3568_PD_RKVENC] = DOMAIN_RK3568("venc", BIT(3), BIT(7), false),
+ [RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false),
};
static const struct rockchip_pmu_info px30_pmu = {
@@ -976,6 +999,17 @@ static const struct rockchip_pmu_info rk3399_pmu = {
.domain_info = rk3399_pm_domains,
};
+static const struct rockchip_pmu_info rk3568_pmu = {
+ .pwr_offset = 0xa0,
+ .status_offset = 0x98,
+ .req_offset = 0x50,
+ .idle_offset = 0x68,
+ .ack_offset = 0x60,
+
+ .num_domains = ARRAY_SIZE(rk3568_pm_domains),
+ .domain_info = rk3568_pm_domains,
+};
+
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
{
.compatible = "rockchip,px30-power-controller",
@@ -1021,6 +1055,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.compatible = "rockchip,rk3399-power-controller",
.data = (void *)&rk3399_pmu,
},
+ {
+ .compatible = "rockchip,rk3568-power-controller",
+ .data = (void *)&rk3568_pmu,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 976dee036470..8b53ed1cc67e 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -15,7 +15,7 @@ config ARCH_TEGRA_2x_SOC
select PL310_ERRATA_769419 if CACHE_L2X0
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
- select SOC_TEGRA20_VOLTAGE_COUPLER
+ select SOC_TEGRA20_VOLTAGE_COUPLER if REGULATOR
select TEGRA_TIMER
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -29,7 +29,7 @@ config ARCH_TEGRA_3x_SOC
select PL310_ERRATA_769419 if CACHE_L2X0
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
- select SOC_TEGRA30_VOLTAGE_COUPLER
+ select SOC_TEGRA30_VOLTAGE_COUPLER if REGULATOR
select TEGRA_TIMER
help
Support for NVIDIA Tegra T30 processor family, based on the
@@ -144,6 +144,8 @@ config SOC_TEGRA_FLOWCTRL
config SOC_TEGRA_PMC
bool
select GENERIC_PINCONF
+ select PM_OPP
+ select PM_GENERIC_DOMAINS
config SOC_TEGRA_POWERGATE_BPMP
def_bool y
@@ -153,7 +155,9 @@ config SOC_TEGRA_POWERGATE_BPMP
config SOC_TEGRA20_VOLTAGE_COUPLER
bool "Voltage scaling support for Tegra20 SoCs"
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+ depends on REGULATOR
config SOC_TEGRA30_VOLTAGE_COUPLER
bool "Voltage scaling support for Tegra30 SoCs"
depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ depends on REGULATOR
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index 3dc54f59cafe..cd33e99249c3 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -3,9 +3,16 @@
* Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
*/
+#define dev_fmt(fmt) "tegra-soc: " fmt
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/export.h>
#include <linux/of.h>
+#include <linux/pm_opp.h>
#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
static const struct of_device_id tegra_machine_match[] = {
{ .compatible = "nvidia,tegra20", },
@@ -31,3 +38,93 @@ bool soc_is_tegra(void)
return match != NULL;
}
+
+static int tegra_core_dev_init_opp_state(struct device *dev)
+{
+ unsigned long rate;
+ struct clk *clk;
+ int err;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clk: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
+
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ dev_err(dev, "failed to get clk rate\n");
+ return -EINVAL;
+ }
+
+ /* first dummy rate-setting initializes voltage vote */
+ err = dev_pm_opp_set_rate(dev, rate);
+ if (err) {
+ dev_err(dev, "failed to initialize OPP clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * devm_tegra_core_dev_init_opp_table() - initialize OPP table
+ * @dev: device for which OPP table is initialized
+ * @params: pointer to the OPP table configuration
+ *
+ * This function will initialize the OPP table and sync the OPP state of a
+ * Tegra SoC core device.
+ *
+ * Return: 0 on success or errno.
+ */
+int devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params)
+{
+ u32 hw_version;
+ int err;
+
+ err = devm_pm_opp_set_clkname(dev, NULL);
+ if (err) {
+ dev_err(dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ /* Tegra114+ doesn't support OPP yet */
+ if (!of_machine_is_compatible("nvidia,tegra20") &&
+ !of_machine_is_compatible("nvidia,tegra30"))
+ return -ENODEV;
+
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ hw_version = BIT(tegra_sku_info.soc_process_id);
+ else
+ hw_version = BIT(tegra_sku_info.soc_speedo_id);
+
+ err = devm_pm_opp_set_supported_hw(dev, &hw_version, 1);
+ if (err) {
+ dev_err(dev, "failed to set OPP supported HW: %d\n", err);
+ return err;
+ }
+
+ /*
+ * Older device-trees have an empty OPP table; in that case
+ * devm_pm_opp_of_add_table() will return -ENODEV.
+ */
+ err = devm_pm_opp_of_add_table(dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err_once(dev, "OPP table not found, please update device-tree\n");
+ else
+ dev_err(dev, "failed to add OPP table: %d\n", err);
+
+ return err;
+ }
+
+ if (params->init_state) {
+ err = tegra_core_dev_init_opp_state(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_core_dev_init_opp_table);
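(Aside, not part of the diff above: a minimal sketch of how a Tegra client driver might call the new helper. The driver name, probe wrapper and error-handling policy are assumptions made purely for illustration.)

#include <linux/platform_device.h>
#include <soc/tegra/common.h>

/* hypothetical consumer: sync OPP/voltage state of a core device at probe */
static int tegra_foo_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {};
	int err;

	/* ask the helper to also perform the initial dummy rate-set */
	opp_params.init_state = true;

	err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
	if (err && err != -ENODEV)
		return err;	/* -ENODEV means no OPP support on this SoC/DT */

	return 0;
}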
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 94b60a692b51..3d9da3d359da 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -489,10 +489,8 @@ static int __init tegra_init_fuse(void)
size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
- if (!fuse->lookups)
- return -ENOMEM;
-
- nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+ if (fuse->lookups)
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
}
return 0;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index 9ea7f0168457..c1aa7815bd6e 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -37,7 +37,8 @@
defined(CONFIG_ARCH_TEGRA_132_SOC) || \
defined(CONFIG_ARCH_TEGRA_210_SOC) || \
defined(CONFIG_ARCH_TEGRA_186_SOC) || \
- defined(CONFIG_ARCH_TEGRA_194_SOC)
+ defined(CONFIG_ARCH_TEGRA_194_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_234_SOC)
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
if (WARN_ON(!fuse->base))
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 4a582eae82ef..ea62f84d1c8b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -38,6 +38,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -428,6 +429,9 @@ struct tegra_pmc {
struct irq_chip irq;
struct notifier_block clk_nb;
+
+ bool core_domain_state_synced;
+ bool core_domain_registered;
};
static struct tegra_pmc *pmc = &(struct tegra_pmc) {
@@ -1297,12 +1301,107 @@ free_mem:
return err;
}
+bool tegra_pmc_core_domain_state_synced(void)
+{
+ return pmc->core_domain_state_synced;
+}
+
+static int
+tegra_pmc_core_pd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int level)
+{
+ struct dev_pm_opp *opp;
+ int err;
+
+ opp = dev_pm_opp_find_level_ceil(&genpd->dev, &level);
+ if (IS_ERR(opp)) {
+ dev_err(&genpd->dev, "failed to find OPP for level %u: %pe\n",
+ level, opp);
+ return PTR_ERR(opp);
+ }
+
+ mutex_lock(&pmc->powergates_lock);
+ err = dev_pm_opp_set_opp(pmc->dev, opp);
+ mutex_unlock(&pmc->powergates_lock);
+
+ dev_pm_opp_put(opp);
+
+ if (err) {
+ dev_err(&genpd->dev, "failed to set voltage to %duV: %d\n",
+ level, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned int
+tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
+{
+ struct generic_pm_domain *genpd;
+ const char *rname = "core";
+ int err;
+
+ genpd = devm_kzalloc(pmc->dev, sizeof(*genpd), GFP_KERNEL);
+ if (!genpd)
+ return -ENOMEM;
+
+ genpd->name = np->name;
+ genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
+ genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
+
+ err = devm_pm_opp_set_regulators(pmc->dev, &rname, 1);
+ if (err)
+ return dev_err_probe(pmc->dev, err,
+ "failed to set core OPP regulator\n");
+
+ err = pm_genpd_init(genpd, NULL, false);
+ if (err) {
+ dev_err(pmc->dev, "failed to init core genpd: %d\n", err);
+ return err;
+ }
+
+ err = of_genpd_add_provider_simple(np, genpd);
+ if (err) {
+ dev_err(pmc->dev, "failed to add core genpd: %d\n", err);
+ goto remove_genpd;
+ }
+
+ pmc->core_domain_registered = true;
+
+ return 0;
+
+remove_genpd:
+ pm_genpd_remove(genpd);
+
+ return err;
+}
+
static int tegra_powergate_init(struct tegra_pmc *pmc,
struct device_node *parent)
{
+ struct of_phandle_args child_args, parent_args;
struct device_node *np, *child;
int err = 0;
+ /*
+ * Core power domain is the parent of powergate domains, hence it
+ * should be registered first.
+ */
+ np = of_get_child_by_name(parent, "core-domain");
+ if (np) {
+ err = tegra_pmc_core_pd_add(pmc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
np = of_get_child_by_name(parent, "powergates");
if (!np)
return 0;
@@ -1313,6 +1412,21 @@ static int tegra_powergate_init(struct tegra_pmc *pmc,
of_node_put(child);
break;
}
+
+ if (of_parse_phandle_with_args(child, "power-domains",
+ "#power-domain-cells",
+ 0, &parent_args))
+ continue;
+
+ child_args.np = child;
+ child_args.args_count = 0;
+
+ err = of_genpd_add_subdomain(&parent_args, &child_args);
+ of_node_put(parent_args.np);
+ if (err) {
+ of_node_put(child);
+ break;
+ }
}
of_node_put(np);
@@ -1356,6 +1470,12 @@ static void tegra_powergate_remove_all(struct device_node *parent)
}
of_node_put(np);
+
+ np = of_get_child_by_name(parent, "core-domain");
+ if (np) {
+ of_genpd_del_provider(np);
+ of_genpd_remove_last(np);
+ }
}
static const struct tegra_io_pad_soc *
@@ -3667,6 +3787,29 @@ static const struct of_device_id tegra_pmc_match[] = {
{ }
};
+static void tegra_pmc_sync_state(struct device *dev)
+{
+ int err;
+
+ /*
+ * Older device-trees don't have core PD, and thus, there are
+ * no dependencies that will block the state syncing. We shouldn't
+ * mark the domain as synced in this case.
+ */
+ if (!pmc->core_domain_registered)
+ return;
+
+ pmc->core_domain_state_synced = true;
+
+ /* this is a no-op if core regulator isn't used */
+ mutex_lock(&pmc->powergates_lock);
+ err = dev_pm_opp_sync_regulators(dev);
+ mutex_unlock(&pmc->powergates_lock);
+
+ if (err)
+ dev_err(dev, "failed to sync regulators: %d\n", err);
+}
+
static struct platform_driver tegra_pmc_driver = {
.driver = {
.name = "tegra-pmc",
@@ -3675,6 +3818,7 @@ static struct platform_driver tegra_pmc_driver = {
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
.pm = &tegra_pmc_pm_ops,
#endif
+ .sync_state = tegra_pmc_sync_state,
},
.probe = tegra_pmc_probe,
};
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
index 367a71a3cd10..b8ce9fd0650d 100644
--- a/drivers/soc/tegra/regulators-tegra20.c
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -12,16 +12,22 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/reboot.h>
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <soc/tegra/pmc.h>
+
struct tegra_regulator_coupler {
struct regulator_coupler coupler;
struct regulator_dev *core_rdev;
struct regulator_dev *cpu_rdev;
struct regulator_dev *rtc_rdev;
- int core_min_uV;
+ struct notifier_block reboot_notifier;
+ int core_min_uV, cpu_min_uV;
+ bool sys_reboot_mode_req;
+ bool sys_reboot_mode;
};
static inline struct tegra_regulator_coupler *
@@ -38,6 +44,21 @@ static int tegra20_core_limit(struct tegra_regulator_coupler *tegra,
int core_cur_uV;
int err;
+ /*
+ * Tegra20 SoC has critical DVFS-capable devices that are
+ * permanently active or active at boot time, such as the EMC
+ * (DRAM controller) or the display controller.
+ *
+ * The voltage of a CORE SoC power domain shall not be dropped below
+ * a minimum level, which is determined by the device's clock rate.
+ * This means that we can't fully allow CORE voltage scaling until
+ * the state of all DVFS-critical CORE devices is synced.
+ */
+ if (tegra_pmc_core_domain_state_synced() && !tegra->sys_reboot_mode) {
+ pr_info_once("voltage state synced\n");
+ return 0;
+ }
+
if (tegra->core_min_uV > 0)
return tegra->core_min_uV;
@@ -58,7 +79,7 @@ static int tegra20_core_limit(struct tegra_regulator_coupler *tegra,
*/
tegra->core_min_uV = core_max_uV;
- pr_info("core minimum voltage limited to %duV\n", tegra->core_min_uV);
+ pr_info("core voltage initialized to %duV\n", tegra->core_min_uV);
return tegra->core_min_uV;
}
@@ -242,6 +263,10 @@ static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
if (cpu_uV < 0)
return cpu_uV;
+ /* store boot voltage level */
+ if (!tegra->cpu_min_uV)
+ tegra->cpu_min_uV = cpu_uV;
+
/*
* CPU's regulator may not have any consumers, hence the voltage
* must not be changed in that case because CPU simply won't
@@ -250,6 +275,10 @@ static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
if (!cpu_min_uV_consumers)
cpu_min_uV = cpu_uV;
+ /* restore boot voltage level */
+ if (tegra->sys_reboot_mode)
+ cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+
if (cpu_min_uV > cpu_uV) {
err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
cpu_uV, cpu_min_uV);
@@ -290,6 +319,8 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
return -EINVAL;
}
+ tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+
if (rdev == cpu_rdev)
return tegra20_cpu_voltage_update(tegra, cpu_rdev,
core_rdev, rtc_rdev);
@@ -303,6 +334,51 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
return -EPERM;
}
+static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
+ bool sys_reboot_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, true);
+
+ /*
+ * Some devices use the CPU soft-reboot method, and in this case we
+ * should ensure that voltages are sane for the reboot by restoring
+ * the minimum boot levels.
+ */
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, sys_reboot_mode);
+
+ return 0;
+}
+
+static int tegra20_regulator_reboot(struct notifier_block *notifier,
+ unsigned long event, void *cmd)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret;
+
+ if (event != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ reboot_notifier);
+
+ ret = tegra20_regulator_prepare_reboot(tegra, true);
+
+ return notifier_from_errno(ret);
+}
+
static int tegra20_regulator_attach(struct regulator_coupler *coupler,
struct regulator_dev *rdev)
{
@@ -335,6 +411,14 @@ static int tegra20_regulator_detach(struct regulator_coupler *coupler,
{
struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ /*
+ * We don't expect regulators to be decoupled during reboot;
+ * this may race with the reboot handler and shouldn't ever
+ * happen in practice.
+ */
+ if (WARN_ON_ONCE(system_state > SYSTEM_RUNNING))
+ return -EPERM;
+
if (tegra->core_rdev == rdev) {
tegra->core_rdev = NULL;
return 0;
@@ -359,13 +443,19 @@ static struct tegra_regulator_coupler tegra20_coupler = {
.detach_regulator = tegra20_regulator_detach,
.balance_voltage = tegra20_regulator_balance_voltage,
},
+ .reboot_notifier.notifier_call = tegra20_regulator_reboot,
};
static int __init tegra_regulator_coupler_init(void)
{
+ int err;
+
if (!of_machine_is_compatible("nvidia,tegra20"))
return 0;
+ err = register_reboot_notifier(&tegra20_coupler.reboot_notifier);
+ WARN_ON(err);
+
return regulator_coupler_register(&tegra20_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
index 0e776b20f625..e74bbc9c7859 100644
--- a/drivers/soc/tegra/regulators-tegra30.c
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -12,17 +12,22 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/reboot.h>
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
struct tegra_regulator_coupler {
struct regulator_coupler coupler;
struct regulator_dev *core_rdev;
struct regulator_dev *cpu_rdev;
- int core_min_uV;
+ struct notifier_block reboot_notifier;
+ int core_min_uV, cpu_min_uV;
+ bool sys_reboot_mode_req;
+ bool sys_reboot_mode;
};
static inline struct tegra_regulator_coupler *
@@ -39,6 +44,21 @@ static int tegra30_core_limit(struct tegra_regulator_coupler *tegra,
int core_cur_uV;
int err;
+ /*
+ * Tegra30 SoC has critical DVFS-capable devices that are
+ * permanently active or active at boot time, such as the EMC
+ * (DRAM controller) or the display controller.
+ *
+ * The voltage of a CORE SoC power domain shall not be dropped below
+ * a minimum level, which is determined by the device's clock rate.
+ * This means that we can't fully allow CORE voltage scaling until
+ * the state of all DVFS-critical CORE devices is synced.
+ */
+ if (tegra_pmc_core_domain_state_synced() && !tegra->sys_reboot_mode) {
+ pr_info_once("voltage state synced\n");
+ return 0;
+ }
+
if (tegra->core_min_uV > 0)
return tegra->core_min_uV;
@@ -59,7 +79,7 @@ static int tegra30_core_limit(struct tegra_regulator_coupler *tegra,
*/
tegra->core_min_uV = core_max_uV;
- pr_info("core minimum voltage limited to %duV\n", tegra->core_min_uV);
+ pr_info("core voltage initialized to %duV\n", tegra->core_min_uV);
return tegra->core_min_uV;
}
@@ -172,6 +192,10 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (cpu_uV < 0)
return cpu_uV;
+ /* store boot voltage level */
+ if (!tegra->cpu_min_uV)
+ tegra->cpu_min_uV = cpu_uV;
+
/*
* CPU's regulator may not have any consumers, hence the voltage
* must not be changed in that case because CPU simply won't
@@ -195,6 +219,10 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
+ /* restore boot voltage level */
+ if (tegra->sys_reboot_mode)
+ cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+
if (core_min_limited_uV > core_uV) {
pr_err("core voltage constraint violated: %d %d %d\n",
core_uV, core_min_limited_uV, cpu_uV);
@@ -263,9 +291,56 @@ static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
return -EINVAL;
}
+ tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+
return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
}
+static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
+ bool sys_reboot_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, true);
+
+ /*
+ * Some devices use the CPU soft-reboot method, and in this case we
+ * should ensure that voltages are sane for the reboot by restoring
+ * the minimum boot levels.
+ */
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, sys_reboot_mode);
+
+ return 0;
+}
+
+static int tegra30_regulator_reboot(struct notifier_block *notifier,
+ unsigned long event, void *cmd)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret;
+
+ if (event != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ reboot_notifier);
+
+ ret = tegra30_regulator_prepare_reboot(tegra, true);
+
+ return notifier_from_errno(ret);
+}
+
static int tegra30_regulator_attach(struct regulator_coupler *coupler,
struct regulator_dev *rdev)
{
@@ -292,6 +367,14 @@ static int tegra30_regulator_detach(struct regulator_coupler *coupler,
{
struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ /*
+ * We don't expect regulators to be decoupled during reboot;
+ * this may race with the reboot handler and shouldn't ever
+ * happen in practice.
+ */
+ if (WARN_ON_ONCE(system_state > SYSTEM_RUNNING))
+ return -EPERM;
+
if (tegra->core_rdev == rdev) {
tegra->core_rdev = NULL;
return 0;
@@ -311,13 +394,19 @@ static struct tegra_regulator_coupler tegra30_coupler = {
.detach_regulator = tegra30_regulator_detach,
.balance_voltage = tegra30_regulator_balance_voltage,
},
+ .reboot_notifier.notifier_call = tegra30_regulator_reboot,
};
static int __init tegra_regulator_coupler_init(void)
{
+ int err;
+
if (!of_machine_is_compatible("nvidia,tegra30"))
return 0;
+ err = register_reboot_notifier(&tegra30_coupler.reboot_notifier);
+ WARN_ON(err);
+
return regulator_coupler_register(&tegra30_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
index 5376f3d22f31..06cbee5fd254 100644
--- a/drivers/soc/ti/smartreflex.c
+++ b/drivers/soc/ti/smartreflex.c
@@ -846,10 +846,8 @@ static int omap_sr_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sr_info->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(sr_info->base)) {
- dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
+ if (IS_ERR(sr_info->base))
return PTR_ERR(sr_info->base);
- }
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index c3e2161df732..09abd17065ba 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -445,10 +445,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(m3_ipc->ipc_mem_base)) {
- dev_err(dev, "could not ioremap ipc_mem\n");
+ if (IS_ERR(m3_ipc->ipc_mem_base))
return PTR_ERR(m3_ipc->ipc_mem_base);
- }
irq = platform_get_irq(pdev, 0);
if (!irq) {
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 016e74230bb7..2b7795233282 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -25,6 +25,7 @@ config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
select SOUNDWIRE_GENERIC_ALLOCATION
+ select AUXILIARY_BUS
depends on ACPI && SND_SOC
help
SoundWire Intel Master driver.
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index a9e0aa72654d..3e6d4addac2f 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -394,13 +394,13 @@ sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
}
static int
-sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
struct sdw_msg msg;
int ret;
ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_WRITE, val);
+ slave->dev_num, SDW_MSG_FLAG_WRITE, (u8 *)val);
if (ret < 0)
return ret;
@@ -492,7 +492,7 @@ int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
}
EXPORT_SYMBOL(sdw_read_no_pm);
-static int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
int tmp;
@@ -503,6 +503,21 @@ static int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
tmp = (tmp & ~mask) | val;
return sdw_write_no_pm(slave, addr, tmp);
}
+EXPORT_SYMBOL(sdw_update_no_pm);
+
+/* Read-Modify-Write Slave register */
+int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ int tmp;
+
+ tmp = sdw_read(slave, addr);
+ if (tmp < 0)
+ return tmp;
+
+ tmp = (tmp & ~mask) | val;
+ return sdw_write(slave, addr, tmp);
+}
+EXPORT_SYMBOL(sdw_update);
/**
* sdw_nread() - Read "n" contiguous SDW Slave registers
@@ -535,9 +550,9 @@ EXPORT_SYMBOL(sdw_nread);
* @slave: SDW Slave
* @addr: Register address
* @count: length
- * @val: Buffer for values to be read
+ * @val: Buffer for values to be written
*/
-int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
int ret;
@@ -821,26 +836,6 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
mutex_unlock(&bus->bus_lock);
}
-static enum sdw_clk_stop_mode sdw_get_clk_stop_mode(struct sdw_slave *slave)
-{
- enum sdw_clk_stop_mode mode;
-
- /*
- * Query for clock stop mode if Slave implements
- * ops->get_clk_stop_mode, else read from property.
- */
- if (slave->ops && slave->ops->get_clk_stop_mode) {
- mode = slave->ops->get_clk_stop_mode(slave);
- } else {
- if (slave->prop.clk_stop_mode1)
- mode = SDW_CLK_STOP_MODE1;
- else
- mode = SDW_CLK_STOP_MODE0;
- }
-
- return mode;
-}
-
static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type)
@@ -849,11 +844,8 @@ static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
if (slave->ops && slave->ops->clk_stop) {
ret = slave->ops->clk_stop(slave, mode, type);
- if (ret < 0) {
- dev_err(&slave->dev,
- "Clk Stop type =%d failed: %d\n", type, ret);
+ if (ret < 0)
return ret;
- }
}
return 0;
@@ -880,7 +872,8 @@ static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
} else {
ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
if (ret < 0) {
- dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
return ret;
}
val = ret;
@@ -889,9 +882,8 @@ static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
- if (ret < 0)
- dev_err(&slave->dev,
- "Clock Stop prepare failed for slave: %d", ret);
+ if (ret < 0 && ret != -ENODATA)
+ dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
return ret;
}
@@ -909,7 +901,7 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
}
val &= SDW_SCP_STAT_CLK_STP_NF;
if (!val) {
- dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d",
+ dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
dev_num);
return 0;
}
@@ -918,7 +910,7 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
retry--;
} while (retry);
- dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d",
+ dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
dev_num);
return -ETIMEDOUT;
@@ -933,7 +925,6 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
*/
int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
{
- enum sdw_clk_stop_mode slave_mode;
bool simple_clk_stop = true;
struct sdw_slave *slave;
bool is_slave = false;
@@ -943,6 +934,9 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
* In order to save on transition time, prepare
* each Slave and then wait for all Slave(s) to be
* prepared for clock stop.
+ * If one of the Slave devices has lost sync and
+ * replies with Command Ignored/-ENODATA, we continue
+ * the loop
*/
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
@@ -955,36 +949,45 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
/* Identify if Slave(s) are available on Bus */
is_slave = true;
- slave_mode = sdw_get_clk_stop_mode(slave);
- slave->curr_clk_stop_mode = slave_mode;
-
- ret = sdw_slave_clk_stop_callback(slave, slave_mode,
+ ret = sdw_slave_clk_stop_callback(slave,
+ SDW_CLK_STOP_MODE0,
SDW_CLK_PRE_PREPARE);
- if (ret < 0) {
- dev_err(&slave->dev,
- "pre-prepare failed:%d", ret);
- return ret;
- }
-
- ret = sdw_slave_clk_stop_prepare(slave,
- slave_mode, true);
- if (ret < 0) {
- dev_err(&slave->dev,
- "pre-prepare failed:%d", ret);
+ if (ret < 0 && ret != -ENODATA) {
+ dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
return ret;
}
- if (slave_mode == SDW_CLK_STOP_MODE1)
+ /* Only prepare a Slave device if needed */
+ if (!slave->prop.simple_clk_stop_capable) {
simple_clk_stop = false;
+
+ ret = sdw_slave_clk_stop_prepare(slave,
+ SDW_CLK_STOP_MODE0,
+ true);
+ if (ret < 0 && ret != -ENODATA) {
+ dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
+ return ret;
+ }
+ }
}
/* Skip remaining clock stop preparation if no Slave is attached */
if (!is_slave)
- return ret;
+ return 0;
+ /*
+ * Don't wait for all Slaves to be ready if they follow the simple
+ * state machine
+ */
if (!simple_clk_stop) {
ret = sdw_bus_wait_for_clk_prep_deprep(bus,
SDW_BROADCAST_DEV_NUM);
+ /*
+ * if there are no Slave devices present and the reply is
+ * Command_Ignored/-ENODATA, we don't need to continue with the
+ * flow and can just return here. The error code is not modified
+ * and its handling left as an exercise for the caller.
+ */
if (ret < 0)
return ret;
}
@@ -998,21 +1001,17 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
slave->status != SDW_SLAVE_ALERT)
continue;
- slave_mode = slave->curr_clk_stop_mode;
+ ret = sdw_slave_clk_stop_callback(slave,
+ SDW_CLK_STOP_MODE0,
+ SDW_CLK_POST_PREPARE);
- if (slave_mode == SDW_CLK_STOP_MODE1) {
- ret = sdw_slave_clk_stop_callback(slave,
- slave_mode,
- SDW_CLK_POST_PREPARE);
-
- if (ret < 0) {
- dev_err(&slave->dev,
- "post-prepare failed:%d", ret);
- }
+ if (ret < 0 && ret != -ENODATA) {
+ dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
+ return ret;
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
@@ -1035,12 +1034,8 @@ int sdw_bus_clk_stop(struct sdw_bus *bus)
ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
if (ret < 0) {
- if (ret == -ENODATA)
- dev_dbg(bus->dev,
- "ClockStopNow Broadcast msg ignored %d", ret);
- else
- dev_err(bus->dev,
- "ClockStopNow Broadcast msg failed %d", ret);
+ if (ret != -ENODATA)
+ dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
return ret;
}
@@ -1059,7 +1054,6 @@ EXPORT_SYMBOL(sdw_bus_clk_stop);
*/
int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
{
- enum sdw_clk_stop_mode mode;
bool simple_clk_stop = true;
struct sdw_slave *slave;
bool is_slave = false;
@@ -1081,33 +1075,36 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
/* Identify if Slave(s) are available on Bus */
is_slave = true;
- mode = slave->curr_clk_stop_mode;
-
- if (mode == SDW_CLK_STOP_MODE1) {
- simple_clk_stop = false;
- continue;
- }
-
- ret = sdw_slave_clk_stop_callback(slave, mode,
+ ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
SDW_CLK_PRE_DEPREPARE);
if (ret < 0)
- dev_warn(&slave->dev,
- "clk stop deprep failed:%d", ret);
+ dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
+
+ /* Only de-prepare a Slave device if needed */
+ if (!slave->prop.simple_clk_stop_capable) {
+ simple_clk_stop = false;
- ret = sdw_slave_clk_stop_prepare(slave, mode,
- false);
+ ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
+ false);
- if (ret < 0)
- dev_warn(&slave->dev,
- "clk stop deprep failed:%d", ret);
+ if (ret < 0)
+ dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
+ }
}
/* Skip remaining clock stop de-preparation if no Slave is attached */
if (!is_slave)
return 0;
- if (!simple_clk_stop)
- sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+ /*
+ * Don't wait for all Slaves to be ready if they follow the simple
+ * state machine
+ */
+ if (!simple_clk_stop) {
+ ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+ if (ret < 0)
+ dev_warn(&slave->dev, "clock stop deprepare wait failed:%d\n", ret);
+ }
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
@@ -1117,9 +1114,10 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
slave->status != SDW_SLAVE_ALERT)
continue;
- mode = slave->curr_clk_stop_mode;
- sdw_slave_clk_stop_callback(slave, mode,
- SDW_CLK_POST_DEPREPARE);
+ ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
+ SDW_CLK_POST_DEPREPARE);
+ if (ret < 0)
+ dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
}
return 0;
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 40354469860a..7631ef5e71fb 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -201,19 +201,6 @@ static inline void sdw_fill_port_params(struct sdw_port_params *params,
params->data_mode = data_mode;
}
-/* Read-Modify-Write Slave register */
-static inline int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
-{
- int tmp;
-
- tmp = sdw_read(slave, addr);
- if (tmp < 0)
- return tmp;
-
- tmp = (tmp & ~mask) | val;
- return sdw_write(slave, addr, tmp);
-}
-
/* broadcast read/write for tests */
int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr);
int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value);
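(Aside, not part of the diff above: with sdw_update() no longer a bus.h static inline and exported alongside sdw_update_no_pm(), a SoundWire Slave (codec) driver can do read-modify-write register accesses directly. The register offset and bit values below are hypothetical, and the prototypes are assumed to be exposed via <linux/soundwire/sdw.h>.)

#include <linux/bits.h>
#include <linux/soundwire/sdw.h>

/* hypothetical codec helper: set one bit in a vendor-defined register */
static int foo_codec_enable_port(struct sdw_slave *slave)
{
	/* 0x1234 and BIT(0) are placeholder values, not a real codec register map */
	return sdw_update(slave, 0x1234, BIT(0), BIT(0));
}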
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 192dac10f0c2..25950422b085 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -1428,20 +1428,6 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
}
}
- /*
- * This CMD_ACCEPT should be used when there are no devices
- * attached on the link when entering clock stop mode. If this is
- * not set and there is a broadcast write then the command ignored
- * will be treated as a failure
- */
- if (!slave_present)
- cdns_updatel(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CMD_ACCEPT,
- CDNS_MCP_CONTROL_CMD_ACCEPT);
- else
- cdns_updatel(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CMD_ACCEPT, 0);
-
/* commit changes */
ret = cdns_config_update(cdns);
if (ret < 0) {
@@ -1508,11 +1494,8 @@ int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
cdns_updatel(cdns, CDNS_MCP_CONTROL,
CDNS_MCP_CONTROL_BLOCK_WAKEUP, 0);
- /*
- * clear CMD_ACCEPT so that the command ignored
- * will be treated as a failure during a broadcast write
- */
- cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, 0);
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
+ CDNS_MCP_CONTROL_CMD_ACCEPT);
if (!bus_reset) {
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 4d1aab5b5ec2..0e7f8b35bb21 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -180,9 +180,6 @@ enum sdw_command_response
cdns_xfer_msg_defer(struct sdw_bus *bus,
struct sdw_msg *msg, struct sdw_defer *defer);
-enum sdw_command_response
-cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
-
int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 82061c1d9835..5db0a2443a1d 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -80,7 +80,7 @@ u64 sdw_dmi_override_adr(struct sdw_bus *bus, u64 addr)
/* check if any address remap quirk applies */
dmi_id = dmi_first_match(adr_remap_quirk_table);
if (dmi_id) {
- struct adr_remap *map = dmi_id->driver_data;
+ struct adr_remap *map;
for (map = dmi_id->driver_data; map->adr; map++) {
if (map->adr == addr) {
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index 84d129587084..f7c66083a4dd 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -382,12 +382,18 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
*/
}
- if (i == clk_values)
+ if (i == clk_values) {
+ dev_err(bus->dev, "%s: could not find clock value for bandwidth %d\n",
+ __func__, bus->params.bandwidth);
return -EINVAL;
+ }
ret = sdw_select_row_col(bus, curr_dr_freq);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(bus->dev, "%s: could not find frame configuration for bus dr_freq %d\n",
+ __func__, curr_dr_freq);
return -EINVAL;
+ }
bus->params.curr_dr_freq = curr_dr_freq;
return 0;
@@ -404,10 +410,8 @@ int sdw_compute_params(struct sdw_bus *bus)
/* Computes clock frequency, frame shape and frame frequency */
ret = sdw_compute_bus_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Compute bus params failed: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Compute transport and port params */
ret = sdw_compute_port_params(bus);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index fd95f94630b1..c11e3d8cd308 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/platform_device.h>
+#include <linux/auxiliary_bus.h>
#include <sound/pcm_params.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
@@ -1327,11 +1327,14 @@ static int intel_init(struct sdw_intel *sdw)
}
/*
- * probe and init
+ * probe and init (aux_dev_id argument is required by function prototype but not used)
*/
-static int intel_master_probe(struct platform_device *pdev)
+static int intel_link_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *aux_dev_id)
+
{
- struct device *dev = &pdev->dev;
+ struct device *dev = &auxdev->dev;
+ struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
struct sdw_intel *sdw;
struct sdw_cdns *cdns;
struct sdw_bus *bus;
@@ -1344,14 +1347,14 @@ static int intel_master_probe(struct platform_device *pdev)
cdns = &sdw->cdns;
bus = &cdns->bus;
- sdw->instance = pdev->id;
- sdw->link_res = dev_get_platdata(dev);
+ sdw->instance = auxdev->id;
+ sdw->link_res = &ldev->link_res;
cdns->dev = dev;
cdns->registers = sdw->link_res->registers;
cdns->instance = sdw->instance;
cdns->msg_count = 0;
- bus->link_id = pdev->id;
+ bus->link_id = auxdev->id;
sdw_cdns_probe(cdns);
@@ -1384,10 +1387,10 @@ static int intel_master_probe(struct platform_device *pdev)
return 0;
}
-int intel_master_startup(struct platform_device *pdev)
+int intel_link_startup(struct auxiliary_device *auxdev)
{
struct sdw_cdns_stream_config config;
- struct device *dev = &pdev->dev;
+ struct device *dev = &auxdev->dev;
struct sdw_cdns *cdns = dev_get_drvdata(dev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
@@ -1524,9 +1527,9 @@ err_init:
return ret;
}
-static int intel_master_remove(struct platform_device *pdev)
+static void intel_link_remove(struct auxiliary_device *auxdev)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = &auxdev->dev;
struct sdw_cdns *cdns = dev_get_drvdata(dev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
@@ -1542,19 +1545,17 @@ static int intel_master_remove(struct platform_device *pdev)
snd_soc_unregister_component(dev);
}
sdw_bus_master_delete(bus);
-
- return 0;
}
-int intel_master_process_wakeen_event(struct platform_device *pdev)
+int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = &auxdev->dev;
struct sdw_intel *sdw;
struct sdw_bus *bus;
void __iomem *shim;
u16 wake_sts;
- sdw = platform_get_drvdata(pdev);
+ sdw = dev_get_drvdata(dev);
bus = &sdw->cdns.bus;
if (bus->prop.hw_disabled) {
@@ -1976,17 +1977,22 @@ static const struct dev_pm_ops intel_pm = {
SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
};
-static struct platform_driver sdw_intel_drv = {
- .probe = intel_master_probe,
- .remove = intel_master_remove,
+static const struct auxiliary_device_id intel_link_id_table[] = {
+ { .name = "soundwire_intel.link" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table);
+
+static struct auxiliary_driver sdw_intel_drv = {
+ .probe = intel_link_probe,
+ .remove = intel_link_remove,
.driver = {
- .name = "intel-sdw",
+ /* auxiliary_driver_register() sets .name to be the modname */
.pm = &intel_pm,
- }
+ },
+ .id_table = intel_link_id_table
};
-
-module_platform_driver(sdw_intel_drv);
+module_auxiliary_driver(sdw_intel_drv);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:intel-sdw");
-MODULE_DESCRIPTION("Intel Soundwire Master Driver");
+MODULE_DESCRIPTION("Intel Soundwire Link Driver");
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 06bac8ba14e9..0b47b148da3f 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -7,7 +7,6 @@
/**
* struct sdw_intel_link_res - Soundwire Intel link resource structure,
* typically populated by the controller driver.
- * @pdev: platform_device
* @mmio_base: mmio base of SoundWire registers
* @registers: Link IO registers base
* @shim: Audio shim pointer
@@ -23,7 +22,6 @@
* @list: used to walk-through all masters exposed by the same controller
*/
struct sdw_intel_link_res {
- struct platform_device *pdev;
void __iomem *mmio_base; /* not strictly needed, useful for debug */
void __iomem *registers;
void __iomem *shim;
@@ -48,7 +46,15 @@ struct sdw_intel {
#endif
};
-int intel_master_startup(struct platform_device *pdev);
-int intel_master_process_wakeen_event(struct platform_device *pdev);
+int intel_link_startup(struct auxiliary_device *auxdev);
+int intel_link_process_wakeen_event(struct auxiliary_device *auxdev);
+
+struct sdw_intel_link_dev {
+ struct auxiliary_device auxdev;
+ struct sdw_intel_link_res link_res;
+};
+
+#define auxiliary_dev_to_sdw_intel_link_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct sdw_intel_link_dev, auxdev)
#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 30ce95ec2d70..9e283bef53d2 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/auxiliary_bus.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
@@ -24,28 +24,108 @@
#define SDW_LINK_BASE 0x30000
#define SDW_LINK_SIZE 0x10000
+static void intel_link_dev_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
+
+ kfree(ldev);
+}
+
+/* alloc, init and add link devices */
+static struct sdw_intel_link_dev *intel_link_dev_register(struct sdw_intel_res *res,
+ struct sdw_intel_ctx *ctx,
+ struct fwnode_handle *fwnode,
+ const char *name,
+ int link_id)
+{
+ struct sdw_intel_link_dev *ldev;
+ struct sdw_intel_link_res *link;
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return ERR_PTR(-ENOMEM);
+
+ auxdev = &ldev->auxdev;
+ auxdev->name = name;
+ auxdev->dev.parent = res->parent;
+ auxdev->dev.fwnode = fwnode;
+ auxdev->dev.release = intel_link_dev_release;
+
+ /* we don't use an IDA since we already have a link ID */
+ auxdev->id = link_id;
+
+ /*
+ * keep a handle on the allocated memory, to be used in all other functions.
+ * Since the same pattern is used to skip links that are not enabled, there is
+ * no need to check if ctx->ldev[i] is NULL later on.
+ */
+ ctx->ldev[link_id] = ldev;
+
+ /* Add link information used in the driver probe */
+ link = &ldev->link_res;
+ link->mmio_base = res->mmio_base;
+ link->registers = res->mmio_base + SDW_LINK_BASE
+ + (SDW_LINK_SIZE * link_id);
+ link->shim = res->mmio_base + SDW_SHIM_BASE;
+ link->alh = res->mmio_base + SDW_ALH_BASE;
+
+ link->ops = res->ops;
+ link->dev = res->dev;
+
+ link->clock_stop_quirks = res->clock_stop_quirks;
+ link->shim_lock = &ctx->shim_lock;
+ link->shim_mask = &ctx->shim_mask;
+ link->link_mask = ctx->link_mask;
+
+ /* now follow the two-step init/add sequence */
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ dev_err(res->parent, "failed to initialize link dev %s link_id %d\n",
+ name, link_id);
+ kfree(ldev);
+ return ERR_PTR(ret);
+ }
+
+ ret = auxiliary_device_add(&ldev->auxdev);
+ if (ret < 0) {
+ dev_err(res->parent, "failed to add link dev %s link_id %d\n",
+ ldev->auxdev.name, link_id);
+ /* ldev will be freed with the put_device() and .release sequence */
+ auxiliary_device_uninit(&ldev->auxdev);
+ return ERR_PTR(ret);
+ }
+
+ return ldev;
+}
+
+static void intel_link_dev_unregister(struct sdw_intel_link_dev *ldev)
+{
+ auxiliary_device_delete(&ldev->auxdev);
+ auxiliary_device_uninit(&ldev->auxdev);
+}
+
static int sdw_intel_cleanup(struct sdw_intel_ctx *ctx)
{
- struct sdw_intel_link_res *link = ctx->links;
+ struct sdw_intel_link_dev *ldev;
u32 link_mask;
int i;
- if (!link)
- return 0;
-
link_mask = ctx->link_mask;
- for (i = 0; i < ctx->count; i++, link++) {
+ for (i = 0; i < ctx->count; i++) {
if (!(link_mask & BIT(i)))
continue;
- if (link->pdev) {
- pm_runtime_disable(&link->pdev->dev);
- platform_device_unregister(link->pdev);
- }
+ ldev = ctx->ldev[i];
- if (!link->clock_stop_quirks)
- pm_runtime_put_noidle(link->dev);
+ pm_runtime_disable(&ldev->auxdev.dev);
+ if (!ldev->link_res.clock_stop_quirks)
+ pm_runtime_put_noidle(ldev->link_res.dev);
+
+ intel_link_dev_unregister(ldev);
}
return 0;
@@ -91,9 +171,8 @@ EXPORT_SYMBOL_NS(sdw_intel_thread, SOUNDWIRE_INTEL_INIT);
static struct sdw_intel_ctx
*sdw_intel_probe_controller(struct sdw_intel_res *res)
{
- struct platform_device_info pdevinfo;
- struct platform_device *pdev;
struct sdw_intel_link_res *link;
+ struct sdw_intel_link_dev *ldev;
struct sdw_intel_ctx *ctx;
struct acpi_device *adev;
struct sdw_slave *slave;
@@ -116,67 +195,60 @@ static struct sdw_intel_ctx
count = res->count;
dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
- ctx = devm_kzalloc(&adev->dev, sizeof(*ctx), GFP_KERNEL);
+ /*
+ * we need to alloc/free memory manually and can't use devm:
+ * this routine may be called from a workqueue, and not from
+ * the parent .probe.
+ * If devm_ was used, the memory might never be freed on errors.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
ctx->count = count;
- ctx->links = devm_kcalloc(&adev->dev, ctx->count,
- sizeof(*ctx->links), GFP_KERNEL);
- if (!ctx->links)
+
+ /*
+ * allocate the array of pointers. The link-specific data is allocated
+ * as part of the first loop below and released with the auxiliary_device_uninit().
+ * If some links are disabled, the link pointer will remain NULL. Given that the
+ * number of links is small, this is simpler than using a list to keep track of links.
+ */
+ ctx->ldev = kcalloc(ctx->count, sizeof(*ctx->ldev), GFP_KERNEL);
+ if (!ctx->ldev) {
+ kfree(ctx);
return NULL;
+ }
- ctx->count = count;
ctx->mmio_base = res->mmio_base;
ctx->link_mask = res->link_mask;
ctx->handle = res->handle;
mutex_init(&ctx->shim_lock);
- link = ctx->links;
link_mask = ctx->link_mask;
INIT_LIST_HEAD(&ctx->link_list);
- /* Create SDW Master devices */
- for (i = 0; i < count; i++, link++) {
- if (!(link_mask & BIT(i))) {
- dev_dbg(&adev->dev,
- "Link %d masked, will not be enabled\n", i);
+ for (i = 0; i < count; i++) {
+ if (!(link_mask & BIT(i)))
continue;
- }
- link->mmio_base = res->mmio_base;
- link->registers = res->mmio_base + SDW_LINK_BASE
- + (SDW_LINK_SIZE * i);
- link->shim = res->mmio_base + SDW_SHIM_BASE;
- link->alh = res->mmio_base + SDW_ALH_BASE;
-
- link->ops = res->ops;
- link->dev = res->dev;
-
- link->clock_stop_quirks = res->clock_stop_quirks;
- link->shim_lock = &ctx->shim_lock;
- link->shim_mask = &ctx->shim_mask;
- link->link_mask = link_mask;
-
- memset(&pdevinfo, 0, sizeof(pdevinfo));
-
- pdevinfo.parent = res->parent;
- pdevinfo.name = "intel-sdw";
- pdevinfo.id = i;
- pdevinfo.fwnode = acpi_fwnode_handle(adev);
- pdevinfo.data = link;
- pdevinfo.size_data = sizeof(*link);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- dev_err(&adev->dev,
- "platform device creation failed: %ld\n",
- PTR_ERR(pdev));
+ /*
+ * init and add a device for each link
+ *
+ * The name of the device will be soundwire_intel.link.[i],
+ * with the "soundwire_intel" module prefix automatically added
+ * by the auxiliary bus core.
+ */
+ ldev = intel_link_dev_register(res,
+ ctx,
+ acpi_fwnode_handle(adev),
+ "link",
+ i);
+ if (IS_ERR(ldev))
goto err;
- }
- link->pdev = pdev;
- link->cdns = platform_get_drvdata(pdev);
+
+ link = &ldev->link_res;
+ link->cdns = dev_get_drvdata(&ldev->auxdev.dev);
if (!link->cdns) {
dev_err(&adev->dev, "failed to get link->cdns\n");
@@ -194,8 +266,7 @@ static struct sdw_intel_ctx
num_slaves++;
}
- ctx->ids = devm_kcalloc(&adev->dev, num_slaves,
- sizeof(*ctx->ids), GFP_KERNEL);
+ ctx->ids = kcalloc(num_slaves, sizeof(*ctx->ids), GFP_KERNEL);
if (!ctx->ids)
goto err;
@@ -213,8 +284,14 @@ static struct sdw_intel_ctx
return ctx;
err:
- ctx->count = i;
- sdw_intel_cleanup(ctx);
+ while (i--) {
+ if (!(link_mask & BIT(i)))
+ continue;
+ ldev = ctx->ldev[i];
+ intel_link_dev_unregister(ldev);
+ }
+ kfree(ctx->ldev);
+ kfree(ctx);
return NULL;
}
@@ -222,7 +299,7 @@ static int
sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
{
struct acpi_device *adev;
- struct sdw_intel_link_res *link;
+ struct sdw_intel_link_dev *ldev;
u32 caps;
u32 link_mask;
int i;
@@ -241,27 +318,28 @@ sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
return -EINVAL;
}
- if (!ctx->links)
+ if (!ctx->ldev)
return -EINVAL;
- link = ctx->links;
link_mask = ctx->link_mask;
/* Startup SDW Master devices */
- for (i = 0; i < ctx->count; i++, link++) {
+ for (i = 0; i < ctx->count; i++) {
if (!(link_mask & BIT(i)))
continue;
- intel_master_startup(link->pdev);
+ ldev = ctx->ldev[i];
+
+ intel_link_startup(&ldev->auxdev);
- if (!link->clock_stop_quirks) {
+ if (!ldev->link_res.clock_stop_quirks) {
/*
* we need to prevent the parent PCI device
* from entering pm_runtime suspend, so that
* power rails to the SoundWire IP are not
* turned off.
*/
- pm_runtime_get_noresume(link->dev);
+ pm_runtime_get_noresume(ldev->link_res.dev);
}
}
@@ -272,8 +350,8 @@ sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
* sdw_intel_probe() - SoundWire Intel probe routine
* @res: resource data
*
- * This registers a platform device for each Master handled by the controller,
- * and SoundWire Master and Slave devices will be created by the platform
+ * This registers an auxiliary device for each Master handled by the controller,
+ * and SoundWire Master and Slave devices will be created by the auxiliary
* device probe. All the information necessary is stored in the context, and
* the res argument pointer can be freed after this step.
* This function will be called after sdw_intel_acpi_scan() by SOF probe.
@@ -306,27 +384,31 @@ EXPORT_SYMBOL_NS(sdw_intel_startup, SOUNDWIRE_INTEL_INIT);
void sdw_intel_exit(struct sdw_intel_ctx *ctx)
{
sdw_intel_cleanup(ctx);
+ kfree(ctx->ids);
+ kfree(ctx->ldev);
+ kfree(ctx);
}
EXPORT_SYMBOL_NS(sdw_intel_exit, SOUNDWIRE_INTEL_INIT);
void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx)
{
- struct sdw_intel_link_res *link;
+ struct sdw_intel_link_dev *ldev;
u32 link_mask;
int i;
- if (!ctx->links)
+ if (!ctx->ldev)
return;
- link = ctx->links;
link_mask = ctx->link_mask;
/* Startup SDW Master devices */
- for (i = 0; i < ctx->count; i++, link++) {
+ for (i = 0; i < ctx->count; i++) {
if (!(link_mask & BIT(i)))
continue;
- intel_master_process_wakeen_event(link->pdev);
+ ldev = ctx->ldev[i];
+
+ intel_link_process_wakeen_event(&ldev->auxdev);
}
}
EXPORT_SYMBOL_NS(sdw_intel_process_wakeen_event, SOUNDWIRE_INTEL_INIT);
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 0eed38a79c6d..669d7573320b 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -39,12 +39,12 @@ int sdw_slave_add(struct sdw_bus *bus,
if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
/* name shall be sdw:link:mfg:part:class */
- dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x",
+ dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x",
bus->link_id, id->mfg_id, id->part_id,
id->class_id);
} else {
/* name shall be sdw:link:mfg:part:class:unique */
- dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+ dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x:%01x",
bus->link_id, id->mfg_id, id->part_id,
id->class_id, id->unique_id);
}
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 1eaedaaba094..1a18308f4ef4 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -422,7 +422,6 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
struct completion *port_ready;
struct sdw_dpn_prop *dpn_prop;
struct sdw_prepare_ch prep_ch;
- unsigned int time_left;
bool intr = false;
int ret = 0, val;
u32 addr;
@@ -479,15 +478,15 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
/* Wait for completion on port ready */
port_ready = &s_rt->slave->port_ready[prep_ch.num];
- time_left = wait_for_completion_timeout(port_ready,
- msecs_to_jiffies(dpn_prop->ch_prep_timeout));
+ wait_for_completion_timeout(port_ready,
+ msecs_to_jiffies(dpn_prop->ch_prep_timeout));
val = sdw_read(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
- val &= p_rt->ch_mask;
- if (!time_left || val) {
+ if ((val < 0) || (val & p_rt->ch_mask)) {
+ ret = (val < 0) ? val : -ETIMEDOUT;
dev_err(&s_rt->slave->dev,
- "Chn prep failed for port:%d\n", prep_ch.num);
- return -ETIMEDOUT;
+ "Chn prep failed for port %d: %d\n", prep_ch.num, ret);
+ return ret;
}
}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 2ef74885ffa2..788dcdf25f00 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -352,8 +352,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
}
mr = spi_readl(as, MR);
- if (spi->cs_gpiod)
- gpiod_set_value(spi->cs_gpiod, 1);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -369,8 +367,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
- if (spi->cs_gpiod)
- gpiod_set_value(spi->cs_gpiod, 1);
spi_writel(as, MR, mr);
}
@@ -400,8 +396,6 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
if (!spi->cs_gpiod)
spi_writel(as, CR, SPI_BIT(LASTXFER));
- else
- gpiod_set_value(spi->cs_gpiod, 0);
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1483,7 +1477,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = 4;
master->setup = atmel_spi_setup;
- master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+ master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+ SPI_MASTER_GPIO_SS);
master->transfer_one = atmel_spi_one_transfer;
master->set_cs = atmel_spi_set_cs;
master->cleanup = atmel_spi_cleanup;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 5f8771fe1a31..775c0bf2f923 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -83,6 +83,7 @@ MODULE_PARM_DESC(polling_limit_us,
* struct bcm2835_spi - BCM2835 SPI controller
* @regs: base address of register map
* @clk: core clock, divided to calculate serial clock
+ * @clk_hz: core clock cached speed
* @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
* @tfr: SPI transfer currently processed
* @ctlr: SPI controller reverse lookup
@@ -116,6 +117,7 @@ MODULE_PARM_DESC(polling_limit_us,
struct bcm2835_spi {
void __iomem *regs;
struct clk *clk;
+ unsigned long clk_hz;
int irq;
struct spi_transfer *tfr;
struct spi_controller *ctlr;
@@ -1045,19 +1047,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *slv = spi_get_ctldata(spi);
- unsigned long spi_hz, clk_hz, cdiv;
+ unsigned long spi_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
u32 cs = slv->prepare_cs;
/* set clock */
spi_hz = tfr->speed_hz;
- clk_hz = clk_get_rate(bs->clk);
- if (spi_hz >= clk_hz / 2) {
+ if (spi_hz >= bs->clk_hz / 2) {
cdiv = 2; /* clk_hz/2 is the fastest we can go */
} else if (spi_hz) {
/* CDIV must be a multiple of two */
- cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+ cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
cdiv += (cdiv % 2);
if (cdiv >= 65536)
@@ -1065,7 +1066,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
} else {
cdiv = 0; /* 0 is the slowest we can go */
}
- tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+ tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */
@@ -1354,6 +1355,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
return bs->irq ? bs->irq : -ENODEV;
clk_prepare_enable(bs->clk);
+ bs->clk_hz = clk_get_rate(bs->clk);
err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
if (err)
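
For illustration, a stand-alone sketch of the CDIV selection used in the transfer path above, with the core clock passed in as a cached value rather than read on every transfer (the 250 MHz and 10 MHz figures are just examples):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long bcm2835_pick_cdiv(unsigned long clk_hz, unsigned long spi_hz,
					unsigned long *effective_hz)
{
	unsigned long cdiv;

	if (spi_hz >= clk_hz / 2) {
		cdiv = 2;                       /* clk_hz/2 is the fastest setting */
	} else if (spi_hz) {
		cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
		cdiv += cdiv % 2;               /* CDIV must be a multiple of two */
		if (cdiv >= 65536)
			cdiv = 0;               /* 0 selects the slowest rate */
	} else {
		cdiv = 0;
	}

	*effective_hz = cdiv ? clk_hz / cdiv : clk_hz / 65536;
	return cdiv;
}

int main(void)
{
	unsigned long eff;
	unsigned long cdiv = bcm2835_pick_cdiv(250000000UL, 10000000UL, &eff);

	printf("cdiv=%lu effective=%lu Hz\n", cdiv, eff);
	return 0;
}
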
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 7a00346ff9b9..101cc71bffa7 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
{
unsigned int dummy_clk;
+ if (!op->dummy.nbytes)
+ return 0;
+
dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
if (dtr)
dummy_clk /= 2;
@@ -322,7 +325,15 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
+
+ /*
+ * For an op to be DTR, cmd phase along with every other non-empty
+ * phase should have dtr field set to 1. If an op phase has zero
+ * nbytes, ignore its dtr field; otherwise, check its dtr field.
+ */
+ f_pdata->dtr = op->cmd.dtr &&
+ (!op->addr.nbytes || op->addr.dtr) &&
+ (!op->data.nbytes || op->data.dtr);
switch (op->data.buswidth) {
case 0:
@@ -797,19 +808,20 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
reg = cqspi_calc_rdreg(f_pdata);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- if (f_pdata->dtr) {
- /*
- * Some flashes like the cypress Semper flash expect a 4-byte
- * dummy address with the Read SR command in DTR mode, but this
- * controller does not support sending address with the Read SR
- * command. So, disable write completion polling on the
- * controller's side. spi-nor will take care of polling the
- * status register.
- */
- reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
- reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
- writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
- }
+ /*
+ * SPI NAND flashes require the address of the status register to be
+ * passed in the Read SR command. Also, some SPI NOR flashes like the
+ * cypress Semper flash expect a 4-byte dummy address in the Read SR
+ * command in DTR mode.
+ *
+ * But this controller does not support address phase in the Read SR
+ * command when doing auto-HW polling. So, disable write completion
+ * polling on the controller's side. spinand and spi-nor will take
+ * care of polling the status register.
+ */
+ reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+ writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
@@ -1224,8 +1236,15 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
{
bool all_true, all_false;
- all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr &&
- op->data.dtr;
+ /*
+ * op->dummy.dtr is required for converting nbytes into ncycles.
+ * Also, don't check the dtr field of the op phase having zero nbytes.
+ */
+ all_true = op->cmd.dtr &&
+ (!op->addr.nbytes || op->addr.dtr) &&
+ (!op->dummy.nbytes || op->dummy.dtr) &&
+ (!op->data.nbytes || op->data.dtr);
+
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
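
A self-contained sketch of the relaxed DTR check above, where a phase with zero nbytes no longer needs its dtr flag set; the struct here is a simplified stand-in for struct spi_mem_op, not the real definition:

#include <stdbool.h>
#include <stdio.h>

struct phase { unsigned int nbytes; bool dtr; };
struct op { struct phase cmd, addr, dummy, data; };

static bool op_is_dtr(const struct op *op)
{
	return op->cmd.dtr &&
	       (!op->addr.nbytes || op->addr.dtr) &&
	       (!op->dummy.nbytes || op->dummy.dtr) &&
	       (!op->data.nbytes || op->data.dtr);
}

static bool op_is_str(const struct op *op)
{
	return !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr && !op->data.dtr;
}

int main(void)
{
	/* 8D-8D-8D style read with no dummy phase: now accepted as DTR */
	struct op rd = { .cmd = {1, true}, .addr = {4, true},
			 .dummy = {0, false}, .data = {16, true} };

	printf("dtr=%d str=%d\n", op_is_dtr(&rd), op_is_str(&rd));
	return 0;
}
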
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index a3afd1b9ac56..ceb16e70d235 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
ret = -ENXIO;
@@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 39dc02e366f4..fa68e9817929 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -505,8 +505,10 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
+ struct spi_transfer *xfer;
u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
- u32 testreg;
+ u32 min_speed_hz = ~0U;
+ u32 testreg, delay;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
/* set Master or Slave mode */
@@ -567,6 +569,35 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+ /*
+ * Wait until the changes in the configuration register CONFIGREG
+ * propagate into the hardware. It takes exactly one tick of the
+ * SCLK clock, but we will wait two SCLK clocks just to be sure. The
+ * effect of the delay it takes for the hardware to apply changes
+ * is noticeable if the SCLK clock runs very slow. In such a case, if
+ * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+ * be asserted before the SCLK polarity changes, which would disrupt
+ * the SPI communication as the device on the other end would consider
+ * the change of SCLK polarity as a clock tick already.
+ *
+ * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
+ * callback, iterate over all the transfers in spi_message, find the
+ * one with lowest bus frequency, and use that bus frequency for the
+ * delay calculation. In case all transfers have speed_hz == 0, then
+ * min_speed_hz is ~0 and the resulting delay is zero.
+ */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!xfer->speed_hz)
+ continue;
+ min_speed_hz = min(xfer->speed_hz, min_speed_hz);
+ }
+
+ delay = (2 * 1000000) / min_speed_hz;
+ if (likely(delay < 10)) /* SCLK is faster than 200 kHz */
+ udelay(delay);
+ else /* SCLK is _very_ slow */
+ usleep_range(delay, delay + 10);
+
return 0;
}
@@ -574,7 +605,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
- u32 clk, delay;
+ u32 clk;
/* Clear BL field and set the right value */
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
@@ -596,23 +627,6 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
- /*
- * Wait until the changes in the configuration register CONFIGREG
- * propagate into the hardware. It takes exactly one tick of the
- * SCLK clock, but we will wait two SCLK clock just to be sure. The
- * effect of the delay it takes for the hardware to apply changes
- * is noticable if the SCLK clock run very slow. In such a case, if
- * the polarity of SCLK should be inverted, the GPIO ChipSelect might
- * be asserted before the SCLK polarity changes, which would disrupt
- * the SPI communication as the device on the other end would consider
- * the change of SCLK polarity as a clock tick already.
- */
- delay = (2 * 1000000) / clk;
- if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
- udelay(delay);
- else /* SCLK is _very_ slow */
- usleep_range(delay, delay + 10);
-
return 0;
}
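
For reference, a small program that evaluates the two-SCLK-tick settle delay introduced above; the 10 us threshold is why the comment talks about 200 kHz (the frequencies below are arbitrary examples):

#include <stdio.h>

static unsigned int config_settle_delay_us(unsigned int min_speed_hz)
{
	/* two ticks of SCLK, expressed in microseconds */
	return (2U * 1000000U) / min_speed_hz;
}

int main(void)
{
	unsigned int freqs[] = { 10000000, 200000, 50000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u Hz -> %u us (%s)\n", freqs[i],
		       config_settle_delay_us(freqs[i]),
		       config_settle_delay_us(freqs[i]) < 10 ?
		       "udelay" : "usleep_range");
	return 0;
}
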
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index b2c4621db34d..c208efeadd18 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -785,6 +785,8 @@ static int meson_spicc_remove(struct platform_device *pdev)
clk_disable_unprepare(spicc->core);
clk_disable_unprepare(spicc->pclk);
+ spi_master_put(spicc->master);
+
return 0;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 976f73b9e299..7914255521c3 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -426,14 +426,15 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
mtk_spi_prepare_transfer(master, xfer);
mtk_spi_setup_packet(master);
- cnt = xfer->len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
-
- remainder = xfer->len % 4;
- if (remainder > 0) {
- reg_val = 0;
- memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ if (xfer->tx_buf) {
+ cnt = xfer->len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+ remainder = xfer->len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
}
mtk_spi_enable_transfer(master);
@@ -793,12 +794,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
- if (ret) {
- dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
- goto err_disable_runtime_pm;
- }
-
if (mdata->dev_comp->need_pad_sel) {
if (mdata->pad_num != master->num_chipselect) {
dev_err(&pdev->dev,
@@ -838,6 +833,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
addr_bits, ret);
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+ goto err_disable_runtime_pm;
+ }
+
return 0;
err_disable_runtime_pm:
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index 37dfc6e82804..9708b7827ff7 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -167,10 +167,17 @@ err_put_ctlr:
return ret;
}
+static const struct spi_device_id spi_mux_id[] = {
+ { "spi-mux" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, spi_mux_id);
+
static const struct of_device_id spi_mux_of_match[] = {
{ .compatible = "spi-mux" },
{ }
};
+MODULE_DEVICE_TABLE(of, spi_mux_of_match);
static struct spi_driver spi_mux_driver = {
.probe = spi_mux_probe,
@@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = {
.name = "spi-mux",
.of_match_table = spi_mux_of_match,
},
+ .id_table = spi_mux_id,
};
module_spi_driver(spi_mux_driver);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 8ffcffbb8157..05618a618939 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -884,15 +884,18 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
mask = ier;
- /* EOTIE is triggered on EOT, SUSP and TXC events. */
+ /*
+ * EOTIE enables IRQs from EOT, SUSP and TXC events. We need to set
+ * SUSP to acknowledge it later. TXC is automatically cleared.
+ */
+
mask |= STM32H7_SPI_SR_SUSP;
/*
- * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
- * Full-Duplex, need to poll RXP event to know if there are remaining
- * data, before disabling SPI.
+ * DXPIE is set in Full-Duplex, one IT will be raised if TXP and RXP
+ * are set. So in case of Full-Duplex, need to poll TXP and RXP event.
*/
- if (spi->rx_buf && !spi->cur_usedma)
- mask |= STM32H7_SPI_SR_RXP;
+ if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
+ mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -1925,6 +1928,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->can_dma = stm32_spi_can_dma;
pm_runtime_set_active(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = spi_register_master(master);
@@ -1940,6 +1944,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
err_pm_disable:
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
@@ -1956,9 +1962,14 @@ static int stm32_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct stm32_spi *spi = spi_master_get_devdata(master);
+ pm_runtime_get_sync(&pdev->dev);
+
spi_unregister_master(master);
spi->cfg->disable(spi);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
if (master->dma_rx)
@@ -1966,7 +1977,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(spi->clk);
- pm_runtime_disable(&pdev->dev);
pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c99181165321..e4dc593b1f32 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -58,6 +58,10 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
const struct spi_device *spi = to_spi_device(dev);
int len;
+ len = of_device_modalias(dev, buf, PAGE_SIZE);
+ if (len != -ENODEV)
+ return len;
+
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index a53bad541f1a..2874b6c26028 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -11,6 +11,15 @@ menuconfig SPMI
if SPMI
+config SPMI_HISI3670
+ tristate "Hisilicon 3670 SPMI Controller"
+ select IRQ_DOMAIN_HIERARCHY
+ depends on HAS_IOMEM
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter interface on Hisilicon 3670
+ processors.
+
config SPMI_MSM_PMIC_ARB
tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 55a94cadeffe..6e092e6f290c 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_SPMI) += spmi.o
+obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
index 0d42bc65f39b..5bd23262abd6 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/spmi/hisi-spmi-controller.c
@@ -290,7 +290,7 @@ static int spmi_controller_probe(struct platform_device *pdev)
goto err_put_controller;
}
- ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
+ ret = of_property_read_u32(pdev->dev.of_node, "hisilicon,spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index b7ae5bdc4eb5..c8eaae6412bb 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -98,8 +98,6 @@ source "drivers/staging/axis-fifo/Kconfig"
source "drivers/staging/fieldbus/Kconfig"
-source "drivers/staging/kpc2000/Kconfig"
-
source "drivers/staging/qlge/Kconfig"
source "drivers/staging/wfx/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 075c979bfe7c..818b6f964369 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
-obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_WFX) += wfx/
obj-y += hikey9xx/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index d66a64e42273..ddbde3f8430e 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -179,6 +179,7 @@ static inline void lru_del(struct ashmem_range *range)
* @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
* @start: The starting page (inclusive)
* @end: The ending page (inclusive)
+ * @new_range: The placeholder for the new range
*
* This function is protected by ashmem_mutex.
*/
@@ -894,6 +895,8 @@ static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "name:\t%s\n",
asma->name + ASHMEM_NAME_PREFIX_LEN);
+ seq_printf(m, "size:\t%zu\n", asma->size);
+
mutex_unlock(&ashmem_mutex);
}
#endif
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index ed9281089738..b23eabb863d1 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -337,11 +337,11 @@ static void reset_ip_core(struct axis_fifo *fifo)
}
/**
- * axis_fifo_write() - Read a packet from AXIS-FIFO character device.
- * @f Open file.
- * @buf User space buffer to read to.
- * @len User space buffer length.
- * @off Buffer offset.
+ * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to read to.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
*
* As defined by the device's documentation, we need to check the device's
* occupancy before reading the length register and then the data. All these
@@ -460,10 +460,10 @@ end_unlock:
/**
* axis_fifo_write() - Write buffer to AXIS-FIFO character device.
- * @f Open file.
- * @buf User space buffer to write to the device.
- * @len User space buffer length.
- * @off Buffer offset.
+ * @f: Open file.
+ * @buf: User space buffer to write to the device.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
*
* As defined by the device's documentation, we need to write to the device's
* data buffer then to the device's packet length register atomically. Also,
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index ecc5c9da9027..b6abd3770e81 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -1073,9 +1073,8 @@ static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
i_word_length = length / sizeof(u32);
if (i_word_length > 0) {
for (i = 0; i < i_word_length; i++) {
- _nbu2ss_writel(
- &preg->EP_REGS[ep->epnum - 1].EP_WRITE,
- p_buf_32->dw);
+ _nbu2ss_writel(&preg->EP_REGS[ep->epnum - 1].EP_WRITE,
+ p_buf_32->dw);
p_buf_32++;
}
@@ -1225,8 +1224,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
return;
if (ep->epnum > 0) {
- length = _nbu2ss_readl(
- &ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
+ length = _nbu2ss_readl(&ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
length &= EPN_LDATA;
if (length < ep->ep.maxpacket)
@@ -1462,8 +1460,7 @@ static void _nbu2ss_epn_set_stall(struct nbu2ss_udc *udc,
for (limit_cnt = 0
; limit_cnt < IN_DATA_EMPTY_COUNT
; limit_cnt++) {
- regdata = _nbu2ss_readl(
- &preg->EP_REGS[ep->epnum - 1].EP_STATUS);
+ regdata = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((regdata & EPN_IN_DATA) == 0)
break;
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
index a9f4802bb6be..e72a08bf221c 100644
--- a/drivers/staging/fbtft/TODO
+++ b/drivers/staging/fbtft/TODO
@@ -1,8 +1,3 @@
-* convert all uses of the old GPIO API from <linux/gpio.h> to the
- GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
- lines from device tree, ACPI or board files, board files should
- use <linux/gpio/machine.h>
-
* convert all these over to drm_simple_display_pipe and submit for inclusion
into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
drivers anymore.
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index eeeeec97ad27..207d578547cd 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -77,19 +77,6 @@ static int init_display(struct fbtft_par *par)
return 0;
}
-static void reset(struct fbtft_par *par)
-{
- if (!par->gpio.reset)
- return;
-
- dev_dbg(par->info->device, "%s()\n", __func__);
-
- gpiod_set_value(par->gpio.reset, 0);
- udelay(20);
- gpiod_set_value(par->gpio.reset, 1);
- mdelay(120);
-}
-
/* Check if all necessary GPIOS defined */
static int verify_gpios(struct fbtft_par *par)
{
@@ -194,12 +181,12 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
/* select chip */
if (*buf) {
/* cs1 */
- gpiod_set_value(par->CS0, 1);
- gpiod_set_value(par->CS1, 0);
- } else {
- /* cs0 */
gpiod_set_value(par->CS0, 0);
gpiod_set_value(par->CS1, 1);
+ } else {
+ /* cs0 */
+ gpiod_set_value(par->CS0, 1);
+ gpiod_set_value(par->CS1, 0);
}
gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */
@@ -397,8 +384,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
kfree(convert_buf);
- gpiod_set_value(par->CS0, 1);
- gpiod_set_value(par->CS1, 1);
+ gpiod_set_value(par->CS0, 0);
+ gpiod_set_value(par->CS1, 0);
return ret;
}
@@ -419,10 +406,10 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
for (i = 0; i < 8; ++i)
gpiod_set_value(par->gpio.db[i], data & (1 << i));
/* set E */
- gpiod_set_value(par->EPIN, 1);
+ gpiod_set_value(par->EPIN, 0);
udelay(5);
/* unset E - write */
- gpiod_set_value(par->EPIN, 0);
+ gpiod_set_value(par->EPIN, 1);
udelay(1);
}
@@ -439,7 +426,6 @@ static struct fbtft_display display = {
.set_addr_win = set_addr_win,
.verify_gpios = verify_gpios,
.request_gpios_match = request_gpios_match,
- .reset = reset,
.write = write,
.write_register = write_reg8_bus8,
.write_vmem = write_vmem,
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
index e2c7646588f8..1629c2c440a9 100644
--- a/drivers/staging/fbtft/fb_bd663474.c
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -24,9 +23,6 @@
static int init_display(struct fbtft_par *par)
{
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
-
par->fbtftops.reset(par);
/* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index 37eaf0862c5b..a9b72a8b42b5 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -68,9 +68,6 @@ static int init_display(struct fbtft_par *par)
mdelay(40);
write_reg(par, 0x28, 0x3C);
- /* orientation */
- write_reg(par, 0x16, 0x60 | (par->bgr << 3));
-
return 0;
}
@@ -87,6 +84,31 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
write_reg(par, 0x22);
}
+#define MEM_Y BIT(7) /* MY row address order */
+#define MEM_X BIT(6) /* MX column address order */
+#define MEM_V BIT(5) /* MV row / column exchange */
+#define MEM_L BIT(4) /* ML vertical refresh order */
+#define MEM_BGR (3) /* RGB-BGR Order */
+static int set_var(struct fbtft_par *par)
+{
+ switch (par->info->var.rotate) {
+ case 0:
+ write_reg(par, 0x16, MEM_V | MEM_X | (par->bgr << MEM_BGR));
+ break;
+ case 270:
+ write_reg(par, 0x16, par->bgr << MEM_BGR);
+ break;
+ case 180:
+ write_reg(par, 0x16, MEM_V | MEM_Y | (par->bgr << MEM_BGR));
+ break;
+ case 90:
+ write_reg(par, 0x16, MEM_X | MEM_Y | (par->bgr << MEM_BGR));
+ break;
+ }
+
+ return 0;
+}
+
/*
* Gamma string format:
* VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
@@ -144,6 +166,7 @@ static struct fbtft_display display = {
.fbtftops = {
.init_display = init_display,
.set_addr_win = set_addr_win,
+ .set_var = set_var,
.set_gamma = set_gamma,
},
};
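
As an aside, here is a tiny table-printer for the rotation handling added in set_var() above; the bit names mirror the MEM_* defines from the patch, and the BGR flag is assumed set for the example output:

#include <stdio.h>

#define MEM_Y   (1u << 7)  /* MY: row address order */
#define MEM_X   (1u << 6)  /* MX: column address order */
#define MEM_V   (1u << 5)  /* MV: row/column exchange */
#define MEM_BGR 3          /* RGB/BGR order bit position */

static unsigned int madctl_for_rotation(unsigned int rotate, unsigned int bgr)
{
	switch (rotate) {
	case 0:   return MEM_V | MEM_X | (bgr << MEM_BGR);
	case 90:  return MEM_X | MEM_Y | (bgr << MEM_BGR);
	case 180: return MEM_V | MEM_Y | (bgr << MEM_BGR);
	case 270: return bgr << MEM_BGR;
	default:  return 0;
	}
}

int main(void)
{
	unsigned int rotations[] = { 0, 90, 180, 270 };

	for (int i = 0; i < 4; i++)
		printf("rotate=%3u -> reg 0x16 = 0x%02x\n", rotations[i],
		       madctl_for_rotation(rotations[i], 1));
	return 0;
}
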
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index 05648c3ffe47..6582a2c90aaf 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <video/mipi_display.h>
@@ -77,9 +76,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
-
write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
mdelay(500);
write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index f2e72d14431d..0be7c2d51548 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -22,11 +21,10 @@
static unsigned int read_devicecode(struct fbtft_par *par)
{
- int ret;
u8 rxbuf[8] = {0, };
write_reg(par, 0x0000);
- ret = par->fbtftops.read(par, rxbuf, 4);
+ par->fbtftops.read(par, rxbuf, 4);
return (rxbuf[2] << 8) | rxbuf[3];
}
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index c9aa4cb43123..16d3b17ca279 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -85,9 +84,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
-
bt &= 0x07;
vc &= 0x07;
vrh &= 0x0f;
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
index 415183c7054a..704236bcaf3f 100644
--- a/drivers/staging/fbtft/fb_ili9340.c
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <video/mipi_display.h>
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index 8c7de3290343..62f27172f844 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -29,9 +28,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
-
/* Initialization sequence from Lib_UTFT */
write_reg(par, 0x0011, 0x2004);
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 6f7249493ea3..7b9ab39e1c1a 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index 7a3fe022cc69..f27bab38b3ec 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include "fbtft.h"
@@ -28,9 +27,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
-
write_reg(par, 0x00, 0x0001);
write_reg(par, 0x03, 0xA8A4);
write_reg(par, 0x0C, 0x0000);
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
index 8a3140d41d8b..796a2ac3e194 100644
--- a/drivers/staging/fbtft/fb_ssd1325.c
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- gpiod_set_value(par->gpio.cs, 0);
-
write_reg(par, 0xb3);
write_reg(par, 0xf0);
write_reg(par, 0xae);
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 37622c9462aa..ec5eced7f8cb 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -81,8 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
va_start(args, len);
*buf = (u8)va_arg(args, unsigned int);
- if (par->gpio.dc)
- gpiod_set_value(par->gpio.dc, 0);
+ gpiod_set_value(par->gpio.dc, 0);
ret = par->fbtftops.write(par, par->buf, sizeof(u8));
if (ret < 0) {
va_end(args);
@@ -104,8 +103,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
return;
}
}
- if (par->gpio.dc)
- gpiod_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
va_end(args);
}
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index 900b28d826b2..cf263a58a148 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -2,7 +2,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
index c77832ae5e5b..c680160d6380 100644
--- a/drivers/staging/fbtft/fb_upd161704.c
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -26,9 +25,6 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
-
/* Initialization sequence from Lib_UTFT */
/* register reset */
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 76b25df376b8..a57e1f4feef3 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 63c65dd67b17..3d422bc11641 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -135,8 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
- if (par->gpio.dc)
- gpiod_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
/* non buffered write */
if (!par->txbuf.buf)
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 4f362dad4436..3723269890d5 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -38,8 +38,7 @@ int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
{
int ret;
- if (par->gpio.dc)
- gpiod_set_value(par->gpio.dc, dc);
+ gpiod_set_value(par->gpio.dc, dc);
ret = par->fbtftops.write(par, buf, len);
if (ret < 0)
@@ -76,20 +75,16 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
struct gpio_desc **gpiop)
{
struct device *dev = par->info->device;
- int ret = 0;
*gpiop = devm_gpiod_get_index_optional(dev, name, index,
- GPIOD_OUT_HIGH);
- if (IS_ERR(*gpiop)) {
- ret = PTR_ERR(*gpiop);
- dev_err(dev,
- "Failed to request %s GPIO: %d\n", name, ret);
- return ret;
- }
+ GPIOD_OUT_LOW);
+ if (IS_ERR(*gpiop))
+ return dev_err_probe(dev, PTR_ERR(*gpiop), "Failed to request %s GPIO\n", name);
+
fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
__func__, name);
- return ret;
+ return 0;
}
static int fbtft_request_gpios(struct fbtft_par *par)
@@ -226,11 +221,15 @@ static void fbtft_reset(struct fbtft_par *par)
{
if (!par->gpio.reset)
return;
+
fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
+
gpiod_set_value_cansleep(par->gpio.reset, 1);
usleep_range(20, 40);
gpiod_set_value_cansleep(par->gpio.reset, 0);
msleep(120);
+
+ gpiod_set_value_cansleep(par->gpio.cs, 1); /* Activate chip */
}
static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
@@ -922,8 +921,6 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
goto out_free;
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
index = -1;
val = values[++index];
@@ -1018,8 +1015,6 @@ int fbtft_init_display(struct fbtft_par *par)
}
par->fbtftops.reset(par);
- if (par->gpio.cs)
- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
i = 0;
while (i < FBTFT_MAX_INIT_SEQUENCE) {
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index 0863d257d762..de1904a443c2 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -142,12 +142,12 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
data = *(u8 *)buf;
/* Start writing by pulling down /WR */
- gpiod_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 1);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
} else {
for (i = 0; i < 8; i++) {
if ((data & 1) != (prev_data & 1))
@@ -165,7 +165,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
/* Pullup /WR */
- gpiod_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 0);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
prev_data = *(u8 *)buf;
@@ -192,12 +192,12 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
data = *(u16 *)buf;
/* Start writing by pulling down /WR */
- gpiod_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 1);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
} else {
for (i = 0; i < 16; i++) {
if ((data & 1) != (prev_data & 1))
@@ -215,7 +215,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
#endif
/* Pullup /WR */
- gpiod_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 0);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
prev_data = *(u16 *)buf;
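
The /WR and CS polarity flips in this fbtft series make more sense with the gpiod contract in mind: gpiod_set_value() takes the logical (asserted/deasserted) state, and the GPIO core applies any active-low inversion declared for the line. A toy user-space model of that behaviour, assuming an active-low /WR line:

#include <stdbool.h>
#include <stdio.h>

struct gpio_desc_model { bool active_low; bool physical; };

static void gpiod_set_value_model(struct gpio_desc_model *d, int logical)
{
	/* logical 1 = asserted; the core inverts for active-low lines */
	d->physical = d->active_low ? !logical : !!logical;
}

int main(void)
{
	struct gpio_desc_model wr = { .active_low = true };

	gpiod_set_value_model(&wr, 1);   /* assert /WR */
	printf("asserted: wire level %d\n", wr.physical);    /* 0 on the wire */
	gpiod_set_value_model(&wr, 0);   /* deassert /WR */
	printf("deasserted: wire level %d\n", wr.physical);  /* 1 on the wire */
	return 0;
}
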
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-client.h b/drivers/staging/fieldbus/anybuss/anybuss-client.h
index 8ee1f1baccf1..a219688006fe 100644
--- a/drivers/staging/fieldbus/anybuss/anybuss-client.h
+++ b/drivers/staging/fieldbus/anybuss/anybuss-client.h
@@ -32,7 +32,7 @@ struct anybuss_client {
struct anybuss_client_driver {
struct device_driver driver;
int (*probe)(struct anybuss_client *adev);
- int (*remove)(struct anybuss_client *adev);
+ void (*remove)(struct anybuss_client *adev);
u16 anybus_id;
};
diff --git a/drivers/staging/fieldbus/anybuss/hms-profinet.c b/drivers/staging/fieldbus/anybuss/hms-profinet.c
index eca7d97b8e85..e691736a53f1 100644
--- a/drivers/staging/fieldbus/anybuss/hms-profinet.c
+++ b/drivers/staging/fieldbus/anybuss/hms-profinet.c
@@ -190,12 +190,11 @@ static int profinet_probe(struct anybuss_client *client)
return 0;
}
-static int profinet_remove(struct anybuss_client *client)
+static void profinet_remove(struct anybuss_client *client)
{
struct profi_priv *priv = anybuss_get_drvdata(client);
fieldbus_dev_unregister(&priv->fbdev);
- return 0;
}
static struct anybuss_client_driver profinet_driver = {
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index c97df91124a4..0f730efe9a6d 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1183,8 +1183,6 @@ static int anybus_bus_probe(struct device *dev)
struct anybuss_client *adev =
to_anybuss_client(dev);
- if (!adrv->probe)
- return -ENODEV;
return adrv->probe(adev);
}
@@ -1194,7 +1192,8 @@ static int anybus_bus_remove(struct device *dev)
to_anybuss_client_driver(dev->driver);
if (adrv->remove)
- return adrv->remove(to_anybuss_client(dev));
+ adrv->remove(to_anybuss_client(dev));
+
return 0;
}
@@ -1207,6 +1206,9 @@ static struct bus_type anybus_bus = {
int anybuss_client_driver_register(struct anybuss_client_driver *drv)
{
+ if (!drv->probe)
+ return -ENODEV;
+
drv->driver.bus = &anybus_bus;
return driver_register(&drv->driver);
}
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 1ee6382cafc4..38a280e876c2 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -45,14 +45,14 @@ module_param_named(loop, create_loop_dev, bool, 0644);
*/
#define WAKEUP_CHARS 256
-/**
+/*
* fwserial_list: list of every fw_serial created for each fw_card
* See discussion in fwserial_probe.
*/
static LIST_HEAD(fwserial_list);
static DEFINE_MUTEX(fwserial_list_mutex);
-/**
+/*
* port_table: array of tty ports allocated to each fw_card
*
* tty ports are allocated during probe when an fw_serial is first
@@ -284,7 +284,7 @@ static void fwtty_restart_tx(struct fwtty_port *port)
spin_unlock_bh(&port->lock);
}
-/**
+/*
* fwtty_update_port_status - decodes & dispatches line status changes
*
* Note: in loopback, the port->lock is being held. Only use functions that
@@ -375,7 +375,7 @@ static void fwtty_update_port_status(struct fwtty_port *port,
wake_up_interruptible(&port->port.delta_msr_wait);
}
-/**
+/*
* __fwtty_port_line_status - generate 'line status' for indicated port
*
* This function returns a remote 'MSR' state based on the local 'MCR' state,
@@ -403,7 +403,7 @@ static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
return status;
}
-/**
+/*
* __fwtty_write_port_status - send the port line status to peer
*
* Note: caller must be holding the port lock.
@@ -426,7 +426,7 @@ static int __fwtty_write_port_status(struct fwtty_port *port)
return err;
}
-/**
+/*
* fwtty_write_port_status - same as above but locked by port lock
*/
static int fwtty_write_port_status(struct fwtty_port *port)
@@ -462,7 +462,7 @@ static void fwtty_throttle_port(struct fwtty_port *port)
tty_kref_put(tty);
}
-/**
+/*
* fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup
*
* When the remote has finished tx, and all in-flight rx has been received and
@@ -589,9 +589,8 @@ out:
return err;
}
-/**
+/*
* fwtty_port_handler - bus address handler for port reads/writes
- * @parameters: fw_address_callback_t as specified by firewire core interface
*
* This handler is responsible for handling inbound read/write dma from remotes.
*/
@@ -656,7 +655,7 @@ respond:
fw_send_response(card, request, rcode);
}
-/**
+/*
* fwtty_tx_complete - callback for tx dma
* @data: ignored, has no meaning for write txns
* @length: ignored, has no meaning for write txns
@@ -722,7 +721,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
/* try to write as many dma transactions out as possible */
n = -EAGAIN;
- while (!tty->stopped && !tty->hw_stopped &&
+ while (!tty->flow.stopped && !tty->hw_stopped &&
!test_bit(STOP_TX, &port->flags)) {
txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
if (!txn) {
@@ -904,7 +903,7 @@ static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on)
spin_unlock_bh(&port->lock);
}
-/**
+/*
* fwtty_port_carrier_raised: required tty_port operation
*
* This port operation is polled after a tty has been opened and is waiting for
@@ -1011,7 +1010,7 @@ static int fwtty_port_activate(struct tty_port *tty_port,
return 0;
}
-/**
+/*
* fwtty_port_shutdown
*
* Note: the tty port core ensures this is not the console and
@@ -1113,30 +1112,30 @@ static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
return (n < 0) ? 0 : n;
}
-static int fwtty_write_room(struct tty_struct *tty)
+static unsigned int fwtty_write_room(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
- int n;
+ unsigned int n;
spin_lock_bh(&port->lock);
n = dma_fifo_avail(&port->tx_fifo);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "%d\n", n);
+ fwtty_dbg(port, "%u\n", n);
return n;
}
-static int fwtty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int fwtty_chars_in_buffer(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
- int n;
+ unsigned int n;
spin_lock_bh(&port->lock);
n = dma_fifo_level(&port->tx_fifo);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "%d\n", n);
+ fwtty_dbg(port, "%u\n", n);
return n;
}
@@ -1297,7 +1296,7 @@ static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
}
}
-/**
+/*
* fwtty_break_ctl - start/stop sending breaks
*
* Signals the remote to start or stop generating simulated breaks.
@@ -1669,7 +1668,7 @@ static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
return rcode;
}
-/**
+/*
* fwserial_claim_port - attempt to claim port @ index for peer
*
* Returns ptr to claimed port or error code (as ERR_PTR())
@@ -1697,7 +1696,7 @@ static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer,
return port;
}
-/**
+/*
* fwserial_find_port - find avail port and claim for peer
*
* Returns ptr to claimed port or NULL if none avail
@@ -1764,7 +1763,7 @@ static void fwserial_plug_timeout(struct timer_list *t)
fwserial_release_port(port, false);
}
-/**
+/*
* fwserial_connect_peer - initiate virtual cable with peer
*
* Returns 0 if VIRT_CABLE_PLUG request was successfully sent,
@@ -1829,7 +1828,7 @@ free_pkt:
return err;
}
-/**
+/*
* fwserial_close_port -
* HUP the tty (if the tty exists) and unregister the tty device.
* Only used by the unit driver upon unit removal to disconnect and
@@ -1893,7 +1892,7 @@ static struct fw_serial *__fwserial_lookup_rcu(struct fw_card *card)
return NULL;
}
-/**
+/*
* __fwserial_peer_by_node_id - finds a peer matching the given generation + id
*
* If a matching peer could not be found for the specified generation/node id,
@@ -2076,7 +2075,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
return 0;
}
-/**
+/*
* fwserial_remove_peer - remove a 'serial' unit device as a 'peer'
*
* Remove a 'peer' from its list of peers. This function is only
@@ -2279,7 +2278,7 @@ free_ports:
return err;
}
-/**
+/*
* fwserial_probe: bus probe function for firewire 'serial' unit devices
*
* A 'serial' unit device is created and probed as a result of:
@@ -2331,7 +2330,7 @@ static int fwserial_probe(struct fw_unit *unit,
return err;
}
-/**
+/*
* fwserial_remove: bus removal function for firewire 'serial' unit devices
*
* The corresponding 'peer' for this unit device is removed from the list of
@@ -2363,7 +2362,7 @@ static void fwserial_remove(struct fw_unit *unit)
mutex_unlock(&fwserial_list_mutex);
}
-/**
+/*
* fwserial_update: bus update function for 'firewire' serial unit devices
*
* Updates the new node_id and bus generation for this peer. Note that locking
@@ -2699,9 +2698,8 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
return rcode;
}
-/**
+/*
* fwserial_mgmt_handler: bus address handler for mgmt requests
- * @parameters: fw_address_callback_t as specified by firewire core interface
*
* This handler is responsible for handling virtual cable requests from remotes
* for all cards.
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 571f47d39484..e390c924ec1c 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -611,10 +611,12 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
* bytes (99,130,83,99 dec)
*/
} __packed;
- void *addr = buf + sizeof(struct iphdr) +
- sizeof(struct udphdr) +
- offsetof(struct dhcp_packet, chaddr);
- ether_addr_copy(nic->dest_mac_addr, addr);
+ int offset = sizeof(struct iphdr) +
+ sizeof(struct udphdr) +
+ offsetof(struct dhcp_packet, chaddr);
+ if (offset + ETH_ALEN > len)
+ return;
+ ether_addr_copy(nic->dest_mac_addr, buf + offset);
}
}
@@ -677,6 +679,7 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
struct sdu *sdu = NULL;
u8 endian = phy_dev->get_endian(phy_dev->priv_dev);
u8 *data = (u8 *)multi_sdu->data;
+ int copied;
u16 i = 0;
u16 num_packet;
u16 hci_len;
@@ -684,10 +687,15 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
u32 nic_type;
int index;
- hci_len = gdm_dev16_to_cpu(endian, multi_sdu->len);
num_packet = gdm_dev16_to_cpu(endian, multi_sdu->num_packet);
for (i = 0; i < num_packet; i++) {
+ copied = data - multi_sdu->data;
+ if (len < copied + sizeof(*sdu)) {
+ pr_err("rx prevent buffer overflow");
+ return;
+ }
+
sdu = (struct sdu *)data;
cmd_evt = gdm_dev16_to_cpu(endian, sdu->cmd_evt);
@@ -698,7 +706,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
pr_err("rx sdu wrong hci %04x\n", cmd_evt);
return;
}
- if (hci_len < 12) {
+ if (hci_len < 12 ||
+ len < copied + sizeof(*sdu) + (hci_len - 12)) {
pr_err("rx sdu invalid len %d\n", hci_len);
return;
}
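
A user-space sketch of the bounds checking added above. The record layout here is a simplified, hypothetical stand-in for the multi-SDU format; the point is validating both the header and the payload against the received length before advancing the cursor:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sdu_hdr { uint16_t cmd_evt; uint16_t len; };   /* toy 4-byte header */

static int parse_multi_sdu(const uint8_t *buf, size_t len, unsigned int num_packet)
{
	const uint8_t *data = buf;

	for (unsigned int i = 0; i < num_packet; i++) {
		size_t copied = (size_t)(data - buf);
		struct sdu_hdr hdr;

		if (len < copied + sizeof(hdr))
			return -1;                       /* truncated header */
		memcpy(&hdr, data, sizeof(hdr));

		if (hdr.len < 12 || len < copied + sizeof(hdr) + (hdr.len - 12))
			return -1;                       /* bogus or truncated payload */

		/* ... hand the SDU up the stack here ... */
		data += sizeof(hdr) + (hdr.len - 12);
	}
	return 0;
}

int main(void)
{
	uint8_t pkt[32] = { 0 };
	struct sdu_hdr hdr = { .cmd_evt = 0x3000, .len = 12 };

	memcpy(pkt, &hdr, sizeof(hdr));
	memcpy(pkt + sizeof(hdr), &hdr, sizeof(hdr));

	printf("two sdus: %d\n", parse_multi_sdu(pkt, sizeof(pkt), 2));
	printf("truncated: %d\n", parse_multi_sdu(pkt, 6, 2));
	return 0;
}
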
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index 0ccc8c24e754..279de2cd9c4a 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -183,7 +183,7 @@ static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf,
return len;
}
-static int gdm_tty_write_room(struct tty_struct *tty)
+static unsigned int gdm_tty_write_room(struct tty_struct *tty)
{
struct gdm *gdm = tty->driver_data;
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 1fc7727ab7be..1e613d42d823 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -676,7 +676,7 @@ static int gbaudio_tplg_create_kcontrol(struct gbaudio_module_info *gb,
struct gbaudio_ctl_pvt *ctldata;
switch (ctl->iface) {
- case SNDRV_CTL_ELEM_IFACE_MIXER:
+ case (__force int)SNDRV_CTL_ELEM_IFACE_MIXER:
switch (ctl->info.type) {
case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
ret = gbaudio_tplg_create_enum_kctl(gb, kctl, ctl);
@@ -903,7 +903,7 @@ static int gbaudio_tplg_create_wcontrol(struct gbaudio_module_info *gb,
int ret;
switch (ctl->iface) {
- case SNDRV_CTL_ELEM_IFACE_MIXER:
+ case (__force int)SNDRV_CTL_ELEM_IFACE_MIXER:
switch (ctl->info.type) {
case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
ret = gbaudio_tplg_create_enum_ctl(gb, kctl, ctl);
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 9fc5c47be9bd..13d319860da5 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -27,7 +27,7 @@ struct gbphy_host {
static DEFINE_IDA(gbphy_id);
static ssize_t protocol_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
@@ -221,7 +221,7 @@ void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
- struct greybus_descriptor_cport *cport_desc)
+ struct greybus_descriptor_cport *cport_desc)
{
struct gbphy_device *gbphy_dev;
int retval;
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index 30655153df6a..ad0700a0bb81 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -246,6 +246,7 @@ static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
xfer = spi->first_xfer;
while (msg->state != GB_SPI_STATE_OP_DONE) {
int xfer_delay;
+
if (xfer == spi->last_xfer)
xfer_len = spi->last_xfer_size;
else
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index b1e63f7798b0..73f01ed1e5b7 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -440,7 +440,7 @@ static int gb_tty_write(struct tty_struct *tty, const unsigned char *buf,
return count;
}
-static int gb_tty_write_room(struct tty_struct *tty)
+static unsigned int gb_tty_write_room(struct tty_struct *tty)
{
struct gb_tty *gb_tty = tty->driver_data;
unsigned long flags;
@@ -457,11 +457,11 @@ static int gb_tty_write_room(struct tty_struct *tty)
return room;
}
-static int gb_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int gb_tty_chars_in_buffer(struct tty_struct *tty)
{
struct gb_tty *gb_tty = tty->driver_data;
unsigned long flags;
- int chars;
+ unsigned int chars;
spin_lock_irqsave(&gb_tty->write_lock, flags);
chars = kfifo_len(&gb_tty->write_fifo);
@@ -494,21 +494,7 @@ static void gb_tty_set_termios(struct tty_struct *tty,
(termios->c_cflag & PARODD ? 1 : 2) +
(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- newline.data_bits = 5;
- break;
- case CS6:
- newline.data_bits = 6;
- break;
- case CS7:
- newline.data_bits = 7;
- break;
- case CS8:
- default:
- newline.data_bits = 8;
- break;
- }
+ newline.data_bits = tty_get_char_size(termios->c_cflag);
/* FIXME: needs to clear unsupported bits in the termios */
gb_tty->clocal = ((termios->c_cflag & CLOCAL) != 0);
diff --git a/drivers/staging/gs_fpgaboot/README b/drivers/staging/gs_fpgaboot/README
index b85a76849fc4..ec1235a21bcc 100644
--- a/drivers/staging/gs_fpgaboot/README
+++ b/drivers/staging/gs_fpgaboot/README
@@ -39,7 +39,7 @@ TABLE OF CONTENTS.
5. USE CASE (from a mailing list discussion with Greg)
- a. As a FPGA development support tool,
+ a. As an FPGA development support tool,
During FPGA firmware development, you need to download a new FPGA
image frequently.
You would do that with a dedicated JTAG, which usually a limited
diff --git a/drivers/staging/hikey9xx/Kconfig b/drivers/staging/hikey9xx/Kconfig
index c4dc1016edf2..9f53df9068fe 100644
--- a/drivers/staging/hikey9xx/Kconfig
+++ b/drivers/staging/hikey9xx/Kconfig
@@ -1,27 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# to be placed at drivers/phy
-config PHY_HI3670_USB
- tristate "hi3670 USB PHY support"
- depends on (ARCH_HISI && ARM64) || COMPILE_TEST
- select GENERIC_PHY
- select MFD_SYSCON
- help
- Enable this to support the HISILICON HI3670 USB PHY.
-
- To compile this driver as a module, choose M here.
-
-# to be placed at drivers/spmi
-config SPMI_HISI3670
- tristate "Hisilicon 3670 SPMI Controller"
- select IRQ_DOMAIN_HIERARCHY
- depends on HAS_IOMEM
- depends on SPMI
- help
- If you say yes to this option, support will be included for the
- built-in SPMI PMIC Arbiter interface on Hisilicon 3670
- processors.
-
# to be placed at drivers/mfd
config MFD_HI6421_SPMI
tristate "HiSilicon Hi6421v600 SPMI PMU/Codec IC"
diff --git a/drivers/staging/hikey9xx/Makefile b/drivers/staging/hikey9xx/Makefile
index 9103735d8377..e3108d7dd849 100644
--- a/drivers/staging/hikey9xx/Makefile
+++ b/drivers/staging/hikey9xx/Makefile
@@ -1,6 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PHY_HI3670_USB) += phy-hi3670-usb3.o
-
-obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_MFD_HI6421_SPMI) += hi6421-spmi-pmic.o
diff --git a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
index 626140cb96f2..35ef3d4c760b 100644
--- a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
+++ b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
@@ -33,17 +33,27 @@ enum hi6421_spmi_pmic_irq_list {
SIM0_HPD_F,
SIM1_HPD_R,
SIM1_HPD_F,
- PMIC_IRQ_LIST_MAX,
+
+ PMIC_IRQ_LIST_MAX
};
-#define HISI_IRQ_ARRAY 2
-#define HISI_IRQ_NUM (HISI_IRQ_ARRAY * 8)
+#define HISI_IRQ_BANK_SIZE 2
-#define HISI_IRQ_KEY_NUM 0
+/*
+ * IRQ number for the power key button and mask for both UP and DOWN IRQs
+ */
+#define HISI_POWERKEY_IRQ_NUM 0
+#define HISI_IRQ_POWERKEY_UP_DOWN (BIT(POWERKEY_DOWN) | BIT(POWERKEY_UP))
-#define HISI_BITS 8
-#define HISI_IRQ_KEY_VALUE (BIT(POWERKEY_DOWN) | BIT(POWERKEY_UP))
-#define HISI_MASK GENMASK(HISI_BITS - 1, 0)
+/*
+ * Registers for IRQ address and IRQ mask bits
+ *
+ * Please notice that we need to regmap a larger region, as other
+ * registers are used by the regulators.
+ * See drivers/regulator/hi6421-regulator.c.
+ */
+#define SOC_PMIC_IRQ_MASK_0_ADDR 0x0202
+#define SOC_PMIC_IRQ0_ADDR 0x0212
/*
* The IRQs are mapped as:
@@ -67,13 +77,14 @@ enum hi6421_spmi_pmic_irq_list {
* SIM1_HPD_R 0x0203 0x213 bit 4
* SIM1_HPD_F 0x0203 0x213 bit 5
* ====================== ============= ============ =====
+ *
+ * Each mask register contains 8 bits. The ancillary macros below
+ * convert a number from 0 to 14 into a register address and a bit mask
*/
-#define SOC_PMIC_IRQ_MASK_0_ADDR 0x0202
-#define SOC_PMIC_IRQ0_ADDR 0x0212
-
-#define IRQ_MASK_REGISTER(irq_data) (SOC_PMIC_IRQ_MASK_0_ADDR + \
- (irqd_to_hwirq(irq_data) >> 3))
-#define IRQ_MASK_BIT(irq_data) BIT(irqd_to_hwirq(irq_data) & 0x07)
+#define HISI_IRQ_MASK_REG(irq_data) (SOC_PMIC_IRQ_MASK_0_ADDR + \
+ (irqd_to_hwirq(irq_data) / BITS_PER_BYTE))
+#define HISI_IRQ_MASK_BIT(irq_data) BIT(irqd_to_hwirq(irq_data) & (BITS_PER_BYTE - 1))
+#define HISI_8BITS_MASK 0xff
static const struct mfd_cell hi6421v600_devs[] = {
{ .name = "hi6421v600-regulator", },
@@ -86,23 +97,31 @@ static irqreturn_t hi6421_spmi_irq_handler(int irq, void *priv)
unsigned int in;
int i, offset;
- for (i = 0; i < HISI_IRQ_ARRAY; i++) {
+ for (i = 0; i < HISI_IRQ_BANK_SIZE; i++) {
regmap_read(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, &in);
- pending = HISI_MASK & in;
- regmap_write(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, pending);
- if (i == HISI_IRQ_KEY_NUM &&
- (pending & HISI_IRQ_KEY_VALUE) == HISI_IRQ_KEY_VALUE) {
+ /* Mark pending IRQs as handled */
+ regmap_write(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, in);
+
+ pending = in & HISI_8BITS_MASK;
+
+ if (i == HISI_POWERKEY_IRQ_NUM &&
+ (pending & HISI_IRQ_POWERKEY_UP_DOWN) == HISI_IRQ_POWERKEY_UP_DOWN) {
+ /*
+ * If both powerkey down and up IRQs are received,
+ * handle them in the right order
+ */
generic_handle_irq(ddata->irqs[POWERKEY_DOWN]);
generic_handle_irq(ddata->irqs[POWERKEY_UP]);
- pending &= (~HISI_IRQ_KEY_VALUE);
+ pending &= ~HISI_IRQ_POWERKEY_UP_DOWN;
}
if (!pending)
continue;
- for_each_set_bit(offset, &pending, HISI_BITS)
- generic_handle_irq(ddata->irqs[offset + i * HISI_BITS]);
+ for_each_set_bit(offset, &pending, BITS_PER_BYTE) {
+ generic_handle_irq(ddata->irqs[offset + i * BITS_PER_BYTE]);
+ }
}
return IRQ_HANDLED;
@@ -115,12 +134,12 @@ static void hi6421_spmi_irq_mask(struct irq_data *d)
unsigned int data;
u32 offset;
- offset = IRQ_MASK_REGISTER(d);
+ offset = HISI_IRQ_MASK_REG(d);
spin_lock_irqsave(&ddata->lock, flags);
regmap_read(ddata->regmap, offset, &data);
- data |= IRQ_MASK_BIT(d);
+ data |= HISI_IRQ_MASK_BIT(d);
regmap_write(ddata->regmap, offset, data);
spin_unlock_irqrestore(&ddata->lock, flags);
@@ -132,20 +151,19 @@ static void hi6421_spmi_irq_unmask(struct irq_data *d)
u32 data, offset;
unsigned long flags;
- offset = (irqd_to_hwirq(d) >> 3);
- offset += SOC_PMIC_IRQ_MASK_0_ADDR;
+ offset = HISI_IRQ_MASK_REG(d);
spin_lock_irqsave(&ddata->lock, flags);
regmap_read(ddata->regmap, offset, &data);
- data &= ~(1 << (irqd_to_hwirq(d) & 0x07));
+ data &= ~HISI_IRQ_MASK_BIT(d);
regmap_write(ddata->regmap, offset, data);
spin_unlock_irqrestore(&ddata->lock, flags);
}
static struct irq_chip hi6421_spmi_pmu_irqchip = {
- .name = "hisi-irq",
+ .name = "hi6421v600-irq",
.irq_mask = hi6421_spmi_irq_mask,
.irq_unmask = hi6421_spmi_irq_unmask,
.irq_disable = hi6421_spmi_irq_mask,
@@ -158,7 +176,7 @@ static int hi6421_spmi_irq_map(struct irq_domain *d, unsigned int virq,
struct hi6421_spmi_pmic *ddata = d->host_data;
irq_set_chip_and_handler_name(virq, &hi6421_spmi_pmu_irqchip,
- handle_simple_irq, "hisi");
+ handle_simple_irq, "hi6421v600");
irq_set_chip_data(virq, ddata);
irq_set_irq_type(virq, IRQ_TYPE_NONE);
@@ -175,22 +193,24 @@ static void hi6421_spmi_pmic_irq_init(struct hi6421_spmi_pmic *ddata)
int i;
unsigned int pending;
- for (i = 0; i < HISI_IRQ_ARRAY; i++)
+ /* Mask all IRQs */
+ for (i = 0; i < HISI_IRQ_BANK_SIZE; i++)
regmap_write(ddata->regmap, SOC_PMIC_IRQ_MASK_0_ADDR + i,
- HISI_MASK);
+ HISI_8BITS_MASK);
- for (i = 0; i < HISI_IRQ_ARRAY; i++) {
+ /* Mark all IRQs as handled */
+ for (i = 0; i < HISI_IRQ_BANK_SIZE; i++) {
regmap_read(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i, &pending);
regmap_write(ddata->regmap, SOC_PMIC_IRQ0_ADDR + i,
- HISI_MASK);
+ HISI_8BITS_MASK);
}
}
static const struct regmap_config regmap_config = {
- .reg_bits = 16,
- .val_bits = HISI_BITS,
- .max_register = 0xffff,
- .fast_io = true
+ .reg_bits = 16,
+ .val_bits = BITS_PER_BYTE,
+ .max_register = 0xffff,
+ .fast_io = true
};
static int hi6421_spmi_pmic_probe(struct spmi_device *pdev)
@@ -230,29 +250,31 @@ static int hi6421_spmi_pmic_probe(struct spmi_device *pdev)
hi6421_spmi_pmic_irq_init(ddata);
- ddata->irqs = devm_kzalloc(dev, HISI_IRQ_NUM * sizeof(int), GFP_KERNEL);
+ ddata->irqs = devm_kzalloc(dev, PMIC_IRQ_LIST_MAX * sizeof(int), GFP_KERNEL);
if (!ddata->irqs)
return -ENOMEM;
- ddata->domain = irq_domain_add_simple(np, HISI_IRQ_NUM, 0,
+ ddata->domain = irq_domain_add_simple(np, PMIC_IRQ_LIST_MAX, 0,
&hi6421_spmi_domain_ops, ddata);
if (!ddata->domain) {
dev_err(dev, "Failed to create IRQ domain\n");
return -ENODEV;
}
- for (i = 0; i < HISI_IRQ_NUM; i++) {
+ for (i = 0; i < PMIC_IRQ_LIST_MAX; i++) {
virq = irq_create_mapping(ddata->domain, i);
if (!virq) {
dev_err(dev, "Failed to map H/W IRQ\n");
- return -ENOSPC;
+ return -ENODEV;
}
ddata->irqs[i] = virq;
}
- ret = request_threaded_irq(ddata->irq, hi6421_spmi_irq_handler, NULL,
- IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_NO_SUSPEND,
- "pmic", ddata);
+ ret = devm_request_threaded_irq(dev,
+ ddata->irq, hi6421_spmi_irq_handler,
+ NULL,
+ IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_NO_SUSPEND,
+ "pmic", ddata);
if (ret < 0) {
dev_err(dev, "Failed to start IRQ handling thread: error %d\n",
ret);
@@ -270,13 +292,6 @@ static int hi6421_spmi_pmic_probe(struct spmi_device *pdev)
return ret;
}
-static void hi6421_spmi_pmic_remove(struct spmi_device *pdev)
-{
- struct hi6421_spmi_pmic *ddata = dev_get_drvdata(&pdev->dev);
-
- free_irq(ddata->irq, ddata);
-}
-
static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "hisilicon,hi6421-spmi" },
{ }
@@ -289,7 +304,6 @@ static struct spmi_driver hi6421_spmi_pmic_driver = {
.of_match_table = pmic_spmi_id_table,
},
.probe = hi6421_spmi_pmic_probe,
- .remove = hi6421_spmi_pmic_remove,
};
module_spmi_driver(hi6421_spmi_pmic_driver);
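[Editor's note] The hunk above switches to devm_request_threaded_irq(), which is why the separate .remove callback and its free_irq() call can be deleted: the IRQ is released automatically when the device is unbound. A minimal sketch of that pattern follows; the driver, handler, and device names are hypothetical and this is not the hi6421 code itself.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Sketch only: thread-context handler for a devm-managed IRQ. */
static irqreturn_t foo_irq_thread(int irq, void *data)
{
	/* handle the interrupt in thread context */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/*
	 * Freed automatically on driver detach, so no .remove callback
	 * is needed just to call free_irq(). IRQF_ONESHOT is required
	 * when no primary handler is given.
	 */
	ret = devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread,
					IRQF_ONESHOT, dev_name(dev), pdev);
	if (ret)
		dev_err(dev, "failed to request IRQ: %d\n", ret);

	return ret;
}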
diff --git a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
index 3b23ad56b31a..8e355cddd437 100644
--- a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
+++ b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
@@ -17,7 +17,7 @@ description: |
node.
The SPMI controller part is provided by
- drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml.
+ Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
properties:
$nodename:
@@ -32,12 +32,11 @@ properties:
'#interrupt-cells':
const: 2
- interrupt-controller:
- description:
- Identify that the PMIC is capable of behaving as an interrupt controller.
+ interrupt-controller: true
gpios:
maxItems: 1
+ description: GPIO used for IRQs
regulators:
type: object
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index b68304da288b..1d3026dae827 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -5,20 +5,14 @@
* Copyright 2010 Analog Devices Inc.
*/
-#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/imu/adis.h>
-#include <linux/iio/sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
#define ADIS16203_STARTUP_DELAY 220 /* ms */
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 8d3afc6dc755..2a8aa83b8d9e 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -5,20 +5,14 @@
* Copyright 2010 Analog Devices Inc.
*/
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
#define ADIS16240_STARTUP_DELAY 220 /* ms */
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index ccbafcaaf27e..79467f056a05 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -1348,9 +1348,9 @@ static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev,
}
static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
@@ -1375,9 +1375,9 @@ static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev,
}
static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index eab534dc4bcc..78ac720266e6 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -18,8 +18,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "ad7746.h"
-
/*
* AD7746 Register Definition
*/
@@ -84,10 +82,6 @@
#define AD7746_CAPDAC_DACEN BIT(7)
#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
-/*
- * struct ad7746_chip_info - chip specific information
- */
-
struct ad7746_chip_info {
struct i2c_client *client;
struct mutex lock; /* protect sensor state */
@@ -215,6 +209,19 @@ static const unsigned char ad7746_cap_filter_rate_table[][2] = {
{16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1},
};
+static int ad7746_set_capdac(struct ad7746_chip_info *chip, int channel)
+{
+ int ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACA,
+ chip->capdac[channel][0]);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACB,
+ chip->capdac[channel][1]);
+}
+
static int ad7746_select_channel(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
@@ -230,17 +237,11 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
AD7746_CONF_CAPFS_SHIFT;
delay = ad7746_cap_filter_rate_table[idx][1];
+ ret = ad7746_set_capdac(chip, chan->channel);
+ if (ret < 0)
+ return ret;
+
if (chip->capdac_set != chan->channel) {
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACA,
- chip->capdac[chan->channel][0]);
- if (ret < 0)
- return ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACB,
- chip->capdac[chan->channel][1]);
- if (ret < 0)
- return ret;
chip->capdac_set = chan->channel;
}
@@ -484,14 +485,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
chip->capdac[chan->channel][chan->differential] = val > 0 ?
AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0;
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACA,
- chip->capdac[chan->channel][0]);
- if (ret < 0)
- goto out;
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_CAPDACB,
- chip->capdac[chan->channel][1]);
+ ret = ad7746_set_capdac(chip, chan->channel);
if (ret < 0)
goto out;
@@ -564,10 +558,10 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_TEMP:
- /*
- * temperature in milli degrees Celsius
- * T = ((*val / 2048) - 4096) * 1000
- */
+ /*
+ * temperature in milli degrees Celsius
+ * T = ((*val / 2048) - 4096) * 1000
+ */
*val = (*val * 125) / 256;
break;
case IIO_VOLTAGE:
@@ -669,18 +663,15 @@ static const struct iio_info ad7746_info = {
.write_raw = ad7746_write_raw,
};
-/*
- * device probe and remove
- */
-
static int ad7746_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct ad7746_platform_data *pdata = client->dev.platform_data;
+ struct device *dev = &client->dev;
struct ad7746_chip_info *chip;
struct iio_dev *indio_dev;
unsigned char regval = 0;
- int ret = 0;
+ unsigned int vdd_permille;
+ int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
@@ -702,26 +693,39 @@ static int ad7746_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (pdata) {
- if (pdata->exca_en) {
- if (pdata->exca_inv_en)
- regval |= AD7746_EXCSETUP_NEXCA;
- else
- regval |= AD7746_EXCSETUP_EXCA;
- }
+ if (device_property_read_bool(dev, "adi,exca-output-en")) {
+ if (device_property_read_bool(dev, "adi,exca-output-invert"))
+ regval |= AD7746_EXCSETUP_NEXCA;
+ else
+ regval |= AD7746_EXCSETUP_EXCA;
+ }
- if (pdata->excb_en) {
- if (pdata->excb_inv_en)
- regval |= AD7746_EXCSETUP_NEXCB;
- else
- regval |= AD7746_EXCSETUP_EXCB;
- }
+ if (device_property_read_bool(dev, "adi,excb-output-en")) {
+ if (device_property_read_bool(dev, "adi,excb-output-invert"))
+ regval |= AD7746_EXCSETUP_NEXCB;
+ else
+ regval |= AD7746_EXCSETUP_EXCB;
+ }
- regval |= AD7746_EXCSETUP_EXCLVL(pdata->exclvl);
- } else {
- dev_warn(&client->dev, "No platform data? using default\n");
- regval = AD7746_EXCSETUP_EXCA | AD7746_EXCSETUP_EXCB |
- AD7746_EXCSETUP_EXCLVL(3);
+ ret = device_property_read_u32(dev, "adi,excitation-vdd-permille",
+ &vdd_permille);
+ if (!ret) {
+ switch (vdd_permille) {
+ case 125:
+ regval |= AD7746_EXCSETUP_EXCLVL(0);
+ break;
+ case 250:
+ regval |= AD7746_EXCSETUP_EXCLVL(1);
+ break;
+ case 375:
+ regval |= AD7746_EXCSETUP_EXCLVL(2);
+ break;
+ case 500:
+ regval |= AD7746_EXCSETUP_EXCLVL(3);
+ break;
+ default:
+ break;
+ }
}
ret = i2c_smbus_write_byte_data(chip->client,
@@ -729,11 +733,7 @@ static int ad7746_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
- if (ret)
- return ret;
-
- return 0;
+ return devm_iio_device_register(indio_dev->dev.parent, indio_dev);
}
static const struct i2c_device_id ad7746_id[] = {
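[Editor's note] The probe() rework above drops platform data in favour of firmware properties; the adi,excitation-vdd-permille values 125/250/375/500 select EXCLVL codes 0-3 (VDD/8 up to VDD/2, per the header removed below). As an illustrative sketch only, and not the code actually merged, the same mapping could be table-driven; ARRAY_SIZE and -EINVAL come from <linux/kernel.h> and <linux/errno.h>, and the helper name is hypothetical.

/* Sketch: map "adi,excitation-vdd-permille" to an EXCLVL code. */
static const unsigned int ad7746_exclvl_permille[] = { 125, 250, 375, 500 };

static int ad7746_permille_to_exclvl(unsigned int permille)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ad7746_exclvl_permille); i++)
		if (ad7746_exclvl_permille[i] == permille)
			return i;	/* EXCLVL code 0..3 */

	return -EINVAL;		/* unknown value: leave regval unchanged */
}

With such a helper, the probe path would only check for a non-negative return before OR-ing AD7746_EXCSETUP_EXCLVL(ret) into regval, instead of the switch shown in the hunk above.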
diff --git a/drivers/staging/iio/cdc/ad7746.h b/drivers/staging/iio/cdc/ad7746.h
deleted file mode 100644
index 8bdbd732dbbd..000000000000
--- a/drivers/staging/iio/cdc/ad7746.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
- *
- * Copyright 2011 Analog Devices Inc.
- */
-
-#ifndef IIO_CDC_AD7746_H_
-#define IIO_CDC_AD7746_H_
-
-/*
- * TODO: struct ad7746_platform_data needs to go into include/linux/iio
- */
-
-#define AD7466_EXCLVL_0 0 /* +-VDD/8 */
-#define AD7466_EXCLVL_1 1 /* +-VDD/4 */
-#define AD7466_EXCLVL_2 2 /* +-VDD * 3/8 */
-#define AD7466_EXCLVL_3 3 /* +-VDD/2 */
-
-struct ad7746_platform_data {
- unsigned char exclvl; /*Excitation Voltage Level */
- bool exca_en; /* enables EXCA pin as the excitation output */
- bool exca_inv_en; /* enables /EXCA pin as the excitation output */
- bool excb_en; /* enables EXCB pin as the excitation output */
- bool excb_inv_en; /* enables /EXCB pin as the excitation output */
-};
-
-#endif /* IIO_CDC_AD7746_H_ */
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 60a3ae5587b9..94b131ef8a22 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -58,6 +58,7 @@
* @spi: spi_device
* @mclk: external master clock
* @control: cached control word
+ * @devid: device id
* @xfer: default spi transfer
* @msg: default spi message
* @freq_xfer: tuning word spi transfer
@@ -86,7 +87,7 @@ struct ad9834_state {
__be16 freq_data[2];
};
-/**
+/*
* ad9834_supported_device_ids:
*/
@@ -316,7 +317,7 @@ ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
static IIO_DEVICE_ATTR(out_altvoltage0_out1_wavetype_available, 0444,
ad9834_show_out1_wavetype_available, NULL, 0);
-/**
+/*
* see dds.h for further information
*/
diff --git a/drivers/staging/kpc2000/Kconfig b/drivers/staging/kpc2000/Kconfig
deleted file mode 100644
index 897965359fcb..000000000000
--- a/drivers/staging/kpc2000/Kconfig
+++ /dev/null
@@ -1,59 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config KPC2000
- bool "Daktronics KPC Device support"
- select MFD_CORE
- depends on PCI
- depends on UIO
- help
- Select this if you wish to use the Daktronics KPC PCI devices
-
- If unsure, say N.
-
-config KPC2000_CORE
- tristate "Daktronics KPC PCI UIO device"
- depends on KPC2000
- help
- Say Y here if you wish to support the Daktronics KPC PCI
- device in UIO mode.
-
- To compile this driver as a module, choose M here: the module
- will be called kpc2000
-
- If unsure, say N.
-
-config KPC2000_SPI
- tristate "Daktronics KPC SPI device"
- depends on KPC2000 && SPI
- help
- Say Y here if you wish to support the Daktronics KPC PCI
- device in SPI mode.
-
- To compile this driver as a module, choose M here: the module
- will be called kpc2000_spi
-
- If unsure, say N.
-
-config KPC2000_I2C
- tristate "Daktronics KPC I2C device"
- depends on KPC2000 && I2C
- help
- Say Y here if you wish to support the Daktronics KPC PCI
- device in I2C mode.
-
- To compile this driver as a module, choose M here: the module
- will be called kpc2000_i2c
-
- If unsure, say N.
-
-config KPC2000_DMA
- tristate "Daktronics KPC DMA controller"
- depends on KPC2000
- help
- Say Y here if you wish to support the Daktronics DMA controller.
-
- To compile this driver as a module, choose M here: the module
- will be called kpc2000_dma
-
- If unsure, say N.
-
diff --git a/drivers/staging/kpc2000/Makefile b/drivers/staging/kpc2000/Makefile
deleted file mode 100644
index d15ed49807d5..000000000000
--- a/drivers/staging/kpc2000/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_KPC2000) += kpc2000/
-obj-$(CONFIG_KPC2000_I2C) += kpc2000_i2c.o
-obj-$(CONFIG_KPC2000_SPI) += kpc2000_spi.o
-obj-$(CONFIG_KPC2000_DMA) += kpc_dma/
diff --git a/drivers/staging/kpc2000/TODO b/drivers/staging/kpc2000/TODO
deleted file mode 100644
index 9b5ab37fb3a0..000000000000
--- a/drivers/staging/kpc2000/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-- the kpc_spi driver doesn't seem to let multiple transactions (to different instances of the core) happen in parallel...
-- The kpc_i2c driver is a hot mess, it should probably be cleaned up a ton. It functions against current hardware though.
diff --git a/drivers/staging/kpc2000/kpc.h b/drivers/staging/kpc2000/kpc.h
deleted file mode 100644
index a3fc9c9221aa..000000000000
--- a/drivers/staging/kpc2000/kpc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef KPC_H_
-#define KPC_H_
-
-/* ***** Driver Names ***** */
-#define KP_DRIVER_NAME_KP2000 "kp2000"
-#define KP_DRIVER_NAME_INVALID "kpc_invalid"
-#define KP_DRIVER_NAME_DMA_CONTROLLER "kpc_nwl_dma"
-#define KP_DRIVER_NAME_UIO "uio_pdrv_genirq"
-#define KP_DRIVER_NAME_I2C "kpc_i2c"
-#define KP_DRIVER_NAME_SPI "kpc_spi"
-
-struct kpc_core_device_platdata {
- u32 card_id;
- u32 build_version;
- u32 hardware_revision;
- u64 ssid;
- u64 ddna;
-};
-
-#define PCI_DEVICE_ID_DAKTRONICS_KADOKA_P2KR0 0x4b03
-
-#endif /* KPC_H_ */
diff --git a/drivers/staging/kpc2000/kpc2000/Makefile b/drivers/staging/kpc2000/kpc2000/Makefile
deleted file mode 100644
index c274ad083db6..000000000000
--- a/drivers/staging/kpc2000/kpc2000/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-m := kpc2000.o
-kpc2000-objs += core.o cell_probe.o
diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c
deleted file mode 100644
index e7e963d62699..000000000000
--- a/drivers/staging/kpc2000/kpc2000/cell_probe.c
+++ /dev/null
@@ -1,548 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/mfd/core.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/uio_driver.h>
-#include "pcie.h"
-
-/* Core (Resource) Table Layout:
- * one Resource per record (8 bytes)
- * 6 5 4 3 2 1 0
- * 3210987654321098765432109876543210987654321098765432109876543210
- * IIIIIIIIIIII Core Type [up to 4095 types]
- * D S2C DMA Present
- * DDD S2C DMA Channel Number [up to 8 channels]
- * LLLLLLLLLLLLLLLL Register Count (64-bit registers) [up to 65535 registers]
- * OOOOOOOOOOOOOOOO Core Offset (in 4kB blocks) [up to 65535 cores]
- * D C2S DMA Present
- * DDD C2S DMA Channel Number [up to 8 channels]
- * II IRQ Count [0 to 3 IRQs per core]
- * 1111111000
- * IIIIIII IRQ Base Number [up to 128 IRQs per card]
- * ___ Spare
- *
- */
-
-#define KPC_OLD_DMA_CH_NUM(present, channel) \
- ((present) ? (0x8 | ((channel) & 0x7)) : 0)
-#define KPC_OLD_S2C_DMA_CH_NUM(cte) \
- KPC_OLD_DMA_CH_NUM(cte.s2c_dma_present, cte.s2c_dma_channel_num)
-#define KPC_OLD_C2S_DMA_CH_NUM(cte) \
- KPC_OLD_DMA_CH_NUM(cte.c2s_dma_present, cte.c2s_dma_channel_num)
-
-#define KP_CORE_ID_INVALID 0
-#define KP_CORE_ID_I2C 3
-#define KP_CORE_ID_SPI 5
-
-struct core_table_entry {
- u16 type;
- u32 offset;
- u32 length;
- bool s2c_dma_present;
- u8 s2c_dma_channel_num;
- bool c2s_dma_present;
- u8 c2s_dma_channel_num;
- u8 irq_count;
- u8 irq_base_num;
-};
-
-static
-void parse_core_table_entry_v0(struct core_table_entry *cte, const u64 read_val)
-{
- cte->type = ((read_val & 0xFFF0000000000000UL) >> 52);
- cte->offset = ((read_val & 0x00000000FFFF0000UL) >> 16) * 4096;
- cte->length = ((read_val & 0x0000FFFF00000000UL) >> 32) * 8;
- cte->s2c_dma_present = ((read_val & 0x0008000000000000UL) >> 51);
- cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000UL) >> 48);
- cte->c2s_dma_present = ((read_val & 0x0000000000008000UL) >> 15);
- cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000UL) >> 12);
- cte->irq_count = ((read_val & 0x0000000000000C00UL) >> 10);
- cte->irq_base_num = ((read_val & 0x00000000000003F8UL) >> 3);
-}
-
-static
-void dbg_cte(struct kp2000_device *pcard, struct core_table_entry *cte)
-{
- dev_dbg(&pcard->pdev->dev,
- "CTE: type:%3d offset:%3d (%3d) length:%3d (%3d) s2c:%d c2s:%d irq_count:%d base_irq:%d\n",
- cte->type,
- cte->offset,
- cte->offset / 4096,
- cte->length,
- cte->length / 8,
- (cte->s2c_dma_present ? cte->s2c_dma_channel_num : -1),
- (cte->c2s_dma_present ? cte->c2s_dma_channel_num : -1),
- cte->irq_count,
- cte->irq_base_num
- );
-}
-
-static
-void parse_core_table_entry(struct core_table_entry *cte, const u64 read_val, const u8 entry_rev)
-{
- switch (entry_rev) {
- case 0:
- parse_core_table_entry_v0(cte, read_val);
- break;
- default:
- cte->type = 0;
- break;
- }
-}
-
-static int probe_core_basic(unsigned int core_num, struct kp2000_device *pcard,
- char *name, const struct core_table_entry cte)
-{
- struct mfd_cell cell = { .id = core_num, .name = name };
- struct resource resources[2];
-
- struct kpc_core_device_platdata core_pdata = {
- .card_id = pcard->card_id,
- .build_version = pcard->build_version,
- .hardware_revision = pcard->hardware_revision,
- .ssid = pcard->ssid,
- .ddna = pcard->ddna,
- };
-
- dev_dbg(&pcard->pdev->dev,
- "Found Basic core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n",
- cte.type,
- KPC_OLD_S2C_DMA_CH_NUM(cte),
- KPC_OLD_C2S_DMA_CH_NUM(cte),
- cte.offset,
- cte.length,
- cte.length / 8);
-
- cell.platform_data = &core_pdata;
- cell.pdata_size = sizeof(struct kpc_core_device_platdata);
- cell.num_resources = 2;
-
- memset(&resources, 0, sizeof(resources));
-
- resources[0].start = cte.offset;
- resources[0].end = cte.offset + (cte.length - 1);
- resources[0].flags = IORESOURCE_MEM;
-
- resources[1].start = pcard->pdev->irq;
- resources[1].end = pcard->pdev->irq;
- resources[1].flags = IORESOURCE_IRQ;
-
- cell.resources = resources;
-
- return mfd_add_devices(PCARD_TO_DEV(pcard), // parent
- pcard->card_num * 100, // id
- &cell, // struct mfd_cell *
- 1, // ndevs
- &pcard->regs_base_resource,
- 0, // irq_base
- NULL); // struct irq_domain *
-}
-
-struct kpc_uio_device {
- struct list_head list;
- struct kp2000_device *pcard;
- struct device *dev;
- struct uio_info uioinfo;
- struct core_table_entry cte;
- u16 core_num;
-};
-
-static ssize_t offset_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", kudev->cte.offset);
-}
-static DEVICE_ATTR_RO(offset);
-
-static ssize_t size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", kudev->cte.length);
-}
-static DEVICE_ATTR_RO(size);
-
-static ssize_t type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", kudev->cte.type);
-}
-static DEVICE_ATTR_RO(type);
-
-static ssize_t s2c_dma_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- if (!kudev->cte.s2c_dma_present)
- return sprintf(buf, "%s", "not present\n");
-
- return sprintf(buf, "%u\n", kudev->cte.s2c_dma_channel_num);
-}
-static DEVICE_ATTR_RO(s2c_dma);
-
-static ssize_t c2s_dma_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- if (!kudev->cte.c2s_dma_present)
- return sprintf(buf, "%s", "not present\n");
-
- return sprintf(buf, "%u\n", kudev->cte.c2s_dma_channel_num);
-}
-static DEVICE_ATTR_RO(c2s_dma);
-
-static ssize_t irq_count_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", kudev->cte.irq_count);
-}
-static DEVICE_ATTR_RO(irq_count);
-
-static ssize_t irq_base_num_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", kudev->cte.irq_base_num);
-}
-static DEVICE_ATTR_RO(irq_base_num);
-
-static ssize_t core_num_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kpc_uio_device *kudev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", kudev->core_num);
-}
-static DEVICE_ATTR_RO(core_num);
-
-struct attribute *kpc_uio_class_attrs[] = {
- &dev_attr_offset.attr,
- &dev_attr_size.attr,
- &dev_attr_type.attr,
- &dev_attr_s2c_dma.attr,
- &dev_attr_c2s_dma.attr,
- &dev_attr_irq_count.attr,
- &dev_attr_irq_base_num.attr,
- &dev_attr_core_num.attr,
- NULL,
-};
-
-static
-int kp2000_check_uio_irq(struct kp2000_device *pcard, u32 irq_num)
-{
- u64 interrupt_active = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE);
- u64 interrupt_mask_inv = ~readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
- u64 irq_check_mask = BIT_ULL(irq_num);
-
- if (interrupt_active & irq_check_mask) { // if it's active (interrupt pending)
- if (interrupt_mask_inv & irq_check_mask) { // and if it's not masked off
- return 1;
- }
- }
- return 0;
-}
-
-static
-irqreturn_t kuio_handler(int irq, struct uio_info *uioinfo)
-{
- struct kpc_uio_device *kudev = uioinfo->priv;
-
- if (irq != kudev->pcard->pdev->irq)
- return IRQ_NONE;
-
- if (kp2000_check_uio_irq(kudev->pcard, kudev->cte.irq_base_num)) {
- /* Clear the active flag */
- writeq(BIT_ULL(kudev->cte.irq_base_num),
- kudev->pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
-
-static
-int kuio_irqcontrol(struct uio_info *uioinfo, s32 irq_on)
-{
- struct kpc_uio_device *kudev = uioinfo->priv;
- struct kp2000_device *pcard = kudev->pcard;
- u64 mask;
-
- mutex_lock(&pcard->sem);
- mask = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
- if (irq_on)
- mask &= ~(BIT_ULL(kudev->cte.irq_base_num));
- else
- mask |= BIT_ULL(kudev->cte.irq_base_num);
- writeq(mask, pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
- mutex_unlock(&pcard->sem);
-
- return 0;
-}
-
-static int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard,
- char *name, const struct core_table_entry cte)
-{
- struct kpc_uio_device *kudev;
- int rv;
-
- dev_dbg(&pcard->pdev->dev,
- "Found UIO core: type = %02d dma = %02x / %02x offset = 0x%x length = 0x%x (%d regs)\n",
- cte.type,
- KPC_OLD_S2C_DMA_CH_NUM(cte),
- KPC_OLD_C2S_DMA_CH_NUM(cte),
- cte.offset,
- cte.length,
- cte.length / 8);
-
- kudev = kzalloc(sizeof(*kudev), GFP_KERNEL);
- if (!kudev)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&kudev->list);
- kudev->pcard = pcard;
- kudev->cte = cte;
- kudev->core_num = core_num;
-
- kudev->uioinfo.priv = kudev;
- kudev->uioinfo.name = name;
- kudev->uioinfo.version = "0.0";
- if (cte.irq_count > 0) {
- kudev->uioinfo.irq_flags = IRQF_SHARED;
- kudev->uioinfo.irq = pcard->pdev->irq;
- kudev->uioinfo.handler = kuio_handler;
- kudev->uioinfo.irqcontrol = kuio_irqcontrol;
- } else {
- kudev->uioinfo.irq = 0;
- }
-
- kudev->uioinfo.mem[0].name = "uiomap";
- kudev->uioinfo.mem[0].addr = pci_resource_start(pcard->pdev, REG_BAR) + cte.offset;
-
- // Round up to nearest PAGE_SIZE boundary
- kudev->uioinfo.mem[0].size = (cte.length + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
- kudev->uioinfo.mem[0].memtype = UIO_MEM_PHYS;
-
- kudev->dev = device_create(kpc_uio_class,
- &pcard->pdev->dev, MKDEV(0, 0), kudev, "%s.%d.%d.%d",
- kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num);
- if (IS_ERR(kudev->dev)) {
- dev_err(&pcard->pdev->dev, "%s: device_create failed!\n",
- __func__);
- kfree(kudev);
- return -ENODEV;
- }
- dev_set_drvdata(kudev->dev, kudev);
-
- rv = uio_register_device(kudev->dev, &kudev->uioinfo);
- if (rv) {
- dev_err(&pcard->pdev->dev, "%s: failed uio_register_device: %d\n",
- __func__, rv);
- put_device(kudev->dev);
- kfree(kudev);
- return rv;
- }
-
- list_add_tail(&kudev->list, &pcard->uio_devices_list);
-
- return 0;
-}
-
-static int create_dma_engine_core(struct kp2000_device *pcard,
- size_t engine_regs_offset,
- int engine_num, int irq_num)
-{
- struct mfd_cell cell = { .id = engine_num };
- struct resource resources[2];
-
- cell.platform_data = NULL;
- cell.pdata_size = 0;
- cell.name = KP_DRIVER_NAME_DMA_CONTROLLER;
- cell.num_resources = 2;
-
- memset(&resources, 0, sizeof(resources));
-
- resources[0].start = engine_regs_offset;
- resources[0].end = engine_regs_offset + (KPC_DMA_ENGINE_SIZE - 1);
- resources[0].flags = IORESOURCE_MEM;
-
- resources[1].start = irq_num;
- resources[1].end = irq_num;
- resources[1].flags = IORESOURCE_IRQ;
-
- cell.resources = resources;
-
- return mfd_add_devices(PCARD_TO_DEV(pcard), // parent
- pcard->card_num * 100, // id
- &cell, // struct mfd_cell *
- 1, // ndevs
- &pcard->dma_base_resource,
- 0, // irq_base
- NULL); // struct irq_domain *
-}
-
-static int kp2000_setup_dma_controller(struct kp2000_device *pcard)
-{
- int err;
- unsigned int i;
- u64 capabilities_reg;
-
- // S2C Engines
- for (i = 0 ; i < 32 ; i++) {
- capabilities_reg = readq(pcard->dma_bar_base +
- KPC_DMA_S2C_BASE_OFFSET +
- (KPC_DMA_ENGINE_SIZE * i));
-
- if (capabilities_reg & ENGINE_CAP_PRESENT_MASK) {
- err = create_dma_engine_core(pcard, (KPC_DMA_S2C_BASE_OFFSET +
- (KPC_DMA_ENGINE_SIZE * i)),
- i, pcard->pdev->irq);
- if (err)
- goto err_out;
- }
- }
- // C2S Engines
- for (i = 0 ; i < 32 ; i++) {
- capabilities_reg = readq(pcard->dma_bar_base +
- KPC_DMA_C2S_BASE_OFFSET +
- (KPC_DMA_ENGINE_SIZE * i));
-
- if (capabilities_reg & ENGINE_CAP_PRESENT_MASK) {
- err = create_dma_engine_core(pcard, (KPC_DMA_C2S_BASE_OFFSET +
- (KPC_DMA_ENGINE_SIZE * i)),
- 32 + i, pcard->pdev->irq);
- if (err)
- goto err_out;
- }
- }
-
- return 0;
-
-err_out:
- dev_err(&pcard->pdev->dev, "%s: failed to add a DMA Engine: %d\n",
- __func__, err);
- return err;
-}
-
-int kp2000_probe_cores(struct kp2000_device *pcard)
-{
- int err = 0;
- int i;
- int current_type_id;
- u64 read_val;
- unsigned int highest_core_id = 0;
- struct core_table_entry cte;
-
- err = kp2000_setup_dma_controller(pcard);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&pcard->uio_devices_list);
-
- // First, iterate the core table looking for the highest CORE_ID
- for (i = 0 ; i < pcard->core_table_length ; i++) {
- read_val = readq(pcard->sysinfo_regs_base + ((pcard->core_table_offset + i) * 8));
- parse_core_table_entry(&cte, read_val, pcard->core_table_rev);
- dbg_cte(pcard, &cte);
- if (cte.type > highest_core_id)
- highest_core_id = cte.type;
- if (cte.type == KP_CORE_ID_INVALID)
- dev_info(&pcard->pdev->dev, "Found Invalid core: %016llx\n", read_val);
- }
- // Then, iterate over the possible core types.
- for (current_type_id = 1 ; current_type_id <= highest_core_id ; current_type_id++) {
- unsigned int core_num = 0;
- /*
- * Foreach core type, iterate the whole table and instantiate
- * subdevices for each core.
- * Yes, this is O(n*m) but the actual runtime is small enough
- * that it's an acceptable tradeoff.
- */
- for (i = 0 ; i < pcard->core_table_length ; i++) {
- read_val = readq(pcard->sysinfo_regs_base +
- ((pcard->core_table_offset + i) * 8));
- parse_core_table_entry(&cte, read_val, pcard->core_table_rev);
-
- if (cte.type != current_type_id)
- continue;
-
- switch (cte.type) {
- case KP_CORE_ID_I2C:
- err = probe_core_basic(core_num, pcard,
- KP_DRIVER_NAME_I2C, cte);
- break;
-
- case KP_CORE_ID_SPI:
- err = probe_core_basic(core_num, pcard,
- KP_DRIVER_NAME_SPI, cte);
- break;
-
- default:
- err = probe_core_uio(core_num, pcard, "kpc_uio", cte);
- break;
- }
- if (err) {
- dev_err(&pcard->pdev->dev,
- "%s: failed to add core %d: %d\n",
- __func__, i, err);
- goto error;
- }
- core_num++;
- }
- }
-
- // Finally, instantiate a UIO device for the core_table.
- cte.type = 0; // CORE_ID_BOARD_INFO
- cte.offset = 0; // board info is always at the beginning
- cte.length = 512 * 8;
- cte.s2c_dma_present = false;
- cte.s2c_dma_channel_num = 0;
- cte.c2s_dma_present = false;
- cte.c2s_dma_channel_num = 0;
- cte.irq_count = 0;
- cte.irq_base_num = 0;
- err = probe_core_uio(0, pcard, "kpc_uio", cte);
- if (err) {
- dev_err(&pcard->pdev->dev, "%s: failed to add board_info core: %d\n",
- __func__, err);
- goto error;
- }
-
- return 0;
-
-error:
- kp2000_remove_cores(pcard);
- mfd_remove_devices(PCARD_TO_DEV(pcard));
- return err;
-}
-
-void kp2000_remove_cores(struct kp2000_device *pcard)
-{
- struct list_head *ptr;
- struct list_head *next;
-
- list_for_each_safe(ptr, next, &pcard->uio_devices_list) {
- struct kpc_uio_device *kudev = list_entry(ptr, struct kpc_uio_device, list);
-
- uio_unregister_device(&kudev->uioinfo);
- device_unregister(kudev->dev);
- list_del(&kudev->list);
- kfree(kudev);
- }
-}
-
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
deleted file mode 100644
index 6462a3059fb0..000000000000
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ /dev/null
@@ -1,565 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-#include <linux/kernel.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/cdev.h>
-#include <linux/rwsem.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/mfd/core.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/jiffies.h>
-#include "pcie.h"
-#include "uapi.h"
-
-static DEFINE_IDA(card_num_ida);
-
-/*******************************************************
- * SysFS Attributes
- ******************************************************/
-
-static ssize_t ssid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%016llx\n", pcard->ssid);
-}
-static DEVICE_ATTR_RO(ssid);
-
-static ssize_t ddna_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%016llx\n", pcard->ddna);
-}
-static DEVICE_ATTR_RO(ddna);
-
-static ssize_t card_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->card_id);
-}
-static DEVICE_ATTR_RO(card_id);
-
-static ssize_t hw_rev_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->hardware_revision);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t build_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->build_version);
-}
-static DEVICE_ATTR_RO(build);
-
-static ssize_t build_date_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->build_datestamp);
-}
-static DEVICE_ATTR_RO(build_date);
-
-static ssize_t build_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->build_timestamp);
-}
-static DEVICE_ATTR_RO(build_time);
-
-static ssize_t cpld_reg_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
- u64 val;
-
- val = readq(pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
- return sprintf(buf, "%016llx\n", val);
-}
-static DEVICE_ATTR_RO(cpld_reg);
-
-static ssize_t cpld_reconfigure(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
- unsigned long wr_val;
- int rv;
-
- rv = kstrtoul(buf, 0, &wr_val);
- if (rv < 0)
- return rv;
- if (wr_val > 7)
- return -EINVAL;
-
- wr_val = wr_val << 8;
- wr_val |= 0x1; // Set the "Configure Go" bit
- writeq(wr_val, pcard->sysinfo_regs_base + REG_CPLD_CONFIG);
- return count;
-}
-
-static DEVICE_ATTR(cpld_reconfigure, 0220, NULL, cpld_reconfigure);
-
-static ssize_t irq_mask_reg_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
- u64 val;
-
- val = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
- return sprintf(buf, "%016llx\n", val);
-}
-static DEVICE_ATTR_RO(irq_mask_reg);
-
-static ssize_t irq_active_reg_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
- u64 val;
-
- val = readq(pcard->sysinfo_regs_base + REG_INTERRUPT_ACTIVE);
- return sprintf(buf, "%016llx\n", val);
-}
-static DEVICE_ATTR_RO(irq_active_reg);
-
-static ssize_t pcie_error_count_reg_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
- u64 val;
-
- val = readq(pcard->sysinfo_regs_base + REG_PCIE_ERROR_COUNT);
- return sprintf(buf, "%016llx\n", val);
-}
-static DEVICE_ATTR_RO(pcie_error_count_reg);
-
-static ssize_t core_table_offset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->core_table_offset);
-}
-static DEVICE_ATTR_RO(core_table_offset);
-
-static ssize_t core_table_length_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kp2000_device *pcard = dev_get_drvdata(dev);
-
- return sprintf(buf, "%08x\n", pcard->core_table_length);
-}
-static DEVICE_ATTR_RO(core_table_length);
-
-static const struct attribute *kp_attr_list[] = {
- &dev_attr_ssid.attr,
- &dev_attr_ddna.attr,
- &dev_attr_card_id.attr,
- &dev_attr_hw_rev.attr,
- &dev_attr_build.attr,
- &dev_attr_build_date.attr,
- &dev_attr_build_time.attr,
- &dev_attr_cpld_reg.attr,
- &dev_attr_cpld_reconfigure.attr,
- &dev_attr_irq_mask_reg.attr,
- &dev_attr_irq_active_reg.attr,
- &dev_attr_pcie_error_count_reg.attr,
- &dev_attr_core_table_offset.attr,
- &dev_attr_core_table_length.attr,
- NULL,
-};
-
-/*******************************************************
- * Functions
- ******************************************************/
-
-static void wait_and_read_ssid(struct kp2000_device *pcard)
-{
- u64 read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
- unsigned long timeout;
-
- if (read_val & 0x8000000000000000UL) {
- pcard->ssid = read_val;
- return;
- }
-
- timeout = jiffies + (HZ * 2);
- do {
- read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
- if (read_val & 0x8000000000000000UL) {
- pcard->ssid = read_val;
- return;
- }
- cpu_relax();
- //schedule();
- } while (time_before(jiffies, timeout));
-
- dev_notice(&pcard->pdev->dev, "SSID didn't show up!\n");
-
- // Timed out waiting for the SSID to show up, stick all zeros in the
- // value
- pcard->ssid = 0;
-}
-
-static int read_system_regs(struct kp2000_device *pcard)
-{
- u64 read_val;
-
- read_val = readq(pcard->sysinfo_regs_base + REG_MAGIC_NUMBER);
- if (read_val != KP2000_MAGIC_VALUE) {
- dev_err(&pcard->pdev->dev,
- "Invalid magic! Got: 0x%016llx Want: 0x%016llx\n",
- read_val, KP2000_MAGIC_VALUE);
- return -EILSEQ;
- }
-
- read_val = readq(pcard->sysinfo_regs_base + REG_CARD_ID_AND_BUILD);
- pcard->card_id = (read_val & 0xFFFFFFFF00000000UL) >> 32;
- pcard->build_version = (read_val & 0x00000000FFFFFFFFUL) >> 0;
-
- read_val = readq(pcard->sysinfo_regs_base + REG_DATE_AND_TIME_STAMPS);
- pcard->build_datestamp = (read_val & 0xFFFFFFFF00000000UL) >> 32;
- pcard->build_timestamp = (read_val & 0x00000000FFFFFFFFUL) >> 0;
-
- read_val = readq(pcard->sysinfo_regs_base + REG_CORE_TABLE_OFFSET);
- pcard->core_table_length = (read_val & 0xFFFFFFFF00000000UL) >> 32;
- pcard->core_table_offset = (read_val & 0x00000000FFFFFFFFUL) >> 0;
-
- wait_and_read_ssid(pcard);
-
- read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_HW_ID);
- pcard->core_table_rev = (read_val & 0x0000000000000F00) >> 8;
- pcard->hardware_revision = (read_val & 0x000000000000001F);
-
- read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_DDNA);
- pcard->ddna = read_val;
-
- dev_info(&pcard->pdev->dev,
- "system_regs: %08x %08x %08x %08x %02x %d %d %016llx %016llx\n",
- pcard->card_id,
- pcard->build_version,
- pcard->build_datestamp,
- pcard->build_timestamp,
- pcard->hardware_revision,
- pcard->core_table_rev,
- pcard->core_table_length,
- pcard->ssid,
- pcard->ddna);
-
- if (pcard->core_table_rev > 1) {
- dev_err(&pcard->pdev->dev,
- "core table entry revision is higher than we can deal with, cannot continue with this card!\n");
- return 1;
- }
-
- return 0;
-}
-
-static irqreturn_t kp2000_irq_handler(int irq, void *dev_id)
-{
- struct kp2000_device *pcard = dev_id;
-
- writel(KPC_DMA_CARD_IRQ_ENABLE |
- KPC_DMA_CARD_USER_INTERRUPT_MODE |
- KPC_DMA_CARD_USER_INTERRUPT_ACTIVE,
- pcard->dma_common_regs);
- return IRQ_HANDLED;
-}
-
-static int kp2000_pcie_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int err = 0;
- struct kp2000_device *pcard;
- unsigned long reg_bar_phys_addr;
- unsigned long reg_bar_phys_len;
- unsigned long dma_bar_phys_addr;
- unsigned long dma_bar_phys_len;
- u16 regval;
-
- pcard = kzalloc(sizeof(*pcard), GFP_KERNEL);
- if (!pcard)
- return -ENOMEM;
- dev_dbg(&pdev->dev, "probe: allocated struct kp2000_device @ %p\n",
- pcard);
-
- err = ida_simple_get(&card_num_ida, 1, INT_MAX, GFP_KERNEL);
- if (err < 0) {
- dev_err(&pdev->dev, "probe: failed to get card number (%d)\n",
- err);
- goto err_free_pcard;
- }
- pcard->card_num = err;
- scnprintf(pcard->name, 16, "kpcard%u", pcard->card_num);
-
- mutex_init(&pcard->sem);
- mutex_lock(&pcard->sem);
-
- pcard->pdev = pdev;
- pci_set_drvdata(pdev, pcard);
-
- err = pci_enable_device(pcard->pdev);
- if (err) {
- dev_err(&pcard->pdev->dev,
- "probe: failed to enable PCIE2000 PCIe device (%d)\n",
- err);
- goto err_remove_ida;
- }
-
- /* Setup the Register BAR */
- reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
- reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);
-
- pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE);
- if (!pcard->regs_bar_base) {
- dev_err(&pcard->pdev->dev,
- "probe: REG_BAR could not remap memory to virtual space\n");
- err = -ENODEV;
- goto err_disable_device;
- }
- dev_dbg(&pcard->pdev->dev,
- "probe: REG_BAR virt hardware address start [%p]\n",
- pcard->regs_bar_base);
-
- err = pci_request_region(pcard->pdev, REG_BAR, KP_DRIVER_NAME_KP2000);
- if (err) {
- dev_err(&pcard->pdev->dev,
- "probe: failed to acquire PCI region (%d)\n",
- err);
- err = -ENODEV;
- goto err_unmap_regs;
- }
-
- pcard->regs_base_resource.start = reg_bar_phys_addr;
- pcard->regs_base_resource.end = reg_bar_phys_addr +
- reg_bar_phys_len - 1;
- pcard->regs_base_resource.flags = IORESOURCE_MEM;
-
- /* Setup the DMA BAR */
- dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
- dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
-
- pcard->dma_bar_base = ioremap(dma_bar_phys_addr,
- dma_bar_phys_len);
- if (!pcard->dma_bar_base) {
- dev_err(&pcard->pdev->dev,
- "probe: DMA_BAR could not remap memory to virtual space\n");
- err = -ENODEV;
- goto err_release_regs;
- }
- dev_dbg(&pcard->pdev->dev,
- "probe: DMA_BAR virt hardware address start [%p]\n",
- pcard->dma_bar_base);
-
- pcard->dma_common_regs = pcard->dma_bar_base + KPC_DMA_COMMON_OFFSET;
-
- err = pci_request_region(pcard->pdev, DMA_BAR, "kp2000_pcie");
- if (err) {
- dev_err(&pcard->pdev->dev,
- "probe: failed to acquire PCI region (%d)\n", err);
- err = -ENODEV;
- goto err_unmap_dma;
- }
-
- pcard->dma_base_resource.start = dma_bar_phys_addr;
- pcard->dma_base_resource.end = dma_bar_phys_addr +
- dma_bar_phys_len - 1;
- pcard->dma_base_resource.flags = IORESOURCE_MEM;
-
- /* Read System Regs */
- pcard->sysinfo_regs_base = pcard->regs_bar_base;
- err = read_system_regs(pcard);
- if (err)
- goto err_release_dma;
-
- // Disable all "user" interrupts because they're not used yet.
- writeq(0xFFFFFFFFFFFFFFFFUL,
- pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
-
- // let the card master PCIe
- pci_set_master(pcard->pdev);
-
- // enable IO and mem if not already done
- pci_read_config_word(pcard->pdev, PCI_COMMAND, &regval);
- regval |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pcard->pdev, PCI_COMMAND, regval);
-
- // Clear relaxed ordering bit
- pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_RELAX_EN, 0);
-
- // Set Max_Payload_Size and Max_Read_Request_Size
- regval = (0x0) << 5; // Max_Payload_Size = 128 B
- pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_PAYLOAD, regval);
- regval = (0x0) << 12; // Max_Read_Request_Size = 128 B
- pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ, regval);
-
- // Enable error reporting for: Correctable Errors, Non-Fatal Errors,
- // Fatal Errors, Unsupported Requests
- pcie_capability_clear_and_set_word(pcard->pdev, PCI_EXP_DEVCTL, 0,
- PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
-
- err = dma_set_mask(PCARD_TO_DEV(pcard), DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pcard->pdev->dev,
- "CANNOT use DMA mask %0llx\n", DMA_BIT_MASK(64));
- goto err_release_dma;
- }
- dev_dbg(&pcard->pdev->dev,
- "Using DMA mask %0llx\n", dma_get_mask(PCARD_TO_DEV(pcard)));
-
- err = pci_enable_msi(pcard->pdev);
- if (err < 0)
- goto err_release_dma;
-
- err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
- pcard->name, pcard);
- if (err) {
- dev_err(&pcard->pdev->dev,
- "%s: failed to request_irq: %d\n", __func__, err);
- goto err_disable_msi;
- }
-
- err = sysfs_create_files(&pdev->dev.kobj, kp_attr_list);
- if (err) {
- dev_err(&pdev->dev, "Failed to add sysfs files: %d\n", err);
- goto err_free_irq;
- }
-
- err = kp2000_probe_cores(pcard);
- if (err)
- goto err_remove_sysfs;
-
- /* Enable IRQs in HW */
- writel(KPC_DMA_CARD_IRQ_ENABLE | KPC_DMA_CARD_USER_INTERRUPT_MODE,
- pcard->dma_common_regs);
-
- mutex_unlock(&pcard->sem);
- return 0;
-
-err_remove_sysfs:
- sysfs_remove_files(&pdev->dev.kobj, kp_attr_list);
-err_free_irq:
- free_irq(pcard->pdev->irq, pcard);
-err_disable_msi:
- pci_disable_msi(pcard->pdev);
-err_release_dma:
- pci_release_region(pdev, DMA_BAR);
-err_unmap_dma:
- iounmap(pcard->dma_bar_base);
-err_release_regs:
- pci_release_region(pdev, REG_BAR);
-err_unmap_regs:
- iounmap(pcard->regs_bar_base);
-err_disable_device:
- pci_disable_device(pcard->pdev);
-err_remove_ida:
- mutex_unlock(&pcard->sem);
- ida_simple_remove(&card_num_ida, pcard->card_num);
-err_free_pcard:
- kfree(pcard);
- return err;
-}
-
-static void kp2000_pcie_remove(struct pci_dev *pdev)
-{
- struct kp2000_device *pcard = pci_get_drvdata(pdev);
-
- if (!pcard)
- return;
-
- mutex_lock(&pcard->sem);
- kp2000_remove_cores(pcard);
- mfd_remove_devices(PCARD_TO_DEV(pcard));
- sysfs_remove_files(&pdev->dev.kobj, kp_attr_list);
- free_irq(pcard->pdev->irq, pcard);
- pci_disable_msi(pcard->pdev);
- if (pcard->dma_bar_base) {
- iounmap(pcard->dma_bar_base);
- pci_release_region(pdev, DMA_BAR);
- pcard->dma_bar_base = NULL;
- }
- if (pcard->regs_bar_base) {
- iounmap(pcard->regs_bar_base);
- pci_release_region(pdev, REG_BAR);
- pcard->regs_bar_base = NULL;
- }
- pci_disable_device(pcard->pdev);
- pci_set_drvdata(pdev, NULL);
- mutex_unlock(&pcard->sem);
- ida_simple_remove(&card_num_ida, pcard->card_num);
- kfree(pcard);
-}
-
-struct class *kpc_uio_class;
-ATTRIBUTE_GROUPS(kpc_uio_class);
-
-static const struct pci_device_id kp2000_pci_device_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_DAKTRONICS, PCI_DEVICE_ID_DAKTRONICS) },
- { PCI_DEVICE(PCI_VENDOR_ID_DAKTRONICS, PCI_DEVICE_ID_DAKTRONICS_KADOKA_P2KR0) },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, kp2000_pci_device_ids);
-
-static struct pci_driver kp2000_driver_inst = {
- .name = "kp2000_pcie",
- .id_table = kp2000_pci_device_ids,
- .probe = kp2000_pcie_probe,
- .remove = kp2000_pcie_remove,
-};
-
-static int __init kp2000_pcie_init(void)
-{
- kpc_uio_class = class_create(THIS_MODULE, "kpc_uio");
- if (IS_ERR(kpc_uio_class))
- return PTR_ERR(kpc_uio_class);
-
- kpc_uio_class->dev_groups = kpc_uio_class_groups;
- return pci_register_driver(&kp2000_driver_inst);
-}
-module_init(kp2000_pcie_init);
-
-static void __exit kp2000_pcie_exit(void)
-{
- pci_unregister_driver(&kp2000_driver_inst);
- class_destroy(kpc_uio_class);
- ida_destroy(&card_num_ida);
-}
-module_exit(kp2000_pcie_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Lee.Brooke@Daktronics.com, Matt.Sickler@Daktronics.com");
-MODULE_SOFTDEP("pre: uio post: kpc_nwl_dma kpc_i2c kpc_spi");
diff --git a/drivers/staging/kpc2000/kpc2000/dma_common_defs.h b/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
deleted file mode 100644
index 613c4898f65e..000000000000
--- a/drivers/staging/kpc2000/kpc2000/dma_common_defs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef KPC_DMA_COMMON_DEFS_H_
-#define KPC_DMA_COMMON_DEFS_H_
-
-#define KPC_DMA_COMMON_OFFSET 0x4000
-#define KPC_DMA_S2C_BASE_OFFSET 0x0000
-#define KPC_DMA_C2S_BASE_OFFSET 0x2000
-#define KPC_DMA_ENGINE_SIZE 0x0100
-#define ENGINE_CAP_PRESENT_MASK 0x1
-
-#define KPC_DMA_CARD_IRQ_ENABLE BIT(0)
-#define KPC_DMA_CARD_IRQ_ACTIVE BIT(1)
-#define KPC_DMA_CARD_IRQ_PENDING BIT(2)
-#define KPC_DMA_CARD_IRQ_MSI BIT(3)
-#define KPC_DMA_CARD_USER_INTERRUPT_MODE BIT(4)
-#define KPC_DMA_CARD_USER_INTERRUPT_ACTIVE BIT(5)
-#define KPC_DMA_CARD_IRQ_MSIX_MODE BIT(6)
-#define KPC_DMA_CARD_MAX_PAYLOAD_SIZE_MASK 0x0700
-#define KPC_DMA_CARD_MAX_READ_REQUEST_SIZE_MASK 0x7000
-#define KPC_DMA_CARD_S2C_INTERRUPT_STATUS_MASK 0x00FF0000
-#define KPC_DMA_CARD_C2S_INTERRUPT_STATUS_MASK 0xFF000000
-
-#endif /* KPC_DMA_COMMON_DEFS_H_ */
diff --git a/drivers/staging/kpc2000/kpc2000/pcie.h b/drivers/staging/kpc2000/kpc2000/pcie.h
deleted file mode 100644
index f1fc91b4c704..000000000000
--- a/drivers/staging/kpc2000/kpc2000/pcie.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef KP2000_PCIE_H
-#define KP2000_PCIE_H
-#include <linux/types.h>
-#include <linux/pci.h>
-#include "../kpc.h"
-#include "dma_common_defs.h"
-
-/* System Register Map (BAR 1, Start Addr 0)
- *
- * BAR Size:
- * 1048576 (0x100000) bytes = 131072 (0x20000) registers = 256 pages (4K)
- *
- * 6 5 4 3 2 1 0
- * 3210987654321098765432109876543210987654321098765432109876543210
- * 0 <--------------------------- MAGIC ---------------------------->
- * 1 <----------- Card ID ---------><----------- Revision ---------->
- * 2 <--------- Date Stamp --------><--------- Time Stamp ---------->
- * 3 <-------- Core Tbl Len -------><-------- Core Tbl Offset ------>
- * 4 <---------------------------- SSID ---------------------------->
- * 5 < HWID >
- * 6 <------------------------- FPGA DDNA -------------------------->
- * 7 <------------------------ CPLD Config ------------------------->
- * 8 <----------------------- IRQ Mask Flags ----------------------->
- * 9 <---------------------- IRQ Active Flags ---------------------->
- */
-
-#define REG_WIDTH 8
-#define REG_MAGIC_NUMBER (0 * REG_WIDTH)
-#define REG_CARD_ID_AND_BUILD (1 * REG_WIDTH)
-#define REG_DATE_AND_TIME_STAMPS (2 * REG_WIDTH)
-#define REG_CORE_TABLE_OFFSET (3 * REG_WIDTH)
-#define REG_FPGA_SSID (4 * REG_WIDTH)
-#define REG_FPGA_HW_ID (5 * REG_WIDTH)
-#define REG_FPGA_DDNA (6 * REG_WIDTH)
-#define REG_CPLD_CONFIG (7 * REG_WIDTH)
-#define REG_INTERRUPT_MASK (8 * REG_WIDTH)
-#define REG_INTERRUPT_ACTIVE (9 * REG_WIDTH)
-#define REG_PCIE_ERROR_COUNT (10 * REG_WIDTH)
-
-#define KP2000_MAGIC_VALUE 0x196C61482231894DULL
-
-#define PCI_VENDOR_ID_DAKTRONICS 0x1c33
-#define PCI_DEVICE_ID_DAKTRONICS 0x6021
-
-#define DMA_BAR 0
-#define REG_BAR 1
-
-struct kp2000_device {
- struct pci_dev *pdev;
- char name[16];
-
- unsigned int card_num;
- struct mutex sem;
-
- void __iomem *sysinfo_regs_base;
- void __iomem *regs_bar_base;
- struct resource regs_base_resource;
- void __iomem *dma_bar_base;
- void __iomem *dma_common_regs;
- struct resource dma_base_resource;
-
- // "System Registers"
- u32 card_id;
- u32 build_version;
- u32 build_datestamp;
- u32 build_timestamp;
- u32 core_table_offset;
- u32 core_table_length;
- u8 core_table_rev;
- u8 hardware_revision;
- u64 ssid;
- u64 ddna;
-
- // IRQ stuff
- unsigned int irq;
-
- struct list_head uio_devices_list;
-};
-
-extern struct class *kpc_uio_class;
-extern struct attribute *kpc_uio_class_attrs[];
-
-int kp2000_probe_cores(struct kp2000_device *pcard);
-void kp2000_remove_cores(struct kp2000_device *pcard);
-
-// Define this quick little macro because the expression is used frequently
-#define PCARD_TO_DEV(pcard) (&(pcard->pdev->dev))
-
-#endif /* KP2000_PCIE_H */
diff --git a/drivers/staging/kpc2000/kpc2000/uapi.h b/drivers/staging/kpc2000/kpc2000/uapi.h
deleted file mode 100644
index 16f37f002dc6..000000000000
--- a/drivers/staging/kpc2000/kpc2000/uapi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef KP2000_CDEV_UAPI_H_
-#define KP2000_CDEV_UAPI_H_
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-struct kp2000_regs {
- __u32 card_id;
- __u32 build_version;
- __u32 build_datestamp;
- __u32 build_timestamp;
- __u32 hw_rev;
- __u64 ssid;
- __u64 ddna;
- __u64 cpld_reg;
-};
-
-#define KP2000_IOCTL_GET_CPLD_REG _IOR('k', 9, __u32)
-#define KP2000_IOCTL_GET_PCIE_ERROR_REG _IOR('k', 11, __u32)
-#define KP2000_IOCTL_GET_EVERYTHING _IOR('k', 8, struct kp2000_regs*)
-
-#endif /* KP2000_CDEV_UAPI_H_ */
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
deleted file mode 100644
index 14f7940fa4fb..000000000000
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ /dev/null
@@ -1,731 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * KPC2000 i2c driver
- *
- * Adapted i2c-i801.c for use with Kadoka hardware.
- *
- * Copyright (C) 1998 - 2002
- * Frodo Looijaard <frodol@dds.nl>,
- * Philip Edelbrock <phil@netroedge.com>,
- * Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2007 - 2012
- * Jean Delvare <khali@linux-fr.org>
- * Copyright (C) 2010 Intel Corporation
- * David Woodhouse <dwmw2@infradead.org>
- * Copyright (C) 2014-2018 Daktronics
- * Matt Sickler <matt.sickler@daktronics.com>,
- * Jordon Hofer <jordon.hofer@daktronics.com>
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include "kpc.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Matt.Sickler@Daktronics.com");
-
-struct kpc_i2c {
- unsigned long smba;
- struct i2c_adapter adapter;
- unsigned int features;
-};
-
-/*****************************
- *** Part 1 - i2c Handlers ***
- *****************************/
-
-#define REG_SIZE 8
-
-/* I801 SMBus address offsets */
-#define SMBHSTSTS(p) ((0 * REG_SIZE) + (p)->smba)
-#define SMBHSTCNT(p) ((2 * REG_SIZE) + (p)->smba)
-#define SMBHSTCMD(p) ((3 * REG_SIZE) + (p)->smba)
-#define SMBHSTADD(p) ((4 * REG_SIZE) + (p)->smba)
-#define SMBHSTDAT0(p) ((5 * REG_SIZE) + (p)->smba)
-#define SMBHSTDAT1(p) ((6 * REG_SIZE) + (p)->smba)
-#define SMBBLKDAT(p) ((7 * REG_SIZE) + (p)->smba)
-#define SMBPEC(p) ((8 * REG_SIZE) + (p)->smba) /* ICH3 and later */
-#define SMBAUXSTS(p) ((12 * REG_SIZE) + (p)->smba) /* ICH4 and later */
-#define SMBAUXCTL(p) ((13 * REG_SIZE) + (p)->smba) /* ICH4 and later */
-
-/* PCI Address Constants */
-#define SMBBAR 4
-#define SMBHSTCFG 0x040
-
-/* Host configuration bits for SMBHSTCFG */
-#define SMBHSTCFG_HST_EN 1
-#define SMBHSTCFG_SMB_SMI_EN 2
-#define SMBHSTCFG_I2C_EN 4
-
-/* Auxiliary control register bits, ICH4+ only */
-#define SMBAUXCTL_CRC 1
-#define SMBAUXCTL_E32B 2
-
-/* kill bit for SMBHSTCNT */
-#define SMBHSTCNT_KILL 2
-
-/* Other settings */
-#define MAX_RETRIES 400
-#define ENABLE_INT9 0 /* set to 0x01 to enable - untested */
-
-/* I801 command constants */
-#define I801_QUICK 0x00
-#define I801_BYTE 0x04
-#define I801_BYTE_DATA 0x08
-#define I801_WORD_DATA 0x0C
-#define I801_PROC_CALL 0x10 /* unimplemented */
-#define I801_BLOCK_DATA 0x14
-#define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */
-#define I801_BLOCK_LAST 0x34
-#define I801_I2C_BLOCK_LAST 0x38 /* ICH5 and later */
-#define I801_START 0x40
-#define I801_PEC_EN 0x80 /* ICH3 and later */
-
-/* I801 Hosts Status register bits */
-#define SMBHSTSTS_BYTE_DONE 0x80
-#define SMBHSTSTS_INUSE_STS 0x40
-#define SMBHSTSTS_SMBALERT_STS 0x20
-#define SMBHSTSTS_FAILED 0x10
-#define SMBHSTSTS_BUS_ERR 0x08
-#define SMBHSTSTS_DEV_ERR 0x04
-#define SMBHSTSTS_INTR 0x02
-#define SMBHSTSTS_HOST_BUSY 0x01
-
-#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | \
- SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
-
-/* Older devices have their ID defined in <linux/pci_ids.h> */
-#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
-/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
-#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
-#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
-#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
-
-#define FEATURE_SMBUS_PEC BIT(0)
-#define FEATURE_BLOCK_BUFFER BIT(1)
-#define FEATURE_BLOCK_PROC BIT(2)
-#define FEATURE_I2C_BLOCK_READ BIT(3)
-/* Not really a feature, but it's convenient to handle it as such */
-#define FEATURE_IDF BIT(15)
-
-// FIXME!
-#undef inb_p
-#define inb_p(a) readq((void __iomem *)a)
-#undef outb_p
-#define outb_p(d, a) writeq(d, (void __iomem *)a)
-
-/* Make sure the SMBus host is ready to start transmitting.
- * Return 0 if it is, -EBUSY if it is not.
- */
-static int i801_check_pre(struct kpc_i2c *priv)
-{
- int status;
-
- status = inb_p(SMBHSTSTS(priv));
- if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&priv->adapter.dev,
- "SMBus is busy, can't use it! (status=%x)\n", status);
- return -EBUSY;
- }
-
- status &= STATUS_FLAGS;
- if (status) {
- outb_p(status, SMBHSTSTS(priv));
- status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
- if (status) {
- dev_err(&priv->adapter.dev,
- "Failed clearing status flags (%02x)\n", status);
- return -EBUSY;
- }
- }
- return 0;
-}
-
-/* Convert the status register to an error code, and clear it. */
-static int i801_check_post(struct kpc_i2c *priv, int status, int timeout)
-{
- int result = 0;
-
- /* If the SMBus is still busy, we give up */
- if (timeout) {
- dev_err(&priv->adapter.dev, "Transaction timeout\n");
- /* try to stop the current command */
- dev_dbg(&priv->adapter.dev,
- "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
- SMBHSTCNT(priv));
- usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
- SMBHSTCNT(priv));
-
- /* Check if it worked */
- status = inb_p(SMBHSTSTS(priv));
- if ((status & SMBHSTSTS_HOST_BUSY) ||
- !(status & SMBHSTSTS_FAILED))
- dev_err(&priv->adapter.dev,
- "Failed terminating the transaction\n");
- outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
- return -ETIMEDOUT;
- }
-
- if (status & SMBHSTSTS_FAILED) {
- result = -EIO;
- dev_err(&priv->adapter.dev, "Transaction failed\n");
- }
- if (status & SMBHSTSTS_DEV_ERR) {
- result = -ENXIO;
- dev_dbg(&priv->adapter.dev, "No response\n");
- }
- if (status & SMBHSTSTS_BUS_ERR) {
- result = -EAGAIN;
- dev_dbg(&priv->adapter.dev, "Lost arbitration\n");
- }
-
- if (result) {
- /* Clear error flags */
- outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv));
- status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
- if (status)
- dev_warn(&priv->adapter.dev,
- "Failed clearing status flags at end of transaction (%02x)\n",
- status);
- }
-
- return result;
-}
-
-static int i801_transaction(struct kpc_i2c *priv, int xact)
-{
- int status;
- int result;
- int timeout = 0;
-
- result = i801_check_pre(priv);
- if (result < 0)
- return result;
- /* the current contents of SMBHSTCNT can be overwritten, since PEC,
- * INTREN, SMBSCMD are passed in xact
- */
- outb_p(xact | I801_START, SMBHSTCNT(priv));
-
- /* We will always wait for a fraction of a second! */
- do {
- usleep_range(250, 500);
- status = inb_p(SMBHSTSTS(priv));
- } while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_RETRIES));
-
- result = i801_check_post(priv, status, timeout > MAX_RETRIES);
- if (result < 0)
- return result;
-
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
- return 0;
-}
-
-/* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(struct kpc_i2c *priv)
-{
- int timeout = 0;
- int status;
-
- do {
- usleep_range(250, 500);
- status = inb_p(SMBHSTSTS(priv));
- } while ((!(status & SMBHSTSTS_INTR)) && (timeout++ < MAX_RETRIES));
-
- if (timeout > MAX_RETRIES)
- dev_dbg(&priv->adapter.dev, "PEC Timeout!\n");
-
- outb_p(status, SMBHSTSTS(priv));
-}
-
-static int i801_block_transaction_by_block(struct kpc_i2c *priv,
- union i2c_smbus_data *data,
- char read_write, int hwpec)
-{
- int i, len;
- int status;
-
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
-
- /* Use 32-byte buffer to process this transaction */
- if (read_write == I2C_SMBUS_WRITE) {
- len = data->block[0];
- outb_p(len, SMBHSTDAT0(priv));
- for (i = 0; i < len; i++)
- outb_p(data->block[i + 1], SMBBLKDAT(priv));
- }
-
- status = i801_transaction(priv,
- I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
- if (status)
- return status;
-
- if (read_write == I2C_SMBUS_READ) {
- len = inb_p(SMBHSTDAT0(priv));
- if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
- return -EPROTO;
-
- data->block[0] = len;
- for (i = 0; i < len; i++)
- data->block[i + 1] = inb_p(SMBBLKDAT(priv));
- }
- return 0;
-}
-
-static int i801_block_transaction_byte_by_byte(struct kpc_i2c *priv,
- union i2c_smbus_data *data,
- char read_write, int command,
- int hwpec)
-{
- int i, len;
- int smbcmd;
- int status;
- int result;
- int timeout;
-
- result = i801_check_pre(priv);
- if (result < 0)
- return result;
-
- len = data->block[0];
-
- if (read_write == I2C_SMBUS_WRITE) {
- outb_p(len, SMBHSTDAT0(priv));
- outb_p(data->block[1], SMBBLKDAT(priv));
- }
-
- for (i = 1; i <= len; i++) {
- if (i == len && read_write == I2C_SMBUS_READ) {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA)
- smbcmd = I801_I2C_BLOCK_LAST;
- else
- smbcmd = I801_BLOCK_LAST;
- } else {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
- read_write == I2C_SMBUS_READ)
- smbcmd = I801_I2C_BLOCK_DATA;
- else
- smbcmd = I801_BLOCK_DATA;
- }
- outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
-
- if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | I801_START,
- SMBHSTCNT(priv));
- /* We will always wait for a fraction of a second! */
- timeout = 0;
- do {
- usleep_range(250, 500);
- status = inb_p(SMBHSTSTS(priv));
- } while (!(status & SMBHSTSTS_BYTE_DONE) &&
- (timeout++ < MAX_RETRIES));
-
- result = i801_check_post(priv, status, timeout > MAX_RETRIES);
- if (result < 0)
- return result;
- if (i == 1 && read_write == I2C_SMBUS_READ &&
- command != I2C_SMBUS_I2C_BLOCK_DATA) {
- len = inb_p(SMBHSTDAT0(priv));
- if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&priv->adapter.dev,
- "Illegal SMBus block read size %d\n",
- len);
- /* Recover */
- while (inb_p(SMBHSTSTS(priv)) &
- SMBHSTSTS_HOST_BUSY)
- outb_p(SMBHSTSTS_BYTE_DONE,
- SMBHSTSTS(priv));
- outb_p(SMBHSTSTS_INTR,
- SMBHSTSTS(priv));
- return -EPROTO;
- }
- data->block[0] = len;
- }
-
- /* Retrieve/store value in SMBBLKDAT */
- if (read_write == I2C_SMBUS_READ)
- data->block[i] = inb_p(SMBBLKDAT(priv));
- if (read_write == I2C_SMBUS_WRITE && i + 1 <= len)
- outb_p(data->block[i + 1], SMBBLKDAT(priv));
- /* signals SMBBLKDAT ready */
- outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv));
- }
-
- return 0;
-}
-
-static int i801_set_block_buffer_mode(struct kpc_i2c *priv)
-{
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
- return -EIO;
- return 0;
-}
-
-/* Block transaction function */
-static int i801_block_transaction(struct kpc_i2c *priv,
- union i2c_smbus_data *data, char read_write,
- int command, int hwpec)
-{
- int result = 0;
- //unsigned char hostc;
-
- if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
- if (read_write == I2C_SMBUS_WRITE) {
- /* set I2C_EN bit in configuration register */
- //TODO: Figure out the right thing to do here...
- //pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
- //pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN);
- } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
- dev_err(&priv->adapter.dev,
- "I2C block read is unsupported!\n");
- return -EOPNOTSUPP;
- }
- }
-
- if (read_write == I2C_SMBUS_WRITE ||
- command == I2C_SMBUS_I2C_BLOCK_DATA) {
- if (data->block[0] < 1)
- data->block[0] = 1;
- if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
- data->block[0] = I2C_SMBUS_BLOCK_MAX;
- } else {
- data->block[0] = 32; /* max for SMBus block reads */
- }
-
- /* Experience has shown that the block buffer can only be used for
- * SMBus (not I2C) block transactions, even though the datasheet
- * doesn't mention this limitation.
- */
- if ((priv->features & FEATURE_BLOCK_BUFFER) &&
- command != I2C_SMBUS_I2C_BLOCK_DATA &&
- i801_set_block_buffer_mode(priv) == 0) {
- result = i801_block_transaction_by_block(priv, data,
- read_write, hwpec);
- } else {
- result = i801_block_transaction_byte_by_byte(priv, data,
- read_write,
- command, hwpec);
- }
-
- if (result == 0 && hwpec)
- i801_wait_hwpec(priv);
- if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
- read_write == I2C_SMBUS_WRITE) {
- /* restore saved configuration register value */
- //TODO: Figure out the right thing to do here...
- //pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
- }
- return result;
-}
-
-/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data *data)
-{
- int hwpec;
- int block = 0;
- int ret, xact = 0;
- struct kpc_i2c *priv = i2c_get_adapdata(adap);
-
- hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
- (flags & I2C_CLIENT_PEC) &&
- size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
-
- switch (size) {
- case I2C_SMBUS_QUICK:
- dev_dbg(&priv->adapter.dev, " [acc] SMBUS_QUICK\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
-
- xact = I801_QUICK;
- break;
- case I2C_SMBUS_BYTE:
- dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE\n");
-
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(command, SMBHSTCMD(priv));
- xact = I801_BYTE;
- break;
- case I2C_SMBUS_BYTE_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
-
- outb_p(command, SMBHSTCMD(priv));
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(data->byte, SMBHSTDAT0(priv));
- xact = I801_BYTE_DATA;
- break;
- case I2C_SMBUS_WORD_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] SMBUS_WORD_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
-
- outb_p(command, SMBHSTCMD(priv));
- if (read_write == I2C_SMBUS_WRITE) {
- outb_p(data->word & 0xff, SMBHSTDAT0(priv));
- outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
- }
- xact = I801_WORD_DATA;
- break;
- case I2C_SMBUS_BLOCK_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BLOCK_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
-
- outb_p(command, SMBHSTCMD(priv));
- block = 1;
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] SMBUS_I2C_BLOCK_DATA\n");
- /* NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading
- */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
- if (read_write == I2C_SMBUS_READ) {
- /* NB: page 240 of ICH5 datasheet also shows
- * that DATA1 is the cmd field when reading
- */
- outb_p(command, SMBHSTDAT1(priv));
- } else {
- outb_p(command, SMBHSTCMD(priv));
- }
- block = 1;
- break;
- default:
- dev_dbg(&priv->adapter.dev,
- " [acc] Unsupported transaction %d\n", size);
- return -EOPNOTSUPP;
- }
-
- if (hwpec) { /* enable/disable hardware PEC */
- dev_dbg(&priv->adapter.dev, " [acc] hwpec: yes\n");
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
- } else {
- dev_dbg(&priv->adapter.dev, " [acc] hwpec: no\n");
- outb_p(inb_p(SMBAUXCTL(priv)) &
- (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
- }
-
- if (block) {
- dev_dbg(&priv->adapter.dev, " [acc] block: yes\n");
- ret = i801_block_transaction(priv, data, read_write, size,
- hwpec);
- } else {
- dev_dbg(&priv->adapter.dev, " [acc] block: no\n");
- ret = i801_transaction(priv, xact | ENABLE_INT9);
- }
-
- /* Some BIOSes don't like it when PEC is enabled at reboot or resume
- * time, so we forcibly disable it after every transaction. Turn off
- * E32B for the same reason.
- */
- if (hwpec || block) {
- dev_dbg(&priv->adapter.dev, " [acc] hwpec || block\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC |
- SMBAUXCTL_E32B), SMBAUXCTL(priv));
- }
- if (block) {
- dev_dbg(&priv->adapter.dev, " [acc] block\n");
- return ret;
- }
- if (ret) {
- dev_dbg(&priv->adapter.dev, " [acc] ret %d\n", ret);
- return ret;
- }
- if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK)) {
- dev_dbg(&priv->adapter.dev,
- " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
- return 0;
- }
-
- switch (xact & 0x7f) {
- case I801_BYTE: /* Result put in SMBHSTDAT0 */
- case I801_BYTE_DATA:
- dev_dbg(&priv->adapter.dev,
- " [acc] I801_BYTE or I801_BYTE_DATA\n");
- data->byte = inb_p(SMBHSTDAT0(priv));
- break;
- case I801_WORD_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] I801_WORD_DATA\n");
- data->word = inb_p(SMBHSTDAT0(priv)) +
- (inb_p(SMBHSTDAT1(priv)) << 8);
- break;
- }
- return 0;
-}
-
-#define enable_flag(x) (x)
-#define disable_flag(x) 0
-#define enable_flag_if(x, cond) ((cond) ? (x) : 0)
-
-static u32 i801_func(struct i2c_adapter *adapter)
-{
- struct kpc_i2c *priv = i2c_get_adapdata(adapter);
-
- /* original settings
- * u32 f = I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- * I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- * I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
- * ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
- * ((priv->features & FEATURE_I2C_BLOCK_READ) ?
- * I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
- */
-
- // http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
-
- u32 f =
- enable_flag(I2C_FUNC_I2C) | /* 0x00000001(I enabled this one) */
- disable_flag(I2C_FUNC_10BIT_ADDR) | /* 0x00000002 */
- disable_flag(I2C_FUNC_PROTOCOL_MANGLING) | /* 0x00000004 */
- enable_flag_if(I2C_FUNC_SMBUS_PEC,
- priv->features & FEATURE_SMBUS_PEC) |
- /* 0x00000008 */
- disable_flag(I2C_FUNC_SMBUS_BLOCK_PROC_CALL) | /* 0x00008000 */
- enable_flag(I2C_FUNC_SMBUS_QUICK) | /* 0x00010000 */
- disable_flag(I2C_FUNC_SMBUS_READ_BYTE) | /* 0x00020000 */
- disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE) | /* 0x00040000 */
- disable_flag(I2C_FUNC_SMBUS_READ_BYTE_DATA) | /* 0x00080000 */
- disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE_DATA) | /* 0x00100000 */
- disable_flag(I2C_FUNC_SMBUS_READ_WORD_DATA) | /* 0x00200000 */
- disable_flag(I2C_FUNC_SMBUS_WRITE_WORD_DATA) | /* 0x00400000 */
- disable_flag(I2C_FUNC_SMBUS_PROC_CALL) | /* 0x00800000 */
- disable_flag(I2C_FUNC_SMBUS_READ_BLOCK_DATA) | /* 0x01000000 */
- disable_flag(I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) | /* 0x02000000 */
- enable_flag_if(I2C_FUNC_SMBUS_READ_I2C_BLOCK,
- priv->features & FEATURE_I2C_BLOCK_READ) |
- /* 0x04000000 */
- enable_flag(I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) | /* 0x08000000 */
-
- enable_flag(I2C_FUNC_SMBUS_BYTE) | /* _READ_BYTE _WRITE_BYTE */
- enable_flag(I2C_FUNC_SMBUS_BYTE_DATA) | /* _READ_BYTE_DATA
- * _WRITE_BYTE_DATA
- */
- enable_flag(I2C_FUNC_SMBUS_WORD_DATA) | /* _READ_WORD_DATA
- * _WRITE_WORD_DATA
- */
- enable_flag(I2C_FUNC_SMBUS_BLOCK_DATA) | /* _READ_BLOCK_DATA
- * _WRITE_BLOCK_DATA
- */
- disable_flag(I2C_FUNC_SMBUS_I2C_BLOCK) | /* _READ_I2C_BLOCK
- * _WRITE_I2C_BLOCK
- */
- disable_flag(I2C_FUNC_SMBUS_EMUL); /* _QUICK _BYTE
- * _BYTE_DATA _WORD_DATA
- * _PROC_CALL
- * _WRITE_BLOCK_DATA
- * _I2C_BLOCK _PEC
- */
- return f;
-}
-
-#undef enable_flag
-#undef disable_flag
-#undef enable_flag_if
-
-static const struct i2c_algorithm smbus_algorithm = {
- .smbus_xfer = i801_access,
- .functionality = i801_func,
-};
-
-/********************************
- *** Part 2 - Driver Handlers ***
- ********************************/
-static int kpc_i2c_probe(struct platform_device *pldev)
-{
- int err;
- struct kpc_i2c *priv;
- struct resource *res;
-
- priv = devm_kzalloc(&pldev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- i2c_set_adapdata(&priv->adapter, priv);
- priv->adapter.owner = THIS_MODULE;
- priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
- priv->adapter.algo = &smbus_algorithm;
-
- res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- priv->smba = (unsigned long)devm_ioremap(&pldev->dev,
- res->start,
- resource_size(res));
- if (!priv->smba)
- return -ENOMEM;
-
- platform_set_drvdata(pldev, priv);
-
- priv->features |= FEATURE_IDF;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
-
- //init_MUTEX(&lddata->sem);
-
- /* set up the sysfs linkage to our parent device */
- priv->adapter.dev.parent = &pldev->dev;
-
- /* Retry up to 3 times on lost arbitration */
- priv->adapter.retries = 3;
-
- snprintf(priv->adapter.name, sizeof(priv->adapter.name),
- "Fake SMBus I801 adapter");
-
- err = i2c_add_adapter(&priv->adapter);
- if (err) {
- dev_err(&priv->adapter.dev, "Failed to add SMBus adapter\n");
- return err;
- }
-
- return 0;
-}
-
-static int kpc_i2c_remove(struct platform_device *pldev)
-{
- struct kpc_i2c *lddev;
-
- lddev = (struct kpc_i2c *)platform_get_drvdata(pldev);
-
- i2c_del_adapter(&lddev->adapter);
-
- //TODO: Figure out the right thing to do here...
- //pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
- //pci_release_region(dev, SMBBAR);
- //pci_set_drvdata(dev, NULL);
-
- //cdev_del(&lddev->cdev);
-
- return 0;
-}
-
-static struct platform_driver kpc_i2c_driver = {
- .probe = kpc_i2c_probe,
- .remove = kpc_i2c_remove,
- .driver = {
- .name = KP_DRIVER_NAME_I2C,
- },
-};
-
-module_platform_driver(kpc_i2c_driver);
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
deleted file mode 100644
index 16ca18b8aa15..000000000000
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ /dev/null
@@ -1,517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * KP2000 SPI controller driver
- *
- * Copyright (C) 2014-2018 Daktronics
- * Author: Matt Sickler <matt.sickler@daktronics.com>
- * Very loosely based on spi-omap2-mcspi.c
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/gcd.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
-#include <linux/mtd/partitions.h>
-
-#include "kpc.h"
-
-static struct mtd_partition p2kr0_spi0_parts[] = {
- { .name = "SLOT_0", .size = 7798784, .offset = 0, },
- { .name = "SLOT_1", .size = 7798784, .offset = MTDPART_OFS_NXTBLK},
- { .name = "SLOT_2", .size = 7798784, .offset = MTDPART_OFS_NXTBLK},
- { .name = "SLOT_3", .size = 7798784, .offset = MTDPART_OFS_NXTBLK},
- { .name = "CS0_EXTRA", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_NXTBLK},
-};
-
-static struct mtd_partition p2kr0_spi1_parts[] = {
- { .name = "SLOT_4", .size = 7798784, .offset = 0, },
- { .name = "SLOT_5", .size = 7798784, .offset = MTDPART_OFS_NXTBLK},
- { .name = "SLOT_6", .size = 7798784, .offset = MTDPART_OFS_NXTBLK},
- { .name = "SLOT_7", .size = 7798784, .offset = MTDPART_OFS_NXTBLK},
- { .name = "CS1_EXTRA", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_NXTBLK},
-};
-
-static struct flash_platform_data p2kr0_spi0_pdata = {
- .name = "SPI0",
- .nr_parts = ARRAY_SIZE(p2kr0_spi0_parts),
- .parts = p2kr0_spi0_parts,
-};
-
-static struct flash_platform_data p2kr0_spi1_pdata = {
- .name = "SPI1",
- .nr_parts = ARRAY_SIZE(p2kr0_spi1_parts),
- .parts = p2kr0_spi1_parts,
-};
-
-static struct spi_board_info p2kr0_board_info[] = {
- {
- .modalias = "n25q256a11",
- .bus_num = 1,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- .platform_data = &p2kr0_spi0_pdata
- },
- {
- .modalias = "n25q256a11",
- .bus_num = 1,
- .chip_select = 1,
- .mode = SPI_MODE_0,
- .platform_data = &p2kr0_spi1_pdata
- },
-};
-
-/***************
- * SPI Defines *
- ***************/
-#define KP_SPI_REG_CONFIG 0x0 /* 0x00 */
-#define KP_SPI_REG_STATUS 0x1 /* 0x08 */
-#define KP_SPI_REG_FFCTRL 0x2 /* 0x10 */
-#define KP_SPI_REG_TXDATA 0x3 /* 0x18 */
-#define KP_SPI_REG_RXDATA 0x4 /* 0x20 */
-
-#define KP_SPI_CLK 48000000
-#define KP_SPI_MAX_FIFODEPTH 64
-#define KP_SPI_MAX_FIFOWCNT 0xFFFF
-
-#define KP_SPI_REG_CONFIG_TRM_TXRX 0
-#define KP_SPI_REG_CONFIG_TRM_RX 1
-#define KP_SPI_REG_CONFIG_TRM_TX 2
-
-#define KP_SPI_REG_STATUS_RXS 0x01
-#define KP_SPI_REG_STATUS_TXS 0x02
-#define KP_SPI_REG_STATUS_EOT 0x04
-#define KP_SPI_REG_STATUS_TXFFE 0x10
-#define KP_SPI_REG_STATUS_TXFFF 0x20
-#define KP_SPI_REG_STATUS_RXFFE 0x40
-#define KP_SPI_REG_STATUS_RXFFF 0x80
-
-/******************
- * SPI Structures *
- ******************/
-struct kp_spi {
- struct spi_master *master;
- u64 __iomem *base;
- struct device *dev;
-};
-
-struct kp_spi_controller_state {
- void __iomem *base;
- s64 conf_cache;
-};
-
-union kp_spi_config {
- /* use this to access individual elements */
- struct __packed spi_config_bitfield {
- unsigned int pha : 1; /* spim_clk Phase */
- unsigned int pol : 1; /* spim_clk Polarity */
- unsigned int epol : 1; /* spim_csx Polarity */
- unsigned int dpe : 1; /* Transmission Enable */
- unsigned int wl : 5; /* Word Length */
- unsigned int : 3;
- unsigned int trm : 2; /* TxRx Mode */
- unsigned int cs : 4; /* Chip Select */
- unsigned int wcnt : 7; /* Word Count */
- unsigned int ffen : 1; /* FIFO Enable */
- unsigned int spi_en : 1; /* SPI Enable */
- unsigned int : 5;
- } bitfield;
- /* use this to grab the whole register */
- u32 reg;
-};
-
-union kp_spi_status {
- struct __packed spi_status_bitfield {
- unsigned int rx : 1; /* Rx Status */
- unsigned int tx : 1; /* Tx Status */
- unsigned int eo : 1; /* End of Transfer */
- unsigned int : 1;
- unsigned int txffe : 1; /* Tx FIFO Empty */
- unsigned int txfff : 1; /* Tx FIFO Full */
- unsigned int rxffe : 1; /* Rx FIFO Empty */
- unsigned int rxfff : 1; /* Rx FIFO Full */
- unsigned int : 24;
- } bitfield;
- u32 reg;
-};
-
-union kp_spi_ffctrl {
- struct __packed spi_ffctrl_bitfield {
- unsigned int ffstart : 1; /* FIFO Start */
- unsigned int : 31;
- } bitfield;
- u32 reg;
-};
-
-/***************
- * SPI Helpers *
- ***************/
-static inline u64
-kp_spi_read_reg(struct kp_spi_controller_state *cs, int idx)
-{
- u64 __iomem *addr = cs->base;
-
- addr += idx;
- if ((idx == KP_SPI_REG_CONFIG) && (cs->conf_cache >= 0))
- return cs->conf_cache;
-
- return readq(addr);
-}
-
-static inline void
-kp_spi_write_reg(struct kp_spi_controller_state *cs, int idx, u64 val)
-{
- u64 __iomem *addr = cs->base;
-
- addr += idx;
- writeq(val, addr);
- if (idx == KP_SPI_REG_CONFIG)
- cs->conf_cache = val;
-}
-
-static int
-kp_spi_wait_for_reg_bit(struct kp_spi_controller_state *cs, int idx,
- unsigned long bit)
-{
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!(kp_spi_read_reg(cs, idx) & bit)) {
- if (time_after(jiffies, timeout)) {
- if (!(kp_spi_read_reg(cs, idx) & bit))
- return -ETIMEDOUT;
- else
- return 0;
- }
- cpu_relax();
- }
- return 0;
-}
-
-static unsigned
-kp_spi_txrx_pio(struct spi_device *spidev, struct spi_transfer *transfer)
-{
- struct kp_spi_controller_state *cs = spidev->controller_state;
- unsigned int count = transfer->len;
- unsigned int c = count;
-
- int i;
- int res;
- u8 *rx = transfer->rx_buf;
- const u8 *tx = transfer->tx_buf;
- int processed = 0;
-
- if (tx) {
- for (i = 0 ; i < c ; i++) {
- char val = *tx++;
-
- res = kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS,
- KP_SPI_REG_STATUS_TXS);
- if (res < 0)
- goto out;
-
- kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, val);
- processed++;
- }
- } else if (rx) {
- for (i = 0 ; i < c ; i++) {
- char test = 0;
-
- kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, 0x00);
- res = kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS,
- KP_SPI_REG_STATUS_RXS);
- if (res < 0)
- goto out;
-
- test = kp_spi_read_reg(cs, KP_SPI_REG_RXDATA);
- *rx++ = test;
- processed++;
- }
- }
-
- if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS,
- KP_SPI_REG_STATUS_EOT) < 0) {
- //TODO: Figure out how to abort transaction??
- //This has never happened in practice, though...
- }
-
-out:
- return processed;
-}
-
-/*****************
- * SPI Functions *
- *****************/
-static int
-kp_spi_setup(struct spi_device *spidev)
-{
- union kp_spi_config sc;
- struct kp_spi *kpspi = spi_master_get_devdata(spidev->master);
- struct kp_spi_controller_state *cs;
-
- /* setup controller state */
- cs = spidev->controller_state;
- if (!cs) {
- cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs)
- return -ENOMEM;
- cs->base = kpspi->base;
- cs->conf_cache = -1;
- spidev->controller_state = cs;
- }
-
- /* set config register */
- sc.bitfield.wl = spidev->bits_per_word - 1;
- sc.bitfield.cs = spidev->chip_select;
- sc.bitfield.spi_en = 0;
- sc.bitfield.trm = 0;
- sc.bitfield.ffen = 0;
- kp_spi_write_reg(spidev->controller_state, KP_SPI_REG_CONFIG, sc.reg);
- return 0;
-}
-
-static int
-kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
-{
- struct kp_spi_controller_state *cs;
- struct spi_device *spidev;
- struct kp_spi *kpspi;
- struct spi_transfer *transfer;
- union kp_spi_config sc;
- int status = 0;
-
- spidev = m->spi;
- kpspi = spi_master_get_devdata(master);
- m->actual_length = 0;
- m->status = 0;
-
- cs = spidev->controller_state;
-
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers))
- return -EINVAL;
-
- /* validate input */
- list_for_each_entry(transfer, &m->transfers, transfer_list) {
- const void *tx_buf = transfer->tx_buf;
- void *rx_buf = transfer->rx_buf;
- unsigned int len = transfer->len;
-
- if (transfer->speed_hz > KP_SPI_CLK ||
- (len && !(rx_buf || tx_buf))) {
- dev_dbg(kpspi->dev, " transfer: %d Hz, %d %s%s, %d bpw\n",
- transfer->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- transfer->bits_per_word);
- dev_dbg(kpspi->dev, " transfer -EINVAL\n");
- return -EINVAL;
- }
- if (transfer->speed_hz &&
- transfer->speed_hz < (KP_SPI_CLK >> 15)) {
- dev_dbg(kpspi->dev, "speed_hz %d below minimum %d Hz\n",
- transfer->speed_hz,
- KP_SPI_CLK >> 15);
- dev_dbg(kpspi->dev, " speed_hz -EINVAL\n");
- return -EINVAL;
- }
- }
-
- /* assert chip select to start the sequence */
- sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG);
- sc.bitfield.spi_en = 1;
- kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg);
-
- /* work */
- if (kp_spi_wait_for_reg_bit(cs, KP_SPI_REG_STATUS,
- KP_SPI_REG_STATUS_EOT) < 0) {
- dev_info(kpspi->dev, "EOT timed out\n");
- goto out;
- }
-
- /* do the transfers for this message */
- list_for_each_entry(transfer, &m->transfers, transfer_list) {
- if (!transfer->tx_buf && !transfer->rx_buf &&
- transfer->len) {
- status = -EINVAL;
- goto error;
- }
-
- /* transfer */
- if (transfer->len) {
- unsigned int word_len = spidev->bits_per_word;
- unsigned int count;
-
- /* set up the transfer... */
- sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG);
-
- /* ...direction */
- if (transfer->tx_buf)
- sc.bitfield.trm = KP_SPI_REG_CONFIG_TRM_TX;
- else if (transfer->rx_buf)
- sc.bitfield.trm = KP_SPI_REG_CONFIG_TRM_RX;
-
- /* ...word length */
- if (transfer->bits_per_word)
- word_len = transfer->bits_per_word;
- sc.bitfield.wl = word_len - 1;
-
- /* ...chip select */
- sc.bitfield.cs = spidev->chip_select;
-
- /* ...and write the new settings */
- kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg);
-
- /* do the transfer */
- count = kp_spi_txrx_pio(spidev, transfer);
- m->actual_length += count;
-
- if (count != transfer->len) {
- status = -EIO;
- goto error;
- }
- }
-
- if (transfer->delay.value)
- ndelay(spi_delay_to_ns(&transfer->delay, transfer));
- }
-
- /* de-assert chip select to end the sequence */
- sc.reg = kp_spi_read_reg(cs, KP_SPI_REG_CONFIG);
- sc.bitfield.spi_en = 0;
- kp_spi_write_reg(cs, KP_SPI_REG_CONFIG, sc.reg);
-
-out:
- /* done work */
- spi_finalize_current_message(master);
- return 0;
-
-error:
- m->status = status;
- return status;
-}
-
-static void
-kp_spi_cleanup(struct spi_device *spidev)
-{
- struct kp_spi_controller_state *cs = spidev->controller_state;
-
- kfree(cs);
-}
-
-/******************
- * Probe / Remove *
- ******************/
-static int
-kp_spi_probe(struct platform_device *pldev)
-{
- struct kpc_core_device_platdata *drvdata;
- struct spi_master *master;
- struct kp_spi *kpspi;
- struct resource *r;
- int status = 0;
- int i;
-
- drvdata = pldev->dev.platform_data;
- if (!drvdata) {
- dev_err(&pldev->dev, "%s: platform_data is NULL\n", __func__);
- return -ENODEV;
- }
-
- master = spi_alloc_master(&pldev->dev, sizeof(struct kp_spi));
- if (!master) {
- dev_err(&pldev->dev, "%s: master allocation failed\n",
- __func__);
- return -ENOMEM;
- }
-
- /* set up the spi functions */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = (unsigned int)SPI_BPW_RANGE_MASK(4, 32);
- master->setup = kp_spi_setup;
- master->transfer_one_message = kp_spi_transfer_one_message;
- master->cleanup = kp_spi_cleanup;
-
- platform_set_drvdata(pldev, master);
-
- kpspi = spi_master_get_devdata(master);
- kpspi->master = master;
- kpspi->dev = &pldev->dev;
-
- master->num_chipselect = 4;
- if (pldev->id != -1)
- master->bus_num = pldev->id;
-
- r = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pldev->dev, "%s: Unable to get platform resources\n",
- __func__);
- status = -ENODEV;
- goto free_master;
- }
-
- kpspi->base = devm_ioremap(&pldev->dev, r->start,
- resource_size(r));
-
- status = spi_register_master(master);
- if (status < 0) {
- dev_err(&pldev->dev, "Unable to register SPI device\n");
- goto free_master;
- }
-
- /* register the slave boards */
-#define NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(table) \
- for (i = 0 ; i < ARRAY_SIZE(table) ; i++) { \
- spi_new_device(master, &table[i]); \
- }
-
- switch ((drvdata->card_id & 0xFFFF0000) >> 16) {
- case PCI_DEVICE_ID_DAKTRONICS_KADOKA_P2KR0:
- NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(p2kr0_board_info);
- break;
- default:
- dev_err(&pldev->dev, "Unknown hardware, cant know what partition table to use!\n");
- goto free_master;
- }
-
- return status;
-
-free_master:
- spi_master_put(master);
- return status;
-}
-
-static int
-kp_spi_remove(struct platform_device *pldev)
-{
- struct spi_master *master = platform_get_drvdata(pldev);
-
- spi_unregister_master(master);
- return 0;
-}
-
-static struct platform_driver kp_spi_driver = {
- .driver = {
- .name = KP_DRIVER_NAME_SPI,
- },
- .probe = kp_spi_probe,
- .remove = kp_spi_remove,
-};
-
-module_platform_driver(kp_spi_driver);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:kp_spi");
diff --git a/drivers/staging/kpc2000/kpc_dma/Makefile b/drivers/staging/kpc2000/kpc_dma/Makefile
deleted file mode 100644
index fe5db532c8c8..000000000000
--- a/drivers/staging/kpc2000/kpc_dma/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-m := kpc_dma.o
-kpc_dma-objs += dma.o
-kpc_dma-objs += fileops.o
-kpc_dma-objs += kpc_dma_driver.o
diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c
deleted file mode 100644
index e169ac609ba4..000000000000
--- a/drivers/staging/kpc2000/kpc_dma/dma.c
+++ /dev/null
@@ -1,270 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/fs.h>
-#include <linux/rwsem.h>
-#include "kpc_dma_driver.h"
-
-/********** IRQ Handlers **********/
-static
-irqreturn_t ndd_irq_handler(int irq, void *dev_id)
-{
- struct kpc_dma_device *ldev = (struct kpc_dma_device *)dev_id;
-
- if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) ||
- (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
- schedule_work(&ldev->irq_work);
-
- return IRQ_HANDLED;
-}
-
-static
-void ndd_irq_worker(struct work_struct *ws)
-{
- struct kpc_dma_descriptor *cur;
- struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work);
-
- lock_engine(eng);
-
- if (GetEngineCompletePtr(eng) == 0)
- goto out;
-
- if (eng->desc_completed->MyDMAAddr == GetEngineCompletePtr(eng))
- goto out;
-
- cur = eng->desc_completed;
- do {
- cur = cur->Next;
- dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n",
- cur, cur->acd);
- BUG_ON(cur == eng->desc_next); // Ordering failure.
-
- if (cur->DescControlFlags & DMA_DESC_CTL_SOP) {
- eng->accumulated_bytes = 0;
- eng->accumulated_flags = 0;
- }
-
- eng->accumulated_bytes += cur->DescByteCount;
- if (cur->DescStatusFlags & DMA_DESC_STS_ERROR)
- eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_ERROR;
-
- if (cur->DescStatusFlags & DMA_DESC_STS_SHORT)
- eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_SHORT;
-
- if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
- if (cur->acd)
- transfer_complete_cb(cur->acd, eng->accumulated_bytes,
- eng->accumulated_flags | ACD_FLAG_DONE);
- }
-
- eng->desc_completed = cur;
- } while (cur->MyDMAAddr != GetEngineCompletePtr(eng));
-
- out:
- SetClearEngineControl(eng, ENG_CTL_IRQ_ACTIVE, 0);
-
- unlock_engine(eng);
-}
-
-/********** DMA Engine Init/Teardown **********/
-void start_dma_engine(struct kpc_dma_device *eng)
-{
- eng->desc_next = eng->desc_pool_first;
- eng->desc_completed = eng->desc_pool_last;
-
- // Setup the engine pointer registers
- SetEngineNextPtr(eng, eng->desc_pool_first);
- SetEngineSWPtr(eng, eng->desc_pool_first);
- ClearEngineCompletePtr(eng);
-
- WriteEngineControl(eng, ENG_CTL_DMA_ENABLE | ENG_CTL_IRQ_ENABLE);
-}
-
-int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
-{
- u32 caps;
- struct kpc_dma_descriptor *cur;
- struct kpc_dma_descriptor *next;
- dma_addr_t next_handle;
- dma_addr_t head_handle;
- unsigned int i;
- int rv;
-
- caps = GetEngineCapabilities(eng);
-
- if (WARN(!(caps & ENG_CAP_PRESENT), "%s() called for DMA Engine at %p which isn't present in hardware!\n", __func__, eng))
- return -ENXIO;
-
- if (caps & ENG_CAP_DIRECTION)
- eng->dir = DMA_FROM_DEVICE;
- else
- eng->dir = DMA_TO_DEVICE;
-
- eng->desc_pool_cnt = desc_cnt;
- eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev,
- sizeof(struct kpc_dma_descriptor),
- DMA_DESC_ALIGNMENT, 4096);
-
- eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
- if (!eng->desc_pool_first) {
- dev_err(&eng->pldev->dev, "%s: couldn't allocate desc_pool_first!\n", __func__);
- dma_pool_destroy(eng->desc_pool);
- return -ENOMEM;
- }
-
- eng->desc_pool_first->MyDMAAddr = head_handle;
- clear_desc(eng->desc_pool_first);
-
- cur = eng->desc_pool_first;
- for (i = 1 ; i < eng->desc_pool_cnt ; i++) {
- next = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &next_handle);
- if (!next)
- goto done_alloc;
-
- clear_desc(next);
- next->MyDMAAddr = next_handle;
-
- cur->DescNextDescPtr = next_handle;
- cur->Next = next;
- cur = next;
- }
-
- done_alloc:
- // Link the last descriptor back to the first, so it's a circular linked list
- cur->Next = eng->desc_pool_first;
- cur->DescNextDescPtr = eng->desc_pool_first->MyDMAAddr;
-
- eng->desc_pool_last = cur;
- eng->desc_completed = eng->desc_pool_last;
-
- // Setup work queue
- INIT_WORK(&eng->irq_work, ndd_irq_worker);
-
- // Grab IRQ line
- rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED,
- KP_DRIVER_NAME_DMA_CONTROLLER, eng);
- if (rv) {
- dev_err(&eng->pldev->dev, "%s: failed to request_irq: %d\n", __func__, rv);
- return rv;
- }
-
- // Turn on the engine!
- start_dma_engine(eng);
- unlock_engine(eng);
-
- return 0;
-}
-
-void stop_dma_engine(struct kpc_dma_device *eng)
-{
- unsigned long timeout;
-
- // Disable the descriptor engine
- WriteEngineControl(eng, 0);
-
- // Wait for descriptor engine to finish current operation
- timeout = jiffies + (HZ / 2);
- while (GetEngineControl(eng) & ENG_CTL_DMA_RUNNING) {
- if (time_after(jiffies, timeout)) {
- dev_crit(&eng->pldev->dev, "DMA_RUNNING still asserted!\n");
- break;
- }
- }
-
- // Request a reset
- WriteEngineControl(eng, ENG_CTL_DMA_RESET_REQUEST);
-
- // Wait for reset request to be processed
- timeout = jiffies + (HZ / 2);
- while (GetEngineControl(eng) & (ENG_CTL_DMA_RUNNING | ENG_CTL_DMA_RESET_REQUEST)) {
- if (time_after(jiffies, timeout)) {
- dev_crit(&eng->pldev->dev, "ENG_CTL_DMA_RESET_REQUEST still asserted!\n");
- break;
- }
- }
-
- // Request a reset
- WriteEngineControl(eng, ENG_CTL_DMA_RESET);
-
- // And wait for reset to complete
- timeout = jiffies + (HZ / 2);
- while (GetEngineControl(eng) & ENG_CTL_DMA_RESET) {
- if (time_after(jiffies, timeout)) {
- dev_crit(&eng->pldev->dev, "DMA_RESET still asserted!\n");
- break;
- }
- }
-
- // Clear any persistent bits just to make sure there is no residue from the reset
- SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE |
- ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR |
- ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END |
- ENG_CTL_DMA_WAITING_PERSIST), 0);
-
- // Reset performance counters
-
- // Completely disable the engine
- WriteEngineControl(eng, 0);
-}
-
-void destroy_dma_engine(struct kpc_dma_device *eng)
-{
- struct kpc_dma_descriptor *cur;
- dma_addr_t cur_handle;
- unsigned int i;
-
- stop_dma_engine(eng);
-
- cur = eng->desc_pool_first;
- cur_handle = eng->desc_pool_first->MyDMAAddr;
-
- for (i = 0 ; i < eng->desc_pool_cnt ; i++) {
- struct kpc_dma_descriptor *next = cur->Next;
- dma_addr_t next_handle = cur->DescNextDescPtr;
-
- dma_pool_free(eng->desc_pool, cur, cur_handle);
- cur_handle = next_handle;
- cur = next;
- }
-
- dma_pool_destroy(eng->desc_pool);
-
- free_irq(eng->irq, eng);
-}
-
-/********** Helper Functions **********/
-int count_descriptors_available(struct kpc_dma_device *eng)
-{
- u32 count = 0;
- struct kpc_dma_descriptor *cur = eng->desc_next;
-
- while (cur != eng->desc_completed) {
- BUG_ON(!cur);
- count++;
- cur = cur->Next;
- }
- return count;
-}
-
-void clear_desc(struct kpc_dma_descriptor *desc)
-{
- if (!desc)
- return;
- desc->DescByteCount = 0;
- desc->DescStatusErrorFlags = 0;
- desc->DescStatusFlags = 0;
- desc->DescUserControlLS = 0;
- desc->DescUserControlMS = 0;
- desc->DescCardAddrLS = 0;
- desc->DescBufferByteCount = 0;
- desc->DescCardAddrMS = 0;
- desc->DescControlFlags = 0;
- desc->DescSystemAddrLS = 0;
- desc->DescSystemAddrMS = 0;
- desc->acd = NULL;
-}
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
deleted file mode 100644
index 10dcd6646b01..000000000000
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ /dev/null
@@ -1,363 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/kernel.h> /* printk() */
-#include <linux/slab.h> /* kmalloc() */
-#include <linux/fs.h> /* everything... */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/cdev.h>
-#include <linux/uaccess.h> /* copy_*_user */
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include "kpc_dma_driver.h"
-#include "uapi.h"
-
-/********** Helper Functions **********/
-static inline
-unsigned int count_pages(unsigned long iov_base, size_t iov_len)
-{
- unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
- unsigned long last = ((iov_base + iov_len - 1) & PAGE_MASK) >> PAGE_SHIFT;
-
- return last - first + 1;
-}
-
-static inline
-unsigned int count_parts_for_sge(struct scatterlist *sg)
-{
- return DIV_ROUND_UP(sg_dma_len(sg), 0x80000);
-}
-
-/********** Transfer Helpers **********/
-static int kpc_dma_transfer(struct dev_private_data *priv,
- unsigned long iov_base, size_t iov_len)
-{
- unsigned int i = 0;
- int rv = 0, nr_pages = 0;
- struct kpc_dma_device *ldev;
- struct aio_cb_data *acd;
- DECLARE_COMPLETION_ONSTACK(done);
- u32 desc_needed = 0;
- struct scatterlist *sg;
- u32 num_descrs_avail;
- struct kpc_dma_descriptor *desc;
- unsigned int pcnt;
- unsigned int p;
- u64 card_addr;
- u64 dma_addr;
- u64 user_ctl;
-
- ldev = priv->ldev;
-
- acd = kzalloc(sizeof(*acd), GFP_KERNEL);
- if (!acd) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the aio data\n");
- return -ENOMEM;
- }
- memset(acd, 0x66, sizeof(struct aio_cb_data));
-
- acd->priv = priv;
- acd->ldev = priv->ldev;
- acd->cpl = &done;
- acd->flags = 0;
- acd->len = iov_len;
- acd->page_count = count_pages(iov_base, iov_len);
-
- // Allocate an array of page pointers
- acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *),
- GFP_KERNEL);
- if (!acd->user_pages) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
- rv = -ENOMEM;
- goto err_alloc_userpages;
- }
-
- // Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
- mmap_read_lock(current->mm); /* get memory map semaphore */
- rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE,
- acd->user_pages, NULL);
- mmap_read_unlock(current->mm); /* release the semaphore */
- if (rv != acd->page_count) {
- nr_pages = rv;
- if (rv > 0)
- rv = -EFAULT;
-
- dev_err(&priv->ldev->pldev->dev, "Couldn't pin_user_pages (%d)\n", rv);
- goto unpin_pages;
- }
- nr_pages = acd->page_count;
-
- // Allocate and setup the sg_table (scatterlist entries)
- rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count,
- iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
- if (rv) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%d)\n", rv);
- goto unpin_pages;
- }
-
- // Setup the DMA mapping for all the sg entries
- acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents,
- ldev->dir);
- if (acd->mapped_entry_count <= 0) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n",
- acd->mapped_entry_count);
- goto free_table;
- }
-
- // Calculate how many descriptors are actually needed for this transfer.
- for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
- desc_needed += count_parts_for_sge(sg);
- }
-
- lock_engine(ldev);
-
- // Figure out how many descriptors are available and return an error if there aren't enough
- num_descrs_avail = count_descriptors_available(ldev);
- dev_dbg(&priv->ldev->pldev->dev,
- " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n",
- acd->mapped_entry_count, desc_needed, num_descrs_avail);
-
- if (desc_needed >= ldev->desc_pool_cnt) {
- dev_warn(&priv->ldev->pldev->dev,
- " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n",
- acd->mapped_entry_count, desc_needed, num_descrs_avail);
- rv = -EAGAIN;
- goto err_descr_too_many;
- }
- if (desc_needed > num_descrs_avail) {
- dev_warn(&priv->ldev->pldev->dev,
- " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n",
- acd->mapped_entry_count, desc_needed, num_descrs_avail);
- rv = -EMSGSIZE;
- goto err_descr_too_many;
- }
-
- // Loop through all the sg table entries and fill out a descriptor for each one.
- desc = ldev->desc_next;
- card_addr = acd->priv->card_addr;
- for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
- pcnt = count_parts_for_sge(sg);
- for (p = 0 ; p < pcnt ; p++) {
- // Fill out the descriptor
- BUG_ON(!desc);
- clear_desc(desc);
- if (p != pcnt - 1)
- desc->DescByteCount = 0x80000;
- else
- desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
-
- desc->DescBufferByteCount = desc->DescByteCount;
-
- desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
- if (i == 0 && p == 0)
- desc->DescControlFlags |= DMA_DESC_CTL_SOP;
- if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
- desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;
-
- desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
- desc->DescCardAddrMS = (card_addr >> 32) & 0xF;
- card_addr += desc->DescByteCount;
-
- dma_addr = sg_dma_address(sg) + (p * 0x80000);
- desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFFUL) >> 0;
- desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000UL) >> 32;
-
- user_ctl = acd->priv->user_ctl;
- if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
- user_ctl = acd->priv->user_ctl_last;
-
- desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFFUL) >> 0;
- desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000UL) >> 32;
-
- if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
- desc->acd = acd;
-
- dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n",
- desc, desc->acd);
-
- ldev->desc_next = desc->Next;
- desc = desc->Next;
- }
- }
-
- // Send the filled descriptors off to the hardware to process!
- SetEngineSWPtr(ldev, ldev->desc_next);
-
- unlock_engine(ldev);
-
- rv = wait_for_completion_interruptible(&done);
- /*
- * If the user aborted (rv == -ERESTARTSYS), we're no longer responsible
- * for cleaning up the acd
- */
- if (rv == -ERESTARTSYS)
- acd->cpl = NULL;
- if (rv == 0) {
- rv = acd->len;
- kfree(acd);
- }
- return rv;
-
- err_descr_too_many:
- unlock_engine(ldev);
- dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
- free_table:
- sg_free_table(&acd->sgt);
-
- unpin_pages:
- if (nr_pages > 0)
- unpin_user_pages(acd->user_pages, nr_pages);
- kfree(acd->user_pages);
- err_alloc_userpages:
- kfree(acd);
- dev_dbg(&priv->ldev->pldev->dev, "%s returning with error %d\n", __func__, rv);
- return rv;
-}
-
-void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
-{
- unsigned int i;
-
- BUG_ON(!acd);
- BUG_ON(!acd->user_pages);
- BUG_ON(!acd->sgt.sgl);
- BUG_ON(!acd->ldev);
- BUG_ON(!acd->ldev->pldev);
-
- dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);
-
- for (i = 0 ; i < acd->page_count ; i++) {
- if (!PageReserved(acd->user_pages[i]))
- set_page_dirty_lock(acd->user_pages[i]);
- }
-
- unpin_user_pages(acd->user_pages, acd->page_count);
-
- sg_free_table(&acd->sgt);
-
- kfree(acd->user_pages);
-
- acd->flags = flags;
-
- if (acd->cpl) {
- complete(acd->cpl);
- } else {
- /*
- * There's no completion, so we're responsible for cleaning up
- * the acd
- */
- kfree(acd);
- }
-}
-
-/********** Fileops **********/
-static
-int kpc_dma_open(struct inode *inode, struct file *filp)
-{
- struct dev_private_data *priv;
- struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode));
-
- if (!ldev)
- return -ENODEV;
-
- if (!atomic_dec_and_test(&ldev->open_count)) {
- atomic_inc(&ldev->open_count);
- return -EBUSY; /* already open */
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->ldev = ldev;
- filp->private_data = priv;
-
- return 0;
-}
-
-static
-int kpc_dma_close(struct inode *inode, struct file *filp)
-{
- struct kpc_dma_descriptor *cur;
- struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
- struct kpc_dma_device *eng = priv->ldev;
-
- lock_engine(eng);
-
- stop_dma_engine(eng);
-
- cur = eng->desc_completed->Next;
- while (cur != eng->desc_next) {
- dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd);
- if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
- if (cur->acd)
- transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT);
- }
-
- clear_desc(cur);
- eng->desc_completed = cur;
-
- cur = cur->Next;
- }
-
- start_dma_engine(eng);
-
- unlock_engine(eng);
-
- atomic_inc(&priv->ldev->open_count); /* release the device */
- kfree(priv);
- return 0;
-}
-
-static
-ssize_t kpc_dma_read(struct file *filp, char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
-
- if (priv->ldev->dir != DMA_FROM_DEVICE)
- return -EMEDIUMTYPE;
-
- return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
-}
-
-static
-ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
-
- if (priv->ldev->dir != DMA_TO_DEVICE)
- return -EMEDIUMTYPE;
-
- return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
-}
-
-static
-long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
-{
- struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
-
- switch (ioctl_num) {
- case KND_IOCTL_SET_CARD_ADDR:
- priv->card_addr = ioctl_param; return priv->card_addr;
- case KND_IOCTL_SET_USER_CTL:
- priv->user_ctl = ioctl_param; return priv->user_ctl;
- case KND_IOCTL_SET_USER_CTL_LAST:
- priv->user_ctl_last = ioctl_param; return priv->user_ctl_last;
- case KND_IOCTL_GET_USER_STS:
- return priv->user_sts;
- }
-
- return -ENOTTY;
-}
-
-const struct file_operations kpc_dma_fops = {
- .owner = THIS_MODULE,
- .open = kpc_dma_open,
- .release = kpc_dma_close,
- .read = kpc_dma_read,
- .write = kpc_dma_write,
- .unlocked_ioctl = kpc_dma_ioctl,
-};
-
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
deleted file mode 100644
index 175fe8b0d055..000000000000
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/fs.h>
-#include <linux/rwsem.h>
-#include "kpc_dma_driver.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Matt.Sickler@daktronics.com");
-
-#define KPC_DMA_CHAR_MAJOR UNNAMED_MAJOR
-#define KPC_DMA_NUM_MINORS BIT(MINORBITS)
-static DEFINE_MUTEX(kpc_dma_mtx);
-static int assigned_major_num;
-static LIST_HEAD(kpc_dma_list);
-
-/********** kpc_dma_list list management **********/
-struct kpc_dma_device *kpc_dma_lookup_device(int minor)
-{
- struct kpc_dma_device *c;
-
- mutex_lock(&kpc_dma_mtx);
- list_for_each_entry(c, &kpc_dma_list, list) {
- if (c->pldev->id == minor)
- goto out;
- }
- c = NULL; // not-found case
-out:
- mutex_unlock(&kpc_dma_mtx);
- return c;
-}
-
-static void kpc_dma_add_device(struct kpc_dma_device *ldev)
-{
- mutex_lock(&kpc_dma_mtx);
- list_add(&ldev->list, &kpc_dma_list);
- mutex_unlock(&kpc_dma_mtx);
-}
-
-static void kpc_dma_del_device(struct kpc_dma_device *ldev)
-{
- mutex_lock(&kpc_dma_mtx);
- list_del(&ldev->list);
- mutex_unlock(&kpc_dma_mtx);
-}
-
-/********** SysFS Attributes **********/
-static ssize_t engine_regs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kpc_dma_device *ldev;
- struct platform_device *pldev = to_platform_device(dev);
-
- if (!pldev)
- return 0;
- ldev = platform_get_drvdata(pldev);
- if (!ldev)
- return 0;
-
- return scnprintf(buf, PAGE_SIZE,
- "EngineControlStatus = 0x%08x\n"
- "RegNextDescPtr = 0x%08x\n"
- "RegSWDescPtr = 0x%08x\n"
- "RegCompletedDescPtr = 0x%08x\n"
- "desc_pool_first = %p\n"
- "desc_pool_last = %p\n"
- "desc_next = %p\n"
- "desc_completed = %p\n",
- readl(ldev->eng_regs + 1),
- readl(ldev->eng_regs + 2),
- readl(ldev->eng_regs + 3),
- readl(ldev->eng_regs + 4),
- ldev->desc_pool_first,
- ldev->desc_pool_last,
- ldev->desc_next,
- ldev->desc_completed
- );
-}
-static DEVICE_ATTR_RO(engine_regs);
-
-static const struct attribute *ndd_attr_list[] = {
- &dev_attr_engine_regs.attr,
- NULL,
-};
-
-static struct class *kpc_dma_class;
-
-/********** Platform Driver Functions **********/
-static
-int kpc_dma_probe(struct platform_device *pldev)
-{
- struct resource *r = NULL;
- int rv = 0;
- dev_t dev;
-
- struct kpc_dma_device *ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
-
- if (!ldev) {
- dev_err(&pldev->dev, "%s: unable to kzalloc space for kpc_dma_device\n", __func__);
- rv = -ENOMEM;
- goto err_rv;
- }
-
- INIT_LIST_HEAD(&ldev->list);
-
- ldev->pldev = pldev;
- platform_set_drvdata(pldev, ldev);
- atomic_set(&ldev->open_count, 1);
-
- mutex_init(&ldev->sem);
- lock_engine(ldev);
-
- // Get Engine regs resource
- r = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&ldev->pldev->dev, "%s: didn't get the engine regs resource!\n", __func__);
- rv = -ENXIO;
- goto err_kfree;
- }
- ldev->eng_regs = ioremap(r->start, resource_size(r));
- if (!ldev->eng_regs) {
- dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__);
- rv = -ENXIO;
- goto err_kfree;
- }
-
- r = platform_get_resource(pldev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&ldev->pldev->dev, "%s: didn't get the IRQ resource!\n", __func__);
- rv = -ENXIO;
- goto err_kfree;
- }
- ldev->irq = r->start;
-
- // Setup miscdev struct
- dev = MKDEV(assigned_major_num, pldev->id);
- ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev,
- "kpc_dma%d", pldev->id);
- if (IS_ERR(ldev->kpc_dma_dev)) {
- rv = PTR_ERR(ldev->kpc_dma_dev);
- dev_err(&ldev->pldev->dev, "%s: device_create failed: %d\n", __func__, rv);
- goto err_kfree;
- }
-
- // Setup the DMA engine
- rv = setup_dma_engine(ldev, 30);
- if (rv) {
- dev_err(&ldev->pldev->dev, "%s: failed to setup_dma_engine: %d\n", __func__, rv);
- goto err_misc_dereg;
- }
-
- // Setup the sysfs files
- rv = sysfs_create_files(&(ldev->pldev->dev.kobj), ndd_attr_list);
- if (rv) {
- dev_err(&ldev->pldev->dev, "%s: Failed to add sysfs files: %d\n", __func__, rv);
- goto err_destroy_eng;
- }
-
- kpc_dma_add_device(ldev);
-
- return 0;
-
- err_destroy_eng:
- destroy_dma_engine(ldev);
- err_misc_dereg:
- device_destroy(kpc_dma_class, dev);
- err_kfree:
- kfree(ldev);
- err_rv:
- return rv;
-}
-
-static
-int kpc_dma_remove(struct platform_device *pldev)
-{
- struct kpc_dma_device *ldev = platform_get_drvdata(pldev);
-
- if (!ldev)
- return -ENXIO;
-
- lock_engine(ldev);
- sysfs_remove_files(&(ldev->pldev->dev.kobj), ndd_attr_list);
- destroy_dma_engine(ldev);
- kpc_dma_del_device(ldev);
- device_destroy(kpc_dma_class, MKDEV(assigned_major_num, ldev->pldev->id));
- kfree(ldev);
-
- return 0;
-}
-
-/********** Driver Functions **********/
-static struct platform_driver kpc_dma_plat_driver_i = {
- .probe = kpc_dma_probe,
- .remove = kpc_dma_remove,
- .driver = {
- .name = KP_DRIVER_NAME_DMA_CONTROLLER,
- },
-};
-
-static
-int __init kpc_dma_driver_init(void)
-{
- int err;
-
- err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS,
- "kpc_dma", &kpc_dma_fops);
- if (err < 0) {
- pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n",
- KPC_DMA_CHAR_MAJOR, err);
- goto fail_chrdev_register;
- }
- assigned_major_num = err;
-
- kpc_dma_class = class_create(THIS_MODULE, "kpc_dma");
- err = PTR_ERR(kpc_dma_class);
- if (IS_ERR(kpc_dma_class)) {
- pr_err("Can't create class kpc_dma (err = %d)\n", err);
- goto fail_class_create;
- }
-
- err = platform_driver_register(&kpc_dma_plat_driver_i);
- if (err) {
- pr_err("Can't register platform driver for kpc_dma (err = %d)\n", err);
- goto fail_platdriver_register;
- }
-
- return err;
-
-fail_platdriver_register:
- class_destroy(kpc_dma_class);
-fail_class_create:
- __unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma");
-fail_chrdev_register:
- return err;
-}
-module_init(kpc_dma_driver_init);
-
-static
-void __exit kpc_dma_driver_exit(void)
-{
- platform_driver_unregister(&kpc_dma_plat_driver_i);
- class_destroy(kpc_dma_class);
- __unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma");
-}
-module_exit(kpc_dma_driver_exit);
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
deleted file mode 100644
index 8b9c978257b9..000000000000
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef KPC_DMA_DRIVER_H
-#define KPC_DMA_DRIVER_H
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/kfifo.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/miscdevice.h>
-#include <linux/rwsem.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include "../kpc.h"
-
-struct kp2000_device;
-struct kpc_dma_device {
- struct list_head list;
- struct platform_device *pldev;
- u32 __iomem *eng_regs;
- struct device *kpc_dma_dev;
- struct kobject kobj;
- char name[16];
-
- int dir; // DMA_FROM_DEVICE || DMA_TO_DEVICE
- struct mutex sem;
- unsigned int irq;
- struct work_struct irq_work;
-
- atomic_t open_count;
-
- size_t accumulated_bytes;
- u32 accumulated_flags;
-
- // Descriptor "Pool" housekeeping
- u32 desc_pool_cnt;
- struct dma_pool *desc_pool;
- struct kpc_dma_descriptor *desc_pool_first;
- struct kpc_dma_descriptor *desc_pool_last;
-
- struct kpc_dma_descriptor *desc_next;
- struct kpc_dma_descriptor *desc_completed;
-};
-
-struct dev_private_data {
- struct kpc_dma_device *ldev;
- u64 card_addr;
- u64 user_ctl;
- u64 user_ctl_last;
- u64 user_sts;
-};
-
-struct kpc_dma_device *kpc_dma_lookup_device(int minor);
-
-extern const struct file_operations kpc_dma_fops;
-
-#define ENG_CAP_PRESENT 0x00000001
-#define ENG_CAP_DIRECTION 0x00000002
-#define ENG_CAP_TYPE_MASK 0x000000F0
-#define ENG_CAP_NUMBER_MASK 0x0000FF00
-#define ENG_CAP_CARD_ADDR_SIZE_MASK 0x007F0000
-#define ENG_CAP_DESC_MAX_BYTE_CNT_MASK 0x3F000000
-#define ENG_CAP_PERF_SCALE_MASK 0xC0000000
-
-#define ENG_CTL_IRQ_ENABLE BIT(0)
-#define ENG_CTL_IRQ_ACTIVE BIT(1)
-#define ENG_CTL_DESC_COMPLETE BIT(2)
-#define ENG_CTL_DESC_ALIGN_ERR BIT(3)
-#define ENG_CTL_DESC_FETCH_ERR BIT(4)
-#define ENG_CTL_SW_ABORT_ERR BIT(5)
-#define ENG_CTL_DESC_CHAIN_END BIT(7)
-#define ENG_CTL_DMA_ENABLE BIT(8)
-#define ENG_CTL_DMA_RUNNING BIT(10)
-#define ENG_CTL_DMA_WAITING BIT(11)
-#define ENG_CTL_DMA_WAITING_PERSIST BIT(12)
-#define ENG_CTL_DMA_RESET_REQUEST BIT(14)
-#define ENG_CTL_DMA_RESET BIT(15)
-#define ENG_CTL_DESC_FETCH_ERR_CLASS_MASK 0x700000
-
-struct aio_cb_data {
- struct dev_private_data *priv;
- struct kpc_dma_device *ldev;
- struct completion *cpl;
- unsigned char flags;
- size_t len;
-
- unsigned int page_count;
- struct page **user_pages;
- struct sg_table sgt;
- int mapped_entry_count;
-};
-
-#define ACD_FLAG_DONE 0
-#define ACD_FLAG_ABORT 1
-#define ACD_FLAG_ENG_ACCUM_ERROR 4
-#define ACD_FLAG_ENG_ACCUM_SHORT 5
-
-struct kpc_dma_descriptor {
- struct {
- volatile u32 DescByteCount :20;
- volatile u32 DescStatusErrorFlags :4;
- volatile u32 DescStatusFlags :8;
- };
- volatile u32 DescUserControlLS;
- volatile u32 DescUserControlMS;
- volatile u32 DescCardAddrLS;
- struct {
- volatile u32 DescBufferByteCount :20;
- volatile u32 DescCardAddrMS :4;
- volatile u32 DescControlFlags :8;
- };
- volatile u32 DescSystemAddrLS;
- volatile u32 DescSystemAddrMS;
- volatile u32 DescNextDescPtr;
-
- dma_addr_t MyDMAAddr;
- struct kpc_dma_descriptor *Next;
-
- struct aio_cb_data *acd;
-} __attribute__((packed));
-// DescControlFlags:
-#define DMA_DESC_CTL_SOP BIT(7)
-#define DMA_DESC_CTL_EOP BIT(6)
-#define DMA_DESC_CTL_AFIFO BIT(2)
-#define DMA_DESC_CTL_IRQONERR BIT(1)
-#define DMA_DESC_CTL_IRQONDONE BIT(0)
-// DescStatusFlags:
-#define DMA_DESC_STS_SOP BIT(7)
-#define DMA_DESC_STS_EOP BIT(6)
-#define DMA_DESC_STS_ERROR BIT(4)
-#define DMA_DESC_STS_USMSZ BIT(3)
-#define DMA_DESC_STS_USLSZ BIT(2)
-#define DMA_DESC_STS_SHORT BIT(1)
-#define DMA_DESC_STS_COMPLETE BIT(0)
-// DescStatusErrorFlags:
-#define DMA_DESC_ESTS_ECRC BIT(2)
-#define DMA_DESC_ESTS_POISON BIT(1)
-#define DMA_DESC_ESTS_UNSUCCESSFUL BIT(0)
-
-#define DMA_DESC_ALIGNMENT 0x20
-
-static inline
-u32 GetEngineCapabilities(struct kpc_dma_device *eng)
-{
- return readl(eng->eng_regs + 0);
-}
-
-static inline
-void WriteEngineControl(struct kpc_dma_device *eng, u32 value)
-{
- writel(value, eng->eng_regs + 1);
-}
-
-static inline
-u32 GetEngineControl(struct kpc_dma_device *eng)
-{
- return readl(eng->eng_regs + 1);
-}
-
-static inline
-void SetClearEngineControl(struct kpc_dma_device *eng, u32 set_bits, u32 clear_bits)
-{
- u32 val = GetEngineControl(eng);
-
- val |= set_bits;
- val &= ~clear_bits;
- WriteEngineControl(eng, val);
-}
-
-static inline
-void SetEngineNextPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor *desc)
-{
- writel(desc->MyDMAAddr, eng->eng_regs + 2);
-}
-
-static inline
-void SetEngineSWPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor *desc)
-{
- writel(desc->MyDMAAddr, eng->eng_regs + 3);
-}
-
-static inline
-void ClearEngineCompletePtr(struct kpc_dma_device *eng)
-{
- writel(0, eng->eng_regs + 4);
-}
-
-static inline
-u32 GetEngineCompletePtr(struct kpc_dma_device *eng)
-{
- return readl(eng->eng_regs + 4);
-}
-
-static inline
-void lock_engine(struct kpc_dma_device *eng)
-{
- BUG_ON(!eng);
- mutex_lock(&eng->sem);
-}
-
-static inline
-void unlock_engine(struct kpc_dma_device *eng)
-{
- BUG_ON(!eng);
- mutex_unlock(&eng->sem);
-}
-
-/// Shared Functions
-void start_dma_engine(struct kpc_dma_device *eng);
-int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt);
-void stop_dma_engine(struct kpc_dma_device *eng);
-void destroy_dma_engine(struct kpc_dma_device *eng);
-void clear_desc(struct kpc_dma_descriptor *desc);
-int count_descriptors_available(struct kpc_dma_device *eng);
-void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags);
-
-#endif /* KPC_DMA_DRIVER_H */
-
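For reference, the engine-control helpers in the header removed above are a plain read-modify-write over a single u32 MMIO register. A minimal, self-contained sketch of that pattern, with hypothetical names not taken from the removed driver:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helper: set and clear bits in one 32-bit MMIO register. */
static inline void rmw_reg32(void __iomem *reg, u32 set_bits, u32 clear_bits)
{
        u32 val = readl(reg);

        val |= set_bits;
        val &= ~clear_bits;
        writel(val, reg);
}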
diff --git a/drivers/staging/kpc2000/kpc_dma/uapi.h b/drivers/staging/kpc2000/kpc_dma/uapi.h
deleted file mode 100644
index 5ff6a1a36ff9..000000000000
--- a/drivers/staging/kpc2000/kpc_dma/uapi.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef KPC_DMA_DRIVER_UAPI_H_
-#define KPC_DMA_DRIVER_UAPI_H_
-#include <linux/ioctl.h>
-
-#define KND_IOCTL_SET_CARD_ADDR _IOW('k', 1, __u32)
-#define KND_IOCTL_SET_USER_CTL _IOW('k', 2, __u64)
-#define KND_IOCTL_SET_USER_CTL_LAST _IOW('k', 4, __u64)
-#define KND_IOCTL_GET_USER_STS _IOR('k', 3, __u64)
-
-#endif /* KPC_DMA_DRIVER_UAPI_H_ */
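The removed UAPI header encoded the driver's ioctl numbers with the standard _IOW()/_IOR() macros. A minimal userspace sketch of how such ioctls are typically issued; the device node name and the pointer-argument convention are illustrative assumptions, not taken from the driver:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define KND_IOCTL_SET_CARD_ADDR _IOW('k', 1, __u32)
#define KND_IOCTL_GET_USER_STS  _IOR('k', 3, __u64)

int main(void)
{
        __u32 card_addr = 0x1000;               /* example value */
        __u64 user_sts;
        int fd = open("/dev/kpc_dma0", O_RDWR); /* hypothetical node name */

        if (fd < 0)
                return 1;
        /* Passing pointers is the common convention for _IOW/_IOR numbers. */
        if (ioctl(fd, KND_IOCTL_SET_CARD_ADDR, &card_addr) < 0)
                perror("SET_CARD_ADDR");
        if (ioctl(fd, KND_IOCTL_GET_USER_STS, &user_sts) == 0)
                printf("user_sts = %llu\n", (unsigned long long)user_sts);
        close(fd);
        return 0;
}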
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 8bc3b7d8d3d5..eaa70893224a 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -524,13 +524,11 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
struct net_device *dev = priv->net_dev;
u32 mib_status;
u32 mib_attribute;
- u16 mib_val_size;
- u16 mib_val_type;
mib_status = get_dword(priv);
mib_attribute = get_dword(priv);
- mib_val_size = get_word(priv);
- mib_val_type = get_word(priv);
+ get_word(priv); /* mib_val_size */
+ get_word(priv); /* mib_val_type */
if (mib_status) {
netdev_err(priv->net_dev, "attribute=%08X, status=%08X\n",
@@ -846,9 +844,7 @@ void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
static
void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
{
- u16 result_code;
-
- result_code = get_word(priv);
+ get_word(priv); /* result_code */
priv->infra_status = 1; /* infrastructure mode set */
hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
}
@@ -926,14 +922,14 @@ static
void hostif_phy_information_confirm(struct ks_wlan_private *priv)
{
struct iw_statistics *wstats = &priv->wstats;
- u8 rssi, signal, noise;
+ u8 rssi, signal;
u8 link_speed;
u32 transmitted_frame_count, received_fragment_count;
u32 failed_count, fcs_error_count;
rssi = get_byte(priv);
signal = get_byte(priv);
- noise = get_byte(priv);
+ get_byte(priv); /* noise */
link_speed = get_byte(priv);
transmitted_frame_count = get_dword(priv);
received_fragment_count = get_dword(priv);
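The pattern in these hunks drops the unused local variables but keeps the read calls, which appear to be load-bearing: each get_byte()/get_word() consumes data and advances the receive cursor, so the reads cannot simply be deleted. A minimal sketch of the idiom with a hypothetical parser, not the ks7010 code:

#include <linux/types.h>

struct pkt_cursor {
        const u8 *p;
};

/* Illustrative little-endian 16-bit read that advances the cursor. */
static u16 get_word(struct pkt_cursor *c)
{
        u16 v = c->p[0] | (c->p[1] << 8);

        c->p += 2;
        return v;
}

static u16 parse_confirm(struct pkt_cursor *c)
{
        u16 attr = get_word(c);

        get_word(c);    /* val_size: still consumed, value unused */
        get_word(c);    /* val_type: likewise */
        return attr;
}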
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
index 39138191a556..c62a494ed6bb 100644
--- a/drivers/staging/ks7010/ks_hostif.h
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -498,20 +498,20 @@ struct hostif_mic_failure_request {
#define TX_RATE_FIXED 5
/* 11b rate */
-#define TX_RATE_1M (u8)(10 / 5) /* 11b 11g basic rate */
-#define TX_RATE_2M (u8)(20 / 5) /* 11b 11g basic rate */
-#define TX_RATE_5M (u8)(55 / 5) /* 11g basic rate */
-#define TX_RATE_11M (u8)(110 / 5) /* 11g basic rate */
+#define TX_RATE_1M ((u8)(10 / 5)) /* 11b 11g basic rate */
+#define TX_RATE_2M ((u8)(20 / 5)) /* 11b 11g basic rate */
+#define TX_RATE_5M ((u8)(55 / 5)) /* 11g basic rate */
+#define TX_RATE_11M ((u8)(110 / 5)) /* 11g basic rate */
/* 11g rate */
-#define TX_RATE_6M (u8)(60 / 5) /* 11g basic rate */
-#define TX_RATE_12M (u8)(120 / 5) /* 11g basic rate */
-#define TX_RATE_24M (u8)(240 / 5) /* 11g basic rate */
-#define TX_RATE_9M (u8)(90 / 5)
-#define TX_RATE_18M (u8)(180 / 5)
-#define TX_RATE_36M (u8)(360 / 5)
-#define TX_RATE_48M (u8)(480 / 5)
-#define TX_RATE_54M (u8)(540 / 5)
+#define TX_RATE_6M ((u8)(60 / 5)) /* 11g basic rate */
+#define TX_RATE_12M ((u8)(120 / 5)) /* 11g basic rate */
+#define TX_RATE_24M ((u8)(240 / 5)) /* 11g basic rate */
+#define TX_RATE_9M ((u8)(90 / 5))
+#define TX_RATE_18M ((u8)(180 / 5))
+#define TX_RATE_36M ((u8)(360 / 5))
+#define TX_RATE_48M ((u8)(480 / 5))
+#define TX_RATE_54M ((u8)(540 / 5))
static inline bool is_11b_rate(u8 rate)
{
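The TX_RATE_* changes follow the usual rule that a macro expanding to an expression should be fully parenthesized. With a cast the precedence risk is small, but the hazard the rule guards against is easiest to see with lower-precedence operators; an illustrative example, not from this header:

#define SCALE_BAD   5 / 2       /* un-parenthesized expansion */
#define SCALE_GOOD  (5 / 2)

int bad  = 10 * SCALE_BAD;      /* 10 * 5 / 2 == 25: surprising */
int good = 10 * SCALE_GOOD;     /* 10 * (5 / 2) == 20: as intended */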
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index b34e3c130f53..093ef9a2b291 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -50,8 +50,10 @@ static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
/**
* struct hdm_channel - private structure to keep channel specific data
+ * @name: channel name
* @is_initialized: identifier to know whether the channel is initialized
* @ch: HAL specific channel data
+ * @reset_dbr_size: reset DBR data buffer size
* @pending_list: list to keep MBO's before starting transfer
* @started_list: list to keep MBO's after starting transfer
* @direction: channel direction (TX or RX)
@@ -68,7 +70,7 @@ struct hdm_channel {
enum most_channel_data_type data_type;
};
-/**
+/*
* struct dim2_hdm - private structure to keep interface specific data
* @hch: an array of channel specific data
* @most_iface: most interface structure
@@ -428,9 +430,9 @@ static void complete_all_mbos(struct list_head *head)
/**
* configure_channel - initialize a channel
- * @iface: interface the channel belongs to
- * @channel: channel to be configured
- * @channel_config: structure that holds the configuration information
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: channel index to be configured
+ * @ccfg: structure that holds the configuration information
*
* Receives configuration information from mostcore and initialize
* the corresponding channel. Return 0 on success, negative on failure.
@@ -546,8 +548,8 @@ static int configure_channel(struct most_interface *most_iface, int ch_idx,
/**
* enqueue - enqueue a buffer for data transfer
- * @iface: intended interface
- * @channel: ID of the channel the buffer is intended for
+ * @most_iface: intended interface
+ * @ch_idx: ID of the channel the buffer is intended for
* @mbo: pointer to the buffer object
*
* Push the buffer into pending_list and try to transfer one buffer from
@@ -579,8 +581,9 @@ static int enqueue(struct most_interface *most_iface, int ch_idx,
/**
* request_netinfo - triggers retrieving of network info
- * @iface: pointer to the interface
- * @channel_id: corresponding channel ID
+ * @most_iface: pointer to the interface
+ * @ch_idx: corresponding channel ID
+ * @on_netinfo: call-back used to deliver network status to mostcore
*
* Send a command to INIC which triggers retrieving of network info by means of
* "Message exchange over MDP/MEP". Return 0 on success, negative on failure.
@@ -621,8 +624,8 @@ static void request_netinfo(struct most_interface *most_iface, int ch_idx,
/**
* poison_channel - poison buffers of a channel
- * @iface: pointer to the interface the channel to be poisoned belongs to
- * @channel_id: corresponding channel ID
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel ID
*
* Destroy a channel and complete all the buffers in both started_list &
* pending_list. Return 0 on success, negative on failure.
diff --git a/drivers/staging/most/dim2/hal.c b/drivers/staging/most/dim2/hal.c
index 39e17a7d2f24..65282c276862 100644
--- a/drivers/staging/most/dim2/hal.c
+++ b/drivers/staging/most/dim2/hal.c
@@ -96,9 +96,9 @@ static int dbr_get_mask_size(u16 size)
}
/**
- * Allocates DBR memory.
- * @param size Allocating memory size.
- * @return Offset in DBR memory by success or DBR_SIZE if out of memory.
+ * alloc_dbr() - Allocates DBR memory.
+ * @size: Allocating memory size.
+ * Returns: Offset in DBR memory by success or DBR_SIZE if out of memory.
*/
static int alloc_dbr(u16 size)
{
@@ -778,7 +778,7 @@ void dim_service_mlb_int_irq(void)
writel(0, &g.dim2->MS1);
}
-/**
+/*
* Retrieves maximal possible correct buffer size for isochronous data type
* conform to given packet length and not bigger than given buffer size.
*
@@ -792,7 +792,7 @@ u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
return norm_isoc_buffer_size(buf_size, packet_length);
}
-/**
+/*
* Retrieves maximal possible correct buffer size for synchronous data type
* conform to given bytes per frame and not bigger than given buffer size.
*
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 893a8babdb2f..7042f10887bb 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -51,8 +51,8 @@ static void pending_rx_work(struct work_struct *);
/**
* configure_channel - called from MOST core to configure a channel
- * @iface: interface the channel belongs to
- * @channel: channel to be configured
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: channel to be configured
* @channel_config: structure that holds the configuration information
*
* Return 0 on success, negative on failure.
@@ -107,8 +107,8 @@ static int configure_channel(struct most_interface *most_iface,
/**
* enqueue - called from MOST core to enqueue a buffer for data transfer
- * @iface: intended interface
- * @channel: ID of the channel the buffer is intended for
+ * @most_iface: intended interface
+ * @ch_idx: ID of the channel the buffer is intended for
* @mbo: pointer to the buffer object
*
* Return 0 on success, negative on failure.
@@ -153,8 +153,8 @@ static int enqueue(struct most_interface *most_iface,
/**
* poison_channel - called from MOST core to poison buffers of a channel
- * @iface: pointer to the interface the channel to be poisoned belongs to
- * @channel_id: corresponding channel ID
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel ID
*
* Return 0 on success, negative on failure.
*
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index f125bb6da406..a5fd14246046 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -539,9 +539,9 @@ static void __exit most_net_exit(void)
/**
* on_netinfo - callback for HDM to be informed about HW's MAC
- * @param iface - most interface instance
- * @param link_stat - link status
- * @param mac_addr - MAC address
+ * @iface: most interface instance
+ * @link_stat: link status
+ * @mac_addr: MAC address
*/
static void on_netinfo(struct most_interface *iface,
unsigned char link_stat, unsigned char *mac_addr)
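The comment changes across the most/ drivers convert Doxygen-style "@param x - ..." tags to kernel-doc "@x: ..." form and demote comments that are not kernel-doc from "/**" to "/*". A minimal sketch of the shape kernel-doc expects, using a hypothetical function:

struct widget;

/**
 * frob_widget - adjust a widget's output level
 * @w: widget to adjust
 * @level: new output level, 0..100
 *
 * Return: 0 on success, negative errno on failure.
 */
int frob_widget(struct widget *w, unsigned int level);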
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index 7716d0efe524..b65d71686814 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -101,12 +101,6 @@
};
&pcie {
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_pins>;
-
- reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>,
- <&gpio 8 GPIO_ACTIVE_LOW>,
- <&gpio 7 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 093a7f8091b5..eeabe9c0f4fb 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -55,7 +55,7 @@
#address-cells = <1>;
#size-cells = <1>;
- sysc: sysc@0 {
+ sysc: syscon@0 {
compatible = "mediatek,mt7621-sysc", "syscon";
reg = <0x0 0x100>;
#clock-cells = <1>;
@@ -122,7 +122,7 @@
status = "disabled";
};
- memc: memc@5000 {
+ memc: syscon@5000 {
compatible = "mediatek,mt7621-memc", "syscon";
reg = <0x5000 0x1000>;
};
@@ -372,13 +372,6 @@
clock-names = "nand";
};
- ethsys: syscon@1e000000 {
- compatible = "mediatek,mt7621-ethsys",
- "syscon";
- reg = <0x1e000000 0x1000>;
- #clock-cells = <1>;
- };
-
ethernet: ethernet@1e100000 {
compatible = "mediatek,mt7621-eth";
reg = <0x1e100000 0x10000>;
@@ -396,7 +389,7 @@
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
- mediatek,ethsys = <&ethsys>;
+ mediatek,ethsys = <&sysc>;
gmac0: mac@0 {
@@ -488,10 +481,10 @@
pcie: pcie@1e140000 {
compatible = "mediatek,mt7621-pci";
- reg = <0x1e140000 0x100 /* host-pci bridge registers */
- 0x1e142000 0x100 /* pcie port 0 RC control registers */
- 0x1e143000 0x100 /* pcie port 1 RC control registers */
- 0x1e144000 0x100>; /* pcie port 2 RC control registers */
+ reg = <0x1e140000 0x100>, /* host-pci bridge registers */
+ <0x1e142000 0x100>, /* pcie port 0 RC control registers */
+ <0x1e143000 0x100>, /* pcie port 1 RC control registers */
+ <0x1e144000 0x100>; /* pcie port 2 RC control registers */
#address-cells = <3>;
#size-cells = <2>;
@@ -500,64 +493,76 @@
device_type = "pci";
- bus-range = <0 255>;
- ranges = <
- 0x02000000 0 0x00000000 0x60000000 0 0x10000000 /* pci memory */
- 0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
- >;
+ ranges = <0x02000000 0 0x60000000 0x60000000 0 0x10000000>, /* pci memory */
+ <0x01000000 0 0x00000000 0x1e160000 0 0x00010000>; /* io space */
- interrupt-parent = <&gic>;
- interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH
- GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH
- GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xF800 0 0 0>;
+ interrupt-map = <0x0000 0 0 0 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0x0800 0 0 0 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0x1000 0 0 0 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- resets = <&rstctrl 24 &rstctrl 25 &rstctrl 26>;
- reset-names = "pcie0", "pcie1", "pcie2";
- clocks = <&sysc MT7621_CLK_PCIE0>,
- <&sysc MT7621_CLK_PCIE1>,
- <&sysc MT7621_CLK_PCIE2>;
- clock-names = "pcie0", "pcie1", "pcie2";
- phys = <&pcie0_phy 1>, <&pcie2_phy 0>;
- phy-names = "pcie-phy0", "pcie-phy2";
-
reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>;
pcie@0,0 {
reg = <0x0000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstctrl 24>;
+ clocks = <&sysc MT7621_CLK_PCIE0>;
+ phys = <&pcie0_phy 1>;
+ phy-names = "pcie-phy0";
ranges;
- bus-range = <0x00 0xff>;
};
pcie@1,0 {
reg = <0x0800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstctrl 25>;
+ clocks = <&sysc MT7621_CLK_PCIE1>;
+ phys = <&pcie0_phy 1>;
+ phy-names = "pcie-phy1";
ranges;
- bus-range = <0x00 0xff>;
};
pcie@2,0 {
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rstctrl 26>;
+ clocks = <&sysc MT7621_CLK_PCIE2>;
+ phys = <&pcie2_phy 0>;
+ phy-names = "pcie-phy2";
ranges;
- bus-range = <0x00 0xff>;
};
};
pcie0_phy: pcie-phy@1e149000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e149000 0x0700>;
+ clocks = <&sysc MT7621_CLK_XTAL>;
#phy-cells = <1>;
};
pcie2_phy: pcie-phy@1e14a000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e14a000 0x0700>;
+ clocks = <&sysc MT7621_CLK_XTAL>;
#phy-cells = <1>;
};
};
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 115250115f10..f9bdf4e33134 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -16,13 +16,12 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
@@ -30,22 +29,12 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
-#include <mt7621.h>
-#include <ralink_regs.h>
-
-#include "../../pci/pci.h"
-
-/* sysctl */
-#define MT7621_GPIO_MODE 0x60
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8)
-/* rt_sysc_membase relative registers */
-#define RALINK_CLKCFG1 0x30
-
/* Host-PCI bridge registers */
#define RALINK_PCI_PCICFG_ADDR 0x0000
#define RALINK_PCI_PCIMSK_ADDR 0x000C
@@ -54,20 +43,7 @@
#define RALINK_PCI_MEMBASE 0x0028
#define RALINK_PCI_IOBASE 0x002C
-/* PCICFG virtual bridges */
-#define PCIE_P2P_CNT 3
-#define PCIE_P2P_BR_DEVNUM_SHIFT(p) (16 + (p) * 4)
-#define PCIE_P2P_BR_DEVNUM0_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(0)
-#define PCIE_P2P_BR_DEVNUM1_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(1)
-#define PCIE_P2P_BR_DEVNUM2_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(2)
-#define PCIE_P2P_BR_DEVNUM_MASK 0xf
-#define PCIE_P2P_BR_DEVNUM_MASK_FULL (0xfff << PCIE_P2P_BR_DEVNUM0_SHIFT)
-
/* PCIe RC control registers */
-#define MT7621_PCIE_OFFSET 0x2000
-#define MT7621_NEXT_PORT 0x1000
-
-#define RALINK_PCI_BAR0SETUP_ADDR 0x0010
#define RALINK_PCI_ID 0x0030
#define RALINK_PCI_CLASS 0x0034
#define RALINK_PCI_SUBID 0x0038
@@ -79,11 +55,8 @@
#define PCIE_BAR_MAP_MAX GENMASK(30, 16)
#define PCIE_BAR_ENABLE BIT(0)
#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
-#define PCIE_PORT_CLK_EN(x) BIT(24 + (x))
#define PCIE_PORT_LINKUP BIT(0)
-#define PERST_MODE_MASK GENMASK(11, 10)
-#define PERST_MODE_GPIO BIT(10)
#define PERST_DELAY_MS 100
/**
@@ -91,76 +64,68 @@
* @base: I/O mapped register base
* @list: port list
* @pcie: pointer to PCIe host info
+ * @clk: pointer to the port clock gate
* @phy: pointer to PHY control block
* @pcie_rst: pointer to port reset control
* @gpio_rst: gpio reset
* @slot: port slot
- * @irq: GIC irq
* @enabled: indicates if port is enabled
*/
struct mt7621_pcie_port {
void __iomem *base;
struct list_head list;
struct mt7621_pcie *pcie;
+ struct clk *clk;
struct phy *phy;
struct reset_control *pcie_rst;
struct gpio_desc *gpio_rst;
u32 slot;
- int irq;
bool enabled;
};
/**
* struct mt7621_pcie - PCIe host information
* @base: IO Mapped Register Base
- * @io: IO resource
- * @mem: pointer to non-prefetchable memory resource
* @dev: Pointer to PCIe device
- * @io_map_base: virtual memory base address for io
* @ports: pointer to PCIe port information
- * @irq_map: irq mapping info according pcie link status
* @resets_inverted: depends on chip revision
* reset lines are inverted.
*/
struct mt7621_pcie {
void __iomem *base;
struct device *dev;
- struct resource io;
- struct resource *mem;
- unsigned long io_map_base;
struct list_head ports;
- int irq_map[PCIE_P2P_CNT];
bool resets_inverted;
};
static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
{
- return readl(pcie->base + reg);
+ return readl_relaxed(pcie->base + reg);
}
static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
{
- writel(val, pcie->base + reg);
+ writel_relaxed(val, pcie->base + reg);
}
static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
{
- u32 val = readl(pcie->base + reg);
+ u32 val = readl_relaxed(pcie->base + reg);
val &= ~clr;
val |= set;
- writel(val, pcie->base + reg);
+ writel_relaxed(val, pcie->base + reg);
}
static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
{
- return readl(port->base + reg);
+ return readl_relaxed(port->base + reg);
}
static inline void pcie_port_write(struct mt7621_pcie_port *port,
u32 val, u32 reg)
{
- writel(val, port->base + reg);
+ writel_relaxed(val, port->base + reg);
}
static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
@@ -177,7 +142,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
- writel(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
+ writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
}
@@ -222,16 +187,6 @@ static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0;
}
-static inline void mt7621_pcie_port_clk_enable(struct mt7621_pcie_port *port)
-{
- rt_sysc_m32(0, PCIE_PORT_CLK_EN(port->slot), RALINK_CLKCFG1);
-}
-
-static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
-{
- rt_sysc_m32(PCIE_PORT_CLK_EN(port->slot), 0, RALINK_CLKCFG1);
-}
-
static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
{
struct mt7621_pcie *pcie = port->pcie;
@@ -252,96 +207,46 @@ static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
reset_control_assert(port->pcie_rst);
}
-static void setup_cm_memory_region(struct mt7621_pcie *pcie)
+static int setup_cm_memory_region(struct pci_host_bridge *host)
{
- struct resource *mem_resource = pcie->mem;
+ struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
struct device *dev = pcie->dev;
+ struct resource_entry *entry;
resource_size_t mask;
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry) {
+ dev_err(dev, "Cannot get memory resource\n");
+ return -EINVAL;
+ }
+
if (mips_cps_numiocu(0)) {
/*
* FIXME: hardware doesn't accept mask values with 1s after
* 0s (e.g. 0xffef), so it would be great to warn if that's
* about to happen
*/
- mask = ~(mem_resource->end - mem_resource->start);
+ mask = ~(entry->res->end - entry->res->start);
- write_gcr_reg1_base(mem_resource->start);
+ write_gcr_reg1_base(entry->res->start);
write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
(unsigned long long)read_gcr_reg1_base(),
(unsigned long long)read_gcr_reg1_mask());
}
-}
-
-static int mt7621_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
-{
- struct mt7621_pcie *pcie = pdev->bus->sysdata;
- struct device *dev = pcie->dev;
- int irq = pcie->irq_map[slot];
-
- dev_info(dev, "bus=%d slot=%d irq=%d\n", pdev->bus->number, slot, irq);
- return irq;
-}
-
-static int mt7621_pci_parse_request_of_pci_ranges(struct pci_host_bridge *host)
-{
- struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
- struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node;
- struct of_pci_range_parser parser;
- struct resource_entry *entry;
- struct of_pci_range range;
- LIST_HEAD(res);
-
- if (of_pci_range_parser_init(&parser, node)) {
- dev_err(dev, "missing \"ranges\" property\n");
- return -EINVAL;
- }
-
- /*
- * IO_SPACE_LIMIT for MIPS is 0xffff but this platform uses IO at
- * upper address 0x001e160000. of_pci_range_to_resource does not work
- * well for MIPS platforms that don't define PCI_IOBASE, so set the IO
- * resource manually instead.
- */
- pcie->io.name = node->full_name;
- pcie->io.parent = pcie->io.child = pcie->io.sibling = NULL;
- for_each_of_pci_range(&parser, &range) {
- switch (range.flags & IORESOURCE_TYPE_BITS) {
- case IORESOURCE_IO:
- pcie->io_map_base =
- (unsigned long)ioremap(range.cpu_addr,
- range.size);
- pcie->io.flags = range.flags;
- pcie->io.start = range.cpu_addr;
- pcie->io.end = range.cpu_addr + range.size - 1;
- set_io_port_base(pcie->io_map_base);
- break;
- }
- }
-
- entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
- if (!entry) {
- dev_err(dev, "Cannot get memory resource");
- return -EINVAL;
- }
-
- pcie->mem = entry->res;
- pci_add_resource(&res, &pcie->io);
- pci_add_resource(&res, entry->res);
- list_splice_init(&res, &host->windows);
return 0;
}
static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
+ struct device_node *node,
int slot)
{
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
char name[10];
+ int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
@@ -351,38 +256,45 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
if (IS_ERR(port->base))
return PTR_ERR(port->base);
- snprintf(name, sizeof(name), "pcie%d", slot);
- port->pcie_rst = devm_reset_control_get_exclusive(dev, name);
+ port->clk = devm_get_clk_from_child(dev, node, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(dev, "failed to get pcie%d clock\n", slot);
+ return PTR_ERR(port->clk);
+ }
+
+ port->pcie_rst = of_reset_control_get_exclusive(node, NULL);
if (PTR_ERR(port->pcie_rst) == -EPROBE_DEFER) {
dev_err(dev, "failed to get pcie%d reset control\n", slot);
return PTR_ERR(port->pcie_rst);
}
snprintf(name, sizeof(name), "pcie-phy%d", slot);
- port->phy = devm_phy_get(dev, name);
- if (IS_ERR(port->phy) && slot != 1)
- return PTR_ERR(port->phy);
+ port->phy = devm_of_phy_get(dev, node, name);
+ if (IS_ERR(port->phy)) {
+ dev_err(dev, "failed to get pcie-phy%d\n", slot);
+ err = PTR_ERR(port->phy);
+ goto remove_reset;
+ }
port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot,
GPIOD_OUT_LOW);
if (IS_ERR(port->gpio_rst)) {
dev_err(dev, "Failed to get GPIO for PCIe%d\n", slot);
- return PTR_ERR(port->gpio_rst);
+ err = PTR_ERR(port->gpio_rst);
+ goto remove_reset;
}
port->slot = slot;
port->pcie = pcie;
- port->irq = platform_get_irq(pdev, slot);
- if (port->irq < 0) {
- dev_err(dev, "Failed to get IRQ for PCIe%d\n", slot);
- return -ENXIO;
- }
-
INIT_LIST_HEAD(&port->list);
list_add_tail(&port->list, &pcie->ports);
return 0;
+
+remove_reset:
+ reset_control_put(port->pcie_rst);
+ return err;
}
static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
@@ -408,7 +320,7 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
slot = PCI_SLOT(err);
- err = mt7621_pcie_parse_port(pcie, slot);
+ err = mt7621_pcie_parse_port(pcie, child, slot);
if (err) {
of_node_put(child);
return err;
@@ -455,7 +367,7 @@ static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie)
mt7621_rst_gpio_pcie_assert(port);
}
- mdelay(PERST_DELAY_MS);
+ msleep(PERST_DELAY_MS);
}
static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie)
@@ -473,7 +385,7 @@ static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list)
mt7621_rst_gpio_pcie_deassert(port);
- mdelay(PERST_DELAY_MS);
+ msleep(PERST_DELAY_MS);
}
static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
@@ -482,8 +394,6 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
struct mt7621_pcie_port *port, *tmp;
int err;
- rt_sysc_m32(PERST_MODE_MASK, PERST_MODE_GPIO, MT7621_GPIO_MODE);
-
mt7621_pcie_reset_assert(pcie);
mt7621_pcie_reset_rc_deassert(pcie);
@@ -512,7 +422,6 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
slot);
mt7621_control_assert(port);
- mt7621_pcie_port_clk_disable(port);
port->enabled = false;
if (slot == 0) {
@@ -530,7 +439,6 @@ static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
{
struct mt7621_pcie *pcie = port->pcie;
u32 slot = port->slot;
- u32 offset = MT7621_PCIE_OFFSET + (slot * MT7621_NEXT_PORT);
u32 val;
/* enable pcie interrupt */
@@ -539,95 +447,52 @@ static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
/* map 2G DDR region */
- pcie_write(pcie, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
- offset + RALINK_PCI_BAR0SETUP_ADDR);
+ pcie_port_write(port, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+ PCI_BASE_ADDRESS_0);
/* configure class code and revision ID */
- pcie_write(pcie, PCIE_CLASS_CODE | PCIE_REVISION_ID,
- offset + RALINK_PCI_CLASS);
+ pcie_port_write(port, PCIE_CLASS_CODE | PCIE_REVISION_ID,
+ RALINK_PCI_CLASS);
+
+ /* configure RC FTS number to 250 when it leaves L0s */
+ val = read_config(pcie, slot, PCIE_FTS_NUM);
+ val &= ~PCIE_FTS_NUM_MASK;
+ val |= PCIE_FTS_NUM_L0(0x50);
+ write_config(pcie, slot, PCIE_FTS_NUM, val);
}
-static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
+static int mt7621_pcie_enable_ports(struct pci_host_bridge *host)
{
+ struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
struct device *dev = pcie->dev;
struct mt7621_pcie_port *port;
- u8 num_slots_enabled = 0;
- u32 slot;
- u32 val;
+ struct resource_entry *entry;
+ int err;
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_IO);
+ if (!entry) {
+ dev_err(dev, "Cannot get io resource\n");
+ return -EINVAL;
+ }
/* Setup MEMWIN and IOWIN */
pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
- pcie_write(pcie, pcie->io.start, RALINK_PCI_IOBASE);
+ pcie_write(pcie, entry->res->start, RALINK_PCI_IOBASE);
list_for_each_entry(port, &pcie->ports, list) {
if (port->enabled) {
- mt7621_pcie_port_clk_enable(port);
+ err = clk_prepare_enable(port->clk);
+ if (err) {
+ dev_err(dev, "enabling clk pcie%d\n",
+ port->slot);
+ return err;
+ }
+
mt7621_pcie_enable_port(port);
dev_info(dev, "PCIE%d enabled\n", port->slot);
- num_slots_enabled++;
}
}
- for (slot = 0; slot < num_slots_enabled; slot++) {
- val = read_config(pcie, slot, PCI_COMMAND);
- val |= PCI_COMMAND_MASTER;
- write_config(pcie, slot, PCI_COMMAND, val);
- /* configure RC FTS number to 250 when it leaves L0s */
- val = read_config(pcie, slot, PCIE_FTS_NUM);
- val &= ~PCIE_FTS_NUM_MASK;
- val |= PCIE_FTS_NUM_L0(0x50);
- write_config(pcie, slot, PCIE_FTS_NUM, val);
- }
-}
-
-static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
-{
- u32 pcie_link_status = 0;
- u32 n = 0;
- int i = 0;
- u32 p2p_br_devnum[PCIE_P2P_CNT];
- int irqs[PCIE_P2P_CNT];
- struct mt7621_pcie_port *port;
-
- list_for_each_entry(port, &pcie->ports, list) {
- u32 slot = port->slot;
-
- irqs[i++] = port->irq;
- if (port->enabled)
- pcie_link_status |= BIT(slot);
- }
-
- if (pcie_link_status == 0)
- return -1;
-
- /*
- * Assign device numbers from zero to the enabled ports,
- * then assigning remaining device numbers to any disabled
- * ports.
- */
- for (i = 0; i < PCIE_P2P_CNT; i++)
- if (pcie_link_status & BIT(i))
- p2p_br_devnum[i] = n++;
-
- for (i = 0; i < PCIE_P2P_CNT; i++)
- if ((pcie_link_status & BIT(i)) == 0)
- p2p_br_devnum[i] = n++;
-
- pcie_rmw(pcie, RALINK_PCI_PCICFG_ADDR,
- PCIE_P2P_BR_DEVNUM_MASK_FULL,
- (p2p_br_devnum[0] << PCIE_P2P_BR_DEVNUM0_SHIFT) |
- (p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) |
- (p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT));
-
- /* Assign IRQs */
- n = 0;
- for (i = 0; i < PCIE_P2P_CNT; i++)
- if (pcie_link_status & BIT(i))
- pcie->irq_map[n++] = irqs[i];
-
- for (i = n; i < PCIE_P2P_CNT; i++)
- pcie->irq_map[i] = -1;
-
return 0;
}
@@ -636,9 +501,7 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host)
struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
host->ops = &mt7621_pci_ops;
- host->map_irq = mt7621_map_irq;
host->sysdata = pcie;
-
return pci_host_probe(host);
}
@@ -650,6 +513,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct soc_device_attribute *attr;
+ struct mt7621_pcie_port *port;
struct mt7621_pcie *pcie;
struct pci_host_bridge *bridge;
int err;
@@ -676,33 +540,36 @@ static int mt7621_pci_probe(struct platform_device *pdev)
return err;
}
- err = mt7621_pci_parse_request_of_pci_ranges(bridge);
+ mt7621_pcie_init_ports(pcie);
+
+ err = mt7621_pcie_enable_ports(bridge);
if (err) {
- dev_err(dev, "Error requesting pci resources from ranges");
- return err;
+ dev_err(dev, "Error enabling pcie ports\n");
+ goto remove_resets;
}
- /* set resources limits */
- ioport_resource.start = pcie->io.start;
- ioport_resource.end = pcie->io.end;
-
- mt7621_pcie_init_ports(pcie);
-
- err = mt7621_pcie_init_virtual_bridges(pcie);
+ err = setup_cm_memory_region(bridge);
if (err) {
- dev_err(dev, "Nothing is connected in virtual bridges. Exiting...");
- return 0;
+ dev_err(dev, "Error setting up iocu mem regions\n");
+ goto remove_resets;
}
- mt7621_pcie_enable_ports(pcie);
+ return mt7621_pcie_register_host(bridge);
- setup_cm_memory_region(pcie);
+remove_resets:
+ list_for_each_entry(port, &pcie->ports, list)
+ reset_control_put(port->pcie_rst);
- err = mt7621_pcie_register_host(bridge);
- if (err) {
- dev_err(dev, "Error registering host\n");
- return err;
- }
+ return err;
+}
+
+static int mt7621_pci_remove(struct platform_device *pdev)
+{
+ struct mt7621_pcie *pcie = platform_get_drvdata(pdev);
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ reset_control_put(port->pcie_rst);
return 0;
}
@@ -715,10 +582,10 @@ MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
static struct platform_driver mt7621_pci_driver = {
.probe = mt7621_pci_probe,
+ .remove = mt7621_pci_remove,
.driver = {
.name = "mt7621-pci",
.of_match_table = of_match_ptr(mt7621_pci_ids),
},
};
-
builtin_platform_driver(mt7621_pci_driver);
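Among other things, this rework switches the register helpers to the _relaxed MMIO accessors, which omit the ordering guarantees of readl()/writel() and are generally appropriate when the accesses need not be ordered against DMA memory. A minimal sketch of the wrapper pattern, with a hypothetical device structure:

#include <linux/io.h>
#include <linux/types.h>

struct my_pcie {
        void __iomem *base;
};

static inline u32 my_read(struct my_pcie *p, u32 reg)
{
        return readl_relaxed(p->base + reg);
}

/* Read-modify-write using the relaxed accessors, mirroring pcie_rmw(). */
static inline void my_rmw(struct my_pcie *p, u32 reg, u32 clr, u32 set)
{
        u32 val = my_read(p, reg);

        val &= ~clr;
        val |= set;
        writel_relaxed(val, p->base + reg);
}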
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index a80996b2f5ce..990d15c31a13 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -99,6 +99,7 @@ static const struct mfd_cell nvec_devices[] = {
* nvec_register_notifier - Register a notifier with nvec
* @nvec: A &struct nvec_chip
* @nb: The notifier block to register
+ * @events: Unused
*
* Registers a notifier with @nvec. The notifier will be added to an atomic
* notifier chain that is called for all received messages except those that
@@ -125,7 +126,7 @@ int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
-/**
+/*
* nvec_status_notifier - The final notifier
*
* Prints a message about control events not handled in the notifier
@@ -343,8 +344,8 @@ static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
/**
* nvec_event_mask - fill the command string with event bitfield
- * ev: points to event command string
- * mask: bit to insert into the event mask
+ * @ev: points to event command string
+ * @mask: bit to insert into the event mask
*
* Configure event command expects a 32 bit bitfield which describes
* which events to enable. The bitfield has the following structure
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 9c71ad5af7b9..1ad94c5060b5 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -504,7 +504,6 @@ skip_xmit:
* cvm_oct_xmit_pow - transmit a packet to the POW
* @skb: Packet to send
* @dev: Device info structure
-
* Returns Always returns zero
*/
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 6d8e9a481786..7284cb4ac395 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/olpc-ec.h>
#include <asm/tsc.h>
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index b70570b7b467..87d60115ac67 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -553,7 +553,7 @@ static int qlge_run_loopback_test(struct qlge_adapter *qdev)
atomic_inc(&qdev->lb_count);
}
/* Give queue time to settle before testing results. */
- msleep(2);
+ usleep_range(2000, 2100);
qlge_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
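The msleep(2) to usleep_range(2000, 2100) switch follows the kernel's timer guidance: msleep() of a few milliseconds is jiffies-based and can oversleep considerably, while usleep_range() uses hrtimers and lets the caller state how much slack is acceptable. A minimal sketch of the pattern:

#include <linux/delay.h>

static void settle_before_read(void)
{
        /* ~2 ms pause; the upper bound gives the scheduler coalescing slack */
        usleep_range(2000, 2100);
}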
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index c9dc6a852af4..19a02e958865 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -1389,7 +1389,7 @@ static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
}
}
-/**
+/*
* qlge_update_mac_hdr_len - helper routine to update the mac header length
* based on vlan tags if present
*/
@@ -2235,7 +2235,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
}
}
-/**
+/*
* qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
* based on the features to enable/disable hardware vlan accel
*/
@@ -2796,12 +2796,8 @@ static int qlge_init_bq(struct qlge_bq *bq)
bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
&bq->base_dma, GFP_ATOMIC);
- if (!bq->base) {
- netif_err(qdev, ifup, qdev->ndev,
- "ring %u %s allocation failed.\n", rx_ring->cq_id,
- bq_type_name[bq->type]);
+ if (!bq->base)
return -ENOMEM;
- }
bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
GFP_KERNEL);
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 7da911c2ab89..28b936e8be0a 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -2,7 +2,6 @@
r8188eu-y := \
core/rtw_ap.o \
core/rtw_cmd.o \
- core/rtw_debug.o \
core/rtw_efuse.o \
core/rtw_ieee80211.o \
core/rtw_ioctl_set.o \
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 008b60e72758..b817aa8b9de4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -164,9 +164,9 @@ static u8 chk_sta_is_alive(struct sta_info *psta)
void expire_timeout_chk(struct adapter *padapter)
{
- struct list_head *phead, *plist;
+ struct list_head *phead;
u8 updated = 0;
- struct sta_info *psta = NULL;
+ struct sta_info *psta, *temp;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 chk_alive_num = 0;
char chk_alive_list[NUM_STA];
@@ -175,22 +175,14 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
phead = &pstapriv->auth_list;
- plist = phead->next;
-
/* check auth_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, auth_list);
- plist = plist->next;
-
+ list_for_each_entry_safe(psta, temp, phead, auth_list) {
if (psta->expire_to > 0) {
psta->expire_to--;
if (psta->expire_to == 0) {
list_del_init(&psta->auth_list);
pstapriv->auth_list_cnt--;
- DBG_88E("auth expire %6ph\n",
- psta->hwaddr);
-
spin_unlock_bh(&pstapriv->auth_list_lock);
spin_lock_bh(&pstapriv->sta_hash_lock);
@@ -208,13 +200,8 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = phead->next;
-
/* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = plist->next;
-
+ list_for_each_entry_safe(psta, temp, phead, asoc_list) {
if (chk_sta_is_alive(psta) || !psta->expire_to) {
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
@@ -264,20 +251,13 @@ void expire_timeout_chk(struct adapter *padapter)
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
- DBG_88E("asoc expire %pM, state = 0x%x\n",
- (psta->hwaddr), psta->state);
updated = ap_free_sta(padapter, psta, true,
WLAN_REASON_DEAUTH_LEAVING);
} else {
/* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt) &&
- padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME / pstapriv->asoc_list_cnt / 2)) {
- DBG_88E("%s sta:%pM, sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n", __func__,
- (psta->hwaddr), psta->sleepq_len,
- padapter->xmitpriv.free_xmitframe_cnt,
- pstapriv->asoc_list_cnt);
+ padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME / pstapriv->asoc_list_cnt / 2))
wakeup_sta_to_xmit(padapter, psta);
- }
}
}
@@ -309,21 +289,16 @@ void expire_timeout_chk(struct adapter *padapter)
psta->keep_alive_trycnt++;
if (ret == _SUCCESS) {
- DBG_88E("asoc check, sta(%pM) is alive\n",
- (psta->hwaddr));
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
continue;
} else if (psta->keep_alive_trycnt <= 3) {
- DBG_88E("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
psta->expire_to = 1;
continue;
}
psta->keep_alive_trycnt = 0;
- DBG_88E("asoc expire %pM, state = 0x%x\n",
- psta->hwaddr, psta->state);
spin_lock_bh(&pstapriv->asoc_list_lock);
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
@@ -394,9 +369,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
- DBG_88E("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = 0x%x\n",
- __func__, psta->mac_id, raid, tx_ra_bitmap, arg);
-
/* bitmap[0:27] = tx_rate_bitmap */
/* bitmap[28:31]= Rate Adaptive id */
/* arg[0:4] = macid */
@@ -410,8 +382,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
psta->raid = raid;
psta->init_rate = init_rate;
- } else {
- DBG_88E("station aid %d exceed the max number\n", psta->aid);
}
}
@@ -467,8 +437,6 @@ static void update_bmc_sta(struct adapter *padapter)
arg = psta->mac_id & 0x1f;
arg |= BIT(7);
tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
- DBG_88E("%s, mask = 0x%x, arg = 0x%x\n", __func__,
- tx_ra_bitmap, arg);
/* bitmap[0:27] = tx_rate_bitmap */
/* bitmap[28:31]= Rate Adaptive id */
@@ -486,8 +454,6 @@ static void update_bmc_sta(struct adapter *padapter)
psta->state = _FW_LINKED;
spin_unlock_bh(&psta->lock);
- } else {
- DBG_88E("add_RATid_bmc_sta error!\n");
}
}
@@ -582,8 +548,6 @@ static void update_hw_ht_param(struct adapter *padapter)
/* Config SM Power Save setting */
pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
- if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
- DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
}
static void start_bss_network(struct adapter *padapter, u8 *pbuf)
@@ -695,9 +659,6 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
*/
set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
- DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode,
- cur_ch_offset);
-
/* */
pmlmeext->cur_channel = cur_channel;
pmlmeext->cur_bwmode = cur_bwmode;
@@ -717,8 +678,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
update_beacon(padapter, WLAN_EID_TIM, NULL, false);
/* issue beacon frame */
- if (send_beacon(padapter) == _FAIL)
- DBG_88E("send_beacon, fail!\n");
+ send_beacon(padapter);
}
/* update bc/mc sta_info */
@@ -756,8 +716,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* ht_capab, ht_oper */
/* WPS IE */
- DBG_88E("%s, len =%d\n", __func__, len);
-
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return _FAIL;
@@ -1009,8 +967,6 @@ void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- DBG_88E("%s, mode =%d\n", __func__, mode);
-
pacl_list->mode = mode;
}
@@ -1024,24 +980,18 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
-
if ((NUM_ACL - 1) < pacl_list->num)
return -1;
spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
- plist = phead->next;
-
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
+ list_for_each(plist, phead) {
+ paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid) {
added = true;
- DBG_88E("%s, sta has been added\n", __func__);
break;
}
}
@@ -1072,8 +1022,6 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
-
spin_unlock_bh(&pacl_node_q->lock);
return ret;
@@ -1081,23 +1029,16 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
- struct list_head *plist, *phead;
- struct rtw_wlan_acl_node *paclnode;
+ struct list_head *phead;
+ struct rtw_wlan_acl_node *paclnode, *temp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
-
spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
- plist = phead->next;
-
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
-
+ list_for_each_entry_safe(paclnode, temp, phead, list) {
if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid) {
paclnode->valid = false;
@@ -1111,7 +1052,6 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
spin_unlock_bh(&pacl_node_q->lock);
- DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
return 0;
}
@@ -1124,8 +1064,6 @@ static void update_bcn_erpinfo_ie(struct adapter *padapter)
unsigned char *p, *ie = pnetwork->ies;
u32 len = 0;
- DBG_88E("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
-
if (!pmlmeinfo->ERP_enable)
return;
@@ -1205,8 +1143,6 @@ static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
if (!memcmp(WPS_OUI, oui, 4))
update_bcn_wps_ie(padapter);
- else
- DBG_88E("unknown OUI type!\n");
}
void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
@@ -1269,9 +1205,6 @@ static int rtw_ht_operation_update(struct adapter *padapter)
if (pmlmepriv->htpriv.ht_option)
return 0;
- DBG_88E("%s current operation mode = 0x%X\n",
- __func__, pmlmepriv->ht_op_mode);
-
if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
pmlmepriv->num_sta_ht_no_gf) {
pmlmepriv->ht_op_mode |=
@@ -1321,9 +1254,6 @@ static int rtw_ht_operation_update(struct adapter *padapter)
op_mode_changes++;
}
- DBG_88E("%s new operation mode = 0x%X changes =%d\n",
- __func__, pmlmepriv->ht_op_mode, op_mode_changes);
-
return op_mode_changes;
}
@@ -1338,13 +1268,9 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = phead->next;
-
/* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
+ list_for_each(plist, phead) {
+ psta = list_entry(plist, struct sta_info, asoc_list);
VCS_update(padapter, psta);
}
@@ -1438,9 +1364,6 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
if (psta->flags & WLAN_STA_HT) {
u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
- DBG_88E("HT: STA %pM HT Capabilities Info: 0x%04x\n",
- (psta->hwaddr), ht_capab);
-
if (psta->no_ht_set) {
psta->no_ht_set = 0;
pmlmepriv->num_sta_no_ht--;
@@ -1451,9 +1374,6 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
psta->no_ht_gf_set = 1;
pmlmepriv->num_sta_ht_no_gf++;
}
- DBG_88E("%s STA %pM - no greenfield, num of non-gf stations %d\n",
- __func__, (psta->hwaddr),
- pmlmepriv->num_sta_ht_no_gf);
}
if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH_20_40) == 0) {
@@ -1461,20 +1381,12 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
psta->ht_20mhz_set = 1;
pmlmepriv->num_sta_ht_20mhz++;
}
- DBG_88E("%s STA %pM - 20 MHz HT, num of 20MHz HT STAs %d\n",
- __func__, (psta->hwaddr),
- pmlmepriv->num_sta_ht_20mhz);
}
} else {
if (!psta->no_ht_set) {
psta->no_ht_set = 1;
pmlmepriv->num_sta_no_ht++;
}
- if (pmlmepriv->htpriv.ht_option) {
- DBG_88E("%s STA %pM - no HT, num of non-HT stations %d\n",
- __func__, (psta->hwaddr),
- pmlmepriv->num_sta_no_ht);
- }
}
if (rtw_ht_operation_update(padapter) > 0) {
@@ -1484,8 +1396,6 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
/* update associated stations cap. */
associated_clients_update(padapter, beacon_updated);
-
- DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
}
u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
@@ -1548,8 +1458,6 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
/* update associated stations cap. */
- DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
-
return beacon_updated;
}
@@ -1595,28 +1503,20 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
int rtw_sta_flush(struct adapter *padapter)
{
- struct list_head *phead, *plist;
- struct sta_info *psta = NULL;
+ struct list_head *phead;
+ struct sta_info *psta, *temp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
-
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
return 0;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = phead->next;
-
/* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
+ list_for_each_entry_safe(psta, temp, phead, asoc_list) {
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
@@ -1716,8 +1616,7 @@ void start_ap_mode(struct adapter *padapter)
void stop_ap_mode(struct adapter *padapter)
{
- struct list_head *phead, *plist;
- struct rtw_wlan_acl_node *paclnode;
+ struct rtw_wlan_acl_node *paclnode, *n;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1737,12 +1636,7 @@ void stop_ap_mode(struct adapter *padapter)
/* for ACL */
spin_lock_bh(&pacl_node_q->lock);
- phead = get_list_head(pacl_node_q);
- plist = phead->next;
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
-
+ list_for_each_entry_safe(paclnode, n, &pacl_node_q->queue, list) {
if (paclnode->valid) {
paclnode->valid = false;
@@ -1753,8 +1647,6 @@ void stop_ap_mode(struct adapter *padapter)
}
spin_unlock_bh(&pacl_node_q->lock);
- DBG_88E("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
-
rtw_sta_flush(padapter);
/* free_assoc_sta_resources */
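Several loops above move from open-coded list walking to list_for_each_entry() and list_for_each_entry_safe(). The _safe variant caches the next node before the body runs, which is what makes list_del_init() on the current entry legal inside the loop. A minimal self-contained sketch with a hypothetical node type:

#include <linux/list.h>
#include <linux/slab.h>

struct node {
        struct list_head link;
        int expired;
};

static void reap_expired(struct list_head *head)
{
        struct node *n, *tmp;

        list_for_each_entry_safe(n, tmp, head, link) {
                if (!n->expired)
                        continue;
                /* Safe: tmp already points at the next node. */
                list_del_init(&n->link);
                kfree(n);
        }
}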
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 6728391d39e3..eb89a52aa4e3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -37,7 +37,7 @@ static struct _cmd_callback rtw_cmd_callback[] = {
{_SiteSurvey_CMD_, &rtw_survey_cmd_callback},
{_SetAuth_CMD_, NULL},
{_SetKey_CMD_, NULL},
- {_SetStaKey_CMD_, &rtw_setstaKey_cmdrsp_callback},
+ {_SetStaKey_CMD_, NULL},
{_SetAssocSta_CMD_, &rtw_setassocsta_cmdrsp_callback},
{_AddBAReq_CMD_, NULL},
{_SetChannel_CMD_, NULL},
@@ -52,13 +52,11 @@ static struct _cmd_callback rtw_cmd_callback[] = {
* No irqsave is necessary.
*/
-int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+void rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
init_completion(&pcmdpriv->cmd_queue_comp);
- init_completion(&pcmdpriv->terminate_cmdthread_comp);
_rtw_init_queue(&pcmdpriv->cmd_queue);
- return _SUCCESS;
}
/*
@@ -74,17 +72,12 @@ static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
unsigned long irqL;
- if (!obj)
- goto exit;
-
spin_lock_irqsave(&queue->lock, irqL);
list_add_tail(&obj->list, &queue->queue);
spin_unlock_irqrestore(&queue->lock, irqL);
-exit:
-
return _SUCCESS;
}
@@ -104,11 +97,12 @@ struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue)
static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
+ struct adapter *padapter = container_of(pcmdpriv, struct adapter, cmdpriv);
u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
/* To decide allow or not */
- if ((pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect) &&
- (!pcmdpriv->padapter->registrypriv.usbss_enable)) {
+ if ((padapter->pwrctrlpriv.bHWPwrPindetect) &&
+ (!padapter->registrypriv.usbss_enable)) {
if (cmd_obj->cmdcode == _Set_Drv_Extra_CMD_) {
struct drvextra_cmd_parm *pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)cmd_obj->parmbuf;
@@ -120,8 +114,8 @@ static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
if (cmd_obj->cmdcode == _SetChannelPlan_CMD_)
bAllow = true;
- if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
- !pcmdpriv->cmdthd_running) /* com_thread not running */
+ if ((!padapter->hw_init_completed && !bAllow) ||
+ !padapter->cmdThread) /* com_thread not running */
return _FAIL;
return _SUCCESS;
}
@@ -129,7 +123,7 @@ static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
int res = _FAIL;
- struct adapter *padapter = pcmdpriv->padapter;
+ struct adapter *padapter = container_of(pcmdpriv, struct adapter, cmdpriv);
if (!cmd_obj)
goto exit;
@@ -181,20 +175,9 @@ int rtw_cmd_thread(void *context)
allow_signal(SIGTERM);
- pcmdpriv->cmdthd_running = true;
- complete(&pcmdpriv->terminate_cmdthread_comp);
-
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- ("start r871x %s !!!!\n", __func__));
-
- while (1) {
- if (padapter->bDriverStopped ||
- padapter->bSurpriseRemoved) {
- DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
- __func__, padapter->bDriverStopped,
- padapter->bSurpriseRemoved, __LINE__);
+ do {
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
break;
- }
pcmd = rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
if (!pcmd) {
@@ -223,33 +206,23 @@ int rtw_cmd_thread(void *context)
/* call callback function for post-processed */
if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n",
- pcmd_callback, pcmd->cmdcode));
- } else {
+ if (pcmd_callback) {
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
pcmd_callback(pcmd->padapter, pcmd);
}
- } else {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("%s: cmdcode = 0x%x callback not defined!\n",
- __func__, pcmd->cmdcode));
}
rtw_free_cmd_obj(pcmd);
if (signal_pending(current))
flush_signals(current);
- }
- pcmdpriv->cmdthd_running = false;
+ } while (!kthread_should_stop());
/* free all cmd_obj resources */
while ((pcmd = rtw_dequeue_cmd(&pcmdpriv->cmd_queue)))
rtw_free_cmd_obj(pcmd);
- complete(&pcmdpriv->terminate_cmdthread_comp);
-
- complete_and_exit(NULL, 0);
+ padapter->cmdThread = NULL;
+ return 0;
}
/*
@@ -282,8 +255,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
rtw_free_network_queue(padapter, false);
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("%s: flush network queue\n", __func__));
-
init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, _SiteSurvey_CMD_);
psurveyPara->scan_mode = pmlmepriv->scan_mode;
@@ -342,19 +313,11 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
{
struct cmd_obj *pcmd;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
u8 res = _SUCCESS;
led_control_8188eu(padapter, LED_CTL_START_TO_LINK);
- if (pmlmepriv->assoc_ssid.ssid_length == 0)
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
- else
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
-
pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
@@ -392,12 +355,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
led_control_8188eu(padapter, LED_CTL_START_TO_LINK);
- if (pmlmepriv->assoc_ssid.ssid_length == 0)
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
- else
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_,
- ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.ssid));
-
pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
@@ -428,9 +385,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
res = _FAIL;
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("%s :psecnetwork == NULL!!!\n", __func__));
-
goto exit;
}
@@ -502,8 +456,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
else
padapter->pwrctrlpriv.smart_ps = padapter->registrypriv.smart_ps;
- DBG_88E("%s: smart_ps =%d\n", __func__, padapter->pwrctrlpriv.smart_ps);
-
pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
INIT_LIST_HEAD(&pcmd->list);
@@ -526,8 +478,6 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
struct cmd_priv *cmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+%s\n", __func__));
-
/* prepare cmd parameter */
param = kzalloc(sizeof(*param), GFP_ATOMIC);
if (!param) {
@@ -745,8 +695,6 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
u8 res = _SUCCESS;
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+%s\n", __func__));
-
/* check input parameter */
if (!rtw_is_channel_plan_valid(chplan)) {
res = _FAIL;
@@ -912,39 +860,28 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- if (enqueue) {
- ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
- pdrvextra_cmd_parm->type_size = lps_ctrl_type;
- pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
- } else {
+ if (!enqueue) {
lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
+ return _SUCCESS;
}
-exit:
+ ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
+ pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
+ if (!ph2c || !pdrvextra_cmd_parm) {
+ kfree(ph2c);
+ kfree(pdrvextra_cmd_parm);
+ return _FAIL;
+ }
- return res;
+ pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
+ pdrvextra_cmd_parm->type_size = lps_ctrl_type;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
+ return rtw_enqueue_cmd(pcmdpriv, ph2c);
}
static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
@@ -1172,8 +1109,6 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
} else if (pcmd->res != H2C_SUCCESS) {
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(1));
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
}
}
@@ -1185,9 +1120,6 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
spin_lock_bh(&pmlmepriv->lock);
set_fwstate(pmlmepriv, _FW_LINKED);
spin_unlock_bh(&pmlmepriv->lock);
-
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
}
}
@@ -1201,8 +1133,6 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
} else if (pcmd->res != H2C_SUCCESS) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("********Error:rtw_select_and_join_from_scanned_queue Wait Sema Fail ************\n"));
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
}
@@ -1217,8 +1147,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
if (pcmd->res != H2C_SUCCESS) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\n **** Error: %s Fail ****\n\n.", __func__));
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
}
@@ -1232,8 +1160,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
if (!psta) {
psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
goto createbss_cmd_fail;
}
}
@@ -1245,8 +1171,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
if (!pwlan) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
if (!pwlan) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto createbss_cmd_fail;
}
@@ -1274,18 +1198,6 @@ createbss_cmd_fail:
spin_unlock_bh(&pmlmepriv->lock);
}
-void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
- struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
-
- if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\nERROR: %s => can't get sta_info\n\n", __func__));
- }
-}
-
void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1294,11 +1206,8 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
- if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\nERROR: %s => can't get sta_info\n\n", __func__));
+ if (!psta)
return;
- }
psta->aid = passocsta_rsp->cam_id;
psta->mac_id = passocsta_rsp->cam_id;
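
Editor's sketch, not part of the patch: a minimal, simplified example of the two idioms the rtw_cmd.c hunks above adopt, namely recovering the owning adapter from its embedded cmd_priv with container_of() instead of keeping a back-pointer, and letting kthread_should_stop()/kthread_stop() manage the command thread's lifetime instead of a hand-rolled running flag and completion. Type layout and the call sites shown in the trailing comment are assumptions for illustration only.

/* Illustrative sketch only -- simplified types, not the driver's code. */
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched/signal.h>

struct cmd_priv {
	struct list_head cmd_queue;
};

struct adapter {
	struct cmd_priv cmdpriv;
	struct task_struct *cmdThread;
};

/* No stored back-pointer needed: cmd_priv is embedded in struct adapter. */
static struct adapter *cmdpriv_to_adapter(struct cmd_priv *pcmdpriv)
{
	return container_of(pcmdpriv, struct adapter, cmdpriv);
}

static int cmd_thread(void *context)
{
	struct adapter *padapter = context;

	allow_signal(SIGTERM);

	do {
		if (signal_pending(current))
			flush_signals(current);
		/* dequeue one command from padapter->cmdpriv.cmd_queue,
		 * run its handler and callback, then free it
		 */
	} while (!kthread_should_stop());

	padapter->cmdThread = NULL;
	return 0;
}

/* Hypothetical call sites:
 *	padapter->cmdThread = kthread_run(cmd_thread, padapter, "RTW_CMD_THREAD");
 *	...
 *	kthread_stop(padapter->cmdThread);
 */
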
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
deleted file mode 100644
index 1060837fe463..000000000000
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#define _RTW_DEBUG_C_
-
-#include <rtw_debug.h>
-#include <usb_ops_linux.h>
-
-int proc_get_drv_version(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- int len = 0;
-
- len += scnprintf(page + len, count - len, "%s\n", DRIVERVERSION);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_write_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- *eof = 1;
- return 0;
-}
-
-int proc_set_write_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = netdev_priv(dev);
- char tmp[32];
- u32 addr, val, len;
-
- if (count < 3) {
- DBG_88E("argument size is less than 3\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
-
- if (num != 3) {
- DBG_88E("invalid write_reg parameter!\n");
- return count;
- }
- switch (len) {
- case 1:
- usb_write8(padapter, addr, (u8)val);
- break;
- case 2:
- usb_write16(padapter, addr, (u16)val);
- break;
- case 4:
- usb_write32(padapter, addr, val);
- break;
- default:
- DBG_88E("error write length =%d", len);
- break;
- }
- }
- return count;
-}
-
-static u32 proc_get_read_addr = 0xeeeeeeee;
-static u32 proc_get_read_len = 0x4;
-
-int proc_get_read_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = netdev_priv(dev);
-
- int len = 0;
-
- if (proc_get_read_addr == 0xeeeeeeee) {
- *eof = 1;
- return len;
- }
-
- switch (proc_get_read_len) {
- case 1:
- len += scnprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n",
- proc_get_read_addr, usb_read8(padapter, proc_get_read_addr));
- break;
- case 2:
- len += scnprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n",
- proc_get_read_addr, usb_read16(padapter, proc_get_read_addr));
- break;
- case 4:
- len += scnprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n",
- proc_get_read_addr, usb_read32(padapter, proc_get_read_addr));
- break;
- default:
- len += scnprintf(page + len, count - len, "error read length=%d\n",
- proc_get_read_len);
- break;
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_set_read_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char tmp[16];
- u32 addr, len;
-
- if (count < 2) {
- DBG_88E("argument size is less than 2\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x %x", &addr, &len);
-
- if (num != 2) {
- DBG_88E("invalid read_reg parameter!\n");
- return count;
- }
-
- proc_get_read_addr = addr;
-
- proc_get_read_len = len;
- }
-
- return count;
-}
-
-int proc_get_adapter_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = netdev_priv(dev);
- int len = 0;
-
- len += scnprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
- padapter->bSurpriseRemoved,
- padapter->bDriverStopped);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_best_channel(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int len = 0;
- u32 i, best_channel_24G = 1, index_24G = 0;
-
- for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
- if (pmlmeext->channel_set[i].ChannelNum == 1)
- index_24G = i;
- }
-
- for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
- /* 2.4G */
- if (pmlmeext->channel_set[i].ChannelNum == 6) {
- if (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_24G].rx_count) {
- index_24G = i;
- best_channel_24G = pmlmeext->channel_set[i].ChannelNum;
- }
- }
-
- /* debug */
- len += scnprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
- pmlmeext->channel_set[i].ChannelNum,
- pmlmeext->channel_set[i].rx_count);
- }
-
- len += scnprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
-
- *eof = 1;
- return len;
-}
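
Editor's note, not part of the patch: the handlers in the deleted rtw_debug.c all use the legacy procfs read/write callback signature (page/start/offset/count/eof), which current kernels no longer support, so the file is dropped rather than converted. For reference, a roughly equivalent modern export of one of these dumps would normally go through debugfs and seq_file; the sketch below is an assumption-laden example (file name, parent dentry and the literal version string are placeholders), not something the patch adds.

/* Illustrative sketch only, placeholder names and values. */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int drv_version_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "v4.1.4_6773.20130222");	/* placeholder string */
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(drv_version);	/* generates drv_version_fops */

/* at init, with 'parent_dir' created elsewhere:
 *	debugfs_create_file("drv_version", 0444, parent_dir, NULL,
 *			    &drv_version_fops);
 */
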
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 9bb3ec0cd62f..80673a73c119 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -91,10 +91,8 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
tmp = kcalloc(EFUSE_MAX_SECTION_88E,
sizeof(void *) + EFUSE_MAX_WORD_UNIT * sizeof(u16),
GFP_KERNEL);
- if (!tmp) {
- DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
+ if (!tmp)
goto eFuseWord_failed;
- }
for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
tmp[i] = ((char *)(tmp + EFUSE_MAX_SECTION_88E)) + i * EFUSE_MAX_WORD_UNIT * sizeof(u16);
eFuseWord = (u16 **)tmp;
@@ -113,7 +111,6 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
efuse_utilized++;
eFuse_Addr++;
} else {
- DBG_88E("EFUSE is empty efuse_Addr-%d efuse_data =%x\n", eFuse_Addr, rtemp8);
goto exit;
}
@@ -220,8 +217,6 @@ static void efuse_read_phymap_from_txpktbuf(
if (bcnhead < 0) /* if not valid */
bcnhead = usb_read8(adapter, REG_TDECTRL + 1);
- DBG_88E("%s bcnhead:%d\n", __func__, bcnhead);
-
usb_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
dbg_addr = bcnhead * 128 / 8; /* 8-bytes addressing */
@@ -232,31 +227,20 @@ static void efuse_read_phymap_from_txpktbuf(
usb_write8(adapter, REG_TXPKTBUF_DBG, 0);
start = jiffies;
while (!(reg_0x143 = usb_read8(adapter, REG_TXPKTBUF_DBG)) &&
- jiffies_to_msecs(jiffies - start) < 1000) {
- DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, usb_read8(adapter, 0x106));
+ jiffies_to_msecs(jiffies - start) < 1000)
usleep_range(1000, 2000);
- }
lo32 = usb_read32(adapter, REG_PKTBUF_DBG_DATA_L);
hi32 = usb_read32(adapter, REG_PKTBUF_DBG_DATA_H);
if (i == 0) {
- u8 lenc[2];
- u16 lenbak, aaabak;
- u16 aaa;
+ usb_read8(adapter, REG_PKTBUF_DBG_DATA_L);
+ usb_read8(adapter, REG_PKTBUF_DBG_DATA_L + 1);
- lenc[0] = usb_read8(adapter, REG_PKTBUF_DBG_DATA_L);
- lenc[1] = usb_read8(adapter, REG_PKTBUF_DBG_DATA_L + 1);
-
- aaabak = le16_to_cpup((__le16 *)lenc);
- lenbak = le16_to_cpu(*((__le16 *)lenc));
- aaa = le16_to_cpup((__le16 *)&lo32);
len = le16_to_cpu(*((__le16 *)&lo32));
limit = min_t(u16, len - 2, limit);
- DBG_88E("%s len:%u, lenbak:%u, aaa:%u, aaabak:%u\n", __func__, len, lenbak, aaa, aaabak);
-
memcpy(pos, ((u8 *)&lo32) + 2, (limit >= count + 2) ? 2 : limit - count);
count += (limit >= count + 2) ? 2 : limit - count;
pos = content + count;
@@ -278,7 +262,6 @@ static void efuse_read_phymap_from_txpktbuf(
i++;
}
usb_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS);
- DBG_88E("%s read count:%u\n", __func__, count);
*size = count;
}
@@ -298,7 +281,7 @@ static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset
return status;
}
-void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf)
+static void efuse_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
{
if (rtw_iol_applied(Adapter)) {
rtw_hal_power_on(Adapter);
@@ -432,7 +415,6 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
hoffset = ((tmp_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
} else {
- DBG_88E("Error, All words disabled\n");
efuse_addr++;
continue;
}
@@ -479,7 +461,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
return true;
}
-static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, struct pgpkt *pFixPkt, u16 *pAddr)
+static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, struct pgpkt *pFixPkt, u16 *pAddr)
{
u8 originaldata[8], badworden = 0;
u16 efuse_addr = *pAddr;
@@ -508,7 +490,7 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
return true;
}
-static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
+static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u16 *pAddr, struct pgpkt *pTargetPkt)
{
bool ret = false;
u16 efuse_addr = *pAddr;
@@ -559,7 +541,7 @@ static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuse
fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
fixPkt.word_en = tmp_header & 0x0F;
fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
- if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr))
+ if (!hal_EfuseFixHeaderProcess(pAdapter, &fixPkt, &efuse_addr))
return false;
} else {
ret = true;
@@ -575,7 +557,7 @@ static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuse
return ret;
}
-static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
+static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u16 *pAddr, struct pgpkt *pTargetPkt)
{
bool ret = false;
u8 pg_header = 0, tmp_header = 0;
@@ -602,7 +584,7 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
fixPkt.offset = (tmp_header >> 4) & 0x0F;
fixPkt.word_en = tmp_header & 0x0F;
fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
- if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr))
+ if (!hal_EfuseFixHeaderProcess(pAdapter, &fixPkt, &efuse_addr))
return false;
}
@@ -610,7 +592,7 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
return ret;
}
-static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
+static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u16 *pAddr, struct pgpkt *pTargetPkt)
{
u16 efuse_addr = *pAddr;
u8 badworden;
@@ -632,16 +614,15 @@ static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u
static bool
hal_EfusePgPacketWriteHeader(
struct adapter *pAdapter,
- u8 efuseType,
u16 *pAddr,
struct pgpkt *pTargetPkt)
{
bool ret = false;
if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE)
- ret = hal_EfusePgPacketWrite2ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt);
+ ret = hal_EfusePgPacketWrite2ByteHeader(pAdapter, pAddr, pTargetPkt);
else
- ret = hal_EfusePgPacketWrite1ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt);
+ ret = hal_EfusePgPacketWrite1ByteHeader(pAdapter, pAddr, pTargetPkt);
return ret;
}
@@ -685,7 +666,7 @@ static bool hal_EfuseCheckIfDatafollowed(struct adapter *pAdapter, u8 word_cnts,
return ret;
}
-static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
+static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u16 *pAddr, struct pgpkt *pTargetPkt)
{
bool ret = false;
u8 i, efuse_data = 0, cur_header = 0;
@@ -772,20 +753,19 @@ bool Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *pD
{
struct pgpkt targetPkt;
u16 startAddr = 0;
- u8 efuseType = EFUSE_WIFI;
if (Efuse_GetCurrentSize(pAdapter) >= EFUSE_MAP_LEN_88E)
return false;
hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
- if (!hal_EfusePartialWriteCheck(pAdapter, efuseType, &startAddr, &targetPkt))
+ if (!hal_EfusePartialWriteCheck(pAdapter, &startAddr, &targetPkt))
return false;
- if (!hal_EfusePgPacketWriteHeader(pAdapter, efuseType, &startAddr, &targetPkt))
+ if (!hal_EfusePgPacketWriteHeader(pAdapter, &startAddr, &targetPkt))
return false;
- if (!hal_EfusePgPacketWriteData(pAdapter, efuseType, &startAddr, &targetPkt))
+ if (!hal_EfusePgPacketWriteData(pAdapter, &startAddr, &targetPkt))
return false;
return true;
@@ -875,22 +855,22 @@ void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
}
/* Read All Efuse content */
-static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse)
+static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 *Efuse)
{
efuse_power_switch(pAdapter, false, true);
- efuse_ReadEFuse(pAdapter, efuseType, 0, EFUSE_MAP_LEN_88E, Efuse);
+ efuse_ReadEFuse(pAdapter, 0, EFUSE_MAP_LEN_88E, Efuse);
efuse_power_switch(pAdapter, false, false);
}
/* Transfer current EFUSE content to shadow init and modify map. */
-void EFUSE_ShadowMapUpdate(struct adapter *pAdapter, u8 efuseType)
+void EFUSE_ShadowMapUpdate(struct adapter *pAdapter)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
if (pEEPROM->bautoload_fail_flag)
memset(pEEPROM->efuse_eeprom_data, 0xFF, EFUSE_MAP_LEN_88E);
else
- Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data);
+ Efuse_ReadAllMap(pAdapter, pEEPROM->efuse_eeprom_data);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 7a706fe11750..e431914db008 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -370,7 +370,6 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
pos += WPA_SELECTOR_LEN;
left -= WPA_SELECTOR_LEN;
} else if (left > 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
return _FAIL;
}
@@ -380,11 +379,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
pos += 2;
left -= 2;
- if (count == 0 || left < count * WPA_SELECTOR_LEN) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), count %u left %u",
- __func__, count, left));
+ if (count == 0 || left < count * WPA_SELECTOR_LEN)
return _FAIL;
- }
for (i = 0; i < count; i++) {
*pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
@@ -393,17 +389,14 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
left -= WPA_SELECTOR_LEN;
}
} else if (left == 1) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
return _FAIL;
}
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (!memcmp(pos, SUITE_1X, 4)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there has 802.1x auth\n", __func__));
+ if (!memcmp(pos, SUITE_1X, 4))
*is_8021x = 1;
- }
}
}
@@ -437,7 +430,6 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
left -= RSN_SELECTOR_LEN;
} else if (left > 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
return _FAIL;
}
@@ -447,11 +439,8 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
pos += 2;
left -= 2;
- if (count == 0 || left < count * RSN_SELECTOR_LEN) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), count %u left %u",
- __func__, count, left));
+ if (count == 0 || left < count * RSN_SELECTOR_LEN)
return _FAIL;
- }
for (i = 0; i < count; i++) {
*pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
@@ -461,18 +450,14 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
}
} else if (left == 1) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
-
return _FAIL;
}
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (!memcmp(pos, SUITE_1X, 4)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s (): there has 802.1x auth\n", __func__));
+ if (!memcmp(pos, SUITE_1X, 4))
*is_8021x = 1;
- }
}
}
return ret;
@@ -480,7 +465,7 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
{
- u8 authmode, sec_idx, i;
+ u8 authmode, sec_idx;
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint cnt;
@@ -494,40 +479,16 @@ void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie
authmode = in_ie[cnt];
if ((authmode == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
- sec_idx, in_ie[cnt + 1] + 2));
-
- if (wpa_ie) {
+ if (wpa_ie)
memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
- for (i = 0; i < (in_ie[cnt + 1] + 2); i += 8) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
- wpa_ie[i], wpa_ie[i + 1], wpa_ie[i + 2], wpa_ie[i + 3], wpa_ie[i + 4],
- wpa_ie[i + 5], wpa_ie[i + 6], wpa_ie[i + 7]));
- }
- }
-
*wpa_len = in_ie[cnt + 1] + 2;
cnt += in_ie[cnt + 1] + 2; /* get next */
} else {
if (authmode == WLAN_EID_RSN) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
- sec_idx, in_ie[cnt + 1] + 2));
-
- if (rsn_ie) {
+ if (rsn_ie)
memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
- for (i = 0; i < (in_ie[cnt + 1] + 2); i += 8) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
- rsn_ie[i], rsn_ie[i + 1], rsn_ie[i + 2], rsn_ie[i + 3], rsn_ie[i + 4],
- rsn_ie[i + 5], rsn_ie[i + 6], rsn_ie[i + 7]));
- }
- }
-
*rsn_len = in_ie[cnt + 1] + 2;
cnt += in_ie[cnt + 1] + 2; /* get next */
} else {
@@ -686,13 +647,8 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
* OUI of the vendor. The following byte is used a vendor specific
* sub-type.
*/
- if (elen < 4) {
- if (show_errors) {
- DBG_88E("short vendor specific information element ignored (len=%lu)\n",
- (unsigned long)elen);
- }
+ if (elen < 4)
return -1;
- }
oui = RTW_GET_BE24(pos);
switch (oui) {
@@ -711,11 +667,9 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
elems->wpa_ie_len = elen;
break;
case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
- if (elen < 5) {
- DBG_88E("short WME information element ignored (len=%lu)\n",
- (unsigned long)elen);
+ if (elen < 5)
return -1;
- }
+
switch (pos[4]) {
case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
@@ -727,8 +681,6 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
elems->wme_tspec_len = elen;
break;
default:
- DBG_88E("unknown WME information element ignored (subtype=%d len=%lu)\n",
- pos[4], (unsigned long)elen);
return -1;
}
break;
@@ -738,8 +690,6 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
elems->wps_ie_len = elen;
break;
default:
- DBG_88E("Unknown Microsoft information element ignored (type=%d len=%lu)\n",
- pos[3], (unsigned long)elen);
return -1;
}
break;
@@ -751,21 +701,17 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
elems->vendor_ht_cap_len = elen;
break;
default:
- DBG_88E("Unknown Broadcom information element ignored (type=%d len=%lu)\n",
- pos[3], (unsigned long)elen);
return -1;
}
break;
default:
- DBG_88E("unknown vendor specific information element ignored (vendor OUI %3phC len=%lu)\n",
- pos, (unsigned long)elen);
return -1;
}
return 0;
}
/**
- * ieee802_11_parse_elems - Parse information elements in management frames
+ * rtw_ieee802_11_parse_elems - Parse information elements in management frames
* @start: Pointer to the start of ies
* @len: Length of IE buffer in octets
* @elems: Data structure for parsed elements
@@ -789,13 +735,8 @@ enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
elen = *pos++;
left -= 2;
- if (elen > left) {
- if (show_errors) {
- DBG_88E("IEEE 802.11 element parse failed (id=%d elen=%d left=%lu)\n",
- id, elen, (unsigned long)left);
- }
+ if (elen > left)
return ParseFailed;
- }
switch (id) {
case WLAN_EID_SSID:
@@ -876,10 +817,6 @@ enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
break;
default:
unknown++;
- if (!show_errors)
- break;
- DBG_88E("IEEE 802.11 element parse ignored unknown element (id=%d elen=%d)\n",
- id, elen);
break;
}
left -= elen;
@@ -905,12 +842,8 @@ void rtw_macaddr_cfg(u8 *mac_addr)
ether_addr_copy(mac, mac_addr);
}
- if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
+ if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac))
eth_random_addr(mac_addr);
- DBG_88E("MAC Address from efuse error, assign random one !!!\n");
- }
-
- DBG_88E("%s MAC Address = %pM\n", __func__, mac_addr);
}
static int rtw_get_cipher_info(struct wlan_network *pnetwork)
@@ -923,30 +856,20 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
pbuf = rtw_get_wpa_ie(&pnetwork->network.ies[12], &wpa_ielen, pnetwork->network.ie_length - 12);
if (pbuf && (wpa_ielen > 0)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_ielen: %d", __func__, wpa_ielen));
if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s: pnetwork->pairwise_cipher: %d, is_8021x is %d",
- __func__, pnetwork->BcnInfo.pairwise_cipher,
- pnetwork->BcnInfo.is_8021x));
ret = _SUCCESS;
}
} else {
pbuf = rtw_get_wpa2_ie(&pnetwork->network.ies[12], &wpa_ielen, pnetwork->network.ie_length - 12);
if (pbuf && (wpa_ielen > 0)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n"));
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d, pnetwork->group_cipher is %d, is_8021x is %d",
- __func__, pnetwork->BcnInfo.pairwise_cipher,
- pnetwork->BcnInfo.group_cipher, pnetwork->BcnInfo.is_8021x));
ret = _SUCCESS;
}
}
@@ -974,10 +897,6 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
}
rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, NULL, &rsn_len, NULL, &wpa_len);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.ssid.ssid));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_len =%d rsn_len =%d\n", __func__, wpa_len, rsn_len));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.ssid.ssid));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_len =%d rsn_len =%d\n", __func__, wpa_len, rsn_len));
if (rsn_len > 0) {
pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
@@ -987,10 +906,6 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
if (bencrypt)
pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
}
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->encryp_protocol is %x\n",
- __func__, pnetwork->BcnInfo.encryp_protocol));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->encryp_protocol is %x\n",
- __func__, pnetwork->BcnInfo.encryp_protocol));
rtw_get_cipher_info(pnetwork);
/* get bwmode and ch_offset */
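
Editor's sketch, not part of the patch: the rtw_macaddr_cfg() hunk above keeps the pattern of validating the MAC read from efuse and falling back to a random locally administered address, using the <linux/etherdevice.h> helpers the file now includes. A minimal standalone version of that pattern (function name and parameters are assumptions for the example):

/* Illustrative sketch only. */
#include <linux/etherdevice.h>

static void setup_mac(u8 *mac_addr, const u8 *efuse_mac)
{
	ether_addr_copy(mac_addr, efuse_mac);

	/* all-zero or broadcast means the efuse content is unusable */
	if (is_broadcast_ether_addr(mac_addr) || is_zero_ether_addr(mac_addr))
		eth_random_addr(mac_addr);	/* random, locally administered, unicast */
}
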
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 17b999f45132..f679a7f8fe75 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -12,12 +12,12 @@
#include <hal_intf.h>
static const struct {
- int channel_plan;
- char *name;
+ int channel_plan;
+ char *name;
} channel_table[] = { { RT_CHANNEL_DOMAIN_FCC, "US" },
- { RT_CHANNEL_DOMAIN_ETSI, "EU" },
- { RT_CHANNEL_DOMAIN_MKK, "JP" },
- { RT_CHANNEL_DOMAIN_CHINA, "CN"} };
+ { RT_CHANNEL_DOMAIN_ETSI, "EU" },
+ { RT_CHANNEL_DOMAIN_MKK, "JP" },
+ { RT_CHANNEL_DOMAIN_CHINA, "CN"} };
extern void indicate_wx_scan_complete_event(struct adapter *padapter);
@@ -33,8 +33,6 @@ u8 rtw_do_join(struct adapter *padapter)
phead = get_list_head(queue);
plist = phead->next;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n %s: phead = %p; plist = %p\n\n\n", __func__, phead, plist));
-
pmlmepriv->cur_network.join_res = -2;
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
@@ -52,13 +50,10 @@ u8 rtw_do_join(struct adapter *padapter)
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("%s: site survey if scanned_queue is empty\n.", __func__));
/* submit site_survey_cmd */
ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
- if (ret != _SUCCESS) {
+ if (ret != _SUCCESS)
pmlmepriv->to_join = false;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("%s: site survey return error\n.", __func__));
- }
} else {
pmlmepriv->to_join = false;
ret = _FAIL;
@@ -92,14 +87,10 @@ u8 rtw_do_join(struct adapter *padapter)
rtw_generate_random_ibss(pibss);
if (rtw_createbss_cmd(padapter) != _SUCCESS) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>do_goin: rtw_createbss_cmd status FAIL***\n "));
ret = false;
goto exit;
}
pmlmepriv->to_join = false;
-
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("***Error => rtw_select_and_join_from_scanned_queue FAIL under STA_Mode***\n "));
} else {
/* can't associate ; reset under-linking */
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
@@ -109,10 +100,8 @@ u8 rtw_do_join(struct adapter *padapter)
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
- if (ret != _SUCCESS) {
+ if (ret != _SUCCESS)
pmlmepriv->to_join = false;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n."));
- }
} else {
ret = _FAIL;
pmlmepriv->to_join = false;
@@ -131,8 +120,6 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
-
if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
(bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF &&
@@ -143,23 +130,16 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
spin_lock_bh(&pmlmepriv->lock);
- DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
goto handle_tkip_countermeasure;
else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
goto release_mlme_lock;
if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
-
if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */
} else {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set BSSID not the same bssid\n"));
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid =%pM\n", (bssid)));
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("cur_bssid =%pM\n", (pmlmepriv->cur_network.network.MacAddress)));
-
rtw_disassoc_cmd(padapter, 0, true);
if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -201,9 +181,6 @@ release_mlme_lock:
spin_unlock_bh(&pmlmepriv->lock);
exit:
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- ("%s: status=%d\n", __func__, status));
-
return status;
}
@@ -215,35 +192,22 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
- DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
- ssid->ssid, get_fwstate(pmlmepriv));
-
if (!padapter->hw_init_completed) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- ("set_ssid: hw_init_completed == false =>exit!!!\n"));
status = _FAIL;
goto exit;
}
spin_lock_bh(&pmlmepriv->lock);
- DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
goto handle_tkip_countermeasure;
else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
goto release_mlme_lock;
if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
-
if (pmlmepriv->assoc_ssid.ssid_length == ssid->ssid_length &&
!memcmp(&pmlmepriv->assoc_ssid.ssid, ssid->ssid, ssid->ssid_length)) {
if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- ("Set SSID is the same ssid, fw_state = 0x%08x\n",
- get_fwstate(pmlmepriv)));
-
if (!rtw_is_same_ibss(padapter, pnetwork)) {
/* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */
rtw_disassoc_cmd(padapter, 0, true);
@@ -264,10 +228,6 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1);
}
} else {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n"));
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->ssid, (unsigned int)ssid->ssid_length));
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.ssid, (unsigned int)pmlmepriv->assoc_ssid.ssid_length));
-
rtw_disassoc_cmd(padapter, 0, true);
if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -308,8 +268,6 @@ release_mlme_lock:
spin_unlock_bh(&pmlmepriv->lock);
exit:
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- ("-%s: status =%d\n", __func__, status));
return status;
}
@@ -320,16 +278,9 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
struct wlan_network *cur_network = &pmlmepriv->cur_network;
enum ndis_802_11_network_infra *pold_state = &cur_network->network.InfrastructureMode;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
- ("+%s: old =%d new =%d fw_state = 0x%08x\n", __func__,
- *pold_state, networktype, get_fwstate(pmlmepriv)));
-
if (*pold_state != networktype) {
spin_lock_bh(&pmlmepriv->lock);
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
- /* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
-
if (*pold_state == Ndis802_11APMode) {
/* change to other mode from Ndis802_11APMode */
cur_network->join_res = -1;
@@ -387,9 +338,6 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter)
spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("MgntActrtw_set_802_11_disassociate: rtw_indicate_disconnect\n"));
-
rtw_disassoc_cmd(padapter, 0, true);
rtw_indicate_disconnect(padapter);
rtw_free_assoc_resources(padapter);
@@ -406,33 +354,21 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+%s(), fw_state =%x\n", __func__, get_fwstate(pmlmepriv)));
-
if (!padapter) {
res = false;
goto exit;
}
if (!padapter->hw_init_completed) {
res = false;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n === %s:hw_init_completed == false ===\n", __func__));
goto exit;
}
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING) ||
pmlmepriv->LinkDetectInfo.bBusyTraffic) {
/* Scan or linking is in progress, do nothing. */
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("%s fail since fw_state = %x\n", __func__, get_fwstate(pmlmepriv)));
res = true;
-
- if (check_fwstate(pmlmepriv,
- _FW_UNDER_SURVEY | _FW_UNDER_LINKING))
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
- else
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n"));
-
} else {
if (rtw_is_scan_deny(padapter)) {
- DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter));
indicate_wx_scan_complete_event(padapter);
return _SUCCESS;
}
@@ -453,15 +389,8 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11
int res;
u8 ret;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("set_802_11_auth.mode(): mode =%x\n", authmode));
-
psecuritypriv->ndisauthtype = authmode;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("%s:psecuritypriv->ndisauthtype=%d", __func__,
- psecuritypriv->ndisauthtype));
-
if (psecuritypriv->ndisauthtype > 3)
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -484,7 +413,6 @@ u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= 4) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MgntActrtw_set_802_11_add_wep:keyid>4 =>fail\n"));
ret = false;
goto exit;
}
@@ -492,20 +420,14 @@ u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
switch (wep->KeyLength) {
case 5:
psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 5\n"));
break;
case 13:
psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 13\n"));
break;
default:
psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength!= 5 or 13\n"));
break;
}
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("%s:before memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n", __func__,
- wep->KeyLength, wep->KeyIndex, keyid));
memcpy(&psecuritypriv->dot11DefKey[keyid].skey[0],
&wep->KeyMaterial, wep->KeyLength);
@@ -514,22 +436,6 @@ u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
psecuritypriv->dot11PrivacyKeyIndex = keyid;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("%s:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n", __func__,
- psecuritypriv->dot11DefKey[keyid].skey[0],
- psecuritypriv->dot11DefKey[keyid].skey[1],
- psecuritypriv->dot11DefKey[keyid].skey[2],
- psecuritypriv->dot11DefKey[keyid].skey[3],
- psecuritypriv->dot11DefKey[keyid].skey[4],
- psecuritypriv->dot11DefKey[keyid].skey[5],
- psecuritypriv->dot11DefKey[keyid].skey[6],
- psecuritypriv->dot11DefKey[keyid].skey[7],
- psecuritypriv->dot11DefKey[keyid].skey[8],
- psecuritypriv->dot11DefKey[keyid].skey[9],
- psecuritypriv->dot11DefKey[keyid].skey[10],
- psecuritypriv->dot11DefKey[keyid].skey[11],
- psecuritypriv->dot11DefKey[keyid].skey[12]));
-
res = rtw_set_key(padapter, psecuritypriv, keyid, 1);
if (res == _FAIL)
@@ -595,7 +501,6 @@ int rtw_set_country(struct adapter *adapter, const char *country_code)
int i;
int channel_plan = RT_CHANNEL_DOMAIN_WORLD_WIDE_5G;
- DBG_88E("%s country_code:%s\n", __func__, country_code);
for (i = 0; i < ARRAY_SIZE(channel_table); i++) {
if (strcmp(channel_table[i].name, country_code) == 0) {
channel_plan = channel_table[i].channel_plan;
@@ -603,8 +508,5 @@ int rtw_set_country(struct adapter *adapter, const char *country_code)
}
}
- if (i == ARRAY_SIZE(channel_table))
- DBG_88E("%s unknown country_code:%s\n", __func__, country_code);
-
return rtw_set_chplan_cmd(adapter, channel_plan, 1);
}
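
Editor's sketch, not part of the patch: rtw_set_country() above keeps the same shape after the cleanup, a linear scan of a small static country table with a worldwide channel plan as the default when the code is unknown. A simplified standalone version of that lookup (table contents and values here are placeholders, not the driver's):

/* Illustrative sketch only. */
#include <linux/kernel.h>
#include <linux/string.h>

struct country_chplan {
	const char *name;
	int plan;
};

static const struct country_chplan tbl[] = {
	{ "US", 0 }, { "EU", 1 }, { "JP", 2 }, { "CN", 3 },
};

static int lookup_chplan(const char *country_code, int default_plan)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(tbl); i++)
		if (!strcmp(tbl[i].name, country_code))
			return tbl[i].plan;

	return default_plan;	/* unknown code -> worldwide plan */
}
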
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 32dccae186ca..be868f386204 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -43,7 +43,7 @@ void BlinkWorkItemCallback(struct work_struct *work)
void ResetLedStatus(struct LED_871x *pLed)
{
pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
- pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
+ pLed->led_on = false; /* true if LED is ON, false if LED is OFF. */
pLed->bLedBlinkInProgress = false; /* true if it is blinking, false o.w.. */
pLed->bLedWPSBlinkInProgress = false;
@@ -92,15 +92,10 @@ static void SwLedBlink1(struct LED_871x *pLed)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
+ if (pLed->BlinkingLedState == RTW_LED_ON)
sw_led_on(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
+ else
sw_led_off(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
sw_led_off(padapter, pLed);
@@ -110,7 +105,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -118,7 +113,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
break;
case LED_BLINK_NORMAL:
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -131,27 +126,25 @@ static void SwLedBlink1(struct LED_871x *pLed)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -165,27 +158,25 @@ static void SwLedBlink1(struct LED_871x *pLed)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
}
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -194,7 +185,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
}
break;
case LED_BLINK_WPS:
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -205,13 +196,12 @@ static void SwLedBlink1(struct LED_871x *pLed)
if (pLed->BlinkingLedState != RTW_LED_ON) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
pLed->bLedWPSBlinkInProgress = false;
} else {
@@ -251,7 +241,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
}
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -274,7 +264,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
}
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -304,7 +294,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->bLedScanBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SCAN;
pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -329,7 +319,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->bLedBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_TXRX;
pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -358,7 +348,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
}
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -387,7 +377,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
else
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
+ if (pLed->led_on) {
pLed->BlinkingLedState = RTW_LED_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA));
@@ -404,7 +394,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
}
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
+ if (pLed->led_on)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
@@ -439,9 +429,6 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
default:
break;
}
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Led %d\n", pLed->CurrLedState));
}
void blink_handler(struct LED_871x *pLed)
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index b6ac5b8915b1..71d205f3d73d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -19,6 +19,7 @@
#include <wlan_bssdef.h>
#include <rtw_ioctl_set.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
extern const u8 MCS_rate_1R[16];
@@ -32,8 +33,6 @@ int rtw_init_mlme_priv(struct adapter *padapter)
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- pmlmepriv->nic_hdl = (u8 *)padapter;
-
pmlmepriv->pscanned = NULL;
pmlmepriv->fw_state = 0;
pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
@@ -118,8 +117,6 @@ struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
list_del_init(&pnetwork->list);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("rtw_alloc_network: ptr=%p\n", &pnetwork->list));
pnetwork->network_type = 0;
pnetwork->fixed = false;
pnetwork->last_scanned = jiffies;
@@ -181,20 +178,16 @@ struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
struct list_head *phead, *plist;
struct wlan_network *pnetwork = NULL;
- u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- if (!memcmp(zero_addr, addr, ETH_ALEN)) {
+ if (is_zero_ether_addr(addr)) {
pnetwork = NULL;
goto exit;
}
phead = get_list_head(scanned_queue);
- plist = phead->next;
-
- while (plist != phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each(plist, phead) {
+ pnetwork = list_entry(plist, struct wlan_network, list);
if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
- plist = plist->next;
}
if (plist == phead)
pnetwork = NULL;
@@ -204,23 +197,17 @@ exit:
void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
{
- struct list_head *phead, *plist;
- struct wlan_network *pnetwork;
+ struct list_head *phead;
+ struct wlan_network *pnetwork, *temp;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
spin_lock_bh(&scanned_queue->lock);
phead = get_list_head(scanned_queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- plist = plist->next;
-
+ list_for_each_entry_safe(pnetwork, temp, phead, list)
_rtw_free_network(pmlmepriv, pnetwork, isfreeall);
- }
+
spin_unlock_bh(&scanned_queue->lock);
}
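
Editor's sketch, not part of the patch: the rtw_free_network_queue() hunk above switches to list_for_each_entry_safe() because entries are removed and freed while the list is being walked, so a second cursor is needed to keep the next pointer valid. A minimal standalone illustration of that idiom (stub type, assumed free logic):

/* Illustrative sketch only. */
#include <linux/list.h>
#include <linux/slab.h>

struct wlan_network_stub {
	struct list_head list;
};

static void free_all(struct list_head *head)
{
	struct wlan_network_stub *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);	/* safe: 'tmp' already holds the next entry */
	}
}
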
@@ -229,15 +216,10 @@ int rtw_if_up(struct adapter *padapter)
int res;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
- !check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
- __func__, padapter->bDriverStopped,
- padapter->bSurpriseRemoved));
+ !check_fwstate(&padapter->mlmepriv, _FW_LINKED))
res = false;
- } else {
+ else
res = true;
- }
return res;
}
@@ -400,17 +382,14 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each(plist, phead) {
+ pnetwork = list_entry(plist, struct wlan_network, list);
if (is_same_network(&pnetwork->network, target))
break;
if ((oldest == ((struct wlan_network *)0)) ||
time_after(oldest->last_scanned, pnetwork->last_scanned))
oldest = pnetwork;
- plist = plist->next;
}
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information
@@ -440,11 +419,8 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
- if (!pnetwork) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("\n\n\nsomething wrong here\n\n\n"));
+ if (!pnetwork)
goto exit;
- }
bssid_ex_sz = get_wlan_bssid_ex_sz(target);
target->Length = bssid_ex_sz;
@@ -520,10 +496,8 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
bselected = false;
}
- if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
- DBG_88E("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
+ if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0))
bselected = false;
- }
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
@@ -533,12 +507,6 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
return bselected;
}
-/* TODO: Perry: For Power Management */
-void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
-{
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_event\n"));
-}
-
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
u32 len;
@@ -547,15 +515,9 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
pnetwork = (struct wlan_bssid_ex *)pbuf;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s, ssid=%s\n", __func__, pnetwork->ssid.ssid));
-
len = get_wlan_bssid_ex_sz(pnetwork);
- if (len > (sizeof(struct wlan_bssid_ex))) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("\n****%s: return a wrong bss ***\n", __func__));
+ if (len > (sizeof(struct wlan_bssid_ex)))
return;
- }
spin_lock_bh(&pmlmepriv->lock);
/* update IBSS_network 's timestamp */
@@ -598,14 +560,9 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
pmlmepriv->wps_probe_req_ie = NULL;
}
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s: fw_state:%x\n\n", __func__, get_fwstate(pmlmepriv)));
-
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
del_timer_sync(&pmlmepriv->scan_to_timer);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- } else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status=%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)));
}
rtw_set_signal_stat_timer(&adapter->recvpriv);
@@ -624,8 +581,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
-
memcpy(&pdev_network->ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -633,8 +588,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
- if (rtw_createbss_cmd(adapter) != _SUCCESS)
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error=>rtw_createbss_cmd status FAIL\n"));
+ rtw_createbss_cmd(adapter);
pmlmepriv->to_join = false;
}
}
@@ -651,7 +605,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
rtw_indicate_connect(adapter);
} else {
- DBG_88E("try_to_join, but select scanning queue fail, to_roaming:%d\n", pmlmepriv->to_roaming);
if (pmlmepriv->to_roaming != 0) {
if (--pmlmepriv->to_roaming == 0 ||
rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
@@ -688,7 +641,6 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
struct __queue *scan_queue = &pmlmepriv->scanned_queue;
struct list_head *plist, *phead, *ptemp;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+%s\n", __func__));
spin_lock_bh(&scan_queue->lock);
spin_lock_bh(&free_queue->lock);
@@ -728,11 +680,6 @@ void rtw_free_assoc_resources_locked(struct adapter *adapter)
struct sta_priv *pstapriv = &adapter->stapriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("tgt_network->network.MacAddress=%pM ssid=%s\n",
- tgt_network->network.MacAddress, tgt_network->network.ssid.ssid));
-
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE)) {
struct sta_info *psta;
@@ -759,8 +706,6 @@ void rtw_free_assoc_resources_locked(struct adapter *adapter)
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan)
pwlan->fixed = false;
- else
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources:pwlan==NULL\n\n"));
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1)))
rtw_free_network_nolock(pmlmepriv, pwlan);
@@ -775,8 +720,6 @@ void rtw_indicate_connect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+%s\n", __func__));
-
pmlmepriv->to_join = false;
if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
@@ -790,8 +733,6 @@ void rtw_indicate_connect(struct adapter *padapter)
pmlmepriv->to_roaming = 0;
rtw_set_scan_deny(padapter, 3000);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-%s: fw_state=0x%08x\n", __func__, get_fwstate(pmlmepriv)));
}
/*
@@ -801,8 +742,6 @@ void rtw_indicate_disconnect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+%s\n", __func__));
-
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
if (pmlmepriv->to_roaming > 0)
@@ -837,7 +776,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
if (psta) { /* update ptarget_sta */
- DBG_88E("%s\n", __func__);
psta->aid = pnetwork->join_res;
psta->mac_id = 0;
/* sta mode */
@@ -900,12 +838,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *cur_network = &pmlmepriv->cur_network;
- DBG_88E("%s\n", __func__);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("\nfw_state:%x, BSSID:%pM\n",
- get_fwstate(pmlmepriv), pnetwork->network.MacAddress));
-
/* why not use ptarget_wlan?? */
memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
/* some ies in pnetwork is wrong, so we should use ptarget_wlan ies */
@@ -934,7 +866,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
break;
default:
pmlmepriv->fw_state = WIFI_NULL_STATE;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Invalid network_mode\n"));
break;
}
@@ -961,27 +892,16 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
unsigned int the_same_macaddr = false;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res=%d\n", pnetwork->join_res));
-
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
- if (pmlmepriv->assoc_ssid.ssid_length == 0)
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
- else
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
-
the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
- if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
+ if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex))
return;
- }
spin_lock_bh(&pmlmepriv->lock);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nrtw_joinbss_event_callback!! _enter_critical\n"));
-
if (pnetwork->join_res > 0) {
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
@@ -1019,7 +939,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
if (ptarget_wlan) {
rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
} else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't find ptarget_wlan when joinbss_event callback\n"));
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
@@ -1028,7 +947,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
if (!ptarget_sta) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
@@ -1037,18 +955,11 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
/* s4. indicate connect */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
rtw_indicate_connect(adapter);
- } else {
- /* adhoc mode will rtw_indicate_connect when rtw_stassoc_event_callback */
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
}
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancel assoc_timer\n"));
-
} else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
}
@@ -1060,10 +971,8 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n", get_fwstate(pmlmepriv)));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- }
} else { /* if join_res < 0 (join fails), then try again */
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
@@ -1150,23 +1059,16 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
#endif
/* for AD-HOC mode */
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta) {
+ if (psta)
/* the sta have been in sta_info_queue => do nothing */
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("Error: %s: sta has been in sta_hash_queue\n",
- __func__));
return; /* between drv has received this event before and fw have not yet to set key to CAM_ENTRY) */
- }
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (!psta) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("Can't alloc sta_info when %s\n", __func__));
+ if (!psta)
return;
- }
+
/* to do: init sta_info variable */
psta->qos_option = 0;
psta->mac_id = (uint)pstassoc->cam_id;
- DBG_88E("%s\n", __func__);
/* for ad-hoc mode */
rtw_hal_set_odm_var(adapter, HAL_ODM_STA_INFO, psta, true);
rtw_stassoc_hw_rpt(adapter, psta);
@@ -1208,8 +1110,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
else
mac_id = pstadel->mac_id;
- DBG_88E("%s(mac_id=%d)=%pM\n", __func__, mac_id, pstadel->macaddr);
-
if (mac_id >= 0) {
u16 media_status;
@@ -1280,18 +1180,12 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
}
- if (rtw_createbss_cmd(adapter) != _SUCCESS)
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error=>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
+ rtw_createbss_cmd(adapter);
}
}
spin_unlock_bh(&pmlmepriv->lock);
}
-void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
-{
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+%s !!!\n", __func__));
-}
-
/*
* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
@@ -1302,8 +1196,6 @@ void _rtw_join_timeout_handler (struct timer_list *t)
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
- DBG_88E("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
-
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
@@ -1313,15 +1205,11 @@ void _rtw_join_timeout_handler (struct timer_list *t)
while (1) {
pmlmepriv->to_roaming--;
if (pmlmepriv->to_roaming != 0) { /* try another , */
- DBG_88E("%s try another roaming\n", __func__);
do_join_r = rtw_do_join(adapter);
- if (do_join_r != _SUCCESS) {
- DBG_88E("%s roaming do_join return %d\n", __func__, do_join_r);
+ if (do_join_r != _SUCCESS)
continue;
- }
break;
}
- DBG_88E("%s We've try roaming but fail\n", __func__);
rtw_indicate_disconnect(adapter);
break;
}
@@ -1342,7 +1230,6 @@ void rtw_scan_timeout_handler (struct timer_list *t)
mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
spin_unlock_bh(&pmlmepriv->lock);
@@ -1357,7 +1244,6 @@ static void rtw_auto_scan_handler(struct adapter *padapter)
if (pmlmepriv->scan_interval > 0) {
pmlmepriv->scan_interval--;
if (pmlmepriv->scan_interval == 0) {
- DBG_88E("%s\n", __func__);
rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
}
@@ -1431,15 +1317,6 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
*candidate = competitor;
updated = true;
}
- if (updated) {
- DBG_88E("[by_bssid:%u][assoc_ssid:%s]new candidate: %s(%pM rssi:%d\n",
- pmlmepriv->assoc_by_bssid,
- pmlmepriv->assoc_ssid.ssid,
- (*candidate)->network.ssid.ssid,
- (*candidate)->network.MacAddress,
- (int)(*candidate)->network.Rssi);
- DBG_88E("[to_roaming:%u]\n", pmlmepriv->to_roaming);
- }
exit:
return updated;
@@ -1456,7 +1333,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
{
int ret;
struct list_head *phead;
- struct adapter *adapter;
+ struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
struct wlan_network *candidate = NULL;
@@ -1464,32 +1341,18 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
- adapter = (struct adapter *)pmlmepriv->nic_hdl;
- pmlmepriv->pscanned = phead->next;
- while (phead != pmlmepriv->pscanned) {
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- if (!pnetwork) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork==NULL)\n", __func__));
- ret = _FAIL;
- goto exit;
- }
- pmlmepriv->pscanned = pmlmepriv->pscanned->next;
+ list_for_each(pmlmepriv->pscanned, phead) {
+ pnetwork = list_entry(pmlmepriv->pscanned,
+ struct wlan_network, list);
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
}
if (!candidate) {
- DBG_88E("%s: return _FAIL(candidate==NULL)\n", __func__);
ret = _FAIL;
goto exit;
- } else {
- DBG_88E("%s: candidate: %s(%pM ch:%u)\n", __func__,
- candidate->network.ssid.ssid, candidate->network.MacAddress,
- candidate->network.Configuration.DSConfig);
}
/* check for situation of _FW_LINKED */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- DBG_88E("%s: _FW_LINKED while ask_for_joinbss!!!\n", __func__);
-
rtw_disassoc_cmd(adapter, 0, true);
rtw_indicate_disconnect(adapter);
rtw_free_assoc_resources_locked(adapter);
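
The loop above switches from an open-coded walk of pmlmepriv->pscanned to the generic list_for_each()/list_entry() helpers from <linux/list.h>. As a rough, self-contained sketch of that pattern (the demo_node type and field names below are hypothetical, not part of the driver):

#include <linux/list.h>

/* Hypothetical node type, used only to illustrate the iteration idiom. */
struct demo_node {
	struct list_head list;	/* linkage into the containing queue */
	int value;
};

static int demo_sum(struct list_head *head)
{
	struct list_head *pos;
	struct demo_node *node;
	int sum = 0;

	/*
	 * list_for_each() visits every struct list_head on the list;
	 * list_entry() (an alias of container_of()) recovers the structure
	 * that embeds it, so no manual "pos = pos->next" bookkeeping or
	 * NULL check is needed.
	 */
	list_for_each(pos, head) {
		node = list_entry(pos, struct demo_node, list);
		sum += node->value;
	}

	return sum;
}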
@@ -1500,10 +1363,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
u8 cur_ant;
rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(cur_ant));
- DBG_88E("#### Opt_Ant_(%s), cur_Ant(%s)\n",
- (candidate->network.PhyInfo.Optimum_antenna == 2) ? "A" : "B",
- (cur_ant == 2) ? "A" : "B"
- );
}
ret = rtw_joinbss_cmd(adapter, candidate);
@@ -1539,9 +1398,6 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
pcmd->rsp = NULL;
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("after enqueue set_auth_cmd, auth_mode=%x\n",
- psecuritypriv->dot11AuthAlgrthm));
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
return res;
@@ -1566,26 +1422,13 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
goto err_free_cmd;
}
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("\n %s: psetkeyparm->algorithm=(unsigned char)psecuritypriv->dot118021XGrpPrivacy=%d\n",
- __func__, psetkeyparm->algorithm));
- } else {
+ else
psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("\n %s: psetkeyparm->algorithm=(u8)psecuritypriv->dot11PrivacyAlgrthm=%d\n",
- __func__, psetkeyparm->algorithm));
- }
psetkeyparm->keyid = (u8)keyid;/* 0~3 */
psetkeyparm->set_tx = set_tx;
pmlmepriv->key_mask |= BIT(psetkeyparm->keyid);
- DBG_88E("==> %s algorithm(%x), keyid(%x), key_mask(%x)\n",
- __func__, psetkeyparm->algorithm, psetkeyparm->keyid,
- pmlmepriv->key_mask);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("\n %s: psetkeyparm->algorithm=%d psetkeyparm->keyid=(u8)keyid=%d\n",
- __func__, psetkeyparm->algorithm, keyid));
switch (psetkeyparm->algorithm) {
case _WEP40_:
@@ -1609,9 +1452,6 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
psetkeyparm->grpkey = 1;
break;
default:
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("\n %s:psecuritypriv->dot11PrivacyAlgrthm=%x (must be 1 or 2 or 4 or 5)\n",
- __func__, psecuritypriv->dot11PrivacyAlgrthm));
res = _FAIL;
goto err_free_parm;
}
@@ -1717,11 +1557,6 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct security_priv *psecuritypriv = &adapter->securitypriv;
uint ndisauthmode = psecuritypriv->ndisauthtype;
- uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- ("+%s: ndisauthmode=%d ndissecuritytype=%d\n", __func__,
- ndisauthmode, ndissecuritytype));
/* copy fixed ie only */
memcpy(out_ie, in_ie, 12);
@@ -1801,9 +1636,6 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
}
pdev_network->Configuration.DSConfig = pregistrypriv->channel;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("pregistrypriv->channel=%d, pdev_network->Configuration.DSConfig=0x%x\n",
- pregistrypriv->channel, pdev_network->Configuration.DSConfig));
if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
pdev_network->Configuration.ATIMWindow = 0;
@@ -1936,8 +1768,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
return;
- DBG_88E("+%s()\n", __func__);
-
/* maybe needs check if ap supports rx ampdu. */
if ((!phtpriv->ampdu_enable) && (pregistrypriv->ampdu_enable == 1)) {
if (pregistrypriv->wifi_spec == 1)
@@ -1974,8 +1804,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
/* Config SM Power Save setting */
pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
- if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
- DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
/* Config current HT Protection mode. */
pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3;
@@ -2010,7 +1838,6 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
if (issued == 0) {
- DBG_88E("%s, p=%d\n", __func__, priority);
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
}
@@ -2038,9 +1865,6 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
pnetwork = &pmlmepriv->cur_network;
if (pmlmepriv->to_roaming > 0) {
- DBG_88E("roaming from %s(%pM length:%d\n",
- pnetwork->network.ssid.ssid, pnetwork->network.MacAddress,
- pnetwork->network.ssid.ssid_length);
memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
@@ -2050,13 +1874,11 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
if (do_join_r == _SUCCESS)
break;
- DBG_88E("roaming do_join return %d\n", do_join_r);
pmlmepriv->to_roaming--;
if (pmlmepriv->to_roaming > 0) {
continue;
} else {
- DBG_88E("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__);
rtw_indicate_disconnect(padapter);
break;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 50d3c3631be0..25653ebfaafd 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -7,6 +7,7 @@
#define _RTW_MLME_EXT_C_
#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include <osdep_service.h>
@@ -148,14 +149,11 @@ struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
struct xmit_buf *pxmitbuf;
pmgntframe = rtw_alloc_xmitframe(pxmitpriv);
- if (!pmgntframe) {
- DBG_88E("%s, alloc xmitframe fail\n", __func__);
+ if (!pmgntframe)
return NULL;
- }
pxmitbuf = rtw_alloc_xmitbuf_ext(pxmitpriv);
if (!pxmitbuf) {
- DBG_88E("%s, alloc xmitbuf fail\n", __func__);
rtw_free_xmitframe(pxmitpriv, pmgntframe);
return NULL;
}
@@ -177,7 +175,6 @@ void update_mgnt_tx_rate(struct adapter *padapter, u8 rate)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
pmlmeext->tx_rate = rate;
- DBG_88E("%s(): rate = %x\n", __func__, rate);
}
void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib)
@@ -314,13 +311,10 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe) {
- DBG_88E("%s, alloc mgnt frame fail\n", __func__);
+ if (!pmgntframe)
return;
- }
#if defined(CONFIG_88EU_AP_MODE)
spin_lock_bh(&pmlmepriv->bcn_update_lock);
#endif
@@ -338,13 +332,13 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ eth_broadcast_addr(pwlanhdr->addr1);
ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
ether_addr_copy(pwlanhdr->addr3, cur_network->MacAddress);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
/* pmlmeext->mgnt_seq++; */
- SetFrameSubType(pframe, WIFI_BEACON);
+ SetFrameSubType(pframe, IEEE80211_STYPE_BEACON);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
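
issue_beacon() above no longer keeps a local bc_addr[] array; eth_broadcast_addr() from <linux/etherdevice.h> fills the destination directly, and the dispatcher hunks further down test addresses with is_broadcast_ether_addr(). A minimal sketch of the helper pair, with a hypothetical buffer argument:

#include <linux/etherdevice.h>

static bool demo_broadcast_dest(u8 *dest)	/* dest is ETH_ALEN bytes */
{
	/* Writes ff:ff:ff:ff:ff:ff into dest, replacing the old copy from a
	 * hand-rolled bc_addr[] array via ether_addr_copy().
	 */
	eth_broadcast_addr(dest);

	/* Matching predicate: true when all six octets are 0xff. */
	return is_broadcast_ether_addr(dest);
}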
@@ -428,14 +422,11 @@ _issue_bcn:
spin_unlock_bh(&pmlmepriv->bcn_update_lock);
#endif
- if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
- DBG_88E("beacon frame too large\n");
+ if ((pattrib->pktlen + TXDESC_SIZE) > 512)
return;
- }
pattrib->last_txcmdsz = pattrib->pktlen;
- /* DBG_88E("issue bcn_sz=%d\n", pattrib->last_txcmdsz); */
if (timeout_ms > 0)
dump_mgntframe_and_wait(padapter, pmgntframe, timeout_ms);
else
@@ -462,10 +453,8 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
unsigned int rate_len;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe) {
- DBG_88E("%s, alloc mgnt frame fail\n", __func__);
+ if (!pmgntframe)
return;
- }
/* update attribute */
pattrib = &pmgntframe->attrib;
@@ -487,7 +476,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(fctrl, WIFI_PROBERSP);
+ SetFrameSubType(fctrl, IEEE80211_STYPE_PROBE_RESP);
pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = pattrib->hdrlen;
@@ -604,9 +593,6 @@ static int issue_probereq(struct adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int bssrate_len = 0;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+%s\n", __func__));
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -632,15 +618,15 @@ static int issue_probereq(struct adapter *padapter,
ether_addr_copy(pwlanhdr->addr3, da);
} else {
/* broadcast probe request frame */
- ether_addr_copy(pwlanhdr->addr1, bc_addr);
- ether_addr_copy(pwlanhdr->addr3, bc_addr);
+ eth_broadcast_addr(pwlanhdr->addr1);
+ eth_broadcast_addr(pwlanhdr->addr3);
}
ether_addr_copy(pwlanhdr->addr2, mac);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_PROBEREQ);
+ SetFrameSubType(pframe, IEEE80211_STYPE_PROBE_REQ);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -668,9 +654,6 @@ static int issue_probereq(struct adapter *padapter,
pattrib->last_txcmdsz = pattrib->pktlen;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- ("issuing probe_req, tx_len=%d\n", pattrib->last_txcmdsz));
-
if (wait_ack) {
ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
} else {
@@ -688,7 +671,6 @@ static int issue_probereq_ex(struct adapter *padapter,
{
int ret;
int i = 0;
- unsigned long start = jiffies;
do {
ret = issue_probereq(padapter, pssid, da, wait_ms > 0);
@@ -707,19 +689,6 @@ static int issue_probereq_ex(struct adapter *padapter,
ret = _SUCCESS;
goto exit;
}
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
exit:
return ret;
}
@@ -762,7 +731,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_AUTH);
+ SetFrameSubType(pframe, IEEE80211_STYPE_AUTH);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -865,7 +834,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
pattrib->last_txcmdsz = pattrib->pktlen;
rtw_wep_encrypt(padapter, pmgntframe);
- DBG_88E("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
}
@@ -887,8 +855,6 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
u8 *ie = pnetwork->ies;
__le16 lestatus, leval;
- DBG_88E("%s\n", __func__);
-
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
return;
@@ -912,7 +878,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
+ if ((pkt_type == IEEE80211_STYPE_ASSOC_RESP) || (pkt_type == IEEE80211_STYPE_REASSOC_RESP))
SetFrameSubType(pwlanhdr, pkt_type);
else
return;
@@ -1034,7 +1000,7 @@ static void issue_assocreq(struct adapter *padapter)
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ASSOCREQ);
+ SetFrameSubType(pframe, IEEE80211_STYPE_ASSOC_REQ);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -1066,7 +1032,6 @@ static void issue_assocreq(struct adapter *padapter)
for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
if (pmlmeinfo->network.SupportedRates[i] == 0)
break;
- DBG_88E("network.SupportedRates[%d]=%02X\n", i, pmlmeinfo->network.SupportedRates[i]);
}
for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
@@ -1081,17 +1046,12 @@ static void issue_assocreq(struct adapter *padapter)
break;
}
- if (j == sta_bssrate_len) {
- /* the rate is not supported by STA */
- DBG_88E("%s(): the rate[%d]=%02X is not supported by STA!\n", __func__, i, pmlmeinfo->network.SupportedRates[i]);
- } else {
+ if (j != sta_bssrate_len)
/* the rate is supported by STA */
bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
- }
}
bssrate_len = index;
- DBG_88E("bssrate_len=%d\n", bssrate_len);
if (bssrate_len == 0) {
rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
@@ -1226,7 +1186,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_DATA_NULL);
+ SetFrameSubType(pframe, IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -1251,7 +1211,6 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da,
{
int ret;
int i = 0;
- unsigned long start = jiffies;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
@@ -1276,19 +1235,6 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da,
ret = _SUCCESS;
goto exit;
}
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
exit:
return ret;
}
@@ -1309,8 +1255,6 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- DBG_88E("%s\n", __func__);
-
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
goto exit;
@@ -1355,7 +1299,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+ SetFrameSubType(pframe, IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
pframe += sizeof(struct ieee80211_qos_hdr);
pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
@@ -1380,7 +1324,6 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
{
int ret;
int i = 0;
- unsigned long start = jiffies;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
@@ -1405,19 +1348,6 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
ret = _SUCCESS;
goto exit;
}
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
exit:
return ret;
}
@@ -1460,7 +1390,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_DEAUTH);
+ SetFrameSubType(pframe, IEEE80211_STYPE_DEAUTH);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -1485,7 +1415,6 @@ exit:
int issue_deauth(struct adapter *padapter, unsigned char *da,
unsigned short reason)
{
- DBG_88E("%s to %pM\n", __func__, da);
return _issue_deauth(padapter, da, reason, false);
}
@@ -1495,7 +1424,6 @@ static int issue_deauth_ex(struct adapter *padapter, u8 *da,
{
int ret;
int i = 0;
- unsigned long start = jiffies;
do {
ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
@@ -1513,19 +1441,6 @@ static int issue_deauth_ex(struct adapter *padapter, u8 *da,
ret = _SUCCESS;
goto exit;
}
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
exit:
return ret;
}
@@ -1554,8 +1469,6 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
struct registry_priv *pregpriv = &padapter->registrypriv;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- DBG_88E("%s, category=%d, action=%d, status=%d\n", __func__, category, action, status);
-
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
return;
@@ -1578,7 +1491,7 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
+ SetFrameSubType(pframe, IEEE80211_STYPE_ACTION);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -1608,8 +1521,6 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
if (psta) {
start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] & 0xfff) + 1;
- DBG_88E("BA_starting_seqctrl=%d for TID=%d\n", start_seq, status & 0x07);
-
psta->BA_starting_seqctrl[status & 0x07] = start_seq;
BA_starting_seqctrl = start_seq << 4;
@@ -1708,8 +1619,6 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (pmlmeinfo->bwmode_updated)
return;
- DBG_88E("%s\n", __func__);
-
category = RTW_WLAN_CATEGORY_PUBLIC;
action = ACT_PUBLIC_BSSCOEXIST;
@@ -1735,7 +1644,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
+ SetFrameSubType(pframe, IEEE80211_STYPE_ACTION);
pframe += sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -1760,16 +1669,13 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
+ list_for_each(plist, phead) {
uint len;
u8 *p;
struct wlan_bssid_ex *pbss_network;
- pnetwork = container_of(plist, struct wlan_network, list);
-
- plist = plist->next;
+ pnetwork = list_entry(plist, struct wlan_network,
+ list);
pbss_network = &pnetwork->network;
@@ -1833,8 +1739,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
if (initiator == 0) { /* recipient */
for (tid = 0; tid < MAXTID; tid++) {
if (psta->recvreorder_ctrl[tid].enable) {
- DBG_88E("rx agg disable tid(%d)\n", tid);
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
+ issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
psta->recvreorder_ctrl[tid].enable = false;
psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
}
@@ -1842,8 +1747,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
} else if (initiator == 1) { /* originator */
for (tid = 0; tid < MAXTID; tid++) {
if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
- DBG_88E("tx agg disable tid(%d)\n", tid);
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
+ issue_action_BA(padapter, addr, WLAN_ACTION_DELBA, (((tid << 1) | initiator) & 0x1F));
psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
}
@@ -1858,8 +1762,6 @@ unsigned int send_beacon(struct adapter *padapter)
u8 bxmitok = false;
int issue = 0;
int poll = 0;
- unsigned long start = jiffies;
- u32 passing_time;
rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
do {
@@ -1874,17 +1776,9 @@ unsigned int send_beacon(struct adapter *padapter)
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
return _FAIL;
- if (!bxmitok) {
- DBG_88E("%s fail! %u ms\n", __func__,
- jiffies_to_msecs(jiffies - start));
+ if (!bxmitok)
return _FAIL;
- }
- passing_time = jiffies_to_msecs(jiffies - start);
- if (passing_time > 100 || issue > 3)
- DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
- __func__, issue, poll,
- jiffies_to_msecs(jiffies - start));
return _SUCCESS;
}
@@ -2024,15 +1918,15 @@ static u8 collect_bss_info(struct adapter *padapter,
subtype = GetFrameSubType(pframe);
- if (subtype == WIFI_BEACON) {
+ if (subtype == IEEE80211_STYPE_BEACON) {
bssid->Reserved[0] = 1;
ie_offset = _BEACON_IE_OFFSET_;
} else {
/* FIXME : more type */
- if (subtype == WIFI_PROBEREQ) {
+ if (subtype == IEEE80211_STYPE_PROBE_REQ) {
ie_offset = _PROBEREQ_IE_OFFSET_;
bssid->Reserved[0] = 2;
- } else if (subtype == WIFI_PROBERSP) {
+ } else if (subtype == IEEE80211_STYPE_PROBE_RESP) {
ie_offset = _PROBERSP_IE_OFFSET_;
bssid->Reserved[0] = 3;
} else {
@@ -2055,16 +1949,12 @@ static u8 collect_bss_info(struct adapter *padapter,
/* checking SSID */
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_SSID, &len, bssid->ie_length - ie_offset);
- if (!p) {
- DBG_88E("marc: cannot find SSID for survey event\n");
+ if (!p)
return _FAIL;
- }
if (len) {
- if (len > NDIS_802_11_LENGTH_SSID) {
- DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ if (len > NDIS_802_11_LENGTH_SSID)
return _FAIL;
- }
memcpy(bssid->ssid.ssid, (p + 2), len);
bssid->ssid.ssid_length = len;
} else {
@@ -2077,20 +1967,16 @@ static u8 collect_bss_info(struct adapter *padapter,
i = 0;
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_SUPP_RATES, &len, bssid->ie_length - ie_offset);
if (p) {
- if (len > NDIS_802_11_LENGTH_RATES_EX) {
- DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ if (len > NDIS_802_11_LENGTH_RATES_EX)
return _FAIL;
- }
memcpy(bssid->SupportedRates, (p + 2), len);
i = len;
}
p = rtw_get_ie(bssid->ies + ie_offset, WLAN_EID_EXT_SUPP_RATES, &len, bssid->ie_length - ie_offset);
if (p) {
- if (len > (NDIS_802_11_LENGTH_RATES_EX - i)) {
- DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ if (len > (NDIS_802_11_LENGTH_RATES_EX - i))
return _FAIL;
- }
memcpy(bssid->SupportedRates + i, (p + 2), len);
}
@@ -2120,7 +2006,7 @@ static u8 collect_bss_info(struct adapter *padapter,
}
}
- if (subtype == WIFI_PROBEREQ) {
+ if (subtype == IEEE80211_STYPE_PROBE_REQ) {
/* FIXME */
bssid->InfrastructureMode = Ndis802_11Infrastructure;
ether_addr_copy(bssid->MacAddress, GetAddr2Ptr(pframe));
@@ -2204,8 +2090,6 @@ static void start_create_ibss(struct adapter *padapter)
/* issue beacon */
if (send_beacon(padapter) == _FAIL) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("issuing beacon frame fail....\n"));
-
report_join_res(padapter, -1);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
} else {
@@ -2217,7 +2101,6 @@ static void start_create_ibss(struct adapter *padapter)
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
}
} else {
- DBG_88E("%s, invalid cap:%x\n", __func__, caps);
return;
}
}
@@ -2300,7 +2183,6 @@ static void start_clnt_auth(struct adapter *padapter)
/* For the Win8 P2P connection, it will be hard to have a successful connection if this Wi-Fi doesn't connect to it. */
issue_deauth(padapter, (&pmlmeinfo->network)->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
- DBG_88E_LEVEL(_drv_info_, "start auth\n");
issue_auth(padapter, NULL, 0);
set_link_timer(pmlmeext, REAUTH_TO);
@@ -2333,8 +2215,6 @@ static unsigned int receive_disconnect(struct adapter *padapter,
if (memcmp(MacAddr, pnetwork->MacAddress, ETH_ALEN))
return _SUCCESS;
- DBG_88E("%s\n", __func__);
-
if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
pmlmeinfo->state = WIFI_FW_NULL_STATE;
@@ -2382,9 +2262,6 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
memset(country, 0, 4);
memcpy(country, p, 3);
p += 3;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- ("%s: 802.11d country =%s\n", __func__, country));
-
i = 0;
while ((ie - p) >= 3) {
fcn = *(p++);
@@ -2478,12 +2355,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
i = 0;
while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
if (chplan_new[i].ChannelNum == channel) {
- if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE)
chplan_new[i].ScanType = SCAN_ACTIVE;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- ("%s: change channel %d scan type from passive to active\n",
- __func__, channel));
- }
break;
}
i++;
@@ -2593,7 +2466,6 @@ static unsigned int OnBeacon(struct adapter *padapter,
if (psta) {
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
- DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
return _SUCCESS;
}
@@ -2656,25 +2528,18 @@ static unsigned int OnAuth(struct adapter *padapter,
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
return _FAIL;
- DBG_88E("+%s\n", __func__);
-
sa = GetAddr2Ptr(pframe);
auth_mode = psecuritypriv->dot11AuthAlgrthm;
seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + 2));
algorithm = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN));
- DBG_88E("auth alg=%x, seq=%X\n", algorithm, seq);
-
if (auth_mode == 2 && psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
auth_mode = 0;
if ((algorithm > 0 && auth_mode == 0) || /* rx a shared-key auth but shared not enabled */
(algorithm == 0 && auth_mode == 1)) { /* rx a open-system auth but shared-key is enabled */
- DBG_88E("auth rejected due to bad alg [alg=%d, auth_mib=%d] %02X%02X%02X%02X%02X%02X\n",
- algorithm, auth_mode, sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
-
status = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
goto auth_fail;
@@ -2688,10 +2553,8 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat = rtw_get_stainfo(pstapriv, sa);
if (!pstat) {
/* allocate a new one */
- DBG_88E("going to alloc stainfo for sa=%pM\n", sa);
pstat = rtw_alloc_stainfo(pstapriv, sa);
if (!pstat) {
- DBG_88E(" Exceed the upper limit of supported clients...\n");
status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto auth_fail;
}
@@ -2722,8 +2585,6 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat->expire_to = pstapriv->auth_to;
if ((pstat->auth_seq + 1) != seq) {
- DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq + 1);
status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto auth_fail;
}
@@ -2735,8 +2596,6 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat->expire_to = pstapriv->assoc_to;
pstat->authalg = algorithm;
} else {
- DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq + 1);
status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto auth_fail;
}
@@ -2750,13 +2609,10 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat->auth_seq = 2;
} else if (seq == 3) {
/* checking for challenging txt... */
- DBG_88E("checking for challenging txt...\n");
-
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, WLAN_EID_CHALLENGE, &ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
if (!p || ie_len <= 0) {
- DBG_88E("auth rejected because challenge failure!(1)\n");
status = WLAN_STATUS_CHALLENGE_FAIL;
goto auth_fail;
}
@@ -2767,13 +2623,10 @@ static unsigned int OnAuth(struct adapter *padapter,
/* challenging txt is correct... */
pstat->expire_to = pstapriv->assoc_to;
} else {
- DBG_88E("auth rejected because challenge failure!\n");
status = WLAN_STATUS_CHALLENGE_FAIL;
goto auth_fail;
}
} else {
- DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq + 1);
status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
goto auth_fail;
}
@@ -2816,8 +2669,6 @@ static unsigned int OnAuthClient(struct adapter *padapter,
u8 *pframe = precv_frame->pkt->data;
uint pkt_len = precv_frame->pkt->len;
- DBG_88E("%s\n", __func__);
-
/* check A1 matches or not */
if (memcmp(myid(&padapter->eeprompriv), ieee80211_get_DA((struct ieee80211_hdr *)pframe), ETH_ALEN))
return _SUCCESS;
@@ -2831,7 +2682,6 @@ static unsigned int OnAuthClient(struct adapter *padapter,
status = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 4));
if (status != 0) {
- DBG_88E("clnt auth fail, status: %d\n", status);
if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto) */
if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
@@ -2872,7 +2722,6 @@ static unsigned int OnAuthClient(struct adapter *padapter,
}
if (go2asoc) {
- DBG_88E_LEVEL(_drv_info_, "auth success, start assoc\n");
start_clnt_assoc(padapter);
return _SUCCESS;
}
@@ -2887,7 +2736,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
- unsigned char reassoc, *p, *pos, *wpa_ie;
+ unsigned char *p, *pos, *wpa_ie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
int i, wpa_ie_len, left;
unsigned char supportRate[16];
@@ -2907,19 +2756,13 @@ static unsigned int OnAssocReq(struct adapter *padapter,
return _FAIL;
frame_type = GetFrameSubType(pframe);
- if (frame_type == WIFI_ASSOCREQ) {
- reassoc = 0;
+ if (frame_type == IEEE80211_STYPE_ASSOC_REQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
- } else { /* WIFI_REASSOCREQ */
- reassoc = 1;
+ else /* IEEE80211_STYPE_REASSOC_REQ */
ie_offset = _REASOCREQ_IE_OFFSET_;
- }
- if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset) {
- DBG_88E("handle_assoc(reassoc=%d) - too short payload (len=%lu)"
- "\n", reassoc, (unsigned long)pkt_len);
+ if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset)
return _FAIL;
- }
pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (!pstat) {
@@ -2932,8 +2775,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
left = pkt_len - (IEEE80211_3ADDR_LEN + ie_offset);
pos = pframe + (IEEE80211_3ADDR_LEN + ie_offset);
- DBG_88E("%s\n", __func__);
-
/* check if this stat has been successfully authenticated/assocated */
if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
@@ -2951,8 +2792,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* now parse all ieee802_11 ie to point to elems */
if (rtw_ieee802_11_parse_elems(pos, left, &elems, 1) == ParseFailed ||
!elems.ssid) {
- DBG_88E("STA %pM sent invalid association request\n",
- pstat->hwaddr);
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto OnAssocReqFail;
}
@@ -2981,7 +2820,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* check if the supported rate is ok */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, WLAN_EID_SUPP_RATES, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (!p) {
- DBG_88E("Rx a sta assoc-req which supported rate is empty!\n");
/* use our own rate set as statoin used */
/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
/* supportRateNum = AP_BSSRATE_LEN; */
@@ -3072,17 +2910,11 @@ static unsigned int OnAssocReq(struct adapter *padapter,
pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
if (!wpa_ie) {
if (elems.wps_ie) {
- DBG_88E("STA included WPS IE in "
- "(Re)Association Request - assume WPS is "
- "used\n");
pstat->flags |= WLAN_STA_WPS;
/* wpabuf_free(sta->wps_ie); */
/* sta->wps_ie = wpabuf_alloc_copy(elems.wps_ie + 4, */
/* elems.wps_ie_len - 4); */
} else {
- DBG_88E("STA did not include WPA/RSN IE "
- "in (Re)Association Request - possible WPS "
- "use\n");
pstat->flags |= WLAN_STA_MAYBE_WPS;
}
@@ -3095,8 +2927,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
if (!selected_registrar) {
- DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
-
status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto OnAssocReqFail;
@@ -3107,18 +2937,12 @@ static unsigned int OnAssocReq(struct adapter *padapter,
int copy_len;
if (psecuritypriv->wpa_psk == 0) {
- DBG_88E("STA %pM: WPA/RSN IE in association "
- "request, but AP don't support WPA/RSN\n", pstat->hwaddr);
-
status = WLAN_STATUS_INVALID_IE;
goto OnAssocReqFail;
}
if (elems.wps_ie) {
- DBG_88E("STA included WPS IE in "
- "(Re)Association Request - WPS is "
- "used\n");
pstat->flags |= WLAN_STA_WPS;
copy_len = 0;
} else {
@@ -3202,16 +3026,6 @@ static unsigned int OnAssocReq(struct adapter *padapter,
goto OnAssocReqFail;
}
- if ((pstat->flags & WLAN_STA_HT) &&
- ((pstat->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
- (pstat->wpa_pairwise_cipher & WPA_CIPHER_TKIP))) {
- DBG_88E("HT: %pM tried to "
- "use TKIP with HT association\n", pstat->hwaddr);
-
- /* status = WLAN_STATUS_CIPHER_REJECTED_PER_POLICY; */
- /* goto OnAssocReqFail; */
- }
-
pstat->flags |= WLAN_STA_NONERP;
for (i = 0; i < pstat->bssratelen; i++) {
if ((pstat->bssrateset[i] & 0x7f) > 22) {
@@ -3235,9 +3049,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* Customer proprietary IE */
/* get a unique AID */
- if (pstat->aid > 0) {
- DBG_88E(" old AID %d\n", pstat->aid);
- } else {
+ if (pstat->aid <= 0) {
for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
if (!pstapriv->sta_aid[pstat->aid - 1])
break;
@@ -3246,14 +3058,11 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (pstat->aid > pstapriv->max_num_sta) {
pstat->aid = 0;
- DBG_88E(" no room for more AIDs\n");
-
status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto OnAssocReqFail;
} else {
pstapriv->sta_aid[pstat->aid - 1] = pstat;
- DBG_88E("allocate new AID=(%d)\n", pstat->aid);
}
}
@@ -3282,13 +3091,12 @@ static unsigned int OnAssocReq(struct adapter *padapter,
sta_info_update(padapter, pstat);
/* issue assoc rsp before notify station join event. */
- if (frame_type == WIFI_ASSOCREQ)
- issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ if (frame_type == IEEE80211_STYPE_ASSOC_REQ)
+ issue_asocrsp(padapter, status, pstat, IEEE80211_STYPE_ASSOC_RESP);
else
- issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+ issue_asocrsp(padapter, status, pstat, IEEE80211_STYPE_REASSOC_RESP);
/* 2 - report to upper layer */
- DBG_88E("indicate_sta_join_event to upper layer - hostapd\n");
rtw_indicate_sta_assoc_event(padapter, pstat);
/* 3-(1) report sta add event */
@@ -3306,10 +3114,10 @@ asoc_class2_error:
OnAssocReqFail:
pstat->aid = 0;
- if (frame_type == WIFI_ASSOCREQ)
- issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ if (frame_type == IEEE80211_STYPE_ASSOC_REQ)
+ issue_asocrsp(padapter, status, pstat, IEEE80211_STYPE_ASSOC_RESP);
else
- issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+ issue_asocrsp(padapter, status, pstat, IEEE80211_STYPE_REASSOC_RESP);
#endif /* CONFIG_88EU_AP_MODE */
@@ -3329,8 +3137,6 @@ static unsigned int OnAssocRsp(struct adapter *padapter,
u8 *pframe = precv_frame->pkt->data;
uint pkt_len = precv_frame->pkt->len;
- DBG_88E("%s\n", __func__);
-
/* check A1 matches or not */
if (memcmp(myid(&padapter->eeprompriv), ieee80211_get_DA((struct ieee80211_hdr *)pframe), ETH_ALEN))
return _SUCCESS;
@@ -3346,7 +3152,6 @@ static unsigned int OnAssocRsp(struct adapter *padapter,
/* status */
status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
if (status > 0) {
- DBG_88E("assoc reject, status code: %d\n", status);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
res = -4;
goto report_assoc_result;
@@ -3381,6 +3186,7 @@ static unsigned int OnAssocRsp(struct adapter *padapter,
break;
case WLAN_EID_ERP_INFO:
ERP_IE_handler(padapter, pIE);
+ break;
default:
break;
}
@@ -3420,16 +3226,11 @@ static unsigned int OnDeAuth(struct adapter *padapter,
reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
- DBG_88E("%s Reason code(%d)\n", __func__, reason);
-
#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E_LEVEL(_drv_always_, "ap recv deauth reason code(%d) sta:%pM\n",
- reason, GetAddr2Ptr(pframe));
-
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (psta) {
u8 updated = 0;
@@ -3448,9 +3249,6 @@ static unsigned int OnDeAuth(struct adapter *padapter,
return _SUCCESS;
}
#endif
- DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
-
receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
@@ -3473,16 +3271,11 @@ static unsigned int OnDisassoc(struct adapter *padapter,
reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
- DBG_88E("%s Reason code(%d)\n", __func__, reason);
-
#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr2Ptr(pframe));
-
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (psta) {
u8 updated = 0;
@@ -3501,9 +3294,6 @@ static unsigned int OnDisassoc(struct adapter *padapter,
return _SUCCESS;
}
#endif
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
-
receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
@@ -3513,7 +3303,6 @@ static unsigned int OnDisassoc(struct adapter *padapter,
static unsigned int OnAtim(struct adapter *padapter,
struct recv_frame *precv_frame)
{
- DBG_88E("%s\n", __func__);
return _SUCCESS;
}
@@ -3527,8 +3316,6 @@ static unsigned int on_action_spct(struct adapter *padapter,
u8 category;
u8 action;
- DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
-
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (!psta)
@@ -3575,7 +3362,7 @@ static unsigned int OnAction_back(struct adapter *padapter,
struct recv_reorder_ctrl *preorder_ctrl;
unsigned char *frame_body;
unsigned char category, action;
- unsigned short tid, status, reason_code = 0;
+ unsigned short tid, status;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 *pframe = precv_frame->pkt->data;
@@ -3586,8 +3373,6 @@ static unsigned int OnAction_back(struct adapter *padapter,
ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
- DBG_88E("%s\n", __func__);
-
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
return _SUCCESS;
@@ -3605,40 +3390,36 @@ static unsigned int OnAction_back(struct adapter *padapter,
if (!pmlmeinfo->HT_enable)
return _SUCCESS;
action = frame_body[1];
- DBG_88E("%s, action=%d\n", __func__, action);
switch (action) {
- case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
+ case WLAN_ACTION_ADDBA_REQ:
memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
process_addba_req(padapter, (u8 *)&pmlmeinfo->ADDBA_req, addr);
/* 37 = reject ADDBA Req */
issue_action_BA(padapter, addr,
- RTW_WLAN_ACTION_ADDBA_RESP,
+ WLAN_ACTION_ADDBA_RESP,
pmlmeinfo->accept_addba_req ? 0 : 37);
break;
- case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
+ case WLAN_ACTION_ADDBA_RESP:
status = get_unaligned_le16(&frame_body[3]);
tid = (frame_body[5] >> 2) & 0x7;
if (status == 0) { /* successful */
- DBG_88E("agg_enable for TID=%d\n", tid);
psta->htpriv.agg_enable_bitmap |= 1 << tid;
psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
} else {
psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
}
break;
- case RTW_WLAN_ACTION_DELBA: /* DELBA */
+ case WLAN_ACTION_DELBA:
if ((frame_body[3] & BIT(3)) == 0) {
psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
- reason_code = get_unaligned_le16(&frame_body[4]);
} else if ((frame_body[3] & BIT(3)) == BIT(3)) {
tid = (frame_body[3] >> 4) & 0x0F;
preorder_ctrl = &psta->recvreorder_ctrl[tid];
preorder_ctrl->enable = false;
preorder_ctrl->indicate_seq = 0xffff;
}
- DBG_88E("%s(): DELBA: %x(%x)\n", __func__, pmlmeinfo->agg_enable_bitmap, reason_code);
/* todo: how to notify the host while receiving DELETE BA */
break;
default:
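
OnAction_back() above now uses the generic block-ack action codes (WLAN_ACTION_ADDBA_REQ/RESP, WLAN_ACTION_DELBA) from <linux/ieee80211.h> instead of the driver-private RTW_WLAN_ACTION_* values. A small hypothetical sketch of reading an ADDBA-response status the same way the hunk does, assuming frame_body points at the action frame body:

#include <linux/ieee80211.h>
#include <asm/unaligned.h>

static int demo_addba_resp_status(const u8 *frame_body)
{
	/* frame_body[0] is the category, frame_body[1] the action code. */
	if (frame_body[1] != WLAN_ACTION_ADDBA_RESP)
		return -1;

	/* The 16-bit status field is not naturally aligned in the frame,
	 * hence the unaligned accessor.
	 */
	return get_unaligned_le16(&frame_body[3]);
}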
@@ -3658,17 +3439,11 @@ static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token)
if (GetRetry(frame)) {
if (token >= 0) {
- if ((seq_ctrl == mlmeext->action_public_rxseq) && (token == mlmeext->action_public_dialog_token)) {
- DBG_88E(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x, token:%d\n",
- FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq, token);
+ if ((seq_ctrl == mlmeext->action_public_rxseq) && (token == mlmeext->action_public_dialog_token))
return _FAIL;
- }
} else {
- if (seq_ctrl == mlmeext->action_public_rxseq) {
- DBG_88E(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x\n",
- FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq);
+ if (seq_ctrl == mlmeext->action_public_rxseq)
return _FAIL;
- }
}
}
@@ -3822,20 +3597,20 @@ Following are the initialization functions for WiFi MLME
*****************************************************************************/
static struct mlme_handler mlme_sta_tbl[] = {
- {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
- {WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
- {WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
- {WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
- {WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
- {WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
- {0, "DoReserved", &DoReserved},
- {0, "DoReserved", &DoReserved},
- {WIFI_BEACON, "OnBeacon", &OnBeacon},
- {WIFI_ATIM, "OnATIM", &OnAtim},
- {WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
- {WIFI_AUTH, "OnAuth", &OnAuthClient},
- {WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
- {WIFI_ACTION, "OnAction", &OnAction},
+ {IEEE80211_STYPE_ASSOC_REQ, "OnAssocReq", &OnAssocReq},
+ {IEEE80211_STYPE_ASSOC_RESP, "OnAssocRsp", &OnAssocRsp},
+ {IEEE80211_STYPE_REASSOC_REQ, "OnReAssocReq", &OnAssocReq},
+ {IEEE80211_STYPE_REASSOC_RESP, "OnReAssocRsp", &OnAssocRsp},
+ {IEEE80211_STYPE_PROBE_REQ, "OnProbeReq", &OnProbeReq},
+ {IEEE80211_STYPE_PROBE_RESP, "OnProbeRsp", &OnProbeRsp},
+ {0, "DoReserved", &DoReserved},
+ {0, "DoReserved", &DoReserved},
+ {IEEE80211_STYPE_BEACON, "OnBeacon", &OnBeacon},
+ {IEEE80211_STYPE_ATIM, "OnATIM", &OnAtim},
+ {IEEE80211_STYPE_DISASSOC, "OnDisassoc", &OnDisassoc},
+ {IEEE80211_STYPE_AUTH, "OnAuth", &OnAuthClient},
+ {IEEE80211_STYPE_DEAUTH, "OnDeAuth", &OnDeAuth},
+ {IEEE80211_STYPE_ACTION, "OnAction", &OnAction},
};
int init_hw_mlme_ext(struct adapter *padapter)
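
mlme_sta_tbl above is re-keyed from the driver's WIFI_* codes to the generic IEEE80211_STYPE_* values. Those constants live in bits 4-7 of the 802.11 frame-control word, which is why the existing "subtype >> 4" indexing in mgt_dispatcher() keeps working. A hypothetical sketch of that mapping, assuming a little-endian frame_control field:

#include <linux/types.h>
#include <linux/ieee80211.h>

static int demo_mgmt_table_index(__le16 frame_control)
{
	u16 fc = le16_to_cpu(frame_control);

	if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
		return -1;	/* not a management frame */

	/* e.g. IEEE80211_STYPE_BEACON (0x0080) >> 4 == 8, the OnBeacon slot */
	return (fc & IEEE80211_FCTL_STYPE) >> 4;
}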
@@ -3971,10 +3746,8 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan,
memset(channel_set, 0, sizeof(struct rt_channel_info) * MAX_CHANNEL_NUM);
- if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE) {
- DBG_88E("ChannelPlan ID %x error !!!!!\n", ChannelPlan);
+ if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
return chanset_size;
- }
if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
b2_4GBand = true;
@@ -4017,8 +3790,6 @@ int init_mlme_ext_priv(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- pmlmeext->padapter = padapter;
-
init_mlme_ext_priv_value(padapter);
pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req;
@@ -4041,10 +3812,7 @@ int init_mlme_ext_priv(struct adapter *padapter)
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
{
- struct adapter *padapter = pmlmeext->padapter;
-
- if (!padapter)
- return;
+ struct adapter *padapter = container_of(pmlmeext, struct adapter, mlmeextpriv);
if (padapter->bDriverStopped) {
del_timer_sync(&pmlmeext->survey_timer);
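
free_mlme_ext_priv() above recovers its adapter with container_of() instead of a stored pmlmeext->padapter back-pointer (which an earlier hunk also stops initialising). A minimal sketch of the idiom, with hypothetical structure names:

#include <linux/kernel.h>

struct demo_ext {
	int state;
};

struct demo_adapter {
	int id;
	struct demo_ext ext;	/* embedded by value, not a pointer */
};

static struct demo_adapter *demo_owner(struct demo_ext *ext)
{
	/*
	 * container_of() subtracts offsetof(struct demo_adapter, ext) from
	 * ext, giving back the enclosing adapter; no back-pointer field and
	 * no NULL check are needed.
	 */
	return container_of(ext, struct demo_adapter, ext);
}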
@@ -4056,13 +3824,12 @@ static void _mgt_dispatcher(struct adapter *padapter,
struct mlme_handler *ptable,
struct recv_frame *precv_frame)
{
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->pkt->data;
if (ptable->func) {
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
return;
ptable->func(padapter, precv_frame);
}
@@ -4075,63 +3842,48 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
#ifdef CONFIG_88EU_AP_MODE
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
#endif
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->pkt->data;
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("+%s: type(0x%x) subtype(0x%x)\n", __func__,
- (unsigned int)GetFrameType(pframe),
- (unsigned int)GetFrameSubType(pframe)));
-
- if (GetFrameType(pframe) != WIFI_MGT_TYPE) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("%s: type(0x%x) error!\n", __func__,
- (unsigned int)GetFrameType(pframe)));
+ if (GetFrameType(pframe) != WIFI_MGT_TYPE)
return;
- }
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
return;
ptable = mlme_sta_tbl;
index = GetFrameSubType(pframe) >> 4;
- if (index > 13) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type=%d\n", index));
+ if (index > 13)
return;
- }
ptable += index;
if (psta) {
if (GetRetry(pframe)) {
if (precv_frame->attrib.seq_num ==
- psta->RxMgmtFrameSeqNum) {
+ psta->RxMgmtFrameSeqNum)
/* drop the duplicate management frame */
- DBG_88E("Drop duplicate management frame with seq_num=%d.\n",
- precv_frame->attrib.seq_num);
return;
- }
}
psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
}
#ifdef CONFIG_88EU_AP_MODE
switch (GetFrameSubType(pframe)) {
- case WIFI_AUTH:
+ case IEEE80211_STYPE_AUTH:
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
ptable->func = &OnAuth;
else
ptable->func = &OnAuthClient;
fallthrough;
- case WIFI_ASSOCREQ:
- case WIFI_REASSOCREQ:
- case WIFI_PROBEREQ:
- case WIFI_BEACON:
- case WIFI_ACTION:
+ case IEEE80211_STYPE_ASSOC_REQ:
+ case IEEE80211_STYPE_REASSOC_REQ:
+ case IEEE80211_STYPE_PROBE_REQ:
+ case IEEE80211_STYPE_BEACON:
+ case IEEE80211_STYPE_ACTION:
_mgt_dispatcher(padapter, ptable, precv_frame);
break;
default:
@@ -4244,8 +3996,6 @@ void report_surveydone_event(struct adapter *padapter)
psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
- DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
-
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
@@ -4290,8 +4040,6 @@ void report_join_res(struct adapter *padapter, int res)
pjoinbss_evt->network.join_res = res;
pjoinbss_evt->network.aid = res;
- DBG_88E("%s(%d)\n", __func__, res);
-
rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
@@ -4347,8 +4095,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr,
pdel_sta_evt->mac_id = mac_id;
- DBG_88E("%s: delete STA, mac_id =%d\n", __func__, mac_id);
-
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
@@ -4392,8 +4138,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr,
ether_addr_copy((unsigned char *)(&padd_sta_evt->macaddr), MacAddr);
padd_sta_evt->cam_id = cam_idx;
- DBG_88E("%s: add STA\n", __func__);
-
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
@@ -4462,7 +4206,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
- goto exit_mlmeext_joinbss_event_callback;
+ return;
}
if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
@@ -4518,10 +4262,6 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
correct_TSF(padapter, pmlmeext);
}
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
-
-exit_mlmeext_joinbss_event_callback:
-
- DBG_88E("=>%s\n", __func__);
}
void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta)
@@ -4530,8 +4270,6 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 join_type;
- DBG_88E("%s\n", __func__);
-
if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
/* nothing to do */
@@ -4683,8 +4421,6 @@ void linked_status_chk(struct adapter *padapter)
if (rx_chk == _FAIL) {
pmlmeext->retry++;
if (pmlmeext->retry > rx_chk_limit) {
- DBG_88E_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
- FUNC_ADPT_ARG(padapter));
receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
WLAN_REASON_EXPIRATION_CHK);
return;
@@ -4744,8 +4480,6 @@ void survey_timer_hdl(struct timer_list *t)
if (pmlmeext->scan_abort) {
pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
- DBG_88E("%s idx:%d\n", __func__
- , pmlmeext->sitesurvey_res.channel_idx);
pmlmeext->scan_abort = false;/* reset */
}
@@ -4776,7 +4510,6 @@ void link_timer_hdl(struct timer_list *t)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
- DBG_88E("%s:no beacon while connecting\n", __func__);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
report_join_res(padapter, -3);
} else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
@@ -4787,7 +4520,6 @@ void link_timer_hdl(struct timer_list *t)
return;
}
- DBG_88E("%s: auth timeout and try again\n", __func__);
pmlmeinfo->auth_seq = 1;
issue_auth(padapter, NULL, 0);
set_link_timer(pmlmeext, REAUTH_TO);
@@ -4799,7 +4531,6 @@ void link_timer_hdl(struct timer_list *t)
return;
}
- DBG_88E("%s: assoc timeout and try again\n", __func__);
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
@@ -4991,9 +4722,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
default:
pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
break;
- }
+ }
- DBG_88E("set ch/bw before connected\n");
}
}
break;
@@ -5204,8 +4934,6 @@ u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
/* write cam */
ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
- DBG_88E_LEVEL(_drv_info_, "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) "
- "keyid:%d\n", pparm->algorithm, pparm->keyid);
write_cam(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
return H2C_SUCCESS;
@@ -5234,8 +4962,6 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
cam_id = 4;
- DBG_88E_LEVEL(_drv_info_, "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
- pparm->algorithm, cam_id);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -5249,24 +4975,16 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
if (psta) {
ctrl = BIT(15) | ((pparm->algorithm) << 2);
- DBG_88E("r871x_set_stakey_hdl(): enc_algorithm=%d\n", pparm->algorithm);
-
- if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA - 4))) {
- DBG_88E("r871x_set_stakey_hdl():set_stakey failed, mac_id(aid)=%d\n", psta->mac_id);
+ if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA - 4)))
return H2C_REJECTED;
- }
cam_id = psta->mac_id + 3;/* 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
- DBG_88E("Write CAM, mac_addr =%pM, cam_entry=%d\n",
- pparm->addr, cam_id);
-
write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
return H2C_SUCCESS_RSP;
}
- DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
return H2C_REJECTED;
}
@@ -5294,7 +5012,7 @@ u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
- issue_action_BA(padapter, pparm->addr, RTW_WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
+ issue_action_BA(padapter, pparm->addr, WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
mod_timer(&psta->addba_retry_timer,
jiffies + msecs_to_jiffies(ADDBA_TO));
} else {
@@ -5352,19 +5070,13 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
evt_code = (u8)((*peventbuf >> 16) & 0xff);
/* checking if event code is valid */
- if (evt_code >= MAX_C2HEVT) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent Code(%d) mismatch!\n", evt_code));
+ if (evt_code >= MAX_C2HEVT)
goto _abort_event_;
- }
/* checking if event size match the event parm size */
if ((wlanevents[evt_code].parmsize != 0) &&
- (wlanevents[evt_code].parmsize != evt_sz)) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- ("\nEvent(%d) Parm Size mismatch (%d vs %d)!\n",
- evt_code, wlanevents[evt_code].parmsize, evt_sz));
+ (wlanevents[evt_code].parmsize != evt_sz))
goto _abort_event_;
- }
peventbuf += 2;
@@ -5379,15 +5091,13 @@ _abort_event_:
u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
{
- if (send_beacon(padapter) == _FAIL) {
- DBG_88E("issue_beacon, fail!\n");
+ if (send_beacon(padapter) == _FAIL)
return H2C_PARAMETERS_ERROR;
- }
#ifdef CONFIG_88EU_AP_MODE
else { /* tx bc/mc frames after update TIM */
struct sta_info *psta_bmc;
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
+ struct list_head *xmitframe_phead;
+ struct xmit_frame *pxmitframe, *n;
struct sta_priv *pstapriv = &padapter->stapriv;
/* for BC/MC Frames */
@@ -5400,13 +5110,8 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
+ list_for_each_entry_safe(pxmitframe, n, xmitframe_phead,
+ list) {
list_del_init(&pxmitframe->list);
psta_bmc->sleepq_len--;
@@ -5441,10 +5146,6 @@ u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
set_ch_parm = (struct set_ch_parm *)pbuf;
- DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
- FUNC_NDEV_ARG(padapter->pnetdev),
- set_ch_parm->ch, set_ch_parm->bw, set_ch_parm->ch_offset);
-
pmlmeext->cur_channel = set_ch_parm->ch;
pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
pmlmeext->cur_bwmode = set_ch_parm->bw;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 4d507d9faec2..cbb34b920ab9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -18,17 +18,12 @@ static int rtw_hw_suspend(struct adapter *padapter)
struct net_device *pnetdev = padapter->pnetdev;
if ((!padapter->bup) || (padapter->bDriverStopped) ||
- (padapter->bSurpriseRemoved)) {
- DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
- padapter->bup, padapter->bDriverStopped,
- padapter->bSurpriseRemoved);
+ (padapter->bSurpriseRemoved))
goto error_exit;
- }
/* system suspend */
LeaveAllPowerSaveMode(padapter);
- DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
/* s1. */
@@ -69,7 +64,6 @@ static int rtw_hw_suspend(struct adapter *padapter)
return 0;
error_exit:
- DBG_88E("%s, failed\n", __func__);
return -1;
}
@@ -79,7 +73,6 @@ static int rtw_hw_resume(struct adapter *padapter)
struct net_device *pnetdev = padapter->pnetdev;
/* system resume */
- DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
@@ -107,7 +100,6 @@ static int rtw_hw_resume(struct adapter *padapter)
return 0;
error_exit:
- DBG_88E("%s, Open net dev failed\n", __func__);
return -1;
}
@@ -120,12 +112,8 @@ void ips_enter(struct adapter *padapter)
return;
if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
- pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
- DBG_88E_LEVEL(_drv_info_, "There are some pkts to transmit\n");
- DBG_88E_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
- pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF)
return;
- }
mutex_lock(&pwrpriv->mutex_lock);
@@ -135,10 +123,8 @@ void ips_enter(struct adapter *padapter)
pwrpriv->ips_mode = pwrpriv->ips_mode_req;
pwrpriv->ips_enter_cnts++;
- DBG_88E("==>%s:%d\n", __func__, pwrpriv->ips_enter_cnts);
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
- DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
if (pwrpriv->ips_mode == IPS_LEVEL_2)
pwrpriv->bkeepfwalive = true;
@@ -165,16 +151,12 @@ int ips_leave(struct adapter *padapter)
pwrpriv->bips_processing = true;
pwrpriv->change_rfpwrstate = rf_on;
pwrpriv->ips_leave_cnts++;
- DBG_88E("==>%s:%d\n", __func__, pwrpriv->ips_leave_cnts);
result = rtw_ips_pwr_up(padapter);
if (result == _SUCCESS)
pwrpriv->rf_pwrstate = rf_on;
- DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
-
if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
- DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
for (keyid = 0; keyid < 4; keyid++) {
if (pmlmepriv->key_mask & BIT(keyid)) {
@@ -186,7 +168,6 @@ int ips_leave(struct adapter *padapter)
}
}
- DBG_88E("==> %s.....LED(0x%08x)...\n", __func__, usb_read32(padapter, 0x4c));
pwrpriv->bips_processing = false;
pwrpriv->bkeepfwalive = false;
@@ -217,7 +198,6 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
void rtw_ps_processor(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
enum rt_rf_power_state rfpwrstate;
pwrpriv->ps_processing = true;
@@ -227,7 +207,6 @@ void rtw_ps_processor(struct adapter *padapter)
if (padapter->pwrctrlpriv.bHWPwrPindetect) {
rfpwrstate = RfOnOffDetect(padapter);
- DBG_88E("@@@@- #2 %s==> rfstate:%s\n", __func__, (rfpwrstate == rf_on) ? "rf_on" : "rf_off");
if (rfpwrstate != pwrpriv->rf_pwrstate) {
if (rfpwrstate == rf_off) {
@@ -238,7 +217,6 @@ void rtw_ps_processor(struct adapter *padapter)
pwrpriv->change_rfpwrstate = rf_on;
rtw_hw_resume(padapter);
}
- DBG_88E("current rf_pwrstate(%s)\n", (pwrpriv->rf_pwrstate == rf_off) ? "rf_off" : "rf_on");
}
pwrpriv->pwr_state_check_cnts++;
}
@@ -250,7 +228,6 @@ void rtw_ps_processor(struct adapter *padapter)
goto exit;
if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts % 4) == 0)) {
- DBG_88E("==>%s .fw_state(%x)\n", __func__, get_fwstate(pmlmepriv));
pwrpriv->change_rfpwrstate = rf_off;
ips_enter(padapter);
@@ -288,37 +265,22 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
pslv = PS_STATE_S3;
}
- if (pwrpriv->rpwm == pslv) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- ("%s: Already set rpwm[0x%02X], new=0x%02X!\n", __func__, pwrpriv->rpwm, pslv));
+ if (pwrpriv->rpwm == pslv)
return;
- }
if ((padapter->bSurpriseRemoved) ||
(!padapter->hw_init_completed)) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- ("%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
- __func__, padapter->bSurpriseRemoved, padapter->hw_init_completed));
-
pwrpriv->cpwm = PS_STATE_S4;
return;
}
if (padapter->bDriverStopped) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- ("%s: change power state(0x%02X) when DriverStopped\n", __func__, pslv));
-
- if (pslv < PS_STATE_S2) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- ("%s: Reject to enter PS_STATE(0x%02X) lower than S2 when DriverStopped!!\n", __func__, pslv));
+ if (pslv < PS_STATE_S2)
return;
- }
}
rpwm = pslv | pwrpriv->tog;
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
- ("%s: rpwm=0x%02x cpwm=0x%02x\n", __func__, rpwm, pwrpriv->cpwm));
pwrpriv->rpwm = pslv;
@@ -349,10 +311,8 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
if (pwrpriv->bInSuspend)
return false;
if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X &&
- !padapter->securitypriv.binstallGrpkey) {
- DBG_88E("Group handshake still in progress !!!\n");
+ !padapter->securitypriv.binstallGrpkey)
return false;
- }
return true;
}
@@ -360,14 +320,8 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
- ("%s: PowerMode=%d Smart_PS=%d\n",
- __func__, ps_mode, smart_ps));
-
- if (ps_mode > PM_Card_Disable) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_, ("ps_mode:%d error\n", ps_mode));
+ if (ps_mode > PM_Card_Disable)
return;
- }
if (pwrpriv->pwr_mode == ps_mode) {
if (ps_mode == PS_MODE_ACTIVE)
@@ -381,7 +335,6 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
/* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
if (ps_mode == PS_MODE_ACTIVE) {
if (PS_RDY_CHECK(padapter)) {
- DBG_88E("%s: Enter 802.11 power save\n", __func__);
pwrpriv->bFwCurrentInPSMode = true;
pwrpriv->pwr_mode = ps_mode;
pwrpriv->smart_ps = smart_ps;
@@ -412,13 +365,11 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
if (padapter->bSurpriseRemoved) {
err = -2;
- DBG_88E("%s: device surprise removed!!\n", __func__);
break;
}
if (jiffies_to_msecs(jiffies - start_time) > delay_ms) {
err = -1;
- DBG_88E("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
break;
}
msleep(1);
@@ -443,7 +394,6 @@ void LPS_Enter(struct adapter *padapter)
if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
pwrpriv->bpower_saving = true;
- DBG_88E("%s smart_ps:%d\n", __func__, pwrpriv->smart_ps);
/* For Tenda W311R IOT issue */
rtw_set_ps_mode(padapter, pwrpriv->power_mgnt, pwrpriv->smart_ps, 0);
}
@@ -549,14 +499,9 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
start = jiffies;
if (pwrpriv->ps_processing) {
- DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing &&
jiffies_to_msecs(jiffies - start) <= 3000)
udelay(1500);
- if (pwrpriv->ps_processing)
- DBG_88E("%s wait ps_processing timeout\n", __func__);
- else
- DBG_88E("%s wait ps_processing done\n", __func__);
}
/* System suspend is not allowed to wakeup */
@@ -577,9 +522,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
goto exit;
}
if (rf_off == pwrpriv->rf_pwrstate) {
- DBG_88E("%s call ips_leave....\n", __func__);
if (ips_leave(padapter) == _FAIL) {
- DBG_88E("======> ips_leave fail.............\n");
ret = _FAIL;
goto exit;
}
@@ -588,11 +531,6 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
/* TODO: the following checking need to be merged... */
if (padapter->bDriverStopped || !padapter->bup ||
!padapter->hw_init_completed) {
- DBG_88E("%s: bDriverStopped=%d, bup=%d, hw_init_completed =%u\n"
- , caller
- , padapter->bDriverStopped
- , padapter->bup
- , padapter->hw_init_completed);
ret = false;
goto exit;
}
@@ -631,11 +569,9 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
if (mode == IPS_NORMAL || mode == IPS_LEVEL_2) {
rtw_ips_mode_req(pwrctrlpriv, mode);
- DBG_88E("%s %s\n", __func__, mode == IPS_NORMAL ? "IPS_NORMAL" : "IPS_LEVEL_2");
return 0;
} else if (mode == IPS_NONE) {
rtw_ips_mode_req(pwrctrlpriv, mode);
- DBG_88E("%s %s\n", __func__, "IPS_NONE");
if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
return -EFAULT;
} else {
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index b2fe448d999d..ff2ef36604e1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -7,6 +7,7 @@
#define _RTW_RECV_C_
#include <linux/ieee80211.h>
+#include <linux/if_ether.h>
#include <osdep_service.h>
#include <drv_types.h>
@@ -15,9 +16,9 @@
#include <mon.h>
#include <wifi.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
#include <net/cfg80211.h>
-#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
#define LLC_HEADER_SIZE 6 /* LLC Header Length */
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
@@ -114,11 +115,11 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
return precvframe;
}
-int rtw_free_recvframe(struct recv_frame *precvframe,
- struct __queue *pfree_recv_queue)
+void rtw_free_recvframe(struct recv_frame *precvframe, struct __queue *pfree_recv_queue)
{
if (!precvframe)
- return _FAIL;
+ return;
+
if (precvframe->pkt) {
dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
precvframe->pkt = NULL;
@@ -131,8 +132,6 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue));
spin_unlock_bh(&pfree_recv_queue->lock);
-
- return _SUCCESS;
}
int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
@@ -190,7 +189,6 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
- DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
cnt++;
}
@@ -216,31 +214,16 @@ static int recvframe_chkmic(struct adapter *adapter,
stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
if (prxattrib->encrypt == _TKIP_) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
- prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
-
/* calculate mic code */
if (stainfo) {
if (is_multicast_ether_addr(prxattrib->ra)) {
if (!psecuritypriv) {
res = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("\n %s: didn't install group key!!!!!!!!!!\n", __func__));
- DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("\n %s: bcmc key\n", __func__));
} else {
mickey = &stainfo->dot11tkiprxmickey.skey[0];
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("\n %s: unicast key\n", __func__));
}
/* icv_len included the mic code */
@@ -249,7 +232,6 @@ static int recvframe_chkmic(struct adapter *adapter,
pframe = precvframe->pkt->data;
payload = pframe + prxattrib->hdrlen + prxattrib->iv_len;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
(unsigned char)prxattrib->priority); /* care the length of the data */
@@ -258,83 +240,25 @@ static int recvframe_chkmic(struct adapter *adapter,
bmic_err = false;
for (i = 0; i < 8; i++) {
- if (miccode[i] != *(pframemic + i)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
- __func__, i, miccode[i], i, *(pframemic + i)));
+ if (miccode[i] != *(pframemic + i))
bmic_err = true;
- }
}
if (bmic_err) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- *(pframemic - 8), *(pframemic - 7), *(pframemic - 6),
- *(pframemic - 5), *(pframemic - 4), *(pframemic - 3),
- *(pframemic - 2), *(pframemic - 1)));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- *(pframemic - 16), *(pframemic - 15), *(pframemic - 14),
- *(pframemic - 13), *(pframemic - 12), *(pframemic - 11),
- *(pframemic - 10), *(pframemic - 9)));
- {
- uint i;
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("\n ======demp packet (len=%d)======\n",
- precvframe->pkt->len));
- for (i = 0; i < precvframe->pkt->len; i += 8) {
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
- *(precvframe->pkt->data + i),
- *(precvframe->pkt->data + i + 1),
- *(precvframe->pkt->data + i + 2),
- *(precvframe->pkt->data + i + 3),
- *(precvframe->pkt->data + i + 4),
- *(precvframe->pkt->data + i + 5),
- *(precvframe->pkt->data + i + 6),
- *(precvframe->pkt->data + i + 7)));
- }
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- ("\n ====== demp packet end [len=%d]======\n",
- precvframe->pkt->len));
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- ("\n hrdlen=%d,\n",
- prxattrib->hdrlen));
- }
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
- prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
- prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
-
/* double check key_index for some timing issue , */
/* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
if (is_multicast_ether_addr(prxattrib->ra) && prxattrib->key_index != pmlmeinfo->key_index)
brpt_micerror = false;
- if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+ if ((prxattrib->bdecrypted) && (brpt_micerror))
rtw_handle_tkip_mic_err(adapter, (u8)is_multicast_ether_addr(prxattrib->ra));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
- DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
- DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
- }
res = _FAIL;
} else {
/* mic checked ok */
- if (!psecuritypriv->bcheck_grpkey && is_multicast_ether_addr(prxattrib->ra)) {
+ if (!psecuritypriv->bcheck_grpkey &&
+ is_multicast_ether_addr(prxattrib->ra))
psecuritypriv->bcheck_grpkey = true;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
- }
}
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
}
skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
@@ -354,16 +278,12 @@ static struct recv_frame *decryptor(struct adapter *padapter,
struct recv_frame *return_packet = precv_frame;
u32 res = _SUCCESS;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted=%x prxattrib->encrypt=0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
-
if (prxattrib->encrypt > 0) {
u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
if (prxattrib->key_index > WEP_KEYS) {
- DBG_88E("prxattrib->key_index(%d)>WEP_KEYS\n", prxattrib->key_index);
-
switch (prxattrib->encrypt) {
case _WEP40_:
case _WEP104_:
@@ -434,8 +354,6 @@ static struct recv_frame *portctrl(struct adapter *adapter,
prtnframe = NULL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########%s:adapter->securitypriv.dot11AuthAlgrthm=%d\n", __func__, adapter->securitypriv.dot11AuthAlgrthm));
-
if (auth_alg == 2) {
/* get ether_type */
ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len;
@@ -445,8 +363,6 @@ static struct recv_frame *portctrl(struct adapter *adapter,
if (psta && (psta->ieee8021x_blocked)) {
/* blocked */
/* only accept EAPOL frame */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########%s:psta->ieee8021x_blocked==1\n", __func__));
-
if (ether_type == eapol_type) {
prtnframe = precv_frame;
} else {
@@ -457,24 +373,11 @@ static struct recv_frame *portctrl(struct adapter *adapter,
} else {
/* allowed */
/* check decryption status, and decrypt the frame if needed */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########%s:psta->ieee8021x_blocked==0\n", __func__));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("%s:precv_frame->hdr.attrib.privacy=%x\n",
- __func__, precv_frame->attrib.privacy));
-
- if (pattrib->bdecrypted == 0)
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("%s:prxstat->decrypted=%x\n", __func__, pattrib->bdecrypted));
-
prtnframe = precv_frame;
/* check is the EAPOL frame or not (Rekey) */
- if (ether_type == eapol_type) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("########%s:ether_type==0x888e\n", __func__));
+ if (ether_type == eapol_type)
/* check Rekey */
-
prtnframe = precv_frame;
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########%s:ether_type=0x%04x\n", __func__, ether_type));
- }
}
} else {
prtnframe = precv_frame;
@@ -491,19 +394,11 @@ static int recv_decache(struct recv_frame *precv_frame, u8 bretry,
u16 seq_ctrl = ((precv_frame->attrib.seq_num & 0xffff) << 4) |
(precv_frame->attrib.frag_num & 0xf);
- if (tid > 15) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", __func__, seq_ctrl, tid));
-
+ if (tid > 15)
return _FAIL;
- }
- if (1) {/* if (bretry) */
- if (seq_ctrl == prxcache->tid_rxseq[tid]) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s, seq_ctrl=0x%x, tid=0x%x, tid_rxseq=0x%x\n", __func__, seq_ctrl, tid, prxcache->tid_rxseq[tid]));
-
- return _FAIL;
- }
- }
+ if (seq_ctrl == prxcache->tid_rxseq[tid])
+ return _FAIL;
prxcache->tid_rxseq[tid] = seq_ctrl;
@@ -641,7 +536,6 @@ static int sta2sta_data_frame(struct adapter *adapter,
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
/* filter packets that SA is myself or multicast or broadcast */
if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
ret = _FAIL;
goto exit;
}
@@ -651,8 +545,8 @@ static int sta2sta_data_frame(struct adapter *adapter,
goto exit;
}
- if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ if (is_zero_ether_addr(pattrib->bssid) ||
+ is_zero_ether_addr(mybssid) ||
memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
ret = _FAIL;
goto exit;
@@ -662,7 +556,6 @@ static int sta2sta_data_frame(struct adapter *adapter,
} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
/* For Station mode, sa and bssid should always be BSSID, and DA is my mac-address */
if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("bssid!=TA under STATION_MODE; drop pkt\n"));
ret = _FAIL;
goto exit;
}
@@ -693,7 +586,6 @@ static int sta2sta_data_frame(struct adapter *adapter,
*psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */
if (!*psta) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under %s ; drop pkt\n", __func__));
ret = _FAIL;
goto exit;
}
@@ -720,31 +612,22 @@ static int ap2sta_data_frame(struct adapter *adapter,
check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
/* filter packets that SA is myself or multicast or broadcast */
if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
ret = _FAIL;
goto exit;
}
/* da should be for me */
if (memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && !mcast) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- (" %s: compare DA fail; DA=%pM\n", __func__, (pattrib->dst)));
ret = _FAIL;
goto exit;
}
/* check BSSID */
- if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- (" %s: compare BSSID fail ; BSSID=%pM\n", __func__, (pattrib->bssid)));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("mybssid=%pM\n", (mybssid)));
-
- if (!mcast) {
- DBG_88E("issue_deauth to the nonassociated ap=%pM for the reason(7)\n", (pattrib->bssid));
+ if (is_zero_ether_addr(pattrib->bssid) ||
+ is_zero_ether_addr(mybssid) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ if (!mcast)
issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- }
ret = _FAIL;
goto exit;
@@ -756,7 +639,6 @@ static int ap2sta_data_frame(struct adapter *adapter,
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get ap_info */
if (!*psta) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("ap2sta: can't get psta under STATION_MODE ; drop pkt\n"));
ret = _FAIL;
goto exit;
}
@@ -777,11 +659,8 @@ static int ap2sta_data_frame(struct adapter *adapter,
} else {
if (!memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && !mcast) {
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
- if (!*psta) {
- DBG_88E("issue_deauth to the ap =%pM for the reason(7)\n", (pattrib->bssid));
-
+ if (!*psta)
issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- }
}
ret = _FAIL;
@@ -812,9 +691,6 @@ static int sta2ap_data_frame(struct adapter *adapter,
*psta = rtw_get_stainfo(pstapriv, pattrib->src);
if (!*psta) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under AP_MODE; drop pkt\n"));
- DBG_88E("issue_deauth to sta=%pM for the reason(7)\n", (pattrib->src));
-
issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
ret = RTW_RX_HANDLED;
@@ -839,7 +715,6 @@ static int sta2ap_data_frame(struct adapter *adapter,
ret = RTW_RX_HANDLED;
goto exit;
}
- DBG_88E("issue_deauth to sta=%pM for the reason(7)\n", (pattrib->src));
issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
ret = RTW_RX_HANDLED;
goto exit;
@@ -866,7 +741,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
return _FAIL;
/* only handle ps-poll */
- if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
+ if (GetFrameSubType(pframe) == (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL)) {
u16 aid;
u8 wmmps_ac = 0;
struct sta_info *psta = NULL;
@@ -904,7 +779,6 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
return _FAIL;
if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- DBG_88E("%s alive check-rx ps-poll\n", __func__);
psta->expire_to = pstapriv->expire_to;
psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
}
@@ -948,15 +822,11 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
}
} else {
if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- if (psta->sleepq_len == 0) {
- DBG_88E("no buffered packets to xmit\n");
-
+ if (psta->sleepq_len == 0)
/* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
- } else {
- DBG_88E("error!psta->sleepq_len=%d\n", psta->sleepq_len);
+ else
psta->sleepq_len = 0;
- }
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -983,25 +853,20 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
{
struct sta_info *psta;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("+%s\n", __func__));
-
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
- if (!precv_frame) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- ("%s: fragment packet\n", __func__));
+ if (!precv_frame)
return _SUCCESS;
- }
/* for rx pkt statistics */
psta = rtw_get_stainfo(&padapter->stapriv,
GetAddr2Ptr(precv_frame->pkt->data));
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
- if (GetFrameSubType(precv_frame->pkt->data) == WIFI_BEACON) {
+ if (GetFrameSubType(precv_frame->pkt->data) == IEEE80211_STYPE_BEACON) {
psta->sta_stats.rx_beacon_pkts++;
- } else if (GetFrameSubType(precv_frame->pkt->data) == WIFI_PROBEREQ) {
+ } else if (GetFrameSubType(precv_frame->pkt->data) == IEEE80211_STYPE_PROBE_REQ) {
psta->sta_stats.rx_probereq_pkts++;
- } else if (GetFrameSubType(precv_frame->pkt->data) == WIFI_PROBERSP) {
+ } else if (GetFrameSubType(precv_frame->pkt->data) == IEEE80211_STYPE_PROBE_RESP) {
if (!memcmp(padapter->eeprompriv.mac_addr,
GetAddr1Ptr(precv_frame->pkt->data), ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
@@ -1063,7 +928,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
ret = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" case 3\n"));
break;
default:
ret = _FAIL;
@@ -1076,7 +940,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
goto exit;
if (!psta) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" after to_fr_ds_chk; psta==NULL\n"));
ret = _FAIL;
goto exit;
}
@@ -1108,19 +971,12 @@ static int validate_recv_data_frame(struct adapter *adapter,
/* decache, drop duplicate recv packets */
if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decache : drop pkt\n"));
ret = _FAIL;
goto exit;
}
if (pattrib->privacy) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("%s:pattrib->privacy=%x\n", __func__, pattrib->privacy));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^is_multicast_ether_addr(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], is_multicast_ether_addr(pattrib->ra)));
-
GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
-
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
} else {
pattrib->encrypt = 0;
@@ -1158,7 +1014,6 @@ static int validate_recv_frame(struct adapter *adapter,
/* add version chk */
if (ver != 0) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_data_frame fail! (ver!=0)\n"));
retval = _FAIL;
goto exit;
}
@@ -1177,41 +1032,14 @@ static int validate_recv_frame(struct adapter *adapter,
pattrib->privacy = GetPrivacy(ptr);
pattrib->order = GetOrder(ptr);
- /* Dump rx packets */
rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
- if (bDumpRxPkt == 1) {/* dump all rx packets */
- if (_drv_err_ <= GlobalDebugLevel) {
- pr_info(DRIVER_PREFIX "#############################\n");
- print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
- 16, 1, ptr, 64, false);
- pr_info(DRIVER_PREFIX "#############################\n");
- }
- } else if (bDumpRxPkt == 2) {
- if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_MGT_TYPE)) {
- pr_info(DRIVER_PREFIX "#############################\n");
- print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
- 16, 1, ptr, 64, false);
- pr_info(DRIVER_PREFIX "#############################\n");
- }
- } else if (bDumpRxPkt == 3) {
- if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_DATA_TYPE)) {
- pr_info(DRIVER_PREFIX "#############################\n");
- print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
- 16, 1, ptr, 64, false);
- pr_info(DRIVER_PREFIX "#############################\n");
- }
- }
switch (type) {
case WIFI_MGT_TYPE: /* mgnt */
retval = validate_recv_mgnt_frame(adapter, precv_frame);
- if (retval == _FAIL)
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_mgnt_frame fail\n"));
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case WIFI_CTRL_TYPE: /* ctrl */
retval = validate_recv_ctrl_frame(adapter, precv_frame);
- if (retval == _FAIL)
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_ctrl_frame fail\n"));
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case WIFI_DATA_TYPE: /* data */
@@ -1225,7 +1053,6 @@ static int validate_recv_frame(struct adapter *adapter,
}
break;
default:
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_data_frame fail! type= 0x%x\n", type));
retval = _FAIL;
break;
}
@@ -1282,9 +1109,6 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
len = precvframe->pkt->len - rmv_len;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
-
memcpy(&be_tmp, ptr + rmv_len, 2);
eth_type = ntohs(be_tmp); /* pattrib->ether_type */
pattrib->eth_type = eth_type;
@@ -1373,8 +1197,6 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
/* free the defrag_q queue and return the prframe */
rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performance defrag!!!!!\n"));
-
return prframe;
}
@@ -1436,14 +1258,11 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
phead = get_list_head(pdefrag_q);
list_add_tail(&pfhdr->list, phead);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Enqueuq: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
-
prtnframe = NULL;
} else {
/* can't find this ta's defrag_queue, so free this recv_frame */
rtw_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q==NULL: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
}
}
@@ -1455,21 +1274,18 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
list_add_tail(&pfhdr->list, phead);
/* call recvframe_defrag to defrag */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("defrag: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
precv_frame = recvframe_defrag(padapter, pdefrag_q);
prtnframe = precv_frame;
} else {
/* can't find this ta's defrag_queue, so free this recv_frame */
rtw_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q==NULL: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
}
}
if (prtnframe && (prtnframe->attrib.privacy)) {
/* after defrag we must check tkip mic code */
if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic(padapter, prtnframe)==_FAIL\n"));
rtw_free_recvframe(prtnframe, pfree_recv_queue);
prtnframe = NULL;
}
@@ -1505,10 +1321,8 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
/* Offset 12 denote 2 mac address */
nSubframe_Length = get_unaligned_be16(pdata + 12);
- if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- DBG_88E("nRemain_Length is %d and nSubframe_Length is : %d\n", a_len, nSubframe_Length);
+ if (a_len < (ETH_HLEN + nSubframe_Length))
goto exit;
- }
/* move the data point to data content */
pdata += ETH_HLEN;
@@ -1516,20 +1330,16 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- if (!sub_skb) {
- DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
+ if (!sub_skb)
break;
- }
skb_reserve(sub_skb, 12);
skb_put_data(sub_skb, pdata, nSubframe_Length);
subframes[nr_subframes++] = sub_skb;
- if (nr_subframes >= MAX_SUBFRAME_COUNT) {
- DBG_88E("ParseSubframe(): Too many Subframes! Packets dropped!\n");
+ if (nr_subframes >= MAX_SUBFRAME_COUNT)
break;
- }
pdata += nSubframe_Length;
a_len -= nSubframe_Length;
@@ -1674,9 +1484,6 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
pattrib = &prframe->attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- ("%s: indicate=%d seq=%d amsdu=%d\n",
- __func__, preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
plist = plist->next;
list_del_init(&prframe->list);
@@ -1723,8 +1530,6 @@ static int recv_indicatepkt_reorder(struct adapter *padapter,
(pattrib->ack_policy != 0)) {
if ((!padapter->bDriverStopped) &&
(!padapter->bSurpriseRemoved)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ %s -recv_func recv_indicatepkt\n", __func__));
-
rtw_recv_indicatepkt(padapter, prframe);
return _SUCCESS;
}
@@ -1754,10 +1559,6 @@ static int recv_indicatepkt_reorder(struct adapter *padapter,
spin_lock_bh(&ppending_recvframe_queue->lock);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- ("%s: indicate=%d seq=%d\n", __func__,
- preorder_ctrl->indicate_seq, pattrib->seq_num));
-
/* s2. check if winstart_b(indicate_seq) needs to been updated */
if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
rtw_recv_indicatepkt(padapter, prframe);
@@ -1838,22 +1639,15 @@ static int process_recv_indicatepkts(struct adapter *padapter,
}
} else { /* B/G mode */
retval = wlanhdr_to_ethhdr(prframe);
- if (retval != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("wlanhdr_to_ethhdr: drop pkt\n"));
+ if (retval != _SUCCESS)
return retval;
- }
if ((!padapter->bDriverStopped) &&
- (!padapter->bSurpriseRemoved)) {
+ (!padapter->bSurpriseRemoved))
/* indicate this recv_frame */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ %s- recv_func recv_indicatepkt\n", __func__));
rtw_recv_indicatepkt(padapter, prframe);
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ %s- recv_func free_indicatepkt\n", __func__));
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ else
return _FAIL;
- }
}
return retval;
@@ -1868,7 +1662,6 @@ static int recv_func_prehandle(struct adapter *padapter,
/* check the frame crtl field and decache */
ret = validate_recv_frame(padapter, rframe);
if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
goto exit;
}
@@ -1890,20 +1683,16 @@ static int recv_func_posthandle(struct adapter *padapter,
prframe = decryptor(padapter, prframe);
if (!prframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decryptor: drop pkt\n"));
ret = _FAIL;
goto _recv_data_drop;
}
prframe = recvframe_chk_defrag(padapter, prframe);
- if (!prframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chk_defrag: drop pkt\n"));
+ if (!prframe)
goto _recv_data_drop;
- }
prframe = portctrl(padapter, prframe);
if (!prframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("portctrl: drop pkt\n"));
ret = _FAIL;
goto _recv_data_drop;
}
@@ -1912,7 +1701,6 @@ static int recv_func_posthandle(struct adapter *padapter,
ret = process_recv_indicatepkts(padapter, prframe);
if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recv_func: process_recv_indicatepkts fail!\n"));
rtw_free_recvframe(orig_prframe, pfree_recv_queue);/* free this recv_frame */
goto _recv_data_drop;
}
@@ -1934,10 +1722,8 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
struct recv_frame *pending_frame;
- while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
- if (recv_func_posthandle(padapter, pending_frame) == _SUCCESS)
- DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
- }
+ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue)))
+ recv_func_posthandle(padapter, pending_frame);
}
ret = recv_func_prehandle(padapter, rframe);
@@ -1951,7 +1737,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
!is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
!psecuritypriv->busetkipkey) {
rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
- DBG_88E("%s: no key, enqueue uc_swdec_pending_queue\n", __func__);
goto exit;
}
@@ -1971,8 +1756,6 @@ int rtw_recv_entry(struct recv_frame *precvframe)
ret = recv_func(padapter, precvframe);
if (ret == _SUCCESS)
precvpriv->rx_pkts++;
- else
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("%s: recv_func return fail!!!\n", __func__));
return ret;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 617f89842c81..1b2cb6196463 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -465,8 +465,9 @@ static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM)
/**
* phase1() - generate P1K, given TA, TK, IV32
- * @tk[]: temporal key [128 bits]
- * @ta[]: transmitter's MAC address [ 48 bits]
+ * @p1k: placeholder for the returned phase 1 key
+ * @tk: temporal key [128 bits]
+ * @ta: transmitter's MAC address [ 48 bits]
* @iv32: upper 32 bits of IV [ 32 bits]
*
* This function only needs to be called every 2**16 packets,
@@ -498,8 +499,9 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
/**
* phase2() - generate RC4KEY, given TK, P1K, IV16
- * @tk[]: Temporal key [128 bits]
- * @p1k[]: Phase 1 output key [ 80 bits]
+ * @rc4key: Placeholder for the returned key
+ * @tk: Temporal key [128 bits]
+ * @p1k: Phase 1 output key [ 80 bits]
* @iv16: low 16 bits of IV counter [ 16 bits]
*
* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
@@ -589,8 +591,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
if (stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
-
if (is_multicast_ether_addr(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
@@ -609,9 +609,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
if ((curfragnum + 1) == pattrib->nr_frags) { /* 4 the last fragment */
length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
- RT_TRACE(_module_rtl871x_security_c_, _drv_info_,
- ("pattrib->iv_len=%x, pattrib->icv_len=%x\n",
- pattrib->iv_len, pattrib->icv_len));
*((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
arcfour_init(&mycontext, rc4key, 16);
@@ -629,7 +626,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
}
}
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo==NULL!!!\n", __func__));
res = _FAIL;
}
}
@@ -662,12 +658,10 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
if (is_multicast_ether_addr(prxattrib->ra)) {
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
- DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
@@ -693,14 +687,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
if (crc[3] != payload[length - 1] ||
crc[2] != payload[length - 2] ||
crc[1] != payload[length - 3] ||
- crc[0] != payload[length - 4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
- &crc, &payload[length - 4]));
+ crc[0] != payload[length - 4])
res = _FAIL;
- }
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo==NULL!!!\n", __func__));
res = _FAIL;
}
}
@@ -742,10 +731,8 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
else
stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
- if (!stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo==NULL!!!\n", __func__));
+ if (!stainfo)
return _FAIL;
- }
crypto_ops = lib80211_get_crypto_ops("CCMP");
@@ -770,8 +757,6 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
goto exit_crypto_ops_deinit;
}
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
if (curfragnum + 1 == pattrib->nr_frags)
length = pattrib->last_txcmdsz;
@@ -834,7 +819,6 @@ u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
/* in concurrent we should use sw descrypt in group key, so we remove this message */
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
- DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
key_idx = psecuritypriv->dot118021XGrpKeyid;
@@ -877,7 +861,6 @@ exit_lib80211_ccmp:
if (crypto_ops && crypto_private)
crypto_ops->deinit(crypto_private);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo==NULL!!!\n"));
res = _FAIL;
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 3c03141e25b1..19eddf573fd8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -114,17 +114,11 @@ inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta)
{
int offset = (((u8 *)sta) - stapriv->pstainfo_buf) / sizeof(struct sta_info);
- if (!stainfo_offset_valid(offset))
- DBG_88E("%s invalid offset(%d), out of range!!!", __func__, offset);
-
return offset;
}
inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset)
{
- if (!stainfo_offset_valid(offset))
- DBG_88E("%s invalid offset(%d), out of range!!!", __func__, offset);
-
return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
}
@@ -142,13 +136,10 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &pstapriv->sta_hash[index];
- plist = phead->next;
-
- while (phead != plist) {
+ list_for_each(plist, phead) {
int i;
- psta = container_of(plist, struct sta_info, hash_list);
- plist = plist->next;
+ psta = list_entry(plist, struct sta_info, hash_list);
for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
@@ -188,13 +179,8 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
_rtw_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- ("%s: index=%x", __func__, index));
- if (index >= NUM_STA) {
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
- ("ERROR => %s: index >= NUM_STA", __func__));
+ if (index >= NUM_STA)
return NULL;
- }
phash_list = &pstapriv->sta_hash[index];
spin_lock_bh(&pstapriv->sta_hash_lock);
@@ -214,10 +200,6 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
&wRxSeqInitialValue, 2);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- ("alloc number_%d stainfo with hwaddr = %pM\n",
- pstapriv->asoc_sta_count, hwaddr));
-
init_addba_retry_timer(pstapriv->padapter, psta);
/* for A-MPDU Rx reordering buffer control */
@@ -288,11 +270,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
spin_unlock_bh(&pxmitpriv->lock);
list_del_init(&psta->hash_list);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
- ("\n free number_%d stainfo with hwaddr=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n",
- pstapriv->asoc_sta_count, psta->hwaddr[0], psta->hwaddr[1],
- psta->hwaddr[2], psta->hwaddr[3], psta->hwaddr[4],
- psta->hwaddr[5]));
pstapriv->asoc_sta_count--;
/* re-init sta_info; 20061114 */
@@ -382,9 +359,9 @@ exit:
/* free all stainfo which in sta_hash[all] */
void rtw_free_all_stainfo(struct adapter *padapter)
{
- struct list_head *plist, *phead;
+ struct list_head *phead;
s32 index;
- struct sta_info *psta = NULL;
+ struct sta_info *psta, *temp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
@@ -395,13 +372,7 @@ void rtw_free_all_stainfo(struct adapter *padapter)
for (index = 0; index < NUM_STA; index++) {
phead = &pstapriv->sta_hash[index];
- plist = phead->next;
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
-
- plist = plist->next;
-
+ list_for_each_entry_safe(psta, temp, phead, hash_list) {
if (pbcmc_stainfo != psta)
rtw_free_stainfo(padapter, psta);
}
@@ -431,17 +402,14 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
spin_lock_bh(&pstapriv->sta_hash_lock);
phead = &pstapriv->sta_hash[index];
- plist = phead->next;
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
+ list_for_each(plist, phead) {
+ psta = list_entry(plist, struct sta_info, hash_list);
if (!memcmp(psta->hwaddr, addr, ETH_ALEN)) {
/* if found the matched address */
break;
}
psta = NULL;
- plist = plist->next;
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
@@ -456,11 +424,8 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
psta = rtw_alloc_stainfo(pstapriv, bc_addr);
- if (!psta) {
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
- ("rtw_alloc_stainfo fail"));
+ if (!psta)
return _FAIL;
- }
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
@@ -489,10 +454,8 @@ bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
- plist = phead->next;
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
+ list_for_each(plist, phead) {
+ paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
if (paclnode->valid) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 7015db16dcf8..2d4776debb97 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -521,8 +521,6 @@ void WMMOnAssocRsp(struct adapter *padapter)
edca[XMIT_VO_QUEUE] = acParm;
break;
}
-
- DBG_88E("WMM(%x): %x, %x\n", ACI, ACM, acParm);
}
if (padapter->registrypriv.acm_method == 1)
@@ -556,10 +554,8 @@ void WMMOnAssocRsp(struct adapter *padapter)
}
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++)
pxmitpriv->wmm_para_seq[i] = inx[i];
- DBG_88E("wmm_para_seq(%d): %d\n", i, pxmitpriv->wmm_para_seq[i]);
- }
}
static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
@@ -708,8 +704,6 @@ void HTOnAssocRsp(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- DBG_88E("%s\n", __func__);
-
if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
pmlmeinfo->HT_enable = 1;
} else {
@@ -803,16 +797,11 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
len = packet_len - sizeof(struct ieee80211_hdr_3addr);
- if (len > MAX_IE_SZ) {
- DBG_88E("%s IE too long for survey event\n", __func__);
+ if (len > MAX_IE_SZ)
return _FAIL;
- }
- if (memcmp(cur_network->network.MacAddress, pbssid, 6)) {
- DBG_88E("Oops: rtw_check_network_encrypt linked but recv other bssid bcn\n%pM %pM\n",
- (pbssid), (cur_network->network.MacAddress));
+ if (memcmp(cur_network->network.MacAddress, pbssid, 6))
return true;
- }
bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
if (!bssid)
@@ -820,7 +809,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
subtype = GetFrameSubType(pframe) >> 4;
- if (subtype == WIFI_BEACON)
+ if (subtype == IEEE80211_STYPE_BEACON)
bssid->Reserved[0] = 1;
bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
@@ -850,11 +839,6 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
((ht_info_infos_0 & 0x03) != (cur_network->BcnInfo.ht_info_infos_0 & 0x03))) {
- DBG_88E("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
- ht_cap_info, ht_info_infos_0);
- DBG_88E("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
- cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
- DBG_88E("%s bw mode change, disconnect\n", __func__);
/* bcn_info_update */
cur_network->BcnInfo.ht_cap_info = ht_cap_info;
cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
@@ -868,18 +852,13 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
bcn_channel = *(p + 2);
} else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */
p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
- if (pht_info) {
+ if (pht_info)
bcn_channel = pht_info->primary_channel;
- } else { /* we don't find channel IE, so don't check it */
- DBG_88E("Oops: %s we don't find channel IE, so don't check it\n", __func__);
+ else /* we don't find channel IE, so don't check it */
bcn_channel = Adapter->mlmeextpriv.cur_channel;
- }
}
- if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
- DBG_88E("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
- bcn_channel, Adapter->mlmeextpriv.cur_channel);
+ if (bcn_channel != Adapter->mlmeextpriv.cur_channel)
goto _mismatch;
- }
/* checking SSID */
ssid_len = 0;
@@ -892,17 +871,10 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
memcpy(bssid->ssid.ssid, (p + 2), ssid_len);
bssid->ssid.ssid_length = ssid_len;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.ssid.ssid:%s bssid.ssid.ssid_length:%d "
- "cur_network->network.ssid.ssid:%s len:%d\n", __func__, bssid->ssid.ssid,
- bssid->ssid.ssid_length, cur_network->network.ssid.ssid,
- cur_network->network.ssid.ssid_length));
-
if (memcmp(bssid->ssid.ssid, cur_network->network.ssid.ssid, 32) ||
bssid->ssid.ssid_length != cur_network->network.ssid.ssid_length) {
- if (bssid->ssid.ssid[0] != '\0' && bssid->ssid.ssid_length != 0) { /* not hidden ssid */
- DBG_88E("%s(), SSID is not match return FAIL\n", __func__);
+ if (bssid->ssid.ssid[0] != '\0' && bssid->ssid.ssid_length != 0) /* not hidden ssid */
goto _mismatch;
- }
}
/* check encryption info */
@@ -913,13 +885,8 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
else
bssid->Privacy = 0;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
- __func__, cur_network->network.Privacy, bssid->Privacy));
- if (cur_network->network.Privacy != bssid->Privacy) {
- DBG_88E("%s(), privacy is not match return FAIL\n", __func__);
+ if (cur_network->network.Privacy != bssid->Privacy)
goto _mismatch;
- }
rtw_get_sec_ie(bssid->ies, bssid->ie_length, NULL, &rsn_len, NULL, &wpa_len);
@@ -932,46 +899,29 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
encryp_protocol = ENCRYP_PROTOCOL_WEP;
}
- if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
- DBG_88E("%s(): encryption protocol is not match , return FAIL\n", __func__);
+ if (cur_network->BcnInfo.encryp_protocol != encryp_protocol)
goto _mismatch;
- }
if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
pbuf = rtw_get_wpa_ie(&bssid->ies[12], &wpa_ielen,
bssid->ie_length - 12);
if (pbuf && (wpa_ielen > 0)) {
- if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x) == _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n", __func__,
- pairwise_cipher, group_cipher, is_8021x));
- }
+ rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher,
+ &pairwise_cipher, &is_8021x);
} else {
pbuf = rtw_get_wpa2_ie(&bssid->ies[12], &wpa_ielen,
bssid->ie_length - 12);
- if (pbuf && (wpa_ielen > 0)) {
- if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x) == _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher is %d, is_802x is %d\n",
- __func__, pairwise_cipher, group_cipher, is_8021x));
- }
- }
+ if (pbuf && (wpa_ielen > 0))
+ rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher,
+ &pairwise_cipher, &is_8021x);
}
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- ("%s cur_network->group_cipher is %d: %d\n", __func__, cur_network->BcnInfo.group_cipher, group_cipher));
- if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher || group_cipher != cur_network->BcnInfo.group_cipher) {
- DBG_88E("%s pairwise_cipher(%x:%x) or group_cipher(%x:%x) is not match , return FAIL\n", __func__,
- pairwise_cipher, cur_network->BcnInfo.pairwise_cipher,
- group_cipher, cur_network->BcnInfo.group_cipher);
+ if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher || group_cipher != cur_network->BcnInfo.group_cipher)
goto _mismatch;
- }
- if (is_8021x != cur_network->BcnInfo.is_8021x) {
- DBG_88E("%s authentication is not match , return FAIL\n", __func__);
+ if (is_8021x != cur_network->BcnInfo.is_8021x)
goto _mismatch;
- }
}
kfree(bssid);
@@ -1029,6 +979,7 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
case WLAN_EID_RSN:
if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
return true;
+ break;
default:
break;
}
@@ -1191,41 +1142,31 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
case WLAN_EID_VENDOR_SPECIFIC:
if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
(!memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
- DBG_88E("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
} else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
(!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
- DBG_88E("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
} else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
- DBG_88E("link to Marvell AP\n");
return HT_IOT_PEER_MARVELL;
} else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
- if (!ralink_vendor_flag) {
+ if (!ralink_vendor_flag)
ralink_vendor_flag = 1;
- } else {
- DBG_88E("link to Ralink AP\n");
+ else
return HT_IOT_PEER_RALINK;
- }
} else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
- DBG_88E("link to Cisco AP\n");
return HT_IOT_PEER_CISCO;
} else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
- DBG_88E("link to Realtek 96B\n");
return HT_IOT_PEER_REALTEK;
} else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
- DBG_88E("link to Airgo Cap\n");
return HT_IOT_PEER_AIRGO;
} else if (!memcmp(pIE->data, EPIGRAM_OUI, 3)) {
epigram_vendor_flag = 1;
- if (ralink_vendor_flag) {
- DBG_88E("link to Tenda W311R AP\n");
+ if (ralink_vendor_flag)
return HT_IOT_PEER_TENDA;
- }
- DBG_88E("Capture EPIGRAM_OUI\n");
} else {
break;
}
+ break;
default:
break;
@@ -1233,14 +1174,10 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
i += (pIE->Length + 2);
}
- if (ralink_vendor_flag && !epigram_vendor_flag) {
- DBG_88E("link to Ralink AP\n");
+ if (ralink_vendor_flag && !epigram_vendor_flag)
return HT_IOT_PEER_RALINK;
- } else if (ralink_vendor_flag && epigram_vendor_flag) {
- DBG_88E("link to Tenda W311R AP\n");
+ else if (ralink_vendor_flag && epigram_vendor_flag)
return HT_IOT_PEER_TENDA;
- }
- DBG_88E("link to new AP\n");
return HT_IOT_PEER_UNKNOWN;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 99e44b2c6f36..d5fc59417ec6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -75,7 +75,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n"));
res = _FAIL;
goto exit;
}
@@ -110,7 +109,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
if (!pxmitpriv->pallocated_xmitbuf) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
res = _FAIL;
goto exit;
}
@@ -149,7 +147,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
if (!pxmitpriv->pallocated_xmit_extbuf) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
res = _FAIL;
goto exit;
}
@@ -364,8 +361,6 @@ u8 qos_acm(u8 acm_mask, u8 priority)
change_priority = 5;
break;
default:
- DBG_88E("%s(): invalid pattrib->priority: %d!!!\n",
- __func__, priority);
break;
}
@@ -444,14 +439,11 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
((tmp[21] == 67) && (tmp[23] == 68))) {
/* 68 : UDP BOOTP client */
/* 67 : UDP BOOTP server */
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====================== %s: get DHCP Packet\n", __func__));
/* Use low rate to send DHCP packet. */
pattrib->dhcp_pkt = 1;
}
}
}
- } else if (pattrib->ether_type == ETH_P_PAE) {
- DBG_88E_LEVEL(_drv_info_, "send eapol packet\n");
}
if ((pattrib->ether_type == ETH_P_PAE) || (pattrib->dhcp_pkt == 1))
@@ -469,7 +461,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
} else {
psta = rtw_get_stainfo(pstapriv, pattrib->ra);
if (!psta) { /* if we cannot get psta => drrp the pkt */
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra: %pM\n", (pattrib->ra)));
res = _FAIL;
goto exit;
} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
@@ -481,11 +472,9 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
if (psta) {
pattrib->mac_id = psta->mac_id;
- /* DBG_88E("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
pattrib->psta = psta;
} else {
/* if we cannot get psta => drop the pkt */
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:%pM\n", (pattrib->ra)));
res = _FAIL;
goto exit;
}
@@ -510,12 +499,9 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
if (psta->ieee8021x_blocked) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\n psta->ieee8021x_blocked == true\n"));
-
pattrib->encrypt = 0;
if (pattrib->ether_type != ETH_P_PAE) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != ETH_P_PAE\n", pattrib->ether_type));
res = _FAIL;
goto exit;
}
@@ -551,15 +537,11 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->icv_len = 4;
if (padapter->securitypriv.busetkipkey == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- ("\npadapter->securitypriv.busetkipkey(%d) == _FAIL drop packet\n",
- padapter->securitypriv.busetkipkey));
res = _FAIL;
goto exit;
}
break;
case _AES_:
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("pattrib->encrypt=%d (_AES_)\n", pattrib->encrypt));
pattrib->iv_len = 8;
pattrib->icv_len = 8;
break;
@@ -569,18 +551,10 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
break;
}
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("%s: encrypt=%d\n", __func__, pattrib->encrypt));
-
- if (pattrib->encrypt && !psecuritypriv->hw_decrypted) {
+ if (pattrib->encrypt && !psecuritypriv->hw_decrypted)
pattrib->bswenc = true;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- ("%s: encrypt=%d bswenc = true\n", __func__,
- pattrib->encrypt));
- } else {
+ else
pattrib->bswenc = false;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("%s: bswenc = false\n", __func__));
- }
update_attrib_phy_info(pattrib, psta);
@@ -649,17 +623,8 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
payload = (u8 *)round_up((size_t)(payload), 4);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- ("=== curfragnum=%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
- curfragnum, *payload, *(payload + 1),
- *(payload + 2), *(payload + 3),
- *(payload + 4), *(payload + 5),
- *(payload + 6), *(payload + 7)));
payload += pattrib->hdrlen + pattrib->iv_len;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- ("curfragnum=%d pattrib->hdrlen=%d pattrib->iv_len=%d",
- curfragnum, pattrib->hdrlen, pattrib->iv_len));
if (curfragnum + 1 == pattrib->nr_frags) {
length = pattrib->last_txcmdsz -
pattrib->hdrlen -
@@ -676,32 +641,16 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
pattrib->icv_len : 0);
rtw_secmicappend(&micdata, payload, length);
payload += length + pattrib->icv_len;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d length=%d pattrib->icv_len=%d", curfragnum, length, pattrib->icv_len));
}
}
rtw_secgetmic(&micdata, &mic[0]);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: before add mic code!!!\n", __func__));
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: pattrib->last_txcmdsz=%d!!!\n", __func__, pattrib->last_txcmdsz));
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: mic[0]=0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x, mic[3]=0x%.2x\n\
- mic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x , mic[7]= 0x%.2x !!!!\n",
- __func__, mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
/* add mic code and add the mic code length in last_txcmdsz */
memcpy(payload, &mic[0], 8);
pattrib->last_txcmdsz += 8;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ======== last pkt ========\n"));
payload -= pattrib->last_txcmdsz + 8;
- for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum += 8)
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
- *(payload + curfragnum), *(payload + curfragnum + 1),
- *(payload + curfragnum + 2), *(payload + curfragnum + 3),
- *(payload + curfragnum + 4), *(payload + curfragnum + 5),
- *(payload + curfragnum + 6), *(payload + curfragnum + 7)));
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
- }
+ }
}
return _SUCCESS;
@@ -712,7 +661,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
struct pkt_attrib *pattrib = &pxmitframe->attrib;
if (pattrib->bswenc) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### %s\n", __func__));
switch (pattrib->encrypt) {
case _WEP40_:
case _WEP104_:
@@ -727,8 +675,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
default:
break;
}
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
}
return _SUCCESS;
@@ -790,7 +736,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (psta && psta->qos_option)
qos_option = true;
} else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
res = _FAIL;
goto exit;
}
@@ -926,10 +871,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
if (!psta)
return _FAIL;
- if (!pxmitframe->buf_addr) {
- DBG_88E("==> %s buf_addr == NULL\n", __func__);
+ if (!pxmitframe->buf_addr)
return _FAIL;
- }
pbuf_start = pxmitframe->buf_addr;
@@ -938,8 +881,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
mem_start = pbuf_start + hw_hdr_offset;
if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: rtw_make_wlanhdr fail; drop pkt\n", __func__));
- DBG_88E("%s: rtw_make_wlanhdr fail; drop pkt\n", __func__);
res = _FAIL;
goto exit;
}
@@ -982,13 +923,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
memcpy(pframe, pattrib->iv, pattrib->iv_len);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
- ("%s: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n",
- __func__,
- padapter->securitypriv.dot11PrivacyKeyIndex,
- pattrib->iv[3], *pframe, *(pframe + 1),
- *(pframe + 2), *(pframe + 3)));
-
pframe += pattrib->iv_len;
mpdu_len -= pattrib->iv_len;
@@ -1027,8 +961,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
break;
}
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
-
addr = (size_t)(pframe);
mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
@@ -1039,8 +971,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
rtl88eu_mon_xmit_hook(padapter->pmondev, pxmitframe, frg_len);
if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n"));
- DBG_88E("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
res = _FAIL;
goto exit;
}
@@ -1153,10 +1083,8 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
list_del_init(&pxmitbuf->list);
pxmitpriv->free_xmit_extbuf_cnt--;
pxmitbuf->priv_data = NULL;
- if (pxmitbuf->sctx) {
- DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ if (pxmitbuf->sctx)
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
- }
}
spin_unlock_irqrestore(&pfree_queue->lock, irql);
@@ -1197,7 +1125,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
pxmitpriv->free_xmitbuf_cnt--;
pxmitbuf->priv_data = NULL;
if (pxmitbuf->sctx) {
- DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
}
@@ -1214,10 +1141,8 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
if (!pxmitbuf)
return _FAIL;
- if (pxmitbuf->sctx) {
- DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ if (pxmitbuf->sctx)
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
- }
if (pxmitbuf->ext_tag) {
rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
@@ -1261,20 +1186,12 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)
spin_lock_bh(&pfree_xmit_queue->lock);
pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
struct xmit_frame, list);
- if (!pxframe) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("rtw_alloc_xmitframe:%d\n",
- pxmitpriv->free_xmitframe_cnt));
- } else {
+ if (pxframe) {
list_del_init(&pxframe->list);
/* default value setting */
pxmitpriv->free_xmitframe_cnt--;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n",
- pxmitpriv->free_xmitframe_cnt));
-
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
@@ -1299,10 +1216,8 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
struct adapter *padapter = pxmitpriv->adapter;
struct sk_buff *pndis_pkt = NULL;
- if (!pxmitframe) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== %s:pxmitframe == NULL!!!!!!!!!!\n", __func__));
+ if (!pxmitframe)
goto exit;
- }
spin_lock_bh(&pfree_xmit_queue->lock);
@@ -1316,7 +1231,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
list_add_tail(&pxmitframe->list, get_list_head(pfree_xmit_queue));
pxmitpriv->free_xmitframe_cnt++;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("%s:free_xmitframe_cnt=%d\n", __func__, pxmitpriv->free_xmitframe_cnt));
spin_unlock_bh(&pfree_xmit_queue->lock);
@@ -1329,31 +1243,22 @@ exit:
void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
{
- struct list_head *plist, *phead;
- struct xmit_frame *pxmitframe;
+ struct list_head *phead;
+ struct xmit_frame *pxmitframe, *temp;
spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
- plist = phead->next;
-
- while (phead != plist) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
- plist = plist->next;
-
+ list_for_each_entry_safe(pxmitframe, temp, phead, list)
rtw_free_xmitframe(pxmitpriv, pxmitframe);
- }
+
spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- ("%s: drop xmit pkt for classifier fail\n", __func__));
+ if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL)
return _FAIL;
- }
return _SUCCESS;
}
@@ -1404,10 +1309,9 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmi
phwxmit = phwxmit_i + inx[i];
sta_phead = get_list_head(phwxmit->sta_queue);
- sta_plist = sta_phead->next;
-
- while (sta_phead != sta_plist) {
- ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
+ list_for_each(sta_plist, sta_phead) {
+ ptxservq = list_entry(sta_plist, struct tx_servq,
+ tx_pending);
pframe_queue = &ptxservq->sta_pending;
@@ -1421,8 +1325,6 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmi
list_del_init(&ptxservq->tx_pending);
goto exit;
}
-
- sta_plist = sta_plist->next;
}
}
exit:
@@ -1440,30 +1342,22 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter,
case 2:
ptxservq = &psta->sta_xmitpriv.bk_q;
*(ac) = 3;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("%s : BK\n", __func__));
break;
case 4:
case 5:
ptxservq = &psta->sta_xmitpriv.vi_q;
*(ac) = 1;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("%s : VI\n", __func__));
break;
case 6:
case 7:
ptxservq = &psta->sta_xmitpriv.vo_q;
*(ac) = 0;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("%s : VO\n", __func__));
break;
case 0:
case 3:
default:
ptxservq = &psta->sta_xmitpriv.be_q;
*(ac) = 2;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("%s : BE\n", __func__));
break;
}
@@ -1491,8 +1385,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
if (!psta) {
res = _FAIL;
- DBG_88E("%s: psta == NULL\n", __func__);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: psta == NULL\n", __func__));
goto exit;
}
@@ -1598,16 +1490,12 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
s32 res;
pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
- if (!pxmitframe) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: no more pxmitframe\n", __func__));
- DBG_88E("DBG_TX_DROP_FRAME %s no more pxmitframe\n", __func__);
+ if (!pxmitframe)
return -1;
- }
res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
if (res == _FAIL) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: update attrib fail\n", __func__));
rtw_free_xmitframe(pxmitpriv, pxmitframe);
return -1;
}
@@ -1738,21 +1626,15 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
{
- struct list_head *plist, *phead;
+ struct list_head *phead;
u8 ac_index;
struct tx_servq *ptxservq;
struct pkt_attrib *pattrib;
- struct xmit_frame *pxmitframe;
+ struct xmit_frame *pxmitframe, *n;
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
phead = get_list_head(pframequeue);
- plist = phead->next;
-
- while (phead != plist) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
- plist = plist->next;
-
+ list_for_each_entry_safe(pxmitframe, n, phead, list) {
xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
pattrib = &pxmitframe->attrib;
@@ -1811,20 +1693,14 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
{
u8 update_mask = 0, wmmps_ac = 0;
struct sta_info *psta_bmc;
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
+ struct list_head *xmitframe_phead;
+ struct xmit_frame *pxmitframe, *n;
struct sta_priv *pstapriv = &padapter->stapriv;
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
+ list_for_each_entry_safe(pxmitframe, n, xmitframe_phead, list) {
list_del_init(&pxmitframe->list);
switch (pxmitframe->attrib.priority) {
@@ -1899,13 +1775,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
+ list_for_each_entry_safe(pxmitframe, n, xmitframe_phead, list) {
list_del_init(&pxmitframe->list);
psta_bmc->sleepq_len--;
@@ -1939,20 +1809,14 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
{
u8 wmmps_ac = 0;
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
+ struct list_head *xmitframe_phead;
+ struct xmit_frame *pxmitframe, *n;
struct sta_priv *pstapriv = &padapter->stapriv;
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
+ list_for_each_entry_safe(pxmitframe, n, xmitframe_phead, list) {
switch (pxmitframe->attrib.priority) {
case 1:
case 2:
@@ -2025,7 +1889,6 @@ int rtw_sctx_wait(struct submit_ctx *sctx)
if (!wait_for_completion_timeout(&sctx->done, expire)) {
/* timeout, do something?? */
status = RTW_SCTX_DONE_TIMEOUT;
- DBG_88E("%s timeout\n", __func__);
} else {
status = sctx->status;
}
@@ -2036,26 +1899,9 @@ int rtw_sctx_wait(struct submit_ctx *sctx)
return ret;
}
-static bool rtw_sctx_chk_warning_status(int status)
-{
- switch (status) {
- case RTW_SCTX_DONE_UNKNOWN:
- case RTW_SCTX_DONE_BUF_ALLOC:
- case RTW_SCTX_DONE_BUF_FREE:
-
- case RTW_SCTX_DONE_DRV_STOP:
- case RTW_SCTX_DONE_DEV_REMOVE:
- return true;
- default:
- return false;
- }
-}
-
void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
{
if (*sctx) {
- if (rtw_sctx_chk_warning_status(status))
- DBG_88E("%s status:%d\n", __func__, status);
(*sctx)->status = status;
complete(&((*sctx)->done));
*sctx = NULL;
@@ -2079,6 +1925,4 @@ void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
if (pxmitpriv->ack_tx)
rtw_sctx_done_err(&pack_tx_ops, status);
- else
- DBG_88E("%s ack_tx not set\n", __func__);
}
diff --git a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
index 5792f491b59a..74fff76af16d 100644
--- a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
+++ b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
@@ -101,9 +101,6 @@ static void odm_SetTxRPTTiming_8188E(struct odm_dm_struct *dm_odm,
idx -= 1;
}
pRaInfo->RptTime = DynamicTxRPTTiming[idx];
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("pRaInfo->RptTime = 0x%x\n", pRaInfo->RptTime));
}
static int odm_RateDown_8188E(struct odm_dm_struct *dm_odm,
@@ -112,20 +109,13 @@ static int odm_RateDown_8188E(struct odm_dm_struct *dm_odm,
u8 RateID, LowestRate, HighestRate;
u8 i;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
- ODM_DBG_TRACE, ("=====>%s()\n", __func__));
- if (!pRaInfo) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("%s(): pRaInfo is NULL\n", __func__));
+ if (!pRaInfo)
return -1;
- }
+
RateID = pRaInfo->PreRate;
LowestRate = pRaInfo->LowestRate;
HighestRate = pRaInfo->HighestRate;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" RateID =%d LowestRate =%d HighestRate =%d RateSGI =%d\n",
- RateID, LowestRate, HighestRate, pRaInfo->RateSGI));
if (RateID > HighestRate) {
RateID = HighestRate;
} else if (pRaInfo->RateSGI) {
@@ -158,15 +148,6 @@ RateDownFinish:
pRaInfo->DecisionRate = RateID;
odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 2);
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
- ODM_DBG_LOUD, ("Rate down, RPT Timing default\n"));
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("RAWaitingCounter %d, RAPendingCounter %d",
- pRaInfo->RAWaitingCounter, pRaInfo->RAPendingCounter));
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("Rate down to RateID %d RateSGI %d\n", RateID, pRaInfo->RateSGI));
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("<===== %s()\n", __func__));
return 0;
}
@@ -176,18 +157,11 @@ static int odm_RateUp_8188E(struct odm_dm_struct *dm_odm,
u8 RateID, HighestRate;
u8 i;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
- ODM_DBG_TRACE, ("=====>%s()\n", __func__));
- if (!pRaInfo) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("%s(): pRaInfo is NULL\n", __func__));
+ if (!pRaInfo)
return -1;
- }
+
RateID = pRaInfo->PreRate;
HighestRate = pRaInfo->HighestRate;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" RateID =%d HighestRate =%d\n",
- RateID, HighestRate));
if (pRaInfo->RAWaitingCounter == 1) {
pRaInfo->RAWaitingCounter = 0;
pRaInfo->RAPendingCounter = 0;
@@ -196,8 +170,6 @@ static int odm_RateUp_8188E(struct odm_dm_struct *dm_odm,
goto RateUpfinish;
}
odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 0);
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("%s():Decrease RPT Timing\n", __func__));
if (RateID < HighestRate) {
for (i = RateID + 1; i <= HighestRate; i++) {
@@ -222,13 +194,6 @@ RateUpfinish:
pRaInfo->RAWaitingCounter++;
pRaInfo->DecisionRate = RateID;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("Rate up to RateID %d\n", RateID));
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("RAWaitingCounter %d, RAPendingCounter %d",
- pRaInfo->RAWaitingCounter, pRaInfo->RAPendingCounter));
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
- ODM_DBG_TRACE, ("<===== %s()\n", __func__));
return 0;
}
@@ -250,9 +215,6 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
/* u32 pool_retry; */
static u8 DynamicTxRPTTimingCounter;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("=====>%s()\n", __func__));
-
if (pRaInfo->Active && (pRaInfo->TOTAL > 0)) { /* STA used and data packet exits */
if ((pRaInfo->RssiStaRA < (pRaInfo->PreRssiStaRA - 3)) ||
(pRaInfo->RssiStaRA > (pRaInfo->PreRssiStaRA + 3))) {
@@ -270,16 +232,9 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
RtyPtID = 1;
PenaltyID1 = RETRY_PENALTY_IDX[RtyPtID][RateID]; /* TODO by page */
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" NscDown init is %d\n", pRaInfo->NscDown));
-
for (i = 0 ; i <= 4 ; i++)
pRaInfo->NscDown += pRaInfo->RTY[i] * RETRY_PENALTY[PenaltyID1][i];
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" NscDown is %d, total*penalty[5] is %d\n", pRaInfo->NscDown,
- (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5])));
-
if (pRaInfo->NscDown > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5]))
pRaInfo->NscDown -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5];
else
@@ -287,24 +242,15 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
/* rate up */
PenaltyID2 = RETRY_PENALTY_UP_IDX[RateID];
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" NscUp init is %d\n", pRaInfo->NscUp));
for (i = 0 ; i <= 4 ; i++)
pRaInfo->NscUp += pRaInfo->RTY[i] * RETRY_PENALTY[PenaltyID2][i];
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("NscUp is %d, total*up[5] is %d\n",
- pRaInfo->NscUp, (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5])));
-
if (pRaInfo->NscUp > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5]))
pRaInfo->NscUp -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5];
else
pRaInfo->NscUp = 0;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE | ODM_COMP_INIT, ODM_DBG_LOUD,
- (" RssiStaRa = %d RtyPtID =%d PenaltyID1 = 0x%x PenaltyID2 = 0x%x RateID =%d NscDown =%d NscUp =%d SGI =%d\n",
- pRaInfo->RssiStaRA, RtyPtID, PenaltyID1, PenaltyID2, RateID, pRaInfo->NscDown, pRaInfo->NscUp, pRaInfo->RateSGI));
if ((pRaInfo->NscDown < N_THRESHOLD_LOW[RateID]) ||
(pRaInfo->DROP > DROPING_NECESSARY[RateID]))
odm_RateDown_8188E(dm_odm, pRaInfo);
@@ -321,8 +267,6 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
if (DynamicTxRPTTimingCounter >= 4) {
odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 1);
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
- ODM_DBG_LOUD, ("<===== Rate don't change 4 times, Extend RPT Timing\n"));
DynamicTxRPTTimingCounter = 0;
}
@@ -330,8 +274,6 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
odm_ResetRaCounter_8188E(pRaInfo);
}
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("<===== %s()\n", __func__));
}
static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
@@ -414,15 +356,9 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
else
pRaInfo->PTModeSS = 0;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("ODM_ARFBRefresh_8188E(): PTModeSS =%d\n", pRaInfo->PTModeSS));
-
if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
pRaInfo->DecisionRate = pRaInfo->HighestRate;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("ODM_ARFBRefresh_8188E(): RateID =%d RateMask =%8.8x RAUseRate =%8.8x HighestRate =%d, DecisionRate =%d\n",
- pRaInfo->RateID, pRaInfo->RateMask, pRaInfo->RAUseRate, pRaInfo->HighestRate, pRaInfo->DecisionRate));
return 0;
}
@@ -518,17 +454,10 @@ static void odm_PTDecision_8188E(struct odm_ra_info *pRaInfo)
static void odm_RATxRPTTimerSetting(struct odm_dm_struct *dm_odm,
u16 minRptTime)
{
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" =====>%s()\n", __func__));
-
if (dm_odm->CurrminRptTime != minRptTime) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- (" CurrminRptTime = 0x%04x minRptTime = 0x%04x\n", dm_odm->CurrminRptTime, minRptTime));
rtw_rpt_timer_cfg_cmd(dm_odm->Adapter, minRptTime);
dm_odm->CurrminRptTime = minRptTime;
}
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" <===== %s()\n", __func__));
}
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
@@ -549,10 +478,6 @@ int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
max_rate_idx = 0x03;
}
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("%s(): WirelessMode:0x%08x , max_raid_idx:0x%02x\n",
- __func__, WirelessMode, max_rate_idx));
-
pRaInfo->DecisionRate = max_rate_idx;
pRaInfo->PreRate = max_rate_idx;
pRaInfo->HighestRate = max_rate_idx;
@@ -593,7 +518,6 @@ int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm)
{
u8 macid = 0;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>\n"));
dm_odm->CurrminRptTime = 0;
for (macid = 0; macid < ODM_ASSOCIATE_ENTRY_NUM; macid++)
@@ -606,8 +530,6 @@ u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 macid)
{
if ((!dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
return 0;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("macid =%d SGI =%d\n", macid, dm_odm->RAInfo[macid].RateSGI));
return dm_odm->RAInfo[macid].RateSGI;
}
@@ -618,8 +540,6 @@ u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 macid)
if ((!dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
return 0;
DecisionRate = dm_odm->RAInfo[macid].DecisionRate;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" macid =%d DecisionRate = 0x%x\n", macid, DecisionRate));
return DecisionRate;
}
@@ -630,8 +550,6 @@ u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 macid)
if ((!dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
return 0;
PTStage = dm_odm->RAInfo[macid].PTStage;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- ("macid =%d PTStage = 0x%x\n", macid, PTStage));
return PTStage;
}
@@ -641,9 +559,6 @@ void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rate
if ((!dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
return;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("macid =%d RateID = 0x%x RateMask = 0x%x SGIEnable =%d\n",
- macid, RateID, RateMask, SGIEnable));
pRaInfo = &dm_odm->RAInfo[macid];
pRaInfo->RateID = RateID;
@@ -658,8 +573,6 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
if ((!dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
return;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" macid =%d Rssi =%d\n", macid, Rssi));
pRaInfo = &dm_odm->RAInfo[macid];
pRaInfo->RssiStaRA = Rssi;
@@ -680,10 +593,6 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
u32 valid = 0, ItemNum = 0;
u16 minRptTime = 0x927c;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("=====>%s(): valid0 =%d valid1 =%d BufferLength =%d\n",
- __func__, macid_entry0, macid_entry1, TxRPT_Len));
-
ItemNum = TxRPT_Len >> 3;
pBuffer = TxRPT_Buf;
@@ -707,13 +616,6 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
pRAInfo->RTY[2] + pRAInfo->RTY[3] +
pRAInfo->RTY[4] + pRAInfo->DROP;
if (pRAInfo->TOTAL != 0) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("macid =%d Total =%d R0 =%d R1 =%d R2 =%d R3 =%d R4 =%d D0 =%d valid0 =%x valid1 =%x\n",
- MacId, pRAInfo->TOTAL,
- pRAInfo->RTY[0], pRAInfo->RTY[1],
- pRAInfo->RTY[2], pRAInfo->RTY[3],
- pRAInfo->RTY[4], pRAInfo->DROP,
- macid_entry0, macid_entry1));
if (pRAInfo->PTActive) {
if (pRAInfo->RAstage < 5)
odm_RateDecision_8188E(dm_odm, pRAInfo);
@@ -730,20 +632,6 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
} else {
odm_RateDecision_8188E(dm_odm, pRAInfo);
}
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("macid =%d R0 =%d R1 =%d R2 =%d R3 =%d R4 =%d drop =%d valid0 =%x RateID =%d SGI =%d\n",
- MacId,
- pRAInfo->RTY[0],
- pRAInfo->RTY[1],
- pRAInfo->RTY[2],
- pRAInfo->RTY[3],
- pRAInfo->RTY[4],
- pRAInfo->DROP,
- macid_entry0,
- pRAInfo->DecisionRate,
- pRAInfo->RateSGI));
- } else {
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, (" TOTAL = 0!!!!\n"));
}
}
@@ -755,7 +643,4 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
} while (MacId < ItemNum);
odm_RATxRPTTimerSetting(dm_odm, minRptTime);
-
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
- ("<===== %s()\n", __func__));
}
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 9585dffc63a3..f09620c54e69 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -23,12 +23,8 @@ uint rtw_hal_init(struct adapter *adapt)
rtw_hal_notch_filter(adapt, 1);
} else {
adapt->hw_init_completed = false;
- DBG_88E("%s: hal__init fail\n", __func__);
}
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- ("-rtl871x_hal_init:status=0x%x\n", status));
-
return status;
}
@@ -40,8 +36,6 @@ uint rtw_hal_deinit(struct adapter *adapt)
if (status == _SUCCESS)
adapt->hw_init_completed = false;
- else
- DBG_88E("\n %s: hal_init fail\n", __func__);
return status;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 4d659a812aed..ffc5394d5bb9 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -5,11 +5,11 @@
*
******************************************************************************/
+#include <linux/etherdevice.h>
+
#include "odm_precomp.h"
#include "phy.h"
-u32 GlobalDebugLevel;
-
/* avoid to warn in FreeBSD ==> To DO modify */
static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
/* UL DL */
@@ -147,13 +147,6 @@ u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
#define RxDefaultAnt1 0x65a9
#define RxDefaultAnt2 0x569a
-void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm)
-{
- pDM_Odm->DebugLevel = ODM_DBG_TRACE;
-
- pDM_Odm->DebugComponents = 0;
-}
-
/* 3 Export Interface */
/* 2011/09/21 MH Add to describe different team necessary resource allocate?? */
@@ -161,7 +154,6 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
{
/* 2012.05.03 Luke: For all IC series */
odm_CommonInfoSelfInit(pDM_Odm);
- odm_CmnInfoInit_Debug(pDM_Odm);
odm_DIGInit(pDM_Odm);
odm_RateAdaptiveMaskInit(pDM_Odm);
@@ -181,8 +173,6 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
{
/* 2012.05.03 Luke: For all IC series */
- odm_CmnInfoHook_Debug(pDM_Odm);
- odm_CmnInfoUpdate_Debug(pDM_Odm);
odm_CommonInfoSelfUpdate(pDM_Odm);
odm_FalseAlarmCounterStatistics(pDM_Odm);
odm_RSSIMonitorCheck(pDM_Odm);
@@ -217,8 +207,6 @@ void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
pDM_Odm->bCckHighPower = (bool)phy_query_bb_reg(adapter, 0x824, BIT(9));
pDM_Odm->RFPathRxEnable = (u8)phy_query_bb_reg(adapter, 0xc04, 0x0F);
-
- ODM_InitDebugSetting(pDM_Odm);
}
void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
@@ -247,48 +235,6 @@ void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
pDM_Odm->bOneEntryOnly = false;
}
-void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
-{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("%s==>\n", __func__));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportPlatform=%d\n", pDM_Odm->SupportPlatform));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility=0x%x\n", pDM_Odm->SupportAbility));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType=0x%x\n", pDM_Odm->SupportICType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion=%d\n", pDM_Odm->CutVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType=%d\n", pDM_Odm->BoardType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA=%d\n", pDM_Odm->ExtLNA));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA=%d\n", pDM_Odm->ExtPA));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW=%d\n", pDM_Odm->ExtTRSW));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("PatchID=%d\n", pDM_Odm->PatchID));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest=%d\n", pDM_Odm->bInHctTest));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest=%d\n", pDM_Odm->bWIFITest));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent=%d\n", pDM_Odm->bDualMacSmartConcurrent));
-}
-
-void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
-{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("%s==>\n", __func__));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *pDM_Odm->pNumTxBytesUnicast));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *pDM_Odm->pNumRxBytesUnicast));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *pDM_Odm->pWirelessMode));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *pDM_Odm->pSecChOffset));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *pDM_Odm->pSecurity));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *pDM_Odm->pBandWidth));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *pDM_Odm->pChannel));
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *pDM_Odm->pbScanInProcess));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *pDM_Odm->pbPowerSaving));
-}
-
-void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
-{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("%s==>\n", __func__));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Direct=%d\n", pDM_Odm->bWIFI_Direct));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Display=%d\n", pDM_Odm->bWIFI_Display));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked=%d\n", pDM_Odm->bLinked));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min=%d\n", pDM_Odm->RSSI_Min));
-}
-
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
@@ -339,23 +285,15 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
u8 dm_dig_max, dm_dig_min;
u8 CurrentIGI = pDM_DigTable->CurIGValue;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s()==>\n", __func__));
- if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("%s() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n", __func__));
+ if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)))
return;
- }
- if (*pDM_Odm->pbScanInProcess) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s() Return: In Scan Progress\n", __func__));
+ if (*pDM_Odm->pbScanInProcess)
return;
- }
/* add by Neil Chen to avoid PSD is processing */
- if (!pDM_Odm->bDMInitialGainEnable) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s() Return: PSD is Processing\n", __func__));
+ if (!pDM_Odm->bDMInitialGainEnable)
return;
- }
DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
@@ -382,33 +320,20 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
DIG_Dynamic_MIN = DIG_MaxOfMin;
else
DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("%s() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
- __func__, DIG_Dynamic_MIN));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("%s() : pDM_Odm->RSSI_Min=%d\n",
- __func__, pDM_Odm->RSSI_Min));
} else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
/* 1 Lower Bound for 88E AntDiv */
- if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV)
DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("%s(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
- __func__, pDM_DigTable->AntDiv_RSSI_max));
- }
} else {
DIG_Dynamic_MIN = dm_dig_min;
}
} else {
pDM_DigTable->rx_gain_range_max = dm_dig_max;
DIG_Dynamic_MIN = dm_dig_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s() : No Link\n", __func__));
}
/* 1 Modify DIG lower bound, deal with abnormally large false alarm */
if (pFalseAlmCnt->Cnt_all > 10000) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("dm_DIG(): Abnormally false alarm case.\n"));
-
if (pDM_DigTable->LargeFAHit != 3)
pDM_DigTable->LargeFAHit++;
if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
@@ -433,27 +358,20 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): Normal Case: At Lower Bound\n", __func__));
} else {
pDM_DigTable->ForbiddenIGI--;
pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): Normal Case: Approach Lower Bound\n", __func__));
}
} else {
pDM_DigTable->LargeFAHit = 0;
}
}
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("%s(): pDM_DigTable->LargeFAHit=%d\n",
- __func__, pDM_DigTable->LargeFAHit));
/* 1 Adjust initial gain by false alarm */
if (pDM_Odm->bLinked) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): DIG AfterLink\n", __func__));
if (FirstConnect) {
CurrentIGI = pDM_Odm->RSSI_Min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
} else {
if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
@@ -463,10 +381,8 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): DIG BeforeLink\n", __func__));
if (FirstDisConnect) {
CurrentIGI = pDM_DigTable->rx_gain_range_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): First DisConnect\n", __func__));
} else {
/* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
if (pFalseAlmCnt->Cnt_all > 10000)
@@ -475,24 +391,15 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
else if (pFalseAlmCnt->Cnt_all < 500)
CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): England DIG\n", __func__));
}
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): DIG End Adjust IGI\n", __func__));
/* 1 Check initial gain by upper/lower bound */
if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
CurrentIGI = pDM_DigTable->rx_gain_range_max;
if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
CurrentIGI = pDM_DigTable->rx_gain_range_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("%s(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
- __func__, pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): TotalFA=%d\n", __func__, pFalseAlmCnt->Cnt_all));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): CurIGValue=0x%x\n", __func__, CurrentIGI));
-
/* 2 High power RSSI threshold */
-
ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
@@ -556,20 +463,6 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
FalseAlmCnt->Cnt_Cck_fail);
FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter %s\n", __func__));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
- FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
- FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
- FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Cck_fail=%d\n", FalseAlmCnt->Cnt_Cck_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Ofdm_fail=%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Total False Alarm=%d\n", FalseAlmCnt->Cnt_all));
}
/* 3============================================================ */
@@ -750,10 +643,6 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
break;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
- (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n",
- rssi_level, WirelessMode, rate_bitmap));
-
return rate_bitmap;
}
@@ -775,26 +664,18 @@ void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm)
u8 i;
struct adapter *pAdapter = pDM_Odm->Adapter;
- if (pAdapter->bDriverStopped) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("<---- odm_RefreshRateAdaptiveMask(): driver is going to unload\n"));
+ if (pAdapter->bDriverStopped)
return;
- }
- if (!pDM_Odm->bUseRAMask) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("<---- odm_RefreshRateAdaptiveMask(): driver does not control rate adaptive mask\n"));
+ if (!pDM_Odm->bUseRAMask)
return;
- }
for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(pstat)) {
- if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
- ("RSSI:%d, RSSI_LEVEL:%d\n",
- pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level));
+ if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level))
rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
- }
}
}
}
@@ -808,6 +689,7 @@ bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate
u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
u8 RATRState;
+ struct device *dev = dvobj_to_dev(adapter_to_dvobj(pDM_Odm->Adapter));
/* Threshold Adjustment: */
/* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
@@ -824,7 +706,7 @@ bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate
LowRSSIThreshForRA += GoUpGap;
break;
default:
- ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !", *pRATRState));
+ dev_err(dev, "%s(): wrong rssi level setting %d!\n", __func__, *pRATRState);
break;
}
@@ -837,7 +719,6 @@ bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate
RATRState = DM_RATR_STA_LOW;
if (*pRATRState != RATRState || bForceUpdate) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
*pRATRState = RATRState;
return true;
}
@@ -892,7 +773,6 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
u8 sta_cnt = 0;
u32 PWDB_rssi[NUM_STA] = {0};/* 0~15]:MACID, [16~31]:PWDB_rssi */
struct sta_info *psta;
- u8 bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if (!check_fwstate(&Adapter->mlmepriv, _FW_LINKED))
return;
@@ -901,7 +781,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
psta = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(psta) &&
(psta->state & WIFI_ASOC_STATE) &&
- memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
+ !is_broadcast_ether_addr(psta->hwaddr) &&
memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
@@ -945,7 +825,6 @@ void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
if (*pDM_Odm->mp_mode != 1)
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
- MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
}
@@ -977,20 +856,16 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
{
- if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Return: Not Support HW AntDiv\n"));
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
- }
rtl88eu_dm_antenna_div_init(pDM_Odm);
}
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
{
- if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Return: Not Support HW AntDiv\n"));
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
- }
rtl88eu_dm_antenna_diversity(pDM_Odm);
}
@@ -1003,11 +878,6 @@ void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original VO PARAM: 0x%x\n", usb_read32(Adapter, ODM_EDCA_VO_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original VI PARAM: 0x%x\n", usb_read32(Adapter, ODM_EDCA_VI_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original BE PARAM: 0x%x\n", usb_read32(Adapter, ODM_EDCA_BE_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original BK PARAM: 0x%x\n", usb_read32(Adapter, ODM_EDCA_BK_PARAM)));
} /* ODM_InitEdcaTurbo */
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
@@ -1015,13 +885,10 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
/* 2011/09/29 MH In HW integration first stage, we provide 4 different handle to operate */
/* at the same time. In the stage2/3, we need to prive universal interface and merge all */
/* HW dynamic mechanism. */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("%s========================>\n", __func__));
-
if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
return;
odm_EdcaTurboCheckCE(pDM_Odm);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================%s\n", __func__));
} /* odm_CheckEdcaTurbo */
void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
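Note on the odm.c hunks above: the driver-private ODM_RT_TRACE/ODM_RT_ASSERT macros give way to the standard dev_* logging helpers, and the open-coded memcmp() against a local ff:ff:ff:ff:ff:ff array gives way to is_broadcast_ether_addr() from <linux/etherdevice.h>. The following is a minimal illustrative sketch of both idioms, not part of the patch; the function check_sta_addr() and its parameters are hypothetical stand-ins.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/device.h>
#include <linux/etherdevice.h>

static bool check_sta_addr(struct device *dev, const u8 *hwaddr, int rssi_level)
{
	/* is_broadcast_ether_addr() replaces memcmp() against a ff:ff:... array */
	if (is_broadcast_ether_addr(hwaddr))
		return false;

	if (rssi_level < 0) {
		/* dev_err() replaces the driver-private ODM_RT_ASSERT() macro */
		dev_err(dev, "%s(): wrong rssi level setting %d!\n",
			__func__, rssi_level);
		return false;
	}

	return true;
}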
diff --git a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
index a55a0d8b9fb7..e29cd35a5811 100644
--- a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
+++ b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
@@ -307,11 +307,7 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
return;
if (!dm_odm->bLinked) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("ODM_AntennaDiversity_88E(): No Link.\n"));
if (dm_fat_tbl->bBecomeLinked) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("Need to Turn off HW AntDiv\n"));
phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 0);
phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
BIT(15), 0);
@@ -324,8 +320,6 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
}
if (!dm_fat_tbl->bBecomeLinked) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("Need to Turn on HW AntDiv\n"));
phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
BIT(15), 1);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index a664bff370bb..256f87b9d630 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -162,18 +162,6 @@ static void get_tx_power_index(struct adapter *adapt, u8 channel, u8 *cck_pwr,
}
}
-static void phy_power_index_check(struct adapter *adapt, u8 channel,
- u8 *cck_pwr, u8 *ofdm_pwr, u8 *bw20_pwr,
- u8 *bw40_pwr)
-{
- struct hal_data_8188e *hal_data = adapt->HalData;
-
- hal_data->CurrentCckTxPwrIdx = cck_pwr[0];
- hal_data->CurrentOfdm24GTxPwrIdx = ofdm_pwr[0];
- hal_data->CurrentBW2024GTxPwrIdx = bw20_pwr[0];
- hal_data->CurrentBW4024GTxPwrIdx = bw40_pwr[0];
-}
-
void phy_set_tx_power_level(struct adapter *adapt, u8 channel)
{
u8 cck_pwr[MAX_TX_COUNT] = {0};
@@ -184,9 +172,6 @@ void phy_set_tx_power_level(struct adapter *adapt, u8 channel)
get_tx_power_index(adapt, channel, &cck_pwr[0], &ofdm_pwr[0],
&bw20_pwr[0], &bw40_pwr[0]);
- phy_power_index_check(adapt, channel, &cck_pwr[0], &ofdm_pwr[0],
- &bw20_pwr[0], &bw40_pwr[0]);
-
rtl88eu_phy_rf6052_set_cck_txpower(adapt, &cck_pwr[0]);
rtl88eu_phy_rf6052_set_ofdm_txpower(adapt, &ofdm_pwr[0], &bw20_pwr[0],
&bw40_pwr[0], channel);
@@ -303,10 +288,6 @@ void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm, u8 type,
u8 pwr_value = 0;
/* Tx power tracking BB swing table. */
if (type == 0) { /* For OFDM adjust */
- ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n",
- dm_odm->BbSwingIdxOfdm, dm_odm->BbSwingFlagOfdm));
-
if (dm_odm->BbSwingIdxOfdm <= dm_odm->BbSwingIdxOfdmBase) {
*direction = 1;
pwr_value = dm_odm->BbSwingIdxOfdmBase -
@@ -316,12 +297,7 @@ void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm, u8 type,
pwr_value = dm_odm->BbSwingIdxOfdm -
dm_odm->BbSwingIdxOfdmBase;
}
-
} else if (type == 1) { /* For CCK adjust. */
- ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("dm_odm->BbSwingIdxCck = %d dm_odm->BbSwingIdxCckBase = %d\n",
- dm_odm->BbSwingIdxCck, dm_odm->BbSwingIdxCckBase));
-
if (dm_odm->BbSwingIdxCck <= dm_odm->BbSwingIdxCckBase) {
*direction = 1;
pwr_value = dm_odm->BbSwingIdxCckBase -
@@ -343,8 +319,6 @@ void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm, u8 type,
static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm)
{
if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("%s CH=%d\n", __func__, *dm_odm->pChannel));
phy_set_tx_power_level(dm_odm->Adapter, *dm_odm->pChannel);
dm_odm->BbSwingFlagOfdm = false;
dm_odm->BbSwingFlagCck = false;
@@ -557,7 +531,6 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB)
{
u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u4tmp;
u8 result = 0x00;
- struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
@@ -610,8 +583,6 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB)
/* 1 RX IQK */
/* modify RXIQK mode table */
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path-A Rx IQK modify RXIQK mode table 2!\n"));
phy_set_bb_reg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
phy_set_rf_reg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
phy_set_rf_reg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
@@ -650,9 +621,6 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB)
(((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
(((reg_eac & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
- else
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path A Rx IQK fail!!\n"));
return result;
}
@@ -661,7 +629,6 @@ static u8 phy_path_b_iqk(struct adapter *adapt)
{
u32 regeac, regeb4, regebc, regec4, regecc;
u8 result = 0x00;
- struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
/* One shot, path B LOK & IQK */
phy_set_bb_reg(adapt, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
@@ -686,9 +653,7 @@ static u8 phy_path_b_iqk(struct adapter *adapt)
(((regec4 & 0x03FF0000) >> 16) != 0x132) &&
(((regecc & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
- else
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION,
- ODM_DBG_LOUD, ("Path B Rx IQK fail!!\n"));
+
return result;
}
@@ -1055,13 +1020,6 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
bMaskDWord) & 0x3FF0000) >> 16;
break;
}
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path A Rx IQK Fail!!\n"));
- }
-
- if (path_a_ok == 0x00) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path A IQK failed!!\n"));
}
if (is2t) {
@@ -1089,11 +1047,6 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
bMaskDWord) & 0x3FF0000) >> 16;
}
}
-
- if (path_b_ok == 0x00) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path B IQK failed!!\n"));
- }
}
/* Back to BB mode, load original value */
@@ -1214,8 +1167,6 @@ void rtl88eu_phy_iq_calibrate(struct adapter *adapt, bool recovery)
return;
if (recovery) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("phy_iq_calibrate: Return due to recovery!\n"));
reload_adda_reg(adapt, iqk_bb_reg_92c,
dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
return;
@@ -1280,8 +1231,6 @@ void rtl88eu_phy_iq_calibrate(struct adapter *adapt, bool recovery)
pathaok = true;
pathbok = true;
} else {
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("IQK: FAIL use default value\n"));
dm_odm->RFCalibrateInfo.RegE94 = 0x100;
dm_odm->RFCalibrateInfo.RegEB4 = 0x100;
dm_odm->RFCalibrateInfo.RegE9C = 0x0;
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index 34784943a7d1..cec2ff879f5d 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -25,27 +25,12 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
do {
pwrcfgcmd = pwrseqcmd[aryidx];
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("%s: offset(%#x) cut_msk(%#x)"
- " cmd(%#x)"
- "msk(%#x) value(%#x)\n",
- __func__,
- GET_PWR_CFG_OFFSET(pwrcfgcmd),
- GET_PWR_CFG_CUT_MASK(pwrcfgcmd),
- GET_PWR_CFG_CMD(pwrcfgcmd),
- GET_PWR_CFG_MASK(pwrcfgcmd),
- GET_PWR_CFG_VALUE(pwrcfgcmd)));
-
/* Only Handle the command whose CUT is matched */
if (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) {
switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
case PWR_CMD_READ:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("%s: PWR_CMD_READ\n", __func__));
break;
case PWR_CMD_WRITE:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("%s: PWR_CMD_WRITE\n", __func__));
offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
/* Read the value from system register */
@@ -59,9 +44,6 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
usb_write8(padapter, offset, value);
break;
case PWR_CMD_POLLING:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("%s: PWR_CMD_POLLING\n", __func__));
-
poll_bit = false;
offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
do {
@@ -74,15 +56,11 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
else
udelay(10);
- if (poll_count++ > max_poll_count) {
- DBG_88E("Fail to polling Offset[%#x]\n", offset);
+ if (poll_count++ > max_poll_count)
return false;
- }
} while (!poll_bit);
break;
case PWR_CMD_DELAY:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("%s: PWR_CMD_DELAY\n", __func__));
if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
else
@@ -90,12 +68,8 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
break;
case PWR_CMD_END:
/* When this command is parsed, end the process */
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("%s: PWR_CMD_END\n", __func__));
return true;
default:
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- ("%s: Unknown CMD!!\n", __func__));
break;
}
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 3a0e3d41a404..f2969e160ac3 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -55,11 +55,8 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u32 h2c_cmd_ex = 0;
s32 ret = _FAIL;
- if (!adapt->bFWReady) {
- DBG_88E("%s(): return H2C cmd because fw is not ready\n",
- __func__);
+ if (!adapt->bFWReady)
return ret;
- }
if (!pCmdBuffer)
goto exit;
@@ -71,10 +68,8 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
/* pay attention to if race condition happened in H2C cmd setting. */
h2c_box_num = adapt->HalData->LastHMEBoxNum;
- if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
- DBG_88E(" fw read cmd failed...\n");
+ if (!_is_fw_read_cmd_down(adapt, h2c_box_num))
goto exit;
- }
*(u8 *)(&h2c_cmd) = ElementID;
@@ -134,9 +129,6 @@ void rtw_hal_add_ra_tid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_le
bitmap &= 0x0fffffff;
- DBG_88E("%s=> mac_id:%d, raid:%d, ra_bitmap=0x%x, shortGIrate=0x%02x\n",
- __func__, macid, raid, bitmap, shortGIrate);
-
ODM_RA_UpdateRateInfo_8188E(odmpriv, macid, raid, bitmap, shortGIrate);
}
@@ -146,9 +138,6 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
struct pwrctrl_priv *pwrpriv = &adapt->pwrctrlpriv;
u8 RLBM = 0; /* 0:Min, 1:Max, 2:User define */
- DBG_88E("%s: Mode=%d SmartPS=%d UAPSD=%d\n", __func__,
- Mode, pwrpriv->smart_ps, adapt->registrypriv.uapsd_enable);
-
switch (Mode) {
case PS_MODE_ACTIVE:
H2CSetPwrMode.Mode = 0;
@@ -188,13 +177,8 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
{
- u8 opmode, macid;
u16 mst_rpt = le16_to_cpu(mstatus_rpt);
- opmode = (u8)mst_rpt;
- macid = (u8)(mst_rpt >> 8);
-
- DBG_88E("### %s: MStatus=%x MACID=%d\n", __func__, opmode, macid);
FillH2CCmd_88E(adapt, H2C_COM_MEDIA_STATUS_RPT, sizeof(mst_rpt), (u8 *)&mst_rpt);
}
@@ -218,7 +202,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
ether_addr_copy(pwlanhdr->addr3, cur_network->MacAddress);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
- SetFrameSubType(pframe, WIFI_BEACON);
+ SetFrameSubType(pframe, IEEE80211_STYPE_BEACON);
pframe += sizeof(struct ieee80211_hdr_3addr);
pktlen = sizeof(struct ieee80211_hdr_3addr);
@@ -275,10 +259,8 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
_ConstructBeacon:
- if ((pktlen + TXDESC_SIZE) > 512) {
- DBG_88E("beacon frame too large\n");
+ if ((pktlen + TXDESC_SIZE) > 512)
return;
- }
*pLength = pktlen;
}
@@ -297,7 +279,7 @@ static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetPwrMgt(fctrl);
- SetFrameSubType(pframe, WIFI_PSPOLL);
+ SetFrameSubType(pframe, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
/* AID. */
SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
@@ -361,7 +343,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
if (bQoS) {
struct ieee80211_qos_hdr *pwlanqoshdr;
- SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+ SetFrameSubType(pframe, IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
SetPriority(&pwlanqoshdr->qos_ctrl, AC);
@@ -369,7 +351,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
pktlen = sizeof(struct ieee80211_qos_hdr);
} else {
- SetFrameSubType(pframe, WIFI_DATA_NULL);
+ SetFrameSubType(pframe, IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
pktlen = sizeof(struct ieee80211_hdr_3addr);
}
@@ -399,7 +381,7 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
ether_addr_copy(pwlanhdr->addr3, bssid);
SetSeqNum(pwlanhdr, 0);
- SetFrameSubType(fctrl, WIFI_PROBERSP);
+ SetFrameSubType(fctrl, IEEE80211_STYPE_PROBE_RESP);
pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe += pktlen;
@@ -440,12 +422,9 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
struct rsvdpage_loc RsvdPageLoc;
struct wlan_bssid_ex *pnetwork;
- DBG_88E("%s\n", __func__);
ReservedPagePacket = kzalloc(1000, GFP_KERNEL);
- if (!ReservedPagePacket) {
- DBG_88E("%s: alloc ReservedPagePacket fail!\n", __func__);
+ if (!ReservedPagePacket)
return;
- }
pxmitpriv = &adapt->xmitpriv;
pmlmeext = &adapt->mlmeextpriv;
@@ -466,7 +445,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
if (PageNeed == 1)
PageNeed += 1;
PageNum += PageNeed;
- adapt->HalData->FwRsvdPageStartOffset = PageNum;
BufIndex += PageNeed * 128;
@@ -524,7 +502,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
rtw_hal_mgnt_xmit(adapt, pmgntframe);
- DBG_88E("%s: Set RSVD page location to Fw\n", __func__);
FillH2CCmd_88E(adapt, H2C_COM_RSVD_PAGE, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
exit:
@@ -541,8 +518,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
u8 DLBcnCount = 0;
u32 poll = 0;
- DBG_88E("%s mstatus(%x)\n", __func__, mstatus);
-
if (mstatus == 1) {
/* We should set AID, correct TSF, HW seq enable before set JoinBssReport to Fw in 88/92C. */
/* Suggested by filen. Added by tynli. */
@@ -559,10 +534,8 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) & (~BIT(3)));
usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) | BIT(4));
- if (haldata->RegFwHwTxQCtrl & BIT(6)) {
- DBG_88E("HalDownloadRSVDPage(): There is an Adapter is sending beacon.\n");
+ if (haldata->RegFwHwTxQCtrl & BIT(6))
bSendBeacon = true;
- }
/* Set FWHW_TXQ_CTRL 0x422[6]=0 to tell Hw the packet is not a real beacon frame. */
usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6))));
@@ -585,12 +558,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
} while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
} while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
- if (adapt->bSurpriseRemoved || adapt->bDriverStopped)
- ;
- else if (!bcn_valid)
- DBG_88E("%s: 1 Download RSVD page failed! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
- else
- DBG_88E("%s: 1 Download RSVD success! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
/* */
/* We just can send the reserved page twice during the time that Tx thread is stopped (e.g. pnpsetpower) */
/* because we need to free the Tx BCN Desc which is used by the first reserved page packet. */
@@ -613,10 +580,8 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
}
/* Update RSVD page location H2C to Fw. */
- if (bcn_valid) {
+ if (bcn_valid)
rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
- DBG_88E("Set RSVD page location to Fw.\n");
- }
/* Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */
/* Clear CR[8] or beacon packet will not be send to TxBuf anymore. */
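Note on the rtl8188e_cmd.c hunks above: the driver-private WIFI_* frame-subtype constants are replaced by the mainline IEEE80211_FTYPE_*/IEEE80211_STYPE_* values from <linux/ieee80211.h>. Below is a hedged sketch of how those constants compose a frame_control value; the helper name qos_null_frame_control() is invented purely for illustration.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/ieee80211.h>

static __le16 qos_null_frame_control(void)
{
	/* Type and subtype bits are simply ORed together, matching the new
	 * SetFrameSubType(pframe, IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC)
	 * call sites in the hunks above. */
	return cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
}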
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 391c59490718..10e88f976163 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -165,11 +165,9 @@ skip_dm:
void rtw_hal_dm_init(struct adapter *Adapter)
{
struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
- struct odm_dm_struct *podmpriv = &Adapter->HalData->odmpriv;
memset(pdmpriv, 0, sizeof(struct dm_priv));
Init_ODM_ComInfo_88E(Adapter);
- ODM_InitDebugSetting(podmpriv);
}
/* Add new function to reset the state of antenna diversity before link. */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 95b27b4df705..d1086699f952 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -24,11 +24,8 @@ void iol_mode_enable(struct adapter *padapter, u8 enable)
reg_0xf0 = usb_read8(padapter, REG_SYS_CFG);
usb_write8(padapter, REG_SYS_CFG, reg_0xf0 | SW_OFFLOAD_EN);
- if (!padapter->bFWReady) {
- DBG_88E("bFWReady == false call reset 8051...\n");
+ if (!padapter->bFWReady)
_8051Reset88E(padapter);
- }
-
} else {
/* disable initial offload */
reg_0xf0 = usb_read8(padapter, REG_SYS_CFG);
@@ -74,7 +71,6 @@ s32 rtl8188e_iol_efuse_patch(struct adapter *padapter)
{
s32 result = _SUCCESS;
- DBG_88E("==> %s\n", __func__);
if (rtw_iol_applied(padapter)) {
iol_mode_enable(padapter, 1);
result = iol_execute(padapter, CMD_READ_EFUSE_MAP);
@@ -95,7 +91,6 @@ void _8051Reset88E(struct adapter *padapter)
u1bTmp = usb_read8(padapter, REG_SYS_FUNC_EN + 1);
usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp & (~BIT(2)));
usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp | (BIT(2)));
- DBG_88E("=====> %s(): 8051 reset success .\n", __func__);
}
void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
@@ -139,11 +134,9 @@ void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariabl
struct sta_info *psta = pValue1;
if (bSet) {
- DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
ODM_RAInfo_Init(podmpriv, psta->mac_id);
} else {
- DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, NULL);
}
}
@@ -161,13 +154,10 @@ void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariabl
void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
{
- if (enable) {
- DBG_88E("Enable notch filter\n");
+ if (enable)
usb_write8(adapter, rOFDM0_RxDSP + 1, usb_read8(adapter, rOFDM0_RxDSP + 1) | BIT(1));
- } else {
- DBG_88E("Disable notch filter\n");
+ else
usb_write8(adapter, rOFDM0_RxDSP + 1, usb_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT(1));
- }
}
/* */
@@ -191,7 +181,6 @@ static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data)
break;
if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("Failed to polling write LLT done at address %d!\n", address));
status = _FAIL;
break;
}
@@ -241,19 +230,8 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
void Hal_InitPGData88E(struct adapter *padapter)
{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
-
- if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
- if (!is_boot_from_eeprom(padapter)) {
- /* Read EFUSE real map to shadow. */
- EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI);
- }
- } else {/* autoload fail */
- RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("AutoLoad Fail reported from CR9346!!\n"));
- /* update to default value 0xFF */
- if (!is_boot_from_eeprom(padapter))
- EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI);
- }
+ if (!is_boot_from_eeprom(padapter))
+ EFUSE_ShadowMapUpdate(padapter);
}
void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo)
@@ -263,14 +241,10 @@ void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo)
/* Checl 0x8129 again for making sure autoload status!! */
EEPROMId = le16_to_cpu(*((__le16 *)hwinfo));
- if (EEPROMId != RTL_EEPROM_ID) {
- DBG_88E("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
+ if (EEPROMId != RTL_EEPROM_ID)
pEEPROM->bautoload_fail_flag = true;
- } else {
+ else
pEEPROM->bautoload_fail_flag = false;
- }
-
- DBG_88E("EEPROM ID = 0x%04x\n", EEPROMId);
}
static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, u8 *PROMContent, bool AutoLoadFail)
@@ -404,11 +378,6 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
/* decide hw if support remote wakeup function */
/* if hw supported, 8051 (SIE) will generate WeakUP signal(D+/D- toggle) when autoresume */
padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
-
- DBG_88E("%s...bHWPwrPindetect(%x)-bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
- padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
-
- DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
}
}
@@ -431,21 +400,12 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][4];
else
pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][group];
-
- DBG_88E("======= Path %d, Channel %d =======\n", 0, ch);
- DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_CCK_Base[0][ch]);
- DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_BW40_Base[0][ch]);
}
for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
pHalData->CCK_24G_Diff[0][TxCount] = pwrInfo24G.CCK_Diff[0][TxCount];
pHalData->OFDM_24G_Diff[0][TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
pHalData->BW20_24G_Diff[0][TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
pHalData->BW40_24G_Diff[0][TxCount] = pwrInfo24G.BW40_Diff[0][TxCount];
- DBG_88E("======= TxCount %d =======\n", TxCount);
- DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->CCK_24G_Diff[0][TxCount]);
- DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->OFDM_24G_Diff[0][TxCount]);
- DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW20_24G_Diff[0][TxCount]);
- DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW40_24G_Diff[0][TxCount]);
}
/* 2010/10/19 MH Add Regulator recognize for CU. */
@@ -456,7 +416,6 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
} else {
pHalData->EEPROMRegulatory = 0;
}
- DBG_88E("EEPROMRegulatory = 0x%x\n", pHalData->EEPROMRegulatory);
}
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
@@ -470,7 +429,6 @@ void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoa
} else {
pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
}
- DBG_88E("CrystalCap: 0x%2x\n", pHalData->CrystalCap);
}
void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
@@ -482,7 +440,6 @@ void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoL
& 0xE0) >> 5;
else
pHalData->BoardType = 0;
- DBG_88E("Board Type: 0x%2x\n", pHalData->BoardType);
}
void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
@@ -496,9 +453,6 @@ void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
} else {
pHalData->EEPROMVersion = 1;
}
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- ("Hal_EfuseParseEEPROMVer(), EEVer = %d\n",
- pHalData->EEPROMVersion));
}
void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
@@ -508,7 +462,6 @@ void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool Auto
padapter->registrypriv.channel_plan,
RT_CHANNEL_DOMAIN_WORLD_WIDE_13, AutoLoadFail);
- DBG_88E("mlmepriv.ChannelPlan = 0x%02x\n", padapter->mlmepriv.ChannelPlan);
}
void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
@@ -521,7 +474,6 @@ void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool Auto
pHalData->EEPROMCustomerID = 0;
pHalData->EEPROMSubCustomerID = 0;
}
- DBG_88E("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID);
}
void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
@@ -553,7 +505,6 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
} else {
pHalData->AntDivCfg = 0;
}
- DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
}
void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
@@ -567,8 +518,6 @@ void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool Aut
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail) {
- pHalData->bAPKThermalMeterIgnore = true;
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
}
- DBG_88E("ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 8669bf097479..05dbd3f08328 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -156,7 +156,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
myid(&padapter->eeprompriv), ETH_ALEN));
pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
- (GetFrameSubType(wlanhdr) == WIFI_BEACON);
+ (GetFrameSubType(wlanhdr) == IEEE80211_STYPE_BEACON);
if (pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index 9b8a284544ac..efa8960a7eb5 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -23,35 +23,3 @@ void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
RTW_SCTX_DONE_CCX_PKT_FAIL);
}
}
-
-void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
- struct tx_desc *ptxdesc)
-{
- u8 dmp_txpkt;
- bool dump_txdesc = false;
-
- rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(dmp_txpkt));
-
- if (dmp_txpkt == 1) {/* dump txdesc for data frame */
- DBG_88E("dump tx_desc for data frame\n");
- if ((frame_tag & 0x0f) == DATA_FRAMETAG)
- dump_txdesc = true;
- } else if (dmp_txpkt == 2) {/* dump txdesc for mgnt frame */
- DBG_88E("dump tx_desc for mgnt frame\n");
- if ((frame_tag & 0x0f) == MGNT_FRAMETAG)
- dump_txdesc = true;
- }
-
- if (dump_txdesc) {
- DBG_88E("=====================================\n");
- DBG_88E("txdw0(0x%08x)\n", ptxdesc->txdw0);
- DBG_88E("txdw1(0x%08x)\n", ptxdesc->txdw1);
- DBG_88E("txdw2(0x%08x)\n", ptxdesc->txdw2);
- DBG_88E("txdw3(0x%08x)\n", ptxdesc->txdw3);
- DBG_88E("txdw4(0x%08x)\n", ptxdesc->txdw4);
- DBG_88E("txdw5(0x%08x)\n", ptxdesc->txdw5);
- DBG_88E("txdw6(0x%08x)\n", ptxdesc->txdw6);
- DBG_88E("txdw7(0x%08x)\n", ptxdesc->txdw7);
- DBG_88E("=====================================\n");
- }
-}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 35806b27fdee..25ce6db3beae 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -18,7 +18,7 @@ void sw_led_on(struct adapter *padapter, struct LED_871x *pLed)
return;
led_cfg = usb_read8(padapter, REG_LEDCFG2);
usb_write8(padapter, REG_LEDCFG2, (led_cfg & 0xf0) | BIT(5) | BIT(6));
- pLed->bLedOn = true;
+ pLed->led_on = true;
}
void sw_led_off(struct adapter *padapter, struct LED_871x *pLed)
@@ -37,7 +37,7 @@ void sw_led_off(struct adapter *padapter, struct LED_871x *pLed)
led_cfg &= 0xFE;
usb_write8(padapter, REG_MAC_PINMUX_CFG, led_cfg);
exit:
- pLed->bLedOn = false;
+ pLed->led_on = false;
}
void rtw_hal_sw_led_init(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 09bc915994db..aa69fc3880b3 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -31,8 +31,6 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL);
if (!precvpriv->precv_buf) {
res = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("alloc recv_buf fail!\n"));
goto exit;
}
precvbuf = precvpriv->precv_buf;
@@ -80,14 +78,6 @@ void rtw_hal_free_recv_priv(struct adapter *padapter)
}
kfree(precvpriv->precv_buf);
-
- if (skb_queue_len(&precvpriv->rx_skb_queue))
- DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
skb_queue_purge(&precvpriv->rx_skb_queue);
-
- if (skb_queue_len(&precvpriv->free_recv_skb_queue))
- DBG_88E(KERN_WARNING "free_recv_skb_queue not empty, %d\n",
- skb_queue_len(&precvpriv->free_recv_skb_queue));
-
skb_queue_purge(&precvpriv->free_recv_skb_queue);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 2866283c211d..1fa558e0de38 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -287,11 +287,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
- } else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG) {
- DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
} else {
- DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
-
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */
@@ -322,7 +318,6 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
rtl88eu_dm_set_tx_ant_by_tx_info(odmpriv, pmem, pattrib->mac_id);
rtl8188eu_cal_txdesc_chksum(ptxdesc);
- _dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
return pull;
}
@@ -346,15 +341,11 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
rtw_issue_addbareq_cmd(adapt, pxmitframe);
mem_addr = pxmitframe->buf_addr;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("%s()\n", __func__));
-
for (t = 0; t < pattrib->nr_frags; t++) {
if (inner_ret != _SUCCESS && ret == _SUCCESS)
ret = _FAIL;
if (t != (pattrib->nr_frags - 1)) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));
-
sz = pxmitpriv->frag_len;
sz = sz - 4 - pattrib->icv_len;
} else {
@@ -377,8 +368,6 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
rtw_count_tx_stats(adapt, pxmitframe, sz);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));
-
mem_addr += w_sz;
mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
@@ -413,7 +402,7 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
struct xmit_priv *pxmitpriv)
{
- struct xmit_frame *pxmitframe = NULL;
+ struct xmit_frame *pxmitframe, *n;
struct xmit_frame *pfirstframe = NULL;
struct xmit_buf *pxmitbuf;
@@ -422,7 +411,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
struct sta_info *psta = NULL;
struct tx_servq *ptxservq = NULL;
- struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;
+ struct list_head *xmitframe_phead = NULL;
u32 pbuf; /* next pkt address */
u32 pbuf_tail; /* last pkt tail */
@@ -435,15 +424,11 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
/* dump frame variable */
u32 ff_hwaddr;
- RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));
-
pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
if (!pxmitbuf)
return false;
/* 3 1. pick up first frame */
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
if (!pxmitframe) {
/* no more xmit frame, release xmit buffer */
@@ -507,12 +492,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&ptxservq->sta_pending);
- xmitframe_plist = xmitframe_phead->next;
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = xmitframe_plist->next;
-
+ list_for_each_entry_safe(pxmitframe, n, xmitframe_phead, list) {
pxmitframe->agg_num = 0; /* not first frame of aggregation */
pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
@@ -627,7 +607,6 @@ bool rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
if (res == _SUCCESS) {
rtw_dump_xframe(adapt, pxmitframe);
} else {
- DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
@@ -639,7 +618,6 @@ enqueue:
spin_unlock_bh(&pxmitpriv->lock);
if (res != _SUCCESS) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
rtw_free_xmitframe(pxmitpriv, pxmitframe);
/* Trick, make the statistics correct */
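Note on the rtl8188eu_xmit.c hunk above: the open-coded walk over sta_pending (xmitframe_phead/xmitframe_plist plus container_of()) becomes list_for_each_entry_safe(), whose extra cursor lets entries be unlinked while iterating. A small self-contained sketch of the idiom follows; struct pending_frame and drain_pending() are hypothetical names used only for illustration.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/list.h>
#include <linux/slab.h>

struct pending_frame {
	struct list_head list;
	int agg_num;
};

static void drain_pending(struct list_head *head)
{
	struct pending_frame *frame, *n;

	/* The _safe variant caches the next entry in 'n', so the current
	 * entry may be removed from the list inside the loop body. */
	list_for_each_entry_safe(frame, n, head, list) {
		frame->agg_num = 0;
		list_del(&frame->list);
		kfree(frame);
	}
}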
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 80cdcf6f7879..05c67e7d23ad 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -35,7 +35,6 @@ static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
default:
break;
}
- DBG_88E("%s OutEpQueueSel(0x%02x), OutEpNumber(%d)\n", __func__, haldata->OutEpQueueSel, haldata->OutEpNumber);
}
static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPipe, u8 NumOutPipe)
@@ -90,10 +89,8 @@ u32 rtw_hal_power_on(struct adapter *adapt)
return _SUCCESS;
if (!rtl88eu_pwrseqcmdparsing(adapt, PWR_CUT_ALL_MSK,
- Rtl8188E_NIC_PWR_ON_FLOW)) {
- DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__);
+ Rtl8188E_NIC_PWR_ON_FLOW))
return _FAIL;
- }
/* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
/* Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
@@ -594,7 +591,6 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
if (haldata->AntDivCfg == 0)
return;
- DBG_88E("==> %s ....\n", __func__);
usb_write32(Adapter, REG_LEDCFG0, usb_read32(Adapter, REG_LEDCFG0) | BIT(23));
phy_set_bb_reg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
@@ -603,25 +599,8 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
haldata->CurAntenna = Antenna_A;
else
haldata->CurAntenna = Antenna_B;
- DBG_88E("%s,Cur_ant:(%x)%s\n", __func__, haldata->CurAntenna, (haldata->CurAntenna == Antenna_A) ? "Antenna_A" : "Antenna_B");
}
-/*-----------------------------------------------------------------------------
- * Function: HwSuspendModeEnable92Cu()
- *
- * Overview: HW suspend mode switch.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 08/23/2010 MHC HW suspend mode switch test..
- *---------------------------------------------------------------------------
- */
enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
{
u8 val8;
@@ -629,12 +608,10 @@ enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
if (adapt->pwrctrlpriv.bHWPowerdown) {
val8 = usb_read8(adapt, REG_HSISR);
- DBG_88E("pwrdown, 0x5c(BIT(7))=%02x\n", val8);
rfpowerstate = (val8 & BIT(7)) ? rf_off : rf_on;
} else { /* rf on/off */
usb_write8(adapt, REG_MAC_PINMUX_CFG, usb_read8(adapt, REG_MAC_PINMUX_CFG) & ~(BIT(3)));
val8 = usb_read8(adapt, REG_GPIO_IO_SEL);
- DBG_88E("GPIO_IN=%02x\n", val8);
rfpowerstate = (val8 & BIT(3)) ? rf_on : rf_off;
}
return rfpowerstate;
@@ -649,11 +626,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
struct hal_data_8188e *haldata = Adapter->HalData;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- unsigned long init_start_time = jiffies;
-
- #define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
if (Adapter->pwrctrlpriv.bkeepfwalive) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
@@ -669,10 +641,8 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
goto exit;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
status = rtw_hal_power_on(Adapter);
if (status == _FAIL) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init power on!\n"));
goto exit;
}
@@ -693,7 +663,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC01);
_InitQueueReservedPage(Adapter);
_InitQueuePriority(Adapter);
_InitPageBoundary(Adapter);
@@ -701,7 +670,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
_InitTxBufferBoundary(Adapter, 0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
if (Adapter->registrypriv.mp_mode == 1) {
_InitRxSetting(Adapter);
Adapter->bFWReady = false;
@@ -709,11 +677,9 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
status = rtl88eu_download_fw(Adapter);
if (status) {
- DBG_88E("%s: Download Firmware failed!!\n", __func__);
Adapter->bFWReady = false;
return status;
}
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
Adapter->bFWReady = true;
}
rtl8188e_InitializeFirmwareVars(Adapter);
@@ -724,23 +690,17 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtl88eu_phy_rf_config(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_EFUSE_PATCH);
status = rtl8188e_iol_efuse_patch(Adapter);
- if (status == _FAIL) {
- DBG_88E("%s rtl8188e_iol_efuse_patch failed\n", __func__);
+ if (status == _FAIL)
goto exit;
- }
_InitTxBufferBoundary(Adapter, txpktbuf_bndy);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_LLTT);
status = InitLLTTable(Adapter, txpktbuf_bndy);
if (status == _FAIL) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init LLT table\n"));
goto exit;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC02);
/* Get Rx PHY status in order to report RSSI and others. */
_InitDriverInfoSize(Adapter, DRVINFO_SZ);
@@ -782,13 +742,10 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
haldata->RfRegChnlVal[0] = rtw_hal_read_rfreg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
haldata->RfRegChnlVal[1] = rtw_hal_read_rfreg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
_BBTurnOnBlock(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_SECURITY);
invalidate_cam_all(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC11);
/* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
phy_set_tx_power_level(Adapter, haldata->CurrentChannel);
@@ -813,7 +770,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Nav limit , suggest by scott */
usb_write8(Adapter, 0x652, 0x0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
rtl8188e_InitHalDm(Adapter);
/* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
@@ -837,7 +793,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* enable tx DMA to drop the redundate data of packet */
usb_write16(Adapter, REG_TXDMA_OFFSET_CHK, (usb_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
@@ -847,15 +802,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
-
ODM_TXPowerTrackingCheck(&haldata->odmpriv);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
rtl88eu_phy_lc_calibrate(Adapter);
}
-/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
/* _InitPABias(Adapter); */
usb_write8(Adapter, REG_USB_HRPWM, 0);
@@ -863,10 +814,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
usb_write32(Adapter, REG_FWHW_TXQ_CTRL, usb_read32(Adapter, REG_FWHW_TXQ_CTRL) | BIT(12));
exit:
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
-
- DBG_88E("%s in %dms\n", __func__,
- jiffies_to_msecs(jiffies - init_start_time));
return status;
}
@@ -875,8 +822,6 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
{
u8 val8;
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("%s\n", __func__));
-
/* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
val8 = usb_read8(Adapter, REG_TX_RPT_CTRL);
usb_write8(Adapter, REG_TX_RPT_CTRL, val8 & (~BIT(1)));
@@ -942,12 +887,9 @@ static void rtl8192cu_hw_power_down(struct adapter *adapt)
u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
{
- DBG_88E("==> %s\n", __func__);
-
usb_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
usb_write32(Adapter, REG_HIMRE_88E, IMR_DISABLED_88E);
- DBG_88E("bkeepfwalive(%x)\n", Adapter->pwrctrlpriv.bkeepfwalive);
if (Adapter->pwrctrlpriv.bkeepfwalive) {
if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
rtl8192cu_hw_power_down(Adapter);
@@ -971,14 +913,10 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter)
status = _SUCCESS;
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- ("===> usb_inirp_init\n"));
-
/* issue Rx irp to receive data */
precvbuf = precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
if (!usb_read_port(Adapter, RECV_BULK_IN_ADDR, precvbuf)) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
status = _FAIL;
goto exit;
}
@@ -987,9 +925,6 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter)
}
exit:
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<=== usb_inirp_init\n"));
-
return status;
}
@@ -1018,9 +953,6 @@ static void Hal_EfuseParsePIDVID_8188EU(struct adapter *adapt, u8 *hwinfo, bool
haldata->EEPROMCustomerID = EEPROM_Default_CustomerID;
haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
}
-
- DBG_88E("VID = 0x%04X, PID = 0x%04X\n", haldata->EEPROMVID, haldata->EEPROMPID);
- DBG_88E("Customer ID: 0x%02X, SubCustomer ID: 0x%02X\n", haldata->EEPROMCustomerID, haldata->EEPROMSubCustomerID);
}
static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
@@ -1036,8 +968,6 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
/* Read Permanent MAC address */
memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN);
}
- RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
- ("%s: Permanent Address = %pM\n", __func__, eeprom->mac_addr));
}
static void readAdapterInfo_8188EU(struct adapter *adapt)
@@ -1070,23 +1000,13 @@ static void _ReadPROMContent(struct adapter *Adapter)
eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
eeprom->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
- DBG_88E("Boot from %s, Autoload %s !\n", (eeprom->EepromOrEfuse ? "EEPROM" : "EFUSE"),
- (eeprom->bautoload_fail_flag ? "Fail" : "OK"));
-
Hal_InitPGData88E(Adapter);
readAdapterInfo_8188EU(Adapter);
}
void rtw_hal_read_chip_info(struct adapter *Adapter)
{
- unsigned long start = jiffies;
-
- MSG_88E("====> %s\n", __func__);
-
_ReadPROMContent(Adapter);
-
- MSG_88E("<==== %s in %d ms\n", __func__,
- jiffies_to_msecs(jiffies - start));
}
#define GPIO_DEBUG_PORT_NUM 0
@@ -1137,8 +1057,6 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
val8 |= mode;
usb_write8(Adapter, MSR, val8);
- DBG_88E("%s()-%d mode = %d\n", __func__, __LINE__, mode);
-
if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
StopTxBeacon(Adapter);
@@ -1259,7 +1177,6 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
/* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */
/* We do not use other rates. */
hal_set_brate_cfg(val, &BrateCfg);
- DBG_88E("HW_VAR_BASIC_RATE: BrateCfg(%#x)\n", BrateCfg);
/* 2011.03.30 add by Luke Lee */
/* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
@@ -1541,7 +1458,6 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
else
AcmCtrl &= (~AcmHw_BeqEn);
- DBG_88E("[HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
usb_write8(Adapter, REG_ACMHWCTRL, AcmCtrl);
}
break;
@@ -1684,9 +1600,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
do {
if (!(usb_read32(Adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
break;
- } while (trycnt--);
- if (trycnt == 0)
- DBG_88E("Stop RX DMA failed......\n");
+ } while (--trycnt);
/* RQPN Load 0 */
usb_write16(Adapter, REG_RQPN_NPQ, 0x0);
@@ -1699,13 +1613,11 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
break;
case HW_VAR_APFM_ON_MAC:
haldata->bMacPwrCtrlOn = *val;
- DBG_88E("%s: bMacPwrCtrlOn=%d\n", __func__, haldata->bMacPwrCtrlOn);
break;
case HW_VAR_TX_RPT_MAX_MACID:
{
u8 maxMacid = *val;
- DBG_88E("### MacID(%d),Set Max Tx RPT MID(%d)\n", maxMacid, maxMacid + 1);
usb_write8(Adapter, REG_TX_RPT_CTRL + 1, maxMacid + 1);
}
break;
@@ -1835,27 +1747,6 @@ u8 rtw_hal_get_def_var(struct adapter *Adapter, enum hal_def_variable eVariable,
*((u32 *)pValue) = MAX_AMPDU_FACTOR_64K;
break;
case HW_DEF_RA_INFO_DUMP:
- {
- u8 entry_id = *((u8 *)pValue);
-
- if (check_fwstate(&Adapter->mlmepriv, _FW_LINKED)) {
- DBG_88E("============ RA status check ===================\n");
- DBG_88E("Mac_id:%d , RateID = %d, RAUseRate = 0x%08x, RateSGI = %d, DecisionRate = 0x%02x ,PTStage = %d\n",
- entry_id,
- haldata->odmpriv.RAInfo[entry_id].RateID,
- haldata->odmpriv.RAInfo[entry_id].RAUseRate,
- haldata->odmpriv.RAInfo[entry_id].RateSGI,
- haldata->odmpriv.RAInfo[entry_id].DecisionRate,
- haldata->odmpriv.RAInfo[entry_id].PTStage);
- }
- }
- break;
- case HW_DEF_ODM_DBG_FLAG:
- {
- struct odm_dm_struct *dm_ocm = &haldata->odmpriv;
-
- pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
- }
break;
case HAL_DEF_DBG_DUMP_RXPKT:
*((u8 *)pValue) = haldata->bDumpRxPkt;
@@ -1919,8 +1810,6 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
}
rate_bitmap = ODM_Get_Rate_Bitmap(odmpriv, mac_id, mask, rssi_level);
- DBG_88E("%s => mac_id:%d, networkType:0x%02x, mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
- __func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
mask &= rate_bitmap;
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 4116051a9a65..3609a44ed078 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -34,7 +34,6 @@ struct qos_priv {
};
#include <rtw_mlme.h>
-#include <rtw_debug.h>
#include <rtw_rf.h>
#include <rtw_event.h>
#include <rtw_led.h>
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 39df30599a5d..2e3e933781eb 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -117,7 +117,6 @@ enum hal_def_variable {
HW_DEF_RA_INFO_DUMP,
HAL_DEF_DBG_DUMP_TXPKT,
HW_DEF_FA_CNT_DUMP,
- HW_DEF_ODM_DBG_FLAG,
};
enum hal_odm_variable {
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index cb6940d2aeab..da6245a77d5d 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -546,13 +546,6 @@ enum _PUBLIC_ACTION {
ACT_PUBLIC_MAX
};
-/* BACK action code */
-enum rtw_ieee80211_back_actioncode {
- RTW_WLAN_ACTION_ADDBA_REQ = 0,
- RTW_WLAN_ACTION_ADDBA_RESP = 1,
- RTW_WLAN_ACTION_DELBA = 2,
-};
-
/* HT features action code */
enum rtw_ieee80211_ht_actioncode {
RTW_WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
@@ -566,13 +559,6 @@ enum rtw_ieee80211_ht_actioncode {
RTW_WLAN_ACTION_HI_INFO_EXCHG = 8,
};
-/* BACK (block-ack) parties */
-enum rtw_ieee80211_back_parties {
- RTW_WLAN_BACK_RECIPIENT = 0,
- RTW_WLAN_BACK_INITIATOR = 1,
- RTW_WLAN_BACK_TIMER = 2,
-};
-
#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
* 00:50:F2
*/
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index d814ce492424..98402cfb1168 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -691,8 +691,6 @@ struct odm_dm_struct {
bool odm_ready;
struct rtl8192cd_priv *fake_priv;
- u64 DebugComponents;
- u32 DebugLevel;
/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
bool bCckHighPower;
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
deleted file mode 100644
index 857c64b8d2f4..000000000000
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#ifndef __ODM_DBG_H__
-#define __ODM_DBG_H__
-
-/* */
-/* Define the debug levels */
-/* */
-/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
-/* They can help SW engineer to develop or trace states changed */
-/* and also help HW enginner to trace every operation to and from HW, */
-/* e.g IO, Tx, Rx. */
-/* */
-/* 2. DBG_WARNNING and DBG_SERIOUS are used for unusual or error cases, */
-/* which help us to debug SW or HW. */
-
-/* Never used in a call to ODM_RT_TRACE()! */
-#define ODM_DBG_OFF 1
-
-/* Fatal bug. */
-/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
-/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
-#define ODM_DBG_SERIOUS 2
-
-/* Abnormal, rare, or unexpected cases. */
-/* For example, IRP/Packet/OID canceled, device suprisely unremoved and so on. */
-#define ODM_DBG_WARNING 3
-
-/* Normal case with useful information about current SW or HW state. */
-/* For example, Tx/Rx descriptor to fill, Tx/Rx descr. completed status, */
-/* SW protocol state change, dynamic mechanism state change and so on. */
-/* */
-#define ODM_DBG_LOUD 4
-
-/* Normal case with detail execution flow or information. */
-#define ODM_DBG_TRACE 5
-
-/* Define the tracing components */
-/* BB Functions */
-#define ODM_COMP_DIG BIT(0)
-#define ODM_COMP_RA_MASK BIT(1)
-#define ODM_COMP_DYNAMIC_TXPWR BIT(2)
-#define ODM_COMP_FA_CNT BIT(3)
-#define ODM_COMP_RSSI_MONITOR BIT(4)
-#define ODM_COMP_CCK_PD BIT(5)
-#define ODM_COMP_ANT_DIV BIT(6)
-#define ODM_COMP_PWR_SAVE BIT(7)
-#define ODM_COMP_PWR_TRA BIT(8)
-#define ODM_COMP_RATE_ADAPTIVE BIT(9)
-#define ODM_COMP_PATH_DIV BIT(10)
-#define ODM_COMP_PSD BIT(11)
-#define ODM_COMP_DYNAMIC_PRICCA BIT(12)
-#define ODM_COMP_RXHP BIT(13)
-/* MAC Functions */
-#define ODM_COMP_EDCA_TURBO BIT(16)
-#define ODM_COMP_EARLY_MODE BIT(17)
-/* RF Functions */
-#define ODM_COMP_TX_PWR_TRACK BIT(24)
-#define ODM_COMP_RX_GAIN_TRACK BIT(25)
-#define ODM_COMP_CALIBRATION BIT(26)
-/* Common Functions */
-#define ODM_COMP_COMMON BIT(30)
-#define ODM_COMP_INIT BIT(31)
-
-/*------------------------Export Marco Definition---------------------------*/
-#define RT_PRINTK(fmt, args...) \
- pr_info("%s(): " fmt, __func__, ## args);
-
-#ifndef ASSERT
- #define ASSERT(expr)
-#endif
-
-#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
- if (((comp) & pDM_Odm->DebugComponents) && \
- (level <= pDM_Odm->DebugLevel)) { \
- pr_info("[ODM-8188E] "); \
- RT_PRINTK fmt; \
- }
-
-#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
- if (!(expr)) { \
- pr_info("Assertion failed! %s at ......\n", #expr); \
- pr_info(" ......%s,%s,line=%d\n", __FILE__, \
- __func__, __LINE__); \
- RT_PRINTK fmt; \
- ASSERT(false); \
- }
-
-void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm);
-
-#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 5254d875f96b..eb2b8b613aad 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -23,7 +23,6 @@
#include "odm.h"
#include "odm_hwconfig.h"
-#include "odm_debug.h"
#include "phydm_regdefine11n.h"
#include "hal8188e_rate_adaptive.h" /* for RA,Power training */
@@ -33,8 +32,6 @@
#include "odm_rtl8188e.h"
-void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm);
-void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm);
void odm_DIGInit(struct odm_dm_struct *pDM_Odm);
void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm);
void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm);
@@ -42,7 +39,6 @@ void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
-void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm);
void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
void odm_DIG(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 0c4c23be1dd5..2c16d3f33e1c 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -20,19 +20,6 @@
#include "rtw_sreset.h"
#include "odm_precomp.h"
-/* Fw Array */
-#define Rtl8188E_FwImageArray Rtl8188EFwImgArray
-#define Rtl8188E_FWImgArrayLength Rtl8188EFWImgArrayLength
-
-#define RTL8188E_FW_UMC_IMG "rtl8188E\\rtl8188efw.bin"
-#define RTL8188E_PHY_REG "rtl8188E\\PHY_REG_1T.txt"
-#define RTL8188E_PHY_RADIO_A "rtl8188E\\radio_a_1T.txt"
-#define RTL8188E_PHY_RADIO_B "rtl8188E\\radio_b_1T.txt"
-#define RTL8188E_AGC_TAB "rtl8188E\\AGC_TAB_1T.txt"
-#define RTL8188E_PHY_MACREG "rtl8188E\\MAC_REG.txt"
-#define RTL8188E_PHY_REG_PG "rtl8188E\\PHY_REG_PG.txt"
-#define RTL8188E_PHY_REG_MP "rtl8188E\\PHY_REG_MP.txt"
-
/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188E_radio_off_flow
@@ -208,7 +195,6 @@ struct hal_data_8188e {
u8 bTXPowerDataReadFromEEPORM;
u8 EEPROMThermalMeter;
- u8 bAPKThermalMeterIgnore;
bool EepromOrEfuse;
@@ -233,21 +219,10 @@ struct hal_data_8188e {
u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
- /* The current Tx Power Level */
- u8 CurrentCckTxPwrIdx;
- u8 CurrentOfdm24GTxPwrIdx;
- u8 CurrentBW2024GTxPwrIdx;
- u8 CurrentBW4024GTxPwrIdx;
-
/* Read/write are allow for following hardware information variables */
u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
- u8 DefaultInitialGain[4];
u8 pwrGroupCnt;
u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
- u32 CCKTxPowerLevelOriginalOffset;
u8 CrystalCap;
@@ -280,12 +255,6 @@ struct hal_data_8188e {
u8 bDumpRxPkt;/* for debug */
u8 bDumpTxPkt;/* for debug */
- u8 FwRsvdPageStartOffset; /* Reserve page start offset except
- * beacon in TxQ.
- */
-
- /* 2010/08/09 MH Add CU power down mode. */
- bool pwrdown;
/* Add for dual MAC 0--Mac0 1--Mac1 */
u32 interfaceIndex;
@@ -309,7 +278,6 @@ struct hal_data_8188e {
u8 UsbTxAggMode;
u8 UsbTxAggDescNum;
u16 HwRxPageSize; /* Hardware setting */
- u32 MaxUsbRxAggBlock;
enum usb_rx_agg_mode UsbRxAggMode;
u8 UsbRxAggBlockCount; /* USB Block count. Block size is
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index 55cce1f6bd77..fe0871bbb95f 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -7,15 +7,8 @@
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
-/* 8192C Register offset definition */
-
-#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
-#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
-
-#define MAC_ADDR_LEN 6
/* 8188E PKT_BUFF_ACCESS_CTRL value */
#define TXPKT_BUF_SELECT 0x69
-#define RXPKT_BUF_SELECT 0xA5
#define DISABLE_TRXPKT_BUF_ACCESS 0x0
/* 0x0000h ~ 0x00FFh System Configuration */
@@ -52,19 +45,7 @@
#define REG_FSISR 0x0054
#define REG_HSIMR 0x0058
#define REG_HSISR 0x005c
-#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Pin Control.
- */
-#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Select.
- */
#define REG_BB_PAD_CTRL 0x0064
-#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS
- * Multi-Function control source.
- */
-#define REG_GPIO_OUTPUT 0x006c
-#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
-#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
#define REG_MCUFWDL 0x0080
#define REG_WOL_EVENT 0x0081 /* RTL8188E */
#define REG_MCUTSTCFG 0x0084
@@ -172,9 +153,6 @@
#define REG_PCIE_HCPWM 0x0363 /* PCIe CPWM */
#define REG_WATCH_DOG 0x0368
-/* RTL8723 series ------------------------------ */
-#define REG_PCIE_HISR 0x03A0
-
/* spec version 11 */
/* 0x0400h ~ 0x047Fh Protocol Configuration */
#define REG_VOQ_INFORMATION 0x0400
@@ -459,34 +437,6 @@
#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2)
#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3)
-/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
-#define HSIMR_GPIO12_0_INT_EN BIT(0)
-#define HSIMR_SPS_OCP_INT_EN BIT(5)
-#define HSIMR_RON_INT_EN BIT(6)
-#define HSIMR_PDN_INT_EN BIT(7)
-#define HSIMR_GPIO9_INT_EN BIT(25)
-
-/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
-#define HSISR_GPIO12_0_INT BIT(0)
-#define HSISR_SPS_OCP_INT BIT(5)
-#define HSISR_RON_INT_EN BIT(6)
-#define HSISR_PDNINT BIT(7)
-#define HSISR_GPIO9_INT BIT(25)
-
-/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-/*
- * Network Type
- * 00: No link
- * 01: Link in ad hoc network
- * 10: Link in infrastructure network
- * 11: AP mode
- * Default: 00b.
- */
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
-
/* 88EU (MSR) Media Status Register (Offset 0x4C, 8 bits) */
#define USB_INTR_CONTENT_C2H_OFFSET 0
#define USB_INTR_CONTENT_CPWM1_OFFSET 16
@@ -505,34 +455,6 @@
#define CMD_EFUSE_PATCH_ERR BIT(6)
#define CMD_IOCONFIG_ERR BIT(7)
-/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
-/* 8192C Response Rate Set Register (offset 0x181, 24bits) */
-#define RRSR_1M BIT(0)
-#define RRSR_2M BIT(1)
-#define RRSR_5_5M BIT(2)
-#define RRSR_11M BIT(3)
-#define RRSR_6M BIT(4)
-#define RRSR_9M BIT(5)
-#define RRSR_12M BIT(6)
-#define RRSR_18M BIT(7)
-#define RRSR_24M BIT(8)
-#define RRSR_36M BIT(9)
-#define RRSR_48M BIT(10)
-#define RRSR_54M BIT(11)
-#define RRSR_MCS0 BIT(12)
-#define RRSR_MCS1 BIT(13)
-#define RRSR_MCS2 BIT(14)
-#define RRSR_MCS3 BIT(15)
-#define RRSR_MCS4 BIT(16)
-#define RRSR_MCS5 BIT(17)
-#define RRSR_MCS6 BIT(18)
-#define RRSR_MCS7 BIT(19)
-
-/* 8192C Response Rate Set Register (offset 0x1BF, 8bits) */
-/* WOL bit information */
-#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
-#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
-
/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
#define BW_OPMODE_20MHZ BIT(2)
#define BW_OPMODE_5G BIT(1)
@@ -565,12 +487,6 @@
#define SCR_TxSecEnable 0x02
#define SCR_RxSecEnable 0x04
-/* 10. Power Save Control Registers (Offset: 0x0260 - 0x02DF) */
-#define WOW_PMEN BIT(0) /* Power management Enable. */
-#define WOW_WOMEN BIT(1) /* WoW function on or off. */
-#define WOW_MAGIC BIT(2) /* Magic packet */
-#define WOW_UWF BIT(3) /* Unicast Wakeup frame. */
-
/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
/* 8188 IMR/ISR bits */
#define IMR_DISABLED_88E 0x0
@@ -648,21 +564,6 @@ So the following defines for 92C is not entire!!!!!!
* 0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
* 0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
*/
-/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
-/* Note:
- * The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
- * RTL8192S/RTL8192C are wrong,
- * the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
- * and BK - Bit3.
- * 8723 and 88E may be not correct either in the earlier version.
- */
-#define StopBecon BIT(6)
-#define StopHigh BIT(5)
-#define StopMgt BIT(4)
-#define StopBK BIT(3)
-#define StopBE BIT(2)
-#define StopVI BIT(1)
-#define StopVO BIT(0)
/* 8192C (RCR) Receive Configuration Register(Offset 0x608, 32 bits) */
#define RCR_APPFCS BIT(31) /* WMAC append FCS after payload */
@@ -695,14 +596,8 @@ So the following defines for 92C is not entire!!!!!!
#define RCR_FIFO_OFFSET 13
/* 0xFE00h ~ 0xFE55h USB Configuration */
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
#define REG_USB_HRPWM 0xFE58
-#define REG_USB_HCPWM 0xFE57
+
/* 8192C Register Bit and Content definition */
/* 0x0000h ~ 0x00FFh System Configuration */
@@ -1089,142 +984,6 @@ So the following defines for 92C is not entire!!!!!!
#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
-/* RTL8188E SDIO Configuration */
-
-/* I/O bus domain address mapping */
-#define SDIO_LOCAL_BASE 0x10250000
-#define WLAN_IOREG_BASE 0x10260000
-#define FIRMWARE_FIFO_BASE 0x10270000
-#define TX_HIQ_BASE 0x10310000
-#define TX_MIQ_BASE 0x10320000
-#define TX_LOQ_BASE 0x10330000
-#define RX_RX0FF_BASE 0x10340000
-
-/* SDIO host local register space mapping. */
-#define SDIO_LOCAL_MSK 0x0FFF
-#define WLAN_IOREG_MSK 0x7FFF
-#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
-#define WLAN_RX0FF_MSK 0x0003
-
-/* Without ref to the SDIO Device ID */
-#define SDIO_WITHOUT_REF_DEVICE_ID 0
-#define SDIO_LOCAL_DEVICE_ID 0 /* 0b[16], 000b[15:13] */
-#define WLAN_TX_HIQ_DEVICE_ID 4 /* 0b[16], 100b[15:13] */
-#define WLAN_TX_MIQ_DEVICE_ID 5 /* 0b[16], 101b[15:13] */
-#define WLAN_TX_LOQ_DEVICE_ID 6 /* 0b[16], 110b[15:13] */
-#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
-#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
-
-/* SDIO Tx Free Page Index */
-#define HI_QUEUE_IDX 0
-#define MID_QUEUE_IDX 1
-#define LOW_QUEUE_IDX 2
-#define PUBLIC_QUEUE_IDX 3
-
-#define SDIO_MAX_TX_QUEUE 3 /* HIQ, MIQ and LOQ */
-#define SDIO_MAX_RX_QUEUE 1
-
-/* SDIO Tx Control */
-#define SDIO_REG_TX_CTRL 0x0000
-/* SDIO Host Interrupt Mask */
-#define SDIO_REG_HIMR 0x0014
-/* SDIO Host Interrupt Service Routine */
-#define SDIO_REG_HISR 0x0018
-/* HCI Current Power Mode */
-#define SDIO_REG_HCPWM 0x0019
-/* RXDMA Request Length */
-#define SDIO_REG_RX0_REQ_LEN 0x001C
-/* Free Tx Buffer Page */
-#define SDIO_REG_FREE_TXPG 0x0020
-/* HCI Current Power Mode 1 */
-#define SDIO_REG_HCPWM1 0x0024
-/* HCI Current Power Mode 2 */
-#define SDIO_REG_HCPWM2 0x0026
-/* HTSF Informaion */
-#define SDIO_REG_HTSFR_INFO 0x0030
-/* HCI Request Power Mode 1 */
-#define SDIO_REG_HRPWM1 0x0080
-/* HCI Request Power Mode 2 */
-#define SDIO_REG_HRPWM2 0x0082
-/* HCI Power Save Clock */
-#define SDIO_REG_HPS_CLKR 0x0084
-/* SDIO HCI Suspend Control */
-#define SDIO_REG_HSUS_CTRL 0x0086
-/* SDIO Host Extension Interrupt Mask Always */
-#define SDIO_REG_HIMR_ON 0x0090
-/* SDIO Host Extension Interrupt Status Always */
-#define SDIO_REG_HISR_ON 0x0091
-
-#define SDIO_HIMR_DISABLED 0
-
-/* RTL8188E SDIO Host Interrupt Mask Register */
-#define SDIO_HIMR_RX_REQUEST_MSK BIT(0)
-#define SDIO_HIMR_AVAL_MSK BIT(1)
-#define SDIO_HIMR_TXERR_MSK BIT(2)
-#define SDIO_HIMR_RXERR_MSK BIT(3)
-#define SDIO_HIMR_TXFOVW_MSK BIT(4)
-#define SDIO_HIMR_RXFOVW_MSK BIT(5)
-#define SDIO_HIMR_TXBCNOK_MSK BIT(6)
-#define SDIO_HIMR_TXBCNERR_MSK BIT(7)
-#define SDIO_HIMR_BCNERLY_INT_MSK BIT(16)
-#define SDIO_HIMR_C2HCMD_MSK BIT(17)
-#define SDIO_HIMR_CPWM1_MSK BIT(18)
-#define SDIO_HIMR_CPWM2_MSK BIT(19)
-#define SDIO_HIMR_HSISR_IND_MSK BIT(20)
-#define SDIO_HIMR_GTINT3_IND_MSK BIT(21)
-#define SDIO_HIMR_GTINT4_IND_MSK BIT(22)
-#define SDIO_HIMR_PSTIMEOUT_MSK BIT(23)
-#define SDIO_HIMR_OCPINT_MSK BIT(24)
-#define SDIO_HIMR_ATIMEND_MSK BIT(25)
-#define SDIO_HIMR_ATIMEND_E_MSK BIT(26)
-#define SDIO_HIMR_CTWEND_MSK BIT(27)
-
-/* RTL8188E SDIO Specific */
-#define SDIO_HIMR_MCU_ERR_MSK BIT(28)
-#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT(29)
-
-/* SDIO Host Interrupt Service Routine */
-#define SDIO_HISR_RX_REQUEST BIT(0)
-#define SDIO_HISR_AVAL BIT(1)
-#define SDIO_HISR_TXERR BIT(2)
-#define SDIO_HISR_RXERR BIT(3)
-#define SDIO_HISR_TXFOVW BIT(4)
-#define SDIO_HISR_RXFOVW BIT(5)
-#define SDIO_HISR_TXBCNOK BIT(6)
-#define SDIO_HISR_TXBCNERR BIT(7)
-#define SDIO_HISR_BCNERLY_INT BIT(16)
-#define SDIO_HISR_C2HCMD BIT(17)
-#define SDIO_HISR_CPWM1 BIT(18)
-#define SDIO_HISR_CPWM2 BIT(19)
-#define SDIO_HISR_HSISR_IND BIT(20)
-#define SDIO_HISR_GTINT3_IND BIT(21)
-#define SDIO_HISR_GTINT4_IND BIT(22)
-#define SDIO_HISR_PSTIME BIT(23)
-#define SDIO_HISR_OCPINT BIT(24)
-#define SDIO_HISR_ATIMEND BIT(25)
-#define SDIO_HISR_ATIMEND_E BIT(26)
-#define SDIO_HISR_CTWEND BIT(27)
-
-/* RTL8188E SDIO Specific */
-#define SDIO_HISR_MCU_ERR BIT(28)
-#define SDIO_HISR_TSF_BIT32_TOGGLE BIT(29)
-
-#define MASK_SDIO_HISR_CLEAR \
- (SDIO_HISR_TXERR | SDIO_HISR_RXERR | SDIO_HISR_TXFOVW |\
- SDIO_HISR_RXFOVW | SDIO_HISR_TXBCNOK | SDIO_HISR_TXBCNERR |\
- SDIO_HISR_C2HCMD | SDIO_HISR_CPWM1 | SDIO_HISR_CPWM2 |\
- SDIO_HISR_HSISR_IND | SDIO_HISR_GTINT3_IND | SDIO_HISR_GTINT4_IND |\
- SDIO_HISR_PSTIMEOUT | SDIO_HISR_OCPINT)
-
-/* SDIO HCI Suspend Control Register */
-#define HCI_RESUME_PWR_RDY BIT(1)
-#define HCI_SUS_CTRL BIT(0)
-
-/* SDIO Tx FIFO related */
-/* The number of Tx FIFO free page */
-#define SDIO_TX_FREE_PG_QUEUE 4
-#define SDIO_TX_FIFO_PAGE_SZ 128
-
/* 0xFE00h ~ 0xFE55h USB Configuration */
/* 2 USB Information (0xFE17) */
@@ -1276,14 +1035,6 @@ So the following defines for 92C is not entire!!!!!!
/* GPS function enable */
#define GPS_FUNC_EN BIT(22)
-/* 3 REG_LIFECTRL_CTRL */
-#define HAL92C_EN_PKT_LIFE_TIME_BK BIT(3)
-#define HAL92C_EN_PKT_LIFE_TIME_BE BIT(2)
-#define HAL92C_EN_PKT_LIFE_TIME_VI BIT(1)
-#define HAL92C_EN_PKT_LIFE_TIME_VO BIT(0)
-
-#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us */
-
/* General definitions */
#define LAST_ENTRY_OF_TX_PKT_BUFFER 176 /* 22k 22528 bytes */
@@ -1309,48 +1060,15 @@ So the following defines for 92C is not entire!!!!!!
#define EEPROM_CUSTOMERID_88E 0xC5
#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
-/* RTL88EE */
-#define EEPROM_MAC_ADDR_88EE 0xD0
-#define EEPROM_VID_88EE 0xD6
-#define EEPROM_DID_88EE 0xD8
-#define EEPROM_SVID_88EE 0xDA
-#define EEPROM_SMID_88EE 0xDC
-
/* RTL88EU */
#define EEPROM_MAC_ADDR_88EU 0xD7
#define EEPROM_VID_88EU 0xD0
#define EEPROM_PID_88EU 0xD2
#define EEPROM_USB_OPTIONAL_FUNCTION0 0xD4
-/* RTL88ES */
-#define EEPROM_MAC_ADDR_88ES 0x11A
-
/* EEPROM/Efuse Value Type */
#define EETYPE_TX_PWR 0x0
-/* Default Value for EEPROM or EFUSE!!! */
-#define EEPROM_Default_TSSI 0x0
-#define EEPROM_Default_TxPowerDiff 0x0
-#define EEPROM_Default_CrystalCap 0x5
-/* Default: 2X2, RTL8192CE(QFPN68) */
-#define EEPROM_Default_BoardType 0x02
-#define EEPROM_Default_TxPower 0x1010
-#define EEPROM_Default_HT2T_TxPwr 0x10
-
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_ThermalMeter 0x12
-
-#define EEPROM_Default_AntTxPowerDiff 0x0
-#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
-#define EEPROM_Default_TxPowerLevel 0x2A
-
-#define EEPROM_Default_HT40_2SDiff 0x0
-/* HT20<->40 default Tx Power Index Difference */
-#define EEPROM_Default_HT20_Diff 2
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_HT40_PwrMaxOffset 0
-#define EEPROM_Default_HT20_PwrMaxOffset 0
-
#define EEPROM_Default_CrystalCap_88E 0x20
#define EEPROM_Default_ThermalMeter_88E 0x18
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 617c2273b41b..72a2bb812c9a 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -154,7 +154,4 @@ bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
-void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
- struct tx_desc *ptxdesc);
-
#endif /* __RTL8188E_XMIT_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index d7ca7c2fb118..2c26993b8205 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -45,7 +45,6 @@ enum ANDROID_WIFI_CMD {
ANDROID_WIFI_CMD_MAX
};
-int rtw_android_cmdstr_to_num(char *cmdstr);
int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
#endif /* __RTW_ANDROID_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 68b8ad1a412f..4e9cb93e4b8f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -32,10 +32,7 @@ struct cmd_obj {
struct cmd_priv {
struct completion cmd_queue_comp;
- struct completion terminate_cmdthread_comp;
struct __queue cmd_queue;
- u8 cmdthd_running;
- struct adapter *padapter;
};
#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
@@ -54,7 +51,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd);
int rtw_cmd_thread(void *context);
-int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
+void rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
enum rtw_drvextra_cmd_id {
NONE_WK_CID,
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
deleted file mode 100644
index 1fdf16124a0d..000000000000
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define DRIVERVERSION "v4.1.4_6773.20130222"
-#define _drv_always_ 1
-#define _drv_emerg_ 2
-#define _drv_alert_ 3
-#define _drv_crit_ 4
-#define _drv_err_ 5
-#define _drv_warning_ 6
-#define _drv_notice_ 7
-#define _drv_info_ 8
-#define _drv_debug_ 9
-
-#define _module_rtl871x_xmit_c_ BIT(0)
-#define _module_xmit_osdep_c_ BIT(1)
-#define _module_rtl871x_recv_c_ BIT(2)
-#define _module_recv_osdep_c_ BIT(3)
-#define _module_rtl871x_mlme_c_ BIT(4)
-#define _module_mlme_osdep_c_ BIT(5)
-#define _module_rtl871x_sta_mgt_c_ BIT(6)
-#define _module_rtl871x_cmd_c_ BIT(7)
-#define _module_cmd_osdep_c_ BIT(8)
-#define _module_rtl871x_io_c_ BIT(9)
-#define _module_io_osdep_c_ BIT(10)
-#define _module_os_intfs_c_ BIT(11)
-#define _module_rtl871x_security_c_ BIT(12)
-#define _module_rtl871x_eeprom_c_ BIT(13)
-#define _module_hal_init_c_ BIT(14)
-#define _module_hci_hal_init_c_ BIT(15)
-#define _module_rtl871x_ioctl_c_ BIT(16)
-#define _module_rtl871x_ioctl_set_c_ BIT(17)
-#define _module_rtl871x_ioctl_query_c_ BIT(18)
-#define _module_rtl871x_pwrctrl_c_ BIT(19)
-#define _module_hci_intfs_c_ BIT(20)
-#define _module_hci_ops_c_ BIT(21)
-#define _module_osdep_service_c_ BIT(22)
-#define _module_mp_ BIT(23)
-#define _module_hci_ops_os_c_ BIT(24)
-#define _module_rtl871x_ioctl_os_c BIT(25)
-#define _module_rtl8712_cmd_c_ BIT(26)
-#define _module_rtl8192c_xmit_c_ BIT(27)
-#define _module_hal_xmit_c_ BIT(28)
-#define _module_efuse_ BIT(29)
-#define _module_rtl8712_recv_c_ BIT(30)
-#define _module_rtl8712_led_c_ BIT(31)
-
-#define DRIVER_PREFIX "R8188EU: "
-
-extern u32 GlobalDebugLevel;
-
-#define DBG_88E_LEVEL(_level, fmt, arg...) \
- do { \
- if (_level <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX fmt, ##arg); \
- } while (0)
-
-#define DBG_88E(...) \
- do { \
- if (_drv_err_ <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX __VA_ARGS__); \
- } while (0)
-
-#define MSG_88E(...) \
- do { \
- if (_drv_err_ <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX __VA_ARGS__); \
- } while (0)
-
-#define RT_TRACE(_comp, _level, fmt) \
- do { \
- if (_level <= GlobalDebugLevel) { \
- pr_info("%s [0x%08x,%d]", DRIVER_PREFIX, \
- (unsigned int)_comp, _level); \
- pr_info fmt; \
- } \
- } while (0)
-
-#define RT_PRINT_DATA(_comp, _level, _titlestring, _hexdata, _hexdatalen)\
- do { \
- if (_level <= GlobalDebugLevel) { \
- int __i; \
- u8 *ptr = (u8 *)_hexdata; \
- pr_info("%s", DRIVER_PREFIX); \
- pr_info(_titlestring); \
- for (__i = 0; __i < (int)_hexdatalen; __i++) { \
- pr_info("%02X%s", ptr[__i], \
- (((__i + 1) % 4) == 0) ? \
- " " : " "); \
- if (((__i + 1) % 16) == 0) \
- pr_cont("\n"); \
- } \
- pr_cont("\n"); \
- } \
- } while (0)
-
-int proc_get_drv_version(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_write_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_write_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-int proc_get_read_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_read_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_adapter_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_best_channel(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 74182c32c4ec..bb5e2b5d4bf1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -23,9 +23,6 @@
#define PGPKT_DATA_SIZE 8
-#define EFUSE_WIFI 0
-#define EFUSE_BT 1
-
/* E-Fuse */
#define EFUSE_MAP_SIZE 512
#define EFUSE_MAX_SIZE 256
@@ -60,13 +57,11 @@ u8 Efuse_CalculateWordCnts(u8 word_en);
u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data);
u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data);
-void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset,
- u16 _size_byte, u8 *pbuf);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data);
bool Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data);
void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
u8 Efuse_WordEnableDataWrite(struct adapter *adapter, u16 efuse_addr,
u8 word_en, u8 *data);
-void EFUSE_ShadowMapUpdate(struct adapter *adapter, u8 efusetype);
+void EFUSE_ShadowMapUpdate(struct adapter *adapter);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index ee62ed76a465..5f65c3e1e46f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -52,7 +52,7 @@ struct LED_871x {
* either RTW_LED_ON or RTW_LED_OFF are.
*/
- u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
+ u8 led_on; /* true if LED is ON, false if LED is OFF. */
u8 bLedBlinkInProgress; /* true if it is blinking, false o.w.. */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 1b74b32b8a81..2f02316906d0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -111,8 +111,6 @@ struct mlme_priv {
u8 to_join; /* flag */
u8 to_roaming; /* roaming trying times */
- u8 *nic_hdl;
-
struct list_head *pscanned;
struct __queue free_bss_pool;
struct __queue scanned_queue;
@@ -222,8 +220,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf);
void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf);
void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf);
void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_cpwm_event_callback(struct adapter *adapter, u8 *pbuf);
void indicate_wx_scan_complete_event(struct adapter *padapter);
void rtw_indicate_wx_assoc_event(struct adapter *padapter);
void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 03d55eb7dc16..c4fcfa986726 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -383,7 +383,6 @@ struct p2p_oper_class_map {
};
struct mlme_ext_priv {
- struct adapter *padapter;
u8 mlmeext_init;
atomic_t event_seq;
u16 mgnt_seq;
@@ -662,7 +661,7 @@ static struct fwevent wlanevents[] = {
{0, &rtw_joinbss_event_callback}, /*10*/
{sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
{sizeof(struct stadel_event), &rtw_stadel_event_callback},
- {0, &rtw_atimdone_event_callback},
+ {0, NULL},
{0, rtw_dummy_event_callback},
{0, NULL}, /*15*/
{0, NULL},
@@ -672,7 +671,7 @@ static struct fwevent wlanevents[] = {
{0, NULL}, /*20*/
{0, NULL},
{0, NULL},
- {0, &rtw_cpwm_event_callback},
+ {0, NULL},
{0, NULL},
};
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index e20bab41708a..8c906b666b62 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -231,8 +231,7 @@ struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
void rtw_init_recvframe(struct recv_frame *precvframe,
struct recv_priv *precvpriv);
-int rtw_free_recvframe(struct recv_frame *precvframe,
- struct __queue *pfree_recv_queue);
+void rtw_free_recvframe(struct recv_frame *precvframe, struct __queue *pfree_recv_queue);
#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
int _rtw_enqueue_recvframe(struct recv_frame *precvframe,
struct __queue *queue);
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 84e17330628e..716fec036e54 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -25,42 +25,6 @@ enum WIFI_FRAME_TYPE {
WIFI_QOS_DATA_TYPE = (BIT(7) | BIT(3)), /* QoS Data */
};
-enum WIFI_FRAME_SUBTYPE {
- /* below is for mgt frame */
- WIFI_ASSOCREQ = (0 | WIFI_MGT_TYPE),
- WIFI_ASSOCRSP = (BIT(4) | WIFI_MGT_TYPE),
- WIFI_REASSOCREQ = (BIT(5) | WIFI_MGT_TYPE),
- WIFI_REASSOCRSP = (BIT(5) | BIT(4) | WIFI_MGT_TYPE),
- WIFI_PROBEREQ = (BIT(6) | WIFI_MGT_TYPE),
- WIFI_PROBERSP = (BIT(6) | BIT(4) | WIFI_MGT_TYPE),
- WIFI_BEACON = (BIT(7) | WIFI_MGT_TYPE),
- WIFI_ATIM = (BIT(7) | BIT(4) | WIFI_MGT_TYPE),
- WIFI_DISASSOC = (BIT(7) | BIT(5) | WIFI_MGT_TYPE),
- WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | WIFI_MGT_TYPE),
- WIFI_DEAUTH = (BIT(7) | BIT(6) | WIFI_MGT_TYPE),
- WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | WIFI_MGT_TYPE),
-
- /* below is for control frame */
- WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
- WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
- WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
- WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
- WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
- WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) |
- WIFI_CTRL_TYPE),
-
- /* below is for data frame */
- WIFI_DATA = (0 | WIFI_DATA_TYPE),
- WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
- WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
- WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
- WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
- WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
- WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
-};
-
#define SetToDs(pbuf) \
*(__le16 *)(pbuf) |= cpu_to_le16(IEEE80211_FCTL_TODS)
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index c95ae4d6a3b6..b958a8d882b0 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -11,7 +11,6 @@
#include <osdep_service.h>
#include <drv_types.h>
#include <wlan_bssdef.h>
-#include <rtw_debug.h>
#include <wifi.h>
#include <rtw_mlme.h>
#include <rtw_mlme_ext.h>
@@ -71,7 +70,6 @@ void rtw_indicate_wx_assoc_event(struct adapter *padapter)
memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
- DBG_88E_LEVEL(_drv_always_, "assoc success\n");
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -84,7 +82,6 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
eth_zero_addr(wrqu.ap_addr.sa_data);
- DBG_88E_LEVEL(_drv_always_, "indicate disassoc\n");
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -224,7 +221,7 @@ static char *translate_scan(struct adapter *padapter,
/* parsing WPA/WPA2 IE */
{
u8 *buf;
- u8 wpa_ie[255], rsn_ie[255];
+ u8 *wpa_ie, *rsn_ie;
u16 wpa_len = 0, rsn_len = 0;
u8 *p;
@@ -232,9 +229,15 @@ static char *translate_scan(struct adapter *padapter,
if (!buf)
return start;
+ wpa_ie = kzalloc(255, GFP_ATOMIC);
+ if (!wpa_ie)
+ return start;
+
+ rsn_ie = kzalloc(255, GFP_ATOMIC);
+ if (!rsn_ie)
+ return start;
+
rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
if (wpa_len > 0) {
p = buf;
@@ -268,6 +271,8 @@ static char *translate_scan(struct adapter *padapter,
start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
}
kfree(buf);
+ kfree(wpa_ie);
+ kfree(rsn_ie);
}
{/* parsing WPS IE */
@@ -315,26 +320,20 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
int ret = 0;
if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
- DBG_88E("%s, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n", __func__, value);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
} else if (value & AUTH_ALG_SHARED_KEY) {
- DBG_88E("%s, AUTH_ALG_SHARED_KEY [value:0x%x]\n", __func__, value);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
} else if (value & AUTH_ALG_OPEN_SYSTEM) {
- DBG_88E("%s, AUTH_ALG_OPEN_SYSTEM\n", __func__);
if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
}
- } else if (value & AUTH_ALG_LEAP) {
- DBG_88E("%s, AUTH_ALG_LEAP\n", __func__);
} else {
- DBG_88E("%s, error!\n", __func__);
ret = -EINVAL;
}
return ret;
@@ -368,9 +367,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("%s, crypt.alg = WEP\n", __func__));
- DBG_88E("%s, crypt.alg = WEP\n", __func__);
-
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
@@ -378,22 +374,15 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(1)wep_key_idx =%d\n", wep_key_idx));
- DBG_88E("(1)wep_key_idx =%d\n", wep_key_idx);
-
if (wep_key_idx > WEP_KEYS)
return -EINVAL;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(2)wep_key_idx =%d\n", wep_key_idx));
-
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
- if (!pwep) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("%s: pwep allocate fail !!!\n", __func__));
+ if (!pwep)
goto exit;
- }
memset(pwep, 0, wep_total_len);
pwep->KeyLength = wep_key_len;
pwep->Length = wep_total_len;
@@ -409,11 +398,9 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
pwep->KeyIndex |= 0x80000000;
memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
if (param->u.crypt.set_tx) {
- DBG_88E("wep, set_tx = 1\n");
if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
ret = -EOPNOTSUPP;
} else {
- DBG_88E("wep, set_tx = 0\n");
if (wep_key_idx >= WEP_KEYS) {
ret = -EOPNOTSUPP;
goto exit;
@@ -450,15 +437,12 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
padapter->securitypriv.busetkipkey = false;
}
- DBG_88E(" ~~~~set sta key:unicastkey\n");
-
rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
} else { /* group key */
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.binstallGrpkey = true;
- DBG_88E(" ~~~~set sta key:groupkey\n");
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
@@ -507,17 +491,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
goto exit;
}
- /* dump */
- {
- int i;
-
- DBG_88E("\n wpa_ie(length:%d):\n", ielen);
- for (i = 0; i < ielen; i += 8)
- DBG_88E("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i + 1], buf[i + 2], buf[i + 3], buf[i + 4], buf[i + 5], buf[i + 6], buf[i + 7]);
- }
-
if (ielen < RSN_HEADER_LEN) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
ret = -1;
goto exit;
}
@@ -588,8 +562,6 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
while (cnt < ielen) {
eid = buf[cnt];
if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&buf[cnt + 2], wps_oui, 4))) {
- DBG_88E("SET WPS_IE\n");
-
padapter->securitypriv.wps_ie_len = min(buf[cnt + 1] + 2, MAX_WPA_IE_LEN << 2);
memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
@@ -602,10 +574,6 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
}
}
}
-
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("%s: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
- __func__, pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
exit:
kfree(buf);
return ret;
@@ -625,8 +593,6 @@ static int rtw_wx_get_name(struct net_device *dev,
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
NDIS_802_11_RATES_EX *prates = NULL;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
-
if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
/* parsing HT_CAP_IE */
p = rtw_get_ie(&pcur_bss->ies[12], WLAN_EID_HT_CAPABILITY, &ht_ielen, pcur_bss->ie_length - 12);
@@ -657,14 +623,6 @@ static int rtw_wx_get_name(struct net_device *dev,
return 0;
}
-static int rtw_wx_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+%s\n", __func__));
- return 0;
-}
-
static int rtw_wx_get_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -707,23 +665,18 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
switch (wrqu->mode) {
case IW_MODE_AUTO:
networkType = Ndis802_11AutoUnknown;
- DBG_88E("set_mode = IW_MODE_AUTO\n");
break;
case IW_MODE_ADHOC:
networkType = Ndis802_11IBSS;
- DBG_88E("set_mode = IW_MODE_ADHOC\n");
break;
case IW_MODE_MASTER:
networkType = Ndis802_11APMode;
- DBG_88E("set_mode = IW_MODE_MASTER\n");
break;
case IW_MODE_INFRA:
networkType = Ndis802_11Infrastructure;
- DBG_88E("set_mode = IW_MODE_INFRA\n");
break;
default:
ret = -EINVAL;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported\n", iw_operation_mode[wrqu->mode]));
goto exit;
}
if (!rtw_set_802_11_infrastructure_mode(padapter, networkType)) {
@@ -741,8 +694,6 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
struct adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
-
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
wrqu->mode = IW_MODE_INFRA;
else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
@@ -770,7 +721,6 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
if (pPMK->cmd == IW_PMKSA_ADD) {
- DBG_88E("[%s] IW_PMKSA_ADD!\n", __func__);
if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
return ret;
ret = true;
@@ -780,7 +730,6 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite with new PMKID. */
- DBG_88E("[%s] BSSID exists in the PMKList.\n", __func__);
memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
psecuritypriv->PMKIDList[j].used = true;
psecuritypriv->PMKIDIndex = j + 1;
@@ -791,9 +740,6 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
if (!blInserted) {
/* Find a new entry */
- DBG_88E("[%s] Use the new entry index = %d for this PMKID.\n",
- __func__, psecuritypriv->PMKIDIndex);
-
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bssid, strIssueBssid, ETH_ALEN);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
@@ -803,7 +749,6 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
psecuritypriv->PMKIDIndex = 0;
}
} else if (pPMK->cmd == IW_PMKSA_REMOVE) {
- DBG_88E("[%s] IW_PMKSA_REMOVE!\n", __func__);
ret = true;
for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].bssid, strIssueBssid, ETH_ALEN)) {
@@ -814,7 +759,6 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
}
}
} else if (pPMK->cmd == IW_PMKSA_FLUSH) {
- DBG_88E("[%s] IW_PMKSA_FLUSH!\n", __func__);
memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
psecuritypriv->PMKIDIndex = 0;
ret = true;
@@ -843,8 +787,6 @@ static int rtw_wx_get_range(struct net_device *dev,
u16 val;
int i;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s. cmd_code =%x\n", __func__, info->cmd));
-
wrqu->data.length = sizeof(*range);
memset(range, 0, sizeof(*range));
@@ -952,12 +894,9 @@ static int rtw_wx_set_wap(struct net_device *dev,
authmode = padapter->securitypriv.ndisauthtype;
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = phead->next;
-
- while (phead != pmlmepriv->pscanned) {
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
-
- pmlmepriv->pscanned = pmlmepriv->pscanned->next;
+ list_for_each(pmlmepriv->pscanned, phead) {
+ pnetwork = list_entry(pmlmepriv->pscanned,
+ struct wlan_network, list);
dst_bssid = pnetwork->network.MacAddress;
@@ -998,8 +937,6 @@ static int rtw_wx_get_wap(struct net_device *dev,
eth_zero_addr(wrqu->ap_addr.sa_data);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
-
if (check_fwstate(pmlmepriv, _FW_LINKED) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -1014,19 +951,12 @@ static int rtw_wx_set_mlme(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
- u16 reason;
struct adapter *padapter = netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
if (!mlme)
return -1;
- DBG_88E("%s\n", __func__);
-
- reason = mlme->reason_code;
-
- DBG_88E("%s, cmd =%d, reason =%d\n", __func__, mlme->cmd, reason);
-
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
if (!rtw_set_802_11_disassociate(padapter))
@@ -1051,15 +981,12 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
-
if (!rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
}
if (padapter->bDriverStopped) {
- DBG_88E("bDriverStopped =%d\n", padapter->bDriverStopped);
ret = -1;
goto exit;
}
@@ -1103,15 +1030,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
memcpy(ssid[0].ssid, req->essid, len);
ssid[0].ssid_length = len;
- DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
-
spin_lock_bh(&pmlmepriv->lock);
_status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
spin_unlock_bh(&pmlmepriv->lock);
- } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
- DBG_88E("%s, req->scan_type == IW_SCAN_TYPE_PASSIVE\n", __func__);
}
} else {
if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE &&
@@ -1189,9 +1112,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
u32 wait_for_surveydone;
int wait_status;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
-
if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
ret = -EINVAL;
goto exit;
@@ -1211,21 +1131,17 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
- plist = phead->next;
-
- while (phead != plist) {
+ list_for_each(plist, phead) {
if ((stop - ev) < SCAN_ITEM_SIZE) {
ret = -E2BIG;
break;
}
- pnetwork = container_of(plist, struct wlan_network, list);
+ pnetwork = list_entry(plist, struct wlan_network, list);
/* report network only if the current channel set contains the channel to which this network belongs */
if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0)
ev = translate_scan(padapter, a, pnetwork, ev, stop);
-
- plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -1257,8 +1173,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
uint ret = 0, len;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("+%s: fw_state = 0x%08x\n", __func__, get_fwstate(pmlmepriv)));
if (!rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
@@ -1280,38 +1194,24 @@ static int rtw_wx_set_essid(struct net_device *dev,
}
authmode = padapter->securitypriv.ndisauthtype;
- DBG_88E("=>%s\n", __func__);
if (wrqu->essid.flags && wrqu->essid.length) {
len = min_t(uint, wrqu->essid.length, IW_ESSID_MAX_SIZE);
- if (wrqu->essid.length != 33)
- DBG_88E("ssid =%s, len =%d\n", extra, wrqu->essid.length);
-
memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
ndis_ssid.ssid_length = len;
memcpy(ndis_ssid.ssid, extra, len);
src_ssid = ndis_ssid.ssid;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("%s: ssid =[%s]\n", __func__, src_ssid));
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = phead->next;
-
- while (phead != pmlmepriv->pscanned) {
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
-
- pmlmepriv->pscanned = pmlmepriv->pscanned->next;
+ list_for_each(pmlmepriv->pscanned, phead) {
+ pnetwork = list_entry(pmlmepriv->pscanned,
+ struct wlan_network, list);
dst_ssid = pnetwork->network.ssid.ssid;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("%s: dst_ssid =%s\n", __func__,
- pnetwork->network.ssid.ssid));
-
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.ssid_length)) &&
(pnetwork->network.ssid.ssid_length == ndis_ssid.ssid_length)) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("%s: find match, set infra mode\n", __func__));
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
@@ -1328,8 +1228,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
}
}
spin_unlock_bh(&queue->lock);
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("set ssid: set_802_11_auth. mode =%d\n", authmode));
rtw_set_802_11_authentication_mode(padapter, authmode);
if (!rtw_set_802_11_ssid(padapter, &ndis_ssid)) {
ret = -1;
@@ -1338,8 +1236,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
}
exit:
- DBG_88E("<=%s, ret %d\n", __func__, ret);
-
return ret;
}
@@ -1352,8 +1248,6 @@ static int rtw_wx_get_essid(struct net_device *dev,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
-
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
len = pcur_bss->ssid.ssid_length;
@@ -1379,9 +1273,6 @@ static int rtw_wx_set_rate(struct net_device *dev,
u32 ratevalue = 0;
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
-
if (target_rate == -1) {
ratevalue = 11;
goto set_rate;
@@ -1440,8 +1331,6 @@ set_rate:
} else {
datarates[i] = 0xff;
}
-
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("datarate_inx =%d\n", datarates[i]));
}
return 0;
@@ -1480,8 +1369,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
padapter->registrypriv.rts_thresh = wrqu->rts.value;
}
- DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
-
return 0;
}
@@ -1491,8 +1378,6 @@ static int rtw_wx_get_rts(struct net_device *dev,
{
struct adapter *padapter = netdev_priv(dev);
- DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
-
wrqu->rts.value = padapter->registrypriv.rts_thresh;
wrqu->rts.fixed = 0; /* no auto select */
/* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
@@ -1516,8 +1401,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1;
}
- DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
-
return 0;
}
@@ -1527,8 +1410,6 @@ static int rtw_wx_get_frag(struct net_device *dev,
{
struct adapter *padapter = netdev_priv(dev);
- DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
-
wrqu->frag.value = padapter->xmitpriv.frag_len;
wrqu->frag.fixed = 0; /* no auto select */
@@ -1559,14 +1440,11 @@ static int rtw_wx_set_enc(struct net_device *dev,
struct adapter *padapter = netdev_priv(dev);
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- DBG_88E("+%s, flags = 0x%x\n", __func__, erq->flags);
-
memset(&wep, 0, sizeof(struct ndis_802_11_wep));
key = erq->flags & IW_ENCODE_INDEX;
if (erq->flags & IW_ENCODE_DISABLED) {
- DBG_88E("EncryptionDisabled\n");
padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
@@ -1585,12 +1463,10 @@ static int rtw_wx_set_enc(struct net_device *dev,
} else {
keyindex_provided = 0;
key = padapter->securitypriv.dot11PrivacyKeyIndex;
- DBG_88E("%s, key =%d\n", __func__, key);
}
/* set authentication mode */
if (erq->flags & IW_ENCODE_OPEN) {
- DBG_88E("%s():IW_ENCODE_OPEN\n", __func__);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
@@ -1598,7 +1474,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
authmode = Ndis802_11AuthModeOpen;
padapter->securitypriv.ndisauthtype = authmode;
} else if (erq->flags & IW_ENCODE_RESTRICTED) {
- DBG_88E("%s():IW_ENCODE_RESTRICTED\n", __func__);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
@@ -1606,8 +1481,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
authmode = Ndis802_11AuthModeShared;
padapter->securitypriv.ndisauthtype = authmode;
} else {
- DBG_88E("%s():erq->flags = 0x%x\n", __func__, erq->flags);
-
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
@@ -1628,8 +1501,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
/* set key_id only, no given KeyMaterial(erq->length == 0). */
padapter->securitypriv.dot11PrivacyKeyIndex = key;
- DBG_88E("(keyindex_provided == 1), keyid =%d, key_len =%d\n", key, padapter->securitypriv.dot11DefKeylen[key]);
-
switch (padapter->securitypriv.dot11DefKeylen[key]) {
case 5:
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
@@ -1807,7 +1678,6 @@ static int rtw_wx_set_auth(struct net_device *dev,
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
LeaveAllPowerSaveMode(padapter);
rtw_disassoc_cmd(padapter, 500, false);
- DBG_88E("%s...call rtw_indicate_disconnect\n ", __func__);
rtw_indicate_disconnect(padapter);
rtw_free_assoc_resources(padapter);
}
@@ -1910,12 +1780,6 @@ static int rtw_wx_get_nick(struct net_device *dev,
return 0;
}
-static int dummy(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return -1;
-}
-
static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
{
uint ret = 0;
@@ -1934,8 +1798,6 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
break;
}
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("%s:padapter->securitypriv.ndisauthtype =%d\n", __func__, padapter->securitypriv.ndisauthtype));
break;
case IEEE_PARAM_TKIP_COUNTERMEASURES:
break;
@@ -2024,7 +1886,6 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
break;
default:
- DBG_88E("Unknown WPA supplicant request: %d\n", param->cmd);
ret = -EOPNOTSUPP;
break;
}
@@ -2080,8 +1941,6 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
int res = _SUCCESS;
- DBG_88E("%s\n", __func__);
-
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
if (!pcmd) {
res = _FAIL;
@@ -2160,7 +2019,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E("%s\n", __func__);
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) {
@@ -2174,23 +2032,17 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
}
} else {
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
- if (!psta) {
- DBG_88E("%s(), sta has already been removed or never been added\n", __func__);
+ if (!psta)
goto exit;
- }
}
- if (strcmp(param->u.crypt.alg, "none") == 0 && (!psta)) {
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (!psta))
/* todo:clear default encryption keys */
-
- DBG_88E("clear default encryption keys, keyid =%d\n", param->u.crypt.idx);
goto exit;
- }
+
if (strcmp(param->u.crypt.alg, "WEP") == 0 && (!psta)) {
- DBG_88E("r871x_set_encryption, crypt.alg = WEP\n");
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
- DBG_88E("r871x_set_encryption, wep_key_idx=%d, len=%d\n", wep_key_idx, wep_key_len);
if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
ret = -EINVAL;
goto exit;
@@ -2200,10 +2052,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
- if (!pwep) {
- DBG_88E(" r871x_set_encryption: pwep allocate fail !!!\n");
+ if (!pwep)
goto exit;
- }
memset(pwep, 0, wep_total_len);
@@ -2216,8 +2066,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
if (param->u.crypt.set_tx) {
- DBG_88E("wep, set_tx = 1\n");
-
psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
@@ -2235,8 +2083,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx);
} else {
- DBG_88E("wep, set_tx = 0\n");
-
/* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
/* psecuritypriv->dot11PrivacyKeyIndex = keyid", but can rtw_set_key to cam */
@@ -2253,8 +2099,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
if (param->u.crypt.set_tx == 1) {
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- DBG_88E("%s, set group_key, WEP\n", __func__);
-
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
@@ -2262,7 +2106,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (param->u.crypt.key_len == 13)
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- DBG_88E("%s, set group_key, TKIP\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
@@ -2272,12 +2115,10 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- DBG_88E("%s, set group_key, CCMP\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _AES_;
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
} else {
- DBG_88E("%s, set group_key, none\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
@@ -2299,14 +2140,10 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- DBG_88E("%s, set pairwise key, WEP\n", __func__);
-
psta->dot118021XPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
psta->dot118021XPrivacy = _WEP104_;
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
- DBG_88E("%s, set pairwise key, TKIP\n", __func__);
-
psta->dot118021XPrivacy = _TKIP_;
/* set mic key */
@@ -2315,12 +2152,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
- DBG_88E("%s, set pairwise key, CCMP\n", __func__);
-
psta->dot118021XPrivacy = _AES_;
} else {
- DBG_88E("%s, set pairwise key, none\n", __func__);
-
psta->dot118021XPrivacy = _NO_PRIVACY_;
}
@@ -2386,8 +2219,6 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned char *pbuf = param->u.bcn_ie.buf;
- DBG_88E("%s, len =%d\n", __func__, len);
-
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2408,8 +2239,6 @@ static int rtw_hostapd_sta_flush(struct net_device *dev)
{
struct adapter *padapter = netdev_priv(dev);
- DBG_88E("%s\n", __func__);
-
flush_all_cam_entry(padapter); /* clear CAM */
return rtw_sta_flush(padapter);
@@ -2423,8 +2252,6 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E("%s(aid =%d) =%pM\n", __func__, param->u.add_sta.aid, (param->sta_addr));
-
if (!check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)))
return -EINVAL;
@@ -2477,8 +2304,6 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
struct sta_priv *pstapriv = &padapter->stapriv;
int updated = 0;
- DBG_88E("%s =%pM\n", __func__, (param->sta_addr));
-
if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_AP_STATE))
return -EINVAL;
@@ -2496,8 +2321,6 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
psta = NULL;
- } else {
- DBG_88E("%s(), sta has already been removed or never been added\n", __func__);
}
return 0;
@@ -2513,8 +2336,6 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
struct sta_data *psta_data = (struct sta_data *)param_ex->data;
- DBG_88E("rtw_ioctl_get_sta_info, sta_addr: %pM\n", (param_ex->sta_addr));
-
if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_AP_STATE))
return -EINVAL;
@@ -2567,8 +2388,6 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E("%s, sta_addr: %pM\n", __func__, (param->sta_addr));
-
if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_AP_STATE))
return -EINVAL;
@@ -2586,8 +2405,6 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
copy_len = min_t(int, wpa_ie_len + 2, sizeof(psta->wpa_ie));
param->u.wpa_ie.len = copy_len;
memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len);
- } else {
- DBG_88E("sta's wpa_ie is NONE\n");
}
} else {
ret = -1;
@@ -2604,8 +2421,6 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int ie_len;
- DBG_88E("%s, len =%d\n", __func__, len);
-
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2617,10 +2432,8 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
if (ie_len > 0) {
pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
pmlmepriv->wps_beacon_ie_len = ie_len;
- if (!pmlmepriv->wps_beacon_ie) {
- DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ if (!pmlmepriv->wps_beacon_ie)
return -EINVAL;
- }
memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len);
@@ -2638,8 +2451,6 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
- DBG_88E("%s, len =%d\n", __func__, len);
-
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2651,10 +2462,8 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
if (ie_len > 0) {
pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
pmlmepriv->wps_probe_resp_ie_len = ie_len;
- if (!pmlmepriv->wps_probe_resp_ie) {
- DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ if (!pmlmepriv->wps_probe_resp_ie)
return -EINVAL;
- }
memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
}
@@ -2667,8 +2476,6 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
- DBG_88E("%s, len =%d\n", __func__, len);
-
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2680,10 +2487,8 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
if (ie_len > 0) {
pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
pmlmepriv->wps_assoc_resp_ie_len = ie_len;
- if (!pmlmepriv->wps_assoc_resp_ie) {
- DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ if (!pmlmepriv->wps_assoc_resp_ie)
return -EINVAL;
- }
memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
}
@@ -2703,14 +2508,11 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
- if (param->u.wpa_param.name != 0) /* dummy test... */
- DBG_88E("%s name(%u) != 0\n", __func__, param->u.wpa_param.name);
value = param->u.wpa_param.value;
/* use the same definition of hostapd's ignore_broadcast_ssid */
if (value != 1 && value != 2)
value = 0;
- DBG_88E("%s value(%u)\n", __func__, value);
pmlmeinfo->hidden_ssid_mode = value;
return 0;
}
@@ -2821,7 +2623,6 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
break;
default:
- DBG_88E("Unknown hostapd request: %d\n", param->cmd);
ret = -EOPNOTSUPP;
break;
}
@@ -2899,87 +2700,53 @@ FREE_EXT:
}
static iw_handler rtw_handlers[] = {
- NULL, /* SIOCSIWCOMMIT */
- rtw_wx_get_name, /* SIOCGIWNAME */
- dummy, /* SIOCSIWNWID */
- dummy, /* SIOCGIWNWID */
- rtw_wx_set_freq, /* SIOCSIWFREQ */
- rtw_wx_get_freq, /* SIOCGIWFREQ */
- rtw_wx_set_mode, /* SIOCSIWMODE */
- rtw_wx_get_mode, /* SIOCGIWMODE */
- dummy, /* SIOCSIWSENS */
- rtw_wx_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- rtw_wx_get_range, /* SIOCGIWRANGE */
- rtw_wx_set_priv, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- dummy, /* SIOCSIWSPY */
- dummy, /* SIOCGIWSPY */
- NULL, /* SIOCGIWTHRSPY */
- NULL, /* SIOCWIWTHRSPY */
- rtw_wx_set_wap, /* SIOCSIWAP */
- rtw_wx_get_wap, /* SIOCGIWAP */
- rtw_wx_set_mlme, /* request MLME operation; uses struct iw_mlme */
- dummy, /* SIOCGIWAPLIST -- depricated */
- rtw_wx_set_scan, /* SIOCSIWSCAN */
- rtw_wx_get_scan, /* SIOCGIWSCAN */
- rtw_wx_set_essid, /* SIOCSIWESSID */
- rtw_wx_get_essid, /* SIOCGIWESSID */
- dummy, /* SIOCSIWNICKN */
- rtw_wx_get_nick, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- rtw_wx_set_rate, /* SIOCSIWRATE */
- rtw_wx_get_rate, /* SIOCGIWRATE */
- rtw_wx_set_rts, /* SIOCSIWRTS */
- rtw_wx_get_rts, /* SIOCGIWRTS */
- rtw_wx_set_frag, /* SIOCSIWFRAG */
- rtw_wx_get_frag, /* SIOCGIWFRAG */
- dummy, /* SIOCSIWTXPOW */
- dummy, /* SIOCGIWTXPOW */
- dummy, /* SIOCSIWRETRY */
- rtw_wx_get_retry, /* SIOCGIWRETRY */
- rtw_wx_set_enc, /* SIOCSIWENCODE */
- rtw_wx_get_enc, /* SIOCGIWENCODE */
- dummy, /* SIOCSIWPOWER */
- rtw_wx_get_power, /* SIOCGIWPOWER */
- NULL, /*---hole---*/
- NULL, /*---hole---*/
- rtw_wx_set_gen_ie, /* SIOCSIWGENIE */
- NULL, /* SIOCGWGENIE */
- rtw_wx_set_auth, /* SIOCSIWAUTH */
- NULL, /* SIOCGIWAUTH */
- rtw_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
- NULL, /* SIOCGIWENCODEEXT */
- rtw_wx_set_pmkid, /* SIOCSIWPMKSA */
- NULL, /*---hole---*/
+ IW_HANDLER(SIOCGIWNAME, rtw_wx_get_name),
+ IW_HANDLER(SIOCGIWFREQ, rtw_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, rtw_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, rtw_wx_get_mode),
+ IW_HANDLER(SIOCGIWSENS, rtw_wx_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, rtw_wx_get_range),
+ IW_HANDLER(SIOCSIWPRIV, rtw_wx_set_priv),
+ IW_HANDLER(SIOCSIWAP, rtw_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, rtw_wx_get_wap),
+ IW_HANDLER(SIOCSIWMLME, rtw_wx_set_mlme),
+ IW_HANDLER(SIOCSIWSCAN, rtw_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, rtw_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, rtw_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, rtw_wx_get_essid),
+ IW_HANDLER(SIOCGIWNICKN, rtw_wx_get_nick),
+ IW_HANDLER(SIOCSIWRATE, rtw_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, rtw_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, rtw_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, rtw_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, rtw_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, rtw_wx_get_frag),
+ IW_HANDLER(SIOCGIWRETRY, rtw_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, rtw_wx_set_enc),
+ IW_HANDLER(SIOCGIWENCODE, rtw_wx_get_enc),
+ IW_HANDLER(SIOCGIWPOWER, rtw_wx_get_power),
+ IW_HANDLER(SIOCSIWGENIE, rtw_wx_set_gen_ie),
+ IW_HANDLER(SIOCSIWAUTH, rtw_wx_set_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, rtw_wx_set_enc_ext),
+ IW_HANDLER(SIOCSIWPMKSA, rtw_wx_set_pmkid),
};
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
{
struct adapter *padapter = netdev_priv(dev);
struct iw_statistics *piwstats = &padapter->iwstats;
- int tmp_level = 0;
- int tmp_qual = 0;
- int tmp_noise = 0;
if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
piwstats->qual.qual = 0;
piwstats->qual.level = 0;
piwstats->qual.noise = 0;
} else {
- tmp_level = padapter->recvpriv.signal_strength;
- tmp_qual = padapter->recvpriv.signal_qual;
- tmp_noise = padapter->recvpriv.noise;
-
- piwstats->qual.level = tmp_level;
- piwstats->qual.qual = tmp_qual;
- piwstats->qual.noise = tmp_noise;
+ piwstats->qual.level = padapter->recvpriv.signal_strength;
+ piwstats->qual.qual = padapter->recvpriv.signal_qual;
+ piwstats->qual.noise = padapter->recvpriv.noise;
}
piwstats->qual.updated = IW_QUAL_ALL_UPDATED;/* IW_QUAL_DBM; */
- return &padapter->iwstats;
+ return piwstats;
}
struct iw_handler_def rtw_handlers_def = {
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index df53b7d52618..f12d8a707376 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -87,12 +87,8 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
u8 *buff, *p, i;
union iwreq_data wrqu;
- RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
- ("+%s, authmode=%d\n", __func__, authmode));
buff = NULL;
if (authmode == WLAN_EID_VENDOR_SPECIFIC) {
- RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
- ("%s, authmode=%d\n", __func__, authmode));
buff = rtw_malloc(IW_CUSTOM_MAX);
if (!buff)
return;
@@ -144,8 +140,6 @@ void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *pst
memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
- DBG_88E("+%s\n", __func__);
-
wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
}
@@ -167,8 +161,6 @@ void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *
memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
- DBG_88E("+%s\n", __func__);
-
wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 73b9599fe0dc..6370a3d8881b 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -14,7 +14,7 @@
#include <rtw_xmit.h>
#include <mon.h>
-/**
+/*
* unprotect_frame() - unset Protected flag and strip off IV and ICV/MIC
*/
static void unprotect_frame(struct sk_buff *skb, int iv_len, int icv_len)
@@ -65,7 +65,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
netdev_info(dev, "Encrypted packets are not supported");
}
-/**
+/*
* rtl88eu_mon_recv_hook() - forward received frame to the monitor interface
*
* Assumes that the frame contains an IV and an ICV/MIC, and that
@@ -96,7 +96,7 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
mon_recv_encrypted(dev, data, data_len);
}
-/**
+/*
 * rtl88eu_mon_xmit_hook() - forward transmitted frame to the monitor interface
*
* Assumes that:
@@ -163,18 +163,15 @@ struct net_device *rtl88eu_mon_init(void)
dev = alloc_netdev(0, "mon%d", NET_NAME_UNKNOWN, mon_setup);
if (!dev)
- goto fail;
+ return NULL;
err = register_netdev(dev);
- if (err < 0)
- goto fail_free_dev;
+ if (err < 0) {
+ free_netdev(dev);
+ return NULL;
+ }
return dev;
-
-fail_free_dev:
- free_netdev(dev);
-fail:
- return NULL;
}
void rtl88eu_mon_deinit(struct net_device *dev)
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index a826228cbbe9..423c382e3d20 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -18,7 +18,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
-MODULE_VERSION(DRIVERVERSION);
+MODULE_VERSION("v4.1.4_6773.20130222");
MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
@@ -136,12 +136,10 @@ MODULE_PARM_DESC(monitor_enable, "Enable monitor interface (default: false)");
static int netdev_close(struct net_device *pnetdev);
-static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
+static void loadparam(struct adapter *padapter)
{
struct registry_priv *registry_par = &padapter->registrypriv;
- GlobalDebugLevel = rtw_debug;
-
memcpy(registry_par->ssid.ssid, "ANY", 3);
registry_par->ssid.ssid_length = 3;
@@ -301,8 +299,6 @@ struct net_device *rtw_init_netdev(void)
struct adapter *padapter;
struct net_device *pnetdev;
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
-
pnetdev = alloc_etherdev_mq(sizeof(struct adapter), 4);
if (!pnetdev)
return NULL;
@@ -310,12 +306,12 @@ struct net_device *rtw_init_netdev(void)
pnetdev->dev.type = &wlan_type;
padapter = netdev_priv(pnetdev);
padapter->pnetdev = pnetdev;
- DBG_88E("register rtw_netdev_ops to netdev_ops\n");
pnetdev->netdev_ops = &rtw_netdev_ops;
pnetdev->watchdog_timeo = HZ * 3; /* 3 second timeout */
pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
- loadparam(padapter, pnetdev);
+ loadparam(padapter);
+ padapter->cmdThread = NULL;
return pnetdev;
}
@@ -324,27 +320,22 @@ static int rtw_start_drv_threads(struct adapter *padapter)
{
int err = 0;
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
-
- padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter,
- "RTW_CMD_THREAD");
- if (IS_ERR(padapter->cmdThread))
+ padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
+ if (IS_ERR(padapter->cmdThread)) {
err = PTR_ERR(padapter->cmdThread);
- else
- /* wait for cmd_thread to run */
- wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
+ padapter->cmdThread = NULL;
+ }
return err;
}
void rtw_stop_drv_threads(struct adapter *padapter)
{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
+ if (!padapter->cmdThread)
+ return;
- /* Below is to terminate rtw_cmd_thread & event_thread... */
complete(&padapter->cmdpriv.cmd_queue_comp);
- if (padapter->cmdThread)
- wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
+ kthread_stop(padapter->cmdThread);
}
static u8 rtw_init_default_value(struct adapter *padapter)
@@ -422,42 +413,29 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
{
u8 ret8 = _SUCCESS;
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
-
- if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n"));
- ret8 = _FAIL;
- goto exit;
- }
-
- padapter->cmdpriv.padapter = padapter;
+ rtw_init_cmd_priv(&padapter->cmdpriv);
if (rtw_init_mlme_priv(padapter) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n"));
ret8 = _FAIL;
goto exit;
}
if (init_mlme_ext_priv(padapter) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n"));
ret8 = _FAIL;
goto exit;
}
if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
- DBG_88E("Can't _rtw_init_xmit_priv\n");
ret8 = _FAIL;
goto exit;
}
if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
- DBG_88E("Can't _rtw_init_recv_priv\n");
ret8 = _FAIL;
goto exit;
}
if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
- DBG_88E("Can't _rtw_init_sta_priv\n");
ret8 = _FAIL;
goto exit;
}
@@ -476,27 +454,19 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_hal_sreset_init(padapter);
exit:
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-%s\n", __func__));
-
return ret8;
}
void rtw_cancel_all_timer(struct adapter *padapter)
{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
-
del_timer_sync(&padapter->mlmepriv.assoc_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel association timer complete!\n", __func__));
del_timer_sync(&padapter->mlmepriv.scan_to_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel scan_to_timer!\n", __func__));
del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel dynamic_chk_timer!\n", __func__));
/* cancel sw led timer */
rtw_hal_sw_led_deinit(padapter);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel DeInitSwLeds!\n", __func__));
del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer);
@@ -505,8 +475,6 @@ void rtw_cancel_all_timer(struct adapter *padapter)
u8 rtw_free_drv_sw(struct adapter *padapter)
{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>%s", __func__));
-
free_mlme_ext_priv(&padapter->mlmeextpriv);
rtw_free_mlme_priv(&padapter->mlmepriv);
@@ -519,12 +487,8 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
rtw_hal_free_data(padapter);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== %s\n", __func__));
-
mutex_destroy(&padapter->hw_init_mutex);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-%s\n", __func__));
-
return _SUCCESS;
}
@@ -535,9 +499,6 @@ static int _netdev_open(struct net_device *pnetdev)
struct adapter *padapter = netdev_priv(pnetdev);
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - dev_open\n"));
- DBG_88E("+88eu_drv - drv_open, bup =%d\n", padapter->bup);
-
if (pwrctrlpriv->ps_flag) {
padapter->net_closed = false;
goto netdev_open_normal_process;
@@ -548,10 +509,8 @@ static int _netdev_open(struct net_device *pnetdev)
padapter->bSurpriseRemoved = false;
status = rtw_hal_init(padapter);
- if (status == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl88eu_hal_init(): Can't init h/w!\n"));
+ if (status == _FAIL)
goto netdev_open_error;
- }
pr_info("MAC Address = %pM\n", pnetdev->dev_addr);
@@ -585,16 +544,12 @@ static int _netdev_open(struct net_device *pnetdev)
netif_tx_wake_all_queues(pnetdev);
netdev_open_normal_process:
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - dev_open\n"));
- DBG_88E("-88eu_drv - drv_open, bup =%d\n", padapter->bup);
return 0;
netdev_open_error:
padapter->bup = false;
netif_carrier_off(pnetdev);
netif_tx_stop_all_queues(pnetdev);
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-88eu_drv - dev_open, fail!\n"));
- DBG_88E("-88eu_drv - drv_open fail, bup =%d\n", padapter->bup);
return -1;
}
@@ -615,16 +570,13 @@ int ips_netdrv_open(struct adapter *padapter)
int status = _SUCCESS;
padapter->net_closed = false;
- DBG_88E("===> %s.........\n", __func__);
padapter->bDriverStopped = false;
padapter->bSurpriseRemoved = false;
status = rtw_hal_init(padapter);
- if (status == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("%s(): Can't init h/w!\n", __func__));
+ if (status == _FAIL)
goto netdev_open_error;
- }
rtw_hal_inirp_init(padapter);
@@ -635,47 +587,33 @@ int ips_netdrv_open(struct adapter *padapter)
return _SUCCESS;
netdev_open_error:
- DBG_88E("-%s - drv_open failure, bup =%d\n", __func__, padapter->bup);
-
return _FAIL;
}
int rtw_ips_pwr_up(struct adapter *padapter)
{
int result;
- unsigned long start_time = jiffies;
- DBG_88E("===> %s..............\n", __func__);
rtw_reset_drv_sw(padapter);
result = ips_netdrv_open(padapter);
led_control_8188eu(padapter, LED_CTL_NO_LINK);
- DBG_88E("<=== %s.............. in %dms\n", __func__,
- jiffies_to_msecs(jiffies - start_time));
return result;
}
void rtw_ips_pwr_down(struct adapter *padapter)
{
- unsigned long start_time = jiffies;
-
- DBG_88E("===> %s...................\n", __func__);
-
padapter->net_closed = true;
led_control_8188eu(padapter, LED_CTL_POWER_OFF);
rtw_ips_dev_unload(padapter);
- DBG_88E("<=== %s..................... in %dms\n", __func__,
- jiffies_to_msecs(jiffies - start_time));
}
void rtw_ips_dev_unload(struct adapter *padapter)
{
- DBG_88E("====> %s...\n", __func__);
-
rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
usb_intf_stop(padapter);
@@ -689,8 +627,6 @@ static int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = netdev_priv(pnetdev);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
-
if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
padapter->pwrctrlpriv.ps_flag = true;
@@ -698,9 +634,6 @@ static int netdev_close(struct net_device *pnetdev)
padapter->net_closed = true;
if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
- DBG_88E("(2)88eu_drv - drv_close, bup =%d, hw_init_completed =%d\n",
- padapter->bup, padapter->hw_init_completed);
-
/* s1. */
if (pnetdev) {
if (!rtw_netif_queue_stopped(pnetdev))
@@ -720,7 +653,5 @@ static int netdev_close(struct net_device *pnetdev)
led_control_8188eu(padapter, LED_CTL_POWER_OFF);
}
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
- DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
return 0;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index a647cdc330e4..3460619ae08f 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -72,11 +72,8 @@ int rtw_recv_indicatepkt(struct adapter *padapter,
pfree_recv_queue = &precvpriv->free_recv_queue;
skb = precv_frame->pkt;
- if (!skb) {
- RT_TRACE(_module_recv_osdep_c_, _drv_err_,
- ("%s():skb == NULL something wrong!!!!\n", __func__));
+ if (!skb)
goto _recv_indicatepkt_drop;
- }
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sk_buff *pskb2 = NULL;
@@ -124,9 +121,6 @@ _recv_indicatepkt_end:
rtw_free_recvframe(precv_frame, pfree_recv_queue);
- RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- ("\n %s :after netif_rx!!!!\n", __func__));
-
return _SUCCESS;
_recv_indicatepkt_drop:
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index f1470ac56874..3c5446999686 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -10,7 +10,6 @@
#include <rtw_android.h>
#include <osdep_service.h>
-#include <rtw_debug.h>
#include <rtw_ioctl_set.h>
static const char *android_wifi_cmd_str[ANDROID_WIFI_CMD_MAX] = {
@@ -52,7 +51,7 @@ struct android_wifi_priv_cmd {
int total_len;
};
-int rtw_android_cmdstr_to_num(char *cmdstr)
+static int rtw_android_cmdstr_to_num(char *cmdstr)
{
int cmd_num;
@@ -135,8 +134,6 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
if (IS_ERR(command))
return PTR_ERR(command);
command[priv_cmd.total_len - 1] = 0;
- DBG_88E("%s: Android private cmd \"%s\" on %s\n",
- __func__, command, ifr->ifr_name);
cmd_num = rtw_android_cmdstr_to_num(command);
switch (cmd_num) {
case ANDROID_WIFI_CMD_START:
@@ -202,7 +199,6 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
case ANDROID_WIFI_CMD_P2P_SET_PS:
break;
default:
- DBG_88E("Unknown PRIVATE command %s - ignored\n", command);
snprintf(command, 3, "OK");
bytes_written = strlen("OK");
}
@@ -211,20 +207,14 @@ response:
if (bytes_written >= 0) {
if ((bytes_written == 0) && (priv_cmd.total_len > 0))
command[0] = '\0';
- if (bytes_written >= priv_cmd.total_len) {
- DBG_88E("%s: bytes_written = %d\n", __func__,
- bytes_written);
+ if (bytes_written >= priv_cmd.total_len)
bytes_written = priv_cmd.total_len;
- } else {
+ else
bytes_written++;
- }
priv_cmd.used_len = bytes_written;
if (copy_to_user((char __user *)priv_cmd.buf, command,
- bytes_written)) {
- DBG_88E("%s: failed to copy data to user buffer\n",
- __func__);
+ bytes_written))
ret = -EFAULT;
- }
} else {
ret = bytes_written;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 3a970d67aa8c..b7e2692c35f3 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -143,16 +143,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
void usb_intf_stop(struct adapter *padapter)
{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+%s\n", __func__));
-
- /* disable_hw_interrupt */
- if (!padapter->bSurpriseRemoved) {
- /* device still exists, so driver can do i/o operation */
- /* TODO: */
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- ("SurpriseRemoved == false\n"));
- }
-
/* cancel in irp */
rtw_hal_inirp_deinit(padapter);
@@ -160,14 +150,10 @@ void usb_intf_stop(struct adapter *padapter)
usb_write_port_cancel(padapter);
/* todo:cancel other irps */
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-%s\n", __func__));
}
static void rtw_dev_unload(struct adapter *padapter)
{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+%s\n", __func__));
-
if (padapter->bup) {
pr_debug("===> %s\n", __func__);
padapter->bDriverStopped = true;
@@ -186,14 +172,9 @@ static void rtw_dev_unload(struct adapter *padapter)
}
padapter->bup = false;
- } else {
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- ("r871x_dev_unload():padapter->bup == false\n"));
}
pr_debug("<=== %s\n", __func__);
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-%s\n", __func__));
}
static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
@@ -351,7 +332,6 @@ static int rtw_usb_if1_init(struct usb_interface *pusb_intf)
padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
if (!padapter->HalData) {
- DBG_88E("Failed to allocate memory for HAL data\n");
err = -ENOMEM;
goto free_adapter;
}
@@ -367,8 +347,6 @@ static int rtw_usb_if1_init(struct usb_interface *pusb_intf)
/* step 5. */
if (rtw_init_drv_sw(padapter) == _FAIL) {
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- ("Initialize driver software resource Failed!\n"));
err = -ENOMEM;
goto free_hal_data;
}
@@ -388,8 +366,9 @@ static int rtw_usb_if1_init(struct usb_interface *pusb_intf)
pr_debug("can't get autopm:\n");
/* alloc dev name after read efuse. */
- if (dev_alloc_name(pnetdev, padapter->registrypriv.ifname) < 0)
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("dev_alloc_name, fail!\n"));
+ err = dev_alloc_name(pnetdev, padapter->registrypriv.ifname);
+ if (err < 0)
+ goto free_hal_data;
netif_carrier_off(pnetdev);
@@ -401,7 +380,6 @@ static int rtw_usb_if1_init(struct usb_interface *pusb_intf)
/* step 6. Tell the network stack we exist */
err = register_netdev(pnetdev);
if (err) {
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("register_netdev() failed\n"));
goto free_hal_data;
}
@@ -478,7 +456,6 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
struct adapter *padapter = dvobj->if1;
pr_debug("+%s\n", __func__);
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
if (!pusb_intf->unregistering)
padapter->bSurpriseRemoved = true;
@@ -492,7 +469,6 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
usb_dvobj_deinit(pusb_intf);
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
pr_debug("-r871xu_dev_remove, done\n");
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index e8222ffb6fea..0ceb05f3884f 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -28,10 +28,8 @@ static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbu
{
struct hal_data_8188e *haldata = adapt->HalData;
- if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) {
- DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len);
+ if (pkt_len != INTERRUPT_MSG_FORMAT_LEN)
return;
- }
/* HISR */
memcpy(&haldata->IntArray[0], &pbuf[USB_INTR_CONTENT_HISR_OFFSET], 4);
@@ -66,20 +64,11 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
do {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("%s: rxdesc=offsset 0:0x%08x, 4:0x%08x, 8:0x%08x, C:0x%08x\n",
- __func__, prxstat->rxdw0, prxstat->rxdw1,
- prxstat->rxdw2, prxstat->rxdw4));
-
prxstat = (struct recv_stat *)pbuf;
precvframe = rtw_alloc_recvframe(pfree_recv_queue);
- if (!precvframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("%s: precvframe==NULL\n", __func__));
- DBG_88E("%s()-%d: rtw_alloc_recvframe() failed! RX Drop!\n", __func__, __LINE__);
+ if (!precvframe)
goto _exit_recvbuf2recvframe;
- }
INIT_LIST_HEAD(&precvframe->list);
@@ -88,8 +77,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pattrib = &precvframe->attrib;
if ((pattrib->crc_err) || (pattrib->icv_err)) {
- DBG_88E("%s: RX Warning! crc_err=%d icv_err=%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err);
-
rtw_free_recvframe(precvframe, pfree_recv_queue);
goto _exit_recvbuf2recvframe;
}
@@ -100,9 +87,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz + pattrib->shift_sz + pattrib->pkt_len;
if ((pattrib->pkt_len <= 0) || (pkt_offset > transfer_len)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("%s: pkt_len<=0\n", __func__));
- DBG_88E("%s()-%d: RX Warning!,pkt_len<=0 or pkt_offset> transfer_len\n", __func__, __LINE__);
rtw_free_recvframe(precvframe, pfree_recv_queue);
goto _exit_recvbuf2recvframe;
}
@@ -138,8 +122,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
skb_put_data(pkt_copy, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
} else {
- DBG_88E("%s: alloc_skb fail , drop frag frame\n",
- __func__);
rtw_free_recvframe(precvframe, pfree_recv_queue);
goto _exit_recvbuf2recvframe;
}
@@ -159,11 +141,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
if (pattrib->physt)
update_recvframe_phyinfo_88e(precvframe, pphy_status);
- if (rtw_recv_entry(precvframe) != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("%s: rtw_recv_entry(precvframe) != _SUCCESS\n",
- __func__));
- }
+ rtw_recv_entry(precvframe);
} else if (pattrib->pkt_rpt_type == TX_REPORT1) {
/* CCX-TXRPT ack for xmit mgmt frames. */
handle_txrpt_ccx_88e(adapt, precvframe->pkt->data);
@@ -223,15 +201,11 @@ usbctrl_vendorreq(struct adapter *adapt, u16 value, void *pdata, u16 len, u8 req
int vendorreq_times = 0;
if ((adapt->bSurpriseRemoved) || (adapt->pwrctrlpriv.pnp_bstop_trx)) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:(adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n",
- __func__));
status = -EPERM;
goto exit;
}
if (len > MAX_VENDOR_REQ_CMD_SIZE) {
- DBG_88E("[%s] Buffer len error ,vendor request failed\n", __func__);
status = -EINVAL;
goto exit;
}
@@ -272,10 +246,6 @@ usbctrl_vendorreq(struct adapter *adapt, u16 value, void *pdata, u16 len, u8 req
if (reqtype == REALTEK_USB_VENQT_READ)
memcpy(pdata, pIo_buf, len);
} else { /* error cases */
- DBG_88E("reg 0x%x, usb %s %u fail, status:%d value=0x%x, vendorreq_times:%d\n",
- value, (reqtype == REALTEK_USB_VENQT_READ) ? "read" : "write",
- len, status, *(u32 *)pdata, vendorreq_times);
-
if (status < 0) {
if (status == -ESHUTDOWN || status == -ENODEV)
adapt->bSurpriseRemoved = true;
@@ -340,29 +310,15 @@ static void usb_read_port_complete(struct urb *purb)
struct adapter *adapt = (struct adapter *)precvbuf->adapter;
struct recv_priv *precvpriv = &adapt->recvpriv;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s!!!\n", __func__));
-
if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
- __func__, adapt->bDriverStopped,
- adapt->bSurpriseRemoved));
-
precvbuf->reuse = true;
- DBG_88E("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
- __func__, adapt->bDriverStopped,
- adapt->bSurpriseRemoved, adapt->bReadPortCancel);
return;
}
if (purb->status == 0) { /* SUCCESS */
if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n",
- __func__));
precvbuf->reuse = true;
usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
- DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
} else {
skb_put(precvbuf->pskb, purb->actual_length);
skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb);
@@ -375,11 +331,6 @@ static void usb_read_port_complete(struct urb *purb)
usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
}
} else {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s : purb->status(%d) != 0\n",
- __func__, purb->status));
-
- DBG_88E("###=> %s => urb status(%d)\n", __func__, purb->status);
skb_put(precvbuf->pskb, purb->actual_length);
precvbuf->pskb = NULL;
@@ -392,8 +343,6 @@ static void usb_read_port_complete(struct urb *purb)
fallthrough;
case -ENOENT:
adapt->bDriverStopped = true;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:bDriverStopped=true\n", __func__));
break;
case -EPROTO:
case -EOVERFLOW:
@@ -402,7 +351,6 @@ static void usb_read_port_complete(struct urb *purb)
usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
break;
case -EINPROGRESS:
- DBG_88E("ERROR: URB IS IN PROGRESS!\n");
break;
default:
break;
@@ -418,21 +366,14 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
struct usb_device *pusbd = pdvobj->pusbdev;
int err;
unsigned int pipe;
- u32 ret = _SUCCESS;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
adapter->pwrctrlpriv.pnp_bstop_trx) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:(adapt->bDriverStopped ||adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n",
- __func__));
return _FAIL;
}
- if (!precvbuf) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:precvbuf==NULL\n", __func__));
+ if (!precvbuf)
return _FAIL;
- }
if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
@@ -443,11 +384,8 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
/* re-assign for linux based on skb */
if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ);
- if (!precvbuf->pskb) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
- DBG_88E("#### %s() alloc_skb fail!#####\n", __func__);
+ if (!precvbuf->pskb)
return _FAIL;
- }
} else { /* reuse skb */
precvbuf->reuse = false;
}
@@ -464,16 +402,10 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
precvbuf);/* context is precvbuf */
err = usb_submit_urb(purb, GFP_ATOMIC);
- if ((err) && (err != (-EPERM))) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x",
- err, purb->status));
- DBG_88E("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n",
- err, purb->status);
- ret = _FAIL;
- }
+ if (err)
+ return _FAIL;
- return ret;
+ return _SUCCESS;
}
void rtw_hal_inirp_deinit(struct adapter *padapter)
@@ -483,8 +415,6 @@ void rtw_hal_inirp_deinit(struct adapter *padapter)
precvbuf = padapter->recvpriv.precv_buf;
- DBG_88E("%s\n", __func__);
-
padapter->bReadPortCancel = true;
for (i = 0; i < NR_RECVBUFF; i++) {
@@ -547,49 +477,23 @@ static void usb_write_port_complete(struct urb *purb)
break;
}
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
- padapter->bWritePortCancel) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
- __func__, padapter->bDriverStopped,
- padapter->bSurpriseRemoved));
- DBG_88E("%s(): TX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bWritePortCancel(%d) pxmitbuf->ext_tag(%x)\n",
- __func__, padapter->bDriverStopped,
- padapter->bSurpriseRemoved, padapter->bReadPortCancel,
- pxmitbuf->ext_tag);
-
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped || padapter->bWritePortCancel)
goto check_completion;
- }
if (purb->status) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s : purb->status(%d) != 0\n",
- __func__, purb->status));
- DBG_88E("###=> %s status(%d)\n", __func__, purb->status);
if ((purb->status == -EPIPE) || (purb->status == -EPROTO)) {
sreset_set_wifi_error_status(padapter, USB_WRITE_PORT_FAIL);
} else if (purb->status == -EINPROGRESS) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s: EINPROGRESS\n", __func__));
goto check_completion;
} else if (purb->status == -ENOENT) {
- DBG_88E("%s: -ENOENT\n", __func__);
goto check_completion;
} else if (purb->status == -ECONNRESET) {
- DBG_88E("%s: -ECONNRESET\n", __func__);
goto check_completion;
} else if (purb->status == -ESHUTDOWN) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s: ESHUTDOWN\n", __func__));
padapter->bDriverStopped = true;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:bDriverStopped = true\n", __func__));
goto check_completion;
} else {
padapter->bSurpriseRemoved = true;
- DBG_88E("bSurpriseRemoved = true\n");
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:bSurpriseRemoved = true\n", __func__));
goto check_completion;
}
}
@@ -616,13 +520,8 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, struct xmit_buf
struct xmit_frame *pxmitframe = (struct xmit_frame *)xmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+%s\n", __func__));
-
if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
(padapter->pwrctrlpriv.pnp_bstop_trx)) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s:( padapter->bDriverStopped ||padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n",
- __func__));
rtw_sctx_done_err(&xmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
goto exit;
}
@@ -670,11 +569,6 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, struct xmit_buf
status = usb_submit_urb(purb, GFP_ATOMIC);
if (status) {
rtw_sctx_done_err(&xmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
- DBG_88E("%s, status =%d\n", __func__, status);
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- ("%s(): usb_submit_urb, status =%x\n",
- __func__, status));
-
if (status == -ENODEV)
padapter->bDriverStopped = true;
@@ -683,8 +577,6 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, struct xmit_buf
ret = _SUCCESS;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("-%s\n", __func__));
-
exit:
if (ret != _SUCCESS)
rtw_free_xmitbuf(pxmitpriv, xmitbuf);
@@ -696,8 +588,6 @@ void usb_write_port_cancel(struct adapter *padapter)
int i, j;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
- DBG_88E("%s\n", __func__);
-
padapter->bWritePortCancel = true;
for (i = 0; i < NR_XMITBUFF; i++) {
@@ -726,7 +616,6 @@ void rtl8188eu_recv_tasklet(struct tasklet_struct *t)
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved)) {
- DBG_88E("recv_tasklet => bDriverStopped or bSurpriseRemoved\n");
dev_kfree_skb_any(pskb);
break;
}
@@ -746,12 +635,8 @@ void rtl8188eu_xmit_tasklet(struct tasklet_struct *t)
return;
while (1) {
- if ((adapt->bDriverStopped) ||
- (adapt->bSurpriseRemoved) ||
- (adapt->bWritePortCancel)) {
- DBG_88E("xmit_tasklet => bDriverStopped or bSurpriseRemoved or bWritePortCancel\n");
+ if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved) || (adapt->bWritePortCancel))
break;
- }
if (!rtl8188eu_xmitframe_complete(adapt, pxmitpriv))
break;
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index a9c42fb80583..1b5776ae8eba 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -26,10 +26,8 @@ int rtw_os_xmit_resource_alloc(struct xmit_buf *pxmitbuf, u32 alloc_sz)
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb[i]) {
- DBG_88E("pxmitbuf->pxmit_urb[i]==NULL");
+ if (!pxmitbuf->pxmit_urb[i])
return _FAIL;
- }
}
return _SUCCESS;
}
@@ -118,13 +116,9 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = phead->next;
-
/* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
+ list_for_each(plist, phead) {
+ psta = list_entry(plist, struct sta_info, asoc_list);
/* avoid come from STA1 and send back STA1 */
if (!memcmp(psta->hwaddr, &skb->data[6], 6))
@@ -136,16 +130,12 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
memcpy(newskb->data, psta->hwaddr, 6);
res = rtw_xmit(padapter, &newskb);
if (res < 0) {
- DBG_88E("%s()-%d: rtw_xmit() return error!\n",
- __func__, __LINE__);
pxmitpriv->tx_drop++;
dev_kfree_skb_any(newskb);
} else {
pxmitpriv->tx_pkts++;
}
} else {
- DBG_88E("%s-%d: skb_copy() failed!\n",
- __func__, __LINE__);
pxmitpriv->tx_drop++;
spin_unlock_bh(&pstapriv->asoc_list_lock);
@@ -169,13 +159,8 @@ int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
s32 res = 0;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_enry\n"));
-
- if (!rtw_if_up(padapter)) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
- ("%s: rtw_if_up fail\n", __func__));
+ if (!rtw_if_up(padapter))
goto drop_packet;
- }
rtw_check_xmit_resource(padapter, pkt);
@@ -194,16 +179,11 @@ int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
goto drop_packet;
pxmitpriv->tx_pkts++;
- RT_TRACE(_module_xmit_osdep_c_, _drv_info_,
- ("%s: tx_pkts=%d\n", __func__, (u32)pxmitpriv->tx_pkts));
goto exit;
drop_packet:
pxmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
- RT_TRACE(_module_xmit_osdep_c_, _drv_notice_,
- ("%s: drop, tx_drop=%d\n", __func__, (u32)pxmitpriv->tx_drop));
-
exit:
return NETDEV_TX_OK;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 7876b389913a..52eeb56c5c76 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -46,7 +46,6 @@ void rtl92e_set_bandwidth(struct net_device *dev,
netdev_err(dev, "%s(): Unknown bandwidth: %#X\n",
__func__, Bandwidth);
break;
-
}
}
}
@@ -73,7 +72,6 @@ bool rtl92e_config_rf(struct net_device *dev)
pPhyReg = &priv->PHYRegDef[eRFPath];
-
switch (eRFPath) {
case RF90_PATH_A:
case RF90_PATH_C:
@@ -143,7 +141,6 @@ bool rtl92e_config_rf(struct net_device *dev)
__func__, eRFPath);
goto fail;
}
-
}
RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
@@ -170,7 +167,6 @@ void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel)
rtl92e_set_bb_reg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
}
-
void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -215,5 +211,4 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
(byte1 << 8) | byte0;
rtl92e_set_bb_reg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
}
-
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index c8506517cc8d..f75a12543781 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -185,14 +185,12 @@ void rtl92e_cam_restore(struct net_device *dev)
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
(u8 *)dev->dev_addr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].
- key_buf[0]));
+ (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
} else {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].
- key_buf[0]));
+ (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
}
} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_CCMP) {
@@ -200,13 +198,11 @@ void rtl92e_cam_restore(struct net_device *dev)
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
(u8 *)dev->dev_addr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].
- key_buf[0]));
+ (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
} else {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type, MacAddr,
- 0, (u32 *)(&priv->rtllib->swcamtable[4].
- key_buf[0]));
+ 0, (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 736f1a824cd2..698552a92100 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -569,8 +569,8 @@ void rtl92e_writel(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
-void rtl92e_rx_enable(struct net_device *);
-void rtl92e_tx_enable(struct net_device *);
+void rtl92e_rx_enable(struct net_device *dev);
+void rtl92e_tx_enable(struct net_device *dev);
void rtl92e_hw_sleep_wq(void *data);
void rtl92e_commit(struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index c53aa2d305ca..756d8db51937 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1733,7 +1733,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
priv->bcurrent_turbo_EDCA = true;
}
} else {
- if (priv->bcurrent_turbo_EDCA) {
+ if (priv->bcurrent_turbo_EDCA) {
u8 tmp = AC0_BE;
priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM,
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index 48d28c7d870b..3b8efaf9b88c 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -276,7 +276,7 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
struct rt_hi_throughput *pHT = ieee->pHTInfo;
struct ht_capab_ele *pCapELE = NULL;
- if ((posHTCap == NULL) || (pHT == NULL)) {
+ if (!posHTCap || !pHT) {
netdev_warn(ieee->dev,
"%s(): posHTCap and pHTInfo are null\n", __func__);
return;
@@ -357,7 +357,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
struct rt_hi_throughput *pHT = ieee->pHTInfo;
struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;
- if ((posHTInfo == NULL) || (pHTInfoEle == NULL)) {
+ if (!posHTInfo || !pHTInfoEle) {
netdev_warn(ieee->dev,
"%s(): posHTInfo and pHTInfoEle are null\n",
__func__);
@@ -397,7 +397,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
u8 *len)
{
- if (posRT2RTAgg == NULL) {
+ if (!posRT2RTAgg) {
netdev_warn(ieee->dev, "%s(): posRT2RTAgg is null\n", __func__);
return;
}
@@ -420,7 +420,7 @@ static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
{
u8 i;
- if (pOperateMCS == NULL) {
+ if (!pOperateMCS) {
netdev_warn(ieee->dev, "%s(): pOperateMCS is null\n", __func__);
return false;
}
@@ -453,7 +453,7 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 mcsRate = 0;
u8 availableMcsRate[16];
- if (pMCSRateSet == NULL || pMCSFilter == NULL) {
+ if (!pMCSRateSet || !pMCSFilter) {
netdev_warn(ieee->dev,
"%s(): pMCSRateSet and pMCSFilter are null\n",
__func__);
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 4cabaf21c1ca..c6f8b772335c 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1979,8 +1979,6 @@ void rtllib_softmac_new_net(struct rtllib_device *ieee,
void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
-void rtllib_stop_send_beacons(struct rtllib_device *ieee);
-void notify_wx_assoc_event(struct rtllib_device *ieee);
void rtllib_start_ibss(struct rtllib_device *ieee);
void rtllib_softmac_init(struct rtllib_device *ieee);
void rtllib_softmac_free(struct rtllib_device *ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 6e48b31a9afc..c2209c033838 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -2285,8 +2285,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
length -= sizeof(*info_element) + info_element->len;
info_element =
- (struct rtllib_info_element *)&info_element->
- data[info_element->len];
+ (struct rtllib_info_element *)&info_element->data[info_element->len];
}
if (!network->atheros_cap_exist && !network->broadcom_cap_exist &&
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index f9a51f3620d2..25b3d3950a3c 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -730,7 +730,6 @@ EXPORT_SYMBOL(rtllib_act_scanning);
/* called with ieee->lock held */
static void rtllib_start_scan(struct rtllib_device *ieee)
{
- RT_TRACE(COMP_DBG, "===>%s()\n", __func__);
if (ieee->rtllib_ips_leave_wq != NULL)
ieee->rtllib_ips_leave_wq(ieee->dev);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 89ec72b1895a..37715afb0210 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -881,7 +881,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
success:
if (txb) {
- struct cb_desc *tcb_desc = (struct cb_desc *)
+ tcb_desc = (struct cb_desc *)
(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bTxEnableFwCalcDur = 1;
tcb_desc->priority = skb->priority;
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index ab1b8217c4e0..0d67d5880377 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -293,8 +293,6 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
int i, key, key_provided, len;
struct lib80211_crypt_data **crypt;
- netdev_dbg(ieee->dev, "%s()\n", __func__);
-
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
if (key > NUM_WEP_KEYS)
@@ -463,8 +461,6 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
int len, key;
struct lib80211_crypt_data *crypt;
- netdev_dbg(ieee->dev, "%s()\n", __func__);
-
if (ieee->iw_mode == IW_MODE_MONITOR)
return -1;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 39f4ddd86796..7903c777a417 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2249,10 +2249,8 @@ void ieee80211_wake_queue(struct ieee80211_device *ieee);
void ieee80211_stop_queue(struct ieee80211_device *ieee);
struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
-void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
struct iw_point *p);
-void notify_wx_assoc_event(struct ieee80211_device *ieee);
void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 25ea8e1b6b65..ab885353f668 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -33,9 +33,9 @@ short ieee80211_is_shortslot(const struct ieee80211_network *net)
}
EXPORT_SYMBOL(ieee80211_is_shortslot);
-/* returns the total length needed for pleacing the RATE MFIE
+/* returns the total length needed for placing the RATE MFIE
* tag and the EXTENDED RATE MFIE tag if needed.
- * It encludes two bytes per tag for the tag itself and its len
+ * It includes two bytes per tag for the tag itself and its len
*/
static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
{
@@ -50,7 +50,7 @@ static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
return rate_len;
}
-/* pleace the MFIE rate, tag to the memory (double) poined.
+/* place the MFIE rate, tag to the memory (double) pointer.
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
@@ -436,7 +436,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
* So we switch to IEEE80211_LINKED_SCANNING to remember
* that we are still logically linked (not interested in
* new network events, despite for updating the net list,
- * but we are temporarly 'unlinked' as the driver shall
+ * but we are temporarily 'unlinked' as the driver shall
* not filter RX frames and the channel is changing.
 * So the only situation in which we are interested is to check
* if the state become LINKED because of the #1 situation
@@ -1162,7 +1162,7 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
ieee->associate_seq++;
- /* don't scan, and avoid to have the RX path possibily
+ /* don't scan, and avoid having the RX path possibly
* try again to associate. Even do not react to AUTH or
* ASSOC response. Just wait for the retry wq to be scheduled.
* Here we will check if there are good nets to associate
@@ -1373,7 +1373,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
/* if the user set the AP check if match.
 * if the network does not broadcast essid we check the user supplied ANY essid
* if the network does broadcast and the user does not set essid it is OK
- * if the network does broadcast and the user did set essid chech if essid match
+ * if the network does broadcast and the user did set essid check if essid match
*/
if ((apset && apmatch &&
((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
@@ -1911,8 +1911,11 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
ieee->iw_mode == IW_MODE_INFRA) {
- struct ieee80211_network network_resp;
- struct ieee80211_network *network = &network_resp;
+ struct ieee80211_network *network;
+
+ network = kzalloc(sizeof(*network), GFP_KERNEL);
+ if (!network)
+ return -ENOMEM;
errcode = assoc_parse(ieee, skb, &aid);
if (!errcode) {
@@ -1923,7 +1926,6 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
/* Let the register setting defaultly with Legacy station */
if (ieee->qos_support) {
assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
- memset(network, 0, sizeof(*network));
if (ieee80211_parse_info_param(ieee, assoc_resp->info_element,\
rx_stats->len - sizeof(*assoc_resp), \
network, rx_stats)) {
@@ -1949,6 +1951,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
else
ieee80211_associate_abort(ieee);
}
+ kfree(network);
}
break;
@@ -2240,9 +2243,9 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
* time to scan all the chans..) or we have just run up the iface
* after setting ad-hoc mode. So we have to give another try..
* Here, in ibss mode, should be safe to do this without extra care
- * (in bss mode we had to make sure no-one tryed to associate when
+ * (in bss mode we had to make sure no-one tried to associate when
 * we had just checked the ieee->state and we were going to start the
- * scan) beacause in ibss mode the ieee80211_new_net function, when
+ * scan) because in ibss mode the ieee80211_new_net function, when
* finds a good net, just set the ieee->state to IEEE80211_LINKED,
* so, at worst, we waste a bit of time to initiate an unneeded syncro
* scan, that will stop at the first round because it sees the state
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 96e6aaf859ec..8602e3a6c837 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -795,7 +795,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
success:
//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
if (txb) {
- struct cb_desc *tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bTxEnableFwCalcDur = 1;
if (is_multicast_ether_addr(header.addr1))
tcb_desc->bMulticast = 1;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index f48186a89fa1..db26edeccea6 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -902,7 +902,6 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
int rate)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- int ret;
unsigned long flags;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
@@ -915,7 +914,7 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
*(struct net_device **)(skb->cb) = dev;
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
- ret = rtl8192_tx(dev, skb);
+ rtl8192_tx(dev, skb);
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
@@ -2304,8 +2303,6 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
/* set channelplan from eeprom */
priv->ChannelPlan = priv->eeprom_ChannelPlan;
if (bLoad_From_EEPOM) {
- int i;
-
for (i = 0; i < 6; i += 2) {
ret = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
if (ret < 0)
@@ -2369,8 +2366,6 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
priv->EEPROM_Def_Ver = 1;
RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
if (priv->EEPROM_Def_Ver == 0) { /* old eeprom definition */
- int i;
-
if (bLoad_From_EEPOM) {
ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK >> 1));
if (ret < 0)
@@ -2972,12 +2967,10 @@ static RESET_TYPE RxCheckStuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-/**
+/*
* This function is called by Checkforhang to check whether we should
* ask OS to reset driver
*
- * \param pAdapter The adapter context for this miniport
- *
 * Note: NIC with USB interface should not call this function because we
* cannot scan descriptor to judge whether there is tx stuck.
 * Note: This function may be required to be rewritten for Vista OS.
@@ -3697,7 +3690,7 @@ static u8 HwRateToMRate90(bool bIsHT, u8 rate)
return ret_rate;
}
-/**
+/*
* Function: UpdateRxPktTimeStamp
* Overview: Record the TSF time stamp when receiving a packet
*
@@ -4297,7 +4290,7 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
}
-/**
+/*
* Function: UpdateReceivedRateHistogramStatistics
* Overview: Record the received data rate
*
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index bac402b40121..725bf5ca9e34 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2876,7 +2876,8 @@ void dm_check_fsync(struct net_device *dev)
* When Who Remark
* 05/29/2008 amy Create Version 0 porting from windows code.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
void dm_shadow_init(struct net_device *dev)
{
u8 page;
@@ -2915,7 +2916,8 @@ void dm_shadow_init(struct net_device *dev)
* When Who Remark
* 03/06/2008 Jacken Create Version 0.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void dm_init_dynamic_txpower(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 8d3a592f1c35..217e77766442 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -88,7 +88,7 @@ enum _RTL8192Usb_HW {
#define RX_FIFO_THRESHOLD_MASK (BIT(13) | BIT(14) | BIT(15))
#define RX_FIFO_THRESHOLD_SHIFT 13
#define RX_FIFO_THRESHOLD_NONE 7
-#define MAX_RX_DMA_MASK (BIT(8) | BIT(9) | BIT(10))
+#define MAX_RX_DMA_MASK (BIT(8) | BIT(9) | BIT(10))
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
#define RCR_ONLYERLPKT BIT(31) // Early Receiving based on Packet Size.
@@ -159,7 +159,7 @@ enum _RTL8192Usb_HW {
#define BW_OPMODE_20MHZ BIT(2)
BW_OPMODE = 0x300, // Bandwidth operation mode
MSR = 0x303, // Media Status register
-#define MSR_LINK_MASK (BIT(0)|BIT(1))
+#define MSR_LINK_MASK (BIT(0) | BIT(1))
#define MSR_LINK_MANAGED 2
#define MSR_LINK_NONE 0
#define MSR_LINK_SHIFT 0
@@ -221,14 +221,13 @@ enum _RTL8192Usb_HW {
#define RATR_MCS14 0x04000000
#define RATR_MCS15 0x08000000
// ALL CCK Rate
-#define RATE_ALL_CCK RATR_1M|RATR_2M|RATR_55M|RATR_11M
-#define RATE_ALL_OFDM_AG RATR_6M|RATR_9M|RATR_12M|RATR_18M|RATR_24M\
- |RATR_36M|RATR_48M|RATR_54M
-#define RATE_ALL_OFDM_1SS RATR_MCS0|RATR_MCS1|RATR_MCS2|RATR_MCS3 | \
- RATR_MCS4|RATR_MCS5|RATR_MCS6|RATR_MCS7
-#define RATE_ALL_OFDM_2SS RATR_MCS8|RATR_MCS9 |RATR_MCS10|RATR_MCS11| \
- RATR_MCS12|RATR_MCS13|RATR_MCS14|RATR_MCS15
-
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+ RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | RATR_MCS3 |\
+ RATR_MCS4 | RATR_MCS5 | RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | RATR_MCS11 |\
+ RATR_MCS12 | RATR_MCS13 | RATR_MCS14 | RATR_MCS15)
EPROM_CMD = 0xfe58,
#define Cmd9346CR_9356SEL BIT(4)
#define EPROM_CMD_OPERATING_MODE_SHIFT 6
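
[Editor's note: the RATE_ALL_* hunk above wraps each OR-chain in parentheses. Without the outer parentheses a macro that expands to `a | b | c` can bind to neighbouring operators of higher precedence at the use site. A tiny sketch with hypothetical mask names:]

#define EXAMPLE_BAD_MASK   0x01 | 0x02          /* expansion has no grouping   */
#define EXAMPLE_GOOD_MASK  (0x01 | 0x02)

/* (x & EXAMPLE_BAD_MASK)  expands to  x & 0x01 | 0x02  ==  (x & 0x01) | 0x02 */
/* (x & EXAMPLE_GOOD_MASK) expands to  x & (0x01 | 0x02), the intended test   */
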
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 4cece40a92f6..30a320422358 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -250,46 +250,6 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
}
/*-----------------------------------------------------------------------------
- * Function: cmpk_handle_query_config_rx()
- *
- * Overview: The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "Beacon State Element".
- *
- * Input: u8 *pmsg - Message Pointer of the command packet.
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------
- */
-static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
-{
- struct cmpk_query_cfg rx_query_cfg;
-
- /* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that FW use big endian(MIPS) and DRV use little endian in
- * windows OS. So we have to read the content byte by byte or transfer
- * endian type before copy the message copy.
- */
- rx_query_cfg.cfg_action = (pmsg[4] & 0x80) >> 7;
- rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
- rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
- rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
- rx_query_cfg.cfg_offset = pmsg[7];
- rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
- (pmsg[10] << 8) | (pmsg[11] << 0);
- rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
- (pmsg[14] << 8) | (pmsg[15] << 0);
-}
-
-/*-----------------------------------------------------------------------------
* Function: cmpk_count_tx_status()
*
* Overview: Count aggregated tx status from firmwar of one type rx command
@@ -514,7 +474,6 @@ u32 cmpk_message_handle_rx(struct net_device *dev,
break;
case BOTH_QUERY_CONFIG:
- cmpk_handle_query_config_rx(dev, pcmd_buff);
cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
break;
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index eef751d2b12e..37b82553412e 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1185,14 +1185,32 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
u8 *stage, u8 *step, u32 *delay)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- struct sw_chnl_cmd PreCommonCmd[MAX_PRECMD_CNT];
+ struct sw_chnl_cmd *PreCommonCmd;
u32 PreCommonCmdCnt;
- struct sw_chnl_cmd PostCommonCmd[MAX_POSTCMD_CNT];
+ struct sw_chnl_cmd *PostCommonCmd;
u32 PostCommonCmdCnt;
- struct sw_chnl_cmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
+ struct sw_chnl_cmd *RfDependCmd;
u32 RfDependCmdCnt;
struct sw_chnl_cmd *CurrentCmd = NULL;
u8 e_rfpath;
+ bool ret;
+
+ PreCommonCmd = kzalloc(sizeof(*PreCommonCmd) * MAX_PRECMD_CNT, GFP_KERNEL);
+ if (!PreCommonCmd)
+ return false;
+
+ PostCommonCmd = kzalloc(sizeof(*PostCommonCmd) * MAX_POSTCMD_CNT, GFP_KERNEL);
+ if (!PostCommonCmd) {
+ kfree(PreCommonCmd);
+ return false;
+ }
+
+ RfDependCmd = kzalloc(sizeof(*RfDependCmd) * MAX_RFDEPENDCMD_CNT, GFP_KERNEL);
+ if (!RfDependCmd) {
+ kfree(PreCommonCmd);
+ kfree(PostCommonCmd);
+ return false;
+ }
RT_TRACE(COMP_CH, "%s() stage: %d, step: %d, channel: %d\n",
__func__, *stage, *step, channel);
@@ -1201,7 +1219,8 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
/* return true to tell upper caller function this channel
* setting is finished! Or it will in while loop.
*/
- return true;
+ ret = true;
+ goto out;
}
/* FIXME: need to check whether channel is legal or not here */
@@ -1227,7 +1246,8 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
RT_TRACE(COMP_ERR,
"illegal channel for Zebra 8225: %d\n",
channel);
- return true;
+ ret = true;
+ goto out;
}
rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
@@ -1246,7 +1266,8 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
RT_TRACE(COMP_ERR,
"illegal channel for Zebra 8256: %d\n",
channel);
- return true;
+ ret = true;
+ goto out;
}
rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
@@ -1262,7 +1283,8 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
default:
RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
- return true;
+ ret = true;
+ goto out;
}
do {
@@ -1281,7 +1303,8 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
if (CurrentCmd->cmd_id == CMD_ID_END) {
if ((*stage) == 2) {
(*delay) = CurrentCmd->ms_delay;
- return true;
+ ret = true;
+ goto out;
}
(*stage)++;
(*step) = 0;
@@ -1324,7 +1347,14 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
(*delay) = CurrentCmd->ms_delay;
(*step)++;
- return false;
+ ret = false;
+
+out:
+ kfree(PreCommonCmd);
+ kfree(PostCommonCmd);
+ kfree(RfDependCmd);
+
+ return ret;
}
/******************************************************************************
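
[Editor's note: the rtl8192_phy_SwChnlStepByStep() hunk above moves three command arrays off the stack and routes every early return through one exit label so the allocations are always freed. A standalone sketch of that shape, with hypothetical names; it uses kcalloc(), the overflow-checked array variant of the kzalloc(sizeof(*p) * n) call seen in the hunk:]

#include <linux/slab.h>
#include <linux/types.h>

struct example_cmd {                    /* stand-in for the driver's command entry */
        u32 id;
        u32 value;
};

static bool example_build_cmd_tables(unsigned int n_pre, unsigned int n_post)
{
        struct example_cmd *pre, *post;
        bool ret = false;

        pre = kcalloc(n_pre, sizeof(*pre), GFP_KERNEL);
        if (!pre)
                return false;

        post = kcalloc(n_post, sizeof(*post), GFP_KERNEL);
        if (!post)
                goto free_pre;

        /* ... fill the tables and run the channel-switch steps ... */

        ret = true;
        kfree(post);
free_pre:
        kfree(pre);
        return ret;
}
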
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 715f1fe8b472..4eff3fdecdb8 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -29,18 +29,31 @@
#define FWBUFF_ALIGN_SZ 512
#define MAX_DUMP_FWSZ (48 * 1024)
+static void rtl871x_load_fw_fail(struct _adapter *adapter)
+{
+ struct usb_device *udev = adapter->dvobjpriv.pusbdev;
+ struct device *dev = &udev->dev;
+ struct device *parent = dev->parent;
+
+ complete(&adapter->rtl8712_fw_ready);
+
+ dev_err(&udev->dev, "r8712u: Firmware request failed\n");
+
+ if (parent)
+ device_lock(parent);
+
+ device_release_driver(dev);
+
+ if (parent)
+ device_unlock(parent);
+}
+
static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
{
struct _adapter *adapter = context;
if (!firmware) {
- struct usb_device *udev = adapter->dvobjpriv.pusbdev;
- struct usb_interface *usb_intf = adapter->pusb_intf;
-
- dev_err(&udev->dev, "r8712u: Firmware request failed\n");
- usb_put_dev(udev);
- usb_set_intfdata(usb_intf, NULL);
- complete(&adapter->rtl8712_fw_ready);
+ rtl871x_load_fw_fail(adapter);
return;
}
adapter->fw = firmware;
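
[Editor's note: the new rtl871x_load_fw_fail() above completes the firmware-ready completion and then detaches the driver under the parent's device lock. A skeletal sketch of an asynchronous firmware callback with that failure path; the context pointer and field layout are hypothetical:]

#include <linux/device.h>
#include <linux/firmware.h>

static void example_fw_callback(const struct firmware *fw, void *context)
{
        struct device *dev = context;           /* hypothetical: device passed at request time */
        struct device *parent = dev->parent;

        if (!fw) {
                dev_err(dev, "firmware request failed, unbinding\n");
                /* device_release_driver() on a USB interface requires the parent lock */
                if (parent)
                        device_lock(parent);
                device_release_driver(dev);
                if (parent)
                        device_unlock(parent);
                return;
        }

        /* ... program the firmware into the device, then drop the reference ... */
        release_firmware(fw);
}
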
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 0c3ae8495afb..2214aca09730 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -328,8 +328,6 @@ int r8712_init_drv_sw(struct _adapter *padapter)
void r8712_free_drv_sw(struct _adapter *padapter)
{
- struct net_device *pnetdev = padapter->pnetdev;
-
r8712_free_cmd_priv(&padapter->cmdpriv);
r8712_free_evt_priv(&padapter->evtpriv);
r8712_DeInitSwLeds(padapter);
@@ -339,8 +337,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
_r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);
mp871xdeinit(padapter);
- if (pnetdev)
- free_netdev(pnetdev);
}
static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index ff3cb09c57a6..e9294e1ed06e 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -55,7 +55,7 @@ static void check_hw_pbc(struct _adapter *padapter)
/* Here we only set bPbcPressed to true
* After trigger PBC, the variable will be set to false
*/
- DBG_8712("CheckPbcGPIO - PBC is pressed !!!!\n");
+ netdev_dbg(padapter->pnetdev, "CheckPbcGPIO - PBC is pressed !!!!\n");
/* 0 is the default value and it means the application monitors
* the HW PBC doesn't provide its pid to driver.
*/
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index 5901026949f2..d5fc9026b036 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -1820,3 +1820,11 @@ void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction)
break;
}
}
+
+void r8712_flush_led_works(struct _adapter *padapter)
+{
+ struct led_priv *pledpriv = &padapter->ledpriv;
+
+ flush_work(&pledpriv->SwLed0.BlinkWorkItem);
+ flush_work(&pledpriv->SwLed1.BlinkWorkItem);
+}
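
[Editor's note: r8712_flush_led_works() above flushes the two LED work items by name; flushing specific work structs is the pattern that later replaces flush_scheduled_work() in the disconnect path. A minimal sketch with a hypothetical priv layout:]

#include <linux/workqueue.h>

struct example_led_priv {
        struct work_struct blink0;
        struct work_struct blink1;
};

/* Wait for any queued instance of the two blink handlers to finish. */
static void example_flush_led_works(struct example_led_priv *led)
{
        flush_work(&led->blink0);
        flush_work(&led->blink1);
}
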
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index db5c7a487ab3..0ffb30f1af7e 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -1039,8 +1039,9 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
skb_reserve(pkt_copy, 4 - ((addr_t)(pkt_copy->data) % 4));
skb_reserve(pkt_copy, shift_sz);
memcpy(pkt_copy->data, pbuf, tmp_len);
- precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data =
- precvframe->u.hdr.rx_tail = pkt_copy->data;
+ precvframe->u.hdr.rx_head = pkt_copy->data;
+ precvframe->u.hdr.rx_data = pkt_copy->data;
+ precvframe->u.hdr.rx_tail = pkt_copy->data;
precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
recvframe_put(precvframe, tmp_len);
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 116cb812dcb9..84a22eba7ebf 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -482,9 +482,9 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
ptxdesc->txdw1 |= cpu_to_le32((0x01 << 22) &
0x00c00000);
/*KEY_ID when WEP is used;*/
- ptxdesc->txdw1 |= cpu_to_le32((psecuritypriv->
- PrivacyKeyIndex << 17) &
- 0x00060000);
+ ptxdesc->txdw1 |=
+ cpu_to_le32((psecuritypriv->PrivacyKeyIndex << 17) &
+ 0x00060000);
break;
case _TKIP_:
case _TKIP_WTMIC_:
diff --git a/drivers/staging/rtl8712/rtl871x_debug.h b/drivers/staging/rtl8712/rtl871x_debug.h
index 57f2a38cb71c..69c631af2a2a 100644
--- a/drivers/staging/rtl8712/rtl871x_debug.h
+++ b/drivers/staging/rtl8712/rtl871x_debug.h
@@ -127,27 +127,4 @@
#undef _MODULE_DEFINE_
#endif
-#define _dbgdump printk
-
-#define MSG_8712(x, ...) {}
-
-#define DBG_8712(x, ...) {}
-
-#define WRN_8712(x, ...) {}
-
-#define ERR_8712(x, ...) {}
-
-#undef MSG_8712
-#define MSG_8712 _dbgdump
-
-#undef DBG_8712
-#define DBG_8712 _dbgdump
-
-#undef WRN_8712
-#define WRN_8712 _dbgdump
-
-#undef ERR_8712
-#define ERR_8712 _dbgdump
-
#endif /*__RTL871X_DEBUG_H__*/
-
diff --git a/drivers/staging/rtl8712/rtl871x_led.h b/drivers/staging/rtl8712/rtl871x_led.h
index ee19c873cf01..2f0768132ad8 100644
--- a/drivers/staging/rtl8712/rtl871x_led.h
+++ b/drivers/staging/rtl8712/rtl871x_led.h
@@ -112,6 +112,7 @@ struct led_priv {
void r8712_InitSwLeds(struct _adapter *padapter);
void r8712_DeInitSwLeds(struct _adapter *padapter);
void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction);
+void r8712_flush_led_works(struct _adapter *padapter);
#endif
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index ba4a71e91ae0..92b7c9c07df6 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -139,10 +139,8 @@ static struct wlan_network *r8712_find_network(struct __queue *scanned_queue,
return NULL;
spin_lock_irqsave(&scanned_queue->lock, irqL);
phead = &scanned_queue->queue;
- plist = phead->next;
- while (plist != phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
- plist = plist->next;
+ list_for_each(plist, phead) {
+ pnetwork = list_entry(plist, struct wlan_network, list);
if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
}
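
[Editor's note: the r8712_find_network() hunk above replaces the hand-rolled `plist = phead->next; while (plist != phead)` walk with list_for_each() plus list_entry(). A self-contained sketch of that idiom, structure names hypothetical:]

#include <linux/list.h>
#include <linux/string.h>

struct example_network {
        unsigned char mac[6];
        struct list_head list;
};

static struct example_network *example_find(struct list_head *head,
                                            const unsigned char *addr)
{
        struct list_head *pos;
        struct example_network *net;

        list_for_each(pos, head) {
                net = list_entry(pos, struct example_network, list);
                if (!memcmp(net->mac, addr, 6))
                        return net;     /* fine here: nothing is removed while walking */
        }
        return NULL;
}
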
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 31414a960c9e..26fa09b45c90 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -149,26 +149,30 @@ static int mp_start_test(struct _adapter *padapter)
struct mp_priv *pmppriv = &padapter->mppriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- struct wlan_bssid_ex bssid;
+ struct wlan_bssid_ex *bssid;
struct sta_info *psta;
unsigned long length;
unsigned long irqL;
int res = 0;
+ bssid = kzalloc(sizeof(*bssid), GFP_KERNEL);
+ if (!bssid)
+ return -ENOMEM;
+
/* 3 1. initialize a new struct wlan_bssid_ex */
- memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN);
- bssid.Ssid.SsidLength = 16;
- memcpy(bssid.Ssid.Ssid, (unsigned char *)"mp_pseudo_adhoc",
- bssid.Ssid.SsidLength);
- bssid.InfrastructureMode = Ndis802_11IBSS;
- bssid.NetworkTypeInUse = Ndis802_11DS;
- bssid.IELength = 0;
- length = r8712_get_wlan_bssid_ex_sz(&bssid);
+ memcpy(bssid->MacAddress, pmppriv->network_macaddr, ETH_ALEN);
+ bssid->Ssid.SsidLength = 16;
+ memcpy(bssid->Ssid.Ssid, (unsigned char *)"mp_pseudo_adhoc",
+ bssid->Ssid.SsidLength);
+ bssid->InfrastructureMode = Ndis802_11IBSS;
+ bssid->NetworkTypeInUse = Ndis802_11DS;
+ bssid->IELength = 0;
+ length = r8712_get_wlan_bssid_ex_sz(bssid);
if (length % 4) {
/*round up to multiple of 4 bytes.*/
- bssid.Length = ((length >> 2) + 1) << 2;
+ bssid->Length = ((length >> 2) + 1) << 2;
} else {
- bssid.Length = length;
+ bssid->Length = length;
}
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
@@ -185,7 +189,7 @@ static int mp_start_test(struct _adapter *padapter)
tgt_network->network.MacAddress);
if (psta)
r8712_free_stainfo(padapter, psta);
- psta = r8712_alloc_stainfo(&padapter->stapriv, bssid.MacAddress);
+ psta = r8712_alloc_stainfo(&padapter->stapriv, bssid->MacAddress);
if (!psta) {
res = -ENOMEM;
goto end_of_mp_start_test;
@@ -193,13 +197,14 @@ static int mp_start_test(struct _adapter *padapter)
/* 3 3. join pseudo AdHoc */
tgt_network->join_res = 1;
tgt_network->aid = psta->aid = 1;
- memcpy(&tgt_network->network, &bssid, length);
+ memcpy(&tgt_network->network, bssid, length);
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
r8712_os_indicate_connect(padapter);
/* Set to LINKED STATE for MP TRX Testing */
set_fwstate(pmlmepriv, _FW_LINKED);
end_of_mp_start_test:
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
+ kfree(bssid);
return res;
}
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 59fa6664d868..98204493a04c 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -135,135 +135,8 @@ uint oid_rt_get_power_mode_hdl(
struct oid_par_priv *poid_par_priv);
#ifdef _RTL871X_MP_IOCTL_C_ /* CAUTION!!! */
/* This ifdef _MUST_ be left in!! */
-static const struct oid_obj_priv oid_rtl_seg_81_80_00[] = {
- /* 0x00 OID_RT_PRO_RESET_DUT */
- {1, oid_null_function},
- /* 0x01 */
- {1, oid_rt_pro_set_data_rate_hdl},
- /* 0x02 */
- {1, oid_rt_pro_start_test_hdl},
- /* 0x03 */
- {1, oid_rt_pro_stop_test_hdl},
- /* 0x04 OID_RT_PRO_SET_PREAMBLE */
- {1, oid_null_function},
- /* 0x05 OID_RT_PRO_SET_SCRAMBLER */
- {1, oid_null_function},
- /* 0x06 OID_RT_PRO_SET_FILTER_BB */
- {1, oid_null_function},
- /* 0x07 OID_RT_PRO_SET_MANUAL_DIVERS_BB */
- {1, oid_null_function},
- /* 0x08 */
- {1, oid_rt_pro_set_channel_direct_call_hdl},
- /* 0x09 OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL */
- {1, oid_null_function},
- /* 0x0A OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL */
- {1, oid_null_function},
- /* 0x0B OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL */
- {1, oid_rt_pro_set_continuous_tx_hdl},
- /* 0x0C OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS */
- {1, oid_rt_pro_set_single_carrier_tx_hdl},
- /* 0x0D OID_RT_PRO_SET_TX_ANTENNA_BB */
- {1, oid_null_function},
- /* 0x0E */
- {1, oid_rt_pro_set_antenna_bb_hdl},
- /* 0x0F OID_RT_PRO_SET_CR_SCRAMBLER */
- {1, oid_null_function},
- /* 0x10 OID_RT_PRO_SET_CR_NEW_FILTER */
- {1, oid_null_function},
- /* 0x11 OID_RT_PRO_SET_TX_POWER_CONTROL */
- {1, oid_rt_pro_set_tx_power_control_hdl},
- /* 0x12 OID_RT_PRO_SET_CR_TX_CONFIG */
- {1, oid_null_function},
- /* 0x13 OID_RT_PRO_GET_TX_POWER_CONTROL */
- {1, oid_null_function},
- /* 0x14 OID_RT_PRO_GET_CR_SIGNAL_QUALITY */
- {1, oid_null_function},
- /* 0x15 OID_RT_PRO_SET_CR_SETPOINT */
- {1, oid_null_function},
- /* 0x16 OID_RT_PRO_SET_INTEGRATOR */
- {1, oid_null_function},
- /* 0x17 OID_RT_PRO_SET_SIGNAL_QUALITY */
- {1, oid_null_function},
- /* 0x18 OID_RT_PRO_GET_INTEGRATOR */
- {1, oid_null_function},
- /* 0x19 OID_RT_PRO_GET_SIGNAL_QUALITY */
- {1, oid_null_function},
- /* 0x1A OID_RT_PRO_QUERY_EEPROM_TYPE */
- {1, oid_null_function},
- /* 0x1B OID_RT_PRO_WRITE_MAC_ADDRESS */
- {1, oid_null_function},
- /* 0x1C OID_RT_PRO_READ_MAC_ADDRESS */
- {1, oid_null_function},
- /* 0x1D OID_RT_PRO_WRITE_CIS_DATA */
- {1, oid_null_function},
- /* 0x1E OID_RT_PRO_READ_CIS_DATA */
- {1, oid_null_function},
- /* 0x1F OID_RT_PRO_WRITE_POWER_CONTROL */
- {1, oid_null_function}
-};
-
-static const struct oid_obj_priv oid_rtl_seg_81_80_20[] = {
- /* 0x20 OID_RT_PRO_READ_POWER_CONTROL */
- {1, oid_null_function},
- /* 0x21 OID_RT_PRO_WRITE_EEPROM */
- {1, oid_null_function},
- /* 0x22 OID_RT_PRO_READ_EEPROM */
- {1, oid_null_function},
- /* 0x23 */
- {1, oid_rt_pro_reset_tx_packet_sent_hdl},
- /* 0x24 */
- {1, oid_rt_pro_query_tx_packet_sent_hdl},
- /* 0x25 */
- {1, oid_rt_pro_reset_rx_packet_received_hdl},
- /* 0x26 */
- {1, oid_rt_pro_query_rx_packet_received_hdl},
- /* 0x27 */
- {1, oid_rt_pro_query_rx_packet_crc32_error_hdl},
- /* 0x28 OID_RT_PRO_QUERY_CURRENT_ADDRESS */
- {1, oid_null_function},
- /* 0x29 OID_RT_PRO_QUERY_PERMANENT_ADDRESS */
- {1, oid_null_function},
- /* 0x2A OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS */
- {1, oid_null_function},
- /* 0x2B OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX */
- {1, oid_rt_pro_set_carrier_suppression_tx_hdl},
- /* 0x2C OID_RT_PRO_RECEIVE_PACKET */
- {1, oid_null_function},
- /* 0x2D OID_RT_PRO_WRITE_EEPROM_BYTE */
- {1, oid_null_function},
- /* 0x2E OID_RT_PRO_READ_EEPROM_BYTE */
- {1, oid_null_function},
- /* 0x2F */
- {1, oid_rt_pro_set_modulation_hdl}
-};
-
-static const struct oid_obj_priv oid_rtl_seg_81_80_40[] = {
- {1, oid_null_function}, /* 0x40 */
- {1, oid_null_function}, /* 0x41 */
- {1, oid_null_function}, /* 0x42 */
- {1, oid_rt_pro_set_single_tone_tx_hdl}, /* 0x43 */
- {1, oid_null_function}, /* 0x44 */
- {1, oid_null_function} /* 0x45 */
-};
-
-static const struct oid_obj_priv oid_rtl_seg_81_80_80[] = {
- {1, oid_null_function}, /* 0x80 OID_RT_DRIVER_OPTION */
- {1, oid_null_function}, /* 0x81 OID_RT_RF_OFF */
- {1, oid_null_function} /* 0x82 OID_RT_AUTH_STATUS */
-
-};
-
-static const struct oid_obj_priv oid_rtl_seg_81_85[] = {
- /* 0x00 OID_RT_WIRELESS_MODE */
- {1, oid_rt_wireless_mode_hdl}
-};
#else /* _RTL871X_MP_IOCTL_C_ */
-extern struct oid_obj_priv oid_rtl_seg_81_80_00[32];
-extern struct oid_obj_priv oid_rtl_seg_81_80_20[16];
-extern struct oid_obj_priv oid_rtl_seg_81_80_40[6];
-extern struct oid_obj_priv oid_rtl_seg_81_80_80[3];
-extern struct oid_obj_priv oid_rtl_seg_81_85[1];
extern struct oid_obj_priv oid_rtl_seg_81_87[5];
extern struct oid_obj_priv oid_rtl_seg_87_11_00[32];
extern struct oid_obj_priv oid_rtl_seg_87_11_20[5];
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 23cff43437e2..cd6d9ff0bebc 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -224,3 +224,11 @@ void r8712_unregister_cmd_alive(struct _adapter *padapter)
}
mutex_unlock(&pwrctrl->mutex_lock);
}
+
+void r8712_flush_rwctrl_works(struct _adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
+
+ flush_work(&pwrctrl->SetPSModeWorkItem);
+ flush_work(&pwrctrl->rpwm_workitem);
+}
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index bf6623cfaf27..b35b9c7920eb 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -108,5 +108,6 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
void r8712_set_ps_mode(struct _adapter *padapter, uint ps_mode,
uint smart_ps);
void r8712_set_rpwm(struct _adapter *padapter, u8 val8);
+void r8712_flush_rwctrl_works(struct _adapter *padapter);
#endif /* __RTL871X_PWRCTRL_H_ */
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index db2add576418..c23f6b376111 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -374,7 +374,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
check_fwstate(pmlmepriv, _FW_LINKED)) {
/* if NULL-frame, drop packet */
- if ((GetFrameSubType(ptr)) == IEEE80211_STYPE_NULLFUNC)
+ if ((GetFrameSubType(ptr)) == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC))
return _FAIL;
/* drop QoS-SubType Data, including QoS NULL,
* excluding QoS-Data
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 63d63f7be481..e0a1c30a8fe6 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -1045,9 +1045,9 @@ static void aes_cipher(u8 *key, uint hdrlen,
else
a4_exists = 1;
- if ((frtype == IEEE80211_STYPE_DATA_CFACK) ||
- (frtype == IEEE80211_STYPE_DATA_CFPOLL) ||
- (frtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
+ if ((frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACK)) ||
+ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFPOLL)) ||
+ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACKPOLL))) {
qc_exists = 1;
if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
@@ -1225,9 +1225,9 @@ static void aes_decipher(u8 *key, uint hdrlen,
a4_exists = 0;
else
a4_exists = 1;
- if ((frtype == IEEE80211_STYPE_DATA_CFACK) ||
- (frtype == IEEE80211_STYPE_DATA_CFPOLL) ||
- (frtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
+ if ((frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACK)) ||
+ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFPOLL)) ||
+ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACKPOLL))) {
qc_exists = 1;
if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
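
[Editor's note: the rtl871x_recv.c and rtl871x_security.c hunks above compare against IEEE80211_FTYPE_DATA | IEEE80211_STYPE_... because the IEEE80211_STYPE_* constants alone only cover the subtype bits, while the value being tested carries both the type and subtype fields of frame_control. A short sketch of the combined check, assuming the standard masks from include/linux/ieee80211.h; mainline also offers ieee80211_is_nullfunc() for the same test:]

#include <linux/ieee80211.h>

/* fc is the little-endian frame_control field of a received frame. */
static bool example_is_null_data(__le16 fc)
{
        u16 type = le16_to_cpu(fc) &
                   (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);

        /* true only when both the type and the subtype bits agree */
        return type == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
}
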
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 706e9db0fc5b..2c806a0105bf 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -257,7 +257,6 @@ struct sta_info *r8712_get_bcmc_stainfo(struct _adapter *padapter)
return r8712_get_stainfo(pstapriv, bc_addr);
}
-
u8 r8712_access_ctrl(struct wlan_acl_pool *pacl_list, u8 *mac_addr)
{
return true;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index bb4de927fb02..090345bad223 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -846,7 +846,6 @@ static inline struct tx_servq *get_sta_pending(struct _adapter *padapter,
struct __queue **ppstapending,
struct sta_info *psta, sint up)
{
-
struct tx_servq *ptxservq;
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index dc21e7743349..505ebeb643dc 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -36,7 +36,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
static void r871xu_dev_remove(struct usb_interface *pusb_intf);
static const struct usb_device_id rtl871x_usb_id_tbl[] = {
-
/* RTL8188SU */
/* Realtek */
{USB_DEVICE(0x0BDA, 0x8171)},
@@ -361,7 +360,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 1. */
pnetdev = r8712_init_netdev();
if (!pnetdev)
- goto error;
+ goto put_dev;
padapter = netdev_priv(pnetdev);
disable_ht_for_spec_devid(pdid, padapter);
pdvobjpriv = &padapter->dvobjpriv;
@@ -380,17 +379,15 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 3.
* initialize the dvobj_priv
*/
- if (!padapter->dvobj_init) {
- goto error;
- } else {
- status = padapter->dvobj_init(padapter);
- if (status != _SUCCESS)
- goto error;
- }
+
+ status = padapter->dvobj_init(padapter);
+ if (status != _SUCCESS)
+ goto free_netdev;
+
/* step 4. */
status = r8712_init_drv_sw(padapter);
if (status)
- goto error;
+ goto dvobj_deinit;
/* step 5. read efuse/eeprom data and get mac_addr */
{
int i, offset;
@@ -570,17 +567,20 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
}
/* step 6. Load the firmware asynchronously */
if (rtl871x_load_fw(padapter))
- goto error;
+ goto deinit_drv_sw;
spin_lock_init(&padapter->lock_rx_ff0_filter);
mutex_init(&padapter->mutex_start);
return 0;
-error:
+
+deinit_drv_sw:
+ r8712_free_drv_sw(padapter);
+dvobj_deinit:
+ padapter->dvobj_deinit(padapter);
+free_netdev:
+ free_netdev(pnetdev);
+put_dev:
usb_put_dev(udev);
usb_set_intfdata(pusb_intf, NULL);
- if (padapter && padapter->dvobj_deinit)
- padapter->dvobj_deinit(padapter);
- if (pnetdev)
- free_netdev(pnetdev);
return -ENODEV;
}
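
[Editor's note: the r871xu_drv_init() hunk above turns a single catch-all error label into a ladder of labels, so each failure point unwinds exactly the setup that preceded it. A generic sketch of the pattern; all helper names are hypothetical:]

int example_setup_a(void);
int example_setup_b(void);
int example_setup_c(void);
void example_teardown_a(void);
void example_teardown_b(void);

static int example_probe(void)
{
        int err;

        err = example_setup_a();
        if (err)
                return err;

        err = example_setup_b();
        if (err)
                goto undo_a;

        err = example_setup_c();
        if (err)
                goto undo_b;

        return 0;

undo_b:
        example_teardown_b();
undo_a:
        example_teardown_a();
        return err;
}
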
@@ -591,34 +591,30 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
struct usb_device *udev = interface_to_usbdev(pusb_intf);
+ struct _adapter *padapter = netdev_priv(pnetdev);
+
+ /* never exit with a firmware callback pending */
+ wait_for_completion(&padapter->rtl8712_fw_ready);
+ usb_set_intfdata(pusb_intf, NULL);
+ release_firmware(padapter->fw);
+ if (drvpriv.drv_registered)
+ padapter->surprise_removed = true;
+ if (pnetdev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(pnetdev); /* will call netdev_close() */
+ r8712_flush_rwctrl_works(padapter);
+ r8712_flush_led_works(padapter);
+ udelay(1);
+ /* Stop driver mlme relation timer */
+ r8712_stop_drv_timers(padapter);
+ r871x_dev_unload(padapter);
+ r8712_free_drv_sw(padapter);
+ free_netdev(pnetdev);
+
+ /* decrease the reference count of the usb device structure
+ * when disconnect
+ */
+ usb_put_dev(udev);
- if (pnetdev) {
- struct _adapter *padapter = netdev_priv(pnetdev);
-
- /* never exit with a firmware callback pending */
- wait_for_completion(&padapter->rtl8712_fw_ready);
- pnetdev = usb_get_intfdata(pusb_intf);
- usb_set_intfdata(pusb_intf, NULL);
- if (!pnetdev)
- goto firmware_load_fail;
- release_firmware(padapter->fw);
- if (drvpriv.drv_registered)
- padapter->surprise_removed = true;
- if (pnetdev->reg_state != NETREG_UNINITIALIZED)
- unregister_netdev(pnetdev); /* will call netdev_close() */
- flush_scheduled_work();
- udelay(1);
- /* Stop driver mlme relation timer */
- r8712_stop_drv_timers(padapter);
- r871x_dev_unload(padapter);
- r8712_free_drv_sw(padapter);
-
- /* decrease the reference count of the usb device structure
- * when disconnect
- */
- usb_put_dev(udev);
- }
-firmware_load_fail:
/* If we didn't unplug usb dongle and remove/insert module, driver
* fails on sitesurvey for the first time when device is up.
* Reset usb port for sitesurvey fail issue.
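
[Editor's note: the reworked r871xu_dev_remove() above waits on rtl8712_fw_ready before any teardown, so disconnect cannot race with the asynchronous firmware callback. A minimal completion-based sketch of that synchronization, names hypothetical:]

#include <linux/completion.h>

struct example_adapter {
        struct completion fw_ready;     /* init_completion() is called at probe time */
};

/* Firmware callback: always signal, on success and on failure. */
static void example_fw_done(struct example_adapter *adapter)
{
        complete(&adapter->fw_ready);
}

/* Disconnect path: never free the adapter while the callback may still run. */
static void example_remove(struct example_adapter *adapter)
{
        wait_for_completion(&adapter->fw_ready);
        /* ... unregister netdev, flush work items, free resources ... */
}
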
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 1f67d86c606f..90d34cf9d2ff 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -62,7 +62,6 @@ sint r8712_endofpktfile(struct pkt_file *pfile)
return (pfile->pkt_len == 0);
}
-
void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
struct ethhdr etherhdr;
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index a88467334dac..7eae820eae3b 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -5,6 +5,7 @@ config RTL8723BS
depends on m
select WIRELESS_EXT
select WEXT_PRIV
+ select CRYPTO_LIB_ARC4
help
This option enables support for RTL8723BS SDIO drivers, such as
the wifi found on the 1st gen Intel Compute Stick, the CHIP
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index dfe410283ca0..fe45200ff53c 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -28,14 +28,12 @@ r8723bs-y = \
hal/HalPwrSeqCmd.o \
hal/odm.o \
hal/odm_CfoTracking.o \
- hal/odm_debug.o \
hal/odm_DIG.o \
hal/odm_DynamicBBPowerSaving.o \
hal/odm_DynamicTxPower.o \
hal/odm_EdcaTurboCheck.o \
hal/odm_HWConfig.o \
hal/odm_NoiseMonitor.o \
- hal/odm_PathDiv.o \
hal/odm_RegConfig8723B.o \
hal/odm_RTL8723B.o \
hal/rtl8723b_cmd.o \
diff --git a/drivers/staging/rtl8723bs/TODO b/drivers/staging/rtl8723bs/TODO
index afa620ceb2d8..3d8f5a634a10 100644
--- a/drivers/staging/rtl8723bs/TODO
+++ b/drivers/staging/rtl8723bs/TODO
@@ -1,6 +1,4 @@
TODO:
-- find and remove remaining code valid only for 5 GHz. Most of the obvious
- ones have been removed, but things like channel > 14 still exist.
- find and remove any code for other chips that is left over
- convert any remaining unusual variable types
- find codes that can use %pM and %Nph formatting
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 4a9bd4825fab..a6fcb5e9d637 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -59,116 +59,112 @@ static void update_BCNTIM(struct adapter *padapter)
unsigned char *pie = pnetwork_mlmeext->IEs;
/* update TIM IE */
- /* if (pstapriv->tim_bitmap) */
- if (true) {
- u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
- __le16 tim_bitmap_le;
- uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
-
- tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
-
- p = rtw_get_ie(pie + _FIXED_IE_LENGTH_,
- WLAN_EID_TIM,
- &tim_ielen,
- pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_
- );
- if (p && tim_ielen > 0) {
- tim_ielen += 2;
+ u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
+ __le16 tim_bitmap_le;
+ uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
- premainder_ie = p + tim_ielen;
+ tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
- tim_ie_offset = (signed int)(p - pie);
+ p = rtw_get_ie(pie + _FIXED_IE_LENGTH_,
+ WLAN_EID_TIM,
+ &tim_ielen,
+ pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_
+ );
+ if (p && tim_ielen > 0) {
+ tim_ielen += 2;
- remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
+ premainder_ie = p + tim_ielen;
- /* append TIM IE from dst_ie offset */
- dst_ie = p;
- } else {
- tim_ielen = 0;
+ tim_ie_offset = (signed int)(p - pie);
- /* calculate head_len */
- offset = _FIXED_IE_LENGTH_;
+ remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
- /* get ssid_ie len */
- p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
- WLAN_EID_SSID,
- &tmp_len,
- (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
- );
- if (p)
- offset += tmp_len + 2;
+ /* append TIM IE from dst_ie offset */
+ dst_ie = p;
+ } else {
+ tim_ielen = 0;
- /* get supported rates len */
- p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
- WLAN_EID_SUPP_RATES, &tmp_len,
- (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
- );
- if (p)
- offset += tmp_len + 2;
+ /* calculate head_len */
+ offset = _FIXED_IE_LENGTH_;
- /* DS Parameter Set IE, len =3 */
- offset += 3;
+ /* get ssid_ie len */
+ p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
+ WLAN_EID_SSID,
+ &tmp_len,
+ (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p)
+ offset += tmp_len + 2;
- premainder_ie = pie + offset;
+ /* get supported rates len */
+ p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
+ WLAN_EID_SUPP_RATES, &tmp_len,
+ (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p)
+ offset += tmp_len + 2;
- remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
+ /* DS Parameter Set IE, len =3 */
+ offset += 3;
- /* append TIM IE from offset */
- dst_ie = pie + offset;
- }
+ premainder_ie = pie + offset;
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = rtw_malloc(remainder_ielen);
- if (pbackup_remainder_ie && premainder_ie)
- memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
- }
+ remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
- *dst_ie++ = WLAN_EID_TIM;
+ /* append TIM IE from offset */
+ dst_ie = pie + offset;
+ }
- if ((pstapriv->tim_bitmap & 0xff00) && (pstapriv->tim_bitmap & 0x00fe))
- tim_ielen = 5;
- else
- tim_ielen = 4;
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
- *dst_ie++ = tim_ielen;
+ *dst_ie++ = WLAN_EID_TIM;
- *dst_ie++ = 0;/* DTIM count */
- *dst_ie++ = 1;/* DTIM period */
+ if ((pstapriv->tim_bitmap & 0xff00) && (pstapriv->tim_bitmap & 0x00fe))
+ tim_ielen = 5;
+ else
+ tim_ielen = 4;
- if (pstapriv->tim_bitmap & BIT(0))/* for bc/mc frames */
- *dst_ie++ = BIT(0);/* bitmap ctrl */
- else
- *dst_ie++ = 0;
+ *dst_ie++ = tim_ielen;
- if (tim_ielen == 4) {
- __le16 pvb;
+ *dst_ie++ = 0;/* DTIM count */
+ *dst_ie++ = 1;/* DTIM period */
- if (pstapriv->tim_bitmap & 0xff00)
- pvb = cpu_to_le16(pstapriv->tim_bitmap >> 8);
- else
- pvb = tim_bitmap_le;
+ if (pstapriv->tim_bitmap & BIT(0))/* for bc/mc frames */
+ *dst_ie++ = BIT(0);/* bitmap ctrl */
+ else
+ *dst_ie++ = 0;
- *dst_ie++ = le16_to_cpu(pvb);
+ if (tim_ielen == 4) {
+ __le16 pvb;
- } else if (tim_ielen == 5) {
- memcpy(dst_ie, &tim_bitmap_le, 2);
- dst_ie += 2;
- }
+ if (pstapriv->tim_bitmap & 0xff00)
+ pvb = cpu_to_le16(pstapriv->tim_bitmap >> 8);
+ else
+ pvb = tim_bitmap_le;
- /* copy remainder IE */
- if (pbackup_remainder_ie) {
- memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+ *dst_ie++ = le16_to_cpu(pvb);
- kfree(pbackup_remainder_ie);
- }
+ } else if (tim_ielen == 5) {
+ memcpy(dst_ie, &tim_bitmap_le, 2);
+ dst_ie += 2;
+ }
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
- offset = (uint)(dst_ie - pie);
- pnetwork_mlmeext->IELength = offset + remainder_ielen;
+ kfree(pbackup_remainder_ie);
}
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork_mlmeext->IELength = offset + remainder_ielen;
}
-u8 chk_sta_is_alive(struct sta_info *psta);
-u8 chk_sta_is_alive(struct sta_info *psta)
+static u8 chk_sta_is_alive(struct sta_info *psta)
{
sta_update_last_rx_pkts(psta);
@@ -177,7 +173,7 @@ u8 chk_sta_is_alive(struct sta_info *psta)
void expire_timeout_chk(struct adapter *padapter)
{
- struct list_head *phead, *plist;
+ struct list_head *phead, *plist, *tmp;
u8 updated = false;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -188,13 +184,9 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
phead = &pstapriv->auth_list;
- plist = get_next(phead);
-
/* check auth_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, auth_list);
-
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ psta = list_entry(plist, struct sta_info, auth_list);
if (psta->expire_to > 0) {
psta->expire_to--;
@@ -217,12 +209,9 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
-
/* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ psta = list_entry(plist, struct sta_info, asoc_list);
if (chk_sta_is_alive(psta) || !psta->expire_to) {
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
@@ -352,11 +341,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
shortGIrate = query_ra_short_GI(psta);
if (pcur_network->Configuration.DSConfig > 14) {
- if (tx_ra_bitmap & 0xffff000)
- sta_band |= WIRELESS_11_5N;
-
- if (tx_ra_bitmap & 0xff0)
- sta_band |= WIRELESS_11A;
+ sta_band |= WIRELESS_INVALID;
} else {
if (tx_ra_bitmap & 0xffff000)
sta_band |= WIRELESS_11_24N;
@@ -422,7 +407,7 @@ void update_bmc_sta(struct adapter *padapter)
} else if (network_type == WIRELESS_INVALID) { /* error handling */
if (pcur_network->Configuration.DSConfig > 14)
- network_type = WIRELESS_11A;
+ network_type = WIRELESS_INVALID;
else
network_type = WIRELESS_11B;
}
@@ -653,7 +638,7 @@ static void update_hw_ht_param(struct adapter *padapter)
/* pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3; */
}
-void start_bss_network(struct adapter *padapter, u8 *pbuf)
+void start_bss_network(struct adapter *padapter)
{
u8 *p;
u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
@@ -1125,9 +1110,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
case WIRELESS_11BG_24N:
pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
break;
- case WIRELESS_11A:
- pbss_network->NetworkTypeInUse = Ndis802_11OFDM5;
- break;
default:
pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
break;
@@ -1210,11 +1192,8 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
-
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ list_for_each(plist, phead) {
+ paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid == true) {
@@ -1256,7 +1235,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
- struct list_head *plist, *phead;
+ struct list_head *plist, *phead, *tmp;
struct rtw_wlan_acl_node *paclnode;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
@@ -1266,11 +1245,8 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
-
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
if (
!memcmp(paclnode->addr, addr, ETH_ALEN) ||
@@ -1716,13 +1692,9 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
-
/* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = get_next(plist);
+ list_for_each(plist, phead) {
+ psta = list_entry(plist, struct sta_info, asoc_list);
VCS_update(padapter, psta);
}
@@ -1960,7 +1932,7 @@ u8 ap_free_sta(
void rtw_sta_flush(struct adapter *padapter)
{
- struct list_head *phead, *plist;
+ struct list_head *phead, *plist, *tmp;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1972,13 +1944,9 @@ void rtw_sta_flush(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
-
/* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ psta = list_entry(plist, struct sta_info, asoc_list);
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
@@ -2039,7 +2007,6 @@ void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
/* restore hw setting from sw data structures */
void rtw_ap_restore_network(struct adapter *padapter)
{
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta;
@@ -2058,7 +2025,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
pmlmeext->cur_bwmode
);
- start_bss_network(padapter, (u8 *)&mlmepriv->cur_network.network);
+ start_bss_network(padapter);
if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
(padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
@@ -2075,13 +2042,10 @@ void rtw_ap_restore_network(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
-
- while (phead != plist) {
+ list_for_each(plist, phead) {
int stainfo_offset;
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ psta = list_entry(plist, struct sta_info, asoc_list);
stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
if (stainfo_offset_valid(stainfo_offset))
@@ -2160,7 +2124,7 @@ void start_ap_mode(struct adapter *padapter)
void stop_ap_mode(struct adapter *padapter)
{
- struct list_head *phead, *plist;
+ struct list_head *phead, *plist, *tmp;
struct rtw_wlan_acl_node *paclnode;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2184,10 +2148,8 @@ void stop_ap_mode(struct adapter *padapter)
/* for ACL */
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
if (paclnode->valid) {
paclnode->valid = false;
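
[Editor's note: several rtw_ap.c hunks above switch to list_for_each_safe() where entries are unlinked inside the loop, for example via list_del_init(&psta->asoc_list); the _safe variant caches the next pointer so removing the current node does not break the walk. A compact sketch with hypothetical structures:]

#include <linux/list.h>

struct example_node {
        int expired;
        struct list_head link;
};

static void example_reap(struct list_head *head)
{
        struct list_head *pos, *tmp;
        struct example_node *node;

        /* tmp already points at the next element, so list_del_init() below is safe */
        list_for_each_safe(pos, tmp, head) {
                node = list_entry(pos, struct example_node, link);
                if (node->expired)
                        list_del_init(&node->link);
        }
}
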
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index e1a8f8b47edd..04956ccf485c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_CMD_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_btcoex.h>
@@ -157,9 +155,9 @@ static struct cmd_hdl wlancmds[] = {
};
/*
-Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
-No irqsave is necessary.
-*/
+ * Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
+ * No irqsave is necessary.
+ */
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
@@ -225,6 +223,7 @@ void _rtw_free_evt_priv(struct evt_priv *pevtpriv)
while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+
if (c2h && c2h != (void *)pevtpriv)
kfree(c2h);
}
@@ -243,14 +242,14 @@ void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
}
/*
-Calling Context:
-
-rtw_enqueue_cmd can only be called between kernel thread,
-since only spin_lock is used.
-
-ISR/Call-Back functions can't call this sub-function.
-
-*/
+ * Calling Context:
+ *
+ * rtw_enqueue_cmd can only be called between kernel thread,
+ * since only spin_lock is used.
+ *
+ * ISR/Call-Back functions can't call this sub-function.
+ *
+ */
int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
@@ -529,10 +528,11 @@ post_process:
}
/*
-rtw_sitesurvey_cmd(~)
- ### NOTE:#### (!!!!)
- MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
-*/
+ * rtw_sitesurvey_cmd(~)
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
+
u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
struct rtw_ieee80211_channel *ch, int ch_num)
{
@@ -565,6 +565,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
/* prepare ssid list */
if (ssid) {
int i;
+
for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
if (ssid[i].SsidLength) {
memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
@@ -576,6 +577,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
/* prepare channel list */
if (ch) {
int i;
+
for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
@@ -671,7 +673,7 @@ int rtw_startbss_cmd(struct adapter *padapter, int flags)
if (flags & RTW_CMDF_DIRECTLY) {
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- start_bss_network(padapter, (u8 *)&(padapter->mlmepriv.cur_network.network));
+ start_bss_network(padapter);
} else {
/* need enqueue, prepare cmd_obj and enqueue */
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
@@ -695,7 +697,7 @@ int rtw_startbss_cmd(struct adapter *padapter, int flags)
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
if (res == _SUCCESS && (flags & RTW_CMDF_WAIT_ACK)) {
- rtw_sctx_wait(&sctx, __func__);
+ rtw_sctx_wait(&sctx);
if (mutex_lock_interruptible(&pcmdpriv->sctx_mutex) == 0) {
if (sctx.status == RTW_SCTX_SUBMITTED)
pcmd->sctx = NULL;
@@ -1337,6 +1339,7 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
static void dynamic_chk_wk_hdl(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv;
+
pmlmepriv = &(padapter->mlmepriv);
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
index 79fd968bb147..576b039f741c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -10,60 +10,56 @@
#include <rtw_debug.h>
#include <hal_btcoex.h>
-u32 GlobalDebugLevel = _drv_err_;
-
#include <rtw_version.h>
-void sd_f0_reg_dump(void *sel, struct adapter *adapter)
+static void dump_4_regs(struct adapter *adapter, int offset)
{
+ u32 reg[4];
int i;
- for (i = 0x0; i <= 0xff; i++) {
- if (i%16 == 0)
- netdev_dbg(adapter->pnetdev, "0x%02x ", i);
-
- DBG_871X_SEL(sel, "%02x ", rtw_sd_f0_read8(adapter, i));
+ for (i = 0; i < 4; i++)
+ reg[i] = rtw_read32(adapter, offset + i);
- if (i%16 == 15)
- DBG_871X_SEL(sel, "\n");
- else if (i%8 == 7)
- DBG_871X_SEL(sel, "\t");
- }
+ netdev_dbg(adapter->pnetdev, "0x%03x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, reg[0], reg[1], reg[2], reg[3]);
}
-void mac_reg_dump(void *sel, struct adapter *adapter)
+void mac_reg_dump(struct adapter *adapter)
{
- int i, j = 1;
+ int i;
netdev_dbg(adapter->pnetdev, "======= MAC REG =======\n");
- for (i = 0x0; i < 0x800; i += 4) {
- if (j%4 == 1)
- netdev_dbg(adapter->pnetdev, "0x%03x", i);
- DBG_871X_SEL(sel, " 0x%08x ", rtw_read32(adapter, i));
- if ((j++)%4 == 0)
- DBG_871X_SEL(sel, "\n");
- }
+ for (i = 0x0; i < 0x800; i += 4)
+ dump_4_regs(adapter, i);
}
-void bb_reg_dump(void *sel, struct adapter *adapter)
+void bb_reg_dump(struct adapter *adapter)
{
- int i, j = 1;
+ int i;
netdev_dbg(adapter->pnetdev, "======= BB REG =======\n");
- for (i = 0x800; i < 0x1000 ; i += 4) {
- if (j%4 == 1)
- netdev_dbg(adapter->pnetdev, "0x%03x", i);
- DBG_871X_SEL(sel, " 0x%08x ", rtw_read32(adapter, i));
- if ((j++)%4 == 0)
- DBG_871X_SEL(sel, "\n");
- }
+
+ for (i = 0x800; i < 0x1000 ; i += 4)
+ dump_4_regs(adapter, i);
+}
+
+static void dump_4_rf_regs(struct adapter *adapter, int path, int offset)
+{
+ u8 reg[4];
+ int i;
+
+ for (i = 0; i < 4; i++)
+ reg[i] = rtw_hal_read_rfreg(adapter, path, offset + i,
+ 0xffffffff);
+
+ netdev_dbg(adapter->pnetdev, "0x%02x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, reg[0], reg[1], reg[2], reg[3]);
}
-void rf_reg_dump(void *sel, struct adapter *adapter)
+void rf_reg_dump(struct adapter *adapter)
{
- int i, j = 1, path;
- u32 value;
+ int i, path;
u8 rf_type = 0;
u8 path_nums = 0;
@@ -77,13 +73,7 @@ void rf_reg_dump(void *sel, struct adapter *adapter)
for (path = 0; path < path_nums; path++) {
netdev_dbg(adapter->pnetdev, "RF_Path(%x)\n", path);
- for (i = 0; i < 0x100; i++) {
- value = rtw_hal_read_rfreg(adapter, path, i, 0xffffffff);
- if (j%4 == 1)
- netdev_dbg(adapter->pnetdev, "0x%02x ", i);
- DBG_871X_SEL(sel, " 0x%08x ", value);
- if ((j++)%4 == 0)
- DBG_871X_SEL(sel, "\n");
- }
+ for (i = 0; i < 0x100; i++)
+ dump_4_rf_regs(adapter, path, i);
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_eeprom.c b/drivers/staging/rtl8723bs/core/rtw_eeprom.c
deleted file mode 100644
index be0eda1604d0..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_eeprom.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#define _RTW_EEPROM_C_
-
-#include <drv_conf.h>
-#include <osdep_service.h>
-#include <drv_types.h>
-
-void up_clk(_adapter *padapter, u16 *x)
-{
-_func_enter_;
- *x = *x | _EESK;
- rtw_write8(padapter, EE_9346CR, (u8)*x);
- udelay(CLOCK_RATE);
-
-_func_exit_;
-
-}
-
-void down_clk(_adapter *padapter, u16 *x)
-{
-_func_enter_;
- *x = *x & ~_EESK;
- rtw_write8(padapter, EE_9346CR, (u8)*x);
- udelay(CLOCK_RATE);
-_func_exit_;
-}
-
-void shift_out_bits(_adapter *padapter, u16 data, u16 count)
-{
- u16 x, mask;
-_func_enter_;
-
- if (padapter->bSurpriseRemoved)
- goto out;
-
- mask = 0x01 << (count - 1);
- x = rtw_read8(padapter, EE_9346CR);
-
- x &= ~(_EEDO | _EEDI);
-
- do {
- x &= ~_EEDI;
- if (data & mask)
- x |= _EEDI;
- if (padapter->bSurpriseRemoved)
- goto out;
-
- rtw_write8(padapter, EE_9346CR, (u8)x);
- udelay(CLOCK_RATE);
- up_clk(padapter, &x);
- down_clk(padapter, &x);
- mask = mask >> 1;
- } while (mask);
- if (padapter->bSurpriseRemoved)
- goto out;
-
- x &= ~_EEDI;
- rtw_write8(padapter, EE_9346CR, (u8)x);
-out:
-_func_exit_;
-}
-
-u16 shift_in_bits(_adapter *padapter)
-{
- u16 x, d = 0, i;
-_func_enter_;
- if (padapter->bSurpriseRemoved)
- goto out;
-
- x = rtw_read8(padapter, EE_9346CR);
-
- x &= ~(_EEDO | _EEDI);
- d = 0;
-
- for (i = 0; i < 16; i++) {
- d = d << 1;
- up_clk(padapter, &x);
- if (padapter->bSurpriseRemoved)
- goto out;
-
- x = rtw_read8(padapter, EE_9346CR);
-
- x &= ~(_EEDI);
- if (x & _EEDO)
- d |= 1;
-
- down_clk(padapter, &x);
- }
-out:
-_func_exit_;
-
- return d;
-}
-
-void standby(_adapter *padapter)
-{
- u8 x;
-_func_enter_;
- x = rtw_read8(padapter, EE_9346CR);
-
- x &= ~(_EECS | _EESK);
- rtw_write8(padapter, EE_9346CR, x);
-
- udelay(CLOCK_RATE);
- x |= _EECS;
- rtw_write8(padapter, EE_9346CR, x);
- udelay(CLOCK_RATE);
-_func_exit_;
-}
-
-void eeprom_clean(_adapter *padapter)
-{
- u16 x;
-_func_enter_;
- if (padapter->bSurpriseRemoved)
- goto out;
-
- x = rtw_read8(padapter, EE_9346CR);
- if (padapter->bSurpriseRemoved)
- goto out;
-
- x &= ~(_EECS | _EEDI);
- rtw_write8(padapter, EE_9346CR, (u8)x);
- if (padapter->bSurpriseRemoved)
- goto out;
-
- up_clk(padapter, &x);
- if (padapter->bSurpriseRemoved)
- goto out;
-
- down_clk(padapter, &x);
-out:
-_func_exit_;
-}
-
-u16 eeprom_read16(_adapter *padapter, u16 reg) /*ReadEEprom*/
-{
-
- u16 x;
- u16 data = 0;
-
-_func_enter_;
-
- if (padapter->bSurpriseRemoved)
- goto out;
-
- /* select EEPROM, reset bits, set _EECS*/
- x = rtw_read8(padapter, EE_9346CR);
-
- if (padapter->bSurpriseRemoved)
- goto out;
-
- x &= ~(_EEDI | _EEDO | _EESK | _EEM0);
- x |= _EEM1 | _EECS;
- rtw_write8(padapter, EE_9346CR, (unsigned char)x);
-
- /* write the read opcode and register number in that order*/
- /* The opcode is 3bits in length, reg is 6 bits long*/
- shift_out_bits(padapter, EEPROM_READ_OPCODE, 3);
- shift_out_bits(padapter, reg, padapter->EepromAddressSize);
-
- /* Now read the data (16 bits) in from the selected EEPROM word*/
- data = shift_in_bits(padapter);
-
- eeprom_clean(padapter);
-out:
-_func_exit_;
- return data;
-
-
-}
-
-/*addr_off : address offset of the entry in eeprom (not the tuple number of eeprom (reg); that is addr_off !=reg)*/
-u8 eeprom_read(_adapter *padapter, u32 addr_off, u8 sz, u8 *rbuf)
-{
- u8 quotient, remainder, addr_2align_odd;
- u16 reg, stmp, i = 0, idx = 0;
-_func_enter_;
- reg = (u16)(addr_off >> 1);
- addr_2align_odd = (u8)(addr_off & 0x1);
-
- /*read that start at high part: e.g 1,3,5,7,9,...*/
- if (addr_2align_odd) {
- stmp = eeprom_read16(padapter, reg);
- rbuf[idx++] = (u8) ((stmp>>8)&0xff); /*return hogh-part of the short*/
- reg++; sz--;
- }
-
- quotient = sz >> 1;
- remainder = sz & 0x1;
-
- for (i = 0; i < quotient; i++) {
- stmp = eeprom_read16(padapter, reg+i);
- rbuf[idx++] = (u8) (stmp&0xff);
- rbuf[idx++] = (u8) ((stmp>>8)&0xff);
- }
-
- reg = reg+i;
- if (remainder) { /*end of read at lower part of short : 0,2,4,6,...*/
- stmp = eeprom_read16(padapter, reg);
- rbuf[idx] = (u8)(stmp & 0xff);
- }
-_func_exit_;
- return true;
-}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index a28a06d5a576..430e2d81924c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_EFUSE_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
@@ -40,7 +38,6 @@ Efuse_Read1ByteFromFakeContent(
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
- /* DbgPrint("Read fake content, offset = %d\n", Offset); */
if (fakeEfuseBank == 0)
*Value = fakeEfuseContent[Offset];
else
@@ -252,9 +249,8 @@ bool bPseudoTest)
u8 bResult;
u8 readbyte;
- if (bPseudoTest) {
+ if (bPseudoTest)
return Efuse_Read1ByteFromFakeContent(padapter, addr, data);
- }
/* <20130121, Kordan> For SMIC EFUSE specificatoin. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
@@ -294,9 +290,8 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
u8 bResult = false;
u32 efuseValue = 0;
- if (bPseudoTest) {
+ if (bPseudoTest)
return Efuse_Write1ByteToFakeContent(padapter, addr, data);
- }
/* -----------------e-fuse reg ctrl --------------------------------- */
@@ -322,11 +317,10 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
tmpidx++;
}
- if (tmpidx < 100) {
+ if (tmpidx < 100)
bResult = true;
- } else {
+ else
bResult = false;
- }
/* disable Efuse program enable */
PHY_SetMacReg(padapter, EFUSE_TEST, BIT(11), 0);
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index ae057eefeeb3..0f0fcd9dc652 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -96,10 +96,7 @@ bool rtw_is_cckratesonly_included(u8 *rate)
int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
{
if (channel > 14) {
- if (rtw_is_cckrates_included(rate))
- return WIRELESS_INVALID;
- else
- return WIRELESS_11A;
+ return WIRELESS_INVALID;
} else { /* could be pure B, pure G, or B/G */
if (rtw_is_cckratesonly_included(rate))
return WIRELESS_11B;
@@ -239,12 +236,10 @@ int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 o
while (1) {
target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
if (target_ie && target_ielen) {
- u8 buf[MAX_IE_SZ] = {0};
u8 *remain_ies = target_ie + target_ielen;
uint remain_len = search_len - (remain_ies - start);
- memcpy(buf, remain_ies, remain_len);
- memcpy(target_ie, buf, remain_len);
+ memcpy(target_ie, remain_ies, remain_len);
*ies_len = *ies_len - target_ielen;
ret = _SUCCESS;
@@ -268,10 +263,6 @@ void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
break;
case WIRELESS_11G:
- case WIRELESS_11A:
- case WIRELESS_11_5N:
- case WIRELESS_11A_5N:/* Todo: no basic rate for ofdm ? */
- case WIRELESS_11_5AC:
memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
break;
@@ -329,14 +320,7 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
ie = rtw_set_ie(ie, WLAN_EID_SSID, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz);
/* supported rates */
- if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
- if (pdev_network->Configuration.DSConfig > 14)
- wireless_mode = WIRELESS_11A_5N;
- else
- wireless_mode = WIRELESS_11BG_24N;
- } else {
- wireless_mode = pregistrypriv->wireless_mode;
- }
+ wireless_mode = pregistrypriv->wireless_mode;
rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode);
@@ -361,8 +345,8 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
}
/* HT Cap. */
- if (((pregistrypriv->wireless_mode&WIRELESS_11_5N) || (pregistrypriv->wireless_mode&WIRELESS_11_24N))
- && (pregistrypriv->ht_enable == true)) {
+ if ((pregistrypriv->wireless_mode & WIRELESS_11_24N) &&
+ (pregistrypriv->ht_enable == true)) {
/* todo: */
}
@@ -877,7 +861,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
}
/**
- * ieee802_11_parse_elems - Parse information elements in management frames
+ * rtw_ieee802_11_parse_elems - Parse information elements in management frames
* @start: Pointer to the start of IEs
* @len: Length of IE buffer in octets
* @elems: Data structure for parsed elements
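[editor's note] One of the rtw_ieee80211.c hunks above drops the intermediate stack buffer from rtw_ies_remove_ie() and copies the trailing IEs directly over the element being removed. A minimal, self-contained sketch of that in-place removal pattern, written here with memmove() because the remaining bytes overlap the region they are copied into; ie_remove() and its arguments are hypothetical, not driver API:

#include <string.h>

/* Remove one information element of length ielen starting at ie,
 * inside a buffer of buf_len bytes that begins at ies.
 * Returns the new total length of the buffer.
 */
static unsigned int ie_remove(unsigned char *ies, unsigned int buf_len,
			      unsigned char *ie, unsigned int ielen)
{
	unsigned char *rest = ie + ielen;                        /* bytes after the IE */
	unsigned int rest_len = buf_len - (unsigned int)(rest - ies);

	/* source and destination overlap, so memmove() is the safe copy */
	memmove(ie, rest, rest_len);

	return buf_len - ielen;
}
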
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index c860ab7d618c..856e23398c03 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -25,8 +25,6 @@ jackson@realtek.com.tw
*/
-#define _RTW_IO_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
@@ -35,7 +33,7 @@ jackson@realtek.com.tw
#define rtw_cpu_to_le16(val) val
#define rtw_cpu_to_le32(val) val
-u8 _rtw_read8(struct adapter *adapter, u32 addr)
+u8 rtw_read8(struct adapter *adapter, u32 addr)
{
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
struct io_priv *pio_priv = &adapter->iopriv;
@@ -47,7 +45,7 @@ u8 _rtw_read8(struct adapter *adapter, u32 addr)
return _read8(pintfhdl, addr);
}
-u16 _rtw_read16(struct adapter *adapter, u32 addr)
+u16 rtw_read16(struct adapter *adapter, u32 addr)
{
u16 r_val;
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
@@ -61,7 +59,7 @@ u16 _rtw_read16(struct adapter *adapter, u32 addr)
return rtw_le16_to_cpu(r_val);
}
-u32 _rtw_read32(struct adapter *adapter, u32 addr)
+u32 rtw_read32(struct adapter *adapter, u32 addr)
{
u32 r_val;
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
@@ -76,7 +74,7 @@ u32 _rtw_read32(struct adapter *adapter, u32 addr)
}
-int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
+int rtw_write8(struct adapter *adapter, u32 addr, u8 val)
{
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
struct io_priv *pio_priv = &adapter->iopriv;
@@ -90,7 +88,7 @@ int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
return RTW_STATUS_CODE(ret);
}
-int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
+int rtw_write16(struct adapter *adapter, u32 addr, u16 val)
{
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
struct io_priv *pio_priv = &adapter->iopriv;
@@ -103,7 +101,7 @@ int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
ret = _write16(pintfhdl, addr, val);
return RTW_STATUS_CODE(ret);
}
-int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
+int rtw_write32(struct adapter *adapter, u32 addr, u32 val)
{
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
struct io_priv *pio_priv = &adapter->iopriv;
@@ -118,26 +116,7 @@ int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
return RTW_STATUS_CODE(ret);
}
-u8 _rtw_sd_f0_read8(struct adapter *adapter, u32 addr)
-{
- u8 r_val = 0x00;
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &(pio_priv->intf);
- u8 (*_sd_f0_read8)(struct intf_hdl *pintfhdl, u32 addr);
-
- _sd_f0_read8 = pintfhdl->io_ops._sd_f0_read8;
-
- if (_sd_f0_read8)
- r_val = _sd_f0_read8(pintfhdl, addr);
- else
- netdev_warn(adapter->pnetdev,
- FUNC_ADPT_FMT " _sd_f0_read8 callback is NULL\n",
- FUNC_ADPT_ARG(adapter));
-
- return r_val;
-}
-
-u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
struct io_priv *pio_priv = &adapter->iopriv;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 4707dba90397..2dd75e007239 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_MLME_C_
-
#include <linux/etherdevice.h>
#include <drv_types.h>
#include <rtw_debug.h>
@@ -244,15 +242,11 @@ struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
/* spin_lock_bh(&scanned_queue->lock); */
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
-
- while (plist != phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each(plist, phead) {
+ pnetwork = list_entry(plist, struct wlan_network, list);
if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
-
- plist = get_next(plist);
}
if (plist == phead)
@@ -266,7 +260,7 @@ exit:
void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
{
- struct list_head *phead, *plist;
+ struct list_head *phead, *plist, *tmp;
struct wlan_network *pnetwork;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
@@ -274,13 +268,9 @@ void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
spin_lock_bh(&scanned_queue->lock);
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
-
- while (phead != plist) {
-
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each_safe(plist, tmp, phead) {
- plist = get_next(plist);
+ pnetwork = list_entry(plist, struct wlan_network, list);
_rtw_free_network(pmlmepriv, pnetwork, isfreeall);
@@ -422,15 +412,11 @@ struct wlan_network *_rtw_find_same_network(struct __queue *scanned_queue, struc
struct wlan_network *found = NULL;
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
-
- while (plist != phead) {
- found = container_of(plist, struct wlan_network, list);
+ list_for_each(plist, phead) {
+ found = list_entry(plist, struct wlan_network, list);
if (is_same_network(&network->network, &found->network, 0))
break;
-
- plist = get_next(plist);
}
if (plist == phead)
@@ -448,21 +434,14 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
-
- while (1) {
-
- if (phead == plist)
- break;
+ list_for_each(plist, phead) {
- pwlan = container_of(plist, struct wlan_network, list);
+ pwlan = list_entry(plist, struct wlan_network, list);
if (!pwlan->fixed) {
if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
-
- plist = get_next(plist);
}
return oldest;
@@ -549,13 +528,8 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- plist = get_next(phead);
-
- while (1) {
- if (phead == plist)
- break;
-
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each(plist, phead) {
+ pnetwork = list_entry(plist, struct wlan_network, list);
rtw_bug_check(pnetwork, pnetwork, pnetwork, pnetwork);
@@ -571,8 +545,6 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
if (oldest == NULL || time_after(oldest->last_scanned, pnetwork->last_scanned))
oldest = pnetwork;
- plist = get_next(plist);
-
}
/* If we didn't find a match, then get a new network slot to initialize
@@ -1189,7 +1161,7 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
/* define REJOIN */
void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
{
- static u8 retry;
+ static u8 __maybe_unused retry;
struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
@@ -1788,17 +1760,10 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
spin_lock_bh(&(mlme->scanned_queue.lock));
phead = get_list_head(queue);
- mlme->pscanned = get_next(phead);
-
- while (phead != mlme->pscanned) {
+ list_for_each(mlme->pscanned, phead) {
- pnetwork = container_of(mlme->pscanned, struct wlan_network, list);
- if (!pnetwork) {
- ret = _FAIL;
- goto exit;
- }
-
- mlme->pscanned = get_next(mlme->pscanned);
+ pnetwork = list_entry(mlme->pscanned, struct wlan_network,
+ list);
rtw_check_roaming_candidate(mlme, &candidate, pnetwork);
@@ -1892,17 +1857,10 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
}
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
-
- while (phead != pmlmepriv->pscanned) {
-
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- if (!pnetwork) {
- ret = _FAIL;
- goto exit;
- }
+ list_for_each(pmlmepriv->pscanned, phead) {
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pnetwork = list_entry(pmlmepriv->pscanned,
+ struct wlan_network, list);
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
@@ -2226,16 +2184,6 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
case WIRELESS_11BG_24N:
pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
break;
- case WIRELESS_11A:
- case WIRELESS_11A_5N:
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
- break;
- case WIRELESS_11ABGN:
- if (pregistrypriv->channel > 14)
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
- else
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
- break;
default:
/* TODO */
break;
@@ -2353,12 +2301,11 @@ void rtw_build_wmm_ie_ht(struct adapter *padapter, u8 *out_ie, uint *pout_len)
{
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
int out_len;
- u8 *pframe;
if (padapter->mlmepriv.qospriv.qos_option == 0) {
out_len = *pout_len;
- pframe = rtw_set_ie(out_ie+out_len, WLAN_EID_VENDOR_SPECIFIC,
- _WMM_IE_Length_, WMM_IE, pout_len);
+ rtw_set_ie(out_ie+out_len, WLAN_EID_VENDOR_SPECIFIC,
+ _WMM_IE_Length_, WMM_IE, pout_len);
padapter->mlmepriv.qospriv.qos_option = 1;
}
@@ -2369,7 +2316,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
{
u32 ielen, out_len;
enum ieee80211_max_ampdu_length_exp max_rx_ampdu_factor;
- unsigned char *p, *pframe;
+ unsigned char *p;
struct ieee80211_ht_cap ht_capie;
u8 cbw40_enable = 0, stbc_rx_enable = 0, rf_type = 0, operation_bw = 0;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
@@ -2492,8 +2439,8 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
else
ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
- pframe = rtw_set_ie(out_ie+out_len, WLAN_EID_HT_CAPABILITY,
- sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
+ rtw_set_ie(out_ie+out_len, WLAN_EID_HT_CAPABILITY,
+ sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
phtpriv->ht_option = true;
@@ -2501,7 +2448,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
p = rtw_get_ie(in_ie, WLAN_EID_HT_OPERATION, &ielen, in_len);
if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
out_len = *pout_len;
- pframe = rtw_set_ie(out_ie+out_len, WLAN_EID_HT_OPERATION, ielen, p+2, pout_len);
+ rtw_set_ie(out_ie+out_len, WLAN_EID_HT_OPERATION, ielen, p+2, pout_len);
}
}
@@ -2516,7 +2463,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
int len;
/* struct sta_info *bmc_sta, *psta; */
struct ieee80211_ht_cap *pht_capie;
- struct ieee80211_ht_addt_info *pht_addtinfo;
/* struct recv_reorder_ctrl *preorder_ctrl; */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
@@ -2553,7 +2499,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), WLAN_EID_HT_OPERATION, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
if (p && len > 0) {
- pht_addtinfo = (struct ieee80211_ht_addt_info *)(p+2);
/* todo: */
}
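[editor's note] The rtw_mlme.c hunks above replace the driver's hand-rolled get_next()/container_of() walks with the standard list_for_each()/list_entry() iterators, switching to list_for_each_safe() in the paths that delete entries while walking. A minimal sketch of both idioms, with a made-up node type; only the <linux/list.h> helpers themselves are real API:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct scan_entry {			/* hypothetical node type */
	struct list_head list;
	u8 bssid[6];
};

/* Read-only walk: list_for_each() + list_entry() */
static struct scan_entry *find_entry(struct list_head *head, const u8 *bssid)
{
	struct list_head *pos;
	struct scan_entry *e;

	list_for_each(pos, head) {
		e = list_entry(pos, struct scan_entry, list);
		if (!memcmp(e->bssid, bssid, 6))
			return e;
	}
	return NULL;
}

/* Destructive walk: list_for_each_safe() keeps a lookahead pointer,
 * so the current node may be unlinked and freed inside the loop.
 */
static void free_all(struct list_head *head)
{
	struct list_head *pos, *tmp;
	struct scan_entry *e;

	list_for_each_safe(pos, tmp, head) {
		e = list_entry(pos, struct scan_entry, list);
		list_del(&e->list);
		kfree(e);
	}
}
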
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 9031cf7657ae..c128d462c6c7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_MLME_EXT_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
@@ -48,7 +46,6 @@ static struct action_handler OnAction_tbl[] = {
{RTW_WLAN_CATEGORY_UNPROTECTED_WNM, "ACTION_UNPROTECTED_WNM", &DoReserved},
{RTW_WLAN_CATEGORY_SELF_PROTECTED, "ACTION_SELF_PROTECTED", &DoReserved},
{RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &DoReserved},
- {RTW_WLAN_CATEGORY_VHT, "ACTION_VHT", &DoReserved},
{RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &DoReserved},
};
@@ -81,135 +78,95 @@ static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
{{}, 0}, /* 0x06, RT_CHANNEL_DOMAIN_2G_NULL */
};
-static struct rt_channel_plan_5g RTW_ChannelPlan5G[RT_CHANNEL_DOMAIN_5G_MAX] = {
- {{}, 0}, /* 0x00, RT_CHANNEL_DOMAIN_5G_NULL */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 19}, /* 0x01, RT_CHANNEL_DOMAIN_5G_ETSI1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24}, /* 0x02, RT_CHANNEL_DOMAIN_5G_ETSI2 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 149, 153, 157, 161, 165}, 22}, /* 0x03, RT_CHANNEL_DOMAIN_5G_ETSI3 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24}, /* 0x04, RT_CHANNEL_DOMAIN_5G_FCC1 */
- {{36, 40, 44, 48, 149, 153, 157, 161, 165}, 9}, /* 0x05, RT_CHANNEL_DOMAIN_5G_FCC2 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 13}, /* 0x06, RT_CHANNEL_DOMAIN_5G_FCC3 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161}, 12}, /* 0x07, RT_CHANNEL_DOMAIN_5G_FCC4 */
- {{149, 153, 157, 161, 165}, 5}, /* 0x08, RT_CHANNEL_DOMAIN_5G_FCC5 */
- {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x09, RT_CHANNEL_DOMAIN_5G_FCC6 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 20}, /* 0x0A, RT_CHANNEL_DOMAIN_5G_FCC7_IC1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 149, 153, 157, 161, 165}, 20}, /* 0x0B, RT_CHANNEL_DOMAIN_5G_KCC1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 19}, /* 0x0C, RT_CHANNEL_DOMAIN_5G_MKK1 */
- {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x0D, RT_CHANNEL_DOMAIN_5G_MKK2 */
- {{100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 11}, /* 0x0E, RT_CHANNEL_DOMAIN_5G_MKK3 */
- {{56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 15}, /* 0x0F, RT_CHANNEL_DOMAIN_5G_NCC1 */
- {{56, 60, 64, 149, 153, 157, 161, 165}, 8}, /* 0x10, RT_CHANNEL_DOMAIN_5G_NCC2 */
- {{149, 153, 157, 161, 165}, 5}, /* 0x11, RT_CHANNEL_DOMAIN_5G_NCC3 */
- {{36, 40, 44, 48}, 4}, /* 0x12, RT_CHANNEL_DOMAIN_5G_ETSI4 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 20}, /* 0x13, RT_CHANNEL_DOMAIN_5G_ETSI5 */
- {{149, 153, 157, 161}, 4}, /* 0x14, RT_CHANNEL_DOMAIN_5G_FCC8 */
- {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x15, RT_CHANNEL_DOMAIN_5G_ETSI6 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 13}, /* 0x16, RT_CHANNEL_DOMAIN_5G_ETSI7 */
- {{36, 40, 44, 48, 149, 153, 157, 161, 165}, 9}, /* 0x17, RT_CHANNEL_DOMAIN_5G_ETSI8 */
- {{100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 11}, /* 0x18, RT_CHANNEL_DOMAIN_5G_ETSI9 */
- {{149, 153, 157, 161, 165}, 5}, /* 0x19, RT_CHANNEL_DOMAIN_5G_ETSI10 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 132, 136, 140, 149, 153, 157, 161, 165}, 16}, /* 0x1A, RT_CHANNEL_DOMAIN_5G_ETSI11 */
- {{52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 17}, /* 0x1B, RT_CHANNEL_DOMAIN_5G_NCC4 */
- {{149, 153, 157, 161}, 4}, /* 0x1C, RT_CHANNEL_DOMAIN_5G_ETSI12 */
- {{36, 40, 44, 48, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 17}, /* 0x1D, RT_CHANNEL_DOMAIN_5G_FCC9 */
- {{36, 40, 44, 48, 100, 104, 108, 112, 116, 132, 136, 140}, 12}, /* 0x1E, RT_CHANNEL_DOMAIN_5G_ETSI13 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161}, 20}, /* 0x1F, RT_CHANNEL_DOMAIN_5G_FCC10 */
-
- /* Driver self defined for old channel plan Compatible , Remember to modify if have new channel plan definition ===== */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 21}, /* 0x20, RT_CHANNEL_DOMAIN_5G_FCC */
- {{36, 40, 44, 48}, 4}, /* 0x21, RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS */
- {{36, 40, 44, 48, 149, 153, 157, 161}, 8}, /* 0x22, RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS */
-};
-
static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
/* 0x00 ~ 0x1F , Old Define ===== */
- {0x02, 0x20}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
- {0x02, 0x0A}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
- {0x01, 0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
- {0x01, 0x00}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
- {0x01, 0x00}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
- {0x03, 0x00}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
- {0x03, 0x00}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
- {0x01, 0x09}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
- {0x03, 0x09}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
- {0x03, 0x00}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
- {0x00, 0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
- {0x02, 0x0F}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
- {0x01, 0x08}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
- {0x02, 0x06}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
- {0x02, 0x0B}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
- {0x02, 0x09}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
- {0x01, 0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
- {0x02, 0x05}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
- {0x01, 0x21}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
- {0x00, 0x04}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
- {0x02, 0x10}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
- {0x00, 0x21}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
- {0x00, 0x22}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
- {0x03, 0x21}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
- {0x06, 0x08}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
- {0x02, 0x08}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
- {0x00, 0x00}, /* 0x1A, */
- {0x00, 0x00}, /* 0x1B, */
- {0x00, 0x00}, /* 0x1C, */
- {0x00, 0x00}, /* 0x1D, */
- {0x00, 0x00}, /* 0x1E, */
- {0x06, 0x04}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
+ {0x02}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
+ {0x02}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
+ {0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
+ {0x01}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
+ {0x01}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
+ {0x03}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
+ {0x03}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
+ {0x01}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
+ {0x03}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
+ {0x03}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
+ {0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
+ {0x02}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
+ {0x01}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
+ {0x02}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
+ {0x02}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
+ {0x02}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
+ {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
+ {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
+ {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
+ {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
+ {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
+ {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
+ {0x03}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x06}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
+ {0x02}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
+ {0x00}, /* 0x1A, */
+ {0x00}, /* 0x1B, */
+ {0x00}, /* 0x1C, */
+ {0x00}, /* 0x1D, */
+ {0x00}, /* 0x1E, */
+ {0x06}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
/* 0x20 ~ 0x7F , New Define ===== */
- {0x00, 0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
- {0x01, 0x00}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
- {0x02, 0x00}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
- {0x03, 0x00}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
- {0x04, 0x00}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
- {0x02, 0x04}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
- {0x00, 0x01}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
- {0x03, 0x0C}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
- {0x00, 0x0B}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
- {0x00, 0x05}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
- {0x00, 0x00}, /* 0x2A, */
- {0x00, 0x00}, /* 0x2B, */
- {0x00, 0x00}, /* 0x2C, */
- {0x00, 0x00}, /* 0x2D, */
- {0x00, 0x00}, /* 0x2E, */
- {0x00, 0x00}, /* 0x2F, */
- {0x00, 0x06}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
- {0x00, 0x07}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
- {0x00, 0x08}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
- {0x00, 0x09}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
- {0x02, 0x0A}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
- {0x00, 0x02}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
- {0x00, 0x03}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
- {0x03, 0x0D}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
- {0x03, 0x0E}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
- {0x02, 0x0F}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
- {0x00, 0x00}, /* 0x3A, */
- {0x00, 0x00}, /* 0x3B, */
- {0x00, 0x00}, /* 0x3C, */
- {0x00, 0x00}, /* 0x3D, */
- {0x00, 0x00}, /* 0x3E, */
- {0x00, 0x00}, /* 0x3F, */
- {0x02, 0x10}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
- {0x05, 0x00}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_NULL */
- {0x01, 0x12}, /* 0x42, RT_CHANNEL_DOMAIN_ETSI1_ETSI4 */
- {0x02, 0x05}, /* 0x43, RT_CHANNEL_DOMAIN_FCC1_FCC2 */
- {0x02, 0x11}, /* 0x44, RT_CHANNEL_DOMAIN_FCC1_NCC3 */
- {0x00, 0x13}, /* 0x45, RT_CHANNEL_DOMAIN_WORLD_ETSI5 */
- {0x02, 0x14}, /* 0x46, RT_CHANNEL_DOMAIN_FCC1_FCC8 */
- {0x00, 0x15}, /* 0x47, RT_CHANNEL_DOMAIN_WORLD_ETSI6 */
- {0x00, 0x16}, /* 0x48, RT_CHANNEL_DOMAIN_WORLD_ETSI7 */
- {0x00, 0x17}, /* 0x49, RT_CHANNEL_DOMAIN_WORLD_ETSI8 */
- {0x00, 0x18}, /* 0x50, RT_CHANNEL_DOMAIN_WORLD_ETSI9 */
- {0x00, 0x19}, /* 0x51, RT_CHANNEL_DOMAIN_WORLD_ETSI10 */
- {0x00, 0x1A}, /* 0x52, RT_CHANNEL_DOMAIN_WORLD_ETSI11 */
- {0x02, 0x1B}, /* 0x53, RT_CHANNEL_DOMAIN_FCC1_NCC4 */
- {0x00, 0x1C}, /* 0x54, RT_CHANNEL_DOMAIN_WORLD_ETSI12 */
- {0x02, 0x1D}, /* 0x55, RT_CHANNEL_DOMAIN_FCC1_FCC9 */
- {0x00, 0x1E}, /* 0x56, RT_CHANNEL_DOMAIN_WORLD_ETSI13 */
- {0x02, 0x1F}, /* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
+ {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
+ {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
+ {0x02}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
+ {0x03}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
+ {0x04}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
+ {0x02}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
+ {0x00}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
+ {0x03}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
+ {0x00}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
+ {0x00}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
+ {0x00}, /* 0x2A, */
+ {0x00}, /* 0x2B, */
+ {0x00}, /* 0x2C, */
+ {0x00}, /* 0x2D, */
+ {0x00}, /* 0x2E, */
+ {0x00}, /* 0x2F, */
+ {0x00}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
+ {0x00}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
+ {0x00}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
+ {0x00}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
+ {0x02}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
+ {0x00}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
+ {0x00}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
+ {0x03}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
+ {0x03}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
+ {0x02}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
+ {0x00}, /* 0x3A, */
+ {0x00}, /* 0x3B, */
+ {0x00}, /* 0x3C, */
+ {0x00}, /* 0x3D, */
+ {0x00}, /* 0x3E, */
+ {0x00}, /* 0x3F, */
+ {0x02}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
+ {0x05}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_NULL */
+ {0x01}, /* 0x42, RT_CHANNEL_DOMAIN_ETSI1_ETSI4 */
+ {0x02}, /* 0x43, RT_CHANNEL_DOMAIN_FCC1_FCC2 */
+ {0x02}, /* 0x44, RT_CHANNEL_DOMAIN_FCC1_NCC3 */
+ {0x00}, /* 0x45, RT_CHANNEL_DOMAIN_WORLD_ETSI5 */
+ {0x02}, /* 0x46, RT_CHANNEL_DOMAIN_FCC1_FCC8 */
+ {0x00}, /* 0x47, RT_CHANNEL_DOMAIN_WORLD_ETSI6 */
+ {0x00}, /* 0x48, RT_CHANNEL_DOMAIN_WORLD_ETSI7 */
+ {0x00}, /* 0x49, RT_CHANNEL_DOMAIN_WORLD_ETSI8 */
+ {0x00}, /* 0x50, RT_CHANNEL_DOMAIN_WORLD_ETSI9 */
+ {0x00}, /* 0x51, RT_CHANNEL_DOMAIN_WORLD_ETSI10 */
+ {0x00}, /* 0x52, RT_CHANNEL_DOMAIN_WORLD_ETSI11 */
+ {0x02}, /* 0x53, RT_CHANNEL_DOMAIN_FCC1_NCC4 */
+ {0x00}, /* 0x54, RT_CHANNEL_DOMAIN_WORLD_ETSI12 */
+ {0x02}, /* 0x55, RT_CHANNEL_DOMAIN_FCC1_FCC9 */
+ {0x00}, /* 0x56, RT_CHANNEL_DOMAIN_WORLD_ETSI13 */
+ {0x02}, /* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
};
/* use the combination for max channel numbers */
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02};
+static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03};
/* Search the @param ch in given @param ch_set
* @ch_set: the given channel set
@@ -231,23 +188,6 @@ int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch)
return i;
}
-/* Check the @param ch is fit with setband setting of @param adapter
- * @adapter: the given adapter
- * @ch: the given channel number
- *
- * return true when check valid, false not valid
- */
-bool rtw_mlme_band_check(struct adapter *adapter, const u32 ch)
-{
- if (adapter->setband == GHZ24_50 /* 2.4G and 5G */
- || (adapter->setband == GHZ_24 && ch < 35) /* 2.4G only */
- || (adapter->setband == GHZ_50 && ch > 35) /* 5G only */
- ) {
- return true;
- }
- return false;
-}
-
/****************************************************************************
Following are the initialization functions for WiFi MLME
@@ -395,8 +335,8 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
{
u8 index, chanset_size = 0;
- u8 b5GBand = false, b2_4GBand = false;
- u8 Index2G = 0, Index5G = 0;
+ u8 b2_4GBand = false;
+ u8 Index2G = 0;
memset(channel_set, 0, sizeof(struct rt_channel_info)*MAX_CHANNEL_NUM);
@@ -422,7 +362,6 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
} else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_WORLD_WIDE_5G == ChannelPlan ||
RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) { /* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
@@ -435,20 +374,6 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
}
}
- if (b5GBand) {
- for (index = 0; index < RTW_ChannelPlan5G[Index5G].Len; index++) {
- if (RTW_ChannelPlan5G[Index5G].Channel[index] <= 48
- || RTW_ChannelPlan5G[Index5G].Channel[index] >= 149) {
- channel_set[chanset_size].ChannelNum = RTW_ChannelPlan5G[Index5G].Channel[index];
- if (RT_CHANNEL_DOMAIN_WORLD_WIDE_5G == ChannelPlan)/* passive scan for all 5G channels */
- channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- else
- channel_set[chanset_size].ScanType = SCAN_ACTIVE;
- chanset_size++;
- }
- }
- }
-
return chanset_size;
}
@@ -1021,7 +946,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
- unsigned char reassoc, *p, *pos, *wpa_ie;
+ unsigned char *p, *pos, *wpa_ie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
int i, ie_len, wpa_ie_len, left;
unsigned char supportRate[16];
@@ -1041,13 +966,10 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
return _FAIL;
frame_type = GetFrameSubType(pframe);
- if (frame_type == WIFI_ASSOCREQ) {
- reassoc = 0;
+ if (frame_type == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
- } else { /* WIFI_REASSOCREQ */
- reassoc = 1;
+ else /* WIFI_REASSOCREQ */
ie_offset = _REASOCREQ_IE_OFFSET_;
- }
if (pkt_len < sizeof(struct ieee80211_hdr_3addr) + ie_offset)
@@ -1726,7 +1648,7 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
struct recv_reorder_ctrl *preorder_ctrl;
unsigned char *frame_body;
unsigned char category, action;
- unsigned short tid, status, reason_code = 0;
+ unsigned short tid, status;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 *pframe = precv_frame->u.hdr.rx_data;
@@ -1795,9 +1717,6 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
~BIT((frame_body[3] >> 4) & 0xf);
psta->htpriv.candidate_tid_bitmap &=
~BIT((frame_body[3] >> 4) & 0xf);
-
- /* reason_code = frame_body[4] | (frame_body[5] << 8); */
- reason_code = get_unaligned_le16(&frame_body[4]);
} else if ((frame_body[3] & BIT(3)) == BIT(3)) {
tid = (frame_body[3] >> 4) & 0x0F;
@@ -2142,7 +2061,7 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr
ret = rtw_hal_mgnt_xmit(padapter, pmgntframe);
if (ret == _SUCCESS)
- ret = rtw_sctx_wait(&sctx, __func__);
+ ret = rtw_sctx_wait(&sctx);
spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
pxmitbuf->sctx = NULL;
@@ -2226,7 +2145,6 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -2248,7 +2166,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
fctrl = &(pwlanhdr->frame_control);
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
@@ -2457,9 +2375,13 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
u8 *ssid_ie;
signed int ssid_ielen;
signed int ssid_ielen_diff;
- u8 buf[MAX_IE_SZ];
+ u8 *buf;
u8 *ies = pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr);
+ buf = rtw_zmalloc(MAX_IE_SZ);
+ if (!buf)
+ return;
+
ssid_ie = rtw_get_ie(ies+_FIXED_IE_LENGTH_, WLAN_EID_SSID, &ssid_ielen,
(pframe-ies)-_FIXED_IE_LENGTH_);
@@ -2487,6 +2409,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pframe += ssid_ielen_diff;
pattrib->pktlen += ssid_ielen_diff;
}
+ kfree (buf);
+ kfree(buf);
}
} else {
/* timestamp will be inserted by hardware */
@@ -2567,7 +2490,6 @@ static int _issue_probereq(struct adapter *padapter,
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
int bssrate_len = 0;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -2594,8 +2516,8 @@ static int _issue_probereq(struct adapter *padapter,
memcpy(pwlanhdr->addr3, da, ETH_ALEN);
} else {
/* broadcast probe request frame */
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
+ eth_broadcast_addr(pwlanhdr->addr3);
}
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -4483,61 +4405,6 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
}
}
- if (pregistrypriv->wireless_mode & WIRELESS_11A) {
- do {
- if ((i == MAX_CHANNEL_NUM) ||
- (chplan_sta[i].ChannelNum == 0))
- break;
-
- if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] == 0))
- break;
-
- if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- i++;
- j++;
- k++;
- } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
-/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } while (1);
-
- /* change AP not support channel to Passive scan */
- while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
-/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- }
-
- /* add channel AP supported */
- while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] != 0)) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } else {
- /* keep original STA 5G channel plan */
- while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
- chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = chplan_sta[i].ScanType;
- i++;
- k++;
- }
- }
-
pmlmeext->update_channel_plan_by_ap_done = 1;
}
@@ -4548,10 +4415,6 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
if (chplan_new[i].ChannelNum == channel) {
if (chplan_new[i].ScanType == SCAN_PASSIVE) {
- /* 5G Bnad 2, 3 (DFS) doesn't change to active scan */
- if (channel >= 52 && channel <= 144)
- break;
-
chplan_new[i].ScanType = SCAN_ACTIVE;
}
break;
@@ -5125,24 +4988,9 @@ void _linked_info_dump(struct adapter *padapter)
if (padapter->bLinkInfoDump) {
- if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
rtw_hal_get_def_var(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
- } else if ((pmlmeinfo->state&0x03) == _HW_STATE_AP_) {
- struct list_head *phead, *plist;
-
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = get_next(phead);
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- }
for (i = 0; i < NUM_STA; i++) {
if (pdvobj->macid[i]) {
if (i != 1) /* skip bc/mc sta */
@@ -5151,11 +4999,7 @@ void _linked_info_dump(struct adapter *padapter)
}
}
rtw_hal_set_def_var(padapter, HAL_DEF_DBG_RX_INFO_DUMP, NULL);
-
-
}
-
-
}
static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
@@ -5452,9 +5296,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* u32 initialgain; */
if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
- struct wlan_bssid_ex *network = &padapter->mlmepriv.cur_network.network;
-
- start_bss_network(padapter, (u8 *)network);
+ start_bss_network(padapter);
return H2C_SUCCESS;
}
@@ -5692,7 +5534,6 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED)
&& set_idx >= 0
- && rtw_mlme_band_check(padapter, in[i].hw_value)
) {
if (j >= out_num) {
netdev_dbg(padapter->pnetdev,
@@ -5716,23 +5557,20 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
if (j == 0) {
for (i = 0; i < pmlmeext->max_chan_nums; i++) {
- if (rtw_mlme_band_check(padapter, pmlmeext->channel_set[i].ChannelNum)) {
-
- if (j >= out_num) {
- netdev_dbg(padapter->pnetdev,
- FUNC_ADPT_FMT " out_num:%u not enough\n",
- FUNC_ADPT_ARG(padapter),
- out_num);
- break;
- }
+ if (j >= out_num) {
+ netdev_dbg(padapter->pnetdev,
+ FUNC_ADPT_FMT " out_num:%u not enough\n",
+ FUNC_ADPT_ARG(padapter),
+ out_num);
+ break;
+ }
- out[j].hw_value = pmlmeext->channel_set[i].ChannelNum;
+ out[j].hw_value = pmlmeext->channel_set[i].ChannelNum;
- if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
- out[j].flags |= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+ if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
+ out[j].flags |= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
- j++;
- }
+ j++;
}
}
@@ -5997,10 +5835,40 @@ exit:
return res;
}
+static struct fwevent wlanevents[] = {
+ {0, rtw_dummy_event_callback}, /*0*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_survey_event_callback}, /*8*/
+ {sizeof(struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
+
+ {0, &rtw_joinbss_event_callback}, /*10*/
+ {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
+ {sizeof(struct stadel_event), &rtw_stadel_event_callback},
+ {0, &rtw_atimdone_event_callback},
+ {0, rtw_dummy_event_callback},
+ {0, NULL}, /*15*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, rtw_fwdbg_event_callback},
+ {0, NULL}, /*20*/
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_cpwm_event_callback},
+ {0, NULL},
+ {0, &rtw_wmm_event_callback},
+
+};
u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
{
- u8 evt_code, evt_seq;
+ u8 evt_code;
u16 evt_sz;
uint *peventbuf;
void (*event_callback)(struct adapter *dev, u8 *pbuf);
@@ -6011,19 +5879,8 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
peventbuf = (uint *)pbuf;
evt_sz = (u16)(*peventbuf&0xffff);
- evt_seq = (u8)((*peventbuf>>24)&0x7f);
evt_code = (u8)((*peventbuf>>16)&0xff);
-
- #ifdef CHECK_EVENT_SEQ
- /* checking event sequence... */
- if (evt_seq != (atomic_read(&pevt_priv->event_seq) & 0x7f)) {
- pevt_priv->event_seq = (evt_seq+1)&0x7f;
-
- goto _abort_event_;
- }
- #endif
-
/* checking if event code is valid */
if (evt_code >= MAX_C2HEVT)
goto _abort_event_;
@@ -6063,7 +5920,7 @@ u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf)
u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
{
struct sta_info *psta_bmc;
- struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -6080,12 +5937,9 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = get_next(xmitframe_plist);
+ list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
+ pxmitframe = list_entry(xmitframe_plist,
+ struct xmit_frame, list);
list_del_init(&pxmitframe->list);
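[editor's note] The issue_probersp() hunk above moves a MAX_IE_SZ scratch array off the kernel stack and onto the heap, freeing it once the SSID rewrite is done. A minimal sketch of that stack-to-heap pattern using plain kzalloc()/kfree(); the buffer size and the work done on it are placeholders, and GFP_KERNEL is assumed (atomic context would need GFP_ATOMIC):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

#define SCRATCH_SZ 768		/* stand-in for MAX_IE_SZ */

static int build_with_scratch(void)
{
	u8 *buf;

	/* large scratch buffers belong on the heap, not the stack */
	buf = kzalloc(SCRATCH_SZ, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... use buf as temporary storage ... */

	kfree(buf);
	return 0;
}
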
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 251b9abdf591..a392d5b4caf2 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_PWRCTRL_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 668a703dee7f..d4c1725718d9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_RECV_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/jiffies.h>
@@ -1646,16 +1644,10 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
/* Rx Reorder initialize condition. */
if (preorder_ctrl->indicate_seq == 0xFFFF) {
preorder_ctrl->indicate_seq = seq_num;
-
- /* DbgPrint("check_indicate_seq, 1st->indicate_seq =%d\n", precvpriv->indicate_seq); */
}
- /* DbgPrint("enter->check_indicate_seq(): IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
-
/* Drop out the packet which SeqNum is smaller than WinStart */
if (SN_LESS(seq_num, preorder_ctrl->indicate_seq)) {
- /* DbgPrint("CheckRxTsIndicateSeq(): Packet Drop! IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
-
return false;
}
@@ -1668,8 +1660,6 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
} else if (SN_LESS(wend, seq_num)) {
- /* DbgPrint("CheckRxTsIndicateSeq(): Window Shift! IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
-
/* boundary situation, when seq_num cross 0xFFF */
if (seq_num >= (wsize - 1))
preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
@@ -1678,8 +1668,6 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
pdbgpriv->dbg_rx_ampdu_window_shift_cnt++;
}
- /* DbgPrint("exit->check_indicate_seq(): IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
-
return true;
}
@@ -1691,8 +1679,6 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, un
union recv_frame *pnextrframe;
struct rx_pkt_attrib *pnextattrib;
- /* DbgPrint("+enqueue_reorder_recvframe()\n"); */
-
/* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
/* spin_lock(&ppending_recvframe_queue->lock); */
@@ -1713,8 +1699,6 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, un
else
break;
- /* DbgPrint("enqueue_reorder_recvframe():while\n"); */
-
}
@@ -1753,8 +1737,6 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
- /* DbgPrint("+recv_indicatepkts_in_order\n"); */
-
/* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
/* spin_lock(&ppending_recvframe_queue->lock); */
@@ -1796,11 +1778,8 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
/* pTS->RxIndicateState = RXTS_INDICATE_PROCESSING; */
/* Indicate packets */
- /* RT_ASSERT((index<=REORDER_WIN_SIZE), ("RxReorderIndicatePacket(): Rx Reorder buffer full!!\n")); */
-
/* indicate this recv_frame */
- /* DbgPrint("recv_indicatepkts_in_order, indicate_seq =%d, seq_num =%d\n", precvpriv->indicate_seq, pattrib->seq_num); */
if (!pattrib->amsdu) {
if ((padapter->bDriverStopped == false) &&
(padapter->bSurpriseRemoved == false))
@@ -1823,8 +1802,6 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
break;
}
- /* DbgPrint("recv_indicatepkts_in_order():while\n"); */
-
}
/* spin_unlock(&ppending_recvframe_queue->lock); */
@@ -1894,7 +1871,6 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
/* s3. Insert all packet into Reorder Queue to maintain its ordering. */
if (!enqueue_reorder_recvframe(preorder_ctrl, prframe)) {
- /* DbgPrint("recv_indicatepkt_reorder, enqueue_reorder_recvframe fail!\n"); */
/* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
/* return _FAIL; */
goto _err_exit;
@@ -2123,7 +2099,7 @@ static void rtw_signal_stat_timer_hdl(struct timer_list *t)
u8 avg_signal_strength = 0;
u8 avg_signal_qual = 0;
u32 num_signal_strength = 0;
- u32 num_signal_qual = 0;
+ u32 __maybe_unused num_signal_qual = 0;
u8 _alpha = 5; /* this value is based on converging_constant = 5000 and sampling_interval = 1000 */
if (adapter->recvpriv.is_signal_dbg) {
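[editor's note] The rtw_security.c diff that follows replaces the driver's private CRC-32 table and RC4 implementation with the kernel's crc32_le() and the lib ARC4 helpers from <crypto/arc4.h>, and rebuilds aes128k128d() on top of the AES library in <crypto/aes.h>. A minimal sketch of the WEP-style "append ICV, then RC4" step using those library calls; the key, payload and lengths are placeholders, and the caller is assumed to leave 4 spare bytes after the payload:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <crypto/arc4.h>

/* Append a CRC-32 ICV to payload[0..len) and RC4-encrypt payload + ICV,
 * mirroring the library-based path used in the diff below.
 */
static void wep_style_encrypt(struct arc4_ctx *ctx, const u8 *key,
			      unsigned int keylen, u8 *payload,
			      unsigned int len)
{
	__le32 icv = cpu_to_le32(~crc32_le(~0, payload, len));

	memcpy(payload + len, &icv, 4);

	arc4_setkey(ctx, key, keylen);
	arc4_crypt(ctx, payload, payload, len + 4);
}
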
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 7823055ed32d..a99f439328f1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -4,11 +4,10 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_SECURITY_C_
-
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
#include <drv_types.h>
#include <rtw_debug.h>
+#include <crypto/aes.h>
static const char * const _security_type_str[] = {
"N/A",
@@ -31,118 +30,6 @@ const char *security_type_str(u8 value)
/* WEP related ===== */
-struct arc4context {
- u32 x;
- u32 y;
- u8 state[256];
-};
-
-
-static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
-{
- u32 t, u;
- u32 keyindex;
- u32 stateindex;
- u8 *state;
- u32 counter;
-
- state = parc4ctx->state;
- parc4ctx->x = 0;
- parc4ctx->y = 0;
- for (counter = 0; counter < 256; counter++)
- state[counter] = (u8)counter;
- keyindex = 0;
- stateindex = 0;
- for (counter = 0; counter < 256; counter++) {
- t = state[counter];
- stateindex = (stateindex + key[keyindex] + t) & 0xff;
- u = state[stateindex];
- state[stateindex] = (u8)t;
- state[counter] = (u8)u;
- if (++keyindex >= key_len)
- keyindex = 0;
- }
-}
-
-static u32 arcfour_byte(struct arc4context *parc4ctx)
-{
- u32 x;
- u32 y;
- u32 sx, sy;
- u8 *state;
-
- state = parc4ctx->state;
- x = (parc4ctx->x + 1) & 0xff;
- sx = state[x];
- y = (sx + parc4ctx->y) & 0xff;
- sy = state[y];
- parc4ctx->x = x;
- parc4ctx->y = y;
- state[y] = (u8)sx;
- state[x] = (u8)sy;
- return state[(sx + sy) & 0xff];
-}
-
-static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len)
-{
- u32 i;
-
- for (i = 0; i < len; i++)
- dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
-}
-
-static signed int bcrc32initialized;
-static u32 crc32_table[256];
-
-
-static u8 crc32_reverseBit(u8 data)
-{
- return((u8)((data<<7)&0x80) | ((data<<5)&0x40) | ((data<<3)&0x20) | ((data<<1)&0x10) | ((data>>1)&0x08) | ((data>>3)&0x04) | ((data>>5)&0x02) | ((data>>7)&0x01));
-}
-
-static void crc32_init(void)
-{
- if (bcrc32initialized == 1)
- return;
- else {
- signed int i, j;
- u32 c;
- u8 *p = (u8 *)&c, *p1;
- u8 k;
-
- c = 0x12340000;
-
- for (i = 0; i < 256; ++i) {
- k = crc32_reverseBit((u8)i);
- for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
- p1 = (u8 *)&crc32_table[i];
-
- p1[0] = crc32_reverseBit(p[3]);
- p1[1] = crc32_reverseBit(p[2]);
- p1[2] = crc32_reverseBit(p[1]);
- p1[3] = crc32_reverseBit(p[0]);
- }
- bcrc32initialized = 1;
- }
-}
-
-static __le32 getcrc32(u8 *buf, signed int len)
-{
- u8 *p;
- u32 crc;
-
- if (bcrc32initialized == 0)
- crc32_init();
-
- crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
-
- for (p = buf; len > 0; ++p, --len)
- crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
- return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
-}
-
-
/*
Need to consider the fragment situation
*/
@@ -150,7 +37,6 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
unsigned char crc[4];
- struct arc4context mycontext;
signed int curfragnum, length;
u32 keylength;
@@ -161,6 +47,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return;
@@ -182,18 +69,18 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
- *((__le32 *)crc) = getcrc32(payload, length);
+ *((__le32 *)crc) = ~crc32_le(~0, payload, length);
- arcfour_init(&mycontext, wepkey, 3+keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ arc4_setkey(ctx, wepkey, 3 + keylength);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc, 4);
} else {
length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
- *((__le32 *)crc) = getcrc32(payload, length);
- arcfour_init(&mycontext, wepkey, 3+keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ *((__le32 *)crc) = ~crc32_le(~0, payload, length);
+ arc4_setkey(ctx, wepkey, 3 + keylength);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc, 4);
pframe += pxmitpriv->frag_len;
pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
@@ -206,13 +93,13 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
{
/* exclude ICV */
u8 crc[4];
- struct arc4context mycontext;
signed int length;
u32 keylength;
u8 *pframe, *payload, *iv, wepkey[16];
u8 keyindex;
struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -230,11 +117,11 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
/* decrypt payload include icv */
- arcfour_init(&mycontext, wepkey, 3+keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
+ arc4_setkey(ctx, wepkey, 3 + keylength);
+ arc4_crypt(ctx, payload, payload, length);
/* calculate icv and compare the icv */
- *((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
+ *((u32 *)crc) = le32_to_cpu(~crc32_le(~0, payload, length - 4));
}
}
@@ -579,7 +466,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 ttkey[16];
u8 crc[4];
u8 hw_hdr_offset = 0;
- struct arc4context mycontext;
signed int curfragnum, length;
u8 *pframe, *payload, *iv, *prwskey;
@@ -587,6 +473,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
u32 res = _SUCCESS;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
@@ -619,18 +506,19 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
- *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+ *((__le32 *)crc) = ~crc32_le(~0, payload, length);
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ arc4_setkey(ctx, rc4key, 16);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc, 4);
} else {
length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
- *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ *((__le32 *)crc) = ~crc32_le(~0, payload, length);
+
+ arc4_setkey(ctx, rc4key, 16);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc, 4);
pframe += pxmitpriv->frag_len;
pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
@@ -650,7 +538,6 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
u8 rc4key[16];
u8 ttkey[16];
u8 crc[4];
- struct arc4context mycontext;
signed int length;
u8 *pframe, *payload, *iv, *prwskey;
@@ -658,6 +545,7 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -727,10 +615,10 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
/* 4 decrypt payload include icv */
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
+ arc4_setkey(ctx, rc4key, 16);
+ arc4_crypt(ctx, payload, payload, length);
- *((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
+ *((u32 *)crc) = le32_to_cpu(~crc32_le(~0, payload, length - 4));
if (crc[3] != payload[length - 1] || crc[2] != payload[length - 2] ||
crc[1] != payload[length - 3] || crc[0] != payload[length - 4])
@@ -749,44 +637,6 @@ exit:
#define MAX_MSG_SIZE 2048
-/*****************************/
-/******** SBOX Table *********/
-/*****************************/
-
- static const u8 sbox_table[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
- 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
- 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
- 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
- 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
- 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
- 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
- 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
- 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
- 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
- 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
- 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
- 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
- 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
- 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
- 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
- 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
- };
/*****************************/
/**** Function Prototypes ****/
@@ -815,13 +665,7 @@ static void construct_ctr_preload(u8 *ctr_preload,
u8 *pn_vector,
signed int c,
uint frtype); /* for CONFIG_IEEE80211W, none 11w also can use */
-static void xor_128(u8 *a, u8 *b, u8 *out);
-static void xor_32(u8 *a, u8 *b, u8 *out);
-static u8 sbox(u8 a);
-static void next_key(u8 *key, signed int round);
-static void byte_sub(u8 *in, u8 *out);
-static void shift_row(u8 *in, u8 *out);
-static void mix_column(u8 *in, u8 *out);
+
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
@@ -830,171 +674,13 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
/* Performs a 128 bit AES encrypt with */
/* 128 bit data. */
/****************************************/
-static void xor_128(u8 *a, u8 *b, u8 *out)
-{
- signed int i;
-
- for (i = 0; i < 16; i++)
- out[i] = a[i] ^ b[i];
-}
-
-
-static void xor_32(u8 *a, u8 *b, u8 *out)
-{
- signed int i;
-
- for (i = 0; i < 4; i++)
- out[i] = a[i] ^ b[i];
-}
-
-
-static u8 sbox(u8 a)
-{
- return sbox_table[(signed int)a];
-}
-
-
-static void next_key(u8 *key, signed int round)
-{
- u8 rcon;
- u8 sbox_key[4];
- static const u8 rcon_table[12] = {
- 0x01, 0x02, 0x04, 0x08,
- 0x10, 0x20, 0x40, 0x80,
- 0x1b, 0x36, 0x36, 0x36
- };
- sbox_key[0] = sbox(key[13]);
- sbox_key[1] = sbox(key[14]);
- sbox_key[2] = sbox(key[15]);
- sbox_key[3] = sbox(key[12]);
-
- rcon = rcon_table[round];
-
- xor_32(&key[0], sbox_key, &key[0]);
- key[0] = key[0] ^ rcon;
-
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
-}
-
-
-static void byte_sub(u8 *in, u8 *out)
-{
- signed int i;
-
- for (i = 0; i < 16; i++)
- out[i] = sbox(in[i]);
-}
-
-
-static void shift_row(u8 *in, u8 *out)
-{
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
-}
-
-static void mix_column(u8 *in, u8 *out)
-{
- signed int i;
- u8 add1b[4];
- u8 add1bf7[4];
- u8 rotl[4];
- u8 swap_halfs[4];
- u8 andf7[4];
- u8 rotr[4];
- u8 temp[4];
- u8 tempb[4];
-
- for (i = 0; i < 4; i++) {
- if ((in[i] & 0x80) == 0x80)
- add1b[i] = 0x1b;
- else
- add1b[i] = 0x00;
- }
-
- swap_halfs[0] = in[2]; /* Swap halfs */
- swap_halfs[1] = in[3];
- swap_halfs[2] = in[0];
- swap_halfs[3] = in[1];
-
- rotl[0] = in[3]; /* Rotate left 8 bits */
- rotl[1] = in[0];
- rotl[2] = in[1];
- rotl[3] = in[2];
-
- andf7[0] = in[0] & 0x7f;
- andf7[1] = in[1] & 0x7f;
- andf7[2] = in[2] & 0x7f;
- andf7[3] = in[3] & 0x7f;
-
- for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
- andf7[i] = andf7[i] << 1;
- if ((andf7[i-1] & 0x80) == 0x80)
- andf7[i] = (andf7[i] | 0x01);
- }
- andf7[0] = andf7[0] << 1;
- andf7[0] = andf7[0] & 0xfe;
-
- xor_32(add1b, andf7, add1bf7);
-
- xor_32(in, add1bf7, rotr);
-
- temp[0] = rotr[0]; /* Rotate right 8 bits */
- rotr[0] = rotr[1];
- rotr[1] = rotr[2];
- rotr[2] = rotr[3];
- rotr[3] = temp[0];
-
- xor_32(add1bf7, rotr, temp);
- xor_32(swap_halfs, rotl, tempb);
- xor_32(temp, tempb, out);
-}
-
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
{
- signed int round;
- signed int i;
- u8 intermediatea[16];
- u8 intermediateb[16];
- u8 round_key[16];
+ struct crypto_aes_ctx ctx;
- for (i = 0; i < 16; i++)
- round_key[i] = key[i];
-
- for (round = 0; round < 11; round++) {
- if (round == 0) {
- xor_128(round_key, data, ciphertext);
- next_key(round_key, round);
- } else if (round == 10) {
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- xor_128(intermediateb, round_key, ciphertext);
- } else { /* 1 - 9 */
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- mix_column(&intermediateb[0], &intermediatea[0]);
- mix_column(&intermediateb[4], &intermediatea[4]);
- mix_column(&intermediateb[8], &intermediatea[8]);
- mix_column(&intermediateb[12], &intermediatea[12]);
- xor_128(intermediatea, round_key, ciphertext);
- next_key(round_key, round);
- }
- }
+ aes_expandkey(&ctx, key, 16);
+ aes_encrypt(&ctx, ciphertext, data);
+ memzero_explicit(&ctx, sizeof(ctx));
}
/************************************************/
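The new aes128k128d() above delegates to the kernel's generic AES library instead of the driver's private tables. A minimal sketch of that pattern, assuming <crypto/aes.h> and <linux/string.h> (the example_ name is illustrative, not part of the driver):

#include <crypto/aes.h>
#include <linux/string.h>

static int example_aes128_encrypt_block(const u8 key[16], const u8 in[16], u8 out[16])
{
	struct crypto_aes_ctx ctx;
	int ret;

	ret = aes_expandkey(&ctx, key, 16);	/* build the round-key schedule */
	if (ret)
		return ret;
	aes_encrypt(&ctx, out, in);		/* note: destination before source */
	memzero_explicit(&ctx, sizeof(ctx));	/* wipe key material from the stack */
	return 0;
}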
@@ -1758,274 +1444,6 @@ BIP_exit:
return res;
}
-/* AES tables*/
-const u32 Te0[256] = {
- 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
- 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
- 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
- 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
- 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
- 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
- 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
- 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
- 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
- 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
- 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
- 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
- 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
- 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
- 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
- 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
- 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
- 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
- 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
- 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
- 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
- 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
- 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
- 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
- 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
- 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
- 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
- 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
- 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
- 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
- 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
- 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
- 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
- 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
- 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
- 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
- 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
- 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
- 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
- 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
- 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
- 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
- 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
- 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
- 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
- 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
- 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
- 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
- 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
- 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
- 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
- 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
- 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
- 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
- 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
- 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
- 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
- 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
- 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
- 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
- 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
- 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
- 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
- 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
-};
-
-const u32 Td0[256] = {
- 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
- 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
- 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
- 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
- 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
- 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
- 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
- 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
- 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
- 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
- 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
- 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
- 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
- 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
- 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
- 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
- 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
- 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
- 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
- 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
- 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
- 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
- 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
- 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
- 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
- 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
- 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
- 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
- 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
- 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
- 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
- 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
- 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
- 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
- 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
- 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
- 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
- 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
- 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
- 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
- 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
- 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
- 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
- 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
- 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
- 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
- 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
- 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
- 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
- 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
- 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
- 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
- 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
- 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
- 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
- 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
- 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
- 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
- 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
- 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
- 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
- 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
- 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
- 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
-};
-
-const u8 Td4s[256] = {
- 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
- 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
- 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
- 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
- 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
- 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
- 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
- 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
- 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
- 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
- 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
- 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
- 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
- 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
- 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
- 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
- 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
- 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
- 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
- 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
- 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
- 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
- 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
- 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
- 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
- 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
- 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
- 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
- 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
- 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
- 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
- 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
-};
-
-const u8 rcons[] = {
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
- /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
-};
-
-/**
- * Expand the cipher key into the encryption key schedule.
- *
- * @return the number of rounds for the given cipher key size.
- */
-static void rijndaelKeySetupEnc(u32 rk[/*44*/], const u8 cipherKey[])
-{
- int i;
- u32 temp;
-
- rk[0] = GETU32(cipherKey);
- rk[1] = GETU32(cipherKey + 4);
- rk[2] = GETU32(cipherKey + 8);
- rk[3] = GETU32(cipherKey + 12);
- for (i = 0; i < 10; i++) {
- temp = rk[3];
- rk[4] = rk[0] ^
- TE421(temp) ^ TE432(temp) ^ TE443(temp) ^ TE414(temp) ^
- RCON(i);
- rk[5] = rk[1] ^ rk[4];
- rk[6] = rk[2] ^ rk[5];
- rk[7] = rk[3] ^ rk[6];
- rk += 4;
- }
-}
-
-static void rijndaelEncrypt(u32 rk[/*44*/], u8 pt[16], u8 ct[16])
-{
- u32 s0, s1, s2, s3, t0, t1, t2, t3;
- int Nr = 10;
- int r;
-
- /*
- * map byte array block to cipher state
- * and add initial round key:
- */
- s0 = GETU32(pt) ^ rk[0];
- s1 = GETU32(pt + 4) ^ rk[1];
- s2 = GETU32(pt + 8) ^ rk[2];
- s3 = GETU32(pt + 12) ^ rk[3];
-
-#define ROUND(i, d, s) \
- do { \
- d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \
- d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
- d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
- d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
- } while (0)
-
- /* Nr - 1 full rounds: */
- r = Nr >> 1;
- for (;;) {
- ROUND(1, t, s);
- rk += 8;
- if (--r == 0)
- break;
- ROUND(0, s, t);
- }
-
-#undef ROUND
-
- /*
- * apply last round and
- * map cipher state to byte array block:
- */
- s0 = TE41(t0) ^ TE42(t1) ^ TE43(t2) ^ TE44(t3) ^ rk[0];
- PUTU32(ct, s0);
- s1 = TE41(t1) ^ TE42(t2) ^ TE43(t3) ^ TE44(t0) ^ rk[1];
- PUTU32(ct + 4, s1);
- s2 = TE41(t2) ^ TE42(t3) ^ TE43(t0) ^ TE44(t1) ^ rk[2];
- PUTU32(ct + 8, s2);
- s3 = TE41(t3) ^ TE42(t0) ^ TE43(t1) ^ TE44(t2) ^ rk[3];
- PUTU32(ct + 12, s3);
-}
-
-static void *aes_encrypt_init(u8 *key, size_t len)
-{
- u32 *rk;
-
- if (len != 16)
- return NULL;
- rk = rtw_malloc(AES_PRIV_SIZE);
- if (rk == NULL)
- return NULL;
- rijndaelKeySetupEnc(rk, key);
- return rk;
-}
-
-static void aes_128_encrypt(void *ctx, u8 *plain, u8 *crypt)
-{
- rijndaelEncrypt(ctx, plain, crypt);
-}
-
static void gf_mulx(u8 *pad)
{
int i, carry;
@@ -2039,11 +1457,6 @@ static void gf_mulx(u8 *pad)
pad[AES_BLOCK_SIZE - 1] ^= 0x87;
}
-static void aes_encrypt_deinit(void *ctx)
-{
- kfree_sensitive(ctx);
-}
-
/**
* omac1_aes_128_vector - One-Key CBC MAC (OMAC1) hash with AES-128
* @key: 128-bit key for the hash operation
@@ -2058,15 +1471,16 @@ static void aes_encrypt_deinit(void *ctx)
* (SP) 800-38B.
*/
static int omac1_aes_128_vector(u8 *key, size_t num_elem,
- u8 *addr[], size_t *len, u8 *mac)
+ u8 *addr[], size_t *len, u8 *mac)
{
- void *ctx;
+ struct crypto_aes_ctx ctx;
u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
u8 *pos, *end;
size_t i, e, left, total_len;
+ int ret;
- ctx = aes_encrypt_init(key, 16);
- if (ctx == NULL)
+ ret = aes_expandkey(&ctx, key, 16);
+ if (ret)
return -1;
memset(cbc, 0, AES_BLOCK_SIZE);
@@ -2089,12 +1503,12 @@ static int omac1_aes_128_vector(u8 *key, size_t num_elem,
}
}
if (left > AES_BLOCK_SIZE)
- aes_128_encrypt(ctx, cbc, cbc);
+ aes_encrypt(&ctx, cbc, cbc);
left -= AES_BLOCK_SIZE;
}
memset(pad, 0, AES_BLOCK_SIZE);
- aes_128_encrypt(ctx, pad, pad);
+ aes_encrypt(&ctx, pad, pad);
gf_mulx(pad);
if (left || total_len == 0) {
@@ -2112,8 +1526,8 @@ static int omac1_aes_128_vector(u8 *key, size_t num_elem,
for (i = 0; i < AES_BLOCK_SIZE; i++)
pad[i] ^= cbc[i];
- aes_128_encrypt(ctx, pad, mac);
- aes_encrypt_deinit(ctx);
+ aes_encrypt(&ctx, pad, mac);
+ memzero_explicit(&ctx, sizeof(ctx));
return 0;
}
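omac1_aes_128_vector() implements AES-128 CMAC (OMAC1): gf_mulx() doubles the subkey in GF(2^128) with the 0x87 reduction, and the final block is encrypted with the same expanded key that now comes from aes_expandkey(). For comparison, the same MAC can be produced through the crypto API when "cmac(aes)" is available (CONFIG_CRYPTO_CMAC); a hedged sketch, with example_ names that are illustrative only:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_cmac_aes128(const u8 key[16], const u8 *data, unsigned int len, u8 mac[16])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, 16);
	if (!ret) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, len, mac);	/* one-shot CMAC */
	}
	crypto_free_shash(tfm);
	return ret;
}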
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 85663182b388..67ca219f95bf 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_STA_MGT_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
@@ -119,7 +117,6 @@ void kfree_all_stainfo(struct sta_priv *pstapriv);
void kfree_all_stainfo(struct sta_priv *pstapriv)
{
struct list_head *plist, *phead;
- struct sta_info *psta = NULL;
spin_lock_bh(&pstapriv->sta_hash_lock);
@@ -127,7 +124,6 @@ void kfree_all_stainfo(struct sta_priv *pstapriv)
plist = get_next(phead);
while (phead != plist) {
- psta = container_of(plist, struct sta_info, list);
plist = get_next(plist);
}
@@ -152,13 +148,11 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
-
- while (phead != plist) {
+ list_for_each(plist, phead) {
int i;
- psta = container_of(plist, struct sta_info, hash_list);
- plist = get_next(plist);
+ psta = list_entry(plist, struct sta_info,
+ hash_list);
for (i = 0; i < 16 ; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
@@ -429,7 +423,7 @@ exit:
/* free all stainfo which in sta_hash[all] */
void rtw_free_all_stainfo(struct adapter *padapter)
{
- struct list_head *plist, *phead;
+ struct list_head *plist, *phead, *tmp;
s32 index;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -442,12 +436,8 @@ void rtw_free_all_stainfo(struct adapter *padapter)
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
-
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ psta = list_entry(plist, struct sta_info, hash_list);
if (pbcmc_stainfo != psta)
rtw_free_stainfo(padapter, psta);
@@ -479,17 +469,14 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
spin_lock_bh(&pstapriv->sta_hash_lock);
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
+ list_for_each(plist, phead) {
+ psta = list_entry(plist, struct sta_info, hash_list);
if ((!memcmp(psta->hwaddr, addr, ETH_ALEN)))
/* if found the matched address */
break;
psta = NULL;
- plist = get_next(plist);
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
@@ -499,7 +486,6 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
struct sta_info *psta;
- u32 res = _SUCCESS;
NDIS_802_11_MAC_ADDRESS bcast_addr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -507,15 +493,12 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
- if (!psta) {
- res = _FAIL;
- goto exit;
- }
+ if (!psta)
+ return _FAIL;
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
-exit:
return _SUCCESS;
}
@@ -539,10 +522,8 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ list_for_each(plist, phead) {
+ paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN))
if (paclnode->valid == true) {
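The conversions above replace open-coded get_next() loops with the list.h iterators: list_for_each() for read-only walks, and list_for_each_safe() where the current entry may be unlinked or freed inside the loop (as in rtw_free_all_stainfo()), since the extra cursor keeps the walk valid after a deletion. list_entry() is simply container_of() for list nodes. A minimal sketch of the two forms, with illustrative example_ names:

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	int val;
	struct list_head link;
};

static void example_walk_and_free(struct list_head *head)
{
	struct list_head *pos, *tmp;
	struct example_item *it;

	list_for_each(pos, head) {			/* read-only traversal */
		it = list_entry(pos, struct example_item, link);
		/* inspect it->val */
	}

	list_for_each_safe(pos, tmp, head) {		/* safe against deletion */
		it = list_entry(pos, struct example_item, link);
		list_del(&it->link);
		kfree(it);
	}
}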
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index f9bd7c167da7..c06b74f6569a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -55,9 +55,6 @@ u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
if (cur_rf_type == RF_1T1R) {
rf_type = RF_1T1R;
- } else if (IsSupportedVHT(psta->wireless_mode)) {
- if (psta->ra_mask & 0xffc00000)
- rf_type = RF_2T2R;
} else if (IsSupportedHT(psta->wireless_mode)) {
if (psta->ra_mask & 0xfff00000)
rf_type = RF_2T2R;
@@ -67,7 +64,6 @@ u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
case WIRELESS_11B:
raid = RATEID_IDX_B;
break;
- case WIRELESS_11A:
case WIRELESS_11G:
raid = RATEID_IDX_G;
break;
@@ -75,8 +71,6 @@ u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
raid = RATEID_IDX_BG;
break;
case WIRELESS_11_24N:
- case WIRELESS_11_5N:
- case WIRELESS_11A_5N:
case WIRELESS_11G_24N:
if (rf_type == RF_2T2R)
raid = RATEID_IDX_GN_N2SS;
@@ -342,9 +336,7 @@ u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset)
{
u8 center_ch = channel;
- if (chnl_bw == CHANNEL_WIDTH_80) {
- center_ch = 7;
- } else if (chnl_bw == CHANNEL_WIDTH_40) {
+ if (chnl_bw == CHANNEL_WIDTH_40) {
if (chnl_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
center_ch = channel + 2;
else
@@ -381,14 +373,6 @@ void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigne
center_ch = rtw_get_center_ch(channel, bwmode, channel_offset);
- if (bwmode == CHANNEL_WIDTH_80) {
- if (center_ch > channel)
- chnl_offset80 = HAL_PRIME_CHNL_OFFSET_LOWER;
- else if (center_ch < channel)
- chnl_offset80 = HAL_PRIME_CHNL_OFFSET_UPPER;
- else
- chnl_offset80 = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
/* set Channel */
if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
@@ -777,6 +761,32 @@ int WMM_param_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
return true;
}
+static void sort_wmm_ac_params(u32 *inx, u32 *edca)
+{
+ u32 i, j, change_inx = false;
+
+ /* entry index: 0->vo, 1->vi, 2->be, 3->bk. */
+ for (i = 0; i < 4; i++) {
+ for (j = i + 1; j < 4; j++) {
+ /* compare CW and AIFS */
+ if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
+ change_inx = true;
+ } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
+ /* compare TXOP */
+ if ((edca[j] >> 16) > (edca[i] >> 16))
+ change_inx = true;
+ }
+
+ if (change_inx) {
+ swap(edca[i], edca[j]);
+ swap(inx[i], inx[j]);
+
+ change_inx = false;
+ }
+ }
+ }
+}
+
void WMMOnAssocRsp(struct adapter *padapter)
{
u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
@@ -801,7 +811,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
AIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
- if (pmlmeext->cur_wireless_mode & (WIRELESS_11G | WIRELESS_11A)) {
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11G) {
ECWMin = 4;
ECWMax = 10;
} else if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
@@ -873,35 +883,8 @@ void WMMOnAssocRsp(struct adapter *padapter)
inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
- if (pregpriv->wifi_spec == 1) {
- u32 j, tmp, change_inx = false;
-
- /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
- for (i = 0; i < 4; i++) {
- for (j = i+1; j < 4; j++) {
- /* compare CW and AIFS */
- if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
- change_inx = true;
- } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
- /* compare TXOP */
- if ((edca[j] >> 16) > (edca[i] >> 16))
- change_inx = true;
- }
-
- if (change_inx) {
- tmp = edca[i];
- edca[i] = edca[j];
- edca[j] = tmp;
-
- tmp = inx[i];
- inx[i] = inx[j];
- inx[j] = tmp;
-
- change_inx = false;
- }
- }
- }
- }
+ if (pregpriv->wifi_spec == 1)
+ sort_wmm_ac_params(inx, edca);
for (i = 0; i < 4; i++)
pxmitpriv->wmm_para_seq[i] = inx[i];
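sort_wmm_ac_params() factors the WMM AC reordering out of WMMOnAssocRsp() and uses the kernel's swap() helper in place of the open-coded temporaries; swap() is a macro built on a typeof() temporary. A small hedged sketch of the same sort-two-arrays-by-key idea (example_ name is illustrative):

#include <linux/kernel.h>	/* swap(); newer trees define it in linux/minmax.h */

static void example_sort_by_key(u32 key[4], u32 idx[4])
{
	int i, j;

	for (i = 0; i < 3; i++)
		for (j = i + 1; j < 4; j++)
			if (key[j] < key[i]) {
				swap(key[i], key[j]);	/* keep key[] and idx[] in step */
				swap(idx[i], idx[j]);
			}
}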
@@ -926,9 +909,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_80211_var_
if (phtpriv->ht_option == false)
return;
- if (pmlmeext->cur_bwmode >= CHANNEL_WIDTH_80)
- return;
-
if (pIE->Length > sizeof(struct HT_info_element))
return;
@@ -1255,34 +1235,34 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* parsing HT_CAP_IE */
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, WLAN_EID_HT_CAPABILITY, &len, bssid->IELength - _FIXED_IE_LENGTH_);
if (p && len > 0) {
- pht_cap = (struct ieee80211_ht_cap *)(p + 2);
- ht_cap_info = le16_to_cpu(pht_cap->cap_info);
+ pht_cap = (struct ieee80211_ht_cap *)(p + 2);
+ ht_cap_info = le16_to_cpu(pht_cap->cap_info);
} else {
- ht_cap_info = 0;
+ ht_cap_info = 0;
}
/* parsing HT_INFO_IE */
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION, &len, bssid->IELength - _FIXED_IE_LENGTH_);
if (p && len > 0) {
- pht_info = (struct HT_info_element *)(p + 2);
- ht_info_infos_0 = pht_info->infos[0];
+ pht_info = (struct HT_info_element *)(p + 2);
+ ht_info_infos_0 = pht_info->infos[0];
} else {
- ht_info_infos_0 = 0;
+ ht_info_infos_0 = 0;
}
if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
- ((ht_info_infos_0&0x03) != (cur_network->BcnInfo.ht_info_infos_0&0x03))) {
- {
- /* bcn_info_update */
- cur_network->BcnInfo.ht_cap_info = ht_cap_info;
- cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
- /* to do : need to check that whether modify related register of BB or not */
- }
- /* goto _mismatch; */
+ ((ht_info_infos_0&0x03) != (cur_network->BcnInfo.ht_info_infos_0&0x03))) {
+ {
+ /* bcn_info_update */
+ cur_network->BcnInfo.ht_cap_info = ht_cap_info;
+ cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
+ /* to do : need to check that whether modify related register of BB or not */
+ }
+ /* goto _mismatch; */
}
/* Checking for channel */
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, WLAN_EID_DS_PARAMS, &len, bssid->IELength - _FIXED_IE_LENGTH_);
if (p) {
- bcn_channel = *(p + 2);
+ bcn_channel = *(p + 2);
} else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */
rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION,
&len, bssid->IELength - _FIXED_IE_LENGTH_);
@@ -1293,7 +1273,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
if (bcn_channel != Adapter->mlmeextpriv.cur_channel)
- goto _mismatch;
+ goto _mismatch;
/* checking SSID */
ssid_len = 0;
@@ -1496,6 +1476,33 @@ void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
Update_RA_Entry(padapter, psta);
}
+static u32 get_realtek_assoc_AP_vender(struct ndis_80211_var_ie *pIE)
+{
+ u32 Vender = HT_IOT_PEER_REALTEK;
+
+ if (pIE->Length >= 5) {
+ if (pIE->data[4] == 1)
+ /* if (pIE->data[5] & RT_HT_CAP_USE_LONG_PREAMBLE) */
+ /* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_LONG_PREAMBLE; */
+ if (pIE->data[5] & RT_HT_CAP_USE_92SE)
+ /* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE; */
+ Vender = HT_IOT_PEER_REALTEK_92SE;
+
+ if (pIE->data[5] & RT_HT_CAP_USE_SOFTAP)
+ Vender = HT_IOT_PEER_REALTEK_SOFTAP;
+
+ if (pIE->data[4] == 2) {
+ if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_BCUT)
+ Vender = HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP;
+
+ if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_CCUT)
+ Vender = HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP;
+ }
+ }
+
+ return Vender;
+}
+
unsigned char check_assoc_AP(u8 *pframe, uint len)
{
unsigned int i;
@@ -1506,47 +1513,24 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
switch (pIE->ElementID) {
case WLAN_EID_VENDOR_SPECIFIC:
- if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) || (!memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
+ if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) || (!memcmp(pIE->data, ARTHEROS_OUI2, 3)))
return HT_IOT_PEER_ATHEROS;
- } else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
- (!memcmp(pIE->data, BROADCOM_OUI3, 3))) {
+ else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
+ (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
+ (!memcmp(pIE->data, BROADCOM_OUI3, 3)))
return HT_IOT_PEER_BROADCOM;
- } else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
+ else if (!memcmp(pIE->data, MARVELL_OUI, 3))
return HT_IOT_PEER_MARVELL;
- } else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
+ else if (!memcmp(pIE->data, RALINK_OUI, 3))
return HT_IOT_PEER_RALINK;
- } else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
+ else if (!memcmp(pIE->data, CISCO_OUI, 3))
return HT_IOT_PEER_CISCO;
- } else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
- u32 Vender = HT_IOT_PEER_REALTEK;
-
- if (pIE->Length >= 5) {
- if (pIE->data[4] == 1)
- /* if (pIE->data[5] & RT_HT_CAP_USE_LONG_PREAMBLE) */
- /* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_LONG_PREAMBLE; */
- if (pIE->data[5] & RT_HT_CAP_USE_92SE)
- /* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE; */
- Vender = HT_IOT_PEER_REALTEK_92SE;
-
- if (pIE->data[5] & RT_HT_CAP_USE_SOFTAP)
- Vender = HT_IOT_PEER_REALTEK_SOFTAP;
-
- if (pIE->data[4] == 2) {
- if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_BCUT)
- Vender = HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP;
-
- if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_CCUT)
- Vender = HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP;
- }
- }
-
- return Vender;
- } else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ else if (!memcmp(pIE->data, REALTEK_OUI, 3))
+ return get_realtek_assoc_AP_vender(pIE);
+ else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3))
return HT_IOT_PEER_AIRGO;
- } else {
+ else
break;
- }
default:
break;
@@ -1620,7 +1604,7 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
} else {
/* Filen: See 802.11-2007 p.90 */
- if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N | WIRELESS_11A | WIRELESS_11_5N | WIRELESS_11AC)) {
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N)) {
pmlmeinfo->slotTime = SHORT_SLOT_TIME;
} else if (pmlmeext->cur_wireless_mode & (WIRELESS_11G)) {
if ((updateCap & cShortSlotTime) /* && (!(pMgntInfo->pHTInfo->RT2RT_HT_Mode & RT_HT_CAP_USE_LONG_PREAMBLE)) */)
@@ -1650,9 +1634,7 @@ void update_wireless_mode(struct adapter *padapter)
if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
pmlmeinfo->HT_enable = 1;
- if (pmlmeinfo->VHT_enable)
- network_type = WIRELESS_11AC;
- else if (pmlmeinfo->HT_enable)
+ if (pmlmeinfo->HT_enable)
network_type = WIRELESS_11_24N;
if (rtw_is_cckratesonly_included(rate))
@@ -1716,7 +1698,7 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
{
struct sta_info *psta;
- u16 tid, start_seq, param;
+ u16 tid, param;
struct recv_reorder_ctrl *preorder_ctrl;
struct sta_priv *pstapriv = &padapter->stapriv;
struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
@@ -1726,8 +1708,6 @@ void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
psta = rtw_get_stainfo(pstapriv, addr);
if (psta) {
- start_seq = le16_to_cpu(preq->BA_starting_seqctrl) >> 4;
-
param = le16_to_cpu(preq->BA_para_set);
tid = (param>>2)&0x0f;
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index bd3acdd7d75f..79e4d7df1ef5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RTW_XMIT_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
@@ -323,15 +321,12 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
u8 query_ra_short_GI(struct sta_info *psta)
{
- u8 sgi = false, sgi_20m = false, sgi_40m = false, sgi_80m = false;
+ u8 sgi = false, sgi_20m = false, sgi_40m = false;
sgi_20m = psta->htpriv.sgi_20m;
sgi_40m = psta->htpriv.sgi_40m;
switch (psta->bw_mode) {
- case CHANNEL_WIDTH_80:
- sgi = sgi_80m;
- break;
case CHANNEL_WIDTH_40:
sgi = sgi_40m;
break;
@@ -647,7 +642,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->pktlen = pktfile.pkt_len;
- if (ETH_P_IP == pattrib->ether_type) {
+ if (pattrib->ether_type == ETH_P_IP) {
/* The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time */
/* to prevent DHCP protocol fail */
@@ -657,7 +652,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
- if (ETH_P_IP == pattrib->ether_type) {/* IP header */
+ if (pattrib->ether_type == ETH_P_IP) {/* IP header */
if (((tmp[21] == 68) && (tmp[23] == 67)) ||
((tmp[21] == 67) && (tmp[23] == 68))) {
/* 68 : UDP BOOTP client */
@@ -675,7 +670,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
if (piphdr->protocol == 0x1) /* protocol type in ip header 0x1 is ICMP */
pattrib->icmp_pkt = 1;
}
- } else if (0x888e == pattrib->ether_type) {
+ } else if (pattrib->ether_type == 0x888e) {
netdev_dbg(padapter->pnetdev, "send eapol packet\n");
}
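In the ether_type checks above, 0x888e is the EAPOL ethertype, defined as ETH_P_PAE in <linux/if_ether.h>, so a later cleanup could compare against the named constant. A tiny illustrative helper (not from the driver):

#include <linux/if_ether.h>
#include <linux/types.h>

static bool example_is_eapol(u16 ether_type)
{
	return ether_type == ETH_P_PAE;		/* 0x888E */
}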
@@ -1725,18 +1720,14 @@ exit:
void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
{
- struct list_head *plist, *phead;
+ struct list_head *plist, *phead, *tmp;
struct xmit_frame *pxmitframe;
spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
- plist = get_next(phead);
-
- while (phead != plist) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ pxmitframe = list_entry(plist, struct xmit_frame, list);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
@@ -2128,7 +2119,7 @@ signed int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct x
static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
{
signed int ret;
- struct list_head *plist, *phead;
+ struct list_head *plist, *phead, *tmp;
u8 ac_index;
struct tx_servq *ptxservq;
struct pkt_attrib *pattrib;
@@ -2136,12 +2127,8 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
phead = get_list_head(pframequeue);
- plist = get_next(phead);
-
- while (phead != plist) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ pxmitframe = list_entry(plist, struct xmit_frame, list);
pattrib = &pxmitframe->attrib;
@@ -2201,7 +2188,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
{
u8 update_mask = 0, wmmps_ac = 0;
struct sta_info *psta_bmc;
- struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2211,12 +2198,9 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = get_next(xmitframe_plist);
+ list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
+ pxmitframe = list_entry(xmitframe_plist, struct xmit_frame,
+ list);
list_del_init(&pxmitframe->list);
@@ -2285,12 +2269,9 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no any sta in ps mode */
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = get_next(xmitframe_plist);
+ list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
+ pxmitframe = list_entry(xmitframe_plist,
+ struct xmit_frame, list);
list_del_init(&pxmitframe->list);
@@ -2324,7 +2305,7 @@ _exit:
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
{
u8 wmmps_ac = 0;
- struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2332,12 +2313,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
-
- while (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = get_next(xmitframe_plist);
+ list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
+ pxmitframe = list_entry(xmitframe_plist, struct xmit_frame,
+ list);
switch (pxmitframe->attrib.priority) {
case 1:
@@ -2524,7 +2502,7 @@ void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
sctx->status = RTW_SCTX_SUBMITTED;
}
-int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
+int rtw_sctx_wait(struct submit_ctx *sctx)
{
int ret = _FAIL;
unsigned long expire;
@@ -2565,7 +2543,7 @@ int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
pack_tx_ops->timeout_ms = timeout_ms;
pack_tx_ops->status = RTW_SCTX_SUBMITTED;
- return rtw_sctx_wait(pack_tx_ops, __func__);
+ return rtw_sctx_wait(pack_tx_ops);
}
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
index 503790924532..dc58bb87f1b0 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
@@ -38,110 +38,44 @@ static u8 halbtc8723b1ant_BtRssiState(
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)
) {
- if (btRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT)) {
+ if (btRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT))
btRssiState = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state switch to High\n")
- );
- } else {
+ else
btRssiState = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state stay at Low\n")
- );
- }
} else {
- if (btRssi < rssiThresh) {
+ if (btRssi < rssiThresh)
btRssiState = BTC_RSSI_STATE_LOW;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state switch to Low\n")
- );
- } else {
+ else
btRssiState = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state stay at High\n")
- );
- }
}
} else if (levelNum == 3) {
- if (rssiThresh > rssiThresh1) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi thresh error!!\n")
- );
+ if (rssiThresh > rssiThresh1)
return pCoexSta->preBtRssiState;
- }
if (
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)
) {
- if (btRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT)) {
+ if (btRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT))
btRssiState = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state switch to Medium\n")
- );
- } else {
+ else
btRssiState = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state stay at Low\n")
- );
- }
} else if (
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM)
) {
- if (btRssi >= (rssiThresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT)) {
+ if (btRssi >= (rssiThresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT))
btRssiState = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state switch to High\n")
- );
- } else if (btRssi < rssiThresh) {
+ else if (btRssi < rssiThresh)
btRssiState = BTC_RSSI_STATE_LOW;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state switch to Low\n")
- );
- } else {
+ else
btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state stay at Medium\n")
- );
- }
} else {
- if (btRssi < rssiThresh1) {
+ if (btRssi < rssiThresh1)
btRssiState = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state switch to Medium\n")
- );
- } else {
+ else
btRssiState = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_RSSI_STATE,
- ("[BTCoex], BT Rssi state stay at High\n")
- );
- }
}
}
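halbtc8723b1ant_BtRssiState() keeps its threshold logic after the debug prints are dropped: leaving the low state requires clearing the threshold plus a tolerance (BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT), while dropping back only requires falling below the bare threshold, which gives the BT RSSI state hysteresis so it does not flap around the boundary. A hedged two-level sketch of that idea (names are illustrative):

static int example_rssi_state(int prev_high, int rssi, int thresh, int tol)
{
	if (!prev_high)
		return rssi >= thresh + tol;	/* must clear threshold + tolerance to go high */
	return rssi >= thresh;			/* only fall below threshold to go low */
}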
@@ -297,7 +231,7 @@ static void halbtc8723b1ant_LimitedRx(
u8 rxAggSize = aggBufSize;
/* */
- /* Rx Aggregation related setting */
+ /* Rx Aggregation related setting */
/* */
pBtCoexist->fBtcSet(
pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejectRxAgg
@@ -316,18 +250,12 @@ static void halbtc8723b1ant_LimitedRx(
static void halbtc8723b1ant_QueryBtInfo(struct btc_coexist *pBtCoexist)
{
- u8 H2C_Parameter[1] = {0};
+ u8 H2C_Parameter[1] = {0};
pCoexSta->bC2hBtInfoReqSent = true;
H2C_Parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- ("[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", H2C_Parameter[0])
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x61, 1, H2C_Parameter);
}
@@ -367,18 +295,6 @@ static void halbtc8723b1ant_MonitorBtCtr(struct btc_coexist *pBtCoexist)
if ((pCoexSta->lowPriorityTx >= 1050) && (!pCoexSta->bC2hBtInquiryPage))
pCoexSta->popEventCnt++;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- (
- "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
- regHPRx,
- regHPTx,
- regLPRx,
- regLPTx
- )
- );
-
/* reset counter */
pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0xc);
@@ -445,15 +361,15 @@ static void halbtc8723b1ant_MonitorWiFiCtr(struct btc_coexist *pBtCoexist)
)
) {
if (nCCKLockCounter < 5)
- nCCKLockCounter++;
+ nCCKLockCounter++;
} else {
if (nCCKLockCounter > 0)
- nCCKLockCounter--;
+ nCCKLockCounter--;
}
} else {
if (nCCKLockCounter > 0)
- nCCKLockCounter--;
+ nCCKLockCounter--;
}
} else {
if (nCCKLockCounter > 0)
@@ -463,14 +379,14 @@ static void halbtc8723b1ant_MonitorWiFiCtr(struct btc_coexist *pBtCoexist)
if (!pCoexSta->bPreCCKLock) {
if (nCCKLockCounter >= 5)
- pCoexSta->bCCKLock = true;
+ pCoexSta->bCCKLock = true;
else
- pCoexSta->bCCKLock = false;
+ pCoexSta->bCCKLock = false;
} else {
if (nCCKLockCounter == 0)
- pCoexSta->bCCKLock = false;
+ pCoexSta->bCCKLock = false;
else
- pCoexSta->bCCKLock = true;
+ pCoexSta->bCCKLock = true;
}
pCoexSta->bPreCCKLock = pCoexSta->bCCKLock;
@@ -586,14 +502,8 @@ static u8 halbtc8723b1ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
- if (!pBtLinkInfo->bBtLinkExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], No BT link exists!!!\n")
- );
+ if (!pBtLinkInfo->bBtLinkExist)
return algorithm;
- }
if (pBtLinkInfo->bScoExist)
numOfDiffProfile++;
@@ -606,151 +516,62 @@ static u8 halbtc8723b1ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
if (numOfDiffProfile == 1) {
if (pBtLinkInfo->bScoExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO only\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
if (pBtLinkInfo->bHidExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = HID only\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (pBtLinkInfo->bA2dpExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = A2DP only\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
} else if (pBtLinkInfo->bPanExist) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = PAN(HS) only\n")
- );
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_PANHS;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = PAN(EDR) only\n")
- );
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR;
- }
}
}
} else if (numOfDiffProfile == 2) {
if (pBtLinkInfo->bScoExist) {
if (pBtLinkInfo->bHidExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + HID\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (pBtLinkInfo->bA2dpExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + A2DP ==> SCO\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else if (pBtLinkInfo->bPanExist) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + PAN(HS)\n")
- );
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + PAN(EDR)\n")
- );
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
}
} else {
if (pBtLinkInfo->bHidExist && pBtLinkInfo->bA2dpExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = HID + A2DP\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else if (pBtLinkInfo->bHidExist && pBtLinkInfo->bPanExist) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = HID + PAN(HS)\n")
- );
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = HID + PAN(EDR)\n")
- );
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
} else if (pBtLinkInfo->bPanExist && pBtLinkInfo->bA2dpExist) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = A2DP + PAN(HS)\n")
- );
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = A2DP + PAN(EDR)\n")
- );
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
- }
}
}
} else if (numOfDiffProfile == 3) {
if (pBtLinkInfo->bScoExist) {
if (pBtLinkInfo->bHidExist && pBtLinkInfo->bA2dpExist) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n")
- );
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (
pBtLinkInfo->bHidExist && pBtLinkInfo->bPanExist
) {
- if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"));
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"));
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
} else if (pBtLinkInfo->bPanExist && pBtLinkInfo->bA2dpExist) {
- if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"));
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n")
- );
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
}
} else {
if (
@@ -758,21 +579,10 @@ static u8 halbtc8723b1ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bPanExist &&
pBtLinkInfo->bA2dpExist
) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n")
- );
+ if (bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n")
- );
+ else
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
}
}
} else if (numOfDiffProfile >= 3) {
@@ -782,21 +592,9 @@ static u8 halbtc8723b1ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bPanExist &&
pBtLinkInfo->bA2dpExist
) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n")
- );
-
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR) ==>PAN(EDR)+HID\n")
- );
+ if (!bBtHsOn)
algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
+
}
}
}
@@ -808,7 +606,7 @@ static void halbtc8723b1ant_SetSwPenaltyTxRateAdaptive(
struct btc_coexist *pBtCoexist, bool bLowPenaltyRa
)
{
- u8 H2C_Parameter[6] = {0};
+ u8 H2C_Parameter[6] = {0};
H2C_Parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */
@@ -820,15 +618,6 @@ static void halbtc8723b1ant_SetSwPenaltyTxRateAdaptive(
H2C_Parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (bLowPenaltyRa ? "ON!!" : "OFF!!")
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x69, 6, H2C_Parameter);
}
@@ -857,32 +646,12 @@ static void halbtc8723b1ant_SetCoexTable(
u8 val0x6cc
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0)
- );
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c0, val0x6c0);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4)
- );
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c4, val0x6c4);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8)
- );
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c8, val0x6c8);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc)
- );
pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cc, val0x6cc);
}
@@ -895,15 +664,6 @@ static void halbtc8723b1ant_CoexTable(
u8 val0x6cc
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW,
- (
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
- (bForceExec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6cc
- )
- );
pCoexDm->curVal0x6c0 = val0x6c0;
pCoexDm->curVal0x6c4 = val0x6c4;
pCoexDm->curVal0x6c8 = val0x6c8;
@@ -933,12 +693,6 @@ static void halbtc8723b1ant_CoexTableWithType(
struct btc_coexist *pBtCoexist, bool bForceExec, u8 type
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], ********** CoexTable(%d) **********\n", type)
- );
-
pCoexSta->nCoexTableType = type;
switch (type) {
@@ -996,15 +750,6 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(
if (bEnable)
H2C_Parameter[0] |= BIT0; /* function enable */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- H2C_Parameter[0]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x63, 1, H2C_Parameter);
}
@@ -1012,28 +757,9 @@ static void halbtc8723b1ant_IgnoreWlanAct(
struct btc_coexist *pBtCoexist, bool bForceExec, bool bEnable
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- (
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (bForceExec ? "force to" : ""),
- (bEnable ? "ON" : "OFF")
- )
- );
pCoexDm->bCurIgnoreWlanAct = bEnable;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- pCoexDm->bPreIgnoreWlanAct,
- pCoexDm->bCurIgnoreWlanAct
- )
- );
-
if (pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
return;
}
@@ -1057,44 +783,14 @@ static void halbtc8723b1ant_LpsRpwm(
struct btc_coexist *pBtCoexist, bool bForceExec, u8 lpsVal, u8 rpwmVal
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- (
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (bForceExec ? "force to" : ""),
- lpsVal,
- rpwmVal
- )
- );
pCoexDm->curLps = lpsVal;
pCoexDm->curRpwm = rpwmVal;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
- pCoexDm->curLps,
- pCoexDm->curRpwm
- )
- );
-
if (
(pCoexDm->preLps == pCoexDm->curLps) &&
(pCoexDm->preRpwm == pCoexDm->curRpwm)
) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
- pCoexDm->preRpwm,
- pCoexDm->curRpwm
- )
- );
-
return;
}
}
@@ -1108,12 +804,6 @@ static void halbtc8723b1ant_SwMechanism(
struct btc_coexist *pBtCoexist, bool bLowPenaltyRA
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_MONITOR,
- ("[BTCoex], SM[LpRA] = %d\n", bLowPenaltyRA)
- );
-
halbtc8723b1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA);
}
@@ -1189,13 +879,10 @@ static void halbtc8723b1ant_SetAntPath(
u1Tmp = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x49d);
cntBtCalChk++;
- if (u1Tmp & BIT0) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ########### BT is calibrating (wait cnt =%d) ###########\n", cntBtCalChk));
+ if (u1Tmp & BIT0)
mdelay(50);
- } else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ********** BT is NOT calibrating (wait cnt =%d)**********\n", cntBtCalChk));
+ else
break;
- }
}
/* set grant_bt to PTA */
@@ -1318,11 +1005,6 @@ static void halbtc8723b1ant_SetFwPstdma(
if (bApEnable) {
if (byte1 & BIT4 && !(byte1 & BIT5)) {
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("[BTCoex], FW for 1Ant AP mode\n")
- );
realByte1 &= ~BIT4;
realByte1 |= BIT5;
@@ -1343,19 +1025,6 @@ static void halbtc8723b1ant_SetFwPstdma(
pCoexDm->psTdmaPara[3] = byte4;
pCoexDm->psTdmaPara[4] = realByte5;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], PS-TDMA H2C cmd = 0x%x%08x\n",
- H2C_Parameter[0],
- H2C_Parameter[1] << 24 |
- H2C_Parameter[2] << 16 |
- H2C_Parameter[3] << 8 |
- H2C_Parameter[4]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x60, 5, H2C_Parameter);
}
@@ -1369,35 +1038,13 @@ static void halbtc8723b1ant_PsTdma(
u8 rssiAdjustVal = 0;
u8 psTdmaByte4Val = 0x50, psTdmaByte0Val = 0x51, psTdmaByte3Val = 0x10;
s8 nWiFiDurationAdjust = 0x0;
- /* u32 fwVer = 0; */
+ /* u32 fwVer = 0; */
- /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type =%d\n", */
- /* (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type)); */
pCoexDm->bCurPsTdmaOn = bTurnOn;
pCoexDm->curPsTdma = type;
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
- if (pCoexDm->bCurPsTdmaOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- (
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- pCoexDm->curPsTdma
- )
- );
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- (
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- pCoexDm->curPsTdma
- )
- );
- }
-
if (!bForceExec) {
if (
(pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
@@ -1670,80 +1317,40 @@ static bool halbtc8723b1ant_IsCommonAction(struct btc_coexist *pBtCoexist)
if (
!bWifiConnected &&
- BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus
+ pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE
) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n")
- );
-
/* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
bCommon = true;
} else if (
bWifiConnected &&
- (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE)
) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Wifi connected + BT non connected-idle!!\n")
- );
-
/* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
bCommon = true;
} else if (
!bWifiConnected &&
- (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE)
) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n")
- );
-
/* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
bCommon = true;
} else if (
bWifiConnected &&
- (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE)
) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
-
/* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
bCommon = true;
} else if (
!bWifiConnected &&
- (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+ (pCoexDm->btStatus != BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE)
) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Wifi non connected-idle + BT Busy!!\n")
- );
-
/* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
bCommon = true;
} else {
- if (bWifiBusy) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Wifi Connected-Busy + BT Busy!!\n")
- );
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], Wifi Connected-Idle + BT Busy!!\n")
- );
- }
-
bCommon = false;
}
@@ -1759,16 +1366,10 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
u8 retryCount = 0, btInfoExt;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- ("[BTCoex], TdmaDurationAdjustForAcl()\n")
- );
-
if (
- (BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
- (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
- (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus)
+ (wifiStatus == BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
+ (wifiStatus == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN) ||
+ (wifiStatus == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT)
) {
if (
pCoexDm->curPsTdma != 1 &&
@@ -1791,11 +1392,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
if (!pCoexDm->bAutoTdmaAdjust) {
pCoexDm->bAutoTdmaAdjust = true;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- ("[BTCoex], first run TdmaDurationAdjust()!!\n")
- );
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
pCoexDm->psTdmaDuAdjType = 2;
@@ -1810,9 +1406,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
/* acquire the BT TRx retry count from BT_Info byte2 */
retryCount = pCoexSta->btRetryCnt;
btInfoExt = pCoexSta->btInfoExt;
- /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount)); */
- /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up =%d, dn =%d, m =%d, n =%d, WaitCount =%d\n", */
- /* up, dn, m, n, WaitCount)); */
if (pCoexSta->lowPriorityTx > 1050 || pCoexSta->lowPriorityRx > 1250)
retryCount++;
@@ -1833,11 +1426,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- ("[BTCoex], Increase wifi duration!!\n")
- );
}
} else if (retryCount <= 3) { /* <=3 retry in the last 2-second duration */
up--;
@@ -1860,7 +1448,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
dn = 0;
WaitCount = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
}
} else { /* retry count > 3: if the retry count exceeds 3 even once, narrow the WiFi duration */
if (WaitCount == 1)
@@ -1876,11 +1463,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
dn = 0;
WaitCount = 0;
result = -1;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n")
- );
}
if (result == -1) {
@@ -1917,15 +1499,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
pCoexDm->psTdmaDuAdjType = 1;
}
- } else { /* no change */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- pCoexDm->curPsTdma
- )
- );
}
if (
@@ -2002,13 +1575,13 @@ static void halbtc8723b1ant_PowerSaveState(
/* */
/* */
-/* Software Coex Mechanism start */
+/* Software Coex Mechanism start */
/* */
/* */
/* */
/* */
-/* Non-Software Coex Mechanism start */
+/* Non-Software Coex Mechanism start */
/* */
/* */
static void halbtc8723b1ant_ActionWifiMultiPort(struct btc_coexist *pBtCoexist)
@@ -2091,6 +1664,7 @@ static void halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
)
{
struct btc_bt_link_info *pBtLinkInfo = &pBtCoexist->btLinkInfo;
+
halbtc8723b1ant_BtRssiState(2, 28, 0);
if ((pCoexSta->lowPriorityRx >= 1000) && (pCoexSta->lowPriorityRx != 65535))
@@ -2103,7 +1677,7 @@ static void halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
pCoexDm->bAutoTdmaAdjust = false;
return;
} else if (pBtLinkInfo->bA2dpOnly) { /* A2DP */
- if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE == wifiStatus) {
+ if (wifiStatus == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE) {
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
pCoexDm->bAutoTdmaAdjust = false;
@@ -2158,7 +1732,7 @@ static void halbtc8723b1ant_ActionWifiNotConnectedScan(
halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
/* tdma and coex table */
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ if (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
if (pBtLinkInfo->bA2dpExist) {
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
@@ -2170,8 +1744,8 @@ static void halbtc8723b1ant_ActionWifiNotConnectedScan(
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
}
} else if (
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
) {
halbtc8723b1ant_ActionBtScoHidOnlyBusy(
pBtCoexist, BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN
@@ -2215,7 +1789,7 @@ static void halbtc8723b1ant_ActionWifiConnectedScan(struct btc_coexist *pBtCoexi
halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
/* tdma and coex table */
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ if (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
if (pBtLinkInfo->bA2dpExist) {
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
@@ -2227,8 +1801,8 @@ static void halbtc8723b1ant_ActionWifiConnectedScan(struct btc_coexist *pBtCoexi
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
}
} else if (
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
) {
halbtc8723b1ant_ActionBtScoHidOnlyBusy(
pBtCoexist, BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN
@@ -2271,20 +1845,9 @@ static void halbtc8723b1ant_ActionWifiConnected(struct btc_coexist *pBtCoexist)
bool bScan = false, bLink = false, bRoam = false;
bool bUnder4way = false, bApEnable = false;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], CoexForWifiConnect() ===>\n")
- );
-
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
if (bUnder4way) {
halbtc8723b1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n")
- );
return;
}
@@ -2296,11 +1859,6 @@ static void halbtc8723b1ant_ActionWifiConnected(struct btc_coexist *pBtCoexist)
halbtc8723b1ant_ActionWifiConnectedScan(pBtCoexist);
else
halbtc8723b1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n")
- );
return;
}
@@ -2310,7 +1868,7 @@ static void halbtc8723b1ant_ActionWifiConnected(struct btc_coexist *pBtCoexist)
/* power save state */
if (
!bApEnable &&
- BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus &&
+ pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY &&
!pBtCoexist->btLinkInfo.bHidOnly
) {
if (pBtCoexist->btLinkInfo.bA2dpOnly) { /* A2DP */
@@ -2341,14 +1899,14 @@ static void halbtc8723b1ant_ActionWifiConnected(struct btc_coexist *pBtCoexist)
/* tdma and coex table */
if (!bWifiBusy) {
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ if (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
pBtCoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE
);
} else if (
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
) {
halbtc8723b1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
@@ -2361,14 +1919,14 @@ static void halbtc8723b1ant_ActionWifiConnected(struct btc_coexist *pBtCoexist)
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
}
} else {
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ if (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
pBtCoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY
);
} else if (
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
) {
halbtc8723b1ant_ActionBtScoHidOnlyBusy(
pBtCoexist,
@@ -2397,47 +1955,36 @@ static void halbtc8723b1ant_RunSwCoexistMechanism(struct btc_coexist *pBtCoexist
} else {
switch (pCoexDm->curAlgorithm) {
case BT_8723B_1ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
/* halbtc8723b1ant_ActionSco(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
/* halbtc8723b1ant_ActionHid(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
/* halbtc8723b1ant_ActionA2dp(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
/* halbtc8723b1ant_ActionA2dpPanHs(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
/* halbtc8723b1ant_ActionPanEdr(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
/* halbtc8723b1ant_ActionPanHs(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
/* halbtc8723b1ant_ActionPanEdrA2dp(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
/* halbtc8723b1ant_ActionPanEdrHid(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
/* halbtc8723b1ant_ActionHidA2dpPanEdr(pBtCoexist); */
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
/* halbtc8723b1ant_ActionHidA2dp(pBtCoexist); */
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
break;
}
pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
@@ -2454,27 +2001,19 @@ static void halbtc8723b1ant_RunCoexistMechanism(struct btc_coexist *pBtCoexist)
u32 wifiLinkStatus = 0;
u32 numOfWifiLink = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism() ===>\n"));
-
- if (pBtCoexist->bManualControl) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ if (pBtCoexist->bManualControl)
return;
- }
- if (pBtCoexist->bStopCoexDm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+ if (pBtCoexist->bStopCoexDm)
return;
- }
- if (pCoexSta->bUnderIps) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ if (pCoexSta->bUnderIps)
return;
- }
if (
- (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
){
bIncreaseScanDevNum = true;
}
@@ -2498,26 +2037,12 @@ static void halbtc8723b1ant_RunCoexistMechanism(struct btc_coexist *pBtCoexist)
numOfWifiLink = wifiLinkStatus >> 16;
if ((numOfWifiLink >= 2) || (wifiLinkStatus & WIFI_P2P_GO_CONNECTED)) {
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- (
- "############# [BTCoex], Multi-Port numOfWifiLink = %d, wifiLinkStatus = 0x%x\n",
- numOfWifiLink,
- wifiLinkStatus
- )
- );
halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 0, 0, 0, 0);
halbtc8723b1ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, bBtCtrlAggBufSize, aggBufSize);
- if ((pBtLinkInfo->bA2dpExist) && (pCoexSta->bC2hBtInquiryPage)) {
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("############# [BTCoex], BT Is Inquirying\n")
- );
+ if ((pBtLinkInfo->bA2dpExist) && (pCoexSta->bC2hBtInquiryPage))
halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
- } else
+ else
halbtc8723b1ant_ActionWifiMultiPort(pBtCoexist);
return;
@@ -2544,11 +2069,6 @@ static void halbtc8723b1ant_RunCoexistMechanism(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
if (pCoexSta->bC2hBtInquiryPage) {
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("############# [BTCoex], BT Is Inquirying\n")
- );
halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
return;
} else if (bBtHsOn) {
@@ -2560,16 +2080,14 @@ static void halbtc8723b1ant_RunCoexistMechanism(struct btc_coexist *pBtCoexist)
if (!bWifiConnected) {
bool bScan = false, bLink = false, bRoam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
-
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
if (bScan || bLink || bRoam) {
- if (bScan)
+ if (bScan)
halbtc8723b1ant_ActionWifiNotConnectedScan(pBtCoexist);
- else
+ else
halbtc8723b1ant_ActionWifiNotConnectedAssoAuth(pBtCoexist);
} else
halbtc8723b1ant_ActionWifiNotConnected(pBtCoexist);
@@ -2596,15 +2114,6 @@ static void halbtc8723b1ant_InitHwConfig(
bool bWifiOnly
)
{
- u32 u4Tmp = 0;/* fwVer; */
- u8 u1Tmpa = 0, u1Tmpb = 0;
-
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_INIT,
- ("[BTCoex], 1Ant Init HW Config!!\n")
- );
-
pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x550, 0x8, 0x1); /* enable TBTT interrupt */
/* 0x790[5:0]= 0x5 */
@@ -2624,20 +2133,9 @@ static void halbtc8723b1ant_InitHwConfig(
/* PTA parameter */
halbtc8723b1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
- u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
- u1Tmpa = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
- u1Tmpb = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
-
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- (
- "############# [BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x67 = 0x%x\n",
- u4Tmp,
- u1Tmpa,
- u1Tmpb
- )
- );
+ pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+ pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
+ pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
}
/* */
@@ -2666,9 +2164,9 @@ void EXhalbtc8723b1ant_PowerOnSetting(struct btc_coexist *pBtCoexist)
/* */
/* S0 or S1 setting and local register setting (based on this setting, FW can get the antenna number, S0/S1, ... info) */
/* Local setting bit define */
- /* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
- /* BIT1: "0" for internal switch; "1" for external switch */
- /* BIT2: "0" for one antenna; "1" for two antenna */
+ /* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
+ /* BIT1: "0" for internal switch; "1" for external switch */
+ /* BIT2: "0" for one antenna; "1" for two antenna */
/* NOTE: here default all internal switch and 1-antenna ==> BIT1 = 0 and BIT2 = 0 */
if (pBtCoexist->chipInterface == BTC_INTF_USB) {
/* fixed at S0 for USB interface */
@@ -2705,12 +2203,6 @@ void EXhalbtc8723b1ant_InitHwConfig(struct btc_coexist *pBtCoexist, bool bWifiOn
void EXhalbtc8723b1ant_InitCoexDm(struct btc_coexist *pBtCoexist)
{
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_INIT,
- ("[BTCoex], Coex Mechanism Init!!\n")
- );
-
pBtCoexist->bStopCoexDm = false;
halbtc8723b1ant_InitCoexDm(pBtCoexist);
@@ -2730,7 +2222,6 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
bool bRoam = false;
bool bScan = false;
bool bLink = false;
- bool bWifiUnder5G = false;
bool bWifiUnderBMode = false;
bool bBtHsOn = false;
bool bWifiBusy = false;
@@ -2778,7 +2269,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d/ %d", "Ant PG Num/ Ant Mech/ Ant Pos:", \
+ "\r\n %-35s = %d/ %d/ %d", "Ant PG Num/ Ant Mech/ Ant Pos:",
pBoardInfo->pgAntNum,
pBoardInfo->btdmAntNum,
pBoardInfo->btdmAntPos
@@ -2788,7 +2279,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
((pStackInfo->bProfileNotified) ? "Yes" : "No"),
pStackInfo->hciVersion
);
@@ -2799,7 +2290,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer",
GLCoexVerDate8723b1Ant,
GLCoexVer8723b1Ant,
fwVer,
@@ -2814,7 +2305,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)",
wifiDot11Chnl,
wifiHsChnl,
bBtHsOn
@@ -2824,7 +2315,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info",
pCoexDm->wifiChnlInfo[0],
pCoexDm->wifiChnlInfo[1],
pCoexDm->wifiChnlInfo[2]
@@ -2836,7 +2327,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
wifiRssi - 100, btHsRssi - 100
);
CL_PRINTF(cliBuf);
@@ -2847,12 +2338,11 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d/ %d/ %s", "Wifi bLink/ bRoam/ bScan/ bHi-Pri", \
+ "\r\n %-35s = %d/ %d/ %d/ %s", "Wifi bLink/ bRoam/ bScan/ bHi-Pri",
bLink, bRoam, bScan, ((pCoexSta->bWiFiIsHighPriTask) ? "1" : "0")
);
CL_PRINTF(cliBuf);
- pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
pBtCoexist->fBtcGet(
@@ -2865,10 +2355,10 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %s / %s/ %s/ AP =%d/ %s ", "Wifi status", \
- (bWifiUnder5G ? "5G" : "2.4G"),
- ((bWifiUnderBMode) ? "11b" : ((BTC_WIFI_BW_LEGACY == wifiBw) ? "11bg" : (((BTC_WIFI_BW_HT40 == wifiBw) ? "HT40" : "HT20")))),
- ((!bWifiBusy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == wifiTrafficDir) ? "uplink" : "downlink")),
+ "\r\n %-35s = %s / %s/ %s/ AP =%d/ %s ", "Wifi status",
+ ("2.4G"),
+ ((bWifiUnderBMode) ? "11b" : ((wifiBw == BTC_WIFI_BW_LEGACY) ? "11bg" : (((wifiBw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")))),
+ ((!bWifiBusy) ? "idle" : ((wifiTrafficDir == BTC_WIFI_TRAFFIC_TX) ? "uplink" : "downlink")),
pCoexSta->nScanAPNum,
(pCoexSta->bCCKLock) ? "Lock" : "noLock"
);
@@ -2880,7 +2370,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d/ %d/ %d/ %d", "sta/vwifi/hs/p2pGo/p2pGc", \
+ "\r\n %-35s = %d/ %d/ %d/ %d/ %d", "sta/vwifi/hs/p2pGo/p2pGc",
((wifiLinkStatus & WIFI_STA_CONNECTED) ? 1 : 0),
((wifiLinkStatus & WIFI_AP_CONNECTED) ? 1 : 0),
((wifiLinkStatus & WIFI_HS_CONNECTED) ? 1 : 0),
@@ -2894,9 +2384,9 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = [%s/ %d/ %d/ %d] ", "BT [status/ rssi/ retryCnt/ popCnt]", \
- ((pBtCoexist->btInfo.bBtDisabled) ? ("disabled") : ((pCoexSta->bC2hBtInquiryPage) ? ("inquiry/page scan") : ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) ? "non-connected idle" :
- ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ? "connected-idle" : "busy")))),
+ "\r\n %-35s = [%s/ %d/ %d/ %d] ", "BT [status/ rssi/ retryCnt/ popCnt]",
+ ((pBtCoexist->btInfo.bBtDisabled) ? ("disabled") : ((pCoexSta->bC2hBtInquiryPage) ? ("inquiry/page scan") : ((pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE) ? "non-connected idle" :
+ ((pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE) ? "connected-idle" : "busy")))),
pCoexSta->btRssi, pCoexSta->btRetryCnt, pCoexSta->popEventCnt
);
CL_PRINTF(cliBuf);
@@ -2910,7 +2400,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
pBtLinkInfo->bScoExist,
pBtLinkInfo->bHidExist,
pBtLinkInfo->bPanExist,
@@ -2921,8 +2411,8 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
if (pStackInfo->bProfileNotified) {
pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
} else {
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Role", \
- (pBtLinkInfo->bSlaveRole) ? "Slave" : "Master");
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Role",
+ (pBtLinkInfo->bSlaveRole) ? "Slave" : "Master");
CL_PRINTF(cliBuf);
}
@@ -2931,7 +2421,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %s", "BT Info A2DP rate", \
+ "\r\n %-35s = %s", "BT Info A2DP rate",
(btInfoExt & BIT0) ? "Basic rate" : "EDR rate"
);
CL_PRINTF(cliBuf);
@@ -2941,7 +2431,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723b1Ant[i], \
+ "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723b1Ant[i],
pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
@@ -2953,7 +2443,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+ "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)",
(pCoexSta->bUnderIps ? "IPS ON" : "IPS OFF"),
(pCoexSta->bUnderLps ? "LPS ON" : "LPS OFF"),
pBtCoexist->btInfo.lpsVal,
@@ -2974,7 +2464,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d", "SM[LowPenaltyRA]", \
+ "\r\n %-35s = %d", "SM[LowPenaltyRA]",
pCoexDm->bCurLowPenaltyRa
);
CL_PRINTF(cliBuf);
@@ -2982,7 +2472,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize", \
+ "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize",
(pBtCoexist->btInfo.bRejectAggPkt ? "Yes" : "No"),
(pBtCoexist->btInfo.bBtCtrlAggBufSize ? "Yes" : "No"),
pBtCoexist->btInfo.aggBufSize
@@ -2991,7 +2481,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x ", "Rate Mask", \
+ "\r\n %-35s = 0x%x ", "Rate Mask",
pBtCoexist->btInfo.raMask
);
CL_PRINTF(cliBuf);
@@ -3001,41 +2491,35 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_PRINTF(cliBuf);
psTdmaCase = pCoexDm->curPsTdma;
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \
- pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
- pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
- pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust);
- CL_PRINTF(cliBuf);
-
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "Coex Table Type", \
- pCoexSta->nCoexTableType);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA",
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust);
CL_PRINTF(cliBuf);
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "IgnWlanAct", \
- pCoexDm->bCurIgnoreWlanAct);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "Coex Table Type",
+ pCoexSta->nCoexTableType);
CL_PRINTF(cliBuf);
- /*
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
- pCoexDm->errorCondition);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "IgnWlanAct",
+ pCoexDm->bCurIgnoreWlanAct);
CL_PRINTF(cliBuf);
- */
}
/* Hw setting */
CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
CL_PRINTF(cliBuf);
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "backup ARFR1/ARFR2/RL/AMaxTime", \
- pCoexDm->backupArfrCnt1, pCoexDm->backupArfrCnt2, pCoexDm->backupRetryLimit, pCoexDm->backupAmpduMaxTime);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "backup ARFR1/ARFR2/RL/AMaxTime",
+ pCoexDm->backupArfrCnt1, pCoexDm->backupArfrCnt2, pCoexDm->backupRetryLimit, pCoexDm->backupAmpduMaxTime);
CL_PRINTF(cliBuf);
u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x430);
u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x434);
u2Tmp[0] = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0x42a);
u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x456);
- CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "0x430/0x434/0x42a/0x456", \
- u4Tmp[0], u4Tmp[1], u2Tmp[0], u1Tmp[0]);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "0x430/0x434/0x42a/0x456",
+ u4Tmp[0], u4Tmp[1], u2Tmp[0], u1Tmp[0]);
CL_PRINTF(cliBuf);
u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x778);
@@ -3043,7 +2527,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x880);
CL_SPRINTF(
cliBuf, BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/0x6cc/0x880[29:25]", \
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/0x6cc/0x880[29:25]",
u1Tmp[0], u4Tmp[0], (u4Tmp[1] & 0x3e000000) >> 25
);
CL_PRINTF(cliBuf);
@@ -3055,7 +2539,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x948/ 0x67[5] / 0x764 / 0x76e", \
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x948/ 0x67[5] / 0x764 / 0x76e",
u4Tmp[0], ((u1Tmp[0] & 0x20) >> 5), (u4Tmp[1] & 0xffff), u1Tmp[1]
);
CL_PRINTF(cliBuf);
@@ -3066,7 +2550,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", \
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
u4Tmp[0] & 0x3, u4Tmp[1] & 0xff, u4Tmp[2] & 0x3
);
CL_PRINTF(cliBuf);
@@ -3078,7 +2562,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x38[11]/0x40/0x4c[24:23]/0x64[0]", \
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
((u1Tmp[0] & 0x8) >> 3),
u1Tmp[1],
((u4Tmp[0] & 0x01800000) >> 23),
@@ -3091,7 +2575,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
u4Tmp[0], u1Tmp[0]
);
CL_PRINTF(cliBuf);
@@ -3101,7 +2585,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x", "0xc50(dig)/0x49c(null-drop)", \
+ "\r\n %-35s = 0x%x/ 0x%x", "0xc50(dig)/0x49c(null-drop)",
u4Tmp[0] & 0xff, u1Tmp[0]
);
CL_PRINTF(cliBuf);
@@ -3117,14 +2601,14 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
faOfdm =
((u4Tmp[0] & 0xffff0000) >> 16) +
((u4Tmp[1] & 0xffff0000) >> 16) +
- (u4Tmp[1] & 0xffff) + (u4Tmp[2] & 0xffff) + \
+ (u4Tmp[1] & 0xffff) + (u4Tmp[2] & 0xffff) +
((u4Tmp[3] & 0xffff0000) >> 16) + (u4Tmp[3] & 0xffff);
faCck = (u1Tmp[0] << 8) + u1Tmp[1];
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "OFDM-CCA/OFDM-FA/CCK-FA", \
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "OFDM-CCA/OFDM-FA/CCK-FA",
u4Tmp[0] & 0xffff, faOfdm, faCck
);
CL_PRINTF(cliBuf);
@@ -3133,7 +2617,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_OK CCK/11g/11n/11n-Agg", \
+ "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_OK CCK/11g/11n/11n-Agg",
pCoexSta->nCRCOK_CCK,
pCoexSta->nCRCOK_11g,
pCoexSta->nCRCOK_11n,
@@ -3144,7 +2628,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_Err CCK/11g/11n/11n-Agg", \
+ "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_Err CCK/11g/11n/11n-Agg",
pCoexSta->nCRCErr_CCK,
pCoexSta->nCRCErr_11g,
pCoexSta->nCRCErr_11n,
@@ -3158,21 +2642,21 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8(coexTable)", \
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8(coexTable)",
u4Tmp[0], u4Tmp[1], u4Tmp[2]);
CL_PRINTF(cliBuf);
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", \
+ "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
pCoexSta->highPriorityRx, pCoexSta->highPriorityTx
);
CL_PRINTF(cliBuf);
CL_SPRINTF(
cliBuf,
BT_TMP_BUF_SIZE,
- "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", \
+ "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx
);
CL_PRINTF(cliBuf);
@@ -3186,19 +2670,13 @@ void EXhalbtc8723b1ant_IpsNotify(struct btc_coexist *pBtCoexist, u8 type)
if (pBtCoexist->bManualControl || pBtCoexist->bStopCoexDm)
return;
- if (BTC_IPS_ENTER == type) {
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n")
- );
+ if (type == BTC_IPS_ENTER) {
pCoexSta->bUnderIps = true;
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0);
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, false, true);
- } else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n")
- );
+ } else if (type == BTC_IPS_LEAVE) {
pCoexSta->bUnderIps = false;
halbtc8723b1ant_InitHwConfig(pBtCoexist, false, false);
@@ -3212,17 +2690,10 @@ void EXhalbtc8723b1ant_LpsNotify(struct btc_coexist *pBtCoexist, u8 type)
if (pBtCoexist->bManualControl || pBtCoexist->bStopCoexDm)
return;
- if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n")
- );
+ if (type == BTC_LPS_ENABLE)
pCoexSta->bUnderLps = true;
- } else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n")
- );
+ else if (type == BTC_LPS_DISABLE)
pCoexSta->bUnderLps = false;
- }
}
void EXhalbtc8723b1ant_ScanNotify(struct btc_coexist *pBtCoexist, u8 type)
@@ -3233,39 +2704,18 @@ void EXhalbtc8723b1ant_ScanNotify(struct btc_coexist *pBtCoexist, u8 type)
bool bBtCtrlAggBufSize = false;
u8 aggBufSize = 5;
- u8 u1Tmpa, u1Tmpb;
- u32 u4Tmp;
-
if (pBtCoexist->bManualControl || pBtCoexist->bStopCoexDm)
return;
- if (BTC_SCAN_START == type) {
+ if (type == BTC_SCAN_START) {
pCoexSta->bWiFiIsHighPriTask = true;
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")
- );
halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 8); /* Force antenna setup for no scan result issue */
- u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
- u1Tmpa = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
- u1Tmpb = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
-
-
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- (
- "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x67 = 0x%x\n",
- u4Tmp,
- u1Tmpa,
- u1Tmpb
- )
- );
+ pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+ pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
+ pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
} else {
pCoexSta->bWiFiIsHighPriTask = false;
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")
- );
pBtCoexist->fBtcGet(
pBtCoexist, BTC_GET_U1_AP_NUM, &pCoexSta->nScanAPNum
@@ -3300,14 +2750,12 @@ void EXhalbtc8723b1ant_ScanNotify(struct btc_coexist *pBtCoexist, u8 type)
return;
}
- if (BTC_SCAN_START == type) {
- /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")); */
+ if (type == BTC_SCAN_START) {
if (!bWifiConnected) /* non-connected scan */
halbtc8723b1ant_ActionWifiNotConnectedScan(pBtCoexist);
else /* wifi is connected */
halbtc8723b1ant_ActionWifiConnectedScan(pBtCoexist);
- } else if (BTC_SCAN_FINISH == type) {
- /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")); */
+ } else if (type == BTC_SCAN_FINISH) {
if (!bWifiConnected) /* non-connected scan */
halbtc8723b1ant_ActionWifiNotConnected(pBtCoexist);
else
@@ -3330,13 +2778,11 @@ void EXhalbtc8723b1ant_ConnectNotify(struct btc_coexist *pBtCoexist, u8 type)
)
return;
- if (BTC_ASSOCIATE_START == type) {
+ if (type == BTC_ASSOCIATE_START) {
pCoexSta->bWiFiIsHighPriTask = true;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
pCoexDm->nArpCnt = 0;
} else {
pCoexSta->bWiFiIsHighPriTask = false;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
/* pCoexDm->nArpCnt = 0; */
}
@@ -3358,12 +2804,9 @@ void EXhalbtc8723b1ant_ConnectNotify(struct btc_coexist *pBtCoexist, u8 type)
return;
}
- if (BTC_ASSOCIATE_START == type) {
- /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n")); */
+ if (type == BTC_ASSOCIATE_START) {
halbtc8723b1ant_ActionWifiNotConnectedAssoAuth(pBtCoexist);
- } else if (BTC_ASSOCIATE_FINISH == type) {
- /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n")); */
-
+ } else if (type == BTC_ASSOCIATE_FINISH) {
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
if (!bWifiConnected) /* non-connected scan */
halbtc8723b1ant_ActionWifiNotConnected(pBtCoexist);
@@ -3386,9 +2829,7 @@ void EXhalbtc8723b1ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
)
return;
- if (BTC_MEDIA_CONNECT == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
-
+ if (type == BTC_MEDIA_CONNECT) {
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, &bWifiUnderBMode);
/* Set CCK Tx/Rx high Pri except 11b mode */
@@ -3405,7 +2846,6 @@ void EXhalbtc8723b1ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
pCoexDm->backupRetryLimit = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0x42a);
pCoexDm->backupAmpduMaxTime = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x456);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
pCoexDm->nArpCnt = 0;
pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cd, 0x0); /* CCK Tx */
@@ -3414,13 +2854,13 @@ void EXhalbtc8723b1ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
/* only for 2.4G do we need to inform bt of the chnl mask */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
- if ((BTC_MEDIA_CONNECT == type) && (wifiCentralChnl <= 14)) {
+ if ((type == BTC_MEDIA_CONNECT) && (wifiCentralChnl <= 14)) {
/* H2C_Parameter[0] = 0x1; */
H2C_Parameter[0] = 0x0;
H2C_Parameter[1] = wifiCentralChnl;
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw)
+ if (wifiBw == BTC_WIFI_BW_HT40)
H2C_Parameter[2] = 0x30;
else
H2C_Parameter[2] = 0x20;
@@ -3430,15 +2870,6 @@ void EXhalbtc8723b1ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], FW write 0x66 = 0x%x\n",
- H2C_Parameter[0] << 16 | H2C_Parameter[1] << 8 | H2C_Parameter[2]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x66, 3, H2C_Parameter);
}
@@ -3458,23 +2889,12 @@ void EXhalbtc8723b1ant_SpecialPacketNotify(struct btc_coexist *pBtCoexist, u8 ty
return;
if (
- BTC_PACKET_DHCP == type ||
- BTC_PACKET_EAPOL == type ||
- BTC_PACKET_ARP == type
+ type == BTC_PACKET_DHCP ||
+ type == BTC_PACKET_EAPOL ||
+ type == BTC_PACKET_ARP
) {
- if (BTC_PACKET_ARP == type) {
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("[BTCoex], special Packet ARP notify\n")
- );
-
+ if (type == BTC_PACKET_ARP) {
pCoexDm->nArpCnt++;
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("[BTCoex], ARP Packet Count = %d\n", pCoexDm->nArpCnt)
- );
if (pCoexDm->nArpCnt >= 10) /* if ARP PKT > 10 after connect, do not go to ActionWifiConnectedSpecialPacket(pBtCoexist) */
pCoexSta->bWiFiIsHighPriTask = false;
@@ -3482,19 +2902,9 @@ void EXhalbtc8723b1ant_SpecialPacketNotify(struct btc_coexist *pBtCoexist, u8 ty
pCoexSta->bWiFiIsHighPriTask = true;
} else {
pCoexSta->bWiFiIsHighPriTask = true;
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("[BTCoex], special Packet DHCP or EAPOL notify\n")
- );
}
} else {
pCoexSta->bWiFiIsHighPriTask = false;
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("[BTCoex], special Packet [Type = %d] notify\n", type)
- );
}
pCoexSta->specialPktPeriodCnt = 0;
@@ -3523,9 +2933,9 @@ void EXhalbtc8723b1ant_SpecialPacketNotify(struct btc_coexist *pBtCoexist, u8 ty
}
if (
- BTC_PACKET_DHCP == type ||
- BTC_PACKET_EAPOL == type ||
- ((BTC_PACKET_ARP == type) && (pCoexSta->bWiFiIsHighPriTask))
+ type == BTC_PACKET_DHCP ||
+ type == BTC_PACKET_EAPOL ||
+ ((type == BTC_PACKET_ARP) && (pCoexSta->bWiFiIsHighPriTask))
)
halbtc8723b1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
}
@@ -3546,26 +2956,13 @@ void EXhalbtc8723b1ant_BtInfoNotify(
rspSource = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
pCoexSta->btInfoC2hCnt[rspSource]++;
- BTC_PRINT(
- BTC_MSG_INTERFACE,
- INTF_NOTIFY,
- ("[BTCoex], Bt info[%d], length =%d, hex data =[",
- rspSource,
- length)
- );
for (i = 0; i < length; i++) {
pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
if (i == 1)
btInfo = tmpBuf[i];
- if (i == length - 1)
- BTC_PRINT(
- BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i])
- );
- else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
}
- if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rspSource) {
+ if (rspSource != BT_INFO_SRC_8723B_1ANT_WIFI_FW) {
pCoexSta->btRetryCnt = pCoexSta->btInfoC2h[rspSource][2] & 0xf;
if (pCoexSta->btRetryCnt >= 1)
@@ -3586,18 +2983,12 @@ void EXhalbtc8723b1ant_BtInfoNotify(
if (!pCoexSta->bBtTxRxMask) {
/* BT info is responded by BT FW and BT RF REG 0x3C != 0x15 => Need to switch BT TRx Mask */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n"));
pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x15);
}
/* Here we need to resend some wifi info to BT */
/* because bt is reset and has lost the info. */
if (pCoexSta->btInfoExt & BIT1) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n")
- );
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
if (bWifiConnected)
EXhalbtc8723b1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT);
@@ -3606,14 +2997,8 @@ void EXhalbtc8723b1ant_BtInfoNotify(
}
if (pCoexSta->btInfoExt & BIT3) {
- if (!pBtCoexist->bManualControl && !pBtCoexist->bStopCoexDm) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n")
- );
+ if (!pBtCoexist->bManualControl && !pBtCoexist->bStopCoexDm)
halbtc8723b1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, false);
- }
} else {
/* BT already NOT ignore Wlan active, do nothing here. */
}
@@ -3661,32 +3046,27 @@ void EXhalbtc8723b1ant_BtInfoNotify(
if (!(btInfo & BT_INFO_8723B_1ANT_B_CONNECTION)) {
pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"));
} else if (btInfo == BT_INFO_8723B_1ANT_B_CONNECTION) {
/* connection exists but no busy */
pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"));
} else if (
(btInfo & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
(btInfo & BT_INFO_8723B_1ANT_B_SCO_BUSY)
) {
pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"));
} else if (btInfo & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != pCoexDm->btStatus)
+ if (pCoexDm->btStatus != BT_8723B_1ANT_BT_STATUS_ACL_BUSY)
pCoexDm->bAutoTdmaAdjust = false;
pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"));
} else {
pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"));
}
if (
- (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
)
bBtBusy = true;
else
@@ -3698,8 +3078,6 @@ void EXhalbtc8723b1ant_BtInfoNotify(
void EXhalbtc8723b1ant_HaltNotify(struct btc_coexist *pBtCoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
-
halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 0);
halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, false, true);
@@ -3713,19 +3091,14 @@ void EXhalbtc8723b1ant_HaltNotify(struct btc_coexist *pBtCoexist)
void EXhalbtc8723b1ant_PnpNotify(struct btc_coexist *pBtCoexist, u8 pnpState)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
-
- if (BTC_WIFI_PNP_SLEEP == pnpState) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to SLEEP\n"));
-
+ if (pnpState == BTC_WIFI_PNP_SLEEP) {
halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0);
halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, false, true);
pBtCoexist->bStopCoexDm = true;
- } else if (BTC_WIFI_PNP_WAKE_UP == pnpState) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to WAKE UP\n"));
+ } else if (pnpState == BTC_WIFI_PNP_WAKE_UP) {
pBtCoexist->bStopCoexDm = false;
halbtc8723b1ant_InitHwConfig(pBtCoexist, false, false);
halbtc8723b1ant_InitCoexDm(pBtCoexist);
@@ -3738,16 +3111,10 @@ void EXhalbtc8723b1ant_Periodical(struct btc_coexist *pBtCoexist)
static u8 disVerInfoCnt;
u32 fwVer = 0, btPatchVer = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical ===========================\n"));
-
if (disVerInfoCnt <= 5) {
disVerInfoCnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
- GLCoexVerDate8723b1Ant, GLCoexVer8723b1Ant, fwVer, btPatchVer, btPatchVer));
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
}
halbtc8723b1ant_MonitorBtCtr(pBtCoexist);
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
index 4b570ec75e67..84241619fb3a 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
@@ -46,23 +46,18 @@ static u8 halbtc8723b2ant_BtRssiState(
) {
if (btRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
btRssiState = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
} else {
btRssiState = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
}
} else {
if (btRssi < rssiThresh) {
btRssiState = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
} else {
btRssiState = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
}
}
} else if (levelNum == 3) {
if (rssiThresh > rssiThresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
return pCoexSta->preBtRssiState;
}
@@ -72,10 +67,8 @@ static u8 halbtc8723b2ant_BtRssiState(
) {
if (btRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
btRssiState = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
} else {
btRssiState = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
}
} else if (
(pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
@@ -83,21 +76,16 @@ static u8 halbtc8723b2ant_BtRssiState(
) {
if (btRssi >= (rssiThresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
btRssiState = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
} else if (btRssi < rssiThresh) {
btRssiState = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
} else {
btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
}
} else {
if (btRssi < rssiThresh1) {
btRssiState = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
} else {
btRssiState = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
}
}
}
@@ -127,23 +115,18 @@ static u8 halbtc8723b2ant_WifiRssiState(
) {
if (wifiRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
wifiRssiState = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
} else {
wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
}
} else {
if (wifiRssi < rssiThresh) {
wifiRssiState = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
} else {
wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
}
}
} else if (levelNum == 3) {
if (rssiThresh > rssiThresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
return pCoexSta->preWifiRssiState[index];
}
@@ -153,10 +136,8 @@ static u8 halbtc8723b2ant_WifiRssiState(
) {
if (wifiRssi >= (rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
wifiRssiState = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
} else {
wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
}
} else if (
(pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
@@ -164,21 +145,16 @@ static u8 halbtc8723b2ant_WifiRssiState(
) {
if (wifiRssi >= (rssiThresh1 + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
wifiRssiState = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
} else if (wifiRssi < rssiThresh) {
wifiRssiState = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
} else {
wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
}
} else {
if (wifiRssi < rssiThresh1) {
wifiRssiState = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
} else {
wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
}
}
}
@@ -233,31 +209,6 @@ static void halbtc8723b2ant_MonitorBtCtr(struct btc_coexist *pBtCoexist)
pCoexSta->lowPriorityTx = regLPTx;
pCoexSta->lowPriorityRx = regLPRx;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_MONITOR,
- (
- "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- regHPTxRx,
- regHPTx,
- regHPTx,
- regHPRx,
- regHPRx
- )
- );
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_BT_MONITOR,
- (
- "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- regLPTxRx,
- regLPTx,
- regLPTx,
- regLPRx,
- regLPRx
- )
- );
-
/* reset counter */
pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0xc);
}
@@ -270,12 +221,6 @@ static void halbtc8723b2ant_QueryBtInfo(struct btc_coexist *pBtCoexist)
H2C_Parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- ("[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", H2C_Parameter[0])
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x61, 1, H2C_Parameter);
}
@@ -384,7 +329,6 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
if (!pBtLinkInfo->bBtLinkExist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No BT link exists!!!\n"));
return algorithm;
}
@@ -402,21 +346,16 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
if (numOfDiffProfile == 1) {
if (pBtLinkInfo->bScoExist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
if (pBtLinkInfo->bHidExist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else if (pBtLinkInfo->bA2dpExist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
} else if (pBtLinkInfo->bPanExist) {
if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR;
}
}
@@ -424,17 +363,13 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
} else if (numOfDiffProfile == 2) {
if (pBtLinkInfo->bScoExist) {
if (pBtLinkInfo->bHidExist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (pBtLinkInfo->bA2dpExist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (pBtLinkInfo->bPanExist) {
if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -443,17 +378,14 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bHidExist &&
pBtLinkInfo->bA2dpExist
) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else if (
pBtLinkInfo->bHidExist &&
pBtLinkInfo->bPanExist
) {
if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (
@@ -461,10 +393,8 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bA2dpExist
) {
if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -475,42 +405,17 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bHidExist &&
pBtLinkInfo->bA2dpExist
) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], SCO + HID + A2DP ==> HID\n")
- );
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (
pBtLinkInfo->bHidExist &&
pBtLinkInfo->bPanExist
) {
- if (bBtHsOn) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], SCO + HID + PAN(HS)\n")
- );
- algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
- } else {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- ("[BTCoex], SCO + HID + PAN(EDR)\n")
- );
- algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
- }
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (
pBtLinkInfo->bPanExist &&
pBtLinkInfo->bA2dpExist
) {
- if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
- algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
- } else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
- algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
- }
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else {
if (
@@ -519,10 +424,8 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bA2dpExist
) {
if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -535,10 +438,7 @@ static u8 halbtc8723b2ant_ActionAlgorithm(struct btc_coexist *pBtCoexist)
pBtLinkInfo->bA2dpExist
) {
if (bBtHsOn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
-
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR) ==>PAN(EDR)+HID\n"));
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -558,17 +458,6 @@ static void halbtc8723b2ant_SetFwDacSwingLevel(
/* 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
H2C_Parameter[0] = dacSwingLvl;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- ("[BTCoex], Set Dac Swing Level = 0x%x\n", dacSwingLvl)
- );
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- ("[BTCoex], FW write 0x64 = 0x%x\n", H2C_Parameter[0])
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x64, 1, H2C_Parameter);
}
@@ -580,16 +469,6 @@ static void halbtc8723b2ant_SetFwDecBtPwr(
H2C_Parameter[0] = decBtPwrLvl;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- decBtPwrLvl,
- H2C_Parameter[0]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x62, 1, H2C_Parameter);
}
@@ -597,28 +476,9 @@ static void halbtc8723b2ant_DecBtPwr(
struct btc_coexist *pBtCoexist, bool bForceExec, u8 decBtPwrLvl
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- (
- "[BTCoex], %s Dec BT power level = %d\n",
- (bForceExec ? "force to" : ""),
- decBtPwrLvl
- )
- );
pCoexDm->curBtDecPwrLvl = decBtPwrLvl;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], preBtDecPwrLvl =%d, curBtDecPwrLvl =%d\n",
- pCoexDm->preBtDecPwrLvl,
- pCoexDm->curBtDecPwrLvl
- )
- );
-
if (pCoexDm->preBtDecPwrLvl == pCoexDm->curBtDecPwrLvl)
return;
}
@@ -631,28 +491,9 @@ static void halbtc8723b2ant_FwDacSwingLvl(
struct btc_coexist *pBtCoexist, bool bForceExec, u8 fwDacSwingLvl
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- (
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (bForceExec ? "force to" : ""),
- fwDacSwingLvl
- )
- );
pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], preFwDacSwingLvl =%d, curFwDacSwingLvl =%d\n",
- pCoexDm->preFwDacSwingLvl,
- pCoexDm->curFwDacSwingLvl
- )
- );
-
if (pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl)
return;
}
@@ -669,17 +510,11 @@ static void halbtc8723b2ant_SetSwRfRxLpfCorner(
{
if (bRxRfShrinkOn) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], Shrink RF Rx LPF corner!!\n")
- );
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc);
} else {
/* Resume RF Rx LPF corner */
/* After initialized, we can use pCoexDm->btRf0x1eBackup */
if (pBtCoexist->bInitilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
}
}
@@ -689,28 +524,9 @@ static void halbtc8723b2ant_RfShrink(
struct btc_coexist *pBtCoexist, bool bForceExec, bool bRxRfShrinkOn
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW,
- (
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (bForceExec ? "force to" : ""),
- (bRxRfShrinkOn ? "ON" : "OFF")
- )
- );
pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_DETAIL,
- (
- "[BTCoex], bPreRfRxLpfShrink =%d, bCurRfRxLpfShrink =%d\n",
- pCoexDm->bPreRfRxLpfShrink,
- pCoexDm->bCurRfRxLpfShrink
- )
- );
-
if (pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
return;
}
@@ -735,15 +551,6 @@ static void halbtc8723b2ant_SetSwPenaltyTxRateAdaptive(
H2C_Parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (bLowPenaltyRa ? "ON!!" : "OFF!!")
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x69, 6, H2C_Parameter);
}
@@ -752,28 +559,9 @@ static void halbtc8723b2ant_LowPenaltyRa(
)
{
/* return; */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW,
- (
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (bForceExec ? "force to" : ""),
- (bLowPenaltyRa ? "ON" : "OFF")
- )
- );
pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_DETAIL,
- (
- "[BTCoex], bPreLowPenaltyRa =%d, bCurLowPenaltyRa =%d\n",
- pCoexDm->bPreLowPenaltyRa,
- pCoexDm->bCurLowPenaltyRa
- )
- );
-
if (pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
return;
}
@@ -786,11 +574,6 @@ static void halbtc8723b2ant_SetDacSwingReg(struct btc_coexist *pBtCoexist, u32 l
{
u8 val = (u8)level;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], Write SwDacSwing = 0x%x\n", level)
- );
pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x883, 0x3e, val);
}
@@ -812,32 +595,10 @@ static void halbtc8723b2ant_DacSwing(
u32 dacSwingLvl
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW,
- (
- "[BTCoex], %s turn DacSwing =%s, dacSwingLvl = 0x%x\n",
- (bForceExec ? "force to" : ""),
- (bDacSwingOn ? "ON" : "OFF"),
- dacSwingLvl
- )
- );
pCoexDm->bCurDacSwingOn = bDacSwingOn;
pCoexDm->curDacSwingLvl = dacSwingLvl;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_DETAIL,
- (
- "[BTCoex], bPreDacSwingOn =%d, preDacSwingLvl = 0x%x, bCurDacSwingOn =%d, curDacSwingLvl = 0x%x\n",
- pCoexDm->bPreDacSwingOn,
- pCoexDm->preDacSwingLvl,
- pCoexDm->bCurDacSwingOn,
- pCoexDm->curDacSwingLvl
- )
- );
-
if ((pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
(pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl))
return;
@@ -857,7 +618,6 @@ static void halbtc8723b2ant_SetAgcTable(
/* BB AGC Gain Table */
if (bAgcTableEn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB Agc Table On!\n"));
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6e1A0001);
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6d1B0001);
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6c1C0001);
@@ -866,7 +626,6 @@ static void halbtc8723b2ant_SetAgcTable(
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x691F0001);
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x68200001);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB Agc Table Off!\n"));
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xaa1A0001);
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa91B0001);
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa81C0001);
@@ -880,11 +639,9 @@ static void halbtc8723b2ant_SetAgcTable(
/* RF Gain */
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
if (bAgcTableEn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38fff);
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38ffe);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x380c3);
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x28ce6);
}
@@ -892,11 +649,9 @@ static void halbtc8723b2ant_SetAgcTable(
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
if (bAgcTableEn) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x38fff);
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x38ffe);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x380c3);
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x28ce6);
}
@@ -913,28 +668,9 @@ static void halbtc8723b2ant_AgcTable(
struct btc_coexist *pBtCoexist, bool bForceExec, bool bAgcTableEn
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW,
- (
- "[BTCoex], %s %s Agc Table\n",
- (bForceExec ? "force to" : ""),
- (bAgcTableEn ? "Enable" : "Disable")
- )
- );
pCoexDm->bCurAgcTableEn = bAgcTableEn;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_DETAIL,
- (
- "[BTCoex], bPreAgcTableEn =%d, bCurAgcTableEn =%d\n",
- pCoexDm->bPreAgcTableEn,
- pCoexDm->bCurAgcTableEn
- )
- );
-
if (pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn)
return;
}
@@ -951,32 +687,12 @@ static void halbtc8723b2ant_SetCoexTable(
u8 val0x6cc
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0)
- );
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c0, val0x6c0);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4)
- );
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c4, val0x6c4);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8)
- );
pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c8, val0x6c8);
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_EXEC,
- ("[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc)
- );
pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cc, val0x6cc);
}
@@ -989,47 +705,12 @@ static void halbtc8723b2ant_CoexTable(
u8 val0x6cc
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW,
- (
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (bForceExec ? "force to" : ""),
- val0x6c0,
- val0x6c4,
- val0x6c8,
- val0x6cc
- )
- );
pCoexDm->curVal0x6c0 = val0x6c0;
pCoexDm->curVal0x6c4 = val0x6c4;
pCoexDm->curVal0x6c8 = val0x6c8;
pCoexDm->curVal0x6cc = val0x6cc;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_DETAIL,
- (
- "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
- pCoexDm->preVal0x6c0,
- pCoexDm->preVal0x6c4,
- pCoexDm->preVal0x6c8,
- pCoexDm->preVal0x6cc
- )
- );
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_SW_DETAIL,
- (
- "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x, curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
- pCoexDm->curVal0x6c0,
- pCoexDm->curVal0x6c4,
- pCoexDm->curVal0x6c8,
- pCoexDm->curVal0x6cc
- )
- );
-
if (
(pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
(pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) &&
@@ -1104,15 +785,6 @@ static void halbtc8723b2ant_SetFwIgnoreWlanAct(
if (bEnable)
H2C_Parameter[0] |= BIT0; /* function enable */
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- H2C_Parameter[0]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x63, 1, H2C_Parameter);
}
@@ -1120,22 +792,9 @@ static void halbtc8723b2ant_IgnoreWlanAct(
struct btc_coexist *pBtCoexist, bool bForceExec, bool bEnable
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- (
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (bForceExec ? "force to" : ""),
- (bEnable ? "ON" : "OFF")
- )
- );
-
pCoexDm->bCurIgnoreWlanAct = bEnable;
if (!bForceExec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
-
if (pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
return;
}
@@ -1167,19 +826,6 @@ static void halbtc8723b2ant_SetFwPstdma(
pCoexDm->psTdmaPara[3] = byte4;
pCoexDm->psTdmaPara[4] = byte5;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- H2C_Parameter[0],
- H2C_Parameter[1] << 24 |
- H2C_Parameter[2] << 16 |
- H2C_Parameter[3] << 8 |
- H2C_Parameter[4]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x60, 5, H2C_Parameter);
}
@@ -1305,38 +951,10 @@ static void halbtc8723b2ant_PsTdma(
struct btc_coexist *pBtCoexist, bool bForceExec, bool bTurnOn, u8 type
)
{
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW,
- (
- "[BTCoex], %s turn %s PS TDMA, type =%d\n",
- (bForceExec ? "force to" : ""),
- (bTurnOn ? "ON" : "OFF"),
- type
- )
- );
pCoexDm->bCurPsTdmaOn = bTurnOn;
pCoexDm->curPsTdma = type;
if (!bForceExec) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- pCoexDm->bPrePsTdmaOn,
- pCoexDm->bCurPsTdmaOn
- )
- );
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- pCoexDm->prePsTdma, pCoexDm->curPsTdma
- )
- );
-
if (
(pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
(pCoexDm->prePsTdma == pCoexDm->curPsTdma)
@@ -1505,8 +1123,6 @@ static bool halbtc8723b2ant_IsCommonAction(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-connected idle!!\n"));
-
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
@@ -1523,8 +1139,6 @@ static bool halbtc8723b2ant_IsCommonAction(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
-
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
@@ -1542,7 +1156,6 @@ static bool halbtc8723b2ant_IsCommonAction(struct btc_coexist *pBtCoexist)
if (bBtHsOn)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -1560,13 +1173,11 @@ static bool halbtc8723b2ant_IsCommonAction(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
if (bWifiBusy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi Connected-Busy + BT Busy!!\n"));
bCommon = false;
} else {
if (bBtHsOn)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi Connected-Idle + BT Busy!!\n"));
btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
@@ -1598,11 +1209,8 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
u8 retryCount = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjust()\n"));
-
if (!pCoexDm->bAutoTdmaAdjust) {
pCoexDm->bAutoTdmaAdjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
{
if (bScoHid) {
if (bTxPause) {
@@ -1648,15 +1256,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retryCount = pCoexSta->btRetryCnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], up =%d, dn =%d, m =%d, n =%d, WaitCount =%d\n",
- up, dn, m, n, WaitCount
- )
- );
result = 0;
WaitCount++;
@@ -1673,7 +1272,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
}
} else if (retryCount <= 3) { /* <=3 retry in the last 2-second duration */
up--;
@@ -1696,7 +1294,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
dn = 0;
WaitCount = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
}
 	} else { /* retry count > 3; once retry count exceeds 3, narrow the WiFi duration */
if (WaitCount == 1)
@@ -1712,14 +1309,10 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
dn = 0;
WaitCount = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], max Interval = %d\n", maxInterval));
if (maxInterval == 1) {
if (bTxPause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
-
if (pCoexDm->curPsTdma == 71)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(5);
else if (pCoexDm->curPsTdma == 1)
@@ -1768,7 +1361,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(13);
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
if (pCoexDm->curPsTdma == 5)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(71);
else if (pCoexDm->curPsTdma == 6)
@@ -1821,7 +1413,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
}
} else if (maxInterval == 2) {
if (bTxPause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
if (pCoexDm->curPsTdma == 1)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(6);
else if (pCoexDm->curPsTdma == 2)
@@ -1868,7 +1459,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(14);
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
if (pCoexDm->curPsTdma == 5)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(2);
else if (pCoexDm->curPsTdma == 6)
@@ -1917,7 +1507,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
}
} else if (maxInterval == 3) {
if (bTxPause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
if (pCoexDm->curPsTdma == 1)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(7);
else if (pCoexDm->curPsTdma == 2)
@@ -1964,7 +1553,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(15);
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
if (pCoexDm->curPsTdma == 5)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(3);
else if (pCoexDm->curPsTdma == 6)
@@ -2018,15 +1606,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
/* then we have to adjust it back to the previous record one. */
if (pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType) {
bool bScan = false, bLink = false, bRoam = false;
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- (
- "[BTCoex], PsTdma type mismatch!!!, curPsTdma =%d, recordPsTdma =%d\n",
- pCoexDm->curPsTdma,
- pCoexDm->psTdmaDuAdjType
- )
- );
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
@@ -2034,9 +1613,7 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
if (!bScan && !bLink && !bRoam)
halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
- else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
- }
+
}
}
@@ -2171,7 +1748,6 @@ static void halbtc8723b2ant_ActionA2dp(struct btc_coexist *pBtCoexist)
/* define the office environment */
if (apNum >= 10 && BTC_RSSI_HIGH(wifiRssiState1)) {
- /* DbgPrint(" AP#>10(%d)\n", apNum); */
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
@@ -2660,21 +2236,16 @@ static void halbtc8723b2ant_RunCoexistMechanism(struct btc_coexist *pBtCoexist)
{
u8 algorithm = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism() ===>\n"));
-
if (pBtCoexist->bManualControl) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
return;
}
if (pCoexSta->bUnderIps) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
return;
}
algorithm = halbtc8723b2ant_ActionAlgorithm(pBtCoexist);
if (pCoexSta->bC2hBtInquiryPage && (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT is under inquiry/page scan !!\n"));
halbtc8723b2ant_ActionBtInquiry(pBtCoexist);
return;
} else {
@@ -2685,69 +2256,47 @@ static void halbtc8723b2ant_RunCoexistMechanism(struct btc_coexist *pBtCoexist)
}
pCoexDm->curAlgorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d\n", pCoexDm->curAlgorithm));
if (halbtc8723b2ant_IsCommonAction(pBtCoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant common.\n"));
pCoexDm->bAutoTdmaAdjust = false;
} else {
if (pCoexDm->curAlgorithm != pCoexDm->preAlgorithm) {
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE,
- (
- "[BTCoex], preAlgorithm =%d, curAlgorithm =%d\n",
- pCoexDm->preAlgorithm,
- pCoexDm->curAlgorithm
- )
- );
pCoexDm->bAutoTdmaAdjust = false;
}
switch (pCoexDm->curAlgorithm) {
case BT_8723B_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = SCO.\n"));
halbtc8723b2ant_ActionSco(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID.\n"));
halbtc8723b2ant_ActionHid(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP.\n"));
halbtc8723b2ant_ActionA2dp(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n"));
halbtc8723b2ant_ActionA2dpPanHs(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"));
halbtc8723b2ant_ActionPanEdr(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HS mode.\n"));
halbtc8723b2ant_ActionPanHs(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"));
halbtc8723b2ant_ActionPanEdrA2dp(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
halbtc8723b2ant_ActionPanEdrHid(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
halbtc8723b2ant_ActionHidA2dpPanEdr(pBtCoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"));
halbtc8723b2ant_ActionHidA2dp(pBtCoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"));
halbtc8723b2ant_CoexAllOff(pBtCoexist);
break;
}
@@ -2784,8 +2333,6 @@ static void halbtc8723b2ant_InitHwConfig(struct btc_coexist *pBtCoexist, bool bB
{
u8 u1Tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 2Ant Init HW Config!!\n"));
-
/* backup rf 0x1e value */
pCoexDm->btRf0x1eBackup =
pBtCoexist->fBtcGetRfReg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
@@ -2873,8 +2420,6 @@ void EXhalbtc8723b2ant_InitHwConfig(struct btc_coexist *pBtCoexist, bool bWifiOn
void EXhalbtc8723b2ant_InitCoexDm(struct btc_coexist *pBtCoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
-
halbtc8723b2ant_InitCoexDm(pBtCoexist);
}
@@ -2886,7 +2431,7 @@ void EXhalbtc8723b2ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
u8 *cliBuf = pBtCoexist->cliBuf;
u8 u1Tmp[4], i, btInfoExt, psTdmaCase = 0;
u32 u4Tmp[4];
- bool bRoam = false, bScan = false, bLink = false, bWifiUnder5G = false;
+ bool bRoam = false, bScan = false, bLink = false;
bool bBtHsOn = false, bWifiBusy = false;
s32 wifiRssi = 0, btHsRssi = 0;
u32 wifiBw, wifiTrafficDir, faOfdm, faCck;
@@ -2977,7 +2522,6 @@ void EXhalbtc8723b2ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
);
CL_PRINTF(cliBuf);
- pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
@@ -2985,7 +2529,7 @@ void EXhalbtc8723b2ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
cliBuf,
BT_TMP_BUF_SIZE,
"\r\n %-35s = %s / %s/ %s ", "Wifi status", \
- (bWifiUnder5G ? "5G" : "2.4G"),
+ ("2.4G"),
((BTC_WIFI_BW_LEGACY == wifiBw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifiBw) ? "HT40" : "HT20"))),
((!bWifiBusy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == wifiTrafficDir) ? "uplink" : "downlink"))
);
@@ -3262,13 +2806,11 @@ void EXhalbtc8723b2ant_DisplayCoexInfo(struct btc_coexist *pBtCoexist)
void EXhalbtc8723b2ant_IpsNotify(struct btc_coexist *pBtCoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
pCoexSta->bUnderIps = true;
halbtc8723b2ant_WifiOffHwCfg(pBtCoexist);
halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
halbtc8723b2ant_CoexAllOff(pBtCoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
pCoexSta->bUnderIps = false;
halbtc8723b2ant_InitHwConfig(pBtCoexist, false);
halbtc8723b2ant_InitCoexDm(pBtCoexist);
@@ -3279,10 +2821,8 @@ void EXhalbtc8723b2ant_IpsNotify(struct btc_coexist *pBtCoexist, u8 type)
void EXhalbtc8723b2ant_LpsNotify(struct btc_coexist *pBtCoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
pCoexSta->bUnderLps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
pCoexSta->bUnderLps = false;
}
}
@@ -3290,18 +2830,14 @@ void EXhalbtc8723b2ant_LpsNotify(struct btc_coexist *pBtCoexist, u8 type)
void EXhalbtc8723b2ant_ScanNotify(struct btc_coexist *pBtCoexist, u8 type)
{
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
}
}
void EXhalbtc8723b2ant_ConnectNotify(struct btc_coexist *pBtCoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
}
}
@@ -3312,12 +2848,6 @@ void EXhalbtc8723b2ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
u8 wifiCentralChnl;
u8 apNum = 0;
- if (BTC_MEDIA_CONNECT == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
- } else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
- }
-
/* only 2.4G we need to inform bt the chnl mask */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
if ((BTC_MEDIA_CONNECT == type) && (wifiCentralChnl <= 14)) {
@@ -3339,23 +2869,11 @@ void EXhalbtc8723b2ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
- BTC_PRINT(
- BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_EXEC,
- (
- "[BTCoex], FW write 0x66 = 0x%x\n",
- H2C_Parameter[0] << 16 | H2C_Parameter[1] << 8 | H2C_Parameter[2]
- )
- );
-
pBtCoexist->fBtcFillH2c(pBtCoexist, 0x66, 3, H2C_Parameter);
}
void EXhalbtc8723b2ant_SpecialPacketNotify(struct btc_coexist *pBtCoexist, u8 type)
{
- if (type == BTC_PACKET_DHCP) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
- }
}
void EXhalbtc8723b2ant_BtInfoNotify(
@@ -3375,21 +2893,14 @@ void EXhalbtc8723b2ant_BtInfoNotify(
pCoexSta->btInfoC2hCnt[rspSource]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length =%d, hex data =[", rspSource, length));
for (i = 0; i < length; i++) {
pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
if (i == 1)
btInfo = tmpBuf[i];
- if (i == length - 1) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
- } else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
- }
}
if (pBtCoexist->bManualControl) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
return;
}
@@ -3404,14 +2915,12 @@ void EXhalbtc8723b2ant_BtInfoNotify(
pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_TX_RX_MASK, &pCoexSta->bBtTxRxMask);
if (pCoexSta->bBtTxRxMask) {
 		/* BT info is responded by BT FW and BT RF REG 0x3C != 0x01 => Need to switch BT TRx Mask */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n"));
pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x01);
}
/* Here we need to resend some wifi info to BT */
 	/* because bt is reset and loses the info. */
if ((pCoexSta->btInfoExt & BIT1)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"));
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
if (bWifiConnected)
@@ -3421,7 +2930,6 @@ void EXhalbtc8723b2ant_BtInfoNotify(
}
if ((pCoexSta->btInfoExt & BIT3)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"));
halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, false);
} else {
/* BT already NOT ignore Wlan active, do nothing here. */
@@ -3465,22 +2973,17 @@ void EXhalbtc8723b2ant_BtInfoNotify(
if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"));
} else if (btInfo == BT_INFO_8723B_2ANT_B_CONNECTION) { /* connection exists but no busy */
pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"));
} else if (
(btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
(btInfo & BT_INFO_8723B_2ANT_B_SCO_BUSY)
) {
pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"));
} else if (btInfo & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"));
} else {
pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"));
}
if (
@@ -3505,8 +3008,6 @@ void EXhalbtc8723b2ant_BtInfoNotify(
void EXhalbtc8723b2ant_HaltNotify(struct btc_coexist *pBtCoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
-
halbtc8723b2ant_WifiOffHwCfg(pBtCoexist);
pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x15); /* BT goto standby while GNT_BT 1-->0 */
halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
@@ -3516,12 +3017,8 @@ void EXhalbtc8723b2ant_HaltNotify(struct btc_coexist *pBtCoexist)
void EXhalbtc8723b2ant_PnpNotify(struct btc_coexist *pBtCoexist, u8 pnpState)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
-
if (BTC_WIFI_PNP_SLEEP == pnpState) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to SLEEP\n"));
} else if (BTC_WIFI_PNP_WAKE_UP == pnpState) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to WAKE UP\n"));
halbtc8723b2ant_InitHwConfig(pBtCoexist, false);
halbtc8723b2ant_InitCoexDm(pBtCoexist);
halbtc8723b2ant_QueryBtInfo(pBtCoexist);
@@ -3533,16 +3030,10 @@ void EXhalbtc8723b2ant_Periodical(struct btc_coexist *pBtCoexist)
static u8 disVerInfoCnt;
u32 fwVer = 0, btPatchVer = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical ===========================\n"));
-
if (disVerInfoCnt <= 5) {
disVerInfoCnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
- GLCoexVerDate8723b2Ant, GLCoexVer8723b2Ant, fwVer, btPatchVer, btPatchVer));
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
}
if (
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
index 7b2d94a33d9c..deb57fa15eaf 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -78,29 +78,6 @@ enum {
BTC_CHIP_MAX
};
-enum {
- BTC_MSG_INTERFACE = 0x0,
- BTC_MSG_ALGORITHM = 0x1,
- BTC_MSG_MAX
-};
-extern u32 GLBtcDbgType[];
-
-/* following is for BTC_MSG_INTERFACE */
-#define INTF_INIT BIT0
-#define INTF_NOTIFY BIT2
-
-/* following is for BTC_ALGORITHM */
-#define ALGO_BT_RSSI_STATE BIT0
-#define ALGO_WIFI_RSSI_STATE BIT1
-#define ALGO_BT_MONITOR BIT2
-#define ALGO_TRACE BIT3
-#define ALGO_TRACE_FW BIT4
-#define ALGO_TRACE_FW_DETAIL BIT5
-#define ALGO_TRACE_FW_EXEC BIT6
-#define ALGO_TRACE_SW BIT7
-#define ALGO_TRACE_SW_DETAIL BIT8
-#define ALGO_TRACE_SW_EXEC BIT9
-
/* following is for wifi link status */
#define WIFI_STA_CONNECTED BIT0
#define WIFI_AP_CONNECTED BIT1
@@ -112,50 +89,6 @@ extern u32 GLBtcDbgType[];
#define CL_SPRINTF snprintf
#define CL_PRINTF DCMD_Printf
-/* The following is for dbgview print */
-#if DBG
-#define BTC_PRINT(dbgtype, dbgflag, printstr)\
-{\
- if (GLBtcDbgType[dbgtype] & dbgflag)\
- DbgPrint printstr;\
-}
-
-#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)\
-{\
- if (GLBtcDbgType[dbgtype] & dbgflag) {\
- int __i;\
- u8 *ptr = (u8 *)_Ptr;\
- DbgPrint printstr;\
- DbgPrint(" ");\
- for (__i = 0; __i < 6; __i++)\
- DbgPrint("%02X%s", ptr[__i], (__i == 5) ? "" : "-");\
- DbgPrint("\n");\
- } \
-}
-
-#define BTC_PRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
-{\
- if (GLBtcDbgType[dbgtype] & dbgflag) {\
- int __i;\
- u8 *ptr = (u8 *)_HexData;\
- DbgPrint(_TitleString);\
- for (__i = 0; __i < (int)_HexDataLen; __i++) {\
- DbgPrint("%02X%s", ptr[__i], (((__i + 1) % 4) == 0) ? " " : " ");\
- if (((__i + 1) % 16) == 0)\
- DbgPrint("\n");\
- } \
- DbgPrint("\n");\
- } \
-}
-
-#else
-#define BTC_PRINT(dbgtype, dbgflag, printstr) no_printk printstr
-#define BTC_PRINT_F(dbgtype, dbgflag, printstr) no_printk printstr
-#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr) no_printk printstr
-#define BTC_PRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen) \
- no_printk("%s %p %zu", _TitleString, _HexData, _HexDataLen)
-#endif
-
struct btc_board_info {
/* The following is some board information */
u8 btChipType;
@@ -209,7 +142,6 @@ enum {
BTC_GET_BL_WIFI_LINK,
BTC_GET_BL_WIFI_ROAM,
BTC_GET_BL_WIFI_4_WAY_PROGRESS,
- BTC_GET_BL_WIFI_UNDER_5G,
BTC_GET_BL_WIFI_AP_MODE_ENABLE,
BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
BTC_GET_BL_WIFI_UNDER_B_MODE,
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
index 016d257b90a0..3de8dcb5ed7c 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
@@ -33,47 +33,6 @@ static bool CheckPositive(
pDM_Odm->TypeALNA << 16 |
pDM_Odm->TypeAPA << 24;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1,
- cond2
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1,
- driver2
- )
- );
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (" (Platform, Interface) = (0x%X, 0x%X)\n",
- pDM_Odm->SupportPlatform,
- pDM_Odm->SupportInterface
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- " (Board, Package) = (0x%X, 0x%X)\n",
- pDM_Odm->BoardType,
- pDM_Odm->PackageType
- )
- );
-
/* Value Defined Check =============== */
/* QFN Type [15:12] and Cut Version [27:24] need to do value check */
@@ -263,13 +222,6 @@ void ODM_ReadAndConfig_MP_8723B_AGC_TAB(struct dm_odm_t *pDM_Odm)
u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_AGC_TAB);
u32 *Array = Array_MP_8723B_AGC_TAB;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_8723B_AGC_TAB\n")
- );
-
for (i = 0; i < ArrayLen; i += 2) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
@@ -532,13 +484,6 @@ void ODM_ReadAndConfig_MP_8723B_PHY_REG(struct dm_odm_t *pDM_Odm)
u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_PHY_REG);
u32 *Array = Array_MP_8723B_PHY_REG;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_8723B_PHY_REG\n")
- );
-
for (i = 0; i < ArrayLen; i += 2) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
@@ -598,12 +543,12 @@ void ODM_ReadAndConfig_MP_8723B_PHY_REG(struct dm_odm_t *pDM_Odm)
******************************************************************************/
static u32 Array_MP_8723B_PHY_REG_PG[] = {
- 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
- 0, 0, 0, 0x0000086c, 0xffffff00, 0x32343600,
- 0, 0, 0, 0x00000e00, 0xffffffff, 0x40424444,
- 0, 0, 0, 0x00000e04, 0xffffffff, 0x28323638,
- 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
- 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
+ 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
+ 0, 0, 0x0000086c, 0xffffff00, 0x32343600,
+ 0, 0, 0x00000e00, 0xffffffff, 0x40424444,
+ 0, 0, 0x00000e04, 0xffffffff, 0x28323638,
+ 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
+ 0, 0, 0x00000e14, 0xffffffff, 0x26303436
};
void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(struct dm_odm_t *pDM_Odm)
@@ -611,24 +556,16 @@ void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(struct dm_odm_t *pDM_Odm)
u32 i = 0;
u32 *Array = Array_MP_8723B_PHY_REG_PG;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_8723B_PHY_REG_PG\n")
- );
-
pDM_Odm->PhyRegPgVersion = 1;
pDM_Odm->PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
- for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_PHY_REG_PG); i += 6) {
+ for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_PHY_REG_PG); i += 5) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
u32 v3 = Array[i+2];
u32 v4 = Array[i+3];
u32 v5 = Array[i+4];
- u32 v6 = Array[i+5];
- odm_ConfigBB_PHY_REG_PG_8723B(pDM_Odm, v1, v2, v3, v4, v5, v6);
+ odm_ConfigBB_PHY_REG_PG_8723B(pDM_Odm, v1, v2, v3, v4, v5);
}
}
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
index 677bcfa10b0d..47e66f4ad9d1 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
@@ -33,48 +33,6 @@ static bool CheckPositive(
pDM_Odm->TypeALNA << 16 |
pDM_Odm->TypeAPA << 24;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1,
- cond2
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1,
- driver2
- )
- );
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- " (Platform, Interface) = (0x%X, 0x%X)\n",
- pDM_Odm->SupportPlatform,
- pDM_Odm->SupportInterface
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- " (Board, Package) = (0x%X, 0x%X)\n",
- pDM_Odm->BoardType,
- pDM_Odm->PackageType
- )
- );
-
/* Value Defined Check =============== */
/* QFN Type [15:12] and Cut Version [27:24] need to do value check */
@@ -234,13 +192,6 @@ void ODM_ReadAndConfig_MP_8723B_MAC_REG(struct dm_odm_t *pDM_Odm)
u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_MAC_REG);
u32 *Array = Array_MP_8723B_MAC_REG;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_8723B_MAC_REG\n")
- );
-
for (i = 0; i < ArrayLen; i += 2) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
index 2c450c1ce7e7..00d429977ea9 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
@@ -33,48 +33,6 @@ static bool CheckPositive(
pDM_Odm->TypeALNA << 16 |
pDM_Odm->TypeAPA << 24;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1,
- cond2
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1,
- driver2
- )
- );
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- " (Platform, Interface) = (0x%X, 0x%X)\n",
- pDM_Odm->SupportPlatform,
- pDM_Odm->SupportInterface
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- " (Board, Package) = (0x%X, 0x%X)\n",
- pDM_Odm->BoardType,
- pDM_Odm->PackageType
- )
- );
-
/* Value Defined Check =============== */
/* QFN Type [15:12] and Cut Version [27:24] need to do value check */
@@ -265,13 +223,6 @@ void ODM_ReadAndConfig_MP_8723B_RadioA(struct dm_odm_t *pDM_Odm)
u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_RadioA);
u32 *Array = Array_MP_8723B_RadioA;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_8723B_RadioA\n")
- );
-
for (i = 0; i < ArrayLen; i += 2) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
@@ -331,62 +282,6 @@ void ODM_ReadAndConfig_MP_8723B_RadioA(struct dm_odm_t *pDM_Odm)
* TxPowerTrack_SDIO.TXT
******************************************************************************/
-static u8 gDeltaSwingTableIdx_MP_5GB_N_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
- {
- 0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
- 9, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
- 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
- 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
- },
-};
-static u8 gDeltaSwingTableIdx_MP_5GB_P_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
- 12, 13, 14, 15, 15, 16, 16, 17, 17, 18, 19, 20, 20, 20
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 20, 21, 21, 21
- },
-};
-static u8 gDeltaSwingTableIdx_MP_5GA_N_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
- {
- 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10,
- 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10,
- 11, 11, 12, 13, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11,
- 11, 12, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16
- },
-};
-static u8 gDeltaSwingTableIdx_MP_5GA_P_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
- {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 20, 21, 21, 21
- },
- {
- 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
- 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 20, 21, 21, 21
- },
-};
static u8 gDeltaSwingTableIdx_MP_2GB_N_TxPowerTrack_SDIO_8723B[] = {
0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 6, 6,
7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, 14, 15
@@ -424,13 +319,6 @@ void ODM_ReadAndConfig_MP_8723B_TxPowerTrack_SDIO(struct dm_odm_t *pDM_Odm)
{
struct odm_rf_cal_t *pRFCalibrateInfo = &pDM_Odm->RFCalibrateInfo;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_MP_8723B\n")
- );
-
memcpy(
pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P,
@@ -473,27 +361,6 @@ void ODM_ReadAndConfig_MP_8723B_TxPowerTrack_SDIO(struct dm_odm_t *pDM_Odm)
gDeltaSwingTableIdx_MP_2GCCKB_N_TxPowerTrack_SDIO_8723B,
DELTA_SWINGIDX_SIZE
);
-
- memcpy(
- pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P,
- gDeltaSwingTableIdx_MP_5GA_P_TxPowerTrack_SDIO_8723B,
- DELTA_SWINGIDX_SIZE*3
- );
- memcpy(
- pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N,
- gDeltaSwingTableIdx_MP_5GA_N_TxPowerTrack_SDIO_8723B,
- DELTA_SWINGIDX_SIZE*3
- );
- memcpy(
- pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P,
- gDeltaSwingTableIdx_MP_5GB_P_TxPowerTrack_SDIO_8723B,
- DELTA_SWINGIDX_SIZE*3
- );
- memcpy(
- pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N,
- gDeltaSwingTableIdx_MP_5GB_N_TxPowerTrack_SDIO_8723B,
- DELTA_SWINGIDX_SIZE*3
- );
}
/******************************************************************************
@@ -501,258 +368,258 @@ void ODM_ReadAndConfig_MP_8723B_TxPowerTrack_SDIO(struct dm_odm_t *pDM_Odm)
******************************************************************************/
static u8 *Array_MP_8723B_TXPWR_LMT[] = {
- "FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "02", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "02", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "03", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "03", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "04", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "04", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "05", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "05", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "06", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "06", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "07", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "07", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "08", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "08", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "09", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "09", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "10", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "10", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "11", "32",
- "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "11", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "12", "63",
- "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "12", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "13", "63",
- "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32",
- "MKK", "2.4G", "20M", "CCK", "1T", "13", "32",
- "FCC", "2.4G", "20M", "CCK", "1T", "14", "63",
- "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63",
- "MKK", "2.4G", "20M", "CCK", "1T", "14", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "01", "28",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "01", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "02", "28",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "03", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "04", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "05", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "06", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "07", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "08", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "09", "32",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "10", "28",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "11", "28",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32",
- "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32",
- "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63",
- "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63",
- "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63",
- "FCC", "2.4G", "20M", "HT", "1T", "01", "26",
- "ETSI", "2.4G", "20M", "HT", "1T", "01", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "01", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "02", "26",
- "ETSI", "2.4G", "20M", "HT", "1T", "02", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "02", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "03", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "03", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "03", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "04", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "04", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "04", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "05", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "05", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "05", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "06", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "06", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "06", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "07", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "07", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "07", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "08", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "08", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "08", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "09", "32",
- "ETSI", "2.4G", "20M", "HT", "1T", "09", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "09", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "10", "26",
- "ETSI", "2.4G", "20M", "HT", "1T", "10", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "10", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "11", "26",
- "ETSI", "2.4G", "20M", "HT", "1T", "11", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "11", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "12", "63",
- "ETSI", "2.4G", "20M", "HT", "1T", "12", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "12", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "13", "63",
- "ETSI", "2.4G", "20M", "HT", "1T", "13", "32",
- "MKK", "2.4G", "20M", "HT", "1T", "13", "32",
- "FCC", "2.4G", "20M", "HT", "1T", "14", "63",
- "ETSI", "2.4G", "20M", "HT", "1T", "14", "63",
- "MKK", "2.4G", "20M", "HT", "1T", "14", "63",
- "FCC", "2.4G", "20M", "HT", "2T", "01", "30",
- "ETSI", "2.4G", "20M", "HT", "2T", "01", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "01", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "02", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "02", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "02", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "03", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "03", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "03", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "04", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "04", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "04", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "05", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "05", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "05", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "06", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "06", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "06", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "07", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "07", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "07", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "08", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "08", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "08", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "09", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "09", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "09", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "10", "32",
- "ETSI", "2.4G", "20M", "HT", "2T", "10", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "10", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "11", "30",
- "ETSI", "2.4G", "20M", "HT", "2T", "11", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "11", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "12", "63",
- "ETSI", "2.4G", "20M", "HT", "2T", "12", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "12", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "13", "63",
- "ETSI", "2.4G", "20M", "HT", "2T", "13", "32",
- "MKK", "2.4G", "20M", "HT", "2T", "13", "32",
- "FCC", "2.4G", "20M", "HT", "2T", "14", "63",
- "ETSI", "2.4G", "20M", "HT", "2T", "14", "63",
- "MKK", "2.4G", "20M", "HT", "2T", "14", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "01", "63",
- "ETSI", "2.4G", "40M", "HT", "1T", "01", "63",
- "MKK", "2.4G", "40M", "HT", "1T", "01", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "02", "63",
- "ETSI", "2.4G", "40M", "HT", "1T", "02", "63",
- "MKK", "2.4G", "40M", "HT", "1T", "02", "63",
- "FCC", "2.4G", "40M", "HT", "1T", "03", "26",
- "ETSI", "2.4G", "40M", "HT", "1T", "03", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "03", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "04", "26",
- "ETSI", "2.4G", "40M", "HT", "1T", "04", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "04", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "05", "32",
- "ETSI", "2.4G", "40M", "HT", "1T", "05", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "05", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "06", "32",
- "ETSI", "2.4G", "40M", "HT", "1T", "06", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "06", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "07", "32",
- "ETSI", "2.4G", "40M", "HT", "1T", "07", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "07", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "08", "26",
- "ETSI", "2.4G", "40M", "HT", "1T", "08", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "08", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "09", "26",
- "ETSI", "2.4G", "40M", "HT", "1T", "09", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "09", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "10", "26",
- "ETSI", "2.4G", "40M", "HT", "1T", "10", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "10", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "11", "26",
- "ETSI", "2.4G", "40M", "HT", "1T", "11", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "11", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "12", "63",
- "ETSI", "2.4G", "40M", "HT", "1T", "12", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "12", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "13", "63",
- "ETSI", "2.4G", "40M", "HT", "1T", "13", "32",
- "MKK", "2.4G", "40M", "HT", "1T", "13", "32",
- "FCC", "2.4G", "40M", "HT", "1T", "14", "63",
- "ETSI", "2.4G", "40M", "HT", "1T", "14", "63",
- "MKK", "2.4G", "40M", "HT", "1T", "14", "63",
- "FCC", "2.4G", "40M", "HT", "2T", "01", "63",
- "ETSI", "2.4G", "40M", "HT", "2T", "01", "63",
- "MKK", "2.4G", "40M", "HT", "2T", "01", "63",
- "FCC", "2.4G", "40M", "HT", "2T", "02", "63",
- "ETSI", "2.4G", "40M", "HT", "2T", "02", "63",
- "MKK", "2.4G", "40M", "HT", "2T", "02", "63",
- "FCC", "2.4G", "40M", "HT", "2T", "03", "30",
- "ETSI", "2.4G", "40M", "HT", "2T", "03", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "03", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "04", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "04", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "04", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "05", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "05", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "05", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "06", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "06", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "06", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "07", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "07", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "07", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "08", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "08", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "08", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "09", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "09", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "09", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "10", "32",
- "ETSI", "2.4G", "40M", "HT", "2T", "10", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "10", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "11", "30",
- "ETSI", "2.4G", "40M", "HT", "2T", "11", "30",
- "MKK", "2.4G", "40M", "HT", "2T", "11", "30",
- "FCC", "2.4G", "40M", "HT", "2T", "12", "63",
- "ETSI", "2.4G", "40M", "HT", "2T", "12", "32",
- "MKK", "2.4G", "40M", "HT", "2T", "12", "32",
- "FCC", "2.4G", "40M", "HT", "2T", "13", "63",
- "ETSI", "2.4G", "40M", "HT", "2T", "13", "32",
- "MKK", "2.4G", "40M", "HT", "2T", "13", "32",
- "FCC", "2.4G", "40M", "HT", "2T", "14", "63",
- "ETSI", "2.4G", "40M", "HT", "2T", "14", "63",
- "MKK", "2.4G", "40M", "HT", "2T", "14", "63"
+ "FCC", "20M", "CCK", "1T", "01", "32",
+ "ETSI", "20M", "CCK", "1T", "01", "32",
+ "MKK", "20M", "CCK", "1T", "01", "32",
+ "FCC", "20M", "CCK", "1T", "02", "32",
+ "ETSI", "20M", "CCK", "1T", "02", "32",
+ "MKK", "20M", "CCK", "1T", "02", "32",
+ "FCC", "20M", "CCK", "1T", "03", "32",
+ "ETSI", "20M", "CCK", "1T", "03", "32",
+ "MKK", "20M", "CCK", "1T", "03", "32",
+ "FCC", "20M", "CCK", "1T", "04", "32",
+ "ETSI", "20M", "CCK", "1T", "04", "32",
+ "MKK", "20M", "CCK", "1T", "04", "32",
+ "FCC", "20M", "CCK", "1T", "05", "32",
+ "ETSI", "20M", "CCK", "1T", "05", "32",
+ "MKK", "20M", "CCK", "1T", "05", "32",
+ "FCC", "20M", "CCK", "1T", "06", "32",
+ "ETSI", "20M", "CCK", "1T", "06", "32",
+ "MKK", "20M", "CCK", "1T", "06", "32",
+ "FCC", "20M", "CCK", "1T", "07", "32",
+ "ETSI", "20M", "CCK", "1T", "07", "32",
+ "MKK", "20M", "CCK", "1T", "07", "32",
+ "FCC", "20M", "CCK", "1T", "08", "32",
+ "ETSI", "20M", "CCK", "1T", "08", "32",
+ "MKK", "20M", "CCK", "1T", "08", "32",
+ "FCC", "20M", "CCK", "1T", "09", "32",
+ "ETSI", "20M", "CCK", "1T", "09", "32",
+ "MKK", "20M", "CCK", "1T", "09", "32",
+ "FCC", "20M", "CCK", "1T", "10", "32",
+ "ETSI", "20M", "CCK", "1T", "10", "32",
+ "MKK", "20M", "CCK", "1T", "10", "32",
+ "FCC", "20M", "CCK", "1T", "11", "32",
+ "ETSI", "20M", "CCK", "1T", "11", "32",
+ "MKK", "20M", "CCK", "1T", "11", "32",
+ "FCC", "20M", "CCK", "1T", "12", "63",
+ "ETSI", "20M", "CCK", "1T", "12", "32",
+ "MKK", "20M", "CCK", "1T", "12", "32",
+ "FCC", "20M", "CCK", "1T", "13", "63",
+ "ETSI", "20M", "CCK", "1T", "13", "32",
+ "MKK", "20M", "CCK", "1T", "13", "32",
+ "FCC", "20M", "CCK", "1T", "14", "63",
+ "ETSI", "20M", "CCK", "1T", "14", "63",
+ "MKK", "20M", "CCK", "1T", "14", "32",
+ "FCC", "20M", "OFDM", "1T", "01", "28",
+ "ETSI", "20M", "OFDM", "1T", "01", "32",
+ "MKK", "20M", "OFDM", "1T", "01", "32",
+ "FCC", "20M", "OFDM", "1T", "02", "28",
+ "ETSI", "20M", "OFDM", "1T", "02", "32",
+ "MKK", "20M", "OFDM", "1T", "02", "32",
+ "FCC", "20M", "OFDM", "1T", "03", "32",
+ "ETSI", "20M", "OFDM", "1T", "03", "32",
+ "MKK", "20M", "OFDM", "1T", "03", "32",
+ "FCC", "20M", "OFDM", "1T", "04", "32",
+ "ETSI", "20M", "OFDM", "1T", "04", "32",
+ "MKK", "20M", "OFDM", "1T", "04", "32",
+ "FCC", "20M", "OFDM", "1T", "05", "32",
+ "ETSI", "20M", "OFDM", "1T", "05", "32",
+ "MKK", "20M", "OFDM", "1T", "05", "32",
+ "FCC", "20M", "OFDM", "1T", "06", "32",
+ "ETSI", "20M", "OFDM", "1T", "06", "32",
+ "MKK", "20M", "OFDM", "1T", "06", "32",
+ "FCC", "20M", "OFDM", "1T", "07", "32",
+ "ETSI", "20M", "OFDM", "1T", "07", "32",
+ "MKK", "20M", "OFDM", "1T", "07", "32",
+ "FCC", "20M", "OFDM", "1T", "08", "32",
+ "ETSI", "20M", "OFDM", "1T", "08", "32",
+ "MKK", "20M", "OFDM", "1T", "08", "32",
+ "FCC", "20M", "OFDM", "1T", "09", "32",
+ "ETSI", "20M", "OFDM", "1T", "09", "32",
+ "MKK", "20M", "OFDM", "1T", "09", "32",
+ "FCC", "20M", "OFDM", "1T", "10", "28",
+ "ETSI", "20M", "OFDM", "1T", "10", "32",
+ "MKK", "20M", "OFDM", "1T", "10", "32",
+ "FCC", "20M", "OFDM", "1T", "11", "28",
+ "ETSI", "20M", "OFDM", "1T", "11", "32",
+ "MKK", "20M", "OFDM", "1T", "11", "32",
+ "FCC", "20M", "OFDM", "1T", "12", "63",
+ "ETSI", "20M", "OFDM", "1T", "12", "32",
+ "MKK", "20M", "OFDM", "1T", "12", "32",
+ "FCC", "20M", "OFDM", "1T", "13", "63",
+ "ETSI", "20M", "OFDM", "1T", "13", "32",
+ "MKK", "20M", "OFDM", "1T", "13", "32",
+ "FCC", "20M", "OFDM", "1T", "14", "63",
+ "ETSI", "20M", "OFDM", "1T", "14", "63",
+ "MKK", "20M", "OFDM", "1T", "14", "63",
+ "FCC", "20M", "HT", "1T", "01", "26",
+ "ETSI", "20M", "HT", "1T", "01", "32",
+ "MKK", "20M", "HT", "1T", "01", "32",
+ "FCC", "20M", "HT", "1T", "02", "26",
+ "ETSI", "20M", "HT", "1T", "02", "32",
+ "MKK", "20M", "HT", "1T", "02", "32",
+ "FCC", "20M", "HT", "1T", "03", "32",
+ "ETSI", "20M", "HT", "1T", "03", "32",
+ "MKK", "20M", "HT", "1T", "03", "32",
+ "FCC", "20M", "HT", "1T", "04", "32",
+ "ETSI", "20M", "HT", "1T", "04", "32",
+ "MKK", "20M", "HT", "1T", "04", "32",
+ "FCC", "20M", "HT", "1T", "05", "32",
+ "ETSI", "20M", "HT", "1T", "05", "32",
+ "MKK", "20M", "HT", "1T", "05", "32",
+ "FCC", "20M", "HT", "1T", "06", "32",
+ "ETSI", "20M", "HT", "1T", "06", "32",
+ "MKK", "20M", "HT", "1T", "06", "32",
+ "FCC", "20M", "HT", "1T", "07", "32",
+ "ETSI", "20M", "HT", "1T", "07", "32",
+ "MKK", "20M", "HT", "1T", "07", "32",
+ "FCC", "20M", "HT", "1T", "08", "32",
+ "ETSI", "20M", "HT", "1T", "08", "32",
+ "MKK", "20M", "HT", "1T", "08", "32",
+ "FCC", "20M", "HT", "1T", "09", "32",
+ "ETSI", "20M", "HT", "1T", "09", "32",
+ "MKK", "20M", "HT", "1T", "09", "32",
+ "FCC", "20M", "HT", "1T", "10", "26",
+ "ETSI", "20M", "HT", "1T", "10", "32",
+ "MKK", "20M", "HT", "1T", "10", "32",
+ "FCC", "20M", "HT", "1T", "11", "26",
+ "ETSI", "20M", "HT", "1T", "11", "32",
+ "MKK", "20M", "HT", "1T", "11", "32",
+ "FCC", "20M", "HT", "1T", "12", "63",
+ "ETSI", "20M", "HT", "1T", "12", "32",
+ "MKK", "20M", "HT", "1T", "12", "32",
+ "FCC", "20M", "HT", "1T", "13", "63",
+ "ETSI", "20M", "HT", "1T", "13", "32",
+ "MKK", "20M", "HT", "1T", "13", "32",
+ "FCC", "20M", "HT", "1T", "14", "63",
+ "ETSI", "20M", "HT", "1T", "14", "63",
+ "MKK", "20M", "HT", "1T", "14", "63",
+ "FCC", "20M", "HT", "2T", "01", "30",
+ "ETSI", "20M", "HT", "2T", "01", "32",
+ "MKK", "20M", "HT", "2T", "01", "32",
+ "FCC", "20M", "HT", "2T", "02", "32",
+ "ETSI", "20M", "HT", "2T", "02", "32",
+ "MKK", "20M", "HT", "2T", "02", "32",
+ "FCC", "20M", "HT", "2T", "03", "32",
+ "ETSI", "20M", "HT", "2T", "03", "32",
+ "MKK", "20M", "HT", "2T", "03", "32",
+ "FCC", "20M", "HT", "2T", "04", "32",
+ "ETSI", "20M", "HT", "2T", "04", "32",
+ "MKK", "20M", "HT", "2T", "04", "32",
+ "FCC", "20M", "HT", "2T", "05", "32",
+ "ETSI", "20M", "HT", "2T", "05", "32",
+ "MKK", "20M", "HT", "2T", "05", "32",
+ "FCC", "20M", "HT", "2T", "06", "32",
+ "ETSI", "20M", "HT", "2T", "06", "32",
+ "MKK", "20M", "HT", "2T", "06", "32",
+ "FCC", "20M", "HT", "2T", "07", "32",
+ "ETSI", "20M", "HT", "2T", "07", "32",
+ "MKK", "20M", "HT", "2T", "07", "32",
+ "FCC", "20M", "HT", "2T", "08", "32",
+ "ETSI", "20M", "HT", "2T", "08", "32",
+ "MKK", "20M", "HT", "2T", "08", "32",
+ "FCC", "20M", "HT", "2T", "09", "32",
+ "ETSI", "20M", "HT", "2T", "09", "32",
+ "MKK", "20M", "HT", "2T", "09", "32",
+ "FCC", "20M", "HT", "2T", "10", "32",
+ "ETSI", "20M", "HT", "2T", "10", "32",
+ "MKK", "20M", "HT", "2T", "10", "32",
+ "FCC", "20M", "HT", "2T", "11", "30",
+ "ETSI", "20M", "HT", "2T", "11", "32",
+ "MKK", "20M", "HT", "2T", "11", "32",
+ "FCC", "20M", "HT", "2T", "12", "63",
+ "ETSI", "20M", "HT", "2T", "12", "32",
+ "MKK", "20M", "HT", "2T", "12", "32",
+ "FCC", "20M", "HT", "2T", "13", "63",
+ "ETSI", "20M", "HT", "2T", "13", "32",
+ "MKK", "20M", "HT", "2T", "13", "32",
+ "FCC", "20M", "HT", "2T", "14", "63",
+ "ETSI", "20M", "HT", "2T", "14", "63",
+ "MKK", "20M", "HT", "2T", "14", "63",
+ "FCC", "40M", "HT", "1T", "01", "63",
+ "ETSI", "40M", "HT", "1T", "01", "63",
+ "MKK", "40M", "HT", "1T", "01", "63",
+ "FCC", "40M", "HT", "1T", "02", "63",
+ "ETSI", "40M", "HT", "1T", "02", "63",
+ "MKK", "40M", "HT", "1T", "02", "63",
+ "FCC", "40M", "HT", "1T", "03", "26",
+ "ETSI", "40M", "HT", "1T", "03", "32",
+ "MKK", "40M", "HT", "1T", "03", "32",
+ "FCC", "40M", "HT", "1T", "04", "26",
+ "ETSI", "40M", "HT", "1T", "04", "32",
+ "MKK", "40M", "HT", "1T", "04", "32",
+ "FCC", "40M", "HT", "1T", "05", "32",
+ "ETSI", "40M", "HT", "1T", "05", "32",
+ "MKK", "40M", "HT", "1T", "05", "32",
+ "FCC", "40M", "HT", "1T", "06", "32",
+ "ETSI", "40M", "HT", "1T", "06", "32",
+ "MKK", "40M", "HT", "1T", "06", "32",
+ "FCC", "40M", "HT", "1T", "07", "32",
+ "ETSI", "40M", "HT", "1T", "07", "32",
+ "MKK", "40M", "HT", "1T", "07", "32",
+ "FCC", "40M", "HT", "1T", "08", "26",
+ "ETSI", "40M", "HT", "1T", "08", "32",
+ "MKK", "40M", "HT", "1T", "08", "32",
+ "FCC", "40M", "HT", "1T", "09", "26",
+ "ETSI", "40M", "HT", "1T", "09", "32",
+ "MKK", "40M", "HT", "1T", "09", "32",
+ "FCC", "40M", "HT", "1T", "10", "26",
+ "ETSI", "40M", "HT", "1T", "10", "32",
+ "MKK", "40M", "HT", "1T", "10", "32",
+ "FCC", "40M", "HT", "1T", "11", "26",
+ "ETSI", "40M", "HT", "1T", "11", "32",
+ "MKK", "40M", "HT", "1T", "11", "32",
+ "FCC", "40M", "HT", "1T", "12", "63",
+ "ETSI", "40M", "HT", "1T", "12", "32",
+ "MKK", "40M", "HT", "1T", "12", "32",
+ "FCC", "40M", "HT", "1T", "13", "63",
+ "ETSI", "40M", "HT", "1T", "13", "32",
+ "MKK", "40M", "HT", "1T", "13", "32",
+ "FCC", "40M", "HT", "1T", "14", "63",
+ "ETSI", "40M", "HT", "1T", "14", "63",
+ "MKK", "40M", "HT", "1T", "14", "63",
+ "FCC", "40M", "HT", "2T", "01", "63",
+ "ETSI", "40M", "HT", "2T", "01", "63",
+ "MKK", "40M", "HT", "2T", "01", "63",
+ "FCC", "40M", "HT", "2T", "02", "63",
+ "ETSI", "40M", "HT", "2T", "02", "63",
+ "MKK", "40M", "HT", "2T", "02", "63",
+ "FCC", "40M", "HT", "2T", "03", "30",
+ "ETSI", "40M", "HT", "2T", "03", "30",
+ "MKK", "40M", "HT", "2T", "03", "30",
+ "FCC", "40M", "HT", "2T", "04", "32",
+ "ETSI", "40M", "HT", "2T", "04", "30",
+ "MKK", "40M", "HT", "2T", "04", "30",
+ "FCC", "40M", "HT", "2T", "05", "32",
+ "ETSI", "40M", "HT", "2T", "05", "30",
+ "MKK", "40M", "HT", "2T", "05", "30",
+ "FCC", "40M", "HT", "2T", "06", "32",
+ "ETSI", "40M", "HT", "2T", "06", "30",
+ "MKK", "40M", "HT", "2T", "06", "30",
+ "FCC", "40M", "HT", "2T", "07", "32",
+ "ETSI", "40M", "HT", "2T", "07", "30",
+ "MKK", "40M", "HT", "2T", "07", "30",
+ "FCC", "40M", "HT", "2T", "08", "32",
+ "ETSI", "40M", "HT", "2T", "08", "30",
+ "MKK", "40M", "HT", "2T", "08", "30",
+ "FCC", "40M", "HT", "2T", "09", "32",
+ "ETSI", "40M", "HT", "2T", "09", "30",
+ "MKK", "40M", "HT", "2T", "09", "30",
+ "FCC", "40M", "HT", "2T", "10", "32",
+ "ETSI", "40M", "HT", "2T", "10", "30",
+ "MKK", "40M", "HT", "2T", "10", "30",
+ "FCC", "40M", "HT", "2T", "11", "30",
+ "ETSI", "40M", "HT", "2T", "11", "30",
+ "MKK", "40M", "HT", "2T", "11", "30",
+ "FCC", "40M", "HT", "2T", "12", "63",
+ "ETSI", "40M", "HT", "2T", "12", "32",
+ "MKK", "40M", "HT", "2T", "12", "32",
+ "FCC", "40M", "HT", "2T", "13", "63",
+ "ETSI", "40M", "HT", "2T", "13", "32",
+ "MKK", "40M", "HT", "2T", "13", "32",
+ "FCC", "40M", "HT", "2T", "14", "63",
+ "ETSI", "40M", "HT", "2T", "14", "63",
+ "MKK", "40M", "HT", "2T", "14", "63"
};
void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(struct dm_odm_t *pDM_Odm)
@@ -760,26 +627,17 @@ void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(struct dm_odm_t *pDM_Odm)
u32 i = 0;
u8 **Array = Array_MP_8723B_TXPWR_LMT;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- ("===> ODM_ReadAndConfig_MP_8723B_TXPWR_LMT\n")
- );
-
- for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_TXPWR_LMT); i += 7) {
+ for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_TXPWR_LMT); i += 6) {
u8 *regulation = Array[i];
- u8 *band = Array[i+1];
- u8 *bandwidth = Array[i+2];
- u8 *rate = Array[i+3];
- u8 *rfPath = Array[i+4];
- u8 *chnl = Array[i+5];
- u8 *val = Array[i+6];
+ u8 *bandwidth = Array[i+1];
+ u8 *rate = Array[i+2];
+ u8 *rfPath = Array[i+3];
+ u8 *chnl = Array[i+4];
+ u8 *val = Array[i+5];
odm_ConfigBB_TXPWR_LMT_8723B(
pDM_Odm,
regulation,
- band,
bandwidth,
rate,
rfPath,
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
index 14426151faae..365e1195b5e5 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -76,7 +76,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, p = 0, i = 0;
+ u8 ThermalValue = 0, delta, delta_LCK, p = 0, i = 0;
u8 ThermalValue_AVG_count = 0;
u32 ThermalValue_AVG = 0;
@@ -108,18 +108,6 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
pDM_Odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++;
pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "===>ODM_TXPowerTrackingCallback_ThermalMeter,\npDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]: %d, pDM_Odm->DefaultOfdmIndex: %d\n",
- pDM_Odm->BbSwingIdxCckBase,
- pDM_Odm->BbSwingIdxOfdmBase[ODM_RF_PATH_A],
- pDM_Odm->DefaultOfdmIndex
- )
- );
-
ThermalValue = (u8)PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, c.ThermalRegAddr, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
if (
!pDM_Odm->RFCalibrateInfo.TxPowerTrackControl ||
@@ -130,13 +118,6 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* 4 3. Initialize ThermalValues of RFCalibrateInfo */
- if (pDM_Odm->RFCalibrateInfo.bReloadtxpowerindex)
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("reload ofdm index for band switch\n")
- );
-
/* 4 4. Calculate average thermal meter */
pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index] = ThermalValue;
@@ -154,19 +135,9 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* Calculate Average ThermalValue after average enough times */
if (ThermalValue_AVG_count) {
ThermalValue = (u8)(ThermalValue_AVG / ThermalValue_AVG_count);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- ThermalValue,
- pHalData->EEPROMThermalMeter
- )
- );
}
- /* 4 5. Calculate delta, delta_LCK, delta_IQK. */
+ /* 4 5. Calculate delta, delta_LCK */
/* delta" here is used to determine whether thermal value changes or not. */
delta =
(ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue) ?
@@ -176,36 +147,10 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
(ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue_LCK) ?
(ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue_LCK) :
(pDM_Odm->RFCalibrateInfo.ThermalValue_LCK - ThermalValue);
- delta_IQK =
- (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue_IQK) ?
- (ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue_IQK) :
- (pDM_Odm->RFCalibrateInfo.ThermalValue_IQK - ThermalValue);
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
- delta,
- delta_LCK,
- delta_IQK
- )
- );
/* 4 6. If necessary, do LCK. */
/* Delta temperature is equal to or larger than 20 centigrade. */
if (delta_LCK >= c.Threshold_IQK) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "delta_LCK(%d) >= Threshold_IQK(%d)\n",
- delta_LCK,
- c.Threshold_IQK
- )
- );
pDM_Odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
if (c.PHY_LCCalibrate)
(*c.PHY_LCCalibrate)(pDM_Odm);
@@ -224,16 +169,6 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* 4 7.1 The Final Power Index = BaseIndex + PowerIndexOffset */
if (ThermalValue > pHalData->EEPROMThermalMeter) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "deltaSwingTableIdx_TUP_A[%d] = %d\n",
- delta,
- deltaSwingTableIdx_TUP_A[delta]
- )
- );
pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_A] =
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A];
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A] =
@@ -243,27 +178,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] =
deltaSwingTableIdx_TUP_A[delta];
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A]
- )
- );
-
if (c.RfPathCount > 1) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "deltaSwingTableIdx_TUP_B[%d] = %d\n",
- delta,
- deltaSwingTableIdx_TUP_B[delta]
- )
- );
pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_B] =
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B];
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B] =
@@ -272,29 +187,9 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* Record delta swing for mix mode power tracking */
pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] =
deltaSwingTableIdx_TUP_B[delta];
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
- pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B]
- )
- );
}
} else {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "deltaSwingTableIdx_TDOWN_A[%d] = %d\n",
- delta,
- deltaSwingTableIdx_TDOWN_A[delta]
- )
- );
-
pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_A] =
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A];
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A] =
@@ -304,28 +199,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] =
-1 * deltaSwingTableIdx_TDOWN_A[delta];
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A]
- )
- );
-
if (c.RfPathCount > 1) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
- delta,
- deltaSwingTableIdx_TDOWN_B[delta]
- )
- );
-
pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_B] =
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B];
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B] =
@@ -334,30 +208,10 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* Record delta swing for mix mode power tracking */
pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] =
-1 * deltaSwingTableIdx_TDOWN_B[delta];
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
- pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B]
- )
- );
}
}
for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "\n\n ================================ [Path-%c] Calculating PowerIndexOffset ================================\n",
- (p == ODM_RF_PATH_A ? 'A' : 'B')
- )
- );
-
if (
pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] ==
pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]
@@ -366,20 +220,6 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
else
pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] - pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]; /* Power Index Diff between 2 times Power Tracking */
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
- (
- p == ODM_RF_PATH_A ? 'A' : 'B'),
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p],
- pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p],
- pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]
- )
- );
-
pDM_Odm->RFCalibrateInfo.OFDM_index[p] =
pDM_Odm->BbSwingIdxOfdmBase[p] +
pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p];
@@ -394,87 +234,23 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
pDM_Odm->BbSwingIdxOfdm[p] =
pDM_Odm->RFCalibrateInfo.OFDM_index[p];
- /* *************Print BB Swing Base and Index Offset************* */
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
- pDM_Odm->BbSwingIdxCck,
- pDM_Odm->BbSwingIdxCckBase,
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p]
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
- pDM_Odm->BbSwingIdxOfdm[p],
- (p == ODM_RF_PATH_A ? 'A' : 'B'),
- pDM_Odm->BbSwingIdxOfdmBase[p],
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p]
- )
- );
-
/* 4 7.1 Handle boundary conditions of index. */
if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] > c.SwingTableSize_OFDM-1)
pDM_Odm->RFCalibrateInfo.OFDM_index[p] = c.SwingTableSize_OFDM-1;
else if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] < OFDM_min_index)
pDM_Odm->RFCalibrateInfo.OFDM_index[p] = OFDM_min_index;
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- ("\n\n ========================================================================================================\n")
- );
if (pDM_Odm->RFCalibrateInfo.CCK_index > c.SwingTableSize_CCK-1)
pDM_Odm->RFCalibrateInfo.CCK_index = c.SwingTableSize_CCK-1;
/* else if (pDM_Odm->RFCalibrateInfo.CCK_index < 0) */
/* pDM_Odm->RFCalibrateInfo.CCK_index = 0; */
} else {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
- pDM_Odm->RFCalibrateInfo.TxPowerTrackControl,
- ThermalValue,
- pDM_Odm->RFCalibrateInfo.ThermalValue
- )
- );
-
for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
- pDM_Odm->RFCalibrateInfo.CCK_index,
- pDM_Odm->BbSwingIdxCckBase
- )
- );
/* Print Swing base & current */
for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
- pDM_Odm->RFCalibrateInfo.OFDM_index[p],
- (p == ODM_RF_PATH_A ? 'A' : 'B'),
- pDM_Odm->BbSwingIdxOfdmBase[p]
- )
- );
}
if (
@@ -490,106 +266,11 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* to increase TX power. Otherwise, EVM will be bad. */
/* */
/* 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. */
- if (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_A],
- delta,
- ThermalValue,
- pHalData->EEPROMThermalMeter,
- pDM_Odm->RFCalibrateInfo.ThermalValue
- )
- );
-
- if (c.RfPathCount > 1)
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "Temperature Increasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_B],
- delta,
- ThermalValue,
- pHalData->EEPROMThermalMeter,
- pDM_Odm->RFCalibrateInfo.ThermalValue
- )
- );
-
- } else if (ThermalValue < pDM_Odm->RFCalibrateInfo.ThermalValue) { /* Low temperature */
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_A],
- delta,
- ThermalValue,
- pHalData->EEPROMThermalMeter,
- pDM_Odm->RFCalibrateInfo.ThermalValue
- )
- );
-
- if (c.RfPathCount > 1)
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_B],
- delta,
- ThermalValue,
- pHalData->EEPROMThermalMeter,
- pDM_Odm->RFCalibrateInfo.ThermalValue
- )
- );
-
- }
if (ThermalValue > pHalData->EEPROMThermalMeter) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "Temperature(%d) higher than PG value(%d)\n",
- ThermalValue,
- pHalData->EEPROMThermalMeter
- )
- );
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- ("**********Enter POWER Tracking MIX_MODE**********\n")
- );
for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
(*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, 0);
} else {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- (
- "Temperature(%d) lower than PG value(%d)\n",
- ThermalValue,
- pHalData->EEPROMThermalMeter
- )
- );
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- ("**********Enter POWER Tracking MIX_MODE**********\n")
- );
for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
(*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, Indexforchannel);
}
@@ -599,26 +280,9 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->BbSwingIdxOfdm[p];
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- (
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue = %d\n",
- pDM_Odm->RFCalibrateInfo.ThermalValue,
- ThermalValue
- )
- );
-
/* Record last Power Tracking Thermal Value */
pDM_Odm->RFCalibrateInfo.ThermalValue = ThermalValue;
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_TX_PWR_TRACK,
- ODM_DBG_LOUD,
- ("<===ODM_TXPowerTrackingCallback_ThermalMeter\n")
- );
-
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index c70b9cf2da32..8121b8eb45b6 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -69,7 +69,7 @@ static void setIqkMatrix_8723B(
ele_D = (OFDMSwingTable_New[OFDM_index] & 0xFFC00000)>>22;
/* new element A = element D x X */
- if ((IqkResult_X != 0) && (*(pDM_Odm->pBandType) == ODM_BAND_2_4G)) {
+ if (IqkResult_X != 0) {
if ((IqkResult_X & 0x00000200) != 0) /* consider minus */
IqkResult_X = IqkResult_X | 0xFFFFFC00;
ele_A = ((IqkResult_X * ele_D)>>8)&0x000003FF;
@@ -129,9 +129,6 @@ static void setIqkMatrix_8723B(
break;
}
}
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxPwrTracking path B: X = 0x%x, Y = 0x%x ele_A = 0x%x ele_C = 0x%x ele_D = 0x%x 0xeb4 = 0x%x 0xebc = 0x%x\n",
- (u32)IqkResult_X, (u32)IqkResult_Y, (u32)ele_A, (u32)ele_C, (u32)ele_D, (u32)IqkResult_X, (u32)IqkResult_Y));
}
@@ -210,8 +207,6 @@ void ODM_TxPwrTrackSetPwr_8723B(
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("===>ODM_TxPwrTrackSetPwr8723B\n"));
-
if (TxRate != 0xFF) {
/* 2 CCK */
if ((TxRate >= MGN_1M) && (TxRate <= MGN_11M))
@@ -233,13 +228,10 @@ void ODM_TxPwrTrackSetPwr_8723B(
else
PwrTrackingLimit_OFDM = pDM_Odm->DefaultOfdmIndex; /* Default OFDM index = 30 */
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxRate = 0x%x, PwrTrackingLimit =%d\n", TxRate, PwrTrackingLimit_OFDM));
if (Method == TXAGC) {
struct adapter *Adapter = pDM_Odm->Adapter;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("odm_TxPwrTrackSetPwr8723B CH =%d\n", *(pDM_Odm->pChannel)));
-
pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
pDM_Odm->Modify_TxAGC_Flag_PathA = true;
@@ -270,10 +262,6 @@ void ODM_TxPwrTrackSetPwr_8723B(
setCCKFilterCoefficient(pDM_Odm, Final_CCK_Swing_Index);
} else if (Method == MIX_MODE) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("pDM_Odm->DefaultOfdmIndex =%d, pDM_Odm->DefaultCCKIndex =%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
- pDM_Odm->DefaultOfdmIndex, pDM_Odm->DefaultCckIndex, pDM_Odm->Absolute_OFDMSwingIdx[RFPath], RFPath));
-
Final_OFDM_Swing_Index = pDM_Odm->DefaultOfdmIndex + pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
Final_CCK_Swing_Index = pDM_Odm->DefaultCckIndex + pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
@@ -287,10 +275,6 @@ void ODM_TxPwrTrackSetPwr_8723B(
pDM_Odm->Modify_TxAGC_Flag_PathA = true;
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
- PwrTrackingLimit_OFDM, pDM_Odm->Remnant_OFDMSwingIdx[RFPath]));
} else if (Final_OFDM_Swing_Index <= 0) {
pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = Final_OFDM_Swing_Index;
@@ -301,26 +285,16 @@ void ODM_TxPwrTrackSetPwr_8723B(
pDM_Odm->Modify_TxAGC_Flag_PathA = true;
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- pDM_Odm->Remnant_OFDMSwingIdx[RFPath]));
} else {
setIqkMatrix_8723B(pDM_Odm, Final_OFDM_Swing_Index, RFPath,
pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0],
pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d\n", Final_OFDM_Swing_Index));
-
if (pDM_Odm->Modify_TxAGC_Flag_PathA) { /* If TxAGC has changed, reset TxAGC again */
pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = 0;
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
pDM_Odm->Modify_TxAGC_Flag_PathA = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A pDM_Odm->Modify_TxAGC_Flag = false\n"));
}
}
@@ -329,30 +303,18 @@ void ODM_TxPwrTrackSetPwr_8723B(
setCCKFilterCoefficient(pDM_Odm, PwrTrackingLimit_CCK);
pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = true;
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A CCK Over Limit , PwrTrackingLimit_CCK = %d , pDM_Odm->Remnant_CCKSwingIdx = %d\n", PwrTrackingLimit_CCK, pDM_Odm->Remnant_CCKSwingIdx));
} else if (Final_CCK_Swing_Index <= 0) { /* Lowest CCK Index = 0 */
pDM_Odm->Remnant_CCKSwingIdx = Final_CCK_Swing_Index;
setCCKFilterCoefficient(pDM_Odm, 0);
pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = true;
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A CCK Under Limit , PwrTrackingLimit_CCK = %d , pDM_Odm->Remnant_CCKSwingIdx = %d\n", 0, pDM_Odm->Remnant_CCKSwingIdx));
} else {
setCCKFilterCoefficient(pDM_Odm, Final_CCK_Swing_Index);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A CCK Compensate with BBSwing , Final_CCK_Swing_Index = %d\n", Final_CCK_Swing_Index));
-
if (pDM_Odm->Modify_TxAGC_Flag_PathA_CCK) { /* If TxAGC has changed, reset TxAGC again */
pDM_Odm->Remnant_CCKSwingIdx = 0;
PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("******Path_A pDM_Odm->Modify_TxAGC_Flag_CCK = false\n"));
}
}
} else
@@ -385,22 +347,7 @@ static void GetDeltaSwingTable_8723B(
*TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P;
*TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N;
}
- } /*else if (36 <= channel && channel <= 64) {
- *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[0];
- *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[0];
- *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[0];
- *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[0];
- } else if (100 <= channel && channel <= 140) {
- *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[1];
- *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[1];
- *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[1];
- *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[1];
- } else if (149 <= channel && channel <= 173) {
- *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[2];
- *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[2];
- *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[2];
- *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[2];
- }*/else {
+ } else {
*TemperatureUP_A = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
*TemperatureDOWN_A = (u8 *)DeltaSwingTableIdx_2GA_N_8188E;
*TemperatureUP_B = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
@@ -442,8 +389,6 @@ static u8 phy_PathA_IQK_8723B(
/* Save RF Path */
Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK!\n"));
-
/* leave IQK mode */
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
@@ -461,7 +406,6 @@ static u8 phy_PathA_IQK_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, 0x01007c00);
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
/* path-A IQK setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
@@ -473,7 +417,6 @@ static u8 phy_PathA_IQK_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
/* LO calibration setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
/* enter IQK mode */
@@ -491,12 +434,10 @@ static u8 phy_PathA_IQK_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
/* One shot, path A LOK & IQK */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_8723B)); */
/* PlatformStallExecution(IQK_DELAY_TIME_8723B*1000); */
mdelay(IQK_DELAY_TIME_8723B);
@@ -513,11 +454,6 @@ static u8 phy_PathA_IQK_8723B(
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
- /* monitor image power before & after IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(afer IQK) = 0x%x\n",
- PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
/* Allen 20131125 */
@@ -550,18 +486,13 @@ static u8 phy_PathA_RxIQK8723B(
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK!\n")); */
-
/* Save RF Path */
Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
/* leave IQK mode */
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A RX IQK:Get TXIMR setting\n"));
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n")); */
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
@@ -587,7 +518,6 @@ static u8 phy_PathA_RxIQK8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
/* LO calibration setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* enter IQK mode */
@@ -605,12 +535,10 @@ static u8 phy_PathA_RxIQK8723B(
PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
/* One shot, path A LOK & IQK */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_8723B)); */
/* PlatformStallExecution(IQK_DELAY_TIME_8723B*1000); */
mdelay(IQK_DELAY_TIME_8723B);
@@ -626,11 +554,6 @@ static u8 phy_PathA_RxIQK8723B(
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
- /* monitor image power before & after IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(afer IQK) = 0x%x\n",
- PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
/* Allen 20131125 */
tmp = (regE9C & 0x03FF0000)>>16;
@@ -651,14 +574,8 @@ static u8 phy_PathA_RxIQK8723B(
u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, u4tmp);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", PHY_QueryBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord), u4tmp));
-
-
- /* 1 RX IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A RX IQK\n"));
/* modify RXIQK mode table */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
@@ -688,7 +605,6 @@ static u8 phy_PathA_RxIQK8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
/* LO calibration setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a8d1);
/* enter IQK mode */
@@ -706,12 +622,10 @@ static u8 phy_PathA_RxIQK8723B(
PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
/* One shot, path A LOK & IQK */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
/* PlatformStallExecution(IQK_DELAY_TIME_8723B*1000); */
mdelay(IQK_DELAY_TIME_8723B);
@@ -726,11 +640,6 @@ static u8 phy_PathA_RxIQK8723B(
/* Check failed */
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
regEA4 = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x, 0xeac = 0x%x\n", regEA4, regEAC));
- /* monitor image power before & after IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea0(before IQK) = 0x%x, 0xea8(afer IQK) = 0x%x\n",
- PHY_QueryBBReg(pDM_Odm->Adapter, 0xea0, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xea8, bMaskDWord)));
/* PA/PAD controlled by 0x0 */
/* leave IQK mode */
@@ -751,8 +660,7 @@ static u8 phy_PathA_RxIQK8723B(
(tmp < 0xf)
)
result |= 0x02;
- else /* if Tx not OK, ignore Rx */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK fail!!\n"));
+
return result;
}
@@ -764,8 +672,6 @@ static u8 phy_PathB_IQK_8723B(struct adapter *padapter)
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK!\n"));
-
/* Save RF Path */
Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
@@ -788,7 +694,6 @@ static u8 phy_PathB_IQK_8723B(struct adapter *padapter)
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, 0x01007c00);
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
/* path-A IQK setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-B IQK setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
@@ -801,7 +706,6 @@ static u8 phy_PathB_IQK_8723B(struct adapter *padapter)
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
/* LO calibration setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
/* enter IQK mode */
@@ -815,12 +719,10 @@ static u8 phy_PathB_IQK_8723B(struct adapter *padapter)
PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
/* One shot, path B LOK & IQK */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path B LOK & IQK!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path B LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
mdelay(IQK_DELAY_TIME_8723B);
@@ -832,18 +734,10 @@ static u8 phy_PathB_IQK_8723B(struct adapter *padapter)
/* leave IQK mode */
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0x948 = 0x%x\n", PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord))); */
-
-
/* Check failed */
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
- /* monitor image power before & after IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(afer IQK) = 0x%x\n",
- PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
/* Allen 20131125 */
tmp = (regE9C & 0x03FF0000)>>16;
@@ -871,8 +765,6 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK!\n")); */
-
/* Save RF Path */
Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
/* leave IQK mode */
@@ -880,11 +772,7 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
/* switch to path B */
PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
-
- /* 1 Get TXIMR setting */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B RX IQK:Get TXIMR setting!\n"));
/* modify RXIQK mode table */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n")); */
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
@@ -912,7 +800,6 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
/* LO calibration setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* enter IQK mode */
@@ -926,13 +813,11 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
/* One shot, path B TXIQK @ RXIQK */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path B LOK & IQK!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
mdelay(IQK_DELAY_TIME_8723B);
@@ -948,18 +833,11 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
- /* monitor image power before & after IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(afer IQK) = 0x%x\n",
- PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
/* Allen 20131125 */
tmp = (regE9C & 0x03FF0000)>>16;
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("tmp1 = 0x%x\n", tmp)); */
if ((tmp & 0x200) > 0)
tmp = 0x400 - tmp;
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("tmp2 = 0x%x\n", tmp)); */
if (
!(regEAC & BIT28) &&
@@ -975,10 +853,6 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, u4tmp);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", PHY_QueryBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord), u4tmp));
-
- /* 1 RX IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B RX IQK\n"));
/* modify RXIQK mode table */
/* 20121009, Kordan> RF Mode = 3 */
@@ -1013,7 +887,6 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
/* LO calibration setting */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a8d1);
/* enter IQK mode */
@@ -1027,12 +900,10 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
/* One shot, path B LOK & IQK */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path B LOK & IQK!\n")); */
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
-/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
mdelay(IQK_DELAY_TIME_8723B);
@@ -1048,12 +919,6 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
regEA4 = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x, 0xeac = 0x%x\n", regEA4, regEAC));
- /* monitor image power before & after IQK */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea0(before IQK) = 0x%x, 0xea8(afer IQK) = 0x%x\n",
- PHY_QueryBBReg(pDM_Odm->Adapter, 0xea0, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xea8, bMaskDWord)));
-
/* PA/PAD controlled by 0x0 */
/* leave IQK mode */
/* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, 0xffffff00, 0x00000000); */
@@ -1075,8 +940,6 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
(tmp < 0xf)
)
result |= 0x02;
- else
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK fail!!\n"));
return result;
}
@@ -1096,8 +959,6 @@ static void _PHY_PathAFillIQKMatrix8723B(
struct odm_rf_cal_t *pRFCalibrateInfo = &pDM_Odm->RFCalibrateInfo;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed"));
-
if (final_candidate == 0xFF)
return;
@@ -1108,7 +969,6 @@ static void _PHY_PathAFillIQKMatrix8723B(
if ((X & 0x00000200) != 0)
X = X | 0xFFFFFC00;
TX0_A = (X * Oldval_0) >> 8;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n", X, TX0_A, Oldval_0));
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT(31), ((X*Oldval_0>>7) & 0x1));
@@ -1119,7 +979,6 @@ static void _PHY_PathAFillIQKMatrix8723B(
/* 2 Tx IQC */
TX0_C = (Y * Oldval_0) >> 8;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C));
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC94][KEY] = rOFDM0_XCTxAFE;
pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC94][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XCTxAFE, bMaskDWord);
@@ -1133,8 +992,6 @@ static void _PHY_PathAFillIQKMatrix8723B(
pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC4C][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskDWord);
if (bTxOnly) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("_PHY_PathAFillIQKMatrix8723B only Tx OK\n"));
-
/* <20130226, Kordan> Saving RxIQC, otherwise not initialized. */
pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][KEY] = rOFDM0_RxIQExtAnta;
pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][VAL] = 0xfffffff & PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_RxIQExtAnta, bMaskDWord);
@@ -1176,8 +1033,6 @@ static void _PHY_PathBFillIQKMatrix8723B(
struct odm_rf_cal_t *pRFCalibrateInfo = &pDM_Odm->RFCalibrateInfo;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed"));
-
if (final_candidate == 0xFF)
return;
@@ -1188,7 +1043,6 @@ static void _PHY_PathBFillIQKMatrix8723B(
if ((X & 0x00000200) != 0)
X = X | 0xFFFFFC00;
TX1_A = (X * Oldval_1) >> 8;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A));
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
@@ -1199,7 +1053,6 @@ static void _PHY_PathBFillIQKMatrix8723B(
Y = Y | 0xFFFFFC00;
TX1_C = (Y * Oldval_1) >> 8;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C));
/* 2 Tx IQC */
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
@@ -1217,8 +1070,6 @@ static void _PHY_PathBFillIQKMatrix8723B(
pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC4C][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskDWord);
if (bTxOnly) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("_PHY_PathBFillIQKMatrix8723B only Tx OK\n"));
-
pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][KEY] = rOFDM0_XARxIQImbalance;
/* pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XARxIQImbalance, bMaskDWord); */
pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL] = 0x40000100;
@@ -1297,7 +1148,6 @@ static void _PHY_SaveADDARegisters8723B(
if (!ODM_CheckPowerStatus(padapter))
return;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
for (i = 0 ; i < RegisterNum ; i++) {
ADDABackup[i] = PHY_QueryBBReg(pDM_Odm->Adapter, ADDAReg[i], bMaskDWord);
}
@@ -1312,7 +1162,6 @@ static void _PHY_SaveMACRegisters8723B(
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n"));
for (i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++) {
MACBackup[i] = rtw_read8(pDM_Odm->Adapter, MACReg[i]);
}
@@ -1332,7 +1181,6 @@ static void _PHY_ReloadADDARegisters8723B(
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n"));
for (i = 0 ; i < RegiesterNum; i++) {
PHY_SetBBReg(pDM_Odm->Adapter, ADDAReg[i], bMaskDWord, ADDABackup[i]);
}
@@ -1362,8 +1210,6 @@ static void _PHY_PathADDAOn8723B(
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n"));
-
pathOn = 0x01c00014;
if (!is2T) {
pathOn = 0x01c00014;
@@ -1386,8 +1232,6 @@ static void _PHY_MACSettingCalibration8723B(
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n"));
-
rtw_write8(pDM_Odm->Adapter, MACReg[i], 0x3F);
for (i = 1 ; i < (IQK_MAC_REG_NUM - 1); i++) {
@@ -1540,17 +1384,12 @@ static void phy_IQCalibrate_8723B(
/* u32 bbvalue; */
if (t == 0) {
-/* bbvalue = PHY_QueryBBReg(pDM_Odm->Adapter, rFPGA0_RFMOD, bMaskDWord); */
-/* RT_DISP(FINIT, INIT_IQK, ("phy_IQCalibrate_8188E() ==>0x%08x\n", bbvalue)); */
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
/* Save ADDA parameters, turn Path A ADDA on */
_PHY_SaveADDARegisters8723B(padapter, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
_PHY_SaveMACRegisters8723B(padapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup);
_PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
_PHY_PathADDAOn8723B(padapter, ADDA_REG, is2T);
@@ -1596,7 +1435,6 @@ static void phy_IQCalibrate_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_A] = PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x8, bRFRegOffsetMask);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n"));
result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
break;
@@ -1607,19 +1445,15 @@ static void phy_IQCalibrate_8723B(
for (i = 0 ; i < retryCount ; i++) {
PathAOK = phy_PathA_RxIQK8723B(padapter, is2T, RF_Path);
if (PathAOK == 0x03) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n"));
/* result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
/* result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
result[t][2] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
result[t][3] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
break;
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n"));
}
}
if (0x00 == PathAOK) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK failed!!\n"));
}
/* path B IQK */
@@ -1633,7 +1467,6 @@ static void phy_IQCalibrate_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_B] = PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_B, 0x8, bRFRegOffsetMask);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Tx IQK Success!!\n"));
result[t][4] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
result[t][5] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
break;
@@ -1644,25 +1477,18 @@ static void phy_IQCalibrate_8723B(
for (i = 0 ; i < retryCount ; i++) {
PathBOK = phy_PathB_RxIQK8723B(padapter, is2T);
if (PathBOK == 0x03) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK Success!!\n"));
/* result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
/* result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
result[t][6] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
result[t][7] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
break;
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK Fail!!\n"));
}
}
/* Allen end */
- if (0x00 == PathBOK) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK failed!!\n"));
- }
}
/* Back to BB mode, load original value */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n"));
PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0);
if (t != 0) {
@@ -1692,7 +1518,6 @@ static void phy_IQCalibrate_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8723B() <==\n"));
}
@@ -1789,7 +1614,7 @@ void PHY_IQCalibrate_8723B(
s32 result[4][8]; /* last is final result */
u8 i, final_candidate;
bool bPathAOK, bPathBOK;
- s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC, RegTmp = 0;
+ s32 RegE94, RegE9C, RegEA4, RegEB4, RegEBC, RegEC4, RegTmp = 0;
bool is12simular, is13simular, is23simular;
bool bSingleTone = false, bCarrierSuppression = false;
u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
@@ -1805,8 +1630,6 @@ void PHY_IQCalibrate_8723B(
};
/* u32 Path_SEL_BB = 0; */
u32 GNT_BT_default;
- u32 StartTime;
- s32 ProgressingTime;
if (!ODM_CheckPowerStatus(padapter))
return;
@@ -1818,9 +1641,6 @@ void PHY_IQCalibrate_8723B(
if (bSingleTone || bCarrierSuppression)
return;
-#if DISABLE_BB_RF
- return;
-#endif
if (pDM_Odm->RFCalibrateInfo.bIQKInProgress)
return;
@@ -1868,12 +1688,9 @@ void PHY_IQCalibrate_8723B(
}
if (bReCovery) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("PHY_IQCalibrate_8723B: Return due to bReCovery!\n"));
_PHY_ReloadADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
return;
}
- StartTime = jiffies;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Start!!!\n"));
/* save default GNT_BT */
GNT_BT_default = PHY_QueryBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord);
@@ -1908,7 +1725,6 @@ void PHY_IQCalibrate_8723B(
is12simular = phy_SimularityCompare_8723B(padapter, result, 0, 1);
if (is12simular) {
final_candidate = 0;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is12simular final_candidate is %x\n", final_candidate));
break;
}
}
@@ -1917,7 +1733,6 @@ void PHY_IQCalibrate_8723B(
is13simular = phy_SimularityCompare_8723B(padapter, result, 0, 2);
if (is13simular) {
final_candidate = 0;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is13simular final_candidate is %x\n", final_candidate));
break;
}
@@ -1925,7 +1740,6 @@ void PHY_IQCalibrate_8723B(
is23simular = phy_SimularityCompare_8723B(padapter, result, 1, 2);
if (is23simular) {
final_candidate = 1;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is23simular final_candidate is %x\n", final_candidate));
} else {
for (i = 0; i < 8; i++)
RegTmp += result[3][i];
@@ -1942,29 +1756,20 @@ void PHY_IQCalibrate_8723B(
RegE94 = result[i][0];
RegE9C = result[i][1];
RegEA4 = result[i][2];
- RegEAC = result[i][3];
RegEB4 = result[i][4];
RegEBC = result[i][5];
RegEC4 = result[i][6];
- RegECC = result[i][7];
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: RegE94 =%x RegE9C =%x RegEA4 =%x RegEAC =%x RegEB4 =%x RegEBC =%x RegEC4 =%x RegECC =%x\n ", RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
}
if (final_candidate != 0xff) {
pDM_Odm->RFCalibrateInfo.RegE94 = RegE94 = result[final_candidate][0];
pDM_Odm->RFCalibrateInfo.RegE9C = RegE9C = result[final_candidate][1];
RegEA4 = result[final_candidate][2];
- RegEAC = result[final_candidate][3];
pDM_Odm->RFCalibrateInfo.RegEB4 = RegEB4 = result[final_candidate][4];
pDM_Odm->RFCalibrateInfo.RegEBC = RegEBC = result[final_candidate][5];
RegEC4 = result[final_candidate][6];
- RegECC = result[final_candidate][7];
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: final_candidate is %x\n", final_candidate));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: RegE94 =%x RegE9C =%x RegEA4 =%x RegEAC =%x RegEB4 =%x RegEBC =%x RegEC4 =%x RegECC =%x\n ", RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
bPathAOK = bPathBOK = true;
} else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: FAIL use default value\n"));
-
pDM_Odm->RFCalibrateInfo.RegE94 = pDM_Odm->RFCalibrateInfo.RegEB4 = 0x100; /* X default value */
pDM_Odm->RFCalibrateInfo.RegE9C = pDM_Odm->RFCalibrateInfo.RegEBC = 0x0; /* Y default value */
}
@@ -1985,7 +1790,6 @@ void PHY_IQCalibrate_8723B(
pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].Value[0][i] = result[final_candidate][i];
pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].bIQKDone = true;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", 0));
_PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
@@ -2015,12 +1819,6 @@ void PHY_IQCalibrate_8723B(
}
pDM_Odm->RFCalibrateInfo.bIQKInProgress = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK finished\n"));
- ProgressingTime = jiffies_to_msecs(jiffies - StartTime);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK ProgressingTime = %d\n", ProgressingTime));
-
-
}
@@ -2028,12 +1826,6 @@ void PHY_LCCalibrate_8723B(struct dm_odm_t *pDM_Odm)
{
bool bSingleTone = false, bCarrierSuppression = false;
u32 timeout = 2000, timecount = 0;
- u32 StartTime;
- s32 ProgressingTime;
-
-#if DISABLE_BB_RF
- return;
-#endif
if (!(pDM_Odm->SupportAbility & ODM_RF_CALIBRATION))
return;
@@ -2042,7 +1834,6 @@ void PHY_LCCalibrate_8723B(struct dm_odm_t *pDM_Odm)
if (bSingleTone || bCarrierSuppression)
return;
- StartTime = jiffies;
while (*(pDM_Odm->pbScanInProcess) && timecount < timeout) {
mdelay(50);
timecount += 50;
@@ -2055,8 +1846,4 @@ void PHY_LCCalibrate_8723B(struct dm_odm_t *pDM_Odm)
pDM_Odm->RFCalibrateInfo.bLCKInProgress = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LCK:Finish!!!interface %d\n", pDM_Odm->InterfaceIndex));
- ProgressingTime = jiffies_to_msecs(jiffies - StartTime);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LCK ProgressingTime = %d\n", ProgressingTime));
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 5802ed4c6f82..3b0573885dce 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -17,7 +17,6 @@ struct btc_coexist GLBtCoexist;
static u8 GLBtcWiFiInScanState;
static u8 GLBtcWiFiInIQKState;
-u32 GLBtcDbgType[BTC_MSG_MAX];
static u8 GLBtcDbgBuf[BT_TMP_BUF_SIZE];
struct btcdbginfo { /* _btcoexdbginfo */
@@ -75,32 +74,6 @@ static u8 halbtcoutsrc_IsBtCoexistAvailable(struct btc_coexist *pBtCoexist)
return true;
}
-static void halbtcoutsrc_DbgInit(void)
-{
- u8 i;
-
- for (i = 0; i < BTC_MSG_MAX; i++)
- GLBtcDbgType[i] = 0;
-
- GLBtcDbgType[BTC_MSG_INTERFACE] = \
-/* INTF_INIT | */
-/* INTF_NOTIFY | */
- 0;
-
- GLBtcDbgType[BTC_MSG_ALGORITHM] = \
-/* ALGO_BT_RSSI_STATE | */
-/* ALGO_WIFI_RSSI_STATE | */
-/* ALGO_BT_MONITOR | */
-/* ALGO_TRACE | */
-/* ALGO_TRACE_FW | */
-/* ALGO_TRACE_FW_DETAIL | */
-/* ALGO_TRACE_FW_EXEC | */
-/* ALGO_TRACE_SW | */
-/* ALGO_TRACE_SW_DETAIL | */
-/* ALGO_TRACE_SW_EXEC | */
- 0;
-}
-
static void halbtcoutsrc_LeaveLps(struct btc_coexist *pBtCoexist)
{
struct adapter *padapter;
@@ -131,9 +104,6 @@ static void halbtcoutsrc_NormalLps(struct btc_coexist *pBtCoexist)
{
struct adapter *padapter;
-
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Normal LPS behavior!!!\n"));
-
padapter = pBtCoexist->Adapter;
if (pBtCoexist->btInfo.bBtCtrlLps) {
@@ -398,10 +368,6 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
*pu8 = false;
break;
- case BTC_GET_BL_WIFI_UNDER_5G:
- *pu8 = pHalData->CurrentBandType == 1;
- break;
-
case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
*pu8 = check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE);
break;
@@ -922,8 +888,6 @@ void hal_btcoex_Initialize(void *padapter)
/* pBtCoexist->statistics.cntBind++; */
- halbtcoutsrc_DbgInit();
-
pBtCoexist->chipInterface = BTC_INTF_SDIO;
EXhalbtcoutsrc_BindBtCoexWithAdapter(padapter);
@@ -1481,10 +1445,6 @@ u32 hal_btcoex_GetRaMask(struct adapter *padapter)
void hal_btcoex_RecordPwrMode(struct adapter *padapter, u8 *pCmdBuf, u8 cmdLen)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write pwrModeCmd = 0x%04x%08x\n",
- pCmdBuf[0] << 8 | pCmdBuf[1],
- pCmdBuf[2] << 24 | pCmdBuf[3] << 16 | pCmdBuf[4] << 8 | pCmdBuf[5]));
-
memcpy(GLBtCoexist.pwrModeVal, pCmdBuf, cmdLen);
}
@@ -1499,138 +1459,3 @@ void hal_btcoex_DisplayBtCoexInfo(struct adapter *padapter, u8 *pbuf, u32 bufsiz
DBG_BT_INFO_INIT(pinfo, NULL, 0);
}
-void hal_btcoex_SetDBG(struct adapter *padapter, u32 *pDbgModule)
-{
- u32 i;
-
-
- if (!pDbgModule)
- return;
-
- for (i = 0; i < BTC_MSG_MAX; i++)
- GLBtcDbgType[i] = pDbgModule[i];
-}
-
-u32 hal_btcoex_GetDBG(struct adapter *padapter, u8 *pStrBuf, u32 bufSize)
-{
- s32 count;
- u8 *pstr;
- u32 leftSize;
-
-
- if (!pStrBuf || bufSize == 0)
- return 0;
-
- pstr = pStrBuf;
- leftSize = bufSize;
-
- count = rtw_sprintf(pstr, leftSize, "#define DBG\t%d\n", DBG);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
-
- count = rtw_sprintf(pstr, leftSize, "BTCOEX Debug Setting:\n");
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
-
- count = rtw_sprintf(pstr, leftSize,
- "INTERFACE / ALGORITHM: 0x%08X / 0x%08X\n\n",
- GLBtcDbgType[BTC_MSG_INTERFACE],
- GLBtcDbgType[BTC_MSG_ALGORITHM]);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
-
- count = rtw_sprintf(pstr, leftSize, "INTERFACE Debug Setting Definition:\n");
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[0]=%d for INTF_INIT\n",
- (GLBtcDbgType[BTC_MSG_INTERFACE] & INTF_INIT) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[2]=%d for INTF_NOTIFY\n\n",
- (GLBtcDbgType[BTC_MSG_INTERFACE] & INTF_NOTIFY) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
-
- count = rtw_sprintf(pstr, leftSize, "ALGORITHM Debug Setting Definition:\n");
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[0]=%d for BT_RSSI_STATE\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_BT_RSSI_STATE) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[1]=%d for WIFI_RSSI_STATE\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_WIFI_RSSI_STATE) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[2]=%d for BT_MONITOR\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_BT_MONITOR) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[3]=%d for TRACE\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[4]=%d for TRACE_FW\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE_FW) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[5]=%d for TRACE_FW_DETAIL\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE_FW_DETAIL) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[6]=%d for TRACE_FW_EXEC\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE_FW_EXEC) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[7]=%d for TRACE_SW\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE_SW) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[8]=%d for TRACE_SW_DETAIL\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE_SW_DETAIL) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
- count = rtw_sprintf(pstr, leftSize, "\tbit[9]=%d for TRACE_SW_EXEC\n",
- (GLBtcDbgType[BTC_MSG_ALGORITHM] & ALGO_TRACE_SW_EXEC) ? 1 : 0);
- if ((count < 0) || (count >= leftSize))
- goto exit;
- pstr += count;
- leftSize -= count;
-
-exit:
- count = pstr - pStrBuf;
-
- return count;
-}
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index e82f59fc5e9b..eebd48438733 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -295,126 +295,6 @@ u8 MRateToHwRate(u8 rate)
case MGN_MCS31:
ret = DESC_RATEMCS31;
break;
- case MGN_VHT1SS_MCS0:
- ret = DESC_RATEVHTSS1MCS0;
- break;
- case MGN_VHT1SS_MCS1:
- ret = DESC_RATEVHTSS1MCS1;
- break;
- case MGN_VHT1SS_MCS2:
- ret = DESC_RATEVHTSS1MCS2;
- break;
- case MGN_VHT1SS_MCS3:
- ret = DESC_RATEVHTSS1MCS3;
- break;
- case MGN_VHT1SS_MCS4:
- ret = DESC_RATEVHTSS1MCS4;
- break;
- case MGN_VHT1SS_MCS5:
- ret = DESC_RATEVHTSS1MCS5;
- break;
- case MGN_VHT1SS_MCS6:
- ret = DESC_RATEVHTSS1MCS6;
- break;
- case MGN_VHT1SS_MCS7:
- ret = DESC_RATEVHTSS1MCS7;
- break;
- case MGN_VHT1SS_MCS8:
- ret = DESC_RATEVHTSS1MCS8;
- break;
- case MGN_VHT1SS_MCS9:
- ret = DESC_RATEVHTSS1MCS9;
- break;
- case MGN_VHT2SS_MCS0:
- ret = DESC_RATEVHTSS2MCS0;
- break;
- case MGN_VHT2SS_MCS1:
- ret = DESC_RATEVHTSS2MCS1;
- break;
- case MGN_VHT2SS_MCS2:
- ret = DESC_RATEVHTSS2MCS2;
- break;
- case MGN_VHT2SS_MCS3:
- ret = DESC_RATEVHTSS2MCS3;
- break;
- case MGN_VHT2SS_MCS4:
- ret = DESC_RATEVHTSS2MCS4;
- break;
- case MGN_VHT2SS_MCS5:
- ret = DESC_RATEVHTSS2MCS5;
- break;
- case MGN_VHT2SS_MCS6:
- ret = DESC_RATEVHTSS2MCS6;
- break;
- case MGN_VHT2SS_MCS7:
- ret = DESC_RATEVHTSS2MCS7;
- break;
- case MGN_VHT2SS_MCS8:
- ret = DESC_RATEVHTSS2MCS8;
- break;
- case MGN_VHT2SS_MCS9:
- ret = DESC_RATEVHTSS2MCS9;
- break;
- case MGN_VHT3SS_MCS0:
- ret = DESC_RATEVHTSS3MCS0;
- break;
- case MGN_VHT3SS_MCS1:
- ret = DESC_RATEVHTSS3MCS1;
- break;
- case MGN_VHT3SS_MCS2:
- ret = DESC_RATEVHTSS3MCS2;
- break;
- case MGN_VHT3SS_MCS3:
- ret = DESC_RATEVHTSS3MCS3;
- break;
- case MGN_VHT3SS_MCS4:
- ret = DESC_RATEVHTSS3MCS4;
- break;
- case MGN_VHT3SS_MCS5:
- ret = DESC_RATEVHTSS3MCS5;
- break;
- case MGN_VHT3SS_MCS6:
- ret = DESC_RATEVHTSS3MCS6;
- break;
- case MGN_VHT3SS_MCS7:
- ret = DESC_RATEVHTSS3MCS7;
- break;
- case MGN_VHT3SS_MCS8:
- ret = DESC_RATEVHTSS3MCS8;
- break;
- case MGN_VHT3SS_MCS9:
- ret = DESC_RATEVHTSS3MCS9;
- break;
- case MGN_VHT4SS_MCS0:
- ret = DESC_RATEVHTSS4MCS0;
- break;
- case MGN_VHT4SS_MCS1:
- ret = DESC_RATEVHTSS4MCS1;
- break;
- case MGN_VHT4SS_MCS2:
- ret = DESC_RATEVHTSS4MCS2;
- break;
- case MGN_VHT4SS_MCS3:
- ret = DESC_RATEVHTSS4MCS3;
- break;
- case MGN_VHT4SS_MCS4:
- ret = DESC_RATEVHTSS4MCS4;
- break;
- case MGN_VHT4SS_MCS5:
- ret = DESC_RATEVHTSS4MCS5;
- break;
- case MGN_VHT4SS_MCS6:
- ret = DESC_RATEVHTSS4MCS6;
- break;
- case MGN_VHT4SS_MCS7:
- ret = DESC_RATEVHTSS4MCS7;
- break;
- case MGN_VHT4SS_MCS8:
- ret = DESC_RATEVHTSS4MCS8;
- break;
- case MGN_VHT4SS_MCS9:
- ret = DESC_RATEVHTSS4MCS9;
- break;
default:
break;
}
@@ -559,127 +439,6 @@ u8 HwRateToMRate(u8 rate)
case DESC_RATEMCS31:
ret_rate = MGN_MCS31;
break;
- case DESC_RATEVHTSS1MCS0:
- ret_rate = MGN_VHT1SS_MCS0;
- break;
- case DESC_RATEVHTSS1MCS1:
- ret_rate = MGN_VHT1SS_MCS1;
- break;
- case DESC_RATEVHTSS1MCS2:
- ret_rate = MGN_VHT1SS_MCS2;
- break;
- case DESC_RATEVHTSS1MCS3:
- ret_rate = MGN_VHT1SS_MCS3;
- break;
- case DESC_RATEVHTSS1MCS4:
- ret_rate = MGN_VHT1SS_MCS4;
- break;
- case DESC_RATEVHTSS1MCS5:
- ret_rate = MGN_VHT1SS_MCS5;
- break;
- case DESC_RATEVHTSS1MCS6:
- ret_rate = MGN_VHT1SS_MCS6;
- break;
- case DESC_RATEVHTSS1MCS7:
- ret_rate = MGN_VHT1SS_MCS7;
- break;
- case DESC_RATEVHTSS1MCS8:
- ret_rate = MGN_VHT1SS_MCS8;
- break;
- case DESC_RATEVHTSS1MCS9:
- ret_rate = MGN_VHT1SS_MCS9;
- break;
- case DESC_RATEVHTSS2MCS0:
- ret_rate = MGN_VHT2SS_MCS0;
- break;
- case DESC_RATEVHTSS2MCS1:
- ret_rate = MGN_VHT2SS_MCS1;
- break;
- case DESC_RATEVHTSS2MCS2:
- ret_rate = MGN_VHT2SS_MCS2;
- break;
- case DESC_RATEVHTSS2MCS3:
- ret_rate = MGN_VHT2SS_MCS3;
- break;
- case DESC_RATEVHTSS2MCS4:
- ret_rate = MGN_VHT2SS_MCS4;
- break;
- case DESC_RATEVHTSS2MCS5:
- ret_rate = MGN_VHT2SS_MCS5;
- break;
- case DESC_RATEVHTSS2MCS6:
- ret_rate = MGN_VHT2SS_MCS6;
- break;
- case DESC_RATEVHTSS2MCS7:
- ret_rate = MGN_VHT2SS_MCS7;
- break;
- case DESC_RATEVHTSS2MCS8:
- ret_rate = MGN_VHT2SS_MCS8;
- break;
- case DESC_RATEVHTSS2MCS9:
- ret_rate = MGN_VHT2SS_MCS9;
- break;
- case DESC_RATEVHTSS3MCS0:
- ret_rate = MGN_VHT3SS_MCS0;
- break;
- case DESC_RATEVHTSS3MCS1:
- ret_rate = MGN_VHT3SS_MCS1;
- break;
- case DESC_RATEVHTSS3MCS2:
- ret_rate = MGN_VHT3SS_MCS2;
- break;
- case DESC_RATEVHTSS3MCS3:
- ret_rate = MGN_VHT3SS_MCS3;
- break;
- case DESC_RATEVHTSS3MCS4:
- ret_rate = MGN_VHT3SS_MCS4;
- break;
- case DESC_RATEVHTSS3MCS5:
- ret_rate = MGN_VHT3SS_MCS5;
- break;
- case DESC_RATEVHTSS3MCS6:
- ret_rate = MGN_VHT3SS_MCS6;
- break;
- case DESC_RATEVHTSS3MCS7:
- ret_rate = MGN_VHT3SS_MCS7;
- break;
- case DESC_RATEVHTSS3MCS8:
- ret_rate = MGN_VHT3SS_MCS8;
- break;
- case DESC_RATEVHTSS3MCS9:
- ret_rate = MGN_VHT3SS_MCS9;
- break;
- case DESC_RATEVHTSS4MCS0:
- ret_rate = MGN_VHT4SS_MCS0;
- break;
- case DESC_RATEVHTSS4MCS1:
- ret_rate = MGN_VHT4SS_MCS1;
- break;
- case DESC_RATEVHTSS4MCS2:
- ret_rate = MGN_VHT4SS_MCS2;
- break;
- case DESC_RATEVHTSS4MCS3:
- ret_rate = MGN_VHT4SS_MCS3;
- break;
- case DESC_RATEVHTSS4MCS4:
- ret_rate = MGN_VHT4SS_MCS4;
- break;
- case DESC_RATEVHTSS4MCS5:
- ret_rate = MGN_VHT4SS_MCS5;
- break;
- case DESC_RATEVHTSS4MCS6:
- ret_rate = MGN_VHT4SS_MCS6;
- break;
- case DESC_RATEVHTSS4MCS7:
- ret_rate = MGN_VHT4SS_MCS7;
- break;
- case DESC_RATEVHTSS4MCS8:
- ret_rate = MGN_VHT4SS_MCS8;
- break;
- case DESC_RATEVHTSS4MCS9:
- ret_rate = MGN_VHT4SS_MCS9;
- break;
-
default:
break;
}
@@ -916,16 +675,10 @@ s32 c2h_evt_read_88xx(struct adapter *adapter, u8 *buf)
c2h_evt->seq = rtw_read8(adapter, REG_C2HEVT_CMD_SEQ_88XX);
c2h_evt->plen = rtw_read8(adapter, REG_C2HEVT_CMD_LEN_88XX);
- print_hex_dump_debug(DRIVER_PREFIX ": c2h_evt_read(): ", DUMP_PREFIX_NONE,
- 16, 1, &c2h_evt, sizeof(c2h_evt), false);
-
/* Read the content */
for (i = 0; i < c2h_evt->plen; i++)
c2h_evt->payload[i] = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 2 + i);
- print_hex_dump_debug(DRIVER_PREFIX ": c2h_evt_read(): Command Content:\n",
- DUMP_PREFIX_NONE, 16, 1, c2h_evt->payload, c2h_evt->plen, false);
-
ret = _SUCCESS;
clear_evt:
@@ -1093,13 +846,6 @@ u8 SetHalDefVar(
u8 bResult = _SUCCESS;
switch (variable) {
- case HW_DEF_FA_CNT_DUMP:
- /* ODM_COMP_COMMON */
- if (*((u8 *)value))
- odm->DebugComponents |= (ODM_COMP_DIG | ODM_COMP_FA_CNT);
- else
- odm->DebugComponents &= ~(ODM_COMP_DIG | ODM_COMP_FA_CNT);
- break;
case HAL_DEF_DBG_RX_INFO_DUMP:
if (odm->bLinked) {
@@ -1166,7 +912,6 @@ u8 GetHalDefVar(
)
{
struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &(hal_data->odmpriv);
u8 bResult = _SUCCESS;
switch (variable) {
@@ -1183,12 +928,6 @@ u8 GetHalDefVar(
*((int *)value) = psta->rssi_stat.UndecoratedSmoothedPWDB;
}
break;
- case HW_DEF_ODM_DBG_FLAG:
- *((u64 *)value) = odm->DebugComponents;
- break;
- case HW_DEF_ODM_DBG_LEVEL:
- *((u32 *)value) = odm->DebugLevel;
- break;
case HAL_DEF_DBG_DM_FUNC:
*((u32 *)value) = hal_data->odmpriv.SupportAbility;
break;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 94d11689b4ac..bb7941aee0c4 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -11,7 +11,7 @@
#include <hal_data.h>
#include <linux/kernel.h>
-u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
+u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 RfPath,
u8 TxNum, enum rate_section RateSection)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
@@ -20,73 +20,27 @@ u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
if (RfPath > ODM_RF_PATH_D)
return 0;
- if (Band == BAND_ON_2_4G) {
- switch (RateSection) {
- case CCK:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][0];
- break;
- case OFDM:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][1];
- break;
- case HT_MCS0_MCS7:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][2];
- break;
- case HT_MCS8_MCS15:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][3];
- break;
- case HT_MCS16_MCS23:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][4];
- break;
- case HT_MCS24_MCS31:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][5];
- break;
- case VHT_1SSMCS0_1SSMCS9:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][6];
- break;
- case VHT_2SSMCS0_2SSMCS9:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][7];
- break;
- case VHT_3SSMCS0_3SSMCS9:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][8];
- break;
- case VHT_4SSMCS0_4SSMCS9:
- value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][9];
- break;
- default:
- break;
- }
- } else if (Band == BAND_ON_5G) {
- switch (RateSection) {
- case OFDM:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][0];
- break;
- case HT_MCS0_MCS7:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][1];
- break;
- case HT_MCS8_MCS15:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][2];
- break;
- case HT_MCS16_MCS23:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][3];
- break;
- case HT_MCS24_MCS31:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][4];
- break;
- case VHT_1SSMCS0_1SSMCS9:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][5];
- break;
- case VHT_2SSMCS0_2SSMCS9:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][6];
- break;
- case VHT_3SSMCS0_3SSMCS9:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][7];
- break;
- case VHT_4SSMCS0_4SSMCS9:
- value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][8];
- break;
- default:
- break;
- }
+ switch (RateSection) {
+ case CCK:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][0];
+ break;
+ case OFDM:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][1];
+ break;
+ case HT_MCS0_MCS7:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][2];
+ break;
+ case HT_MCS8_MCS15:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][3];
+ break;
+ case HT_MCS16_MCS23:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][4];
+ break;
+ case HT_MCS24_MCS31:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][5];
+ break;
+ default:
+ break;
}
return value;
@@ -95,7 +49,6 @@ u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
static void
phy_SetTxPowerByRateBase(
struct adapter *Adapter,
- u8 Band,
u8 RfPath,
enum rate_section RateSection,
u8 TxNum,
@@ -107,73 +60,27 @@ phy_SetTxPowerByRateBase(
if (RfPath > ODM_RF_PATH_D)
return;
- if (Band == BAND_ON_2_4G) {
- switch (RateSection) {
- case CCK:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][0] = Value;
- break;
- case OFDM:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][1] = Value;
- break;
- case HT_MCS0_MCS7:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][2] = Value;
- break;
- case HT_MCS8_MCS15:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][3] = Value;
- break;
- case HT_MCS16_MCS23:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][4] = Value;
- break;
- case HT_MCS24_MCS31:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][5] = Value;
- break;
- case VHT_1SSMCS0_1SSMCS9:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][6] = Value;
- break;
- case VHT_2SSMCS0_2SSMCS9:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][7] = Value;
- break;
- case VHT_3SSMCS0_3SSMCS9:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][8] = Value;
- break;
- case VHT_4SSMCS0_4SSMCS9:
- pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][9] = Value;
- break;
- default:
- break;
- }
- } else if (Band == BAND_ON_5G) {
- switch (RateSection) {
- case OFDM:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][0] = Value;
- break;
- case HT_MCS0_MCS7:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][1] = Value;
- break;
- case HT_MCS8_MCS15:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][2] = Value;
- break;
- case HT_MCS16_MCS23:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][3] = Value;
- break;
- case HT_MCS24_MCS31:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][4] = Value;
- break;
- case VHT_1SSMCS0_1SSMCS9:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][5] = Value;
- break;
- case VHT_2SSMCS0_2SSMCS9:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][6] = Value;
- break;
- case VHT_3SSMCS0_3SSMCS9:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][7] = Value;
- break;
- case VHT_4SSMCS0_4SSMCS9:
- pHalData->TxPwrByRateBase5G[RfPath][TxNum][8] = Value;
- break;
- default:
- break;
- }
+ switch (RateSection) {
+ case CCK:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][0] = Value;
+ break;
+ case OFDM:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][1] = Value;
+ break;
+ case HT_MCS0_MCS7:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][2] = Value;
+ break;
+ case HT_MCS8_MCS15:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][3] = Value;
+ break;
+ case HT_MCS16_MCS23:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][4] = Value;
+ break;
+ case HT_MCS24_MCS31:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][5] = Value;
+ break;
+ default:
+ break;
}
}
@@ -185,50 +92,21 @@ struct adapter *padapter
u8 path, base;
for (path = ODM_RF_PATH_A; path <= ODM_RF_PATH_B; ++path) {
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_11M);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, CCK, RF_1TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_54M);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, OFDM, RF_1TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_2TX, MGN_MCS15);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_3TX, MGN_MCS23);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, HT_MCS16_MCS23, RF_3TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_VHT1SS_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_2TX, MGN_VHT2SS_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_3TX, MGN_VHT3SS_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, VHT_3SSMCS0_3SSMCS9, RF_3TX, base);
+ base = PHY_GetTxPowerByRate(padapter, path, RF_1TX, MGN_11M);
+ phy_SetTxPowerByRateBase(padapter, path, CCK, RF_1TX, base);
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_1TX, MGN_54M);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, OFDM, RF_1TX, base);
+ base = PHY_GetTxPowerByRate(padapter, path, RF_1TX, MGN_54M);
+ phy_SetTxPowerByRateBase(padapter, path, OFDM, RF_1TX, base);
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_1TX, MGN_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base);
+ base = PHY_GetTxPowerByRate(padapter, path, RF_1TX, MGN_MCS7);
+ phy_SetTxPowerByRateBase(padapter, path, HT_MCS0_MCS7, RF_1TX, base);
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_2TX, MGN_MCS15);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base);
+ base = PHY_GetTxPowerByRate(padapter, path, RF_2TX, MGN_MCS15);
+ phy_SetTxPowerByRateBase(padapter, path, HT_MCS8_MCS15, RF_2TX, base);
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_3TX, MGN_MCS23);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, HT_MCS16_MCS23, RF_3TX, base);
+ base = PHY_GetTxPowerByRate(padapter, path, RF_3TX, MGN_MCS23);
+ phy_SetTxPowerByRateBase(padapter, path, HT_MCS16_MCS23, RF_3TX, base);
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_1TX, MGN_VHT1SS_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_2TX, MGN_VHT2SS_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base);
-
- base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_3TX, MGN_VHT2SS_MCS7);
- phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, VHT_3SSMCS0_3SSMCS9, RF_3TX, base);
}
}
@@ -532,81 +410,6 @@ PHY_GetRateValuesOfTxPowerByRate(
*RateNum = 4;
break;
- case 0xC3C:
- case 0xE3C:
- case 0x183C:
- case 0x1a3C:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS0);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS1);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS2);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS3);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
- case 0xC40:
- case 0xE40:
- case 0x1840:
- case 0x1a40:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS4);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS5);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS6);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS7);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
- case 0xC44:
- case 0xE44:
- case 0x1844:
- case 0x1a44:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS8);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS9);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS0);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS1);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
- case 0xC48:
- case 0xE48:
- case 0x1848:
- case 0x1a48:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS2);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS3);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS4);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS5);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
- case 0xC4C:
- case 0xE4C:
- case 0x184C:
- case 0x1a4C:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS6);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS7);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS8);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS9);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
case 0xCD8:
case 0xED8:
case 0x18D8:
@@ -637,49 +440,6 @@ PHY_GetRateValuesOfTxPowerByRate(
*RateNum = 4;
break;
- case 0xCE0:
- case 0xEE0:
- case 0x18E0:
- case 0x1aE0:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS0);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS1);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS2);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS3);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
- case 0xCE4:
- case 0xEE4:
- case 0x18E4:
- case 0x1aE4:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS4);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS5);
- RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS6);
- RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS7);
- for (i = 0; i < 4; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
- case 0xCE8:
- case 0xEE8:
- case 0x18E8:
- case 0x1aE8:
- RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS8);
- RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS9);
- for (i = 0; i < 2; ++i) {
- PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
- ((Value >> (i * 8)) & 0xF));
- }
- *RateNum = 4;
- break;
-
default:
break;
}
@@ -687,7 +447,6 @@ PHY_GetRateValuesOfTxPowerByRate(
static void PHY_StoreTxPowerByRateNew(
struct adapter *padapter,
- u32 Band,
u32 RfPath,
u32 TxNum,
u32 RegAddr,
@@ -701,9 +460,6 @@ static void PHY_StoreTxPowerByRateNew(
PHY_GetRateValuesOfTxPowerByRate(padapter, RegAddr, BitMask, Data, rateIndex, PwrByRateVal, &rateNum);
- if (Band != BAND_ON_2_4G && Band != BAND_ON_5G)
- return;
-
if (RfPath > ODM_RF_PATH_D)
return;
@@ -711,11 +467,7 @@ static void PHY_StoreTxPowerByRateNew(
return;
for (i = 0; i < rateNum; ++i) {
- if (rateIndex[i] == PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS0) ||
- rateIndex[i] == PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS1))
- TxNum = RF_2TX;
-
- pHalData->TxPwrByRateOffset[Band][RfPath][TxNum][rateIndex[i]] = PwrByRateVal[i];
+ pHalData->TxPwrByRateOffset[RfPath][TxNum][rateIndex[i]] = PwrByRateVal[i];
}
}
@@ -732,18 +484,16 @@ static void PHY_StoreTxPowerByRateOld(
void PHY_InitTxPowerByRate(struct adapter *padapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- u8 band, rfPath, TxNum, rate;
+ u8 rfPath, TxNum, rate;
- for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
- for (rfPath = 0; rfPath < TX_PWR_BY_RATE_NUM_RF; ++rfPath)
- for (TxNum = 0; TxNum < TX_PWR_BY_RATE_NUM_RF; ++TxNum)
- for (rate = 0; rate < TX_PWR_BY_RATE_NUM_RATE; ++rate)
- pHalData->TxPwrByRateOffset[band][rfPath][TxNum][rate] = 0;
+ for (rfPath = 0; rfPath < TX_PWR_BY_RATE_NUM_RF; ++rfPath)
+ for (TxNum = 0; TxNum < TX_PWR_BY_RATE_NUM_RF; ++TxNum)
+ for (rate = 0; rate < TX_PWR_BY_RATE_NUM_RATE; ++rate)
+ pHalData->TxPwrByRateOffset[rfPath][TxNum][rate] = 0;
}
void PHY_StoreTxPowerByRate(
struct adapter *padapter,
- u32 Band,
u32 RfPath,
u32 TxNum,
u32 RegAddr,
@@ -755,7 +505,7 @@ void PHY_StoreTxPowerByRate(
struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
if (pDM_Odm->PhyRegPgVersion > 0)
- PHY_StoreTxPowerByRateNew(padapter, Band, RfPath, TxNum, RegAddr, BitMask, Data);
+ PHY_StoreTxPowerByRateNew(padapter, RfPath, TxNum, RegAddr, BitMask, Data);
else if (pDM_Odm->PhyRegPgVersion == 0) {
PHY_StoreTxPowerByRateOld(padapter, RegAddr, BitMask, Data);
@@ -771,7 +521,7 @@ phy_ConvertTxPowerByRateInDbmToRelativeValues(
struct adapter *padapter
)
{
- u8 base = 0, i = 0, value = 0, band = 0, path = 0, txNum = 0;
+ u8 base = 0, i = 0, value = 0, path = 0, txNum = 0;
u8 cckRates[4] = {
MGN_1M, MGN_2M, MGN_5_5M, MGN_11M
};
@@ -787,77 +537,42 @@ struct adapter *padapter
u8 mcs16_23Rates[8] = {
MGN_MCS16, MGN_MCS17, MGN_MCS18, MGN_MCS19, MGN_MCS20, MGN_MCS21, MGN_MCS22, MGN_MCS23
};
- u8 vht1ssRates[10] = {
- MGN_VHT1SS_MCS0, MGN_VHT1SS_MCS1, MGN_VHT1SS_MCS2, MGN_VHT1SS_MCS3, MGN_VHT1SS_MCS4,
- MGN_VHT1SS_MCS5, MGN_VHT1SS_MCS6, MGN_VHT1SS_MCS7, MGN_VHT1SS_MCS8, MGN_VHT1SS_MCS9
- };
- u8 vht2ssRates[10] = {
- MGN_VHT2SS_MCS0, MGN_VHT2SS_MCS1, MGN_VHT2SS_MCS2, MGN_VHT2SS_MCS3, MGN_VHT2SS_MCS4,
- MGN_VHT2SS_MCS5, MGN_VHT2SS_MCS6, MGN_VHT2SS_MCS7, MGN_VHT2SS_MCS8, MGN_VHT2SS_MCS9
- };
- u8 vht3ssRates[10] = {
- MGN_VHT3SS_MCS0, MGN_VHT3SS_MCS1, MGN_VHT3SS_MCS2, MGN_VHT3SS_MCS3, MGN_VHT3SS_MCS4,
- MGN_VHT3SS_MCS5, MGN_VHT3SS_MCS6, MGN_VHT3SS_MCS7, MGN_VHT3SS_MCS8, MGN_VHT3SS_MCS9
- };
-
- for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band) {
- for (path = ODM_RF_PATH_A; path <= ODM_RF_PATH_D; ++path) {
- for (txNum = RF_1TX; txNum < RF_MAX_TX_NUM; ++txNum) {
- /* CCK */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_11M);
- for (i = 0; i < ARRAY_SIZE(cckRates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, cckRates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, cckRates[i], value - base);
- }
- /* OFDM */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_54M);
- for (i = 0; i < sizeof(ofdmRates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, ofdmRates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, ofdmRates[i], value - base);
- }
-
- /* HT MCS0~7 */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_MCS7);
- for (i = 0; i < sizeof(mcs0_7Rates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, mcs0_7Rates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, mcs0_7Rates[i], value - base);
- }
-
- /* HT MCS8~15 */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_MCS15);
- for (i = 0; i < sizeof(mcs8_15Rates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, mcs8_15Rates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, mcs8_15Rates[i], value - base);
- }
+ for (path = ODM_RF_PATH_A; path <= ODM_RF_PATH_D; ++path) {
+ for (txNum = RF_1TX; txNum < RF_MAX_TX_NUM; ++txNum) {
+ /* CCK */
+ base = PHY_GetTxPowerByRate(padapter, path, txNum, MGN_11M);
+ for (i = 0; i < ARRAY_SIZE(cckRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, path, txNum, cckRates[i]);
+ PHY_SetTxPowerByRate(padapter, path, txNum, cckRates[i], value - base);
+ }
- /* HT MCS16~23 */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_MCS23);
- for (i = 0; i < sizeof(mcs16_23Rates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, mcs16_23Rates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, mcs16_23Rates[i], value - base);
- }
+ /* OFDM */
+ base = PHY_GetTxPowerByRate(padapter, path, txNum, MGN_54M);
+ for (i = 0; i < sizeof(ofdmRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, path, txNum, ofdmRates[i]);
+ PHY_SetTxPowerByRate(padapter, path, txNum, ofdmRates[i], value - base);
+ }
- /* VHT 1SS */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_VHT1SS_MCS7);
- for (i = 0; i < sizeof(vht1ssRates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, vht1ssRates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, vht1ssRates[i], value - base);
- }
+ /* HT MCS0~7 */
+ base = PHY_GetTxPowerByRate(padapter, path, txNum, MGN_MCS7);
+ for (i = 0; i < sizeof(mcs0_7Rates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, path, txNum, mcs0_7Rates[i]);
+ PHY_SetTxPowerByRate(padapter, path, txNum, mcs0_7Rates[i], value - base);
+ }
- /* VHT 2SS */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_VHT2SS_MCS7);
- for (i = 0; i < sizeof(vht2ssRates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, vht2ssRates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, vht2ssRates[i], value - base);
- }
+ /* HT MCS8~15 */
+ base = PHY_GetTxPowerByRate(padapter, path, txNum, MGN_MCS15);
+ for (i = 0; i < sizeof(mcs8_15Rates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, path, txNum, mcs8_15Rates[i]);
+ PHY_SetTxPowerByRate(padapter, path, txNum, mcs8_15Rates[i], value - base);
+ }
- /* VHT 3SS */
- base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_VHT3SS_MCS7);
- for (i = 0; i < sizeof(vht3ssRates); ++i) {
- value = PHY_GetTxPowerByRate(padapter, band, path, txNum, vht3ssRates[i]);
- PHY_SetTxPowerByRate(padapter, band, path, txNum, vht3ssRates[i], value - base);
- }
+ /* HT MCS16~23 */
+ base = PHY_GetTxPowerByRate(padapter, path, txNum, MGN_MCS23);
+ for (i = 0; i < sizeof(mcs16_23Rates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, path, txNum, mcs16_23Rates[i]);
+ PHY_SetTxPowerByRate(padapter, path, txNum, mcs16_23Rates[i], value - base);
}
}
}
@@ -881,11 +596,10 @@ void PHY_SetTxPowerIndexByRateSection(
if (RateSection == CCK) {
u8 cckRates[] = {MGN_1M, MGN_2M, MGN_5_5M, MGN_11M};
- if (pHalData->CurrentBandType == BAND_ON_2_4G)
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
- pHalData->CurrentChannelBW,
- Channel, cckRates,
- ARRAY_SIZE(cckRates));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, cckRates,
+ ARRAY_SIZE(cckRates));
} else if (RateSection == OFDM) {
u8 ofdmRates[] = {MGN_6M, MGN_9M, MGN_12M, MGN_18M, MGN_24M, MGN_36M, MGN_48M, MGN_54M};
@@ -922,67 +636,7 @@ void PHY_SetTxPowerIndexByRateSection(
Channel, htRates4T,
ARRAY_SIZE(htRates4T));
- } else if (RateSection == VHT_1SSMCS0_1SSMCS9) {
- u8 vhtRates1T[] = {MGN_VHT1SS_MCS0, MGN_VHT1SS_MCS1, MGN_VHT1SS_MCS2, MGN_VHT1SS_MCS3, MGN_VHT1SS_MCS4,
- MGN_VHT1SS_MCS5, MGN_VHT1SS_MCS6, MGN_VHT1SS_MCS7, MGN_VHT1SS_MCS8, MGN_VHT1SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
- pHalData->CurrentChannelBW,
- Channel, vhtRates1T,
- ARRAY_SIZE(vhtRates1T));
-
- } else if (RateSection == VHT_2SSMCS0_2SSMCS9) {
- u8 vhtRates2T[] = {MGN_VHT2SS_MCS0, MGN_VHT2SS_MCS1, MGN_VHT2SS_MCS2, MGN_VHT2SS_MCS3, MGN_VHT2SS_MCS4,
- MGN_VHT2SS_MCS5, MGN_VHT2SS_MCS6, MGN_VHT2SS_MCS7, MGN_VHT2SS_MCS8, MGN_VHT2SS_MCS9};
-
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
- pHalData->CurrentChannelBW,
- Channel, vhtRates2T,
- ARRAY_SIZE(vhtRates2T));
- } else if (RateSection == VHT_3SSMCS0_3SSMCS9) {
- u8 vhtRates3T[] = {MGN_VHT3SS_MCS0, MGN_VHT3SS_MCS1, MGN_VHT3SS_MCS2, MGN_VHT3SS_MCS3, MGN_VHT3SS_MCS4,
- MGN_VHT3SS_MCS5, MGN_VHT3SS_MCS6, MGN_VHT3SS_MCS7, MGN_VHT3SS_MCS8, MGN_VHT3SS_MCS9};
-
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
- pHalData->CurrentChannelBW,
- Channel, vhtRates3T,
- ARRAY_SIZE(vhtRates3T));
- } else if (RateSection == VHT_4SSMCS0_4SSMCS9) {
- u8 vhtRates4T[] = {MGN_VHT4SS_MCS0, MGN_VHT4SS_MCS1, MGN_VHT4SS_MCS2, MGN_VHT4SS_MCS3, MGN_VHT4SS_MCS4,
- MGN_VHT4SS_MCS5, MGN_VHT4SS_MCS6, MGN_VHT4SS_MCS7, MGN_VHT4SS_MCS8, MGN_VHT4SS_MCS9};
-
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
- pHalData->CurrentChannelBW,
- Channel, vhtRates4T,
- ARRAY_SIZE(vhtRates4T));
- }
-}
-
-static bool phy_GetChnlIndex(u8 Channel, u8 *ChannelIdx)
-{
- u8 channel5G[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102,
- 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
- 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155, 157, 159, 161,
- 163, 165, 167, 168, 169, 171, 173, 175, 177
- };
- u8 i = 0;
- bool bIn24G = true;
-
- if (Channel <= 14) {
- bIn24G = true;
- *ChannelIdx = Channel-1;
- } else {
- bIn24G = false;
-
- for (i = 0; i < ARRAY_SIZE(channel5G); ++i) {
- if (channel5G[i] == Channel) {
- *ChannelIdx = i;
- return bIn24G;
- }
- }
}
-
- return bIn24G;
}
u8 PHY_GetTxPowerIndexBase(
@@ -990,110 +644,45 @@ u8 PHY_GetTxPowerIndexBase(
u8 RFPath,
u8 Rate,
enum channel_width BandWidth,
- u8 Channel,
- bool *bIn24G
+ u8 Channel
)
{
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- u8 i = 0; /* default set to 1S */
u8 txPower = 0;
u8 chnlIdx = (Channel-1);
if (HAL_IsLegalChannel(padapter, Channel) == false)
chnlIdx = 0;
- *bIn24G = phy_GetChnlIndex(Channel, &chnlIdx);
-
- if (*bIn24G) { /* 3 ============================== 2.4 G ============================== */
- if (IS_CCK_RATE(Rate))
- txPower = pHalData->Index24G_CCK_Base[RFPath][chnlIdx];
- else if (MGN_6M <= Rate)
- txPower = pHalData->Index24G_BW40_Base[RFPath][chnlIdx];
-
- /* OFDM-1T */
- if ((MGN_6M <= Rate && Rate <= MGN_54M) && !IS_CCK_RATE(Rate))
- txPower += pHalData->OFDM_24G_Diff[RFPath][TX_1S];
-
- if (BandWidth == CHANNEL_WIDTH_20) { /* BW20-1S, BW20-2S */
- if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_24G_Diff[RFPath][TX_1S];
- if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_24G_Diff[RFPath][TX_2S];
- if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_24G_Diff[RFPath][TX_3S];
- if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_24G_Diff[RFPath][TX_4S];
-
- } else if (BandWidth == CHANNEL_WIDTH_40) { /* BW40-1S, BW40-2S */
- if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_1S];
- if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_2S];
- if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_3S];
- if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_4S];
-
- }
- /* Willis suggest adopt BW 40M power index while in BW 80 mode */
- else if (BandWidth == CHANNEL_WIDTH_80) {
- if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_1S];
- if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_2S];
- if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_3S];
- if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_24G_Diff[RFPath][TX_4S];
+ if (IS_CCK_RATE(Rate))
+ txPower = pHalData->Index24G_CCK_Base[RFPath][chnlIdx];
+ else if (MGN_6M <= Rate)
+ txPower = pHalData->Index24G_BW40_Base[RFPath][chnlIdx];
+
+ /* OFDM-1T */
+ if ((MGN_6M <= Rate && Rate <= MGN_54M) && !IS_CCK_RATE(Rate))
+ txPower += pHalData->OFDM_24G_Diff[RFPath][TX_1S];
+
+ if (BandWidth == CHANNEL_WIDTH_20) { /* BW20-1S, BW20-2S */
+ if (MGN_MCS0 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_1S];
+ if (MGN_MCS8 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_2S];
+ if (MGN_MCS16 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_3S];
+ if (MGN_MCS24 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_4S];
+
+ } else if (BandWidth == CHANNEL_WIDTH_40) { /* BW40-1S, BW40-2S */
+ if (MGN_MCS0 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_1S];
+ if (MGN_MCS8 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_2S];
+ if (MGN_MCS16 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_3S];
+ if (MGN_MCS24 <= Rate && Rate <= MGN_MCS31)
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_4S];
- }
- } else {/* 3 ============================== 5 G ============================== */
- if (MGN_6M <= Rate)
- txPower = pHalData->Index5G_BW40_Base[RFPath][chnlIdx];
-
- /* OFDM-1T */
- if ((MGN_6M <= Rate && Rate <= MGN_54M) && !IS_CCK_RATE(Rate))
- txPower += pHalData->OFDM_5G_Diff[RFPath][TX_1S];
-
- /* BW20-1S, BW20-2S */
- if (BandWidth == CHANNEL_WIDTH_20) {
- if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_5G_Diff[RFPath][TX_1S];
- if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_5G_Diff[RFPath][TX_2S];
- if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_5G_Diff[RFPath][TX_3S];
- if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW20_5G_Diff[RFPath][TX_4S];
-
- } else if (BandWidth == CHANNEL_WIDTH_40) { /* BW40-1S, BW40-2S */
- if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_5G_Diff[RFPath][TX_1S];
- if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_5G_Diff[RFPath][TX_2S];
- if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_5G_Diff[RFPath][TX_3S];
- if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW40_5G_Diff[RFPath][TX_4S];
-
- } else if (BandWidth == CHANNEL_WIDTH_80) { /* BW80-1S, BW80-2S */
- /* <20121220, Kordan> Get the index of array "Index5G_BW80_Base". */
- u8 channel5G_80M[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
- for (i = 0; i < ARRAY_SIZE(channel5G_80M); ++i)
- if (channel5G_80M[i] == Channel)
- chnlIdx = i;
-
- txPower = pHalData->Index5G_BW80_Base[RFPath][chnlIdx];
-
- if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += + pHalData->BW80_5G_Diff[RFPath][TX_1S];
- if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW80_5G_Diff[RFPath][TX_2S];
- if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW80_5G_Diff[RFPath][TX_3S];
- if ((MGN_MCS23 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
- txPower += pHalData->BW80_5G_Diff[RFPath][TX_4S];
- }
}
return txPower;
@@ -1252,126 +841,6 @@ u8 PHY_GetRateIndexOfTxPowerByRate(u8 Rate)
case MGN_MCS31:
index = 43;
break;
- case MGN_VHT1SS_MCS0:
- index = 44;
- break;
- case MGN_VHT1SS_MCS1:
- index = 45;
- break;
- case MGN_VHT1SS_MCS2:
- index = 46;
- break;
- case MGN_VHT1SS_MCS3:
- index = 47;
- break;
- case MGN_VHT1SS_MCS4:
- index = 48;
- break;
- case MGN_VHT1SS_MCS5:
- index = 49;
- break;
- case MGN_VHT1SS_MCS6:
- index = 50;
- break;
- case MGN_VHT1SS_MCS7:
- index = 51;
- break;
- case MGN_VHT1SS_MCS8:
- index = 52;
- break;
- case MGN_VHT1SS_MCS9:
- index = 53;
- break;
- case MGN_VHT2SS_MCS0:
- index = 54;
- break;
- case MGN_VHT2SS_MCS1:
- index = 55;
- break;
- case MGN_VHT2SS_MCS2:
- index = 56;
- break;
- case MGN_VHT2SS_MCS3:
- index = 57;
- break;
- case MGN_VHT2SS_MCS4:
- index = 58;
- break;
- case MGN_VHT2SS_MCS5:
- index = 59;
- break;
- case MGN_VHT2SS_MCS6:
- index = 60;
- break;
- case MGN_VHT2SS_MCS7:
- index = 61;
- break;
- case MGN_VHT2SS_MCS8:
- index = 62;
- break;
- case MGN_VHT2SS_MCS9:
- index = 63;
- break;
- case MGN_VHT3SS_MCS0:
- index = 64;
- break;
- case MGN_VHT3SS_MCS1:
- index = 65;
- break;
- case MGN_VHT3SS_MCS2:
- index = 66;
- break;
- case MGN_VHT3SS_MCS3:
- index = 67;
- break;
- case MGN_VHT3SS_MCS4:
- index = 68;
- break;
- case MGN_VHT3SS_MCS5:
- index = 69;
- break;
- case MGN_VHT3SS_MCS6:
- index = 70;
- break;
- case MGN_VHT3SS_MCS7:
- index = 71;
- break;
- case MGN_VHT3SS_MCS8:
- index = 72;
- break;
- case MGN_VHT3SS_MCS9:
- index = 73;
- break;
- case MGN_VHT4SS_MCS0:
- index = 74;
- break;
- case MGN_VHT4SS_MCS1:
- index = 75;
- break;
- case MGN_VHT4SS_MCS2:
- index = 76;
- break;
- case MGN_VHT4SS_MCS3:
- index = 77;
- break;
- case MGN_VHT4SS_MCS4:
- index = 78;
- break;
- case MGN_VHT4SS_MCS5:
- index = 79;
- break;
- case MGN_VHT4SS_MCS6:
- index = 80;
- break;
- case MGN_VHT4SS_MCS7:
- index = 81;
- break;
- case MGN_VHT4SS_MCS8:
- index = 82;
- break;
- case MGN_VHT4SS_MCS9:
- index = 83;
- break;
default:
break;
}
@@ -1379,7 +848,7 @@ u8 PHY_GetRateIndexOfTxPowerByRate(u8 Rate)
}
s8 PHY_GetTxPowerByRate(
- struct adapter *padapter, u8 Band, u8 RFPath, u8 TxNum, u8 Rate
+ struct adapter *padapter, u8 RFPath, u8 TxNum, u8 Rate
)
{
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
@@ -1390,9 +859,6 @@ s8 PHY_GetTxPowerByRate(
padapter->registrypriv.RegEnableTxPowerByRate == 0)
return 0;
- if (Band != BAND_ON_2_4G && Band != BAND_ON_5G)
- return value;
-
if (RFPath > ODM_RF_PATH_D)
return value;
@@ -1402,13 +868,12 @@ s8 PHY_GetTxPowerByRate(
if (rateIndex >= TX_PWR_BY_RATE_NUM_RATE)
return value;
- return pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex];
+ return pHalData->TxPwrByRateOffset[RFPath][TxNum][rateIndex];
}
void PHY_SetTxPowerByRate(
struct adapter *padapter,
- u8 Band,
u8 RFPath,
u8 TxNum,
u8 Rate,
@@ -1418,9 +883,6 @@ void PHY_SetTxPowerByRate(
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
u8 rateIndex = PHY_GetRateIndexOfTxPowerByRate(Rate);
- if (Band != BAND_ON_2_4G && Band != BAND_ON_5G)
- return;
-
if (RFPath > ODM_RF_PATH_D)
return;
@@ -1430,18 +892,16 @@ void PHY_SetTxPowerByRate(
if (rateIndex >= TX_PWR_BY_RATE_NUM_RATE)
return;
- pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex] = Value;
+ pHalData->TxPwrByRateOffset[RFPath][TxNum][rateIndex] = Value;
}
void PHY_SetTxPowerLevelByPath(struct adapter *Adapter, u8 channel, u8 path)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- bool bIsIn24G = (pHalData->CurrentBandType == BAND_ON_2_4G);
/* if (pMgntInfo->RegNByteAccess == 0) */
{
- if (bIsIn24G)
- PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, CCK);
+ PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, CCK);
PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, OFDM);
PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, HT_MCS0_MCS7);
@@ -1483,26 +943,9 @@ static s8 phy_GetWorldWideLimit(s8 *LimitTable)
return min;
}
-static s8 phy_GetChannelIndexOfTxPowerLimit(u8 Band, u8 Channel)
+static s8 phy_GetChannelIndexOfTxPowerLimit(u8 Channel)
{
- s8 channelIndex = -1;
- u8 channel5G[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102,
- 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
- 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155, 157, 159, 161,
- 163, 165, 167, 168, 169, 171, 173, 175, 177
- };
- u8 i = 0;
- if (Band == BAND_ON_2_4G)
- channelIndex = Channel - 1;
- else if (Band == BAND_ON_5G) {
- for (i = 0; i < ARRAY_SIZE(channel5G); ++i) {
- if (channel5G[i] == Channel)
- channelIndex = i;
- }
- }
-
- return channelIndex;
+ return Channel - 1;
}
static s16 get_bandwidth_idx(const enum channel_width bandwidth)
@@ -1512,10 +955,6 @@ static s16 get_bandwidth_idx(const enum channel_width bandwidth)
return 0;
case CHANNEL_WIDTH_40:
return 1;
- case CHANNEL_WIDTH_80:
- return 2;
- case CHANNEL_WIDTH_160:
- return 3;
default:
return -1;
}
@@ -1541,42 +980,22 @@ static s16 get_rate_sctn_idx(const u8 rate)
case MGN_MCS24: case MGN_MCS25: case MGN_MCS26: case MGN_MCS27:
case MGN_MCS28: case MGN_MCS29: case MGN_MCS30: case MGN_MCS31:
return 5;
- case MGN_VHT1SS_MCS0: case MGN_VHT1SS_MCS1: case MGN_VHT1SS_MCS2:
- case MGN_VHT1SS_MCS3: case MGN_VHT1SS_MCS4: case MGN_VHT1SS_MCS5:
- case MGN_VHT1SS_MCS6: case MGN_VHT1SS_MCS7: case MGN_VHT1SS_MCS8:
- case MGN_VHT1SS_MCS9:
- return 6;
- case MGN_VHT2SS_MCS0: case MGN_VHT2SS_MCS1: case MGN_VHT2SS_MCS2:
- case MGN_VHT2SS_MCS3: case MGN_VHT2SS_MCS4: case MGN_VHT2SS_MCS5:
- case MGN_VHT2SS_MCS6: case MGN_VHT2SS_MCS7: case MGN_VHT2SS_MCS8:
- case MGN_VHT2SS_MCS9:
- return 7;
- case MGN_VHT3SS_MCS0: case MGN_VHT3SS_MCS1: case MGN_VHT3SS_MCS2:
- case MGN_VHT3SS_MCS3: case MGN_VHT3SS_MCS4: case MGN_VHT3SS_MCS5:
- case MGN_VHT3SS_MCS6: case MGN_VHT3SS_MCS7: case MGN_VHT3SS_MCS8:
- case MGN_VHT3SS_MCS9:
- return 8;
- case MGN_VHT4SS_MCS0: case MGN_VHT4SS_MCS1: case MGN_VHT4SS_MCS2:
- case MGN_VHT4SS_MCS3: case MGN_VHT4SS_MCS4: case MGN_VHT4SS_MCS5:
- case MGN_VHT4SS_MCS6: case MGN_VHT4SS_MCS7: case MGN_VHT4SS_MCS8:
- case MGN_VHT4SS_MCS9:
- return 9;
default:
return -1;
}
}
s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
- enum band_type band_type, enum channel_width bandwidth,
+ enum channel_width bandwidth,
u8 rf_path, u8 data_rate, u8 channel)
{
- s16 idx_band = -1;
s16 idx_regulation = -1;
s16 idx_bandwidth = -1;
s16 idx_rate_sctn = -1;
s16 idx_channel = -1;
s8 pwr_lmt = MAX_POWER_INDEX;
struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+ s8 limits[10] = {0}; u8 i = 0;
if (((adapter->registrypriv.RegEnableTxPowerLimit == 2) &&
(hal_data->EEPROMRegulatory != 1)) ||
@@ -1597,17 +1016,10 @@ s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
idx_regulation = TXPWR_LMT_WW;
break;
default:
- idx_regulation = (band_type == BAND_ON_2_4G) ?
- hal_data->Regulation2_4G :
- hal_data->Regulation5G;
+ idx_regulation = hal_data->Regulation2_4G;
break;
}
- if (band_type == BAND_ON_2_4G)
- idx_band = 0;
- else if (band_type == BAND_ON_5G)
- idx_band = 1;
-
idx_bandwidth = get_bandwidth_idx(bandwidth);
idx_rate_sctn = get_rate_sctn_idx(data_rate);
@@ -1617,107 +1029,30 @@ s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 reg_pwr_tbl_sel,
/* HT on 80M will reference to HT on 40M */
if (idx_rate_sctn == 0 || idx_rate_sctn == 1)
idx_bandwidth = 0;
- else if ((idx_rate_sctn == 2 || idx_rate_sctn == 3) &&
- (band_type == BAND_ON_5G) && (idx_bandwidth == 2))
- idx_bandwidth = 1;
- if (band_type == BAND_ON_2_4G || band_type == BAND_ON_5G)
- channel = phy_GetChannelIndexOfTxPowerLimit(band_type, channel);
+ channel = phy_GetChannelIndexOfTxPowerLimit(channel);
- if (idx_band == -1 || idx_regulation == -1 || idx_bandwidth == -1 ||
+ if (idx_regulation == -1 || idx_bandwidth == -1 ||
idx_rate_sctn == -1 || idx_channel == -1)
return MAX_POWER_INDEX;
- if (band_type == BAND_ON_2_4G) {
- s8 limits[10] = {0}; u8 i = 0;
-
- for (i = 0; i < MAX_REGULATION_NUM; i++)
- limits[i] = hal_data->TxPwrLimit_2_4G[i]
- [idx_bandwidth]
- [idx_rate_sctn]
- [idx_channel]
- [rf_path];
-
- pwr_lmt = (idx_regulation == TXPWR_LMT_WW) ?
- phy_GetWorldWideLimit(limits) :
- hal_data->TxPwrLimit_2_4G[idx_regulation]
- [idx_bandwidth]
- [idx_rate_sctn]
- [idx_channel]
- [rf_path];
-
- } else if (band_type == BAND_ON_5G) {
- s8 limits[10] = {0}; u8 i = 0;
-
- for (i = 0; i < MAX_REGULATION_NUM; ++i)
- limits[i] = hal_data->TxPwrLimit_5G[i]
- [idx_bandwidth]
- [idx_rate_sctn]
- [idx_channel]
- [rf_path];
-
- pwr_lmt = (idx_regulation == TXPWR_LMT_WW) ?
- phy_GetWorldWideLimit(limits) :
- hal_data->TxPwrLimit_5G[idx_regulation]
- [idx_bandwidth]
- [idx_rate_sctn]
- [idx_channel]
- [rf_path];
- }
- return pwr_lmt;
-}
+ for (i = 0; i < MAX_REGULATION_NUM; i++)
+ limits[i] = hal_data->TxPwrLimit_2_4G[i]
+ [idx_bandwidth]
+ [idx_rate_sctn]
+ [idx_channel]
+ [rf_path];
-static void phy_CrossReferenceHTAndVHTTxPowerLimit(struct adapter *padapter)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
- u8 regulation, bw, channel, rateSection;
- s8 tempPwrLmt = 0;
+ pwr_lmt = (idx_regulation == TXPWR_LMT_WW) ?
+ phy_GetWorldWideLimit(limits) :
+ hal_data->TxPwrLimit_2_4G[idx_regulation]
+ [idx_bandwidth]
+ [idx_rate_sctn]
+ [idx_channel]
+ [rf_path];
- for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
- for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
- for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
- tempPwrLmt = pHalData->TxPwrLimit_5G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
- if (tempPwrLmt == MAX_POWER_INDEX) {
- u8 baseSection = 2, refSection = 6;
- if (bw == 0 || bw == 1) { /* 5G 20M 40M VHT and HT can cross reference */
- /* 1, bw, rateSection, channel, ODM_RF_PATH_A); */
- if (rateSection >= 2 && rateSection <= 9) {
- if (rateSection == 2) {
- baseSection = 2;
- refSection = 6;
- } else if (rateSection == 3) {
- baseSection = 3;
- refSection = 7;
- } else if (rateSection == 4) {
- baseSection = 4;
- refSection = 8;
- } else if (rateSection == 5) {
- baseSection = 5;
- refSection = 9;
- } else if (rateSection == 6) {
- baseSection = 6;
- refSection = 2;
- } else if (rateSection == 7) {
- baseSection = 7;
- refSection = 3;
- } else if (rateSection == 8) {
- baseSection = 8;
- refSection = 4;
- } else if (rateSection == 9) {
- baseSection = 9;
- refSection = 5;
- }
- pHalData->TxPwrLimit_5G[regulation][bw][baseSection][channel][ODM_RF_PATH_A] =
- pHalData->TxPwrLimit_5G[regulation][bw][refSection][channel][ODM_RF_PATH_A];
- }
- }
- }
- }
- }
- }
- }
+ return pwr_lmt;
}
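
With the band dimension gone, the rewritten phy_get_tx_pwr_lmt() above reduces to indexing the 2.4 GHz limit table and, for the "worldwide" regulation, collapsing the per-domain limits into a single value via phy_GetWorldWideLimit(). A minimal standalone sketch of that collapse follows; it assumes, as the helper's name suggests, that the worldwide limit is the most restrictive (lowest) one, and the array size and sample values are examples, not the driver's constants.

	/* Illustrative sketch, not part of the patch: pick the strictest limit
	 * across regulatory domains for one 2.4 GHz table entry.  Bounds and
	 * sample values are assumptions for the example. */
	#include <stdio.h>

	#define EXAMPLE_REGULATION_NUM	4
	#define EXAMPLE_MAX_POWER_INDEX	0x3F

	static signed char example_worldwide_limit(const signed char *limits, int n)
	{
		signed char min = EXAMPLE_MAX_POWER_INDEX;
		int i;

		for (i = 0; i < n; i++)
			if (limits[i] < min)
				min = limits[i];
		return min;
	}

	int main(void)
	{
		signed char limits[EXAMPLE_REGULATION_NUM] = { 32, 30, 34, 28 };

		printf("WW limit = %d\n",
		       example_worldwide_limit(limits, EXAMPLE_REGULATION_NUM));
		return 0;
	}
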
void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter)
@@ -1728,8 +1063,6 @@ void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter)
s8 tempValue = 0, tempPwrLmt = 0;
u8 rfPath = 0;
- phy_CrossReferenceHTAndVHTTxPowerLimit(Adapter);
-
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) {
@@ -1739,17 +1072,17 @@ void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter)
for (rfPath = ODM_RF_PATH_A; rfPath < MAX_RF_PATH_NUM; ++rfPath) {
if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
if (rateSection == 5) /* HT 4T */
- BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_4TX, HT_MCS24_MCS31);
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, rfPath, RF_4TX, HT_MCS24_MCS31);
else if (rateSection == 4) /* HT 3T */
- BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_3TX, HT_MCS16_MCS23);
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, rfPath, RF_3TX, HT_MCS16_MCS23);
else if (rateSection == 3) /* HT 2T */
- BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15);
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, rfPath, RF_2TX, HT_MCS8_MCS15);
else if (rateSection == 2) /* HT 1T */
- BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7);
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, rfPath, RF_1TX, HT_MCS0_MCS7);
else if (rateSection == 1) /* OFDM */
- BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_1TX, OFDM);
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, rfPath, RF_1TX, OFDM);
else if (rateSection == 0) /* CCK */
- BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_1TX, CCK);
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, rfPath, RF_1TX, CCK);
} else
BW40PwrBasedBm2_4G = Adapter->registrypriv.RegPowerBase * 2;
@@ -1776,20 +1109,11 @@ void PHY_InitTxPowerLimit(struct adapter *Adapter)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
pHalData->TxPwrLimit_2_4G[i][j][k][m][l] = MAX_POWER_INDEX;
}
-
- for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j)
- for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
- for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
- for (l = 0; l < MAX_RF_PATH_NUM; ++l)
- pHalData->TxPwrLimit_5G[i][j][k][m][l] = MAX_POWER_INDEX;
- }
}
void PHY_SetTxPowerLimit(
struct adapter *Adapter,
u8 *Regulation,
- u8 *Band,
u8 *Bandwidth,
u8 *RateSection,
u8 *RfPath,
@@ -1827,14 +1151,6 @@ void PHY_SetTxPowerLimit(
rateSection = 4;
else if (eqNByte(RateSection, (u8 *)("HT"), 2) && eqNByte(RfPath, (u8 *)("4T"), 2))
rateSection = 5;
- else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("1T"), 2))
- rateSection = 6;
- else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("2T"), 2))
- rateSection = 7;
- else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("3T"), 2))
- rateSection = 8;
- else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("4T"), 2))
- rateSection = 9;
else
return;
@@ -1847,38 +1163,21 @@ void PHY_SetTxPowerLimit(
else if (eqNByte(Bandwidth, (u8 *)("160M"), 4))
bandwidth = 3;
- if (eqNByte(Band, (u8 *)("2.4G"), 4)) {
- channelIndex = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_2_4G, channel);
+ channelIndex = phy_GetChannelIndexOfTxPowerLimit(channel);
- if (channelIndex == -1)
- return;
-
- prevPowerLimit = pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A];
-
- if (powerLimit < prevPowerLimit)
- pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A] = powerLimit;
-
- } else if (eqNByte(Band, (u8 *)("5G"), 2)) {
- channelIndex = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_5G, channel);
-
- if (channelIndex == -1)
- return;
-
- prevPowerLimit = pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A];
+ if (channelIndex == -1)
+ return;
- if (powerLimit < prevPowerLimit)
- pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A] = powerLimit;
+ prevPowerLimit = pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A];
- } else {
- return;
- }
+ if (powerLimit < prevPowerLimit)
+ pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A] = powerLimit;
}
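
The simplified PHY_SetTxPowerLimit() above keeps only the 2.4 GHz path: after parsing the regulation, rate-section and bandwidth strings it stores the new limit only when it is stricter than the value already in the table. A small sketch of that "keep the lower value" update, using hypothetical names rather than driver symbols:

	/* Sketch only (hypothetical names): store a parsed limit only when it
	 * tightens the value already in the table. */
	#include <stdio.h>

	static void store_limit(signed char *table_entry, signed char parsed_limit)
	{
		if (parsed_limit < *table_entry)
			*table_entry = parsed_limit;
	}

	int main(void)
	{
		signed char entry = 63;		/* "unset" MAX_POWER_INDEX-style value (example) */

		store_limit(&entry, 30);
		store_limit(&entry, 34);	/* looser limit, ignored */
		printf("stored limit = %d\n", entry);
		return 0;
	}
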
void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
pHalData->Regulation2_4G = TXPWR_LMT_WW;
- pHalData->Regulation5G = TXPWR_LMT_WW;
switch (ChannelPlan) {
case RT_CHANNEL_DOMAIN_WORLD_NULL:
@@ -1898,139 +1197,105 @@ void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
break;
case RT_CHANNEL_DOMAIN_FCC1_FCC1:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI1:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_MKK1_MKK1:
pHalData->Regulation2_4G = TXPWR_LMT_MKK;
- pHalData->Regulation5G = TXPWR_LMT_MKK;
break;
case RT_CHANNEL_DOMAIN_WORLD_KCC1:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_MKK;
break;
case RT_CHANNEL_DOMAIN_WORLD_FCC2:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_FCC3:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_FCC4:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_FCC5:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_FCC6:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_FCC1_FCC7:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI2:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI3:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_MKK1_MKK2:
pHalData->Regulation2_4G = TXPWR_LMT_MKK;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_MKK1_MKK3:
pHalData->Regulation2_4G = TXPWR_LMT_MKK;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_FCC1_NCC1:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_FCC1_NCC2:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_GLOBAL_NULL:
pHalData->Regulation2_4G = TXPWR_LMT_WW;
- pHalData->Regulation5G = TXPWR_LMT_WW;
break;
case RT_CHANNEL_DOMAIN_ETSI1_ETSI4:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_FCC1_FCC2:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_FCC1_NCC3:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI5:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_FCC1_FCC8:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI6:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI7:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI8:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI9:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI10:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI11:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_FCC1_NCC4:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI12:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_FCC1_FCC9:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_WORLD_ETSI13:
pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
- pHalData->Regulation5G = TXPWR_LMT_ETSI;
break;
case RT_CHANNEL_DOMAIN_FCC1_FCC10:
pHalData->Regulation2_4G = TXPWR_LMT_FCC;
- pHalData->Regulation5G = TXPWR_LMT_FCC;
break;
case RT_CHANNEL_DOMAIN_REALTEK_DEFINE: /* Realtek Reserve */
pHalData->Regulation2_4G = TXPWR_LMT_WW;
- pHalData->Regulation5G = TXPWR_LMT_WW;
break;
default:
break;
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 8dc4dd8c6d4c..4868a69cdb8f 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -4,9 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-
-#define _HAL_INTF_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index 092b32c41ff3..68dfb77772b2 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -290,8 +290,6 @@ static void odm_CommonInfoSelfInit(struct dm_odm_t *pDM_Odm)
pDM_Odm->bCckHighPower = (bool) PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG(CCK_RPT_FORMAT, pDM_Odm), ODM_BIT(CCK_RPT_FORMAT, pDM_Odm));
pDM_Odm->RFPathRxEnable = (u8) PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG(BB_RX_PATH, pDM_Odm), ODM_BIT(BB_RX_PATH, pDM_Odm));
- ODM_InitDebugSetting(pDM_Odm);
-
pDM_Odm->TxRate = 0xFF;
}
@@ -323,32 +321,10 @@ static void odm_CommonInfoSelfUpdate(struct dm_odm_t *pDM_Odm)
static void odm_CmnInfoInit_Debug(struct dm_odm_t *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug ==>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportPlatform =%d\n", pDM_Odm->SupportPlatform));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility = 0x%x\n", pDM_Odm->SupportAbility));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface =%d\n", pDM_Odm->SupportInterface));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType = 0x%x\n", pDM_Odm->SupportICType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion =%d\n", pDM_Odm->CutVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion =%d\n", pDM_Odm->FabVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType =%d\n", pDM_Odm->RFType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType =%d\n", pDM_Odm->BoardType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA =%d\n", pDM_Odm->ExtLNA));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA =%d\n", pDM_Odm->ExtPA));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW =%d\n", pDM_Odm->ExtTRSW));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("PatchID =%d\n", pDM_Odm->PatchID));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest =%d\n", pDM_Odm->bInHctTest));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest =%d\n", pDM_Odm->bWIFITest));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent =%d\n", pDM_Odm->bDualMacSmartConcurrent));
-
}
static void odm_BasicDbgMessage(struct dm_odm_t *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_BasicDbgMsg ==>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked = %d, RSSI_Min = %d,\n",
- pDM_Odm->bLinked, pDM_Odm->RSSI_Min));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RxRate = 0x%x, RSSI_A = %d, RSSI_B = %d\n",
- pDM_Odm->RxRate, pDM_Odm->RSSI_A, pDM_Odm->RSSI_B));
}
/* 3 ============================================================ */
@@ -401,7 +377,6 @@ u32 ODM_Get_Rate_Bitmap(
break;
case (ODM_WM_G):
- case (ODM_WM_A):
if (rssi_level == DM_RATR_STA_HIGH)
rate_bitmap = 0x00000f00;
else
@@ -420,7 +395,6 @@ u32 ODM_Get_Rate_Bitmap(
case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
case (ODM_WM_B|ODM_WM_N24G):
case (ODM_WM_G|ODM_WM_N24G):
- case (ODM_WM_A|ODM_WM_N5G):
if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
if (rssi_level == DM_RATR_STA_HIGH)
rate_bitmap = 0x000f0000;
@@ -446,34 +420,6 @@ u32 ODM_Get_Rate_Bitmap(
}
break;
- case (ODM_WM_AC|ODM_WM_G):
- if (rssi_level == 1)
- rate_bitmap = 0xfc3f0000;
- else if (rssi_level == 2)
- rate_bitmap = 0xfffff000;
- else
- rate_bitmap = 0xffffffff;
- break;
-
- case (ODM_WM_AC|ODM_WM_A):
-
- if (pDM_Odm->RFType == RF_1T1R) {
- if (rssi_level == 1) /* add by Gary for ac-series */
- rate_bitmap = 0x003f8000;
- else if (rssi_level == 2)
- rate_bitmap = 0x003ff000;
- else
- rate_bitmap = 0x003ff010;
- } else {
- if (rssi_level == 1) /* add by Gary for ac-series */
- rate_bitmap = 0xfe3f8000; /* VHT 2SS MCS3~9 */
- else if (rssi_level == 2)
- rate_bitmap = 0xfffff000; /* VHT 2SS MCS0~9 */
- else
- rate_bitmap = 0xfffff010; /* All */
- }
- break;
-
default:
if (pDM_Odm->RFType == RF_1T2R)
rate_bitmap = 0x000fffff;
@@ -482,9 +428,6 @@ u32 ODM_Get_Rate_Bitmap(
break;
}
- /* printk("%s ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n", __func__, rssi_level, WirelessMode, rate_bitmap); */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n", rssi_level, WirelessMode, rate_bitmap));
-
return ra_mask & rate_bitmap;
}
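
Even with the 5 GHz and 11ac cases removed, ODM_Get_Rate_Bitmap() still ends the same way: the RSSI-dependent bitmap only filters the caller's ra_mask, and the return value is their intersection. A tiny example of that final masking step, with invented mask values:

	/* Example of the final "ra_mask & rate_bitmap" step; both masks are
	 * invented values, not taken from the driver tables. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ra_mask = 0x000fffff;		/* rates the peer supports (example) */
		uint32_t rate_bitmap = 0x000f0000;	/* rates allowed at this RSSI level (example) */

		printf("effective rate mask = 0x%08x\n", ra_mask & rate_bitmap);
		return 0;
	}
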
@@ -495,12 +438,10 @@ static void odm_RefreshRateAdaptiveMaskCE(struct dm_odm_t *pDM_Odm)
struct adapter *padapter = pDM_Odm->Adapter;
if (padapter->bDriverStopped) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("<---- odm_RefreshRateAdaptiveMask(): driver is going to unload\n"));
return;
}
if (!pDM_Odm->bUseRAMask) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("<---- odm_RefreshRateAdaptiveMask(): driver does not control rate adaptive mask\n"));
return;
}
@@ -512,7 +453,6 @@ static void odm_RefreshRateAdaptiveMaskCE(struct dm_odm_t *pDM_Odm)
continue;
if (true == ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI:%d, RSSI_LEVEL:%d\n", pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level));
/* printk("RSSI:%d, RSSI_LEVEL:%d\n", pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level); */
rtw_hal_update_ra_mask(pstat, pstat->rssi_level);
}
@@ -541,9 +481,7 @@ static void odm_RefreshRateAdaptiveMaskCE(struct dm_odm_t *pDM_Odm)
static void odm_RefreshRateAdaptiveMask(struct dm_odm_t *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("odm_RefreshRateAdaptiveMask()---------->\n"));
if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("odm_RefreshRateAdaptiveMask(): Return cos not supported\n"));
return;
}
odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
@@ -582,7 +520,8 @@ bool ODM_RAStateCheck(
break;
default:
- ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !", *pRATRState));
+ netdev_dbg(pDM_Odm->Adapter->pnetdev,
+ "wrong rssi level setting %d !", *pRATRState);
break;
}
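
This hunk shows the conversion pattern used throughout the series: the driver-private ODM_RT_ASSERT/ODM_RT_TRACE macros are deleted, and the few messages worth keeping become netdev_dbg() calls, which dynamic debug compiles out unless explicitly enabled. A hedged, kernel-context sketch of the same pattern in isolation (the struct layout here is assumed for illustration, not the driver's actual types):

	/* Sketch of the trace-macro replacement (kernel context, not a
	 * standalone program): the message moves to netdev_dbg(), keyed off
	 * the adapter's net_device. */
	#include <linux/types.h>
	#include <linux/netdevice.h>

	struct example_adapter {
		struct net_device *pnetdev;
	};

	static void example_report_bad_rssi_level(struct example_adapter *adapter, u8 level)
	{
		netdev_dbg(adapter->pnetdev, "wrong rssi level setting %d !", level);
	}
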
@@ -596,7 +535,6 @@ bool ODM_RAStateCheck(
/* printk("==>%s, RATRState:0x%02x , RSSI:%d\n", __func__, RATRState, RSSI); */
if (*pRATRState != RATRState || bForceUpdate) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
*pRATRState = RATRState;
return true;
}
@@ -631,11 +569,8 @@ static void FindMinimumRSSI(struct adapter *padapter)
(pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0)
) {
pdmpriv->MinUndecoratedPWDBForDM = 0;
- /* ODM_RT_TRACE(pDM_Odm, COMP_BB_POWERSAVING, DBG_LOUD, ("Not connected to any\n")); */
} else
pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
-
- /* ODM_RT_TRACE(pDM_Odm, COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n", pHalData->MinUndecoratedPWDBForDM)); */
}
static void odm_RSSIMonitorCheckCE(struct dm_odm_t *pDM_Odm)
@@ -838,9 +773,6 @@ void ODM_DMInit(struct dm_odm_t *pDM_Odm)
ODM_ClearTxPowerTrackingState(pDM_Odm);
- if (*(pDM_Odm->mp_mode) != 1)
- odm_PathDiversityInit(pDM_Odm);
-
odm_DynamicBBPowerSavingInit(pDM_Odm);
odm_DynamicTxPowerInit(pDM_Odm);
@@ -858,7 +790,6 @@ void ODM_DMWatchdog(struct dm_odm_t *pDM_Odm)
odm_BasicDbgMessage(pDM_Odm);
odm_FalseAlarmCounterStatistics(pDM_Odm);
odm_NHMCounterStatistics(pDM_Odm);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): RSSI = 0x%x\n", pDM_Odm->RSSI_Min));
odm_RSSIMonitorCheck(pDM_Odm);
@@ -872,8 +803,6 @@ void ODM_DMWatchdog(struct dm_odm_t *pDM_Odm)
/* (pDM_Odm->SupportICType & (ODM_RTL8188E) &&(&&(((pDM_Odm->SupportInterface == ODM_ITRF_SDIO))) */
/* */
) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG is in LPS mode\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
odm_DIGbyRSSI_LPS(pDM_Odm);
} else
odm_DIG(pDM_Odm);
@@ -891,7 +820,6 @@ void ODM_DMWatchdog(struct dm_odm_t *pDM_Odm)
odm_RefreshRateAdaptiveMask(pDM_Odm);
odm_EdcaTurboCheck(pDM_Odm);
- odm_PathDiversity(pDM_Odm);
ODM_CfoTracking(pDM_Odm);
ODM_TXPowerTrackingCheck(pDM_Odm);
@@ -956,10 +884,6 @@ void ODM_CmnInfoInit(struct dm_odm_t *pDM_Odm, enum odm_cmninfo_e CmnInfo, u32 V
pDM_Odm->AntDivType = (u8)Value;
break;
- case ODM_CMNINFO_BOARD_TYPE:
- pDM_Odm->BoardType = (u8)Value;
- break;
-
case ODM_CMNINFO_PACKAGE_TYPE:
pDM_Odm->PackageType = (u8)Value;
break;
@@ -968,18 +892,10 @@ void ODM_CmnInfoInit(struct dm_odm_t *pDM_Odm, enum odm_cmninfo_e CmnInfo, u32 V
pDM_Odm->ExtLNA = (u8)Value;
break;
- case ODM_CMNINFO_5G_EXT_LNA:
- pDM_Odm->ExtLNA5G = (u8)Value;
- break;
-
case ODM_CMNINFO_EXT_PA:
pDM_Odm->ExtPA = (u8)Value;
break;
- case ODM_CMNINFO_5G_EXT_PA:
- pDM_Odm->ExtPA5G = (u8)Value;
- break;
-
case ODM_CMNINFO_GPA:
pDM_Odm->TypeGPA = (enum odm_type_gpa_e)Value;
break;
@@ -1044,10 +960,6 @@ void ODM_CmnInfoHook(struct dm_odm_t *pDM_Odm, enum odm_cmninfo_e CmnInfo, void
pDM_Odm->pwirelessmode = pValue;
break;
- case ODM_CMNINFO_BAND:
- pDM_Odm->pBandType = pValue;
- break;
-
case ODM_CMNINFO_SEC_CHNL_OFFSET:
pDM_Odm->pSecChOffset = pValue;
break;
@@ -1214,13 +1126,6 @@ void ODM_CmnInfoUpdate(struct dm_odm_t *pDM_Odm, u32 CmnInfo, u64 Value)
pDM_Odm->RSSI_Min = (u8)Value;
break;
- case ODM_CMNINFO_DBG_COMP:
- pDM_Odm->DebugComponents = Value;
- break;
-
- case ODM_CMNINFO_DBG_LEVEL:
- pDM_Odm->DebugLevel = (u32)Value;
- break;
case ODM_CMNINFO_RA_THRESHOLD_HIGH:
pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
break;
@@ -1262,10 +1167,6 @@ void ODM_CmnInfoUpdate(struct dm_odm_t *pDM_Odm, u32 CmnInfo, u64 Value)
pDM_Odm->WirelessMode = (u8)Value;
break;
- case ODM_CMNINFO_BAND:
- pDM_Odm->BandType = (u8)Value;
- break;
-
case ODM_CMNINFO_SEC_CHNL_OFFSET:
pDM_Odm->SecChOffset = (u8)Value;
break;
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index ff21343fbe0b..abf6547518fb 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -11,7 +11,6 @@
#include "odm_EdcaTurboCheck.h"
#include "odm_DIG.h"
-#include "odm_PathDiv.h"
#include "odm_DynamicBBPowerSaving.h"
#include "odm_DynamicTxPower.h"
#include "odm_CfoTracking.h"
@@ -152,7 +151,6 @@ struct swat_t { /* _SW_Antenna_Switch_ */
bool ANTB_ON; /* To indicate Ant B is on or not */
bool Pre_Aux_FailDetec;
bool RSSI_AntDect_bResult;
- u8 Ant5G;
u8 Ant2G;
s32 RSSI_sum_A;
@@ -197,10 +195,7 @@ struct odm_rate_adaptive {
#define AVG_THERMAL_NUM 8
#define IQK_Matrix_REG_NUM 8
-#define IQK_Matrix_Settings_NUM (14 + 24 + 21) /* Channels_2_4G_NUM
- * + Channels_5G_20M_NUM
- * + Channels_5G
- */
+#define IQK_Matrix_Settings_NUM 14 /* Channels_2_4G_NUM */
#define DM_Type_ByFW 0
#define DM_Type_ByDriver 1
@@ -293,12 +288,9 @@ enum odm_cmninfo_e {
ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */
ODM_CMNINFO_RF_TYPE, /* ODM_RF_PATH_E or ODM_RF_TYPE_E? */
ODM_CMNINFO_RFE_TYPE,
- ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */
ODM_CMNINFO_PACKAGE_TYPE,
ODM_CMNINFO_EXT_LNA, /* true */
- ODM_CMNINFO_5G_EXT_LNA,
ODM_CMNINFO_EXT_PA,
- ODM_CMNINFO_5G_EXT_PA,
ODM_CMNINFO_GPA,
ODM_CMNINFO_APA,
ODM_CMNINFO_GLNA,
@@ -316,7 +308,6 @@ enum odm_cmninfo_e {
ODM_CMNINFO_TX_UNI,
ODM_CMNINFO_RX_UNI,
ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
- ODM_CMNINFO_BAND, /* ODM_BAND_TYPE_E */
ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
ODM_CMNINFO_BW, /* ODM_BW_E */
@@ -456,33 +447,17 @@ enum { /* tag_Wireless_Mode_Definition */
ODM_WM_UNKNOWN = 0x0,
ODM_WM_B = BIT0,
ODM_WM_G = BIT1,
- ODM_WM_A = BIT2,
ODM_WM_N24G = BIT3,
- ODM_WM_N5G = BIT4,
ODM_WM_AUTO = BIT5,
- ODM_WM_AC = BIT6,
-};
-
-/* ODM_CMNINFO_BAND */
-enum { /* tag_Band_Type_Definition */
- ODM_BAND_2_4G = 0,
- ODM_BAND_5G,
- ODM_BAND_ON_BOTH,
- ODM_BANDMAX
};
/* ODM_CMNINFO_BW */
enum { /* tag_Bandwidth_Definition */
ODM_BW20M = 0,
ODM_BW40M = 1,
- ODM_BW80M = 2,
- ODM_BW160M = 3,
- ODM_BW10M = 4,
};
-/* ODM_CMNINFO_BOARD_TYPE */
-/* For non-AC-series IC , ODM_BOARD_5G_EXT_PA and ODM_BOARD_5G_EXT_LNA are ignored */
-/* For AC-series IC, external PA & LNA can be indivisuallly added on 2.4G and/or 5G */
+/* For AC-series IC, external PA & LNA can be individually added on 2.4G */
enum odm_type_gpa_e { /* tag_ODM_TYPE_GPA_Definition */
TYPE_GPA0 = 0,
@@ -530,7 +505,6 @@ struct odm_rf_cal_t { /* ODM_RF_Calibration_Structure */
bool bTXPowerTracking;
u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking as default */
u8 TM_Trigger;
- u8 InternalPA5G[2]; /* pathA / pathB */
u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
u8 ThermalValue;
@@ -565,7 +539,7 @@ struct odm_rf_cal_t { /* ODM_RF_Calibration_Structure */
bool bIQKInProgress;
u8 Delta_IQK;
u8 Delta_LCK;
- s8 BBSwingDiff2G, BBSwingDiff5G; /* Unit: dB */
+ s8 BBSwingDiff2G; /* Unit: dB */
u8 DeltaSwingTableIdx_2GCCKA_P[DELTA_SWINGIDX_SIZE];
u8 DeltaSwingTableIdx_2GCCKA_N[DELTA_SWINGIDX_SIZE];
u8 DeltaSwingTableIdx_2GCCKB_P[DELTA_SWINGIDX_SIZE];
@@ -574,10 +548,6 @@ struct odm_rf_cal_t { /* ODM_RF_Calibration_Structure */
u8 DeltaSwingTableIdx_2GA_N[DELTA_SWINGIDX_SIZE];
u8 DeltaSwingTableIdx_2GB_P[DELTA_SWINGIDX_SIZE];
u8 DeltaSwingTableIdx_2GB_N[DELTA_SWINGIDX_SIZE];
- u8 DeltaSwingTableIdx_5GA_P[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 DeltaSwingTableIdx_5GA_N[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 DeltaSwingTableIdx_5GB_P[BAND_NUM][DELTA_SWINGIDX_SIZE];
- u8 DeltaSwingTableIdx_5GB_N[BAND_NUM][DELTA_SWINGIDX_SIZE];
u8 DeltaSwingTableIdx_2GA_P_8188E[DELTA_SWINGIDX_SIZE];
u8 DeltaSwingTableIdx_2GA_N_8188E[DELTA_SWINGIDX_SIZE];
@@ -643,8 +613,6 @@ struct fat_t { /* _FAST_ANTENNA_TRAINNING_ */
bool bBecomeLinked;
u32 MinMaxRSSI;
u8 idx_AntDiv_counter_2G;
- u8 idx_AntDiv_counter_5G;
- u32 AntDiv_2G_5G;
u32 CCK_counter_main;
u32 CCK_counter_aux;
u32 OFDM_counter_main;
@@ -710,9 +678,6 @@ struct dm_odm_t { /* DM_Out_Source_Dynamic_Mechanism_Structure */
enum phy_reg_pg_type PhyRegPgValueType;
u8 PhyRegPgVersion;
- u64 DebugComponents;
- u32 DebugLevel;
-
u32 NumQryPhyStatusAll; /* CCK + OFDM */
u32 LastNumQryPhyStatusAll;
u32 RxPWDBAve;
@@ -766,10 +731,8 @@ struct dm_odm_t { /* DM_Out_Source_Dynamic_Mechanism_Structure */
u8 TypeAPA;
/* with external LNA NO/Yes = 0/1 */
u8 ExtLNA;
- u8 ExtLNA5G;
/* with external PA NO/Yes = 0/1 */
u8 ExtPA;
- u8 ExtPA5G;
/* with external TRSW NO/Yes = 0/1 */
u8 ExtTRSW;
u8 PatchID; /* Customer ID */
@@ -798,8 +761,6 @@ struct dm_odm_t { /* DM_Out_Source_Dynamic_Mechanism_Structure */
u64 *pNumRxBytesUnicast;
/* Wireless mode B/G/A/N = BIT0/BIT1/BIT2/BIT3 */
u8 *pwirelessmode; /* ODM_WIRELESS_MODE_E */
- /* Frequence band 2.4G/5G = 0/1 */
- u8 *pBandType;
/* Secondary channel offset don't_care/below/above = 0/1/2 */
u8 *pSecChOffset;
/* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
@@ -1068,7 +1029,6 @@ enum ODM_BB_Config_Type {
CONFIG_BB_PHY_REG,
CONFIG_BB_AGC_TAB,
CONFIG_BB_AGC_TAB_2G,
- CONFIG_BB_AGC_TAB_5G,
CONFIG_BB_PHY_REG_PG,
CONFIG_BB_PHY_REG_MP,
CONFIG_BB_AGC_TAB_DIFF,
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index 75471c6c168e..0f6b9d661e39 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -25,16 +25,6 @@ static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap)
0x00FFF000,
(CrystalCap | (CrystalCap << 6))
);
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "odm_SetCrystalCap(): CrystalCap = 0x%x\n",
- CrystalCap
- )
- );
}
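
The crystal-cap write kept above programs the same 6-bit capacitance value into two adjacent register fields, which is why the context shows (CrystalCap | (CrystalCap << 6)) behind the 0x00FFF000 mask. A small arithmetic example with an arbitrary value:

	/* Example of duplicating a 6-bit crystal-cap value into a 12-bit field,
	 * as in (CrystalCap | (CrystalCap << 6)); 0x20 is an arbitrary example. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cap = 0x20 & 0x3f;		/* clamp to 6 bits */
		uint32_t field = cap | (cap << 6);	/* low copy | high copy */

		printf("12-bit field = 0x%03x\n", field & 0xfff);
		return 0;
	}
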
static u8 odm_GetDefaultCrytaltalCap(void *pDM_VOID)
@@ -98,22 +88,6 @@ void ODM_CfoTrackingInit(void *pDM_VOID)
pCfoTrack->CrystalCap = odm_GetDefaultCrytaltalCap(pDM_Odm);
pCfoTrack->bATCStatus = odm_GetATCStatus(pDM_Odm);
pCfoTrack->bAdjust = true;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- ("ODM_CfoTracking_init() =========>\n")
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "ODM_CfoTracking_init(): bATCStatus = %d, CrystalCap = 0x%x\n",
- pCfoTrack->bATCStatus,
- pCfoTrack->DefXCap
- )
- );
}
void ODM_CfoTracking(void *pDM_VOID)
@@ -127,47 +101,16 @@ void ODM_CfoTracking(void *pDM_VOID)
/* 4 Support ability */
if (!(pDM_Odm->SupportAbility & ODM_BB_CFO_TRACKING)) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- ("ODM_CfoTracking(): Return: SupportAbility ODM_BB_CFO_TRACKING is disabled\n")
- );
return;
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- ("ODM_CfoTracking() =========>\n")
- );
-
if (!pDM_Odm->bLinked || !pDM_Odm->bOneEntryOnly) {
/* 4 No link or more than one entry */
ODM_CfoTrackingReset(pDM_Odm);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "ODM_CfoTracking(): Reset: bLinked = %d, bOneEntryOnly = %d\n",
- pDM_Odm->bLinked,
- pDM_Odm->bOneEntryOnly
- )
- );
} else {
/* 3 1. CFO Tracking */
/* 4 1.1 No new packet */
if (pCfoTrack->packetCount == pCfoTrack->packetCount_pre) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "ODM_CfoTracking(): packet counter doesn't change\n"
- )
- );
return;
}
pCfoTrack->packetCount_pre = pCfoTrack->packetCount;
@@ -180,17 +123,6 @@ void ODM_CfoTracking(void *pDM_VOID)
CFO_ave = CFO_kHz_A;
else
CFO_ave = (int)(CFO_kHz_A + CFO_kHz_B) >> 1;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "ODM_CfoTracking(): CFO_kHz_A = %dkHz, CFO_kHz_B = %dkHz, CFO_ave = %dkHz\n",
- CFO_kHz_A,
- CFO_kHz_B,
- CFO_ave
- )
- );
/* 4 1.3 Avoid abnormal large CFO */
CFO_ave_diff =
@@ -203,7 +135,6 @@ void ODM_CfoTracking(void *pDM_VOID)
pCfoTrack->largeCFOHit == 0 &&
!pCfoTrack->bAdjust
) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CFO_TRACKING, ODM_DBG_LOUD, ("ODM_CfoTracking(): first large CFO hit\n"));
pCfoTrack->largeCFOHit = 1;
return;
} else
@@ -223,12 +154,6 @@ void ODM_CfoTracking(void *pDM_VOID)
if (pDM_Odm->bBtEnabled) {
pCfoTrack->bAdjust = false;
odm_SetCrystalCap(pDM_Odm, pCfoTrack->DefXCap);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- ("ODM_CfoTracking(): Disable CFO tracking for BT!!\n")
- );
}
/* 4 1.6 Big jump */
@@ -237,16 +162,6 @@ void ODM_CfoTracking(void *pDM_VOID)
Adjust_Xtal = Adjust_Xtal+((CFO_ave-CFO_TH_XTAL_LOW)>>2);
else if (CFO_ave < (-CFO_TH_XTAL_LOW))
Adjust_Xtal = Adjust_Xtal+((CFO_TH_XTAL_LOW-CFO_ave)>>2);
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "ODM_CfoTracking(): Crystal cap offset = %d\n",
- Adjust_Xtal
- )
- );
}
/* 4 1.7 Adjust Crystal Cap. */
@@ -263,34 +178,12 @@ void ODM_CfoTracking(void *pDM_VOID)
odm_SetCrystalCap(pDM_Odm, (u8)CrystalCap);
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- (
- "ODM_CfoTracking(): Crystal cap = 0x%x, Default Crystal cap = 0x%x\n",
- pCfoTrack->CrystalCap,
- pCfoTrack->DefXCap
- )
- );
/* 3 2. Dynamic ATC switch */
if (CFO_ave < CFO_TH_ATC && CFO_ave > -CFO_TH_ATC) {
odm_SetATCStatus(pDM_Odm, false);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- ("ODM_CfoTracking(): Disable ATC!!\n")
- );
} else {
odm_SetATCStatus(pDM_Odm, true);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CFO_TRACKING,
- ODM_DBG_LOUD,
- ("ODM_CfoTracking(): Enable ATC!!\n")
- );
}
}
}
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index dcef7fb17389..ef5b48bb01b2 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -81,17 +81,6 @@ void odm_NHMBB(void *pDM_VOID)
*(pDM_Odm->pNumTxBytesUnicast);
pDM_Odm->NHMLastRxOkcnt =
*(pDM_Odm->pNumRxBytesUnicast);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "NHM_cnt_0 =%d, NHMCurTxOkcnt = %llu, NHMCurRxOkcnt = %llu\n",
- pDM_Odm->NHM_cnt_0,
- pDM_Odm->NHMCurTxOkcnt,
- pDM_Odm->NHMCurRxOkcnt
- )
- );
if ((pDM_Odm->NHMCurTxOkcnt) + 1 > (u64)(pDM_Odm->NHMCurRxOkcnt<<2) + 1) { /* Tx > 4*Rx possible for adaptivity test */
@@ -127,8 +116,6 @@ void odm_NHMBB(void *pDM_VOID)
}
}
}
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("adaptivity_flag = %d\n ", pDM_Odm->adaptivity_flag));
}
void odm_SearchPwdBLowerBound(void *pDM_VOID, u8 IGI_target)
@@ -165,7 +152,6 @@ void odm_SearchPwdBLowerBound(void *pDM_VOID, u8 IGI_target)
else
pDM_Odm->txEdcca0 = pDM_Odm->txEdcca0 + 1;
}
- /* DbgPrint("txEdcca1 = %d, txEdcca0 = %d\n", pDM_Odm->txEdcca1, pDM_Odm->txEdcca0); */
if (pDM_Odm->txEdcca1 > 5) {
IGI = IGI-1;
@@ -199,8 +185,6 @@ void odm_SearchPwdBLowerBound(void *pDM_VOID, u8 IGI_target)
pDM_Odm->Adaptivity_IGI_upper = IGI;
}
}
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("IGI = 0x%x, H2L_lb = 0x%x, L2H_lb = 0x%x\n", IGI, pDM_Odm->H2L_lb, pDM_Odm->L2H_lb));
}
void odm_AdaptivityInit(void *pDM_VOID)
@@ -239,19 +223,13 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI)
bool EDCCA_State = false;
if (!(pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("Go to odm_DynamicEDCCA()\n"));
return;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_Adaptivity() =====>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("ForceEDCCA =%d, IGI_Base = 0x%x, TH_L2H_ini = %d, TH_EDCCA_HL_diff = %d, AdapEn_RSSI = %d\n",
- pDM_Odm->ForceEDCCA, pDM_Odm->IGI_Base, pDM_Odm->TH_L2H_ini, pDM_Odm->TH_EDCCA_HL_diff, pDM_Odm->AdapEn_RSSI));
if (*pDM_Odm->pBandWidth == ODM_BW20M) /* CHANNEL_WIDTH_20 */
IGI_target = pDM_Odm->IGI_Base;
else if (*pDM_Odm->pBandWidth == ODM_BW40M)
IGI_target = pDM_Odm->IGI_Base + 2;
- else if (*pDM_Odm->pBandWidth == ODM_BW80M)
- IGI_target = pDM_Odm->IGI_Base + 2;
else
IGI_target = pDM_Odm->IGI_Base;
pDM_Odm->IGI_target = (u8) IGI_target;
@@ -284,19 +262,6 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI)
)
odm_NHMBB(pDM_Odm);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "BandWidth =%s, IGI_target = 0x%x, EDCCA_State =%d\n",
- (*pDM_Odm->pBandWidth == ODM_BW80M) ? "80M" :
- ((*pDM_Odm->pBandWidth == ODM_BW40M) ? "40M" : "20M"),
- IGI_target,
- EDCCA_State
- )
- );
-
if (EDCCA_State) {
Diff = IGI_target-(s8)IGI;
TH_L2H_dmc = pDM_Odm->TH_L2H_ini + Diff;
@@ -314,8 +279,6 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI)
TH_L2H_dmc = 0x7f;
TH_H2L_dmc = 0x7f;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("IGI = 0x%x, TH_L2H_dmc = %d, TH_H2L_dmc = %d\n",
- IGI, TH_L2H_dmc, TH_H2L_dmc));
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte0, (u8)TH_L2H_dmc);
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte2, (u8)TH_H2L_dmc);
}
@@ -326,18 +289,13 @@ void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI)
struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
if (pDM_DigTable->bStopDIG) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("Stop Writing IGI\n"));
return;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_TRACE, ("ODM_REG(IGI_A, pDM_Odm) = 0x%x, ODM_BIT(IGI, pDM_Odm) = 0x%x\n",
- ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
-
if (pDM_DigTable->CurIGValue != CurrentIGI) {
/* 1 Check initial gain by upper bound */
if (!pDM_DigTable->bPSDInProgress) {
if (CurrentIGI > pDM_DigTable->rx_gain_range_max) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_TRACE, ("CurrentIGI(0x%02x) is larger than upper bound !!\n", pDM_DigTable->rx_gain_range_max));
CurrentIGI = pDM_DigTable->rx_gain_range_max;
}
@@ -352,8 +310,6 @@ void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI)
pDM_DigTable->CurIGValue = CurrentIGI;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_TRACE, ("CurrentIGI(0x%02x).\n", CurrentIGI));
-
}
void odm_PauseDIG(
@@ -366,18 +322,10 @@ void odm_PauseDIG(
struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
static bool bPaused;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG() =========>\n"));
-
if (
(pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY) &&
pDM_Odm->TxHangFlg == true
) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_PauseDIG(): Dynamic adjust threshold in progress !!\n")
- );
return;
}
@@ -385,12 +333,6 @@ void odm_PauseDIG(
!bPaused && (!(pDM_Odm->SupportAbility & ODM_BB_DIG) ||
!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
){
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_PauseDIG(): Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n")
- );
return;
}
@@ -399,18 +341,15 @@ void odm_PauseDIG(
case ODM_PAUSE_DIG:
/* 2 Disable DIG */
ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility & (~ODM_BB_DIG));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Pause DIG !!\n"));
/* 2 Backup IGI value */
if (!bPaused) {
pDM_DigTable->IGIBackup = pDM_DigTable->CurIGValue;
bPaused = true;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Backup IGI = 0x%x\n", pDM_DigTable->IGIBackup));
/* 2 Write new IGI value */
ODM_Write_DIG(pDM_Odm, IGIValue);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Write new IGI = 0x%x\n", IGIValue));
break;
/* 1 Resume DIG */
@@ -419,16 +358,13 @@ void odm_PauseDIG(
/* 2 Write backup IGI value */
ODM_Write_DIG(pDM_Odm, pDM_DigTable->IGIBackup);
bPaused = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Write original IGI = 0x%x\n", pDM_DigTable->IGIBackup));
/* 2 Enable DIG */
ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility | ODM_BB_DIG);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Resume DIG !!\n"));
}
break;
default:
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Wrong type !!\n"));
break;
}
}
@@ -439,25 +375,21 @@ bool odm_DigAbort(void *pDM_VOID)
/* SupportAbility */
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: SupportAbility ODM_BB_FA_CNT is disabled\n"));
return true;
}
/* SupportAbility */
if (!(pDM_Odm->SupportAbility & ODM_BB_DIG)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: SupportAbility ODM_BB_DIG is disabled\n"));
return true;
}
/* ScanInProcess */
if (*(pDM_Odm->pbScanInProcess)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: In Scan Progress\n"));
return true;
}
/* add by Neil Chen to avoid PSD is processing */
if (pDM_Odm->bDMInitialGainEnable == false) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: PSD is Processing\n"));
return true;
}
@@ -523,8 +455,6 @@ void odm_DIG(void *pDM_VOID)
if (odm_DigAbort(pDM_Odm))
return;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() ===========================>\n\n"));
-
if (pDM_Odm->adaptivity_flag == true)
Adap_IGI_Upper = pDM_Odm->Adaptivity_IGI_upper;
@@ -540,15 +470,12 @@ void odm_DIG(void *pDM_VOID)
dm_dig_min = DM_DIG_MIN_NIC;
DIG_MaxOfMin = DM_DIG_MAX_AP;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Absolutely upper bound = 0x%x, lower bound = 0x%x\n", dm_dig_max, dm_dig_min));
-
/* 1 Adjust boundary by RSSI */
if (pDM_Odm->bLinked && bPerformance) {
/* 2 Modify DIG upper bound */
/* 4 Modify DIG upper bound for 92E, 8723A\B, 8821 & 8812 BT */
if (pDM_Odm->bBtLimitedDig == 1) {
offset = 10;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Coex. case: Force upper bound to RSSI + %d !!!!!!\n", offset));
} else
offset = 15;
@@ -586,49 +513,9 @@ void odm_DIG(void *pDM_VOID)
DIG_Dynamic_MIN = DIG_MaxOfMin;
else
DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_ANT_DIV,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Antenna diversity case: Force lower bound to 0x%x !!!!!!\n",
- DIG_Dynamic_MIN
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_ANT_DIV,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Antenna diversity case: RSSI_max = 0x%x !!!!!!\n",
- pDM_DigTable->AntDiv_RSSI_max
- )
- );
}
}
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Adjust boundary by RSSI Upper bound = 0x%x, Lower bound = 0x%x\n",
- pDM_DigTable->rx_gain_range_max,
- DIG_Dynamic_MIN
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Link status: bLinked = %d, RSSI = %d, bFirstConnect = %d, bFirsrDisConnect = %d\n\n",
- pDM_Odm->bLinked,
- pDM_Odm->RSSI_Min,
- FirstConnect,
- FirstDisConnect
- )
- );
/* 1 Modify DIG lower bound, deal with abnormal case */
/* 2 Abnormal false alarm case */
@@ -645,47 +532,20 @@ void odm_DIG(void *pDM_VOID)
pDM_Odm->bsta_state
) {
pDM_DigTable->rx_gain_range_min = dm_dig_min;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Abnormal #beacon (%d) case in STA mode: Force lower bound to 0x%x !!!!!!\n\n",
- pDM_Odm->PhyDbgInfo.NumQryBeaconPkt,
- pDM_DigTable->rx_gain_range_min
- )
- );
}
}
/* 2 Abnormal lower bound case */
if (pDM_DigTable->rx_gain_range_min > pDM_DigTable->rx_gain_range_max) {
pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Abnormal lower bound case: Force lower bound to 0x%x !!!!!!\n\n",
- pDM_DigTable->rx_gain_range_min
- )
- );
}
/* 1 False alarm threshold decision */
odm_FAThresholdCheck(pDM_Odm, bDFSBand, bPerformance, RxTp, TxTp, dm_FA_thres);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): False alarm threshold = %d, %d, %d\n\n", dm_FA_thres[0], dm_FA_thres[1], dm_FA_thres[2]));
/* 1 Adjust initial gain by false alarm */
if (pDM_Odm->bLinked && bPerformance) {
- /* 2 After link */
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIG(): Adjust IGI after link\n")
- );
if (bFirstTpTarget || (FirstConnect && bPerformance)) {
pDM_DigTable->LargeFAHit = 0;
@@ -698,16 +558,6 @@ void odm_DIG(void *pDM_VOID)
CurrentIGI = DIG_MaxOfMin;
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): First connect case: IGI does on-shot to 0x%x\n",
- CurrentIGI
- )
- );
-
} else {
if (pFalseAlmCnt->Cnt_all > dm_FA_thres[2])
CurrentIGI = CurrentIGI + 4;
@@ -722,35 +572,12 @@ void odm_DIG(void *pDM_VOID)
(pDM_Odm->bsta_state)
) {
CurrentIGI = pDM_DigTable->rx_gain_range_min;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): Abnormal #beacon (%d) case: IGI does one-shot to 0x%x\n",
- pDM_Odm->PhyDbgInfo.NumQryBeaconPkt,
- CurrentIGI
- )
- );
}
}
} else {
- /* 2 Before link */
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIG(): Adjust IGI before link\n")
- );
if (FirstDisConnect || bFirstCoverage) {
CurrentIGI = dm_dig_min;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIG(): First disconnect case: IGI does on-shot to lower bound\n")
- );
} else {
if (pFalseAlmCnt->Cnt_all > dm_FA_thres[2])
CurrentIGI = CurrentIGI + 4;
@@ -768,17 +595,6 @@ void odm_DIG(void *pDM_VOID)
if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
CurrentIGI = pDM_DigTable->rx_gain_range_max;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- (
- "odm_DIG(): CurIGValue = 0x%x, TotalFA = %d\n\n",
- CurrentIGI,
- pFalseAlmCnt->Cnt_all
- )
- );
-
/* 1 Force upper bound and lower bound for adaptivity */
if (
pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY &&
@@ -791,8 +607,6 @@ void odm_DIG(void *pDM_VOID)
if (CurrentIGI < pDM_Odm->IGI_LowerBound)
CurrentIGI = pDM_Odm->IGI_LowerBound;
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Adaptivity case: Force upper bound to 0x%x !!!!!!\n", Adap_IGI_Upper));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Adaptivity case: Force lower bound to 0x%x !!!!!!\n\n", pDM_Odm->IGI_LowerBound));
}
@@ -831,13 +645,6 @@ void odm_DIGbyRSSI_LPS(void *pDM_VOID)
CurrentIGI = CurrentIGI+RSSI_OFFSET_DIG;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIGbyRSSI_LPS() ==>\n")
- );
-
/* Using FW PS mode to make IGI */
/* Adjust by FA in LPS MODE */
if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
@@ -862,26 +669,6 @@ void odm_DIGbyRSSI_LPS(void *pDM_VOID)
else if (CurrentIGI < RSSI_Lower)
CurrentIGI = RSSI_Lower;
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIGbyRSSI_LPS(): pFalseAlmCnt->Cnt_all = %d\n", pFalseAlmCnt->Cnt_all)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIGbyRSSI_LPS(): pDM_Odm->RSSI_Min = %d\n", pDM_Odm->RSSI_Min)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_DIG,
- ODM_DBG_LOUD,
- ("odm_DIGbyRSSI_LPS(): CurrentIGI = 0x%x\n", CurrentIGI)
- );
-
ODM_Write_DIG(pDM_Odm, CurrentIGI);
/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
}
@@ -970,86 +757,6 @@ void odm_FalseAlarmCounterStatistics(void *pDM_VOID)
FalseAlmCnt->Cnt_CCA_all =
FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Enter odm_FalseAlarmCounterStatistics\n")
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- (
- "Cnt_Fast_Fsync =%d, Cnt_SB_Search_fail =%d\n",
- FalseAlmCnt->Cnt_Fast_Fsync,
- FalseAlmCnt->Cnt_SB_Search_fail
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- (
- "Cnt_Parity_Fail =%d, Cnt_Rate_Illegal =%d\n",
- FalseAlmCnt->Cnt_Parity_Fail,
- FalseAlmCnt->Cnt_Rate_Illegal
- )
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- (
- "Cnt_Crc8_fail =%d, Cnt_Mcs_fail =%d\n",
- FalseAlmCnt->Cnt_Crc8_fail,
- FalseAlmCnt->Cnt_Mcs_fail
- )
- );
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Cnt_OFDM_CCA =%d\n", FalseAlmCnt->Cnt_OFDM_CCA)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Cnt_CCK_CCA =%d\n", FalseAlmCnt->Cnt_CCK_CCA)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Cnt_CCA_all =%d\n", FalseAlmCnt->Cnt_CCA_all)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Cnt_Ofdm_fail =%d\n", FalseAlmCnt->Cnt_Ofdm_fail)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Cnt_Cck_fail =%d\n", FalseAlmCnt->Cnt_Cck_fail)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Cnt_Ofdm_fail =%d\n", FalseAlmCnt->Cnt_Ofdm_fail)
- );
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_FA_CNT,
- ODM_DBG_LOUD,
- ("Total False Alarm =%d\n", FalseAlmCnt->Cnt_all)
- );
}
@@ -1084,8 +791,6 @@ u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI)
u8 rx_gain_range_min = pDM_DigTable->rx_gain_range_min;
if (pFalseAlmCnt->Cnt_all > 10000) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Abnormally false alarm case.\n"));
-
if (pDM_DigTable->LargeFAHit != 3)
pDM_DigTable->LargeFAHit++;
@@ -1102,22 +807,18 @@ u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI)
else
rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 2);
pDM_DigTable->Recover_cnt = 1800;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Abnormally false alarm case: Recover_cnt = %d\n", pDM_DigTable->Recover_cnt));
}
} else {
if (pDM_DigTable->Recover_cnt != 0) {
pDM_DigTable->Recover_cnt--;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Recover_cnt = %d\n", pDM_DigTable->Recover_cnt));
} else {
if (pDM_DigTable->LargeFAHit < 3) {
if ((pDM_DigTable->ForbiddenIGI - 2) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n"));
} else {
pDM_DigTable->ForbiddenIGI -= 2;
rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 2);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Approach Lower Bound\n"));
}
} else
pDM_DigTable->LargeFAHit = 0;
@@ -1143,25 +844,12 @@ void odm_CCKPacketDetectionThresh(void *pDM_VOID)
!(pDM_Odm->SupportAbility & ODM_BB_CCK_PD) ||
!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)
) {
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CCK_PD,
- ODM_DBG_LOUD,
- ("odm_CCKPacketDetectionThresh() return ==========\n")
- );
return;
}
if (pDM_Odm->ExtLNA)
return;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CCK_PD,
- ODM_DBG_LOUD,
- ("odm_CCKPacketDetectionThresh() ==========>\n")
- );
-
if (pDM_Odm->bLinked) {
if (pDM_Odm->RSSI_Min > 25)
CurCCK_CCAThres = 0xcd;
@@ -1181,16 +869,6 @@ void odm_CCKPacketDetectionThresh(void *pDM_VOID)
}
ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_CCK_PD,
- ODM_DBG_LOUD,
- (
- "odm_CCKPacketDetectionThresh() CurCCK_CCAThres = 0x%x\n",
- CurCCK_CCAThres
- )
- );
}
void ODM_Write_CCK_CCA_Thres(void *pDM_VOID, u8 CurCCK_CCAThres)
diff --git a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
index 12b37c17ea0c..578d5712645c 100644
--- a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
+++ b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
@@ -37,19 +37,6 @@ void ODM_EdcaTurboInit(void *pDM_VOID)
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
- ("Original VO PARAM: 0x%x\n",
- rtw_read32(pDM_Odm->Adapter, ODM_EDCA_VO_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
- ("Original VI PARAM: 0x%x\n",
- rtw_read32(pDM_Odm->Adapter, ODM_EDCA_VI_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
- ("Original BE PARAM: 0x%x\n",
- rtw_read32(pDM_Odm->Adapter, ODM_EDCA_BE_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
- ("Original BK PARAM: 0x%x\n",
- rtw_read32(pDM_Odm->Adapter, ODM_EDCA_BK_PARAM)));
} /* ODM_InitEdcaTurbo */
void odm_EdcaTurboCheck(void *pDM_VOID)
@@ -60,15 +47,10 @@ void odm_EdcaTurboCheck(void *pDM_VOID)
*/
struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
- ("odm_EdcaTurboCheck ========================>\n"));
-
if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
return;
odm_EdcaTurboCheckCE(pDM_Odm);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
- ("<========================odm_EdcaTurboCheck\n"));
} /* odm_CheckEdcaTurbo */
void odm_EdcaTurboCheckCE(void *pDM_VOID)
@@ -142,12 +124,10 @@ void odm_EdcaTurboCheckCE(void *pDM_VOID)
} else if ((iot_peer == HT_IOT_PEER_CISCO) &&
((wirelessmode == ODM_WM_G) ||
(wirelessmode == (ODM_WM_B | ODM_WM_G)) ||
- (wirelessmode == ODM_WM_A) ||
(wirelessmode == ODM_WM_B))) {
EDCA_BE_DL = edca_setting_DL_GMode[iot_peer];
} else if ((iot_peer == HT_IOT_PEER_AIRGO) &&
- ((wirelessmode == ODM_WM_G) ||
- (wirelessmode == ODM_WM_A))) {
+ (wirelessmode == ODM_WM_G)) {
EDCA_BE_DL = 0xa630;
} else if (iot_peer == HT_IOT_PEER_MARVELL) {
EDCA_BE_DL = edca_setting_DL[iot_peer];
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index 638c16f5c668..32f7eb952ca8 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -59,9 +59,6 @@ static u8 odm_EVMdbToPercentage(s8 Value)
ret_val = Value;
ret_val /= 2;
- /* DbgPrint("Value =%d\n", Value); */
- /* ODM_RT_DISP(FRX, RX_PHY_SQ, ("EVMdbToPercentage92C Value =%d / %x\n", ret_val, ret_val)); */
-
if (ret_val >= 0)
ret_val = 0;
if (ret_val <= -33)
@@ -141,7 +138,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(
}
- /* DbgPrint("cck SQ = %d\n", SQ); */
pPhyInfo->signal_quality = SQ;
pPhyInfo->rx_mimo_signal_quality[ODM_RF_PATH_A] = SQ;
pPhyInfo->rx_mimo_signal_quality[ODM_RF_PATH_B] = -1;
@@ -168,7 +164,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(
/* Translate DBM to percentage. */
RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
total_rssi += RSSI;
- /* RT_DISP(FRX, RX_PHY_SS, ("RF-%d RXPWR =%x RSSI =%d\n", i, rx_pwr[i], RSSI)); */
pPhyInfo->rx_mimo_signal_strength[i] = (u8) RSSI;
@@ -183,10 +178,8 @@ static void odm_RxPhyStatus92CSeries_Parsing(
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1)&0x7f)-110;
PWDB_ALL_BT = PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
- /* RT_DISP(FRX, RX_PHY_SS, ("PWDB_ALL =%d\n", PWDB_ALL)); */
pPhyInfo->rx_pwd_ba11 = PWDB_ALL;
- /* ODM_RT_TRACE(pDM_Odm, ODM_COMP_RSSI_MONITOR, ODM_DBG_LOUD, ("ODM OFDM RSSI =%d\n", pPhyInfo->rx_pwd_ba11)); */
pPhyInfo->bt_rx_rssi_percentage = PWDB_ALL_BT;
pPhyInfo->rx_power = rx_pwr_all;
pPhyInfo->recv_signal_power = rx_pwr_all;
@@ -206,9 +199,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(
/* value to positive one, then the dbm value (which is supposed to be negative) is not correct anymore. */
EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
- /* RT_DISP(FRX, RX_PHY_SQ, ("RXRATE =%x RXEVM =%x EVM =%s%d\n", */
- /* GET_RX_STATUS_DESC_RX_MCS(pDesc), pDrvInfo->rxevm[i], "%", EVM)); */
-
/* if (pPktinfo->bPacketMatchBSSID) */
{
if (i == ODM_RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
@@ -232,9 +222,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(
pPhyInfo->signal_strength = (u8)(odm_SignalScaleMapping(pDM_Odm, total_rssi /= rf_rx_num));
}
}
-
- /* DbgPrint("isCCKrate = %d, pPhyInfo->rx_pwd_ba11 = %d, pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a = 0x%x\n", */
- /* isCCKrate, pPhyInfo->rx_pwd_ba11, pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a); */
}
static void odm_Process_RSSIForDM(
@@ -286,8 +273,6 @@ static void odm_Process_RSSIForDM(
pDM_Odm->RSSI_A = pPhyInfo->rx_mimo_signal_strength[ODM_RF_PATH_A];
pDM_Odm->RSSI_B = 0;
} else {
- /* DbgPrint("pRfd->Status.rx_mimo_signal_strength[0] = %d, pRfd->Status.rx_mimo_signal_strength[1] = %d\n", */
- /* pRfd->Status.rx_mimo_signal_strength[0], pRfd->Status.rx_mimo_signal_strength[1]); */
pDM_Odm->RSSI_A = pPhyInfo->rx_mimo_signal_strength[ODM_RF_PATH_A];
pDM_Odm->RSSI_B = pPhyInfo->rx_mimo_signal_strength[ODM_RF_PATH_B];
@@ -377,11 +362,6 @@ static void odm_Process_RSSIForDM(
pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
-
- /* DbgPrint("OFDM_pkt =%d, Weighting =%d\n", OFDM_pkt, Weighting); */
- /* DbgPrint("UndecoratedSmoothedOFDM =%d, UndecoratedSmoothedPWDB =%d, UndecoratedSmoothedCCK =%d\n", */
- /* UndecoratedSmoothedOFDM, UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK); */
-
}
}
@@ -427,12 +407,6 @@ enum hal_status ODM_ConfigRFWithHeaderFile(
enum odm_rf_radio_path_e eRFPath
)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("===>ODM_ConfigRFWithHeaderFile (%s)\n", (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
- pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface, pDM_Odm->BoardType));
-
if (ConfigType == CONFIG_RF_RADIO)
READ_AND_CONFIG(8723B, _RadioA);
else if (ConfigType == CONFIG_RF_TXPWR_LMT)
@@ -443,12 +417,6 @@ enum hal_status ODM_ConfigRFWithHeaderFile(
enum hal_status ODM_ConfigRFWithTxPwrTrackHeaderFile(struct dm_odm_t *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("===>ODM_ConfigRFWithTxPwrTrackHeaderFile (%s)\n", (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
- pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface, pDM_Odm->BoardType));
-
if (pDM_Odm->SupportInterface == ODM_ITRF_SDIO)
READ_AND_CONFIG(8723B, _TxPowerTrack_SDIO);
@@ -459,12 +427,6 @@ enum hal_status ODM_ConfigBBWithHeaderFile(
struct dm_odm_t *pDM_Odm, enum ODM_BB_Config_Type ConfigType
)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("===>ODM_ConfigBBWithHeaderFile (%s)\n", (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
- pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface, pDM_Odm->BoardType));
-
if (ConfigType == CONFIG_BB_PHY_REG)
READ_AND_CONFIG(8723B, _PHY_REG);
else if (ConfigType == CONFIG_BB_AGC_TAB)
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
index c3de123e2a48..ad169704f3e9 100644
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
+++ b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
@@ -29,9 +29,8 @@ static s16 odm_InbandNoise_Monitor_NSeries(
u8 max_rf_path = 0, rf_path;
u8 reg_c50, reg_c58, valid_done = 0;
struct noise_level noise_data;
- u32 start = 0, func_start = 0, func_end = 0;
+ u32 start = 0;
- func_start = jiffies;
pDM_Odm->noise_level.noise_all = 0;
if ((pDM_Odm->RFType == ODM_1T2R) || (pDM_Odm->RFType == ODM_2T2R))
@@ -39,8 +38,6 @@ static s16 odm_InbandNoise_Monitor_NSeries(
else
max_rf_path = 1;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_DebugControlInbandNoise_Nseries() ==>\n"));
-
memset(&noise_data, 0, sizeof(struct noise_level));
/* */
@@ -65,7 +62,6 @@ static s16 odm_InbandNoise_Monitor_NSeries(
/* Read Noise Floor Report */
tmp4b = PHY_QueryBBReg(pDM_Odm->Adapter, 0x8f8, bMaskDWord);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("Noise Floor Report (0x8f8) = 0x%08x\n", tmp4b));
/* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0, TestInitialGain); */
/* if (max_rf_path == 2) */
@@ -77,17 +73,10 @@ static s16 odm_InbandNoise_Monitor_NSeries(
noise_data.value[ODM_RF_PATH_A] = (u8)(tmp4b&0xff);
noise_data.value[ODM_RF_PATH_B] = (u8)((tmp4b&0xff00)>>8);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("value_a = 0x%x(%d), value_b = 0x%x(%d)\n",
- noise_data.value[ODM_RF_PATH_A], noise_data.value[ODM_RF_PATH_A], noise_data.value[ODM_RF_PATH_B], noise_data.value[ODM_RF_PATH_B]));
-
for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path; rf_path++) {
noise_data.sval[rf_path] = (s8)noise_data.value[rf_path];
noise_data.sval[rf_path] /= 2;
}
-
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("sval_a = %d, sval_b = %d\n",
- noise_data.sval[ODM_RF_PATH_A], noise_data.sval[ODM_RF_PATH_B]));
/* mdelay(10); */
/* msleep(10); */
@@ -95,11 +84,8 @@ static s16 odm_InbandNoise_Monitor_NSeries(
if ((noise_data.valid_cnt[rf_path] < ValidCnt) && (noise_data.sval[rf_path] < Valid_Max && noise_data.sval[rf_path] >= Valid_Min)) {
noise_data.valid_cnt[rf_path]++;
noise_data.sum[rf_path] += noise_data.sval[rf_path];
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RF_Path:%d Valid sval = %d\n", rf_path, noise_data.sval[rf_path]));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("Sum of sval = %d,\n", noise_data.sum[rf_path]));
if (noise_data.valid_cnt[rf_path] == ValidCnt) {
valid_done++;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("After divided, RF_Path:%d , sum = %d\n", rf_path, noise_data.sum[rf_path]));
}
}
@@ -120,43 +106,23 @@ static s16 odm_InbandNoise_Monitor_NSeries(
}
reg_c50 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
reg_c50 &= ~BIT7;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("0x%x = 0x%02x(%d)\n", rOFDM0_XAAGCCore1, reg_c50, reg_c50));
pDM_Odm->noise_level.noise[ODM_RF_PATH_A] = -110 + reg_c50 + noise_data.sum[ODM_RF_PATH_A];
pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[ODM_RF_PATH_A];
if (max_rf_path == 2) {
reg_c58 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
reg_c58 &= ~BIT7;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("0x%x = 0x%02x(%d)\n", rOFDM0_XBAGCCore1, reg_c58, reg_c58));
pDM_Odm->noise_level.noise[ODM_RF_PATH_B] = -110 + reg_c58 + noise_data.sum[ODM_RF_PATH_B];
pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[ODM_RF_PATH_B];
}
pDM_Odm->noise_level.noise_all /= max_rf_path;
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_COMMON,
- ODM_DBG_LOUD,
- (
- "noise_a = %d, noise_b = %d\n",
- pDM_Odm->noise_level.noise[ODM_RF_PATH_A],
- pDM_Odm->noise_level.noise[ODM_RF_PATH_B]
- )
- );
-
/* */
/* Step 4. Recover the Dig */
/* */
if (bPauseDIG)
odm_PauseDIG(pDM_Odm, ODM_RESUME_DIG, IGIValue);
- func_end = jiffies_to_msecs(jiffies - func_start);
- /* printk("%s noise_a = %d, noise_b = %d noise_all:%d (%d ms)\n", __func__, */
- /* pDM_Odm->noise_level.noise[ODM_RF_PATH_A], */
- /* pDM_Odm->noise_level.noise[ODM_RF_PATH_B], */
- /* pDM_Odm->noise_level.noise_all, func_end); */
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_DebugControlInbandNoise_Nseries() <==\n"));
return pDM_Odm->noise_level.noise_all;
}
diff --git a/drivers/staging/rtl8723bs/hal/odm_PathDiv.c b/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
deleted file mode 100644
index 92b708265d47..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-
-void odm_PathDiversityInit(void *pDM_VOID)
-{
- struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_PATH_DIV))
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_PATH_DIV,
- ODM_DBG_LOUD,
- ("Return: Not Support PathDiv\n")
- );
-}
-
-void odm_PathDiversity(void *pDM_VOID)
-{
- struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_PATH_DIV))
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_PATH_DIV,
- ODM_DBG_LOUD,
- ("Return: Not Support PathDiv\n")
- );
-}
diff --git a/drivers/staging/rtl8723bs/hal/odm_PathDiv.h b/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
deleted file mode 100644
index 7a5bc00c3682..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#ifndef __ODMPATHDIV_H__
-#define __ODMPATHDIV_H__
-
-void
-odm_PathDiversityInit(
- void *pDM_VOID
- );
-
-void
-odm_PathDiversity(
- void *pDM_VOID
- );
-
- #endif /* ifndef __ODMPATHDIV_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
index ecf0045fcad9..54518ea1be6b 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
+++ b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
@@ -29,7 +29,6 @@ s8 odm_CCKRSSI_8723B(u8 LNA_idx, u8 VGA_idx)
break;
default:
/* rx_pwr_all = -53+(2*(31-VGA_idx)); */
- /* DbgPrint("wrong LNA index\n"); */
break;
}
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
index 63bf5ba3e0d5..a29bd9375023 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
+++ b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
@@ -38,17 +38,6 @@ void odm_ConfigRFReg_8723B(
PHY_SetRFReg(pDM_Odm->Adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
udelay(1);
getvalue = PHY_QueryRFReg(pDM_Odm->Adapter, RF_PATH, Addr, bMaskDWord);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> ODM_ConfigRFWithHeaderFile: [B6] getvalue 0x%x, Data 0x%x, count %d\n",
- getvalue,
- Data,
- count
- )
- );
if (count > 5)
break;
}
@@ -86,17 +75,6 @@ void odm_ConfigRFReg_8723B(
getvalue = PHY_QueryRFReg(
pDM_Odm->Adapter, RF_PATH, Addr, bMaskDWord
);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> ODM_ConfigRFWithHeaderFile: [B2] getvalue 0x%x, Data 0x%x, count %d\n",
- getvalue,
- Data,
- count
- )
- );
if (count > 5)
break;
@@ -118,32 +96,11 @@ void odm_ConfigRF_RadioA_8723B(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Data)
ODM_RF_PATH_A,
Addr|maskforPhySet
);
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> ODM_ConfigRFWithHeaderFile: [RadioA] %08X %08X\n",
- Addr,
- Data
- )
- );
}
void odm_ConfigMAC_8723B(struct dm_odm_t *pDM_Odm, u32 Addr, u8 Data)
{
rtw_write8(pDM_Odm->Adapter, Addr, Data);
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> ODM_ConfigMACWithHeaderFile: [MAC_REG] %08X %08X\n",
- Addr,
- Data
- )
- );
}
void odm_ConfigBB_AGC_8723B(
@@ -156,22 +113,10 @@ void odm_ConfigBB_AGC_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
udelay(1);
-
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_TRACE,
- (
- "===> ODM_ConfigBBWithHeaderFile: [AGC_TAB] %08X %08X\n",
- Addr,
- Data
- )
- );
}
void odm_ConfigBB_PHY_REG_PG_8723B(
struct dm_odm_t *pDM_Odm,
- u32 Band,
u32 RfPath,
u32 TxNum,
u32 Addr,
@@ -182,19 +127,8 @@ void odm_ConfigBB_PHY_REG_PG_8723B(
if (Addr == 0xfe || Addr == 0xffe)
msleep(50);
else {
- PHY_StoreTxPowerByRate(pDM_Odm->Adapter, Band, RfPath, TxNum, Addr, Bitmask, Data);
+ PHY_StoreTxPowerByRate(pDM_Odm->Adapter, RfPath, TxNum, Addr, Bitmask, Data);
}
- ODM_RT_TRACE(
- pDM_Odm,
- ODM_COMP_INIT,
- ODM_DBG_LOUD,
- (
- "===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X %08X\n",
- Addr,
- Bitmask,
- Data
- )
- );
}
void odm_ConfigBB_PHY_8723B(
@@ -222,13 +156,11 @@ void odm_ConfigBB_PHY_8723B(
/* Add 1us delay between BB/RF register setting. */
udelay(1);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X\n", Addr, Data));
}
void odm_ConfigBB_TXPWR_LMT_8723B(
struct dm_odm_t *pDM_Odm,
u8 *Regulation,
- u8 *Band,
u8 *Bandwidth,
u8 *RateSection,
u8 *RfPath,
@@ -239,7 +171,6 @@ void odm_ConfigBB_TXPWR_LMT_8723B(
PHY_SetTxPowerLimit(
pDM_Odm->Adapter,
Regulation,
- Band,
Bandwidth,
RateSection,
RfPath,
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
index b392d14c389d..bdd6fde49fc6 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
+++ b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
@@ -25,7 +25,6 @@ void odm_ConfigBB_AGC_8723B(struct dm_odm_t *pDM_Odm,
);
void odm_ConfigBB_PHY_REG_PG_8723B(struct dm_odm_t *pDM_Odm,
- u32 Band,
u32 RfPath,
u32 TxNum,
u32 Addr,
@@ -41,7 +40,6 @@ void odm_ConfigBB_PHY_8723B(struct dm_odm_t *pDM_Odm,
void odm_ConfigBB_TXPWR_LMT_8723B(struct dm_odm_t *pDM_Odm,
u8 *Regulation,
- u8 *Band,
u8 *Bandwidth,
u8 *RateSection,
u8 *RfPath,
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.c b/drivers/staging/rtl8723bs/hal/odm_debug.c
deleted file mode 100644
index b35451bcb437..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_debug.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-
-void ODM_InitDebugSetting(struct dm_odm_t *pDM_Odm)
-{
- pDM_Odm->DebugLevel = ODM_DBG_LOUD;
-
- pDM_Odm->DebugComponents =
-/* BB Functions */
-/* ODM_COMP_DIG | */
-/* ODM_COMP_RA_MASK | */
-/* ODM_COMP_DYNAMIC_TXPWR | */
-/* ODM_COMP_FA_CNT | */
-/* ODM_COMP_RSSI_MONITOR | */
-/* ODM_COMP_CCK_PD | */
-/* ODM_COMP_ANT_DIV | */
-/* ODM_COMP_PWR_SAVE | */
-/* ODM_COMP_PWR_TRAIN | */
-/* ODM_COMP_RATE_ADAPTIVE | */
-/* ODM_COMP_PATH_DIV | */
-/* ODM_COMP_DYNAMIC_PRICCA | */
-/* ODM_COMP_RXHP | */
-/* ODM_COMP_MP | */
-/* ODM_COMP_CFO_TRACKING | */
-
-/* MAC Functions */
-/* ODM_COMP_EDCA_TURBO | */
-/* ODM_COMP_EARLY_MODE | */
-/* RF Functions */
-/* ODM_COMP_TX_PWR_TRACK | */
-/* ODM_COMP_RX_GAIN_TRACK | */
-/* ODM_COMP_CALIBRATION | */
-/* Common */
-/* ODM_COMP_COMMON | */
-/* ODM_COMP_INIT | */
-/* ODM_COMP_PSD | */
-0;
-}
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.h b/drivers/staging/rtl8723bs/hal/odm_debug.h
deleted file mode 100644
index be0d4c49a747..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_debug.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#ifndef __ODM_DBG_H__
-#define __ODM_DBG_H__
-
-
-/* */
-/* Define the debug levels */
-/* */
-/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
-/* So that, they can help SW engineer to developed or trace states changed */
-/* and also help HW enginner to trace every operation to and from HW, */
-/* e.g IO, Tx, Rx. */
-/* */
-/* 2. DBG_WARNNING and DBG_SERIOUS are used for unusual or error cases, */
-/* which help us to debug SW or HW. */
-/* */
-/* */
-/* */
-/* Never used in a call to ODM_RT_TRACE()! */
-/* */
-#define ODM_DBG_OFF 1
-
-/* */
-/* Fatal bug. */
-/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
-/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
-/* */
-#define ODM_DBG_SERIOUS 2
-
-/* */
-/* Abnormal, rare, or unexpected cases. */
-/* For example, */
-/* IRP/Packet/OID canceled, */
-/* device suprisely unremoved and so on. */
-/* */
-#define ODM_DBG_WARNING 3
-
-/* */
-/* Normal case with useful information about current SW or HW state. */
-/* For example, Tx/Rx descriptor to fill, Tx/Rx descriptor completed status, */
-/* SW protocol state change, dynamic mechanism state change and so on. */
-/* */
-#define ODM_DBG_LOUD 4
-
-/* */
-/* Normal case with detail execution flow or information. */
-/* */
-#define ODM_DBG_TRACE 5
-
-/* */
-/* Define the tracing components */
-/* */
-/* */
-/* BB Functions */
-#define ODM_COMP_DIG BIT0
-#define ODM_COMP_RA_MASK BIT1
-#define ODM_COMP_DYNAMIC_TXPWR BIT2
-#define ODM_COMP_FA_CNT BIT3
-#define ODM_COMP_RSSI_MONITOR BIT4
-#define ODM_COMP_CCK_PD BIT5
-#define ODM_COMP_ANT_DIV BIT6
-#define ODM_COMP_PWR_SAVE BIT7
-#define ODM_COMP_PWR_TRAIN BIT8
-#define ODM_COMP_RATE_ADAPTIVE BIT9
-#define ODM_COMP_PATH_DIV BIT10
-#define ODM_COMP_PSD BIT11
-#define ODM_COMP_DYNAMIC_PRICCA BIT12
-#define ODM_COMP_RXHP BIT13
-#define ODM_COMP_MP BIT14
-#define ODM_COMP_CFO_TRACKING BIT15
-/* MAC Functions */
-#define ODM_COMP_EDCA_TURBO BIT16
-#define ODM_COMP_EARLY_MODE BIT17
-/* RF Functions */
-#define ODM_COMP_TX_PWR_TRACK BIT24
-#define ODM_COMP_RX_GAIN_TRACK BIT25
-#define ODM_COMP_CALIBRATION BIT26
-/* Common Functions */
-#define ODM_COMP_COMMON BIT30
-#define ODM_COMP_INIT BIT31
-
-/*------------------------Export Marco Definition---------------------------*/
- #define DbgPrint printk
- #define RT_PRINTK(fmt, args...)\
- DbgPrint("%s(): " fmt, __func__, ## args)
- #define RT_DISP(dbgtype, dbgflag, printstr)
-
-#ifndef ASSERT
- #define ASSERT(expr)
-#endif
-
-#if DBG
-#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt)\
- do {\
- if (\
- (comp & pDM_Odm->DebugComponents) &&\
- (level <= pDM_Odm->DebugLevel ||\
- level == ODM_DBG_SERIOUS)\
- ) {\
- RT_PRINTK fmt;\
- } \
- } while (0)
-
-#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt)\
- do {\
- if (\
- (comp & pDM_Odm->DebugComponents) &&\
- (level <= pDM_Odm->DebugLevel)\
- ) {\
- RT_PRINTK fmt;\
- } \
- } while (0)
-
-#define ODM_RT_ASSERT(pDM_Odm, expr, fmt)\
- do {\
- if (!expr) {\
- DbgPrint("Assertion failed! %s at ......\n", #expr);\
- DbgPrint(\
- " ......%s,%s, line =%d\n",\
- __FILE__,\
- __func__,\
- __LINE__\
- );\
- RT_PRINTK fmt;\
- ASSERT(false);\
- } \
- } while (0)
-#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
-
-#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr)\
- do {\
- if (\
- (comp & pDM_Odm->DebugComponents) &&\
- (level <= pDM_Odm->DebugLevel)\
- ) {\
- int __i;\
- u8 *__ptr = (u8 *)ptr;\
- DbgPrint("[ODM] ");\
- DbgPrint(title_str);\
- DbgPrint(" ");\
- for (__i = 0; __i < 6; __i++)\
- DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-");\
- DbgPrint("\n");\
- } \
- } while (0)
-#else
-#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) no_printk fmt
-#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) no_printk fmt
-#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) no_printk fmt
-#define ODM_dbg_enter() do {} while (0)
-#define ODM_dbg_exit() do {} while (0)
-#define ODM_dbg_trace(str) no_printk("%s", str)
-#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
- no_printk("%s %p", title_str, ptr)
-#endif
-
-void ODM_InitDebugSetting(struct dm_odm_t *pDM_Odm);
-
-#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_precomp.h b/drivers/staging/rtl8723bs/hal/odm_precomp.h
index d48d681472d5..5041c9535e9a 100644
--- a/drivers/staging/rtl8723bs/hal/odm_precomp.h
+++ b/drivers/staging/rtl8723bs/hal/odm_precomp.h
@@ -27,11 +27,9 @@
#include "odm.h"
#include "odm_HWConfig.h"
-#include "odm_debug.h"
#include "odm_RegDefine11N.h"
#include "odm_EdcaTurboCheck.h"
#include "odm_DIG.h"
-#include "odm_PathDiv.h"
#include "odm_DynamicBBPowerSaving.h"
#include "odm_DynamicTxPower.h"
#include "odm_CfoTracking.h"
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 2451875ab3c0..2b7077bb52df 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -114,14 +114,13 @@ static void ConstructBeacon(struct adapter *padapter, u8 *pframe, u32 *pLength)
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pwlanhdr = (struct ieee80211_hdr *)pframe;
fctrl = &(pwlanhdr->frame_control);
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
@@ -309,9 +308,6 @@ static void rtl8723b_set_FwRsvdPage_cmd(struct adapter *padapter, struct rsvdpag
SET_8723B_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1H2CRsvdPageParm, rsvdpageloc->LocQosNull);
SET_8723B_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1H2CRsvdPageParm, rsvdpageloc->LocBTQosNull);
- print_hex_dump_debug(DRIVER_PREFIX ": u1H2CRsvdPageParm:", DUMP_PREFIX_NONE,
- 16, 1, u1H2CRsvdPageParm, H2C_RSVDPAGE_LOC_LEN, false);
-
FillH2CCmd8723B(padapter, H2C_8723B_RSVD_PAGE, H2C_RSVDPAGE_LOC_LEN, u1H2CRsvdPageParm);
}
@@ -329,9 +325,6 @@ void rtl8723b_set_FwMediaStatusRpt_cmd(struct adapter *padapter, u8 mstatus, u8
SET_8723B_H2CCMD_MSRRPT_PARM_MACID(u1H2CMediaStatusRptParm, macid);
SET_8723B_H2CCMD_MSRRPT_PARM_MACID_END(u1H2CMediaStatusRptParm, macid_end);
- print_hex_dump_debug(DRIVER_PREFIX ": u1H2CMediaStatusRptParm:", DUMP_PREFIX_NONE,
- 16, 1, u1H2CMediaStatusRptParm, H2C_MEDIA_STATUS_RPT_LEN, false);
-
FillH2CCmd8723B(padapter, H2C_8723B_MEDIA_STATUS_RPT, H2C_MEDIA_STATUS_RPT_LEN, u1H2CMediaStatusRptParm);
}
@@ -348,9 +341,6 @@ void rtl8723b_set_FwMacIdConfig_cmd(struct adapter *padapter, u8 mac_id, u8 raid
SET_8723B_H2CCMD_MACID_CFG_RATE_MASK2(u1H2CMacIdConfigParm, (u8)((mask & 0x00ff0000) >> 16));
SET_8723B_H2CCMD_MACID_CFG_RATE_MASK3(u1H2CMacIdConfigParm, (u8)((mask & 0xff000000) >> 24));
- print_hex_dump_debug(DRIVER_PREFIX ": u1H2CMacIdConfigParm:", DUMP_PREFIX_NONE,
- 16, 1, u1H2CMacIdConfigParm, H2C_MACID_CFG_LEN, false);
-
FillH2CCmd8723B(padapter, H2C_8723B_MACID_CFG, H2C_MACID_CFG_LEN, u1H2CMacIdConfigParm);
}
@@ -365,9 +355,6 @@ void rtl8723b_set_rssi_cmd(struct adapter *padapter, u8 *param)
SET_8723B_H2CCMD_RSSI_SETTING_RSSI(u1H2CRssiSettingParm, rssi);
SET_8723B_H2CCMD_RSSI_SETTING_ULDL_STATE(u1H2CRssiSettingParm, uldl_state);
- print_hex_dump_debug(DRIVER_PREFIX ": u1H2CRssiSettingParm:", DUMP_PREFIX_NONE,
- 16, 1, u1H2CRssiSettingParm, H2C_RSSI_SETTING_LEN, false);
-
FillH2CCmd8723B(padapter, H2C_8723B_RSSI_SETTING, H2C_RSSI_SETTING_LEN, u1H2CRssiSettingParm);
}
@@ -465,9 +452,6 @@ void rtl8723b_set_FwPwrMode_cmd(struct adapter *padapter, u8 psmode)
hal_btcoex_RecordPwrMode(padapter, u1H2CPwrModeParm, H2C_PWRMODE_LEN);
- print_hex_dump_debug(DRIVER_PREFIX ": u1H2CPwrModeParm:", DUMP_PREFIX_NONE,
- 16, 1, u1H2CPwrModeParm, H2C_PWRMODE_LEN, false);
-
FillH2CCmd8723B(padapter, H2C_8723B_SET_PWR_MODE, H2C_PWRMODE_LEN, u1H2CPwrModeParm);
}
@@ -485,9 +469,6 @@ void rtl8723b_set_FwPsTuneParam_cmd(struct adapter *padapter)
SET_8723B_H2CCMD_PSTUNE_PARM_ADOPT(u1H2CPsTuneParm, 1);
SET_8723B_H2CCMD_PSTUNE_PARM_DTIM_PERIOD(u1H2CPsTuneParm, dtim_period);
- print_hex_dump_debug(DRIVER_PREFIX ": u1H2CPsTuneParm:", DUMP_PREFIX_NONE,
- 16, 1, u1H2CPsTuneParm, H2C_PSTUNEPARAM_LEN, false);
-
FillH2CCmd8723B(padapter, H2C_8723B_PS_TUNING_PARA, H2C_PSTUNEPARAM_LEN, u1H2CPsTuneParm);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index 23be025ceb5b..5840a5241fde 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -109,7 +109,6 @@ static void Update_ODM_ComInfo_8723b(struct adapter *Adapter)
ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_CHNL, &(pHalData->CurrentChannel));
ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_NET_CLOSED, &(Adapter->net_closed));
ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_MP_MODE, &zero);
- ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_BAND, &(pHalData->CurrentBandType));
ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_FORCED_IGI_LB, &(pHalData->u1ForcedIgiLb));
ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_FORCED_RATE, &(pHalData->ForcedDataRate));
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 082448557b53..059d3050acc6 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -122,7 +122,6 @@ static int _WriteFW(struct adapter *padapter, void *buffer, u32 size)
u8 *bufferPtr = buffer;
pageNums = size / MAX_DLFW_PAGE_SIZE;
- /* RT_ASSERT((pageNums <= 4), ("Page numbers should not greater then 4\n")); */
remainSize = size % MAX_DLFW_PAGE_SIZE;
for (page = 0; page < pageNums; page++) {
@@ -1149,10 +1148,8 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
retU2 = ((bank-1)*EFUSE_BT_REAL_BANK_CONTENT_LEN)+efuse_addr;
if (bPseudoTest) {
pEfuseHal->fakeBTEfuseUsedBytes = retU2;
- /* RT_DISP(FEEPROM, EFUSE_PG, ("Hal_EfuseGetCurrentSize_BT92C(), already use %u bytes\n", pEfuseHal->fakeBTEfuseUsedBytes)); */
} else {
pEfuseHal->BTEfuseUsedBytes = retU2;
- /* RT_DISP(FEEPROM, EFUSE_PG, ("Hal_EfuseGetCurrentSize_BT92C(), already use %u bytes\n", pEfuseHal->BTEfuseUsedBytes)); */
}
return retU2;
@@ -2512,15 +2509,8 @@ u8 BWMapping_8723B(struct adapter *Adapter, struct pkt_attrib *pattrib)
u8 BWSettingOfDesc = 0;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_80) {
- if (pattrib->bwmode == CHANNEL_WIDTH_80)
- BWSettingOfDesc = 2;
- else if (pattrib->bwmode == CHANNEL_WIDTH_40)
- BWSettingOfDesc = 1;
- else
- BWSettingOfDesc = 0;
- } else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
- if ((pattrib->bwmode == CHANNEL_WIDTH_40) || (pattrib->bwmode == CHANNEL_WIDTH_80))
+ if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
+ if (pattrib->bwmode == CHANNEL_WIDTH_40)
BWSettingOfDesc = 1;
else
BWSettingOfDesc = 0;
@@ -2538,38 +2528,20 @@ u8 SCMapping_8723B(struct adapter *Adapter, struct pkt_attrib *pattrib)
u8 SCSettingOfDesc = 0;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_80) {
- if (pattrib->bwmode == CHANNEL_WIDTH_80) {
- SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
- } else if (pattrib->bwmode == CHANNEL_WIDTH_40) {
- if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
- SCSettingOfDesc = VHT_DATA_SC_40_LOWER_OF_80MHZ;
- else if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
- SCSettingOfDesc = VHT_DATA_SC_40_UPPER_OF_80MHZ;
- } else {
- if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER))
- SCSettingOfDesc = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
- else if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER))
- SCSettingOfDesc = VHT_DATA_SC_20_LOWER_OF_80MHZ;
- else if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER))
- SCSettingOfDesc = VHT_DATA_SC_20_UPPER_OF_80MHZ;
- else if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER))
- SCSettingOfDesc = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
- }
- } else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
+ if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
if (pattrib->bwmode == CHANNEL_WIDTH_40) {
- SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ SCSettingOfDesc = HT_DATA_SC_DONOT_CARE;
} else if (pattrib->bwmode == CHANNEL_WIDTH_20) {
if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) {
- SCSettingOfDesc = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ SCSettingOfDesc = HT_DATA_SC_20_UPPER_OF_40MHZ;
} else if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) {
- SCSettingOfDesc = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ SCSettingOfDesc = HT_DATA_SC_20_LOWER_OF_40MHZ;
} else {
- SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ SCSettingOfDesc = HT_DATA_SC_DONOT_CARE;
}
}
} else {
- SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ SCSettingOfDesc = HT_DATA_SC_DONOT_CARE;
}
return SCSettingOfDesc;
@@ -3315,9 +3287,6 @@ void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length)
C2hEvent.CmdLen = length-2;
tmpBuf = pbuffer+2;
- print_hex_dump_debug(DRIVER_PREFIX ": C2HPacketHandler_8723B(): Command Content:\n",
- DUMP_PREFIX_NONE, 16, 1, tmpBuf, C2hEvent.CmdLen, false);
-
process_c2h_event(padapter, &C2hEvent, tmpBuf);
/* c2h_handler_8723b(padapter,&C2hEvent); */
}
@@ -3898,27 +3867,19 @@ u8 GetHalDefVar8723B(struct adapter *padapter, enum hal_def_variable variable, v
{
u8 mac_id = *(u8 *)pval;
u32 cmd;
- u32 ra_info1, ra_info2;
- u32 rate_mask1, rate_mask2;
- u8 curr_tx_rate, curr_tx_sgi, hight_rate, lowest_rate;
cmd = 0x40000100 | mac_id;
rtw_write32(padapter, REG_HMEBOX_DBG_2_8723B, cmd);
msleep(10);
- ra_info1 = rtw_read32(padapter, 0x2F0);
- curr_tx_rate = ra_info1&0x7F;
- curr_tx_sgi = (ra_info1>>7)&0x01;
+ rtw_read32(padapter, 0x2F0); // info 1
cmd = 0x40000400 | mac_id;
rtw_write32(padapter, REG_HMEBOX_DBG_2_8723B, cmd);
msleep(10);
- ra_info1 = rtw_read32(padapter, 0x2F0);
- ra_info2 = rtw_read32(padapter, 0x2F4);
- rate_mask1 = rtw_read32(padapter, 0x2F8);
- rate_mask2 = rtw_read32(padapter, 0x2FC);
- hight_rate = ra_info2&0xFF;
- lowest_rate = (ra_info2>>8) & 0xFF;
-
+ rtw_read32(padapter, 0x2F0); // info 1
+ rtw_read32(padapter, 0x2F4); // info 2
+ rtw_read32(padapter, 0x2F8); // rate mask 1
+ rtw_read32(padapter, 0x2FC); // rate mask 2
}
break;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index f43abf9b0d22..6e524034f388 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -38,7 +38,7 @@ static u32 phy_CalculateBitShift(u32 BitMask)
/**
- * PHY_QueryBBReg - Read "specific bits" from BB register.
+ * PHY_QueryBBReg_8723B - Read "specific bits" from BB register.
* @Adapter:
* @RegAddr: The target address to be readback
* @BitMask: The target bit position in the target address
@@ -53,10 +53,6 @@ u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
{
u32 OriginalValue, BitShift;
-#if (DISABLE_BB_RF == 1)
- return 0;
-#endif
-
OriginalValue = rtw_read32(Adapter, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
@@ -66,7 +62,7 @@ u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
/**
- * PHY_SetBBReg - Write "Specific bits" to BB register (page 8~).
+ * PHY_SetBBReg_8723B - Write "Specific bits" to BB register (page 8~).
* @Adapter:
* @RegAddr: The target address to be modified
* @BitMask: The target bit position in the target address
@@ -88,10 +84,6 @@ void PHY_SetBBReg_8723B(
/* u16 BBWaitCounter = 0; */
u32 OriginalValue, BitShift;
-#if (DISABLE_BB_RF == 1)
- return;
-#endif
-
if (BitMask != bMaskDWord) { /* if not "double word" write */
OriginalValue = rtw_read32(Adapter, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
@@ -231,10 +223,10 @@ static void phy_RFSerialWrite_8723B(
/**
- * PHY_QueryRFReg - Query "Specific bits" to RF register (page 8~).
+ * PHY_QueryRFReg_8723B - Query "Specific bits" to RF register (page 8~).
* @Adapter:
* @eRFPath: Radio path of A/B/C/D
- * @RegAdd: The target address to be read
+ * @RegAddr: The target address to be read
* @BitMask: The target bit position in the target address
* to be read
*
@@ -252,10 +244,6 @@ u32 PHY_QueryRFReg_8723B(
{
u32 Original_Value, BitShift;
-#if (DISABLE_BB_RF == 1)
- return 0;
-#endif
-
Original_Value = phy_RFSerialRead_8723B(Adapter, eRFPath, RegAddr);
BitShift = phy_CalculateBitShift(BitMask);
@@ -263,7 +251,7 @@ u32 PHY_QueryRFReg_8723B(
}
/**
- * PHY_SetRFReg - Write "Specific bits" to RF register (page 8~).
+ * PHY_SetRFReg_8723B - Write "Specific bits" to RF register (page 8~).
* @Adapter:
* @eRFPath: Radio path of A/B/C/D
* @RegAddr: The target address to be modified
@@ -285,10 +273,6 @@ void PHY_SetRFReg_8723B(
{
u32 Original_Value, BitShift;
-#if (DISABLE_BB_RF == 1)
- return;
-#endif
-
/* RF data is 12 bits only */
if (BitMask != bRFRegOffsetMask) {
Original_Value = phy_RFSerialRead_8723B(Adapter, eRFPath, RegAddr);
@@ -562,15 +546,13 @@ u8 PHY_GetTxPowerIndex(
{
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
s8 txPower = 0, powerDiffByRate = 0, limit = 0;
- bool bIn24G = false;
- txPower = (s8) PHY_GetTxPowerIndexBase(padapter, RFPath, Rate, BandWidth, Channel, &bIn24G);
- powerDiffByRate = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, ODM_RF_PATH_A, RF_1TX, Rate);
+ txPower = (s8) PHY_GetTxPowerIndexBase(padapter, RFPath, Rate, BandWidth, Channel);
+ powerDiffByRate = PHY_GetTxPowerByRate(padapter, ODM_RF_PATH_A, RF_1TX, Rate);
limit = phy_get_tx_pwr_lmt(
padapter,
padapter->registrypriv.RegPwrTblSel,
- (u8)(!bIn24G),
pHalData->CurrentChannelBW,
RFPath,
Rate,
@@ -625,11 +607,6 @@ static void phy_SetRegBW_8723B(
rtw_write16(Adapter, REG_TRXPTCL_CTL_8723B, (u2tmp & 0xFEFF)); /* BIT 7 = 1, BIT 8 = 0 */
break;
- case CHANNEL_WIDTH_80:
- u2tmp = RegRfMod_BW | BIT8;
- rtw_write16(Adapter, REG_TRXPTCL_CTL_8723B, (u2tmp & 0xFF7F)); /* BIT 7 = 0, BIT 8 = 1 */
- break;
-
default:
break;
}
@@ -640,37 +617,11 @@ static u8 phy_GetSecondaryChnl_8723B(struct adapter *Adapter)
u8 SCSettingOf40 = 0, SCSettingOf20 = 0;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_80) {
- if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
- SCSettingOf40 = VHT_DATA_SC_40_LOWER_OF_80MHZ;
- else if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
- SCSettingOf40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
-
- if (
- (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
- )
- SCSettingOf20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
- else if (
- (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) &&
- (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
- )
- SCSettingOf20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
- else if (
- (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
- )
- SCSettingOf20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
- else if (
- (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) &&
- (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
- )
- SCSettingOf20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
- } else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
+ if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
- SCSettingOf20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ SCSettingOf20 = HT_DATA_SC_20_UPPER_OF_40MHZ;
else if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
- SCSettingOf20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ SCSettingOf20 = HT_DATA_SC_20_LOWER_OF_40MHZ;
}
return (SCSettingOf40 << 4) | SCSettingOf20;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 7c2680b6508c..ad803ffc0696 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -108,7 +108,7 @@ static void update_recvframe_phyinfo(union recv_frame *precvframe,
!pattrib->icv_err && !pattrib->crc_err &&
ether_addr_equal(rx_bssid, my_bssid));
- rx_ra = get_ra(wlanhdr);
+ rx_ra = rtl8723bs_get_ra(wlanhdr);
my_hwaddr = myid(&padapter->eeprompriv);
pkt_info.to_self = pkt_info.bssid_match &&
ether_addr_equal(rx_ra, my_hwaddr);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index bd95e62fb053..a05d43b716ee 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -178,7 +178,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
struct hw_xmit *hwxmits, *phwxmit;
u8 idx, hwentry;
struct tx_servq *ptxservq;
- struct list_head *sta_plist, *sta_phead, *frame_plist, *frame_phead;
+ struct list_head *sta_plist, *sta_phead, *frame_plist, *frame_phead, *tmp;
struct xmit_frame *pxmitframe;
struct __queue *pframe_queue;
struct xmit_buf *pxmitbuf;
@@ -223,12 +223,11 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
spin_lock_bh(&pxmitpriv->lock);
sta_phead = get_list_head(phwxmit->sta_queue);
- sta_plist = get_next(sta_phead);
/* because stop_sta_xmit may delete sta_plist at any time */
/* so we should add lock here, or while loop can not exit */
- while (sta_phead != sta_plist) {
- ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
- sta_plist = get_next(sta_plist);
+ list_for_each_safe(sta_plist, tmp, sta_phead) {
+ ptxservq = list_entry(sta_plist, struct tx_servq,
+ tx_pending);
pframe_queue = &ptxservq->sta_pending;
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index abd90026a8c7..a07a6dacec42 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _SDIO_HALINIT_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <rtl8723b_hal.h>
@@ -228,7 +226,6 @@ static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
value = QUEUE_NORMAL;
break;
default:
- /* RT_ASSERT(false, ("Shall not reach here!\n")); */
break;
}
@@ -262,7 +259,6 @@ static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
valueLow = QUEUE_NORMAL;
break;
default:
- /* RT_ASSERT(false, ("Shall not reach here!\n")); */
break;
}
@@ -327,7 +323,6 @@ static void _InitQueuePriority(struct adapter *Adapter)
_InitNormalChipThreeOutEpPriority(Adapter);
break;
default:
- /* RT_ASSERT(false, ("Shall not reach here!\n")); */
break;
}
@@ -517,9 +512,6 @@ static void _InitOperationMode(struct adapter *padapter)
case WIRELESS_MODE_B:
regBwOpMode = BW_OPMODE_20MHZ;
break;
- case WIRELESS_MODE_A:
-/* RT_ASSERT(false, ("Error wireless a mode\n")); */
- break;
case WIRELESS_MODE_G:
regBwOpMode = BW_OPMODE_20MHZ;
break;
@@ -531,10 +523,6 @@ static void _InitOperationMode(struct adapter *padapter)
/* CCK rate will be filtered out only when associated AP does not support it. */
regBwOpMode = BW_OPMODE_20MHZ;
break;
- case WIRELESS_MODE_N_5G:
-/* RT_ASSERT(false, ("Error wireless mode")); */
- regBwOpMode = BW_OPMODE_5G;
- break;
default: /* for MacOSX compiler warning. */
break;
@@ -567,11 +555,6 @@ static void _InitRFType(struct adapter *padapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
-#if DISABLE_BB_RF
- pHalData->rf_chip = RF_PSEUDO_11N;
- return;
-#endif
-
pHalData->rf_chip = RF_6052;
pHalData->rf_type = RF_1T1R;
@@ -695,29 +678,23 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
/* <Roger_Notes> Current Channel will be updated again later. */
pHalData->CurrentChannel = 6;
-#if (HAL_MAC_ENABLE == 1)
ret = PHY_MACConfig8723B(padapter);
if (ret != _SUCCESS)
return ret;
-#endif
/* */
/* d. Initialize BB related configurations. */
/* */
-#if (HAL_BB_ENABLE == 1)
ret = PHY_BBConfig8723B(padapter);
if (ret != _SUCCESS)
return ret;
-#endif
/* If RF is on, we need to init RF. Otherwise, skip the procedure. */
/* We need to follow SU method to change the RF cfg.txt. Default disable RF TX/RX mode. */
/* if (pHalData->eRFPowerState == eRfOn) */
{
-#if (HAL_RF_ENABLE == 1)
ret = PHY_RFConfig8723B(padapter);
if (ret != _SUCCESS)
return ret;
-#endif
}
/* */
@@ -798,8 +775,6 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
rtl8723b_InitHalDm(padapter);
- /* DbgPrint("pHalData->DefaultTxPwrDbm = %d\n", pHalData->DefaultTxPwrDbm); */
-
/* */
/* Update current Tx FIFO page status. */
/* */
@@ -878,10 +853,9 @@ static void CardDisableRTL8723BSdio(struct adapter *padapter)
{
u8 u1bTmp;
u8 bMacPwrCtrlOn;
- u8 ret = _FAIL;
/* Run LPS WL RFOFF flow */
- ret = HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_enter_lps_flow);
+ HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_enter_lps_flow);
/* ==== Reset digital sequence ====== */
@@ -909,9 +883,8 @@ static void CardDisableRTL8723BSdio(struct adapter *padapter)
/* ==== Reset digital sequence end ====== */
bMacPwrCtrlOn = false; /* Disable CMD53 R/W */
- ret = false;
rtw_hal_set_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
- ret = HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_card_disable_flow);
+ HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_card_disable_flow);
}
static u32 rtl8723bs_hal_deinit(struct adapter *padapter)
@@ -1046,11 +1019,7 @@ static void _ReadRFType(struct adapter *Adapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
-#if DISABLE_BB_RF
- pHalData->rf_chip = RF_PSEUDO_11N;
-#else
pHalData->rf_chip = RF_6052;
-#endif
}
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index a31694525bc1..a545832a468e 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
*******************************************************************************/
-#define _SDIO_OPS_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <rtl8723b_hal.h>
@@ -160,7 +158,7 @@ static u32 sdio_read32(struct intf_hdl *intfhdl, u32 addr)
u32 ftaddr;
u8 shift;
u32 val;
- s32 err;
+ s32 __maybe_unused err;
__le32 le_tmp;
adapter = intfhdl->padapter;
@@ -348,11 +346,6 @@ static s32 sdio_writeN(struct intf_hdl *intfhdl, u32 addr, u32 cnt, u8 *buf)
return err;
}
-static u8 sdio_f0_read8(struct intf_hdl *intfhdl, u32 addr)
-{
- return sd_f0_read8(intfhdl, addr, NULL);
-}
-
static void sdio_read_mem(
struct intf_hdl *intfhdl,
u32 addr,
@@ -360,10 +353,7 @@ static void sdio_read_mem(
u8 *rmem
)
{
- s32 err;
-
- err = sdio_readN(intfhdl, addr, cnt, rmem);
- /* TODO: Report error is err not zero */
+ sdio_readN(intfhdl, addr, cnt, rmem);
}
static void sdio_write_mem(
@@ -486,8 +476,6 @@ void sdio_set_intf_ops(struct adapter *adapter, struct _io_ops *ops)
ops->_writeN = &sdio_writeN;
ops->_write_mem = &sdio_write_mem;
ops->_write_port = &sdio_write_port;
-
- ops->_sd_f0_read8 = sdio_f0_read8;
}
/*
@@ -921,6 +909,8 @@ void sd_int_dpc(struct adapter *adapter)
} else {
rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
}
+ } else {
+ kfree(c2h_evt);
}
} else {
/* Error handling for malloc fail */
diff --git a/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h b/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
index 4b3a7c051630..aad962548278 100644
--- a/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
+++ b/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
@@ -558,7 +558,6 @@
#define b3WireRFPowerDown 0x1 /* Useless now */
/* define bHWSISelect 0x8 */
-#define b5GPAPEPolarity 0x40000000
#define b2GPAPEPolarity 0x80000000
#define bRFSW_TxDefaultAnt 0x3
#define bRFSW_TxOptionAnt 0x30
@@ -577,7 +576,6 @@
#define bRFSI_ANTSW 0x100
#define bRFSI_ANTSWB 0x200
#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
#define bBandSelect 0x1
#define bHTSIG2_GI 0x80
#define bHTSIG2_Smoothing 0x01
diff --git a/drivers/staging/rtl8723bs/include/autoconf.h b/drivers/staging/rtl8723bs/include/autoconf.h
deleted file mode 100644
index 944a7d2a1e53..000000000000
--- a/drivers/staging/rtl8723bs/include/autoconf.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-/*
- * Automatically generated C config: don't edit
- */
-
-/*
- * Functions Config
- */
-
-#ifndef CONFIG_WIRELESS_EXT
-#error CONFIG_WIRELESS_EXT needs to be enabled for this driver to work
-#endif
-
-/*
- * Auto Config Section
- */
-#define LPS_RPWM_WAIT_MS 300
-#ifndef DISABLE_BB_RF
-#define DISABLE_BB_RF 0
-#endif
-
-#if DISABLE_BB_RF
- #define HAL_MAC_ENABLE 0
- #define HAL_BB_ENABLE 0
- #define HAL_RF_ENABLE 0
-#else
- #define HAL_MAC_ENABLE 1
- #define HAL_BB_ENABLE 1
- #define HAL_RF_ENABLE 1
-#endif
-
-/*
- * Platform dependent
- */
-#define WAKEUP_GPIO_IDX 12 /* WIFI Chip Side */
-
-/*
- * Debug Related Config
- */
-
-#define DBG 0 /* for ODM & BTCOEX debug */
-
-/* define DBG_XMIT_BUF */
-/* define DBG_XMIT_BUF_EXT */
diff --git a/drivers/staging/rtl8723bs/include/drv_conf.h b/drivers/staging/rtl8723bs/include/drv_conf.h
deleted file mode 100644
index 9cef9ce589a1..000000000000
--- a/drivers/staging/rtl8723bs/include/drv_conf.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __DRV_CONF_H__
-#define __DRV_CONF_H__
-#include "autoconf.h"
-
-#define DYNAMIC_CAMID_ALLOC
-
-#ifndef CONFIG_RTW_HIQ_FILTER
- #define CONFIG_RTW_HIQ_FILTER 1
-#endif
-
-//#include <rtl871x_byteorder.h>
-
-#endif // __DRV_CONF_H__
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 83d43e5726dd..895c41526164 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -15,7 +15,6 @@
#define __DRV_TYPES_H__
#include <linux/sched/signal.h>
-#include <autoconf.h>
#include <basic_types.h>
#include <osdep_service.h>
#include <rtw_byteorder.h>
@@ -109,9 +108,11 @@ struct registry_priv {
struct wlan_bssid_ex dev_network;
u8 ht_enable;
- /* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160MHz */
- /* 2.4G use bit 0 ~ 3, 5G use bit 4 ~ 7 */
- /* 0x21 means enable 2.4G 40MHz & 5G 80MHz */
+ /*
+ * 0: 20 MHz, 1: 40 MHz
+ * 2.4G use bit 0 ~ 3
+ * 0x01 means enable 2.4G 40MHz
+ */
u8 bw_mode;
u8 ampdu_enable;/* for tx */
u8 rx_stbc;
@@ -170,9 +171,7 @@ struct registry_priv {
u8 RegPowerBase;
u8 RegPwrTblSel;
s8 TxBBSwing_2G;
- s8 TxBBSwing_5G;
u8 AmplifierType_2G;
- u8 AmplifierType_5G;
u8 bEn_RFE;
u8 RFE_Type;
u8 check_fw_ps;
@@ -503,7 +502,6 @@ static inline u8 *myid(struct eeprom_priv *peepriv)
void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
void rtw_indicate_wx_assoc_event(struct adapter *padapter);
-void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
void indicate_wx_scan_complete_event(struct adapter *padapter);
int rtw_change_ifname(struct adapter *padapter, const char *ifname);
diff --git a/drivers/staging/rtl8723bs/include/hal_btcoex.h b/drivers/staging/rtl8723bs/include/hal_btcoex.h
index 3c03be210d87..849fb90b43b7 100644
--- a/drivers/staging/rtl8723bs/include/hal_btcoex.h
+++ b/drivers/staging/rtl8723bs/include/hal_btcoex.h
@@ -9,6 +9,8 @@
#include <drv_types.h>
+#define LPS_RPWM_WAIT_MS 300
+
/* Some variables can't get from outsrc BT-Coex, */
/* so we need to save here */
struct bt_coexist {
@@ -53,7 +55,5 @@ u8 hal_btcoex_LpsVal(struct adapter *);
u32 hal_btcoex_GetRaMask(struct adapter *);
void hal_btcoex_RecordPwrMode(struct adapter *padapter, u8 *pCmdBuf, u8 cmdLen);
void hal_btcoex_DisplayBtCoexInfo(struct adapter *, u8 *pbuf, u32 bufsize);
-void hal_btcoex_SetDBG(struct adapter *, u32 *pDbgModule);
-u32 hal_btcoex_GetDBG(struct adapter *, u8 *pStrBuf, u32 bufSize);
#endif /* !__HAL_BTCOEX_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index 6bcc443d59fb..1bc332261b2a 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -69,46 +69,6 @@
#define DESC_RATEMCS29 0x29
#define DESC_RATEMCS30 0x2A
#define DESC_RATEMCS31 0x2B
-#define DESC_RATEVHTSS1MCS0 0x2C
-#define DESC_RATEVHTSS1MCS1 0x2D
-#define DESC_RATEVHTSS1MCS2 0x2E
-#define DESC_RATEVHTSS1MCS3 0x2F
-#define DESC_RATEVHTSS1MCS4 0x30
-#define DESC_RATEVHTSS1MCS5 0x31
-#define DESC_RATEVHTSS1MCS6 0x32
-#define DESC_RATEVHTSS1MCS7 0x33
-#define DESC_RATEVHTSS1MCS8 0x34
-#define DESC_RATEVHTSS1MCS9 0x35
-#define DESC_RATEVHTSS2MCS0 0x36
-#define DESC_RATEVHTSS2MCS1 0x37
-#define DESC_RATEVHTSS2MCS2 0x38
-#define DESC_RATEVHTSS2MCS3 0x39
-#define DESC_RATEVHTSS2MCS4 0x3A
-#define DESC_RATEVHTSS2MCS5 0x3B
-#define DESC_RATEVHTSS2MCS6 0x3C
-#define DESC_RATEVHTSS2MCS7 0x3D
-#define DESC_RATEVHTSS2MCS8 0x3E
-#define DESC_RATEVHTSS2MCS9 0x3F
-#define DESC_RATEVHTSS3MCS0 0x40
-#define DESC_RATEVHTSS3MCS1 0x41
-#define DESC_RATEVHTSS3MCS2 0x42
-#define DESC_RATEVHTSS3MCS3 0x43
-#define DESC_RATEVHTSS3MCS4 0x44
-#define DESC_RATEVHTSS3MCS5 0x45
-#define DESC_RATEVHTSS3MCS6 0x46
-#define DESC_RATEVHTSS3MCS7 0x47
-#define DESC_RATEVHTSS3MCS8 0x48
-#define DESC_RATEVHTSS3MCS9 0x49
-#define DESC_RATEVHTSS4MCS0 0x4A
-#define DESC_RATEVHTSS4MCS1 0x4B
-#define DESC_RATEVHTSS4MCS2 0x4C
-#define DESC_RATEVHTSS4MCS3 0x4D
-#define DESC_RATEVHTSS4MCS4 0x4E
-#define DESC_RATEVHTSS4MCS5 0x4F
-#define DESC_RATEVHTSS4MCS6 0x50
-#define DESC_RATEVHTSS4MCS7 0x51
-#define DESC_RATEVHTSS4MCS8 0x52
-#define DESC_RATEVHTSS4MCS9 0x53
#define HDATA_RATE(rate)\
(rate == DESC_RATE1M) ? "CCK_1M" : \
@@ -138,27 +98,7 @@
(rate == DESC_RATEMCS12) ? "MCS12" : \
(rate == DESC_RATEMCS13) ? "MCS13" : \
(rate == DESC_RATEMCS14) ? "MCS14" : \
-(rate == DESC_RATEMCS15) ? "MCS15" : \
-(rate == DESC_RATEVHTSS1MCS0) ? "VHTSS1MCS0" : \
-(rate == DESC_RATEVHTSS1MCS1) ? "VHTSS1MCS1" : \
-(rate == DESC_RATEVHTSS1MCS2) ? "VHTSS1MCS2" : \
-(rate == DESC_RATEVHTSS1MCS3) ? "VHTSS1MCS3" : \
-(rate == DESC_RATEVHTSS1MCS4) ? "VHTSS1MCS4" : \
-(rate == DESC_RATEVHTSS1MCS5) ? "VHTSS1MCS5" : \
-(rate == DESC_RATEVHTSS1MCS6) ? "VHTSS1MCS6" : \
-(rate == DESC_RATEVHTSS1MCS7) ? "VHTSS1MCS7" : \
-(rate == DESC_RATEVHTSS1MCS8) ? "VHTSS1MCS8" : \
-(rate == DESC_RATEVHTSS1MCS9) ? "VHTSS1MCS9" : \
-(rate == DESC_RATEVHTSS2MCS0) ? "VHTSS2MCS0" : \
-(rate == DESC_RATEVHTSS2MCS1) ? "VHTSS2MCS1" : \
-(rate == DESC_RATEVHTSS2MCS2) ? "VHTSS2MCS2" : \
-(rate == DESC_RATEVHTSS2MCS3) ? "VHTSS2MCS3" : \
-(rate == DESC_RATEVHTSS2MCS4) ? "VHTSS2MCS4" : \
-(rate == DESC_RATEVHTSS2MCS5) ? "VHTSS2MCS5" : \
-(rate == DESC_RATEVHTSS2MCS6) ? "VHTSS2MCS6" : \
-(rate == DESC_RATEVHTSS2MCS7) ? "VHTSS2MCS7" : \
-(rate == DESC_RATEVHTSS2MCS8) ? "VHTSS2MCS8" : \
-(rate == DESC_RATEVHTSS2MCS9) ? "VHTSS2MCS9" : "UNKNOWN"
+(rate == DESC_RATEMCS15) ? "MCS15" : "UNKNOWN"
enum{
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index 73f6cadb5c79..c966d0e3e5ae 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -19,10 +19,6 @@ enum rate_section {
HT_MCS8_MCS15,
HT_MCS16_MCS23,
HT_MCS24_MCS31,
- VHT_1SSMCS0_1SSMCS9,
- VHT_2SSMCS0_2SSMCS9,
- VHT_3SSMCS0_3SSMCS9,
- VHT_4SSMCS0_4SSMCS9,
};
enum {
@@ -70,153 +66,55 @@ struct bb_register_def {
};
-u8
-PHY_GetTxPowerByRateBase(
-struct adapter *Adapter,
-u8 Band,
-u8 RfPath,
-u8 TxNum,
-enum rate_section RateSection
- );
-
-u8
-PHY_GetRateSectionIndexOfTxPowerByRate(
-struct adapter *padapter,
-u32 RegAddr,
-u32 BitMask
- );
-
-void
-PHY_GetRateValuesOfTxPowerByRate(
-struct adapter *padapter,
-u32 RegAddr,
-u32 BitMask,
-u32 Value,
-u8 *RateIndex,
-s8 *PwrByRateVal,
-u8 *RateNum
- );
-
-u8
-PHY_GetRateIndexOfTxPowerByRate(
-u8 Rate
- );
-
-void
-PHY_SetTxPowerIndexByRateSection(
-struct adapter *padapter,
-u8 RFPath,
-u8 Channel,
-u8 RateSection
- );
-
-s8
-PHY_GetTxPowerByRate(
-struct adapter *padapter,
-u8 Band,
-u8 RFPath,
-u8 TxNum,
-u8 RateIndex
- );
-
-void
-PHY_SetTxPowerByRate(
-struct adapter *padapter,
-u8 Band,
-u8 RFPath,
-u8 TxNum,
-u8 Rate,
-s8 Value
- );
-
-void
-PHY_SetTxPowerLevelByPath(
-struct adapter *Adapter,
-u8 channel,
-u8 path
- );
-
-void
-PHY_SetTxPowerIndexByRateArray(
-struct adapter *padapter,
-u8 RFPath,
-enum channel_width BandWidth,
-u8 Channel,
-u8 *Rates,
-u8 RateArraySize
- );
-
-void
-PHY_InitTxPowerByRate(
-struct adapter *padapter
- );
-
-void
-PHY_StoreTxPowerByRate(
-struct adapter *padapter,
-u32 Band,
-u32 RfPath,
-u32 TxNum,
-u32 RegAddr,
-u32 BitMask,
-u32 Data
- );
-
-void
-PHY_TxPowerByRateConfiguration(
- struct adapter *padapter
- );
-
-u8
-PHY_GetTxPowerIndexBase(
-struct adapter *padapter,
-u8 RFPath,
-u8 Rate,
-enum channel_width BandWidth,
-u8 Channel,
- bool *bIn24G
- );
+u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 RfPath, u8 TxNum,
+ enum rate_section RateSection);
+
+u8 PHY_GetRateSectionIndexOfTxPowerByRate(struct adapter *padapter, u32 RegAddr,
+ u32 BitMask);
+
+void PHY_GetRateValuesOfTxPowerByRate(struct adapter *padapter, u32 RegAddr,
+ u32 BitMask, u32 Value, u8 *RateIndex,
+ s8 *PwrByRateVal, u8 *RateNum);
+
+u8 PHY_GetRateIndexOfTxPowerByRate(u8 Rate);
+
+void PHY_SetTxPowerIndexByRateSection(struct adapter *padapter, u8 RFPath, u8 Channel,
+ u8 RateSection);
+
+s8 PHY_GetTxPowerByRate(struct adapter *padapter, u8 RFPath, u8 TxNum, u8 RateIndex);
+
+void PHY_SetTxPowerByRate(struct adapter *padapter, u8 RFPath, u8 TxNum, u8 Rate,
+ s8 Value);
+
+void PHY_SetTxPowerLevelByPath(struct adapter *Adapter, u8 channel, u8 path);
+
+void PHY_SetTxPowerIndexByRateArray(struct adapter *padapter, u8 RFPath,
+ enum channel_width BandWidth, u8 Channel,
+ u8 *Rates, u8 RateArraySize);
+
+void PHY_InitTxPowerByRate(struct adapter *padapter);
+
+void PHY_StoreTxPowerByRate(struct adapter *padapter, u32 RfPath, u32 TxNum,
+ u32 RegAddr, u32 BitMask, u32 Data);
+
+void PHY_TxPowerByRateConfiguration(struct adapter *padapter);
+
+u8 PHY_GetTxPowerIndexBase(struct adapter *padapter, u8 RFPath, u8 Rate,
+ enum channel_width BandWidth, u8 Channel);
s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 RegPwrTblSel,
- enum band_type Band, enum channel_width Bandwidth,
-u8 RfPath,
-u8 DataRate,
-u8 Channel
- );
-
-void
-PHY_SetTxPowerLimit(
-struct adapter *Adapter,
-u8 *Regulation,
-u8 *Band,
-u8 *Bandwidth,
-u8 *RateSection,
-u8 *RfPath,
-u8 *Channel,
-u8 *PowerLimit
- );
-
-void
-PHY_ConvertTxPowerLimitToPowerIndex(
-struct adapter *Adapter
- );
-
-void
-PHY_InitTxPowerLimit(
-struct adapter *Adapter
- );
-
-s8
-PHY_GetTxPowerTrackingOffset(
- struct adapter *padapter,
- u8 Rate,
- u8 RFPath
- );
-
-void
-Hal_ChannelPlanToRegulation(
-struct adapter *Adapter,
-u16 ChannelPlan
- );
+ enum channel_width Bandwidth, u8 RfPath, u8 DataRate,
+ u8 Channel);
+
+void PHY_SetTxPowerLimit(struct adapter *Adapter, u8 *Regulation, u8 *Bandwidth,
+ u8 *RateSection, u8 *RfPath, u8 *Channel, u8 *PowerLimit);
+
+void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter);
+
+void PHY_InitTxPowerLimit(struct adapter *Adapter);
+
+s8 PHY_GetTxPowerTrackingOffset(struct adapter *padapter, u8 Rate, u8 RFPath);
+
+void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan);
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h
index b14585cb0233..b2f179b48019 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_reg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h
@@ -717,7 +717,6 @@ Default: 00b.
/* BW_OPMODE bits (Offset 0x603, 8bit) */
/* */
#define BW_OPMODE_20MHZ BIT2
-#define BW_OPMODE_5G BIT1
/* */
/* CAM Config Setting (offset 0x680, 1 byte) */
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index babcb03a7c23..3298fa8eb682 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -46,21 +46,16 @@ enum rt_ampdu_burst {
RT_AMPDU_BURST_8723B = 7,
};
-#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel number */
+#define CHANNEL_MAX_NUMBER (14) /* 14 is the max channel number */
#define CHANNEL_MAX_NUMBER_2G 14
-#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to "phy_GetChnlGroup8812A" and "Hal_ReadTxPowerInfo8812A" */
-#define CHANNEL_MAX_NUMBER_5G_80M 7
#define MAX_PG_GROUP 13
/* Tx Power Limit Table Size */
#define MAX_REGULATION_NUM 4
#define MAX_2_4G_BANDWIDTH_NUM 4
#define MAX_RATE_SECTION_NUM 10
-#define MAX_5G_BANDWIDTH_NUM 4
#define MAX_BASE_NUM_IN_PHY_REG_PG_2_4G 10 /* CCK:1, OFDM:1, HT:4, VHT:4 */
-#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 9 /* OFDM:1, HT:4, VHT:4 */
-
/* duplicate code, will move to ODM ######### */
/* define IQK_MAC_REG_NUM 4 */
@@ -182,8 +177,6 @@ struct hal_com_data {
/* current WIFI_PHY values */
enum wireless_mode CurrentWirelessMode;
enum channel_width CurrentChannelBW;
- enum band_type CurrentBandType; /* 0:2.4G, 1:5G */
- enum band_type BandSet;
u8 CurrentChannel;
u8 CurrentCenterFrequencyIndex1;
u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
@@ -236,16 +229,8 @@ struct hal_com_data {
s8 OFDM_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
s8 BW20_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
s8 BW40_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
- /* 3 [5G] */
- u8 Index5G_BW40_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- u8 Index5G_BW80_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
- s8 OFDM_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
- s8 BW20_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
- s8 BW40_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
- s8 BW80_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 Regulation2_4G;
- u8 Regulation5G;
u8 TxPwrInPercentage;
@@ -253,15 +238,13 @@ struct hal_com_data {
/* TX power by rate table at most 4RF path. */
/* The register is */
/* VHT TX power by rate off setArray = */
- /* Band:-2G&5G = 0 / 1 */
/* RF: at most 4*4 = ABCD = 0/1/2/3 */
/* CCK = 0 OFDM = 1/2 HT-MCS 0-15 =3/4/56 VHT =7/8/9/10/11 */
u8 TxPwrByRateTable;
u8 TxPwrByRateBand;
- s8 TxPwrByRateOffset[TX_PWR_BY_RATE_NUM_BAND]
- [TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RATE];
+ s8 TxPwrByRateOffset[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RATE];
/* */
/* 2 Power Limit Table */
@@ -278,21 +261,10 @@ struct hal_com_data {
[CHANNEL_MAX_NUMBER_2G]
[MAX_RF_PATH_NUM];
- /* Power Limit Table for 5G */
- s8 TxPwrLimit_5G[MAX_REGULATION_NUM]
- [MAX_5G_BANDWIDTH_NUM]
- [MAX_RATE_SECTION_NUM]
- [CHANNEL_MAX_NUMBER_5G]
- [MAX_RF_PATH_NUM];
-
-
/* Store the original power by rate value of the base of each rate section of rf path A & B */
u8 TxPwrByRateBase2_4G[TX_PWR_BY_RATE_NUM_RF]
[TX_PWR_BY_RATE_NUM_RF]
[MAX_BASE_NUM_IN_PHY_REG_PG_2_4G];
- u8 TxPwrByRateBase5G[TX_PWR_BY_RATE_NUM_RF]
- [TX_PWR_BY_RATE_NUM_RF]
- [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
/* For power group */
u8 PwrGroupHT20[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER];
@@ -319,13 +291,9 @@ struct hal_com_data {
u32 AntennaRxPath; /* Antenna path Rx */
u8 PAType_2G;
- u8 PAType_5G;
u8 LNAType_2G;
- u8 LNAType_5G;
u8 ExternalPA_2G;
u8 ExternalLNA_2G;
- u8 ExternalPA_5G;
- u8 ExternalLNA_5G;
u8 TypeGLNA;
u8 TypeGPA;
u8 TypeALNA;
diff --git a/drivers/staging/rtl8723bs/include/hal_pg.h b/drivers/staging/rtl8723bs/include/hal_pg.h
index 0b7a8adf5c74..2d8ccc9ddebb 100644
--- a/drivers/staging/rtl8723bs/include/hal_pg.h
+++ b/drivers/staging/rtl8723bs/include/hal_pg.h
@@ -16,10 +16,8 @@
/* For VHT series TX power by rate table. */
/* VHT TX power by rate off setArray = */
-/* Band:-2G&5G = 0 / 1 */
/* RF: at most 4*4 = ABCD = 0/1/2/3 */
/* CCK = 0 OFDM = 1/2 HT-MCS 0-15 =3/4/56 VHT =7/8/9/10/11 */
-#define TX_PWR_BY_RATE_NUM_BAND 2
#define TX_PWR_BY_RATE_NUM_RF 4
#define TX_PWR_BY_RATE_NUM_RATE 84
#define MAX_RF_PATH_NUM 2
diff --git a/drivers/staging/rtl8723bs/include/hal_phy.h b/drivers/staging/rtl8723bs/include/hal_phy.h
index 521eb1c2efad..861aa71cd179 100644
--- a/drivers/staging/rtl8723bs/include/hal_phy.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy.h
@@ -6,20 +6,6 @@
******************************************************************************/
#ifndef __HAL_PHY_H__
#define __HAL_PHY_H__
-
-
-#if DISABLE_BB_RF
-#define HAL_FW_ENABLE 0
-#define HAL_MAC_ENABLE 0
-#define HAL_BB_ENABLE 0
-#define HAL_RF_ENABLE 0
-#else /* FPGA_PHY and ASIC */
-#define HAL_FW_ENABLE 1
-#define HAL_MAC_ENABLE 1
-#define HAL_BB_ENABLE 1
-#define HAL_RF_ENABLE 1
-#endif
-
/* */
/* Antenna detection method, i.e., using single tone detection or RSSI reported from each antenna detected. */
/* Added by Roger, 2013.05.22. */
@@ -31,13 +17,6 @@
/*--------------------------Define Parameters-------------------------------*/
-enum band_type {
- BAND_ON_2_4G = 0,
- BAND_ON_5G,
- BAND_ON_BOTH,
- BANDMAX
-};
-
enum {
RF_TYPE_MIN = 0, /* 0 */
RF_8225 = 1, /* 1 11b/g RF for verification only */
@@ -65,13 +44,10 @@ enum rf_path {
enum wireless_mode {
WIRELESS_MODE_UNKNOWN = 0x00,
- WIRELESS_MODE_A = 0x01,
WIRELESS_MODE_B = 0x02,
WIRELESS_MODE_G = 0x04,
WIRELESS_MODE_AUTO = 0x08,
WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20,
- WIRELESS_MODE_AC_5G = 0x40,
WIRELESS_MODE_AC_24G = 0x80,
WIRELESS_MODE_AC_ONLY = 0x100,
};
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 6540c7a22938..378c21595e05 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -60,7 +60,6 @@ enum {
#define WLAN_STA_HT BIT(11)
#define WLAN_STA_WPS BIT(12)
#define WLAN_STA_MAYBE_WPS BIT(13)
-#define WLAN_STA_VHT BIT(14)
#define WLAN_STA_NONERP BIT(31)
#define IEEE_CMD_SET_WPA_PARAM 1
@@ -135,8 +134,6 @@ enum {
RATEID_IDX_BG = 6,
RATEID_IDX_G = 7,
RATEID_IDX_B = 8,
- RATEID_IDX_VHT_2SS = 9,
- RATEID_IDX_VHT_1SS = 10,
};
enum network_type {
@@ -144,33 +141,20 @@ enum network_type {
/* Sub-Element */
WIRELESS_11B = BIT(0), /* tx: cck only , rx: cck only, hw: cck */
WIRELESS_11G = BIT(1), /* tx: ofdm only, rx: ofdm & cck, hw: cck & ofdm */
- WIRELESS_11A = BIT(2), /* tx: ofdm only, rx: ofdm only, hw: ofdm only */
WIRELESS_11_24N = BIT(3), /* tx: MCS only, rx: MCS & cck, hw: MCS & cck */
- WIRELESS_11_5N = BIT(4), /* tx: MCS only, rx: MCS & ofdm, hw: ofdm only */
WIRELESS_AUTO = BIT(5),
- WIRELESS_11AC = BIT(6),
/* Combination */
/* Type for current wireless mode */
WIRELESS_11BG = (WIRELESS_11B|WIRELESS_11G), /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
WIRELESS_11G_24N = (WIRELESS_11G|WIRELESS_11_24N), /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
- WIRELESS_11A_5N = (WIRELESS_11A|WIRELESS_11_5N), /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
WIRELESS_11B_24N = (WIRELESS_11B|WIRELESS_11_24N), /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
WIRELESS_11BG_24N = (WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N), /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
- WIRELESS_11_24AC = (WIRELESS_11G|WIRELESS_11AC),
- WIRELESS_11_5AC = (WIRELESS_11A|WIRELESS_11AC),
-
-
- /* Type for registry default wireless mode */
- WIRELESS_11AGN = (WIRELESS_11A|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N), /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
- WIRELESS_11ABGN = (WIRELESS_11A|WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N),
- WIRELESS_MODE_24G = (WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11AC),
- WIRELESS_MODE_MAX = (WIRELESS_11A|WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N|WIRELESS_11AC),
};
#define SUPPORTED_24G_NETTYPE_MSK (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
-#define IsLegacyOnly(NetType) ((NetType) == ((NetType) & (WIRELESS_11BG|WIRELESS_11A)))
+#define IsLegacyOnly(NetType) ((NetType) == ((NetType) & (WIRELESS_11BG)))
#define IsSupported24G(NetType) ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
@@ -182,11 +166,8 @@ enum network_type {
#define IsSupportedRxHT(NetType) IsEnableHWOFDM(NetType)
#define IsSupportedTxCCK(NetType) (((NetType) & (WIRELESS_11B)) ? true : false)
-#define IsSupportedTxOFDM(NetType) (((NetType) & (WIRELESS_11G|WIRELESS_11A)) ? true : false)
-#define IsSupportedHT(NetType) (((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N)) ? true : false)
-
-#define IsSupportedVHT(NetType) (((NetType) & (WIRELESS_11AC)) ? true : false)
-
+#define IsSupportedTxOFDM(NetType) (((NetType) & (WIRELESS_11G)) ? true : false)
+#define IsSupportedHT(NetType) (((NetType) & (WIRELESS_11_24N)) ? true : false)
struct ieee_param {
u32 cmd;
@@ -440,51 +421,10 @@ enum {
MGN_MCS29,
MGN_MCS30,
MGN_MCS31,
- MGN_VHT1SS_MCS0,
- MGN_VHT1SS_MCS1,
- MGN_VHT1SS_MCS2,
- MGN_VHT1SS_MCS3,
- MGN_VHT1SS_MCS4,
- MGN_VHT1SS_MCS5,
- MGN_VHT1SS_MCS6,
- MGN_VHT1SS_MCS7,
- MGN_VHT1SS_MCS8,
- MGN_VHT1SS_MCS9,
- MGN_VHT2SS_MCS0,
- MGN_VHT2SS_MCS1,
- MGN_VHT2SS_MCS2,
- MGN_VHT2SS_MCS3,
- MGN_VHT2SS_MCS4,
- MGN_VHT2SS_MCS5,
- MGN_VHT2SS_MCS6,
- MGN_VHT2SS_MCS7,
- MGN_VHT2SS_MCS8,
- MGN_VHT2SS_MCS9,
- MGN_VHT3SS_MCS0,
- MGN_VHT3SS_MCS1,
- MGN_VHT3SS_MCS2,
- MGN_VHT3SS_MCS3,
- MGN_VHT3SS_MCS4,
- MGN_VHT3SS_MCS5,
- MGN_VHT3SS_MCS6,
- MGN_VHT3SS_MCS7,
- MGN_VHT3SS_MCS8,
- MGN_VHT3SS_MCS9,
- MGN_VHT4SS_MCS0,
- MGN_VHT4SS_MCS1,
- MGN_VHT4SS_MCS2,
- MGN_VHT4SS_MCS3,
- MGN_VHT4SS_MCS4,
- MGN_VHT4SS_MCS5,
- MGN_VHT4SS_MCS6,
- MGN_VHT4SS_MCS7,
- MGN_VHT4SS_MCS8,
- MGN_VHT4SS_MCS9,
MGN_UNKNOWN
};
#define IS_HT_RATE(_rate) (_rate >= MGN_MCS0 && _rate <= MGN_MCS31)
-#define IS_VHT_RATE(_rate) (_rate >= MGN_VHT1SS_MCS0 && _rate <= MGN_VHT4SS_MCS9)
#define IS_CCK_RATE(_rate) (MGN_1M == _rate || _rate == MGN_2M || _rate == MGN_5_5M || _rate == MGN_11M)
#define IS_OFDM_RATE(_rate) (MGN_6M <= _rate && _rate <= MGN_54M && _rate != MGN_11M)
@@ -641,7 +581,6 @@ enum {
RTW_WLAN_CATEGORY_TDLS = 12,
RTW_WLAN_CATEGORY_SELF_PROTECTED = 15, /* add for CONFIG_IEEE80211W, none 11w also can use */
RTW_WLAN_CATEGORY_WMM = 17,
- RTW_WLAN_CATEGORY_VHT = 21,
RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
};
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 48c90f00cc2e..111e0179712a 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -66,7 +66,7 @@ void rtw_ips_pwr_down(struct adapter *padapter);
int rtw_drv_register_netdev(struct adapter *padapter);
void rtw_ndev_destructor(struct net_device *ndev);
-int rtw_suspend_common(struct adapter *padapter);
+void rtw_suspend_common(struct adapter *padapter);
int rtw_resume_common(struct adapter *padapter);
int netdev_open(struct net_device *pnetdev);
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
index 999555476ebc..6816040a6aff 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
@@ -7,8 +7,6 @@
#ifndef __RTL8723B_SPEC_H__
#define __RTL8723B_SPEC_H__
-#include <autoconf.h>
-
#define HAL_NAV_UPPER_UNIT_8723B 128 /* micro-second */
/* */
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
index 56bdc14af47d..9dd329a5208a 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
@@ -402,27 +402,6 @@ struct txdesc_8723b {
#define DESC8723B_RATEMCS13 0x19
#define DESC8723B_RATEMCS14 0x1a
#define DESC8723B_RATEMCS15 0x1b
-#define DESC8723B_RATEVHTSS1MCS0 0x2c
-#define DESC8723B_RATEVHTSS1MCS1 0x2d
-#define DESC8723B_RATEVHTSS1MCS2 0x2e
-#define DESC8723B_RATEVHTSS1MCS3 0x2f
-#define DESC8723B_RATEVHTSS1MCS4 0x30
-#define DESC8723B_RATEVHTSS1MCS5 0x31
-#define DESC8723B_RATEVHTSS1MCS6 0x32
-#define DESC8723B_RATEVHTSS1MCS7 0x33
-#define DESC8723B_RATEVHTSS1MCS8 0x34
-#define DESC8723B_RATEVHTSS1MCS9 0x35
-#define DESC8723B_RATEVHTSS2MCS0 0x36
-#define DESC8723B_RATEVHTSS2MCS1 0x37
-#define DESC8723B_RATEVHTSS2MCS2 0x38
-#define DESC8723B_RATEVHTSS2MCS3 0x39
-#define DESC8723B_RATEVHTSS2MCS4 0x3a
-#define DESC8723B_RATEVHTSS2MCS5 0x3b
-#define DESC8723B_RATEVHTSS2MCS6 0x3c
-#define DESC8723B_RATEVHTSS2MCS7 0x3d
-#define DESC8723B_RATEVHTSS2MCS8 0x3e
-#define DESC8723B_RATEVHTSS2MCS9 0x3f
-
#define RX_HAL_IS_CCK_RATE_8723B(pDesc)\
(GET_RX_STATUS_DESC_RX_RATE_8723B(pDesc) == DESC8723B_RATE1M ||\
diff --git a/drivers/staging/rtl8723bs/include/rtw_ap.h b/drivers/staging/rtl8723bs/include/rtw_ap.h
index 4a1ed9eff83a..7a735e691399 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ap.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ap.h
@@ -14,7 +14,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx);
void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level);
void expire_timeout_chk(struct adapter *padapter);
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta);
-void start_bss_network(struct adapter *padapter, u8 *pbuf);
+void start_bss_network(struct adapter *padapter);
int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len);
void rtw_ap_restore_network(struct adapter *padapter);
void rtw_set_macaddr_acl(struct adapter *padapter, int mode);
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
index 23f4cb4711d4..7f96ff66915f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_debug.h
+++ b/drivers/staging/rtl8723bs/include/rtw_debug.h
@@ -7,172 +7,8 @@
#ifndef __RTW_DEBUG_H__
#define __RTW_DEBUG_H__
-#include <linux/trace_seq.h>
-
-#define _drv_always_ 1
-#define _drv_emerg_ 2
-#define _drv_alert_ 3
-#define _drv_crit_ 4
-#define _drv_err_ 5
-#define _drv_warning_ 6
-#define _drv_notice_ 7
-#define _drv_info_ 8
-#define _drv_dump_ 9
-#define _drv_debug_ 10
-
-
-#define _module_rtl871x_xmit_c_ BIT(0)
-#define _module_xmit_osdep_c_ BIT(1)
-#define _module_rtl871x_recv_c_ BIT(2)
-#define _module_recv_osdep_c_ BIT(3)
-#define _module_rtl871x_mlme_c_ BIT(4)
-#define _module_mlme_osdep_c_ BIT(5)
-#define _module_rtl871x_sta_mgt_c_ BIT(6)
-#define _module_rtl871x_cmd_c_ BIT(7)
-#define _module_cmd_osdep_c_ BIT(8)
-#define _module_rtl871x_io_c_ BIT(9)
-#define _module_io_osdep_c_ BIT(10)
-#define _module_os_intfs_c_ BIT(11)
-#define _module_rtl871x_security_c_ BIT(12)
-#define _module_rtl871x_eeprom_c_ BIT(13)
-#define _module_hal_init_c_ BIT(14)
-#define _module_hci_hal_init_c_ BIT(15)
-#define _module_rtl871x_ioctl_c_ BIT(16)
-#define _module_rtl871x_ioctl_set_c_ BIT(17)
-#define _module_rtl871x_ioctl_query_c_ BIT(18)
-#define _module_rtl871x_pwrctrl_c_ BIT(19)
-#define _module_hci_intfs_c_ BIT(20)
-#define _module_hci_ops_c_ BIT(21)
-#define _module_osdep_service_c_ BIT(22)
-#define _module_mp_ BIT(23)
-#define _module_hci_ops_os_c_ BIT(24)
-#define _module_rtl871x_ioctl_os_c BIT(25)
-#define _module_rtl8712_cmd_c_ BIT(26)
-/* define _module_efuse_ BIT(27) */
-#define _module_rtl8192c_xmit_c_ BIT(28)
-#define _module_hal_xmit_c_ BIT(28)
-#define _module_efuse_ BIT(29)
-#define _module_rtl8712_recv_c_ BIT(30)
-#define _module_rtl8712_led_c_ BIT(31)
-
-#undef _MODULE_DEFINE_
-
-#if defined _RTW_XMIT_C_
- #define _MODULE_DEFINE_ _module_rtl871x_xmit_c_
-#elif defined _XMIT_OSDEP_C_
- #define _MODULE_DEFINE_ _module_xmit_osdep_c_
-#elif defined _RTW_RECV_C_
- #define _MODULE_DEFINE_ _module_rtl871x_recv_c_
-#elif defined _RECV_OSDEP_C_
- #define _MODULE_DEFINE_ _module_recv_osdep_c_
-#elif defined _RTW_MLME_C_
- #define _MODULE_DEFINE_ _module_rtl871x_mlme_c_
-#elif defined _MLME_OSDEP_C_
- #define _MODULE_DEFINE_ _module_mlme_osdep_c_
-#elif defined _RTW_MLME_EXT_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _RTW_STA_MGT_C_
- #define _MODULE_DEFINE_ _module_rtl871x_sta_mgt_c_
-#elif defined _RTW_CMD_C_
- #define _MODULE_DEFINE_ _module_rtl871x_cmd_c_
-#elif defined _CMD_OSDEP_C_
- #define _MODULE_DEFINE_ _module_cmd_osdep_c_
-#elif defined _RTW_IO_C_
- #define _MODULE_DEFINE_ _module_rtl871x_io_c_
-#elif defined _IO_OSDEP_C_
- #define _MODULE_DEFINE_ _module_io_osdep_c_
-#elif defined _OS_INTFS_C_
- #define _MODULE_DEFINE_ _module_os_intfs_c_
-#elif defined _RTW_SECURITY_C_
- #define _MODULE_DEFINE_ _module_rtl871x_security_c_
-#elif defined _RTW_EEPROM_C_
- #define _MODULE_DEFINE_ _module_rtl871x_eeprom_c_
-#elif defined _HAL_INTF_C_
- #define _MODULE_DEFINE_ _module_hal_init_c_
-#elif (defined _HCI_HAL_INIT_C_) || (defined _SDIO_HALINIT_C_)
- #define _MODULE_DEFINE_ _module_hci_hal_init_c_
-#elif defined _RTL871X_IOCTL_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_c_
-#elif defined _RTL871X_IOCTL_SET_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_set_c_
-#elif defined _RTL871X_IOCTL_QUERY_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_query_c_
-#elif defined _RTL871X_PWRCTRL_C_
- #define _MODULE_DEFINE_ _module_rtl871x_pwrctrl_c_
-#elif defined _RTW_PWRCTRL_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _HCI_INTF_C_
- #define _MODULE_DEFINE_ _module_hci_intfs_c_
-#elif defined _HCI_OPS_C_
- #define _MODULE_DEFINE_ _module_hci_ops_c_
-#elif defined _SDIO_OPS_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _OSDEP_HCI_INTF_C_
- #define _MODULE_DEFINE_ _module_hci_intfs_c_
-#elif defined _OSDEP_SERVICE_C_
- #define _MODULE_DEFINE_ _module_osdep_service_c_
-#elif defined _HCI_OPS_OS_C_
- #define _MODULE_DEFINE_ _module_hci_ops_os_c_
-#elif defined _RTL871X_IOCTL_LINUX_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_os_c
-#elif defined _RTL8712_CMD_C_
- #define _MODULE_DEFINE_ _module_rtl8712_cmd_c_
-#elif defined _RTL8192C_XMIT_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _RTL8723AS_XMIT_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _RTL8712_RECV_C_
- #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
-#elif defined _RTL8192CU_RECV_C_
- #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
-#elif defined _RTL871X_MLME_EXT_C_
- #define _MODULE_DEFINE_ _module_mlme_osdep_c_
-#elif defined _RTW_EFUSE_C_
- #define _MODULE_DEFINE_ _module_efuse_
-#endif
-
-#undef _dbgdump
-
-#ifndef _RTL871X_DEBUG_C_
- extern u32 GlobalDebugLevel;
- extern u64 GlobalDebugComponents;
-#endif
-
-#define _dbgdump printk
-
-#define DRIVER_PREFIX "RTL8723BS: "
-
-#if defined(_dbgdump)
-
-/* without driver-defined prefix */
-#undef _DBG_871X_LEVEL
-#define _DBG_871X_LEVEL(level, fmt, arg...) \
- do {\
- if (level <= GlobalDebugLevel) {\
- if (level <= _drv_err_ && level > _drv_always_) \
- _dbgdump("ERROR " fmt, ##arg);\
- else \
- _dbgdump(fmt, ##arg);\
- } \
- } while (0)
-
-#define RTW_DBGDUMP NULL /* 'stream' for _dbgdump */
-
-/* dump message to selected 'stream' */
-#define DBG_871X_SEL(sel, fmt, arg...) \
- do { \
- if (sel == RTW_DBGDUMP) \
- _DBG_871X_LEVEL(_drv_always_, fmt, ##arg); \
- else \
- seq_printf(sel, fmt, ##arg); \
- } while (0)
-
-#endif /* defined(_dbgdump) */
-
-void sd_f0_reg_dump(void *sel, struct adapter *adapter);
-
-void mac_reg_dump(void *sel, struct adapter *adapter);
-void bb_reg_dump(void *sel, struct adapter *adapter);
-void rf_reg_dump(void *sel, struct adapter *adapter);
+void mac_reg_dump(struct adapter *adapter);
+void bb_reg_dump(struct adapter *adapter);
+void rf_reg_dump(struct adapter *adapter);
#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ht.h b/drivers/staging/rtl8723bs/include/rtw_ht.h
index e3f353fe1e47..1527d8be2d7a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ht.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ht.h
@@ -42,10 +42,6 @@ enum {
HT_AGG_SIZE_16K = 1,
HT_AGG_SIZE_32K = 2,
HT_AGG_SIZE_64K = 3,
- VHT_AGG_SIZE_128K = 4,
- VHT_AGG_SIZE_256K = 5,
- VHT_AGG_SIZE_512K = 6,
- VHT_AGG_SIZE_1024K = 7,
};
enum {
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index fbb73e698e09..e98083a07a66 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -104,8 +104,6 @@ struct _io_ops {
void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
-
- u8 (*_sd_f0_read8)(struct intf_hdl *pintfhdl, u32 addr);
};
struct io_req {
@@ -170,29 +168,15 @@ extern void unregister_intf_hdl(struct intf_hdl *pintfhdl);
extern void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
extern void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern u8 _rtw_read8(struct adapter *adapter, u32 addr);
-extern u16 _rtw_read16(struct adapter *adapter, u32 addr);
-extern u32 _rtw_read32(struct adapter *adapter, u32 addr);
-
-extern int _rtw_write8(struct adapter *adapter, u32 addr, u8 val);
-extern int _rtw_write16(struct adapter *adapter, u32 addr, u16 val);
-extern int _rtw_write32(struct adapter *adapter, u32 addr, u32 val);
-
-extern u8 _rtw_sd_f0_read8(struct adapter *adapter, u32 addr);
-
-extern u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+extern u8 rtw_read8(struct adapter *adapter, u32 addr);
+extern u16 rtw_read16(struct adapter *adapter, u32 addr);
+extern u32 rtw_read32(struct adapter *adapter, u32 addr);
-#define rtw_read8(adapter, addr) _rtw_read8((adapter), (addr))
-#define rtw_read16(adapter, addr) _rtw_read16((adapter), (addr))
-#define rtw_read32(adapter, addr) _rtw_read32((adapter), (addr))
+extern int rtw_write8(struct adapter *adapter, u32 addr, u8 val);
+extern int rtw_write16(struct adapter *adapter, u32 addr, u16 val);
+extern int rtw_write32(struct adapter *adapter, u32 addr, u32 val);
-#define rtw_write8(adapter, addr, val) _rtw_write8((adapter), (addr), (val))
-#define rtw_write16(adapter, addr, val) _rtw_write16((adapter), (addr), (val))
-#define rtw_write32(adapter, addr, val) _rtw_write32((adapter), (addr), (val))
-
-#define rtw_write_port(adapter, addr, cnt, mem) _rtw_write_port((adapter), (addr), (cnt), (mem))
-
-#define rtw_sd_f0_read8(adapter, addr) _rtw_sd_f0_read8((adapter), (addr))
+extern u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
extern void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
@@ -236,18 +220,4 @@ extern void bus_sync_io(struct io_queue *pio_q);
extern u32 _ioreq2rwmem(struct io_queue *pio_q);
extern void dev_power_down(struct adapter *Adapter, u8 bpwrup);
-#define PlatformEFIOWrite1Byte(_a, _b, _c) \
- rtw_write8(_a, _b, _c)
-#define PlatformEFIOWrite2Byte(_a, _b, _c) \
- rtw_write16(_a, _b, _c)
-#define PlatformEFIOWrite4Byte(_a, _b, _c) \
- rtw_write32(_a, _b, _c)
-
-#define PlatformEFIORead1Byte(_a, _b) \
- rtw_read8(_a, _b)
-#define PlatformEFIORead2Byte(_a, _b) \
- rtw_read16(_a, _b)
-#define PlatformEFIORead4Byte(_a, _b) \
- rtw_read32(_a, _b)
-
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 472818c5fd83..89b389d4c44b 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -195,47 +195,6 @@ enum {
RT_CHANNEL_DOMAIN_2G_MAX,
};
-enum {
- RT_CHANNEL_DOMAIN_5G_NULL = 0x00,
- RT_CHANNEL_DOMAIN_5G_ETSI1 = 0x01, /* Europe */
- RT_CHANNEL_DOMAIN_5G_ETSI2 = 0x02, /* Australia, New Zealand */
- RT_CHANNEL_DOMAIN_5G_ETSI3 = 0x03, /* Russia */
- RT_CHANNEL_DOMAIN_5G_FCC1 = 0x04, /* US */
- RT_CHANNEL_DOMAIN_5G_FCC2 = 0x05, /* FCC o/w DFS Channels */
- RT_CHANNEL_DOMAIN_5G_FCC3 = 0x06, /* India, Mexico */
- RT_CHANNEL_DOMAIN_5G_FCC4 = 0x07, /* Venezuela */
- RT_CHANNEL_DOMAIN_5G_FCC5 = 0x08, /* China */
- RT_CHANNEL_DOMAIN_5G_FCC6 = 0x09, /* Israel */
- RT_CHANNEL_DOMAIN_5G_FCC7_IC1 = 0x0A, /* US, Canada */
- RT_CHANNEL_DOMAIN_5G_KCC1 = 0x0B, /* Korea */
- RT_CHANNEL_DOMAIN_5G_MKK1 = 0x0C, /* Japan */
- RT_CHANNEL_DOMAIN_5G_MKK2 = 0x0D, /* Japan (W52, W53) */
- RT_CHANNEL_DOMAIN_5G_MKK3 = 0x0E, /* Japan (W56) */
- RT_CHANNEL_DOMAIN_5G_NCC1 = 0x0F, /* Taiwan */
- RT_CHANNEL_DOMAIN_5G_NCC2 = 0x10, /* Taiwan o/w DFS */
- RT_CHANNEL_DOMAIN_5G_NCC3 = 0x11, /* Taiwan w/o DFS, Band4 only */
- RT_CHANNEL_DOMAIN_5G_ETSI4 = 0x12, /* Europe w/o DFS, Band1 only */
- RT_CHANNEL_DOMAIN_5G_ETSI5 = 0x13, /* Australia, New Zealand(w/o Weather radar) */
- RT_CHANNEL_DOMAIN_5G_FCC8 = 0x14, /* Latin America */
- RT_CHANNEL_DOMAIN_5G_ETSI6 = 0x15, /* Israel, Bahrain, Egypt, India, China, Malaysia */
- RT_CHANNEL_DOMAIN_5G_ETSI7 = 0x16, /* China */
- RT_CHANNEL_DOMAIN_5G_ETSI8 = 0x17, /* Jordan */
- RT_CHANNEL_DOMAIN_5G_ETSI9 = 0x18, /* Lebanon */
- RT_CHANNEL_DOMAIN_5G_ETSI10 = 0x19, /* Qatar */
- RT_CHANNEL_DOMAIN_5G_ETSI11 = 0x1A, /* Russia */
- RT_CHANNEL_DOMAIN_5G_NCC4 = 0x1B, /* Taiwan, (w/o Weather radar) */
- RT_CHANNEL_DOMAIN_5G_ETSI12 = 0x1C, /* Indonesia */
- RT_CHANNEL_DOMAIN_5G_FCC9 = 0x1D, /* w/o Weather radar) */
- RT_CHANNEL_DOMAIN_5G_ETSI13 = 0x1E, /* w/o Weather radar) */
- RT_CHANNEL_DOMAIN_5G_FCC10 = 0x1F, /* Argentina (w/o Weather radar) */
- /* Add new channel plan above this line =============== */
- /* Driver Self Defined ===== */
- RT_CHANNEL_DOMAIN_5G_FCC = 0x20,
- RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS = 0x21,
- RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS = 0x22,
- RT_CHANNEL_DOMAIN_5G_MAX,
-};
-
#define rtw_is_channel_plan_valid(chplan) (chplan < RT_CHANNEL_DOMAIN_MAX || chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
struct rt_channel_plan {
@@ -248,14 +207,8 @@ struct rt_channel_plan_2g {
unsigned char Len;
};
-struct rt_channel_plan_5g {
- unsigned char Channel[MAX_CHANNEL_NUM_5G];
- unsigned char Len;
-};
-
struct rt_channel_plan_map {
unsigned char Index2G;
- unsigned char Index5G;
};
enum {
@@ -348,13 +301,13 @@ struct FW_Sta_Info {
* When the driver scanned RTW_SCAN_NUM_OF_CH channels, it would switch back to AP's operating channel for
* RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO milliseconds.
* Example:
- * For chip supports 2.4G + 5GHz and AP mode is operating in channel 1,
+ * For a chip that supports 2.4G, with AP mode operating in channel 1,
* RTW_SCAN_NUM_OF_CH is 8, RTW_STAY_AP_CH_MILLISECOND is 3 and SURVEY_TO is 100.
* When it's STA mode gets set_scan command,
* it would
* 1. Doing the scan on channel 1.2.3.4.5.6.7.8
* 2. Back to channel 1 for 300 milliseconds
- * 3. Go through doing site survey on channel 9.10.11.36.40.44.48.52
+ * 3. Go through doing site survey on channel 9.10.11
* 4. Back to channel 1 for 300 milliseconds
* 5. ... and so on, till survey done.
*/
@@ -411,7 +364,6 @@ struct rt_channel_info {
};
int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch);
-bool rtw_mlme_band_check(struct adapter *adapter, const u32 ch);
/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
#define P2P_MAX_REG_CLASSES 10
@@ -805,38 +757,6 @@ enum {
#ifdef _RTW_MLME_EXT_C_
-static struct fwevent wlanevents[] =
-{
- {0, rtw_dummy_event_callback}, /*0*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, &rtw_survey_event_callback}, /*8*/
- {sizeof(struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
-
- {0, &rtw_joinbss_event_callback}, /*10*/
- {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
- {sizeof(struct stadel_event), &rtw_stadel_event_callback},
- {0, &rtw_atimdone_event_callback},
- {0, rtw_dummy_event_callback},
- {0, NULL}, /*15*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, rtw_fwdbg_event_callback},
- {0, NULL}, /*20*/
- {0, NULL},
- {0, NULL},
- {0, &rtw_cpwm_event_callback},
- {0, NULL},
- {0, &rtw_wmm_event_callback},
-
-};
-
#endif/* _RTL8192C_CMD_C_ */
#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h
index 2788ad80b114..ea3abee325ef 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mp.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mp.h
@@ -348,7 +348,6 @@ void Hal_SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
void Hal_SetSingleToneTx(struct adapter *padapter, u8 bStart);
void Hal_SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
void Hal_SetContinuousTx(struct adapter *padapter, u8 bStart);
-void Hal_SetBandwidth(struct adapter *padapter);
void Hal_SetDataRate(struct adapter *padapter);
void Hal_SetChannel(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/rtw_rf.h b/drivers/staging/rtl8723bs/include/rtw_rf.h
index cb6beccd3d23..6c25707f4ec8 100644
--- a/drivers/staging/rtl8723bs/include/rtw_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtw_rf.h
@@ -21,16 +21,13 @@
#define RTL8711_RF_MAX_SENS 6
#define RTL8711_RF_DEF_SENS 4
-/* */
-/* We now define the following channels as the max channels in each channel plan. */
-/* 2G, total 14 chnls */
-/* {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} */
-/* 5G, total 24 chnls */
-/* {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120,
- * 124, 128, 132, 136, 140, 149, 153, 157, 161, 165} */
+/*
+ * We now define the following channels as the max channels in each channel plan.
+ * 2G, total 14 chnls
+ * {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
+ */
#define MAX_CHANNEL_NUM_2G 14
-#define MAX_CHANNEL_NUM_5G 24
-#define MAX_CHANNEL_NUM 38/* 14+24 */
+#define MAX_CHANNEL_NUM 14
#define NUM_REGULATORYS 1
@@ -83,10 +80,6 @@ enum {
enum channel_width {
CHANNEL_WIDTH_20 = 0,
CHANNEL_WIDTH_40 = 1,
- CHANNEL_WIDTH_80 = 2,
- CHANNEL_WIDTH_160 = 3,
- CHANNEL_WIDTH_80_80 = 4,
- CHANNEL_WIDTH_MAX = 5,
};
/* Represent Extension Channel Offset in HT Capabilities */
@@ -99,17 +92,9 @@ enum extchnl_offset {
};
enum {
- VHT_DATA_SC_DONOT_CARE = 0,
- VHT_DATA_SC_20_UPPER_OF_80MHZ = 1,
- VHT_DATA_SC_20_LOWER_OF_80MHZ = 2,
- VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3,
- VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4,
- VHT_DATA_SC_20_RECV1 = 5,
- VHT_DATA_SC_20_RECV2 = 6,
- VHT_DATA_SC_20_RECV3 = 7,
- VHT_DATA_SC_20_RECV4 = 8,
- VHT_DATA_SC_40_UPPER_OF_80MHZ = 9,
- VHT_DATA_SC_40_LOWER_OF_80MHZ = 10,
+ HT_DATA_SC_DONOT_CARE = 0,
+ HT_DATA_SC_20_UPPER_OF_40MHZ = 1,
+ HT_DATA_SC_20_LOWER_OF_40MHZ = 2,
};
/* 2007/11/15 MH Define different RF type. */
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
index 5c787e999aab..a68b73858462 100644
--- a/drivers/staging/rtl8723bs/include/rtw_security.h
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -7,6 +7,7 @@
#ifndef __RTW_SECURITY_H_
#define __RTW_SECURITY_H_
+#include <crypto/arc4.h>
#define _NO_PRIVACY_ 0x0
#define _WEP40_ 0x1
@@ -127,6 +128,8 @@ struct security_priv {
u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
int wps_ie_len;
+ struct arc4_ctx xmit_arc4_ctx;
+ struct arc4_ctx recv_arc4_ctx;
u8 binstallGrpkey;
u8 binstallBIPkey;
@@ -191,8 +194,6 @@ do {\
} \
} while (0)
-#define _AES_IV_LEN_ 8
-
#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\
do {\
switch (encrypt)\
@@ -243,110 +244,11 @@ struct mic_data {
u32 nBytesInM; /* # bytes in M */
};
-extern const u32 Te0[256];
-extern const u32 Te1[256];
-extern const u32 Te2[256];
-extern const u32 Te3[256];
-extern const u32 Te4[256];
-extern const u32 Td0[256];
-extern const u32 Td1[256];
-extern const u32 Td2[256];
-extern const u32 Td3[256];
-extern const u32 Td4[256];
-extern const u32 rcon[10];
-extern const u8 Td4s[256];
-extern const u8 rcons[10];
-
-#define RCON(i) (rcons[(i)] << 24)
-
-static inline u32 rotr(u32 val, int bits)
-{
- return (val >> bits) | (val << (32 - bits));
-}
-
-#define TE0(i) Te0[((i) >> 24) & 0xff]
-#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
-#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
-#define TE3(i) rotr(Te0[(i) & 0xff], 24)
-#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000)
-#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000)
-#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00)
-#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff)
-#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000)
-#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000)
-#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00)
-#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff)
-#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff)
-
-#define TD0(i) Td0[((i) >> 24) & 0xff]
-#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8)
-#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16)
-#define TD3(i) rotr(Td0[(i) & 0xff], 24)
-#define TD41(i) (Td4s[((i) >> 24) & 0xff] << 24)
-#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16)
-#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8)
-#define TD44(i) (Td4s[(i) & 0xff])
-#define TD0_(i) Td0[(i) & 0xff]
-#define TD1_(i) rotr(Td0[(i) & 0xff], 8)
-#define TD2_(i) rotr(Td0[(i) & 0xff], 16)
-#define TD3_(i) rotr(Td0[(i) & 0xff], 24)
-
-#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
- ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
-
-#define PUTU32(ct, st) { \
-(ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
-(ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
-
-#define WPA_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
- (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
-
-#define WPA_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define WPA_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define WPA_PUT_BE64(a, val) \
- do { \
- (a)[0] = (u8) (((u64) (val)) >> 56); \
- (a)[1] = (u8) (((u64) (val)) >> 48); \
- (a)[2] = (u8) (((u64) (val)) >> 40); \
- (a)[3] = (u8) (((u64) (val)) >> 32); \
- (a)[4] = (u8) (((u64) (val)) >> 24); \
- (a)[5] = (u8) (((u64) (val)) >> 16); \
- (a)[6] = (u8) (((u64) (val)) >> 8); \
- (a)[7] = (u8) (((u64) (val)) & 0xff); \
- } while (0)
-
/* ===== start - public domain SHA256 implementation ===== */
/* This is based on SHA256 implementation in LibTomCrypt that was released into
* public domain by Tom St Denis. */
-/* Various logical functions */
-#define RORc(x, y) \
-(((((unsigned long) (x) & 0xFFFFFFFFUL) >> (unsigned long) ((y) & 31)) | \
- ((unsigned long) (x) << (unsigned long) (32 - ((y) & 31)))) & 0xFFFFFFFFUL)
-#define Ch(x, y, z) (z ^ (x & (y ^ z)))
-#define Maj(x, y, z) (((x | y) & z) | (x & y))
-#define S(x, n) RORc((x), (n))
-#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
-#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
-#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
-#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
-#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
-#ifndef MIN
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#endif
int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac);
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b);
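
The two arc4_ctx members added to struct security_priv above are meant for the kernel's lib/crypto ARC4 helpers from <crypto/arc4.h>. As a rough, hedged sketch of how an encrypt path could drive one of them (illustrative only; the function and buffer names below are made up, not the driver's):

	/* illustrative only -- not part of the patch */
	#include <crypto/arc4.h>

	static void example_rc4_crypt(struct arc4_ctx *ctx, const u8 *rc4key,
				      unsigned int keylen, u8 *payload,
				      unsigned int len)
	{
		/* arc4_setkey() accepts key lengths of 1..256 bytes */
		arc4_setkey(ctx, rc4key, keylen);
		/* in-place RC4: the output buffer may alias the input */
		arc4_crypt(ctx, payload, payload, len);
	}

Keeping one context per direction (xmit_arc4_ctx / recv_arc4_ctx) presumably lets the transmit and receive paths re-key independently without sharing cipher state.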
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index e45753d17313..676ead0372fa 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -234,7 +234,7 @@ enum {
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
-int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg);
+int rtw_sctx_wait(struct submit_ctx *sctx);
void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
void rtw_sctx_done(struct submit_ctx **sctx);
diff --git a/drivers/staging/rtl8723bs/include/sdio_ops_linux.h b/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
index 16a03adbc2cf..18830dd18372 100644
--- a/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
+++ b/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
@@ -11,8 +11,6 @@
#define SDIO_ERR_VAL16 0xEAEA
#define SDIO_ERR_VAL32 0xEAEAEAEA
-u8 sd_f0_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err);
-
s32 _sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
s32 _sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
s32 sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 036cf57c65a9..0bd7b662b972 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -234,7 +234,7 @@ static inline int IS_MCAST(unsigned char *da)
return false;
}
-static inline unsigned char *get_ra(unsigned char *pframe)
+static inline unsigned char *rtl8723bs_get_ra(unsigned char *pframe)
{
unsigned char *ra;
ra = GetAddr1Ptr(pframe);
@@ -336,7 +336,6 @@ static inline int IsFrameTypeCtrl(unsigned char *pframe)
#define _PRE_ALLOCICVHDR_ 5
#define _PRE_ALLOCMICHDR_ 6
-#define _SIFSTIME_ ((priv->pmib->dot11BssType.net_work_type&WIRELESS_11A)?16:10)
#define _ACKCTSLNG_ 14 /* 14 bytes long, including crclng */
#define _CRCLNG_ 4
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 437859228371..fd747c8d920e 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -77,7 +77,6 @@ static struct ieee80211_rate rtw_rates[] = {
#define RTW_G_RATES_NUM 12
#define RTW_2G_CHANNELS_NUM 14
-#define RTW_5G_CHANNELS_NUM 37
static struct ieee80211_channel rtw_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
@@ -203,8 +202,6 @@ rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
static int rtw_ieee80211_channel_to_frequency(int chan, int band)
{
- /* see 802.11 17.3.8.3.2 and Annex J
- * there are overlapping channel numbers in 5GHz and 2GHz bands */
if (band == NL80211_BAND_2GHZ) {
if (chan == 14)
return 2484;
@@ -229,7 +226,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
size_t len, bssinf_len = 0;
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct wireless_dev *wdev = padapter->rtw_wdev;
struct wiphy *wiphy = wdev->wiphy;
@@ -310,7 +306,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
/* pmlmeext->mgnt_seq++; */
if (pnetwork->network.Reserved[0] == 1) { /* WIFI_BEACON */
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
SetFrameSubType(pbuf, WIFI_BEACON);
} else {
memcpy(pwlanhdr->addr1, myid(&(padapter->eeprompriv)), ETH_ALEN);
@@ -960,7 +956,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
memset(param, 0, param_len);
param->cmd = IEEE_CMD_SET_ENCRYPTION;
- memset(param->sta_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(param->sta_addr);
switch (params->cipher) {
case IW_AUTH_CIPHER_NONE:
@@ -1265,18 +1261,12 @@ void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
-
- while (1)
+ list_for_each(plist, phead)
{
- if (phead == plist)
- break;
-
- pnetwork = container_of(plist, struct wlan_network, list);
+ pnetwork = list_entry(plist, struct wlan_network, list);
/* report network only if the current channel set contains the channel to which this network belongs */
if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0
- && rtw_mlme_band_check(padapter, pnetwork->network.Configuration.DSConfig) == true
&& true == rtw_validate_ssid(&(pnetwork->network.Ssid))
)
{
@@ -1284,8 +1274,6 @@ void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
rtw_cfg80211_inform_bss(padapter, pnetwork);
}
- plist = get_next(plist);
-
}
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
@@ -2460,7 +2448,7 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev
struct station_del_parameters *params)
{
int ret = 0;
- struct list_head *phead, *plist;
+ struct list_head *phead, *plist, *tmp;
u8 updated = false;
struct sta_info *psta = NULL;
struct adapter *padapter = rtw_netdev_priv(ndev);
@@ -2489,13 +2477,9 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
-
/* check asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = get_next(plist);
+ list_for_each_safe(plist, tmp, phead) {
+ psta = list_entry(plist, struct sta_info, asoc_list);
if (!memcmp((u8 *)mac, psta->hwaddr, ETH_ALEN)) {
if (psta->dot8021xalg != 1 || psta->bpairwise_key_installed) {
@@ -2598,7 +2582,7 @@ static int _cfg80211_rtw_mgmt_tx(struct adapter *padapter, u8 tx_ch, const u8 *b
struct pkt_attrib *pattrib;
unsigned char *pframe;
int ret = _FAIL;
- bool ack = true;
+ bool __maybe_unused ack = true;
struct ieee80211_hdr *pwlanhdr;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 5088c3731b6d..f95000df8942 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -420,8 +420,10 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
pwep = kzalloc(wep_total_len, GFP_KERNEL);
- if (!pwep)
+ if (!pwep) {
+ ret = -ENOMEM;
goto exit;
+ }
pwep->KeyLength = wep_key_len;
pwep->Length = wep_total_len;
@@ -1052,15 +1054,9 @@ static int rtw_wx_set_wap(struct net_device *dev,
authmode = padapter->securitypriv.ndisauthtype;
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
-
- while (1) {
- if (phead == pmlmepriv->pscanned)
- break;
-
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
-
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ list_for_each(pmlmepriv->pscanned, phead) {
+ pnetwork = list_entry(pmlmepriv->pscanned,
+ struct wlan_network, list);
dst_bssid = pnetwork->network.MacAddress;
@@ -1299,29 +1295,21 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
-
- while (1) {
- if (phead == plist)
- break;
-
+ list_for_each(plist, phead) {
if ((stop - ev) < SCAN_ITEM_SIZE) {
ret = -E2BIG;
break;
}
- pnetwork = container_of(plist, struct wlan_network, list);
+ pnetwork = list_entry(plist, struct wlan_network, list);
/* report network only if the current channel set contains the channel to which this network belongs */
if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0
- && rtw_mlme_band_check(padapter, pnetwork->network.Configuration.DSConfig) == true
&& true == rtw_validate_ssid(&(pnetwork->network.Ssid))) {
ev = translate_scan(padapter, a, pnetwork, ev, stop);
}
- plist = get_next(plist);
-
}
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
@@ -1387,15 +1375,9 @@ static int rtw_wx_set_essid(struct net_device *dev,
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
-
- while (1) {
- if (phead == pmlmepriv->pscanned)
- break;
-
- pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
-
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ list_for_each(pmlmepriv->pscanned, phead) {
+ pnetwork = list_entry(pmlmepriv->pscanned,
+ struct wlan_network, list);
dst_ssid = pnetwork->network.Ssid.Ssid;
@@ -1934,7 +1916,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
return -1;
param->cmd = IEEE_CMD_SET_ENCRYPTION;
- memset(param->sta_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(param->sta_addr);
switch (pext->alg) {
@@ -2252,14 +2234,8 @@ static int rtw_get_ap_info(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
-
- while (1) {
- if (phead == plist)
- break;
-
-
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each(plist, phead) {
+ pnetwork = list_entry(plist, struct wlan_network, list);
if (!mac_pton(data, bssid)) {
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
@@ -2282,8 +2258,6 @@ static int rtw_get_ap_info(struct net_device *dev,
}
}
- plist = get_next(plist);
-
}
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
@@ -2600,10 +2574,9 @@ static int rtw_dbg_port(struct net_device *dev,
case 0x12: /* set rx_stbc */
{
struct registry_priv *pregpriv = &padapter->registrypriv;
- /* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, 0x3: enable both 2.4g and 5g */
- /* default is set to enable 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */
- if (extra_arg == 0 || extra_arg == 1 ||
- extra_arg == 2 || extra_arg == 3)
+ /* 0: disable, bit(0):enable 2.4g */
+ /* default is set to enable 2.4GHZ */
+ if (extra_arg == 0 || extra_arg == 1)
pregpriv->rx_stbc = extra_arg;
}
break;
@@ -2734,11 +2707,11 @@ static int rtw_dbg_port(struct net_device *dev,
case 0xdd:/* registers dump , 0 for mac reg, 1 for bb reg, 2 for rf reg */
{
if (extra_arg == 0)
- mac_reg_dump(RTW_DBGDUMP, padapter);
+ mac_reg_dump(padapter);
else if (extra_arg == 1)
- bb_reg_dump(RTW_DBGDUMP, padapter);
+ bb_reg_dump(padapter);
else if (extra_arg == 2)
- rf_reg_dump(RTW_DBGDUMP, padapter);
+ rf_reg_dump(padapter);
}
break;
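
The queue walks rewritten in the two ioctl files above (and in xmit_linux.c further down) all follow the same mechanical shape: an open-coded get_next()/container_of() loop becomes list_for_each() plus list_entry(), with list_for_each_safe() used where the body may unlink the current node. A generic sketch of that pattern, with placeholder types rather than the driver's:

	#include <linux/list.h>

	struct item {
		struct list_head node;
		int key;
	};

	/* read-only walk: list_for_each() + list_entry() */
	static struct item *find_item(struct list_head *head, int key)
	{
		struct list_head *pos;

		list_for_each(pos, head) {
			struct item *it = list_entry(pos, struct item, node);

			if (it->key == key)
				return it;
		}
		return NULL;
	}

	/* walk that unlinks entries: the _safe variant keeps a lookahead pointer */
	static void remove_items(struct list_head *head, int key)
	{
		struct list_head *pos, *tmp;

		list_for_each_safe(pos, tmp, head) {
			struct item *it = list_entry(pos, struct item, node);

			if (it->key == key)
				list_del(&it->node);
		}
	}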
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 0a16752f805b..a4560ba22db1 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -4,10 +4,6 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-
-
-#define _MLME_OSDEP_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 160f624612c7..648456b992bb 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _OS_INTFS_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
@@ -24,7 +22,7 @@ static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure;infra, ad-hoc, auto */
/* struct ndis_802_11_ssid ssid; */
static int rtw_channel = 1;/* ad-hoc support requirement */
-static int rtw_wireless_mode = WIRELESS_MODE_MAX;
+static int rtw_wireless_mode = WIRELESS_11BG_24N;
static int rtw_vrtl_carrier_sense = AUTO_VCS;
static int rtw_vcs_type = RTS_CTS;/* */
static int rtw_rts_thresh = 2347;/* */
@@ -67,10 +65,12 @@ static int rtw_uapsd_acvi_en;
static int rtw_uapsd_acvo_en;
int rtw_ht_enable = 1;
-/* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160MHz, 4: 80+80MHz */
-/* 2.4G use bit 0 ~ 3, 5G use bit 4 ~ 7 */
-/* 0x21 means enable 2.4G 40MHz & 5G 80MHz */
-static int rtw_bw_mode = 0x21;
+/*
+ * 0: 20 MHz, 1: 40 MHz
+ * 2.4G use bit 0 ~ 3
+ * 0x01 means enable 2.4G 40MHz
+ */
+static int rtw_bw_mode = 0x01;
static int rtw_ampdu_enable = 1;/* for enable tx_ampdu ,0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
static int rtw_rx_stbc = 1;/* 0: disable, 1:enable 2.4g */
static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto . There is an IOT issu with DLINK DIR-629 when the flag turn on */
@@ -289,7 +289,6 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->RegPowerBase = 14;
registry_par->TxBBSwing_2G = 0xFF;
- registry_par->TxBBSwing_5G = 0xFF;
registry_par->bEn_RFE = 1;
registry_par->RFE_Type = 64;
@@ -1152,14 +1151,13 @@ static void rtw_suspend_normal(struct adapter *padapter)
padapter->intf_deinit(adapter_to_dvobj(padapter));
}
-int rtw_suspend_common(struct adapter *padapter)
+void rtw_suspend_common(struct adapter *padapter)
{
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ret = 0;
unsigned long start_time = jiffies;
netdev_dbg(padapter->pnetdev, " suspend start\n");
@@ -1190,19 +1188,14 @@ int rtw_suspend_common(struct adapter *padapter)
rtw_ps_deny_cancel(padapter, PS_DENY_SUSPEND);
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- rtw_suspend_normal(padapter);
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- rtw_suspend_normal(padapter);
- else
- rtw_suspend_normal(padapter);
+ rtw_suspend_normal(padapter);
netdev_dbg(padapter->pnetdev, "rtw suspend success in %d ms\n",
jiffies_to_msecs(jiffies - start_time));
exit:
- return ret;
+ return;
}
static int rtw_resume_process_normal(struct adapter *padapter)
@@ -1269,17 +1262,10 @@ int rtw_resume_common(struct adapter *padapter)
int ret = 0;
unsigned long start_time = jiffies;
struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
netdev_dbg(padapter->pnetdev, "resume start\n");
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- rtw_resume_process_normal(padapter);
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- rtw_resume_process_normal(padapter);
- } else {
- rtw_resume_process_normal(padapter);
- }
+ rtw_resume_process_normal(padapter);
hal_btcoex_SuspendNotify(padapter, 0);
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 9c6b1666df13..c58555a4012f 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -4,10 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-
-
-#define _OSDEP_SERVICE_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index cd51430d4618..88a69c7ca8f2 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _RECV_OSDEP_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index d2bf444117b8..490431484524 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _HCI_INTF_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_btcoex.h>
@@ -449,7 +447,9 @@ static int rtw_sdio_suspend(struct device *dev)
return 0;
}
- return rtw_suspend_common(padapter);
+ rtw_suspend_common(padapter);
+
+ return 0;
}
static int rtw_resume_process(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 5cedf775b6ef..bed930760656 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -26,34 +26,6 @@ inline void rtw_sdio_set_irq_thd(struct dvobj_priv *dvobj, void *thd_hdl)
sdio_data->sys_sdio_irq_thd = thd_hdl;
}
-u8 sd_f0_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
-{
- struct adapter *padapter;
- struct dvobj_priv *psdiodev;
- struct sdio_data *psdio;
-
- u8 v = 0;
- struct sdio_func *func;
- bool claim_needed;
-
- padapter = pintfhdl->padapter;
- psdiodev = pintfhdl->pintf_dev;
- psdio = &psdiodev->intf_data;
-
- if (padapter->bSurpriseRemoved)
- return v;
-
- func = psdio->func;
- claim_needed = rtw_sdio_claim_host_needed(func);
-
- if (claim_needed)
- sdio_claim_host(func);
- v = sdio_f0_readb(func, addr, err);
- if (claim_needed)
- sdio_release_host(func);
- return v;
-}
-
/*
* Return:
*0 Success
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 0084589499b9..5eef1d68c6f0 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -44,10 +44,6 @@ static const struct ieee80211_regdomain rtw_regdom_rd = {
static int rtw_ieee80211_channel_to_frequency(int chan, int band)
{
- /* see 802.11 17.3.8.3.2 and Annex J
- * there are overlapping channel numbers in 5GHz and 2GHz bands
- */
-
/* NL80211_BAND_2GHZ */
if (chan == 14)
return 2484;
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 639408eaf4df..530e7a6c67c5 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -4,8 +4,6 @@
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
******************************************************************************/
-#define _XMIT_OSDEP_C_
-
#include <drv_types.h>
#include <rtw_debug.h>
@@ -139,13 +137,11 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
-
/* free sta asoc_queue */
- while (phead != plist) {
+ list_for_each(plist, phead) {
int stainfo_offset;
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+
+ psta = list_entry(plist, struct sta_info, asoc_list);
stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
if (stainfo_offset_valid(stainfo_offset)) {
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 9001570a8c94..c6ad34a7fa33 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -2406,7 +2406,6 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
(void)ms_arbitrate_l2p(chip, phy_blk,
log_blk - ms_start_idx[seg_no], us1, us2);
- continue;
}
segment->build_flag = 1;
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 8faa601c700b..24b9077a634a 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -131,7 +131,8 @@ int sm750_hw_fillrect(struct lynx_accel *accel,
}
/**
- * sm750_hm_copyarea
+ * sm750_hw_copyarea
+ * @accel: Acceleration device data
* @sBase: Address of source: offset in frame buffer
* @sPitch: Pitch value of source surface in BYTE
* @sx: Starting x coordinate of source surface
@@ -298,6 +299,7 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
/**
* sm750_hw_imageblit
+ * @accel: Acceleration device data
* @pSrcbuf: pointer to start of source buffer in system memory
* @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
* and -ive mean button up
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 4455d26f7c96..41f8a72a2a95 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -6,10 +6,10 @@
#include <linux/debugfs.h>
#include <linux/kthread.h>
-#include <linux/idr.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/visorbus.h>
+#include <linux/xarray.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -82,8 +82,7 @@ struct visorhba_devdata {
* allows us to pass int handles back-and-forth between us and
* iovm, instead of raw pointers
*/
- struct idr idr;
-
+ struct xarray xa;
struct dentry *debugfs_dir;
struct dentry *debugfs_info;
};
@@ -183,70 +182,47 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
}
/*
- * simple_idr_get - Associate a provided pointer with an int value
- * 1 <= value <= INT_MAX, and return this int value;
- * the pointer value can be obtained later by passing
- * this int value to idr_find()
- * @idrtable: The data object maintaining the pointer<-->int mappings
- * @p: The pointer value to be remembered
- * @lock: A spinlock used when exclusive access to idrtable is needed
- *
- * Return: The id number mapped to pointer 'p', 0 on failure
- */
-static unsigned int simple_idr_get(struct idr *idrtable, void *p,
- spinlock_t *lock)
-{
- int id;
- unsigned long flags;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(lock, flags);
- id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
- spin_unlock_irqrestore(lock, flags);
- idr_preload_end();
- /* failure */
- if (id < 0)
- return 0;
- /* idr_alloc() guarantees > 0 */
- return (unsigned int)(id);
-}
-
-/*
* setup_scsitaskmgmt_handles - Stash the necessary handles so that the
* completion processing logic for a taskmgmt
* cmd will be able to find who to wake up
* and where to stash the result
- * @idrtable: The data object maintaining the pointer<-->int mappings
- * @lock: A spinlock used when exclusive access to idrtable is needed
+ * @xa: The data object maintaining the pointer<-->int mappings
* @cmdrsp: Response from the IOVM
* @event: The event handle to associate with an id
* @result: The location to place the result of the event handle into
*/
-static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
- struct uiscmdrsp *cmdrsp,
+static int setup_scsitaskmgmt_handles(struct xarray *xa, struct uiscmdrsp *cmdrsp,
wait_queue_head_t *event, int *result)
{
- /* specify the event that has to be triggered when this */
- /* cmd is complete */
- cmdrsp->scsitaskmgmt.notify_handle =
- simple_idr_get(idrtable, event, lock);
- cmdrsp->scsitaskmgmt.notifyresult_handle =
- simple_idr_get(idrtable, result, lock);
+ int ret;
+ u32 id;
+
+ /* specify the event that has to be triggered when this cmd is complete */
+ ret = xa_alloc_irq(xa, &id, event, xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ return ret;
+ cmdrsp->scsitaskmgmt.notify_handle = id;
+ ret = xa_alloc_irq(xa, &id, result, xa_limit_32b, GFP_KERNEL);
+ if (ret) {
+ xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
+ return ret;
+ }
+ cmdrsp->scsitaskmgmt.notifyresult_handle = id;
+
+ return 0;
}
/*
* cleanup_scsitaskmgmt_handles - Forget handles created by
* setup_scsitaskmgmt_handles()
- * @idrtable: The data object maintaining the pointer<-->int mappings
+ * @xa: The data object maintaining the pointer<-->int mappings
* @cmdrsp: Response from the IOVM
*/
-static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
+static void cleanup_scsitaskmgmt_handles(struct xarray *xa,
struct uiscmdrsp *cmdrsp)
{
- if (cmdrsp->scsitaskmgmt.notify_handle)
- idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
- if (cmdrsp->scsitaskmgmt.notifyresult_handle)
- idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+ xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
+ xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
}
/*
@@ -269,6 +245,7 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
int notifyresult = 0xffff;
wait_queue_head_t notifyevent;
int scsicmd_id;
+ int ret;
if (devdata->serverdown || devdata->serverchangingstate)
return FAILED;
@@ -284,8 +261,14 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
/* issue TASK_MGMT_ABORT_TASK */
cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
- setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
- &notifyevent, &notifyresult);
+
+ ret = setup_scsitaskmgmt_handles(&devdata->xa, cmdrsp,
+ &notifyevent, &notifyresult);
+ if (ret) {
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
+ return FAILED;
+ }
/* save destination */
cmdrsp->scsitaskmgmt.tasktype = tasktype;
@@ -311,14 +294,14 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
dev_dbg(&scsidev->sdev_gendev,
"visorhba: taskmgmt type=%d success; result=0x%x\n",
tasktype, notifyresult);
- cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
+ cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
return SUCCESS;
err_del_scsipending_ent:
dev_dbg(&scsidev->sdev_gendev,
"visorhba: taskmgmt type=%d not executed\n", tasktype);
del_scsipending_ent(devdata, scsicmd_id);
- cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
+ cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
return FAILED;
}
@@ -654,13 +637,13 @@ DEFINE_SHOW_ATTRIBUTE(info_debugfs);
* Service Partition returned the result of the task management
* command. Wake up anyone waiting for it.
*/
-static void complete_taskmgmt_command(struct idr *idrtable,
+static void complete_taskmgmt_command(struct xarray *xa,
struct uiscmdrsp *cmdrsp, int result)
{
wait_queue_head_t *wq =
- idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+ xa_load(xa, cmdrsp->scsitaskmgmt.notify_handle);
int *scsi_result_ptr =
- idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+ xa_load(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
if (unlikely(!(wq && scsi_result_ptr))) {
pr_err("visorhba: no completion context; cmd will time out\n");
return;
@@ -708,7 +691,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
break;
case CMD_SCSITASKMGMT_TYPE:
cmdrsp = pendingdel->sent;
- complete_taskmgmt_command(&devdata->idr, cmdrsp,
+ complete_taskmgmt_command(&devdata->xa, cmdrsp,
TASK_MGMT_FAILED);
break;
default:
@@ -905,7 +888,7 @@ static void drain_queue(struct uiscmdrsp *cmdrsp,
if (!del_scsipending_ent(devdata,
cmdrsp->scsitaskmgmt.handle))
break;
- complete_taskmgmt_command(&devdata->idr, cmdrsp,
+ complete_taskmgmt_command(&devdata->xa, cmdrsp,
cmdrsp->scsitaskmgmt.result);
} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
dev_err_once(&devdata->dev->device,
@@ -1053,7 +1036,7 @@ static int visorhba_probe(struct visor_device *dev)
if (err)
goto err_debugfs_info;
- idr_init(&devdata->idr);
+ xa_init_flags(&devdata->xa, XA_FLAGS_ALLOC);
devdata->cmdrsp = kmalloc(sizeof(*devdata->cmdrsp), GFP_ATOMIC);
visorbus_enable_channel_interrupts(dev);
@@ -1096,8 +1079,6 @@ static void visorhba_remove(struct visor_device *dev)
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
- idr_destroy(&devdata->idr);
-
dev_set_drvdata(&dev->device, NULL);
debugfs_remove(devdata->debugfs_info);
debugfs_remove_recursive(devdata->debugfs_dir);
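
For reference, here is the allocating-xarray pattern the visorhba hunks above move to, condensed into one place. This is a sketch, not the driver's code, and the identifiers are illustrative; the one hard requirement is that xa_alloc_irq() is only valid on an xarray set up for allocation (DEFINE_XARRAY_ALLOC() or xa_init_flags() with XA_FLAGS_ALLOC):

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(handle_xa);	/* allocation-capable xarray */

	static int handle_store(void *ptr, u32 *id_out)
	{
		/* hand out a free 32-bit id and remember ptr under it */
		return xa_alloc_irq(&handle_xa, id_out, ptr, xa_limit_32b,
				    GFP_KERNEL);
	}

	static void *handle_lookup(u32 id)
	{
		return xa_load(&handle_xa, id);	/* RCU-safe lookup */
	}

	static void handle_drop(u32 id)
	{
		xa_erase_irq(&handle_xa, id);
	}

Unlike the old simple_idr_get() wrapper, no external spinlock is needed: the xarray carries its own lock, and the _irq variants take it with interrupts disabled.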
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 6d202cba8575..426deab22d62 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -556,7 +556,6 @@ static void handle_locking_key(struct input_dev *visorinput_dev, int keycode,
led = LED_NUML;
break;
default:
- led = -1;
return;
}
if (test_bit(led, visorinput_dev->led) != desired_state) {
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 7546d70116a0..e21e73ae3cc6 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -12,5 +12,5 @@ obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += vchiq-mmal/
-ccflags-y += -I $(srctree)/$(src)/include -D__VCCOREVER__=0x04000000
+ccflags-y += -I $(srctree)/$(src)/include
diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
index fefc664eefcf..81db7fb76d6d 100644
--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
@@ -104,7 +104,7 @@ extern enum vchiq_status vchiq_bulk_receive(unsigned int service,
enum vchiq_bulk_mode mode);
extern void *vchiq_get_service_userdata(unsigned int service);
extern enum vchiq_status vchiq_get_peer_version(unsigned int handle,
- short *peer_version);
+ short *peer_version);
extern struct vchiq_header *vchiq_msg_hold(unsigned int handle);
#endif /* VCHIQ_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 8782ebe0b39a..30d6f1a404ba 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -29,6 +29,8 @@
#define BELL0 0x00
#define BELL2 0x08
+#define ARM_DS_ACTIVE BIT(2)
+
struct vchiq_2835_state {
int inited;
struct vchiq_arm_state arm_state;
@@ -132,8 +134,9 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
- if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
- return -EINVAL;
+ err = vchiq_init_state(state, vchiq_slot_zero);
+ if (err)
+ return err;
g_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g_regs))
@@ -169,25 +172,21 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return 0;
}
-enum vchiq_status
+int
vchiq_platform_init_state(struct vchiq_state *state)
{
- enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_2835_state *platform_state;
state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
if (!state->platform_state)
- return VCHIQ_ERROR;
+ return -ENOMEM;
platform_state = (struct vchiq_2835_state *)state->platform_state;
platform_state->inited = 1;
- status = vchiq_arm_init_state(state, &platform_state->arm_state);
+ vchiq_arm_init_state(state, &platform_state->arm_state);
- if (status != VCHIQ_SUCCESS)
- platform_state->inited = 0;
-
- return status;
+ return 0;
}
struct vchiq_arm_state*
@@ -215,7 +214,7 @@ remote_event_signal(struct remote_event *event)
writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
-enum vchiq_status
+int
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
void __user *uoffset, int size, int dir)
{
@@ -227,7 +226,7 @@ vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
: PAGELIST_WRITE);
if (!pagelistinfo)
- return VCHIQ_ERROR;
+ return -ENOMEM;
bulk->data = pagelistinfo->dma_addr;
@@ -237,7 +236,7 @@ vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
*/
bulk->remote_data = pagelistinfo;
- return VCHIQ_SUCCESS;
+ return 0;
}
void
@@ -272,7 +271,7 @@ vchiq_doorbell_irq(int irq, void *dev_id)
/* Read (and clear) the doorbell */
status = readl(g_regs + BELL0);
- if (status & 0x4) { /* Was the doorbell rung? */
+ if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */
remote_event_pollall(state);
ret = IRQ_HANDLED;
}
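[Note: the vchiq_2835_arm.c hunks above convert several enum vchiq_status return values to plain ints. As a hedged sketch only (example_init is invented; it is not a vchiq function), the kernel convention being adopted is 0 on success and a negative errno on failure:]

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int example_init(void **out)
	{
		void *obj = kzalloc(64, GFP_KERNEL);

		if (!obj)
			return -ENOMEM;	/* negative errno, not a private status enum */

		*out = obj;
		return 0;		/* 0 means success */
	}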
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e39897c38e6a..b5aac862a298 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -139,22 +139,18 @@ static const char *const ioctl_names[] = {
"CLOSE_DELIVERED"
};
-vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
- (VCHIQ_IOC_MAX + 1));
+static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir);
#define VCHIQ_INIT_RETRIES 10
-enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
+int vchiq_initialise(struct vchiq_instance **instance_out)
{
- enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state;
struct vchiq_instance *instance = NULL;
- int i;
-
- vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
+ int i, ret;
/*
* VideoCore may not be ready due to boot up timing.
@@ -170,6 +166,7 @@ enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
if (i == VCHIQ_INIT_RETRIES) {
vchiq_log_error(vchiq_core_log_level,
"%s: videocore not initialized\n", __func__);
+ ret = -ENOTCONN;
goto failed;
} else if (i > 0) {
vchiq_log_warning(vchiq_core_log_level,
@@ -181,6 +178,7 @@ enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
if (!instance) {
vchiq_log_error(vchiq_core_log_level,
"%s: error allocating vchiq instance\n", __func__);
+ ret = -ENOMEM;
goto failed;
}
@@ -191,48 +189,48 @@ enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
*instance_out = instance;
- status = VCHIQ_SUCCESS;
+ ret = 0;
failed:
vchiq_log_trace(vchiq_core_log_level,
- "%s(%p): returning %d", __func__, instance, status);
+ "%s(%p): returning %d", __func__, instance, ret);
- return status;
+ return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
+static void free_bulk_waiter(struct vchiq_instance *instance)
+{
+ struct bulk_waiter_node *waiter, *next;
+
+ list_for_each_entry_safe(waiter, next,
+ &instance->bulk_waiter_list, list) {
+ list_del(&waiter->list);
+ vchiq_log_info(vchiq_arm_log_level,
+ "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
+ kfree(waiter);
+ }
+}
+
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
- enum vchiq_status status;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_state *state = instance->state;
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p) called", __func__, instance);
-
if (mutex_lock_killable(&state->mutex))
return VCHIQ_RETRY;
/* Remove all services */
- status = vchiq_shutdown_internal(state, instance);
+ vchiq_shutdown_internal(state, instance);
mutex_unlock(&state->mutex);
vchiq_log_trace(vchiq_core_log_level,
"%s(%p): returning %d", __func__, instance, status);
- if (status == VCHIQ_SUCCESS) {
- struct bulk_waiter_node *waiter, *next;
-
- list_for_each_entry_safe(waiter, next,
- &instance->bulk_waiter_list, list) {
- list_del(&waiter->list);
- vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %pK for pid %d",
- waiter, waiter->pid);
- kfree(waiter);
- }
- kfree(instance);
- }
+ free_bulk_waiter(instance);
+ kfree(instance);
return status;
}
@@ -248,9 +246,6 @@ enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
enum vchiq_status status;
struct vchiq_state *state = instance->state;
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p) called", __func__, instance);
-
if (mutex_lock_killable(&state->mutex)) {
vchiq_log_trace(vchiq_core_log_level,
"%s: call to mutex_lock failed", __func__);
@@ -272,19 +267,16 @@ failed:
}
EXPORT_SYMBOL(vchiq_connect);
-static enum vchiq_status vchiq_add_service(
- struct vchiq_instance *instance,
- const struct vchiq_service_params_kernel *params,
- unsigned int *phandle)
+static enum vchiq_status
+vchiq_add_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *phandle)
{
enum vchiq_status status;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
int srvstate;
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p) called", __func__, instance);
-
*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
srvstate = vchiq_is_connected(instance)
@@ -301,8 +293,9 @@ static enum vchiq_status vchiq_add_service(
if (service) {
*phandle = service->handle;
status = VCHIQ_SUCCESS;
- } else
+ } else {
status = VCHIQ_ERROR;
+ }
vchiq_log_trace(vchiq_core_log_level,
"%s(%p): returning %d", __func__, instance, status);
@@ -310,18 +303,15 @@ static enum vchiq_status vchiq_add_service(
return status;
}
-enum vchiq_status vchiq_open_service(
- struct vchiq_instance *instance,
- const struct vchiq_service_params_kernel *params,
- unsigned int *phandle)
+enum vchiq_status
+vchiq_open_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *phandle)
{
enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p) called", __func__, instance);
-
*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
if (!vchiq_is_connected(instance))
@@ -351,8 +341,8 @@ failed:
EXPORT_SYMBOL(vchiq_open_service);
enum vchiq_status
-vchiq_bulk_transmit(unsigned int handle, const void *data,
- unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
+vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
+ void *userdata, enum vchiq_bulk_mode mode)
{
enum vchiq_status status;
@@ -426,8 +416,8 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
EXPORT_SYMBOL(vchiq_bulk_receive);
static enum vchiq_status
-vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
- unsigned int size, enum vchiq_bulk_dir dir)
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
+ enum vchiq_bulk_dir dir)
{
struct vchiq_instance *instance;
struct vchiq_service *service;
@@ -441,7 +431,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
instance = service->instance;
- unlock_service(service);
+ vchiq_service_put(service);
mutex_lock(&instance->bulk_waiter_list_mutex);
list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
@@ -471,7 +461,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
}
}
} else {
- waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
+ waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
vchiq_log_error(vchiq_core_log_level,
"%s - out of memory", __func__);
@@ -505,11 +495,6 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
return status;
}
-/****************************************************************************
- *
- * add_completion
- *
- ***************************************************************************/
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
@@ -554,7 +539,7 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
* Take an extra reference, to be held until
* this CLOSED notification is delivered.
*/
- lock_service(user_service->service);
+ vchiq_service_get(user_service->service);
if (instance->use_close_delivered)
user_service->close_pending = 1;
}
@@ -576,12 +561,6 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
return VCHIQ_SUCCESS;
}
-/****************************************************************************
- *
- * service_callback
- *
- ***************************************************************************/
-
static enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
unsigned int handle, void *bulk_userdata)
@@ -602,7 +581,9 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
service = handle_to_service(handle);
- BUG_ON(!service);
+ if (WARN_ON(!service))
+ return VCHIQ_SUCCESS;
+
user_service = (struct user_service *)service->base.userdata;
instance = user_service->instance;
@@ -691,22 +672,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
bulk_userdata);
}
-/****************************************************************************
- *
- * user_service_free
- *
- ***************************************************************************/
static void
user_service_free(void *userdata)
{
kfree(userdata);
}
-/****************************************************************************
- *
- * close_delivered
- *
- ***************************************************************************/
static void close_delivered(struct user_service *user_service)
{
vchiq_log_info(vchiq_arm_log_level,
@@ -715,7 +686,7 @@ static void close_delivered(struct user_service *user_service)
if (user_service->close_pending) {
/* Allow the underlying service to be culled */
- unlock_service(user_service->service);
+ vchiq_service_put(user_service->service);
/* Wake the user-thread blocked in close_ or remove_service */
complete(&user_service->close_event);
@@ -769,14 +740,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
return maxsize;
}
-/**************************************************************************
- *
- * vchiq_ioc_queue_message
- *
- **************************************************************************/
static int
-vchiq_ioc_queue_message(unsigned int handle,
- struct vchiq_element *elements,
+vchiq_ioc_queue_message(unsigned int handle, struct vchiq_element *elements,
unsigned long count)
{
struct vchiq_io_copy_callback_context context;
@@ -911,15 +876,18 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
break;
}
spin_lock(&msg_queue_spinlock);
- } while (user_service->msg_remove ==
- user_service->msg_insert);
+ } while (user_service->msg_remove == user_service->msg_insert);
if (ret)
goto out;
}
- BUG_ON((int)(user_service->msg_insert -
- user_service->msg_remove) < 0);
+ if (WARN_ON_ONCE((int)(user_service->msg_insert -
+ user_service->msg_remove) < 0)) {
+ spin_unlock(&msg_queue_spinlock);
+ ret = -EINVAL;
+ goto out;
+ }
header = user_service->msg_queue[user_service->msg_remove &
(MSG_QUEUE_SIZE - 1)];
@@ -935,8 +903,9 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
header->data, header->size) == 0)) {
ret = header->size;
vchiq_release_message(service->handle, header);
- } else
+ } else {
ret = -EFAULT;
+ }
} else {
vchiq_log_error(vchiq_arm_log_level,
"header %pK: bufsize %x < size %x",
@@ -946,7 +915,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
}
DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
- unlock_service(service);
+ vchiq_service_put(service);
return ret;
}
@@ -967,8 +936,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
return -EINVAL;
if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
- waiter = kzalloc(sizeof(struct bulk_waiter_node),
- GFP_KERNEL);
+ waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
ret = -ENOMEM;
goto out;
@@ -1033,7 +1001,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
ret = put_user(mode_waiting, mode);
}
out:
- unlock_service(service);
+ vchiq_service_put(service);
if (ret)
return ret;
else if (status == VCHIQ_ERROR)
@@ -1051,6 +1019,7 @@ static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int i
if (in_compat_syscall()) {
compat_uptr_t ptr32;
compat_uptr_t __user *uptr = ubuf;
+
ret = get_user(ptr32, uptr + index);
if (ret)
return ret;
@@ -1058,6 +1027,7 @@ static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int i
*buf = compat_ptr(ptr32);
} else {
uintptr_t ptr, __user *uptr = ubuf;
+
ret = get_user(ptr, uptr + index);
if (ret)
@@ -1117,8 +1087,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
mutex_lock(&instance->completion_mutex);
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- while ((instance->completion_remove ==
- instance->completion_insert)
+ while ((instance->completion_remove == instance->completion_insert)
&& !instance->closing) {
int rc;
@@ -1212,7 +1181,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
!instance->use_close_delivered)
- unlock_service(service);
+ vchiq_service_put(service);
/*
* FIXME: address space mismatch, does bulk_userdata
@@ -1248,11 +1217,6 @@ out:
return ret;
}
-/****************************************************************************
- *
- * vchiq_ioctl
- *
- ***************************************************************************/
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -1279,7 +1243,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
while ((service = next_service_by_instance(instance->state,
instance, &i))) {
status = vchiq_remove_service(service->handle);
- unlock_service(service);
+ vchiq_service_put(service);
if (status != VCHIQ_SUCCESS)
break;
}
@@ -1379,24 +1343,24 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
service = find_service_for_instance(instance, handle);
if (service) {
- status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
vchiq_use_service_internal(service) :
vchiq_release_service_internal(service);
- if (status != VCHIQ_SUCCESS) {
+ if (ret) {
vchiq_log_error(vchiq_susp_log_level,
- "%s: cmd %s returned error %d for service %c%c%c%c:%03d",
+ "%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
__func__,
(cmd == VCHIQ_IOC_USE_SERVICE) ?
"VCHIQ_IOC_USE_SERVICE" :
"VCHIQ_IOC_RELEASE_SERVICE",
- status,
+ ret,
VCHIQ_FOURCC_AS_4CHARS(
service->base.fourcc),
service->client_id);
- ret = -EINVAL;
}
- } else
+ } else {
ret = -EINVAL;
+ }
} break;
case VCHIQ_IOC_QUEUE_MESSAGE: {
@@ -1512,8 +1476,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- status = vchiq_set_service_option(
- args.handle, args.option, args.value);
+ ret = vchiq_set_service_option(args.handle, args.option,
+ args.value);
} break;
case VCHIQ_IOC_LIB_VERSION: {
@@ -1533,8 +1497,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct user_service *user_service =
(struct user_service *)service->base.userdata;
close_delivered(user_service);
- } else
+ } else {
ret = -EINVAL;
+ }
} break;
default:
@@ -1543,7 +1508,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
if (service)
- unlock_service(service);
+ vchiq_service_put(service);
if (ret == 0) {
if (status == VCHIQ_ERROR)
@@ -1594,10 +1559,8 @@ struct vchiq_create_service32 {
_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
static long
-vchiq_compat_ioctl_create_service(
- struct file *file,
- unsigned int cmd,
- struct vchiq_create_service32 __user *ptrargs32)
+vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
+ struct vchiq_create_service32 __user *ptrargs32)
{
struct vchiq_create_service args;
struct vchiq_create_service32 args32;
@@ -1678,7 +1641,7 @@ vchiq_compat_ioctl_queue_message(struct file *file,
if (copy_from_user(&element32, args.elements,
sizeof(element32))) {
- unlock_service(service);
+ vchiq_service_put(service);
return -EFAULT;
}
@@ -1692,7 +1655,7 @@ vchiq_compat_ioctl_queue_message(struct file *file,
} else {
ret = -EINVAL;
}
- unlock_service(service);
+ vchiq_service_put(service);
return ret;
}
@@ -1834,6 +1797,7 @@ static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = compat_ptr(arg);
+
switch (cmd) {
case VCHIQ_IOC_CREATE_SERVICE32:
return vchiq_compat_ioctl_create_service(file, cmd, argp);
@@ -1927,7 +1891,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
complete(&user_service->remove_event);
vchiq_terminate_service_internal(service);
- unlock_service(service);
+ vchiq_service_put(service);
}
/* ...and wait for them to die */
@@ -1937,7 +1901,10 @@ static int vchiq_release(struct inode *inode, struct file *file)
wait_for_completion(&service->remove_event);
- BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+ if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ vchiq_service_put(service);
+ break;
+ }
spin_lock(&msg_queue_spinlock);
@@ -1956,12 +1923,11 @@ static int vchiq_release(struct inode *inode, struct file *file)
spin_unlock(&msg_queue_spinlock);
- unlock_service(service);
+ vchiq_service_put(service);
}
/* Release any closed services */
- while (instance->completion_remove !=
- instance->completion_insert) {
+ while (instance->completion_remove != instance->completion_insert) {
struct vchiq_completion_data_kernel *completion;
struct vchiq_service *service;
@@ -1975,7 +1941,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
/* Wake any blocked user-thread */
if (instance->use_close_delivered)
complete(&user_service->close_event);
- unlock_service(service);
+ vchiq_service_put(service);
}
instance->completion_remove++;
}
@@ -1983,18 +1949,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
/* Release the PEER service count. */
vchiq_release_internal(instance->state, NULL);
- {
- struct bulk_waiter_node *waiter, *next;
-
- list_for_each_entry_safe(waiter, next,
- &instance->bulk_waiter_list, list) {
- list_del(&waiter->list);
- vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %pK for pid %d",
- waiter, waiter->pid);
- kfree(waiter);
- }
- }
+ free_bulk_waiter(instance);
vchiq_debugfs_remove_instance(instance);
@@ -2005,12 +1960,6 @@ out:
return ret;
}
-/****************************************************************************
- *
- * vchiq_dump
- *
- ***************************************************************************/
-
int vchiq_dump(void *dump_context, const char *str, int len)
{
struct dump_context *context = (struct dump_context *)dump_context;
@@ -2052,12 +2001,6 @@ int vchiq_dump(void *dump_context, const char *str, int len)
return 0;
}
-/****************************************************************************
- *
- * vchiq_dump_platform_instance_state
- *
- ***************************************************************************/
-
int vchiq_dump_platform_instances(void *dump_context)
{
struct vchiq_state *state = vchiq_get_state();
@@ -2120,12 +2063,6 @@ int vchiq_dump_platform_instances(void *dump_context)
return 0;
}
-/****************************************************************************
- *
- * vchiq_dump_platform_service_state
- *
- ***************************************************************************/
-
int vchiq_dump_platform_service_state(void *dump_context,
struct vchiq_service *service)
{
@@ -2151,15 +2088,8 @@ int vchiq_dump_platform_service_state(void *dump_context,
return vchiq_dump(dump_context, buf, len + 1);
}
-/****************************************************************************
- *
- * vchiq_read
- *
- ***************************************************************************/
-
static ssize_t
-vchiq_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct dump_context context;
int err;
@@ -2183,10 +2113,10 @@ vchiq_get_state(void)
{
if (!g_state.remote)
- printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
+ pr_err("%s: g_state.remote == NULL\n", __func__);
else if (g_state.remote->initialised != 1)
- printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
- __func__, g_state.remote->initialised);
+ pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
+ __func__, g_state.remote->initialised);
return (g_state.remote &&
(g_state.remote->initialised == 1)) ? &g_state : NULL;
@@ -2210,9 +2140,8 @@ vchiq_fops = {
static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
- struct vchiq_header *header,
- unsigned int service_user,
- void *bulk_user)
+ struct vchiq_header *header,
+ unsigned int service_user, void *bulk_user)
{
vchiq_log_error(vchiq_susp_log_level,
"%s callback reason %d", __func__, reason);
@@ -2228,6 +2157,7 @@ vchiq_keepalive_thread_func(void *v)
enum vchiq_status status;
struct vchiq_instance *instance;
unsigned int ka_handle;
+ int ret;
struct vchiq_service_params_kernel params = {
.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
@@ -2236,10 +2166,10 @@ vchiq_keepalive_thread_func(void *v)
.version_min = KEEPALIVE_VER_MIN
};
- status = vchiq_initialise(&instance);
- if (status != VCHIQ_SUCCESS) {
+ ret = vchiq_initialise(&instance);
+ if (ret) {
vchiq_log_error(vchiq_susp_log_level,
- "%s vchiq_initialise failed %d", __func__, status);
+ "%s vchiq_initialise failed %d", __func__, ret);
goto exit;
}
@@ -2303,7 +2233,7 @@ exit:
return 0;
}
-enum vchiq_status
+void
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state)
{
@@ -2319,26 +2249,23 @@ vchiq_arm_init_state(struct vchiq_state *state,
arm_state->first_connect = 0;
}
- return VCHIQ_SUCCESS;
}
-enum vchiq_status
+int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- enum vchiq_status ret = VCHIQ_SUCCESS;
+ int ret = 0;
char entity[16];
int *entity_uc;
int local_uc;
if (!arm_state) {
- ret = VCHIQ_ERROR;
+ ret = -EINVAL;
goto out;
}
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
-
if (use_type == USE_TYPE_VCHIQ) {
sprintf(entity, "VCHIQ: ");
entity_uc = &arm_state->peer_use_count;
@@ -2349,7 +2276,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
entity_uc = &service->service_use_count;
} else {
vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
- ret = VCHIQ_ERROR;
+ ret = -EINVAL;
goto out;
}
@@ -2363,7 +2290,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
write_unlock_bh(&arm_state->susp_res_lock);
- if (ret == VCHIQ_SUCCESS) {
+ if (!ret) {
enum vchiq_status status = VCHIQ_SUCCESS;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
@@ -2383,21 +2310,19 @@ out:
return ret;
}
-enum vchiq_status
+int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- enum vchiq_status ret = VCHIQ_SUCCESS;
+ int ret = 0;
char entity[16];
int *entity_uc;
if (!arm_state) {
- ret = VCHIQ_ERROR;
+ ret = -EINVAL;
goto out;
}
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
-
if (service) {
sprintf(entity, "%c%c%c%c:%03d",
VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
@@ -2413,7 +2338,7 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
/* Don't use BUG_ON - don't allow user thread to crash kernel */
WARN_ON(!arm_state->videocore_use_count);
WARN_ON(!(*entity_uc));
- ret = VCHIQ_ERROR;
+ ret = -EINVAL;
goto unlock;
}
--arm_state->videocore_use_count;
@@ -2437,7 +2362,6 @@ vchiq_on_remote_use(struct vchiq_state *state)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
atomic_inc(&arm_state->ka_use_count);
complete(&arm_state->ka_evt);
}
@@ -2447,18 +2371,17 @@ vchiq_on_remote_release(struct vchiq_state *state)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
atomic_inc(&arm_state->ka_release_count);
complete(&arm_state->ka_evt);
}
-enum vchiq_status
+int
vchiq_use_service_internal(struct vchiq_service *service)
{
return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
-enum vchiq_status
+int
vchiq_release_service_internal(struct vchiq_service *service)
{
return vchiq_release_internal(service->state, service);
@@ -2521,7 +2444,7 @@ vchiq_use_service(unsigned int handle)
if (service) {
ret = vchiq_use_internal(service->state, service,
USE_TYPE_SERVICE);
- unlock_service(service);
+ vchiq_service_put(service);
}
return ret;
}
@@ -2535,7 +2458,7 @@ vchiq_release_service(unsigned int handle)
if (service) {
ret = vchiq_release_internal(service->state, service);
- unlock_service(service);
+ vchiq_service_put(service);
}
return ret;
}
@@ -2634,8 +2557,6 @@ vchiq_check_service(struct vchiq_service *service)
if (!service || !service->state)
goto out;
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
-
arm_state = vchiq_platform_get_arm_state(service->state);
read_lock_bh(&arm_state->susp_res_lock);
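[Note: the vchiq_arm.c hunks above replace BUG_ON() with WARN_ON()/WARN_ON_ONCE() plus graceful error handling. As a hedged sketch only (example_check is invented; WARN_ON_ONCE() is the real API and evaluates to the condition, so it can drive an if), the pattern is to report the impossible state once and fail the request rather than crash the kernel from a user-triggerable path:]

	#include <linux/bug.h>
	#include <linux/errno.h>

	static int example_check(int insert, int remove)
	{
		/* Warn once on an impossible queue state, then bail out. */
		if (WARN_ON_ONCE(insert - remove < 0))
			return -EINVAL;

		return 0;
	}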
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index 24c331c94354..c7d2cf1f2e68 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -67,7 +67,7 @@ int vchiq_platform_init(struct platform_device *pdev,
extern struct vchiq_state *
vchiq_get_state(void);
-extern enum vchiq_status
+extern void
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 3023fa9fdc64..0ee96d1d0481 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -52,6 +52,7 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
}
mutex_unlock(&g_connected_mutex);
}
+EXPORT_SYMBOL(vchiq_add_connected_callback);
/*
* This function is called by the vchiq stack once it has been connected to
@@ -73,4 +74,3 @@ void vchiq_call_connected_callbacks(void)
g_connected = 1;
mutex_unlock(&g_connected_mutex);
}
-EXPORT_SYMBOL(vchiq_add_connected_callback);
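[Note: the vchiq_connected.c hunk above moves EXPORT_SYMBOL() so it directly follows the function it exports, which is the usual kernel placement. Hedged sketch only; example_exported is invented:]

	#include <linux/export.h>

	void example_exported(void)
	{
	}
	EXPORT_SYMBOL(example_exported);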
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
index ec5d2b716e7a..95c18670e839 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
@@ -4,16 +4,8 @@
#ifndef VCHIQ_CONNECTED_H
#define VCHIQ_CONNECTED_H
-/* ---- Include Files ----------------------------------------------------- */
-
-/* ---- Constants and Types ---------------------------------------------- */
-
typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
-/* ---- Variable Externs ------------------------------------------------- */
-
-/* ---- Function Prototypes ---------------------------------------------- */
-
void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
void vchiq_call_connected_callbacks(void);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 517a8c9b41ed..4f43e4213bfe 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -17,6 +17,75 @@
#define VCHIQ_SLOT_HANDLER_STACK 8192
+#define VCHIQ_MSG_PADDING 0 /* - */
+#define VCHIQ_MSG_CONNECT 1 /* - */
+#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
+#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
+#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
+#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
+#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_PAUSE 10 /* - */
+#define VCHIQ_MSG_RESUME 11 /* - */
+#define VCHIQ_MSG_REMOTE_USE 12 /* - */
+#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
+#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
+
+#define TYPE_SHIFT 24
+
+#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
+#define VCHIQ_PORT_FREE 0x1000
+#define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
+#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
+ (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
+#define VCHIQ_MSG_SRCPORT(msgid) \
+ (unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
+#define VCHIQ_MSG_DSTPORT(msgid) \
+ ((unsigned short)(msgid) & 0xfff)
+
+#define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT)
+#define MAKE_OPEN(srcport) \
+ ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
+#define MAKE_OPENACK(srcport, dstport) \
+ ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define MAKE_CLOSE(srcport, dstport) \
+ ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define MAKE_DATA(srcport, dstport) \
+ ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
+#define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT)
+#define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT)
+#define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
+#define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
+
+/* Ensure the fields are wide enough */
+static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
+ == 0);
+static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
+static_assert((unsigned int)VCHIQ_PORT_MAX <
+ (unsigned int)VCHIQ_PORT_FREE);
+
+#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
+#define VCHIQ_MSGID_CLAIMED 0x40000000
+
+#define VCHIQ_FOURCC_INVALID 0x00000000
+#define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID)
+
+#define VCHIQ_BULK_ACTUAL_ABORTED -1
+
+#if VCHIQ_ENABLE_STATS
+#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
+ (service->stats. stat += addend)
+#else
+#define VCHIQ_STATS_INC(state, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
+#endif
+
#define HANDLE_STATE_SHIFT 12
#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
@@ -31,13 +100,19 @@
#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
-#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
+#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
#define SRVTRACE_LEVEL(srv) \
(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
#define SRVTRACE_ENABLED(srv, lev) \
(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
+#define NO_CLOSE_RECVD 0
+#define CLOSE_RECVD 1
+
+#define NO_RETRY_POLL 0
+#define RETRY_POLL 1
+
struct vchiq_open_payload {
int fourcc;
int client_id;
@@ -55,13 +130,28 @@ enum {
QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
};
+enum {
+ VCHIQ_POLL_TERMINATE,
+ VCHIQ_POLL_REMOVE,
+ VCHIQ_POLL_TXNOTIFY,
+ VCHIQ_POLL_RXNOTIFY,
+ VCHIQ_POLL_COUNT
+};
+
/* we require this for consistency between endpoints */
-vchiq_static_assert(sizeof(struct vchiq_header) == 8);
-vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
-vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
-vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
-vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
-vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
+static_assert(sizeof(struct vchiq_header) == 8);
+static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
+
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
+ BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
+}
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
@@ -243,7 +333,8 @@ __next_service_by_instance(struct vchiq_state *state,
while (idx < state->unused_service) {
struct vchiq_service *srv;
- srv = rcu_dereference(state->services[idx++]);
+ srv = rcu_dereference(state->services[idx]);
+ idx++;
if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
srv->instance == instance) {
service = srv;
@@ -277,7 +368,7 @@ next_service_by_instance(struct vchiq_state *state,
}
void
-lock_service(struct vchiq_service *service)
+vchiq_service_get(struct vchiq_service *service)
{
if (!service) {
WARN(1, "%s service is NULL\n", __func__);
@@ -300,7 +391,7 @@ static void service_release(struct kref *kref)
}
void
-unlock_service(struct vchiq_service *service)
+vchiq_service_put(struct vchiq_service *service)
{
if (!service) {
WARN(1, "%s: service is NULL\n", __func__);
@@ -626,6 +717,68 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
(tx_pos & VCHIQ_SLOT_MASK));
}
+static void
+process_free_data_message(struct vchiq_state *state, BITSET_T *service_found,
+ struct vchiq_header *header)
+{
+ int msgid = header->msgid;
+ int port = VCHIQ_MSG_SRCPORT(msgid);
+ struct vchiq_service_quota *quota = &state->service_quotas[port];
+ int count;
+
+ spin_lock(&quota_spinlock);
+ count = quota->message_use_count;
+ if (count > 0)
+ quota->message_use_count = count - 1;
+ spin_unlock(&quota_spinlock);
+
+ if (count == quota->message_quota) {
+ /*
+ * Signal the service that it
+ * has dropped below its quota
+ */
+ complete(&quota->quota_event);
+ } else if (count == 0) {
+ vchiq_log_error(vchiq_core_log_level,
+ "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
+ port,
+ quota->message_use_count,
+ header, msgid, header->msgid,
+ header->size);
+ WARN(1, "invalid message use count\n");
+ }
+ if (!BITSET_IS_SET(service_found, port)) {
+ /* Set the found bit for this service */
+ BITSET_SET(service_found, port);
+
+ spin_lock(&quota_spinlock);
+ count = quota->slot_use_count;
+ if (count > 0)
+ quota->slot_use_count = count - 1;
+ spin_unlock(&quota_spinlock);
+
+ if (count > 0) {
+ /*
+ * Signal the service in case
+ * it has dropped below its quota
+ */
+ complete(&quota->quota_event);
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: pfq:%d %x@%pK - slot_use->%d",
+ state->id, port,
+ header->size, header,
+ count - 1);
+ } else {
+ vchiq_log_error(vchiq_core_log_level,
+ "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
+ port, count, header,
+ msgid, header->msgid,
+ header->size);
+ WARN(1, "bad slot use count\n");
+ }
+ }
+}
+
/* Called by the recycle thread. */
static void
process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
@@ -649,11 +802,12 @@ process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
while (slot_queue_available != local->slot_queue_recycle) {
unsigned int pos;
- int slot_index = local->slot_queue[slot_queue_available++ &
+ int slot_index = local->slot_queue[slot_queue_available &
VCHIQ_SLOT_QUEUE_MASK];
char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
int data_found = 0;
+ slot_queue_available++;
/*
* Beware of the address dependency - data is calculated
* using an index written by the other side.
@@ -675,67 +829,8 @@ process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
int msgid = header->msgid;
if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
- int port = VCHIQ_MSG_SRCPORT(msgid);
- struct vchiq_service_quota *quota =
- &state->service_quotas[port];
- int count;
-
- spin_lock(&quota_spinlock);
- count = quota->message_use_count;
- if (count > 0)
- quota->message_use_count =
- count - 1;
- spin_unlock(&quota_spinlock);
-
- if (count == quota->message_quota)
- /*
- * Signal the service that it
- * has dropped below its quota
- */
- complete(&quota->quota_event);
- else if (count == 0) {
- vchiq_log_error(vchiq_core_log_level,
- "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
- port,
- quota->message_use_count,
- header, msgid, header->msgid,
- header->size);
- WARN(1, "invalid message use count\n");
- }
- if (!BITSET_IS_SET(service_found, port)) {
- /* Set the found bit for this service */
- BITSET_SET(service_found, port);
-
- spin_lock(&quota_spinlock);
- count = quota->slot_use_count;
- if (count > 0)
- quota->slot_use_count =
- count - 1;
- spin_unlock(&quota_spinlock);
-
- if (count > 0) {
- /*
- * Signal the service in case
- * it has dropped below its quota
- */
- complete(&quota->quota_event);
- vchiq_log_trace(
- vchiq_core_log_level,
- "%d: pfq:%d %x@%pK - slot_use->%d",
- state->id, port,
- header->size, header,
- count - 1);
- } else {
- vchiq_log_error(
- vchiq_core_log_level,
- "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
- port, count, header,
- msgid, header->msgid,
- header->size);
- WARN(1, "bad slot use count\n");
- }
- }
-
+ process_free_data_message(state, service_found,
+ header);
data_found = 1;
}
@@ -755,8 +850,7 @@ process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
spin_lock(&quota_spinlock);
count = state->data_use_count;
if (count > 0)
- state->data_use_count =
- count - 1;
+ state->data_use_count = count - 1;
spin_unlock(&quota_spinlock);
if (count == state->data_quota)
complete(&state->data_quota_event);
@@ -834,7 +928,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
stride = calc_stride(size);
- WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
+ WARN_ON(stride > VCHIQ_SLOT_SIZE);
if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
mutex_lock_killable(&state->slot_mutex))
@@ -897,8 +991,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
while ((quota->message_use_count == quota->message_quota) ||
((tx_end_index != quota->previous_tx_index) &&
- (quota->slot_use_count ==
- quota->slot_quota))) {
+ (quota->slot_use_count == quota->slot_quota))) {
spin_unlock(&quota_spinlock);
vchiq_log_trace(vchiq_core_log_level,
"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
@@ -1175,8 +1268,6 @@ static void
release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
struct vchiq_header *header, struct vchiq_service *service)
{
- int release_count;
-
mutex_lock(&state->recycle_mutex);
if (header) {
@@ -1192,10 +1283,9 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
}
- release_count = slot_info->release_count;
- slot_info->release_count = ++release_count;
+ slot_info->release_count++;
- if (release_count == slot_info->use_count) {
+ if (slot_info->release_count == slot_info->use_count) {
int slot_queue_recycle;
/* Add to the freed queue */
@@ -1226,6 +1316,22 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
mutex_unlock(&state->recycle_mutex);
}
+static inline enum vchiq_reason
+get_bulk_reason(struct vchiq_bulk *bulk)
+{
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
+ if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return VCHIQ_BULK_TRANSMIT_ABORTED;
+
+ return VCHIQ_BULK_TRANSMIT_DONE;
+ }
+
+ if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return VCHIQ_BULK_RECEIVE_ABORTED;
+
+ return VCHIQ_BULK_RECEIVE_DONE;
+}
+
/* Called by the slot handler - don't hold the bulk mutex */
static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
@@ -1241,69 +1347,58 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
queue->remote_notify = queue->process;
- if (status == VCHIQ_SUCCESS) {
- while (queue->remove != queue->remote_notify) {
- struct vchiq_bulk *bulk =
- &queue->bulks[BULK_INDEX(queue->remove)];
+ while (queue->remove != queue->remote_notify) {
+ struct vchiq_bulk *bulk =
+ &queue->bulks[BULK_INDEX(queue->remove)];
- /*
- * Only generate callbacks for non-dummy bulk
- * requests, and non-terminated services
- */
- if (bulk->data && service->instance) {
- if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
- if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
- VCHIQ_SERVICE_STATS_INC(service,
- bulk_tx_count);
- VCHIQ_SERVICE_STATS_ADD(service,
- bulk_tx_bytes,
- bulk->actual);
- } else {
- VCHIQ_SERVICE_STATS_INC(service,
- bulk_rx_count);
- VCHIQ_SERVICE_STATS_ADD(service,
- bulk_rx_bytes,
- bulk->actual);
- }
+ /*
+ * Only generate callbacks for non-dummy bulk
+ * requests, and non-terminated services
+ */
+ if (bulk->data && service->instance) {
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service,
+ bulk_tx_bytes,
+ bulk->actual);
} else {
VCHIQ_SERVICE_STATS_INC(service,
- bulk_aborted_count);
+ bulk_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service,
+ bulk_rx_bytes,
+ bulk->actual);
}
- if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
- struct bulk_waiter *waiter;
-
- spin_lock(&bulk_waiter_spinlock);
- waiter = bulk->userdata;
- if (waiter) {
- waiter->actual = bulk->actual;
- complete(&waiter->event);
- }
- spin_unlock(&bulk_waiter_spinlock);
- } else if (bulk->mode ==
- VCHIQ_BULK_MODE_CALLBACK) {
- enum vchiq_reason reason = (bulk->dir ==
- VCHIQ_BULK_TRANSMIT) ?
- ((bulk->actual ==
- VCHIQ_BULK_ACTUAL_ABORTED) ?
- VCHIQ_BULK_TRANSMIT_ABORTED :
- VCHIQ_BULK_TRANSMIT_DONE) :
- ((bulk->actual ==
- VCHIQ_BULK_ACTUAL_ABORTED) ?
- VCHIQ_BULK_RECEIVE_ABORTED :
- VCHIQ_BULK_RECEIVE_DONE);
- status = make_service_callback(service,
- reason, NULL, bulk->userdata);
- if (status == VCHIQ_RETRY)
- break;
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_aborted_count);
+ }
+ if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ struct bulk_waiter *waiter;
+
+ spin_lock(&bulk_waiter_spinlock);
+ waiter = bulk->userdata;
+ if (waiter) {
+ waiter->actual = bulk->actual;
+ complete(&waiter->event);
}
+ spin_unlock(&bulk_waiter_spinlock);
+ } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
+ enum vchiq_reason reason =
+ get_bulk_reason(bulk);
+ status = make_service_callback(service,
+ reason, NULL, bulk->userdata);
+ if (status == VCHIQ_RETRY)
+ break;
}
-
- queue->remove++;
- complete(&service->bulk_remove_event);
}
- if (!retry_poll)
- status = VCHIQ_SUCCESS;
+
+ queue->remove++;
+ complete(&service->bulk_remove_event);
}
+ if (!retry_poll)
+ status = VCHIQ_SUCCESS;
if (status == VCHIQ_RETRY)
request_poll(service->state, service,
@@ -1313,74 +1408,70 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
return status;
}
-/* Called by the slot handler thread */
static void
-poll_services(struct vchiq_state *state)
+poll_services_of_group(struct vchiq_state *state, int group)
{
- int group, i;
-
- for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
- u32 flags;
-
- flags = atomic_xchg(&state->poll_services[group], 0);
- for (i = 0; flags; i++) {
- if (flags & BIT(i)) {
- struct vchiq_service *service =
- find_service_by_port(state,
- (group<<5) + i);
- u32 service_flags;
-
- flags &= ~BIT(i);
- if (!service)
- continue;
- service_flags =
- atomic_xchg(&service->poll_flags, 0);
- if (service_flags &
- BIT(VCHIQ_POLL_REMOVE)) {
- vchiq_log_info(vchiq_core_log_level,
- "%d: ps - remove %d<->%d",
- state->id, service->localport,
- service->remoteport);
-
- /*
- * Make it look like a client, because
- * it must be removed and not left in
- * the LISTENING state.
- */
- service->public_fourcc =
- VCHIQ_FOURCC_INVALID;
-
- if (vchiq_close_service_internal(
- service, 0/*!close_recvd*/) !=
- VCHIQ_SUCCESS)
- request_poll(state, service,
- VCHIQ_POLL_REMOVE);
- } else if (service_flags &
- BIT(VCHIQ_POLL_TERMINATE)) {
- vchiq_log_info(vchiq_core_log_level,
- "%d: ps - terminate %d<->%d",
- state->id, service->localport,
- service->remoteport);
- if (vchiq_close_service_internal(
- service, 0/*!close_recvd*/) !=
- VCHIQ_SUCCESS)
- request_poll(state, service,
- VCHIQ_POLL_TERMINATE);
- }
- if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
- notify_bulks(service,
- &service->bulk_tx,
- 1/*retry_poll*/);
- if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
- notify_bulks(service,
- &service->bulk_rx,
- 1/*retry_poll*/);
- unlock_service(service);
- }
+ u32 flags = atomic_xchg(&state->poll_services[group], 0);
+ int i;
+
+ for (i = 0; flags; i++) {
+ struct vchiq_service *service;
+ u32 service_flags;
+
+ if ((flags & BIT(i)) == 0)
+ continue;
+
+ service = find_service_by_port(state, (group << 5) + i);
+ flags &= ~BIT(i);
+
+ if (!service)
+ continue;
+
+ service_flags = atomic_xchg(&service->poll_flags, 0);
+ if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
+ vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
+ state->id, service->localport,
+ service->remoteport);
+
+ /*
+ * Make it look like a client, because
+ * it must be removed and not left in
+ * the LISTENING state.
+ */
+ service->public_fourcc = VCHIQ_FOURCC_INVALID;
+
+ if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
+ VCHIQ_SUCCESS)
+ request_poll(state, service, VCHIQ_POLL_REMOVE);
+ } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: ps - terminate %d<->%d",
+ state->id, service->localport,
+ service->remoteport);
+ if (vchiq_close_service_internal(
+ service, NO_CLOSE_RECVD) !=
+ VCHIQ_SUCCESS)
+ request_poll(state, service,
+ VCHIQ_POLL_TERMINATE);
}
+ if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
+ notify_bulks(service, &service->bulk_tx, RETRY_POLL);
+ if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
+ notify_bulks(service, &service->bulk_rx, RETRY_POLL);
+ vchiq_service_put(service);
}
}
+/* Called by the slot handler thread */
+static void
+poll_services(struct vchiq_state *state)
+{
+ int group;
+
+ for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
+ poll_services_of_group(state, group);
+}
+
/* Called with the bulk_mutex held */
static void
abort_outstanding_bulks(struct vchiq_service *service,
@@ -1393,8 +1484,8 @@ abort_outstanding_bulks(struct vchiq_service *service,
service->state->id, service->localport, is_tx ? 't' : 'r',
queue->local_insert, queue->remote_insert, queue->process);
- WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
- WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
+ WARN_ON((int)(queue->local_insert - queue->process) < 0);
+ WARN_ON((int)(queue->remote_insert - queue->process) < 0);
while ((queue->process != queue->local_insert) ||
(queue->process != queue->remote_insert)) {
@@ -1435,103 +1526,92 @@ abort_outstanding_bulks(struct vchiq_service *service,
static int
parse_open(struct vchiq_state *state, struct vchiq_header *header)
{
+ const struct vchiq_open_payload *payload;
struct vchiq_service *service = NULL;
int msgid, size;
- unsigned int localport, remoteport;
+ unsigned int localport, remoteport, fourcc;
+ short version, version_min;
msgid = header->msgid;
size = header->size;
localport = VCHIQ_MSG_DSTPORT(msgid);
remoteport = VCHIQ_MSG_SRCPORT(msgid);
- if (size >= sizeof(struct vchiq_open_payload)) {
- const struct vchiq_open_payload *payload =
- (struct vchiq_open_payload *)header->data;
- unsigned int fourcc;
-
- fourcc = payload->fourcc;
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
- state->id, header, localport,
- VCHIQ_FOURCC_AS_4CHARS(fourcc));
-
- service = get_listening_service(state, fourcc);
+ if (size < sizeof(struct vchiq_open_payload))
+ goto fail_open;
- if (service) {
- /* A matching service exists */
- short version = payload->version;
- short version_min = payload->version_min;
-
- if ((service->version < version_min) ||
- (version < service->version_min)) {
- /* Version mismatch */
- vchiq_loud_error_header();
- vchiq_loud_error("%d: service %d (%c%c%c%c) "
- "version mismatch - local (%d, min %d)"
- " vs. remote (%d, min %d)",
- state->id, service->localport,
- VCHIQ_FOURCC_AS_4CHARS(fourcc),
- service->version, service->version_min,
- version, version_min);
- vchiq_loud_error_footer();
- unlock_service(service);
- service = NULL;
- goto fail_open;
- }
- service->peer_version = version;
+ payload = (struct vchiq_open_payload *)header->data;
+ fourcc = payload->fourcc;
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
+ state->id, header, localport,
+ VCHIQ_FOURCC_AS_4CHARS(fourcc));
- if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
- struct vchiq_openack_payload ack_payload = {
- service->version
- };
-
- if (state->version_common <
- VCHIQ_VERSION_SYNCHRONOUS_MODE)
- service->sync = 0;
-
- /* Acknowledge the OPEN */
- if (service->sync) {
- if (queue_message_sync(
- state,
- NULL,
- VCHIQ_MAKE_MSG(
- VCHIQ_MSG_OPENACK,
- service->localport,
- remoteport),
- memcpy_copy_callback,
- &ack_payload,
- sizeof(ack_payload),
- 0) == VCHIQ_RETRY)
- goto bail_not_ready;
- } else {
- if (queue_message(state,
- NULL,
- VCHIQ_MAKE_MSG(
- VCHIQ_MSG_OPENACK,
- service->localport,
- remoteport),
- memcpy_copy_callback,
- &ack_payload,
- sizeof(ack_payload),
- 0) == VCHIQ_RETRY)
- goto bail_not_ready;
- }
+ service = get_listening_service(state, fourcc);
+ if (!service)
+ goto fail_open;
- /* The service is now open */
- vchiq_set_service_state(service,
- service->sync ? VCHIQ_SRVSTATE_OPENSYNC
- : VCHIQ_SRVSTATE_OPEN);
- }
+ /* A matching service exists */
+ version = payload->version;
+ version_min = payload->version_min;
- /* Success - the message has been dealt with */
- unlock_service(service);
- return 1;
+ if ((service->version < version_min) ||
+ (version < service->version_min)) {
+ /* Version mismatch */
+ vchiq_loud_error_header();
+ vchiq_loud_error("%d: service %d (%c%c%c%c) "
+ "version mismatch - local (%d, min %d)"
+ " vs. remote (%d, min %d)",
+ state->id, service->localport,
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
+ service->version, service->version_min,
+ version, version_min);
+ vchiq_loud_error_footer();
+ vchiq_service_put(service);
+ service = NULL;
+ goto fail_open;
+ }
+ service->peer_version = version;
+
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+ struct vchiq_openack_payload ack_payload = {
+ service->version
+ };
+ int openack_id = MAKE_OPENACK(service->localport, remoteport);
+
+ if (state->version_common <
+ VCHIQ_VERSION_SYNCHRONOUS_MODE)
+ service->sync = 0;
+
+ /* Acknowledge the OPEN */
+ if (service->sync) {
+ if (queue_message_sync(state, NULL, openack_id,
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload),
+ 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+ } else {
+ if (queue_message(state, NULL, openack_id,
+ memcpy_copy_callback,
+ &ack_payload,
+ sizeof(ack_payload),
+ 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
}
+
+ /* The service is now open */
+ vchiq_set_service_state(service,
+ service->sync ? VCHIQ_SRVSTATE_OPENSYNC
+ : VCHIQ_SRVSTATE_OPEN);
}
+ /* Success - the message has been dealt with */
+ vchiq_service_put(service);
+ return 1;
+
fail_open:
/* No available service, or an invalid request - send a CLOSE */
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
+ if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
NULL, NULL, 0, 0) == VCHIQ_RETRY)
goto bail_not_ready;
@@ -1539,339 +1619,364 @@ fail_open:
bail_not_ready:
if (service)
- unlock_service(service);
+ vchiq_service_put(service);
return 0;
}
-/* Called by the slot handler thread */
-static void
-parse_rx_slots(struct vchiq_state *state)
+/**
+ * parse_message() - parses a single message from the rx slot
+ * @state: vchiq state struct
+ * @header: message header
+ *
+ * Context: Process context
+ *
+ * Return:
+ * * >= 0 - size of the parsed message payload (without header)
+ * * -EINVAL - fatal error occurred, bail out is required
+ */
+static int
+parse_message(struct vchiq_state *state, struct vchiq_header *header)
{
- struct vchiq_shared_state *remote = state->remote;
struct vchiq_service *service = NULL;
- int tx_pos;
+ unsigned int localport, remoteport;
+ int msgid, size, type, ret = -EINVAL;
DEBUG_INITIALISE(state->local)
- tx_pos = remote->tx_pos;
-
- while (state->rx_pos != tx_pos) {
- struct vchiq_header *header;
- int msgid, size;
- int type;
- unsigned int localport, remoteport;
-
- DEBUG_TRACE(PARSE_LINE);
- if (!state->rx_data) {
- int rx_index;
-
- WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
- rx_index = remote->slot_queue[
- SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
- state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
- rx_index);
- state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
+ DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
+ msgid = header->msgid;
+ DEBUG_VALUE(PARSE_MSGID, msgid);
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+ if (type != VCHIQ_MSG_DATA)
+ VCHIQ_STATS_INC(state, ctrl_rx_count);
+
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ case VCHIQ_MSG_CLOSE:
+ case VCHIQ_MSG_DATA:
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX:
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ service = find_service_by_port(state, localport);
+ if ((!service ||
+ ((service->remoteport != remoteport) &&
+ (service->remoteport != VCHIQ_PORT_FREE))) &&
+ (localport == 0) &&
+ (type == VCHIQ_MSG_CLOSE)) {
/*
- * Initialise use_count to one, and increment
- * release_count at the end of the slot to avoid
- * releasing the slot prematurely.
+ * This could be a CLOSE from a client which
+ * hadn't yet received the OPENACK - look for
+ * the connected service
*/
- state->rx_info->use_count = 1;
- state->rx_info->release_count = 0;
+ if (service)
+ vchiq_service_put(service);
+ service = get_connected_service(state,
+ remoteport);
+ if (service)
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: prs %s@%pK (%d->%d) - found connected service %d",
+ state->id, msg_type_str(type),
+ header, remoteport, localport,
+ service->localport);
}
- header = (struct vchiq_header *)(state->rx_data +
- (state->rx_pos & VCHIQ_SLOT_MASK));
- DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
- msgid = header->msgid;
- DEBUG_VALUE(PARSE_MSGID, msgid);
- size = header->size;
- type = VCHIQ_MSG_TYPE(msgid);
- localport = VCHIQ_MSG_DSTPORT(msgid);
- remoteport = VCHIQ_MSG_SRCPORT(msgid);
-
- if (type != VCHIQ_MSG_DATA)
- VCHIQ_STATS_INC(state, ctrl_rx_count);
+ if (!service) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
+ state->id, msg_type_str(type),
+ header, remoteport, localport,
+ localport);
+ goto skip_message;
+ }
+ break;
+ default:
+ break;
+ }
- switch (type) {
- case VCHIQ_MSG_OPENACK:
- case VCHIQ_MSG_CLOSE:
- case VCHIQ_MSG_DATA:
- case VCHIQ_MSG_BULK_RX:
- case VCHIQ_MSG_BULK_TX:
- case VCHIQ_MSG_BULK_RX_DONE:
- case VCHIQ_MSG_BULK_TX_DONE:
- service = find_service_by_port(state, localport);
- if ((!service ||
- ((service->remoteport != remoteport) &&
- (service->remoteport != VCHIQ_PORT_FREE))) &&
- (localport == 0) &&
- (type == VCHIQ_MSG_CLOSE)) {
- /*
- * This could be a CLOSE from a client which
- * hadn't yet received the OPENACK - look for
- * the connected service
- */
- if (service)
- unlock_service(service);
- service = get_connected_service(state,
- remoteport);
- if (service)
- vchiq_log_warning(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) - found connected service %d",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- service->localport);
- }
+ if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
+ int svc_fourcc;
- if (!service) {
- vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- localport);
- goto skip_message;
- }
- break;
- default:
- break;
- }
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ vchiq_log_info(SRVTRACE_LEVEL(service),
+ "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
+ msg_type_str(type), type,
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
+ min(16, size));
+ }
- if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
- int svc_fourcc;
+ if (((unsigned long)header & VCHIQ_SLOT_MASK) +
+ calc_stride(size) > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "header %pK (msgid %x) - size %x too big for slot",
+ header, (unsigned int)msgid,
+ (unsigned int)size);
+ WARN(1, "oversized for slot\n");
+ }
- svc_fourcc = service
- ? service->base.fourcc
- : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_info(SRVTRACE_LEVEL(service),
- "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
- msg_type_str(type), type,
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- remoteport, localport, size);
- if (size > 0)
- vchiq_log_dump_mem("Rcvd", 0, header->data,
- min(16, size));
+ switch (type) {
+ case VCHIQ_MSG_OPEN:
+ WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
+ if (!parse_open(state, header))
+ goto bail_not_ready;
+ break;
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
}
-
- if (((unsigned long)header & VCHIQ_SLOT_MASK) +
- calc_stride(size) > VCHIQ_SLOT_SIZE) {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_OPEN);
+ complete(&service->remove_event);
+ } else {
vchiq_log_error(vchiq_core_log_level,
- "header %pK (msgid %x) - size %x too big for slot",
- header, (unsigned int)msgid,
- (unsigned int)size);
- WARN(1, "oversized for slot\n");
+ "OPENACK received in state %s",
+ srvstate_names[service->srvstate]);
}
+ break;
+ case VCHIQ_MSG_CLOSE:
+ WARN_ON(size); /* There should be no data */
- switch (type) {
- case VCHIQ_MSG_OPEN:
- WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
- if (!parse_open(state, header))
- goto bail_not_ready;
- break;
- case VCHIQ_MSG_OPENACK:
- if (size >= sizeof(struct vchiq_openack_payload)) {
- const struct vchiq_openack_payload *payload =
- (struct vchiq_openack_payload *)
- header->data;
- service->peer_version = payload->version;
- }
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
- if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
- service->remoteport = remoteport;
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_OPEN);
- complete(&service->remove_event);
- } else
- vchiq_log_error(vchiq_core_log_level,
- "OPENACK received in state %s",
- srvstate_names[service->srvstate]);
- break;
- case VCHIQ_MSG_CLOSE:
- WARN_ON(size != 0); /* There should be no data */
-
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs CLOSE@%pK (%d->%d)",
- state->id, header, remoteport, localport);
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs CLOSE@%pK (%d->%d)",
+ state->id, header, remoteport, localport);
- mark_service_closing_internal(service, 1);
+ mark_service_closing_internal(service, 1);
- if (vchiq_close_service_internal(service,
- 1/*close_recvd*/) == VCHIQ_RETRY)
- goto bail_not_ready;
+ if (vchiq_close_service_internal(service,
+ CLOSE_RECVD) == VCHIQ_RETRY)
+ goto bail_not_ready;
- vchiq_log_info(vchiq_core_log_level,
- "Close Service %c%c%c%c s:%u d:%d",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->localport,
- service->remoteport);
- break;
- case VCHIQ_MSG_DATA:
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs DATA@%pK,%x (%d->%d)",
- state->id, header, size, remoteport, localport);
-
- if ((service->remoteport == remoteport) &&
- (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
- header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
- claim_slot(state->rx_info);
+ vchiq_log_info(vchiq_core_log_level,
+ "Close Service %c%c%c%c s:%u d:%d",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->localport,
+ service->remoteport);
+ break;
+ case VCHIQ_MSG_DATA:
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
+
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
+ header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
+ claim_slot(state->rx_info);
+ DEBUG_TRACE(PARSE_LINE);
+ if (make_service_callback(service,
+ VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY) {
DEBUG_TRACE(PARSE_LINE);
- if (make_service_callback(service,
- VCHIQ_MESSAGE_AVAILABLE, header,
- NULL) == VCHIQ_RETRY) {
- DEBUG_TRACE(PARSE_LINE);
- goto bail_not_ready;
- }
- VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
- VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
- size);
- } else {
- VCHIQ_STATS_INC(state, error_count);
+ goto bail_not_ready;
}
- break;
- case VCHIQ_MSG_CONNECT:
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs CONNECT@%pK", state->id, header);
- state->version_common = ((struct vchiq_slot_zero *)
- state->slot_data)->version;
- complete(&state->connect);
- break;
- case VCHIQ_MSG_BULK_RX:
- case VCHIQ_MSG_BULK_TX:
- /*
- * We should never receive a bulk request from the
- * other side since we're not setup to perform as the
- * master.
- */
- WARN_ON(1);
- break;
- case VCHIQ_MSG_BULK_RX_DONE:
- case VCHIQ_MSG_BULK_TX_DONE:
- if ((service->remoteport == remoteport) &&
- (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
- struct vchiq_bulk_queue *queue;
- struct vchiq_bulk *bulk;
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
+ size);
+ } else {
+ VCHIQ_STATS_INC(state, error_count);
+ }
+ break;
+ case VCHIQ_MSG_CONNECT:
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs CONNECT@%pK", state->id, header);
+ state->version_common = ((struct vchiq_slot_zero *)
+ state->slot_data)->version;
+ complete(&state->connect);
+ break;
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX:
+ /*
+ * We should never receive a bulk request from the
+ * other side since we're not setup to perform as the
+ * master.
+ */
+ WARN_ON(1);
+ break;
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ struct vchiq_bulk_queue *queue;
+ struct vchiq_bulk *bulk;
- queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
- &service->bulk_rx : &service->bulk_tx;
+ queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ &service->bulk_rx : &service->bulk_tx;
+ DEBUG_TRACE(PARSE_LINE);
+ if (mutex_lock_killable(&service->bulk_mutex)) {
DEBUG_TRACE(PARSE_LINE);
- if (mutex_lock_killable(&service->bulk_mutex)) {
- DEBUG_TRACE(PARSE_LINE);
- goto bail_not_ready;
- }
- if ((int)(queue->remote_insert -
- queue->local_insert) >= 0) {
- vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- queue->remote_insert,
- queue->local_insert);
- mutex_unlock(&service->bulk_mutex);
- break;
- }
- if (queue->process != queue->remote_insert) {
- pr_err("%s: p %x != ri %x\n",
- __func__,
- queue->process,
- queue->remote_insert);
- mutex_unlock(&service->bulk_mutex);
- goto bail_not_ready;
- }
-
- bulk = &queue->bulks[
- BULK_INDEX(queue->remote_insert)];
- bulk->actual = *(int *)header->data;
- queue->remote_insert++;
-
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) %x@%pad",
+ goto bail_not_ready;
+ }
+ if ((int)(queue->remote_insert -
+ queue->local_insert) >= 0) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
state->id, msg_type_str(type),
header, remoteport, localport,
- bulk->actual, &bulk->data);
-
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs:%d %cx li=%x ri=%x p=%x",
- state->id, localport,
- (type == VCHIQ_MSG_BULK_RX_DONE) ?
- 'r' : 't',
- queue->local_insert,
- queue->remote_insert, queue->process);
-
- DEBUG_TRACE(PARSE_LINE);
- WARN_ON(queue->process == queue->local_insert);
- vchiq_complete_bulk(bulk);
- queue->process++;
+ queue->remote_insert,
+ queue->local_insert);
mutex_unlock(&service->bulk_mutex);
- DEBUG_TRACE(PARSE_LINE);
- notify_bulks(service, queue, 1/*retry_poll*/);
- DEBUG_TRACE(PARSE_LINE);
- }
- break;
- case VCHIQ_MSG_PADDING:
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PADDING@%pK,%x",
- state->id, header, size);
- break;
- case VCHIQ_MSG_PAUSE:
- /* If initiated, signal the application thread */
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PAUSE@%pK,%x",
- state->id, header, size);
- if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
- vchiq_log_error(vchiq_core_log_level,
- "%d: PAUSE received in state PAUSED",
- state->id);
break;
}
- if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
- /* Send a PAUSE in response */
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
- NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
- == VCHIQ_RETRY)
- goto bail_not_ready;
+ if (queue->process != queue->remote_insert) {
+ pr_err("%s: p %x != ri %x\n",
+ __func__,
+ queue->process,
+ queue->remote_insert);
+ mutex_unlock(&service->bulk_mutex);
+ goto bail_not_ready;
}
- /* At this point slot_mutex is held */
- vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
- break;
- case VCHIQ_MSG_RESUME:
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs RESUME@%pK,%x",
- state->id, header, size);
- /* Release the slot mutex */
- mutex_unlock(&state->slot_mutex);
- vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
- break;
- case VCHIQ_MSG_REMOTE_USE:
- vchiq_on_remote_use(state);
- break;
- case VCHIQ_MSG_REMOTE_RELEASE:
- vchiq_on_remote_release(state);
- break;
- case VCHIQ_MSG_REMOTE_USE_ACTIVE:
- break;
+ bulk = &queue->bulks[
+ BULK_INDEX(queue->remote_insert)];
+ bulk->actual = *(int *)header->data;
+ queue->remote_insert++;
- default:
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs %s@%pK (%d->%d) %x@%pad",
+ state->id, msg_type_str(type),
+ header, remoteport, localport,
+ bulk->actual, &bulk->data);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs:%d %cx li=%x ri=%x p=%x",
+ state->id, localport,
+ (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ 'r' : 't',
+ queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ DEBUG_TRACE(PARSE_LINE);
+ WARN_ON(queue->process == queue->local_insert);
+ vchiq_complete_bulk(bulk);
+ queue->process++;
+ mutex_unlock(&service->bulk_mutex);
+ DEBUG_TRACE(PARSE_LINE);
+ notify_bulks(service, queue, RETRY_POLL);
+ DEBUG_TRACE(PARSE_LINE);
+ }
+ break;
+ case VCHIQ_MSG_PADDING:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs PADDING@%pK,%x",
+ state->id, header, size);
+ break;
+ case VCHIQ_MSG_PAUSE:
+ /* If initiated, signal the application thread */
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs PAUSE@%pK,%x",
+ state->id, header, size);
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs invalid msgid %x@%pK,%x",
- state->id, msgid, header, size);
- WARN(1, "invalid message\n");
+ "%d: PAUSE received in state PAUSED",
+ state->id);
break;
}
+ if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
+ /* Send a PAUSE in response */
+ if (queue_message(state, NULL, MAKE_PAUSE,
+ NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
+ == VCHIQ_RETRY)
+ goto bail_not_ready;
+ }
+ /* At this point slot_mutex is held */
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
+ break;
+ case VCHIQ_MSG_RESUME:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs RESUME@%pK,%x",
+ state->id, header, size);
+ /* Release the slot mutex */
+ mutex_unlock(&state->slot_mutex);
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ break;
+
+ case VCHIQ_MSG_REMOTE_USE:
+ vchiq_on_remote_use(state);
+ break;
+ case VCHIQ_MSG_REMOTE_RELEASE:
+ vchiq_on_remote_release(state);
+ break;
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE:
+ break;
+
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs invalid msgid %x@%pK,%x",
+ state->id, msgid, header, size);
+ WARN(1, "invalid message\n");
+ break;
+ }
skip_message:
- if (service) {
- unlock_service(service);
- service = NULL;
+ ret = size;
+
+bail_not_ready:
+ if (service)
+ vchiq_service_put(service);
+
+ return ret;
+}
+
+/* Called by the slot handler thread */
+static void
+parse_rx_slots(struct vchiq_state *state)
+{
+ struct vchiq_shared_state *remote = state->remote;
+ int tx_pos;
+
+ DEBUG_INITIALISE(state->local)
+
+ tx_pos = remote->tx_pos;
+
+ while (state->rx_pos != tx_pos) {
+ struct vchiq_header *header;
+ int size;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (!state->rx_data) {
+ int rx_index;
+
+ WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
+ rx_index = remote->slot_queue[
+ SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
+ state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
+ rx_index);
+ state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
+
+ /*
+ * Initialise use_count to one, and increment
+ * release_count at the end of the slot to avoid
+ * releasing the slot prematurely.
+ */
+ state->rx_info->use_count = 1;
+ state->rx_info->release_count = 0;
}
+ header = (struct vchiq_header *)(state->rx_data +
+ (state->rx_pos & VCHIQ_SLOT_MASK));
+ size = parse_message(state, header);
+ if (size < 0)
+ return;
+
state->rx_pos += calc_stride(size);
DEBUG_TRACE(PARSE_LINE);
@@ -1885,10 +1990,56 @@ skip_message:
state->rx_data = NULL;
}
}
+}
-bail_not_ready:
- if (service)
- unlock_service(service);
+/**
+ * handle_poll() - handle service polling and other rare conditions
+ * @state: vchiq state struct
+ *
+ * Context: Process context
+ *
+ * Return:
+ * * 0 - poll handled successfully
+ * * -EAGAIN - retry later
+ */
+static int
+handle_poll(struct vchiq_state *state)
+{
+ switch (state->conn_state) {
+ case VCHIQ_CONNSTATE_CONNECTED:
+ /* Poll the services as requested */
+ poll_services(state);
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSING:
+ if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
+ QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
+ } else {
+ /* Retry later */
+ return -EAGAIN;
+ }
+ break;
+
+ case VCHIQ_CONNSTATE_RESUMING:
+ if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
+ QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ } else {
+ /*
+ * This should really be impossible,
+ * since the PAUSE should have flushed
+ * through outstanding messages.
+ */
+ vchiq_log_error(vchiq_core_log_level,
+ "Failed to send RESUME message");
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
}
/* Called by the slot handler thread */
@@ -1909,54 +2060,14 @@ slot_handler_func(void *v)
DEBUG_TRACE(SLOT_HANDLER_LINE);
if (state->poll_needed) {
-
state->poll_needed = 0;
/*
* Handle service polling and other rare conditions here
* out of the mainline code
*/
- switch (state->conn_state) {
- case VCHIQ_CONNSTATE_CONNECTED:
- /* Poll the services as requested */
- poll_services(state);
- break;
-
- case VCHIQ_CONNSTATE_PAUSING:
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
- NULL, NULL, 0,
- QMFLAGS_NO_MUTEX_UNLOCK)
- != VCHIQ_RETRY) {
- vchiq_set_conn_state(state,
- VCHIQ_CONNSTATE_PAUSE_SENT);
- } else {
- /* Retry later */
- state->poll_needed = 1;
- }
- break;
-
- case VCHIQ_CONNSTATE_RESUMING:
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
- NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
- != VCHIQ_RETRY) {
- vchiq_set_conn_state(state,
- VCHIQ_CONNSTATE_CONNECTED);
- } else {
- /*
- * This should really be impossible,
- * since the PAUSE should have flushed
- * through outstanding messages.
- */
- vchiq_log_error(vchiq_core_log_level,
- "Failed to send RESUME message");
- }
- break;
- default:
- break;
- }
-
+ if (handle_poll(state) == -EAGAIN)
+ state->poll_needed = 1;
}
DEBUG_TRACE(SLOT_HANDLER_LINE);
@@ -2070,8 +2181,7 @@ sync_func(void *v)
state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
- (service->srvstate ==
- VCHIQ_SRVSTATE_OPENSYNC)) {
+ (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
if (make_service_callback(service,
VCHIQ_MESSAGE_AVAILABLE, header,
NULL) == VCHIQ_RETRY)
@@ -2089,7 +2199,7 @@ sync_func(void *v)
break;
}
- unlock_service(service);
+ vchiq_service_put(service);
}
return 0;
@@ -2118,9 +2228,11 @@ vchiq_init_slots(void *mem_base, int mem_size)
(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
struct vchiq_slot_zero *slot_zero =
(struct vchiq_slot_zero *)(mem_base + mem_align);
- int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
+ int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
+ check_sizes();
+
/* Ensure there is enough memory to run an absolutely minimum system */
num_slots -= first_data_slot;
@@ -2143,26 +2255,25 @@ vchiq_init_slots(void *mem_base, int mem_size)
slot_zero->master.slot_sync = first_data_slot;
slot_zero->master.slot_first = first_data_slot + 1;
- slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
- slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
- slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
+ slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
+ slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
+ slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
return slot_zero;
}
-enum vchiq_status
+int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
{
struct vchiq_shared_state *local;
struct vchiq_shared_state *remote;
- enum vchiq_status status;
char threadname[16];
- int i;
+ int i, ret;
if (vchiq_states[0]) {
pr_err("%s: VCHIQ state already initialized\n", __func__);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
local = &slot_zero->slave;
@@ -2175,7 +2286,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
else
vchiq_loud_error("master/slave mismatch two slaves");
vchiq_loud_error_footer();
- return VCHIQ_ERROR;
+ return -EINVAL;
}
memset(state, 0, sizeof(struct vchiq_state));
@@ -2206,17 +2317,17 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
state->slot_queue_available = 0;
for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
- struct vchiq_service_quota *quota =
- &state->service_quotas[i];
+ struct vchiq_service_quota *quota = &state->service_quotas[i];
init_completion(&quota->quota_event);
}
for (i = local->slot_first; i <= local->slot_last; i++) {
- local->slot_queue[state->slot_queue_available++] = i;
+ local->slot_queue[state->slot_queue_available] = i;
+ state->slot_queue_available++;
complete(&state->slot_available_event);
}
- state->default_slot_quota = state->slot_queue_available/2;
+ state->default_slot_quota = state->slot_queue_available / 2;
state->default_message_quota =
min((unsigned short)(state->default_slot_quota * 256),
(unsigned short)~0);
@@ -2240,9 +2351,9 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
- status = vchiq_platform_init_state(state);
- if (status != VCHIQ_SUCCESS)
- return VCHIQ_ERROR;
+ ret = vchiq_platform_init_state(state);
+ if (ret)
+ return ret;
/*
* bring up slot handler thread
@@ -2256,7 +2367,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
- return VCHIQ_ERROR;
+ return PTR_ERR(state->slot_handler_thread);
}
set_user_nice(state->slot_handler_thread, -19);
@@ -2268,6 +2379,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
+ ret = PTR_ERR(state->recycle_thread);
goto fail_free_handler_thread;
}
set_user_nice(state->recycle_thread, -19);
@@ -2280,6 +2392,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
vchiq_loud_error_footer();
+ ret = PTR_ERR(state->sync_thread);
goto fail_free_recycle_thread;
}
set_user_nice(state->sync_thread, -20);
@@ -2293,14 +2406,14 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
/* Indicate readiness to the other side */
local->initialised = 1;
- return status;
+ return 0;
fail_free_recycle_thread:
kthread_stop(state->recycle_thread);
fail_free_handler_thread:
kthread_stop(state->slot_handler_thread);
- return VCHIQ_ERROR;
+ return ret;
}
void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
@@ -2314,7 +2427,8 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
flush_signals(current);
}
- pos = service->msg_queue_write++ & (VCHIQ_MAX_SLOTS - 1);
+ pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
+ service->msg_queue_write++;
service->msg_queue[pos] = header;
complete(&service->msg_queue_push);
@@ -2335,7 +2449,8 @@ struct vchiq_header *vchiq_msg_hold(unsigned int handle)
flush_signals(current);
}
- pos = service->msg_queue_read++ & (VCHIQ_MAX_SLOTS - 1);
+ pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
+ service->msg_queue_read++;
header = service->msg_queue[pos];
complete(&service->msg_queue_pop);
@@ -2440,11 +2555,11 @@ vchiq_add_service_internal(struct vchiq_state *state,
struct vchiq_service *srv;
srv = rcu_dereference(state->services[i]);
- if (!srv)
+ if (!srv) {
pservice = &state->services[i];
- else if ((srv->public_fourcc == params->fourcc) &&
- ((srv->instance != instance) ||
- (srv->base.callback != params->callback))) {
+ } else if ((srv->public_fourcc == params->fourcc) &&
+ ((srv->instance != instance) ||
+ (srv->base.callback != params->callback))) {
/*
* There is another server using this
* fourcc which doesn't match.
@@ -2514,10 +2629,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
service->client_id = client_id;
vchiq_use_service_internal(service);
status = queue_message(service->state,
- NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
- service->localport,
- 0),
+ NULL, MAKE_OPEN(service->localport),
memcpy_copy_callback,
&payload,
sizeof(payload),
@@ -2568,42 +2680,43 @@ release_service_messages(struct vchiq_service *service)
for (i = state->remote->slot_first; i <= slot_last; i++) {
struct vchiq_slot_info *slot_info =
SLOT_INFO_FROM_INDEX(state, i);
- if (slot_info->release_count != slot_info->use_count) {
- char *data =
- (char *)SLOT_DATA_FROM_INDEX(state, i);
- unsigned int pos, end;
+ unsigned int pos, end;
+ char *data;
- end = VCHIQ_SLOT_SIZE;
- if (data == state->rx_data)
- /*
- * This buffer is still being read from - stop
- * at the current read position
- */
- end = state->rx_pos & VCHIQ_SLOT_MASK;
-
- pos = 0;
-
- while (pos < end) {
- struct vchiq_header *header =
- (struct vchiq_header *)(data + pos);
- int msgid = header->msgid;
- int port = VCHIQ_MSG_DSTPORT(msgid);
-
- if ((port == service->localport) &&
- (msgid & VCHIQ_MSGID_CLAIMED)) {
- vchiq_log_info(vchiq_core_log_level,
- " fsi - hdr %pK", header);
- release_slot(state, slot_info, header,
- NULL);
- }
- pos += calc_stride(header->size);
- if (pos > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(vchiq_core_log_level,
- "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
- pos, header, msgid,
- header->msgid, header->size);
- WARN(1, "invalid slot position\n");
- }
+ if (slot_info->release_count == slot_info->use_count)
+ continue;
+
+ data = (char *)SLOT_DATA_FROM_INDEX(state, i);
+ end = VCHIQ_SLOT_SIZE;
+ if (data == state->rx_data)
+ /*
+ * This buffer is still being read from - stop
+ * at the current read position
+ */
+ end = state->rx_pos & VCHIQ_SLOT_MASK;
+
+ pos = 0;
+
+ while (pos < end) {
+ struct vchiq_header *header =
+ (struct vchiq_header *)(data + pos);
+ int msgid = header->msgid;
+ int port = VCHIQ_MSG_DSTPORT(msgid);
+
+ if ((port == service->localport) &&
+ (msgid & VCHIQ_MSGID_CLAIMED)) {
+ vchiq_log_info(vchiq_core_log_level,
+ " fsi - hdr %pK", header);
+ release_slot(state, slot_info, header,
+ NULL);
+ }
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid,
+ header->msgid, header->size);
+ WARN(1, "invalid slot position\n");
}
}
}
@@ -2621,10 +2734,11 @@ do_abort_bulks(struct vchiq_service *service)
abort_outstanding_bulks(service, &service->bulk_rx);
mutex_unlock(&service->bulk_mutex);
- status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
- if (status == VCHIQ_SUCCESS)
- status = notify_bulks(service, &service->bulk_rx,
- 0/*!retry_poll*/);
+ status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
+ if (status != VCHIQ_SUCCESS)
+ return 0;
+
+ status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
return (status == VCHIQ_SUCCESS);
}
@@ -2644,10 +2758,12 @@ close_service_complete(struct vchiq_service *service, int failstate)
service->client_id = 0;
service->remoteport = VCHIQ_PORT_FREE;
newstate = VCHIQ_SRVSTATE_LISTENING;
- } else
+ } else {
newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
- } else
+ }
+ } else {
newstate = VCHIQ_SRVSTATE_CLOSED;
+ }
vchiq_set_service_state(service, newstate);
break;
case VCHIQ_SRVSTATE_LISTENING:
@@ -2677,16 +2793,17 @@ close_service_complete(struct vchiq_service *service, int failstate)
service->client_id = 0;
service->remoteport = VCHIQ_PORT_FREE;
- if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
+ if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
vchiq_free_service_internal(service);
- else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
+ } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
if (is_server)
service->closing = 0;
complete(&service->remove_event);
}
- } else
+ } else {
vchiq_set_service_state(service, failstate);
+ }
return status;
}
@@ -2698,6 +2815,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
struct vchiq_state *state = service->state;
enum vchiq_status status = VCHIQ_SUCCESS;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+ int close_id = MAKE_CLOSE(service->localport,
+ VCHIQ_MSG_DSTPORT(service->remoteport));
vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
service->state->id, service->localport, close_recvd,
@@ -2708,11 +2827,11 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
case VCHIQ_SRVSTATE_HIDDEN:
case VCHIQ_SRVSTATE_LISTENING:
case VCHIQ_SRVSTATE_CLOSEWAIT:
- if (close_recvd)
+ if (close_recvd) {
vchiq_log_error(vchiq_core_log_level,
"%s(1) called in state %s",
__func__, srvstate_names[service->srvstate]);
- else if (is_server) {
+ } else if (is_server) {
if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
status = VCHIQ_ERROR;
} else {
@@ -2724,8 +2843,9 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
VCHIQ_SRVSTATE_LISTENING);
}
complete(&service->remove_event);
- } else
+ } else {
vchiq_free_service_internal(service);
+ }
break;
case VCHIQ_SRVSTATE_OPENING:
if (close_recvd) {
@@ -2735,11 +2855,7 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
complete(&service->remove_event);
} else {
/* Shutdown mid-open - let the other side know */
- status = queue_message(state, service,
- VCHIQ_MAKE_MSG
- (VCHIQ_MSG_CLOSE,
- service->localport,
- VCHIQ_MSG_DSTPORT(service->remoteport)),
+ status = queue_message(state, service, close_id,
NULL, NULL, 0, 0);
}
break;
@@ -2756,28 +2872,24 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
release_service_messages(service);
if (status == VCHIQ_SUCCESS)
- status = queue_message(state, service,
- VCHIQ_MAKE_MSG
- (VCHIQ_MSG_CLOSE,
- service->localport,
- VCHIQ_MSG_DSTPORT(service->remoteport)),
+ status = queue_message(state, service, close_id,
NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
- if (status == VCHIQ_SUCCESS) {
- if (!close_recvd) {
- /* Change the state while the mutex is still held */
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_CLOSESENT);
- mutex_unlock(&state->slot_mutex);
- if (service->sync)
- mutex_unlock(&state->sync_mutex);
- break;
- }
- } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
- mutex_unlock(&state->sync_mutex);
+ if (status != VCHIQ_SUCCESS) {
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
+ mutex_unlock(&state->sync_mutex);
break;
- } else
+ }
+
+ if (!close_recvd) {
+ /* Change the state while the mutex is still held */
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_CLOSESENT);
+ mutex_unlock(&state->slot_mutex);
+ if (service->sync)
+ mutex_unlock(&state->sync_mutex);
break;
+ }
/* Change the state while the mutex is still held */
vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
@@ -2867,7 +2979,7 @@ vchiq_free_service_internal(struct vchiq_service *service)
complete(&service->remove_event);
/* Release the initial lock */
- unlock_service(service);
+ vchiq_service_put(service);
}
enum vchiq_status
@@ -2883,12 +2995,11 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
vchiq_set_service_state(service,
VCHIQ_SRVSTATE_LISTENING);
- unlock_service(service);
+ vchiq_service_put(service);
}
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
- if (queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
+ if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL,
0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
return VCHIQ_RETRY;
@@ -2906,21 +3017,19 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
return VCHIQ_SUCCESS;
}
-enum vchiq_status
+void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
- /* Find all services registered to this client and enable them. */
+ /* Find all services registered to this client and remove them. */
i = 0;
while ((service = next_service_by_instance(state, instance,
&i)) != NULL) {
(void)vchiq_remove_service(service->handle);
- unlock_service(service);
+ vchiq_service_put(service);
}
-
- return VCHIQ_SUCCESS;
}
enum vchiq_status
@@ -2940,15 +3049,14 @@ vchiq_close_service(unsigned int handle)
if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
(service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
- unlock_service(service);
+ vchiq_service_put(service);
return VCHIQ_ERROR;
}
mark_service_closing(service);
if (current == service->state->slot_handler_thread) {
- status = vchiq_close_service_internal(service,
- 0/*!close_recvd*/);
+ status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
WARN_ON(status == VCHIQ_RETRY);
} else {
/* Mark the service for termination by the slot handler */
@@ -2977,7 +3085,7 @@ vchiq_close_service(unsigned int handle)
(service->srvstate != VCHIQ_SRVSTATE_LISTENING))
status = VCHIQ_ERROR;
- unlock_service(service);
+ vchiq_service_put(service);
return status;
}
@@ -2998,7 +3106,7 @@ vchiq_remove_service(unsigned int handle)
service->state->id, service->localport);
if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
- unlock_service(service);
+ vchiq_service_put(service);
return VCHIQ_ERROR;
}
@@ -3012,8 +3120,7 @@ vchiq_remove_service(unsigned int handle)
*/
service->public_fourcc = VCHIQ_FOURCC_INVALID;
- status = vchiq_close_service_internal(service,
- 0/*!close_recvd*/);
+ status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
WARN_ON(status == VCHIQ_RETRY);
} else {
/* Mark the service for removal by the slot handler */
@@ -3039,7 +3146,7 @@ vchiq_remove_service(unsigned int handle)
(service->srvstate != VCHIQ_SRVSTATE_FREE))
status = VCHIQ_ERROR;
- unlock_service(service);
+ vchiq_service_put(service);
return status;
}
@@ -3134,8 +3241,7 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
bulk->size = size;
bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
- if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)
- != VCHIQ_SUCCESS)
+ if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
goto unlock_error_exit;
wmb();
@@ -3185,7 +3291,7 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
queue->local_insert, queue->remote_insert, queue->process);
waiting:
- unlock_service(service);
+ vchiq_service_put(service);
status = VCHIQ_SUCCESS;
@@ -3208,7 +3314,7 @@ unlock_error_exit:
error_exit:
if (service)
- unlock_service(service);
+ vchiq_service_put(service);
return status;
}
@@ -3221,6 +3327,7 @@ vchiq_queue_message(unsigned int handle,
{
struct vchiq_service *service = find_service_by_handle(handle);
enum vchiq_status status = VCHIQ_ERROR;
+ int data_id;
if (!service)
goto error_exit;
@@ -3239,19 +3346,15 @@ vchiq_queue_message(unsigned int handle,
goto error_exit;
}
+ data_id = MAKE_DATA(service->localport, service->remoteport);
+
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPEN:
- status = queue_message(service->state, service,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
- service->localport,
- service->remoteport),
+ status = queue_message(service->state, service, data_id,
copy_callback, context, size, 1);
break;
case VCHIQ_SRVSTATE_OPENSYNC:
- status = queue_message_sync(service->state, service,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
- service->localport,
- service->remoteport),
+ status = queue_message_sync(service->state, service, data_id,
copy_callback, context, size, 1);
break;
default:
@@ -3261,7 +3364,7 @@ vchiq_queue_message(unsigned int handle,
error_exit:
if (service)
- unlock_service(service);
+ vchiq_service_put(service);
return status;
}
@@ -3316,10 +3419,11 @@ vchiq_release_message(unsigned int handle,
release_slot(state, slot_info, header, service);
}
- } else if (slot_index == remote->slot_sync)
+ } else if (slot_index == remote->slot_sync) {
release_message_sync(state, header);
+ }
- unlock_service(service);
+ vchiq_service_put(service);
}
EXPORT_SYMBOL(vchiq_release_message);
@@ -3350,7 +3454,7 @@ vchiq_get_peer_version(unsigned int handle, short *peer_version)
exit:
if (service)
- unlock_service(service);
+ vchiq_service_put(service);
return status;
}
EXPORT_SYMBOL(vchiq_get_peer_version);
@@ -3365,21 +3469,21 @@ void vchiq_get_config(struct vchiq_config *config)
config->version_min = VCHIQ_VERSION_MIN;
}
-enum vchiq_status
+int
vchiq_set_service_option(unsigned int handle,
enum vchiq_service_option option, int value)
{
struct vchiq_service *service = find_service_by_handle(handle);
- enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_service_quota *quota;
+ int ret = -EINVAL;
if (!service)
- return VCHIQ_ERROR;
+ return -EINVAL;
switch (option) {
case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
service->auto_close = value;
- status = VCHIQ_SUCCESS;
+ ret = 0;
break;
case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
@@ -3396,7 +3500,7 @@ vchiq_set_service_option(unsigned int handle,
* dropped below its quota
*/
complete(&quota->quota_event);
- status = VCHIQ_SUCCESS;
+ ret = 0;
}
break;
@@ -3414,7 +3518,7 @@ vchiq_set_service_option(unsigned int handle,
* dropped below its quota
*/
complete(&quota->quota_event);
- status = VCHIQ_SUCCESS;
+ ret = 0;
}
break;
@@ -3422,21 +3526,21 @@ vchiq_set_service_option(unsigned int handle,
if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
(service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
service->sync = value;
- status = VCHIQ_SUCCESS;
+ ret = 0;
}
break;
case VCHIQ_SERVICE_OPTION_TRACE:
service->trace = value;
- status = VCHIQ_SUCCESS;
+ ret = 0;
break;
default:
break;
}
- unlock_service(service);
+ vchiq_service_put(service);
- return status;
+ return ret;
}
static int
@@ -3575,7 +3679,7 @@ int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
if (service) {
err = vchiq_dump_service_state(dump_context, service);
- unlock_service(service);
+ vchiq_service_put(service);
if (err)
return err;
}
@@ -3611,8 +3715,9 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
scnprintf(remoteport + len2,
sizeof(remoteport) - len2,
" (client %x)", service->client_id);
- } else
+ } else {
strcpy(remoteport, "n/a");
+ }
len += scnprintf(buf + len, sizeof(buf) - len,
" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
@@ -3711,9 +3816,7 @@ enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
return VCHIQ_RETRY;
- return queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
- NULL, NULL, 0, 0);
+ return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
}
enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
@@ -3721,8 +3824,7 @@ enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
return VCHIQ_RETRY;
- return queue_message(state, NULL,
- VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
+ return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
NULL, NULL, 0, 0);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index b817097651fa..957fea1f574f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -64,74 +64,20 @@
#define vchiq_loud_error(...) \
vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
-#ifndef vchiq_static_assert
-#define vchiq_static_assert(cond) __attribute__((unused)) \
- extern int vchiq_static_assert[(cond) ? 1 : -1]
-#endif
-
-#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
-
-/* Ensure that the slot size and maximum number of slots are powers of 2 */
-vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
-vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
-vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
-
#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
VCHIQ_SLOT_SIZE)
-#define VCHIQ_MSG_PADDING 0 /* - */
-#define VCHIQ_MSG_CONNECT 1 /* - */
-#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
-#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
-#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
-#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
-#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
-#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
-#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
-#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
-#define VCHIQ_MSG_PAUSE 10 /* - */
-#define VCHIQ_MSG_RESUME 11 /* - */
-#define VCHIQ_MSG_REMOTE_USE 12 /* - */
-#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
-#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
-
-#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
-#define VCHIQ_PORT_FREE 0x1000
-#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
-#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
- ((type<<24) | (srcport<<12) | (dstport<<0))
-#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
-#define VCHIQ_MSG_SRCPORT(msgid) \
- (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
-#define VCHIQ_MSG_DSTPORT(msgid) \
- ((unsigned short)msgid & 0xfff)
-
#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
((fourcc) >> 24) & 0xff, \
((fourcc) >> 16) & 0xff, \
((fourcc) >> 8) & 0xff, \
(fourcc) & 0xff
-/* Ensure the fields are wide enough */
-vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
- == 0);
-vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
-vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
- (unsigned int)VCHIQ_PORT_FREE);
-
-#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
-#define VCHIQ_MSGID_CLAIMED 0x40000000
-
-#define VCHIQ_FOURCC_INVALID 0x00000000
-#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
-
-#define VCHIQ_BULK_ACTUAL_ABORTED -1
-
typedef uint32_t BITSET_T;
-vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
+static_assert((sizeof(BITSET_T) * 8) == 32);
#define BITSET_SIZE(b) ((b + 31) >> 5)
#define BITSET_WORD(b) (b >> 5)
@@ -140,17 +86,6 @@ vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
-#if VCHIQ_ENABLE_STATS
-#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
-#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
-#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
- (service->stats. stat += addend)
-#else
-#define VCHIQ_STATS_INC(state, stat) ((void)0)
-#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
-#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
-#endif
-
enum {
DEBUG_ENTRIES,
#if VCHIQ_ENABLE_DEBUG
@@ -212,14 +147,6 @@ enum {
VCHIQ_SRVSTATE_CLOSED
};
-enum {
- VCHIQ_POLL_TERMINATE,
- VCHIQ_POLL_REMOVE,
- VCHIQ_POLL_TXNOTIFY,
- VCHIQ_POLL_RXNOTIFY,
- VCHIQ_POLL_COUNT
-};
-
enum vchiq_bulk_dir {
VCHIQ_BULK_TRANSMIT,
VCHIQ_BULK_RECEIVE
@@ -539,7 +466,7 @@ get_conn_state_name(enum vchiq_connstate conn_state);
extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);
-extern enum vchiq_status
+extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);
extern enum vchiq_status
@@ -563,7 +490,7 @@ vchiq_terminate_service_internal(struct vchiq_service *service);
extern void
vchiq_free_service_internal(struct vchiq_service *service);
-extern enum vchiq_status
+extern void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern void
@@ -627,10 +554,10 @@ next_service_by_instance(struct vchiq_state *state,
int *pidx);
extern void
-lock_service(struct vchiq_service *service);
+vchiq_service_get(struct vchiq_service *service);
extern void
-unlock_service(struct vchiq_service *service);
+vchiq_service_put(struct vchiq_service *service);
extern enum vchiq_status
vchiq_queue_message(unsigned int handle,
@@ -644,7 +571,7 @@ vchiq_queue_message(unsigned int handle,
* implementations must be provided.
*/
-extern enum vchiq_status
+extern int
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
void __user *uoffset, int size, int dir);
@@ -667,10 +594,10 @@ extern int
vchiq_dump_platform_service_state(void *dump_context,
struct vchiq_service *service);
-extern enum vchiq_status
+extern int
vchiq_use_service_internal(struct vchiq_service *service);
-extern enum vchiq_status
+extern int
vchiq_release_service_internal(struct vchiq_service *service);
extern void
@@ -679,7 +606,7 @@ vchiq_on_remote_use(struct vchiq_state *state);
extern void
vchiq_on_remote_release(struct vchiq_state *state);
-extern enum vchiq_status
+extern int
vchiq_platform_init_state(struct vchiq_state *state);
extern enum vchiq_status
@@ -712,7 +639,7 @@ extern int vchiq_get_client_id(unsigned int service);
extern void vchiq_get_config(struct vchiq_config *config);
-extern enum vchiq_status
+extern int
vchiq_set_service_option(unsigned int service, enum vchiq_service_option option,
int value);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index a39757b4e759..8f3d9cb2d562 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -11,11 +11,6 @@
#ifdef CONFIG_DEBUG_FS
-/****************************************************************************
- *
- * log category entries
- *
- ***************************************************************************/
#define DEBUGFS_WRITE_BUF_SIZE 256
#define VCHIQ_LOG_ERROR_STR "error"
@@ -40,6 +35,7 @@ static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
{ "susp", &vchiq_susp_log_level },
{ "arm", &vchiq_arm_log_level },
};
+
static int n_log_entries = ARRAY_SIZE(vchiq_debugfs_log_entries);
static int debugfs_log_show(struct seq_file *f, void *offset)
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 06bca7be5203..76d3f0399964 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -1862,7 +1862,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
int status;
int err = -ENODEV;
struct vchiq_mmal_instance *instance;
- static struct vchiq_instance *vchiq_instance;
+ struct vchiq_instance *vchiq_instance;
struct vchiq_service_params_kernel params = {
.version = VC_MMAL_VER,
.version_min = VC_MMAL_MIN_VER,
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index e086ec6e77f7..8608693ae9c3 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -42,7 +42,7 @@
#define PCAvDelayByIO(uDelayUnit) \
do { \
- unsigned char byData; \
+ unsigned char __maybe_unused byData; \
unsigned long ii; \
\
if (uDelayUnit <= 50) { \
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 759e475e303c..7951bd63816f 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -276,7 +276,7 @@ static int prism2_scan(struct wiphy *wiphy,
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
struct wlandevice *wlandev;
struct p80211msg_dot11req_scan msg1;
- struct p80211msg_dot11req_scan_results msg2;
+ struct p80211msg_dot11req_scan_results *msg2;
struct cfg80211_bss *bss;
struct cfg80211_scan_info info = {};
@@ -301,6 +301,10 @@ static int prism2_scan(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
+ msg2 = kzalloc(sizeof(*msg2), GFP_KERNEL);
+ if (!msg2)
+ return -ENOMEM;
+
priv->scan_request = request;
memset(&msg1, 0x00, sizeof(msg1));
@@ -342,31 +346,30 @@ static int prism2_scan(struct wiphy *wiphy,
for (i = 0; i < numbss; i++) {
int freq;
- memset(&msg2, 0, sizeof(msg2));
- msg2.msgcode = DIDMSG_DOT11REQ_SCAN_RESULTS;
- msg2.bssindex.data = i;
+ msg2->msgcode = DIDMSG_DOT11REQ_SCAN_RESULTS;
+ msg2->bssindex.data = i;
result = p80211req_dorequest(wlandev, (u8 *)&msg2);
if ((result != 0) ||
- (msg2.resultcode.data != P80211ENUM_resultcode_success)) {
+ (msg2->resultcode.data != P80211ENUM_resultcode_success)) {
break;
}
ie_buf[0] = WLAN_EID_SSID;
- ie_buf[1] = msg2.ssid.data.len;
+ ie_buf[1] = msg2->ssid.data.len;
ie_len = ie_buf[1] + 2;
- memcpy(&ie_buf[2], &msg2.ssid.data.data, msg2.ssid.data.len);
- freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
+ memcpy(&ie_buf[2], &msg2->ssid.data.data, msg2->ssid.data.len);
+ freq = ieee80211_channel_to_frequency(msg2->dschannel.data,
NL80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
ieee80211_get_channel(wiphy, freq),
CFG80211_BSS_FTYPE_UNKNOWN,
- (const u8 *)&msg2.bssid.data.data,
- msg2.timestamp.data, msg2.capinfo.data,
- msg2.beaconperiod.data,
+ (const u8 *)&msg2->bssid.data.data,
+ msg2->timestamp.data, msg2->capinfo.data,
+ msg2->beaconperiod.data,
ie_buf,
ie_len,
- (msg2.signal.data - 65536) * 100, /* Conversion to signed type */
+ (msg2->signal.data - 65536) * 100, /* Conversion to signed type */
GFP_KERNEL);
if (!bss) {
@@ -378,12 +381,13 @@ static int prism2_scan(struct wiphy *wiphy,
}
if (result)
- err = prism2_result2err(msg2.resultcode.data);
+ err = prism2_result2err(msg2->resultcode.data);
exit:
info.aborted = !!(err);
cfg80211_scan_done(request, &info);
priv->scan_request = NULL;
+ kfree(msg2);
return err;
}
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index ed65ac57adbe..77e8d2913b76 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -81,7 +81,7 @@
struct p80211ioctl_req {
char name[WLAN_DEVNAMELEN_MAX];
- caddr_t data;
+ char __user *data;
u32 magic;
u16 len;
u32 result;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 70570e8a5ad2..6f470e7ba647 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -325,7 +325,7 @@ static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
int result = 0;
- int txresult = -1;
+ int txresult;
struct wlandevice *wlandev = netdev->ml_priv;
union p80211_hdr p80211_hdr;
struct p80211_metawep p80211_wep;
@@ -569,7 +569,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
goto bail;
}
- msgbuf = memdup_user((void __user *)req->data, req->len);
+ msgbuf = memdup_user(req->data, req->len);
if (IS_ERR(msgbuf)) {
result = PTR_ERR(msgbuf);
goto bail;
@@ -579,7 +579,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
if (result == 0) {
if (copy_to_user
- ((void __user *)req->data, msgbuf, req->len)) {
+ (req->data, msgbuf, req->len)) {
result = -EFAULT;
}
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index af35251232eb..b044999ad002 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -265,12 +265,13 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
if (ccmd->release) {
- struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
-
- if (ttinfo->sgl) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ put_page(sg_page(&ccmd->sg));
+ } else {
struct cxgbit_sock *csk = conn->context;
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
/* Abort the TCP conn if DDP is not complete to
* avoid any possibility of DDP after freeing
@@ -280,14 +281,14 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
cmd->se_cmd.data_length))
cxgbit_abort_conn(csk);
+ if (unlikely(ttinfo->sgl)) {
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+ ttinfo->nents, DMA_FROM_DEVICE);
+ ttinfo->nents = 0;
+ ttinfo->sgl = NULL;
+ }
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
-
- dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
- ttinfo->nents, DMA_FROM_DEVICE);
- } else {
- put_page(sg_page(&ccmd->sg));
}
-
ccmd->release = false;
}
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index b926e1d6c7b8..282297ffc404 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -997,17 +997,18 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
struct scatterlist *sg_start;
struct iscsi_conn *conn = csk->conn;
struct iscsi_cmd *cmd = NULL;
+ struct cxgbit_cmd *ccmd;
+ struct cxgbi_task_tag_info *ttinfo;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
u32 data_offset = be32_to_cpu(hdr->offset);
- u32 data_len = pdu_cb->dlen;
+ u32 data_len = ntoh24(hdr->dlength);
int rc, sg_nents, sg_off;
bool dcrc_err = false;
if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
u32 offset = be32_to_cpu(hdr->offset);
u32 ddp_data_len;
- u32 payload_length = ntoh24(hdr->dlength);
bool success = false;
cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
@@ -1022,7 +1023,7 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cmd->data_sn = be32_to_cpu(hdr->datasn);
rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
- cmd, payload_length, &success);
+ cmd, data_len, &success);
if (rc < 0)
return rc;
else if (!success)
@@ -1060,6 +1061,20 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
}
+ ccmd = iscsit_priv_cmd(cmd);
+ ttinfo = &ccmd->ttinfo;
+
+ if (ccmd->release && ttinfo->sgl &&
+ (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
+ DMA_FROM_DEVICE);
+ ttinfo->nents = 0;
+ ttinfo->sgl = NULL;
+ }
+
check_payload:
rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index cd670cb9b8fb..0dd52f484fec 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -871,8 +871,6 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
if (iscsit_execute_cmd(cmd, 1) < 0)
return -1;
-
- continue;
}
return ooo_count;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 151e2949bb75..c0ed6f8e5c5b 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -118,13 +118,6 @@ static u32 iscsi_handle_authentication(
" CHAP auth\n");
return -1;
}
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
- if (!iscsi_nacl) {
- pr_err("Unable to locate struct iscsi_node_acl for"
- " CHAP auth\n");
- return -1;
- }
if (se_nacl->dynamic_node_acl) {
iscsi_tpg = container_of(se_nacl->se_tpg,
@@ -1082,14 +1075,12 @@ int iscsi_target_locate_portal(
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
- tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+ tmpbuf = kmemdup_nul(login->req_buf, payload_length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate memory for tmpbuf.\n");
return -1;
}
- memcpy(tmpbuf, login->req_buf, payload_length);
- tmpbuf[payload_length] = '\0';
start = tmpbuf;
end = (start + payload_length);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 7a461fbb1566..6bc3aaf655fc 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1357,14 +1357,12 @@ int iscsi_decode_text_input(
struct iscsi_param_list *param_list = conn->param_list;
char *tmpbuf, *start = NULL, *end = NULL;
- tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+ tmpbuf = kmemdup_nul(textbuf, length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
return -ENOMEM;
}
- memcpy(tmpbuf, textbuf, length);
- tmpbuf[length] = '\0';
start = tmpbuf;
end = (start + length);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2687fd7d45db..6d0b0e67e79e 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -566,7 +566,6 @@ static int tcm_loop_queue_data_or_status(const char *func,
memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
- set_driver_byte(sc, DRIVER_SENSE);
} else
sc->result = scsi_status;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index ce84f93c183a..4d3ceee23622 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1261,7 +1261,6 @@ static int sbp_rw_data(struct sbp_target_request *req)
pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
if (pg_size) {
pr_err("sbp_run_transaction: page size ignored\n");
- pg_size = 0x100 << pg_size;
}
spin_lock_bh(&sess->lock);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 5517c7dd5144..3bb921345bce 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -123,7 +123,7 @@ target_emulate_report_referrals(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -255,7 +255,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
transport_kunmap_data_sg(cmd);
- target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
return 0;
}
@@ -424,7 +424,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_data_sg(cmd);
if (!rc)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return rc;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 4b2e49341ad6..102ec644bc8a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1479,6 +1479,54 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
return count;
}
+static ssize_t
+target_wwn_company_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%#08x\n",
+ to_t10_wwn(item)->company_id);
+}
+
+static ssize_t
+target_wwn_company_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ u32 val;
+ int ret;
+
+ /*
+ * The IEEE COMPANY_ID field should contain a 24-bit canonical
+ * form OUI assigned by the IEEE.
+ */
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= 0x1000000)
+ return -EOVERFLOW;
+
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set Company ID while %u exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
+
+ t10_wwn->company_id = val;
+
+ pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
+ t10_wwn->company_id);
+
+ return count;
+}
+
/*
* VPD page 0x80 Unit serial
*/
@@ -1625,6 +1673,7 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, product_id);
CONFIGFS_ATTR(target_wwn_, revision);
+CONFIGFS_ATTR(target_wwn_, company_id);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1635,6 +1684,7 @@ static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
&target_wwn_attr_vendor_id,
&target_wwn_attr_product_id,
&target_wwn_attr_revision,
+ &target_wwn_attr_company_id,
&target_wwn_attr_vpd_unit_serial,
&target_wwn_attr_vpd_protocol_identifier,
&target_wwn_attr_vpd_assoc_logical_unit,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index a8df9f0a82fa..8cb1fa0c0585 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -773,6 +773,11 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_alua.lba_map_lock);
dev->t10_wwn.t10_dev = dev;
+ /*
+ * Use OpenFabrics IEEE Company ID: 00 14 05
+ */
+ dev->t10_wwn.company_id = 0x001405;
+
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a526f9678c34..44d9d028f716 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -474,7 +474,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 6fd5fec95539..4b94b085625b 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -234,7 +234,7 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -297,7 +297,7 @@ out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -3676,7 +3676,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
}
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -4073,7 +4073,7 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
}
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index a31c93e4e19c..b793c99637ab 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -52,7 +52,7 @@
/*
* Function defined in target_core_spc.c
*/
-void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+void spc_gen_naa_6h_vendor_specific(struct se_device *, unsigned char *);
extern struct kmem_cache *t10_pr_reg_cache;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index f2a11414366d..2629d2ef3970 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -982,7 +982,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req = blk_get_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1044,7 +1044,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
struct se_cmd *cmd = req->end_io_data;
struct pscsi_plugin_task *pt = cmd->priv;
int result = scsi_req(req)->result;
- u8 scsi_status = status_byte(result) << 1;
+ enum sam_status scsi_status = result & 0xff;
if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 7b07e557dc8d..ca1b2312d6e7 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -25,7 +25,7 @@
#include "target_core_alua.h"
static sense_reason_t
-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t
@@ -67,7 +67,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, 8);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8);
return 0;
}
@@ -130,7 +130,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, 32);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32);
return 0;
}
@@ -202,14 +202,14 @@ sbc_execute_write_same_unmap(struct se_cmd *cmd)
return ret;
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
}
static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
{
struct se_device *dev = cmd->se_dev;
sector_t end_lba = dev->transport->get_blocks(dev) + 1;
unsigned int sectors = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ if ((flags & 0x04) || (flags & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
@@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
}
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
- if (flags[0] & 0x10) {
+ if (flags & 0x10) {
pr_warn("WRITE SAME with ANCHOR not supported\n");
return TCM_INVALID_CDB_FIELD;
}
@@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
*/
- if (flags[0] & 0x08) {
+ if (flags & 0x08) {
if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
if (!ops->execute_write_same)
return TCM_UNSUPPORTED_SCSI_OPCODE;
- ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+ ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
if (ret)
return ret;
@@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
}
static sense_reason_t
-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
u32 sectors, bool is_write)
{
- u8 protect = cdb[1] >> 5;
int sp_ops = cmd->se_sess->sup_prot_ops;
int pi_prot_type = dev->dev_attrib.pi_prot_type;
bool fabric_prot = false;
@@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
fallthrough;
default:
pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
- "PROTECT: 0x%02x\n", cdb[0], protect);
+ "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
return TCM_INVALID_CDB_FIELD;
}
@@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+ ret = sbc_setup_write_same(cmd, cdb[10], ops);
if (ret)
return ret;
break;
@@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
@@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
@@ -1245,7 +1244,7 @@ sbc_execute_unmap(struct se_cmd *cmd)
err:
transport_kunmap_data_sg(cmd);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
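The sbc_check_prot() and sbc_setup_write_same() callers above now pass the already-extracted PROTECT field (or flags byte) rather than a pointer into the CDB. The PROTECT value occupies bits 7:5 of the relevant CDB byte, so the extraction is a plain shift; a standalone sketch, with a hypothetical helper name:

	/* RDPROTECT/WRPROTECT: bits 7:5 of CDB byte 1 (illustrative helper). */
	static inline u8 example_cdb_protect(const unsigned char *cdb)
	{
		return cdb[1] >> 5;	/* 0..7 */
	}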
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 70a661801cb9..22703a0dbd07 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -47,10 +47,32 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
spin_unlock(&lun->lun_tg_pt_gp_lock);
}
+static u16
+spc_find_scsi_transport_vd(int proto_id)
+{
+ switch (proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ return SCSI_VERSION_DESCRIPTOR_FCP4;
+ case SCSI_PROTOCOL_ISCSI:
+ return SCSI_VERSION_DESCRIPTOR_ISCSI;
+ case SCSI_PROTOCOL_SAS:
+ return SCSI_VERSION_DESCRIPTOR_SAS3;
+ case SCSI_PROTOCOL_SBP:
+ return SCSI_VERSION_DESCRIPTOR_SBP3;
+ case SCSI_PROTOCOL_SRP:
+ return SCSI_VERSION_DESCRIPTOR_SRP;
+ default:
+ pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
+ " transport PROTOCOL IDENTIFIER %#x\n", proto_id);
+ return 0;
+ }
+}
+
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
+ struct se_portal_group *tpg = lun->lun_tpg;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
@@ -58,7 +80,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
buf[1] = 0x80;
- buf[2] = 0x05; /* SPC-3 */
+ buf[2] = 0x06; /* SPC-4 */
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -108,7 +130,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
memcpy(&buf[32], dev->t10_wwn.revision,
strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
- buf[4] = 31; /* Set additional length to 31 */
+
+ /*
+ * Set the VERSION DESCRIPTOR fields
+ */
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
+ put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
+ if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);
+
+ buf[4] = 91; /* Set additional length to 91 */
return 0;
}
@@ -129,14 +161,29 @@ spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
- unsigned char *buf)
+/*
+ * Generate NAA IEEE Registered Extended designator
+ */
+void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
{
unsigned char *p = &dev->t10_wwn.unit_serial[0];
- int cnt;
+ u32 company_id = dev->t10_wwn.company_id;
+ int cnt, off = 0;
bool next = true;
/*
+ * Start NAA IEEE Registered Extended Identifier/Designator
+ */
+ buf[off] = 0x6 << 4;
+
+ /* IEEE COMPANY_ID */
+ buf[off++] |= (company_id >> 20) & 0xf;
+ buf[off++] = (company_id >> 12) & 0xff;
+ buf[off++] = (company_id >> 4) & 0xff;
+ buf[off] = (company_id & 0xf) << 4;
+
+ /*
* Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
* byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
* format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
@@ -144,7 +191,7 @@ void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
* per device uniqueness.
*/
- for (cnt = 0; *p && cnt < 13; p++) {
+ for (cnt = off + 13; *p && off < cnt; p++) {
int val = hex_to_bin(*p);
if (val < 0)
@@ -152,10 +199,10 @@ void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
if (next) {
next = false;
- buf[cnt++] |= val;
+ buf[off++] |= val;
} else {
next = true;
- buf[cnt] = val << 4;
+ buf[off] = val << 4;
}
}
}
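spc_gen_naa_6h_vendor_specific() now builds the designator header from t10_wwn.company_id instead of leaving it to the caller. For the default OUI 0x001405 the packing above produces the same bytes the old hard-coded path emitted:

	/*
	 * company_id = 0x001405:
	 *
	 *   buf[0] = (0x6 << 4) | ((0x001405 >> 20) & 0xf)  = 0x60
	 *   buf[1] =  (0x001405 >> 12) & 0xff               = 0x01
	 *   buf[2] =  (0x001405 >>  4) & 0xff               = 0x40
	 *   buf[3] =  (0x001405 &  0xf) << 4                = 0x50
	 *
	 * The low nibble of buf[3] is then filled from the unit serial,
	 * exactly as with the previous 0x60 0x01 0x40 0x5n prefix.
	 */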
@@ -203,24 +250,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
/* Identifier/Designator length */
buf[off++] = 0x10;
- /*
- * Start NAA IEEE Registered Extended Identifier/Designator
- */
- buf[off++] = (0x6 << 4);
-
- /*
- * Use OpenFabrics IEEE Company ID: 00 14 05
- */
- buf[off++] = 0x01;
- buf[off++] = 0x40;
- buf[off] = (0x5 << 4);
-
- /*
- * Return ConfigFS Unit Serial Number information for
- * VENDOR_SPECIFIC_IDENTIFIER and
- * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
- */
- spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ /* NAA IEEE Registered Extended designator */
+ spc_gen_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
@@ -750,7 +781,7 @@ out:
kfree(buf);
if (!ret)
- target_complete_cmd_with_length(cmd, GOOD, len);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
return ret;
}
@@ -1104,7 +1135,7 @@ set_length:
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, length);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
return 0;
}
@@ -1122,7 +1153,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
int i;
if (!cmd->data_length) {
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -1165,7 +1196,7 @@ out:
transport_kunmap_data_sg(cmd);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -1198,7 +1229,7 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -1265,7 +1296,7 @@ done:
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
@@ -1273,7 +1304,7 @@ EXPORT_SYMBOL(spc_emulate_report_luns);
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7e35eddd9eb7..26ceabe34de5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
- if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+ if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
cpu = cmd->cpuid;
else
cpu = wwn->cmd_compl_affinity;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4bba10e7755a..fbb6ffaddfbe 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -121,7 +121,7 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
-#define TCM_DEV_BIT_PLUGGED 4
+#define TCMU_DEV_BIT_PLUGGED 4
unsigned long flags;
struct uio_info uio_info;
@@ -982,7 +982,7 @@ static void tcmu_unplug_device(struct se_dev_plug *se_plug)
struct se_device *se_dev = se_plug->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
- clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
+ clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
uio_event_notify(&udev->uio_info);
}
@@ -990,7 +990,7 @@ static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{
struct tcmu_dev *udev = TCMU_DEV(se_dev);
- if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
return &udev->se_plug;
return NULL;
@@ -1124,7 +1124,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
- if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
uio_event_notify(&udev->uio_info);
return 0;
@@ -1423,7 +1423,7 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
pr_err("ring broken, not handling completions\n");
- return 0;
+ return false;
}
mb = udev->mb_addr;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index d31ed071cb08..0f1319336f3e 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -33,19 +33,6 @@ static struct workqueue_struct *xcopy_wq = NULL;
static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
-static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
-{
- int off = 0;
-
- buf[off++] = (0x6 << 4);
- buf[off++] = 0x01;
- buf[off++] = 0x40;
- buf[off] = (0x5 << 4);
-
- spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
- return 0;
-}
-
/**
* target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
*
@@ -65,7 +52,7 @@ static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
}
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
- target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+ spc_gen_naa_6h_vendor_specific(se_dev, &tmp_dev_wwn[0]);
rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
@@ -241,7 +228,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
* se_device the XCOPY was received upon..
*/
memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
- target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
+ spc_gen_naa_6h_vendor_specific(local_dev, &xop->local_dev_wwn[0]);
while (start < tdll) {
/*
@@ -1011,7 +998,7 @@ static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
put_unaligned_be32(42, &p[0]);
transport_kunmap_data_sg(se_cmd);
- target_complete_cmd(se_cmd, GOOD);
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
return TCM_NO_SENSE;
}
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 6e6eb836e9b6..945f03da0223 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -184,7 +184,7 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
struct optee_msg_arg *ma;
shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
- TEE_SHM_MAPPED);
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (IS_ERR(shm))
return shm;
@@ -416,11 +416,13 @@ void optee_enable_shm_cache(struct optee *optee)
}
/**
- * optee_disable_shm_cache() - Disables caching of some shared memory allocation
- * in OP-TEE
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ * allocations in OP-TEE
* @optee: main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ * kernel, are safe to dereference, and should be freed
*/
-void optee_disable_shm_cache(struct optee *optee)
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
struct optee_call_waiter w;
@@ -439,6 +441,13 @@ void optee_disable_shm_cache(struct optee *optee)
if (res.result.status == OPTEE_SMC_RETURN_OK) {
struct tee_shm *shm;
+ /*
+ * Shared memory references that were not mapped by
+ * this kernel must be ignored to prevent a crash.
+ */
+ if (!is_mapped)
+ continue;
+
shm = reg_pair_to_ptr(res.result.shm_upper32,
res.result.shm_lower32);
tee_shm_free(shm);
@@ -449,6 +458,27 @@ void optee_disable_shm_cache(struct optee *optee)
optee_cq_wait_final(&optee->call_queue, &w);
}
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ * allocations in OP-TEE which are not
+ * currently mapped
+ * @optee: main service struct
+ */
+void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, false);
+}
+
#define PAGELIST_ENTRIES_PER_PAGE \
((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index ddb8f9ecf307..5ce13b099d7d 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm-smccc.h>
+#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -277,7 +278,8 @@ static void optee_release(struct tee_context *ctx)
if (!ctxdata)
return;
- shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+ shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (!IS_ERR(shm)) {
arg = tee_shm_get_va(shm, 0);
/*
@@ -572,6 +574,13 @@ static optee_invoke_fn *get_invoke_func(struct device *dev)
return ERR_PTR(-EINVAL);
}
+/* optee_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_remove is called by the platform subsystem to alert the driver
+ * that it should release the device.
+ */
+
static int optee_remove(struct platform_device *pdev)
{
struct optee *optee = platform_get_drvdata(pdev);
@@ -602,6 +611,18 @@ static int optee_remove(struct platform_device *pdev)
return 0;
}
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * platform_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and the
+ * device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+ optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
static int optee_probe(struct platform_device *pdev)
{
optee_invoke_fn *invoke_fn;
@@ -612,6 +633,16 @@ static int optee_probe(struct platform_device *pdev)
u32 sec_caps;
int rc;
+ /*
+ * The kernel may have crashed at the same time that all available
+ * secure world threads were suspended and we cannot reschedule the
+ * suspended threads without access to the crashed kernel's wait_queue.
+ * Therefore, we cannot reliably initialize the OP-TEE driver in the
+ * kdump kernel.
+ */
+ if (is_kdump_kernel())
+ return -ENODEV;
+
invoke_fn = get_invoke_func(&pdev->dev);
if (IS_ERR(invoke_fn))
return PTR_ERR(invoke_fn);
@@ -686,6 +717,15 @@ static int optee_probe(struct platform_device *pdev)
optee->memremaped_shm = memremaped_shm;
optee->pool = pool;
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+ * the shm cache so that there's no chance of receiving an invalid
+ * address during shutdown. This could occur, for example, if we're
+ * kexec booting from an older kernel that did not properly clean up the
+ * shm cache.
+ */
+ optee_disable_unmapped_shm_cache(optee);
+
optee_enable_shm_cache(optee);
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
@@ -728,6 +768,7 @@ MODULE_DEVICE_TABLE(of, optee_dt_match);
static struct platform_driver optee_driver = {
.probe = optee_probe,
.remove = optee_remove,
+ .shutdown = optee_shutdown,
.driver = {
.name = "optee",
.of_match_table = optee_dt_match,
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index e25b216a14ef..dbdd367be156 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -159,6 +159,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
void optee_enable_shm_cache(struct optee *optee);
void optee_disable_shm_cache(struct optee *optee);
+void optee_disable_unmapped_shm_cache(struct optee *optee);
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
struct page **pages, size_t num_pages,
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index 1849180b0278..efbaff7ad7e5 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -314,7 +314,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = cmd_alloc_suppl(ctx, sz);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -502,7 +502,8 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+ shm = tee_shm_alloc(ctx, param->a1,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
reg_pair_from_64(&param->a1, &param->a2, pa);
reg_pair_from_64(&param->a4, &param->a5,
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
index d767eebf30bd..c41a9a501a6e 100644
--- a/drivers/tee/optee/shm_pool.c
+++ b/drivers/tee/optee/shm_pool.c
@@ -27,13 +27,19 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
shm->paddr = page_to_phys(page);
shm->size = PAGE_SIZE << order;
- if (shm->flags & TEE_SHM_DMA_BUF) {
+ /*
+ * Shared memory private to the OP-TEE driver doesn't need
+ * to be registered with OP-TEE.
+ */
+ if (!(shm->flags & TEE_SHM_PRIV)) {
unsigned int nr_pages = 1 << order, i;
struct page **pages;
pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
for (i = 0; i < nr_pages; i++) {
pages[i] = page;
@@ -44,15 +50,21 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
kfree(pages);
+ if (rc)
+ goto err;
}
+ return 0;
+
+err:
+ __free_pages(page, order);
return rc;
}
static void pool_op_free(struct tee_shm_pool_mgr *poolm,
struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_DMA_BUF)
+ if (!(shm->flags & TEE_SHM_PRIV))
optee_shm_unregister(shm->ctx, shm);
free_pages((unsigned long)shm->kaddr, get_order(shm->size));
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 00472f5ce22e..8a9384a64f3e 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -117,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
return ERR_PTR(-EINVAL);
}
- if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+ if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
return ERR_PTR(-EINVAL);
}
@@ -193,6 +193,24 @@ err_dev_put:
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
+/**
+ * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * The returned memory is registered in secure world and is suitable to be
+ * passed as a memory buffer in parameter argument to
+ * tee_client_invoke_func(). The memory allocated is later freed with a
+ * call to tee_shm_free().
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
+{
+ return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+
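A minimal usage sketch for the new helper from a kernel TEE client; the context and invocation details are assumed (error handling trimmed, needs <linux/tee_drv.h>):

	static int example_send_buf(struct tee_context *ctx)
	{
		struct tee_shm *shm;
		void *va;

		shm = tee_shm_alloc_kernel_buf(ctx, 4096);
		if (IS_ERR(shm))
			return PTR_ERR(shm);

		va = tee_shm_get_va(shm, 0);	/* kernel mapping of the buffer */
		if (IS_ERR(va)) {
			tee_shm_free(shm);
			return PTR_ERR(va);
		}
		/* fill va, reference shm in a tee_client_invoke_func() memref param */
		tee_shm_free(shm);
		return 0;
	}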
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
size_t length, u32 flags)
{
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 3a788ac4f525..5a86cffd78f6 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -458,7 +458,7 @@ struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df)
EXPORT_SYMBOL_GPL(devfreq_cooling_register);
/**
- * devfreq_cooling_em_register_power() - Register devfreq cooling device with
+ * devfreq_cooling_em_register() - Register devfreq cooling device with
* power information and automatically register Energy Model (EM)
* @df: Pointer to devfreq device.
* @dfc_power: Pointer to devfreq_cooling_power.
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index b01d28eca7ee..8d76dbfde6a9 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -93,6 +93,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, child) {
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor) {
+ of_node_put(child);
of_node_put(sensor_np);
return -ENOMEM;
}
@@ -104,6 +105,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"failed to get valid sensor resource id: %d\n",
ret);
+ of_node_put(child);
break;
}
@@ -114,6 +116,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
if (IS_ERR(sensor->tzd)) {
dev_err(&pdev->dev, "failed to register thermal zone\n");
ret = PTR_ERR(sensor->tzd);
+ of_node_put(child);
break;
}
diff --git a/drivers/thermal/intel/int340x_thermal/Makefile b/drivers/thermal/intel/int340x_thermal/Makefile
index 38a2731e503c..4e852ce4a5d5 100644
--- a/drivers/thermal/intel/int340x_thermal/Makefile
+++ b/drivers/thermal/intel/int340x_thermal/Makefile
@@ -4,6 +4,9 @@ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
+obj-$(CONFIG_INT340X_THERMAL) += int3401_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device_pci_legacy.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device_pci.o
obj-$(CONFIG_PROC_THERMAL_MMIO_RAPL) += processor_thermal_rapl.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_rfim.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_mbox.o
diff --git a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
new file mode 100644
index 000000000000..acebc8ba94e2
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * INT3401 processor thermal device
+ * Copyright (c) 2020, Intel Corporation.
+ */
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "int340x_thermal_zone.h"
+#include "processor_thermal_device.h"
+
+static const struct acpi_device_id int3401_device_ids[] = {
+ {"INT3401", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static int int3401_add(struct platform_device *pdev)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv)
+ return -ENOMEM;
+
+ ret = proc_thermal_add(&pdev->dev, proc_priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, proc_priv);
+
+ return ret;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+ proc_thermal_remove(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int int3401_thermal_resume(struct device *dev)
+{
+ return proc_thermal_resume(dev);
+}
+#else
+#define int3401_thermal_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(int3401_proc_thermal_pm, NULL, int3401_thermal_resume);
+
+static struct platform_driver int3401_driver = {
+ .probe = int3401_add,
+ .remove = int3401_remove,
+ .driver = {
+ .name = "int3401 thermal",
+ .acpi_match_table = int3401_device_ids,
+ .pm = &int3401_proc_thermal_pm,
+ },
+};
+
+static int __init proc_thermal_init(void)
+{
+ return platform_driver_register(&int3401_driver);
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ platform_driver_unregister(&int3401_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 9e6f2a895a23..0f0038af2ad4 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -3,34 +3,17 @@
* processor_thermal_device.c
* Copyright (c) 2014, Intel Corporation.
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
#include <linux/thermal.h>
-#include <linux/cpuhotplug.h>
#include "int340x_thermal_zone.h"
#include "processor_thermal_device.h"
#include "../intel_soc_dts_iosf.h"
#define DRV_NAME "proc_thermal"
-enum proc_thermal_emum_mode_type {
- PROC_THERMAL_NONE,
- PROC_THERMAL_PCI,
- PROC_THERMAL_PLATFORM_DEV
-};
-
-/*
- * We can have only one type of enumeration, PCI or Platform,
- * not both. So we don't need instance specific data.
- */
-static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
- PROC_THERMAL_NONE;
-
#define POWER_LIMIT_SHOW(index, suffix) \
static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -38,11 +21,6 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
{ \
struct proc_thermal_device *proc_dev = dev_get_drvdata(dev); \
\
- if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
- dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
- return 0; \
- } \
- \
return sprintf(buf, "%lu\n",\
(unsigned long)proc_dev->power_limits[index].suffix * 1000); \
}
@@ -100,24 +78,27 @@ static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
if (err)
return err;
- val = (val >> 24) & 0xff;
+ val = (val >> 24) & 0x3f;
return sprintf(buf, "%d\n", (int)val);
}
-static int tcc_offset_update(int tcc)
+static int tcc_offset_update(unsigned int tcc)
{
u64 val;
int err;
- if (!tcc)
+ if (tcc > 63)
return -EINVAL;
err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
if (err)
return err;
- val &= ~GENMASK_ULL(31, 24);
- val |= (tcc & 0xff) << 24;
+ if (val & BIT(31))
+ return -EPERM;
+
+ val &= ~GENMASK_ULL(29, 24);
+ val |= (tcc & 0x3f) << 24;
err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
if (err)
@@ -126,14 +107,15 @@ static int tcc_offset_update(int tcc)
return 0;
}
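With the new bounds above, the TCC offset is a 6-bit field in bits 29:24 of MSR_IA32_TEMPERATURE_TARGET and bit 31 acts as a lock. A worked example of the update, assuming the lock bit is clear and tcc = 10:

	val &= ~GENMASK_ULL(29, 24);		/* clear the old offset field */
	val |= (10ULL & 0x3f) << 24;		/* 0x0a << 24 = 0x0A000000    */
	/* values above 63 are rejected before the MSR is written */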
-static int tcc_offset_save;
+static unsigned int tcc_offset_save;
static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
+ unsigned int tcc;
u64 val;
- int tcc, err;
+ int err;
err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
if (err)
@@ -142,7 +124,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
if (!(val & BIT(30)))
return -EACCES;
- if (kstrtoint(buf, 0, &tcc))
+ if (kstrtouint(buf, 0, &tcc))
return -EINVAL;
err = tcc_offset_update(tcc);
@@ -291,11 +273,8 @@ static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
}
}
-
-static int proc_thermal_add(struct device *dev,
- struct proc_thermal_device **priv)
+int proc_thermal_add(struct device *dev, struct proc_thermal_device *proc_priv)
{
- struct proc_thermal_device *proc_priv;
struct acpi_device *adev;
acpi_status status;
unsigned long long tmp;
@@ -306,13 +285,8 @@ static int proc_thermal_add(struct device *dev,
if (!adev)
return -ENODEV;
- proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
- if (!proc_priv)
- return -ENOMEM;
-
proc_priv->dev = dev;
proc_priv->adev = adev;
- *priv = proc_priv;
ret = proc_thermal_read_ppcc(proc_priv);
if (ret)
@@ -338,15 +312,29 @@ static int proc_thermal_add(struct device *dev,
if (ret)
goto remove_zone;
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ if (ret)
+ goto remove_notify;
+
+ ret = sysfs_create_group(&dev->kobj, &power_limit_attribute_group);
+ if (ret) {
+ sysfs_remove_file(&dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ goto remove_notify;
+ }
+
return 0;
+remove_notify:
+ acpi_remove_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY, proc_thermal_notify);
remove_zone:
int340x_thermal_zone_remove(proc_priv->int340x_zone);
return ret;
}
+EXPORT_SYMBOL_GPL(proc_thermal_add);
-static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
{
acpi_remove_notify_handler(proc_priv->adev->handle,
ACPI_DEVICE_NOTIFY, proc_thermal_notify);
@@ -355,60 +343,24 @@ static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
sysfs_remove_group(&proc_priv->dev->kobj,
&power_limit_attribute_group);
}
+EXPORT_SYMBOL_GPL(proc_thermal_remove);
-static int int3401_add(struct platform_device *pdev)
+int proc_thermal_resume(struct device *dev)
{
- struct proc_thermal_device *proc_priv;
- int ret;
-
- if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
- dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
- return -ENODEV;
- }
-
- ret = proc_thermal_add(&pdev->dev, &proc_priv);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, proc_priv);
- proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
-
- dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
-
- ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&pdev->dev.kobj, &power_limit_attribute_group);
- if (ret)
- sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ struct proc_thermal_device *proc_dev;
- return ret;
-}
+ proc_dev = dev_get_drvdata(dev);
+ proc_thermal_read_ppcc(proc_dev);
-static int int3401_remove(struct platform_device *pdev)
-{
- proc_thermal_remove(platform_get_drvdata(pdev));
+ tcc_offset_update(tcc_offset_save);
return 0;
}
-
-static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid)
-{
- struct proc_thermal_device *proc_priv;
- struct pci_dev *pdev = devid;
-
- proc_priv = pci_get_drvdata(pdev);
-
- intel_soc_dts_iosf_interrupt_handler(proc_priv->soc_dts);
-
- return IRQ_HANDLED;
-}
+EXPORT_SYMBOL_GPL(proc_thermal_resume);
#define MCHBAR 0
-static int proc_thermal_set_mmio_base(struct pci_dev *pdev,
- struct proc_thermal_device *proc_priv)
+static int proc_thermal_set_mmio_base(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
{
int ret;
@@ -423,9 +375,9 @@ static int proc_thermal_set_mmio_base(struct pci_dev *pdev,
return 0;
}
-static int proc_thermal_mmio_add(struct pci_dev *pdev,
- struct proc_thermal_device *proc_priv,
- kernel_ulong_t feature_mask)
+int proc_thermal_mmio_add(struct pci_dev *pdev,
+ struct proc_thermal_device *proc_priv,
+ kernel_ulong_t feature_mask)
{
int ret;
@@ -471,11 +423,10 @@ err_rem_rapl:
return ret;
}
+EXPORT_SYMBOL_GPL(proc_thermal_mmio_add);
-static void proc_thermal_mmio_remove(struct pci_dev *pdev)
+void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
{
- struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
-
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_RAPL)
proc_thermal_rapl_remove();
@@ -486,181 +437,7 @@ static void proc_thermal_mmio_remove(struct pci_dev *pdev)
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_MBOX)
proc_thermal_mbox_remove(pdev);
}
-
-static int proc_thermal_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct proc_thermal_device *proc_priv;
- int ret;
-
- if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
- dev_err(&pdev->dev, "error: enumerated as platform dev\n");
- return -ENODEV;
- }
-
- ret = pcim_enable_device(pdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "error: could not enable device\n");
- return ret;
- }
-
- ret = proc_thermal_add(&pdev->dev, &proc_priv);
- if (ret)
- return ret;
-
- pci_set_drvdata(pdev, proc_priv);
- proc_thermal_emum_mode = PROC_THERMAL_PCI;
-
- if (pdev->device == PCI_DEVICE_ID_INTEL_BSW_THERMAL) {
- /*
- * Enumerate additional DTS sensors available via IOSF.
- * But we are not treating as a failure condition, if
- * there are no aux DTSs enabled or fails. This driver
- * already exposes sensors, which can be accessed via
- * ACPI/MSR. So we don't want to fail for auxiliary DTSs.
- */
- proc_priv->soc_dts = intel_soc_dts_iosf_init(
- INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
-
- if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
- ret = pci_enable_msi(pdev);
- if (!ret) {
- ret = request_threaded_irq(pdev->irq, NULL,
- proc_thermal_pci_msi_irq,
- IRQF_ONESHOT, "proc_thermal",
- pdev);
- if (ret) {
- intel_soc_dts_iosf_exit(
- proc_priv->soc_dts);
- pci_disable_msi(pdev);
- proc_priv->soc_dts = NULL;
- }
- }
- } else
- dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
- }
-
- dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
-
- ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&pdev->dev.kobj, &power_limit_attribute_group);
- if (ret) {
- sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
- return ret;
- }
-
- ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
- if (ret) {
- proc_thermal_remove(proc_priv);
- return ret;
- }
-
- return 0;
-}
-
-static void proc_thermal_pci_remove(struct pci_dev *pdev)
-{
- struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
-
- if (proc_priv->soc_dts) {
- intel_soc_dts_iosf_exit(proc_priv->soc_dts);
- if (pdev->irq) {
- free_irq(pdev->irq, pdev);
- pci_disable_msi(pdev);
- }
- }
-
- proc_thermal_mmio_remove(pdev);
- proc_thermal_remove(proc_priv);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int proc_thermal_resume(struct device *dev)
-{
- struct proc_thermal_device *proc_dev;
-
- proc_dev = dev_get_drvdata(dev);
- proc_thermal_read_ppcc(proc_dev);
-
- tcc_offset_update(tcc_offset_save);
-
- return 0;
-}
-#else
-#define proc_thermal_resume NULL
-#endif
-
-static SIMPLE_DEV_PM_OPS(proc_thermal_pm, NULL, proc_thermal_resume);
-
-static const struct pci_device_id proc_thermal_pci_ids[] = {
- { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
- { PCI_DEVICE_DATA(INTEL, BDW_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, BSW_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, BXT0_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, BXT1_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, BXTX_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, BXTP_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, CNL_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, CFL_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, GLK_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, HSB_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, ICL_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
- { PCI_DEVICE_DATA(INTEL, JSL_THERMAL, 0) },
- { PCI_DEVICE_DATA(INTEL, SKL_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
- { PCI_DEVICE_DATA(INTEL, TGL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_MBOX) },
- { },
-};
-
-MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
-
-static struct pci_driver proc_thermal_pci_driver = {
- .name = DRV_NAME,
- .probe = proc_thermal_pci_probe,
- .remove = proc_thermal_pci_remove,
- .id_table = proc_thermal_pci_ids,
- .driver.pm = &proc_thermal_pm,
-};
-
-static const struct acpi_device_id int3401_device_ids[] = {
- {"INT3401", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
-
-static struct platform_driver int3401_driver = {
- .probe = int3401_add,
- .remove = int3401_remove,
- .driver = {
- .name = "int3401 thermal",
- .acpi_match_table = int3401_device_ids,
- .pm = &proc_thermal_pm,
- },
-};
-
-static int __init proc_thermal_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&int3401_driver);
- if (ret)
- return ret;
-
- ret = pci_register_driver(&proc_thermal_pci_driver);
-
- return ret;
-}
-
-static void __exit proc_thermal_exit(void)
-{
- platform_driver_unregister(&int3401_driver);
- pci_unregister_driver(&proc_thermal_pci_driver);
-}
-
-module_init(proc_thermal_init);
-module_exit(proc_thermal_exit);
+EXPORT_SYMBOL_GPL(proc_thermal_mmio_remove);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index b9ed64561aaf..5a1cfe4864f1 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -44,6 +44,7 @@ struct proc_thermal_device {
struct intel_soc_dts_sensors *soc_dts;
u32 mmio_feature_mask;
void __iomem *mmio_base;
+ void *priv_data;
};
struct rapl_mmio_regs {
@@ -79,4 +80,12 @@ void proc_thermal_rfim_remove(struct pci_dev *pdev);
int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
void proc_thermal_mbox_remove(struct pci_dev *pdev);
+int processor_thermal_send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u32 *cmd_resp);
+int proc_thermal_add(struct device *dev, struct proc_thermal_device *priv);
+void proc_thermal_remove(struct proc_thermal_device *proc_priv);
+int proc_thermal_resume(struct device *dev);
+int proc_thermal_mmio_add(struct pci_dev *pdev,
+ struct proc_thermal_device *proc_priv,
+ kernel_ulong_t feature_mask);
+void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
#endif
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
new file mode 100644
index 000000000000..11dd2e825f4f
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Processor thermal device for newer processors
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/thermal.h>
+
+#include "int340x_thermal_zone.h"
+#include "processor_thermal_device.h"
+
+#define DRV_NAME "proc_thermal_pci"
+
+struct proc_thermal_pci {
+ struct pci_dev *pdev;
+ struct proc_thermal_device *proc_priv;
+ struct thermal_zone_device *tzone;
+ struct delayed_work work;
+ int stored_thres;
+ int no_legacy;
+};
+
+enum proc_thermal_mmio_type {
+ PROC_THERMAL_MMIO_TJMAX,
+ PROC_THERMAL_MMIO_PP0_TEMP,
+ PROC_THERMAL_MMIO_PP1_TEMP,
+ PROC_THERMAL_MMIO_PKG_TEMP,
+ PROC_THERMAL_MMIO_THRES_0,
+ PROC_THERMAL_MMIO_THRES_1,
+ PROC_THERMAL_MMIO_INT_ENABLE_0,
+ PROC_THERMAL_MMIO_INT_ENABLE_1,
+ PROC_THERMAL_MMIO_INT_STATUS_0,
+ PROC_THERMAL_MMIO_INT_STATUS_1,
+ PROC_THERMAL_MMIO_MAX
+};
+
+struct proc_thermal_mmio_info {
+ enum proc_thermal_mmio_type mmio_type;
+ u64 mmio_addr;
+ u64 shift;
+ u64 mask;
+};
+
+static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = {
+ { PROC_THERMAL_MMIO_TJMAX, 0x599c, 16, 0xff },
+ { PROC_THERMAL_MMIO_PP0_TEMP, 0x597c, 0, 0xff },
+ { PROC_THERMAL_MMIO_PP1_TEMP, 0x5980, 0, 0xff },
+ { PROC_THERMAL_MMIO_PKG_TEMP, 0x5978, 0, 0xff },
+ { PROC_THERMAL_MMIO_THRES_0, 0x5820, 8, 0x7F },
+ { PROC_THERMAL_MMIO_THRES_1, 0x5820, 16, 0x7F },
+ { PROC_THERMAL_MMIO_INT_ENABLE_0, 0x5820, 15, 0x01 },
+ { PROC_THERMAL_MMIO_INT_ENABLE_1, 0x5820, 23, 0x01 },
+ { PROC_THERMAL_MMIO_INT_STATUS_0, 0x7200, 6, 0x01 },
+ { PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 },
+};
+
+#define B0D4_THERMAL_NOTIFY_DELAY 1000
+static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY;
+
+static void proc_thermal_mmio_read(struct proc_thermal_pci *pci_info,
+ enum proc_thermal_mmio_type type,
+ u32 *value)
+{
+ *value = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
+ proc_thermal_mmio_info[type].mmio_addr));
+ *value >>= proc_thermal_mmio_info[type].shift;
+ *value &= proc_thermal_mmio_info[type].mask;
+}
+
+static void proc_thermal_mmio_write(struct proc_thermal_pci *pci_info,
+ enum proc_thermal_mmio_type type,
+ u32 value)
+{
+ u32 current_val;
+ u32 mask;
+
+ current_val = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
+ proc_thermal_mmio_info[type].mmio_addr));
+ mask = proc_thermal_mmio_info[type].mask << proc_thermal_mmio_info[type].shift;
+ current_val &= ~mask;
+
+ value &= proc_thermal_mmio_info[type].mask;
+ value <<= proc_thermal_mmio_info[type].shift;
+
+ current_val |= value;
+ iowrite32(current_val, ((u8 __iomem *)pci_info->proc_priv->mmio_base +
+ proc_thermal_mmio_info[type].mmio_addr));
+}
+
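+/*
+ * Worked example of the shift/mask scheme above (illustration only):
+ * THRES_0 lives at MMIO offset 0x5820 with shift 8 and mask 0x7F, so
+ * proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, t) does:
+ *
+ *   val  = ioread32(mmio_base + 0x5820);
+ *   val &= ~(0x7F << 8);
+ *   val |= (t & 0x7F) << 8;
+ *   iowrite32(val, mmio_base + 0x5820);
+ */
+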
+/*
+ * To avoid sending too many messages to user space, there is a 1 second delay:
+ * on an interrupt the interrupt is disabled and re-enabled after 1 second,
+ * and this work function is delayed by 1 second.
+ */
+static void proc_thermal_threshold_work_fn(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct proc_thermal_pci *pci_info = container_of(delayed_work,
+ struct proc_thermal_pci, work);
+ struct thermal_zone_device *tzone = pci_info->tzone;
+
+ if (tzone)
+ thermal_zone_device_update(tzone, THERMAL_TRIP_VIOLATED);
+
+ /* Enable interrupt flag */
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
+}
+
+static void pkg_thermal_schedule_work(struct delayed_work *work)
+{
+ unsigned long ms = msecs_to_jiffies(notify_delay_ms);
+
+ schedule_delayed_work(work, ms);
+}
+
+static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
+{
+ struct proc_thermal_pci *pci_info = devid;
+ u32 status;
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status);
+
+ /* Clear the interrupt enable flag */
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
+ pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
+
+ pkg_thermal_schedule_work(&pci_info->work);
+
+ return IRQ_HANDLED;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
+{
+ struct proc_thermal_pci *pci_info = tzd->devdata;
+ u32 _temp;
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_PKG_TEMP, &_temp);
+ *temp = (unsigned long)_temp * 1000;
+
+ return 0;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzd,
+ int trip, int *temp)
+{
+ struct proc_thermal_pci *pci_info = tzd->devdata;
+ u32 _temp;
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &_temp);
+ if (!_temp) {
+ *temp = THERMAL_TEMP_INVALID;
+ } else {
+ int tjmax;
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
+ _temp = tjmax - _temp;
+ *temp = (unsigned long)_temp * 1000;
+ }
+
+ return 0;
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *tzd, int trip,
+ enum thermal_trip_type *type)
+{
+ *type = THERMAL_TRIP_PASSIVE;
+
+ return 0;
+}
+
+static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
+{
+ struct proc_thermal_pci *pci_info = tzd->devdata;
+ int tjmax, _temp;
+
+ if (temp <= 0) {
+ cancel_delayed_work_sync(&pci_info->work);
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
+ thermal_zone_device_disable(tzd);
+ pci_info->stored_thres = 0;
+ return 0;
+ }
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
+ _temp = tjmax - (temp / 1000);
+ if (_temp < 0)
+ return -EINVAL;
+
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
+
+ thermal_zone_device_enable(tzd);
+ pci_info->stored_thres = temp;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+};
+
+static struct thermal_zone_params tzone_params = {
+ .governor_name = "user_space",
+ .no_hwmon = true,
+};
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct proc_thermal_device *proc_priv;
+ struct proc_thermal_pci *pci_info;
+ int irq_flag = 0, irq, ret;
+
+ proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv)
+ return -ENOMEM;
+
+ pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ pci_info->pdev = pdev;
+ ret = pcim_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ INIT_DELAYED_WORK(&pci_info->work, proc_thermal_threshold_work_fn);
+
+ ret = proc_thermal_add(&pdev->dev, proc_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n");
+ pci_info->no_legacy = 1;
+ }
+
+ proc_priv->priv_data = pci_info;
+ pci_info->proc_priv = proc_priv;
+ pci_set_drvdata(pdev, proc_priv);
+
+ ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
+ if (ret)
+ goto err_ret_thermal;
+
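+ /* One trip point, writable from user space (trips = 1, mask = 1) */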
+ pci_info->tzone = thermal_zone_device_register("TCPU_PCI", 1, 1, pci_info,
+ &tzone_ops,
+ &tzone_params, 0, 0);
+ if (IS_ERR(pci_info->tzone)) {
+ ret = PTR_ERR(pci_info->tzone);
+ goto err_ret_mmio;
+ }
+
+ /* request and enable interrupt */
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to allocate vectors!\n");
+ goto err_ret_tzone;
+ }
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ irq_flag = IRQF_SHARED;
+
+ irq = pci_irq_vector(pdev, 0);
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ proc_thermal_irq_handler, NULL,
+ irq_flag, KBUILD_MODNAME, pci_info);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
+ goto err_free_vectors;
+ }
+
+ return 0;
+
+err_free_vectors:
+ pci_free_irq_vectors(pdev);
+err_ret_tzone:
+ thermal_zone_device_unregister(pci_info->tzone);
+err_ret_mmio:
+ proc_thermal_mmio_remove(pdev, proc_priv);
+err_ret_thermal:
+ if (!pci_info->no_legacy)
+ proc_thermal_remove(proc_priv);
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
+ struct proc_thermal_pci *pci_info = proc_priv->priv_data;
+
+ cancel_delayed_work_sync(&pci_info->work);
+
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
+
+ devm_free_irq(&pdev->dev, pdev->irq, pci_info);
+ pci_free_irq_vectors(pdev);
+
+ thermal_zone_device_unregister(pci_info->tzone);
+ proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
+ if (!pci_info->no_legacy)
+ proc_thermal_remove(proc_priv);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int proc_thermal_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct proc_thermal_device *proc_priv;
+ struct proc_thermal_pci *pci_info;
+
+ proc_priv = pci_get_drvdata(pdev);
+ pci_info = proc_priv->priv_data;
+
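+ /* Restore the user-programmed threshold, which is lost across suspend */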
+ if (pci_info->stored_thres) {
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0,
+ pci_info->stored_thres / 1000);
+ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
+ }
+
+ if (!pci_info->no_legacy)
+ return proc_thermal_resume(dev);
+
+ return 0;
+}
+#else
+#define proc_thermal_pci_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, NULL, proc_thermal_pci_resume);
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = DRV_NAME,
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+ .driver.pm = &proc_thermal_pci_pm,
+};
+
+static int __init proc_thermal_init(void)
+{
+ return pci_register_driver(&proc_thermal_pci_driver);
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
new file mode 100644
index 000000000000..f5fc1791b11e
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * B0D4 processor thermal device
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/thermal.h>
+
+#include "int340x_thermal_zone.h"
+#include "processor_thermal_device.h"
+#include "../intel_soc_dts_iosf.h"
+
+#define DRV_NAME "proc_thermal"
+
+static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid)
+{
+ struct proc_thermal_device *proc_priv;
+ struct pci_dev *pdev = devid;
+
+ proc_priv = pci_get_drvdata(pdev);
+
+ intel_soc_dts_iosf_interrupt_handler(proc_priv->soc_dts);
+
+ return IRQ_HANDLED;
+}
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv)
+ return -ENOMEM;
+
+ ret = proc_thermal_add(&pdev->dev, proc_priv);
+ if (ret)
+ return ret;
+
+ pci_set_drvdata(pdev, proc_priv);
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BSW_THERMAL) {
+ /*
+ * Enumerate additional DTS sensors available via IOSF.
+ * This is not treated as a failure condition if there are no
+ * aux DTSs enabled or the init fails. This driver already
+ * exposes sensors, which can be accessed via ACPI/MSR, so we
+ * don't want to fail for auxiliary DTSs.
+ */
+ proc_priv->soc_dts = intel_soc_dts_iosf_init(
+ INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
+
+ if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ ret = request_threaded_irq(pdev->irq, NULL,
+ proc_thermal_pci_msi_irq,
+ IRQF_ONESHOT, "proc_thermal",
+ pdev);
+ if (ret) {
+ intel_soc_dts_iosf_exit(
+ proc_priv->soc_dts);
+ pci_disable_msi(pdev);
+ proc_priv->soc_dts = NULL;
+ }
+ }
+ } else
+ dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
+ }
+
+ ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
+ if (ret) {
+ proc_thermal_remove(proc_priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
+
+ if (proc_priv->soc_dts) {
+ intel_soc_dts_iosf_exit(proc_priv->soc_dts);
+ if (pdev->irq) {
+ free_irq(pdev->irq, pdev);
+ pci_disable_msi(pdev);
+ }
+ }
+
+ proc_thermal_mmio_remove(pdev, proc_priv);
+ proc_thermal_remove(proc_priv);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int proc_thermal_pci_resume(struct device *dev)
+{
+ return proc_thermal_resume(dev);
+}
+#else
+#define proc_thermal_pci_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, NULL, proc_thermal_pci_resume);
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, BDW_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, BSW_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, BXT0_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, BXT1_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, BXTX_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, BXTP_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, CNL_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, CFL_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, GLK_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, HSB_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, ICL_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
+ { PCI_DEVICE_DATA(INTEL, JSL_THERMAL, 0) },
+ { PCI_DEVICE_DATA(INTEL, SKL_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
+ { PCI_DEVICE_DATA(INTEL, TGL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_MBOX) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = DRV_NAME,
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+ .driver.pm = &proc_thermal_pci_pm,
+};
+
+static int __init proc_thermal_init(void)
+{
+ return pci_register_driver(&proc_thermal_pci_driver);
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
index 990f51f22884..59e93b04f0a9 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
@@ -23,7 +23,7 @@
static DEFINE_MUTEX(mbox_lock);
-static int send_mbox_cmd(struct pci_dev *pdev, u8 cmd_id, u32 cmd_data, u8 *cmd_resp)
+static int send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u32 *cmd_resp)
{
struct proc_thermal_device *proc_priv;
u32 retries, data;
@@ -82,6 +82,12 @@ unlock_mbox:
return ret;
}
+int processor_thermal_send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u32 *cmd_resp)
+{
+ return send_mbox_cmd(pdev, cmd_id, cmd_data, cmd_resp);
+}
+EXPORT_SYMBOL_GPL(processor_thermal_send_mbox_cmd);
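+/*
+ * Used by processor_thermal_rfim.c for the rfi_restriction and
+ * ddr_data_rate sysfs attributes.
+ */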
+
/* List of workload types */
static const char * const workload_types[] = {
"none",
@@ -147,7 +153,7 @@ static ssize_t workload_type_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
- u8 cmd_resp;
+ u32 cmd_resp;
int ret;
ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, 0, &cmd_resp);
@@ -181,7 +187,7 @@ static bool workload_req_created;
int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
{
- u8 cmd_resp;
+ u32 cmd_resp;
int ret;
/* Check if there is a mailbox support, if fails return success */
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index aef993a813e2..2b8a3235d518 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -190,6 +190,59 @@ static DEVICE_ATTR_RO(ddr_data_rate_point_2);
static DEVICE_ATTR_RO(ddr_data_rate_point_3);
static DEVICE_ATTR_RW(rfi_disable);
+static ssize_t rfi_restriction_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u16 cmd_id = 0x0008;
+ u32 cmd_resp;
+ u32 input;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &input);
+ if (ret)
+ return ret;
+
+ ret = processor_thermal_send_mbox_cmd(to_pci_dev(dev), cmd_id, input, &cmd_resp);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t rfi_restriction_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u16 cmd_id = 0x0007;
+ u32 cmd_resp;
+ int ret;
+
+ ret = processor_thermal_send_mbox_cmd(to_pci_dev(dev), cmd_id, 0, &cmd_resp);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", cmd_resp);
+}
+
+static ssize_t ddr_data_rate_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u16 cmd_id = 0x0107;
+ u32 cmd_resp;
+ int ret;
+
+ ret = processor_thermal_send_mbox_cmd(to_pci_dev(dev), cmd_id, 0, &cmd_resp);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", cmd_resp);
+}
+
+static DEVICE_ATTR_RW(rfi_restriction);
+static DEVICE_ATTR_RO(ddr_data_rate);
+
static struct attribute *dvfs_attrs[] = {
&dev_attr_rfi_restriction_run_busy.attr,
&dev_attr_rfi_restriction_err_code.attr,
@@ -199,6 +252,8 @@ static struct attribute *dvfs_attrs[] = {
&dev_attr_ddr_data_rate_point_2.attr,
&dev_attr_ddr_data_rate_point_3.attr,
&dev_attr_rfi_disable.attr,
+ &dev_attr_ddr_data_rate.attr,
+ &dev_attr_rfi_restriction.attr,
NULL
};
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index 4f1a2f7c016c..342b0bb5a56d 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -350,13 +350,14 @@ int intel_soc_dts_iosf_add_read_only_critical_trip(
int i, j;
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- for (j = 0; j < sensors->soc_dts[i].trip_count; ++j) {
- if (!(sensors->soc_dts[i].trip_mask & BIT(j))) {
- return update_trip_temp(&sensors->soc_dts[i], j,
- sensors->tj_max - critical_offset,
- THERMAL_TRIP_CRITICAL);
- }
- }
+ struct intel_soc_dts_sensor_entry *entry = &sensors->soc_dts[i];
+ int temp = sensors->tj_max - critical_offset;
+ unsigned long count = entry->trip_count;
+ unsigned long mask = entry->trip_mask;
+
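+ /* Pick the first trip not in the writable trip mask as the read-only critical trip */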
+ j = find_first_zero_bit(&mask, count);
+ if (j < count)
+ return update_trip_temp(entry, j, temp, THERMAL_TRIP_CRITICAL);
}
return -EINVAL;
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 97e8678ccf0e..ede94eadddda 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -23,6 +23,8 @@
#include <linux/reset.h>
#include <linux/types.h>
+#include "thermal_hwmon.h"
+
/* AUXADC Registers */
#define AUXADC_CON1_SET_V 0x008
#define AUXADC_CON1_CLR_V 0x00c
@@ -1087,6 +1089,10 @@ static int mtk_thermal_probe(struct platform_device *pdev)
goto err_disable_clk_peri_therm;
}
+ ret = devm_thermal_add_hwmon_sysfs(tzdev);
+ if (ret)
+ dev_warn(&pdev->dev, "error in thermal_add_hwmon_sysfs");
+
return 0;
err_disable_clk_peri_therm:
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index e1e412348076..fdf16aa34eb4 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -143,7 +143,7 @@ static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_tsc *tsc,
* Division is not scaled in BSP and if scaled it might overflow
* the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled
*/
- tsc->tj_t = (FIXPT_INT((ptat[1] - ptat[2]) * 157)
+ tsc->tj_t = (FIXPT_INT((ptat[1] - ptat[2]) * (ths_tj_1 - TJ_3))
/ (ptat[0] - ptat[2])) + FIXPT_INT(TJ_3);
tsc->coef.a1 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[2]),
@@ -307,7 +307,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
{
struct rcar_gen3_thermal_priv *priv;
struct device *dev = &pdev->dev;
- const int *rcar_gen3_ths_tj_1 = of_device_get_match_data(dev);
+ const int *ths_tj_1 = of_device_get_match_data(dev);
struct resource *res;
struct thermal_zone_device *zone;
int ret, i;
@@ -352,8 +352,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->tscs[i] = tsc;
priv->thermal_init(tsc);
- rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i],
- *rcar_gen3_ths_tj_1);
+ rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
&rcar_gen3_tz_of_ops);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index aa9e0e31ef98..657d84b9963e 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -211,7 +211,11 @@ struct rockchip_thermal_data {
#define TSADCV3_AUTO_PERIOD_TIME 1875 /* 2.5ms */
#define TSADCV3_AUTO_PERIOD_HT_TIME 1875 /* 2.5ms */
+#define TSADCV5_AUTO_PERIOD_TIME 1622 /* 2.5ms */
+#define TSADCV5_AUTO_PERIOD_HT_TIME 1622 /* 2.5ms */
+
#define TSADCV2_USER_INTER_PD_SOC 0x340 /* 13 clocks */
+#define TSADCV5_USER_INTER_PD_SOC 0xfc0 /* 97us, at least 90us */
#define GRF_SARADC_TESTBIT 0x0e644
#define GRF_TSADC_TESTBIT_L 0x0e648
@@ -219,6 +223,12 @@ struct rockchip_thermal_data {
#define PX30_GRF_SOC_CON2 0x0408
+#define RK3568_GRF_TSADC_CON 0x0600
+#define RK3568_GRF_TSADC_ANA_REG0 (0x10001 << 0)
+#define RK3568_GRF_TSADC_ANA_REG1 (0x10001 << 1)
+#define RK3568_GRF_TSADC_ANA_REG2 (0x10001 << 2)
+#define RK3568_GRF_TSADC_TSEN (0x10001 << 8)
+
#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
#define GRF_TSADC_VCM_EN_L (0x10001 << 7)
@@ -474,6 +484,45 @@ static const struct tsadc_table rk3399_code_table[] = {
{TSADCV3_DATA_MASK, 125000},
};
+static const struct tsadc_table rk3568_code_table[] = {
+ {0, -40000},
+ {1584, -40000},
+ {1620, -35000},
+ {1652, -30000},
+ {1688, -25000},
+ {1720, -20000},
+ {1756, -15000},
+ {1788, -10000},
+ {1824, -5000},
+ {1856, 0},
+ {1892, 5000},
+ {1924, 10000},
+ {1956, 15000},
+ {1992, 20000},
+ {2024, 25000},
+ {2060, 30000},
+ {2092, 35000},
+ {2128, 40000},
+ {2160, 45000},
+ {2196, 50000},
+ {2228, 55000},
+ {2264, 60000},
+ {2300, 65000},
+ {2332, 70000},
+ {2368, 75000},
+ {2400, 80000},
+ {2436, 85000},
+ {2468, 90000},
+ {2500, 95000},
+ {2536, 100000},
+ {2572, 105000},
+ {2604, 110000},
+ {2636, 115000},
+ {2672, 120000},
+ {2704, 125000},
+ {TSADCV2_DATA_MASK, 125000},
+};
+
static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
int temp)
{
@@ -701,6 +750,49 @@ static void rk_tsadcv4_initialize(struct regmap *grf, void __iomem *regs,
regmap_write(grf, PX30_GRF_SOC_CON2, GRF_CON_TSADC_CH_INV);
}
+static void rk_tsadcv7_initialize(struct regmap *grf, void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ writel_relaxed(TSADCV5_USER_INTER_PD_SOC, regs + TSADCV2_USER_CON);
+ writel_relaxed(TSADCV5_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV5_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
+ if (tshut_polarity == TSHUT_HIGH_ACTIVE)
+ writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+ regs + TSADCV2_AUTO_CON);
+ else
+ writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+ regs + TSADCV2_AUTO_CON);
+
+ /*
+ * The general register file (GRF) is optional
+ * and might not be available.
+ */
+ if (!IS_ERR(grf)) {
+ regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_TSEN);
+ /*
+ * RK3568 TRM, section 18.5, requires a delay of no less
+ * than 10us between the rising edge of tsadc_tsen_en
+ * and the rising edge of tsadc_ana_reg_0/1/2.
+ */
+ udelay(15);
+ regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_ANA_REG0);
+ regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_ANA_REG1);
+ regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_ANA_REG2);
+
+ /*
+ * RK3568 TRM, section 18.5, requires a delay of no less
+ * than 90us after the rising edge of tsadc_ana_reg_0/1/2.
+ */
+ usleep_range(100, 200);
+ }
+}
+
static void rk_tsadcv2_irq_ack(void __iomem *regs)
{
u32 val;
@@ -1027,6 +1119,31 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
},
};
+static const struct rockchip_tsadc_chip rk3568_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+ .chn_num = 2, /* two channels for tsadc */
+
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv7_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rk3568_code_table,
+ .length = ARRAY_SIZE(rk3568_code_table),
+ .data_mask = TSADCV2_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct of_device_id of_rockchip_thermal_match[] = {
{ .compatible = "rockchip,px30-tsadc",
.data = (void *)&px30_tsadc_data,
@@ -1059,6 +1176,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
.compatible = "rockchip,rk3399-tsadc",
.data = (void *)&rk3399_tsadc_data,
},
+ {
+ .compatible = "rockchip,rk3568-tsadc",
+ .data = (void *)&rk3568_tsadc_data,
+ },
{ /* end */ },
};
MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
index 3682edb2f466..fff80fc18002 100644
--- a/drivers/thermal/sprd_thermal.c
+++ b/drivers/thermal/sprd_thermal.c
@@ -388,7 +388,7 @@ static int sprd_thm_probe(struct platform_device *pdev)
sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL);
if (!sen) {
ret = -ENOMEM;
- goto disable_clk;
+ goto of_put;
}
sen->data = thm;
@@ -397,13 +397,13 @@ static int sprd_thm_probe(struct platform_device *pdev)
ret = of_property_read_u32(sen_child, "reg", &sen->id);
if (ret) {
dev_err(&pdev->dev, "get sensor reg failed");
- goto disable_clk;
+ goto of_put;
}
ret = sprd_thm_sensor_calibration(sen_child, thm, sen);
if (ret) {
dev_err(&pdev->dev, "efuse cal analysis failed");
- goto disable_clk;
+ goto of_put;
}
sprd_thm_sensor_init(thm, sen);
@@ -416,19 +416,20 @@ static int sprd_thm_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "register thermal zone failed %d\n",
sen->id);
ret = PTR_ERR(sen->tzd);
- goto disable_clk;
+ goto of_put;
}
thm->sensor[sen->id] = sen;
}
+ /* sen_child set to NULL at this point */
ret = sprd_thm_set_ready(thm);
if (ret)
- goto disable_clk;
+ goto of_put;
ret = sprd_thm_wait_temp_ready(thm);
if (ret)
- goto disable_clk;
+ goto of_put;
for (i = 0; i < thm->nr_sensors; i++)
sprd_thm_toggle_sensor(thm->sensor[i], true);
@@ -436,6 +437,8 @@ static int sprd_thm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, thm);
return 0;
+of_put:
+ of_node_put(sen_child);
disable_clk:
clk_disable_unprepare(thm->clk);
return ret;
@@ -532,6 +535,7 @@ static const struct of_device_id sprd_thermal_of_match[] = {
{ .compatible = "sprd,ums512-thermal", .data = &ums512_data },
{ },
};
+MODULE_DEVICE_TABLE(of, sprd_thermal_of_match);
static const struct dev_pm_ops sprd_thermal_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sprd_thm_suspend, sprd_thm_resume)
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index a0114452d11f..d68596c40be9 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -119,19 +119,10 @@ static int st_mmap_regmap_init(struct st_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no memory resources defined\n");
- return -ENODEV;
- }
-
- sensor->mmio_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(sensor->mmio_base)) {
- dev_err(dev, "failed to remap IO\n");
+ sensor->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(sensor->mmio_base))
return PTR_ERR(sensor->mmio_base);
- }
sensor->regmap = devm_regmap_init_mmio(dev, sensor->mmio_base,
&st_416mpe_regmap_config);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 10a2d8e1cacf..97ef9b040b84 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1315,7 +1315,7 @@ free_tz:
EXPORT_SYMBOL_GPL(thermal_zone_device_register);
/**
- * thermal_device_unregister - removes the registered thermal zone device
+ * thermal_zone_device_unregister - removes the registered thermal zone device
* @tz: the thermal zone device to remove
*/
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 5b76f9a1280d..6379f26a335f 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -559,6 +559,9 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
if (!tz)
return;
+ /* stop temperature polling */
+ thermal_zone_device_disable(tzd);
+
mutex_lock(&tzd->lock);
tzd->ops->get_temp = NULL;
tzd->ops->get_trend = NULL;
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 7aa48f6c41d9..da19d7987d00 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -2,7 +2,7 @@
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
-thunderbolt-objs += nvm.o retimer.o quirks.o
+thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index 35fa17f7e599..b67e72d5644b 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -180,3 +180,209 @@ bool tb_acpi_is_xdomain_allowed(void)
return osc_sb_native_usb4_control & OSC_USB_XDOMAIN;
return true;
}
+
+/* UUID for retimer _DSM: e0053122-795b-4122-8a5e-57be1d26acb3 */
+static const guid_t retimer_dsm_guid =
+ GUID_INIT(0xe0053122, 0x795b, 0x4122,
+ 0x8a, 0x5e, 0x57, 0xbe, 0x1d, 0x26, 0xac, 0xb3);
+
+#define RETIMER_DSM_QUERY_ONLINE_STATE 1
+#define RETIMER_DSM_SET_ONLINE_STATE 2
+
+static int tb_acpi_retimer_set_power(struct tb_port *port, bool power)
+{
+ struct usb4_port *usb4 = port->usb4;
+ union acpi_object argv4[2];
+ struct acpi_device *adev;
+ union acpi_object *obj;
+ int ret;
+
+ if (!usb4->can_offline)
+ return 0;
+
+ adev = ACPI_COMPANION(&usb4->dev);
+ if (WARN_ON(!adev))
+ return 0;
+
+ /* Check if we are already powered on (and in correct mode) */
+ obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1,
+ RETIMER_DSM_QUERY_ONLINE_STATE, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj) {
+ tb_port_warn(port, "ACPI: query online _DSM failed\n");
+ return -EIO;
+ }
+
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+
+ if (power == ret)
+ return 0;
+
+ tb_port_dbg(port, "ACPI: calling _DSM to power %s retimers\n",
+ power ? "on" : "off");
+
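+ /* _DSM argument: a package with a single integer (1 = power on, 0 = power off) */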
+ argv4[0].type = ACPI_TYPE_PACKAGE;
+ argv4[0].package.count = 1;
+ argv4[0].package.elements = &argv4[1];
+ argv4[1].integer.type = ACPI_TYPE_INTEGER;
+ argv4[1].integer.value = power;
+
+ obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1,
+ RETIMER_DSM_SET_ONLINE_STATE, argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj) {
+ tb_port_warn(port,
+ "ACPI: set online state _DSM evaluation failed\n");
+ return -EIO;
+ }
+
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+
+ if (ret >= 0) {
+ if (power)
+ return ret == 1 ? 0 : -EBUSY;
+ return 0;
+ }
+
+ tb_port_warn(port, "ACPI: set online state _DSM failed with error %d\n", ret);
+ return -EIO;
+}
+
+/**
+ * tb_acpi_power_on_retimers() - Call platform to power on retimers
+ * @port: USB4 port
+ *
+ * Calls platform to turn on power to all retimers behind this USB4
+ * port. After this function returns successfully the caller can
+ * continue with the normal retimer flows (as specified in the USB4
+ * spec). Note that if this returns %-EBUSY it means the type-C port is
+ * in non-USB4/TBT mode (there is a non-USB4/TBT device connected).
+ *
+ * This should only be called if the USB4/TBT link is not up.
+ *
+ * Returns %0 on success.
+ */
+int tb_acpi_power_on_retimers(struct tb_port *port)
+{
+ return tb_acpi_retimer_set_power(port, true);
+}
+
+/**
+ * tb_acpi_power_off_retimers() - Call platform to power off retimers
+ * @port: USB4 port
+ *
+ * This is the opposite of tb_acpi_power_on_retimers(). After returning
+ * successfully the normal operations with the @port can continue.
+ *
+ * Returns %0 on success.
+ */
+int tb_acpi_power_off_retimers(struct tb_port *port)
+{
+ return tb_acpi_retimer_set_power(port, false);
+}
+
+static bool tb_acpi_bus_match(struct device *dev)
+{
+ return tb_is_switch(dev) || tb_is_usb4_port_device(dev);
+}
+
+static struct acpi_device *tb_acpi_find_port(struct acpi_device *adev,
+ const struct tb_port *port)
+{
+ struct acpi_device *port_adev;
+
+ if (!adev)
+ return NULL;
+
+ /*
+ * Device routers exist under the downstream-facing USB4 port
+ * of the parent router. Their _ADR is always 0.
+ */
+ list_for_each_entry(port_adev, &adev->children, node) {
+ if (acpi_device_adr(port_adev) == port->port)
+ return port_adev;
+ }
+
+ return NULL;
+}
+
+static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
+{
+ struct acpi_device *adev = NULL;
+ struct tb_switch *parent_sw;
+
+ parent_sw = tb_switch_parent(sw);
+ if (parent_sw) {
+ struct tb_port *port = tb_port_at(tb_route(sw), parent_sw);
+ struct acpi_device *port_adev;
+
+ port_adev = tb_acpi_find_port(ACPI_COMPANION(&parent_sw->dev), port);
+ if (port_adev)
+ adev = acpi_find_child_device(port_adev, 0, false);
+ } else {
+ struct tb_nhi *nhi = sw->tb->nhi;
+ struct acpi_device *parent_adev;
+
+ parent_adev = ACPI_COMPANION(&nhi->pdev->dev);
+ if (parent_adev)
+ adev = acpi_find_child_device(parent_adev, 0, false);
+ }
+
+ return adev;
+}
+
+static struct acpi_device *tb_acpi_find_companion(struct device *dev)
+{
+ /*
+ * The Thunderbolt/USB4 hierarchy looks like the following:
+ *
+ * Device (NHI)
+ * Device (HR) // Host router _ADR == 0
+ * Device (DFP0) // Downstream port _ADR == lane 0 adapter
+ * Device (DR) // Device router _ADR == 0
+ * Device (UFP) // Upstream port _ADR == lane 0 adapter
+ * Device (DFP1) // Downstream port _ADR == lane 0 adapter number
+ *
+ * At the moment we bind the host router to the corresponding
+ * Linux device.
+ */
+ if (tb_is_switch(dev))
+ return tb_acpi_switch_find_companion(tb_to_switch(dev));
+ else if (tb_is_usb4_port_device(dev))
+ return tb_acpi_find_port(ACPI_COMPANION(dev->parent),
+ tb_to_usb4_port_device(dev)->port);
+ return NULL;
+}
+
+static void tb_acpi_setup(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
+
+ if (!adev || !usb4)
+ return;
+
+ if (acpi_check_dsm(adev->handle, &retimer_dsm_guid, 1,
+ BIT(RETIMER_DSM_QUERY_ONLINE_STATE) |
+ BIT(RETIMER_DSM_SET_ONLINE_STATE)))
+ usb4->can_offline = true;
+}
+
+static struct acpi_bus_type tb_acpi_bus = {
+ .name = "thunderbolt",
+ .match = tb_acpi_bus_match,
+ .find_companion = tb_acpi_find_companion,
+ .setup = tb_acpi_setup,
+};
+
+int tb_acpi_init(void)
+{
+ return register_acpi_bus_type(&tb_acpi_bus);
+}
+
+void tb_acpi_exit(void)
+{
+ unregister_acpi_bus_type(&tb_acpi_bus);
+}
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index 5631319f7b20..9f20c7bbf0ce 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -299,15 +299,13 @@ static int dma_port_request(struct tb_dma_port *dma, u32 in,
return status_to_errno(out);
}
-static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
- void *buf, u32 size)
+static int dma_port_flash_read_block(void *data, unsigned int dwaddress,
+ void *buf, size_t dwords)
{
+ struct tb_dma_port *dma = data;
struct tb_switch *sw = dma->sw;
- u32 in, dwaddress, dwords;
int ret;
-
- dwaddress = address / 4;
- dwords = size / 4;
+ u32 in;
in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
if (dwords < MAIL_DATA_DWORDS)
@@ -323,14 +321,13 @@ static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
-static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
- const void *buf, u32 size)
+static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
+ const void *buf, size_t dwords)
{
+ struct tb_dma_port *dma = data;
struct tb_switch *sw = dma->sw;
- u32 in, dwaddress, dwords;
int ret;
-
- dwords = size / 4;
+ u32 in;
/* Write the block to MAIL_DATA registers */
ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
@@ -341,12 +338,8 @@ static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
/* CSS header write is always done to the same magic address */
- if (address >= DMA_PORT_CSS_ADDRESS) {
- dwaddress = DMA_PORT_CSS_ADDRESS;
+ if (dwaddress >= DMA_PORT_CSS_ADDRESS)
in |= MAIL_IN_CSS;
- } else {
- dwaddress = address / 4;
- }
in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
@@ -365,36 +358,8 @@ static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
{
- unsigned int retries = DMA_PORT_RETRIES;
-
- do {
- unsigned int offset;
- size_t nbytes;
- int ret;
-
- offset = address & 3;
- nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
-
- ret = dma_port_flash_read_block(dma, address, dma->buf,
- ALIGN(nbytes, 4));
- if (ret) {
- if (ret == -ETIMEDOUT) {
- if (retries--)
- continue;
- ret = -EIO;
- }
- return ret;
- }
-
- nbytes -= offset;
- memcpy(buf, dma->buf + offset, nbytes);
-
- size -= nbytes;
- address += nbytes;
- buf += nbytes;
- } while (size > 0);
-
- return 0;
+ return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
+ dma_port_flash_read_block, dma);
}
/**
@@ -411,40 +376,11 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
const void *buf, size_t size)
{
- unsigned int retries = DMA_PORT_RETRIES;
- unsigned int offset;
-
- if (address >= DMA_PORT_CSS_ADDRESS) {
- offset = 0;
- if (size > DMA_PORT_CSS_MAX_SIZE)
- return -E2BIG;
- } else {
- offset = address & 3;
- address = address & ~3;
- }
-
- do {
- u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
- int ret;
+ if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE)
+ return -E2BIG;
- memcpy(dma->buf + offset, buf, nbytes);
-
- ret = dma_port_flash_write_block(dma, address, buf, nbytes);
- if (ret) {
- if (ret == -ETIMEDOUT) {
- if (retries--)
- continue;
- ret = -EIO;
- }
- return ret;
- }
-
- size -= nbytes;
- address += nbytes;
- buf += nbytes;
- } while (size > 0);
-
- return 0;
+ return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES,
+ dma_port_flash_write_block, dma);
}
/**
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 98f4056f89ff..a062befcb3b2 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -881,11 +881,12 @@ int tb_domain_init(void)
int ret;
tb_test_init();
-
tb_debugfs_init();
+ tb_acpi_init();
+
ret = tb_xdomain_init();
if (ret)
- goto err_debugfs;
+ goto err_acpi;
ret = bus_register(&tb_bus_type);
if (ret)
goto err_xdomain;
@@ -894,7 +895,8 @@ int tb_domain_init(void)
err_xdomain:
tb_xdomain_exit();
-err_debugfs:
+err_acpi:
+ tb_acpi_exit();
tb_debugfs_exit();
tb_test_exit();
@@ -907,6 +909,7 @@ void tb_domain_exit(void)
ida_destroy(&tb_domain_ida);
tb_nvm_exit();
tb_xdomain_exit();
+ tb_acpi_exit();
tb_debugfs_exit();
tb_test_exit();
}
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 46d0906a3070..470885e6f1c8 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -214,7 +214,10 @@ static u32 tb_crc32(void *data, size_t len)
return ~__crc32c_le(~0, data, len);
}
-#define TB_DROM_DATA_START 13
+#define TB_DROM_DATA_START 13
+#define TB_DROM_HEADER_SIZE 22
+#define USB4_DROM_HEADER_SIZE 16
+
struct tb_drom_header {
/* BYTE 0 */
u8 uid_crc8; /* checksum for uid */
@@ -224,9 +227,9 @@ struct tb_drom_header {
u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
/* BYTE 13 */
u8 device_rom_revision; /* should be <= 1 */
- u16 data_len:10;
- u8 __unknown1:6;
- /* BYTES 16-21 */
+ u16 data_len:12;
+ u8 reserved:4;
+ /* BYTES 16-21 - Only for TBT DROM, nonexistent in USB4 DROM */
u16 vendor_id;
u16 model_id;
u8 model_rev;
@@ -401,10 +404,10 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
*
* Drom must have been copied to sw->drom.
*/
-static int tb_drom_parse_entries(struct tb_switch *sw)
+static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
{
struct tb_drom_header *header = (void *) sw->drom;
- u16 pos = sizeof(*header);
+ u16 pos = header_size;
u16 drom_size = header->data_len + TB_DROM_DATA_START;
int res;
@@ -566,7 +569,7 @@ static int tb_drom_parse(struct tb_switch *sw)
header->data_crc32, crc);
}
- return tb_drom_parse_entries(sw);
+ return tb_drom_parse_entries(sw, TB_DROM_HEADER_SIZE);
}
static int usb4_drom_parse(struct tb_switch *sw)
@@ -583,7 +586,7 @@ static int usb4_drom_parse(struct tb_switch *sw)
return -EINVAL;
}
- return tb_drom_parse_entries(sw);
+ return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
}
/**
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 2f30b816705a..6255f1ef9599 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1677,14 +1677,18 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
static bool icm_tgl_is_supported(struct tb *tb)
{
- u32 val;
+ unsigned long end = jiffies + msecs_to_jiffies(10);
- /*
- * If the firmware is not running use software CM. This platform
- * should fully support both.
- */
- val = ioread32(tb->nhi->iobase + REG_FW_STS);
- return !!(val & REG_FW_STS_NVM_AUTH_DONE);
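+ /* Poll for up to 10 ms for the firmware to report NVM authentication done */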
+ do {
+ u32 val;
+
+ val = ioread32(tb->nhi->iobase + REG_FW_STS);
+ if (val & REG_FW_STS_NVM_AUTH_DONE)
+ return true;
+ usleep_range(100, 500);
+ } while (time_before(jiffies, end));
+
+ return false;
}
static void icm_handle_notification(struct work_struct *work)
@@ -2505,6 +2509,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_TGL_NHI1:
case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+ case PCI_DEVICE_ID_INTEL_ADL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ADL_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index bc671730a11f..c178f0d7beab 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -208,8 +208,8 @@ static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
if (ret)
return ret;
- ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
- TB_LC_SX_CTRL_WOU4);
+ ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
+ TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);
if (flags & TB_WAKE_ON_CONNECT)
ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
@@ -217,6 +217,8 @@ static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
ctrl |= TB_LC_SX_CTRL_WOU4;
if (flags & TB_WAKE_ON_PCIE)
ctrl |= TB_LC_SX_CTRL_WOP;
+ if (flags & TB_WAKE_ON_DP)
+ ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index a0386d1e3fc9..fa44332845a1 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
-#include <linux/platform_data/x86/apple.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -1127,69 +1126,6 @@ static bool nhi_imr_valid(struct pci_dev *pdev)
return true;
}
-/*
- * During suspend the Thunderbolt controller is reset and all PCIe
- * tunnels are lost. The NHI driver will try to reestablish all tunnels
- * during resume. This adds device links between the tunneled PCIe
- * downstream ports and the NHI so that the device core will make sure
- * NHI is resumed first before the rest.
- */
-static void tb_apple_add_links(struct tb_nhi *nhi)
-{
- struct pci_dev *upstream, *pdev;
-
- if (!x86_apple_machine)
- return;
-
- switch (nhi->pdev->device) {
- case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
- case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
- case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
- case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
- break;
- default:
- return;
- }
-
- upstream = pci_upstream_bridge(nhi->pdev);
- while (upstream) {
- if (!pci_is_pcie(upstream))
- return;
- if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
- break;
- upstream = pci_upstream_bridge(upstream);
- }
-
- if (!upstream)
- return;
-
- /*
- * For each hotplug downstream port, create add device link
- * back to NHI so that PCIe tunnels can be re-established after
- * sleep.
- */
- for_each_pci_bridge(pdev, upstream->subordinate) {
- const struct device_link *link;
-
- if (!pci_is_pcie(pdev))
- continue;
- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
- !pdev->is_hotplug_bridge)
- continue;
-
- link = device_link_add(&pdev->dev, &nhi->pdev->dev,
- DL_FLAG_AUTOREMOVE_SUPPLIER |
- DL_FLAG_PM_RUNTIME);
- if (link) {
- dev_dbg(&nhi->pdev->dev, "created link from %s\n",
- dev_name(&pdev->dev));
- } else {
- dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
- dev_name(&pdev->dev));
- }
- }
-}
-
static struct tb *nhi_select_cm(struct tb_nhi *nhi)
{
struct tb *tb;
@@ -1278,9 +1214,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
- tb_apple_add_links(nhi);
- tb_acpi_add_links(nhi);
-
tb = nhi_select_cm(nhi);
if (!tb) {
dev_err(&nhi->pdev->dev,
@@ -1400,6 +1333,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 69770beca792..69083aab2736 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -72,6 +72,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
+#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
+#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 29de6d95c6e7..3a5336913cca 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -164,6 +164,101 @@ void tb_nvm_free(struct tb_nvm *nvm)
kfree(nvm);
}
+/**
+ * tb_nvm_read_data() - Read data from NVM
+ * @address: Start address on the flash
+ * @buf: Buffer where the read data is copied
+ * @size: Size of the buffer in bytes
+ * @retries: Number of retries if block read fails
+ * @read_block: Function that reads block from the flash
+ * @read_block_data: Data passed to @read_block
+ *
+ * This is a generic function that reads data from an NVM or NVM-like
+ * device.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
+ unsigned int retries, read_block_fn read_block,
+ void *read_block_data)
+{
+ do {
+ unsigned int dwaddress, dwords, offset;
+ u8 data[NVM_DATA_DWORDS * 4];
+ size_t nbytes;
+ int ret;
+
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);
+
+ dwaddress = address / 4;
+ dwords = ALIGN(nbytes, 4) / 4;
+
+ ret = read_block(read_block_data, dwaddress, data, dwords);
+ if (ret) {
+ if (ret != -ENODEV && retries--)
+ continue;
+ return ret;
+ }
+
+ nbytes -= offset;
+ memcpy(buf, data + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
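+
+/*
+ * Example caller (see dma_port_flash_read()):
+ *   return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
+ *                           dma_port_flash_read_block, dma);
+ */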
+
+/**
+ * tb_nvm_write_data() - Write data to NVM
+ * @address: Start address on the flash
+ * @buf: Buffer where the data is copied from
+ * @size: Size of the buffer in bytes
+ * @retries: Number of retries if the block write fails
+ * @write_block: Function that writes block to the flash
+ * @write_block_data: Data passed to @write_block
+ *
+ * This is a generic function that writes data to an NVM or NVM-like device.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
+ unsigned int retries, write_block_fn write_block,
+ void *write_block_data)
+{
+ do {
+ unsigned int offset, dwaddress;
+ u8 data[NVM_DATA_DWORDS * 4];
+ size_t nbytes;
+ int ret;
+
+ offset = address & 3;
+ nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);
+
+ memcpy(data + offset, buf, nbytes);
+
+ dwaddress = address / 4;
+ ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
void tb_nvm_exit(void)
{
ida_destroy(&nvm_ida);
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index f63e205a35d9..564e2f42cebd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -367,7 +367,7 @@ static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
int i, res;
for (i = first_hop; i < path->path_length; i++) {
res = tb_port_add_nfc_credits(path->hops[i].in_port,
- -path->nfc_credits);
+ -path->hops[i].nfc_credits);
if (res)
tb_port_warn(path->hops[i].in_port,
"nfc credits deallocation failed for hop %d\n",
@@ -502,7 +502,7 @@ int tb_path_activate(struct tb_path *path)
/* Add non flow controlled credits. */
for (i = path->path_length - 1; i >= 0; i--) {
res = tb_port_add_nfc_credits(path->hops[i].in_port,
- path->nfc_credits);
+ path->hops[i].nfc_credits);
if (res) {
__tb_path_deallocate_nfc(path, i);
goto err;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index 57e2978a3c21..b5f2ec79c4d6 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -12,7 +12,17 @@ static void quirk_force_power_link(struct tb_switch *sw)
sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
}
+static void quirk_dp_credit_allocation(struct tb_switch *sw)
+{
+ if (sw->credit_allocation && sw->min_dp_main_credits == 56) {
+ sw->min_dp_main_credits = 18;
+ tb_sw_dbg(sw, "quirked DP main: %u\n", sw->min_dp_main_credits);
+ }
+}
+
struct tb_quirk {
+ u16 hw_vendor_id;
+ u16 hw_device_id;
u16 vendor;
u16 device;
void (*hook)(struct tb_switch *sw);
@@ -20,7 +30,13 @@ struct tb_quirk {
static const struct tb_quirk tb_quirks[] = {
/* Dell WD19TB supports self-authentication on unplug */
- { 0x00d4, 0xb070, quirk_force_power_link },
+ { 0x0000, 0x0000, 0x00d4, 0xb070, quirk_force_power_link },
+ { 0x0000, 0x0000, 0x00d4, 0xb071, quirk_force_power_link },
+ /*
+ * Intel Goshen Ridge NVM 27 and before report wrong number of
+ * DP buffers.
+ */
+ { 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
};
/**
@@ -36,7 +52,15 @@ void tb_check_quirks(struct tb_switch *sw)
for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
const struct tb_quirk *q = &tb_quirks[i];
- if (sw->device == q->device && sw->vendor == q->vendor)
- q->hook(sw);
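+ /* A zero in any of the ID fields acts as a wildcard */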
+ if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id)
+ continue;
+ if (q->hw_device_id && q->hw_device_id != sw->config.device_id)
+ continue;
+ if (q->vendor && q->vendor != sw->vendor)
+ continue;
+ if (q->device && q->device != sw->device)
+ continue;
+
+ q->hook(sw);
}
}
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index c44fad2b9fbb..722694052f4a 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -103,6 +103,7 @@ static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
unsigned int image_size, hdr_size;
const u8 *buf = rt->nvm->buf;
u16 ds_size, device;
+ int ret;
image_size = rt->nvm->buf_data_size;
if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
@@ -140,8 +141,43 @@ static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
buf += hdr_size;
image_size -= hdr_size;
- return usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
- image_size);
+ ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
+ image_size);
+ if (!ret)
+ rt->nvm->flushed = true;
+
+ return ret;
+}
+
+static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
+{
+ u32 status;
+ int ret;
+
+ if (auth_only) {
+ ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
+ if (ret)
+ return ret;
+
+ usleep_range(100, 150);
+
+ /*
+ * Check the status now if we can still access the retimer. It
+ * is expected that the below fails.
+ */
+ ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
+ &status);
+ if (!ret) {
+ rt->auth_status = status;
+ return status ? -EINVAL : 0;
+ }
+
+ return 0;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
@@ -176,8 +212,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct tb_retimer *rt = tb_to_retimer(dev);
- bool val;
- int ret;
+ int val, ret;
pm_runtime_get_sync(&rt->dev);
@@ -191,7 +226,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
goto exit_unlock;
}
- ret = kstrtobool(buf, &val);
+ ret = kstrtoint(buf, 10, &val);
if (ret)
goto exit_unlock;
@@ -199,16 +234,22 @@ static ssize_t nvm_authenticate_store(struct device *dev,
rt->auth_status = 0;
if (val) {
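+ /*
+  * val selects the operation: WRITE_AND_AUTHENTICATE, WRITE_ONLY or
+  * AUTHENTICATE_ONLY (authenticate the image already in the NVM).
+  */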
- if (!rt->nvm->buf) {
- ret = -EINVAL;
- goto exit_unlock;
+ if (val == AUTHENTICATE_ONLY) {
+ ret = tb_retimer_nvm_authenticate(rt, true);
+ } else {
+ if (!rt->nvm->flushed) {
+ if (!rt->nvm->buf) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ret = tb_retimer_nvm_validate_and_write(rt);
+ if (ret || val == WRITE_ONLY)
+ goto exit_unlock;
+ }
+ if (val == WRITE_AND_AUTHENTICATE)
+ ret = tb_retimer_nvm_authenticate(rt, false);
}
-
- ret = tb_retimer_nvm_validate_and_write(rt);
- if (ret)
- goto exit_unlock;
-
- ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
}
exit_unlock:
@@ -283,11 +324,13 @@ struct device_type tb_retimer_type = {
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
+ struct usb4_port *usb4;
struct tb_retimer *rt;
u32 vendor, device;
int ret;
- if (!port->cap_usb4)
+ usb4 = port->usb4;
+ if (!usb4)
return -EINVAL;
ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
@@ -331,7 +374,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
rt->port = port;
rt->tb = port->sw->tb;
- rt->dev.parent = &port->sw->dev;
+ rt->dev.parent = &usb4->dev;
rt->dev.bus = &tb_bus_type;
rt->dev.type = &tb_retimer_type;
dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
@@ -389,7 +432,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
struct tb_retimer_lookup lookup = { .port = port, .index = index };
struct device *dev;
- dev = device_find_child(&port->sw->dev, &lookup, retimer_match);
+ dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
if (dev)
return tb_to_retimer(dev);
@@ -399,19 +442,18 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
/**
* tb_retimer_scan() - Scan for on-board retimers under port
* @port: USB4 port to scan
+ * @add: If true also registers found retimers
*
- * Tries to enumerate on-board retimers connected to @port. Found
- * retimers are registered as children of @port. Does not scan for cable
- * retimers for now.
+ * Brings the sideband into a state where retimers can be accessed.
+ * Then tries to enumerate on-board retimers connected to @port. Found
+ * retimers are registered as children of @port if @add is set. Does
+ * not scan for cable retimers for now.
*/
-int tb_retimer_scan(struct tb_port *port)
+int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
- if (!port->cap_usb4)
- return 0;
-
/*
* Send broadcast RT to make sure retimer indices facing this
* port are set.
@@ -421,6 +463,13 @@ int tb_retimer_scan(struct tb_port *port)
return ret;
/*
+ * Enable sideband channel for each retimer. We can do this
+ * regardless of whether there is a device connected or not.
+ */
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+ usb4_port_retimer_set_inbound_sbtx(port, i);
+
+ /*
* Before doing anything else, read the authentication status.
* If the retimer has it set, store it for the new retimer
* device instance.
@@ -451,10 +500,10 @@ int tb_retimer_scan(struct tb_port *port)
rt = tb_port_find_retimer(port, i);
if (rt) {
put_device(&rt->dev);
- } else {
+ } else if (add) {
ret = tb_retimer_add(port, i, status[i]);
if (ret && ret != -EOPNOTSUPP)
- return ret;
+ break;
}
}
@@ -479,7 +528,10 @@ static int remove_retimer(struct device *dev, void *data)
*/
void tb_retimer_remove_all(struct tb_port *port)
{
- if (port->cap_usb4)
- device_for_each_child_reverse(&port->sw->dev, port,
+ struct usb4_port *usb4;
+
+ usb4 = port->usb4;
+ if (usb4)
+ device_for_each_child_reverse(&usb4->dev, port,
remove_retimer);
}
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index 9dafd696612f..bda889ff3bda 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -17,7 +17,9 @@
enum usb4_sb_opcode {
USB4_SB_OPCODE_ERR = 0x20525245, /* "ERR " */
USB4_SB_OPCODE_ONS = 0x444d4321, /* "!CMD" */
+ USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */
USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */
+ USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */
USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index e73cd296db7e..10d6b228cc94 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -26,11 +26,6 @@ struct nvm_auth_status {
u32 status;
};
-enum nvm_write_ops {
- WRITE_AND_AUTHENTICATE = 1,
- WRITE_ONLY = 2,
-};
-
/*
* Hold NVM authentication failure status per switch This information
* needs to stay around even when the switch gets power cycled so we
@@ -308,13 +303,23 @@ static inline int nvm_read(struct tb_switch *sw, unsigned int address,
return dma_port_flash_read(sw->dma_port, address, buf, size);
}
-static int nvm_authenticate(struct tb_switch *sw)
+static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
int ret;
- if (tb_switch_is_usb4(sw))
+ if (tb_switch_is_usb4(sw)) {
+ if (auth_only) {
+ ret = usb4_switch_nvm_set_offset(sw, 0);
+ if (ret)
+ return ret;
+ }
+ sw->nvm->authenticating = true;
return usb4_switch_nvm_authenticate(sw);
+ } else if (auth_only) {
+ return -EOPNOTSUPP;
+ }
+ sw->nvm->authenticating = true;
if (!tb_route(sw)) {
nvm_authenticate_start_dma_port(sw);
ret = nvm_authenticate_host_dma_port(sw);
@@ -459,7 +464,7 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
/* port utility functions */
-static const char *tb_port_type(struct tb_regs_port_header *port)
+static const char *tb_port_type(const struct tb_regs_port_header *port)
{
switch (port->type >> 16) {
case 0:
@@ -488,17 +493,21 @@ static const char *tb_port_type(struct tb_regs_port_header *port)
}
}
-static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
+static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
+ const struct tb_regs_port_header *regs = &port->config;
+
tb_dbg(tb,
" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
- port->port_number, port->vendor_id, port->device_id,
- port->revision, port->thunderbolt_version, tb_port_type(port),
- port->type);
+ regs->port_number, regs->vendor_id, regs->device_id,
+ regs->revision, regs->thunderbolt_version, tb_port_type(regs),
+ regs->type);
tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
- port->max_in_hop_id, port->max_out_hop_id);
- tb_dbg(tb, " Max counters: %d\n", port->max_counters);
- tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
+ regs->max_in_hop_id, regs->max_out_hop_id);
+ tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
+ tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
+ tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
+ port->ctl_credits);
}
/**
@@ -738,13 +747,32 @@ static int tb_init_port(struct tb_port *port)
cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
if (cap > 0)
port->cap_usb4 = cap;
+
+ /*
+ * For USB4 ports the buffers allocated for the control path
+ * can be read from the path config space. For legacy
+ * devices we use a hard-coded value.
+ */
+ if (tb_switch_is_usb4(port->sw)) {
+ struct tb_regs_hop hop;
+
+ if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
+ port->ctl_credits = hop.initial_credits;
+ }
+ if (!port->ctl_credits)
+ port->ctl_credits = 2;
+
} else if (port->port != 0) {
cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
if (cap > 0)
port->cap_adap = cap;
}
- tb_dump_port(port->sw->tb, &port->config);
+ port->total_credits =
+ (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
+
+ tb_dump_port(port->sw->tb, port);
INIT_LIST_HEAD(&port->list);
return 0;
@@ -991,8 +1019,11 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
* tb_port_lane_bonding_enable() - Enable bonding on port
* @port: port to enable
*
- * Enable bonding by setting the link width of the port and the
- * other port in case of dual link port.
+ * Enable bonding by setting the link width of the port and the other
+ * port in case of a dual link port. Does not wait for the link to
+ * actually reach the bonded state, so the caller needs to call
+ * tb_port_wait_for_link_width() before enabling any paths through the
+ * link to make sure the link is in the expected state.
*
* Return: %0 in case of success and negative errno in case of error
*/
@@ -1043,6 +1074,79 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
tb_port_set_link_width(port, 1);
}
+/**
+ * tb_port_wait_for_link_width() - Wait until link reaches specific width
+ * @port: Port to wait for
+ * @width: Expected link width (%1 or %2)
+ * @timeout_msec: Timeout in ms for how long to wait
+ *
+ * Should be used after both ends of the link have been bonded (or
+ * bonding has been disabled) to wait until the link actually reaches
+ * the expected state. Returns %-ETIMEDOUT if the @width was not reached
+ * within the given timeout and %0 if it was.
+ */
+int tb_port_wait_for_link_width(struct tb_port *port, int width,
+ int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+ int ret;
+
+ do {
+ ret = tb_port_get_link_width(port);
+ if (ret < 0)
+ return ret;
+ else if (ret == width)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int tb_port_do_update_credits(struct tb_port *port)
+{
+ u32 nfc_credits;
+ int ret;
+
+ ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
+ if (ret)
+ return ret;
+
+ if (nfc_credits != port->config.nfc_credits) {
+ u32 total;
+
+ total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
+
+ tb_port_dbg(port, "total credits changed %u -> %u\n",
+ port->total_credits, total);
+
+ port->config.nfc_credits = nfc_credits;
+ port->total_credits = total;
+ }
+
+ return 0;
+}
+
+/**
+ * tb_port_update_credits() - Re-read port total credits
+ * @port: Port to update
+ *
+ * After the link is bonded (or bonding was disabled) the port total
+ * credits may change, so this function needs to be called to re-read
+ * the credits. Also updates the second lane adapter.
+ */
+int tb_port_update_credits(struct tb_port *port)
+{
+ int ret;
+
+ ret = tb_port_do_update_credits(port);
+ if (ret)
+ return ret;
+ return tb_port_do_update_credits(port->dual_link_port);
+}
+
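+
The two helpers above are intended to be used together once both ends of the link have been reconfigured. A minimal sketch of the calling sequence (the example_ helper is hypothetical; the in-tree caller is tb_switch_lane_bonding_enable() later in this patch):

static int example_bond_and_refresh(struct tb_port *up, struct tb_port *down)
{
	int ret;

	/* Assumes bonding was already requested on both lane adapters. */
	ret = tb_port_wait_for_link_width(down, 2, 100);
	if (ret)
		return ret;

	/* The total buffers typically change once bonded, so re-read them. */
	ret = tb_port_update_credits(down);
	if (ret)
		return ret;

	return tb_port_update_credits(up);
}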
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
@@ -1054,6 +1158,33 @@ static int tb_port_start_lane_initialization(struct tb_port *port)
return ret == -EINVAL ? 0 : ret;
}
+/*
+ * Returns true if the port had something (router, XDomain) connected
+ * before suspend.
+ */
+static bool tb_port_resume(struct tb_port *port)
+{
+ bool has_remote = tb_port_has_remote(port);
+
+ if (port->usb4) {
+ usb4_port_device_resume(port->usb4);
+ } else if (!has_remote) {
+ /*
+ * For disconnected downstream lane adapters start lane
+ * initialization now so we detect future connects.
+ *
+ * For XDomain start the lane initialization now so the
+ * link gets re-established.
+ *
+ * This is only needed for non-USB4 ports.
+ */
+ if (!tb_is_upstream_port(port) || port->xdomain)
+ tb_port_start_lane_initialization(port);
+ }
+
+ return has_remote || port->xdomain;
+}
+
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
@@ -1592,8 +1723,7 @@ static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
bool disconnect)
{
struct tb_switch *sw = tb_to_switch(dev);
- int val;
- int ret;
+ int val, ret;
pm_runtime_get_sync(&sw->dev);
@@ -1616,22 +1746,27 @@ static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
nvm_clear_auth_status(sw);
if (val > 0) {
- if (!sw->nvm->flushed) {
- if (!sw->nvm->buf) {
+ if (val == AUTHENTICATE_ONLY) {
+ if (disconnect)
ret = -EINVAL;
- goto exit_unlock;
+ else
+ ret = nvm_authenticate(sw, true);
+ } else {
+ if (!sw->nvm->flushed) {
+ if (!sw->nvm->buf) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ret = nvm_validate_and_write(sw);
+ if (ret || val == WRITE_ONLY)
+ goto exit_unlock;
}
-
- ret = nvm_validate_and_write(sw);
- if (ret || val == WRITE_ONLY)
- goto exit_unlock;
- }
- if (val == WRITE_AND_AUTHENTICATE) {
- if (disconnect) {
- ret = tb_lc_force_power(sw);
- } else {
- sw->nvm->authenticating = true;
- ret = nvm_authenticate(sw);
+ if (val == WRITE_AND_AUTHENTICATE) {
+ if (disconnect)
+ ret = tb_lc_force_power(sw);
+ else
+ ret = nvm_authenticate(sw, false);
}
}
}
@@ -1740,18 +1875,6 @@ static struct attribute *switch_attrs[] = {
NULL,
};
-static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
-{
- const struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (!port->disabled && port->config.type == type)
- return true;
- }
-
- return false;
-}
-
static umode_t switch_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1760,8 +1883,7 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_authorized.attr) {
if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
- sw->tb->security_level == TB_SECURITY_DPONLY ||
- !has_port(sw, TB_TYPE_PCIE_UP))
+ sw->tb->security_level == TB_SECURITY_DPONLY)
return 0;
} else if (attr == &dev_attr_device.attr) {
if (!sw->device)
@@ -2432,6 +2554,14 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
return ret;
}
+ ret = tb_port_wait_for_link_width(down, 2, 100);
+ if (ret) {
+ tb_port_warn(down, "timeout enabling lane bonding\n");
+ return ret;
+ }
+
+ tb_port_update_credits(down);
+ tb_port_update_credits(up);
tb_switch_update_link_attributes(sw);
tb_sw_dbg(sw, "lane bonding enabled\n");
@@ -2462,7 +2592,17 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
tb_port_lane_bonding_disable(up);
tb_port_lane_bonding_disable(down);
+ /*
+ * It is fine if we get other errors as the router might have
+ * been unplugged.
+ */
+ if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
+ tb_sw_warn(sw, "timeout disabling lane bonding\n");
+
+ tb_port_update_credits(down);
+ tb_port_update_credits(up);
tb_switch_update_link_attributes(sw);
+
tb_sw_dbg(sw, "lane bonding disabled\n");
}
@@ -2529,6 +2669,16 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
tb_lc_unconfigure_port(down);
}
+static void tb_switch_credits_init(struct tb_switch *sw)
+{
+ if (tb_switch_is_icm(sw))
+ return;
+ if (!tb_switch_is_usb4(sw))
+ return;
+ if (usb4_switch_credits_init(sw))
+ tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -2559,6 +2709,8 @@ int tb_switch_add(struct tb_switch *sw)
}
if (!sw->safe_mode) {
+ tb_switch_credits_init(sw);
+
/* read drom */
ret = tb_drom_read(sw);
if (ret) {
@@ -2612,11 +2764,16 @@ int tb_switch_add(struct tb_switch *sw)
sw->device_name);
}
+ ret = usb4_switch_add_ports(sw);
+ if (ret) {
+ dev_err(&sw->dev, "failed to add USB4 ports\n");
+ goto err_del;
+ }
+
ret = tb_switch_nvm_add(sw);
if (ret) {
dev_err(&sw->dev, "failed to add NVM devices\n");
- device_del(&sw->dev);
- return ret;
+ goto err_ports;
}
/*
@@ -2637,6 +2794,13 @@ int tb_switch_add(struct tb_switch *sw)
tb_switch_debugfs_init(sw);
return 0;
+
+err_ports:
+ usb4_switch_remove_ports(sw);
+err_del:
+ device_del(&sw->dev);
+
+ return ret;
}
/**
@@ -2676,6 +2840,7 @@ void tb_switch_remove(struct tb_switch *sw)
tb_plug_events_active(sw, false);
tb_switch_nvm_remove(sw);
+ usb4_switch_remove_ports(sw);
if (tb_route(sw))
dev_info(&sw->dev, "device disconnected\n");
@@ -2773,22 +2938,11 @@ int tb_switch_resume(struct tb_switch *sw)
/* check for surviving downstream switches */
tb_switch_for_each_port(sw, port) {
- if (!tb_port_has_remote(port) && !port->xdomain) {
- /*
- * For disconnected downstream lane adapters
- * start lane initialization now so we detect
- * future connects.
- */
- if (!tb_is_upstream_port(port) && tb_port_is_null(port))
- tb_port_start_lane_initialization(port);
+ if (!tb_port_is_null(port))
+ continue;
+
+ if (!tb_port_resume(port))
continue;
- } else if (port->xdomain) {
- /*
- * Start lane initialization for XDomain so the
- * link gets re-established.
- */
- tb_port_start_lane_initialization(port);
- }
if (tb_wait_for_port(port, true) <= 0) {
tb_port_warn(port,
@@ -2797,7 +2951,7 @@ int tb_switch_resume(struct tb_switch *sw)
tb_sw_set_unplugged(port->remote->sw);
else if (port->xdomain)
port->xdomain->is_unplugged = true;
- } else if (tb_port_has_remote(port) || port->xdomain) {
+ } else {
/*
* Always unlock the port so the downstream
* switch/domain is accessible.
@@ -2844,7 +2998,8 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
if (runtime) {
/* Trigger wake when something is plugged in/out */
flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
- flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ flags |= TB_WAKE_ON_USB4;
+ flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
} else if (device_may_wakeup(&sw->dev)) {
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 7e6dc2b03bed..2897a77d44c3 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
@@ -595,7 +596,7 @@ static void tb_scan_port(struct tb_port *port)
return;
}
- tb_retimer_scan(port);
+ tb_retimer_scan(port, true);
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
@@ -662,7 +663,7 @@ static void tb_scan_port(struct tb_port *port)
tb_sw_warn(sw, "failed to enable TMU\n");
/* Scan upstream retimers */
- tb_retimer_scan(upstream_port);
+ tb_retimer_scan(upstream_port, true);
/*
* Create USB 3.x tunnels only when the switch is plugged to the
@@ -1571,6 +1572,69 @@ static const struct tb_cm_ops tb_cm_ops = {
.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core will make sure
+ * the NHI is resumed before the rest.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+ struct pci_dev *upstream, *pdev;
+
+ if (!x86_apple_machine)
+ return;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ break;
+ default:
+ return;
+ }
+
+ upstream = pci_upstream_bridge(nhi->pdev);
+ while (upstream) {
+ if (!pci_is_pcie(upstream))
+ return;
+ if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+ break;
+ upstream = pci_upstream_bridge(upstream);
+ }
+
+ if (!upstream)
+ return;
+
+ /*
+ * For each hotplug downstream port, add a device link
+ * back to NHI so that PCIe tunnels can be re-established after
+ * sleep.
+ */
+ for_each_pci_bridge(pdev, upstream->subordinate) {
+ const struct device_link *link;
+
+ if (!pci_is_pcie(pdev))
+ continue;
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+ !pdev->is_hotplug_bridge)
+ continue;
+
+ link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+ DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_PM_RUNTIME);
+ if (link) {
+ dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+ dev_name(&pdev->dev));
+ } else {
+ dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+ dev_name(&pdev->dev));
+ }
+ }
+}
+
struct tb *tb_probe(struct tb_nhi *nhi)
{
struct tb_cm *tcm;
@@ -1594,5 +1658,8 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tb_dbg(tb, "using software connection manager\n");
+ tb_apple_add_links(nhi);
+ tb_acpi_add_links(nhi);
+
return tb;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 9790e9f13d2b..725104c83e3d 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -20,6 +20,7 @@
#define NVM_MIN_SIZE SZ_32K
#define NVM_MAX_SIZE SZ_512K
+#define NVM_DATA_DWORDS 16
/* Intel specific NVM offsets */
#define NVM_DEVID 0x05
@@ -57,6 +58,12 @@ struct tb_nvm {
bool flushed;
};
+enum tb_nvm_write_ops {
+ WRITE_AND_AUTHENTICATE = 1,
+ WRITE_ONLY = 2,
+ AUTHENTICATE_ONLY = 3,
+};
+
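+
For reference, a hedged sketch of how these values map to NVM operations; the actual dispatch is in nvm_authenticate_sysfs() in switch.c (see the hunk earlier in this patch), and the hypothetical example_ helper below would have to live there too since it calls that file's static helpers:

static int example_nvm_op(struct tb_switch *sw, enum tb_nvm_write_ops val)
{
	int ret;

	switch (val) {
	case AUTHENTICATE_ONLY:
		/* Authenticate whatever image is already in the NVM. */
		return nvm_authenticate(sw, true);
	case WRITE_ONLY:
		/* Flush the buffered image without starting authentication. */
		return nvm_validate_and_write(sw);
	case WRITE_AND_AUTHENTICATE:
		ret = nvm_validate_and_write(sw);
		return ret ? ret : nvm_authenticate(sw, false);
	}

	return -EINVAL;
}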
#define TB_SWITCH_KEY_SIZE 32
#define TB_SWITCH_MAX_DEPTH 6
#define USB4_SWITCH_MAX_DEPTH 5
@@ -135,6 +142,12 @@ struct tb_switch_tmu {
* @rpm_complete: Completion used to wait for runtime resume to
* complete (ICM only)
* @quirks: Quirks used for this Thunderbolt switch
+ * @credit_allocation: Are the below buffer allocation parameters valid
+ * @max_usb3_credits: Router preferred number of buffers for USB 3.x
+ * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX
+ * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
+ * @max_pcie_credits: Router preferred number of buffers for PCIe
+ * @max_dma_credits: Router preferred number of buffers for DMA/P2P
*
* When the switch is being added to or removed from the domain (other
* switches) you need to have the domain lock held.
@@ -177,6 +190,12 @@ struct tb_switch {
u8 depth;
struct completion rpm_complete;
unsigned long quirks;
+ bool credit_allocation;
+ unsigned int max_usb3_credits;
+ unsigned int min_dp_aux_credits;
+ unsigned int min_dp_main_credits;
+ unsigned int max_pcie_credits;
+ unsigned int max_dma_credits;
};
/**
@@ -189,6 +208,7 @@ struct tb_switch {
* @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @cap_usb4: Offset to the USB4 port capability (%0 if not present)
+ * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0)
* @port: Port number on switch
* @disabled: Disabled by eeprom or enabled but not implemented
* @bonded: true if the port is bonded (two lanes combined as one)
@@ -198,6 +218,10 @@ struct tb_switch {
* @in_hopids: Currently allocated input HopIDs
* @out_hopids: Currently allocated output HopIDs
* @list: Used to link ports to DP resources list
+ * @total_credits: Total number of buffers available for this port
+ * @ctl_credits: Buffers reserved for control path
+ * @dma_credits: Number of credits allocated for DMA tunneling for all
+ * DMA paths through this port.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -211,6 +235,7 @@ struct tb_port {
int cap_tmu;
int cap_adap;
int cap_usb4;
+ struct usb4_port *usb4;
u8 port;
bool disabled;
bool bonded;
@@ -219,6 +244,24 @@ struct tb_port {
struct ida in_hopids;
struct ida out_hopids;
struct list_head list;
+ unsigned int total_credits;
+ unsigned int ctl_credits;
+ unsigned int dma_credits;
+};
+
+/**
+ * struct usb4_port - USB4 port device
+ * @dev: Device for the port
+ * @port: Pointer to the lane 0 adapter
+ * @can_offline: Does the port have the necessary platform support to move
+ * it into offline mode and back
+ * @offline: The port is currently in offline mode
+ */
+struct usb4_port {
+ struct device dev;
+ struct tb_port *port;
+ bool can_offline;
+ bool offline;
};
/**
@@ -255,6 +298,8 @@ struct tb_retimer {
* @next_hop_index: HopID of the packet when it is routed out from @out_port
* @initial_credits: Number of initial flow control credits allocated for
* the path
+ * @nfc_credits: Number of non-flow controlled buffers allocated for the
+ * @in_port.
*
* Hop configuration is always done on the IN port of a switch.
* in_port and out_port have to be on the same switch. Packets arriving on
@@ -274,6 +319,7 @@ struct tb_path_hop {
int in_counter_index;
int next_hop_index;
unsigned int initial_credits;
+ unsigned int nfc_credits;
};
/**
@@ -296,7 +342,6 @@ enum tb_path_port {
* struct tb_path - a unidirectional path between two ports
* @tb: Pointer to the domain structure
* @name: Name of the path (used for debugging)
- * @nfc_credits: Number of non flow controlled credits allocated for the path
* @ingress_shared_buffer: Shared buffering used for ingress ports on the path
* @egress_shared_buffer: Shared buffering used for egress ports on the path
* @ingress_fc_enable: Flow control for ingress ports on the path
@@ -317,7 +362,6 @@ enum tb_path_port {
struct tb_path {
struct tb *tb;
const char *name;
- int nfc_credits;
enum tb_path_port ingress_shared_buffer;
enum tb_path_port egress_shared_buffer;
enum tb_path_port ingress_fc_enable;
@@ -346,6 +390,7 @@ struct tb_path {
#define TB_WAKE_ON_USB4 BIT(2)
#define TB_WAKE_ON_USB3 BIT(3)
#define TB_WAKE_ON_PCIE BIT(4)
+#define TB_WAKE_ON_DP BIT(5)
/**
* struct tb_cm_ops - Connection manager specific operations vector
@@ -623,6 +668,7 @@ struct tb *tb_probe(struct tb_nhi *nhi);
extern struct device_type tb_domain_type;
extern struct device_type tb_retimer_type;
extern struct device_type tb_switch_type;
+extern struct device_type usb4_port_device_type;
int tb_domain_init(void);
void tb_domain_exit(void);
@@ -674,6 +720,16 @@ int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);
+typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t);
+
+int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
+ unsigned int retries, read_block_fn read_block,
+ void *read_block_data);
+int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
+ unsigned int retries, write_block_fn write_next_block,
+ void *write_block_data);
+
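+
These helpers factor out chunked NVM reads/writes with retries so switch and retimer code can share them. A minimal caller sketch, assuming the callback receives the same address/size units as the existing nvm_read() path; the retry count of 5 and the example_ names are illustrative only:

static int example_read_block(void *data, unsigned int address, void *buf,
			      size_t size)
{
	struct tb_switch *sw = data;

	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int example_nvm_read(struct tb_switch *sw, unsigned int address,
			    void *buf, size_t size)
{
	return tb_nvm_read_data(address, buf, size, 5, example_read_block, sw);
}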
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
@@ -853,6 +909,11 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
+static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
+{
+ return tb_port_is_null(port) && port->sw->credit_allocation;
+}
+
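+
A sketch of how this predicate is meant to be consumed when sizing a protocol path; the fallback constant is an assumption for illustration and the real per-protocol logic lives elsewhere in the driver:

static unsigned int example_usb3_credits(const struct tb_port *port)
{
	if (tb_port_use_credit_allocation(port))
		return port->sw->max_usb3_credits;

	/* Legacy routers that do not report a preference get a fixed value. */
	return 32;
}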
/**
* tb_for_each_port_on_path() - Iterate over each port on path
* @src: Source port
@@ -870,6 +931,9 @@ int tb_port_get_link_width(struct tb_port *port);
int tb_port_state(struct tb_port *port);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
+int tb_port_wait_for_link_width(struct tb_port *port, int width,
+ int timeout_msec);
+int tb_port_update_credits(struct tb_port *port);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -904,6 +968,17 @@ bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
+/**
+ * tb_path_for_each_hop() - Iterate over each hop on path
+ * @path: Path whose hops to iterate
+ * @hop: Hop used as iterator
+ *
+ * Iterates over each hop on path.
+ */
+#define tb_path_for_each_hop(path, hop) \
+ for ((hop) = &(path)->hops[0]; \
+ (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++)
+
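+
Minimal usage sketch for the new iterator (hypothetical example_ helper), summing the per-hop non-flow controlled buffers introduced by this patch:

static unsigned int example_path_nfc_credits(struct tb_path *path)
{
	struct tb_path_hop *hop;
	unsigned int total = 0;

	tb_path_for_each_hop(path, hop)
		total += hop->nfc_credits;

	return total;
}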
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
@@ -950,7 +1025,7 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
-int tb_retimer_scan(struct tb_port *port);
+int tb_retimer_scan(struct tb_port *port, bool add);
void tb_retimer_remove_all(struct tb_port *port);
static inline bool tb_is_retimer(const struct device *dev)
@@ -975,10 +1050,12 @@ int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
+int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address);
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
const void *buf, size_t size);
int usb4_switch_nvm_authenticate(struct tb_switch *sw);
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status);
+int usb4_switch_credits_init(struct tb_switch *sw);
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
@@ -986,20 +1063,27 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
const struct tb_port *port);
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
const struct tb_port *port);
+int usb4_switch_add_ports(struct tb_switch *sw);
+void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
+int usb4_port_router_offline(struct tb_port *port);
+int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
+int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
+ unsigned int address);
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
unsigned int address, const void *buf,
size_t size);
@@ -1018,6 +1102,22 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
+static inline bool tb_is_usb4_port_device(const struct device *dev)
+{
+ return dev->type == &usb4_port_device_type;
+}
+
+static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
+{
+ if (tb_is_usb4_port_device(dev))
+ return container_of(dev, struct usb4_port, dev);
+ return NULL;
+}
+
+struct usb4_port *usb4_port_device_add(struct tb_port *port);
+void usb4_port_device_remove(struct usb4_port *usb4);
+int usb4_port_device_resume(struct usb4_port *usb4);
+
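+
A hedged sketch of the intended lifetime of these calls, mirroring how usb4_switch_add_ports() (called from tb_switch_add() above) would presumably wire a lane adapter up, and assuming usb4_port_device_add() returns an ERR_PTR on failure:

static int example_add_usb4_port(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = usb4_port_device_add(port);
	if (IS_ERR(usb4))
		return PTR_ERR(usb4);

	/* The lane adapter keeps a back pointer for later offline/resume use. */
	port->usb4 = usb4;
	return 0;
}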
/* Keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
@@ -1031,6 +1131,11 @@ bool tb_acpi_may_tunnel_usb3(void);
bool tb_acpi_may_tunnel_dp(void);
bool tb_acpi_may_tunnel_pcie(void);
bool tb_acpi_is_xdomain_allowed(void);
+
+int tb_acpi_init(void);
+void tb_acpi_exit(void);
+int tb_acpi_power_on_retimers(struct tb_port *port);
+int tb_acpi_power_off_retimers(struct tb_port *port);
#else
static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
@@ -1039,6 +1144,11 @@ static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }
+
+static inline int tb_acpi_init(void) { return 0; }
+static inline void tb_acpi_exit(void) { }
+static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; }
+static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; }
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 626751e06292..484f25be2849 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -195,6 +195,7 @@ struct tb_regs_switch_header {
#define ROUTER_CS_5_SLP BIT(0)
#define ROUTER_CS_5_WOP BIT(1)
#define ROUTER_CS_5_WOU BIT(2)
+#define ROUTER_CS_5_WOD BIT(3)
#define ROUTER_CS_5_C3S BIT(23)
#define ROUTER_CS_5_PTO BIT(24)
#define ROUTER_CS_5_UTO BIT(25)
@@ -228,6 +229,7 @@ enum usb4_switch_op {
USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
USB4_SWITCH_OP_DROM_READ = 0x24,
USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+ USB4_SWITCH_OP_BUFFER_ALLOC = 0x33,
};
/* Router TMU configuration */
@@ -458,6 +460,8 @@ struct tb_regs_hop {
#define TB_LC_SX_CTRL 0x96
#define TB_LC_SX_CTRL_WOC BIT(1)
#define TB_LC_SX_CTRL_WOD BIT(2)
+#define TB_LC_SX_CTRL_WODPC BIT(3)
+#define TB_LC_SX_CTRL_WODPD BIT(4)
#define TB_LC_SX_CTRL_WOU4 BIT(5)
#define TB_LC_SX_CTRL_WOP BIT(6)
#define TB_LC_SX_CTRL_L1C BIT(16)
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 5ff5a03bc9ce..eca0ef311bde 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -87,22 +87,30 @@ static struct tb_switch *alloc_host(struct kunit *test)
sw->ports[1].config.type = TB_TYPE_PORT;
sw->ports[1].config.max_in_hop_id = 19;
sw->ports[1].config.max_out_hop_id = 19;
+ sw->ports[1].total_credits = 60;
+ sw->ports[1].ctl_credits = 2;
sw->ports[1].dual_link_port = &sw->ports[2];
sw->ports[2].config.type = TB_TYPE_PORT;
sw->ports[2].config.max_in_hop_id = 19;
sw->ports[2].config.max_out_hop_id = 19;
+ sw->ports[2].total_credits = 60;
+ sw->ports[2].ctl_credits = 2;
sw->ports[2].dual_link_port = &sw->ports[1];
sw->ports[2].link_nr = 1;
sw->ports[3].config.type = TB_TYPE_PORT;
sw->ports[3].config.max_in_hop_id = 19;
sw->ports[3].config.max_out_hop_id = 19;
+ sw->ports[3].total_credits = 60;
+ sw->ports[3].ctl_credits = 2;
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].config.type = TB_TYPE_PORT;
sw->ports[4].config.max_in_hop_id = 19;
sw->ports[4].config.max_out_hop_id = 19;
+ sw->ports[4].total_credits = 60;
+ sw->ports[4].ctl_credits = 2;
sw->ports[4].dual_link_port = &sw->ports[3];
sw->ports[4].link_nr = 1;
@@ -143,6 +151,25 @@ static struct tb_switch *alloc_host(struct kunit *test)
return sw;
}
+static struct tb_switch *alloc_host_usb4(struct kunit *test)
+{
+ struct tb_switch *sw;
+
+ sw = alloc_host(test);
+ if (!sw)
+ return NULL;
+
+ sw->generation = 4;
+ sw->credit_allocation = true;
+ sw->max_usb3_credits = 32;
+ sw->min_dp_aux_credits = 1;
+ sw->min_dp_main_credits = 0;
+ sw->max_pcie_credits = 64;
+ sw->max_dma_credits = 14;
+
+ return sw;
+}
+
static struct tb_switch *alloc_dev_default(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
@@ -164,44 +191,60 @@ static struct tb_switch *alloc_dev_default(struct kunit *test,
sw->ports[1].config.type = TB_TYPE_PORT;
sw->ports[1].config.max_in_hop_id = 19;
sw->ports[1].config.max_out_hop_id = 19;
+ sw->ports[1].total_credits = 60;
+ sw->ports[1].ctl_credits = 2;
sw->ports[1].dual_link_port = &sw->ports[2];
sw->ports[2].config.type = TB_TYPE_PORT;
sw->ports[2].config.max_in_hop_id = 19;
sw->ports[2].config.max_out_hop_id = 19;
+ sw->ports[2].total_credits = 60;
+ sw->ports[2].ctl_credits = 2;
sw->ports[2].dual_link_port = &sw->ports[1];
sw->ports[2].link_nr = 1;
sw->ports[3].config.type = TB_TYPE_PORT;
sw->ports[3].config.max_in_hop_id = 19;
sw->ports[3].config.max_out_hop_id = 19;
+ sw->ports[3].total_credits = 60;
+ sw->ports[3].ctl_credits = 2;
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].config.type = TB_TYPE_PORT;
sw->ports[4].config.max_in_hop_id = 19;
sw->ports[4].config.max_out_hop_id = 19;
+ sw->ports[4].total_credits = 60;
+ sw->ports[4].ctl_credits = 2;
sw->ports[4].dual_link_port = &sw->ports[3];
sw->ports[4].link_nr = 1;
sw->ports[5].config.type = TB_TYPE_PORT;
sw->ports[5].config.max_in_hop_id = 19;
sw->ports[5].config.max_out_hop_id = 19;
+ sw->ports[5].total_credits = 60;
+ sw->ports[5].ctl_credits = 2;
sw->ports[5].dual_link_port = &sw->ports[6];
sw->ports[6].config.type = TB_TYPE_PORT;
sw->ports[6].config.max_in_hop_id = 19;
sw->ports[6].config.max_out_hop_id = 19;
+ sw->ports[6].total_credits = 60;
+ sw->ports[6].ctl_credits = 2;
sw->ports[6].dual_link_port = &sw->ports[5];
sw->ports[6].link_nr = 1;
sw->ports[7].config.type = TB_TYPE_PORT;
sw->ports[7].config.max_in_hop_id = 19;
sw->ports[7].config.max_out_hop_id = 19;
+ sw->ports[7].total_credits = 60;
+ sw->ports[7].ctl_credits = 2;
sw->ports[7].dual_link_port = &sw->ports[8];
sw->ports[8].config.type = TB_TYPE_PORT;
sw->ports[8].config.max_in_hop_id = 19;
sw->ports[8].config.max_out_hop_id = 19;
+ sw->ports[8].total_credits = 60;
+ sw->ports[8].ctl_credits = 2;
sw->ports[8].dual_link_port = &sw->ports[7];
sw->ports[8].link_nr = 1;
@@ -260,14 +303,18 @@ static struct tb_switch *alloc_dev_default(struct kunit *test,
if (port->dual_link_port && upstream_port->dual_link_port) {
port->dual_link_port->remote = upstream_port->dual_link_port;
upstream_port->dual_link_port->remote = port->dual_link_port;
- }
- if (bonded) {
- /* Bonding is used */
- port->bonded = true;
- port->dual_link_port->bonded = true;
- upstream_port->bonded = true;
- upstream_port->dual_link_port->bonded = true;
+ if (bonded) {
+ /* Bonding is used */
+ port->bonded = true;
+ port->total_credits *= 2;
+ port->dual_link_port->bonded = true;
+ port->dual_link_port->total_credits = 0;
+ upstream_port->bonded = true;
+ upstream_port->total_credits *= 2;
+ upstream_port->dual_link_port->bonded = true;
+ upstream_port->dual_link_port->total_credits = 0;
+ }
}
return sw;
@@ -294,6 +341,27 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
return sw;
}
+static struct tb_switch *alloc_dev_usb4(struct kunit *test,
+ struct tb_switch *parent,
+ u64 route, bool bonded)
+{
+ struct tb_switch *sw;
+
+ sw = alloc_dev_default(test, parent, route, bonded);
+ if (!sw)
+ return NULL;
+
+ sw->generation = 4;
+ sw->credit_allocation = true;
+ sw->max_usb3_credits = 14;
+ sw->min_dp_aux_credits = 1;
+ sw->min_dp_main_credits = 18;
+ sw->max_pcie_credits = 32;
+ sw->max_dma_credits = 14;
+
+ return sw;
+}
+
static void tb_test_path_basic(struct kunit *test)
{
struct tb_port *src_port, *dst_port, *p;
@@ -389,7 +457,7 @@ static void tb_test_path_single_hop_walk(struct kunit *test)
i++;
}
- KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
@@ -448,7 +516,7 @@ static void tb_test_path_daisy_chain_walk(struct kunit *test)
i++;
}
- KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
@@ -511,7 +579,7 @@ static void tb_test_path_simple_tree_walk(struct kunit *test)
i++;
}
- KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
@@ -595,7 +663,7 @@ static void tb_test_path_complex_tree_walk(struct kunit *test)
i++;
}
- KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
@@ -698,7 +766,7 @@ static void tb_test_path_max_length_walk(struct kunit *test)
i++;
}
- KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
@@ -780,7 +848,7 @@ static void tb_test_path_not_bonded_lane0(struct kunit *test)
path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
KUNIT_ASSERT_TRUE(test, path != NULL);
- KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
@@ -842,7 +910,7 @@ static void tb_test_path_not_bonded_lane1(struct kunit *test)
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_TRUE(test, path != NULL);
- KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
@@ -922,7 +990,7 @@ static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_TRUE(test, path != NULL);
- KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
@@ -1002,7 +1070,7 @@ static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_TRUE(test, path != NULL);
- KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
@@ -1094,7 +1162,7 @@ static void tb_test_path_mixed_chain(struct kunit *test)
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_TRUE(test, path != NULL);
- KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
@@ -1186,7 +1254,7 @@ static void tb_test_path_mixed_chain_reverse(struct kunit *test)
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_TRUE(test, path != NULL);
- KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
@@ -1230,10 +1298,10 @@ static void tb_test_tunnel_pcie(struct kunit *test)
up = &dev1->ports[9];
tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
- KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
+ KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
- KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
@@ -1245,10 +1313,10 @@ static void tb_test_tunnel_pcie(struct kunit *test)
up = &dev2->ports[9];
tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
- KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
+ KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
- KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
@@ -1282,10 +1350,10 @@ static void tb_test_tunnel_dp(struct kunit *test)
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
@@ -1328,10 +1396,10 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
@@ -1378,10 +1446,10 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
@@ -1443,10 +1511,10 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
/* First hop */
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
@@ -1499,10 +1567,10 @@ static void tb_test_tunnel_usb3(struct kunit *test)
up = &dev1->ports[16];
tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
- KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
+ KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
- KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
@@ -1514,10 +1582,10 @@ static void tb_test_tunnel_usb3(struct kunit *test)
up = &dev2->ports[16];
tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
- KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
+ KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
- KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
@@ -1618,10 +1686,10 @@ static void tb_test_tunnel_dma(struct kunit *test)
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
@@ -1661,10 +1729,10 @@ static void tb_test_tunnel_dma_rx(struct kunit *test)
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
@@ -1698,10 +1766,10 @@ static void tb_test_tunnel_dma_tx(struct kunit *test)
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
@@ -1744,10 +1812,10 @@ static void tb_test_tunnel_dma_chain(struct kunit *test)
port = &dev2->ports[3];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
- KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
+ KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
- KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
@@ -1829,6 +1897,475 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
tb_tunnel_free(tunnel);
}
+static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host(test);
+ dev = alloc_dev_default(test, host, 0x1, false);
+
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
+
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host(test);
+ dev = alloc_dev_default(test, host, 0x1, true);
+
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_pcie(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_dp(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ in = &host->ports[5];
+ out = &dev->ports[14];
+
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+
+ /* Video (main) path */
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
+
+ /* AUX TX */
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ /* AUX RX */
+ path = tunnel->paths[2];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_usb3(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ down = &host->ports[12];
+ up = &dev->ports[16];
+ tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_dma(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *nhi, *port;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ nhi = &host->ports[7];
+ port = &dev->ports[3];
+
+ tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ /* DMA RX */
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ /* DMA TX */
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
+{
+ struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
+ struct tb_switch *host, *dev;
+ struct tb_port *nhi, *port;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ nhi = &host->ports[7];
+ port = &dev->ports[3];
+
+ /*
+ * Create three DMA tunnels through the same ports. With the
+ * default buffers we should be able to create two, while the last
+ * one fails.
+ *
+ * For the default host we have the following buffers for DMA:
+ *
+ * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
+ *
+ * For the device we have the following:
+ *
+ * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
+ *
+ * spare = 14 + 1 = 15
+ *
+ * So on host the first tunnel gets 14 and the second gets the
+ * remaining 1 and then we run out of buffers.
+ */
+ tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
+ KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+
+ path = tunnel1->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ path = tunnel1->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
+ KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
+ KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+
+ path = tunnel2->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ path = tunnel2->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
+ KUNIT_ASSERT_TRUE(test, tunnel3 == NULL);
+
+ /*
+ * Release the first DMA tunnel. That should make 14 buffers
+ * available for the next tunnel.
+ */
+ tb_tunnel_free(tunnel1);
+
+ tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
+ KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
+
+ path = tunnel3->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ path = tunnel3->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ tb_tunnel_free(tunnel3);
+ tb_tunnel_free(tunnel2);
+}
+
+static void tb_test_credit_alloc_all(struct kunit *test)
+{
+ struct tb_port *up, *down, *in, *out, *nhi, *port;
+ struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
+ struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
+ struct tb_switch *host, *dev;
+ struct tb_path *path;
+
+ /*
+ * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
+ * device. The expectation is that all these can be established with
+ * the default credit allocation found in Intel hardware.
+ */
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, pcie_tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
+
+ path = pcie_tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+
+ path = pcie_tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+
+ in = &host->ports[5];
+ out = &dev->ports[13];
+
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
+ KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
+
+ path = dp_tunnel1->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
+
+ path = dp_tunnel1->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ path = dp_tunnel1->paths[2];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ in = &host->ports[6];
+ out = &dev->ports[14];
+
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
+ KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
+
+ path = dp_tunnel2->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
+
+ path = dp_tunnel2->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ path = dp_tunnel2->paths[2];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ down = &host->ports[12];
+ up = &dev->ports[16];
+ usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+ KUNIT_ASSERT_TRUE(test, usb3_tunnel != NULL);
+ KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
+
+ path = usb3_tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ path = usb3_tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
+
+ nhi = &host->ports[7];
+ port = &dev->ports[3];
+
+ dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
+ KUNIT_ASSERT_TRUE(test, dma_tunnel1 != NULL);
+ KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
+
+ path = dma_tunnel1->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ path = dma_tunnel1->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
+ KUNIT_ASSERT_TRUE(test, dma_tunnel2 != NULL);
+ KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
+
+ path = dma_tunnel2->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ path = dma_tunnel2->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ tb_tunnel_free(dma_tunnel2);
+ tb_tunnel_free(dma_tunnel1);
+ tb_tunnel_free(usb3_tunnel);
+ tb_tunnel_free(dp_tunnel2);
+ tb_tunnel_free(dp_tunnel1);
+ tb_tunnel_free(pcie_tunnel);
+}
+
static const u32 root_directory[] = {
0x55584401, /* "UXD" v1 */
0x00000018, /* Root directory length */
@@ -1906,7 +2443,7 @@ static void tb_test_property_parse(struct kunit *test)
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
- KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa27);
+ KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_TRUE(test, p != NULL);
@@ -1914,7 +2451,7 @@ static void tb_test_property_parse(struct kunit *test)
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
- KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa);
+ KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
KUNIT_ASSERT_TRUE(test, !p);
@@ -1927,19 +2464,19 @@ static void tb_test_property_parse(struct kunit *test)
p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
- KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
+ KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
- KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
+ KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
- KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
+ KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
- KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x0);
+ KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
KUNIT_EXPECT_TRUE(test, !p);
@@ -1960,7 +2497,7 @@ static void tb_test_property_format(struct kunit *test)
KUNIT_ASSERT_TRUE(test, dir != NULL);
ret = tb_property_format_dir(dir, NULL, 0);
- KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));
+ KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
block_len = ret;
@@ -2063,7 +2600,7 @@ static void tb_test_property_copy(struct kunit *test)
/* Compare the resulting property block */
ret = tb_property_format_dir(dst, NULL, 0);
- KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));
+ KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, block != NULL);
@@ -2105,6 +2642,14 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_tunnel_dma_tx),
KUNIT_CASE(tb_test_tunnel_dma_chain),
KUNIT_CASE(tb_test_tunnel_dma_match),
+ KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
+ KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
+ KUNIT_CASE(tb_test_credit_alloc_pcie),
+ KUNIT_CASE(tb_test_credit_alloc_dp),
+ KUNIT_CASE(tb_test_credit_alloc_usb3),
+ KUNIT_CASE(tb_test_credit_alloc_dma),
+ KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
+ KUNIT_CASE(tb_test_credit_alloc_all),
KUNIT_CASE(tb_test_property_parse),
KUNIT_CASE(tb_test_property_format),
KUNIT_CASE(tb_test_property_copy),
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index e1979bed7146..bb5cc480fc9a 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -34,6 +34,16 @@
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
+/* Minimum number of credits needed for PCIe path */
+#define TB_MIN_PCIE_CREDITS 6U
+/*
+ * Number of credits we try to allocate for each DMA path if not limited
+ * by the host router baMaxHI.
+ */
+#define TB_DMA_CREDITS 14U
+/* Minimum number of credits for DMA path */
+#define TB_MIN_DMA_CREDITS 1U
+
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
@@ -57,6 +67,55 @@ static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+static inline unsigned int tb_usable_credits(const struct tb_port *port)
+{
+ return port->total_credits - port->ctl_credits;
+}
+
+/**
+ * tb_available_credits() - Available credits for PCIe and DMA
+ * @port: Lane adapter to check
+ * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
+ * streams possible through this lane adapter
+ */
+static unsigned int tb_available_credits(const struct tb_port *port,
+ size_t *max_dp_streams)
+{
+ const struct tb_switch *sw = port->sw;
+ int credits, usb3, pcie, spare;
+ size_t ndp;
+
+ usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
+ pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
+
+ if (tb_acpi_is_xdomain_allowed()) {
+ spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
+ /* Add some credits for potential second DMA tunnel */
+ spare += TB_MIN_DMA_CREDITS;
+ } else {
+ spare = 0;
+ }
+
+ credits = tb_usable_credits(port);
+ if (tb_acpi_may_tunnel_dp()) {
+ /*
+ * Maximum number of DP streams possible through the
+ * lane adapter.
+ */
+ ndp = (credits - (usb3 + pcie + spare)) /
+ (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+ } else {
+ ndp = 0;
+ }
+ credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+ credits -= usb3;
+
+ if (max_dp_streams)
+ *max_dp_streams = ndp;
+
+ return credits > 0 ? credits : 0;
+}
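A rough worked example of the arithmetic above, using purely hypothetical numbers (not values read from any real router): assume tb_usable_credits() returns 58, max_usb3_credits = 14, max_pcie_credits = 6, a DMA reserve of 14 + 1 = 15, min_dp_aux_credits = 1 and min_dp_main_credits = 18. Then:

	ndp     = (58 - (14 + 6 + 15)) / (1 + 18)  = 1
	credits = 58 - 1 * (1 + 18) - 14           = 25

so one DP stream fits through the lane adapter and 25 credits remain available for PCIe and DMA paths.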
+
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
enum tb_tunnel_type type)
{
@@ -94,24 +153,37 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
return 0;
}
-static int tb_initial_credits(const struct tb_switch *sw)
+static int tb_pci_init_credits(struct tb_path_hop *hop)
{
- /* If the path is complete sw is not NULL */
- if (sw) {
- /* More credits for faster link */
- switch (sw->link_speed * sw->link_width) {
- case 40:
- return 32;
- case 20:
- return 24;
- }
+ struct tb_port *port = hop->in_port;
+ struct tb_switch *sw = port->sw;
+ unsigned int credits;
+
+ if (tb_port_use_credit_allocation(port)) {
+ unsigned int available;
+
+ available = tb_available_credits(port, NULL);
+ credits = min(sw->max_pcie_credits, available);
+
+ if (credits < TB_MIN_PCIE_CREDITS)
+ return -ENOSPC;
+
+ credits = max(TB_MIN_PCIE_CREDITS, credits);
+ } else {
+ if (tb_port_is_null(port))
+ credits = port->bonded ? 32 : 16;
+ else
+ credits = 7;
}
- return 16;
+ hop->initial_credits = credits;
+ return 0;
}
-static void tb_pci_init_path(struct tb_path *path)
+static int tb_pci_init_path(struct tb_path *path)
{
+ struct tb_path_hop *hop;
+
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
@@ -119,11 +191,16 @@ static void tb_pci_init_path(struct tb_path *path)
path->priority = 3;
path->weight = 1;
path->drop_packages = 0;
- path->nfc_credits = 0;
- path->hops[0].initial_credits = 7;
- if (path->path_length > 1)
- path->hops[1].initial_credits =
- tb_initial_credits(path->hops[1].in_port->sw);
+
+ tb_path_for_each_hop(path, hop) {
+ int ret;
+
+ ret = tb_pci_init_credits(hop);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/**
@@ -163,14 +240,16 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
goto err_free;
}
tunnel->paths[TB_PCI_PATH_UP] = path;
- tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
+ if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
+ goto err_free;
path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
"PCIe Down");
if (!path)
goto err_deactivate;
tunnel->paths[TB_PCI_PATH_DOWN] = path;
- tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
+ if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
+ goto err_deactivate;
/* Validate that the tunnel is complete */
if (!tb_port_is_pcie_up(tunnel->dst_port)) {
@@ -228,23 +307,25 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
"PCIe Down");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
- tb_pci_init_path(path);
+ if (!path)
+ goto err_free;
tunnel->paths[TB_PCI_PATH_DOWN] = path;
+ if (tb_pci_init_path(path))
+ goto err_free;
path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
"PCIe Up");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
- tb_pci_init_path(path);
+ if (!path)
+ goto err_free;
tunnel->paths[TB_PCI_PATH_UP] = path;
+ if (tb_pci_init_path(path))
+ goto err_free;
return tunnel;
+
+err_free:
+ tb_tunnel_free(tunnel);
+ return NULL;
}
static bool tb_dp_is_usb4(const struct tb_switch *sw)
@@ -599,9 +680,20 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
+static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
+{
+ struct tb_port *port = hop->in_port;
+ struct tb_switch *sw = port->sw;
+
+ if (tb_port_use_credit_allocation(port))
+ hop->initial_credits = sw->min_dp_aux_credits;
+ else
+ hop->initial_credits = 1;
+}
+
static void tb_dp_init_aux_path(struct tb_path *path)
{
- int i;
+ struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
@@ -610,13 +702,42 @@ static void tb_dp_init_aux_path(struct tb_path *path)
path->priority = 2;
path->weight = 1;
- for (i = 0; i < path->path_length; i++)
- path->hops[i].initial_credits = 1;
+ tb_path_for_each_hop(path, hop)
+ tb_dp_init_aux_credits(hop);
}
-static void tb_dp_init_video_path(struct tb_path *path, bool discover)
+static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
- u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
+ struct tb_port *port = hop->in_port;
+ struct tb_switch *sw = port->sw;
+
+ if (tb_port_use_credit_allocation(port)) {
+ unsigned int nfc_credits;
+ size_t max_dp_streams;
+
+ tb_available_credits(port, &max_dp_streams);
+ /*
+ * Read the number of currently allocated NFC credits
+ * from the lane adapter. Since we only use them for DP
+ * tunneling we can use that to figure out how many DP
+ * tunnels already go through the lane adapter.
+ */
+ nfc_credits = port->config.nfc_credits &
+ ADP_CS_4_NFC_BUFFERS_MASK;
+ if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
+ return -ENOSPC;
+
+ hop->nfc_credits = sw->min_dp_main_credits;
+ } else {
+ hop->nfc_credits = min(port->total_credits - 2, 12U);
+ }
+
+ return 0;
+}
+
+static int tb_dp_init_video_path(struct tb_path *path)
+{
+ struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_NONE;
path->egress_shared_buffer = TB_PATH_NONE;
@@ -625,16 +746,15 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover)
path->priority = 1;
path->weight = 1;
- if (discover) {
- path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
- } else {
- u32 max_credits;
+ tb_path_for_each_hop(path, hop) {
+ int ret;
- max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
- ADP_CS_4_TOTAL_BUFFERS_SHIFT;
- /* Leave some credits for AUX path */
- path->nfc_credits = min(max_credits - 2, 12U);
+ ret = tb_dp_init_video_credits(hop);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
/**
@@ -674,7 +794,8 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
goto err_free;
}
tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
- tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
+ if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
+ goto err_free;
path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
if (!path)
@@ -761,7 +882,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1, "Video");
if (!path)
goto err_free;
- tb_dp_init_video_path(path, false);
+ tb_dp_init_video_path(path);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
@@ -785,20 +906,92 @@ err_free:
return NULL;
}
-static u32 tb_dma_credits(struct tb_port *nhi)
+static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
- u32 max_credits;
+ const struct tb_switch *sw = port->sw;
+ int credits;
- max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
- ADP_CS_4_TOTAL_BUFFERS_SHIFT;
- return min(max_credits, 13U);
+ credits = tb_available_credits(port, NULL);
+ if (tb_acpi_may_tunnel_pcie())
+ credits -= sw->max_pcie_credits;
+ credits -= port->dma_credits;
+
+ return credits > 0 ? credits : 0;
}
-static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
+static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
- int i;
+ struct tb_port *port = hop->in_port;
- path->egress_fc_enable = efc;
+ if (tb_port_use_credit_allocation(port)) {
+ unsigned int available = tb_dma_available_credits(port);
+
+ /*
+ * Need to have at least TB_MIN_DMA_CREDITS, otherwise
+ * DMA path cannot be established.
+ */
+ if (available < TB_MIN_DMA_CREDITS)
+ return -ENOSPC;
+
+ while (credits > available)
+ credits--;
+
+ tb_port_dbg(port, "reserving %u credits for DMA path\n",
+ credits);
+
+ port->dma_credits += credits;
+ } else {
+ if (tb_port_is_null(port))
+ credits = port->bonded ? 14 : 6;
+ else
+ credits = min(port->total_credits, credits);
+ }
+
+ hop->initial_credits = credits;
+ return 0;
+}
+
+/* Path from lane adapter to NHI */
+static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
+{
+ struct tb_path_hop *hop;
+ unsigned int i, tmp;
+
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 5;
+ path->weight = 1;
+ path->clear_fc = true;
+
+ /*
+ * First lane adapter is the one connected to the remote host.
+ * We don't tunnel other traffic over this link so we can use all
+ * the credits (except the ones reserved for control traffic).
+ */
+ hop = &path->hops[0];
+ tmp = min(tb_usable_credits(hop->in_port), credits);
+ hop->initial_credits = tmp;
+ hop->in_port->dma_credits += tmp;
+
+ for (i = 1; i < path->path_length; i++) {
+ int ret;
+
+ ret = tb_dma_reserve_credits(&path->hops[i], credits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Path from NHI to lane adapter */
+static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
+{
+ struct tb_path_hop *hop;
+
+ path->egress_fc_enable = TB_PATH_ALL;
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
@@ -806,8 +999,46 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits
path->weight = 1;
path->clear_fc = true;
- for (i = 0; i < path->path_length; i++)
- path->hops[i].initial_credits = credits;
+ tb_path_for_each_hop(path, hop) {
+ int ret;
+
+ ret = tb_dma_reserve_credits(hop, credits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tb_dma_release_credits(struct tb_path_hop *hop)
+{
+ struct tb_port *port = hop->in_port;
+
+ if (tb_port_use_credit_allocation(port)) {
+ port->dma_credits -= hop->initial_credits;
+
+ tb_port_dbg(port, "released %u DMA path credits\n",
+ hop->initial_credits);
+ }
+}
+
+static void tb_dma_deinit_path(struct tb_path *path)
+{
+ struct tb_path_hop *hop;
+
+ tb_path_for_each_hop(path, hop)
+ tb_dma_release_credits(hop);
+}
+
+static void tb_dma_deinit(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ continue;
+ tb_dma_deinit_path(tunnel->paths[i]);
+ }
}
/**
@@ -832,7 +1063,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_tunnel *tunnel;
size_t npaths = 0, i = 0;
struct tb_path *path;
- u32 credits;
+ int credits;
if (receive_ring > 0)
npaths++;
@@ -848,32 +1079,39 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->src_port = nhi;
tunnel->dst_port = dst;
+ tunnel->deinit = tb_dma_deinit;
- credits = tb_dma_credits(nhi);
+ credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
if (receive_ring > 0) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
"DMA RX");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
- tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
+ if (!path)
+ goto err_free;
tunnel->paths[i++] = path;
+ if (tb_dma_init_rx_path(path, credits)) {
+ tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
+ goto err_free;
+ }
}
if (transmit_ring > 0) {
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
"DMA TX");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
- tb_dma_init_path(path, TB_PATH_ALL, credits);
+ if (!path)
+ goto err_free;
tunnel->paths[i++] = path;
+ if (tb_dma_init_tx_path(path, credits)) {
+ tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
+ goto err_free;
+ }
}
return tunnel;
+
+err_free:
+ tb_tunnel_free(tunnel);
+ return NULL;
}
/**
@@ -1067,8 +1305,28 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
tunnel->allocated_up, tunnel->allocated_down);
}
+static void tb_usb3_init_credits(struct tb_path_hop *hop)
+{
+ struct tb_port *port = hop->in_port;
+ struct tb_switch *sw = port->sw;
+ unsigned int credits;
+
+ if (tb_port_use_credit_allocation(port)) {
+ credits = sw->max_usb3_credits;
+ } else {
+ if (tb_port_is_null(port))
+ credits = port->bonded ? 32 : 16;
+ else
+ credits = 7;
+ }
+
+ hop->initial_credits = credits;
+}
+
static void tb_usb3_init_path(struct tb_path *path)
{
+ struct tb_path_hop *hop;
+
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
@@ -1076,11 +1334,9 @@ static void tb_usb3_init_path(struct tb_path *path)
path->priority = 3;
path->weight = 3;
path->drop_packages = 0;
- path->nfc_credits = 0;
- path->hops[0].initial_credits = 7;
- if (path->path_length > 1)
- path->hops[1].initial_credits =
- tb_initial_credits(path->hops[1].in_port->sw);
+
+ tb_path_for_each_hop(path, hop)
+ tb_usb3_init_credits(hop);
}
/**
@@ -1280,6 +1536,9 @@ void tb_tunnel_free(struct tb_tunnel *tunnel)
if (!tunnel)
return;
+ if (tunnel->deinit)
+ tunnel->deinit(tunnel);
+
for (i = 0; i < tunnel->npaths; i++) {
if (tunnel->paths[i])
tb_path_free(tunnel->paths[i]);
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index a66994fb4e60..eea14e24f7e0 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -27,6 +27,7 @@ enum tb_tunnel_type {
* @paths: All paths required by the tunnel
* @npaths: Number of paths in @paths
* @init: Optional tunnel specific initialization
+ * @deinit: Optional tunnel specific de-initialization
* @activate: Optional tunnel specific activation/deactivation
* @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @release_unused_bandwidth: Release all unused bandwidth
@@ -47,6 +48,7 @@ struct tb_tunnel {
struct tb_path **paths;
size_t npaths;
int (*init)(struct tb_tunnel *tunnel);
+ void (*deinit)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 671d72af8ba1..ceddbe7e9f93 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -13,7 +13,6 @@
#include "sb_regs.h"
#include "tb.h"
-#define USB4_DATA_DWORDS 16
#define USB4_DATA_RETRIES 3
enum usb4_sb_target {
@@ -37,8 +36,19 @@ enum usb4_sb_target {
#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
-typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
-typedef int (*write_block_fn)(void *, const void *, size_t);
+#define USB4_BA_LENGTH_MASK GENMASK(7, 0)
+#define USB4_BA_INDEX_MASK GENMASK(15, 0)
+
+enum usb4_ba_index {
+ USB4_BA_MAX_USB3 = 0x1,
+ USB4_BA_MIN_DP_AUX = 0x2,
+ USB4_BA_MIN_DP_MAIN = 0x3,
+ USB4_BA_MAX_PCIE = 0x4,
+ USB4_BA_MAX_HI = 0x5,
+};
+
+#define USB4_BA_VALUE_MASK GENMASK(31, 16)
+#define USB4_BA_VALUE_SHIFT 16
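Each dword of the buffer allocation response packs the parameter index into the low 16 bits and its value into the high 16 bits; usb4_switch_credits_init() below extracts both with these masks. A minimal decoding sketch with a made-up response dword:

	u32 dw = 0x000e0001;	/* hypothetical dword: baMaxUSB3 = 14 */
	u16 index = dw & USB4_BA_INDEX_MASK;				/* 0x1 == USB4_BA_MAX_USB3 */
	u16 value = (dw & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;	/* 14 credits */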
static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec)
@@ -62,76 +72,6 @@ static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
return -ETIMEDOUT;
}
-static int usb4_do_read_data(u16 address, void *buf, size_t size,
- read_block_fn read_block, void *read_block_data)
-{
- unsigned int retries = USB4_DATA_RETRIES;
- unsigned int offset;
-
- do {
- unsigned int dwaddress, dwords;
- u8 data[USB4_DATA_DWORDS * 4];
- size_t nbytes;
- int ret;
-
- offset = address & 3;
- nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
-
- dwaddress = address / 4;
- dwords = ALIGN(nbytes, 4) / 4;
-
- ret = read_block(read_block_data, dwaddress, data, dwords);
- if (ret) {
- if (ret != -ENODEV && retries--)
- continue;
- return ret;
- }
-
- nbytes -= offset;
- memcpy(buf, data + offset, nbytes);
-
- size -= nbytes;
- address += nbytes;
- buf += nbytes;
- } while (size > 0);
-
- return 0;
-}
-
-static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
- write_block_fn write_next_block, void *write_block_data)
-{
- unsigned int retries = USB4_DATA_RETRIES;
- unsigned int offset;
-
- offset = address & 3;
- address = address & ~3;
-
- do {
- u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
- u8 data[USB4_DATA_DWORDS * 4];
- int ret;
-
- memcpy(data + offset, buf, nbytes);
-
- ret = write_next_block(write_block_data, data, nbytes / 4);
- if (ret) {
- if (ret == -ETIMEDOUT) {
- if (retries--)
- continue;
- ret = -EIO;
- }
- return ret;
- }
-
- size -= nbytes;
- address += nbytes;
- buf += nbytes;
- } while (size > 0);
-
- return 0;
-}
-
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
@@ -193,7 +133,7 @@ static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
- if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
+ if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
return -EINVAL;
/*
@@ -320,7 +260,7 @@ int usb4_switch_setup(struct tb_switch *sw)
parent = tb_switch_parent(sw);
downstream_port = tb_port_at(tb_route(sw), parent);
sw->link_usb4 = link_is_usb4(downstream_port);
- tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");
+ tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
xhci = val & ROUTER_CS_6_HCI;
tbt3 = !(val & ROUTER_CS_6_TNS);
@@ -414,8 +354,8 @@ static int usb4_switch_drom_read_block(void *data,
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
{
- return usb4_do_read_data(address, buf, size,
- usb4_switch_drom_read_block, sw);
+ return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
+ usb4_switch_drom_read_block, sw);
}
/**
@@ -473,12 +413,18 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
- if (flags & TB_WAKE_ON_CONNECT)
- val |= PORT_CS_19_WOC;
- if (flags & TB_WAKE_ON_DISCONNECT)
- val |= PORT_CS_19_WOD;
- if (flags & TB_WAKE_ON_USB4)
+ if (tb_is_upstream_port(port)) {
val |= PORT_CS_19_WOU4;
+ } else {
+ bool configured = val & PORT_CS_19_PC;
+
+ if ((flags & TB_WAKE_ON_CONNECT) && !configured)
+ val |= PORT_CS_19_WOC;
+ if ((flags & TB_WAKE_ON_DISCONNECT) && configured)
+ val |= PORT_CS_19_WOD;
+ if ((flags & TB_WAKE_ON_USB4) && configured)
+ val |= PORT_CS_19_WOU4;
+ }
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
@@ -487,7 +433,7 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
}
/*
- * Enable wakes from PCIe and USB 3.x on this router. Only
+ * Enable wakes from PCIe, USB 3.x and DP on this router. Only
* needed for device routers.
*/
if (route) {
@@ -495,11 +441,13 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
if (ret)
return ret;
- val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
+ val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
if (flags & TB_WAKE_ON_USB3)
val |= ROUTER_CS_5_WOU;
if (flags & TB_WAKE_ON_PCIE)
val |= ROUTER_CS_5_WOP;
+ if (flags & TB_WAKE_ON_DP)
+ val |= ROUTER_CS_5_WOD;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
@@ -595,12 +543,21 @@ static int usb4_switch_nvm_read_block(void *data,
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
{
- return usb4_do_read_data(address, buf, size,
- usb4_switch_nvm_read_block, sw);
+ return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
+ usb4_switch_nvm_read_block, sw);
}
-static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
- unsigned int address)
+/**
+ * usb4_switch_nvm_set_offset() - Set NVM write offset
+ * @sw: USB4 router
+ * @address: Start offset
+ *
+ * Explicitly sets NVM write offset. Normally when writing to NVM this
+ * is done automatically by usb4_switch_nvm_write().
+ *
+ * Returns %0 on success and negative errno if there was a failure.
+ */
+int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
u32 metadata, dwaddress;
u8 status = 0;
@@ -618,8 +575,8 @@ static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
return status ? -EIO : 0;
}
-static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
- size_t dwords)
+static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
+ const void *buf, size_t dwords)
{
struct tb_switch *sw = data;
u8 status;
@@ -652,8 +609,8 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
if (ret)
return ret;
- return usb4_do_write_data(address, buf, size,
- usb4_switch_nvm_write_next_block, sw);
+ return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
+ usb4_switch_nvm_write_next_block, sw);
}
/**
@@ -736,6 +693,147 @@ int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
}
/**
+ * usb4_switch_credits_init() - Read buffer allocation parameters
+ * @sw: USB4 router
+ *
+ * Reads @sw buffer allocation parameters and initializes @sw buffer
+ * allocation fields accordingly. Specifically @sw->credit_allocation
+ * is set to %true if these parameters can be used in tunneling.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int usb4_switch_credits_init(struct tb_switch *sw)
+{
+ int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
+ int ret, length, i, nports;
+ const struct tb_port *port;
+ u32 data[NVM_DATA_DWORDS];
+ u32 metadata = 0;
+ u8 status = 0;
+
+ memset(data, 0, sizeof(data));
+ ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
+ &status, NULL, 0, data, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+ if (status)
+ return -EIO;
+
+ length = metadata & USB4_BA_LENGTH_MASK;
+ if (WARN_ON(length > ARRAY_SIZE(data)))
+ return -EMSGSIZE;
+
+ max_usb3 = -1;
+ min_dp_aux = -1;
+ min_dp_main = -1;
+ max_pcie = -1;
+ max_dma = -1;
+
+ tb_sw_dbg(sw, "credit allocation parameters:\n");
+
+ for (i = 0; i < length; i++) {
+ u16 index, value;
+
+ index = data[i] & USB4_BA_INDEX_MASK;
+ value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
+
+ switch (index) {
+ case USB4_BA_MAX_USB3:
+ tb_sw_dbg(sw, " USB3: %u\n", value);
+ max_usb3 = value;
+ break;
+ case USB4_BA_MIN_DP_AUX:
+ tb_sw_dbg(sw, " DP AUX: %u\n", value);
+ min_dp_aux = value;
+ break;
+ case USB4_BA_MIN_DP_MAIN:
+ tb_sw_dbg(sw, " DP main: %u\n", value);
+ min_dp_main = value;
+ break;
+ case USB4_BA_MAX_PCIE:
+ tb_sw_dbg(sw, " PCIe: %u\n", value);
+ max_pcie = value;
+ break;
+ case USB4_BA_MAX_HI:
+ tb_sw_dbg(sw, " DMA: %u\n", value);
+ max_dma = value;
+ break;
+ default:
+ tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
+ index);
+ break;
+ }
+ }
+
+ /*
+ * Validate the buffer allocation preferences. If we find
+ * issues, log a warning and fall back using the hard-coded
+ * values.
+ */
+
+ /* Host router must report baMaxHI */
+ if (!tb_route(sw) && max_dma < 0) {
+ tb_sw_warn(sw, "host router is missing baMaxHI\n");
+ goto err_invalid;
+ }
+
+ nports = 0;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_null(port))
+ nports++;
+ }
+
+ /* Must have DP buffer allocation (multiple USB4 ports) */
+ if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
+ tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
+ goto err_invalid;
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_dpout(port) && min_dp_main < 0) {
+ tb_sw_warn(sw, "missing baMinDPmain");
+ goto err_invalid;
+ }
+ if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
+ min_dp_aux < 0) {
+ tb_sw_warn(sw, "missing baMinDPaux");
+ goto err_invalid;
+ }
+ if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
+ max_usb3 < 0) {
+ tb_sw_warn(sw, "missing baMaxUSB3");
+ goto err_invalid;
+ }
+ if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
+ max_pcie < 0) {
+ tb_sw_warn(sw, "missing baMaxPCIe");
+ goto err_invalid;
+ }
+ }
+
+ /*
+ * Buffer allocation passed the validation so we can use it in
+ * path creation.
+ */
+ sw->credit_allocation = true;
+ if (max_usb3 > 0)
+ sw->max_usb3_credits = max_usb3;
+ if (min_dp_aux > 0)
+ sw->min_dp_aux_credits = min_dp_aux;
+ if (min_dp_main > 0)
+ sw->min_dp_main_credits = min_dp_main;
+ if (max_pcie > 0)
+ sw->max_pcie_credits = max_pcie;
+ if (max_dma > 0)
+ sw->max_dma_credits = max_dma;
+
+ return 0;
+
+err_invalid:
+ return -EINVAL;
+}
+
+/**
* usb4_switch_query_dp_resource() - Query availability of DP IN resource
* @sw: USB4 router
* @in: DP IN adapter
@@ -897,6 +995,60 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
}
/**
+ * usb4_switch_add_ports() - Add USB4 ports for this router
+ * @sw: USB4 router
+ *
+ * For a USB4 router, finds all USB4 ports and registers a device for each.
+ * Can be called for any router.
+ *
+ * Returns %0 in case of success and negative errno in case of failure.
+ */
+int usb4_switch_add_ports(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
+ return 0;
+
+ tb_switch_for_each_port(sw, port) {
+ struct usb4_port *usb4;
+
+ if (!tb_port_is_null(port))
+ continue;
+ if (!port->cap_usb4)
+ continue;
+
+ usb4 = usb4_port_device_add(port);
+ if (IS_ERR(usb4)) {
+ usb4_switch_remove_ports(sw);
+ return PTR_ERR(usb4);
+ }
+
+ port->usb4 = usb4;
+ }
+
+ return 0;
+}
+
+/**
+ * usb4_switch_remove_ports() - Removes USB4 ports from this router
+ * @sw: USB4 router
+ *
+ * Unregisters previously registered USB4 ports.
+ */
+void usb4_switch_remove_ports(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->usb4) {
+ usb4_port_device_remove(port->usb4);
+ port->usb4 = NULL;
+ }
+ }
+}
+
+/**
* usb4_port_unlock() - Unlock USB4 downstream port
* @port: USB4 port to unlock
*
@@ -1029,7 +1181,7 @@ static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
- if (dwords > USB4_DATA_DWORDS)
+ if (dwords > NVM_DATA_DWORDS)
return -EINVAL;
return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
@@ -1039,7 +1191,7 @@ static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
static int usb4_port_write_data(struct tb_port *port, const void *data,
size_t dwords)
{
- if (dwords > USB4_DATA_DWORDS)
+ if (dwords > NVM_DATA_DWORDS)
return -EINVAL;
return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
@@ -1175,6 +1327,48 @@ static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
return -ETIMEDOUT;
}
+static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
+{
+ u32 val = !offline;
+ int ret;
+
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val = USB4_SB_OPCODE_ROUTER_OFFLINE;
+ return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE, &val, sizeof(val));
+}
+
+/**
+ * usb4_port_router_offline() - Put the USB4 port to offline mode
+ * @port: USB4 port
+ *
+ * This function puts the USB4 port into offline mode. In this mode the
+ * port does not react to hotplug events anymore. This needs to be
+ * called before retimer access is done when the USB4 link is not up.
+ *
+ * Returns %0 in case of success and negative errno if there was an
+ * error.
+ */
+int usb4_port_router_offline(struct tb_port *port)
+{
+ return usb4_port_set_router_offline(port, true);
+}
+
+/**
+ * usb4_port_router_online() - Put the USB4 port back to online
+ * @port: USB4 port
+ *
+ * Makes the USB4 port functional again.
+ */
+int usb4_port_router_online(struct tb_port *port)
+{
+ return usb4_port_set_router_offline(port, false);
+}
+
/**
* usb4_port_enumerate_retimers() - Send RT broadcast transaction
* @port: USB4 port
@@ -1201,6 +1395,33 @@ static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
}
/**
+ * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Enables sideband channel transactions on SBTX. Can be used when USB4
+ * link does not go up, for example if there is no device connected.
+ */
+int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
+{
+ int ret;
+
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
+ 500);
+
+ if (ret != -ENODEV)
+ return ret;
+
+ /*
+ * Per the USB4 retimer spec, the retimer is not required to
+ * send an RT (Retimer Transaction) response for the first
+ * SET_INBOUND_SBTX command
+ */
+ return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
+ 500);
+}
+
+/**
* usb4_port_retimer_read() - Read from retimer sideband registers
* @port: USB4 port
* @index: Retimer index
@@ -1292,8 +1513,19 @@ int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
-static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
- unsigned int address)
+/**
+ * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
+ * @port: USB4 port
+ * @index: Retimer index
+ * @address: Start offset
+ *
+ * Explicitly sets NVM write offset. Normally when writing to NVM this is
+ * done automatically by usb4_port_retimer_nvm_write().
+ *
+ * Returns %0 on success and negative errno if there was a failure.
+ */
+int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
+ unsigned int address)
{
u32 metadata, dwaddress;
int ret;
@@ -1316,8 +1548,8 @@ struct retimer_info {
u8 index;
};
-static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
- size_t dwords)
+static int usb4_port_retimer_nvm_write_next_block(void *data,
+ unsigned int dwaddress, const void *buf, size_t dwords)
{
const struct retimer_info *info = data;
@@ -1357,8 +1589,8 @@ int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int add
if (ret)
return ret;
- return usb4_do_write_data(address, buf, size,
- usb4_port_retimer_nvm_write_next_block, &info);
+ return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
+ usb4_port_retimer_nvm_write_next_block, &info);
}
/**
@@ -1442,7 +1674,7 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
int ret;
metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
- if (dwords < USB4_DATA_DWORDS)
+ if (dwords < NVM_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
@@ -1475,8 +1707,8 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
{
struct retimer_info info = { .port = port, .index = index };
- return usb4_do_read_data(address, buf, size,
- usb4_port_retimer_nvm_read_block, &info);
+ return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
+ usb4_port_retimer_nvm_read_block, &info);
}
/**
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
new file mode 100644
index 000000000000..29e2a4f9c9f5
--- /dev/null
+++ b/drivers/thunderbolt/usb4_port.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 port device
+ *
+ * Copyright (C) 2021, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "tb.h"
+
+static ssize_t link_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
+ struct tb_port *port = usb4->port;
+ struct tb *tb = port->sw->tb;
+ const char *link;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (tb_is_upstream_port(port))
+ link = port->sw->link_usb4 ? "usb4" : "tbt";
+ else if (tb_port_has_remote(port))
+ link = port->remote->sw->link_usb4 ? "usb4" : "tbt";
+ else
+ link = "none";
+
+ mutex_unlock(&tb->lock);
+
+ return sysfs_emit(buf, "%s\n", link);
+}
+static DEVICE_ATTR_RO(link);
+
+static struct attribute *common_attrs[] = {
+ &dev_attr_link.attr,
+ NULL
+};
+
+static const struct attribute_group common_group = {
+ .attrs = common_attrs,
+};
+
+static int usb4_port_offline(struct usb4_port *usb4)
+{
+ struct tb_port *port = usb4->port;
+ int ret;
+
+ ret = tb_acpi_power_on_retimers(port);
+ if (ret)
+ return ret;
+
+ ret = usb4_port_router_offline(port);
+ if (ret) {
+ tb_acpi_power_off_retimers(port);
+ return ret;
+ }
+
+ ret = tb_retimer_scan(port, false);
+ if (ret) {
+ usb4_port_router_online(port);
+ tb_acpi_power_off_retimers(port);
+ }
+
+ return ret;
+}
+
+static void usb4_port_online(struct usb4_port *usb4)
+{
+ struct tb_port *port = usb4->port;
+
+ usb4_port_router_online(port);
+ tb_acpi_power_off_retimers(port);
+}
+
+static ssize_t offline_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
+
+ return sysfs_emit(buf, "%d\n", usb4->offline);
+}
+
+static ssize_t offline_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
+ struct tb_port *port = usb4->port;
+ struct tb *tb = port->sw->tb;
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(&usb4->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm;
+ }
+
+ if (val == usb4->offline)
+ goto out_unlock;
+
+ /* Offline mode works only for ports that are not connected */
+ if (tb_port_has_remote(port)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ if (val) {
+ ret = usb4_port_offline(usb4);
+ if (ret)
+ goto out_unlock;
+ } else {
+ usb4_port_online(usb4);
+ tb_retimer_remove_all(port);
+ }
+
+ usb4->offline = val;
+ tb_port_dbg(port, "%s offline mode\n", val ? "enter" : "exit");
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm:
+ pm_runtime_mark_last_busy(&usb4->dev);
+ pm_runtime_put_autosuspend(&usb4->dev);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(offline);
+
+static ssize_t rescan_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
+ struct tb_port *port = usb4->port;
+ struct tb *tb = port->sw->tb;
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return count;
+
+ pm_runtime_get_sync(&usb4->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm;
+ }
+
+ /* Must be in offline mode already */
+ if (!usb4->offline) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ tb_retimer_remove_all(port);
+ ret = tb_retimer_scan(port, true);
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm:
+ pm_runtime_mark_last_busy(&usb4->dev);
+ pm_runtime_put_autosuspend(&usb4->dev);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_WO(rescan);
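Together, offline and rescan let userspace reach retimers on a port that has nothing plugged in: first force the port offline, then trigger a retimer rescan. A minimal userspace sketch, assuming a hypothetical sysfs path (the real path depends on where the router sits in the domain):

	#include <fcntl.h>
	#include <unistd.h>

	/* Hypothetical path; usb4_portN devices hang off their router's device node. */
	#define PORT "/sys/bus/thunderbolt/devices/0-0/usb4_port1"

	static void poke(const char *attr)
	{
		int fd = open(attr, O_WRONLY);

		if (fd >= 0) {
			write(fd, "1", 1);
			close(fd);
		}
	}

	int main(void)
	{
		poke(PORT "/offline");	/* enter offline mode */
		poke(PORT "/rescan");	/* rescan retimers while offline */
		return 0;
	}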
+
+static struct attribute *service_attrs[] = {
+ &dev_attr_offline.attr,
+ &dev_attr_rescan.attr,
+ NULL
+};
+
+static umode_t service_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
+
+ /*
+ * Always need some platform help to cycle the modes so that
+ * retimers can be accessed through the sideband.
+ */
+ return usb4->can_offline ? attr->mode : 0;
+}
+
+static const struct attribute_group service_group = {
+ .attrs = service_attrs,
+ .is_visible = service_attr_is_visible,
+};
+
+static const struct attribute_group *usb4_port_device_groups[] = {
+ &common_group,
+ &service_group,
+ NULL
+};
+
+static void usb4_port_device_release(struct device *dev)
+{
+ struct usb4_port *usb4 = container_of(dev, struct usb4_port, dev);
+
+ kfree(usb4);
+}
+
+struct device_type usb4_port_device_type = {
+ .name = "usb4_port",
+ .groups = usb4_port_device_groups,
+ .release = usb4_port_device_release,
+};
+
+/**
+ * usb4_port_device_add() - Add USB4 port device
+ * @port: Lane 0 adapter port to add the USB4 port
+ *
+ * Creates and registers a USB4 port device for @port. Returns the new
+ * USB4 port device pointer or ERR_PTR() in case of error.
+ */
+struct usb4_port *usb4_port_device_add(struct tb_port *port)
+{
+ struct usb4_port *usb4;
+ int ret;
+
+ usb4 = kzalloc(sizeof(*usb4), GFP_KERNEL);
+ if (!usb4)
+ return ERR_PTR(-ENOMEM);
+
+ usb4->port = port;
+ usb4->dev.type = &usb4_port_device_type;
+ usb4->dev.parent = &port->sw->dev;
+ dev_set_name(&usb4->dev, "usb4_port%d", port->port);
+
+ ret = device_register(&usb4->dev);
+ if (ret) {
+ put_device(&usb4->dev);
+ return ERR_PTR(ret);
+ }
+
+ pm_runtime_no_callbacks(&usb4->dev);
+ pm_runtime_set_active(&usb4->dev);
+ pm_runtime_enable(&usb4->dev);
+ pm_runtime_set_autosuspend_delay(&usb4->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&usb4->dev);
+ pm_runtime_use_autosuspend(&usb4->dev);
+
+ return usb4;
+}
+
+/**
+ * usb4_port_device_remove() - Removes USB4 port device
+ * @usb4: USB4 port device
+ *
+ * Unregisters the USB4 port device from the system. The device will be
+ * released when the last reference is dropped.
+ */
+void usb4_port_device_remove(struct usb4_port *usb4)
+{
+ device_unregister(&usb4->dev);
+}
+
+/**
+ * usb4_port_device_resume() - Resumes USB4 port device
+ * @usb4: USB4 port device
+ *
+ * Used to resume USB4 port device after sleep state.
+ */
+int usb4_port_device_resume(struct usb4_port *usb4)
+{
+ return usb4->offline ? usb4_port_offline(usb4) : 0;
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index b21d99d59412..d66ea4d616fd 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1527,6 +1527,13 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
return ret;
}
+ ret = tb_port_wait_for_link_width(port, 2, 100);
+ if (ret) {
+ tb_port_warn(port, "timeout enabling lane bonding\n");
+ return ret;
+ }
+
+ tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
dev_dbg(&xd->dev, "lane bonding enabled\n");
@@ -1548,7 +1555,10 @@ void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
if (port->dual_link_port) {
tb_port_lane_bonding_disable(port);
+ if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
+ tb_port_warn(port, "timeout disabling lane bonding\n");
tb_port_disable(port->dual_link_port);
+ tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
dev_dbg(&xd->dev, "lane bonding disabled\n");
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index f6a7fd6d3bb6..23cc988c68a4 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -12,9 +12,8 @@ if TTY
config VT
bool "Virtual terminal" if EXPERT
- depends on !UML
select INPUT
- default y
+ default y if !UML
help
If you say Y here, you will get support for terminal devices with
display and keyboard devices. These are called "virtual" because you
@@ -78,7 +77,7 @@ config VT_CONSOLE_SLEEP
config HW_CONSOLE
bool
- depends on VT && !UML
+ depends on VT
default y
config VT_HW_CONSOLE_BINDING
@@ -204,7 +203,7 @@ config MOXA_INTELLIO
config MOXA_SMARTIO
tristate "Moxa SmartIO support v. 2.0"
- depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
+ depends on SERIAL_NONSTANDARD && PCI
help
Say Y here if you have a Moxa SmartIO multiport serial card and/or
want to help develop a new version of this driver.
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c7054f5117c3..a2bd75fbaaa4 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_AUDIT) += tty_audit.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_N_GSM) += n_gsm.o
-obj-$(CONFIG_R3964) += n_r3964.o
obj-y += vt/
obj-$(CONFIG_HVC_DRIVER) += hvc/
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index ca48ce5a0862..5ec19c48fb7a 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -148,7 +148,7 @@ static __inline__ void rtsdtr_ctrl(int bits)
* ------------------------------------------------------------
* rs_stop() and rs_start()
*
- * This routines are called before setting or resetting tty->stopped.
+ * These routines are called before setting or resetting tty->flow.stopped.
* They enable or disable transmitter interrupts, as necessary.
* ------------------------------------------------------------
*/
@@ -309,7 +309,7 @@ static void transmit_chars(struct serial_state *info)
return;
}
if (info->xmit.head == info->xmit.tail
- || info->tport.tty->stopped
+ || info->tport.tty->flow.stopped
|| info->tport.tty->hw_stopped) {
info->IER &= ~UART_IER_THRI;
custom.intena = IF_TBE;
@@ -768,7 +768,7 @@ static void rs_flush_chars(struct tty_struct *tty)
unsigned long flags;
if (info->xmit.head == info->xmit.tail
- || tty->stopped
+ || tty->flow.stopped
|| tty->hw_stopped
|| !info->xmit.buf)
return;
@@ -812,7 +812,7 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
local_irq_restore(flags);
if (info->xmit.head != info->xmit.tail
- && !tty->stopped
+ && !tty->flow.stopped
&& !tty->hw_stopped
&& !(info->IER & UART_IER_THRI)) {
info->IER |= UART_IER_THRI;
@@ -827,14 +827,14 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
return ret;
}
-static int rs_write_room(struct tty_struct *tty)
+static unsigned int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
-static int rs_chars_in_buffer(struct tty_struct *tty)
+static unsigned int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 3c6dd06ec5fb..445e5ff9b36d 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -536,11 +536,11 @@ static void ehv_bc_tty_close(struct tty_struct *ttys, struct file *filp)
* how much write room the driver can guarantee will be sent OR BUFFERED. This
* driver MUST honor the return value.
*/
-static int ehv_bc_tty_write_room(struct tty_struct *ttys)
+static unsigned int ehv_bc_tty_write_room(struct tty_struct *ttys)
{
struct ehv_bc_data *bc = ttys->driver_data;
unsigned long flags;
- int count;
+ unsigned int count;
spin_lock_irqsave(&bc->lock, flags);
count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index cd23a4b05c8f..ccb683a6e6f5 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -193,12 +193,12 @@ static int goldfish_tty_write(struct tty_struct *tty, const unsigned char *buf,
return count;
}
-static int goldfish_tty_write_room(struct tty_struct *tty)
+static unsigned int goldfish_tty_write_room(struct tty_struct *tty)
{
return 0x10000;
}
-static int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
{
struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
void __iomem *base = qtty->base;
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index cdcc64ea2554..5bb8c4e44961 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -292,7 +292,7 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
if (vtermnos[index] != -1)
return -1;
- /* make sure no no tty has been registered in this index */
+ /* make sure no tty has been registered in this index */
hp = hvc_get_by_index(index);
if (hp) {
tty_port_put(&hp->port);
@@ -586,7 +586,7 @@ static void hvc_set_winsz(struct work_struct *work)
* how much write room the driver can guarantee will be sent OR BUFFERED. This
* driver MUST honor the return value.
*/
-static int hvc_write_room(struct tty_struct *tty)
+static unsigned int hvc_write_room(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
@@ -596,7 +596,7 @@ static int hvc_write_room(struct tty_struct *tty)
return hp->outbuf_size - hp->n_outbuf;
}
-static int hvc_chars_in_buffer(struct tty_struct *tty)
+static unsigned int hvc_chars_in_buffer(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
@@ -620,7 +620,7 @@ static u32 timeout = MIN_TIMEOUT;
/*
* Maximum number of bytes to get from the console driver if hvc_poll is
* called from driver (and can't sleep). Any more than this and we break
- * and start polling with khvcd. This value was derived from from an OpenBMC
+ * and start polling with khvcd. This value was derived from an OpenBMC
* console with the OPAL driver that results in about 0.25ms interrupts off
* latency.
*/
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 2af1e5751bd6..82a76cac94de 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -438,8 +438,6 @@ static void hvc_iucv_sndbuf_work(struct work_struct *work)
struct hvc_iucv_private *priv;
priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
- if (!priv)
- return;
spin_lock_bh(&priv->lock);
hvc_iucv_send(priv);
@@ -966,37 +964,6 @@ static void hvc_iucv_msg_complete(struct iucv_path *path,
destroy_tty_buffer_list(&list_remove);
}
-/**
- * hvc_iucv_pm_freeze() - Freeze PM callback
- * @dev: IUVC HVC terminal device
- *
- * Sever an established IUCV communication path and
- * trigger a hang-up of the underlying HVC terminal.
- */
-static int hvc_iucv_pm_freeze(struct device *dev)
-{
- struct hvc_iucv_private *priv = dev_get_drvdata(dev);
-
- local_bh_disable();
- hvc_iucv_hangup(priv);
- local_bh_enable();
-
- return 0;
-}
-
-/**
- * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev: IUVC HVC terminal device
- *
- * Wake up the HVC thread to trigger hang-up and respective
- * HVC back-end notifier invocations.
- */
-static int hvc_iucv_pm_restore_thaw(struct device *dev)
-{
- hvc_kick();
- return 0;
-}
-
static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1051,20 +1018,6 @@ static const struct hv_ops hvc_iucv_ops = {
.dtr_rts = hvc_iucv_dtr_rts,
};
-/* Suspend / resume device operations */
-static const struct dev_pm_ops hvc_iucv_pm_ops = {
- .freeze = hvc_iucv_pm_freeze,
- .thaw = hvc_iucv_pm_restore_thaw,
- .restore = hvc_iucv_pm_restore_thaw,
-};
-
-/* IUCV HVC device driver */
-static struct device_driver hvc_iucv_driver = {
- .name = KMSG_COMPONENT,
- .bus = &iucv_bus,
- .pm = &hvc_iucv_pm_ops,
-};
-
/* IUCV HVC device attributes */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
@@ -1144,7 +1097,6 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
dev_set_drvdata(priv->dev, priv);
priv->dev->bus = &iucv_bus;
priv->dev->parent = iucv_root;
- priv->dev->driver = &hvc_iucv_driver;
priv->dev->groups = hvc_iucv_dev_attr_groups;
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
@@ -1376,11 +1328,6 @@ static int __init hvc_iucv_init(void)
goto out_error;
}
- /* register IUCV HVC device driver */
- rc = driver_register(&hvc_iucv_driver);
- if (rc)
- goto out_error;
-
/* parse hvc_iucv_allow string and create z/VM user ID filter list */
if (hvc_iucv_filter_string) {
rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 798f27f40cc2..72b11aa7e0a6 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -249,7 +249,7 @@ static void udbg_hvc_putc(char c)
count = hvterm_hvsi_put_chars(0, &c, 1);
break;
}
- } while(count == 0);
+ } while (count == 0 || count == -EAGAIN);
}
static int udbg_hvc_getc_poll(void)
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 197988c55e0c..fe5e6b4f43de 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1376,7 +1376,7 @@ static int hvcs_write(struct tty_struct *tty,
* absolutely WILL BUFFER if we can't send it. This driver MUST honor the
* return value, hence the reason for hvcs_struct buffering.
*/
-static int hvcs_write_room(struct tty_struct *tty)
+static unsigned int hvcs_write_room(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
@@ -1386,7 +1386,7 @@ static int hvcs_write_room(struct tty_struct *tty)
return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
}
-static int hvcs_chars_in_buffer(struct tty_struct *tty)
+static unsigned int hvcs_chars_in_buffer(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index e8c58f9bd263..bfc15279d5bc 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -890,14 +890,14 @@ out:
spin_unlock_irqrestore(&hp->lock, flags);
}
-static int hvsi_write_room(struct tty_struct *tty)
+static unsigned int hvsi_write_room(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
return N_OUTBUF - hp->n_outbuf;
}
-static int hvsi_chars_in_buffer(struct tty_struct *tty)
+static unsigned int hvsi_chars_in_buffer(struct tty_struct *tty)
{
struct hvsi_struct *hp = tty->driver_data;
@@ -929,7 +929,7 @@ static int hvsi_write(struct tty_struct *tty,
* will see there is no room in outbuf and return.
*/
while ((count > 0) && (hvsi_write_room(tty) > 0)) {
- int chunksize = min(count, hvsi_write_room(tty));
+ int chunksize = min_t(int, count, hvsi_write_room(tty));
BUG_ON(hp->n_outbuf < 0);
memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
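The min() -> min_t() switch above follows from the same signature change: count is an int while hvsi_write_room() now returns unsigned int, and the kernel's min() rejects mixed signedness at compile time. A one-line illustration:

#include <linux/kernel.h>

/* Force a common comparison type so the signed/unsigned mix still builds. */
static int example_chunk(int count, unsigned int room)
{
	return min_t(int, count, room);
}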
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 99bb2f149ff5..e3a5a5ba752c 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -54,7 +54,6 @@ struct ipw_tty {
unsigned int control_lines;
struct mutex ipw_tty_mutex;
int tx_bytes_queued;
- int closing;
};
static struct ipw_tty *ttys[IPWIRELESS_PCMCIA_MINORS];
@@ -228,7 +227,7 @@ static int ipw_write(struct tty_struct *linux_tty,
return count;
}
-static int ipw_write_room(struct tty_struct *linux_tty)
+static unsigned int ipw_write_room(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
int room;
@@ -270,7 +269,7 @@ static int ipwireless_set_serial_info(struct tty_struct *linux_tty,
return 0; /* Keeps the PCMCIA scripts happy. */
}
-static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
+static unsigned int ipw_chars_in_buffer(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
@@ -525,7 +524,6 @@ void ipwireless_tty_free(struct ipw_tty *tty)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": deregistering %s device ttyIPWp%d\n",
tty_type_name(ttyj->tty_type), j);
- ttyj->closing = 1;
if (ttyj->port.tty != NULL) {
mutex_unlock(&ttyj->ipw_tty_mutex);
tty_vhangup(ttyj->port.tty);
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a8e19b4833bf..3b5915b94fac 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -840,11 +840,11 @@ static int mips_ejtag_fdc_tty_write(struct tty_struct *tty,
return total;
}
-static int mips_ejtag_fdc_tty_write_room(struct tty_struct *tty)
+static unsigned int mips_ejtag_fdc_tty_write_room(struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
struct mips_ejtag_fdc_tty *priv = dport->driver;
- int room;
+ unsigned int room;
/* Report the space in the xmit buffer */
spin_lock(&dport->xmit_lock);
@@ -854,10 +854,10 @@ static int mips_ejtag_fdc_tty_write_room(struct tty_struct *tty)
return room;
}
-static int mips_ejtag_fdc_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int mips_ejtag_fdc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
- int chars;
+ unsigned int chars;
/* Report the number of bytes in the xmit buffer */
spin_lock(&dport->xmit_lock);
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 4d4f15b5cd29..64b18177c790 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -188,9 +188,9 @@ module_param(ttymajor, int, 0);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static int moxa_write(struct tty_struct *, const unsigned char *, int);
-static int moxa_write_room(struct tty_struct *);
+static unsigned int moxa_write_room(struct tty_struct *);
static void moxa_flush_buffer(struct tty_struct *);
-static int moxa_chars_in_buffer(struct tty_struct *);
+static unsigned int moxa_chars_in_buffer(struct tty_struct *);
static void moxa_set_termios(struct tty_struct *, struct ktermios *);
static void moxa_stop(struct tty_struct *);
static void moxa_start(struct tty_struct *);
@@ -216,9 +216,9 @@ static int MoxaPortLineStatus(struct moxa_port *);
static void MoxaPortFlushData(struct moxa_port *, int);
static int MoxaPortWriteData(struct tty_struct *, const unsigned char *, int);
static int MoxaPortReadData(struct moxa_port *);
-static int MoxaPortTxQueue(struct moxa_port *);
+static unsigned int MoxaPortTxQueue(struct moxa_port *);
static int MoxaPortRxQueue(struct moxa_port *);
-static int MoxaPortTxFree(struct moxa_port *);
+static unsigned int MoxaPortTxFree(struct moxa_port *);
static void MoxaPortTxDisable(struct moxa_port *);
static void MoxaPortTxEnable(struct moxa_port *);
static int moxa_get_serial_info(struct tty_struct *, struct serial_struct *);
@@ -1217,11 +1217,11 @@ static int moxa_write(struct tty_struct *tty,
return len;
}
-static int moxa_write_room(struct tty_struct *tty)
+static unsigned int moxa_write_room(struct tty_struct *tty)
{
struct moxa_port *ch;
- if (tty->stopped)
+ if (tty->flow.stopped)
return 0;
ch = tty->driver_data;
if (ch == NULL)
@@ -1239,10 +1239,10 @@ static void moxa_flush_buffer(struct tty_struct *tty)
tty_wakeup(tty);
}
-static int moxa_chars_in_buffer(struct tty_struct *tty)
+static unsigned int moxa_chars_in_buffer(struct tty_struct *tty)
{
struct moxa_port *ch = tty->driver_data;
- int chars;
+ unsigned int chars;
chars = MoxaPortTxQueue(ch);
if (chars)
@@ -1374,7 +1374,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
clear_bit(EMPTYWAIT, &p->statusflags);
tty_wakeup(tty);
}
- if (test_bit(LOWWAIT, &p->statusflags) && !tty->stopped &&
+ if (test_bit(LOWWAIT, &p->statusflags) && !tty->flow.stopped &&
MoxaPortTxQueue(p) <= WAKEUP_CHARS) {
clear_bit(LOWWAIT, &p->statusflags);
tty_wakeup(tty);
@@ -1981,7 +1981,7 @@ static int MoxaPortReadData(struct moxa_port *port)
}
-static int MoxaPortTxQueue(struct moxa_port *port)
+static unsigned int MoxaPortTxQueue(struct moxa_port *port)
{
void __iomem *ofsAddr = port->tableAddr;
u16 rptr, wptr, mask;
@@ -1992,7 +1992,7 @@ static int MoxaPortTxQueue(struct moxa_port *port)
return (wptr - rptr) & mask;
}
-static int MoxaPortTxFree(struct moxa_port *port)
+static unsigned int MoxaPortTxFree(struct moxa_port *port)
{
void __iomem *ofsAddr = port->tableAddr;
u16 rptr, wptr, mask;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 16a852ecbe8a..900ccb2ca166 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -41,9 +41,112 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#include "mxser.h"
+/*
+ * Semi-public control interfaces
+ */
+
+/*
+ * MOXA ioctls
+ */
+
+#define MOXA 0x400
+#define MOXA_SET_OP_MODE (MOXA + 66)
+#define MOXA_GET_OP_MODE (MOXA + 67)
+
+#define RS232_MODE 0
+#define RS485_2WIRE_MODE 1
+#define RS422_MODE 2
+#define RS485_4WIRE_MODE 3
+#define OP_MODE_MASK 3
+
+/* --------------------------------------------------- */
+
+/*
+ * Follow just what the Moxa MUST chip defines.
+ *
+ * When the LCR register (offset 0x03) is written with the value below, the
+ * MUST chip enters enhanced mode, and a write to EFR (offset 0x02) bits 6,7
+ * then selects the register bank.
+ */
+#define MOXA_MUST_ENTER_ENHANCED 0xBF
+
+/* when enhanced mode is enabled, access to general bank register */
+#define MOXA_MUST_GDL_REGISTER 0x07
+#define MOXA_MUST_GDL_MASK 0x7F
+#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
+
+#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
+/* enhanced register bank select and enhanced mode setting register */
+/* This works only when the LCR register equals 0xBF */
+#define MOXA_MUST_EFR_REGISTER 0x02
+#define MOXA_MUST_EFR_EFRB_ENABLE 0x10 /* enhanced mode enable */
+/* enhanced register bank set 0, 1, 2 */
+#define MOXA_MUST_EFR_BANK0 0x00
+#define MOXA_MUST_EFR_BANK1 0x40
+#define MOXA_MUST_EFR_BANK2 0x80
+#define MOXA_MUST_EFR_BANK3 0xC0
+#define MOXA_MUST_EFR_BANK_MASK 0xC0
+
+/* set XON1 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XON1_REGISTER 0x04
+
+/* set XON2 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XON2_REGISTER 0x05
+
+/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XOFF1_REGISTER 0x06
+
+/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XOFF2_REGISTER 0x07
+
+#define MOXA_MUST_RBRTL_REGISTER 0x04
+#define MOXA_MUST_RBRTH_REGISTER 0x05
+#define MOXA_MUST_RBRTI_REGISTER 0x06
+#define MOXA_MUST_THRTL_REGISTER 0x07
+#define MOXA_MUST_ENUM_REGISTER 0x04
+#define MOXA_MUST_HWID_REGISTER 0x05
+#define MOXA_MUST_ECR_REGISTER 0x06
+#define MOXA_MUST_CSR_REGISTER 0x07
+
+#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20 /* good data mode enable */
+#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10 /* only good data put into RxFIFO */
+
+#define MOXA_MUST_IER_ECTSI 0x80 /* enable CTS interrupt */
+#define MOXA_MUST_IER_ERTSI 0x40 /* enable RTS interrupt */
+#define MOXA_MUST_IER_XINT 0x20 /* enable Xon/Xoff interrupt */
+#define MOXA_MUST_IER_EGDAI 0x10 /* enable GDA interrupt */
+
+#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
+
+/* GDA interrupt pending */
+#define MOXA_MUST_IIR_GDA 0x1C
+#define MOXA_MUST_IIR_RDA 0x04
+#define MOXA_MUST_IIR_RTO 0x0C
+#define MOXA_MUST_IIR_LSR 0x06
+
+/* received Xon/Xoff or special interrupt pending */
+#define MOXA_MUST_IIR_XSC 0x10
+
+/* RTS/CTS change state interrupt pending */
+#define MOXA_MUST_IIR_RTSCTS 0x20
+#define MOXA_MUST_IIR_MASK 0x3E
+
+#define MOXA_MUST_MCR_XON_FLAG 0x40
+#define MOXA_MUST_MCR_XON_ANY 0x80
+#define MOXA_MUST_MCR_TX_XON 0x08
+
+#define MOXA_MUST_EFR_SF_MASK 0x0F /* software flow control on chip mask value */
+#define MOXA_MUST_EFR_SF_TX1 0x08 /* send Xon1/Xoff1 */
+#define MOXA_MUST_EFR_SF_TX2 0x04 /* send Xon2/Xoff2 */
+#define MOXA_MUST_EFR_SF_TX12 0x0C /* send Xon1,Xon2/Xoff1,Xoff2 */
+#define MOXA_MUST_EFR_SF_TX_NO 0x00 /* don't send Xon/Xoff */
+#define MOXA_MUST_EFR_SF_TX_MASK 0x0C /* Tx software flow control mask */
+#define MOXA_MUST_EFR_SF_RX_NO 0x00 /* don't receive Xon/Xoff */
+#define MOXA_MUST_EFR_SF_RX1 0x02 /* receive Xon1/Xoff1 */
+#define MOXA_MUST_EFR_SF_RX2 0x01 /* receive Xon2/Xoff2 */
+#define MOXA_MUST_EFR_SF_RX12 0x03 /* receive Xon1,Xon2/Xoff1,Xoff2 */
+#define MOXA_MUST_EFR_SF_RX_MASK 0x03 /* Rx software flow control mask */
-#define MXSER_VERSION "2.0.5" /* 1.14 */
#define MXSERMAJOR 174
#define MXSER_BOARDS 4 /* Max. boards */
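A short editor's sketch of the access sequence the MOXA_MUST_ENTER_ENHANCED comment above describes, the same pattern the new __mxser_must_set_EFR()/mxser_must_select_bank() helpers later in this patch implement: save LCR, write 0xBF to expose the enhanced registers, set the bank through EFR bits 6..7, then put LCR back.

#include <linux/io.h>
#include <linux/serial_reg.h>
#include <linux/types.h>

static void example_must_select_bank(unsigned long base, u8 bank)
{
	u8 oldlcr = inb(base + UART_LCR);
	u8 efr;

	/* writing 0xBF to LCR unlocks the MUST chip's enhanced registers */
	outb(MOXA_MUST_ENTER_ENHANCED, base + UART_LCR);

	/* EFR bits 6..7 pick the enhanced register bank */
	efr = inb(base + MOXA_MUST_EFR_REGISTER);
	efr &= ~MOXA_MUST_EFR_BANK_MASK;
	efr |= bank;
	outb(efr, base + MOXA_MUST_EFR_REGISTER);

	/* restore whatever the caller had in LCR */
	outb(oldlcr, base + UART_LCR);
}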
@@ -51,15 +154,10 @@
#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
#define MXSER_ISR_PASS_LIMIT 100
-/*CheckIsMoxaMust return value*/
-#define MOXA_OTHER_UART 0x00
-#define MOXA_MUST_MU150_HWID 0x01
-#define MOXA_MUST_MU860_HWID 0x02
-
#define WAKEUP_CHARS 256
-#define UART_MCR_AFE 0x20
-#define UART_LSR_SPECIAL 0x1E
+#define MXSER_BAUD_BASE 921600
+#define MXSER_CUSTOM_DIVISOR (MXSER_BAUD_BASE * 16)
#define PCI_DEVICE_ID_POS104UL 0x1044
#define PCI_DEVICE_ID_CB108 0x1080
@@ -70,151 +168,71 @@
#define PCI_DEVICE_ID_CB134I 0x1341
#define PCI_DEVICE_ID_CP138U 0x1380
+#define MXSER_NPORTS(ddata) ((ddata) & 0xffU)
+#define MXSER_HIGHBAUD 0x0100
-#define C168_ASIC_ID 1
-#define C104_ASIC_ID 2
-#define C102_ASIC_ID 0xB
-#define CI132_ASIC_ID 4
-#define CI134_ASIC_ID 3
-#define CI104J_ASIC_ID 5
-
-#define MXSER_HIGHBAUD 1
-#define MXSER_HAS2 2
+enum mxser_must_hwid {
+ MOXA_OTHER_UART = 0x00,
+ MOXA_MUST_MU150_HWID = 0x01,
+ MOXA_MUST_MU860_HWID = 0x02,
+};
-/* This is only for PCI */
static const struct {
- int type;
- int tx_fifo;
- int rx_fifo;
- int xmit_fifo_size;
- int rx_high_water;
- int rx_trigger;
- int rx_low_water;
- long max_baud;
+ u8 type;
+ u8 fifo_size;
+ u8 rx_high_water;
+ u8 rx_low_water;
+ speed_t max_baud;
} Gpci_uart_info[] = {
- {MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L},
- {MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L},
- {MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L}
+ { MOXA_OTHER_UART, 16, 14, 1, 921600 },
+ { MOXA_MUST_MU150_HWID, 64, 48, 16, 230400 },
+ { MOXA_MUST_MU860_HWID, 128, 96, 32, 921600 }
};
#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
-struct mxser_cardinfo {
- char *name;
- unsigned int nports;
- unsigned int flags;
-};
-
-static const struct mxser_cardinfo mxser_cards[] = {
-/* 0*/ { "C168 series", 8, },
- { "C104 series", 4, },
- { "CI-104J series", 4, },
- { "C168H/PCI series", 8, },
- { "C104H/PCI series", 4, },
-/* 5*/ { "C102 series", 4, MXSER_HAS2 }, /* C102-ISA */
- { "CI-132 series", 4, MXSER_HAS2 },
- { "CI-134 series", 4, },
- { "CP-132 series", 2, },
- { "CP-114 series", 4, },
-/*10*/ { "CT-114 series", 4, },
- { "CP-102 series", 2, MXSER_HIGHBAUD },
- { "CP-104U series", 4, },
- { "CP-168U series", 8, },
- { "CP-132U series", 2, },
-/*15*/ { "CP-134U series", 4, },
- { "CP-104JU series", 4, },
- { "Moxa UC7000 Serial", 8, }, /* RC7000 */
- { "CP-118U series", 8, },
- { "CP-102UL series", 2, },
-/*20*/ { "CP-102U series", 2, },
- { "CP-118EL series", 8, },
- { "CP-168EL series", 8, },
- { "CP-104EL series", 4, },
- { "CB-108 series", 8, },
-/*25*/ { "CB-114 series", 4, },
- { "CB-134I series", 4, },
- { "CP-138U series", 8, },
- { "POS-104UL series", 4, },
- { "CP-114UL series", 4, },
-/*30*/ { "CP-102UF series", 2, },
- { "CP-112UL series", 2, },
-};
/* driver_data correspond to the lines in the structure above
see also ISA probe function before you change something */
static const struct pci_device_id mxser_pcibrds[] = {
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 8 },
{ PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 29 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP102UF), .driver_data = 30 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL), .driver_data = 31 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 2 | MXSER_HIGHBAUD },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 8 }, /* RC7000 */
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP102UF), .driver_data = 2 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL), .driver_data = 2 },
{ }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
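With the mxser_cards table gone, driver_data in the PCI table above directly encodes the port count in its low byte plus optional flags such as MXSER_HIGHBAUD. A small sketch of how a probe routine could unpack it (the function name is made up; the real probe is outside this hunk):

#include <linux/pci.h>
#include <linux/printk.h>

static void example_unpack_driver_data(const struct pci_device_id *ent)
{
	unsigned int nports = MXSER_NPORTS(ent->driver_data);

	pr_info("board provides %u ports%s\n", nports,
		(ent->driver_data & MXSER_HIGHBAUD) ? ", high-baud capable" : "");
}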
-static unsigned long ioaddr[MXSER_BOARDS];
static int ttymajor = MXSERMAJOR;
/* Variables for insmod */
MODULE_AUTHOR("Casper Yang");
MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
-module_param_hw_array(ioaddr, ulong, ioport, NULL, 0);
-MODULE_PARM_DESC(ioaddr, "ISA io addresses to look for a moxa board");
module_param(ttymajor, int, 0);
MODULE_LICENSE("GPL");
-struct mxser_log {
- int tick;
- unsigned long rxcnt[MXSER_PORTS];
- unsigned long txcnt[MXSER_PORTS];
-};
-
-struct mxser_mon {
- unsigned long rxcnt;
- unsigned long txcnt;
- unsigned long up_rxcnt;
- unsigned long up_txcnt;
- int modem_status;
- unsigned char hold_reason;
-};
-
-struct mxser_mon_ext {
- unsigned long rx_cnt[32];
- unsigned long tx_cnt[32];
- unsigned long up_rxcnt[32];
- unsigned long up_txcnt[32];
- int modem_status[32];
-
- long baudrate[32];
- int databits[32];
- int stopbits[32];
- int parity[32];
- int flowctrl[32];
- int fifo[32];
- int iftype[32];
-};
-
struct mxser_board;
struct mxser_port {
@@ -223,274 +241,147 @@ struct mxser_port {
unsigned long ioaddr;
unsigned long opmode_ioaddr;
- int max_baud;
- int rx_high_water;
- int rx_trigger; /* Rx fifo trigger level */
- int rx_low_water;
- int baud_base; /* max. speed */
+ u8 rx_high_water;
+ u8 rx_low_water;
int type; /* UART type */
- int x_char; /* xon/xoff character */
- int IER; /* Interrupt Enable Register */
- int MCR; /* Modem control register */
+ unsigned char x_char; /* xon/xoff character */
+ u8 IER; /* Interrupt Enable Register */
+ u8 MCR; /* Modem control register */
- unsigned char stop_rx;
unsigned char ldisc_stop_rx;
- int custom_divisor;
- unsigned char err_shadow;
-
struct async_icount icount; /* kernel counters for 4 input interrupts */
unsigned int timeout;
- int read_status_mask;
- int ignore_status_mask;
- unsigned int xmit_fifo_size;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
+ u8 read_status_mask;
+ u8 ignore_status_mask;
+ u8 xmit_fifo_size;
+ unsigned int xmit_head;
+ unsigned int xmit_tail;
+ unsigned int xmit_cnt;
int closing;
- struct ktermios normal_termios;
-
- struct mxser_mon mon_data;
-
spinlock_t slock;
};
struct mxser_board {
unsigned int idx;
+ unsigned short nports;
int irq;
- const struct mxser_cardinfo *info;
unsigned long vector;
- unsigned long vector_mask;
-
- int chip_flag;
- int uart_type;
- struct mxser_port ports[MXSER_PORTS_PER_BOARD];
-};
+ enum mxser_must_hwid must_hwid;
+ speed_t max_baud;
-struct mxser_mstatus {
- tcflag_t cflag;
- int cts;
- int dsr;
- int ri;
- int dcd;
+ struct mxser_port ports[];
};
-static struct mxser_board mxser_boards[MXSER_BOARDS];
+static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
static struct tty_driver *mxvar_sdriver;
-static struct mxser_log mxvar_log;
-static int mxser_set_baud_method[MXSER_PORTS + 1];
-static void mxser_enable_must_enchance_mode(unsigned long baseio)
+static u8 __mxser_must_set_EFR(unsigned long baseio, u8 clear, u8 set,
+ bool restore_LCR)
{
- u8 oldlcr;
- u8 efr;
+ u8 oldlcr, efr;
oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENHANCED, baseio + UART_LCR);
efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr |= MOXA_MUST_EFR_EFRB_ENABLE;
+ efr &= ~clear;
+ efr |= set;
outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
-}
-#ifdef CONFIG_PCI
-static void mxser_disable_must_enchance_mode(unsigned long baseio)
-{
- u8 oldlcr;
- u8 efr;
+ if (restore_LCR)
+ outb(oldlcr, baseio + UART_LCR);
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_EFRB_ENABLE;
+ return oldlcr;
+}
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
+static u8 mxser_must_select_bank(unsigned long baseio, u8 bank)
+{
+ return __mxser_must_set_EFR(baseio, MOXA_MUST_EFR_BANK_MASK, bank,
+ false);
}
-#endif
static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_BANK_MASK;
- efr |= MOXA_MUST_EFR_BANK0;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK0);
outb(value, baseio + MOXA_MUST_XON1_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_BANK_MASK;
- efr |= MOXA_MUST_EFR_BANK0;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK0);
outb(value, baseio + MOXA_MUST_XOFF1_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
static void mxser_set_must_fifo_value(struct mxser_port *info)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(info->ioaddr + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, info->ioaddr + UART_LCR);
-
- efr = inb(info->ioaddr + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_BANK_MASK;
- efr |= MOXA_MUST_EFR_BANK1;
-
- outb(efr, info->ioaddr + MOXA_MUST_EFR_REGISTER);
- outb((u8)info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER);
- outb((u8)info->rx_trigger, info->ioaddr + MOXA_MUST_RBRTI_REGISTER);
- outb((u8)info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER);
+ u8 oldlcr = mxser_must_select_bank(info->ioaddr, MOXA_MUST_EFR_BANK1);
+ outb(info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER);
+ outb(info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTI_REGISTER);
+ outb(info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER);
outb(oldlcr, info->ioaddr + UART_LCR);
}
static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_BANK_MASK;
- efr |= MOXA_MUST_EFR_BANK2;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK2);
outb(value, baseio + MOXA_MUST_ENUM_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
-#ifdef CONFIG_PCI
-static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
+static u8 mxser_get_must_hardware_id(unsigned long baseio)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_BANK_MASK;
- efr |= MOXA_MUST_EFR_BANK2;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- *pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
+ u8 oldlcr = mxser_must_select_bank(baseio, MOXA_MUST_EFR_BANK2);
+ u8 id = inb(baseio + MOXA_MUST_HWID_REGISTER);
outb(oldlcr, baseio + UART_LCR);
+
+ return id;
}
-#endif
-static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
+static void mxser_must_set_EFR(unsigned long baseio, u8 clear, u8 set)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_SF_MASK;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
+ __mxser_must_set_EFR(baseio, clear, set, true);
}
-static void mxser_enable_must_tx_software_flow_control(unsigned long baseio)
+static void mxser_must_set_enhance_mode(unsigned long baseio, bool enable)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
- efr |= MOXA_MUST_EFR_SF_TX1;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
+ mxser_must_set_EFR(baseio,
+ enable ? 0 : MOXA_MUST_EFR_EFRB_ENABLE,
+ enable ? MOXA_MUST_EFR_EFRB_ENABLE : 0);
}
-static void mxser_disable_must_tx_software_flow_control(unsigned long baseio)
+static void mxser_must_no_sw_flow_control(unsigned long baseio)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
+ mxser_must_set_EFR(baseio, MOXA_MUST_EFR_SF_MASK, 0);
}
-static void mxser_enable_must_rx_software_flow_control(unsigned long baseio)
+static void mxser_must_set_tx_sw_flow_control(unsigned long baseio, bool enable)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
- efr |= MOXA_MUST_EFR_SF_RX1;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
+ mxser_must_set_EFR(baseio, MOXA_MUST_EFR_SF_TX_MASK,
+ enable ? MOXA_MUST_EFR_SF_TX1 : 0);
}
-static void mxser_disable_must_rx_software_flow_control(unsigned long baseio)
+static void mxser_must_set_rx_sw_flow_control(unsigned long baseio, bool enable)
{
- u8 oldlcr;
- u8 efr;
-
- oldlcr = inb(baseio + UART_LCR);
- outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
-
- efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
- efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
-
- outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
- outb(oldlcr, baseio + UART_LCR);
+ mxser_must_set_EFR(baseio, MOXA_MUST_EFR_SF_RX_MASK,
+ enable ? MOXA_MUST_EFR_SF_RX1 : 0);
}
-#ifdef CONFIG_PCI
-static int CheckIsMoxaMust(unsigned long io)
+static enum mxser_must_hwid mxser_must_get_hwid(unsigned long io)
{
u8 oldmcr, hwid;
int i;
outb(0, io + UART_LCR);
- mxser_disable_must_enchance_mode(io);
+ mxser_must_set_enhance_mode(io, false);
oldmcr = inb(io + UART_MCR);
outb(0, io + UART_MCR);
mxser_set_must_xon1_value(io, 0x11);
@@ -499,49 +390,59 @@ static int CheckIsMoxaMust(unsigned long io)
return MOXA_OTHER_UART;
}
- mxser_get_must_hardware_id(io, &hwid);
- for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
+ hwid = mxser_get_must_hardware_id(io);
+ for (i = 1; i < UART_INFO_NUM; i++) /* 0 = OTHER_UART */
if (hwid == Gpci_uart_info[i].type)
- return (int)hwid;
- }
+ return hwid;
+
return MOXA_OTHER_UART;
}
-#endif
-static void process_txrx_fifo(struct mxser_port *info)
+static bool mxser_16550A_or_MUST(struct mxser_port *info)
{
- int i;
+ return info->type == PORT_16550A || info->board->must_hwid;
+}
- if ((info->type == PORT_16450) || (info->type == PORT_8250)) {
- info->rx_trigger = 1;
+static void mxser_process_txrx_fifo(struct mxser_port *info)
+{
+ unsigned int i;
+
+ if (info->type == PORT_16450 || info->type == PORT_8250) {
info->rx_high_water = 1;
info->rx_low_water = 1;
info->xmit_fifo_size = 1;
- } else
- for (i = 0; i < UART_INFO_NUM; i++)
- if (info->board->chip_flag == Gpci_uart_info[i].type) {
- info->rx_trigger = Gpci_uart_info[i].rx_trigger;
- info->rx_low_water = Gpci_uart_info[i].rx_low_water;
- info->rx_high_water = Gpci_uart_info[i].rx_high_water;
- info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size;
- break;
- }
+ return;
+ }
+
+ for (i = 0; i < UART_INFO_NUM; i++)
+ if (info->board->must_hwid == Gpci_uart_info[i].type) {
+ info->rx_low_water = Gpci_uart_info[i].rx_low_water;
+ info->rx_high_water = Gpci_uart_info[i].rx_high_water;
+ info->xmit_fifo_size = Gpci_uart_info[i].fifo_size;
+ break;
+ }
}
-static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
+static void __mxser_start_tx(struct mxser_port *info)
{
- static unsigned char mxser_msr[MXSER_PORTS + 1];
- unsigned char status = 0;
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+}
- status = inb(baseaddr + UART_MSR);
+static void mxser_start_tx(struct mxser_port *info)
+{
+ unsigned long flags;
- mxser_msr[port] &= 0x0F;
- mxser_msr[port] |= status;
- status = mxser_msr[port];
- if (mode)
- mxser_msr[port] = 0;
+ spin_lock_irqsave(&info->slock, flags);
+ __mxser_start_tx(info);
+ spin_unlock_irqrestore(&info->slock, flags);
+}
- return status;
+static void __mxser_stop_tx(struct mxser_port *info)
+{
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
}
static int mxser_carrier_raised(struct tty_port *port)
@@ -554,38 +455,36 @@ static void mxser_dtr_rts(struct tty_port *port, int on)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
unsigned long flags;
+ u8 mcr;
spin_lock_irqsave(&mp->slock, flags);
+ mcr = inb(mp->ioaddr + UART_MCR);
if (on)
- outb(inb(mp->ioaddr + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
+ mcr |= UART_MCR_DTR | UART_MCR_RTS;
else
- outb(inb(mp->ioaddr + UART_MCR)&~(UART_MCR_DTR | UART_MCR_RTS),
- mp->ioaddr + UART_MCR);
+ mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
+ outb(mcr, mp->ioaddr + UART_MCR);
spin_unlock_irqrestore(&mp->slock, flags);
}
-static int mxser_set_baud(struct tty_struct *tty, long newspd)
+static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
{
struct mxser_port *info = tty->driver_data;
unsigned int quot = 0, baud;
unsigned char cval;
u64 timeout;
- if (!info->ioaddr)
- return -1;
-
- if (newspd > info->max_baud)
+ if (newspd > info->board->max_baud)
return -1;
if (newspd == 134) {
- quot = 2 * info->baud_base / 269;
+ quot = 2 * MXSER_BAUD_BASE / 269;
tty_encode_baud_rate(tty, 134, 134);
} else if (newspd) {
- quot = info->baud_base / newspd;
+ quot = MXSER_BAUD_BASE / newspd;
if (quot == 0)
quot = 1;
- baud = info->baud_base/quot;
+ baud = MXSER_BAUD_BASE / quot;
tty_encode_baud_rate(tty, baud, baud);
} else {
quot = 0;
@@ -596,7 +495,7 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
* u64 domain
*/
timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot;
- do_div(timeout, info->baud_base);
+ do_div(timeout, MXSER_BAUD_BASE);
info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */
if (quot) {
@@ -618,7 +517,7 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
#ifdef BOTHER
if (C_BAUD(tty) == BOTHER) {
- quot = info->baud_base % newspd;
+ quot = MXSER_BAUD_BASE % newspd;
quot *= 8;
if (quot % newspd > newspd / 2) {
quot /= newspd;
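A worked example of the divisor arithmetic above (editor's illustration, not part of the patch): quot is an integer division of MXSER_BAUD_BASE by the requested rate, so rates that do not divide 921600 evenly end up running slightly fast, and the resulting rate is what tty_encode_baud_rate() reports back.

#include <stdio.h>

int main(void)
{
	unsigned int base = 921600;	/* MXSER_BAUD_BASE */
	unsigned int requested[] = { 38400, 115200, 250000 };
	unsigned int i;

	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		unsigned int quot = base / requested[i];

		printf("requested %6u -> quot %2u -> actual %6u\n",
		       requested[i], quot, base / quot);
	}
	return 0;
}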
@@ -634,6 +533,28 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
return 0;
}
+static void mxser_handle_cts(struct tty_struct *tty, struct mxser_port *info,
+ u8 msr)
+{
+ bool cts = msr & UART_MSR_CTS;
+
+ if (tty->hw_stopped) {
+ if (cts) {
+ tty->hw_stopped = 0;
+
+ if (!mxser_16550A_or_MUST(info))
+ __mxser_start_tx(info);
+ tty_wakeup(tty);
+ }
+ return;
+ } else if (cts)
+ return;
+
+ tty->hw_stopped = 1;
+ if (!mxser_16550A_or_MUST(info))
+ __mxser_stop_tx(info);
+}
+
/*
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
@@ -642,35 +563,30 @@ static void mxser_change_speed(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval, fcr;
- unsigned char status;
cflag = tty->termios.c_cflag;
- if (!info->ioaddr)
- return;
- if (mxser_set_baud_method[tty->index] == 0)
- mxser_set_baud(tty, tty_get_baud_rate(tty));
+ mxser_set_baud(tty, tty_get_baud_rate(tty));
/* byte size and parity */
switch (cflag & CSIZE) {
+ default:
case CS5:
- cval = 0x00;
+ cval = UART_LCR_WLEN5;
break;
case CS6:
- cval = 0x01;
+ cval = UART_LCR_WLEN6;
break;
case CS7:
- cval = 0x02;
+ cval = UART_LCR_WLEN7;
break;
case CS8:
- cval = 0x03;
+ cval = UART_LCR_WLEN8;
break;
- default:
- cval = 0x00;
- break; /* too keep GCC shut... */
}
+
if (cflag & CSTOPB)
- cval |= 0x04;
+ cval |= UART_LCR_STOP;
if (cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(cflag & PARODD))
@@ -679,7 +595,7 @@ static void mxser_change_speed(struct tty_struct *tty)
cval |= UART_LCR_SPAR;
if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
- if (info->board->chip_flag) {
+ if (info->board->must_hwid) {
fcr = UART_FCR_ENABLE_FIFO;
fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
mxser_set_must_fifo_value(info);
@@ -687,11 +603,11 @@ static void mxser_change_speed(struct tty_struct *tty)
fcr = 0;
} else {
fcr = UART_FCR_ENABLE_FIFO;
- if (info->board->chip_flag) {
+ if (info->board->must_hwid) {
fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
mxser_set_must_fifo_value(info);
} else {
- switch (info->rx_trigger) {
+ switch (info->rx_high_water) {
case 1:
fcr |= UART_FCR_TRIGGER_1;
break;
@@ -714,35 +630,11 @@ static void mxser_change_speed(struct tty_struct *tty)
tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
if (cflag & CRTSCTS) {
info->IER |= UART_IER_MSI;
- if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
+ if (mxser_16550A_or_MUST(info)) {
info->MCR |= UART_MCR_AFE;
} else {
- status = inb(info->ioaddr + UART_MSR);
- if (tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- tty->hw_stopped = 0;
- if (info->type != PORT_16550A &&
- !info->board->chip_flag) {
- outb(info->IER & ~UART_IER_THRI,
- info->ioaddr +
- UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr +
- UART_IER);
- }
- tty_wakeup(tty);
- }
- } else {
- if (!(status & UART_MSR_CTS)) {
- tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->board->chip_flag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->ioaddr +
- UART_IER);
- }
- }
- }
+ mxser_handle_cts(tty, info,
+ inb(info->ioaddr + UART_MSR));
}
}
outb(info->MCR, info->ioaddr + UART_MCR);
@@ -780,23 +672,11 @@ static void mxser_change_speed(struct tty_struct *tty)
UART_LSR_FE;
}
}
- if (info->board->chip_flag) {
+ if (info->board->must_hwid) {
mxser_set_must_xon1_value(info->ioaddr, START_CHAR(tty));
mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(tty));
- if (I_IXON(tty)) {
- mxser_enable_must_rx_software_flow_control(
- info->ioaddr);
- } else {
- mxser_disable_must_rx_software_flow_control(
- info->ioaddr);
- }
- if (I_IXOFF(tty)) {
- mxser_enable_must_tx_software_flow_control(
- info->ioaddr);
- } else {
- mxser_disable_must_tx_software_flow_control(
- info->ioaddr);
- }
+ mxser_must_set_rx_sw_flow_control(info->ioaddr, I_IXON(tty));
+ mxser_must_set_tx_sw_flow_control(info->ioaddr, I_IXOFF(tty));
}
@@ -816,7 +696,6 @@ static void mxser_check_modem_status(struct tty_struct *tty,
port->icount.dcd++;
if (status & UART_MSR_DCTS)
port->icount.cts++;
- port->mon_data.modem_status = status;
wake_up_interruptible(&port->port.delta_msr_wait);
if (tty_port_check_carrier(&port->port) && (status & UART_MSR_DDCD)) {
@@ -824,33 +703,8 @@ static void mxser_check_modem_status(struct tty_struct *tty,
wake_up_interruptible(&port->port.open_wait);
}
- if (tty_port_cts_enabled(&port->port)) {
- if (tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- tty->hw_stopped = 0;
-
- if ((port->type != PORT_16550A) &&
- (!port->board->chip_flag)) {
- outb(port->IER & ~UART_IER_THRI,
- port->ioaddr + UART_IER);
- port->IER |= UART_IER_THRI;
- outb(port->IER, port->ioaddr +
- UART_IER);
- }
- tty_wakeup(tty);
- }
- } else {
- if (!(status & UART_MSR_CTS)) {
- tty->hw_stopped = 1;
- if (port->type != PORT_16550A &&
- !port->board->chip_flag) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr +
- UART_IER);
- }
- }
- }
- }
+ if (tty_port_cts_enabled(&port->port))
+ mxser_handle_cts(tty, port, status);
}
static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
@@ -865,7 +719,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
spin_lock_irqsave(&info->slock, flags);
- if (!info->ioaddr || !info->type) {
+ if (!info->type) {
set_bit(TTY_IO_ERROR, &tty->flags);
free_page(page);
spin_unlock_irqrestore(&info->slock, flags);
@@ -877,7 +731,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
* Clear the FIFO buffers and disable them
* (they will be reenabled in mxser_change_speed())
*/
- if (info->board->chip_flag)
+ if (info->board->must_hwid)
outb((UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT |
MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR);
@@ -919,7 +773,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
*/
info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
- if (info->board->chip_flag)
+ if (info->board->must_hwid)
info->IER |= MOXA_MUST_IER_EGDAI;
outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
@@ -971,7 +825,7 @@ static void mxser_shutdown_port(struct tty_port *port)
outb(0x00, info->ioaddr + UART_IER);
/* clear Rx/Tx FIFO's */
- if (info->board->chip_flag)
+ if (info->board->must_hwid)
outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
MOXA_MUST_FCR_GDA_MODE_ENABLE,
info->ioaddr + UART_FCR);
@@ -983,8 +837,8 @@ static void mxser_shutdown_port(struct tty_port *port)
(void) inb(info->ioaddr + UART_RX);
- if (info->board->chip_flag)
- SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ if (info->board->must_hwid)
+ mxser_must_no_sw_flow_control(info->ioaddr);
spin_unlock_irqrestore(&info->slock, flags);
}
@@ -997,18 +851,12 @@ static void mxser_shutdown_port(struct tty_port *port)
*/
static int mxser_open(struct tty_struct *tty, struct file *filp)
{
- struct mxser_port *info;
- int line;
+ struct tty_port *tport = tty->port;
+ struct mxser_port *port = container_of(tport, struct mxser_port, port);
- line = tty->index;
- if (line == MXSER_PORTS)
- return 0;
- info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD];
- if (!info->ioaddr)
- return -ENODEV;
+ tty->driver_data = port;
- tty->driver_data = info;
- return tty_port_open(&info->port, tty, filp);
+ return tty_port_open(tport, tty, filp);
}
static void mxser_flush_buffer(struct tty_struct *tty)
@@ -1043,7 +891,7 @@ static void mxser_close_port(struct tty_port *port)
* line status register.
*/
info->IER &= ~UART_IER_RLSI;
- if (info->board->chip_flag)
+ if (info->board->must_hwid)
info->IER &= ~MOXA_MUST_RECV_ISR;
outb(info->IER, info->ioaddr + UART_IER);
@@ -1071,7 +919,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
struct mxser_port *info = tty->driver_data;
struct tty_port *port = &info->port;
- if (tty->index == MXSER_PORTS || info == NULL)
+ if (info == NULL)
return;
if (tty_port_close_start(port, tty, filp) == 0)
return;
@@ -1118,18 +966,10 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
total += c;
}
- if (info->xmit_cnt && !tty->stopped) {
- if (!tty->hw_stopped ||
- (info->type == PORT_16550A) ||
- (info->board->chip_flag)) {
- spin_lock_irqsave(&info->slock, flags);
- outb(info->IER & ~UART_IER_THRI, info->ioaddr +
- UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
- }
- }
+ if (info->xmit_cnt && !tty->flow.stopped)
+ if (!tty->hw_stopped || mxser_16550A_or_MUST(info))
+ mxser_start_tx(info);
+
return total;
}
@@ -1149,17 +989,7 @@ static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
info->xmit_head &= SERIAL_XMIT_SIZE - 1;
info->xmit_cnt++;
spin_unlock_irqrestore(&info->slock, flags);
- if (!tty->stopped) {
- if (!tty->hw_stopped ||
- (info->type == PORT_16550A) ||
- info->board->chip_flag) {
- spin_lock_irqsave(&info->slock, flags);
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
- }
- }
+
return 1;
}
@@ -1167,23 +997,15 @@ static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
static void mxser_flush_chars(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- unsigned long flags;
- if (info->xmit_cnt <= 0 || tty->stopped || !info->port.xmit_buf ||
- (tty->hw_stopped && info->type != PORT_16550A &&
- !info->board->chip_flag))
+ if (!info->xmit_cnt || tty->flow.stopped || !info->port.xmit_buf ||
+ (tty->hw_stopped && !mxser_16550A_or_MUST(info)))
return;
- spin_lock_irqsave(&info->slock, flags);
-
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
-
- spin_unlock_irqrestore(&info->slock, flags);
+ mxser_start_tx(info);
}
-static int mxser_write_room(struct tty_struct *tty)
+static unsigned int mxser_write_room(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
int ret;
@@ -1192,7 +1014,7 @@ static int mxser_write_room(struct tty_struct *tty)
return ret < 0 ? 0 : ret;
}
-static int mxser_chars_in_buffer(struct tty_struct *tty)
+static unsigned int mxser_chars_in_buffer(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
return info->xmit_cnt;
@@ -1210,9 +1032,6 @@ static int mxser_get_serial_info(struct tty_struct *tty,
struct tty_port *port = &info->port;
unsigned int closing_wait, close_delay;
- if (tty->index == MXSER_PORTS)
- return -ENOTTY;
-
mutex_lock(&port->mutex);
close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
@@ -1225,10 +1044,10 @@ static int mxser_get_serial_info(struct tty_struct *tty,
ss->port = info->ioaddr,
ss->irq = info->board->irq,
ss->flags = info->port.flags,
- ss->baud_base = info->baud_base,
+ ss->baud_base = MXSER_BAUD_BASE,
ss->close_delay = close_delay;
ss->closing_wait = closing_wait;
- ss->custom_divisor = info->custom_divisor,
+ ss->custom_divisor = MXSER_CUSTOM_DIVISOR,
mutex_unlock(&port->mutex);
return 0;
}
@@ -1240,19 +1059,13 @@ static int mxser_set_serial_info(struct tty_struct *tty,
struct tty_port *port = &info->port;
speed_t baud;
unsigned long sl_flags;
- unsigned int flags, close_delay, closing_wait;
+ unsigned int old_speed, close_delay, closing_wait;
int retval = 0;
- if (tty->index == MXSER_PORTS)
- return -ENOTTY;
if (tty_io_error(tty))
return -EIO;
mutex_lock(&port->mutex);
- if (!info->ioaddr) {
- mutex_unlock(&port->mutex);
- return -ENODEV;
- }
if (ss->irq != info->board->irq ||
ss->port != info->ioaddr) {
@@ -1260,7 +1073,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
return -EINVAL;
}
- flags = port->flags & ASYNC_SPD_MASK;
+ old_speed = port->flags & ASYNC_SPD_MASK;
close_delay = msecs_to_jiffies(ss->close_delay * 10);
closing_wait = ss->closing_wait;
@@ -1268,15 +1081,15 @@ static int mxser_set_serial_info(struct tty_struct *tty,
closing_wait = msecs_to_jiffies(closing_wait * 10);
if (!capable(CAP_SYS_ADMIN)) {
- if ((ss->baud_base != info->baud_base) ||
- (close_delay != info->port.close_delay) ||
- (closing_wait != info->port.closing_wait) ||
- ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
+ if ((ss->baud_base != MXSER_BAUD_BASE) ||
+ (close_delay != port->close_delay) ||
+ (closing_wait != port->closing_wait) ||
+ ((ss->flags & ~ASYNC_USR_MASK) != (port->flags & ~ASYNC_USR_MASK))) {
mutex_unlock(&port->mutex);
return -EPERM;
}
- info->port.flags = ((info->port.flags & ~ASYNC_USR_MASK) |
- (ss->flags & ASYNC_USR_MASK));
+ port->flags = (port->flags & ~ASYNC_USR_MASK) |
+ (ss->flags & ASYNC_USR_MASK);
} else {
/*
* OK, past this point, all the error checking has been done.
@@ -1287,9 +1100,9 @@ static int mxser_set_serial_info(struct tty_struct *tty,
port->close_delay = close_delay;
port->closing_wait = closing_wait;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
- (ss->baud_base != info->baud_base ||
+ (ss->baud_base != MXSER_BAUD_BASE ||
ss->custom_divisor !=
- info->custom_divisor)) {
+ MXSER_CUSTOM_DIVISOR)) {
if (ss->custom_divisor == 0) {
mutex_unlock(&port->mutex);
return -EINVAL;
@@ -1300,11 +1113,11 @@ static int mxser_set_serial_info(struct tty_struct *tty,
info->type = ss->type;
- process_txrx_fifo(info);
+ mxser_process_txrx_fifo(info);
}
if (tty_port_initialized(port)) {
- if (flags != (port->flags & ASYNC_SPD_MASK)) {
+ if (old_speed != (port->flags & ASYNC_SPD_MASK)) {
spin_lock_irqsave(&info->slock, sl_flags);
mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, sl_flags);
@@ -1348,19 +1161,16 @@ static int mxser_tiocmget(struct tty_struct *tty)
unsigned char control, status;
unsigned long flags;
-
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
if (tty_io_error(tty))
return -EIO;
- control = info->MCR;
-
spin_lock_irqsave(&info->slock, flags);
+ control = info->MCR;
status = inb(info->ioaddr + UART_MSR);
if (status & UART_MSR_ANY_DELTA)
mxser_check_modem_status(tty, info, status);
spin_unlock_irqrestore(&info->slock, flags);
+
return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
@@ -1375,9 +1185,6 @@ static int mxser_tiocmset(struct tty_struct *tty,
struct mxser_port *info = tty->driver_data;
unsigned long flags;
-
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
if (tty_io_error(tty))
return -EIO;
@@ -1398,267 +1205,6 @@ static int mxser_tiocmset(struct tty_struct *tty,
return 0;
}
-static int __init mxser_program_mode(int port)
-{
- int id, i, j, n;
-
- outb(0, port);
- outb(0, port);
- outb(0, port);
- (void)inb(port);
- (void)inb(port);
- outb(0, port);
- (void)inb(port);
-
- id = inb(port + 1) & 0x1F;
- if ((id != C168_ASIC_ID) &&
- (id != C104_ASIC_ID) &&
- (id != C102_ASIC_ID) &&
- (id != CI132_ASIC_ID) &&
- (id != CI134_ASIC_ID) &&
- (id != CI104J_ASIC_ID))
- return -1;
- for (i = 0, j = 0; i < 4; i++) {
- n = inb(port + 2);
- if (n == 'M') {
- j = 1;
- } else if ((j == 1) && (n == 1)) {
- j = 2;
- break;
- } else
- j = 0;
- }
- if (j != 2)
- id = -2;
- return id;
-}
-
-static void __init mxser_normal_mode(int port)
-{
- int i, n;
-
- outb(0xA5, port + 1);
- outb(0x80, port + 3);
- outb(12, port + 0); /* 9600 bps */
- outb(0, port + 1);
- outb(0x03, port + 3); /* 8 data bits */
- outb(0x13, port + 4); /* loop back mode */
- for (i = 0; i < 16; i++) {
- n = inb(port + 5);
- if ((n & 0x61) == 0x60)
- break;
- if ((n & 1) == 1)
- (void)inb(port);
- }
- outb(0x00, port + 4);
-}
-
-#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
-#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
-#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
-#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
-#define EN_CCMD 0x000 /* Chip's command register */
-#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
-#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
-#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
-#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
-#define EN0_DCFG 0x00E /* Data configuration reg WR */
-#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
-#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
-#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
-static int __init mxser_read_register(int port, unsigned short *regs)
-{
- int i, k, value, id;
- unsigned int j;
-
- id = mxser_program_mode(port);
- if (id < 0)
- return id;
- for (i = 0; i < 14; i++) {
- k = (i & 0x3F) | 0x180;
- for (j = 0x100; j > 0; j >>= 1) {
- outb(CHIP_CS, port);
- if (k & j) {
- outb(CHIP_CS | CHIP_DO, port);
- outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
- } else {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
- }
- }
- (void)inb(port);
- value = 0;
- for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port);
- if (inb(port) & CHIP_DI)
- value |= j;
- }
- regs[i] = value;
- outb(0, port);
- }
- mxser_normal_mode(port);
- return id;
-}
-
-static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
-{
- struct mxser_port *ip;
- struct tty_port *port;
- struct tty_struct *tty;
- int result, status;
- unsigned int i, j;
- int ret = 0;
-
- switch (cmd) {
- case MOXA_GET_MAJOR:
- printk_ratelimited(KERN_WARNING "mxser: '%s' uses deprecated ioctl "
- "%x (GET_MAJOR), fix your userspace\n",
- current->comm, cmd);
- return put_user(ttymajor, (int __user *)argp);
-
- case MOXA_CHKPORTENABLE:
- result = 0;
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
- if (mxser_boards[i].ports[j].ioaddr)
- result |= (1 << i);
- return put_user(result, (unsigned long __user *)argp);
- case MOXA_GETDATACOUNT:
- /* The receive side is locked by port->slock but it isn't
- clear that an exact snapshot is worth copying here */
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
- ret = -EFAULT;
- return ret;
- case MOXA_GETMSTATUS: {
- struct mxser_mstatus ms, __user *msu = argp;
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
- ip = &mxser_boards[i].ports[j];
- port = &ip->port;
- memset(&ms, 0, sizeof(ms));
-
- mutex_lock(&port->mutex);
- if (!ip->ioaddr)
- goto copy;
-
- tty = tty_port_tty_get(port);
-
- if (!tty)
- ms.cflag = ip->normal_termios.c_cflag;
- else
- ms.cflag = tty->termios.c_cflag;
- tty_kref_put(tty);
- spin_lock_irq(&ip->slock);
- status = inb(ip->ioaddr + UART_MSR);
- spin_unlock_irq(&ip->slock);
- if (status & UART_MSR_DCD)
- ms.dcd = 1;
- if (status & UART_MSR_DSR)
- ms.dsr = 1;
- if (status & UART_MSR_CTS)
- ms.cts = 1;
- copy:
- mutex_unlock(&port->mutex);
- if (copy_to_user(msu, &ms, sizeof(ms)))
- return -EFAULT;
- msu++;
- }
- return 0;
- }
- case MOXA_ASPP_MON_EXT: {
- struct mxser_mon_ext *me; /* it's 2k, stack unfriendly */
- unsigned int cflag, iflag, p;
- u8 opmode;
-
- me = kzalloc(sizeof(*me), GFP_KERNEL);
- if (!me)
- return -ENOMEM;
-
- for (i = 0, p = 0; i < MXSER_BOARDS; i++) {
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++, p++) {
- if (p >= ARRAY_SIZE(me->rx_cnt)) {
- i = MXSER_BOARDS;
- break;
- }
- ip = &mxser_boards[i].ports[j];
- port = &ip->port;
-
- mutex_lock(&port->mutex);
- if (!ip->ioaddr) {
- mutex_unlock(&port->mutex);
- continue;
- }
-
- spin_lock_irq(&ip->slock);
- status = mxser_get_msr(ip->ioaddr, 0, p);
-
- if (status & UART_MSR_TERI)
- ip->icount.rng++;
- if (status & UART_MSR_DDSR)
- ip->icount.dsr++;
- if (status & UART_MSR_DDCD)
- ip->icount.dcd++;
- if (status & UART_MSR_DCTS)
- ip->icount.cts++;
-
- ip->mon_data.modem_status = status;
- me->rx_cnt[p] = ip->mon_data.rxcnt;
- me->tx_cnt[p] = ip->mon_data.txcnt;
- me->up_rxcnt[p] = ip->mon_data.up_rxcnt;
- me->up_txcnt[p] = ip->mon_data.up_txcnt;
- me->modem_status[p] =
- ip->mon_data.modem_status;
- spin_unlock_irq(&ip->slock);
-
- tty = tty_port_tty_get(&ip->port);
-
- if (!tty) {
- cflag = ip->normal_termios.c_cflag;
- iflag = ip->normal_termios.c_iflag;
- me->baudrate[p] = tty_termios_baud_rate(&ip->normal_termios);
- } else {
- cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
- me->baudrate[p] = tty_get_baud_rate(tty);
- }
- tty_kref_put(tty);
-
- me->databits[p] = cflag & CSIZE;
- me->stopbits[p] = cflag & CSTOPB;
- me->parity[p] = cflag & (PARENB | PARODD |
- CMSPAR);
-
- if (cflag & CRTSCTS)
- me->flowctrl[p] |= 0x03;
-
- if (iflag & (IXON | IXOFF))
- me->flowctrl[p] |= 0x0C;
-
- if (ip->type == PORT_16550A)
- me->fifo[p] = 1;
-
- if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) {
- opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
- opmode &= OP_MODE_MASK;
- } else {
- opmode = RS232_MODE;
- }
- me->iftype[p] = opmode;
- mutex_unlock(&port->mutex);
- }
- }
- if (copy_to_user(argp, me, sizeof(*me)))
- ret = -EFAULT;
- kfree(me);
- return ret;
- }
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg,
struct async_icount *cprev)
{
@@ -1680,6 +1226,41 @@ static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg,
return ret;
}
+/* We should likely switch to TIOCGRS485/TIOCSRS485. */
+static int mxser_ioctl_op_mode(struct mxser_port *port, int index, bool set,
+ int __user *u_opmode)
+{
+ int opmode, p = index % 4;
+ int shiftbit = p * 2;
+ u8 val;
+
+ if (port->board->must_hwid != MOXA_MUST_MU860_HWID)
+ return -EFAULT;
+
+ if (set) {
+ if (get_user(opmode, u_opmode))
+ return -EFAULT;
+
+ if (opmode & ~OP_MODE_MASK)
+ return -EINVAL;
+
+ spin_lock_irq(&port->slock);
+ val = inb(port->opmode_ioaddr);
+ val &= ~(OP_MODE_MASK << shiftbit);
+ val |= (opmode << shiftbit);
+ outb(val, port->opmode_ioaddr);
+ spin_unlock_irq(&port->slock);
+
+ return 0;
+ }
+
+ spin_lock_irq(&port->slock);
+ opmode = inb(port->opmode_ioaddr) >> shiftbit;
+ spin_unlock_irq(&port->slock);
+
+ return put_user(opmode & OP_MODE_MASK, u_opmode);
+}
+
static int mxser_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
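For completeness, a hedged userspace sketch of the op-mode interface that mxser_ioctl_op_mode() keeps supporting; the ioctl numbers repeat the MOXA defines at the top of this file, and /dev/ttyMI0 is only an example device node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define MOXA			0x400
#define MOXA_SET_OP_MODE	(MOXA + 66)
#define MOXA_GET_OP_MODE	(MOXA + 67)
#define RS485_2WIRE_MODE	1

int main(void)
{
	int mode = RS485_2WIRE_MODE;
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, MOXA_SET_OP_MODE, &mode))
		perror("MOXA_SET_OP_MODE");
	if (!ioctl(fd, MOXA_GET_OP_MODE, &mode))
		printf("op mode is now %d\n", mode);
	close(fd);
	return 0;
}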
@@ -1688,47 +1269,9 @@ static int mxser_ioctl(struct tty_struct *tty,
unsigned long flags;
void __user *argp = (void __user *)arg;
- if (tty->index == MXSER_PORTS)
- return mxser_ioctl_special(cmd, argp);
-
- if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
- int p;
- unsigned long opmode;
- static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
- int shiftbit;
- unsigned char val, mask;
-
- if (info->board->chip_flag != MOXA_MUST_MU860_HWID)
- return -EFAULT;
-
- p = tty->index % 4;
- if (cmd == MOXA_SET_OP_MODE) {
- if (get_user(opmode, (int __user *) argp))
- return -EFAULT;
- if (opmode != RS232_MODE &&
- opmode != RS485_2WIRE_MODE &&
- opmode != RS422_MODE &&
- opmode != RS485_4WIRE_MODE)
- return -EFAULT;
- mask = ModeMask[p];
- shiftbit = p * 2;
- spin_lock_irq(&info->slock);
- val = inb(info->opmode_ioaddr);
- val &= mask;
- val |= (opmode << shiftbit);
- outb(val, info->opmode_ioaddr);
- spin_unlock_irq(&info->slock);
- } else {
- shiftbit = p * 2;
- spin_lock_irq(&info->slock);
- opmode = inb(info->opmode_ioaddr) >> shiftbit;
- spin_unlock_irq(&info->slock);
- opmode &= OP_MODE_MASK;
- if (put_user(opmode, (int __user *)argp))
- return -EFAULT;
- }
- return 0;
- }
+ if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE)
+ return mxser_ioctl_op_mode(info, tty->index,
+ cmd == MOXA_SET_OP_MODE, argp);
if (cmd != TIOCMIWAIT && tty_io_error(tty))
return -EIO;
@@ -1749,72 +1292,6 @@ static int mxser_ioctl(struct tty_struct *tty,
return wait_event_interruptible(info->port.delta_msr_wait,
mxser_cflags_changed(info, arg, &cnow));
- case MOXA_HighSpeedOn:
- return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
- case MOXA_SDS_RSTICOUNTER:
- spin_lock_irq(&info->slock);
- info->mon_data.rxcnt = 0;
- info->mon_data.txcnt = 0;
- spin_unlock_irq(&info->slock);
- return 0;
-
- case MOXA_ASPP_OQUEUE:{
- int len, lsr;
-
- len = mxser_chars_in_buffer(tty);
- spin_lock_irq(&info->slock);
- lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE;
- spin_unlock_irq(&info->slock);
- len += (lsr ? 0 : 1);
-
- return put_user(len, (int __user *)argp);
- }
- case MOXA_ASPP_MON: {
- int mcr, status;
-
- spin_lock_irq(&info->slock);
- status = mxser_get_msr(info->ioaddr, 1, tty->index);
- mxser_check_modem_status(tty, info, status);
-
- mcr = inb(info->ioaddr + UART_MCR);
- spin_unlock_irq(&info->slock);
-
- if (mcr & MOXA_MUST_MCR_XON_FLAG)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-
- if (mcr & MOXA_MUST_MCR_TX_XON)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-
- if (tty->hw_stopped)
- info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
- else
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
-
- if (copy_to_user(argp, &info->mon_data,
- sizeof(struct mxser_mon)))
- return -EFAULT;
-
- return 0;
- }
- case MOXA_ASPP_LSTATUS: {
- if (put_user(info->err_shadow, (unsigned char __user *)argp))
- return -EFAULT;
-
- info->err_shadow = 0;
- return 0;
- }
- case MOXA_SET_BAUD_METHOD: {
- int method;
-
- if (get_user(method, (int __user *)argp))
- return -EFAULT;
- mxser_set_baud_method[tty->index] = method;
- return put_user(method, (int __user *)argp);
- }
default:
return -ENOIOCTLCMD;
}
@@ -1860,7 +1337,7 @@ static void mxser_stoprx(struct tty_struct *tty)
info->ldisc_stop_rx = 1;
if (I_IXOFF(tty)) {
- if (info->board->chip_flag) {
+ if (info->board->must_hwid) {
info->IER &= ~MOXA_MUST_RECV_ISR;
outb(info->IER, info->ioaddr + UART_IER);
} else {
@@ -1896,7 +1373,7 @@ static void mxser_unthrottle(struct tty_struct *tty)
if (info->x_char)
info->x_char = 0;
else {
- if (info->board->chip_flag) {
+ if (info->board->must_hwid) {
info->IER |= MOXA_MUST_RECV_ISR;
outb(info->IER, info->ioaddr + UART_IER);
} else {
@@ -1917,7 +1394,7 @@ static void mxser_unthrottle(struct tty_struct *tty)
/*
* mxser_stop() and mxser_start()
*
- * This routines are called before setting or resetting tty->stopped.
+ * These routines are called before setting or resetting tty->flow.stopped.
* They enable or disable transmitter interrupts, as necessary.
*/
static void mxser_stop(struct tty_struct *tty)
@@ -1926,10 +1403,8 @@ static void mxser_stop(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
- if (info->IER & UART_IER_THRI) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
+ if (info->IER & UART_IER_THRI)
+ __mxser_stop_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
@@ -1939,11 +1414,8 @@ static void mxser_start(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->port.xmit_buf) {
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
+ if (info->xmit_cnt && info->port.xmit_buf)
+ __mxser_start_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
@@ -1963,12 +1435,11 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
/* Handle sw stopped */
if ((old_termios->c_iflag & IXON) && !I_IXON(tty)) {
- tty->stopped = 0;
+ tty->flow.stopped = 0;
- if (info->board->chip_flag) {
+ if (info->board->must_hwid) {
spin_lock_irqsave(&info->slock, flags);
- mxser_disable_must_rx_software_flow_control(
- info->ioaddr);
+ mxser_must_set_rx_sw_flow_control(info->ioaddr, false);
spin_unlock_irqrestore(&info->slock, flags);
}
@@ -2051,88 +1522,92 @@ static int mxser_rs_break(struct tty_struct *tty, int break_state)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
+ u8 lcr;
spin_lock_irqsave(&info->slock, flags);
+ lcr = inb(info->ioaddr + UART_LCR);
if (break_state == -1)
- outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
- info->ioaddr + UART_LCR);
+ lcr |= UART_LCR_SBC;
else
- outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
- info->ioaddr + UART_LCR);
+ lcr &= ~UART_LCR_SBC;
+ outb(lcr, info->ioaddr + UART_LCR);
spin_unlock_irqrestore(&info->slock, flags);
+
return 0;
}
-static void mxser_receive_chars(struct tty_struct *tty,
- struct mxser_port *port, int *status)
+static bool mxser_receive_chars_new(struct tty_struct *tty,
+ struct mxser_port *port, u8 status)
{
- unsigned char ch, gdl;
- int ignored = 0;
- int cnt = 0;
- int recv_room;
- int max = 256;
+ enum mxser_must_hwid hwid = port->board->must_hwid;
+ u8 gdl;
+
+ if (hwid == MOXA_OTHER_UART)
+ return false;
+ if (status & UART_LSR_BRK_ERROR_BITS)
+ return false;
+ if (hwid == MOXA_MUST_MU860_HWID && (status & MOXA_MUST_LSR_RERR))
+ return false;
+ if (status & MOXA_MUST_LSR_RERR)
+ return false;
- recv_room = tty->receive_room;
- if (recv_room == 0 && !port->ldisc_stop_rx)
+ gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
+ if (hwid == MOXA_MUST_MU150_HWID)
+ gdl &= MOXA_MUST_GDL_MASK;
+
+ if (gdl >= tty->receive_room && !port->ldisc_stop_rx)
mxser_stoprx(tty);
- if (port->board->chip_flag != MOXA_OTHER_UART) {
-
- if (*status & UART_LSR_SPECIAL)
- goto intr_old;
- if (port->board->chip_flag == MOXA_MUST_MU860_HWID &&
- (*status & MOXA_MUST_LSR_RERR))
- goto intr_old;
- if (*status & MOXA_MUST_LSR_RERR)
- goto intr_old;
-
- gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
-
- if (port->board->chip_flag == MOXA_MUST_MU150_HWID)
- gdl &= MOXA_MUST_GDL_MASK;
- if (gdl >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
- }
- while (gdl--) {
- ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(&port->port, ch, 0);
- cnt++;
- }
- goto end_intr;
+
+ while (gdl--) {
+ u8 ch = inb(port->ioaddr + UART_RX);
+ tty_insert_flip_char(&port->port, ch, 0);
}
-intr_old:
+
+ return true;
+}
+
+static u8 mxser_receive_chars_old(struct tty_struct *tty,
+ struct mxser_port *port, u8 status)
+{
+ enum mxser_must_hwid hwid = port->board->must_hwid;
+ int recv_room = tty->receive_room;
+ int ignored = 0;
+ int max = 256;
+ int cnt = 0;
+ u8 ch;
do {
if (max-- < 0)
break;
ch = inb(port->ioaddr + UART_RX);
- if (port->board->chip_flag && (*status & UART_LSR_OE))
- outb(0x23, port->ioaddr + UART_FCR);
- *status &= port->read_status_mask;
- if (*status & port->ignore_status_mask) {
+ if (hwid && (status & UART_LSR_OE))
+ outb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE,
+ port->ioaddr + UART_FCR);
+ status &= port->read_status_mask;
+ if (status & port->ignore_status_mask) {
if (++ignored > 100)
break;
} else {
char flag = 0;
- if (*status & UART_LSR_SPECIAL) {
- if (*status & UART_LSR_BI) {
+ if (status & UART_LSR_BRK_ERROR_BITS) {
+ if (status & UART_LSR_BI) {
flag = TTY_BREAK;
port->icount.brk++;
if (port->port.flags & ASYNC_SAK)
do_SAK(tty);
- } else if (*status & UART_LSR_PE) {
+ } else if (status & UART_LSR_PE) {
flag = TTY_PARITY;
port->icount.parity++;
- } else if (*status & UART_LSR_FE) {
+ } else if (status & UART_LSR_FE) {
flag = TTY_FRAME;
port->icount.frame++;
- } else if (*status & UART_LSR_OE) {
+ } else if (status & UART_LSR_OE) {
flag = TTY_OVERRUN;
port->icount.overrun++;
- } else
- flag = TTY_BREAK;
+ }
}
tty_insert_flip_char(&port->port, ch, flag);
cnt++;
@@ -2144,18 +1619,27 @@ intr_old:
}
- if (port->board->chip_flag)
+ if (hwid)
break;
- *status = inb(port->ioaddr + UART_LSR);
- } while (*status & UART_LSR_DR);
+ status = inb(port->ioaddr + UART_LSR);
+ } while (status & UART_LSR_DR);
+
+ return status;
+}
+
+static u8 mxser_receive_chars(struct tty_struct *tty,
+ struct mxser_port *port, u8 status)
+{
+ if (tty->receive_room == 0 && !port->ldisc_stop_rx)
+ mxser_stoprx(tty);
-end_intr:
- mxvar_log.rxcnt[tty->index] += cnt;
- port->mon_data.rxcnt += cnt;
- port->mon_data.up_rxcnt += cnt;
+ if (!mxser_receive_chars_new(tty, port, status))
+ status = mxser_receive_chars_old(tty, port, status);
tty_flip_buffer_push(&port->port);
+
+ return status;
}
static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port)
@@ -2165,9 +1649,6 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
if (port->x_char) {
outb(port->x_char, port->ioaddr + UART_TX);
port->x_char = 0;
- mxvar_log.txcnt[tty->index]++;
- port->mon_data.txcnt++;
- port->mon_data.up_txcnt++;
port->icount.tx++;
return;
}
@@ -2175,12 +1656,9 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
if (port->port.xmit_buf == NULL)
return;
- if (port->xmit_cnt <= 0 || tty->stopped ||
- (tty->hw_stopped &&
- (port->type != PORT_16550A) &&
- (!port->board->chip_flag))) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr + UART_IER);
+ if (!port->xmit_cnt || tty->flow.stopped ||
+ (tty->hw_stopped && !mxser_16550A_or_MUST(port))) {
+ __mxser_stop_tx(port);
return;
}
@@ -2190,22 +1668,72 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
outb(port->port.xmit_buf[port->xmit_tail++],
port->ioaddr + UART_TX);
port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
- if (--port->xmit_cnt <= 0)
+ if (!--port->xmit_cnt)
break;
} while (--count > 0);
- mxvar_log.txcnt[tty->index] += (cnt - port->xmit_cnt);
- port->mon_data.txcnt += (cnt - port->xmit_cnt);
- port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
port->icount.tx += (cnt - port->xmit_cnt);
if (port->xmit_cnt < WAKEUP_CHARS)
tty_wakeup(tty);
- if (port->xmit_cnt <= 0) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr + UART_IER);
+ if (!port->xmit_cnt)
+ __mxser_stop_tx(port);
+}
+
+static bool mxser_port_isr(struct mxser_port *port)
+{
+ struct tty_struct *tty;
+ u8 iir, msr, status;
+ bool error = false;
+
+ iir = inb(port->ioaddr + UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return true;
+
+ iir &= MOXA_MUST_IIR_MASK;
+ tty = tty_port_tty_get(&port->port);
+ if (!tty || port->closing || !tty_port_initialized(&port->port)) {
+ status = inb(port->ioaddr + UART_LSR);
+ outb(MOXA_MUST_FCR_GDA_MODE_ENABLE | UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ port->ioaddr + UART_FCR);
+ inb(port->ioaddr + UART_MSR);
+
+ error = true;
+ goto put_tty;
+ }
+
+ status = inb(port->ioaddr + UART_LSR);
+
+ if (port->board->must_hwid) {
+ if (iir == MOXA_MUST_IIR_GDA ||
+ iir == MOXA_MUST_IIR_RDA ||
+ iir == MOXA_MUST_IIR_RTO ||
+ iir == MOXA_MUST_IIR_LSR)
+ status = mxser_receive_chars(tty, port, status);
+ } else {
+ status &= port->read_status_mask;
+ if (status & UART_LSR_DR)
+ status = mxser_receive_chars(tty, port, status);
+ }
+
+ msr = inb(port->ioaddr + UART_MSR);
+ if (msr & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(tty, port, msr);
+
+ if (port->board->must_hwid) {
+ if (iir == 0x02 && (status & UART_LSR_THRE))
+ mxser_transmit_chars(tty, port);
+ } else {
+ if (status & UART_LSR_THRE)
+ mxser_transmit_chars(tty, port);
}
+
+put_tty:
+ tty_kref_put(tty);
+
+ return error;
}
/*
@@ -2213,33 +1741,21 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
*/
static irqreturn_t mxser_interrupt(int irq, void *dev_id)
{
- int status, iir, i;
- struct mxser_board *brd = NULL;
+ struct mxser_board *brd = dev_id;
struct mxser_port *port;
- int max, irqbits, bits, msr;
unsigned int int_cnt, pass_counter = 0;
+ unsigned int i, max = brd->nports;
int handled = IRQ_NONE;
- struct tty_struct *tty;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (dev_id == &mxser_boards[i]) {
- brd = dev_id;
- break;
- }
+ u8 irqbits, bits, mask = BIT(max) - 1;
- if (i == MXSER_BOARDS)
- goto irq_stop;
- if (brd == NULL)
- goto irq_stop;
- max = brd->info->nports;
while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
- irqbits = inb(brd->vector) & brd->vector_mask;
- if (irqbits == brd->vector_mask)
+ irqbits = inb(brd->vector) & mask;
+ if (irqbits == mask)
break;
handled = IRQ_HANDLED;
for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
- if (irqbits == brd->vector_mask)
+ if (irqbits == mask)
break;
if (bits & irqbits)
continue;
@@ -2248,65 +1764,13 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id)
int_cnt = 0;
spin_lock(&port->slock);
do {
- iir = inb(port->ioaddr + UART_IIR);
- if (iir & UART_IIR_NO_INT)
+ if (mxser_port_isr(port))
break;
- iir &= MOXA_MUST_IIR_MASK;
- tty = tty_port_tty_get(&port->port);
- if (!tty || port->closing ||
- !tty_port_initialized(&port->port)) {
- status = inb(port->ioaddr + UART_LSR);
- outb(0x27, port->ioaddr + UART_FCR);
- inb(port->ioaddr + UART_MSR);
- tty_kref_put(tty);
- break;
- }
-
- status = inb(port->ioaddr + UART_LSR);
-
- if (status & UART_LSR_PE)
- port->err_shadow |= NPPI_NOTIFY_PARITY;
- if (status & UART_LSR_FE)
- port->err_shadow |= NPPI_NOTIFY_FRAMING;
- if (status & UART_LSR_OE)
- port->err_shadow |=
- NPPI_NOTIFY_HW_OVERRUN;
- if (status & UART_LSR_BI)
- port->err_shadow |= NPPI_NOTIFY_BREAK;
-
- if (port->board->chip_flag) {
- if (iir == MOXA_MUST_IIR_GDA ||
- iir == MOXA_MUST_IIR_RDA ||
- iir == MOXA_MUST_IIR_RTO ||
- iir == MOXA_MUST_IIR_LSR)
- mxser_receive_chars(tty, port,
- &status);
-
- } else {
- status &= port->read_status_mask;
- if (status & UART_LSR_DR)
- mxser_receive_chars(tty, port,
- &status);
- }
- msr = inb(port->ioaddr + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(tty, port, msr);
-
- if (port->board->chip_flag) {
- if (iir == 0x02 && (status &
- UART_LSR_THRE))
- mxser_transmit_chars(tty, port);
- } else {
- if (status & UART_LSR_THRE)
- mxser_transmit_chars(tty, port);
- }
- tty_kref_put(tty);
} while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
spin_unlock(&port->slock);
}
}
-irq_stop:
return handled;
}
@@ -2346,254 +1810,103 @@ static const struct tty_port_operations mxser_port_ops = {
* The MOXA Smartio/Industio serial driver boot-time initialization code!
*/
-static bool allow_overlapping_vector;
-module_param(allow_overlapping_vector, bool, S_IRUGO);
-MODULE_PARM_DESC(allow_overlapping_vector, "whether we allow ISA cards to be configured such that vector overlabs IO ports (default=no)");
-
-static bool mxser_overlapping_vector(struct mxser_board *brd)
-{
- return allow_overlapping_vector &&
- brd->vector >= brd->ports[0].ioaddr &&
- brd->vector < brd->ports[0].ioaddr + 8 * brd->info->nports;
-}
-
-static int mxser_request_vector(struct mxser_board *brd)
+static void mxser_initbrd(struct mxser_board *brd, bool high_baud)
{
- if (mxser_overlapping_vector(brd))
- return 0;
- return request_region(brd->vector, 1, "mxser(vector)") ? 0 : -EIO;
-}
+ struct mxser_port *info;
+ unsigned int i;
+ bool is_mu860;
-static void mxser_release_vector(struct mxser_board *brd)
-{
- if (mxser_overlapping_vector(brd))
- return;
- release_region(brd->vector, 1);
-}
+ brd->must_hwid = mxser_must_get_hwid(brd->ports[0].ioaddr);
+ is_mu860 = brd->must_hwid == MOXA_MUST_MU860_HWID;
-static void mxser_release_ISA_res(struct mxser_board *brd)
-{
- release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
- mxser_release_vector(brd);
-}
+ for (i = 0; i < UART_INFO_NUM; i++) {
+ if (Gpci_uart_info[i].type == brd->must_hwid) {
+ brd->max_baud = Gpci_uart_info[i].max_baud;
-static int mxser_initbrd(struct mxser_board *brd)
-{
- struct mxser_port *info;
- unsigned int i;
- int retval;
+ /* exception....CP-102 */
+ if (high_baud)
+ brd->max_baud = 921600;
+ break;
+ }
+ }
- printk(KERN_INFO "mxser: max. baud rate = %d bps\n",
- brd->ports[0].max_baud);
+ if (is_mu860) {
+ /* set to RS232 mode by default */
+ outb(0, brd->vector + 4);
+ outb(0, brd->vector + 0x0c);
+ }
- for (i = 0; i < brd->info->nports; i++) {
+ for (i = 0; i < brd->nports; i++) {
info = &brd->ports[i];
+ if (is_mu860) {
+ if (i < 4)
+ info->opmode_ioaddr = brd->vector + 4;
+ else
+ info->opmode_ioaddr = brd->vector + 0x0c;
+ }
tty_port_init(&info->port);
info->port.ops = &mxser_port_ops;
info->board = brd;
- info->stop_rx = 0;
info->ldisc_stop_rx = 0;
/* Enhance mode enabled here */
- if (brd->chip_flag != MOXA_OTHER_UART)
- mxser_enable_must_enchance_mode(info->ioaddr);
+ if (brd->must_hwid != MOXA_OTHER_UART)
+ mxser_must_set_enhance_mode(info->ioaddr, true);
- info->type = brd->uart_type;
+ info->type = PORT_16550A;
- process_txrx_fifo(info);
+ mxser_process_txrx_fifo(info);
- info->custom_divisor = info->baud_base * 16;
info->port.close_delay = 5 * HZ / 10;
info->port.closing_wait = 30 * HZ;
- info->normal_termios = mxvar_sdriver->init_termios;
- memset(&info->mon_data, 0, sizeof(struct mxser_mon));
- info->err_shadow = 0;
spin_lock_init(&info->slock);
/* before set INT ISR, disable all int */
outb(inb(info->ioaddr + UART_IER) & 0xf0,
info->ioaddr + UART_IER);
}
-
- retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
- brd);
- if (retval) {
- for (i = 0; i < brd->info->nports; i++)
- tty_port_destroy(&brd->ports[i].port);
- printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
- "conflict with another device.\n",
- brd->info->name, brd->irq);
- }
-
- return retval;
-}
-
-static void mxser_board_remove(struct mxser_board *brd)
-{
- unsigned int i;
-
- for (i = 0; i < brd->info->nports; i++) {
- tty_unregister_device(mxvar_sdriver, brd->idx + i);
- tty_port_destroy(&brd->ports[i].port);
- }
- free_irq(brd->irq, brd);
-}
-
-static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
-{
- int id, i, bits, ret;
- unsigned short regs[16], irq;
- unsigned char scratch, scratch2;
-
- brd->chip_flag = MOXA_OTHER_UART;
-
- id = mxser_read_register(cap, regs);
- switch (id) {
- case C168_ASIC_ID:
- brd->info = &mxser_cards[0];
- break;
- case C104_ASIC_ID:
- brd->info = &mxser_cards[1];
- break;
- case CI104J_ASIC_ID:
- brd->info = &mxser_cards[2];
- break;
- case C102_ASIC_ID:
- brd->info = &mxser_cards[5];
- break;
- case CI132_ASIC_ID:
- brd->info = &mxser_cards[6];
- break;
- case CI134_ASIC_ID:
- brd->info = &mxser_cards[7];
- break;
- default:
- return 0;
- }
-
- irq = 0;
- /* some ISA cards have 2 ports, but we want to see them as 4-port (why?)
- Flag-hack checks if configuration should be read as 2-port here. */
- if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- if (irq != (regs[9] & 0xFF00))
- goto err_irqconflict;
- } else if (brd->info->nports == 4) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if (irq != regs[9])
- goto err_irqconflict;
- } else if (brd->info->nports == 8) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if ((irq != regs[9]) || (irq != regs[10]))
- goto err_irqconflict;
- }
-
- if (!irq) {
- printk(KERN_ERR "mxser: interrupt number unset\n");
- return -EIO;
- }
- brd->irq = ((int)(irq & 0xF000) >> 12);
- for (i = 0; i < 8; i++)
- brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
- if ((regs[12] & 0x80) == 0) {
- printk(KERN_ERR "mxser: invalid interrupt vector\n");
- return -EIO;
- }
- brd->vector = (int)regs[11]; /* interrupt vector */
- if (id == 1)
- brd->vector_mask = 0x00FF;
- else
- brd->vector_mask = 0x000F;
- for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
- if (regs[12] & bits) {
- brd->ports[i].baud_base = 921600;
- brd->ports[i].max_baud = 921600;
- } else {
- brd->ports[i].baud_base = 115200;
- brd->ports[i].max_baud = 115200;
- }
- }
- scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
- outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
- outb(0, cap + UART_EFR); /* EFR is the same as FCR */
- outb(scratch2, cap + UART_LCR);
- outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
- scratch = inb(cap + UART_IIR);
-
- if (scratch & 0xC0)
- brd->uart_type = PORT_16550A;
- else
- brd->uart_type = PORT_16450;
- if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
- "mxser(IO)")) {
- printk(KERN_ERR "mxser: can't request ports I/O region: "
- "0x%.8lx-0x%.8lx\n",
- brd->ports[0].ioaddr, brd->ports[0].ioaddr +
- 8 * brd->info->nports - 1);
- return -EIO;
- }
-
- ret = mxser_request_vector(brd);
- if (ret) {
- release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
- printk(KERN_ERR "mxser: can't request interrupt vector region: "
- "0x%.8lx-0x%.8lx\n",
- brd->ports[0].ioaddr, brd->ports[0].ioaddr +
- 8 * brd->info->nports - 1);
- return ret;
- }
- return brd->info->nports;
-
-err_irqconflict:
- printk(KERN_ERR "mxser: invalid interrupt number\n");
- return -EIO;
}
static int mxser_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
-#ifdef CONFIG_PCI
struct mxser_board *brd;
- unsigned int i, j;
+ unsigned int i, base;
unsigned long ioaddress;
+ unsigned short nports = MXSER_NPORTS(ent->driver_data);
struct device *tty_dev;
int retval = -EINVAL;
- for (i = 0; i < MXSER_BOARDS; i++)
- if (mxser_boards[i].info == NULL)
- break;
-
+ i = find_first_zero_bit(mxser_boards, MXSER_BOARDS);
if (i >= MXSER_BOARDS) {
dev_err(&pdev->dev, "too many boards found (maximum %d), board "
"not configured\n", MXSER_BOARDS);
goto err;
}
- brd = &mxser_boards[i];
- brd->idx = i * MXSER_PORTS_PER_BOARD;
- dev_info(&pdev->dev, "found MOXA %s board (BusNo=%d, DevNo=%d)\n",
- mxser_cards[ent->driver_data].name,
- pdev->bus->number, PCI_SLOT(pdev->devfn));
+ brd = devm_kzalloc(&pdev->dev, struct_size(brd, ports, nports),
+ GFP_KERNEL);
+ if (!brd)
+ goto err;
+
+ brd->idx = i;
+ __set_bit(brd->idx, mxser_boards);
+ base = i * MXSER_PORTS_PER_BOARD;
- retval = pci_enable_device(pdev);
+ retval = pcim_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "PCI enable failed\n");
- goto err;
+ goto err_zero;
}
/* io address */
ioaddress = pci_resource_start(pdev, 2);
retval = pci_request_region(pdev, 2, "mxser(IO)");
if (retval)
- goto err_dis;
+ goto err_zero;
- brd->info = &mxser_cards[ent->driver_data];
- for (i = 0; i < brd->info->nports; i++)
+ brd->nports = nports;
+ for (i = 0; i < nports; i++)
brd->ports[i].ioaddr = ioaddress + 8 * i;
/* vector */
@@ -2606,53 +1919,23 @@ static int mxser_probe(struct pci_dev *pdev,
/* irq */
brd->irq = pdev->irq;
- brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr);
- brd->uart_type = PORT_16550A;
- brd->vector_mask = 0;
-
- for (i = 0; i < brd->info->nports; i++) {
- for (j = 0; j < UART_INFO_NUM; j++) {
- if (Gpci_uart_info[j].type == brd->chip_flag) {
- brd->ports[i].max_baud =
- Gpci_uart_info[j].max_baud;
-
- /* exception....CP-102 */
- if (brd->info->flags & MXSER_HIGHBAUD)
- brd->ports[i].max_baud = 921600;
- break;
- }
- }
- }
-
- if (brd->chip_flag == MOXA_MUST_MU860_HWID) {
- for (i = 0; i < brd->info->nports; i++) {
- if (i < 4)
- brd->ports[i].opmode_ioaddr = ioaddress + 4;
- else
- brd->ports[i].opmode_ioaddr = ioaddress + 0x0c;
- }
- outb(0, ioaddress + 4); /* default set to RS232 mode */
- outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
- }
+ mxser_initbrd(brd, ent->driver_data & MXSER_HIGHBAUD);
- for (i = 0; i < brd->info->nports; i++) {
- brd->vector_mask |= (1 << i);
- brd->ports[i].baud_base = 921600;
+ retval = devm_request_irq(&pdev->dev, brd->irq, mxser_interrupt,
+ IRQF_SHARED, "mxser", brd);
+ if (retval) {
+ dev_err(&pdev->dev, "request irq failed");
+ goto err_relbrd;
}
- /* mxser_initbrd will hook ISR. */
- retval = mxser_initbrd(brd);
- if (retval)
- goto err_rel3;
-
- for (i = 0; i < brd->info->nports; i++) {
+ for (i = 0; i < nports; i++) {
tty_dev = tty_port_register_device(&brd->ports[i].port,
- mxvar_sdriver, brd->idx + i, &pdev->dev);
+ mxvar_sdriver, base + i, &pdev->dev);
if (IS_ERR(tty_dev)) {
retval = PTR_ERR(tty_dev);
for (; i > 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ base + i - 1);
goto err_relbrd;
}
}
@@ -2661,35 +1944,25 @@ static int mxser_probe(struct pci_dev *pdev,
return 0;
err_relbrd:
- for (i = 0; i < brd->info->nports; i++)
+ for (i = 0; i < nports; i++)
tty_port_destroy(&brd->ports[i].port);
- free_irq(brd->irq, brd);
-err_rel3:
- pci_release_region(pdev, 3);
err_zero:
- brd->info = NULL;
- pci_release_region(pdev, 2);
-err_dis:
- pci_disable_device(pdev);
+ __clear_bit(brd->idx, mxser_boards);
err:
return retval;
-#else
- return -ENODEV;
-#endif
}
static void mxser_remove(struct pci_dev *pdev)
{
-#ifdef CONFIG_PCI
struct mxser_board *brd = pci_get_drvdata(pdev);
+ unsigned int i, base = brd->idx * MXSER_PORTS_PER_BOARD;
- mxser_board_remove(brd);
+ for (i = 0; i < brd->nports; i++) {
+ tty_unregister_device(mxvar_sdriver, base + i);
+ tty_port_destroy(&brd->ports[i].port);
+ }
- pci_release_region(pdev, 2);
- pci_release_region(pdev, 3);
- pci_disable_device(pdev);
- brd->info = NULL;
-#endif
+ __clear_bit(brd->idx, mxser_boards);
}
static struct pci_driver mxser_driver = {
@@ -2701,18 +1974,12 @@ static struct pci_driver mxser_driver = {
static int __init mxser_module_init(void)
{
- struct mxser_board *brd;
- struct device *tty_dev;
- unsigned int b, i, m;
int retval;
- mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
+ mxvar_sdriver = alloc_tty_driver(MXSER_PORTS);
if (!mxvar_sdriver)
return -ENOMEM;
- printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
- MXSER_VERSION);
-
/* Initialize the tty_driver structure */
mxvar_sdriver->name = "ttyMI";
mxvar_sdriver->major = ttymajor;
@@ -2731,57 +1998,10 @@ static int __init mxser_module_init(void)
goto err_put;
}
- /* Start finding ISA boards here */
- for (m = 0, b = 0; b < MXSER_BOARDS; b++) {
- if (!ioaddr[b])
- continue;
-
- brd = &mxser_boards[m];
- retval = mxser_get_ISA_conf(ioaddr[b], brd);
- if (retval <= 0) {
- brd->info = NULL;
- continue;
- }
-
- printk(KERN_INFO "mxser: found MOXA %s board (CAP=0x%lx)\n",
- brd->info->name, ioaddr[b]);
-
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(brd) < 0) {
- mxser_release_ISA_res(brd);
- brd->info = NULL;
- continue;
- }
-
- brd->idx = m * MXSER_PORTS_PER_BOARD;
- for (i = 0; i < brd->info->nports; i++) {
- tty_dev = tty_port_register_device(&brd->ports[i].port,
- mxvar_sdriver, brd->idx + i, NULL);
- if (IS_ERR(tty_dev)) {
- for (; i > 0; i--)
- tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
- for (i = 0; i < brd->info->nports; i++)
- tty_port_destroy(&brd->ports[i].port);
- free_irq(brd->irq, brd);
- mxser_release_ISA_res(brd);
- brd->info = NULL;
- break;
- }
- }
- if (brd->info == NULL)
- continue;
-
- m++;
- }
-
retval = pci_register_driver(&mxser_driver);
if (retval) {
printk(KERN_ERR "mxser: can't register pci driver\n");
- if (!m) {
- retval = -ENODEV;
- goto err_unr;
- } /* else: we have some ISA cards under control */
+ goto err_unr;
}
return 0;
@@ -2794,19 +2014,9 @@ err_put:
static void __exit mxser_module_exit(void)
{
- unsigned int i;
-
pci_unregister_driver(&mxser_driver);
-
- for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
- if (mxser_boards[i].info != NULL)
- mxser_board_remove(&mxser_boards[i]);
tty_unregister_driver(mxvar_sdriver);
put_tty_driver(mxvar_sdriver);
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (mxser_boards[i].info != NULL)
- mxser_release_ISA_res(&mxser_boards[i]);
}
module_init(mxser_module_init);
diff --git a/drivers/tty/mxser.h b/drivers/tty/mxser.h
deleted file mode 100644
index e6cb15626567..000000000000
--- a/drivers/tty/mxser.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MXSER_H
-#define _MXSER_H
-
-/*
- * Semi-public control interfaces
- */
-
-/*
- * MOXA ioctls
- */
-
-#define MOXA 0x400
-#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_DIAGNOSE (MOXA + 50)
-#define MOXA_CHKPORTENABLE (MOXA + 60)
-#define MOXA_HighSpeedOn (MOXA + 61)
-#define MOXA_GET_MAJOR (MOXA + 63)
-#define MOXA_GETMSTATUS (MOXA + 65)
-#define MOXA_SET_OP_MODE (MOXA + 66)
-#define MOXA_GET_OP_MODE (MOXA + 67)
-
-#define RS232_MODE 0
-#define RS485_2WIRE_MODE 1
-#define RS422_MODE 2
-#define RS485_4WIRE_MODE 3
-#define OP_MODE_MASK 3
-
-#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
-#define MOXA_ASPP_OQUEUE (MOXA + 70)
-#define MOXA_ASPP_MON (MOXA + 73)
-#define MOXA_ASPP_LSTATUS (MOXA + 74)
-#define MOXA_ASPP_MON_EXT (MOXA + 75)
-#define MOXA_SET_BAUD_METHOD (MOXA + 76)
-
-/* --------------------------------------------------- */
-
-#define NPPI_NOTIFY_PARITY 0x01
-#define NPPI_NOTIFY_FRAMING 0x02
-#define NPPI_NOTIFY_HW_OVERRUN 0x04
-#define NPPI_NOTIFY_SW_OVERRUN 0x08
-#define NPPI_NOTIFY_BREAK 0x10
-
-#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */
-#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */
-#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */
-#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */
-
-/* follow just for Moxa Must chip define. */
-/* */
-/* when LCR register (offset 0x03) write following value, */
-/* the Must chip will enter enchance mode. And write value */
-/* on EFR (offset 0x02) bit 6,7 to change bank. */
-#define MOXA_MUST_ENTER_ENCHANCE 0xBF
-
-/* when enhance mode enable, access on general bank register */
-#define MOXA_MUST_GDL_REGISTER 0x07
-#define MOXA_MUST_GDL_MASK 0x7F
-#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
-
-#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
-/* enchance register bank select and enchance mode setting register */
-/* when LCR register equal to 0xBF */
-#define MOXA_MUST_EFR_REGISTER 0x02
-/* enchance mode enable */
-#define MOXA_MUST_EFR_EFRB_ENABLE 0x10
-/* enchance reister bank set 0, 1, 2 */
-#define MOXA_MUST_EFR_BANK0 0x00
-#define MOXA_MUST_EFR_BANK1 0x40
-#define MOXA_MUST_EFR_BANK2 0x80
-#define MOXA_MUST_EFR_BANK3 0xC0
-#define MOXA_MUST_EFR_BANK_MASK 0xC0
-
-/* set XON1 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XON1_REGISTER 0x04
-
-/* set XON2 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XON2_REGISTER 0x05
-
-/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XOFF1_REGISTER 0x06
-
-/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XOFF2_REGISTER 0x07
-
-#define MOXA_MUST_RBRTL_REGISTER 0x04
-#define MOXA_MUST_RBRTH_REGISTER 0x05
-#define MOXA_MUST_RBRTI_REGISTER 0x06
-#define MOXA_MUST_THRTL_REGISTER 0x07
-#define MOXA_MUST_ENUM_REGISTER 0x04
-#define MOXA_MUST_HWID_REGISTER 0x05
-#define MOXA_MUST_ECR_REGISTER 0x06
-#define MOXA_MUST_CSR_REGISTER 0x07
-
-/* good data mode enable */
-#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20
-/* only good data put into RxFIFO */
-#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10
-
-/* enable CTS interrupt */
-#define MOXA_MUST_IER_ECTSI 0x80
-/* enable RTS interrupt */
-#define MOXA_MUST_IER_ERTSI 0x40
-/* enable Xon/Xoff interrupt */
-#define MOXA_MUST_IER_XINT 0x20
-/* enable GDA interrupt */
-#define MOXA_MUST_IER_EGDAI 0x10
-
-#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
-
-/* GDA interrupt pending */
-#define MOXA_MUST_IIR_GDA 0x1C
-#define MOXA_MUST_IIR_RDA 0x04
-#define MOXA_MUST_IIR_RTO 0x0C
-#define MOXA_MUST_IIR_LSR 0x06
-
-/* received Xon/Xoff or specical interrupt pending */
-#define MOXA_MUST_IIR_XSC 0x10
-
-/* RTS/CTS change state interrupt pending */
-#define MOXA_MUST_IIR_RTSCTS 0x20
-#define MOXA_MUST_IIR_MASK 0x3E
-
-#define MOXA_MUST_MCR_XON_FLAG 0x40
-#define MOXA_MUST_MCR_XON_ANY 0x80
-#define MOXA_MUST_MCR_TX_XON 0x08
-
-/* software flow control on chip mask value */
-#define MOXA_MUST_EFR_SF_MASK 0x0F
-/* send Xon1/Xoff1 */
-#define MOXA_MUST_EFR_SF_TX1 0x08
-/* send Xon2/Xoff2 */
-#define MOXA_MUST_EFR_SF_TX2 0x04
-/* send Xon1,Xon2/Xoff1,Xoff2 */
-#define MOXA_MUST_EFR_SF_TX12 0x0C
-/* don't send Xon/Xoff */
-#define MOXA_MUST_EFR_SF_TX_NO 0x00
-/* Tx software flow control mask */
-#define MOXA_MUST_EFR_SF_TX_MASK 0x0C
-/* don't receive Xon/Xoff */
-#define MOXA_MUST_EFR_SF_RX_NO 0x00
-/* receive Xon1/Xoff1 */
-#define MOXA_MUST_EFR_SF_RX1 0x02
-/* receive Xon2/Xoff2 */
-#define MOXA_MUST_EFR_SF_RX2 0x01
-/* receive Xon1,Xon2/Xoff1,Xoff2 */
-#define MOXA_MUST_EFR_SF_RX12 0x03
-/* Rx software flow control mask */
-#define MOXA_MUST_EFR_SF_RX_MASK 0x03
-
-#endif
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 5fea02cfb0cc..e907b7a5cab5 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -512,7 +512,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
*/
/**
- * gsm_stuff_packet - bytestuff a packet
+ * gsm_stuff_frame - bytestuff a packet
* @input: input buffer
* @output: output buffer
* @len: length of input
@@ -1594,7 +1594,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
}
/**
- * gsm_dlci_control - data arrived on control channel
+ * gsm_dlci_command - data arrived on control channel
* @dlci: channel
* @data: block of bytes received
* @len: length of received block
@@ -2424,7 +2424,7 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
}
static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
struct gsm_mux *gsm = tty->disc_data;
char flags = TTY_NORMAL;
@@ -2557,6 +2557,8 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
* @file: file object
* @buf: userspace buffer pointer
* @nr: size of I/O
+ * @cookie: unused
+ * @offset: unused
*
* Perform reads for the line discipline. We are guaranteed that the
* line discipline will not be closed under us but we may get multiple
@@ -2857,6 +2859,7 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
/* Line discipline for real tty */
static struct tty_ldisc_ops tty_ldisc_packet = {
.owner = THIS_MODULE,
+ .num = N_GSM0710,
.name = "n_gsm",
.open = gsmld_open,
.close = gsmld_close,
@@ -3055,7 +3058,7 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
return sent;
}
-static int gsmtty_write_room(struct tty_struct *tty)
+static unsigned int gsmtty_write_room(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
@@ -3063,7 +3066,7 @@ static int gsmtty_write_room(struct tty_struct *tty)
return TX_SIZE - kfifo_len(&dlci->fifo);
}
-static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
@@ -3242,7 +3245,7 @@ static const struct tty_operations gsmtty_ops = {
static int __init gsm_init(void)
{
/* Fill in our line protocol discipline, and register it */
- int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
+ int status = tty_register_ldisc(&tty_ldisc_packet);
if (status != 0) {
pr_err("n_gsm: can't register line discipline (err = %d)\n",
status);
@@ -3251,9 +3254,9 @@ static int __init gsm_init(void)
gsm_tty_driver = alloc_tty_driver(256);
if (!gsm_tty_driver) {
- tty_unregister_ldisc(N_GSM0710);
pr_err("gsm_init: tty allocation failed.\n");
- return -EINVAL;
+ status = -ENOMEM;
+ goto err_unreg_ldisc;
}
gsm_tty_driver->driver_name = "gsmtty";
gsm_tty_driver->name = "gsmtty";
@@ -3269,22 +3272,23 @@ static int __init gsm_init(void)
tty_set_operations(gsm_tty_driver, &gsmtty_ops);
if (tty_register_driver(gsm_tty_driver)) {
- put_tty_driver(gsm_tty_driver);
- tty_unregister_ldisc(N_GSM0710);
pr_err("gsm_init: tty registration failed.\n");
- return -EBUSY;
+ status = -EBUSY;
+ goto err_put_driver;
}
pr_debug("gsm_init: loaded as %d,%d.\n",
gsm_tty_driver->major, gsm_tty_driver->minor_start);
return 0;
+err_put_driver:
+ put_tty_driver(gsm_tty_driver);
+err_unreg_ldisc:
+ tty_unregister_ldisc(&tty_ldisc_packet);
+ return status;
}
static void __exit gsm_exit(void)
{
- int status = tty_unregister_ldisc(N_GSM0710);
- if (status != 0)
- pr_err("n_gsm: can't unregister line discipline (err = %d)\n",
- status);
+ tty_unregister_ldisc(&tty_ldisc_packet);
tty_unregister_driver(gsm_tty_driver);
put_tty_driver(gsm_tty_driver);
}
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index b0f33e8ac819..580a37b3fe1b 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -358,7 +358,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
* interpreted as one HDLC frame.
*/
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
- char *flags, int count)
+ const char *flags, int count)
{
register struct n_hdlc *n_hdlc = tty->disc_data;
register struct n_hdlc_buf *buf;
@@ -411,8 +411,10 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
* n_hdlc_tty_read - Called to retrieve one frame of data (if available)
* @tty: pointer to tty instance data
* @file: pointer to open file object
- * @buf: pointer to returned data buffer
+ * @kbuf: pointer to returned data buffer
* @nr: size of returned data buffer
+ * @cookie: stored rbuf from previous run
+ * @offset: offset into the data buffer
*
* Returns the number of bytes returned or error code.
*/
@@ -788,6 +790,7 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
static struct tty_ldisc_ops n_hdlc_ldisc = {
.owner = THIS_MODULE,
+ .num = N_HDLC,
.name = "hdlc",
.open = n_hdlc_tty_open,
.close = n_hdlc_tty_close,
@@ -807,7 +810,7 @@ static int __init n_hdlc_init(void)
/* range check maxframe arg */
maxframe = clamp(maxframe, 4096, MAX_HDLC_FRAME_SIZE);
- status = tty_register_ldisc(N_HDLC, &n_hdlc_ldisc);
+ status = tty_register_ldisc(&n_hdlc_ldisc);
if (!status)
pr_info("N_HDLC line discipline registered with maxframe=%d\n",
maxframe);
@@ -821,14 +824,7 @@ static int __init n_hdlc_init(void)
static void __exit n_hdlc_exit(void)
{
- /* Release tty registration of line discipline */
- int status = tty_unregister_ldisc(N_HDLC);
-
- if (status)
- pr_err("N_HDLC: can't unregister line discipline (err = %d)\n",
- status);
- else
- pr_info("N_HDLC: line discipline unregistered\n");
+ tty_unregister_ldisc(&n_hdlc_ldisc);
}
module_init(n_hdlc_init);
diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
index b8f67b5f1ef8..f913b665af72 100644
--- a/drivers/tty/n_null.c
+++ b/drivers/tty/n_null.c
@@ -33,13 +33,14 @@ static ssize_t n_null_write(struct tty_struct *tty, struct file *file,
}
static void n_null_receivebuf(struct tty_struct *tty,
- const unsigned char *cp, char *fp,
+ const unsigned char *cp, const char *fp,
int cnt)
{
}
static struct tty_ldisc_ops null_ldisc = {
.owner = THIS_MODULE,
+ .num = N_NULL,
.name = "n_null",
.open = n_null_open,
.close = n_null_close,
@@ -50,13 +51,13 @@ static struct tty_ldisc_ops null_ldisc = {
static int __init n_null_init(void)
{
- BUG_ON(tty_register_ldisc(N_NULL, &null_ldisc));
+ BUG_ON(tty_register_ldisc(&null_ldisc));
return 0;
}
static void __exit n_null_exit(void)
{
- tty_unregister_ldisc(N_NULL);
+ tty_unregister_ldisc(&null_ldisc);
}
module_init(n_null_init);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
deleted file mode 100644
index 2eb76ea1d88d..000000000000
--- a/drivers/tty/n_r3964.c
+++ /dev/null
@@ -1,1283 +0,0 @@
-// SPDX-License-Identifier: GPL-1.0+
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * Author:
- * L. Haag
- *
- * $Log: n_r3964.c,v $
- * Revision 1.10 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.9 2001/03/18 12:52:14 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.8 2000/03/23 14:14:54 dwmw2
- * Fix race in sleeping in r3964_read()
- *
- * Revision 1.7 1999/28/08 11:41:50 dwmw2
- * Port to 2.3 kernel
- *
- * Revision 1.6 1998/09/30 00:40:40 dwmw2
- * Fixed compilation on 2.0.x kernels
- * Updated to newly registered tty-ldisc number 9
- *
- * Revision 1.5 1998/09/04 21:57:36 dwmw2
- * Signal handling bug fixes, port to 2.1.x.
- *
- * Revision 1.4 1998/04/02 20:26:59 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:34 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:21:03 root
- * Initial revision
- *
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/string.h> /* used in new tty drivers */
-#include <linux/signal.h> /* used in new tty drivers */
-#include <linux/ioctl.h>
-#include <linux/n_r3964.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-
-/*#define DEBUG_QUEUE*/
-
-/* Log successful handshake and protocol operations */
-/*#define DEBUG_PROTO_S*/
-
-/* Log handshake and protocol errors: */
-/*#define DEBUG_PROTO_E*/
-
-/* Log Linediscipline operations (open, close, read, write...): */
-/*#define DEBUG_LDISC*/
-
-/* Log module and memory operations (init, cleanup; kmalloc, kfree): */
-/*#define DEBUG_MODUL*/
-
-/* Macro helpers for debug output: */
-#define TRACE(format, args...) printk("r3964: " format "\n" , ## args)
-
-#ifdef DEBUG_MODUL
-#define TRACE_M(format, args...) printk("r3964: " format "\n" , ## args)
-#else
-#define TRACE_M(fmt, arg...) do {} while (0)
-#endif
-#ifdef DEBUG_PROTO_S
-#define TRACE_PS(format, args...) printk("r3964: " format "\n" , ## args)
-#else
-#define TRACE_PS(fmt, arg...) do {} while (0)
-#endif
-#ifdef DEBUG_PROTO_E
-#define TRACE_PE(format, args...) printk("r3964: " format "\n" , ## args)
-#else
-#define TRACE_PE(fmt, arg...) do {} while (0)
-#endif
-#ifdef DEBUG_LDISC
-#define TRACE_L(format, args...) printk("r3964: " format "\n" , ## args)
-#else
-#define TRACE_L(fmt, arg...) do {} while (0)
-#endif
-#ifdef DEBUG_QUEUE
-#define TRACE_Q(format, args...) printk("r3964: " format "\n" , ## args)
-#else
-#define TRACE_Q(fmt, arg...) do {} while (0)
-#endif
-static void add_tx_queue(struct r3964_info *, struct r3964_block_header *);
-static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code);
-static void put_char(struct r3964_info *pInfo, unsigned char ch);
-static void trigger_transmit(struct r3964_info *pInfo);
-static void retry_transmit(struct r3964_info *pInfo);
-static void transmit_block(struct r3964_info *pInfo);
-static void receive_char(struct r3964_info *pInfo, const unsigned char c);
-static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(struct timer_list *t);
-static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
-static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
- unsigned char __user * buf);
-static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
- int error_code, struct r3964_block_header *pBlock);
-static struct r3964_message *remove_msg(struct r3964_info *pInfo,
- struct r3964_client_info *pClient);
-static void remove_client_block(struct r3964_info *pInfo,
- struct r3964_client_info *pClient);
-
-static int r3964_open(struct tty_struct *tty);
-static void r3964_close(struct tty_struct *tty);
-static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
- void *cookie, unsigned char *buf, size_t nr);
-static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr);
-static int r3964_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_COMPAT
-static int r3964_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-#endif
-static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
-static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
- struct poll_table_struct *wait);
-static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count);
-
-static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
- .owner = THIS_MODULE,
- .name = "R3964",
- .open = r3964_open,
- .close = r3964_close,
- .read = r3964_read,
- .write = r3964_write,
- .ioctl = r3964_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = r3964_compat_ioctl,
-#endif
- .set_termios = r3964_set_termios,
- .poll = r3964_poll,
- .receive_buf = r3964_receive_buf,
-};
-
-static void dump_block(const unsigned char *block, unsigned int length)
-{
- unsigned int i, j;
- char linebuf[16 * 3 + 1];
-
- for (i = 0; i < length; i += 16) {
- for (j = 0; (j < 16) && (j + i < length); j++) {
- sprintf(linebuf + 3 * j, "%02x ", block[i + j]);
- }
- linebuf[3 * j] = '\0';
- TRACE_PS("%s", linebuf);
- }
-}
-
-/*************************************************************
- * Driver initialisation
- *************************************************************/
-
-/*************************************************************
- * Module support routines
- *************************************************************/
-
-static void __exit r3964_exit(void)
-{
- int status;
-
- TRACE_M("cleanup_module()");
-
- status = tty_unregister_ldisc(N_R3964);
-
- if (status != 0) {
- printk(KERN_ERR "r3964: error unregistering linediscipline: "
- "%d\n", status);
- } else {
- TRACE_L("linediscipline successfully unregistered");
- }
-}
-
-static int __init r3964_init(void)
-{
- int status;
-
- printk("r3964: Philips r3964 Driver $Revision: 1.10 $\n");
-
- /*
- * Register the tty line discipline
- */
-
- status = tty_register_ldisc(N_R3964, &tty_ldisc_N_R3964);
- if (status == 0) {
- TRACE_L("line discipline %d registered", N_R3964);
- TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags,
- tty_ldisc_N_R3964.num);
- TRACE_L("open=%p", tty_ldisc_N_R3964.open);
- TRACE_L("tty_ldisc_N_R3964 = %p", &tty_ldisc_N_R3964);
- } else {
- printk(KERN_ERR "r3964: error registering line discipline: "
- "%d\n", status);
- }
- return status;
-}
-
-module_init(r3964_init);
-module_exit(r3964_exit);
-
-/*************************************************************
- * Protocol implementation routines
- *************************************************************/
-
-static void add_tx_queue(struct r3964_info *pInfo,
- struct r3964_block_header *pHeader)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pInfo->lock, flags);
-
- pHeader->next = NULL;
-
- if (pInfo->tx_last == NULL) {
- pInfo->tx_first = pInfo->tx_last = pHeader;
- } else {
- pInfo->tx_last->next = pHeader;
- pInfo->tx_last = pHeader;
- }
-
- spin_unlock_irqrestore(&pInfo->lock, flags);
-
- TRACE_Q("add_tx_queue %p, length %d, tx_first = %p",
- pHeader, pHeader->length, pInfo->tx_first);
-}
-
-static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
-{
- struct r3964_block_header *pHeader;
- unsigned long flags;
-#ifdef DEBUG_QUEUE
- struct r3964_block_header *pDump;
-#endif
-
- pHeader = pInfo->tx_first;
-
- if (pHeader == NULL)
- return;
-
-#ifdef DEBUG_QUEUE
- printk("r3964: remove_from_tx_queue: %p, length %u - ",
- pHeader, pHeader->length);
- for (pDump = pHeader; pDump; pDump = pDump->next)
- printk("%p ", pDump);
- printk("\n");
-#endif
-
- if (pHeader->owner) {
- if (error_code) {
- add_msg(pHeader->owner, R3964_MSG_ACK, 0,
- error_code, NULL);
- } else {
- add_msg(pHeader->owner, R3964_MSG_ACK, pHeader->length,
- error_code, NULL);
- }
- wake_up_interruptible(&pInfo->tty->read_wait);
- }
-
- spin_lock_irqsave(&pInfo->lock, flags);
-
- pInfo->tx_first = pHeader->next;
- if (pInfo->tx_first == NULL) {
- pInfo->tx_last = NULL;
- }
-
- spin_unlock_irqrestore(&pInfo->lock, flags);
-
- kfree(pHeader);
- TRACE_M("remove_from_tx_queue - kfree %p", pHeader);
-
- TRACE_Q("remove_from_tx_queue: tx_first = %p, tx_last = %p",
- pInfo->tx_first, pInfo->tx_last);
-}
-
-static void add_rx_queue(struct r3964_info *pInfo,
- struct r3964_block_header *pHeader)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pInfo->lock, flags);
-
- pHeader->next = NULL;
-
- if (pInfo->rx_last == NULL) {
- pInfo->rx_first = pInfo->rx_last = pHeader;
- } else {
- pInfo->rx_last->next = pHeader;
- pInfo->rx_last = pHeader;
- }
- pInfo->blocks_in_rx_queue++;
-
- spin_unlock_irqrestore(&pInfo->lock, flags);
-
- TRACE_Q("add_rx_queue: %p, length = %d, rx_first = %p, count = %d",
- pHeader, pHeader->length,
- pInfo->rx_first, pInfo->blocks_in_rx_queue);
-}
-
-static void remove_from_rx_queue(struct r3964_info *pInfo,
- struct r3964_block_header *pHeader)
-{
- unsigned long flags;
- struct r3964_block_header *pFind;
-
- if (pHeader == NULL)
- return;
-
- TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
- pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue);
- TRACE_Q("remove_from_rx_queue: %p, length %u",
- pHeader, pHeader->length);
-
- spin_lock_irqsave(&pInfo->lock, flags);
-
- if (pInfo->rx_first == pHeader) {
- /* Remove the first block in the linked list: */
- pInfo->rx_first = pHeader->next;
-
- if (pInfo->rx_first == NULL) {
- pInfo->rx_last = NULL;
- }
- pInfo->blocks_in_rx_queue--;
- } else {
- /* Find block to remove: */
- for (pFind = pInfo->rx_first; pFind; pFind = pFind->next) {
- if (pFind->next == pHeader) {
- /* Got it. */
- pFind->next = pHeader->next;
- pInfo->blocks_in_rx_queue--;
- if (pFind->next == NULL) {
- /* Oh, removed the last one! */
- pInfo->rx_last = pFind;
- }
- break;
- }
- }
- }
-
- spin_unlock_irqrestore(&pInfo->lock, flags);
-
- kfree(pHeader);
- TRACE_M("remove_from_rx_queue - kfree %p", pHeader);
-
- TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
- pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue);
-}
-
-static void put_char(struct r3964_info *pInfo, unsigned char ch)
-{
- struct tty_struct *tty = pInfo->tty;
- /* FIXME: put_char should not be called from an IRQ */
- tty_put_char(tty, ch);
- pInfo->bcc ^= ch;
-}
-
-static void flush(struct r3964_info *pInfo)
-{
- struct tty_struct *tty = pInfo->tty;
-
- if (tty == NULL || tty->ops->flush_chars == NULL)
- return;
- tty->ops->flush_chars(tty);
-}
-
-static void trigger_transmit(struct r3964_info *pInfo)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pInfo->lock, flags);
-
- if ((pInfo->state == R3964_IDLE) && (pInfo->tx_first != NULL)) {
- pInfo->state = R3964_TX_REQUEST;
- pInfo->nRetry = 0;
- pInfo->flags &= ~R3964_ERROR;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_QVZ);
-
- spin_unlock_irqrestore(&pInfo->lock, flags);
-
- TRACE_PS("trigger_transmit - sent STX");
-
- put_char(pInfo, STX);
- flush(pInfo);
-
- pInfo->bcc = 0;
- } else {
- spin_unlock_irqrestore(&pInfo->lock, flags);
- }
-}
-
-static void retry_transmit(struct r3964_info *pInfo)
-{
- if (pInfo->nRetry < R3964_MAX_RETRIES) {
- TRACE_PE("transmission failed. Retry #%d", pInfo->nRetry);
- pInfo->bcc = 0;
- put_char(pInfo, STX);
- flush(pInfo);
- pInfo->state = R3964_TX_REQUEST;
- pInfo->nRetry++;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_QVZ);
- } else {
- TRACE_PE("transmission failed after %d retries",
- R3964_MAX_RETRIES);
-
- remove_from_tx_queue(pInfo, R3964_TX_FAIL);
-
- put_char(pInfo, NAK);
- flush(pInfo);
- pInfo->state = R3964_IDLE;
-
- trigger_transmit(pInfo);
- }
-}
-
-static void transmit_block(struct r3964_info *pInfo)
-{
- struct tty_struct *tty = pInfo->tty;
- struct r3964_block_header *pBlock = pInfo->tx_first;
- int room = 0;
-
- if (tty == NULL || pBlock == NULL) {
- return;
- }
-
- room = tty_write_room(tty);
-
- TRACE_PS("transmit_block %p, room %d, length %d",
- pBlock, room, pBlock->length);
-
- while (pInfo->tx_position < pBlock->length) {
- if (room < 2)
- break;
-
- if (pBlock->data[pInfo->tx_position] == DLE) {
- /* send additional DLE char: */
- put_char(pInfo, DLE);
- }
- put_char(pInfo, pBlock->data[pInfo->tx_position++]);
-
- room--;
- }
-
- if ((pInfo->tx_position == pBlock->length) && (room >= 3)) {
- put_char(pInfo, DLE);
- put_char(pInfo, ETX);
- if (pInfo->flags & R3964_BCC) {
- put_char(pInfo, pInfo->bcc);
- }
- pInfo->state = R3964_WAIT_FOR_TX_ACK;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_QVZ);
- }
- flush(pInfo);
-}
-
-static void on_receive_block(struct r3964_info *pInfo)
-{
- unsigned int length;
- struct r3964_client_info *pClient;
- struct r3964_block_header *pBlock;
-
- length = pInfo->rx_position;
-
- /* compare byte checksum characters: */
- if (pInfo->flags & R3964_BCC) {
- if (pInfo->bcc != pInfo->last_rx) {
- TRACE_PE("checksum error - got %x but expected %x",
- pInfo->last_rx, pInfo->bcc);
- pInfo->flags |= R3964_CHECKSUM;
- }
- }
-
- /* check for errors (parity, overrun,...): */
- if (pInfo->flags & R3964_ERROR) {
- TRACE_PE("on_receive_block - transmission failed error %x",
- pInfo->flags & R3964_ERROR);
-
- put_char(pInfo, NAK);
- flush(pInfo);
- if (pInfo->nRetry < R3964_MAX_RETRIES) {
- pInfo->state = R3964_WAIT_FOR_RX_REPEAT;
- pInfo->nRetry++;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_RX_PANIC);
- } else {
- TRACE_PE("on_receive_block - failed after max retries");
- pInfo->state = R3964_IDLE;
- }
- return;
- }
-
- /* received block; submit DLE: */
- put_char(pInfo, DLE);
- flush(pInfo);
- del_timer_sync(&pInfo->tmr);
- TRACE_PS(" rx success: got %d chars", length);
-
- /* prepare struct r3964_block_header: */
- pBlock = kmalloc(length + sizeof(struct r3964_block_header),
- GFP_KERNEL);
- TRACE_M("on_receive_block - kmalloc %p", pBlock);
-
- if (pBlock == NULL)
- return;
-
- pBlock->length = length;
- pBlock->data = ((unsigned char *)pBlock) +
- sizeof(struct r3964_block_header);
- pBlock->locks = 0;
- pBlock->next = NULL;
- pBlock->owner = NULL;
-
- memcpy(pBlock->data, pInfo->rx_buf, length);
-
- /* queue block into rx_queue: */
- add_rx_queue(pInfo, pBlock);
-
- /* notify attached client processes: */
- for (pClient = pInfo->firstClient; pClient; pClient = pClient->next) {
- if (pClient->sig_flags & R3964_SIG_DATA) {
- add_msg(pClient, R3964_MSG_DATA, length, R3964_OK,
- pBlock);
- }
- }
- wake_up_interruptible(&pInfo->tty->read_wait);
-
- pInfo->state = R3964_IDLE;
-
- trigger_transmit(pInfo);
-}
-
-static void receive_char(struct r3964_info *pInfo, const unsigned char c)
-{
- switch (pInfo->state) {
- case R3964_TX_REQUEST:
- if (c == DLE) {
- TRACE_PS("TX_REQUEST - got DLE");
-
- pInfo->state = R3964_TRANSMITTING;
- pInfo->tx_position = 0;
-
- transmit_block(pInfo);
- } else if (c == STX) {
- if (pInfo->nRetry == 0) {
- TRACE_PE("TX_REQUEST - init conflict");
- if (pInfo->priority == R3964_SLAVE) {
- goto start_receiving;
- }
- } else {
- TRACE_PE("TX_REQUEST - secondary init "
- "conflict!? Switching to SLAVE mode "
- "for next rx.");
- goto start_receiving;
- }
- } else {
- TRACE_PE("TX_REQUEST - char != DLE: %x", c);
- retry_transmit(pInfo);
- }
- break;
- case R3964_TRANSMITTING:
- if (c == NAK) {
- TRACE_PE("TRANSMITTING - got NAK");
- retry_transmit(pInfo);
- } else {
- TRACE_PE("TRANSMITTING - got invalid char");
-
- pInfo->state = R3964_WAIT_ZVZ_BEFORE_TX_RETRY;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
- }
- break;
- case R3964_WAIT_FOR_TX_ACK:
- if (c == DLE) {
- TRACE_PS("WAIT_FOR_TX_ACK - got DLE");
- remove_from_tx_queue(pInfo, R3964_OK);
-
- pInfo->state = R3964_IDLE;
- trigger_transmit(pInfo);
- } else {
- retry_transmit(pInfo);
- }
- break;
- case R3964_WAIT_FOR_RX_REPEAT:
- case R3964_IDLE:
- if (c == STX) {
- /* Prevent rx_queue from overflow: */
- if (pInfo->blocks_in_rx_queue >=
- R3964_MAX_BLOCKS_IN_RX_QUEUE) {
- TRACE_PE("IDLE - got STX but no space in "
- "rx_queue!");
- pInfo->state = R3964_WAIT_FOR_RX_BUF;
- mod_timer(&pInfo->tmr,
- jiffies + R3964_TO_NO_BUF);
- break;
- }
-start_receiving:
- /* Ok, start receiving: */
- TRACE_PS("IDLE - got STX");
- pInfo->rx_position = 0;
- pInfo->last_rx = 0;
- pInfo->flags &= ~R3964_ERROR;
- pInfo->state = R3964_RECEIVING;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
- pInfo->nRetry = 0;
- put_char(pInfo, DLE);
- flush(pInfo);
- pInfo->bcc = 0;
- }
- break;
- case R3964_RECEIVING:
- if (pInfo->rx_position < RX_BUF_SIZE) {
- pInfo->bcc ^= c;
-
- if (c == DLE) {
- if (pInfo->last_rx == DLE) {
- pInfo->last_rx = 0;
- goto char_to_buf;
- }
- pInfo->last_rx = DLE;
- break;
- } else if ((c == ETX) && (pInfo->last_rx == DLE)) {
- if (pInfo->flags & R3964_BCC) {
- pInfo->state = R3964_WAIT_FOR_BCC;
- mod_timer(&pInfo->tmr,
- jiffies + R3964_TO_ZVZ);
- } else {
- on_receive_block(pInfo);
- }
- } else {
- pInfo->last_rx = c;
-char_to_buf:
- pInfo->rx_buf[pInfo->rx_position++] = c;
- mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
- }
- }
- /* else: overflow-msg? BUF_SIZE>MTU; should not happen? */
- break;
- case R3964_WAIT_FOR_BCC:
- pInfo->last_rx = c;
- on_receive_block(pInfo);
- break;
- }
-}
-
-static void receive_error(struct r3964_info *pInfo, const char flag)
-{
- switch (flag) {
- case TTY_NORMAL:
- break;
- case TTY_BREAK:
- TRACE_PE("received break");
- pInfo->flags |= R3964_BREAK;
- break;
- case TTY_PARITY:
- TRACE_PE("parity error");
- pInfo->flags |= R3964_PARITY;
- break;
- case TTY_FRAME:
- TRACE_PE("frame error");
- pInfo->flags |= R3964_FRAME;
- break;
- case TTY_OVERRUN:
- TRACE_PE("frame overrun");
- pInfo->flags |= R3964_OVERRUN;
- break;
- default:
- TRACE_PE("receive_error - unknown flag %d", flag);
- pInfo->flags |= R3964_UNKNOWN;
- break;
- }
-}
-
-static void on_timeout(struct timer_list *t)
-{
- struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
-
- switch (pInfo->state) {
- case R3964_TX_REQUEST:
- TRACE_PE("TX_REQUEST - timeout");
- retry_transmit(pInfo);
- break;
- case R3964_WAIT_ZVZ_BEFORE_TX_RETRY:
- put_char(pInfo, NAK);
- flush(pInfo);
- retry_transmit(pInfo);
- break;
- case R3964_WAIT_FOR_TX_ACK:
- TRACE_PE("WAIT_FOR_TX_ACK - timeout");
- retry_transmit(pInfo);
- break;
- case R3964_WAIT_FOR_RX_BUF:
- TRACE_PE("WAIT_FOR_RX_BUF - timeout");
- put_char(pInfo, NAK);
- flush(pInfo);
- pInfo->state = R3964_IDLE;
- break;
- case R3964_RECEIVING:
- TRACE_PE("RECEIVING - timeout after %d chars",
- pInfo->rx_position);
- put_char(pInfo, NAK);
- flush(pInfo);
- pInfo->state = R3964_IDLE;
- break;
- case R3964_WAIT_FOR_RX_REPEAT:
- TRACE_PE("WAIT_FOR_RX_REPEAT - timeout");
- pInfo->state = R3964_IDLE;
- break;
- case R3964_WAIT_FOR_BCC:
- TRACE_PE("WAIT_FOR_BCC - timeout");
- put_char(pInfo, NAK);
- flush(pInfo);
- pInfo->state = R3964_IDLE;
- break;
- }
-}
-
-static struct r3964_client_info *findClient(struct r3964_info *pInfo,
- struct pid *pid)
-{
- struct r3964_client_info *pClient;
-
- for (pClient = pInfo->firstClient; pClient; pClient = pClient->next) {
- if (pClient->pid == pid) {
- return pClient;
- }
- }
- return NULL;
-}
-
-static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg)
-{
- struct r3964_client_info *pClient;
- struct r3964_client_info **ppClient;
- struct r3964_message *pMsg;
-
- if ((arg & R3964_SIG_ALL) == 0) {
- /* Remove client from client list */
- for (ppClient = &pInfo->firstClient; *ppClient;
- ppClient = &(*ppClient)->next) {
- pClient = *ppClient;
-
- if (pClient->pid == pid) {
- TRACE_PS("removing client %d from client list",
- pid_nr(pid));
- *ppClient = pClient->next;
- while (pClient->msg_count) {
- pMsg = remove_msg(pInfo, pClient);
- if (pMsg) {
- kfree(pMsg);
- TRACE_M("enable_signals - msg "
- "kfree %p", pMsg);
- }
- }
- put_pid(pClient->pid);
- kfree(pClient);
- TRACE_M("enable_signals - kfree %p", pClient);
- return 0;
- }
- }
- return -EINVAL;
- } else {
- pClient = findClient(pInfo, pid);
- if (pClient) {
- /* update signal options */
- pClient->sig_flags = arg;
- } else {
- /* add client to client list */
- pClient = kmalloc(sizeof(struct r3964_client_info),
- GFP_KERNEL);
- TRACE_M("enable_signals - kmalloc %p", pClient);
- if (pClient == NULL)
- return -ENOMEM;
-
- TRACE_PS("add client %d to client list", pid_nr(pid));
- spin_lock_init(&pClient->lock);
- pClient->sig_flags = arg;
- pClient->pid = get_pid(pid);
- pClient->next = pInfo->firstClient;
- pClient->first_msg = NULL;
- pClient->last_msg = NULL;
- pClient->next_block_to_read = NULL;
- pClient->msg_count = 0;
- pInfo->firstClient = pClient;
- }
- }
-
- return 0;
-}
-
-static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
- unsigned char __user * buf)
-{
- struct r3964_client_info *pClient;
- struct r3964_block_header *block;
-
- if (!buf) {
- return -EINVAL;
- }
-
- pClient = findClient(pInfo, pid);
- if (pClient == NULL) {
- return -EINVAL;
- }
-
- block = pClient->next_block_to_read;
- if (!block) {
- return 0;
- } else {
- if (copy_to_user(buf, block->data, block->length))
- return -EFAULT;
-
- remove_client_block(pInfo, pClient);
- return block->length;
- }
-
- return -EINVAL;
-}
-
-static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
- int error_code, struct r3964_block_header *pBlock)
-{
- struct r3964_message *pMsg;
- unsigned long flags;
-
- if (pClient->msg_count < R3964_MAX_MSG_COUNT - 1) {
-queue_the_message:
-
- pMsg = kmalloc(sizeof(struct r3964_message),
- error_code ? GFP_ATOMIC : GFP_KERNEL);
- TRACE_M("add_msg - kmalloc %p", pMsg);
- if (pMsg == NULL) {
- return;
- }
-
- spin_lock_irqsave(&pClient->lock, flags);
-
- pMsg->msg_id = msg_id;
- pMsg->arg = arg;
- pMsg->error_code = error_code;
- pMsg->block = pBlock;
- pMsg->next = NULL;
-
- if (pClient->last_msg == NULL) {
- pClient->first_msg = pClient->last_msg = pMsg;
- } else {
- pClient->last_msg->next = pMsg;
- pClient->last_msg = pMsg;
- }
-
- pClient->msg_count++;
-
- if (pBlock != NULL) {
- pBlock->locks++;
- }
- spin_unlock_irqrestore(&pClient->lock, flags);
- } else {
- if ((pClient->last_msg->msg_id == R3964_MSG_ACK)
- && (pClient->last_msg->error_code == R3964_OVERFLOW)) {
- pClient->last_msg->arg++;
- TRACE_PE("add_msg - inc prev OVERFLOW-msg");
- } else {
- msg_id = R3964_MSG_ACK;
- arg = 0;
- error_code = R3964_OVERFLOW;
- pBlock = NULL;
- TRACE_PE("add_msg - queue OVERFLOW-msg");
- goto queue_the_message;
- }
- }
- /* Send SIGIO signal to client process: */
- if (pClient->sig_flags & R3964_USE_SIGIO) {
- kill_pid(pClient->pid, SIGIO, 1);
- }
-}
-
-static struct r3964_message *remove_msg(struct r3964_info *pInfo,
- struct r3964_client_info *pClient)
-{
- struct r3964_message *pMsg = NULL;
- unsigned long flags;
-
- if (pClient->first_msg) {
- spin_lock_irqsave(&pClient->lock, flags);
-
- pMsg = pClient->first_msg;
- pClient->first_msg = pMsg->next;
- if (pClient->first_msg == NULL) {
- pClient->last_msg = NULL;
- }
-
- pClient->msg_count--;
- if (pMsg->block) {
- remove_client_block(pInfo, pClient);
- pClient->next_block_to_read = pMsg->block;
- }
- spin_unlock_irqrestore(&pClient->lock, flags);
- }
- return pMsg;
-}
-
-static void remove_client_block(struct r3964_info *pInfo,
- struct r3964_client_info *pClient)
-{
- struct r3964_block_header *block;
-
- TRACE_PS("remove_client_block PID %d", pid_nr(pClient->pid));
-
- block = pClient->next_block_to_read;
- if (block) {
- block->locks--;
- if (block->locks == 0) {
- remove_from_rx_queue(pInfo, block);
- }
- }
- pClient->next_block_to_read = NULL;
-}
-
-/*************************************************************
- * Line discipline routines
- *************************************************************/
-
-static int r3964_open(struct tty_struct *tty)
-{
- struct r3964_info *pInfo;
-
- TRACE_L("open");
- TRACE_L("tty=%p, PID=%d, disc_data=%p",
- tty, current->pid, tty->disc_data);
-
- pInfo = kmalloc(sizeof(struct r3964_info), GFP_KERNEL);
- TRACE_M("r3964_open - info kmalloc %p", pInfo);
-
- if (!pInfo) {
- printk(KERN_ERR "r3964: failed to alloc info structure\n");
- return -ENOMEM;
- }
-
- pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
- TRACE_M("r3964_open - rx_buf kmalloc %p", pInfo->rx_buf);
-
- if (!pInfo->rx_buf) {
- printk(KERN_ERR "r3964: failed to alloc receive buffer\n");
- kfree(pInfo);
- TRACE_M("r3964_open - info kfree %p", pInfo);
- return -ENOMEM;
- }
-
- pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL);
- TRACE_M("r3964_open - tx_buf kmalloc %p", pInfo->tx_buf);
-
- if (!pInfo->tx_buf) {
- printk(KERN_ERR "r3964: failed to alloc transmit buffer\n");
- kfree(pInfo->rx_buf);
- TRACE_M("r3964_open - rx_buf kfree %p", pInfo->rx_buf);
- kfree(pInfo);
- TRACE_M("r3964_open - info kfree %p", pInfo);
- return -ENOMEM;
- }
-
- spin_lock_init(&pInfo->lock);
- mutex_init(&pInfo->read_lock);
- pInfo->tty = tty;
- pInfo->priority = R3964_MASTER;
- pInfo->rx_first = pInfo->rx_last = NULL;
- pInfo->tx_first = pInfo->tx_last = NULL;
- pInfo->rx_position = 0;
- pInfo->tx_position = 0;
- pInfo->last_rx = 0;
- pInfo->blocks_in_rx_queue = 0;
- pInfo->firstClient = NULL;
- pInfo->state = R3964_IDLE;
- pInfo->flags = R3964_DEBUG;
- pInfo->nRetry = 0;
-
- tty->disc_data = pInfo;
- tty->receive_room = 65536;
-
- timer_setup(&pInfo->tmr, on_timeout, 0);
-
- return 0;
-}
-
-static void r3964_close(struct tty_struct *tty)
-{
- struct r3964_info *pInfo = tty->disc_data;
- struct r3964_client_info *pClient, *pNext;
- struct r3964_message *pMsg;
- struct r3964_block_header *pHeader, *pNextHeader;
- unsigned long flags;
-
- TRACE_L("close");
-
- /*
- * Make sure that our task queue isn't activated. If it
- * is, take it out of the linked list.
- */
- del_timer_sync(&pInfo->tmr);
-
- /* Remove client-structs and message queues: */
- pClient = pInfo->firstClient;
- while (pClient) {
- pNext = pClient->next;
- while (pClient->msg_count) {
- pMsg = remove_msg(pInfo, pClient);
- if (pMsg) {
- kfree(pMsg);
- TRACE_M("r3964_close - msg kfree %p", pMsg);
- }
- }
- put_pid(pClient->pid);
- kfree(pClient);
- TRACE_M("r3964_close - client kfree %p", pClient);
- pClient = pNext;
- }
- /* Remove jobs from tx_queue: */
- spin_lock_irqsave(&pInfo->lock, flags);
- pHeader = pInfo->tx_first;
- pInfo->tx_first = pInfo->tx_last = NULL;
- spin_unlock_irqrestore(&pInfo->lock, flags);
-
- while (pHeader) {
- pNextHeader = pHeader->next;
- kfree(pHeader);
- pHeader = pNextHeader;
- }
-
- /* Free buffers: */
- kfree(pInfo->rx_buf);
- TRACE_M("r3964_close - rx_buf kfree %p", pInfo->rx_buf);
- kfree(pInfo->tx_buf);
- TRACE_M("r3964_close - tx_buf kfree %p", pInfo->tx_buf);
- kfree(pInfo);
- TRACE_M("r3964_close - info kfree %p", pInfo);
-}
-
-static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
- unsigned char *kbuf, size_t nr,
- void **cookie, unsigned long offset)
-{
- struct r3964_info *pInfo = tty->disc_data;
- struct r3964_client_info *pClient;
- struct r3964_message *pMsg;
- struct r3964_client_message theMsg;
- int ret;
-
- TRACE_L("read()");
-
- /*
- * Internal serialization of reads.
- */
- if (file->f_flags & O_NONBLOCK) {
- if (!mutex_trylock(&pInfo->read_lock))
- return -EAGAIN;
- } else {
- if (mutex_lock_interruptible(&pInfo->read_lock))
- return -ERESTARTSYS;
- }
-
- pClient = findClient(pInfo, task_pid(current));
- if (pClient) {
- pMsg = remove_msg(pInfo, pClient);
- if (pMsg == NULL) {
- /* no messages available. */
- if (tty_io_nonblock(tty, file)) {
- ret = -EAGAIN;
- goto unlock;
- }
- /* block until there is a message: */
- wait_event_interruptible(tty->read_wait,
- (pMsg = remove_msg(pInfo, pClient)));
- }
-
- /* If we still haven't got a message, we must have been signalled */
-
- if (!pMsg) {
- ret = -EINTR;
- goto unlock;
- }
-
- /* deliver msg to client process: */
- theMsg.msg_id = pMsg->msg_id;
- theMsg.arg = pMsg->arg;
- theMsg.error_code = pMsg->error_code;
- ret = sizeof(struct r3964_client_message);
-
- kfree(pMsg);
- TRACE_M("r3964_read - msg kfree %p", pMsg);
-
- memcpy(kbuf, &theMsg, ret);
-
- TRACE_PS("read - return %d", ret);
- goto unlock;
- }
- ret = -EPERM;
-unlock:
- mutex_unlock(&pInfo->read_lock);
- return ret;
-}
-
-static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
- const unsigned char *data, size_t count)
-{
- struct r3964_info *pInfo = tty->disc_data;
- struct r3964_block_header *pHeader;
- struct r3964_client_info *pClient;
- unsigned char *new_data;
-
- TRACE_L("write request, %d characters", count);
-/*
- * Verify the pointers
- */
-
- if (!pInfo)
- return -EIO;
-
-/*
- * Ensure that the caller does not wish to send too much.
- */
- if (count > R3964_MTU) {
- if (pInfo->flags & R3964_DEBUG) {
- TRACE_L(KERN_WARNING "r3964_write: truncating user "
- "packet from %u to mtu %d", count, R3964_MTU);
- }
- count = R3964_MTU;
- }
-/*
- * Allocate a buffer for the data and copy it from the buffer with header prepended
- */
- new_data = kmalloc(count + sizeof(struct r3964_block_header),
- GFP_KERNEL);
- TRACE_M("r3964_write - kmalloc %p", new_data);
- if (new_data == NULL) {
- if (pInfo->flags & R3964_DEBUG) {
- printk(KERN_ERR "r3964_write: no memory\n");
- }
- return -ENOSPC;
- }
-
- pHeader = (struct r3964_block_header *)new_data;
- pHeader->data = new_data + sizeof(struct r3964_block_header);
- pHeader->length = count;
- pHeader->locks = 0;
- pHeader->owner = NULL;
-
- pClient = findClient(pInfo, task_pid(current));
- if (pClient) {
- pHeader->owner = pClient;
- }
-
- memcpy(pHeader->data, data, count); /* We already verified this */
-
- if (pInfo->flags & R3964_DEBUG) {
- dump_block(pHeader->data, count);
- }
-
-/*
- * Add buffer to transmit-queue:
- */
- add_tx_queue(pInfo, pHeader);
- trigger_transmit(pInfo);
-
- return 0;
-}
-
-static int r3964_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct r3964_info *pInfo = tty->disc_data;
- if (pInfo == NULL)
- return -EINVAL;
- switch (cmd) {
- case R3964_ENABLE_SIGNALS:
- return enable_signals(pInfo, task_pid(current), arg);
- case R3964_SETPRIORITY:
- if (arg < R3964_MASTER || arg > R3964_SLAVE)
- return -EINVAL;
- pInfo->priority = arg & 0xff;
- return 0;
- case R3964_USE_BCC:
- if (arg)
- pInfo->flags |= R3964_BCC;
- else
- pInfo->flags &= ~R3964_BCC;
- return 0;
- case R3964_READ_TELEGRAM:
- return read_telegram(pInfo, task_pid(current),
- (unsigned char __user *)arg);
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-#ifdef CONFIG_COMPAT
-static int r3964_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case R3964_ENABLE_SIGNALS:
- case R3964_SETPRIORITY:
- case R3964_USE_BCC:
- return r3964_ioctl(tty, file, cmd, arg);
- default:
- return -ENOIOCTLCMD;
- }
-}
-#endif
-
-static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- TRACE_L("set_termios");
-}
-
-/* Called without the kernel lock held - fine */
-static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
- struct poll_table_struct *wait)
-{
- struct r3964_info *pInfo = tty->disc_data;
- struct r3964_client_info *pClient;
- struct r3964_message *pMsg = NULL;
- unsigned long flags;
- __poll_t result = EPOLLOUT;
-
- TRACE_L("POLL");
-
- pClient = findClient(pInfo, task_pid(current));
- if (pClient) {
- poll_wait(file, &tty->read_wait, wait);
- spin_lock_irqsave(&pInfo->lock, flags);
- pMsg = pClient->first_msg;
- spin_unlock_irqrestore(&pInfo->lock, flags);
- if (pMsg)
- result |= EPOLLIN | EPOLLRDNORM;
- } else {
- result = -EINVAL;
- }
- return result;
-}
-
-static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
-{
- struct r3964_info *pInfo = tty->disc_data;
- const unsigned char *p;
- char *f, flags = TTY_NORMAL;
- int i;
-
- for (i = count, p = cp, f = fp; i; i--, p++) {
- if (f)
- flags = *f++;
- if (flags == TTY_NORMAL) {
- receive_char(pInfo, *p);
- } else {
- receive_error(pInfo, flags);
- }
-
- }
-}
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_LDISC(N_R3964);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 9686c5d10571..0ec93f1a61f5 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -342,10 +342,10 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
{
unsigned long flags;
- if (tty->link->packet) {
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty->ctrl_status |= TIOCPKT_FLUSHREAD;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (tty->link->ctrl.packet) {
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ tty->ctrl.pktstatus |= TIOCPKT_FLUSHREAD;
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
wake_up_interruptible(&tty->link->read_wait);
}
}
@@ -361,7 +361,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
* Holds termios_rwsem to exclude producer/consumer while
* buffer indices are reset.
*
- * Locking: ctrl_lock, exclusive termios_rwsem
+ * Locking: ctrl.lock, exclusive termios_rwsem
*/
static void n_tty_flush_buffer(struct tty_struct *tty)
@@ -1103,7 +1103,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* buffer is 'output'. The signal is processed first to alert any current
* readers or writers to discontinue and exit their i/o loops.
*
- * Locking: ctrl_lock
+ * Locking: ctrl.lock
*/
static void __isig(int sig, struct tty_struct *tty)
@@ -1245,7 +1245,6 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
commit_echoes(tty);
} else
process_echoes(tty);
- return;
}
/**
@@ -1260,12 +1259,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
* n_tty_receive_buf()/producer path:
* caller holds non-exclusive termios_rwsem
* publishes canon_head if canonical mode is active
- *
- * Returns 1 if LNEXT was received, else returns 0
*/
-
-static int
-n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1273,35 +1268,35 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
if (c == START_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
- return 0;
+ return;
}
if (c == STOP_CHAR(tty)) {
stop_tty(tty);
- return 0;
+ return;
}
}
if (L_ISIG(tty)) {
if (c == INTR_CHAR(tty)) {
n_tty_receive_signal_char(tty, SIGINT, c);
- return 0;
+ return;
} else if (c == QUIT_CHAR(tty)) {
n_tty_receive_signal_char(tty, SIGQUIT, c);
- return 0;
+ return;
} else if (c == SUSP_CHAR(tty)) {
n_tty_receive_signal_char(tty, SIGTSTP, c);
- return 0;
+ return;
}
}
- if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
+ if (tty->flow.stopped && !tty->flow.tco_stopped && I_IXON(tty) && I_IXANY(tty)) {
start_tty(tty);
process_echoes(tty);
}
if (c == '\r') {
if (I_IGNCR(tty))
- return 0;
+ return;
if (I_ICRNL(tty))
c = '\n';
} else if (c == '\n' && I_INLCR(tty))
@@ -1312,7 +1307,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
(c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
eraser(c, tty);
commit_echoes(tty);
- return 0;
+ return;
}
if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
ldata->lnext = 1;
@@ -1324,7 +1319,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
commit_echoes(tty);
}
}
- return 1;
+ return;
}
if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) {
size_t tail = ldata->canon_head;
@@ -1337,7 +1332,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
tail++;
}
commit_echoes(tty);
- return 0;
+ return;
}
if (c == '\n') {
if (L_ECHO(tty) || L_ECHONL(tty)) {
@@ -1375,7 +1370,7 @@ handle_newline:
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
- return 0;
+ return;
}
}
@@ -1397,15 +1392,13 @@ handle_newline:
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
- return 0;
}
-static inline void
-n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
+ if (tty->flow.stopped && !tty->flow.tco_stopped && I_IXON(tty) && I_IXANY(tty)) {
start_tty(tty);
process_echoes(tty);
}
@@ -1423,31 +1416,6 @@ n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
-{
- n_tty_receive_char_inline(tty, c);
-}
-
-static inline void
-n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c)
-{
- struct n_tty_data *ldata = tty->disc_data;
-
- if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
- start_tty(tty);
- process_echoes(tty);
- }
- if (L_ECHO(tty)) {
- finish_erasing(ldata);
- /* Record the column of first canon char. */
- if (ldata->canon_head == ldata->read_head)
- echo_set_canon_col(ldata);
- echo_char(c, tty);
- commit_echoes(tty);
- }
- put_tty_queue(c, ldata);
-}
-
static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
{
if (I_ISTRIP(tty))
@@ -1459,7 +1427,7 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
if (c == STOP_CHAR(tty))
stop_tty(tty);
else if (c == START_CHAR(tty) ||
- (tty->stopped && !tty->flow_stopped && I_IXANY(tty) &&
+ (tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
c != SUSP_CHAR(tty))) {
start_tty(tty);
@@ -1506,7 +1474,7 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
static void
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
struct n_tty_data *ldata = tty->disc_data;
size_t n, head;
@@ -1526,7 +1494,7 @@ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
static void
n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
struct n_tty_data *ldata = tty->disc_data;
char flag = TTY_NORMAL;
@@ -1543,7 +1511,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
static void
n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
char flag = TTY_NORMAL;
@@ -1555,68 +1523,46 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
}
}
-static void
-n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+static void n_tty_receive_buf_standard(struct tty_struct *tty,
+ const unsigned char *cp, const char *fp, int count)
{
struct n_tty_data *ldata = tty->disc_data;
char flag = TTY_NORMAL;
while (count--) {
+ unsigned char c = *cp++;
+
if (fp)
flag = *fp++;
- if (likely(flag == TTY_NORMAL)) {
- unsigned char c = *cp++;
-
- if (I_ISTRIP(tty))
- c &= 0x7f;
- if (I_IUCLC(tty) && L_IEXTEN(tty))
- c = tolower(c);
- if (L_EXTPROC(tty)) {
- put_tty_queue(c, ldata);
- continue;
- }
- if (!test_bit(c, ldata->char_map))
- n_tty_receive_char_inline(tty, c);
- else if (n_tty_receive_char_special(tty, c) && count) {
- if (fp)
- flag = *fp++;
- n_tty_receive_char_lnext(tty, *cp++, flag);
- count--;
- }
- } else
- n_tty_receive_char_flagged(tty, *cp++, flag);
- }
-}
-static void
-n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
-{
- struct n_tty_data *ldata = tty->disc_data;
- char flag = TTY_NORMAL;
+ if (ldata->lnext) {
+ n_tty_receive_char_lnext(tty, c, flag);
+ continue;
+ }
- while (count--) {
- if (fp)
- flag = *fp++;
- if (likely(flag == TTY_NORMAL)) {
- unsigned char c = *cp++;
-
- if (!test_bit(c, ldata->char_map))
- n_tty_receive_char_fast(tty, c);
- else if (n_tty_receive_char_special(tty, c) && count) {
- if (fp)
- flag = *fp++;
- n_tty_receive_char_lnext(tty, *cp++, flag);
- count--;
- }
- } else
- n_tty_receive_char_flagged(tty, *cp++, flag);
+ if (unlikely(flag != TTY_NORMAL)) {
+ n_tty_receive_char_flagged(tty, c, flag);
+ continue;
+ }
+
+ if (I_ISTRIP(tty))
+ c &= 0x7f;
+ if (I_IUCLC(tty) && L_IEXTEN(tty))
+ c = tolower(c);
+ if (L_EXTPROC(tty)) {
+ put_tty_queue(c, ldata);
+ continue;
+ }
+
+ if (test_bit(c, ldata->char_map))
+ n_tty_receive_char_special(tty, c);
+ else
+ n_tty_receive_char(tty, c);
}
}
static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
@@ -1628,19 +1574,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
else if (tty->closing && !L_EXTPROC(tty))
n_tty_receive_buf_closing(tty, cp, fp, count);
else {
- if (ldata->lnext) {
- char flag = TTY_NORMAL;
-
- if (fp)
- flag = *fp++;
- n_tty_receive_char_lnext(tty, *cp++, flag);
- count--;
- }
-
- if (!preops && !I_PARMRK(tty))
- n_tty_receive_buf_fast(tty, cp, fp, count);
- else
- n_tty_receive_buf_standard(tty, cp, fp, count);
+ n_tty_receive_buf_standard(tty, cp, fp, count);
flush_echoes(tty);
if (tty->ops->flush_chars)
@@ -1695,7 +1629,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
*/
static int
n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count, int flow)
+ const char *fp, int count, int flow)
{
struct n_tty_data *ldata = tty->disc_data;
int room, n, rcvd = 0, overflow;
@@ -1764,13 +1698,13 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
}
static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
n_tty_receive_buf_common(tty, cp, fp, count, 0);
}
static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+ const char *fp, int count)
{
return n_tty_receive_buf_common(tty, cp, fp, count, 1);
}
@@ -1863,7 +1797,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow.tco_stopped) {
start_tty(tty);
process_echoes(tty);
}
@@ -2091,7 +2025,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
*
* Locking: redirected write test is safe
* current->signal->tty check is safe
- * ctrl_lock to safely reference tty->pgrp
+ * ctrl.lock to safely reference tty->ctrl.pgrp
*/
static int job_control(struct tty_struct *tty, struct file *file)
@@ -2138,7 +2072,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
int minimum, time;
ssize_t retval = 0;
long timeout;
- int packet;
+ bool packet;
size_t tail;
/*
@@ -2194,20 +2128,20 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
}
- packet = tty->packet;
+ packet = tty->ctrl.packet;
tail = ldata->read_tail;
add_wait_queue(&tty->read_wait, &wait);
while (nr) {
/* First test for status change. */
- if (packet && tty->link->ctrl_status) {
+ if (packet && tty->link->ctrl.pktstatus) {
unsigned char cs;
if (kb != kbuf)
break;
- spin_lock_irq(&tty->link->ctrl_lock);
- cs = tty->link->ctrl_status;
- tty->link->ctrl_status = 0;
- spin_unlock_irq(&tty->link->ctrl_lock);
+ spin_lock_irq(&tty->link->ctrl.lock);
+ cs = tty->link->ctrl.pktstatus;
+ tty->link->ctrl.pktstatus = 0;
+ spin_unlock_irq(&tty->link->ctrl.lock);
*kb++ = cs;
nr--;
break;
@@ -2434,7 +2368,7 @@ static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
if (input_available_p(tty, 1))
mask |= EPOLLIN | EPOLLRDNORM;
}
- if (tty->packet && tty->link->ctrl_status)
+ if (tty->ctrl.packet && tty->link->ctrl.pktstatus)
mask |= EPOLLPRI | EPOLLIN | EPOLLRDNORM;
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= EPOLLHUP;
@@ -2490,6 +2424,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
static struct tty_ldisc_ops n_tty_ops = {
.owner = THIS_MODULE,
+ .num = N_TTY,
.name = "n_tty",
.open = n_tty_open,
.close = n_tty_close,
@@ -2515,11 +2450,11 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = n_tty_ops;
ops->owner = NULL;
- ops->refcount = ops->flags = 0;
+ ops->flags = 0;
}
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
void __init n_tty_init(void)
{
- tty_register_ldisc(N_TTY, &n_tty_ops);
+ tty_register_ldisc(&n_tty_ops);
}
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 9a2d78ace49b..0c80f25c8c3d 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -1378,7 +1378,7 @@ static int nozomi_card_init(struct pci_dev *pdev,
NOZOMI_NAME, dc);
if (unlikely(ret)) {
dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
- goto err_free_kfifo;
+ goto err_free_all_kfifo;
}
DBG1("base_addr: %p", dc->base_addr);
@@ -1416,12 +1416,15 @@ static int nozomi_card_init(struct pci_dev *pdev,
return 0;
err_free_tty:
- for (i = 0; i < MAX_PORT; ++i) {
+ for (i--; i >= 0; i--) {
tty_unregister_device(ntty_driver, dc->index_start + i);
tty_port_destroy(&dc->port[i].port);
}
+ free_irq(pdev->irq, dc);
+err_free_all_kfifo:
+ i = MAX_PORT;
err_free_kfifo:
- for (i = 0; i < MAX_PORT; i++)
+ for (i--; i >= PORT_MDM; i--)
kfifo_free(&dc->port[i].fifo_ul);
err_free_sbuf:
kfree(dc->send_buf);
@@ -1636,10 +1639,10 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
* If the port is unplugged report lots of room and let the bits
* dribble away so we don't block anything.
*/
-static int ntty_write_room(struct tty_struct *tty)
+static unsigned int ntty_write_room(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
- int room = 4096;
+ unsigned int room = 4096;
const struct nozomi *dc = get_dc_by_tty(tty);
if (dc)
@@ -1776,20 +1779,15 @@ static void ntty_throttle(struct tty_struct *tty)
}
/* Returns number of chars in buffer, called by tty layer */
-static s32 ntty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int ntty_chars_in_buffer(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
struct nozomi *dc = get_dc_by_tty(tty);
- s32 rval = 0;
- if (unlikely(!dc || !port)) {
- goto exit_in_buffer;
- }
-
- rval = kfifo_len(&port->fifo_ul);
+ if (unlikely(!dc || !port))
+ return 0;
-exit_in_buffer:
- return rval;
+ return kfifo_len(&port->fifo_ul);
}
static const struct tty_port_operations noz_tty_port_ops = {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 9b5d4ae5d8f2..74bfabe5b453 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -57,9 +57,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
set_bit(TTY_IO_ERROR, &tty->flags);
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
- spin_lock_irq(&tty->ctrl_lock);
- tty->packet = 0;
- spin_unlock_irq(&tty->ctrl_lock);
+ spin_lock_irq(&tty->ctrl.lock);
+ tty->ctrl.packet = false;
+ spin_unlock_irq(&tty->ctrl.lock);
/* Review - krefs on tty_link ?? */
if (!tty->link)
return;
@@ -113,7 +113,7 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
struct tty_struct *to = tty->link;
unsigned long flags;
- if (tty->stopped)
+ if (tty->flow.stopped)
return 0;
if (c > 0) {
@@ -136,26 +136,13 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
* the other device.
*/
-static int pty_write_room(struct tty_struct *tty)
+static unsigned int pty_write_room(struct tty_struct *tty)
{
- if (tty->stopped)
+ if (tty->flow.stopped)
return 0;
return tty_buffer_space_avail(tty->link->port);
}
-/**
- * pty_chars_in_buffer - characters currently in our tx queue
- * @tty: our tty
- *
- * Report how much we have in the transmit queue. As everything is
- * instantly at the other end this is easy to implement.
- */
-
-static int pty_chars_in_buffer(struct tty_struct *tty)
-{
- return 0;
-}
-
/* Set the lock flag on a pty */
static int pty_set_lock(struct tty_struct *tty, int __user *arg)
{
@@ -185,16 +172,16 @@ static int pty_set_pktmode(struct tty_struct *tty, int __user *arg)
if (get_user(pktmode, arg))
return -EFAULT;
- spin_lock_irq(&tty->ctrl_lock);
+ spin_lock_irq(&tty->ctrl.lock);
if (pktmode) {
- if (!tty->packet) {
- tty->link->ctrl_status = 0;
+ if (!tty->ctrl.packet) {
+ tty->link->ctrl.pktstatus = 0;
smp_mb();
- tty->packet = 1;
+ tty->ctrl.packet = true;
}
} else
- tty->packet = 0;
- spin_unlock_irq(&tty->ctrl_lock);
+ tty->ctrl.packet = false;
+ spin_unlock_irq(&tty->ctrl.lock);
return 0;
}
@@ -202,7 +189,7 @@ static int pty_set_pktmode(struct tty_struct *tty, int __user *arg)
/* Get the packet mode of a pty */
static int pty_get_pktmode(struct tty_struct *tty, int __user *arg)
{
- int pktmode = tty->packet;
+ int pktmode = tty->ctrl.packet;
return put_user(pktmode, arg);
}
@@ -232,11 +219,11 @@ static void pty_flush_buffer(struct tty_struct *tty)
return;
tty_buffer_flush(to, NULL);
- if (to->packet) {
- spin_lock_irq(&tty->ctrl_lock);
- tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
+ if (to->ctrl.packet) {
+ spin_lock_irq(&tty->ctrl.lock);
+ tty->ctrl.pktstatus |= TIOCPKT_FLUSHWRITE;
wake_up_interruptible(&to->read_wait);
- spin_unlock_irq(&tty->ctrl_lock);
+ spin_unlock_irq(&tty->ctrl.lock);
}
}
@@ -266,7 +253,7 @@ static void pty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
/* See if packet mode change of state. */
- if (tty->link && tty->link->packet) {
+ if (tty->link && tty->link->ctrl.packet) {
int extproc = (old_termios->c_lflag & EXTPROC) | L_EXTPROC(tty);
int old_flow = ((old_termios->c_iflag & IXON) &&
(old_termios->c_cc[VSTOP] == '\023') &&
@@ -275,17 +262,17 @@ static void pty_set_termios(struct tty_struct *tty,
STOP_CHAR(tty) == '\023' &&
START_CHAR(tty) == '\021');
if ((old_flow != new_flow) || extproc) {
- spin_lock_irq(&tty->ctrl_lock);
+ spin_lock_irq(&tty->ctrl.lock);
if (old_flow != new_flow) {
- tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
+ tty->ctrl.pktstatus &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
if (new_flow)
- tty->ctrl_status |= TIOCPKT_DOSTOP;
+ tty->ctrl.pktstatus |= TIOCPKT_DOSTOP;
else
- tty->ctrl_status |= TIOCPKT_NOSTOP;
+ tty->ctrl.pktstatus |= TIOCPKT_NOSTOP;
}
if (extproc)
- tty->ctrl_status |= TIOCPKT_IOCTL;
- spin_unlock_irq(&tty->ctrl_lock);
+ tty->ctrl.pktstatus |= TIOCPKT_IOCTL;
+ spin_unlock_irq(&tty->ctrl.lock);
wake_up_interruptible(&tty->link->read_wait);
}
}
@@ -295,7 +282,7 @@ static void pty_set_termios(struct tty_struct *tty,
}
/**
- * pty_do_resize - resize event
+ * pty_resize - resize event
* @tty: tty being resized
* @ws: window size being set.
*
@@ -346,11 +333,11 @@ static void pty_start(struct tty_struct *tty)
{
unsigned long flags;
- if (tty->link && tty->link->packet) {
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty->ctrl_status &= ~TIOCPKT_STOP;
- tty->ctrl_status |= TIOCPKT_START;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (tty->link && tty->link->ctrl.packet) {
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ tty->ctrl.pktstatus &= ~TIOCPKT_STOP;
+ tty->ctrl.pktstatus |= TIOCPKT_START;
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN);
}
}
@@ -359,11 +346,11 @@ static void pty_stop(struct tty_struct *tty)
{
unsigned long flags;
- if (tty->link && tty->link->packet) {
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty->ctrl_status &= ~TIOCPKT_START;
- tty->ctrl_status |= TIOCPKT_STOP;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (tty->link && tty->link->ctrl.packet) {
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ tty->ctrl.pktstatus &= ~TIOCPKT_START;
+ tty->ctrl.pktstatus |= TIOCPKT_STOP;
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN);
}
}
@@ -525,7 +512,6 @@ static const struct tty_operations master_pty_ops_bsd = {
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
- .chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
.ioctl = pty_bsd_ioctl,
.compat_ioctl = pty_bsd_compat_ioctl,
@@ -541,7 +527,6 @@ static const struct tty_operations slave_pty_ops_bsd = {
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
- .chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
.set_termios = pty_set_termios,
.cleanup = pty_cleanup,
@@ -626,7 +611,7 @@ static struct cdev ptmx_cdev;
*/
int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
- int fd = -1;
+ int fd;
struct file *filp;
int retval = -EINVAL;
struct path path;
@@ -776,7 +761,6 @@ static const struct tty_operations ptm_unix98_ops = {
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
- .chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
.ioctl = pty_unix98_ioctl,
.compat_ioctl = pty_unix98_compat_ioctl,
@@ -794,7 +778,6 @@ static const struct tty_operations pty_unix98_ops = {
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
- .chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
.set_termios = pty_set_termios,
.start = pty_start,
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index aead0c0c9796..9cdfcfe07e87 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -798,7 +798,7 @@ void serdev_controller_remove(struct serdev_controller *ctrl)
EXPORT_SYMBOL_GPL(serdev_controller_remove);
/**
- * serdev_driver_register() - Register client driver with serdev core
+ * __serdev_device_driver_register() - Register client driver with serdev core
* @sdrv: client driver to be associated with client-device.
* @owner: client driver owner to set.
*
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index d035d08cb987..2350fb3bb5e4 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -34,7 +34,6 @@
struct aspeed_vuart {
struct device *dev;
- void __iomem *regs;
struct clk *clk;
int line;
struct timer_list unthrottle_timer;
@@ -64,14 +63,24 @@ static const int unthrottle_timeout = HZ/10;
* different system (though most of them use 3f8/4).
*/
+static inline u8 aspeed_vuart_readb(struct aspeed_vuart *vuart, u8 reg)
+{
+ return readb(vuart->port->port.membase + reg);
+}
+
+static inline void aspeed_vuart_writeb(struct aspeed_vuart *vuart, u8 val, u8 reg)
+{
+ writeb(val, vuart->port->port.membase + reg);
+}
+
static ssize_t lpc_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct aspeed_vuart *vuart = dev_get_drvdata(dev);
u16 addr;
- addr = (readb(vuart->regs + ASPEED_VUART_ADDRH) << 8) |
- (readb(vuart->regs + ASPEED_VUART_ADDRL));
+ addr = (aspeed_vuart_readb(vuart, ASPEED_VUART_ADDRH) << 8) |
+ (aspeed_vuart_readb(vuart, ASPEED_VUART_ADDRL));
return snprintf(buf, PAGE_SIZE - 1, "0x%x\n", addr);
}
@@ -81,8 +90,8 @@ static int aspeed_vuart_set_lpc_address(struct aspeed_vuart *vuart, u32 addr)
if (addr > U16_MAX)
return -EINVAL;
- writeb(addr >> 8, vuart->regs + ASPEED_VUART_ADDRH);
- writeb(addr >> 0, vuart->regs + ASPEED_VUART_ADDRL);
+ aspeed_vuart_writeb(vuart, addr >> 8, ASPEED_VUART_ADDRH);
+ aspeed_vuart_writeb(vuart, addr >> 0, ASPEED_VUART_ADDRL);
return 0;
}
@@ -111,7 +120,7 @@ static ssize_t sirq_show(struct device *dev,
struct aspeed_vuart *vuart = dev_get_drvdata(dev);
u8 reg;
- reg = readb(vuart->regs + ASPEED_VUART_GCRB);
+ reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRB);
reg &= ASPEED_VUART_GCRB_HOST_SIRQ_MASK;
reg >>= ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT;
@@ -128,10 +137,10 @@ static int aspeed_vuart_set_sirq(struct aspeed_vuart *vuart, u32 sirq)
sirq <<= ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT;
sirq &= ASPEED_VUART_GCRB_HOST_SIRQ_MASK;
- reg = readb(vuart->regs + ASPEED_VUART_GCRB);
+ reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRB);
reg &= ~ASPEED_VUART_GCRB_HOST_SIRQ_MASK;
reg |= sirq;
- writeb(reg, vuart->regs + ASPEED_VUART_GCRB);
+ aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRB);
return 0;
}
@@ -159,7 +168,7 @@ static ssize_t sirq_polarity_show(struct device *dev,
struct aspeed_vuart *vuart = dev_get_drvdata(dev);
u8 reg;
- reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA);
reg &= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
return snprintf(buf, PAGE_SIZE - 1, "%u\n", reg ? 1 : 0);
@@ -168,14 +177,14 @@ static ssize_t sirq_polarity_show(struct device *dev,
static void aspeed_vuart_set_sirq_polarity(struct aspeed_vuart *vuart,
bool polarity)
{
- u8 reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ u8 reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA);
if (polarity)
reg |= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
else
reg &= ~ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
- writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+ aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRA);
}
static ssize_t sirq_polarity_store(struct device *dev,
@@ -210,14 +219,14 @@ static const struct attribute_group aspeed_vuart_attr_group = {
static void aspeed_vuart_set_enabled(struct aspeed_vuart *vuart, bool enabled)
{
- u8 reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ u8 reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA);
if (enabled)
reg |= ASPEED_VUART_GCRA_VUART_EN;
else
reg &= ~ASPEED_VUART_GCRA_VUART_EN;
- writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+ aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRA);
}
static void aspeed_vuart_set_host_tx_discard(struct aspeed_vuart *vuart,
@@ -225,7 +234,7 @@ static void aspeed_vuart_set_host_tx_discard(struct aspeed_vuart *vuart,
{
u8 reg;
- reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ reg = aspeed_vuart_readb(vuart, ASPEED_VUART_GCRA);
/* If the DISABLE_HOST_TX_DISCARD bit is set, discard is disabled */
if (!discard)
@@ -233,7 +242,7 @@ static void aspeed_vuart_set_host_tx_discard(struct aspeed_vuart *vuart,
else
reg &= ~ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD;
- writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+ aspeed_vuart_writeb(vuart, reg, ASPEED_VUART_GCRA);
}
static int aspeed_vuart_startup(struct uart_port *uart_port)
@@ -320,14 +329,15 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int iir, lsr;
- int space, count;
+ unsigned long flags;
+ unsigned int space, count;
iir = serial_port_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT)
return 0;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
lsr = serial_port_in(port, UART_LSR);
@@ -339,14 +349,12 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
struct aspeed_vuart *vuart = port->private_data;
__aspeed_vuart_set_throttle(up, true);
- if (!timer_pending(&vuart->unthrottle_timer)) {
- vuart->port = up;
+ if (!timer_pending(&vuart->unthrottle_timer))
mod_timer(&vuart->unthrottle_timer,
jiffies + unthrottle_timeout);
- }
} else {
- count = min(space, 256);
+ count = min(space, 256U);
do {
serial8250_read_char(up, lsr);
@@ -363,7 +371,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
- uart_unlock_and_check_sysrq(port);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
return 1;
}
@@ -421,13 +429,9 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vuart->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(vuart->regs))
- return PTR_ERR(vuart->regs);
memset(&port, 0, sizeof(port));
port.port.private_data = vuart;
- port.port.membase = vuart->regs;
port.port.mapbase = res->start;
port.port.mapsize = resource_size(res);
port.port.startup = aspeed_vuart_startup;
@@ -485,7 +489,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.iotype = UPIO_MEM;
port.port.type = PORT_16550A;
port.port.uartclk = clk;
- port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
+ port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
| UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST;
if (of_property_read_bool(np, "no-loopback-test"))
@@ -502,6 +506,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
goto err_clk_disable;
vuart->line = rc;
+ vuart->port = serial8250_get_port(vuart->line);
rc = of_parse_phandle_with_fixed_args(
np, "aspeed,sirq-polarity-sense", 2, 0,
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index cae61d1ebec5..1ce193daea7f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -172,7 +172,6 @@ static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
static int serial_link_irq_chain(struct uart_8250_port *up)
{
struct hlist_head *h;
- struct hlist_node *n;
struct irq_info *i;
int ret;
@@ -180,13 +179,11 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
h = &irq_lists[up->port.irq % NR_IRQ_HASH];
- hlist_for_each(n, h) {
- i = hlist_entry(n, struct irq_info, node);
+ hlist_for_each_entry(i, h, node)
if (i->irq == up->port.irq)
break;
- }
- if (n == NULL) {
+ if (i == NULL) {
i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
if (i == NULL) {
mutex_unlock(&hash_mutex);
@@ -220,25 +217,18 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
static void serial_unlink_irq_chain(struct uart_8250_port *up)
{
- /*
- * yes, some broken gcc emit "warning: 'i' may be used uninitialized"
- * but no, we are not going to take a patch that assigns NULL below.
- */
struct irq_info *i;
- struct hlist_node *n;
struct hlist_head *h;
mutex_lock(&hash_mutex);
h = &irq_lists[up->port.irq % NR_IRQ_HASH];
- hlist_for_each(n, h) {
- i = hlist_entry(n, struct irq_info, node);
+ hlist_for_each_entry(i, h, node)
if (i->irq == up->port.irq)
break;
- }
- BUG_ON(n == NULL);
+ BUG_ON(i == NULL);
BUG_ON(i->head == NULL);
if (list_empty(i->head))
@@ -331,9 +321,9 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
* hardware interrupt, we use a timer-based system. The original
* driver used to do this with IRQ0.
*/
- if (!port->irq) {
+ if (!port->irq)
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
- } else
+ else
retval = serial_link_irq_chain(up);
return retval;
@@ -762,6 +752,7 @@ void serial8250_suspend_port(int line)
if (!console_suspend_enabled && uart_console(port) &&
port->type != PORT_8250) {
unsigned char canary = 0xa5;
+
serial_out(up, UART_SCR, canary);
if (serial_in(up, UART_SCR) == canary)
up->canary = canary;
@@ -915,7 +906,7 @@ static struct platform_device *serial8250_isa_devs;
*/
static DEFINE_MUTEX(serial_mutex);
-static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
+static struct uart_8250_port *serial8250_find_match_or_unused(const struct uart_port *port)
{
int i;
@@ -980,7 +971,7 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
*
* On success the port is ready to use and the line number is returned.
*/
-int serial8250_register_8250_port(struct uart_8250_port *up)
+int serial8250_register_8250_port(const struct uart_8250_port *up)
{
struct uart_8250_port *uart;
int ret = -ENOSPC;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index bd4e9f6ac29c..3ffeedc29c83 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -501,23 +501,27 @@ static const struct dmi_system_id exar_platforms[] = {
{}
};
+static const struct exar8250_platform *exar_get_platform(void)
+{
+ const struct dmi_system_id *dmi_match;
+
+ dmi_match = dmi_first_match(exar_platforms);
+ if (dmi_match)
+ return dmi_match->driver_data;
+
+ return &exar8250_default_platform;
+}
+
static int
pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
struct uart_8250_port *port, int idx)
{
- const struct exar8250_platform *platform;
- const struct dmi_system_id *dmi_match;
+ const struct exar8250_platform *platform = exar_get_platform();
unsigned int offset = idx * 0x400;
unsigned int baud = 7812500;
u8 __iomem *p;
int ret;
- dmi_match = dmi_first_match(exar_platforms);
- if (dmi_match)
- platform = dmi_match->driver_data;
- else
- platform = &exar8250_default_platform;
-
port->port.uartclk = baud * 16;
port->port.rs485_config = platform->rs485_config;
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 4e75d2e4f87c..fc65a2293ce9 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -30,10 +30,11 @@ struct fsl8250_data {
int fsl8250_handle_irq(struct uart_port *port)
{
unsigned char lsr, orig_lsr;
+ unsigned long flags;
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
- spin_lock(&up->port.lock);
+ spin_lock_irqsave(&up->port.lock, flags);
iir = port->serial_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
@@ -82,7 +83,7 @@ int fsl8250_handle_irq(struct uart_port *port)
up->lsr_saved_flags = orig_lsr;
- uart_unlock_and_check_sysrq(&up->port);
+ uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
return 1;
}
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index f7d3023f860f..fb65dc601b23 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -93,10 +93,13 @@ static void mtk8250_dma_rx_complete(void *param)
struct dma_tx_state state;
int copied, total, cnt;
unsigned char *ptr;
+ unsigned long flags;
if (data->rx_status == DMA_RX_SHUTDOWN)
return;
+ spin_lock_irqsave(&up->port.lock, flags);
+
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
total = dma->rx_size - state.residue;
cnt = total;
@@ -120,6 +123,8 @@ static void mtk8250_dma_rx_complete(void *param)
tty_flip_buffer_push(tty_port);
mtk8250_rx_dma(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static void mtk8250_rx_dma(struct uart_8250_port *up)
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 0b077b45d6a9..bce28729dd7b 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -192,6 +192,10 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
u32 tx_threshold;
int ret;
+ if (IS_ENABLED(CONFIG_SERIAL_8250_BCM7271) &&
+ of_device_is_compatible(ofdev->dev.of_node, "brcm,bcm7271-uart"))
+ return -ENODEV;
+
port_type = (unsigned long)of_device_get_match_data(&ofdev->dev);
if (port_type == PORT_UNKNOWN)
return -EINVAL;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 8ac11eaeca51..79418d4beb48 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -43,6 +43,7 @@
#define UART_ERRATA_CLOCK_DISABLE (1 << 3)
#define UART_HAS_EFR2 BIT(4)
#define UART_HAS_RHR_IT_DIS BIT(5)
+#define UART_RX_TIMEOUT_QUIRK BIT(6)
#define OMAP_UART_FCR_RX_TRIG 6
#define OMAP_UART_FCR_TX_TRIG 4
@@ -104,6 +105,9 @@
#define UART_OMAP_EFR2 0x23
#define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
+/* RX FIFO occupancy indicator */
+#define UART_OMAP_RX_LVL 0x64
+
struct omap8250_priv {
int line;
u8 habit;
@@ -611,6 +615,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port);
static irqreturn_t omap8250_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
+ struct omap8250_priv *priv = port->private_data;
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int iir;
int ret;
@@ -625,6 +630,18 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
serial8250_rpm_get(up);
iir = serial_port_in(port, UART_IIR);
ret = serial8250_handle_irq(port, iir);
+
+ /*
+ * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
+ * FIFO has been drained, in which case a dummy read of RX FIFO
+ * is required to clear RX TIMEOUT condition.
+ */
+ if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
+ (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
+ serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
+ serial_port_in(port, UART_RX);
+ }
+
serial8250_rpm_put(up);
return IRQ_RETVAL(ret);
@@ -813,7 +830,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
poll_count--)
cpu_relax();
- if (!poll_count)
+ if (poll_count == -1)
dev_err(p->port.dev, "teardown incomplete\n");
}
}
@@ -1218,7 +1235,8 @@ static struct omap8250_dma_params am33xx_dma = {
static struct omap8250_platdata am654_platdata = {
.dma_params = &am654_dma,
- .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS,
+ .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS |
+ UART_RX_TIMEOUT_QUIRK,
};
static struct omap8250_platdata am33xx_platdata = {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 780cc99732b6..a808c283883e 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2851,7 +2851,7 @@ enum pci_board_num_t {
pbn_b0_2_1843200,
pbn_b0_4_1843200,
- pbn_b0_1_4000000,
+ pbn_b0_1_3906250,
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
@@ -2931,10 +2931,10 @@ enum pci_board_num_t {
pbn_plx_romulus,
pbn_endrun_2_4000000,
pbn_oxsemi,
- pbn_oxsemi_1_4000000,
- pbn_oxsemi_2_4000000,
- pbn_oxsemi_4_4000000,
- pbn_oxsemi_8_4000000,
+ pbn_oxsemi_1_3906250,
+ pbn_oxsemi_2_3906250,
+ pbn_oxsemi_4_3906250,
+ pbn_oxsemi_8_3906250,
pbn_intel_i960,
pbn_sgi_ioc3,
pbn_computone_4,
@@ -2972,6 +2972,10 @@ enum pci_board_num_t {
pbn_sunix_pci_4s,
pbn_sunix_pci_8s,
pbn_sunix_pci_16s,
+ pbn_titan_1_4000000,
+ pbn_titan_2_4000000,
+ pbn_titan_4_4000000,
+ pbn_titan_8_4000000,
pbn_moxa8250_2p,
pbn_moxa8250_4p,
pbn_moxa8250_8p,
@@ -3077,10 +3081,10 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
},
- [pbn_b0_1_4000000] = {
+ [pbn_b0_1_3906250] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 8,
},
@@ -3475,31 +3479,31 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.uart_offset = 8,
},
- [pbn_oxsemi_1_4000000] = {
+ [pbn_oxsemi_1_3906250] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_2_4000000] = {
+ [pbn_oxsemi_2_3906250] = {
.flags = FL_BASE0,
.num_ports = 2,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_4_4000000] = {
+ [pbn_oxsemi_4_3906250] = {
.flags = FL_BASE0,
.num_ports = 4,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_8_4000000] = {
+ [pbn_oxsemi_8_3906250] = {
.flags = FL_BASE0,
.num_ports = 8,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
@@ -3759,6 +3763,34 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.uart_offset = 0x8,
},
+ [pbn_titan_1_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_titan_2_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_titan_4_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_titan_8_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
[pbn_moxa8250_2p] = {
.flags = FL_BASE1,
.num_ports = 2,
@@ -3804,6 +3836,12 @@ static const struct pci_device_id blacklist[] = {
{ PCI_VDEVICE(INTEL, 0x0f0c), },
{ PCI_VDEVICE(INTEL, 0x228a), },
{ PCI_VDEVICE(INTEL, 0x228c), },
+ { PCI_VDEVICE(INTEL, 0x4b96), },
+ { PCI_VDEVICE(INTEL, 0x4b97), },
+ { PCI_VDEVICE(INTEL, 0x4b98), },
+ { PCI_VDEVICE(INTEL, 0x4b99), },
+ { PCI_VDEVICE(INTEL, 0x4b9a), },
+ { PCI_VDEVICE(INTEL, 0x4b9b), },
{ PCI_VDEVICE(INTEL, 0x9ce3), },
{ PCI_VDEVICE(INTEL, 0x9ce4), },
@@ -3964,6 +4002,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
if (pci_match_id(pci_use_msi, dev)) {
dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
pci_set_master(dev);
+ uart.port.flags &= ~UPF_SHARE_IRQ;
rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
} else {
dev_dbg(&dev->dev, "Using legacy interrupts\n");
@@ -4478,158 +4517,158 @@ static const struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_oxsemi_4_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_oxsemi_4_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_oxsemi_8_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_oxsemi_8_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
/*
* Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_oxsemi_4_3906250 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_oxsemi_8_3906250 },
/*
* Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM,
PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
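The entries above move the OxSemi Tornado (OXPCIe) based boards from a 4,000,000 baud base to 3,906,250. That figure is consistent with a 62.5MHz reference clock divided by the usual 16x oversampling; the 62.5MHz value is taken as an assumption here, it is not stated in this hunk. A trivial standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int clk = 62500000;	/* assumed OXPCIe UART reference clock, Hz */
	const unsigned int oversampling = 16;	/* standard 16x oversampling */

	/* Baud base used by the pbn_oxsemi_*_3906250 board entries. */
	printf("baud base = %u\n", clk / oversampling);	/* prints 3906250 */
	return 0;
}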
/*
* SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
@@ -4703,22 +4742,22 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b0_4_921600 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_titan_1_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_titan_2_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_titan_4_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_titan_8_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_titan_2_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_titan_2_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_921600 },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fc5ab2032282..1da29a219842 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -311,7 +311,11 @@ static const struct serial8250_config uart_config[] = {
/* Uart divisor latch read */
static int default_serial_dl_read(struct uart_8250_port *up)
{
- return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+ /* Assign these in pieces to truncate any bits above 7. */
+ unsigned char dll = serial_in(up, UART_DLL);
+ unsigned char dlm = serial_in(up, UART_DLM);
+
+ return dll | dlm << 8;
}
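A minimal user-space sketch of why the divisor latch bytes are now read into byte-sized temporaries first: serial_in() returns an int, and if a bus implementation ever returned a value with bits set above bit 7, the old one-liner would propagate that garbage into the divisor, while the unsigned char assignments mask it off. The 0xffffff01/0xffffff00 values below are purely hypothetical bad return values used for illustration.

#include <stdio.h>

int main(void)
{
	/* Hypothetical register reads with junk above bit 7. */
	unsigned int raw_dll = 0xffffff01u;	/* DLL = 0x01 plus junk high bits */
	unsigned int raw_dlm = 0xffffff00u;	/* DLM = 0x00 plus junk high bits */

	/* Old style: the junk bits survive into the divisor. */
	unsigned int bad = raw_dll | raw_dlm << 8;

	/* New style: byte-sized temporaries truncate to 8 bits first. */
	unsigned char dll = raw_dll;
	unsigned char dlm = raw_dlm;
	unsigned int good = dll | (unsigned int)dlm << 8;

	printf("old: %#x  new: %#x\n", bad, good);	/* old: 0xffffff01  new: 0x1 */
	return 0;
}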
/* Uart divisor latch write */
@@ -1297,9 +1301,11 @@ static void autoconfig(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- scratch = serial_in(up, UART_IIR) >> 6;
- switch (scratch) {
+ /* Assign this as it is to truncate any bits above 7. */
+ scratch = serial_in(up, UART_IIR);
+
+ switch (scratch >> 6) {
case 0:
autoconfig_8250(up);
break;
@@ -1893,11 +1899,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned char status;
struct uart_8250_port *up = up_to_u8250p(port);
bool skip_rx = false;
+ unsigned long flags;
if (iir & UART_IIR_NO_INT)
return 0;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
status = serial_port_in(port, UART_LSR);
@@ -1923,7 +1930,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
(up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
- uart_unlock_and_check_sysrq(port);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
return 1;
}
@@ -2512,19 +2519,45 @@ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
{
+ upf_t magic_multiplier = port->flags & UPF_MAGIC_MULTIPLIER;
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int quot;
/*
- * Handle magic divisors for baud rates above baud_base on
- * SMSC SuperIO chips.
+ * Handle magic divisors for baud rates above baud_base on SMSC
+ * Super I/O chips. We clamp custom rates from clk/6 and clk/12
+ * up to clk/4 (0x8001) and clk/8 (0x8002) respectively. These
+ * magic divisors actually reprogram the baud rate generator's
+ * reference clock derived from the chip's 14.318MHz clock input.
+ *
+ * Documentation claims that with these magic divisors the base
+ * frequencies of 7.3728MHz and 3.6864MHz are used respectively
+ * for the extra baud rates of 460800bps and 230400bps rather
+ * than the usual base frequency of 1.8432MHz. However, empirical
+ * evidence contradicts that.
+ *
+ * Instead bit 7 of the DLM register (bit 15 of the divisor) is
+ * effectively used as a clock prescaler selection bit for the
+ * base frequency of 7.3728MHz, always used. If set to 0, then
+ * the base frequency is divided by 4 for use by the Baud Rate
+ * Generator, for the usual arrangement where a divisor value of 1
+ * produces the baud rate of 115200bps. Conversely,
+ * if set to 1 and high-speed operation has been enabled with the
+ * Serial Port Mode Register in the Device Configuration Space,
+ * then the base frequency is supplied directly to the Baud Rate
+ * Generator, so for the divisor values of 0x8001, 0x8002, 0x8003,
+ * 0x8004, etc. the respective baud rates produced are 460800bps,
+ * 230400bps, 153600bps, 115200bps, etc.
*
+ * In all cases only the low 15 bits of the divisor are used to divide
+ * the baud base and therefore 32767 is the maximum divisor value
+ * possible, even though documentation says that the programmable
+ * Baud Rate Generator is capable of dividing the internal PLL
+ * clock by any divisor from 1 to 65535.
*/
- if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/4))
+ if (magic_multiplier && baud >= port->uartclk / 6)
quot = 0x8001;
- else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/8))
+ else if (magic_multiplier && baud >= port->uartclk / 12)
quot = 0x8002;
else if (up->port.type == PORT_NPCM)
quot = npcm_get_divisor(up, baud);
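A self-contained sketch of the relaxed magic-divisor selection above, using 1,843,200Hz as the uartclk (baud_base 115200) of an SMSC Super I/O port; the uartclk value is an assumption for illustration, not something this hunk establishes. Any requested rate from clk/6 upwards is clamped to the clk/4 divisor 0x8001 (460800bps), and anything from clk/12 upwards to the clk/8 divisor 0x8002 (230400bps):

#include <stdio.h>

/* Mirrors the UPF_MAGIC_MULTIPLIER branch of serial8250_do_get_divisor(). */
static unsigned int magic_quot(unsigned int uartclk, unsigned int baud)
{
	if (baud >= uartclk / 6)
		return 0x8001;		/* BRG fed with clk/4 -> 460800bps */
	if (baud >= uartclk / 12)
		return 0x8002;		/* BRG fed with clk/8 -> 230400bps */
	/* Round-to-closest divisor, as uart_get_divisor() would compute. */
	return (uartclk + baud * 8) / (baud * 16);
}

int main(void)
{
	const unsigned int uartclk = 1843200;	/* assumed SMSC baud base * 16 */
	unsigned int rates[] = { 115200, 230400, 307200, 460800 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6u bps -> quot %#06x\n",
		       rates[i], magic_quot(uartclk, rates[i]));
	return 0;
}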
@@ -2629,6 +2662,21 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *old)
{
unsigned int tolerance = port->uartclk / 100;
+ unsigned int min;
+ unsigned int max;
+
+ /*
+ * Handle magic divisors for baud rates above baud_base on SMSC
+ * Super I/O chips. Enable custom rates of clk/4 and clk/8, but
+ * disable divisor values beyond 32767, which are unavailable.
+ */
+ if (port->flags & UPF_MAGIC_MULTIPLIER) {
+ min = port->uartclk / 16 / UART_DIV_MAX >> 1;
+ max = (port->uartclk + tolerance) / 4;
+ } else {
+ min = port->uartclk / 16 / UART_DIV_MAX;
+ max = (port->uartclk + tolerance) / 16;
+ }
/*
* Ask the core to calculate the divisor for us.
@@ -2636,9 +2684,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
* slower than nominal still match standard baud rates without
* causing transmission errors.
*/
- return uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / UART_DIV_MAX,
- (port->uartclk + tolerance) / 16);
+ return uart_get_baud_rate(port, termios, old, min, max);
}
/*
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 63ea9c4da3d5..dc2ef05a10eb 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -306,6 +306,7 @@ static int serial_resume(struct pcmcia_device *link)
static int serial_probe(struct pcmcia_device *link)
{
struct serial_info *info;
+ int ret;
dev_dbg(&link->dev, "serial_attach()\n");
@@ -320,7 +321,15 @@ static int serial_probe(struct pcmcia_device *link)
if (do_sound)
link->config_flags |= CONF_ENABLE_SPKR;
- return serial_config(link);
+ ret = serial_config(link);
+ if (ret)
+ goto free_info;
+
+ return 0;
+
+free_info:
+ kfree(info);
+ return ret;
}
static void serial_detach(struct pcmcia_device *link)
@@ -777,6 +786,7 @@ static const struct pcmcia_device_id serial_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e),
PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a),
PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41),
+ PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa),
PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab),
PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f),
PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d),
@@ -804,7 +814,6 @@ static const struct pcmcia_device_id serial_ids[] = {
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
- PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100 1.00.", 0x19ca78af, 0xf964f42b),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232 1.00.", 0x19ca78af, 0x69fb7490),
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 682f9171c82c..24282ad99d85 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1553,6 +1553,7 @@ config SERIAL_LITEUART_CONSOLE
bool "LiteUART serial port console support"
depends on SERIAL_LITEUART=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Say 'Y' or 'M' here if you wish to use the FPGA-based LiteUART serial
controller from LiteX SoC builder as the system console
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 78682c12156a..e14f3378b8a0 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1062,7 +1062,7 @@ static void pl011_dma_rx_poll(struct timer_list *t)
struct tty_port *port = &uap->port.state->port;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = uap->dmarx.chan;
- unsigned long flags = 0;
+ unsigned long flags;
unsigned int dmataken = 0;
unsigned int size = 0;
struct pl011_sgbuf *sgbuf;
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 1a9444b6b57e..596217d10d5c 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -149,7 +149,7 @@ static unsigned int arc_serial_tx_empty(struct uart_port *port)
/*
* Driver internal routine, used by both tty(serial core) as well as tx-isr
* -Called under spinlock in either cases
- * -also tty->stopped has already been checked
+ * -also tty->flow.stopped has already been checked
* = by uart_start( ) before calling us
* = tx_ist checks that too before calling
*/
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 058886d9045b..249ea35088d2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -30,9 +30,9 @@
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/mm.h>
+#include <linux/io.h>
#include <asm/div64.h>
-#include <asm/io.h>
#include <asm/ioctls.h>
#define PDC_BUFFER_SIZE 512
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 58aaa533203b..c719aa2b1832 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -524,24 +524,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
scval = 0;
/* byte size */
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- bits = 5;
- break;
- case CS6:
- bits = 6;
- break;
- case CS7:
- bits = 7;
- break;
- case CS8:
- bits = 8;
- break;
- /* Never happens, but GCC is too dumb to figure it out */
- default:
- bits = 8;
- break;
- }
+ bits = tty_get_char_size(termios->c_cflag);
sbits = bits - 5;
if (termios->c_cflag & CSTOPB) {
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 4552742c3859..e9edabc5a211 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -47,8 +47,8 @@
#include <linux/tty_flip.h>
#include <linux/atomic.h>
+#include <linux/io.h>
#include <asm/bootinfo.h>
-#include <asm/io.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/kn01.h>
@@ -115,7 +115,7 @@ static void dz_out(struct dz_port *dport, unsigned offset, u16 value)
* rs_stop () and rs_start ()
*
* These routines are called before setting or resetting
- * tty->stopped. They enable or disable transmitter interrupts,
+ * tty->flow.stopped. They enable or disable transmitter interrupts,
* as necessary.
* ------------------------------------------------------------
*/
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 794035041744..f0e5da77ed6d 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -824,21 +824,18 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
static void lpuart_txint(struct lpuart_port *sport)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
+ spin_lock(&sport->port.lock);
lpuart_transmit_buffer(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ spin_unlock(&sport->port.lock);
}
static void lpuart_rxint(struct lpuart_port *sport)
{
unsigned int flg, ignored = 0, overrun = 0;
struct tty_port *port = &sport->port.state->port;
- unsigned long flags;
unsigned char rx, sr;
- spin_lock_irqsave(&sport->port.lock, flags);
+ spin_lock(&sport->port.lock);
while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
flg = TTY_NORMAL;
@@ -850,7 +847,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
sr = readb(sport->port.membase + UARTSR1);
rx = readb(sport->port.membase + UARTDR);
- if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
+ if (uart_prepare_sysrq_char(&sport->port, rx))
continue;
if (sr & (UARTSR1_PE | UARTSR1_OR | UARTSR1_FE)) {
@@ -896,28 +893,26 @@ out:
writeb(UARTSFIFO_RXOF, sport->port.membase + UARTSFIFO);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_unlock_and_check_sysrq(&sport->port);
tty_flip_buffer_push(port);
}
static void lpuart32_txint(struct lpuart_port *sport)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
+ spin_lock(&sport->port.lock);
lpuart32_transmit_buffer(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ spin_unlock(&sport->port.lock);
}
static void lpuart32_rxint(struct lpuart_port *sport)
{
unsigned int flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
- unsigned long flags;
unsigned long rx, sr;
+ bool is_break;
- spin_lock_irqsave(&sport->port.lock, flags);
+ spin_lock(&sport->port.lock);
while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
flg = TTY_NORMAL;
@@ -928,16 +923,29 @@ static void lpuart32_rxint(struct lpuart_port *sport)
*/
sr = lpuart32_read(&sport->port, UARTSTAT);
rx = lpuart32_read(&sport->port, UARTDATA);
- rx &= 0x3ff;
+ rx &= UARTDATA_MASK;
+
+ /*
+ * The LPUART can't distinguish between a break and a framing error,
+ * thus we assume it is a break if the received data is zero.
+ */
+ is_break = (sr & UARTSTAT_FE) && !rx;
- if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
+ if (is_break && uart_handle_break(&sport->port))
+ continue;
+
+ if (uart_prepare_sysrq_char(&sport->port, rx))
continue;
if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
- if (sr & UARTSTAT_PE)
- sport->port.icount.parity++;
- else if (sr & UARTSTAT_FE)
+ if (sr & UARTSTAT_PE) {
+ if (is_break)
+ sport->port.icount.brk++;
+ else
+ sport->port.icount.parity++;
+ } else if (sr & UARTSTAT_FE) {
sport->port.icount.frame++;
+ }
if (sr & UARTSTAT_OR)
sport->port.icount.overrun++;
@@ -950,22 +958,24 @@ static void lpuart32_rxint(struct lpuart_port *sport)
sr &= sport->port.read_status_mask;
- if (sr & UARTSTAT_PE)
- flg = TTY_PARITY;
- else if (sr & UARTSTAT_FE)
+ if (sr & UARTSTAT_PE) {
+ if (is_break)
+ flg = TTY_BREAK;
+ else
+ flg = TTY_PARITY;
+ } else if (sr & UARTSTAT_FE) {
flg = TTY_FRAME;
+ }
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
-
- sport->port.sysrq = 0;
}
tty_insert_flip_char(port, rx, flg);
}
out:
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_unlock_and_check_sysrq(&sport->port);
tty_flip_buffer_push(port);
}
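The break handling added above hinges on the observation spelled out in the new comment: the LPUART reports a break as a framing error with an all-zero data byte. A compact user-space sketch of that classification; the STAT_* bit values are placeholders, not the driver's real UARTSTAT_* register layout:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder status bits -- stand-ins for the driver's UARTSTAT_* flags. */
#define STAT_PE (1u << 0)	/* parity error */
#define STAT_FE (1u << 1)	/* framing error */
#define STAT_OR (1u << 2)	/* receiver overrun */

static const char *classify(unsigned int sr, unsigned int rx)
{
	/* A break looks like a framing error with an all-zero character. */
	bool is_break = (sr & STAT_FE) && !rx;

	if (is_break)
		return "break";
	if (sr & STAT_PE)
		return "parity error";
	if (sr & STAT_FE)
		return "framing error";
	if (sr & STAT_OR)
		return "overrun";
	return "normal";
}

int main(void)
{
	printf("%s\n", classify(STAT_FE, 0x00));	/* break */
	printf("%s\n", classify(STAT_FE, 0x55));	/* framing error */
	printf("%s\n", classify(0, 0x41));		/* normal */
	return 0;
}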
@@ -1393,58 +1403,54 @@ static int lpuart32_config_rs485(struct uart_port *port,
static unsigned int lpuart_get_mctrl(struct uart_port *port)
{
- unsigned int temp = 0;
- unsigned char reg;
-
- reg = readb(port->membase + UARTMODEM);
- if (reg & UARTMODEM_TXCTSE)
- temp |= TIOCM_CTS;
+ unsigned int mctrl = 0;
+ u8 reg;
- if (reg & UARTMODEM_RXRTSE)
- temp |= TIOCM_RTS;
+ reg = readb(port->membase + UARTCR1);
+ if (reg & UARTCR1_LOOPS)
+ mctrl |= TIOCM_LOOP;
- return temp;
+ return mctrl;
}
static unsigned int lpuart32_get_mctrl(struct uart_port *port)
{
- unsigned int temp = 0;
- unsigned long reg;
-
- reg = lpuart32_read(port, UARTMODIR);
- if (reg & UARTMODIR_TXCTSE)
- temp |= TIOCM_CTS;
+ unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+ u32 reg;
- if (reg & UARTMODIR_RXRTSE)
- temp |= TIOCM_RTS;
+ reg = lpuart32_read(port, UARTCTRL);
+ if (reg & UARTCTRL_LOOPS)
+ mctrl |= TIOCM_LOOP;
- return temp;
+ return mctrl;
}
static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- unsigned char temp;
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
+ u8 reg;
- /* Make sure RXRTSE bit is not set when RS485 is enabled */
- if (!(sport->port.rs485.flags & SER_RS485_ENABLED)) {
- temp = readb(sport->port.membase + UARTMODEM) &
- ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+ reg = readb(port->membase + UARTCR1);
- if (mctrl & TIOCM_RTS)
- temp |= UARTMODEM_RXRTSE;
+ /* for internal loopback we need LOOPS=1 and RSRC=0 */
+ reg &= ~(UARTCR1_LOOPS | UARTCR1_RSRC);
+ if (mctrl & TIOCM_LOOP)
+ reg |= UARTCR1_LOOPS;
- if (mctrl & TIOCM_CTS)
- temp |= UARTMODEM_TXCTSE;
-
- writeb(temp, port->membase + UARTMODEM);
- }
+ writeb(reg, port->membase + UARTCR1);
}
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ u32 reg;
+
+ reg = lpuart32_read(port, UARTCTRL);
+ /* for internal loopback we need LOOPS=1 and RSRC=0 */
+ reg &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC);
+ if (mctrl & TIOCM_LOOP)
+ reg |= UARTCTRL_LOOPS;
+
+ lpuart32_write(port, reg, UARTCTRL);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
@@ -1581,6 +1587,9 @@ static void lpuart_tx_dma_startup(struct lpuart_port *sport)
u32 uartbaud;
int ret;
+ if (uart_console(&sport->port))
+ goto err;
+
if (!sport->dma_tx_chan)
goto err;
@@ -1610,6 +1619,9 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
int ret;
unsigned char cr3;
+ if (uart_console(&sport->port))
+ goto err;
+
if (!sport->dma_rx_chan)
goto err;
@@ -1625,7 +1637,7 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
sport->lpuart_dma_rx_use = true;
rx_dma_timer_init(sport);
- if (sport->port.has_sysrq) {
+ if (sport->port.has_sysrq && !lpuart_is_32(sport)) {
cr3 = readb(sport->port.membase + UARTCR3);
cr3 |= UARTCR3_FEIE;
writeb(cr3, sport->port.membase + UARTCR3);
@@ -2278,7 +2290,7 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- if (sport->port.sysrq || oops_in_progress)
+ if (oops_in_progress)
locked = spin_trylock_irqsave(&sport->port.lock, flags);
else
spin_lock_irqsave(&sport->port.lock, flags);
@@ -2308,7 +2320,7 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- if (sport->port.sysrq || oops_in_progress)
+ if (oops_in_progress)
locked = spin_trylock_irqsave(&sport->port.lock, flags);
else
spin_lock_irqsave(&sport->port.lock, flags);
@@ -2414,6 +2426,9 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
bd = lpuart32_read(&sport->port, UARTBAUD);
bd &= UARTBAUD_SBR_MASK;
+ if (!bd)
+ return;
+
sbr = bd;
uartclk = lpuart_get_baud_clk_rate(sport);
/*
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 9e9abfc4824a..03a2fe9f4c9a 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -37,7 +37,7 @@
#include <linux/firmware.h>
#include <linux/bitops.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 7d5a8dfa3e91..8b121cd869e9 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -225,6 +225,8 @@ struct imx_port {
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
struct circ_buf rx_ring;
+ unsigned int rx_buf_size;
+ unsigned int rx_period_length;
unsigned int rx_periods;
dma_cookie_t rx_cookie;
unsigned int tx_bytes;
@@ -1183,10 +1185,6 @@ static void imx_uart_dma_rx_callback(void *data)
}
}
-/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 16
-#define RX_BUF_SIZE (RX_DMA_PERIODS * PAGE_SIZE / 4)
-
static int imx_uart_start_rx_dma(struct imx_port *sport)
{
struct scatterlist *sgl = &sport->rx_sgl;
@@ -1197,9 +1195,8 @@ static int imx_uart_start_rx_dma(struct imx_port *sport)
sport->rx_ring.head = 0;
sport->rx_ring.tail = 0;
- sport->rx_periods = RX_DMA_PERIODS;
- sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
+ sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
if (ret == 0) {
dev_err(dev, "DMA mapping error for RX.\n");
@@ -1316,7 +1313,8 @@ static int imx_uart_dma_init(struct imx_port *sport)
goto err;
}
- sport->rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
+ sport->rx_buf_size = sport->rx_period_length * sport->rx_periods;
+ sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
if (!sport->rx_buf) {
ret = -ENOMEM;
goto err;
@@ -1975,8 +1973,8 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
{
struct imx_port *sport = imx_uart_ports[co->index];
struct imx_port_ucrs old_ucr;
+ unsigned long flags;
unsigned int ucr1;
- unsigned long flags = 0;
int locked = 1;
if (sport->port.sysrq)
@@ -2179,11 +2177,16 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
return HRTIMER_NORESTART;
}
+/* Default RX DMA buffer configuration */
+#define RX_DMA_PERIODS 16
+#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
+
static int imx_uart_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct imx_port *sport;
void __iomem *base;
+ u32 dma_buf_conf[2];
int ret = 0;
u32 ucr1;
struct resource *res;
@@ -2218,6 +2221,14 @@ static int imx_uart_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,inverted-rx", NULL))
sport->inverted_rx = 1;
+ if (!of_property_read_u32_array(np, "fsl,dma-info", dma_buf_conf, 2)) {
+ sport->rx_period_length = dma_buf_conf[0];
+ sport->rx_periods = dma_buf_conf[1];
+ } else {
+ sport->rx_period_length = RX_DMA_PERIOD_LEN;
+ sport->rx_periods = RX_DMA_PERIODS;
+ }
+
if (sport->port.line >= ARRAY_SIZE(imx_uart_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n",
sport->port.line);
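With the new fsl,dma-info handling the RX DMA buffer size is simply the period length multiplied by the number of periods, falling back to PAGE_SIZE/4 bytes over 16 periods when the property is absent. A quick sanity check of both cases; the 4096-byte page size and the example property values are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned int period_len, periods;

	/* Defaults: RX_DMA_PERIOD_LEN and RX_DMA_PERIODS. */
	period_len = page_size / 4;
	periods = 16;
	printf("default rx_buf_size = %u bytes\n", period_len * periods);	/* 16384 */

	/* Hypothetical fsl,dma-info = <1024 4> from the device tree. */
	period_len = 1024;
	periods = 4;
	printf("dt rx_buf_size = %u bytes\n", period_len * periods);		/* 4096 */
	return 0;
}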
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 86fff69d7e7c..f4dc5fe4ba92 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -31,7 +31,7 @@
#include <linux/spinlock.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/sgialib.h>
#include <asm/sgi/ioc.h>
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index db059b66438e..3e7c6ee8e4b3 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -115,7 +115,7 @@ static void kgdb_tty_recv(int ch)
static int kgdb_nmi_poll_one_knock(void)
{
static int n;
- int c = -1;
+ int c;
const char *magic = kgdb_nmi_magic;
size_t m = strlen(magic);
bool printch = false;
@@ -298,7 +298,7 @@ static void kgdb_nmi_tty_hangup(struct tty_struct *tty)
tty_port_hangup(&priv->port);
}
-static int kgdb_nmi_tty_write_room(struct tty_struct *tty)
+static unsigned int kgdb_nmi_tty_write_room(struct tty_struct *tty)
{
/* Actually, we can handle any amount as we use polled writes. */
return 2048;
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 0b06770642cb..dbc0559a9157 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -370,6 +370,27 @@ static int __init liteuart_console_init(void)
return 0;
}
console_initcall(liteuart_console_init);
+
+static void early_liteuart_write(struct console *console, const char *s,
+ unsigned int count)
+{
+ struct earlycon_device *device = console->data;
+ struct uart_port *port = &device->port;
+
+ uart_console_write(port, s, count, liteuart_putchar);
+}
+
+static int __init early_liteuart_setup(struct earlycon_device *device,
+ const char *options)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = early_liteuart_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(liteuart, "litex,liteuart", early_liteuart_setup);
#endif /* CONFIG_SERIAL_LITEUART_CONSOLE */
static int __init liteuart_init(void)
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3cbc757d7be7..ef11860cd69e 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -552,7 +552,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
@@ -618,7 +618,7 @@ static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
}
}
- return (int)bestfreq;
+ return bestfreq;
}
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
@@ -1253,9 +1253,10 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
struct regmap *regmap, int irq)
{
- int i, ret, fmin, fmax, freq, uartclk;
+ int i, ret, fmin, fmax, freq;
struct max310x_port *s;
- bool xtal = false;
+ u32 uartclk = 0;
+ bool xtal;
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -1267,24 +1268,20 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
return -ENOMEM;
}
+ /* Always ask for fixed clock rate from a property. */
+ device_property_read_u32(dev, "clock-frequency", &uartclk);
+
s->clk = devm_clk_get_optional(dev, "osc");
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
if (s->clk) {
- fmin = 500000;
- fmax = 35000000;
+ xtal = false;
} else {
s->clk = devm_clk_get_optional(dev, "xtal");
if (IS_ERR(s->clk))
return PTR_ERR(s->clk);
- if (s->clk) {
- fmin = 1000000;
- fmax = 4000000;
- xtal = true;
- } else {
- dev_err(dev, "Cannot get clock\n");
- return -EINVAL;
- }
+
+ xtal = true;
}
ret = clk_prepare_enable(s->clk);
@@ -1292,6 +1289,22 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
return ret;
freq = clk_get_rate(s->clk);
+ if (freq == 0)
+ freq = uartclk;
+ if (freq == 0) {
+ dev_err(dev, "Cannot get clock rate\n");
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+ if (xtal) {
+ fmin = 1000000;
+ fmax = 4000000;
+ } else {
+ fmin = 500000;
+ fmax = 35000000;
+ }
+
/* Check frequency limits */
if (freq < fmin || freq > fmax) {
ret = -ERANGE;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 529cd0289056..efee3935917f 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -715,13 +715,15 @@ static int meson_uart_probe(struct platform_device *pdev)
{
struct resource *res_mem, *res_irq;
struct uart_port *port;
+ u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
int ret = 0;
- int id = -1;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
if (pdev->id < 0) {
+ int id;
+
for (id = AML_UART_PORT_OFFSET; id < AML_UART_PORT_NUM; id++) {
if (!meson_ports[id]) {
pdev->id = id;
@@ -741,6 +743,8 @@ static int meson_uart_probe(struct platform_device *pdev)
if (!res_irq)
return -ENODEV;
+ of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
+
if (meson_ports[pdev->id]) {
dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
return -EBUSY;
@@ -770,7 +774,7 @@ static int meson_uart_probe(struct platform_device *pdev)
port->type = PORT_MESON;
port->x_char = 0;
port->ops = &meson_uart_ops;
- port->fifosize = 64;
+ port->fifosize = fifosize;
meson_ports[pdev->id] = port;
platform_set_drvdata(pdev, port);
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 47ab280f553b..be640d9863cd 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -21,7 +21,7 @@
#include <linux/console.h>
#include <linux/delay.h> /* for udelay */
#include <linux/device.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/parisc-device.h>
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 51b0ecabf2ec..231de29a6452 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -128,7 +128,6 @@ struct mvebu_uart {
struct uart_port *port;
struct clk *clk;
int irq[UART_IRQ_COUNT];
- unsigned char __iomem *nb;
struct mvebu_uart_driver_data *data;
#if defined(CONFIG_PM)
struct mvebu_uart_pm_regs pm_regs;
@@ -445,12 +444,11 @@ static void mvebu_uart_shutdown(struct uart_port *port)
static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
- struct mvebu_uart *mvuart = to_mvuart(port);
unsigned int d_divisor, m_divisor;
u32 brdv, osamp;
- if (IS_ERR(mvuart->clk))
- return -PTR_ERR(mvuart->clk);
+ if (!port->uartclk)
+ return -EOPNOTSUPP;
/*
* The baudrate is derived from the UART clock thanks to two divisors:
@@ -463,7 +461,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
* makes use of D to configure the desired baudrate.
*/
m_divisor = OSAMP_DEFAULT_DIVISOR;
- d_divisor = DIV_ROUND_UP(port->uartclk, baud * m_divisor);
+ d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
brdv = readl(port->membase + UART_BRDV);
brdv &= ~BRDV_BAUD_MASK;
@@ -482,7 +480,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
struct ktermios *old)
{
unsigned long flags;
- unsigned int baud;
+ unsigned int baud, min_baud, max_baud;
spin_lock_irqsave(&port->lock, flags);
@@ -501,16 +499,21 @@ static void mvebu_uart_set_termios(struct uart_port *port,
port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
/*
+	 * The maximal divisor is 1023 * 16 when using the default (x16) scheme.
	 * Maximum achievable frequency with simple baudrate divisor is 230400.
	 * Since the error per bit frame would be more than 15%, achieving
	 * higher frequencies would require implementing the fractional divisor
* feature.
*/
- baud = uart_get_baud_rate(port, termios, old, 0, 230400);
+ min_baud = DIV_ROUND_UP(port->uartclk, 1023 * 16);
+ max_baud = 230400;
+
+ baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
if (mvebu_uart_baud_rate_set(port, baud)) {
/* No clock available, baudrate cannot be changed */
if (old)
- baud = uart_get_baud_rate(port, old, NULL, 0, 230400);
+ baud = uart_get_baud_rate(port, old, NULL,
+ min_baud, max_baud);
} else {
tty_termios_encode_baud_rate(termios, baud, baud);
uart_update_timeout(port, termios->c_cflag, baud);
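The new lower bound follows directly from the 10-bit (1023) divisor limit mentioned in the comment: min_baud = DIV_ROUND_UP(uartclk, 1023 * 16). A worked example, assuming a 25MHz UART clock (the actual rate depends on the SoC clock tree and is not part of this hunk):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned int uartclk = 25000000;	/* assumed 25MHz UART clock */
	unsigned int min_baud = DIV_ROUND_UP(uartclk, 1023 * 16);	/* 1528 */
	unsigned int max_baud = 230400;		/* simple divisor scheme limit */

	printf("supported range: %u..%u baud\n", min_baud, max_baud);
	return 0;
}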
@@ -617,7 +620,7 @@ static void mvebu_uart_putc(struct uart_port *port, int c)
static void mvebu_uart_putc_early_write(struct console *con,
const char *s,
- unsigned n)
+ unsigned int n)
{
struct earlycon_device *dev = con->data;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index f414d6acad69..ac45f3386e97 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -87,7 +87,7 @@
#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8)
#define AUART_LINECTRL_SPS (1 << 7)
#define AUART_LINECTRL_WLEN_MASK 0x00000060
-#define AUART_LINECTRL_WLEN(v) (((v) & 0x3) << 5)
+#define AUART_LINECTRL_WLEN(v) ((((v) - 5) & 0x3) << 5)
#define AUART_LINECTRL_FEN (1 << 4)
#define AUART_LINECTRL_STP2 (1 << 3)
#define AUART_LINECTRL_EPS (1 << 2)
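The reworked AUART_LINECTRL_WLEN() macro now takes the character size itself (5-8 bits) rather than the raw 0-3 field encoding, which is what lets the driver feed it tty_get_char_size() directly and compare against WLEN(7) when decoding console options. A minimal check that the new mapping matches the old field values:

#include <stdio.h>

/* Old form took the encoded field (0..3); the new form takes the word
 * length in bits (5..8) and subtracts 5 before masking.
 */
#define WLEN_OLD(v) (((v) & 0x3) << 5)
#define WLEN_NEW(v) ((((v) - 5) & 0x3) << 5)

int main(void)
{
	unsigned int bits;

	for (bits = 5; bits <= 8; bits++)
		printf("%u bits: new %#04x == old %#04x\n",
		       bits, WLEN_NEW(bits), WLEN_OLD(bits - 5));
	return 0;
}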
@@ -962,7 +962,7 @@ static void mxs_auart_settermios(struct uart_port *u,
struct ktermios *old)
{
struct mxs_auart_port *s = to_auart_port(u);
- u32 bm, ctrl, ctrl2, div;
+ u32 ctrl, ctrl2, div;
unsigned int cflag, baud, baud_min, baud_max;
cflag = termios->c_cflag;
@@ -970,25 +970,7 @@ static void mxs_auart_settermios(struct uart_port *u,
ctrl = AUART_LINECTRL_FEN;
ctrl2 = mxs_read(s, REG_CTRL2);
- /* byte size */
- switch (cflag & CSIZE) {
- case CS5:
- bm = 0;
- break;
- case CS6:
- bm = 1;
- break;
- case CS7:
- bm = 2;
- break;
- case CS8:
- bm = 3;
- break;
- default:
- return;
- }
-
- ctrl |= AUART_LINECTRL_WLEN(bm);
+ ctrl |= AUART_LINECTRL_WLEN(tty_get_char_size(cflag));
/* parity */
if (cflag & PARENB) {
@@ -1403,7 +1385,7 @@ auart_console_get_options(struct mxs_auart_port *s, int *baud,
*parity = 'o';
}
- if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(2))
+ if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(7))
*bits = 7;
else
*bits = 8;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 84e8158088cd..9e81b09ba08e 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -626,7 +626,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
static unsigned int serial_omap_tx_empty(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- unsigned long flags = 0;
+ unsigned long flags;
unsigned int ret = 0;
pm_runtime_get_sync(up->dev);
@@ -704,7 +704,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- unsigned long flags = 0;
+ unsigned long flags;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
pm_runtime_get_sync(up->dev);
@@ -722,7 +722,7 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
static int serial_omap_startup(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- unsigned long flags = 0;
+ unsigned long flags;
int retval;
/*
@@ -797,7 +797,7 @@ static int serial_omap_startup(struct uart_port *port)
static void serial_omap_shutdown(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- unsigned long flags = 0;
+ unsigned long flags;
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
@@ -845,7 +845,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char cval = 0;
- unsigned long flags = 0;
+ unsigned long flags;
unsigned int baud, quot;
switch (termios->c_cflag & CSIZE) {
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index d6aef8a1f0a4..12ce150b0ad4 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -47,7 +47,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/sections.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 23d729ed3bf6..aedc38893e6c 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,21 +1050,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
}
/* bits per char */
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- bits_per_char = 5;
- break;
- case CS6:
- bits_per_char = 6;
- break;
- case CS7:
- bits_per_char = 7;
- break;
- case CS8:
- default:
- bits_per_char = 8;
- break;
- }
+ bits_per_char = tty_get_char_size(termios->c_cflag);
/* stop bits */
if (termios->c_cflag & CSTOPB)
@@ -1338,7 +1324,7 @@ static const struct uart_ops qcom_geni_uart_pops = {
static int qcom_geni_serial_probe(struct platform_device *pdev)
{
int ret = 0;
- int line = -1;
+ int line;
struct qcom_geni_serial_port *port;
struct uart_port *uport;
struct resource *res;
@@ -1354,7 +1340,9 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
line = of_alias_get_id(pdev->dev.of_node, "serial");
} else {
drv = &qcom_geni_uart_driver;
- line = of_alias_get_id(pdev->dev.of_node, "hsuart");
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (line == -ENODEV) /* compat with non-standard aliases */
+ line = of_alias_get_id(pdev->dev.of_node, "hsuart");
}
port = get_port_from_line(line, console);
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index d9e4b67a12a0..9fbc61151c2e 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -2220,8 +2220,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
default:
dev_warn(&pdev->dev, "unsupported reg-io-width (%d)\n",
prop);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
}
}
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 22c7bc90b104..738df6d9c0d9 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -34,7 +34,7 @@
#include <linux/types.h>
#include <linux/refcount.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_uart.h>
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9adb8362578c..acbb615dd28f 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1208,8 +1208,16 @@ static int sc16is7xx_probe(struct device *dev,
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
- s->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(s->clk)) {
+ s->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(s->clk))
+ return PTR_ERR(s->clk);
+
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
+
+ freq = clk_get_rate(s->clk);
+ if (freq == 0) {
if (uartclk)
freq = uartclk;
if (pfreq)
@@ -1217,13 +1225,7 @@ static int sc16is7xx_probe(struct device *dev,
if (freq)
dev_dbg(dev, "Clock frequency: %luHz\n", freq);
else
- return PTR_ERR(s->clk);
- } else {
- ret = clk_prepare_enable(s->clk);
- if (ret)
- return ret;
-
- freq = clk_get_rate(s->clk);
+ return -EINVAL;
}
s->regmap = regmap;
@@ -1358,8 +1360,7 @@ out_thread:
kthread_stop(s->kworker_task);
out_clk:
- if (!IS_ERR(s->clk))
- clk_disable_unprepare(s->clk);
+ clk_disable_unprepare(s->clk);
return ret;
}
@@ -1383,8 +1384,7 @@ static int sc16is7xx_remove(struct device *dev)
kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
- if (!IS_ERR(s->clk))
- clk_disable_unprepare(s->clk);
+ clk_disable_unprepare(s->clk);
return 0;
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 222032792d6c..eba5b9ecba34 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1045,9 +1045,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
if (tup->cdata->fifo_mode_enable_status) {
ret = tegra_uart_wait_fifo_mode_enabled(tup);
- dev_err(tup->uport.dev, "FIFO mode not enabled\n");
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(tup->uport.dev,
+ "Failed to enable FIFO mode: %d\n", ret);
return ret;
+ }
} else {
/*
* For all tegra devices (up to t210), there is a hardware
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 18ff85a83f80..69092deba11f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -184,8 +184,8 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
int init_hw)
{
struct uart_port *uport = uart_port_check(state);
+ unsigned long flags;
unsigned long page;
- unsigned long flags = 0;
int retval = 0;
if (uport->type == PORT_UNKNOWN)
@@ -275,7 +275,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
- unsigned long flags = 0;
+ unsigned long flags;
char *xmit_buf = NULL;
/*
@@ -334,39 +334,15 @@ void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud)
{
- unsigned int bits;
+ unsigned int size;
- /* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5:
- bits = 7;
- break;
- case CS6:
- bits = 8;
- break;
- case CS7:
- bits = 9;
- break;
- default:
- bits = 10;
- break; /* CS8 */
- }
-
- if (cflag & CSTOPB)
- bits++;
- if (cflag & PARENB)
- bits++;
-
- /*
- * The total number of bits to be transmitted in the fifo.
- */
- bits = bits * port->fifosize;
+ size = tty_get_frame_size(cflag) * port->fifosize;
/*
* Figure the timeout to send the above number of bits.
* Add .02 seconds of slop
*/
- port->timeout = (HZ * bits) / baud + HZ/50;
+ port->timeout = (HZ * size) / baud + HZ/50;
}
EXPORT_SYMBOL(uart_update_timeout);
@@ -616,12 +592,12 @@ static int uart_write(struct tty_struct *tty,
return ret;
}
-static int uart_write_room(struct tty_struct *tty)
+static unsigned int uart_write_room(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
- int ret;
+ unsigned int ret;
port = uart_port_lock(state, flags);
ret = uart_circ_chars_free(&state->xmit);
@@ -629,12 +605,12 @@ static int uart_write_room(struct tty_struct *tty)
return ret;
}
-static int uart_chars_in_buffer(struct tty_struct *tty)
+static unsigned int uart_chars_in_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
- int ret;
+ unsigned int ret;
port = uart_port_lock(state, flags);
ret = uart_circ_chars_pending(&state->xmit);
@@ -3029,26 +3005,28 @@ out:
/*
* Are the two ports equivalent?
*/
-int uart_match_port(struct uart_port *port1, struct uart_port *port2)
+bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2)
{
if (port1->iotype != port2->iotype)
- return 0;
+ return false;
switch (port1->iotype) {
case UPIO_PORT:
- return (port1->iobase == port2->iobase);
+ return port1->iobase == port2->iobase;
case UPIO_HUB6:
- return (port1->iobase == port2->iobase) &&
- (port1->hub6 == port2->hub6);
+ return port1->iobase == port2->iobase &&
+ port1->hub6 == port2->hub6;
case UPIO_MEM:
case UPIO_MEM16:
case UPIO_MEM32:
case UPIO_MEM32BE:
case UPIO_AU:
case UPIO_TSI:
- return (port1->mapbase == port2->mapbase);
+ return port1->mapbase == port2->mapbase;
}
- return 0;
+
+ return false;
}
EXPORT_SYMBOL(uart_match_port);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 0a7e5b74bc1d..aaca4fe38486 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -24,7 +24,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <asm/io.h>
+#include <linux/io.h>
static char *serial_version = "1.11";
static char *serial_name = "TX39/49 Serial driver";
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4baf1316ea72..07eb56294371 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -289,7 +289,7 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
},
/*
- * The "SCIFA" that is in RZ/T and RZ/A2.
+ * The "SCIFA" that is in RZ/A2, RZ/G2L and RZ/T.
* It looks like a normal SCIF with FIFO data, but with a
 * compressed address space. Also, the breakout of interrupts is
 * different: ERI/BRI, RXI, TXI, TEI, DRI.
@@ -306,6 +306,7 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
[SCFDR] = { 0x0E, 16 },
[SCSPTR] = { 0x10, 16 },
[SCLSR] = { 0x12, 16 },
+ [SEMR] = { 0x14, 8 },
},
.fifosize = 16,
.overrun_reg = SCLSR,
@@ -610,6 +611,14 @@ static void sci_stop_tx(struct uart_port *port)
ctrl &= ~SCSCR_TIE;
serial_port_out(port, SCSCR, ctrl);
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ if (to_sci_port(port)->chan_tx &&
+ !dma_submit_error(to_sci_port(port)->cookie_tx)) {
+ dmaengine_terminate_async(to_sci_port(port)->chan_tx);
+ to_sci_port(port)->cookie_tx = -EINVAL;
+ }
+#endif
}
static void sci_start_rx(struct uart_port *port)
@@ -840,9 +849,6 @@ static void sci_transmit_chars(struct uart_port *port)
}
-/* On SH3, SCIF may read end-of-break as a space->mark char */
-#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
-
static void sci_receive_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
@@ -2494,25 +2500,10 @@ done:
uart_update_timeout(port, termios->c_cflag, baud);
/* byte size and parity */
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- bits = 7;
- break;
- case CS6:
- bits = 8;
- break;
- case CS7:
- bits = 9;
- break;
- default:
- bits = 10;
- break;
- }
+ bits = tty_get_frame_size(termios->c_cflag);
- if (termios->c_cflag & CSTOPB)
- bits++;
- if (termios->c_cflag & PARENB)
- bits++;
+ if (sci_getreg(port, SEMR)->size)
+ serial_port_out(port, SEMR, 0);
if (best_clk >= 0) {
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -3170,6 +3161,10 @@ static const struct of_device_id of_sci_match[] = {
.compatible = "renesas,scif-r7s9210",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
},
+ {
+ .compatible = "renesas,scif-r9a07g044",
+ .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+ },
/* Family-specific types */
{
.compatible = "renesas,rcar-gen1-scif",
@@ -3452,6 +3447,7 @@ static int __init rzscifa_early_console_setup(struct earlycon_device *device,
port_cfg.regtype = SCIx_RZ_SCIFA_REGTYPE;
return early_console_setup(device, PORT_SCIF);
}
+
static int __init scifa_early_console_setup(struct earlycon_device *device,
const char *opt)
{
@@ -3471,6 +3467,7 @@ static int __init hscif_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif-r7s9210", rzscifa_early_console_setup);
+OF_EARLYCON_DECLARE(scif, "renesas,scif-r9a07g044", rzscifa_early_console_setup);
OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index c0dfe4382898..c0ae78632dda 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -31,6 +31,7 @@ enum {
SCCKS, /* BRG Clock Select Register */
HSRTRGR, /* Rx FIFO Data Count Trigger Register */
HSTTRGR, /* Tx FIFO Data Count Trigger Register */
+ SEMR, /* Serial extended mode register */
SCIx_NR_REGS,
};
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index e7048515a79c..87e480cc8206 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -65,7 +65,7 @@ static struct uart_driver asc_uart_driver;
/* ASC_RXBUF */
#define ASC_RXBUF_PE 0x100
#define ASC_RXBUF_FE 0x200
-/**
+/*
 * Some of the status comes from the higher bits of the character and some
 * comes from the status register. Both are combined into a single status
 * word using dummy bits.
@@ -478,7 +478,7 @@ static void asc_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct asc_port *ascport = to_asc_port(port);
- unsigned long flags = 0;
+ unsigned long flags;
u32 ctl;
switch (state) {
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index c2ae7b392b86..ef793b3b4591 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -718,36 +718,6 @@ static void stm32_usart_shutdown(struct uart_port *port)
free_irq(port->irq, port);
}
-static unsigned int stm32_usart_get_databits(struct ktermios *termios)
-{
- unsigned int bits;
-
- tcflag_t cflag = termios->c_cflag;
-
- switch (cflag & CSIZE) {
- /*
- * CSIZE settings are not necessarily supported in hardware.
- * CSIZE unsupported configurations are handled here to set word length
- * to 8 bits word as default configuration and to print debug message.
- */
- case CS5:
- bits = 5;
- break;
- case CS6:
- bits = 6;
- break;
- case CS7:
- bits = 7;
- break;
- /* default including CS8 */
- default:
- bits = 8;
- break;
- }
-
- return bits;
-}
-
static void stm32_usart_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
@@ -805,7 +775,7 @@ static void stm32_usart_set_termios(struct uart_port *port,
if (cflag & CSTOPB)
cr2 |= USART_CR2_STOP_2B;
- bits = stm32_usart_get_databits(termios);
+ bits = tty_get_char_size(cflag);
stm32_port->rdr_mask = (BIT(bits) - 1);
if (cflag & PARENB) {
@@ -980,7 +950,7 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state,
struct stm32_port, port);
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
const struct stm32_usart_config *cfg = &stm32port->info->cfg;
- unsigned long flags = 0;
+ unsigned long flags;
switch (state) {
case UART_PM_STATE_ON:
@@ -1182,6 +1152,14 @@ static const struct of_device_id stm32_match[] = {
MODULE_DEVICE_TABLE(of, stm32_match);
#endif
+static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
+ struct platform_device *pdev)
+{
+ if (stm32port->rx_buf)
+ dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
+ stm32port->rx_dma_buf);
+}
+
static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
struct platform_device *pdev)
{
@@ -1199,19 +1177,11 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
if (uart_console(port))
return -ENODEV;
- /* Request DMA RX channel */
- stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
- if (!stm32port->rx_ch) {
- dev_info(dev, "rx dma alloc failed\n");
- return -ENODEV;
- }
stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
&stm32port->rx_dma_buf,
GFP_KERNEL);
- if (!stm32port->rx_buf) {
- ret = -ENOMEM;
- goto alloc_err;
- }
+ if (!stm32port->rx_buf)
+ return -ENOMEM;
/* Configure DMA channel */
memset(&config, 0, sizeof(config));
@@ -1221,8 +1191,8 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
ret = dmaengine_slave_config(stm32port->rx_ch, &config);
if (ret < 0) {
dev_err(dev, "rx dma channel config failed\n");
- ret = -ENODEV;
- goto config_err;
+ stm32_usart_of_dma_rx_remove(stm32port, pdev);
+ return ret;
}
/* Prepare a DMA cyclic transaction */
@@ -1232,8 +1202,8 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "rx dma prep cyclic failed\n");
- ret = -ENODEV;
- goto config_err;
+ stm32_usart_of_dma_rx_remove(stm32port, pdev);
+ return -ENODEV;
}
/* No callback as dma buffer is drained on usart interrupt */
@@ -1244,24 +1214,22 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
ret = dma_submit_error(dmaengine_submit(desc));
if (ret) {
dmaengine_terminate_sync(stm32port->rx_ch);
- goto config_err;
+ stm32_usart_of_dma_rx_remove(stm32port, pdev);
+ return ret;
}
/* Issue pending DMA requests */
dma_async_issue_pending(stm32port->rx_ch);
return 0;
+}
-config_err:
- dma_free_coherent(&pdev->dev,
- RX_BUF_L, stm32port->rx_buf,
- stm32port->rx_dma_buf);
-
-alloc_err:
- dma_release_channel(stm32port->rx_ch);
- stm32port->rx_ch = NULL;
-
- return ret;
+static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
+ struct platform_device *pdev)
+{
+ if (stm32port->tx_buf)
+ dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
+ stm32port->tx_dma_buf);
}
static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
@@ -1275,19 +1243,11 @@ static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
stm32port->tx_dma_busy = false;
- /* Request DMA TX channel */
- stm32port->tx_ch = dma_request_slave_channel(dev, "tx");
- if (!stm32port->tx_ch) {
- dev_info(dev, "tx dma alloc failed\n");
- return -ENODEV;
- }
stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
&stm32port->tx_dma_buf,
GFP_KERNEL);
- if (!stm32port->tx_buf) {
- ret = -ENOMEM;
- goto alloc_err;
- }
+ if (!stm32port->tx_buf)
+ return -ENOMEM;
/* Configure DMA channel */
memset(&config, 0, sizeof(config));
@@ -1297,22 +1257,11 @@ static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
ret = dmaengine_slave_config(stm32port->tx_ch, &config);
if (ret < 0) {
dev_err(dev, "tx dma channel config failed\n");
- ret = -ENODEV;
- goto config_err;
+ stm32_usart_of_dma_tx_remove(stm32port, pdev);
+ return ret;
}
return 0;
-
-config_err:
- dma_free_coherent(&pdev->dev,
- TX_BUF_L, stm32port->tx_buf,
- stm32port->tx_dma_buf);
-
-alloc_err:
- dma_release_channel(stm32port->tx_ch);
- stm32port->tx_ch = NULL;
-
- return ret;
}
static int stm32_usart_serial_probe(struct platform_device *pdev)
@@ -1336,16 +1285,43 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
if (ret)
- goto err_nowup;
+ goto err_deinit_port;
}
- ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
- if (ret)
- dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
+ stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
+ if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_wakeirq;
+ }
+	/* Fall back to interrupt mode for any non-deferral error */
+ if (IS_ERR(stm32port->rx_ch))
+ stm32port->rx_ch = NULL;
+
+ stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
+ if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_dma_rx;
+ }
+	/* Fall back to interrupt mode for any non-deferral error */
+ if (IS_ERR(stm32port->tx_ch))
+ stm32port->tx_ch = NULL;
- ret = stm32_usart_of_dma_tx_probe(stm32port, pdev);
- if (ret)
- dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");
+ if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
+		/* Fall back to interrupt mode */
+ dma_release_channel(stm32port->rx_ch);
+ stm32port->rx_ch = NULL;
+ }
+
+ if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
+		/* Fall back to interrupt mode */
+ dma_release_channel(stm32port->tx_ch);
+ stm32port->tx_ch = NULL;
+ }
+
+ if (!stm32port->rx_ch)
+ dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
+ if (!stm32port->tx_ch)
+ dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
platform_set_drvdata(pdev, &stm32port->port);
@@ -1366,30 +1342,23 @@ err_port:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- if (stm32port->rx_ch) {
- dmaengine_terminate_async(stm32port->rx_ch);
- dma_release_channel(stm32port->rx_ch);
- }
-
- if (stm32port->rx_dma_buf)
- dma_free_coherent(&pdev->dev,
- RX_BUF_L, stm32port->rx_buf,
- stm32port->rx_dma_buf);
-
if (stm32port->tx_ch) {
- dmaengine_terminate_async(stm32port->tx_ch);
+ stm32_usart_of_dma_tx_remove(stm32port, pdev);
dma_release_channel(stm32port->tx_ch);
}
- if (stm32port->tx_dma_buf)
- dma_free_coherent(&pdev->dev,
- TX_BUF_L, stm32port->tx_buf,
- stm32port->tx_dma_buf);
+ if (stm32port->rx_ch)
+ stm32_usart_of_dma_rx_remove(stm32port, pdev);
+
+err_dma_rx:
+ if (stm32port->rx_ch)
+ dma_release_channel(stm32port->rx_ch);
+err_wakeirq:
if (stm32port->wakeup_src)
dev_pm_clear_wake_irq(&pdev->dev);
-err_nowup:
+err_deinit_port:
if (stm32port->wakeup_src)
device_set_wakeup_capable(&pdev->dev, false);
@@ -1416,28 +1385,20 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ if (stm32_port->tx_ch) {
+ dmaengine_terminate_async(stm32_port->tx_ch);
+ stm32_usart_of_dma_tx_remove(stm32_port, pdev);
+ dma_release_channel(stm32_port->tx_ch);
+ }
+
if (stm32_port->rx_ch) {
dmaengine_terminate_async(stm32_port->rx_ch);
+ stm32_usart_of_dma_rx_remove(stm32_port, pdev);
dma_release_channel(stm32_port->rx_ch);
}
- if (stm32_port->rx_dma_buf)
- dma_free_coherent(&pdev->dev,
- RX_BUF_L, stm32_port->rx_buf,
- stm32_port->rx_dma_buf);
-
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- if (stm32_port->tx_ch) {
- dmaengine_terminate_async(stm32_port->tx_ch);
- dma_release_channel(stm32_port->tx_ch);
- }
-
- if (stm32_port->tx_dma_buf)
- dma_free_coherent(&pdev->dev,
- TX_BUF_L, stm32_port->tx_buf,
- stm32_port->tx_dma_buf);
-
if (stm32_port->wakeup_src) {
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
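The stm32-usart hunks above fold the DMA channel lookup into probe itself: only -EPROBE_DEFER from dma_request_chan() aborts the probe, while any other failure -- including a later rx/tx DMA buffer setup failure -- releases the channel and falls back to interrupt mode, with the error labels unwinding in reverse order of acquisition. A minimal sketch of that request-or-fall-back pattern, using the generic dmaengine API rather than the driver's own helpers (dev and chan are illustrative names):

	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry probe once the DMA controller is bound */
		chan = NULL;			/* any other error: stay in interrupt (PIO) mode */
	}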
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index bab551f46963..92e572634009 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -35,7 +35,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/setup.h>
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 12c2468f2b0e..425a016f9db7 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -39,7 +39,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/setup.h>
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 001e19d7c17d..1a54e3e52ed6 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -35,7 +35,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/setup.h>
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 52687c65ad74..4877c54c613d 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -195,13 +195,6 @@ static int tegra_tcu_probe(struct platform_device *pdev)
return err;
}
- tcu->rx = mbox_request_channel_byname(&tcu->rx_client, "rx");
- if (IS_ERR(tcu->rx)) {
- err = PTR_ERR(tcu->rx);
- dev_err(&pdev->dev, "failed to get rx mailbox: %d\n", err);
- goto free_tx;
- }
-
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
/* setup the console */
strcpy(tcu->console.name, "ttyTCU");
@@ -226,7 +219,7 @@ static int tegra_tcu_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "failed to register UART driver: %d\n",
err);
- goto free_rx;
+ goto free_tx;
}
/* setup the port */
@@ -246,6 +239,17 @@ static int tegra_tcu_probe(struct platform_device *pdev)
goto unregister_uart;
}
+ /*
+ * Request RX channel after creating port to ensure tcu->port
+ * is ready for any immediate incoming bytes.
+ */
+ tcu->rx = mbox_request_channel_byname(&tcu->rx_client, "rx");
+ if (IS_ERR(tcu->rx)) {
+ err = PTR_ERR(tcu->rx);
+ dev_err(&pdev->dev, "failed to get rx mailbox: %d\n", err);
+ goto remove_uart_port;
+ }
+
platform_set_drvdata(pdev, tcu);
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
register_console(&tcu->console);
@@ -253,10 +257,10 @@ static int tegra_tcu_probe(struct platform_device *pdev)
return 0;
+remove_uart_port:
+ uart_remove_one_port(&tcu->driver, &tcu->port);
unregister_uart:
uart_unregister_driver(&tcu->driver);
-free_rx:
- mbox_free_channel(tcu->rx);
free_tx:
mbox_free_channel(tcu->tx);
@@ -270,9 +274,9 @@ static int tegra_tcu_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
unregister_console(&tcu->console);
#endif
+ mbox_free_channel(tcu->rx);
uart_remove_one_port(&tcu->driver, &tcu->port);
uart_unregister_driver(&tcu->driver);
- mbox_free_channel(tcu->rx);
mbox_free_channel(tcu->tx);
return 0;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index f42ccc40ffa6..a5f15f22d9ef 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -505,21 +505,23 @@ static void ulite_console_write(struct console *co, const char *s,
static int ulite_console_setup(struct console *co, char *options)
{
- struct uart_port *port;
+ struct uart_port *port = NULL;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
-
- port = console_port;
+ if (co->index >= 0 && co->index < ULITE_NR_UARTS)
+ port = ulite_ports + co->index;
/* Has the device been initialized yet? */
- if (!port->mapbase) {
+ if (!port || !port->mapbase) {
pr_debug("console on ttyUL%i not present\n", co->index);
return -ENODEV;
}
+ console_port = port;
+
/* not initialized yet? */
if (!port->membase) {
if (ulite_request_port(port))
@@ -655,17 +657,6 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
dev_set_drvdata(dev, port);
-#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
- /*
- * If console hasn't been found yet try to assign this port
- * because it is required to be assigned for console setup function.
- * If register_console() don't assign value, then console_port pointer
- * is cleanup.
- */
- if (ulite_uart_driver.cons->index == -1)
- console_port = port;
-#endif
-
/* Register the port */
rc = uart_add_one_port(&ulite_uart_driver, port);
if (rc) {
@@ -675,12 +666,6 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
return rc;
}
-#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
- /* This is not port which is used for console that's why clean it up */
- if (ulite_uart_driver.cons->index == -1)
- console_port = NULL;
-#endif
-
return 0;
}
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index f81261cb52b8..6000853973c1 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1227,7 +1227,7 @@ static int soft_uart_init(struct platform_device *ofdev)
* kernel, then we use it.
*/
ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG, filename, &ofdev->dev,
+ FW_ACTION_UEVENT, filename, &ofdev->dev,
GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
if (ret) {
dev_err(&ofdev->dev,
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index eeb4b6568776..647198b1e2b9 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -20,7 +20,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/vr41xx/siu.h>
#include <asm/vr41xx/vr41xx.h>
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 67a2db621e2b..962e522ccc45 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -484,7 +484,7 @@ static unsigned int cdns_uart_set_baud_rate(struct uart_port *port,
#ifdef CONFIG_COMMON_CLK
/**
- * cdns_uart_clk_notitifer_cb - Clock notifier callback
+ * cdns_uart_clk_notifier_cb - Clock notifier callback
* @nb: Notifier block
* @event: Notify event
* @data: Notifier data
@@ -497,8 +497,8 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
struct uart_port *port;
int locked = 0;
struct clk_notifier_data *ndata = data;
- unsigned long flags = 0;
struct cdns_uart *cdns_uart = to_cdns_uart(nb);
+ unsigned long flags;
port = cdns_uart->port;
if (port->suspended)
@@ -1149,7 +1149,7 @@ static void cdns_uart_console_putchar(struct uart_port *port, int ch)
}
static void cdns_early_write(struct console *con, const char *s,
- unsigned n)
+ unsigned int n)
{
struct earlycon_device *dev = con->data;
@@ -1210,7 +1210,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = console_port;
- unsigned long flags = 0;
+ unsigned long flags;
unsigned int imr, ctrl;
int locked = 1;
@@ -1308,7 +1308,7 @@ static int cdns_uart_suspend(struct device *device)
may_wake = device_may_wakeup(device);
if (console_suspend_enabled && uart_console(port) && may_wake) {
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
/* Empty the receive FIFO 1st before making changes */
@@ -1339,7 +1339,7 @@ static int cdns_uart_resume(struct device *device)
{
struct uart_port *port = dev_get_drvdata(device);
struct cdns_uart *cdns_uart = port->private_data;
- unsigned long flags = 0;
+ unsigned long flags;
u32 ctrl_reg;
int may_wake;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 5523cf7bd1c2..5bb928b7873e 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -768,7 +768,7 @@ static int write(struct tty_struct *tty,
if (!info->tx_buf || (count > info->max_frame_size))
return -EIO;
- if (!count || tty->stopped || tty->hw_stopped)
+ if (!count || tty->flow.stopped || tty->hw_stopped)
return 0;
spin_lock_irqsave(&info->lock, flags);
@@ -868,15 +868,15 @@ exit:
DBGINFO(("%s wait_until_sent exit\n", info->device_name));
}
-static int write_room(struct tty_struct *tty)
+static unsigned int write_room(struct tty_struct *tty)
{
struct slgt_info *info = tty->driver_data;
- int ret;
+ unsigned int ret;
if (sanity_check(info, tty->name, "write_room"))
return 0;
ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
- DBGINFO(("%s write_room=%d\n", info->device_name, ret));
+ DBGINFO(("%s write_room=%u\n", info->device_name, ret));
return ret;
}
@@ -889,7 +889,7 @@ static void flush_chars(struct tty_struct *tty)
return;
DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
- if (info->tx_count <= 0 || tty->stopped ||
+ if (info->tx_count <= 0 || tty->flow.stopped ||
tty->hw_stopped || !info->tx_buf)
return;
@@ -1254,14 +1254,14 @@ static int synclink_gt_proc_show(struct seq_file *m, void *v)
/*
* return count of bytes in transmit buffer
*/
-static int chars_in_buffer(struct tty_struct *tty)
+static unsigned int chars_in_buffer(struct tty_struct *tty)
{
struct slgt_info *info = tty->driver_data;
- int count;
+ unsigned int count;
if (sanity_check(info, tty->name, "chars_in_buffer"))
return 0;
count = tbuf_bytes(info);
- DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count));
+ DBGINFO(("%s chars_in_buffer()=%u\n", info->device_name, count));
return count;
}
@@ -2241,7 +2241,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
else
#endif
{
- if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
+ if (info->port.tty && (info->port.tty->flow.stopped || info->port.tty->hw_stopped)) {
tx_stop(info);
return;
}
@@ -2465,14 +2465,7 @@ static void change_params(struct slgt_info *info)
/* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5: info->params.data_bits = 5; break;
- case CS6: info->params.data_bits = 6; break;
- case CS7: info->params.data_bits = 7; break;
- case CS8: info->params.data_bits = 8; break;
- default: info->params.data_bits = 7; break;
- }
-
+ info->params.data_bits = tty_get_char_size(cflag);
info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
if (cflag & PARENB)
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 48b5de659c77..426b1252781a 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -147,7 +147,7 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
int iclose = ibaud/50, oclose = obaud/50;
int ibinput = 0;
- if (obaud == 0) /* CD dropped */
+ if (obaud == 0) /* CD dropped */
ibaud = 0; /* Clear ibaud to be sure */
termios->c_ispeed = ibaud;
@@ -159,8 +159,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
#endif
#ifdef BOTHER
/* If the user asked for a precise weird speed give a precise weird
- answer. If they asked for a Bfoo speed they may have problems
- digesting non-exact replies so fuzz a bit */
+ * answer. If they asked for a Bfoo speed they may have problems
+ * digesting non-exact replies so fuzz a bit.
+ */
if ((termios->c_cflag & CBAUD) == BOTHER) {
oclose = 0;
@@ -191,7 +192,8 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
if (ibaud - iclose <= baud_table[i] &&
ibaud + iclose >= baud_table[i]) {
/* For the case input == output don't set IBAUD bits
- if the user didn't do so */
+ * if the user didn't do so.
+ */
if (ofound == i && !ibinput)
ifound = i;
#ifdef IBSHIFT
@@ -211,7 +213,8 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
if (ofound == -1)
termios->c_cflag |= BOTHER;
/* Set exact input bits only if the input and output differ or the
- user already did */
+ * user already did.
+ */
if (ifound == -1 && (ibaud != obaud || ibinput))
termios->c_cflag |= (BOTHER << IBSHIFT);
#else
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9733469a14b2..635d0af229b7 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -32,8 +32,8 @@
* We default to dicing tty buffer allocations to this many characters
* in order to avoid multiple page allocations. We know the size of
* tty_buffer itself but it must also be taken into account that the
- * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
- * logic this must match
+ * buffer is 256 byte aligned. See tty_buffer_find for the allocation
+ * logic this must match.
*/
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
@@ -88,9 +88,10 @@ EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
* pre-allocate if memory guarantee is required).
*/
-int tty_buffer_space_avail(struct tty_port *port)
+unsigned int tty_buffer_space_avail(struct tty_port *port)
{
int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
+
return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
@@ -169,7 +170,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
}
/* Should possibly check if this fails for the largest buffer we
- have queued and recycle that ? */
+ * have queued and recycle that ?
+ */
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
@@ -242,7 +244,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * tty_buffer_request_room - grow tty buffer if needed
+ * __tty_buffer_request_room - grow tty buffer if needed
* @port: tty port
* @size: size desired
* @flags: buffer flags if new buffer allocated (default = 0)
@@ -312,11 +314,13 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size)
{
int copied = 0;
+
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
int space = __tty_buffer_request_room(port, goal, flags);
struct tty_buffer *tb = port->buf.tail;
+
if (unlikely(space == 0))
break;
memcpy(char_buf_ptr(tb, tb->used), chars, space);
@@ -326,7 +330,8 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
copied += space;
chars += space;
/* There is a small chance that we need to split the data over
- several buffers. If this is the case we must loop */
+ * several buffers. If this is the case we must loop.
+ */
} while (unlikely(size > copied));
return copied;
}
@@ -348,10 +353,12 @@ int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size)
{
int copied = 0;
+
do {
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
int space = tty_buffer_request_room(port, goal);
struct tty_buffer *tb = port->buf.tail;
+
if (unlikely(space == 0))
break;
memcpy(char_buf_ptr(tb, tb->used), chars, space);
@@ -361,7 +368,8 @@ int tty_insert_flip_string_flags(struct tty_port *port,
chars += space;
flags += space;
/* There is a small chance that we need to split the data over
- several buffers. If this is the case we must loop */
+ * several buffers. If this is the case we must loop.
+ */
} while (unlikely(size > copied));
return copied;
}
@@ -431,8 +439,10 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);
+
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
+
*chars = char_buf_ptr(tb, tb->used);
if (~tb->flags & TTYB_NORMAL)
memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
@@ -455,7 +465,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* Returns the number of bytes processed
*/
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- char *f, int count)
+ const char *f, int count)
{
if (ld->ops->receive_buf2)
count = ld->ops->receive_buf2(ld->tty, p, f, count);
@@ -472,7 +482,7 @@ static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
unsigned char *p = char_buf_ptr(head, head->read);
- char *f = NULL;
+ const char *f = NULL;
int n;
if (~head->flags & TTYB_NORMAL)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 5b5e99604989..26debec26b4e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -33,7 +33,7 @@
* -- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
*
* Rewrote canonical mode and added more termios flags.
- * -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ * -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
*
* Reorganized FASYNC support so mouse code can share it.
* -- ctm@ardi.com, 9Sep95
@@ -131,12 +131,12 @@ struct ktermios tty_std_termios = { /* for the benefit of tty drivers */
.c_ospeed = 38400,
/* .c_line = N_TTY, */
};
-
EXPORT_SYMBOL(tty_std_termios);
/* This list gets poked at by procfs and various bits of boot up code. This
- could do with some rationalisation such as pulling the tty proc function
- into this file */
+ * could do with some rationalisation such as pulling the tty proc function
+ * into this file.
+ */
LIST_HEAD(tty_drivers); /* linked list of tty drivers */
@@ -248,7 +248,6 @@ const char *tty_name(const struct tty_struct *tty)
return "NULL tty";
return tty->name;
}
-
EXPORT_SYMBOL(tty_name);
const char *tty_driver_name(const struct tty_struct *tty)
@@ -320,6 +319,7 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
list_for_each_entry(p, &tty_drivers, tty_drivers) {
dev_t base = MKDEV(p->major, p->minor_start);
+
if (device < base || device >= base + p->num)
continue;
*index = device - base;
@@ -537,7 +537,6 @@ void tty_wakeup(struct tty_struct *tty)
}
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}
-
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
@@ -613,8 +612,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
set_bit(TTY_HUPPING, &tty->flags);
/* inuse_filps is protected by the single tty lock,
- this really needs to change if we want to flush the
- workqueue with the lock held */
+ * this really needs to change if we want to flush the
+ * workqueue with the lock held.
+ */
check_tty_count(tty, "tty_hangup");
spin_lock(&tty->files_lock);
@@ -638,15 +638,15 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
tty_ldisc_hangup(tty, cons_filp != NULL);
- spin_lock_irq(&tty->ctrl_lock);
+ spin_lock_irq(&tty->ctrl.lock);
clear_bit(TTY_THROTTLED, &tty->flags);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- put_pid(tty->session);
- put_pid(tty->pgrp);
- tty->session = NULL;
- tty->pgrp = NULL;
- tty->ctrl_status = 0;
- spin_unlock_irq(&tty->ctrl_lock);
+ put_pid(tty->ctrl.session);
+ put_pid(tty->ctrl.pgrp);
+ tty->ctrl.session = NULL;
+ tty->ctrl.pgrp = NULL;
+ tty->ctrl.pktstatus = 0;
+ spin_unlock_irq(&tty->ctrl.lock);
/*
* If one of the devices matches a console pointer, we
@@ -694,7 +694,6 @@ void tty_hangup(struct tty_struct *tty)
tty_debug_hangup(tty, "hangup\n");
schedule_work(&tty->hangup_work);
}
-
EXPORT_SYMBOL(tty_hangup);
/**
@@ -711,7 +710,6 @@ void tty_vhangup(struct tty_struct *tty)
tty_debug_hangup(tty, "vhangup\n");
__tty_hangup(tty, 0);
}
-
EXPORT_SYMBOL(tty_vhangup);
@@ -761,9 +759,17 @@ int tty_hung_up_p(struct file *filp)
{
return (filp && filp->f_op == &hung_up_tty_fops);
}
-
EXPORT_SYMBOL(tty_hung_up_p);
+void __stop_tty(struct tty_struct *tty)
+{
+ if (tty->flow.stopped)
+ return;
+ tty->flow.stopped = true;
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+}
+
/**
* stop_tty - propagate flow control
* @tty: tty to stop
@@ -778,28 +784,28 @@ EXPORT_SYMBOL(tty_hung_up_p);
* but not always.
*
* Locking:
- * flow_lock
+ * flow.lock
*/
-
-void __stop_tty(struct tty_struct *tty)
-{
- if (tty->stopped)
- return;
- tty->stopped = 1;
- if (tty->ops->stop)
- tty->ops->stop(tty);
-}
-
void stop_tty(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->flow_lock, flags);
+ spin_lock_irqsave(&tty->flow.lock, flags);
__stop_tty(tty);
- spin_unlock_irqrestore(&tty->flow_lock, flags);
+ spin_unlock_irqrestore(&tty->flow.lock, flags);
}
EXPORT_SYMBOL(stop_tty);
+void __start_tty(struct tty_struct *tty)
+{
+ if (!tty->flow.stopped || tty->flow.tco_stopped)
+ return;
+ tty->flow.stopped = false;
+ if (tty->ops->start)
+ tty->ops->start(tty);
+ tty_wakeup(tty);
+}
+
/**
* start_tty - propagate flow control
* @tty: tty to start
@@ -809,26 +815,15 @@ EXPORT_SYMBOL(stop_tty);
* start method is invoked and the line discipline woken.
*
* Locking:
- * flow_lock
+ * flow.lock
*/
-
-void __start_tty(struct tty_struct *tty)
-{
- if (!tty->stopped || tty->flow_stopped)
- return;
- tty->stopped = 0;
- if (tty->ops->start)
- tty->ops->start(tty);
- tty_wakeup(tty);
-}
-
void start_tty(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->flow_lock, flags);
+ spin_lock_irqsave(&tty->flow.lock, flags);
__start_tty(tty);
- spin_unlock_irqrestore(&tty->flow_lock, flags);
+ spin_unlock_irqrestore(&tty->flow.lock, flags);
}
EXPORT_SYMBOL(start_tty);
@@ -914,10 +909,8 @@ static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
/**
* tty_read - read method for tty device files
- * @file: pointer to tty file
- * @buf: user buffer
- * @count: size of user buffer
- * @ppos: unused
+ * @iocb: kernel I/O control block
+ * @to: destination for the data read
*
* Perform the read system call function on this terminal device. Checks
* for hung up devices before calling the line discipline method.
@@ -941,7 +934,8 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
return -EIO;
/* We want to wait for the line discipline to sort out in this
- situation */
+ * situation.
+ */
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_read(iocb, to);
@@ -1033,6 +1027,7 @@ static inline ssize_t do_tty_write(
/* Do the write .. */
for (;;) {
size_t size = count;
+
if (size > chunk)
size = chunk;
@@ -1091,36 +1086,18 @@ void tty_write_message(struct tty_struct *tty, char *msg)
tty_unlock(tty);
tty_write_unlock(tty);
}
- return;
}
-
-/**
- * tty_write - write method for tty device file
- * @file: tty file pointer
- * @buf: user data to write
- * @count: bytes to write
- * @ppos: unused
- *
- * Write data to a tty device via the line discipline.
- *
- * Locking:
- * Locks the line discipline as required
- * Writes to the tty driver are serialized by the atomic_write_lock
- * and are then processed in chunks to the device. The line discipline
- * write method will not be invoked in parallel for each device.
- */
-
static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
struct tty_struct *tty = file_tty(file);
- struct tty_ldisc *ld;
+ struct tty_ldisc *ld;
ssize_t ret;
if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
return -EIO;
if (!tty || !tty->ops->write || tty_io_error(tty))
- return -EIO;
+ return -EIO;
/* Short term debug to catch buggy drivers */
if (tty->ops->write_room == NULL)
tty_err(tty, "missing write_room method\n");
@@ -1135,6 +1112,20 @@ static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_
return ret;
}
+/**
+ * tty_write - write method for tty device file
+ * @iocb: kernel I/O control block
+ * @from: iov_iter with data to write
+ *
+ * Write data to a tty device via the line discipline.
+ *
+ * Locking:
+ * Locks the line discipline as required
+ * Writes to the tty driver are serialized by the atomic_write_lock
+ * and are then processed in chunks to the device. The line
+ * discipline write method will not be invoked in parallel for
+ * each device.
+ */
static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
{
return file_tty_write(iocb->ki_filp, iocb, from);
@@ -1150,11 +1141,12 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
spin_unlock(&redirect_lock);
/*
- * We know the redirected tty is just another tty, we can can
+ * We know the redirected tty is just another tty, we can
* call file_tty_write() directly with that file pointer.
*/
if (p) {
ssize_t res;
+
res = file_tty_write(p, iocb, iter);
fput(p);
return res;
@@ -1172,7 +1164,7 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
int tty_send_xchar(struct tty_struct *tty, char ch)
{
- int was_stopped = tty->stopped;
+ bool was_stopped = tty->flow.stopped;
if (tty->ops->send_xchar) {
down_read(&tty->termios_rwsem);
@@ -1559,8 +1551,8 @@ static void release_one_tty(struct work_struct *work)
list_del_init(&tty->tty_files);
spin_unlock(&tty->files_lock);
- put_pid(tty->pgrp);
- put_pid(tty->session);
+ put_pid(tty->ctrl.pgrp);
+ put_pid(tty->ctrl.session);
free_tty_struct(tty);
}
@@ -1569,7 +1561,8 @@ static void queue_release_one_tty(struct kref *kref)
struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
/* The hangup queue is now free so we can reuse it rather than
- waste a chunk of memory for each port */
+ * waste a chunk of memory for each port.
+ */
INIT_WORK(&tty->hangup_work, release_one_tty);
schedule_work(&tty->hangup_work);
}
@@ -1861,9 +1854,9 @@ int tty_release(struct inode *inode, struct file *filp)
*/
if (!tty->count) {
read_lock(&tasklist_lock);
- session_clear_tty(tty->session);
+ session_clear_tty(tty->ctrl.session);
if (o_tty)
- session_clear_tty(o_tty->session);
+ session_clear_tty(o_tty->ctrl.session);
read_unlock(&tasklist_lock);
}
@@ -1874,7 +1867,8 @@ int tty_release(struct inode *inode, struct file *filp)
tty_unlock(tty);
/* At this point, the tty->count == 0 should ensure a dead tty
- cannot be re-opened by a racing opener */
+ * cannot be re-opened by a racing opener.
+ */
if (!final)
return 0;
@@ -1928,8 +1922,8 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
* @index: index for the device in the @return driver
* @return: driver for this inode (with increased refcount)
*
- * If @return is not erroneous, the caller is responsible to decrement the
- * refcount by tty_driver_kref_put.
+ * If @return is not erroneous, the caller is responsible to decrement the
+ * refcount by tty_driver_kref_put.
*
* Locking: tty_mutex protects get_tty_driver
*/
@@ -1942,6 +1936,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
#ifdef CONFIG_VT
case MKDEV(TTY_MAJOR, 0): {
extern struct tty_driver *console_driver;
+
driver = tty_driver_kref_get(console_driver);
*index = fg_console;
break;
@@ -1949,6 +1944,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
#endif
case MKDEV(TTYAUX_MAJOR, 1): {
struct tty_driver *console_driver = console_device(index);
+
if (console_driver) {
driver = tty_driver_kref_get(console_driver);
if (driver && filp) {
@@ -2250,16 +2246,16 @@ static int __tty_fasync(int fd, struct file *filp, int on)
enum pid_type type;
struct pid *pid;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- if (tty->pgrp) {
- pid = tty->pgrp;
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ if (tty->ctrl.pgrp) {
+ pid = tty->ctrl.pgrp;
type = PIDTYPE_PGID;
} else {
pid = task_pid(current);
type = PIDTYPE_TGID;
}
get_pid(pid);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
__f_setown(filp, pid, type, 0);
put_pid(pid);
retval = 0;
@@ -2336,7 +2332,7 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
err = copy_to_user(arg, &tty->winsize, sizeof(*arg));
mutex_unlock(&tty->winsize_mutex);
- return err ? -EFAULT: 0;
+ return err ? -EFAULT : 0;
}
/**
@@ -2381,13 +2377,14 @@ EXPORT_SYMBOL(tty_do_resize);
*
* Locking:
* Driver dependent. The default do_resize method takes the
- * tty termios mutex and ctrl_lock. The console takes its own lock
+ * tty termios mutex and ctrl.lock. The console takes its own lock
* then calls into the default method.
*/
static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
{
struct winsize tmp_ws;
+
if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
return -EFAULT;
@@ -2412,6 +2409,7 @@ static int tioccons(struct file *file)
return -EPERM;
if (file->f_op->write_iter == redirected_tty_write) {
struct file *f;
+
spin_lock(&redirect_lock);
f = redirect;
redirect = NULL;
@@ -2734,6 +2732,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCGEXCL:
{
int excl = test_bit(TTY_EXCLUSIVE, &tty->flags);
+
return put_user(excl, (int __user *)p);
}
case TIOCGETD:
@@ -2748,6 +2747,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCGDEV:
{
unsigned int ret = new_encode_dev(tty_devnum(real_tty));
+
return put_user(ret, (unsigned int __user *)p);
}
/*
@@ -3006,7 +3006,7 @@ static int this_tty(const void *t, struct file *file, unsigned fd)
return 0;
return file_tty(file) != t ? 0 : fd + 1;
}
-
+
/*
* This implements the "Secure Attention Key" --- the idea is to
* prevent trojan horses by killing all processes associated with this
@@ -3039,9 +3039,9 @@ void __do_SAK(struct tty_struct *tty)
if (!tty)
return;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- session = get_pid(tty->session);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ session = get_pid(tty->ctrl.session);
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
tty_ldisc_flush(tty);
@@ -3096,13 +3096,13 @@ void do_SAK(struct tty_struct *tty)
return;
schedule_work(&tty->SAK_work);
}
-
EXPORT_SYMBOL(do_SAK);
/* Must put_device() after it's unused! */
static struct device *tty_get_device(struct tty_struct *tty)
{
dev_t devt = tty_devnum(tty);
+
return class_find_device_by_devt(tty_class, devt);
}
@@ -3129,8 +3129,8 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
kfree(tty);
return NULL;
}
- tty->session = NULL;
- tty->pgrp = NULL;
+ tty->ctrl.session = NULL;
+ tty->ctrl.pgrp = NULL;
mutex_init(&tty->legacy_mutex);
mutex_init(&tty->throttle_mutex);
init_rwsem(&tty->termios_rwsem);
@@ -3140,8 +3140,8 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
mutex_init(&tty->atomic_write_lock);
- spin_lock_init(&tty->ctrl_lock);
- spin_lock_init(&tty->flow_lock);
+ spin_lock_init(&tty->ctrl.lock);
+ spin_lock_init(&tty->flow.lock);
spin_lock_init(&tty->files_lock);
INIT_LIST_HEAD(&tty->tty_files);
INIT_WORK(&tty->SAK_work, do_SAK_work);
@@ -3317,11 +3317,11 @@ err_put:
EXPORT_SYMBOL_GPL(tty_register_device_attr);
/**
- * tty_unregister_device - unregister a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
*
- * If a tty device is registered with a call to tty_register_device() then
+ * If a tty device is registered with a call to tty_register_device() then
* this function must be called when the tty device is gone.
*
* Locking: ??
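The tty_io.c conversions above (and the matching ones in tty_ioctl.c, tty_jobctrl.c, the vt code and the drivers elsewhere in this series) replace the flat tty_struct fields stopped/flow_stopped/flow_lock and session/pgrp/ctrl_status/ctrl_lock with two grouped members, tty->flow and tty->ctrl, turning the stop flags into booleans along the way. The sketch below shows only the members actually referenced in this diff; the authoritative layout lives in include/linux/tty.h:

	struct tty_struct {
		/* ... */
		struct {
			spinlock_t lock;
			bool stopped;
			bool tco_stopped;	/* was flow_stopped: stopped by TCOOFF */
		} flow;
		struct {
			spinlock_t lock;
			struct pid *pgrp;
			struct pid *session;
			unsigned char pktstatus;	/* was ctrl_status */
		} ctrl;
		/* ... */
	};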
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 41f7449d0464..507a25d692bb 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -54,7 +54,7 @@
* to be no queue on the device.
*/
-int tty_chars_in_buffer(struct tty_struct *tty)
+unsigned int tty_chars_in_buffer(struct tty_struct *tty)
{
if (tty->ops->chars_in_buffer)
return tty->ops->chars_in_buffer(tty);
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(tty_chars_in_buffer);
* returned and data may be lost as there will be no flow control.
*/
-int tty_write_room(struct tty_struct *tty)
+unsigned int tty_write_room(struct tty_struct *tty)
{
if (tty->ops->write_room)
return tty->ops->write_room(tty);
@@ -97,28 +97,6 @@ void tty_driver_flush_buffer(struct tty_struct *tty)
EXPORT_SYMBOL(tty_driver_flush_buffer);
/**
- * tty_throttle - flow control
- * @tty: terminal
- *
- * Indicate that a tty should stop transmitting data down the stack.
- * Takes the termios rwsem to protect against parallel throttle/unthrottle
- * and also to ensure the driver can consistently reference its own
- * termios data at this point when implementing software flow control.
- */
-
-void tty_throttle(struct tty_struct *tty)
-{
- down_write(&tty->termios_rwsem);
- /* check TTY_THROTTLED first so it indicates our state */
- if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
- tty->ops->throttle)
- tty->ops->throttle(tty);
- tty->flow_change = 0;
- up_write(&tty->termios_rwsem);
-}
-EXPORT_SYMBOL(tty_throttle);
-
-/**
* tty_unthrottle - flow control
* @tty: terminal
*
@@ -146,10 +124,11 @@ EXPORT_SYMBOL(tty_unthrottle);
* tty_throttle_safe - flow control
* @tty: terminal
*
- * Similar to tty_throttle() but will only attempt throttle
- * if tty->flow_change is TTY_THROTTLE_SAFE. Prevents an accidental
- * throttle due to race conditions when throttling is conditional
- * on factors evaluated prior to throttling.
+ * Indicate that a tty should stop transmitting data down the stack.
+ * tty_throttle_safe will only attempt throttle if tty->flow_change is
+ * TTY_THROTTLE_SAFE. Prevents an accidental throttle due to race
+ * conditions when throttling is conditional on factors evaluated prior to
+ * throttling.
*
* Returns 0 if tty is throttled (or was already throttled)
*/
@@ -301,6 +280,51 @@ int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b)
EXPORT_SYMBOL(tty_termios_hw_change);
/**
+ * tty_get_char_size - get size of a character
+ * @cflag: termios cflag value
+ *
+ * Get the size (in bits) of a character depending on @cflag's %CSIZE
+ * setting.
+ */
+unsigned char tty_get_char_size(unsigned int cflag)
+{
+ switch (cflag & CSIZE) {
+ case CS5:
+ return 5;
+ case CS6:
+ return 6;
+ case CS7:
+ return 7;
+ case CS8:
+ default:
+ return 8;
+ }
+}
+EXPORT_SYMBOL_GPL(tty_get_char_size);
+
+/**
+ * tty_get_frame_size - get size of a frame
+ * @cflag: termios cflag value
+ *
+ * Get the size (in bits) of a frame depending on @cflag's %CSIZE, %CSTOPB,
+ * and %PARENB setting. The result is a sum of character size, start and
+ * stop bits -- one bit each -- second stop bit (if set), and parity bit
+ * (if set).
+ */
+unsigned char tty_get_frame_size(unsigned int cflag)
+{
+ unsigned char bits = 2 + tty_get_char_size(cflag);
+
+ if (cflag & CSTOPB)
+ bits++;
+ if (cflag & PARENB)
+ bits++;
+
+ return bits;
+}
+EXPORT_SYMBOL_GPL(tty_get_frame_size);
+
+/**
* tty_set_termios - update termios values
* @tty: tty to update
* @new_termios: desired new value
@@ -846,20 +870,20 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
return retval;
switch (arg) {
case TCOOFF:
- spin_lock_irq(&tty->flow_lock);
- if (!tty->flow_stopped) {
- tty->flow_stopped = 1;
+ spin_lock_irq(&tty->flow.lock);
+ if (!tty->flow.tco_stopped) {
+ tty->flow.tco_stopped = true;
__stop_tty(tty);
}
- spin_unlock_irq(&tty->flow_lock);
+ spin_unlock_irq(&tty->flow.lock);
break;
case TCOON:
- spin_lock_irq(&tty->flow_lock);
- if (tty->flow_stopped) {
- tty->flow_stopped = 0;
+ spin_lock_irq(&tty->flow.lock);
+ if (tty->flow.tco_stopped) {
+ tty->flow.tco_stopped = false;
__start_tty(tty);
}
- spin_unlock_irq(&tty->flow_lock);
+ spin_unlock_irq(&tty->flow.lock);
break;
case TCIOFF:
if (STOP_CHAR(tty) != __DISABLED_CHAR)
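tty_get_char_size() and tty_get_frame_size(), added above, centralise the CSIZE switch that drivers such as synclink_gt used to open-code; a frame is simply one start bit plus the data bits, an optional parity bit, and one or two stop bits. A standalone userspace re-statement of the same arithmetic, for illustration only, built on the termios constants from <termios.h>:

	#include <stdio.h>
	#include <termios.h>

	static unsigned char char_size(unsigned int cflag)
	{
		switch (cflag & CSIZE) {
		case CS5: return 5;
		case CS6: return 6;
		case CS7: return 7;
		case CS8:
		default:  return 8;
		}
	}

	static unsigned char frame_size(unsigned int cflag)
	{
		unsigned char bits = 2 + char_size(cflag);	/* start + stop + data bits */

		if (cflag & CSTOPB)
			bits++;					/* second stop bit */
		if (cflag & PARENB)
			bits++;					/* parity bit */
		return bits;
	}

	int main(void)
	{
		printf("8N1 = %u bits\n", (unsigned)frame_size(CS8));			/* 10 */
		printf("7E2 = %u bits\n", (unsigned)frame_size(CS7 | PARENB | CSTOPB));	/* 11 */
		return 0;
	}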
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index 7813dc910a19..80b86a7992b5 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -20,7 +20,7 @@ static int is_ignored(int sig)
}
/**
- * tty_check_change - check for POSIX terminal changes
+ * __tty_check_change - check for POSIX terminal changes
* @tty: tty to check
* @sig: signal to send
*
@@ -28,7 +28,7 @@ static int is_ignored(int sig)
* not in the foreground, send a SIGTTOU. If the signal is blocked or
* ignored, go ahead and perform the operation. (POSIX 7.2)
*
- * Locking: ctrl_lock
+ * Locking: ctrl.lock
*/
int __tty_check_change(struct tty_struct *tty, int sig)
{
@@ -42,9 +42,9 @@ int __tty_check_change(struct tty_struct *tty, int sig)
rcu_read_lock();
pgrp = task_pgrp(current);
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty_pgrp = tty->pgrp;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ tty_pgrp = tty->ctrl.pgrp;
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
if (tty_pgrp && pgrp != tty_pgrp) {
if (is_ignored(sig)) {
@@ -85,7 +85,7 @@ void proc_clear_tty(struct task_struct *p)
}
/**
- * proc_set_tty - set the controlling terminal
+ * __proc_set_tty - set the controlling terminal
* @tty: tty structure
*
* Only callable by the session leader and only if it does not already have
@@ -99,16 +99,16 @@ static void __proc_set_tty(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
/*
* The session and fg pgrp references will be non-NULL if
* tiocsctty() is stealing the controlling tty
*/
- put_pid(tty->session);
- put_pid(tty->pgrp);
- tty->pgrp = get_pid(task_pgrp(current));
- tty->session = get_pid(task_session(current));
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ put_pid(tty->ctrl.session);
+ put_pid(tty->ctrl.pgrp);
+ tty->ctrl.pgrp = get_pid(task_pgrp(current));
+ tty->ctrl.session = get_pid(task_session(current));
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
if (current->signal->tty) {
tty_debug(tty, "current tty %s not NULL!!\n",
current->signal->tty->name);
@@ -135,7 +135,7 @@ void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty)
spin_lock_irq(&current->sighand->siglock);
if (current->signal->leader &&
!current->signal->tty &&
- tty->session == NULL) {
+ tty->ctrl.session == NULL) {
/*
* Don't let a process that only has write access to the tty
* obtain the privileges associated with having a tty as
@@ -200,8 +200,8 @@ int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
struct pid *tty_pgrp = NULL;
read_lock(&tasklist_lock);
- if (tty->session) {
- do_each_pid_task(tty->session, PIDTYPE_SID, p) {
+ if (tty->ctrl.session) {
+ do_each_pid_task(tty->ctrl.session, PIDTYPE_SID, p) {
spin_lock_irq(&p->sighand->siglock);
if (p->signal->tty == tty) {
p->signal->tty = NULL;
@@ -218,13 +218,14 @@ int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
__group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
__group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
put_pid(p->signal->tty_old_pgrp); /* A noop */
- spin_lock(&tty->ctrl_lock);
- tty_pgrp = get_pid(tty->pgrp);
- if (tty->pgrp)
- p->signal->tty_old_pgrp = get_pid(tty->pgrp);
- spin_unlock(&tty->ctrl_lock);
+ spin_lock(&tty->ctrl.lock);
+ tty_pgrp = get_pid(tty->ctrl.pgrp);
+ if (tty->ctrl.pgrp)
+ p->signal->tty_old_pgrp =
+ get_pid(tty->ctrl.pgrp);
+ spin_unlock(&tty->ctrl.lock);
spin_unlock_irq(&p->sighand->siglock);
- } while_each_pid_task(tty->session, PIDTYPE_SID, p);
+ } while_each_pid_task(tty->ctrl.session, PIDTYPE_SID, p);
}
read_unlock(&tasklist_lock);
@@ -309,12 +310,12 @@ void disassociate_ctty(int on_exit)
unsigned long flags;
tty_lock(tty);
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- put_pid(tty->session);
- put_pid(tty->pgrp);
- tty->session = NULL;
- tty->pgrp = NULL;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ put_pid(tty->ctrl.session);
+ put_pid(tty->ctrl.pgrp);
+ tty->ctrl.session = NULL;
+ tty->ctrl.pgrp = NULL;
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
tty_unlock(tty);
tty_kref_put(tty);
}
@@ -363,7 +364,8 @@ static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
tty_lock(tty);
read_lock(&tasklist_lock);
- if (current->signal->leader && (task_session(current) == tty->session))
+ if (current->signal->leader &&
+ task_session(current) == tty->ctrl.session)
goto unlock;
/*
@@ -375,7 +377,7 @@ static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
goto unlock;
}
- if (tty->session) {
+ if (tty->ctrl.session) {
/*
* This tty is already the controlling
* tty for another session group!
@@ -384,7 +386,7 @@ static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
/*
* Steal it away
*/
- session_clear_tty(tty->session);
+ session_clear_tty(tty->ctrl.session);
} else {
ret = -EPERM;
goto unlock;
@@ -416,9 +418,9 @@ struct pid *tty_get_pgrp(struct tty_struct *tty)
unsigned long flags;
struct pid *pgrp;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- pgrp = get_pid(tty->pgrp);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_lock_irqsave(&tty->ctrl.lock, flags);
+ pgrp = get_pid(tty->ctrl.pgrp);
+ spin_unlock_irqrestore(&tty->ctrl.lock, flags);
return pgrp;
}
@@ -499,10 +501,10 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
if (pgrp_nr < 0)
return -EINVAL;
- spin_lock_irq(&real_tty->ctrl_lock);
+ spin_lock_irq(&real_tty->ctrl.lock);
if (!current->signal->tty ||
(current->signal->tty != real_tty) ||
- (real_tty->session != task_session(current))) {
+ (real_tty->ctrl.session != task_session(current))) {
retval = -ENOTTY;
goto out_unlock_ctrl;
}
@@ -515,12 +517,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
if (session_of_pgrp(pgrp) != task_session(current))
goto out_unlock;
retval = 0;
- put_pid(real_tty->pgrp);
- real_tty->pgrp = get_pid(pgrp);
+ put_pid(real_tty->ctrl.pgrp);
+ real_tty->ctrl.pgrp = get_pid(pgrp);
out_unlock:
rcu_read_unlock();
out_unlock_ctrl:
- spin_unlock_irq(&real_tty->ctrl_lock);
+ spin_unlock_irq(&real_tty->ctrl.lock);
return retval;
}
@@ -545,16 +547,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
if (tty == real_tty && current->signal->tty != real_tty)
return -ENOTTY;
- spin_lock_irqsave(&real_tty->ctrl_lock, flags);
- if (!real_tty->session)
+ spin_lock_irqsave(&real_tty->ctrl.lock, flags);
+ if (!real_tty->ctrl.session)
goto err;
- sid = pid_vnr(real_tty->session);
- spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
+ sid = pid_vnr(real_tty->ctrl.session);
+ spin_unlock_irqrestore(&real_tty->ctrl.lock, flags);
return put_user(sid, p);
err:
- spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
+ spin_unlock_irqrestore(&real_tty->ctrl.lock, flags);
return -ENOTTY;
}
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 03f414172f34..756a4bfa6a69 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -48,7 +48,6 @@ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
/**
* tty_register_ldisc - install a line discipline
- * @disc: ldisc number
* @new_ldisc: pointer to the ldisc object
*
* Installs a new line discipline into the kernel. The discipline
@@ -59,18 +58,16 @@ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
* takes tty_ldiscs_lock to guard against ldisc races
*/
-int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
+int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc)
{
unsigned long flags;
int ret = 0;
- if (disc < N_TTY || disc >= NR_LDISCS)
+ if (new_ldisc->num < N_TTY || new_ldisc->num >= NR_LDISCS)
return -EINVAL;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
- tty_ldiscs[disc] = new_ldisc;
- new_ldisc->num = disc;
- new_ldisc->refcount = 0;
+ tty_ldiscs[new_ldisc->num] = new_ldisc;
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return ret;
@@ -79,7 +76,7 @@ EXPORT_SYMBOL(tty_register_ldisc);
/**
* tty_unregister_ldisc - unload a line discipline
- * @disc: ldisc number
+ * @ldisc: ldisc number
*
* Remove a line discipline from the kernel providing it is not
* currently in use.
@@ -88,22 +85,13 @@ EXPORT_SYMBOL(tty_register_ldisc);
* takes tty_ldiscs_lock to guard against ldisc races
*/
-int tty_unregister_ldisc(int disc)
+void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc)
{
unsigned long flags;
- int ret = 0;
-
- if (disc < N_TTY || disc >= NR_LDISCS)
- return -EINVAL;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
- if (tty_ldiscs[disc]->refcount)
- ret = -EBUSY;
- else
- tty_ldiscs[disc] = NULL;
+ tty_ldiscs[ldisc->num] = NULL;
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
-
- return ret;
}
EXPORT_SYMBOL(tty_unregister_ldisc);
@@ -117,10 +105,8 @@ static struct tty_ldisc_ops *get_ldops(int disc)
ldops = tty_ldiscs[disc];
if (ldops) {
ret = ERR_PTR(-EAGAIN);
- if (try_module_get(ldops->owner)) {
- ldops->refcount++;
+ if (try_module_get(ldops->owner))
ret = ldops;
- }
}
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return ret;
@@ -131,7 +117,6 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
unsigned long flags;
raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
- ldops->refcount--;
module_put(ldops->owner);
raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
}
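With the tty_ldisc.c change above, the discipline number travels inside struct tty_ldisc_ops (->num) instead of being passed to tty_register_ldisc(), the per-ldisc refcount is gone (module reference counting alone pins the ops), and tty_unregister_ldisc() takes the ops pointer and returns void. A sketch of a caller after the conversion; N_FOO and the "foo" names are hypothetical:

	static struct tty_ldisc_ops foo_ldisc_ops = {
		.owner = THIS_MODULE,
		.num   = N_FOO,			/* number now lives in the ops */
		.name  = "foo",
		/* .open/.close/.receive_buf/... as before */
	};

	static int __init foo_init(void)
	{
		return tty_register_ldisc(&foo_ldisc_ops);	/* was tty_register_ldisc(N_FOO, &foo_ldisc_ops) */
	}

	static void __exit foo_exit(void)
	{
		tty_unregister_ldisc(&foo_ldisc_ops);		/* was tty_unregister_ldisc(N_FOO), now returns void */
	}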
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 303c198fbf5c..2f1061a9d926 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -468,7 +468,8 @@ int tty_port_block_til_ready(struct tty_port *port,
DEFINE_WAIT(wait);
/* if non-blocking mode is set we can pass directly to open unless
- the port has just hung up or is in another error state */
+ * the port has just hung up or is in another error state.
+ */
if (tty_io_error(tty)) {
tty_port_set_active(port, 1);
return 0;
@@ -485,8 +486,9 @@ int tty_port_block_til_ready(struct tty_port *port,
do_clocal = 1;
/* Block waiting until we can proceed. We may need to wait for the
- carrier, but we must also wait for any close that is in progress
- before the next open may complete */
+ * carrier, but we must also wait for any close that is in progress
+ * before the next open may complete.
+ */
retval = 0;
@@ -503,7 +505,8 @@ int tty_port_block_til_ready(struct tty_port *port,
prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
/* Check for a hangup or uninitialised port.
- Return accordingly */
+ * Return accordingly.
+ */
if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
if (port->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
@@ -530,7 +533,8 @@ int tty_port_block_til_ready(struct tty_port *port,
finish_wait(&port->open_wait, &wait);
/* Update counts. A parallel hangup will have set count to zero and
- we must not mess that up further */
+ * we must not mess that up further.
+ */
spin_lock_irqsave(&port->lock, flags);
if (!tty_hung_up_p(filp))
port->count++;
@@ -587,7 +591,7 @@ int tty_port_close_start(struct tty_port *port,
if (tty_port_initialized(port)) {
/* Don't block on a stalled port, just pull the chain */
- if (tty->flow_stopped)
+ if (tty->flow.tco_stopped)
tty_driver_flush_buffer(tty);
if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, port->closing_wait);
@@ -688,6 +692,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
clear_bit(TTY_IO_ERROR, &tty->flags);
if (port->ops->activate) {
int retval = port->ops->activate(port, tty);
+
if (retval) {
mutex_unlock(&port->mutex);
return retval;
@@ -698,5 +703,4 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
mutex_unlock(&port->mutex);
return tty_port_block_til_ready(port, tty, filp);
}
-
EXPORT_SYMBOL(tty_port_open);
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index 17f05b7eb6d3..af3311a24917 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -35,7 +35,7 @@ static int ttynull_write(struct tty_struct *tty, const unsigned char *buf,
return count;
}
-static int ttynull_write_room(struct tty_struct *tty)
+static unsigned int ttynull_write_room(struct tty_struct *tty)
{
return 65536;
}
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 0a3a71e14df4..d06bcc3b4c07 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -473,9 +473,9 @@ static struct vio_version vcc_versions[] = {
static struct tty_port_operations vcc_port_ops = { 0 };
-static ssize_t vcc_sysfs_domain_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t domain_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct vcc_port *port;
int rv;
@@ -505,9 +505,9 @@ static int vcc_send_ctl(struct vcc_port *port, int ctl)
return rv;
}
-static ssize_t vcc_sysfs_break_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t break_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct vcc_port *port;
unsigned long flags;
@@ -530,8 +530,8 @@ static ssize_t vcc_sysfs_break_store(struct device *dev,
return rv;
}
-static DEVICE_ATTR(domain, 0400, vcc_sysfs_domain_show, NULL);
-static DEVICE_ATTR(break, 0200, NULL, vcc_sysfs_break_store);
+static DEVICE_ATTR_ADMIN_RO(domain);
+static DEVICE_ATTR_WO(break);
static struct attribute *vcc_sysfs_entries[] = {
&dev_attr_domain.attr,
@@ -668,7 +668,7 @@ free_port:
*
* Return: status of removal
*/
-static int vcc_remove(struct vio_dev *vdev)
+static void vcc_remove(struct vio_dev *vdev)
{
struct vcc_port *port = dev_get_drvdata(&vdev->dev);
@@ -703,8 +703,6 @@ static int vcc_remove(struct vio_dev *vdev)
kfree(port->domain);
kfree(port);
}
-
- return 0;
}
static const struct vio_device_id vcc_match[] = {
@@ -870,10 +868,10 @@ static int vcc_write(struct tty_struct *tty, const unsigned char *buf,
return total_sent ? total_sent : rv;
}
-static int vcc_write_room(struct tty_struct *tty)
+static unsigned int vcc_write_room(struct tty_struct *tty)
{
struct vcc_port *port;
- u64 num;
+ unsigned int num;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
@@ -888,10 +886,10 @@ static int vcc_write_room(struct tty_struct *tty)
return num;
}
-static int vcc_chars_in_buffer(struct tty_struct *tty)
+static unsigned int vcc_chars_in_buffer(struct tty_struct *tty)
{
struct vcc_port *port;
- u64 num;
+ unsigned int num;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
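The vcc.c sysfs rename above is driven by the DEVICE_ATTR_ADMIN_RO()/DEVICE_ATTR_WO() helpers: they derive the handler names from the attribute name, so the "domain" attribute must be backed by domain_show() and "break" by break_store(), with the same 0400/0200 permissions the old explicit DEVICE_ATTR() lines used. Roughly, per <linux/device.h>:

	DEVICE_ATTR_ADMIN_RO(domain);	/* ~ DEVICE_ATTR(domain, 0400, domain_show, NULL) */
	DEVICE_ATTR_WO(break);		/* ~ DEVICE_ATTR(break, 0200, NULL, break_store) */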
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 5d2309742718..4b0d69042ceb 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -515,7 +515,7 @@ static void fn_hold(struct vc_data *vc)
* these routines are also activated by ^S/^Q.
* (And SCROLLOCK can also be set by the ioctl KDSKBLED.)
*/
- if (tty->stopped)
+ if (tty->flow.stopped)
start_tty(tty);
else
stop_tty(tty);
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index f245a5acf7e9..f7755e73696e 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -33,7 +33,7 @@
#include <linux/sched/signal.h>
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
-#define isspace(c) ((c) == ' ')
+#define is_space_on_vt(c) ((c) == ' ')
/* FIXME: all this needs locking */
static struct vc_selection {
@@ -109,7 +109,7 @@ static inline int inword(const u32 c)
}
/**
- * set loadlut - load the LUT table
+ * sel_loadlut() - load the LUT table
* @p: user table
*
* Load the LUT table from user space. The caller must hold the console
@@ -209,7 +209,7 @@ static int vc_selection_store_chars(struct vc_data *vc, bool unicode)
bp += store_utf8(c, bp);
else
*bp++ = c;
- if (!isspace(c))
+ if (!is_space_on_vt(c))
obp = bp;
if (!((i + 2) % vc->vc_size_row)) {
/* strip trailing blanks from line and add newline,
@@ -238,9 +238,9 @@ static int vc_do_selection(struct vc_data *vc, unsigned short mode, int ps,
new_sel_end = pe;
break;
case TIOCL_SELWORD: /* word-by-word selection */
- spc = isspace(sel_pos(ps, unicode));
+ spc = is_space_on_vt(sel_pos(ps, unicode));
for (new_sel_start = ps; ; ps -= 2) {
- if ((spc && !isspace(sel_pos(ps, unicode))) ||
+ if ((spc && !is_space_on_vt(sel_pos(ps, unicode))) ||
(!spc && !inword(sel_pos(ps, unicode))))
break;
new_sel_start = ps;
@@ -248,9 +248,9 @@ static int vc_do_selection(struct vc_data *vc, unsigned short mode, int ps,
break;
}
- spc = isspace(sel_pos(pe, unicode));
+ spc = is_space_on_vt(sel_pos(pe, unicode));
for (new_sel_end = pe; ; pe += 2) {
- if ((spc && !isspace(sel_pos(pe, unicode))) ||
+ if ((spc && !is_space_on_vt(sel_pos(pe, unicode))) ||
(!spc && !inword(sel_pos(pe, unicode))))
break;
new_sel_end = pe;
@@ -276,12 +276,12 @@ static int vc_do_selection(struct vc_data *vc, unsigned short mode, int ps,
/* select to end of line if on trailing space */
if (new_sel_end > new_sel_start &&
!atedge(new_sel_end, vc->vc_size_row) &&
- isspace(sel_pos(new_sel_end, unicode))) {
+ is_space_on_vt(sel_pos(new_sel_end, unicode))) {
for (pe = new_sel_end + 2; ; pe += 2)
- if (!isspace(sel_pos(pe, unicode)) ||
+ if (!is_space_on_vt(sel_pos(pe, unicode)) ||
atedge(pe, vc->vc_size_row))
break;
- if (isspace(sel_pos(pe, unicode)))
+ if (is_space_on_vt(sel_pos(pe, unicode)))
new_sel_end = pe;
}
if (vc_sel.start == -1) /* no current selection */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index fa1548d4f94b..ef981d3b7bb4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1189,7 +1189,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
* information and perform any necessary signal handling.
*
* Caller must hold the console semaphore. Takes the termios rwsem and
- * ctrl_lock of the tty IFF a tty is passed.
+ * ctrl.lock of the tty IFF a tty is passed.
*/
static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
@@ -1355,7 +1355,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
* the actual work.
*
* Takes the console sem and the called methods then take the tty
- * termios_rwsem and the tty ctrl_lock in that order.
+ * termios_rwsem and the tty ctrl.lock in that order.
*/
static int vt_resize(struct tty_struct *tty, struct winsize *ws)
{
@@ -2888,7 +2888,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
param.vc = vc;
- while (!tty->stopped && count) {
+ while (!tty->flow.stopped && count) {
int orig = *buf;
buf++;
n++;
@@ -3260,23 +3260,16 @@ static int con_write(struct tty_struct *tty, const unsigned char *buf, int count
static int con_put_char(struct tty_struct *tty, unsigned char ch)
{
- if (in_interrupt())
- return 0; /* n_r3964 calls put_char() from interrupt context */
return do_con_write(tty, &ch, 1);
}
-static int con_write_room(struct tty_struct *tty)
+static unsigned int con_write_room(struct tty_struct *tty)
{
- if (tty->stopped)
+ if (tty->flow.stopped)
return 0;
return 32768; /* No limit, really; we're not buffering */
}
-static int con_chars_in_buffer(struct tty_struct *tty)
-{
- return 0; /* we're not buffering */
-}
-
/*
* con_throttle and con_unthrottle are only used for
* paste_selection(), which has to stuff in a large number of
@@ -3523,7 +3516,6 @@ static const struct tty_operations con_ops = {
.write_room = con_write_room,
.put_char = con_put_char,
.flush_chars = con_flush_chars,
- .chars_in_buffer = con_chars_in_buffer,
.ioctl = vt_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vt_compat_ioctl,
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 5531f3afeb21..2e16c5338e5b 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -18,7 +18,7 @@ config UIO_CIF
depends on PCI
help
Driver for Hilscher CIF DeviceNet and Profibus cards. This
- driver requires a userspace component called cif that handles
+ driver requires a userspace component called cif that handles
all of the heavy lifting and can be found at:
<http://www.osadl.org/projects/downloads/UIO/user/>
diff --git a/drivers/uio/uio_aec.c b/drivers/uio/uio_aec.c
index 32357f8a92b5..64eafd59e6e7 100644
--- a/drivers/uio/uio_aec.c
+++ b/drivers/uio/uio_aec.c
@@ -133,7 +133,7 @@ static void remove(struct pci_dev *pdev)
uio_unregister_device(info);
pci_release_regions(pdev);
pci_disable_device(pdev);
- iounmap(info->priv);
+ pci_iounmap(pdev, info->priv);
}
static struct pci_driver pci_driver = {
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 3bb0b0075467..e03f9b532a96 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -72,7 +72,9 @@ static int probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct uio_pci_generic_dev *gdev;
+ struct uio_mem *uiomem;
int err;
+ int i;
err = pcim_enable_device(pdev);
if (err) {
@@ -101,6 +103,36 @@ static int probe(struct pci_dev *pdev,
"no support for interrupts?\n");
}
+ uiomem = &gdev->info.mem[0];
+ for (i = 0; i < MAX_UIO_MAPS; ++i) {
+ struct resource *r = &pdev->resource[i];
+
+ if (r->flags != (IORESOURCE_SIZEALIGN | IORESOURCE_MEM))
+ continue;
+
+ if (uiomem >= &gdev->info.mem[MAX_UIO_MAPS]) {
+ dev_warn(
+ &pdev->dev,
+ "device has more than " __stringify(
+ MAX_UIO_MAPS) " I/O memory resources.\n");
+ break;
+ }
+
+ uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->addr = r->start & PAGE_MASK;
+ uiomem->offs = r->start & ~PAGE_MASK;
+ uiomem->size =
+ (uiomem->offs + resource_size(r) + PAGE_SIZE - 1) &
+ PAGE_MASK;
+ uiomem->name = r->name;
+ ++uiomem;
+ }
+
+ while (uiomem < &gdev->info.mem[MAX_UIO_MAPS]) {
+ uiomem->size = 0;
+ ++uiomem;
+ }
+
return devm_uio_register_device(&pdev->dev, &gdev->info);
}
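The uio_pci_generic addition above walks the PCI resources and exports every memory BAR as a UIO_MEM_PHYS map while keeping the mapping page-aligned: the physical address is rounded down to a page boundary, the sub-page remainder is reported in ->offs, and the size is rounded up to whole pages. A worked example, assuming 4 KiB pages and a hypothetical BAR at 0xfe001800 of length 0x400:

	addr = 0xfe001800 & PAGE_MASK              = 0xfe001000
	offs = 0xfe001800 & ~PAGE_MASK             = 0x800
	size = (0x800 + 0x400 + 0xfff) & PAGE_MASK = 0x1000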
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 4d3947476f0e..4ce7cba2b48a 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -180,7 +180,7 @@ struct cxacru_data {
struct mutex poll_state_serialize;
enum cxacru_poll_state poll_state;
- /* contol handles */
+ /* control handles */
struct mutex cm_serialize;
u8 *rcv_buf;
u8 *snd_buf;
diff --git a/drivers/usb/cdns3/cdns3-ep0.c b/drivers/usb/cdns3/cdns3-ep0.c
index 9a17802275d5..e29989d57bef 100644
--- a/drivers/usb/cdns3/cdns3-ep0.c
+++ b/drivers/usb/cdns3/cdns3-ep0.c
@@ -677,7 +677,7 @@ static int cdns3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
}
/**
- * cdns3_gadget_ep0_queue Transfer data on endpoint zero
+ * cdns3_gadget_ep0_queue - Transfer data on endpoint zero
* @ep: pointer to endpoint zero object
* @request: pointer to request object
* @gfp_flags: gfp flags
@@ -731,6 +731,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
request->actual = 0;
priv_dev->status_completion_no_call = true;
priv_dev->pending_status_request = request;
+ usb_gadget_set_state(&priv_dev->gadget, USB_STATE_CONFIGURED);
spin_unlock_irqrestore(&priv_dev->lock, flags);
/*
@@ -772,7 +773,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
}
/**
- * cdns3_gadget_ep_set_wedge Set wedge on selected endpoint
+ * cdns3_gadget_ep_set_wedge - Set wedge on selected endpoint
* @ep: endpoint object
*
* Returns 0
@@ -865,7 +866,7 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
}
/**
- * cdns3_init_ep0 Initializes software endpoint 0 of gadget
+ * cdns3_init_ep0 - Initializes software endpoint 0 of gadget
* @priv_dev: extended gadget object
* @priv_ep: extended endpoint object
*
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 5281f8d3fb3d..5d8c982019af 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -155,7 +155,7 @@ static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
}
/**
- * select_ep - selects endpoint
+ * cdns3_select_ep - selects endpoint
* @priv_dev: extended gadget object
* @ep: endpoint address
*/
@@ -430,9 +430,7 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
if (ret)
return ret;
- list_del(&request->list);
- list_add_tail(&request->list,
- &priv_ep->pending_req_list);
+ list_move_tail(&request->list, &priv_ep->pending_req_list);
if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
break;
}
@@ -484,7 +482,7 @@ static void __cdns3_descmiss_copy_data(struct usb_request *request,
}
/**
- * cdns3_wa2_descmiss_copy_data copy data from internal requests to
+ * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
* request queued by class driver.
* @priv_ep: extended endpoint object
* @request: request object
@@ -1835,7 +1833,7 @@ __must_hold(&priv_dev->lock)
}
/**
- * cdns3_device_irq_handler- interrupt handler for device part of controller
+ * cdns3_device_irq_handler - interrupt handler for device part of controller
*
* @irq: irq number for cdns3 core device
* @data: structure of cdns3
@@ -1879,7 +1877,7 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
}
/**
- * cdns3_device_thread_irq_handler- interrupt handler for device part
+ * cdns3_device_thread_irq_handler - interrupt handler for device part
* of controller
*
* @irq: irq number for cdns3 core device
@@ -2022,7 +2020,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
}
/**
- * cdns3_ep_config Configure hardware endpoint
+ * cdns3_ep_config - Configure hardware endpoint
* @priv_ep: extended endpoint object
* @enable: set EP_CFG_ENABLE bit in ep_cfg register.
*/
@@ -2221,7 +2219,7 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
}
/**
- * cdns3_gadget_ep_alloc_request Allocates request
+ * cdns3_gadget_ep_alloc_request - Allocates request
* @ep: endpoint object associated with request
* @gfp_flags: gfp flags
*
@@ -2244,7 +2242,7 @@ struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
}
/**
- * cdns3_gadget_ep_free_request Free memory occupied by request
+ * cdns3_gadget_ep_free_request - Free memory occupied by request
* @ep: endpoint object associated with request
* @request: request to free memory
*/
@@ -2261,7 +2259,7 @@ void cdns3_gadget_ep_free_request(struct usb_ep *ep,
}
/**
- * cdns3_gadget_ep_enable Enable endpoint
+ * cdns3_gadget_ep_enable - Enable endpoint
* @ep: endpoint object
* @desc: endpoint descriptor
*
@@ -2396,7 +2394,7 @@ exit:
}
/**
- * cdns3_gadget_ep_disable Disable endpoint
+ * cdns3_gadget_ep_disable - Disable endpoint
* @ep: endpoint object
*
* Returns 0 on success, error code elsewhere
@@ -2486,7 +2484,7 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
}
/**
- * cdns3_gadget_ep_queue Transfer data on endpoint
+ * __cdns3_gadget_ep_queue - Transfer data on endpoint
* @ep: endpoint object
* @request: request object
* @gfp_flags: gfp flags
@@ -2586,7 +2584,7 @@ static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
}
/**
- * cdns3_gadget_ep_dequeue Remove request from transfer queue
+ * cdns3_gadget_ep_dequeue - Remove request from transfer queue
* @ep: endpoint object associated with request
* @request: request object
*
@@ -2653,7 +2651,7 @@ not_found:
}
/**
- * __cdns3_gadget_ep_set_halt Sets stall on selected endpoint
+ * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
* Should be called after acquiring spin_lock and selecting ep
* @priv_ep: endpoint object to set stall on.
*/
@@ -2674,7 +2672,7 @@ void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
}
/**
- * __cdns3_gadget_ep_clear_halt Clears stall on selected endpoint
+ * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
* Should be called after acquiring spin_lock and selecting ep
* @priv_ep: endpoint object to clear stall on
*/
@@ -2719,7 +2717,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
}
/**
- * cdns3_gadget_ep_set_halt Sets/clears stall on selected endpoint
+ * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
* @ep: endpoint object to set/clear stall on
* @value: 1 for set stall, 0 for clear stall
*
@@ -2765,7 +2763,7 @@ static const struct usb_ep_ops cdns3_gadget_ep_ops = {
};
/**
- * cdns3_gadget_get_frame Returns number of actual ITP frame
+ * cdns3_gadget_get_frame - Returns number of actual ITP frame
* @gadget: gadget object
*
* Returns number of actual ITP frame
@@ -2874,7 +2872,7 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev)
}
/**
- * cdns3_gadget_udc_start Gadget start
+ * cdns3_gadget_udc_start - Gadget start
* @gadget: gadget object
* @driver: driver which operates on this gadget
*
@@ -2920,7 +2918,7 @@ static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
}
/**
- * cdns3_gadget_udc_stop Stops gadget
+ * cdns3_gadget_udc_stop - Stops gadget
* @gadget: gadget object
*
* Returns 0
@@ -2983,7 +2981,7 @@ static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
}
/**
- * cdns3_init_eps Initializes software endpoints of gadget
+ * cdns3_init_eps - Initializes software endpoints of gadget
* @priv_dev: extended gadget object
*
* Returns 0 on success, error code elsewhere
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 74e758dc0895..59860d1753fd 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller
*
* Copyright (C) 2019 NXP
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index e1deeada425c..4d0f027e5bd3 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -170,7 +170,7 @@ err_phy3_init:
}
/**
- * cdns3_remove - unbind drd driver and clean up
+ * cdns3_plat_remove() - unbind drd driver and clean up
* @pdev: Pointer to Linux platform device
*
* Returns 0 on success otherwise negative errno
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index eccb1c766bba..07c318770362 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* cdns3-ti.c - TI specific Glue layer for Cadence USB Controller
*
* Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index c083985e387b..27df0c697897 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -56,7 +56,8 @@ u32 cdnsp_port_state_to_neutral(u32 state)
}
/**
- * Find the offset of the extended capabilities with capability ID id.
+ * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
+ * with capability ID id.
* @base: PCI MMIO registers base address.
* @start: Address at which to start looking, (0 or HCC_PARAMS to start at
* beginning of list)
@@ -1151,7 +1152,7 @@ static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
struct cdnsp_ep *pep = to_cdnsp_ep(ep);
struct cdnsp_device *pdev = pep->pdev;
struct cdnsp_request *preq;
- unsigned long flags = 0;
+ unsigned long flags;
int ret;
spin_lock_irqsave(&pdev->lock, flags);
@@ -1176,7 +1177,7 @@ static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
struct cdnsp_ep *pep = to_cdnsp_ep(ep);
struct cdnsp_device *pdev = pep->pdev;
- unsigned long flags = 0;
+ unsigned long flags;
int ret;
spin_lock_irqsave(&pdev->lock, flags);
@@ -1881,7 +1882,7 @@ static int __cdnsp_gadget_init(struct cdns *cdns)
pdev->gadget.name = "cdnsp-gadget";
pdev->gadget.speed = USB_SPEED_UNKNOWN;
pdev->gadget.sg_supported = 1;
- pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+ pdev->gadget.max_speed = max_speed;
pdev->gadget.lpm_capable = 1;
pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index 783ca8ffde00..f740fa6089d8 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -383,8 +383,8 @@ struct cdnsp_intr_reg {
#define IMAN_IE BIT(1)
#define IMAN_IP BIT(0)
/* bits 2:31 need to be preserved */
-#define IMAN_IE_SET(p) (((p) & IMAN_IE) | 0x2)
-#define IMAN_IE_CLEAR(p) (((p) & IMAN_IE) & ~(0x2))
+#define IMAN_IE_SET(p) ((p) | IMAN_IE)
+#define IMAN_IE_CLEAR(p) ((p) & ~IMAN_IE)
/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
/*
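
The comment above those macros notes that bits 2:31 must be preserved, which the old definitions silently discarded. A small stand-alone sketch with a hypothetical register value (not part of the patch) showing the difference:

#include <stdint.h>
#include <stdio.h>

#define IMAN_IE (1u << 1)

/* old definition: masks away everything except the IE bit before OR-ing */
static uint32_t old_iman_ie_set(uint32_t p) { return (p & IMAN_IE) | 0x2; }
/* new definition: only sets IE, every other bit survives */
static uint32_t new_iman_ie_set(uint32_t p) { return p | IMAN_IE; }

int main(void)
{
        uint32_t iman = 0xdeadbe01;     /* pretend bits 2:31 carry state worth keeping */

        printf("old: %#x\n", old_iman_ie_set(iman));    /* 0x2: upper bits lost */
        printf("new: %#x\n", new_iman_ie_set(iman));    /* 0xdeadbe03: preserved */
        return 0;
}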
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
index 5d4c4bfe15b7..a47948a1623f 100644
--- a/drivers/usb/cdns3/cdnsp-mem.c
+++ b/drivers/usb/cdns3/cdnsp-mem.c
@@ -1082,9 +1082,8 @@ void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
dma_pool_destroy(pdev->device_pool);
pdev->device_pool = NULL;
- if (pdev->dcbaa)
- dma_free_coherent(dev, sizeof(*pdev->dcbaa),
- pdev->dcbaa, pdev->dcbaa->dma);
+ dma_free_coherent(dev, sizeof(*pdev->dcbaa),
+ pdev->dcbaa, pdev->dcbaa->dma);
pdev->dcbaa = NULL;
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 68972746e363..1b1438457fb0 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1932,15 +1932,13 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
}
if (enqd_len + trb_buff_len >= full_len) {
- if (need_zero_pkt && zero_len_trb) {
- zero_len_trb = true;
- } else {
- field &= ~TRB_CHAIN;
- field |= TRB_IOC;
- more_trbs_coming = false;
- need_zero_pkt = false;
- preq->td.last_trb = ring->enqueue;
- }
+ if (need_zero_pkt)
+ zero_len_trb = !zero_len_trb;
+
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ preq->td.last_trb = ring->enqueue;
}
/* Only set interrupt on short packet for OUT endpoints. */
@@ -1955,7 +1953,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
- cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+ cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
lower_32_bits(send_addr),
upper_32_bits(send_addr),
length_field,
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
index 5aa88ca012de..6a2571c6aa9e 100644
--- a/drivers/usb/cdns3/cdnsp-trace.h
+++ b/drivers/usb/cdns3/cdnsp-trace.h
@@ -138,7 +138,7 @@ DECLARE_EVENT_CLASS(cdnsp_log_simple,
__string(text, msg)
),
TP_fast_assign(
- __assign_str(text, msg)
+ __assign_str(text, msg);
),
TP_printk("%s", __get_str(text))
);
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index bb739d88179f..dbcdf3b24b47 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -332,7 +332,7 @@ exit:
}
/**
- * cdsn3_role_get - get current role of controller.
+ * cdns_role_get - get current role of controller.
*
* @sw: pointer to USB role switch structure
*
@@ -419,7 +419,7 @@ static irqreturn_t cdns_wakeup_irq(int irq, void *data)
}
/**
- * cdns_probe - probe for cdns3/cdnsp core device
+ * cdns_init - probe for cdns3/cdnsp core device
* @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0697eb980e5f..99440baa6458 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -195,7 +195,6 @@ struct hw_bank {
* @phy: pointer to PHY, if any
* @usb_phy: pointer to USB PHY, if any and if using the USB PHY framework
* @hcd: pointer to usb_hcd for ehci host driver
- * @debugfs: root dentry for this controller in debugfs
* @id_event: indicates there is an id event, and handled at ci_otg_work
* @b_sess_valid_event: indicates there is a vbus event, and handled
* at ci_otg_work
@@ -249,7 +248,6 @@ struct ci_hdrc {
/* old usb_phy interface */
struct usb_phy *usb_phy;
struct usb_hcd *hcd;
- struct dentry *debugfs;
bool id_event;
bool b_sess_valid_event;
bool imx28_write_fix;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 3f6c21406dbd..2b18f5088ae4 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -335,7 +335,7 @@ static int _ci_usb_phy_init(struct ci_hdrc *ci)
}
/**
- * _ci_usb_phy_exit: deinitialize phy taking in account both phy and usb_phy
+ * ci_usb_phy_exit: deinitialize phy taking in account both phy and usb_phy
* interfaces
* @ci: the controller
*/
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index da5d18cf6840..faf6b078b6c4 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -342,26 +342,20 @@ DEFINE_SHOW_ATTRIBUTE(ci_registers);
*/
void dbg_create_files(struct ci_hdrc *ci)
{
- ci->debugfs = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);
-
- debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
- &ci_device_fops);
- debugfs_create_file("port_test", S_IRUGO | S_IWUSR, ci->debugfs, ci,
- &ci_port_test_fops);
- debugfs_create_file("qheads", S_IRUGO, ci->debugfs, ci,
- &ci_qheads_fops);
- debugfs_create_file("requests", S_IRUGO, ci->debugfs, ci,
- &ci_requests_fops);
-
- if (ci_otg_is_fsm_mode(ci)) {
- debugfs_create_file("otg", S_IRUGO, ci->debugfs, ci,
- &ci_otg_fops);
- }
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);
+
+ debugfs_create_file("device", S_IRUGO, dir, ci, &ci_device_fops);
+ debugfs_create_file("port_test", S_IRUGO | S_IWUSR, dir, ci, &ci_port_test_fops);
+ debugfs_create_file("qheads", S_IRUGO, dir, ci, &ci_qheads_fops);
+ debugfs_create_file("requests", S_IRUGO, dir, ci, &ci_requests_fops);
+
+ if (ci_otg_is_fsm_mode(ci))
+ debugfs_create_file("otg", S_IRUGO, dir, ci, &ci_otg_fops);
- debugfs_create_file("role", S_IRUGO | S_IWUSR, ci->debugfs, ci,
- &ci_role_fops);
- debugfs_create_file("registers", S_IRUGO, ci->debugfs, ci,
- &ci_registers_fops);
+ debugfs_create_file("role", S_IRUGO | S_IWUSR, dir, ci, &ci_role_fops);
+ debugfs_create_file("registers", S_IRUGO, dir, ci, &ci_registers_fops);
}
/**
@@ -370,5 +364,5 @@ void dbg_create_files(struct ci_hdrc *ci)
*/
void dbg_remove_files(struct ci_hdrc *ci)
{
- debugfs_remove_recursive(ci->debugfs);
+ debugfs_remove(debugfs_lookup(dev_name(ci->dev), usb_debug_root));
}
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index d3aada3ce7ec..8dd59282827b 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -22,7 +22,7 @@
#include "otg_fsm.h"
/**
- * hw_read_otgsc returns otgsc register bits value.
+ * hw_read_otgsc - returns otgsc register bits value.
* @ci: the controller
* @mask: bitfield mask
*/
@@ -75,7 +75,7 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
}
/**
- * hw_write_otgsc updates target bits of OTGSC register.
+ * hw_write_otgsc - updates target bits of OTGSC register.
* @ci: the controller
* @mask: bitfield mask
* @data: to be written
@@ -140,8 +140,9 @@ void ci_handle_vbus_change(struct ci_hdrc *ci)
}
/**
- * When we switch to device mode, the vbus value should be lower
- * than OTGSC_BSV before connecting to host.
+ * hw_wait_vbus_lower_bsv - When we switch to device mode, the vbus value
+ * should be lower than OTGSC_BSV before connecting
+ * to host.
*
* @ci: the controller
*
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 393f216b9161..8834ca613721 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -238,7 +238,7 @@ static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
}
/**
- * hw_is_port_high_speed: test if port is high speed
+ * hw_port_is_high_speed: test if port is high speed
* @ci: the controller
*
* This function returns true if high speed port
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ca7a61190dd9..4895325b16a4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -838,7 +838,7 @@ static int acm_tty_write(struct tty_struct *tty,
return count;
}
-static int acm_tty_write_room(struct tty_struct *tty)
+static unsigned int acm_tty_write_room(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
/*
@@ -848,7 +848,7 @@ static int acm_tty_write_room(struct tty_struct *tty)
return acm_wb_is_avail(acm) ? acm->writesize : 0;
}
-static int acm_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
/*
@@ -1056,21 +1056,8 @@ static void acm_tty_set_termios(struct tty_struct *tty,
newline.bParityType = termios->c_cflag & PARENB ?
(termios->c_cflag & PARODD ? 1 : 2) +
(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- newline.bDataBits = 5;
- break;
- case CS6:
- newline.bDataBits = 6;
- break;
- case CS7:
- newline.bDataBits = 7;
- break;
- case CS8:
- default:
- newline.bDataBits = 8;
- break;
- }
+ newline.bDataBits = tty_get_char_size(termios->c_cflag);
+
/* FIXME: Needs to clear unsupported bits in the termios */
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
@@ -1959,6 +1946,11 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ /* Exclude Heimann Sensor GmbH USB appset demo */
+ { USB_DEVICE(0x32a7, 0x0000),
+ .driver_info = IGNORE_DEVICE,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 8e5490ac13a2..35d5908b5478 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
};
/* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN
+#ifdef CONFIG_WWAN_CORE
static int wdm_wwan_port_start(struct wwan_port *port)
{
struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
/* inbuf has been copied, it is safe to check for outstanding data */
schedule_work(&desc->service_outs_intr);
}
-#else /* CONFIG_WWAN */
+#else /* CONFIG_WWAN_CORE */
static void wdm_wwan_init(struct wdm_device *desc) {}
static void wdm_wwan_deinit(struct wdm_device *desc) {}
static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN */
+#endif /* CONFIG_WWAN_CORE */
/* --- error handling --- */
static void wdm_rxwork(struct work_struct *work)
@@ -1035,9 +1035,10 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
INIT_WORK(&desc->rxwork, wdm_rxwork);
INIT_WORK(&desc->service_outs_intr, service_interrupt_work);
- rv = -EINVAL;
- if (!usb_endpoint_is_int_in(ep))
+ if (!usb_endpoint_is_int_in(ep)) {
+ rv = -EINVAL;
goto err;
+ }
desc->wMaxPacketSize = usb_endpoint_maxp(ep);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 74d5a9c5238a..73f419adce61 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -2324,17 +2324,10 @@ static void usbtmc_interrupt(struct urb *urb)
dev_err(dev, "overflow with length %d, actual length is %d\n",
data->iin_wMaxPacketSize, urb->actual_length);
fallthrough;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -EILSEQ:
- case -ETIME:
- case -EPIPE:
+ default:
/* urb terminated, clean up */
dev_dbg(dev, "urb terminated, status: %d\n", status);
return;
- default:
- dev_err(dev, "unknown status received: %d\n", status);
}
exit:
rv = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index ce5e6f6711f7..7e13b74e60e5 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -141,7 +141,7 @@ static const struct device_type ulpi_dev_type = {
/* -------------------------------------------------------------------------- */
/**
- * ulpi_register_driver - register a driver with the ULPI bus
+ * __ulpi_register_driver - register a driver with the ULPI bus
* @drv: driver being registered
* @module: ends up being THIS_MODULE
*
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index 6c4e3a19f42c..0158148cb054 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -83,11 +83,11 @@ static void usb_conn_detect_cable(struct work_struct *work)
else
role = USB_ROLE_NONE;
- dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n",
- info->last_role, role, id, vbus);
+ dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n",
+ usb_role_string(info->last_role), usb_role_string(role), id, vbus);
if (info->last_role == role) {
- dev_warn(info->dev, "repeated role: %d\n", role);
+ dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role));
return;
}
@@ -149,14 +149,32 @@ static int usb_charger_get_property(struct power_supply *psy,
return 0;
}
-static int usb_conn_probe(struct platform_device *pdev)
+static int usb_conn_psy_register(struct usb_conn_info *info)
{
- struct device *dev = &pdev->dev;
- struct power_supply_desc *desc;
- struct usb_conn_info *info;
+ struct device *dev = info->dev;
+ struct power_supply_desc *desc = &info->desc;
struct power_supply_config cfg = {
.of_node = dev->of_node,
};
+
+ desc->name = "usb-charger";
+ desc->properties = usb_charger_properties;
+ desc->num_properties = ARRAY_SIZE(usb_charger_properties);
+ desc->get_property = usb_charger_get_property;
+ desc->type = POWER_SUPPLY_TYPE_USB;
+ cfg.drv_data = info;
+
+ info->charger = devm_power_supply_register(dev, desc, &cfg);
+ if (IS_ERR(info->charger))
+ dev_err(dev, "Unable to register charger\n");
+
+ return PTR_ERR_OR_ZERO(info->charger);
+}
+
+static int usb_conn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_conn_info *info;
bool need_vbus = true;
int ret = 0;
@@ -205,18 +223,18 @@ static int usb_conn_probe(struct platform_device *pdev)
}
if (IS_ERR(info->vbus)) {
- if (PTR_ERR(info->vbus) != -EPROBE_DEFER)
- dev_err(dev, "failed to get vbus: %ld\n", PTR_ERR(info->vbus));
- return PTR_ERR(info->vbus);
+ ret = PTR_ERR(info->vbus);
+ return dev_err_probe(dev, ret, "failed to get vbus: %d\n", ret);
}
info->role_sw = usb_role_switch_get(dev);
- if (IS_ERR(info->role_sw)) {
- if (PTR_ERR(info->role_sw) != -EPROBE_DEFER)
- dev_err(dev, "failed to get role switch\n");
+ if (IS_ERR(info->role_sw))
+ return dev_err_probe(dev, PTR_ERR(info->role_sw),
+ "failed to get role switch\n");
- return PTR_ERR(info->role_sw);
- }
+ ret = usb_conn_psy_register(info);
+ if (ret)
+ goto put_role_sw;
if (info->id_gpiod) {
info->id_irq = gpiod_to_irq(info->id_gpiod);
@@ -252,20 +270,6 @@ static int usb_conn_probe(struct platform_device *pdev)
}
}
- desc = &info->desc;
- desc->name = "usb-charger";
- desc->properties = usb_charger_properties;
- desc->num_properties = ARRAY_SIZE(usb_charger_properties);
- desc->get_property = usb_charger_get_property;
- desc->type = POWER_SUPPLY_TYPE_USB;
- cfg.drv_data = info;
-
- info->charger = devm_power_supply_register(dev, desc, &cfg);
- if (IS_ERR(info->charger)) {
- dev_err(dev, "Unable to register charger\n");
- return PTR_ERR(info->charger);
- }
-
platform_set_drvdata(pdev, info);
/* Perform initial detection */
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 3740cf95560e..0697fde51d00 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -193,7 +193,11 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm)
if (!fsm->host_req_flag)
return;
- INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+ if (!fsm->hnp_work_inited) {
+ INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+ fsm->hnp_work_inited = true;
+ }
+
schedule_delayed_work(&fsm->hnp_polling_work,
msecs_to_jiffies(T_HOST_REQ_POLL));
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 2218941d35a3..9618ba622a2d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps,
"wIndex=%04x wLength=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
- if (ctrl->bRequestType & 0x80) {
+ if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);
@@ -1162,7 +1162,7 @@ static int do_proc_control(struct usb_dev_state *ps,
tbuf, ctrl->wLength);
usb_unlock_device(dev);
- i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl->bRequest,
+ i = usb_control_msg(dev, pipe, ctrl->bRequest,
ctrl->bRequestType, ctrl->wValue, ctrl->wIndex,
tbuf, ctrl->wLength, tmo);
usb_lock_device(dev);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6119fb41d736..0f8b7c93310e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2111,6 +2111,136 @@ int usb_hcd_get_frame_number (struct usb_device *udev)
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+
+static void usb_ehset_completion(struct urb *urb)
+{
+ struct completion *done = urb->context;
+
+ complete(done);
+}
+/*
+ * Allocate and initialize a control URB. This request will be used by the
+ * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
+ * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
+ * Return NULL if failed.
+ */
+static struct urb *request_single_step_set_feature_urb(
+ struct usb_device *udev,
+ void *dr,
+ void *buf,
+ struct completion *done)
+{
+ struct urb *urb;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_host_endpoint *ep;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ urb->pipe = usb_rcvctrlpipe(udev, 0);
+ ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+ [usb_pipeendpoint(urb->pipe)];
+ if (!ep) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->ep = ep;
+ urb->dev = udev;
+ urb->setup_packet = (void *)dr;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+ urb->complete = usb_ehset_completion;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags = URB_DIR_IN;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) {
+ usb_put_urb(urb);
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->context = done;
+ return urb;
+}
+
+int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ int retval = -ENOMEM;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct usb_device *udev;
+ struct usb_device_descriptor *buf;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ /* Obtain udev of the rhub's child port */
+ udev = usb_hub_find_child(hcd->self.root_hub, port);
+ if (!udev) {
+ dev_err(hcd->self.controller, "No device attached to the RootHub\n");
+ return -ENODEV;
+ }
+ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!dr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Fill Setup packet for GetDescriptor */
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+ if (!urb)
+ goto cleanup;
+
+ /* Submit just the SETUP stage */
+ retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 1);
+ if (retval)
+ goto out1;
+ if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ dev_err(hcd->self.controller,
+ "%s SETUP stage timed out on ep0\n", __func__);
+ goto out1;
+ }
+ msleep(15 * 1000);
+
+ /* Complete remaining DATA and STATUS stages using the same URB */
+ urb->status = -EINPROGRESS;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0);
+ if (!retval && !wait_for_completion_timeout(&done,
+ msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ dev_err(hcd->self.controller,
+ "%s IN stage timed out on ep0\n", __func__);
+ }
+out1:
+ usb_free_urb(urb);
+cleanup:
+ kfree(dr);
+ kfree(buf);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(ehset_single_step_set_feature);
+#endif /* CONFIG_USB_HCD_TEST_MODE */
+
+/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index df8e69e60aaf..86658a81d284 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -48,6 +48,7 @@
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+#define USB_PING_RESPONSE_TIME 400 /* ns */
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
@@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev)
}
/*
- * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
- * either U1 or U2.
+ * Set the Maximum Exit Latency (MEL) for the host to wake up the path from
+ * U1/U2, send a PING to the device and receive a PING_RESPONSE.
+ * See USB 3.1 section C.1.5.2
*/
static void usb_set_lpm_mel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
@@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev,
unsigned int hub_exit_latency)
{
unsigned int total_mel;
- unsigned int device_mel;
- unsigned int hub_mel;
/*
- * Calculate the time it takes to transition all links from the roothub
- * to the parent hub into U0. The parent hub must then decode the
- * packet (hub header decode latency) to figure out which port it was
- * bound for.
- *
- * The Hub Header decode latency is expressed in 0.1us intervals (0x1
- * means 0.1us). Multiply that by 100 to get nanoseconds.
+ * tMEL1. Time to transition the path from host to device into U0.
+ * MEL for parent already contains the delay up to parent, so only add
+ * the exit latency for the last link (pick the slower exit latency),
+ * and the hub header decode latency. See USB 3.1 section C 2.2.1
+ * Store MEL in nanoseconds
*/
total_mel = hub_lpm_params->mel +
- (hub->descriptor->u.ss.bHubHdrDecLat * 100);
+ max(udev_exit_latency, hub_exit_latency) * 1000 +
+ hub->descriptor->u.ss.bHubHdrDecLat * 100;
/*
- * How long will it take to transition the downstream hub's port into
- * U0? The greater of either the hub exit latency or the device exit
- * latency.
- *
- * The BOS U1/U2 exit latencies are expressed in 1us intervals.
- * Multiply that by 1000 to get nanoseconds.
+ * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for
+ * each link + wHubDelay for each hub. Add only for last link.
+ * tMEL4, the time for PING_RESPONSE to traverse upstream is similar.
+ * Multiply by 2 to include it as well.
*/
- device_mel = udev_exit_latency * 1000;
- hub_mel = hub_exit_latency * 1000;
- if (device_mel > hub_mel)
- total_mel += device_mel;
- else
- total_mel += hub_mel;
+ total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
+ USB_TP_TRANSMISSION_DELAY) * 2;
+
+ /*
+ * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
+ * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
+ * to cover the delay if the PING_RESPONSE is queued behind a Max Packet
+ * Size DP.
+ * Note these delays should be added only once for the entire path, so
+ * add them to the MEL of the device connected to the roothub.
+ */
+ if (!hub->hdev->parent)
+ total_mel += USB_PING_RESPONSE_TIME + 2100;
udev_lpm_params->mel = total_mel;
}
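
To keep track of which term comes from which tMEL component, here is a stand-alone back-of-the-envelope version of the new calculation; every input below is hypothetical and the constants simply mirror the ones used above (not part of the patch):

#include <stdio.h>

#define USB_TP_TRANSMISSION_DELAY 40    /* ns */
#define USB_PING_RESPONSE_TIME    400   /* ns */

int main(void)
{
        unsigned int parent_mel        = 0;     /* MEL accumulated up to the parent hub, ns */
        unsigned int udev_exit_latency = 2;     /* us, from the device's BOS descriptor */
        unsigned int hub_exit_latency  = 1;     /* us, from the parent hub */
        unsigned int hub_hdr_dec_lat   = 4;     /* 0.1 us units -> 400 ns */
        unsigned int hub_delay         = 400;   /* wHubDelay, ns */
        int attached_to_roothub        = 1;

        unsigned int max_el = udev_exit_latency > hub_exit_latency ?
                              udev_exit_latency : hub_exit_latency;

        unsigned int total_mel = parent_mel
                + max_el * 1000                 /* tMEL1: exit latency of the last link */
                + hub_hdr_dec_lat * 100         /* tMEL1: hub header decode latency */
                + (hub_delay + USB_TP_TRANSMISSION_DELAY) * 2;  /* tMEL2 + tMEL4 */

        if (attached_to_roothub)
                total_mel += USB_PING_RESPONSE_TIME + 2100;     /* tMEL3, added once per path */

        printf("total MEL = %u ns\n", total_mel);       /* 5780 ns with these inputs */
        return 0;
}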
@@ -2434,6 +2438,8 @@ static void set_usb_port_removable(struct usb_device *udev)
u16 wHubCharacteristics;
bool removable = true;
+ dev_set_removable(&udev->dev, DEVICE_REMOVABLE_UNKNOWN);
+
if (!hdev)
return;
@@ -2445,11 +2451,11 @@ static void set_usb_port_removable(struct usb_device *udev)
*/
switch (hub->ports[udev->portnum - 1]->connect_type) {
case USB_PORT_CONNECT_TYPE_HOT_PLUG:
- udev->removable = USB_DEVICE_REMOVABLE;
+ dev_set_removable(&udev->dev, DEVICE_REMOVABLE);
return;
case USB_PORT_CONNECT_TYPE_HARD_WIRED:
case USB_PORT_NOT_USED:
- udev->removable = USB_DEVICE_FIXED;
+ dev_set_removable(&udev->dev, DEVICE_FIXED);
return;
default:
break;
@@ -2474,9 +2480,9 @@ static void set_usb_port_removable(struct usb_device *udev)
}
if (removable)
- udev->removable = USB_DEVICE_REMOVABLE;
+ dev_set_removable(&udev->dev, DEVICE_REMOVABLE);
else
- udev->removable = USB_DEVICE_FIXED;
+ dev_set_removable(&udev->dev, DEVICE_FIXED);
}
@@ -2548,8 +2554,7 @@ int usb_new_device(struct usb_device *udev)
device_enable_async_suspend(&udev->dev);
/* check whether the hub or firmware marks this port as non-removable */
- if (udev->parent)
- set_usb_port_removable(udev);
+ set_usb_port_removable(udev);
/* Register the device. The device driver is responsible
* for configuring the device and invoking the add-device
@@ -3387,6 +3392,26 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
status = 0;
}
if (status) {
+ /* Check if the port has been suspended for the timeout case
+ * to prevent the suspended port from being handled incorrectly.
+ */
+ if (status == -ETIMEDOUT) {
+ int ret;
+ u16 portstatus, portchange;
+
+ portstatus = portchange = 0;
+ ret = hub_port_status(hub, port1, &portstatus,
+ &portchange);
+
+ dev_dbg(&port_dev->dev,
+ "suspend timeout, status %04x\n", portstatus);
+
+ if (ret == 0 && port_is_suspended(hub, portstatus)) {
+ status = 0;
+ goto suspend_done;
+ }
+ }
+
dev_dbg(&port_dev->dev, "can't suspend, status %d\n", status);
/* Try to enable USB3 LTM again */
@@ -3403,6 +3428,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
+ suspend_done:
dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
udev->do_remote_wakeup);
@@ -4091,6 +4117,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
}
/*
+ * Don't allow device initiated U1/U2 if the system exit latency + one bus
+ * interval is greater than the minimum service interval of any active
+ * periodic endpoint. See USB 3.2 section 9.4.9
+ */
+static bool usb_device_may_initiate_lpm(struct usb_device *udev,
+ enum usb3_link_state state)
+{
+ unsigned int sel; /* us */
+ int i, j;
+
+ if (state == USB3_LPM_U1)
+ sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+ else if (state == USB3_LPM_U2)
+ sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+ else
+ return false;
+
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ struct usb_interface *intf;
+ struct usb_endpoint_descriptor *desc;
+ unsigned int interval;
+
+ intf = udev->actconfig->interface[i];
+ if (!intf)
+ continue;
+
+ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
+ desc = &intf->cur_altsetting->endpoint[j].desc;
+
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ interval = (1 << (desc->bInterval - 1)) * 125;
+ if (sel + 125 > interval)
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/*
* Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
* U1/U2 entry.
*
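
The effect of the check added in this hunk is easier to see with numbers plugged in; a tiny sketch with hypothetical SEL and bInterval values (the microsecond conversion matches the hunk above, nothing here is kernel API):

#include <stdio.h>

/* device-initiated U1/U2 is vetoed when sel + one bus interval (125 us)
 * exceeds the service interval of any periodic endpoint */
static int may_initiate_lpm(unsigned int sel_ns, unsigned int bInterval)
{
        unsigned int sel_us   = (sel_ns + 999) / 1000;          /* DIV_ROUND_UP(sel, 1000) */
        unsigned int interval = (1u << (bInterval - 1)) * 125;  /* us */

        return sel_us + 125 <= interval;
}

int main(void)
{
        /* U1 system exit latency of 2500 ns against two hypothetical endpoints */
        printf("bInterval=1: %s\n", may_initiate_lpm(2500, 1) ? "allow" : "veto");
        printf("bInterval=3: %s\n", may_initiate_lpm(2500, 3) ? "allow" : "veto");
        return 0;
}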
@@ -4162,20 +4229,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
* U1/U2_ENABLE
*/
if (udev->actconfig &&
- usb_set_device_initiated_lpm(udev, state, true) == 0) {
- if (state == USB3_LPM_U1)
- udev->usb3_lpm_u1_enabled = 1;
- else if (state == USB3_LPM_U2)
- udev->usb3_lpm_u2_enabled = 1;
- } else {
- /* Don't request U1/U2 entry if the device
- * cannot transition to U1/U2.
- */
- usb_set_lpm_timeout(udev, state, 0);
- hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+ usb_device_may_initiate_lpm(udev, state)) {
+ if (usb_set_device_initiated_lpm(udev, state, true)) {
+ /*
+ * Request to enable device initiated U1/U2 failed,
+ * better to turn off lpm in this case.
+ */
+ usb_set_lpm_timeout(udev, state, 0);
+ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+ return;
+ }
}
-}
+ if (state == USB3_LPM_U1)
+ udev->usb3_lpm_u1_enabled = 1;
+ else if (state == USB3_LPM_U2)
+ udev->usb3_lpm_u2_enabled = 1;
+}
/*
* Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
* U1/U2 entry.
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 30e9e680c74c..4d59d927ae3e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -783,6 +783,9 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type,
int i;
int result;
+ if (size <= 0) /* No point in asking for no data */
+ return -EINVAL;
+
memset(buf, 0, size); /* Make sure we parse really received data */
for (i = 0; i < 3; ++i) {
@@ -832,6 +835,9 @@ static int usb_get_string(struct usb_device *dev, unsigned short langid,
int i;
int result;
+ if (size <= 0) /* No point in asking for no data */
+ return -EINVAL;
+
for (i = 0; i < 3; ++i) {
/* retry on length 0 or stall; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 21e7522655ac..8239fe7129dd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -406,7 +406,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
- { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
/* Generic RTL8153 based ethernet adapters */
{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
@@ -502,10 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
- /* Fibocom L850-GL LTE Modem */
- { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5a168ba9fc51..fa2e49d432ff 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -301,29 +301,6 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(urbnum);
-static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct usb_device *udev;
- char *state;
-
- udev = to_usb_device(dev);
-
- switch (udev->removable) {
- case USB_DEVICE_REMOVABLE:
- state = "removable";
- break;
- case USB_DEVICE_FIXED:
- state = "fixed";
- break;
- default:
- state = "unknown";
- }
-
- return sprintf(buf, "%s\n", state);
-}
-static DEVICE_ATTR_RO(removable);
-
static ssize_t ltm_capable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -828,7 +805,6 @@ static struct attribute *dev_attrs[] = {
&dev_attr_avoid_reset_quirk.attr,
&dev_attr_authorized.attr,
&dev_attr_remove.attr,
- &dev_attr_removable.attr,
&dev_attr_ltm_capable.attr,
#ifdef CONFIG_OF
&dev_attr_devspec.attr,
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 357b149b20d3..30727729a44c 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -407,6 +407,15 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
return -ENOEXEC;
is_out = !(setup->bRequestType & USB_DIR_IN) ||
!setup->wLength;
+ dev_WARN_ONCE(&dev->dev, (usb_pipeout(urb->pipe) != is_out),
+ "BOGUS control dir, pipe %x doesn't match bRequestType %x\n",
+ urb->pipe, setup->bRequestType);
+ if (le16_to_cpu(setup->wLength) != urb->transfer_buffer_length) {
+ dev_dbg(&dev->dev, "BOGUS control len %d doesn't match transfer length %d\n",
+ le16_to_cpu(setup->wLength),
+ urb->transfer_buffer_length);
+ return -EBADR;
+ }
} else {
is_out = usb_endpoint_dir_out(&ep->desc);
}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 6f70ab9577b4..272ae5722c86 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -1111,15 +1111,6 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
if (hsotg->params.phy_utmi_width == 16)
usbcfg |= GUSBCFG_PHYIF16;
-
- /* Set turnaround time */
- if (dwc2_is_device_mode(hsotg)) {
- usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
- if (hsotg->params.phy_utmi_width == 16)
- usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
- else
- usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
- }
break;
default:
dev_err(hsotg->dev, "FS PHY selected at HS!\n");
@@ -1141,6 +1132,24 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
return retval;
}
+static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
+{
+ u32 usbcfg;
+
+ if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
+ return;
+
+ usbcfg = dwc2_readl(hsotg, GUSBCFG);
+
+ usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
+ if (hsotg->params.phy_utmi_width == 16)
+ usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
+ else
+ usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
+
+ dwc2_writel(hsotg, usbcfg, GUSBCFG);
+}
+
int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
u32 usbcfg;
@@ -1158,6 +1167,9 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
retval = dwc2_hs_phy_init(hsotg, select_phy);
if (retval)
return retval;
+
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_set_turnaround_time(hsotg);
}
if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index ab6b815e0089..483de2bbfaab 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -383,6 +383,9 @@ enum dwc2_ep0_state {
* 0 - No (default)
* 1 - Partial power down
* 2 - Hibernation
+ * @no_clock_gating: Specifies whether to avoid clock gating feature.
+ * 0 - No (use clock gating)
+ * 1 - Yes (avoid it)
* @lpm: Enable LPM support.
* 0 - No
* 1 - Yes
@@ -480,6 +483,7 @@ struct dwc2_core_params {
#define DWC2_POWER_DOWN_PARAM_NONE 0
#define DWC2_POWER_DOWN_PARAM_PARTIAL 1
#define DWC2_POWER_DOWN_PARAM_HIBERNATION 2
+ bool no_clock_gating;
bool lpm;
bool lpm_clock_gating;
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index a5ab03808da6..a5c52b237e72 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
* If neither hibernation nor partial power down are supported,
* clock gating is used to save power.
*/
- dwc2_gadget_enter_clock_gating(hsotg);
+ if (!hsotg->params.no_clock_gating)
+ dwc2_gadget_enter_clock_gating(hsotg);
}
/*
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 184964174dc0..3146df6e6510 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1496,8 +1496,8 @@ static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
- unsigned long flags = 0;
- int ret = 0;
+ unsigned long flags;
+ int ret;
spin_lock_irqsave(&hs->lock, flags);
ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
@@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
return;
}
- /* Zlp for all endpoints, for ep0 only in DATA IN stage */
+ /* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
if (hs_ep->send_zlp) {
- dwc2_hsotg_program_zlp(hsotg, hs_ep);
hs_ep->send_zlp = 0;
- /* transfer will be completed on next complete interrupt */
- return;
+ if (!using_desc_dma(hsotg)) {
+ dwc2_hsotg_program_zlp(hsotg, hs_ep);
+ /* transfer will be completed on next complete interrupt */
+ return;
+ }
}
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
@@ -3338,7 +3340,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
/**
- * dwc2_hsotg_core_init - issue softreset to the core
+ * dwc2_hsotg_core_init_disconnected - issue softreset to the core
* @hsotg: The device state
* @is_usb_reset: Usb resetting flag
*
@@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
__func__);
}
} else {
+ /* Mask GINTSTS_GOUTNAKEFF interrupt */
+ dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
+
if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
+ if (!using_dma(hsotg)) {
+ /* Wait for GINTSTS_RXFLVL interrupt */
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+ GINTSTS_RXFLVL, 100)) {
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
+ __func__);
+ } else {
+ /*
+ * Pop GLOBAL OUT NAK status packet from RxFIFO
+ * to assert GOUTNAKEFF interrupt
+ */
+ dwc2_readl(hsotg, GRXSTSP);
+ }
+ }
+
/* Wait for global nak to take effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
GINTSTS_GOUTNAKEFF, 100))
@@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
epctl = dwc2_readl(hs, epreg);
if (value) {
+ /* Unmask GOUTNAKEFF interrupt */
+ dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
+
if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
// STALL bit will be set in GOUTNAKEFF interrupt handler
@@ -4374,8 +4397,8 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
- unsigned long flags = 0;
- int ret = 0;
+ unsigned long flags;
+ int ret;
spin_lock_irqsave(&hs->lock, flags);
ret = dwc2_hsotg_ep_sethalt(ep, value, false);
@@ -4505,7 +4528,7 @@ err:
static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
- unsigned long flags = 0;
+ unsigned long flags;
int ep;
if (!hsotg)
@@ -4577,7 +4600,7 @@ static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
struct dwc2_hsotg *hsotg = to_hsotg(gadget);
- unsigned long flags = 0;
+ unsigned long flags;
dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
hsotg->op_state);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 035d4911a3c3..2a7828971d05 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
* If neither hibernation nor partial power down are supported,
* clock gating is used to save power.
*/
- dwc2_host_enter_clock_gating(hsotg);
+ if (!hsotg->params.no_clock_gating)
+ dwc2_host_enter_clock_gating(hsotg);
break;
}
@@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
* If neither hibernation nor partial power down are supported,
* clock gating is used to save power.
*/
- dwc2_host_enter_clock_gating(hsotg);
+ if (!hsotg->params.no_clock_gating)
+ dwc2_host_enter_clock_gating(hsotg);
/* After entering suspend, hardware is not accessible */
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 621a4846bd05..89a788326c56 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -675,7 +675,7 @@ static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
}
/**
- * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
+ * dwc2_hs_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
*
* @hsotg: The HCD state structure for the DWC OTG controller.
* @qh: QH for the periodic transfer.
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 7a6089fa81e1..59e119345994 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
struct dwc2_core_params *p = &hsotg->params;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->no_clock_gating = true;
p->phy_utmi_width = 8;
}
@@ -784,8 +785,8 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
}
/**
- * During device initialization, read various hardware configuration
- * registers and interpret the contents.
+ * dwc2_get_hwparams() - During device initialization, read various hardware
+ * configuration registers and interpret the contents.
*
* @hsotg: Programming view of the DWC_otg controller
*
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index 0000151e3ca9..a93559b4ecdb 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -64,7 +64,7 @@ struct dwc2_pci_glue {
};
/**
- * dwc2_pci_probe() - Provides the cleanup entry points for the DWC_otg PCI
+ * dwc2_pci_remove() - Provides the cleanup entry points for the DWC_otg PCI
* driver
*
* @pci: The programming view of DWC_otg PCI
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 520a0beef77c..c8f18f3ba9e3 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -408,7 +408,7 @@ static bool dwc2_check_core_endianness(struct dwc2_hsotg *hsotg)
}
/**
- * Check core version
+ * dwc2_check_core_version() - Check core version
*
* @hsotg: Programming view of the DWC_otg controller
*
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 4ac397e43e19..ba74ad7f6995 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1545,6 +1545,10 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
@@ -1616,17 +1620,18 @@ static int dwc3_probe(struct platform_device *pdev)
}
dwc3_check_params(dwc);
+ dwc3_debugfs_init(dwc);
ret = dwc3_core_init_mode(dwc);
if (ret)
goto err5;
- dwc3_debugfs_init(dwc);
pm_runtime_put(dev);
return 0;
err5:
+ dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
usb_phy_shutdown(dwc->usb2_phy);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c5d5760cdf53..5991766239ba 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1013,7 +1013,6 @@ struct dwc3_scratchpad_array {
* @link_state: link state
* @speed: device speed (super, high, full, low)
* @hwparams: copy of hwparams registers
- * @root: debugfs root folder pointer
* @regset: debugfs pointer to regdump file
* @dbg_lsp_select: current debug lsp mux register selection
* @test_mode: true when we're entering a USB test mode
@@ -1222,7 +1221,6 @@ struct dwc3 {
u8 num_eps;
struct dwc3_hwparams hwparams;
- struct dentry *root;
struct debugfs_regset32 *regset;
u32 dbg_lsp_select;
@@ -1281,6 +1279,7 @@ struct dwc3 {
unsigned dis_metastability_quirk:1;
unsigned dis_split_quirk:1;
+ unsigned async_callbacks:1;
u16 imod_interval;
};
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 5dbbe53269d3..f2b7675c7f62 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -889,8 +889,10 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
struct dentry *dir;
+ struct dentry *root;
- dir = debugfs_create_dir(dep->name, dep->dwc->root);
+ root = debugfs_lookup(dev_name(dep->dwc->dev), usb_debug_root);
+ dir = debugfs_create_dir(dep->name, root);
dwc3_debugfs_create_endpoint_files(dep, dir);
}
@@ -909,8 +911,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
- dwc->root = root;
-
debugfs_create_regset32("regdump", 0444, root, dwc->regset);
debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
@@ -929,6 +929,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
void dwc3_debugfs_exit(struct dwc3 *dwc)
{
- debugfs_remove_recursive(dwc->root);
+ debugfs_remove(debugfs_lookup(dev_name(dwc->dev), usb_debug_root));
kfree(dwc->regset);
}
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index e2b68bb770d1..8fcbac10510c 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -596,7 +596,6 @@ int dwc3_drd_init(struct dwc3 *dwc)
dwc3_drd_update(dwc);
} else {
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
- dwc->current_dr_role = DWC3_GCTL_PRTCAP_OTG;
/* use OTG block to get ID event */
irq = dwc3_otg_get_irq(dwc);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 1e51460938b8..2b37bdd39a74 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -36,7 +36,7 @@
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
-#define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e
+#define PCI_DEVICE_ID_INTEL_EHL 0x4b7e
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
@@ -167,7 +167,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
pdev->device == PCI_DEVICE_ID_INTEL_BXT_M ||
- pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) {
+ pdev->device == PCI_DEVICE_ID_INTEL_EHL) {
guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
dwc->has_dsm_for_pm = true;
}
@@ -375,8 +375,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHLLP),
- (kernel_ulong_t) &dwc3_pci_intel_swnode },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3cd294264372..2f9e45eed228 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
- int ret;
+ int ret = -EINVAL;
- spin_unlock(&dwc->lock);
- ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
- spin_lock(&dwc->lock);
+ if (dwc->async_callbacks) {
+ spin_unlock(&dwc->lock);
+ ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
+ spin_lock(&dwc->lock);
+ }
return ret;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f14c2aa83759..84fe57ef5a49 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2250,6 +2250,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
/*
+ * Avoid issuing a runtime resume if the device is already in the
+ * suspended state during gadget disconnect. DWC3 gadget was already
+ * halted/stopped during runtime suspend.
+ */
+ if (!is_on) {
+ pm_runtime_barrier(dwc->dev);
+ if (pm_runtime_suspended(dwc->dev))
+ return 0;
+ }
+
+ /*
* Check the return value for successful resume, or error. For a
* successful resume, the DWC3 runtime PM resume routine will handle
* the run stop sequence, so avoid duplicate operations here.
@@ -2585,6 +2596,16 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
return ret;
}
+static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->async_callbacks = enable;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
@@ -2596,6 +2617,7 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
.udc_set_ssp_rate = dwc3_gadget_set_ssp_rate,
.get_config_params = dwc3_gadget_config_params,
.vbus_draw = dwc3_gadget_vbus_draw,
+ .udc_async_callbacks = dwc3_gadget_async_callbacks,
};
/* -------------------------------------------------------------------------- */
@@ -2798,7 +2820,9 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
list_del(&dep->endpoint.ep_list);
}
- debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
+ debugfs_remove_recursive(debugfs_lookup(dep->name,
+ debugfs_lookup(dev_name(dep->dwc->dev),
+ usb_debug_root)));
kfree(dep);
}
}
@@ -3229,7 +3253,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
- if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+ if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->disconnect(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3238,7 +3262,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
- if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->suspend(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3247,7 +3271,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
- if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ if (dwc->async_callbacks && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3259,7 +3283,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return;
- if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
+ if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
spin_unlock(&dwc->lock);
usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
spin_lock(&dwc->lock);
@@ -3583,7 +3607,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
* implemented.
*/
- if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ if (dwc->async_callbacks && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 51d18e8d1602..cb998ba50fea 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -222,8 +222,6 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
TP_STRUCT__entry(
__string(name, dep->name)
__field(struct dwc3_trb *, trb)
- __field(u32, allocated)
- __field(u32, queued)
__field(u32, bpl)
__field(u32, bph)
__field(u32, size)
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index 2cd9942707b4..5d38f29bda72 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -30,6 +30,11 @@ struct f_eem {
u8 ctrl_id;
};
+struct in_context {
+ struct sk_buff *skb;
+ struct usb_ep *ep;
+};
+
static inline struct f_eem *func_to_eem(struct usb_function *f)
{
return container_of(f, struct f_eem, port.func);
@@ -320,9 +325,12 @@ fail:
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct sk_buff *skb = (struct sk_buff *)req->context;
+ struct in_context *ctx = req->context;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(ctx->skb);
+ kfree(req->buf);
+ usb_ep_free_request(ctx->ep, req);
+ kfree(ctx);
}
/*
@@ -410,7 +418,9 @@ static int eem_unwrap(struct gether *port,
* b15: bmType (0 == data, 1 == command)
*/
if (header & BIT(15)) {
- struct usb_request *req = cdev->req;
+ struct usb_request *req;
+ struct in_context *ctx;
+ struct usb_ep *ep;
u16 bmEEMCmd;
/* EEM command packet format:
@@ -439,11 +449,36 @@ static int eem_unwrap(struct gether *port,
skb_trim(skb2, len);
put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
+
+ ep = port->in_ep;
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (!req) {
+ dev_kfree_skb_any(skb2);
+ goto next;
+ }
+
+ req->buf = kmalloc(skb2->len, GFP_ATOMIC);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ dev_kfree_skb_any(skb2);
+ goto next;
+ }
+
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ dev_kfree_skb_any(skb2);
+ goto next;
+ }
+ ctx->skb = skb2;
+ ctx->ep = ep;
+
skb_copy_bits(skb2, 0, req->buf, skb2->len);
req->length = skb2->len;
req->complete = eem_cmd_complete;
req->zero = 1;
- req->context = skb2;
+ req->context = ctx;
if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
DBG(cdev, "echo response queue fail\n");
break;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index d4844afeaffc..9c0c393abb39 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -250,8 +250,8 @@ EXPORT_SYMBOL_GPL(ffs_lock);
static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static void _ffs_free_dev(struct ffs_dev *dev);
-static void *ffs_acquire_dev(const char *dev_name);
-static void ffs_release_dev(struct ffs_data *ffs_data);
+static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
+static void ffs_release_dev(struct ffs_dev *ffs_dev);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);
@@ -1554,8 +1554,8 @@ unmapped_value:
static int ffs_fs_get_tree(struct fs_context *fc)
{
struct ffs_sb_fill_data *ctx = fc->fs_private;
- void *ffs_dev;
struct ffs_data *ffs;
+ int ret;
ENTER();
@@ -1574,13 +1574,12 @@ static int ffs_fs_get_tree(struct fs_context *fc)
return -ENOMEM;
}
- ffs_dev = ffs_acquire_dev(ffs->dev_name);
- if (IS_ERR(ffs_dev)) {
+ ret = ffs_acquire_dev(ffs->dev_name, ffs);
+ if (ret) {
ffs_data_put(ffs);
- return PTR_ERR(ffs_dev);
+ return ret;
}
- ffs->private_data = ffs_dev;
ctx->ffs_data = ffs;
return get_tree_nodev(fc, ffs_sb_fill);
}
@@ -1591,7 +1590,6 @@ static void ffs_fs_free_fc(struct fs_context *fc)
if (ctx) {
if (ctx->ffs_data) {
- ffs_release_dev(ctx->ffs_data);
ffs_data_put(ctx->ffs_data);
}
@@ -1630,10 +1628,8 @@ ffs_fs_kill_sb(struct super_block *sb)
ENTER();
kill_litter_super(sb);
- if (sb->s_fs_info) {
- ffs_release_dev(sb->s_fs_info);
+ if (sb->s_fs_info)
ffs_data_closed(sb->s_fs_info);
- }
}
static struct file_system_type ffs_fs_type = {
@@ -1703,6 +1699,7 @@ static void ffs_data_put(struct ffs_data *ffs)
if (refcount_dec_and_test(&ffs->ref)) {
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
+ ffs_release_dev(ffs->private_data);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
swait_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
@@ -3032,6 +3029,7 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
struct ffs_function *func = ffs_func_from_usb(f);
struct f_fs_opts *ffs_opts =
container_of(f->fi, struct f_fs_opts, func_inst);
+ struct ffs_data *ffs_data;
int ret;
ENTER();
@@ -3046,12 +3044,13 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
if (!ffs_opts->no_configfs)
ffs_dev_lock();
ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
- func->ffs = ffs_opts->dev->ffs_data;
+ ffs_data = ffs_opts->dev->ffs_data;
if (!ffs_opts->no_configfs)
ffs_dev_unlock();
if (ret)
return ERR_PTR(ret);
+ func->ffs = ffs_data;
func->conf = c;
func->gadget = c->cdev->gadget;
@@ -3506,6 +3505,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
struct f_fs_opts *opts;
opts = to_f_fs_opts(f);
+ ffs_release_dev(opts->dev);
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
@@ -3693,47 +3693,48 @@ static void _ffs_free_dev(struct ffs_dev *dev)
{
list_del(&dev->entry);
- /* Clear the private_data pointer to stop incorrect dev access */
- if (dev->ffs_data)
- dev->ffs_data->private_data = NULL;
-
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
}
-static void *ffs_acquire_dev(const char *dev_name)
+static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
{
+ int ret = 0;
struct ffs_dev *ffs_dev;
ENTER();
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
- if (!ffs_dev)
- ffs_dev = ERR_PTR(-ENOENT);
- else if (ffs_dev->mounted)
- ffs_dev = ERR_PTR(-EBUSY);
- else if (ffs_dev->ffs_acquire_dev_callback &&
- ffs_dev->ffs_acquire_dev_callback(ffs_dev))
- ffs_dev = ERR_PTR(-ENOENT);
- else
+ if (!ffs_dev) {
+ ret = -ENOENT;
+ } else if (ffs_dev->mounted) {
+ ret = -EBUSY;
+ } else if (ffs_dev->ffs_acquire_dev_callback &&
+ ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
+ ret = -ENOENT;
+ } else {
ffs_dev->mounted = true;
+ ffs_dev->ffs_data = ffs_data;
+ ffs_data->private_data = ffs_dev;
+ }
ffs_dev_unlock();
- return ffs_dev;
+ return ret;
}
-static void ffs_release_dev(struct ffs_data *ffs_data)
+static void ffs_release_dev(struct ffs_dev *ffs_dev)
{
- struct ffs_dev *ffs_dev;
-
ENTER();
ffs_dev_lock();
- ffs_dev = ffs_data->private_data;
- if (ffs_dev) {
+ if (ffs_dev && ffs_dev->mounted) {
ffs_dev->mounted = false;
+ if (ffs_dev->ffs_data) {
+ ffs_dev->ffs_data->private_data = NULL;
+ ffs_dev->ffs_data = NULL;
+ }
if (ffs_dev->ffs_release_dev_callback)
ffs_dev->ffs_release_dev_callback(ffs_dev);
@@ -3761,7 +3762,6 @@ static int ffs_ready(struct ffs_data *ffs)
}
ffs_obj->desc_ready = true;
- ffs_obj->ffs_data = ffs;
if (ffs_obj->ffs_ready_callback) {
ret = ffs_obj->ffs_ready_callback(ffs);
@@ -3789,7 +3789,6 @@ static void ffs_closed(struct ffs_data *ffs)
goto done;
ffs_obj->desc_ready = false;
- ffs_obj->ffs_data = NULL;
if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
ffs_obj->ffs_closed_callback)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index e55699308117..bb476e121eae 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -41,6 +41,7 @@ struct f_hidg {
unsigned char bInterfaceSubClass;
unsigned char bInterfaceProtocol;
unsigned char protocol;
+ unsigned char idle;
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
@@ -88,7 +89,7 @@ static struct usb_interface_descriptor hidg_interface_desc = {
static struct hid_descriptor hidg_desc = {
.bLength = sizeof hidg_desc,
.bDescriptorType = HID_DT_HID,
- .bcdHID = 0x0101,
+ .bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
/*.desc[0].bDescriptorType = DYNAMIC */
@@ -338,6 +339,11 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->req) {
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+ return -ESHUTDOWN;
+ }
+
#define WRITE_COND (!hidg->write_pending)
try_again:
/* write queue */
@@ -358,8 +364,14 @@ try_again:
count = min_t(unsigned, count, hidg->report_length);
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
- status = copy_from_user(req->buf, buffer, count);
+ if (!req) {
+ ERROR(hidg->func.config->cdev, "hidg->req is NULL\n");
+ status = -ESHUTDOWN;
+ goto release_write_pending;
+ }
+
+ status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
@@ -387,14 +399,17 @@ try_again:
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+ if (!hidg->in_ep->enabled) {
+ ERROR(hidg->func.config->cdev, "in_ep is disabled\n");
+ status = -ESHUTDOWN;
+ goto release_write_pending;
+ }
+
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
- if (status < 0) {
- ERROR(hidg->func.config->cdev,
- "usb_ep_queue error on int endpoint %zd\n", status);
+ if (status < 0)
goto release_write_pending;
- } else {
+ else
status = count;
- }
return status;
release_write_pending:
@@ -523,6 +538,14 @@ static int hidg_setup(struct usb_function *f,
goto respond;
break;
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_IDLE):
+ VDBG(cdev, "get_idle\n");
+ length = min_t(unsigned int, length, 1);
+ ((u8 *) req->buf)[0] = hidg->idle;
+ goto respond;
+ break;
+
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_REPORT):
VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
@@ -546,6 +569,14 @@ static int hidg_setup(struct usb_function *f,
goto stall;
break;
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_IDLE):
+ VDBG(cdev, "set_idle\n");
+ length = 0;
+ hidg->idle = value >> 8;
+ goto respond;
+ break;
+
case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
| USB_REQ_GET_DESCRIPTOR):
switch (value >> 8) {
@@ -773,6 +804,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
hidg->protocol = HID_REPORT_PROTOCOL;
+ hidg->idle = 1;
hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_in_comp_desc.wBytesPerInterval =
cpu_to_le16(hidg->report_length);
@@ -1118,7 +1150,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
hidg->func.setup = hidg_setup;
hidg->func.free_func = hidg_free;
- /* this could me made configurable at some point */
+ /* this could be made configurable at some point */
hidg->qlen = 4;
return &hidg->func;
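Annotation on the new Get_Idle/Set_Idle handling above: per the HID class spec, wValue carries the idle duration in its high byte (in 4 ms units, 0 meaning indefinite) and the report ID in its low byte, which is why the handler stores value >> 8 and answers Get_Idle with a single byte. A minimal decoding sketch, with invented names:

	/* Sketch only: splitting a HID SET_IDLE wValue into its two fields. */
	#include <linux/usb/ch9.h>

	static void decode_set_idle(const struct usb_ctrlrequest *ctrl,
				    u8 *duration_4ms, u8 *report_id)
	{
		u16 w_value = le16_to_cpu(ctrl->wValue);

		*duration_4ms = w_value >> 8;	/* 0 == indefinite */
		*report_id = w_value & 0xff;	/* 0 == all reports */
	}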
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 59d382fe1bbf..abec5c58f525 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -667,8 +667,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
spin_lock(&dev->lock);
if (value) {
- list_del(&req->list);
- list_add(&req->list, &dev->tx_reqs);
+ list_move(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 7aa4c8bc5a1a..ae29ff2b2b68 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -44,6 +44,7 @@
#define EPIN_EN(_opts) ((_opts)->p_chmask != 0)
#define EPOUT_EN(_opts) ((_opts)->c_chmask != 0)
+#define EPOUT_FBACK_IN_EN(_opts) ((_opts)->c_sync == USB_ENDPOINT_SYNC_ASYNC)
struct f_uac2 {
struct g_audio g_audio;
@@ -273,7 +274,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+ /* .bmAttributes = DYNAMIC */
/* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -282,7 +283,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
- .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+ /* .bmAttributes = DYNAMIC */
/* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -292,7 +293,7 @@ static struct usb_endpoint_descriptor ss_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
+ /* .bmAttributes = DYNAMIC */
/* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -317,6 +318,37 @@ static struct uac2_iso_endpoint_descriptor as_iso_out_desc = {
.wLockDelay = 0,
};
+/* STD AS ISO IN Feedback Endpoint */
+static struct usb_endpoint_descriptor fs_epin_fback_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_USAGE_FEEDBACK,
+ .wMaxPacketSize = cpu_to_le16(3),
+ .bInterval = 1,
+};
+
+static struct usb_endpoint_descriptor hs_epin_fback_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_USAGE_FEEDBACK,
+ .wMaxPacketSize = cpu_to_le16(4),
+ .bInterval = 4,
+};
+
+static struct usb_endpoint_descriptor ss_epin_fback_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_USAGE_FEEDBACK,
+ .wMaxPacketSize = cpu_to_le16(4),
+ .bInterval = 4,
+};
+
+
/* Audio Streaming IN Interface - Alt0 */
static struct usb_interface_descriptor std_as_in_if0_desc = {
.bLength = sizeof std_as_in_if0_desc,
@@ -431,6 +463,7 @@ static struct usb_descriptor_header *fs_audio_desc[] = {
(struct usb_descriptor_header *)&as_out_fmt1_desc,
(struct usb_descriptor_header *)&fs_epout_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
+ (struct usb_descriptor_header *)&fs_epin_fback_desc,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -461,6 +494,7 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
(struct usb_descriptor_header *)&as_out_fmt1_desc,
(struct usb_descriptor_header *)&hs_epout_desc,
(struct usb_descriptor_header *)&as_iso_out_desc,
+ (struct usb_descriptor_header *)&hs_epin_fback_desc,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -492,6 +526,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&ss_epout_desc,
(struct usb_descriptor_header *)&ss_epout_desc_comp,
(struct usb_descriptor_header *)&as_iso_out_desc,
+ (struct usb_descriptor_header *)&ss_epin_fback_desc,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -549,8 +584,11 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
+ if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC))
+ srate = srate * (1000 + uac2_opts->fb_max) / 1000;
+
max_size_bw = num_channels(chmask) * ssize *
- ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1);
+ DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
max_size_ep));
@@ -568,22 +606,26 @@ static void setup_headers(struct f_uac2_opts *opts,
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
+ struct usb_endpoint_descriptor *epin_fback_desc;
int i;
switch (speed) {
case USB_SPEED_FULL:
epout_desc = &fs_epout_desc;
epin_desc = &fs_epin_desc;
+ epin_fback_desc = &fs_epin_fback_desc;
break;
case USB_SPEED_HIGH:
epout_desc = &hs_epout_desc;
epin_desc = &hs_epin_desc;
+ epin_fback_desc = &hs_epin_fback_desc;
break;
default:
epout_desc = &ss_epout_desc;
epin_desc = &ss_epin_desc;
epout_desc_comp = &ss_epout_desc_comp;
epin_desc_comp = &ss_epin_desc_comp;
+ epin_fback_desc = &ss_epin_fback_desc;
}
i = 0;
@@ -611,6 +653,9 @@ static void setup_headers(struct f_uac2_opts *opts,
headers[i++] = USBDHDR(epout_desc_comp);
headers[i++] = USBDHDR(&as_iso_out_desc);
+
+ if (EPOUT_FBACK_IN_EN(opts))
+ headers[i++] = USBDHDR(epin_fback_desc);
}
if (EPIN_EN(opts)) {
headers[i++] = USBDHDR(&std_as_in_if0_desc);
@@ -781,6 +826,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
std_as_out_if1_desc.bInterfaceNumber = ret;
uac2->as_out_intf = ret;
uac2->as_out_alt = 0;
+
+ if (EPOUT_FBACK_IN_EN(uac2_opts)) {
+ fs_epout_desc.bmAttributes =
+ USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;
+ hs_epout_desc.bmAttributes =
+ USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;
+ ss_epout_desc.bmAttributes =
+ USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;
+ std_as_out_if1_desc.bNumEndpoints++;
+ } else {
+ fs_epout_desc.bmAttributes =
+ USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ADAPTIVE;
+ hs_epout_desc.bmAttributes =
+ USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ADAPTIVE;
+ ss_epout_desc.bmAttributes =
+ USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ADAPTIVE;
+ }
}
if (EPIN_EN(uac2_opts)) {
@@ -844,6 +906,15 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -ENODEV;
}
+ if (EPOUT_FBACK_IN_EN(uac2_opts)) {
+ agdev->in_ep_fback = usb_ep_autoconfig(gadget,
+ &fs_epin_fback_desc);
+ if (!agdev->in_ep_fback) {
+ dev_err(dev, "%s:%d Error!\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+ }
}
if (EPIN_EN(uac2_opts)) {
@@ -867,8 +938,10 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
le16_to_cpu(ss_epout_desc.wMaxPacketSize));
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
+ hs_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
ss_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
+ ss_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
ss_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
setup_descriptor(uac2_opts);
@@ -887,6 +960,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->params.c_srate = uac2_opts->c_srate;
agdev->params.c_ssize = uac2_opts->c_ssize;
agdev->params.req_number = uac2_opts->req_number;
+ agdev->params.fb_max = uac2_opts->fb_max;
ret = g_audio_setup(agdev, "UAC2 PCM", "UAC2_Gadget");
if (ret)
goto err_free_descs;
@@ -1195,13 +1269,71 @@ end: \
\
CONFIGFS_ATTR(f_uac2_opts_, name)
+#define UAC2_ATTRIBUTE_SYNC(name) \
+static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_uac2_opts *opts = to_f_uac2_opts(item); \
+ int result; \
+ char *str; \
+ \
+ mutex_lock(&opts->lock); \
+ switch (opts->name) { \
+ case USB_ENDPOINT_SYNC_ASYNC: \
+ str = "async"; \
+ break; \
+ case USB_ENDPOINT_SYNC_ADAPTIVE: \
+ str = "adaptive"; \
+ break; \
+ default: \
+ str = "unknown"; \
+ break; \
+ } \
+ result = sprintf(page, "%s\n", str); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_uac2_opts *opts = to_f_uac2_opts(item); \
+ int ret = 0; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ if (!strncmp(page, "async", 5)) \
+ opts->name = USB_ENDPOINT_SYNC_ASYNC; \
+ else if (!strncmp(page, "adaptive", 8)) \
+ opts->name = USB_ENDPOINT_SYNC_ADAPTIVE; \
+ else { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_uac2_opts_, name)
+
UAC2_ATTRIBUTE(p_chmask);
UAC2_ATTRIBUTE(p_srate);
UAC2_ATTRIBUTE(p_ssize);
UAC2_ATTRIBUTE(c_chmask);
UAC2_ATTRIBUTE(c_srate);
+UAC2_ATTRIBUTE_SYNC(c_sync);
UAC2_ATTRIBUTE(c_ssize);
UAC2_ATTRIBUTE(req_number);
+UAC2_ATTRIBUTE(fb_max);
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
@@ -1210,7 +1342,9 @@ static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_c_chmask,
&f_uac2_opts_attr_c_srate,
&f_uac2_opts_attr_c_ssize,
+ &f_uac2_opts_attr_c_sync,
&f_uac2_opts_attr_req_number,
+ &f_uac2_opts_attr_fb_max,
NULL,
};
@@ -1248,7 +1382,9 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->c_chmask = UAC2_DEF_CCHMASK;
opts->c_srate = UAC2_DEF_CSRATE;
opts->c_ssize = UAC2_DEF_CSSIZE;
+ opts->c_sync = UAC2_DEF_CSYNC;
opts->req_number = UAC2_DEF_REQ_NUM;
+ opts->fb_max = UAC2_DEF_FB_MAX;
return &opts->func_inst;
}
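Annotation on the new c_sync/fb_max options above: with an asynchronous capture endpoint the gadget must absorb a host that sources data up to fb_max per mil fast, so set_ep_max_packet_size() first inflates the nominal rate and then rounds the per-packet sample count up rather than adding one. A standalone arithmetic check under assumed settings (48 kHz, 16-bit stereo capture, high speed, bInterval 4, fb_max 5), not kernel code:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int srate = 48000, ssize = 2, channels = 2;
		unsigned int factor = 8000;	/* microframes per second */
		unsigned int bInterval = 4, fb_max = 5;
		unsigned int pkts_per_sec = factor / (1 << (bInterval - 1));

		/* headroom for a source running fb_max per mil fast */
		srate = srate * (1000 + fb_max) / 1000;		/* 48240 */

		printf("wMaxPacketSize >= %u bytes\n",
		       channels * ssize * DIV_ROUND_UP(srate, pkts_per_sec));
		return 0;	/* prints 196: 49 samples * 2 ch * 2 bytes */
	}

From configfs the knobs would presumably be set before binding, e.g. echo adaptive > functions/uac2.<inst>/c_sync; with adaptive sync the feedback endpoint and the fb_max headroom are simply left out of the descriptors.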
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 5fbceee897a3..018dd0978995 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -16,6 +16,7 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
+#include <sound/control.h>
#include "u_audio.h"
@@ -35,9 +36,13 @@ struct uac_rtd_params {
void *rbuf;
+ unsigned int pitch; /* Stream pitch ratio to 1000000 */
unsigned int max_psize; /* MaxPacketSize of endpoint */
struct usb_request **reqs;
+
+ struct usb_request *req_fback; /* Feedback endpoint request */
+ bool fb_ep_enabled; /* if the ep is enabled */
};
struct snd_uac_chip {
@@ -70,6 +75,48 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
.periods_min = MIN_PERIODS,
};
+static void u_audio_set_fback_frequency(enum usb_device_speed speed,
+ unsigned long long freq,
+ unsigned int pitch,
+ void *buf)
+{
+ u32 ff = 0;
+
+ /*
+ * Because the pitch base is 1000000, the final divider here
+ * will be 1000 * 1000000 = 1953125 << 9
+ *
+ * Instead of dealing with big numbers, let's fold this 9-bit left shift
+ */
+
+ if (speed == USB_SPEED_FULL) {
+ /*
+ * Full-speed feedback endpoints report frequency
+ * in samples/frame
+ * Format is encoded in Q10.10 left-justified in the 24 bits,
+ * so that it has a Q10.14 format.
+ *
+ * ff = (freq << 14) / 1000
+ */
+ freq <<= 5;
+ } else {
+ /*
+ * High-speed feedback endpoints report frequency
+ * in samples/microframe.
+ * Format is encoded in Q12.13 fitted into four bytes so that
+ * the binary point is located between the second and the third
+ * byte (that is, Q16.16)
+ *
+ * ff = (freq << 16) / 8000
+ */
+ freq <<= 4;
+ }
+
+ ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
+
+ *(__le32 *)buf = cpu_to_le32(ff);
+}
+
static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
unsigned int pending;
@@ -173,6 +220,35 @@ exit:
dev_err(uac->card->dev, "%d Error!\n", __LINE__);
}
+static void u_audio_iso_fback_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct uac_rtd_params *prm = req->context;
+ struct snd_uac_chip *uac = prm->uac;
+ struct g_audio *audio_dev = uac->audio_dev;
+ struct uac_params *params = &audio_dev->params;
+ int status = req->status;
+
+ /* i/f shutting down */
+ if (!prm->fb_ep_enabled || req->status == -ESHUTDOWN)
+ return;
+
+ /*
+ * We can't really do much about bad xfers.
+ * After all, the ISOCH xfers could fail legitimately.
+ */
+ if (status)
+ pr_debug("%s: iso_complete status(%d) %d/%d\n",
+ __func__, status, req->actual, req->length);
+
+ u_audio_set_fback_frequency(audio_dev->gadget->speed,
+ params->c_srate, prm->pitch,
+ req->buf);
+
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ dev_err(uac->card->dev, "%d Error!\n", __LINE__);
+}
+
static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -335,14 +411,33 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}
+static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
+{
+ struct snd_uac_chip *uac = prm->uac;
+
+ if (!prm->fb_ep_enabled)
+ return;
+
+ prm->fb_ep_enabled = false;
+
+ if (prm->req_fback) {
+ usb_ep_dequeue(ep, prm->req_fback);
+ kfree(prm->req_fback->buf);
+ usb_ep_free_request(ep, prm->req_fback);
+ prm->req_fback = NULL;
+ }
+
+ if (usb_ep_disable(ep))
+ dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
+}
int u_audio_start_capture(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
struct usb_gadget *gadget = audio_dev->gadget;
struct device *dev = &gadget->dev;
- struct usb_request *req;
- struct usb_ep *ep;
+ struct usb_request *req, *req_fback;
+ struct usb_ep *ep, *ep_fback;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
int req_len, i;
@@ -374,6 +469,43 @@ int u_audio_start_capture(struct g_audio *audio_dev)
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
+ ep_fback = audio_dev->in_ep_fback;
+ if (!ep_fback)
+ return 0;
+
+ /* Setup feedback endpoint */
+ config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
+ prm->fb_ep_enabled = true;
+ usb_ep_enable(ep_fback);
+ req_len = ep_fback->maxpacket;
+
+ req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC);
+ if (req_fback == NULL)
+ return -ENOMEM;
+
+ prm->req_fback = req_fback;
+ req_fback->zero = 0;
+ req_fback->context = prm;
+ req_fback->length = req_len;
+ req_fback->complete = u_audio_iso_fback_complete;
+
+ req_fback->buf = kzalloc(req_len, GFP_ATOMIC);
+ if (!req_fback->buf)
+ return -ENOMEM;
+
+ /*
+ * Configure the feedback endpoint's reported frequency.
+ * Always start with the original frequency, since its deviation can't
+ * be measured at the start of playback
+ */
+ prm->pitch = 1000000;
+ u_audio_set_fback_frequency(audio_dev->gadget->speed,
+ params->c_srate, prm->pitch,
+ req_fback->buf);
+
+ if (usb_ep_queue(ep_fback, req_fback, GFP_ATOMIC))
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+
return 0;
}
EXPORT_SYMBOL_GPL(u_audio_start_capture);
@@ -382,6 +514,8 @@ void u_audio_stop_capture(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
+ if (audio_dev->in_ep_fback)
+ free_ep_fback(&uac->c_prm, audio_dev->in_ep_fback);
free_ep(&uac->c_prm, audio_dev->out_ep);
}
EXPORT_SYMBOL_GPL(u_audio_stop_capture);
@@ -463,12 +597,82 @@ void u_audio_stop_playback(struct g_audio *audio_dev)
}
EXPORT_SYMBOL_GPL(u_audio_stop_playback);
+static int u_audio_pitch_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
+ struct snd_uac_chip *uac = prm->uac;
+ struct g_audio *audio_dev = uac->audio_dev;
+ struct uac_params *params = &audio_dev->params;
+ unsigned int pitch_min, pitch_max;
+
+ pitch_min = (1000 - FBACK_SLOW_MAX) * 1000;
+ pitch_max = (1000 + params->fb_max) * 1000;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = pitch_min;
+ uinfo->value.integer.max = pitch_max;
+ uinfo->value.integer.step = 1;
+ return 0;
+}
+
+static int u_audio_pitch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = prm->pitch;
+
+ return 0;
+}
+
+static int u_audio_pitch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct uac_rtd_params *prm = snd_kcontrol_chip(kcontrol);
+ struct snd_uac_chip *uac = prm->uac;
+ struct g_audio *audio_dev = uac->audio_dev;
+ struct uac_params *params = &audio_dev->params;
+ unsigned int val;
+ unsigned int pitch_min, pitch_max;
+ int change = 0;
+
+ pitch_min = (1000 - FBACK_SLOW_MAX) * 1000;
+ pitch_max = (1000 + params->fb_max) * 1000;
+
+ val = ucontrol->value.integer.value[0];
+
+ if (val < pitch_min)
+ val = pitch_min;
+ if (val > pitch_max)
+ val = pitch_max;
+
+ if (prm->pitch != val) {
+ prm->pitch = val;
+ change = 1;
+ }
+
+ return change;
+}
+
+static const struct snd_kcontrol_new u_audio_controls[] = {
+{
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "Capture Pitch 1000000",
+ .info = u_audio_pitch_info,
+ .get = u_audio_pitch_get,
+ .put = u_audio_pitch_put,
+},
+};
+
int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
const char *card_name)
{
struct snd_uac_chip *uac;
struct snd_card *card;
struct snd_pcm *pcm;
+ struct snd_kcontrol *kctl;
struct uac_params *params;
int p_chmask, c_chmask;
int err;
@@ -556,6 +760,23 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
+ if (c_chmask && g_audio->in_ep_fback) {
+ strscpy(card->mixername, card_name, sizeof(card->mixername));
+
+ kctl = snd_ctl_new1(&u_audio_controls[0], &uac->c_prm);
+ if (!kctl) {
+ err = -ENOMEM;
+ goto snd_fail;
+ }
+
+ kctl->id.device = pcm->device;
+ kctl->id.subdevice = 0;
+
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ goto snd_fail;
+ }
+
strscpy(card->driver, card_name, sizeof(card->driver));
strscpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
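Annotation on u_audio_set_fback_frequency() above, as a numeric check: the feedback value is fixed-point samples per (micro)frame, Q10.14 packed into three bytes at full speed and Q16.16 at high speed, and folding the 1000 * 1000000 = 1953125 << 9 divider into the shifts leaves freq << 5 (FS) or freq << 4 (HS) over a plain divide by 1953125. The "Capture Pitch 1000000" ALSA control added alongside clamps the pitch to (1000 - FBACK_SLOW_MAX) * 1000 .. (1000 + fb_max) * 1000, i.e. 750000..1005000 with the defaults. A standalone check for an assumed 48 kHz stream at neutral pitch, not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	#define DIV_ROUND_CLOSEST_ULL(x, d)	(((x) + (d) / 2) / (d))

	int main(void)
	{
		uint64_t freq = 48000, pitch = 1000000;
		uint64_t fs = DIV_ROUND_CLOSEST_ULL((freq << 5) * pitch, 1953125);
		uint64_t hs = DIV_ROUND_CLOSEST_ULL((freq << 4) * pitch, 1953125);

		/* 48 samples/frame in Q10.14 and 6 samples/microframe in Q16.16 */
		printf("FS 0x%06llx (expect 0x%06llx)\n",
		       (unsigned long long)fs, 48ULL << 14);
		printf("HS 0x%08llx (expect 0x%08llx)\n",
		       (unsigned long long)hs, 6ULL << 16);
		return 0;
	}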
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
index 5ea6b86f1fda..a218cdf771fe 100644
--- a/drivers/usb/gadget/function/u_audio.h
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -11,6 +11,14 @@
#include <linux/usb/composite.h>
+/*
+ * Same maximum frequency deviation on the slower side as in
+ * sound/usb/endpoint.c. Value is expressed in per-mil deviation.
+ * The maximum deviation on the faster side will be provided as a
+ * parameter, as it impacts the endpoint's required bandwidth.
+ */
+#define FBACK_SLOW_MAX 250
+
struct uac_params {
/* playback */
int p_chmask; /* channel mask */
@@ -23,6 +31,7 @@ struct uac_params {
int c_ssize; /* sample size */
int req_number; /* number of preallocated requests */
+ int fb_max; /* upper frequency drift feedback limit per-mil */
};
struct g_audio {
@@ -30,7 +39,10 @@ struct g_audio {
struct usb_gadget *gadget;
struct usb_ep *in_ep;
+
struct usb_ep *out_ep;
+ /* feedback IN endpoint corresponding to out_ep */
+ struct usb_ep *in_ep_fback;
/* Max packet size for all in_ep possible speeds */
unsigned int in_ep_maxpsize;
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index 84e6da302499..98d6af558c03 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -29,8 +29,8 @@ struct f_hid_opts {
* Protect the data form concurrent access by read/write
* and create symlink/remove symlink.
*/
- struct mutex lock;
- int refcnt;
+ struct mutex lock;
+ int refcnt;
};
int ghid_setup(struct usb_gadget *g, int count);
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
index f6e14af7f566..2e400b495cb8 100644
--- a/drivers/usb/gadget/function/u_midi.h
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -29,8 +29,8 @@ struct f_midi_opts {
* Protect the data form concurrent access by read/write
* and create symlink/remove symlink.
*/
- struct mutex lock;
- int refcnt;
+ struct mutex lock;
+ int refcnt;
};
#endif /* U_MIDI_H */
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 1e59204ec7aa..281ca766698a 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -774,34 +774,34 @@ static void gs_flush_chars(struct tty_struct *tty)
spin_unlock_irqrestore(&port->port_lock, flags);
}
-static int gs_write_room(struct tty_struct *tty)
+static unsigned int gs_write_room(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- int room = 0;
+ unsigned int room = 0;
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
room = kfifo_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
- pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
+ pr_vdebug("gs_write_room: (%d,%p) room=%u\n",
port->port_num, tty, room);
return room;
}
-static int gs_chars_in_buffer(struct tty_struct *tty)
+static unsigned int gs_chars_in_buffer(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- int chars = 0;
+ unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
chars = kfifo_len(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
- pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+ pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%u\n",
port->port_num, tty, chars);
return chars;
@@ -1198,7 +1198,7 @@ void gserial_free_line(unsigned char port_num)
struct gs_port *port;
mutex_lock(&ports[port_num].lock);
- if (WARN_ON(!ports[port_num].port)) {
+ if (!ports[port_num].port) {
mutex_unlock(&ports[port_num].lock);
return;
}
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index b5035711172d..179d3ef6a195 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -21,7 +21,9 @@
#define UAC2_DEF_CCHMASK 0x3
#define UAC2_DEF_CSRATE 64000
#define UAC2_DEF_CSSIZE 2
+#define UAC2_DEF_CSYNC USB_ENDPOINT_SYNC_ASYNC
#define UAC2_DEF_REQ_NUM 2
+#define UAC2_DEF_FB_MAX 5
struct f_uac2_opts {
struct usb_function_instance func_inst;
@@ -31,7 +33,9 @@ struct f_uac2_opts {
int c_chmask;
int c_srate;
int c_ssize;
+ int c_sync;
int req_number;
+ int fb_max;
bool bound;
struct mutex lock;
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index cd28dec837dd..77d64031aa9c 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -914,8 +914,6 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
target_fmt = container_of(to_config_group(target), struct uvcg_format,
group);
- if (!target_fmt)
- goto out;
uvcg_format_set_indices(to_config_group(target));
@@ -955,8 +953,6 @@ static void uvcg_streaming_header_drop_link(struct config_item *src,
mutex_lock(&opts->lock);
target_fmt = container_of(to_config_group(target), struct uvcg_format,
group);
- if (!target_fmt)
- goto out;
list_for_each_entry_safe(format_ptr, tmp, &src_hdr->formats, entry)
if (format_ptr->fmt == target_fmt) {
@@ -968,7 +964,6 @@ static void uvcg_streaming_header_drop_link(struct config_item *src,
--target_fmt->linked;
-out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
}
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index c4eda7fe7ab4..5b27d289443f 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -171,8 +171,10 @@ static int hid_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto put;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 9cd4a70ccdd6..a9f07c59fc37 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -288,7 +288,6 @@ struct bcm63xx_req {
* @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
* @ep0_reply: Pending reply from gadget driver.
* @ep0_request: Outstanding ep0 request.
- * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
*/
struct bcm63xx_udc {
spinlock_t lock;
@@ -327,8 +326,6 @@ struct bcm63xx_udc {
unsigned ep0_req_completed:1;
struct usb_request *ep0_reply;
struct usb_request *ep0_request;
-
- struct dentry *debugfs_root;
};
static const struct usb_ep_ops bcm63xx_udc_ep_ops;
@@ -2250,8 +2247,6 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
return;
root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
- udc->debugfs_root = root;
-
debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}
@@ -2264,7 +2259,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
*/
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
- debugfs_remove_recursive(udc->debugfs_root);
+ debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
}
/***********************************************************************
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 493ff93f7dda..b7f0b1ebaaa8 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1148,6 +1148,53 @@ static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
}
/**
+ * usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks
+ * @udc: The UDC which should enable async callbacks
+ *
+ * This routine is used when binding gadget drivers. It undoes the effect
+ * of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs
+ * (if necessary) and resume issuing callbacks.
+ *
+ * This routine will always be called in process context.
+ */
+static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc)
+{
+ struct usb_gadget *gadget = udc->gadget;
+
+ if (gadget->ops->udc_async_callbacks)
+ gadget->ops->udc_async_callbacks(gadget, true);
+}
+
+/**
+ * usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks
+ * @udc: The UDC which should disable async callbacks
+ *
+ * This routine is used when unbinding gadget drivers. It prevents a race:
+ * The UDC driver doesn't know when the gadget driver's ->unbind callback
+ * runs, so unless it is told to disable asynchronous callbacks, it might
+ * issue a callback (such as ->disconnect) after the unbind has completed.
+ *
+ * After this function runs, the UDC driver must suppress all ->suspend,
+ * ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver
+ * until async callbacks are again enabled. A simple-minded but effective
+ * way to accomplish this is to tell the UDC hardware not to generate any
+ * more IRQs.
+ *
+ * Request completion callbacks must still be issued. However, it's okay
+ * to defer them until the request is cancelled, since the pull-up will be
+ * turned off during the time period when async callbacks are disabled.
+ *
+ * This routine will always be called in process context.
+ */
+static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc)
+{
+ struct usb_gadget *gadget = udc->gadget;
+
+ if (gadget->ops->udc_async_callbacks)
+ gadget->ops->udc_async_callbacks(gadget, false);
+}
+
+/**
* usb_udc_release - release the usb_udc struct
* @dev: the dev member within usb_udc
*
@@ -1361,6 +1408,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
+ usb_gadget_disable_async_callbacks(udc);
if (udc->gadget->irq)
synchronize_irq(udc->gadget->irq);
udc->driver->unbind(udc->gadget);
@@ -1442,6 +1490,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
driver->unbind(udc->gadget);
goto err1;
}
+ usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
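Annotation on the udc_async_callbacks kernel-doc above: the dummy_hcd, net2272 and net2280 hunks that follow are the in-tree users, but in condensed form (invented names, sketch only) a UDC latches the flag under its lock and consults it on every event-driven path into the gadget driver:

	/* Sketch only: the udc_async_callbacks contract from the UDC side. */
	static void foo_udc_async_callbacks(struct usb_gadget *g, bool enable)
	{
		struct foo_udc *dev = gadget_to_foo_udc(g);

		spin_lock_irq(&dev->lock);
		dev->async_callbacks = enable;
		spin_unlock_irq(&dev->lock);
	}

	static void foo_udc_report_disconnect(struct foo_udc *dev)
	{
		/*
		 * ->disconnect (like ->suspend/->resume/->reset/->setup) is
		 * only delegated while async callbacks are enabled, so nothing
		 * reaches the gadget driver once its ->unbind has started.
		 */
		if (dev->async_callbacks && dev->driver->disconnect) {
			spin_unlock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_lock(&dev->lock);
		}
	}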
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 7db773c87379..a2d956af42a2 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -934,6 +934,15 @@ static void dummy_udc_set_speed(struct usb_gadget *_gadget,
dummy_udc_update_ep0(dum);
}
+static void dummy_udc_async_callbacks(struct usb_gadget *_gadget, bool enable)
+{
+ struct dummy *dum = gadget_dev_to_dummy(&_gadget->dev);
+
+ spin_lock_irq(&dum->lock);
+ dum->ints_enabled = enable;
+ spin_unlock_irq(&dum->lock);
+}
+
static int dummy_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int dummy_udc_stop(struct usb_gadget *g);
@@ -946,6 +955,7 @@ static const struct usb_gadget_ops dummy_ops = {
.udc_start = dummy_udc_start,
.udc_stop = dummy_udc_stop,
.udc_set_speed = dummy_udc_set_speed,
+ .udc_async_callbacks = dummy_udc_async_callbacks,
};
/*-------------------------------------------------------------------------*/
@@ -1005,7 +1015,6 @@ static int dummy_udc_start(struct usb_gadget *g,
spin_lock_irq(&dum->lock);
dum->devstatus = 0;
dum->driver = driver;
- dum->ints_enabled = 1;
spin_unlock_irq(&dum->lock);
return 0;
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index fa66449b3907..15db7a3868fe 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -541,6 +541,7 @@ static int qe_ep_init(struct qe_udc *udc,
case USB_SPEED_HIGH:
if ((max == 128) || (max == 256) || (max == 512))
break;
+ fallthrough;
default:
switch (max) {
case 4:
@@ -562,9 +563,11 @@ static int qe_ep_init(struct qe_udc *udc,
case USB_SPEED_HIGH:
if (max <= 1024)
break;
+ fallthrough;
case USB_SPEED_FULL:
if (max <= 64)
break;
+ fallthrough;
default:
if (max <= 8)
break;
@@ -579,9 +582,11 @@ static int qe_ep_init(struct qe_udc *udc,
case USB_SPEED_HIGH:
if (max <= 1024)
break;
+ fallthrough;
case USB_SPEED_FULL:
if (max <= 1023)
break;
+ fallthrough;
default:
goto en_done;
}
@@ -605,6 +610,7 @@ static int qe_ep_init(struct qe_udc *udc,
default:
goto en_done;
}
+ fallthrough;
case USB_SPEED_LOW:
switch (max) {
case 1:
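Annotation on the fsl_qe_udc hunks above: they do not change behaviour. Each annotated case already fell through to the next, and the fallthrough; pseudo-keyword (the kernel's wrapper around __attribute__((__fallthrough__))) merely documents that so -Wimplicit-fallthrough stays quiet. A minimal illustration with made-up logic:

	/* Sketch only: marking an intentional case fall-through. */
	#include <linux/compiler.h>
	#include <linux/usb/ch9.h>

	static int check_maxpacket(enum usb_device_speed speed, unsigned int max)
	{
		switch (speed) {
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			fallthrough;	/* too big for HS: apply the FS limit too */
		case USB_SPEED_FULL:
			if (max > 64)
				return -EINVAL;
			break;
		default:
			if (max > 8)
				return -EINVAL;
			break;
		}
		return 0;
	}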
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index ad6ff9c4188e..29fcb9b461d7 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -36,7 +36,6 @@
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dmapool.h>
-#include <linux/delay.h>
#include <linux/of_device.h>
#include <asm/byteorder.h>
@@ -323,13 +322,11 @@ static int dr_controller_setup(struct fsl_udc *udc)
fsl_writel(tmp, &dr_regs->endptctrl[ep_num]);
}
/* Config control enable i/o output, cpu endian register */
-#ifndef CONFIG_ARCH_MXC
if (udc->pdata->have_sysif_regs) {
ctrl = __raw_readl(&usb_sys_regs->control);
ctrl |= USB_CTRL_IOENB;
__raw_writel(ctrl, &usb_sys_regs->control);
}
-#endif
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
/* Turn on cache snooping hardware, since some PowerPC platforms
@@ -547,7 +544,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
unsigned short max = 0;
unsigned char mult = 0, zlt;
int retval = -EINVAL;
- unsigned long flags = 0;
+ unsigned long flags;
ep = container_of(_ep, struct fsl_ep, ep);
@@ -631,7 +628,7 @@ static int fsl_ep_disable(struct usb_ep *_ep)
{
struct fsl_udc *udc = NULL;
struct fsl_ep *ep = NULL;
- unsigned long flags = 0;
+ unsigned long flags;
u32 epctrl;
int ep_num;
@@ -1001,7 +998,7 @@ out: epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
{
struct fsl_ep *ep = NULL;
- unsigned long flags = 0;
+ unsigned long flags;
int status = -EOPNOTSUPP; /* operation not supported */
unsigned char ep_dir = 0, ep_num = 0;
struct fsl_udc *udc = NULL;
@@ -1938,7 +1935,7 @@ static int fsl_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
int retval = 0;
- unsigned long flags = 0;
+ unsigned long flags;
/* lock is needed but whether should use this lock or another */
spin_lock_irqsave(&udc_controller->lock, flags);
@@ -2153,7 +2150,6 @@ static int fsl_proc_read(struct seq_file *m, void *v)
tmp_reg = fsl_readl(&dr_regs->endpointprime);
seq_printf(m, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
-#ifndef CONFIG_ARCH_MXC
if (udc->pdata->have_sysif_regs) {
tmp_reg = usb_sys_regs->snoop1;
seq_printf(m, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
@@ -2161,7 +2157,6 @@ static int fsl_proc_read(struct seq_file *m, void *v)
tmp_reg = usb_sys_regs->control;
seq_printf(m, "General Control Reg : = [0x%x]\n\n", tmp_reg);
}
-#endif
/* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
ep = &udc->eps[0];
@@ -2412,28 +2407,21 @@ static int fsl_udc_probe(struct platform_device *pdev)
*/
if (pdata->init && pdata->init(pdev)) {
ret = -ENODEV;
- goto err_iounmap_noclk;
+ goto err_iounmap;
}
/* Set accessors only after pdata->init() ! */
fsl_set_accessors(pdata);
-#ifndef CONFIG_ARCH_MXC
if (pdata->have_sysif_regs)
usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
-#endif
-
- /* Initialize USB clocks */
- ret = fsl_udc_clk_init(pdev);
- if (ret < 0)
- goto err_iounmap_noclk;
/* Read Device Controller Capability Parameters register */
dccparams = fsl_readl(&dr_regs->dccparams);
if (!(dccparams & DCCPARAMS_DC)) {
ERR("This SOC doesn't support device role\n");
ret = -ENODEV;
- goto err_iounmap;
+ goto err_exit;
}
/* Get max device endpoints */
/* DEN is bidirectional ep number, max_ep doubles the number */
@@ -2442,7 +2430,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
ret = ret ? : -ENODEV;
- goto err_iounmap;
+ goto err_exit;
}
udc_controller->irq = ret;
@@ -2451,7 +2439,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (ret != 0) {
ERR("cannot request irq %d err %d\n",
udc_controller->irq, ret);
- goto err_iounmap;
+ goto err_exit;
}
/* Initialize the udc structure including QH member and other member */
@@ -2467,10 +2455,6 @@ static int fsl_udc_probe(struct platform_device *pdev)
dr_controller_setup(udc_controller);
}
- ret = fsl_udc_clk_finalize(pdev);
- if (ret)
- goto err_free_irq;
-
/* Setup gadget structure */
udc_controller->gadget.ops = &fsl_gadget_ops;
udc_controller->gadget.max_speed = USB_SPEED_HIGH;
@@ -2530,11 +2514,10 @@ err_del_udc:
dma_pool_destroy(udc_controller->td_pool);
err_free_irq:
free_irq(udc_controller->irq, udc_controller);
-err_iounmap:
+err_exit:
if (pdata->exit)
pdata->exit(pdev);
- fsl_udc_clk_release();
-err_iounmap_noclk:
+err_iounmap:
iounmap(dr_regs);
err_release_mem_region:
if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
@@ -2561,8 +2544,6 @@ static int fsl_udc_remove(struct platform_device *pdev)
udc_controller->done = &done;
usb_del_gadget_udc(&udc_controller->gadget);
- fsl_udc_clk_release();
-
/* DR has been stopped in usb_gadget_unregister_driver() */
remove_proc_file();
@@ -2677,10 +2658,6 @@ static int fsl_udc_otg_resume(struct device *dev)
--------------------------------------------------------------------------*/
static const struct platform_device_id fsl_udc_devtype[] = {
{
- .name = "imx-udc-mx27",
- }, {
- .name = "imx-udc-mx51",
- }, {
.name = "fsl-usb2-udc",
}, {
/* sentinel */
@@ -2689,7 +2666,6 @@ static const struct platform_device_id fsl_udc_devtype[] = {
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
static struct platform_driver udc_driver = {
.remove = fsl_udc_remove,
- /* Just for FSL i.mx SoC currently */
.id_table = fsl_udc_devtype,
/* these suspend and resume are not usb suspend and resume */
.suspend = fsl_udc_suspend,
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 4ba651ae9048..2efc5a930b48 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -588,23 +588,4 @@ static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
USB_DIR_IN) ? 1 : 0];
}
-struct platform_device;
-#ifdef CONFIG_ARCH_MXC
-int fsl_udc_clk_init(struct platform_device *pdev);
-int fsl_udc_clk_finalize(struct platform_device *pdev);
-void fsl_udc_clk_release(void);
-#else
-static inline int fsl_udc_clk_init(struct platform_device *pdev)
-{
- return 0;
-}
-static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- return 0;
-}
-static inline void fsl_udc_clk_release(void)
-{
-}
-#endif
-
#endif
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index f8f3aa52383b..4b35739d3695 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -207,14 +207,15 @@ DEFINE_SHOW_ATTRIBUTE(gr_dfs);
static void gr_dfs_create(struct gr_udc *dev)
{
const char *name = "gr_udc_state";
+ struct dentry *root;
- dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
- debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
+ root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
+ debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
}
static void gr_dfs_delete(struct gr_udc *dev)
{
- debugfs_remove_recursive(dev->dfs_root);
+ debugfs_remove(debugfs_lookup(dev_name(dev->dev), usb_debug_root));
}
#else /* !CONFIG_USB_GADGET_DEBUG_FS */
diff --git a/drivers/usb/gadget/udc/gr_udc.h b/drivers/usb/gadget/udc/gr_udc.h
index ac5b3f65adb5..70134239179e 100644
--- a/drivers/usb/gadget/udc/gr_udc.h
+++ b/drivers/usb/gadget/udc/gr_udc.h
@@ -215,8 +215,6 @@ struct gr_udc {
struct list_head ep_list;
spinlock_t lock; /* General lock, a.k.a. "dev->lock" in comments */
-
- struct dentry *dfs_root;
};
#define to_gr_udc(gadget) (container_of((gadget), struct gr_udc, gadget))
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 3f1c62adce4b..a25d01c89564 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -127,7 +127,6 @@ struct lpc32xx_udc {
struct usb_gadget_driver *driver;
struct platform_device *pdev;
struct device *dev;
- struct dentry *pde;
spinlock_t lock;
struct i2c_client *isp1301_i2c_client;
@@ -528,12 +527,12 @@ DEFINE_SHOW_ATTRIBUTE(udc);
static void create_debug_file(struct lpc32xx_udc *udc)
{
- udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &udc_fops);
+ debugfs_create_file(debug_filename, 0, NULL, udc, &udc_fops);
}
static void remove_debug_file(struct lpc32xx_udc *udc)
{
- debugfs_remove(udc->pde);
+ debugfs_remove(debugfs_lookup(debug_filename, NULL));
}
#else
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 34f4db554977..d2a2b20cc1ad 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1255,12 +1255,14 @@ static int max3420_probe(struct spi_device *spi)
err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
"max3420", udc);
if (err < 0)
- return err;
+ goto del_gadget;
udc->thread_task = kthread_create(max3420_thread, udc,
"max3420-thread");
- if (IS_ERR(udc->thread_task))
- return PTR_ERR(udc->thread_task);
+ if (IS_ERR(udc->thread_task)) {
+ err = PTR_ERR(udc->thread_task);
+ goto del_gadget;
+ }
irq = of_irq_get_byname(spi->dev.of_node, "vbus");
if (irq <= 0) { /* no vbus irq implies self-powered design */
@@ -1280,10 +1282,14 @@ static int max3420_probe(struct spi_device *spi)
err = devm_request_irq(&spi->dev, irq,
max3420_vbus_handler, 0, "vbus", udc);
if (err < 0)
- return err;
+ goto del_gadget;
}
return 0;
+
+del_gadget:
+ usb_del_gadget_udc(&udc->gadget);
+ return err;
}
static int max3420_remove(struct spi_device *spi)
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 5486f5a70868..ce3d7a3eb7e3 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -941,7 +941,7 @@ mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
struct mv_u3d_ep *ep;
- unsigned long flags = 0;
+ unsigned long flags;
int status = 0;
struct mv_u3d *u3d;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 0fb4ef464321..7f24ce400b59 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -888,7 +888,7 @@ static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
struct mv_ep *ep;
- unsigned long flags = 0;
+ unsigned long flags;
int status = 0;
struct mv_udc *udc;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 89f479b78d80..7c38057dcb4a 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1150,6 +1150,7 @@ net2272_pullup(struct usb_gadget *_gadget, int is_on)
static int net2272_start(struct usb_gadget *_gadget,
struct usb_gadget_driver *driver);
static int net2272_stop(struct usb_gadget *_gadget);
+static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
static const struct usb_gadget_ops net2272_ops = {
.get_frame = net2272_get_frame,
@@ -1158,6 +1159,7 @@ static const struct usb_gadget_ops net2272_ops = {
.pullup = net2272_pullup,
.udc_start = net2272_start,
.udc_stop = net2272_stop,
+ .udc_async_callbacks = net2272_async_callbacks,
};
/*---------------------------------------------------------------------------*/
@@ -1476,7 +1478,7 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
net2272_dequeue_all(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
+ if (dev->async_callbacks && driver) {
spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
@@ -1501,6 +1503,15 @@ static int net2272_stop(struct usb_gadget *_gadget)
return 0;
}
+static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
+{
+ struct net2272 *dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irq(&dev->lock);
+ dev->async_callbacks = enable;
+ spin_unlock_irq(&dev->lock);
+}
+
/*---------------------------------------------------------------------------*/
/* handle ep-a/ep-b dma completions */
static void
@@ -1910,9 +1921,11 @@ net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
u.r.bRequestType, u.r.bRequest,
u.r.wValue, u.r.wIndex,
net2272_ep_read(ep, EP_CFG));
- spin_unlock(&dev->lock);
- tmp = dev->driver->setup(&dev->gadget, &u.r);
- spin_lock(&dev->lock);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &u.r);
+ spin_lock(&dev->lock);
+ }
}
/* stall ep0 on error */
@@ -1994,14 +2007,14 @@ net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
if (disconnect || reset) {
stop_activity(dev, dev->driver);
net2272_ep0_start(dev);
- spin_unlock(&dev->lock);
- if (reset)
- usb_gadget_udc_reset
- (&dev->gadget, dev->driver);
- else
- (dev->driver->disconnect)
- (&dev->gadget);
- spin_lock(&dev->lock);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ if (reset)
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ else
+ (dev->driver->disconnect)(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
return;
}
}
@@ -2015,14 +2028,14 @@ net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
if (stat & tmp) {
net2272_write(dev, IRQSTAT1, tmp);
if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
- if (dev->driver->suspend)
+ if (dev->async_callbacks && dev->driver->suspend)
dev->driver->suspend(&dev->gadget);
if (!enable_suspend) {
stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
}
} else {
- if (dev->driver->resume)
+ if (dev->async_callbacks && dev->driver->resume)
dev->driver->resume(&dev->gadget);
}
stat &= ~tmp;
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
index c669308111c2..a9994f737588 100644
--- a/drivers/usb/gadget/udc/net2272.h
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -442,6 +442,7 @@ struct net2272 {
softconnect:1,
wakeup:1,
added:1,
+ async_callbacks:1,
dma_eot_polarity:1,
dma_dack_polarity:1,
dma_dreq_polarity:1,
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index fc9f99fe7f37..16e7d2db6411 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1617,6 +1617,7 @@ static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
static int net2280_start(struct usb_gadget *_gadget,
struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);
+static void net2280_async_callbacks(struct usb_gadget *_gadget, bool enable);
static const struct usb_gadget_ops net2280_ops = {
.get_frame = net2280_get_frame,
@@ -1625,6 +1626,7 @@ static const struct usb_gadget_ops net2280_ops = {
.pullup = net2280_pullup,
.udc_start = net2280_start,
.udc_stop = net2280_stop,
+ .udc_async_callbacks = net2280_async_callbacks,
.match_ep = net2280_match_ep,
};
@@ -2472,7 +2474,7 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
+ if (dev->async_callbacks && driver) {
spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
@@ -2502,6 +2504,15 @@ static int net2280_stop(struct usb_gadget *_gadget)
return 0;
}
+static void net2280_async_callbacks(struct usb_gadget *_gadget, bool enable)
+{
+ struct net2280 *dev = container_of(_gadget, struct net2280, gadget);
+
+ spin_lock_irq(&dev->lock);
+ dev->async_callbacks = enable;
+ spin_unlock_irq(&dev->lock);
+}
+
/*-------------------------------------------------------------------------*/
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
@@ -2814,8 +2825,6 @@ static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
* - Wait and try again.
*/
udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
-
- continue;
}
@@ -3042,9 +3051,11 @@ usb3_delegate:
readl(&ep->cfg->ep_cfg));
ep->responded = 0;
- spin_unlock(&dev->lock);
- tmp = dev->driver->setup(&dev->gadget, &r);
- spin_lock(&dev->lock);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &r);
+ spin_lock(&dev->lock);
+ }
}
do_stall3:
if (tmp < 0) {
@@ -3284,9 +3295,11 @@ delegate:
w_value, w_index, w_length,
readl(&ep->cfg->ep_cfg));
ep->responded = 0;
- spin_unlock(&dev->lock);
- tmp = dev->driver->setup(&dev->gadget, &u.r);
- spin_lock(&dev->lock);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &u.r);
+ spin_lock(&dev->lock);
+ }
}
/* stall ep0 on error */
@@ -3391,14 +3404,14 @@ __acquires(dev->lock)
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
- spin_unlock(&dev->lock);
- if (reset)
- usb_gadget_udc_reset
- (&dev->gadget, dev->driver);
- else
- (dev->driver->disconnect)
- (&dev->gadget);
- spin_lock(&dev->lock);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ if (reset)
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ else
+ (dev->driver->disconnect)(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
return;
}
}
@@ -3419,12 +3432,12 @@ __acquires(dev->lock)
writel(tmp, &dev->regs->irqstat1);
spin_unlock(&dev->lock);
if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
- if (dev->driver->suspend)
+ if (dev->async_callbacks && dev->driver->suspend)
dev->driver->suspend(&dev->gadget);
if (!enable_suspend)
stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
} else {
- if (dev->driver->resume)
+ if (dev->async_callbacks && dev->driver->resume)
dev->driver->resume(&dev->gadget);
/* at high speed, note erratum 0133 */
}
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 7da3dc1e9729..34716a9f4926 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -162,6 +162,7 @@ struct net2280 {
ltm_enable:1,
wakeup_enable:1,
addressed_state:1,
+ async_callbacks:1,
bug7734_patched:1;
u16 chiprev;
int enhanced_mode;
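Note on the net2272/net2280 hunks above: both drivers adopt the gadget core's udc_async_callbacks hook. The core tells the UDC when the bound function driver is ready for callbacks, and the UDC suppresses setup/disconnect/suspend/resume notifications until then, gating each call on a one-bit flag protected by the driver's IRQ spinlock. A minimal sketch of the pattern for a hypothetical UDC (struct my_udc and its field names are placeholders):

	static void my_udc_async_callbacks(struct usb_gadget *g, bool enable)
	{
		struct my_udc *dev = container_of(g, struct my_udc, gadget);

		spin_lock_irq(&dev->lock);
		dev->async_callbacks = enable;	/* single flag under the IRQ lock */
		spin_unlock_irq(&dev->lock);
	}

	/* every callback into the function driver is gated on the flag */
	static void my_udc_report_disconnect(struct my_udc *dev)
	{
		if (dev->async_callbacks && dev->driver) {
			spin_unlock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_lock(&dev->lock);
		}
	}

	static const struct usb_gadget_ops my_udc_ops = {
		/* ... udc_start, udc_stop, ... */
		.udc_async_callbacks	= my_udc_async_callbacks,
	};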
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 10324a7334fe..69ef1e669d0c 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1338,10 +1338,10 @@ DEFINE_SHOW_ATTRIBUTE(udc_debug);
#define create_debug_files(dev) \
do { \
- dev->debugfs_udc = debugfs_create_file(dev->gadget.name, \
+ debugfs_create_file(dev->gadget.name, \
S_IRUGO, NULL, dev, &udc_debug_fops); \
} while (0)
-#define remove_debug_files(dev) debugfs_remove(dev->debugfs_udc)
+#define remove_debug_files(dev) debugfs_remove(debugfs_lookup(dev->gadget.name, NULL))
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index ccc6b921f067..aa4b68fd9fc0 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -116,10 +116,6 @@ struct pxa25x_udc {
struct usb_phy *transceiver;
u64 dma_mask;
struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];
-
-#ifdef CONFIG_USB_GADGET_DEBUG_FS
- struct dentry *debugfs_udc;
-#endif
void __iomem *regs;
};
#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index ce57961dfd2d..f4b7a2a3e711 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -208,8 +208,6 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
struct dentry *root;
root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
- udc->debugfs_root = root;
-
debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
debugfs_create_file("queues", 0400, root, udc, &queues_dbg_fops);
debugfs_create_file("epstate", 0400, root, udc, &eps_dbg_fops);
@@ -217,7 +215,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
static void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
- debugfs_remove_recursive(udc->debugfs_root);
+ debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
}
#else
@@ -1730,7 +1728,7 @@ static void udc_enable(struct pxa_udc *udc)
}
/**
- * pxa27x_start - Register gadget driver
+ * pxa27x_udc_start - Register gadget driver
* @g: gadget
* @driver: gadget driver
*
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index 13b2977399ab..0a6bc18a1264 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -440,7 +440,6 @@ struct udc_stats {
* @last_interface: UDC interface of the last SET_INTERFACE host request
* @last_alternate: UDC altsetting of the last SET_INTERFACE host request
* @udccsr0: save of udccsr0 in case of suspend
- * @debugfs_root: root entry of debug filesystem
* @debugfs_state: debugfs entry for "udcstate"
* @debugfs_queues: debugfs entry for "queues"
* @debugfs_eps: debugfs entry for "epstate"
@@ -474,9 +473,6 @@ struct pxa_udc {
#ifdef CONFIG_PM
unsigned udccsr0;
#endif
-#ifdef CONFIG_USB_GADGET_DEBUG_FS
- struct dentry *debugfs_root;
-#endif
};
#define to_pxa(g) (container_of((g), struct pxa_udc, gadget))
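Note on the pxa25x/pxa27x hunks above (the s3c2410 and fotg210 hunks below follow the same conversion): the drivers stop caching debugfs dentries in their private structures and instead look the entry up by name when tearing it down. A minimal sketch of that pattern, assuming a directory created under usb_debug_root (struct my_udc and state_fops are placeholders):

	#include <linux/debugfs.h>

	static void my_init_debugfs(struct my_udc *udc)
	{
		struct dentry *root;

		/* no need to keep the dentry around; debugfs owns it */
		root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
		debugfs_create_file("state", 0400, root, udc, &state_fops);
	}

	static void my_cleanup_debugfs(struct my_udc *udc)
	{
		/* find the directory again by name and remove it */
		debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
	}

Later kernels also provide debugfs_lookup_and_remove() as a one-call wrapper for the removal side.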
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 7bd5182ce3ef..89f1f8c9f02e 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1220,9 +1220,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
int ret, i;
- hsudc = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsudc) +
- sizeof(struct s3c_hsudc_ep) * pd->epnum,
- GFP_KERNEL);
+ hsudc = devm_kzalloc(&pdev->dev, struct_size(hsudc, ep, pd->epnum),
+ GFP_KERNEL);
if (!hsudc)
return -ENOMEM;
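Note on the s3c-hsudc hunk above: the open-coded "sizeof(struct) + n * sizeof(element)" arithmetic is replaced by struct_size(), which computes the size of a structure with a trailing array and saturates on overflow so the allocation simply fails instead of being undersized. A minimal sketch, assuming a structure ending in an endpoint array (struct my_udc/my_ep are placeholders):

	#include <linux/overflow.h>

	struct my_udc {
		struct device	*dev;
		unsigned int	num_eps;
		struct my_ep	ep[];	/* trailing array, sized at probe time */
	};

	/* sizeof(struct my_udc) + num * sizeof(struct my_ep), overflow-checked */
	udc = devm_kzalloc(dev, struct_size(udc, ep, num), GFP_KERNEL);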
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index b154b62abefa..179777cb699f 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -198,7 +198,7 @@ static inline void s3c2410_udc_set_ep0_de(void __iomem *base)
udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG);
}
-inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
+static inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
{
udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG);
@@ -1843,9 +1843,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
if (retval)
goto err_add_udc;
- udc->regs_info = debugfs_create_file("registers", S_IRUGO,
- s3c2410_udc_debugfs_root, udc,
- &s3c2410_udc_debugfs_fops);
+ debugfs_create_file("registers", S_IRUGO, s3c2410_udc_debugfs_root, udc,
+ &s3c2410_udc_debugfs_fops);
dev_dbg(dev, "probe ok\n");
@@ -1889,7 +1888,7 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
return -EBUSY;
usb_del_gadget_udc(&udc->gadget);
- debugfs_remove(udc->regs_info);
+ debugfs_remove(debugfs_lookup("registers", s3c2410_udc_debugfs_root));
if (udc_info && !udc_info->udc_command &&
gpio_is_valid(udc_info->pullup_pin))
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
index 68bdf3e5aac2..135a5bff3c74 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -89,7 +89,6 @@ struct s3c2410_udc {
unsigned req_config : 1;
unsigned req_pending : 1;
u8 vbus;
- struct dentry *regs_info;
int irq;
};
#define to_s3c2410(g) (container_of((g), struct s3c2410_udc, gadget))
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 2319c9737c2b..c0ca7144e512 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -1907,7 +1907,7 @@ static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
kfree(req);
}
-static struct usb_ep_ops tegra_xudc_ep_ops = {
+static const struct usb_ep_ops tegra_xudc_ep_ops = {
.enable = tegra_xudc_ep_enable,
.disable = tegra_xudc_ep_disable,
.alloc_request = tegra_xudc_ep_alloc_request,
@@ -1928,7 +1928,7 @@ static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
return -EBUSY;
}
-static struct usb_ep_ops tegra_xudc_ep0_ops = {
+static const struct usb_ep_ops tegra_xudc_ep0_ops = {
.enable = tegra_xudc_ep0_enable,
.disable = tegra_xudc_ep0_disable,
.alloc_request = tegra_xudc_ep_alloc_request,
@@ -2168,7 +2168,7 @@ static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
return 0;
}
-static struct usb_gadget_ops tegra_xudc_gadget_ops = {
+static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
.get_frame = tegra_xudc_gadget_get_frame,
.wakeup = tegra_xudc_gadget_wakeup,
.pullup = tegra_xudc_gadget_pullup,
@@ -3508,10 +3508,8 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
if (IS_ERR(xudc->utmi_phy[i])) {
err = PTR_ERR(xudc->utmi_phy[i]);
- if (err != -EPROBE_DEFER)
- dev_err(xudc->dev, "failed to get usb2-%d PHY: %d\n",
- i, err);
-
+ dev_err_probe(xudc->dev, err,
+ "failed to get usb2-%d PHY\n", i);
goto clean_up;
} else if (xudc->utmi_phy[i]) {
/* Get usb-phy, if utmi phy is available */
@@ -3520,8 +3518,8 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
&xudc->vbus_nb);
if (IS_ERR(xudc->usbphy[i])) {
err = PTR_ERR(xudc->usbphy[i]);
- dev_err(xudc->dev, "failed to get usbphy-%d: %d\n",
- i, err);
+ dev_err_probe(xudc->dev, err,
+ "failed to get usbphy-%d\n", i);
goto clean_up;
}
} else if (!xudc->utmi_phy[i]) {
@@ -3538,10 +3536,8 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
if (IS_ERR(xudc->usb3_phy[i])) {
err = PTR_ERR(xudc->usb3_phy[i]);
- if (err != -EPROBE_DEFER)
- dev_err(xudc->dev, "failed to get usb3-%d PHY: %d\n",
- usb3, err);
-
+ dev_err_probe(xudc->dev, err,
+ "failed to get usb3-%d PHY\n", usb3);
goto clean_up;
} else if (xudc->usb3_phy[i])
dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
@@ -3781,9 +3777,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(xudc->dev, "failed to request clocks: %d\n", err);
-
+ dev_err_probe(xudc->dev, err, "failed to request clocks\n");
return err;
}
@@ -3798,9 +3792,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
xudc->supplies);
if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(xudc->dev, "failed to request regulators: %d\n", err);
-
+ dev_err_probe(xudc->dev, err, "failed to request regulators\n");
return err;
}
@@ -3861,6 +3853,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
return 0;
free_eps:
+ pm_runtime_disable(&pdev->dev);
tegra_xudc_free_eps(xudc);
free_event_ring:
tegra_xudc_free_event_ring(xudc);
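Note on the tegra-xudc hunks above: the "skip the error message on -EPROBE_DEFER" boilerplate is converted to dev_err_probe(), which logs only for real errors, records the deferral reason for later inspection, and returns the error code so the call can be used directly in the probe path. A minimal sketch of the usual shape (the "core" clock name is a placeholder):

	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get core clock\n");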
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
index 7430624c0bd7..19e837de2a20 100644
--- a/drivers/usb/gadget/udc/trace.c
+++ b/drivers/usb/gadget/udc/trace.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* trace.c - USB Gadget Framework Trace Support
*
* Copyright (C) 2016 Intel Corporation
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
index 2d1f68b5ea76..98584f6b6c66 100644
--- a/drivers/usb/gadget/udc/trace.h
+++ b/drivers/usb/gadget/udc/trace.h
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* udc.c - Core UDC Framework
*
* Copyright (C) 2016 Intel Corporation
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 72f2ea062d55..fb4ffedd6f0d 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -791,7 +791,7 @@ static int xudc_ep_set_halt(struct usb_ep *_ep, int value)
}
/**
- * xudc_ep_enable - Enables the given endpoint.
+ * __xudc_ep_enable - Enables the given endpoint.
* @ep: pointer to the xusb endpoint structure.
* @desc: pointer to usb endpoint descriptor.
*
@@ -987,7 +987,7 @@ static void xudc_free_request(struct usb_ep *_ep, struct usb_request *_req)
}
/**
- * xudc_ep0_queue - Adds the request to endpoint 0 queue.
+ * __xudc_ep0_queue - Adds the request to endpoint 0 queue.
* @ep0: pointer to the xusb endpoint 0 structure.
* @req: pointer to the xusb request structure.
*
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 6f7bd6641694..385be30baad3 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -387,11 +387,11 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
/* EHCI registers start at offset 0x100 */
ehci->caps = hcd->regs + 0x100;
-#ifdef CONFIG_PPC_83xx
+#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_85xx)
/*
- * Deal with MPC834X that need port power to be cycled after the power
- * fault condition is removed. Otherwise the state machine does not
- * reflect PORTSC[CSC] correctly.
+ * Deal with MPC834X/85XX that need port power to be cycled
+ * after the power fault condition is removed. Otherwise the
+ * state machine does not reflect PORTSC[CSC] correctly.
*/
ehci->need_oc_pp_cycle = 1;
#endif
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 94b5e64ae9a2..10b0365f3439 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -76,12 +76,12 @@ static const char hcd_name [] = "ehci_hcd";
#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
/* Initial IRQ latency: faster than hw default */
-static int log2_irq_thresh = 0; // 0 to 6
+static int log2_irq_thresh; // 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* initial park setting: slower than hw default */
-static unsigned park = 0;
+static unsigned park;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
@@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup);
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- u32 status, masked_status, pcd_status = 0, cmd;
+ u32 status, current_status, masked_status, pcd_status = 0;
+ u32 cmd;
int bh;
spin_lock(&ehci->lock);
- status = ehci_readl(ehci, &ehci->regs->status);
+ status = 0;
+ current_status = ehci_readl(ehci, &ehci->regs->status);
+restart:
/* e.g. cardbus physical eject */
- if (status == ~(u32) 0) {
+ if (current_status == ~(u32) 0) {
ehci_dbg (ehci, "device removed\n");
goto dead;
}
+ status |= current_status;
/*
* We don't use STS_FLR, but some controllers don't like it to
* remain on, so mask it out along with the other status bits.
*/
- masked_status = status & (INTR_MASK | STS_FLR);
+ masked_status = current_status & (INTR_MASK | STS_FLR);
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
@@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* clear (just) interrupts */
ehci_writel(ehci, masked_status, &ehci->regs->status);
+
+ /* For edge interrupts, don't race with an interrupt bit being raised */
+ current_status = ehci_readl(ehci, &ehci->regs->status);
+ if (current_status & INTR_MASK)
+ goto restart;
+
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
@@ -1238,6 +1248,10 @@ static const struct hc_driver ehci_hc_driver = {
* device support
*/
.free_dev = ehci_remove_device,
+#ifdef CONFIG_USB_HCD_TEST_MODE
+ /* EH SINGLE_STEP_SET_FEATURE test support */
+ .submit_single_step_set_feature = ehci_submit_single_step_set_feature,
+#endif
};
void ehci_init_driver(struct hc_driver *drv,
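Note on the ehci_irq() hunk above: the handler now accumulates status across a small restart loop. After clearing the acknowledged bits it re-reads the status register, and if new interrupt bits appeared in the meantime it goes around again, so an edge-triggered interrupt raised during the acknowledge is not silently lost. A simplified, hedged sketch of that read/clear/re-check pattern (register and mask names are placeholders, not the EHCI register layout):

	static u32 collect_irq_status(void __iomem *status_reg, u32 mask)
	{
		u32 status = 0, cur;

		cur = readl(status_reg);
		for (;;) {
			status |= cur;			/* remember every bit seen */
			writel(cur & mask, status_reg);	/* write-1-to-clear ack */

			/* re-read: another edge may have fired while we acked */
			cur = readl(status_reg);
			if (!(cur & mask))
				break;
		}

		return status;
	}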
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 159cc27b1a36..c4f6a2559a98 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -727,145 +727,6 @@ ehci_hub_descriptor (
}
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_HCD_TEST_MODE
-
-#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
-
-static void usb_ehset_completion(struct urb *urb)
-{
- struct completion *done = urb->context;
-
- complete(done);
-}
-static int submit_single_step_set_feature(
- struct usb_hcd *hcd,
- struct urb *urb,
- int is_setup
-);
-
-/*
- * Allocate and initialize a control URB. This request will be used by the
- * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
- * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
- * Return NULL if failed.
- */
-static struct urb *request_single_step_set_feature_urb(
- struct usb_device *udev,
- void *dr,
- void *buf,
- struct completion *done
-) {
- struct urb *urb;
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- struct usb_host_endpoint *ep;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return NULL;
-
- urb->pipe = usb_rcvctrlpipe(udev, 0);
- ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
- [usb_pipeendpoint(urb->pipe)];
- if (!ep) {
- usb_free_urb(urb);
- return NULL;
- }
-
- urb->ep = ep;
- urb->dev = udev;
- urb->setup_packet = (void *)dr;
- urb->transfer_buffer = buf;
- urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
- urb->complete = usb_ehset_completion;
- urb->status = -EINPROGRESS;
- urb->actual_length = 0;
- urb->transfer_flags = URB_DIR_IN;
- usb_get_urb(urb);
- atomic_inc(&urb->use_count);
- atomic_inc(&urb->dev->urbnum);
- urb->setup_dma = dma_map_single(
- hcd->self.sysdev,
- urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- urb->transfer_dma = dma_map_single(
- hcd->self.sysdev,
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- DMA_FROM_DEVICE);
- urb->context = done;
- return urb;
-}
-
-static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
-{
- int retval = -ENOMEM;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- struct usb_device *udev;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct usb_device_descriptor *buf;
- DECLARE_COMPLETION_ONSTACK(done);
-
- /* Obtain udev of the rhub's child port */
- udev = usb_hub_find_child(hcd->self.root_hub, port);
- if (!udev) {
- ehci_err(ehci, "No device attached to the RootHub\n");
- return -ENODEV;
- }
- buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
- if (!dr) {
- kfree(buf);
- return -ENOMEM;
- }
-
- /* Fill Setup packet for GetDescriptor */
- dr->bRequestType = USB_DIR_IN;
- dr->bRequest = USB_REQ_GET_DESCRIPTOR;
- dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
- dr->wIndex = 0;
- dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
- urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
- if (!urb)
- goto cleanup;
-
- /* Submit just the SETUP stage */
- retval = submit_single_step_set_feature(hcd, urb, 1);
- if (retval)
- goto out1;
- if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
- usb_kill_urb(urb);
- retval = -ETIMEDOUT;
- ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
- goto out1;
- }
- msleep(15 * 1000);
-
- /* Complete remaining DATA and STATUS stages using the same URB */
- urb->status = -EINPROGRESS;
- usb_get_urb(urb);
- atomic_inc(&urb->use_count);
- atomic_inc(&urb->dev->urbnum);
- retval = submit_single_step_set_feature(hcd, urb, 0);
- if (!retval && !wait_for_completion_timeout(&done,
- msecs_to_jiffies(2000))) {
- usb_kill_urb(urb);
- retval = -ETIMEDOUT;
- ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
- }
-out1:
- usb_free_urb(urb);
-cleanup:
- kfree(dr);
- kfree(buf);
- return retval;
-}
-#endif /* CONFIG_USB_HCD_TEST_MODE */
-/*-------------------------------------------------------------------------*/
int ehci_hub_control(
struct usb_hcd *hcd,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index a826715ae9bd..2cbf4f85bff3 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1165,7 +1165,7 @@ submit_async (
* performed; TRUE - SETUP and FALSE - IN+STATUS
* Returns 0 if success
*/
-static int submit_single_step_set_feature(
+static int ehci_submit_single_step_set_feature(
struct usb_hcd *hcd,
struct urb *urb,
int is_setup
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 9c2eda0918e1..05fb8d97cf02 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -850,7 +850,6 @@ static inline void create_debug_files(struct fotg210_hcd *fotg210)
struct dentry *root;
root = debugfs_create_dir(bus->bus_name, fotg210_debug_root);
- fotg210->debug_dir = root;
debugfs_create_file("async", S_IRUGO, root, bus, &debug_async_fops);
debugfs_create_file("periodic", S_IRUGO, root, bus,
@@ -861,7 +860,9 @@ static inline void create_debug_files(struct fotg210_hcd *fotg210)
static inline void remove_debug_files(struct fotg210_hcd *fotg210)
{
- debugfs_remove_recursive(fotg210->debug_dir);
+ struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+
+ debugfs_remove(debugfs_lookup(bus->bus_name, fotg210_debug_root));
}
/* handshake - spin reading hc until handshake completes or fails
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 6cee40ec65b4..0a91061a0551 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -184,9 +184,6 @@ struct fotg210_hcd { /* one per controller */
/* silicon clock */
struct clk *pclk;
-
- /* debug files */
- struct dentry *debug_dir;
};
/* convert between an HCD pointer and the corresponding FOTG210_HCD */
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index e7a8e0609853..59cc1bc7f12f 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -153,8 +153,6 @@ struct max3421_hcd {
*/
struct urb *curr_urb;
enum scheduling_pass sched_pass;
- struct usb_device *loaded_dev; /* dev that's loaded into the chip */
- int loaded_epnum; /* epnum whose toggles are loaded */
int urb_done; /* > 0 -> no errors, < 0: errno */
size_t curr_len;
u8 hien;
@@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
* Caller must NOT hold HCD spinlock.
*/
static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
- int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
{
- struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
- int old_epnum, same_ep, rcvtog, sndtog;
- struct usb_device *old_dev;
+ int rcvtog, sndtog;
u8 hctl;
- old_dev = max3421_hcd->loaded_dev;
- old_epnum = max3421_hcd->loaded_epnum;
-
- same_ep = (dev == old_dev && epnum == old_epnum);
- if (same_ep && !force_toggles)
- return;
-
- if (old_dev && !same_ep) {
- /* save the old end-points toggles: */
- u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
- rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
- sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
- /* no locking: HCD (i.e., we) own toggles, don't we? */
- usb_settoggle(old_dev, old_epnum, 0, rcvtog);
- usb_settoggle(old_dev, old_epnum, 1, sndtog);
- }
/* setup new endpoint's toggle bits: */
rcvtog = usb_gettoggle(dev, epnum, 0);
sndtog = usb_gettoggle(dev, epnum, 1);
hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
- max3421_hcd->loaded_epnum = epnum;
spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
/*
@@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
* address-assignment so it's best to just always load the
* address whenever the end-point changed/was forced.
*/
- max3421_hcd->loaded_dev = dev;
spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
}
@@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb, *curr_urb = NULL;
struct max3421_ep *max3421_ep;
- int epnum, force_toggles = 0;
+ int epnum;
struct usb_host_endpoint *ep;
struct list_head *pos;
unsigned long flags;
@@ -777,7 +752,6 @@ done:
usb_settoggle(urb->dev, epnum, 0, 1);
usb_settoggle(urb->dev, epnum, 1, 1);
max3421_ep->pkt_state = PKT_STATE_SETUP;
- force_toggles = 1;
} else
max3421_ep->pkt_state = PKT_STATE_TRANSFER;
}
@@ -785,7 +759,7 @@ done:
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
max3421_ep->last_active = max3421_hcd->frame_number;
- max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+ max3421_set_address(hcd, urb->dev, epnum);
max3421_set_speed(hcd, urb->dev);
max3421_next_transfer(hcd, 0);
return 1;
@@ -1379,6 +1353,16 @@ max3421_urb_done(struct usb_hcd *hcd)
status = 0;
urb = max3421_hcd->curr_urb;
if (urb) {
+ /* save the old end-points toggles: */
+ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+ int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+ int epnum = usb_endpoint_num(&urb->ep->desc);
+
+ /* no locking: HCD (i.e., we) own toggles, don't we? */
+ usb_settoggle(urb->dev, epnum, 0, rcvtog);
+ usb_settoggle(urb->dev, epnum, 1, sndtog);
+
max3421_hcd->curr_urb = NULL;
spin_lock_irqsave(&max3421_hcd->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 9bbd7ddd0003..a24aea3d2759 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -611,8 +611,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
if (ohci_at91->wakeup)
enable_irq_wake(hcd->irq);
- ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
-
ret = ohci_suspend(hcd, ohci_at91->wakeup);
if (ret) {
if (ohci_at91->wakeup)
@@ -632,7 +630,10 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
msleep(1);
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
at91_stop_clock(ohci_at91);
+ } else {
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
}
return ret;
@@ -644,6 +645,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
else
@@ -651,8 +654,6 @@ ohci_hcd_at91_drv_resume(struct device *dev)
ohci_resume(hcd, false);
- ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
-
return 0;
}
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 5a783c423d8e..ae882d76612b 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2392,8 +2392,7 @@ static int dequeue_from_overflow_chain(struct u132 *u132,
urb->error_count = 0;
usb_hcd_giveback_urb(hcd, urb, 0);
return 0;
- } else
- continue;
+ }
}
dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
"[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
@@ -2448,8 +2447,7 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
queue_scan];
break;
- } else
- continue;
+ }
}
while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index ae4e4ab638b5..bef104511352 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -240,11 +240,11 @@ static void dbc_tty_flush_chars(struct tty_struct *tty)
spin_unlock_irqrestore(&port->port_lock, flags);
}
-static int dbc_tty_write_room(struct tty_struct *tty)
+static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
- int room = 0;
+ unsigned int room;
spin_lock_irqsave(&port->port_lock, flags);
room = kfifo_avail(&port->write_fifo);
@@ -253,11 +253,11 @@ static int dbc_tty_write_room(struct tty_struct *tty)
return room;
}
-static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
- int chars = 0;
+ unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
chars = kfifo_len(&port->write_fifo);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e9b18fc17617..151e93c4bd57 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1638,11 +1638,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
* Inform the usbcore about resume-in-progress by returning
* a non-zero value even if there are no status changes.
*/
+ spin_lock_irqsave(&xhci->lock, flags);
+
status = bus_state->resuming_ports;
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
- spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
temp = readl(ports[i]->addr);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f66815fe8482..0e312066c5c6 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1924,6 +1924,7 @@ no_bw:
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
+ xhci->port_caps = NULL;
xhci->page_size = 0;
xhci->page_shift = 0;
@@ -2547,6 +2548,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wrote ERST address to ir_set 0.");
+ xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
+
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 8b90da5a6ed1..cffcaf4dfa9f 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -470,11 +470,12 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 extra_cs_count;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
- int i;
+
+ if (!sch_ep->sch_tt)
+ return 0;
start_ss = offset % 8;
@@ -488,10 +489,6 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (!(start_ss == 7 || last_ss < 6))
return -ESCH_SS_Y6;
- for (i = 0; i < sch_ep->cs_count; i++)
- if (test_bit(offset + i, tt->ss_bit_map))
- return -ESCH_SS_OVERLAP;
-
} else {
u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
@@ -518,9 +515,6 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (cs_count > 7)
cs_count = 7; /* HW limit */
- if (test_bit(offset, tt->ss_bit_map))
- return -ESCH_SS_OVERLAP;
-
sch_ep->cs_count = cs_count;
/* one for ss, the other for idle */
sch_ep->num_budget_microframes = cs_count + 2;
@@ -541,11 +535,9 @@ static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 base, num_esit;
int bw_updated;
- int bits;
int i, j;
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
- bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
if (used)
bw_updated = sch_ep->bw_cost_per_microframe;
@@ -555,13 +547,6 @@ static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
for (i = 0; i < num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < bits; j++) {
- if (used)
- set_bit(base + j, tt->ss_bit_map);
- else
- clear_bit(base + j, tt->ss_bit_map);
- }
-
for (j = 0; j < sch_ep->cs_count; j++)
tt->fs_bus_bw[base + j] += bw_updated;
}
@@ -603,54 +588,47 @@ static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
static int check_sch_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep)
{
+ const u32 esit_boundary = get_esit_boundary(sch_ep);
+ const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
u32 offset;
- u32 min_bw;
- u32 min_index;
u32 worst_bw;
- u32 bw_boundary;
- u32 esit_boundary;
- u32 min_num_budget;
- u32 min_cs_count;
+ u32 min_bw = ~0;
+ int min_index = -1;
int ret = 0;
/*
* Search through all possible schedule microframes.
* and find a microframe where its worst bandwidth is minimum.
*/
- min_bw = ~0;
- min_index = 0;
- min_cs_count = sch_ep->cs_count;
- min_num_budget = sch_ep->num_budget_microframes;
- esit_boundary = get_esit_boundary(sch_ep);
for (offset = 0; offset < sch_ep->esit; offset++) {
- if (sch_ep->sch_tt) {
- ret = check_sch_tt(sch_ep, offset);
- if (ret)
- continue;
- }
+ ret = check_sch_tt(sch_ep, offset);
+ if (ret)
+ continue;
if ((offset + sch_ep->num_budget_microframes) > esit_boundary)
break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
+ if (worst_bw > bw_boundary)
+ continue;
+
if (min_bw > worst_bw) {
min_bw = worst_bw;
min_index = offset;
- min_cs_count = sch_ep->cs_count;
- min_num_budget = sch_ep->num_budget_microframes;
}
+
+ /* use first-fit for LS/FS */
+ if (sch_ep->sch_tt && min_index >= 0)
+ break;
+
if (min_bw == 0)
break;
}
- bw_boundary = get_bw_boundary(sch_ep->speed);
- /* check bandwidth */
- if (min_bw > bw_boundary)
+ if (min_index < 0)
return ret ? ret : -ESCH_BW_OVERFLOW;
sch_ep->offset = min_index;
- sch_ep->cs_count = min_cs_count;
- sch_ep->num_budget_microframes = min_num_budget;
return load_ep_bw(sch_bw, sch_ep, true);
}
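Note on the reworked check_sch_bw() above: the search now walks every candidate microframe offset, rejects any offset whose worst-case bandwidth already exceeds the boundary, keeps the minimum-bandwidth offset, and for LS/FS endpoints behind a TT stops at the first acceptable offset. A simplified model of that selection loop, not the driver's actual code (all names are placeholders, and -ENOSPC stands in for the driver's -ESCH_BW_OVERFLOW):

	int best = -1;
	u32 best_bw = ~0u;

	for (offset = 0; offset < esit; offset++) {
		if (check_tt(ep, offset))		/* TT constraints (LS/FS only) */
			continue;
		if (offset + budget_uframes > esit_boundary)
			break;

		worst_bw = worst_case_bw(ep, offset);
		if (worst_bw > bw_boundary)		/* would overflow this uframe */
			continue;

		if (worst_bw < best_bw) {
			best_bw = worst_bw;
			best = offset;
		}

		if (behind_tt && best >= 0)		/* first fit for LS/FS */
			break;
		if (best_bw == 0)			/* cannot do better */
			break;
	}

	if (best < 0)
		return -ENOSPC;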
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b2058b3bc834..2548976bcf05 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -495,8 +495,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto put_usb2_hcd;
}
mtk->has_ippc = true;
- } else {
- mtk->has_ippc = false;
}
device_init_wakeup(dev, true);
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index cd3a37bb73e6..ace432356c41 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -24,12 +24,10 @@
#define XHCI_MTK_MAX_ESIT 64
/**
- * @ss_bit_map: used to avoid start split microframes overlay
* @fs_bus_bw: array to keep track of bandwidth already used for FS
* @ep_list: Endpoints using this TT
*/
struct mu3h_sch_tt {
- DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
struct list_head ep_list;
};
@@ -138,17 +136,17 @@ struct xhci_hcd_mtk {
struct mu3h_sch_bw_info *sch_array;
struct list_head bw_ep_chk_list;
struct mu3c_ippc_regs __iomem *ippc_regs;
- bool has_ippc;
int num_u2_ports;
int num_u3_ports;
int u3p_dis_msk;
struct regulator *vusb33;
struct regulator *vbus;
struct clk_bulk_data clks[BULK_CLKS_NUM];
- bool lpm_support;
- bool u2_lpm_disable;
+ unsigned int has_ippc:1;
+ unsigned int lpm_support:1;
+ unsigned int u2_lpm_disable:1;
/* usb remote wakeup */
- bool uwk_en;
+ unsigned int uwk_en:1;
struct regmap *uwk;
u32 uwk_reg_base;
u32 uwk_vers;
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index f97ac9f52bf4..5923844ed821 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -197,7 +197,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
if (err)
return pcibios_err_to_errno(err);
- if (rom_state & BIT(15)) {
+ if (rom_state & RENESAS_ROM_STATUS_ROM_EXISTS) {
/* ROM exists */
dev_dbg(&pdev->dev, "ROM exists\n");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 18c2bbddf080..1c9a7957c45c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -636,7 +636,14 @@ static const struct pci_device_id pci_ids[] = {
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/*
+ * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
+ * load firmware, so don't encumber the xhci-pci driver with it.
+ */
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
MODULE_FIRMWARE("renesas_usb_fw.mem");
+#endif
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6acd2329e08d..8fea44bbc266 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3076,6 +3076,11 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, event_ring_deq);
+
+ /* ring is half-full, force isoc trbs to interrupt more often */
+ if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+ xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
+
event_loop = 0;
}
@@ -3956,7 +3961,7 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
* generate an event at least every 8th TD to clear the event ring
*/
if (i && xhci->quirks & XHCI_AVOID_BEI)
- return !!(i % 8);
+ return !!(i % xhci->isoc_bei_interval);
return true;
}
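Note on the two xhci-ring hunks above, together with the xhci-mem hunk that initializes isoc_bei_interval: the driver no longer hard-codes "interrupt on every 8th isochronous TD". The interval starts at AVOID_BEI_INTERVAL_MAX and is halved, down to AVOID_BEI_INTERVAL_MIN, whenever the interrupt handler finds the event ring half full, so isoc transfers raise events more often when the ring is under pressure. A simplified sketch of the two sides of that mechanism (the constant names stand in for the xhci.h values):

	/* interrupt handler: the event ring is draining too slowly */
	if (bei_interval > BEI_INTERVAL_MIN)
		bei_interval /= 2;	/* force isoc TDs to interrupt more often */

	/* queueing the i-th isoc TD of an URB: block the event unless it is
	 * the first TD or a multiple of the current interval */
	block_event = (i != 0) && (i % bei_interval);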
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index c7387677a26a..575fa89a783f 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -2,7 +2,7 @@
/*
* NVIDIA Tegra xHCI host controller driver
*
- * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2014 Google, Inc.
*/
@@ -15,9 +15,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -224,6 +226,7 @@ struct tegra_xusb {
int xhci_irq;
int mbox_irq;
+ int padctl_irq;
void __iomem *ipfs_base;
void __iomem *fpci_base;
@@ -249,8 +252,7 @@ struct tegra_xusb {
struct device *genpd_dev_host;
struct device *genpd_dev_ss;
- struct device_link *genpd_dl_host;
- struct device_link *genpd_dl_ss;
+ bool use_genpd;
struct phy **phys;
unsigned int num_phys;
@@ -270,6 +272,7 @@ struct tegra_xusb {
dma_addr_t phys;
} fw;
+ bool suspended;
struct tegra_xusb_context context;
};
@@ -666,6 +669,9 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
mutex_lock(&tegra->lock);
+ if (pm_runtime_suspended(tegra->dev) || tegra->suspended)
+ goto out;
+
value = fpci_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
@@ -679,6 +685,7 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
tegra_xusb_mbox_handle(tegra, &msg);
+out:
mutex_unlock(&tegra->lock);
return IRQ_HANDLED;
}
@@ -819,40 +826,6 @@ static void tegra_xusb_phy_disable(struct tegra_xusb *tegra)
}
}
-static int tegra_xusb_runtime_suspend(struct device *dev)
-{
- struct tegra_xusb *tegra = dev_get_drvdata(dev);
-
- regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
- tegra_xusb_clk_disable(tegra);
-
- return 0;
-}
-
-static int tegra_xusb_runtime_resume(struct device *dev)
-{
- struct tegra_xusb *tegra = dev_get_drvdata(dev);
- int err;
-
- err = tegra_xusb_clk_enable(tegra);
- if (err) {
- dev_err(dev, "failed to enable clocks: %d\n", err);
- return err;
- }
-
- err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
- if (err) {
- dev_err(dev, "failed to enable regulators: %d\n", err);
- goto disable_clk;
- }
-
- return 0;
-
-disable_clk:
- tegra_xusb_clk_disable(tegra);
- return err;
-}
-
#ifdef CONFIG_PM_SLEEP
static int tegra_xusb_init_context(struct tegra_xusb *tegra)
{
@@ -1022,10 +995,9 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
static void tegra_xusb_powerdomain_remove(struct device *dev,
struct tegra_xusb *tegra)
{
- if (tegra->genpd_dl_ss)
- device_link_del(tegra->genpd_dl_ss);
- if (tegra->genpd_dl_host)
- device_link_del(tegra->genpd_dl_host);
+ if (!tegra->use_genpd)
+ return;
+
if (!IS_ERR_OR_NULL(tegra->genpd_dev_ss))
dev_pm_domain_detach(tegra->genpd_dev_ss, true);
if (!IS_ERR_OR_NULL(tegra->genpd_dev_host))
@@ -1051,20 +1023,84 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
return err;
}
- tegra->genpd_dl_host = device_link_add(dev, tegra->genpd_dev_host,
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS);
- if (!tegra->genpd_dl_host) {
- dev_err(dev, "adding host device link failed!\n");
- return -ENODEV;
+ tegra->use_genpd = true;
+
+ return 0;
+}
+
+static int tegra_xusb_unpowergate_partitions(struct tegra_xusb *tegra)
+{
+ struct device *dev = tegra->dev;
+ int rc;
+
+ if (tegra->use_genpd) {
+ rc = pm_runtime_get_sync(tegra->genpd_dev_ss);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable XUSB SS partition\n");
+ return rc;
+ }
+
+ rc = pm_runtime_get_sync(tegra->genpd_dev_host);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable XUSB Host partition\n");
+ pm_runtime_put_sync(tegra->genpd_dev_ss);
+ return rc;
+ }
+ } else {
+ rc = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
+ tegra->ss_clk,
+ tegra->ss_rst);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable XUSB SS partition\n");
+ return rc;
+ }
+
+ rc = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
+ tegra->host_clk,
+ tegra->host_rst);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable XUSB Host partition\n");
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ return rc;
+ }
}
- tegra->genpd_dl_ss = device_link_add(dev, tegra->genpd_dev_ss,
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS);
- if (!tegra->genpd_dl_ss) {
- dev_err(dev, "adding superspeed device link failed!\n");
- return -ENODEV;
+ return 0;
+}
+
+static int tegra_xusb_powergate_partitions(struct tegra_xusb *tegra)
+{
+ struct device *dev = tegra->dev;
+ int rc;
+
+ if (tegra->use_genpd) {
+ rc = pm_runtime_put_sync(tegra->genpd_dev_host);
+ if (rc < 0) {
+ dev_err(dev, "failed to disable XUSB Host partition\n");
+ return rc;
+ }
+
+ rc = pm_runtime_put_sync(tegra->genpd_dev_ss);
+ if (rc < 0) {
+ dev_err(dev, "failed to disable XUSB SS partition\n");
+ pm_runtime_get_sync(tegra->genpd_dev_host);
+ return rc;
+ }
+ } else {
+ rc = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
+ if (rc < 0) {
+ dev_err(dev, "failed to disable XUSB Host partition\n");
+ return rc;
+ }
+
+ rc = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ if (rc < 0) {
+ dev_err(dev, "failed to disable XUSB SS partition\n");
+ tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
+ tegra->host_clk,
+ tegra->host_rst);
+ return rc;
+ }
}
return 0;
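Note on tegra_xusb_unpowergate_partitions()/tegra_xusb_powergate_partitions() above: the two power-sequencing paths are folded into one place. With generic power domains the SS and host partitions are virtual devices driven through runtime PM; otherwise the legacy Tegra powergate API is used, and a failure on the second partition rolls the first one back. A condensed sketch of the power-up branch with the error handling trimmed (see the hunk above for the full rollback):

	if (tegra->use_genpd) {
		pm_runtime_get_sync(tegra->genpd_dev_ss);	/* SS partition on */
		pm_runtime_get_sync(tegra->genpd_dev_host);	/* host partition on */
	} else {
		tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
						  tegra->ss_clk, tegra->ss_rst);
		tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
						  tegra->host_clk, tegra->host_rst);
	}

The power-down helper mirrors this in reverse order: host partition first, then SS.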
@@ -1086,6 +1122,24 @@ static int __tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
return err;
}
+static irqreturn_t tegra_xusb_padctl_irq(int irq, void *data)
+{
+ struct tegra_xusb *tegra = data;
+
+ mutex_lock(&tegra->lock);
+
+ if (tegra->suspended) {
+ mutex_unlock(&tegra->lock);
+ return IRQ_HANDLED;
+ }
+
+ mutex_unlock(&tegra->lock);
+
+ pm_runtime_resume(tegra->dev);
+
+ return IRQ_HANDLED;
+}
+
static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
{
int err;
@@ -1209,6 +1263,52 @@ static void tegra_xhci_id_work(struct work_struct *work)
}
}
+#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
+static bool is_usb2_otg_phy(struct tegra_xusb *tegra, unsigned int index)
+{
+ return (tegra->usbphy[index] != NULL);
+}
+
+static bool is_usb3_otg_phy(struct tegra_xusb *tegra, unsigned int index)
+{
+ struct tegra_xusb_padctl *padctl = tegra->padctl;
+ unsigned int i;
+ int port;
+
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ if (is_usb2_otg_phy(tegra, i)) {
+ port = tegra_xusb_padctl_get_usb3_companion(padctl, i);
+ if ((port >= 0) && (index == (unsigned int)port))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool is_host_mode_phy(struct tegra_xusb *tegra, unsigned int phy_type, unsigned int index)
+{
+ if (strcmp(tegra->soc->phy_types[phy_type].name, "hsic") == 0)
+ return true;
+
+ if (strcmp(tegra->soc->phy_types[phy_type].name, "usb2") == 0) {
+ if (is_usb2_otg_phy(tegra, index))
+ return ((index == tegra->otg_usb2_port) && tegra->host_mode);
+ else
+ return true;
+ }
+
+ if (strcmp(tegra->soc->phy_types[phy_type].name, "usb3") == 0) {
+ if (is_usb3_otg_phy(tegra, index))
+ return ((index == tegra->otg_usb3_port) && tegra->host_mode);
+ else
+ return true;
+ }
+
+ return false;
+}
+#endif
+
static int tegra_xusb_get_usb2_port(struct tegra_xusb *tegra,
struct usb_phy *usbphy)
{
@@ -1301,6 +1401,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb *tegra;
+ struct device_node *np;
struct resource *regs;
struct xhci_hcd *xhci;
unsigned int i, j, k;
@@ -1321,8 +1422,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (err < 0)
return err;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
+ tegra->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
@@ -1348,6 +1448,18 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->padctl))
return PTR_ERR(tegra->padctl);
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,xusb-padctl", 0);
+ if (!np) {
+ err = -ENODEV;
+ goto put_padctl;
+ }
+
+ tegra->padctl_irq = of_irq_get(np, 0);
+ if (tegra->padctl_irq <= 0) {
+ err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
+ goto put_padctl;
+ }
+
tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
if (IS_ERR(tegra->host_clk)) {
err = PTR_ERR(tegra->host_clk);
@@ -1428,25 +1540,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
err);
goto put_padctl;
}
-
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
- tegra->ss_clk,
- tegra->ss_rst);
- if (err) {
- dev_err(&pdev->dev,
- "failed to enable XUSBA domain: %d\n", err);
- goto put_padctl;
- }
-
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
- tegra->host_clk,
- tegra->host_rst);
- if (err) {
- tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
- dev_err(&pdev->dev,
- "failed to enable XUSBC domain: %d\n", err);
- goto put_padctl;
- }
} else {
err = tegra_xusb_powerdomain_init(&pdev->dev, tegra);
if (err)
@@ -1511,6 +1604,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
+ tegra->hcd->skip_phy_initialization = 1;
tegra->hcd->regs = tegra->regs;
tegra->hcd->rsrc_start = regs->start;
tegra->hcd->rsrc_len = resource_size(regs);
@@ -1521,10 +1615,22 @@ static int tegra_xusb_probe(struct platform_device *pdev)
*/
platform_set_drvdata(pdev, tegra);
+ err = tegra_xusb_clk_enable(tegra);
+ if (err) {
+ dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
+ goto put_hcd;
+ }
+
+ err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
+ if (err) {
+ dev_err(tegra->dev, "failed to enable regulators: %d\n", err);
+ goto disable_clk;
+ }
+
err = tegra_xusb_phy_enable(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
- goto put_hcd;
+ goto disable_regulator;
}
/*
@@ -1543,30 +1649,22 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto disable_phy;
}
- pm_runtime_enable(&pdev->dev);
-
- if (!pm_runtime_enabled(&pdev->dev))
- err = tegra_xusb_runtime_resume(&pdev->dev);
- else
- err = pm_runtime_get_sync(&pdev->dev);
-
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable device: %d\n", err);
+ err = tegra_xusb_unpowergate_partitions(tegra);
+ if (err)
goto free_firmware;
- }
tegra_xusb_config(tegra);
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
- goto put_rpm;
+ goto powergate;
}
err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
- goto put_rpm;
+ goto powergate;
}
device_wakeup_enable(tegra->hcd->self.controller);
@@ -1589,12 +1687,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_usb3;
}
- err = tegra_xusb_enable_firmware_messages(tegra);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
- goto remove_usb3;
- }
-
err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
tegra_xusb_mbox_irq,
tegra_xusb_mbox_thread, 0,
@@ -1604,12 +1696,36 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto remove_usb3;
}
+ err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq,
+ IRQF_ONESHOT, dev_name(&pdev->dev), tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
+ goto remove_usb3;
+ }
+
+ err = tegra_xusb_enable_firmware_messages(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
+ goto remove_usb3;
+ }
+
err = tegra_xusb_init_usb_phy(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to init USB PHY: %d\n", err);
goto remove_usb3;
}
+ /* Enable wake for both USB 2.0 and USB 3.0 roothubs */
+ device_init_wakeup(&tegra->hcd->self.root_hub->dev, true);
+ device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true);
+ device_init_wakeup(tegra->dev, true);
+
+ pm_runtime_use_autosuspend(tegra->dev);
+ pm_runtime_set_autosuspend_delay(tegra->dev, 2000);
+ pm_runtime_mark_last_busy(tegra->dev);
+ pm_runtime_set_active(tegra->dev);
+ pm_runtime_enable(tegra->dev);
+
return 0;
remove_usb3:
@@ -1618,25 +1734,23 @@ put_usb3:
usb_put_hcd(xhci->shared_hcd);
remove_usb2:
usb_remove_hcd(tegra->hcd);
-put_rpm:
- if (!pm_runtime_status_suspended(&pdev->dev))
- tegra_xusb_runtime_suspend(&pdev->dev);
-put_hcd:
- usb_put_hcd(tegra->hcd);
+powergate:
+ tegra_xusb_powergate_partitions(tegra);
free_firmware:
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
disable_phy:
tegra_xusb_phy_disable(tegra);
- pm_runtime_disable(&pdev->dev);
+disable_regulator:
+ regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+disable_clk:
+ tegra_xusb_clk_disable(tegra);
+put_hcd:
+ usb_put_hcd(tegra->hcd);
put_powerdomains:
- if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
- tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
- tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
- } else {
- tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
- }
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
put_padctl:
+ of_node_put(np);
tegra_xusb_padctl_put(tegra->padctl);
return err;
}
@@ -1648,6 +1762,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
tegra_xusb_deinit_usb_phy(tegra);
+ pm_runtime_get_sync(&pdev->dev);
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
xhci->shared_hcd = NULL;
@@ -1657,24 +1772,22 @@ static int tegra_xusb_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
- if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
- tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
- tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
- } else {
- tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
- }
+ tegra_xusb_powergate_partitions(tegra);
- tegra_xusb_phy_disable(tegra);
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ tegra_xusb_phy_disable(tegra);
+ tegra_xusb_clk_disable(tegra);
+ regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_padctl_put(tegra->padctl);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
{
struct device *dev = hub->hcd->self.controller;
@@ -1700,9 +1813,17 @@ static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
static int tegra_xusb_check_ports(struct tegra_xusb *tegra)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ struct xhci_bus_state *bus_state = &xhci->usb2_rhub.bus_state;
unsigned long flags;
int err = 0;
+ if (bus_state->bus_suspended) {
+ /* xusb_hub_suspend() has just directed one or more USB2 port(s)
+ * to U3 state, it takes 3ms to enter U3.
+ */
+ usleep_range(3000, 4000);
+ }
+
spin_lock_irqsave(&xhci->lock, flags);
if (!xhci_hub_ports_suspended(&xhci->usb2_rhub) ||
@@ -1748,45 +1869,186 @@ static void tegra_xusb_restore_context(struct tegra_xusb *tegra)
}
}
-static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool wakeup)
+static enum usb_device_speed tegra_xhci_portsc_to_speed(struct tegra_xusb *tegra, u32 portsc)
{
+ if (DEV_LOWSPEED(portsc))
+ return USB_SPEED_LOW;
+
+ if (DEV_HIGHSPEED(portsc))
+ return USB_SPEED_HIGH;
+
+ if (DEV_FULLSPEED(portsc))
+ return USB_SPEED_FULL;
+
+ if (DEV_SUPERSPEED_ANY(portsc))
+ return USB_SPEED_SUPER;
+
+ return USB_SPEED_UNKNOWN;
+}
+
+static void tegra_xhci_enable_phy_sleepwalk_wake(struct tegra_xusb *tegra)
+{
+ struct tegra_xusb_padctl *padctl = tegra->padctl;
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ enum usb_device_speed speed;
+ struct phy *phy;
+ unsigned int index, offset;
+ unsigned int i, j, k;
+ struct xhci_hub *rhub;
+ u32 portsc;
+
+ for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
+ if (strcmp(tegra->soc->phy_types[i].name, "usb3") == 0)
+ rhub = &xhci->usb3_rhub;
+ else
+ rhub = &xhci->usb2_rhub;
+
+ if (strcmp(tegra->soc->phy_types[i].name, "hsic") == 0)
+ offset = tegra->soc->ports.usb2.count;
+ else
+ offset = 0;
+
+ for (j = 0; j < tegra->soc->phy_types[i].num; j++) {
+ phy = tegra->phys[k++];
+
+ if (!phy)
+ continue;
+
+ index = j + offset;
+
+ if (index >= rhub->num_ports)
+ continue;
+
+ if (!is_host_mode_phy(tegra, i, j))
+ continue;
+
+ portsc = readl(rhub->ports[index]->addr);
+ speed = tegra_xhci_portsc_to_speed(tegra, portsc);
+ tegra_xusb_padctl_enable_phy_sleepwalk(padctl, phy, speed);
+ tegra_xusb_padctl_enable_phy_wake(padctl, phy);
+ }
+ }
+}
+
+static void tegra_xhci_disable_phy_wake(struct tegra_xusb *tegra)
+{
+ struct tegra_xusb_padctl *padctl = tegra->padctl;
+ unsigned int i;
+
+ for (i = 0; i < tegra->num_phys; i++) {
+ if (!tegra->phys[i])
+ continue;
+
+ tegra_xusb_padctl_disable_phy_wake(padctl, tegra->phys[i]);
+ }
+}
+
+static void tegra_xhci_disable_phy_sleepwalk(struct tegra_xusb *tegra)
+{
+ struct tegra_xusb_padctl *padctl = tegra->padctl;
+ unsigned int i;
+
+ for (i = 0; i < tegra->num_phys; i++) {
+ if (!tegra->phys[i])
+ continue;
+
+ tegra_xusb_padctl_disable_phy_sleepwalk(padctl, tegra->phys[i]);
+ }
+}
+
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ struct device *dev = tegra->dev;
+ bool wakeup = runtime ? true : device_may_wakeup(dev);
+ unsigned int i;
int err;
+ u32 usbcmd;
+
+ dev_dbg(dev, "entering ELPG\n");
+
+ usbcmd = readl(&xhci->op_regs->command);
+ usbcmd &= ~CMD_EIE;
+ writel(usbcmd, &xhci->op_regs->command);
err = tegra_xusb_check_ports(tegra);
if (err < 0) {
dev_err(tegra->dev, "not all ports suspended: %d\n", err);
- return err;
+ goto out;
}
err = xhci_suspend(xhci, wakeup);
if (err < 0) {
dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
- return err;
+ goto out;
}
tegra_xusb_save_context(tegra);
- tegra_xusb_phy_disable(tegra);
+
+ if (wakeup)
+ tegra_xhci_enable_phy_sleepwalk_wake(tegra);
+
+ tegra_xusb_powergate_partitions(tegra);
+
+ for (i = 0; i < tegra->num_phys; i++) {
+ if (!tegra->phys[i])
+ continue;
+
+ phy_power_off(tegra->phys[i]);
+ if (!wakeup)
+ phy_exit(tegra->phys[i]);
+ }
+
tegra_xusb_clk_disable(tegra);
- return 0;
+out:
+	if (!err) {
+		dev_dbg(tegra->dev, "entering ELPG done\n");
+	} else {
+ usbcmd = readl(&xhci->op_regs->command);
+ usbcmd |= CMD_EIE;
+ writel(usbcmd, &xhci->op_regs->command);
+
+ dev_dbg(tegra->dev, "entering ELPG failed\n");
+ pm_runtime_mark_last_busy(tegra->dev);
+ }
+
+ return err;
}
-static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool wakeup)
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ struct device *dev = tegra->dev;
+ bool wakeup = runtime ? true : device_may_wakeup(dev);
+ unsigned int i;
+ u32 usbcmd;
int err;
+ dev_dbg(dev, "exiting ELPG\n");
+ pm_runtime_mark_last_busy(tegra->dev);
+
err = tegra_xusb_clk_enable(tegra);
if (err < 0) {
dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
- return err;
+ goto out;
}
- err = tegra_xusb_phy_enable(tegra);
- if (err < 0) {
- dev_err(tegra->dev, "failed to enable PHYs: %d\n", err);
- goto disable_clk;
+ err = tegra_xusb_unpowergate_partitions(tegra);
+ if (err)
+ goto disable_clks;
+
+ if (wakeup)
+ tegra_xhci_disable_phy_wake(tegra);
+
+ for (i = 0; i < tegra->num_phys; i++) {
+ if (!tegra->phys[i])
+ continue;
+
+ if (!wakeup)
+ phy_init(tegra->phys[i]);
+
+ phy_power_on(tegra->phys[i]);
}
tegra_xusb_config(tegra);
@@ -1804,31 +2066,79 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool wakeup)
goto disable_phy;
}
- err = xhci_resume(xhci, true);
+ if (wakeup)
+ tegra_xhci_disable_phy_sleepwalk(tegra);
+
+ err = xhci_resume(xhci, 0);
if (err < 0) {
dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
goto disable_phy;
}
- return 0;
+ usbcmd = readl(&xhci->op_regs->command);
+ usbcmd |= CMD_EIE;
+ writel(usbcmd, &xhci->op_regs->command);
+
+ goto out;
disable_phy:
- tegra_xusb_phy_disable(tegra);
-disable_clk:
+ for (i = 0; i < tegra->num_phys; i++) {
+ if (!tegra->phys[i])
+ continue;
+
+ phy_power_off(tegra->phys[i]);
+ if (!wakeup)
+ phy_exit(tegra->phys[i]);
+ }
+ tegra_xusb_powergate_partitions(tegra);
+disable_clks:
tegra_xusb_clk_disable(tegra);
+out:
+ if (!err)
+ dev_dbg(dev, "exiting ELPG done\n");
+ else
+ dev_dbg(dev, "exiting ELPG failed\n");
+
return err;
}
static int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- bool wakeup = device_may_wakeup(dev);
int err;
synchronize_irq(tegra->mbox_irq);
mutex_lock(&tegra->lock);
- err = tegra_xusb_enter_elpg(tegra, wakeup);
+
+ if (pm_runtime_suspended(dev)) {
+ err = tegra_xusb_exit_elpg(tegra, true);
+ if (err < 0)
+ goto out;
+ }
+
+ err = tegra_xusb_enter_elpg(tegra, false);
+ if (err < 0) {
+ if (pm_runtime_suspended(dev)) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ goto out;
+ }
+
+out:
+ if (!err) {
+ tegra->suspended = true;
+ pm_runtime_disable(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (enable_irq_wake(tegra->padctl_irq))
+ dev_err(dev, "failed to enable padctl wakes\n");
+ }
+ }
+
mutex_unlock(&tegra->lock);
return err;
@@ -1837,11 +2147,56 @@ static int tegra_xusb_suspend(struct device *dev)
static int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- bool wakeup = device_may_wakeup(dev);
int err;
mutex_lock(&tegra->lock);
- err = tegra_xusb_exit_elpg(tegra, wakeup);
+
+ if (!tegra->suspended) {
+ mutex_unlock(&tegra->lock);
+ return 0;
+ }
+
+ err = tegra_xusb_exit_elpg(tegra, false);
+ if (err < 0) {
+ mutex_unlock(&tegra->lock);
+ return err;
+ }
+
+ if (device_may_wakeup(dev)) {
+ if (disable_irq_wake(tegra->padctl_irq))
+ dev_err(dev, "failed to disable padctl wakes\n");
+ }
+ tegra->suspended = false;
+ mutex_unlock(&tegra->lock);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int tegra_xusb_runtime_suspend(struct device *dev)
+{
+ struct tegra_xusb *tegra = dev_get_drvdata(dev);
+ int ret;
+
+ synchronize_irq(tegra->mbox_irq);
+ mutex_lock(&tegra->lock);
+ ret = tegra_xusb_enter_elpg(tegra, true);
+ mutex_unlock(&tegra->lock);
+
+ return ret;
+}
+
+static int tegra_xusb_runtime_resume(struct device *dev)
+{
+ struct tegra_xusb *tegra = dev_get_drvdata(dev);
+ int err;
+
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_exit_elpg(tegra, true);
mutex_unlock(&tegra->lock);
return err;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 27283654ca08..3618070eba78 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1361,12 +1361,17 @@ static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
urb->transfer_buffer_length,
dir);
- if (usb_urb_dir_in(urb))
+ if (usb_urb_dir_in(urb)) {
len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
urb->transfer_buffer,
buf_len,
0);
-
+ if (len != buf_len) {
+ xhci_dbg(hcd_to_xhci(hcd),
+ "Copy from tmp buf to urb sg list failed\n");
+ urb->actual_length = len;
+ }
+ }
urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
@@ -4840,7 +4845,6 @@ static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
if (xhci_update_timeout_for_endpoint(xhci, udev,
&alt->endpoint[j].desc, state, timeout))
return -E2BIG;
- continue;
}
return 0;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e417f5ce13d1..3c7d281672ae 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1526,6 +1526,12 @@ static inline const char *xhci_trb_type_string(u8 type)
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
(addr & (TRB_MAX_BUFF_SIZE - 1)))
#define MAX_SOFT_RETRY 3
+/*
+ * Limits on the number of consecutive isoc TRBs that may set the Block Event
+ * Interrupt (BEI) flag when the XHCI_AVOID_BEI quirk is in use.
+ */
+#define AVOID_BEI_INTERVAL_MIN 8
+#define AVOID_BEI_INTERVAL_MAX 32
struct xhci_segment {
union xhci_trb *trbs;
@@ -1663,10 +1669,6 @@ struct urb_priv {
* meaning 64 ring segments.
* Initial allocated size of the ERST, in number of entries */
#define ERST_NUM_SEGS 1
-/* Initial allocated size of the ERST, in number of entries */
-#define ERST_SIZE 64
-/* Initial number of event segment rings allocated */
-#define ERST_ENTRIES 1
/* Poll every 60 seconds */
#define POLL_TIMEOUT 60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
@@ -1772,6 +1774,7 @@ struct xhci_hcd {
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
+ u32 isoc_bei_interval;
int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
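
For context, and not part of the patch itself: the new isoc_bei_interval field,
bounded by AVOID_BEI_INTERVAL_MIN/MAX above, suggests a throttling policy where
most isoc TRBs set BEI and only every isoc_bei_interval-th one raises an
interrupt, with the interval shrinking as the event ring fills. A minimal
sketch of that idea, where td_index and ring_near_full are hypothetical inputs
rather than names taken from this patch:

/* Sketch only; td_index and ring_near_full are hypothetical inputs. */
static bool isoc_td_wants_bei(struct xhci_hcd *xhci, unsigned int td_index,
			      bool ring_near_full)
{
	if (!(xhci->quirks & XHCI_AVOID_BEI))
		return false;

	/* Interrupt more often when the event ring is filling up. */
	if (ring_near_full && xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
		xhci->isoc_bei_interval /= 2;

	/* Block the interrupt on all but every isoc_bei_interval-th TD. */
	return td_index % xhci->isoc_bei_interval != 0;
}
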
diff --git a/drivers/usb/isp1760/Kconfig b/drivers/usb/isp1760/Kconfig
index b1022cc490a2..2ed2b73291d1 100644
--- a/drivers/usb/isp1760/Kconfig
+++ b/drivers/usb/isp1760/Kconfig
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
config USB_ISP1760
- tristate "NXP ISP 1760/1761 support"
+ tristate "NXP ISP 1760/1761/1763 support"
depends on USB || USB_GADGET
+ select REGMAP_MMIO
help
- Say Y or M here if your system as an ISP1760 USB host controller
+	  Say Y or M here if your system has an ISP1760/1763 USB host controller
or an ISP1761 USB dual-role controller.
This driver does not support isochronous transfers or OTG.
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index fdeb4cf97cc5..ff07e2890692 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -2,12 +2,14 @@
/*
* Driver for the NXP ISP1760 chip
*
+ * Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Laurent Pinchart
* Copyright 2007 Sebastian Siewior
*
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Rui Miguel Silva <rui.silva@linaro.org>
*/
#include <linux/delay.h>
@@ -15,6 +17,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -23,10 +26,10 @@
#include "isp1760-regs.h"
#include "isp1760-udc.h"
-static void isp1760_init_core(struct isp1760_device *isp)
+static int isp1760_init_core(struct isp1760_device *isp)
{
- u32 otgctrl;
- u32 hwmode;
+ struct isp1760_hcd *hcd = &isp->hcd;
+ struct isp1760_udc *udc = &isp->udc;
/* Low-level chip reset */
if (isp->rst_gpio) {
@@ -39,24 +42,29 @@ static void isp1760_init_core(struct isp1760_device *isp)
* Reset the host controller, including the CPU interface
* configuration.
*/
- isp1760_write32(isp->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
+ isp1760_field_set(hcd->fields, SW_RESET_RESET_ALL);
msleep(100);
/* Setup HW Mode Control: This assumes a level active-low interrupt */
- hwmode = HW_DATA_BUS_32BIT;
+ if ((isp->devflags & ISP1760_FLAG_ANALOG_OC) && hcd->is_isp1763) {
+ dev_err(isp->dev, "isp1763 analog overcurrent not available\n");
+ return -EINVAL;
+ }
if (isp->devflags & ISP1760_FLAG_BUS_WIDTH_16)
- hwmode &= ~HW_DATA_BUS_32BIT;
+ isp1760_field_clear(hcd->fields, HW_DATA_BUS_WIDTH);
+ if (isp->devflags & ISP1760_FLAG_BUS_WIDTH_8)
+ isp1760_field_set(hcd->fields, HW_DATA_BUS_WIDTH);
if (isp->devflags & ISP1760_FLAG_ANALOG_OC)
- hwmode |= HW_ANA_DIGI_OC;
+ isp1760_field_set(hcd->fields, HW_ANA_DIGI_OC);
if (isp->devflags & ISP1760_FLAG_DACK_POL_HIGH)
- hwmode |= HW_DACK_POL_HIGH;
+ isp1760_field_set(hcd->fields, HW_DACK_POL_HIGH);
if (isp->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
- hwmode |= HW_DREQ_POL_HIGH;
+ isp1760_field_set(hcd->fields, HW_DREQ_POL_HIGH);
if (isp->devflags & ISP1760_FLAG_INTR_POL_HIGH)
- hwmode |= HW_INTR_HIGH_ACT;
+ isp1760_field_set(hcd->fields, HW_INTR_HIGH_ACT);
if (isp->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
- hwmode |= HW_INTR_EDGE_TRIG;
+ isp1760_field_set(hcd->fields, HW_INTR_EDGE_TRIG);
/*
* The ISP1761 has a dedicated DC IRQ line but supports sharing the HC
@@ -65,59 +73,425 @@ static void isp1760_init_core(struct isp1760_device *isp)
* spurious interrupts during HCD registration.
*/
if (isp->devflags & ISP1760_FLAG_ISP1761) {
- isp1760_write32(isp->regs, DC_MODE, 0);
- hwmode |= HW_COMN_IRQ;
+ isp1760_reg_write(udc->regs, ISP176x_DC_MODE, 0);
+ isp1760_field_set(hcd->fields, HW_COMN_IRQ);
}
/*
- * We have to set this first in case we're in 16-bit mode.
- * Write it twice to ensure correct upper bits if switching
- * to 16-bit mode.
- */
- isp1760_write32(isp->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_write32(isp->regs, HC_HW_MODE_CTRL, hwmode);
-
- /*
* PORT 1 Control register of the ISP1760 is the OTG control register
* on ISP1761.
*
* TODO: Really support OTG. For now we configure port 1 in device mode
- * when OTG is requested.
*/
- if ((isp->devflags & ISP1760_FLAG_ISP1761) &&
- (isp->devflags & ISP1760_FLAG_OTG_EN))
- otgctrl = ((HW_DM_PULLDOWN | HW_DP_PULLDOWN) << 16)
- | HW_OTG_DISABLE;
- else
- otgctrl = (HW_SW_SEL_HC_DC << 16)
- | (HW_VBUS_DRV | HW_SEL_CP_EXT);
-
- isp1760_write32(isp->regs, HC_PORT1_CTRL, otgctrl);
+ if (((isp->devflags & ISP1760_FLAG_ISP1761) ||
+ (isp->devflags & ISP1760_FLAG_ISP1763)) &&
+ (isp->devflags & ISP1760_FLAG_PERIPHERAL_EN)) {
+ isp1760_field_set(hcd->fields, HW_DM_PULLDOWN);
+ isp1760_field_set(hcd->fields, HW_DP_PULLDOWN);
+ isp1760_field_set(hcd->fields, HW_OTG_DISABLE);
+ } else {
+ isp1760_field_set(hcd->fields, HW_SW_SEL_HC_DC);
+ isp1760_field_set(hcd->fields, HW_VBUS_DRV);
+ isp1760_field_set(hcd->fields, HW_SEL_CP_EXT);
+ }
- dev_info(isp->dev, "bus width: %u, oc: %s\n",
+ dev_info(isp->dev, "%s bus width: %u, oc: %s\n",
+ hcd->is_isp1763 ? "isp1763" : "isp1760",
+ isp->devflags & ISP1760_FLAG_BUS_WIDTH_8 ? 8 :
isp->devflags & ISP1760_FLAG_BUS_WIDTH_16 ? 16 : 32,
+ hcd->is_isp1763 ? "not available" :
isp->devflags & ISP1760_FLAG_ANALOG_OC ? "analog" : "digital");
+
+ return 0;
}
void isp1760_set_pullup(struct isp1760_device *isp, bool enable)
{
- isp1760_write32(isp->regs, HW_OTG_CTRL_SET,
- enable ? HW_DP_PULLUP : HW_DP_PULLUP << 16);
+ struct isp1760_udc *udc = &isp->udc;
+
+ if (enable)
+ isp1760_field_set(udc->fields, HW_DP_PULLUP);
+ else
+ isp1760_field_set(udc->fields, HW_DP_PULLUP_CLEAR);
}
+/*
+ * ISP1760/61:
+ *
+ * 60kb divided in:
+ * - 32 blocks @ 256 bytes
+ * - 20 blocks @ 1024 bytes
+ * - 4 blocks @ 8192 bytes
+ */
+static const struct isp1760_memory_layout isp176x_memory_conf = {
+ .blocks[0] = 32,
+ .blocks_size[0] = 256,
+ .blocks[1] = 20,
+ .blocks_size[1] = 1024,
+ .blocks[2] = 4,
+ .blocks_size[2] = 8192,
+
+ .slot_num = 32,
+ .payload_blocks = 32 + 20 + 4,
+ .payload_area_size = 0xf000,
+};
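
A quick cross-check of the layout above (worked out here, not taken from the
patch): 32 * 256 + 20 * 1024 + 4 * 8192 = 8192 + 20480 + 32768 = 61440 bytes =
0xf000 (60 KiB), matching .payload_area_size, and the pool spans
32 + 20 + 4 = 56 payload blocks, matching .payload_blocks.
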
+
+/*
+ * ISP1763:
+ *
+ * 20kb divided in:
+ * - 8 blocks @ 256 bytes
+ * - 2 blocks @ 1024 bytes
+ * - 4 blocks @ 4096 bytes
+ */
+static const struct isp1760_memory_layout isp1763_memory_conf = {
+ .blocks[0] = 8,
+ .blocks_size[0] = 256,
+ .blocks[1] = 2,
+ .blocks_size[1] = 1024,
+ .blocks[2] = 4,
+ .blocks_size[2] = 4096,
+
+ .slot_num = 16,
+ .payload_blocks = 8 + 2 + 4,
+ .payload_area_size = 0x5000,
+};
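
Likewise for the ISP1763 layout: 8 * 256 + 2 * 1024 + 4 * 4096 =
2048 + 2048 + 16384 = 20480 bytes = 0x5000 (20 KiB), matching
.payload_area_size, with 8 + 2 + 4 = 14 payload blocks.
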
+
+static const struct regmap_range isp176x_hc_volatile_ranges[] = {
+ regmap_reg_range(ISP176x_HC_USBCMD, ISP176x_HC_ATL_PTD_LASTPTD),
+ regmap_reg_range(ISP176x_HC_BUFFER_STATUS, ISP176x_HC_MEMORY),
+ regmap_reg_range(ISP176x_HC_INTERRUPT, ISP176x_HC_OTG_CTRL_CLEAR),
+};
+
+static const struct regmap_access_table isp176x_hc_volatile_table = {
+ .yes_ranges = isp176x_hc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isp176x_hc_volatile_ranges),
+};
+
+static const struct regmap_config isp1760_hc_regmap_conf = {
+ .name = "isp1760-hc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = ISP176x_HC_OTG_CTRL_CLEAR,
+ .volatile_table = &isp176x_hc_volatile_table,
+};
+
+static const struct reg_field isp1760_hc_reg_fields[] = {
+ [HCS_PPC] = REG_FIELD(ISP176x_HC_HCSPARAMS, 4, 4),
+ [HCS_N_PORTS] = REG_FIELD(ISP176x_HC_HCSPARAMS, 0, 3),
+ [HCC_ISOC_CACHE] = REG_FIELD(ISP176x_HC_HCCPARAMS, 7, 7),
+ [HCC_ISOC_THRES] = REG_FIELD(ISP176x_HC_HCCPARAMS, 4, 6),
+ [CMD_LRESET] = REG_FIELD(ISP176x_HC_USBCMD, 7, 7),
+ [CMD_RESET] = REG_FIELD(ISP176x_HC_USBCMD, 1, 1),
+ [CMD_RUN] = REG_FIELD(ISP176x_HC_USBCMD, 0, 0),
+ [STS_PCD] = REG_FIELD(ISP176x_HC_USBSTS, 2, 2),
+ [HC_FRINDEX] = REG_FIELD(ISP176x_HC_FRINDEX, 0, 13),
+ [FLAG_CF] = REG_FIELD(ISP176x_HC_CONFIGFLAG, 0, 0),
+ [HC_ISO_PTD_DONEMAP] = REG_FIELD(ISP176x_HC_ISO_PTD_DONEMAP, 0, 31),
+ [HC_ISO_PTD_SKIPMAP] = REG_FIELD(ISP176x_HC_ISO_PTD_SKIPMAP, 0, 31),
+ [HC_ISO_PTD_LASTPTD] = REG_FIELD(ISP176x_HC_ISO_PTD_LASTPTD, 0, 31),
+ [HC_INT_PTD_DONEMAP] = REG_FIELD(ISP176x_HC_INT_PTD_DONEMAP, 0, 31),
+ [HC_INT_PTD_SKIPMAP] = REG_FIELD(ISP176x_HC_INT_PTD_SKIPMAP, 0, 31),
+ [HC_INT_PTD_LASTPTD] = REG_FIELD(ISP176x_HC_INT_PTD_LASTPTD, 0, 31),
+ [HC_ATL_PTD_DONEMAP] = REG_FIELD(ISP176x_HC_ATL_PTD_DONEMAP, 0, 31),
+ [HC_ATL_PTD_SKIPMAP] = REG_FIELD(ISP176x_HC_ATL_PTD_SKIPMAP, 0, 31),
+ [HC_ATL_PTD_LASTPTD] = REG_FIELD(ISP176x_HC_ATL_PTD_LASTPTD, 0, 31),
+ [PORT_OWNER] = REG_FIELD(ISP176x_HC_PORTSC1, 13, 13),
+ [PORT_POWER] = REG_FIELD(ISP176x_HC_PORTSC1, 12, 12),
+ [PORT_LSTATUS] = REG_FIELD(ISP176x_HC_PORTSC1, 10, 11),
+ [PORT_RESET] = REG_FIELD(ISP176x_HC_PORTSC1, 8, 8),
+ [PORT_SUSPEND] = REG_FIELD(ISP176x_HC_PORTSC1, 7, 7),
+ [PORT_RESUME] = REG_FIELD(ISP176x_HC_PORTSC1, 6, 6),
+ [PORT_PE] = REG_FIELD(ISP176x_HC_PORTSC1, 2, 2),
+ [PORT_CSC] = REG_FIELD(ISP176x_HC_PORTSC1, 1, 1),
+ [PORT_CONNECT] = REG_FIELD(ISP176x_HC_PORTSC1, 0, 0),
+ [ALL_ATX_RESET] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 31, 31),
+ [HW_ANA_DIGI_OC] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 15, 15),
+ [HW_COMN_IRQ] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 10, 10),
+ [HW_DATA_BUS_WIDTH] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 8, 8),
+ [HW_DACK_POL_HIGH] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 6, 6),
+ [HW_DREQ_POL_HIGH] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 5, 5),
+ [HW_INTR_HIGH_ACT] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 2, 2),
+ [HW_INTR_EDGE_TRIG] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 1, 1),
+ [HW_GLOBAL_INTR_EN] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 0, 0),
+ [HC_CHIP_REV] = REG_FIELD(ISP176x_HC_CHIP_ID, 16, 31),
+ [HC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_HC_CHIP_ID, 8, 15),
+ [HC_CHIP_ID_LOW] = REG_FIELD(ISP176x_HC_CHIP_ID, 0, 7),
+ [HC_SCRATCH] = REG_FIELD(ISP176x_HC_SCRATCH, 0, 31),
+ [SW_RESET_RESET_ALL] = REG_FIELD(ISP176x_HC_RESET, 0, 0),
+ [ISO_BUF_FILL] = REG_FIELD(ISP176x_HC_BUFFER_STATUS, 2, 2),
+ [INT_BUF_FILL] = REG_FIELD(ISP176x_HC_BUFFER_STATUS, 1, 1),
+ [ATL_BUF_FILL] = REG_FIELD(ISP176x_HC_BUFFER_STATUS, 0, 0),
+ [MEM_BANK_SEL] = REG_FIELD(ISP176x_HC_MEMORY, 16, 17),
+ [MEM_START_ADDR] = REG_FIELD(ISP176x_HC_MEMORY, 0, 15),
+ [HC_INTERRUPT] = REG_FIELD(ISP176x_HC_INTERRUPT, 0, 9),
+ [HC_ATL_IRQ_ENABLE] = REG_FIELD(ISP176x_HC_INTERRUPT_ENABLE, 8, 8),
+ [HC_INT_IRQ_ENABLE] = REG_FIELD(ISP176x_HC_INTERRUPT_ENABLE, 7, 7),
+ [HC_ISO_IRQ_MASK_OR] = REG_FIELD(ISP176x_HC_ISO_IRQ_MASK_OR, 0, 31),
+ [HC_INT_IRQ_MASK_OR] = REG_FIELD(ISP176x_HC_INT_IRQ_MASK_OR, 0, 31),
+ [HC_ATL_IRQ_MASK_OR] = REG_FIELD(ISP176x_HC_ATL_IRQ_MASK_OR, 0, 31),
+ [HC_ISO_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_ISO_IRQ_MASK_AND, 0, 31),
+ [HC_INT_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_INT_IRQ_MASK_AND, 0, 31),
+ [HC_ATL_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_ATL_IRQ_MASK_AND, 0, 31),
+ [HW_OTG_DISABLE] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 10, 10),
+ [HW_SW_SEL_HC_DC] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 7, 7),
+ [HW_VBUS_DRV] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 4, 4),
+ [HW_SEL_CP_EXT] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 3, 3),
+ [HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 2, 2),
+ [HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 1, 1),
+ [HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL_SET, 0, 0),
+ [HW_OTG_DISABLE_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 10, 10),
+ [HW_SW_SEL_HC_DC_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 7, 7),
+ [HW_VBUS_DRV_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 4, 4),
+ [HW_SEL_CP_EXT_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 3, 3),
+ [HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 2, 2),
+ [HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 1, 1),
+ [HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL_CLEAR, 0, 0),
+};
+
+static const struct reg_field isp1763_hc_reg_fields[] = {
+ [CMD_LRESET] = REG_FIELD(ISP1763_HC_USBCMD, 7, 7),
+ [CMD_RESET] = REG_FIELD(ISP1763_HC_USBCMD, 1, 1),
+ [CMD_RUN] = REG_FIELD(ISP1763_HC_USBCMD, 0, 0),
+ [STS_PCD] = REG_FIELD(ISP1763_HC_USBSTS, 2, 2),
+ [HC_FRINDEX] = REG_FIELD(ISP1763_HC_FRINDEX, 0, 13),
+ [FLAG_CF] = REG_FIELD(ISP1763_HC_CONFIGFLAG, 0, 0),
+ [HC_ISO_PTD_DONEMAP] = REG_FIELD(ISP1763_HC_ISO_PTD_DONEMAP, 0, 15),
+ [HC_ISO_PTD_SKIPMAP] = REG_FIELD(ISP1763_HC_ISO_PTD_SKIPMAP, 0, 15),
+ [HC_ISO_PTD_LASTPTD] = REG_FIELD(ISP1763_HC_ISO_PTD_LASTPTD, 0, 15),
+ [HC_INT_PTD_DONEMAP] = REG_FIELD(ISP1763_HC_INT_PTD_DONEMAP, 0, 15),
+ [HC_INT_PTD_SKIPMAP] = REG_FIELD(ISP1763_HC_INT_PTD_SKIPMAP, 0, 15),
+ [HC_INT_PTD_LASTPTD] = REG_FIELD(ISP1763_HC_INT_PTD_LASTPTD, 0, 15),
+ [HC_ATL_PTD_DONEMAP] = REG_FIELD(ISP1763_HC_ATL_PTD_DONEMAP, 0, 15),
+ [HC_ATL_PTD_SKIPMAP] = REG_FIELD(ISP1763_HC_ATL_PTD_SKIPMAP, 0, 15),
+ [HC_ATL_PTD_LASTPTD] = REG_FIELD(ISP1763_HC_ATL_PTD_LASTPTD, 0, 15),
+ [PORT_OWNER] = REG_FIELD(ISP1763_HC_PORTSC1, 13, 13),
+ [PORT_POWER] = REG_FIELD(ISP1763_HC_PORTSC1, 12, 12),
+ [PORT_LSTATUS] = REG_FIELD(ISP1763_HC_PORTSC1, 10, 11),
+ [PORT_RESET] = REG_FIELD(ISP1763_HC_PORTSC1, 8, 8),
+ [PORT_SUSPEND] = REG_FIELD(ISP1763_HC_PORTSC1, 7, 7),
+ [PORT_RESUME] = REG_FIELD(ISP1763_HC_PORTSC1, 6, 6),
+ [PORT_PE] = REG_FIELD(ISP1763_HC_PORTSC1, 2, 2),
+ [PORT_CSC] = REG_FIELD(ISP1763_HC_PORTSC1, 1, 1),
+ [PORT_CONNECT] = REG_FIELD(ISP1763_HC_PORTSC1, 0, 0),
+ [HW_DATA_BUS_WIDTH] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 4, 4),
+ [HW_DACK_POL_HIGH] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 6, 6),
+ [HW_DREQ_POL_HIGH] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 5, 5),
+ [HW_INTF_LOCK] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 3, 3),
+ [HW_INTR_HIGH_ACT] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 2, 2),
+ [HW_INTR_EDGE_TRIG] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 1, 1),
+ [HW_GLOBAL_INTR_EN] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 0, 0),
+ [SW_RESET_RESET_ATX] = REG_FIELD(ISP1763_HC_RESET, 3, 3),
+ [SW_RESET_RESET_ALL] = REG_FIELD(ISP1763_HC_RESET, 0, 0),
+ [HC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_HC_CHIP_ID, 0, 15),
+ [HC_CHIP_ID_LOW] = REG_FIELD(ISP1763_HC_CHIP_REV, 8, 15),
+ [HC_CHIP_REV] = REG_FIELD(ISP1763_HC_CHIP_REV, 0, 7),
+ [HC_SCRATCH] = REG_FIELD(ISP1763_HC_SCRATCH, 0, 15),
+ [ISO_BUF_FILL] = REG_FIELD(ISP1763_HC_BUFFER_STATUS, 2, 2),
+ [INT_BUF_FILL] = REG_FIELD(ISP1763_HC_BUFFER_STATUS, 1, 1),
+ [ATL_BUF_FILL] = REG_FIELD(ISP1763_HC_BUFFER_STATUS, 0, 0),
+ [MEM_START_ADDR] = REG_FIELD(ISP1763_HC_MEMORY, 0, 15),
+ [HC_DATA] = REG_FIELD(ISP1763_HC_DATA, 0, 15),
+ [HC_INTERRUPT] = REG_FIELD(ISP1763_HC_INTERRUPT, 0, 10),
+ [HC_ATL_IRQ_ENABLE] = REG_FIELD(ISP1763_HC_INTERRUPT_ENABLE, 8, 8),
+ [HC_INT_IRQ_ENABLE] = REG_FIELD(ISP1763_HC_INTERRUPT_ENABLE, 7, 7),
+ [HC_ISO_IRQ_MASK_OR] = REG_FIELD(ISP1763_HC_ISO_IRQ_MASK_OR, 0, 15),
+ [HC_INT_IRQ_MASK_OR] = REG_FIELD(ISP1763_HC_INT_IRQ_MASK_OR, 0, 15),
+ [HC_ATL_IRQ_MASK_OR] = REG_FIELD(ISP1763_HC_ATL_IRQ_MASK_OR, 0, 15),
+ [HC_ISO_IRQ_MASK_AND] = REG_FIELD(ISP1763_HC_ISO_IRQ_MASK_AND, 0, 15),
+ [HC_INT_IRQ_MASK_AND] = REG_FIELD(ISP1763_HC_INT_IRQ_MASK_AND, 0, 15),
+ [HC_ATL_IRQ_MASK_AND] = REG_FIELD(ISP1763_HC_ATL_IRQ_MASK_AND, 0, 15),
+ [HW_HC_2_DIS] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 15, 15),
+ [HW_OTG_DISABLE] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 10, 10),
+ [HW_SW_SEL_HC_DC] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 7, 7),
+ [HW_VBUS_DRV] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 4, 4),
+ [HW_SEL_CP_EXT] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 3, 3),
+ [HW_DM_PULLDOWN] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 2, 2),
+ [HW_DP_PULLDOWN] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 1, 1),
+ [HW_DP_PULLUP] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 0, 0),
+ [HW_HC_2_DIS_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 15, 15),
+ [HW_OTG_DISABLE_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 10, 10),
+ [HW_SW_SEL_HC_DC_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 7, 7),
+ [HW_VBUS_DRV_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 4, 4),
+ [HW_SEL_CP_EXT_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 3, 3),
+ [HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 2, 2),
+ [HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 1, 1),
+ [HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 0, 0),
+};
+
+static const struct regmap_range isp1763_hc_volatile_ranges[] = {
+ regmap_reg_range(ISP1763_HC_USBCMD, ISP1763_HC_ATL_PTD_LASTPTD),
+ regmap_reg_range(ISP1763_HC_BUFFER_STATUS, ISP1763_HC_DATA),
+ regmap_reg_range(ISP1763_HC_INTERRUPT, ISP1763_HC_OTG_CTRL_CLEAR),
+};
+
+static const struct regmap_access_table isp1763_hc_volatile_table = {
+ .yes_ranges = isp1763_hc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isp1763_hc_volatile_ranges),
+};
+
+static const struct regmap_config isp1763_hc_regmap_conf = {
+ .name = "isp1763-hc",
+ .reg_bits = 8,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .fast_io = true,
+ .max_register = ISP1763_HC_OTG_CTRL_CLEAR,
+ .volatile_table = &isp1763_hc_volatile_table,
+};
+
+static const struct regmap_range isp176x_dc_volatile_ranges[] = {
+ regmap_reg_range(ISP176x_DC_EPMAXPKTSZ, ISP176x_DC_EPTYPE),
+ regmap_reg_range(ISP176x_DC_BUFLEN, ISP176x_DC_EPINDEX),
+};
+
+static const struct regmap_access_table isp176x_dc_volatile_table = {
+ .yes_ranges = isp176x_dc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isp176x_dc_volatile_ranges),
+};
+
+static const struct regmap_config isp1761_dc_regmap_conf = {
+ .name = "isp1761-dc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = ISP176x_DC_TESTMODE,
+ .volatile_table = &isp176x_dc_volatile_table,
+};
+
+static const struct reg_field isp1761_dc_reg_fields[] = {
+ [DC_DEVEN] = REG_FIELD(ISP176x_DC_ADDRESS, 7, 7),
+ [DC_DEVADDR] = REG_FIELD(ISP176x_DC_ADDRESS, 0, 6),
+ [DC_VBUSSTAT] = REG_FIELD(ISP176x_DC_MODE, 8, 8),
+ [DC_SFRESET] = REG_FIELD(ISP176x_DC_MODE, 4, 4),
+ [DC_GLINTENA] = REG_FIELD(ISP176x_DC_MODE, 3, 3),
+ [DC_CDBGMOD_ACK] = REG_FIELD(ISP176x_DC_INTCONF, 6, 6),
+ [DC_DDBGMODIN_ACK] = REG_FIELD(ISP176x_DC_INTCONF, 4, 4),
+ [DC_DDBGMODOUT_ACK] = REG_FIELD(ISP176x_DC_INTCONF, 2, 2),
+ [DC_INTPOL] = REG_FIELD(ISP176x_DC_INTCONF, 0, 0),
+ [DC_IEPRXTX_7] = REG_FIELD(ISP176x_DC_INTENABLE, 25, 25),
+ [DC_IEPRXTX_6] = REG_FIELD(ISP176x_DC_INTENABLE, 23, 23),
+ [DC_IEPRXTX_5] = REG_FIELD(ISP176x_DC_INTENABLE, 21, 21),
+ [DC_IEPRXTX_4] = REG_FIELD(ISP176x_DC_INTENABLE, 19, 19),
+ [DC_IEPRXTX_3] = REG_FIELD(ISP176x_DC_INTENABLE, 17, 17),
+ [DC_IEPRXTX_2] = REG_FIELD(ISP176x_DC_INTENABLE, 15, 15),
+ [DC_IEPRXTX_1] = REG_FIELD(ISP176x_DC_INTENABLE, 13, 13),
+ [DC_IEPRXTX_0] = REG_FIELD(ISP176x_DC_INTENABLE, 11, 11),
+ [DC_IEP0SETUP] = REG_FIELD(ISP176x_DC_INTENABLE, 8, 8),
+ [DC_IEVBUS] = REG_FIELD(ISP176x_DC_INTENABLE, 7, 7),
+ [DC_IEHS_STA] = REG_FIELD(ISP176x_DC_INTENABLE, 5, 5),
+ [DC_IERESM] = REG_FIELD(ISP176x_DC_INTENABLE, 4, 4),
+ [DC_IESUSP] = REG_FIELD(ISP176x_DC_INTENABLE, 3, 3),
+ [DC_IEBRST] = REG_FIELD(ISP176x_DC_INTENABLE, 0, 0),
+ [DC_EP0SETUP] = REG_FIELD(ISP176x_DC_EPINDEX, 5, 5),
+ [DC_ENDPIDX] = REG_FIELD(ISP176x_DC_EPINDEX, 1, 4),
+ [DC_EPDIR] = REG_FIELD(ISP176x_DC_EPINDEX, 0, 0),
+ [DC_CLBUF] = REG_FIELD(ISP176x_DC_CTRLFUNC, 4, 4),
+ [DC_VENDP] = REG_FIELD(ISP176x_DC_CTRLFUNC, 3, 3),
+ [DC_DSEN] = REG_FIELD(ISP176x_DC_CTRLFUNC, 2, 2),
+ [DC_STATUS] = REG_FIELD(ISP176x_DC_CTRLFUNC, 1, 1),
+ [DC_STALL] = REG_FIELD(ISP176x_DC_CTRLFUNC, 0, 0),
+ [DC_BUFLEN] = REG_FIELD(ISP176x_DC_BUFLEN, 0, 15),
+ [DC_FFOSZ] = REG_FIELD(ISP176x_DC_EPMAXPKTSZ, 0, 10),
+ [DC_EPENABLE] = REG_FIELD(ISP176x_DC_EPTYPE, 3, 3),
+ [DC_ENDPTYP] = REG_FIELD(ISP176x_DC_EPTYPE, 0, 1),
+ [DC_UFRAMENUM] = REG_FIELD(ISP176x_DC_FRAMENUM, 11, 13),
+ [DC_FRAMENUM] = REG_FIELD(ISP176x_DC_FRAMENUM, 0, 10),
+ [DC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_DC_CHIPID, 16, 31),
+ [DC_CHIP_ID_LOW] = REG_FIELD(ISP176x_DC_CHIPID, 0, 15),
+ [DC_SCRATCH] = REG_FIELD(ISP176x_DC_SCRATCH, 0, 15),
+};
+
+static const struct regmap_range isp1763_dc_volatile_ranges[] = {
+ regmap_reg_range(ISP1763_DC_EPMAXPKTSZ, ISP1763_DC_EPTYPE),
+ regmap_reg_range(ISP1763_DC_BUFLEN, ISP1763_DC_EPINDEX),
+};
+
+static const struct regmap_access_table isp1763_dc_volatile_table = {
+ .yes_ranges = isp1763_dc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(isp1763_dc_volatile_ranges),
+};
+
+static const struct reg_field isp1763_dc_reg_fields[] = {
+ [DC_DEVEN] = REG_FIELD(ISP1763_DC_ADDRESS, 7, 7),
+ [DC_DEVADDR] = REG_FIELD(ISP1763_DC_ADDRESS, 0, 6),
+ [DC_VBUSSTAT] = REG_FIELD(ISP1763_DC_MODE, 8, 8),
+ [DC_SFRESET] = REG_FIELD(ISP1763_DC_MODE, 4, 4),
+ [DC_GLINTENA] = REG_FIELD(ISP1763_DC_MODE, 3, 3),
+ [DC_CDBGMOD_ACK] = REG_FIELD(ISP1763_DC_INTCONF, 6, 6),
+ [DC_DDBGMODIN_ACK] = REG_FIELD(ISP1763_DC_INTCONF, 4, 4),
+ [DC_DDBGMODOUT_ACK] = REG_FIELD(ISP1763_DC_INTCONF, 2, 2),
+ [DC_INTPOL] = REG_FIELD(ISP1763_DC_INTCONF, 0, 0),
+ [DC_IEPRXTX_7] = REG_FIELD(ISP1763_DC_INTENABLE, 25, 25),
+ [DC_IEPRXTX_6] = REG_FIELD(ISP1763_DC_INTENABLE, 23, 23),
+ [DC_IEPRXTX_5] = REG_FIELD(ISP1763_DC_INTENABLE, 21, 21),
+ [DC_IEPRXTX_4] = REG_FIELD(ISP1763_DC_INTENABLE, 19, 19),
+ [DC_IEPRXTX_3] = REG_FIELD(ISP1763_DC_INTENABLE, 17, 17),
+ [DC_IEPRXTX_2] = REG_FIELD(ISP1763_DC_INTENABLE, 15, 15),
+ [DC_IEPRXTX_1] = REG_FIELD(ISP1763_DC_INTENABLE, 13, 13),
+ [DC_IEPRXTX_0] = REG_FIELD(ISP1763_DC_INTENABLE, 11, 11),
+ [DC_IEP0SETUP] = REG_FIELD(ISP1763_DC_INTENABLE, 8, 8),
+ [DC_IEVBUS] = REG_FIELD(ISP1763_DC_INTENABLE, 7, 7),
+ [DC_IEHS_STA] = REG_FIELD(ISP1763_DC_INTENABLE, 5, 5),
+ [DC_IERESM] = REG_FIELD(ISP1763_DC_INTENABLE, 4, 4),
+ [DC_IESUSP] = REG_FIELD(ISP1763_DC_INTENABLE, 3, 3),
+ [DC_IEBRST] = REG_FIELD(ISP1763_DC_INTENABLE, 0, 0),
+ [DC_EP0SETUP] = REG_FIELD(ISP1763_DC_EPINDEX, 5, 5),
+ [DC_ENDPIDX] = REG_FIELD(ISP1763_DC_EPINDEX, 1, 4),
+ [DC_EPDIR] = REG_FIELD(ISP1763_DC_EPINDEX, 0, 0),
+ [DC_CLBUF] = REG_FIELD(ISP1763_DC_CTRLFUNC, 4, 4),
+ [DC_VENDP] = REG_FIELD(ISP1763_DC_CTRLFUNC, 3, 3),
+ [DC_DSEN] = REG_FIELD(ISP1763_DC_CTRLFUNC, 2, 2),
+ [DC_STATUS] = REG_FIELD(ISP1763_DC_CTRLFUNC, 1, 1),
+ [DC_STALL] = REG_FIELD(ISP1763_DC_CTRLFUNC, 0, 0),
+ [DC_BUFLEN] = REG_FIELD(ISP1763_DC_BUFLEN, 0, 15),
+ [DC_FFOSZ] = REG_FIELD(ISP1763_DC_EPMAXPKTSZ, 0, 10),
+ [DC_EPENABLE] = REG_FIELD(ISP1763_DC_EPTYPE, 3, 3),
+ [DC_ENDPTYP] = REG_FIELD(ISP1763_DC_EPTYPE, 0, 1),
+ [DC_UFRAMENUM] = REG_FIELD(ISP1763_DC_FRAMENUM, 11, 13),
+ [DC_FRAMENUM] = REG_FIELD(ISP1763_DC_FRAMENUM, 0, 10),
+ [DC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_DC_CHIPID_HIGH, 0, 15),
+ [DC_CHIP_ID_LOW] = REG_FIELD(ISP1763_DC_CHIPID_LOW, 0, 15),
+ [DC_SCRATCH] = REG_FIELD(ISP1763_DC_SCRATCH, 0, 15),
+};
+
+static const struct regmap_config isp1763_dc_regmap_conf = {
+ .name = "isp1763-dc",
+ .reg_bits = 8,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .fast_io = true,
+ .max_register = ISP1763_DC_TESTMODE,
+ .volatile_table = &isp1763_dc_volatile_table,
+};
+
int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
struct device *dev, unsigned int devflags)
{
+ const struct regmap_config *hc_regmap;
+ const struct reg_field *hc_reg_fields;
+ const struct regmap_config *dc_regmap;
+ const struct reg_field *dc_reg_fields;
struct isp1760_device *isp;
- bool udc_disabled = !(devflags & ISP1760_FLAG_ISP1761);
+ struct isp1760_hcd *hcd;
+ struct isp1760_udc *udc;
+ struct regmap_field *f;
+ bool udc_enabled;
int ret;
+ int i;
/*
	 * If neither the HCD nor the UDC is enabled, return an error, as no
* device would be registered.
*/
+ udc_enabled = ((devflags & ISP1760_FLAG_ISP1763) ||
+ (devflags & ISP1760_FLAG_ISP1761));
+
if ((!IS_ENABLED(CONFIG_USB_ISP1760_HCD) || usb_disabled()) &&
- (!IS_ENABLED(CONFIG_USB_ISP1761_UDC) || udc_disabled))
+ (!IS_ENABLED(CONFIG_USB_ISP1761_UDC) || !udc_enabled))
return -ENODEV;
isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
@@ -126,28 +500,81 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
isp->dev = dev;
isp->devflags = devflags;
+ hcd = &isp->hcd;
+ udc = &isp->udc;
+
+ hcd->is_isp1763 = !!(devflags & ISP1760_FLAG_ISP1763);
+ udc->is_isp1763 = !!(devflags & ISP1760_FLAG_ISP1763);
+
+ if (!hcd->is_isp1763 && (devflags & ISP1760_FLAG_BUS_WIDTH_8)) {
+ dev_err(dev, "isp1760/61 do not support data width 8\n");
+ return -EINVAL;
+ }
+
+ if (hcd->is_isp1763) {
+ hc_regmap = &isp1763_hc_regmap_conf;
+ hc_reg_fields = &isp1763_hc_reg_fields[0];
+ dc_regmap = &isp1763_dc_regmap_conf;
+ dc_reg_fields = &isp1763_dc_reg_fields[0];
+ } else {
+ hc_regmap = &isp1760_hc_regmap_conf;
+ hc_reg_fields = &isp1760_hc_reg_fields[0];
+ dc_regmap = &isp1761_dc_regmap_conf;
+ dc_reg_fields = &isp1761_dc_reg_fields[0];
+ }
isp->rst_gpio = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
if (IS_ERR(isp->rst_gpio))
return PTR_ERR(isp->rst_gpio);
- isp->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(isp->regs))
- return PTR_ERR(isp->regs);
+ hcd->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hcd->base))
+ return PTR_ERR(hcd->base);
+
+ hcd->regs = devm_regmap_init_mmio(dev, hcd->base, hc_regmap);
+ if (IS_ERR(hcd->regs))
+ return PTR_ERR(hcd->regs);
+
+ for (i = 0; i < HC_FIELD_MAX; i++) {
+ f = devm_regmap_field_alloc(dev, hcd->regs, hc_reg_fields[i]);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ hcd->fields[i] = f;
+ }
+
+ udc->regs = devm_regmap_init_mmio(dev, hcd->base, dc_regmap);
+ if (IS_ERR(udc->regs))
+ return PTR_ERR(udc->regs);
+
+ for (i = 0; i < DC_FIELD_MAX; i++) {
+ f = devm_regmap_field_alloc(dev, udc->regs, dc_reg_fields[i]);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ udc->fields[i] = f;
+ }
+
+ if (hcd->is_isp1763)
+ hcd->memory_layout = &isp1763_memory_conf;
+ else
+ hcd->memory_layout = &isp176x_memory_conf;
- isp1760_init_core(isp);
+ ret = isp1760_init_core(isp);
+ if (ret < 0)
+ return ret;
if (IS_ENABLED(CONFIG_USB_ISP1760_HCD) && !usb_disabled()) {
- ret = isp1760_hcd_register(&isp->hcd, isp->regs, mem, irq,
+ ret = isp1760_hcd_register(hcd, mem, irq,
irqflags | IRQF_SHARED, dev);
if (ret < 0)
return ret;
}
- if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) {
+ if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && udc_enabled) {
ret = isp1760_udc_register(isp, irq, irqflags);
if (ret < 0) {
- isp1760_hcd_unregister(&isp->hcd);
+ isp1760_hcd_unregister(hcd);
return ret;
}
}
diff --git a/drivers/usb/isp1760/isp1760-core.h b/drivers/usb/isp1760/isp1760-core.h
index d9a0a4cc467c..91e0ee3992a7 100644
--- a/drivers/usb/isp1760/isp1760-core.h
+++ b/drivers/usb/isp1760/isp1760-core.h
@@ -2,18 +2,21 @@
/*
* Driver for the NXP ISP1760 chip
*
+ * Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Laurent Pinchart
* Copyright 2007 Sebastian Siewior
*
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Rui Miguel Silva <rui.silva@linaro.org>
*/
#ifndef _ISP1760_CORE_H_
#define _ISP1760_CORE_H_
#include <linux/ioport.h>
+#include <linux/regmap.h>
#include "isp1760-hcd.h"
#include "isp1760-udc.h"
@@ -27,18 +30,19 @@ struct gpio_desc;
* a sane default configuration.
*/
#define ISP1760_FLAG_BUS_WIDTH_16 0x00000002 /* 16-bit data bus width */
-#define ISP1760_FLAG_OTG_EN 0x00000004 /* Port 1 supports OTG */
+#define ISP1760_FLAG_PERIPHERAL_EN	0x00000004 /* Port 1 supports Peripheral mode */
#define ISP1760_FLAG_ANALOG_OC 0x00000008 /* Analog overcurrent */
#define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */
#define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */
#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
+#define ISP1760_FLAG_ISP1763 0x00000200 /* Chip is ISP1763 */
+#define ISP1760_FLAG_BUS_WIDTH_8 0x00000400 /* 8-bit data bus width */
struct isp1760_device {
struct device *dev;
- void __iomem *regs;
unsigned int devflags;
struct gpio_desc *rst_gpio;
@@ -52,14 +56,42 @@ void isp1760_unregister(struct device *dev);
void isp1760_set_pullup(struct isp1760_device *isp, bool enable);
-static inline u32 isp1760_read32(void __iomem *base, u32 reg)
+static inline u32 isp1760_field_read(struct regmap_field **fields, u32 field)
{
- return readl(base + reg);
+ unsigned int val;
+
+ regmap_field_read(fields[field], &val);
+
+ return val;
+}
+
+static inline void isp1760_field_write(struct regmap_field **fields, u32 field,
+ u32 val)
+{
+ regmap_field_write(fields[field], val);
+}
+
+static inline void isp1760_field_set(struct regmap_field **fields, u32 field)
+{
+ isp1760_field_write(fields, field, 0xFFFFFFFF);
}
-static inline void isp1760_write32(void __iomem *base, u32 reg, u32 val)
+static inline void isp1760_field_clear(struct regmap_field **fields, u32 field)
{
- writel(val, base + reg);
+ isp1760_field_write(fields, field, 0);
}
+static inline u32 isp1760_reg_read(struct regmap *regs, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(regs, reg, &val);
+
+ return val;
+}
+
+static inline void isp1760_reg_write(struct regmap *regs, u32 reg, u32 val)
+{
+ regmap_write(regs, reg, val);
+}
#endif
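
With the reg_field tables and the inline helpers above, individual register
bits become symbolic field accesses. A hypothetical usage sketch (not part of
the patch), assuming a populated struct isp1760_hcd *hcd:

/* Hypothetical helper, for illustration only. */
static u32 example_read_chip_rev(struct isp1760_hcd *hcd)
{
	isp1760_field_set(hcd->fields, PORT_POWER);	/* e.g. power up port 1 */
	return isp1760_field_read(hcd->fields, HC_CHIP_REV);
}
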
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 33ae656c4b68..27168b4a4ef2 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -11,6 +11,8 @@
*
* (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
*
+ * Copyright 2021 Linaro, Rui Miguel Silva <rui.silva@linaro.org>
+ *
*/
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -44,6 +46,9 @@ static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
return *(struct isp1760_hcd **)hcd->hcd_priv;
}
+#define dw_to_le32(x) (cpu_to_le32((__force u32)x))
+#define le32_to_dw(x) ((__force __dw)(le32_to_cpu(x)))
+
/* urb state*/
#define DELETE_URB (0x0008)
#define NO_TRANSFER_ACTIVE (0xffffffff)
@@ -60,50 +65,69 @@ struct ptd {
__dw dw6;
__dw dw7;
};
+
+struct ptd_le32 {
+ __le32 dw0;
+ __le32 dw1;
+ __le32 dw2;
+ __le32 dw3;
+ __le32 dw4;
+ __le32 dw5;
+ __le32 dw6;
+ __le32 dw7;
+};
+
#define PTD_OFFSET 0x0400
#define ISO_PTD_OFFSET 0x0400
#define INT_PTD_OFFSET 0x0800
#define ATL_PTD_OFFSET 0x0c00
#define PAYLOAD_OFFSET 0x1000
-
-/* ATL */
-/* DW0 */
-#define DW0_VALID_BIT 1
-#define FROM_DW0_VALID(x) ((x) & 0x01)
-#define TO_DW0_LENGTH(x) (((u32) x) << 3)
-#define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
-#define TO_DW0_MULTI(x) (((u32) x) << 29)
-#define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
+#define ISP_BANK_0 0x00
+#define ISP_BANK_1 0x01
+#define ISP_BANK_2 0x02
+#define ISP_BANK_3 0x03
+
+#define TO_DW(x) ((__force __dw)x)
+#define TO_U32(x) ((__force u32)x)
+
+ /* ATL */
+ /* DW0 */
+#define DW0_VALID_BIT TO_DW(1)
+#define FROM_DW0_VALID(x) (TO_U32(x) & 0x01)
+#define TO_DW0_LENGTH(x) TO_DW((((u32)x) << 3))
+#define TO_DW0_MAXPACKET(x) TO_DW((((u32)x) << 18))
+#define TO_DW0_MULTI(x) TO_DW((((u32)x) << 29))
+#define TO_DW0_ENDPOINT(x) TO_DW((((u32)x) << 31))
/* DW1 */
-#define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
-#define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
-#define DW1_TRANS_BULK ((u32) 2 << 12)
-#define DW1_TRANS_INT ((u32) 3 << 12)
-#define DW1_TRANS_SPLIT ((u32) 1 << 14)
-#define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
-#define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
-#define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
+#define TO_DW1_DEVICE_ADDR(x) TO_DW((((u32)x) << 3))
+#define TO_DW1_PID_TOKEN(x) TO_DW((((u32)x) << 10))
+#define DW1_TRANS_BULK TO_DW(((u32)2 << 12))
+#define DW1_TRANS_INT TO_DW(((u32)3 << 12))
+#define DW1_TRANS_SPLIT TO_DW(((u32)1 << 14))
+#define DW1_SE_USB_LOSPEED TO_DW(((u32)2 << 16))
+#define TO_DW1_PORT_NUM(x) TO_DW((((u32)x) << 18))
+#define TO_DW1_HUB_NUM(x) TO_DW((((u32)x) << 25))
/* DW2 */
-#define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
-#define TO_DW2_RL(x) ((x) << 25)
-#define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
+#define TO_DW2_DATA_START_ADDR(x) TO_DW((((u32)x) << 8))
+#define TO_DW2_RL(x) TO_DW(((x) << 25))
+#define FROM_DW2_RL(x) ((TO_U32(x) >> 25) & 0xf)
/* DW3 */
-#define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
-#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
-#define TO_DW3_NAKCOUNT(x) ((x) << 19)
-#define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
-#define TO_DW3_CERR(x) ((x) << 23)
-#define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
-#define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
-#define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
-#define TO_DW3_PING(x) ((x) << 26)
-#define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
-#define DW3_ERROR_BIT (1 << 28)
-#define DW3_BABBLE_BIT (1 << 29)
-#define DW3_HALT_BIT (1 << 30)
-#define DW3_ACTIVE_BIT (1 << 31)
-#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
+#define FROM_DW3_NRBYTESTRANSFERRED(x) TO_U32((x) & 0x3fff)
+#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) TO_U32((x) & 0x07ff)
+#define TO_DW3_NAKCOUNT(x) TO_DW(((x) << 19))
+#define FROM_DW3_NAKCOUNT(x) ((TO_U32(x) >> 19) & 0xf)
+#define TO_DW3_CERR(x) TO_DW(((x) << 23))
+#define FROM_DW3_CERR(x) ((TO_U32(x) >> 23) & 0x3)
+#define TO_DW3_DATA_TOGGLE(x) TO_DW(((x) << 25))
+#define FROM_DW3_DATA_TOGGLE(x) ((TO_U32(x) >> 25) & 0x1)
+#define TO_DW3_PING(x) TO_DW(((x) << 26))
+#define FROM_DW3_PING(x) ((TO_U32(x) >> 26) & 0x1)
+#define DW3_ERROR_BIT TO_DW((1 << 28))
+#define DW3_BABBLE_BIT TO_DW((1 << 29))
+#define DW3_HALT_BIT TO_DW((1 << 30))
+#define DW3_ACTIVE_BIT TO_DW((1 << 31))
+#define FROM_DW3_ACTIVE(x) ((TO_U32(x) >> 31) & 0x01)
#define INT_UNDERRUN (1 << 2)
#define INT_BABBLE (1 << 1)
@@ -116,7 +140,7 @@ struct ptd {
/* Errata 1 */
#define RL_COUNTER (0)
#define NAK_COUNTER (0)
-#define ERR_COUNTER (2)
+#define ERR_COUNTER (3)
struct isp1760_qtd {
u8 packet_type;
@@ -158,17 +182,124 @@ struct urb_listitem {
struct urb *urb;
};
+static const u32 isp1763_hc_portsc1_fields[] = {
+ [PORT_OWNER] = BIT(13),
+ [PORT_POWER] = BIT(12),
+ [PORT_LSTATUS] = BIT(10),
+ [PORT_RESET] = BIT(8),
+ [PORT_SUSPEND] = BIT(7),
+ [PORT_RESUME] = BIT(6),
+ [PORT_PE] = BIT(2),
+ [PORT_CSC] = BIT(1),
+ [PORT_CONNECT] = BIT(0),
+};
+
+/*
+ * Access functions for the isp176x register regmap fields.
+ */
+static u32 isp1760_hcd_read(struct usb_hcd *hcd, u32 field)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ return isp1760_field_read(priv->fields, field);
+}
+
/*
- * Access functions for isp176x registers (addresses 0..0x03FF).
+ * On the isp1763 we need to write the values directly to the portsc1
+ * register so that the write takes effect together with the other bits.
*/
-static u32 reg_read32(void __iomem *base, u32 reg)
+static void isp1760_hcd_portsc1_set_clear(struct isp1760_hcd *priv, u32 field,
+ u32 val)
+{
+ u32 bit = isp1763_hc_portsc1_fields[field];
+ u32 port_status = readl(priv->base + ISP1763_HC_PORTSC1);
+
+ if (val)
+ writel(port_status | bit, priv->base + ISP1763_HC_PORTSC1);
+ else
+ writel(port_status & ~bit, priv->base + ISP1763_HC_PORTSC1);
+}
+
+static void isp1760_hcd_write(struct usb_hcd *hcd, u32 field, u32 val)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (unlikely(priv->is_isp1763 &&
+ (field >= PORT_OWNER && field <= PORT_CONNECT)))
+ return isp1760_hcd_portsc1_set_clear(priv, field, val);
+
+ isp1760_field_write(priv->fields, field, val);
+}
+
+static void isp1760_hcd_set(struct usb_hcd *hcd, u32 field)
+{
+ isp1760_hcd_write(hcd, field, 0xFFFFFFFF);
+}
+
+static void isp1760_hcd_clear(struct usb_hcd *hcd, u32 field)
{
- return isp1760_read32(base, reg);
+ isp1760_hcd_write(hcd, field, 0);
+}
+
+static int isp1760_hcd_set_and_wait(struct usb_hcd *hcd, u32 field,
+ u32 timeout_us)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 val;
+
+ isp1760_hcd_set(hcd, field);
+
+ return regmap_field_read_poll_timeout(priv->fields[field], val,
+ val, 10, timeout_us);
+}
+
+static int isp1760_hcd_set_and_wait_swap(struct usb_hcd *hcd, u32 field,
+ u32 timeout_us)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 val;
+
+ isp1760_hcd_set(hcd, field);
+
+ return regmap_field_read_poll_timeout(priv->fields[field], val,
+ !val, 10, timeout_us);
+}
+
+static int isp1760_hcd_clear_and_wait(struct usb_hcd *hcd, u32 field,
+ u32 timeout_us)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 val;
+
+ isp1760_hcd_clear(hcd, field);
+
+ return regmap_field_read_poll_timeout(priv->fields[field], val,
+ !val, 10, timeout_us);
+}
+
+static bool isp1760_hcd_is_set(struct usb_hcd *hcd, u32 field)
+{
+ return !!isp1760_hcd_read(hcd, field);
+}
+
+static bool isp1760_hcd_ppc_is_set(struct usb_hcd *hcd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (priv->is_isp1763)
+ return true;
+
+ return isp1760_hcd_is_set(hcd, HCS_PPC);
}
-static void reg_write32(void __iomem *base, u32 reg, u32 val)
+static u32 isp1760_hcd_n_ports(struct usb_hcd *hcd)
{
- isp1760_write32(base, reg, val);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (priv->is_isp1763)
+ return 1;
+
+ return isp1760_hcd_read(hcd, HCS_N_PORTS);
}
/*
@@ -176,7 +307,7 @@ static void reg_write32(void __iomem *base, u32 reg, u32 val)
*
* bank_reads8() reads memory locations prefetched by an earlier write to
* HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
- * bank optimizations, you should use the more generic mem_reads8() below.
+ * bank optimizations, you should use the more generic mem_read() below.
*
* For access to ptd memory, use the specialized ptd_read() and ptd_write()
* below.
@@ -196,7 +327,7 @@ static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
if (src_offset < PAYLOAD_OFFSET) {
while (bytes >= 4) {
- *dst = le32_to_cpu(__raw_readl(src));
+ *dst = readl_relaxed(src);
bytes -= 4;
src++;
dst++;
@@ -217,7 +348,7 @@ static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
* allocated.
*/
if (src_offset < PAYLOAD_OFFSET)
- val = le32_to_cpu(__raw_readl(src));
+ val = readl_relaxed(src);
else
val = __raw_readl(src);
@@ -231,16 +362,59 @@ static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
}
}
-static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
- u32 bytes)
+static void isp1760_mem_read(struct usb_hcd *hcd, u32 src_offset, void *dst,
+ u32 bytes)
{
- reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
- ndelay(90);
- bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ isp1760_hcd_write(hcd, MEM_BANK_SEL, ISP_BANK_0);
+ isp1760_hcd_write(hcd, MEM_START_ADDR, src_offset);
+ ndelay(100);
+
+ bank_reads8(priv->base, src_offset, ISP_BANK_0, dst, bytes);
}
-static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
- __u32 const *src, u32 bytes)
+/*
+ * The ISP1763 has no bank-based direct access to host controller memory and
+ * must go through the HC_DATA register instead; data reads and writes are
+ * done accordingly, using 16-bit accesses.
+ */
+static void isp1763_mem_read(struct usb_hcd *hcd, u16 srcaddr,
+ u16 *dstptr, u32 bytes)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ /* Write the starting device address to the hcd memory register */
+ isp1760_reg_write(priv->regs, ISP1763_HC_MEMORY, srcaddr);
+ ndelay(100); /* Delay between consecutive access */
+
+	/* As long as there are at least 16 bits left to read ... */
+ while (bytes >= 2) {
+ *dstptr = __raw_readw(priv->base + ISP1763_HC_DATA);
+ bytes -= 2;
+ dstptr++;
+ }
+
+ /* If there are no more bytes to read, return */
+ if (bytes <= 0)
+ return;
+
+ *((u8 *)dstptr) = (u8)(readw(priv->base + ISP1763_HC_DATA) & 0xFF);
+}
+
+static void mem_read(struct usb_hcd *hcd, u32 src_offset, __u32 *dst,
+ u32 bytes)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (!priv->is_isp1763)
+ return isp1760_mem_read(hcd, src_offset, (u16 *)dst, bytes);
+
+ isp1763_mem_read(hcd, (u16)src_offset, (u16 *)dst, bytes);
+}
+
+static void isp1760_mem_write(void __iomem *dst_base, u32 dst_offset,
+ __u32 const *src, u32 bytes)
{
__u32 __iomem *dst;
@@ -248,7 +422,7 @@ static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
if (dst_offset < PAYLOAD_OFFSET) {
while (bytes >= 4) {
- __raw_writel(cpu_to_le32(*src), dst);
+ writel_relaxed(*src, dst);
bytes -= 4;
src++;
dst++;
@@ -269,74 +443,168 @@ static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
*/
if (dst_offset < PAYLOAD_OFFSET)
- __raw_writel(cpu_to_le32(*src), dst);
+ writel_relaxed(*src, dst);
else
__raw_writel(*src, dst);
}
+static void isp1763_mem_write(struct usb_hcd *hcd, u16 dstaddr, u16 *src,
+ u32 bytes)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ /* Write the starting device address to the hcd memory register */
+ isp1760_reg_write(priv->regs, ISP1763_HC_MEMORY, dstaddr);
+ ndelay(100); /* Delay between consecutive access */
+
+ while (bytes >= 2) {
+ /* Get and write the data; then adjust the data ptr and len */
+ __raw_writew(*src, priv->base + ISP1763_HC_DATA);
+ bytes -= 2;
+ src++;
+ }
+
+ /* If there are no more bytes to process, return */
+ if (bytes <= 0)
+ return;
+
+ /*
+	 * The only way to get here is if there is a single byte left;
+	 * get it and write it to the data register.
+ */
+ writew(*((u8 *)src), priv->base + ISP1763_HC_DATA);
+}
+
+static void mem_write(struct usb_hcd *hcd, u32 dst_offset, __u32 *src,
+ u32 bytes)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (!priv->is_isp1763)
+ return isp1760_mem_write(priv->base, dst_offset, src, bytes);
+
+ isp1763_mem_write(hcd, dst_offset, (u16 *)src, bytes);
+}
+
/*
* Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
* INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
*/
-static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
- struct ptd *ptd)
+static void isp1760_ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
{
- reg_write32(base, HC_MEMORY_REG,
- ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
+ u16 src_offset = ptd_offset + slot * sizeof(*ptd);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ isp1760_hcd_write(hcd, MEM_BANK_SEL, ISP_BANK_0);
+ isp1760_hcd_write(hcd, MEM_START_ADDR, src_offset);
ndelay(90);
- bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
- (void *) ptd, sizeof(*ptd));
+
+ bank_reads8(priv->base, src_offset, ISP_BANK_0, (void *)ptd,
+ sizeof(*ptd));
+}
+
+static void isp1763_ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ u16 src_offset = ptd_offset + slot * sizeof(*ptd);
+ struct ptd_le32 le32_ptd;
+
+ isp1763_mem_read(hcd, src_offset, (u16 *)&le32_ptd, sizeof(le32_ptd));
+ /* Normalize the data obtained */
+ ptd->dw0 = le32_to_dw(le32_ptd.dw0);
+ ptd->dw1 = le32_to_dw(le32_ptd.dw1);
+ ptd->dw2 = le32_to_dw(le32_ptd.dw2);
+ ptd->dw3 = le32_to_dw(le32_ptd.dw3);
+ ptd->dw4 = le32_to_dw(le32_ptd.dw4);
+ ptd->dw5 = le32_to_dw(le32_ptd.dw5);
+ ptd->dw6 = le32_to_dw(le32_ptd.dw6);
+ ptd->dw7 = le32_to_dw(le32_ptd.dw7);
+}
+
+static void ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (!priv->is_isp1763)
+ return isp1760_ptd_read(hcd, ptd_offset, slot, ptd);
+
+ isp1763_ptd_read(hcd, ptd_offset, slot, ptd);
+}
+
+static void isp1763_ptd_write(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
+ struct ptd *cpu_ptd)
+{
+ u16 dst_offset = ptd_offset + slot * sizeof(*cpu_ptd);
+ struct ptd_le32 ptd;
+
+ ptd.dw0 = dw_to_le32(cpu_ptd->dw0);
+ ptd.dw1 = dw_to_le32(cpu_ptd->dw1);
+ ptd.dw2 = dw_to_le32(cpu_ptd->dw2);
+ ptd.dw3 = dw_to_le32(cpu_ptd->dw3);
+ ptd.dw4 = dw_to_le32(cpu_ptd->dw4);
+ ptd.dw5 = dw_to_le32(cpu_ptd->dw5);
+ ptd.dw6 = dw_to_le32(cpu_ptd->dw6);
+ ptd.dw7 = dw_to_le32(cpu_ptd->dw7);
+
+ isp1763_mem_write(hcd, dst_offset, (u16 *)&ptd.dw0,
+ 8 * sizeof(ptd.dw0));
}
-static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
- struct ptd *ptd)
+static void isp1760_ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
{
- mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
- &ptd->dw1, 7*sizeof(ptd->dw1));
- /* Make sure dw0 gets written last (after other dw's and after payload)
- since it contains the enable bit */
+ u32 dst_offset = ptd_offset + slot * sizeof(*ptd);
+
+ /*
+ * Make sure dw0 gets written last (after other dw's and after payload)
+ * since it contains the enable bit
+ */
+ isp1760_mem_write(base, dst_offset + sizeof(ptd->dw0),
+ (__force u32 *)&ptd->dw1, 7 * sizeof(ptd->dw1));
wmb();
- mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
- sizeof(ptd->dw0));
+ isp1760_mem_write(base, dst_offset, (__force u32 *)&ptd->dw0,
+ sizeof(ptd->dw0));
}
+static void ptd_write(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ if (!priv->is_isp1763)
+ return isp1760_ptd_write(priv->base, ptd_offset, slot, ptd);
+
+ isp1763_ptd_write(hcd, ptd_offset, slot, ptd);
+}
/* memory management of the 60kb on the chip from 0x1000 to 0xffff */
static void init_memory(struct isp1760_hcd *priv)
{
- int i, curr;
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
+ int i, j, curr;
u32 payload_addr;
payload_addr = PAYLOAD_OFFSET;
- for (i = 0; i < BLOCK_1_NUM; i++) {
- priv->memory_pool[i].start = payload_addr;
- priv->memory_pool[i].size = BLOCK_1_SIZE;
- priv->memory_pool[i].free = 1;
- payload_addr += priv->memory_pool[i].size;
- }
-
- curr = i;
- for (i = 0; i < BLOCK_2_NUM; i++) {
- priv->memory_pool[curr + i].start = payload_addr;
- priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
- priv->memory_pool[curr + i].free = 1;
- payload_addr += priv->memory_pool[curr + i].size;
- }
- curr = i;
- for (i = 0; i < BLOCK_3_NUM; i++) {
- priv->memory_pool[curr + i].start = payload_addr;
- priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
- priv->memory_pool[curr + i].free = 1;
- payload_addr += priv->memory_pool[curr + i].size;
+ for (i = 0, curr = 0; i < ARRAY_SIZE(mem->blocks); i++) {
+ for (j = 0; j < mem->blocks[i]; j++, curr++) {
+ priv->memory_pool[curr + j].start = payload_addr;
+ priv->memory_pool[curr + j].size = mem->blocks_size[i];
+ priv->memory_pool[curr + j].free = 1;
+ payload_addr += priv->memory_pool[curr + j].size;
+ }
}
- WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
+ WARN_ON(payload_addr - priv->memory_pool[0].start >
+ mem->payload_area_size);
}
static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
int i;
WARN_ON(qtd->payload_addr);
@@ -344,7 +612,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
if (!qtd->length)
return;
- for (i = 0; i < BLOCKS; i++) {
+ for (i = 0; i < mem->payload_blocks; i++) {
if (priv->memory_pool[i].size >= qtd->length &&
priv->memory_pool[i].free) {
priv->memory_pool[i].free = 0;
@@ -357,12 +625,13 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
int i;
if (!qtd->payload_addr)
return;
- for (i = 0; i < BLOCKS; i++) {
+ for (i = 0; i < mem->payload_blocks; i++) {
if (priv->memory_pool[i].start == qtd->payload_addr) {
WARN_ON(priv->memory_pool[i].free);
priv->memory_pool[i].free = 1;
@@ -377,34 +646,15 @@ static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
qtd->payload_addr = 0;
}
-static int handshake(struct usb_hcd *hcd, u32 reg,
- u32 mask, u32 done, int usec)
-{
- u32 result;
- int ret;
-
- ret = readl_poll_timeout_atomic(hcd->regs + reg, result,
- ((result & mask) == done ||
- result == U32_MAX), 1, usec);
- if (result == U32_MAX)
- return -ENODEV;
-
- return ret;
-}
-
/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 command = reg_read32(hcd->regs, HC_USBCMD);
-
- command |= CMD_RESET;
- reg_write32(hcd->regs, HC_USBCMD, command);
hcd->state = HC_STATE_HALT;
priv->next_statechange = jiffies;
- return handshake(hcd, HC_USBCMD, CMD_RESET, 0, 250 * 1000);
+ return isp1760_hcd_set_and_wait_swap(hcd, CMD_RESET, 250 * 1000);
}
static struct isp1760_qh *qh_alloc(gfp_t flags)
@@ -432,8 +682,9 @@ static void qh_free(struct isp1760_qh *qh)
/* one-time init, only for memory state */
static int priv_init(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 hcc_params;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 isoc_cache;
+ u32 isoc_thres;
int i;
spin_lock_init(&priv->lock);
@@ -447,13 +698,20 @@ static int priv_init(struct usb_hcd *hcd)
*/
priv->periodic_size = DEFAULT_I_TDPS;
+ if (priv->is_isp1763) {
+ priv->i_thresh = 2;
+ return 0;
+ }
+
/* controllers may cache some of the periodic schedule ... */
- hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
+ isoc_cache = isp1760_hcd_read(hcd, HCC_ISOC_CACHE);
+ isoc_thres = isp1760_hcd_read(hcd, HCC_ISOC_THRES);
+
/* full frame cache */
- if (HCC_ISOC_CACHE(hcc_params))
+ if (isoc_cache)
priv->i_thresh = 8;
else /* N microframes cached */
- priv->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
+ priv->i_thresh = 2 + isoc_thres;
return 0;
}
@@ -461,15 +719,24 @@ static int priv_init(struct usb_hcd *hcd)
static int isp1760_hc_setup(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 atx_reset;
int result;
- u32 scratch, hwmode;
+ u32 scratch;
+ u32 pattern;
+
+ if (priv->is_isp1763)
+ pattern = 0xcafe;
+ else
+ pattern = 0xdeadcafe;
+
+ isp1760_hcd_write(hcd, HC_SCRATCH, pattern);
- reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
/* Change bus pattern */
- scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
- scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
- if (scratch != 0xdeadbabe) {
- dev_err(hcd->self.controller, "Scratch test failed.\n");
+ scratch = isp1760_hcd_read(hcd, HC_CHIP_ID_HIGH);
+ dev_err(hcd->self.controller, "Scratch test 0x%08x\n", scratch);
+ scratch = isp1760_hcd_read(hcd, HC_SCRATCH);
+ if (scratch != pattern) {
+ dev_err(hcd->self.controller, "Scratch test failed. 0x%08x\n", scratch);
return -ENODEV;
}
@@ -481,10 +748,13 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
* the host controller through the EHCI USB Command register. The device
* has been reset in core code anyway, so this shouldn't matter.
*/
- reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
- reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
- reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
- reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ isp1760_hcd_clear(hcd, ISO_BUF_FILL);
+ isp1760_hcd_clear(hcd, INT_BUF_FILL);
+ isp1760_hcd_clear(hcd, ATL_BUF_FILL);
+
+ isp1760_hcd_set(hcd, HC_ATL_PTD_SKIPMAP);
+ isp1760_hcd_set(hcd, HC_INT_PTD_SKIPMAP);
+ isp1760_hcd_set(hcd, HC_ISO_PTD_SKIPMAP);
result = ehci_reset(hcd);
if (result)
@@ -493,14 +763,26 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
/* Step 11 passed */
/* ATL reset */
- hwmode = reg_read32(hcd->regs, HC_HW_MODE_CTRL) & ~ALL_ATX_RESET;
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
+ if (priv->is_isp1763)
+ atx_reset = SW_RESET_RESET_ATX;
+ else
+ atx_reset = ALL_ATX_RESET;
+
+ isp1760_hcd_set(hcd, atx_reset);
mdelay(10);
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
+ isp1760_hcd_clear(hcd, atx_reset);
- reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
+ if (priv->is_isp1763) {
+ isp1760_hcd_set(hcd, HW_OTG_DISABLE);
+ isp1760_hcd_set(hcd, HW_SW_SEL_HC_DC_CLEAR);
+ isp1760_hcd_set(hcd, HW_HC_2_DIS_CLEAR);
+ mdelay(10);
- priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
+ isp1760_hcd_set(hcd, HW_INTF_LOCK);
+ }
+
+ isp1760_hcd_set(hcd, HC_INT_IRQ_ENABLE);
+ isp1760_hcd_set(hcd, HC_ATL_IRQ_ENABLE);
return priv_init(hcd);
}
@@ -553,7 +835,7 @@ static void create_ptd_atl(struct isp1760_qh *qh,
ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
/* DW1 */
- ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
+ ptd->dw1 = TO_DW((usb_pipeendpoint(qtd->urb->pipe) >> 1));
ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
@@ -575,7 +857,7 @@ static void create_ptd_atl(struct isp1760_qh *qh,
/* SE bit for Split INT transfers */
if (usb_pipeint(qtd->urb->pipe) &&
(qtd->urb->dev->speed == USB_SPEED_LOW))
- ptd->dw1 |= 2 << 16;
+ ptd->dw1 |= DW1_SE_USB_LOSPEED;
rl = 0;
nak = 0;
@@ -647,14 +929,14 @@ static void transform_add_int(struct isp1760_qh *qh,
* that number come from? 0xff seems to work fine...
*/
/* ptd->dw5 = 0x1c; */
- ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
+ ptd->dw5 = TO_DW(0xff); /* Execute Complete Split on any uFrame */
}
period = period >> 1;/* Ensure equal or shorter period than requested */
period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
- ptd->dw2 |= period;
- ptd->dw4 = usof;
+ ptd->dw2 |= TO_DW(period);
+ ptd->dw4 = TO_DW(usof);
}
static void create_ptd_int(struct isp1760_qh *qh,
@@ -720,41 +1002,45 @@ static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
struct ptd *ptd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
int skip_map;
- WARN_ON((slot < 0) || (slot > 31));
+ WARN_ON((slot < 0) || (slot > mem->slot_num - 1));
WARN_ON(qtd->length && !qtd->payload_addr);
WARN_ON(slots[slot].qtd);
WARN_ON(slots[slot].qh);
WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
+ if (priv->is_isp1763)
+ ndelay(100);
+
/* Make sure done map has not triggered from some unlinked transfer */
if (ptd_offset == ATL_PTD_OFFSET) {
- priv->atl_done_map |= reg_read32(hcd->regs,
- HC_ATL_PTD_DONEMAP_REG);
+ skip_map = isp1760_hcd_read(hcd, HC_ATL_PTD_SKIPMAP);
+ isp1760_hcd_write(hcd, HC_ATL_PTD_SKIPMAP,
+ skip_map | (1 << slot));
+ priv->atl_done_map |= isp1760_hcd_read(hcd, HC_ATL_PTD_DONEMAP);
priv->atl_done_map &= ~(1 << slot);
} else {
- priv->int_done_map |= reg_read32(hcd->regs,
- HC_INT_PTD_DONEMAP_REG);
+ skip_map = isp1760_hcd_read(hcd, HC_INT_PTD_SKIPMAP);
+ isp1760_hcd_write(hcd, HC_INT_PTD_SKIPMAP,
+ skip_map | (1 << slot));
+ priv->int_done_map |= isp1760_hcd_read(hcd, HC_INT_PTD_DONEMAP);
priv->int_done_map &= ~(1 << slot);
}
+ skip_map &= ~(1 << slot);
qh->slot = slot;
qtd->status = QTD_XFER_STARTED;
slots[slot].timestamp = jiffies;
slots[slot].qtd = qtd;
slots[slot].qh = qh;
- ptd_write(hcd->regs, ptd_offset, slot, ptd);
+ ptd_write(hcd, ptd_offset, slot, ptd);
- if (ptd_offset == ATL_PTD_OFFSET) {
- skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
- skip_map &= ~(1 << qh->slot);
- reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
- } else {
- skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
- skip_map &= ~(1 << qh->slot);
- reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
- }
+ if (ptd_offset == ATL_PTD_OFFSET)
+ isp1760_hcd_write(hcd, HC_ATL_PTD_SKIPMAP, skip_map);
+ else
+ isp1760_hcd_write(hcd, HC_INT_PTD_SKIPMAP, skip_map);
}
static int is_short_bulk(struct isp1760_qtd *qtd)
@@ -766,9 +1052,9 @@ static int is_short_bulk(struct isp1760_qtd *qtd)
static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct list_head *urb_list)
{
- int last_qtd;
struct isp1760_qtd *qtd, *qtd_next;
struct urb_listitem *urb_listitem;
+ int last_qtd;
list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
if (qtd->status < QTD_XFER_COMPLETE)
@@ -783,9 +1069,9 @@ static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
if (qtd->actual_length) {
switch (qtd->packet_type) {
case IN_PID:
- mem_reads8(hcd->regs, qtd->payload_addr,
- qtd->data_buffer,
- qtd->actual_length);
+ mem_read(hcd, qtd->payload_addr,
+ qtd->data_buffer,
+ qtd->actual_length);
fallthrough;
case OUT_PID:
qtd->urb->actual_length +=
@@ -829,6 +1115,8 @@ static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
+ int slot_num = mem->slot_num;
int ptd_offset;
struct isp1760_slotinfo *slots;
int curr_slot, free_slot;
@@ -855,7 +1143,7 @@ static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
}
free_slot = -1;
- for (curr_slot = 0; curr_slot < 32; curr_slot++) {
+ for (curr_slot = 0; curr_slot < slot_num; curr_slot++) {
if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
free_slot = curr_slot;
if (slots[curr_slot].qh == qh)
@@ -870,11 +1158,10 @@ static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
if ((qtd->length) && (!qtd->payload_addr))
break;
- if ((qtd->length) &&
- ((qtd->packet_type == SETUP_PID) ||
- (qtd->packet_type == OUT_PID))) {
- mem_writes8(hcd->regs, qtd->payload_addr,
- qtd->data_buffer, qtd->length);
+ if (qtd->length && (qtd->packet_type == SETUP_PID ||
+ qtd->packet_type == OUT_PID)) {
+ mem_write(hcd, qtd->payload_addr,
+ qtd->data_buffer, qtd->length);
}
qtd->status = QTD_PAYLOAD_ALLOC;
@@ -887,7 +1174,7 @@ static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
"available for transfer\n", __func__);
*/
/* Start xfer for this endpoint if not already done */
- if ((curr_slot > 31) && (free_slot > -1)) {
+ if ((curr_slot > slot_num - 1) && (free_slot > -1)) {
if (usb_pipeint(qtd->urb->pipe))
create_ptd_int(qh, qtd, &ptd);
else
@@ -977,10 +1264,10 @@ static void schedule_ptds(struct usb_hcd *hcd)
static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
struct urb *urb)
{
- __dw dw4;
+ u32 dw4;
int i;
- dw4 = ptd->dw4;
+ dw4 = TO_U32(ptd->dw4);
dw4 >>= 8;
/* FIXME: ISP1761 datasheet does not say what to do with these. Do we
@@ -1074,9 +1361,9 @@ static void handle_done_ptds(struct usb_hcd *hcd)
int modified;
int skip_map;
- skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ skip_map = isp1760_hcd_read(hcd, HC_INT_PTD_SKIPMAP);
priv->int_done_map &= ~skip_map;
- skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ skip_map = isp1760_hcd_read(hcd, HC_ATL_PTD_SKIPMAP);
priv->atl_done_map &= ~skip_map;
modified = priv->int_done_map || priv->atl_done_map;
@@ -1094,7 +1381,7 @@ static void handle_done_ptds(struct usb_hcd *hcd)
continue;
}
ptd_offset = INT_PTD_OFFSET;
- ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
+ ptd_read(hcd, INT_PTD_OFFSET, slot, &ptd);
state = check_int_transfer(hcd, &ptd,
slots[slot].qtd->urb);
} else {
@@ -1109,7 +1396,7 @@ static void handle_done_ptds(struct usb_hcd *hcd)
continue;
}
ptd_offset = ATL_PTD_OFFSET;
- ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ ptd_read(hcd, ATL_PTD_OFFSET, slot, &ptd);
state = check_atl_transfer(hcd, &ptd,
slots[slot].qtd->urb);
}
@@ -1134,7 +1421,7 @@ static void handle_done_ptds(struct usb_hcd *hcd)
qtd->status = QTD_XFER_COMPLETE;
if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
- is_short_bulk(qtd))
+ is_short_bulk(qtd))
qtd = NULL;
else
qtd = list_entry(qtd->qtd_list.next,
@@ -1202,25 +1489,30 @@ static void handle_done_ptds(struct usb_hcd *hcd)
static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 imask;
irqreturn_t irqret = IRQ_NONE;
+ u32 int_reg;
+ u32 imask;
spin_lock(&priv->lock);
if (!(hcd->state & HC_STATE_RUNNING))
goto leave;
- imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
+ imask = isp1760_hcd_read(hcd, HC_INTERRUPT);
if (unlikely(!imask))
goto leave;
- reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
- priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
- priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
+ int_reg = priv->is_isp1763 ? ISP1763_HC_INTERRUPT :
+ ISP176x_HC_INTERRUPT;
+ isp1760_reg_write(priv->regs, int_reg, imask);
+
+ priv->int_done_map |= isp1760_hcd_read(hcd, HC_INT_PTD_DONEMAP);
+ priv->atl_done_map |= isp1760_hcd_read(hcd, HC_ATL_PTD_DONEMAP);
handle_done_ptds(hcd);
irqret = IRQ_HANDLED;
+
leave:
spin_unlock(&priv->lock);
@@ -1261,17 +1553,18 @@ static void errata2_function(struct timer_list *unused)
{
struct usb_hcd *hcd = errata2_timer_hcd;
struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
int slot;
struct ptd ptd;
unsigned long spinflags;
spin_lock_irqsave(&priv->lock, spinflags);
- for (slot = 0; slot < 32; slot++)
+ for (slot = 0; slot < mem->slot_num; slot++)
if (priv->atl_slots[slot].qh && time_after(jiffies,
priv->atl_slots[slot].timestamp +
msecs_to_jiffies(SLOT_TIMEOUT))) {
- ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ ptd_read(hcd, ATL_PTD_OFFSET, slot, &ptd);
if (!FROM_DW0_VALID(ptd.dw0) &&
!FROM_DW3_ACTIVE(ptd.dw3))
priv->atl_done_map |= 1 << slot;
@@ -1286,35 +1579,120 @@ static void errata2_function(struct timer_list *unused)
add_timer(&errata2_timer);
}
+static int isp1763_run(struct usb_hcd *hcd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int retval;
+ u32 chipid_h;
+ u32 chipid_l;
+ u32 chip_rev;
+ u32 ptd_atl_int;
+ u32 ptd_iso;
+
+ hcd->uses_new_polling = 1;
+ hcd->state = HC_STATE_RUNNING;
+
+ chipid_h = isp1760_hcd_read(hcd, HC_CHIP_ID_HIGH);
+ chipid_l = isp1760_hcd_read(hcd, HC_CHIP_ID_LOW);
+ chip_rev = isp1760_hcd_read(hcd, HC_CHIP_REV);
+ dev_info(hcd->self.controller, "USB ISP %02x%02x HW rev. %d started\n",
+ chipid_h, chipid_l, chip_rev);
+
+ isp1760_hcd_clear(hcd, ISO_BUF_FILL);
+ isp1760_hcd_clear(hcd, INT_BUF_FILL);
+ isp1760_hcd_clear(hcd, ATL_BUF_FILL);
+
+ isp1760_hcd_set(hcd, HC_ATL_PTD_SKIPMAP);
+ isp1760_hcd_set(hcd, HC_INT_PTD_SKIPMAP);
+ isp1760_hcd_set(hcd, HC_ISO_PTD_SKIPMAP);
+ ndelay(100);
+ isp1760_hcd_clear(hcd, HC_ATL_PTD_DONEMAP);
+ isp1760_hcd_clear(hcd, HC_INT_PTD_DONEMAP);
+ isp1760_hcd_clear(hcd, HC_ISO_PTD_DONEMAP);
+
+ isp1760_hcd_set(hcd, HW_OTG_DISABLE);
+ isp1760_reg_write(priv->regs, ISP1763_HC_OTG_CTRL_CLEAR, BIT(7));
+ isp1760_reg_write(priv->regs, ISP1763_HC_OTG_CTRL_CLEAR, BIT(15));
+ mdelay(10);
+
+ isp1760_hcd_set(hcd, HC_INT_IRQ_ENABLE);
+ isp1760_hcd_set(hcd, HC_ATL_IRQ_ENABLE);
+
+ isp1760_hcd_set(hcd, HW_GLOBAL_INTR_EN);
+
+ isp1760_hcd_clear(hcd, HC_ATL_IRQ_MASK_AND);
+ isp1760_hcd_clear(hcd, HC_INT_IRQ_MASK_AND);
+ isp1760_hcd_clear(hcd, HC_ISO_IRQ_MASK_AND);
+
+ isp1760_hcd_set(hcd, HC_ATL_IRQ_MASK_OR);
+ isp1760_hcd_set(hcd, HC_INT_IRQ_MASK_OR);
+ isp1760_hcd_set(hcd, HC_ISO_IRQ_MASK_OR);
+
+ ptd_atl_int = 0x8000;
+ ptd_iso = 0x0001;
+
+ isp1760_hcd_write(hcd, HC_ATL_PTD_LASTPTD, ptd_atl_int);
+ isp1760_hcd_write(hcd, HC_INT_PTD_LASTPTD, ptd_atl_int);
+ isp1760_hcd_write(hcd, HC_ISO_PTD_LASTPTD, ptd_iso);
+
+ isp1760_hcd_set(hcd, ATL_BUF_FILL);
+ isp1760_hcd_set(hcd, INT_BUF_FILL);
+
+ isp1760_hcd_clear(hcd, CMD_LRESET);
+ isp1760_hcd_clear(hcd, CMD_RESET);
+
+ retval = isp1760_hcd_set_and_wait(hcd, CMD_RUN, 250 * 1000);
+ if (retval)
+ return retval;
+
+ down_write(&ehci_cf_port_reset_rwsem);
+ retval = isp1760_hcd_set_and_wait(hcd, FLAG_CF, 250 * 1000);
+ up_write(&ehci_cf_port_reset_rwsem);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
static int isp1760_run(struct usb_hcd *hcd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int retval;
- u32 temp;
- u32 command;
- u32 chipid;
+ u32 chipid_h;
+ u32 chipid_l;
+ u32 chip_rev;
+ u32 ptd_atl_int;
+ u32 ptd_iso;
+
+ /*
+ * The ISP1763 differs in the setup sequence and in the order used to
+ * enable the ports, disable OTG, set up the buffers, and configure the
+ * ATL, INT and ISO status. So, just handle it in a separate sequence.
+ */
+ if (priv->is_isp1763)
+ return isp1763_run(hcd);
hcd->uses_new_polling = 1;
hcd->state = HC_STATE_RUNNING;
/* Set PTD interrupt AND & OR maps */
- reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
- reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
- reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
- reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
+ isp1760_hcd_clear(hcd, HC_ATL_IRQ_MASK_AND);
+ isp1760_hcd_clear(hcd, HC_INT_IRQ_MASK_AND);
+ isp1760_hcd_clear(hcd, HC_ISO_IRQ_MASK_AND);
+
+ isp1760_hcd_set(hcd, HC_ATL_IRQ_MASK_OR);
+ isp1760_hcd_set(hcd, HC_INT_IRQ_MASK_OR);
+ isp1760_hcd_set(hcd, HC_ISO_IRQ_MASK_OR);
+
/* step 23 passed */
- temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
+ isp1760_hcd_set(hcd, HW_GLOBAL_INTR_EN);
- command = reg_read32(hcd->regs, HC_USBCMD);
- command &= ~(CMD_LRESET|CMD_RESET);
- command |= CMD_RUN;
- reg_write32(hcd->regs, HC_USBCMD, command);
+ isp1760_hcd_clear(hcd, CMD_LRESET);
+ isp1760_hcd_clear(hcd, CMD_RESET);
- retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
+ retval = isp1760_hcd_set_and_wait(hcd, CMD_RUN, 250 * 1000);
if (retval)
return retval;
@@ -1324,9 +1702,8 @@ static int isp1760_run(struct usb_hcd *hcd)
* the semaphore while doing so.
*/
down_write(&ehci_cf_port_reset_rwsem);
- reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
- retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
+ retval = isp1760_hcd_set_and_wait(hcd, FLAG_CF, 250 * 1000);
up_write(&ehci_cf_port_reset_rwsem);
if (retval)
return retval;
@@ -1336,21 +1713,28 @@ static int isp1760_run(struct usb_hcd *hcd)
errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
add_timer(&errata2_timer);
- chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
- dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
- chipid & 0xffff, chipid >> 16);
+ chipid_h = isp1760_hcd_read(hcd, HC_CHIP_ID_HIGH);
+ chipid_l = isp1760_hcd_read(hcd, HC_CHIP_ID_LOW);
+ chip_rev = isp1760_hcd_read(hcd, HC_CHIP_REV);
+ dev_info(hcd->self.controller, "USB ISP %02x%02x HW rev. %d started\n",
+ chipid_h, chipid_l, chip_rev);
/* PTD Register Init Part 2, Step 28 */
/* Setup registers controlling PTD checking */
- reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
- reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
- reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
- reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
- reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
- ATL_BUF_FILL | INT_BUF_FILL);
+ ptd_atl_int = 0x80000000;
+ ptd_iso = 0x00000001;
+
+ isp1760_hcd_write(hcd, HC_ATL_PTD_LASTPTD, ptd_atl_int);
+ isp1760_hcd_write(hcd, HC_INT_PTD_LASTPTD, ptd_atl_int);
+ isp1760_hcd_write(hcd, HC_ISO_PTD_LASTPTD, ptd_iso);
+
+ isp1760_hcd_set(hcd, HC_ATL_PTD_SKIPMAP);
+ isp1760_hcd_set(hcd, HC_INT_PTD_SKIPMAP);
+ isp1760_hcd_set(hcd, HC_ISO_PTD_SKIPMAP);
+
+ isp1760_hcd_set(hcd, ATL_BUF_FILL);
+ isp1760_hcd_set(hcd, INT_BUF_FILL);
/* GRR this is run-once init(), being done every time the HC starts.
* So long as they're part of class devices, we can't do it init()
@@ -1363,8 +1747,6 @@ static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
{
qtd->data_buffer = databuffer;
- if (len > MAX_PAYLOAD_SIZE)
- len = MAX_PAYLOAD_SIZE;
qtd->length = len;
return qtd->length;
@@ -1388,6 +1770,8 @@ static void qtd_list_free(struct list_head *qtd_list)
static void packetize_urb(struct usb_hcd *hcd,
struct urb *urb, struct list_head *head, gfp_t flags)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ const struct isp1760_memory_layout *mem = priv->memory_layout;
struct isp1760_qtd *qtd;
void *buf;
int len, maxpacketsize;
@@ -1440,6 +1824,10 @@ static void packetize_urb(struct usb_hcd *hcd,
qtd = qtd_alloc(flags, urb, packet_type);
if (!qtd)
goto cleanup;
+
+ if (len > mem->blocks_size[ISP176x_BLOCK_NUM - 1])
+ len = mem->blocks_size[ISP176x_BLOCK_NUM - 1];
+
this_qtd_len = qtd_fill(qtd, buf, len);
list_add_tail(&qtd->qtd_list, head);
@@ -1584,15 +1972,16 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
/* We need to forcefully reclaim the slot since some transfers never
return, e.g. interrupt transfers and NAKed bulk transfers. */
if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
- skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ skip_map = isp1760_hcd_read(hcd, HC_ATL_PTD_SKIPMAP);
skip_map |= (1 << qh->slot);
- reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
+ isp1760_hcd_write(hcd, HC_ATL_PTD_SKIPMAP, skip_map);
+ ndelay(100);
priv->atl_slots[qh->slot].qh = NULL;
priv->atl_slots[qh->slot].qtd = NULL;
} else {
- skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ skip_map = isp1760_hcd_read(hcd, HC_INT_PTD_SKIPMAP);
skip_map |= (1 << qh->slot);
- reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
+ isp1760_hcd_write(hcd, HC_INT_PTD_SKIPMAP, skip_map);
priv->int_slots[qh->slot].qh = NULL;
priv->int_slots[qh->slot].qtd = NULL;
}
@@ -1705,8 +2094,7 @@ out:
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 temp, status = 0;
- u32 mask;
+ u32 status = 0;
int retval = 1;
unsigned long flags;
@@ -1716,17 +2104,13 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
/* init status to no-changes */
buf[0] = 0;
- mask = PORT_CSC;
spin_lock_irqsave(&priv->lock, flags);
- temp = reg_read32(hcd->regs, HC_PORTSC1);
- if (temp & PORT_OWNER) {
- if (temp & PORT_CSC) {
- temp &= ~PORT_CSC;
- reg_write32(hcd->regs, HC_PORTSC1, temp);
- goto done;
- }
+ if (isp1760_hcd_is_set(hcd, PORT_OWNER) &&
+ isp1760_hcd_is_set(hcd, PORT_CSC)) {
+ isp1760_hcd_clear(hcd, PORT_CSC);
+ goto done;
}
/*
@@ -1735,11 +2119,9 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
* high-speed device is switched over to the companion
* controller by the user.
*/
-
- if ((temp & mask) != 0
- || ((temp & PORT_RESUME) != 0
- && time_after_eq(jiffies,
- priv->reset_done))) {
+ if (isp1760_hcd_is_set(hcd, PORT_CSC) ||
+ (isp1760_hcd_is_set(hcd, PORT_RESUME) &&
+ time_after_eq(jiffies, priv->reset_done))) {
buf [0] |= 1 << (0 + 1);
status = STS_PCD;
}
@@ -1752,9 +2134,11 @@ done:
static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
struct usb_hub_descriptor *desc)
{
- int ports = HCS_N_PORTS(priv->hcs_params);
+ int ports;
u16 temp;
+ ports = isp1760_hcd_n_ports(priv->hcd);
+
desc->bDescriptorType = USB_DT_HUB;
/* priv 1.0, 2.3.9 says 20ms max */
desc->bPwrOn2PwrGood = 10;
@@ -1770,7 +2154,7 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
/* per-port overcurrent reporting */
temp = HUB_CHAR_INDV_PORT_OCPM;
- if (HCS_PPC(priv->hcs_params))
+ if (isp1760_hcd_ppc_is_set(priv->hcd))
/* per-port power control */
temp |= HUB_CHAR_INDV_PORT_LPSM;
else
@@ -1781,38 +2165,37 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
-static int check_reset_complete(struct usb_hcd *hcd, int index,
- int port_status)
+static void check_reset_complete(struct usb_hcd *hcd, int index)
{
- if (!(port_status & PORT_CONNECT))
- return port_status;
+ if (!(isp1760_hcd_is_set(hcd, PORT_CONNECT)))
+ return;
/* if reset finished and it's still not enabled -- handoff */
- if (!(port_status & PORT_PE)) {
-
+ if (!isp1760_hcd_is_set(hcd, PORT_PE)) {
dev_info(hcd->self.controller,
- "port %d full speed --> companion\n",
- index + 1);
+ "port %d full speed --> companion\n", index + 1);
- port_status |= PORT_OWNER;
- port_status &= ~PORT_RWC_BITS;
- reg_write32(hcd->regs, HC_PORTSC1, port_status);
+ isp1760_hcd_set(hcd, PORT_OWNER);
- } else
+ isp1760_hcd_clear(hcd, PORT_CSC);
+ } else {
dev_info(hcd->self.controller, "port %d high speed\n",
- index + 1);
+ index + 1);
+ }
- return port_status;
+ return;
}
static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- int ports = HCS_N_PORTS(priv->hcs_params);
- u32 temp, status;
+ u32 status;
unsigned long flags;
int retval = 0;
+ int ports;
+
+ ports = isp1760_hcd_n_ports(hcd);
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
@@ -1837,7 +2220,6 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = reg_read32(hcd->regs, HC_PORTSC1);
/*
* Even if OWNER is set, so the port is owned by the
@@ -1848,22 +2230,22 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
+ isp1760_hcd_clear(hcd, PORT_PE);
break;
case USB_PORT_FEAT_C_ENABLE:
/* XXX error? */
break;
case USB_PORT_FEAT_SUSPEND:
- if (temp & PORT_RESET)
+ if (isp1760_hcd_is_set(hcd, PORT_RESET))
goto error;
- if (temp & PORT_SUSPEND) {
- if ((temp & PORT_PE) == 0)
+ if (isp1760_hcd_is_set(hcd, PORT_SUSPEND)) {
+ if (!isp1760_hcd_is_set(hcd, PORT_PE))
goto error;
/* resume signaling for 20 msec */
- temp &= ~(PORT_RWC_BITS);
- reg_write32(hcd->regs, HC_PORTSC1,
- temp | PORT_RESUME);
+ isp1760_hcd_clear(hcd, PORT_CSC);
+ isp1760_hcd_set(hcd, PORT_RESUME);
+
priv->reset_done = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
}
@@ -1872,12 +2254,11 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* we auto-clear this feature */
break;
case USB_PORT_FEAT_POWER:
- if (HCS_PPC(priv->hcs_params))
- reg_write32(hcd->regs, HC_PORTSC1,
- temp & ~PORT_POWER);
+ if (isp1760_hcd_ppc_is_set(hcd))
+ isp1760_hcd_clear(hcd, PORT_POWER);
break;
case USB_PORT_FEAT_C_CONNECTION:
- reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
+ isp1760_hcd_set(hcd, PORT_CSC);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
/* XXX error ?*/
@@ -1888,7 +2269,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
default:
goto error;
}
- reg_read32(hcd->regs, HC_USBCMD);
+ isp1760_hcd_read(hcd, CMD_RUN);
break;
case GetHubDescriptor:
isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
@@ -1903,15 +2284,13 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
wIndex--;
status = 0;
- temp = reg_read32(hcd->regs, HC_PORTSC1);
/* wPortChange bits */
- if (temp & PORT_CSC)
+ if (isp1760_hcd_is_set(hcd, PORT_CSC))
status |= USB_PORT_STAT_C_CONNECTION << 16;
-
/* whoever resumes must GetPortStatus to complete it!! */
- if (temp & PORT_RESUME) {
+ if (isp1760_hcd_is_set(hcd, PORT_RESUME)) {
dev_err(hcd->self.controller, "Port resume should be skipped.\n");
/* Remote Wakeup received? */
@@ -1930,44 +2309,39 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = 0;
/* stop resume signaling */
- temp = reg_read32(hcd->regs, HC_PORTSC1);
- reg_write32(hcd->regs, HC_PORTSC1,
- temp & ~(PORT_RWC_BITS | PORT_RESUME));
- retval = handshake(hcd, HC_PORTSC1,
- PORT_RESUME, 0, 2000 /* 2msec */);
+ isp1760_hcd_clear(hcd, PORT_CSC);
+
+ retval = isp1760_hcd_clear_and_wait(hcd,
+ PORT_RESUME, 2000);
if (retval != 0) {
dev_err(hcd->self.controller,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
- temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
}
}
/* whoever resets must GetPortStatus to complete it!! */
- if ((temp & PORT_RESET)
- && time_after_eq(jiffies,
- priv->reset_done)) {
+ if (isp1760_hcd_is_set(hcd, PORT_RESET) &&
+ time_after_eq(jiffies, priv->reset_done)) {
status |= USB_PORT_STAT_C_RESET << 16;
priv->reset_done = 0;
/* force reset to complete */
- reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
- retval = handshake(hcd, HC_PORTSC1,
- PORT_RESET, 0, 750);
+ retval = isp1760_hcd_clear_and_wait(hcd, PORT_RESET,
+ 750);
if (retval != 0) {
dev_err(hcd->self.controller, "port %d reset error %d\n",
- wIndex + 1, retval);
+ wIndex + 1, retval);
goto error;
}
/* see what we found out */
- temp = check_reset_complete(hcd, wIndex,
- reg_read32(hcd->regs, HC_PORTSC1));
+ check_reset_complete(hcd, wIndex);
}
/*
* Even if OWNER is set, there's no harm letting hub_wq
@@ -1975,21 +2349,22 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
* for PORT_POWER anyway).
*/
- if (temp & PORT_OWNER)
+ if (isp1760_hcd_is_set(hcd, PORT_OWNER))
dev_err(hcd->self.controller, "PORT_OWNER is set\n");
- if (temp & PORT_CONNECT) {
+ if (isp1760_hcd_is_set(hcd, PORT_CONNECT)) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= USB_PORT_STAT_HIGH_SPEED;
}
- if (temp & PORT_PE)
+ if (isp1760_hcd_is_set(hcd, PORT_PE))
status |= USB_PORT_STAT_ENABLE;
- if (temp & (PORT_SUSPEND|PORT_RESUME))
+ if (isp1760_hcd_is_set(hcd, PORT_SUSPEND) &&
+ isp1760_hcd_is_set(hcd, PORT_RESUME))
status |= USB_PORT_STAT_SUSPEND;
- if (temp & PORT_RESET)
+ if (isp1760_hcd_is_set(hcd, PORT_RESET))
status |= USB_PORT_STAT_RESET;
- if (temp & PORT_POWER)
+ if (isp1760_hcd_is_set(hcd, PORT_POWER))
status |= USB_PORT_STAT_POWER;
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
@@ -2009,41 +2384,40 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = reg_read32(hcd->regs, HC_PORTSC1);
- if (temp & PORT_OWNER)
+
+ if (isp1760_hcd_is_set(hcd, PORT_OWNER))
break;
-/* temp &= ~PORT_RWC_BITS; */
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
+ isp1760_hcd_set(hcd, PORT_PE);
break;
case USB_PORT_FEAT_SUSPEND:
- if ((temp & PORT_PE) == 0
- || (temp & PORT_RESET) != 0)
+ if (!isp1760_hcd_is_set(hcd, PORT_PE) ||
+ isp1760_hcd_is_set(hcd, PORT_RESET))
goto error;
- reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
+ isp1760_hcd_set(hcd, PORT_SUSPEND);
break;
case USB_PORT_FEAT_POWER:
- if (HCS_PPC(priv->hcs_params))
- reg_write32(hcd->regs, HC_PORTSC1,
- temp | PORT_POWER);
+ if (isp1760_hcd_ppc_is_set(hcd))
+ isp1760_hcd_set(hcd, PORT_POWER);
break;
case USB_PORT_FEAT_RESET:
- if (temp & PORT_RESUME)
+ if (isp1760_hcd_is_set(hcd, PORT_RESUME))
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
- if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
- && PORT_USB11(temp)) {
- temp |= PORT_OWNER;
+ if ((isp1760_hcd_is_set(hcd, PORT_CONNECT) &&
+ !isp1760_hcd_is_set(hcd, PORT_PE)) &&
+ (isp1760_hcd_read(hcd, PORT_LSTATUS) == 1)) {
+ isp1760_hcd_set(hcd, PORT_OWNER);
} else {
- temp |= PORT_RESET;
- temp &= ~PORT_PE;
+ isp1760_hcd_set(hcd, PORT_RESET);
+ isp1760_hcd_clear(hcd, PORT_PE);
/*
* caller must wait, then call GetPortStatus
@@ -2052,12 +2426,10 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies +
msecs_to_jiffies(50);
}
- reg_write32(hcd->regs, HC_PORTSC1, temp);
break;
default:
goto error;
}
- reg_read32(hcd->regs, HC_USBCMD);
break;
default:
@@ -2074,14 +2446,13 @@ static int isp1760_get_frame(struct usb_hcd *hcd)
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 fr;
- fr = reg_read32(hcd->regs, HC_FRINDEX);
+ fr = isp1760_hcd_read(hcd, HC_FRINDEX);
return (fr >> 3) % priv->periodic_size;
}
static void isp1760_stop(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 temp;
del_timer(&errata2_timer);
@@ -2092,24 +2463,19 @@ static void isp1760_stop(struct usb_hcd *hcd)
spin_lock_irq(&priv->lock);
ehci_reset(hcd);
/* Disable IRQ */
- temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
+ isp1760_hcd_clear(hcd, HW_GLOBAL_INTR_EN);
spin_unlock_irq(&priv->lock);
- reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
+ isp1760_hcd_clear(hcd, FLAG_CF);
}
static void isp1760_shutdown(struct usb_hcd *hcd)
{
- u32 command, temp;
-
isp1760_stop(hcd);
- temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
- reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
- command = reg_read32(hcd->regs, HC_USBCMD);
- command &= ~CMD_RUN;
- reg_write32(hcd->regs, HC_USBCMD, command);
+ isp1760_hcd_clear(hcd, HW_GLOBAL_INTR_EN);
+
+ isp1760_hcd_clear(hcd, CMD_RUN);
}
static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
@@ -2182,10 +2548,11 @@ void isp1760_deinit_kmem_cache(void)
kmem_cache_destroy(urb_listitem_cachep);
}
-int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
- struct resource *mem, int irq, unsigned long irqflags,
+int isp1760_hcd_register(struct isp1760_hcd *priv, struct resource *mem,
+ int irq, unsigned long irqflags,
struct device *dev)
{
+ const struct isp1760_memory_layout *mem_layout = priv->memory_layout;
struct usb_hcd *hcd;
int ret;
@@ -2197,10 +2564,23 @@ int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
priv->hcd = hcd;
+ priv->atl_slots = kcalloc(mem_layout->slot_num,
+ sizeof(struct isp1760_slotinfo), GFP_KERNEL);
+ if (!priv->atl_slots) {
+ ret = -ENOMEM;
+ goto put_hcd;
+ }
+
+ priv->int_slots = kcalloc(mem_layout->slot_num,
+ sizeof(struct isp1760_slotinfo), GFP_KERNEL);
+ if (!priv->int_slots) {
+ ret = -ENOMEM;
+ goto free_atl_slots;
+ }
+
init_memory(priv);
hcd->irq = irq;
- hcd->regs = regs;
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
@@ -2209,13 +2589,17 @@ int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
- goto error;
+ goto free_int_slots;
device_wakeup_enable(hcd->self.controller);
return 0;
-error:
+free_int_slots:
+ kfree(priv->int_slots);
+free_atl_slots:
+ kfree(priv->atl_slots);
+put_hcd:
usb_put_hcd(hcd);
return ret;
}
@@ -2227,4 +2611,6 @@ void isp1760_hcd_unregister(struct isp1760_hcd *priv)
usb_remove_hcd(priv->hcd);
usb_put_hcd(priv->hcd);
+ kfree(priv->atl_slots);
+ kfree(priv->int_slots);
}
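
[Editor's note: the open-coded handshake() poll helper removed above is replaced throughout this file by field-based helpers such as isp1760_hcd_set_and_wait(). A minimal sketch of the idea follows, assuming the regmap fields set up elsewhere in this series; the real helper lives in isp1760-core.c and may differ in detail.]

static int isp1760_hcd_set_and_wait(struct usb_hcd *hcd, u32 field,
				    u32 timeout_us)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 val;

	/* Set the single-bit field, then poll until it reads back as set. */
	isp1760_field_write(priv->fields, field, 0xFFFFFFFF);

	return regmap_field_read_poll_timeout(priv->fields[field], val,
					      val, 10, timeout_us);
}
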
diff --git a/drivers/usb/isp1760/isp1760-hcd.h b/drivers/usb/isp1760/isp1760-hcd.h
index f1bb2deb1ccf..ee3063a34de3 100644
--- a/drivers/usb/isp1760/isp1760-hcd.h
+++ b/drivers/usb/isp1760/isp1760-hcd.h
@@ -3,30 +3,15 @@
#define _ISP1760_HCD_H_
#include <linux/spinlock.h>
+#include <linux/regmap.h>
+
+#include "isp1760-regs.h"
struct isp1760_qh;
struct isp1760_qtd;
struct resource;
struct usb_hcd;
-/*
- * 60kb divided in:
- * - 32 blocks @ 256 bytes
- * - 20 blocks @ 1024 bytes
- * - 4 blocks @ 8192 bytes
- */
-
-#define BLOCK_1_NUM 32
-#define BLOCK_2_NUM 20
-#define BLOCK_3_NUM 4
-
-#define BLOCK_1_SIZE 256
-#define BLOCK_2_SIZE 1024
-#define BLOCK_3_SIZE 8192
-#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
-#define MAX_PAYLOAD_SIZE BLOCK_3_SIZE
-#define PAYLOAD_AREA_SIZE 0xf000
-
struct isp1760_slotinfo {
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
@@ -34,6 +19,18 @@ struct isp1760_slotinfo {
};
/* chip memory management */
+#define ISP176x_BLOCK_MAX (32 + 20 + 4)
+#define ISP176x_BLOCK_NUM 3
+
+struct isp1760_memory_layout {
+ unsigned int blocks[ISP176x_BLOCK_NUM];
+ unsigned int blocks_size[ISP176x_BLOCK_NUM];
+
+ unsigned int slot_num;
+ unsigned int payload_blocks;
+ unsigned int payload_area_size;
+};
+
struct isp1760_memory_chunk {
unsigned int start;
unsigned int size;
@@ -48,16 +45,22 @@ enum isp1760_queue_head_types {
};
struct isp1760_hcd {
-#ifdef CONFIG_USB_ISP1760_HCD
struct usb_hcd *hcd;
- u32 hcs_params;
+ void __iomem *base;
+
+ struct regmap *regs;
+ struct regmap_field *fields[HC_FIELD_MAX];
+
+ bool is_isp1763;
+ const struct isp1760_memory_layout *memory_layout;
+
spinlock_t lock;
- struct isp1760_slotinfo atl_slots[32];
+ struct isp1760_slotinfo *atl_slots;
int atl_done_map;
- struct isp1760_slotinfo int_slots[32];
+ struct isp1760_slotinfo *int_slots;
int int_done_map;
- struct isp1760_memory_chunk memory_pool[BLOCKS];
+ struct isp1760_memory_chunk memory_pool[ISP176x_BLOCK_MAX];
struct list_head qh_list[QH_END];
/* periodic schedule support */
@@ -66,20 +69,18 @@ struct isp1760_hcd {
unsigned i_thresh;
unsigned long reset_done;
unsigned long next_statechange;
-#endif
};
#ifdef CONFIG_USB_ISP1760_HCD
-int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
- struct resource *mem, int irq, unsigned long irqflags,
- struct device *dev);
+int isp1760_hcd_register(struct isp1760_hcd *priv, struct resource *mem,
+ int irq, unsigned long irqflags, struct device *dev);
void isp1760_hcd_unregister(struct isp1760_hcd *priv);
int isp1760_init_kmem_once(void);
void isp1760_deinit_kmem_cache(void);
#else
static inline int isp1760_hcd_register(struct isp1760_hcd *priv,
- void __iomem *regs, struct resource *mem,
+ struct resource *mem,
int irq, unsigned long irqflags,
struct device *dev)
{
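
[Editor's note: a minimal sketch of how the new struct isp1760_memory_layout can describe the ISP176x payload memory, using the values from the BLOCK_* constants removed above; illustrative only, the actual initializer lives in isp1760-core.c.]

/* 60kB payload area: 32 x 256-byte, 20 x 1024-byte and 4 x 8192-byte blocks,
 * 32 PTD slots, as previously hard-coded by BLOCK_*_NUM/SIZE and BLOCKS. */
static const struct isp1760_memory_layout isp176x_memory_conf = {
	.blocks[0]		= 32,
	.blocks_size[0]		= 256,
	.blocks[1]		= 20,
	.blocks_size[1]		= 1024,
	.blocks[2]		= 4,
	.blocks_size[2]		= 8192,

	.slot_num		= 32,
	.payload_blocks		= 32 + 20 + 4,
	.payload_area_size	= 0xf000,
};
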
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index ccd30f835888..7cc349c0b2ad 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -7,6 +7,7 @@
* - PDEV (generic platform device centralized driver model)
*
* (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
+ * Copyright 2021 Linaro, Rui Miguel Silva <rui.silva@linaro.org>
*
*/
@@ -16,8 +17,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/usb/isp1760.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
#include "isp1760-core.h"
#include "isp1760-regs.h"
@@ -75,9 +76,9 @@ static int isp1761_pci_init(struct pci_dev *dev)
/*by default host is in 16bit mode, so
* io operations at this stage must be 16 bit
* */
- writel(0xface, iobase + HC_SCRATCH_REG);
+ writel(0xface, iobase + ISP176x_HC_SCRATCH);
udelay(100);
- reg_data = readl(iobase + HC_SCRATCH_REG) & 0x0000ffff;
+ reg_data = readl(iobase + ISP176x_HC_SCRATCH) & 0x0000ffff;
retry_count--;
}
@@ -209,13 +210,21 @@ static int isp1760_plat_probe(struct platform_device *pdev)
if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
devflags |= ISP1760_FLAG_ISP1761;
- /* Some systems wire up only 16 of the 32 data lines */
+ if (of_device_is_compatible(dp, "nxp,usb-isp1763"))
+ devflags |= ISP1760_FLAG_ISP1763;
+
+ /*
+ * Some systems wire up only 8 of the 16 data lines or
+ * 16 of the 32 data lines.
+ */
of_property_read_u32(dp, "bus-width", &bus_width);
if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
+ else if (bus_width == 8)
+ devflags |= ISP1760_FLAG_BUS_WIDTH_8;
- if (of_property_read_bool(dp, "port1-otg"))
- devflags |= ISP1760_FLAG_OTG_EN;
+ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL)
+ devflags |= ISP1760_FLAG_PERIPHERAL_EN;
if (of_property_read_bool(dp, "analog-oc"))
devflags |= ISP1760_FLAG_ANALOG_OC;
@@ -225,22 +234,9 @@ static int isp1760_plat_probe(struct platform_device *pdev)
if (of_property_read_bool(dp, "dreq-polarity"))
devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
- } else if (dev_get_platdata(&pdev->dev)) {
- struct isp1760_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
-
- if (pdata->is_isp1761)
- devflags |= ISP1760_FLAG_ISP1761;
- if (pdata->bus_width_16)
- devflags |= ISP1760_FLAG_BUS_WIDTH_16;
- if (pdata->port1_otg)
- devflags |= ISP1760_FLAG_OTG_EN;
- if (pdata->analog_oc)
- devflags |= ISP1760_FLAG_ANALOG_OC;
- if (pdata->dack_polarity_high)
- devflags |= ISP1760_FLAG_DACK_POL_HIGH;
- if (pdata->dreq_polarity_high)
- devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
+ } else {
+ pr_err("isp1760: no platform data\n");
+ return -ENXIO;
}
ret = isp1760_register(mem_res, irq_res->start, irqflags, &pdev->dev,
@@ -263,6 +259,7 @@ static int isp1760_plat_remove(struct platform_device *pdev)
static const struct of_device_id isp1760_of_match[] = {
{ .compatible = "nxp,usb-isp1760", },
{ .compatible = "nxp,usb-isp1761", },
+ { .compatible = "nxp,usb-isp1763", },
{ },
};
MODULE_DEVICE_TABLE(of, isp1760_of_match);
diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
index fedc4f5cded0..94ea60c20b2a 100644
--- a/drivers/usb/isp1760/isp1760-regs.h
+++ b/drivers/usb/isp1760/isp1760-regs.h
@@ -2,226 +2,291 @@
/*
* Driver for the NXP ISP1760 chip
*
+ * Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Laurent Pinchart
* Copyright 2007 Sebastian Siewior
*
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Rui Miguel Silva <rui.silva@linaro.org>
*/
-#ifndef _ISP1760_REGS_H_
-#define _ISP1760_REGS_H_
+#ifndef _ISP176x_REGS_H_
+#define _ISP176x_REGS_H_
/* -----------------------------------------------------------------------------
* Host Controller
*/
+/* ISP1760/31 */
/* EHCI capability registers */
-#define HC_CAPLENGTH 0x000
-#define HC_LENGTH(p) (((p) >> 00) & 0x00ff) /* bits 7:0 */
-#define HC_VERSION(p) (((p) >> 16) & 0xffff) /* bits 31:16 */
+#define ISP176x_HC_VERSION 0x002
+#define ISP176x_HC_HCSPARAMS 0x004
+#define ISP176x_HC_HCCPARAMS 0x008
-#define HC_HCSPARAMS 0x004
-#define HCS_INDICATOR(p) ((p) & (1 << 16)) /* true: has port indicators */
-#define HCS_PPC(p) ((p) & (1 << 4)) /* true: port power control */
-#define HCS_N_PORTS(p) (((p) >> 0) & 0xf) /* bits 3:0, ports on HC */
+/* EHCI operational registers */
+#define ISP176x_HC_USBCMD 0x020
+#define ISP176x_HC_USBSTS 0x024
+#define ISP176x_HC_FRINDEX 0x02c
+
+#define ISP176x_HC_CONFIGFLAG 0x060
+#define ISP176x_HC_PORTSC1 0x064
+
+#define ISP176x_HC_ISO_PTD_DONEMAP 0x130
+#define ISP176x_HC_ISO_PTD_SKIPMAP 0x134
+#define ISP176x_HC_ISO_PTD_LASTPTD 0x138
+#define ISP176x_HC_INT_PTD_DONEMAP 0x140
+#define ISP176x_HC_INT_PTD_SKIPMAP 0x144
+#define ISP176x_HC_INT_PTD_LASTPTD 0x148
+#define ISP176x_HC_ATL_PTD_DONEMAP 0x150
+#define ISP176x_HC_ATL_PTD_SKIPMAP 0x154
+#define ISP176x_HC_ATL_PTD_LASTPTD 0x158
-#define HC_HCCPARAMS 0x008
-#define HCC_ISOC_CACHE(p) ((p) & (1 << 7)) /* true: can cache isoc frame */
-#define HCC_ISOC_THRES(p) (((p) >> 4) & 0x7) /* bits 6:4, uframes cached */
+/* Configuration Register */
+#define ISP176x_HC_HW_MODE_CTRL 0x300
+#define ISP176x_HC_CHIP_ID 0x304
+#define ISP176x_HC_SCRATCH 0x308
+#define ISP176x_HC_RESET 0x30c
+#define ISP176x_HC_BUFFER_STATUS 0x334
+#define ISP176x_HC_MEMORY 0x33c
+/* Interrupt Register */
+#define ISP176x_HC_INTERRUPT 0x310
+#define ISP176x_HC_INTERRUPT_ENABLE 0x314
+#define ISP176x_HC_ISO_IRQ_MASK_OR 0x318
+#define ISP176x_HC_INT_IRQ_MASK_OR 0x31c
+#define ISP176x_HC_ATL_IRQ_MASK_OR 0x320
+#define ISP176x_HC_ISO_IRQ_MASK_AND 0x324
+#define ISP176x_HC_INT_IRQ_MASK_AND 0x328
+#define ISP176x_HC_ATL_IRQ_MASK_AND 0x32c
+
+#define ISP176x_HC_OTG_CTRL_SET 0x374
+#define ISP176x_HC_OTG_CTRL_CLEAR 0x376
+
+enum isp176x_host_controller_fields {
+ /* HC_PORTSC1 */
+ PORT_OWNER, PORT_POWER, PORT_LSTATUS, PORT_RESET, PORT_SUSPEND,
+ PORT_RESUME, PORT_PE, PORT_CSC, PORT_CONNECT,
+ /* HC_HCSPARAMS */
+ HCS_PPC, HCS_N_PORTS,
+ /* HC_HCCPARAMS */
+ HCC_ISOC_CACHE, HCC_ISOC_THRES,
+ /* HC_USBCMD */
+ CMD_LRESET, CMD_RESET, CMD_RUN,
+ /* HC_USBSTS */
+ STS_PCD,
+ /* HC_FRINDEX */
+ HC_FRINDEX,
+ /* HC_CONFIGFLAG */
+ FLAG_CF,
+ /* ISO/INT/ATL PTD */
+ HC_ISO_PTD_DONEMAP, HC_ISO_PTD_SKIPMAP, HC_ISO_PTD_LASTPTD,
+ HC_INT_PTD_DONEMAP, HC_INT_PTD_SKIPMAP, HC_INT_PTD_LASTPTD,
+ HC_ATL_PTD_DONEMAP, HC_ATL_PTD_SKIPMAP, HC_ATL_PTD_LASTPTD,
+ /* HC_HW_MODE_CTRL */
+ ALL_ATX_RESET, HW_ANA_DIGI_OC, HW_DEV_DMA, HW_COMN_IRQ, HW_COMN_DMA,
+ HW_DATA_BUS_WIDTH, HW_DACK_POL_HIGH, HW_DREQ_POL_HIGH, HW_INTR_HIGH_ACT,
+ HW_INTF_LOCK, HW_INTR_EDGE_TRIG, HW_GLOBAL_INTR_EN,
+ /* HC_CHIP_ID */
+ HC_CHIP_ID_HIGH, HC_CHIP_ID_LOW, HC_CHIP_REV,
+ /* HC_SCRATCH */
+ HC_SCRATCH,
+ /* HC_RESET */
+ SW_RESET_RESET_ATX, SW_RESET_RESET_HC, SW_RESET_RESET_ALL,
+ /* HC_BUFFER_STATUS */
+ ISO_BUF_FILL, INT_BUF_FILL, ATL_BUF_FILL,
+ /* HC_MEMORY */
+ MEM_BANK_SEL, MEM_START_ADDR,
+ /* HC_DATA */
+ HC_DATA,
+ /* HC_INTERRUPT */
+ HC_INTERRUPT,
+ /* HC_INTERRUPT_ENABLE */
+ HC_INT_IRQ_ENABLE, HC_ATL_IRQ_ENABLE,
+ /* INTERRUPT MASKS */
+ HC_ISO_IRQ_MASK_OR, HC_INT_IRQ_MASK_OR, HC_ATL_IRQ_MASK_OR,
+ HC_ISO_IRQ_MASK_AND, HC_INT_IRQ_MASK_AND, HC_ATL_IRQ_MASK_AND,
+ /* HW_OTG_CTRL_SET */
+ HW_OTG_DISABLE, HW_SW_SEL_HC_DC, HW_VBUS_DRV, HW_SEL_CP_EXT,
+ HW_DM_PULLDOWN, HW_DP_PULLDOWN, HW_DP_PULLUP, HW_HC_2_DIS,
+ /* HW_OTG_CTRL_CLR */
+ HW_OTG_DISABLE_CLEAR, HW_SW_SEL_HC_DC_CLEAR, HW_VBUS_DRV_CLEAR,
+ HW_SEL_CP_EXT_CLEAR, HW_DM_PULLDOWN_CLEAR, HW_DP_PULLDOWN_CLEAR,
+ HW_DP_PULLUP_CLEAR, HW_HC_2_DIS_CLEAR,
+ /* Last element */
+ HC_FIELD_MAX,
+};
+
+/* ISP1763 */
/* EHCI operational registers */
-#define HC_USBCMD 0x020
-#define CMD_LRESET (1 << 7) /* partial reset (no ports, etc) */
-#define CMD_RESET (1 << 1) /* reset HC not bus */
-#define CMD_RUN (1 << 0) /* start/stop HC */
-
-#define HC_USBSTS 0x024
-#define STS_PCD (1 << 2) /* port change detect */
-
-#define HC_FRINDEX 0x02c
-
-#define HC_CONFIGFLAG 0x060
-#define FLAG_CF (1 << 0) /* true: we'll support "high speed" */
-
-#define HC_PORTSC1 0x064
-#define PORT_OWNER (1 << 13) /* true: companion hc owns this port */
-#define PORT_POWER (1 << 12) /* true: has power (see PPC) */
-#define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */
-#define PORT_RESET (1 << 8) /* reset port */
-#define PORT_SUSPEND (1 << 7) /* suspend port */
-#define PORT_RESUME (1 << 6) /* resume it */
-#define PORT_PE (1 << 2) /* port enable */
-#define PORT_CSC (1 << 1) /* connect status change */
-#define PORT_CONNECT (1 << 0) /* device connected */
-#define PORT_RWC_BITS (PORT_CSC)
-
-#define HC_ISO_PTD_DONEMAP_REG 0x130
-#define HC_ISO_PTD_SKIPMAP_REG 0x134
-#define HC_ISO_PTD_LASTPTD_REG 0x138
-#define HC_INT_PTD_DONEMAP_REG 0x140
-#define HC_INT_PTD_SKIPMAP_REG 0x144
-#define HC_INT_PTD_LASTPTD_REG 0x148
-#define HC_ATL_PTD_DONEMAP_REG 0x150
-#define HC_ATL_PTD_SKIPMAP_REG 0x154
-#define HC_ATL_PTD_LASTPTD_REG 0x158
+#define ISP1763_HC_USBCMD 0x8c
+#define ISP1763_HC_USBSTS 0x90
+#define ISP1763_HC_FRINDEX 0x98
+
+#define ISP1763_HC_CONFIGFLAG 0x9c
+#define ISP1763_HC_PORTSC1 0xa0
+
+#define ISP1763_HC_ISO_PTD_DONEMAP 0xa4
+#define ISP1763_HC_ISO_PTD_SKIPMAP 0xa6
+#define ISP1763_HC_ISO_PTD_LASTPTD 0xa8
+#define ISP1763_HC_INT_PTD_DONEMAP 0xaa
+#define ISP1763_HC_INT_PTD_SKIPMAP 0xac
+#define ISP1763_HC_INT_PTD_LASTPTD 0xae
+#define ISP1763_HC_ATL_PTD_DONEMAP 0xb0
+#define ISP1763_HC_ATL_PTD_SKIPMAP 0xb2
+#define ISP1763_HC_ATL_PTD_LASTPTD 0xb4
/* Configuration Register */
-#define HC_HW_MODE_CTRL 0x300
-#define ALL_ATX_RESET (1 << 31)
-#define HW_ANA_DIGI_OC (1 << 15)
-#define HW_DEV_DMA (1 << 11)
-#define HW_COMN_IRQ (1 << 10)
-#define HW_COMN_DMA (1 << 9)
-#define HW_DATA_BUS_32BIT (1 << 8)
-#define HW_DACK_POL_HIGH (1 << 6)
-#define HW_DREQ_POL_HIGH (1 << 5)
-#define HW_INTR_HIGH_ACT (1 << 2)
-#define HW_INTR_EDGE_TRIG (1 << 1)
-#define HW_GLOBAL_INTR_EN (1 << 0)
-
-#define HC_CHIP_ID_REG 0x304
-#define HC_SCRATCH_REG 0x308
-
-#define HC_RESET_REG 0x30c
-#define SW_RESET_RESET_HC (1 << 1)
-#define SW_RESET_RESET_ALL (1 << 0)
-
-#define HC_BUFFER_STATUS_REG 0x334
-#define ISO_BUF_FILL (1 << 2)
-#define INT_BUF_FILL (1 << 1)
-#define ATL_BUF_FILL (1 << 0)
-
-#define HC_MEMORY_REG 0x33c
-#define ISP_BANK(x) ((x) << 16)
-
-#define HC_PORT1_CTRL 0x374
-#define PORT1_POWER (3 << 3)
-#define PORT1_INIT1 (1 << 7)
-#define PORT1_INIT2 (1 << 23)
-#define HW_OTG_CTRL_SET 0x374
-#define HW_OTG_CTRL_CLR 0x376
-#define HW_OTG_DISABLE (1 << 10)
-#define HW_OTG_SE0_EN (1 << 9)
-#define HW_BDIS_ACON_EN (1 << 8)
-#define HW_SW_SEL_HC_DC (1 << 7)
-#define HW_VBUS_CHRG (1 << 6)
-#define HW_VBUS_DISCHRG (1 << 5)
-#define HW_VBUS_DRV (1 << 4)
-#define HW_SEL_CP_EXT (1 << 3)
-#define HW_DM_PULLDOWN (1 << 2)
-#define HW_DP_PULLDOWN (1 << 1)
-#define HW_DP_PULLUP (1 << 0)
+#define ISP1763_HC_HW_MODE_CTRL 0xb6
+#define ISP1763_HC_CHIP_REV 0x70
+#define ISP1763_HC_CHIP_ID 0x72
+#define ISP1763_HC_SCRATCH 0x78
+#define ISP1763_HC_RESET 0xb8
+#define ISP1763_HC_BUFFER_STATUS 0xba
+#define ISP1763_HC_MEMORY 0xc4
+#define ISP1763_HC_DATA 0xc6
/* Interrupt Register */
-#define HC_INTERRUPT_REG 0x310
-
-#define HC_INTERRUPT_ENABLE 0x314
-#define HC_ISO_INT (1 << 9)
-#define HC_ATL_INT (1 << 8)
-#define HC_INTL_INT (1 << 7)
-#define HC_EOT_INT (1 << 3)
-#define HC_SOT_INT (1 << 1)
-#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT)
-
-#define HC_ISO_IRQ_MASK_OR_REG 0x318
-#define HC_INT_IRQ_MASK_OR_REG 0x31c
-#define HC_ATL_IRQ_MASK_OR_REG 0x320
-#define HC_ISO_IRQ_MASK_AND_REG 0x324
-#define HC_INT_IRQ_MASK_AND_REG 0x328
-#define HC_ATL_IRQ_MASK_AND_REG 0x32c
+#define ISP1763_HC_INTERRUPT 0xd4
+#define ISP1763_HC_INTERRUPT_ENABLE 0xd6
+#define ISP1763_HC_ISO_IRQ_MASK_OR 0xd8
+#define ISP1763_HC_INT_IRQ_MASK_OR 0xda
+#define ISP1763_HC_ATL_IRQ_MASK_OR 0xdc
+#define ISP1763_HC_ISO_IRQ_MASK_AND 0xde
+#define ISP1763_HC_INT_IRQ_MASK_AND 0xe0
+#define ISP1763_HC_ATL_IRQ_MASK_AND 0xe2
+
+#define ISP1763_HC_OTG_CTRL_SET 0xe4
+#define ISP1763_HC_OTG_CTRL_CLEAR 0xe6
/* -----------------------------------------------------------------------------
* Peripheral Controller
*/
-/* Initialization Registers */
-#define DC_ADDRESS 0x0200
-#define DC_DEVEN (1 << 7)
-
-#define DC_MODE 0x020c
-#define DC_DMACLKON (1 << 9)
-#define DC_VBUSSTAT (1 << 8)
-#define DC_CLKAON (1 << 7)
-#define DC_SNDRSU (1 << 6)
-#define DC_GOSUSP (1 << 5)
-#define DC_SFRESET (1 << 4)
-#define DC_GLINTENA (1 << 3)
-#define DC_WKUPCS (1 << 2)
-
-#define DC_INTCONF 0x0210
-#define DC_CDBGMOD_ACK_NAK (0 << 6)
-#define DC_CDBGMOD_ACK (1 << 6)
-#define DC_CDBGMOD_ACK_1NAK (2 << 6)
-#define DC_DDBGMODIN_ACK_NAK (0 << 4)
-#define DC_DDBGMODIN_ACK (1 << 4)
-#define DC_DDBGMODIN_ACK_1NAK (2 << 4)
-#define DC_DDBGMODOUT_ACK_NYET_NAK (0 << 2)
-#define DC_DDBGMODOUT_ACK_NYET (1 << 2)
-#define DC_DDBGMODOUT_ACK_NYET_1NAK (2 << 2)
-#define DC_INTLVL (1 << 1)
-#define DC_INTPOL (1 << 0)
-
-#define DC_DEBUG 0x0212
-#define DC_INTENABLE 0x0214
#define DC_IEPTX(n) (1 << (11 + 2 * (n)))
#define DC_IEPRX(n) (1 << (10 + 2 * (n)))
#define DC_IEPRXTX(n) (3 << (10 + 2 * (n)))
-#define DC_IEP0SETUP (1 << 8)
-#define DC_IEVBUS (1 << 7)
-#define DC_IEDMA (1 << 6)
-#define DC_IEHS_STA (1 << 5)
-#define DC_IERESM (1 << 4)
-#define DC_IESUSP (1 << 3)
-#define DC_IEPSOF (1 << 2)
-#define DC_IESOF (1 << 1)
-#define DC_IEBRST (1 << 0)
+
+#define ISP176x_DC_CDBGMOD_ACK BIT(6)
+#define ISP176x_DC_DDBGMODIN_ACK BIT(4)
+#define ISP176x_DC_DDBGMODOUT_ACK BIT(2)
+
+#define ISP176x_DC_IEP0SETUP BIT(8)
+#define ISP176x_DC_IEVBUS BIT(7)
+#define ISP176x_DC_IEHS_STA BIT(5)
+#define ISP176x_DC_IERESM BIT(4)
+#define ISP176x_DC_IESUSP BIT(3)
+#define ISP176x_DC_IEBRST BIT(0)
+
+#define ISP176x_DC_ENDPTYP_ISOC 0x01
+#define ISP176x_DC_ENDPTYP_BULK 0x02
+#define ISP176x_DC_ENDPTYP_INTERRUPT 0x03
+
+/* Initialization Registers */
+#define ISP176x_DC_ADDRESS 0x0200
+#define ISP176x_DC_MODE 0x020c
+#define ISP176x_DC_INTCONF 0x0210
+#define ISP176x_DC_DEBUG 0x0212
+#define ISP176x_DC_INTENABLE 0x0214
+
+/* Data Flow Registers */
+#define ISP176x_DC_EPMAXPKTSZ 0x0204
+#define ISP176x_DC_EPTYPE 0x0208
+
+#define ISP176x_DC_BUFLEN 0x021c
+#define ISP176x_DC_BUFSTAT 0x021e
+#define ISP176x_DC_DATAPORT 0x0220
+
+#define ISP176x_DC_CTRLFUNC 0x0228
+#define ISP176x_DC_EPINDEX 0x022c
+
+/* DMA Registers */
+#define ISP176x_DC_DMACMD 0x0230
+#define ISP176x_DC_DMATXCOUNT 0x0234
+#define ISP176x_DC_DMACONF 0x0238
+#define ISP176x_DC_DMAHW 0x023c
+#define ISP176x_DC_DMAINTREASON 0x0250
+#define ISP176x_DC_DMAINTEN 0x0254
+#define ISP176x_DC_DMAEP 0x0258
+#define ISP176x_DC_DMABURSTCOUNT 0x0264
+
+/* General Registers */
+#define ISP176x_DC_INTERRUPT 0x0218
+#define ISP176x_DC_CHIPID 0x0270
+#define ISP176x_DC_FRAMENUM 0x0274
+#define ISP176x_DC_SCRATCH 0x0278
+#define ISP176x_DC_UNLOCKDEV 0x027c
+#define ISP176x_DC_INTPULSEWIDTH 0x0280
+#define ISP176x_DC_TESTMODE 0x0284
+
+enum isp176x_device_controller_fields {
+ /* DC_ADDRESS */
+ DC_DEVEN, DC_DEVADDR,
+ /* DC_MODE */
+ DC_VBUSSTAT, DC_SFRESET, DC_GLINTENA,
+ /* DC_INTCONF */
+ DC_CDBGMOD_ACK, DC_DDBGMODIN_ACK, DC_DDBGMODOUT_ACK, DC_INTPOL,
+ /* DC_INTENABLE */
+ DC_IEPRXTX_7, DC_IEPRXTX_6, DC_IEPRXTX_5, DC_IEPRXTX_4, DC_IEPRXTX_3,
+ DC_IEPRXTX_2, DC_IEPRXTX_1, DC_IEPRXTX_0,
+ DC_IEP0SETUP, DC_IEVBUS, DC_IEHS_STA, DC_IERESM, DC_IESUSP, DC_IEBRST,
+ /* DC_EPINDEX */
+ DC_EP0SETUP, DC_ENDPIDX, DC_EPDIR,
+ /* DC_CTRLFUNC */
+ DC_CLBUF, DC_VENDP, DC_DSEN, DC_STATUS, DC_STALL,
+ /* DC_BUFLEN */
+ DC_BUFLEN,
+ /* DC_EPMAXPKTSZ */
+ DC_FFOSZ,
+ /* DC_EPTYPE */
+ DC_EPENABLE, DC_ENDPTYP,
+ /* DC_FRAMENUM */
+ DC_FRAMENUM, DC_UFRAMENUM,
+ /* DC_CHIP_ID */
+ DC_CHIP_ID_HIGH, DC_CHIP_ID_LOW,
+ /* DC_SCRATCH */
+ DC_SCRATCH,
+ /* Last element */
+ DC_FIELD_MAX,
+};
+
+/* ISP1763 */
+/* Initialization Registers */
+#define ISP1763_DC_ADDRESS 0x00
+#define ISP1763_DC_MODE 0x0c
+#define ISP1763_DC_INTCONF 0x10
+#define ISP1763_DC_INTENABLE 0x14
/* Data Flow Registers */
-#define DC_EPINDEX 0x022c
-#define DC_EP0SETUP (1 << 5)
-#define DC_ENDPIDX(n) ((n) << 1)
-#define DC_EPDIR (1 << 0)
-
-#define DC_CTRLFUNC 0x0228
-#define DC_CLBUF (1 << 4)
-#define DC_VENDP (1 << 3)
-#define DC_DSEN (1 << 2)
-#define DC_STATUS (1 << 1)
-#define DC_STALL (1 << 0)
-
-#define DC_DATAPORT 0x0220
-#define DC_BUFLEN 0x021c
-#define DC_DATACOUNT_MASK 0xffff
-#define DC_BUFSTAT 0x021e
-#define DC_EPMAXPKTSZ 0x0204
-
-#define DC_EPTYPE 0x0208
-#define DC_NOEMPKT (1 << 4)
-#define DC_EPENABLE (1 << 3)
-#define DC_DBLBUF (1 << 2)
-#define DC_ENDPTYP_ISOC (1 << 0)
-#define DC_ENDPTYP_BULK (2 << 0)
-#define DC_ENDPTYP_INTERRUPT (3 << 0)
+#define ISP1763_DC_EPMAXPKTSZ 0x04
+#define ISP1763_DC_EPTYPE 0x08
+
+#define ISP1763_DC_BUFLEN 0x1c
+#define ISP1763_DC_BUFSTAT 0x1e
+#define ISP1763_DC_DATAPORT 0x20
+
+#define ISP1763_DC_CTRLFUNC 0x28
+#define ISP1763_DC_EPINDEX 0x2c
/* DMA Registers */
-#define DC_DMACMD 0x0230
-#define DC_DMATXCOUNT 0x0234
-#define DC_DMACONF 0x0238
-#define DC_DMAHW 0x023c
-#define DC_DMAINTREASON 0x0250
-#define DC_DMAINTEN 0x0254
-#define DC_DMAEP 0x0258
-#define DC_DMABURSTCOUNT 0x0264
+#define ISP1763_DC_DMACMD 0x30
+#define ISP1763_DC_DMATXCOUNT 0x34
+#define ISP1763_DC_DMACONF 0x38
+#define ISP1763_DC_DMAHW 0x3c
+#define ISP1763_DC_DMAINTREASON 0x50
+#define ISP1763_DC_DMAINTEN 0x54
+#define ISP1763_DC_DMAEP 0x58
+#define ISP1763_DC_DMABURSTCOUNT 0x64
/* General Registers */
-#define DC_INTERRUPT 0x0218
-#define DC_CHIPID 0x0270
-#define DC_FRAMENUM 0x0274
-#define DC_SCRATCH 0x0278
-#define DC_UNLOCKDEV 0x027c
-#define DC_INTPULSEWIDTH 0x0280
-#define DC_TESTMODE 0x0284
+#define ISP1763_DC_INTERRUPT 0x18
+#define ISP1763_DC_CHIPID_LOW 0x70
+#define ISP1763_DC_CHIPID_HIGH 0x72
+#define ISP1763_DC_FRAMENUM 0x74
+#define ISP1763_DC_SCRATCH 0x78
+#define ISP1763_DC_UNLOCKDEV 0x7c
+#define ISP1763_DC_INTPULSEWIDTH 0x80
+#define ISP1763_DC_TESTMODE 0x84
#endif
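
[Editor's note: the enum fields introduced above are resolved to register bit ranges at probe time through regmap_field tables. A minimal sketch follows, assuming the register offsets and bit positions taken from the removed ISP176x #defines; the real tables are in isp1760-core.c.]

/* Map a few host-controller enum fields to their register bit ranges,
 * matching the removed CMD_*, STS_PCD, FLAG_CF and PORT_* masks. */
static const struct reg_field isp1760_hc_reg_fields[HC_FIELD_MAX] = {
	[CMD_LRESET]	= REG_FIELD(ISP176x_HC_USBCMD, 7, 7),
	[CMD_RESET]	= REG_FIELD(ISP176x_HC_USBCMD, 1, 1),
	[CMD_RUN]	= REG_FIELD(ISP176x_HC_USBCMD, 0, 0),
	[STS_PCD]	= REG_FIELD(ISP176x_HC_USBSTS, 2, 2),
	[FLAG_CF]	= REG_FIELD(ISP176x_HC_CONFIGFLAG, 0, 0),
	[PORT_CSC]	= REG_FIELD(ISP176x_HC_PORTSC1, 1, 1),
	[PORT_CONNECT]	= REG_FIELD(ISP176x_HC_PORTSC1, 0, 0),
};
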
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 1714b2258b54..a78da59d6417 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -2,10 +2,12 @@
/*
* Driver for the NXP ISP1761 device controller
*
+ * Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Ideas on Board Oy
*
* Contacts:
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Rui Miguel Silva <rui.silva@linaro.org>
*/
#include <linux/interrupt.h>
@@ -45,16 +47,62 @@ static inline struct isp1760_request *req_to_udc_req(struct usb_request *req)
return container_of(req, struct isp1760_request, req);
}
-static inline u32 isp1760_udc_read(struct isp1760_udc *udc, u16 reg)
+static u32 isp1760_udc_read(struct isp1760_udc *udc, u16 field)
{
- return isp1760_read32(udc->regs, reg);
+ return isp1760_field_read(udc->fields, field);
}
-static inline void isp1760_udc_write(struct isp1760_udc *udc, u16 reg, u32 val)
+static void isp1760_udc_write(struct isp1760_udc *udc, u16 field, u32 val)
{
- isp1760_write32(udc->regs, reg, val);
+ isp1760_field_write(udc->fields, field, val);
}
+static u32 isp1760_udc_read_raw(struct isp1760_udc *udc, u16 reg)
+{
+ __le32 val;
+
+ regmap_raw_read(udc->regs, reg, &val, 4);
+
+ return le32_to_cpu(val);
+}
+
+static u16 isp1760_udc_read_raw16(struct isp1760_udc *udc, u16 reg)
+{
+ __le16 val;
+
+ regmap_raw_read(udc->regs, reg, &val, 2);
+
+ return le16_to_cpu(val);
+}
+
+static void isp1760_udc_write_raw(struct isp1760_udc *udc, u16 reg, u32 val)
+{
+ __le32 val_le = cpu_to_le32(val);
+
+ regmap_raw_write(udc->regs, reg, &val_le, 4);
+}
+
+static void isp1760_udc_write_raw16(struct isp1760_udc *udc, u16 reg, u16 val)
+{
+ __le16 val_le = cpu_to_le16(val);
+
+ regmap_raw_write(udc->regs, reg, &val_le, 2);
+}
+
+static void isp1760_udc_set(struct isp1760_udc *udc, u32 field)
+{
+ isp1760_udc_write(udc, field, 0xFFFFFFFF);
+}
+
+static void isp1760_udc_clear(struct isp1760_udc *udc, u32 field)
+{
+ isp1760_udc_write(udc, field, 0);
+}
+
+static bool isp1760_udc_is_set(struct isp1760_udc *udc, u32 field)
+{
+ return !!isp1760_udc_read(udc, field);
+}
/* -----------------------------------------------------------------------------
* Endpoint Management
*/
@@ -75,16 +123,21 @@ static struct isp1760_ep *isp1760_udc_find_ep(struct isp1760_udc *udc,
return NULL;
}
-static void __isp1760_udc_select_ep(struct isp1760_ep *ep, int dir)
+static void __isp1760_udc_select_ep(struct isp1760_udc *udc,
+ struct isp1760_ep *ep, int dir)
{
- isp1760_udc_write(ep->udc, DC_EPINDEX,
- DC_ENDPIDX(ep->addr & USB_ENDPOINT_NUMBER_MASK) |
- (dir == USB_DIR_IN ? DC_EPDIR : 0));
+ isp1760_udc_write(udc, DC_ENDPIDX, ep->addr & USB_ENDPOINT_NUMBER_MASK);
+
+ if (dir == USB_DIR_IN)
+ isp1760_udc_set(udc, DC_EPDIR);
+ else
+ isp1760_udc_clear(udc, DC_EPDIR);
}
/**
* isp1760_udc_select_ep - Select an endpoint for register access
* @ep: The endpoint
+ * @udc: Reference to the device controller
*
* The ISP1761 endpoint registers are banked. This function selects the target
* endpoint for banked register access. The selection remains valid until the
@@ -93,9 +146,10 @@ static void __isp1760_udc_select_ep(struct isp1760_ep *ep, int dir)
*
* Called with the UDC spinlock held.
*/
-static void isp1760_udc_select_ep(struct isp1760_ep *ep)
+static void isp1760_udc_select_ep(struct isp1760_udc *udc,
+ struct isp1760_ep *ep)
{
- __isp1760_udc_select_ep(ep, ep->addr & USB_ENDPOINT_DIR_MASK);
+ __isp1760_udc_select_ep(udc, ep, ep->addr & USB_ENDPOINT_DIR_MASK);
}
/* Called with the UDC spinlock held. */
@@ -108,9 +162,13 @@ static void isp1760_udc_ctrl_send_status(struct isp1760_ep *ep, int dir)
* the direction opposite to the data stage data packets, we thus need
* to select the OUT/IN endpoint for IN/OUT transfers.
*/
- isp1760_udc_write(udc, DC_EPINDEX, DC_ENDPIDX(0) |
- (dir == USB_DIR_IN ? 0 : DC_EPDIR));
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_STATUS);
+ if (dir == USB_DIR_IN)
+ isp1760_udc_clear(udc, DC_EPDIR);
+ else
+ isp1760_udc_set(udc, DC_EPDIR);
+
+ isp1760_udc_write(udc, DC_ENDPIDX, 1);
+ isp1760_udc_set(udc, DC_STATUS);
/*
* The hardware will terminate the request automatically and go back to
@@ -157,10 +215,10 @@ static void isp1760_udc_ctrl_send_stall(struct isp1760_ep *ep)
spin_lock_irqsave(&udc->lock, flags);
/* Stall both the IN and OUT endpoints. */
- __isp1760_udc_select_ep(ep, USB_DIR_OUT);
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_STALL);
- __isp1760_udc_select_ep(ep, USB_DIR_IN);
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_STALL);
+ __isp1760_udc_select_ep(udc, ep, USB_DIR_OUT);
+ isp1760_udc_set(udc, DC_STALL);
+ __isp1760_udc_select_ep(udc, ep, USB_DIR_IN);
+ isp1760_udc_set(udc, DC_STALL);
/* A protocol stall completes the control transaction. */
udc->ep0_state = ISP1760_CTRL_SETUP;
@@ -181,8 +239,8 @@ static bool isp1760_udc_receive(struct isp1760_ep *ep,
u32 *buf;
int i;
- isp1760_udc_select_ep(ep);
- len = isp1760_udc_read(udc, DC_BUFLEN) & DC_DATACOUNT_MASK;
+ isp1760_udc_select_ep(udc, ep);
+ len = isp1760_udc_read(udc, DC_BUFLEN);
dev_dbg(udc->isp->dev, "%s: received %u bytes (%u/%u done)\n",
__func__, len, req->req.actual, req->req.length);
@@ -198,7 +256,7 @@ static bool isp1760_udc_receive(struct isp1760_ep *ep,
* datasheet doesn't clearly document how this should be
* handled.
*/
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_CLBUF);
+ isp1760_udc_set(udc, DC_CLBUF);
return false;
}
@@ -209,9 +267,9 @@ static bool isp1760_udc_receive(struct isp1760_ep *ep,
* the next packet might be removed from the FIFO.
*/
for (i = len; i > 2; i -= 4, ++buf)
- *buf = le32_to_cpu(isp1760_udc_read(udc, DC_DATAPORT));
+ *buf = isp1760_udc_read_raw(udc, ISP176x_DC_DATAPORT);
if (i > 0)
- *(u16 *)buf = le16_to_cpu(readw(udc->regs + DC_DATAPORT));
+ *(u16 *)buf = isp1760_udc_read_raw16(udc, ISP176x_DC_DATAPORT);
req->req.actual += len;
@@ -253,7 +311,7 @@ static void isp1760_udc_transmit(struct isp1760_ep *ep,
__func__, req->packet_size, req->req.actual,
req->req.length);
- __isp1760_udc_select_ep(ep, USB_DIR_IN);
+ __isp1760_udc_select_ep(udc, ep, USB_DIR_IN);
if (req->packet_size)
isp1760_udc_write(udc, DC_BUFLEN, req->packet_size);
@@ -265,14 +323,14 @@ static void isp1760_udc_transmit(struct isp1760_ep *ep,
* the FIFO for this kind of conditions, but doesn't seem to work.
*/
for (i = req->packet_size; i > 2; i -= 4, ++buf)
- isp1760_udc_write(udc, DC_DATAPORT, cpu_to_le32(*buf));
+ isp1760_udc_write_raw(udc, ISP176x_DC_DATAPORT, *buf);
if (i > 0)
- writew(cpu_to_le16(*(u16 *)buf), udc->regs + DC_DATAPORT);
+ isp1760_udc_write_raw16(udc, ISP176x_DC_DATAPORT, *(u16 *)buf);
if (ep->addr == 0)
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_DSEN);
+ isp1760_udc_set(udc, DC_DSEN);
if (!req->packet_size)
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_VENDP);
+ isp1760_udc_set(udc, DC_VENDP);
}
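The data port is accessed one 32-bit word at a time, with a trailing 16-bit access when two or fewer bytes remain; lengths with a remainder of three are rounded up to a full word, which is presumably harmless because DC_BUFLEN is programmed first. A worked sketch (not part of the patch) for a 6-byte packet on a little-endian CPU:

    /* Sketch: pushing a 6-byte packet into the TX FIFO. */
    isp1760_udc_write(udc, DC_BUFLEN, 6);
    isp1760_udc_write_raw(udc, ISP176x_DC_DATAPORT, buf[0]);            /* bytes 0-3 */
    isp1760_udc_write_raw16(udc, ISP176x_DC_DATAPORT, *(u16 *)&buf[1]); /* bytes 4-5 */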
static void isp1760_ep_rx_ready(struct isp1760_ep *ep)
@@ -408,19 +466,24 @@ static int __isp1760_udc_set_halt(struct isp1760_ep *ep, bool halt)
return -EINVAL;
}
- isp1760_udc_select_ep(ep);
- isp1760_udc_write(udc, DC_CTRLFUNC, halt ? DC_STALL : 0);
+ isp1760_udc_select_ep(udc, ep);
+
+ if (halt)
+ isp1760_udc_set(udc, DC_STALL);
+ else
+ isp1760_udc_clear(udc, DC_STALL);
if (ep->addr == 0) {
/* When halting the control endpoint, stall both IN and OUT. */
- __isp1760_udc_select_ep(ep, USB_DIR_IN);
- isp1760_udc_write(udc, DC_CTRLFUNC, halt ? DC_STALL : 0);
+ __isp1760_udc_select_ep(udc, ep, USB_DIR_IN);
+ if (halt)
+ isp1760_udc_set(udc, DC_STALL);
+ else
+ isp1760_udc_clear(udc, DC_STALL);
} else if (!halt) {
/* Reset the data PID by cycling the endpoint enable bit. */
- u16 eptype = isp1760_udc_read(udc, DC_EPTYPE);
-
- isp1760_udc_write(udc, DC_EPTYPE, eptype & ~DC_EPENABLE);
- isp1760_udc_write(udc, DC_EPTYPE, eptype);
+ isp1760_udc_clear(udc, DC_EPENABLE);
+ isp1760_udc_set(udc, DC_EPENABLE);
/*
* Disabling the endpoint emptied the transmit FIFO, fill it
@@ -479,12 +542,14 @@ static int isp1760_udc_get_status(struct isp1760_udc *udc,
return -EINVAL;
}
- isp1760_udc_write(udc, DC_EPINDEX, DC_ENDPIDX(0) | DC_EPDIR);
+ isp1760_udc_set(udc, DC_EPDIR);
+ isp1760_udc_write(udc, DC_ENDPIDX, 1);
+
isp1760_udc_write(udc, DC_BUFLEN, 2);
- writew(cpu_to_le16(status), udc->regs + DC_DATAPORT);
+ isp1760_udc_write_raw16(udc, ISP176x_DC_DATAPORT, status);
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_DSEN);
+ isp1760_udc_set(udc, DC_DSEN);
dev_dbg(udc->isp->dev, "%s: status 0x%04x\n", __func__, status);
@@ -508,7 +573,8 @@ static int isp1760_udc_set_address(struct isp1760_udc *udc, u16 addr)
usb_gadget_set_state(&udc->gadget, addr ? USB_STATE_ADDRESS :
USB_STATE_DEFAULT);
- isp1760_udc_write(udc, DC_ADDRESS, DC_DEVEN | addr);
+ isp1760_udc_write(udc, DC_DEVADDR, addr);
+ isp1760_udc_set(udc, DC_DEVEN);
spin_lock(&udc->lock);
isp1760_udc_ctrl_send_status(&udc->ep[0], USB_DIR_OUT);
@@ -650,9 +716,9 @@ static void isp1760_ep0_setup(struct isp1760_udc *udc)
spin_lock(&udc->lock);
- isp1760_udc_write(udc, DC_EPINDEX, DC_EP0SETUP);
+ isp1760_udc_set(udc, DC_EP0SETUP);
- count = isp1760_udc_read(udc, DC_BUFLEN) & DC_DATACOUNT_MASK;
+ count = isp1760_udc_read(udc, DC_BUFLEN);
if (count != sizeof(req)) {
spin_unlock(&udc->lock);
@@ -663,8 +729,8 @@ static void isp1760_ep0_setup(struct isp1760_udc *udc)
return;
}
- req.data[0] = isp1760_udc_read(udc, DC_DATAPORT);
- req.data[1] = isp1760_udc_read(udc, DC_DATAPORT);
+ req.data[0] = isp1760_udc_read_raw(udc, ISP176x_DC_DATAPORT);
+ req.data[1] = isp1760_udc_read_raw(udc, ISP176x_DC_DATAPORT);
if (udc->ep0_state != ISP1760_CTRL_SETUP) {
spin_unlock(&udc->lock);
@@ -732,13 +798,13 @@ static int isp1760_ep_enable(struct usb_ep *ep,
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_ISOC:
- type = DC_ENDPTYP_ISOC;
+ type = ISP176x_DC_ENDPTYP_ISOC;
break;
case USB_ENDPOINT_XFER_BULK:
- type = DC_ENDPTYP_BULK;
+ type = ISP176x_DC_ENDPTYP_BULK;
break;
case USB_ENDPOINT_XFER_INT:
- type = DC_ENDPTYP_INTERRUPT;
+ type = ISP176x_DC_ENDPTYP_INTERRUPT;
break;
case USB_ENDPOINT_XFER_CONTROL:
default:
@@ -755,10 +821,13 @@ static int isp1760_ep_enable(struct usb_ep *ep,
uep->halted = false;
uep->wedged = false;
- isp1760_udc_select_ep(uep);
- isp1760_udc_write(udc, DC_EPMAXPKTSZ, uep->maxpacket);
+ isp1760_udc_select_ep(udc, uep);
+
+ isp1760_udc_write(udc, DC_FFOSZ, uep->maxpacket);
isp1760_udc_write(udc, DC_BUFLEN, uep->maxpacket);
- isp1760_udc_write(udc, DC_EPTYPE, DC_EPENABLE | type);
+
+ isp1760_udc_write(udc, DC_ENDPTYP, type);
+ isp1760_udc_set(udc, DC_EPENABLE);
spin_unlock_irqrestore(&udc->lock, flags);
@@ -786,8 +855,9 @@ static int isp1760_ep_disable(struct usb_ep *ep)
uep->desc = NULL;
uep->maxpacket = 0;
- isp1760_udc_select_ep(uep);
- isp1760_udc_write(udc, DC_EPTYPE, 0);
+ isp1760_udc_select_ep(udc, uep);
+ isp1760_udc_clear(udc, DC_EPENABLE);
+ isp1760_udc_clear(udc, DC_ENDPTYP);
/* TODO Synchronize with the IRQ handler */
@@ -864,8 +934,8 @@ static int isp1760_ep_queue(struct usb_ep *ep, struct usb_request *_req,
case ISP1760_CTRL_DATA_OUT:
list_add_tail(&req->queue, &uep->queue);
- __isp1760_udc_select_ep(uep, USB_DIR_OUT);
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_DSEN);
+ __isp1760_udc_select_ep(udc, uep, USB_DIR_OUT);
+ isp1760_udc_set(udc, DC_DSEN);
break;
case ISP1760_CTRL_STATUS:
@@ -1025,14 +1095,14 @@ static void isp1760_ep_fifo_flush(struct usb_ep *ep)
spin_lock_irqsave(&udc->lock, flags);
- isp1760_udc_select_ep(uep);
+ isp1760_udc_select_ep(udc, uep);
/*
* Set the CLBUF bit twice to flush both buffers in case double
* buffering is enabled.
*/
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_CLBUF);
- isp1760_udc_write(udc, DC_CTRLFUNC, DC_CLBUF);
+ isp1760_udc_set(udc, DC_CLBUF);
+ isp1760_udc_set(udc, DC_CLBUF);
spin_unlock_irqrestore(&udc->lock, flags);
}
@@ -1082,6 +1152,10 @@ static void isp1760_udc_disconnect(struct isp1760_udc *udc)
static void isp1760_udc_init_hw(struct isp1760_udc *udc)
{
+ u32 intconf = udc->is_isp1763 ? ISP1763_DC_INTCONF : ISP176x_DC_INTCONF;
+ u32 intena = udc->is_isp1763 ? ISP1763_DC_INTENABLE :
+ ISP176x_DC_INTENABLE;
+
/*
* The device controller currently shares its interrupt with the host
* controller, the DC_IRQ polarity and signaling mode are ignored. Set
@@ -1091,19 +1165,22 @@ static void isp1760_udc_init_hw(struct isp1760_udc *udc)
* ACK tokens only (and NYET for the out pipe). The default
* configuration also generates an interrupt on the first NACK token.
*/
- isp1760_udc_write(udc, DC_INTCONF, DC_CDBGMOD_ACK | DC_DDBGMODIN_ACK |
- DC_DDBGMODOUT_ACK_NYET);
-
- isp1760_udc_write(udc, DC_INTENABLE, DC_IEPRXTX(7) | DC_IEPRXTX(6) |
- DC_IEPRXTX(5) | DC_IEPRXTX(4) | DC_IEPRXTX(3) |
- DC_IEPRXTX(2) | DC_IEPRXTX(1) | DC_IEPRXTX(0) |
- DC_IEP0SETUP | DC_IEVBUS | DC_IERESM | DC_IESUSP |
- DC_IEHS_STA | DC_IEBRST);
+ isp1760_reg_write(udc->regs, intconf,
+ ISP176x_DC_CDBGMOD_ACK | ISP176x_DC_DDBGMODIN_ACK |
+ ISP176x_DC_DDBGMODOUT_ACK);
+
+ isp1760_reg_write(udc->regs, intena, DC_IEPRXTX(7) |
+ DC_IEPRXTX(6) | DC_IEPRXTX(5) | DC_IEPRXTX(4) |
+ DC_IEPRXTX(3) | DC_IEPRXTX(2) | DC_IEPRXTX(1) |
+ DC_IEPRXTX(0) | ISP176x_DC_IEP0SETUP |
+ ISP176x_DC_IEVBUS | ISP176x_DC_IERESM |
+ ISP176x_DC_IESUSP | ISP176x_DC_IEHS_STA |
+ ISP176x_DC_IEBRST);
if (udc->connected)
isp1760_set_pullup(udc->isp, true);
- isp1760_udc_write(udc, DC_ADDRESS, DC_DEVEN);
+ isp1760_udc_set(udc, DC_DEVEN);
}
static void isp1760_udc_reset(struct isp1760_udc *udc)
@@ -1152,7 +1229,7 @@ static int isp1760_udc_get_frame(struct usb_gadget *gadget)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
- return isp1760_udc_read(udc, DC_FRAMENUM) & ((1 << 11) - 1);
+ return isp1760_udc_read(udc, DC_FRAMENUM);
}
static int isp1760_udc_wakeup(struct usb_gadget *gadget)
@@ -1219,7 +1296,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
usb_gadget_set_state(&udc->gadget, USB_STATE_ATTACHED);
/* DMA isn't supported yet, don't enable the DMA clock. */
- isp1760_udc_write(udc, DC_MODE, DC_GLINTENA);
+ isp1760_udc_set(udc, DC_GLINTENA);
isp1760_udc_init_hw(udc);
@@ -1232,13 +1309,14 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
static int isp1760_udc_stop(struct usb_gadget *gadget)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
+ u32 mode_reg = udc->is_isp1763 ? ISP1763_DC_MODE : ISP176x_DC_MODE;
unsigned long flags;
dev_dbg(udc->isp->dev, "%s\n", __func__);
del_timer_sync(&udc->vbus_timer);
- isp1760_udc_write(udc, DC_MODE, 0);
+ isp1760_reg_write(udc->regs, mode_reg, 0);
spin_lock_irqsave(&udc->lock, flags);
udc->driver = NULL;
@@ -1260,15 +1338,30 @@ static const struct usb_gadget_ops isp1760_udc_ops = {
* Interrupt Handling
*/
+static u32 isp1760_udc_irq_get_status(struct isp1760_udc *udc)
+{
+ u32 status;
+
+ if (udc->is_isp1763) {
+ status = isp1760_reg_read(udc->regs, ISP1763_DC_INTERRUPT)
+ & isp1760_reg_read(udc->regs, ISP1763_DC_INTENABLE);
+ isp1760_reg_write(udc->regs, ISP1763_DC_INTERRUPT, status);
+ } else {
+ status = isp1760_reg_read(udc->regs, ISP176x_DC_INTERRUPT)
+ & isp1760_reg_read(udc->regs, ISP176x_DC_INTENABLE);
+ isp1760_reg_write(udc->regs, ISP176x_DC_INTERRUPT, status);
+ }
+
+ return status;
+}
+
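isp1760_udc_irq_get_status() masks the raw interrupt register with the enable mask and writes the result back, acknowledging exactly the events that are about to be handled while leaving masked sources untouched. A sketch of how the per-endpoint bits could then be dispatched, assuming DC_IEPRXTX(n) carries one RX/TX event bit per endpoint index as used in isp1760_udc_init_hw():

    for (i = 0; i < 8; ++i) {
            if (status & DC_IEPRXTX(i))
                    ;       /* dispatch RX/TX handling for endpoint index i */
    }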
static irqreturn_t isp1760_udc_irq(int irq, void *dev)
{
struct isp1760_udc *udc = dev;
unsigned int i;
u32 status;
- status = isp1760_udc_read(udc, DC_INTERRUPT)
- & isp1760_udc_read(udc, DC_INTENABLE);
- isp1760_udc_write(udc, DC_INTERRUPT, status);
+ status = isp1760_udc_irq_get_status(udc);
if (status & DC_IEVBUS) {
dev_dbg(udc->isp->dev, "%s(VBUS)\n", __func__);
@@ -1313,7 +1406,7 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
dev_dbg(udc->isp->dev, "%s(SUSP)\n", __func__);
spin_lock(&udc->lock);
- if (!(isp1760_udc_read(udc, DC_MODE) & DC_VBUSSTAT))
+ if (!isp1760_udc_is_set(udc, DC_VBUSSTAT))
isp1760_udc_disconnect(udc);
else
isp1760_udc_suspend(udc);
@@ -1335,7 +1428,7 @@ static void isp1760_udc_vbus_poll(struct timer_list *t)
spin_lock_irqsave(&udc->lock, flags);
- if (!(isp1760_udc_read(udc, DC_MODE) & DC_VBUSSTAT))
+ if (!(isp1760_udc_is_set(udc, DC_VBUSSTAT)))
isp1760_udc_disconnect(udc);
else if (udc->gadget.state >= USB_STATE_POWERED)
mod_timer(&udc->vbus_timer,
@@ -1403,6 +1496,7 @@ static void isp1760_udc_init_eps(struct isp1760_udc *udc)
static int isp1760_udc_init(struct isp1760_udc *udc)
{
+ u32 mode_reg = udc->is_isp1763 ? ISP1763_DC_MODE : ISP176x_DC_MODE;
u16 scratch;
u32 chipid;
@@ -1413,7 +1507,8 @@ static int isp1760_udc_init(struct isp1760_udc *udc)
* and scratch register contents must match the expected values.
*/
isp1760_udc_write(udc, DC_SCRATCH, 0xbabe);
- chipid = isp1760_udc_read(udc, DC_CHIPID);
+ chipid = isp1760_udc_read(udc, DC_CHIP_ID_HIGH) << 16;
+ chipid |= isp1760_udc_read(udc, DC_CHIP_ID_LOW);
scratch = isp1760_udc_read(udc, DC_SCRATCH);
if (scratch != 0xbabe) {
@@ -1423,15 +1518,16 @@ static int isp1760_udc_init(struct isp1760_udc *udc)
return -ENODEV;
}
- if (chipid != 0x00011582 && chipid != 0x00158210) {
+ if (chipid != 0x00011582 && chipid != 0x00158210 &&
+ chipid != 0x00176320) {
dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
return -ENODEV;
}
/* Reset the device controller. */
- isp1760_udc_write(udc, DC_MODE, DC_SFRESET);
+ isp1760_udc_set(udc, DC_SFRESET);
usleep_range(10000, 11000);
- isp1760_udc_write(udc, DC_MODE, 0);
+ isp1760_reg_write(udc->regs, mode_reg, 0);
usleep_range(10000, 11000);
return 0;
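The chip ID is now assembled from two 16-bit reads; as a worked example derived from the check above, the ISP1763 is expected to report 0x0017 in DC_CHIP_ID_HIGH and 0x6320 in DC_CHIP_ID_LOW:

    /* Worked example: ISP1763 identification. */
    chipid = 0x0017 << 16;   /* DC_CHIP_ID_HIGH */
    chipid |= 0x6320;        /* DC_CHIP_ID_LOW  */
    /* chipid == 0x00176320, one of the IDs accepted above */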
@@ -1445,7 +1541,6 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
udc->irq = -1;
udc->isp = isp;
- udc->regs = isp->regs;
spin_lock_init(&udc->lock);
timer_setup(&udc->vbus_timer, isp1760_udc_vbus_poll, 0);
diff --git a/drivers/usb/isp1760/isp1760-udc.h b/drivers/usb/isp1760/isp1760-udc.h
index d2df650d54e9..22044e86bc0e 100644
--- a/drivers/usb/isp1760/isp1760-udc.h
+++ b/drivers/usb/isp1760/isp1760-udc.h
@@ -2,10 +2,12 @@
/*
* Driver for the NXP ISP1761 device controller
*
+ * Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Ideas on Board Oy
*
* Contacts:
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Rui Miguel Silva <rui.silva@linaro.org>
*/
#ifndef _ISP1760_UDC_H_
@@ -17,6 +19,8 @@
#include <linux/timer.h>
#include <linux/usb/gadget.h>
+#include "isp1760-regs.h"
+
struct isp1760_device;
struct isp1760_udc;
@@ -48,7 +52,7 @@ struct isp1760_ep {
* struct isp1760_udc - UDC state information
* irq: IRQ number
* irqname: IRQ name (as passed to request_irq)
- * regs: Base address of the UDC registers
+ * regs: regmap for UDC registers
* driver: Gadget driver
* gadget: Gadget device
* lock: Protects driver, vbus_timer, ep, ep0_*, DC_EPINDEX register
@@ -59,12 +63,13 @@ struct isp1760_ep {
* connected: Tracks gadget driver bus connection state
*/
struct isp1760_udc {
-#ifdef CONFIG_USB_ISP1761_UDC
struct isp1760_device *isp;
int irq;
char *irqname;
- void __iomem *regs;
+
+ struct regmap *regs;
+ struct regmap_field *fields[DC_FIELD_MAX];
struct usb_gadget_driver *driver;
struct usb_gadget gadget;
@@ -79,9 +84,9 @@ struct isp1760_udc {
u16 ep0_length;
bool connected;
+ bool is_isp1763;
unsigned int devstatus;
-#endif
};
#ifdef CONFIG_USB_ISP1761_UDC
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 8a3d9c0c8d8b..e5a8fcdbb78e 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2098,7 +2098,6 @@ more:{
} else
d += sprintf(d, " ..");
bytes_read += 1;
- continue;
}
goto more;
} else if (packet_bytes > 1) {
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index aef0a0bba25a..5546b868b08b 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -10,6 +10,7 @@
#ifndef __MTU3_H__
#define __MTU3_H__
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/extcon.h>
@@ -21,6 +22,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
struct mtu3;
struct mtu3_ep;
@@ -88,6 +90,8 @@ struct mtu3_request;
*/
#define EP0_RESPONSE_BUF 6
+#define BULK_CLKS_CNT 4
+
/* device operated link and speed got from DEVICE_CONF register */
enum mtu3_speed {
MTU3_SPEED_INACTIVE = 0,
@@ -192,13 +196,9 @@ struct mtu3_gpd_ring {
/**
* @vbus: vbus 5V used by host mode
* @edev: external connector used to detect vbus and iddig changes
-* @vbus_nb: notifier for vbus detection
-* @vbus_work : work of vbus detection notifier, used to avoid sleep in
-* notifier callback which is atomic context
-* @vbus_event : event of vbus detecion notifier
* @id_nb : notifier for iddig(idpin) detection
-* @id_work : work of iddig detection notifier
-* @id_event : event of iddig detecion notifier
+* @dr_work : work for dual-role (DRD) mode switching, used to avoid sleeping in atomic context
+* @desired_role : the role to switch to
* @role_sw : use USB Role Switch to support dual-role switch, can't use
* extcon at the same time, and extcon is deprecated.
* @role_sw_used : true when the USB Role Switch is used.
@@ -209,12 +209,9 @@ struct mtu3_gpd_ring {
struct otg_switch_mtk {
struct regulator *vbus;
struct extcon_dev *edev;
- struct notifier_block vbus_nb;
- struct work_struct vbus_work;
- unsigned long vbus_event;
struct notifier_block id_nb;
- struct work_struct id_work;
- unsigned long id_event;
+ struct work_struct dr_work;
+ enum usb_role desired_role;
struct usb_role_switch *role_sw;
bool role_sw_used;
bool is_u3_drd;
@@ -225,10 +222,6 @@ struct otg_switch_mtk {
* @mac_base: register base address of device MAC, exclude xHCI's
* @ippc_base: register base address of IP Power and Clock interface (IPPC)
* @vusb33: usb3.3V shared by device/host IP
- * @sys_clk: system clock of mtu3, shared by device/host IP
- * @ref_clk: reference clock
- * @mcu_clk: mcu_bus_ck clock for AHB bus etc
- * @dma_clk: dma_bus_ck clock for AXI bus etc
* @dr_mode: works in which mode:
* host only, device only or dual-role mode
* @u2_ports: number of usb2.0 host ports
@@ -250,10 +243,7 @@ struct ssusb_mtk {
int num_phys;
/* common power & clock */
struct regulator *vusb33;
- struct clk *sys_clk;
- struct clk *ref_clk;
- struct clk *mcu_clk;
- struct clk *dma_clk;
+ struct clk_bulk_data clks[BULK_CLKS_CNT];
/* otg */
struct otg_switch_mtk otg_switch;
enum usb_dr_mode dr_mode;
@@ -422,11 +412,9 @@ int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
int interval, int burst, int mult);
void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set);
-void mtu3_ep0_setup(struct mtu3 *mtu);
void mtu3_start(struct mtu3 *mtu);
void mtu3_stop(struct mtu3 *mtu);
void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
-void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed);
int mtu3_gadget_setup(struct mtu3 *mtu);
void mtu3_gadget_cleanup(struct mtu3 *mtu);
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index b3b459937566..562f4357831e 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -207,7 +207,7 @@ static void mtu3_intr_enable(struct mtu3 *mtu)
mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR);
}
-void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed)
+static void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed)
{
void __iomem *mbase = mtu->mac_base;
@@ -334,6 +334,10 @@ void mtu3_start(struct mtu3 *mtu)
mtu3_readl(mbase, U3D_DEVICE_CONTROL));
mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+ if (mtu->is_u3_ip)
+ mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0), SSUSB_U3_PORT_PDN);
+
+ mtu3_clrbits(mtu->ippc_base, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_PDN);
mtu3_csr_init(mtu);
mtu3_set_speed(mtu, mtu->speed);
@@ -356,6 +360,11 @@ void mtu3_stop(struct mtu3 *mtu)
mtu3_dev_on_off(mtu, 0);
mtu->is_active = 0;
+
+ if (mtu->is_u3_ip)
+ mtu3_setbits(mtu->ippc_base, SSUSB_U3_CTRL(0), SSUSB_U3_PORT_PDN);
+
+ mtu3_setbits(mtu->ippc_base, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_PDN);
mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
@@ -536,7 +545,7 @@ static void get_ep_fifo_config(struct mtu3 *mtu)
rx_fifo->base, rx_fifo->limit);
}
-void mtu3_ep0_setup(struct mtu3 *mtu)
+static void mtu3_ep0_setup(struct mtu3 *mtu)
{
u32 maxpacket = mtu->g.ep0->maxpacket;
u32 csr;
@@ -921,16 +930,15 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
device_init_wakeup(dev, true);
+ /* power down device IP for power saving by default */
+ mtu3_stop(mtu);
+
ret = mtu3_gadget_setup(mtu);
if (ret) {
dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
goto gadget_err;
}
- /* init as host mode, power down device IP for power saving */
- if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
- mtu3_stop(mtu);
-
ssusb_dev_debugfs_init(ssusb);
dev_dbg(dev, " %s() done...\n", __func__);
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index 7537bfd651af..d27de647c86a 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -30,6 +30,7 @@ static const struct debugfs_reg32 mtu3_ippc_regs[] = {
dump_register(SSUSB_IP_PW_CTRL1),
dump_register(SSUSB_IP_PW_CTRL2),
dump_register(SSUSB_IP_PW_CTRL3),
+ dump_register(SSUSB_IP_PW_STS1),
dump_register(SSUSB_OTG_STS),
dump_register(SSUSB_IP_XHCI_CAP),
dump_register(SSUSB_IP_DEV_CAP),
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 04f666e85731..318fbc618137 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -7,8 +7,6 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
-#include <linux/usb/role.h>
-
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
@@ -16,27 +14,9 @@
#define USB2_PORT 2
#define USB3_PORT 3
-enum mtu3_vbus_id_state {
- MTU3_ID_FLOAT = 1,
- MTU3_ID_GROUND,
- MTU3_VBUS_OFF,
- MTU3_VBUS_VALID,
-};
-
-static char *mailbox_state_string(enum mtu3_vbus_id_state state)
+static inline struct ssusb_mtk *otg_sx_to_ssusb(struct otg_switch_mtk *otg_sx)
{
- switch (state) {
- case MTU3_ID_FLOAT:
- return "ID_FLOAT";
- case MTU3_ID_GROUND:
- return "ID_GROUND";
- case MTU3_VBUS_OFF:
- return "VBUS_OFF";
- case MTU3_VBUS_VALID:
- return "VBUS_VALID";
- default:
- return "UNKNOWN";
- }
+ return container_of(otg_sx, struct ssusb_mtk, otg_switch);
}
static void toggle_opstate(struct ssusb_mtk *ssusb)
@@ -123,8 +103,7 @@ static void switch_port_to_device(struct ssusb_mtk *ssusb)
int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
{
- struct ssusb_mtk *ssusb =
- container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct regulator *vbus = otg_sx->vbus;
int ret;
@@ -147,99 +126,72 @@ int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
return 0;
}
-/*
- * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND
- * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID
- */
-static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
- enum mtu3_vbus_id_state status)
+static void ssusb_mode_sw_work(struct work_struct *work)
{
- struct ssusb_mtk *ssusb =
- container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct otg_switch_mtk *otg_sx =
+ container_of(work, struct otg_switch_mtk, dr_work);
+ struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct mtu3 *mtu = ssusb->u3d;
+ enum usb_role desired_role = otg_sx->desired_role;
+ enum usb_role current_role;
+
+ current_role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+
+ if (desired_role == USB_ROLE_NONE)
+ desired_role = USB_ROLE_HOST;
- dev_dbg(ssusb->dev, "mailbox %s\n", mailbox_state_string(status));
- mtu3_dbg_trace(ssusb->dev, "mailbox %s", mailbox_state_string(status));
+ if (current_role == desired_role)
+ return;
+
+ dev_dbg(ssusb->dev, "set role : %s\n", usb_role_string(desired_role));
+ mtu3_dbg_trace(ssusb->dev, "set role : %s", usb_role_string(desired_role));
- switch (status) {
- case MTU3_ID_GROUND:
+ switch (desired_role) {
+ case USB_ROLE_HOST:
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ mtu3_stop(mtu);
switch_port_to_host(ssusb);
ssusb_set_vbus(otg_sx, 1);
ssusb->is_host = true;
break;
- case MTU3_ID_FLOAT:
+ case USB_ROLE_DEVICE:
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
ssusb->is_host = false;
ssusb_set_vbus(otg_sx, 0);
switch_port_to_device(ssusb);
- break;
- case MTU3_VBUS_OFF:
- mtu3_stop(mtu);
- pm_relax(ssusb->dev);
- break;
- case MTU3_VBUS_VALID:
- /* avoid suspend when works as device */
- pm_stay_awake(ssusb->dev);
mtu3_start(mtu);
break;
+ case USB_ROLE_NONE:
default:
- dev_err(ssusb->dev, "invalid state\n");
+ dev_err(ssusb->dev, "invalid role\n");
}
}
-static void ssusb_id_work(struct work_struct *work)
+static void ssusb_set_mode(struct otg_switch_mtk *otg_sx, enum usb_role role)
{
- struct otg_switch_mtk *otg_sx =
- container_of(work, struct otg_switch_mtk, id_work);
+ struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
- if (otg_sx->id_event)
- ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
- else
- ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
-}
-
-static void ssusb_vbus_work(struct work_struct *work)
-{
- struct otg_switch_mtk *otg_sx =
- container_of(work, struct otg_switch_mtk, vbus_work);
+ if (ssusb->dr_mode != USB_DR_MODE_OTG)
+ return;
- if (otg_sx->vbus_event)
- ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
- else
- ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+ otg_sx->desired_role = role;
+ queue_work(system_freezable_wq, &otg_sx->dr_work);
}
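ssusb_set_mode() is safe to call from atomic context: it only records the desired role and queues dr_work, and the sleeping part of the switch runs later in ssusb_mode_sw_work() on system_freezable_wq. Usage sketch for an atomic caller such as the extcon notifier below:

    /* Sketch: request a role change without sleeping. */
    ssusb_set_mode(otg_sx, event ? USB_ROLE_HOST : USB_ROLE_DEVICE);
    /* returns immediately; the actual switch happens in the work item */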
-/*
- * @ssusb_id_notifier is called in atomic context, but @ssusb_set_mailbox
- * may sleep, so use work queue here
- */
static int ssusb_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct otg_switch_mtk *otg_sx =
container_of(nb, struct otg_switch_mtk, id_nb);
- otg_sx->id_event = event;
- schedule_work(&otg_sx->id_work);
-
- return NOTIFY_DONE;
-}
-
-static int ssusb_vbus_notifier(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct otg_switch_mtk *otg_sx =
- container_of(nb, struct otg_switch_mtk, vbus_nb);
-
- otg_sx->vbus_event = event;
- schedule_work(&otg_sx->vbus_work);
+ ssusb_set_mode(otg_sx, event ? USB_ROLE_HOST : USB_ROLE_DEVICE);
return NOTIFY_DONE;
}
static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
{
- struct ssusb_mtk *ssusb =
- container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
struct extcon_dev *edev = otg_sx->edev;
int ret;
@@ -247,14 +199,6 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
if (!edev)
return 0;
- otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
- ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB,
- &otg_sx->vbus_nb);
- if (ret < 0) {
- dev_err(ssusb->dev, "failed to register notifier for USB\n");
- return ret;
- }
-
otg_sx->id_nb.notifier_call = ssusb_id_notifier;
ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB_HOST,
&otg_sx->id_nb);
@@ -263,15 +207,12 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
return ret;
}
- dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
- extcon_get_state(edev, EXTCON_USB),
- extcon_get_state(edev, EXTCON_USB_HOST));
+ ret = extcon_get_state(edev, EXTCON_USB_HOST);
+ dev_dbg(ssusb->dev, "EXTCON_USB_HOST: %d\n", ret);
/* default as host, switch to device mode if needed */
- if (extcon_get_state(edev, EXTCON_USB_HOST) == false)
- ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
- if (extcon_get_state(edev, EXTCON_USB) == true)
- ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ if (!ret)
+ ssusb_set_mode(otg_sx, USB_ROLE_DEVICE);
return 0;
}
@@ -286,15 +227,7 @@ void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- if (to_host) {
- ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
- ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
- ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
- } else {
- ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
- ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
- ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
- }
+ ssusb_set_mode(otg_sx, to_host ? USB_ROLE_HOST : USB_ROLE_DEVICE);
}
void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
@@ -323,13 +256,9 @@ void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
static int ssusb_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
{
struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw);
- bool to_host = false;
-
- if (role == USB_ROLE_HOST)
- to_host = true;
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- if (to_host ^ ssusb->is_host)
- ssusb_mode_switch(ssusb, to_host);
+ ssusb_set_mode(otg_sx, role);
return 0;
}
@@ -337,18 +266,14 @@ static int ssusb_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
static enum usb_role ssusb_role_sw_get(struct usb_role_switch *sw)
{
struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw);
- enum usb_role role;
-
- role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
- return role;
+ return ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
}
static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx)
{
struct usb_role_switch_desc role_sx_desc = { 0 };
- struct ssusb_mtk *ssusb =
- container_of(otg_sx, struct ssusb_mtk, otg_switch);
+ struct ssusb_mtk *ssusb = otg_sx_to_ssusb(otg_sx);
if (!otg_sx->role_sw_used)
return 0;
@@ -367,8 +292,7 @@ int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
int ret = 0;
- INIT_WORK(&otg_sx->id_work, ssusb_id_work);
- INIT_WORK(&otg_sx->vbus_work, ssusb_vbus_work);
+ INIT_WORK(&otg_sx->dr_work, ssusb_mode_sw_work);
if (otg_sx->manual_drd_enabled)
ssusb_dr_debugfs_init(ssusb);
@@ -384,7 +308,6 @@ void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- cancel_work_sync(&otg_sx->id_work);
- cancel_work_sync(&otg_sx->vbus_work);
+ cancel_work_sync(&otg_sx->dr_work);
usb_role_switch_unregister(otg_sx->role_sw);
}
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 38f17d66d5bc..5e21ba05ebf0 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -577,7 +577,7 @@ mtu3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
dev_dbg(mtu->dev, "%s %s\n", __func__, usb_speed_string(speed));
spin_lock_irqsave(&mtu->lock, flags);
- mtu3_set_speed(mtu, speed);
+ mtu->speed = speed;
spin_unlock_irqrestore(&mtu->lock, flags);
}
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index 0a8cd446cf1b..93a1a4c11e1e 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -213,8 +213,6 @@ int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
static void ssusb_host_setup(struct ssusb_mtk *ssusb)
{
- struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
-
host_ports_num_get(ssusb);
/*
@@ -222,9 +220,7 @@ static void ssusb_host_setup(struct ssusb_mtk *ssusb)
* if support OTG, gadget driver will switch port0 to device mode
*/
ssusb_host_enable(ssusb);
-
- if (otg_sx->manual_drd_enabled)
- ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
/* if port0 supports dual-role, works as host mode by default */
ssusb_set_vbus(&ssusb->otg_switch, 1);
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 7786a95a874e..c0615f6e5cce 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -5,7 +5,6 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
-#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -101,54 +100,6 @@ static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
phy_power_off(ssusb->phys[i]);
}
-static int ssusb_clks_enable(struct ssusb_mtk *ssusb)
-{
- int ret;
-
- ret = clk_prepare_enable(ssusb->sys_clk);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable sys_clk\n");
- goto sys_clk_err;
- }
-
- ret = clk_prepare_enable(ssusb->ref_clk);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable ref_clk\n");
- goto ref_clk_err;
- }
-
- ret = clk_prepare_enable(ssusb->mcu_clk);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable mcu_clk\n");
- goto mcu_clk_err;
- }
-
- ret = clk_prepare_enable(ssusb->dma_clk);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable dma_clk\n");
- goto dma_clk_err;
- }
-
- return 0;
-
-dma_clk_err:
- clk_disable_unprepare(ssusb->mcu_clk);
-mcu_clk_err:
- clk_disable_unprepare(ssusb->ref_clk);
-ref_clk_err:
- clk_disable_unprepare(ssusb->sys_clk);
-sys_clk_err:
- return ret;
-}
-
-static void ssusb_clks_disable(struct ssusb_mtk *ssusb)
-{
- clk_disable_unprepare(ssusb->dma_clk);
- clk_disable_unprepare(ssusb->mcu_clk);
- clk_disable_unprepare(ssusb->ref_clk);
- clk_disable_unprepare(ssusb->sys_clk);
-}
-
static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
{
int ret = 0;
@@ -159,7 +110,7 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
goto vusb33_err;
}
- ret = ssusb_clks_enable(ssusb);
+ ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, ssusb->clks);
if (ret)
goto clks_err;
@@ -180,7 +131,7 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
phy_err:
ssusb_phy_exit(ssusb);
phy_init_err:
- ssusb_clks_disable(ssusb);
+ clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
clks_err:
regulator_disable(ssusb->vusb33);
vusb33_err:
@@ -189,7 +140,7 @@ vusb33_err:
static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
{
- ssusb_clks_disable(ssusb);
+ clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
regulator_disable(ssusb->vusb33);
ssusb_phy_power_off(ssusb);
ssusb_phy_exit(ssusb);
@@ -215,6 +166,7 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
{
struct device_node *node = pdev->dev.of_node;
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+ struct clk_bulk_data *clks = ssusb->clks;
struct device *dev = &pdev->dev;
int i;
int ret;
@@ -225,23 +177,13 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
return PTR_ERR(ssusb->vusb33);
}
- ssusb->sys_clk = devm_clk_get(dev, "sys_ck");
- if (IS_ERR(ssusb->sys_clk)) {
- dev_err(dev, "failed to get sys clock\n");
- return PTR_ERR(ssusb->sys_clk);
- }
-
- ssusb->ref_clk = devm_clk_get_optional(dev, "ref_ck");
- if (IS_ERR(ssusb->ref_clk))
- return PTR_ERR(ssusb->ref_clk);
-
- ssusb->mcu_clk = devm_clk_get_optional(dev, "mcu_ck");
- if (IS_ERR(ssusb->mcu_clk))
- return PTR_ERR(ssusb->mcu_clk);
-
- ssusb->dma_clk = devm_clk_get_optional(dev, "dma_ck");
- if (IS_ERR(ssusb->dma_clk))
- return PTR_ERR(ssusb->dma_clk);
+ clks[0].id = "sys_ck";
+ clks[1].id = "ref_ck";
+ clks[2].id = "mcu_ck";
+ clks[3].id = "dma_ck";
+ ret = devm_clk_bulk_get_optional(dev, BULK_CLKS_CNT, clks);
+ if (ret)
+ return ret;
ssusb->num_phys = of_count_phandle_with_args(node,
"phys", "#phy-cells");
@@ -299,11 +241,14 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
of_property_read_bool(node, "enable-manual-drd");
otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");
- if (!otg_sx->role_sw_used && of_property_read_bool(node, "extcon")) {
+ if (otg_sx->role_sw_used || otg_sx->manual_drd_enabled)
+ goto out;
+
+ if (of_property_read_bool(node, "extcon")) {
otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
if (IS_ERR(otg_sx->edev)) {
- dev_err(ssusb->dev, "couldn't get extcon device\n");
- return PTR_ERR(otg_sx->edev);
+ return dev_err_probe(dev, PTR_ERR(otg_sx->edev),
+ "couldn't get extcon device\n");
}
}
@@ -461,7 +406,7 @@ static int __maybe_unused mtu3_suspend(struct device *dev)
ssusb_host_disable(ssusb, true);
ssusb_phy_power_off(ssusb);
- ssusb_clks_disable(ssusb);
+ clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
ssusb_wakeup_set(ssusb, true);
return 0;
@@ -478,7 +423,7 @@ static int __maybe_unused mtu3_resume(struct device *dev)
return 0;
ssusb_wakeup_set(ssusb, false);
- ret = ssusb_clks_enable(ssusb);
+ ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, ssusb->clks);
if (ret)
goto clks_err;
@@ -491,7 +436,7 @@ static int __maybe_unused mtu3_resume(struct device *dev)
return 0;
phy_err:
- ssusb_clks_disable(ssusb);
+ clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
clks_err:
return ret;
}
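One clk_bulk_data array replaces the four separate clock pointers. devm_clk_bulk_get_optional() leaves any clock missing from the DT as a NULL pointer, which the bulk enable/disable helpers skip, so the same array works on SoCs that only provide sys_ck. A minimal probe-time sketch of the pattern:

    struct clk_bulk_data clks[BULK_CLKS_CNT] = {
            { .id = "sys_ck" }, { .id = "ref_ck" },
            { .id = "mcu_ck" }, { .id = "dma_ck" },
    };
    int ret;

    ret = devm_clk_bulk_get_optional(dev, BULK_CLKS_CNT, clks);
    if (ret)
            return ret;

    ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, clks);
    if (ret)
            return ret;
    /* ... and on the teardown/suspend path: */
    clk_bulk_disable_unprepare(BULK_CLKS_CNT, clks);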
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 4c8f0112481f..f7b1d5993f8c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -480,9 +480,7 @@ int musb_set_host(struct musb *musb)
devctl = musb_read_devctl(musb);
if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
- dev_info(musb->controller,
- "%s: already in host mode: %02x\n",
- __func__, devctl);
+ trace_musb_state(musb, devctl, "Already in host mode");
goto init_data;
}
@@ -499,6 +497,9 @@ int musb_set_host(struct musb *musb)
return error;
}
+ devctl = musb_read_devctl(musb);
+ trace_musb_state(musb, devctl, "Host mode set");
+
init_data:
musb->is_active = 1;
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
@@ -526,10 +527,7 @@ int musb_set_peripheral(struct musb *musb)
devctl = musb_read_devctl(musb);
if (devctl & MUSB_DEVCTL_BDEVICE) {
- dev_info(musb->controller,
- "%s: already in peripheral mode: %02x\n",
- __func__, devctl);
-
+ trace_musb_state(musb, devctl, "Already in peripheral mode");
goto init_data;
}
@@ -546,6 +544,9 @@ int musb_set_peripheral(struct musb *musb)
return error;
}
+ devctl = musb_read_devctl(musb);
+ trace_musb_state(musb, devctl, "Peripheral mode set");
+
init_data:
musb->is_active = 0;
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
@@ -1984,6 +1985,21 @@ ATTRIBUTE_GROUPS(musb);
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
+static bool musb_state_needs_recheck(struct musb *musb, u8 devctl,
+ const char *desc)
+{
+ if (musb->quirk_retries && !musb->flush_irq_work) {
+ trace_musb_state(musb, devctl, desc);
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ musb->quirk_retries--;
+
+ return true;
+ }
+
+ return false;
+}
+
/*
* Check the musb devctl session bit to determine if we want to
* allow PM runtime for the device. In general, we want to keep things
@@ -2004,35 +2020,21 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_DISCONNECT_99:
- if (musb->quirk_retries && !musb->flush_irq_work) {
- musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
- schedule_delayed_work(&musb->irq_work,
- msecs_to_jiffies(1000));
- musb->quirk_retries--;
- }
+ musb_state_needs_recheck(musb, devctl,
+ "Poll devctl in case of suspend after disconnect");
break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (musb->quirk_retries && !musb->flush_irq_work) {
- musb_dbg(musb,
- "Poll devctl on invalid vbus, assume no session");
- schedule_delayed_work(&musb->irq_work,
- msecs_to_jiffies(1000));
- musb->quirk_retries--;
+ if (musb_state_needs_recheck(musb, devctl,
+ "Poll devctl on invalid vbus, assume no session"))
return;
- }
fallthrough;
case MUSB_QUIRK_A_DISCONNECT_19:
- if (musb->quirk_retries && !musb->flush_irq_work) {
- musb_dbg(musb,
- "Poll devctl on possible host mode disconnect");
- schedule_delayed_work(&musb->irq_work,
- msecs_to_jiffies(1000));
- musb->quirk_retries--;
+ if (musb_state_needs_recheck(musb, devctl,
+ "Poll devctl on possible host mode disconnect"))
return;
- }
if (!musb->session)
break;
- musb_dbg(musb, "Allow PM on possible host mode disconnect");
+ trace_musb_state(musb, devctl, "Allow PM on possible host mode disconnect");
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
musb->session = false;
@@ -2048,14 +2050,23 @@ static void musb_pm_runtime_check_session(struct musb *musb)
/* Block PM or allow PM? */
if (s) {
- musb_dbg(musb, "Block PM on active session: %02x", devctl);
+ trace_musb_state(musb, devctl, "Block PM on active session");
error = pm_runtime_get_sync(musb->controller);
if (error < 0)
dev_err(musb->controller, "Could not enable: %i\n",
error);
musb->quirk_retries = 3;
+
+ /*
+ * We can get a spurious MUSB_INTR_SESSREQ interrupt on start-up
+ * in B-peripheral mode with nothing connected and the session
+ * bit clears silently. Check status again in 3 seconds.
+ */
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(3000));
} else {
- musb_dbg(musb, "Allow PM with no session: %02x", devctl);
+ trace_musb_state(musb, devctl, "Allow PM with no session");
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ef374d4dd94a..98c0f4c1bffd 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -611,7 +611,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* mode 0 only. So we do not get endpoint interrupts due to DMA
* completion. We only get interrupts from DMA controller.
*
- * We could operate in DMA mode 1 if we knew the size of the tranfer
+ * We could operate in DMA mode 1 if we knew the size of the transfer
* in advance. For mass storage class, request->length = what the host
* sends, so that'd work. But for pretty much everything else,
* request->length is routinely more than what the host sends. For
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 30c5e7de0761..9ff7d891b4b7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -563,10 +563,9 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
ep->rx_reinit = 0;
}
-static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
- struct musb_hw_ep *hw_ep, struct musb_qh *qh,
- struct urb *urb, u32 offset,
- u32 *length, u8 *mode)
+static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ u32 *length, u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
void __iomem *epio = hw_ep->regs;
@@ -602,12 +601,8 @@ static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
musb_writew(epio, MUSB_TXCSR, csr);
}
-static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
- struct musb_hw_ep *hw_ep,
- struct musb_qh *qh,
+static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
struct urb *urb,
- u32 offset,
- u32 *length,
u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
@@ -630,11 +625,10 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
u8 mode;
if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
- musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+ musb_tx_dma_set_mode_mentor(hw_ep, qh,
&length, &mode);
else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
- musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
- &length, &mode);
+ musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
else
return false;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 4804d4d85c15..63298bd233e0 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -61,10 +61,6 @@ extern void musb_host_tx(struct musb *, u8);
extern void musb_host_rx(struct musb *, u8);
extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_free(struct musb *);
-extern void musb_host_cleanup(struct musb *);
-extern void musb_host_tx(struct musb *, u8);
-extern void musb_host_rx(struct musb *, u8);
-extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_resume_root_hub(struct musb *musb);
extern void musb_host_poke_root_hub(struct musb *musb);
extern int musb_port_suspend(struct musb *musb, bool do_suspend);
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index 380ebc77eab1..ec28b5716796 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -37,6 +37,23 @@ TRACE_EVENT(musb_log,
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
+TRACE_EVENT(musb_state,
+ TP_PROTO(struct musb *musb, u8 devctl, const char *desc),
+ TP_ARGS(musb, devctl, desc),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(u8, devctl)
+ __string(desc, desc)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->devctl = devctl;
+ __assign_str(desc, desc);
+ ),
+ TP_printk("%s: devctl: %02x %s", __get_str(name), __entry->devctl,
+ __get_str(desc))
+);
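The new trace event records devctl together with a free-form description, so devctl transitions can be followed from the trace buffer instead of the kernel log. Usage sketch mirroring the call sites added earlier, with the runtime enable path assumed to be the standard tracefs layout:

    u8 devctl = musb_read_devctl(musb);

    trace_musb_state(musb, devctl, "Host mode set");
    /*
     * Enable at runtime (assumed path):
     *   echo 1 > /sys/kernel/tracing/events/musb/musb_state/enable
     */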
+
DECLARE_EVENT_CLASS(musb_regb,
TP_PROTO(void *caller, const void __iomem *addr,
unsigned int offset, u8 data),
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 4232f1ce3fbf..f086960fe2b5 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -33,6 +33,9 @@ struct omap2430_glue {
enum musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
struct device *control_otghs;
+ unsigned int is_runtime_suspended:1;
+ unsigned int needs_resume:1;
+ unsigned int phy_suspended:1;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
@@ -456,8 +459,12 @@ static int omap2430_runtime_suspend(struct device *dev)
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
- phy_exit(musb->phy);
+ if (!glue->phy_suspended) {
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ }
+
+ glue->is_runtime_suspended = 1;
return 0;
}
@@ -470,8 +477,10 @@ static int omap2430_runtime_resume(struct device *dev)
if (!musb)
return 0;
- phy_init(musb->phy);
- phy_power_on(musb->phy);
+ if (!glue->phy_suspended) {
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+ }
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -480,12 +489,68 @@ static int omap2430_runtime_resume(struct device *dev)
/* Wait for musb to get oriented. Otherwise we can get babble */
usleep_range(200000, 250000);
+ glue->is_runtime_suspended = 0;
+
+ return 0;
+}
+
+/* I2C and SPI PHYs need to be suspended before the glue layer */
+static int omap2430_suspend(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ glue->phy_suspended = 1;
+
+ return 0;
+}
+
+/* Glue layer needs to be suspended after musb_suspend() */
+static int omap2430_suspend_late(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+
+ if (glue->is_runtime_suspended)
+ return 0;
+
+ glue->needs_resume = 1;
+
+ return omap2430_runtime_suspend(dev);
+}
+
+static int omap2430_resume_early(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+
+ if (!glue->needs_resume)
+ return 0;
+
+ glue->needs_resume = 0;
+
+ return omap2430_runtime_resume(dev);
+}
+
+static int omap2430_resume(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+ glue->phy_suspended = 0;
+
return 0;
}
static const struct dev_pm_ops omap2430_pm_ops = {
.runtime_suspend = omap2430_runtime_suspend,
.runtime_resume = omap2430_runtime_resume,
+ .suspend = omap2430_suspend,
+ .suspend_late = omap2430_suspend_late,
+ .resume_early = omap2430_resume_early,
+ .resume = omap2430_resume,
};
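With both runtime PM and system sleep callbacks installed, the PHY is handled outside the runtime path during system sleep. Sketch of the resulting call order, following standard dev_pm_ops semantics:

    /*
     * System sleep ordering (sketch):
     *   omap2430_suspend()       - power off and exit the I2C/SPI PHY first
     *   omap2430_suspend_late()  - suspend the glue unless already runtime-suspended
     *   ...
     *   omap2430_resume_early()  - resume the glue if it was suspended late
     *   omap2430_resume()        - re-init and power on the PHY last
     */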
#define DEV_PM_OPS (&omap2430_pm_ops)
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 02bb7ddd4bd6..f3e9b3b6ac3e 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -555,7 +555,7 @@ pullup:
case OTG_STATE_A_PERIPHERAL:
if (otg_ctrl & OTG_PULLUP)
goto pullup;
- /* FALLTHROUGH */
+ fallthrough;
// case OTG_STATE_B_WAIT_ACON:
default:
pulldown:
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 6cf6fbd39237..ad3d57f1c273 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -142,24 +142,17 @@ static struct i2c_driver isp1301_driver = {
module_i2c_driver(isp1301_driver);
-static int match(struct device *dev, const void *data)
-{
- const struct device_node *node = (const struct device_node *)data;
- return (dev->of_node == node) &&
- (dev->driver == &isp1301_driver.driver);
-}
-
struct i2c_client *isp1301_get_client(struct device_node *node)
{
- if (node) { /* reference of ISP1301 I2C node via DT */
- struct device *dev = bus_find_device(&i2c_bus_type, NULL,
- node, match);
- if (!dev)
- return NULL;
- return to_i2c_client(dev);
- } else { /* non-DT: only one ISP1301 chip supported */
- return isp1301_i2c_client;
- }
+ struct i2c_client *client;
+
+ /* look up the ISP1301 via its DT node reference */
+ client = of_find_i2c_device_by_node(node);
+ if (client)
+ return client;
+
+ /* non-DT: only one ISP1301 chip supported */
+ return isp1301_i2c_client;
}
EXPORT_SYMBOL_GPL(isp1301_get_client);
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index a48452a6172b..c0f432d509aa 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -58,12 +58,12 @@
#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
#define USB_PHY_VBUS_SENSORS 0x404
-#define B_SESS_VLD_WAKEUP_EN BIT(6)
-#define B_VBUS_VLD_WAKEUP_EN BIT(14)
+#define B_SESS_VLD_WAKEUP_EN BIT(14)
#define A_SESS_VLD_WAKEUP_EN BIT(22)
#define A_VBUS_VLD_WAKEUP_EN BIT(30)
#define USB_PHY_VBUS_WAKEUP_ID 0x408
+#define VBUS_WAKEUP_STS BIT(10)
#define VBUS_WAKEUP_WAKEUP_EN BIT(30)
#define USB1_LEGACY_CTRL 0x410
@@ -544,7 +544,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN);
- val &= ~(B_VBUS_VLD_WAKEUP_EN | B_SESS_VLD_WAKEUP_EN);
+ val &= ~(B_SESS_VLD_WAKEUP_EN);
writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
@@ -642,6 +642,15 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
void __iomem *base = phy->regs;
u32 val;
+ /*
+ * Give hardware time to settle down after VBUS disconnection,
+ * otherwise PHY will immediately wake up from suspend.
+ */
+ if (phy->wakeup_enabled && phy->mode != USB_DR_MODE_HOST)
+ readl_relaxed_poll_timeout(base + USB_PHY_VBUS_WAKEUP_ID,
+ val, !(val & VBUS_WAKEUP_STS),
+ 5000, 100000);
+
utmi_phy_clk_disable(phy);
/* PHY won't resume if reset is asserted */
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index b47285f023cf..1b24492bb4e5 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -42,6 +42,12 @@ static const char *const usb_chger_type[] = {
[ACA_TYPE] = "USB_CHARGER_ACA_TYPE",
};
+static const char *const usb_chger_state[] = {
+ [USB_CHARGER_DEFAULT] = "USB_CHARGER_DEFAULT",
+ [USB_CHARGER_PRESENT] = "USB_CHARGER_PRESENT",
+ [USB_CHARGER_ABSENT] = "USB_CHARGER_ABSENT",
+};
+
static struct usb_phy *__usb_find_phy(struct list_head *list,
enum usb_phy_type type)
{
@@ -74,6 +80,18 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
return ERR_PTR(-EPROBE_DEFER);
}
+static struct usb_phy *__device_to_usb_phy(struct device *dev)
+{
+ struct usb_phy *usb_phy;
+
+ list_for_each_entry(usb_phy, &phy_list, head) {
+ if (usb_phy->dev == dev)
+ return usb_phy;
+ }
+
+ return NULL;
+}
+
static void usb_phy_set_default_current(struct usb_phy *usb_phy)
{
usb_phy->chg_cur.sdp_min = DEFAULT_SDP_CUR_MIN;
@@ -105,9 +123,6 @@ static void usb_phy_set_default_current(struct usb_phy *usb_phy)
static void usb_phy_notify_charger_work(struct work_struct *work)
{
struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
- char uchger_state[50] = { 0 };
- char uchger_type[50] = { 0 };
- char *envp[] = { uchger_state, uchger_type, NULL };
unsigned int min, max;
switch (usb_phy->chg_state) {
@@ -115,15 +130,11 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
usb_phy_get_charger_current(usb_phy, &min, &max);
atomic_notifier_call_chain(&usb_phy->notifier, max, usb_phy);
- snprintf(uchger_state, ARRAY_SIZE(uchger_state),
- "USB_CHARGER_STATE=%s", "USB_CHARGER_PRESENT");
break;
case USB_CHARGER_ABSENT:
usb_phy_set_default_current(usb_phy);
atomic_notifier_call_chain(&usb_phy->notifier, 0, usb_phy);
- snprintf(uchger_state, ARRAY_SIZE(uchger_state),
- "USB_CHARGER_STATE=%s", "USB_CHARGER_ABSENT");
break;
default:
dev_warn(usb_phy->dev, "Unknown USB charger state: %d\n",
@@ -131,9 +142,36 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
return;
}
+ kobject_uevent(&usb_phy->dev->kobj, KOBJ_CHANGE);
+}
+
+static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct usb_phy *usb_phy;
+ char uchger_state[50] = { 0 };
+ char uchger_type[50] = { 0 };
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+ usb_phy = __device_to_usb_phy(dev);
+ spin_unlock_irqrestore(&phy_lock, flags);
+
+ if (!usb_phy)
+ return -ENODEV;
+
+ snprintf(uchger_state, ARRAY_SIZE(uchger_state),
+ "USB_CHARGER_STATE=%s", usb_chger_state[usb_phy->chg_state]);
+
snprintf(uchger_type, ARRAY_SIZE(uchger_type),
"USB_CHARGER_TYPE=%s", usb_chger_type[usb_phy->chg_type]);
- kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);
+
+ if (add_uevent_var(env, uchger_state))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, uchger_type))
+ return -ENOMEM;
+
+ return 0;
}
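With the environment strings generated in the uevent callback, any KOBJ_CHANGE event on the PHY device now reports the current charger state and type. A sketch of the producer side; the exact type string is an assumption extrapolated from the ACA entry in usb_chger_type above:

    /* Sketch: after a standard downstream port (SDP) charger is detected. */
    usb_phy->chg_state = USB_CHARGER_PRESENT;
    usb_phy->chg_type = SDP_TYPE;
    kobject_uevent(&usb_phy->dev->kobj, KOBJ_CHANGE);
    /*
     * The uevent environment then carries:
     *   USB_CHARGER_STATE=USB_CHARGER_PRESENT
     *   USB_CHARGER_TYPE=USB_CHARGER_SDP_TYPE   (assumed string)
     */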
static void __usb_phy_get_charger_type(struct usb_phy *usb_phy)
@@ -661,6 +699,11 @@ out:
}
EXPORT_SYMBOL_GPL(usb_add_phy);
+static struct device_type usb_phy_dev_type = {
+ .name = "usb_phy",
+ .uevent = usb_phy_uevent,
+};
+
/**
* usb_add_phy_dev - declare the USB PHY
* @x: the USB phy to be used; or NULL
@@ -684,6 +727,8 @@ int usb_add_phy_dev(struct usb_phy *x)
if (ret)
return ret;
+ x->dev->type = &usb_phy_dev_type;
+
ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
spin_lock_irqsave(&phy_lock, flags);
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b5e7991dc7d9..a3c2b01ccf7b 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
+static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
+static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
if (chan) {
dmaengine_terminate_all(chan);
usbhsf_dma_unmap(pkt);
+ } else {
+ if (usbhs_pipe_is_dir_in(pipe))
+ usbhsf_rx_irq_ctrl(pipe, 0);
+ else
+ usbhsf_tx_irq_ctrl(pipe, 0);
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 33b637d0d8d9..dfaed7eee94f 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -214,6 +214,15 @@ static const char * const usb_roles[] = {
[USB_ROLE_DEVICE] = "device",
};
+const char *usb_role_string(enum usb_role role)
+{
+ if (role < 0 || role >= ARRAY_SIZE(usb_roles))
+ return "unknown";
+
+ return usb_roles[role];
+}
+EXPORT_SYMBOL_GPL(usb_role_string);
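usb_role_string() clamps out-of-range values to "unknown", so it can be used directly in log messages. Usage sketch (dev and role are placeholders):

    dev_dbg(dev, "switching role to %s\n", usb_role_string(role));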
+
static ssize_t
role_show(struct device *dev, struct device_attribute *attr, char *buf)
{
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index ed9193f3bb1a..8107e4b5b03b 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -356,25 +356,7 @@ static void belkin_sa_set_termios(struct tty_struct *tty,
/* set the number of data bits */
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
- switch (cflag & CSIZE) {
- case CS5:
- urb_value = BELKIN_SA_DATA_BITS(5);
- break;
- case CS6:
- urb_value = BELKIN_SA_DATA_BITS(6);
- break;
- case CS7:
- urb_value = BELKIN_SA_DATA_BITS(7);
- break;
- case CS8:
- urb_value = BELKIN_SA_DATA_BITS(8);
- break;
- default:
- dev_dbg(&port->dev,
- "CSIZE was not CS5-CS8, using default of 8\n");
- urb_value = BELKIN_SA_DATA_BITS(8);
- break;
- }
+ urb_value = BELKIN_SA_DATA_BITS(tty_get_char_size(cflag));
if (BSA_USB_CMD(BELKIN_SA_SET_DATA_BITS_REQUEST, urb_value) < 0)
dev_err(&port->dev, "Set data bits error\n");
}
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2db917eab799..8a521b5ea769 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -851,6 +851,7 @@ static struct usb_serial_driver ch341_device = {
.owner = THIS_MODULE,
.name = "ch341-uart",
},
+ .bulk_in_size = 512,
.id_table = id_table,
.num_ports = 1,
.open = ch341_open,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fcb812bc832c..3c80bfbf3bec 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -202,8 +203,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
- { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
- { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
+ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
+ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
{ USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
@@ -247,9 +248,9 @@ struct cp210x_serial_private {
#ifdef CONFIG_GPIOLIB
struct gpio_chip gc;
bool gpio_registered;
- u8 gpio_pushpull;
- u8 gpio_altfunc;
- u8 gpio_input;
+ u16 gpio_pushpull;
+ u16 gpio_altfunc;
+ u16 gpio_input;
#endif
u8 partnum;
u32 fw_version;
@@ -534,6 +535,48 @@ struct cp210x_single_port_config {
#define CP2104_GPIO1_RXLED_MODE BIT(1)
#define CP2104_GPIO2_RS485_MODE BIT(2)
+struct cp210x_quad_port_state {
+ __le16 gpio_mode_pb0;
+ __le16 gpio_mode_pb1;
+ __le16 gpio_mode_pb2;
+ __le16 gpio_mode_pb3;
+ __le16 gpio_mode_pb4;
+
+ __le16 gpio_lowpower_pb0;
+ __le16 gpio_lowpower_pb1;
+ __le16 gpio_lowpower_pb2;
+ __le16 gpio_lowpower_pb3;
+ __le16 gpio_lowpower_pb4;
+
+ __le16 gpio_latch_pb0;
+ __le16 gpio_latch_pb1;
+ __le16 gpio_latch_pb2;
+ __le16 gpio_latch_pb3;
+ __le16 gpio_latch_pb4;
+};
+
+/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0x49 bytes
+ * on a CP2108 chip.
+ *
+ * See https://www.silabs.com/documents/public/application-notes/an978-cp210x-usb-to-uart-api-specification.pdf
+ */
+struct cp210x_quad_port_config {
+ struct cp210x_quad_port_state reset_state;
+ struct cp210x_quad_port_state suspend_state;
+ u8 ipdelay_ifc[4];
+ u8 enhancedfxn_ifc[4];
+ u8 enhancedfxn_device;
+ u8 extclkfreq[4];
+} __packed;
+
+#define CP2108_EF_IFC_GPIO_TXLED 0x01
+#define CP2108_EF_IFC_GPIO_RXLED 0x02
+#define CP2108_EF_IFC_GPIO_RS485 0x04
+#define CP2108_EF_IFC_GPIO_RS485_LOGIC 0x08
+#define CP2108_EF_IFC_GPIO_CLOCK 0x10
+#define CP2108_EF_IFC_DYNAMIC_SUSPEND 0x40
+
/* CP2102N configuration array indices */
#define CP210X_2NCONFIG_CONFIG_VERSION_IDX 2
#define CP210X_2NCONFIG_GPIO_MODE_IDX 581
@@ -546,13 +589,25 @@ struct cp210x_single_port_config {
#define CP2102N_QFN20_GPIO1_RS485_MODE BIT(4)
#define CP2102N_QFN20_GPIO0_CLK_MODE BIT(6)
-/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
+/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x02 bytes
+ * for CP2102N, CP2103, CP2104 and CP2105.
+ */
struct cp210x_gpio_write {
u8 mask;
u8 state;
};
/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x04 bytes
+ * for CP2108.
+ */
+struct cp210x_gpio_write16 {
+ __le16 mask;
+ __le16 state;
+};
+
+/*
* Helper to get interface number when we only have struct usb_serial.
*/
static u8 cp210x_interface_num(struct usb_serial *serial)
@@ -1434,52 +1489,84 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
- u8 req_type = REQTYPE_DEVICE_TO_HOST;
+ u8 req_type;
+ u16 mask;
int result;
- u8 buf;
-
- if (priv->partnum == CP210X_PARTNUM_CP2105)
- req_type = REQTYPE_INTERFACE_TO_HOST;
+ int len;
result = usb_autopm_get_interface(serial->interface);
if (result)
return result;
- result = cp210x_read_vendor_block(serial, req_type,
- CP210X_READ_LATCH, &buf, sizeof(buf));
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2105:
+ req_type = REQTYPE_INTERFACE_TO_HOST;
+ len = 1;
+ break;
+ case CP210X_PARTNUM_CP2108:
+ req_type = REQTYPE_INTERFACE_TO_HOST;
+ len = 2;
+ break;
+ default:
+ req_type = REQTYPE_DEVICE_TO_HOST;
+ len = 1;
+ break;
+ }
+
+ mask = 0;
+ result = cp210x_read_vendor_block(serial, req_type, CP210X_READ_LATCH,
+ &mask, len);
+
usb_autopm_put_interface(serial->interface);
+
if (result < 0)
return result;
- return !!(buf & BIT(gpio));
+ le16_to_cpus(&mask);
+
+ return !!(mask & BIT(gpio));
}
static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ struct cp210x_gpio_write16 buf16;
struct cp210x_gpio_write buf;
+ u16 mask, state;
+ u16 wIndex;
int result;
if (value == 1)
- buf.state = BIT(gpio);
+ state = BIT(gpio);
else
- buf.state = 0;
+ state = 0;
- buf.mask = BIT(gpio);
+ mask = BIT(gpio);
result = usb_autopm_get_interface(serial->interface);
if (result)
goto out;
- if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2105:
+ buf.mask = (u8)mask;
+ buf.state = (u8)state;
result = cp210x_write_vendor_block(serial,
REQTYPE_HOST_TO_INTERFACE,
CP210X_WRITE_LATCH, &buf,
sizeof(buf));
- } else {
- u16 wIndex = buf.state << 8 | buf.mask;
-
+ break;
+ case CP210X_PARTNUM_CP2108:
+ buf16.mask = cpu_to_le16(mask);
+ buf16.state = cpu_to_le16(state);
+ result = cp210x_write_vendor_block(serial,
+ REQTYPE_HOST_TO_INTERFACE,
+ CP210X_WRITE_LATCH, &buf16,
+ sizeof(buf16));
+ break;
+ default:
+ wIndex = state << 8 | mask;
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
CP210X_VENDOR_SPECIFIC,
@@ -1487,6 +1574,7 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
CP210X_WRITE_LATCH,
wIndex,
NULL, 0, USB_CTRL_SET_TIMEOUT);
+ break;
}
usb_autopm_put_interface(serial->interface);
@@ -1696,6 +1784,61 @@ static int cp2104_gpioconf_init(struct usb_serial *serial)
return 0;
}
+static int cp2108_gpio_init(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ struct cp210x_quad_port_config config;
+ u16 gpio_latch;
+ int result;
+ u8 i;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PORTCONFIG, &config,
+ sizeof(config));
+ if (result < 0)
+ return result;
+
+ priv->gc.ngpio = 16;
+ priv->gpio_pushpull = le16_to_cpu(config.reset_state.gpio_mode_pb1);
+ gpio_latch = le16_to_cpu(config.reset_state.gpio_latch_pb1);
+
+ /*
+ * Mark all pins which are not in GPIO mode.
+ *
+ * Refer to table 9.1 "GPIO Mode alternate Functions" in the datasheet:
+ * https://www.silabs.com/documents/public/data-sheets/cp2108-datasheet.pdf
+ *
+ * Alternate functions of GPIO0 to GPIO3 are determined by enhancedfxn_ifc[0],
+ * and similarly for the other pins; enhancedfxn_ifc[1]: GPIO4 to GPIO7,
+ * enhancedfxn_ifc[2]: GPIO8 to GPIO11, enhancedfxn_ifc[3]: GPIO12 to GPIO15.
+ */
+ for (i = 0; i < 4; i++) {
+ if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_TXLED)
+ priv->gpio_altfunc |= BIT(i * 4);
+ if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_RXLED)
+ priv->gpio_altfunc |= BIT((i * 4) + 1);
+ if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_RS485)
+ priv->gpio_altfunc |= BIT((i * 4) + 2);
+ if (config.enhancedfxn_ifc[i] & CP2108_EF_IFC_GPIO_CLOCK)
+ priv->gpio_altfunc |= BIT((i * 4) + 3);
+ }
+
+ /*
+ * Like the CP2102N, the CP2108 also has no strict input and output pin
+ * modes. Do the same input-mode emulation as for the CP2102N.
+ */
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ /*
+ * Set direction to "input" iff pin is open-drain and reset
+ * value is 1.
+ */
+ if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i)))
+ priv->gpio_input |= BIT(i);
+ }
+
+ return 0;
+}
+
static int cp2102n_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -1812,6 +1955,15 @@ static int cp210x_gpio_init(struct usb_serial *serial)
case CP210X_PARTNUM_CP2105:
result = cp2105_gpioconf_init(serial);
break;
+ case CP210X_PARTNUM_CP2108:
+ /*
+ * The GPIOs are not tied to any specific port, so only register
+ * once for interface 0.
+ */
+ if (cp210x_interface_num(serial) != 0)
+ return 0;
+ result = cp2108_gpio_init(serial);
+ break;
case CP210X_PARTNUM_CP2102N_QFN28:
case CP210X_PARTNUM_CP2102N_QFN24:
case CP210X_PARTNUM_CP2102N_QFN20:
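Worked example of the 16-bit latch handling added in cp210x_gpio_get() above (illustration only, not part of the patch):

/*
 * On CP2108 the CP210X_READ_LATCH response is two bytes covering GPIO0-15.
 * A response of 0x34 0x12 yields mask == 0x1234 after le16_to_cpus(), so
 * cp210x_gpio_get(gc, 12) evaluates !!(0x1234 & BIT(12)) == 1.
 */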
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index cf389224d528..51e5aac3bf4c 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -53,7 +53,7 @@ static int cyberjack_open(struct tty_struct *tty,
static void cyberjack_close(struct usb_serial_port *port);
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
-static int cyberjack_write_room(struct tty_struct *tty);
+static unsigned int cyberjack_write_room(struct tty_struct *tty);
static void cyberjack_read_int_callback(struct urb *urb);
static void cyberjack_read_bulk_callback(struct urb *urb);
static void cyberjack_write_bulk_callback(struct urb *urb);
@@ -240,7 +240,7 @@ static int cyberjack_write(struct tty_struct *tty,
return count;
}
-static int cyberjack_write_room(struct tty_struct *tty)
+static unsigned int cyberjack_write_room(struct tty_struct *tty)
{
/* FIXME: .... */
return CYBERJACK_LOCAL_BUF_SIZE;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 166ee2286fda..6b18990258c3 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -122,14 +122,14 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on);
static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void cypress_send(struct usb_serial_port *port);
-static int cypress_write_room(struct tty_struct *tty);
+static unsigned int cypress_write_room(struct tty_struct *tty);
static void cypress_earthmate_init_termios(struct tty_struct *tty);
static void cypress_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int cypress_tiocmget(struct tty_struct *tty);
static int cypress_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static int cypress_chars_in_buffer(struct tty_struct *tty);
+static unsigned int cypress_chars_in_buffer(struct tty_struct *tty);
static void cypress_throttle(struct tty_struct *tty);
static void cypress_unthrottle(struct tty_struct *tty);
static void cypress_set_dead(struct usb_serial_port *port);
@@ -326,7 +326,7 @@ static int cypress_serial_control(struct tty_struct *tty,
/* fill the feature_buffer with new configuration */
put_unaligned_le32(new_baudrate, feature_buffer);
- feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */
+ feature_buffer[4] |= data_bits - 5; /* assign data bits in 2 bit space ( max 3 ) */
/* 1 bit gap */
feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
feature_buffer[4] |= (parity_enable << 4); /* assign parity flag in 1 bit space */
@@ -789,18 +789,18 @@ send:
/* returns how much space is available in the soft buffer */
-static int cypress_write_room(struct tty_struct *tty)
+static unsigned int cypress_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
- int room = 0;
+ unsigned int room;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
room = kfifo_avail(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
@@ -887,23 +887,8 @@ static void cypress_set_termios(struct tty_struct *tty,
} else
parity_enable = parity_type = 0;
- switch (cflag & CSIZE) {
- case CS5:
- data_bits = 0;
- break;
- case CS6:
- data_bits = 1;
- break;
- case CS7:
- data_bits = 2;
- break;
- case CS8:
- data_bits = 3;
- break;
- default:
- dev_err(dev, "%s - CSIZE was set, but not CS5-CS8\n", __func__);
- data_bits = 3;
- }
+ data_bits = tty_get_char_size(cflag);
+
spin_lock_irqsave(&priv->lock, flags);
oldlines = priv->line_control;
if ((cflag & CBAUD) == B0) {
@@ -970,18 +955,18 @@ static void cypress_set_termios(struct tty_struct *tty,
/* returns amount of data still left in soft buffer */
-static int cypress_chars_in_buffer(struct tty_struct *tty)
+static unsigned int cypress_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct cypress_private *priv = usb_get_serial_port_data(port);
- int chars = 0;
+ unsigned int chars;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
chars = kfifo_len(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
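Worked mapping for the tty_get_char_size() conversions above (illustration only, not part of the patch):

/*
 * tty_get_char_size(cflag) returns the word length 5..8 selected by CSIZE
 * (defaulting to 8), so the "data_bits - 5" in cypress_serial_control()
 * reproduces the old 2-bit encoding: CS5 -> 5 - 5 = 0, ..., CS8 -> 8 - 5 = 3.
 */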
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 8b2f06539f2c..af65eb863d70 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -223,8 +223,8 @@ static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void digi_write_bulk_callback(struct urb *urb);
-static int digi_write_room(struct tty_struct *tty);
-static int digi_chars_in_buffer(struct tty_struct *tty);
+static unsigned int digi_write_room(struct tty_struct *tty);
+static unsigned int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
@@ -372,7 +372,7 @@ static int digi_write_oob_command(struct usb_serial_port *port,
int len;
struct usb_serial_port *oob_port = (struct usb_serial_port *)((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
- unsigned long flags = 0;
+ unsigned long flags;
dev_dbg(&port->dev,
"digi_write_oob_command: TOP: port=%d, count=%d\n",
@@ -430,7 +430,7 @@ static int digi_write_inb_command(struct usb_serial_port *port,
int len;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *data = port->write_urb->transfer_buffer;
- unsigned long flags = 0;
+ unsigned long flags;
dev_dbg(&port->dev, "digi_write_inb_command: TOP: port=%d, count=%d\n",
priv->dp_port_num, count);
@@ -511,8 +511,7 @@ static int digi_set_modem_signals(struct usb_serial_port *port,
struct usb_serial_port *oob_port = (struct usb_serial_port *) ((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
unsigned char *data = oob_port->write_urb->transfer_buffer;
- unsigned long flags = 0;
-
+ unsigned long flags;
dev_dbg(&port->dev,
"digi_set_modem_signals: TOP: port=%d, modem_signals=0x%x\n",
@@ -577,7 +576,7 @@ static int digi_transmit_idle(struct usb_serial_port *port,
int ret;
unsigned char buf[2];
struct digi_port *priv = usb_get_serial_port_data(port);
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_transmit_idle = 0;
@@ -887,7 +886,7 @@ static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
int ret, data_len, new_len;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *data = port->write_urb->transfer_buffer;
- unsigned long flags = 0;
+ unsigned long flags;
dev_dbg(&port->dev, "digi_write: TOP: port=%d, count=%d\n",
priv->dp_port_num, count);
@@ -1020,12 +1019,12 @@ static void digi_write_bulk_callback(struct urb *urb)
tty_port_tty_wakeup(&port->port);
}
-static int digi_write_room(struct tty_struct *tty)
+static unsigned int digi_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
- int room;
- unsigned long flags = 0;
+ unsigned long flags;
+ unsigned int room;
spin_lock_irqsave(&priv->dp_port_lock, flags);
@@ -1035,27 +1034,28 @@ static int digi_write_room(struct tty_struct *tty)
room = port->bulk_out_size - 2 - priv->dp_out_buf_len;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
- dev_dbg(&port->dev, "digi_write_room: port=%d, room=%d\n", priv->dp_port_num, room);
+ dev_dbg(&port->dev, "digi_write_room: port=%d, room=%u\n", priv->dp_port_num, room);
return room;
}
-static int digi_chars_in_buffer(struct tty_struct *tty)
+static unsigned int digi_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int chars;
- if (priv->dp_write_urb_in_use) {
- dev_dbg(&port->dev, "digi_chars_in_buffer: port=%d, chars=%d\n",
- priv->dp_port_num, port->bulk_out_size - 2);
- /* return(port->bulk_out_size - 2); */
- return 256;
- } else {
- dev_dbg(&port->dev, "digi_chars_in_buffer: port=%d, chars=%d\n",
- priv->dp_port_num, priv->dp_out_buf_len);
- return priv->dp_out_buf_len;
- }
+ spin_lock_irqsave(&priv->dp_port_lock, flags);
+ if (priv->dp_write_urb_in_use)
+ chars = port->bulk_out_size - 2;
+ else
+ chars = priv->dp_out_buf_len;
+ spin_unlock_irqrestore(&priv->dp_port_lock, flags);
+ dev_dbg(&port->dev, "%s: port=%d, chars=%d\n", __func__,
+ priv->dp_port_num, chars);
+ return chars;
}
static void digi_dtr_rts(struct usb_serial_port *port, int on)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4a1f3a95d017..33bbb3470ca3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -219,6 +219,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
{ USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index add602bebd82..755858ca20ba 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -159,6 +159,9 @@
/* Vardaan Enterprises Serial Interface VEUSB422R3 */
#define FTDI_VARDAAN_PID 0xF070
+/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */
+#define FTDI_AUTO_M3_OP_COM_V2_PID 0x4f50
+
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 50e8bdc77e71..756d1ac7e96f 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1113,7 +1113,7 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
}
-static int garmin_write_room(struct tty_struct *tty)
+static unsigned int garmin_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
/*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index d10aa3d2ee49..15b6dee3a8e5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -230,11 +230,11 @@ int usb_serial_generic_write(struct tty_struct *tty,
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write);
-int usb_serial_generic_write_room(struct tty_struct *tty)
+unsigned int usb_serial_generic_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
- int room;
+ unsigned int room;
if (!port->bulk_out_size)
return 0;
@@ -243,15 +243,15 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
-int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
+unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
unsigned long flags;
- int chars;
+ unsigned int chars;
if (!port->bulk_out_size)
return 0;
@@ -260,7 +260,7 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
chars = kfifo_len(&port->write_fifo) + port->tx_bytes;
spin_unlock_irqrestore(&port->lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index e6fe3882bf69..ea4edf5eed27 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1351,33 +1351,21 @@ exit_send:
/*****************************************************************************
* edge_write_room
* this function is called by the tty driver when it wants to know how
- * many bytes of data we can accept for a specific port. If successful,
- * we return the amount of room that we have for this port (the txCredits)
- * otherwise we return a negative error number.
+ * many bytes of data we can accept for a specific port.
*****************************************************************************/
-static int edge_write_room(struct tty_struct *tty)
+static unsigned int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- int room;
+ unsigned int room;
unsigned long flags;
- if (edge_port == NULL)
- return 0;
- if (edge_port->closePending)
- return 0;
-
- if (!edge_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return 0;
- }
-
/* total of both buffers is still txCredit */
spin_lock_irqsave(&edge_port->ep_lock, flags);
room = edge_port->txCredits - edge_port->txfifo.count;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
@@ -1387,33 +1375,20 @@ static int edge_write_room(struct tty_struct *tty)
* this function is called by the tty driver when it wants to know how
* many bytes of data we currently have outstanding in the port (data that
* has been written, but hasn't made it out the port yet)
- * If successful, we return the number of bytes left to be written in the
- * system,
- * Otherwise we return a negative error number.
*****************************************************************************/
-static int edge_chars_in_buffer(struct tty_struct *tty)
+static unsigned int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- int num_chars;
+ unsigned int num_chars;
unsigned long flags;
- if (edge_port == NULL)
- return 0;
- if (edge_port->closePending)
- return 0;
-
- if (!edge_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return 0;
- }
-
spin_lock_irqsave(&edge_port->ep_lock, flags);
num_chars = edge_port->maxTxCredits - edge_port->txCredits +
edge_port->txfifo.count;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
if (num_chars) {
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, num_chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, num_chars);
}
return num_chars;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 39503fdccebf..84b848d2794e 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2067,11 +2067,11 @@ static void edge_send(struct usb_serial_port *port, struct tty_struct *tty)
tty_wakeup(tty);
}
-static int edge_write_room(struct tty_struct *tty)
+static unsigned int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- int room = 0;
+ unsigned int room;
unsigned long flags;
if (edge_port == NULL)
@@ -2083,15 +2083,15 @@ static int edge_write_room(struct tty_struct *tty)
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
-static int edge_chars_in_buffer(struct tty_struct *tty)
+static unsigned int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- int chars = 0;
+ unsigned int chars;
unsigned long flags;
if (edge_port == NULL)
return 0;
@@ -2100,7 +2100,7 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 172261a908d8..7b44dbea95cd 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -47,7 +47,7 @@ static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
-static int ir_write_room(struct tty_struct *tty);
+static unsigned int ir_write_room(struct tty_struct *tty);
static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
@@ -339,10 +339,10 @@ static void ir_write_bulk_callback(struct urb *urb)
usb_serial_port_softint(port);
}
-static int ir_write_room(struct tty_struct *tty)
+static unsigned int ir_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- int count = 0;
+ unsigned int count = 0;
if (port->bulk_out_size == 0)
return 0;
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index b04a029e3657..87b89c99d517 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1453,13 +1453,13 @@ static void usa67_glocont_callback(struct urb *urb)
}
}
-static int keyspan_write_room(struct tty_struct *tty)
+static unsigned int keyspan_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
int flip;
- int data_len;
+ unsigned int data_len;
struct urb *this_urb;
p_priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index a9bc546626ab..4ed8b8b0a361 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -53,7 +53,7 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
static void kobil_close(struct usb_serial_port *port);
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
-static int kobil_write_room(struct tty_struct *tty);
+static unsigned int kobil_write_room(struct tty_struct *tty);
static int kobil_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static int kobil_tiocmget(struct tty_struct *tty);
@@ -358,7 +358,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
}
-static int kobil_write_room(struct tty_struct *tty)
+static unsigned int kobil_write_room(struct tty_struct *tty)
{
/* FIXME */
return 8;
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index f9ce9e7b9b80..30ab565e0738 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -109,9 +109,9 @@ static void metrousb_read_int_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
+ unsigned long flags;
int throttled = 0;
int result = 0;
- unsigned long flags = 0;
dev_dbg(&port->dev, "%s\n", __func__);
@@ -171,7 +171,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- unsigned long flags = 0;
+ unsigned long flags;
int result = 0;
/* Set the private data information for the port. */
@@ -268,7 +268,7 @@ static void metrousb_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- unsigned long flags = 0;
+ unsigned long flags;
/* Set the private information for the port to stop reading data. */
spin_lock_irqsave(&metro_priv->lock, flags);
@@ -281,7 +281,7 @@ static int metrousb_tiocmget(struct tty_struct *tty)
unsigned long control_state = 0;
struct usb_serial_port *port = tty->driver_data;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(&metro_priv->lock, flags);
control_state = metro_priv->control_state;
@@ -296,7 +296,7 @@ static int metrousb_tiocmset(struct tty_struct *tty,
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- unsigned long flags = 0;
+ unsigned long flags;
unsigned long control_state = 0;
dev_dbg(&port->dev, "%s - set=%d, clear=%d\n", __func__, set, clear);
@@ -323,7 +323,7 @@ static void metrousb_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
- unsigned long flags = 0;
+ unsigned long flags;
int result = 0;
/* Set the private information for the port to resume reading data. */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 6ee83886e2c9..227f43d2bd56 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -945,27 +945,20 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
* this function is called by the tty driver when it wants to know how many
* bytes of data we currently have outstanding in the port (data that has
* been written, but hasn't made it out the port yet)
- * If successful, we return the number of bytes left to be written in the
- * system,
- * Otherwise we return a negative error number.
*/
-static int mos7720_chars_in_buffer(struct tty_struct *tty)
+static unsigned int mos7720_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
int i;
- int chars = 0;
- struct moschip_port *mos7720_port;
-
- mos7720_port = usb_get_serial_port_data(port);
- if (mos7720_port == NULL)
- return 0;
+ unsigned int chars = 0;
for (i = 0; i < NUM_URBS; ++i) {
if (mos7720_port->write_urb_pool[i] &&
mos7720_port->write_urb_pool[i]->status == -EINPROGRESS)
chars += URB_TRANSFER_BUFFER_SIZE;
}
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
@@ -1030,20 +1023,14 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
* mos7720_write_room
* this function is called by the tty driver when it wants to know how many
* bytes of data we can accept for a specific port.
- * If successful, we return the amount of room that we have for this port
- * Otherwise we return a negative error number.
*/
-static int mos7720_write_room(struct tty_struct *tty)
+static unsigned int mos7720_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7720_port;
- int room = 0;
+ struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
+ unsigned int room = 0;
int i;
- mos7720_port = usb_get_serial_port_data(port);
- if (mos7720_port == NULL)
- return 0;
-
/* FIXME: Locking */
for (i = 0; i < NUM_URBS; ++i) {
if (mos7720_port->write_urb_pool[i] &&
@@ -1051,7 +1038,7 @@ static int mos7720_write_room(struct tty_struct *tty)
room += URB_TRANSFER_BUFFER_SIZE;
}
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 28e4093794e0..d7fe33ca73e4 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -730,17 +730,14 @@ err:
* this function is called by the tty driver when it wants to know how many
* bytes of data we currently have outstanding in the port (data that has
* been written, but hasn't made it out the port yet)
- * If successful, we return the number of bytes left to be written in the
- * system,
- * Otherwise we return zero.
*****************************************************************************/
-static int mos7840_chars_in_buffer(struct tty_struct *tty)
+static unsigned int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
- int chars = 0;
+ unsigned int chars = 0;
unsigned long flags;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
@@ -751,7 +748,7 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
}
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
@@ -814,16 +811,14 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
* mos7840_write_room
* this function is called by the tty driver when it wants to know how many
* bytes of data we can accept for a specific port.
- * If successful, we return the amount of room that we have for this port
- * Otherwise we return a negative error number.
*****************************************************************************/
-static int mos7840_write_room(struct tty_struct *tty)
+static unsigned int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
- int room = 0;
+ unsigned int room = 0;
unsigned long flags;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
@@ -834,7 +829,7 @@ static int mos7840_write_room(struct tty_struct *tty)
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1;
- dev_dbg(&mos7840_port->port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&mos7840_port->port->dev, "%s - returns %u\n", __func__, room);
return room;
}
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 40c713fae0c3..aed28c35caff 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -267,7 +267,7 @@ error_no_buffer:
return ret;
}
-static int opticon_write_room(struct tty_struct *tty)
+static unsigned int opticon_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct opticon_private *priv = usb_get_serial_port_data(port);
@@ -289,12 +289,12 @@ static int opticon_write_room(struct tty_struct *tty)
return 2048;
}
-static int opticon_chars_in_buffer(struct tty_struct *tty)
+static unsigned int opticon_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct opticon_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- int count;
+ unsigned int count;
spin_lock_irqsave(&priv->lock, flags);
count = priv->outstanding_bytes;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7608584ef4fe..039450069ca4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
+#define UBLOX_PRODUCT_R6XX 0x90fa
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = {
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+ .driver_info = RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
@@ -1200,6 +1203,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 65cd0341fa78..a5caedbe72e2 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -126,8 +126,8 @@ static void oti6858_read_bulk_callback(struct urb *urb);
static void oti6858_write_bulk_callback(struct urb *urb);
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
-static int oti6858_write_room(struct tty_struct *tty);
-static int oti6858_chars_in_buffer(struct tty_struct *tty);
+static unsigned int oti6858_write_room(struct tty_struct *tty);
+static unsigned int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -363,10 +363,10 @@ static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
return count;
}
-static int oti6858_write_room(struct tty_struct *tty)
+static unsigned int oti6858_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- int room = 0;
+ unsigned int room;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
@@ -376,10 +376,10 @@ static int oti6858_write_room(struct tty_struct *tty)
return room;
}
-static int oti6858_chars_in_buffer(struct tty_struct *tty)
+static unsigned int oti6858_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- int chars = 0;
+ unsigned int chars;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 940050c31482..930b3d50a330 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -418,24 +418,34 @@ static int pl2303_detect_type(struct usb_serial *serial)
bcdDevice = le16_to_cpu(desc->bcdDevice);
bcdUSB = le16_to_cpu(desc->bcdUSB);
- switch (bcdDevice) {
- case 0x100:
- /*
- * Assume it's an HXN-type if the device doesn't support the old read
- * request value.
- */
- if (bcdUSB == 0x200 && !pl2303_supports_hx_status(serial))
- return TYPE_HXN;
+ switch (bcdUSB) {
+ case 0x110:
+ switch (bcdDevice) {
+ case 0x300:
+ return TYPE_HX;
+ case 0x400:
+ return TYPE_HXD;
+ default:
+ return TYPE_HX;
+ }
break;
- case 0x300:
- if (bcdUSB == 0x200)
+ case 0x200:
+ switch (bcdDevice) {
+ case 0x100:
+ case 0x305:
+ /*
+ * Assume it's an HXN-type if the device doesn't
+ * support the old read request value.
+ */
+ if (!pl2303_supports_hx_status(serial))
+ return TYPE_HXN;
+ break;
+ case 0x300:
return TYPE_TA;
-
- return TYPE_HX;
- case 0x400:
- return TYPE_HXD;
- case 0x500:
- return TYPE_TB;
+ case 0x500:
+ return TYPE_TB;
+ }
+ break;
}
dev_err(&serial->interface->dev,
@@ -789,20 +799,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
pl2303_get_line_request(port, buf);
- switch (C_CSIZE(tty)) {
- case CS5:
- buf[6] = 5;
- break;
- case CS6:
- buf[6] = 6;
- break;
- case CS7:
- buf[6] = 7;
- break;
- default:
- case CS8:
- buf[6] = 8;
- }
+ buf[6] = tty_get_char_size(tty->termios.c_cflag);
dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
/* For reference buf[0]:buf[3] baud rate value */
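Summary of the reworked pl2303_detect_type() logic above (illustration only, not part of the patch):

/*
 *   bcdUSB 1.10: bcdDevice 3.00 -> HX, 4.00 -> HXD, anything else -> HX
 *   bcdUSB 2.00: bcdDevice 1.00 or 3.05 -> HXN when the old HX status
 *                request is not supported; 3.00 -> TA; 5.00 -> TB
 *   Any other combination falls through to the dev_err() after the switch.
 */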
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 067690dac24c..971907f083a3 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -870,12 +870,12 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
}
-static int qt2_write_room(struct tty_struct *tty)
+static unsigned int qt2_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct qt2_port_private *port_priv;
- unsigned long flags = 0;
- int r;
+ unsigned long flags;
+ unsigned int r;
port_priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 54e16ffc30a0..4b49b7c33364 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -613,7 +613,7 @@ static void sierra_instat_callback(struct urb *urb)
}
}
-static int sierra_write_room(struct tty_struct *tty)
+static unsigned int sierra_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
@@ -632,19 +632,19 @@ static int sierra_write_room(struct tty_struct *tty)
return 2048;
}
-static int sierra_chars_in_buffer(struct tty_struct *tty)
+static unsigned int sierra_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
unsigned long flags;
- int chars;
+ unsigned int chars;
/* NOTE: This overcounts somewhat. */
spin_lock_irqsave(&portdata->lock, flags);
chars = portdata->outstanding_urbs * MAX_TRANSFER;
spin_unlock_irqrestore(&portdata->lock, flags);
- dev_dbg(&port->dev, "%s - %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - %u\n", __func__, chars);
return chars;
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 310db5abea9d..18c0bd853392 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -308,8 +308,8 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ti_close(struct usb_serial_port *port);
static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count);
-static int ti_write_room(struct tty_struct *tty);
-static int ti_chars_in_buffer(struct tty_struct *tty);
+static unsigned int ti_write_room(struct tty_struct *tty);
+static unsigned int ti_chars_in_buffer(struct tty_struct *tty);
static bool ti_tx_empty(struct usb_serial_port *port);
static void ti_throttle(struct tty_struct *tty);
static void ti_unthrottle(struct tty_struct *tty);
@@ -813,34 +813,34 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
}
-static int ti_write_room(struct tty_struct *tty)
+static unsigned int ti_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
- int room = 0;
+ unsigned int room;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, room);
return room;
}
-static int ti_chars_in_buffer(struct tty_struct *tty)
+static unsigned int ti_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
- int chars = 0;
+ unsigned int chars;
unsigned long flags;
spin_lock_irqsave(&tport->tp_lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
+ dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars);
return chars;
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 98b33b1b5357..eeb441c77207 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -376,7 +376,7 @@ exit:
return retval;
}
-static int serial_write_room(struct tty_struct *tty)
+static unsigned int serial_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -385,7 +385,7 @@ static int serial_write_room(struct tty_struct *tty)
return port->serial->type->write_room(tty);
}
-static int serial_chars_in_buffer(struct tty_struct *tty)
+static unsigned int serial_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index b5331d03092f..519101945769 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -11,13 +11,13 @@ extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
extern void usb_wwan_close(struct usb_serial_port *port);
extern int usb_wwan_port_probe(struct usb_serial_port *port);
extern void usb_wwan_port_remove(struct usb_serial_port *port);
-extern int usb_wwan_write_room(struct tty_struct *tty);
+extern unsigned int usb_wwan_write_room(struct tty_struct *tty);
extern int usb_wwan_tiocmget(struct tty_struct *tty);
extern int usb_wwan_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
-extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
+extern unsigned int usb_wwan_chars_in_buffer(struct tty_struct *tty);
#ifdef CONFIG_PM
extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
extern int usb_wwan_resume(struct usb_serial *serial);
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 3eb72c59ede6..cb01283d4d15 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -278,12 +278,12 @@ static void usb_wwan_outdat_callback(struct urb *urb)
}
}
-int usb_wwan_write_room(struct tty_struct *tty)
+unsigned int usb_wwan_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_wwan_port_private *portdata;
int i;
- int data_len = 0;
+ unsigned int data_len = 0;
struct urb *this_urb;
portdata = usb_get_serial_port_data(port);
@@ -294,17 +294,17 @@ int usb_wwan_write_room(struct tty_struct *tty)
data_len += OUT_BUFLEN;
}
- dev_dbg(&port->dev, "%s: %d\n", __func__, data_len);
+ dev_dbg(&port->dev, "%s: %u\n", __func__, data_len);
return data_len;
}
EXPORT_SYMBOL(usb_wwan_write_room);
-int usb_wwan_chars_in_buffer(struct tty_struct *tty)
+unsigned int usb_wwan_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_wwan_port_private *portdata;
int i;
- int data_len = 0;
+ unsigned int data_len = 0;
struct urb *this_urb;
portdata = usb_get_serial_port_data(port);
@@ -316,7 +316,7 @@ int usb_wwan_chars_in_buffer(struct tty_struct *tty)
if (this_urb && test_bit(i, &portdata->out_busy))
data_len += this_urb->transfer_buffer_length;
}
- dev_dbg(&port->dev, "%s: %d\n", __func__, data_len);
+ dev_dbg(&port->dev, "%s: %u\n", __func__, data_len);
return data_len;
}
EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5116ed9db3eb..da65d14c9ed5 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -625,14 +625,7 @@ static void firm_setup_port(struct tty_struct *tty)
port_settings.port = port->port_number + 1;
- /* get the byte size */
- switch (cflag & CSIZE) {
- case CS5: port_settings.bits = 5; break;
- case CS6: port_settings.bits = 6; break;
- case CS7: port_settings.bits = 7; break;
- default:
- case CS8: port_settings.bits = 8; break;
- }
+ port_settings.bits = tty_get_char_size(cflag);
dev_dbg(dev, "%s - data bits = %d\n", __func__, port_settings.bits);
/* determine the parity */
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index a6f3267bbef6..2f7093ba5a2f 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -221,11 +221,11 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
desc[12] = regs[6]; /* device */
desc[13] = regs[7]; /* command */
- srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ srb->result = SAM_STAT_CHECK_CONDITION;
}
goto end;
invalid_fld:
- srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer,
usb_stor_sense_invalidCDB,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index f9677a5ec31b..c35a6db993f1 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Julian Sikorski <belegdol@gmail.com> */
+UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
+ "LaCie",
+ "Rugged USB3-FW",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index b9429c9f65f6..aeef453aa658 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -517,8 +517,10 @@ typec_register_altmode(struct device *parent,
int ret;
alt = kzalloc(sizeof(*alt), GFP_KERNEL);
- if (!alt)
+ if (!alt) {
+ altmode_id_remove(parent, id);
return ERR_PTR(-ENOMEM);
+ }
alt->adev.svid = desc->svid;
alt->adev.mode = desc->mode;
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 77dabd306ba8..c8340de0ed49 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -17,21 +17,12 @@
#include "class.h"
#include "mux.h"
-static bool dev_name_ends_with(struct device *dev, const char *suffix)
-{
- const char *name = dev_name(dev);
- const int name_len = strlen(name);
- const int suffix_len = strlen(suffix);
-
- if (suffix_len > name_len)
- return false;
-
- return strcmp(name + (name_len - suffix_len), suffix) == 0;
-}
-
static int switch_fwnode_match(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-switch");
+ if (!is_typec_switch(dev))
+ return 0;
+
+ return dev_fwnode(dev) == fwnode;
}
static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
@@ -39,9 +30,22 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
{
struct device *dev;
+ /*
+ * Device graph (OF graph) does not give any means to identify the
+ * device type or the device class of the remote port parent that @fwnode
+ * represents, so in order to identify the type or the class of @fwnode
+ * an additional device property is needed. With typec switches the
+ * property is named "orientation-switch" (@id). The value of the device
+ * property is ignored.
+ */
if (id && !fwnode_property_present(fwnode, id))
return NULL;
+ /*
+ * At this point we are sure that @fwnode is a typec switch in all
+ * cases. If the switch hasn't yet been registered for some reason, the
+ * function "defers probe" for now.
+ */
dev = class_find_device(&typec_mux_class, NULL, fwnode,
switch_fwnode_match);
@@ -90,7 +94,7 @@ static void typec_switch_release(struct device *dev)
kfree(to_typec_switch(dev));
}
-static const struct device_type typec_switch_dev_type = {
+const struct device_type typec_switch_dev_type = {
.name = "orientation_switch",
.release = typec_switch_release,
};
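A hedged sketch of the firmware node that typec_switch_match() above looks for (illustration only; the node and compatible names are hypothetical):

/*
 * Example device tree fragment:
 *
 *     typec-switch {
 *         compatible = "vendor,typec-orientation-switch";
 *         orientation-switch;
 *     };
 *
 * Only the presence of the "orientation-switch" property (@id) matters;
 * its value is ignored.
 */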
@@ -180,7 +184,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get_drvdata);
static int mux_fwnode_match(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-mux");
+ if (!is_typec_mux(dev))
+ return 0;
+
+ return dev_fwnode(dev) == fwnode;
}
static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
@@ -295,7 +302,7 @@ static void typec_mux_release(struct device *dev)
kfree(to_typec_mux(dev));
}
-static const struct device_type typec_mux_dev_type = {
+const struct device_type typec_mux_dev_type = {
.name = "mode_switch",
.release = typec_mux_release,
};
diff --git a/drivers/usb/typec/mux.h b/drivers/usb/typec/mux.h
index 4fd9426ee44f..b1d6e837cb74 100644
--- a/drivers/usb/typec/mux.h
+++ b/drivers/usb/typec/mux.h
@@ -18,4 +18,10 @@ struct typec_mux {
#define to_typec_switch(_dev_) container_of(_dev_, struct typec_switch, dev)
#define to_typec_mux(_dev_) container_of(_dev_, struct typec_mux, dev)
+extern const struct device_type typec_switch_dev_type;
+extern const struct device_type typec_mux_dev_type;
+
+#define is_typec_switch(dev) ((dev)->type == &typec_switch_dev_type)
+#define is_typec_mux(dev) ((dev)->type == &typec_mux_dev_type)
+
#endif /* __USB_TYPEC_MUX__ */
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index ffa8aa12d5f1..2cdd22130834 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -83,8 +83,6 @@ enum {
/*
* Input Output Manager (IOM) PORT STATUS
*/
-#define IOM_PORT_STATUS_OFFSET 0x560
-
#define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6)
#define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6
#define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03
@@ -144,6 +142,7 @@ struct pmc_usb {
struct pmc_usb_port *port;
struct acpi_device *iom_adev;
void __iomem *iom_base;
+ u32 iom_port_status_offset;
};
static void update_port_status(struct pmc_usb_port *port)
@@ -153,7 +152,8 @@ static void update_port_status(struct pmc_usb_port *port)
/* SoC expects the USB Type-C port numbers to start with 0 */
port_num = port->usb3_port - 1;
- port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET +
+ port->iom_status = readl(port->pmc->iom_base +
+ port->pmc->iom_port_status_offset +
port_num * sizeof(u32));
}
@@ -559,14 +559,32 @@ static int is_memory(struct acpi_resource *res, void *data)
return !acpi_dev_resource_memory(res, &r);
}
+/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
+static const struct acpi_device_id iom_acpi_ids[] = {
+ /* TigerLake */
+ { "INTC1072", 0x560, },
+
+ /* AlderLake */
+ { "INTC1079", 0x160, },
+ {}
+};
+
static int pmc_usb_probe_iom(struct pmc_usb *pmc)
{
struct list_head resource_list;
struct resource_entry *rentry;
- struct acpi_device *adev;
+ static const struct acpi_device_id *dev_id;
+ struct acpi_device *adev = NULL;
int ret;
- adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1);
+ for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
+ if (acpi_dev_present(dev_id->id, NULL, -1)) {
+ pmc->iom_port_status_offset = (u32)dev_id->driver_data;
+ adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ break;
+ }
+ }
+
if (!adev)
return -ENODEV;
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index 6eaeba9b096e..e7745d1c2a5c 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -686,6 +686,15 @@ static int stusb160x_probe(struct i2c_client *client)
return -ENODEV;
/*
+ * This fwnode has a "compatible" property, but is never populated as a
+ * struct device. Instead we simply parse it to read the properties.
+ * This breaks fw_devlink=on. To maintain backward compatibility
+ * with existing DT files, we work around this by deleting any
+ * fwnode_links to/from this fwnode.
+ */
+ fw_devlink_purge_absent_suppliers(fwnode);
+
+ /*
* When both VDD and VSYS power supplies are present, the low power
* supply VSYS is selected when VSYS voltage is above 3.1 V.
* Otherwise VDD is selected.
@@ -739,10 +748,6 @@ static int stusb160x_probe(struct i2c_client *client)
typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
if (client->irq) {
- ret = stusb160x_irq_init(chip, client->irq);
- if (ret)
- goto port_unregister;
-
chip->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(chip->role_sw)) {
ret = PTR_ERR(chip->role_sw);
@@ -752,6 +757,10 @@ static int stusb160x_probe(struct i2c_client *client)
ret);
goto port_unregister;
}
+
+ ret = stusb160x_irq_init(chip, client->irq);
+ if (ret)
+ goto role_sw_put;
} else {
/*
* If Source or Dual power role, need to enable VDD supply
@@ -775,6 +784,9 @@ static int stusb160x_probe(struct i2c_client *client)
return 0;
+role_sw_put:
+ if (chip->role_sw)
+ usb_role_switch_put(chip->role_sw);
port_unregister:
typec_unregister_port(chip->port);
all_reg_disable:
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 25b480752266..9858716698df 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -21,8 +21,12 @@
#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
-#define AUTO_DISCHARGE_PD_HEADROOM_MV 850
-#define AUTO_DISCHARGE_PPS_HEADROOM_MV 1250
+#define VSINKPD_MIN_IR_DROP_MV 750
+#define VSRC_NEW_MIN_PERCENT 95
+#define VSRC_VALID_MIN_MV 500
+#define VPPS_NEW_MIN_PERCENT 95
+#define VPPS_VALID_MIN_MV 100
+#define VSINKDISCONNECT_PD_MIN_PERCENT 90
#define tcpc_presenting_rd(reg, cc) \
(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
@@ -115,6 +119,33 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
return 0;
}
+static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
+ enum typec_cc_polarity polarity)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
+ * disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
+ */
+ if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >>
+ TCPC_ROLE_CTRL_CC2_SHIFT) !=
+ ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >>
+ TCPC_ROLE_CTRL_CC1_SHIFT))
+ return 0;
+
+ return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ?
+ TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT :
+ TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT,
+ TCPC_ROLE_CTRL_CC_OPEN);
+}
+
static int tcpci_start_toggling(struct tcpc_dev *tcpc,
enum typec_port_type port_type,
enum typec_cc_status cc)
@@ -324,11 +355,13 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
} else if (mode == TYPEC_PWR_MODE_PD) {
if (pps_active)
- threshold = (95 * requested_vbus_voltage_mv / 100) -
- AUTO_DISCHARGE_PD_HEADROOM_MV;
+ threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+ VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
+ VSINKDISCONNECT_PD_MIN_PERCENT / 100;
else
- threshold = (95 * requested_vbus_voltage_mv / 100) -
- AUTO_DISCHARGE_PPS_HEADROOM_MV;
+ threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+ VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
+ VSINKDISCONNECT_PD_MIN_PERCENT / 100;
} else {
/* 3.5V for non-pd sink */
threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
@@ -728,6 +761,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.get_vbus = tcpci_get_vbus;
tcpci->tcpc.set_vbus = tcpci_set_vbus;
tcpci->tcpc.set_cc = tcpci_set_cc;
+ tcpci->tcpc.apply_rc = tcpci_apply_rc;
tcpci->tcpc.get_cc = tcpci_get_cc;
tcpci->tcpc.set_polarity = tcpci_set_polarity;
tcpci->tcpc.set_vconn = tcpci_set_vconn;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 63470cf7f4cd..b9bb63d749ec 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -776,6 +776,34 @@ static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
port->tcpc->set_cc(port->tcpc, cc);
}
+static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
+{
+ int ret = 0;
+
+ if (port->tcpc->enable_auto_vbus_discharge) {
+ ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
+ tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
+ ret);
+ if (!ret)
+ port->auto_vbus_discharge_enabled = enable;
+ }
+
+ return ret;
+}
+
+static void tcpm_apply_rc(struct tcpm_port *port)
+{
+ /*
+ * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
+ * when Vbus auto discharge on disconnect is enabled.
+ */
+ if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
+ tcpm_log(port, "Apply_RC");
+ port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
+ tcpm_enable_auto_vbus_discharge(port, false);
+ }
+}
+
/*
* Determine RP value to set based on maximum current supported
* by a port if configured as source.
@@ -2576,6 +2604,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
} else {
next_state = SNK_WAIT_CAPABILITIES;
}
+
+ /* Threshold was relaxed before sending Request. Restore it. */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
+ port->pps_data.active,
+ port->supply_voltage);
tcpm_set_state(port, next_state, 0);
break;
case SNK_NEGOTIATE_PPS_CAPABILITIES:
@@ -2589,6 +2622,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->send_discover)
port->vdm_sm_running = true;
+ /* Threshold was relaxed before sending Request. Restore it. */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
+ port->pps_data.active,
+ port->supply_voltage);
+
tcpm_set_state(port, SNK_READY, 0);
break;
case DR_SWAP_SEND:
@@ -3308,6 +3346,12 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
if (ret < 0)
return ret;
+ /*
+ * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
+ * It is safer to modify the threshold here.
+ */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
+
memset(&msg, 0, sizeof(msg));
msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
port->pwr_role,
@@ -3405,6 +3449,9 @@ static int tcpm_pd_send_pps_request(struct tcpm_port *port)
if (ret < 0)
return ret;
+ /* Relax the threshold as voltage will be adjusted right after Accept Message. */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
+
memset(&msg, 0, sizeof(msg));
msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
port->pwr_role,
@@ -3515,12 +3562,7 @@ static int tcpm_src_attach(struct tcpm_port *port)
if (ret < 0)
return ret;
- if (port->tcpc->enable_auto_vbus_discharge) {
- ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
- tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
- if (!ret)
- port->auto_vbus_discharge_enabled = true;
- }
+ tcpm_enable_auto_vbus_discharge(port, true);
ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
if (ret < 0)
@@ -3597,14 +3639,7 @@ static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capab
static void tcpm_reset_port(struct tcpm_port *port)
{
- int ret;
-
- if (port->tcpc->enable_auto_vbus_discharge) {
- ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
- tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
- if (!ret)
- port->auto_vbus_discharge_enabled = false;
- }
+ tcpm_enable_auto_vbus_discharge(port, false);
port->in_ams = false;
port->ams = NONE_AMS;
port->vdm_sm_running = false;
@@ -3672,13 +3707,7 @@ static int tcpm_snk_attach(struct tcpm_port *port)
if (ret < 0)
return ret;
- if (port->tcpc->enable_auto_vbus_discharge) {
- tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
- ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
- tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
- if (!ret)
- port->auto_vbus_discharge_enabled = true;
- }
+ tcpm_enable_auto_vbus_discharge(port, true);
ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
if (ret < 0)
@@ -4186,6 +4215,10 @@ static void run_state_machine(struct tcpm_port *port)
port->hard_reset_count = 0;
ret = tcpm_pd_send_request(port);
if (ret < 0) {
+ /* Restore the original state */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
+ port->pps_data.active,
+ port->supply_voltage);
/* Let the Source send capabilities again. */
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
} else {
@@ -4196,6 +4229,10 @@ static void run_state_machine(struct tcpm_port *port)
case SNK_NEGOTIATE_PPS_CAPABILITIES:
ret = tcpm_pd_send_pps_request(port);
if (ret < 0) {
+ /* Restore the original state */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
+ port->pps_data.active,
+ port->supply_voltage);
port->pps_status = ret;
/*
* If this was called due to updates to sink
@@ -4515,6 +4552,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case PR_SWAP_START:
+ tcpm_apply_rc(port);
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
PD_T_SRC_TRANSITION);
@@ -4554,6 +4592,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
break;
case PR_SWAP_SRC_SNK_SINK_ON:
+ tcpm_enable_auto_vbus_discharge(port, true);
/* Set the vbus disconnect threshold for implicit contract */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
tcpm_set_state(port, SNK_STARTUP, 0);
@@ -4570,6 +4609,7 @@ static void run_state_machine(struct tcpm_port *port)
PD_T_PS_SOURCE_OFF);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON:
+ tcpm_enable_auto_vbus_discharge(port, true);
tcpm_set_cc(port, tcpm_rp_cc(port));
tcpm_set_vbus(port, true);
/*
@@ -5198,6 +5238,10 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
tcpm_set_state(port, SNK_UNATTACHED, 0);
}
break;
+ case PR_SWAP_SNK_SRC_SINK_OFF:
+ case PR_SWAP_SNK_SRC_SOURCE_ON:
+ /* Do nothing, vsafe0v is expected during transition */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
@@ -5325,7 +5369,7 @@ EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
void tcpm_sink_frs(struct tcpm_port *port)
{
spin_lock(&port->pd_event_lock);
- port->pd_events = TCPM_FRS_EVENT;
+ port->pd_events |= TCPM_FRS_EVENT;
spin_unlock(&port->pd_event_lock);
kthread_queue_work(port->wq, &port->event_work);
}
@@ -5334,7 +5378,7 @@ EXPORT_SYMBOL_GPL(tcpm_sink_frs);
void tcpm_sourcing_vbus(struct tcpm_port *port)
{
spin_lock(&port->pd_event_lock);
- port->pd_events = TCPM_SOURCING_VBUS;
+ port->pd_events |= TCPM_SOURCING_VBUS;
spin_unlock(&port->pd_event_lock);
kthread_queue_work(port->wq, &port->event_work);
}
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 5d125339687a..20917d85d6f4 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* typec_wcove.c - WhiskeyCove PMIC USB Type-C PHY driver
*
* Copyright (C) 2017 Intel Corporation
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 938219bc1b4b..21b3ae25c76d 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -629,6 +629,15 @@ static int tps6598x_probe(struct i2c_client *client)
if (!fwnode)
return -ENODEV;
+ /*
+ * This fwnode has a "compatible" property, but is never populated as a
+ * struct device. Instead we simply parse it to read the properties.
+ * This breaks fw_devlink=on. To maintain backward compatibility
+ * with existing DT files, we work around this by deleting any
+ * fwnode_links to/from this fwnode.
+ */
+ fw_devlink_purge_absent_suppliers(fwnode);
+
tps->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(tps->role_sw)) {
ret = PTR_ERR(tps->role_sw);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index b7d104c80d85..5ef5bd0e87cf 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1219,7 +1219,7 @@ static int ucsi_init(struct ucsi *ucsi)
goto err_reset;
}
- /* Allocate the connectors. Released in ucsi_unregister_ppm() */
+ /* Allocate the connectors. Released in ucsi_unregister() */
ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
@@ -1280,7 +1280,7 @@ void *ucsi_get_drvdata(struct ucsi *ucsi)
EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
/**
- * ucsi_get_drvdata - Assign private driver data pointer
+ * ucsi_set_drvdata - Assign private driver data pointer
* @ucsi: UCSI interface
* @data: Private data pointer
*/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 1a661ab45af5..6e197fe0fcf9 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -133,6 +133,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
&hw->notify_off_multiplier);
hw->notify_bar = cap.bar;
hw->notify_base = get_cap_addr(hw, &cap);
+ hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
+ le32_to_cpu(cap.offset);
IFCVF_DBG(pdev, "hw->notify_base = %p\n",
hw->notify_base);
break;
@@ -161,6 +163,8 @@ next:
notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
hw->vring[i].notify_addr = hw->notify_base +
notify_off * hw->notify_off_multiplier;
+ hw->vring[i].notify_pa = hw->notify_base_pa +
+ notify_off * hw->notify_off_multiplier;
}
hw->lm_cfg = hw->base[IFCVF_LM_BAR];
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 0111bfdeb342..2996db0da490 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -19,21 +19,9 @@
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_pci.h>
-#define N3000_VENDOR_ID 0x1AF4
#define N3000_DEVICE_ID 0x1041
-#define N3000_SUBSYS_VENDOR_ID 0x8086
#define N3000_SUBSYS_DEVICE_ID 0x001A
-#define C5000X_PL_VENDOR_ID 0x1AF4
-#define C5000X_PL_DEVICE_ID 0x1000
-#define C5000X_PL_SUBSYS_VENDOR_ID 0x8086
-#define C5000X_PL_SUBSYS_DEVICE_ID 0x0001
-
-#define C5000X_PL_BLK_VENDOR_ID 0x1AF4
-#define C5000X_PL_BLK_DEVICE_ID 0x1001
-#define C5000X_PL_BLK_SUBSYS_VENDOR_ID 0x8086
-#define C5000X_PL_BLK_SUBSYS_DEVICE_ID 0x0002
-
#define IFCVF_NET_SUPPORTED_FEATURES \
((1ULL << VIRTIO_NET_F_MAC) | \
(1ULL << VIRTIO_F_ANY_LAYOUT) | \
@@ -73,6 +61,7 @@ struct vring_info {
u16 last_avail_idx;
bool ready;
void __iomem *notify_addr;
+ phys_addr_t notify_pa;
u32 irq;
struct vdpa_callback cb;
char msix_name[256];
@@ -87,6 +76,7 @@ struct ifcvf_hw {
u8 notify_bar;
/* Notification bar address */
void __iomem *notify_base;
+ phys_addr_t notify_base_pa;
u32 notify_off_multiplier;
u64 req_features;
u64 hw_features;
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index ab0ab5cf0f6e..351c6cfb24c3 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -264,7 +264,7 @@ static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- state->avail_index = ifcvf_get_vq_state(vf, qid);
+ state->split.avail_index = ifcvf_get_vq_state(vf, qid);
return 0;
}
@@ -273,7 +273,7 @@ static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return ifcvf_set_vq_state(vf, qid, state->avail_index);
+ return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -413,6 +413,21 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
return vf->vring[qid].irq;
}
+static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
+ u16 idx)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ struct vdpa_notification_area area;
+
+ area.addr = vf->vring[idx].notify_pa;
+ if (!vf->notify_off_multiplier)
+ area.size = PAGE_SIZE;
+ else
+ area.size = vf->notify_off_multiplier;
+
+ return area;
+}
+
/*
* IFCVF currently doesn't have on-chip IOMMU, so not
* implemented set_map()/dma_map()/dma_unmap()
@@ -440,6 +455,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
.set_config_cb = ifcvf_vdpa_set_config_cb,
+ .get_vq_notification = ifcvf_get_vq_notification,
};
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -477,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops, NULL);
- if (adapter == NULL) {
+ if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return -ENOMEM;
+ return PTR_ERR(adapter);
}
pci_set_master(pdev);
@@ -536,18 +552,21 @@ static void ifcvf_remove(struct pci_dev *pdev)
}
static struct pci_device_id ifcvf_pci_ids[] = {
- { PCI_DEVICE_SUB(N3000_VENDOR_ID,
+ /* N3000 network device */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
N3000_DEVICE_ID,
- N3000_SUBSYS_VENDOR_ID,
+ PCI_VENDOR_ID_INTEL,
N3000_SUBSYS_DEVICE_ID) },
- { PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID,
- C5000X_PL_DEVICE_ID,
- C5000X_PL_SUBSYS_VENDOR_ID,
- C5000X_PL_SUBSYS_DEVICE_ID) },
- { PCI_DEVICE_SUB(C5000X_PL_BLK_VENDOR_ID,
- C5000X_PL_BLK_DEVICE_ID,
- C5000X_PL_BLK_SUBSYS_VENDOR_ID,
- C5000X_PL_BLK_SUBSYS_DEVICE_ID) },
+ /* C5000X-PL network device */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ VIRTIO_TRANS_ID_NET,
+ PCI_VENDOR_ID_INTEL,
+ VIRTIO_ID_NET) },
+ /* C5000X-PL block device */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ VIRTIO_TRANS_ID_BLOCK,
+ PCI_VENDOR_ID_INTEL,
+ VIRTIO_ID_BLOCK) },
{ 0 },
};
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index b6cc53ba980c..0002b2136b48 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -35,12 +35,14 @@ struct mlx5_vdpa_mr {
/* serialize mkey creation and destruction */
struct mutex mkey_mtx;
+ bool user_mr;
};
struct mlx5_vdpa_resources {
u32 pdn;
struct mlx5_uars_page *uar;
void __iomem *kick_addr;
+ u64 phys_kick_addr;
u16 uid;
u32 null_mkey;
bool valid;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 800cfd1967ad..e59135fa867e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -219,11 +219,6 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}
-static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
-{
- return &mvdev->mdev->pdev->dev;
-}
-
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
struct vhost_iotlb *iotlb)
{
@@ -239,7 +234,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 pa;
u64 paend;
struct scatterlist *sg;
- struct device *dma = get_dma_device(mvdev);
+ struct device *dma = mvdev->vdev.dma_dev;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -298,7 +293,7 @@ err_map:
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- struct device *dma = get_dma_device(mvdev);
+ struct device *dma = mvdev->vdev.dma_dev;
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
@@ -360,7 +355,7 @@ err_alloc:
* indirect memory key that provides access to the entire address space given
* by iotlb.
*/
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
@@ -374,9 +369,6 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
int err = 0;
int nnuls;
- if (mr->initialized)
- return 0;
-
INIT_LIST_HEAD(&mr->head);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
@@ -414,7 +406,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
if (err)
goto err_chain;
- mr->initialized = true;
+ mr->user_mr = true;
return 0;
err_chain:
@@ -426,44 +418,100 @@ err_chain:
return err;
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
+ if (!err)
+ mr->user_mr = false;
+
+ kfree(in);
+ return err;
+}
+
+static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
+}
+
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
- mutex_lock(&mr->mkey_mtx);
+ if (mr->initialized)
+ return 0;
+
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
+
+ if (!err)
+ mr->initialized = true;
+
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ mutex_lock(&mvdev->mr.mkey_mtx);
err = _mlx5_vdpa_create_mr(mvdev, iotlb);
- mutex_unlock(&mr->mkey_mtx);
+ mutex_unlock(&mvdev->mr.mkey_mtx);
return err;
}
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n;
- mutex_lock(&mr->mkey_mtx);
- if (!mr->initialized)
- goto out;
-
destroy_indirect_key(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
kfree(dmr);
}
+}
+
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+ mutex_lock(&mr->mkey_mtx);
+ if (!mr->initialized)
+ goto out;
+
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
memset(mr, 0, sizeof(*mr));
mr->initialized = false;
out:
mutex_unlock(&mr->mkey_mtx);
}
-static bool map_empty(struct vhost_iotlb *iotlb)
-{
- return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
-}
-
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
@@ -471,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
int err = 0;
*change_map = false;
- if (map_empty(iotlb)) {
- mlx5_vdpa_destroy_mr(mvdev);
- return 0;
- }
mutex_lock(&mr->mkey_mtx);
if (mr->initialized) {
mlx5_vdpa_info(mvdev, "memory map update\n");
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 6521cbd0f5c2..d4606213f88a 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -54,6 +54,9 @@ static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
void *in;
int err;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
+ return 0;
+
/* 0 means not supported */
if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
return -EOPNOTSUPP;
@@ -79,6 +82,9 @@ static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ if (!uid)
+ return;
+
MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
MLX5_SET(destroy_uctx_in, in, uid, uid);
@@ -247,6 +253,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
goto err_key;
kick_addr = mdev->bar_addr + offset;
+ res->phys_kick_addr = kick_addr;
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index dda5dc6f7737..3cc12fcab08d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -526,7 +526,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
void __iomem *uar_page = ndev->mvdev.res.uar->map;
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_vdpa_cq *vcq = &mvq->cq;
- unsigned int irqn;
__be64 *pas;
int inlen;
void *cqc;
@@ -566,7 +565,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
/* Use vector 0 by default. Consider adding code to choose least used
* vector.
*/
- err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+ err = mlx5_vector2eqn(mdev, 0, &eqn);
if (err)
goto err_vec;
@@ -611,8 +610,8 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
-static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
- struct mlx5_vdpa_umem **umemp)
+static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+ struct mlx5_vdpa_umem **umemp)
{
struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
int p_a;
@@ -635,7 +634,7 @@ static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
*umemp = &mvq->umem3;
break;
}
- return p_a * mvq->num_ent + p_b;
+ (*umemp)->size = p_a * mvq->num_ent + p_b;
}
static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
@@ -651,15 +650,10 @@ static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
void *in;
int err;
__be64 *pas;
- int size;
struct mlx5_vdpa_umem *umem;
- size = umem_size(ndev, mvq, num, &umem);
- if (size < 0)
- return size;
-
- umem->size = size;
- err = umem_frag_buf_alloc(ndev, umem, size);
+ set_umem_size(ndev, mvq, num, &umem);
+ err = umem_frag_buf_alloc(ndev, umem, umem->size);
if (err)
return err;
@@ -758,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
/* prefer split queue */
- if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
- return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+ if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
- WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
+ WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
- return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}
static bool vq_is_tx(u16 idx)
@@ -829,9 +823,9 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
- MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem1.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
- MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem1.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
@@ -1428,8 +1422,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
return -EINVAL;
}
- mvq->used_idx = state->avail_index;
- mvq->avail_idx = state->avail_index;
+ mvq->used_idx = state->split.avail_index;
+ mvq->avail_idx = state->split.avail_index;
return 0;
}
@@ -1450,7 +1444,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
* Since both values should be identical, we take the value of
* used_idx which is reported correctly.
*/
- state->avail_index = mvq->used_idx;
+ state->split.avail_index = mvq->used_idx;
return 0;
}
@@ -1459,7 +1453,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err;
}
- state->avail_index = attr.used_index;
+ state->split.avail_index = attr.used_index;
return 0;
}
@@ -1772,6 +1766,14 @@ out:
mutex_unlock(&ndev->reslock);
}
+static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
+ ndev->vqs[i].ready = false;
+}
+
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1782,10 +1784,15 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+ clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
++mvdev->generation;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_mr(mvdev, NULL))
+ mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ }
return;
}
@@ -1866,6 +1873,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
+ mlx5_vdpa_destroy_mr(mvdev);
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -1876,8 +1884,22 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct vdpa_notification_area ret = {};
+ struct mlx5_vdpa_net *ndev;
+ phys_addr_t addr;
+ /* If SF BAR size is smaller than PAGE_SIZE, do not use direct
+ * notification to avoid the risk of mapping pages that contain BAR of more
+ * than one SF
+ */
+ if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT)
+ return ret;
+
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr;
+ ret.addr = addr;
+ ret.size = PAGE_SIZE;
return ret;
}
@@ -2007,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return -ENOSPC;
mdev = mgtdev->madev->mdev;
+ if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
+ dev_warn(mdev->device, "missing support for split virtqueues\n");
+ return -EOPNOTSUPP;
+ }
+
/* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
@@ -2037,14 +2065,20 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
goto err_mtu;
}
- mvdev->vdev.dma_dev = mdev->device;
+ mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ err = mlx5_vdpa_create_mr(mvdev, NULL);
+ if (err)
+ goto err_res;
+ }
+
err = alloc_resources(ndev);
if (err)
- goto err_res;
+ goto err_mr;
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
@@ -2056,6 +2090,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
err_reg:
free_resources(ndev);
+err_mr:
+ mlx5_vdpa_destroy_mr(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
err_mpfs:
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 98f793bc9376..c621cf7feec0 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
dev_attr->name);
- if (!vdpasim)
+ if (IS_ERR(vdpasim)) {
+ ret = PTR_ERR(vdpasim);
goto err_alloc;
+ }
vdpasim->dev_attr = *dev_attr;
INIT_WORK(&vdpasim->work, dev_attr->work_fn);
@@ -374,7 +376,7 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vringh *vrh = &vq->vring;
spin_lock(&vdpasim->lock);
- vrh->last_avail_idx = state->avail_index;
+ vrh->last_avail_idx = state->split.avail_index;
spin_unlock(&vdpasim->lock);
return 0;
@@ -387,7 +389,7 @@ static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring;
- state->avail_index = vrh->last_avail_idx;
+ state->split.avail_index = vrh->last_avail_idx;
return 0;
}
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 5bfe1c281645..a790903f243e 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -15,7 +15,6 @@
#include <linux/blkdev.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
-#include <linux/blkdev.h>
#include <uapi/linux/virtio_blk.h>
#include "vdpa_sim.h"
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index c76ebb531212..fe0527329857 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -210,13 +210,49 @@ static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
return -EOPNOTSUPP;
}
+static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
+ const struct vdpa_vq_state *state)
+{
+ const struct vdpa_vq_state_split *split = &state->split;
+
+ if (split->avail_index == 0)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
+ const struct vdpa_vq_state *state)
+{
+ const struct vdpa_vq_state_packed *packed = &state->packed;
+
+ if (packed->last_avail_counter == 1 &&
+ packed->last_avail_idx == 0 &&
+ packed->last_used_counter == 1 &&
+ packed->last_used_idx == 0)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
const struct vdpa_vq_state *state)
{
- /* Note that this is not supported by virtio specification, so
- * we return -ENOPOTSUPP here. This means we can't support live
- * migration, vhost device start/stop.
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ /* Note that this is not supported by virtio specification.
+ * But if the state is by chance equal to the device initial
+ * state, we can let it go.
*/
+ if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
+ !vp_modern_get_queue_enable(mdev, qid)) {
+ if (vp_modern_get_driver_features(mdev) &
+ BIT_ULL(VIRTIO_F_RING_PACKED))
+ return vp_vdpa_set_vq_state_packed(vdpa, state);
+ else
+ return vp_vdpa_set_vq_state_split(vdpa, state);
+ }
+
return -EOPNOTSUPP;
}
@@ -400,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
dev, &vp_vdpa_ops, NULL);
- if (vp_vdpa == NULL) {
+ if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
- return -ENOMEM;
+ return PTR_ERR(vp_vdpa);
}
mdev = &vp_vdpa->mdev;
@@ -442,6 +478,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vp_modern_map_vq_notify(mdev, i,
&vp_vdpa->vring[i].notify_pa);
if (!vp_vdpa->vring[i].notify) {
+ ret = -EINVAL;
dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
goto err;
}
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 980e59551301..90cad109583b 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -140,26 +140,18 @@ static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
{
struct vfio_fsl_mc_device *vdev =
container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
+ int ret = 0;
mutex_lock(&vdev->reflck->lock);
if (!vdev->refcnt) {
ret = vfio_fsl_mc_regions_init(vdev);
if (ret)
- goto err_reg_init;
+ goto out;
}
vdev->refcnt++;
-
+out:
mutex_unlock(&vdev->reflck->lock);
- return 0;
-
-err_reg_init:
- mutex_unlock(&vdev->reflck->lock);
- module_put(THIS_MODULE);
return ret;
}
@@ -196,8 +188,6 @@ static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
}
mutex_unlock(&vdev->reflck->lock);
-
- module_put(THIS_MODULE);
}
static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
diff --git a/drivers/vfio/mdev/Kconfig b/drivers/vfio/mdev/Kconfig
index 5da27f2100f9..763c877a1318 100644
--- a/drivers/vfio/mdev/Kconfig
+++ b/drivers/vfio/mdev/Kconfig
@@ -9,10 +9,3 @@ config VFIO_MDEV
See Documentation/driver-api/vfio-mediated-device.rst for more details.
If you don't know what to do here, say N.
-
-config VFIO_MDEV_DEVICE
- tristate "VFIO driver for Mediated devices"
- depends on VFIO && VFIO_MDEV
- default n
- help
- VFIO based driver for Mediated devices.
diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile
index 101516fdf375..ff9ecd802125 100644
--- a/drivers/vfio/mdev/Makefile
+++ b/drivers/vfio/mdev/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o
+mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o
obj-$(CONFIG_VFIO_MDEV) += mdev.o
-obj-$(CONFIG_VFIO_MDEV_DEVICE) += vfio_mdev.o
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index 2a85d6fcb7dd..e4581ec093a6 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -94,9 +94,11 @@ static void mdev_device_remove_common(struct mdev_device *mdev)
mdev_remove_sysfs_files(mdev);
device_del(&mdev->dev);
lockdep_assert_held(&parent->unreg_sem);
- ret = parent->ops->remove(mdev);
- if (ret)
- dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
+ if (parent->ops->remove) {
+ ret = parent->ops->remove(mdev);
+ if (ret)
+ dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
+ }
/* Balances with device_initialize() */
put_device(&mdev->dev);
@@ -127,7 +129,9 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
char *envp[] = { env_string, NULL };
/* check for mandatory ops */
- if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups)
+ if (!ops || !ops->supported_type_groups)
+ return -EINVAL;
+ if (!ops->device_driver && (!ops->create || !ops->remove))
return -EINVAL;
dev = get_device(dev);
@@ -256,6 +260,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
int ret;
struct mdev_device *mdev, *tmp;
struct mdev_parent *parent = type->parent;
+ struct mdev_driver *drv = parent->ops->device_driver;
mutex_lock(&mdev_list_lock);
@@ -296,14 +301,22 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
goto out_put_device;
}
- ret = parent->ops->create(mdev);
- if (ret)
- goto out_unlock;
+ if (parent->ops->create) {
+ ret = parent->ops->create(mdev);
+ if (ret)
+ goto out_unlock;
+ }
ret = device_add(&mdev->dev);
if (ret)
goto out_remove;
+ if (!drv)
+ drv = &vfio_mdev_driver;
+ ret = device_driver_attach(&drv->driver, &mdev->dev);
+ if (ret)
+ goto out_del;
+
ret = mdev_create_sysfs_files(mdev);
if (ret)
goto out_del;
@@ -317,7 +330,8 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
out_del:
device_del(&mdev->dev);
out_remove:
- parent->ops->remove(mdev);
+ if (parent->ops->remove)
+ parent->ops->remove(mdev);
out_unlock:
up_read(&parent->unreg_sem);
out_put_device:
@@ -360,11 +374,24 @@ int mdev_device_remove(struct mdev_device *mdev)
static int __init mdev_init(void)
{
- return mdev_bus_register();
+ int rc;
+
+ rc = mdev_bus_register();
+ if (rc)
+ return rc;
+ rc = mdev_register_driver(&vfio_mdev_driver);
+ if (rc)
+ goto err_bus;
+ return 0;
+err_bus:
+ mdev_bus_unregister();
+ return rc;
}
static void __exit mdev_exit(void)
{
+ mdev_unregister_driver(&vfio_mdev_driver);
+
if (mdev_bus_compat_class)
class_compat_unregister(mdev_bus_compat_class);
@@ -378,4 +405,3 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_SOFTDEP("post: vfio_mdev");
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index 041699571b7e..c368ec824e2b 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -71,10 +71,20 @@ static int mdev_remove(struct device *dev)
return 0;
}
+static int mdev_match(struct device *dev, struct device_driver *drv)
+{
+ /*
+ * No drivers automatically match. Drivers are only bound by explicit
+ * device_driver_attach()
+ */
+ return 0;
+}
+
struct bus_type mdev_bus_type = {
.name = "mdev",
.probe = mdev_probe,
.remove = mdev_remove,
+ .match = mdev_match,
};
EXPORT_SYMBOL_GPL(mdev_bus_type);
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 6999c89db7b1..afbad7b0a14a 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -37,6 +37,8 @@ struct mdev_type {
#define to_mdev_type(_kobj) \
container_of(_kobj, struct mdev_type, kobj)
+extern struct mdev_driver vfio_mdev_driver;
+
int parent_create_sysfs_files(struct mdev_parent *parent);
void parent_remove_sysfs_files(struct mdev_parent *parent);
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index 922729071c5a..39ef7489fe47 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -17,28 +17,15 @@
#include "mdev_private.h"
-#define DRIVER_VERSION "0.1"
-#define DRIVER_AUTHOR "NVIDIA Corporation"
-#define DRIVER_DESC "VFIO based driver for Mediated device"
-
static int vfio_mdev_open(struct vfio_device *core_vdev)
{
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
struct mdev_parent *parent = mdev->type->parent;
- int ret;
-
if (unlikely(!parent->ops->open))
return -EINVAL;
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- ret = parent->ops->open(mdev);
- if (ret)
- module_put(THIS_MODULE);
-
- return ret;
+ return parent->ops->open(mdev);
}
static void vfio_mdev_release(struct vfio_device *core_vdev)
@@ -48,8 +35,6 @@ static void vfio_mdev_release(struct vfio_device *core_vdev)
if (likely(parent->ops->release))
parent->ops->release(mdev);
-
- module_put(THIS_MODULE);
}
static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev,
@@ -151,7 +136,7 @@ static void vfio_mdev_remove(struct mdev_device *mdev)
kfree(vdev);
}
-static struct mdev_driver vfio_mdev_driver = {
+struct mdev_driver vfio_mdev_driver = {
.driver = {
.name = "vfio_mdev",
.owner = THIS_MODULE,
@@ -160,21 +145,3 @@ static struct mdev_driver vfio_mdev_driver = {
.probe = vfio_mdev_probe,
.remove = vfio_mdev_remove,
};
-
-static int __init vfio_mdev_init(void)
-{
- return mdev_register_driver(&vfio_mdev_driver);
-}
-
-static void __exit vfio_mdev_exit(void)
-{
- mdev_unregister_driver(&vfio_mdev_driver);
-}
-
-module_init(vfio_mdev_init)
-module_exit(vfio_mdev_exit)
-
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index bd7c482c948a..318864d52837 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -477,13 +477,10 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
* We can not use the "try" reset interface here, which will
* overwrite the previously restored configuration information.
*/
- if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
- if (device_trylock(&pdev->dev)) {
- if (!__pci_reset_function_locked(pdev))
- vdev->needs_reset = false;
- device_unlock(&pdev->dev);
- }
- pci_cfg_access_unlock(pdev);
+ if (vdev->reset_works && pci_dev_trylock(pdev)) {
+ if (!__pci_reset_function_locked(pdev))
+ vdev->needs_reset = false;
+ pci_dev_unlock(pdev);
}
pci_restore_state(pdev);
@@ -558,8 +555,6 @@ static void vfio_pci_release(struct vfio_device *core_vdev)
}
mutex_unlock(&vdev->reflck->lock);
-
- module_put(THIS_MODULE);
}
static int vfio_pci_open(struct vfio_device *core_vdev)
@@ -568,9 +563,6 @@ static int vfio_pci_open(struct vfio_device *core_vdev)
container_of(core_vdev, struct vfio_pci_device, vdev);
int ret = 0;
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
mutex_lock(&vdev->reflck->lock);
if (!vdev->refcnt) {
@@ -584,8 +576,6 @@ static int vfio_pci_open(struct vfio_device *core_vdev)
vdev->refcnt++;
error:
mutex_unlock(&vdev->reflck->lock);
- if (ret)
- module_put(THIS_MODULE);
return ret;
}
@@ -1594,6 +1584,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
vm_fault_t ret = VM_FAULT_NOPAGE;
mutex_lock(&vdev->vma_lock);
@@ -1601,24 +1592,36 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
if (!__vfio_pci_memory_enabled(vdev)) {
ret = VM_FAULT_SIGBUS;
- mutex_unlock(&vdev->vma_lock);
goto up_out;
}
- if (__vfio_pci_add_vma(vdev, vma)) {
- ret = VM_FAULT_OOM;
- mutex_unlock(&vdev->vma_lock);
- goto up_out;
+ /*
+ * We populate the whole vma on fault, so we need to test whether
+ * the vma has already been mapped, such as for concurrent faults
+ * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
+ * we ask it to fill the same range again.
+ */
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma)
+ goto up_out;
}
- mutex_unlock(&vdev->vma_lock);
-
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
ret = VM_FAULT_SIGBUS;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ }
up_out:
up_read(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
return ret;
}
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index f970eb2a999f..badfffea14fb 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -59,7 +59,6 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
vdev->flags = VFIO_DEVICE_FLAGS_AMBA;
vdev->get_resource = get_amba_resource;
vdev->get_irq = get_amba_irq;
- vdev->parent_module = THIS_MODULE;
vdev->reset_required = false;
ret = vfio_platform_probe_common(vdev, &adev->dev);
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index e4027799a154..68a1c87066d7 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -50,7 +50,6 @@ static int vfio_platform_probe(struct platform_device *pdev)
vdev->flags = VFIO_DEVICE_FLAGS_PLATFORM;
vdev->get_resource = get_platform_resource;
vdev->get_irq = get_platform_irq;
- vdev->parent_module = THIS_MODULE;
vdev->reset_required = reset_required;
ret = vfio_platform_probe_common(vdev, &pdev->dev);
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 470fcf7dac56..703164df7637 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -241,8 +241,6 @@ static void vfio_platform_release(struct vfio_device *core_vdev)
}
mutex_unlock(&driver_lock);
-
- module_put(vdev->parent_module);
}
static int vfio_platform_open(struct vfio_device *core_vdev)
@@ -251,9 +249,6 @@ static int vfio_platform_open(struct vfio_device *core_vdev)
container_of(core_vdev, struct vfio_platform_device, vdev);
int ret;
- if (!try_module_get(vdev->parent_module))
- return -ENODEV;
-
mutex_lock(&driver_lock);
if (!vdev->refcnt) {
@@ -291,7 +286,6 @@ err_irq:
vfio_platform_regions_cleanup(vdev);
err_reg:
mutex_unlock(&driver_lock);
- module_put(vdev->parent_module);
return ret;
}
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index a5ba82c8cbc3..dfb834c13659 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -50,7 +50,6 @@ struct vfio_platform_device {
u32 num_irqs;
int refcnt;
struct mutex igate;
- struct module *parent_module;
const char *compat;
const char *acpihid;
struct module *reset_module;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 5e631c359ef2..02cc51ce6891 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1369,8 +1369,14 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
if (IS_ERR(device))
return PTR_ERR(device);
+ if (!try_module_get(device->dev->driver->owner)) {
+ vfio_device_put(device);
+ return -ENODEV;
+ }
+
ret = device->ops->open(device);
if (ret) {
+ module_put(device->dev->driver->owner);
vfio_device_put(device);
return ret;
}
@@ -1382,6 +1388,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0) {
device->ops->release(device);
+ module_put(device->dev->driver->owner);
vfio_device_put(device);
return ret;
}
@@ -1392,6 +1399,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
put_unused_fd(ret);
ret = PTR_ERR(filep);
device->ops->release(device);
+ module_put(device->dev->driver->owner);
vfio_device_put(device);
return ret;
}
@@ -1550,6 +1558,8 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
device->ops->release(device);
+ module_put(device->dev->driver->owner);
+
vfio_group_try_dissolve_container(device->group);
vfio_device_put(device);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fce73a8a650..0b4f7c174c7a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -110,7 +110,7 @@ struct vfio_batch {
int offset; /* of next entry in pages */
};
-struct vfio_group {
+struct vfio_iommu_group {
struct iommu_group *iommu_group;
struct list_head next;
bool mdev_group; /* An mdev group */
@@ -160,8 +160,9 @@ struct vfio_regions {
static int put_pfn(unsigned long pfn, int prot);
-static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
- struct iommu_group *iommu_group);
+static struct vfio_iommu_group*
+vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+ struct iommu_group *iommu_group);
/*
* This code handles mapping and unmapping of user data buffers
@@ -836,7 +837,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
unsigned long *phys_pfn)
{
struct vfio_iommu *iommu = iommu_data;
- struct vfio_group *group;
+ struct vfio_iommu_group *group;
int i, j, ret;
unsigned long remote_vaddr;
struct vfio_dma *dma;
@@ -1875,10 +1876,10 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
__free_pages(pages, order);
}
-static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
- struct iommu_group *iommu_group)
+static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
+ struct iommu_group *iommu_group)
{
- struct vfio_group *g;
+ struct vfio_iommu_group *g;
list_for_each_entry(g, &domain->group_list, next) {
if (g->iommu_group == iommu_group)
@@ -1888,11 +1889,12 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL;
}
-static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
- struct iommu_group *iommu_group)
+static struct vfio_iommu_group*
+vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+ struct iommu_group *iommu_group)
{
struct vfio_domain *domain;
- struct vfio_group *group = NULL;
+ struct vfio_iommu_group *group = NULL;
list_for_each_entry(domain, &iommu->domain_list, next) {
group = find_iommu_group(domain, iommu_group);
@@ -1967,7 +1969,7 @@ static int vfio_mdev_detach_domain(struct device *dev, void *data)
}
static int vfio_iommu_attach_group(struct vfio_domain *domain,
- struct vfio_group *group)
+ struct vfio_iommu_group *group)
{
if (group->mdev_group)
return iommu_group_for_each_dev(group->iommu_group,
@@ -1978,7 +1980,7 @@ static int vfio_iommu_attach_group(struct vfio_domain *domain,
}
static void vfio_iommu_detach_group(struct vfio_domain *domain,
- struct vfio_group *group)
+ struct vfio_iommu_group *group)
{
if (group->mdev_group)
iommu_group_for_each_dev(group->iommu_group, domain->domain,
@@ -2242,7 +2244,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
- struct vfio_group *group;
+ struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
struct bus_type *bus = NULL;
int ret;
@@ -2518,7 +2520,7 @@ static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
struct list_head *iova_copy)
{
struct vfio_domain *d;
- struct vfio_group *g;
+ struct vfio_iommu_group *g;
struct vfio_iova *node;
dma_addr_t start, end;
LIST_HEAD(resv_regions);
@@ -2560,7 +2562,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain;
- struct vfio_group *group;
+ struct vfio_iommu_group *group;
bool update_dirty_scope = false;
LIST_HEAD(iova_copy);
@@ -2681,7 +2683,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
static void vfio_release_domain(struct vfio_domain *domain, bool external)
{
- struct vfio_group *group, *group_tmp;
+ struct vfio_iommu_group *group, *group_tmp;
list_for_each_entry_safe(group, group_tmp,
&domain->group_list, next) {
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 0fd3f87e913c..0582079e4bcc 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -83,7 +83,7 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
/**
- * vring_iotlb_del_range - delete overlapped ranges from vhost IOTLB
+ * vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index d16c04dcc144..46f897e41217 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1430,11 +1430,6 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
vhost_scsi_handle_vq(vs, vq);
}
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
- vhost_poll_flush(&vs->vqs[index].vq.poll);
-}
-
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
@@ -1453,10 +1448,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- vhost_scsi_flush_vq(vs, i);
- vhost_work_flush(&vs->dev, &vs->vs_completion_work);
- vhost_work_flush(&vs->dev, &vs->vs_event_work);
+ vhost_work_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1740,11 +1732,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
- /*
- * Make sure cmds are not running before tearing them
- * down.
- */
- vhost_scsi_flush(vs);
+ }
+ /* Make sure cmds are not running before tearing them down. */
+ vhost_scsi_flush(vs);
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index fb41db3da611..9479f7f79217 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -383,7 +383,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (r)
return r;
- vq->last_avail_idx = vq_state.avail_index;
+ vq->last_avail_idx = vq_state.split.avail_index;
break;
}
@@ -401,7 +401,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- vq_state.avail_index = vq->last_avail_idx;
+ vq_state.split.avail_index = vq->last_avail_idx;
if (ops->set_vq_state(vdpa, idx, &vq_state))
r = -EINVAL;
break;
@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
long pinned;
int ret = 0;
- if (msg->iova < v->range.first ||
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
msg->iova + msg->size - 1 > v->range.last)
return -EINVAL;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5ccb0705beae..59edb5a1ffe2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_dev_flush(struct vhost_dev *dev)
{
struct vhost_flush_struct flush;
@@ -243,13 +243,13 @@ void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
wait_for_completion(&flush.wait_event);
}
}
-EXPORT_SYMBOL_GPL(vhost_work_flush);
+EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
* locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
- vhost_work_flush(poll->dev, &poll->work);
+ vhost_work_dev_flush(poll->dev);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
@@ -538,7 +538,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
- vhost_work_flush(dev, &attach.work);
+ vhost_work_dev_flush(dev);
return attach.ret;
}
@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
+/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
- /* Make sure 64 bit math will not overflow. */
- return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+ if (uaddr > ULONG_MAX || size > ULONG_MAX)
+ return true;
+
+ if (!size)
+ return false;
+
+ return uaddr > ULONG_MAX - size + 1;
}
/* Caller should have vq mutex and device mutex. */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b063324c7669..638bb640d6b4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -20,20 +20,20 @@ typedef void (*vhost_work_fn_t)(struct vhost_work *work);
#define VHOST_WORK_QUEUED 1
struct vhost_work {
- struct llist_node node;
- vhost_work_fn_t fn;
- unsigned long flags;
+ struct llist_node node;
+ vhost_work_fn_t fn;
+ unsigned long flags;
};
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
- poll_table table;
- wait_queue_head_t *wqh;
- wait_queue_entry_t wait;
- struct vhost_work work;
- __poll_t mask;
- struct vhost_dev *dev;
+ poll_table table;
+ wait_queue_head_t *wqh;
+ wait_queue_entry_t wait;
+ struct vhost_work work;
+ __poll_t mask;
+ struct vhost_dev *dev;
};
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
@@ -46,8 +46,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
-long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+void vhost_work_dev_flush(struct vhost_dev *dev);
struct vhost_log {
u64 addr;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 4af8fa259d65..14e2043d7685 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov = wiov;
else {
iov = riov;
- if (unlikely(wiov && wiov->i)) {
+ if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable",
&descs[i]);
err = -EINVAL;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d38c996b4f46..f249622ef11b 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -708,7 +708,7 @@ static void vhost_vsock_flush(struct vhost_vsock *vsock)
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
if (vsock->vqs[i].handle_kick)
vhost_poll_flush(&vsock->vqs[i].poll);
- vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
+ vhost_work_dev_flush(&vsock->dev);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index d83c87b902c1..e32694c13da5 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -128,12 +128,12 @@ config LCD_HX8357
If you have a HX-8357 LCD panel, say Y to enable its LCD control
driver.
- config LCD_OTM3225A
- tristate "ORISE Technology OTM3225A support"
- depends on SPI
- help
- If you have a panel based on the OTM3225A controller
- chip then say y to include a driver for it.
+config LCD_OTM3225A
+ tristate "ORISE Technology OTM3225A support"
+ depends on SPI
+ help
+ If you have a panel based on the OTM3225A controller
+ chip then say y to include a driver for it.
endif # LCD_CLASS_DEVICE
@@ -269,11 +269,11 @@ config BACKLIGHT_MAX8925
WLED output, say Y here to enable this driver.
config BACKLIGHT_APPLE
- tristate "Apple Backlight Driver"
- depends on X86 && ACPI
- help
- If you have an Intel-based Apple say Y to enable a driver for its
- backlight.
+ tristate "Apple Backlight Driver"
+ depends on X86 && ACPI
+ help
+ If you have an Intel-based Apple say Y to enable a driver for its
+ backlight.
config BACKLIGHT_TOSA
tristate "Sharp SL-6000 Backlight Driver"
@@ -289,6 +289,14 @@ config BACKLIGHT_QCOM_WLED
If you have the Qualcomm PMIC, say Y to enable a driver for the
WLED block. Currently it supports PM8941 and PMI8998.
+config BACKLIGHT_RT4831
+ tristate "Richtek RT4831 Backlight Driver"
+ depends on MFD_RT4831
+ help
+	  This enables support for the Richtek RT4831 backlight driver.
+	  It's commonly used to drive the display WLED. There are four channels
+	  inside, and each channel can provide up to 30mA of current.
+
config BACKLIGHT_SAHARA
tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
depends on X86
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 685f3f1ca4df..cae2c83422ae 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
obj-$(CONFIG_BACKLIGHT_QCOM_WLED) += qcom-wled.o
+obj-$(CONFIG_BACKLIGHT_RT4831) += rt4831-backlight.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_SKY81452) += sky81452-backlight.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index e88a2b0e5904..e8b185bb6f5e 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -52,6 +52,7 @@ struct lm3630a_chip {
struct gpio_desc *enable_gpio;
struct regmap *regmap;
struct pwm_device *pwmd;
+ struct pwm_state pwmd_state;
};
/* i2c access */
@@ -167,16 +168,19 @@ static int lm3630a_intr_config(struct lm3630a_chip *pchip)
return rval;
}
-static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
+static int lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
{
- unsigned int period = pchip->pdata->pwm_period;
- unsigned int duty = br * period / br_max;
+ int err;
- pwm_config(pchip->pwmd, duty, period);
- if (duty)
- pwm_enable(pchip->pwmd);
- else
- pwm_disable(pchip->pwmd);
+ pchip->pwmd_state.period = pchip->pdata->pwm_period;
+
+ err = pwm_set_relative_duty_cycle(&pchip->pwmd_state, br, br_max);
+ if (err)
+ return err;
+
+ pchip->pwmd_state.enabled = pchip->pwmd_state.duty_cycle ? true : false;
+
+ return pwm_apply_state(pchip->pwmd, &pchip->pwmd_state);
}
/* update and get brightness */
@@ -187,11 +191,9 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
/* pwm control */
- if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) {
- lm3630a_pwm_ctrl(pchip, bl->props.brightness,
- bl->props.max_brightness);
- return bl->props.brightness;
- }
+ if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0)
+ return lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ bl->props.max_brightness);
/* disable sleep */
ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
@@ -210,8 +212,8 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
return 0;
out_i2c_err:
- dev_err(pchip->dev, "i2c failed to access\n");
- return bl->props.brightness;
+ dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret));
+ return ret;
}
static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
@@ -264,11 +266,9 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl;
/* pwm control */
- if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) {
- lm3630a_pwm_ctrl(pchip, bl->props.brightness,
- bl->props.max_brightness);
- return bl->props.brightness;
- }
+ if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0)
+ return lm3630a_pwm_ctrl(pchip, bl->props.brightness,
+ bl->props.max_brightness);
/* disable sleep */
ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00);
@@ -287,8 +287,8 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
return 0;
out_i2c_err:
- dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
- return bl->props.brightness;
+ dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret));
+ return ret;
}
static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
@@ -482,8 +482,10 @@ static int lm3630a_parse_node(struct lm3630a_chip *pchip,
device_for_each_child_node(pchip->dev, node) {
ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(node);
return ret;
+ }
}
return ret;
@@ -563,11 +565,7 @@ static int lm3630a_probe(struct i2c_client *client,
return PTR_ERR(pchip->pwmd);
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pchip->pwmd);
+ pwm_init_state(pchip->pwmd, &pchip->pwmd_state);
}
/* interrupt enable : irq 0 is not allowed */
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 7c02f87c51c2..d094299c2a48 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1717,6 +1717,7 @@ static int wled_remove(struct platform_device *pdev)
static const struct of_device_id wled_match_table[] = {
{ .compatible = "qcom,pm8941-wled", .data = (void *)3 },
+ { .compatible = "qcom,pmi8994-wled", .data = (void *)4 },
{ .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
{ .compatible = "qcom,pm660l-wled", .data = (void *)4 },
{ .compatible = "qcom,pm8150l-wled", .data = (void *)5 },
diff --git a/drivers/video/backlight/rt4831-backlight.c b/drivers/video/backlight/rt4831-backlight.c
new file mode 100644
index 000000000000..42155c7d2db1
--- /dev/null
+++ b/drivers/video/backlight/rt4831-backlight.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <dt-bindings/leds/rt4831-backlight.h>
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define RT4831_REG_BLCFG 0x02
+#define RT4831_REG_BLDIML 0x04
+#define RT4831_REG_ENABLE 0x08
+
+#define RT4831_BLMAX_BRIGHTNESS 2048
+
+#define RT4831_BLOVP_MASK GENMASK(7, 5)
+#define RT4831_BLOVP_SHIFT 5
+#define RT4831_BLPWMEN_MASK BIT(0)
+#define RT4831_BLEN_MASK BIT(4)
+#define RT4831_BLCH_MASK GENMASK(3, 0)
+#define RT4831_BLDIML_MASK GENMASK(2, 0)
+#define RT4831_BLDIMH_MASK GENMASK(10, 3)
+#define RT4831_BLDIMH_SHIFT 3
+
+struct rt4831_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct backlight_device *bl;
+};
+
+static int rt4831_bl_update_status(struct backlight_device *bl_dev)
+{
+ struct rt4831_priv *priv = bl_get_data(bl_dev);
+ int brightness = backlight_get_brightness(bl_dev);
+ unsigned int enable = brightness ? RT4831_BLEN_MASK : 0;
+ u8 v[2];
+ int ret;
+
+ if (brightness) {
+ v[0] = (brightness - 1) & RT4831_BLDIML_MASK;
+ v[1] = ((brightness - 1) & RT4831_BLDIMH_MASK) >> RT4831_BLDIMH_SHIFT;
+
+ ret = regmap_raw_write(priv->regmap, RT4831_REG_BLDIML, v, sizeof(v));
+ if (ret)
+ return ret;
+ }
+
+ return regmap_update_bits(priv->regmap, RT4831_REG_ENABLE, RT4831_BLEN_MASK, enable);
+}
+
+static int rt4831_bl_get_brightness(struct backlight_device *bl_dev)
+{
+ struct rt4831_priv *priv = bl_get_data(bl_dev);
+ unsigned int val;
+ u8 v[2];
+ int ret;
+
+ ret = regmap_read(priv->regmap, RT4831_REG_ENABLE, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & RT4831_BLEN_MASK))
+ return 0;
+
+ ret = regmap_raw_read(priv->regmap, RT4831_REG_BLDIML, v, sizeof(v));
+ if (ret)
+ return ret;
+
+ ret = (v[1] << RT4831_BLDIMH_SHIFT) + (v[0] & RT4831_BLDIML_MASK) + 1;
+
+ return ret;
+}
+
+static const struct backlight_ops rt4831_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = rt4831_bl_update_status,
+ .get_brightness = rt4831_bl_get_brightness,
+};
+
+static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
+ struct backlight_properties *bl_props)
+{
+ struct device *dev = priv->dev;
+ u8 propval;
+ u32 brightness;
+ unsigned int val = 0;
+ int ret;
+
+ /* common properties */
+ ret = device_property_read_u32(dev, "max-brightness", &brightness);
+ if (ret)
+ brightness = RT4831_BLMAX_BRIGHTNESS;
+
+ bl_props->max_brightness = min_t(u32, brightness, RT4831_BLMAX_BRIGHTNESS);
+
+ ret = device_property_read_u32(dev, "default-brightness", &brightness);
+ if (ret)
+ brightness = bl_props->max_brightness;
+
+ bl_props->brightness = min_t(u32, brightness, bl_props->max_brightness);
+
+ /* vendor properties */
+ if (device_property_read_bool(dev, "richtek,pwm-enable"))
+ val = RT4831_BLPWMEN_MASK;
+
+ ret = regmap_update_bits(priv->regmap, RT4831_REG_BLCFG, RT4831_BLPWMEN_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u8(dev, "richtek,bled-ovp-sel", &propval);
+ if (ret)
+ propval = RT4831_BLOVPLVL_21V;
+
+ propval = min_t(u8, propval, RT4831_BLOVPLVL_29V);
+ ret = regmap_update_bits(priv->regmap, RT4831_REG_BLCFG, RT4831_BLOVP_MASK,
+ propval << RT4831_BLOVP_SHIFT);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u8(dev, "richtek,channel-use", &propval);
+ if (ret) {
+ dev_err(dev, "richtek,channel-use DT property missing\n");
+ return ret;
+ }
+
+ if (!(propval & RT4831_BLCH_MASK)) {
+ dev_err(dev, "No channel specified\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->regmap, RT4831_REG_ENABLE, RT4831_BLCH_MASK, propval);
+}
+
+static int rt4831_bl_probe(struct platform_device *pdev)
+{
+ struct rt4831_priv *priv;
+ struct backlight_properties bl_props = { .type = BACKLIGHT_RAW,
+ .scale = BACKLIGHT_SCALE_LINEAR };
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap) {
+ dev_err(&pdev->dev, "Failed to init regmap\n");
+ return -ENODEV;
+ }
+
+ ret = rt4831_parse_backlight_properties(priv, &bl_props);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse backlight properties\n");
+ return ret;
+ }
+
+ priv->bl = devm_backlight_device_register(&pdev->dev, pdev->name, &pdev->dev, priv,
+ &rt4831_bl_ops, &bl_props);
+ if (IS_ERR(priv->bl)) {
+ dev_err(&pdev->dev, "Failed to register backlight\n");
+ return PTR_ERR(priv->bl);
+ }
+
+ backlight_update_status(priv->bl);
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int rt4831_bl_remove(struct platform_device *pdev)
+{
+ struct rt4831_priv *priv = platform_get_drvdata(pdev);
+ struct backlight_device *bl_dev = priv->bl;
+
+ bl_dev->props.brightness = 0;
+ backlight_update_status(priv->bl);
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt4831_bl_of_match[] = {
+ { .compatible = "richtek,rt4831-backlight", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt4831_bl_of_match);
+
+static struct platform_driver rt4831_bl_driver = {
+ .driver = {
+ .name = "rt4831-backlight",
+ .of_match_table = rt4831_bl_of_match,
+ },
+ .probe = rt4831_bl_probe,
+ .remove = rt4831_bl_remove,
+};
+module_platform_driver(rt4831_bl_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index ee33b8ec62bb..840d9813b0bc 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@ config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !SUPERH && \
(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
- !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32 && !S390
+ !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32 && !S390 && !UML
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5e1ad0c92bfc..d33c5cd684c0 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2209,7 +2209,6 @@ config FB_SIMPLE
config FB_SSD1307
tristate "Solomon SSD1307 framebuffer support"
depends on FB && I2C
- depends on OF
depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FOPS
select FB_SYS_FILLRECT
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 98f193078c05..1c855145711b 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
fb_var_to_videomode(&mode2, &info->var);
/* make sure we don't delete the videomode of current var */
ret = fb_mode_is_equal(&mode1, &mode2);
-
- if (!ret)
- fbcon_mode_deleted(info, &mode1);
-
- if (!ret)
- fb_delete_videomode(&mode1, &info->modelist);
-
+ if (!ret) {
+ ret = fbcon_mode_deleted(info, &mode1);
+ if (!ret)
+ fb_delete_videomode(&mode1, &info->modelist);
+ }
return ret ? -EINVAL : 0;
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index a7e6eea2c4a1..23999df52739 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -52,6 +52,7 @@
#include <linux/completion.h>
#include <linux/fb.h>
#include <linux/pci.h>
+#include <linux/panic_notifier.h>
#include <linux/efi.h>
#include <linux/console.h>
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index ffbf900648d9..438e2c78142f 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -241,6 +241,8 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
case FB_BLANK_POWERDOWN:
/* turn off panel */
xilinx_fb_out32(drvdata, REG_CTRL, 0);
+ break;
+
default:
break;
}
diff --git a/drivers/virt/acrn/vm.c b/drivers/virt/acrn/vm.c
index 0d002a355a93..fbc9f1042000 100644
--- a/drivers/virt/acrn/vm.c
+++ b/drivers/virt/acrn/vm.c
@@ -64,6 +64,14 @@ int acrn_vm_destroy(struct acrn_vm *vm)
test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
return 0;
+ ret = hcall_destroy_vm(vm->vmid);
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device,
+ "Failed to destroy VM %u\n", vm->vmid);
+ clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
+ return ret;
+ }
+
/* Remove from global VM list */
write_lock_bh(&acrn_vm_list_lock);
list_del_init(&vm->list);
@@ -78,14 +86,6 @@ int acrn_vm_destroy(struct acrn_vm *vm)
vm->monitor_page = NULL;
}
- ret = hcall_destroy_vm(vm->vmid);
- if (ret < 0) {
- dev_err(acrn_dev.this_device,
- "Failed to destroy VM %u\n", vm->vmid);
- clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
- return ret;
- }
-
acrn_vm_all_ram_unmap(vm);
dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c
index b9c1de41e300..143207e9b969 100644
--- a/drivers/virt/nitro_enclaves/ne_pci_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c
@@ -480,6 +480,8 @@ static int ne_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free_ne_pci_dev;
}
+ pci_set_master(pdev);
+
rc = pci_request_regions_exclusive(pdev, "nitro_enclaves");
if (rc < 0) {
dev_err(&pdev->dev, "Error in pci request regions [rc=%d]\n", rc);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 4b15c00c0a0a..49984d2cba24 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs);
+ spin_lock_init(&dev->vqs_list_lock);
/*
* device_add() causes the bus infrastructure to look for a matching
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 10ec60d81e84..09ed55de07d7 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -75,10 +75,14 @@ enum virtio_mem_sbm_mb_state {
VIRTIO_MEM_SBM_MB_OFFLINE,
/* Partially plugged, fully added to Linux, offline. */
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
- /* Fully plugged, fully added to Linux, online. */
- VIRTIO_MEM_SBM_MB_ONLINE,
- /* Partially plugged, fully added to Linux, online. */
- VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL,
+ /* Fully plugged, fully added to Linux, onlined to a kernel zone. */
+ VIRTIO_MEM_SBM_MB_KERNEL,
+	/* Partially plugged, fully added to Linux, onlined to a kernel zone. */
+ VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
+ /* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
+ VIRTIO_MEM_SBM_MB_MOVABLE,
+ /* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
+ VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
VIRTIO_MEM_SBM_MB_COUNT
};
@@ -699,18 +703,6 @@ static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
}
/*
- * See virtio_mem_remove_memory(): Try to remove all Linux memory blocks covered
- * by the big block.
- */
-static int virtio_mem_bbm_remove_bb(struct virtio_mem *vm, unsigned long bb_id)
-{
- const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
- const uint64_t size = vm->bbm.bb_size;
-
- return virtio_mem_remove_memory(vm, addr, size);
-}
-
-/*
* Try offlining and removing memory from Linux.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
@@ -832,11 +824,13 @@ static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
unsigned long mb_id)
{
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
- case VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
break;
- case VIRTIO_MEM_SBM_MB_ONLINE:
+ case VIRTIO_MEM_SBM_MB_KERNEL:
+ case VIRTIO_MEM_SBM_MB_MOVABLE:
virtio_mem_sbm_set_mb_state(vm, mb_id,
VIRTIO_MEM_SBM_MB_OFFLINE);
break;
@@ -847,21 +841,29 @@ static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
}
static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
- unsigned long mb_id)
+ unsigned long mb_id,
+ unsigned long start_pfn)
{
+ const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
+ ZONE_MOVABLE;
+ int new_state;
+
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
+ new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
+ if (is_movable)
+ new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
break;
case VIRTIO_MEM_SBM_MB_OFFLINE:
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_ONLINE);
+ new_state = VIRTIO_MEM_SBM_MB_KERNEL;
+ if (is_movable)
+ new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
break;
default:
BUG();
break;
}
+ virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
}
static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
@@ -1015,7 +1017,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
break;
case MEM_ONLINE:
if (vm->in_sbm)
- virtio_mem_sbm_notify_online(vm, id);
+ virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
atomic64_sub(size, &vm->offline_size);
/*
@@ -1065,6 +1067,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
static void virtio_mem_set_fake_offline(unsigned long pfn,
unsigned long nr_pages, bool onlined)
{
+ page_offline_begin();
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -1075,6 +1078,7 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
ClearPageReserved(page);
}
}
+ page_offline_end();
}
/*
@@ -1135,7 +1139,7 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
*/
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
- const bool is_movable = zone_idx(page_zone(pfn_to_page(pfn))) ==
+ const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
ZONE_MOVABLE;
int rc, retry_count;
@@ -1453,8 +1457,8 @@ static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
*
* Note: can fail after some subblocks were unplugged.
*/
-static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
- unsigned long mb_id, uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb)
{
int sb_id, count;
int rc;
@@ -1496,7 +1500,7 @@ static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
uint64_t nb_sb = vm->sbm.sbs_per_mb;
- return virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
+ return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
}
/*
@@ -1583,9 +1587,9 @@ static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
* Note: Can fail after some subblocks were successfully plugged.
*/
static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
- unsigned long mb_id, uint64_t *nb_sb,
- bool online)
+ unsigned long mb_id, uint64_t *nb_sb)
{
+ const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
unsigned long pfn, nr_pages;
int sb_id, count;
int rc;
@@ -1607,7 +1611,7 @@ static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
if (rc)
return rc;
*nb_sb -= count;
- if (!online)
+ if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
continue;
/* fake-online the pages if the memory block is online */
@@ -1617,23 +1621,22 @@ static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
virtio_mem_fake_online(pfn, nr_pages);
}
- if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
- if (online)
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_ONLINE);
- else
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_OFFLINE);
- }
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
+ virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
return 0;
}
static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
+ const int mb_states[] = {
+ VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
+ VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
+ };
uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
- int rc;
+ int rc, i;
if (!nb_sb)
return 0;
@@ -1641,22 +1644,13 @@ static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
/* Don't race with onlining/offlining */
mutex_lock(&vm->hotplug_mutex);
- /* Try to plug subblocks of partially plugged online blocks. */
- virtio_mem_sbm_for_each_mb(vm, mb_id,
- VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
- rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
- if (rc || !nb_sb)
- goto out_unlock;
- cond_resched();
- }
-
- /* Try to plug subblocks of partially plugged offline blocks. */
- virtio_mem_sbm_for_each_mb(vm, mb_id,
- VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
- rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, false);
- if (rc || !nb_sb)
- goto out_unlock;
- cond_resched();
+ for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
+ virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
+ rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ cond_resched();
+ }
}
/*
@@ -1817,7 +1811,7 @@ static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
{
int rc;
- rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, nb_sb);
+ rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
/* some subblocks might have been unplugged even on failure */
if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
@@ -1854,6 +1848,7 @@ static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
int count)
{
const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
+ const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
unsigned long start_pfn;
int rc;
@@ -1872,8 +1867,17 @@ static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
return rc;
}
- virtio_mem_sbm_set_mb_state(vm, mb_id,
- VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
+ switch (old_state) {
+ case VIRTIO_MEM_SBM_MB_KERNEL:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
+ break;
+ case VIRTIO_MEM_SBM_MB_MOVABLE:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
+ break;
+ }
+
return 0;
}
@@ -1940,11 +1944,50 @@ unplugged:
return 0;
}
+/*
+ * Unplug the desired number of plugged subblocks of a memory block that is
+ * already added to Linux. Will skip subblocks of online memory blocks that are
+ * busy (by the OS). Will fail if any subblock that's not busy cannot get
+ * unplugged.
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ *
+ * Note: Can fail after some subblocks were successfully unplugged. Can
+ * return 0 even if subblocks were busy and could not get unplugged.
+ */
+static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
+{
+ const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
+
+ switch (old_state) {
+ case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_KERNEL:
+ case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_MOVABLE:
+ return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
+ case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_OFFLINE:
+ return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
+ }
+ return -EINVAL;
+}
+
static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
+ const int mb_states[] = {
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_OFFLINE,
+ VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
+ VIRTIO_MEM_SBM_MB_MOVABLE,
+ VIRTIO_MEM_SBM_MB_KERNEL,
+ };
uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
- int rc;
+ int rc, i;
if (!nb_sb)
return 0;
@@ -1956,47 +1999,26 @@ static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
*/
mutex_lock(&vm->hotplug_mutex);
- /* Try to unplug subblocks of partially plugged offline blocks. */
- virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
- VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
- rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
- if (rc || !nb_sb)
- goto out_unlock;
- cond_resched();
- }
-
- /* Try to unplug subblocks of plugged offline blocks. */
- virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
- rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
- if (rc || !nb_sb)
- goto out_unlock;
- cond_resched();
- }
-
- if (!unplug_online) {
- mutex_unlock(&vm->hotplug_mutex);
- return 0;
- }
-
- /* Try to unplug subblocks of partially plugged online blocks. */
- virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
- VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
- rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
- if (rc || !nb_sb)
- goto out_unlock;
- mutex_unlock(&vm->hotplug_mutex);
- cond_resched();
- mutex_lock(&vm->hotplug_mutex);
- }
-
- /* Try to unplug subblocks of plugged online blocks. */
- virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
- rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
- if (rc || !nb_sb)
- goto out_unlock;
- mutex_unlock(&vm->hotplug_mutex);
- cond_resched();
- mutex_lock(&vm->hotplug_mutex);
+ /*
+	 * We try to unplug from partially plugged blocks first, to try removing
+	 * whole memory blocks along with metadata. We prioritize ZONE_MOVABLE
+	 * as it's more reliable to unplug memory and remove whole memory
+	 * blocks, and we don't want to trigger zone imbalances by
+	 * accidentally removing too much kernel memory.
+ */
+ for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
+ rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ mutex_unlock(&vm->hotplug_mutex);
+ cond_resched();
+ mutex_lock(&vm->hotplug_mutex);
+ }
+ if (!unplug_online && i == 1) {
+ mutex_unlock(&vm->hotplug_mutex);
+ return 0;
+ }
}
mutex_unlock(&vm->hotplug_mutex);
@@ -2083,47 +2105,41 @@ rollback_safe_unplug:
}
/*
- * Try to remove a big block from Linux and unplug it. Will fail with
- * -EBUSY if some memory is online.
- *
- * Will modify the state of the memory block.
+ * Test if a big block is completely offline.
*/
-static int virtio_mem_bbm_remove_and_unplug_bb(struct virtio_mem *vm,
- unsigned long bb_id)
+static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
+ unsigned long bb_id)
{
- int rc;
-
- if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
- VIRTIO_MEM_BBM_BB_ADDED))
- return -EINVAL;
+ const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
+ const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
+ unsigned long pfn;
- rc = virtio_mem_bbm_remove_bb(vm, bb_id);
- if (rc)
- return -EBUSY;
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages;
+ pfn += PAGES_PER_SECTION) {
+ if (pfn_to_online_page(pfn))
+ return false;
+ }
- rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
- if (rc)
- virtio_mem_bbm_set_bb_state(vm, bb_id,
- VIRTIO_MEM_BBM_BB_PLUGGED);
- else
- virtio_mem_bbm_set_bb_state(vm, bb_id,
- VIRTIO_MEM_BBM_BB_UNUSED);
- return rc;
+ return true;
}
/*
- * Test if a big block is completely offline.
+ * Test if a big block is completely onlined to ZONE_MOVABLE (or offline).
*/
-static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
+static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
unsigned long bb_id)
{
const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
+ struct page *page;
unsigned long pfn;
for (pfn = start_pfn; pfn < start_pfn + nr_pages;
pfn += PAGES_PER_SECTION) {
- if (pfn_to_online_page(pfn))
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+ if (page_zonenum(page) != ZONE_MOVABLE)
return false;
}
@@ -2134,42 +2150,37 @@ static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
uint64_t nb_bb = diff / vm->bbm.bb_size;
uint64_t bb_id;
- int rc;
+ int rc, i;
if (!nb_bb)
return 0;
- /* Try to unplug completely offline big blocks first. */
- virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
- cond_resched();
- /*
- * As we're holding no locks, this check is racy as memory
- * can get onlined in the meantime - but we'll fail gracefully.
- */
- if (!virtio_mem_bbm_bb_is_offline(vm, bb_id))
- continue;
- rc = virtio_mem_bbm_remove_and_unplug_bb(vm, bb_id);
- if (rc == -EBUSY)
- continue;
- if (!rc)
- nb_bb--;
- if (rc || !nb_bb)
- return rc;
- }
-
- if (!unplug_online)
- return 0;
+ /*
+ * Try to unplug big blocks. Similar to SBM, start with offline
+ * big blocks.
+ */
+ for (i = 0; i < 3; i++) {
+ virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
+ cond_resched();
- /* Try to unplug any big blocks. */
- virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
- cond_resched();
- rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
- if (rc == -EBUSY)
- continue;
- if (!rc)
- nb_bb--;
- if (rc || !nb_bb)
- return rc;
+ /*
+ * As we're holding no locks, these checks are racy,
+ * but we don't care.
+ */
+ if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
+ continue;
+ if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
+ continue;
+ rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
+ if (rc == -EBUSY)
+ continue;
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ }
+ if (i == 0 && !unplug_online)
+ return 0;
}
return nb_bb ? -EBUSY : 0;
@@ -2420,6 +2431,10 @@ static int virtio_mem_init(struct virtio_mem *vm)
dev_warn(&vm->vdev->dev,
"Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
+ /* Prepare the offline threshold - make sure we can add two blocks. */
+ vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
+ VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
+
/*
* We want subblocks to span at least MAX_ORDER_NR_PAGES and
* pageblock_nr_pages pages. This:
@@ -2466,14 +2481,11 @@ static int virtio_mem_init(struct virtio_mem *vm)
vm->bbm.bb_size - 1;
vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
vm->bbm.next_bb_id = vm->bbm.first_bb_id;
- }
- /* Prepare the offline threshold - make sure we can add two blocks. */
- vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
- VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
- /* In BBM, we also want at least two big blocks. */
- vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
- vm->offline_threshold);
+ /* Make sure we can add two big blocks. */
+ vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
+ vm->offline_threshold);
+ }
dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 222d630c41fc..b35bb2d57f62 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct device *dev = get_device(&vp_dev->vdev.dev);
+ /*
+ * Device is marked broken on surprise removal so that virtio upper
+ * layers can abort any ongoing operation.
+ */
+ if (!pci_device_is_present(pci_dev))
+ virtio_break_device(&vp_dev->vdev);
+
pci_disable_sriov(pci_dev);
unregister_virtio_device(&vp_dev->vdev);
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 54f297028586..e11ed748e661 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -384,6 +384,27 @@ u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
EXPORT_SYMBOL_GPL(vp_modern_get_features);
/*
+ * vp_modern_get_driver_features - get driver features from device
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the driver features read from the device
+ */
+u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ u64 features;
+
+ vp_iowrite32(0, &cfg->guest_feature_select);
+ features = vp_ioread32(&cfg->guest_feature);
+ vp_iowrite32(1, &cfg->guest_feature_select);
+ features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);
+
+ return features;
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);
+
+/*
* vp_modern_set_features - set features to device
* @mdev: the modern virtio-pci device
* @features: the features set to device
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 71e16b53e9c1..dd95dfd85e98 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
#include <xen/xen.h>
#ifdef DEBUG
@@ -74,14 +75,14 @@ struct vring_desc_state_packed {
void *data; /* Data for callback. */
struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
u16 num; /* Descriptor list length. */
- u16 next; /* The next desc state in a list. */
u16 last; /* The last desc state in a list. */
};
-struct vring_desc_extra_packed {
+struct vring_desc_extra {
dma_addr_t addr; /* Buffer DMA addr. */
u32 len; /* Buffer length. */
u16 flags; /* Descriptor flags. */
+ u16 next; /* The next desc state in a list. */
};
struct vring_virtqueue {
@@ -113,6 +114,9 @@ struct vring_virtqueue {
/* Last used index we've seen. */
u16 last_used_idx;
+ /* Hint for event idx: already triggered no need to disable. */
+ bool event_triggered;
+
union {
/* Available for split ring */
struct {
@@ -130,6 +134,7 @@ struct vring_virtqueue {
/* Per-descriptor state. */
struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
/* DMA address and size information */
dma_addr_t queue_dma_addr;
@@ -166,7 +171,7 @@ struct vring_virtqueue {
/* Per-descriptor state. */
struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra_packed *desc_extra;
+ struct vring_desc_extra *desc_extra;
/* DMA address and size information */
dma_addr_t ring_dma_addr;
@@ -364,8 +369,8 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
* Split ring specific functions - *_split().
*/
-static void vring_unmap_one_split(const struct vring_virtqueue *vq,
- struct vring_desc *desc)
+static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
+ struct vring_desc *desc)
{
u16 flags;
@@ -389,6 +394,35 @@ static void vring_unmap_one_split(const struct vring_virtqueue *vq,
}
}
+static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
+ unsigned int i)
+{
+ struct vring_desc_extra *extra = vq->split.desc_extra;
+ u16 flags;
+
+ if (!vq->use_dma_api)
+ goto out;
+
+ flags = extra[i].flags;
+
+ if (flags & VRING_DESC_F_INDIRECT) {
+ dma_unmap_single(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+
+out:
+ return extra[i].next;
+}
+
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
unsigned int total_sg,
gfp_t gfp)
@@ -412,6 +446,35 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
return desc;
}
+static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
+ struct vring_desc *desc,
+ unsigned int i,
+ dma_addr_t addr,
+ unsigned int len,
+ u16 flags,
+ bool indirect)
+{
+ struct vring_virtqueue *vring = to_vvq(vq);
+ struct vring_desc_extra *extra = vring->split.desc_extra;
+ u16 next;
+
+ desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
+ desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
+ desc[i].len = cpu_to_virtio32(vq->vdev, len);
+
+ if (!indirect) {
+ next = extra[i].next;
+ desc[i].next = cpu_to_virtio16(vq->vdev, next);
+
+ extra[i].addr = addr;
+ extra[i].len = len;
+ extra[i].flags = flags;
+ } else
+ next = virtio16_to_cpu(vq->vdev, desc[i].next);
+
+ return next;
+}
+
static inline int virtqueue_add_split(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
@@ -484,11 +547,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
if (vring_mapping_error(vq, addr))
goto unmap_release;
- desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
- desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
- desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ /* Note that we trust indirect descriptor
+			 * table since it uses streaming DMA mapping.
+ */
+ i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
+ VRING_DESC_F_NEXT,
+ indirect);
}
}
for (; n < (out_sgs + in_sgs); n++) {
@@ -497,15 +562,22 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
if (vring_mapping_error(vq, addr))
goto unmap_release;
- desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
- desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
- desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ /* Note that we trust indirect descriptor
+			 * table since it uses streaming DMA mapping.
+ */
+ i = virtqueue_add_desc_split(_vq, desc, i, addr,
+ sg->length,
+ VRING_DESC_F_NEXT |
+ VRING_DESC_F_WRITE,
+ indirect);
}
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
+ if (!indirect && vq->use_dma_api)
+ vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
+ ~VRING_DESC_F_NEXT;
if (indirect) {
/* Now that the indirect table is filled in, map it. */
@@ -515,13 +587,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
if (vring_mapping_error(vq, addr))
goto unmap_release;
- vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
- VRING_DESC_F_INDIRECT);
- vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
- addr);
-
- vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
- total_sg * sizeof(struct vring_desc));
+ virtqueue_add_desc_split(_vq, vq->split.vring.desc,
+ head, addr,
+ total_sg * sizeof(struct vring_desc),
+ VRING_DESC_F_INDIRECT,
+ false);
}
/* We're using some buffers from the free list. */
@@ -529,8 +599,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Update free pointer */
if (indirect)
- vq->free_head = virtio16_to_cpu(_vq->vdev,
- vq->split.vring.desc[head].next);
+ vq->free_head = vq->split.desc_extra[head].next;
else
vq->free_head = i;
@@ -575,8 +644,11 @@ unmap_release:
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- vring_unmap_one_split(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ if (indirect) {
+ vring_unmap_one_split_indirect(vq, &desc[i]);
+ i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ } else
+ i = vring_unmap_one_split(vq, i);
}
if (indirect)
@@ -630,14 +702,13 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
i = head;
while (vq->split.vring.desc[i].flags & nextflag) {
- vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
- i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
+ vring_unmap_one_split(vq, i);
+ i = vq->split.desc_extra[i].next;
vq->vq.num_free++;
}
- vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
- vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
- vq->free_head);
+ vring_unmap_one_split(vq, i);
+ vq->split.desc_extra[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
@@ -652,15 +723,14 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
if (!indir_desc)
return;
- len = virtio32_to_cpu(vq->vq.vdev,
- vq->split.vring.desc[head].len);
+ len = vq->split.desc_extra[head].len;
- BUG_ON(!(vq->split.vring.desc[head].flags &
- cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
+ BUG_ON(!(vq->split.desc_extra[head].flags &
+ VRING_DESC_F_INDIRECT));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split(vq, &indir_desc[j]);
+ vring_unmap_one_split_indirect(vq, &indir_desc[j]);
kfree(indir_desc);
vq->split.desc_state[head].indir_desc = NULL;
@@ -739,7 +809,10 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
+ if (vq->event)
+ /* TODO: this is a hack. Figure out a cleaner value to write. */
+ vring_used_event(&vq->split.vring) = 0x0;
+ else
vq->split.vring.avail->flags =
cpu_to_virtio16(_vq->vdev,
vq->split.avail_flags_shadow);
@@ -912,7 +985,7 @@ static struct virtqueue *vring_create_virtqueue_split(
*/
static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
- struct vring_desc_extra_packed *state)
+ struct vring_desc_extra *state)
{
u16 flags;
@@ -1061,7 +1134,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
1 << VRING_PACKED_DESC_F_USED;
}
vq->packed.next_avail_idx = n;
- vq->free_head = vq->packed.desc_state[id].next;
+ vq->free_head = vq->packed.desc_extra[id].next;
/* Store token and indirect buffer state. */
vq->packed.desc_state[id].num = 1;
@@ -1169,7 +1242,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
le16_to_cpu(flags);
}
prev = curr;
- curr = vq->packed.desc_state[curr].next;
+ curr = vq->packed.desc_extra[curr].next;
if ((unlikely(++i >= vq->packed.vring.num))) {
i = 0;
@@ -1213,13 +1286,16 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
unmap_release:
err_idx = i;
i = head;
+ curr = vq->free_head;
vq->packed.avail_used_flags = avail_used_flags;
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- vring_unmap_desc_packed(vq, &desc[i]);
+ vring_unmap_state_packed(vq,
+ &vq->packed.desc_extra[curr]);
+ curr = vq->packed.desc_extra[curr].next;
i++;
if (i >= vq->packed.vring.num)
i = 0;
@@ -1290,7 +1366,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
/* Clear data ptr. */
state->data = NULL;
- vq->packed.desc_state[state->last].next = vq->free_head;
+ vq->packed.desc_extra[state->last].next = vq->free_head;
vq->free_head = id;
vq->vq.num_free += state->num;
@@ -1299,7 +1375,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
for (i = 0; i < state->num; i++) {
vring_unmap_state_packed(vq,
&vq->packed.desc_extra[curr]);
- curr = vq->packed.desc_state[curr].next;
+ curr = vq->packed.desc_extra[curr].next;
}
}
@@ -1550,6 +1626,25 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
+static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
+ unsigned int num)
+{
+ struct vring_desc_extra *desc_extra;
+ unsigned int i;
+
+ desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
+ GFP_KERNEL);
+ if (!desc_extra)
+ return NULL;
+
+ memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
+
+ for (i = 0; i < num - 1; i++)
+ desc_extra[i].next = i + 1;
+
+ return desc_extra;
+}
+
static struct virtqueue *vring_create_virtqueue_packed(
unsigned int index,
unsigned int num,
@@ -1567,7 +1662,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
size_t ring_size_in_bytes, event_size_in_bytes;
- unsigned int i;
ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
@@ -1605,6 +1699,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
+ vq->event_triggered = false;
vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
@@ -1649,18 +1744,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
/* Put everything in free lists. */
vq->free_head = 0;
- for (i = 0; i < num-1; i++)
- vq->packed.desc_state[i].next = i + 1;
- vq->packed.desc_extra = kmalloc_array(num,
- sizeof(struct vring_desc_extra_packed),
- GFP_KERNEL);
+ vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
if (!vq->packed.desc_extra)
goto err_desc_extra;
- memset(vq->packed.desc_extra, 0,
- num * sizeof(struct vring_desc_extra_packed));
-
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
@@ -1668,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
cpu_to_le16(vq->packed.event_flags_shadow);
}
+ spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
+ spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
err_desc_extra:
@@ -1875,7 +1965,7 @@ bool virtqueue_kick(struct virtqueue *vq)
EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
- * virtqueue_get_buf - get the next used buffer
+ * virtqueue_get_buf_ctx - get the next used buffer
* @_vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
* @ctx: extra context for the token
@@ -1919,6 +2009,12 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+	/* If the device already triggered an event it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
if (vq->packed_ring)
virtqueue_disable_cb_packed(_vq);
else
@@ -1942,6 +2038,9 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (vq->event_triggered)
+ vq->event_triggered = false;
+
return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
virtqueue_enable_cb_prepare_split(_vq);
}
@@ -2005,6 +2104,9 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (vq->event_triggered)
+ vq->event_triggered = false;
+
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
}
@@ -2044,6 +2146,10 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
if (unlikely(vq->broken))
return IRQ_HANDLED;
+ /* Just a hint for performance: so it's ok that this can be racy! */
+ if (vq->event)
+ vq->event_triggered = true;
+
pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
if (vq->vq.callback)
vq->vq.callback(&vq->vq);
@@ -2062,7 +2168,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *),
const char *name)
{
- unsigned int i;
struct vring_virtqueue *vq;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
@@ -2083,6 +2188,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
+ vq->event_triggered = false;
vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
@@ -2114,20 +2220,28 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->split.desc_state = kmalloc_array(vring.num,
sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state) {
- kfree(vq);
- return NULL;
- }
+ if (!vq->split.desc_state)
+ goto err_state;
+
+ vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
+ if (!vq->split.desc_extra)
+ goto err_extra;
/* Put everything in free lists. */
vq->free_head = 0;
- for (i = 0; i < vring.num-1; i++)
- vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
+ spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
+ spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
+
+err_extra:
+ kfree(vq->split.desc_state);
+err_state:
+ kfree(vq);
+ return NULL;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
@@ -2182,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2208,9 +2326,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
vq->split.queue_dma_addr);
}
}
- if (!vq->packed_ring)
+ if (!vq->packed_ring) {
kfree(vq->split.desc_state);
- list_del(&_vq->list);
+ kfree(vq->split.desc_extra);
+ }
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2262,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->broken;
+ return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
@@ -2274,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev)
{
struct virtqueue *_vq;
+ spin_lock(&dev->vqs_list_lock);
list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq);
- vq->broken = true;
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
}
+ spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index e28acf482e0c..72eaef2caeb1 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -142,6 +142,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
struct vdpa_callback cb;
struct virtqueue *vq;
u64 desc_addr, driver_addr, device_addr;
+ /* Assume split virtqueue, switch to packed if necessary */
+ struct vdpa_vq_state state = {0};
unsigned long flags;
u32 align, num;
int err;
@@ -149,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!name)
return NULL;
+ if (index >= vdpa->nvqs)
+ return ERR_PTR(-ENOENT);
+
/* Queue shouldn't already be set up. */
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);
@@ -191,6 +196,19 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto err_vq;
}
+ /* reset virtqueue state index */
+ if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+ struct vdpa_vq_state_packed *s = &state.packed;
+
+ s->last_avail_counter = 1;
+ s->last_avail_idx = 0;
+ s->last_used_counter = 1;
+ s->last_used_idx = 0;
+ }
+ err = ops->set_vq_state(vdpa, index, &state);
+ if (err)
+ goto err_vq;
+
ops->set_vq_ready(vdpa, index, 1);
vq->priv = info;
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
index cb1eb7e05f87..5668cad86e37 100644
--- a/drivers/visorbus/visorchipset.c
+++ b/drivers/visorbus/visorchipset.c
@@ -1561,7 +1561,7 @@ schedule_out:
static int visorchipset_init(struct acpi_device *acpi_device)
{
- int err = -ENODEV;
+ int err = -ENOMEM;
struct visorchannel *controlvm_channel;
chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
@@ -1584,8 +1584,10 @@ static int visorchipset_init(struct acpi_device *acpi_device)
"controlvm",
sizeof(struct visor_controlvm_channel),
VISOR_CONTROLVM_CHANNEL_VERSIONID,
- VISOR_CHANNEL_SIGNATURE))
+ VISOR_CHANNEL_SIGNATURE)) {
+ err = -ENODEV;
goto error_delete_groups;
+ }
/* if booting in a crash kernel */
if (is_kdump_kernel())
INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index b471779c3e2c..6c962e88501c 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* ds2482.c - provides i2c to w1-master bridge(s)
* Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
*
@@ -19,7 +19,7 @@
#include <linux/w1.h>
-/**
+/*
* Allow the active pullup to be disabled, default is enabled.
*
* Note from the DS2482 datasheet:
@@ -39,7 +39,7 @@ static int extra_config;
module_param(extra_config, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
-/**
+/*
* The DS2482 registers - there are 3 registers that are addressed by a read
* pointer. The read pointer is set by the last command executed.
*
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8
#define DS2482_PTR_CODE_CHANNEL 0xD2 /* DS2482-800 only */
#define DS2482_PTR_CODE_CONFIG 0xC3
-/**
+/*
* Configure Register bit definitions
* The top 4 bits always read 0.
* To write, the top nibble must be the 1's compl. of the low nibble.
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
-/**
+/*
* Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
* To set the channel, write the value at the index of the channel.
* Read and compare against the corresponding value to verify the change.
@@ -84,7 +84,7 @@ static const u8 ds2482_chan_rd[8] =
{ 0xB8, 0xB1, 0xAA, 0xA3, 0x9C, 0x95, 0x8E, 0x87 };
-/**
+/*
* Status Register bit definitions (read only)
*/
#define DS2482_REG_STS_DIR 0x80
@@ -124,9 +124,9 @@ struct ds2482_data {
/**
- * Helper to calculate values for configuration register
- * @param conf the raw config value
- * @return the value w/ complements that can be written to register
+ * ds2482_calculate_config - Helper to calculate values for configuration register
+ * @conf: the raw config value
+ * Return: the value w/ complements that can be written to register
*/
static inline u8 ds2482_calculate_config(u8 conf)
{
@@ -140,10 +140,10 @@ static inline u8 ds2482_calculate_config(u8 conf)
/**
- * Sets the read pointer.
- * @param pdev The ds2482 client pointer
- * @param read_ptr see DS2482_PTR_CODE_xxx above
- * @return -1 on failure, 0 on success
+ * ds2482_select_register - Sets the read pointer.
+ * @pdev: The ds2482 client pointer
+ * @read_ptr: see DS2482_PTR_CODE_xxx above
+ * Return: -1 on failure, 0 on success
*/
static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr)
{
@@ -159,12 +159,12 @@ static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr)
}
/**
- * Sends a command without a parameter
- * @param pdev The ds2482 client pointer
- * @param cmd DS2482_CMD_RESET,
+ * ds2482_send_cmd - Sends a command without a parameter
+ * @pdev: The ds2482 client pointer
+ * @cmd: DS2482_CMD_RESET,
* DS2482_CMD_1WIRE_RESET,
* DS2482_CMD_1WIRE_READ_BYTE
- * @return -1 on failure, 0 on success
+ * Return: -1 on failure, 0 on success
*/
static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd)
{
@@ -176,14 +176,14 @@ static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd)
}
/**
- * Sends a command with a parameter
- * @param pdev The ds2482 client pointer
- * @param cmd DS2482_CMD_WRITE_CONFIG,
+ * ds2482_send_cmd_data - Sends a command with a parameter
+ * @pdev: The ds2482 client pointer
+ * @cmd: DS2482_CMD_WRITE_CONFIG,
* DS2482_CMD_1WIRE_SINGLE_BIT,
* DS2482_CMD_1WIRE_WRITE_BYTE,
* DS2482_CMD_1WIRE_TRIPLET
- * @param byte The data to send
- * @return -1 on failure, 0 on success
+ * @byte: The data to send
+ * Return: -1 on failure, 0 on success
*/
static inline int ds2482_send_cmd_data(struct ds2482_data *pdev,
u8 cmd, u8 byte)
@@ -205,10 +205,10 @@ static inline int ds2482_send_cmd_data(struct ds2482_data *pdev,
#define DS2482_WAIT_IDLE_TIMEOUT 100
/**
- * Waits until the 1-wire interface is idle (not busy)
+ * ds2482_wait_1wire_idle - Waits until the 1-wire interface is idle (not busy)
*
- * @param pdev Pointer to the device structure
- * @return the last value read from status or -1 (failure)
+ * @pdev: Pointer to the device structure
+ * Return: the last value read from status or -1 (failure)
*/
static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
{
@@ -230,12 +230,12 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
}
/**
- * Selects a w1 channel.
+ * ds2482_set_channel - Selects a w1 channel.
* The 1-wire interface must be idle before calling this function.
*
- * @param pdev The ds2482 client pointer
- * @param channel 0-7
- * @return -1 (failure) or 0 (success)
+ * @pdev: The ds2482 client pointer
+ * @channel: 0-7
+ * Return: -1 (failure) or 0 (success)
*/
static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel)
{
@@ -254,11 +254,11 @@ static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel)
/**
- * Performs the touch-bit function, which writes a 0 or 1 and reads the level.
+ * ds2482_w1_touch_bit - Performs the touch-bit function, which writes a 0 or 1 and reads the level.
*
- * @param data The ds2482 channel pointer
- * @param bit The level to write: 0 or non-zero
- * @return The level read: 0 or 1
+ * @data: The ds2482 channel pointer
+ * @bit: The level to write: 0 or non-zero
+ * Return: The level read: 0 or 1
*/
static u8 ds2482_w1_touch_bit(void *data, u8 bit)
{
@@ -284,13 +284,13 @@ static u8 ds2482_w1_touch_bit(void *data, u8 bit)
}
/**
- * Performs the triplet function, which reads two bits and writes a bit.
+ * ds2482_w1_triplet - Performs the triplet function, which reads two bits and writes a bit.
* The bit written is determined by the two reads:
* 00 => dbit, 01 => 0, 10 => 1
*
- * @param data The ds2482 channel pointer
- * @param dbit The direction to choose if both branches are valid
- * @return b0=read1 b1=read2 b3=bit written
+ * @data: The ds2482 channel pointer
+ * @dbit: The direction to choose if both branches are valid
+ * Return: b0=read1 b1=read2 b3=bit written
*/
static u8 ds2482_w1_triplet(void *data, u8 dbit)
{
@@ -317,10 +317,10 @@ static u8 ds2482_w1_triplet(void *data, u8 dbit)
}
/**
- * Performs the write byte function.
+ * ds2482_w1_write_byte - Performs the write byte function.
*
- * @param data The ds2482 channel pointer
- * @param byte The value to write
+ * @data: The ds2482 channel pointer
+ * @byte: The value to write
*/
static void ds2482_w1_write_byte(void *data, u8 byte)
{
@@ -341,10 +341,10 @@ static void ds2482_w1_write_byte(void *data, u8 byte)
}
/**
- * Performs the read byte function.
+ * ds2482_w1_read_byte - Performs the read byte function.
*
- * @param data The ds2482 channel pointer
- * @return The value read
+ * @data: The ds2482 channel pointer
+ * Return: The value read
*/
static u8 ds2482_w1_read_byte(void *data)
{
@@ -378,10 +378,10 @@ static u8 ds2482_w1_read_byte(void *data)
/**
- * Sends a reset on the 1-wire interface
+ * ds2482_w1_reset_bus - Sends a reset on the 1-wire interface
*
- * @param data The ds2482 channel pointer
- * @return 0=Device present, 1=No device present or error
+ * @data: The ds2482 channel pointer
+ * Return: 0=Device present, 1=No device present or error
*/
static u8 ds2482_w1_reset_bus(void *data)
{
@@ -541,7 +541,7 @@ static int ds2482_remove(struct i2c_client *client)
return 0;
}
-/**
+/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id ds2482_id[] = {
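
The ds2482 hunks above demote block comments that merely start with /** but are not kernel-doc, and rewrite the real function comments from the old @param/@return style into kernel-doc, which expects a "name() - summary" line, "@arg:" descriptions and a "Return:" section. For reference, a conforming comment looks roughly like this (the function and structure names are hypothetical):

/**
 * example_send_cmd() - send a one-byte command to the bridge
 * @pdev: driver state for the client (hypothetical structure)
 * @cmd: command byte to transmit
 *
 * Context: may sleep; the caller is expected to hold the bus lock.
 *
 * Return: 0 on success or a negative errno on failure.
 */
static int example_send_cmd(struct example_data *pdev, u8 cmd);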
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index 5cfb0ae23e91..ca64f99c8f3d 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -49,6 +49,15 @@
#define DS2438_CURRENT_MSB 0x06
#define DS2438_THRESHOLD 0x07
+/* Page #1 definitions */
+#define DS2438_ETM_0 0x00
+#define DS2438_ETM_1 0x01
+#define DS2438_ETM_2 0x02
+#define DS2438_ETM_3 0x03
+#define DS2438_ICA 0x04
+#define DS2438_OFFSET_LSB 0x05
+#define DS2438_OFFSET_MSB 0x06
+
static int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
{
unsigned int retries = W1_DS2438_RETRIES;
@@ -62,13 +71,13 @@ static int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_RECALL_MEMORY;
- w1_buf[1] = 0x00;
+ w1_buf[1] = (u8)pageno;
w1_write_block(sl->master, w1_buf, 2);
if (w1_reset_select_slave(sl))
continue;
w1_buf[0] = W1_DS2438_READ_SCRATCH;
- w1_buf[1] = 0x00;
+ w1_buf[1] = (u8)pageno;
w1_write_block(sl->master, w1_buf, 2);
count = w1_read_block(sl->master, buf, DS2438_PAGE_SIZE + 1);
@@ -154,11 +163,11 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
if ((status & mask) == value)
return 0; /* already set as requested */
- else {
- /* changing bit */
- status ^= mask;
- perform_write = 1;
- }
+
+ /* changing bit */
+ status ^= mask;
+ perform_write = 1;
+
break;
}
@@ -184,6 +193,34 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
return -1;
}
+static int w1_ds2438_change_offset_register(struct w1_slave *sl, u8 *value)
+{
+ unsigned int retries = W1_DS2438_RETRIES;
+ u8 w1_buf[9];
+ u8 w1_page1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+
+ if (w1_ds2438_get_page(sl, 1, w1_page1_buf) == 0) {
+ memcpy(&w1_buf[2], w1_page1_buf, DS2438_PAGE_SIZE - 1); /* last register reserved */
+ w1_buf[7] = value[0]; /* change only offset register */
+ w1_buf[8] = value[1];
+ while (retries--) {
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_WRITE_SCRATCH;
+ w1_buf[1] = 0x01; /* write to page 1 */
+ w1_write_block(sl->master, w1_buf, 9);
+
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_COPY_SCRATCH;
+ w1_buf[1] = 0x01;
+ w1_write_block(sl->master, w1_buf, 2);
+ return 0;
+ }
+ }
+ return -1;
+}
+
static int w1_ds2438_get_voltage(struct w1_slave *sl,
int adc_input, uint16_t *voltage)
{
@@ -287,9 +324,9 @@ static ssize_t iad_read(struct file *filp, struct kobject *kobj,
if (!buf)
return -EINVAL;
- if (w1_ds2438_get_current(sl, &voltage) == 0) {
+ if (w1_ds2438_get_current(sl, &voltage) == 0)
ret = snprintf(buf, count, "%i\n", voltage);
- } else
+ else
ret = -EIO;
return ret;
@@ -325,6 +362,55 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
return ret;
}
+static ssize_t page1_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Read no more than page1 size */
+ if (count > DS2438_PAGE_SIZE)
+ count = DS2438_PAGE_SIZE;
+
+ if (w1_ds2438_get_page(sl, 1, w1_buf) == 0) {
+ memcpy(buf, &w1_buf, count);
+ ret = count;
+ } else
+ ret = -EIO;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+static ssize_t offset_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_ds2438_change_offset_register(sl, buf) == 0)
+ ret = count;
+ else
+ ret = -EIO;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -338,9 +424,9 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
if (!buf)
return -EINVAL;
- if (w1_ds2438_get_temperature(sl, &temp) == 0) {
+ if (w1_ds2438_get_temperature(sl, &temp) == 0)
ret = snprintf(buf, count, "%i\n", temp);
- } else
+ else
ret = -EIO;
return ret;
@@ -359,9 +445,9 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
if (!buf)
return -EINVAL;
- if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
+ if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0)
ret = snprintf(buf, count, "%u\n", voltage);
- } else
+ else
ret = -EIO;
return ret;
@@ -380,16 +466,18 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
if (!buf)
return -EINVAL;
- if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
+ if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0)
ret = snprintf(buf, count, "%u\n", voltage);
- } else
+ else
ret = -EIO;
return ret;
}
-static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, iad_read, iad_write, 0);
+static BIN_ATTR_RW(iad, 0);
static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
+static BIN_ATTR_RO(page1, DS2438_PAGE_SIZE);
+static BIN_ATTR_WO(offset, 2);
static BIN_ATTR_RO(temperature, 0/* real length varies */);
static BIN_ATTR_RO(vad, 0/* real length varies */);
static BIN_ATTR_RO(vdd, 0/* real length varies */);
@@ -397,6 +485,8 @@ static BIN_ATTR_RO(vdd, 0/* real length varies */);
static struct bin_attribute *w1_ds2438_bin_attrs[] = {
&bin_attr_iad,
&bin_attr_page0,
+ &bin_attr_page1,
+ &bin_attr_offset,
&bin_attr_temperature,
&bin_attr_vad,
&bin_attr_vdd,
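
The DS2438 changes above read the requested page number instead of hard-coding page 0, export page 1 (elapsed time meter, ICA and offset registers) as a read-only page1 binary attribute, and add a write-only offset attribute that takes two bytes, LSB first, and rewrites only the offset register. A hypothetical userspace sketch; the sysfs path depends on the slave id (family code 0x26 plus a serial number):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char page1[8];
        unsigned char offset[2] = { 0x00, 0x00 };       /* LSB first, per the driver */
        int fd;

        /* Slave directory name is hypothetical here. */
        fd = open("/sys/bus/w1/devices/26-000000000000/page1", O_RDONLY);
        if (fd >= 0 && read(fd, page1, sizeof(page1)) == sizeof(page1))
                printf("ICA: %u\n", page1[4]);          /* DS2438_ICA is byte 4 */
        if (fd >= 0)
                close(fd);

        fd = open("/sys/bus/w1/devices/26-000000000000/offset", O_WRONLY);
        if (fd >= 0) {
                write(fd, offset, sizeof(offset));      /* clear the offset register */
                close(fd);
        }
        return 0;
}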
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 9d08a1c9c445..ca70c5f03206 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -834,7 +834,7 @@ static int check_family_data(struct w1_slave *sl)
}
/**
- * support_bulk_read() - check if slave support bulk read
+ * bulk_read_support() - check if the slave supports bulk read
* @sl: device to check the ability
*
* Return: true if bulk read is supported, false if not or error
@@ -2056,7 +2056,6 @@ static ssize_t w1_seq_show(struct device *device,
{
struct w1_slave *sl = dev_to_w1_slave(device);
ssize_t c = PAGE_SIZE;
- int rv;
int i;
u8 ack;
u64 rn;
@@ -2084,7 +2083,7 @@ static ssize_t w1_seq_show(struct device *device,
goto error;
w1_write_8(sl->master, W1_42_COND_READ);
- rv = w1_read_block(sl->master, (u8 *)&rn, 8);
+ w1_read_block(sl->master, (u8 *)&rn, 8);
reg_num = (struct w1_reg_num *) &rn;
if (reg_num->family == W1_42_FINISHED_BYTE)
break;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 355100dad60a..546dfc1e2349 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -22,9 +22,9 @@ menuconfig WATCHDOG
The watchdog is usually used together with the watchdog daemon
which is available from
- <ftp://ibiblio.org/pub/Linux/system/daemons/watchdog/>. This daemon can
- also monitor NFS connections and can reboot the machine when the process
- table is full.
+ <https://ibiblio.org/pub/Linux/system/daemons/watchdog/>. This daemon
+ can also monitor NFS connections and can reboot the machine when the
+ process table is full.
If unsure, say N.
@@ -73,6 +73,14 @@ config WATCHDOG_SYSFS
Say Y here if you want to enable watchdog device status read through
sysfs attributes.
+config WATCHDOG_HRTIMER_PRETIMEOUT
+ bool "Enable watchdog hrtimer-based pretimeouts"
+ help
+ Enable this if you want to use an hrtimer-based pretimeout for
+ watchdogs that do not natively support pretimeouts. Be aware
+ that because this pretimeout functionality uses hrtimers, it may not
+ be able to fire before the actual watchdog fires in some situations.
+
comment "Watchdog Pretimeout Governors"
config WATCHDOG_PRETIMEOUT_GOV
@@ -302,7 +310,7 @@ config XILINX_WATCHDOG
depends on HAS_IOMEM
select WATCHDOG_CORE
help
- Watchdog driver for the xps_timebase_wdt ip core.
+ Watchdog driver for the xps_timebase_wdt IP core.
To compile this driver as a module, choose M here: the
module will be called of_xilinx_wdt.
@@ -404,8 +412,8 @@ config ASM9260_WATCHDOG
select WATCHDOG_CORE
select RESET_CONTROLLER
help
- Watchdog timer embedded into Alphascale asm9260 chips. This will reboot your
- system when the timeout is reached.
+ Watchdog timer embedded into Alphascale asm9260 chips. This will
+ reboot your system when the timeout is reached.
config AT91RM9200_WATCHDOG
tristate "AT91RM9200 watchdog"
@@ -548,8 +556,9 @@ config OMAP_WATCHDOG
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || COMPILE_TEST
select WATCHDOG_CORE
help
- Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y'
- here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
+ Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog.
+ Say 'Y' here to enable the
+ OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
config PNX4008_WATCHDOG
tristate "LPC32XX Watchdog"
@@ -980,6 +989,18 @@ config VISCONTI_WATCHDOG
Say Y here to include support for the watchdog timer in Toshiba
Visconti SoCs.
+config MSC313E_WATCHDOG
+ tristate "MStar MSC313e watchdog"
+ depends on ARCH_MSTARV7 || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the Watchdog timer embedded
+ into MStar MSC313e chips. This will reboot your system when the
+ timeout is reached.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msc313e_wdt.
+
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -1096,13 +1117,16 @@ config SBC_FITPC2_WATCHDOG
This is the driver for the built-in watchdog timer on the fit-PC2,
fit-PC2i, CM-iAM single-board computers made by Compulab.
- It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux.
- When "Watchdog Timer Value" enabled one can set 31-255 s operational range.
+ It's possible to enable the watchdog timer either from BIOS (F2) or
+ from booted Linux.
+ When the "Watchdog Timer Value" is enabled one can set 31-255 seconds
+ operational range.
- Entering BIOS setup temporary disables watchdog operation regardless to current state,
- so system will not be restarted while user in BIOS setup.
+ Entering BIOS setup temporarily disables watchdog operation regardless
+ of the current state, so the system will not be restarted while the user is in
+ BIOS setup.
- Once watchdog was enabled the system will be restarted every
+ Once the watchdog is enabled the system will be restarted every
"Watchdog Timer Value" period, so to prevent it user can restart or
disable the watchdog.
@@ -1124,11 +1148,12 @@ config IB700_WDT
depends on X86
help
This is the driver for the hardware watchdog on the IB700 Single
- Board Computer produced by TMC Technology (www.tmc-uk.com). This watchdog
- simply watches your kernel to make sure it doesn't freeze, and if
- it does, it reboots your computer after a certain amount of time.
+ Board Computer produced by TMC Technology (www.tmc-uk.com). This
+ watchdog simply watches your kernel to make sure it doesn't freeze,
+ and if it does, it reboots your computer after a certain amount of time.
- This driver is like the WDT501 driver but for slightly different hardware.
+ This driver is like the WDT501 driver but for slightly different
+ hardware.
To compile this driver as a module, choose M here: the
module will be called ib700wdt.
@@ -1807,10 +1832,10 @@ config PIC32_DMT
select WATCHDOG_CORE
depends on MACH_PIC32 || (MIPS && COMPILE_TEST)
help
- Watchdog driver for PIC32 instruction fetch counting timer. This specific
- timer is typically be used in misson critical and safety critical
- applications, where any single failure of the software functionality
- and sequencing must be detected.
+ Watchdog driver for PIC32 instruction fetch counting timer. This
+ specific timer is typically used in mission critical and safety
+ critical applications, where any single failure of the software
+ functionality and sequencing must be detected.
To compile this driver as a loadable module, choose M here.
The module will be called pic32-dmt.
@@ -1844,10 +1869,6 @@ config 8xxx_WDT
For BookE processors (MPC85xx) use the BOOKE_WDT driver instead.
-config MV64X60_WDT
- tristate "MV64X60 (Marvell Discovery) Watchdog Timer"
- depends on MV64X60 || COMPILE_TEST
-
config PIKA_WDT
tristate "PIKA FPGA Watchdog"
depends on WARP || (PPC64 && COMPILE_TEST)
@@ -2013,8 +2034,8 @@ config PCWATCHDOG
This card simply watches your kernel to make sure it doesn't freeze,
and if it does, it reboots your computer after a certain amount of
time. This driver is like the WDT501 driver but for different
- hardware. Please read <file:Documentation/watchdog/pcwd-watchdog.rst>. The PC
- watchdog cards can be ordered from <http://www.berkprod.com/>.
+ hardware. Please read <file:Documentation/watchdog/pcwd-watchdog.rst>.
+ The PC watchdog cards can be ordered from <http://www.berkprod.com/>.
To compile this driver as a module, choose M here: the
module will be called pcwd.
@@ -2115,7 +2136,7 @@ config KEEMBAY_WATCHDOG
This option enable support for an In-secure watchdog timer driver for
Intel Keem Bay SoC. This WDT has a 32 bit timer and decrements in every
count unit. An interrupt will be triggered, when the count crosses
- the thershold configured in the register.
+ the threshold configured in the register.
To compile this driver as a module, choose M here: the
module will be called keembay_wdt.
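
The new WATCHDOG_HRTIMER_PRETIMEOUT option added earlier in this Kconfig hunk pairs with the watchdog_hrtimer_pretimeout.o object added to the Makefile below: the core emulates a pretimeout with an hrtimer for hardware that cannot generate one. A rough sketch of the idea only, not the actual watchdog_hrtimer_pretimeout.c implementation; the wrapper structure and function names are made up here:

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/watchdog.h>

struct example_pretimeout {
        struct hrtimer timer;
        struct watchdog_device *wdd;
};

static enum hrtimer_restart example_pretimeout_fn(struct hrtimer *t)
{
        struct example_pretimeout *p = container_of(t, struct example_pretimeout, timer);

        watchdog_notify_pretimeout(p->wdd);     /* hand off to the pretimeout governor */
        return HRTIMER_NORESTART;
}

static void example_arm_pretimeout(struct example_pretimeout *p)
{
        hrtimer_init(&p->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        p->timer.function = example_pretimeout_fn;
        /* fire 'pretimeout' seconds before the hardware deadline */
        hrtimer_start(&p->timer,
                      ktime_set(p->wdd->timeout - p->wdd->pretimeout, 0),
                      HRTIMER_MODE_REL);
}

As the help text warns, this is a software timer, so a badly wedged system may reach the hardware reset before the hrtimer callback ever runs.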
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index a7eade8b4d45..abaf2ebd814e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_WATCHDOG_CORE) += watchdog.o
watchdog-objs += watchdog_core.o watchdog_dev.o
watchdog-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV) += watchdog_pretimeout.o
+watchdog-$(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT) += watchdog_hrtimer_pretimeout.o
obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP) += pretimeout_noop.o
obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC) += pretimeout_panic.o
@@ -92,6 +93,7 @@ obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
+obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
@@ -175,7 +177,6 @@ obj-$(CONFIG_PIC32_DMT) += pic32-dmt.o
# POWERPC Architecture
obj-$(CONFIG_GEF_WDT) += gef_wdt.o
obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
-obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
obj-$(CONFIG_PIKA_WDT) += pika_wdt.o
obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 7e00960651fa..436571b6fc79 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -147,7 +147,7 @@ static int aspeed_wdt_set_timeout(struct watchdog_device *wdd,
wdd->timeout = timeout;
- actual = min(timeout, wdd->max_hw_heartbeat_ms * 1000);
+ actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000);
writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE);
writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
@@ -175,8 +175,8 @@ static ssize_t access_cs0_show(struct device *dev,
struct aspeed_wdt *wdt = dev_get_drvdata(dev);
u32 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
- return sprintf(buf, "%u\n",
- !(status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY));
+ return sysfs_emit(buf, "%u\n",
+ !(status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY));
}
static ssize_t access_cs0_store(struct device *dev,
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 979caa18d3c8..acaaa0005d5b 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -34,6 +34,25 @@ struct bcm7038_watchdog {
static bool nowayout = WATCHDOG_NOWAYOUT;
+static inline void bcm7038_wdt_write(u32 value, void __iomem *addr)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(value, addr);
+ else
+ writel_relaxed(value, addr);
+}
+
+static inline u32 bcm7038_wdt_read(void __iomem *addr)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
static void bcm7038_wdt_set_timeout_reg(struct watchdog_device *wdog)
{
struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog);
@@ -41,15 +60,15 @@ static void bcm7038_wdt_set_timeout_reg(struct watchdog_device *wdog)
timeout = wdt->rate * wdog->timeout;
- writel(timeout, wdt->base + WDT_TIMEOUT_REG);
+ bcm7038_wdt_write(timeout, wdt->base + WDT_TIMEOUT_REG);
}
static int bcm7038_wdt_ping(struct watchdog_device *wdog)
{
struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog);
- writel(WDT_START_1, wdt->base + WDT_CMD_REG);
- writel(WDT_START_2, wdt->base + WDT_CMD_REG);
+ bcm7038_wdt_write(WDT_START_1, wdt->base + WDT_CMD_REG);
+ bcm7038_wdt_write(WDT_START_2, wdt->base + WDT_CMD_REG);
return 0;
}
@@ -66,8 +85,8 @@ static int bcm7038_wdt_stop(struct watchdog_device *wdog)
{
struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog);
- writel(WDT_STOP_1, wdt->base + WDT_CMD_REG);
- writel(WDT_STOP_2, wdt->base + WDT_CMD_REG);
+ bcm7038_wdt_write(WDT_STOP_1, wdt->base + WDT_CMD_REG);
+ bcm7038_wdt_write(WDT_STOP_2, wdt->base + WDT_CMD_REG);
return 0;
}
@@ -88,7 +107,7 @@ static unsigned int bcm7038_wdt_get_timeleft(struct watchdog_device *wdog)
struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog);
u32 time_left;
- time_left = readl(wdt->base + WDT_CMD_REG);
+ time_left = bcm7038_wdt_read(wdt->base + WDT_CMD_REG);
return time_left / wdt->rate;
}
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 7817fb976f9c..5e4dc1a0f2c6 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -148,7 +148,7 @@ static void __booke_wdt_enable(void *data)
}
/**
- * booke_wdt_disable - disable the watchdog on the given CPU
+ * __booke_wdt_disable - disable the watchdog on the given CPU
*
* This function is called on each CPU. It disables the watchdog on that CPU.
*
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index aafc8d98bf9f..4cb10877017c 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -118,8 +118,6 @@ static int wdt_start(struct watchdog_device *dev)
if (test_and_set_bit(DIAG_WDOG_BUSY, &wdt_status))
return -EBUSY;
- ret = -ENODEV;
-
if (MACHINE_IS_VM) {
ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
if (!ebc_cmd) {
@@ -167,8 +165,6 @@ static int wdt_ping(struct watchdog_device *dev)
int ret;
unsigned int func;
- ret = -ENODEV;
-
if (MACHINE_IS_VM) {
ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
if (!ebc_cmd)
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 32d0e1781e63..cd578843277e 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -13,22 +13,21 @@
*/
#include <linux/bitops.h>
-#include <linux/limits.h>
-#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/pm.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
-#include <linux/debugfs.h>
#define WDOG_CONTROL_REG_OFFSET 0x00
#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 2418ebb707bd..ce682942662c 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -392,7 +392,7 @@ static struct notifier_block eurwdt_notifier = {
};
/**
- * cleanup_module:
+ * eurwdt_exit:
*
* Unload the watchdog. You cannot do this with any file handles open.
* If your watchdog is set to continue ticking on close and you unload
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 22ddba3802ef..a5006a58e0db 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -45,6 +45,7 @@ static unsigned long __iomem *hpwdt_timer_con;
static const struct pci_device_id hpwdt_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
+ { PCI_DEVICE(PCI_VENDOR_ID_HP_3PAR, 0x0389) }, /* PCtrl */
{0}, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, hpwdt_devices);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index bf31d7b67a69..b3f604669e2c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -71,6 +71,8 @@
#define TCOBASE(p) ((p)->tco_res->start)
/* SMI Control and Enable Register */
#define SMI_EN(p) ((p)->smi_res->start)
+#define TCO_EN (1 << 13)
+#define GBL_SMI_EN (1 << 0)
#define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
#define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@@ -355,8 +357,12 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
tmrval = seconds_to_ticks(p, t);
- /* For TCO v1 the timer counts down twice before rebooting */
- if (p->iTCO_version == 1)
+ /*
+ * If TCO SMIs are off, the timer counts down twice before rebooting.
+ * Otherwise, the BIOS generally reboots when the SMI triggers.
+ */
+ if (p->smi_res &&
+ (SMI_EN(p) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
tmrval /= 2;
/* from the specs: */
@@ -479,13 +485,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!devm_request_region(dev, p->smi_res->start,
resource_size(p->smi_res),
pdev->name)) {
- pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ dev_err(dev, "I/O address 0x%04llx already in use, device disabled\n",
(u64)SMI_EN(p));
return -EBUSY;
}
} else if (iTCO_vendorsupport ||
turn_SMI_watchdog_clear_off >= p->iTCO_version) {
- pr_err("SMI I/O resource is missing\n");
+ dev_err(dev, "SMI I/O resource is missing\n");
return -ENODEV;
}
@@ -521,7 +527,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
* Disables TCO logic generating an SMI#
*/
val32 = inl(SMI_EN(p));
- val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
+ val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN(p));
}
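
The iTCO hunk above replaces the old "TCO v1 always counts down twice" rule: the programmed tick value is halved only when the SMI resource exists but TCO_EN and GBL_SMI_EN are not both set in SMI_EN, since with the SMI path enabled the BIOS generally reboots the machine on the first expiry. A hypothetical helper restating that decision:

#include <linux/types.h>

#define TCO_EN          (1 << 13)       /* as added in the hunk above */
#define GBL_SMI_EN      (1 << 0)

static unsigned int itco_effective_ticks(unsigned int tmrval, u32 smi_en,
                                         bool have_smi_res)
{
        bool tco_smi_enabled = (smi_en & (TCO_EN | GBL_SMI_EN)) ==
                               (TCO_EN | GBL_SMI_EN);

        /* No SMI reboot on first expiry: the timer counts down twice before
         * the reset, so program half the requested value to stay accurate.
         */
        if (have_smi_res && !tco_smi_enabled)
                return tmrval / 2;

        return tmrval;
}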
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index b84f80f7d342..cc86018c5eb5 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -65,6 +65,7 @@ struct imx2_wdt_device {
struct regmap *regmap;
struct watchdog_device wdog;
bool ext_reset;
+ bool clk_is_on;
};
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -160,6 +161,9 @@ static int imx2_wdt_ping(struct watchdog_device *wdog)
{
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+ if (!wdev->clk_is_on)
+ return 0;
+
regmap_write(wdev->regmap, IMX2_WDT_WSR, IMX2_WDT_SEQ1);
regmap_write(wdev->regmap, IMX2_WDT_WSR, IMX2_WDT_SEQ2);
return 0;
@@ -301,6 +305,8 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ wdev->clk_is_on = true;
+
regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
@@ -361,6 +367,8 @@ static int __maybe_unused imx2_wdt_suspend(struct device *dev)
clk_disable_unprepare(wdev->clk);
+ wdev->clk_is_on = false;
+
return 0;
}
@@ -375,6 +383,8 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
if (ret)
return ret;
+ wdev->clk_is_on = true;
+
if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
/*
* If the watchdog is still active and resumes
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index e9ee22a7cb45..8ac021748d16 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -183,16 +183,12 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
- ret = devm_watchdog_register_device(dev, wdog);
- if (ret)
- return ret;
-
ret = imx_scu_irq_group_enable(SC_IRQ_GROUP_WDOG,
SC_IRQ_WDOG,
true);
if (ret) {
dev_warn(dev, "Enable irq failed, pretimeout NOT supported\n");
- return 0;
+ goto register_device;
}
imx_sc_wdd->wdt_notifier.notifier_call = imx_sc_wdt_notify;
@@ -203,7 +199,7 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
false);
dev_warn(dev,
"Register irq notifier failed, pretimeout NOT supported\n");
- return 0;
+ goto register_device;
}
ret = devm_add_action_or_reset(dev, imx_sc_wdt_action,
@@ -213,7 +209,8 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
else
dev_warn(dev, "Add action failed, pretimeout NOT supported\n");
- return 0;
+register_device:
+ return devm_watchdog_register_device(dev, wdog);
}
static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index 2b4831842162..bb1122909396 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -152,14 +152,6 @@ static inline int superio_inw(int reg)
return val;
}
-static inline void superio_outw(int val, int reg)
-{
- outb(reg++, REG);
- outb(val >> 8, VAL);
- outb(reg, REG);
- outb(val, VAL);
-}
-
/* Internal function, should be called after superio_select(GPIO) */
static void _wdt_update_timeout(unsigned int t)
{
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index bdf9564efa29..395bde79e292 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -176,9 +176,9 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(jz4740_wdt, drvdata);
drvdata->map = device_node_to_regmap(dev->parent->of_node);
- if (!drvdata->map) {
+ if (IS_ERR(drvdata->map)) {
dev_err(dev, "regmap not found\n");
- return -EINVAL;
+ return PTR_ERR(drvdata->map);
}
return devm_watchdog_register_device(dev, &drvdata->wdt);
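
The jz4740 fix above reflects that device_node_to_regmap() reports failure through the ERR_PTR convention rather than by returning NULL, so probe has to test with IS_ERR() and propagate PTR_ERR(). A minimal sketch of the convention with hypothetical names:

#include <linux/err.h>
#include <linux/regmap.h>

static struct regmap *example_regmap;   /* hypothetical handle set elsewhere */

static struct regmap *example_get_regmap(bool ok)
{
        if (!ok)
                return ERR_PTR(-ENODEV);        /* errno encoded in the pointer */
        return example_regmap;
}

static int example_probe(void)
{
        struct regmap *map = example_get_regmap(true);

        if (IS_ERR(map))                        /* a NULL check would miss this */
                return PTR_ERR(map);            /* recover the negative errno */
        return 0;
}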
diff --git a/drivers/watchdog/keembay_wdt.c b/drivers/watchdog/keembay_wdt.c
index 547d3fea33ff..2a39114dbc64 100644
--- a/drivers/watchdog/keembay_wdt.c
+++ b/drivers/watchdog/keembay_wdt.c
@@ -23,12 +23,19 @@
#define TIM_WDOG_EN 0x8
#define TIM_SAFE 0xc
-#define WDT_ISR_MASK GENMASK(9, 8)
-#define WDT_ISR_CLEAR 0x8200ff18
+#define WDT_TH_INT_MASK BIT(8)
+#define WDT_TO_INT_MASK BIT(9)
+#define WDT_INT_CLEAR_SMC 0x8200ff18
+
#define WDT_UNLOCK 0xf1d0dead
+#define WDT_DISABLE 0x0
+#define WDT_ENABLE 0x1
+
#define WDT_LOAD_MAX U32_MAX
#define WDT_LOAD_MIN 1
+
#define WDT_TIMEOUT 5
+#define WDT_PRETIMEOUT 4
static unsigned int timeout = WDT_TIMEOUT;
module_param(timeout, int, 0);
@@ -82,8 +89,7 @@ static int keembay_wdt_start(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
- keembay_wdt_set_timeout_reg(wdog);
- keembay_wdt_writel(wdt, TIM_WDOG_EN, 1);
+ keembay_wdt_writel(wdt, TIM_WDOG_EN, WDT_ENABLE);
return 0;
}
@@ -92,7 +98,7 @@ static int keembay_wdt_stop(struct watchdog_device *wdog)
{
struct keembay_wdt *wdt = watchdog_get_drvdata(wdog);
- keembay_wdt_writel(wdt, TIM_WDOG_EN, 0);
+ keembay_wdt_writel(wdt, TIM_WDOG_EN, WDT_DISABLE);
return 0;
}
@@ -108,6 +114,7 @@ static int keembay_wdt_set_timeout(struct watchdog_device *wdog, u32 t)
{
wdog->timeout = t;
keembay_wdt_set_timeout_reg(wdog);
+ keembay_wdt_set_pretimeout_reg(wdog);
return 0;
}
@@ -139,9 +146,8 @@ static irqreturn_t keembay_wdt_to_isr(int irq, void *dev_id)
struct keembay_wdt *wdt = dev_id;
struct arm_smccc_res res;
- keembay_wdt_writel(wdt, TIM_WATCHDOG, 1);
- arm_smccc_smc(WDT_ISR_CLEAR, WDT_ISR_MASK, 0, 0, 0, 0, 0, 0, &res);
- dev_crit(wdt->wdd.parent, "Intel Keem Bay non-sec wdt timeout.\n");
+ arm_smccc_smc(WDT_INT_CLEAR_SMC, WDT_TO_INT_MASK, 0, 0, 0, 0, 0, 0, &res);
+ dev_crit(wdt->wdd.parent, "Intel Keem Bay non-secure wdt timeout.\n");
emergency_restart();
return IRQ_HANDLED;
@@ -152,8 +158,10 @@ static irqreturn_t keembay_wdt_th_isr(int irq, void *dev_id)
struct keembay_wdt *wdt = dev_id;
struct arm_smccc_res res;
- arm_smccc_smc(WDT_ISR_CLEAR, WDT_ISR_MASK, 0, 0, 0, 0, 0, 0, &res);
- dev_crit(wdt->wdd.parent, "Intel Keem Bay non-sec wdt pre-timeout.\n");
+ keembay_wdt_set_pretimeout(&wdt->wdd, 0x0);
+
+ arm_smccc_smc(WDT_INT_CLEAR_SMC, WDT_TH_INT_MASK, 0, 0, 0, 0, 0, 0, &res);
+ dev_crit(wdt->wdd.parent, "Intel Keem Bay non-secure wdt pre-timeout.\n");
watchdog_notify_pretimeout(&wdt->wdd);
return IRQ_HANDLED;
@@ -224,11 +232,13 @@ static int keembay_wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = WDT_LOAD_MIN;
wdt->wdd.max_timeout = WDT_LOAD_MAX / wdt->rate;
wdt->wdd.timeout = WDT_TIMEOUT;
+ wdt->wdd.pretimeout = WDT_PRETIMEOUT;
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_set_nowayout(&wdt->wdd, nowayout);
watchdog_init_timeout(&wdt->wdd, timeout, dev);
keembay_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
+ keembay_wdt_set_pretimeout(&wdt->wdd, wdt->wdd.pretimeout);
ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret)
@@ -271,8 +281,8 @@ static const struct of_device_id keembay_wdt_match[] = {
MODULE_DEVICE_TABLE(of, keembay_wdt_match);
static struct platform_driver keembay_wdt_driver = {
- .probe = keembay_wdt_probe,
- .driver = {
+ .probe = keembay_wdt_probe,
+ .driver = {
.name = "keembay_wdt",
.of_match_table = keembay_wdt_match,
.pm = &keembay_wdt_pm_ops,
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 78cf11c94941..60b6d74f267d 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -292,7 +292,7 @@ static int lpc18xx_wdt_remove(struct platform_device *pdev)
struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
- del_timer(&lpc18xx_wdt->timer);
+ del_timer_sync(&lpc18xx_wdt->timer);
return 0;
}
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index e023d7d90d66..c7a7235e6224 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -105,7 +105,7 @@ struct mei_wdt {
#endif /* CONFIG_DEBUG_FS */
};
-/*
+/**
* struct mei_mc_hdr - Management Control Command Header
*
* @command: Management Control (0x2)
@@ -121,7 +121,7 @@ struct mei_mc_hdr {
};
/**
- * struct mei_wdt_start_request watchdog start/ping
+ * struct mei_wdt_start_request - watchdog start/ping
*
* @hdr: Management Control Command Header
* @timeout: timeout value
@@ -134,7 +134,7 @@ struct mei_wdt_start_request {
} __packed;
/**
- * struct mei_wdt_start_response watchdog start/ping response
+ * struct mei_wdt_start_response - watchdog start/ping response
*
* @hdr: Management Control Command Header
* @status: operation status
@@ -474,7 +474,7 @@ out:
complete(&wdt->response);
}
-/*
+/**
* mei_wdt_notif - callback for event notification
*
* @cldev: bus device
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index 459f3ae02c91..539feaa1f904 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -162,7 +162,6 @@ static int meson_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_wdt_dev *meson_wdt;
- const struct of_device_id *of_id;
int err;
meson_wdt = devm_kzalloc(dev, sizeof(*meson_wdt), GFP_KERNEL);
@@ -173,12 +172,7 @@ static int meson_wdt_probe(struct platform_device *pdev)
if (IS_ERR(meson_wdt->wdt_base))
return PTR_ERR(meson_wdt->wdt_base);
- of_id = of_match_device(meson_wdt_dt_ids, dev);
- if (!of_id) {
- dev_err(dev, "Unable to initialize WDT data\n");
- return -ENODEV;
- }
- meson_wdt->data = of_id->data;
+ meson_wdt->data = device_get_match_data(dev);
meson_wdt->wdt_dev.parent = dev;
meson_wdt->wdt_dev.info = &meson_wdt_info;
diff --git a/drivers/watchdog/msc313e_wdt.c b/drivers/watchdog/msc313e_wdt.c
new file mode 100644
index 000000000000..0d497aa0fb7d
--- /dev/null
+++ b/drivers/watchdog/msc313e_wdt.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MStar WDT driver
+ *
+ * Copyright (C) 2019 - 2021 Daniel Palmer
+ * Copyright (C) 2021 Romain Perier
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define REG_WDT_CLR 0x0
+#define REG_WDT_MAX_PRD_L 0x10
+#define REG_WDT_MAX_PRD_H 0x14
+
+#define MSC313E_WDT_MIN_TIMEOUT 1
+#define MSC313E_WDT_DEFAULT_TIMEOUT 30
+
+static unsigned int timeout;
+
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
+
+struct msc313e_wdt_priv {
+ void __iomem *base;
+ struct watchdog_device wdev;
+ struct clk *clk;
+};
+
+static int msc313e_wdt_start(struct watchdog_device *wdev)
+{
+ struct msc313e_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u32 timeout;
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ timeout = wdev->timeout * clk_get_rate(priv->clk);
+ writew(timeout & 0xffff, priv->base + REG_WDT_MAX_PRD_L);
+ writew((timeout >> 16) & 0xffff, priv->base + REG_WDT_MAX_PRD_H);
+ writew(1, priv->base + REG_WDT_CLR);
+ return 0;
+}
+
+static int msc313e_wdt_ping(struct watchdog_device *wdev)
+{
+ struct msc313e_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ writew(1, priv->base + REG_WDT_CLR);
+ return 0;
+}
+
+static int msc313e_wdt_stop(struct watchdog_device *wdev)
+{
+ struct msc313e_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ writew(0, priv->base + REG_WDT_MAX_PRD_L);
+ writew(0, priv->base + REG_WDT_MAX_PRD_H);
+ writew(0, priv->base + REG_WDT_CLR);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int msc313e_wdt_settimeout(struct watchdog_device *wdev, unsigned int new_time)
+{
+ wdev->timeout = new_time;
+
+ return msc313e_wdt_start(wdev);
+}
+
+static const struct watchdog_info msc313e_wdt_ident = {
+ .identity = "MSC313e watchdog",
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+};
+
+static const struct watchdog_ops msc313e_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = msc313e_wdt_start,
+ .stop = msc313e_wdt_stop,
+ .ping = msc313e_wdt_ping,
+ .set_timeout = msc313e_wdt_settimeout,
+};
+
+static const struct of_device_id msc313e_wdt_of_match[] = {
+ { .compatible = "mstar,msc313e-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, msc313e_wdt_of_match);
+
+static int msc313e_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct msc313e_wdt_priv *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "No input clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ priv->wdev.info = &msc313e_wdt_ident;
+ priv->wdev.ops = &msc313e_wdt_ops;
+ priv->wdev.parent = dev;
+ priv->wdev.min_timeout = MSC313E_WDT_MIN_TIMEOUT;
+ priv->wdev.max_timeout = U32_MAX / clk_get_rate(priv->clk);
+ priv->wdev.timeout = MSC313E_WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(&priv->wdev, priv);
+
+ watchdog_init_timeout(&priv->wdev, timeout, dev);
+ watchdog_stop_on_reboot(&priv->wdev);
+ watchdog_stop_on_unregister(&priv->wdev);
+
+ return devm_watchdog_register_device(dev, &priv->wdev);
+}
+
+static int __maybe_unused msc313e_wdt_suspend(struct device *dev)
+{
+ struct msc313e_wdt_priv *priv = dev_get_drvdata(dev);
+
+ if (watchdog_active(&priv->wdev))
+ msc313e_wdt_stop(&priv->wdev);
+
+ return 0;
+}
+
+static int __maybe_unused msc313e_wdt_resume(struct device *dev)
+{
+ struct msc313e_wdt_priv *priv = dev_get_drvdata(dev);
+
+ if (watchdog_active(&priv->wdev))
+ msc313e_wdt_start(&priv->wdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(msc313e_wdt_pm_ops, msc313e_wdt_suspend, msc313e_wdt_resume);
+
+static struct platform_driver msc313e_wdt_driver = {
+ .driver = {
+ .name = "msc313e-wdt",
+ .of_match_table = msc313e_wdt_of_match,
+ .pm = &msc313e_wdt_pm_ops,
+ },
+ .probe = msc313e_wdt_probe,
+};
+module_platform_driver(msc313e_wdt_driver);
+
+MODULE_AUTHOR("Daniel Palmer <daniel@thingy.jp>");
+MODULE_DESCRIPTION("Watchdog driver for MStar MSC313e");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 97ca993bd009..16b6aff324a7 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -25,9 +25,10 @@
#include <linux/reset-controller.h>
#include <linux/types.h>
#include <linux/watchdog.h>
+#include <linux/interrupt.h>
#define WDT_MAX_TIMEOUT 31
-#define WDT_MIN_TIMEOUT 1
+#define WDT_MIN_TIMEOUT 2
#define WDT_LENGTH_TIMEOUT(n) ((n) << 5)
#define WDT_LENGTH 0x04
@@ -187,12 +188,19 @@ static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
u32 reg;
wdt_dev->timeout = timeout;
+ /*
+ * In dual mode, the IRQ is triggered at timeout / 2 and
+ * the real timeout occurs at timeout.
+ */
+ if (wdt_dev->pretimeout)
+ wdt_dev->pretimeout = timeout / 2;
/*
* One bit is the value of 512 ticks
* The clock has 32 KHz
*/
- reg = WDT_LENGTH_TIMEOUT(timeout << 6) | WDT_LENGTH_KEY;
+ reg = WDT_LENGTH_TIMEOUT((timeout - wdt_dev->pretimeout) << 6)
+ | WDT_LENGTH_KEY;
iowrite32(reg, wdt_base + WDT_LENGTH);
mtk_wdt_ping(wdt_dev);
@@ -239,13 +247,48 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
return ret;
reg = ioread32(wdt_base + WDT_MODE);
- reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
+ if (wdt_dev->pretimeout)
+ reg |= (WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
+ else
+ reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
reg |= (WDT_MODE_EN | WDT_MODE_KEY);
iowrite32(reg, wdt_base + WDT_MODE);
return 0;
}
+static int mtk_wdt_set_pretimeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdd);
+ void __iomem *wdt_base = mtk_wdt->wdt_base;
+ u32 reg = ioread32(wdt_base + WDT_MODE);
+
+ if (timeout && !wdd->pretimeout) {
+ wdd->pretimeout = wdd->timeout / 2;
+ reg |= (WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
+ } else if (!timeout && wdd->pretimeout) {
+ wdd->pretimeout = 0;
+ reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
+ } else {
+ return 0;
+ }
+
+ reg |= WDT_MODE_KEY;
+ iowrite32(reg, wdt_base + WDT_MODE);
+
+ return mtk_wdt_set_timeout(wdd, wdd->timeout);
+}
+
+static irqreturn_t mtk_wdt_isr(int irq, void *arg)
+{
+ struct watchdog_device *wdd = arg;
+
+ watchdog_notify_pretimeout(wdd);
+
+ return IRQ_HANDLED;
+}
+
static const struct watchdog_info mtk_wdt_info = {
.identity = DRV_NAME,
.options = WDIOF_SETTIMEOUT |
@@ -253,12 +296,21 @@ static const struct watchdog_info mtk_wdt_info = {
WDIOF_MAGICCLOSE,
};
+static const struct watchdog_info mtk_wdt_pt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_PRETIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
static const struct watchdog_ops mtk_wdt_ops = {
.owner = THIS_MODULE,
.start = mtk_wdt_start,
.stop = mtk_wdt_stop,
.ping = mtk_wdt_ping,
.set_timeout = mtk_wdt_set_timeout,
+ .set_pretimeout = mtk_wdt_set_pretimeout,
.restart = mtk_wdt_restart,
};
@@ -267,7 +319,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_wdt_dev *mtk_wdt;
const struct mtk_wdt_data *wdt_data;
- int err;
+ int err, irq;
mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL);
if (!mtk_wdt)
@@ -279,7 +331,22 @@ static int mtk_wdt_probe(struct platform_device *pdev)
if (IS_ERR(mtk_wdt->wdt_base))
return PTR_ERR(mtk_wdt->wdt_base);
- mtk_wdt->wdt_dev.info = &mtk_wdt_info;
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0) {
+ err = devm_request_irq(&pdev->dev, irq, mtk_wdt_isr, 0, "wdt_bark",
+ &mtk_wdt->wdt_dev);
+ if (err)
+ return err;
+
+ mtk_wdt->wdt_dev.info = &mtk_wdt_pt_info;
+ mtk_wdt->wdt_dev.pretimeout = WDT_MAX_TIMEOUT / 2;
+ } else {
+ if (irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ mtk_wdt->wdt_dev.info = &mtk_wdt_info;
+ }
+
mtk_wdt->wdt_dev.ops = &mtk_wdt_ops;
mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
mtk_wdt->wdt_dev.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT * 1000;
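
The mtk_wdt changes above program WDT_LENGTH with (timeout - pretimeout) << 6: the counter runs at 32 kHz and one register unit is 512 ticks, i.e. 1/64 s, so a value in seconds becomes register units by shifting left by 6. In dual mode the IRQ fires after that interval and, per the comment in the hunk, the real timeout lands at the full timeout, which is why the pretimeout is pinned to timeout / 2. A worked example with the driver's maximum timeout:

#include <linux/types.h>

/* timeout = 31 s and pretimeout = timeout / 2 = 15 s leave 16 s until the
 * IRQ; at 64 units per second that is (31 - 15) << 6 = 1024 register units,
 * which the driver feeds into WDT_LENGTH_TIMEOUT().
 */
static u32 mtk_example_units(unsigned int timeout, unsigned int pretimeout)
{
        return (timeout - pretimeout) << 6;     /* 512 ticks per unit at 32768 Hz */
}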
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 8aa1cb4a295f..ea1bbf5ee528 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -41,8 +41,6 @@
#include <linux/uaccess.h>
#include <linux/gpio/consumer.h>
-#include <asm/mach-au1x00/au1000.h>
-
#define MTX1_WDT_INTERVAL (5 * HZ)
static int ticks = 100 * HZ;
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
deleted file mode 100644
index 894aa63488d3..000000000000
--- a/drivers/watchdog/mv64x60_wdt.c
+++ /dev/null
@@ -1,324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * mv64x60_wdt.c - MV64X60 (Marvell Discovery) watchdog userspace interface
- *
- * Author: James Chapman <jchapman@katalix.com>
- *
- * Platform-specific setup code should configure the dog to generate
- * interrupt or reset as required. This code only enables/disables
- * and services the watchdog.
- *
- * Derived from mpc8xx_wdt.c, with the following copyright.
- *
- * 2002 (c) Florian Schirmer <jolt@tuxbox.org>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/watchdog.h>
-#include <linux/platform_device.h>
-#include <linux/mv643xx.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-
-#define MV64x60_WDT_WDC_OFFSET 0
-
-/*
- * The watchdog configuration register contains a pair of 2-bit fields,
- * 1. a reload field, bits 27-26, which triggers a reload of
- * the countdown register, and
- * 2. an enable field, bits 25-24, which toggles between
- * enabling and disabling the watchdog timer.
- * Bit 31 is a read-only field which indicates whether the
- * watchdog timer is currently enabled.
- *
- * The low 24 bits contain the timer reload value.
- */
-#define MV64x60_WDC_ENABLE_SHIFT 24
-#define MV64x60_WDC_SERVICE_SHIFT 26
-#define MV64x60_WDC_ENABLED_SHIFT 31
-
-#define MV64x60_WDC_ENABLED_TRUE 1
-#define MV64x60_WDC_ENABLED_FALSE 0
-
-/* Flags bits */
-#define MV64x60_WDOG_FLAG_OPENED 0
-
-static unsigned long wdt_flags;
-static int wdt_status;
-static void __iomem *mv64x60_wdt_regs;
-static int mv64x60_wdt_timeout;
-static int mv64x60_wdt_count;
-static unsigned int bus_clk;
-static char expect_close;
-static DEFINE_SPINLOCK(mv64x60_wdt_spinlock);
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-static int mv64x60_wdt_toggle_wdc(int enabled_predicate, int field_shift)
-{
- u32 data;
- u32 enabled;
- int ret = 0;
-
- spin_lock(&mv64x60_wdt_spinlock);
- data = readl(mv64x60_wdt_regs + MV64x60_WDT_WDC_OFFSET);
- enabled = (data >> MV64x60_WDC_ENABLED_SHIFT) & 1;
-
- /* only toggle the requested field if enabled state matches predicate */
- if ((enabled ^ enabled_predicate) == 0) {
- /* We write a 1, then a 2 -- to the appropriate field */
- data = (1 << field_shift) | mv64x60_wdt_count;
- writel(data, mv64x60_wdt_regs + MV64x60_WDT_WDC_OFFSET);
-
- data = (2 << field_shift) | mv64x60_wdt_count;
- writel(data, mv64x60_wdt_regs + MV64x60_WDT_WDC_OFFSET);
- ret = 1;
- }
- spin_unlock(&mv64x60_wdt_spinlock);
-
- return ret;
-}
-
-static void mv64x60_wdt_service(void)
-{
- mv64x60_wdt_toggle_wdc(MV64x60_WDC_ENABLED_TRUE,
- MV64x60_WDC_SERVICE_SHIFT);
-}
-
-static void mv64x60_wdt_handler_enable(void)
-{
- if (mv64x60_wdt_toggle_wdc(MV64x60_WDC_ENABLED_FALSE,
- MV64x60_WDC_ENABLE_SHIFT)) {
- mv64x60_wdt_service();
- pr_notice("watchdog activated\n");
- }
-}
-
-static void mv64x60_wdt_handler_disable(void)
-{
- if (mv64x60_wdt_toggle_wdc(MV64x60_WDC_ENABLED_TRUE,
- MV64x60_WDC_ENABLE_SHIFT))
- pr_notice("watchdog deactivated\n");
-}
-
-static void mv64x60_wdt_set_timeout(unsigned int timeout)
-{
- /* maximum bus cycle count is 0xFFFFFFFF */
- if (timeout > 0xFFFFFFFF / bus_clk)
- timeout = 0xFFFFFFFF / bus_clk;
-
- mv64x60_wdt_count = timeout * bus_clk >> 8;
- mv64x60_wdt_timeout = timeout;
-}
-
-static int mv64x60_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(MV64x60_WDOG_FLAG_OPENED, &wdt_flags))
- return -EBUSY;
-
- if (nowayout)
- __module_get(THIS_MODULE);
-
- mv64x60_wdt_handler_enable();
-
- return stream_open(inode, file);
-}
-
-static int mv64x60_wdt_release(struct inode *inode, struct file *file)
-{
- if (expect_close == 42)
- mv64x60_wdt_handler_disable();
- else {
- pr_crit("unexpected close, not stopping timer!\n");
- mv64x60_wdt_service();
- }
- expect_close = 0;
-
- clear_bit(MV64x60_WDOG_FLAG_OPENED, &wdt_flags);
-
- return 0;
-}
-
-static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- expect_close = 0;
-
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- mv64x60_wdt_service();
- }
-
- return len;
-}
-
-static long mv64x60_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int timeout;
- int options;
- void __user *argp = (void __user *)arg;
- static const struct watchdog_info info = {
- .options = WDIOF_SETTIMEOUT |
- WDIOF_MAGICCLOSE |
- WDIOF_KEEPALIVEPING,
- .firmware_version = 0,
- .identity = "MV64x60 watchdog",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- if (put_user(wdt_status, (int __user *)argp))
- return -EFAULT;
- wdt_status &= ~WDIOF_KEEPALIVEPING;
- break;
-
- case WDIOC_GETTEMP:
- return -EOPNOTSUPP;
-
- case WDIOC_SETOPTIONS:
- if (get_user(options, (int __user *)argp))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD)
- mv64x60_wdt_handler_disable();
-
- if (options & WDIOS_ENABLECARD)
- mv64x60_wdt_handler_enable();
- break;
-
- case WDIOC_KEEPALIVE:
- mv64x60_wdt_service();
- wdt_status |= WDIOF_KEEPALIVEPING;
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(timeout, (int __user *)argp))
- return -EFAULT;
- mv64x60_wdt_set_timeout(timeout);
- fallthrough;
-
- case WDIOC_GETTIMEOUT:
- if (put_user(mv64x60_wdt_timeout, (int __user *)argp))
- return -EFAULT;
- break;
-
- default:
- return -ENOTTY;
- }
-
- return 0;
-}
-
-static const struct file_operations mv64x60_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = mv64x60_wdt_write,
- .unlocked_ioctl = mv64x60_wdt_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = mv64x60_wdt_open,
- .release = mv64x60_wdt_release,
-};
-
-static struct miscdevice mv64x60_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mv64x60_wdt_fops,
-};
-
-static int mv64x60_wdt_probe(struct platform_device *dev)
-{
- struct mv64x60_wdt_pdata *pdata = dev_get_platdata(&dev->dev);
- struct resource *r;
- int timeout = 10;
-
- bus_clk = 133; /* in MHz */
- if (pdata) {
- timeout = pdata->timeout;
- bus_clk = pdata->bus_clk;
- }
-
- /* Since bus_clk is truncated MHz, actual frequency could be
- * up to 1MHz higher. Round up, since it's better to time out
- * too late than too soon.
- */
- bus_clk++;
- bus_clk *= 1000000; /* convert to Hz */
-
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- mv64x60_wdt_regs = devm_ioremap(&dev->dev, r->start, resource_size(r));
- if (mv64x60_wdt_regs == NULL)
- return -ENOMEM;
-
- mv64x60_wdt_set_timeout(timeout);
-
- mv64x60_wdt_handler_disable(); /* in case timer was already running */
-
- return misc_register(&mv64x60_wdt_miscdev);
-}
-
-static int mv64x60_wdt_remove(struct platform_device *dev)
-{
- misc_deregister(&mv64x60_wdt_miscdev);
-
- mv64x60_wdt_handler_disable();
-
- return 0;
-}
-
-static struct platform_driver mv64x60_wdt_driver = {
- .probe = mv64x60_wdt_probe,
- .remove = mv64x60_wdt_remove,
- .driver = {
- .name = MV64x60_WDT_NAME,
- },
-};
-
-static int __init mv64x60_wdt_init(void)
-{
- pr_info("MV64x60 watchdog driver\n");
-
- return platform_driver_register(&mv64x60_wdt_driver);
-}
-
-static void __exit mv64x60_wdt_exit(void)
-{
- platform_driver_unregister(&mv64x60_wdt_driver);
-}
-
-module_init(mv64x60_wdt_init);
-module_exit(mv64x60_wdt_exit);
-
-MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
-MODULE_DESCRIPTION("MV64x60 watchdog driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" MV64x60_WDT_NAME);
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 391c774a1f67..0fe71f7e66d5 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -120,7 +120,7 @@ static int cpu2core(int cpu)
}
/**
- * Poke the watchdog when an interrupt is received
+ * octeon_wdt_poke_irq - Poke the watchdog when an interrupt is received
*
* @cpl:
* @dev_id:
@@ -154,7 +154,7 @@ static irqreturn_t octeon_wdt_poke_irq(int cpl, void *dev_id)
extern int prom_putchar(char c);
/**
- * Write a string to the uart
+ * octeon_wdt_write_string - Write a string to the uart
*
* @str: String to write
*/
@@ -166,7 +166,7 @@ static void octeon_wdt_write_string(const char *str)
}
/**
- * Write a hex number out of the uart
+ * octeon_wdt_write_hex() - Write a hex number out of the uart
*
* @value: Number to display
* @digits: Number of digits to print (1 to 16)
@@ -193,6 +193,8 @@ static const char reg_name[][3] = {
};
/**
+ * octeon_wdt_nmi_stage3:
+ *
* NMI stage 3 handler. NMIs are handled in the following manner:
* 1) The first NMI handler enables CVMSEG and transfers from
* the bootbus region into normal memory. It is careful to not
@@ -514,7 +516,7 @@ static struct watchdog_device octeon_wdt = {
static enum cpuhp_state octeon_wdt_online;
/**
- * Module/ driver initialization.
+ * octeon_wdt_init - Module/ driver initialization.
*
* Returns Zero on success
*/
@@ -586,7 +588,7 @@ err:
}
/**
- * Module / driver shutdown
+ * octeon_wdt_cleanup - Module / driver shutdown
*/
static void __exit octeon_wdt_cleanup(void)
{
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 7fe4f7c3f7ce..3318544366b8 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -6,6 +6,7 @@
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -24,12 +25,12 @@
#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
/* Control/Status Register Masks */
-#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
-#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
-#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
+#define XWT_CSR0_WRS_MASK BIT(3) /* Reset status */
+#define XWT_CSR0_WDS_MASK BIT(2) /* Timer state */
+#define XWT_CSR0_EWDT1_MASK BIT(1) /* Enable bit 1 */
/* Control/Status Register 0/1 bits */
-#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
+#define XWT_CSRX_EWDT2_MASK BIT(0) /* Enable bit 2 */
/* SelfTest constants */
#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
@@ -40,7 +41,7 @@
struct xwdt_device {
void __iomem *base;
u32 wdt_interval;
- spinlock_t spinlock;
+ spinlock_t spinlock; /* spinlock for register handling */
struct watchdog_device xilinx_wdt_wdd;
struct clk *clk;
};
@@ -70,6 +71,8 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
spin_unlock(&xdev->spinlock);
+ dev_dbg(wdd->parent, "Watchdog Started!\n");
+
return 0;
}
@@ -91,7 +94,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
clk_disable(xdev->clk);
- pr_info("Stopped!\n");
+ dev_dbg(wdd->parent, "Watchdog Stopped!\n");
return 0;
}
@@ -208,6 +211,15 @@ static int xwdt_probe(struct platform_device *pdev)
"The watchdog clock freq cannot be obtained\n");
} else {
pfreq = clk_get_rate(xdev->clk);
+ rc = clk_prepare_enable(xdev->clk);
+ if (rc) {
+ dev_err(dev, "unable to enable clock\n");
+ return rc;
+ }
+ rc = devm_add_action_or_reset(dev, xwdt_clk_disable_unprepare,
+ xdev->clk);
+ if (rc)
+ return rc;
}
/*
@@ -221,16 +233,6 @@ static int xwdt_probe(struct platform_device *pdev)
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
- rc = clk_prepare_enable(xdev->clk);
- if (rc) {
- dev_err(dev, "unable to enable clock\n");
- return rc;
- }
- rc = devm_add_action_or_reset(dev, xwdt_clk_disable_unprepare,
- xdev->clk);
- if (rc)
- return rc;
-
rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
dev_err(dev, "SelfTest routine error\n");
@@ -243,8 +245,8 @@ static int xwdt_probe(struct platform_device *pdev)
clk_disable(xdev->clk);
- dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
- xdev->base, xilinx_wdt_wdd->timeout);
+ dev_info(dev, "Xilinx Watchdog Timer with timeout %ds\n",
+ xilinx_wdt_wdd->timeout);
platform_set_drvdata(pdev, xdev);
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 4ddb4ea2e4a3..127eefc9161d 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -174,7 +174,7 @@ static int armadaxp_wdt_clock_init(struct platform_device *pdev,
return ret;
}
- /* Fix the wdt and timer1 clock freqency to 25MHz */
+ /* Fix the wdt and timer1 clock frequency to 25MHz */
val = WDT_AXP_FIXED_ENABLE_BIT | TIMER1_FIXED_ENABLE_BIT;
atomic_io_modify(dev->reg + TIMER_CTRL, val, val);
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 2d4504302c9e..9f9a340427fc 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -445,7 +445,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
/* -- Notifier funtions -----------------------------------------*/
/**
- * notify_sys:
+ * pc87413_notify_sys:
* @this: our notifier block
* @code: the event being reported
* @unused: unused
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index e38a87ffe5f5..0d2209c5eaca 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -329,7 +329,9 @@ static int __maybe_unused qcom_wdt_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(qcom_wdt_pm_ops, qcom_wdt_suspend, qcom_wdt_resume);
+static const struct dev_pm_ops qcom_wdt_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(qcom_wdt_suspend, qcom_wdt_resume)
+};
static const struct of_device_id qcom_wdt_of_table[] = {
{ .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr },
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index e5d11d6a2600..ec20ad4e534f 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -268,8 +268,10 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = MIN_WDT_TIMEOUT;
wdd->max_timeout = MAX_WDT_TIMEOUT;
wdt->last_ping = jiffies;
- wdt->sam9x60_support = of_device_is_compatible(dev->of_node,
- "microchip,sam9x60-wdt");
+
+ if (of_device_is_compatible(dev->of_node, "microchip,sam9x60-wdt") ||
+ of_device_is_compatible(dev->of_node, "microchip,sama7g5-wdt"))
+ wdt->sam9x60_support = true;
watchdog_set_drvdata(wdd, wdt);
@@ -329,6 +331,10 @@ static const struct of_device_id sama5d4_wdt_of_match[] = {
{
.compatible = "microchip,sam9x60-wdt",
},
+ {
+ .compatible = "microchip,sama7g5-wdt",
+ },
+
{ }
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index a947a63fb44a..7b974802dfc7 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -146,7 +146,7 @@ static void wdt_startup(void)
static void wdt_turnoff(void)
{
/* Stop the timer */
- del_timer(&timer);
+ del_timer_sync(&timer);
inb_p(wdt_stop);
pr_info("Watchdog timer is now disabled...\n");
}
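The del_timer() to del_timer_sync() conversions in these legacy card drivers close a small shutdown race: del_timer() only deactivates a pending timer, while del_timer_sync() also waits for a handler that is already running on another CPU, so the software keepalive cannot re-arm the hardware after it has been told to stop. A minimal sketch of the pattern, with hypothetical names:

#include <linux/timer.h>

static struct timer_list ping_timer;	/* hypothetical keepalive timer */

static void example_wdt_turnoff(void)
{
	/*
	 * Wait for a handler that may be running on another CPU to
	 * finish; plain del_timer() could let it ping the watchdog
	 * after the hardware has been disabled below.
	 */
	del_timer_sync(&ping_timer);
	/* ...disable the watchdog hardware here... */
}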
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index f0f1e3b2e463..ee9ff38929eb 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -73,16 +73,21 @@
#define SBSA_GWDT_WCS_WS0 BIT(1)
#define SBSA_GWDT_WCS_WS1 BIT(2)
+#define SBSA_GWDT_VERSION_MASK 0xF
+#define SBSA_GWDT_VERSION_SHIFT 16
+
/**
* struct sbsa_gwdt - Internal representation of the SBSA GWDT
* @wdd: kernel watchdog_device structure
* @clk: store the System Counter clock frequency, in Hz.
+ * @version: store the architecture version
* @refresh_base: Virtual address of the watchdog refresh frame
* @control_base: Virtual address of the watchdog control frame
*/
struct sbsa_gwdt {
struct watchdog_device wdd;
u32 clk;
+ int version;
void __iomem *refresh_base;
void __iomem *control_base;
};
@@ -113,6 +118,30 @@ MODULE_PARM_DESC(nowayout,
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
+ * Arm Base System Architecture 1.0 introduces watchdog v1 which
+ * increases the length of the watchdog offset register to 48 bits.
+ * - For version 0: WOR is 32 bits;
+ * - For version 1: WOR is 48 bits, comprising the registers at
+ * offsets 0x8 and 0xC; bits [63:48] are reserved and behave as
+ * Read-As-Zero and Writes-Ignored.
+ */
+static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
+{
+ if (gwdt->version == 0)
+ return readl(gwdt->control_base + SBSA_GWDT_WOR);
+ else
+ return readq(gwdt->control_base + SBSA_GWDT_WOR);
+}
+
+static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
+{
+ if (gwdt->version == 0)
+ writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
+ else
+ writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
+}
+
+/*
* watchdog operation functions
*/
static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
@@ -123,16 +152,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
wdd->timeout = timeout;
if (action)
- writel(gwdt->clk * timeout,
- gwdt->control_base + SBSA_GWDT_WOR);
+ sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
else
/*
* In the single stage mode, The first signal (WS0) is ignored,
* the timeout is (WOR * 2), so the WOR should be configured
* to half value of timeout.
*/
- writel(gwdt->clk / 2 * timeout,
- gwdt->control_base + SBSA_GWDT_WOR);
+ sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
return 0;
}
@@ -149,7 +176,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
*/
if (!action &&
!(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
- timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
+ timeleft += sbsa_gwdt_reg_read(gwdt);
timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
arch_timer_read_counter();
@@ -172,6 +199,17 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
return 0;
}
+static void sbsa_gwdt_get_version(struct watchdog_device *wdd)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+ int ver;
+
+ ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR);
+ ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK;
+
+ gwdt->version = ver;
+}
+
static int sbsa_gwdt_start(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
@@ -252,10 +290,14 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
wdd->info = &sbsa_gwdt_info;
wdd->ops = &sbsa_gwdt_ops;
wdd->min_timeout = 1;
- wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;
wdd->timeout = DEFAULT_TIMEOUT;
watchdog_set_drvdata(wdd, gwdt);
watchdog_set_nowayout(wdd, nowayout);
+ sbsa_gwdt_get_version(wdd);
+ if (gwdt->version == 0)
+ wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;
+ else
+ wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000;
status = readl(cf_base + SBSA_GWDT_WCS);
if (status & SBSA_GWDT_WCS_WS1) {
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index e66e6b905964..ca65468f4b9c 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -186,7 +186,7 @@ static int wdt_startup(void)
static int wdt_turnoff(void)
{
/* Stop the timer */
- del_timer(&timer);
+ del_timer_sync(&timer);
/* Stop the watchdog */
wdt_config(0);
diff --git a/drivers/watchdog/sl28cpld_wdt.c b/drivers/watchdog/sl28cpld_wdt.c
index a45047d8d9ab..2de93298475f 100644
--- a/drivers/watchdog/sl28cpld_wdt.c
+++ b/drivers/watchdog/sl28cpld_wdt.c
@@ -164,7 +164,7 @@ static int sl28cpld_wdt_probe(struct platform_device *pdev)
/*
* Initial timeout value, may be overwritten by device tree or module
- * parmeter in watchdog_init_timeout().
+ * parameter in watchdog_init_timeout().
*
* Reading a zero here means that either the hardware has a default
* value of zero (which is very unlikely and definitely a hardware
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 58a00e1ab23b..dbeb2146c968 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -11,7 +11,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/resource.h>
#include <linux/amba/bus.h>
@@ -23,8 +22,8 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -58,7 +57,8 @@
* @wdd: instance of struct watchdog_device
* @lock: spin lock protecting dev structure and io access
* @base: base address of wdt
- * @clk: clock structure of wdt
+ * @clk: (optional) clock structure of wdt
+ * @rate: (optional) clock rate when provided via properties
* @adev: amba device structure of wdt
* @status: current status of wdt
* @load_val: load value to be set for current timeout
@@ -231,6 +231,7 @@ static int
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
struct sp805_wdt *wdt;
+ u64 rate = 0;
int ret = 0;
wdt = devm_kzalloc(&adev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -243,25 +244,23 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- if (adev->dev.of_node) {
- wdt->clk = devm_clk_get(&adev->dev, NULL);
- if (IS_ERR(wdt->clk)) {
- dev_err(&adev->dev, "Clock not found\n");
- return PTR_ERR(wdt->clk);
- }
- wdt->rate = clk_get_rate(wdt->clk);
- } else if (has_acpi_companion(&adev->dev)) {
- /*
- * When Driver probe with ACPI device, clock devices
- * are not available, so watchdog rate get from
- * clock-frequency property given in _DSD object.
- */
- device_property_read_u64(&adev->dev, "clock-frequency",
- &wdt->rate);
- if (!wdt->rate) {
- dev_err(&adev->dev, "no clock-frequency property\n");
- return -ENODEV;
- }
+ /*
+ * When the driver is probed via an ACPI device, clock devices
+ * are not available, so the watchdog rate is taken from the
+ * clock-frequency property given in the _DSD object.
+ */
+ device_property_read_u64(&adev->dev, "clock-frequency", &rate);
+
+ wdt->clk = devm_clk_get_optional(&adev->dev, NULL);
+ if (IS_ERR(wdt->clk))
+ return dev_err_probe(&adev->dev, PTR_ERR(wdt->clk), "Clock not found\n");
+
+ wdt->rate = clk_get_rate(wdt->clk);
+ if (!wdt->rate)
+ wdt->rate = rate;
+ if (!wdt->rate) {
+ dev_err(&adev->dev, "no clock-frequency property\n");
+ return -ENODEV;
}
wdt->adev = adev;
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 5772cc5d3780..f2650863fd02 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -166,7 +166,7 @@ static void wdt_startup(void)
static void wdt_turnoff(void)
{
/* Stop the timer */
- del_timer(&timer);
+ del_timer_sync(&timer);
wdt_change(WDT_DISABLE);
diff --git a/drivers/watchdog/watchdog_core.h b/drivers/watchdog/watchdog_core.h
index a5062e8e0d13..5b35a8439e26 100644
--- a/drivers/watchdog/watchdog_core.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -7,6 +7,8 @@
*
* (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
*
+ * (c) Copyright 2021 Hewlett Packard Enterprise Development LP.
+ *
* This source code is part of the generic code that can be used
* by all the watchdog timer drivers.
*
@@ -22,12 +24,58 @@
* This material is provided "AS-IS" and at no charge.
*/
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+
#define MAX_DOGS 32 /* Maximum number of watchdog devices */
/*
+ * struct watchdog_core_data - watchdog core internal data
+ * @dev: The watchdog's internal device
+ * @cdev: The watchdog's Character device.
+ * @wdd: Pointer to watchdog device.
+ * @lock: Lock for watchdog core.
+ * @status: Watchdog core internal status bits.
+ */
+struct watchdog_core_data {
+ struct device dev;
+ struct cdev cdev;
+ struct watchdog_device *wdd;
+ struct mutex lock;
+ ktime_t last_keepalive;
+ ktime_t last_hw_keepalive;
+ ktime_t open_deadline;
+ struct hrtimer timer;
+ struct kthread_work work;
+#if IS_ENABLED(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT)
+ struct hrtimer pretimeout_timer;
+#endif
+ unsigned long status; /* Internal status bits */
+#define _WDOG_DEV_OPEN 0 /* Opened ? */
+#define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
+#define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */
+};
+
+/*
* Functions/procedures to be called by the core
*/
extern int watchdog_dev_register(struct watchdog_device *);
extern void watchdog_dev_unregister(struct watchdog_device *);
extern int __init watchdog_dev_init(void);
extern void __exit watchdog_dev_exit(void);
+
+static inline bool watchdog_have_pretimeout(struct watchdog_device *wdd)
+{
+ return wdd->info->options & WDIOF_PRETIMEOUT ||
+ IS_ENABLED(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT);
+}
+
+#if IS_ENABLED(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT)
+void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd);
+void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd);
+void watchdog_hrtimer_pretimeout_stop(struct watchdog_device *wdd);
+#else
+static inline void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd) {}
+static inline void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd) {}
+static inline void watchdog_hrtimer_pretimeout_stop(struct watchdog_device *wdd) {}
+#endif
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 2946f3a63110..3bab32485273 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -7,6 +7,7 @@
*
* (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
*
+ * (c) Copyright 2021 Hewlett Packard Enterprise Development LP.
*
* This source code is part of the generic code that can be used
* by all the watchdog timer drivers.
@@ -46,30 +47,6 @@
#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
-/*
- * struct watchdog_core_data - watchdog core internal data
- * @dev: The watchdog's internal device
- * @cdev: The watchdog's Character device.
- * @wdd: Pointer to watchdog device.
- * @lock: Lock for watchdog core.
- * @status: Watchdog core internal status bits.
- */
-struct watchdog_core_data {
- struct device dev;
- struct cdev cdev;
- struct watchdog_device *wdd;
- struct mutex lock;
- ktime_t last_keepalive;
- ktime_t last_hw_keepalive;
- ktime_t open_deadline;
- struct hrtimer timer;
- struct kthread_work work;
- unsigned long status; /* Internal status bits */
-#define _WDOG_DEV_OPEN 0 /* Opened ? */
-#define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
-#define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */
-};
-
/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
@@ -185,6 +162,9 @@ static int __watchdog_ping(struct watchdog_device *wdd)
else
err = wdd->ops->start(wdd); /* restart watchdog */
+ if (err == 0)
+ watchdog_hrtimer_pretimeout_start(wdd);
+
watchdog_update_worker(wdd);
return err;
@@ -275,8 +255,10 @@ static int watchdog_start(struct watchdog_device *wdd)
started_at = ktime_get();
if (watchdog_hw_running(wdd) && wdd->ops->ping) {
err = __watchdog_ping(wdd);
- if (err == 0)
+ if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
+ watchdog_hrtimer_pretimeout_start(wdd);
+ }
} else {
err = wdd->ops->start(wdd);
if (err == 0) {
@@ -284,6 +266,7 @@ static int watchdog_start(struct watchdog_device *wdd)
wd_data->last_keepalive = started_at;
wd_data->last_hw_keepalive = started_at;
watchdog_update_worker(wdd);
+ watchdog_hrtimer_pretimeout_start(wdd);
}
}
@@ -325,6 +308,7 @@ static int watchdog_stop(struct watchdog_device *wdd)
if (err == 0) {
clear_bit(WDOG_ACTIVE, &wdd->status);
watchdog_update_worker(wdd);
+ watchdog_hrtimer_pretimeout_stop(wdd);
}
return err;
@@ -361,6 +345,9 @@ static unsigned int watchdog_get_status(struct watchdog_device *wdd)
if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
status |= WDIOF_KEEPALIVEPING;
+ if (IS_ENABLED(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT))
+ status |= WDIOF_PRETIMEOUT;
+
return status;
}
@@ -408,7 +395,7 @@ static int watchdog_set_pretimeout(struct watchdog_device *wdd,
{
int err = 0;
- if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+ if (!watchdog_have_pretimeout(wdd))
return -EOPNOTSUPP;
if (watchdog_pretimeout_invalid(wdd, timeout))
@@ -451,7 +438,8 @@ static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
+ return sysfs_emit(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT,
+ &wdd->status));
}
static ssize_t nowayout_store(struct device *dev, struct device_attribute *attr,
@@ -485,7 +473,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
status = watchdog_get_status(wdd);
mutex_unlock(&wd_data->lock);
- return sprintf(buf, "0x%x\n", status);
+ return sysfs_emit(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);
@@ -494,7 +482,7 @@ static ssize_t bootstatus_show(struct device *dev,
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", wdd->bootstatus);
+ return sysfs_emit(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);
@@ -510,7 +498,7 @@ static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
status = watchdog_get_timeleft(wdd, &val);
mutex_unlock(&wd_data->lock);
if (!status)
- status = sprintf(buf, "%u\n", val);
+ status = sysfs_emit(buf, "%u\n", val);
return status;
}
@@ -521,16 +509,34 @@ static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", wdd->timeout);
+ return sysfs_emit(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);
+static ssize_t min_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", wdd->min_timeout);
+}
+static DEVICE_ATTR_RO(min_timeout);
+
+static ssize_t max_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", wdd->max_timeout);
+}
+static DEVICE_ATTR_RO(max_timeout);
+
static ssize_t pretimeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", wdd->pretimeout);
+ return sysfs_emit(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);
@@ -539,7 +545,7 @@ static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", wdd->info->identity);
+ return sysfs_emit(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);
@@ -549,9 +555,9 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
struct watchdog_device *wdd = dev_get_drvdata(dev);
if (watchdog_active(wdd))
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
- return sprintf(buf, "inactive\n");
+ return sysfs_emit(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);
@@ -594,13 +600,11 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
mode = 0;
- else if (attr == &dev_attr_pretimeout.attr &&
- !(wdd->info->options & WDIOF_PRETIMEOUT))
+ else if (attr == &dev_attr_pretimeout.attr && !watchdog_have_pretimeout(wdd))
mode = 0;
else if ((attr == &dev_attr_pretimeout_governor.attr ||
attr == &dev_attr_pretimeout_available_governors.attr) &&
- (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
- !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
+ (!watchdog_have_pretimeout(wdd) || !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
mode = 0;
return mode;
@@ -609,6 +613,8 @@ static struct attribute *wdt_attrs[] = {
&dev_attr_state.attr,
&dev_attr_identity.attr,
&dev_attr_timeout.attr,
+ &dev_attr_min_timeout.attr,
+ &dev_attr_max_timeout.attr,
&dev_attr_pretimeout.attr,
&dev_attr_timeleft.attr,
&dev_attr_bootstatus.attr,
@@ -1009,6 +1015,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
kthread_init_work(&wd_data->work, watchdog_ping_work);
hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
wd_data->timer.function = watchdog_timer_expired;
+ watchdog_hrtimer_pretimeout_init(wdd);
if (wdd->id == 0) {
old_wd_data = wd_data;
@@ -1096,6 +1103,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
hrtimer_cancel(&wd_data->timer);
kthread_cancel_work_sync(&wd_data->work);
+ watchdog_hrtimer_pretimeout_stop(wdd);
put_device(&wd_data->dev);
}
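The sprintf() to sysfs_emit() conversions throughout this file use the helper recommended for sysfs show() callbacks: sysfs_emit() checks that it was handed the page-aligned buffer sysfs allocates and never writes past PAGE_SIZE. A sketch of a show() callback in that style, with an illustrative attribute name:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/watchdog.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	/* Output is bounded by PAGE_SIZE, unlike a bare sprintf(). */
	return sysfs_emit(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(example);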
diff --git a/drivers/watchdog/watchdog_hrtimer_pretimeout.c b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
new file mode 100644
index 000000000000..940b53718a91
--- /dev/null
+++ b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (c) Copyright 2021 Hewlett Packard Enterprise Development LP.
+ */
+
+#include <linux/hrtimer.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_core.h"
+#include "watchdog_pretimeout.h"
+
+static enum hrtimer_restart watchdog_hrtimer_pretimeout(struct hrtimer *timer)
+{
+ struct watchdog_core_data *wd_data;
+
+ wd_data = container_of(timer, struct watchdog_core_data, pretimeout_timer);
+
+ watchdog_notify_pretimeout(wd_data->wdd);
+ return HRTIMER_NORESTART;
+}
+
+void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd)
+{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+
+ hrtimer_init(&wd_data->pretimeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ wd_data->pretimeout_timer.function = watchdog_hrtimer_pretimeout;
+}
+
+void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd)
+{
+ if (!(wdd->info->options & WDIOF_PRETIMEOUT) &&
+ !watchdog_pretimeout_invalid(wdd, wdd->pretimeout))
+ hrtimer_start(&wdd->wd_data->pretimeout_timer,
+ ktime_set(wdd->timeout - wdd->pretimeout, 0),
+ HRTIMER_MODE_REL);
+ else
+ hrtimer_cancel(&wdd->wd_data->pretimeout_timer);
+}
+
+void watchdog_hrtimer_pretimeout_stop(struct watchdog_device *wdd)
+{
+ hrtimer_cancel(&wdd->wd_data->pretimeout_timer);
+}
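With this hrtimer fallback, a pretimeout notification can be armed even on hardware that does not advertise WDIOF_PRETIMEOUT. A minimal userspace sketch using the standard watchdog ioctls (illustrative only, not code from this series):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog0", O_WRONLY);
	int timeout = 60, pretimeout = 10;

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);		/* expire after 60 s */
	ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);	/* notify 10 s earlier */
	ioctl(fd, WDIOC_KEEPALIVE, 0);			/* ping the watchdog */
	/* Note: closing fd without the magic close may trigger a reset. */
	return 0;
}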
diff --git a/drivers/watchdog/watchdog_pretimeout.c b/drivers/watchdog/watchdog_pretimeout.c
index 01ca84be240f..376a495ab80c 100644
--- a/drivers/watchdog/watchdog_pretimeout.c
+++ b/drivers/watchdog/watchdog_pretimeout.c
@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <linux/watchdog.h>
+#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
/* Default watchdog pretimeout governor */
@@ -55,7 +56,7 @@ int watchdog_pretimeout_available_governors_get(char *buf)
mutex_lock(&governor_lock);
list_for_each_entry(priv, &governor_list, entry)
- count += sprintf(buf + count, "%s\n", priv->gov->name);
+ count += sysfs_emit_at(buf, count, "%s\n", priv->gov->name);
mutex_unlock(&governor_lock);
@@ -68,7 +69,7 @@ int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf)
spin_lock_irq(&pretimeout_lock);
if (wdd->gov)
- count = sprintf(buf, "%s\n", wdd->gov->name);
+ count = sysfs_emit(buf, "%s\n", wdd->gov->name);
spin_unlock_irq(&pretimeout_lock);
return count;
@@ -177,7 +178,7 @@ int watchdog_register_pretimeout(struct watchdog_device *wdd)
{
struct watchdog_pretimeout *p;
- if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+ if (!watchdog_have_pretimeout(wdd))
return 0;
p = kzalloc(sizeof(*p), GFP_KERNEL);
@@ -197,7 +198,7 @@ void watchdog_unregister_pretimeout(struct watchdog_device *wdd)
{
struct watchdog_pretimeout *p, *t;
- if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+ if (!watchdog_have_pretimeout(wdd))
return;
spin_lock_irq(&pretimeout_lock);
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index cec7917790e5..195c8c004b69 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -208,7 +208,7 @@ static int wdat_wdt_enable_reboot(struct wdat_wdt *wdat)
/*
* WDAT specification says that the watchdog is required to reboot
* the system when it fires. However, it also states that it is
- * recommeded to make it configurable through hardware register. We
+ * recommended to make it configurable through hardware register. We
* enable reboot now if it is configurable, just in case.
*/
ret = wdat_wdt_run_action(wdat, ACPI_WDAT_SET_REBOOT, 0, NULL);
@@ -475,7 +475,7 @@ static int wdat_wdt_suspend_noirq(struct device *dev)
return 0;
/*
- * We need to stop the watchdog if firmare is not doing it or if we
+ * We need to stop the watchdog if firmware is not doing it or if we
* are going suspend to idle (where firmware is not involved). If
* firmware is stopping the watchdog we kick it here one more time
* to give it some time.
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index a9e40b5c633e..183876156243 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -494,7 +494,7 @@ static int wdt_temp_release(struct inode *inode, struct file *file)
}
/**
- * notify_sys:
+ * wdt_notify_sys:
* @this: our notifier block
* @code: the event being reported
* @unused: unused
@@ -558,7 +558,7 @@ static struct notifier_block wdt_notifier = {
};
/**
- * cleanup_module:
+ * wdt_exit:
*
* Unload the watchdog. You cannot do this with any file handles open.
* If your watchdog is set to continue ticking on close and you unload
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index c3254ba5ace6..d5e56b601351 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -537,7 +537,7 @@ static int wdtpci_temp_release(struct inode *inode, struct file *file)
}
/**
- * notify_sys:
+ * wdtpci_notify_sys:
* @this: our notifier block
* @code: the event being reported
* @unused: unused
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index 4297280807ca..c5a9b820d43a 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -69,9 +69,6 @@ static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
#define ZIIRAVE_CMD_JUMP_TO_BOOTLOADER_MAGIC 1
#define ZIIRAVE_CMD_RESET_PROCESSOR_MAGIC 1
-#define ZIIRAVE_FW_VERSION_FMT "02.%02u.%02u"
-#define ZIIRAVE_BL_VERSION_FMT "01.%02u.%02u"
-
struct ziirave_wdt_rev {
unsigned char major;
unsigned char minor;
@@ -445,8 +442,9 @@ static ssize_t ziirave_wdt_sysfs_show_firm(struct device *dev,
if (ret)
return ret;
- ret = sprintf(buf, ZIIRAVE_FW_VERSION_FMT, w_priv->firmware_rev.major,
- w_priv->firmware_rev.minor);
+ ret = sysfs_emit(buf, "02.%02u.%02u\n",
+ w_priv->firmware_rev.major,
+ w_priv->firmware_rev.minor);
mutex_unlock(&w_priv->sysfs_mutex);
@@ -468,8 +466,9 @@ static ssize_t ziirave_wdt_sysfs_show_boot(struct device *dev,
if (ret)
return ret;
- ret = sprintf(buf, ZIIRAVE_BL_VERSION_FMT, w_priv->bootloader_rev.major,
- w_priv->bootloader_rev.minor);
+ ret = sysfs_emit(buf, "01.%02u.%02u\n",
+ w_priv->bootloader_rev.major,
+ w_priv->bootloader_rev.minor);
mutex_unlock(&w_priv->sysfs_mutex);
@@ -491,7 +490,7 @@ static ssize_t ziirave_wdt_sysfs_show_reason(struct device *dev,
if (ret)
return ret;
- ret = sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]);
+ ret = sysfs_emit(buf, "%s\n", ziirave_reasons[w_priv->reset_reason]);
mutex_unlock(&w_priv->sysfs_mutex);
@@ -536,7 +535,7 @@ static ssize_t ziirave_wdt_sysfs_store_firm(struct device *dev,
}
dev_info(&client->dev,
- "Firmware updated to version " ZIIRAVE_FW_VERSION_FMT "\n",
+ "Firmware updated to version 02.%02u.%02u\n",
w_priv->firmware_rev.major, w_priv->firmware_rev.minor);
/* Restore the watchdog timeout */
@@ -677,7 +676,7 @@ static int ziirave_wdt_probe(struct i2c_client *client,
}
dev_info(&client->dev,
- "Firmware version: " ZIIRAVE_FW_VERSION_FMT "\n",
+ "Firmware version: 02.%02u.%02u\n",
w_priv->firmware_rev.major, w_priv->firmware_rev.minor);
ret = ziirave_wdt_revision(client, &w_priv->bootloader_rev,
@@ -688,7 +687,7 @@ static int ziirave_wdt_probe(struct i2c_client *client,
}
dev_info(&client->dev,
- "Bootloader version: " ZIIRAVE_BL_VERSION_FMT "\n",
+ "Bootloader version: 01.%02u.%02u\n",
w_priv->bootloader_rev.major, w_priv->bootloader_rev.minor);
w_priv->reset_reason = i2c_smbus_read_byte_data(client,
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index d7e361fb0548..a78704ae3618 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -198,12 +198,12 @@ static void disable_dynirq(struct irq_data *data);
static DEFINE_PER_CPU(unsigned int, irq_epoch);
-static void clear_evtchn_to_irq_row(unsigned row)
+static void clear_evtchn_to_irq_row(int *evtchn_row)
{
unsigned col;
for (col = 0; col < EVTCHN_PER_ROW; col++)
- WRITE_ONCE(evtchn_to_irq[row][col], -1);
+ WRITE_ONCE(evtchn_row[col], -1);
}
static void clear_evtchn_to_irq_all(void)
@@ -213,7 +213,7 @@ static void clear_evtchn_to_irq_all(void)
for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
if (evtchn_to_irq[row] == NULL)
continue;
- clear_evtchn_to_irq_row(row);
+ clear_evtchn_to_irq_row(evtchn_to_irq[row]);
}
}
@@ -221,6 +221,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
unsigned row;
unsigned col;
+ int *evtchn_row;
if (evtchn >= xen_evtchn_max_channels())
return -EINVAL;
@@ -233,11 +234,18 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
if (irq == -1)
return 0;
- evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
- if (evtchn_to_irq[row] == NULL)
+ evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
+ if (evtchn_row == NULL)
return -ENOMEM;
- clear_evtchn_to_irq_row(row);
+ clear_evtchn_to_irq_row(evtchn_row);
+
+ /*
+ * We've prepared an empty row for the mapping. If a different
+ * thread was faster inserting it, we can drop ours.
+ */
+ if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
+ free_page((unsigned long) evtchn_row);
}
WRITE_ONCE(evtchn_to_irq[row][col], irq);
@@ -1009,7 +1017,7 @@ static void __unbind_from_irq(unsigned int irq)
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
unsigned pirq, int shareable, char *name)
{
- int irq = -1;
+ int irq;
struct physdev_irq irq_op;
int ret;
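The evtchn_to_irq rework above follows the usual lock-free lazy-allocation pattern: build a fully initialised row first, publish it with cmpxchg(), and free the local copy if another CPU won the race. A generic sketch of that pattern, with illustrative names:

#include <linux/atomic.h>
#include <linux/gfp.h>

/* Publish a lazily allocated, pre-initialised table row without a lock. */
static int example_install_row(int **slot, unsigned int entries)
{
	unsigned int i;
	int *row = (int *)__get_free_page(GFP_KERNEL);

	if (!row)
		return -ENOMEM;
	for (i = 0; i < entries; i++)
		row[i] = -1;

	/* Only one writer wins; the loser releases its page again. */
	if (cmpxchg(slot, NULL, row) != NULL)
		free_page((unsigned long)row);
	return 0;
}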
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 1bcdd5227771..47aa3a1ccaf5 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -92,7 +92,7 @@ static int xen_pcpu_up(uint32_t cpu_id)
return HYPERVISOR_platform_op(&op);
}
-static ssize_t show_online(struct device *dev,
+static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -101,7 +101,7 @@ static ssize_t show_online(struct device *dev,
return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
}
-static ssize_t __ref store_online(struct device *dev,
+static ssize_t __ref online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -130,7 +130,7 @@ static ssize_t __ref store_online(struct device *dev,
ret = count;
return ret;
}
-static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);
+static DEVICE_ATTR_RW(online);
static struct attribute *pcpu_dev_attrs[] = {
&dev_attr_online.attr,
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index a8d24433c8e9..8cd583db20b1 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -134,13 +134,13 @@ void xen_balloon_init(void)
EXPORT_SYMBOL_GPL(xen_balloon_init);
#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct device *dev, \
+ static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
- static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+ static DEVICE_ATTR_RO(name)
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
@@ -152,16 +152,15 @@ static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
-static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
+static ssize_t target_kb_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
}
-static ssize_t store_target_kb(struct device *dev,
+static ssize_t target_kb_store(struct device *dev,
struct device_attribute *attr,
- const char *buf,
- size_t count)
+ const char *buf, size_t count)
{
char *endchar;
unsigned long long target_bytes;
@@ -176,22 +175,19 @@ static ssize_t store_target_kb(struct device *dev,
return count;
}
-static DEVICE_ATTR(target_kb, S_IRUGO | S_IWUSR,
- show_target_kb, store_target_kb);
+static DEVICE_ATTR_RW(target_kb);
-
-static ssize_t show_target(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t target_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%llu\n",
(unsigned long long)balloon_stats.target_pages
<< PAGE_SHIFT);
}
-static ssize_t store_target(struct device *dev,
+static ssize_t target_store(struct device *dev,
struct device_attribute *attr,
- const char *buf,
- size_t count)
+ const char *buf, size_t count)
{
char *endchar;
unsigned long long target_bytes;
@@ -206,9 +202,7 @@ static ssize_t store_target(struct device *dev,
return count;
}
-static DEVICE_ATTR(target, S_IRUGO | S_IWUSR,
- show_target, store_target);
-
+static DEVICE_ATTR_RW(target);
static struct attribute *balloon_attrs[] = {
&dev_attr_target_kb.attr,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 55a4763da05e..61ce0d142eea 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -222,10 +222,10 @@ static void scsiback_print_status(char *sense_buffer, int errors,
{
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
- pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+ pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x\n",
tpg->tport->tport_name, pending_req->v2p->lun,
- pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
- host_byte(errors), driver_byte(errors));
+ pending_req->cmnd[0], errors & 0xff, COMMAND_COMPLETE,
+ host_byte(errors));
}
static void scsiback_fast_flush_area(struct vscsibk_pend *req)
@@ -719,10 +719,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
result = DID_NO_CONNECT;
break;
default:
- result = DRIVER_ERROR;
+ result = DID_ERROR;
break;
}
- scsiback_send_response(info, NULL, result << 24, 0,
+ scsiback_send_response(info, NULL, result << 16, 0,
ring_req.rqid);
return 1;
}
@@ -732,7 +732,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(NULL,
- DRIVER_ERROR << 24, 0, pending_req);
+ DID_ERROR << 16, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
} else {
scsiback_cmd_exec(pending_req);
@@ -747,7 +747,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
break;
default:
pr_err_ratelimited("invalid request\n");
- scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
+ scsiback_do_resp_with_sense(NULL, DID_ERROR << 16, 0,
pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
break;
@@ -1401,8 +1401,7 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
- pending_req->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ pending_req->result = SAM_STAT_CHECK_CONDITION;
else
pending_req->result = se_cmd->scsi_status;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 97f0d234482d..33d09b3f6211 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -207,7 +207,7 @@ void xenbus_otherend_changed(struct xenbus_watch *watch,
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
#define XENBUS_SHOW_STAT(name) \
-static ssize_t show_##name(struct device *_dev, \
+static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, \
char *buf) \
{ \
@@ -215,14 +215,14 @@ static ssize_t show_##name(struct device *_dev, \
\
return sprintf(buf, "%d\n", atomic_read(&dev->name)); \
} \
-static DEVICE_ATTR(name, 0444, show_##name, NULL)
+static DEVICE_ATTR_RO(name)
XENBUS_SHOW_STAT(event_channels);
XENBUS_SHOW_STAT(events);
XENBUS_SHOW_STAT(spurious_events);
XENBUS_SHOW_STAT(jiffies_eoi_delayed);
-static ssize_t show_spurious_threshold(struct device *_dev,
+static ssize_t spurious_threshold_show(struct device *_dev,
struct device_attribute *attr,
char *buf)
{
@@ -231,9 +231,9 @@ static ssize_t show_spurious_threshold(struct device *_dev,
return sprintf(buf, "%d\n", dev->spurious_threshold);
}
-static ssize_t set_spurious_threshold(struct device *_dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t spurious_threshold_store(struct device *_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct xenbus_device *dev = to_xenbus_device(_dev);
unsigned int val;
@@ -248,8 +248,7 @@ static ssize_t set_spurious_threshold(struct device *_dev,
return count;
}
-static DEVICE_ATTR(spurious_threshold, 0644, show_spurious_threshold,
- set_spurious_threshold);
+static DEVICE_ATTR_RW(spurious_threshold);
static struct attribute *xenbus_attrs[] = {
&dev_attr_event_channels.attr,
diff --git a/fs/Kconfig b/fs/Kconfig
index 141a856c50e7..a7749c126b8e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -240,6 +240,21 @@ config HUGETLBFS
config HUGETLB_PAGE
def_bool HUGETLBFS
+config HUGETLB_PAGE_FREE_VMEMMAP
+ def_bool HUGETLB_PAGE
+ depends on X86_64
+ depends on SPARSEMEM_VMEMMAP
+
+config HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON
+ bool "Default freeing vmemmap pages of HugeTLB to on"
+ default n
+ depends on HUGETLB_PAGE_FREE_VMEMMAP
+ help
+ When using HUGETLB_PAGE_FREE_VMEMMAP, freeing the unused vmemmap
+ pages associated with each HugeTLB page is off by default. Say Y here
+ to enable freeing vmemmap pages of HugeTLB by default. It can then
+ be disabled on the command line via hugetlb_free_vmemmap=off.
+
config MEMFD_CREATE
def_bool TMPFS || HUGETLBFS
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 06fb7a93a1bd..4d5ae61580aa 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -168,21 +168,6 @@ config OSF4_COMPAT
with v4 shared libraries freely available from Compaq. If you're
going to use shared libraries from Tru64 version 5.0 or later, say N.
-config BINFMT_EM86
- tristate "Kernel support for Linux/Intel ELF binaries"
- depends on ALPHA
- help
- Say Y here if you want to be able to execute Linux/Intel ELF
- binaries just like native Alpha binaries on your Alpha machine. For
- this to work, you need to have the emulator /usr/bin/em86 in place.
-
- You can get the same functionality by saying N here and saying Y to
- "Kernel support for MISC binaries".
-
- You may answer M to compile the emulation support as a module and
- later load the module when you want to use a Linux/Intel binary. The
- module will be called binfmt_em86. If unsure, say Y.
-
config BINFMT_MISC
tristate "Kernel support for MISC binaries"
help
diff --git a/fs/Makefile b/fs/Makefile
index 9c708e1fbe8f..f98f3e691c37 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FS_VERITY) += verity/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
-obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
obj-$(CONFIG_BINFMT_SCRIPT) += binfmt_script.o
obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index d3c6bb22c5f4..a3f5de28be79 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
static int afs_deliver_yfs_cb_callback(struct afs_call *);
-#define CM_NAME(name) \
- char afs_SRXCB##name##_name[] __tracepoint_string = \
- "CB." #name
-
/*
* CB.CallBack operation type
*/
-static CM_NAME(CallBack);
static const struct afs_call_type afs_SRXCBCallBack = {
- .name = afs_SRXCBCallBack_name,
+ .name = "CB.CallBack",
.deliver = afs_deliver_cb_callback,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_CallBack,
@@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = {
/*
* CB.InitCallBackState operation type
*/
-static CM_NAME(InitCallBackState);
static const struct afs_call_type afs_SRXCBInitCallBackState = {
- .name = afs_SRXCBInitCallBackState_name,
+ .name = "CB.InitCallBackState",
.deliver = afs_deliver_cb_init_call_back_state,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_InitCallBackState,
@@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = {
/*
* CB.InitCallBackState3 operation type
*/
-static CM_NAME(InitCallBackState3);
static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
- .name = afs_SRXCBInitCallBackState3_name,
+ .name = "CB.InitCallBackState3",
.deliver = afs_deliver_cb_init_call_back_state3,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_InitCallBackState,
@@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
/*
* CB.Probe operation type
*/
-static CM_NAME(Probe);
static const struct afs_call_type afs_SRXCBProbe = {
- .name = afs_SRXCBProbe_name,
+ .name = "CB.Probe",
.deliver = afs_deliver_cb_probe,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_Probe,
@@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = {
/*
* CB.ProbeUuid operation type
*/
-static CM_NAME(ProbeUuid);
static const struct afs_call_type afs_SRXCBProbeUuid = {
- .name = afs_SRXCBProbeUuid_name,
+ .name = "CB.ProbeUuid",
.deliver = afs_deliver_cb_probe_uuid,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_ProbeUuid,
@@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = {
/*
* CB.TellMeAboutYourself operation type
*/
-static CM_NAME(TellMeAboutYourself);
static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
- .name = afs_SRXCBTellMeAboutYourself_name,
+ .name = "CB.TellMeAboutYourself",
.deliver = afs_deliver_cb_tell_me_about_yourself,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_TellMeAboutYourself,
@@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
/*
* YFS CB.CallBack operation type
*/
-static CM_NAME(YFS_CallBack);
static const struct afs_call_type afs_SRXYFSCB_CallBack = {
- .name = afs_SRXCBYFS_CallBack_name,
+ .name = "YFSCB.CallBack",
.deliver = afs_deliver_yfs_cb_callback,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_CallBack,
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 78719f2f567e..ac829e63c570 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -656,7 +656,6 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
return ret;
}
- ret = -ENOENT;
if (!cookie.found) {
_leave(" = -ENOENT [not found]");
return -ENOENT;
@@ -2020,17 +2019,20 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (d_count(new_dentry) > 2) {
/* copy the target dentry's name */
- ret = -ENOMEM;
op->rename.tmp = d_alloc(new_dentry->d_parent,
&new_dentry->d_name);
- if (!op->rename.tmp)
+ if (!op->rename.tmp) {
+ op->error = -ENOMEM;
goto error;
+ }
ret = afs_sillyrename(new_dvnode,
AFS_FS_I(d_inode(new_dentry)),
new_dentry, op->key);
- if (ret)
+ if (ret) {
+ op->error = ret;
goto error;
+ }
op->dentry_2 = op->rename.tmp;
op->rename.rehash = NULL;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3104b62c2082..c0534697268e 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -771,14 +771,20 @@ int afs_writepages(struct address_space *mapping,
if (wbc->range_cyclic) {
start = mapping->writeback_index * PAGE_SIZE;
ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
- if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
- ret = afs_writepages_region(mapping, wbc, 0, start,
- &next);
- mapping->writeback_index = next / PAGE_SIZE;
+ if (ret == 0) {
+ mapping->writeback_index = next / PAGE_SIZE;
+ if (start > 0 && wbc->nr_to_write > 0) {
+ ret = afs_writepages_region(mapping, wbc, 0,
+ start, &next);
+ if (ret == 0)
+ mapping->writeback_index =
+ next / PAGE_SIZE;
+ }
+ }
} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
- if (wbc->nr_to_write > 0)
- mapping->writeback_index = next;
+ if (wbc->nr_to_write > 0 && ret == 0)
+ mapping->writeback_index = next / PAGE_SIZE;
} else {
ret = afs_writepages_region(mapping, wbc,
wbc->range_start, wbc->range_end, &next);
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
deleted file mode 100644
index 06b9b9fddf70..000000000000
--- a/fs/binfmt_em86.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/fs/binfmt_em86.c
- *
- * Based on linux/fs/binfmt_script.c
- * Copyright (C) 1996 Martin von Löwis
- * original #!-checking implemented by tytso.
- *
- * em86 changes Copyright (C) 1997 Jim Paradis
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/binfmts.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/errno.h>
-
-
-#define EM86_INTERP "/usr/bin/em86"
-#define EM86_I_NAME "em86"
-
-static int load_em86(struct linux_binprm *bprm)
-{
- const char *i_name, *i_arg;
- char *interp;
- struct file * file;
- int retval;
- struct elfhdr elf_ex;
-
- /* Make sure this is a Linux/Intel ELF executable... */
- elf_ex = *((struct elfhdr *)bprm->buf);
-
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
- return -ENOEXEC;
-
- /* First of all, some simple consistency checks */
- if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
- (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
- !bprm->file->f_op->mmap) {
- return -ENOEXEC;
- }
-
- /* Need to be able to load the file after exec */
- if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
- return -ENOENT;
-
- /* Unlike in the script case, we don't have to do any hairy
- * parsing to find our interpreter... it's hardcoded!
- */
- interp = EM86_INTERP;
- i_name = EM86_I_NAME;
- i_arg = NULL; /* We reserve the right to add an arg later */
-
- /*
- * Splice in (1) the interpreter's name for argv[0]
- * (2) (optional) argument to interpreter
- * (3) filename of emulated file (replace argv[0])
- *
- * This is done in reverse order, because of how the
- * user environment and arguments are stored.
- */
- remove_arg_zero(bprm);
- retval = copy_string_kernel(bprm->filename, bprm);
- if (retval < 0) return retval;
- bprm->argc++;
- if (i_arg) {
- retval = copy_string_kernel(i_arg, bprm);
- if (retval < 0) return retval;
- bprm->argc++;
- }
- retval = copy_string_kernel(i_name, bprm);
- if (retval < 0) return retval;
- bprm->argc++;
-
- /*
- * OK, now restart the process with the interpreter's inode.
- * Note that we use open_exec() as the name is now in kernel
- * space, and we don't need to copy it.
- */
- file = open_exec(interp);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- bprm->interpreter = file;
- return 0;
-}
-
-static struct linux_binfmt em86_format = {
- .module = THIS_MODULE,
- .load_binary = load_em86,
-};
-
-static int __init init_em86_binfmt(void)
-{
- register_binfmt(&em86_format);
- return 0;
-}
-
-static void __exit exit_em86_binfmt(void)
-{
- unregister_binfmt(&em86_format);
-}
-
-core_initcall(init_em86_binfmt);
-module_exit(exit_em86_binfmt);
-MODULE_LICENSE("GPL");
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7e83c3e71504..9ef4f1fc2cb0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -812,6 +812,8 @@ static void bdev_free_inode(struct inode *inode)
free_percpu(bdev->bd_stats);
kfree(bdev->bd_meta_info);
+ if (!bdev_is_partition(bdev))
+ kfree(bdev->bd_disk);
kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
@@ -1609,7 +1611,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
* Does not take i_mutex for the write and thus is not for general purpose
* use.
*/
-ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *bd_inode = bdev_file_inode(file);
@@ -1647,9 +1649,8 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
blk_finish_plug(&plug);
return ret;
}
-EXPORT_SYMBOL_GPL(blkdev_write_iter);
-ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct inode *bd_inode = bdev_file_inode(file);
@@ -1671,7 +1672,6 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
}
-EXPORT_SYMBOL_GPL(blkdev_read_iter);
static int blkdev_writepages(struct address_space *mapping,
struct writeback_control *wbc)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 7a8a2fc19533..78b202d198b8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1488,15 +1488,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist **roots,
- bool ignore_offset)
+ bool ignore_offset, bool skip_commit_root_sem)
{
int ret;
- if (!trans)
+ if (!trans && !skip_commit_root_sem)
down_read(&fs_info->commit_root_sem);
ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
time_seq, roots, ignore_offset);
- if (!trans)
+ if (!trans && !skip_commit_root_sem)
up_read(&fs_info->commit_root_sem);
return ret;
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 17abde7f794c..ff5f07f9940b 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -47,7 +47,8 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
const u64 *extent_item_pos, bool ignore_offset);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots, bool ignore_offset);
+ u64 time_seq, struct ulist **roots, bool ignore_offset,
+ bool skip_commit_root_sem);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
u32 name_len, unsigned long name_off,
struct extent_buffer *eb_in, u64 parent,
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 38b127b9edfc..9e7d9d0c763d 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1498,9 +1498,18 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
return;
- mutex_lock(&fs_info->reclaim_bgs_lock);
+ /*
+ * Long running balances can keep us blocked here for eternity, so
+ * simply skip reclaim if we're unable to get the mutex.
+ */
+ if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
+ btrfs_exclop_finish(fs_info);
+ return;
+ }
+
spin_lock(&fs_info->unused_bgs_lock);
while (!list_empty(&fs_info->reclaim_bgs)) {
+ u64 zone_unusable;
int ret = 0;
bg = list_first_entry(&fs_info->reclaim_bgs,
@@ -1534,13 +1543,22 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
goto next;
}
+ /*
+ * Cache the zone_unusable value before turning the block group
+ * to read only. As soon as the block group is read only, its
+ * zone_unusable value gets moved to the block group's read-only
+ * bytes and isn't available for calculations anymore.
+ */
+ zone_unusable = bg->zone_unusable;
ret = inc_block_group_ro(bg, 0);
up_write(&space_info->groups_sem);
if (ret < 0)
goto next;
- btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used",
- bg->start, div_u64(bg->used * 100, bg->length));
+ btrfs_info(fs_info,
+ "reclaiming chunk %llu with %llu%% used %llu%% unusable",
+ bg->start, div_u64(bg->used * 100, bg->length),
+ div64_u64(zone_unusable * 100, bg->length));
trace_btrfs_reclaim_block_group(bg);
ret = btrfs_relocate_chunk(fs_info, bg->start);
if (ret)
@@ -2197,6 +2215,13 @@ error:
return ret;
}
+/*
+ * This function, insert_block_group_item(), belongs to the phase 2 of chunk
+ * allocation.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
static int insert_block_group_item(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group)
{
@@ -2219,15 +2244,19 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
}
+/*
+ * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of
+ * chunk allocation.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *block_group;
int ret = 0;
- if (!trans->can_flush_pending_bgs)
- return;
-
while (!list_empty(&trans->new_bgs)) {
int index;
@@ -2242,6 +2271,13 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
ret = insert_block_group_item(trans, block_group);
if (ret)
btrfs_abort_transaction(trans, ret);
+ if (!block_group->chunk_item_inserted) {
+ mutex_lock(&fs_info->chunk_mutex);
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
+ mutex_unlock(&fs_info->chunk_mutex);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+ }
ret = btrfs_finish_chunk_alloc(trans, block_group->start,
block_group->length);
if (ret)
@@ -2265,8 +2301,9 @@ next:
btrfs_trans_release_chunk_metadata(trans);
}
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size)
+struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ u64 bytes_used, u64 type,
+ u64 chunk_offset, u64 size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *cache;
@@ -2276,7 +2313,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
if (!cache)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
cache->length = size;
set_free_space_tree_thresholds(cache);
@@ -2290,7 +2327,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
ret = btrfs_load_block_group_zone_info(cache, true);
if (ret) {
btrfs_put_block_group(cache);
- return ret;
+ return ERR_PTR(ret);
}
ret = exclude_super_stripes(cache);
@@ -2298,7 +2335,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
/* We may have excluded something, so call this just in case */
btrfs_free_excluded_extents(cache);
btrfs_put_block_group(cache);
- return ret;
+ return ERR_PTR(ret);
}
add_new_free_space(cache, chunk_offset, chunk_offset + size);
@@ -2325,7 +2362,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
if (ret) {
btrfs_remove_free_space_cache(cache);
btrfs_put_block_group(cache);
- return ret;
+ return ERR_PTR(ret);
}
/*
@@ -2344,7 +2381,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
btrfs_update_delayed_refs_rsv(trans);
set_avail_alloc_bits(fs_info, type);
- return 0;
+ return cache;
}
/*
@@ -3222,11 +3259,203 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+{
+ struct btrfs_block_group *bg;
+ int ret;
+
+ /*
+ * Check if we have enough space in the system space info because we
+ * will need to update device items in the chunk btree and insert a new
+ * chunk item in the chunk btree as well. This will allocate a new
+ * system block group if needed.
+ */
+ check_system_chunk(trans, flags);
+
+ bg = btrfs_alloc_chunk(trans, flags);
+ if (IS_ERR(bg)) {
+ ret = PTR_ERR(bg);
+ goto out;
+ }
+
+ /*
+ * If this is a system chunk allocation then stop right here and do not
+ * add the chunk item to the chunk btree. This is to prevent a deadlock
+ * because this system chunk allocation can be triggered while COWing
+ * some extent buffer of the chunk btree and while holding a lock on a
+ * parent extent buffer, in which case attempting to insert the chunk
+ * item (or update the device item) would result in a deadlock on that
+ * parent extent buffer. In this case defer the chunk btree updates to
+ * the second phase of chunk allocation and keep our reservation until
+ * the second phase completes.
+ *
+ * This is a rare case and can only be triggered by the very few cases
+ * we have where we need to touch the chunk btree outside chunk allocation
+ * and chunk removal. These cases are basically adding a device, removing
+ * a device or resizing a device.
+ */
+ if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ return 0;
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ /*
+ * Normally we are not expected to fail with -ENOSPC here, since we have
+ * previously reserved space in the system space_info and allocated one
+ * new system chunk if necessary. However there are two exceptions:
+ *
+ * 1) We may have enough free space in the system space_info but all the
+ * existing system block groups have a profile which can not be used
+ * for extent allocation.
+ *
+ * This happens when mounting in degraded mode. For example we have a
+ * RAID1 filesystem with 2 devices, lose one device and mount the fs
+ * using the other device in degraded mode. If we then allocate a chunk,
+ * we may have enough free space in the existing system space_info, but
+ * none of the block groups can be used for extent allocation since they
+ * have a RAID1 profile, and because we are in degraded mode with a
+ * single device, we are forced to allocate a new system chunk with a
+ * SINGLE profile. Making check_system_chunk() iterate over all system
+ * block groups and check if they have a usable profile and enough space
+ * can be slow on very large filesystems, so we tolerate the -ENOSPC and
+ * try again after forcing allocation of a new system chunk. This way we
+ * avoid paying the cost of that search in normal circumstances, when we
+ * are not mounted in degraded mode;
+ *
+ * 2) We had enough free space in the system space_info, and one suitable
+ * block group to allocate from when we called check_system_chunk()
+ * above. However right after we called it, the only system block group
+ * with enough free space got turned into RO mode by a running scrub,
+ * and in this case we have to allocate a new one and retry. We only
+ * need to do this allocation and retry once, since we have a transaction
+ * handle and scrub uses the commit root to search for block groups.
+ */
+ if (ret == -ENOSPC) {
+ const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
+ struct btrfs_block_group *sys_bg;
+
+ sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+ if (IS_ERR(sys_bg)) {
+ ret = PTR_ERR(sys_bg);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+ } else if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+out:
+ btrfs_trans_release_chunk_metadata(trans);
+
+ return ret;
+}
+
/*
- * If force is CHUNK_ALLOC_FORCE:
+ * Chunk allocation is done in 2 phases:
+ *
+ * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
+ * the chunk, the chunk mapping, create its block group and add the items
+ * that belong in the chunk btree to it - more specifically, we need to
+ * update device items in the chunk btree and add a new chunk item to it.
+ *
+ * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
+ * group item to the extent btree and the device extent items to the devices
+ * btree.
+ *
+ * This is done to prevent deadlocks. For example when COWing a node from the
+ * extent btree we are holding a write lock on the node's parent and if we
+ * trigger chunk allocation and attempt to insert the new block group item
+ * in the extent btree right away, we could deadlock because the path for the
+ * insertion can include that parent node. At first glance it seems impossible
+ * to trigger chunk allocation after starting a transaction since tasks should
+ * reserve enough transaction units (metadata space). However, while that is true
+ * most of the time, chunk allocation may still be triggered for several reasons:
+ *
+ * 1) When reserving metadata, we check if there is enough free space in the
+ * metadata space_info and therefore don't trigger allocation of a new chunk.
+ * However later when the task actually tries to COW an extent buffer from
+ * the extent btree or from the device btree for example, it is forced to
+ * allocate a new block group (chunk) because the only one that had enough
+ * free space was just turned to RO mode by a running scrub for example (or
+ * device replace, block group reclaim thread, etc), so we can not use it
+ * for allocating an extent and end up being forced to allocate a new one;
+ *
+ * 2) Because we only check that the metadata space_info has enough free bytes,
+ * we end up not allocating a new metadata chunk in that case. However if
+ * the filesystem was mounted in degraded mode, none of the existing block
+ * groups might be suitable for extent allocation due to their incompatible
+ * profile (e.g. mounting a 2-device filesystem, where all block groups
+ * use a RAID1 profile, in degraded mode using a single device). In this case
+ * when the task attempts to COW some extent buffer of the extent btree for
+ * example, it will trigger allocation of a new metadata block group with a
+ * suitable profile (SINGLE profile in the example of the degraded mount of
+ * the RAID1 filesystem);
+ *
+ * 3) The task has reserved enough transaction units / metadata space, but when
+ * it attempts to COW an extent buffer from the extent or device btree for
+ * example, it does not find any free extent in any metadata block group,
+ * and is therefore forced to try to allocate a new metadata block group.
+ * This is because some other task allocated all available extents in the
+ * meanwhile - this typically happens with tasks that don't reserve space
+ * properly, either intentionally or as a bug. One example where this is
+ * done intentionally is fsync, as it does not reserve any transaction units
+ * and ends up allocating a variable number of metadata extents for log
+ * tree extent buffers.
+ *
+ * We also need this two-phase setup when adding a device to a filesystem with
+ * a seed device - we must create new metadata and system chunks without adding
+ * any of the block group items to the chunk, extent and device btrees. If we
+ * did not do it this way, we would get ENOSPC when attempting to update those
+ * btrees, since all the chunks from the seed device are read-only.
+ *
+ * Phase 1 does the updates and insertions to the chunk btree because if we had
+ * it done in phase 2 and have a thundering herd of tasks allocating chunks in
+ * parallel, we risk having too many system chunks allocated if many tasks
+ * reach phase 1 without the previous ones completing phase 2. In the
+ * extreme case this leads to exhaustion of the system chunk array in the
+ * superblock. This is easier to trigger if using a btree node/leaf size of 64K
+ * and with RAID filesystems (so we have more device items in the chunk btree).
+ * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
+ * the system chunk array due to concurrent allocations") provides more details.
+ *
+ * For allocation of system chunks, we defer the updates and insertions into the
+ * chunk btree to phase 2. This is to prevent deadlocks on extent buffers because
+ * if the chunk allocation is triggered while COWing an extent buffer of the
+ * chunk btree, we are holding a lock on the parent of that extent buffer and
+ * doing the chunk btree updates and insertions can require locking that parent.
+ * This is for the very few and rare cases where we update the chunk btree that
+ * are not chunk allocation or chunk removal: adding a device, removing a device
+ * or resizing a device.
+ *
+ * The reservation of system space, done through check_system_chunk(), as well
+ * as all the updates and insertions into the chunk btree must be done while
+ * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
+ * an extent buffer from the chunk btree we never trigger allocation of a new
+ * system chunk, which would result in a deadlock (trying to lock twice an
+ * extent buffer of the chunk btree, first time before triggering the chunk
+ * allocation and the second time during chunk allocation while attempting to
+ * update the chunks btree). The system chunk array is also updated while holding
+ * that mutex. The same logic applies to removing chunks - we must reserve system
+ * space, update the chunk btree and the system chunk array in the superblock
+ * while holding fs_info->chunk_mutex.
+ *
+ * This function, btrfs_chunk_alloc(), belongs to phase 1.
+ *
+ * If @force is CHUNK_ALLOC_FORCE:
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
- * If force is NOT CHUNK_ALLOC_FORCE:
+ * If @force is NOT CHUNK_ALLOC_FORCE:
* - return 0 if it doesn't need to allocate a new chunk,
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
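
For illustration only, a minimal hypothetical caller sketch (not part of this patch; example_force_chunk_alloc() is an illustrative name) showing how the return convention above is consumed, mirroring the existing btrfs_force_chunk_alloc() helper:

	/* Hypothetical sketch only, mirroring btrfs_force_chunk_alloc(). */
	static int example_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
	{
		int ret;

		/*
		 * Phase 1: reserves system space, allocates the chunk mapping and,
		 * for non-system chunks, inserts the chunk item (see do_chunk_alloc()).
		 */
		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
		if (ret < 0)
			return ret;	/* -ENOSPC or another error */

		/*
		 * With CHUNK_ALLOC_FORCE, ret is 1 on success. Phase 2, which adds
		 * the block group item, the device extent items and any deferred
		 * system chunk items, runs later through
		 * btrfs_create_pending_block_groups().
		 */
		return 0;
	}
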
@@ -3243,6 +3472,13 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
/* Don't re-enter if we're already allocating a chunk */
if (trans->allocating_chunk)
return -ENOSPC;
+ /*
+ * If we are removing a chunk, don't re-enter or we would deadlock.
+ * System space reservation and system chunk allocation is done by the
+ * chunk remove operation (btrfs_remove_chunk()).
+ */
+ if (trans->removing_chunk)
+ return -ENOSPC;
space_info = btrfs_find_space_info(fs_info, flags);
ASSERT(space_info);
@@ -3306,13 +3542,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
force_metadata_allocation(fs_info);
}
- /*
- * Check if we have enough space in SYSTEM chunk because we may need
- * to update devices.
- */
- check_system_chunk(trans, flags);
-
- ret = btrfs_alloc_chunk(trans, flags);
+ ret = do_chunk_alloc(trans, flags);
trans->allocating_chunk = false;
spin_lock(&space_info->lock);
@@ -3331,22 +3561,6 @@ out:
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
mutex_unlock(&fs_info->chunk_mutex);
- /*
- * When we allocate a new chunk we reserve space in the chunk block
- * reserve to make sure we can COW nodes/leafs in the chunk tree or
- * add new nodes/leafs to it if we end up needing to do it when
- * inserting the chunk item and updating device items as part of the
- * second phase of chunk allocation, performed by
- * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
- * large number of new block groups to create in our transaction
- * handle's new_bgs list to avoid exhausting the chunk block reserve
- * in extreme cases - like having a single transaction create many new
- * block groups when starting to write out the free space caches of all
- * the block groups that were made dirty during the lifetime of the
- * transaction.
- */
- if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
- btrfs_create_pending_block_groups(trans);
return ret;
}
@@ -3367,7 +3581,6 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
*/
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
- struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *info;
u64 left;
@@ -3382,7 +3595,6 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
lockdep_assert_held(&fs_info->chunk_mutex);
info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
-again:
spin_lock(&info->lock);
left = info->total_bytes - btrfs_space_info_used(info, true);
spin_unlock(&info->lock);
@@ -3401,76 +3613,39 @@ again:
if (left < thresh) {
u64 flags = btrfs_system_alloc_profile(fs_info);
- u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
-
- /*
- * If there's not available space for the chunk tree (system
- * space) and there are other tasks that reserved space for
- * creating a new system block group, wait for them to complete
- * the creation of their system block group and release excess
- * reserved space. We do this because:
- *
- * *) We can end up allocating more system chunks than necessary
- * when there are multiple tasks that are concurrently
- * allocating block groups, which can lead to exhaustion of
- * the system array in the superblock;
- *
- * *) If we allocate extra and unnecessary system block groups,
- * despite being empty for a long time, and possibly forever,
- * they end not being added to the list of unused block groups
- * because that typically happens only when deallocating the
- * last extent from a block group - which never happens since
- * we never allocate from them in the first place. The few
- * exceptions are when mounting a filesystem or running scrub,
- * which add unused block groups to the list of unused block
- * groups, to be deleted by the cleaner kthread.
- * And even when they are added to the list of unused block
- * groups, it can take a long time until they get deleted,
- * since the cleaner kthread might be sleeping or busy with
- * other work (deleting subvolumes, running delayed iputs,
- * defrag scheduling, etc);
- *
- * This is rare in practice, but can happen when too many tasks
- * are allocating blocks groups in parallel (via fallocate())
- * and before the one that reserved space for a new system block
- * group finishes the block group creation and releases the space
- * reserved in excess (at btrfs_create_pending_block_groups()),
- * other tasks end up here and see free system space temporarily
- * not enough for updating the chunk tree.
- *
- * We unlock the chunk mutex before waiting for such tasks and
- * lock it again after the wait, otherwise we would deadlock.
- * It is safe to do so because allocating a system chunk is the
- * first thing done while allocating a new block group.
- */
- if (reserved > trans->chunk_bytes_reserved) {
- const u64 min_needed = reserved - thresh;
-
- mutex_unlock(&fs_info->chunk_mutex);
- wait_event(cur_trans->chunk_reserve_wait,
- atomic64_read(&cur_trans->chunk_bytes_reserved) <=
- min_needed);
- mutex_lock(&fs_info->chunk_mutex);
- goto again;
- }
+ struct btrfs_block_group *bg;
/*
* Ignore failure to create system chunk. We might end up not
* needing it, as we might not need to COW all nodes/leafs from
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
+ *
+ * Also, if our caller is allocating a system chunk, do not
+ * attempt to insert the chunk item in the chunk btree, as we
+ * could deadlock on an extent buffer since our caller may be
+ * COWing an extent buffer from the chunk btree.
*/
- ret = btrfs_alloc_chunk(trans, flags);
+ bg = btrfs_alloc_chunk(trans, flags);
+ if (IS_ERR(bg)) {
+ ret = PTR_ERR(bg);
+ } else if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ /*
+ * If we fail to add the chunk item here, we end up
+ * trying again at phase 2 of chunk allocation, at
+ * btrfs_create_pending_block_groups(). So ignore
+ * any error here.
+ */
+ btrfs_chunk_alloc_add_chunk_item(trans, bg);
+ }
}
if (!ret) {
ret = btrfs_block_rsv_add(fs_info->chunk_root,
&fs_info->chunk_block_rsv,
thresh, BTRFS_RESERVE_NO_FLUSH);
- if (!ret) {
- atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
+ if (!ret)
trans->chunk_bytes_reserved += thresh;
- }
}
}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 7b927425dc71..c72a71efcb18 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -97,6 +97,7 @@ struct btrfs_block_group {
unsigned int removed:1;
unsigned int to_copy:1;
unsigned int relocating_repair:1;
+ unsigned int chunk_item_inserted:1;
int disk_cache_state;
@@ -268,8 +269,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size);
+struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
+ u64 bytes_used, u64 type,
+ u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
bool do_chunk_alloc);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9a023ae0f98b..30d82cdf128c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -352,7 +352,7 @@ static void end_compressed_bio_write(struct bio *bio)
btrfs_record_physical_zoned(inode, cb->start, bio);
btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
cb->start, cb->start + cb->len - 1,
- bio->bi_status == BLK_STS_OK);
+ !cb->errors);
end_compressed_writeback(inode, cb);
/* note, our inode could be gone now */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4bc3ca2cbd7d..c5c08c87e130 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -364,49 +364,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
return 0;
}
-static struct extent_buffer *alloc_tree_block_no_bg_flush(
- struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 parent_start,
- const struct btrfs_disk_key *disk_key,
- int level,
- u64 hint,
- u64 empty_size,
- enum btrfs_lock_nesting nest)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_buffer *ret;
-
- /*
- * If we are COWing a node/leaf from the extent, chunk, device or free
- * space trees, make sure that we do not finish block group creation of
- * pending block groups. We do this to avoid a deadlock.
- * COWing can result in allocation of a new chunk, and flushing pending
- * block groups (btrfs_create_pending_block_groups()) can be triggered
- * when finishing allocation of a new chunk. Creation of a pending block
- * group modifies the extent, chunk, device and free space trees,
- * therefore we could deadlock with ourselves since we are holding a
- * lock on an extent buffer that btrfs_create_pending_block_groups() may
- * try to COW later.
- * For similar reasons, we also need to delay flushing pending block
- * groups when splitting a leaf or node, from one of those trees, since
- * we are holding a write lock on it and its parent or when inserting a
- * new root node for one of those trees.
- */
- if (root == fs_info->extent_root ||
- root == fs_info->chunk_root ||
- root == fs_info->dev_root ||
- root == fs_info->free_space_root)
- trans->can_flush_pending_bgs = false;
-
- ret = btrfs_alloc_tree_block(trans, root, parent_start,
- root->root_key.objectid, disk_key, level,
- hint, empty_size, nest);
- trans->can_flush_pending_bgs = true;
-
- return ret;
-}
-
/*
* does the dirty work in cow of a single block. The parent block (if
* supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -455,8 +412,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
- cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
- level, search_start, empty_size, nest);
+ cow = btrfs_alloc_tree_block(trans, root, parent_start,
+ root->root_key.objectid, &disk_key, level,
+ search_start, empty_size, nest);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -2458,9 +2416,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
- c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
- root->node->start, 0,
- BTRFS_NESTING_NEW_ROOT);
+ c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &lower_key, level, root->node->start, 0,
+ BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(c))
return PTR_ERR(c);
@@ -2589,8 +2547,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
- split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
- c->start, 0, BTRFS_NESTING_SPLIT);
+ split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &disk_key, level, c->start, 0,
+ BTRFS_NESTING_SPLIT);
if (IS_ERR(split))
return PTR_ERR(split);
@@ -3381,10 +3340,10 @@ again:
* BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
* use BTRFS_NESTING_NEW_ROOT.
*/
- right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
- l->start, 0, num_doubles ?
- BTRFS_NESTING_NEW_ROOT :
- BTRFS_NESTING_SPLIT);
+ right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &disk_key, 0, l->start, 0,
+ num_doubles ? BTRFS_NESTING_NEW_ROOT :
+ BTRFS_NESTING_SPLIT);
if (IS_ERR(right))
return PTR_ERR(right);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 06bc842ecdb3..ca848b183474 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -974,7 +974,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
if (qrecord_inserted)
- btrfs_qgroup_trace_extent_post(fs_info, record);
+ btrfs_qgroup_trace_extent_post(trans, record);
return 0;
}
@@ -1069,7 +1069,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return btrfs_qgroup_trace_extent_post(trans, record);
return 0;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b117dd3b8172..a59ab7b9aea0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -209,7 +209,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
- const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
+ const int num_pages = num_extent_pages(buf);
const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
char *kaddr;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d296483d148f..268ce58d4569 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6019,6 +6019,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ continue;
+
ret = btrfs_trim_free_extents(device, &group_trimmed);
if (ret) {
dev_failed++;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 28a05ba47060..ee34497500e1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -399,7 +399,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
/*
* Copy data from userspace to the current page
*/
- copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
+ copied = copy_page_from_iter_atomic(page, offset, count, i);
/* Flush processor's dcache for this page */
flush_dcache_page(page);
@@ -413,20 +413,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
* The rest of the btrfs_file_write code will fall
* back to page at a time copies after we return 0.
*/
- if (!PageUptodate(page) && copied < count)
- copied = 0;
+ if (unlikely(copied < count)) {
+ if (!PageUptodate(page)) {
+ iov_iter_revert(i, copied);
+ copied = 0;
+ }
+ if (!copied)
+ break;
+ }
- iov_iter_advance(i, copied);
write_bytes -= copied;
total_copied += copied;
-
- /* Return to btrfs_file_write_iter to fault page */
- if (unlikely(copied == 0))
- break;
-
- if (copied < PAGE_SIZE - offset) {
- offset += copied;
- } else {
+ offset += copied;
+ if (offset == PAGE_SIZE) {
pg++;
offset = 0;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e6eb20987351..06f9f167222b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2271,13 +2271,127 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
+/*
+ * Split an extent_map at [start, start + len]
+ *
+ * This function is intended to be used only for extract_ordered_extent().
+ */
+static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
+ u64 pre, u64 post)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ struct extent_map *split_pre = NULL;
+ struct extent_map *split_mid = NULL;
+ struct extent_map *split_post = NULL;
+ int ret = 0;
+ int modified;
+ unsigned long flags;
+
+ /* Sanity check */
+ if (pre == 0 && post == 0)
+ return 0;
+
+ split_pre = alloc_extent_map();
+ if (pre)
+ split_mid = alloc_extent_map();
+ if (post)
+ split_post = alloc_extent_map();
+ if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ASSERT(pre + post < len);
+
+ lock_extent(&inode->io_tree, start, start + len - 1);
+ write_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, start, len);
+ if (!em) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ ASSERT(em->len == len);
+ ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
+ ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
+
+ flags = em->flags;
+ clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+ clear_bit(EXTENT_FLAG_LOGGING, &flags);
+ modified = !list_empty(&em->list);
+
+ /* First, replace the em with a new extent_map starting from em->start */
+ split_pre->start = em->start;
+ split_pre->len = (pre ? pre : em->len - post);
+ split_pre->orig_start = split_pre->start;
+ split_pre->block_start = em->block_start;
+ split_pre->block_len = split_pre->len;
+ split_pre->orig_block_len = split_pre->block_len;
+ split_pre->ram_bytes = split_pre->len;
+ split_pre->flags = flags;
+ split_pre->compress_type = em->compress_type;
+ split_pre->generation = em->generation;
+
+ replace_extent_mapping(em_tree, em, split_pre, modified);
+
+ /*
+ * Now we only have an extent_map at:
+ * [em->start, em->start + pre] if pre != 0
+ * [em->start, em->start + em->len - post] if pre == 0
+ */
+
+ if (pre) {
+ /* Insert the middle extent_map */
+ split_mid->start = em->start + pre;
+ split_mid->len = em->len - pre - post;
+ split_mid->orig_start = split_mid->start;
+ split_mid->block_start = em->block_start + pre;
+ split_mid->block_len = split_mid->len;
+ split_mid->orig_block_len = split_mid->block_len;
+ split_mid->ram_bytes = split_mid->len;
+ split_mid->flags = flags;
+ split_mid->compress_type = em->compress_type;
+ split_mid->generation = em->generation;
+ add_extent_mapping(em_tree, split_mid, modified);
+ }
+
+ if (post) {
+ split_post->start = em->start + em->len - post;
+ split_post->len = post;
+ split_post->orig_start = split_post->start;
+ split_post->block_start = em->block_start + em->len - post;
+ split_post->block_len = split_post->len;
+ split_post->orig_block_len = split_post->block_len;
+ split_post->ram_bytes = split_post->len;
+ split_post->flags = flags;
+ split_post->compress_type = em->compress_type;
+ split_post->generation = em->generation;
+ add_extent_mapping(em_tree, split_post, modified);
+ }
+
+ /* Once for us */
+ free_extent_map(em);
+ /* Once for the tree */
+ free_extent_map(em);
+
+out_unlock:
+ write_unlock(&em_tree->lock);
+ unlock_extent(&inode->io_tree, start, start + len - 1);
+out:
+ free_extent_map(split_pre);
+ free_extent_map(split_mid);
+ free_extent_map(split_post);
+
+ return ret;
+}
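
A hypothetical worked example of the split performed by split_zoned_em() above, with values chosen purely for illustration (len = 128K, pre = 16K, post = 32K; "bs" stands for em->block_start):

	Original em:  [start, start + 128K)        blocks [bs, bs + 128K)
	split_pre:    [start, start + 16K)         blocks [bs, bs + 16K)
	split_mid:    [start + 16K, start + 96K)   blocks [bs + 16K, bs + 96K)
	split_post:   [start + 96K, start + 128K)  blocks [bs + 96K, bs + 128K)

With pre == 0 no split_mid is inserted and split_pre covers everything up to the final post bytes; with post == 0 no split_post is inserted.
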
+
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
struct bio *bio, loff_t file_offset)
{
struct btrfs_ordered_extent *ordered;
- struct extent_map *em = NULL, *em_new = NULL;
- struct extent_map_tree *em_tree = &inode->extent_tree;
u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 file_len;
u64 len = bio->bi_iter.bi_size;
u64 end = start + len;
u64 ordered_end;
@@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
goto out;
}
+ file_len = ordered->num_bytes;
pre = start - ordered->disk_bytenr;
post = ordered_end - end;
ret = btrfs_split_ordered_extent(ordered, pre, post);
if (ret)
goto out;
-
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
- if (!em) {
- read_unlock(&em_tree->lock);
- ret = -EIO;
- goto out;
- }
- read_unlock(&em_tree->lock);
-
- ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
- /*
- * We cannot reuse em_new here but have to create a new one, as
- * unpin_extent_cache() expects the start of the extent map to be the
- * logical offset of the file, which does not hold true anymore after
- * splitting.
- */
- em_new = create_io_em(inode, em->start + pre, len,
- em->start + pre, em->block_start + pre, len,
- len, len, BTRFS_COMPRESS_NONE,
- BTRFS_ORDERED_REGULAR);
- if (IS_ERR(em_new)) {
- ret = PTR_ERR(em_new);
- goto out;
- }
- free_extent_map(em_new);
+ ret = split_zoned_em(inode, file_offset, file_len, pre, post);
out:
- free_extent_map(em);
btrfs_put_ordered_extent(ordered);
return errno_to_blk_status(ret);
@@ -2903,7 +2992,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
- if (ordered_extent->disk)
+ if (ordered_extent->bdev)
btrfs_rewrite_logical_zoned(ordered_extent);
btrfs_free_io_failure_record(inode, start, end);
@@ -9137,8 +9226,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
bool dest_log_pinned = false;
bool need_abort = false;
- /* we only allow rename subvolume link between subvolumes */
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+ /*
+ * For non-subvolumes allow exchange only within one subvolume, in the
+ * same inode namespace. Two subvolumes (represented as directories) can
+ * be exchanged as they're logical links and have a fixed inode number.
+ */
+ if (root != dest &&
+ (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
+ new_ino != BTRFS_FIRST_FREE_OBJECTID))
return -EXDEV;
/* close the race window with snapshot create/destroy ioctl */
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6eb41b7c0c84..5c0f8481e25e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -190,8 +190,6 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
entry->truncated_len = (u64)-1;
entry->qgroup_rsv = ret;
entry->physical = (u64)-1;
- entry->disk = NULL;
- entry->partno = (u8)-1;
ASSERT(type == BTRFS_ORDERED_REGULAR ||
type == BTRFS_ORDERED_NOCOW ||
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 566472004edd..b2d88aba8420 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -145,8 +145,7 @@ struct btrfs_ordered_extent {
* command in a workqueue context
*/
u64 physical;
- struct gendisk *disk;
- u8 partno;
+ struct block_device *bdev;
};
/*
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 07ec06d4e972..0fa121171ca1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1704,17 +1704,39 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord)
{
struct ulist *old_root;
u64 bytenr = qrecord->bytenr;
int ret;
- ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
+ /*
+ * We are always called in a context where we are already holding a
+ * transaction handle. Often we are called when adding a data delayed
+ * reference from btrfs_truncate_inode_items() (truncating or unlinking),
+ * in which case we will be holding a write lock on extent buffer from a
+ * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
+ * acquire fs_info->commit_root_sem, because that is a higher level lock
+ * that must be acquired before locking any extent buffers.
+ *
+ * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
+ * but we can't pass it a non-NULL transaction handle, because otherwise
+ * it would not use commit roots and would lock extent buffers, causing
+ * a deadlock if it ends up trying to read lock the same extent buffer
+ * that was previously write locked at btrfs_truncate_inode_items().
+ *
+ * So pass a NULL transaction handle to btrfs_find_all_roots() and
+ * explicitly tell it to not acquire the commit_root_sem - if we are
+ * holding a transaction handle we don't need its protection.
+ */
+ ASSERT(trans != NULL);
+
+ ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
+ false, true);
if (ret < 0) {
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_warn(fs_info,
+ trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ btrfs_warn(trans->fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
ret);
return 0;
@@ -1758,7 +1780,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
kfree(record);
return 0;
}
- return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return btrfs_qgroup_trace_extent_post(trans, record);
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
@@ -2629,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
/* Search commit root to find old_roots */
ret = btrfs_find_all_roots(NULL, fs_info,
record->bytenr, 0,
- &record->old_roots, false);
+ &record->old_roots, false, false);
if (ret < 0)
goto cleanup;
}
@@ -2645,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
* current root. It's safe inside commit_transaction().
*/
ret = btrfs_find_all_roots(trans, fs_info,
- record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
+ record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false);
if (ret < 0)
goto cleanup;
if (qgroup_to_skip) {
@@ -3179,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
num_bytes = found.offset;
ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
- &roots, false);
+ &roots, false, false);
if (ret < 0)
goto out;
/* For rescan, just pass old_roots as NULL */
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 7283e4f549af..880e9df0dac1 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -298,7 +298,7 @@ int btrfs_qgroup_trace_extent_nolock(
* using current root, then we can move all expensive backref walk out of
* transaction committing, but not now as qgroup accounting will be wrong again.
*/
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord);
/*
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index f3137285a9e2..98b5aaba46f1 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -224,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
* quota.
*/
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -237,7 +237,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -261,7 +261,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
new_roots = NULL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -273,7 +273,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return -EINVAL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -325,7 +325,7 @@ static int test_multiple_refs(struct btrfs_root *root,
}
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -338,7 +338,7 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -360,7 +360,7 @@ static int test_multiple_refs(struct btrfs_root *root,
}
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -373,7 +373,7 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -401,7 +401,7 @@ static int test_multiple_refs(struct btrfs_root *root,
}
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
@@ -414,7 +414,7 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
- false);
+ false, false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 50318231c1a8..14b9fdc8aaa9 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -254,23 +254,21 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
}
/*
- * To be called after all the new block groups attached to the transaction
- * handle have been created (btrfs_create_pending_block_groups()).
+ * To be called after doing the chunk btree updates right after allocating a new
+ * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), after removing a
+ * chunk once all its chunk btree updates are done, and after finishing phase 2
+ * of chunk allocation (btrfs_create_pending_block_groups()) in case some block
+ * group had its chunk item insertion delayed to phase 2.
*/
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_transaction *cur_trans = trans->transaction;
if (!trans->chunk_bytes_reserved)
return;
- WARN_ON_ONCE(!list_empty(&trans->new_bgs));
-
btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
trans->chunk_bytes_reserved, NULL);
- atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
- cond_wake_up(&cur_trans->chunk_reserve_wait);
trans->chunk_bytes_reserved = 0;
}
@@ -386,8 +384,6 @@ loop:
spin_lock_init(&cur_trans->dropped_roots_lock);
INIT_LIST_HEAD(&cur_trans->releasing_ebs);
spin_lock_init(&cur_trans->releasing_ebs_lock);
- atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
- init_waitqueue_head(&cur_trans->chunk_reserve_wait);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
@@ -701,7 +697,6 @@ again:
h->fs_info = root->fs_info;
h->type = type;
- h->can_flush_pending_bgs = true;
INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 07d76029f598..ba45065f9451 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -96,13 +96,6 @@ struct btrfs_transaction {
spinlock_t releasing_ebs_lock;
struct list_head releasing_ebs;
-
- /*
- * The number of bytes currently reserved, by all transaction handles
- * attached to this transaction, for metadata extents of the chunk tree.
- */
- atomic64_t chunk_bytes_reserved;
- wait_queue_head_t chunk_reserve_wait;
};
#define __TRANS_FREEZABLE (1U << 0)
@@ -139,7 +132,7 @@ struct btrfs_trans_handle {
short aborted;
bool adding_csums;
bool allocating_chunk;
- bool can_flush_pending_bgs;
+ bool removing_chunk;
bool reloc_reserved;
bool in_fsync;
struct btrfs_root *root;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index cab451d19547..e6430ac9bbe8 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3173,7 +3173,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (!log_root_tree->node) {
ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
if (ret) {
- mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->tree_root->log_mutex);
goto out;
}
}
@@ -5526,16 +5526,29 @@ log_extents:
spin_lock(&inode->lock);
inode->logged_trans = trans->transid;
/*
- * Don't update last_log_commit if we logged that an inode exists
- * after it was loaded to memory (full_sync bit set).
- * This is to prevent data loss when we do a write to the inode,
- * then the inode gets evicted after all delalloc was flushed,
- * then we log it exists (due to a rename for example) and then
- * fsync it. This last fsync would do nothing (not logging the
- * extents previously written).
+ * Don't update last_log_commit if we logged that an inode exists.
+ * We do this for two reasons:
+ *
+ * 1) We might have had buffered writes to this inode that were
+ * flushed and had their ordered extents completed in this
+ * transaction, but we did not previously log the inode with
+ * LOG_INODE_ALL. Later the inode was evicted and after that
+ * it was loaded again and this LOG_INODE_EXISTS log operation
+ * happened. We must make sure that if an explicit fsync against
+ * the inode is performed later, it logs the new extents, an
+ * updated inode item, etc, and syncs the log. The same logic
+ * applies to direct IO writes instead of buffered writes.
+ *
+ * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
+ * is logged with an i_size of 0 or whatever value was logged
+ * before. If later the i_size of the inode is increased by a
+ * truncate operation, the log is synced through an fsync of
+ * some other inode and then finally an explicit fsync against
+ * this inode is made, we must make sure this fsync logs the
+ * inode with the new i_size, the hole between old i_size and
+ * the new i_size, and syncs the log.
*/
- if (inode_only != LOG_INODE_EXISTS ||
- !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+ if (inode_only != LOG_INODE_EXISTS)
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
}
@@ -6490,8 +6503,8 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* if this inode hasn't been logged and directory we're renaming it
* from hasn't been logged, we don't need to log it
*/
- if (inode->logged_trans < trans->transid &&
- (!old_dir || old_dir->logged_trans < trans->transid))
+ if (!inode_logged(trans, inode) &&
+ (!old_dir || !inode_logged(trans, old_dir)))
return;
/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 807502cd6510..70f94b75f25a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1078,6 +1078,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
list_del_init(&device->dev_alloc_list);
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
@@ -1745,19 +1746,14 @@ again:
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
} else {
- btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
goto out;
}
*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
ret = btrfs_del_item(trans, root, path);
- if (ret) {
- btrfs_handle_fs_error(fs_info, ret,
- "Failed to remove dev extent item");
- } else {
+ if (ret == 0)
set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
- }
out:
btrfs_free_path(path);
return ret;
@@ -2942,7 +2938,7 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
u32 cur;
struct btrfs_key key;
- mutex_lock(&fs_info->chunk_mutex);
+ lockdep_assert_held(&fs_info->chunk_mutex);
array_size = btrfs_super_sys_array_size(super_copy);
ptr = super_copy->sys_chunk_array;
@@ -2972,7 +2968,6 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
cur += len;
}
}
- mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
@@ -3012,6 +3007,29 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
return em;
}
+static int remove_chunk_item(struct btrfs_trans_handle *trans,
+ struct map_lookup *map, u64 chunk_offset)
+{
+ int i;
+
+ /*
+ * Removing chunk items and updating the device items in the chunk btree
+ * requires holding the chunk_mutex.
+ * See the comment at btrfs_chunk_alloc() for the details.
+ */
+ lockdep_assert_held(&trans->fs_info->chunk_mutex);
+
+ for (i = 0; i < map->num_stripes; i++) {
+ int ret;
+
+ ret = btrfs_update_device(trans, map->stripes[i].dev);
+ if (ret)
+ return ret;
+ }
+
+ return btrfs_free_chunk(trans, chunk_offset);
+}
+
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -3032,14 +3050,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
return PTR_ERR(em);
}
map = em->map_lookup;
- mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, map->type);
- mutex_unlock(&fs_info->chunk_mutex);
/*
- * Take the device list mutex to prevent races with the final phase of
- * a device replace operation that replaces the device object associated
- * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+ * First delete the device extent items from the devices btree.
+ * We take the device_list_mutex to avoid racing with the finishing phase
+ * of a device replace operation. See the comment below before acquiring
+ * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
+ * because that can result in a deadlock when deleting the device extent
+ * items from the devices btree - COWing an extent buffer from the btree
+ * may result in allocating a new metadata chunk, which would attempt to
+ * lock again fs_info->chunk_mutex.
*/
mutex_lock(&fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
@@ -3061,18 +3081,73 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
btrfs_clear_space_info_full(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
}
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_update_device(trans, device);
+ /*
+ * We acquire fs_info->chunk_mutex for 2 reasons:
+ *
+ * 1) Just like with the first phase of the chunk allocation, we must
+ * reserve system space, do all chunk btree updates and deletions, and
+ * update the system chunk array in the superblock while holding this
+ * mutex. This is for similar reasons as explained on the comment at
+ * the top of btrfs_chunk_alloc();
+ *
+ * 2) Prevent races with the final phase of a device replace operation
+ * that replaces the device object associated with the map's stripes,
+ * because the device object's id can change at any time during that
+ * final phase of the device replace operation
+ * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+ * replaced device and then see it with an ID of
+ * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
+ * the device item, which does not exist on the chunk btree.
+ * The finishing phase of device replace acquires both the
+ * device_list_mutex and the chunk_mutex, in that order, so we are
+ * safe by just acquiring the chunk_mutex.
+ */
+ trans->removing_chunk = true;
+ mutex_lock(&fs_info->chunk_mutex);
+
+ check_system_chunk(trans, map->type);
+
+ ret = remove_chunk_item(trans, map, chunk_offset);
+ /*
+ * Normally we should not get -ENOSPC since we reserved space before
+ * through the call to check_system_chunk().
+ *
+ * Despite our system space_info having enough free space, we may not
+ * be able to allocate extents from its block groups, because all have
+ * an incompatible profile, which will force us to allocate a new system
+ * block group with the right profile, or right after we called
+ * check_system_chunk() above, a scrub turned the only system block group
+ * with enough free space into RO mode.
+ * This is explained with more detail at do_chunk_alloc().
+ *
+ * So if we get -ENOSPC, allocate a new system chunk and retry once.
+ */
+ if (ret == -ENOSPC) {
+ const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
+ struct btrfs_block_group *sys_bg;
+
+ sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+ if (IS_ERR(sys_bg)) {
+ ret = PTR_ERR(sys_bg);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
if (ret) {
- mutex_unlock(&fs_devices->device_list_mutex);
btrfs_abort_transaction(trans, ret);
goto out;
}
- }
- mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_free_chunk(trans, chunk_offset);
- if (ret) {
+ ret = remove_chunk_item(trans, map, chunk_offset);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+ } else if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3087,6 +3162,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
}
+ mutex_unlock(&fs_info->chunk_mutex);
+ trans->removing_chunk = false;
+
+ /*
+ * We are done with chunk btree updates and deletions, so release the
+ * system space we previously reserved (with check_system_chunk()).
+ */
+ btrfs_trans_release_chunk_metadata(trans);
+
ret = btrfs_remove_block_group(trans, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -3094,6 +3178,10 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
}
out:
+ if (trans->removing_chunk) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ trans->removing_chunk = false;
+ }
/* once for us */
free_extent_map(em);
return ret;
@@ -4860,13 +4948,12 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
u32 array_size;
u8 *ptr;
- mutex_lock(&fs_info->chunk_mutex);
+ lockdep_assert_held(&fs_info->chunk_mutex);
+
array_size = btrfs_super_sys_array_size(super_copy);
if (array_size + item_size + sizeof(disk_key)
- > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
- mutex_unlock(&fs_info->chunk_mutex);
+ > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
return -EFBIG;
- }
ptr = super_copy->sys_chunk_array + array_size;
btrfs_cpu_key_to_disk(&disk_key, key);
@@ -4875,7 +4962,6 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
memcpy(ptr, chunk, item_size);
item_size += sizeof(disk_key);
btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
- mutex_unlock(&fs_info->chunk_mutex);
return 0;
}
@@ -5225,13 +5311,14 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
}
}
-static int create_chunk(struct btrfs_trans_handle *trans,
+static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
{
struct btrfs_fs_info *info = trans->fs_info;
struct map_lookup *map = NULL;
struct extent_map_tree *em_tree;
+ struct btrfs_block_group *block_group;
struct extent_map *em;
u64 start = ctl->start;
u64 type = ctl->type;
@@ -5241,7 +5328,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
if (!map)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
map->num_stripes = ctl->num_stripes;
for (i = 0; i < ctl->ndevs; ++i) {
@@ -5263,7 +5350,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
em = alloc_extent_map();
if (!em) {
kfree(map);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
em->map_lookup = map;
@@ -5279,12 +5366,12 @@ static int create_chunk(struct btrfs_trans_handle *trans,
if (ret) {
write_unlock(&em_tree->lock);
free_extent_map(em);
- return ret;
+ return ERR_PTR(ret);
}
write_unlock(&em_tree->lock);
- ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
- if (ret)
+ block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
+ if (IS_ERR(block_group))
goto error_del_extent;
for (i = 0; i < map->num_stripes; i++) {
@@ -5304,7 +5391,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
check_raid56_incompat_flag(info, type);
check_raid1c34_incompat_flag(info, type);
- return 0;
+ return block_group;
error_del_extent:
write_lock(&em_tree->lock);
@@ -5316,34 +5403,36 @@ error_del_extent:
/* One for the tree reference */
free_extent_map(em);
- return ret;
+ return block_group;
}
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
+struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ u64 type)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
struct btrfs_device_info *devices_info = NULL;
struct alloc_chunk_ctl ctl;
+ struct btrfs_block_group *block_group;
int ret;
lockdep_assert_held(&info->chunk_mutex);
if (!alloc_profile_is_valid(type, 0)) {
ASSERT(0);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if (list_empty(&fs_devices->alloc_list)) {
if (btrfs_test_opt(info, ENOSPC_DEBUG))
btrfs_debug(info, "%s: no writable device", __func__);
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
}
if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
btrfs_err(info, "invalid chunk type 0x%llx requested", type);
ASSERT(0);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
ctl.start = find_next_chunk(info);
@@ -5353,46 +5442,43 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
GFP_NOFS);
if (!devices_info)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ret = gather_device_info(fs_devices, &ctl, devices_info);
- if (ret < 0)
+ if (ret < 0) {
+ block_group = ERR_PTR(ret);
goto out;
+ }
ret = decide_stripe_size(fs_devices, &ctl, devices_info);
- if (ret < 0)
+ if (ret < 0) {
+ block_group = ERR_PTR(ret);
goto out;
+ }
- ret = create_chunk(trans, &ctl, devices_info);
+ block_group = create_chunk(trans, &ctl, devices_info);
out:
kfree(devices_info);
- return ret;
+ return block_group;
}
/*
- * Chunk allocation falls into two parts. The first part does work
- * that makes the new allocated chunk usable, but does not do any operation
- * that modifies the chunk tree. The second part does the work that
- * requires modifying the chunk tree. This division is important for the
- * bootstrap process of adding storage to a seed btrfs.
+ * This function, btrfs_finish_chunk_alloc(), belongs to phase 2.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
*/
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
u64 chunk_offset, u64 chunk_size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *extent_root = fs_info->extent_root;
- struct btrfs_root *chunk_root = fs_info->chunk_root;
- struct btrfs_key key;
struct btrfs_device *device;
- struct btrfs_chunk *chunk;
- struct btrfs_stripe *stripe;
struct extent_map *em;
struct map_lookup *map;
- size_t item_size;
u64 dev_offset;
u64 stripe_size;
- int i = 0;
+ int i;
int ret = 0;
em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
@@ -5400,53 +5486,117 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
return PTR_ERR(em);
map = em->map_lookup;
- item_size = btrfs_chunk_item_size(map->num_stripes);
stripe_size = em->orig_block_len;
- chunk = kzalloc(item_size, GFP_NOFS);
- if (!chunk) {
- ret = -ENOMEM;
- goto out;
- }
-
/*
* Take the device list mutex to prevent races with the final phase of
* a device replace operation that replaces the device object associated
* with the map's stripes, because the device object's id can change
* at any time during that final phase of the device replace operation
- * (dev-replace.c:btrfs_dev_replace_finishing()).
+ * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+ * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+	 * resulting in persisting a device extent item with such an ID.
*/
mutex_lock(&fs_info->fs_devices->device_list_mutex);
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;
- ret = btrfs_update_device(trans, device);
- if (ret)
- break;
ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
dev_offset, stripe_size);
if (ret)
break;
}
- if (ret) {
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+ free_extent_map(em);
+ return ret;
+}
+
+/*
+ * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
+ * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
+ * system chunks.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *bg)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_root *chunk_root = fs_info->chunk_root;
+ struct btrfs_key key;
+ struct btrfs_chunk *chunk;
+ struct btrfs_stripe *stripe;
+ struct extent_map *em;
+ struct map_lookup *map;
+ size_t item_size;
+ int i;
+ int ret;
+
+ /*
+ * We take the chunk_mutex for 2 reasons:
+ *
+	 * 1) Updates and insertions in the chunk btree, as well as updates to the
+	 *    system chunk array in the superblock, must be done while holding the
+	 *    chunk_mutex. See the comment on top of btrfs_chunk_alloc() for the
+ * details;
+ *
+ * 2) To prevent races with the final phase of a device replace operation
+ * that replaces the device object associated with the map's stripes,
+ * because the device object's id can change at any time during that
+ * final phase of the device replace operation
+ * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+ * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+ * which would cause a failure when updating the device item, which does
+	 *    not exist, or persisting a stripe of the chunk item with such an ID.
+ * Here we can't use the device_list_mutex because our caller already
+ * has locked the chunk_mutex, and the final phase of device replace
+ * acquires both mutexes - first the device_list_mutex and then the
+	 *    chunk_mutex. Holding either of those mutexes protects us from a
+ * concurrent device replace.
+ */
+ lockdep_assert_held(&fs_info->chunk_mutex);
+
+ em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+
+ map = em->map_lookup;
+ item_size = btrfs_chunk_item_size(map->num_stripes);
+
+ chunk = kzalloc(item_size, GFP_NOFS);
+ if (!chunk) {
+ ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
+ for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_device *device = map->stripes[i].dev;
+
+ ret = btrfs_update_device(trans, device);
+ if (ret)
+ goto out;
+ }
+
stripe = &chunk->stripe;
for (i = 0; i < map->num_stripes; i++) {
- device = map->stripes[i].dev;
- dev_offset = map->stripes[i].physical;
+ struct btrfs_device *device = map->stripes[i].dev;
+ const u64 dev_offset = map->stripes[i].physical;
btrfs_set_stack_stripe_devid(stripe, device->devid);
btrfs_set_stack_stripe_offset(stripe, dev_offset);
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
stripe++;
}
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- btrfs_set_stack_chunk_length(chunk, chunk_size);
+ btrfs_set_stack_chunk_length(chunk, bg->length);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
btrfs_set_stack_chunk_type(chunk, map->type);
@@ -5458,15 +5608,18 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.type = BTRFS_CHUNK_ITEM_KEY;
- key.offset = chunk_offset;
+ key.offset = bg->start;
ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
- if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
- /*
- * TODO: Cleanup of inserted chunk root in case of
- * failure.
- */
+ if (ret)
+ goto out;
+
+ bg->chunk_item_inserted = 1;
+
+ if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
+ if (ret)
+ goto out;
}
out:
@@ -5479,16 +5632,41 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
u64 alloc_profile;
- int ret;
+ struct btrfs_block_group *meta_bg;
+ struct btrfs_block_group *sys_bg;
+
+ /*
+ * When adding a new device for sprouting, the seed device is read-only
+ * so we must first allocate a metadata and a system chunk. But before
+ * adding the block group items to the extent, device and chunk btrees,
+	 * we must:
+ *
+ * 1) Create both chunks without doing any changes to the btrees, as
+ * otherwise we would get -ENOSPC since the block groups from the
+ * seed device are read-only;
+ *
+ * 2) Add the device item for the new sprout device - finishing the setup
+ * of a new block group requires updating the device item in the chunk
+ * btree, so it must exist when we attempt to do it. The previous step
+ * ensures this does not fail with -ENOSPC.
+ *
+ * After that we can add the block group items to their btrees:
+	 * update the existing device item in the chunk btree, add a new block group
+ * item to the extent btree, add a new chunk item to the chunk btree and
+ * finally add the new device extent items to the devices btree.
+ */
alloc_profile = btrfs_metadata_alloc_profile(fs_info);
- ret = btrfs_alloc_chunk(trans, alloc_profile);
- if (ret)
- return ret;
+ meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
+ if (IS_ERR(meta_bg))
+ return PTR_ERR(meta_bg);
alloc_profile = btrfs_system_alloc_profile(fs_info);
- ret = btrfs_alloc_chunk(trans, alloc_profile);
- return ret;
+ sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
+ if (IS_ERR(sys_bg))
+ return PTR_ERR(sys_bg);
+
+ return 0;
}
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
@@ -7415,10 +7593,18 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
+
+ /*
+ * We are only called at mount time, so no need to take
+ * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
+			 * we always lock fs_info->chunk_mutex first, before
+ * acquiring any locks on the chunk tree. This is a
+ * requirement for chunk allocation, see the comment on
+ * top of btrfs_chunk_alloc() for details.
+ */
+ ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
- mutex_lock(&fs_info->chunk_mutex);
ret = read_one_chunk(&found_key, leaf, chunk);
- mutex_unlock(&fs_info->chunk_mutex);
if (ret)
goto error;
}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index c7fc7caf575c..55a8ba244716 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -450,7 +450,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
+struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num);
@@ -509,6 +510,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
u64 chunk_offset, u64 chunk_size);
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
u64 logical, u64 length);
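The updated prototypes mirror the phase split described in the comments above: btrfs_alloc_chunk() creates the chunk mapping without modifying the chunk btree, btrfs_chunk_alloc_add_chunk_item() inserts the chunk item while holding chunk_mutex, and btrfs_finish_chunk_alloc() persists the device extent items. A rough sketch of that sequence, assuming the orchestration lives in btrfs_chunk_alloc() in block-group.c (not shown in this diff):

	/* phase 1: create the chunk mapping, no chunk btree modification yet */
	bg = btrfs_alloc_chunk(trans, flags);
	if (IS_ERR(bg))
		return PTR_ERR(bg);

	/* phase 1 as well (phase 2 only for system chunks): add the chunk item */
	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);

	/* phase 2, typically deferred: add the device extent items */
	ret = btrfs_finish_chunk_alloc(trans, bg->start, bg->length);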
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 297c0b1c0634..907c2cc45c9c 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1349,8 +1349,7 @@ void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
return;
ordered->physical = physical;
- ordered->disk = bio->bi_bdev->bd_disk;
- ordered->partno = bio->bi_bdev->bd_partno;
+ ordered->bdev = bio->bi_bdev;
btrfs_put_ordered_extent(ordered);
}
@@ -1362,18 +1361,16 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
struct extent_map_tree *em_tree;
struct extent_map *em;
struct btrfs_ordered_sum *sum;
- struct block_device *bdev;
u64 orig_logical = ordered->disk_bytenr;
u64 *logical = NULL;
int nr, stripe_len;
/* Zoned devices should not have partitions. So, we can assume it is 0 */
- ASSERT(ordered->partno == 0);
- bdev = bdgrab(ordered->disk->part0);
- if (WARN_ON(!bdev))
+ ASSERT(!bdev_is_partition(ordered->bdev));
+ if (WARN_ON(!ordered->bdev))
return;
- if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
+ if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
ordered->physical, &logical, &nr,
&stripe_len)))
goto out;
@@ -1402,7 +1399,6 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
out:
kfree(logical);
- bdput(bdev);
}
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c1570fada3d8..a1e2813731d1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -82,10 +82,6 @@ static int ceph_set_page_dirty(struct page *page)
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
- int ret;
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
if (PageDirty(page)) {
dout("%p set_page_dirty %p idx %lu -- already dirty\n",
@@ -130,11 +126,7 @@ static int ceph_set_page_dirty(struct page *page)
BUG_ON(PagePrivate(page));
attach_page_private(page, snapc);
- ret = __set_page_dirty_nobuffers(page);
- WARN_ON(!PageLocked(page));
- WARN_ON(!page->mapping);
-
- return ret;
+ return __set_page_dirty_nobuffers(page);
}
/*
@@ -226,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
int err = req->r_result;
ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
- req->r_end_latency, err);
+ req->r_end_latency, osd_data->length, err);
dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
subreq->len, i_size_read(req->r_inode));
@@ -313,7 +305,7 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
ceph_put_cap_refs(ci, got);
}
-const struct netfs_read_request_ops ceph_netfs_read_ops = {
+static const struct netfs_read_request_ops ceph_netfs_read_ops = {
.init_rreq = ceph_init_rreq,
.is_cache_enabled = ceph_is_cache_enabled,
.begin_cache_operation = ceph_begin_cache_operation,
@@ -560,7 +552,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
err = ceph_osdc_wait_request(osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
- req->r_end_latency, err);
+ req->r_end_latency, len, err);
ceph_osdc_put_request(req);
if (err == 0)
@@ -635,6 +627,7 @@ static void writepages_finish(struct ceph_osd_request *req)
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ unsigned int len = 0;
bool remove_page;
dout("writepages_finish %p rc %d\n", inode, rc);
@@ -647,9 +640,6 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_clear_error_write(ci);
}
- ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
- req->r_end_latency, rc);
-
/*
* We lost the cache cap, need to truncate the page before
* it is unlocked, otherwise we'd truncate it later in the
@@ -666,6 +656,7 @@ static void writepages_finish(struct ceph_osd_request *req)
osd_data = osd_req_op_extent_osd_data(req, i);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
+ len += osd_data->length;
num_pages = calc_pages_for((u64)osd_data->alignment,
(u64)osd_data->length);
total_pages += num_pages;
@@ -696,6 +687,9 @@ static void writepages_finish(struct ceph_osd_request *req)
release_pages(osd_data->pages, num_pages);
}
+ ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, len, rc);
+
ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
osd_data = osd_req_op_extent_osd_data(req, 0);
@@ -1711,7 +1705,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
- req->r_end_latency, err);
+ req->r_end_latency, len, err);
out_put:
ceph_osdc_put_request(req);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a5e93b185515..2a2900903f8c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -645,9 +645,7 @@ void ceph_add_cap(struct inode *inode,
dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
session->s_mds, cap_id, ceph_cap_string(issued), seq);
- spin_lock(&session->s_gen_ttl_lock);
- gen = session->s_cap_gen;
- spin_unlock(&session->s_gen_ttl_lock);
+ gen = atomic_read(&session->s_cap_gen);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -785,10 +783,8 @@ static int __cap_is_valid(struct ceph_cap *cap)
unsigned long ttl;
u32 gen;
- spin_lock(&cap->session->s_gen_ttl_lock);
- gen = cap->session->s_cap_gen;
+ gen = atomic_read(&cap->session->s_cap_gen);
ttl = cap->session->s_cap_ttl;
- spin_unlock(&cap->session->s_gen_ttl_lock);
if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
dout("__cap_is_valid %p cap %p issued %s "
@@ -1182,7 +1178,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
* s_cap_gen while session is in the reconnect state.
*/
if (queue_release &&
- (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
+ (!session->s_cap_reconnect ||
+ cap->cap_gen == atomic_read(&session->s_cap_gen))) {
cap->queue_release = 1;
if (removed) {
__ceph_queue_cap_release(session, cap);
@@ -1534,7 +1531,7 @@ static inline int __send_flush_snap(struct inode *inode,
* asynchronously back to the MDS once sync writes complete and dirty
* data is written out.
*
- * Called under i_ceph_lock. Takes s_mutex as needed.
+ * Called under i_ceph_lock.
*/
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session *session)
@@ -1656,7 +1653,6 @@ retry:
mds = ci->i_auth_cap->session->s_mds;
if (session && session->s_mds != mds) {
dout(" oops, wrong session %p mutex\n", session);
- mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
session = NULL;
}
@@ -1665,10 +1661,6 @@ retry:
mutex_lock(&mdsc->mutex);
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
- if (session) {
- dout(" inverting session/ino locks on %p\n", session);
- mutex_lock(&session->s_mutex);
- }
goto retry;
}
@@ -1680,12 +1672,10 @@ retry:
out:
spin_unlock(&ci->i_ceph_lock);
- if (psession) {
+ if (psession)
*psession = session;
- } else if (session) {
- mutex_unlock(&session->s_mutex);
+ else
ceph_put_mds_session(session);
- }
/* we flushed them all; remove this inode from the queue */
spin_lock(&mdsc->snap_flush_lock);
list_del_init(&ci->i_snap_flush_item);
@@ -1915,7 +1905,6 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_cap *cap;
u64 flush_tid, oldest_flush_tid;
int file_wanted, used, cap_used;
- int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
int issued, implemented, want, retain, revoking, flushing = 0;
int mds = -1; /* keep track of how far we've gone through i_caps list
to avoid an infinite loop on retry */
@@ -1923,14 +1912,13 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
bool queue_invalidate = false;
bool tried_invalidate = false;
+ if (session)
+ ceph_get_mds_session(session);
+
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
-
- goto retry_locked;
retry:
- spin_lock(&ci->i_ceph_lock);
-retry_locked:
/* Caps wanted by virtue of active open files. */
file_wanted = __ceph_caps_file_wanted(ci);
@@ -2010,7 +1998,7 @@ retry_locked:
ci->i_rdcache_revoking = ci->i_rdcache_gen;
}
tried_invalidate = true;
- goto retry_locked;
+ goto retry;
}
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2024,8 +2012,6 @@ retry_locked:
((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
continue;
- /* NOTE: no side-effects allowed, until we take s_mutex */
-
/*
* If we have an auth cap, we don't need to consider any
* overlapping caps as used.
@@ -2088,37 +2074,8 @@ retry_locked:
continue; /* nope, all good */
ack:
- if (session && session != cap->session) {
- dout("oops, wrong session %p mutex\n", session);
- mutex_unlock(&session->s_mutex);
- session = NULL;
- }
- if (!session) {
- session = cap->session;
- if (mutex_trylock(&session->s_mutex) == 0) {
- dout("inverting session/ino locks on %p\n",
- session);
- session = ceph_get_mds_session(session);
- spin_unlock(&ci->i_ceph_lock);
- if (took_snap_rwsem) {
- up_read(&mdsc->snap_rwsem);
- took_snap_rwsem = 0;
- }
- if (session) {
- mutex_lock(&session->s_mutex);
- ceph_put_mds_session(session);
- } else {
- /*
- * Because we take the reference while
- * holding the i_ceph_lock, it should
- * never be NULL. Throw a warning if it
- * ever is.
- */
- WARN_ON_ONCE(true);
- }
- goto retry;
- }
- }
+ ceph_put_mds_session(session);
+ session = ceph_get_mds_session(cap->session);
/* kick flushing and flush snaps before sending normal
* cap message */
@@ -2130,20 +2087,7 @@ ack:
if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
__ceph_flush_snaps(ci, session);
- goto retry_locked;
- }
-
- /* take snap_rwsem after session mutex */
- if (!took_snap_rwsem) {
- if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
- dout("inverting snap/in locks on %p\n",
- inode);
- spin_unlock(&ci->i_ceph_lock);
- down_read(&mdsc->snap_rwsem);
- took_snap_rwsem = 1;
- goto retry;
- }
- took_snap_rwsem = 1;
+ goto retry;
}
if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
@@ -2165,9 +2109,10 @@ ack:
__prep_cap(&arg, cap, CEPH_CAP_OP_UPDATE, mflags, cap_used,
want, retain, flushing, flush_tid, oldest_flush_tid);
- spin_unlock(&ci->i_ceph_lock);
+ spin_unlock(&ci->i_ceph_lock);
__send_cap(&arg, ci);
+ spin_lock(&ci->i_ceph_lock);
goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
@@ -2182,13 +2127,9 @@ ack:
spin_unlock(&ci->i_ceph_lock);
+ ceph_put_mds_session(session);
if (queue_invalidate)
ceph_queue_invalidate(inode);
-
- if (session)
- mutex_unlock(&session->s_mutex);
- if (took_snap_rwsem)
- up_read(&mdsc->snap_rwsem);
}
/*
@@ -2198,26 +2139,17 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_session *session = NULL;
int flushing = 0;
u64 flush_tid = 0, oldest_flush_tid = 0;
-retry:
spin_lock(&ci->i_ceph_lock);
retry_locked:
if (ci->i_dirty_caps && ci->i_auth_cap) {
struct ceph_cap *cap = ci->i_auth_cap;
struct cap_msg_args arg;
+ struct ceph_mds_session *session = cap->session;
- if (session != cap->session) {
- spin_unlock(&ci->i_ceph_lock);
- if (session)
- mutex_unlock(&session->s_mutex);
- session = cap->session;
- mutex_lock(&session->s_mutex);
- goto retry;
- }
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+ if (session->s_state < CEPH_MDS_SESSION_OPEN) {
spin_unlock(&ci->i_ceph_lock);
goto out;
}
@@ -2254,9 +2186,6 @@ retry_locked:
spin_unlock(&ci->i_ceph_lock);
}
out:
- if (session)
- mutex_unlock(&session->s_mutex);
-
*ptid = flush_tid;
return flushing;
}
@@ -3213,8 +3142,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
if (complete_capsnap)
wake_up_all(&ci->i_cap_wq);
while (put-- > 0) {
- /* avoid calling iput_final() in osd dispatch threads */
- ceph_async_iput(inode);
+ iput(inode);
}
}
@@ -3288,7 +3216,7 @@ static void handle_cap_grant(struct inode *inode,
u64 size = le64_to_cpu(grant->size);
u64 max_size = le64_to_cpu(grant->max_size);
unsigned char check_caps = 0;
- bool was_stale = cap->cap_gen < session->s_cap_gen;
+ bool was_stale = cap->cap_gen < atomic_read(&session->s_cap_gen);
bool wake = false;
bool writeback = false;
bool queue_trunc = false;
@@ -3340,7 +3268,7 @@ static void handle_cap_grant(struct inode *inode,
}
/* side effects now are allowed */
- cap->cap_gen = session->s_cap_gen;
+ cap->cap_gen = atomic_read(&session->s_cap_gen);
cap->seq = seq;
__check_cap_issue(ci, cap, newcaps);
@@ -3553,13 +3481,12 @@ static void handle_cap_grant(struct inode *inode,
if (wake)
wake_up_all(&ci->i_cap_wq);
+ mutex_unlock(&session->s_mutex);
if (check_caps == 1)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL,
session);
else if (check_caps == 2)
ceph_check_caps(ci, CHECK_CAPS_NOINVAL, session);
- else
- mutex_unlock(&session->s_mutex);
}
/*
@@ -4203,8 +4130,7 @@ done:
mutex_unlock(&session->s_mutex);
done_unlocked:
ceph_put_string(extra_info.pool_ns);
- /* avoid calling iput_final() in mds dispatch threads */
- ceph_async_iput(inode);
+ iput(inode);
return;
flush_cap_releases:
@@ -4224,11 +4150,19 @@ bad:
/*
* Delayed work handler to process end of delayed cap release LRU list.
+ *
+ * If new caps are added to the list while processing it, they won't get
+ * processed in this run. In this case, ci->i_hold_caps_max will be
+ * returned so that the work can be scheduled accordingly.
*/
-void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
+unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
struct inode *inode;
struct ceph_inode_info *ci;
+ struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+ unsigned long delay_max = opt->caps_wanted_delay_max * HZ;
+ unsigned long loop_start = jiffies;
+ unsigned long delay = 0;
dout("check_delayed_caps\n");
spin_lock(&mdsc->cap_delay_lock);
@@ -4236,6 +4170,11 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
ci = list_first_entry(&mdsc->cap_delay_list,
struct ceph_inode_info,
i_cap_delay_list);
+ if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
+ dout("%s caps added recently. Exiting loop", __func__);
+ delay = ci->i_hold_caps_max;
+ break;
+ }
if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
time_before(jiffies, ci->i_hold_caps_max))
break;
@@ -4246,12 +4185,13 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
spin_unlock(&mdsc->cap_delay_lock);
dout("check_delayed_caps on %p\n", inode);
ceph_check_caps(ci, 0, NULL);
- /* avoid calling iput_final() in tick thread */
- ceph_async_iput(inode);
+ iput(inode);
spin_lock(&mdsc->cap_delay_lock);
}
}
spin_unlock(&mdsc->cap_delay_lock);
+
+ return delay;
}
/*
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 425f3356332a..38b78b45811f 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -127,7 +127,7 @@ static int mdsc_show(struct seq_file *s, void *p)
return 0;
}
-#define CEPH_METRIC_SHOW(name, total, avg, min, max, sq) { \
+#define CEPH_LAT_METRIC_SHOW(name, total, avg, min, max, sq) { \
s64 _total, _avg, _min, _max, _sq, _st; \
_avg = ktime_to_us(avg); \
_min = ktime_to_us(min == KTIME_MAX ? 0 : min); \
@@ -140,6 +140,12 @@ static int mdsc_show(struct seq_file *s, void *p)
name, total, _avg, _min, _max, _st); \
}
+#define CEPH_SZ_METRIC_SHOW(name, total, avg, min, max, sum) { \
+ u64 _min = min == U64_MAX ? 0 : min; \
+ seq_printf(s, "%-14s%-12lld%-16llu%-16llu%-16llu%llu\n", \
+ name, total, avg, _min, max, sum); \
+}
+
static int metric_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
@@ -147,6 +153,7 @@ static int metric_show(struct seq_file *s, void *p)
struct ceph_client_metric *m = &mdsc->metric;
int nr_caps = 0;
s64 total, sum, avg, min, max, sq;
+ u64 sum_sz, avg_sz, min_sz, max_sz;
sum = percpu_counter_sum(&m->total_inodes);
seq_printf(s, "item total\n");
@@ -170,7 +177,7 @@ static int metric_show(struct seq_file *s, void *p)
max = m->read_latency_max;
sq = m->read_latency_sq_sum;
spin_unlock(&m->read_metric_lock);
- CEPH_METRIC_SHOW("read", total, avg, min, max, sq);
+ CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
spin_lock(&m->write_metric_lock);
total = m->total_writes;
@@ -180,7 +187,7 @@ static int metric_show(struct seq_file *s, void *p)
max = m->write_latency_max;
sq = m->write_latency_sq_sum;
spin_unlock(&m->write_metric_lock);
- CEPH_METRIC_SHOW("write", total, avg, min, max, sq);
+ CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
spin_lock(&m->metadata_metric_lock);
total = m->total_metadatas;
@@ -190,7 +197,29 @@ static int metric_show(struct seq_file *s, void *p)
max = m->metadata_latency_max;
sq = m->metadata_latency_sq_sum;
spin_unlock(&m->metadata_metric_lock);
- CEPH_METRIC_SHOW("metadata", total, avg, min, max, sq);
+ CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
+
+ seq_printf(s, "\n");
+ seq_printf(s, "item total avg_sz(bytes) min_sz(bytes) max_sz(bytes) total_sz(bytes)\n");
+ seq_printf(s, "----------------------------------------------------------------------------------------\n");
+
+ spin_lock(&m->read_metric_lock);
+ total = m->total_reads;
+ sum_sz = m->read_size_sum;
+ avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
+ min_sz = m->read_size_min;
+ max_sz = m->read_size_max;
+ spin_unlock(&m->read_metric_lock);
+ CEPH_SZ_METRIC_SHOW("read", total, avg_sz, min_sz, max_sz, sum_sz);
+
+ spin_lock(&m->write_metric_lock);
+ total = m->total_writes;
+ sum_sz = m->write_size_sum;
+ avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
+ min_sz = m->write_size_min;
+ max_sz = m->write_size_max;
+ spin_unlock(&m->write_metric_lock);
+ CEPH_SZ_METRIC_SHOW("write", total, avg_sz, min_sz, max_sz, sum_sz);
seq_printf(s, "\n");
seq_printf(s, "item total miss hit\n");
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 9ba79b6531fb..133dbd9338e7 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -788,6 +788,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
mask |= CEPH_CAP_XATTR_SHARED;
req->r_args.getattr.mask = cpu_to_le32(mask);
+ ihold(dir);
req->r_parent = dir;
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
err = ceph_mdsc_do_request(mdsc, NULL, req);
@@ -868,6 +869,7 @@ static int ceph_mknod(struct user_namespace *mnt_userns, struct inode *dir,
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
+ ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
@@ -929,6 +931,8 @@ static int ceph_symlink(struct user_namespace *mnt_userns, struct inode *dir,
goto out;
}
req->r_parent = dir;
+ ihold(dir);
+
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
@@ -993,6 +997,7 @@ static int ceph_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
+ ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_args.mkdir.mode = cpu_to_le32(mode);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
@@ -1037,6 +1042,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
req->r_num_caps = 2;
req->r_old_dentry = dget(old_dentry);
req->r_parent = dir;
+ ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
@@ -1158,6 +1164,7 @@ retry:
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
+ ihold(dir);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
@@ -1232,6 +1239,7 @@ static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
req->r_old_dentry = dget(old_dentry);
req->r_old_dentry_dir = old_dir;
req->r_parent = new_dir;
+ ihold(new_dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
@@ -1548,10 +1556,8 @@ static bool __dentry_lease_is_valid(struct ceph_dentry_info *di)
u32 gen;
unsigned long ttl;
- spin_lock(&session->s_gen_ttl_lock);
- gen = session->s_cap_gen;
+ gen = atomic_read(&session->s_cap_gen);
ttl = session->s_cap_ttl;
- spin_unlock(&session->s_gen_ttl_lock);
if (di->lease_gen == gen &&
time_before(jiffies, ttl) &&
@@ -1730,6 +1736,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
+ ihold(dir);
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
@@ -1809,8 +1816,7 @@ static void ceph_d_release(struct dentry *dentry)
dentry->d_fsdata = NULL;
spin_unlock(&dentry->d_lock);
- if (di->lease_session)
- ceph_put_mds_session(di->lease_session);
+ ceph_put_mds_session(di->lease_session);
kmem_cache_free(ceph_dentry_cachep, di);
}
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 65540a4429b2..1d65934c1262 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -542,6 +542,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
ihold(inode);
req->r_ino2 = ceph_vino(d_inode(parent));
req->r_parent = d_inode(parent);
+ ihold(req->r_parent);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d51af3698032..d1755ac1d964 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -706,6 +706,7 @@ retry:
mask |= CEPH_CAP_XATTR_SHARED;
req->r_args.open.mask = cpu_to_le32(mask);
req->r_parent = dir;
+ ihold(dir);
if (flags & O_CREAT) {
struct ceph_file_layout lo;
@@ -903,7 +904,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ceph_update_read_metrics(&fsc->mdsc->metric,
req->r_start_latency,
req->r_end_latency,
- ret);
+ len, ret);
ceph_osdc_put_request(req);
@@ -1035,12 +1036,12 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct ceph_aio_request *aio_req = req->r_priv;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
+ unsigned int len = osd_data->bvec_pos.iter.bi_size;
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
- dout("ceph_aio_complete_req %p rc %d bytes %u\n",
- inode, rc, osd_data->bvec_pos.iter.bi_size);
+ dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
@@ -1058,9 +1059,9 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
} else if (!aio_req->write) {
if (rc == -ENOENT)
rc = 0;
- if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
+ if (rc >= 0 && len > rc) {
struct iov_iter i;
- int zlen = osd_data->bvec_pos.iter.bi_size - rc;
+ int zlen = len - rc;
/*
* If read is satisfied by single OSD request,
@@ -1077,8 +1078,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
}
iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
- osd_data->num_bvecs,
- osd_data->bvec_pos.iter.bi_size);
+ osd_data->num_bvecs, len);
iov_iter_advance(&i, rc);
iov_iter_zero(zlen, &i);
}
@@ -1088,10 +1088,10 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
if (req->r_start_latency) {
if (aio_req->write)
ceph_update_write_metrics(metric, req->r_start_latency,
- req->r_end_latency, rc);
+ req->r_end_latency, len, rc);
else
ceph_update_read_metrics(metric, req->r_start_latency,
- req->r_end_latency, rc);
+ req->r_end_latency, len, rc);
}
put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
@@ -1299,10 +1299,10 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (write)
ceph_update_write_metrics(metric, req->r_start_latency,
- req->r_end_latency, ret);
+ req->r_end_latency, len, ret);
else
ceph_update_read_metrics(metric, req->r_start_latency,
- req->r_end_latency, ret);
+ req->r_end_latency, len, ret);
size = i_size_read(inode);
if (!write) {
@@ -1476,7 +1476,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
- req->r_end_latency, ret);
+ req->r_end_latency, len, ret);
out:
ceph_osdc_put_request(req);
if (ret != 0) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index df0c8a724609..1bd2cc015913 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1124,7 +1124,7 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
return;
}
- if (di->lease_gen == session->s_cap_gen &&
+ if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
time_before(ttl, di->time))
return; /* we already have a newer lease. */
@@ -1135,7 +1135,7 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
if (!di->lease_session)
di->lease_session = ceph_get_mds_session(session);
- di->lease_gen = session->s_cap_gen;
+ di->lease_gen = atomic_read(&session->s_cap_gen);
di->lease_seq = le32_to_cpu(lease->seq);
di->lease_renew_after = half_ttl;
di->lease_renew_from = 0;
@@ -1154,8 +1154,7 @@ static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
__update_dentry_lease(dir, dentry, lease, session, from_time,
&old_lease_session);
spin_unlock(&dentry->d_lock);
- if (old_lease_session)
- ceph_put_mds_session(old_lease_session);
+ ceph_put_mds_session(old_lease_session);
}
/*
@@ -1200,8 +1199,7 @@ static void update_dentry_lease_careful(struct dentry *dentry,
from_time, &old_lease_session);
out_unlock:
spin_unlock(&dentry->d_lock);
- if (old_lease_session)
- ceph_put_mds_session(old_lease_session);
+ ceph_put_mds_session(old_lease_session);
}
/*
@@ -1568,8 +1566,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
unlock_new_inode(in);
}
- /* avoid calling iput_final() in mds dispatch threads */
- ceph_async_iput(in);
+ iput(in);
}
return err;
@@ -1766,13 +1763,11 @@ retry_lookup:
if (ret < 0) {
pr_err("ceph_fill_inode badness on %p\n", in);
if (d_really_is_negative(dn)) {
- /* avoid calling iput_final() in mds
- * dispatch threads */
if (in->i_state & I_NEW) {
ihold(in);
discard_new_inode(in);
}
- ceph_async_iput(in);
+ iput(in);
}
d_drop(dn);
err = ret;
@@ -1785,7 +1780,7 @@ retry_lookup:
if (ceph_security_xattr_deadlock(in)) {
dout(" skip splicing dn %p to inode %p"
" (security xattr deadlock)\n", dn, in);
- ceph_async_iput(in);
+ iput(in);
skipped++;
goto next_item;
}
@@ -1836,25 +1831,6 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size)
return ret;
}
-/*
- * Put reference to inode, but avoid calling iput_final() in current thread.
- * iput_final() may wait for reahahead pages. The wait can cause deadlock in
- * some contexts.
- */
-void ceph_async_iput(struct inode *inode)
-{
- if (!inode)
- return;
- for (;;) {
- if (atomic_add_unless(&inode->i_count, -1, 1))
- break;
- if (queue_work(ceph_inode_to_client(inode)->inode_wq,
- &ceph_inode(inode)->i_work))
- break;
- /* queue work failed, i_count must be at least 2 */
- }
-}
-
void ceph_queue_inode_work(struct inode *inode, int work_bit)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e5af591d3bd4..afdc20213876 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -664,6 +664,9 @@ struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
void ceph_put_mds_session(struct ceph_mds_session *s)
{
+ if (IS_ERR_OR_NULL(s))
+ return;
+
dout("mdsc put_session %p %d -> %d\n", s,
refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
if (refcount_dec_and_test(&s->s_ref)) {
@@ -746,8 +749,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
- spin_lock_init(&s->s_gen_ttl_lock);
- s->s_cap_gen = 1;
+ atomic_set(&s->s_cap_gen, 1);
s->s_cap_ttl = jiffies - 1;
spin_lock_init(&s->s_cap_lock);
@@ -822,14 +824,13 @@ void ceph_mdsc_release_request(struct kref *kref)
ceph_msg_put(req->r_reply);
if (req->r_inode) {
ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
- /* avoid calling iput_final() in mds dispatch threads */
- ceph_async_iput(req->r_inode);
+ iput(req->r_inode);
}
if (req->r_parent) {
ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
- ceph_async_iput(req->r_parent);
+ iput(req->r_parent);
}
- ceph_async_iput(req->r_target_inode);
+ iput(req->r_target_inode);
if (req->r_dentry)
dput(req->r_dentry);
if (req->r_old_dentry)
@@ -843,7 +844,7 @@ void ceph_mdsc_release_request(struct kref *kref)
*/
ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
- ceph_async_iput(req->r_old_dentry_dir);
+ iput(req->r_old_dentry_dir);
}
kfree(req->r_path1);
kfree(req->r_path2);
@@ -958,8 +959,7 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
}
if (req->r_unsafe_dir) {
- /* avoid calling iput_final() in mds dispatch threads */
- ceph_async_iput(req->r_unsafe_dir);
+ iput(req->r_unsafe_dir);
req->r_unsafe_dir = NULL;
}
@@ -1130,7 +1130,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
if (!cap) {
spin_unlock(&ci->i_ceph_lock);
- ceph_async_iput(inode);
+ iput(inode);
goto random;
}
mds = cap->session->s_mds;
@@ -1139,9 +1139,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
cap == ci->i_auth_cap ? "auth " : "", cap);
spin_unlock(&ci->i_ceph_lock);
out:
- /* avoid calling iput_final() while holding mdsc->mutex or
- * in mds dispatch threads */
- ceph_async_iput(inode);
+ iput(inode);
return mds;
random:
@@ -1438,8 +1436,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
for (i = 0; i < mi->num_export_targets; i++) {
ts = __open_export_target_session(mdsc, mi->export_targets[i]);
- if (!IS_ERR(ts))
- ceph_put_mds_session(ts);
+ ceph_put_mds_session(ts);
}
}
@@ -1545,9 +1542,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
spin_unlock(&session->s_cap_lock);
if (last_inode) {
- /* avoid calling iput_final() while holding
- * s_mutex or in mds dispatch threads */
- ceph_async_iput(last_inode);
+ iput(last_inode);
last_inode = NULL;
}
if (old_cap) {
@@ -1581,7 +1576,7 @@ out:
session->s_cap_iterator = NULL;
spin_unlock(&session->s_cap_lock);
- ceph_async_iput(last_inode);
+ iput(last_inode);
if (old_cap)
ceph_put_cap(session->s_mdsc, old_cap);
@@ -1721,8 +1716,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
spin_unlock(&session->s_cap_lock);
inode = ceph_find_inode(sb, vino);
- /* avoid calling iput_final() while holding s_mutex */
- ceph_async_iput(inode);
+ iput(inode);
spin_lock(&session->s_cap_lock);
}
@@ -1761,7 +1755,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
} else if (ev == RENEWCAPS) {
- if (cap->cap_gen < cap->session->s_cap_gen) {
+ if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
/* mds did not re-issue stale cap */
spin_lock(&ci->i_ceph_lock);
cap->issued = cap->implemented = CEPH_CAP_PIN;
@@ -2988,7 +2982,6 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
__ceph_touch_fmode(ci, mdsc, fmode);
spin_unlock(&ci->i_ceph_lock);
- ihold(req->r_parent);
}
if (req->r_old_dentry_dir)
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
@@ -3499,10 +3492,8 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_STALE:
pr_info("mds%d caps went stale, renewing\n",
session->s_mds);
- spin_lock(&session->s_gen_ttl_lock);
- session->s_cap_gen++;
+ atomic_inc(&session->s_cap_gen);
session->s_cap_ttl = jiffies - 1;
- spin_unlock(&session->s_gen_ttl_lock);
send_renew_caps(mdsc, session);
break;
@@ -3771,7 +3762,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
cap->mseq = 0; /* and migrate_seq */
- cap->cap_gen = cap->session->s_cap_gen;
+ cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
/* These are lost when the session goes away */
if (S_ISDIR(inode->i_mode)) {
@@ -4011,9 +4002,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
dout("session %p state %s\n", session,
ceph_session_state_name(session->s_state));
- spin_lock(&session->s_gen_ttl_lock);
- session->s_cap_gen++;
- spin_unlock(&session->s_gen_ttl_lock);
+ atomic_inc(&session->s_cap_gen);
spin_lock(&session->s_cap_lock);
/* don't know if session is readonly */
@@ -4344,7 +4333,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
case CEPH_MDS_LEASE_RENEW:
if (di->lease_session == session &&
- di->lease_gen == session->s_cap_gen &&
+ di->lease_gen == atomic_read(&session->s_cap_gen) &&
di->lease_renew_from &&
di->lease_renew_after == 0) {
unsigned long duration =
@@ -4372,8 +4361,7 @@ release:
out:
mutex_unlock(&session->s_mutex);
- /* avoid calling iput_final() in mds dispatch threads */
- ceph_async_iput(inode);
+ iput(inode);
return;
bad:
@@ -4468,7 +4456,7 @@ bool check_session_state(struct ceph_mds_session *s)
break;
case CEPH_MDS_SESSION_CLOSING:
/* Should never reach this when we're unmounting */
- WARN_ON_ONCE(true);
+ WARN_ON_ONCE(s->s_ttl);
fallthrough;
case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING:
@@ -4502,22 +4490,29 @@ void inc_session_sequence(struct ceph_mds_session *s)
}
/*
- * delayed work -- periodically trim expired leases, renew caps with mds
+ * delayed work -- periodically trim expired leases, renew caps with mds. If
+ * the @delay parameter is set to 0 or if it's more than 5 secs, the default
+ * workqueue delay value of 5 secs will be used.
*/
-static void schedule_delayed(struct ceph_mds_client *mdsc)
+static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
{
- int delay = 5;
- unsigned hz = round_jiffies_relative(HZ * delay);
- schedule_delayed_work(&mdsc->delayed_work, hz);
+ unsigned long max_delay = HZ * 5;
+
+ /* 5 secs default delay */
+ if (!delay || (delay > max_delay))
+ delay = max_delay;
+ schedule_delayed_work(&mdsc->delayed_work,
+ round_jiffies_relative(delay));
}
static void delayed_work(struct work_struct *work)
{
- int i;
struct ceph_mds_client *mdsc =
container_of(work, struct ceph_mds_client, delayed_work.work);
+ unsigned long delay;
int renew_interval;
int renew_caps;
+ int i;
dout("mdsc delayed_work\n");
@@ -4557,7 +4552,7 @@ static void delayed_work(struct work_struct *work)
}
mutex_unlock(&mdsc->mutex);
- ceph_check_delayed_caps(mdsc);
+ delay = ceph_check_delayed_caps(mdsc);
ceph_queue_cap_reclaim_work(mdsc);
@@ -4565,7 +4560,7 @@ static void delayed_work(struct work_struct *work)
maybe_recover_session(mdsc);
- schedule_delayed(mdsc);
+ schedule_delayed(mdsc, delay);
}
int ceph_mdsc_init(struct ceph_fs_client *fsc)
@@ -5042,7 +5037,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
- schedule_delayed(mdsc);
+ schedule_delayed(mdsc, 0);
return;
bad_unlock:
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 15c11a0f2caf..20e42d8b66c6 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -186,10 +186,8 @@ struct ceph_mds_session {
struct ceph_auth_handshake s_auth;
- /* protected by s_gen_ttl_lock */
- spinlock_t s_gen_ttl_lock;
- u32 s_cap_gen; /* inc each time we get mds stale msg */
- unsigned long s_cap_ttl; /* when session caps expire */
+ atomic_t s_cap_gen; /* inc each time we get mds stale msg */
+ unsigned long s_cap_ttl; /* when session caps expire. protected by s_mutex */
/* protected by s_cap_lock */
spinlock_t s_cap_lock;
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 28b6b42ad677..5ac151eb0d49 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -20,8 +20,11 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
struct ceph_opened_files *files;
struct ceph_pinned_icaps *icaps;
struct ceph_opened_inodes *inodes;
+ struct ceph_read_io_size *rsize;
+ struct ceph_write_io_size *wsize;
struct ceph_client_metric *m = &mdsc->metric;
u64 nr_caps = atomic64_read(&m->total_caps);
+ u32 header_len = sizeof(struct ceph_metric_header);
struct ceph_msg *msg;
struct timespec64 ts;
s64 sum;
@@ -30,7 +33,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
+ sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
- + sizeof(*icaps) + sizeof(*inodes);
+ + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
+ + sizeof(*wsize);
msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
if (!msg) {
@@ -43,10 +47,10 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the cap metric */
cap = (struct ceph_metric_cap *)(head + 1);
- cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
- cap->ver = 1;
- cap->compat = 1;
- cap->data_len = cpu_to_le32(sizeof(*cap) - 10);
+ cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
+ cap->header.ver = 1;
+ cap->header.compat = 1;
+ cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
cap->total = cpu_to_le64(nr_caps);
@@ -54,10 +58,10 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the read latency metric */
read = (struct ceph_metric_read_latency *)(cap + 1);
- read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
- read->ver = 1;
- read->compat = 1;
- read->data_len = cpu_to_le32(sizeof(*read) - 10);
+ read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
+ read->header.ver = 1;
+ read->header.compat = 1;
+ read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
sum = m->read_latency_sum;
jiffies_to_timespec64(sum, &ts);
read->sec = cpu_to_le32(ts.tv_sec);
@@ -66,10 +70,10 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the write latency metric */
write = (struct ceph_metric_write_latency *)(read + 1);
- write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
- write->ver = 1;
- write->compat = 1;
- write->data_len = cpu_to_le32(sizeof(*write) - 10);
+ write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
+ write->header.ver = 1;
+ write->header.compat = 1;
+ write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
sum = m->write_latency_sum;
jiffies_to_timespec64(sum, &ts);
write->sec = cpu_to_le32(ts.tv_sec);
@@ -78,10 +82,10 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the metadata latency metric */
meta = (struct ceph_metric_metadata_latency *)(write + 1);
- meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
- meta->ver = 1;
- meta->compat = 1;
- meta->data_len = cpu_to_le32(sizeof(*meta) - 10);
+ meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
+ meta->header.ver = 1;
+ meta->header.compat = 1;
+ meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
sum = m->metadata_latency_sum;
jiffies_to_timespec64(sum, &ts);
meta->sec = cpu_to_le32(ts.tv_sec);
@@ -90,10 +94,10 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the dentry lease metric */
dlease = (struct ceph_metric_dlease *)(meta + 1);
- dlease->type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
- dlease->ver = 1;
- dlease->compat = 1;
- dlease->data_len = cpu_to_le32(sizeof(*dlease) - 10);
+ dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
+ dlease->header.ver = 1;
+ dlease->header.compat = 1;
+ dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
@@ -103,34 +107,54 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
/* encode the opened files metric */
files = (struct ceph_opened_files *)(dlease + 1);
- files->type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
- files->ver = 1;
- files->compat = 1;
- files->data_len = cpu_to_le32(sizeof(*files) - 10);
+ files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
+ files->header.ver = 1;
+ files->header.compat = 1;
+ files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
files->total = cpu_to_le64(sum);
items++;
/* encode the pinned icaps metric */
icaps = (struct ceph_pinned_icaps *)(files + 1);
- icaps->type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
- icaps->ver = 1;
- icaps->compat = 1;
- icaps->data_len = cpu_to_le32(sizeof(*icaps) - 10);
+ icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
+ icaps->header.ver = 1;
+ icaps->header.compat = 1;
+ icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
icaps->pinned_icaps = cpu_to_le64(nr_caps);
icaps->total = cpu_to_le64(sum);
items++;
/* encode the opened inodes metric */
inodes = (struct ceph_opened_inodes *)(icaps + 1);
- inodes->type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
- inodes->ver = 1;
- inodes->compat = 1;
- inodes->data_len = cpu_to_le32(sizeof(*inodes) - 10);
+ inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
+ inodes->header.ver = 1;
+ inodes->header.compat = 1;
+ inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
inodes->total = cpu_to_le64(sum);
items++;
+ /* encode the read io size metric */
+ rsize = (struct ceph_read_io_size *)(inodes + 1);
+ rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
+ rsize->header.ver = 1;
+ rsize->header.compat = 1;
+ rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
+ rsize->total_ops = cpu_to_le64(m->total_reads);
+ rsize->total_size = cpu_to_le64(m->read_size_sum);
+ items++;
+
+ /* encode the write io size metric */
+ wsize = (struct ceph_write_io_size *)(rsize + 1);
+ wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
+ wsize->header.ver = 1;
+ wsize->header.compat = 1;
+ wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
+ wsize->total_ops = cpu_to_le64(m->total_writes);
+ wsize->total_size = cpu_to_le64(m->write_size_sum);
+ items++;
+
put_unaligned_le32(items, &head->num);
msg->front.iov_len = len;
msg->hdr.version = cpu_to_le16(1);
@@ -225,6 +249,9 @@ int ceph_metric_init(struct ceph_client_metric *m)
m->read_latency_max = 0;
m->total_reads = 0;
m->read_latency_sum = 0;
+ m->read_size_min = U64_MAX;
+ m->read_size_max = 0;
+ m->read_size_sum = 0;
spin_lock_init(&m->write_metric_lock);
m->write_latency_sq_sum = 0;
@@ -232,6 +259,9 @@ int ceph_metric_init(struct ceph_client_metric *m)
m->write_latency_max = 0;
m->total_writes = 0;
m->write_latency_sum = 0;
+ m->write_size_min = U64_MAX;
+ m->write_size_max = 0;
+ m->write_size_sum = 0;
spin_lock_init(&m->metadata_metric_lock);
m->metadata_latency_sq_sum = 0;
@@ -281,23 +311,21 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
cancel_delayed_work_sync(&m->delayed_work);
- if (m->session)
- ceph_put_mds_session(m->session);
+ ceph_put_mds_session(m->session);
}
-static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
- ktime_t *min, ktime_t *max,
- ktime_t *sq_sump, ktime_t lat)
-{
- ktime_t total, avg, sq, lsum;
-
- total = ++(*totalp);
- lsum = (*lsump += lat);
+#define METRIC_UPDATE_MIN_MAX(min, max, new) \
+{ \
+ if (unlikely(new < min)) \
+ min = new; \
+ if (unlikely(new > max)) \
+ max = new; \
+}
- if (unlikely(lat < *min))
- *min = lat;
- if (unlikely(lat > *max))
- *max = lat;
+static inline void __update_stdev(ktime_t total, ktime_t lsum,
+ ktime_t *sq_sump, ktime_t lat)
+{
+ ktime_t avg, sq;
if (unlikely(total == 1))
return;
@@ -312,33 +340,51 @@ static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
void ceph_update_read_metrics(struct ceph_client_metric *m,
ktime_t r_start, ktime_t r_end,
- int rc)
+ unsigned int size, int rc)
{
ktime_t lat = ktime_sub(r_end, r_start);
+ ktime_t total;
if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
return;
spin_lock(&m->read_metric_lock);
- __update_latency(&m->total_reads, &m->read_latency_sum,
- &m->read_latency_min, &m->read_latency_max,
- &m->read_latency_sq_sum, lat);
+ total = ++m->total_reads;
+ m->read_size_sum += size;
+ m->read_latency_sum += lat;
+ METRIC_UPDATE_MIN_MAX(m->read_size_min,
+ m->read_size_max,
+ size);
+ METRIC_UPDATE_MIN_MAX(m->read_latency_min,
+ m->read_latency_max,
+ lat);
+ __update_stdev(total, m->read_latency_sum,
+ &m->read_latency_sq_sum, lat);
spin_unlock(&m->read_metric_lock);
}
void ceph_update_write_metrics(struct ceph_client_metric *m,
ktime_t r_start, ktime_t r_end,
- int rc)
+ unsigned int size, int rc)
{
ktime_t lat = ktime_sub(r_end, r_start);
+ ktime_t total;
if (unlikely(rc && rc != -ETIMEDOUT))
return;
spin_lock(&m->write_metric_lock);
- __update_latency(&m->total_writes, &m->write_latency_sum,
- &m->write_latency_min, &m->write_latency_max,
- &m->write_latency_sq_sum, lat);
+ total = ++m->total_writes;
+ m->write_size_sum += size;
+ m->write_latency_sum += lat;
+ METRIC_UPDATE_MIN_MAX(m->write_size_min,
+ m->write_size_max,
+ size);
+ METRIC_UPDATE_MIN_MAX(m->write_latency_min,
+ m->write_latency_max,
+ lat);
+ __update_stdev(total, m->write_latency_sum,
+ &m->write_latency_sq_sum, lat);
spin_unlock(&m->write_metric_lock);
}
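The open-coded min/max updates in the latency helpers are replaced by the METRIC_UPDATE_MIN_MAX macro above. As an illustration, a call such as METRIC_UPDATE_MIN_MAX(m->read_size_min, m->read_size_max, size) expands roughly to:

	if (unlikely(size < m->read_size_min))
		m->read_size_min = size;
	if (unlikely(size > m->read_size_max))
		m->read_size_max = size;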
@@ -347,13 +393,18 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
int rc)
{
ktime_t lat = ktime_sub(r_end, r_start);
+ ktime_t total;
if (unlikely(rc && rc != -ENOENT))
return;
spin_lock(&m->metadata_metric_lock);
- __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
- &m->metadata_latency_min, &m->metadata_latency_max,
- &m->metadata_latency_sq_sum, lat);
+ total = ++m->total_metadatas;
+ m->metadata_latency_sum += lat;
+ METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
+ m->metadata_latency_max,
+ lat);
+ __update_stdev(total, m->metadata_latency_sum,
+ &m->metadata_latency_sq_sum, lat);
spin_unlock(&m->metadata_metric_lock);
}
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
index e984eb2bb14b..0133955a3c6a 100644
--- a/fs/ceph/metric.h
+++ b/fs/ceph/metric.h
@@ -17,8 +17,10 @@ enum ceph_metric_type {
CLIENT_METRIC_TYPE_OPENED_FILES,
CLIENT_METRIC_TYPE_PINNED_ICAPS,
CLIENT_METRIC_TYPE_OPENED_INODES,
+ CLIENT_METRIC_TYPE_READ_IO_SIZES,
+ CLIENT_METRIC_TYPE_WRITE_IO_SIZES,
- CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_OPENED_INODES,
+ CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_WRITE_IO_SIZES,
};
/*
@@ -34,18 +36,22 @@ enum ceph_metric_type {
CLIENT_METRIC_TYPE_OPENED_FILES, \
CLIENT_METRIC_TYPE_PINNED_ICAPS, \
CLIENT_METRIC_TYPE_OPENED_INODES, \
+ CLIENT_METRIC_TYPE_READ_IO_SIZES, \
+ CLIENT_METRIC_TYPE_WRITE_IO_SIZES, \
\
CLIENT_METRIC_TYPE_MAX, \
}
-/* metric caps header */
-struct ceph_metric_cap {
+struct ceph_metric_header {
__le32 type; /* ceph metric type */
-
__u8 ver;
__u8 compat;
-
__le32 data_len; /* length of sizeof(hit + mis + total) */
+} __packed;
+
+/* metric caps header */
+struct ceph_metric_cap {
+ struct ceph_metric_header header;
__le64 hit;
__le64 mis;
__le64 total;
@@ -53,48 +59,28 @@ struct ceph_metric_cap {
/* metric read latency header */
struct ceph_metric_read_latency {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(sec + nsec) */
+ struct ceph_metric_header header;
__le32 sec;
__le32 nsec;
} __packed;
/* metric write latency header */
struct ceph_metric_write_latency {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(sec + nsec) */
+ struct ceph_metric_header header;
__le32 sec;
__le32 nsec;
} __packed;
/* metric metadata latency header */
struct ceph_metric_metadata_latency {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(sec + nsec) */
+ struct ceph_metric_header header;
__le32 sec;
__le32 nsec;
} __packed;
/* metric dentry lease header */
struct ceph_metric_dlease {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(hit + mis + total) */
+ struct ceph_metric_header header;
__le64 hit;
__le64 mis;
__le64 total;
@@ -102,40 +88,39 @@ struct ceph_metric_dlease {
/* metric opened files header */
struct ceph_opened_files {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(opened_files + total) */
+ struct ceph_metric_header header;
__le64 opened_files;
__le64 total;
} __packed;
/* metric pinned i_caps header */
struct ceph_pinned_icaps {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(pinned_icaps + total) */
+ struct ceph_metric_header header;
__le64 pinned_icaps;
__le64 total;
} __packed;
/* metric opened inodes header */
struct ceph_opened_inodes {
- __le32 type; /* ceph metric type */
-
- __u8 ver;
- __u8 compat;
-
- __le32 data_len; /* length of sizeof(opened_inodes + total) */
+ struct ceph_metric_header header;
__le64 opened_inodes;
__le64 total;
} __packed;
+/* metric read io size header */
+struct ceph_read_io_size {
+ struct ceph_metric_header header;
+ __le64 total_ops;
+ __le64 total_size;
+} __packed;
+
+/* metric write io size header */
+struct ceph_write_io_size {
+ struct ceph_metric_header header;
+ __le64 total_ops;
+ __le64 total_size;
+} __packed;
+
struct ceph_metric_head {
__le32 num; /* the number of metrics that will be sent */
} __packed;
@@ -152,6 +137,9 @@ struct ceph_client_metric {
spinlock_t read_metric_lock;
u64 total_reads;
+ u64 read_size_sum;
+ u64 read_size_min;
+ u64 read_size_max;
ktime_t read_latency_sum;
ktime_t read_latency_sq_sum;
ktime_t read_latency_min;
@@ -159,6 +147,9 @@ struct ceph_client_metric {
spinlock_t write_metric_lock;
u64 total_writes;
+ u64 write_size_sum;
+ u64 write_size_min;
+ u64 write_size_max;
ktime_t write_latency_sum;
ktime_t write_latency_sq_sum;
ktime_t write_latency_min;
@@ -206,10 +197,10 @@ static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
extern void ceph_update_read_metrics(struct ceph_client_metric *m,
ktime_t r_start, ktime_t r_end,
- int rc);
+ unsigned int size, int rc);
extern void ceph_update_write_metrics(struct ceph_client_metric *m,
ktime_t r_start, ktime_t r_end,
- int rc);
+ unsigned int size, int rc);
extern void ceph_update_metadata_metrics(struct ceph_client_metric *m,
ktime_t r_start, ktime_t r_end,
int rc);
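
The metric.h hunks above replace the duplicated type/ver/compat/data_len fields with a shared struct ceph_metric_header embedded at the start of every packed wire record. A minimal userspace sketch of the same layout idea follows; types and values are illustrative, and host-endian integers are used for brevity where the kernel structs use __le32/__le64.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Shared header placed at the start of every metric record (illustrative). */
struct metric_header {
	uint32_t type;
	uint8_t  ver;
	uint8_t  compat;
	uint32_t data_len;	/* length of the payload that follows */
} __attribute__((packed));

struct io_size_metric {
	struct metric_header header;
	uint64_t total_ops;
	uint64_t total_size;
} __attribute__((packed));

static void metric_header_init(struct metric_header *h, uint32_t type,
			       uint8_t ver, uint8_t compat, uint32_t data_len)
{
	h->type = type;
	h->ver = ver;
	h->compat = compat;
	h->data_len = data_len;
}

int main(void)
{
	struct io_size_metric m;

	memset(&m, 0, sizeof(m));
	metric_header_init(&m.header, /* type */ 9, 1, 1,
			   sizeof(m) - sizeof(m.header));
	m.total_ops = 42;
	m.total_size = 1 << 20;

	printf("record size %zu, payload %u bytes\n", sizeof(m), m.header.data_len);
	return 0;
}
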
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 4e32c9600ecc..620c691af40e 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -74,8 +74,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
le64_to_cpu(h->max_files));
spin_unlock(&ci->i_ceph_lock);
- /* avoid calling iput_final() in dispatch thread */
- ceph_async_iput(inode);
+ iput(inode);
}
static struct ceph_quotarealm_inode *
@@ -247,8 +246,7 @@ restart:
ci = ceph_inode(in);
has_quota = __ceph_has_any_quota(ci);
- /* avoid calling iput_final() while holding mdsc->snap_rwsem */
- ceph_async_iput(in);
+ iput(in);
next = realm->parent;
if (has_quota || !next)
@@ -383,8 +381,7 @@ restart:
pr_warn("Invalid quota check op (%d)\n", op);
exceeded = true; /* Just break the loop */
}
- /* avoid calling iput_final() while holding mdsc->snap_rwsem */
- ceph_async_iput(in);
+ iput(in);
next = realm->parent;
if (exceeded || !next)
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4ce18055d931..4c6bd1042c94 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -60,24 +60,26 @@
/*
* increase ref count for the realm
*
- * caller must hold snap_rwsem for write.
+ * caller must hold snap_rwsem.
*/
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout("get_realm %p %d -> %d\n", realm,
- atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
+ lockdep_assert_held(&mdsc->snap_rwsem);
+
/*
- * since we _only_ increment realm refs or empty the empty
- * list with snap_rwsem held, adjusting the empty list here is
- * safe. we do need to protect against concurrent empty list
- * additions, however.
+ * The 0->1 and 1->0 transitions must take the snap_empty_lock
+ * atomically with the refcount change. Go ahead and bump the
+ * nref here, unless it's 0, in which case we take the spinlock
+ * and then do the increment and remove it from the list.
*/
- if (atomic_inc_return(&realm->nref) == 1) {
- spin_lock(&mdsc->snap_empty_lock);
+ if (atomic_inc_not_zero(&realm->nref))
+ return;
+
+ spin_lock(&mdsc->snap_empty_lock);
+ if (atomic_inc_return(&realm->nref) == 1)
list_del_init(&realm->empty_item);
- spin_unlock(&mdsc->snap_empty_lock);
- }
+ spin_unlock(&mdsc->snap_empty_lock);
}
static void __insert_snap_realm(struct rb_root *root,
@@ -113,6 +115,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
{
struct ceph_snap_realm *realm;
+ lockdep_assert_held_write(&mdsc->snap_rwsem);
+
realm = kzalloc(sizeof(*realm), GFP_NOFS);
if (!realm)
return ERR_PTR(-ENOMEM);
@@ -135,7 +139,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
/*
* lookup the realm rooted at @ino.
*
- * caller must hold snap_rwsem for write.
+ * caller must hold snap_rwsem.
*/
static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
u64 ino)
@@ -143,6 +147,8 @@ static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
struct rb_node *n = mdsc->snap_realms.rb_node;
struct ceph_snap_realm *r;
+ lockdep_assert_held(&mdsc->snap_rwsem);
+
while (n) {
r = rb_entry(n, struct ceph_snap_realm, node);
if (ino < r->ino)
@@ -176,6 +182,8 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
+ lockdep_assert_held_write(&mdsc->snap_rwsem);
+
dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
rb_erase(&realm->node, &mdsc->snap_realms);
@@ -198,28 +206,30 @@ static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
static void __put_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
- atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
+ lockdep_assert_held_write(&mdsc->snap_rwsem);
+
+ /*
+ * We do not require the snap_empty_lock here, as any caller that
+ * increments the value must hold the snap_rwsem.
+ */
if (atomic_dec_and_test(&realm->nref))
__destroy_snap_realm(mdsc, realm);
}
/*
- * caller needn't hold any locks
+ * See comments in ceph_get_snap_realm. Caller needn't hold any locks.
*/
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
- dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
- atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
- if (!atomic_dec_and_test(&realm->nref))
+ if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock))
return;
if (down_write_trylock(&mdsc->snap_rwsem)) {
+ spin_unlock(&mdsc->snap_empty_lock);
__destroy_snap_realm(mdsc, realm);
up_write(&mdsc->snap_rwsem);
} else {
- spin_lock(&mdsc->snap_empty_lock);
list_add(&realm->empty_item, &mdsc->snap_empty);
spin_unlock(&mdsc->snap_empty_lock);
}
@@ -236,6 +246,8 @@ static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
struct ceph_snap_realm *realm;
+ lockdep_assert_held_write(&mdsc->snap_rwsem);
+
spin_lock(&mdsc->snap_empty_lock);
while (!list_empty(&mdsc->snap_empty)) {
realm = list_first_entry(&mdsc->snap_empty,
@@ -269,6 +281,8 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
{
struct ceph_snap_realm *parent;
+ lockdep_assert_held_write(&mdsc->snap_rwsem);
+
if (realm->parent_ino == parentino)
return 0;
@@ -460,7 +474,7 @@ static bool has_new_snaps(struct ceph_snap_context *o,
* Caller must hold snap_rwsem for read (i.e., the realm topology won't
* change).
*/
-void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
@@ -663,15 +677,13 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (!inode)
continue;
spin_unlock(&realm->inodes_with_caps_lock);
- /* avoid calling iput_final() while holding
- * mdsc->snap_rwsem or in mds dispatch threads */
- ceph_async_iput(lastinode);
+ iput(lastinode);
lastinode = inode;
ceph_queue_cap_snap(ci);
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
- ceph_async_iput(lastinode);
+ iput(lastinode);
dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
}
@@ -696,6 +708,8 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
int err = -ENOMEM;
LIST_HEAD(dirty_realms);
+ lockdep_assert_held_write(&mdsc->snap_rwsem);
+
dout("update_snap_trace deletion=%d\n", deletion);
more:
ceph_decode_need(&p, e, sizeof(*ri), bad);
@@ -791,7 +805,7 @@ more:
return 0;
bad:
- err = -EINVAL;
+ err = -EIO;
fail:
if (realm && !IS_ERR(realm))
ceph_put_snap_realm(mdsc, realm);
@@ -823,17 +837,12 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
ceph_flush_snaps(ci, &session);
- /* avoid calling iput_final() while holding
- * session->s_mutex or in mds dispatch threads */
- ceph_async_iput(inode);
+ iput(inode);
spin_lock(&mdsc->snap_flush_lock);
}
spin_unlock(&mdsc->snap_flush_lock);
- if (session) {
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- }
+ ceph_put_mds_session(session);
dout("flush_snaps done\n");
}
@@ -969,14 +978,12 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
ceph_get_snap_realm(mdsc, realm);
ceph_put_snap_realm(mdsc, oldrealm);
- /* avoid calling iput_final() while holding
- * mdsc->snap_rwsem or mds in dispatch threads */
- ceph_async_iput(inode);
+ iput(inode);
continue;
skip_inode:
spin_unlock(&ci->i_ceph_lock);
- ceph_async_iput(inode);
+ iput(inode);
}
/* we may have taken some of the old realm's children. */
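
The snap.c refcounting change above takes a lock-free fast path with atomic_inc_not_zero and only falls back to snap_empty_lock for the 0->1 transition, while the put side uses atomic_dec_and_lock so the 1->0 transition is serialized against the empty list as well. The userspace analogue below sketches the same idea with C11 atomics and a pthread mutex; it is simplified in that the put side always takes the lock, whereas the kernel avoids it for non-final puts.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Object whose 0<->1 refcount transitions must be serialized by a lock,
 * e.g. because they also move the object on/off a reclaim list. */
struct obj {
	atomic_int nref;
	pthread_mutex_t zero_lock;
	bool on_empty_list;
};

static void obj_get(struct obj *o)
{
	int cur = atomic_load(&o->nref);

	/* Fast path: bump the count as long as it is non-zero. */
	while (cur != 0) {
		if (atomic_compare_exchange_weak(&o->nref, &cur, cur + 1))
			return;
	}

	/* Slow path: take the lock so 0 -> 1 and list removal are atomic. */
	pthread_mutex_lock(&o->zero_lock);
	if (atomic_fetch_add(&o->nref, 1) == 0)
		o->on_empty_list = false;	/* "list_del_init" analogue */
	pthread_mutex_unlock(&o->zero_lock);
}

static void obj_put(struct obj *o)
{
	/* Drop a reference; if it hits zero, park the object on the list
	 * under the same lock that obj_get() uses for 0 -> 1. */
	pthread_mutex_lock(&o->zero_lock);
	if (atomic_fetch_sub(&o->nref, 1) == 1)
		o->on_empty_list = true;	/* "list_add" analogue */
	pthread_mutex_unlock(&o->zero_lock);
}

int main(void)
{
	struct obj o = { .nref = 1, .zero_lock = PTHREAD_MUTEX_INITIALIZER };

	obj_get(&o);
	obj_put(&o);
	obj_put(&o);
	printf("nref=%d empty=%d\n", atomic_load(&o.nref), o.on_empty_list);
	return 0;
}
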
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 839e6b0239ee..9215a2f4535c 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -931,7 +931,6 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m,
extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
-extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
@@ -989,8 +988,6 @@ extern int ceph_inode_holds_cap(struct inode *inode, int mask);
extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
extern void __ceph_do_pending_vmtruncate(struct inode *inode);
-extern void ceph_async_iput(struct inode *inode);
-
void ceph_queue_inode_work(struct inode *inode, int work_bit);
static inline void ceph_queue_vmtruncate(struct inode *inode)
@@ -1170,7 +1167,7 @@ extern void ceph_flush_snaps(struct ceph_inode_info *ci,
extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session);
-extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
+extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
extern int ceph_drop_caps_for_unlink(struct inode *inode);
extern int ceph_encode_inode_release(void **p, struct inode *inode,
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index ec57cdb1590f..007427ba75e5 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -151,6 +151,9 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
return ERR_PTR(-EINVAL);
if (ref) {
+ if (WARN_ON_ONCE(!ref->node_name || ref->path_consumed < 0))
+ return ERR_PTR(-EINVAL);
+
if (strlen(fullpath) - ref->path_consumed) {
prepath = fullpath + ref->path_consumed;
/* skip initial delimiter */
@@ -173,7 +176,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
}
}
- rc = dns_resolve_server_name_to_ip(name, &srvIP);
+ rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
if (rc < 0) {
cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
__func__, name, rc);
@@ -208,6 +211,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
else
noff = tkn_e - (sb_mountdata + off) + 1;
+ if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
+ off += noff;
+ continue;
+ }
if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
off += noff;
continue;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9fb874dd8d24..64b71c4e2a9d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -65,6 +65,7 @@ bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
+bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
@@ -104,6 +105,9 @@ MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encr
module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+module_param(enable_negotiate_signing, bool, 0644);
+MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
+
module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
"helpful to restrict the ability to "
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 177f3e7ab86d..d25a4099b32e 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -153,5 +153,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.32"
+#define CIFS_VERSION "2.33"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 3100f8b66e60..c6a9542ca281 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -75,6 +75,9 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
+/* dns resolution interval in seconds */
+#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
+
/* maximum number of PDUs in one compound */
#define MAX_COMPOUND 5
@@ -577,6 +580,7 @@ struct TCP_Server_Info {
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
struct smb_version_operations *ops;
struct smb_version_values *vals;
+ /* updates to tcpStatus protected by GlobalMid_Lock */
enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
@@ -645,6 +649,7 @@ struct TCP_Server_Info {
/* point to the SMBD connection if RDMA is used instead of socket */
struct smbd_connection *smbd_conn;
struct delayed_work echo; /* echo ping workqueue job */
+ struct delayed_work resolve; /* dns resolution workqueue job */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
@@ -666,9 +671,11 @@ struct TCP_Server_Info {
unsigned int max_write;
unsigned int min_offload;
__le16 compress_algorithm;
+ __u16 signing_algorithm;
__le16 cipher_type;
/* save initial negprot hash */
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ bool signing_negotiated; /* true if valid signing context rcvd from server */
bool posix_ext_supported;
struct delayed_work reconnect; /* reconnect workqueue job */
struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
@@ -686,6 +693,9 @@ struct TCP_Server_Info {
bool use_swn_dstaddr;
struct sockaddr_storage swn_dstaddr;
#endif
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ bool is_dfs_conn; /* if a dfs connection */
+#endif
};
struct cifs_credits {
@@ -1601,6 +1611,11 @@ struct dfs_info3_param {
int ttl;
};
+struct file_list {
+ struct list_head list;
+ struct cifsFileInfo *cfile;
+};
+
/*
* common struct for holding inode info when searching for or updating an
* inode with new info
@@ -1785,7 +1800,7 @@ require use of the stronger protocol */
* list operations on pending_mid_q and oplockQ
* updates to XID counters, multiplex id and SMB sequence numbers
* list operations on global DnotifyReqList
- * updates to ses->status
+ * updates to ses->status and TCP_Server_Info->tcpStatus
* updates to server->CurrentMid
* tcp_ses_lock protects:
* list operations on tcp and SMB session lists
@@ -1868,6 +1883,7 @@ extern unsigned int global_secflags; /* if on, session setup sent
extern unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
extern bool enable_gcm_256; /* allow optional negotiate of strongest signing (aes-gcm-256) */
extern bool require_gcm_256; /* require use of strongest signing (aes-gcm-256) */
+extern bool enable_negotiate_signing; /* request use of faster (GMAC) signing if available */
extern bool linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
extern unsigned int CIFSMaxBufSize; /* max size not including hdr */
extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 0923f72d27e9..f6e235001358 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1785,6 +1785,7 @@ struct smb_com_transaction2_sfi_req {
__u16 Fid;
__le16 InformationLevel;
__u16 Reserved4;
+ __u8 payload[];
} __attribute__((packed));
struct smb_com_transaction2_sfi_rsp {
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 58ebec4d4413..65d1a65bfc37 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -873,8 +873,11 @@ PsxDelete:
InformationLevel) - 4;
offset = param_offset + params;
- /* Setup pointer to Request Data (inode type) */
- pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
+ /* Setup pointer to Request Data (inode type).
+ * Note that SMB offsets are from the beginning of SMB which is 4 bytes
+ * in, after RFC1001 field
+ */
+ pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4);
pRqD->type = cpu_to_le16(type);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
@@ -1081,7 +1084,8 @@ PsxCreat:
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
- pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+ pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4);
pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
pdata->Permissions = cpu_to_le64(mode);
pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
@@ -2537,8 +2541,9 @@ CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
parm_data = (struct cifs_posix_lock *)
- (((char *) &pSMB->hdr.Protocol) + offset);
+ (((char *)pSMB) + offset + 4);
parm_data->lock_type = cpu_to_le16(lock_type);
if (waitFlag) {
@@ -2767,7 +2772,8 @@ int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+ data_offset = (char *)(pSMB) + offset + 4;
rename_info = (struct set_file_rename *) data_offset;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
@@ -2925,7 +2931,8 @@ createSymLinkRetry:
InformationLevel) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+ data_offset = (char *)pSMB + offset + 4;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len_target =
cifsConvertToUTF16((__le16 *) data_offset, toName,
@@ -3009,7 +3016,8 @@ createHardLinkRetry:
InformationLevel) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+ data_offset = (char *)pSMB + offset + 4;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len_target =
cifsConvertToUTF16((__le16 *) data_offset, fromName,
@@ -5626,9 +5634,9 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
parm_data =
- (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol)
- + offset);
+ (struct file_end_of_file_info *)(((char *)pSMB) + offset + 4);
pSMB->DataOffset = cpu_to_le16(offset);
parm_data->FileSize = cpu_to_le64(size);
pSMB->Fid = cfile->fid.netfid;
@@ -5761,7 +5769,8 @@ CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon,
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+ data_offset = (char *)(pSMB) + offset + 4;
count = 1;
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -6062,9 +6071,8 @@ setPermsRetry:
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
- data_offset =
- (FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol +
- offset);
+ /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+ data_offset = (FILE_UNIX_BASIC_INFO *)((char *) pSMB + offset + 4);
memset(data_offset, 0, count);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5d269f583dac..3781eee9360a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -78,6 +78,8 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
int rc;
int len;
char *unc, *ipaddr = NULL;
+ time64_t expiry, now;
+ unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
if (!server->hostname)
return -EINVAL;
@@ -91,13 +93,13 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
}
scnprintf(unc, len, "\\\\%s", server->hostname);
- rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+ rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
kfree(unc);
if (rc < 0) {
cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
__func__, server->hostname, rc);
- return rc;
+ goto requeue_resolve;
}
spin_lock(&cifs_tcp_ses_lock);
@@ -106,7 +108,45 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
spin_unlock(&cifs_tcp_ses_lock);
kfree(ipaddr);
- return !rc ? -1 : 0;
+ /* rc == 1 means success here */
+ if (rc) {
+ now = ktime_get_real_seconds();
+ if (expiry && expiry > now)
+ /*
+ * To make sure we don't use the cached entry, retry 1s
+ * after expiry.
+ */
+ ttl = (expiry - now + 1);
+ }
+ rc = !rc ? -1 : 0;
+
+requeue_resolve:
+ cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
+ __func__, ttl);
+ mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
+
+ return rc;
+}
+
+
+static void cifs_resolve_server(struct work_struct *work)
+{
+ int rc;
+ struct TCP_Server_Info *server = container_of(work,
+ struct TCP_Server_Info, resolve.work);
+
+ mutex_lock(&server->srv_mutex);
+
+ /*
+ * Resolve the hostname again to make sure that IP address is up-to-date.
+ */
+ rc = reconn_set_ipaddr_from_hostname(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
+
+ mutex_unlock(&server->srv_mutex);
}
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -180,7 +220,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DFS_UPCALL
struct super_block *sb = NULL;
struct cifs_sb_info *cifs_sb = NULL;
- struct dfs_cache_tgt_list tgt_list = {0};
+ struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
struct dfs_cache_tgt_iterator *tgt_it = NULL;
#endif
@@ -680,6 +720,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
+ cancel_delayed_work_sync(&server->resolve);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
@@ -1227,6 +1268,16 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ /*
+ * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
+ * DFS connections to do failover properly, so avoid sharing them with regular
+ * shares or even links that may connect to the same server but have completely
+ * different failover targets.
+ */
+ if (server->is_dfs_conn)
+ continue;
+#endif
/*
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
@@ -1254,12 +1305,16 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
return;
}
+ /* srv_count can never go negative */
+ WARN_ON(server->srv_count < 0);
+
put_net(cifs_net_ns(server));
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
+ cancel_delayed_work_sync(&server->resolve);
if (from_reconnect)
/*
@@ -1342,6 +1397,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+ INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
mutex_init(&tcp_ses->reconnect_mutex);
memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
@@ -1403,6 +1459,11 @@ smbd_connected:
goto out_err_crypto_release;
}
tcp_ses->min_offload = ctx->min_offload;
+ /*
+ * at this point we are the only ones with the pointer
+ * to the struct since the kernel thread is not created yet
+ * no need to spinlock this update of tcpStatus
+ */
tcp_ses->tcpStatus = CifsNeedNegotiate;
if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
@@ -1422,6 +1483,12 @@ smbd_connected:
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
+ /* queue dns resolution delayed work */
+ cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
+ __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
+
+ queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
+
return tcp_ses;
out_err_crypto_release:
@@ -1600,6 +1667,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
}
spin_unlock(&cifs_tcp_ses_lock);
+ /* ses_count can never go negative */
+ WARN_ON(ses->ses_count < 0);
+
spin_lock(&GlobalMid_Lock);
if (ses->status == CifsGood)
ses->status = CifsExiting;
@@ -1967,6 +2037,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
return;
}
+ /* tc_count can never go negative */
+ WARN_ON(tcon->tc_count < 0);
+
if (tcon->use_witness) {
int rc;
@@ -2905,6 +2978,23 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
+static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
+ unsigned int *xid, struct TCP_Server_Info **nserver,
+ struct cifs_ses **nses, struct cifs_tcon **ntcon)
+{
+ int rc;
+
+ ctx->nosharesock = true;
+ rc = mount_get_conns(ctx, cifs_sb, xid, nserver, nses, ntcon);
+ if (*nserver) {
+ cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
+ spin_lock(&cifs_tcp_ses_lock);
+ (*nserver)->is_dfs_conn = true;
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+ return rc;
+}
+
/*
* cifs_build_path_to_root returns full path to root when we do not have an
* existing connection (tcon)
@@ -3040,7 +3130,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
{
int rc;
char *npath = NULL;
- struct dfs_cache_tgt_list tgt_list = {0};
+ struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
struct dfs_cache_tgt_iterator *tgt_it = NULL;
struct smb3_fs_context tmp_ctx = {NULL};
@@ -3100,7 +3190,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
tmp_ctx.prepath);
mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
- rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
+ rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
if (!rc || (*server && *ses)) {
/*
* We were able to connect to new target server. Update current context with
@@ -3399,7 +3489,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
}
- ctx->nosharesock = true;
+ mount_put_conns(cifs_sb, xid, server, ses, tcon);
+ /*
+ * Ignore the error check here because we may fail over to other targets from a
+ * cached referral.
+ */
+ (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
/* Get path of DFS root */
ref_path = build_unc_path_to_root(ctx, cifs_sb, false);
@@ -3428,7 +3523,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
/* Connect to new DFS target only if we were redirected */
if (oldmnt != cifs_sb->ctx->mount_options) {
mount_put_conns(cifs_sb, xid, server, ses, tcon);
- rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
}
if (rc && !server && !ses) {
/* Failed to connect. Try to connect to other targets in the referral. */
@@ -3454,7 +3549,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
rc = -ELOOP;
} while (rc == -EREMOTE);
- if (rc || !tcon)
+ if (rc || !tcon || !ses)
goto error;
kfree(ref_path);
@@ -4090,7 +4185,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
if (!tree)
return -ENOMEM;
- if (!tcon->dfs_path) {
+ /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
+ if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
if (tcon->ipc) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
@@ -4100,9 +4196,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
goto out;
}
- rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl);
- if (rc)
- goto out;
isroot = ref.server_type == DFS_TYPE_ROOT;
free_dfs_info_param(&ref);
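
The connect.c changes above schedule a periodic DNS re-resolution job and derive the requeue interval from the record's expiry, falling back to SMB_DNS_RESOLVE_INTERVAL_DEFAULT; the resulting ttl is then multiplied by HZ and handed to mod_delayed_work on the per-server resolve work. The small standalone sketch below shows only the interval computation; the constant name is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DNS_RESOLVE_INTERVAL_DEFAULT 600	/* seconds, fallback when no TTL */

/* Pick the delay before the next re-resolution: default interval unless the
 * record carries an expiry, in which case retry one second after it lapses. */
static unsigned long next_resolve_delay(int64_t expiry, int64_t now)
{
	unsigned long ttl = DNS_RESOLVE_INTERVAL_DEFAULT;

	if (expiry && expiry > now)
		ttl = (unsigned long)(expiry - now + 1);
	return ttl;
}

int main(void)
{
	int64_t now = (int64_t)time(NULL);

	printf("no expiry      -> %lus\n", next_resolve_delay(0, now));
	printf("expires in 90s -> %lus\n", next_resolve_delay(now + 90, now));
	printf("already stale  -> %lus\n", next_resolve_delay(now - 5, now));
	return 0;
}
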
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 7c1769714609..283745592844 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -19,6 +19,7 @@
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
+#include "dns_resolve.h"
#include "dfs_cache.h"
@@ -911,6 +912,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
err_free_it:
list_for_each_entry_safe(it, nit, head, it_list) {
+ list_del(&it->it_list);
kfree(it->it_name);
kfree(it);
}
@@ -1293,6 +1295,194 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
+static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+{
+ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ const char *host;
+ size_t hostlen;
+ char *ip = NULL;
+ struct sockaddr sa;
+ bool match;
+ int rc;
+
+ if (strcasecmp(s1, s2))
+ return false;
+
+ /*
+ * Resolve share's hostname and check if server address matches. Otherwise just ignore it
+ * as we could not have upcall to resolve hostname or failed to convert ip address.
+ */
+ match = true;
+ extract_unc_hostname(s1, &host, &hostlen);
+ scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
+
+ rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
+ __func__, (int)hostlen, host);
+ return true;
+ }
+
+ if (!cifs_convert_address(&sa, ip, strlen(ip))) {
+ cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
+ __func__, ip);
+ } else {
+ mutex_lock(&server->srv_mutex);
+ match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
+ mutex_unlock(&server->srv_mutex);
+ }
+
+ kfree(ip);
+ return match;
+}
+
+/*
+ * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
+ * target shares in @refs.
+ */
+static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
+ const struct dfs_info3_param *refs, int numrefs)
+{
+ struct dfs_cache_tgt_iterator *it;
+ int i;
+
+ for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
+ for (i = 0; i < numrefs; i++) {
+ if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
+ refs[i].node_name))
+ return;
+ }
+ }
+
+ cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
+ for (i = 0; i < tcon->ses->chan_count; i++) {
+ spin_lock(&GlobalMid_Lock);
+ if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
+ tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ }
+}
+
+/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+{
+ const char *path = tcon->dfs_path + 1;
+ struct cifs_ses *ses;
+ struct cache_entry *ce;
+ struct dfs_info3_param *refs = NULL;
+ int numrefs = 0;
+ bool needs_refresh = false;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ int rc = 0;
+ unsigned int xid;
+
+ ses = find_ipc_from_server_path(sessions, path);
+ if (IS_ERR(ses)) {
+ cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
+ return PTR_ERR(ses);
+ }
+
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+ if (!IS_ERR(ce)) {
+ rc = get_targets(ce, &tl);
+ if (rc)
+ cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+ }
+ up_read(&htable_rw_lock);
+
+ if (!needs_refresh) {
+ rc = 0;
+ goto out;
+ }
+
+ xid = get_xid();
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ free_xid(xid);
+
+ /* Create or update a cache entry with the new referral */
+ if (!rc) {
+ dump_refs(refs, numrefs);
+
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ if (IS_ERR(ce))
+ add_cache_entry_locked(refs, numrefs);
+ else if (force_refresh || cache_entry_expired(ce))
+ update_cache_entry_locked(ce, refs, numrefs);
+ up_write(&htable_rw_lock);
+
+ mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+ }
+
+out:
+ dfs_cache_free_tgts(&tl);
+ free_dfs_info_array(refs, numrefs);
+ return rc;
+}
+
+/**
+ * dfs_cache_remount_fs - remount a DFS share
+ *
+ * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
+ * match any of the new targets, mark it for reconnect.
+ *
+ * @cifs_sb: cifs superblock.
+ *
+ * Return zero if remounted, otherwise non-zero.
+ */
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+{
+ struct cifs_tcon *tcon;
+ struct mount_group *mg;
+ struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+ int rc;
+
+ if (!cifs_sb || !cifs_sb->master_tlink)
+ return -EINVAL;
+
+ tcon = cifs_sb_master_tcon(cifs_sb);
+ if (!tcon->dfs_path) {
+ cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__);
+ return 0;
+ }
+
+ if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+ cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mount_group_list_lock);
+ mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
+ if (IS_ERR(mg)) {
+ mutex_unlock(&mount_group_list_lock);
+ cifs_dbg(FYI, "%s: tcon has ipc session to refresh referral\n", __func__);
+ return PTR_ERR(mg);
+ }
+ kref_get(&mg->refcount);
+ mutex_unlock(&mount_group_list_lock);
+
+ spin_lock(&mg->lock);
+ memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
+ spin_unlock(&mg->lock);
+
+ /*
+ * After reconnecting to a different server, unique ids won't match anymore, so we disable
+ * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE).
+ */
+ cifs_autodisable_serverino(cifs_sb);
+ /*
+ * Force the use of prefix path to support failover on DFS paths that resolve to targets
+ * that have different prefix paths.
+ */
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = refresh_tcon(sessions, tcon, true);
+
+ kref_put(&mg->refcount, mount_group_release);
+ return rc;
+}
+
/*
* Refresh all active dfs mounts regardless of whether they are in cache or not.
* (cache can be cleared)
@@ -1303,7 +1493,6 @@ static void refresh_mounts(struct cifs_ses **sessions)
struct cifs_ses *ses;
struct cifs_tcon *tcon, *ntcon;
struct list_head tcons;
- unsigned int xid;
INIT_LIST_HEAD(&tcons);
@@ -1321,44 +1510,8 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
- const char *path = tcon->dfs_path + 1;
- struct cache_entry *ce;
- struct dfs_info3_param *refs = NULL;
- int numrefs = 0;
- bool needs_refresh = false;
- int rc = 0;
-
list_del_init(&tcon->ulist);
-
- ses = find_ipc_from_server_path(sessions, path);
- if (IS_ERR(ses))
- goto next_tcon;
-
- down_read(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- needs_refresh = IS_ERR(ce) || cache_entry_expired(ce);
- up_read(&htable_rw_lock);
-
- if (!needs_refresh)
- goto next_tcon;
-
- xid = get_xid();
- rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
- free_xid(xid);
-
- /* Create or update a cache entry with the new referral */
- if (!rc) {
- down_write(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- if (IS_ERR(ce))
- add_cache_entry_locked(refs, numrefs);
- else if (cache_entry_expired(ce))
- update_cache_entry_locked(ce, refs, numrefs);
- up_write(&htable_rw_lock);
- }
-
-next_tcon:
- free_dfs_info_array(refs, numrefs);
+ refresh_tcon(sessions, tcon, false);
cifs_put_tcon(tcon);
}
}
diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
index b29d3ae64829..52070d1df189 100644
--- a/fs/cifs/dfs_cache.h
+++ b/fs/cifs/dfs_cache.h
@@ -13,6 +13,8 @@
#include <linux/uuid.h>
#include "cifsglob.h"
+#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+
struct dfs_cache_tgt_list {
int tl_numtgts;
struct list_head tl_list;
@@ -44,6 +46,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
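
The DFS_CACHE_TGT_LIST_INIT macro introduced above replaces the bare `{0}` initializer used at the call sites in connect.c: an all-zero aggregate leaves the embedded list head pointing at NULL, whereas the macro initializes it to point back at itself, the usual LIST_HEAD_INIT idiom. A tiny userspace sketch of that idiom, with simplified types:

#include <stdio.h>

/* Minimal circular list head, initialized to point at itself (the same idiom
 * LIST_HEAD_INIT / DFS_CACHE_TGT_LIST_INIT rely on). */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct tgt_list {
	int numtgts;
	struct list_head list;
};

/* The initializer macro must name the variable so the embedded head can
 * point back at itself. */
#define TGT_LIST_INIT(var) { .numtgts = 0, .list = LIST_HEAD_INIT((var).list) }

int main(void)
{
	struct tgt_list tl = TGT_LIST_INIT(tl);

	printf("empty: %d\n", tl.list.next == &tl.list && tl.list.prev == &tl.list);
	return 0;
}
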
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 79402ca0ddfa..5f8a302ffcb2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -100,7 +100,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
- s = dentry_path_raw(direntry, page, PAGE_SIZE);
+ s = dentry_path_raw(direntry, page, PATH_MAX);
if (IS_ERR(s))
return s;
if (!s[1]) // for root we want "", not "/"
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index d15b82d569ef..8c616aaeb7c4 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -24,6 +24,7 @@
* dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
* @unc: UNC path specifying the server (with '/' as delimiter)
* @ip_addr: Where to return the IP address.
+ * @expiry: Where to return the expiry time for the dns record.
*
* The IP address will be returned in string form, and the caller is
* responsible for freeing it.
@@ -31,7 +32,7 @@
* Returns length of result on success, -ve on error.
*/
int
-dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
+dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
{
struct sockaddr_storage ss;
const char *hostname, *sep;
@@ -66,13 +67,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* Perform the upcall */
rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
- NULL, ip_addr, NULL, false);
+ NULL, ip_addr, expiry, false);
if (rc < 0)
cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
__func__, len, len, hostname);
else
- cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n",
- __func__, len, len, hostname, *ip_addr);
+ cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
+ __func__, len, len, hostname, *ip_addr,
+ expiry ? (*expiry) : 0);
return rc;
name_is_IP_address:
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 5be060b82b13..9fa2807ef79e 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -12,7 +12,7 @@
#define _DNS_RESOLVE_H
#ifdef __KERNEL__
-extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
+extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
#endif /* KERNEL */
#endif /* _DNS_RESOLVE_H */
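
The new expiry argument above follows the optional out-parameter idiom: callers that do not care about the TTL (cifs_dfs_ref.c, misc.c, dfs_cache.c) pass NULL, while the reconnect path passes a time64_t and uses it to schedule the next resolution. The standalone sketch below illustrates the idiom with an invented fake_resolve helper; it does not perform any real DNS lookup.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Resolve a name and, if the caller passed a non-NULL expiry pointer, report
 * when the answer should be considered stale. Resolution itself is faked. */
static int fake_resolve(const char *name, const char **ip, int64_t *expiry)
{
	(void)name;
	*ip = "192.0.2.10";		/* placeholder answer */
	if (expiry)
		*expiry = (int64_t)time(NULL) + 300;
	return 0;
}

int main(void)
{
	const char *ip;
	int64_t expiry;

	/* Caller that does not care about the TTL just passes NULL. */
	fake_resolve("example", &ip, NULL);
	printf("no ttl wanted: %s\n", ip);

	/* Reconnect-style caller asks for the expiry as well. */
	fake_resolve("example", &ip, &expiry);
	printf("with ttl: %s (expires %lld)\n", ip, (long long)expiry);
	return 0;
}
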
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index cd108607a070..bb98fbdd22a9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4619,7 +4619,7 @@ read_complete:
static int cifs_readpage(struct file *file, struct page *page)
{
- loff_t offset = (loff_t)page->index << PAGE_SHIFT;
+ loff_t offset = page_file_offset(page);
int rc = -EACCES;
unsigned int xid;
@@ -4848,34 +4848,33 @@ void cifs_oplock_break(struct work_struct *work)
oplock_break_ack:
/*
- * releasing stale oplock after recent reconnect of smb session using
- * a now incorrect file handle is not a data integrity issue but do
- * not bother sending an oplock release if session to server still is
- * disconnected since oplock already released by the server
- */
- if (!cfile->oplock_break_cancelled) {
- rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
- cinode);
- cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
- }
- /*
* When oplock break is received and there are no active
* file handles but cached, then schedule deferred close immediately.
* So, new open will not use cached handle.
*/
spin_lock(&CIFS_I(inode)->deferred_lock);
is_deferred = cifs_is_deferred_close(cfile, &dclose);
+ spin_unlock(&CIFS_I(inode)->deferred_lock);
if (is_deferred &&
cfile->deferred_close_scheduled &&
delayed_work_pending(&cfile->deferred)) {
- /*
- * If there is no pending work, mod_delayed_work queues new work.
- * So, Increase the ref count to avoid use-after-free.
- */
- if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
- cifsFileInfo_get(cfile);
+ if (cancel_delayed_work(&cfile->deferred)) {
+ _cifsFileInfo_put(cfile, false, false);
+ goto oplock_break_done;
+ }
}
- spin_unlock(&CIFS_I(inode)->deferred_lock);
+ /*
+ * releasing stale oplock after recent reconnect of smb session using
+ * a now incorrect file handle is not a data integrity issue but do
+ * not bother sending an oplock release if session to server still is
+ * disconnected since oplock already released by the server
+ */
+ if (!cfile->oplock_break_cancelled) {
+ rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+ cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ }
+oplock_break_done:
_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
cifs_done_oplock_break(cinode);
}
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 92d4ab029c91..eed59bc1d913 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -13,6 +13,9 @@
#include <linux/magic.h>
#include <linux/security.h>
#include <net/net_namespace.h>
+#ifdef CONFIG_CIFS_DFS_UPCALL
+#include "dfs_cache.h"
+#endif
*/
#include <linux/ctype.h>
@@ -322,7 +325,6 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->UNC = NULL;
new_ctx->source = NULL;
new_ctx->iocharset = NULL;
-
/*
* Make sure to stay in sync with smb3_cleanup_fs_context_contents()
*/
@@ -780,6 +782,10 @@ static int smb3_reconfigure(struct fs_context *fc)
smb3_cleanup_fs_context_contents(cifs_sb->ctx);
rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
smb3_update_mnt_flags(cifs_sb);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (!rc)
+ rc = dfs_cache_remount_fs(cifs_sb);
+#endif
return rc;
}
@@ -792,6 +798,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
int i, opt;
bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
bool skip_parsing = false;
+ kuid_t uid;
+ kgid_t gid;
cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
@@ -904,18 +912,38 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
}
break;
case Opt_uid:
- ctx->linux_uid.val = result.uint_32;
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ goto cifs_parse_mount_err;
+ ctx->linux_uid = uid;
ctx->uid_specified = true;
break;
case Opt_cruid:
- ctx->cred_uid.val = result.uint_32;
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ goto cifs_parse_mount_err;
+ ctx->cred_uid = uid;
+ ctx->cruid_specified = true;
+ break;
+ case Opt_backupuid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ goto cifs_parse_mount_err;
+ ctx->backupuid = uid;
+ ctx->backupuid_specified = true;
break;
case Opt_backupgid:
- ctx->backupgid.val = result.uint_32;
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ goto cifs_parse_mount_err;
+ ctx->backupgid = gid;
ctx->backupgid_specified = true;
break;
case Opt_gid:
- ctx->linux_gid.val = result.uint_32;
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ goto cifs_parse_mount_err;
+ ctx->linux_gid = gid;
ctx->gid_specified = true;
break;
case Opt_port:
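
The fs_context.c hunk above stops storing raw uint_32 option values directly into kuid_t/kgid_t and instead maps them through make_kuid()/make_kgid() and rejects ids that do not map in the current user namespace. The userspace sketch below mirrors that parse-map-validate flow; the kuid_t stand-ins and the 16-bit mapping rule are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for kuid_t / make_kuid / uid_valid: map a raw number into the
 * "current namespace" and reject anything that does not map. */
typedef struct { uint32_t val; } kuid_t;
#define INVALID_UID ((uint32_t)-1)

static kuid_t make_kuid_dummy(uint32_t raw)
{
	kuid_t uid = { .val = raw };

	if (raw >= 1u << 16)		/* pretend the ns only maps 16-bit ids */
		uid.val = INVALID_UID;
	return uid;
}

static bool uid_valid_dummy(kuid_t uid)
{
	return uid.val != INVALID_UID;
}

/* Option parsing converts and validates instead of storing the raw u32. */
static int parse_uid_option(uint32_t raw, kuid_t *out)
{
	kuid_t uid = make_kuid_dummy(raw);

	if (!uid_valid_dummy(uid))
		return -1;		/* cifs_parse_mount_err analogue */
	*out = uid;
	return 0;
}

int main(void)
{
	kuid_t uid;

	printf("uid 1000  -> %d\n", parse_uid_option(1000, &uid));
	printf("uid 70000 -> %d\n", parse_uid_option(70000, &uid));
	return 0;
}
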
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index 2a71c8e411ac..b6243972edf3 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -155,6 +155,7 @@ enum cifs_param {
struct smb3_fs_context {
bool uid_specified;
+ bool cruid_specified;
bool gid_specified;
bool sloppy;
bool got_ip;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b96b253e7635..65f8a70cece3 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1625,7 +1625,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
goto unlink_out;
}
- cifs_close_all_deferred_files(tcon);
+ cifs_close_deferred_file(CIFS_I(inode));
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2084,6 +2084,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
FILE_UNIX_BASIC_INFO *info_buf_target;
unsigned int xid;
int rc, tmprc;
+ int retry_count = 0;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
@@ -2113,10 +2114,24 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
goto cifs_rename_exit;
}
- cifs_close_all_deferred_files(tcon);
+ cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+ if (d_inode(target_dentry) != NULL)
+ cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
to_name);
+ if (rc == -EACCES) {
+ while (retry_count < 3) {
+ cifs_close_all_deferred_files(tcon);
+ rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+ to_name);
+ if (rc != -EACCES)
+ break;
+ retry_count++;
+ }
+ }
+
/*
* No-replace is the natural behavior for CIFS, so skip unlink hacks.
*/
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 184138b4eb8c..9469f1cf0b46 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -723,13 +723,31 @@ void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *cfile = NULL;
- struct cifs_deferred_close *dclose;
+ struct file_list *tmp_list, *tmp_next_list;
+ struct list_head file_head;
+
+ if (cifs_inode == NULL)
+ return;
+ INIT_LIST_HEAD(&file_head);
+ spin_lock(&cifs_inode->open_file_lock);
list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
- spin_lock(&cifs_inode->deferred_lock);
- if (cifs_is_deferred_close(cfile, &dclose))
- mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
- spin_unlock(&cifs_inode->deferred_lock);
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ continue;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+ }
+ }
+ spin_unlock(&cifs_inode->open_file_lock);
+
+ list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+ _cifsFileInfo_put(tmp_list->cfile, true, false);
+ list_del(&tmp_list->list);
+ kfree(tmp_list);
}
}
@@ -738,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
struct cifsFileInfo *cfile;
struct list_head *tmp;
+ struct file_list *tmp_list, *tmp_next_list;
+ struct list_head file_head;
+ INIT_LIST_HEAD(&file_head);
spin_lock(&tcon->open_file_lock);
list_for_each(tmp, &tcon->openFileList) {
cfile = list_entry(tmp, struct cifsFileInfo, tlist);
if (delayed_work_pending(&cfile->deferred)) {
- /*
- * If there is no pending work, mod_delayed_work queues new work.
- * So, Increase the ref count to avoid use-after-free.
- */
- if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
- cifsFileInfo_get(cfile);
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ continue;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
}
}
spin_unlock(&tcon->open_file_lock);
+
+ list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+ _cifsFileInfo_put(tmp_list->cfile, true, false);
+ list_del(&tmp_list->list);
+ kfree(tmp_list);
+ }
}
/* parses DFS referral V3 structure
@@ -1187,7 +1215,7 @@ int match_target_ip(struct TCP_Server_Info *server,
cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
- rc = dns_resolve_server_name_to_ip(target, &tip);
+ rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
if (rc < 0)
goto out;
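
The misc.c rework above replaces mod_delayed_work under the deferred lock with a two-pass scheme: under the spinlock it only cancels the pending deferred close and queues the file on a temporary file_list, then it drops the lock and performs the final _cifsFileInfo_put() calls outside it. The standalone sketch below shows the same collect-under-lock, act-outside-lock pattern with a mutex and an array instead of a spinlock and a linked list.

#include <pthread.h>
#include <stdio.h>

#define MAX_FILES 8

struct fake_file {
	int refs;
	int close_pending;	/* stands in for delayed_work_pending() */
};

static pthread_mutex_t open_file_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_file files[MAX_FILES] = {
	{ .refs = 2, .close_pending = 1 },
	{ .refs = 1, .close_pending = 0 },
	{ .refs = 3, .close_pending = 1 },
};

/* Potentially blocking; must not run under open_file_lock. */
static void put_file(struct fake_file *f)
{
	f->refs--;
	printf("put file, refs now %d\n", f->refs);
}

static void close_deferred_files(void)
{
	struct fake_file *todo[MAX_FILES];
	int n = 0, i;

	/* Pass 1: under the lock, only collect the files whose deferred
	 * close we managed to cancel (cancel_delayed_work analogue). */
	pthread_mutex_lock(&open_file_lock);
	for (i = 0; i < MAX_FILES; i++) {
		if (files[i].close_pending) {
			files[i].close_pending = 0;
			todo[n++] = &files[i];
		}
	}
	pthread_mutex_unlock(&open_file_lock);

	/* Pass 2: drop the references outside the lock. */
	for (i = 0; i < n; i++)
		put_file(todo[i]);
}

int main(void)
{
	close_deferred_files();
	return 0;
}
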
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e4c8f603dd58..2dfd0d8297eb 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -557,8 +557,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
p = buf;
while (bytes_left >= sizeof(*p)) {
info->speed = le64_to_cpu(p->LinkSpeed);
- info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
- info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
@@ -2910,6 +2910,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
/* ipc tcons are not refcounted */
spin_lock(&cifs_tcp_ses_lock);
tcon->tc_count--;
+ /* tc_count can never go negative */
+ WARN_ON(tcon->tc_count < 0);
spin_unlock(&cifs_tcp_ses_lock);
}
kfree(utf16_path);
@@ -3616,6 +3618,7 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
{
struct cifs_io_parms io_parms = {0};
int nbytes;
+ int rc = 0;
struct kvec iov[2];
io_parms.netfid = cfile->fid.netfid;
@@ -3623,13 +3626,25 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
io_parms.tcon = tcon;
io_parms.persistent_fid = cfile->fid.persistent_fid;
io_parms.volatile_fid = cfile->fid.volatile_fid;
- io_parms.offset = off;
- io_parms.length = len;
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = buf;
- iov[1].iov_len = io_parms.length;
- return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+ while (len) {
+ io_parms.offset = off;
+ io_parms.length = len;
+ if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
+ io_parms.length = SMB2_MAX_BUFFER_SIZE;
+ /* iov[0] is reserved for smb header */
+ iov[1].iov_base = buf;
+ iov[1].iov_len = io_parms.length;
+ rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+ if (rc)
+ break;
+ if (nbytes > len)
+ return -EINVAL;
+ buf += nbytes;
+ off += nbytes;
+ len -= nbytes;
+ }
+ return rc;
}
static int smb3_simple_fallocate_range(unsigned int xid,
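
The rewritten smb3_simple_fallocate_write_range() above no longer issues one SMB2_write for the whole range; it loops, capping each request at SMB2_MAX_BUFFER_SIZE and advancing by however many bytes the server actually accepted. The standalone sketch below shows the same chunked-write loop shape with a toy transport and a tiny cap in place of SMB2_MAX_BUFFER_SIZE.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_IO_SIZE 4	/* tiny on purpose; the driver caps at SMB2_MAX_BUFFER_SIZE */

/* Pretend transport: "writes" at most MAX_IO_SIZE bytes and reports how many. */
static int send_write(const char *buf, size_t len, size_t *written)
{
	*written = len > MAX_IO_SIZE ? MAX_IO_SIZE : len;
	printf("wrote %zu bytes: %.*s\n", *written, (int)*written, buf);
	return 0;
}

/* Issue a large write as a sequence of bounded requests, advancing the
 * buffer and remaining length by whatever each request actually wrote. */
static int write_range(const char *buf, size_t len)
{
	size_t nbytes;
	int rc = 0;

	while (len) {
		rc = send_write(buf, len, &nbytes);
		if (rc)
			break;
		if (nbytes > len)
			return -1;	/* short-write sanity check */
		buf += nbytes;
		len -= nbytes;
	}
	return rc;
}

int main(void)
{
	return write_range("fallocate fill", strlen("fallocate fill"));
}
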
@@ -3653,11 +3668,6 @@ static int smb3_simple_fallocate_range(unsigned int xid,
(char **)&out_data, &out_data_len);
if (rc)
goto out;
- /*
- * It is already all allocated
- */
- if (out_data_len == 0)
- goto out;
buf = kzalloc(1024 * 1024, GFP_KERNEL);
if (buf == NULL) {
@@ -3780,6 +3790,24 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
goto out;
}
+ if (keep_size == true) {
+ /*
+ * We can not preallocate pages beyond the end of the file
+ * in SMB2
+ */
+ if (off >= i_size_read(inode)) {
+ rc = 0;
+ goto out;
+ }
+ /*
+ * For fallocates that are partially beyond the end of file,
+ * clamp len so we only fallocate up to the end of file.
+ */
+ if (off + len > i_size_read(inode)) {
+ len = i_size_read(inode) - off;
+ }
+ }
+
if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
/*
* At this point, we are trying to fallocate an internal
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 962826dc3316..b6d2e3591927 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -433,6 +433,29 @@ build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
}
+static unsigned int
+build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
+{
+ unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
+ unsigned short num_algs = 1; /* number of signing algorithms sent */
+
+ pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
+ /*
+ * Context Data length must be rounded to multiple of 8 for some servers
+ */
+ pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP(
+ sizeof(struct smb2_signing_capabilities) -
+ sizeof(struct smb2_neg_context) +
+ (num_algs * 2 /* sizeof u16 */), 8) * 8);
+ pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
+ pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
+
+ ctxt_len += 2 /* sizeof le16 */ * num_algs;
+ ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8;
+ return ctxt_len;
+ /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
+}
+
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
@@ -498,7 +521,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
struct TCP_Server_Info *server, unsigned int *total_len)
{
char *pneg_ctxt;
- unsigned int ctxt_len;
+ unsigned int ctxt_len, neg_context_count;
if (*total_len > 200) {
/* In case length corrupted don't want to overrun smb buffer */
@@ -525,6 +548,17 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
+ ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
+ server->hostname);
+ *total_len += ctxt_len;
+ pneg_ctxt += ctxt_len;
+
+ build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
+ *total_len += sizeof(struct smb2_posix_neg_context);
+ pneg_ctxt += sizeof(struct smb2_posix_neg_context);
+
+ neg_context_count = 4;
+
if (server->compress_algorithm) {
build_compression_ctxt((struct smb2_compression_capabilities_context *)
pneg_ctxt);
@@ -533,17 +567,20 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
8) * 8;
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
- req->NegotiateContextCount = cpu_to_le16(5);
- } else
- req->NegotiateContextCount = cpu_to_le16(4);
+ neg_context_count++;
+ }
- ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
- server->hostname);
- *total_len += ctxt_len;
- pneg_ctxt += ctxt_len;
+ if (enable_negotiate_signing) {
+ ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
+ pneg_ctxt);
+ *total_len += ctxt_len;
+ pneg_ctxt += ctxt_len;
+ neg_context_count++;
+ }
+
+ /* check for and add transport_capabilities and signing capabilities */
+ req->NegotiateContextCount = cpu_to_le16(neg_context_count);
- build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
- *total_len += sizeof(struct smb2_posix_neg_context);
}
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
@@ -632,6 +669,31 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
return 0;
}
+static void decode_signing_ctx(struct TCP_Server_Info *server,
+ struct smb2_signing_capabilities *pctxt)
+{
+ unsigned int len = le16_to_cpu(pctxt->DataLength);
+
+ if ((len < 4) || (len > 16)) {
+ pr_warn_once("server sent bad signing negcontext\n");
+ return;
+ }
+ if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
+ pr_warn_once("Invalid signing algorithm count\n");
+ return;
+ }
+ if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
+ pr_warn_once("unknown signing algorithm\n");
+ return;
+ }
+
+ server->signing_negotiated = true;
+ server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
+ cifs_dbg(FYI, "signing algorithm %d chosen\n",
+ server->signing_algorithm);
+}
+
+
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
struct TCP_Server_Info *server,
unsigned int len_of_smb)
@@ -675,6 +737,9 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
(struct smb2_compression_capabilities_context *)pctx);
else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
server->posix_ext_supported = true;
+ else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
+ decode_signing_ctx(server,
+ (struct smb2_signing_capabilities *)pctx);
else
cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
le16_to_cpu(pctx->ContextType));
@@ -2361,7 +2426,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
memcpy(aclptr, &acl, sizeof(struct cifs_acl));
buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
- *len = ptr - (__u8 *)buf;
+ *len = roundup(ptr - (__u8 *)buf, 8);
return buf;
}
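
The smb2pdu.c hunks above pad each new negotiate context to an 8-byte boundary, because some servers reject unpadded contexts; DataLength covers only the bytes after the generic context header, while the returned length covers the whole context. A minimal user-space sketch of that arithmetic follows (the 8- and 2-byte sizes are assumed stand-ins for the real struct sizes, and DIV_ROUND_UP is reimplemented locally):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hdr_len  = 8;	/* assumed size of the generic smb2_neg_context header */
	unsigned int body_len = 2;	/* assumed size of the SigningAlgorithmCount field */
	unsigned short num_algs = 1;	/* one algorithm, as in build_signing_ctxt() */

	/* on-the-wire DataLength: payload after the header, padded to 8 bytes */
	unsigned int data_len = DIV_ROUND_UP(body_len + num_algs * 2, 8) * 8;

	/* bytes actually consumed in the request buffer, header included */
	unsigned int ctxt_len = DIV_ROUND_UP(hdr_len + body_len + num_algs * 2, 8) * 8;

	printf("DataLength=%u, total context length=%u\n", data_len, ctxt_len);
	return 0;
}
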
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index a5c48b85549a..e9cac7970b66 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -13,7 +13,7 @@
#define _SMB2PDU_H
#include <net/sock.h>
-#include <cifsacl.h>
+#include "cifsacl.h"
/*
* Note that, due to trying to use names similar to the protocol specifications,
@@ -329,7 +329,7 @@ struct smb2_neg_context {
__le16 ContextType;
__le16 DataLength;
__le32 Reserved;
- /* Followed by array of data */
+ /* Followed by array of data. NOTE: some servers require padding to 8 byte boundary */
} __packed;
#define SMB311_LINUX_CLIENT_SALT_SIZE 32
@@ -394,6 +394,8 @@ struct smb2_compression_capabilities_context {
__u16 Padding;
__u32 Flags;
__le16 CompressionAlgorithms[3];
+ __u16 Pad; /* Some servers require pad to DataLen multiple of 8 */
+ /* Check if pad needed */
} __packed;
/*
@@ -420,6 +422,7 @@ struct smb2_transport_capabilities_context {
__le16 DataLength;
__u32 Reserved;
__le32 Flags;
+ __u32 Pad;
} __packed;
/*
@@ -458,6 +461,7 @@ struct smb2_signing_capabilities {
__u32 Reserved;
__le16 SigningAlgorithmCount;
__le16 SigningAlgorithms[];
+ /* Followed by padding to 8 byte boundary (required by some servers) */
} __packed;
#define POSIX_CTXT_DATA_LEN 16
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f65f9a692ca2..75a95de320cf 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -431,7 +431,9 @@ unmask:
* be taken as the remainder of this one. We need to kill the
* socket so the server throws away the partial SMB
*/
+ spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
trace_smb3_partial_send_reconnect(server->CurrentMid,
server->conn_id, server->hostname);
}
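
The transport.c hunk moves the tcpStatus transition under GlobalMid_Lock so the switch to CifsNeedReconnect is atomic with respect to other code that reads the status under the same lock. A generic sketch of that pattern, with illustrative names rather than the cifs ones:

#include <linux/spinlock.h>

/* Illustrative only: make a shared status transition atomic under a lock. */
enum conn_status { CONN_GOOD, CONN_NEED_RECONNECT };

static DEFINE_SPINLOCK(status_lock);
static enum conn_status status = CONN_GOOD;

static void mark_need_reconnect(void)
{
	spin_lock(&status_lock);
	status = CONN_NEED_RECONNECT;	/* other holders of status_lock see a consistent value */
	spin_unlock(&status_lock);
}
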
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index e26060dae70a..0ad32150611e 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -14,7 +14,7 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-
+#include <linux/uio.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
@@ -77,28 +77,9 @@ static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
return 0;
}
-/**
- * configfs_read_file - read an attribute.
- * @file: file pointer.
- * @buf: buffer to fill.
- * @count: number of bytes to read.
- * @ppos: starting offset in file.
- *
- * Userspace wants to read an attribute file. The attribute descriptor
- * is in the file's ->d_fsdata. The target item is in the directory's
- * ->d_fsdata.
- *
- * We call fill_read_buffer() to allocate and fill the buffer from the
- * item's show() method exactly once (if the read is happening from
- * the beginning of the file). That should fill the entire buffer with
- * all the data the item has to offer for that attribute.
- * We then call flush_read_buffer() to copy the buffer to userspace
- * in the increments specified.
- */
-
-static ssize_t
-configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+static ssize_t configfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
@@ -108,43 +89,27 @@ configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *pp
if (retval)
goto out;
}
- pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
- __func__, count, *ppos, buffer->page);
- retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
- buffer->count);
+ pr_debug("%s: count = %zd, pos = %lld, buf = %s\n",
+ __func__, iov_iter_count(to), iocb->ki_pos, buffer->page);
+ if (iocb->ki_pos >= buffer->count)
+ goto out;
+ retval = copy_to_iter(buffer->page + iocb->ki_pos,
+ buffer->count - iocb->ki_pos, to);
+ iocb->ki_pos += retval;
+ if (retval == 0)
+ retval = -EFAULT;
out:
mutex_unlock(&buffer->mutex);
return retval;
}
-/**
- * configfs_read_bin_file - read a binary attribute.
- * @file: file pointer.
- * @buf: buffer to fill.
- * @count: number of bytes to read.
- * @ppos: starting offset in file.
- *
- * Userspace wants to read a binary attribute file. The attribute
- * descriptor is in the file's ->d_fsdata. The target item is in the
- * directory's ->d_fsdata.
- *
- * We check whether we need to refill the buffer. If so we will
- * call the attributes' attr->read() twice. The first time we
- * will pass a NULL as a buffer pointer, which the attributes' method
- * will use to return the size of the buffer required. If no error
- * occurs we will allocate the buffer using vmalloc and call
- * attr->read() again passing that buffer as an argument.
- * Then we just copy to user-space using simple_read_from_buffer.
- */
-
-static ssize_t
-configfs_read_bin_file(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
- ssize_t len = min_t(size_t, count, PAGE_SIZE);
+ ssize_t len;
mutex_lock(&buffer->mutex);
@@ -200,42 +165,35 @@ configfs_read_bin_file(struct file *file, char __user *buf,
buffer->needs_read_fill = 0;
}
- retval = simple_read_from_buffer(buf, count, ppos, buffer->bin_buffer,
- buffer->bin_buffer_size);
+ if (iocb->ki_pos >= buffer->bin_buffer_size)
+ goto out;
+ retval = copy_to_iter(buffer->bin_buffer + iocb->ki_pos,
+ buffer->bin_buffer_size - iocb->ki_pos, to);
+ iocb->ki_pos += retval;
+ if (retval == 0)
+ retval = -EFAULT;
out:
mutex_unlock(&buffer->mutex);
return retval;
}
-
-/**
- * fill_write_buffer - copy buffer from userspace.
- * @buffer: data buffer for file.
- * @buf: data from user.
- * @count: number of bytes in @userbuf.
- *
- * Allocate @buffer->page if it hasn't been already, then
- * copy the user-supplied buffer into it.
- */
-
-static int
-fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
+/* Fill @buffer with data coming from @from. */
+static int fill_write_buffer(struct configfs_buffer *buffer,
+ struct iov_iter *from)
{
- int error;
+ int copied;
if (!buffer->page)
buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
if (!buffer->page)
return -ENOMEM;
- if (count >= SIMPLE_ATTR_SIZE)
- count = SIMPLE_ATTR_SIZE - 1;
- error = copy_from_user(buffer->page,buf,count);
+ copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
buffer->needs_read_fill = 1;
/* if buf is assumed to contain a string, terminate it by \0,
* so e.g. sscanf() can scan the string easily */
- buffer->page[count] = 0;
- return error ? -EFAULT : count;
+ buffer->page[copied] = 0;
+ return copied ? : -EFAULT;
}
static int
@@ -252,58 +210,36 @@ flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t cou
}
-/**
- * configfs_write_file - write an attribute.
- * @file: file pointer
- * @buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
- * Similar to configfs_read_file(), though working in the opposite direction.
- * We allocate and fill the data from the user in fill_write_buffer(),
- * then push it to the config_item in flush_write_buffer().
- * There is no easy way for us to know if userspace is only doing a partial
- * write, so we don't support them. We expect the entire buffer to come
- * on the first write.
- * Hint: if you're writing a value, first read the file, modify only
- * the value you're changing, then write entire buffer back.
+/*
+ * There is no easy way for us to know if userspace is only doing a partial
+ * write, so we don't support them. We expect the entire buffer to come on the
+ * first write.
+ * Hint: if you're writing a value, first read the file, modify only the value
+ * you're changing, then write entire buffer back.
*/
-
-static ssize_t
-configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct configfs_buffer *buffer = file->private_data;
- ssize_t len;
+ int len;
mutex_lock(&buffer->mutex);
- len = fill_write_buffer(buffer, buf, count);
+ len = fill_write_buffer(buffer, from);
if (len > 0)
len = flush_write_buffer(file, buffer, len);
if (len > 0)
- *ppos += len;
+ iocb->ki_pos += len;
mutex_unlock(&buffer->mutex);
return len;
}
-/**
- * configfs_write_bin_file - write a binary attribute.
- * @file: file pointer
- * @buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
- * Writing to a binary attribute file is similar to a normal read.
- * We buffer the consecutive writes (binary attribute files do not
- * support lseek) in a continuously growing buffer, but we don't
- * commit until the close of the file.
- */
-
-static ssize_t
-configfs_write_bin_file(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t configfs_bin_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
struct configfs_buffer *buffer = file->private_data;
void *tbuf = NULL;
+ size_t end_offset;
ssize_t len;
mutex_lock(&buffer->mutex);
@@ -316,15 +252,14 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
buffer->write_in_progress = true;
/* buffer grows? */
- if (*ppos + count > buffer->bin_buffer_size) {
-
- if (buffer->cb_max_size &&
- *ppos + count > buffer->cb_max_size) {
+ end_offset = iocb->ki_pos + iov_iter_count(from);
+ if (end_offset > buffer->bin_buffer_size) {
+ if (buffer->cb_max_size && end_offset > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
- tbuf = vmalloc(*ppos + count);
+ tbuf = vmalloc(end_offset);
if (tbuf == NULL) {
len = -ENOMEM;
goto out;
@@ -339,16 +274,17 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
/* clear the new area */
memset(tbuf + buffer->bin_buffer_size, 0,
- *ppos + count - buffer->bin_buffer_size);
+ end_offset - buffer->bin_buffer_size);
buffer->bin_buffer = tbuf;
- buffer->bin_buffer_size = *ppos + count;
+ buffer->bin_buffer_size = end_offset;
}
- len = simple_write_to_buffer(buffer->bin_buffer,
- buffer->bin_buffer_size, ppos, buf, count);
+ len = copy_from_iter(buffer->bin_buffer + iocb->ki_pos,
+ buffer->bin_buffer_size - iocb->ki_pos, from);
+ iocb->ki_pos += len;
out:
mutex_unlock(&buffer->mutex);
- return len;
+ return len ? : -EFAULT;
}
static int __configfs_open_file(struct inode *inode, struct file *file, int type)
@@ -466,11 +402,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
struct configfs_buffer *buffer = file->private_data;
- buffer->read_in_progress = false;
-
if (buffer->write_in_progress) {
struct configfs_fragment *frag = to_frag(file);
- buffer->write_in_progress = false;
down_read(&frag->frag_sem);
if (!frag->frag_dead) {
@@ -480,29 +413,26 @@ static int configfs_release_bin_file(struct inode *inode, struct file *file)
buffer->bin_buffer_size);
}
up_read(&frag->frag_sem);
- /* vfree on NULL is safe */
- vfree(buffer->bin_buffer);
- buffer->bin_buffer = NULL;
- buffer->bin_buffer_size = 0;
- buffer->needs_read_fill = 1;
}
+ vfree(buffer->bin_buffer);
+
configfs_release(inode, file);
return 0;
}
const struct file_operations configfs_file_operations = {
- .read = configfs_read_file,
- .write = configfs_write_file,
+ .read_iter = configfs_read_iter,
+ .write_iter = configfs_write_iter,
.llseek = generic_file_llseek,
.open = configfs_open_file,
.release = configfs_release,
};
const struct file_operations configfs_bin_file_operations = {
- .read = configfs_read_bin_file,
- .write = configfs_write_bin_file,
+ .read_iter = configfs_bin_read_iter,
+ .write_iter = configfs_bin_write_iter,
.llseek = NULL, /* bin file is not seekable */
.open = configfs_open_bin_file,
.release = configfs_release_bin_file,
@@ -532,7 +462,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
/**
* configfs_create_bin_file - create a binary attribute file for an item.
* @item: item we're creating for.
- * @attr: atrribute descriptor.
+ * @bin_attr: attribute descriptor.
*/
int configfs_create_bin_file(struct config_item *item,
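
The configfs conversion drops simple_read_from_buffer() in favour of an explicit clamp, copy and position update against iocb->ki_pos. A user-space analogue of that positional read, with memcpy() standing in for copy_to_iter() and illustrative names throughout:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>	/* ssize_t, off_t */

/*
 * User-space analogue of the read_iter logic above: clamp at the buffer
 * size, copy what fits, advance the position.
 */
static ssize_t attr_read(const char *page, size_t page_len,
			 char *dst, size_t count, off_t *pos)
{
	size_t avail;

	if (*pos < 0 || (size_t)*pos >= page_len)
		return 0;			/* nothing left to read */
	avail = page_len - (size_t)*pos;
	if (count > avail)
		count = avail;
	memcpy(dst, page + *pos, count);
	*pos += count;
	return count;
}

int main(void)
{
	const char page[] = "42\n";		/* what a ->show() method produced */
	char out[8];
	off_t pos = 0;
	ssize_t n = attr_read(page, sizeof(page) - 1, out, sizeof(out), &pos);

	printf("read %zd bytes, position now %lld\n", n, (long long)pos);
	return 0;
}
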
diff --git a/fs/coredump.c b/fs/coredump.c
index c3d8fc14b993..07afb5ddb1c4 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -755,8 +755,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
task_lock(&init_task);
get_fs_root(init_task.fs, &root);
task_unlock(&init_task);
- cprm.file = file_open_root(root.dentry, root.mnt,
- cn.corename, open_flags, 0600);
+ cprm.file = file_open_root(&root, cn.corename,
+ open_flags, 0600);
path_put(&root);
} else {
cprm.file = filp_open(cn.corename, open_flags, 0600);
diff --git a/fs/d_path.c b/fs/d_path.c
index 270d62133996..23a53f7b5c71 100644
--- a/fs/d_path.c
+++ b/fs/d_path.c
@@ -8,14 +8,27 @@
#include <linux/prefetch.h>
#include "mount.h"
-static int prepend(char **buffer, int *buflen, const char *str, int namelen)
+struct prepend_buffer {
+ char *buf;
+ int len;
+};
+#define DECLARE_BUFFER(__name, __buf, __len) \
+ struct prepend_buffer __name = {.buf = __buf + __len, .len = __len}
+
+static char *extract_string(struct prepend_buffer *p)
{
- *buflen -= namelen;
- if (*buflen < 0)
- return -ENAMETOOLONG;
- *buffer -= namelen;
- memcpy(*buffer, str, namelen);
- return 0;
+ if (likely(p->len >= 0))
+ return p->buf;
+ return ERR_PTR(-ENAMETOOLONG);
+}
+
+static void prepend(struct prepend_buffer *p, const char *str, int namelen)
+{
+ p->len -= namelen;
+ if (likely(p->len >= 0)) {
+ p->buf -= namelen;
+ memcpy(p->buf, str, namelen);
+ }
}
/**
@@ -35,22 +48,58 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
*
* Load acquire is needed to make sure that we see that terminating NUL.
*/
-static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
+static bool prepend_name(struct prepend_buffer *p, const struct qstr *name)
{
const char *dname = smp_load_acquire(&name->name); /* ^^^ */
u32 dlen = READ_ONCE(name->len);
- char *p;
+ char *s;
- *buflen -= dlen + 1;
- if (*buflen < 0)
- return -ENAMETOOLONG;
- p = *buffer -= dlen + 1;
- *p++ = '/';
+ p->len -= dlen + 1;
+ if (unlikely(p->len < 0))
+ return false;
+ s = p->buf -= dlen + 1;
+ *s++ = '/';
while (dlen--) {
char c = *dname++;
if (!c)
break;
- *p++ = c;
+ *s++ = c;
+ }
+ return true;
+}
+
+static int __prepend_path(const struct dentry *dentry, const struct mount *mnt,
+ const struct path *root, struct prepend_buffer *p)
+{
+ while (dentry != root->dentry || &mnt->mnt != root->mnt) {
+ const struct dentry *parent = READ_ONCE(dentry->d_parent);
+
+ if (dentry == mnt->mnt.mnt_root) {
+ struct mount *m = READ_ONCE(mnt->mnt_parent);
+ struct mnt_namespace *mnt_ns;
+
+ if (likely(mnt != m)) {
+ dentry = READ_ONCE(mnt->mnt_mountpoint);
+ mnt = m;
+ continue;
+ }
+ /* Global root */
+ mnt_ns = READ_ONCE(mnt->mnt_ns);
+ /* open-coded is_mounted() to use local mnt_ns */
+ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
+ return 1; // absolute root
+ else
+ return 2; // detached or not attached yet
+ }
+
+ if (unlikely(dentry == parent))
+ /* Escaped? */
+ return 3;
+
+ prefetch(parent);
+ if (!prepend_name(p, &dentry->d_name))
+ break;
+ dentry = parent;
}
return 0;
}
@@ -74,15 +123,11 @@ static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
*/
static int prepend_path(const struct path *path,
const struct path *root,
- char **buffer, int *buflen)
+ struct prepend_buffer *p)
{
- struct dentry *dentry;
- struct vfsmount *vfsmnt;
- struct mount *mnt;
- int error = 0;
unsigned seq, m_seq = 0;
- char *bptr;
- int blen;
+ struct prepend_buffer b;
+ int error;
rcu_read_lock();
restart_mnt:
@@ -90,50 +135,9 @@ restart_mnt:
seq = 0;
rcu_read_lock();
restart:
- bptr = *buffer;
- blen = *buflen;
- error = 0;
- dentry = path->dentry;
- vfsmnt = path->mnt;
- mnt = real_mount(vfsmnt);
+ b = *p;
read_seqbegin_or_lock(&rename_lock, &seq);
- while (dentry != root->dentry || vfsmnt != root->mnt) {
- struct dentry * parent;
-
- if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
- struct mount *parent = READ_ONCE(mnt->mnt_parent);
- struct mnt_namespace *mnt_ns;
-
- /* Escaped? */
- if (dentry != vfsmnt->mnt_root) {
- bptr = *buffer;
- blen = *buflen;
- error = 3;
- break;
- }
- /* Global root? */
- if (mnt != parent) {
- dentry = READ_ONCE(mnt->mnt_mountpoint);
- mnt = parent;
- vfsmnt = &mnt->mnt;
- continue;
- }
- mnt_ns = READ_ONCE(mnt->mnt_ns);
- /* open-coded is_mounted() to use local mnt_ns */
- if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
- error = 1; // absolute root
- else
- error = 2; // detached or not attached yet
- break;
- }
- parent = dentry->d_parent;
- prefetch(parent);
- error = prepend_name(&bptr, &blen, &dentry->d_name);
- if (error)
- break;
-
- dentry = parent;
- }
+ error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b);
if (!(seq & 1))
rcu_read_unlock();
if (need_seqretry(&rename_lock, seq)) {
@@ -150,14 +154,13 @@ restart:
}
done_seqretry(&mount_lock, m_seq);
- if (error >= 0 && bptr == *buffer) {
- if (--blen < 0)
- error = -ENAMETOOLONG;
- else
- *--bptr = '/';
- }
- *buffer = bptr;
- *buflen = blen;
+ if (unlikely(error == 3))
+ b = *p;
+
+ if (b.len == p->len)
+ prepend(&b, "/", 1);
+
+ *p = b;
return error;
}
@@ -181,56 +184,24 @@ char *__d_path(const struct path *path,
const struct path *root,
char *buf, int buflen)
{
- char *res = buf + buflen;
- int error;
-
- prepend(&res, &buflen, "\0", 1);
- error = prepend_path(path, root, &res, &buflen);
+ DECLARE_BUFFER(b, buf, buflen);
- if (error < 0)
- return ERR_PTR(error);
- if (error > 0)
+ prepend(&b, "", 1);
+ if (unlikely(prepend_path(path, root, &b) > 0))
return NULL;
- return res;
+ return extract_string(&b);
}
char *d_absolute_path(const struct path *path,
char *buf, int buflen)
{
struct path root = {};
- char *res = buf + buflen;
- int error;
-
- prepend(&res, &buflen, "\0", 1);
- error = prepend_path(path, &root, &res, &buflen);
-
- if (error > 1)
- error = -EINVAL;
- if (error < 0)
- return ERR_PTR(error);
- return res;
-}
-
-/*
- * same as __d_path but appends "(deleted)" for unlinked files.
- */
-static int path_with_deleted(const struct path *path,
- const struct path *root,
- char **buf, int *buflen)
-{
- prepend(buf, buflen, "\0", 1);
- if (d_unlinked(path->dentry)) {
- int error = prepend(buf, buflen, " (deleted)", 10);
- if (error)
- return error;
- }
-
- return prepend_path(path, root, buf, buflen);
-}
+ DECLARE_BUFFER(b, buf, buflen);
-static int prepend_unreachable(char **buffer, int *buflen)
-{
- return prepend(buffer, buflen, "(unreachable)", 13);
+ prepend(&b, "", 1);
+ if (unlikely(prepend_path(path, &root, &b) > 1))
+ return ERR_PTR(-EINVAL);
+ return extract_string(&b);
}
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
@@ -261,9 +232,8 @@ static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
*/
char *d_path(const struct path *path, char *buf, int buflen)
{
- char *res = buf + buflen;
+ DECLARE_BUFFER(b, buf, buflen);
struct path root;
- int error;
/*
* We have various synthetic filesystems that never get mounted. On
@@ -282,12 +252,14 @@ char *d_path(const struct path *path, char *buf, int buflen)
rcu_read_lock();
get_fs_root_rcu(current->fs, &root);
- error = path_with_deleted(path, &root, &res, &buflen);
+ if (unlikely(d_unlinked(path->dentry)))
+ prepend(&b, " (deleted)", 11);
+ else
+ prepend(&b, "", 1);
+ prepend_path(path, &root, &b);
rcu_read_unlock();
- if (error < 0)
- res = ERR_PTR(error);
- return res;
+ return extract_string(&b);
}
EXPORT_SYMBOL(d_path);
@@ -314,47 +286,34 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
- char *end = buffer + buflen;
+ DECLARE_BUFFER(b, buffer, buflen);
/* these dentries are never renamed, so d_lock is not needed */
- if (prepend(&end, &buflen, " (deleted)", 11) ||
- prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
- prepend(&end, &buflen, "/", 1))
- end = ERR_PTR(-ENAMETOOLONG);
- return end;
+ prepend(&b, " (deleted)", 11);
+ prepend(&b, dentry->d_name.name, dentry->d_name.len);
+ prepend(&b, "/", 1);
+ return extract_string(&b);
}
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
-static char *__dentry_path(const struct dentry *d, char *buf, int buflen)
+static char *__dentry_path(const struct dentry *d, struct prepend_buffer *p)
{
const struct dentry *dentry;
- char *end, *retval;
- int len, seq = 0;
- int error = 0;
-
- if (buflen < 2)
- goto Elong;
+ struct prepend_buffer b;
+ int seq = 0;
rcu_read_lock();
restart:
dentry = d;
- end = buf + buflen;
- len = buflen;
- prepend(&end, &len, "\0", 1);
- /* Get '/' right */
- retval = end-1;
- *retval = '/';
+ b = *p;
read_seqbegin_or_lock(&rename_lock, &seq);
while (!IS_ROOT(dentry)) {
const struct dentry *parent = dentry->d_parent;
prefetch(parent);
- error = prepend_name(&end, &len, &dentry->d_name);
- if (error)
+ if (!prepend_name(&b, &dentry->d_name))
break;
-
- retval = end;
dentry = parent;
}
if (!(seq & 1))
@@ -364,36 +323,29 @@ restart:
goto restart;
}
done_seqretry(&rename_lock, seq);
- if (error)
- goto Elong;
- return retval;
-Elong:
- return ERR_PTR(-ENAMETOOLONG);
+ if (b.len == p->len)
+ prepend(&b, "/", 1);
+ return extract_string(&b);
}
char *dentry_path_raw(const struct dentry *dentry, char *buf, int buflen)
{
- return __dentry_path(dentry, buf, buflen);
+ DECLARE_BUFFER(b, buf, buflen);
+
+ prepend(&b, "", 1);
+ return __dentry_path(dentry, &b);
}
EXPORT_SYMBOL(dentry_path_raw);
char *dentry_path(const struct dentry *dentry, char *buf, int buflen)
{
- char *p = NULL;
- char *retval;
-
- if (d_unlinked(dentry)) {
- p = buf + buflen;
- if (prepend(&p, &buflen, "//deleted", 10) != 0)
- goto Elong;
- buflen++;
- }
- retval = __dentry_path(dentry, buf, buflen);
- if (!IS_ERR(retval) && p)
- *p = '/'; /* restore '/' overriden with '\0' */
- return retval;
-Elong:
- return ERR_PTR(-ENAMETOOLONG);
+ DECLARE_BUFFER(b, buf, buflen);
+
+ if (unlikely(d_unlinked(dentry)))
+ prepend(&b, "//deleted", 10);
+ else
+ prepend(&b, "", 1);
+ return __dentry_path(dentry, &b);
}
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
@@ -438,38 +390,28 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
rcu_read_lock();
get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
- error = -ENOENT;
- if (!d_unlinked(pwd.dentry)) {
- unsigned long len;
- char *cwd = page + PATH_MAX;
- int buflen = PATH_MAX;
-
- prepend(&cwd, &buflen, "\0", 1);
- error = prepend_path(&pwd, &root, &cwd, &buflen);
+ if (unlikely(d_unlinked(pwd.dentry))) {
rcu_read_unlock();
+ error = -ENOENT;
+ } else {
+ unsigned len;
+ DECLARE_BUFFER(b, page, PATH_MAX);
- if (error < 0)
- goto out;
-
- /* Unreachable from current root */
- if (error > 0) {
- error = prepend_unreachable(&cwd, &buflen);
- if (error)
- goto out;
- }
+ prepend(&b, "", 1);
+ if (unlikely(prepend_path(&pwd, &root, &b) > 0))
+ prepend(&b, "(unreachable)", 13);
+ rcu_read_unlock();
- error = -ERANGE;
- len = PATH_MAX + page - cwd;
- if (len <= size) {
+ len = PATH_MAX - b.len;
+ if (unlikely(len > PATH_MAX))
+ error = -ENAMETOOLONG;
+ else if (unlikely(len > size))
+ error = -ERANGE;
+ else if (copy_to_user(buf, b.buf, len))
+ error = -EFAULT;
+ else
error = len;
- if (copy_to_user(buf, cwd, len))
- error = -EFAULT;
- }
- } else {
- rcu_read_unlock();
}
-
-out:
__putname(page);
return error;
}
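
The d_path.c rewrite replaces the (buffer, buflen) pointer pair with a small prepend_buffer that is filled right to left and checked for overflow only once, when the finished string is extracted. A self-contained user-space sketch of the same idea (simplified error handling, not the kernel implementation):

#include <stdio.h>
#include <string.h>

struct prepend_buffer {
	char *buf;	/* current write position (moves backwards) */
	int len;	/* space still available; negative on overflow */
};

#define DECLARE_BUFFER(name, base, size) \
	struct prepend_buffer name = { .buf = (base) + (size), .len = (size) }

static void prepend(struct prepend_buffer *p, const char *str, int namelen)
{
	p->len -= namelen;
	if (p->len >= 0) {
		p->buf -= namelen;
		memcpy(p->buf, str, namelen);
	}
}

static char *extract_string(struct prepend_buffer *p)
{
	return p->len >= 0 ? p->buf : NULL;	/* kernel returns ERR_PTR(-ENAMETOOLONG) */
}

int main(void)
{
	char raw[32];
	DECLARE_BUFFER(b, raw, sizeof(raw));
	const char *names[] = { "bin", "local", "usr" };	/* leaf to root */
	int i;

	prepend(&b, "", 1);			/* terminating NUL */
	for (i = 0; i < 3; i++) {
		prepend(&b, names[i], (int)strlen(names[i]));
		prepend(&b, "/", 1);
	}
	printf("%s\n", extract_string(&b));	/* prints /usr/local/bin */
	return 0;
}
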
diff --git a/fs/dax.c b/fs/dax.c
index da41f9363568..99b4e78d888f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -722,7 +722,7 @@ static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_d
return rc;
id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
+ rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index ba7c01cd9a5d..df00231d3ecc 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -582,22 +582,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value,
- &fops_ulong, &fops_ulong_ro,
- &fops_ulong_wo);
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_ulong,
+ &fops_ulong_ro, &fops_ulong_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_ulong);
@@ -846,20 +836,11 @@ static const struct file_operations fops_bool_wo = {
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value)
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_bool,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_bool,
&fops_bool_ro, &fops_bool_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_bool);
@@ -980,7 +961,8 @@ static const struct file_operations fops_blob = {
/**
* debugfs_create_blob - create a debugfs file that is used to read a binary blob
* @name: a pointer to a string containing the name of the file to create.
- * @mode: the permission that the file should have
+ * @mode: the read permission that the file should have (other permissions are
+ * masked out)
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
@@ -1004,7 +986,7 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
- return debugfs_create_file_unsafe(name, mode, parent, blob, &fops_blob);
+ return debugfs_create_file_unsafe(name, mode & 0444, parent, blob, &fops_blob);
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
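
With this change debugfs_create_blob() masks @mode down to the read bits, so blobs are always read-only, and like the other converted helpers its return value rarely needs checking. A hedged usage sketch; the directory and file names here are made up for illustration:

#include <linux/debugfs.h>

/* Illustrative only: expose a read-only binary blob under debugfs. */
static struct debugfs_blob_wrapper fw_blob;	/* hypothetical dump buffer */
static struct dentry *example_dir;

static void example_debugfs_init(void *data, unsigned long size)
{
	fw_blob.data = data;
	fw_blob.size = size;

	example_dir = debugfs_create_dir("example", NULL);
	/* any write bits in the mode are masked out after this patch */
	debugfs_create_blob("fw_dump", 0644, example_dir, &fw_blob);
}
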
diff --git a/fs/exec.c b/fs/exec.c
index f2bcdbeb3afb..38f63451b928 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -84,9 +84,6 @@ static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
- BUG_ON(!fmt);
- if (WARN_ON(!fmt->load_binary))
- return;
write_lock(&binfmt_lock);
insert ? list_add(&fmt->lh, &formats) :
list_add_tail(&fmt->lh, &formats);
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index c4523648472a..cb1c0d8c1714 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -63,7 +63,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_entry *dir_entry)
{
int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
- unsigned int type, clu_offset;
+ unsigned int type, clu_offset, max_dentries;
sector_t sector;
struct exfat_chain dir, clu;
struct exfat_uni_name uni_name;
@@ -86,6 +86,8 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
dentries_per_clu = sbi->dentries_per_clu;
dentries_per_clu_bits = ilog2(dentries_per_clu);
+ max_dentries = (unsigned int)min_t(u64, MAX_EXFAT_DENTRIES,
+ (u64)sbi->num_clusters << dentries_per_clu_bits);
clu_offset = dentry >> dentries_per_clu_bits;
exfat_chain_dup(&clu, &dir);
@@ -109,7 +111,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
}
}
- while (clu.dir != EXFAT_EOF_CLUSTER) {
+ while (clu.dir != EXFAT_EOF_CLUSTER && dentry < max_dentries) {
i = dentry & (dentries_per_clu - 1);
for ( ; i < dentries_per_clu; i++, dentry++) {
@@ -245,7 +247,7 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
if (err)
goto unlock;
get_new:
- if (cpos >= i_size_read(inode))
+ if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
goto end_of_dir;
err = exfat_readdir(inode, &cpos, &de);
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index d38d17a77e76..5539ffc20d16 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -690,7 +690,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
if (!sb->s_root) {
exfat_err(sb, "failed to get the root dentry");
err = -ENOMEM;
- goto put_inode;
+ goto free_table;
}
return 0;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 14292dba3a12..2c2f179b6977 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
return err;
}
-static bool ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
{
struct inode *dir = page->mapping->host;
struct super_block *sb = dir->i_sb;
unsigned chunk_size = ext2_chunk_size(dir);
- char *kaddr = page_address(page);
u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
unsigned offs, rec_len;
unsigned limit = PAGE_SIZE;
@@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
if (!IS_ERR(page)) {
*page_addr = kmap_local_page(page);
if (unlikely(!PageChecked(page))) {
- if (PageError(page) || !ext2_check_page(page, quiet))
+ if (PageError(page) || !ext2_check_page(page, quiet,
+ *page_addr))
goto fail;
}
}
@@ -584,10 +584,10 @@ out_unlock:
* ext2_delete_entry deletes a directory entry by merging it with the
* previous entry. Page is up-to-date.
*/
-int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
+ char *kaddr)
{
struct inode *inode = page->mapping->host;
- char *kaddr = page_address(page);
unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
unsigned to = ((char *)dir - kaddr) +
ext2_rec_len_from_disk(dir->rec_len);
@@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
de = ext2_next_entry(de);
}
if (pde)
- from = (char*)pde - (char*)page_address(page);
+ from = (char *)pde - kaddr;
pos = page_offset(page) + from;
lock_page(page);
err = ext2_prepare_chunk(page, pos, to - from);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index b0a694820cb7..e512630cb63e 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -740,7 +740,8 @@ extern int ext2_inode_by_name(struct inode *dir,
extern int ext2_make_empty(struct inode *, struct inode *);
extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
struct page **, void **res_page_addr);
-extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
+ char *kaddr);
extern int ext2_empty_dir (struct inode *);
extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 1f69b81655b6..5f6b7560eb3f 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
goto out;
}
- err = ext2_delete_entry (de, page);
+ err = ext2_delete_entry (de, page, page_addr);
ext2_put_page(page, page_addr);
if (err)
goto out;
@@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns,
old_inode->i_ctime = current_time(old_inode);
mark_inode_dirty(old_inode);
- ext2_delete_entry(old_de, old_page);
+ ext2_delete_entry(old_de, old_page, old_page_addr);
if (dir_de) {
if (old_dir != new_dir)
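
The ext2 hunks pass the kmap_local_page() address down to ext2_check_page() and ext2_delete_entry() instead of recomputing it with page_address(), which is not valid for locally mapped highmem pages. A generic sketch of that pattern with illustrative helper names:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Illustrative only: helpers take the mapped address from their caller. */
static void scan_dir_chunk(char *kaddr, unsigned int len)
{
	unsigned int off;

	/* use kaddr, never page_address(page), for the mapped contents */
	for (off = 0; off < len; off++)
		(void)kaddr[off];
}

static void example_caller(struct page *page)
{
	char *kaddr = kmap_local_page(page);

	scan_dir_chunk(kaddr, PAGE_SIZE);
	kunmap_local(kaddr);
}
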
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index be799040a415..b60f0152ea57 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -244,9 +244,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
* "bh" may be NULL: a metadata block may have been freed from memory
* but there may still be a record of it in the journal, and that record
* still needs to be revoked.
- *
- * If the handle isn't valid we're not journaling, but we still need to
- * call into ext4_journal_revoke() to put the buffer head.
*/
int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
int is_metadata, struct inode *inode,
@@ -327,6 +324,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
set_buffer_meta(bh);
set_buffer_prio(bh);
+ set_buffer_uptodate(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
/* Errors can only happen due to aborted journal or a nasty bug */
@@ -355,7 +353,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
err);
}
} else {
- set_buffer_uptodate(bh);
if (inode)
mark_buffer_dirty_inode(bh, inode);
else
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e27f34bceb8d..6eed6170aded 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -692,6 +692,13 @@ static long ext4_ioctl_group_add(struct file *file,
if (err)
return err;
+ if (ext4_has_feature_bigalloc(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online resizing not supported with bigalloc");
+ err = -EOPNOTSUPP;
+ goto group_add_out;
+ }
+
err = mnt_want_write_file(file);
if (err)
goto group_add_out;
@@ -816,7 +823,7 @@ static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
if (!EXT4_SB(sb)->s_journal)
return -ENODEV;
- if (flags & ~JBD2_JOURNAL_FLUSH_VALID)
+ if (flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID)
return -EINVAL;
q = bdev_get_queue(EXT4_SB(sb)->s_journal->j_dev);
@@ -914,6 +921,13 @@ setversion_out:
goto group_extend_out;
}
+ if (ext4_has_feature_bigalloc(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online resizing not supported with bigalloc");
+ err = -EOPNOTSUPP;
+ goto group_extend_out;
+ }
+
err = mnt_want_write_file(filp);
if (err)
goto group_extend_out;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c2c22c2baac0..089c958aa2c3 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1909,10 +1909,11 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
/* Should never happen! (but apparently sometimes does?!?) */
WARN_ON(1);
- ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
- "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
- block, order, needed, ex->fe_group, ex->fe_start,
- ex->fe_len, ex->fe_logical);
+ ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
+ "corruption or bug in mb_find_extent "
+ "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
+ block, order, needed, ex->fe_group, ex->fe_start,
+ ex->fe_len, ex->fe_logical);
ex->fe_len = 0;
ex->fe_start = 0;
ex->fe_group = 0;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 6cb598b549ca..cebea4270817 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -138,7 +138,7 @@ static int kmmpd(void *data)
unsigned mmp_check_interval;
unsigned long last_update_time;
unsigned long diff;
- int retval;
+ int retval = 0;
mmp_block = le64_to_cpu(es->s_mmp_block);
mmp = (struct mmp_struct *)(bh->b_data);
@@ -156,7 +156,12 @@ static int kmmpd(void *data)
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
sizeof(mmp->mmp_nodename));
- while (!kthread_should_stop()) {
+ while (!kthread_should_stop() && !sb_rdonly(sb)) {
+ if (!ext4_has_feature_mmp(sb)) {
+ ext4_warning(sb, "kmmpd being stopped since MMP feature"
+ " has been disabled.");
+ goto wait_to_exit;
+ }
if (++seq > EXT4_MMP_SEQ_MAX)
seq = 1;
@@ -177,16 +182,6 @@ static int kmmpd(void *data)
failed_writes++;
}
- if (!(le32_to_cpu(es->s_feature_incompat) &
- EXT4_FEATURE_INCOMPAT_MMP)) {
- ext4_warning(sb, "kmmpd being stopped since MMP feature"
- " has been disabled.");
- goto exit_thread;
- }
-
- if (sb_rdonly(sb))
- break;
-
diff = jiffies - last_update_time;
if (diff < mmp_update_interval * HZ)
schedule_timeout_interruptible(mmp_update_interval *
@@ -207,7 +202,7 @@ static int kmmpd(void *data)
ext4_error_err(sb, -retval,
"error reading MMP data: %d",
retval);
- goto exit_thread;
+ goto wait_to_exit;
}
mmp_check = (struct mmp_struct *)(bh_check->b_data);
@@ -221,7 +216,7 @@ static int kmmpd(void *data)
ext4_error_err(sb, EBUSY, "abort");
put_bh(bh_check);
retval = -EBUSY;
- goto exit_thread;
+ goto wait_to_exit;
}
put_bh(bh_check);
}
@@ -244,7 +239,13 @@ static int kmmpd(void *data)
retval = write_mmp_block(sb, bh);
-exit_thread:
+wait_to_exit:
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
return retval;
}
@@ -391,5 +392,3 @@ failed:
brelse(bh);
return 1;
}
-
-
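
The kmmpd rework keeps the thread sleeping after it has finished (or failed) its work until kthread_stop() is actually called, the usual way to avoid racing an early return against the stopper. A generic sketch of that wait-to-exit tail, not the ext4 code:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative only: a kthread that parks instead of returning early. */
static int example_thread(void *data)
{
	/* ... main work loop elided; it may finish or fail before being stopped ... */

	/* park here until kthread_stop() is actually called */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
	}
	set_current_state(TASK_RUNNING);
	return 0;
}
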
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5fd56f616cf0..f3bbcd4efb56 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2517,7 +2517,7 @@ again:
goto journal_error;
err = ext4_handle_dirty_dx_node(handle, dir,
frame->bh);
- if (err)
+ if (restart || err)
goto journal_error;
} else {
struct dx_root *dxroot;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index fc885914c88a..7a9f1adef679 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -74,10 +74,6 @@ int ext4_resize_begin(struct super_block *sb)
return -EPERM;
}
- if (ext4_has_feature_bigalloc(sb)) {
- ext4_msg(sb, KERN_ERR, "Online resizing not supported with bigalloc");
- return -EOPNOTSUPP;
- }
if (ext4_has_feature_sparse_super2(sb)) {
ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
return -EOPNOTSUPP;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 20344633bdd9..dfa09a277b56 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -705,15 +705,23 @@ static void flush_stashed_error_work(struct work_struct *work)
* ext4 error handling code during handling of previous errors.
*/
if (!sb_rdonly(sbi->s_sb) && journal) {
+ struct buffer_head *sbh = sbi->s_sbh;
handle = jbd2_journal_start(journal, 1);
if (IS_ERR(handle))
goto write_directly;
- if (jbd2_journal_get_write_access(handle, sbi->s_sbh)) {
+ if (jbd2_journal_get_write_access(handle, sbh)) {
jbd2_journal_stop(handle);
goto write_directly;
}
ext4_update_super(sbi->s_sb);
- if (jbd2_journal_dirty_metadata(handle, sbi->s_sbh)) {
+ if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
+ ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
+ "superblock detected");
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
+
+ if (jbd2_journal_dirty_metadata(handle, sbh)) {
jbd2_journal_stop(handle);
goto write_directly;
}
@@ -1176,7 +1184,6 @@ static void ext4_put_super(struct super_block *sb)
ext4_unregister_sysfs(sb);
if (sbi->s_journal) {
- jbd2_journal_unregister_shrinker(sbi->s_journal);
aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
@@ -5168,7 +5175,6 @@ failed_mount_wq:
sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) {
- jbd2_journal_unregister_shrinker(sbi->s_journal);
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
@@ -5494,12 +5500,6 @@ static int ext4_load_journal(struct super_block *sb,
ext4_commit_super(sb);
}
- err = jbd2_journal_register_shrinker(journal);
- if (err) {
- EXT4_SB(sb)->s_journal = NULL;
- goto err_out;
- }
-
return 0;
err_out:
@@ -5985,7 +5985,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
*/
ext4_mark_recovery_complete(sb, es);
}
- ext4_stop_mmpd(sbi);
} else {
/* Make sure we can mount this feature set readwrite */
if (ext4_has_feature_readonly(sb) ||
@@ -6099,6 +6098,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
+ if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
+ ext4_stop_mmpd(sbi);
+
/*
* Some options can be enabled by ext4 and/or by VFS mount flag
* either way we need to make sure it matches in both *flags and
@@ -6132,6 +6134,8 @@ restore_opts:
for (i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(to_free[i]);
#endif
+ if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
+ ext4_stop_mmpd(sbi);
kfree(orig_data);
return err;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f795049e63d5..6c208108d69c 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -444,7 +444,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
- f2fs_set_page_private(page, 0);
+ set_page_private_reference(page);
return 1;
}
return 0;
@@ -1018,7 +1018,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
inode_inc_dirty_pages(inode);
spin_unlock(&sbi->inode_lock[type]);
- f2fs_set_page_private(page, 0);
+ set_page_private_reference(page);
}
void f2fs_remove_dirty_inode(struct inode *inode)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 925a5ca3744a..455561826c7d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -12,9 +12,11 @@
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
+#include <linux/pagevec.h>
#include "f2fs.h"
#include "node.h"
+#include "segment.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *cic_entry_slab;
@@ -74,7 +76,7 @@ bool f2fs_is_compressed_page(struct page *page)
return false;
if (!page_private(page))
return false;
- if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
+ if (page_private_nonpointer(page))
return false;
f2fs_bug_on(F2FS_M_SB(page->mapping),
@@ -85,8 +87,7 @@ bool f2fs_is_compressed_page(struct page *page)
static void f2fs_set_compressed_page(struct page *page,
struct inode *inode, pgoff_t index, void *data)
{
- SetPagePrivate(page);
- set_page_private(page, (unsigned long)data);
+ attach_page_private(page, (void *)data);
/* i_crypto_info and iv index */
page->index = index;
@@ -589,8 +590,7 @@ static void f2fs_compress_free_page(struct page *page)
{
if (!page)
return;
- set_page_private(page, (unsigned long)NULL);
- ClearPagePrivate(page);
+ detach_page_private(page);
page->mapping = NULL;
unlock_page(page);
mempool_free(page, compress_page_pool);
@@ -738,7 +738,7 @@ out:
return ret;
}
-static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
@@ -837,7 +837,8 @@ out_end_io:
* page being waited on in the cluster, and if so, it decompresses the cluster
* (or in the case of a failure, cleans up without actually decompressing).
*/
-void f2fs_end_read_compressed_page(struct page *page, bool failed)
+void f2fs_end_read_compressed_page(struct page *page, bool failed,
+ block_t blkaddr)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
@@ -847,6 +848,9 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed)
if (failed)
WRITE_ONCE(dic->failed, true);
+ else if (blkaddr)
+ f2fs_cache_compressed_page(sbi, page,
+ dic->inode->i_ino, blkaddr);
if (atomic_dec_and_test(&dic->remaining_pages))
f2fs_decompress_cluster(dic);
@@ -876,7 +880,7 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
-static bool __cluster_may_compress(struct compress_ctx *cc)
+static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
loff_t i_size = i_size_read(cc->inode);
unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
@@ -889,19 +893,22 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
/* beyond EOF */
if (page->index >= nr_pages)
- return false;
+ return true;
}
- return true;
+ return false;
}
-static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
+static int __f2fs_cluster_blocks(struct inode *inode,
+ unsigned int cluster_idx, bool compr)
{
struct dnode_of_data dn;
+ unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
+ unsigned int start_idx = cluster_idx <<
+ F2FS_I(inode)->i_log_cluster_size;
int ret;
- set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
- ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
- LOOKUP_NODE);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
if (ret) {
if (ret == -ENOENT)
ret = 0;
@@ -912,7 +919,7 @@ static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
int i;
ret = 1;
- for (i = 1; i < cc->cluster_size; i++) {
+ for (i = 1; i < cluster_size; i++) {
block_t blkaddr;
blkaddr = data_blkaddr(dn.inode,
@@ -925,6 +932,10 @@ static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
ret++;
}
}
+
+ f2fs_bug_on(F2FS_I_SB(inode),
+ !compr && ret != cluster_size &&
+ !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
}
fail:
f2fs_put_dnode(&dn);
@@ -934,25 +945,15 @@ fail:
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
- return __f2fs_cluster_blocks(cc, true);
+ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}
/* return # of valid blocks in compressed cluster */
-static int f2fs_cluster_blocks(struct compress_ctx *cc)
-{
- return __f2fs_cluster_blocks(cc, false);
-}
-
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
- struct compress_ctx cc = {
- .inode = inode,
- .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
- .cluster_size = F2FS_I(inode)->i_cluster_size,
- .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
- };
-
- return f2fs_cluster_blocks(&cc);
+ return __f2fs_cluster_blocks(inode,
+ index >> F2FS_I(inode)->i_log_cluster_size,
+ false);
}
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -961,13 +962,11 @@ static bool cluster_may_compress(struct compress_ctx *cc)
return false;
if (f2fs_is_atomic_file(cc->inode))
return false;
- if (f2fs_is_mmap_file(cc->inode))
- return false;
if (!f2fs_cluster_is_full(cc))
return false;
if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
return false;
- return __cluster_may_compress(cc);
+ return !cluster_has_invalid_data(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
@@ -995,21 +994,16 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct address_space *mapping = cc->inode->i_mapping;
struct page *page;
- struct dnode_of_data dn;
sector_t last_block_in_bio;
unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret;
- bool prealloc;
retry:
- ret = f2fs_cluster_blocks(cc);
+ ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
if (ret <= 0)
return ret;
- /* compressed case */
- prealloc = (ret < cc->cluster_size);
-
ret = f2fs_init_compress_ctx(cc);
if (ret)
return ret;
@@ -1067,25 +1061,6 @@ release_and_retry:
}
}
- if (prealloc) {
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
-
- set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
-
- for (i = cc->cluster_size - 1; i > 0; i--) {
- ret = f2fs_get_block(&dn, start_idx + i);
- if (ret) {
- i = cc->cluster_size;
- break;
- }
-
- if (dn.data_blkaddr != NEW_ADDR)
- break;
- }
-
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
- }
-
if (likely(!ret)) {
*fsdata = cc->rpages;
*pagep = cc->rpages[offset_in_cluster(cc, index)];
@@ -1216,6 +1191,12 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
+ /* we should bypass data pages to proceed the kworkder jobs */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ goto out_free;
+ }
+
if (IS_NOQUOTA(inode)) {
/*
* We need to wait for node_write to avoid block allocation during
@@ -1399,7 +1380,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
for (i = 0; i < cic->nr_rpages; i++) {
WARN_ON(!cic->rpages[i]);
- clear_cold_data(cic->rpages[i]);
+ clear_page_private_gcing(cic->rpages[i]);
end_page_writeback(cic->rpages[i]);
}
@@ -1685,6 +1666,164 @@ void f2fs_put_page_dic(struct page *page)
f2fs_put_dic(dic);
}
+const struct address_space_operations f2fs_compress_aops = {
+ .releasepage = f2fs_release_page,
+ .invalidatepage = f2fs_invalidate_page,
+};
+
+struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
+{
+ return sbi->compress_inode->i_mapping;
+}
+
+void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ if (!sbi->compress_inode)
+ return;
+ invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
+}
+
+void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+ nid_t ino, block_t blkaddr)
+{
+ struct page *cpage;
+ int ret;
+
+ if (!test_opt(sbi, COMPRESS_CACHE))
+ return;
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
+ return;
+
+ if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
+ return;
+
+ cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
+ if (cpage) {
+ f2fs_put_page(cpage, 0);
+ return;
+ }
+
+ cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
+ if (!cpage)
+ return;
+
+ ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
+ blkaddr, GFP_NOFS);
+ if (ret) {
+ f2fs_put_page(cpage, 0);
+ return;
+ }
+
+ set_page_private_data(cpage, ino);
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
+ goto out;
+
+ memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
+ SetPageUptodate(cpage);
+out:
+ f2fs_put_page(cpage, 1);
+}
+
+bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+ block_t blkaddr)
+{
+ struct page *cpage;
+ bool hitted = false;
+
+ if (!test_opt(sbi, COMPRESS_CACHE))
+ return false;
+
+ cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
+ blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
+ if (cpage) {
+ if (PageUptodate(cpage)) {
+ atomic_inc(&sbi->compress_page_hit);
+ memcpy(page_address(page),
+ page_address(cpage), PAGE_SIZE);
+ hitted = true;
+ }
+ f2fs_put_page(cpage, 1);
+ }
+
+ return hitted;
+}
+
+void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct address_space *mapping = sbi->compress_inode->i_mapping;
+ struct pagevec pvec;
+ pgoff_t index = 0;
+ pgoff_t end = MAX_BLKADDR(sbi);
+
+ if (!mapping->nrpages)
+ return;
+
+ pagevec_init(&pvec);
+
+ do {
+ unsigned int nr_pages;
+ int i;
+
+ nr_pages = pagevec_lookup_range(&pvec, mapping,
+ &index, end - 1);
+ if (!nr_pages)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (page->index > end)
+ break;
+
+ lock_page(page);
+ if (page->mapping != mapping) {
+ unlock_page(page);
+ continue;
+ }
+
+ if (ino != get_page_private_data(page)) {
+ unlock_page(page);
+ continue;
+ }
+
+ generic_error_remove_page(mapping, page);
+ unlock_page(page);
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ } while (index < end);
+}
+
+int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
+{
+ struct inode *inode;
+
+ if (!test_opt(sbi, COMPRESS_CACHE))
+ return 0;
+
+ inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ sbi->compress_inode = inode;
+
+ sbi->compress_percent = COMPRESS_PERCENT;
+ sbi->compress_watermark = COMPRESS_WATERMARK;
+
+ atomic_set(&sbi->compress_page_hit, 0);
+
+ return 0;
+}
+
+void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+{
+ if (!sbi->compress_inode)
+ return;
+ iput(sbi->compress_inode);
+ sbi->compress_inode = NULL;
+}
+
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
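
The new COMPRESS_CACHE support keeps compressed blocks that were already read in the page cache of an internal inode, keyed by block address: readers probe the cache before issuing I/O, and rewriting a block invalidates its cached copy. A small user-space analogue of that cache-aside flow, using a direct-mapped array purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BLK_SIZE	4096
#define CACHE_SLOTS	64

struct cache_slot {
	bool valid;
	uint64_t blkaddr;
	unsigned char data[BLK_SIZE];
};

static struct cache_slot cache[CACHE_SLOTS];

/* probe the cache; on hit copy the cached block into dst */
static bool cache_load(uint64_t blkaddr, unsigned char *dst)
{
	struct cache_slot *s = &cache[blkaddr % CACHE_SLOTS];

	if (!s->valid || s->blkaddr != blkaddr)
		return false;
	memcpy(dst, s->data, BLK_SIZE);
	return true;
}

/* after a disk read, remember the block for later readers */
static void cache_store(uint64_t blkaddr, const unsigned char *src)
{
	struct cache_slot *s = &cache[blkaddr % CACHE_SLOTS];

	s->valid = true;
	s->blkaddr = blkaddr;
	memcpy(s->data, src, BLK_SIZE);
}

/* a rewrite of the block makes any cached copy stale */
static void cache_invalidate(uint64_t blkaddr)
{
	struct cache_slot *s = &cache[blkaddr % CACHE_SLOTS];

	if (s->valid && s->blkaddr == blkaddr)
		s->valid = false;
}

int main(void)
{
	unsigned char blk[BLK_SIZE] = "compressed cluster";
	unsigned char out[BLK_SIZE];

	printf("hit before store: %d\n", cache_load(7, out));
	cache_store(7, blk);
	printf("hit after store:  %d\n", cache_load(7, out));
	cache_invalidate(7);
	printf("hit after invalidate: %d\n", cache_load(7, out));
	return 0;
}
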
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 009a09fb9d88..d2cf48c5a2e4 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -58,18 +58,19 @@ static bool __is_cp_guaranteed(struct page *page)
if (!mapping)
return false;
- if (f2fs_is_compressed_page(page))
- return false;
-
inode = mapping->host;
sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_META_INO(sbi) ||
inode->i_ino == F2FS_NODE_INO(sbi) ||
- S_ISDIR(inode->i_mode) ||
- (S_ISREG(inode->i_mode) &&
+ S_ISDIR(inode->i_mode))
+ return true;
+
+ if (f2fs_is_compressed_page(page))
+ return false;
+ if ((S_ISREG(inode->i_mode) &&
(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
- is_cold_data(page))
+ page_private_gcing(page))
return true;
return false;
}
@@ -131,7 +132,7 @@ static void f2fs_finish_read_bio(struct bio *bio)
if (f2fs_is_compressed_page(page)) {
if (bio->bi_status)
- f2fs_end_read_compressed_page(page, true);
+ f2fs_end_read_compressed_page(page, true, 0);
f2fs_put_page_dic(page);
continue;
}
@@ -227,15 +228,19 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
struct bio_vec *bv;
struct bvec_iter_all iter_all;
bool all_compressed = true;
+ block_t blkaddr = SECTOR_TO_BLOCK(ctx->bio->bi_iter.bi_sector);
bio_for_each_segment_all(bv, ctx->bio, iter_all) {
struct page *page = bv->bv_page;
/* PG_error was set if decryption failed. */
if (f2fs_is_compressed_page(page))
- f2fs_end_read_compressed_page(page, PageError(page));
+ f2fs_end_read_compressed_page(page, PageError(page),
+ blkaddr);
else
all_compressed = false;
+
+ blkaddr++;
}
/*
@@ -299,9 +304,8 @@ static void f2fs_write_end_io(struct bio *bio)
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
- if (IS_DUMMY_WRITTEN_PAGE(page)) {
- set_page_private(page, (unsigned long)NULL);
- ClearPagePrivate(page);
+ if (page_private_dummy(page)) {
+ clear_page_private_dummy(page);
unlock_page(page);
mempool_free(page, sbi->write_io_dummy);
@@ -331,7 +335,7 @@ static void f2fs_write_end_io(struct bio *bio)
dec_page_count(sbi, type);
if (f2fs_in_warm_node_list(sbi, page))
f2fs_del_fsync_node_entry(sbi, page);
- clear_cold_data(page);
+ clear_page_private_gcing(page);
end_page_writeback(page);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
@@ -455,10 +459,11 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
GFP_NOIO | __GFP_NOFAIL);
f2fs_bug_on(sbi, !page);
- zero_user_segment(page, 0, PAGE_SIZE);
- SetPagePrivate(page);
- set_page_private(page, DUMMY_WRITTEN_PAGE);
lock_page(page);
+
+ zero_user_segment(page, 0, PAGE_SIZE);
+ set_page_private_dummy(page);
+
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
f2fs_bug_on(sbi, 1);
}
@@ -1351,9 +1356,11 @@ alloc:
old_blkaddr = dn->data_blkaddr;
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
&sum, seg_type, NULL);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
invalidate_mapping_pages(META_MAPPING(sbi),
old_blkaddr, old_blkaddr);
+ f2fs_invalidate_compress_page(sbi, old_blkaddr);
+ }
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
/*
@@ -2173,7 +2180,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
goto out_put_dnode;
}
- for (i = 0; i < dic->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++) {
struct page *page = dic->cpages[i];
block_t blkaddr;
struct bio_post_read_ctx *ctx;
@@ -2181,6 +2188,14 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i + 1);
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
+ if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+ if (atomic_dec_and_test(&dic->remaining_pages))
+ f2fs_decompress_cluster(dic);
+ continue;
+ }
+
if (bio && (!page_is_mergeable(sbi, bio,
*last_block_in_bio, blkaddr) ||
!f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
@@ -2202,8 +2217,6 @@ submit_and_realloc:
}
}
- f2fs_wait_on_block_writeback(inode, blkaddr);
-
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
@@ -2459,6 +2472,10 @@ static inline bool check_inplace_update_policy(struct inode *inode,
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
+ /* swap file is migrating in aligned write mode */
+ if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
+ return false;
+
if (f2fs_is_pinned_file(inode))
return true;
@@ -2481,10 +2498,15 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (f2fs_is_atomic_file(inode))
return true;
+
+ /* swap file is migrating in aligned write mode */
+ if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
+ return true;
+
if (fio) {
- if (is_cold_data(fio->page))
+ if (page_private_gcing(fio->page))
return true;
- if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ if (page_private_dummy(fio->page))
return true;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
@@ -2540,7 +2562,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
- clear_cold_data(page);
+ clear_page_private_gcing(page);
goto out_writepage;
}
got_it:
@@ -2750,7 +2772,7 @@ out:
inode_dec_dirty_pages(inode);
if (err) {
ClearPageUptodate(page);
- clear_cold_data(page);
+ clear_page_private_gcing(page);
}
if (wbc->for_reclaim) {
@@ -3224,7 +3246,7 @@ restart:
f2fs_do_read_inline_data(page, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
- set_inline_node(ipage);
+ set_page_private_inline(ipage);
} else {
err = f2fs_convert_inline_page(&dn, page);
if (err)
@@ -3615,12 +3637,20 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
}
}
- clear_cold_data(page);
+ clear_page_private_gcing(page);
+
+ if (test_opt(sbi, COMPRESS_CACHE)) {
+ if (f2fs_compressed_file(inode))
+ f2fs_invalidate_compress_pages(sbi, inode->i_ino);
+ if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
+ clear_page_private_data(page);
+ }
- if (IS_ATOMIC_WRITTEN_PAGE(page))
+ if (page_private_atomic(page))
return f2fs_drop_inmem_page(inode, page);
- f2fs_clear_page_private(page);
+ detach_page_private(page);
+ set_page_private(page, 0);
}
int f2fs_release_page(struct page *page, gfp_t wait)
@@ -3630,11 +3660,23 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 0;
/* This is atomic written page, keep Private */
- if (IS_ATOMIC_WRITTEN_PAGE(page))
+ if (page_private_atomic(page))
return 0;
- clear_cold_data(page);
- f2fs_clear_page_private(page);
+ if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
+ struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct inode *inode = page->mapping->host;
+
+ if (f2fs_compressed_file(inode))
+ f2fs_invalidate_compress_pages(sbi, inode->i_ino);
+ if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
+ clear_page_private_data(page);
+ }
+
+ clear_page_private_gcing(page);
+
+ detach_page_private(page);
+ set_page_private(page, 0);
return 1;
}
@@ -3650,7 +3692,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
return __set_page_dirty_nobuffers(page);
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
- if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+ if (!page_private_atomic(page)) {
f2fs_register_inmem_page(inode, page);
return 1;
}
@@ -3742,7 +3784,7 @@ int f2fs_migrate_page(struct address_space *mapping,
{
int rc, extra_count;
struct f2fs_inode_info *fi = F2FS_I(mapping->host);
- bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+ bool atomic_written = page_private_atomic(page);
BUG_ON(PageWriteback(page));
@@ -3777,9 +3819,16 @@ int f2fs_migrate_page(struct address_space *mapping,
get_page(newpage);
}
+ /* guarantee to start from no stale private field */
+ set_page_private(newpage, 0);
if (PagePrivate(page)) {
- f2fs_set_page_private(newpage, page_private(page));
- f2fs_clear_page_private(page);
+ set_page_private(newpage, page_private(page));
+ SetPagePrivate(newpage);
+ get_page(newpage);
+
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ put_page(page);
}
if (mode != MIGRATE_SYNC_NO_COPY)
@@ -3792,67 +3841,66 @@ int f2fs_migrate_page(struct address_space *mapping,
#endif
#ifdef CONFIG_SWAP
-static int f2fs_is_file_aligned(struct inode *inode)
+static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
+ unsigned int blkcnt)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- block_t main_blkaddr = SM_I(sbi)->main_blkaddr;
- block_t cur_lblock;
- block_t last_lblock;
- block_t pblock;
- unsigned long nr_pblocks;
- unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
- unsigned int not_aligned = 0;
+ unsigned int blkofs;
+ unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int secidx = start_blk / blk_per_sec;
+ unsigned int end_sec = secidx + blkcnt / blk_per_sec;
int ret = 0;
- cur_lblock = 0;
- last_lblock = bytes_to_blks(inode, i_size_read(inode));
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
- while (cur_lblock < last_lblock) {
- struct f2fs_map_blocks map;
+ set_inode_flag(inode, FI_ALIGNED_WRITE);
- memset(&map, 0, sizeof(map));
- map.m_lblk = cur_lblock;
- map.m_len = last_lblock - cur_lblock;
- map.m_next_pgofs = NULL;
- map.m_next_extent = NULL;
- map.m_seg_type = NO_CHECK_TYPE;
- map.m_may_create = false;
+ for (; secidx < end_sec; secidx++) {
+ down_write(&sbi->pin_sem);
- ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
- if (ret)
- goto out;
+ f2fs_lock_op(sbi);
+ f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
+ f2fs_unlock_op(sbi);
- /* hole */
- if (!(map.m_flags & F2FS_MAP_FLAGS)) {
- f2fs_err(sbi, "Swapfile has holes\n");
- ret = -ENOENT;
- goto out;
- }
+ set_inode_flag(inode, FI_DO_DEFRAG);
- pblock = map.m_pblk;
- nr_pblocks = map.m_len;
+ for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
+ struct page *page;
+ unsigned int blkidx = secidx * blk_per_sec + blkofs;
- if ((pblock - main_blkaddr) & (blocks_per_sec - 1) ||
- nr_pblocks & (blocks_per_sec - 1)) {
- if (f2fs_is_pinned_file(inode)) {
- f2fs_err(sbi, "Swapfile does not align to section");
- ret = -EINVAL;
- goto out;
+ page = f2fs_get_lock_data_page(inode, blkidx, true);
+ if (IS_ERR(page)) {
+ up_write(&sbi->pin_sem);
+ ret = PTR_ERR(page);
+ goto done;
}
- not_aligned++;
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
}
- cur_lblock += nr_pblocks;
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+
+ ret = filemap_fdatawrite(inode->i_mapping);
+
+ up_write(&sbi->pin_sem);
+
+ if (ret)
+ break;
}
- if (not_aligned)
- f2fs_warn(sbi, "Swapfile (%u) is not align to section: \n"
- "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
- not_aligned);
-out:
+
+done:
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_ALIGNED_WRITE);
+
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
return ret;
}
-static int check_swap_activate_fast(struct swap_info_struct *sis,
+static int check_swap_activate(struct swap_info_struct *sis,
struct file *swap_file, sector_t *span)
{
struct address_space *mapping = swap_file->f_mapping;
@@ -3865,7 +3913,8 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
sector_t highest_pblock = 0;
int nr_extents = 0;
unsigned long nr_pblocks;
- unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
unsigned int not_aligned = 0;
int ret = 0;
@@ -3878,7 +3927,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
while (cur_lblock < last_lblock && cur_lblock < sis->max) {
struct f2fs_map_blocks map;
-
+retry:
cond_resched();
memset(&map, 0, sizeof(map));
@@ -3895,7 +3944,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
/* hole */
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
- f2fs_err(sbi, "Swapfile has holes\n");
+ f2fs_err(sbi, "Swapfile has holes");
ret = -EINVAL;
goto out;
}
@@ -3903,16 +3952,28 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
pblock = map.m_pblk;
nr_pblocks = map.m_len;
- if ((pblock - SM_I(sbi)->main_blkaddr) & (blocks_per_sec - 1) ||
- nr_pblocks & (blocks_per_sec - 1)) {
- if (f2fs_is_pinned_file(inode)) {
- f2fs_err(sbi, "Swapfile does not align to section");
- ret = -EINVAL;
- goto out;
- }
+ if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
+ nr_pblocks & sec_blks_mask) {
not_aligned++;
- }
+ nr_pblocks = roundup(nr_pblocks, blks_per_sec);
+ if (cur_lblock + nr_pblocks > sis->max)
+ nr_pblocks -= blks_per_sec;
+
+ if (!nr_pblocks) {
+ /* this extent is last one */
+ nr_pblocks = map.m_len;
+ f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
+ goto next;
+ }
+
+ ret = f2fs_migrate_blocks(inode, cur_lblock,
+ nr_pblocks);
+ if (ret)
+ goto out;
+ goto retry;
+ }
+next:
if (cur_lblock + nr_pblocks >= sis->max)
nr_pblocks = sis->max - cur_lblock;
@@ -3939,120 +4000,11 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
sis->max = cur_lblock;
sis->pages = cur_lblock - 1;
sis->highest_bit = cur_lblock - 1;
-
- if (not_aligned)
- f2fs_warn(sbi, "Swapfile (%u) is not align to section: \n"
- "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
- not_aligned);
-out:
- return ret;
-}
-
-/* Copied from generic_swapfile_activate() to check any holes */
-static int check_swap_activate(struct swap_info_struct *sis,
- struct file *swap_file, sector_t *span)
-{
- struct address_space *mapping = swap_file->f_mapping;
- struct inode *inode = mapping->host;
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned blocks_per_page;
- unsigned long page_no;
- sector_t probe_block;
- sector_t last_block;
- sector_t lowest_block = -1;
- sector_t highest_block = 0;
- int nr_extents = 0;
- int ret = 0;
-
- if (PAGE_SIZE == F2FS_BLKSIZE)
- return check_swap_activate_fast(sis, swap_file, span);
-
- ret = f2fs_is_file_aligned(inode);
- if (ret)
- goto out;
-
- blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);
-
- /*
- * Map all the blocks into the extent list. This code doesn't try
- * to be very smart.
- */
- probe_block = 0;
- page_no = 0;
- last_block = bytes_to_blks(inode, i_size_read(inode));
- while ((probe_block + blocks_per_page) <= last_block &&
- page_no < sis->max) {
- unsigned block_in_page;
- sector_t first_block;
- sector_t block = 0;
-
- cond_resched();
-
- block = probe_block;
- ret = bmap(inode, &block);
- if (ret)
- goto out;
- if (!block)
- goto bad_bmap;
- first_block = block;
-
- /*
- * It must be PAGE_SIZE aligned on-disk
- */
- if (first_block & (blocks_per_page - 1)) {
- probe_block++;
- goto reprobe;
- }
-
- for (block_in_page = 1; block_in_page < blocks_per_page;
- block_in_page++) {
-
- block = probe_block + block_in_page;
- ret = bmap(inode, &block);
- if (ret)
- goto out;
- if (!block)
- goto bad_bmap;
-
- if (block != first_block + block_in_page) {
- /* Discontiguity */
- probe_block++;
- goto reprobe;
- }
- }
-
- first_block >>= (PAGE_SHIFT - inode->i_blkbits);
- if (page_no) { /* exclude the header page */
- if (first_block < lowest_block)
- lowest_block = first_block;
- if (first_block > highest_block)
- highest_block = first_block;
- }
-
- /*
- * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
- */
- ret = add_swap_extent(sis, page_no, 1, first_block);
- if (ret < 0)
- goto out;
- nr_extents += ret;
- page_no++;
- probe_block += blocks_per_page;
-reprobe:
- continue;
- }
- ret = nr_extents;
- *span = 1 + highest_block - lowest_block;
- if (page_no == 0)
- page_no = 1; /* force Empty message */
- sis->max = page_no;
- sis->pages = page_no - 1;
- sis->highest_bit = page_no - 1;
out:
+ if (not_aligned)
+ f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
+ not_aligned, blks_per_sec * F2FS_BLKSIZE);
return ret;
-bad_bmap:
- f2fs_err(sbi, "Swapfile has holes\n");
- return -EINVAL;
}
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
@@ -4067,6 +4019,12 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (f2fs_readonly(F2FS_I_SB(inode)->sb))
return -EROFS;
+ if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
+ f2fs_err(F2FS_I_SB(inode),
+ "Swapfile not supported in LFS mode");
+ return -EINVAL;
+ }
+
ret = f2fs_convert_inline_inode(inode);
if (ret)
return ret;
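Rather than rejecting a swapfile whose extents are not section-aligned, the rewritten check_swap_activate() above rounds the offending extent length up to a whole section, migrates those blocks into a freshly allocated pinned section via f2fs_migrate_blocks() (holding i_gc_rwsem and i_mmap_sem and setting FI_ALIGNED_WRITE for the duration), and then retries the mapping; only the final, partial extent is allowed to stay unaligned, and LFS mode now refuses swapfiles outright. A rough userspace model of just the round-up/clamp decision (hypothetical helper name, not the kernel function):

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

/*
 * pblock_off is the physical block minus the main-area start; blks_per_sec is
 * a power of two.  Returns the number of blocks that would be handed to the
 * migrate step, or 0 if the extent is already section-aligned (a 0 result on
 * an unaligned extent means "last extent, leave it as is").
 */
static unsigned long blocks_to_migrate(unsigned long pblock_off,
                                       unsigned long nr_pblocks,
                                       unsigned long cur_lblock,
                                       unsigned long max_lblock,
                                       unsigned long blks_per_sec)
{
        unsigned long mask = blks_per_sec - 1;
        unsigned long n;

        if (!((pblock_off & mask) || (nr_pblocks & mask)))
                return 0;                       /* already aligned */

        n = ROUNDUP(nr_pblocks, blks_per_sec);
        if (cur_lblock + n > max_lblock)
                n -= blks_per_sec;              /* do not migrate past the swapfile */
        return n;
}

int main(void)
{
        /* 512 blocks per section; a 700-block extent starting mid-section. */
        printf("migrate %lu blocks\n",
               blocks_to_migrate(100, 700, 0, 4096, 512));
        return 0;
}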
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index c03949a7ccff..833325038ef3 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -152,6 +152,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->node_pages = NODE_MAPPING(sbi)->nrpages;
if (sbi->meta_inode)
si->meta_pages = META_MAPPING(sbi)->nrpages;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (sbi->compress_inode) {
+ si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
+ si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
+ }
+#endif
si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
si->sits = MAIN_SEGS(sbi);
@@ -309,6 +315,12 @@ get_cache:
si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (sbi->compress_inode) {
+ unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ }
+#endif
}
static int stat_show(struct seq_file *s, void *v)
@@ -476,6 +488,7 @@ static int stat_show(struct seq_file *s, void *v)
"volatile IO: %4d (Max. %4d)\n",
si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
si->vw_cnt, si->max_vw_cnt);
+ seq_printf(s, " - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index dc7ce79672b8..456651682daf 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -16,6 +16,10 @@
#include "xattr.h"
#include <trace/events/f2fs.h>
+#ifdef CONFIG_UNICODE
+extern struct kmem_cache *f2fs_cf_name_slab;
+#endif
+
static unsigned long dir_blocks(struct inode *inode)
{
return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
@@ -77,11 +81,10 @@ int f2fs_init_casefolded_name(const struct inode *dir,
{
#ifdef CONFIG_UNICODE
struct super_block *sb = dir->i_sb;
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (IS_CASEFOLDED(dir)) {
- fname->cf_name.name = f2fs_kmalloc(sbi, F2FS_NAME_LEN,
- GFP_NOFS);
+ fname->cf_name.name = kmem_cache_alloc(f2fs_cf_name_slab,
+ GFP_NOFS);
if (!fname->cf_name.name)
return -ENOMEM;
fname->cf_name.len = utf8_casefold(sb->s_encoding,
@@ -89,7 +92,7 @@ int f2fs_init_casefolded_name(const struct inode *dir,
fname->cf_name.name,
F2FS_NAME_LEN);
if ((int)fname->cf_name.len <= 0) {
- kfree(fname->cf_name.name);
+ kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
fname->cf_name.name = NULL;
if (sb_has_strict_encoding(sb))
return -EINVAL;
@@ -172,8 +175,10 @@ void f2fs_free_filename(struct f2fs_filename *fname)
fname->crypto_buf.name = NULL;
#endif
#ifdef CONFIG_UNICODE
- kfree(fname->cf_name.name);
- fname->cf_name.name = NULL;
+ if (fname->cf_name.name) {
+ kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
+ fname->cf_name.name = NULL;
+ }
#endif
}
@@ -929,11 +934,15 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
f2fs_clear_page_cache_dirty_tag(page);
clear_page_dirty_for_io(page);
- f2fs_clear_page_private(page);
ClearPageUptodate(page);
- clear_cold_data(page);
+
+ clear_page_private_gcing(page);
+
inode_dec_dirty_pages(dir);
f2fs_remove_dirty_inode(dir);
+
+ detach_page_private(page);
+ set_page_private(page, 0);
}
f2fs_put_page(page, 1);
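In dir.c the casefolded-name buffer now comes from a dedicated f2fs_cf_name_slab (created at module init in super.c further down) instead of f2fs_kmalloc(), and is only freed when it was actually allocated; every buffer has the same fixed F2FS_NAME_LEN size, which is exactly the case a dedicated object cache handles well. As a loose userspace analogy only (a toy free-list pool, not the kernel slab allocator):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_LEN 255    /* stand-in for F2FS_NAME_LEN */

/* Toy fixed-size pool: every object is NAME_LEN bytes, freed objects are reused. */
struct name_pool {
        void *free_list;        /* list threaded through the first bytes of free objects */
};

static void *pool_alloc(struct name_pool *p)
{
        void *obj = p->free_list;

        if (obj) {
                memcpy(&p->free_list, obj, sizeof(void *));
                return obj;
        }
        return malloc(NAME_LEN);
}

static void pool_free(struct name_pool *p, void *obj)
{
        memcpy(obj, &p->free_list, sizeof(void *));
        p->free_list = obj;
}

int main(void)
{
        struct name_pool pool = { 0 };
        char *a = pool_alloc(&pool);
        char *b;

        strcpy(a, "example.TXT");
        printf("casefolded buffer: %s\n", a);
        pool_free(&pool, a);

        b = pool_alloc(&pool);  /* reuses the same NAME_LEN-sized object */
        printf("reused: %s\n", b == a ? "yes" : "no");
        free(b);
        return 0;
}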
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c83d90125ebd..ee8eb33e2c25 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -98,6 +98,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_ATGC 0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000
#define F2FS_MOUNT_GC_MERGE 0x20000000
+#define F2FS_MOUNT_COMPRESS_CACHE 0x40000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -150,8 +151,10 @@ struct f2fs_mount_info {
unsigned char compress_level; /* compress level */
bool compress_chksum; /* compressed data chksum */
unsigned char compress_ext_cnt; /* extension count */
+ unsigned char nocompress_ext_cnt; /* nocompress extension count */
int compress_mode; /* compression mode */
unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
+ unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};
#define F2FS_FEATURE_ENCRYPT 0x0001
@@ -168,6 +171,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_SB_CHKSUM 0x0800
#define F2FS_FEATURE_CASEFOLD 0x1000
#define F2FS_FEATURE_COMPRESSION 0x2000
+#define F2FS_FEATURE_RO 0x4000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -706,6 +710,8 @@ enum {
FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */
FI_MMAP_FILE, /* indicate file was mmapped */
FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */
+ FI_COMPRESS_RELEASED, /* compressed blocks were released */
+ FI_ALIGNED_WRITE, /* enable aligned write */
FI_MAX, /* max flag, never be used */
};
@@ -939,6 +945,7 @@ static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
#define NR_CURSEG_DATA_TYPE (3)
#define NR_CURSEG_NODE_TYPE (3)
#define NR_CURSEG_INMEM_TYPE (2)
+#define NR_CURSEG_RO_TYPE (2)
#define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
@@ -1291,17 +1298,119 @@ enum {
*/
};
+static inline int f2fs_test_bit(unsigned int nr, char *addr);
+static inline void f2fs_set_bit(unsigned int nr, char *addr);
+static inline void f2fs_clear_bit(unsigned int nr, char *addr);
+
/*
- * this value is set in page as a private data which indicate that
- * the page is atomically written, and it is in inmem_pages list.
+ * Layout of f2fs page.private:
+ *
+ * Layout A: lowest bit should be 1
+ * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
+ * bit 0 PAGE_PRIVATE_NOT_POINTER
+ * bit 1 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 2 PAGE_PRIVATE_DUMMY_WRITE
+ * bit 3 PAGE_PRIVATE_ONGOING_MIGRATION
+ * bit 4 PAGE_PRIVATE_INLINE_INODE
+ * bit 5 PAGE_PRIVATE_REF_RESOURCE
+ * bit 6- f2fs private data
+ *
+ * Layout B: lowest bit should be 0
+ * page.private is a wrapped pointer.
*/
-#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
-#define DUMMY_WRITTEN_PAGE ((unsigned long)-2)
+enum {
+ PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
+ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
+ PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
+ PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
+ PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
+ PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
+ PAGE_PRIVATE_MAX
+};
-#define IS_ATOMIC_WRITTEN_PAGE(page) \
- (page_private(page) == ATOMIC_WRITTEN_PAGE)
-#define IS_DUMMY_WRITTEN_PAGE(page) \
- (page_private(page) == DUMMY_WRITTEN_PAGE)
+#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
+static inline bool page_private_##name(struct page *page) \
+{ \
+ return PagePrivate(page) && \
+ test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
+ test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+}
+
+#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
+static inline void set_page_private_##name(struct page *page) \
+{ \
+ if (!PagePrivate(page)) { \
+ get_page(page); \
+ SetPagePrivate(page); \
+ set_page_private(page, 0); \
+ } \
+ set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
+ set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+}
+
+#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
+static inline void clear_page_private_##name(struct page *page) \
+{ \
+ clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+ if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
+ set_page_private(page, 0); \
+ if (PagePrivate(page)) { \
+ ClearPagePrivate(page); \
+ put_page(page); \
+ }\
+ } \
+}
+
+PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
+PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
+PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
+PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
+PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
+
+PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
+PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
+PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
+PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
+
+PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
+PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
+PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
+PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
+
+static inline unsigned long get_page_private_data(struct page *page)
+{
+ unsigned long data = page_private(page);
+
+ if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
+ return 0;
+ return data >> PAGE_PRIVATE_MAX;
+}
+
+static inline void set_page_private_data(struct page *page, unsigned long data)
+{
+ if (!PagePrivate(page)) {
+ get_page(page);
+ SetPagePrivate(page);
+ set_page_private(page, 0);
+ }
+ set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
+ page_private(page) |= data << PAGE_PRIVATE_MAX;
+}
+
+static inline void clear_page_private_data(struct page *page)
+{
+ page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
+ if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
+ set_page_private(page, 0);
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ put_page(page);
+ }
+ }
+}
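The block above replaces the old sentinel values ATOMIC_WRITTEN_PAGE / DUMMY_WRITTEN_PAGE with a packed layout for page->private: bit 0 flags that the field carries data rather than a wrapped pointer, the next few bits are per-page state flags, and everything above PAGE_PRIVATE_MAX is a small payload (used by the compressed-block cache to remember which inode a cached block belongs to). A self-contained userspace model of the packing (the names mirror the enum above, but nothing here is kernel API):

#include <stdio.h>

/* Mirror of the bit positions introduced in the hunk above. */
enum {
        PRIV_NOT_POINTER,       /* bit 0: private holds packed data, not a pointer */
        PRIV_ATOMIC_WRITE,
        PRIV_DUMMY_WRITE,
        PRIV_ONGOING_MIGRATION,
        PRIV_INLINE_INODE,
        PRIV_REF_RESOURCE,
        PRIV_MAX,
};

/* Pack a flag and a small payload into one word, as page->private does. */
static void set_flag(unsigned long *priv, int bit)
{
        *priv |= 1UL << PRIV_NOT_POINTER;
        *priv |= 1UL << bit;
}

static void set_data(unsigned long *priv, unsigned long data)
{
        *priv |= 1UL << PRIV_NOT_POINTER;
        *priv |= data << PRIV_MAX;
}

static unsigned long get_data(unsigned long priv)
{
        if (!(priv & (1UL << PRIV_NOT_POINTER)))
                return 0;       /* layout B: priv is a wrapped pointer */
        return priv >> PRIV_MAX;
}

int main(void)
{
        unsigned long priv = 0;

        set_flag(&priv, PRIV_ONGOING_MIGRATION);
        set_data(&priv, 12345); /* e.g. an ino stored by the compress cache */
        printf("gcing=%lu data=%lu\n",
               (priv >> PRIV_ONGOING_MIGRATION) & 1, get_data(priv));
        return 0;
}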
/* For compression */
enum compress_algorithm_type {
@@ -1317,6 +1426,9 @@ enum compress_flag {
COMPRESS_MAX_FLAG,
};
+#define COMPRESS_WATERMARK 20
+#define COMPRESS_PERCENT 20
+
#define COMPRESS_DATA_RESERVED_SIZE 4
struct compress_data {
__le32 clen; /* compressed data size */
@@ -1594,6 +1706,9 @@ struct f2fs_sb_info {
struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
struct completion s_stat_kobj_unregister;
+ struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */
+ struct completion s_feature_list_kobj_unregister;
+
/* For shrinker support */
struct list_head s_list;
int s_ndevs; /* number of devices */
@@ -1626,6 +1741,12 @@ struct f2fs_sb_info {
u64 compr_written_block;
u64 compr_saved_block;
u32 compr_new_inode;
+
+ /* For compressed block cache */
+ struct inode *compress_inode; /* cache compressed blocks */
+ unsigned int compress_percent; /* cache page percentage */
+ unsigned int compress_watermark; /* cache page watermark */
+ atomic_t compress_page_hit; /* cache hit count */
#endif
};
@@ -2678,6 +2799,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
case FI_PIN_FILE:
+ case FI_COMPRESS_RELEASED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -2799,6 +2921,8 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
set_bit(FI_PIN_FILE, fi->flags);
+ if (ri->i_inline & F2FS_COMPRESS_RELEASED)
+ set_bit(FI_COMPRESS_RELEASED, fi->flags);
}
static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
@@ -2819,6 +2943,8 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
ri->i_inline |= F2FS_PIN_FILE;
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+ ri->i_inline |= F2FS_COMPRESS_RELEASED;
}
static inline int f2fs_has_extra_attr(struct inode *inode)
@@ -3027,25 +3153,6 @@ static inline bool is_dot_dotdot(const u8 *name, size_t len)
return false;
}
-static inline bool f2fs_may_extent_tree(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
- if (!test_opt(sbi, EXTENT_CACHE) ||
- is_inode_flag_set(inode, FI_NO_EXTENT) ||
- is_inode_flag_set(inode, FI_COMPRESSED_FILE))
- return false;
-
- /*
- * for recovered files during mount do not create extents
- * if shrinker is not registered.
- */
- if (list_empty(&sbi->s_list))
- return false;
-
- return S_ISREG(inode->i_mode);
-}
-
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
@@ -3169,20 +3276,6 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
return true;
}
-static inline void f2fs_set_page_private(struct page *page,
- unsigned long data)
-{
- if (PagePrivate(page))
- return;
-
- attach_page_private(page, (void *)data);
-}
-
-static inline void f2fs_clear_page_private(struct page *page)
-{
- detach_page_private(page);
-}
-
/*
* file.c
*/
@@ -3566,6 +3659,8 @@ void f2fs_destroy_garbage_collection_cache(void);
*/
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
+int __init f2fs_create_recovery_cache(void);
+void f2fs_destroy_recovery_cache(void);
/*
* debug.c
@@ -3604,7 +3699,8 @@ struct f2fs_stat_info {
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs;
- int dirty_count, node_pages, meta_pages;
+ int dirty_count, node_pages, meta_pages, compress_pages;
+ int compress_page_hit;
int prefree_count, call_count, cp_count, bg_cp_count;
int tot_segs, node_segs, data_segs, free_segs, free_secs;
int bg_node_segs, bg_data_segs;
@@ -3940,7 +4036,9 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
-void f2fs_end_read_compressed_page(struct page *page, bool failed);
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
+void f2fs_end_read_compressed_page(struct page *page, bool failed,
+ block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -3958,10 +4056,19 @@ void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
+void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
+struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
+void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+ nid_t ino, block_t blkaddr);
+bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
+ block_t blkaddr);
+void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
do { \
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
@@ -3990,7 +4097,9 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
-static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
+static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
+static inline void f2fs_end_read_compressed_page(struct page *page,
+ bool failed, block_t blkaddr)
{
WARN_ON_ONCE(1);
}
@@ -3998,10 +4107,20 @@ static inline void f2fs_put_page_dic(struct page *page)
{
WARN_ON_ONCE(1);
}
+static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
+static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
+static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
+ block_t blkaddr) { }
+static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
+ struct page *page, nid_t ino, block_t blkaddr) { }
+static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
+ struct page *page, block_t blkaddr) { return false; }
+static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
+ nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
#endif
@@ -4066,6 +4185,27 @@ F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
+F2FS_FEATURE_FUNCS(readonly, RO);
+
+static inline bool f2fs_may_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (!test_opt(sbi, EXTENT_CACHE) ||
+ is_inode_flag_set(inode, FI_NO_EXTENT) ||
+ (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+ !f2fs_sb_has_readonly(sbi)))
+ return false;
+
+ /*
+ * for recovered files during mount do not create extents
+ * if shrinker is not registered.
+ */
+ if (list_empty(&sbi->s_list))
+ return false;
+
+ return S_ISREG(inode->i_mode);
+}
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
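f2fs.h also grows the plumbing for the new compress_cache mount option: a hidden compress_inode whose page cache keeps copies of compressed blocks keyed by block address, watermark/percent knobs, a hit counter, and the f2fs_cache_compressed_page()/f2fs_load_compressed_page()/f2fs_invalidate_compress_page(s)() entry points declared above. Conceptually it is a lookup table from block address to block contents; a toy direct-mapped version (illustrative only, the kernel uses the compress inode's page cache rather than a fixed array):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS 1024
#define BLK_SIZE 4096

struct cblock {
        bool valid;
        unsigned long blkaddr;
        unsigned char data[BLK_SIZE];
};

static struct cblock cache[CACHE_SLOTS];

/* Counterpart of f2fs_cache_compressed_page(): stash a block after reading it. */
static void cache_store(unsigned long blkaddr, const void *data)
{
        struct cblock *c = &cache[blkaddr % CACHE_SLOTS];

        c->valid = true;
        c->blkaddr = blkaddr;
        memcpy(c->data, data, BLK_SIZE);
}

/* Counterpart of f2fs_load_compressed_page(): true on a cache hit. */
static bool cache_load(unsigned long blkaddr, void *out)
{
        struct cblock *c = &cache[blkaddr % CACHE_SLOTS];

        if (!c->valid || c->blkaddr != blkaddr)
                return false;
        memcpy(out, c->data, BLK_SIZE);
        return true;
}

/* Counterpart of f2fs_invalidate_compress_page(): the block was reallocated. */
static void cache_invalidate(unsigned long blkaddr)
{
        struct cblock *c = &cache[blkaddr % CACHE_SLOTS];

        if (c->valid && c->blkaddr == blkaddr)
                c->valid = false;
}

int main(void)
{
        unsigned char blk[BLK_SIZE] = { 0xAB }, out[BLK_SIZE];

        cache_store(5000, blk);
        printf("hit=%d\n", cache_load(5000, out));
        cache_invalidate(5000);
        printf("hit after invalidate=%d\n", cache_load(5000, out));
        return 0;
}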
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ceb575f99048..6afd4562335f 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -63,6 +63,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+ return VM_FAULT_SIGBUS;
+
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto err;
@@ -85,10 +88,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
err = ret;
goto err;
} else if (ret) {
- if (ret < F2FS_I(inode)->i_cluster_size) {
- err = -EAGAIN;
- goto err;
- }
need_alloc = false;
}
}
@@ -117,7 +116,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_block(&dn, page->index);
- f2fs_put_dnode(&dn);
f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
@@ -3203,7 +3201,7 @@ int f2fs_precache_extents(struct inode *inode)
map.m_lblk = m_next_extent;
}
- return err;
+ return 0;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
@@ -3237,7 +3235,7 @@ static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
f2fs_warn(F2FS_I_SB(inode),
- "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
+ "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
inode->i_ino);
return -EOPNOTSUPP;
}
@@ -3425,7 +3423,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
goto out;
}
- if (IS_IMMUTABLE(inode)) {
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
@@ -3434,8 +3432,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
goto out;
- F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
- f2fs_set_inode_flags(inode);
+ set_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
@@ -3590,7 +3587,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
inode_lock(inode);
- if (!IS_IMMUTABLE(inode)) {
+ if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto unlock_inode;
}
@@ -3635,8 +3632,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret >= 0) {
- F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
- f2fs_set_inode_flags(inode);
+ clear_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -4023,9 +4019,8 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
LLONG_MAX);
if (ret)
- f2fs_warn(sbi, "%s: The file might be partially decompressed "
- "(errno=%d). Please delete the file.\n",
- __func__, ret);
+ f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
+ __func__, ret);
out:
inode_unlock(inode);
file_end_write(filp);
@@ -4097,9 +4092,8 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
clear_inode_flag(inode, FI_ENABLE_COMPRESS);
if (ret)
- f2fs_warn(sbi, "%s: The file might be partially compressed "
- "(errno=%d). Please delete the file.\n",
- __func__, ret);
+ f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
+ __func__, ret);
out:
inode_unlock(inode);
file_end_write(filp);
@@ -4254,6 +4248,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto unlock;
}
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
ret = generic_write_checks(iocb, from);
if (ret > 0) {
bool preallocated = false;
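In file.c, releasing compressed blocks no longer marks the inode immutable: the new FI_COMPRESS_RELEASED flag is set instead, mmap write faults and buffered writes are refused while it is set, and F2FS_IOC_RESERVE_COMPRESS_BLOCKS only proceeds once it is set. A toy model of that gating (illustrative names, not the f2fs ioctl implementation):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct inode_model {
        bool compress_released; /* stand-in for FI_COMPRESS_RELEASED */
};

/* Write path: refuse data modification while compressed blocks are released. */
static int check_write(const struct inode_model *i)
{
        return i->compress_released ? -EPERM : 0;
}

/* Release ioctl: only valid once. */
static int release_blocks(struct inode_model *i)
{
        if (i->compress_released)
                return -EINVAL;
        i->compress_released = true;
        return 0;
}

/* Reserve ioctl: only valid after a release. */
static int reserve_blocks(struct inode_model *i)
{
        if (!i->compress_released)
                return -EINVAL;
        i->compress_released = false;
        return 0;
}

int main(void)
{
        struct inode_model ino = { 0 };

        release_blocks(&ino);
        printf("write while released: %d\n", check_write(&ino));  /* -EPERM */
        reserve_blocks(&ino);
        printf("write after reserve:  %d\n", check_write(&ino));  /* 0 */
        return 0;
}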
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 8d1f17ab94d8..0e42ee5f7770 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1031,8 +1031,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (unlikely(check_valid_map(sbi, segno, offset))) {
if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
- f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
- blkaddr, source_blkaddr, segno);
+ f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
+ blkaddr, source_blkaddr, segno);
f2fs_bug_on(sbi, 1);
}
}
@@ -1261,6 +1261,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_put_page(mpage, 1);
invalidate_mapping_pages(META_MAPPING(fio.sbi),
fio.old_blkaddr, fio.old_blkaddr);
+ f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -1336,7 +1337,7 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
goto out;
}
set_page_dirty(page);
- set_cold_data(page);
+ set_page_private_gcing(page);
} else {
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
@@ -1362,11 +1363,11 @@ retry:
f2fs_remove_dirty_inode(inode);
}
- set_cold_data(page);
+ set_page_private_gcing(page);
err = f2fs_do_write_data_page(&fio);
if (err) {
- clear_cold_data(page);
+ clear_page_private_gcing(page);
if (err == -ENOMEM) {
congestion_wait(BLK_RW_ASYNC,
DEFAULT_IO_TIMEOUT);
@@ -1450,10 +1451,8 @@ next_step:
if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode) || is_bad_inode(inode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
+ if (IS_ERR(inode) || is_bad_inode(inode))
continue;
- }
if (!down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
@@ -1822,6 +1821,7 @@ static void init_atgc_management(struct f2fs_sb_info *sbi)
am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
+ am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 92652ca7a7c8..56a20d5c15da 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -173,7 +173,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
/* clear inline data and flag after data writeback */
f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
- clear_inline_node(dn->inode_page);
+ clear_page_private_inline(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
clear_inode_flag(dn->inode, FI_INLINE_DATA);
@@ -255,7 +255,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
- clear_inline_node(dn.inode_page);
+ clear_page_private_inline(dn.inode_page);
f2fs_put_dnode(&dn);
return 0;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index b401f08569f7..9141147b5bb0 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -18,6 +18,10 @@
#include <trace/events/f2fs.h>
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+extern const struct address_space_operations f2fs_compress_aops;
+#endif
+
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
if (is_inode_flag_set(inode, FI_NEW_INODE))
@@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
goto make_now;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (ino == F2FS_COMPRESS_INO(sbi))
+ goto make_now;
+#endif
+
ret = do_read_inode(inode);
if (ret)
goto bad_inode;
@@ -504,6 +513,12 @@ make_now:
} else if (ino == F2FS_META_INO(sbi)) {
inode->i_mapping->a_ops = &f2fs_meta_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+ } else if (ino == F2FS_COMPRESS_INO(sbi)) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ inode->i_mapping->a_ops = &f2fs_compress_aops;
+#endif
+ mapping_set_gfp_mask(inode->i_mapping,
+ GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
} else if (S_ISREG(inode->i_mode)) {
inode->i_op = &f2fs_file_inode_operations;
inode->i_fop = &f2fs_file_operations;
@@ -646,7 +661,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
/* deleted inode */
if (inode->i_nlink == 0)
- clear_inline_node(node_page);
+ clear_page_private_inline(node_page);
F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
@@ -723,8 +738,12 @@ void f2fs_evict_inode(struct inode *inode)
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
+ if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
+ f2fs_invalidate_compress_pages(sbi, inode->i_ino);
+
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
- inode->i_ino == F2FS_META_INO(sbi))
+ inode->i_ino == F2FS_META_INO(sbi) ||
+ inode->i_ino == F2FS_COMPRESS_INO(sbi))
goto out_clear;
f2fs_bug_on(sbi, get_dirty_pages(inode));
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a9cd9cf97229..e149c8c66a71 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -153,7 +153,8 @@ fail_drop:
return ERR_PTR(err);
}
-static inline int is_extension_exist(const unsigned char *s, const char *sub)
+static inline int is_extension_exist(const unsigned char *s, const char *sub,
+ bool tmp_ext)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
@@ -169,6 +170,13 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
if (slen < sublen + 2)
return 0;
+ if (!tmp_ext) {
+ /* file has no temp extension */
+ if (s[slen - sublen - 1] != '.')
+ return 0;
+ return !strncasecmp(s + slen - sublen, sub, sublen);
+ }
+
for (i = 1; i < slen - sublen; i++) {
if (s[i] != '.')
continue;
@@ -194,7 +202,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
hot_count = sbi->raw_super->hot_ext_count;
for (i = 0; i < cold_count + hot_count; i++) {
- if (is_extension_exist(name, extlist[i]))
+ if (is_extension_exist(name, extlist[i], true))
break;
}
@@ -279,14 +287,16 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
const unsigned char *name)
{
__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
- unsigned char (*ext)[F2FS_EXTENSION_LEN];
- unsigned int ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ unsigned char (*noext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).noextensions;
+ unsigned char (*ext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).extensions;
+ unsigned char ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ unsigned char noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
int i, cold_count, hot_count;
if (!f2fs_sb_has_compression(sbi) ||
- is_inode_flag_set(inode, FI_COMPRESSED_FILE) ||
F2FS_I(inode)->i_flags & F2FS_NOCOMP_FL ||
- !f2fs_may_compress(inode))
+ !f2fs_may_compress(inode) ||
+ (!ext_cnt && !noext_cnt))
return;
down_read(&sbi->sb_lock);
@@ -295,7 +305,7 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
hot_count = sbi->raw_super->hot_ext_count;
for (i = cold_count; i < cold_count + hot_count; i++) {
- if (is_extension_exist(name, extlist[i])) {
+ if (is_extension_exist(name, extlist[i], false)) {
up_read(&sbi->sb_lock);
return;
}
@@ -303,10 +313,18 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
up_read(&sbi->sb_lock);
- ext = F2FS_OPTION(sbi).extensions;
+ for (i = 0; i < noext_cnt; i++) {
+ if (is_extension_exist(name, noext[i], false)) {
+ f2fs_disable_compressed_file(inode);
+ return;
+ }
+ }
+
+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+ return;
for (i = 0; i < ext_cnt; i++) {
- if (!is_extension_exist(name, ext[i]))
+ if (!is_extension_exist(name, ext[i], false))
continue;
set_compress_context(inode);
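namei.c teaches is_extension_exist() a tmp_ext mode: the hot/cold temperature table keeps the old behaviour of matching an extension anywhere after a dot (so temporary suffixes still count), while compression decisions require the extension to terminate the file name, and any match in the new nocompress list disables compression before the compress list is consulted. The strict match reduces to the check below (a standalone sketch, simplified: the real helper also handles the "*" wildcard and F2FS_EXTENSION_LEN limits):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Strict variant (tmp_ext == false): ".ext" must terminate the file name. */
static bool ext_matches(const char *name, const char *ext)
{
        size_t slen = strlen(name), sublen = strlen(ext);

        if (sublen > 7 || slen < sublen + 2)
                return false;
        if (name[slen - sublen - 1] != '.')
                return false;
        return strncasecmp(name + slen - sublen, ext, sublen) == 0;
}

int main(void)
{
        /* "video.mp4.tmp" no longer counts as an .mp4 file for compression. */
        printf("%d %d\n", ext_matches("video.MP4", "mp4"),
               ext_matches("video.mp4.tmp", "mp4"));
        return 0;
}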
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e67ce5f13b98..0be9e2d7120e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -97,6 +97,20 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
sizeof(struct discard_cmd)) >> PAGE_SHIFT;
res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
+ } else if (type == COMPRESS_PAGE) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ unsigned long free_ram = val.freeram;
+
+ /*
+ * If free memory is lower than the watermark or the cached page
+ * count exceeds the threshold, deny caching the compressed page.
+ */
+ res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
+ (COMPRESS_MAPPING(sbi)->nrpages <
+ free_ram * sbi->compress_percent / 100);
+#else
+ res = false;
+#endif
} else {
if (!sbi->sb->s_bdi->wb.dirty_exceeded)
return true;
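The new COMPRESS_PAGE case of f2fs_available_free_memory() gates the compressed-block cache on memory pressure: caching is allowed only while free RAM stays above sbi->compress_watermark percent of available RAM and the cache itself stays below sbi->compress_percent percent of free RAM (both default to 20 via the COMPRESS_WATERMARK/COMPRESS_PERCENT constants added in f2fs.h above). As plain arithmetic the check is just:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative restatement of the COMPRESS_PAGE branch: all values are in
 * pages; watermark and percent are percentages (20/20 by default here).
 */
static bool may_cache_compressed_page(unsigned long free_ram,
                                      unsigned long avail_ram,
                                      unsigned long cached_pages,
                                      unsigned int watermark,
                                      unsigned int percent)
{
        return free_ram > avail_ram * watermark / 100 &&
               cached_pages < free_ram * percent / 100;
}

int main(void)
{
        /* 1 GiB free of 4 GiB available (4 KiB pages), 10000 pages cached. */
        printf("%d\n", may_cache_compressed_page(262144, 1048576, 10000, 20, 20));
        return 0;
}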
@@ -1535,13 +1549,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
trace_f2fs_writepage(page, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
- if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
- ClearPageUptodate(page);
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
- return 0;
- }
- goto redirty_out;
+ ClearPageUptodate(page);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ unlock_page(page);
+ return 0;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -1860,8 +1871,8 @@ continue_unlock:
}
/* flush inline_data, if it's async context. */
- if (is_inline_node(page)) {
- clear_inline_node(page);
+ if (page_private_inline(page)) {
+ clear_page_private_inline(page);
unlock_page(page);
flush_inline_data(sbi, ino_of_node(page));
continue;
@@ -1941,8 +1952,8 @@ continue_unlock:
goto write_node;
/* flush inline_data */
- if (is_inline_node(page)) {
- clear_inline_node(page);
+ if (page_private_inline(page)) {
+ clear_page_private_inline(page);
unlock_page(page);
flush_inline_data(sbi, ino_of_node(page));
goto lock_node;
@@ -2096,7 +2107,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
- f2fs_set_page_private(page, 0);
+ set_page_private_reference(page);
return 1;
}
return 0;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 7a45c0f10629..ff14a6e5ac1c 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -38,6 +38,9 @@
/* return value for read_node_page */
#define LOCKED_PAGE 1
+/* check pinned file's alignment status of physical blocks */
+#define FILE_NOT_ALIGNED 1
+
/* For flag in struct node_info */
enum {
IS_CHECKPOINTED, /* is it checkpointed before? */
@@ -148,6 +151,7 @@ enum mem_type {
EXTENT_CACHE, /* indicates extent cache */
INMEM_PAGES, /* indicates inmemory pages */
DISCARD_CACHE, /* indicates memory of cached discard cmds */
+ COMPRESS_PAGE, /* indicates memory of cached compressed pages */
BASE_CHECK, /* check kernel status */
};
@@ -389,20 +393,6 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
* - Mark cold node blocks in their node footer
* - Mark cold data pages in page cache
*/
-static inline int is_cold_data(struct page *page)
-{
- return PageChecked(page);
-}
-
-static inline void set_cold_data(struct page *page)
-{
- SetPageChecked(page);
-}
-
-static inline void clear_cold_data(struct page *page)
-{
- ClearPageChecked(page);
-}
static inline int is_node(struct page *page, int type)
{
@@ -414,21 +404,6 @@ static inline int is_node(struct page *page, int type)
#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
-static inline int is_inline_node(struct page *page)
-{
- return PageChecked(page);
-}
-
-static inline void set_inline_node(struct page *page)
-{
- SetPageChecked(page);
-}
-
-static inline void clear_inline_node(struct page *page)
-{
- ClearPageChecked(page);
-}
-
static inline void set_cold_node(struct page *page, bool is_dir)
{
struct f2fs_node *rn = F2FS_NODE(page);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 422146c6d866..695eacfe776c 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -45,6 +45,10 @@
static struct kmem_cache *fsync_entry_slab;
+#ifdef CONFIG_UNICODE
+extern struct kmem_cache *f2fs_cf_name_slab;
+#endif
+
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
@@ -145,7 +149,7 @@ static int init_recovered_filename(const struct inode *dir,
f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
/* Case-sensitive match is fine for recovery */
- kfree(fname->cf_name.name);
+ kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
fname->cf_name.name = NULL;
#endif
} else {
@@ -788,13 +792,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
- fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
- sizeof(struct fsync_inode_entry));
- if (!fsync_entry_slab) {
- err = -ENOMEM;
- goto out;
- }
-
INIT_LIST_HEAD(&inode_list);
INIT_LIST_HEAD(&tmp_inode_list);
INIT_LIST_HEAD(&dir_list);
@@ -867,8 +864,6 @@ skip:
}
}
- kmem_cache_destroy(fsync_entry_slab);
-out:
#ifdef CONFIG_QUOTA
/* Turn quotas off */
if (quota_enabled)
@@ -878,3 +873,17 @@ out:
return ret ? ret : err;
}
+
+int __init f2fs_create_recovery_cache(void)
+{
+ fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
+ sizeof(struct fsync_inode_entry));
+ if (!fsync_entry_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_recovery_cache(void)
+{
+ kmem_cache_destroy(fsync_entry_slab);
+}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 51dc79fad4fe..15cc89eef28d 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -186,10 +186,7 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
struct inmem_pages *new;
- if (PagePrivate(page))
- set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
- else
- f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
+ set_page_private_atomic(page);
new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
@@ -272,9 +269,10 @@ next:
/* we don't need to invalidate this in the sccessful status */
if (drop || recover) {
ClearPageUptodate(page);
- clear_cold_data(page);
+ clear_page_private_gcing(page);
}
- f2fs_clear_page_private(page);
+ detach_page_private(page);
+ set_page_private(page, 0);
f2fs_put_page(page, 1);
list_del(&cur->list);
@@ -357,7 +355,7 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
struct list_head *head = &fi->inmem_pages;
struct inmem_pages *cur = NULL;
- f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
+ f2fs_bug_on(sbi, !page_private_atomic(page));
mutex_lock(&fi->inmem_lock);
list_for_each_entry(cur, head, list) {
@@ -373,9 +371,12 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
kmem_cache_free(inmem_entry_slab, cur);
ClearPageUptodate(page);
- f2fs_clear_page_private(page);
+ clear_page_private_atomic(page);
f2fs_put_page(page, 0);
+ detach_page_private(page);
+ set_page_private(page, 0);
+
trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}
@@ -2321,6 +2322,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
return;
invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+ f2fs_invalidate_compress_page(sbi, addr);
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
@@ -3289,7 +3291,10 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (fio->type == DATA) {
struct inode *inode = fio->page->mapping->host;
- if (is_cold_data(fio->page)) {
+ if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
+ return CURSEG_COLD_DATA_PINNED;
+
+ if (page_private_gcing(fio->page)) {
if (fio->sbi->am.atgc_enabled &&
(fio->io_type == FS_DATA_IO) &&
(fio->sbi->gc_mode != GC_URGENT_HIGH))
@@ -3468,9 +3473,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio);
- if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
invalidate_mapping_pages(META_MAPPING(fio->sbi),
fio->old_blkaddr, fio->old_blkaddr);
+ f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
+ }
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -3660,6 +3667,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
invalidate_mapping_pages(META_MAPPING(sbi),
old_blkaddr, old_blkaddr);
+ f2fs_invalidate_compress_page(sbi, old_blkaddr);
if (!from_gc)
update_segment_mtime(sbi, old_blkaddr, 0);
update_sit_entry(sbi, old_blkaddr, -1);
@@ -3919,7 +3927,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
/* sanity check for summary blocks */
if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
- f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
+ f2fs_err(sbi, "invalid journal entries nats %u sits %u",
nats_in_cursum(nat_j), sits_in_cursum(sit_j));
return -EINVAL;
}
@@ -4682,6 +4690,10 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
unsigned int blkofs = curseg->next_blkoff;
+ if (f2fs_sb_has_readonly(sbi) &&
+ i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
+ continue;
+
sanity_check_seg_type(sbi, curseg->seg_type);
if (f2fs_test_bit(blkofs, se->cur_valid_map))
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7d325bfaf65a..8fecd3050ccd 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -148,8 +148,10 @@ enum {
Opt_compress_algorithm,
Opt_compress_log_size,
Opt_compress_extension,
+ Opt_nocompress_extension,
Opt_compress_chksum,
Opt_compress_mode,
+ Opt_compress_cache,
Opt_atgc,
Opt_gc_merge,
Opt_nogc_merge,
@@ -222,8 +224,10 @@ static match_table_t f2fs_tokens = {
{Opt_compress_algorithm, "compress_algorithm=%s"},
{Opt_compress_log_size, "compress_log_size=%u"},
{Opt_compress_extension, "compress_extension=%s"},
+ {Opt_nocompress_extension, "nocompress_extension=%s"},
{Opt_compress_chksum, "compress_chksum"},
{Opt_compress_mode, "compress_mode=%s"},
+ {Opt_compress_cache, "compress_cache"},
{Opt_atgc, "atgc"},
{Opt_gc_merge, "gc_merge"},
{Opt_nogc_merge, "nogc_merge"},
@@ -275,6 +279,24 @@ static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
return 0;
}
+
+struct kmem_cache *f2fs_cf_name_slab;
+static int __init f2fs_create_casefold_cache(void)
+{
+ f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
+ F2FS_NAME_LEN);
+ if (!f2fs_cf_name_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+static void f2fs_destroy_casefold_cache(void)
+{
+ kmem_cache_destroy(f2fs_cf_name_slab);
+}
+#else
+static int __init f2fs_create_casefold_cache(void) { return 0; }
+static void f2fs_destroy_casefold_cache(void) { }
#endif
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
@@ -473,6 +495,43 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
+/*
+ * 1. The same extension name cannot not appear in both compress and non-compress extension
+ * at the same time.
+ * 2. If the compress extension specifies all files, the types specified by the non-compress
+ * extension will be treated as special cases and will not be compressed.
+ * 3. Don't allow the non-compress extension specifies all files.
+ */
+static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
+{
+ unsigned char (*ext)[F2FS_EXTENSION_LEN];
+ unsigned char (*noext)[F2FS_EXTENSION_LEN];
+ int ext_cnt, noext_cnt, index = 0, no_index = 0;
+
+ ext = F2FS_OPTION(sbi).extensions;
+ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ noext = F2FS_OPTION(sbi).noextensions;
+ noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+
+ if (!noext_cnt)
+ return 0;
+
+ for (no_index = 0; no_index < noext_cnt; no_index++) {
+ if (!strcasecmp("*", noext[no_index])) {
+ f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
+ return -EINVAL;
+ }
+ for (index = 0; index < ext_cnt; index++) {
+ if (!strcasecmp(ext[index], noext[no_index])) {
+ f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
+ ext[index]);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
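f2fs_test_compress_extension() above enforces the rules in the preceding comment: nocompress_extension may not be "*", and the same name may not appear on both lists. The same validation as a standalone program (illustrative only):

#include <stdio.h>
#include <strings.h>

/* Returns 0 if the two extension lists are compatible, -1 otherwise. */
static int check_ext_lists(const char *ext[], int ext_cnt,
                           const char *noext[], int noext_cnt)
{
        int i, j;

        for (j = 0; j < noext_cnt; j++) {
                if (!strcasecmp(noext[j], "*"))
                        return -1;      /* "*" is only meaningful for compress_extension */
                for (i = 0; i < ext_cnt; i++)
                        if (!strcasecmp(ext[i], noext[j]))
                                return -1;      /* same name on both lists */
        }
        return 0;
}

int main(void)
{
        const char *ext[] = { "*", "txt" };
        const char *noext[] = { "mp4", "txt" };

        printf("%d\n", check_ext_lists(ext, 2, noext, 2));      /* -1: "txt" conflicts */
        return 0;
}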
+
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
@@ -546,7 +605,8 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
unsigned char (*ext)[F2FS_EXTENSION_LEN];
- int ext_cnt;
+ unsigned char (*noext)[F2FS_EXTENSION_LEN];
+ int ext_cnt, noext_cnt;
#endif
char *p, *name;
int arg = 0;
@@ -555,7 +615,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
int ret;
if (!options)
- return 0;
+ goto default_check;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -1049,6 +1109,30 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
F2FS_OPTION(sbi).compress_ext_cnt++;
kfree(name);
break;
+ case Opt_nocompress_extension:
+ if (!f2fs_sb_has_compression(sbi)) {
+ f2fs_info(sbi, "Image doesn't support compression");
+ break;
+ }
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+
+ noext = F2FS_OPTION(sbi).noextensions;
+ noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN ||
+ noext_cnt >= COMPRESS_EXT_NUM) {
+ f2fs_err(sbi,
+ "invalid extension length/number");
+ kfree(name);
+ return -EINVAL;
+ }
+
+ strcpy(noext[noext_cnt], name);
+ F2FS_OPTION(sbi).nocompress_ext_cnt++;
+ kfree(name);
+ break;
case Opt_compress_chksum:
F2FS_OPTION(sbi).compress_chksum = true;
break;
@@ -1066,12 +1150,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
}
kfree(name);
break;
+ case Opt_compress_cache:
+ set_opt(sbi, COMPRESS_CACHE);
+ break;
#else
case Opt_compress_algorithm:
case Opt_compress_log_size:
case Opt_compress_extension:
+ case Opt_nocompress_extension:
case Opt_compress_chksum:
case Opt_compress_mode:
+ case Opt_compress_cache:
f2fs_info(sbi, "compression options not supported");
break;
#endif
@@ -1090,6 +1179,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
return -EINVAL;
}
}
+default_check:
#ifdef CONFIG_QUOTA
if (f2fs_check_quota_options(sbi))
return -EINVAL;
@@ -1122,6 +1212,13 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
}
#endif
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_test_compress_extension(sbi)) {
+ f2fs_err(sbi, "invalid compress or nocompress extension");
+ return -EINVAL;
+ }
+#endif
+
if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
F2FS_IO_SIZE_KB(sbi));
@@ -1153,7 +1250,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
}
if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
- f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
+ f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
return -EINVAL;
}
@@ -1162,6 +1259,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
*/
if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+
+ if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
+ f2fs_err(sbi, "Allow to mount readonly mode only");
+ return -EROFS;
+ }
return 0;
}
@@ -1403,6 +1505,8 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_bug_on(sbi, sbi->fsync_node_num);
+ f2fs_destroy_compress_inode(sbi);
+
iput(sbi->node_inode);
sbi->node_inode = NULL;
@@ -1665,6 +1769,11 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
F2FS_OPTION(sbi).extensions[i]);
}
+ for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
+ seq_printf(seq, ",nocompress_extension=%s",
+ F2FS_OPTION(sbi).noextensions[i]);
+ }
+
if (F2FS_OPTION(sbi).compress_chksum)
seq_puts(seq, ",compress_chksum");
@@ -1672,6 +1781,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
seq_printf(seq, ",compress_mode=%s", "fs");
else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
seq_printf(seq, ",compress_mode=%s", "user");
+
+ if (test_opt(sbi, COMPRESS_CACHE))
+ seq_puts(seq, ",compress_cache");
}
#endif
@@ -1819,7 +1931,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
- F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
+ if (f2fs_sb_has_readonly(sbi))
+ F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
+ else
+ F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
+
F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
@@ -1949,6 +2065,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
bool no_io_align = !F2FS_IO_ALIGNED(sbi);
bool no_atgc = !test_opt(sbi, ATGC);
+ bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
bool checkpoint_changed;
#ifdef CONFIG_QUOTA
int i, j;
@@ -2004,6 +2121,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
goto skip;
+ if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
+ err = -EROFS;
+ goto restore_opts;
+ }
+
#ifdef CONFIG_QUOTA
if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
err = dquot_suspend(sb, -1);
@@ -2041,6 +2163,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
+ err = -EINVAL;
+ f2fs_warn(sbi, "switching the compress_cache option on remount is not allowed");
+ goto restore_opts;
+ }
+
if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
err = -EINVAL;
f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
@@ -3137,14 +3265,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
- if (unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
+ if (!f2fs_sb_has_readonly(sbi) &&
+ unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
ovp_segments == 0 || reserved_segments == 0)) {
f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
return 1;
}
-
user_block_count = le64_to_cpu(ckpt->user_block_count);
- segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
+ (f2fs_sb_has_readonly(sbi) ? 1 : 0);
log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
if (!user_block_count || user_block_count >=
segment_count_main << log_blocks_per_seg) {
@@ -3175,6 +3304,10 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
+
+ if (f2fs_sb_has_readonly(sbi))
+ goto check_data;
+
for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
le32_to_cpu(ckpt->cur_node_segno[j])) {
@@ -3185,10 +3318,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
}
}
}
+check_data:
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
+
+ if (f2fs_sb_has_readonly(sbi))
+ goto skip_cross;
+
for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
le32_to_cpu(ckpt->cur_data_segno[j])) {
@@ -3210,7 +3348,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
}
}
}
-
+skip_cross:
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
@@ -3555,7 +3693,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
#ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
!f2fs_sb_has_blkzoned(sbi)) {
- f2fs_err(sbi, "Zoned block device feature not enabled\n");
+ f2fs_err(sbi, "Zoned block device feature not enabled");
return -EINVAL;
}
if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
@@ -3940,10 +4078,14 @@ try_onemore:
goto free_node_inode;
}
- err = f2fs_register_sysfs(sbi);
+ err = f2fs_init_compress_inode(sbi);
if (err)
goto free_root_inode;
+ err = f2fs_register_sysfs(sbi);
+ if (err)
+ goto free_compress_inode;
+
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
@@ -4084,6 +4226,8 @@ free_meta:
/* evict some inodes being cached by GC */
evict_inodes(sb);
f2fs_unregister_sysfs(sbi);
+free_compress_inode:
+ f2fs_destroy_compress_inode(sbi);
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
@@ -4162,6 +4306,15 @@ static void kill_f2fs_super(struct super_block *sb)
f2fs_stop_gc_thread(sbi);
f2fs_stop_discard_thread(sbi);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ /*
+ * The later evict_inode() can then bypass checking and invalidating
+ * the compress inode cache.
+ */
+ if (test_opt(sbi, COMPRESS_CACHE))
+ truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
+#endif
+
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
@@ -4227,9 +4380,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_checkpoint_caches();
if (err)
goto free_segment_manager_caches;
- err = f2fs_create_extent_cache();
+ err = f2fs_create_recovery_cache();
if (err)
goto free_checkpoint_caches;
+ err = f2fs_create_extent_cache();
+ if (err)
+ goto free_recovery_cache;
err = f2fs_create_garbage_collection_cache();
if (err)
goto free_extent_cache;
@@ -4258,7 +4414,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_compress_cache();
if (err)
goto free_compress_mempool;
+ err = f2fs_create_casefold_cache();
+ if (err)
+ goto free_compress_cache;
return 0;
+free_compress_cache:
+ f2fs_destroy_compress_cache();
free_compress_mempool:
f2fs_destroy_compress_mempool();
free_bioset:
@@ -4278,6 +4439,8 @@ free_garbage_collection_cache:
f2fs_destroy_garbage_collection_cache();
free_extent_cache:
f2fs_destroy_extent_cache();
+free_recovery_cache:
+ f2fs_destroy_recovery_cache();
free_checkpoint_caches:
f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
@@ -4292,6 +4455,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_casefold_cache();
f2fs_destroy_compress_cache();
f2fs_destroy_compress_mempool();
f2fs_destroy_bioset();
@@ -4303,6 +4467,7 @@ static void __exit exit_f2fs_fs(void)
f2fs_exit_sysfs();
f2fs_destroy_garbage_collection_cache();
f2fs_destroy_extent_cache();
+ f2fs_destroy_recovery_cache();
f2fs_destroy_checkpoint_caches();
f2fs_destroy_segment_manager_caches();
f2fs_destroy_node_manager_caches();
@@ -4315,4 +4480,5 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc32");
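Two of the policy changes buried in the super.c hunks above are worth calling out: an image formatted with the new readonly feature may only be mounted read-only (parse_options() returns -EROFS otherwise), and compress_cache cannot be flipped across a remount. The remount guard simply snapshots the option before the options are re-parsed and compares afterwards; a tiny stand-alone sketch of that pattern, with invented variable names:

#include <stdbool.h>
#include <stdio.h>

static bool compress_cache;		/* stand-in for test_opt(sbi, COMPRESS_CACHE) */

static int remount(bool new_compress_cache)
{
	bool no_compress_cache = !compress_cache;	/* snapshot before parsing */

	compress_cache = new_compress_cache;		/* parse_options() runs here */

	if (no_compress_cache == compress_cache) {	/* i.e. the option flipped */
		compress_cache = !no_compress_cache;	/* restore_opts */
		return -1;				/* -EINVAL in the kernel */
	}
	return 0;
}

int main(void)
{
	printf("same value: %d\n", remount(false));	/*  0: unchanged, allowed */
	printf("flipped:    %d\n", remount(true));	/* -1: toggling rejected */
	return 0;
}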
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 39b522ec73e7..6642246206bd 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -37,6 +37,7 @@ enum {
#endif
RESERVED_BLOCKS, /* struct f2fs_sb_info */
CPRC_INFO, /* struct ckpt_req_control */
+ ATGC_INFO, /* struct atgc_management */
};
struct f2fs_attr {
@@ -75,6 +76,8 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
#endif
else if (struct_type == CPRC_INFO)
return (unsigned char *)&sbi->cprc_info;
+ else if (struct_type == ATGC_INFO)
+ return (unsigned char *)&sbi->am;
return NULL;
}
@@ -155,6 +158,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_casefold(sbi))
len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "casefold");
+ if (f2fs_sb_has_readonly(sbi))
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "readonly");
if (f2fs_sb_has_compression(sbi))
len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "compression");
@@ -495,6 +501,20 @@ out:
}
#endif
+ if (!strcmp(a->attr.name, "atgc_candidate_ratio")) {
+ if (t > 100)
+ return -EINVAL;
+ sbi->am.candidate_ratio = t;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "atgc_age_weight")) {
+ if (t > 100)
+ return -EINVAL;
+ sbi->am.age_weight = t;
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -546,46 +566,49 @@ static void f2fs_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
-enum feat_id {
- FEAT_CRYPTO = 0,
- FEAT_BLKZONED,
- FEAT_ATOMIC_WRITE,
- FEAT_EXTRA_ATTR,
- FEAT_PROJECT_QUOTA,
- FEAT_INODE_CHECKSUM,
- FEAT_FLEXIBLE_INLINE_XATTR,
- FEAT_QUOTA_INO,
- FEAT_INODE_CRTIME,
- FEAT_LOST_FOUND,
- FEAT_VERITY,
- FEAT_SB_CHECKSUM,
- FEAT_CASEFOLD,
- FEAT_COMPRESSION,
- FEAT_TEST_DUMMY_ENCRYPTION_V2,
-};
-
+/*
+ * Note that there are three feature list entries:
+ * 1) /sys/fs/f2fs/features
+ * : shows runtime features supported by in-kernel f2fs along with Kconfig.
+ * - ref. F2FS_FEATURE_RO_ATTR()
+ *
+ * 2) /sys/fs/f2fs/$s_id/features <deprecated>
+ * : shows on-disk features enabled by mkfs.f2fs, used for old kernels. This
+ * won't add new feature anymore, and thus, users should check entries in 3)
+ * instead of this 2).
+ *
+ * 3) /sys/fs/f2fs/$s_id/feature_list
+ * : shows on-disk features enabled by mkfs.f2fs per instance, which follows
+ * sysfs entry rule where each entry should expose single value.
+ * This list covers old feature list provided by 2) and beyond. Therefore,
+ * please add new on-disk feature in this list only.
+ * - ref. F2FS_SB_FEATURE_RO_ATTR()
+ */
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- switch (a->id) {
- case FEAT_CRYPTO:
- case FEAT_BLKZONED:
- case FEAT_ATOMIC_WRITE:
- case FEAT_EXTRA_ATTR:
- case FEAT_PROJECT_QUOTA:
- case FEAT_INODE_CHECKSUM:
- case FEAT_FLEXIBLE_INLINE_XATTR:
- case FEAT_QUOTA_INO:
- case FEAT_INODE_CRTIME:
- case FEAT_LOST_FOUND:
- case FEAT_VERITY:
- case FEAT_SB_CHECKSUM:
- case FEAT_CASEFOLD:
- case FEAT_COMPRESSION:
- case FEAT_TEST_DUMMY_ENCRYPTION_V2:
+ return sprintf(buf, "supported\n");
+}
+
+#define F2FS_FEATURE_RO_ATTR(_name) \
+static struct f2fs_attr f2fs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0444 }, \
+ .show = f2fs_feature_show, \
+}
+
+static ssize_t f2fs_sb_feature_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ if (F2FS_HAS_FEATURE(sbi, a->id))
return sprintf(buf, "supported\n");
- }
- return 0;
+ return sprintf(buf, "unsupported\n");
+}
+
+#define F2FS_SB_FEATURE_RO_ATTR(_name, _feat) \
+static struct f2fs_attr f2fs_attr_sb_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0444 }, \
+ .show = f2fs_sb_feature_show, \
+ .id = F2FS_FEATURE_##_feat, \
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
@@ -605,13 +628,6 @@ static struct f2fs_attr f2fs_attr_##_name = { \
#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
-#define F2FS_FEATURE_RO_ATTR(_name, _id) \
-static struct f2fs_attr f2fs_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = 0444 }, \
- .show = f2fs_feature_show, \
- .id = _id, \
-}
-
#define F2FS_STAT_ATTR(_struct_type, _struct_name, _name, _elname) \
static struct f2fs_attr f2fs_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0444 }, \
@@ -685,31 +701,44 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
#endif
#ifdef CONFIG_FS_ENCRYPTION
-F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
-F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2, FEAT_TEST_DUMMY_ENCRYPTION_V2);
+F2FS_FEATURE_RO_ATTR(encryption);
+F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2);
+#ifdef CONFIG_UNICODE
+F2FS_FEATURE_RO_ATTR(encrypted_casefold);
#endif
+#endif /* CONFIG_FS_ENCRYPTION */
#ifdef CONFIG_BLK_DEV_ZONED
-F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED);
+F2FS_FEATURE_RO_ATTR(block_zoned);
#endif
-F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE);
-F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR);
-F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
-F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
-F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
-F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
-F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
-F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
+F2FS_FEATURE_RO_ATTR(atomic_write);
+F2FS_FEATURE_RO_ATTR(extra_attr);
+F2FS_FEATURE_RO_ATTR(project_quota);
+F2FS_FEATURE_RO_ATTR(inode_checksum);
+F2FS_FEATURE_RO_ATTR(flexible_inline_xattr);
+F2FS_FEATURE_RO_ATTR(quota_ino);
+F2FS_FEATURE_RO_ATTR(inode_crtime);
+F2FS_FEATURE_RO_ATTR(lost_found);
#ifdef CONFIG_FS_VERITY
-F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
+F2FS_FEATURE_RO_ATTR(verity);
#endif
-F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
-F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
+F2FS_FEATURE_RO_ATTR(sb_checksum);
+#ifdef CONFIG_UNICODE
+F2FS_FEATURE_RO_ATTR(casefold);
+#endif
+F2FS_FEATURE_RO_ATTR(readonly);
#ifdef CONFIG_F2FS_FS_COMPRESSION
-F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
+F2FS_FEATURE_RO_ATTR(compression);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_written_block, compr_written_block);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_saved_block, compr_saved_block);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_new_inode, compr_new_inode);
#endif
+F2FS_FEATURE_RO_ATTR(pin_file);
+
+/* For ATGC */
+F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_ratio, candidate_ratio);
+F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_count, max_candidate_count);
+F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_weight, age_weight);
+F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_threshold, age_threshold);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -778,6 +807,11 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(compr_saved_block),
ATTR_LIST(compr_new_inode),
#endif
+ /* For ATGC */
+ ATTR_LIST(atgc_candidate_ratio),
+ ATTR_LIST(atgc_candidate_count),
+ ATTR_LIST(atgc_age_weight),
+ ATTR_LIST(atgc_age_threshold),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
@@ -786,7 +820,10 @@ static struct attribute *f2fs_feat_attrs[] = {
#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
ATTR_LIST(test_dummy_encryption_v2),
+#ifdef CONFIG_UNICODE
+ ATTR_LIST(encrypted_casefold),
#endif
+#endif /* CONFIG_FS_ENCRYPTION */
#ifdef CONFIG_BLK_DEV_ZONED
ATTR_LIST(block_zoned),
#endif
@@ -802,10 +839,14 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(verity),
#endif
ATTR_LIST(sb_checksum),
+#ifdef CONFIG_UNICODE
ATTR_LIST(casefold),
+#endif
+ ATTR_LIST(readonly),
#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compression),
#endif
+ ATTR_LIST(pin_file),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
@@ -817,6 +858,40 @@ static struct attribute *f2fs_stat_attrs[] = {
};
ATTRIBUTE_GROUPS(f2fs_stat);
+F2FS_SB_FEATURE_RO_ATTR(encryption, ENCRYPT);
+F2FS_SB_FEATURE_RO_ATTR(block_zoned, BLKZONED);
+F2FS_SB_FEATURE_RO_ATTR(extra_attr, EXTRA_ATTR);
+F2FS_SB_FEATURE_RO_ATTR(project_quota, PRJQUOTA);
+F2FS_SB_FEATURE_RO_ATTR(inode_checksum, INODE_CHKSUM);
+F2FS_SB_FEATURE_RO_ATTR(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
+F2FS_SB_FEATURE_RO_ATTR(quota_ino, QUOTA_INO);
+F2FS_SB_FEATURE_RO_ATTR(inode_crtime, INODE_CRTIME);
+F2FS_SB_FEATURE_RO_ATTR(lost_found, LOST_FOUND);
+F2FS_SB_FEATURE_RO_ATTR(verity, VERITY);
+F2FS_SB_FEATURE_RO_ATTR(sb_checksum, SB_CHKSUM);
+F2FS_SB_FEATURE_RO_ATTR(casefold, CASEFOLD);
+F2FS_SB_FEATURE_RO_ATTR(compression, COMPRESSION);
+F2FS_SB_FEATURE_RO_ATTR(readonly, RO);
+
+static struct attribute *f2fs_sb_feat_attrs[] = {
+ ATTR_LIST(sb_encryption),
+ ATTR_LIST(sb_block_zoned),
+ ATTR_LIST(sb_extra_attr),
+ ATTR_LIST(sb_project_quota),
+ ATTR_LIST(sb_inode_checksum),
+ ATTR_LIST(sb_flexible_inline_xattr),
+ ATTR_LIST(sb_quota_ino),
+ ATTR_LIST(sb_inode_crtime),
+ ATTR_LIST(sb_lost_found),
+ ATTR_LIST(sb_verity),
+ ATTR_LIST(sb_sb_checksum),
+ ATTR_LIST(sb_casefold),
+ ATTR_LIST(sb_compression),
+ ATTR_LIST(sb_readonly),
+ NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_sb_feat);
+
static const struct sysfs_ops f2fs_attr_ops = {
.show = f2fs_attr_show,
.store = f2fs_attr_store,
@@ -883,6 +958,33 @@ static struct kobj_type f2fs_stat_ktype = {
.release = f2fs_stat_kobj_release,
};
+static ssize_t f2fs_sb_feat_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_feature_list_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static void f2fs_feature_list_kobj_release(struct kobject *kobj)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_feature_list_kobj);
+ complete(&sbi->s_feature_list_kobj_unregister);
+}
+
+static const struct sysfs_ops f2fs_feature_list_attr_ops = {
+ .show = f2fs_sb_feat_attr_show,
+};
+
+static struct kobj_type f2fs_feature_list_ktype = {
+ .default_groups = f2fs_sb_feat_groups,
+ .sysfs_ops = &f2fs_feature_list_attr_ops,
+ .release = f2fs_feature_list_kobj_release,
+};
+
static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
void *offset)
{
@@ -1099,6 +1201,14 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
if (err)
goto put_stat_kobj;
+ sbi->s_feature_list_kobj.kset = &f2fs_kset;
+ init_completion(&sbi->s_feature_list_kobj_unregister);
+ err = kobject_init_and_add(&sbi->s_feature_list_kobj,
+ &f2fs_feature_list_ktype,
+ &sbi->s_kobj, "feature_list");
+ if (err)
+ goto put_feature_list_kobj;
+
if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -1113,6 +1223,9 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
victim_bits_seq_show, sb);
}
return 0;
+put_feature_list_kobj:
+ kobject_put(&sbi->s_feature_list_kobj);
+ wait_for_completion(&sbi->s_feature_list_kobj_unregister);
put_stat_kobj:
kobject_put(&sbi->s_stat_kobj);
wait_for_completion(&sbi->s_stat_kobj_unregister);
@@ -1135,6 +1248,9 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
kobject_del(&sbi->s_stat_kobj);
kobject_put(&sbi->s_stat_kobj);
wait_for_completion(&sbi->s_stat_kobj_unregister);
+ kobject_del(&sbi->s_feature_list_kobj);
+ kobject_put(&sbi->s_feature_list_kobj);
+ wait_for_completion(&sbi->s_feature_list_kobj_unregister);
kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
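For reference, the new F2FS_SB_FEATURE_RO_ATTR() macro is purely mechanical; expanding, say, F2FS_SB_FEATURE_RO_ATTR(readonly, RO) by hand gives roughly the fragment below (illustration only, not a standalone module). The resulting /sys/fs/f2fs/<dev>/feature_list/readonly entry reports whether this particular image was formatted with the feature, whereas the older /sys/fs/f2fs/features/* entries only reflect what the running kernel supports.

static struct f2fs_attr f2fs_attr_sb_readonly = {
	.attr = { .name = "readonly", .mode = 0444 },
	.show = f2fs_sb_feature_show,	/* prints "supported" or "unsupported" per superblock */
	.id   = F2FS_FEATURE_RO,
};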
diff --git a/fs/fcntl.c b/fs/fcntl.c
index dfc72f15be7f..f946bec8f1f1 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -369,8 +369,8 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
/* 32-bit arches must use fcntl64() */
case F_OFD_SETLK:
case F_OFD_SETLKW:
-#endif
fallthrough;
+#endif
case F_SETLK:
case F_SETLKW:
if (copy_from_user(&flock, argp, sizeof(flock)))
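The one-line fcntl.c change moves the fallthrough annotation inside the #if block, presumably so that 32-bit builds, which compile out the F_OFD_* labels, are not left with a dangling annotation that no longer sits between case labels. A small compilable illustration of the corrected placement, using the raw GCC/Clang attribute instead of the kernel's fallthrough macro (the numeric cases are stand-ins):

#include <stdio.h>

#define HAVE_OFD 1	/* stand-in for the "not a 32-bit arch" condition */

static const char *classify(int cmd)
{
	switch (cmd) {
	case 0:
		return "getlk";
#if HAVE_OFD
	case 1:				/* think F_OFD_SETLK */
	case 2:				/* think F_OFD_SETLKW */
		__attribute__((fallthrough));	/* annotation lives and dies with the labels */
#endif
	case 3:				/* think F_SETLK */
	case 4:				/* think F_SETLKW */
		return "setlk family";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s %s\n", classify(1), classify(3));
	return 0;
}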
diff --git a/fs/fhandle.c b/fs/fhandle.c
index ec6feeccc276..6630c69c23a2 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -229,7 +229,7 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
path_put(&path);
return fd;
}
- file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
+ file = file_open_root(&path, "", open_flag, 0);
if (IS_ERR(file)) {
put_unused_fd(fd);
retval = PTR_ERR(file);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 06d04a74ab6c..4c3370548982 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -521,6 +521,9 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
*/
smp_mb();
+ if (IS_DAX(inode))
+ return false;
+
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 2834d1afa6e8..de1985eae535 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -80,6 +80,35 @@ static int vfs_parse_sb_flag(struct fs_context *fc, const char *key)
}
/**
+ * vfs_parse_fs_param_source - Handle setting "source" via parameter
+ * @fc: The filesystem context to modify
+ * @param: The parameter
+ *
+ * This is a simple helper for filesystems to verify that the "source" they
+ * accept is sane.
+ *
+ * Returns 0 on success, -ENOPARAM if this is not "source" parameter, and
+ * -EINVAL otherwise. In the event of failure, supplementary error information
+ * is logged.
+ */
+int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param)
+{
+ if (strcmp(param->key, "source") != 0)
+ return -ENOPARAM;
+
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "Non-string source");
+
+ if (fc->source)
+ return invalf(fc, "Multiple sources");
+
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(vfs_parse_fs_param_source);
+
+/**
* vfs_parse_fs_param - Add a single parameter to a superblock config
* @fc: The filesystem context to modify
* @param: The parameter
@@ -122,15 +151,9 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
/* If the filesystem doesn't take any arguments, give it the
* default handling of source.
*/
- if (strcmp(param->key, "source") == 0) {
- if (param->type != fs_value_is_string)
- return invalf(fc, "VFS: Non-string source");
- if (fc->source)
- return invalf(fc, "VFS: Multiple sources");
- fc->source = param->string;
- param->string = NULL;
- return 0;
- }
+ ret = vfs_parse_fs_param_source(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
return invalf(fc, "%s: Unknown parameter '%s'",
fc->fs_type->name, param->key);
@@ -504,16 +527,11 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct legacy_fs_context *ctx = fc->fs_private;
unsigned int size = ctx->data_size;
size_t len = 0;
+ int ret;
- if (strcmp(param->key, "source") == 0) {
- if (param->type != fs_value_is_string)
- return invalf(fc, "VFS: Legacy: Non-string source");
- if (fc->source)
- return invalf(fc, "VFS: Legacy: Multiple sources");
- fc->source = param->string;
- param->string = NULL;
- return 0;
- }
+ ret = vfs_parse_fs_param_source(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
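Both converted callers follow the same calling convention, which other filesystems are expected to copy: call the helper first and treat only -ENOPARAM as "not the source parameter, keep parsing". A hypothetical parse_param hook showing that shape (examplefs and its error text are invented):

static int examplefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	int ret;

	/* Let the VFS claim "source"; 0 or a real error means it was handled. */
	ret = vfs_parse_fs_param_source(fc, param);
	if (ret != -ENOPARAM)
		return ret;

	/* ... filesystem-specific keys would be matched here ... */

	return invalf(fc, "examplefs: Unknown parameter '%s'", param->key);
}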
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index fb733eb5aead..9d58371d22c2 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -213,7 +213,7 @@ static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
dmap->writable = writable;
if (!upgrade) {
/*
- * We don't take a refernce on inode. inode is valid right now
+ * We don't take a reference on inode. inode is valid right now
* and when inode is going away, cleanup logic should first
* cleanup dmap entries.
*/
@@ -622,7 +622,7 @@ static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
}
/*
- * If read beyond end of file happnes, fs code seems to return
+ * If read beyond end of file happens, fs code seems to return
* it as hole
*/
iomap_hole:
@@ -1207,7 +1207,7 @@ static void fuse_dax_free_mem_worker(struct work_struct *work)
ret);
}
- /* If number of free ranges are still below threhold, requeue */
+ /* If number of free ranges are still below threshold, requeue */
kick_dmap_free_worker(fcd, 1);
}
@@ -1235,8 +1235,6 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{
long nr_pages, nr_ranges;
- void *kaddr;
- pfn_t pfn;
struct fuse_dax_mapping *range;
int ret, id;
size_t dax_size = -1;
@@ -1248,8 +1246,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
id = dax_read_lock();
- nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
- &pfn);
+ nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
+ NULL);
dax_read_unlock(id);
if (nr_pages < 0) {
pr_debug("dax_direct_access() returned %ld\n", nr_pages);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a5ceccc5ef00..1c8f79b3dd06 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -91,7 +91,7 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
{
/*
* lockess check of fc->connected is okay, because atomic_dec_and_test()
- * provides a memory barrier mached with the one in fuse_wait_aborted()
+ * provides a memory barrier matched with the one in fuse_wait_aborted()
* to ensure no wake-up is missed.
*/
if (atomic_dec_and_test(&fc->num_waiting) &&
@@ -783,6 +783,7 @@ static int fuse_check_page(struct page *page)
1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
+ 1 << PG_workingset |
1 << PG_reclaim |
1 << PG_waiters))) {
dump_page(page, "fuse: trying to steal weird page");
@@ -1271,6 +1272,15 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
goto restart;
}
spin_lock(&fpq->lock);
+ /*
+ * Must not put request on fpq->io queue after having been shut down by
+ * fuse_abort_conn()
+ */
+ if (!fpq->connected) {
+ req->out.h.error = err = -ECONNABORTED;
+ goto out_end;
+ }
list_add(&req->list, &fpq->io);
spin_unlock(&fpq->lock);
cs->req = req;
@@ -1857,7 +1867,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
}
err = -EINVAL;
- if (oh.error <= -1000 || oh.error > 0)
+ if (oh.error <= -512 || oh.error > 0)
goto copy_finish;
spin_lock(&fpq->lock);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1b6c001a7dd1..eade6f965b2e 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -252,7 +252,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
if (ret == -ENOMEM)
goto out;
if (ret || fuse_invalid_attr(&outarg.attr) ||
- inode_wrong_type(inode, outarg.attr.mode))
+ fuse_stale_inode(inode, outarg.generation, &outarg.attr))
goto invalid;
forget_all_cached_acls(inode);
@@ -309,68 +309,23 @@ static int fuse_dentry_delete(const struct dentry *dentry)
static struct vfsmount *fuse_dentry_automount(struct path *path)
{
struct fs_context *fsc;
- struct fuse_mount *parent_fm = get_fuse_mount_super(path->mnt->mnt_sb);
- struct fuse_conn *fc = parent_fm->fc;
- struct fuse_mount *fm;
struct vfsmount *mnt;
struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
- struct super_block *sb;
- int err;
fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
- if (IS_ERR(fsc)) {
- err = PTR_ERR(fsc);
- goto out;
- }
-
- err = -ENOMEM;
- fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
- if (!fm)
- goto out_put_fsc;
-
- fsc->s_fs_info = fm;
- sb = sget_fc(fsc, NULL, set_anon_super_fc);
- if (IS_ERR(sb)) {
- err = PTR_ERR(sb);
- kfree(fm);
- goto out_put_fsc;
- }
- fm->fc = fuse_conn_get(fc);
-
- /* Initialize superblock, making @mp_fi its root */
- err = fuse_fill_super_submount(sb, mp_fi);
- if (err)
- goto out_put_sb;
+ if (IS_ERR(fsc))
+ return ERR_CAST(fsc);
- sb->s_flags |= SB_ACTIVE;
- fsc->root = dget(sb->s_root);
- /* We are done configuring the superblock, so unlock it */
- up_write(&sb->s_umount);
-
- down_write(&fc->killsb);
- list_add_tail(&fm->fc_entry, &fc->mounts);
- up_write(&fc->killsb);
+ /* Pass the FUSE inode of the mount for fuse_get_tree_submount() */
+ fsc->fs_private = mp_fi;
/* Create the submount */
- mnt = vfs_create_mount(fsc);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
- goto out_put_fsc;
- }
- mntget(mnt);
- put_fs_context(fsc);
- return mnt;
+ mnt = fc_mount(fsc);
+ if (!IS_ERR(mnt))
+ mntget(mnt);
-out_put_sb:
- /*
- * Only jump here when fsc->root is NULL and sb is still locked
- * (otherwise put_fs_context() will put the superblock)
- */
- deactivate_locked_super(sb);
-out_put_fsc:
put_fs_context(fsc);
-out:
- return ERR_PTR(err);
+ return mnt;
}
const struct dentry_operations fuse_dentry_operations = {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 09ef2a4d25ed..97f860cfc195 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -645,7 +645,7 @@ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
* == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
*
* An example:
- * User requested DIO read of 64K. It was splitted into two 32K fuse requests,
+ * User requested DIO read of 64K. It was split into two 32K fuse requests,
* both submitted asynchronously. The first of them was ACKed by userspace as
* fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
* second request was ACKed as short, e.g. only 1K was read, resulting in
@@ -1171,14 +1171,12 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
- tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+ tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
flush_dcache_page(page);
- iov_iter_advance(ii, tmp);
if (!tmp) {
unlock_page(page);
put_page(page);
- bytes = min(bytes, iov_iter_single_seg_count(ii));
goto again;
}
@@ -1405,7 +1403,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
nbytes += ret;
ret += start;
- npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
+ npages = DIV_ROUND_UP(ret, PAGE_SIZE);
ap->descs[ap->num_pages].offset = start;
fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
@@ -2907,11 +2905,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
};
int err;
bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
- (mode & FALLOC_FL_PUNCH_HOLE);
+ (mode & (FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_ZERO_RANGE));
bool block_faults = FUSE_IS_DAX(inode) && lock_inode;
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_ZERO_RANGE))
return -EOPNOTSUPP;
if (fm->fc->no_fallocate)
@@ -2926,7 +2926,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
goto out;
}
- if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
loff_t endbyte = offset + length - 1;
err = fuse_writeback_range(inode, offset, endbyte);
@@ -2966,7 +2966,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
file_update_time(file);
}
- if (mode & FALLOC_FL_PUNCH_HOLE)
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
truncate_pagecache_range(inode, offset, offset + length - 1);
fuse_invalidate_attr(inode);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 7e463e220053..07829ce78695 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -761,6 +761,9 @@ struct fuse_conn {
/* Auto-mount submounts announced by the server */
unsigned int auto_submounts:1;
+ /* Propagate syncfs() to server */
+ unsigned int sync_fs:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -867,6 +870,13 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
return atomic64_read(&fc->attr_version);
}
+static inline bool fuse_stale_inode(const struct inode *inode, int generation,
+ struct fuse_attr *attr)
+{
+ return inode->i_generation != generation ||
+ inode_wrong_type(inode, attr->mode);
+}
+
static inline void fuse_make_bad(struct inode *inode)
{
remove_inode_hash(inode);
@@ -1082,15 +1092,6 @@ void fuse_send_init(struct fuse_mount *fm);
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx);
/*
- * Fill in superblock for submounts
- * @sb: partially-initialized superblock to fill in
- * @parent_fi: The fuse_inode of the parent filesystem where this submount is
- * mounted
- */
-int fuse_fill_super_submount(struct super_block *sb,
- struct fuse_inode *parent_fi);
-
-/*
* Remove the mount from the connection
*
* Returns whether this was the last mount
@@ -1098,6 +1099,11 @@ int fuse_fill_super_submount(struct super_block *sb,
bool fuse_mount_remove(struct fuse_mount *fm);
/*
+ * Setup context ops for submounts
+ */
+int fuse_init_fs_context_submount(struct fs_context *fsc);
+
+/*
* Shut down the connection (possibly sending DESTROY request).
*/
void fuse_conn_destroy(struct fuse_mount *fm);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 393e36b74dc4..b9beb39a4a18 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -350,8 +350,8 @@ retry:
inode->i_generation = generation;
fuse_init_inode(inode, attr);
unlock_new_inode(inode);
- } else if (inode_wrong_type(inode, attr->mode)) {
- /* Inode has changed type, any I/O on the old should fail */
+ } else if (fuse_stale_inode(inode, generation, attr)) {
+ /* nodeid was reused, any I/O on the old inode should fail */
fuse_make_bad(inode);
iput(inode);
goto retry;
@@ -506,6 +506,45 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
return err;
}
+static int fuse_sync_fs(struct super_block *sb, int wait)
+{
+ struct fuse_mount *fm = get_fuse_mount_super(sb);
+ struct fuse_conn *fc = fm->fc;
+ struct fuse_syncfs_in inarg;
+ FUSE_ARGS(args);
+ int err;
+
+ /*
+ * Userspace cannot handle the wait == 0 case. Avoid a
+ * gratuitous roundtrip.
+ */
+ if (!wait)
+ return 0;
+
+ /* The filesystem is being unmounted. Nothing to do. */
+ if (!sb->s_root)
+ return 0;
+
+ if (!fc->sync_fs)
+ return 0;
+
+ memset(&inarg, 0, sizeof(inarg));
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.opcode = FUSE_SYNCFS;
+ args.nodeid = get_node_id(sb->s_root->d_inode);
+ args.out_numargs = 0;
+
+ err = fuse_simple_request(fm, &args);
+ if (err == -ENOSYS) {
+ fc->sync_fs = 0;
+ err = 0;
+ }
+
+ return err;
+}
+
enum {
OPT_SOURCE,
OPT_SUBTYPE,
@@ -909,6 +948,7 @@ static const struct super_operations fuse_super_operations = {
.put_super = fuse_put_super,
.umount_begin = fuse_umount_begin,
.statfs = fuse_statfs,
+ .sync_fs = fuse_sync_fs,
.show_options = fuse_show_options,
};
@@ -1275,8 +1315,8 @@ static void fuse_sb_defaults(struct super_block *sb)
sb->s_xattr = fuse_no_acl_xattr_handlers;
}
-int fuse_fill_super_submount(struct super_block *sb,
- struct fuse_inode *parent_fi)
+static int fuse_fill_super_submount(struct super_block *sb,
+ struct fuse_inode *parent_fi)
{
struct fuse_mount *fm = get_fuse_mount_super(sb);
struct super_block *parent_sb = parent_fi->inode.i_sb;
@@ -1313,6 +1353,58 @@ int fuse_fill_super_submount(struct super_block *sb,
return 0;
}
+/* Filesystem context private data holds the FUSE inode of the mount point */
+static int fuse_get_tree_submount(struct fs_context *fsc)
+{
+ struct fuse_mount *fm;
+ struct fuse_inode *mp_fi = fsc->fs_private;
+ struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
+ struct super_block *sb;
+ int err;
+
+ fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
+ if (!fm)
+ return -ENOMEM;
+
+ fsc->s_fs_info = fm;
+ sb = sget_fc(fsc, NULL, set_anon_super_fc);
+ if (IS_ERR(sb)) {
+ kfree(fm);
+ return PTR_ERR(sb);
+ }
+ fm->fc = fuse_conn_get(fc);
+
+ /* Initialize superblock, making @mp_fi its root */
+ err = fuse_fill_super_submount(sb, mp_fi);
+ if (err) {
+ fuse_conn_put(fc);
+ kfree(fm);
+ sb->s_fs_info = NULL;
+ deactivate_locked_super(sb);
+ return err;
+ }
+
+ down_write(&fc->killsb);
+ list_add_tail(&fm->fc_entry, &fc->mounts);
+ up_write(&fc->killsb);
+
+ sb->s_flags |= SB_ACTIVE;
+ fsc->root = dget(sb->s_root);
+
+ return 0;
+}
+
+static const struct fs_context_operations fuse_context_submount_ops = {
+ .get_tree = fuse_get_tree_submount,
+};
+
+int fuse_init_fs_context_submount(struct fs_context *fsc)
+{
+ fsc->ops = &fuse_context_submount_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
+
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
struct fuse_dev *fud = NULL;
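fuse_sync_fs() above uses FUSE's usual negotiation trick: the first FUSE_SYNCFS that comes back -ENOSYS clears fc->sync_fs, so the server is never asked again and syncfs() still returns success. A stand-alone sketch of that probe-once pattern (all names invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a request the server may not implement. */
static int send_syncfs_request(void)
{
	return -ENOSYS;			/* pretend the server predates the opcode */
}

static bool syncfs_supported = true;	/* mirrors fc->sync_fs */

static int do_syncfs(void)
{
	int err;

	if (!syncfs_supported)
		return 0;		/* negotiated away, nothing to send */

	err = send_syncfs_request();
	if (err == -ENOSYS) {		/* old server: remember and report success */
		syncfs_supported = false;
		err = 0;
	}
	return err;
}

int main(void)
{
	printf("first: %d, second: %d\n", do_syncfs(), do_syncfs());
	return 0;
}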
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 277f7041d55a..bc267832310c 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -200,9 +200,12 @@ retry:
if (!d_in_lookup(dentry)) {
struct fuse_inode *fi;
inode = d_inode(dentry);
+ if (inode && get_node_id(inode) != o->nodeid)
+ inode = NULL;
if (!inode ||
- get_node_id(inode) != o->nodeid ||
- inode_wrong_type(inode, o->attr.mode)) {
+ fuse_stale_inode(inode, o->generation, &o->attr)) {
+ if (inode)
+ fuse_make_bad(inode);
d_invalidate(dentry);
dput(dentry);
goto retry;
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index bcb8a02e2d8b..8f52cdaa8445 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1447,6 +1447,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
fc->release = fuse_free_conn;
fc->delete_stale = true;
fc->auto_submounts = true;
+ fc->sync_fs = true;
/* Tell FUSE to split requests that exceed the virtqueue's size */
fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
@@ -1496,6 +1497,9 @@ static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
struct fuse_fs_context *ctx;
+ if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
+ return fuse_init_fs_context_submount(fsc);
+
ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4af318fbda77..ef9498a6e88a 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
fd->key = ptr + tree->max_key_len + 2;
hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
tree->cnid, __builtin_return_address(0));
- mutex_lock(&tree->tree_lock);
+ switch (tree->cnid) {
+ case HFS_CAT_CNID:
+ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+ break;
+ case HFS_EXT_CNID:
+ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+ break;
+ case HFS_ATTR_CNID:
+ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
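Together with the enum added to btree.h below, the switch in hfs_find_init() gives each B-tree's tree_lock its own lockdep subclass, so holding the catalog tree while taking the extents tree no longer looks like recursive locking. An illustrative fragment (not a standalone module) of how the subclasses are meant to be used:

/* Illustrative only: nesting two hfs B-tree locks without a lockdep splat. */
static void lock_catalog_then_extents(struct hfs_btree *cat_tree,
				      struct hfs_btree *ext_tree)
{
	/* Same lock class (tree_lock), different subclasses from the new enum,
	 * so lockdep tracks them as distinct locks. */
	mutex_lock_nested(&cat_tree->tree_lock, CATALOG_BTREE_MUTEX);
	mutex_lock_nested(&ext_tree->tree_lock, EXTENTS_BTREE_MUTEX);

	/* ... read the catalog record, then touch the extents tree ... */

	mutex_unlock(&ext_tree->tree_lock);
	mutex_unlock(&cat_tree->tree_lock);
}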
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index b63a4df7327b..c0a73a6ffb28 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,16 +15,31 @@
#include "btree.h"
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
- int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
struct page *page;
+ int pagenum;
+ int bytes_read;
+ int bytes_to_read;
+ void *vaddr;
off += node->page_offset;
- page = node->page[0];
+ pagenum = off >> PAGE_SHIFT;
+ off &= ~PAGE_MASK; /* compute page offset for the first page */
- memcpy(buf, kmap(page) + off, len);
- kunmap(page);
+ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+ if (pagenum >= node->tree->pages_per_bnode)
+ break;
+ page = node->page[pagenum];
+ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+ vaddr = kmap_atomic(page);
+ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+ kunmap_atomic(vaddr);
+
+ pagenum++;
+ off = 0; /* page offset only applies to the first page */
+ }
}
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
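hfs_bnode_read() previously mapped only node->page[0], so reads that crossed a page boundary walked off the end of the mapping; the rewrite above copies page by page with kmap_atomic(). The offset arithmetic is the interesting part, shown here as a runnable user-space sketch (PAGE_SIZE_ and the helper are invented stand-ins):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_ 4096UL	/* stand-in for the kernel's PAGE_SIZE */

/* Same split the new hfs_bnode_read() performs: the first chunk runs from
 * (off % PAGE_SIZE) to the end of that page, every later chunk starts at
 * offset 0 of the next page. */
static void read_across_pages(char *dst, char *pages[], unsigned long off,
			      unsigned long len)
{
	unsigned long pagenum = off / PAGE_SIZE_;
	unsigned long poff = off % PAGE_SIZE_;
	unsigned long done, chunk;

	for (done = 0; done < len; done += chunk) {
		chunk = len - done;
		if (chunk > PAGE_SIZE_ - poff)
			chunk = PAGE_SIZE_ - poff;
		memcpy(dst + done, pages[pagenum] + poff, chunk);
		pagenum++;
		poff = 0;	/* sub-page offset only applies to the first page */
	}
}

int main(void)
{
	static char p0[PAGE_SIZE_], p1[PAGE_SIZE_];
	char *pages[] = { p0, p1 };
	char out[8];

	memcpy(p0 + PAGE_SIZE_ - 4, "node", 4);
	memcpy(p1, "data", 4);
	read_across_pages(out, pages, PAGE_SIZE_ - 4, 8);	/* crosses the page boundary */
	printf("%.8s\n", out);		/* prints "nodedata" */
	return 0;
}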
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 4ba45caf5939..0e6baee93245 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
#define NODE_HASH_SIZE 256
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+ CATALOG_BTREE_MUTEX,
+ EXTENTS_BTREE_MUTEX,
+ ATTR_BTREE_MUTEX,
+};
+
/* A HFS BTree held in memory */
struct hfs_btree {
struct super_block *sb;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 44d07c9e3a7f..12d9bae39363 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
if (!res) {
if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
res = -EIO;
- goto bail;
+ goto bail_hfs_find;
}
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
}
- if (res) {
- hfs_find_exit(&fd);
- goto bail_no_root;
- }
+ if (res)
+ goto bail_hfs_find;
res = -EINVAL;
root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
/* everything's okay */
return 0;
+bail_hfs_find:
+ hfs_find_exit(&fd);
bail_no_root:
pr_err("get root inode failed\n");
bail:
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 70e8374ddac4..6fef67c2a9f0 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -281,6 +281,11 @@ int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct inode *inode = d_inode(path->dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ if (request_mask & STATX_BTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime = hfsp_mt2ut(hip->create_date);
+ }
+
if (inode->i_flags & S_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (inode->i_flags & S_IMMUTABLE)
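With this change hfsplus reports the volume's creation timestamp as the file birth time whenever the caller asks for it. A generic user-space check (not part of the patch) that requests STATX_BTIME and prints it only when the filesystem actually filled it in:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	/* Ask for the birth time; filesystems that cannot provide it simply
	 * leave STATX_BTIME out of stx_mask. */
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("btime not reported\n");
	return 0;
}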
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 4d169c5a2673..e2855ceefd39 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -204,7 +204,6 @@ check_attr_tree_state_again:
buf = kzalloc(node_size, GFP_NOFS);
if (!buf) {
- pr_err("failed to allocate memory for header node\n");
err = -ENOMEM;
goto end_attr_file_creation;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 926eeb9bf4eb..cdfb1ae78a3f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -77,7 +77,7 @@ enum hugetlb_param {
static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
fsparam_u32 ("gid", Opt_gid),
fsparam_string("min_size", Opt_min_size),
- fsparam_u32 ("mode", Opt_mode),
+ fsparam_u32oct("mode", Opt_mode),
fsparam_string("nr_inodes", Opt_nr_inodes),
fsparam_string("pagesize", Opt_pagesize),
fsparam_string("size", Opt_size),
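The hugetlbfs change switches the mode option from fsparam_u32 to fsparam_u32oct, so mode=755 is read as octal 0755 rather than decimal 755. A quick user-space demonstration of why the base matters:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *arg = "755";	/* as typed in mode=755 */

	/* fsparam_u32 parses base 10, fsparam_u32oct base 8; only the octal
	 * reading gives the intended rwxr-xr-x permissions. */
	printf("decimal parse: %lo (octal)\n", strtoul(arg, NULL, 10));	/* 1363 */
	printf("octal parse:   %lo (octal)\n", strtoul(arg, NULL, 8));	/* 755 */
	return 0;
}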
diff --git a/fs/internal.h b/fs/internal.h
index 6aeae7ef3380..82e8eb32ff3d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -61,7 +61,6 @@ extern void __init chrdev_init(void);
*/
extern const struct fs_context_operations legacy_fs_context_ops;
extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
extern void vfs_clean_context(struct fs_context *fc);
extern int finish_clean_context(struct fs_context *fc);
@@ -129,7 +128,7 @@ struct open_flags {
};
extern struct file *do_filp_open(int dfd, struct filename *pathname,
const struct open_flags *op);
-extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
+extern struct file *do_file_open_root(const struct path *,
const char *, const struct open_flags *);
extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 843d4a7bcd6e..7d2ed8c7dd31 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -129,7 +129,8 @@ struct io_cb_cancel_data {
bool cancel_all;
};
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first);
+static void io_wqe_dec_running(struct io_worker *worker);
static bool io_worker_get(struct io_worker *worker)
{
@@ -168,26 +169,21 @@ static void io_worker_exit(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- unsigned flags;
if (refcount_dec_and_test(&worker->ref))
complete(&worker->ref_done);
wait_for_completion(&worker->ref_done);
- preempt_disable();
- current->flags &= ~PF_IO_WORKER;
- flags = worker->flags;
- worker->flags = 0;
- if (flags & IO_WORKER_F_RUNNING)
- atomic_dec(&acct->nr_running);
- worker->flags = 0;
- preempt_enable();
-
raw_spin_lock_irq(&wqe->lock);
- if (flags & IO_WORKER_F_FREE)
+ if (worker->flags & IO_WORKER_F_FREE)
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
acct->nr_workers--;
+ preempt_disable();
+ io_wqe_dec_running(worker);
+ worker->flags = 0;
+ current->flags &= ~PF_IO_WORKER;
+ preempt_enable();
raw_spin_unlock_irq(&wqe->lock);
kfree_rcu(worker, rcu);
@@ -214,15 +210,19 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
struct hlist_nulls_node *n;
struct io_worker *worker;
- n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
- if (is_a_nulls(n))
- return false;
-
- worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
- if (io_worker_get(worker)) {
- wake_up_process(worker->task);
+ /*
+ * Iterate free_list and see if we can find an idle worker to
+ * activate. If a given worker is on the free_list but in the process
+ * of exiting, keep trying.
+ */
+ hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
+ if (!io_worker_get(worker))
+ continue;
+ if (wake_up_process(worker->task)) {
+ io_worker_release(worker);
+ return true;
+ }
io_worker_release(worker);
- return true;
}
return false;
@@ -247,10 +247,21 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
ret = io_wqe_activate_free_worker(wqe);
rcu_read_unlock();
- if (!ret && acct->nr_workers < acct->max_workers) {
- atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- create_io_worker(wqe->wq, wqe, acct->index);
+ if (!ret) {
+ bool do_create = false, first = false;
+
+ raw_spin_lock_irq(&wqe->lock);
+ if (acct->nr_workers < acct->max_workers) {
+ atomic_inc(&acct->nr_running);
+ atomic_inc(&wqe->wq->worker_refs);
+ if (!acct->nr_workers)
+ first = true;
+ acct->nr_workers++;
+ do_create = true;
+ }
+ raw_spin_unlock_irq(&wqe->lock);
+ if (do_create)
+ create_io_worker(wqe->wq, wqe, acct->index, first);
}
}
@@ -271,10 +282,28 @@ static void create_worker_cb(struct callback_head *cb)
{
struct create_worker_data *cwd;
struct io_wq *wq;
+ struct io_wqe *wqe;
+ struct io_wqe_acct *acct;
+ bool do_create = false, first = false;
cwd = container_of(cb, struct create_worker_data, work);
- wq = cwd->wqe->wq;
- create_io_worker(wq, cwd->wqe, cwd->index);
+ wqe = cwd->wqe;
+ wq = wqe->wq;
+ acct = &wqe->acct[cwd->index];
+ raw_spin_lock_irq(&wqe->lock);
+ if (acct->nr_workers < acct->max_workers) {
+ if (!acct->nr_workers)
+ first = true;
+ acct->nr_workers++;
+ do_create = true;
+ }
+ raw_spin_unlock_irq(&wqe->lock);
+ if (do_create) {
+ create_io_worker(wq, wqe, cwd->index, first);
+ } else {
+ atomic_dec(&acct->nr_running);
+ io_worker_ref_put(wq);
+ }
kfree(cwd);
}
@@ -612,7 +641,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
raw_spin_unlock_irq(&worker->wqe->lock);
}
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first)
{
struct io_wqe_acct *acct = &wqe->acct[index];
struct io_worker *worker;
@@ -635,6 +664,9 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
kfree(worker);
fail:
atomic_dec(&acct->nr_running);
+ raw_spin_lock_irq(&wqe->lock);
+ acct->nr_workers--;
+ raw_spin_unlock_irq(&wqe->lock);
io_worker_ref_put(wq);
return;
}
@@ -650,9 +682,8 @@ fail:
worker->flags |= IO_WORKER_F_FREE;
if (index == IO_WQ_ACCT_BOUND)
worker->flags |= IO_WORKER_F_BOUND;
- if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+ if (first && (worker->flags & IO_WORKER_F_BOUND))
worker->flags |= IO_WORKER_F_FIXED;
- acct->nr_workers++;
raw_spin_unlock_irq(&wqe->lock);
wake_up_new_task(tsk);
}
@@ -731,7 +762,12 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
int work_flags;
unsigned long flags;
- if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
+ /*
+ * If io-wq is exiting for this task, or if the request has explicitly
+ * been marked as one that should not get executed, cancel it here.
+ */
+ if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
+ (work->flags & IO_WQ_WORK_CANCEL)) {
io_run_cancel(work, wqe);
return;
}
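The io-wq changes above move the nr_workers accounting under wqe->lock and reserve the slot before the worker is actually created, rolling the count back if creation fails; the first reservation also decides which worker becomes the fixed one. A toy user-space model of that reserve-then-create pattern (pthread names and limits are invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_workers, max_workers = 4;

static bool create_worker(void)		/* stand-in for create_io_worker() */
{
	return true;			/* pretend thread creation succeeded */
}

static bool maybe_add_worker(void)
{
	bool reserved = false, first = false;

	pthread_mutex_lock(&lock);
	if (nr_workers < max_workers) {
		first = (nr_workers == 0);	/* first worker gets special treatment */
		nr_workers++;			/* reserve the slot before creating */
		reserved = true;
	}
	pthread_mutex_unlock(&lock);

	if (!reserved)
		return false;
	if (!create_worker()) {
		pthread_mutex_lock(&lock);	/* creation failed: undo the reservation */
		nr_workers--;
		pthread_mutex_unlock(&lock);
		return false;
	}
	return first;	/* callers use this like io-wq's "first" flag */
}

int main(void)
{
	printf("first worker? %d\n", maybe_add_worker());
	return 0;
}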
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e55b21fc0ab2..a2e20a6fbfed 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -78,6 +78,7 @@
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
+#include <linux/tracehook.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -465,7 +466,8 @@ struct io_ring_ctx {
struct mm_struct *mm_account;
/* ctx exit and cancelation */
- struct callback_head *exit_task_work;
+ struct llist_head fallback_llist;
+ struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
@@ -784,9 +786,14 @@ struct async_poll {
struct io_poll_iocb *double_poll;
};
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req);
+
struct io_task_work {
- struct io_wq_work_node node;
- task_work_func_t func;
+ union {
+ struct io_wq_work_node node;
+ struct llist_node fallback_node;
+ };
+ io_req_tw_func_t func;
};
enum {
@@ -849,10 +856,7 @@ struct io_kiocb {
/* used with ctx->iopoll_list with reads/writes */
struct list_head inflight_entry;
- union {
- struct io_task_work io_task_work;
- struct callback_head task_work;
- };
+ struct io_task_work io_task_work;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
struct async_poll *apoll;
@@ -1071,6 +1075,8 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static bool io_poll_remove_waitqs(struct io_kiocb *req);
static int io_req_prep_async(struct io_kiocb *req);
+static void io_fallback_req_func(struct work_struct *unused);
+
static struct kmem_cache *req_cachep;
static const struct file_operations io_uring_fops;
@@ -1202,6 +1208,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->tctx_list);
INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
INIT_LIST_HEAD(&ctx->locked_free_list);
+ INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
return ctx;
err:
kfree(ctx->dummy_ubuf);
@@ -1273,8 +1280,17 @@ static void io_prep_async_link(struct io_kiocb *req)
{
struct io_kiocb *cur;
- io_for_each_link(cur, req)
- io_prep_async_work(cur);
+ if (req->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ io_for_each_link(cur, req)
+ io_prep_async_work(cur);
+ spin_unlock_irq(&ctx->completion_lock);
+ } else {
+ io_for_each_link(cur, req)
+ io_prep_async_work(cur);
+ }
}
static void io_queue_async_work(struct io_kiocb *req)
@@ -1288,6 +1304,17 @@ static void io_queue_async_work(struct io_kiocb *req)
/* init ->work of the whole link before punting */
io_prep_async_link(req);
+
+ /*
+ * Not expected to happen, but if we do have a bug where this _can_
+ * happen, catch it here and ensure the request is marked as
+ * canceled. That will make io-wq go through the usual work cancel
+ * procedure rather than attempt to run this request (or create a new
+ * worker for it).
+ */
+ if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+ req->work.flags |= IO_WQ_WORK_CANCEL;
+
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
&req->work, req->flags);
io_wq_enqueue(tctx->io_wq, &req->work);
@@ -1473,7 +1500,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
all_flushed = list_empty(&ctx->cq_overflow_list);
if (all_flushed) {
clear_bit(0, &ctx->check_cq_overflow);
- ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+ WRITE_ONCE(ctx->rings->sq_flags,
+ ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
}
if (posted)
@@ -1552,7 +1580,9 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
}
if (list_empty(&ctx->cq_overflow_list)) {
set_bit(0, &ctx->check_cq_overflow);
- ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+ WRITE_ONCE(ctx->rings->sq_flags,
+ ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
+
}
ocqe->cqe.user_data = user_data;
ocqe->cqe.res = res;
@@ -1929,13 +1959,17 @@ static void tctx_task_work(struct callback_head *cb)
ctx = req->ctx;
percpu_ref_get(&ctx->refs);
}
- req->task_work.func(&req->task_work);
+ req->io_task_work.func(req);
node = next;
}
if (wq_list_empty(&tctx->task_list)) {
+ spin_lock_irq(&tctx->task_lock);
clear_bit(0, &tctx->task_state);
- if (wq_list_empty(&tctx->task_list))
+ if (wq_list_empty(&tctx->task_list)) {
+ spin_unlock_irq(&tctx->task_lock);
break;
+ }
+ spin_unlock_irq(&tctx->task_lock);
/* another tctx_task_work() is enqueued, yield */
if (test_and_set_bit(0, &tctx->task_state))
break;
@@ -1946,17 +1980,13 @@ static void tctx_task_work(struct callback_head *cb)
ctx_flush_and_put(ctx);
}
-static int io_req_task_work_add(struct io_kiocb *req)
+static void io_req_task_work_add(struct io_kiocb *req)
{
struct task_struct *tsk = req->task;
struct io_uring_task *tctx = tsk->io_uring;
enum task_work_notify_mode notify;
- struct io_wq_work_node *node, *prev;
+ struct io_wq_work_node *node;
unsigned long flags;
- int ret = 0;
-
- if (unlikely(tsk->flags & PF_EXITING))
- return -ESRCH;
WARN_ON_ONCE(!tctx);
@@ -1967,7 +1997,9 @@ static int io_req_task_work_add(struct io_kiocb *req)
/* task_work already pending, we're done */
if (test_bit(0, &tctx->task_state) ||
test_and_set_bit(0, &tctx->task_state))
- return 0;
+ return;
+ if (unlikely(tsk->flags & PF_EXITING))
+ goto fail;
/*
* SQPOLL kernel thread doesn't need notification, just a wakeup. For
@@ -1976,72 +2008,28 @@ static int io_req_task_work_add(struct io_kiocb *req)
* will do the job.
*/
notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
-
if (!task_work_add(tsk, &tctx->task_work, notify)) {
wake_up_process(tsk);
- return 0;
+ return;
}
-
- /*
- * Slow path - we failed, find and delete work. if the work is not
- * in the list, it got run and we're fine.
- */
+fail:
+ clear_bit(0, &tctx->task_state);
spin_lock_irqsave(&tctx->task_lock, flags);
- wq_list_for_each(node, prev, &tctx->task_list) {
- if (&req->io_task_work.node == node) {
- wq_list_del(&tctx->task_list, node, prev);
- ret = 1;
- break;
- }
- }
+ node = tctx->task_list.first;
+ INIT_WQ_LIST(&tctx->task_list);
spin_unlock_irqrestore(&tctx->task_lock, flags);
- clear_bit(0, &tctx->task_state);
- return ret;
-}
-
-static bool io_run_task_work_head(struct callback_head **work_head)
-{
- struct callback_head *work, *next;
- bool executed = false;
-
- do {
- work = xchg(work_head, NULL);
- if (!work)
- break;
-
- do {
- next = work->next;
- work->func(work);
- work = next;
- cond_resched();
- } while (work);
- executed = true;
- } while (1);
- return executed;
-}
-
-static void io_task_work_add_head(struct callback_head **work_head,
- struct callback_head *task_work)
-{
- struct callback_head *head;
-
- do {
- head = READ_ONCE(*work_head);
- task_work->next = head;
- } while (cmpxchg(work_head, head, task_work) != head);
-}
-
-static void io_req_task_work_add_fallback(struct io_kiocb *req,
- task_work_func_t cb)
-{
- init_task_work(&req->task_work, cb);
- io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
+ while (node) {
+ req = container_of(node, struct io_kiocb, io_task_work.node);
+ node = node->next;
+ if (llist_add(&req->io_task_work.fallback_node,
+ &req->ctx->fallback_llist))
+ schedule_delayed_work(&req->ctx->fallback_work, 1);
+ }
}
-static void io_req_task_cancel(struct callback_head *cb)
+static void io_req_task_cancel(struct io_kiocb *req)
{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct io_ring_ctx *ctx = req->ctx;
/* ctx is guaranteed to stay alive while we hold uring_lock */
@@ -2050,41 +2038,36 @@ static void io_req_task_cancel(struct callback_head *cb)
mutex_unlock(&ctx->uring_lock);
}
-static void __io_req_task_submit(struct io_kiocb *req)
+static void io_req_task_submit(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
mutex_lock(&ctx->uring_lock);
- if (!(current->flags & PF_EXITING) && !current->in_execve)
+ if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
__io_queue_sqe(req);
else
io_req_complete_failed(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
}
-static void io_req_task_submit(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
- __io_req_task_submit(req);
-}
-
static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
req->result = ret;
- req->task_work.func = io_req_task_cancel;
-
- if (unlikely(io_req_task_work_add(req)))
- io_req_task_work_add_fallback(req, io_req_task_cancel);
+ req->io_task_work.func = io_req_task_cancel;
+ io_req_task_work_add(req);
}
static void io_req_task_queue(struct io_kiocb *req)
{
- req->task_work.func = io_req_task_submit;
+ req->io_task_work.func = io_req_task_submit;
+ io_req_task_work_add(req);
+}
- if (unlikely(io_req_task_work_add(req)))
- io_req_task_queue_fail(req, -ECANCELED);
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+ req->io_task_work.func = io_queue_async_work;
+ io_req_task_work_add(req);
}
static inline void io_queue_next(struct io_kiocb *req)
@@ -2195,18 +2178,10 @@ static inline void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
-static void io_put_req_deferred_cb(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
- io_free_req(req);
-}
-
static void io_free_req_deferred(struct io_kiocb *req)
{
- req->task_work.func = io_put_req_deferred_cb;
- if (unlikely(io_req_task_work_add(req)))
- io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
+ req->io_task_work.func = io_free_req;
+ io_req_task_work_add(req);
}
static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -2251,9 +2226,9 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
static inline bool io_run_task_work(void)
{
- if (current->task_works) {
+ if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
__set_current_state(TASK_RUNNING);
- task_work_run();
+ tracehook_notify_signal();
return true;
}
@@ -2264,7 +2239,7 @@ static inline bool io_run_task_work(void)
* Find and free completed poll iocbs
*/
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
- struct list_head *done)
+ struct list_head *done, bool resubmit)
{
struct req_batch rb;
struct io_kiocb *req;
@@ -2279,11 +2254,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, inflight_entry);
list_del(&req->inflight_entry);
- if (READ_ONCE(req->result) == -EAGAIN &&
+ if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
!(req->flags & REQ_F_DONT_REISSUE)) {
req->iopoll_completed = 0;
req_ref_get(req);
- io_queue_async_work(req);
+ io_req_task_queue_reissue(req);
continue;
}
@@ -2303,7 +2278,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
- long min)
+ long min, bool resubmit)
{
struct io_kiocb *req, *tmp;
LIST_HEAD(done);
@@ -2346,7 +2321,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
if (!list_empty(&done))
- io_iopoll_complete(ctx, nr_events, &done);
+ io_iopoll_complete(ctx, nr_events, &done, resubmit);
return ret;
}
@@ -2364,7 +2339,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
while (!list_empty(&ctx->iopoll_list)) {
unsigned int nr_events = 0;
- io_do_iopoll(ctx, &nr_events, 0);
+ io_do_iopoll(ctx, &nr_events, 0, false);
/* let it sleep and repeat later if can't complete a request */
if (nr_events == 0)
@@ -2415,14 +2390,18 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
* very same mutex.
*/
if (list_empty(&ctx->iopoll_list)) {
+ u32 tail = ctx->cached_cq_tail;
+
mutex_unlock(&ctx->uring_lock);
io_run_task_work();
mutex_lock(&ctx->uring_lock);
- if (list_empty(&ctx->iopoll_list))
+ /* some requests don't go through iopoll_list */
+ if (tail != ctx->cached_cq_tail ||
+ list_empty(&ctx->iopoll_list))
break;
}
- ret = io_do_iopoll(ctx, &nr_events, min);
+ ret = io_do_iopoll(ctx, &nr_events, min, true);
} while (!ret && nr_events < min && !need_resched());
out:
mutex_unlock(&ctx->uring_lock);
@@ -2472,6 +2451,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
*/
if (percpu_ref_is_dying(&ctx->refs))
return false;
+ /*
+ * Play it safe and assume it's not safe to re-import and reissue if we're
+ * not in the original thread group (or not in task context).
+ */
+ if (!same_thread_group(req->task, current) || !in_task())
+ return false;
return true;
}
#else
@@ -2485,6 +2470,19 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
}
#endif
+static void io_fallback_req_func(struct work_struct *work)
+{
+ struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
+ fallback_work.work);
+ struct llist_node *node = llist_del_all(&ctx->fallback_llist);
+ struct io_kiocb *req, *tmp;
+
+ percpu_ref_get(&ctx->refs);
+ llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+ req->io_task_work.func(req);
+ percpu_ref_put(&ctx->refs);
+}
+
static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
unsigned int issue_flags)
{
@@ -2791,7 +2789,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
req_ref_get(req);
- io_queue_async_work(req);
+ io_req_task_queue_reissue(req);
} else {
int cflags = 0;
@@ -4846,14 +4844,13 @@ IO_NETOP_FN(recv);
struct io_poll_table {
struct poll_table_struct pt;
struct io_kiocb *req;
+ int nr_entries;
int error;
};
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
- __poll_t mask, task_work_func_t func)
+ __poll_t mask, io_req_tw_func_t func)
{
- int ret;
-
/* for instances that support it check for an event match first: */
if (mask && !(mask & poll->events))
return 0;
@@ -4863,7 +4860,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
list_del_init(&poll->wait.entry);
req->result = mask;
- req->task_work.func = func;
+ req->io_task_work.func = func;
/*
* If this fails, then the task is exiting. When a task exits, the
@@ -4871,11 +4868,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
* of executing it. We can't safely execute it anyway, as we may not
* have the state needed for it anyway.
*/
- ret = io_req_task_work_add(req);
- if (unlikely(ret)) {
- WRITE_ONCE(poll->canceled, true);
- io_req_task_work_add_fallback(req, func);
- }
+ io_req_task_work_add(req);
return 1;
}
@@ -4884,6 +4877,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
{
struct io_ring_ctx *ctx = req->ctx;
+ if (unlikely(req->task->flags & PF_EXITING))
+ WRITE_ONCE(poll->canceled, true);
+
if (!req->result && !READ_ONCE(poll->canceled)) {
struct poll_table_struct pt = { ._key = poll->events };
@@ -4949,7 +4945,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
if (req->poll.events & EPOLLONESHOT)
flags = 0;
if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
- io_poll_remove_waitqs(req);
req->poll.done = true;
flags = 0;
}
@@ -4960,9 +4955,8 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
return !(flags & IORING_CQE_F_MORE);
}
-static void io_poll_task_func(struct callback_head *cb)
+static void io_poll_task_func(struct io_kiocb *req)
{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt;
@@ -4973,6 +4967,7 @@ static void io_poll_task_func(struct callback_head *cb)
done = io_poll_complete(req, req->result);
if (done) {
+ io_poll_remove_double(req);
hash_del(&req->hash_node);
} else {
req->result = 0;
@@ -4984,7 +4979,7 @@ static void io_poll_task_func(struct callback_head *cb)
if (done) {
nxt = io_put_req_find_next(req);
if (nxt)
- __io_req_task_submit(nxt);
+ io_req_task_submit(nxt);
}
}
}
@@ -5004,7 +4999,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
list_del_init(&wait->entry);
- if (poll && poll->head) {
+ if (poll->head) {
bool done;
spin_lock(&poll->head->lock);
@@ -5043,11 +5038,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
struct io_kiocb *req = pt->req;
/*
- * If poll->head is already set, it's because the file being polled
- * uses multiple waitqueues for poll handling (eg one for read, one
- * for write). Setup a separate io_poll_iocb if this happens.
+ * The file being polled uses multiple waitqueues for poll handling
+ * (e.g. one for read, one for write). Set up a separate io_poll_iocb
+ * if this happens.
*/
- if (unlikely(poll->head)) {
+ if (unlikely(pt->nr_entries)) {
struct io_poll_iocb *poll_one = poll;
/* already have a 2nd entry, fail a third attempt */
@@ -5075,7 +5070,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
*poll_ptr = poll;
}
- pt->error = 0;
+ pt->nr_entries++;
poll->head = head;
if (poll->events & EPOLLEXCLUSIVE)
@@ -5093,9 +5088,8 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
-static void io_async_task_func(struct callback_head *cb)
+static void io_async_task_func(struct io_kiocb *req)
{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct async_poll *apoll = req->apoll;
struct io_ring_ctx *ctx = req->ctx;
@@ -5111,7 +5105,7 @@ static void io_async_task_func(struct callback_head *cb)
spin_unlock_irq(&ctx->completion_lock);
if (!READ_ONCE(apoll->poll.canceled))
- __io_req_task_submit(req);
+ io_req_task_submit(req);
else
io_req_complete_failed(req, -ECANCELED);
}
@@ -5153,11 +5147,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
ipt->pt._key = mask;
ipt->req = req;
- ipt->error = -EINVAL;
+ ipt->error = 0;
+ ipt->nr_entries = 0;
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
+ if (unlikely(!ipt->nr_entries) && !ipt->error)
+ ipt->error = -EINVAL;
spin_lock_irq(&ctx->completion_lock);
+ if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
+ io_poll_remove_double(req);
if (likely(poll->head)) {
spin_lock(&poll->head->lock);
if (unlikely(list_empty(&poll->wait.entry))) {
@@ -5228,7 +5227,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
io_async_wake);
if (ret || ipt.error) {
- io_poll_remove_double(req);
spin_unlock_irq(&ctx->completion_lock);
if (ret)
return IO_APOLL_READY;
@@ -6068,10 +6066,12 @@ static bool io_drain_req(struct io_kiocb *req)
ret = io_req_prep_async(req);
if (ret)
- return ret;
+ goto fail;
io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL);
if (!de) {
+ ret = -ENOMEM;
+fail:
io_req_complete_failed(req, ret);
return true;
}
@@ -6809,14 +6809,16 @@ static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
{
/* Tell userspace we may need a wakeup call */
spin_lock_irq(&ctx->completion_lock);
- ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
+ WRITE_ONCE(ctx->rings->sq_flags,
+ ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
spin_unlock_irq(&ctx->completion_lock);
}
static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
{
spin_lock_irq(&ctx->completion_lock);
- ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+ WRITE_ONCE(ctx->rings->sq_flags,
+ ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
spin_unlock_irq(&ctx->completion_lock);
}
@@ -6839,7 +6841,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
mutex_lock(&ctx->uring_lock);
if (!list_empty(&ctx->iopoll_list))
- io_do_iopoll(ctx, &nr_events, 0);
+ io_do_iopoll(ctx, &nr_events, 0, true);
/*
* Don't submit if refs are dying, good for io_uring_register(),
@@ -7138,16 +7140,6 @@ static void **io_alloc_page_table(size_t size)
return table;
}
-static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
-{
- spin_lock_bh(&ctx->rsrc_ref_lock);
-}
-
-static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
-{
- spin_unlock_bh(&ctx->rsrc_ref_lock);
-}
-
static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
percpu_ref_exit(&ref_node->refs);
@@ -7164,9 +7156,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
rsrc_node->rsrc_data = data_to_kill;
- io_rsrc_ref_lock(ctx);
+ spin_lock_irq(&ctx->rsrc_ref_lock);
list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
- io_rsrc_ref_unlock(ctx);
+ spin_unlock_irq(&ctx->rsrc_ref_lock);
atomic_inc(&data_to_kill->refs);
percpu_ref_kill(&rsrc_node->refs);
@@ -7205,17 +7197,19 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct
/* kill initial ref, already quiesced if zero */
if (atomic_dec_and_test(&data->refs))
break;
+ mutex_unlock(&ctx->uring_lock);
flush_delayed_work(&ctx->rsrc_put_work);
ret = wait_for_completion_interruptible(&data->done);
- if (!ret)
+ if (!ret) {
+ mutex_lock(&ctx->uring_lock);
break;
+ }
atomic_inc(&data->refs);
/* wait for all works potentially completing data->done */
flush_delayed_work(&ctx->rsrc_put_work);
reinit_completion(&data->done);
- mutex_unlock(&ctx->uring_lock);
ret = io_run_task_work_sig();
mutex_lock(&ctx->uring_lock);
} while (ret >= 0);
@@ -7674,9 +7668,10 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+ unsigned long flags;
bool first_add = false;
- io_rsrc_ref_lock(ctx);
+ spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
node->done = true;
while (!list_empty(&ctx->rsrc_ref_list)) {
@@ -7688,7 +7683,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
list_del(&node->node);
first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
}
- io_rsrc_ref_unlock(ctx);
+ spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
if (first_add)
mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
@@ -7946,15 +7941,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
struct io_wq_data data;
unsigned int concurrency;
+ mutex_lock(&ctx->uring_lock);
hash = ctx->hash_map;
if (!hash) {
hash = kzalloc(sizeof(*hash), GFP_KERNEL);
- if (!hash)
+ if (!hash) {
+ mutex_unlock(&ctx->uring_lock);
return ERR_PTR(-ENOMEM);
+ }
refcount_set(&hash->refs, 1);
init_waitqueue_head(&hash->wait);
ctx->hash_map = hash;
}
+ mutex_unlock(&ctx->uring_lock);
data.hash = hash;
data.task = task;
@@ -8028,9 +8027,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
f = fdget(p->wq_fd);
if (!f.file)
return -ENXIO;
- fdput(f);
- if (f.file->f_op != &io_uring_fops)
+ if (f.file->f_op != &io_uring_fops) {
+ fdput(f);
return -EINVAL;
+ }
+ fdput(f);
}
if (ctx->flags & IORING_SETUP_SQPOLL) {
struct task_struct *tsk;
@@ -8653,13 +8654,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
mutex_unlock(&ctx->uring_lock);
}
-static bool io_wait_rsrc_data(struct io_rsrc_data *data)
+static void io_wait_rsrc_data(struct io_rsrc_data *data)
{
- if (!data)
- return false;
- if (!atomic_dec_and_test(&data->refs))
+ if (data && !atomic_dec_and_test(&data->refs))
wait_for_completion(&data->done);
- return true;
}
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
@@ -8671,10 +8669,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
ctx->mm_account = NULL;
}
+ /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
+ io_wait_rsrc_data(ctx->buf_data);
+ io_wait_rsrc_data(ctx->file_data);
+
mutex_lock(&ctx->uring_lock);
- if (io_wait_rsrc_data(ctx->buf_data))
+ if (ctx->buf_data)
__io_sqe_buffers_unregister(ctx);
- if (io_wait_rsrc_data(ctx->file_data))
+ if (ctx->file_data)
__io_sqe_files_unregister(ctx);
if (ctx->rings)
__io_cqring_overflow_flush(ctx, true);
@@ -8767,11 +8769,6 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
return -EINVAL;
}
-static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
-{
- return io_run_task_work_head(&ctx->exit_task_work);
-}
-
struct io_tctx_exit {
struct callback_head task_work;
struct completion completion;
@@ -8837,7 +8834,7 @@ static void io_ring_exit_work(struct work_struct *work)
/*
* Some may use context even when all refs and requests have been put,
* and they are free to do so while still holding uring_lock or
- * completion_lock, see __io_req_task_submit(). Apart from other work,
+ * completion_lock, see io_req_task_submit(). Apart from other work,
* this lock/unlock section also waits them to finish.
*/
mutex_lock(&ctx->uring_lock);
@@ -9036,7 +9033,6 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
ret |= io_kill_timeouts(ctx, task, cancel_all);
if (task)
ret |= io_run_task_work();
- ret |= io_run_ctx_fallback(ctx);
if (!ret)
break;
cond_resched();
@@ -9376,9 +9372,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
if (ctx->flags & IORING_SETUP_SQPOLL) {
io_cqring_overflow_flush(ctx, false);
- ret = -EOWNERDEAD;
- if (unlikely(ctx->sq_data->thread == NULL))
+ if (unlikely(ctx->sq_data->thread == NULL)) {
+ ret = -EOWNERDEAD;
goto out;
+ }
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9846,10 +9843,11 @@ static int io_register_personality(struct io_ring_ctx *ctx)
ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
- if (!ret)
- return id;
- put_cred(creds);
- return ret;
+ if (ret < 0) {
+ put_cred(creds);
+ return ret;
+ }
+ return id;
}
static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 0065781935c7..87ccb3438bec 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -215,6 +215,7 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
if (PageUptodate(page))
return;
+ BUG_ON(page_has_private(page));
BUG_ON(page->index);
BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
@@ -239,7 +240,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
{
struct iomap_readpage_ctx *ctx = data;
struct page *page = ctx->cur_page;
- struct iomap_page *iop = iomap_page_create(inode, page);
+ struct iomap_page *iop;
bool same_page = false, is_contig = false;
loff_t orig_pos = pos;
unsigned poff, plen;
@@ -252,6 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
/* zero post-eof blocks as the page may be mapped */
+ iop = iomap_page_create(inode, page);
iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
@@ -746,10 +748,6 @@ again:
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
- *
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
*/
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
status = -EFAULT;
@@ -764,30 +762,29 @@ again:
if (mapping_writably_mapped(inode->i_mapping))
flush_dcache_page(page);
- copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+ copied = copy_page_from_iter_atomic(page, offset, bytes, i);
- copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
+ status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
srcmap);
- cond_resched();
+ if (unlikely(copied != status))
+ iov_iter_revert(i, copied - status);
- iov_iter_advance(i, copied);
- if (unlikely(copied == 0)) {
+ cond_resched();
+ if (unlikely(status == 0)) {
/*
- * If we were unable to copy any data at all, we must
- * fall back to a single segment length write.
- *
- * If we didn't fallback here, we could livelock
- * because not all segments in the iov can be copied at
- * once without a pagefault.
+ * A short copy made iomap_write_end() reject the
+ * thing entirely. Might be memory poisoning
+ * halfway through, might be a race with munmap,
+ * might be severe memory pressure.
*/
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_single_seg_count(i));
+ if (copied)
+ bytes = copied;
goto again;
}
- pos += copied;
- written += copied;
- length -= copied;
+ pos += status;
+ written += status;
+ length -= status;
balance_dirty_pages_ratelimited(inode->i_mapping);
} while (iov_iter_count(i) && length);
@@ -972,7 +969,6 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
block_commit_write(page, 0, length);
} else {
WARN_ON_ONCE(!PageUptodate(page));
- iomap_page_create(inode, page);
set_page_dirty(page);
}
@@ -1309,14 +1305,13 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
struct page *page, u64 end_offset)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_page *iop = iomap_page_create(inode, page);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
u64 file_offset; /* file offset of page */
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
- WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index dab1b02eba5b..ce6fb810854f 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -35,23 +35,20 @@ loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
loff_t size = i_size_read(inode);
- loff_t length = size - offset;
loff_t ret;
/* Nothing to be found before or beyond the end of the file. */
if (offset < 0 || offset >= size)
return -ENXIO;
- while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
- &offset, iomap_seek_hole_actor);
+ while (offset < size) {
+ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+ ops, &offset, iomap_seek_hole_actor);
if (ret < 0)
return ret;
if (ret == 0)
break;
-
offset += ret;
- length -= ret;
}
return offset;
@@ -83,27 +80,23 @@ loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
loff_t size = i_size_read(inode);
- loff_t length = size - offset;
loff_t ret;
/* Nothing to be found before or beyond the end of the file. */
if (offset < 0 || offset >= size)
return -ENXIO;
- while (length > 0) {
- ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
- &offset, iomap_seek_data_actor);
+ while (offset < size) {
+ ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+ ops, &offset, iomap_seek_data_actor);
if (ret < 0)
return ret;
if (ret == 0)
- break;
-
+ return offset;
offset += ret;
- length -= ret;
}
- if (length <= 0)
- return -ENXIO;
- return offset;
+ /* We've reached the end of the file without finding data */
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 51d1eb2ffeb9..746132998c57 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -701,7 +701,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
__buffer_unlink(jh);
jh->b_cp_transaction = NULL;
- percpu_counter_dec(&journal->j_jh_shrink_count);
+ percpu_counter_dec(&journal->j_checkpoint_jh_count);
jbd2_journal_put_journal_head(jh);
/* Is this transaction empty? */
@@ -764,7 +764,7 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
jh->b_cpnext->b_cpprev = jh;
}
transaction->t_checkpoint_list = jh;
- percpu_counter_inc(&transaction->t_journal->j_jh_shrink_count);
+ percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count);
}
/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 152880c298ca..35302bc192eb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1283,6 +1283,48 @@ static int jbd2_min_tag_size(void)
return sizeof(journal_block_tag_t) - 4;
}
+/**
+ * jbd2_journal_shrink_scan()
+ *
+ * Scan the checkpointed buffers on the checkpoint list and release their
+ * journal_heads.
+ */
+static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ journal_t *journal = container_of(shrink, journal_t, j_shrinker);
+ unsigned long nr_to_scan = sc->nr_to_scan;
+ unsigned long nr_shrunk;
+ unsigned long count;
+
+ count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
+ trace_jbd2_shrink_scan_enter(journal, sc->nr_to_scan, count);
+
+ nr_shrunk = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan);
+
+ count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
+ trace_jbd2_shrink_scan_exit(journal, nr_to_scan, nr_shrunk, count);
+
+ return nr_shrunk;
+}
+
+/**
+ * jbd2_journal_shrink_count()
+ *
+ * Count the number of checkpoint buffers on the checkpoint list.
+ */
+static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ journal_t *journal = container_of(shrink, journal_t, j_shrinker);
+ unsigned long count;
+
+ count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
+ trace_jbd2_shrink_count(journal, sc->nr_to_scan, count);
+
+ return count;
+}
+
/*
* Management for journal control blocks: functions to create and
* destroy journal_t structures, and to initialise and read existing
@@ -1361,9 +1403,23 @@ static journal_t *journal_init_common(struct block_device *bdev,
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
+ journal->j_shrink_transaction = NULL;
+ journal->j_shrinker.scan_objects = jbd2_journal_shrink_scan;
+ journal->j_shrinker.count_objects = jbd2_journal_shrink_count;
+ journal->j_shrinker.seeks = DEFAULT_SEEKS;
+ journal->j_shrinker.batch = journal->j_max_transaction_buffers;
+
+ if (percpu_counter_init(&journal->j_checkpoint_jh_count, 0, GFP_KERNEL))
+ goto err_cleanup;
+
+ if (register_shrinker(&journal->j_shrinker)) {
+ percpu_counter_destroy(&journal->j_checkpoint_jh_count);
+ goto err_cleanup;
+ }
return journal;
err_cleanup:
+ brelse(journal->j_sb_buffer);
kfree(journal->j_wbuf);
jbd2_journal_destroy_revoke(journal);
kfree(journal);
@@ -2051,93 +2107,6 @@ recovery_error:
}
/**
- * jbd2_journal_shrink_scan()
- *
- * Scan the checkpointed buffer on the checkpoint list and release the
- * journal_head.
- */
-static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- journal_t *journal = container_of(shrink, journal_t, j_shrinker);
- unsigned long nr_to_scan = sc->nr_to_scan;
- unsigned long nr_shrunk;
- unsigned long count;
-
- count = percpu_counter_read_positive(&journal->j_jh_shrink_count);
- trace_jbd2_shrink_scan_enter(journal, sc->nr_to_scan, count);
-
- nr_shrunk = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan);
-
- count = percpu_counter_read_positive(&journal->j_jh_shrink_count);
- trace_jbd2_shrink_scan_exit(journal, nr_to_scan, nr_shrunk, count);
-
- return nr_shrunk;
-}
-
-/**
- * jbd2_journal_shrink_count()
- *
- * Count the number of checkpoint buffers on the checkpoint list.
- */
-static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- journal_t *journal = container_of(shrink, journal_t, j_shrinker);
- unsigned long count;
-
- count = percpu_counter_read_positive(&journal->j_jh_shrink_count);
- trace_jbd2_shrink_count(journal, sc->nr_to_scan, count);
-
- return count;
-}
-
-/**
- * jbd2_journal_register_shrinker()
- * @journal: Journal to act on.
- *
- * Init a percpu counter to record the checkpointed buffers on the checkpoint
- * list and register a shrinker to release their journal_head.
- */
-int jbd2_journal_register_shrinker(journal_t *journal)
-{
- int err;
-
- journal->j_shrink_transaction = NULL;
-
- err = percpu_counter_init(&journal->j_jh_shrink_count, 0, GFP_KERNEL);
- if (err)
- return err;
-
- journal->j_shrinker.scan_objects = jbd2_journal_shrink_scan;
- journal->j_shrinker.count_objects = jbd2_journal_shrink_count;
- journal->j_shrinker.seeks = DEFAULT_SEEKS;
- journal->j_shrinker.batch = journal->j_max_transaction_buffers;
-
- err = register_shrinker(&journal->j_shrinker);
- if (err) {
- percpu_counter_destroy(&journal->j_jh_shrink_count);
- return err;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(jbd2_journal_register_shrinker);
-
-/**
- * jbd2_journal_unregister_shrinker()
- * @journal: Journal to act on.
- *
- * Unregister the checkpointed buffer shrinker and destroy the percpu counter.
- */
-void jbd2_journal_unregister_shrinker(journal_t *journal)
-{
- percpu_counter_destroy(&journal->j_jh_shrink_count);
- unregister_shrinker(&journal->j_shrinker);
-}
-EXPORT_SYMBOL(jbd2_journal_unregister_shrinker);
-
-/**
* jbd2_journal_destroy() - Release a journal_t structure.
* @journal: Journal to act on.
*
@@ -2209,8 +2178,10 @@ int jbd2_journal_destroy(journal_t *journal)
brelse(journal->j_sb_buffer);
}
- jbd2_journal_unregister_shrinker(journal);
-
+ if (journal->j_shrinker.flags & SHRINKER_REGISTERED) {
+ percpu_counter_destroy(&journal->j_checkpoint_jh_count);
+ unregister_shrinker(&journal->j_shrinker);
+ }
if (journal->j_proc_entry)
jbd2_stats_proc_exit(journal);
iput(journal->j_inode);
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 3663dd5a23bc..57ab424c05ff 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -151,7 +151,8 @@ void jfs_evict_inode(struct inode *inode)
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);
- diFree(inode);
+ if (JFS_SBI(inode->i_sb)->ipimap)
+ diFree(inode);
/*
* Free the inode from the quota allocation.
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index d6af79e94263..6b231d0d0071 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -101,7 +101,6 @@ struct dinode {
u8 unused[16]; /* 16: */
dxd_t _dxd; /* 16: */
union {
- __le32 _rdev; /* 4: */
/*
* The fast symlink area
* is expected to overflow
@@ -109,9 +108,15 @@ struct dinode {
* needed (which will clear
* INLINEEA).
*/
- u8 _fastsymlink[128];
- } _u;
- u8 _inlineea[128];
+ struct {
+ union {
+ __le32 _rdev; /* 4: */
+ u8 _fastsymlink[128];
+ } _u;
+ u8 _inlineea[128];
+ };
+ u8 _inline_all[256];
+ };
} _special;
} _u2;
} _file;
@@ -122,6 +127,7 @@ struct dinode {
#define di_rdev u._file._u2._special._u._rdev
#define di_fastsymlink u._file._u2._special._u._fastsymlink
#define di_inlineea u._file._u2._special._inlineea
+#define di_inline_all u._file._u2._special._inline_all
} u;
};
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 7aee15608619..91f4ec93dab1 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -3660,7 +3660,7 @@ void dbFinalizeBmap(struct inode *ipbmap)
* (the leftmost ag with average free space in it);
*/
//agpref:
- /* get the number of active ags and inacitve ags */
+ /* get the number of active ags and inactive ags */
actags = bmp->db_maxag + 1;
inactags = bmp->db_numag - actags;
ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* ??? */
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 937ca07b58b1..799d3837e7c2 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -103,10 +103,8 @@ int diMount(struct inode *ipimap)
*/
/* allocate the in-memory inode map control structure. */
imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
- if (imap == NULL) {
- jfs_err("diMount: kmalloc returned NULL!");
+ if (imap == NULL)
return -ENOMEM;
- }
/* read the on-disk inode map control structure. */
@@ -763,7 +761,7 @@ int diWrite(tid_t tid, struct inode *ip)
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
lv->length = 2;
- memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
+ memcpy(&dp->di_inline_all, jfs_ip->i_inline_all, IDATASIZE);
dilinelock->index++;
}
/*
@@ -3084,7 +3082,7 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
}
if (S_ISDIR(ip->i_mode)) {
- memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384);
+ memcpy(&jfs_ip->u.dir, &dip->u._dir, 384);
} else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
} else
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index a466ec41cfbb..721def69e732 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -77,11 +77,18 @@ struct jfs_inode_info {
unchar _unused[16]; /* 16: */
dxd_t _dxd; /* 16: */
/* _inline may overflow into _inline_ea when needed */
- unchar _inline[128]; /* 128: inline symlink */
/* _inline_ea may overlay the last part of
* file._xtroot if maxentry = XTROOTINITSLOT
*/
- unchar _inline_ea[128]; /* 128: inline extended attr */
+ union {
+ struct {
+ /* 128: inline symlink */
+ unchar _inline[128];
+ /* 128: inline extended attr */
+ unchar _inline_ea[128];
+ };
+ unchar _inline_all[256];
+ };
} link;
} u;
#ifdef CONFIG_QUOTA
@@ -96,6 +103,7 @@ struct jfs_inode_info {
#define i_dtroot u.dir._dtroot
#define i_inline u.link._inline
#define i_inline_ea u.link._inline_ea
+#define i_inline_all u.link._inline_all
#define IREAD_LOCK(ip, subclass) \
down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 9330eff210e0..78fd136ac13b 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1324,6 +1324,7 @@ int lmLogInit(struct jfs_log * log)
} else {
if (!uuid_equal(&logsuper->uuid, &log->uuid)) {
jfs_warn("wrong uuid on JFS log device");
+ rc = -EINVAL;
goto errout20;
}
log->size = le32_to_cpu(logsuper->size);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 053295cd7bc6..042bbe6d8ac2 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(jfsTxnLock);
#define TXN_LOCK() spin_lock(&jfsTxnLock)
#define TXN_UNLOCK() spin_unlock(&jfsTxnLock)
-#define LAZY_LOCK_INIT() spin_lock_init(&TxAnchor.LazyLock);
+#define LAZY_LOCK_INIT() spin_lock_init(&TxAnchor.LazyLock)
#define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 1f0ffabbde56..9030aeaf0f88 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -939,7 +939,8 @@ static int __init init_jfs_fs(void)
jfs_inode_cachep =
kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
- offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
+ offsetof(struct jfs_inode_info, i_inline_all),
+ sizeof_field(struct jfs_inode_info, i_inline_all),
init_once);
if (jfs_inode_cachep == NULL)
return -ENOMEM;
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index 90d255fbdd9b..87aac4c72c37 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -160,7 +160,7 @@ int kernel_read_file_from_path_initns(const char *path, loff_t offset,
get_fs_root(init_task.fs, &root);
task_unlock(&init_task);
- file = file_open_root(root.dentry, root.mnt, path, O_RDONLY, 0);
+ file = file_open_root(&root, path, O_RDONLY, 0);
path_put(&root);
if (IS_ERR(file))
return PTR_ERR(file);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 7e0e62deab53..33166ec90a11 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -548,49 +548,6 @@ void kernfs_put(struct kernfs_node *kn)
}
EXPORT_SYMBOL_GPL(kernfs_put);
-static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
-{
- struct kernfs_node *kn;
-
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- /* Always perform fresh lookup for negatives */
- if (d_really_is_negative(dentry))
- goto out_bad_unlocked;
-
- kn = kernfs_dentry_node(dentry);
- mutex_lock(&kernfs_mutex);
-
- /* The kernfs node has been deactivated */
- if (!kernfs_active(kn))
- goto out_bad;
-
- /* The kernfs node has been moved? */
- if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
- goto out_bad;
-
- /* The kernfs node has been renamed */
- if (strcmp(dentry->d_name.name, kn->name) != 0)
- goto out_bad;
-
- /* The kernfs node has been moved to a different namespace */
- if (kn->parent && kernfs_ns_enabled(kn->parent) &&
- kernfs_info(dentry->d_sb)->ns != kn->ns)
- goto out_bad;
-
- mutex_unlock(&kernfs_mutex);
- return 1;
-out_bad:
- mutex_unlock(&kernfs_mutex);
-out_bad_unlocked:
- return 0;
-}
-
-const struct dentry_operations kernfs_dops = {
- .d_revalidate = kernfs_dop_revalidate,
-};
-
/**
* kernfs_node_from_dentry - determine kernfs_node associated with a dentry
* @dentry: the dentry in question
@@ -1073,6 +1030,49 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
return ERR_PTR(rc);
}
+static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct kernfs_node *kn;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ /* Always perform fresh lookup for negatives */
+ if (d_really_is_negative(dentry))
+ goto out_bad_unlocked;
+
+ kn = kernfs_dentry_node(dentry);
+ mutex_lock(&kernfs_mutex);
+
+ /* The kernfs node has been deactivated */
+ if (!kernfs_active(kn))
+ goto out_bad;
+
+ /* The kernfs node has been moved? */
+ if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
+ goto out_bad;
+
+ /* The kernfs node has been renamed */
+ if (strcmp(dentry->d_name.name, kn->name) != 0)
+ goto out_bad;
+
+ /* The kernfs node has been moved to a different namespace */
+ if (kn->parent && kernfs_ns_enabled(kn->parent) &&
+ kernfs_info(dentry->d_sb)->ns != kn->ns)
+ goto out_bad;
+
+ mutex_unlock(&kernfs_mutex);
+ return 1;
+out_bad:
+ mutex_unlock(&kernfs_mutex);
+out_bad_unlocked:
+ return 0;
+}
+
+const struct dentry_operations kernfs_dops = {
+ .d_revalidate = kernfs_dop_revalidate,
+};
+
static struct dentry *kernfs_iop_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags)
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 1a639e34847d..2de048f80eb8 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -766,6 +766,46 @@ static void __exit exit_nlm(void)
module_init(init_nlm);
module_exit(exit_nlm);
+/**
+ * nlmsvc_dispatch - Process an NLM Request
+ * @rqstp: incoming request
+ * @statp: pointer to location of accept_stat field in RPC Reply buffer
+ *
+ * Return values:
+ * %0: Processing complete; do not send a Reply
+ * %1: Processing complete; send Reply in rqstp->rq_res
+ */
+static int nlmsvc_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+{
+ const struct svc_procedure *procp = rqstp->rq_procinfo;
+ struct kvec *argv = rqstp->rq_arg.head;
+ struct kvec *resv = rqstp->rq_res.head;
+
+ svcxdr_init_decode(rqstp);
+ if (!procp->pc_decode(rqstp, argv->iov_base))
+ goto out_decode_err;
+
+ *statp = procp->pc_func(rqstp);
+ if (*statp == rpc_drop_reply)
+ return 0;
+ if (*statp != rpc_success)
+ return 1;
+
+ svcxdr_init_encode(rqstp);
+ if (!procp->pc_encode(rqstp, resv->iov_base + resv->iov_len))
+ goto out_encode_err;
+
+ return 1;
+
+out_decode_err:
+ *statp = rpc_garbage_args;
+ return 1;
+
+out_encode_err:
+ *statp = rpc_system_err;
+ return 1;
+}
+
/*
* Define NLM program and procedures
*/
@@ -775,6 +815,7 @@ static const struct svc_version nlmsvc_version1 = {
.vs_nproc = 17,
.vs_proc = nlmsvc_procedures,
.vs_count = nlmsvc_version1_count,
+ .vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
static unsigned int nlmsvc_version3_count[24];
@@ -783,6 +824,7 @@ static const struct svc_version nlmsvc_version3 = {
.vs_nproc = 24,
.vs_proc = nlmsvc_procedures,
.vs_count = nlmsvc_version3_count,
+ .vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
@@ -792,6 +834,7 @@ static const struct svc_version nlmsvc_version4 = {
.vs_nproc = 24,
.vs_proc = nlmsvc_procedures4,
.vs_count = nlmsvc_version4_count,
+ .vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
diff --git a/fs/lockd/svcxdr.h b/fs/lockd/svcxdr.h
new file mode 100644
index 000000000000..c69a0bb76c94
--- /dev/null
+++ b/fs/lockd/svcxdr.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Encode/decode NLM basic data types
+ *
+ * Basic NLMv3 XDR data types are not defined in an IETF standards
+ * document. X/Open has a description of these data types that
+ * is useful. See Chapter 10 of "Protocols for Interworking:
+ * XNFS, Version 3W".
+ *
+ * Basic NLMv4 XDR data types are defined in Appendix II.1.4 of
+ * RFC 1813: "NFS Version 3 Protocol Specification".
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef _LOCKD_SVCXDR_H_
+#define _LOCKD_SVCXDR_H_
+
+static inline bool
+svcxdr_decode_stats(struct xdr_stream *xdr, __be32 *status)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, XDR_UNIT);
+ if (!p)
+ return false;
+ *status = *p;
+
+ return true;
+}
+
+static inline bool
+svcxdr_encode_stats(struct xdr_stream *xdr, __be32 status)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!p)
+ return false;
+ *p = status;
+
+ return true;
+}
+
+static inline bool
+svcxdr_decode_string(struct xdr_stream *xdr, char **data, unsigned int *data_len)
+{
+ __be32 *p;
+ u32 len;
+
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return false;
+ if (len > NLM_MAXSTRLEN)
+ return false;
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return false;
+ *data_len = len;
+ *data = (char *)p;
+
+ return true;
+}
+
+/*
+ * NLM cookies are defined by specification to be a variable-length
+ * XDR opaque no longer than 1024 bytes. However, this implementation
+ * limits their length to 32 bytes, and treats zero-length cookies
+ * specially.
+ */
+static inline bool
+svcxdr_decode_cookie(struct xdr_stream *xdr, struct nlm_cookie *cookie)
+{
+ __be32 *p;
+ u32 len;
+
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return false;
+ if (len > NLM_MAXCOOKIELEN)
+ return false;
+ if (!len)
+ goto out_hpux;
+
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return false;
+ cookie->len = len;
+ memcpy(cookie->data, p, len);
+
+ return true;
+
+ /* apparently HPUX can return empty cookies */
+out_hpux:
+ cookie->len = 4;
+ memset(cookie->data, 0, 4);
+ return true;
+}
+
+static inline bool
+svcxdr_encode_cookie(struct xdr_stream *xdr, const struct nlm_cookie *cookie)
+{
+ __be32 *p;
+
+ if (xdr_stream_encode_u32(xdr, cookie->len) < 0)
+ return false;
+ p = xdr_reserve_space(xdr, cookie->len);
+ if (!p)
+ return false;
+ memcpy(p, cookie->data, cookie->len);
+
+ return true;
+}
+
+static inline bool
+svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
+{
+ __be32 *p;
+ u32 len;
+
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return false;
+ if (len > XDR_MAX_NETOBJ)
+ return false;
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return false;
+ obj->len = len;
+ obj->data = (u8 *)p;
+
+ return true;
+}
+
+static inline bool
+svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
+{
+ unsigned int quadlen = XDR_QUADLEN(obj->len);
+ __be32 *p;
+
+ if (xdr_stream_encode_u32(xdr, obj->len) < 0)
+ return false;
+ p = xdr_reserve_space(xdr, obj->len);
+ if (!p)
+ return false;
+ p[quadlen - 1] = 0; /* XDR pad */
+ memcpy(p, obj->data, obj->len);
+
+ return true;
+}
+
+#endif /* _LOCKD_SVCXDR_H_ */
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 982629f7b120..9235e60b1769 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -19,7 +19,7 @@
#include <uapi/linux/nfs2.h>
-#define NLMDBG_FACILITY NLMDBG_XDR
+#include "svcxdr.h"
static inline loff_t
@@ -42,311 +42,323 @@ loff_t_to_s32(loff_t offset)
}
/*
- * XDR functions for basic NLM types
+ * NLM file handles are defined by specification to be a variable-length
+ * XDR opaque no longer than 1024 bytes. However, this implementation
+ * constrains their length to exactly the length of an NFSv2 file
+ * handle.
*/
-static __be32 *nlm_decode_cookie(__be32 *p, struct nlm_cookie *c)
+static bool
+svcxdr_decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
{
- unsigned int len;
-
- len = ntohl(*p++);
-
- if(len==0)
- {
- c->len=4;
- memset(c->data, 0, 4); /* hockeypux brain damage */
- }
- else if(len<=NLM_MAXCOOKIELEN)
- {
- c->len=len;
- memcpy(c->data, p, len);
- p+=XDR_QUADLEN(len);
- }
- else
- {
- dprintk("lockd: bad cookie size %d (only cookies under "
- "%d bytes are supported.)\n",
- len, NLM_MAXCOOKIELEN);
- return NULL;
- }
- return p;
-}
-
-static inline __be32 *
-nlm_encode_cookie(__be32 *p, struct nlm_cookie *c)
-{
- *p++ = htonl(c->len);
- memcpy(p, c->data, c->len);
- p+=XDR_QUADLEN(c->len);
- return p;
-}
-
-static __be32 *
-nlm_decode_fh(__be32 *p, struct nfs_fh *f)
-{
- unsigned int len;
-
- if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
- dprintk("lockd: bad fhandle size %d (should be %d)\n",
- len, NFS2_FHSIZE);
- return NULL;
- }
- f->size = NFS2_FHSIZE;
- memset(f->data, 0, sizeof(f->data));
- memcpy(f->data, p, NFS2_FHSIZE);
- return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-/*
- * Encode and decode owner handle
- */
-static inline __be32 *
-nlm_decode_oh(__be32 *p, struct xdr_netobj *oh)
-{
- return xdr_decode_netobj(p, oh);
-}
-
-static inline __be32 *
-nlm_encode_oh(__be32 *p, struct xdr_netobj *oh)
-{
- return xdr_encode_netobj(p, oh);
+ __be32 *p;
+ u32 len;
+
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return false;
+ if (len != NFS2_FHSIZE)
+ return false;
+
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return false;
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, len);
+ memset(fh->data + NFS2_FHSIZE, 0, sizeof(fh->data) - NFS2_FHSIZE);
+
+ return true;
}
-static __be32 *
-nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
+static bool
+svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
{
- struct file_lock *fl = &lock->fl;
- s32 start, len, end;
-
- if (!(p = xdr_decode_string_inplace(p, &lock->caller,
- &lock->len,
- NLM_MAXSTRLEN))
- || !(p = nlm_decode_fh(p, &lock->fh))
- || !(p = nlm_decode_oh(p, &lock->oh)))
- return NULL;
- lock->svid = ntohl(*p++);
+ struct file_lock *fl = &lock->fl;
+ s32 start, len, end;
+
+ if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
+ return false;
+ if (!svcxdr_decode_fhandle(xdr, &lock->fh))
+ return false;
+ if (!svcxdr_decode_owner(xdr, &lock->oh))
+ return false;
+ if (xdr_stream_decode_u32(xdr, &lock->svid) < 0)
+ return false;
+ if (xdr_stream_decode_u32(xdr, &start) < 0)
+ return false;
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return false;
locks_init_lock(fl);
fl->fl_flags = FL_POSIX;
- fl->fl_type = F_RDLCK; /* as good as anything else */
- start = ntohl(*p++);
- len = ntohl(*p++);
+ fl->fl_type = F_RDLCK;
end = start + len - 1;
-
fl->fl_start = s32_to_loff_t(start);
-
if (len == 0 || end < 0)
fl->fl_end = OFFSET_MAX;
else
fl->fl_end = s32_to_loff_t(end);
- return p;
+
+ return true;
}
-/*
- * Encode result of a TEST/TEST_MSG call
- */
-static __be32 *
-nlm_encode_testres(__be32 *p, struct nlm_res *resp)
+static bool
+svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock)
{
- s32 start, len;
-
- if (!(p = nlm_encode_cookie(p, &resp->cookie)))
- return NULL;
- *p++ = resp->status;
-
- if (resp->status == nlm_lck_denied) {
- struct file_lock *fl = &resp->lock.fl;
-
- *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
- *p++ = htonl(resp->lock.svid);
-
- /* Encode owner handle. */
- if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
- return NULL;
+ const struct file_lock *fl = &lock->fl;
+ s32 start, len;
+
+ /* exclusive */
+ if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0)
+ return false;
+ if (xdr_stream_encode_u32(xdr, lock->svid) < 0)
+ return false;
+ if (!svcxdr_encode_owner(xdr, &lock->oh))
+ return false;
+ start = loff_t_to_s32(fl->fl_start);
+ if (fl->fl_end == OFFSET_MAX)
+ len = 0;
+ else
+ len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+ if (xdr_stream_encode_u32(xdr, start) < 0)
+ return false;
+ if (xdr_stream_encode_u32(xdr, len) < 0)
+ return false;
- start = loff_t_to_s32(fl->fl_start);
- if (fl->fl_end == OFFSET_MAX)
- len = 0;
- else
- len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+ return true;
+}
- *p++ = htonl(start);
- *p++ = htonl(len);
+static bool
+svcxdr_encode_testrply(struct xdr_stream *xdr, const struct nlm_res *resp)
+{
+ if (!svcxdr_encode_stats(xdr, resp->status))
+ return false;
+ switch (resp->status) {
+ case nlm_lck_denied:
+ if (!svcxdr_encode_holder(xdr, &resp->lock))
+ return false;
}
- return p;
+ return true;
}
/*
- * First, the server side XDR functions
+ * Decode Call arguments
*/
+
+int
+nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p)
+{
+ return 1;
+}
+
int
nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- u32 exclusive;
+ u32 exclusive;
- if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
return 0;
-
- exclusive = ntohl(*p++);
- if (!(p = nlm_decode_lock(p, &argp->lock)))
+ if (xdr_stream_decode_bool(xdr, &exclusive) < 0)
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
- return xdr_argsize_check(rqstp, p);
-}
-
-int
-nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
-{
- struct nlm_res *resp = rqstp->rq_resp;
-
- if (!(p = nlm_encode_testres(p, resp)))
- return 0;
- return xdr_ressize_check(rqstp, p);
+ return 1;
}
int
nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- u32 exclusive;
+ u32 exclusive;
- if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (xdr_stream_decode_bool(xdr, &argp->block) < 0)
return 0;
- argp->block = ntohl(*p++);
- exclusive = ntohl(*p++);
- if (!(p = nlm_decode_lock(p, &argp->lock)))
+ if (xdr_stream_decode_bool(xdr, &exclusive) < 0)
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
- argp->reclaim = ntohl(*p++);
- argp->state = ntohl(*p++);
+ if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
+ return 0;
argp->monitor = 1; /* monitor client by default */
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- u32 exclusive;
+ u32 exclusive;
- if (!(p = nlm_decode_cookie(p, &argp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (xdr_stream_decode_bool(xdr, &argp->block) < 0)
return 0;
- argp->block = ntohl(*p++);
- exclusive = ntohl(*p++);
- if (!(p = nlm_decode_lock(p, &argp->lock)))
+ if (xdr_stream_decode_bool(xdr, &exclusive) < 0)
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
int
nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- if (!(p = nlm_decode_cookie(p, &argp->cookie))
- || !(p = nlm_decode_lock(p, &argp->lock)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
argp->lock.fl.fl_type = F_UNLCK;
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
int
-nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_args *argp = rqstp->rq_argp;
- struct nlm_lock *lock = &argp->lock;
-
- memset(lock, 0, sizeof(*lock));
- locks_init_lock(&lock->fl);
- lock->svid = ~(u32) 0;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nlm_res *resp = rqstp->rq_argp;
- if (!(p = nlm_decode_cookie(p, &argp->cookie))
- || !(p = xdr_decode_string_inplace(p, &lock->caller,
- &lock->len, NLM_MAXSTRLEN))
- || !(p = nlm_decode_fh(p, &lock->fh))
- || !(p = nlm_decode_oh(p, &lock->oh)))
+ if (!svcxdr_decode_cookie(xdr, &resp->cookie))
+ return 0;
+ if (!svcxdr_decode_stats(xdr, &resp->status))
return 0;
- argp->fsm_mode = ntohl(*p++);
- argp->fsm_access = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
int
-nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_res *resp = rqstp->rq_resp;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nlm_reboot *argp = rqstp->rq_argp;
+ u32 len;
- if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return 0;
+ if (len > SM_MAXSTRLEN)
+ return 0;
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return 0;
+ argp->len = len;
+ argp->mon = (char *)p;
+ if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
+ return 0;
+ p = xdr_inline_decode(xdr, SM_PRIV_SIZE);
+ if (!p)
return 0;
- *p++ = resp->status;
- *p++ = xdr_zero; /* sequence argument */
- return xdr_ressize_check(rqstp, p);
+ memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
+
+ return 1;
}
int
-nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_res *resp = rqstp->rq_resp;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nlm_args *argp = rqstp->rq_argp;
+ struct nlm_lock *lock = &argp->lock;
- if (!(p = nlm_encode_cookie(p, &resp->cookie)))
+ memset(lock, 0, sizeof(*lock));
+ locks_init_lock(&lock->fl);
+ lock->svid = ~(u32)0;
+
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
+ return 0;
+ if (!svcxdr_decode_fhandle(xdr, &lock->fh))
+ return 0;
+ if (!svcxdr_decode_owner(xdr, &lock->oh))
+ return 0;
+ /* XXX: Range checks are missing in the original code */
+ if (xdr_stream_decode_u32(xdr, &argp->fsm_mode) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->fsm_access) < 0)
return 0;
- *p++ = resp->status;
- return xdr_ressize_check(rqstp, p);
+
+ return 1;
}
int
nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
- if (!(p = xdr_decode_string_inplace(p, &lock->caller,
- &lock->len, NLM_MAXSTRLEN)))
+ if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
return 0;
- argp->state = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
+
+/*
+ * Encode Reply results
+ */
+
int
-nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_reboot *argp = rqstp->rq_argp;
-
- if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
- return 0;
- argp->state = ntohl(*p++);
- memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
- p += XDR_QUADLEN(SM_PRIV_SIZE);
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
-nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_res *resp = rqstp->rq_argp;
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct nlm_res *resp = rqstp->rq_resp;
- if (!(p = nlm_decode_cookie(p, &resp->cookie)))
- return 0;
- resp->status = *p++;
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_encode_cookie(xdr, &resp->cookie) &&
+ svcxdr_encode_testrply(xdr, resp);
}
int
-nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p)
{
- return xdr_argsize_check(rqstp, p);
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct nlm_res *resp = rqstp->rq_resp;
+
+ return svcxdr_encode_cookie(xdr, &resp->cookie) &&
+ svcxdr_encode_stats(xdr, resp->status);
}
int
-nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p)
+nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
{
- return xdr_ressize_check(rqstp, p);
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct nlm_res *resp = rqstp->rq_resp;
+
+ if (!svcxdr_encode_cookie(xdr, &resp->cookie))
+ return 0;
+ if (!svcxdr_encode_stats(xdr, resp->status))
+ return 0;
+ /* sequence */
+ if (xdr_stream_encode_u32(xdr, 0) < 0)
+ return 0;
+
+ return 1;
}
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 5fa9f48a9dba..98e957e4566c 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -18,7 +18,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>
-#define NLMDBG_FACILITY NLMDBG_XDR
+#include "svcxdr.h"
static inline loff_t
s64_to_loff_t(__s64 offset)
@@ -41,309 +41,322 @@ loff_t_to_s64(loff_t offset)
}
/*
- * XDR functions for basic NLM types
+ * NLM file handles are defined by specification to be a variable-length
+ * XDR opaque no longer than 1024 bytes. However, this implementation
+ * limits their length to the size of an NFSv3 file handle.
*/
-static __be32 *
-nlm4_decode_cookie(__be32 *p, struct nlm_cookie *c)
+static bool
+svcxdr_decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
{
- unsigned int len;
-
- len = ntohl(*p++);
-
- if(len==0)
- {
- c->len=4;
- memset(c->data, 0, 4); /* hockeypux brain damage */
- }
- else if(len<=NLM_MAXCOOKIELEN)
- {
- c->len=len;
- memcpy(c->data, p, len);
- p+=XDR_QUADLEN(len);
- }
- else
- {
- dprintk("lockd: bad cookie size %d (only cookies under "
- "%d bytes are supported.)\n",
- len, NLM_MAXCOOKIELEN);
- return NULL;
- }
- return p;
-}
-
-static __be32 *
-nlm4_encode_cookie(__be32 *p, struct nlm_cookie *c)
-{
- *p++ = htonl(c->len);
- memcpy(p, c->data, c->len);
- p+=XDR_QUADLEN(c->len);
- return p;
-}
-
-static __be32 *
-nlm4_decode_fh(__be32 *p, struct nfs_fh *f)
-{
- memset(f->data, 0, sizeof(f->data));
- f->size = ntohl(*p++);
- if (f->size > NFS_MAXFHSIZE) {
- dprintk("lockd: bad fhandle size %d (should be <=%d)\n",
- f->size, NFS_MAXFHSIZE);
- return NULL;
- }
- memcpy(f->data, p, f->size);
- return p + XDR_QUADLEN(f->size);
-}
-
-/*
- * Encode and decode owner handle
- */
-static __be32 *
-nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh)
-{
- return xdr_decode_netobj(p, oh);
+ __be32 *p;
+ u32 len;
+
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
+ return false;
+ if (len > NFS_MAXFHSIZE)
+ return false;
+
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return false;
+ fh->size = len;
+ memcpy(fh->data, p, len);
+ memset(fh->data + len, 0, sizeof(fh->data) - len);
+
+ return true;
}
-static __be32 *
-nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
+static bool
+svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
{
- struct file_lock *fl = &lock->fl;
- __u64 len, start;
- __s64 end;
-
- if (!(p = xdr_decode_string_inplace(p, &lock->caller,
- &lock->len, NLM_MAXSTRLEN))
- || !(p = nlm4_decode_fh(p, &lock->fh))
- || !(p = nlm4_decode_oh(p, &lock->oh)))
- return NULL;
- lock->svid = ntohl(*p++);
+ struct file_lock *fl = &lock->fl;
+ u64 len, start;
+ s64 end;
+
+ if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
+ return false;
+ if (!svcxdr_decode_fhandle(xdr, &lock->fh))
+ return false;
+ if (!svcxdr_decode_owner(xdr, &lock->oh))
+ return false;
+ if (xdr_stream_decode_u32(xdr, &lock->svid) < 0)
+ return false;
+ if (xdr_stream_decode_u64(xdr, &start) < 0)
+ return false;
+ if (xdr_stream_decode_u64(xdr, &len) < 0)
+ return false;
locks_init_lock(fl);
fl->fl_flags = FL_POSIX;
- fl->fl_type = F_RDLCK; /* as good as anything else */
- p = xdr_decode_hyper(p, &start);
- p = xdr_decode_hyper(p, &len);
+ fl->fl_type = F_RDLCK;
end = start + len - 1;
-
fl->fl_start = s64_to_loff_t(start);
-
if (len == 0 || end < 0)
fl->fl_end = OFFSET_MAX;
else
fl->fl_end = s64_to_loff_t(end);
- return p;
+
+ return true;
}
-/*
- * Encode result of a TEST/TEST_MSG call
- */
-static __be32 *
-nlm4_encode_testres(__be32 *p, struct nlm_res *resp)
+static bool
+svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock)
+{
+ const struct file_lock *fl = &lock->fl;
+ s64 start, len;
+
+ /* exclusive */
+ if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0)
+ return false;
+ if (xdr_stream_encode_u32(xdr, lock->svid) < 0)
+ return false;
+ if (!svcxdr_encode_owner(xdr, &lock->oh))
+ return false;
+ start = loff_t_to_s64(fl->fl_start);
+ if (fl->fl_end == OFFSET_MAX)
+ len = 0;
+ else
+ len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+ if (xdr_stream_encode_u64(xdr, start) < 0)
+ return false;
+ if (xdr_stream_encode_u64(xdr, len) < 0)
+ return false;
+
+ return true;
+}
+
+static bool
+svcxdr_encode_testrply(struct xdr_stream *xdr, const struct nlm_res *resp)
{
- s64 start, len;
-
- dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp);
- if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
- return NULL;
- *p++ = resp->status;
-
- if (resp->status == nlm_lck_denied) {
- struct file_lock *fl = &resp->lock.fl;
-
- *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
- *p++ = htonl(resp->lock.svid);
-
- /* Encode owner handle. */
- if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
- return NULL;
-
- start = loff_t_to_s64(fl->fl_start);
- if (fl->fl_end == OFFSET_MAX)
- len = 0;
- else
- len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
-
- p = xdr_encode_hyper(p, start);
- p = xdr_encode_hyper(p, len);
- dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n",
- resp->status, (int)resp->lock.svid, fl->fl_type,
- (long long)fl->fl_start, (long long)fl->fl_end);
+ if (!svcxdr_encode_stats(xdr, resp->status))
+ return false;
+ switch (resp->status) {
+ case nlm_lck_denied:
+ if (!svcxdr_encode_holder(xdr, &resp->lock))
+ return false;
}
- dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp);
- return p;
+ return true;
}
/*
- * First, the server side XDR functions
+ * Decode Call arguments
*/
+
+int
+nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p)
+{
+ return 1;
+}
+
int
nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- u32 exclusive;
+ u32 exclusive;
- if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
return 0;
-
- exclusive = ntohl(*p++);
- if (!(p = nlm4_decode_lock(p, &argp->lock)))
+ if (xdr_stream_decode_bool(xdr, &exclusive) < 0)
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
- return xdr_argsize_check(rqstp, p);
-}
-
-int
-nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
-{
- struct nlm_res *resp = rqstp->rq_resp;
-
- if (!(p = nlm4_encode_testres(p, resp)))
- return 0;
- return xdr_ressize_check(rqstp, p);
+ return 1;
}
int
nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- u32 exclusive;
+ u32 exclusive;
- if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (xdr_stream_decode_bool(xdr, &argp->block) < 0)
return 0;
- argp->block = ntohl(*p++);
- exclusive = ntohl(*p++);
- if (!(p = nlm4_decode_lock(p, &argp->lock)))
+ if (xdr_stream_decode_bool(xdr, &exclusive) < 0)
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
- argp->reclaim = ntohl(*p++);
- argp->state = ntohl(*p++);
+ if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
+ return 0;
argp->monitor = 1; /* monitor client by default */
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- u32 exclusive;
+ u32 exclusive;
- if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (xdr_stream_decode_bool(xdr, &argp->block) < 0)
return 0;
- argp->block = ntohl(*p++);
- exclusive = ntohl(*p++);
- if (!(p = nlm4_decode_lock(p, &argp->lock)))
+ if (xdr_stream_decode_bool(xdr, &exclusive) < 0)
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
- if (!(p = nlm4_decode_cookie(p, &argp->cookie))
- || !(p = nlm4_decode_lock(p, &argp->lock)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
+ return 0;
+ if (!svcxdr_decode_lock(xdr, &argp->lock))
return 0;
argp->lock.fl.fl_type = F_UNLCK;
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
int
-nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_args *argp = rqstp->rq_argp;
- struct nlm_lock *lock = &argp->lock;
-
- memset(lock, 0, sizeof(*lock));
- locks_init_lock(&lock->fl);
- lock->svid = ~(u32) 0;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nlm_res *resp = rqstp->rq_argp;
- if (!(p = nlm4_decode_cookie(p, &argp->cookie))
- || !(p = xdr_decode_string_inplace(p, &lock->caller,
- &lock->len, NLM_MAXSTRLEN))
- || !(p = nlm4_decode_fh(p, &lock->fh))
- || !(p = nlm4_decode_oh(p, &lock->oh)))
+ if (!svcxdr_decode_cookie(xdr, &resp->cookie))
+ return 0;
+ if (!svcxdr_decode_stats(xdr, &resp->status))
return 0;
- argp->fsm_mode = ntohl(*p++);
- argp->fsm_access = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
int
-nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_res *resp = rqstp->rq_resp;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nlm_reboot *argp = rqstp->rq_argp;
+ u32 len;
- if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+ if (xdr_stream_decode_u32(xdr, &len) < 0)
return 0;
- *p++ = resp->status;
- *p++ = xdr_zero; /* sequence argument */
- return xdr_ressize_check(rqstp, p);
+ if (len > SM_MAXSTRLEN)
+ return 0;
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ return 0;
+ argp->len = len;
+ argp->mon = (char *)p;
+ if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
+ return 0;
+ p = xdr_inline_decode(xdr, SM_PRIV_SIZE);
+ if (!p)
+ return 0;
+ memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
+
+ return 1;
}
int
-nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_res *resp = rqstp->rq_resp;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nlm_args *argp = rqstp->rq_argp;
+ struct nlm_lock *lock = &argp->lock;
+
+ memset(lock, 0, sizeof(*lock));
+ locks_init_lock(&lock->fl);
+ lock->svid = ~(u32)0;
- if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
+ if (!svcxdr_decode_cookie(xdr, &argp->cookie))
return 0;
- *p++ = resp->status;
- return xdr_ressize_check(rqstp, p);
+ if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
+ return 0;
+ if (!svcxdr_decode_fhandle(xdr, &lock->fh))
+ return 0;
+ if (!svcxdr_decode_owner(xdr, &lock->oh))
+ return 0;
+ /* XXX: Range checks are missing in the original code */
+ if (xdr_stream_decode_u32(xdr, &argp->fsm_mode) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->fsm_access) < 0)
+ return 0;
+
+ return 1;
}
int
nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_lock *lock = &argp->lock;
- if (!(p = xdr_decode_string_inplace(p, &lock->caller,
- &lock->len, NLM_MAXSTRLEN)))
+ if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->state) < 0)
return 0;
- argp->state = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+
+ return 1;
}
+
+/*
+ * Encode Reply results
+ */
+
int
-nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_reboot *argp = rqstp->rq_argp;
-
- if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
- return 0;
- argp->state = ntohl(*p++);
- memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
- p += XDR_QUADLEN(SM_PRIV_SIZE);
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
-nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
{
- struct nlm_res *resp = rqstp->rq_argp;
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct nlm_res *resp = rqstp->rq_resp;
- if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
- return 0;
- resp->status = *p++;
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_encode_cookie(xdr, &resp->cookie) &&
+ svcxdr_encode_testrply(xdr, resp);
}
int
-nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p)
{
- return xdr_argsize_check(rqstp, p);
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct nlm_res *resp = rqstp->rq_resp;
+
+ return svcxdr_encode_cookie(xdr, &resp->cookie) &&
+ svcxdr_encode_stats(xdr, resp->status);
}
int
-nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p)
+nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
{
- return xdr_ressize_check(rqstp, p);
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct nlm_res *resp = rqstp->rq_resp;
+
+ if (!svcxdr_encode_cookie(xdr, &resp->cookie))
+ return 0;
+ if (!svcxdr_encode_stats(xdr, resp->status))
+ return 0;
+ /* sequence */
+ if (xdr_stream_encode_u32(xdr, 0) < 0)
+ return 0;
+
+ return 1;
}
diff --git a/fs/namei.c b/fs/namei.c
index 79b0ff9b151e..bf6d8a738c59 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -554,7 +554,7 @@ struct nameidata {
struct qstr last;
struct path root;
struct inode *inode; /* path.dentry.d_inode */
- unsigned int flags;
+ unsigned int flags, state;
unsigned seq, m_seq, r_seq;
int last_type;
unsigned depth;
@@ -573,10 +573,15 @@ struct nameidata {
umode_t dir_mode;
} __randomize_layout;
-static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
+#define ND_ROOT_PRESET 1
+#define ND_ROOT_GRABBED 2
+#define ND_JUMPED 4
+
+static void __set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
struct nameidata *old = current->nameidata;
p->stack = p->internal;
+ p->depth = 0;
p->dfd = dfd;
p->name = name;
p->path.mnt = NULL;
@@ -586,6 +591,17 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
current->nameidata = p;
}
+static inline void set_nameidata(struct nameidata *p, int dfd, struct filename *name,
+ const struct path *root)
+{
+ __set_nameidata(p, dfd, name);
+ p->state = 0;
+ if (unlikely(root)) {
+ p->state = ND_ROOT_PRESET;
+ p->root = *root;
+ }
+}
+
static void restore_nameidata(void)
{
struct nameidata *now = current->nameidata, *old = now->saved;
@@ -645,9 +661,9 @@ static void terminate_walk(struct nameidata *nd)
path_put(&nd->path);
for (i = 0; i < nd->depth; i++)
path_put(&nd->stack[i].link);
- if (nd->flags & LOOKUP_ROOT_GRABBED) {
+ if (nd->state & ND_ROOT_GRABBED) {
path_put(&nd->root);
- nd->flags &= ~LOOKUP_ROOT_GRABBED;
+ nd->state &= ~ND_ROOT_GRABBED;
}
} else {
nd->flags &= ~LOOKUP_RCU;
@@ -710,9 +726,9 @@ static bool legitimize_root(struct nameidata *nd)
if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
return false;
/* Nothing to do if nd->root is zero or is managed by the VFS user. */
- if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
+ if (!nd->root.mnt || (nd->state & ND_ROOT_PRESET))
return true;
- nd->flags |= LOOKUP_ROOT_GRABBED;
+ nd->state |= ND_ROOT_GRABBED;
return legitimize_path(nd, &nd->root, nd->root_seq);
}
@@ -849,8 +865,9 @@ static int complete_walk(struct nameidata *nd)
* We don't want to zero nd->root for scoped-lookups or
* externally-managed nd->root.
*/
- if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
- nd->root.mnt = NULL;
+ if (!(nd->state & ND_ROOT_PRESET))
+ if (!(nd->flags & LOOKUP_IS_SCOPED))
+ nd->root.mnt = NULL;
nd->flags &= ~LOOKUP_CACHED;
if (!try_to_unlazy(nd))
return -ECHILD;
@@ -877,7 +894,7 @@ static int complete_walk(struct nameidata *nd)
return -EXDEV;
}
- if (likely(!(nd->flags & LOOKUP_JUMPED)))
+ if (likely(!(nd->state & ND_JUMPED)))
return 0;
if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
@@ -915,7 +932,7 @@ static int set_root(struct nameidata *nd)
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
- nd->flags |= LOOKUP_ROOT_GRABBED;
+ nd->state |= ND_ROOT_GRABBED;
}
return 0;
}
@@ -948,7 +965,7 @@ static int nd_jump_root(struct nameidata *nd)
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
- nd->flags |= LOOKUP_JUMPED;
+ nd->state |= ND_JUMPED;
return 0;
}
@@ -976,7 +993,7 @@ int nd_jump_link(struct path *path)
path_put(&nd->path);
nd->path = *path;
nd->inode = nd->path.dentry->d_inode;
- nd->flags |= LOOKUP_JUMPED;
+ nd->state |= ND_JUMPED;
return 0;
err:
@@ -1423,7 +1440,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
if (mounted) {
path->mnt = &mounted->mnt;
dentry = path->dentry = mounted->mnt.mnt_root;
- nd->flags |= LOOKUP_JUMPED;
+ nd->state |= ND_JUMPED;
*seqp = read_seqcount_begin(&dentry->d_seq);
*inode = dentry->d_inode;
/*
@@ -1468,7 +1485,7 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
if (unlikely(nd->flags & LOOKUP_NO_XDEV))
ret = -EXDEV;
else
- nd->flags |= LOOKUP_JUMPED;
+ nd->state |= ND_JUMPED;
}
if (unlikely(ret)) {
dput(path->dentry);
@@ -2219,7 +2236,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
case 2:
if (name[1] == '.') {
type = LAST_DOTDOT;
- nd->flags |= LOOKUP_JUMPED;
+ nd->state |= ND_JUMPED;
}
break;
case 1:
@@ -2227,7 +2244,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
}
if (likely(type == LAST_NORM)) {
struct dentry *parent = nd->path.dentry;
- nd->flags &= ~LOOKUP_JUMPED;
+ nd->state &= ~ND_JUMPED;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
struct qstr this = { { .hash_len = hash_len }, .name = name };
err = parent->d_op->d_hash(parent, &this);
@@ -2301,14 +2318,14 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
if (flags & LOOKUP_RCU)
rcu_read_lock();
- nd->flags = flags | LOOKUP_JUMPED;
- nd->depth = 0;
+ nd->flags = flags;
+ nd->state |= ND_JUMPED;
nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
smp_rmb();
- if (flags & LOOKUP_ROOT) {
+ if (nd->state & ND_ROOT_PRESET) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
if (*s && unlikely(!d_can_lookup(root)))
@@ -2383,7 +2400,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->root_seq = nd->seq;
} else {
path_get(&nd->root);
- nd->flags |= LOOKUP_ROOT_GRABBED;
+ nd->state |= ND_ROOT_GRABBED;
}
}
return s;
@@ -2422,7 +2439,7 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
;
if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
err = handle_lookup_down(nd);
- nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+ nd->state &= ~ND_JUMPED; // no d_weak_revalidate(), please...
}
if (!err)
err = complete_walk(nd);
@@ -2446,11 +2463,7 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
struct nameidata nd;
if (IS_ERR(name))
return PTR_ERR(name);
- if (unlikely(root)) {
- nd.root = *root;
- flags |= LOOKUP_ROOT;
- }
- set_nameidata(&nd, dfd, name);
+ set_nameidata(&nd, dfd, name, root);
retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
if (unlikely(retval == -ECHILD))
retval = path_lookupat(&nd, flags, path);
@@ -2491,7 +2504,7 @@ static struct filename *filename_parentat(int dfd, struct filename *name,
if (IS_ERR(name))
return name;
- set_nameidata(&nd, dfd, name);
+ set_nameidata(&nd, dfd, name, NULL);
retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
if (unlikely(retval == -ECHILD))
retval = path_parentat(&nd, flags, parent);
@@ -3517,7 +3530,7 @@ struct file *do_filp_open(int dfd, struct filename *pathname,
int flags = op->lookup_flags;
struct file *filp;
- set_nameidata(&nd, dfd, pathname);
+ set_nameidata(&nd, dfd, pathname, NULL);
filp = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(filp == ERR_PTR(-ECHILD)))
filp = path_openat(&nd, op, flags);
@@ -3527,25 +3540,22 @@ struct file *do_filp_open(int dfd, struct filename *pathname,
return filp;
}
-struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+struct file *do_file_open_root(const struct path *root,
const char *name, const struct open_flags *op)
{
struct nameidata nd;
struct file *file;
struct filename *filename;
- int flags = op->lookup_flags | LOOKUP_ROOT;
-
- nd.root.mnt = mnt;
- nd.root.dentry = dentry;
+ int flags = op->lookup_flags;
- if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
+ if (d_is_symlink(root->dentry) && op->intent & LOOKUP_OPEN)
return ERR_PTR(-ELOOP);
filename = getname_kernel(name);
if (IS_ERR(filename))
return ERR_CAST(filename);
- set_nameidata(&nd, -1, filename);
+ set_nameidata(&nd, -1, filename, root);
file = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(file == ERR_PTR(-ECHILD)))
file = path_openat(&nd, op, flags);
diff --git a/fs/namespace.c b/fs/namespace.c
index 2279473d0d6f..97adcb5ab5d5 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1942,6 +1942,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
namespace_unlock();
}
+static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+ struct mount *child;
+
+ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ if (!is_subdir(child->mnt_mountpoint, dentry))
+ continue;
+
+ if (child->mnt.mnt_flags & MNT_LOCKED)
+ return true;
+ }
+ return false;
+}
+
/**
* clone_private_mount - create a private clone of a path
* @path: path to clone
@@ -1957,10 +1971,19 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
+ down_read(&namespace_sem);
if (IS_MNT_UNBINDABLE(old_mnt))
- return ERR_PTR(-EINVAL);
+ goto invalid;
+
+ if (!check_mnt(old_mnt))
+ goto invalid;
+
+ if (has_locked_children(old_mnt, path->dentry))
+ goto invalid;
new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+ up_read(&namespace_sem);
+
if (IS_ERR(new_mnt))
return ERR_CAST(new_mnt);
@@ -1968,6 +1991,10 @@ struct vfsmount *clone_private_mount(const struct path *path)
new_mnt->mnt_ns = MNT_NS_INTERNAL;
return &new_mnt->mnt;
+
+invalid:
+ up_read(&namespace_sem);
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
@@ -2319,19 +2346,6 @@ static int do_change_type(struct path *path, int ms_flags)
return err;
}
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
-{
- struct mount *child;
- list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
- if (!is_subdir(child->mnt_mountpoint, dentry))
- continue;
-
- if (child->mnt.mnt_flags & MNT_LOCKED)
- return true;
- }
- return false;
-}
-
static struct mount *__do_loopback(struct path *old_path, int recurse)
{
struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index e6ec6f09ac6e..11118398f495 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -75,6 +75,13 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}
+static void nfs_mark_return_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
+{
+ set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+}
+
static bool
nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
fmode_t flags)
@@ -293,6 +300,7 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
goto out;
spin_lock(&delegation->lock);
if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
}
@@ -314,16 +322,17 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
return delegation;
}
-static void
-nfs_abort_delegation_return(struct nfs_delegation *delegation,
- struct nfs_client *clp)
+static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
+ struct nfs_client *clp, int err)
{
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ if (err == -EAGAIN) {
+ set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
+ set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
+ }
spin_unlock(&delegation->lock);
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}
static struct nfs_delegation *
@@ -521,11 +530,18 @@ out:
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
if (delegation == NULL)
return 0;
- do {
+
+ if (!issync)
+ mode |= O_NONBLOCK;
+ /* Recall of any remaining application leases */
+ err = break_lease(inode, mode);
+
+ while (err == 0) {
if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
break;
err = nfs_delegation_claim_opens(inode, &delegation->stateid,
@@ -536,10 +552,10 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
* Guard against state recovery
*/
err = nfs4_wait_clnt_recover(clp);
- } while (err == 0);
+ }
if (err) {
- nfs_abort_delegation_return(delegation, clp);
+ nfs_abort_delegation_return(delegation, clp, err);
goto out;
}
@@ -568,6 +584,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (ret)
clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
ret = false;
@@ -647,6 +664,38 @@ out:
return err;
}
+static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
+{
+ struct nfs_delegation *d;
+ bool ret = false;
+
+ list_for_each_entry_rcu (d, &server->delegations, super_list) {
+ if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
+ continue;
+ nfs_mark_return_delegation(server, d);
+ clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
+ ret = true;
+ }
+ return ret;
+}
+
+static bool nfs_client_clear_delayed_delegations(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ bool ret = false;
+
+ if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state))
+ goto out;
+ rcu_read_lock();
+ list_for_each_entry_rcu (server, &clp->cl_superblocks, client_link) {
+ if (nfs_server_clear_delayed_delegations(server))
+ ret = true;
+ }
+ rcu_read_unlock();
+out:
+ return ret;
+}
+
/**
* nfs_client_return_marked_delegations - return previously marked delegations
* @clp: nfs_client to process
@@ -659,8 +708,14 @@ out:
*/
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
- return nfs_client_for_each_server(clp,
- nfs_server_return_marked_delegations, NULL);
+ int err = nfs_client_for_each_server(
+ clp, nfs_server_return_marked_delegations, NULL);
+ if (err)
+ return err;
+ /* If a return was delayed, sleep to prevent hard looping */
+ if (nfs_client_clear_delayed_delegations(clp))
+ ssleep(1);
+ return 0;
}
/**
@@ -698,13 +753,14 @@ int nfs4_inode_return_delegation(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
- int err = 0;
- nfs_wb_all(inode);
delegation = nfs_start_delegation_return(nfsi);
+ /* Synchronous recall of any application leases */
+ break_lease(inode, O_WRONLY | O_RDWR);
+ nfs_wb_all(inode);
if (delegation != NULL)
- err = nfs_end_delegation_return(inode, delegation, 1);
- return err;
+ return nfs_end_delegation_return(inode, delegation, 1);
+ return 0;
}
/**
@@ -775,13 +831,6 @@ static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
-static void nfs_mark_return_delegation(struct nfs_server *server,
- struct nfs_delegation *delegation)
-{
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
-}
-
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
struct nfs_delegation *delegation;
@@ -1010,6 +1059,9 @@ int nfs_async_inode_return_delegation(struct inode *inode,
nfs_mark_return_delegation(server, delegation);
rcu_read_unlock();
+ /* If there are any application leases or delegations, recall them */
+ break_lease(inode, O_WRONLY | O_RDWR | O_NONBLOCK);
+
nfs_delegation_run_state_manager(clp);
return 0;
out_enoent:
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index c19b4fd20781..1c378992b7c0 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -36,6 +36,7 @@ enum {
NFS_DELEGATION_REVOKED,
NFS_DELEGATION_TEST_EXPIRED,
NFS_DELEGATION_INODE_FREEING,
+ NFS_DELEGATION_RETURN_DELAYED,
};
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 2d30a4da49fa..2e894fec036b 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -700,8 +700,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
struct nfs_direct_req *dreq = hdr->dreq;
struct nfs_commit_info cinfo;
- bool request_commit = false;
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ int flags = NFS_ODIRECT_DONE;
nfs_init_cinfo_from_dreq(&cinfo, dreq);
@@ -713,15 +713,9 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
nfs_direct_count_bytes(dreq, hdr);
if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
- switch (dreq->flags) {
- case 0:
+ if (!dreq->flags)
dreq->flags = NFS_ODIRECT_DO_COMMIT;
- request_commit = true;
- break;
- case NFS_ODIRECT_RESCHED_WRITES:
- case NFS_ODIRECT_DO_COMMIT:
- request_commit = true;
- }
+ flags = dreq->flags;
}
spin_unlock(&dreq->lock);
@@ -729,12 +723,15 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
req = nfs_list_entry(hdr->pages.next);
nfs_list_remove_request(req);
- if (request_commit) {
+ if (flags == NFS_ODIRECT_DO_COMMIT) {
kref_get(&req->wb_kref);
memcpy(&req->wb_verf, &hdr->verf.verifier,
sizeof(req->wb_verf));
nfs_mark_request_commit(req, hdr->lseg, &cinfo,
hdr->ds_commit_idx);
+ } else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
+ kref_get(&req->wb_kref);
+ nfs_mark_request_commit(req, NULL, &cinfo, 0);
}
nfs_unlock_and_release_request(req);
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index c4c021c6ebbd..d743629e05e1 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -385,12 +385,15 @@ static void nfs_readpage_from_fscache_complete(struct page *page,
"NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
page, context, error);
- /* if the read completes with an error, we just unlock the page and let
- * the VM reissue the readpage */
- if (!error) {
+ /*
+ * If the read completes with an error, mark the page with PG_checked,
+ * unlock the page, and let the VM reissue the readpage.
+ */
+ if (!error)
SetPageUptodate(page);
- unlock_page(page);
- }
+ else
+ SetPageChecked(page);
+ unlock_page(page);
}
/*
@@ -405,6 +408,11 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
"NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
nfs_i_fscache(inode), page, page->index, page->flags, inode);
+ if (PageChecked(page)) {
+ ClearPageChecked(page);
+ return 1;
+ }
+
ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
page,
nfs_readpage_from_fscache_complete,
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index aaeeb4659bff..59355c106ece 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -67,7 +67,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
int nfs_get_root(struct super_block *s, struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
- struct nfs_server *server = NFS_SB(s);
+ struct nfs_server *server = NFS_SB(s), *clone_server;
struct nfs_fsinfo fsinfo;
struct dentry *root;
struct inode *inode;
@@ -127,7 +127,7 @@ int nfs_get_root(struct super_block *s, struct fs_context *fc)
}
spin_unlock(&root->d_lock);
fc->root = root;
- if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
+ if (server->caps & NFS_CAP_SECURITY_LABEL)
kflags |= SECURITY_LSM_NATIVE_LABELS;
if (ctx->clone_data.sb) {
if (d_inode(fc->root)->i_fop != &nfs_dir_operations) {
@@ -137,15 +137,19 @@ int nfs_get_root(struct super_block *s, struct fs_context *fc)
/* clone lsm security options from the parent to the new sb */
error = security_sb_clone_mnt_opts(ctx->clone_data.sb,
s, kflags, &kflags_out);
+ if (error)
+ goto error_splat_root;
+ clone_server = NFS_SB(ctx->clone_data.sb);
+ server->has_sec_mnt_opts = clone_server->has_sec_mnt_opts;
} else {
error = security_sb_set_mnt_opts(s, fc->security,
kflags, &kflags_out);
}
if (error)
goto error_splat_root;
- if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
+ if (server->caps & NFS_CAP_SECURITY_LABEL &&
!(kflags_out & SECURITY_LSM_NATIVE_LABELS))
- NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
+ server->caps &= ~NFS_CAP_SECURITY_LABEL;
nfs_setsecurity(inode, fsinfo.fattr, fsinfo.fattr->label);
error = 0;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 529c4099f482..853213b3a209 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1101,6 +1101,7 @@ EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
{
filp->private_data = get_nfs_open_context(ctx);
+ set_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
if (list_empty(&ctx->list))
nfs_inode_attach_open_context(ctx);
}
@@ -1120,6 +1121,8 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct
continue;
if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
continue;
+ if (!test_bit(NFS_CONTEXT_FILE_OPEN, &pos->flags))
+ continue;
ctx = get_nfs_open_context(pos);
if (ctx)
break;
@@ -1135,6 +1138,7 @@ void nfs_file_clear_open_context(struct file *filp)
if (ctx) {
struct inode *inode = d_inode(ctx->dentry);
+ clear_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
/*
* We fatal error on write before. Try to writeback
* every page again.
@@ -2055,35 +2059,33 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
| NFS_INO_INVALID_OTHER;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
+ attr_changed = true;
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id,
inode->i_ino);
} else if (!have_delegation)
nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
inode_set_iversion_raw(inode, fattr->change_attr);
- attr_changed = true;
}
} else {
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_CHANGE;
- cache_revalidated = false;
+ if (!have_delegation ||
+ (nfsi->cache_validity & NFS_INO_INVALID_CHANGE) != 0)
+ cache_revalidated = false;
}
- if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
+ if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
- } else if (fattr_supported & NFS_ATTR_FATTR_MTIME) {
+ else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_MTIME;
- cache_revalidated = false;
- }
- if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
+ if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
- } else if (fattr_supported & NFS_ATTR_FATTR_CTIME) {
+ else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_CTIME;
- cache_revalidated = false;
- }
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
@@ -2096,7 +2098,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
i_size_write(inode, new_isize);
if (!have_writers)
invalid |= NFS_INO_INVALID_DATA;
- attr_changed = true;
}
dprintk("NFS: isize change on server for file %s/%ld "
"(%Ld to %Ld)\n",
@@ -2111,19 +2112,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
fattr->du.nfs3.used = 0;
fattr->valid |= NFS_ATTR_FATTR_SPACE_USED;
}
- } else {
+ } else
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_SIZE;
- cache_revalidated = false;
- }
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
inode->i_atime = fattr->atime;
- else if (fattr_supported & NFS_ATTR_FATTR_ATIME) {
+ else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_ATIME;
- cache_revalidated = false;
- }
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
@@ -2132,71 +2129,55 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
- attr_changed = true;
}
- } else if (fattr_supported & NFS_ATTR_FATTR_MODE) {
+ } else if (fattr_supported & NFS_ATTR_FATTR_MODE)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_MODE;
- cache_revalidated = false;
- }
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (!uid_eq(inode->i_uid, fattr->uid)) {
invalid |= NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
- attr_changed = true;
}
- } else if (fattr_supported & NFS_ATTR_FATTR_OWNER) {
+ } else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_OTHER;
- cache_revalidated = false;
- }
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (!gid_eq(inode->i_gid, fattr->gid)) {
invalid |= NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
- attr_changed = true;
}
- } else if (fattr_supported & NFS_ATTR_FATTR_GROUP) {
+ } else if (fattr_supported & NFS_ATTR_FATTR_GROUP)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_OTHER;
- cache_revalidated = false;
- }
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
if (S_ISDIR(inode->i_mode))
invalid |= NFS_INO_INVALID_DATA;
set_nlink(inode, fattr->nlink);
- attr_changed = true;
}
- } else if (fattr_supported & NFS_ATTR_FATTR_NLINK) {
+ } else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_NLINK;
- cache_revalidated = false;
- }
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
* report the blocks in 512byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
- } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED) {
+ } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_BLOCKS;
- cache_revalidated = false;
- }
- if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) {
+ if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
- } else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED) {
+ else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_BLOCKS;
- cache_revalidated = false;
- }
/* Update attrtimeo value if we're out of the unstable period */
if (attr_changed) {
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 5c4e23abc345..2299446b3b89 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -385,7 +385,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
break;
case NFS3_CREATE_UNCHECKED:
- goto out;
+ goto out_release_acls;
}
nfs_fattr_init(data->res.dir_attr);
nfs_fattr_init(data->res.fattr);
@@ -751,7 +751,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
break;
default:
status = -EINVAL;
- goto out;
+ goto out_release_acls;
}
d_alias = nfs3_do_create(dir, dentry, data);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 543d916f79ab..ba78df4b13d9 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -45,6 +45,7 @@ enum nfs4_client_state {
NFS4CLNT_RECALL_RUNNING,
NFS4CLNT_RECALL_ANY_LAYOUT_READ,
NFS4CLNT_RECALL_ANY_LAYOUT_RW,
+ NFS4CLNT_DELEGRETURN_DELAYED,
};
#define NFS4_RENEW_TIMEOUT 0x01
@@ -322,7 +323,8 @@ extern int update_open_stateid(struct nfs4_state *state,
const nfs4_stateid *open_stateid,
const nfs4_stateid *deleg_stateid,
fmode_t fmode);
-
+extern int nfs4_proc_setlease(struct file *file, long arg,
+ struct file_lock **lease, void **priv);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
extern void nfs4_update_changeattr(struct inode *dir,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 42719384e25f..28431acd1230 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -197,8 +197,11 @@ void nfs40_shutdown_client(struct nfs_client *clp)
struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
{
- int err;
+ char buf[INET6_ADDRSTRLEN + 1];
+ const char *ip_addr = cl_init->ip_addr;
struct nfs_client *clp = nfs_alloc_client(cl_init);
+ int err;
+
if (IS_ERR(clp))
return clp;
@@ -222,6 +225,44 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
INIT_LIST_HEAD(&clp->pending_cb_stateids);
+
+ if (cl_init->minorversion != 0)
+ __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
+ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+
+ /*
+	 * Set up the connection to the server before we add it to the
+ * global list.
+ */
+ err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
+ if (err == -EINVAL)
+ err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
+ if (err < 0)
+ goto error;
+
+ /* If no clientaddr= option was specified, find a usable cb address */
+ if (ip_addr == NULL) {
+ struct sockaddr_storage cb_addr;
+ struct sockaddr *sap = (struct sockaddr *)&cb_addr;
+
+ err = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
+ if (err < 0)
+ goto error;
+ err = rpc_ntop(sap, buf, sizeof(buf));
+ if (err < 0)
+ goto error;
+ ip_addr = (const char *)buf;
+ }
+ strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+
+ err = nfs_idmap_new(clp);
+ if (err < 0) {
+ dprintk("%s: failed to create idmapper. Error = %d\n",
+ __func__, err);
+ goto error;
+ }
+ __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
return clp;
error:
@@ -372,8 +413,6 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *cl_init)
{
- char buf[INET6_ADDRSTRLEN + 1];
- const char *ip_addr = cl_init->ip_addr;
struct nfs_client *old;
int error;
@@ -381,43 +420,6 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
/* the client is initialised already */
return clp;
- /* Check NFS protocol revision and initialize RPC op vector */
- clp->rpc_ops = &nfs_v4_clientops;
-
- if (clp->cl_minorversion != 0)
- __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
- __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
- __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
-
- error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
- if (error == -EINVAL)
- error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
- if (error < 0)
- goto error;
-
- /* If no clientaddr= option was specified, find a usable cb address */
- if (ip_addr == NULL) {
- struct sockaddr_storage cb_addr;
- struct sockaddr *sap = (struct sockaddr *)&cb_addr;
-
- error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
- if (error < 0)
- goto error;
- error = rpc_ntop(sap, buf, sizeof(buf));
- if (error < 0)
- goto error;
- ip_addr = (const char *)buf;
- }
- strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
-
- error = nfs_idmap_new(clp);
- if (error < 0) {
- dprintk("%s: failed to create idmapper. Error = %d\n",
- __func__, error);
- goto error;
- }
- __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
-
error = nfs4_init_client_minor_version(clp);
if (error < 0)
goto error;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index a1e5c6b85ded..c820de58a661 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -435,6 +435,12 @@ void nfs42_ssc_unregister_ops(void)
}
#endif /* CONFIG_NFS_V4_2 */
+static int nfs4_setlease(struct file *file, long arg, struct file_lock **lease,
+ void **priv)
+{
+ return nfs4_proc_setlease(file, arg, lease, priv);
+}
+
const struct file_operations nfs4_file_operations = {
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
@@ -448,7 +454,7 @@ const struct file_operations nfs4_file_operations = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
- .setlease = simple_nosetlease,
+ .setlease = nfs4_setlease,
#ifdef CONFIG_NFS_V4_2
.copy_file_range = nfs4_copy_file_range,
.llseek = nfs4_file_llseek,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e653654c10bc..e1214bb6b7ee 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1155,7 +1155,11 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res)
{
- return nfs4_do_call_sync(clnt, server, msg, args, res, 0);
+ unsigned short task_flags = 0;
+
+ if (server->nfs_client->cl_minorversion)
+ task_flags = RPC_TASK_MOVEABLE;
+ return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}
@@ -1205,12 +1209,12 @@ nfs4_update_changeattr_locked(struct inode *inode,
u64 change_attr = inode_peek_iversion_raw(inode);
cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
+ if (S_ISDIR(inode->i_mode))
+ cache_validity |= NFS_INO_INVALID_DATA;
switch (NFS_SERVER(inode)->change_attr_type) {
case NFS4_CHANGE_TYPE_IS_UNDEFINED:
- break;
- case NFS4_CHANGE_TYPE_IS_TIME_METADATA:
- if ((s64)(change_attr - cinfo->after) > 0)
+ if (cinfo->after == change_attr)
goto out;
break;
default:
@@ -1218,24 +1222,21 @@ nfs4_update_changeattr_locked(struct inode *inode,
goto out;
}
- if (cinfo->atomic && cinfo->before == change_attr) {
- nfsi->attrtimeo_timestamp = jiffies;
- } else {
- if (S_ISDIR(inode->i_mode)) {
- cache_validity |= NFS_INO_INVALID_DATA;
+ inode_set_iversion_raw(inode, cinfo->after);
+ if (!cinfo->atomic || cinfo->before != change_attr) {
+ if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
- } else {
- if (!NFS_PROTO(inode)->have_delegation(inode,
- FMODE_READ))
- cache_validity |= NFS_INO_REVAL_PAGECACHE;
- }
- if (cinfo->before != change_attr)
- cache_validity |= NFS_INO_INVALID_ACCESS |
- NFS_INO_INVALID_ACL |
- NFS_INO_INVALID_XATTR;
+ if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ cache_validity |=
+ NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
+ NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
+ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
+ NFS_INO_REVAL_PAGECACHE;
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
}
- inode_set_iversion_raw(inode, cinfo->after);
+ nfsi->attrtimeo_timestamp = jiffies;
nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
@@ -2569,6 +2570,9 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
};
int status;
+ if (server->nfs_client->cl_minorversion)
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
kref_get(&data->kref);
data->rpc_done = false;
data->rpc_status = 0;
@@ -3749,6 +3753,9 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
};
int status = -ENOMEM;
+ if (server->nfs_client->cl_minorversion)
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
&task_setup_data.rpc_client, &msg);
@@ -4188,6 +4195,9 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
};
unsigned short task_flags = 0;
+ if (nfs4_has_session(server->nfs_client))
+ task_flags = RPC_TASK_MOVEABLE;
+
/* Is this is an attribute revalidation, subject to softreval? */
if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
task_flags |= RPC_TASK_TIMEOUT;
@@ -4307,6 +4317,9 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
};
unsigned short task_flags = 0;
+ if (server->nfs_client->cl_minorversion)
+ task_flags = RPC_TASK_MOVEABLE;
+
/* Is this is an attribute revalidation, subject to softreval? */
if (nfs_lookup_is_soft_revalidate(dentry))
task_flags |= RPC_TASK_TIMEOUT;
@@ -6538,7 +6551,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
- .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
};
int status = 0;
@@ -6856,6 +6869,11 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
+ struct nfs_client *client =
+ NFS_SERVER(lsp->ls_state->inode)->nfs_client;
+
+ if (client->cl_minorversion)
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
@@ -7130,6 +7148,10 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int ret;
+ struct nfs_client *client = NFS_SERVER(state->inode)->nfs_client;
+
+ if (client->cl_minorversion)
+ task_setup_data.flags |= RPC_TASK_MOVEABLE;
dprintk("%s: begin!\n", __func__);
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
@@ -7438,6 +7460,43 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
return nfs4_retry_setlk(state, cmd, request);
}
+static int nfs4_delete_lease(struct file *file, void **priv)
+{
+ return generic_setlease(file, F_UNLCK, NULL, priv);
+}
+
+static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
+ void **priv)
+{
+ struct inode *inode = file_inode(file);
+ fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
+ int ret;
+
+ /* No delegation, no lease */
+ if (!nfs4_have_delegation(inode, type))
+ return -EAGAIN;
+ ret = generic_setlease(file, arg, lease, priv);
+ if (ret || nfs4_have_delegation(inode, type))
+ return ret;
+ /* We raced with a delegation return */
+ nfs4_delete_lease(file, priv);
+ return -EAGAIN;
+}
+
+int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
+ void **priv)
+{
+ switch (arg) {
+ case F_RDLCK:
+ case F_WRLCK:
+ return nfs4_add_lease(file, arg, lease, priv);
+ case F_UNLCK:
+ return nfs4_delete_lease(file, priv);
+ default:
+ return -EINVAL;
+ }
+}
+
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs_server *server = NFS_SERVER(state->inode);
@@ -9186,7 +9245,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs41_sequence_ops,
- .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
};
struct rpc_task *ret;
@@ -9385,7 +9444,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
{
struct inode *inode = lgp->args.inode;
struct nfs_server *server = NFS_SERVER(inode);
- struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_hdr *lo = lgp->lo;
int nfs4err = task->tk_status;
int err, status = 0;
LIST_HEAD(head);
@@ -9437,7 +9496,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
case -NFS4ERR_BAD_STATEID:
exception->timeout = 0;
spin_lock(&inode->i_lock);
- lo = NFS_I(inode)->layout;
/* If the open stateid was bad, then recover it. */
if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
!nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
@@ -9509,7 +9567,8 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
.rpc_message = &msg,
.callback_ops = &nfs4_layoutget_call_ops,
.callback_data = lgp,
- .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
+ RPC_TASK_MOVEABLE,
};
struct pnfs_layout_segment *lseg = NULL;
struct nfs4_exception exception = {
@@ -9520,9 +9579,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
dprintk("--> %s\n", __func__);
- /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
- pnfs_get_layout_hdr(NFS_I(inode)->layout);
-
nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
task = rpc_run_task(&task_setup_data);
@@ -9650,6 +9706,7 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
.rpc_message = &msg,
.callback_ops = &nfs4_layoutreturn_call_ops,
.callback_data = lrp,
+ .flags = RPC_TASK_MOVEABLE,
};
int status = 0;
@@ -9804,6 +9861,7 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
.rpc_message = &msg,
.callback_ops = &nfs4_layoutcommit_ops,
.callback_data = data,
+ .flags = RPC_TASK_MOVEABLE,
};
struct rpc_task *task;
int status = 0;
@@ -10131,7 +10189,7 @@ static int nfs41_free_stateid(struct nfs_server *server,
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs41_free_stateid_ops,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
};
struct nfs_free_stateid_data *data;
struct rpc_task *task;
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 2ef75caad6da..7a2567aa2b86 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -625,7 +625,7 @@ TRACE_EVENT(nfs4_state_mgr,
TP_fast_assign(
__entry->state = clp->cl_state;
- __assign_str(hostname, clp->cl_hostname)
+ __assign_str(hostname, clp->cl_hostname);
),
TP_printk(
@@ -1637,7 +1637,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
__entry->fileid = 0;
__entry->dev = 0;
}
- __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
+ __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown");
),
TP_printk(
@@ -1694,7 +1694,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
__entry->fileid = 0;
__entry->dev = 0;
}
- __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
+ __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown");
__entry->stateid_seq =
be32_to_cpu(stateid->seqid);
__entry->stateid_hash =
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index ccef43e02b48..8a224871be74 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -280,8 +280,6 @@ TRACE_DEFINE_ENUM(LOOKUP_OPEN);
TRACE_DEFINE_ENUM(LOOKUP_CREATE);
TRACE_DEFINE_ENUM(LOOKUP_EXCL);
TRACE_DEFINE_ENUM(LOOKUP_RENAME_TARGET);
-TRACE_DEFINE_ENUM(LOOKUP_JUMPED);
-TRACE_DEFINE_ENUM(LOOKUP_ROOT);
TRACE_DEFINE_ENUM(LOOKUP_EMPTY);
TRACE_DEFINE_ENUM(LOOKUP_DOWN);
@@ -297,8 +295,6 @@ TRACE_DEFINE_ENUM(LOOKUP_DOWN);
{ LOOKUP_CREATE, "CREATE" }, \
{ LOOKUP_EXCL, "EXCL" }, \
{ LOOKUP_RENAME_TARGET, "RENAME_TARGET" }, \
- { LOOKUP_JUMPED, "JUMPED" }, \
- { LOOKUP_ROOT, "ROOT" }, \
{ LOOKUP_EMPTY, "EMPTY" }, \
{ LOOKUP_DOWN, "DOWN" })
@@ -1427,8 +1423,8 @@ DECLARE_EVENT_CLASS(nfs_xdr_event,
__entry->version = task->tk_client->cl_vers;
__entry->error = error;
__assign_str(program,
- task->tk_client->cl_program->name)
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ task->tk_client->cl_program->name);
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
),
TP_printk(
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index cf9cc62ec48e..cc232d1f16f2 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -954,6 +954,7 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
struct nfs_pgio_header *hdr;
int ret;
+ unsigned short task_flags = 0;
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
if (!hdr) {
@@ -962,14 +963,17 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
}
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
- if (ret == 0)
+ if (ret == 0) {
+ if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
+ task_flags = RPC_TASK_MOVEABLE;
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
hdr,
hdr->cred,
NFS_PROTO(hdr->inode),
desc->pg_rpc_callops,
desc->pg_ioflags,
- RPC_TASK_CRED_NOREF);
+ RPC_TASK_CRED_NOREF | task_flags);
+ }
return ret;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2c01ee805306..ef14ea0b6ab8 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -966,10 +966,8 @@ void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
const struct cred *cred, bool update_barrier)
{
- u32 oldseq, newseq, new_barrier = 0;
-
- oldseq = be32_to_cpu(lo->plh_stateid.seqid);
- newseq = be32_to_cpu(new->seqid);
+ u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
+ u32 newseq = be32_to_cpu(new->seqid);
if (!pnfs_layout_is_valid(lo)) {
pnfs_set_layout_cred(lo, cred);
@@ -979,19 +977,21 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
return;
}
- if (pnfs_seqid_is_newer(newseq, oldseq)) {
+
+ if (pnfs_seqid_is_newer(newseq, oldseq))
nfs4_stateid_copy(&lo->plh_stateid, new);
- /*
- * Because of wraparound, we want to keep the barrier
- * "close" to the current seqids.
- */
- new_barrier = newseq - atomic_read(&lo->plh_outstanding);
- }
- if (update_barrier)
- new_barrier = be32_to_cpu(new->seqid);
- else if (new_barrier == 0)
+
+ if (update_barrier) {
+ pnfs_barrier_update(lo, newseq);
return;
- pnfs_barrier_update(lo, new_barrier);
+ }
+ /*
+ * Because of wraparound, we want to keep the barrier
+ * "close" to the current seqids. We really only want to
+ * get here from a layoutget call.
+ */
+ if (atomic_read(&lo->plh_outstanding) == 1)
+ pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
}
static bool
@@ -1128,8 +1128,7 @@ void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;
nfs4_free_pages(lgp->args.layout.pages, max_pages);
- if (lgp->args.inode)
- pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
+ pnfs_put_layout_hdr(lgp->lo);
put_nfs_open_context(lgp->args.ctx);
kfree(lgp);
}
@@ -2014,7 +2013,7 @@ lookup_again:
* If the layout segment list is empty, but there are outstanding
* layoutget calls, then they might be subject to a layoutrecall.
*/
- if (list_empty(&lo->plh_segs) &&
+ if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
atomic_read(&lo->plh_outstanding) != 0) {
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
@@ -2124,6 +2123,9 @@ lookup_again:
goto out_put_layout_hdr;
}
+ lgp->lo = lo;
+ pnfs_get_layout_hdr(lo);
+
lseg = nfs4_proc_layoutget(lgp, &timeout);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
@@ -2255,6 +2257,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
pnfs_put_layout_hdr(lo);
return;
}
+ lgp->lo = lo;
data->lgp = lgp;
data->o_arg.lg_args = &lgp->args;
data->o_res.lg_res = &lgp->res;
@@ -2263,6 +2266,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
static void _lgopen_prepare_floating(struct nfs4_opendata *data,
struct nfs_open_context *ctx)
{
+ struct inode *ino = data->dentry->d_inode;
struct pnfs_layout_range rng = {
.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
IOMODE_RW: IOMODE_READ,
@@ -2271,7 +2275,7 @@ static void _lgopen_prepare_floating(struct nfs4_opendata *data,
};
struct nfs4_layoutget *lgp;
- lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
+ lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
&rng, GFP_KERNEL);
if (!lgp)
return;
@@ -2291,6 +2295,8 @@ void pnfs_lgopen_prepare(struct nfs4_opendata *data,
/* Could check on max_ops, but currently hardcoded high enough */
if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
return;
+ if (data->lgp)
+ return;
if (data->state)
_lgopen_prepare_attached(data, ctx);
else
@@ -2330,13 +2336,13 @@ void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
}
return;
}
- if (!lgp->args.inode) {
+ if (!lgp->lo) {
lo = _pnfs_grab_empty_layout(ino, ctx);
if (!lo)
return;
- lgp->args.inode = ino;
+ lgp->lo = lo;
} else
- lo = NFS_I(lgp->args.inode)->layout;
+ lo = lgp->lo;
lseg = pnfs_layout_process(lgp);
if (!IS_ERR(lseg)) {
@@ -2349,11 +2355,9 @@ void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{
if (lgp != NULL) {
- struct inode *inode = lgp->args.inode;
- if (inode) {
- struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
- pnfs_clear_first_layoutget(lo);
- nfs_layoutget_end(lo);
+ if (lgp->lo) {
+ pnfs_clear_first_layoutget(lgp->lo);
+ nfs_layoutget_end(lgp->lo);
}
pnfs_layoutget_free(lgp);
}
@@ -2362,7 +2366,7 @@ void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
- struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
+ struct pnfs_layout_hdr *lo = lgp->lo;
struct nfs4_layoutget_res *res = &lgp->res;
struct pnfs_layout_segment *lseg;
struct inode *ino = lo->plh_inode;
@@ -2390,11 +2394,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out_forget;
}
+ if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
+ goto out_forget;
+
if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
/* existing state ID, make sure the sequence number matches. */
if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
- if (!pnfs_layout_is_valid(lo) &&
- pnfs_is_first_layoutget(lo))
+ if (!pnfs_layout_is_valid(lo))
lo->plh_barrier = 0;
dprintk("%s forget reply due to sequence\n", __func__);
goto out_forget;
@@ -2413,8 +2419,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out_forget;
} else {
/* We have a completely new layout */
- if (!pnfs_is_first_layoutget(lo))
- goto out_forget;
pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
}
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 49d3389bd813..cf19914fec81 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -805,19 +805,16 @@ out:
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
-static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
+static int nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
might_sleep();
- wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
- TASK_KILLABLE);
+ return wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, TASK_KILLABLE);
}
static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
smp_mb__before_atomic();
- clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
- smp_mb__after_atomic();
- wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
+ clear_and_wake_up_bit(NFS4DS_CONNECTING, &ds->ds_state);
}
static struct nfs_client *(*get_v3_ds_connect)(
@@ -858,7 +855,7 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
if (!load_v3_ds_connect())
- goto out;
+ return -EPROTONOSUPPORT;
list_for_each_entry(da, &ds->ds_addrs, da_node) {
dprintk("%s: DS %s: trying address %s\n",
@@ -993,30 +990,33 @@ int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
{
int err;
-again:
- err = 0;
- if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
- if (version == 3) {
- err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
- retrans);
- } else if (version == 4) {
- err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
- retrans, minor_version);
- } else {
- dprintk("%s: unsupported DS version %d\n", __func__,
- version);
- err = -EPROTONOSUPPORT;
- }
+ do {
+ err = nfs4_wait_ds_connect(ds);
+ if (err || ds->ds_clp)
+ goto out;
+ if (nfs4_test_deviceid_unavailable(devid))
+ return -ENODEV;
+ } while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);
- nfs4_clear_ds_conn_bit(ds);
- } else {
- nfs4_wait_ds_connect(ds);
+ if (ds->ds_clp)
+ goto connect_done;
- /* what was waited on didn't connect AND didn't mark unavail */
- if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
- goto again;
+ switch (version) {
+ case 3:
+ err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans);
+ break;
+ case 4:
+ err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, retrans,
+ minor_version);
+ break;
+ default:
+ dprintk("%s: unsupported DS version %d\n", __func__, version);
+ err = -EPROTONOSUPPORT;
}
+connect_done:
+ nfs4_clear_ds_conn_bit(ds);
+out:
/*
* At this point the ds->ds_clp should be ready, but it might have
* hit an error.
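The rewritten nfs4_pnfs_ds_connect() lets a single caller win NFS4DS_CONNECTING via test_and_set_bit() while every other caller sleeps in nfs4_wait_ds_connect() and re-checks ds_clp (or the deviceid state) after clear_and_wake_up_bit() runs. A rough userspace analogue of that one-worker, many-waiters shape, using pthreads instead of the kernel's bit-wait API (the names and the fake connect step are illustrative only):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    struct ds {
        pthread_mutex_t lock;
        pthread_cond_t done;
        bool connecting;   /* analogue of the NFS4DS_CONNECTING bit */
        bool connected;    /* analogue of ds->ds_clp being set */
    };

    static void ds_connect(struct ds *ds)
    {
        pthread_mutex_lock(&ds->lock);
        for (;;) {
            if (ds->connected) {            /* someone else already connected */
                pthread_mutex_unlock(&ds->lock);
                return;
            }
            if (!ds->connecting)            /* we won the right to do the work */
                break;
            pthread_cond_wait(&ds->done, &ds->lock);
        }
        ds->connecting = true;
        pthread_mutex_unlock(&ds->lock);

        sleep(1);                           /* stand-in for the real connect */

        pthread_mutex_lock(&ds->lock);
        ds->connected = true;
        ds->connecting = false;             /* clear_and_wake_up_bit() analogue */
        pthread_cond_broadcast(&ds->done);
        pthread_mutex_unlock(&ds->lock);
    }

    static void *worker(void *arg)
    {
        ds_connect(arg);
        return NULL;
    }

    int main(void)
    {
        struct ds ds = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                         false, false };
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, &ds);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("connected=%d\n", ds.connected);  /* build with -pthread */
        return 0;
    }

The kernel version additionally lets waiters bail out on a fatal signal (wait_on_bit(..., TASK_KILLABLE)) and re-checks deviceid availability on every pass, which the sketch omits.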
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index d2b6dce1f99f..9f39e0a1a38b 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -74,8 +74,7 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
-static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
struct nfs_pgio_mirror *pgm;
unsigned long npages;
@@ -86,9 +85,9 @@ static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio,
WARN_ON_ONCE(pgio->pg_mirror_count != 1);
pgm = &pgio->pg_mirrors[0];
- NFS_I(inode)->read_io += pgm->pg_bytes_written;
+ NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
- nfs_add_stats(inode, NFSIOS_READPAGES, npages);
+ nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}
@@ -363,22 +362,23 @@ int nfs_readpage(struct file *file, struct page *page)
} else
desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+ xchg(&desc.ctx->error, 0);
if (!IS_SYNC(inode)) {
ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
if (ret == 0)
- goto out;
+ goto out_wait;
}
- xchg(&desc.ctx->error, 0);
nfs_pageio_init_read(&desc.pgio, inode, false,
&nfs_async_read_completion_ops);
ret = readpage_async_filler(&desc, page);
+ if (ret)
+ goto out;
- if (!ret)
- nfs_pageio_complete_read(&desc.pgio, inode);
-
+ nfs_pageio_complete_read(&desc.pgio);
ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
+out_wait:
if (!ret) {
ret = wait_on_page_locked_killable(page);
if (!PageUptodate(page) && !ret)
@@ -430,7 +430,7 @@ int nfs_readpages(struct file *file, struct address_space *mapping,
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
- nfs_pageio_complete_read(&desc.pgio, inode);
+ nfs_pageio_complete_read(&desc.pgio);
read_complete:
put_nfs_open_context(desc.ctx);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3bf82178166a..eae9bf114041 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1810,6 +1810,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
struct nfs_commit_info *cinfo)
{
struct nfs_commit_data *data;
+ unsigned short task_flags = 0;
/* another commit raced with us */
if (list_empty(head))
@@ -1820,8 +1821,11 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
/* Set up the argument struct */
nfs_init_commit(data, head, NULL, cinfo);
atomic_inc(&cinfo->mds->rpcs_out);
+ if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
+ task_flags = RPC_TASK_MOVEABLE;
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
- data->mds_ops, how, RPC_TASK_CRED_NOREF);
+ data->mds_ops, how,
+ RPC_TASK_CRED_NOREF | task_flags);
}
/*
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index 26f2a50eceac..edec45831585 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -82,6 +82,7 @@ __state_in_grace(struct net *net, bool open)
/**
* locks_in_grace
+ * @net: network namespace
*
* Lock managers call this function to determine when it is OK for them
* to answer ordinary lock requests, and when they should accept only
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 1058659a8d31..c99dee99a3c1 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -236,7 +236,7 @@ again:
if (!buf)
return -ENOMEM;
- rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
error = -ENOMEM;
goto out_free_buf;
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index a75abeb1e698..935c1028c217 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -176,6 +176,12 @@ struct nfsd_net {
unsigned int longest_chain_cachesize;
struct shrinker nfsd_reply_cache_shrinker;
+
+ /* tracking server-to-server copy mounts */
+ spinlock_t nfsd_ssc_lock;
+ struct list_head nfsd_ssc_mount_list;
+ wait_queue_head_t nfsd_ssc_waitq;
+
/* utsname taken from the process that starts the server */
char nfsd_name[UNX_MAXNODENAME+1];
};
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index a1591feeea22..5dfe7644a517 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -172,7 +172,7 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p)
struct nfsd3_getaclres *resp = rqstp->rq_resp;
struct dentry *dentry = resp->fh.fh_dentry;
struct kvec *head = rqstp->rq_res.head;
- struct inode *inode = d_inode(dentry);
+ struct inode *inode;
unsigned int base;
int n;
int w;
@@ -181,6 +181,7 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p)
return 0;
switch (resp->status) {
case nfs_ok:
+ inode = d_inode(dentry);
if (!svcxdr_encode_post_op_attr(rqstp, xdr, &resp->fh))
return 0;
if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7325592b456e..0f8b10f363e7 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -915,10 +915,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt) {
- trace_nfsd_cb_setup_err(clp, -EINVAL);
+ if (!conn->cb_xprt)
return -EINVAL;
- }
clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -941,37 +939,43 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
}
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
- trace_nfsd_cb_setup(clp);
+ rcu_read_lock();
+ trace_nfsd_cb_setup(clp, rpc_peeraddr2str(client, RPC_DISPLAY_NETID),
+ args.authflavor);
+ rcu_read_unlock();
return 0;
}
+static void nfsd4_mark_cb_state(struct nfs4_client *clp, int newstate)
+{
+ if (clp->cl_cb_state != newstate) {
+ clp->cl_cb_state = newstate;
+ trace_nfsd_cb_state(clp);
+ }
+}
+
static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
- clp->cl_cb_state = NFSD4_CB_DOWN;
- trace_nfsd_cb_state(clp);
+ nfsd4_mark_cb_state(clp, NFSD4_CB_DOWN);
}
static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
- clp->cl_cb_state = NFSD4_CB_FAULT;
- trace_nfsd_cb_state(clp);
+ nfsd4_mark_cb_state(clp, NFSD4_CB_FAULT);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
- trace_nfsd_cb_done(clp, task->tk_status);
if (task->tk_status)
nfsd4_mark_cb_down(clp, task->tk_status);
- else {
- clp->cl_cb_state = NFSD4_CB_UP;
- trace_nfsd_cb_state(clp);
- }
+ else
+ nfsd4_mark_cb_state(clp, NFSD4_CB_UP);
}
static void nfsd4_cb_probe_release(void *calldata)
@@ -995,8 +999,8 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
*/
void nfsd4_probe_callback(struct nfs4_client *clp)
{
- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
- trace_nfsd_cb_state(clp);
+ trace_nfsd_cb_probe(clp);
+ nfsd4_mark_cb_state(clp, NFSD4_CB_UNKNOWN);
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
nfsd4_run_cb(&clp->cl_cb_null);
}
@@ -1009,11 +1013,10 @@ void nfsd4_probe_callback_sync(struct nfs4_client *clp)
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+ nfsd4_mark_cb_state(clp, NFSD4_CB_UNKNOWN);
spin_lock(&clp->cl_lock);
memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
spin_unlock(&clp->cl_lock);
- trace_nfsd_cb_state(clp);
}
/*
@@ -1170,8 +1173,6 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
- trace_nfsd_cb_done(clp, task->tk_status);
-
if (!nfsd4_cb_sequence_done(task, cb))
return;
@@ -1231,6 +1232,9 @@ void nfsd4_destroy_callback_queue(void)
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
+ if (clp->cl_cb_state != NFSD4_CB_UNKNOWN)
+ trace_nfsd_cb_shutdown(clp);
+
set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
/*
* Note this won't actually result in a null callback;
@@ -1276,7 +1280,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
* kill the old client:
*/
if (clp->cl_cb_client) {
- trace_nfsd_cb_shutdown(clp);
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
@@ -1322,8 +1325,6 @@ nfsd4_run_cb_work(struct work_struct *work)
struct rpc_clnt *clnt;
int flags;
- trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
-
if (cb->cb_need_restart) {
cb->cb_need_restart = false;
} else {
@@ -1345,7 +1346,7 @@ nfsd4_run_cb_work(struct work_struct *work)
* Don't send probe messages for 4.1 or later.
*/
if (!cb->cb_ops && clp->cl_minorversion) {
- clp->cl_cb_state = NFSD4_CB_UP;
+ nfsd4_mark_cb_state(clp, NFSD4_CB_UP);
nfsd41_destroy_cb(cb);
return;
}
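nfsd4_mark_cb_state() folds the state write and the tracepoint into one helper so the trace fires only on a real transition; repeated probes that land in the same state stay quiet. The same guard as a tiny standalone sketch, with printf standing in for the tracepoint:

    #include <stdio.h>

    enum cb_state { CB_UP, CB_UNKNOWN, CB_DOWN, CB_FAULT };

    struct client {
        enum cb_state cb_state;
    };

    /* Update the callback state, logging only when it actually changes. */
    static void mark_cb_state(struct client *clp, enum cb_state newstate)
    {
        if (clp->cb_state != newstate) {
            clp->cb_state = newstate;
            printf("cb state -> %d\n", newstate);  /* tracepoint stand-in */
        }
    }

    int main(void)
    {
        struct client clp = { CB_UNKNOWN };

        mark_cb_state(&clp, CB_DOWN);   /* logged: a real transition */
        mark_cb_state(&clp, CB_DOWN);   /* silent: state is unchanged */
        mark_cb_state(&clp, CB_UP);     /* logged */
        return 0;
    }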
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f4ce93d7f26e..486c5dba4b65 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -55,6 +55,13 @@ module_param(inter_copy_offload_enable, bool, 0644);
MODULE_PARM_DESC(inter_copy_offload_enable,
"Enable inter server to server copy offload. Default: false");
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+static int nfsd4_ssc_umount_timeout = 900000; /* default to 15 mins */
+module_param(nfsd4_ssc_umount_timeout, int, 0644);
+MODULE_PARM_DESC(nfsd4_ssc_umount_timeout,
+ "idle msecs before unmount export from source server");
+#endif
+
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
#include <linux/security.h>
@@ -1166,6 +1173,81 @@ extern void nfs_sb_deactive(struct super_block *sb);
#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
/*
+ * Set up a work entry in the ssc delayed unmount list.
+ */
+static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+ struct nfsd4_ssc_umount_item **retwork, struct vfsmount **ss_mnt)
+{
+ struct nfsd4_ssc_umount_item *ni = 0;
+ struct nfsd4_ssc_umount_item *work = NULL;
+ struct nfsd4_ssc_umount_item *tmp;
+ DEFINE_WAIT(wait);
+
+ *ss_mnt = NULL;
+ *retwork = NULL;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+try_again:
+ spin_lock(&nn->nfsd_ssc_lock);
+ list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
+ if (strncmp(ni->nsui_ipaddr, ipaddr, sizeof(ni->nsui_ipaddr)))
+ continue;
+ /* found a match */
+ if (ni->nsui_busy) {
+ /* wait - and try again */
+ prepare_to_wait(&nn->nfsd_ssc_waitq, &wait,
+ TASK_INTERRUPTIBLE);
+ spin_unlock(&nn->nfsd_ssc_lock);
+
+ /* allow 20secs for mount/unmount for now - revisit */
+ if (signal_pending(current) ||
+ (schedule_timeout(20*HZ) == 0)) {
+ kfree(work);
+ return nfserr_eagain;
+ }
+ finish_wait(&nn->nfsd_ssc_waitq, &wait);
+ goto try_again;
+ }
+ *ss_mnt = ni->nsui_vfsmount;
+ refcount_inc(&ni->nsui_refcnt);
+ spin_unlock(&nn->nfsd_ssc_lock);
+ kfree(work);
+
+ /* return vfsmount in ss_mnt */
+ return 0;
+ }
+ if (work) {
+ strncpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
+ refcount_set(&work->nsui_refcnt, 2);
+ work->nsui_busy = true;
+ list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
+ *retwork = work;
+ }
+ spin_unlock(&nn->nfsd_ssc_lock);
+ return 0;
+}
+
+static void nfsd4_ssc_update_dul_work(struct nfsd_net *nn,
+ struct nfsd4_ssc_umount_item *work, struct vfsmount *ss_mnt)
+{
+ /* set nsui_vfsmount, clear busy flag and wakeup waiters */
+ spin_lock(&nn->nfsd_ssc_lock);
+ work->nsui_vfsmount = ss_mnt;
+ work->nsui_busy = false;
+ wake_up_all(&nn->nfsd_ssc_waitq);
+ spin_unlock(&nn->nfsd_ssc_lock);
+}
+
+static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
+ struct nfsd4_ssc_umount_item *work)
+{
+ spin_lock(&nn->nfsd_ssc_lock);
+ list_del(&work->nsui_list);
+ wake_up_all(&nn->nfsd_ssc_waitq);
+ spin_unlock(&nn->nfsd_ssc_lock);
+ kfree(work);
+}
+
+/*
* Support one copy source server for now.
*/
static __be32
@@ -1181,6 +1263,8 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
char *ipaddr, *dev_name, *raw_data;
int len, raw_len;
__be32 status = nfserr_inval;
+ struct nfsd4_ssc_umount_item *work = NULL;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
naddr = &nss->u.nl4_addr;
tmp_addrlen = rpc_uaddr2sockaddr(SVC_NET(rqstp), naddr->addr,
@@ -1229,12 +1313,24 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
+ status = nfsd4_ssc_setup_dul(nn, ipaddr, &work, &ss_mnt);
+ if (status)
+ goto out_free_devname;
+ if (ss_mnt)
+ goto out_done;
+
/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
ss_mnt = vfs_kern_mount(type, SB_KERNMOUNT, dev_name, raw_data);
module_put(type->owner);
- if (IS_ERR(ss_mnt))
+ if (IS_ERR(ss_mnt)) {
+ status = nfserr_nodev;
+ if (work)
+ nfsd4_ssc_cancel_dul_work(nn, work);
goto out_free_devname;
-
+ }
+ if (work)
+ nfsd4_ssc_update_dul_work(nn, work, ss_mnt);
+out_done:
status = 0;
*mount = ss_mnt;
@@ -1301,10 +1397,42 @@ static void
nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
struct nfsd_file *dst)
{
+ bool found = false;
+ long timeout;
+ struct nfsd4_ssc_umount_item *tmp;
+ struct nfsd4_ssc_umount_item *ni = NULL;
+ struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
+
nfs42_ssc_close(src->nf_file);
- fput(src->nf_file);
nfsd_file_put(dst);
- mntput(ss_mnt);
+ fput(src->nf_file);
+
+ if (!nn) {
+ mntput(ss_mnt);
+ return;
+ }
+ spin_lock(&nn->nfsd_ssc_lock);
+ timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
+ list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
+ if (ni->nsui_vfsmount->mnt_sb == ss_mnt->mnt_sb) {
+ list_del(&ni->nsui_list);
+ /*
+ * vfsmount can be shared by multiple exports,
+ * decrement refcnt. If the count drops to 1 it
+ * will be unmounted when nsui_expire expires.
+ */
+ refcount_dec(&ni->nsui_refcnt);
+ ni->nsui_expire = jiffies + timeout;
+ list_add_tail(&ni->nsui_list, &nn->nfsd_ssc_mount_list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&nn->nfsd_ssc_lock);
+ if (!found) {
+ mntput(ss_mnt);
+ return;
+ }
}
#else /* CONFIG_NFSD_V4_2_INTER_SSC */
@@ -1375,7 +1503,8 @@ static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
{
- copy->cp_res.wr_stable_how = NFS_UNSTABLE;
+ copy->cp_res.wr_stable_how =
+ copy->committed ? NFS_FILE_SYNC : NFS_UNSTABLE;
copy->cp_synchronous = sync;
gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
}
@@ -1386,6 +1515,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
u64 bytes_total = copy->cp_count;
u64 src_pos = copy->cp_src_pos;
u64 dst_pos = copy->cp_dst_pos;
+ __be32 status;
/* See RFC 7862 p.67: */
if (bytes_total == 0)
@@ -1403,6 +1533,16 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
src_pos += bytes_copied;
dst_pos += bytes_copied;
} while (bytes_total > 0 && !copy->cp_synchronous);
+ /* for a non-zero asynchronous copy do a commit of data */
+ if (!copy->cp_synchronous && copy->cp_res.wr_bytes_written > 0) {
+ down_write(&copy->nf_dst->nf_rwsem);
+ status = vfs_fsync_range(copy->nf_dst->nf_file,
+ copy->cp_dst_pos,
+ copy->cp_res.wr_bytes_written, 0);
+ up_write(&copy->nf_dst->nf_rwsem);
+ if (!status)
+ copy->committed = true;
+ }
return bytes_copied;
}
@@ -1497,6 +1637,8 @@ do_callback:
memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
&nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
+ trace_nfsd_cb_offload(copy->cp_clp, &copy->cp_res.cb_stateid,
+ &copy->fh, copy->cp_count, copy->nfserr);
nfsd4_run_cb(&cb_copy->cp_cb);
out:
if (!copy->cp_intra)
@@ -3232,7 +3374,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
- struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
+ struct nfsd4_op *this;
struct nfsd4_compound_state *cstate = &resp->cstate;
struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
u32 opiter;
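Taken together, the nfsd4_ssc_* helpers added above form a small keyed cache: the first inter-server COPY to a given source address mounts it and publishes the vfsmount, later COPYs only bump nsui_refcnt, and nfsd4_cleanup_inter_ssc() drops the count and stamps nsui_expire so an idle mount can be reaped later (nfsd4_ssc_expire_umount() in nfs4state.c below). A stripped-down userspace sketch of that drop-reference-then-reap lifecycle, with illustrative names and a plain mutex standing in for nfsd_ssc_lock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    struct umount_item {
        struct umount_item *next;
        char ipaddr[64];
        int refcnt;        /* 1 == cached but currently unused */
        time_t expire;     /* only meaningful once refcnt drops to 1 */
    };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct umount_item *mounts;

    /* A copy finished: drop our reference and arm the idle timer. */
    static void put_mount(const char *ipaddr, int idle_secs)
    {
        pthread_mutex_lock(&cache_lock);
        for (struct umount_item *ni = mounts; ni; ni = ni->next) {
            if (strcmp(ni->ipaddr, ipaddr) == 0) {
                ni->refcnt--;
                ni->expire = time(NULL) + idle_secs;
                break;
            }
        }
        pthread_mutex_unlock(&cache_lock);
    }

    /* Periodic reaper (laundromat analogue): free idle, expired entries. */
    static void reap_mounts(void)
    {
        pthread_mutex_lock(&cache_lock);
        struct umount_item **pp = &mounts;
        while (*pp) {
            struct umount_item *ni = *pp;
            if (ni->refcnt <= 1 && time(NULL) >= ni->expire) {
                *pp = ni->next;   /* the real code would mntput() here */
                free(ni);
                continue;
            }
            pp = &ni->next;
        }
        pthread_mutex_unlock(&cache_lock);
    }

    int main(void)
    {
        struct umount_item *ni = calloc(1, sizeof(*ni));

        strcpy(ni->ipaddr, "192.0.2.1");
        ni->refcnt = 2;                 /* one cached ref + one active user */
        mounts = ni;

        put_mount("192.0.2.1", 0);      /* zero idle timeout for the demo */
        reap_mounts();
        printf("cached mounts remaining: %s\n", mounts ? "yes" : "no");
        return 0;
    }

In the kernel the list itself holds a base reference (the initial refcount of 2), which is why the reaper treats a count of 1 as "idle" rather than freeing at 0.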
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b517a8794400..fa67ecd5fe63 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -44,6 +44,7 @@
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
+#include <linux/nfs_ssc.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
@@ -1745,6 +1746,8 @@ static void nfsd4_conn_lost(struct svc_xpt_user *u)
struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
struct nfs4_client *clp = c->cn_session->se_client;
+ trace_nfsd_cb_lost(clp);
+
spin_lock(&clp->cl_lock);
if (!list_empty(&c->cn_persession)) {
list_del(&c->cn_persession);
@@ -2351,10 +2354,25 @@ static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
seq_printf(m, "\"");
- seq_escape_mem_ascii(m, data, len);
+ seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
seq_printf(m, "\"");
}
+static const char *cb_state2str(int state)
+{
+ switch (state) {
+ case NFSD4_CB_UP:
+ return "UP";
+ case NFSD4_CB_UNKNOWN:
+ return "UNKNOWN";
+ case NFSD4_CB_DOWN:
+ return "DOWN";
+ case NFSD4_CB_FAULT:
+ return "FAULT";
+ }
+ return "UNDEFINED";
+}
+
static int client_info_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
@@ -2383,6 +2401,8 @@ static int client_info_show(struct seq_file *m, void *v)
seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
}
+ seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
+ seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
drop_client(clp);
return 0;
@@ -2665,6 +2685,8 @@ static void force_expire_client(struct nfs4_client *clp)
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
bool already_expired;
+ trace_nfsd_clid_admin_expired(&clp->cl_clientid);
+
spin_lock(&clp->cl_lock);
clp->cl_time = 0;
spin_unlock(&clp->cl_lock);
@@ -2816,14 +2838,11 @@ move_to_confirmed(struct nfs4_client *clp)
lockdep_assert_held(&nn->client_lock);
- dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
add_clp_to_name_tree(clp, &nn->conf_name_tree);
- if (!test_and_set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags) &&
- clp->cl_nfsd_dentry &&
- clp->cl_nfsd_info_dentry)
- fsnotify_dentry(clp->cl_nfsd_info_dentry, FS_MODIFY);
+ set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
+ trace_nfsd_clid_confirmed(&clp->cl_clientid);
renew_client_locked(clp);
}
@@ -3176,20 +3195,24 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
/* case 6 */
exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
+ trace_nfsd_clid_confirmed_r(conf);
goto out_copy;
}
if (!creds_match) { /* case 3 */
if (client_has_state(conf)) {
status = nfserr_clid_inuse;
+ trace_nfsd_clid_cred_mismatch(conf, rqstp);
goto out;
}
goto out_new;
}
if (verfs_match) { /* case 2 */
conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
+ trace_nfsd_clid_confirmed_r(conf);
goto out_copy;
}
/* case 5, client reboot */
+ trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
conf = NULL;
goto out_new;
}
@@ -3199,16 +3222,19 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
- unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
+ unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
if (unconf) /* case 4, possible retry or client restart */
unhash_client_locked(unconf);
- /* case 1 (normal case) */
+ /* case 1, new owner ID */
+ trace_nfsd_clid_fresh(new);
+
out_new:
if (conf) {
status = mark_client_expired_locked(conf);
if (status)
goto out;
+ trace_nfsd_clid_replaced(&conf->cl_clientid);
}
new->cl_minorversion = cstate->minorversion;
new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
@@ -3232,8 +3258,10 @@ out:
out_nolock:
if (new)
expire_client(new);
- if (unconf)
+ if (unconf) {
+ trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
expire_client(unconf);
+ }
return status;
}
@@ -3425,9 +3453,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out_free_conn;
}
} else if (unconf) {
+ status = nfserr_clid_inuse;
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
- status = nfserr_clid_inuse;
+ trace_nfsd_clid_cred_mismatch(unconf, rqstp);
goto out_free_conn;
}
status = nfserr_wrong_cred;
@@ -3447,6 +3476,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
old = NULL;
goto out_free_conn;
}
+ trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
conf = unconf;
@@ -3471,6 +3501,8 @@ nfsd4_create_session(struct svc_rqst *rqstp,
/* cache solo and embedded create sessions under the client_lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
spin_unlock(&nn->client_lock);
+ if (conf == unconf)
+ fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
/* init connection and backchannel */
nfsd4_init_conn(rqstp, conn, new);
nfsd4_put_session(new);
@@ -3904,6 +3936,7 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp,
status = nfserr_wrong_cred;
goto out;
}
+ trace_nfsd_clid_destroyed(&clp->cl_clientid);
unhash_client_locked(clp);
out:
spin_unlock(&nn->client_lock);
@@ -3946,6 +3979,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
goto out;
status = nfs_ok;
+ trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
nfsd4_client_record_create(clp);
inc_reclaim_complete(clp);
out:
@@ -3967,27 +4001,29 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
new = create_client(clname, rqstp, &clverifier);
if (new == NULL)
return nfserr_jukebox;
- /* Cases below refer to rfc 3530 section 14.2.33: */
spin_lock(&nn->client_lock);
conf = find_confirmed_client_by_name(&clname, nn);
if (conf && client_has_state(conf)) {
- /* case 0: */
status = nfserr_clid_inuse;
if (clp_used_exchangeid(conf))
goto out;
if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
- trace_nfsd_clid_inuse_err(conf);
+ trace_nfsd_clid_cred_mismatch(conf, rqstp);
goto out;
}
}
unconf = find_unconfirmed_client_by_name(&clname, nn);
if (unconf)
unhash_client_locked(unconf);
- /* We need to handle only case 1: probable callback update */
- if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
- copy_clid(new, conf);
- gen_confirm(new, nn);
- }
+ if (conf) {
+ if (same_verf(&conf->cl_verifier, &clverifier)) {
+ copy_clid(new, conf);
+ gen_confirm(new, nn);
+ } else
+ trace_nfsd_clid_verf_mismatch(conf, rqstp,
+ &clverifier);
+ } else
+ trace_nfsd_clid_fresh(new);
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
add_to_unconfirmed(new);
@@ -4000,12 +4036,13 @@ out:
spin_unlock(&nn->client_lock);
if (new)
free_client(new);
- if (unconf)
+ if (unconf) {
+ trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
expire_client(unconf);
+ }
return status;
}
-
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -4034,25 +4071,27 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
* Nevertheless, RFC 7530 recommends INUSE for this case:
*/
status = nfserr_clid_inuse;
- if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
+ if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
+ trace_nfsd_clid_cred_mismatch(unconf, rqstp);
goto out;
- if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
+ }
+ if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
+ trace_nfsd_clid_cred_mismatch(conf, rqstp);
goto out;
- /* cases below refer to rfc 3530 section 14.2.34: */
+ }
if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
if (conf && same_verf(&confirm, &conf->cl_confirm)) {
- /* case 2: probable retransmit */
status = nfs_ok;
- } else /* case 4: client hasn't noticed we rebooted yet? */
+ } else
status = nfserr_stale_clientid;
goto out;
}
status = nfs_ok;
- if (conf) { /* case 1: callback update */
+ if (conf) {
old = unconf;
unhash_client_locked(old);
nfsd4_change_callback(conf, &unconf->cl_cb_conn);
- } else { /* case 3: normal case; new or rebooted client */
+ } else {
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = nfserr_clid_inuse;
@@ -4065,12 +4104,15 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
old = NULL;
goto out;
}
+ trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
conf = unconf;
}
get_client_locked(conf);
spin_unlock(&nn->client_lock);
+ if (conf == unconf)
+ fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
nfsd4_probe_callback(conf);
spin_lock(&nn->client_lock);
put_client_renew_locked(conf);
@@ -4618,7 +4660,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
struct nfs4_file *fp = dp->dl_stid.sc_file;
- trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
+ trace_nfsd_cb_recall(&dp->dl_stid);
/*
* We don't want the locks code to timeout the lease for us;
@@ -5457,6 +5499,69 @@ static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
return false;
}
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
+{
+ spin_lock_init(&nn->nfsd_ssc_lock);
+ INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
+ init_waitqueue_head(&nn->nfsd_ssc_waitq);
+}
+EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
+
+/*
+ * This is called when nfsd is being shut down, after all inter_ssc
+ * cleanup is done, to destroy the ssc delayed unmount list.
+ */
+static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
+{
+ struct nfsd4_ssc_umount_item *ni = NULL;
+ struct nfsd4_ssc_umount_item *tmp;
+
+ spin_lock(&nn->nfsd_ssc_lock);
+ list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
+ list_del(&ni->nsui_list);
+ spin_unlock(&nn->nfsd_ssc_lock);
+ mntput(ni->nsui_vfsmount);
+ kfree(ni);
+ spin_lock(&nn->nfsd_ssc_lock);
+ }
+ spin_unlock(&nn->nfsd_ssc_lock);
+}
+
+static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
+{
+ bool do_wakeup = false;
+ struct nfsd4_ssc_umount_item *ni = 0;
+ struct nfsd4_ssc_umount_item *tmp;
+
+ spin_lock(&nn->nfsd_ssc_lock);
+ list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
+ if (time_after(jiffies, ni->nsui_expire)) {
+ if (refcount_read(&ni->nsui_refcnt) > 1)
+ continue;
+
+ /* mark as being unmounted */
+ ni->nsui_busy = true;
+ spin_unlock(&nn->nfsd_ssc_lock);
+ mntput(ni->nsui_vfsmount);
+ spin_lock(&nn->nfsd_ssc_lock);
+
+ /* waiters need to restart from the beginning of the list */
+ list_del(&ni->nsui_list);
+ kfree(ni);
+
+ /* wakeup ssc_connect waiters */
+ do_wakeup = true;
+ continue;
+ }
+ break;
+ }
+ if (do_wakeup)
+ wake_up_all(&nn->nfsd_ssc_waitq);
+ spin_unlock(&nn->nfsd_ssc_lock);
+}
+#endif
+
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
@@ -5495,10 +5600,8 @@ nfs4_laundromat(struct nfsd_net *nn)
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (!state_expired(&lt, clp->cl_time))
break;
- if (mark_client_expired_locked(clp)) {
- trace_nfsd_clid_expired(&clp->cl_clientid);
+ if (mark_client_expired_locked(clp))
continue;
- }
list_add(&clp->cl_lru, &reaplist);
}
spin_unlock(&nn->client_lock);
@@ -5568,6 +5671,10 @@ nfs4_laundromat(struct nfsd_net *nn)
list_del_init(&nbl->nbl_lru);
free_blocked_lock(nbl);
}
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ /* service the server-to-server copy delayed unmount list */
+ nfsd4_ssc_expire_umount(nn);
+#endif
out:
return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
}
@@ -6430,8 +6537,10 @@ nfsd4_lm_notify(struct file_lock *fl)
}
spin_unlock(&nn->blocked_locks_lock);
- if (queue)
+ if (queue) {
+ trace_nfsd_cb_notify_lock(lo, nbl);
nfsd4_run_cb(&nbl->nbl_cb);
+ }
}
static const struct lock_manager_operations nfsd_posix_mng_ops = {
@@ -7229,7 +7338,6 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
- trace_nfsd_clid_reclaim(nn, name.len, name.data);
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
@@ -7279,8 +7387,6 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
unsigned int strhashval;
struct nfs4_client_reclaim *crp = NULL;
- trace_nfsd_clid_find(nn, name.len, name.data);
-
strhashval = clientstr_hashval(name);
list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
if (compare_blob(&crp->cr_name, &name) == 0) {
@@ -7486,6 +7592,9 @@ nfs4_state_shutdown_net(struct net *net)
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ nfsd4_ssc_shutdown_umount(nn);
+#endif
}
void
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 14dbfa75059d..9664303afdaf 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -484,6 +484,10 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)
extern int nfsd4_is_junction(struct dentry *dentry);
extern int register_cld_notifier(void);
extern void unregister_cld_notifier(void);
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
+#endif
+
#else /* CONFIG_NFSD_V4 */
static inline int nfsd4_is_junction(struct dentry *dentry)
{
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index aff2cda5c6c3..6106697adc04 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -225,15 +225,12 @@ static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
* returns a crc32 hash for the filehandle that is compatible with
* the one displayed by "wireshark".
*/
-
-static inline u32
-knfsd_fh_hash(struct knfsd_fh *fh)
+static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, (unsigned char *)&fh->fh_base, fh->fh_size);
}
#else
-static inline u32
-knfsd_fh_hash(struct knfsd_fh *fh)
+static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
{
return 0;
}
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index dd5d69921676..ccb59e91011b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -403,6 +403,9 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
if (ret)
goto out_filecache;
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ nfsd4_ssc_init_umount_work(nn);
+#endif
nn->nfsd_net_up = true;
return 0;
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 27a93ebd1d80..adaec43548d1 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -408,7 +408,6 @@ TRACE_EVENT(nfsd_dirent,
__entry->ino = ino;
__entry->len = namlen;
memcpy(__get_str(name), name, namlen);
- __assign_str(name, name);
),
TP_printk("fh_hash=0x%08x ino=%llu name=%.*s",
__entry->fh_hash, __entry->ino,
@@ -459,7 +458,6 @@ DEFINE_STATEID_EVENT(layout_recall_release);
DEFINE_STATEID_EVENT(open);
DEFINE_STATEID_EVENT(deleg_read);
-DEFINE_STATEID_EVENT(deleg_break);
DEFINE_STATEID_EVENT(deleg_recall);
DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
@@ -511,7 +509,12 @@ DEFINE_EVENT(nfsd_clientid_class, nfsd_clid_##name, \
TP_PROTO(const clientid_t *clid), \
TP_ARGS(clid))
-DEFINE_CLIENTID_EVENT(expired);
+DEFINE_CLIENTID_EVENT(expire_unconf);
+DEFINE_CLIENTID_EVENT(reclaim_complete);
+DEFINE_CLIENTID_EVENT(confirmed);
+DEFINE_CLIENTID_EVENT(destroyed);
+DEFINE_CLIENTID_EVENT(admin_expired);
+DEFINE_CLIENTID_EVENT(replaced);
DEFINE_CLIENTID_EVENT(purged);
DEFINE_CLIENTID_EVENT(renew);
DEFINE_CLIENTID_EVENT(stale);
@@ -536,58 +539,102 @@ DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
DEFINE_NET_EVENT(grace_start);
DEFINE_NET_EVENT(grace_complete);
-DECLARE_EVENT_CLASS(nfsd_clid_class,
- TP_PROTO(const struct nfsd_net *nn,
- unsigned int namelen,
- const unsigned char *namedata),
- TP_ARGS(nn, namelen, namedata),
+TRACE_EVENT(nfsd_clid_cred_mismatch,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct svc_rqst *rqstp
+ ),
+ TP_ARGS(clp, rqstp),
TP_STRUCT__entry(
- __field(unsigned long long, boot_time)
- __field(unsigned int, namelen)
- __dynamic_array(unsigned char, name, namelen)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(unsigned long, cl_flavor)
+ __field(unsigned long, new_flavor)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
- __entry->boot_time = nn->boot_time;
- __entry->namelen = namelen;
- memcpy(__get_dynamic_array(name), namedata, namelen);
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __entry->cl_flavor = clp->cl_cred.cr_flavor;
+ __entry->new_flavor = rqstp->rq_cred.cr_flavor;
+ memcpy(__entry->addr, &rqstp->rq_xprt->xpt_remote,
+ sizeof(struct sockaddr_in6));
),
- TP_printk("boot_time=%16llx nfs4_clientid=%.*s",
- __entry->boot_time, __entry->namelen, __get_str(name))
+ TP_printk("client %08x:%08x flavor=%s, conflict=%s from addr=%pISpc",
+ __entry->cl_boot, __entry->cl_id,
+ show_nfsd_authflavor(__entry->cl_flavor),
+ show_nfsd_authflavor(__entry->new_flavor), __entry->addr
+ )
)
-#define DEFINE_CLID_EVENT(name) \
-DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
- TP_PROTO(const struct nfsd_net *nn, \
- unsigned int namelen, \
- const unsigned char *namedata), \
- TP_ARGS(nn, namelen, namedata))
-
-DEFINE_CLID_EVENT(find);
-DEFINE_CLID_EVENT(reclaim);
+TRACE_EVENT(nfsd_clid_verf_mismatch,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct svc_rqst *rqstp,
+ const nfs4_verifier *verf
+ ),
+ TP_ARGS(clp, rqstp, verf),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, cl_verifier, NFS4_VERIFIER_SIZE)
+ __array(unsigned char, new_verifier, NFS4_VERIFIER_SIZE)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->cl_verifier, (void *)&clp->cl_verifier,
+ NFS4_VERIFIER_SIZE);
+ memcpy(__entry->new_verifier, (void *)verf,
+ NFS4_VERIFIER_SIZE);
+ memcpy(__entry->addr, &rqstp->rq_xprt->xpt_remote,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client %08x:%08x verf=0x%s, updated=0x%s from addr=%pISpc",
+ __entry->cl_boot, __entry->cl_id,
+ __print_hex_str(__entry->cl_verifier, NFS4_VERIFIER_SIZE),
+ __print_hex_str(__entry->new_verifier, NFS4_VERIFIER_SIZE),
+ __entry->addr
+ )
+);
-TRACE_EVENT(nfsd_clid_inuse_err,
+DECLARE_EVENT_CLASS(nfsd_clid_class,
TP_PROTO(const struct nfs4_client *clp),
TP_ARGS(clp),
TP_STRUCT__entry(
__field(u32, cl_boot)
__field(u32, cl_id)
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
- __field(unsigned int, namelen)
- __dynamic_array(unsigned char, name, clp->cl_name.len)
+ __field(unsigned long, flavor)
+ __array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
+ __dynamic_array(char, name, clp->cl_name.len + 1)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
memcpy(__entry->addr, &clp->cl_addr,
sizeof(struct sockaddr_in6));
- __entry->namelen = clp->cl_name.len;
- memcpy(__get_dynamic_array(name), clp->cl_name.data,
- clp->cl_name.len);
- ),
- TP_printk("nfs4_clientid %.*s already in use by %pISpc, client %08x:%08x",
- __entry->namelen, __get_str(name), __entry->addr,
+ __entry->flavor = clp->cl_cred.cr_flavor;
+ memcpy(__entry->verifier, (void *)&clp->cl_verifier,
+ NFS4_VERIFIER_SIZE);
+ memcpy(__get_str(name), clp->cl_name.data, clp->cl_name.len);
+ __get_str(name)[clp->cl_name.len] = '\0';
+ ),
+ TP_printk("addr=%pISpc name='%s' verifier=0x%s flavor=%s client=%08x:%08x",
+ __entry->addr, __get_str(name),
+ __print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE),
+ show_nfsd_authflavor(__entry->flavor),
__entry->cl_boot, __entry->cl_id)
-)
+);
+
+#define DEFINE_CLID_EVENT(name) \
+DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
+ TP_PROTO(const struct nfs4_client *clp), \
+ TP_ARGS(clp))
+
+DEFINE_CLID_EVENT(fresh);
+DEFINE_CLID_EVENT(confirmed_r);
/*
* from fs/nfsd/filecache.h
@@ -809,9 +856,9 @@ TRACE_EVENT(nfsd_cb_args,
memcpy(__entry->addr, &conn->cb_addr,
sizeof(struct sockaddr_in6));
),
- TP_printk("client %08x:%08x callback addr=%pISpc prog=%u ident=%u",
- __entry->cl_boot, __entry->cl_id,
- __entry->addr, __entry->prog, __entry->ident)
+ TP_printk("addr=%pISpc client %08x:%08x prog=%u ident=%u",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __entry->prog, __entry->ident)
);
TRACE_EVENT(nfsd_cb_nodelegs,
@@ -828,11 +875,6 @@ TRACE_EVENT(nfsd_cb_nodelegs,
TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
)
-TRACE_DEFINE_ENUM(NFSD4_CB_UP);
-TRACE_DEFINE_ENUM(NFSD4_CB_UNKNOWN);
-TRACE_DEFINE_ENUM(NFSD4_CB_DOWN);
-TRACE_DEFINE_ENUM(NFSD4_CB_FAULT);
-
#define show_cb_state(val) \
__print_symbolic(val, \
{ NFSD4_CB_UP, "UP" }, \
@@ -866,10 +908,53 @@ DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name, \
TP_PROTO(const struct nfs4_client *clp), \
TP_ARGS(clp))
-DEFINE_NFSD_CB_EVENT(setup);
DEFINE_NFSD_CB_EVENT(state);
+DEFINE_NFSD_CB_EVENT(probe);
+DEFINE_NFSD_CB_EVENT(lost);
DEFINE_NFSD_CB_EVENT(shutdown);
+TRACE_DEFINE_ENUM(RPC_AUTH_NULL);
+TRACE_DEFINE_ENUM(RPC_AUTH_UNIX);
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS);
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5);
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5I);
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5P);
+
+#define show_nfsd_authflavor(val) \
+ __print_symbolic(val, \
+ { RPC_AUTH_NULL, "none" }, \
+ { RPC_AUTH_UNIX, "sys" }, \
+ { RPC_AUTH_GSS, "gss" }, \
+ { RPC_AUTH_GSS_KRB5, "krb5" }, \
+ { RPC_AUTH_GSS_KRB5I, "krb5i" }, \
+ { RPC_AUTH_GSS_KRB5P, "krb5p" })
+
+TRACE_EVENT(nfsd_cb_setup,
+ TP_PROTO(const struct nfs4_client *clp,
+ const char *netid,
+ rpc_authflavor_t authflavor
+ ),
+ TP_ARGS(clp, netid, authflavor),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(unsigned long, authflavor)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, netid, 8)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ strlcpy(__entry->netid, netid, sizeof(__entry->netid));
+ __entry->authflavor = authflavor;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x proto=%s flavor=%s",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __entry->netid, show_nfsd_authflavor(__entry->authflavor))
+);
+
TRACE_EVENT(nfsd_cb_setup_err,
TP_PROTO(
const struct nfs4_client *clp,
@@ -893,52 +978,97 @@ TRACE_EVENT(nfsd_cb_setup_err,
__entry->addr, __entry->cl_boot, __entry->cl_id, __entry->error)
);
-TRACE_EVENT(nfsd_cb_work,
+TRACE_EVENT(nfsd_cb_recall,
TP_PROTO(
- const struct nfs4_client *clp,
- const char *procedure
+ const struct nfs4_stid *stid
),
- TP_ARGS(clp, procedure),
+ TP_ARGS(stid),
TP_STRUCT__entry(
__field(u32, cl_boot)
__field(u32, cl_id)
- __string(procedure, procedure)
+ __field(u32, si_id)
+ __field(u32, si_generation)
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
+ const stateid_t *stp = &stid->sc_stateid;
+ const struct nfs4_client *clp = stid->sc_client;
+
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ if (clp)
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ else
+ memset(__entry->addr, 0, sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x stateid %08x:%08x",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __entry->si_id, __entry->si_generation)
+);
+
+TRACE_EVENT(nfsd_cb_notify_lock,
+ TP_PROTO(
+ const struct nfs4_lockowner *lo,
+ const struct nfsd4_blocked_lock *nbl
+ ),
+ TP_ARGS(lo, nbl),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, fh_hash)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ const struct nfs4_client *clp = lo->lo_owner.so_client;
+
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
- __assign_str(procedure, procedure)
+ __entry->fh_hash = knfsd_fh_hash(&nbl->nbl_fh);
memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
sizeof(struct sockaddr_in6));
),
- TP_printk("addr=%pISpc client %08x:%08x procedure=%s",
+ TP_printk("addr=%pISpc client %08x:%08x fh_hash=0x%08x",
__entry->addr, __entry->cl_boot, __entry->cl_id,
- __get_str(procedure))
+ __entry->fh_hash)
);
-TRACE_EVENT(nfsd_cb_done,
+TRACE_EVENT(nfsd_cb_offload,
TP_PROTO(
const struct nfs4_client *clp,
- int status
+ const stateid_t *stp,
+ const struct knfsd_fh *fh,
+ u64 count,
+ __be32 status
),
- TP_ARGS(clp, status),
+ TP_ARGS(clp, stp, fh, count, status),
TP_STRUCT__entry(
__field(u32, cl_boot)
__field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ __field(u32, fh_hash)
__field(int, status)
+ __field(u64, count)
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
- __entry->cl_boot = clp->cl_clientid.cl_boot;
- __entry->cl_id = clp->cl_clientid.cl_id;
- __entry->status = status;
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ __entry->fh_hash = knfsd_fh_hash(fh);
+ __entry->status = be32_to_cpu(status);
+ __entry->count = count;
memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
sizeof(struct sockaddr_in6));
),
- TP_printk("addr=%pISpc client %08x:%08x status=%d",
+ TP_printk("addr=%pISpc client %08x:%08x stateid %08x:%08x fh_hash=0x%08x count=%llu status=%d",
__entry->addr, __entry->cl_boot, __entry->cl_id,
- __entry->status)
+ __entry->si_id, __entry->si_generation,
+ __entry->fh_hash, __entry->count, __entry->status)
);
#endif /* _NFSD_TRACE_H */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 15adf1f6ab21..a224a5e23cc1 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1123,6 +1123,19 @@ out:
}
#ifdef CONFIG_NFSD_V3
+static int
+nfsd_filemap_write_and_wait_range(struct nfsd_file *nf, loff_t offset,
+ loff_t end)
+{
+ struct address_space *mapping = nf->nf_file->f_mapping;
+ int ret = filemap_fdatawrite_range(mapping, offset, end);
+
+ if (ret)
+ return ret;
+ filemap_fdatawait_range_keep_errors(mapping, offset, end);
+ return 0;
+}
+
/*
* Commit all pending writes to stable storage.
*
@@ -1153,10 +1166,11 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
- int err2;
+ int err2 = nfsd_filemap_write_and_wait_range(nf, offset, end);
down_write(&nf->nf_rwsem);
- err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+ if (!err2)
+ err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
switch (err2) {
case 0:
nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
@@ -1613,9 +1627,9 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
host_err = vfs_symlink(&init_user_ns, d_inode(dentry), dnew, path);
err = nfserrno(host_err);
+ fh_unlock(fhp);
if (!err)
err = nfserrno(commit_metadata(fhp));
- fh_unlock(fhp);
fh_drop_write(fhp);
@@ -1680,6 +1694,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (d_really_is_negative(dold))
goto out_dput;
host_err = vfs_link(dold, &init_user_ns, dirp, dnew, NULL);
+ fh_unlock(ffhp);
if (!host_err) {
err = nfserrno(commit_metadata(ffhp));
if (!err)
@@ -1859,6 +1874,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
{
struct dentry *dentry, *rdentry;
struct inode *dirp;
+ struct inode *rinode;
__be32 err;
int host_err;
@@ -1887,6 +1903,8 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
host_err = -ENOENT;
goto out_drop_write;
}
+ rinode = d_inode(rdentry);
+ ihold(rinode);
if (!type)
type = d_inode(rdentry)->i_mode & S_IFMT;
@@ -1899,9 +1917,11 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
host_err = vfs_rmdir(&init_user_ns, dirp, rdentry);
}
+ fh_unlock(fhp);
if (!host_err)
host_err = commit_metadata(fhp);
dput(rdentry);
+ iput(rinode); /* truncate the inode here */
out_drop_write:
fh_drop_write(fhp);
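nfsd_commit() now starts writeback with nfsd_filemap_write_and_wait_range() before taking nf_rwsem, so most dirty pages are already in flight by the time vfs_fsync_range() runs under the lock. A userspace analogue of the same start-writeback-early, sync-under-the-lock idea, using Linux's sync_file_range(2); the file name and locking here are made up for the demo:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t file_lock = PTHREAD_MUTEX_INITIALIZER;

    static int commit_range(int fd, off_t offset, off_t nbytes)
    {
        int ret;

        /* Kick off writeback for the range before taking the lock ... */
        if (sync_file_range(fd, offset, nbytes, SYNC_FILE_RANGE_WRITE) < 0)
            return -1;

        /* ... then make it durable while holding the lock. */
        pthread_mutex_lock(&file_lock);
        ret = fdatasync(fd);
        pthread_mutex_unlock(&file_lock);
        return ret;
    }

    int main(void)
    {
        int fd = open("commit-demo.dat", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || write(fd, "hello", 5) != 5)
            return 1;
        if (commit_range(fd, 0, 5) < 0)
            return 1;
        close(fd);
        return 0;
    }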
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index a7c425254fee..3e4052e3bd50 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -567,6 +567,7 @@ struct nfsd4_copy {
struct vfsmount *ss_mnt;
struct nfs_fh c_fh;
nfs4_stateid stateid;
+ bool committed;
};
struct nfsd4_seek {
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index f42ab57201e7..ab9ec073330f 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -738,7 +738,6 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
if (ptr2 != ptr + cnt || ++cnt == maxblocks)
goto end;
index++;
- continue;
}
if (level == maxlevel)
break;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 64864fb40b40..28b67cb9458d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -54,22 +54,27 @@ static int fanotify_max_queued_events __read_mostly;
#include <linux/sysctl.h>
+static long ft_zero = 0;
+static long ft_int_max = INT_MAX;
+
struct ctl_table fanotify_table[] = {
{
.procname = "max_user_groups",
.data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &ft_zero,
+ .extra2 = &ft_int_max,
},
{
.procname = "max_user_marks",
.data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &ft_zero,
+ .extra2 = &ft_int_max,
},
{
.procname = "max_queued_events",
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 98f61b31745a..62051247f6d2 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -55,22 +55,27 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
#include <linux/sysctl.h>
+static long it_zero = 0;
+static long it_int_max = INT_MAX;
+
struct ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &it_zero,
+ .extra2 = &it_int_max,
},
{
.procname = "max_user_watches",
.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = &it_zero,
+ .extra2 = &it_int_max,
},
{
.procname = "max_queued_events",
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index e5aab265dff1..ab4f3362466d 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1684,20 +1684,17 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
{
struct page **last_page = pages + nr_pages;
size_t total = 0;
- struct iov_iter data = *i;
unsigned len, copied;
do {
len = PAGE_SIZE - ofs;
if (len > bytes)
len = bytes;
- copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
- len);
+ copied = copy_page_from_iter_atomic(*pages, ofs, len, i);
total += copied;
bytes -= copied;
if (!bytes)
break;
- iov_iter_advance(&data, copied);
if (copied < len)
goto err;
ofs = 0;
@@ -1866,34 +1863,24 @@ again:
if (likely(copied == bytes)) {
status = ntfs_commit_pages_after_write(pages, do_pages,
pos, bytes);
- if (!status)
- status = bytes;
}
do {
unlock_page(pages[--do_pages]);
put_page(pages[do_pages]);
} while (do_pages);
- if (unlikely(status < 0))
+ if (unlikely(status < 0)) {
+ iov_iter_revert(i, copied);
break;
- copied = status;
+ }
cond_resched();
- if (unlikely(!copied)) {
- size_t sc;
-
- /*
- * We failed to copy anything. Fall back to single
- * segment length write.
- *
- * This is needed to avoid possible livelock in the
- * case that all segments in the iov cannot be copied
- * at once without a pagefault.
- */
- sc = iov_iter_single_seg_count(i);
- if (bytes > sc)
- bytes = sc;
+ if (unlikely(copied < bytes)) {
+ iov_iter_revert(i, copied);
+ if (copied)
+ bytes = copied;
+ else if (bytes > PAGE_SIZE - ofs)
+ bytes = PAGE_SIZE - ofs;
goto again;
}
- iov_iter_advance(i, copied);
pos += copied;
written += copied;
balance_dirty_pages_ratelimited(mapping);
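The reworked ntfs copy loop no longer advances the iterator up front: on a short copy it reverts the iov_iter and retries with a smaller chunk, capped at the rest of the current page. The general retry-the-remainder-after-a-short-transfer shape is the same one used for short write(2) returns in user space; a minimal sketch of that shape, not of the iov_iter API itself:

    #include <errno.h>
    #include <unistd.h>

    /* Write the whole buffer, retrying after short writes. */
    static int write_all(int fd, const char *buf, size_t len)
    {
        size_t done = 0;

        while (done < len) {
            ssize_t n = write(fd, buf + done, len - done);

            if (n < 0) {
                if (errno == EINTR)
                    continue;       /* interrupted: retry the same remainder */
                return -1;
            }
            done += n;              /* short write: loop with what is left */
        }
        return 0;
    }

    int main(void)
    {
        const char msg[] = "short writes are not errors\n";

        return write_all(STDOUT_FILENO, msg, sizeof(msg) - 1) ? 1 : 0;
    }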
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 775657943057..54d7843c0211 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
}
}
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ * is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+ u64 start, u64 len)
+{
+ int ret;
+ u64 start_block, end_block, nr_blocks;
+ u64 p_block, offset;
+ u32 cluster, p_cluster, nr_clusters;
+ struct super_block *sb = inode->i_sb;
+ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+ if (start + len < end)
+ end = start + len;
+
+ start_block = ocfs2_blocks_for_bytes(sb, start);
+ end_block = ocfs2_blocks_for_bytes(sb, end);
+ nr_blocks = end_block - start_block;
+ if (!nr_blocks)
+ return 0;
+
+ cluster = ocfs2_bytes_to_clusters(sb, start);
+ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+ &nr_clusters, NULL);
+ if (ret)
+ return ret;
+ if (!p_cluster)
+ return 0;
+
+ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
static int ocfs2_zero_partial_clusters(struct inode *inode,
u64 start, u64 len)
{
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int csize = osb->s_clustersize;
handle_t *handle;
+ loff_t isize = i_size_read(inode);
/*
* The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
goto out;
+ /* No page cache for EOF blocks, issue zero out to disk. */
+ if (end > isize) {
+ /*
+ * Zero out EOF blocks in the last cluster starting from
+ * "isize", even if "start" > "isize", because zeroing out
+ * only at "start" is complicated: "start" may not be block
+ * aligned, which would require a buffer write, and buffer
+ * writes beyond EOF are not supported.
+ */
+ ret = ocfs2_zeroout_partial_cluster(inode, isize,
+ end - isize);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ if (start >= isize)
+ goto out;
+ end = isize;
+ }
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -1856,45 +1916,6 @@ out:
}
/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- * is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
- u64 start, u64 len)
-{
- int ret;
- u64 start_block, end_block, nr_blocks;
- u64 p_block, offset;
- u32 cluster, p_cluster, nr_clusters;
- struct super_block *sb = inode->i_sb;
- u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
- if (start + len < end)
- end = start + len;
-
- start_block = ocfs2_blocks_for_bytes(sb, start);
- end_block = ocfs2_blocks_for_bytes(sb, end);
- nr_blocks = end_block - start_block;
- if (!nr_blocks)
- return 0;
-
- cluster = ocfs2_bytes_to_clusters(sb, start);
- ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
- &nr_clusters, NULL);
- if (ret)
- return ret;
- if (!p_cluster)
- return 0;
-
- offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
- p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
- return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
-/*
* Parts of this function taken from xfs_change_file_space()
*/
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- orig_isize = i_size_read(inode);
switch (sr->l_whence) {
case 0: /*SEEK_SET*/
break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
- sr->l_start += orig_isize;
+ sr->l_start += i_size_read(inode);
break;
default:
ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
ret = -EINVAL;
}
+ orig_isize = i_size_read(inode);
/* zeroout eof blocks in the cluster. */
if (!ret && change_size && orig_isize < size) {
ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
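
For context on the arithmetic the ocfs2 hunks above depend on: ocfs2_zeroout_partial_cluster() rounds "start" up to the next cluster boundary and then trims that end back to "start + len", so only the tail of the cluster containing "start" is ever zeroed. A minimal userspace sketch of that rounding, with made-up fixed cluster/block sizes standing in for the values ocfs2 reads from the superblock:

#include <stdint.h>
#include <stdio.h>

/* Made-up fixed sizes; ocfs2 derives these from the superblock. */
#define CLUSTER_SIZE  (1024 * 1024)
#define BLOCK_SIZE    4096

static uint64_t align_bytes_to_clusters(uint64_t bytes)
{
        /* round up to the next cluster boundary */
        return (bytes + CLUSTER_SIZE - 1) & ~((uint64_t)CLUSTER_SIZE - 1);
}

static uint64_t blocks_for_bytes(uint64_t bytes)
{
        /* round up to whole blocks */
        return (bytes + BLOCK_SIZE - 1) / BLOCK_SIZE;
}

int main(void)
{
        uint64_t start = 3ULL * CLUSTER_SIZE + 5000;    /* unaligned offset */
        uint64_t len   = 10ULL * CLUSTER_SIZE;          /* reaches past the cluster */
        uint64_t end   = align_bytes_to_clusters(start);

        if (start + len < end)
                end = start + len;      /* stay within the requested range */

        printf("zero blocks %llu..%llu (tail of one cluster only)\n",
               (unsigned long long)blocks_for_bytes(start),
               (unsigned long long)blocks_for_bytes(end));
        return 0;
}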
diff --git a/fs/open.c b/fs/open.c
index 53bc0573c0ec..94bef26ff1b6 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -852,8 +852,17 @@ static int do_dentry_open(struct file *f,
* XXX: Huge page cache doesn't support writing yet. Drop all page
* cache for this file before processing writes.
*/
- if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
- truncate_pagecache(inode, 0);
+ if (f->f_mode & FMODE_WRITE) {
+ /*
+ * Paired with smp_mb() in collapse_file() to ensure nr_thps
+ * is up to date and the update to i_writecount by
+ * get_write_access() is visible. Ensures subsequent insertion
+ * of THPs into the page cache will fail.
+ */
+ smp_mb();
+ if (filemap_nr_thps(inode->i_mapping))
+ truncate_pagecache(inode, 0);
+ }
return 0;
@@ -1164,7 +1173,7 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
}
EXPORT_SYMBOL(filp_open);
-struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+struct file *file_open_root(const struct path *root,
const char *filename, int flags, umode_t mode)
{
struct open_flags op;
@@ -1172,7 +1181,7 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
int err = build_open_flags(&how, &op);
if (err)
return ERR_PTR(err);
- return do_file_open_root(dentry, mnt, filename, &op);
+ return do_file_open_root(root, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
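
The smp_mb() added in do_dentry_open() only makes sense together with its counterpart in collapse_file(); a rough ordering sketch (paraphrased from the comment above, not the actual kernel code):

/*
 *   open-for-write side (do_dentry_open() above):
 *       atomic_inc(&inode->i_writecount);       // via get_write_access()
 *       smp_mb();                               // the barrier added above
 *       if (filemap_nr_thps(inode->i_mapping))  // saw a THP? drop the cache
 *               truncate_pagecache(inode, 0);
 *
 *   collapse side in mm/khugepaged.c (mirror image, assumed shape):
 *       filemap_nr_thps_inc(mapping);           // publish the new THP
 *       smp_mb();                               // paired barrier
 *       if (inode_is_open_for_write(inode))     // writer got in first?
 *               abort the collapse;
 *
 * Whichever side runs second is guaranteed to observe the other side's
 * store, so either the open truncates the THPs or the collapse backs off.
 */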
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 6bf35a0d61f3..16ac617df7d7 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -249,8 +249,7 @@ static void orangefs_readahead(struct readahead_control *rac)
{
loff_t offset;
struct iov_iter iter;
- struct file *file = rac->file;
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = rac->mapping->host;
struct xarray *i_pages;
struct page *page;
loff_t new_start = readahead_pos(rac);
@@ -269,14 +268,14 @@ static void orangefs_readahead(struct readahead_control *rac)
readahead_expand(rac, new_start, new_len);
offset = readahead_pos(rac);
- i_pages = &file->f_mapping->i_pages;
+ i_pages = &rac->mapping->i_pages;
iov_iter_xarray(&iter, READ, i_pages, offset, readahead_length(rac));
/* read in the pages. */
if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode,
&offset, &iter, readahead_length(rac),
- inode->i_size, NULL, NULL, file)) < 0)
+ inode->i_size, NULL, NULL, rac->file)) < 0)
gossip_debug(GOSSIP_FILE_DEBUG,
"%s: wait_for_direct_io failed. \n", __func__);
else
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index ee5efdc35cc1..2f2e430461b2 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -209,7 +209,7 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail;
buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total;
buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail;
- buf->f_frsize = sb->s_blocksize;
+ buf->f_frsize = 0;
out_op_release:
op_release(new_op);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 41ebf52f1bbc..ebde05c9cf62 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -392,6 +392,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
*/
take_dentry_name_snapshot(&name, real);
this = lookup_one_len(name.name.name, connected, name.name.len);
+ release_dentry_name_snapshot(&name);
err = PTR_ERR(this);
if (IS_ERR(this)) {
goto fail;
@@ -406,7 +407,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
}
out:
- release_dentry_name_snapshot(&name);
dput(parent);
inode_unlock(dir);
return this;
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 4d53d3b7e5fe..d081faa55e83 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -392,6 +392,51 @@ out_unlock:
return ret;
}
+/*
+ * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
+ * due to lock order inversion between pipe->mutex in iter_file_splice_write()
+ * and file_start_write(real.file) in ovl_write_iter().
+ *
+ * So do everything ovl_write_iter() does and call iter_file_splice_write() on
+ * the real file.
+ */
+static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ struct fd real;
+ const struct cred *old_cred;
+ struct inode *inode = file_inode(out);
+ struct inode *realinode = ovl_inode_real(inode);
+ ssize_t ret;
+
+ inode_lock(inode);
+ /* Update mode */
+ ovl_copyattr(realinode, inode);
+ ret = file_remove_privs(out);
+ if (ret)
+ goto out_unlock;
+
+ ret = ovl_real_fdget(out, &real);
+ if (ret)
+ goto out_unlock;
+
+ old_cred = ovl_override_creds(inode->i_sb);
+ file_start_write(real.file);
+
+ ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+
+ file_end_write(real.file);
+ /* Update size */
+ ovl_copyattr(realinode, inode);
+ revert_creds(old_cred);
+ fdput(real);
+
+out_unlock:
+ inode_unlock(inode);
+
+ return ret;
+}
+
static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct fd real;
@@ -603,7 +648,7 @@ const struct file_operations ovl_file_operations = {
.fadvise = ovl_fadvise,
.flush = ovl_flush,
.splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
+ .splice_write = ovl_splice_write,
.copy_file_range = ovl_copy_file_range,
.remap_file_range = ovl_remap_file_range,
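
The deadlock ovl_splice_write() avoids is the usual ABBA pattern: two locks (here pipe->mutex and the file_start_write() freeze protection on the real file) taken in opposite orders on different code paths. A generic illustration with made-up pthread mutexes, not overlayfs code:

#include <pthread.h>

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;  /* think: pipe->mutex */
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;  /* think: freeze protection */

static void *path_one(void *arg)
{
        pthread_mutex_lock(&a);         /* order: a then b */
        pthread_mutex_lock(&b);
        pthread_mutex_unlock(&b);
        pthread_mutex_unlock(&a);
        return arg;
}

static void *path_two(void *arg)
{
        pthread_mutex_lock(&b);         /* order: b then a -- the inversion */
        pthread_mutex_lock(&a);
        pthread_mutex_unlock(&a);
        pthread_mutex_unlock(&b);
        return arg;
}

Run both paths concurrently and each can end up holding one mutex while waiting forever for the other; the wrapper above removes the second ordering so only one lock order remains.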
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index e8ad2c2c77dd..150fdf3bc68d 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -481,6 +481,8 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
}
this = lookup_one_len(p->name, dir, p->len);
if (IS_ERR_OR_NULL(this) || !this->d_inode) {
+ /* Mark a stale entry */
+ p->is_whiteout = true;
if (IS_ERR(this)) {
err = PTR_ERR(this);
this = NULL;
@@ -776,6 +778,9 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
if (err)
goto out;
}
+ }
+ /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+ if (!p->is_whiteout) {
if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
break;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index bfd946a9ad01..678dee2a8228 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -32,6 +32,21 @@
#include "internal.h"
/*
+ * New pipe buffers will be restricted to this size while the user is exceeding
+ * their pipe buffer quota. The general pipe use case needs at least two
+ * buffers: one for data yet to be read, and one for new data. If this is less
+ * than two, then a write to a non-empty pipe may block even if the pipe is not
+ * full. This can occur with GNU make jobserver or similar uses of pipes as
+ * semaphores: multiple processes may be waiting to write tokens back to the
+ * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
+ *
+ * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
+ * own risk, namely: pipe writes to non-full pipes may block until the pipe is
+ * emptied.
+ */
+#define PIPE_MIN_DEF_BUFFERS 2
+
+/*
* The max size that a non-root user is allowed to grow the pipe. Can
* be set by root in /proc/sys/fs/pipe-max-size
*/
@@ -429,14 +444,11 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
#endif
/*
- * Only wake up if the pipe started out empty, since
- * otherwise there should be no readers waiting.
- *
* If it wasn't empty we try to merge new data into
* the last buffer.
*
* That naturally merges small writes, but it also
- * page-aligs the rest of the writes for large writes
+ * page-aligns the rest of the writes for large writes
* spanning multiple pages.
*/
head = pipe->head;
@@ -575,8 +587,11 @@ out:
* This is particularly important for small writes, because of
* how (for example) the GNU make jobserver uses small writes to
* wake up pending jobs
+ *
+ * Epoll nonsensically wants a wakeup whether the pipe
+ * was already empty or not.
*/
- if (was_empty) {
+ if (was_empty || pipe->poll_usage) {
wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
@@ -639,6 +654,9 @@ pipe_poll(struct file *filp, poll_table *wait)
struct pipe_inode_info *pipe = filp->private_data;
unsigned int head, tail;
+ /* Epoll has some historical nasty semantics, this enables them */
+ pipe->poll_usage = 1;
+
/*
* Reading pipe state only -- no need for acquiring the semaphore.
*
@@ -781,8 +799,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
- user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
- pipe_bufs = 1;
+ user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
+ pipe_bufs = PIPE_MIN_DEF_BUFFERS;
}
if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
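
The jobserver pattern the PIPE_MIN_DEF_BUFFERS comment refers to treats a pipe as a counting semaphore: one byte per token, taken before a job runs and written back afterwards. A minimal userspace sketch of that usage (not GNU make's implementation):

#include <unistd.h>

int main(void)
{
        int fds[2];
        char token = '+';

        if (pipe(fds))
                return 1;

        for (int i = 0; i < 4; i++)             /* hand out four job slots */
                if (write(fds[1], &token, 1) != 1)
                        return 1;

        if (read(fds[0], &token, 1) != 1)       /* worker takes a slot */
                return 1;
        if (write(fds[1], &token, 1) != 1)      /* worker returns it */
                return 1;

        /*
         * With only one pipe buffer, the "return it" write above could block
         * on a pipe that is not logically full; two buffers (the new
         * PIPE_MIN_DEF_BUFFERS) keep this pattern from stalling. Shrinking
         * the pipe further with F_SETPIPE_SZ reintroduces that risk.
         */
        close(fds[0]);
        close(fds[1]);
        return 0;
}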
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9cbd915025ad..e5b5f7709d48 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -854,7 +854,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
while (count > 0) {
- int this_len = min_t(int, count, PAGE_SIZE);
+ size_t this_len = min_t(size_t, count, PAGE_SIZE);
if (write && copy_from_user(page, buf, this_len)) {
copied = -EFAULT;
@@ -3172,7 +3172,7 @@ static const struct pid_entry tgid_base_stuff[] = {
DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
DIR("map_files", S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
- DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
+ DIR("fdinfo", S_IRUGO|S_IXUGO, proc_fdinfo_inode_operations, proc_fdinfo_operations),
DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
@@ -3517,7 +3517,7 @@ static const struct inode_operations proc_tid_comm_inode_operations = {
*/
static const struct pid_entry tid_base_stuff[] = {
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
- DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
+ DIR("fdinfo", S_IRUGO|S_IXUGO, proc_fdinfo_inode_operations, proc_fdinfo_operations),
DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
index ad31ec4ad627..6d8d4bf20837 100644
--- a/fs/proc/bootconfig.c
+++ b/fs/proc/bootconfig.c
@@ -49,7 +49,7 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
else
q = '"';
ret = snprintf(dst, rest(dst, end), "%c%s%c%s",
- q, val, q, vnode->next ? ", " : "\n");
+ q, val, q, xbc_node_is_array(vnode) ? ", " : "\n");
if (ret < 0)
goto out;
dst += ret;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 07fc4fad2602..172c86270b31 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -6,6 +6,7 @@
#include <linux/fdtable.h>
#include <linux/namei.h>
#include <linux/pid.h>
+#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/file.h>
#include <linux/seq_file.h>
@@ -53,9 +54,10 @@ static int seq_show(struct seq_file *m, void *v)
if (ret)
return ret;
- seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\n",
+ seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\nino:\t%lu\n",
(long long)file->f_pos, f_flags,
- real_mount(file->f_path.mnt)->mnt_id);
+ real_mount(file->f_path.mnt)->mnt_id,
+ file_inode(file)->i_ino);
/* show_fd_locks() never dereferences files so a stale value is safe */
show_fd_locks(m, file, files);
@@ -72,6 +74,18 @@ out:
static int seq_fdinfo_open(struct inode *inode, struct file *file)
{
+ bool allowed = false;
+ struct task_struct *task = get_proc_task(inode);
+
+ if (!task)
+ return -ESRCH;
+
+ allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+ put_task_struct(task);
+
+ if (!allowed)
+ return -EACCES;
+
return single_open(file, seq_show, inode);
}
@@ -308,7 +322,7 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
struct proc_inode *ei;
struct inode *inode;
- inode = proc_pid_make_inode(dentry->d_sb, task, S_IFREG | S_IRUSR);
+ inode = proc_pid_make_inode(dentry->d_sb, task, S_IFREG | S_IRUGO);
if (!inode)
return ERR_PTR(-ENOENT);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 4d2e64e9016c..982e694aae77 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -313,6 +313,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
char *buf = file->private_data;
size_t phdrs_offset, notes_offset, data_offset;
+ size_t page_offline_frozen = 1;
size_t phdrs_len, notes_len;
struct kcore_list *m;
size_t tsz;
@@ -322,6 +323,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
int ret = 0;
down_read(&kclist_lock);
+ /*
+ * Don't race against drivers that set PageOffline() and expect no
+ * further page access.
+ */
+ page_offline_freeze();
get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
phdrs_offset = sizeof(struct elfhdr);
@@ -380,11 +386,8 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R | PF_W | PF_X;
phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
- if (m->type == KCORE_REMAP)
- phdr->p_vaddr = (size_t)m->vaddr;
- else
- phdr->p_vaddr = (size_t)m->addr;
- if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
+ phdr->p_vaddr = (size_t)m->addr;
+ if (m->type == KCORE_RAM)
phdr->p_paddr = __pa(m->addr);
else if (m->type == KCORE_TEXT)
phdr->p_paddr = __pa_symbol(m->addr);
@@ -468,6 +471,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
m = NULL;
while (buflen) {
+ struct page *page;
+ unsigned long pfn;
+
/*
* If this is the first iteration or the address is not within
* the previous entry, search for a matching entry.
@@ -480,31 +486,57 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
}
}
+ if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
+ page_offline_thaw();
+ cond_resched();
+ page_offline_freeze();
+ }
+
if (&m->list == &kclist_head) {
if (clear_user(buffer, tsz)) {
ret = -EFAULT;
goto out;
}
m = NULL; /* skip the list anchor */
- } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
- if (clear_user(buffer, tsz)) {
- ret = -EFAULT;
- goto out;
- }
- } else if (m->type == KCORE_VMALLOC) {
+ goto skip;
+ }
+
+ switch (m->type) {
+ case KCORE_VMALLOC:
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
if (copy_to_user(buffer, buf, tsz)) {
ret = -EFAULT;
goto out;
}
- } else if (m->type == KCORE_USER) {
+ break;
+ case KCORE_USER:
/* User page is handled prior to normal kernel page: */
if (copy_to_user(buffer, (char *)start, tsz)) {
ret = -EFAULT;
goto out;
}
- } else {
+ break;
+ case KCORE_RAM:
+ pfn = __pa(start) >> PAGE_SHIFT;
+ page = pfn_to_online_page(pfn);
+
+ /*
+ * Don't read offline sections, logically offline pages
+ * (e.g., inflated in a balloon), hwpoisoned pages,
+ * and explicitly excluded physical ranges.
+ */
+ if (!page || PageOffline(page) ||
+ is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ break;
+ }
+ fallthrough;
+ case KCORE_VMEMMAP:
+ case KCORE_TEXT:
if (kern_addr_valid(start)) {
/*
* Using bounce buffer to bypass the
@@ -528,7 +560,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
goto out;
}
}
+ break;
+ default:
+ pr_warn_once("Unhandled KCORE type: %d\n", m->type);
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
+skip:
buflen -= tsz;
*fpos += tsz;
buffer += tsz;
@@ -537,6 +577,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
}
out:
+ page_offline_thaw();
up_read(&kclist_lock);
if (ret)
return ret;
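
Condensing the KCORE_RAM branch above: a page is only copied to the reader if it is online, not marked PageOffline(), not hwpoisoned, and still reported as RAM; everything else is zero-filled, and the whole walk is bracketed by page_offline_freeze()/page_offline_thaw(). A sketch of that predicate, where the wrapper function is made up but the helpers are the ones used in the hunk:

static bool kcore_pfn_readable(unsigned long pfn)
{
        struct page *page = pfn_to_online_page(pfn);

        if (!page)                      /* offline section or memory hole */
                return false;
        if (PageOffline(page))          /* e.g. inflated into a balloon */
                return false;
        if (is_page_hwpoison(page))     /* known-bad memory */
                return false;
        if (!pfn_is_ram(pfn))           /* explicitly excluded range */
                return false;
        return true;                    /* safe to copy to the reader */
}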
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index dea0f5ee540c..5d66faecd4ef 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1807,7 +1807,7 @@ static int process_sysctl_arg(char *param, char *val,
panic("%s: Failed to allocate path for %s\n", __func__, param);
strreplace(path, '.', '/');
- file = file_open_root((*proc_mnt)->mnt_root, *proc_mnt, path, O_WRONLY, 0);
+ file = file_open_root_mnt(*proc_mnt, path, O_WRONLY, 0);
if (IS_ERR(file)) {
err = PTR_ERR(file);
if (err == -ENOENT)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 66965ad88d8b..eb97468dfe4c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -514,10 +514,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
- } else if (is_migration_entry(swpent))
- page = migration_entry_to_page(swpent);
- else if (is_device_private_entry(swpent))
- page = device_private_entry_to_page(swpent);
+ } else if (is_pfn_swap_entry(swpent))
+ page = pfn_swap_entry_to_page(swpent);
} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
&& pte_none(*pte))) {
page = xa_load(&vma->vm_file->f_mapping->i_pages,
@@ -549,7 +547,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
swp_entry_t entry = pmd_to_swp_entry(*pmd);
if (is_migration_entry(entry))
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
}
if (IS_ERR_OR_NULL(page))
return;
@@ -694,10 +692,8 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
} else if (is_swap_pte(*pte)) {
swp_entry_t swpent = pte_to_swp_entry(*pte);
- if (is_migration_entry(swpent))
- page = migration_entry_to_page(swpent);
- else if (is_device_private_entry(swpent))
- page = device_private_entry_to_page(swpent);
+ if (is_pfn_swap_entry(swpent))
+ page = pfn_swap_entry_to_page(swpent);
}
if (page) {
int mapcount = page_mapcount(page);
@@ -832,7 +828,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %d\n",
- transparent_hugepage_enabled(vma));
+ transparent_hugepage_active(vma));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
@@ -1302,6 +1298,7 @@ struct pagemapread {
#define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
+#define PM_UFFD_WP BIT_ULL(57)
#define PM_FILE BIT_ULL(61)
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
@@ -1375,20 +1372,21 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
page = vm_normal_page(vma, addr, pte);
if (pte_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
+ if (pte_uffd_wp(pte))
+ flags |= PM_UFFD_WP;
} else if (is_swap_pte(pte)) {
swp_entry_t entry;
if (pte_swp_soft_dirty(pte))
flags |= PM_SOFT_DIRTY;
+ if (pte_swp_uffd_wp(pte))
+ flags |= PM_UFFD_WP;
entry = pte_to_swp_entry(pte);
if (pm->show_pfn)
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
- if (is_migration_entry(entry))
- page = migration_entry_to_page(entry);
-
- if (is_device_private_entry(entry))
- page = device_private_entry_to_page(entry);
+ if (is_pfn_swap_entry(entry))
+ page = pfn_swap_entry_to_page(entry);
}
if (page && !PageAnon(page))
@@ -1426,6 +1424,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
flags |= PM_PRESENT;
if (pmd_soft_dirty(pmd))
flags |= PM_SOFT_DIRTY;
+ if (pmd_uffd_wp(pmd))
+ flags |= PM_UFFD_WP;
if (pm->show_pfn)
frame = pmd_pfn(pmd) +
((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -1444,8 +1444,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
flags |= PM_SWAP;
if (pmd_swp_soft_dirty(pmd))
flags |= PM_SOFT_DIRTY;
+ if (pmd_swp_uffd_wp(pmd))
+ flags |= PM_UFFD_WP;
VM_BUG_ON(!is_pmd_migration_entry(pmd));
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
}
#endif
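
With bit 57 of each pagemap entry now reporting userfaultfd write protection, a userspace reader only needs to test one more bit. A small sketch assuming the documented /proc/<pid>/pagemap layout (one little-endian u64 per page, indexed by virtual page number); PM_UFFD_WP below mirrors the new kernel definition:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_SOFT_DIRTY      (1ULL << 55)
#define PM_MMAP_EXCLUSIVE  (1ULL << 56)
#define PM_UFFD_WP         (1ULL << 57)         /* the bit added above */
#define PM_FILE            (1ULL << 61)
#define PM_SWAP            (1ULL << 62)
#define PM_PRESENT         (1ULL << 63)

int main(void)
{
        uint64_t ent;
        long psize = sysconf(_SC_PAGESIZE);
        unsigned long vaddr = (unsigned long)&ent;      /* any mapped address */
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 1;
        /* one 64-bit entry per page, indexed by virtual page number */
        if (pread(fd, &ent, sizeof(ent), (off_t)(vaddr / psize) * 8) != sizeof(ent))
                return 1;
        printf("present=%d swap=%d uffd-wp=%d\n",
               !!(ent & PM_PRESENT), !!(ent & PM_SWAP), !!(ent & PM_UFFD_WP));
        close(fd);
        return 0;
}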
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 476a7ff49482..ef42729216d1 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path)
search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
+static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih)
+{
+ struct reiserfs_de_head *deh;
+ int i;
+
+ deh = B_I_DEH(bh, ih);
+ for (i = 0; i < ih_entry_count(ih); i++) {
+ if (deh_location(&deh[i]) > ih_item_len(ih)) {
+ reiserfs_warning(NULL, "reiserfs-5094",
+ "directory entry location seems wrong %h",
+ &deh[i]);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
{
struct block_head *blkh;
@@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
"(second one): %h", ih);
return 0;
}
- if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
- reiserfs_warning(NULL, "reiserfs-5093",
- "item entry count seems wrong %h",
- ih);
- return 0;
+ if (is_direntry_le_ih(ih)) {
+ if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) {
+ reiserfs_warning(NULL, "reiserfs-5093",
+ "item entry count seems wrong %h",
+ ih);
+ return 0;
+ }
+ return has_valid_deh_location(bh, ih);
}
prev_location = ih_location(ih);
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 3ffafc73acf0..58481f8d63d5 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
unlock_new_inode(root_inode);
}
+ if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) ||
+ !root_inode->i_size) {
+ SWARN(silent, s, "", "corrupt root inode, run fsck");
+ iput(root_inode);
+ errval = -EUCLEAN;
+ goto error;
+ }
+
s->s_root = d_make_root(root_inode);
if (!s->s_root)
goto error;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 5059248f2d64..4a2cda04d3e2 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -32,6 +32,9 @@ static void seq_set_overflow(struct seq_file *m)
static void *seq_buf_alloc(unsigned long size)
{
+ if (unlikely(size > MAX_RW_COUNT))
+ return NULL;
+
return kvmalloc(size, GFP_KERNEL_ACCOUNT);
}
@@ -356,6 +359,31 @@ int seq_release(struct inode *inode, struct file *file)
EXPORT_SYMBOL(seq_release);
/**
+ * seq_escape_mem - print data into buffer, escaping some characters
+ * @m: target buffer
+ * @src: source buffer
+ * @len: size of source buffer
+ * @flags: flags to pass to string_escape_mem()
+ * @esc: set of characters that need escaping
+ *
+ * Puts data into the buffer, replacing each occurrence of a character
+ * from the given class (defined by @flags and @esc) with a printable
+ * escape sequence.
+ *
+ * Use seq_has_overflowed() to check for errors.
+ */
+void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
+ unsigned int flags, const char *esc)
+{
+ char *buf;
+ size_t size = seq_get_buf(m, &buf);
+ int ret;
+
+ ret = string_escape_mem(src, len, buf, size, flags, esc);
+ seq_commit(m, ret < size ? ret : -1);
+}
+EXPORT_SYMBOL(seq_escape_mem);
+
+/**
* seq_escape - print string into buffer, escaping some characters
* @m: target buffer
* @s: string
@@ -367,26 +395,10 @@ EXPORT_SYMBOL(seq_release);
*/
void seq_escape(struct seq_file *m, const char *s, const char *esc)
{
- char *buf;
- size_t size = seq_get_buf(m, &buf);
- int ret;
-
- ret = string_escape_str(s, buf, size, ESCAPE_OCTAL, esc);
- seq_commit(m, ret < size ? ret : -1);
+ seq_escape_str(m, s, ESCAPE_OCTAL, esc);
}
EXPORT_SYMBOL(seq_escape);
-void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz)
-{
- char *buf;
- size_t size = seq_get_buf(m, &buf);
- int ret;
-
- ret = string_escape_mem_ascii(src, isz, buf, size);
- seq_commit(m, ret < size ? ret : -1);
-}
-EXPORT_SYMBOL(seq_escape_mem_ascii);
-
void seq_vprintf(struct seq_file *m, const char *f, va_list args)
{
int len;
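
A hypothetical seq_file show() callback using the seq_escape_mem() helper introduced above; the callback and its data are made up, while seq_escape_mem(), seq_puts(), seq_putc() and ESCAPE_OCTAL are the interfaces visible in the hunk and in string_helpers.h:

#include <linux/seq_file.h>
#include <linux/string_helpers.h>

static int demo_show(struct seq_file *m, void *v)
{
        static const char raw[] = "tab\there, newline\nthere";

        seq_puts(m, "escaped:\t");
        /* octal-escape every '\t' and '\n' while printing the buffer */
        seq_escape_mem(m, raw, sizeof(raw) - 1, ESCAPE_OCTAL, "\t\n");
        seq_putc(m, '\n');
        return 0;
}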
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 1bbb9fe661b1..fc718f6178f2 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2824,7 +2824,7 @@ void dbg_debugfs_init_fs(struct ubifs_info *c)
n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
c->vi.ubi_num, c->vi.vol_id);
- if (n == UBIFS_DFS_DIR_LEN) {
+ if (n > UBIFS_DFS_DIR_LEN) {
/* The array size is too small */
return;
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 5bd8482e660a..7c61d0ec0159 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1337,7 +1337,10 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_release;
}
+ spin_lock(&whiteout->i_lock);
whiteout->i_state |= I_LINKABLE;
+ spin_unlock(&whiteout->i_lock);
+
whiteout_ui = ubifs_inode(whiteout);
whiteout_ui->data = dev;
whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
@@ -1430,7 +1433,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
inc_nlink(whiteout);
mark_inode_dirty(whiteout);
+
+ spin_lock(&whiteout->i_lock);
whiteout->i_state &= ~I_LINKABLE;
+ spin_unlock(&whiteout->i_lock);
+
iput(whiteout);
}
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 2857e64d673d..8ea680dba61e 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -882,6 +882,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
struct ubifs_dent_node *xent, *pxent = NULL;
if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
+ err = -EPERM;
ubifs_err(c, "Cannot delete inode, it has too much xattrs!");
goto out_release;
}
@@ -1431,7 +1432,7 @@ out_free:
/**
* truncate_data_node - re-compress/encrypt a truncated data node.
* @c: UBIFS file-system description object
- * @inode: inode which referes to the data node
+ * @inode: inode which refers to the data node
* @block: data block number
* @dn: data node to re-compress
* @new_len: new length
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 0df9a3dd0aaa..7adc37c10b6a 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -37,7 +37,7 @@ int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2)
return ret;
/*
- * Do not compare the embedded HMAC aswell which also must be different
+ * Do not compare the embedded HMAC as well which also must be different
* due to the different common node header.
*/
behind = hmac_offs + UBIFS_MAX_HMAC_LEN;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 382a54c82930..5260d3e531bb 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -296,7 +296,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
* @b: second replay entry
*
* This is a comparison function for 'list_sort()' which compares 2 replay
- * entries @a and @b by comparing their sequence numer. Returns %1 if @a has
+ * entries @a and @b by comparing their sequence number. Returns %1 if @a has
* greater sequence number and %-1 otherwise.
*/
static int replay_entries_cmp(void *priv, const struct list_head *a,
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 7b572e1414ba..f0fb25727d96 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -275,6 +275,7 @@ static struct inode *ubifs_alloc_inode(struct super_block *sb)
memset((void *)ui + sizeof(struct inode), 0,
sizeof(struct ubifs_inode) - sizeof(struct inode));
mutex_init(&ui->ui_mutex);
+ init_rwsem(&ui->xattr_sem);
spin_lock_init(&ui->ui_lock);
return &ui->vfs_inode;
};
@@ -2060,7 +2061,7 @@ const struct super_operations ubifs_super_operations = {
* @mode: UBI volume open mode
*
* The primary method of mounting UBIFS is by specifying the UBI volume
- * character device node path. However, UBIFS may also be mounted withoug any
+ * character device node path. However, UBIFS may also be mounted without any
* character device node using one of the following methods:
*
* o ubiX_Y - mount UBI device number X, volume Y;
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 234be1c4dc87..58c92c96ecef 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -930,7 +930,7 @@ static int write_index(struct ubifs_info *c)
* flag cleared before %COW_ZNODE. Specifically, it matters in
* the 'dirty_cow_znode()' function. This is the reason for the
* first barrier. Also, we want the bit changes to be seen to
- * other threads ASAP, to avoid unnecesarry copying, which is
+ * other threads ASAP, to avoid unnecessary copying, which is
* the reason for the second barrier.
*/
clear_bit(DIRTY_ZNODE, &znode->flags);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index b65c599a386a..c38066ce9ab0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -356,6 +356,7 @@ struct ubifs_gced_idx_leb {
* @ui_mutex: serializes inode write-back with the rest of VFS operations,
* serializes "clean <-> dirty" state changes, serializes bulk-read,
* protects @dirty, @bulk_read, @ui_size, and @xattr_size
+ * @xattr_sem: serializes write operations (remove|set|create) on xattr
* @ui_lock: protects @synced_i_size
* @synced_i_size: synchronized size of inode, i.e. the value of inode size
* currently stored on the flash; used only for regular file
@@ -409,6 +410,7 @@ struct ubifs_inode {
unsigned int bulk_read:1;
unsigned int compr_type:2;
struct mutex ui_mutex;
+ struct rw_semaphore xattr_sem;
spinlock_t ui_lock;
loff_t synced_i_size;
loff_t ui_size;
@@ -912,7 +914,7 @@ struct ubifs_budget_req {
* @rb: rb-tree node of rb-tree of orphans sorted by inode number
* @list: list head of list of orphans in order added
* @new_list: list head of list of orphans added since the last commit
- * @child_list: list of xattr childs if this orphan hosts xattrs, list head
+ * @child_list: list of xattr children if this orphan hosts xattrs, list head
* if this orphan is a xattr, not used otherwise.
* @cnext: next orphan to commit
* @dnext: next orphan to delete
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6b1e9830b274..e4f193eae4b2 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -208,13 +208,11 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
err = -ENOMEM;
goto out_free;
}
- mutex_lock(&ui->ui_mutex);
kfree(ui->data);
ui->data = buf;
inode->i_size = ui->ui_size = size;
old_size = ui->data_len;
ui->data_len = size;
- mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
host->i_ctime = current_time(host);
@@ -285,6 +283,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
if (!xent)
return -ENOMEM;
+ down_write(&ubifs_inode(host)->xattr_sem);
/*
* The extended attribute entries are stored in LNC, so multiple
* look-ups do not involve reading the flash.
@@ -319,6 +318,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
iput(inode);
out_free:
+ up_write(&ubifs_inode(host)->xattr_sem);
kfree(xent);
return err;
}
@@ -341,25 +341,25 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
if (!xent)
return -ENOMEM;
+ down_read(&ubifs_inode(host)->xattr_sem);
xent_key_init(c, &key, host->i_ino, &nm);
err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
if (err) {
if (err == -ENOENT)
err = -ENODATA;
- goto out_unlock;
+ goto out_cleanup;
}
inode = iget_xattr(c, le64_to_cpu(xent->inum));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- goto out_unlock;
+ goto out_cleanup;
}
ui = ubifs_inode(inode);
ubifs_assert(c, inode->i_size == ui->data_len);
ubifs_assert(c, ubifs_inode(host)->xattr_size > ui->data_len);
- mutex_lock(&ui->ui_mutex);
if (buf) {
/* If @buf is %NULL we are supposed to return the length */
if (ui->data_len > size) {
@@ -372,9 +372,9 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
err = ui->data_len;
out_iput:
- mutex_unlock(&ui->ui_mutex);
iput(inode);
-out_unlock:
+out_cleanup:
+ up_read(&ubifs_inode(host)->xattr_sem);
kfree(xent);
return err;
}
@@ -406,16 +406,21 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
dentry, size);
+ down_read(&host_ui->xattr_sem);
len = host_ui->xattr_names + host_ui->xattr_cnt;
- if (!buffer)
+ if (!buffer) {
/*
* We should return the minimum buffer size which will fit a
* null-terminated list of all the extended attribute names.
*/
- return len;
+ err = len;
+ goto out_err;
+ }
- if (len > size)
- return -ERANGE;
+ if (len > size) {
+ err = -ERANGE;
+ goto out_err;
+ }
lowest_xent_key(c, &key, host->i_ino);
while (1) {
@@ -437,8 +442,9 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
pxent = xent;
key_read(c, &xent->key, &key);
}
-
kfree(pxent);
+ up_read(&host_ui->xattr_sem);
+
if (err != -ENOENT) {
ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
@@ -446,6 +452,10 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
ubifs_assert(c, written <= size);
return written;
+
+out_err:
+ up_read(&host_ui->xattr_sem);
+ return err;
}
static int remove_xattr(struct ubifs_info *c, struct inode *host,
@@ -504,6 +514,7 @@ int ubifs_purge_xattrs(struct inode *host)
ubifs_warn(c, "inode %lu has too many xattrs, doing a non-atomic deletion",
host->i_ino);
+ down_write(&ubifs_inode(host)->xattr_sem);
lowest_xent_key(c, &key, host->i_ino);
while (1) {
xent = ubifs_tnc_next_ent(c, &key, &nm);
@@ -523,7 +534,7 @@ int ubifs_purge_xattrs(struct inode *host)
ubifs_ro_mode(c, err);
kfree(pxent);
kfree(xent);
- return err;
+ goto out_err;
}
ubifs_assert(c, ubifs_inode(xino)->xattr);
@@ -535,7 +546,7 @@ int ubifs_purge_xattrs(struct inode *host)
kfree(xent);
iput(xino);
ubifs_err(c, "cannot remove xattr, error %d", err);
- return err;
+ goto out_err;
}
iput(xino);
@@ -544,14 +555,19 @@ int ubifs_purge_xattrs(struct inode *host)
pxent = xent;
key_read(c, &xent->key, &key);
}
-
kfree(pxent);
+ up_write(&ubifs_inode(host)->xattr_sem);
+
if (err != -ENOENT) {
ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
}
return 0;
+
+out_err:
+ up_write(&ubifs_inode(host)->xattr_sem);
+ return err;
}
/**
@@ -594,6 +610,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
if (!xent)
return -ENOMEM;
+ down_write(&ubifs_inode(host)->xattr_sem);
xent_key_init(c, &key, host->i_ino, &nm);
err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
if (err) {
@@ -618,6 +635,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
iput(inode);
out_free:
+ up_write(&ubifs_inode(host)->xattr_sem);
kfree(xent);
return err;
}
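
The ubifs xattr changes above replace the per-xattr-inode ui_mutex protection with a host-wide rw_semaphore: get/list take it shared, set/remove/purge take it exclusive. A generic sketch of that shape with made-up names (the real semaphore is initialised in ubifs_alloc_inode(), as the super.c hunk shows):

#include <linux/rwsem.h>

struct demo_host {
        struct rw_semaphore xattr_sem;  /* init_rwsem() at allocation time */
};

static int demo_xattr_get(struct demo_host *h)
{
        int err = 0;

        down_read(&h->xattr_sem);       /* getters/listers may run in parallel */
        /* ... look the attribute up ... */
        up_read(&h->xattr_sem);
        return err;
}

static int demo_xattr_set(struct demo_host *h)
{
        int err = 0;

        down_write(&h->xattr_sem);      /* set/remove/purge are exclusive */
        /* ... modify the attribute ... */
        up_write(&h->xattr_sem);
        return err;
}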
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index dd7a6c62b56f..5c2d806e6ae5 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1236,23 +1236,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
}
static __always_inline int validate_range(struct mm_struct *mm,
- __u64 *start, __u64 len)
+ __u64 start, __u64 len)
{
__u64 task_size = mm->task_size;
- *start = untagged_addr(*start);
-
- if (*start & ~PAGE_MASK)
+ if (start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
- if (*start < mmap_min_addr)
+ if (start < mmap_min_addr)
return -EINVAL;
- if (*start >= task_size)
+ if (start >= task_size)
return -EINVAL;
- if (len > task_size - *start)
+ if (len > task_size - start)
return -EINVAL;
return 0;
}
@@ -1267,8 +1265,7 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
}
if (vm_flags & VM_UFFD_MINOR) {
- /* FIXME: Add minor fault interception for shmem. */
- if (!is_vm_hugetlb_page(vma))
+ if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma)))
return false;
}
@@ -1304,8 +1301,12 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vm_flags = 0;
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
vm_flags |= VM_UFFD_MISSING;
- if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)
+ if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
+#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+ goto out;
+#endif
vm_flags |= VM_UFFD_WP;
+ }
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
goto out;
@@ -1313,7 +1314,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vm_flags |= VM_UFFD_MINOR;
}
- ret = validate_range(mm, &uffdio_register.range.start,
+ ret = validate_range(mm, uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
@@ -1519,7 +1520,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
- ret = validate_range(mm, &uffdio_unregister.start,
+ ret = validate_range(mm, uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
@@ -1668,7 +1669,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
- ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
+ ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
@@ -1708,7 +1709,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
+ ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
@@ -1765,7 +1766,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
+ ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;
@@ -1815,7 +1816,7 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
sizeof(struct uffdio_writeprotect)))
return -EFAULT;
- ret = validate_range(ctx->mm, &uffdio_wp.range.start,
+ ret = validate_range(ctx->mm, uffdio_wp.range.start,
uffdio_wp.range.len);
if (ret)
return ret;
@@ -1863,7 +1864,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
sizeof(uffdio_continue) - (sizeof(__s64))))
goto out;
- ret = validate_range(ctx->mm, &uffdio_continue.range.start,
+ ret = validate_range(ctx->mm, uffdio_continue.range.start,
uffdio_continue.range.len);
if (ret)
goto out;
@@ -1941,7 +1942,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
- uffdio_api.features &= ~UFFD_FEATURE_MINOR_HUGETLBFS;
+ uffdio_api.features &=
+ ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
+#endif
+#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+ uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
#endif
uffdio_api.ioctls = UFFD_API_IOCTLS;
ret = -EFAULT;
diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c
index eac6788fc6cf..c4769a9396c5 100644
--- a/fs/vboxsf/dir.c
+++ b/fs/vboxsf/dir.c
@@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
}
static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
- umode_t mode, int is_dir)
+ umode_t mode, bool is_dir, bool excl, u64 *handle_ret)
{
struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
@@ -261,10 +261,12 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
int err;
params.handle = SHFL_HANDLE_NIL;
- params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
- SHFL_CF_ACT_FAIL_IF_EXISTS |
- SHFL_CF_ACCESS_READWRITE |
- (is_dir ? SHFL_CF_DIRECTORY : 0);
+ params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE;
+ if (is_dir)
+ params.create_flags |= SHFL_CF_DIRECTORY;
+ if (excl)
+ params.create_flags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
+
params.info.attr.mode = (mode & 0777) |
(is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
@@ -276,30 +278,81 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
if (params.result != SHFL_FILE_CREATED)
return -EPERM;
- vboxsf_close(sbi->root, params.handle);
-
err = vboxsf_dir_instantiate(parent, dentry, &params.info);
if (err)
- return err;
+ goto out;
/* parent directory access/change time changed */
sf_parent_i->force_restat = 1;
- return 0;
+out:
+ if (err == 0 && handle_ret)
+ *handle_ret = params.handle;
+ else
+ vboxsf_close(sbi->root, params.handle);
+
+ return err;
}
static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns,
struct inode *parent, struct dentry *dentry,
umode_t mode, bool excl)
{
- return vboxsf_dir_create(parent, dentry, mode, 0);
+ return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL);
}
static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns,
struct inode *parent, struct dentry *dentry,
umode_t mode)
{
- return vboxsf_dir_create(parent, dentry, mode, 1);
+ return vboxsf_dir_create(parent, dentry, mode, true, true, NULL);
+}
+
+static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry,
+ struct file *file, unsigned int flags, umode_t mode)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ struct vboxsf_handle *sf_handle;
+ struct dentry *res = NULL;
+ u64 handle;
+ int err;
+
+ if (d_in_lookup(dentry)) {
+ res = vboxsf_dir_lookup(parent, dentry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (res)
+ dentry = res;
+ }
+
+ /* Only creates */
+ if (!(flags & O_CREAT) || d_really_is_positive(dentry))
+ return finish_no_open(file, res);
+
+ err = vboxsf_dir_create(parent, dentry, mode, false, flags & O_EXCL, &handle);
+ if (err)
+ goto out;
+
+ sf_handle = vboxsf_create_sf_handle(d_inode(dentry), handle, SHFL_CF_ACCESS_READWRITE);
+ if (IS_ERR(sf_handle)) {
+ vboxsf_close(sbi->root, handle);
+ err = PTR_ERR(sf_handle);
+ goto out;
+ }
+
+ err = finish_open(file, dentry, generic_file_open);
+ if (err) {
+ /* This also closes the handle passed to vboxsf_create_sf_handle() */
+ vboxsf_release_sf_handle(d_inode(dentry), sf_handle);
+ goto out;
+ }
+
+ file->private_data = sf_handle;
+ file->f_mode |= FMODE_CREATED;
+out:
+ dput(res);
+ return err;
}
static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
@@ -422,6 +475,7 @@ const struct inode_operations vboxsf_dir_iops = {
.lookup = vboxsf_dir_lookup,
.create = vboxsf_dir_mkfile,
.mkdir = vboxsf_dir_mkdir,
+ .atomic_open = vboxsf_dir_atomic_open,
.rmdir = vboxsf_dir_unlink,
.unlink = vboxsf_dir_unlink,
.rename = vboxsf_dir_rename,
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index c4ab5996d97a..864c2fad23be 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -20,17 +20,39 @@ struct vboxsf_handle {
struct list_head head;
};
-static int vboxsf_file_open(struct inode *inode, struct file *file)
+struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
+ u64 handle, u32 access_flags)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct shfl_createparms params = {};
struct vboxsf_handle *sf_handle;
- u32 access_flags = 0;
- int err;
sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
if (!sf_handle)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ /* the host may have given us different attr than requested */
+ sf_i->force_restat = 1;
+
+ /* init our handle struct and add it to the inode's handles list */
+ sf_handle->handle = handle;
+ sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
+ sf_handle->access_flags = access_flags;
+ kref_init(&sf_handle->refcount);
+
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_add(&sf_handle->head, &sf_i->handle_list);
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ return sf_handle;
+}
+
+static int vboxsf_file_open(struct inode *inode, struct file *file)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
+ struct shfl_createparms params = {};
+ struct vboxsf_handle *sf_handle;
+ u32 access_flags = 0;
+ int err;
/*
* We check the value of params.handle afterwards to find out if
@@ -83,23 +105,14 @@ static int vboxsf_file_open(struct inode *inode, struct file *file)
err = vboxsf_create_at_dentry(file_dentry(file), &params);
if (err == 0 && params.handle == SHFL_HANDLE_NIL)
err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
- if (err) {
- kfree(sf_handle);
+ if (err)
return err;
- }
-
- /* the host may have given us different attr then requested */
- sf_i->force_restat = 1;
- /* init our handle struct and add it to the inode's handles list */
- sf_handle->handle = params.handle;
- sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
- sf_handle->access_flags = access_flags;
- kref_init(&sf_handle->refcount);
-
- mutex_lock(&sf_i->handle_list_mutex);
- list_add(&sf_handle->head, &sf_i->handle_list);
- mutex_unlock(&sf_i->handle_list_mutex);
+ sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags);
+ if (IS_ERR(sf_handle)) {
+ vboxsf_close(sbi->root, params.handle);
+ return PTR_ERR(sf_handle);
+ }
file->private_data = sf_handle;
return 0;
@@ -114,22 +127,26 @@ static void vboxsf_handle_release(struct kref *refcount)
kfree(sf_handle);
}
-static int vboxsf_file_release(struct inode *inode, struct file *file)
+void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
- struct vboxsf_handle *sf_handle = file->private_data;
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_del(&sf_handle->head);
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ kref_put(&sf_handle->refcount, vboxsf_handle_release);
+}
+
+static int vboxsf_file_release(struct inode *inode, struct file *file)
+{
/*
* When a file is closed on our (the guest) side, we want any subsequent
* accesses done on the host side to see all changes done from our side.
*/
filemap_write_and_wait(inode->i_mapping);
- mutex_lock(&sf_i->handle_list_mutex);
- list_del(&sf_handle->head);
- mutex_unlock(&sf_i->handle_list_mutex);
-
- kref_put(&sf_handle->refcount, vboxsf_handle_release);
+ vboxsf_release_sf_handle(inode, file->private_data);
return 0;
}
diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h
index 6a7a9cedebc6..9047befa66c5 100644
--- a/fs/vboxsf/vfsmod.h
+++ b/fs/vboxsf/vfsmod.h
@@ -18,6 +18,8 @@
#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
+struct vboxsf_handle;
+
struct vboxsf_options {
unsigned long ttl;
kuid_t uid;
@@ -80,6 +82,11 @@ extern const struct file_operations vboxsf_reg_fops;
extern const struct address_space_operations vboxsf_reg_aops;
extern const struct dentry_operations vboxsf_dentry_ops;
+/* from file.c */
+struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
+ u64 handle, u32 access_flags);
+void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle);
+
/* from utils.c */
struct inode *vboxsf_new_inode(struct super_block *sb);
int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index c68a36688474..ee9ec0c50bec 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -27,6 +27,276 @@
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
+#include "xfs_trace.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+
+
+/*
+ * Passive reference counting access wrappers to the perag structures. If the
+ * per-ag structure is to be freed, the freeing code is responsible for cleaning
+ * up objects with passive references before freeing the structure. This is
+ * things like cached buffers.
+ */
+struct xfs_perag *
+xfs_perag_get(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+ int ref = 0;
+
+ rcu_read_lock();
+ pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ if (pag) {
+ ASSERT(atomic_read(&pag->pag_ref) >= 0);
+ ref = atomic_inc_return(&pag->pag_ref);
+ }
+ rcu_read_unlock();
+ trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+ return pag;
+}
+
+/*
+ * search from @first to find the next perag with the given tag set.
+ */
+struct xfs_perag *
+xfs_perag_get_tag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t first,
+ unsigned int tag)
+{
+ struct xfs_perag *pag;
+ int found;
+ int ref;
+
+ rcu_read_lock();
+ found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+ (void **)&pag, first, 1, tag);
+ if (found <= 0) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ ref = atomic_inc_return(&pag->pag_ref);
+ rcu_read_unlock();
+ trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
+ return pag;
+}
+
+void
+xfs_perag_put(
+ struct xfs_perag *pag)
+{
+ int ref;
+
+ ASSERT(atomic_read(&pag->pag_ref) > 0);
+ ref = atomic_dec_return(&pag->pag_ref);
+ trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+}
+
+/*
+ * xfs_initialize_perag_data
+ *
+ * Read in each per-ag structure so we can count up the number of
+ * allocated inodes, free inodes and used filesystem blocks as this
+ * information is no longer persistent in the superblock. Once we have
+ * this information, write it into the in-core superblock structure.
+ */
+int
+xfs_initialize_perag_data(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agcount)
+{
+ xfs_agnumber_t index;
+ struct xfs_perag *pag;
+ struct xfs_sb *sbp = &mp->m_sb;
+ uint64_t ifree = 0;
+ uint64_t ialloc = 0;
+ uint64_t bfree = 0;
+ uint64_t bfreelst = 0;
+ uint64_t btree = 0;
+ uint64_t fdblocks;
+ int error = 0;
+
+ for (index = 0; index < agcount; index++) {
+ /*
+ * read the agf, then the agi. This gets us
+ * all the information we need and populates the
+ * per-ag structures for us.
+ */
+ error = xfs_alloc_pagf_init(mp, NULL, index, 0);
+ if (error)
+ return error;
+
+ error = xfs_ialloc_pagi_init(mp, NULL, index);
+ if (error)
+ return error;
+ pag = xfs_perag_get(mp, index);
+ ifree += pag->pagi_freecount;
+ ialloc += pag->pagi_count;
+ bfree += pag->pagf_freeblks;
+ bfreelst += pag->pagf_flcount;
+ btree += pag->pagf_btreeblks;
+ xfs_perag_put(pag);
+ }
+ fdblocks = bfree + bfreelst + btree;
+
+ /*
+ * If the new summary counts are obviously incorrect, fail the
+ * mount operation because that implies the AGFs are also corrupt.
+ * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
+ * will prevent xfs_repair from fixing anything.
+ */
+ if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
+ xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /* Overwrite incore superblock counters with just-read data */
+ spin_lock(&mp->m_sb_lock);
+ sbp->sb_ifree = ifree;
+ sbp->sb_icount = ialloc;
+ sbp->sb_fdblocks = fdblocks;
+ spin_unlock(&mp->m_sb_lock);
+
+ xfs_reinit_percpu_counters(mp);
+out:
+ xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
+ return error;
+}
+
+STATIC void
+__xfs_free_perag(
+ struct rcu_head *head)
+{
+ struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+ ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+ kmem_free(pag);
+}
+
+/*
+ * Free up the per-ag resources associated with the mount structure.
+ */
+void
+xfs_free_perag(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ spin_lock(&mp->m_perag_lock);
+ pag = radix_tree_delete(&mp->m_perag_tree, agno);
+ spin_unlock(&mp->m_perag_lock);
+ ASSERT(pag);
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+
+ cancel_delayed_work_sync(&pag->pag_blockgc_work);
+ xfs_iunlink_destroy(pag);
+ xfs_buf_hash_destroy(pag);
+
+ call_rcu(&pag->rcu_head, __xfs_free_perag);
+ }
+}
+
+int
+xfs_initialize_perag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agcount,
+ xfs_agnumber_t *maxagi)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t index;
+ xfs_agnumber_t first_initialised = NULLAGNUMBER;
+ int error;
+
+ /*
+ * Walk the current per-ag tree so we don't try to initialise AGs
+ * that already exist (growfs case). Allocate and insert all the
+ * AGs we don't find ready for initialisation.
+ */
+ for (index = 0; index < agcount; index++) {
+ pag = xfs_perag_get(mp, index);
+ if (pag) {
+ xfs_perag_put(pag);
+ continue;
+ }
+
+ pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+ if (!pag) {
+ error = -ENOMEM;
+ goto out_unwind_new_pags;
+ }
+ pag->pag_agno = index;
+ pag->pag_mount = mp;
+
+ error = radix_tree_preload(GFP_NOFS);
+ if (error)
+ goto out_free_pag;
+
+ spin_lock(&mp->m_perag_lock);
+ if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
+ WARN_ON_ONCE(1);
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+ error = -EEXIST;
+ goto out_free_pag;
+ }
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+
+ /* Place kernel structure only init below this point. */
+ spin_lock_init(&pag->pag_ici_lock);
+ spin_lock_init(&pag->pagb_lock);
+ spin_lock_init(&pag->pag_state_lock);
+ INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
+ INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+ init_waitqueue_head(&pag->pagb_wait);
+ pag->pagb_count = 0;
+ pag->pagb_tree = RB_ROOT;
+
+ error = xfs_buf_hash_init(pag);
+ if (error)
+ goto out_remove_pag;
+
+ error = xfs_iunlink_init(pag);
+ if (error)
+ goto out_hash_destroy;
+
+ /* first new pag is fully initialized */
+ if (first_initialised == NULLAGNUMBER)
+ first_initialised = index;
+ }
+
+ index = xfs_set_inode_alloc(mp, agcount);
+
+ if (maxagi)
+ *maxagi = index;
+
+ mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
+ return 0;
+
+out_hash_destroy:
+ xfs_buf_hash_destroy(pag);
+out_remove_pag:
+ radix_tree_delete(&mp->m_perag_tree, index);
+out_free_pag:
+ kmem_free(pag);
+out_unwind_new_pags:
+ /* unwind any prior newly initialized pags */
+ for (index = first_initialised; index < agcount; index++) {
+ pag = radix_tree_delete(&mp->m_perag_tree, index);
+ if (!pag)
+ break;
+ xfs_buf_hash_destroy(pag);
+ xfs_iunlink_destroy(pag);
+ kmem_free(pag);
+ }
+ return error;
+}
static int
xfs_get_aghdr_buf(
@@ -43,7 +313,6 @@ xfs_get_aghdr_buf(
if (error)
return error;
- xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
bp->b_bn = blkno;
bp->b_maps[0].bm_bn = blkno;
bp->b_ops = ops;
@@ -510,6 +779,7 @@ xfs_ag_shrink_space(
struct xfs_buf *agibp, *agfbp;
struct xfs_agi *agi;
struct xfs_agf *agf;
+ xfs_agblock_t aglen;
int error, err2;
ASSERT(agno == mp->m_sb.sb_agcount - 1);
@@ -524,14 +794,22 @@ xfs_ag_shrink_space(
return error;
agf = agfbp->b_addr;
+ aglen = be32_to_cpu(agi->agi_length);
/* some extra paranoid checks before we shrink the ag */
if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
return -EFSCORRUPTED;
- if (delta >= agi->agi_length)
+ if (delta >= aglen)
return -EINVAL;
- args.fsbno = XFS_AGB_TO_FSB(mp, agno,
- be32_to_cpu(agi->agi_length) - delta);
+ args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);
+
+ /*
+ * Make sure that the last inode cluster cannot overlap with the new
+ * end of the AG, even if it's sparse.
+ */
+ error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta);
+ if (error)
+ return error;
/*
* Disable perag reservations so it doesn't cause the allocation request
@@ -646,7 +924,7 @@ xfs_ag_extend_space(
* XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
* this doesn't actually exist in the rmap btree.
*/
- error = xfs_rmap_free(tp, bp, id->agno,
+ error = xfs_rmap_free(tp, bp, bp->b_pag,
be32_to_cpu(agf->agf_length) - len,
len, &XFS_RMAP_OINFO_SKIP_UPDATE);
if (error)
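
Typical use of the passive reference counting helpers moved into xfs_ag.c above: look the perag up (which takes a reference under RCU), read what is needed, and drop the reference promptly. The function below is made up for illustration:

static xfs_extlen_t demo_ag_free_blocks(struct xfs_mount *mp, xfs_agnumber_t agno)
{
        struct xfs_perag *pag;
        xfs_extlen_t freeblks = 0;

        pag = xfs_perag_get(mp, agno);          /* takes a passive reference */
        if (!pag)
                return 0;
        if (pag->pagf_init)
                freeblks = pag->pagf_freeblks;
        xfs_perag_put(pag);                     /* drop it as soon as possible */
        return freeblks;
}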
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index 4535de1d88ea..4c6f9045baca 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -9,6 +9,142 @@
struct xfs_mount;
struct xfs_trans;
+struct xfs_perag;
+
+/*
+ * Per-ag infrastructure
+ */
+
+/* per-AG block reservation data structures */
+struct xfs_ag_resv {
+ /* number of blocks originally reserved here */
+ xfs_extlen_t ar_orig_reserved;
+ /* number of blocks reserved here */
+ xfs_extlen_t ar_reserved;
+ /* number of blocks originally asked for */
+ xfs_extlen_t ar_asked;
+};
+
+/*
+ * Per-ag incore structure, copies of information in agf and agi, to improve the
+ * performance of allocation group selection.
+ */
+struct xfs_perag {
+ struct xfs_mount *pag_mount; /* owner filesystem */
+ xfs_agnumber_t pag_agno; /* AG this structure belongs to */
+ atomic_t pag_ref; /* perag reference count */
+ char pagf_init; /* this agf's entry is initialized */
+ char pagi_init; /* this agi's entry is initialized */
+ char pagf_metadata; /* the agf is preferred to be metadata */
+ char pagi_inodeok; /* The agi is ok for inodes */
+ uint8_t pagf_levels[XFS_BTNUM_AGF];
+ /* # of levels in bno & cnt btree */
+ bool pagf_agflreset; /* agfl requires reset before use */
+ uint32_t pagf_flcount; /* count of blocks in freelist */
+ xfs_extlen_t pagf_freeblks; /* total free blocks */
+ xfs_extlen_t pagf_longest; /* longest free space */
+ uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
+ xfs_agino_t pagi_freecount; /* number of free inodes */
+ xfs_agino_t pagi_count; /* number of allocated inodes */
+
+ /*
+ * Inode allocation search lookup optimisation.
+ * If the pagino matches, the search for new inodes
+ * doesn't need to search the near ones again straight away
+ */
+ xfs_agino_t pagl_pagino;
+ xfs_agino_t pagl_leftrec;
+ xfs_agino_t pagl_rightrec;
+
+ int pagb_count; /* pagb slots in use */
+ uint8_t pagf_refcount_level; /* refcount btree height */
+
+ /* Blocks reserved for all kinds of metadata. */
+ struct xfs_ag_resv pag_meta_resv;
+ /* Blocks reserved for the reverse mapping btree. */
+ struct xfs_ag_resv pag_rmapbt_resv;
+
+ /* -- kernel only structures below this line -- */
+
+ /*
+ * Bitsets of per-ag metadata that have been checked and/or are sick.
+ * Callers should hold pag_state_lock before accessing this field.
+ */
+ uint16_t pag_checked;
+ uint16_t pag_sick;
+ spinlock_t pag_state_lock;
+
+ spinlock_t pagb_lock; /* lock for pagb_tree */
+ struct rb_root pagb_tree; /* ordered tree of busy extents */
+ unsigned int pagb_gen; /* generation count for pagb_tree */
+ wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */
+
+ atomic_t pagf_fstrms; /* # of filestreams active in this AG */
+
+ spinlock_t pag_ici_lock; /* incore inode cache lock */
+ struct radix_tree_root pag_ici_root; /* incore inode cache root */
+ int pag_ici_reclaimable; /* reclaimable inodes */
+ unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
+
+ /* buffer cache index */
+ spinlock_t pag_buf_lock; /* lock for pag_buf_hash */
+ struct rhashtable pag_buf_hash;
+
+ /* for rcu-safe freeing */
+ struct rcu_head rcu_head;
+
+ /* background prealloc block trimming */
+ struct delayed_work pag_blockgc_work;
+
+ /*
+ * Unlinked inode information. This incore information reflects
+ * data stored in the AGI, so callers must hold the AGI buffer lock
+ * or have some other means to control concurrency.
+ */
+ struct rhashtable pagi_unlinked_hash;
+};
+
+int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
+ xfs_agnumber_t *maxagi);
+int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
+void xfs_free_perag(struct xfs_mount *mp);
+
+struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
+struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
+ unsigned int tag);
+void xfs_perag_put(struct xfs_perag *pag);
+
+/*
+ * Perag iteration APIs
+ *
+ * XXX: for_each_perag_range() usage really needs an iterator to clean up when
+ * we terminate at end_agno because we may have taken a reference to the perag
+ * beyond end_agno. Right now callers have to be careful to catch and clean that
+ * up themselves. This is not necessary for the callers of for_each_perag() and
+ * for_each_perag_from() because they terminate at sb_agcount where there are
+ * no perag structures in tree beyond end_agno.
+ */
+#define for_each_perag_range(mp, next_agno, end_agno, pag) \
+ for ((pag) = xfs_perag_get((mp), (next_agno)); \
+ (pag) != NULL && (next_agno) <= (end_agno); \
+ (next_agno) = (pag)->pag_agno + 1, \
+ xfs_perag_put(pag), \
+ (pag) = xfs_perag_get((mp), (next_agno)))
+
+#define for_each_perag_from(mp, next_agno, pag) \
+ for_each_perag_range((mp), (next_agno), (mp)->m_sb.sb_agcount, (pag))
+
+
+#define for_each_perag(mp, agno, pag) \
+ (agno) = 0; \
+ for_each_perag_from((mp), (agno), (pag))
+
+#define for_each_perag_tag(mp, agno, pag, tag) \
+ for ((agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
+ (pag) != NULL; \
+ (agno) = (pag)->pag_agno + 1, \
+ xfs_perag_put(pag), \
+ (pag) = xfs_perag_get_tag((mp), (agno), (tag)))
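A minimal usage sketch for these iterators (not part of the patch hunks above; walk_ag() is a hypothetical stand-in for per-AG work): each pass holds a perag reference that the macro drops before the next lookup, so a caller that breaks out early must drop the reference itself, as the XXX note above warns for for_each_perag_range().

static int
walk_all_ags(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error;

	for_each_perag(mp, agno, pag) {
		/* walk_ag() is a hypothetical per-AG worker */
		error = walk_ag(mp, pag);
		if (error) {
			/*
			 * Early exit still holds the reference the iterator
			 * took for this AG, so drop it by hand.
			 */
			xfs_perag_put(pag);
			return error;
		}
	}
	return 0;
}

Only normal termination at sb_agcount is reference-safe without this extra cleanup, because xfs_perag_get() returns NULL once the walk runs off the end of the perag tree.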
struct aghdr_init_data {
/* per ag data */
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e0d5cdc57cc2..2aa2b3484c28 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -19,7 +19,7 @@
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_sb.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
@@ -250,7 +250,6 @@ xfs_ag_resv_init(
struct xfs_trans *tp)
{
struct xfs_mount *mp = pag->pag_mount;
- xfs_agnumber_t agno = pag->pag_agno;
xfs_extlen_t ask;
xfs_extlen_t used;
int error = 0, error2;
@@ -260,11 +259,11 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask, &used);
if (error)
goto out;
- error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
+ error = xfs_finobt_calc_reserves(mp, tp, pag, &ask, &used);
if (error)
goto out;
@@ -282,7 +281,7 @@ xfs_ag_resv_init(
mp->m_finobt_nores = true;
- error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
+ error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask,
&used);
if (error)
goto out;
@@ -300,7 +299,7 @@ xfs_ag_resv_init(
if (pag->pag_rmapbt_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(mp, tp, agno, &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, tp, pag, &ask, &used);
if (error)
goto out;
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index 8a8eb4bc48bb..b74b210008ea 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -18,6 +18,21 @@ void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
struct xfs_trans *tp, xfs_extlen_t len);
+static inline struct xfs_ag_resv *
+xfs_perag_resv(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type)
+{
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ return &pag->pag_meta_resv;
+ case XFS_AG_RESV_RMAPBT:
+ return &pag->pag_rmapbt_resv;
+ default:
+ return NULL;
+ }
+}
+
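As a brief illustration (a sketch only, assuming the caller already holds a perag reference; ag_resv_remaining() is a hypothetical helper), xfs_perag_resv() simply maps a reservation type onto the matching per-AG bookkeeping structure:

static xfs_extlen_t
ag_resv_remaining(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_ag_resv	*resv = xfs_perag_resv(pag, type);

	/* types without a per-AG pool (e.g. XFS_AG_RESV_NONE) map to NULL */
	if (!resv)
		return 0;
	return resv->ar_reserved;
}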
/*
* RMAPBT reservation accounting wrappers. Since rmapbt blocks are sourced from
* the AGFL, they are allocated one at a time and the reservation updates don't
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index af3d5f9271f6..6929157d8d6e 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -10,7 +10,6 @@
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
@@ -24,6 +23,7 @@
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"
@@ -230,7 +230,7 @@ xfs_alloc_get_rec(
int *stat) /* output: success/failure */
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
@@ -776,7 +776,7 @@ xfs_alloc_cur_setup(
*/
if (!acur->cnt)
acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_CNT);
+ args->agbp, args->pag, XFS_BTNUM_CNT);
error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
if (error)
return error;
@@ -786,10 +786,10 @@ xfs_alloc_cur_setup(
*/
if (!acur->bnolt)
acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO);
+ args->agbp, args->pag, XFS_BTNUM_BNO);
if (!acur->bnogt)
acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO);
+ args->agbp, args->pag, XFS_BTNUM_BNO);
return i == 1 ? 0 : -ENOSPC;
}
@@ -1063,7 +1063,7 @@ xfs_alloc_ag_vextent_small(
if (fbno == NULLAGBLOCK)
goto out;
- xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
+ xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
(args->datatype & XFS_ALLOC_NOBUSY));
if (args->datatype & XFS_ALLOC_USERDATA) {
@@ -1089,7 +1089,7 @@ xfs_alloc_ag_vextent_small(
* If we're feeding an AGFL block to something that doesn't live in the
* free space, we need to clear out the OWN_AG rmap.
*/
- error = xfs_rmap_free(args->tp, args->agbp, args->agno, fbno, 1,
+ error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
&XFS_RMAP_OINFO_AG);
if (error)
goto error;
@@ -1166,7 +1166,7 @@ xfs_alloc_ag_vextent(
/* if not file data, insert new block into the reverse map btree */
if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
- error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
+ error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
args->agbno, args->len, &args->oinfo);
if (error)
return error;
@@ -1178,7 +1178,7 @@ xfs_alloc_ag_vextent(
if (error)
return error;
- ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
+ ASSERT(!xfs_extent_busy_search(args->mp, args->pag,
args->agbno, args->len));
}
@@ -1217,7 +1217,7 @@ xfs_alloc_ag_vextent_exact(
* Allocate/initialize a cursor for the by-number freespace btree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
+ args->pag, XFS_BTNUM_BNO);
/*
* Lookup bno and minlen in the btree (minlen is irrelevant, really).
@@ -1277,7 +1277,7 @@ xfs_alloc_ag_vextent_exact(
* Allocate/initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT);
+ args->pag, XFS_BTNUM_CNT);
ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
@@ -1674,9 +1674,8 @@ restart:
* Allocate and initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT);
+ args->pag, XFS_BTNUM_CNT);
bno_cur = NULL;
- busy = false;
/*
* Look for an entry >= maxlen+alignment-1 blocks.
@@ -1837,7 +1836,7 @@ restart:
* Allocate and initialize a cursor for the by-block tree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
+ args->pag, XFS_BTNUM_BNO);
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
rbno, rlen, XFSA_FIXUP_CNT_OK)))
goto error0;
@@ -1896,12 +1895,13 @@ xfs_free_ag_extent(
int haveright; /* have a right neighbor */
int i;
int error;
+ struct xfs_perag *pag = agbp->b_pag;
bno_cur = cnt_cur = NULL;
mp = tp->t_mountp;
if (!xfs_rmap_should_skip_owner_update(oinfo)) {
- error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
+ error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
if (error)
goto error0;
}
@@ -1909,7 +1909,7 @@ xfs_free_ag_extent(
/*
* Allocate and initialize a cursor for the by-block btree.
*/
- bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
+ bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
/*
* Look for a neighboring block on the left (lower block numbers)
* that is contiguous with this space.
@@ -1979,7 +1979,7 @@ xfs_free_ag_extent(
/*
* Now allocate and initialize a cursor for the by-size tree.
*/
- cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
+ cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
/*
* Have both left and right contiguous neighbors.
* Merge all three into a single free block.
@@ -2490,7 +2490,7 @@ xfs_exact_minlen_extent_available(
int error = 0;
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
- args->agno, XFS_BTNUM_CNT);
+ args->pag, XFS_BTNUM_CNT);
error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
if (error)
goto out;
@@ -2693,21 +2693,21 @@ out_no_agbp:
* Get a block from the freelist.
* Returns with the buffer for the block gotten.
*/
-int /* error */
+int
xfs_alloc_get_freelist(
- xfs_trans_t *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer containing the agf structure */
- xfs_agblock_t *bnop, /* block address retrieved from freelist */
- int btreeblk) /* destination is a AGF btree */
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agblock_t *bnop,
+ int btreeblk)
{
- struct xfs_agf *agf = agbp->b_addr;
- struct xfs_buf *agflbp;/* buffer for a.g. freelist structure */
- xfs_agblock_t bno; /* block number returned */
- __be32 *agfl_bno;
- int error;
- int logflags;
- xfs_mount_t *mp = tp->t_mountp;
- xfs_perag_t *pag; /* per allocation group data */
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_buf *agflbp;
+ xfs_agblock_t bno;
+ __be32 *agfl_bno;
+ int error;
+ int logflags;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
/*
* Freelist is empty, give up.
@@ -2817,20 +2817,20 @@ xfs_alloc_pagf_init(
/*
* Put the block on the freelist for the allocation group.
*/
-int /* error */
+int
xfs_alloc_put_freelist(
- xfs_trans_t *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer for a.g. freelist header */
- struct xfs_buf *agflbp,/* buffer for a.g. free block array */
- xfs_agblock_t bno, /* block being freed */
- int btreeblk) /* block came from a AGF btree */
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ struct xfs_buf *agflbp,
+ xfs_agblock_t bno,
+ int btreeblk)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agf *agf = agbp->b_addr;
- __be32 *blockp;/* pointer to array entry */
+ struct xfs_perag *pag;
+ __be32 *blockp;
int error;
int logflags;
- xfs_perag_t *pag; /* per allocation group data */
__be32 *agfl_bno;
int startoff;
@@ -3292,7 +3292,7 @@ error0:
int
xfs_free_extent_fix_freelist(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_buf **agbp)
{
struct xfs_alloc_arg args;
@@ -3301,7 +3301,8 @@ xfs_free_extent_fix_freelist(
memset(&args, 0, sizeof(struct xfs_alloc_arg));
args.tp = tp;
args.mp = tp->t_mountp;
- args.agno = agno;
+ args.agno = pag->pag_agno;
+ args.pag = pag;
/*
* validate that the block number is legal - the enables us to detect
@@ -3310,17 +3311,12 @@ xfs_free_extent_fix_freelist(
if (args.agno >= args.mp->m_sb.sb_agcount)
return -EFSCORRUPTED;
- args.pag = xfs_perag_get(args.mp, args.agno);
- ASSERT(args.pag);
-
error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
if (error)
- goto out;
+ return error;
*agbp = args.agbp;
-out:
- xfs_perag_put(args.pag);
- return error;
+ return 0;
}
/*
@@ -3344,6 +3340,7 @@ __xfs_free_extent(
struct xfs_agf *agf;
int error;
unsigned int busy_flags = 0;
+ struct xfs_perag *pag;
ASSERT(len != 0);
ASSERT(type != XFS_AG_RESV_AGFL);
@@ -3352,33 +3349,37 @@ __xfs_free_extent(
XFS_ERRTAG_FREE_EXTENT))
return -EIO;
- error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
if (error)
- return error;
+ goto err;
agf = agbp->b_addr;
if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
error = -EFSCORRUPTED;
- goto err;
+ goto err_release;
}
/* validate the extent size is legal now we have the agf locked */
if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
error = -EFSCORRUPTED;
- goto err;
+ goto err_release;
}
error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
if (error)
- goto err;
+ goto err_release;
if (skip_discard)
busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
- xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
+ xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
+ xfs_perag_put(pag);
return 0;
-err:
+err_release:
xfs_trans_brelse(tp, agbp);
+err:
+ xfs_perag_put(pag);
return error;
}
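A hedged sketch of the new calling convention for xfs_free_extent_fix_freelist(), mirroring the get/put pattern in __xfs_free_extent() above; free_one_extent() is a hypothetical wrapper and the AGF work in the middle is elided:

static int
free_one_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agbp;
	int			error;

	pag = xfs_perag_get(mp, agno);
	error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
	if (error)
		goto out_put;

	/* ... examine or free space against agbp->b_addr here ... */

	xfs_trans_brelse(tp, agbp);
out_put:
	xfs_perag_put(pag);
	return error;
}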
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index a4427c5775c2..e30900b6f8ba 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -214,7 +214,7 @@ int xfs_alloc_read_agfl(struct xfs_mount *mp, struct xfs_trans *tp,
int xfs_free_agfl_block(struct xfs_trans *, xfs_agnumber_t, xfs_agblock_t,
struct xfs_buf *, struct xfs_owner_info *);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
-int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
+int xfs_free_extent_fix_freelist(struct xfs_trans *tp, struct xfs_perag *pag,
struct xfs_buf **agbp);
xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index a43e4c50e69b..6b363f78cfa2 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
@@ -19,6 +18,7 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
+#include "xfs_ag.h"
STATIC struct xfs_btree_cur *
@@ -26,8 +26,7 @@ xfs_allocbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno,
- cur->bc_btnum);
+ cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
STATIC void
@@ -39,13 +38,12 @@ xfs_allocbt_set_root(
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
- struct xfs_perag *pag = agbp->b_pag;
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- pag->pagf_levels[btnum] += inc;
+ cur->bc_ag.pag->pagf_levels[btnum] += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@@ -72,7 +70,7 @@ xfs_allocbt_alloc_block(
}
atomic64_inc(&cur->bc_mp->m_allocbt_blks);
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);
new->s = cpu_to_be32(bno);
@@ -86,7 +84,6 @@ xfs_allocbt_free_block(
struct xfs_buf *bp)
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
- struct xfs_agf *agf = agbp->b_addr;
xfs_agblock_t bno;
int error;
@@ -96,7 +93,7 @@ xfs_allocbt_free_block(
return error;
atomic64_dec(&cur->bc_mp->m_allocbt_blks);
- xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
return 0;
}
@@ -225,7 +222,7 @@ xfs_allocbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
@@ -473,7 +470,7 @@ STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
@@ -486,6 +483,7 @@ xfs_allocbt_init_common(
cur->bc_mp = mp;
cur->bc_btnum = btnum;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_ag.abt.active = false;
if (btnum == XFS_BTNUM_CNT) {
cur->bc_ops = &xfs_cntbt_ops;
@@ -496,8 +494,9 @@ xfs_allocbt_init_common(
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
- cur->bc_ag.agno = agno;
- cur->bc_ag.abt.active = false;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
@@ -513,13 +512,13 @@ xfs_allocbt_init_cursor(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
struct xfs_buf *agbp, /* buffer for agf structure */
- xfs_agnumber_t agno, /* allocation group number */
+ struct xfs_perag *pag,
xfs_btnum_t btnum) /* btree identifier */
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
- cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
+ cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
if (btnum == XFS_BTNUM_CNT)
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
else
@@ -535,12 +534,12 @@ struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
+ cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
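For orientation, a minimal sketch of the perag-based cursor API after this change (probe_free_space() is a hypothetical helper): the cursor takes its own perag reference in xfs_allocbt_init_common() and is expected to release it again when the cursor is deleted.

static int
probe_free_space(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;
	int			stat = 0;
	int			error;

	/* the cursor bumps pag_ref itself in xfs_allocbt_init_common() */
	cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
	error = xfs_alloc_lookup_ge(cur, 0, 1, &stat);
	/* deleting the cursor is expected to drop that reference */
	xfs_btree_del_cursor(cur, error);
	return error ? error : (stat ? 0 : -ENOSPC);
}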
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h
index a5b998e950fe..9eb4c667a6b8 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.h
+++ b/fs/xfs/libxfs/xfs_alloc_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xfs_perag;
struct xbtree_afakeroot;
/*
@@ -46,11 +47,11 @@ struct xbtree_afakeroot;
(maxrecs) * sizeof(xfs_alloc_key_t) + \
((index) - 1) * sizeof(xfs_alloc_ptr_t)))
-extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
- struct xfs_trans *, struct xfs_buf *,
- xfs_agnumber_t, xfs_btnum_t);
+extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *mp,
+ struct xfs_trans *tp, struct xfs_buf *bp,
+ struct xfs_perag *pag, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_allocbt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag,
xfs_btnum_t btnum);
extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 96146f425e50..191d51725988 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -44,20 +44,27 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
* Internal routines when attribute list is one block.
*/
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
-STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_hasname(struct xfs_da_args *args, struct xfs_buf **bp);
+STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args, struct xfs_buf *bp);
/*
* Internal routines when attribute list is more than one block.
*/
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
-STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
-STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
+STATIC void xfs_attr_restore_rmt_blk(struct xfs_da_args *args);
+STATIC int xfs_attr_node_addname(struct xfs_delattr_context *dac);
+STATIC int xfs_attr_node_addname_find_attr(struct xfs_delattr_context *dac);
+STATIC int xfs_attr_node_addname_clear_incomplete(
+ struct xfs_delattr_context *dac);
STATIC int xfs_attr_node_hasname(xfs_da_args_t *args,
struct xfs_da_state **state);
STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
+STATIC int xfs_attr_set_iter(struct xfs_delattr_context *dac,
+ struct xfs_buf **leaf_bp);
+STATIC int xfs_attr_node_removename(struct xfs_da_args *args,
+ struct xfs_da_state *state);
int
xfs_inode_hasattr(
@@ -237,27 +244,77 @@ xfs_attr_is_shortform(
}
/*
- * Attempts to set an attr in shortform, or converts short form to leaf form if
- * there is not enough room. If the attr is set, the transaction is committed
- * and set to NULL.
+ * Checks to see if a delayed attribute transaction should be rolled. If so,
+ * transaction is finished or rolled as needed.
*/
STATIC int
-xfs_attr_set_shortform(
- struct xfs_da_args *args,
- struct xfs_buf **leaf_bp)
+xfs_attr_trans_roll(
+ struct xfs_delattr_context *dac)
{
- struct xfs_inode *dp = args->dp;
- int error, error2 = 0;
+ struct xfs_da_args *args = dac->da_args;
+ int error;
+
+ if (dac->flags & XFS_DAC_DEFER_FINISH) {
+ /*
+ * The caller wants us to finish all the deferred ops so that we
+ * avoid pinning the log tail with a large number of deferred
+ * ops.
+ */
+ dac->flags &= ~XFS_DAC_DEFER_FINISH;
+ error = xfs_defer_finish(&args->trans);
+ } else
+ error = xfs_trans_roll_inode(&args->trans, args->dp);
+
+ return error;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+ struct xfs_da_args *args)
+{
+ struct xfs_buf *leaf_bp = NULL;
+ int error = 0;
+ struct xfs_delattr_context dac = {
+ .da_args = args,
+ };
+
+ do {
+ error = xfs_attr_set_iter(&dac, &leaf_bp);
+ if (error != -EAGAIN)
+ break;
+
+ error = xfs_attr_trans_roll(&dac);
+ if (error) {
+ if (leaf_bp)
+ xfs_trans_brelse(args->trans, leaf_bp);
+ return error;
+ }
+ } while (true);
+
+ return error;
+}
+
+STATIC int
+xfs_attr_sf_addname(
+ struct xfs_delattr_context *dac,
+ struct xfs_buf **leaf_bp)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_inode *dp = args->dp;
+ int error = 0;
/*
* Try to add the attr to the attribute list in the inode.
*/
error = xfs_attr_try_sf_addname(dp, args);
- if (error != -ENOSPC) {
- error2 = xfs_trans_commit(args->trans);
- args->trans = NULL;
- return error ? error : error2;
- }
+
+ /* Should only be 0, -EEXIST or -ENOSPC */
+ if (error != -ENOSPC)
+ return error;
+
/*
* It won't fit in the shortform, transform to a leaf block. GROT:
* another possible req'mt for a double-split btree op.
@@ -269,85 +326,300 @@ xfs_attr_set_shortform(
/*
* Prevent the leaf buffer from being unlocked so that a concurrent AIL
* push cannot grab the half-baked leaf buffer and run into problems
- * with the write verifier. Once we're done rolling the transaction we
- * can release the hold and add the attr to the leaf.
+ * with the write verifier.
*/
xfs_trans_bhold(args->trans, *leaf_bp);
- error = xfs_defer_finish(&args->trans);
- xfs_trans_bhold_release(args->trans, *leaf_bp);
- if (error) {
- xfs_trans_brelse(args->trans, *leaf_bp);
- return error;
- }
- return 0;
+ /*
+ * We're still in XFS_DAS_UNINIT state here. We've converted
+ * the attr fork to leaf format and will restart with the leaf
+ * add.
+ */
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
}
/*
* Set the attribute specified in @args.
+ * This routine is meant to function as a delayed operation, and may return
+ * -EAGAIN when the transaction needs to be rolled. Calling functions will need
+ * to handle this, and recall the function until a successful error code is
+ * returned.
*/
int
-xfs_attr_set_args(
- struct xfs_da_args *args)
+xfs_attr_set_iter(
+ struct xfs_delattr_context *dac,
+ struct xfs_buf **leaf_bp)
{
- struct xfs_inode *dp = args->dp;
- struct xfs_buf *leaf_bp = NULL;
- int error = 0;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_buf *bp = NULL;
+ int forkoff, error = 0;
+
+ /* State machine switch */
+ switch (dac->dela_state) {
+ case XFS_DAS_UNINIT:
+ /*
+ * If the fork is shortform, attempt to add the attr. If there
+ * is no space, this converts to leaf format and returns
+ * -EAGAIN with the leaf buffer held across the roll. The caller
+ * will deal with a transaction roll error, but otherwise
+ * release the hold once we return with a clean transaction.
+ */
+ if (xfs_attr_is_shortform(dp))
+ return xfs_attr_sf_addname(dac, leaf_bp);
+ if (*leaf_bp != NULL) {
+ xfs_trans_bhold_release(args->trans, *leaf_bp);
+ *leaf_bp = NULL;
+ }
- /*
- * If the attribute list is already in leaf format, jump straight to
- * leaf handling. Otherwise, try to add the attribute to the shortform
- * list; if there's no room then convert the list to leaf format and try
- * again.
- */
- if (xfs_attr_is_shortform(dp)) {
+ if (xfs_attr_is_leaf(dp)) {
+ error = xfs_attr_leaf_try_add(args, *leaf_bp);
+ if (error == -ENOSPC) {
+ error = xfs_attr3_leaf_to_node(args);
+ if (error)
+ return error;
+
+ /*
+ * Finish any deferred work items and roll the
+ * transaction once more. The goal here is to
+ * call node_addname with the inode and
+ * transaction in the same state (inode locked
+ * and joined, transaction clean) no matter how
+ * we got to this step.
+ *
+ * At this point, we are still in
+ * XFS_DAS_UNINIT, but when we come back, we'll
+ * be a node, so we'll fall down into the node
+ * handling code below
+ */
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
+ } else if (error) {
+ return error;
+ }
+
+ dac->dela_state = XFS_DAS_FOUND_LBLK;
+ } else {
+ error = xfs_attr_node_addname_find_attr(dac);
+ if (error)
+ return error;
+
+ error = xfs_attr_node_addname(dac);
+ if (error)
+ return error;
+
+ dac->dela_state = XFS_DAS_FOUND_NBLK;
+ }
+ return -EAGAIN;
+ case XFS_DAS_FOUND_LBLK:
+ /*
+ * If there was an out-of-line value, allocate the blocks we
+ * identified for its storage and copy the value. This is done
+ * after we create the attribute so that we don't overflow the
+ * maximum size of a transaction and/or hit a deadlock.
+ */
+
+ /* Open coded xfs_attr_rmtval_set without trans handling */
+ if ((dac->flags & XFS_DAC_LEAF_ADDNAME_INIT) == 0) {
+ dac->flags |= XFS_DAC_LEAF_ADDNAME_INIT;
+ if (args->rmtblkno > 0) {
+ error = xfs_attr_rmtval_find_space(dac);
+ if (error)
+ return error;
+ }
+ }
/*
- * If the attr was successfully set in shortform, the
- * transaction is committed and set to NULL. Otherwise, is it
- * converted from shortform to leaf, and the transaction is
- * retained.
+ * Repeat allocating remote blocks for the attr value until
+ * blkcnt drops to zero.
*/
- error = xfs_attr_set_shortform(args, &leaf_bp);
- if (error || !args->trans)
+ if (dac->blkcnt > 0) {
+ error = xfs_attr_rmtval_set_blk(dac);
+ if (error)
+ return error;
+ return -EAGAIN;
+ }
+
+ error = xfs_attr_rmtval_set_value(args);
+ if (error)
return error;
- }
- if (xfs_attr_is_leaf(dp)) {
- error = xfs_attr_leaf_addname(args);
- if (error != -ENOSPC)
+ /*
+ * If this is not a rename, clear the incomplete flag and we're
+ * done.
+ */
+ if (!(args->op_flags & XFS_DA_OP_RENAME)) {
+ if (args->rmtblkno > 0)
+ error = xfs_attr3_leaf_clearflag(args);
return error;
+ }
/*
- * Promote the attribute list to the Btree format.
+ * If this is an atomic rename operation, we must "flip" the
+ * incomplete flags on the "new" and "old" attribute/value pairs
+ * so that one disappears and one appears atomically. Then we
+ * must remove the "old" attribute/value pair.
+ *
+ * In a separate transaction, set the incomplete flag on the
+ * "old" attr and clear the incomplete flag on the "new" attr.
*/
- error = xfs_attr3_leaf_to_node(args);
+ error = xfs_attr3_leaf_flipflags(args);
if (error)
return error;
+ /*
+ * Commit the flag value change and start the next trans in
+ * series.
+ */
+ dac->dela_state = XFS_DAS_FLIP_LFLAG;
+ return -EAGAIN;
+ case XFS_DAS_FLIP_LFLAG:
+ /*
+ * Dismantle the "old" attribute/value pair by removing a
+ * "remote" value (if it exists).
+ */
+ xfs_attr_restore_rmt_blk(args);
+ error = xfs_attr_rmtval_invalidate(args);
+ if (error)
+ return error;
+
+ fallthrough;
+ case XFS_DAS_RM_LBLK:
+ /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
+ dac->dela_state = XFS_DAS_RM_LBLK;
+ if (args->rmtblkno) {
+ error = __xfs_attr_rmtval_remove(dac);
+ if (error)
+ return error;
+
+ dac->dela_state = XFS_DAS_RD_LEAF;
+ return -EAGAIN;
+ }
+ fallthrough;
+ case XFS_DAS_RD_LEAF:
/*
- * Finish any deferred work items and roll the transaction once
- * more. The goal here is to call node_addname with the inode
- * and transaction in the same state (inode locked and joined,
- * transaction clean) no matter how we got to this step.
+ * This is the last step for leaf format. Read the block with
+ * the old attr, remove the old attr, check for shortform
+ * conversion and return.
*/
- error = xfs_defer_finish(&args->trans);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
+ &bp);
if (error)
return error;
+ xfs_attr3_leaf_remove(bp, args);
+
+ forkoff = xfs_attr_shortform_allfit(bp, dp);
+ if (forkoff)
+ error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
+ /* bp is gone due to xfs_da_shrink_inode */
+
+ return error;
+
+ case XFS_DAS_FOUND_NBLK:
+ /*
+ * Find space for remote blocks and fall into the allocation
+ * state.
+ */
+ if (args->rmtblkno > 0) {
+ error = xfs_attr_rmtval_find_space(dac);
+ if (error)
+ return error;
+ }
+
+ fallthrough;
+ case XFS_DAS_ALLOC_NODE:
+ /*
+ * If there was an out-of-line value, allocate the blocks we
+ * identified for its storage and copy the value. This is done
+ * after we create the attribute so that we don't overflow the
+ * maximum size of a transaction and/or hit a deadlock.
+ */
+ dac->dela_state = XFS_DAS_ALLOC_NODE;
+ if (args->rmtblkno > 0) {
+ if (dac->blkcnt > 0) {
+ error = xfs_attr_rmtval_set_blk(dac);
+ if (error)
+ return error;
+ return -EAGAIN;
+ }
+
+ error = xfs_attr_rmtval_set_value(args);
+ if (error)
+ return error;
+ }
+
/*
- * Commit the current trans (including the inode) and
- * start a new one.
+ * If this was not a rename, clear the incomplete flag and we're
+ * done.
*/
- error = xfs_trans_roll_inode(&args->trans, dp);
+ if (!(args->op_flags & XFS_DA_OP_RENAME)) {
+ if (args->rmtblkno > 0)
+ error = xfs_attr3_leaf_clearflag(args);
+ goto out;
+ }
+
+ /*
+ * If this is an atomic rename operation, we must "flip" the
+ * incomplete flags on the "new" and "old" attribute/value pairs
+ * so that one disappears and one appears atomically. Then we
+ * must remove the "old" attribute/value pair.
+ *
+ * In a separate transaction, set the incomplete flag on the
+ * "old" attr and clear the incomplete flag on the "new" attr.
+ */
+ error = xfs_attr3_leaf_flipflags(args);
+ if (error)
+ goto out;
+ /*
+ * Commit the flag value change and start the next trans in
+ * series.
+ */
+ dac->dela_state = XFS_DAS_FLIP_NFLAG;
+ return -EAGAIN;
+
+ case XFS_DAS_FLIP_NFLAG:
+ /*
+ * Dismantle the "old" attribute/value pair by removing a
+ * "remote" value (if it exists).
+ */
+ xfs_attr_restore_rmt_blk(args);
+
+ error = xfs_attr_rmtval_invalidate(args);
if (error)
return error;
- }
- error = xfs_attr_node_addname(args);
+ fallthrough;
+ case XFS_DAS_RM_NBLK:
+ /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
+ dac->dela_state = XFS_DAS_RM_NBLK;
+ if (args->rmtblkno) {
+ error = __xfs_attr_rmtval_remove(dac);
+ if (error)
+ return error;
+
+ dac->dela_state = XFS_DAS_CLR_FLAG;
+ return -EAGAIN;
+ }
+
+ fallthrough;
+ case XFS_DAS_CLR_FLAG:
+ /*
+ * The last state for node format. Look up the old attr and
+ * remove it.
+ */
+ error = xfs_attr_node_addname_clear_incomplete(dac);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+out:
return error;
}
+
/*
* Return EEXIST if attr is found, or ENOATTR if not
*/
@@ -382,16 +654,25 @@ xfs_has_attr(
*/
int
xfs_attr_remove_args(
- struct xfs_da_args *args)
+ struct xfs_da_args *args)
{
- if (!xfs_inode_hasattr(args->dp))
- return -ENOATTR;
+ int error;
+ struct xfs_delattr_context dac = {
+ .da_args = args,
+ };
- if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
- return xfs_attr_shortform_remove(args);
- if (xfs_attr_is_leaf(args->dp))
- return xfs_attr_leaf_removename(args);
- return xfs_attr_node_removename(args);
+ do {
+ error = xfs_attr_remove_iter(&dac);
+ if (error != -EAGAIN)
+ break;
+
+ error = xfs_attr_trans_roll(&dac);
+ if (error)
+ return error;
+
+ } while (true);
+
+ return error;
}
/*
@@ -559,7 +840,7 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
if (retval == -EEXIST) {
if (args->attr_flags & XATTR_CREATE)
return retval;
- retval = xfs_attr_shortform_remove(args);
+ retval = xfs_attr_sf_removename(args);
if (retval)
return retval;
/*
@@ -670,115 +951,6 @@ out_brelse:
return retval;
}
-
-/*
- * Add a name to the leaf attribute list structure
- *
- * This leaf block cannot have a "remote" value, we only call this routine
- * if bmap_one_block() says there is only one block (ie: no remote blks).
- */
-STATIC int
-xfs_attr_leaf_addname(
- struct xfs_da_args *args)
-{
- int error, forkoff;
- struct xfs_buf *bp = NULL;
- struct xfs_inode *dp = args->dp;
-
- trace_xfs_attr_leaf_addname(args);
-
- error = xfs_attr_leaf_try_add(args, bp);
- if (error)
- return error;
-
- /*
- * Commit the transaction that added the attr name so that
- * later routines can manage their own transactions.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- return error;
-
- /*
- * If there was an out-of-line value, allocate the blocks we
- * identified for its storage and copy the value. This is done
- * after we create the attribute so that we don't overflow the
- * maximum size of a transaction and/or hit a deadlock.
- */
- if (args->rmtblkno > 0) {
- error = xfs_attr_rmtval_set(args);
- if (error)
- return error;
- }
-
- if (!(args->op_flags & XFS_DA_OP_RENAME)) {
- /*
- * Added a "remote" value, just clear the incomplete flag.
- */
- if (args->rmtblkno > 0)
- error = xfs_attr3_leaf_clearflag(args);
-
- return error;
- }
-
- /*
- * If this is an atomic rename operation, we must "flip" the incomplete
- * flags on the "new" and "old" attribute/value pairs so that one
- * disappears and one appears atomically. Then we must remove the "old"
- * attribute/value pair.
- *
- * In a separate transaction, set the incomplete flag on the "old" attr
- * and clear the incomplete flag on the "new" attr.
- */
-
- error = xfs_attr3_leaf_flipflags(args);
- if (error)
- return error;
- /*
- * Commit the flag value change and start the next trans in series.
- */
- error = xfs_trans_roll_inode(&args->trans, args->dp);
- if (error)
- return error;
-
- /*
- * Dismantle the "old" attribute/value pair by removing a "remote" value
- * (if it exists).
- */
- xfs_attr_restore_rmt_blk(args);
-
- if (args->rmtblkno) {
- error = xfs_attr_rmtval_invalidate(args);
- if (error)
- return error;
-
- error = xfs_attr_rmtval_remove(args);
- if (error)
- return error;
- }
-
- /*
- * Read in the block containing the "old" attr, then remove the "old"
- * attr from that block (neat, huh!)
- */
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
- &bp);
- if (error)
- return error;
-
- xfs_attr3_leaf_remove(bp, args);
-
- /*
- * If the result is small enough, shrink it all into the inode.
- */
- forkoff = xfs_attr_shortform_allfit(bp, dp);
- if (forkoff)
- error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
- /* bp is gone due to xfs_da_shrink_inode */
-
- return error;
-}
-
/*
* Return EEXIST if attr is found, or ENOATTR if not
*/
@@ -909,48 +1081,26 @@ xfs_attr_node_hasname(
* External routines when attribute list size > geo->blksize
*========================================================================*/
-/*
- * Add a name to a Btree-format attribute list.
- *
- * This will involve walking down the Btree, and may involve splitting
- * leaf nodes and even splitting intermediate nodes up to and including
- * the root node (a special case of an intermediate node).
- *
- * "Remote" attribute values confuse the issue and atomic rename operations
- * add a whole extra layer of confusion on top of that.
- */
STATIC int
-xfs_attr_node_addname(
- struct xfs_da_args *args)
+xfs_attr_node_addname_find_attr(
+ struct xfs_delattr_context *dac)
{
- struct xfs_da_state *state;
- struct xfs_da_state_blk *blk;
- struct xfs_inode *dp;
- int retval, error;
-
- trace_xfs_attr_node_addname(args);
+ struct xfs_da_args *args = dac->da_args;
+ int retval;
/*
- * Fill in bucket of arguments/results/context to carry around.
- */
- dp = args->dp;
-restart:
- /*
* Search to see if name already exists, and get back a pointer
* to where it should go.
*/
- error = 0;
- retval = xfs_attr_node_hasname(args, &state);
+ retval = xfs_attr_node_hasname(args, &dac->da_state);
if (retval != -ENOATTR && retval != -EEXIST)
- goto out;
+ return retval;
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
- goto out;
+ goto error;
if (retval == -EEXIST) {
if (args->attr_flags & XATTR_CREATE)
- goto out;
+ goto error;
trace_xfs_attr_node_replace(args);
@@ -968,8 +1118,44 @@ restart:
args->rmtvaluelen = 0;
}
- retval = xfs_attr3_leaf_add(blk->bp, state->args);
- if (retval == -ENOSPC) {
+ return 0;
+error:
+ if (dac->da_state)
+ xfs_da_state_free(dac->da_state);
+ return retval;
+}
+
+/*
+ * Add a name to a Btree-format attribute list.
+ *
+ * This will involve walking down the Btree, and may involve splitting
+ * leaf nodes and even splitting intermediate nodes up to and including
+ * the root node (a special case of an intermediate node).
+ *
+ * "Remote" attribute values confuse the issue and atomic rename operations
+ * add a whole extra layer of confusion on top of that.
+ *
+ * This routine is meant to function as a delayed operation, and may return
+ * -EAGAIN when the transaction needs to be rolled. Calling functions will need
+ * to handle this, and recall the function until a successful error code is
+ * returned.
+ */
+STATIC int
+xfs_attr_node_addname(
+ struct xfs_delattr_context *dac)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state *state = dac->da_state;
+ struct xfs_da_state_blk *blk;
+ int error;
+
+ trace_xfs_attr_node_addname(args);
+
+ blk = &state->path.blk[state->path.active-1];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+
+ error = xfs_attr3_leaf_add(blk->bp, state->args);
+ if (error == -ENOSPC) {
if (state->path.active == 1) {
/*
* Its really a single leaf node, but it had
@@ -981,19 +1167,16 @@ restart:
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out;
- error = xfs_defer_finish(&args->trans);
- if (error)
- goto out;
/*
- * Commit the node conversion and start the next
- * trans in the chain.
+ * Now that we have converted the leaf to a node, we can
+ * roll the transaction, and try xfs_attr3_leaf_add
+ * again on re-entry. No need to set dela_state to do
+ * this. dela_state is still unset by this function at
+ * this point.
*/
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- goto out;
-
- goto restart;
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
}
/*
@@ -1005,9 +1188,7 @@ restart:
error = xfs_da3_split(state);
if (error)
goto out;
- error = xfs_defer_finish(&args->trans);
- if (error)
- goto out;
+ dac->flags |= XFS_DAC_DEFER_FINISH;
} else {
/*
* Addition succeeded, update Btree hashvals.
@@ -1015,77 +1196,21 @@ restart:
xfs_da3_fixhashpath(state, &state->path);
}
- /*
- * Kill the state structure, we're done with it and need to
- * allow the buffers to come back later.
- */
- xfs_da_state_free(state);
- state = NULL;
-
- /*
- * Commit the leaf addition or btree split and start the next
- * trans in the chain.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- goto out;
-
- /*
- * If there was an out-of-line value, allocate the blocks we
- * identified for its storage and copy the value. This is done
- * after we create the attribute so that we don't overflow the
- * maximum size of a transaction and/or hit a deadlock.
- */
- if (args->rmtblkno > 0) {
- error = xfs_attr_rmtval_set(args);
- if (error)
- return error;
- }
-
- if (!(args->op_flags & XFS_DA_OP_RENAME)) {
- /*
- * Added a "remote" value, just clear the incomplete flag.
- */
- if (args->rmtblkno > 0)
- error = xfs_attr3_leaf_clearflag(args);
- retval = error;
- goto out;
- }
-
- /*
- * If this is an atomic rename operation, we must "flip" the incomplete
- * flags on the "new" and "old" attribute/value pairs so that one
- * disappears and one appears atomically. Then we must remove the "old"
- * attribute/value pair.
- *
- * In a separate transaction, set the incomplete flag on the "old" attr
- * and clear the incomplete flag on the "new" attr.
- */
- error = xfs_attr3_leaf_flipflags(args);
- if (error)
- goto out;
- /*
- * Commit the flag value change and start the next trans in series
- */
- error = xfs_trans_roll_inode(&args->trans, args->dp);
- if (error)
- goto out;
-
- /*
- * Dismantle the "old" attribute/value pair by removing a "remote" value
- * (if it exists).
- */
- xfs_attr_restore_rmt_blk(args);
+out:
+ if (state)
+ xfs_da_state_free(state);
+ return error;
+}
- if (args->rmtblkno) {
- error = xfs_attr_rmtval_invalidate(args);
- if (error)
- return error;
- error = xfs_attr_rmtval_remove(args);
- if (error)
- return error;
- }
+STATIC int
+xfs_attr_node_addname_clear_incomplete(
+ struct xfs_delattr_context *dac)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state *state = NULL;
+ int retval = 0;
+ int error = 0;
/*
* Re-find the "old" attribute entry after any split ops. The INCOMPLETE
@@ -1098,13 +1223,7 @@ restart:
if (error)
goto out;
- /*
- * Remove the name and update the hashvals in the tree.
- */
- blk = &state->path.blk[state->path.active-1];
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- error = xfs_attr3_leaf_remove(blk->bp, args);
- xfs_da3_fixhashpath(state, &state->path);
+ error = xfs_attr_node_removename(args, state);
/*
* Check to see if the tree needs to be collapsed.
@@ -1190,14 +1309,16 @@ xfs_attr_leaf_mark_incomplete(
*/
STATIC
int xfs_attr_node_removename_setup(
- struct xfs_da_args *args,
- struct xfs_da_state **state)
+ struct xfs_delattr_context *dac)
{
- int error;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state **state = &dac->da_state;
+ int error;
error = xfs_attr_node_hasname(args, state);
if (error != -EEXIST)
return error;
+ error = 0;
ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
ASSERT((*state)->path.blk[(*state)->path.active - 1].magic ==
@@ -1206,97 +1327,164 @@ int xfs_attr_node_removename_setup(
if (args->rmtblkno > 0) {
error = xfs_attr_leaf_mark_incomplete(args, *state);
if (error)
- return error;
+ goto out;
- return xfs_attr_rmtval_invalidate(args);
+ error = xfs_attr_rmtval_invalidate(args);
}
+out:
+ if (error)
+ xfs_da_state_free(*state);
- return 0;
+ return error;
}
STATIC int
-xfs_attr_node_remove_rmt(
+xfs_attr_node_removename(
struct xfs_da_args *args,
struct xfs_da_state *state)
{
- int error = 0;
-
- error = xfs_attr_rmtval_remove(args);
- if (error)
- return error;
+ struct xfs_da_state_blk *blk;
+ int retval;
/*
- * Refill the state structure with buffers, the prior calls released our
- * buffers.
+ * Remove the name and update the hashvals in the tree.
*/
- return xfs_attr_refillstate(state);
+ blk = &state->path.blk[state->path.active-1];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+ retval = xfs_attr3_leaf_remove(blk->bp, args);
+ xfs_da3_fixhashpath(state, &state->path);
+
+ return retval;
}
/*
- * Remove a name from a B-tree attribute list.
+ * Remove the attribute specified in @args.
*
* This will involve walking down the Btree, and may involve joining
* leaf nodes and even joining intermediate nodes up to and including
* the root node (a special case of an intermediate node).
+ *
+ * This routine is meant to function as either an in-line or delayed operation,
+ * and may return -EAGAIN when the transaction needs to be rolled. Calling
+ * functions will need to handle this, and call the function until a
+ * successful error code is returned.
*/
-STATIC int
-xfs_attr_node_removename(
- struct xfs_da_args *args)
+int
+xfs_attr_remove_iter(
+ struct xfs_delattr_context *dac)
{
- struct xfs_da_state *state;
- struct xfs_da_state_blk *blk;
- int retval, error;
- struct xfs_inode *dp = args->dp;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_da_state *state = dac->da_state;
+ int retval, error = 0;
+ struct xfs_inode *dp = args->dp;
trace_xfs_attr_node_removename(args);
- error = xfs_attr_node_removename_setup(args, &state);
- if (error)
- goto out;
+ switch (dac->dela_state) {
+ case XFS_DAS_UNINIT:
+ if (!xfs_inode_hasattr(dp))
+ return -ENOATTR;
- /*
- * If there is an out-of-line value, de-allocate the blocks.
- * This is done before we remove the attribute so that we don't
- * overflow the maximum size of a transaction and/or hit a deadlock.
- */
- if (args->rmtblkno > 0) {
- error = xfs_attr_node_remove_rmt(args, state);
- if (error)
- goto out;
- }
+ /*
+ * Shortform or leaf formats don't require transaction rolls and
+ * thus state transitions. Call the right helper and return.
+ */
+ if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
+ return xfs_attr_sf_removename(args);
- /*
- * Remove the name and update the hashvals in the tree.
- */
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- retval = xfs_attr3_leaf_remove(blk->bp, args);
- xfs_da3_fixhashpath(state, &state->path);
+ if (xfs_attr_is_leaf(dp))
+ return xfs_attr_leaf_removename(args);
- /*
- * Check to see if the tree needs to be collapsed.
- */
- if (retval && (state->path.active > 1)) {
- error = xfs_da3_join(state);
- if (error)
- goto out;
- error = xfs_defer_finish(&args->trans);
- if (error)
- goto out;
/*
- * Commit the Btree join operation and start a new trans.
+ * Node format may require transaction rolls. Set up the
+ * state context and fall into the state machine.
*/
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- goto out;
- }
+ if (!dac->da_state) {
+ error = xfs_attr_node_removename_setup(dac);
+ if (error)
+ return error;
+ state = dac->da_state;
+ }
- /*
- * If the result is small enough, push it all into the inode.
- */
- if (xfs_attr_is_leaf(dp))
- error = xfs_attr_node_shrink(args, state);
+ fallthrough;
+ case XFS_DAS_RMTBLK:
+ dac->dela_state = XFS_DAS_RMTBLK;
+ /*
+ * If there is an out-of-line value, de-allocate the blocks.
+ * This is done before we remove the attribute so that we don't
+ * overflow the maximum size of a transaction and/or hit a
+ * deadlock.
+ */
+ if (args->rmtblkno > 0) {
+ /*
+ * May return -EAGAIN. Roll and repeat until all remote
+ * blocks are removed.
+ */
+ error = __xfs_attr_rmtval_remove(dac);
+ if (error == -EAGAIN)
+ return error;
+ else if (error)
+ goto out;
+
+ /*
+ * Refill the state structure with buffers (the prior
+ * calls released our buffers) and close out this
+ * transaction before proceeding.
+ */
+ ASSERT(args->rmtblkno == 0);
+ error = xfs_attr_refillstate(state);
+ if (error)
+ goto out;
+ dac->dela_state = XFS_DAS_RM_NAME;
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ return -EAGAIN;
+ }
+
+ fallthrough;
+ case XFS_DAS_RM_NAME:
+ /*
+ * If we came here fresh from a transaction roll, reattach all
+ * the buffers to the current transaction.
+ */
+ if (dac->dela_state == XFS_DAS_RM_NAME) {
+ error = xfs_attr_refillstate(state);
+ if (error)
+ goto out;
+ }
+
+ retval = xfs_attr_node_removename(args, state);
+
+ /*
+ * Check to see if the tree needs to be collapsed. If so, roll
+ * the transaction and fall into the shrink state.
+ */
+ if (retval && (state->path.active > 1)) {
+ error = xfs_da3_join(state);
+ if (error)
+ goto out;
+
+ dac->flags |= XFS_DAC_DEFER_FINISH;
+ dac->dela_state = XFS_DAS_RM_SHRINK;
+ return -EAGAIN;
+ }
+
+ fallthrough;
+ case XFS_DAS_RM_SHRINK:
+ /*
+ * If the result is small enough, push it all into the inode.
+ * This is our final state so it's safe to return a dirty
+ * transaction.
+ */
+ if (xfs_attr_is_leaf(dp))
+ error = xfs_attr_node_shrink(args, state);
+ ASSERT(error != -EAGAIN);
+ break;
+ default:
+ ASSERT(0);
+ error = -EINVAL;
+ goto out;
+ }
out:
if (state)
xfs_da_state_free(state);
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 2b1f61987a9d..8de5d1d2733e 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -74,6 +74,406 @@ struct xfs_attr_list_context {
};
+/*
+ * ========================================================================
+ * Structure used to pass context around among the delayed routines.
+ * ========================================================================
+ */
+
+/*
+ * Below is a state machine diagram for attr remove operations. The XFS_DAS_*
+ * states indicate places where the function would return -EAGAIN, and then
+ * immediately resume from after being called by the calling function. States
+ * marked as a "subroutine state" indicate that they belong to a subroutine, and
+ * so the calling function needs to pass them back to that subroutine to allow
+ * it to finish where it left off. But they otherwise do not have a role in the
+ * calling function other than just passing through.
+ *
+ * xfs_attr_remove_iter()
+ * │
+ * v
+ * have attr to remove? ──n──> done
+ * │
+ * y
+ * │
+ * v
+ * are we short form? ──y──> xfs_attr_shortform_remove ──> done
+ * │
+ * n
+ * │
+ * V
+ * are we leaf form? ──y──> xfs_attr_leaf_removename ──> done
+ * │
+ * n
+ * │
+ * V
+ * ┌── need to setup state?
+ * │ │
+ * n y
+ * │ │
+ * │ v
+ * │ find attr and get state
+ * │ attr has remote blks? ──n─┐
+ * │ │ v
+ * │ │ find and invalidate
+ * │ y the remote blocks.
+ * │ │ mark attr incomplete
+ * │ ├────────────────┘
+ * └──────────┤
+ * │
+ * v
+ * Have remote blks to remove? ───y─────┐
+ * │ ^ remove the blks
+ * │ │ │
+ * │ │ v
+ * │ XFS_DAS_RMTBLK <─n── done?
+ * │ re-enter with │
+ * │ one less blk to y
+ * │ remove │
+ * │ V
+ * │ refill the state
+ * n │
+ * │ v
+ * │ XFS_DAS_RM_NAME
+ * │ │
+ * ├─────────────────────────┘
+ * │
+ * v
+ * remove leaf and
+ * update hash with
+ * xfs_attr_node_remove_cleanup
+ * │
+ * v
+ * need to
+ * shrink tree? ─n─┐
+ * │ │
+ * y │
+ * │ │
+ * v │
+ * join leaf │
+ * │ │
+ * v │
+ * XFS_DAS_RM_SHRINK │
+ * │ │
+ * v │
+ * do the shrink │
+ * │ │
+ * v │
+ * free state <──┘
+ * │
+ * v
+ * done
+ *
+ *
+ * Below is a state machine diagram for attr set operations.
+ *
+ * The challenge with understanding this system comes from trying to absorb
+ * the state machine all at once, when really one should only be looking at it
+ * within the context of a single function. Once a state-sensitive function is
+ * called, the idea is that it "takes ownership" of the state machine. It isn't
+ * concerned with the states that may have belonged to its calling parent, only
+ * with the states relevant to itself or to any subroutines called from it.
+ * Once a calling function hands off the state machine to a subroutine, it
+ * needs to respect the simple rule that it doesn't "own" the state machine
+ * anymore, and it is the responsibility of that calling function to propagate
+ * the -EAGAIN back up the call stack. Upon reentry, it is committed to
+ * re-calling that subroutine until it returns something other than -EAGAIN.
+ * Once that subroutine signals completion (by returning anything other than
+ * -EAGAIN), the calling function can resume using the state machine.
+ *
+ * xfs_attr_set_iter()
+ * │
+ * v
+ * ┌─y─ has an attr fork?
+ * │ |
+ * │ n
+ * │ |
+ * │ V
+ * │ add a fork
+ * │ │
+ * └──────────┤
+ * │
+ * V
+ * ┌─── is shortform?
+ * │ │
+ * │ y
+ * │ │
+ * │ V
+ * │ xfs_attr_set_fmt
+ * │ |
+ * │ V
+ * │ xfs_attr_try_sf_addname
+ * │ │
+ * │ V
+ * │ had enough ──y──> done
+ * │ space?
+ * n │
+ * │ n
+ * │ │
+ * │ V
+ * │ transform to leaf
+ * │ │
+ * │ V
+ * │ hold the leaf buffer
+ * │ │
+ * │ V
+ * │ return -EAGAIN
+ * │ Re-enter in
+ * │ leaf form
+ * │
+ * └─> release leaf buffer
+ * if needed
+ * │
+ * V
+ * ┌───n── fork has
+ * │ only 1 blk?
+ * │ │
+ * │ y
+ * │ │
+ * │ v
+ * │ xfs_attr_leaf_try_add()
+ * │ │
+ * │ v
+ * │ had enough ──────────────y─────────────┐
+ * │ space? │
+ * │ │ │
+ * │ n │
+ * │ │ │
+ * │ v │
+ * │ return -EAGAIN │
+ * │ re-enter in │
+ * │ node form │
+ * │ │ │
+ * └──────────┤ │
+ * │ │
+ * V │
+ * xfs_attr_node_addname_find_attr │
+ * determines if this │
+ * is create or rename │
+ * find space to store attr │
+ * │ │
+ * v │
+ * xfs_attr_node_addname │
+ * │ │
+ * v │
+ * fits in a node leaf? ────n─────┐ │
+ * │ ^ v │
+ * │ │ single leaf node? │
+ * │ │ │ │ │
+ * y │ y n │
+ * │ │ │ │ │
+ * v │ v v │
+ * update │ grow the leaf split if │
+ * hashvals └── return -EAGAIN needed │
+ * │ retry leaf add │ │
+ * │ on reentry │ │
+ * ├────────────────────────────┘ │
+ * │ │
+ * v │
+ * need to alloc │
+ * ┌─y── or flip flag? │
+ * │ │ │
+ * │ n │
+ * │ │ │
+ * │ v │
+ * │ done │
+ * │ │
+ * │ │
+ * │ XFS_DAS_FOUND_LBLK <────────────────┘
+ * │ │
+ * │ V
+ * │ xfs_attr_leaf_addname()
+ * │ │
+ * │ v
+ * │ ┌──first time through?
+ * │ │ │
+ * │ │ y
+ * │ │ │
+ * │ n v
+ * │ │ if we have rmt blks
+ * │ │ find space for them
+ * │ │ │
+ * │ └──────────┤
+ * │ │
+ * │ v
+ * │ still have
+ * │ ┌─n─ blks to alloc? <──┐
+ * │ │ │ │
+ * │ │ y │
+ * │ │ │ │
+ * │ │ v │
+ * │ │ alloc one blk │
+ * │ │ return -EAGAIN ──┘
+ * │ │ re-enter with one
+ * │ │ less blk to alloc
+ * │ │
+ * │ │
+ * │ └───> set the rmt
+ * │ value
+ * │ │
+ * │ v
+ * │ was this
+ * │ a rename? ──n─┐
+ * │ │ │
+ * │ y │
+ * │ │ │
+ * │ v │
+ * │ flip incomplete │
+ * │ flag │
+ * │ │ │
+ * │ v │
+ * │ XFS_DAS_FLIP_LFLAG │
+ * │ │ │
+ * │ v │
+ * │ need to remove │
+ * │ old bks? ──n──┤
+ * │ │ │
+ * │ y │
+ * │ │ │
+ * │ V │
+ * │ remove │
+ * │ ┌───> old blks │
+ * │ │ │ │
+ * │ XFS_DAS_RM_LBLK │ │
+ * │ ^ │ │
+ * │ │ v │
+ * │ └──y── more to │
+ * │ remove? │
+ * │ │ │
+ * │ n │
+ * │ │ │
+ * │ v │
+ * │ XFS_DAS_RD_LEAF │
+ * │ │ │
+ * │ v │
+ * │ remove leaf │
+ * │ │ │
+ * │ v │
+ * │ shrink to sf │
+ * │ if needed │
+ * │ │ │
+ * │ v │
+ * │ done <──────┘
+ * │
+ * └──────> XFS_DAS_FOUND_NBLK
+ * │
+ * v
+ * ┌─────n── need to
+ * │ alloc blks?
+ * │ │
+ * │ y
+ * │ │
+ * │ v
+ * │ find space
+ * │ │
+ * │ v
+ * │ ┌─>XFS_DAS_ALLOC_NODE
+ * │ │ │
+ * │ │ v
+ * │ │ alloc blk
+ * │ │ │
+ * │ │ v
+ * │ └──y── need to alloc
+ * │ more blocks?
+ * │ │
+ * │ n
+ * │ │
+ * │ v
+ * │ set the rmt value
+ * │ │
+ * │ v
+ * │ was this
+ * └────────> a rename? ──n─┐
+ * │ │
+ * y │
+ * │ │
+ * v │
+ * flip incomplete │
+ * flag │
+ * │ │
+ * v │
+ * XFS_DAS_FLIP_NFLAG │
+ * │ │
+ * v │
+ * need to │
+ * remove blks? ─n──┤
+ * │ │
+ * y │
+ * │ │
+ * v │
+ * remove │
+ * ┌────────> old blks │
+ * │ │ │
+ * XFS_DAS_RM_NBLK │ │
+ * ^ │ │
+ * │ v │
+ * └──────y── more to │
+ * remove │
+ * │ │
+ * n │
+ * │ │
+ * v │
+ * XFS_DAS_CLR_FLAG │
+ * │ │
+ * v │
+ * clear flags │
+ * │ │
+ * ├──────────┘
+ * │
+ * v
+ * done
+ */
+
+/*
+ * Enum values for xfs_delattr_context.da_state
+ *
+ * These values are used by delayed attribute operations to keep track of where
+ * they were before they returned -EAGAIN. A return code of -EAGAIN signals the
+ * calling function to roll the transaction, and then call the subroutine to
+ * finish the operation. The enum is then used by the subroutine to jump back
+ * to where it was and resume executing where it left off.
+ */
+enum xfs_delattr_state {
+ XFS_DAS_UNINIT = 0, /* No state has been set yet */
+ XFS_DAS_RMTBLK, /* Removing remote blks */
+ XFS_DAS_RM_NAME, /* Remove attr name */
+ XFS_DAS_RM_SHRINK, /* We are shrinking the tree */
+ XFS_DAS_FOUND_LBLK, /* We found leaf blk for attr */
+ XFS_DAS_FOUND_NBLK, /* We found node blk for attr */
+ XFS_DAS_FLIP_LFLAG, /* Flipped leaf INCOMPLETE attr flag */
+ XFS_DAS_RM_LBLK, /* A rename is removing leaf blocks */
+ XFS_DAS_RD_LEAF, /* Read in the new leaf */
+ XFS_DAS_ALLOC_NODE, /* We are allocating node blocks */
+ XFS_DAS_FLIP_NFLAG, /* Flipped node INCOMPLETE attr flag */
+ XFS_DAS_RM_NBLK, /* A rename is removing node blocks */
+ XFS_DAS_CLR_FLAG, /* Clear incomplete flag */
+};
+
+/*
+ * Defines for xfs_delattr_context.flags
+ */
+#define XFS_DAC_DEFER_FINISH 0x01 /* finish the transaction */
+#define XFS_DAC_LEAF_ADDNAME_INIT 0x02 /* xfs_attr_leaf_addname init*/
+
+/*
+ * Context used for keeping track of delayed attribute operations
+ */
+struct xfs_delattr_context {
+ struct xfs_da_args *da_args;
+
+ /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */
+ struct xfs_bmbt_irec map;
+ xfs_dablk_t lblkno;
+ int blkcnt;
+
+ /* Used in xfs_attr_node_removename to roll through removing blocks */
+ struct xfs_da_state *da_state;
+
+ /* Used to keep track of current state of delayed operation */
+ unsigned int flags;
+ enum xfs_delattr_state dela_state;
+};
+
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
@@ -92,6 +492,9 @@ int xfs_attr_set(struct xfs_da_args *args);
int xfs_attr_set_args(struct xfs_da_args *args);
int xfs_has_attr(struct xfs_da_args *args);
int xfs_attr_remove_args(struct xfs_da_args *args);
+int xfs_attr_remove_iter(struct xfs_delattr_context *dac);
bool xfs_attr_namecheck(const void *name, size_t length);
+void xfs_delattr_context_init(struct xfs_delattr_context *dac,
+ struct xfs_da_args *args);
#endif /* __XFS_ATTR_H__ */
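The state enum, flag bits, context structure, and iterator prototypes added above together describe a resumable operation: the worker records its position in dela_state, returns -EAGAIN, and the caller rolls the transaction before re-entering. A minimal sketch of that driving loop is below; it only uses names introduced in this hunk, but the finish/roll ordering is an assumption for illustration rather than the exact caller added elsewhere in this series.

/* Illustrative sketch only; the real driver lives in xfs_attr.c. */
static int
xfs_attr_remove_deferred_sketch(
	struct xfs_da_args		*args)
{
	struct xfs_delattr_context	dac;
	int				error;

	xfs_delattr_context_init(&dac, args);
	do {
		/* the iterator resumes from dac.dela_state on re-entry */
		error = xfs_attr_remove_iter(&dac);
		if (error != -EAGAIN)
			break;

		/* finish deferred ops if the worker requested it */
		if (dac.flags & XFS_DAC_DEFER_FINISH) {
			error = xfs_defer_finish(&args->trans);
			if (error)
				break;
			dac.flags &= ~XFS_DAC_DEFER_FINISH;
		}

		/* refresh the transaction, then go around again */
		error = xfs_trans_roll_inode(&args->trans, args->dp);
	} while (!error);

	return error;
}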
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 556184b63061..b910bd209949 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -19,14 +19,15 @@
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
#include "xfs_attr.h"
+#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_dir2.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
/*
@@ -773,7 +774,7 @@ xfs_attr_fork_remove(
* Remove an attribute from the shortform attribute list structure.
*/
int
-xfs_attr_shortform_remove(
+xfs_attr_sf_removename(
struct xfs_da_args *args)
{
struct xfs_attr_shortform *sf;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 9b1c59f40a26..efa757f1e912 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -51,7 +51,7 @@ int xfs_attr_shortform_lookup(struct xfs_da_args *args);
int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
int xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
struct xfs_buf **leaf_bp);
-int xfs_attr_shortform_remove(struct xfs_da_args *args);
+int xfs_attr_sf_removename(struct xfs_da_args *args);
int xfs_attr_sf_findname(struct xfs_da_args *args,
struct xfs_attr_sf_entry **sfep,
unsigned int *basep);
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 48d8e9caf86f..0c8bee3abc3b 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -439,9 +439,9 @@ xfs_attr_rmtval_get(
/*
* Find a "hole" in the attribute address space large enough for us to drop the
- * new attribute's value into
+ * new attributes value into
*/
-STATIC int
+int
xfs_attr_rmt_find_hole(
struct xfs_da_args *args)
{
@@ -468,7 +468,7 @@ xfs_attr_rmt_find_hole(
return 0;
}
-STATIC int
+int
xfs_attr_rmtval_set_value(
struct xfs_da_args *args)
{
@@ -562,69 +562,66 @@ xfs_attr_rmtval_stale(
}
/*
- * Write the value associated with an attribute into the out-of-line buffer
- * that we have defined for it.
+ * Find a hole for the attr and store it in the delayed attr context. This
+ * initializes the context to roll through allocating an attr extent for a
+ * delayed attr operation
*/
int
-xfs_attr_rmtval_set(
- struct xfs_da_args *args)
+xfs_attr_rmtval_find_space(
+ struct xfs_delattr_context *dac)
{
- struct xfs_inode *dp = args->dp;
- struct xfs_bmbt_irec map;
- xfs_dablk_t lblkno;
- int blkcnt;
- int nmap;
- int error;
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_bmbt_irec *map = &dac->map;
+ int error;
- trace_xfs_attr_rmtval_set(args);
+ dac->lblkno = 0;
+ dac->blkcnt = 0;
+ args->rmtblkcnt = 0;
+ args->rmtblkno = 0;
+ memset(map, 0, sizeof(struct xfs_bmbt_irec));
error = xfs_attr_rmt_find_hole(args);
if (error)
return error;
- blkcnt = args->rmtblkcnt;
- lblkno = (xfs_dablk_t)args->rmtblkno;
- /*
- * Roll through the "value", allocating blocks on disk as required.
- */
- while (blkcnt > 0) {
- /*
- * Allocate a single extent, up to the size of the value.
- *
- * Note that we have to consider this a data allocation as we
- * write the remote attribute without logging the contents.
- * Hence we must ensure that we aren't using blocks that are on
- * the busy list so that we don't overwrite blocks which have
- * recently been freed but their transactions are not yet
- * committed to disk. If we overwrite the contents of a busy
- * extent and then crash then the block may not contain the
- * correct metadata after log recovery occurs.
- */
- nmap = 1;
- error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
- blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
- &nmap);
- if (error)
- return error;
- error = xfs_defer_finish(&args->trans);
- if (error)
- return error;
+ dac->blkcnt = args->rmtblkcnt;
+ dac->lblkno = args->rmtblkno;
- ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
- lblkno += map.br_blockcount;
- blkcnt -= map.br_blockcount;
+ return 0;
+}
- /*
- * Start the next trans in the chain.
- */
- error = xfs_trans_roll_inode(&args->trans, dp);
- if (error)
- return error;
- }
+/*
+ * Write one block of the value associated with an attribute into the
+ * out-of-line buffer that we have defined for it. This is similar to a subset
+ * of xfs_attr_rmtval_set, but records the current block to the delayed attr
+ * context, and leaves transaction handling to the caller.
+ */
+int
+xfs_attr_rmtval_set_blk(
+ struct xfs_delattr_context *dac)
+{
+ struct xfs_da_args *args = dac->da_args;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_bmbt_irec *map = &dac->map;
+ int nmap;
+ int error;
+
+ nmap = 1;
+ error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)dac->lblkno,
+ dac->blkcnt, XFS_BMAPI_ATTRFORK, args->total,
+ map, &nmap);
+ if (error)
+ return error;
+
+ ASSERT(nmap == 1);
+ ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
+ (map->br_startblock != HOLESTARTBLOCK));
- return xfs_attr_rmtval_set_value(args);
+ /* roll attribute extent map forwards */
+ dac->lblkno += map->br_blockcount;
+ dac->blkcnt -= map->br_blockcount;
+
+ return 0;
}
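With the remote-value helpers now exported, setting a large value becomes a three-step sequence: reserve the address range once, allocate its blocks one extent at a time, then copy the value in. The sketch below strings those calls together in a single function for clarity; in the patch itself the loop is broken up by -EAGAIN returns so the transaction can be rolled between allocations, so treat this as illustrative rather than the real call site.

/* Illustrative only: the real driver is the delayed-attr state machine. */
static int
xfs_attr_rmtval_set_sketch(
	struct xfs_delattr_context	*dac)
{
	struct xfs_da_args	*args = dac->da_args;
	int			error;

	/* pick the dablk range and prime dac->lblkno/dac->blkcnt */
	error = xfs_attr_rmtval_find_space(dac);
	if (error)
		return error;

	/* allocate one extent per call until the range is fully mapped */
	while (dac->blkcnt > 0) {
		error = xfs_attr_rmtval_set_blk(dac);
		if (error)
			return error;
		/* the state machine would return -EAGAIN and roll here */
	}

	/* finally copy the value into the newly allocated blocks */
	return xfs_attr_rmtval_set_value(args);
}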
/*
@@ -669,47 +666,17 @@ xfs_attr_rmtval_invalidate(
}
/*
- * Remove the value associated with an attribute by deleting the
- * out-of-line buffer that it is stored on.
- */
-int
-xfs_attr_rmtval_remove(
- struct xfs_da_args *args)
-{
- int error;
- int retval;
-
- trace_xfs_attr_rmtval_remove(args);
-
- /*
- * Keep de-allocating extents until the remote-value region is gone.
- */
- do {
- retval = __xfs_attr_rmtval_remove(args);
- if (retval && retval != -EAGAIN)
- return retval;
-
- /*
- * Close out trans and start the next one in the chain.
- */
- error = xfs_trans_roll_inode(&args->trans, args->dp);
- if (error)
- return error;
- } while (retval == -EAGAIN);
-
- return 0;
-}
-
-/*
* Remove the value associated with an attribute by deleting the out-of-line
- * buffer that it is stored on. Returns EAGAIN for the caller to refresh the
- * transaction and re-call the function
+ * buffer that it is stored on. Returns -EAGAIN for the caller to refresh the
+ * transaction and re-call the function. Callers should keep calling this
+ * routine until it returns something other than -EAGAIN.
*/
int
__xfs_attr_rmtval_remove(
- struct xfs_da_args *args)
+ struct xfs_delattr_context *dac)
{
- int error, done;
+ struct xfs_da_args *args = dac->da_args;
+ int error, done;
/*
* Unmap value blocks for this attr.
@@ -719,12 +686,20 @@ __xfs_attr_rmtval_remove(
if (error)
return error;
- error = xfs_defer_finish(&args->trans);
- if (error)
- return error;
-
- if (!done)
+ /*
+ * We don't need an explicit state here to pick up where we left off. We
+ * can figure it out using the !done return code. The actual value of
+	 * dac->dela_state may be some value reminiscent of the calling
+	 * function, but its value is irrelevant within the context of this
+	 * function. Once we are done here, the next state is set as needed
+	 * by the parent.
+ */
+ if (!done) {
+ dac->flags |= XFS_DAC_DEFER_FINISH;
return -EAGAIN;
+ }
- return error;
+ args->rmtblkno = 0;
+ args->rmtblkcnt = 0;
+ return 0;
}
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 9eee615da156..61b85b918db8 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -9,10 +9,12 @@
int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
-int xfs_attr_rmtval_set(struct xfs_da_args *args);
-int xfs_attr_rmtval_remove(struct xfs_da_args *args);
int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
xfs_buf_flags_t incore_flags);
int xfs_attr_rmtval_invalidate(struct xfs_da_args *args);
-int __xfs_attr_rmtval_remove(struct xfs_da_args *args);
+int __xfs_attr_rmtval_remove(struct xfs_delattr_context *dac);
+int xfs_attr_rmt_find_hole(struct xfs_da_args *args);
+int xfs_attr_rmtval_set_value(struct xfs_da_args *args);
+int xfs_attr_rmtval_set_blk(struct xfs_delattr_context *dac);
+int xfs_attr_rmtval_find_space(struct xfs_delattr_context *dac);
#endif /* __XFS_ATTR_REMOTE_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a3e0e6f672d6..948092babb6a 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -31,6 +31,7 @@
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
@@ -1028,7 +1029,7 @@ xfs_bmap_add_attrfork_local(
/*
* Set an inode attr fork offset based on the format of the data fork.
*/
-int
+static int
xfs_bmap_set_attrforkoff(
struct xfs_inode *ip,
int size,
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f9a390ecfb1d..67641f669918 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -187,7 +187,6 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
unsigned int xfs_bmap_compute_attr_offset(struct xfs_mount *mp);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
-int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5b6fcb9b44e2..be74a6b53689 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -21,6 +21,7 @@
#include "xfs_alloc.h"
#include "xfs_log.h"
#include "xfs_btree_staging.h"
+#include "xfs_ag.h"
/*
* Cursor allocation zone.
@@ -215,7 +216,7 @@ xfs_btree_check_sptr(
{
if (level <= 0)
return false;
- return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.agno, agbno);
+ return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.pag->pag_agno, agbno);
}
/*
@@ -244,7 +245,7 @@ xfs_btree_check_ptr(
return 0;
xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
- cur->bc_ag.agno, cur->bc_btnum,
+ cur->bc_ag.pag->pag_agno, cur->bc_btnum,
level, index);
}
@@ -376,6 +377,8 @@ xfs_btree_del_cursor(
XFS_FORCED_SHUTDOWN(cur->bc_mp));
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kmem_free(cur->bc_ops);
+ if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
+ xfs_perag_put(cur->bc_ag.pag);
kmem_cache_free(xfs_btree_cur_zone, cur);
}
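The cursor now pins the perag for its whole lifetime: a reference is taken when the cursor is initialised (see the xfs_inobt_init_common() hunk later in this diff) and dropped here in xfs_btree_del_cursor(). A hedged sketch of the resulting ownership, using a hypothetical helper name:

/* Illustrative only: shows reference ownership, not a real helper. */
static void
xfs_inobt_cursor_lifetime_sketch(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
	struct xfs_btree_cur	*cur;

	/* the cursor takes its own pag reference internally */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);

	/* ... cursor users read cur->bc_ag.pag->pag_agno as needed ... */

	/* releases the cursor's pag reference for short-pointer btrees */
	xfs_btree_del_cursor(cur, 0);

	/* the caller still owns, and must drop, its own reference */
	xfs_perag_put(pag);
}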
@@ -885,13 +888,13 @@ xfs_btree_readahead_sblock(
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.pag->pag_agno,
left, 1, cur->bc_ops->buf_ops);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.pag->pag_agno,
right, 1, cur->bc_ops->buf_ops);
rval++;
}
@@ -949,7 +952,7 @@ xfs_btree_ptr_to_daddr(
*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
} else {
agbno = be32_to_cpu(ptr->s);
- *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.agno,
+ *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno);
}
@@ -1150,7 +1153,7 @@ xfs_btree_init_block_cur(
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
owner = cur->bc_ino.ip->i_ino;
else
- owner = cur->bc_ag.agno;
+ owner = cur->bc_ag.pag->pag_agno;
xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
cur->bc_btnum, level, numrecs,
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 10e50cbacacf..4dbdc659c396 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -11,6 +11,7 @@ struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;
+struct xfs_perag;
extern kmem_zone_t *xfs_btree_cur_zone;
@@ -180,11 +181,11 @@ union xfs_btree_irec {
/* Per-AG btree information. */
struct xfs_btree_cur_ag {
+ struct xfs_perag *pag;
union {
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
};
- xfs_agnumber_t agno;
union {
struct {
unsigned long nr_ops; /* # record updates */
@@ -231,6 +232,13 @@ typedef struct xfs_btree_cur
uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
xfs_btnum_t bc_btnum; /* identifies which btree type */
int bc_statoff; /* offset of btre stats array */
+
+ /*
+ * Short btree pointers need an agno to be able to turn the pointers
+ * into physical addresses for IO, so the btree cursor switches between
+ * bc_ino and bc_ag based on whether XFS_BTREE_LONG_PTRS is set for the
+ * cursor.
+ */
union {
struct xfs_btree_cur_ag bc_ag;
struct xfs_btree_cur_ino bc_ino;
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index eefdb518fe64..aaf8805a82df 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -10,7 +10,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -27,6 +26,7 @@
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
/*
* Lookup a record by ino in the btree given by cur.
@@ -105,7 +105,7 @@ xfs_inobt_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
uint64_t realfree;
@@ -172,18 +172,17 @@ xfs_inobt_insert(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_agino_t newino,
xfs_agino_t newlen,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agino_t thisino;
int i;
int error;
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
for (thisino = newino;
thisino < newino + newlen;
@@ -215,10 +214,9 @@ xfs_inobt_insert(
* Verify that the number of free inodes in the AGI is correct.
*/
#ifdef DEBUG
-STATIC int
+static int
xfs_check_agi_freecount(
- struct xfs_btree_cur *cur,
- struct xfs_agi *agi)
+ struct xfs_btree_cur *cur)
{
if (cur->bc_nlevels == 1) {
xfs_inobt_rec_incore_t rec;
@@ -244,12 +242,12 @@ xfs_check_agi_freecount(
} while (i == 1);
if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
- ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
+ ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
}
return 0;
}
#else
-#define xfs_check_agi_freecount(cur, agi) 0
+#define xfs_check_agi_freecount(cur) 0
#endif
/*
@@ -520,18 +518,17 @@ xfs_inobt_insert_sprec(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
int btnum,
struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */
bool merge) /* merge or replace */
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
int error;
int i;
struct xfs_inobt_rec_incore rec;
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
/* the new record is pre-aligned so we know where to look */
error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
@@ -578,14 +575,14 @@ xfs_inobt_insert_sprec(
goto error;
}
- trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
+ trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
rec.ir_holemask, nrec->ir_startino,
nrec->ir_holemask);
/* merge to nrec to output the updated record */
__xfs_inobt_rec_merge(nrec, &rec);
- trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
+ trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
nrec->ir_holemask);
error = xfs_inobt_rec_check_count(mp, nrec);
@@ -606,28 +603,28 @@ error:
}
/*
- * Allocate new inodes in the allocation group specified by agbp.
- * Returns 0 if inodes were allocated in this AG; 1 if there was no space
- * in this AG; or the usual negative error code.
+ * Allocate new inodes in the allocation group specified by agbp. Returns 0 if
+ * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so
+ * the caller knows it can try another AG, a hard -ENOSPC when over the maximum
+ * inode count threshold, or the usual negative error code for other errors.
*/
STATIC int
xfs_ialloc_ag_alloc(
struct xfs_trans *tp,
- struct xfs_buf *agbp)
+ struct xfs_buf *agbp,
+ struct xfs_perag *pag)
{
struct xfs_agi *agi;
struct xfs_alloc_arg args;
- xfs_agnumber_t agno;
int error;
xfs_agino_t newino; /* new first inode's number */
xfs_agino_t newlen; /* new number of inodes */
int isaligned = 0; /* inode allocation at stripe */
/* unit boundary */
/* init. to full chunk */
- uint16_t allocmask = (uint16_t) -1;
struct xfs_inobt_rec_incore rec;
- struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
+ uint16_t allocmask = (uint16_t) -1;
int do_sparse = 0;
memset(&args, 0, sizeof(args));
@@ -660,14 +657,13 @@ xfs_ialloc_ag_alloc(
*/
agi = agbp->b_addr;
newino = be32_to_cpu(agi->agi_newino);
- agno = be32_to_cpu(agi->agi_seqno);
args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
igeo->ialloc_blks;
if (do_sparse)
goto sparse_alloc;
if (likely(newino != NULLAGINO &&
(args.agbno < be32_to_cpu(agi->agi_length)))) {
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
args.type = XFS_ALLOCTYPE_THIS_BNO;
args.prod = 1;
@@ -727,7 +723,7 @@ xfs_ialloc_ag_alloc(
* For now, just allocate blocks up front.
*/
args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
/*
* Allocate a fixed-size extent of inodes.
*/
@@ -748,7 +744,7 @@ xfs_ialloc_ag_alloc(
if (isaligned && args.fsbno == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
args.alignment = igeo->cluster_align;
if ((error = xfs_alloc_vextent(&args)))
return error;
@@ -764,7 +760,7 @@ xfs_ialloc_ag_alloc(
sparse_alloc:
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
- args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
args.alignment = args.mp->m_sb.sb_spino_align;
args.prod = 1;
@@ -796,7 +792,7 @@ sparse_alloc:
}
if (args.fsbno == NULLFSBLOCK)
- return 1;
+ return -EAGAIN;
ASSERT(args.len == args.minlen);
@@ -809,7 +805,7 @@ sparse_alloc:
* rather than a linear progression to prevent the next generation
* number from being easily guessable.
*/
- error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
+ error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
args.agbno, args.len, prandom_u32());
if (error)
@@ -836,12 +832,12 @@ sparse_alloc:
* if necessary. If a merge does occur, rec is updated to the
* merged record.
*/
- error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
- &rec, true);
+ error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
+ XFS_BTNUM_INO, &rec, true);
if (error == -EFSCORRUPTED) {
xfs_alert(args.mp,
"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
- XFS_AGINO_TO_INO(args.mp, agno,
+ XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
rec.ir_startino),
rec.ir_holemask, rec.ir_count);
xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
@@ -861,21 +857,20 @@ sparse_alloc:
* existing record with this one.
*/
if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
- error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
- XFS_BTNUM_FINO, &rec,
- false);
+ error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
+ XFS_BTNUM_FINO, &rec, false);
if (error)
return error;
}
} else {
/* full chunk - insert new records to both btrees */
- error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
+ error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino, newlen,
XFS_BTNUM_INO);
if (error)
return error;
if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
- error = xfs_inobt_insert(args.mp, tp, agbp, newino,
+ error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino,
newlen, XFS_BTNUM_FINO);
if (error)
return error;
@@ -887,7 +882,6 @@ sparse_alloc:
*/
be32_add_cpu(&agi->agi_count, newlen);
be32_add_cpu(&agi->agi_freecount, newlen);
- pag = agbp->b_pag;
pag->pagi_freecount += newlen;
pag->pagi_count += newlen;
agi->agi_newino = cpu_to_be32(newino);
@@ -905,139 +899,6 @@ sparse_alloc:
return 0;
}
-STATIC xfs_agnumber_t
-xfs_ialloc_next_ag(
- xfs_mount_t *mp)
-{
- xfs_agnumber_t agno;
-
- spin_lock(&mp->m_agirotor_lock);
- agno = mp->m_agirotor;
- if (++mp->m_agirotor >= mp->m_maxagi)
- mp->m_agirotor = 0;
- spin_unlock(&mp->m_agirotor_lock);
-
- return agno;
-}
-
-/*
- * Select an allocation group to look for a free inode in, based on the parent
- * inode and the mode. Return the allocation group buffer.
- */
-STATIC xfs_agnumber_t
-xfs_ialloc_ag_select(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_ino_t parent, /* parent directory inode number */
- umode_t mode) /* bits set to indicate file type */
-{
- xfs_agnumber_t agcount; /* number of ag's in the filesystem */
- xfs_agnumber_t agno; /* current ag number */
- int flags; /* alloc buffer locking flags */
- xfs_extlen_t ineed; /* blocks needed for inode allocation */
- xfs_extlen_t longest = 0; /* longest extent available */
- xfs_mount_t *mp; /* mount point structure */
- int needspace; /* file mode implies space allocated */
- xfs_perag_t *pag; /* per allocation group data */
- xfs_agnumber_t pagno; /* parent (starting) ag number */
- int error;
-
- /*
- * Files of these types need at least one block if length > 0
- * (and they won't fit in the inode, but that's hard to figure out).
- */
- needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
- mp = tp->t_mountp;
- agcount = mp->m_maxagi;
- if (S_ISDIR(mode))
- pagno = xfs_ialloc_next_ag(mp);
- else {
- pagno = XFS_INO_TO_AGNO(mp, parent);
- if (pagno >= agcount)
- pagno = 0;
- }
-
- ASSERT(pagno < agcount);
-
- /*
- * Loop through allocation groups, looking for one with a little
- * free space in it. Note we don't look for free inodes, exactly.
- * Instead, we include whether there is a need to allocate inodes
- * to mean that blocks must be allocated for them,
- * if none are currently free.
- */
- agno = pagno;
- flags = XFS_ALLOC_FLAG_TRYLOCK;
- for (;;) {
- pag = xfs_perag_get(mp, agno);
- if (!pag->pagi_inodeok) {
- xfs_ialloc_next_ag(mp);
- goto nextag;
- }
-
- if (!pag->pagi_init) {
- error = xfs_ialloc_pagi_init(mp, tp, agno);
- if (error)
- goto nextag;
- }
-
- if (pag->pagi_freecount) {
- xfs_perag_put(pag);
- return agno;
- }
-
- if (!pag->pagf_init) {
- error = xfs_alloc_pagf_init(mp, tp, agno, flags);
- if (error)
- goto nextag;
- }
-
- /*
- * Check that there is enough free space for the file plus a
- * chunk of inodes if we need to allocate some. If this is the
- * first pass across the AGs, take into account the potential
- * space needed for alignment of inode chunks when checking the
- * longest contiguous free space in the AG - this prevents us
- * from getting ENOSPC because we have free space larger than
- * ialloc_blks but alignment constraints prevent us from using
- * it.
- *
- * If we can't find an AG with space for full alignment slack to
- * be taken into account, we must be near ENOSPC in all AGs.
- * Hence we don't include alignment for the second pass and so
- * if we fail allocation due to alignment issues then it is most
- * likely a real ENOSPC condition.
- */
- ineed = M_IGEO(mp)->ialloc_min_blks;
- if (flags && ineed > 1)
- ineed += M_IGEO(mp)->cluster_align;
- longest = pag->pagf_longest;
- if (!longest)
- longest = pag->pagf_flcount > 0;
-
- if (pag->pagf_freeblks >= needspace + ineed &&
- longest >= ineed) {
- xfs_perag_put(pag);
- return agno;
- }
-nextag:
- xfs_perag_put(pag);
- /*
- * No point in iterating over the rest, if we're shutting
- * down.
- */
- if (XFS_FORCED_SHUTDOWN(mp))
- return NULLAGNUMBER;
- agno++;
- if (agno >= agcount)
- agno = 0;
- if (agno == pagno) {
- if (flags == 0)
- return NULLAGNUMBER;
- flags = 0;
- }
- }
-}
-
/*
* Try to retrieve the next record to the left/right from the current one.
*/
@@ -1123,15 +984,14 @@ STATIC int
xfs_dialloc_ag_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_ino_t parent,
xfs_ino_t *inop)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
- struct xfs_perag *pag = agbp->b_pag;
struct xfs_btree_cur *cur, *tcur;
struct xfs_inobt_rec_incore rec, trec;
xfs_ino_t ino;
@@ -1145,7 +1005,7 @@ xfs_dialloc_ag_inobt(
ASSERT(pag->pagi_freecount > 0);
restart_pagno:
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
/*
* If pagino is 0 (this is the root inode allocation) use newino.
* This must work because we've just allocated some.
@@ -1153,14 +1013,14 @@ xfs_dialloc_ag_inobt(
if (!pagino)
pagino = be32_to_cpu(agi->agi_newino);
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
/*
* If in the same AG as the parent, try to get near the parent.
*/
- if (pagno == agno) {
+ if (pagno == pag->pag_agno) {
int doneleft; /* done, to the left */
int doneright; /* done, to the right */
@@ -1363,7 +1223,7 @@ alloc_inode:
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
- ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
rec.ir_free &= ~XFS_INOBT_MASK(offset);
rec.ir_freecount--;
error = xfs_inobt_update(cur, &rec);
@@ -1373,7 +1233,7 @@ alloc_inode:
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
pag->pagi_freecount--;
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
@@ -1568,16 +1428,16 @@ xfs_dialloc_ag_update_inobt(
* The caller selected an AG for us, and made sure that free inodes are
* available.
*/
-int
+static int
xfs_dialloc_ag(
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_ino_t parent,
xfs_ino_t *inop)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
struct xfs_btree_cur *cur; /* finobt cursor */
@@ -1589,7 +1449,7 @@ xfs_dialloc_ag(
int i;
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
- return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
+ return xfs_dialloc_ag_inobt(tp, agbp, pag, parent, inop);
/*
* If pagino is 0 (this is the root inode allocation) use newino.
@@ -1598,9 +1458,9 @@ xfs_dialloc_ag(
if (!pagino)
pagino = be32_to_cpu(agi->agi_newino);
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error_cur;
@@ -1609,7 +1469,7 @@ xfs_dialloc_ag(
* parent. If so, find the closest available inode to the parent. If
* not, consider the agi hint or find the first free inode in the AG.
*/
- if (agno == pagno)
+ if (pag->pag_agno == pagno)
error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
else
error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
@@ -1621,7 +1481,7 @@ xfs_dialloc_ag(
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
- ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
/*
* Modify or remove the finobt record.
@@ -1641,9 +1501,9 @@ xfs_dialloc_ag(
* the original freecount. If all is well, make the equivalent update to
* the inobt using the finobt record and offset information.
*/
- icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ icur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
- error = xfs_check_agi_freecount(icur, agi);
+ error = xfs_check_agi_freecount(icur);
if (error)
goto error_icur;
@@ -1657,14 +1517,14 @@ xfs_dialloc_ag(
*/
be32_add_cpu(&agi->agi_freecount, -1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- agbp->b_pag->pagi_freecount--;
+ pag->pagi_freecount--;
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
- error = xfs_check_agi_freecount(icur, agi);
+ error = xfs_check_agi_freecount(icur);
if (error)
goto error_icur;
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error_icur;
@@ -1708,54 +1568,195 @@ xfs_dialloc_roll(
/* Re-attach the quota info that we detached from prev trx. */
tp->t_dqinfo = dqinfo;
+ /*
+ * Join the buffer even on commit error so that the buffer is released
+ * when the caller cancels the transaction and doesn't have to handle
+ * this error case specially.
+ */
+ xfs_trans_bjoin(tp, agibp);
*tpp = tp;
+ return error;
+}
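The join-on-error behaviour is easiest to see from the caller's side. A hedged fragment of the intended error handling follows (the surrounding caller is hypothetical, not part of this hunk):

	error = xfs_dialloc_roll(&tp, agibp);
	if (error) {
		/*
		 * agibp was joined to the new transaction even though the
		 * commit failed, so cancelling the transaction releases it;
		 * no separate buffer release is needed on this path.
		 */
		xfs_trans_cancel(tp);
		return error;
	}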
+
+static xfs_agnumber_t
+xfs_ialloc_next_ag(
+ xfs_mount_t *mp)
+{
+ xfs_agnumber_t agno;
+
+ spin_lock(&mp->m_agirotor_lock);
+ agno = mp->m_agirotor;
+ if (++mp->m_agirotor >= mp->m_maxagi)
+ mp->m_agirotor = 0;
+ spin_unlock(&mp->m_agirotor_lock);
+
+ return agno;
+}
+
+static bool
+xfs_dialloc_good_ag(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ umode_t mode,
+ int flags,
+ bool ok_alloc)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_extlen_t ineed;
+ xfs_extlen_t longest = 0;
+ int needspace;
+ int error;
+
+ if (!pag->pagi_inodeok)
+ return false;
+
+ if (!pag->pagi_init) {
+ error = xfs_ialloc_pagi_init(mp, tp, pag->pag_agno);
+ if (error)
+ return false;
+ }
+
+ if (pag->pagi_freecount)
+ return true;
+ if (!ok_alloc)
+ return false;
+
+ if (!pag->pagf_init) {
+ error = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, flags);
+ if (error)
+ return false;
+ }
+
+ /*
+ * Check that there is enough free space for the file plus a chunk of
+ * inodes if we need to allocate some. If this is the first pass across
+ * the AGs, take into account the potential space needed for alignment
+ * of inode chunks when checking the longest contiguous free space in
+ * the AG - this prevents us from getting ENOSPC because we have free
+ * space larger than ialloc_blks but alignment constraints prevent us
+ * from using it.
+ *
+ * If we can't find an AG with space for full alignment slack to be
+ * taken into account, we must be near ENOSPC in all AGs. Hence we
+ * don't include alignment for the second pass and so if we fail
+ * allocation due to alignment issues then it is most likely a real
+ * ENOSPC condition.
+ *
+ * XXX(dgc): this calculation is now bogus thanks to the per-ag
+ * reservations that xfs_alloc_fix_freelist() now does via
+ * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will
+ * be more than large enough for the check below to succeed, but
+ * xfs_alloc_space_available() will fail because of the non-zero
+ * metadata reservation and hence we won't actually be able to allocate
+ * more inodes in this AG. We do soooo much unnecessary work near ENOSPC
+ * because of this.
+ */
+ ineed = M_IGEO(mp)->ialloc_min_blks;
+ if (flags && ineed > 1)
+ ineed += M_IGEO(mp)->cluster_align;
+ longest = pag->pagf_longest;
+ if (!longest)
+ longest = pag->pagf_flcount > 0;
+ needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
+
+ if (pag->pagf_freeblks < needspace + ineed || longest < ineed)
+ return false;
+ return true;
+}
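As a worked illustration of the space check above (the numbers are made up): if M_IGEO(mp)->ialloc_min_blks is 16 and cluster_align is 4, the first pass (flags = XFS_ALLOC_FLAG_TRYLOCK) needs ineed = 16 + 4 = 20 blocks; creating a directory makes needspace = 1, so the AG only qualifies if pagf_freeblks >= 21 and pagf_longest >= 20. On the second pass flags is 0, the alignment slack is dropped, and the same AG qualifies with pagf_freeblks >= 17 and pagf_longest >= 16.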
+
+static int
+xfs_dialloc_try_ag(
+ struct xfs_trans **tpp,
+ struct xfs_perag *pag,
+ xfs_ino_t parent,
+ xfs_ino_t *new_ino,
+ bool ok_alloc)
+{
+ struct xfs_buf *agbp;
+ xfs_ino_t ino;
+ int error;
+
+ /*
+ * Then read in the AGI buffer and recheck with the AGI buffer
+ * lock held.
+ */
+ error = xfs_ialloc_read_agi(pag->pag_mount, *tpp, pag->pag_agno, &agbp);
if (error)
return error;
- xfs_trans_bjoin(tp, agibp);
- return 0;
+
+ if (!pag->pagi_freecount) {
+ if (!ok_alloc) {
+ error = -EAGAIN;
+ goto out_release;
+ }
+
+ error = xfs_ialloc_ag_alloc(*tpp, agbp, pag);
+ if (error < 0)
+ goto out_release;
+
+ /*
+ * We successfully allocated space for an inode cluster in this
+ * AG. Roll the transaction so that we can allocate one of the
+ * new inodes.
+ */
+ ASSERT(pag->pagi_freecount > 0);
+ error = xfs_dialloc_roll(tpp, agbp);
+ if (error)
+ goto out_release;
+ }
+
+ /* Allocate an inode in the found AG */
+ error = xfs_dialloc_ag(*tpp, agbp, pag, parent, &ino);
+ if (!error)
+ *new_ino = ino;
+ return error;
+
+out_release:
+ xfs_trans_brelse(*tpp, agbp);
+ return error;
}
/*
- * Select and prepare an AG for inode allocation.
+ * Allocate an on-disk inode.
*
* Mode is used to tell whether the new inode is a directory and hence where to
- * locate it.
- *
- * This function will ensure that the selected AG has free inodes available to
- * allocate from. The selected AGI will be returned locked to the caller, and it
- * will allocate more free inodes if required. If no free inodes are found or
- * can be allocated, no AGI will be returned.
+ * locate it. The on-disk inode that is allocated will be returned in @new_ino
+ * on success, otherwise an error will be set to indicate the failure (e.g.
+ * -ENOSPC).
*/
int
-xfs_dialloc_select_ag(
+xfs_dialloc(
struct xfs_trans **tpp,
xfs_ino_t parent,
umode_t mode,
- struct xfs_buf **IO_agbp)
+ xfs_ino_t *new_ino)
{
struct xfs_mount *mp = (*tpp)->t_mountp;
- struct xfs_buf *agbp;
xfs_agnumber_t agno;
- int error;
- bool noroom = false;
+ int error = 0;
xfs_agnumber_t start_agno;
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
- bool okalloc = true;
-
- *IO_agbp = NULL;
+ bool ok_alloc = true;
+ int flags;
+ xfs_ino_t ino;
/*
- * We do not have an agbp, so select an initial allocation
- * group for inode allocation.
+ * Directories, symlinks, and regular files frequently allocate at least
+ * one block, so factor in that potential expansion when we examine whether
+ * an AG has enough space for file creation.
*/
- start_agno = xfs_ialloc_ag_select(*tpp, parent, mode);
- if (start_agno == NULLAGNUMBER)
- return 0;
+ if (S_ISDIR(mode))
+ start_agno = xfs_ialloc_next_ag(mp);
+ else {
+ start_agno = XFS_INO_TO_AGNO(mp, parent);
+ if (start_agno >= mp->m_maxagi)
+ start_agno = 0;
+ }
/*
* If we have already hit the ceiling of inode blocks then clear
- * okalloc so we scan all available agi structures for a free
+ * ok_alloc so we scan all available agi structures for a free
* inode.
*
* Read rough value of mp->m_icount by percpu_counter_read_positive,
@@ -1764,8 +1765,7 @@ xfs_dialloc_select_ag(
if (igeo->maxicount &&
percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
> igeo->maxicount) {
- noroom = true;
- okalloc = false;
+ ok_alloc = false;
}
/*
@@ -1774,82 +1774,36 @@ xfs_dialloc_select_ag(
* allocation groups upward, wrapping at the end.
*/
agno = start_agno;
+ flags = XFS_ALLOC_FLAG_TRYLOCK;
for (;;) {
pag = xfs_perag_get(mp, agno);
- if (!pag->pagi_inodeok) {
- xfs_ialloc_next_ag(mp);
- goto nextag;
- }
-
- if (!pag->pagi_init) {
- error = xfs_ialloc_pagi_init(mp, *tpp, agno);
- if (error)
+ if (xfs_dialloc_good_ag(*tpp, pag, mode, flags, ok_alloc)) {
+ error = xfs_dialloc_try_ag(tpp, pag, parent,
+ &ino, ok_alloc);
+ if (error != -EAGAIN)
break;
}
- /*
- * Do a first racy fast path check if this AG is usable.
- */
- if (!pag->pagi_freecount && !okalloc)
- goto nextag;
-
- /*
- * Then read in the AGI buffer and recheck with the AGI buffer
- * lock held.
- */
- error = xfs_ialloc_read_agi(mp, *tpp, agno, &agbp);
- if (error)
- break;
-
- if (pag->pagi_freecount) {
- xfs_perag_put(pag);
- goto found_ag;
- }
-
- if (!okalloc)
- goto nextag_relse_buffer;
-
- error = xfs_ialloc_ag_alloc(*tpp, agbp);
- if (error < 0) {
- xfs_trans_brelse(*tpp, agbp);
-
- if (error == -ENOSPC)
- error = 0;
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = -EFSCORRUPTED;
break;
}
-
- if (error == 0) {
- /*
- * We successfully allocated space for an inode cluster
- * in this AG. Roll the transaction so that we can
- * allocate one of the new inodes.
- */
- ASSERT(pag->pagi_freecount > 0);
- xfs_perag_put(pag);
-
- error = xfs_dialloc_roll(tpp, agbp);
- if (error) {
- xfs_buf_relse(agbp);
- return error;
+ if (++agno == mp->m_maxagi)
+ agno = 0;
+ if (agno == start_agno) {
+ if (!flags) {
+ error = -ENOSPC;
+ break;
}
- goto found_ag;
+ flags = 0;
}
-
-nextag_relse_buffer:
- xfs_trans_brelse(*tpp, agbp);
-nextag:
xfs_perag_put(pag);
- if (++agno == mp->m_sb.sb_agcount)
- agno = 0;
- if (agno == start_agno)
- return noroom ? -ENOSPC : 0;
}
+ if (!error)
+ *new_ino = ino;
xfs_perag_put(pag);
return error;
-found_ag:
- *IO_agbp = agbp;
- return 0;
}
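For reference, a hedged sketch of how a caller now uses the collapsed interface (the wrapper below is hypothetical; real callers such as inode creation are outside this hunk):

/* Illustrative only: xfs_dialloc() replaces the old two-step AGI dance. */
static int
xfs_alloc_ondisk_inode_sketch(
	struct xfs_trans	**tpp,
	xfs_ino_t		parent,
	umode_t			mode,
	xfs_ino_t		*inop)
{
	int			error;

	/*
	 * One call selects a suitable AG (using @parent as a locality
	 * hint), allocates an inode chunk if necessary, and picks a free
	 * on-disk inode. -ENOSPC is returned once every AG has been tried.
	 */
	error = xfs_dialloc(tpp, parent, mode, inop);
	if (error)
		return error;

	/* *inop now names the allocated inode; no AGI buffer is returned */
	return 0;
}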
/*
@@ -1935,12 +1889,12 @@ xfs_difree_inobt(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_agino_t agino,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
int ilen;
@@ -1954,9 +1908,9 @@ xfs_difree_inobt(
/*
* Initialize the cursor.
*/
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
@@ -2005,7 +1959,8 @@ xfs_difree_inobt(
struct xfs_perag *pag = agbp->b_pag;
xic->deleted = true;
- xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
+ xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
+ rec.ir_startino);
xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
/*
@@ -2028,7 +1983,7 @@ xfs_difree_inobt(
goto error0;
}
- xfs_difree_inode_chunk(tp, agno, &rec);
+ xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
} else {
xic->deleted = false;
@@ -2044,11 +1999,11 @@ xfs_difree_inobt(
*/
be32_add_cpu(&agi->agi_freecount, 1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- agbp->b_pag->pagi_freecount++;
+ pag->pagi_freecount++;
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
}
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error0;
@@ -2069,18 +2024,17 @@ xfs_difree_finobt(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
+ struct xfs_perag *pag,
xfs_agino_t agino,
struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
{
- struct xfs_agi *agi = agbp->b_addr;
- xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
int offset = agino - ibtrec->ir_startino;
int error;
int i;
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);
error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
if (error)
@@ -2158,7 +2112,7 @@ xfs_difree_finobt(
}
out:
- error = xfs_check_agi_freecount(cur, agi);
+ error = xfs_check_agi_freecount(cur);
if (error)
goto error;
@@ -2178,36 +2132,33 @@ error:
*/
int
xfs_difree(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_ino_t inode, /* inode to be freed */
- struct xfs_icluster *xic) /* cluster info if deleted */
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ xfs_ino_t inode,
+ struct xfs_icluster *xic)
{
/* REFERENCED */
xfs_agblock_t agbno; /* block number containing inode */
struct xfs_buf *agbp; /* buffer for allocation group header */
xfs_agino_t agino; /* allocation group inode number */
- xfs_agnumber_t agno; /* allocation group number */
int error; /* error return value */
- struct xfs_mount *mp; /* mount structure for filesystem */
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_inobt_rec_incore rec;/* btree record */
- mp = tp->t_mountp;
-
/*
* Break up inode number into its components.
*/
- agno = XFS_INO_TO_AGNO(mp, inode);
- if (agno >= mp->m_sb.sb_agcount) {
- xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
- __func__, agno, mp->m_sb.sb_agcount);
+ if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
+ xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
+ __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
ASSERT(0);
return -EINVAL;
}
agino = XFS_INO_TO_AGINO(mp, inode);
- if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
__func__, (unsigned long long)inode,
- (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
+ (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
ASSERT(0);
return -EINVAL;
}
@@ -2221,7 +2172,7 @@ xfs_difree(
/*
* Get the allocation group header.
*/
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
if (error) {
xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
__func__, error);
@@ -2231,7 +2182,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
- error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
+ error = xfs_difree_inobt(mp, tp, agbp, pag, agino, xic, &rec);
if (error)
goto error0;
@@ -2239,7 +2190,7 @@ xfs_difree(
* Fix up the free inode btree.
*/
if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
- error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
+ error = xfs_difree_finobt(mp, tp, agbp, pag, agino, &rec);
if (error)
goto error0;
}
@@ -2254,7 +2205,7 @@ STATIC int
xfs_imap_lookup(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t agino,
xfs_agblock_t agbno,
xfs_agblock_t *chunk_agbno,
@@ -2267,11 +2218,11 @@ xfs_imap_lookup(
int error;
int i;
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
if (error) {
xfs_alert(mp,
"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
- __func__, error, agno);
+ __func__, error, pag->pag_agno);
return error;
}
@@ -2281,7 +2232,7 @@ xfs_imap_lookup(
* we have a record, we need to ensure it contains the inode number
* we are looking up.
*/
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
if (!error) {
if (i)
@@ -2315,42 +2266,44 @@ xfs_imap_lookup(
*/
int
xfs_imap(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_ino_t ino, /* inode to locate */
- struct xfs_imap *imap, /* location map structure */
- uint flags) /* flags for inode btree lookup */
+ struct xfs_mount *mp, /* file system mount structure */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_ino_t ino, /* inode to locate */
+ struct xfs_imap *imap, /* location map structure */
+ uint flags) /* flags for inode btree lookup */
{
- xfs_agblock_t agbno; /* block number of inode in the alloc group */
- xfs_agino_t agino; /* inode number within alloc group */
- xfs_agnumber_t agno; /* allocation group number */
- xfs_agblock_t chunk_agbno; /* first block in inode chunk */
- xfs_agblock_t cluster_agbno; /* first block in inode cluster */
- int error; /* error code */
- int offset; /* index of inode in its buffer */
- xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
+ xfs_agblock_t agbno; /* block number of inode in the alloc group */
+ xfs_agino_t agino; /* inode number within alloc group */
+ xfs_agblock_t chunk_agbno; /* first block in inode chunk */
+ xfs_agblock_t cluster_agbno; /* first block in inode cluster */
+ int error; /* error code */
+ int offset; /* index of inode in its buffer */
+ xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
+ struct xfs_perag *pag;
ASSERT(ino != NULLFSINO);
/*
* Split up the inode number into its parts.
*/
- agno = XFS_INO_TO_AGNO(mp, ino);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
agino = XFS_INO_TO_AGINO(mp, ino);
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
- if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
- ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ if (!pag || agbno >= mp->m_sb.sb_agblocks ||
+ ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
+ error = -EINVAL;
#ifdef DEBUG
/*
* Don't output diagnostic information for untrusted inodes
* as they can be invalid without implying corruption.
*/
if (flags & XFS_IGET_UNTRUSTED)
- return -EINVAL;
- if (agno >= mp->m_sb.sb_agcount) {
+ goto out_drop;
+ if (!pag) {
xfs_alert(mp,
"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
- __func__, agno, mp->m_sb.sb_agcount);
+ __func__, XFS_INO_TO_AGNO(mp, ino),
+ mp->m_sb.sb_agcount);
}
if (agbno >= mp->m_sb.sb_agblocks) {
xfs_alert(mp,
@@ -2358,15 +2311,15 @@ xfs_imap(
__func__, (unsigned long long)agbno,
(unsigned long)mp->m_sb.sb_agblocks);
}
- if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ if (pag && ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
xfs_alert(mp,
"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
__func__, ino,
- XFS_AGINO_TO_INO(mp, agno, agino));
+ XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
}
xfs_stack_trace();
#endif /* DEBUG */
- return -EINVAL;
+ goto out_drop;
}
/*
@@ -2377,10 +2330,10 @@ xfs_imap(
* in all cases where an untrusted inode number is passed.
*/
if (flags & XFS_IGET_UNTRUSTED) {
- error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
&chunk_agbno, &offset_agbno, flags);
if (error)
- return error;
+ goto out_drop;
goto out_map;
}
@@ -2392,11 +2345,12 @@ xfs_imap(
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
- imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
+ imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
imap->im_len = XFS_FSB_TO_BB(mp, 1);
imap->im_boffset = (unsigned short)(offset <<
mp->m_sb.sb_inodelog);
- return 0;
+ error = 0;
+ goto out_drop;
}
/*
@@ -2408,10 +2362,10 @@ xfs_imap(
offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
- error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
&chunk_agbno, &offset_agbno, flags);
if (error)
- return error;
+ goto out_drop;
}
out_map:
@@ -2422,7 +2376,7 @@ out_map:
offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
XFS_INO_TO_OFFSET(mp, ino);
- imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
+ imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
@@ -2439,9 +2393,14 @@ out_map:
__func__, (unsigned long long) imap->im_blkno,
(unsigned long long) imap->im_len,
XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
- return -EINVAL;
+ error = -EINVAL;
+ goto out_drop;
}
- return 0;
+ error = 0;
+out_drop:
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
}
/*
@@ -2969,3 +2928,58 @@ xfs_ialloc_calc_rootino(
return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}
+
+/*
+ * Ensure there are no sparse inode clusters that cross the new EOAG.
+ *
+ * This is a no-op for non-spinode filesystems since clusters are always fully
+ * allocated and checking the bnobt suffices. However, a spinode filesystem
+ * could have a record where the upper inodes are free blocks. If those blocks
+ * were removed from the filesystem, the inode record would extend beyond EOAG,
+ * which will be flagged as corruption.
+ */
+int
+xfs_ialloc_check_shrink(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ struct xfs_buf *agibp,
+ xfs_agblock_t new_length)
+{
+ struct xfs_inobt_rec_incore rec;
+ struct xfs_btree_cur *cur;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
+ xfs_agino_t agino = XFS_AGB_TO_AGINO(mp, new_length);
+ int has;
+ int error;
+
+ if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
+ return 0;
+
+ pag = xfs_perag_get(mp, agno);
+ cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO);
+
+ /* Look up the inobt record that would correspond to the new EOFS. */
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
+ if (error || !has)
+ goto out;
+
+ error = xfs_inobt_get_rec(cur, &rec, &has);
+ if (error)
+ goto out;
+
+ if (!has) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /* If the record covers inodes that would be beyond EOFS, bail out. */
+ if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
+ error = -ENOSPC;
+ goto out;
+ }
+out:
+ xfs_btree_del_cursor(cur, error);
+ xfs_perag_put(pag);
+ return error;
+}
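As a made-up example of the check above: XFS_INODES_PER_CHUNK is 64, so an inobt record starting at agino 1000 covers inodes 1000-1063. If the proposed new AG length converts to agino 1040, then 1000 + 64 = 1064 > 1040, part of that (sparse) chunk would sit beyond the new EOAG, and the shrink is refused with -ENOSPC.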
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 3511086a7ae1..9a2112b4ad5e 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -33,42 +33,14 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
}
/*
- * Allocate an inode on disk.
- * Mode is used to tell whether the new inode will need space, and whether
- * it is a directory.
- *
- * There are two phases to inode allocation: selecting an AG and ensuring
- * that it contains free inodes, followed by allocating one of the free
- * inodes. xfs_dialloc_select_ag() does the former and returns a locked AGI
- * to the caller, ensuring that followup call to xfs_dialloc_ag() will
- * have free inodes to allocate from. xfs_dialloc_ag() will return the inode
- * number of the free inode we allocated.
+ * Allocate an inode on disk. Mode is used to tell whether the new inode will
+ * need space, and whether it is a directory.
*/
-int /* error */
-xfs_dialloc_select_ag(
- struct xfs_trans **tpp, /* double pointer of transaction */
- xfs_ino_t parent, /* parent inode (directory) */
- umode_t mode, /* mode bits for new inode */
- struct xfs_buf **IO_agbp);
-
-int
-xfs_dialloc_ag(
- struct xfs_trans *tp,
- struct xfs_buf *agbp,
- xfs_ino_t parent,
- xfs_ino_t *inop);
+int xfs_dialloc(struct xfs_trans **tpp, xfs_ino_t parent, umode_t mode,
+ xfs_ino_t *new_ino);
-/*
- * Free disk inode. Carefully avoids touching the incore inode, all
- * manipulations incore are the caller's responsibility.
- * The on-disk inode is not changed by this operation, only the
- * btree (free inode mask) is changed.
- */
-int /* error */
-xfs_difree(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_ino_t inode, /* inode to be freed */
- struct xfs_icluster *ifree); /* cluster info if deleted */
+int xfs_difree(struct xfs_trans *tp, struct xfs_perag *pag,
+ xfs_ino_t ino, struct xfs_icluster *ifree);
/*
* Return the location of the inode in imap, for mapping it into a buffer.
@@ -150,4 +122,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
+int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno,
+ struct xfs_buf *agibp, xfs_agblock_t new_length);
+
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 4c5831646bd9..823a038939f8 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -20,6 +20,7 @@
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
STATIC int
xfs_inobt_get_minrecs(
@@ -34,8 +35,7 @@ xfs_inobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno,
- cur->bc_btnum);
+ cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
STATIC void
@@ -102,7 +102,7 @@ __xfs_inobt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.oinfo = XFS_RMAP_OINFO_INOBT;
- args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);
args.minlen = 1;
args.maxlen = 1;
args.prod = 1;
@@ -235,7 +235,7 @@ xfs_inobt_init_ptr_from_cur(
{
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_root;
}
@@ -247,7 +247,7 @@ xfs_finobt_init_ptr_from_cur(
{
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_free_root;
}
@@ -427,7 +427,7 @@ static struct xfs_btree_cur *
xfs_inobt_init_common(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
+ struct xfs_perag *pag,
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
struct xfs_btree_cur *cur;
@@ -449,7 +449,9 @@ xfs_inobt_init_common(
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_ag.agno = agno;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
return cur;
}
@@ -459,13 +461,13 @@ xfs_inobt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
struct xfs_agi *agi = agbp->b_addr;
- cur = xfs_inobt_init_common(mp, tp, agno, btnum);
+ cur = xfs_inobt_init_common(mp, tp, pag, btnum);
if (btnum == XFS_BTNUM_INO)
cur->bc_nlevels = be32_to_cpu(agi->agi_level);
else
@@ -479,12 +481,12 @@ struct xfs_btree_cur *
xfs_inobt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
+ cur = xfs_inobt_init_common(mp, NULL, pag, btnum);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
@@ -656,7 +658,7 @@ int
xfs_inobt_cur(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t which,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp)
@@ -667,11 +669,11 @@ xfs_inobt_cur(
ASSERT(*agi_bpp == NULL);
ASSERT(*curpp == NULL);
- error = xfs_ialloc_read_agi(mp, tp, agno, agi_bpp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, agi_bpp);
if (error)
return error;
- cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, agno, which);
+ cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which);
*curpp = cur;
return 0;
}
@@ -680,7 +682,7 @@ static int
xfs_inobt_count_blocks(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks)
{
@@ -688,7 +690,7 @@ xfs_inobt_count_blocks(
struct xfs_btree_cur *cur = NULL;
int error;
- error = xfs_inobt_cur(mp, tp, agno, btnum, &cur, &agbp);
+ error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp);
if (error)
return error;
@@ -704,14 +706,14 @@ static int
xfs_finobt_read_blocks(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *tree_blocks)
{
struct xfs_buf *agbp;
struct xfs_agi *agi;
int error;
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
if (error)
return error;
@@ -728,7 +730,7 @@ int
xfs_finobt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -739,14 +741,14 @@ xfs_finobt_calc_reserves(
return 0;
if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
- error = xfs_finobt_read_blocks(mp, tp, agno, &tree_len);
+ error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len);
else
- error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO,
+ error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
&tree_len);
if (error)
return error;
- *ask += xfs_inobt_max_size(mp, agno);
+ *ask += xfs_inobt_max_size(mp, pag->pag_agno);
*used += tree_len;
return 0;
}
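The inode btree cursor hunks above swap the cached AG number for a counted reference to the per-AG structure. A minimal sketch of that lifecycle, assuming hypothetical wrapper names (the example_* functions are illustrative only; in the patch the same steps live in xfs_inobt_init_common() and the generic cursor teardown path):

/*
 * Sketch only: the cursor pins the perag for its whole lifetime, so the
 * AG number stays reachable as cur->bc_ag.pag->pag_agno.
 */
struct xfs_btree_cur *
example_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_mp = mp;
	cur->bc_tp = tp;
	cur->bc_ag.agbp = agbp;

	/* take a reference for the cursor; dropped again at teardown */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;
	return cur;
}

void
example_del_cursor(
	struct xfs_btree_cur	*cur)
{
	/* release the cursor's perag reference before freeing the cursor */
	xfs_perag_put(cur->bc_ag.pag);
	kmem_cache_free(xfs_btree_cur_zone, cur);
}

The caller that passed the perag in keeps its own reference; the cursor's reference is independent, which is what lets xfs_inobt_dup_cursor() above hand cur->bc_ag.pag straight back into the init function.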
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index 35bbd978c272..e530c82b2217 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xfs_perag;
/*
* Btree block header size depends on a superblock flag.
@@ -45,11 +46,11 @@ struct xfs_mount;
(maxrecs) * sizeof(xfs_inobt_key_t) + \
((index) - 1) * sizeof(xfs_inobt_ptr_t)))
-extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
- struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t,
- xfs_btnum_t);
+extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *mp,
+ struct xfs_trans *tp, struct xfs_buf *agbp,
+ struct xfs_perag *pag, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag,
xfs_btnum_t btnum);
extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
@@ -64,11 +65,11 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#endif /* DEBUG */
int xfs_finobt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+ struct xfs_perag *pag, xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
int xfs_inobt_cur(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_btnum_t btnum,
+ struct xfs_perag *pag, xfs_btnum_t btnum,
struct xfs_btree_cur **curpp, struct xfs_buf **agi_bpp);
void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index f3254a4f4cb4..84ea2e0af9f0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -592,27 +592,31 @@ xfs_inode_validate_extsize(
/*
* This comment describes a historic gap in this verifier function.
*
- * On older kernels, the extent size hint verifier doesn't check that
- * the extent size hint is an integer multiple of the realtime extent
- * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
- * The verifier has always enforced the alignment rule for regular
- * files with the REALTIME flag set.
+ * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
+ * function has never checked that the extent size hint is an integer
+ * multiple of the realtime extent size. Since we allow users to set
+ * this combination on non-rt filesystems /and/ to change the rt
+ * extent size when adding a rt device to a filesystem, the net effect
+ * is that users can configure a filesystem anticipating one rt
+ * geometry and change their minds later. Directories do not use the
+ * extent size hint, so this is harmless for them.
*
* If a directory with a misaligned extent size hint is allowed to
* propagate that hint into a new regular realtime file, the result
* is that the inode cluster buffer verifier will trigger a corruption
- * shutdown the next time it is run.
+ * shutdown the next time it is run, because the verifier has always
+ * enforced the alignment rule for regular files.
*
- * Unfortunately, there could be filesystems with these misconfigured
- * directories in the wild, so we cannot add a check to this verifier
- * at this time because that will result a new source of directory
- * corruption errors when reading an existing filesystem. Instead, we
- * permit the misconfiguration to pass through the verifiers so that
- * callers of this function can correct and mitigate externally.
+ * Because we allow administrators to set a new rt extent size when
+ * adding a rt section, we cannot add a check to this verifier because
+ * that will result in a new source of directory corruption errors when
+ * reading an existing filesystem. Instead, we rely on callers to
+ * decide when alignment checks are appropriate, and fix things up as
+ * needed.
*/
if (rt_flag)
- blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
+ blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
else
blocksize_bytes = mp->m_sb.sb_blocksize;
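The rule the comment above alludes to for regular files with the REALTIME flag is that the extent size hint, converted to bytes, must be a whole multiple of the realtime extent size. A rough sketch, assuming the hint is supplied in filesystem blocks as it is stored in the inode (example_rt_extsize_aligned() is illustrative, not the kernel helper):

/* Sketch of the rt extent size hint alignment rule discussed above. */
static bool
example_rt_extsize_aligned(
	struct xfs_mount	*mp,
	uint32_t		extsize)	/* hint, in fs blocks */
{
	uint64_t		rextsize_bytes;

	rextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
	return (XFS_FSB_TO_B(mp, extsize) % rextsize_bytes) == 0;
}

XFS_FSB_TO_B(mp, n) is simply n shifted left by sb_blocklog, so replacing the open-coded shift in the hunk above is a cleanup rather than a behaviour change.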
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 3e15ea29fb8d..2c5bcbc19264 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -34,9 +34,6 @@ typedef uint32_t xlog_tid_t;
#define XLOG_MIN_RECORD_BSHIFT 14 /* 16384 == 1 << 14 */
#define XLOG_BIG_RECORD_BSHIFT 15 /* 32k == 1 << 15 */
#define XLOG_MAX_RECORD_BSHIFT 18 /* 256k == 1 << 18 */
-#define XLOG_BTOLSUNIT(log, b) (((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
- (log)->l_mp->m_sb.sb_logsunit)
-#define XLOG_LSUNITTOB(log, su) ((su) * (log)->l_mp->m_sb.sb_logsunit)
#define XLOG_HEADER_SIZE 512
@@ -414,7 +411,16 @@ struct xfs_log_dinode {
/* start of the extended dinode, writable fields */
uint32_t di_crc; /* CRC of the inode */
uint64_t di_changecount; /* number of attribute changes */
- xfs_lsn_t di_lsn; /* flush sequence */
+
+ /*
+ * The LSN we write to this field during formatting is not a reflection
+ * of the current on-disk LSN. It should never be used for recovery
+ * sequencing, nor should it be recovered into the on-disk inode at all.
+ * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk()
+ * for details.
+ */
+ xfs_lsn_t di_lsn;
+
uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */
uint8_t di_pad2[12]; /* more padding for future expansion */
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 2037b9f23069..860a0c9801ba 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -22,6 +22,7 @@
#include "xfs_bit.h"
#include "xfs_refcount.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
@@ -46,7 +47,7 @@ xfs_refcount_lookup_le(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -63,7 +64,7 @@ xfs_refcount_lookup_ge(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
XFS_LOOKUP_GE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -80,7 +81,7 @@ xfs_refcount_lookup_eq(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -108,7 +109,7 @@ xfs_refcount_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
xfs_agblock_t realstart;
@@ -119,7 +120,7 @@ xfs_refcount_get_rec(
xfs_refcount_btrec_to_irec(rec, irec);
- agno = cur->bc_ag.agno;
+ agno = cur->bc_ag.pag->pag_agno;
if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
goto out_bad_rec;
@@ -144,7 +145,7 @@ xfs_refcount_get_rec(
if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
goto out_bad_rec;
- trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.agno, irec);
+ trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
return 0;
out_bad_rec:
@@ -169,14 +170,14 @@ xfs_refcount_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.agno, irec);
+ trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_refcount_update_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -193,7 +194,7 @@ xfs_refcount_insert(
{
int error;
- trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.agno, irec);
+ trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
@@ -208,7 +209,7 @@ xfs_refcount_insert(
out_error:
if (error)
trace_xfs_refcount_insert_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -234,7 +235,7 @@ xfs_refcount_delete(
error = -EFSCORRUPTED;
goto out_error;
}
- trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.agno, &irec);
+ trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.pag->pag_agno, &irec);
error = xfs_btree_delete(cur, i);
if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
error = -EFSCORRUPTED;
@@ -246,7 +247,7 @@ xfs_refcount_delete(
out_error:
if (error)
trace_xfs_refcount_delete_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -366,7 +367,7 @@ xfs_refcount_split_extent(
return 0;
*shape_changed = true;
- trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
&rcext, agbno);
/* Establish the right extent. */
@@ -391,7 +392,7 @@ xfs_refcount_split_extent(
out_error:
trace_xfs_refcount_split_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -411,7 +412,7 @@ xfs_refcount_merge_center_extents(
int found_rec;
trace_xfs_refcount_merge_center_extents(cur->bc_mp,
- cur->bc_ag.agno, left, center, right);
+ cur->bc_ag.pag->pag_agno, left, center, right);
/*
* Make sure the center and right extents are not in the btree.
@@ -468,7 +469,7 @@ xfs_refcount_merge_center_extents(
out_error:
trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -487,7 +488,7 @@ xfs_refcount_merge_left_extent(
int found_rec;
trace_xfs_refcount_merge_left_extent(cur->bc_mp,
- cur->bc_ag.agno, left, cleft);
+ cur->bc_ag.pag->pag_agno, left, cleft);
/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
if (cleft->rc_refcount > 1) {
@@ -530,7 +531,7 @@ xfs_refcount_merge_left_extent(
out_error:
trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -548,7 +549,7 @@ xfs_refcount_merge_right_extent(
int found_rec;
trace_xfs_refcount_merge_right_extent(cur->bc_mp,
- cur->bc_ag.agno, cright, right);
+ cur->bc_ag.pag->pag_agno, cright, right);
/*
* If the extent ending at agbno+aglen (cright) wasn't synthesized,
@@ -594,7 +595,7 @@ xfs_refcount_merge_right_extent(
out_error:
trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -679,13 +680,13 @@ xfs_refcount_find_left_extents(
cleft->rc_blockcount = aglen;
cleft->rc_refcount = 1;
}
- trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
left, cleft, agbno);
return error;
out_error:
trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -768,13 +769,13 @@ xfs_refcount_find_right_extents(
cright->rc_blockcount = aglen;
cright->rc_refcount = 1;
}
- trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
cright, right, agbno + aglen);
return error;
out_error:
trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -952,7 +953,7 @@ xfs_refcount_adjust_extents(
ext.rc_startblock - *agbno);
tmp.rc_refcount = 1 + adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &tmp);
+ cur->bc_ag.pag->pag_agno, &tmp);
/*
* Either cover the hole (increment) or
@@ -971,7 +972,7 @@ xfs_refcount_adjust_extents(
cur->bc_ag.refc.nr_ops++;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_ag.agno,
+ cur->bc_ag.pag->pag_agno,
tmp.rc_startblock);
xfs_bmap_add_free(cur->bc_tp, fsbno,
tmp.rc_blockcount, oinfo);
@@ -998,7 +999,7 @@ xfs_refcount_adjust_extents(
goto skip;
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &ext);
+ cur->bc_ag.pag->pag_agno, &ext);
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
@@ -1016,7 +1017,7 @@ xfs_refcount_adjust_extents(
goto advloop;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_ag.agno,
+ cur->bc_ag.pag->pag_agno,
ext.rc_startblock);
xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
oinfo);
@@ -1035,7 +1036,7 @@ advloop:
return error;
out_error:
trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1057,10 +1058,10 @@ xfs_refcount_adjust(
*new_agbno = agbno;
*new_aglen = aglen;
if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
- trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno, aglen);
else
- trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno, aglen);
/*
@@ -1099,7 +1100,7 @@ xfs_refcount_adjust(
return 0;
out_error:
- trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -1142,30 +1143,30 @@ xfs_refcount_finish_one(
struct xfs_btree_cur *rcur;
struct xfs_buf *agbp = NULL;
int error = 0;
- xfs_agnumber_t agno;
xfs_agblock_t bno;
xfs_agblock_t new_agbno;
unsigned long nr_ops = 0;
int shape_changes = 0;
+ struct xfs_perag *pag;
- agno = XFS_FSB_TO_AGNO(mp, startblock);
- ASSERT(agno != NULLAGNUMBER);
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
bno = XFS_FSB_TO_AGBNO(mp, startblock);
trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
type, XFS_FSB_TO_AGBNO(mp, startblock),
blockcount);
- if (XFS_TEST_ERROR(false, mp,
- XFS_ERRTAG_REFCOUNT_FINISH_ONE))
- return -EIO;
+ if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) {
+ error = -EIO;
+ goto out_drop;
+ }
/*
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
rcur = *pcur;
- if (rcur != NULL && rcur->bc_ag.agno != agno) {
+ if (rcur != NULL && rcur->bc_ag.pag != pag) {
nr_ops = rcur->bc_ag.refc.nr_ops;
shape_changes = rcur->bc_ag.refc.shape_changes;
xfs_refcount_finish_one_cleanup(tp, rcur, 0);
@@ -1173,12 +1174,12 @@ xfs_refcount_finish_one(
*pcur = NULL;
}
if (rcur == NULL) {
- error = xfs_alloc_read_agf(tp->t_mountp, tp, agno,
+ error = xfs_alloc_read_agf(tp->t_mountp, tp, pag->pag_agno,
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
- return error;
+ goto out_drop;
- rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
+ rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
rcur->bc_ag.refc.nr_ops = nr_ops;
rcur->bc_ag.refc.shape_changes = shape_changes;
}
@@ -1188,12 +1189,12 @@ xfs_refcount_finish_one(
case XFS_REFCOUNT_INCREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
new_len, XFS_REFCOUNT_ADJUST_INCREASE, NULL);
- *new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+ *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
break;
case XFS_REFCOUNT_DECREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
new_len, XFS_REFCOUNT_ADJUST_DECREASE, NULL);
- *new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+ *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
break;
case XFS_REFCOUNT_ALLOC_COW:
*new_fsb = startblock + blockcount;
@@ -1210,8 +1211,10 @@ xfs_refcount_finish_one(
error = -EFSCORRUPTED;
}
if (!error && *new_len > 0)
- trace_xfs_refcount_finish_one_leftover(mp, agno, type,
+ trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, type,
bno, blockcount, new_agbno, *new_len);
+out_drop:
+ xfs_perag_put(pag);
return error;
}
@@ -1294,7 +1297,7 @@ xfs_refcount_find_shared(
int have;
int error;
- trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.pag->pag_agno,
agbno, aglen);
/* By default, skip the whole range */
@@ -1374,12 +1377,12 @@ xfs_refcount_find_shared(
done:
trace_xfs_refcount_find_shared_result(cur->bc_mp,
- cur->bc_ag.agno, *fbno, *flen);
+ cur->bc_ag.pag->pag_agno, *fbno, *flen);
out_error:
if (error)
trace_xfs_refcount_find_shared_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1476,7 +1479,7 @@ xfs_refcount_adjust_cow_extents(
tmp.rc_blockcount = aglen;
tmp.rc_refcount = 1;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &tmp);
+ cur->bc_ag.pag->pag_agno, &tmp);
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
@@ -1504,7 +1507,7 @@ xfs_refcount_adjust_cow_extents(
ext.rc_refcount = 0;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_ag.agno, &ext);
+ cur->bc_ag.pag->pag_agno, &ext);
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
@@ -1520,7 +1523,7 @@ xfs_refcount_adjust_cow_extents(
return error;
out_error:
trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1566,7 +1569,7 @@ xfs_refcount_adjust_cow(
return 0;
out_error:
- trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -1580,7 +1583,7 @@ __xfs_refcount_cow_alloc(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.agno,
+ trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
agbno, aglen);
/* Add refcount btree reservation */
@@ -1597,7 +1600,7 @@ __xfs_refcount_cow_free(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.agno,
+ trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
agbno, aglen);
/* Remove refcount btree reservation */
@@ -1672,7 +1675,7 @@ xfs_refcount_recover_extent(
int
xfs_refcount_recover_cow_leftovers(
struct xfs_mount *mp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_trans *tp;
struct xfs_btree_cur *cur;
@@ -1704,10 +1707,10 @@ xfs_refcount_recover_cow_leftovers(
if (error)
return error;
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
if (error)
goto out_trans;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
/* Find all the leftover CoW staging extents. */
memset(&low, 0, sizeof(low));
@@ -1729,11 +1732,12 @@ xfs_refcount_recover_cow_leftovers(
if (error)
goto out_free;
- trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
+ trace_xfs_refcount_recover_extent(mp, pag->pag_agno,
+ &rr->rr_rrec);
/* Free the orphan record */
agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
- fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
+ fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno);
xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 209795539c8d..9f6e9aae4da0 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -6,6 +6,13 @@
#ifndef __XFS_REFCOUNT_H__
#define __XFS_REFCOUNT_H__
+struct xfs_trans;
+struct xfs_mount;
+struct xfs_perag;
+struct xfs_btree_cur;
+struct xfs_bmbt_irec;
+struct xfs_refcount_irec;
+
extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur,
xfs_agblock_t bno, int *stat);
extern int xfs_refcount_lookup_ge(struct xfs_btree_cur *cur,
@@ -50,7 +57,7 @@ void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
void xfs_refcount_free_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb,
xfs_extlen_t len);
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
- xfs_agnumber_t agno);
+ struct xfs_perag *pag);
/*
* While we're adjusting the refcounts records of an extent, we have
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index a6ac60ae9421..92d336c17e83 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
@@ -20,13 +19,14 @@
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno);
+ cur->bc_ag.agbp, cur->bc_ag.pag);
}
STATIC void
@@ -65,7 +65,7 @@ xfs_refcountbt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
+ args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
xfs_refc_block(args.mp));
args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1;
@@ -74,13 +74,13 @@ xfs_refcountbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto out_error;
- trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
args.agbno, 1);
if (args.fsbno == NULLFSBLOCK) {
*stat = 0;
return 0;
}
- ASSERT(args.agno == cur->bc_ag.agno);
+ ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
ASSERT(args.len == 1);
new->s = cpu_to_be32(args.agbno);
@@ -105,7 +105,7 @@ xfs_refcountbt_free_block(
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
int error;
- trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
@@ -170,7 +170,7 @@ xfs_refcountbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_refcount_root;
}
@@ -316,12 +316,11 @@ static struct xfs_btree_cur *
xfs_refcountbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
- ASSERT(agno != NULLAGNUMBER);
- ASSERT(agno < mp->m_sb.sb_agcount);
+ ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
cur->bc_tp = tp;
@@ -330,9 +329,12 @@ xfs_refcountbt_init_common(
cur->bc_blocklog = mp->m_sb.sb_blocklog;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
- cur->bc_ag.agno = agno;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
+
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
cur->bc_ops = &xfs_refcountbt_ops;
@@ -345,12 +347,12 @@ xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
- cur = xfs_refcountbt_init_common(mp, tp, agno);
+ cur = xfs_refcountbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
cur->bc_ag.agbp = agbp;
return cur;
@@ -361,11 +363,11 @@ struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
- cur = xfs_refcountbt_init_common(mp, NULL, agno);
+ cur = xfs_refcountbt_init_common(mp, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
@@ -450,7 +452,7 @@ int
xfs_refcountbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -463,8 +465,7 @@ xfs_refcountbt_calc_reserves(
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
-
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
if (error)
return error;
@@ -479,7 +480,7 @@ xfs_refcountbt_calc_reserves(
* expansion. We therefore can pretend the space isn't there.
*/
if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+ XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
agblocks -= mp->m_sb.sb_logblocks;
*ask += xfs_refcountbt_max_size(mp, agblocks);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index 69dc515db671..bd9ed9e1e41f 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xfs_perag;
struct xbtree_afakeroot;
/*
@@ -46,9 +47,9 @@ struct xbtree_afakeroot;
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp,
- xfs_agnumber_t agno);
+ struct xfs_perag *pag);
struct xfs_btree_cur *xfs_refcountbt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
@@ -58,7 +59,7 @@ extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
- struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
+ struct xfs_trans *tp, struct xfs_perag *pag, xfs_extlen_t *ask,
xfs_extlen_t *used);
void xfs_refcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 10e0cf9949a2..d1dfad0204e3 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -11,6 +11,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_sb.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
@@ -21,6 +22,7 @@
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_inode.h"
+#include "xfs_ag.h"
/*
* Lookup the first record less than or equal to [bno, len, owner, offset]
@@ -79,7 +81,7 @@ xfs_rmap_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.pag->pag_agno,
irec->rm_startblock, irec->rm_blockcount,
irec->rm_owner, irec->rm_offset, irec->rm_flags);
@@ -91,7 +93,7 @@ xfs_rmap_update(
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_rmap_update_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -107,7 +109,7 @@ xfs_rmap_insert(
int i;
int error;
- trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.agno, agbno,
+ trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -133,7 +135,7 @@ xfs_rmap_insert(
done:
if (error)
trace_xfs_rmap_insert_error(rcur->bc_mp,
- rcur->bc_ag.agno, error, _RET_IP_);
+ rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -149,7 +151,7 @@ xfs_rmap_delete(
int i;
int error;
- trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.agno, agbno,
+ trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,
len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -170,7 +172,7 @@ xfs_rmap_delete(
done:
if (error)
trace_xfs_rmap_delete_error(rcur->bc_mp,
- rcur->bc_ag.agno, error, _RET_IP_);
+ rcur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -197,7 +199,7 @@ xfs_rmap_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.agno;
+ xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
@@ -260,7 +262,7 @@ xfs_rmap_find_left_neighbor_helper(
struct xfs_find_left_neighbor_info *info = priv;
trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
- cur->bc_ag.agno, rec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -312,7 +314,7 @@ xfs_rmap_find_left_neighbor(
info.stat = stat;
trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
- cur->bc_ag.agno, bno, 0, owner, offset, flags);
+ cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_find_left_neighbor_helper, &info);
@@ -320,7 +322,7 @@ xfs_rmap_find_left_neighbor(
error = 0;
if (*stat)
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, irec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner,
irec->rm_offset, irec->rm_flags);
return error;
@@ -336,7 +338,7 @@ xfs_rmap_lookup_le_range_helper(
struct xfs_find_left_neighbor_info *info = priv;
trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
- cur->bc_ag.agno, rec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -385,14 +387,14 @@ xfs_rmap_lookup_le_range(
info.stat = stat;
trace_xfs_rmap_lookup_le_range(cur->bc_mp,
- cur->bc_ag.agno, bno, 0, owner, offset, flags);
+ cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_lookup_le_range_helper, &info);
if (error == -ECANCELED)
error = 0;
if (*stat)
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, irec->rm_startblock,
+ cur->bc_ag.pag->pag_agno, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner,
irec->rm_offset, irec->rm_flags);
return error;
@@ -498,7 +500,7 @@ xfs_rmap_unmap(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -522,7 +524,7 @@ xfs_rmap_unmap(
goto out_error;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, ltrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
ltoff = ltrec.rm_offset;
@@ -588,7 +590,7 @@ xfs_rmap_unmap(
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
/* exact match, simply remove the record from rmap tree */
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
ltrec.rm_startblock, ltrec.rm_blockcount,
ltrec.rm_owner, ltrec.rm_offset,
ltrec.rm_flags);
@@ -666,7 +668,7 @@ xfs_rmap_unmap(
else
cur->bc_rec.r.rm_offset = offset + len;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
cur->bc_rec.r.rm_startblock,
cur->bc_rec.r.rm_blockcount,
cur->bc_rec.r.rm_owner,
@@ -678,11 +680,11 @@ xfs_rmap_unmap(
}
out_done:
- trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_unmap_error(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_unmap_error(mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -694,7 +696,7 @@ int
xfs_rmap_free(
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
const struct xfs_owner_info *oinfo)
@@ -706,7 +708,7 @@ xfs_rmap_free(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
@@ -773,7 +775,7 @@ xfs_rmap_map(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
@@ -795,7 +797,7 @@ xfs_rmap_map(
goto out_error;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, ltrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
@@ -831,7 +833,7 @@ xfs_rmap_map(
goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, gtrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
@@ -870,7 +872,7 @@ xfs_rmap_map(
* result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
*/
ltrec.rm_blockcount += gtrec.rm_blockcount;
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
gtrec.rm_startblock,
gtrec.rm_blockcount,
gtrec.rm_owner,
@@ -921,7 +923,7 @@ xfs_rmap_map(
cur->bc_rec.r.rm_owner = owner;
cur->bc_rec.r.rm_offset = offset;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
owner, offset, flags);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -932,11 +934,11 @@ xfs_rmap_map(
}
}
- trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_map_error(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_map_error(mp, cur->bc_ag.pag->pag_agno,
error, _RET_IP_);
return error;
}
@@ -948,7 +950,7 @@ int
xfs_rmap_alloc(
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
const struct xfs_owner_info *oinfo)
@@ -960,7 +962,7 @@ xfs_rmap_alloc(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
xfs_btree_del_cursor(cur, error);
@@ -1010,7 +1012,7 @@ xfs_rmap_convert(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -1034,7 +1036,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_ag.agno, PREV.rm_startblock,
+ cur->bc_ag.pag->pag_agno, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags);
@@ -1076,7 +1078,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, LEFT.rm_startblock,
+ cur->bc_ag.pag->pag_agno, LEFT.rm_startblock,
LEFT.rm_blockcount, LEFT.rm_owner,
LEFT.rm_offset, LEFT.rm_flags);
if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
@@ -1114,7 +1116,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, RIGHT.rm_startblock,
+ cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
if (bno + len == RIGHT.rm_startblock &&
@@ -1132,7 +1134,7 @@ xfs_rmap_convert(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
+ trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
_RET_IP_);
/* reset the cursor back to PREV */
@@ -1162,7 +1164,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags);
@@ -1180,7 +1182,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
PREV.rm_flags);
@@ -1210,7 +1212,7 @@ xfs_rmap_convert(
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
PREV.rm_flags);
@@ -1247,7 +1249,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.pag->pag_agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags);
@@ -1326,7 +1328,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1383,7 +1385,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno,
len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1414,7 +1416,7 @@ xfs_rmap_convert(
NEW = PREV;
NEW.rm_blockcount = offset - PREV.rm_offset;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno,
NEW.rm_startblock, NEW.rm_blockcount,
NEW.rm_owner, NEW.rm_offset,
NEW.rm_flags);
@@ -1441,7 +1443,7 @@ xfs_rmap_convert(
/* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
- trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.pag->pag_agno, bno, len,
owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1465,12 +1467,12 @@ xfs_rmap_convert(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
done:
if (error)
trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1506,7 +1508,7 @@ xfs_rmap_convert_shared(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -1573,7 +1575,7 @@ xfs_rmap_convert_shared(
goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, RIGHT.rm_startblock,
+ cur->bc_ag.pag->pag_agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
@@ -1589,7 +1591,7 @@ xfs_rmap_convert_shared(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
+ trace_xfs_rmap_convert_state(mp, cur->bc_ag.pag->pag_agno, state,
_RET_IP_);
/*
* Switch out based on the FILLING and CONTIG state bits.
@@ -1880,12 +1882,12 @@ xfs_rmap_convert_shared(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_convert_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
done:
if (error)
trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -1923,7 +1925,7 @@ xfs_rmap_unmap_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/*
@@ -2072,12 +2074,12 @@ xfs_rmap_unmap_shared(
goto out_error;
}
- trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_unmap_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
trace_xfs_rmap_unmap_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -2112,7 +2114,7 @@ xfs_rmap_map_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
/* Is there a left record that abuts our range? */
@@ -2138,7 +2140,7 @@ xfs_rmap_map_shared(
goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_ag.agno, gtrec.rm_startblock,
+ cur->bc_ag.pag->pag_agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
@@ -2231,12 +2233,12 @@ xfs_rmap_map_shared(
goto out_error;
}
- trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
+ trace_xfs_rmap_map_done(mp, cur->bc_ag.pag->pag_agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
trace_xfs_rmap_map_error(cur->bc_mp,
- cur->bc_ag.agno, error, _RET_IP_);
+ cur->bc_ag.pag->pag_agno, error, _RET_IP_);
return error;
}
@@ -2362,31 +2364,32 @@ xfs_rmap_finish_one(
struct xfs_btree_cur **pcur)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
struct xfs_btree_cur *rcur;
struct xfs_buf *agbp = NULL;
int error = 0;
- xfs_agnumber_t agno;
struct xfs_owner_info oinfo;
xfs_agblock_t bno;
bool unwritten;
- agno = XFS_FSB_TO_AGNO(mp, startblock);
- ASSERT(agno != NULLAGNUMBER);
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
bno = XFS_FSB_TO_AGBNO(mp, startblock);
- trace_xfs_rmap_deferred(mp, agno, type, bno, owner, whichfork,
+ trace_xfs_rmap_deferred(mp, pag->pag_agno, type, bno, owner, whichfork,
startoff, blockcount, state);
- if (XFS_TEST_ERROR(false, mp,
- XFS_ERRTAG_RMAP_FINISH_ONE))
- return -EIO;
+ if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE)) {
+ error = -EIO;
+ goto out_drop;
+ }
+
/*
* If we haven't gotten a cursor or the cursor AG doesn't match
* the startblock, get one now.
*/
rcur = *pcur;
- if (rcur != NULL && rcur->bc_ag.agno != agno) {
+ if (rcur != NULL && rcur->bc_ag.pag != pag) {
xfs_rmap_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
@@ -2397,13 +2400,15 @@ xfs_rmap_finish_one(
* rmapbt, because a shape change could cause us to
* allocate blocks.
*/
- error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
if (error)
- return error;
- if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
- return -EFSCORRUPTED;
+ goto out_drop;
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
+ error = -EFSCORRUPTED;
+ goto out_drop;
+ }
- rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
}
*pcur = rcur;
@@ -2441,6 +2446,8 @@ xfs_rmap_finish_one(
ASSERT(0);
error = -EFSCORRUPTED;
}
+out_drop:
+ xfs_perag_put(pag);
return error;
}
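Both finish_one conversions above (refcount and rmap) follow the same shape: look the perag up once on entry and route every exit through a single put. A compressed sketch with the real work elided (example_finish_one() is illustrative):

int
example_finish_one(
	struct xfs_trans	*tp,
	xfs_fsblock_t		startblock)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	int			error = 0;

	/* one lookup at entry; pag->pag_agno replaces the old local agno */
	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));

	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE)) {
		error = -EIO;
		goto out_drop;
	}

	/* ... cursor setup and the actual rmap/refcount work go here ... */

out_drop:
	/* every return path drops the reference taken above */
	xfs_perag_put(pag);
	return error;
}

Comparing cached cursors by pag pointer (rcur->bc_ag.pag != pag) instead of by AG number works because per-AG structures are never freed, so a given AG always yields the same pointer.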
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index abe633403fd1..f2423cf7f1e2 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -6,6 +6,8 @@
#ifndef __XFS_RMAP_H__
#define __XFS_RMAP_H__
+struct xfs_perag;
+
static inline void
xfs_rmap_ino_bmbt_owner(
struct xfs_owner_info *oi,
@@ -113,10 +115,10 @@ xfs_owner_info_pack(
}
int xfs_rmap_alloc(struct xfs_trans *tp, struct xfs_buf *agbp,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_perag *pag, xfs_agblock_t bno, xfs_extlen_t len,
const struct xfs_owner_info *oinfo);
int xfs_rmap_free(struct xfs_trans *tp, struct xfs_buf *agbp,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_perag *pag, xfs_agblock_t bno, xfs_extlen_t len,
const struct xfs_owner_info *oinfo);
int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 9f5bcbd834c3..f29bc71b9950 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
@@ -20,6 +19,7 @@
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
@@ -52,7 +52,7 @@ xfs_rmapbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_ag.agbp, cur->bc_ag.agno);
+ cur->bc_ag.agbp, cur->bc_ag.pag);
}
STATIC void
@@ -64,13 +64,12 @@ xfs_rmapbt_set_root(
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
- struct xfs_perag *pag = agbp->b_pag;
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- pag->pagf_levels[btnum] += inc;
+ cur->bc_ag.pag->pagf_levels[btnum] += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@@ -84,6 +83,7 @@ xfs_rmapbt_alloc_block(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_perag *pag = cur->bc_ag.pag;
int error;
xfs_agblock_t bno;
@@ -93,21 +93,19 @@ xfs_rmapbt_alloc_block(
if (error)
return error;
- trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
- bno, 1);
+ trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
if (bno == NULLAGBLOCK) {
*stat = 0;
return 0;
}
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1,
- false);
+ xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);
new->s = cpu_to_be32(bno);
be32_add_cpu(&agf->agf_rmap_blocks, 1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);
+ xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);
*stat = 1;
return 0;
@@ -120,12 +118,12 @@ xfs_rmapbt_free_block(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
- struct xfs_perag *pag;
+ struct xfs_perag *pag = cur->bc_ag.pag;
xfs_agblock_t bno;
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
- trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno,
+ trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
@@ -133,10 +131,9 @@ xfs_rmapbt_free_block(
if (error)
return error;
- xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
- pag = cur->bc_ag.agbp->b_pag;
xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
return 0;
}
@@ -215,7 +212,7 @@ xfs_rmapbt_init_ptr_from_cur(
{
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
@@ -450,7 +447,7 @@ static struct xfs_btree_cur *
xfs_rmapbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
@@ -462,9 +459,12 @@ xfs_rmapbt_init_common(
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
- cur->bc_ag.agno = agno;
cur->bc_ops = &xfs_rmapbt_ops;
+ /* take a reference for the cursor */
+ atomic_inc(&pag->pag_ref);
+ cur->bc_ag.pag = pag;
+
return cur;
}
@@ -474,12 +474,12 @@ xfs_rmapbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
- cur = xfs_rmapbt_init_common(mp, tp, agno);
+ cur = xfs_rmapbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
cur->bc_ag.agbp = agbp;
return cur;
@@ -490,11 +490,11 @@ struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
- cur = xfs_rmapbt_init_common(mp, NULL, agno);
+ cur = xfs_rmapbt_init_common(mp, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
@@ -596,7 +596,7 @@ int
xfs_rmapbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_extlen_t *ask,
xfs_extlen_t *used)
{
@@ -609,7 +609,7 @@ xfs_rmapbt_calc_reserves(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
if (error)
return error;
@@ -624,7 +624,7 @@ xfs_rmapbt_calc_reserves(
* expansion. We therefore can pretend the space isn't there.
*/
if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+ XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
agblocks -= mp->m_sb.sb_logblocks;
/* Reserve 1% of the AG or enough for 1 block per record. */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 115c3455a734..88d8d18788a2 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -43,9 +43,9 @@ struct xbtree_afakeroot;
struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
- xfs_agnumber_t agno);
+ struct xfs_perag *pag);
struct xfs_btree_cur *xfs_rmapbt_stage_cursor(struct xfs_mount *mp,
- struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
+ struct xbtree_afakeroot *afake, struct xfs_perag *pag);
void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, struct xfs_buf *agbp);
int xfs_rmapbt_maxrecs(int blocklen, int leaf);
@@ -57,6 +57,6 @@ extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+ struct xfs_perag *pag, xfs_extlen_t *ask, xfs_extlen_t *used);
#endif /* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index dfbbcbd448c1..04f5386446db 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -15,7 +15,6 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
-#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_bmap_btree.h"
@@ -25,72 +24,12 @@
#include "xfs_refcount_btree.h"
#include "xfs_da_format.h"
#include "xfs_health.h"
+#include "xfs_ag.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
*/
-/*
- * Reference counting access wrappers to the perag structures.
- * Because we never free per-ag structures, the only thing we
- * have to protect against changes is the tree structure itself.
- */
-struct xfs_perag *
-xfs_perag_get(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_perag *pag;
- int ref = 0;
-
- rcu_read_lock();
- pag = radix_tree_lookup(&mp->m_perag_tree, agno);
- if (pag) {
- ASSERT(atomic_read(&pag->pag_ref) >= 0);
- ref = atomic_inc_return(&pag->pag_ref);
- }
- rcu_read_unlock();
- trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
- return pag;
-}
-
-/*
- * search from @first to find the next perag with the given tag set.
- */
-struct xfs_perag *
-xfs_perag_get_tag(
- struct xfs_mount *mp,
- xfs_agnumber_t first,
- int tag)
-{
- struct xfs_perag *pag;
- int found;
- int ref;
-
- rcu_read_lock();
- found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
- (void **)&pag, first, 1, tag);
- if (found <= 0) {
- rcu_read_unlock();
- return NULL;
- }
- ref = atomic_inc_return(&pag->pag_ref);
- rcu_read_unlock();
- trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
- return pag;
-}
-
-void
-xfs_perag_put(
- struct xfs_perag *pag)
-{
- int ref;
-
- ASSERT(atomic_read(&pag->pag_ref) > 0);
- ref = atomic_dec_return(&pag->pag_ref);
- trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
-}
-
/* Check all the superblock fields we care about when reading one in. */
STATIC int
xfs_validate_sb_read(
@@ -842,78 +781,6 @@ xfs_sb_mount_common(
}
/*
- * xfs_initialize_perag_data
- *
- * Read in each per-ag structure so we can count up the number of
- * allocated inodes, free inodes and used filesystem blocks as this
- * information is no longer persistent in the superblock. Once we have
- * this information, write it into the in-core superblock structure.
- */
-int
-xfs_initialize_perag_data(
- struct xfs_mount *mp,
- xfs_agnumber_t agcount)
-{
- xfs_agnumber_t index;
- xfs_perag_t *pag;
- xfs_sb_t *sbp = &mp->m_sb;
- uint64_t ifree = 0;
- uint64_t ialloc = 0;
- uint64_t bfree = 0;
- uint64_t bfreelst = 0;
- uint64_t btree = 0;
- uint64_t fdblocks;
- int error = 0;
-
- for (index = 0; index < agcount; index++) {
- /*
- * read the agf, then the agi. This gets us
- * all the information we need and populates the
- * per-ag structures for us.
- */
- error = xfs_alloc_pagf_init(mp, NULL, index, 0);
- if (error)
- return error;
-
- error = xfs_ialloc_pagi_init(mp, NULL, index);
- if (error)
- return error;
- pag = xfs_perag_get(mp, index);
- ifree += pag->pagi_freecount;
- ialloc += pag->pagi_count;
- bfree += pag->pagf_freeblks;
- bfreelst += pag->pagf_flcount;
- btree += pag->pagf_btreeblks;
- xfs_perag_put(pag);
- }
- fdblocks = bfree + bfreelst + btree;
-
- /*
- * If the new summary counts are obviously incorrect, fail the
- * mount operation because that implies the AGFs are also corrupt.
- * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
- * will prevent xfs_repair from fixing anything.
- */
- if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
- xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
- error = -EFSCORRUPTED;
- goto out;
- }
-
- /* Overwrite incore superblock counters with just-read data */
- spin_lock(&mp->m_sb_lock);
- sbp->sb_ifree = ifree;
- sbp->sb_icount = ialloc;
- sbp->sb_fdblocks = fdblocks;
- spin_unlock(&mp->m_sb_lock);
-
- xfs_reinit_percpu_counters(mp);
-out:
- xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
- return error;
-}
-
-/*
* xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
* into the superblock buffer to be logged. It does not provide the higher
* level of locking that is needed to protect the in-core superblock from
@@ -989,17 +856,18 @@ int
xfs_update_secondary_sbs(
struct xfs_mount *mp)
{
- xfs_agnumber_t agno;
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno = 1;
int saved_error = 0;
int error = 0;
LIST_HEAD (buffer_list);
/* update secondary superblocks. */
- for (agno = 1; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
struct xfs_buf *bp;
error = xfs_buf_get(mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
+ XFS_AG_DADDR(mp, pag->pag_agno, XFS_SB_DADDR),
XFS_FSS_TO_BB(mp, 1), &bp);
/*
* If we get an error reading or writing alternate superblocks,
@@ -1011,7 +879,7 @@ xfs_update_secondary_sbs(
if (error) {
xfs_warn(mp,
"error allocating secondary superblock for ag %d",
- agno);
+ pag->pag_agno);
if (!saved_error)
saved_error = error;
continue;
@@ -1032,7 +900,7 @@ xfs_update_secondary_sbs(
if (error) {
xfs_warn(mp,
"write error %d updating a secondary superblock near ag %d",
- error, agno);
+ error, pag->pag_agno);
if (!saved_error)
saved_error = error;
continue;
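The hunk above is the first of many in this patch that follow the same conversion: an open-coded AG index loop with explicit xfs_perag_get()/xfs_perag_put() calls becomes a for_each_perag()-style walk that manages the reference itself. A hedged before/after sketch of the pattern (loop bodies elided):

	/* before: manual reference management on every iteration */
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		/* ... use pag ... */
		xfs_perag_put(pag);
	}

	/* after: the iterator takes and drops the reference internally */
	for_each_perag(mp, agno, pag) {
		/* ... use pag ... */
		if (error)
			break;	/* the reference is still held on early exit */
	}
	if (pag)
		xfs_perag_put(pag);	/* drop it if the loop was broken out of */

Note the put after the loop: as the scrub hunks later in this patch show, breaking out of for_each_perag() leaves the current perag reference held, so callers that can exit early must drop it themselves.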
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index f79f9dc632b6..0c1602d9b53d 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -13,15 +13,6 @@ struct xfs_trans;
struct xfs_fsop_geom;
struct xfs_perag;
-/*
- * perag get/put wrappers for ref counting
- */
-extern struct xfs_perag *xfs_perag_get(struct xfs_mount *, xfs_agnumber_t);
-extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
- int tag);
-extern void xfs_perag_put(struct xfs_perag *pag);
-extern int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
-
extern void xfs_log_sb(struct xfs_trans *tp);
extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
extern int xfs_sync_sb_buf(struct xfs_mount *mp);
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 782fdd08f759..25c4cab58851 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -22,30 +22,26 @@ struct xfs_inode;
* Buffer verifier operations are widely used, including userspace tools
*/
extern const struct xfs_buf_ops xfs_agf_buf_ops;
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-extern const struct xfs_buf_ops xfs_bnobt_buf_ops;
-extern const struct xfs_buf_ops xfs_cntbt_buf_ops;
-extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
-extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
+extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
+extern const struct xfs_buf_ops xfs_bnobt_buf_ops;
+extern const struct xfs_buf_ops xfs_cntbt_buf_ops;
extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
extern const struct xfs_buf_ops xfs_finobt_buf_ops;
+extern const struct xfs_buf_ops xfs_inobt_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
+extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rtbuf_ops;
extern const struct xfs_buf_ops xfs_sb_buf_ops;
extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;
-extern const struct xfs_buf_ops xfs_rtbuf_ops;
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 8d595a5c4abd..16f723ebe8dd 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -143,16 +143,14 @@ xfs_trans_log_inode(
}
/*
- * Inode verifiers on older kernels don't check that the extent size
- * hint is an integer multiple of the rt extent size on a directory
- * with both rtinherit and extszinherit flags set. If we're logging a
- * directory that is misconfigured in this way, clear the hint.
+ * Inode verifiers do not check that the extent size hint is an integer
+ * multiple of the rt extent size on a directory with both rtinherit
+ * and extszinherit flags set. If we're logging a directory that is
+ * misconfigured in this way, clear the hint.
*/
if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
(ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
- xfs_info_once(ip->i_mount,
- "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
XFS_DIFLAG_EXTSZINHERIT);
ip->i_extsize = 0;
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 04801362e1a7..e8f4abee7892 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -11,6 +11,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_ag.h"
/* Find the size of the AG, in blocks. */
inline xfs_agblock_t
@@ -222,12 +223,13 @@ xfs_icount_range(
unsigned long long *max)
{
unsigned long long nr_inos = 0;
+ struct xfs_perag *pag;
xfs_agnumber_t agno;
/* root, rtbitmap, rtsum all live in the first chunk */
*min = XFS_INODES_PER_CHUNK;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag(mp, agno, pag) {
xfs_agino_t first, last;
xfs_agino_range(mp, agno, &first, &last);
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 064bd6e8c922..0870ef6f933d 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -21,6 +21,7 @@ typedef int32_t xfs_suminfo_t; /* type of bitmap summary info */
typedef uint32_t xfs_rtword_t; /* word type for bitmap manipulations */
typedef int64_t xfs_lsn_t; /* log sequence number */
+typedef int64_t xfs_csn_t; /* CIL sequence number */
typedef uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
typedef uint32_t xfs_dahash_t; /* dir/attr hash value */
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index f96e84793cc9..be1a7e1e65f7 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -14,6 +14,7 @@
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 23690f824ffa..e95f8c98f0f7 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -20,6 +20,7 @@
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -245,8 +246,8 @@ xrep_agf_calc_from_btrees(
int error;
/* Update the AGF counters from the bnobt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_BNO);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_BNO);
error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
if (error)
goto err;
@@ -259,8 +260,8 @@ xrep_agf_calc_from_btrees(
agf->agf_longest = cpu_to_be32(raa.longest);
/* Update the AGF counters from the cntbt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_CNT);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_CNT);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
@@ -268,7 +269,7 @@ xrep_agf_calc_from_btrees(
btreeblks += blocks - 1;
/* Update the AGF counters from the rmapbt. */
- cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
@@ -281,7 +282,7 @@ xrep_agf_calc_from_btrees(
/* Update the AGF counters from the refcountbt. */
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
- sc->sa.agno);
+ sc->sa.pag);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
@@ -453,7 +454,7 @@ xrep_agfl_walk_rmap(
/* Record all the OWN_AG blocks. */
if (rec->rm_owner == XFS_RMAP_OWN_AG) {
- fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
+ fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
rec->rm_startblock);
error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
if (error)
@@ -489,23 +490,23 @@ xrep_agfl_collect_blocks(
xbitmap_init(&ra.agmetablocks);
/* Find all space used by the free space btrees & rmapbt. */
- cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
if (error)
goto err;
xfs_btree_del_cursor(cur, error);
/* Find all blocks currently being used by the bnobt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_BNO);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_BNO);
error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
if (error)
goto err;
xfs_btree_del_cursor(cur, error);
/* Find all blocks currently being used by the cntbt. */
- cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
- XFS_BTNUM_CNT);
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.pag, XFS_BTNUM_CNT);
error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
if (error)
goto err;
@@ -805,8 +806,8 @@ xrep_agi_calc_from_btrees(
xfs_agino_t freecount;
int error;
- cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
- XFS_BTNUM_INO);
+ cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+ sc->sa.pag, XFS_BTNUM_INO);
error = xfs_ialloc_count_inodes(cur, &count, &freecount);
if (error)
goto err;
@@ -827,8 +828,8 @@ xrep_agi_calc_from_btrees(
xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
xfs_agblock_t blocks;
- cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
- XFS_BTNUM_FINO);
+ cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+ sc->sa.pag, XFS_BTNUM_FINO);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 2720bd7fe53b..d5741980094a 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -15,6 +15,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub free space btrees.
@@ -93,7 +94,7 @@ xchk_allocbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t bno;
xfs_extlen_t len;
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 77d5c4a0f09f..1d146c9d9de1 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -22,6 +22,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/* Set us up with an inode's bmap. */
int
@@ -514,7 +515,7 @@ xchk_bmap_check_rmap(
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
- cur->bc_ag.agno, rec->rm_startblock))
+ cur->bc_ag.pag->pag_agno, rec->rm_startblock))
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_blockcount > rec->rm_blockcount)
@@ -544,18 +545,18 @@ STATIC int
xchk_bmap_check_ag_rmaps(
struct xfs_scrub *sc,
int whichfork,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
struct xchk_bmap_check_rmap_info sbcri;
struct xfs_btree_cur *cur;
struct xfs_buf *agf;
int error;
- error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
+ error = xfs_alloc_read_agf(sc->mp, sc->tp, pag->pag_agno, 0, &agf);
if (error)
return error;
- cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
+ cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag);
sbcri.sc = sc;
sbcri.whichfork = whichfork;
@@ -575,6 +576,7 @@ xchk_bmap_check_rmaps(
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
+ struct xfs_perag *pag;
xfs_agnumber_t agno;
bool zero_size;
int error;
@@ -607,15 +609,16 @@ xchk_bmap_check_rmaps(
(zero_size || ifp->if_nextents > 0))
return 0;
- for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
- error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
+ for_each_perag(sc->mp, agno, pag) {
+ error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
if (error)
- return error;
+ break;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
break;
}
-
- return 0;
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
}
/*
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 6cc92291c46f..8558ca05e11d 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -12,7 +12,6 @@
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
-#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
@@ -26,6 +25,7 @@
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -460,49 +460,48 @@ xchk_ag_btcur_init(
struct xchk_ag *sa)
{
struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sa->agno;
xchk_perag_get(sc->mp, sa);
if (sa->agf_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
/* Set up a bnobt cursor for cross-referencing. */
sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
- agno, XFS_BTNUM_BNO);
+ sa->pag, XFS_BTNUM_BNO);
}
if (sa->agf_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
/* Set up a cntbt cursor for cross-referencing. */
sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
- agno, XFS_BTNUM_CNT);
+ sa->pag, XFS_BTNUM_CNT);
}
/* Set up an inobt cursor for cross-referencing. */
if (sa->agi_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
- agno, XFS_BTNUM_INO);
+ sa->pag, XFS_BTNUM_INO);
}
/* Set up a finobt cursor for cross-referencing. */
if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
- agno, XFS_BTNUM_FINO);
+ sa->pag, XFS_BTNUM_FINO);
}
/* Set up a rmapbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
- agno);
+ sa->pag);
}
/* Set up a refcountbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
- sa->agf_bp, agno);
+ sa->agf_bp, sa->pag);
}
}
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index f1d1a8c58853..fd7941e04ae1 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -9,11 +9,11 @@
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
-#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
+#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -71,11 +71,11 @@ xchk_fscount_warmup(
xfs_agnumber_t agno;
int error = 0;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
-
+ for_each_perag(mp, agno, pag) {
+ if (xchk_should_terminate(sc, &error))
+ break;
if (pag->pagi_init && pag->pagf_init)
- goto next_loop_perag;
+ continue;
/* Lock both AG headers. */
error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
@@ -89,21 +89,15 @@ xchk_fscount_warmup(
* These are supposed to be initialized by the header read
* function.
*/
- error = -EFSCORRUPTED;
- if (!pag->pagi_init || !pag->pagf_init)
+ if (!pag->pagi_init || !pag->pagf_init) {
+ error = -EFSCORRUPTED;
break;
+ }
xfs_buf_relse(agf_bp);
agf_bp = NULL;
xfs_buf_relse(agi_bp);
agi_bp = NULL;
-next_loop_perag:
- xfs_perag_put(pag);
- pag = NULL;
- error = 0;
-
- if (xchk_should_terminate(sc, &error))
- break;
}
if (agf_bp)
@@ -196,13 +190,14 @@ retry:
fsc->ifree = 0;
fsc->fdblocks = 0;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
+ if (xchk_should_terminate(sc, &error))
+ break;
/* This somehow got unset since the warmup? */
if (!pag->pagi_init || !pag->pagf_init) {
- xfs_perag_put(pag);
- return -EFSCORRUPTED;
+ error = -EFSCORRUPTED;
+ break;
}
/* Count all the inodes */
@@ -216,10 +211,8 @@ retry:
fsc->fdblocks += pag->pagf_btreeblks;
} else {
error = xchk_fscount_btreeblks(sc, fsc, agno);
- if (error) {
- xfs_perag_put(pag);
+ if (error)
break;
- }
}
/*
@@ -229,12 +222,9 @@ retry:
fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
- xfs_perag_put(pag);
-
- if (xchk_should_terminate(sc, &error))
- break;
}
-
+ if (pag)
+ xfs_perag_put(pag);
if (error)
return error;
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index 3de59b5c2ce6..2e61df3bca83 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -8,7 +8,7 @@
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_btree.h"
-#include "xfs_sb.h"
+#include "xfs_ag.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
#include "scrub/health.h"
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 8d9f3fb0cd22..30e568596b79 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -21,6 +21,7 @@
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub inode btrees.
@@ -103,7 +104,7 @@ xchk_iallocbt_chunk(
xfs_extlen_t len)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t bno;
bno = XFS_AGINO_TO_AGBNO(mp, agino);
@@ -163,7 +164,7 @@ xchk_iallocbt_check_cluster_ifree(
* the record, compute which fs inode we're talking about.
*/
agino = irec->ir_startino + irec_ino;
- fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.agno, agino);
+ fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
@@ -213,7 +214,7 @@ xchk_iallocbt_check_cluster(
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_buf *cluster_bp;
unsigned int nr_inodes;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t agbno;
unsigned int cluster_index;
uint16_t cluster_mask = 0;
@@ -423,7 +424,7 @@ xchk_iallocbt_rec(
struct xchk_iallocbt *iabt = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agino_t agino;
xfs_extlen_t len;
int holecount;
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 61f90b2c9430..76fbc7ca4cec 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -73,11 +73,25 @@ xchk_inode_extsize(
uint16_t flags)
{
xfs_failaddr_t fa;
+ uint32_t value = be32_to_cpu(dip->di_extsize);
- fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
- mode, flags);
+ fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags);
if (fa)
xchk_ino_set_corrupt(sc, ino);
+
+ /*
+ * XFS allows a sysadmin to change the rt extent size when adding a rt
+ * section to a filesystem after formatting. If there are any
+ * directories with extszinherit and rtinherit set, the hint could
+ * become misaligned with the new rextsize. The verifier doesn't check
+ * this, because we allow rtinherit directories even without an rt
+ * device. Flag this as an administrative warning since we will clean
+ * this up eventually.
+ */
+ if ((flags & XFS_DIFLAG_RTINHERIT) &&
+ (flags & XFS_DIFLAG_EXTSZINHERIT) &&
+ value % sc->mp->m_sb.sb_rextsize > 0)
+ xchk_ino_set_warning(sc, ino);
}
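A quick worked example of the alignment test added above (values are illustrative): with sb_rextsize = 16 blocks and an inherited extent size hint of 40 blocks, 40 % 16 = 8, which is non-zero, so the hint is misaligned; scrub flags it as a warning here, and the xfs_trans_log_inode() hunk earlier in this patch clears the hint the next time such a directory is logged.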
/*
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 744530a66c0c..7014b7408bad 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -13,6 +13,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub reference count btrees.
@@ -333,7 +334,7 @@ xchk_refcountbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
xfs_agblock_t *cow_blocks = bs->private;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agblock_t bno;
xfs_extlen_t len;
xfs_nlink_t refcount;
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index b8202dd08939..ebb0e245aa72 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -22,6 +22,7 @@
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "scrub/scrub.h"
@@ -303,7 +304,7 @@ xrep_alloc_ag_block(
return error;
if (bno == NULLAGBLOCK)
return -ENOSPC;
- xfs_extent_busy_reuse(sc->mp, sc->sa.agno, bno,
+ xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno,
1, false);
*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.agno, bno);
if (resv == XFS_AG_RESV_RMAPBT)
@@ -508,7 +509,7 @@ xrep_put_freelist(
* create an rmap for the block prior to merging it or else other
* parts will break.
*/
- error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.agno, agbno, 1,
+ error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
&XFS_RMAP_OINFO_AG);
if (error)
return error;
@@ -518,7 +519,7 @@ xrep_put_freelist(
agbno, 0);
if (error)
return error;
- xfs_extent_busy_insert(sc->tp, sc->sa.agno, agbno, 1,
+ xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
return 0;
@@ -554,7 +555,7 @@ xrep_reap_block(
} else {
agf_bp = sc->sa.agf_bp;
}
- cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);
+ cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);
/* Can we find any other rmappings? */
error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
@@ -576,7 +577,8 @@ xrep_reap_block(
* to run xfs_repair.
*/
if (has_other_rmap)
- error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
+ error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno,
+ 1, oinfo);
else if (resv == XFS_AG_RESV_AGFL)
error = xrep_put_freelist(sc, agbno);
else
@@ -891,7 +893,7 @@ xrep_find_ag_btree_roots(
fab->height = 0;
}
- cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
xfs_btree_del_cursor(cur, error);
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index a4f17477c5d1..fc306573f0ac 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -15,6 +15,7 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_ag.h"
/*
* Set us up to scrub reverse mapping btrees.
@@ -91,7 +92,7 @@ xchk_rmapbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_rmap_irec irec;
- xfs_agnumber_t agno = bs->cur->bc_ag.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
bool non_inode;
bool is_unwritten;
bool is_bmbt;
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 2c6c248be823..03882a605a3c 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -13,6 +13,7 @@
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
+#include "xfs_ag.h"
/* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t
@@ -26,7 +27,7 @@ xchk_btree_cur_fsbno(
cur->bc_flags & XFS_BTREE_LONG_PTRS)
return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
else if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS))
- return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno, 0);
+ return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, 0);
return NULLFSBLOCK;
}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index bfad669e6b2f..aaa7e66c42d7 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -15,10 +15,10 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
+#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
-#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index 17f36db2f792..667e297f59b1 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -9,6 +9,41 @@ static inline unsigned int bio_max_vecs(unsigned int count)
return bio_max_segs(howmany(count, PAGE_SIZE));
}
+static void
+xfs_flush_bdev_async_endio(
+ struct bio *bio)
+{
+ complete(bio->bi_private);
+}
+
+/*
+ * Submit a request for an async cache flush to run. If the request queue does
+ * not require flush operations, just skip it altogether. If the caller needs
+ * to wait for the flush completion at a later point in time, they must supply a
+ * valid completion. This will be signalled when the flush completes. The
+ * caller never sees the bio that is issued here.
+ */
+void
+xfs_flush_bdev_async(
+ struct bio *bio,
+ struct block_device *bdev,
+ struct completion *done)
+{
+ struct request_queue *q = bdev->bd_disk->queue;
+
+ if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+ complete(done);
+ return;
+ }
+
+ bio_init(bio, NULL, 0);
+ bio_set_dev(bio, bdev);
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ bio->bi_private = done;
+ bio->bi_end_io = xfs_flush_bdev_async_endio;
+
+ submit_bio(bio);
+}
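A minimal usage sketch for the new helper above (illustrative only, not taken from this patch; the bio storage, completion and target device are the caller's to supply):

	struct completion	done;
	struct bio		flush_bio;

	init_completion(&done);
	xfs_flush_bdev_async(&flush_bio, mp->m_ddev_targp->bt_bdev, &done);
	/* ... do other work while the cache flush is in flight ... */
	wait_for_completion(&done);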
int
xfs_rw_bdev(
struct block_device *bdev,
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 2988efa97e14..213a97a921bb 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -945,7 +945,7 @@ xfs_flush_unmap_range(
xfs_off_t rounding, start, end;
int error;
- rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
+ rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
@@ -1053,9 +1053,9 @@ xfs_prepare_shift(
* extent (after split) during the shift and corrupt the file. Start
* with the block just prior to the start to stabilize the boundary.
*/
- offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
+ offset = round_down(offset, mp->m_sb.sb_blocksize);
if (offset)
- offset -= (1 << mp->m_sb.sb_blocklog);
+ offset -= mp->m_sb.sb_blocksize;
/*
* Writeback and invalidate cache for the remainder of the file as we're
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 592800c8852f..8ff42b3585e0 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -10,7 +10,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
@@ -19,12 +18,10 @@
#include "xfs_buf_item.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
+#include "xfs_ag.h"
static kmem_zone_t *xfs_buf_zone;
-#define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
-
/*
* Locking orders
*
@@ -79,7 +76,7 @@ static inline int
xfs_buf_vmap_len(
struct xfs_buf *bp)
{
- return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+ return (bp->b_page_count * PAGE_SIZE);
}
/*
@@ -272,51 +269,30 @@ _xfs_buf_alloc(
return 0;
}
-/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
- struct xfs_buf *bp,
- int page_count)
+static void
+xfs_buf_free_pages(
+ struct xfs_buf *bp)
{
- /* Make sure that we have a page list */
- if (bp->b_pages == NULL) {
- bp->b_page_count = page_count;
- if (page_count <= XB_PAGES) {
- bp->b_pages = bp->b_page_array;
- } else {
- bp->b_pages = kmem_alloc(sizeof(struct page *) *
- page_count, KM_NOFS);
- if (bp->b_pages == NULL)
- return -ENOMEM;
- }
- memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
+ uint i;
+
+ ASSERT(bp->b_flags & _XBF_PAGES);
+
+ if (xfs_buf_is_vmapped(bp))
+ vm_unmap_ram(bp->b_addr, bp->b_page_count);
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ if (bp->b_pages[i])
+ __free_page(bp->b_pages[i]);
}
- return 0;
-}
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += bp->b_page_count;
-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
- struct xfs_buf *bp)
-{
- if (bp->b_pages != bp->b_page_array) {
+ if (bp->b_pages != bp->b_page_array)
kmem_free(bp->b_pages);
- bp->b_pages = NULL;
- }
+ bp->b_pages = NULL;
+ bp->b_flags &= ~_XBF_PAGES;
}
-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
static void
xfs_buf_free(
struct xfs_buf *bp)
@@ -325,137 +301,103 @@ xfs_buf_free(
ASSERT(list_empty(&bp->b_lru));
- if (bp->b_flags & _XBF_PAGES) {
- uint i;
-
- if (xfs_buf_is_vmapped(bp))
- vm_unmap_ram(bp->b_addr - bp->b_offset,
- bp->b_page_count);
-
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page = bp->b_pages[i];
-
- __free_page(page);
- }
- if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab +=
- bp->b_page_count;
- } else if (bp->b_flags & _XBF_KMEM)
+ if (bp->b_flags & _XBF_PAGES)
+ xfs_buf_free_pages(bp);
+ else if (bp->b_flags & _XBF_KMEM)
kmem_free(bp->b_addr);
- _xfs_buf_free_pages(bp);
+
xfs_buf_free_maps(bp);
kmem_cache_free(xfs_buf_zone, bp);
}
-/*
- * Allocates all the pages for buffer in question and builds it's page list.
- */
-STATIC int
-xfs_buf_allocate_memory(
- struct xfs_buf *bp,
- uint flags)
+static int
+xfs_buf_alloc_kmem(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
{
- size_t size;
- size_t nbytes, offset;
- gfp_t gfp_mask = xb_to_gfp(flags);
- unsigned short page_count, i;
- xfs_off_t start, end;
- int error;
- xfs_km_flags_t kmflag_mask = 0;
+ int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
+ xfs_km_flags_t kmflag_mask = KM_NOFS;
+ size_t size = BBTOB(bp->b_length);
- /*
- * assure zeroed buffer for non-read cases.
- */
- if (!(flags & XBF_READ)) {
+ /* Assure zeroed buffer for non-read cases. */
+ if (!(flags & XBF_READ))
kmflag_mask |= KM_ZERO;
- gfp_mask |= __GFP_ZERO;
- }
- /*
- * for buffers that are contained within a single page, just allocate
- * the memory from the heap - there's no need for the complexity of
- * page arrays to keep allocation down to order 0.
- */
- size = BBTOB(bp->b_length);
- if (size < PAGE_SIZE) {
- int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
- bp->b_addr = kmem_alloc_io(size, align_mask,
- KM_NOFS | kmflag_mask);
- if (!bp->b_addr) {
- /* low memory - use alloc_page loop instead */
- goto use_alloc_page;
- }
+ bp->b_addr = kmem_alloc_io(size, align_mask, kmflag_mask);
+ if (!bp->b_addr)
+ return -ENOMEM;
- if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
- ((unsigned long)bp->b_addr & PAGE_MASK)) {
- /* b_addr spans two pages - use alloc_page instead */
- kmem_free(bp->b_addr);
- bp->b_addr = NULL;
- goto use_alloc_page;
- }
- bp->b_offset = offset_in_page(bp->b_addr);
- bp->b_pages = bp->b_page_array;
- bp->b_pages[0] = kmem_to_page(bp->b_addr);
- bp->b_page_count = 1;
- bp->b_flags |= _XBF_KMEM;
- return 0;
+ if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
+ ((unsigned long)bp->b_addr & PAGE_MASK)) {
+ /* b_addr spans two pages - use alloc_page instead */
+ kmem_free(bp->b_addr);
+ bp->b_addr = NULL;
+ return -ENOMEM;
}
+ bp->b_offset = offset_in_page(bp->b_addr);
+ bp->b_pages = bp->b_page_array;
+ bp->b_pages[0] = kmem_to_page(bp->b_addr);
+ bp->b_page_count = 1;
+ bp->b_flags |= _XBF_KMEM;
+ return 0;
+}
-use_alloc_page:
- start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
- end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- page_count = end - start;
- error = _xfs_buf_get_pages(bp, page_count);
- if (unlikely(error))
- return error;
+static int
+xfs_buf_alloc_pages(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
+{
+ gfp_t gfp_mask = __GFP_NOWARN;
+ long filled = 0;
- offset = bp->b_offset;
+ if (flags & XBF_READ_AHEAD)
+ gfp_mask |= __GFP_NORETRY;
+ else
+ gfp_mask |= GFP_NOFS;
+
+ /* Make sure that we have a page list */
+ bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
+ if (bp->b_page_count <= XB_PAGES) {
+ bp->b_pages = bp->b_page_array;
+ } else {
+ bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
+ gfp_mask);
+ if (!bp->b_pages)
+ return -ENOMEM;
+ }
bp->b_flags |= _XBF_PAGES;
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page;
- uint retries = 0;
-retry:
- page = alloc_page(gfp_mask);
- if (unlikely(page == NULL)) {
- if (flags & XBF_READ_AHEAD) {
- bp->b_page_count = i;
- error = -ENOMEM;
- goto out_free_pages;
- }
+ /* Assure zeroed buffer for non-read cases. */
+ if (!(flags & XBF_READ))
+ gfp_mask |= __GFP_ZERO;
- /*
- * This could deadlock.
- *
- * But until all the XFS lowlevel code is revamped to
- * handle buffer allocation failures we can't do much.
- */
- if (!(++retries % 100))
- xfs_err(NULL,
- "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
- current->comm, current->pid,
- __func__, gfp_mask);
-
- XFS_STATS_INC(bp->b_mount, xb_page_retries);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
+ /*
+ * Bulk filling of pages can take multiple calls. Not filling the entire
+ * array is not an allocation failure, so don't back off if we get at
+ * least one extra page.
+ */
+ for (;;) {
+ long last = filled;
+
+ filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
+ bp->b_pages);
+ if (filled == bp->b_page_count) {
+ XFS_STATS_INC(bp->b_mount, xb_page_found);
+ break;
}
- XFS_STATS_INC(bp->b_mount, xb_page_found);
+ if (filled != last)
+ continue;
- nbytes = min_t(size_t, size, PAGE_SIZE - offset);
- size -= nbytes;
- bp->b_pages[i] = page;
- offset = 0;
+ if (flags & XBF_READ_AHEAD) {
+ xfs_buf_free_pages(bp);
+ return -ENOMEM;
+ }
+
+ XFS_STATS_INC(bp->b_mount, xb_page_retries);
+ congestion_wait(BLK_RW_ASYNC, HZ / 50);
}
return 0;
-
-out_free_pages:
- for (i = 0; i < bp->b_page_count; i++)
- __free_page(bp->b_pages[i]);
- bp->b_flags &= ~_XBF_PAGES;
- return error;
}
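As a worked example of the sizing above (assuming 4kB pages): a 64kB buffer has b_length = 128 basic blocks, so BBTOB(128) = 65536 bytes and DIV_ROUND_UP(65536, 4096) = 16 pages; the bulk allocator is then retried until all 16 slots of b_pages are filled, backing off only on a pass that makes no progress at all.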
/*
@@ -469,7 +411,7 @@ _xfs_buf_map_pages(
ASSERT(bp->b_flags & _XBF_PAGES);
if (bp->b_page_count == 1) {
/* A single page buffer is always mappable */
- bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+ bp->b_addr = page_address(bp->b_pages[0]);
} else if (flags & XBF_UNMAPPED) {
bp->b_addr = NULL;
} else {
@@ -496,7 +438,6 @@ _xfs_buf_map_pages(
if (!bp->b_addr)
return -ENOMEM;
- bp->b_addr += bp->b_offset;
}
return 0;
@@ -707,7 +648,7 @@ xfs_buf_get_map(
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
- int error = 0;
+ int error;
*bpp = NULL;
error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
@@ -720,17 +661,22 @@ xfs_buf_get_map(
if (error)
return error;
- error = xfs_buf_allocate_memory(new_bp, flags);
- if (error) {
- xfs_buf_free(new_bp);
- return error;
+ /*
+ * For buffers that fit entirely within a single page, first attempt to
+ * allocate the memory from the heap to minimise memory usage. If we
+ * can't get heap memory for these small buffers, we fall back to using
+ * the page allocator.
+ */
+ if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+ xfs_buf_alloc_kmem(new_bp, flags) < 0) {
+ error = xfs_buf_alloc_pages(new_bp, flags);
+ if (error)
+ goto out_free_buf;
}
error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
- if (error) {
- xfs_buf_free(new_bp);
- return error;
- }
+ if (error)
+ goto out_free_buf;
if (bp != new_bp)
xfs_buf_free(new_bp);
@@ -758,6 +704,9 @@ found:
trace_xfs_buf_get(bp, flags, _RET_IP_);
*bpp = bp;
return 0;
+out_free_buf:
+ xfs_buf_free(new_bp);
+ return error;
}
int
@@ -950,8 +899,7 @@ xfs_buf_get_uncached(
int flags,
struct xfs_buf **bpp)
{
- unsigned long page_count;
- int error, i;
+ int error;
struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
@@ -960,41 +908,25 @@ xfs_buf_get_uncached(
/* flags might contain irrelevant bits, pass only what we care about */
error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
if (error)
- goto fail;
+ return error;
- page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
- error = _xfs_buf_get_pages(bp, page_count);
+ error = xfs_buf_alloc_pages(bp, flags);
if (error)
goto fail_free_buf;
- for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
- if (!bp->b_pages[i]) {
- error = -ENOMEM;
- goto fail_free_mem;
- }
- }
- bp->b_flags |= _XBF_PAGES;
-
error = _xfs_buf_map_pages(bp, 0);
if (unlikely(error)) {
xfs_warn(target->bt_mount,
"%s: failed to map pages", __func__);
- goto fail_free_mem;
+ goto fail_free_buf;
}
trace_xfs_buf_get_uncached(bp, _RET_IP_);
*bpp = bp;
return 0;
- fail_free_mem:
- while (--i >= 0)
- __free_page(bp->b_pages[i]);
- _xfs_buf_free_pages(bp);
- fail_free_buf:
- xfs_buf_free_maps(bp);
- kmem_cache_free(xfs_buf_zone, bp);
- fail:
+fail_free_buf:
+ xfs_buf_free(bp);
return error;
}
@@ -1722,7 +1654,6 @@ xfs_buf_offset(
if (bp->b_addr)
return bp->b_addr + offset;
- offset += bp->b_offset;
page = bp->b_pages[offset >> PAGE_SHIFT];
return page_address(page) + (offset & (PAGE_SIZE-1));
}
@@ -1958,7 +1889,7 @@ xfs_free_buftarg(
percpu_counter_destroy(&btp->bt_io_count);
list_lru_destroy(&btp->bt_lru);
- xfs_blkdev_issue_flush(btp);
+ blkdev_issue_flush(btp->bt_bdev);
kmem_free(btp);
}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 459ca34f26f5..464dc548fa23 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -167,7 +167,8 @@ struct xfs_buf {
atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
- unsigned int b_offset; /* page offset in first page */
+ unsigned int b_offset; /* page offset of b_addr,
+ only for _XBF_KMEM buffers */
int b_error; /* error code on I/O */
/*
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index fb69879e4b2b..2828ce45b701 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -74,14 +74,12 @@ xfs_buf_item_straddle(
}
/*
- * This returns the number of log iovecs needed to log the
- * given buf log item.
+ * Return the number of log iovecs and space needed to log the given buf log
+ * item segment.
*
- * It calculates this as 1 iovec for the buf log format structure
- * and 1 for each stretch of non-contiguous chunks to be logged.
- * Contiguous chunks are logged in a single iovec.
- *
- * If the XFS_BLI_STALE flag has been set, then log nothing.
+ * It calculates this as 1 iovec for the buf log format structure and 1 for each
+ * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
+ * in a single iovec.
*/
STATIC void
xfs_buf_item_size_segment(
@@ -168,11 +166,8 @@ slow_scan:
}
/*
- * This returns the number of log iovecs needed to log the given buf log item.
- *
- * It calculates this as 1 iovec for the buf log format structure and 1 for each
- * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
- * in a single iovec.
+ * Return the number of log iovecs and space needed to log the given buf log
+ * item.
*
* Discontiguous buffers need a format structure per region that is being
* logged. This makes the changes in the buffer appear to log recovery as though
@@ -182,7 +177,11 @@ slow_scan:
* what ends up on disk.
*
* If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
- * format structures.
+ * format structures. If the item has previously been logged and has dirty
+ * regions, we do not relog them in stale buffers. This has the effect of
+ * reducing the size of the relogged item by the amount of dirty data tracked
+ * by the log item. This can result in the committing transaction reducing the
+ * amount of space being consumed by the CIL.
*/
STATIC void
xfs_buf_item_size(
@@ -199,9 +198,9 @@ xfs_buf_item_size(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
if (bip->bli_flags & XFS_BLI_STALE) {
/*
- * The buffer is stale, so all we need to log
- * is the buf log format structure with the
- * cancel flag in it.
+ * The buffer is stale, so all we need to log is the buf log
+ * format structure with the cancel flag in it as we are never
+ * going to replay the changes tracked in the log item.
*/
trace_xfs_buf_item_size_stale(bip);
ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
@@ -216,9 +215,9 @@ xfs_buf_item_size(
if (bip->bli_flags & XFS_BLI_ORDERED) {
/*
- * The buffer has been logged just to order it.
- * It is not being included in the transaction
- * commit, so no vectors are used at all.
+ * The buffer has been logged just to order it. It is not being
+ * included in the transaction commit, so no vectors are used at
+ * all.
*/
trace_xfs_buf_item_size_ordered(bip);
*nvecs = XFS_LOG_VEC_ORDERED;
@@ -475,17 +474,8 @@ xfs_buf_item_pin(
}
/*
- * This is called to unpin the buffer associated with the buf log
- * item which was previously pinned with a call to xfs_buf_item_pin().
- *
- * Also drop the reference to the buf item for the current transaction.
- * If the XFS_BLI_STALE flag is set and we are the last reference,
- * then free up the buf log item and unlock the buffer.
- *
- * If the remove flag is set we are called from uncommit in the
- * forced-shutdown path. If that is true and the reference count on
- * the log item is going to drop to zero we need to free the item's
- * descriptor in the transaction.
+ * This is called to unpin the buffer associated with the buf log item which
+ * was previously pinned with a call to xfs_buf_item_pin().
*/
STATIC void
xfs_buf_item_unpin(
@@ -502,38 +492,35 @@ xfs_buf_item_unpin(
trace_xfs_buf_item_unpin(bip);
+ /*
+ * Drop the bli ref associated with the pin and grab the hold required
+ * for the I/O simulation failure in the abort case. We have to do this
+ * before the pin count drops because the AIL doesn't acquire a bli
+ * reference. Therefore if the refcount drops to zero, the bli could
+ * still be AIL resident and the buffer submitted for I/O (and freed on
+ * completion) at any point before we return. This can be removed once
+ * the AIL properly holds a reference on the bli.
+ */
freed = atomic_dec_and_test(&bip->bli_refcount);
-
+ if (freed && !stale && remove)
+ xfs_buf_hold(bp);
if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&bp->b_waiters);
- if (freed && stale) {
+ /* nothing to do but drop the pin count if the bli is active */
+ if (!freed)
+ return;
+
+ if (stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_flags & XBF_STALE);
ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+ ASSERT(list_empty(&lip->li_trans));
+ ASSERT(!bp->b_transp);
trace_xfs_buf_item_unpin_stale(bip);
- if (remove) {
- /*
- * If we are in a transaction context, we have to
- * remove the log item from the transaction as we are
- * about to release our reference to the buffer. If we
- * don't, the unlock that occurs later in
- * xfs_trans_uncommit() will try to reference the
- * buffer which we no longer have a hold on.
- */
- if (!list_empty(&lip->li_trans))
- xfs_trans_del_item(lip);
-
- /*
- * Since the transaction no longer refers to the buffer,
- * the buffer should no longer refer to the transaction.
- */
- bp->b_transp = NULL;
- }
-
/*
* If we get called here because of an IO error, we may or may
* not have the item on the AIL. xfs_trans_ail_delete() will
@@ -550,13 +537,13 @@ xfs_buf_item_unpin(
ASSERT(bp->b_log_item == NULL);
}
xfs_buf_relse(bp);
- } else if (freed && remove) {
+ } else if (remove) {
/*
* The buffer must be locked and held by the caller to simulate
- * an async I/O failure.
+ * an async I/O failure. We acquired the hold for this case
+ * before the buffer was unpinned.
*/
xfs_buf_lock(bp);
- xfs_buf_hold(bp);
bp->b_flags |= XBF_ASYNC;
xfs_buf_ioend_fail(bp);
}
@@ -714,7 +701,7 @@ xfs_buf_item_release(
STATIC void
xfs_buf_item_committing(
struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
+ xfs_csn_t seq)
{
return xfs_buf_item_release(lip);
}
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index d44e8b4a3391..4775485b4062 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -698,7 +698,8 @@ xlog_recover_do_inode_buffer(
static xfs_lsn_t
xlog_recover_get_buf_lsn(
struct xfs_mount *mp,
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f)
{
uint32_t magic32;
uint16_t magic16;
@@ -706,11 +707,20 @@ xlog_recover_get_buf_lsn(
void *blk = bp->b_addr;
uuid_t *uuid;
xfs_lsn_t lsn = -1;
+ uint16_t blft;
/* v4 filesystems always recover immediately */
if (!xfs_sb_version_hascrc(&mp->m_sb))
goto recover_immediately;
+ /*
+ * realtime bitmap and summary file blocks do not have magic numbers or
+ * UUIDs, so we must recover them immediately.
+ */
+ blft = xfs_blft_from_flags(buf_f);
+ if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
+ goto recover_immediately;
+
magic32 = be32_to_cpu(*(__be32 *)blk);
switch (magic32) {
case XFS_ABTB_CRC_MAGIC:
@@ -796,6 +806,7 @@ xlog_recover_get_buf_lsn(
switch (magicda) {
case XFS_DIR3_LEAF1_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
case XFS_DA3_NODE_MAGIC:
lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
@@ -919,7 +930,7 @@ xlog_recover_buf_commit_pass2(
* the verifier will be reset to match whatever recover turns that
* buffer into.
*/
- lsn = xlog_recover_get_buf_lsn(mp, bp);
+ lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
trace_xfs_log_recover_buf_skip(log, buf_f);
xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index f979d0d7e6cd..736df5660f1f 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -8,7 +8,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
@@ -18,6 +17,7 @@
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
STATIC int
xfs_trim_extents(
@@ -50,7 +50,7 @@ xfs_trim_extents(
goto out_put_perag;
agf = agbp->b_addr;
- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, pag, XFS_BTNUM_CNT);
/*
* Look up the longest btree in the AGF and start with it.
@@ -108,7 +108,7 @@ xfs_trim_extents(
* If any blocks in the range are still busy, skip the
* discard and try again the next time.
*/
- if (xfs_extent_busy_search(mp, agno, fbno, flen)) {
+ if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
trace_xfs_discard_busy(mp, agno, fbno, flen);
goto next_extent;
}
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 8c1fdf37ee8f..8ed47b739b6c 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -188,7 +188,7 @@ xfs_qm_dquot_logitem_release(
STATIC void
xfs_qm_dquot_logitem_committing(
struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
+ xfs_csn_t seq)
{
return xfs_qm_dquot_logitem_release(lip);
}
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index a4075685d9eb..ad22a003f959 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -11,39 +11,37 @@
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
+#include "xfs_ag.h"
void
xfs_extent_busy_insert(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len,
unsigned int flags)
{
struct xfs_extent_busy *new;
struct xfs_extent_busy *busyp;
- struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent = NULL;
new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
- new->agno = agno;
+ new->agno = pag->pag_agno;
new->bno = bno;
new->length = len;
INIT_LIST_HEAD(&new->list);
new->flags = flags;
/* trace before insert to be able to see failed inserts */
- trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);
+ trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);
- pag = xfs_perag_get(tp->t_mountp, new->agno);
spin_lock(&pag->pagb_lock);
rbp = &pag->pagb_tree.rb_node;
while (*rbp) {
@@ -66,7 +64,6 @@ xfs_extent_busy_insert(
list_add(&new->list, &tp->t_busy);
spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
}
/*
@@ -81,21 +78,17 @@ xfs_extent_busy_insert(
int
xfs_extent_busy_search(
struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len)
{
- struct xfs_perag *pag;
struct rb_node *rbp;
struct xfs_extent_busy *busyp;
int match = 0;
- pag = xfs_perag_get(mp, agno);
+ /* find closest start bno overlap */
spin_lock(&pag->pagb_lock);
-
rbp = pag->pagb_tree.rb_node;
-
- /* find closest start bno overlap */
while (rbp) {
busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
if (bno < busyp->bno) {
@@ -115,7 +108,6 @@ xfs_extent_busy_search(
}
}
spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
return match;
}
@@ -281,17 +273,14 @@ out_force_log:
void
xfs_extent_busy_reuse(
struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agblock_t fbno,
xfs_extlen_t flen,
bool userdata)
{
- struct xfs_perag *pag;
struct rb_node *rbp;
ASSERT(flen > 0);
-
- pag = xfs_perag_get(mp, agno);
spin_lock(&pag->pagb_lock);
restart:
rbp = pag->pagb_tree.rb_node;
@@ -314,7 +303,6 @@ restart:
goto restart;
}
spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
}
/*
@@ -605,12 +593,11 @@ void
xfs_extent_busy_wait_all(
struct xfs_mount *mp)
{
+ struct xfs_perag *pag;
DEFINE_WAIT (wait);
xfs_agnumber_t agno;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- struct xfs_perag *pag = xfs_perag_get(mp, agno);
-
+ for_each_perag(mp, agno, pag) {
do {
prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
if (RB_EMPTY_ROOT(&pag->pagb_tree))
@@ -618,8 +605,6 @@ xfs_extent_busy_wait_all(
schedule();
} while (1);
finish_wait(&pag->pagb_wait, &wait);
-
- xfs_perag_put(pag);
}
}
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index 8aea07100092..4a118131059f 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -9,6 +9,7 @@
#define __XFS_EXTENT_BUSY_H__
struct xfs_mount;
+struct xfs_perag;
struct xfs_trans;
struct xfs_alloc_arg;
@@ -31,7 +32,7 @@ struct xfs_extent_busy {
};
void
-xfs_extent_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+xfs_extent_busy_insert(struct xfs_trans *tp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
void
@@ -39,11 +40,11 @@ xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
bool do_discard);
int
-xfs_extent_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+xfs_extent_busy_search(struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agblock_t bno, xfs_extlen_t len);
void
-xfs_extent_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
+xfs_extent_busy_reuse(struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
bool
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 3c0749ab9e40..cc3cfb12df53 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -119,8 +119,8 @@ xfs_dir_fsync(
return xfs_log_force_inode(ip);
}
-static xfs_lsn_t
-xfs_fsync_lsn(
+static xfs_csn_t
+xfs_fsync_seq(
struct xfs_inode *ip,
bool datasync)
{
@@ -128,7 +128,7 @@ xfs_fsync_lsn(
return 0;
if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
return 0;
- return ip->i_itemp->ili_last_lsn;
+ return ip->i_itemp->ili_commit_seq;
}
/*
@@ -151,12 +151,12 @@ xfs_fsync_flush_log(
int *log_flushed)
{
int error = 0;
- xfs_lsn_t lsn;
+ xfs_csn_t seq;
xfs_ilock(ip, XFS_ILOCK_SHARED);
- lsn = xfs_fsync_lsn(ip, datasync);
- if (lsn) {
- error = xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC,
+ seq = xfs_fsync_seq(ip, datasync);
+ if (seq) {
+ error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
log_flushed);
spin_lock(&ip->i_itemp->ili_lock);
@@ -197,9 +197,9 @@ xfs_file_fsync(
* inode size in case of an extending write.
*/
if (XFS_IS_REALTIME_INODE(ip))
- xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+ blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
else if (mp->m_logdev_targp != mp->m_ddev_targp)
- xfs_blkdev_issue_flush(mp->m_ddev_targp);
+ blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
/*
* Any inode that has dirty modifications in the log is pinned. The
@@ -219,7 +219,7 @@ xfs_file_fsync(
*/
if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
mp->m_logdev_targp == mp->m_ddev_targp)
- xfs_blkdev_issue_flush(mp->m_ddev_targp);
+ blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
return error;
}
@@ -384,21 +384,30 @@ restart:
}
goto restart;
}
+
/*
* If the offset is beyond the size of the file, we need to zero any
* blocks that fall between the existing EOF and the start of this
- * write. If zeroing is needed and we are currently holding the
- * iolock shared, we need to update it to exclusive which implies
- * having to redo all checks before.
+ * write. If zeroing is needed and we are currently holding the iolock
+ * shared, we need to update it to exclusive which implies having to
+ * redo all checks before.
+ *
+ * We need to serialise against EOF updates that occur in IO completions
+ * here. We want to make sure that nobody is changing the size while we
+ * do this check until we have placed an IO barrier (i.e. hold the
+ * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched. The
+ * spinlock effectively forms a memory barrier once we have the
+ * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
+ * hence be able to correctly determine if we need to run zeroing.
*
- * We need to serialise against EOF updates that occur in IO
- * completions here. We want to make sure that nobody is changing the
- * size while we do this check until we have placed an IO barrier (i.e.
- * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
- * The spinlock effectively forms a memory barrier once we have the
- * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
- * and hence be able to correctly determine if we need to run zeroing.
+ * We can do an unlocked check here safely as IO completion can only
+ * extend EOF. Truncate is locked out at this point, so the EOF can
+ * not move backwards, only forwards. Hence we only need to take the
+ * slow path and spin locks when we are at or beyond the current EOF.
*/
+ if (iocb->ki_pos <= i_size_read(inode))
+ goto out;
+
spin_lock(&ip->i_flags_lock);
isize = i_size_read(inode);
if (iocb->ki_pos > isize) {
@@ -426,7 +435,7 @@ restart:
drained_dio = true;
goto restart;
}
-
+
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
NULL, &xfs_buffered_write_iomap_ops);
@@ -435,6 +444,7 @@ restart:
} else
spin_unlock(&ip->i_flags_lock);
+out:
return file_modified(file);
}
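Both this hunk and the xfs_dio_write_end_io() hunk below rely on the same invariant: IO completion only ever moves EOF forward, so an unlocked i_size_read() can only under-estimate the file size and the fast path is safe. A generic sketch of the resulting check-then-lock shape (illustrative, not the patch's literal code):

	/* fast path: definitely inside the current EOF, no lock needed */
	if (pos + len <= i_size_read(inode))
		return 0;

	/* slow path: take the lock and recheck before acting on EOF */
	spin_lock(&ip->i_flags_lock);
	if (pos + len > i_size_read(inode))
		i_size_write(inode, pos + len);
	spin_unlock(&ip->i_flags_lock);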
@@ -500,7 +510,17 @@ xfs_dio_write_end_io(
* other IO completions here to update the EOF. Failing to serialise
* here can result in EOF moving backwards and Bad Things Happen when
* that occurs.
+ *
+ * As IO completion only ever extends EOF, we can do an unlocked check
+ * here to avoid taking the spinlock. If we land within the current EOF,
+ * then we do not need to do an extending update at all, and we don't
+ * need to take the lock to check this. If we race with an update moving
+ * EOF, then we'll either still be beyond EOF and need to take the lock,
+ * or we'll be within EOF and we don't need to take it at all.
*/
+ if (offset + size <= i_size_read(inode))
+ goto out;
+
spin_lock(&ip->i_flags_lock);
if (offset + size > i_size_read(inode)) {
i_size_write(inode, offset + size);
@@ -749,18 +769,18 @@ write_retry:
*/
if (ret == -EDQUOT && !cleared_space) {
xfs_iunlock(ip, iolock);
- xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
+ xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
cleared_space = true;
goto write_retry;
} else if (ret == -ENOSPC && !cleared_space) {
- struct xfs_eofblocks eofb = {0};
+ struct xfs_icwalk icw = {0};
cleared_space = true;
xfs_flush_inodes(ip->i_mount);
xfs_iunlock(ip, iolock);
- eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
- xfs_blockgc_free_space(ip->i_mount, &eofb);
+ icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
+ xfs_blockgc_free_space(ip->i_mount, &icw);
goto write_retry;
}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index db23e455eb91..eed6ca5f8f91 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -9,13 +9,13 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
#include "xfs_trace.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trans.h"
#include "xfs_filestream.h"
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 34f2b971ce43..7d0b09c1366e 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -24,6 +24,7 @@
#include "xfs_refcount_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rtalloc.h"
+#include "xfs_ag.h"
/* Convert an xfs_fsmap to an fsmap. */
static void
@@ -157,10 +158,10 @@ struct xfs_getfsmap_info {
struct xfs_fsmap_head *head;
struct fsmap *fsmap_recs; /* mapping records */
struct xfs_buf *agf_bp; /* AGF, for refcount queries */
+ struct xfs_perag *pag; /* AG info, if applicable */
xfs_daddr_t next_daddr; /* next daddr we expect */
u64 missing_owner; /* owner of holes */
u32 dev; /* device id */
- xfs_agnumber_t agno; /* AG number, if applicable */
struct xfs_rmap_irec low; /* low rmap key */
struct xfs_rmap_irec high; /* high rmap key */
bool last; /* last extent? */
@@ -203,14 +204,13 @@ xfs_getfsmap_is_shared(
*stat = false;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- /* rt files will have agno set to NULLAGNUMBER */
- if (info->agno == NULLAGNUMBER)
+ /* rt files will have no perag structure */
+ if (!info->pag)
return 0;
/* Are there any shared blocks here? */
flen = 0;
- cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
- info->agno);
+ cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, info->pag);
error = xfs_refcount_find_shared(cur, rec->rm_startblock,
rec->rm_blockcount, &fbno, &flen, false);
@@ -311,7 +311,8 @@ xfs_getfsmap_helper(
if (info->head->fmh_entries >= info->head->fmh_count)
return -ECANCELED;
- trace_xfs_fsmap_mapping(mp, info->dev, info->agno, rec);
+ trace_xfs_fsmap_mapping(mp, info->dev,
+ info->pag ? info->pag->pag_agno : NULLAGNUMBER, rec);
fmr.fmr_device = info->dev;
fmr.fmr_physical = rec_daddr;
@@ -354,7 +355,7 @@ xfs_getfsmap_datadev_helper(
xfs_fsblock_t fsb;
xfs_daddr_t rec_daddr;
- fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.agno, rec->rm_startblock);
+ fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno, rec->rm_startblock);
rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);
return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
@@ -372,7 +373,7 @@ xfs_getfsmap_datadev_bnobt_helper(
struct xfs_rmap_irec irec;
xfs_daddr_t rec_daddr;
- rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.agno,
+ rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.pag->pag_agno,
rec->ar_startblock);
irec.rm_startblock = rec->ar_startblock;
@@ -429,8 +430,8 @@ xfs_getfsmap_logdev(
info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS;
info->missing_owner = XFS_FMR_OWN_FREE;
- trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);
+ trace_xfs_fsmap_low_key(mp, info->dev, NULLAGNUMBER, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, NULLAGNUMBER, &info->high);
if (keys[0].fmr_physical > 0)
return 0;
@@ -508,8 +509,8 @@ __xfs_getfsmap_rtdev(
info->high.rm_blockcount = 0;
xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
- trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);
+ trace_xfs_fsmap_low_key(mp, info->dev, NULLAGNUMBER, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, NULLAGNUMBER, &info->high);
return query_fn(tp, info);
}
@@ -572,6 +573,7 @@ __xfs_getfsmap_datadev(
void *priv)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
struct xfs_btree_cur *bt_cur = NULL;
xfs_fsblock_t start_fsb;
xfs_fsblock_t end_fsb;
@@ -610,20 +612,20 @@ __xfs_getfsmap_datadev(
start_ag = XFS_FSB_TO_AGNO(mp, start_fsb);
end_ag = XFS_FSB_TO_AGNO(mp, end_fsb);
- /* Query each AG */
- for (info->agno = start_ag; info->agno <= end_ag; info->agno++) {
+ for_each_perag_range(mp, start_ag, end_ag, pag) {
/*
* Set the AG high key from the fsmap high key if this
* is the last AG that we're querying.
*/
- if (info->agno == end_ag) {
+ info->pag = pag;
+ if (pag->pag_agno == end_ag) {
info->high.rm_startblock = XFS_FSB_TO_AGBNO(mp,
end_fsb);
info->high.rm_offset = XFS_BB_TO_FSBT(mp,
keys[1].fmr_offset);
error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
if (error)
- goto err;
+ break;
xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
}
@@ -634,38 +636,45 @@ __xfs_getfsmap_datadev(
info->agf_bp = NULL;
}
- error = xfs_alloc_read_agf(mp, tp, info->agno, 0,
+ error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0,
&info->agf_bp);
if (error)
- goto err;
+ break;
- trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
- trace_xfs_fsmap_high_key(mp, info->dev, info->agno,
+ trace_xfs_fsmap_low_key(mp, info->dev, pag->pag_agno,
+ &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, pag->pag_agno,
&info->high);
error = query_fn(tp, info, &bt_cur, priv);
if (error)
- goto err;
+ break;
/*
* Set the AG low key to the start of the AG prior to
* moving on to the next AG.
*/
- if (info->agno == start_ag) {
+ if (pag->pag_agno == start_ag) {
info->low.rm_startblock = 0;
info->low.rm_owner = 0;
info->low.rm_offset = 0;
info->low.rm_flags = 0;
}
- }
- /* Report any gap at the end of the AG */
- info->last = true;
- error = query_fn(tp, info, &bt_cur, priv);
- if (error)
- goto err;
+ /*
+ * If this is the last AG, report any gap at the end of it
+ * before we drop the reference to the perag when the loop
+ * terminates.
+ */
+ if (pag->pag_agno == end_ag) {
+ info->last = true;
+ error = query_fn(tp, info, &bt_cur, priv);
+ if (error)
+ break;
+ }
+ info->pag = NULL;
+ }
-err:
if (bt_cur)
xfs_btree_del_cursor(bt_cur, error < 0 ? XFS_BTREE_ERROR :
XFS_BTREE_NOERROR);
@@ -673,6 +682,13 @@ err:
xfs_trans_brelse(tp, info->agf_bp);
info->agf_bp = NULL;
}
+ if (info->pag) {
+ xfs_perag_put(info->pag);
+ info->pag = NULL;
+ } else if (pag) {
+ /* loop termination case */
+ xfs_perag_put(pag);
+ }
return error;
}
@@ -691,7 +707,7 @@ xfs_getfsmap_datadev_rmapbt_query(
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_rmapbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
- info->agno);
+ info->pag);
return xfs_rmap_query_range(*curpp, &info->low, &info->high,
xfs_getfsmap_datadev_helper, info);
}
@@ -724,7 +740,7 @@ xfs_getfsmap_datadev_bnobt_query(
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_allocbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
- info->agno, XFS_BTNUM_BNO);
+ info->pag, XFS_BTNUM_BNO);
key->ar_startblock = info->low.rm_startblock;
key[1].ar_startblock = info->high.rm_startblock;
return xfs_alloc_query_range(*curpp, key, &key[1],
@@ -937,7 +953,7 @@ xfs_getfsmap(
info.dev = handlers[i].dev;
info.last = false;
- info.agno = NULLAGNUMBER;
+ info.pag = NULL;
error = handlers[i].fn(tp, dkeys, &info);
if (error)
break;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index be9cf88d2ad7..6ed29b158312 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -538,25 +538,25 @@ xfs_do_force_shutdown(
if (flags & SHUTDOWN_FORCE_UMOUNT) {
xfs_alert(mp,
-"User initiated shutdown received. Shutting down filesystem");
+"User initiated shutdown (0x%x) received. Shutting down filesystem",
+ flags);
return;
}
- xfs_notice(mp,
-"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
- __func__, flags, lnnum, fname, __return_address);
-
if (flags & SHUTDOWN_CORRUPT_INCORE) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
-"Corruption of in-memory data detected. Shutting down filesystem");
+"Corruption of in-memory data (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
+ flags, __return_address, fname, lnnum);
if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
xfs_stack_trace();
} else if (logerror) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
- "Log I/O Error Detected. Shutting down filesystem");
+"Log I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
+ flags, __return_address, fname, lnnum);
} else {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "I/O Error Detected. Shutting down filesystem");
+"I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
+ flags, __return_address, fname, lnnum);
}
xfs_alert(mp,
@@ -576,10 +576,8 @@ xfs_fs_reserve_ag_blocks(
int err2;
mp->m_finobt_nores = false;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
err2 = xfs_ag_resv_init(pag, NULL);
- xfs_perag_put(pag);
if (err2 && !error)
error = err2;
}
@@ -605,10 +603,8 @@ xfs_fs_unreserve_ag_blocks(
int error = 0;
int err2;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
err2 = xfs_ag_resv_free(pag);
- xfs_perag_put(pag);
if (err2 && !error)
error = err2;
}
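The open-coded loops above are converted to the for_each_perag()/for_each_perag_range() iterators, which fold the xfs_perag_get()/xfs_perag_put() reference handling into the loop header so the body cannot leak a perag reference. The real macros are defined elsewhere in the series; a rough sketch of the shape, assuming AG numbers are bounded by sb_agcount:

/*
 * Sketch only: the real for_each_perag() lives in xfs_ag.h and differs in
 * detail, but the reference get/put sits entirely in the loop header.
 */
#define for_each_perag_sketch(mp, agno, pag) \
	for ((agno) = 0, (pag) = xfs_perag_get((mp), 0); \
	     (pag) != NULL; \
	     (agno) = (pag)->pag_agno + 1, \
	     xfs_perag_put(pag), \
	     (pag) = (agno) < (mp)->m_sb.sb_agcount ? \
			xfs_perag_get((mp), (agno)) : NULL)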
diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c
index 8e0cb05a7142..eb10eacabc8f 100644
--- a/fs/xfs/xfs_health.c
+++ b/fs/xfs/xfs_health.c
@@ -9,11 +9,11 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_health.h"
+#include "xfs_ag.h"
/*
* Warn about metadata corruption that we detected but haven't fixed, and
@@ -34,14 +34,12 @@ xfs_health_unmount(
return;
/* Measure AG corruption levels. */
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- pag = xfs_perag_get(mp, agno);
+ for_each_perag(mp, agno, pag) {
xfs_ag_measure_sickness(pag, &sick, &checked);
if (sick) {
trace_xfs_ag_unfixed_corruption(mp, agno, sick);
warn = true;
}
- xfs_perag_put(pag);
}
/* Measure realtime volume corruption levels. */
@@ -231,6 +229,15 @@ xfs_inode_mark_sick(
ip->i_sick |= mask;
ip->i_checked |= mask;
spin_unlock(&ip->i_flags_lock);
+
+ /*
+ * Keep this inode around so we don't lose the sickness report. Scrub
+	 * grabs inodes with DONTCACHE assuming that most inodes are ok, which
+ * is not the case here.
+ */
+ spin_lock(&VFS_I(ip)->i_lock);
+ VFS_I(ip)->i_state &= ~I_DONTCACHE;
+ spin_unlock(&VFS_I(ip)->i_lock);
}
/* Mark parts of an inode healed. */
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3c81daca0e9a..6007683482c6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -9,7 +9,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -23,9 +22,65 @@
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
+#include "xfs_ag.h"
#include <linux/iversion.h>
+/* Radix tree tags for incore inode tree. */
+
+/* inode is to be reclaimed */
+#define XFS_ICI_RECLAIM_TAG 0
+/* Inode has speculative preallocations (posteof or cow) to clean. */
+#define XFS_ICI_BLOCKGC_TAG 1
+
+/*
+ * The goal for walking incore inodes. These can correspond with incore inode
+ * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
+ */
+enum xfs_icwalk_goal {
+ /* Goals that are not related to tags; these must be < 0. */
+ XFS_ICWALK_DQRELE = -1,
+
+ /* Goals directly associated with tagged inodes. */
+ XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
+ XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
+};
+
+#define XFS_ICWALK_NULL_TAG (-1U)
+
+/* Compute the inode radix tree tag for this goal. */
+static inline unsigned int
+xfs_icwalk_tag(enum xfs_icwalk_goal goal)
+{
+ return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
+}
+
+static int xfs_icwalk(struct xfs_mount *mp,
+ enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
+static int xfs_icwalk_ag(struct xfs_perag *pag,
+ enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
+
+/*
+ * Private inode cache walk flags for struct xfs_icwalk. Must not
+ * coincide with XFS_ICWALK_FLAGS_VALID.
+ */
+#define XFS_ICWALK_FLAG_DROP_UDQUOT (1U << 31)
+#define XFS_ICWALK_FLAG_DROP_GDQUOT (1U << 30)
+#define XFS_ICWALK_FLAG_DROP_PDQUOT (1U << 29)
+
+/* Stop scanning after icw_scan_limit inodes. */
+#define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28)
+
+#define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27)
+#define XFS_ICWALK_FLAG_UNION (1U << 26) /* union filter algorithm */
+
+#define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_DROP_UDQUOT | \
+ XFS_ICWALK_FLAG_DROP_GDQUOT | \
+ XFS_ICWALK_FLAG_DROP_PDQUOT | \
+ XFS_ICWALK_FLAG_SCAN_LIMIT | \
+ XFS_ICWALK_FLAG_RECLAIM_SICK | \
+ XFS_ICWALK_FLAG_UNION)
+
/*
* Allocate and initialise an xfs_inode.
*/
@@ -157,46 +212,94 @@ xfs_reclaim_work_queue(
rcu_read_unlock();
}
-static void
-xfs_perag_set_reclaim_tag(
+/*
+ * Background scanning to trim preallocated space. This is queued based on the
+ * 'speculative_prealloc_lifetime' tunable (5m by default).
+ */
+static inline void
+xfs_blockgc_queue(
struct xfs_perag *pag)
{
+ rcu_read_lock();
+ if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
+ queue_delayed_work(pag->pag_mount->m_gc_workqueue,
+ &pag->pag_blockgc_work,
+ msecs_to_jiffies(xfs_blockgc_secs * 1000));
+ rcu_read_unlock();
+}
+
+/* Set a tag on both the AG incore inode tree and the AG radix tree. */
+static void
+xfs_perag_set_inode_tag(
+ struct xfs_perag *pag,
+ xfs_agino_t agino,
+ unsigned int tag)
+{
struct xfs_mount *mp = pag->pag_mount;
+ bool was_tagged;
lockdep_assert_held(&pag->pag_ici_lock);
- if (pag->pag_ici_reclaimable++)
+
+ was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
+ radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
+
+ if (tag == XFS_ICI_RECLAIM_TAG)
+ pag->pag_ici_reclaimable++;
+
+ if (was_tagged)
return;
- /* propagate the reclaim tag up into the perag radix tree */
+ /* propagate the tag up into the perag radix tree */
spin_lock(&mp->m_perag_lock);
- radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
+ radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
spin_unlock(&mp->m_perag_lock);
- /* schedule periodic background inode reclaim */
- xfs_reclaim_work_queue(mp);
+ /* start background work */
+ switch (tag) {
+ case XFS_ICI_RECLAIM_TAG:
+ xfs_reclaim_work_queue(mp);
+ break;
+ case XFS_ICI_BLOCKGC_TAG:
+ xfs_blockgc_queue(pag);
+ break;
+ }
- trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+ trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
+/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
-xfs_perag_clear_reclaim_tag(
- struct xfs_perag *pag)
+xfs_perag_clear_inode_tag(
+ struct xfs_perag *pag,
+ xfs_agino_t agino,
+ unsigned int tag)
{
struct xfs_mount *mp = pag->pag_mount;
lockdep_assert_held(&pag->pag_ici_lock);
- if (--pag->pag_ici_reclaimable)
+
+ /*
+ * Reclaim can signal (with a null agino) that it cleared its own tag
+ * by removing the inode from the radix tree.
+ */
+ if (agino != NULLAGINO)
+ radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
+ else
+ ASSERT(tag == XFS_ICI_RECLAIM_TAG);
+
+ if (tag == XFS_ICI_RECLAIM_TAG)
+ pag->pag_ici_reclaimable--;
+
+ if (radix_tree_tagged(&pag->pag_ici_root, tag))
return;
- /* clear the reclaim tag from the perag radix tree */
+ /* clear the tag from the perag radix tree */
spin_lock(&mp->m_perag_lock);
- radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
+ radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
spin_unlock(&mp->m_perag_lock);
- trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
+ trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
+}
/*
* We set the inode flag atomically with the radix tree tag.
@@ -204,7 +307,7 @@ xfs_perag_clear_reclaim_tag(
* can go away.
*/
void
-xfs_inode_set_reclaim_tag(
+xfs_inode_mark_reclaimable(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
@@ -214,9 +317,8 @@ xfs_inode_set_reclaim_tag(
spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
- radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
- XFS_ICI_RECLAIM_TAG);
- xfs_perag_set_reclaim_tag(pag);
+ xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
@@ -224,18 +326,7 @@ xfs_inode_set_reclaim_tag(
xfs_perag_put(pag);
}
-STATIC void
-xfs_inode_clear_reclaim_tag(
- struct xfs_perag *pag,
- xfs_ino_t ino)
-{
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(pag->pag_mount, ino),
- XFS_ICI_RECLAIM_TAG);
- xfs_perag_clear_reclaim_tag(pag);
-}
-
-static void
+static inline void
xfs_inew_wait(
struct xfs_inode *ip)
{
@@ -264,14 +355,14 @@ xfs_reinit_inode(
struct xfs_mount *mp,
struct inode *inode)
{
- int error;
- uint32_t nlink = inode->i_nlink;
- uint32_t generation = inode->i_generation;
- uint64_t version = inode_peek_iversion(inode);
- umode_t mode = inode->i_mode;
- dev_t dev = inode->i_rdev;
- kuid_t uid = inode->i_uid;
- kgid_t gid = inode->i_gid;
+ int error;
+ uint32_t nlink = inode->i_nlink;
+ uint32_t generation = inode->i_generation;
+ uint64_t version = inode_peek_iversion(inode);
+ umode_t mode = inode->i_mode;
+ dev_t dev = inode->i_rdev;
+ kuid_t uid = inode->i_uid;
+ kgid_t gid = inode->i_gid;
error = inode_init_always(mp->m_super, inode);
@@ -286,6 +377,74 @@ xfs_reinit_inode(
}
/*
+ * Carefully nudge an inode whose VFS state has been torn down back into a
+ * usable state. Drops the i_flags_lock and the rcu read lock.
+ */
+static int
+xfs_iget_recycle(
+ struct xfs_perag *pag,
+ struct xfs_inode *ip) __releases(&ip->i_flags_lock)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ int error;
+
+ trace_xfs_iget_recycle(ip);
+
+ /*
+ * We need to make it look like the inode is being reclaimed to prevent
+ * the actual reclaim workers from stomping over us while we recycle
+ * the inode. We can't clear the radix tree tag yet as it requires
+ * pag_ici_lock to be held exclusive.
+ */
+ ip->i_flags |= XFS_IRECLAIM;
+
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+
+ ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+ error = xfs_reinit_inode(mp, inode);
+ if (error) {
+ bool wake;
+
+ /*
+ * Re-initializing the inode failed, and we are in deep
+ * trouble. Try to re-add it to the reclaim list.
+ */
+ rcu_read_lock();
+ spin_lock(&ip->i_flags_lock);
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
+ ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
+ ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+
+ trace_xfs_iget_recycle_fail(ip);
+ return error;
+ }
+
+ spin_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+
+ /*
+ * Clear the per-lifetime state in the inode as we are now effectively
+ * a new inode and need to return to the initial state before reuse
+ * occurs.
+ */
+ ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
+ ip->i_flags |= XFS_INEW;
+ xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ inode->i_state = I_NEW;
+ spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&pag->pag_ici_lock);
+
+ return 0;
+}
+
+/*
* If we are allocating a new inode, then check what was returned is
* actually a free, empty inode. If we are not allocating an inode,
* then check we didn't find a free inode.
@@ -348,30 +507,21 @@ xfs_iget_cache_hit(
* will not match, so check for that, too.
*/
spin_lock(&ip->i_flags_lock);
- if (ip->i_ino != ino) {
- trace_xfs_iget_skip(ip);
- XFS_STATS_INC(mp, xs_ig_frecycle);
- error = -EAGAIN;
- goto out_error;
- }
-
+ if (ip->i_ino != ino)
+ goto out_skip;
/*
* If we are racing with another cache hit that is currently
* instantiating this inode or currently recycling it out of
- * reclaimabe state, wait for the initialisation to complete
+ * reclaimable state, wait for the initialisation to complete
* before continuing.
*
* XXX(hch): eventually we should do something equivalent to
* wait_on_inode to wait for these flags to be cleared
* instead of polling for it.
*/
- if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
- trace_xfs_iget_skip(ip);
- XFS_STATS_INC(mp, xs_ig_frecycle);
- error = -EAGAIN;
- goto out_error;
- }
+ if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM))
+ goto out_skip;
/*
* Check the inode free state is valid. This also detects lookup
@@ -381,72 +531,21 @@ xfs_iget_cache_hit(
if (error)
goto out_error;
- /*
- * If IRECLAIMABLE is set, we've torn down the VFS inode already.
- * Need to carefully get it back into useable state.
- */
- if (ip->i_flags & XFS_IRECLAIMABLE) {
- trace_xfs_iget_reclaim(ip);
-
- if (flags & XFS_IGET_INCORE) {
- error = -EAGAIN;
- goto out_error;
- }
-
- /*
- * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
- * from stomping over us while we recycle the inode. We can't
- * clear the radix tree reclaimable tag yet as it requires
- * pag_ici_lock to be held exclusive.
- */
- ip->i_flags |= XFS_IRECLAIM;
-
- spin_unlock(&ip->i_flags_lock);
- rcu_read_unlock();
+ /* Skip inodes that have no vfs state. */
+ if ((flags & XFS_IGET_INCORE) &&
+ (ip->i_flags & XFS_IRECLAIMABLE))
+ goto out_skip;
- ASSERT(!rwsem_is_locked(&inode->i_rwsem));
- error = xfs_reinit_inode(mp, inode);
- if (error) {
- bool wake;
- /*
- * Re-initializing the inode failed, and we are in deep
- * trouble. Try to re-add it to the reclaim list.
- */
- rcu_read_lock();
- spin_lock(&ip->i_flags_lock);
- wake = !!__xfs_iflags_test(ip, XFS_INEW);
- ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
- if (wake)
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
- ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
- trace_xfs_iget_reclaim_fail(ip);
- goto out_error;
- }
-
- spin_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
-
- /*
- * Clear the per-lifetime state in the inode as we are now
- * effectively a new inode and need to return to the initial
- * state before reuse occurs.
- */
- ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
- ip->i_flags |= XFS_INEW;
- xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
- inode->i_state = I_NEW;
- ip->i_sick = 0;
- ip->i_checked = 0;
-
- spin_unlock(&ip->i_flags_lock);
- spin_unlock(&pag->pag_ici_lock);
+ /* The inode fits the selection criteria; process it. */
+ if (ip->i_flags & XFS_IRECLAIMABLE) {
+ /* Drops i_flags_lock and RCU read lock. */
+ error = xfs_iget_recycle(pag, ip);
+ if (error)
+ return error;
} else {
/* If the VFS inode is being torn down, pause and try again. */
- if (!igrab(inode)) {
- trace_xfs_iget_skip(ip);
- error = -EAGAIN;
- goto out_error;
- }
+ if (!igrab(inode))
+ goto out_skip;
/* We've got a live one. */
spin_unlock(&ip->i_flags_lock);
@@ -463,13 +562,16 @@ xfs_iget_cache_hit(
return 0;
+out_skip:
+ trace_xfs_iget_skip(ip);
+ XFS_STATS_INC(mp, xs_ig_frecycle);
+ error = -EAGAIN;
out_error:
spin_unlock(&ip->i_flags_lock);
rcu_read_unlock();
return error;
}
-
static int
xfs_iget_cache_miss(
struct xfs_mount *mp,
@@ -715,207 +817,96 @@ xfs_icache_inode_is_allocated(
return 0;
}
-/*
- * The inode lookup is done in batches to keep the amount of lock traffic and
- * radix tree lookups to a minimum. The batch size is a trade off between
- * lookup reduction and stack usage. This is in the reclaim path, so we can't
- * be too greedy.
- */
-#define XFS_LOOKUP_BATCH 32
-
-/*
- * Decide if the given @ip is eligible to be a part of the inode walk, and
- * grab it if so. Returns true if it's ready to go or false if we should just
- * ignore it.
- */
-STATIC bool
-xfs_inode_walk_ag_grab(
- struct xfs_inode *ip,
- int flags)
+#ifdef CONFIG_XFS_QUOTA
+/* Decide if we want to grab this inode to drop its dquots. */
+static bool
+xfs_dqrele_igrab(
+ struct xfs_inode *ip)
{
- struct inode *inode = VFS_I(ip);
- bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
+ bool ret = false;
ASSERT(rcu_read_lock_held());
/* Check for stale RCU freed inode */
spin_lock(&ip->i_flags_lock);
if (!ip->i_ino)
- goto out_unlock_noent;
-
- /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
- __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
- goto out_unlock_noent;
- spin_unlock(&ip->i_flags_lock);
-
- /* nothing to sync during shutdown */
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return false;
+ goto out_unlock;
- /* If we can't grab the inode, it must on it's way to reclaim. */
- if (!igrab(inode))
- return false;
+ /*
+ * Skip inodes that are anywhere in the reclaim machinery because we
+ * drop dquots before tagging an inode for reclamation.
+ */
+ if (ip->i_flags & (XFS_IRECLAIM | XFS_IRECLAIMABLE))
+ goto out_unlock;
- /* inode is valid */
- return true;
+ /*
+ * The inode looks alive; try to grab a VFS reference so that it won't
+ * get destroyed. If we got the reference, return true to say that
+ * we grabbed the inode.
+ *
+ * If we can't get the reference, then we know the inode had its VFS
+ * state torn down and hasn't yet entered the reclaim machinery. Since
+ * we also know that dquots are detached from an inode before it enters
+ * reclaim, we can skip the inode.
+ */
+ ret = igrab(VFS_I(ip)) != NULL;
-out_unlock_noent:
+out_unlock:
spin_unlock(&ip->i_flags_lock);
- return false;
+ return ret;
}
-/*
- * For a given per-AG structure @pag, grab, @execute, and rele all incore
- * inodes with the given radix tree @tag.
- */
-STATIC int
-xfs_inode_walk_ag(
- struct xfs_perag *pag,
- int iter_flags,
- int (*execute)(struct xfs_inode *ip, void *args),
- void *args,
- int tag)
+/* Drop this inode's dquots. */
+static void
+xfs_dqrele_inode(
+ struct xfs_inode *ip,
+ struct xfs_icwalk *icw)
{
- struct xfs_mount *mp = pag->pag_mount;
- uint32_t first_index;
- int last_error = 0;
- int skipped;
- bool done;
- int nr_found;
-
-restart:
- done = false;
- skipped = 0;
- first_index = 0;
- nr_found = 0;
- do {
- struct xfs_inode *batch[XFS_LOOKUP_BATCH];
- int error = 0;
- int i;
-
- rcu_read_lock();
-
- if (tag == XFS_ICI_NO_TAG)
- nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
- (void **)batch, first_index,
- XFS_LOOKUP_BATCH);
- else
- nr_found = radix_tree_gang_lookup_tag(
- &pag->pag_ici_root,
- (void **) batch, first_index,
- XFS_LOOKUP_BATCH, tag);
-
- if (!nr_found) {
- rcu_read_unlock();
- break;
- }
-
- /*
- * Grab the inodes before we drop the lock. if we found
- * nothing, nr == 0 and the loop will be skipped.
- */
- for (i = 0; i < nr_found; i++) {
- struct xfs_inode *ip = batch[i];
-
- if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
- batch[i] = NULL;
-
- /*
- * Update the index for the next lookup. Catch
- * overflows into the next AG range which can occur if
- * we have inodes in the last block of the AG and we
- * are currently pointing to the last inode.
- *
- * Because we may see inodes that are from the wrong AG
- * due to RCU freeing and reallocation, only update the
- * index if it lies in this AG. It was a race that lead
- * us to see this inode, so another lookup from the
- * same index will not find it again.
- */
- if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
- continue;
- first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
- if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- done = true;
- }
-
- /* unlock now we've grabbed the inodes. */
- rcu_read_unlock();
-
- for (i = 0; i < nr_found; i++) {
- if (!batch[i])
- continue;
- if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
- xfs_iflags_test(batch[i], XFS_INEW))
- xfs_inew_wait(batch[i]);
- error = execute(batch[i], args);
- xfs_irele(batch[i]);
- if (error == -EAGAIN) {
- skipped++;
- continue;
- }
- if (error && last_error != -EFSCORRUPTED)
- last_error = error;
- }
-
- /* bail out if the filesystem is corrupted. */
- if (error == -EFSCORRUPTED)
- break;
+ if (xfs_iflags_test(ip, XFS_INEW))
+ xfs_inew_wait(ip);
- cond_resched();
-
- } while (nr_found && !done);
-
- if (skipped) {
- delay(1);
- goto restart;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if (icw->icw_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
+ xfs_qm_dqrele(ip->i_udquot);
+ ip->i_udquot = NULL;
}
- return last_error;
-}
-
-/* Fetch the next (possibly tagged) per-AG structure. */
-static inline struct xfs_perag *
-xfs_inode_walk_get_perag(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- int tag)
-{
- if (tag == XFS_ICI_NO_TAG)
- return xfs_perag_get(mp, agno);
- return xfs_perag_get_tag(mp, agno, tag);
+ if (icw->icw_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
+ xfs_qm_dqrele(ip->i_gdquot);
+ ip->i_gdquot = NULL;
+ }
+ if (icw->icw_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
+ xfs_qm_dqrele(ip->i_pdquot);
+ ip->i_pdquot = NULL;
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_irele(ip);
}
/*
- * Call the @execute function on all incore inodes matching the radix tree
- * @tag.
+ * Detach all dquots from incore inodes if we can. The caller must already
+ * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
+ * not get reattached.
*/
int
-xfs_inode_walk(
+xfs_dqrele_all_inodes(
struct xfs_mount *mp,
- int iter_flags,
- int (*execute)(struct xfs_inode *ip, void *args),
- void *args,
- int tag)
+ unsigned int qflags)
{
- struct xfs_perag *pag;
- int error = 0;
- int last_error = 0;
- xfs_agnumber_t ag;
+ struct xfs_icwalk icw = { .icw_flags = 0 };
- ag = 0;
- while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
- ag = pag->pag_agno + 1;
- error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
- xfs_perag_put(pag);
- if (error) {
- last_error = error;
- if (error == -EFSCORRUPTED)
- break;
- }
- }
- return last_error;
+ if (qflags & XFS_UQUOTA_ACCT)
+ icw.icw_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
+ if (qflags & XFS_GQUOTA_ACCT)
+ icw.icw_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
+ if (qflags & XFS_PQUOTA_ACCT)
+ icw.icw_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;
+
+ return xfs_icwalk(mp, XFS_ICWALK_DQRELE, &icw);
}
+#else
+# define xfs_dqrele_igrab(ip) (false)
+# define xfs_dqrele_inode(ip, priv) ((void)0)
+#endif /* CONFIG_XFS_QUOTA */
/*
* Grab the inode for reclaim exclusively.
@@ -935,8 +926,9 @@ xfs_inode_walk(
* Return true if we grabbed it, false otherwise.
*/
static bool
-xfs_reclaim_inode_grab(
- struct xfs_inode *ip)
+xfs_reclaim_igrab(
+ struct xfs_inode *ip,
+ struct xfs_icwalk *icw)
{
ASSERT(rcu_read_lock_held());
@@ -947,6 +939,14 @@ xfs_reclaim_inode_grab(
spin_unlock(&ip->i_flags_lock);
return false;
}
+
+ /* Don't reclaim a sick inode unless the caller asked for it. */
+ if (ip->i_sick &&
+ (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
+ spin_unlock(&ip->i_flags_lock);
+ return false;
+ }
+
__xfs_iflags_set(ip, XFS_IRECLAIM);
spin_unlock(&ip->i_flags_lock);
return true;
@@ -1002,6 +1002,8 @@ reclaim:
spin_lock(&ip->i_flags_lock);
ip->i_flags = XFS_IRECLAIM;
ip->i_ino = 0;
+ ip->i_sick = 0;
+ ip->i_checked = 0;
spin_unlock(&ip->i_flags_lock);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1018,7 +1020,7 @@ reclaim:
if (!radix_tree_delete(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ino)))
ASSERT(0);
- xfs_perag_clear_reclaim_tag(pag);
+ xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
spin_unlock(&pag->pag_ici_lock);
/*
@@ -1030,7 +1032,7 @@ reclaim:
* unlocked after the lookup before we go ahead and free it.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_qm_dqdetach(ip);
+ ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
ASSERT(xfs_inode_clean(ip));
@@ -1045,108 +1047,30 @@ out:
xfs_iflags_clear(ip, XFS_IRECLAIM);
}
-/*
- * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
- * corrupted, we still want to try to reclaim all the inodes. If we don't,
- * then a shut down during filesystem unmount reclaim walk leak all the
- * unreclaimed inodes.
- *
- * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
- * so that callers that want to block until all dirty inodes are written back
- * and reclaimed can sanely loop.
- */
-static void
-xfs_reclaim_inodes_ag(
- struct xfs_mount *mp,
- int *nr_to_scan)
+/* Reclaim sick inodes if we're unmounting or the fs went down. */
+static inline bool
+xfs_want_reclaim_sick(
+ struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t ag = 0;
-
- while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
- unsigned long first_index = 0;
- int done = 0;
- int nr_found = 0;
-
- ag = pag->pag_agno + 1;
-
- first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
- do {
- struct xfs_inode *batch[XFS_LOOKUP_BATCH];
- int i;
-
- rcu_read_lock();
- nr_found = radix_tree_gang_lookup_tag(
- &pag->pag_ici_root,
- (void **)batch, first_index,
- XFS_LOOKUP_BATCH,
- XFS_ICI_RECLAIM_TAG);
- if (!nr_found) {
- done = 1;
- rcu_read_unlock();
- break;
- }
-
- /*
- * Grab the inodes before we drop the lock. if we found
- * nothing, nr == 0 and the loop will be skipped.
- */
- for (i = 0; i < nr_found; i++) {
- struct xfs_inode *ip = batch[i];
-
- if (done || !xfs_reclaim_inode_grab(ip))
- batch[i] = NULL;
-
- /*
- * Update the index for the next lookup. Catch
- * overflows into the next AG range which can
- * occur if we have inodes in the last block of
- * the AG and we are currently pointing to the
- * last inode.
- *
- * Because we may see inodes that are from the
- * wrong AG due to RCU freeing and
- * reallocation, only update the index if it
- * lies in this AG. It was a race that lead us
- * to see this inode, so another lookup from
- * the same index will not find it again.
- */
- if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
- pag->pag_agno)
- continue;
- first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
- if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- done = 1;
- }
-
- /* unlock now we've grabbed the inodes. */
- rcu_read_unlock();
-
- for (i = 0; i < nr_found; i++) {
- if (batch[i])
- xfs_reclaim_inode(batch[i], pag);
- }
-
- *nr_to_scan -= XFS_LOOKUP_BATCH;
- cond_resched();
- } while (nr_found && !done && *nr_to_scan > 0);
-
- if (done)
- first_index = 0;
- WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
- xfs_perag_put(pag);
- }
+ return (mp->m_flags & XFS_MOUNT_UNMOUNTING) ||
+ (mp->m_flags & XFS_MOUNT_NORECOVERY) ||
+ XFS_FORCED_SHUTDOWN(mp);
}
void
xfs_reclaim_inodes(
struct xfs_mount *mp)
{
- int nr_to_scan = INT_MAX;
+ struct xfs_icwalk icw = {
+ .icw_flags = 0,
+ };
+
+ if (xfs_want_reclaim_sick(mp))
+ icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
xfs_ail_push_all_sync(mp->m_ail);
- xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+ xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
}
}
@@ -1160,13 +1084,21 @@ xfs_reclaim_inodes(
long
xfs_reclaim_inodes_nr(
struct xfs_mount *mp,
- int nr_to_scan)
+ unsigned long nr_to_scan)
{
+ struct xfs_icwalk icw = {
+ .icw_flags = XFS_ICWALK_FLAG_SCAN_LIMIT,
+ .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
+ };
+
+ if (xfs_want_reclaim_sick(mp))
+ icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
+
/* kick background reclaimer and push the AIL */
xfs_reclaim_work_queue(mp);
xfs_ail_push_all(mp->m_ail);
- xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+ xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
return 0;
}
@@ -1174,13 +1106,13 @@ xfs_reclaim_inodes_nr(
* Return the number of reclaimable inodes in the filesystem for
* the shrinker to determine how much to reclaim.
*/
-int
+long
xfs_reclaim_inodes_count(
struct xfs_mount *mp)
{
struct xfs_perag *pag;
xfs_agnumber_t ag = 0;
- int reclaimable = 0;
+ long reclaimable = 0;
while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
ag = pag->pag_agno + 1;
@@ -1191,20 +1123,20 @@ xfs_reclaim_inodes_count(
}
STATIC bool
-xfs_inode_match_id(
+xfs_icwalk_match_id(
struct xfs_inode *ip,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
- if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
- !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
+ !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
return false;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
- !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
+ !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
return false;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- ip->i_projid != eofb->eof_prid)
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
+ ip->i_projid != icw->icw_prid)
return false;
return true;
@@ -1215,20 +1147,20 @@ xfs_inode_match_id(
* criteria match. This is for global/internal scans only.
*/
STATIC bool
-xfs_inode_match_id_union(
+xfs_icwalk_match_id_union(
struct xfs_inode *ip,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
- if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
- uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
+ uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
return true;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
- gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
+ gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
return true;
- if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- ip->i_projid == eofb->eof_prid)
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
+ ip->i_projid == icw->icw_prid)
return true;
return false;
@@ -1236,29 +1168,29 @@ xfs_inode_match_id_union(
/*
* Is this inode @ip eligible for eof/cow block reclamation, given some
- * filtering parameters @eofb? The inode is eligible if @eofb is null or
+ * filtering parameters @icw? The inode is eligible if @icw is null or
* if the predicate functions match.
*/
static bool
-xfs_inode_matches_eofb(
+xfs_icwalk_match(
struct xfs_inode *ip,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
bool match;
- if (!eofb)
+ if (!icw)
return true;
- if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
- match = xfs_inode_match_id_union(ip, eofb);
+ if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
+ match = xfs_icwalk_match_id_union(ip, icw);
else
- match = xfs_inode_match_id(ip, eofb);
+ match = xfs_icwalk_match_id(ip, icw);
if (!match)
return false;
/* skip the inode if the file size is too small */
- if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
- XFS_ISIZE(ip) < eofb->eof_min_file_size)
+ if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
+ XFS_ISIZE(ip) < icw->icw_min_file_size)
return false;
return true;
@@ -1276,22 +1208,20 @@ xfs_reclaim_worker(
{
struct xfs_mount *mp = container_of(to_delayed_work(work),
struct xfs_mount, m_reclaim_work);
- int nr_to_scan = INT_MAX;
- xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+ xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
xfs_reclaim_work_queue(mp);
}
STATIC int
xfs_inode_free_eofblocks(
struct xfs_inode *ip,
- void *args,
+ struct xfs_icwalk *icw,
unsigned int *lockflags)
{
- struct xfs_eofblocks *eofb = args;
bool wait;
- wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
+ wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
return 0;
@@ -1303,7 +1233,7 @@ xfs_inode_free_eofblocks(
if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
return 0;
- if (!xfs_inode_matches_eofb(ip, eofb))
+ if (!xfs_icwalk_match(ip, icw))
return 0;
/*
@@ -1326,22 +1256,6 @@ xfs_inode_free_eofblocks(
return 0;
}
-/*
- * Background scanning to trim preallocated space. This is queued based on the
- * 'speculative_prealloc_lifetime' tunable (5m by default).
- */
-static inline void
-xfs_blockgc_queue(
- struct xfs_perag *pag)
-{
- rcu_read_lock();
- if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
- queue_delayed_work(pag->pag_mount->m_gc_workqueue,
- &pag->pag_blockgc_work,
- msecs_to_jiffies(xfs_blockgc_secs * 1000));
- rcu_read_unlock();
-}
-
static void
xfs_blockgc_set_iflag(
struct xfs_inode *ip,
@@ -1349,7 +1263,6 @@ xfs_blockgc_set_iflag(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
- int tagged;
ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
@@ -1366,24 +1279,8 @@ xfs_blockgc_set_iflag(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
- tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
- radix_tree_tag_set(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- if (!tagged) {
- /* propagate the blockgc tag up into the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_set(&ip->i_mount->m_perag_tree,
- XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- spin_unlock(&ip->i_mount->m_perag_lock);
-
- /* kick off background trimming */
- xfs_blockgc_queue(pag);
-
- trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
- _RET_IP_);
- }
+ xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_BLOCKGC_TAG);
spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
@@ -1419,19 +1316,8 @@ xfs_blockgc_clear_iflag(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
- /* clear the blockgc tag from the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
- XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- XFS_ICI_BLOCKGC_TAG);
- spin_unlock(&ip->i_mount->m_perag_lock);
- trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
- _RET_IP_);
- }
+ xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_BLOCKGC_TAG);
spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
@@ -1492,14 +1378,13 @@ xfs_prep_free_cowblocks(
STATIC int
xfs_inode_free_cowblocks(
struct xfs_inode *ip,
- void *args,
+ struct xfs_icwalk *icw,
unsigned int *lockflags)
{
- struct xfs_eofblocks *eofb = args;
bool wait;
int ret = 0;
- wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
+ wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
return 0;
@@ -1507,7 +1392,7 @@ xfs_inode_free_cowblocks(
if (!xfs_prep_free_cowblocks(ip))
return 0;
- if (!xfs_inode_matches_eofb(ip, eofb))
+ if (!xfs_icwalk_match(ip, icw))
return 0;
/*
@@ -1554,14 +1439,6 @@ xfs_inode_clear_cowblocks_tag(
return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}
-#define for_each_perag_tag(mp, next_agno, pag, tag) \
- for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
- (pag) != NULL; \
- (next_agno) = (pag)->pag_agno + 1, \
- xfs_perag_put(pag), \
- (pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
-
-
/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
@@ -1586,23 +1463,66 @@ xfs_blockgc_start(
xfs_blockgc_queue(pag);
}
+/* Don't try to run block gc on an inode that's in any of these states. */
+#define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
+ XFS_IRECLAIMABLE | \
+ XFS_IRECLAIM)
+/*
+ * Decide if the given @ip is eligible for garbage collection of speculative
+ * preallocations, and grab it if so. Returns true if it's ready to go or
+ * false if we should just ignore it.
+ */
+static bool
+xfs_blockgc_igrab(
+ struct xfs_inode *ip)
+{
+ struct inode *inode = VFS_I(ip);
+
+ ASSERT(rcu_read_lock_held());
+
+ /* Check for stale RCU freed inode */
+ spin_lock(&ip->i_flags_lock);
+ if (!ip->i_ino)
+ goto out_unlock_noent;
+
+ if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
+ goto out_unlock_noent;
+ spin_unlock(&ip->i_flags_lock);
+
+ /* nothing to sync during shutdown */
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return false;
+
+	/* If we can't grab the inode, it must be on its way to reclaim. */
+ if (!igrab(inode))
+ return false;
+
+ /* inode is valid */
+ return true;
+
+out_unlock_noent:
+ spin_unlock(&ip->i_flags_lock);
+ return false;
+}
+
/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
struct xfs_inode *ip,
- void *args)
+ struct xfs_icwalk *icw)
{
unsigned int lockflags = 0;
int error;
- error = xfs_inode_free_eofblocks(ip, args, &lockflags);
+ error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
if (error)
goto unlock;
- error = xfs_inode_free_cowblocks(ip, args, &lockflags);
+ error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
if (lockflags)
xfs_iunlock(ip, lockflags);
+ xfs_irele(ip);
return error;
}
@@ -1618,8 +1538,7 @@ xfs_blockgc_worker(
if (!sb_start_write_trylock(mp->m_super))
return;
- error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
- XFS_ICI_BLOCKGC_TAG);
+ error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
if (error)
xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
pag->pag_agno, error);
@@ -1633,12 +1552,11 @@ xfs_blockgc_worker(
int
xfs_blockgc_free_space(
struct xfs_mount *mp,
- struct xfs_eofblocks *eofb)
+ struct xfs_icwalk *icw)
{
- trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);
+ trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
- return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb,
- XFS_ICI_BLOCKGC_TAG);
+ return xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
}
/*
@@ -1648,7 +1566,7 @@ xfs_blockgc_free_space(
* scan.
*
* Callers must not hold any inode's ILOCK. If requesting a synchronous scan
- * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
+ * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
* MMAPLOCK.
*/
int
@@ -1657,9 +1575,9 @@ xfs_blockgc_free_dquots(
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp,
struct xfs_dquot *pdqp,
- unsigned int eof_flags)
+ unsigned int iwalk_flags)
{
- struct xfs_eofblocks eofb = {0};
+ struct xfs_icwalk icw = {0};
bool do_work = false;
if (!udqp && !gdqp && !pdqp)
@@ -1669,40 +1587,260 @@ xfs_blockgc_free_dquots(
* Run a scan to free blocks using the union filter to cover all
* applicable quotas in a single scan.
*/
- eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;
+ icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
- eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
- eofb.eof_flags |= XFS_EOF_FLAGS_UID;
+ icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
+ icw.icw_flags |= XFS_ICWALK_FLAG_UID;
do_work = true;
}
if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
- eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
- eofb.eof_flags |= XFS_EOF_FLAGS_GID;
+ icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
+ icw.icw_flags |= XFS_ICWALK_FLAG_GID;
do_work = true;
}
if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
- eofb.eof_prid = pdqp->q_id;
- eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
+ icw.icw_prid = pdqp->q_id;
+ icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
do_work = true;
}
if (!do_work)
return 0;
- return xfs_blockgc_free_space(mp, &eofb);
+ return xfs_blockgc_free_space(mp, &icw);
}
/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
struct xfs_inode *ip,
- unsigned int eof_flags)
+ unsigned int iwalk_flags)
{
return xfs_blockgc_free_dquots(ip->i_mount,
xfs_inode_dquot(ip, XFS_DQTYPE_USER),
xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
- xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
+ xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
+}
+
+/* XFS Inode Cache Walking Code */
+
+/*
+ * The inode lookup is done in batches to keep the amount of lock traffic and
+ * radix tree lookups to a minimum. The batch size is a trade off between
+ * lookup reduction and stack usage. This is in the reclaim path, so we can't
+ * be too greedy.
+ */
+#define XFS_LOOKUP_BATCH 32
+
+
+/*
+ * Decide if we want to grab this inode in anticipation of doing work towards
+ * the goal.
+ */
+static inline bool
+xfs_icwalk_igrab(
+ enum xfs_icwalk_goal goal,
+ struct xfs_inode *ip,
+ struct xfs_icwalk *icw)
+{
+ switch (goal) {
+ case XFS_ICWALK_DQRELE:
+ return xfs_dqrele_igrab(ip);
+ case XFS_ICWALK_BLOCKGC:
+ return xfs_blockgc_igrab(ip);
+ case XFS_ICWALK_RECLAIM:
+ return xfs_reclaim_igrab(ip, icw);
+ default:
+ return false;
+ }
+}
+
+/*
+ * Process an inode. Each processing function must handle any state changes
+ * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
+ */
+static inline int
+xfs_icwalk_process_inode(
+ enum xfs_icwalk_goal goal,
+ struct xfs_inode *ip,
+ struct xfs_perag *pag,
+ struct xfs_icwalk *icw)
+{
+ int error = 0;
+
+ switch (goal) {
+ case XFS_ICWALK_DQRELE:
+ xfs_dqrele_inode(ip, icw);
+ break;
+ case XFS_ICWALK_BLOCKGC:
+ error = xfs_blockgc_scan_inode(ip, icw);
+ break;
+ case XFS_ICWALK_RECLAIM:
+ xfs_reclaim_inode(ip, pag);
+ break;
+ }
+ return error;
+}
+
+/*
+ * For a given per-AG structure @pag and a goal, grab qualifying inodes and
+ * process them in some manner.
+ */
+static int
+xfs_icwalk_ag(
+ struct xfs_perag *pag,
+ enum xfs_icwalk_goal goal,
+ struct xfs_icwalk *icw)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+ uint32_t first_index;
+ int last_error = 0;
+ int skipped;
+ bool done;
+ int nr_found;
+
+restart:
+ done = false;
+ skipped = 0;
+ if (goal == XFS_ICWALK_RECLAIM)
+ first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
+ else
+ first_index = 0;
+ nr_found = 0;
+ do {
+ struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+ unsigned int tag = xfs_icwalk_tag(goal);
+ int error = 0;
+ int i;
+
+ rcu_read_lock();
+
+ if (tag == XFS_ICWALK_NULL_TAG)
+ nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+ (void **)batch, first_index,
+ XFS_LOOKUP_BATCH);
+ else
+ nr_found = radix_tree_gang_lookup_tag(
+ &pag->pag_ici_root,
+ (void **) batch, first_index,
+ XFS_LOOKUP_BATCH, tag);
+
+ if (!nr_found) {
+ done = true;
+ rcu_read_unlock();
+ break;
+ }
+
+ /*
+		 * Grab the inodes before we drop the lock. If we found
+ * nothing, nr == 0 and the loop will be skipped.
+ */
+ for (i = 0; i < nr_found; i++) {
+ struct xfs_inode *ip = batch[i];
+
+ if (done || !xfs_icwalk_igrab(goal, ip, icw))
+ batch[i] = NULL;
+
+ /*
+ * Update the index for the next lookup. Catch
+ * overflows into the next AG range which can occur if
+ * we have inodes in the last block of the AG and we
+ * are currently pointing to the last inode.
+ *
+ * Because we may see inodes that are from the wrong AG
+ * due to RCU freeing and reallocation, only update the
+		 * index if it lies in this AG. It was a race that led
+ * us to see this inode, so another lookup from the
+ * same index will not find it again.
+ */
+ if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+ continue;
+ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+ if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+ done = true;
+ }
+
+ /* unlock now we've grabbed the inodes. */
+ rcu_read_unlock();
+
+ for (i = 0; i < nr_found; i++) {
+ if (!batch[i])
+ continue;
+ error = xfs_icwalk_process_inode(goal, batch[i], pag,
+ icw);
+ if (error == -EAGAIN) {
+ skipped++;
+ continue;
+ }
+ if (error && last_error != -EFSCORRUPTED)
+ last_error = error;
+ }
+
+ /* bail out if the filesystem is corrupted. */
+ if (error == -EFSCORRUPTED)
+ break;
+
+ cond_resched();
+
+ if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
+ icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
+ if (icw->icw_scan_limit <= 0)
+ break;
+ }
+ } while (nr_found && !done);
+
+ if (goal == XFS_ICWALK_RECLAIM) {
+ if (done)
+ first_index = 0;
+ WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
+ }
+
+ if (skipped) {
+ delay(1);
+ goto restart;
+ }
+ return last_error;
+}
+
+/* Fetch the next (possibly tagged) per-AG structure. */
+static inline struct xfs_perag *
+xfs_icwalk_get_perag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ enum xfs_icwalk_goal goal)
+{
+ unsigned int tag = xfs_icwalk_tag(goal);
+
+ if (tag == XFS_ICWALK_NULL_TAG)
+ return xfs_perag_get(mp, agno);
+ return xfs_perag_get_tag(mp, agno, tag);
+}
+
+/* Walk all incore inodes to achieve a given goal. */
+static int
+xfs_icwalk(
+ struct xfs_mount *mp,
+ enum xfs_icwalk_goal goal,
+ struct xfs_icwalk *icw)
+{
+ struct xfs_perag *pag;
+ int error = 0;
+ int last_error = 0;
+ xfs_agnumber_t agno = 0;
+
+ while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
+ agno = pag->pag_agno + 1;
+ error = xfs_icwalk_ag(pag, goal, icw);
+ xfs_perag_put(pag);
+ if (error) {
+ last_error = error;
+ if (error == -EFSCORRUPTED)
+ break;
+ }
+ }
+ return last_error;
+ BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index d1fddb152420..c751cc32dc46 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -9,22 +9,27 @@
struct xfs_mount;
struct xfs_perag;
-struct xfs_eofblocks {
- __u32 eof_flags;
- kuid_t eof_uid;
- kgid_t eof_gid;
- prid_t eof_prid;
- __u64 eof_min_file_size;
+struct xfs_icwalk {
+ __u32 icw_flags;
+ kuid_t icw_uid;
+ kgid_t icw_gid;
+ prid_t icw_prid;
+ __u64 icw_min_file_size;
+ long icw_scan_limit;
};
-/*
- * tags for inode radix tree
- */
-#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
- in xfs_inode_walk */
-#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
-/* Inode has speculative preallocations (posteof or cow) to clean. */
-#define XFS_ICI_BLOCKGC_TAG 1
+/* Flags that reflect xfs_fs_eofblocks functionality. */
+#define XFS_ICWALK_FLAG_SYNC (1U << 0) /* sync/wait mode scan */
+#define XFS_ICWALK_FLAG_UID (1U << 1) /* filter by uid */
+#define XFS_ICWALK_FLAG_GID (1U << 2) /* filter by gid */
+#define XFS_ICWALK_FLAG_PRID (1U << 3) /* filter by project id */
+#define XFS_ICWALK_FLAG_MINFILESIZE (1U << 4) /* filter by min file size */
+
+#define XFS_ICWALK_FLAGS_VALID (XFS_ICWALK_FLAG_SYNC | \
+ XFS_ICWALK_FLAG_UID | \
+ XFS_ICWALK_FLAG_GID | \
+ XFS_ICWALK_FLAG_PRID | \
+ XFS_ICWALK_FLAG_MINFILESIZE)
/*
* Flags for xfs_iget()
@@ -34,11 +39,6 @@ struct xfs_eofblocks {
#define XFS_IGET_DONTCACHE 0x4
#define XFS_IGET_INCORE 0x8 /* don't read from disk or reinit */
-/*
- * flags for AG inode iterator
- */
-#define XFS_INODE_WALK_INEW_WAIT 0x1 /* wait on new inodes */
-
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -49,16 +49,16 @@ void xfs_inode_free(struct xfs_inode *ip);
void xfs_reclaim_worker(struct work_struct *work);
void xfs_reclaim_inodes(struct xfs_mount *mp);
-int xfs_reclaim_inodes_count(struct xfs_mount *mp);
-long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
+long xfs_reclaim_inodes_count(struct xfs_mount *mp);
+long xfs_reclaim_inodes_nr(struct xfs_mount *mp, unsigned long nr_to_scan);
-void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+void xfs_inode_mark_reclaimable(struct xfs_inode *ip);
int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
- unsigned int eof_flags);
-int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int eof_flags);
-int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_eofblocks *eofb);
+ unsigned int iwalk_flags);
+int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
+int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icw);
void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
@@ -68,9 +68,11 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
void xfs_blockgc_worker(struct work_struct *work);
-int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
- int (*execute)(struct xfs_inode *ip, void *args),
- void *args, int tag);
+#ifdef CONFIG_XFS_QUOTA
+int xfs_dqrele_all_inodes(struct xfs_mount *mp, unsigned int qflags);
+#else
+# define xfs_dqrele_all_inodes(mp, qflags) (0)
+#endif
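xfs_dqrele_all_inodes() is only built when CONFIG_XFS_QUOTA is enabled; otherwise the stub evaluates to 0 so callers need no #ifdefs. A hypothetical caller fragment (not the actual quota-off code) showing the expected ordering, where the ACTIVE flags are cleared before the walk so dquots cannot be reattached:

	/* Hypothetical quota-off fragment; ordering per the comment above. */
	mp->m_qflags &= ~(XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE |
			  XFS_PQUOTA_ACTIVE);
	error = xfs_dqrele_all_inodes(mp, XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT |
					  XFS_PQUOTA_ACCT);
	if (error)
		xfs_warn(mp, "dquot release walk failed, err=%d", error);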
int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t ino, bool *inuse);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1db99a909b23..990b72ae3635 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -11,7 +11,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
@@ -35,6 +34,7 @@
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
+#include "xfs_ag.h"
kmem_zone_t *xfs_inode_zone;
@@ -45,7 +45,8 @@ kmem_zone_t *xfs_inode_zone;
#define XFS_ITRUNC_MAX_EXTENTS 2
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
-STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
+STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
+ struct xfs_inode *);
/*
* helper function to extract extent size hint from inode
@@ -778,7 +779,7 @@ xfs_inode_inherit_flags2(
* Initialise a newly allocated inode and return the in-core inode to the
* caller locked exclusively.
*/
-static int
+int
xfs_init_new_inode(
struct user_namespace *mnt_userns,
struct xfs_trans *tp,
@@ -915,57 +916,6 @@ xfs_init_new_inode(
}
/*
- * Allocates a new inode from disk and return a pointer to the incore copy. This
- * routine will internally commit the current transaction and allocate a new one
- * if we needed to allocate more on-disk free inodes to perform the requested
- * operation.
- *
- * If we are allocating quota inodes, we do not have a parent inode to attach to
- * or associate with (i.e. dp == NULL) because they are not linked into the
- * directory structure - they are attached directly to the superblock - and so
- * have no parent.
- */
-int
-xfs_dir_ialloc(
- struct user_namespace *mnt_userns,
- struct xfs_trans **tpp,
- struct xfs_inode *dp,
- umode_t mode,
- xfs_nlink_t nlink,
- dev_t rdev,
- prid_t prid,
- bool init_xattrs,
- struct xfs_inode **ipp)
-{
- struct xfs_buf *agibp;
- xfs_ino_t parent_ino = dp ? dp->i_ino : 0;
- xfs_ino_t ino;
- int error;
-
- ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
-
- /*
- * Call the space management code to pick the on-disk inode to be
- * allocated.
- */
- error = xfs_dialloc_select_ag(tpp, parent_ino, mode, &agibp);
- if (error)
- return error;
-
- if (!agibp)
- return -ENOSPC;
-
- /* Allocate an inode from the selected AG */
- error = xfs_dialloc_ag(*tpp, agibp, parent_ino, &ino);
- if (error)
- return error;
- ASSERT(ino != NULLFSINO);
-
- return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
- prid, init_xattrs, ipp);
-}
-
-/*
* Decrement the link count on an inode & log the change. If this causes the
* link count to go to zero, move the inode to AGI unlinked list so that it can
* be freed when the last active reference goes away via xfs_inactive().
@@ -1022,6 +972,7 @@ xfs_create(
struct xfs_dquot *pdqp = NULL;
struct xfs_trans_res *tres;
uint resblks;
+ xfs_ino_t ino;
trace_xfs_create(dp, name);
@@ -1078,14 +1029,16 @@ xfs_create(
* entry pointing to them, but a directory also the "." entry
* pointing to itself.
*/
- error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
- prid, init_xattrs, &ip);
+ error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ if (!error)
+ error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
+ is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
if (error)
goto out_trans_cancel;
/*
* Now we join the directory inode to the transaction. We do not do it
- * earlier because xfs_dir_ialloc might commit the previous transaction
+ * earlier because xfs_dialloc might commit the previous transaction
* (and release all the locks). An error from here on will result in
* the transaction cancel unlocking dp so don't do it explicitly in the
* error path.
@@ -1175,6 +1128,7 @@ xfs_create_tmpfile(
struct xfs_dquot *pdqp = NULL;
struct xfs_trans_res *tres;
uint resblks;
+ xfs_ino_t ino;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -1199,8 +1153,10 @@ xfs_create_tmpfile(
if (error)
goto out_release_dquots;
- error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid,
- false, &ip);
+ error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
+ if (!error)
+ error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
+ 0, 0, prid, false, &ip);
if (error)
goto out_trans_cancel;
@@ -1315,7 +1271,11 @@ xfs_link(
* Handle initial link state of O_TMPFILE inode
*/
if (VFS_I(sip)->i_nlink == 0) {
- error = xfs_iunlink_remove(tp, sip);
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
+ error = xfs_iunlink_remove(tp, pag, sip);
+ xfs_perag_put(pag);
if (error)
goto error_return;
}
@@ -1716,7 +1676,7 @@ xfs_inactive(
*/
if (VFS_I(ip)->i_mode == 0) {
ASSERT(ip->i_df.if_broot_bytes == 0);
- return;
+ goto out;
}
mp = ip->i_mount;
@@ -1724,11 +1684,11 @@ xfs_inactive(
/* If this is a read-only mount, don't do this (would generate I/O) */
if (mp->m_flags & XFS_MOUNT_RDONLY)
- return;
+ goto out;
/* Metadata inodes require explicit resource cleanup. */
if (xfs_is_metadata_inode(ip))
- return;
+ goto out;
/* Try to clean out the cow blocks if there are any. */
if (xfs_inode_has_cow_data(ip))
@@ -1747,7 +1707,7 @@ xfs_inactive(
if (xfs_can_free_eofblocks(ip, true))
xfs_free_eofblocks(ip);
- return;
+ goto out;
}
if (S_ISREG(VFS_I(ip)->i_mode) &&
@@ -1757,14 +1717,14 @@ xfs_inactive(
error = xfs_qm_dqattach(ip);
if (error)
- return;
+ goto out;
if (S_ISLNK(VFS_I(ip)->i_mode))
error = xfs_inactive_symlink(ip);
else if (truncate)
error = xfs_inactive_truncate(ip);
if (error)
- return;
+ goto out;
/*
* If there are attributes associated with the file then blow them away
@@ -1774,7 +1734,7 @@ xfs_inactive(
if (XFS_IFORK_Q(ip)) {
error = xfs_attr_inactive(ip);
if (error)
- return;
+ goto out;
}
ASSERT(!ip->i_afp);
@@ -1783,12 +1743,12 @@ xfs_inactive(
/*
* Free the inode.
*/
- error = xfs_inactive_ifree(ip);
- if (error)
- return;
+ xfs_inactive_ifree(ip);
+out:
/*
- * Release the dquots held by inode, if any.
+ * We're done making metadata updates for this inode, so we can release
+ * the attached dquots.
*/
xfs_qm_dqdetach(ip);
}
@@ -2008,7 +1968,7 @@ xfs_iunlink_destroy(
STATIC int
xfs_iunlink_update_bucket(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_buf *agibp,
unsigned int bucket_index,
xfs_agino_t new_agino)
@@ -2017,10 +1977,10 @@ xfs_iunlink_update_bucket(
xfs_agino_t old_value;
int offset;
- ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
+ ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
+ trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
old_value, new_agino);
/*
@@ -2044,7 +2004,7 @@ xfs_iunlink_update_bucket(
STATIC void
xfs_iunlink_update_dinode(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t agino,
struct xfs_buf *ibp,
struct xfs_dinode *dip,
@@ -2054,9 +2014,9 @@ xfs_iunlink_update_dinode(
struct xfs_mount *mp = tp->t_mountp;
int offset;
- ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
+ ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
- trace_xfs_iunlink_update_dinode(mp, agno, agino,
+ trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
be32_to_cpu(dip->di_next_unlinked), next_agino);
dip->di_next_unlinked = cpu_to_be32(next_agino);
@@ -2074,7 +2034,7 @@ STATIC int
xfs_iunlink_update_inode(
struct xfs_trans *tp,
struct xfs_inode *ip,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t next_agino,
xfs_agino_t *old_next_agino)
{
@@ -2084,7 +2044,7 @@ xfs_iunlink_update_inode(
xfs_agino_t old_value;
int error;
- ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
+ ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
if (error)
@@ -2093,7 +2053,7 @@ xfs_iunlink_update_inode(
/* Make sure the old pointer isn't garbage. */
old_value = be32_to_cpu(dip->di_next_unlinked);
- if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
+ if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
@@ -2116,7 +2076,7 @@ xfs_iunlink_update_inode(
}
/* Ok, update the new pointer. */
- xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
ibp, dip, &ip->i_imap, next_agino);
return 0;
out:
@@ -2137,10 +2097,10 @@ xfs_iunlink(
struct xfs_inode *ip)
{
struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
struct xfs_agi *agi;
struct xfs_buf *agibp;
xfs_agino_t next_agino;
- xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
int error;
@@ -2149,10 +2109,12 @@ xfs_iunlink(
ASSERT(VFS_I(ip)->i_mode != 0);
trace_xfs_iunlink(ip);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+
/* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(mp, tp, agno, &agibp);
+ error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
if (error)
- return error;
+ goto out;
agi = agibp->b_addr;
/*
@@ -2162,9 +2124,10 @@ xfs_iunlink(
*/
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
if (next_agino == agino ||
- !xfs_verify_agino_or_null(mp, agno, next_agino)) {
+ !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
xfs_buf_mark_corrupt(agibp);
- return -EFSCORRUPTED;
+ error = -EFSCORRUPTED;
+ goto out;
}
if (next_agino != NULLAGINO) {
@@ -2174,23 +2137,26 @@ xfs_iunlink(
* There is already another inode in the bucket, so point this
* inode to the current head of the list.
*/
- error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
+ error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
&old_agino);
if (error)
- return error;
+ goto out;
ASSERT(old_agino == NULLAGINO);
/*
* agino has been unlinked, add a backref from the next inode
* back to agino.
*/
- error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
+ error = xfs_iunlink_add_backref(pag, agino, next_agino);
if (error)
- return error;
+ goto out;
}
/* Point the head of the list to point to this inode. */
- return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
+ error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
+out:
+ xfs_perag_put(pag);
+ return error;
}
/* Return the imap, dinode pointer, and buffer for an inode. */
@@ -2238,14 +2204,13 @@ xfs_iunlink_map_ino(
STATIC int
xfs_iunlink_map_prev(
struct xfs_trans *tp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
xfs_agino_t head_agino,
xfs_agino_t target_agino,
xfs_agino_t *agino,
struct xfs_imap *imap,
struct xfs_dinode **dipp,
- struct xfs_buf **bpp,
- struct xfs_perag *pag)
+ struct xfs_buf **bpp)
{
struct xfs_mount *mp = tp->t_mountp;
xfs_agino_t next_agino;
@@ -2257,7 +2222,8 @@ xfs_iunlink_map_prev(
/* See if our backref cache can find it faster. */
*agino = xfs_iunlink_lookup_backref(pag, target_agino);
if (*agino != NULLAGINO) {
- error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
+ error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
+ dipp, bpp);
if (error)
return error;
@@ -2273,7 +2239,7 @@ xfs_iunlink_map_prev(
WARN_ON_ONCE(1);
}
- trace_xfs_iunlink_map_prev_fallback(mp, agno);
+ trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
/* Otherwise, walk the entire bucket until we find it. */
next_agino = head_agino;
@@ -2284,8 +2250,8 @@ xfs_iunlink_map_prev(
xfs_trans_brelse(tp, *bpp);
*agino = next_agino;
- error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
- bpp);
+ error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
+ dipp, bpp);
if (error)
return error;
@@ -2294,7 +2260,7 @@ xfs_iunlink_map_prev(
* Make sure this pointer is valid and isn't an obvious
* infinite loop.
*/
- if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
+ if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
next_agino == unlinked_agino) {
XFS_CORRUPTION_ERROR(__func__,
XFS_ERRLEVEL_LOW, mp,
@@ -2314,6 +2280,7 @@ xfs_iunlink_map_prev(
STATIC int
xfs_iunlink_remove(
struct xfs_trans *tp,
+ struct xfs_perag *pag,
struct xfs_inode *ip)
{
struct xfs_mount *mp = tp->t_mountp;
@@ -2321,7 +2288,6 @@ xfs_iunlink_remove(
struct xfs_buf *agibp;
struct xfs_buf *last_ibp;
struct xfs_dinode *last_dip = NULL;
- xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
xfs_agino_t next_agino;
xfs_agino_t head_agino;
@@ -2331,7 +2297,7 @@ xfs_iunlink_remove(
trace_xfs_iunlink_remove(ip);
/* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(mp, tp, agno, &agibp);
+ error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
if (error)
return error;
agi = agibp->b_addr;
@@ -2341,7 +2307,7 @@ xfs_iunlink_remove(
* go on. Make sure the head pointer isn't garbage.
*/
head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- if (!xfs_verify_agino(mp, agno, head_agino)) {
+ if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
agi, sizeof(*agi));
return -EFSCORRUPTED;
@@ -2352,7 +2318,7 @@ xfs_iunlink_remove(
* the old pointer value so that we can update whatever was previous
* to us in the list to point to whatever was next in the list.
*/
- error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
+ error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
if (error)
return error;
@@ -2364,8 +2330,7 @@ xfs_iunlink_remove(
* this inode's backref to point from the next inode.
*/
if (next_agino != NULLAGINO) {
- error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
- NULLAGINO);
+ error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
if (error)
return error;
}
@@ -2375,14 +2340,13 @@ xfs_iunlink_remove(
xfs_agino_t prev_agino;
/* We need to search the list for the inode being freed. */
- error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
- &prev_agino, &imap, &last_dip, &last_ibp,
- agibp->b_pag);
+ error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
+ &prev_agino, &imap, &last_dip, &last_ibp);
if (error)
return error;
/* Point the previous inode on the list to the next inode. */
- xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
+ xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
last_dip, &imap, next_agino);
/*
@@ -2398,7 +2362,7 @@ xfs_iunlink_remove(
}
/* Point the head of the list to the next unlinked inode. */
- return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
+ return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
next_agino);
}
@@ -2409,12 +2373,11 @@ xfs_iunlink_remove(
*/
static void
xfs_ifree_mark_inode_stale(
- struct xfs_buf *bp,
+ struct xfs_perag *pag,
struct xfs_inode *free_ip,
xfs_ino_t inum)
{
- struct xfs_mount *mp = bp->b_mount;
- struct xfs_perag *pag = bp->b_pag;
+ struct xfs_mount *mp = pag->pag_mount;
struct xfs_inode_log_item *iip;
struct xfs_inode *ip;
@@ -2504,10 +2467,11 @@ out_iflags_unlock:
* inodes that are in memory - they all must be marked stale and attached to
* the cluster buffer.
*/
-STATIC int
+static int
xfs_ifree_cluster(
- struct xfs_inode *free_ip,
struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_inode *free_ip,
struct xfs_icluster *xic)
{
struct xfs_mount *mp = free_ip->i_mount;
@@ -2569,7 +2533,7 @@ xfs_ifree_cluster(
* already marked XFS_ISTALE.
*/
for (i = 0; i < igeo->inodes_per_cluster; i++)
- xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
+ xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
xfs_trans_stale_inode_buf(tp, bp);
xfs_trans_binval(tp, bp);
@@ -2592,9 +2556,11 @@ xfs_ifree(
struct xfs_trans *tp,
struct xfs_inode *ip)
{
- int error;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
struct xfs_icluster xic = { 0 };
struct xfs_inode_log_item *iip = ip->i_itemp;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(VFS_I(ip)->i_nlink == 0);
@@ -2602,16 +2568,18 @@ xfs_ifree(
ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
ASSERT(ip->i_nblocks == 0);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+
/*
* Pull the on-disk inode from the AGI unlinked list.
*/
- error = xfs_iunlink_remove(tp, ip);
+ error = xfs_iunlink_remove(tp, pag, ip);
if (error)
- return error;
+ goto out;
- error = xfs_difree(tp, ip->i_ino, &xic);
+ error = xfs_difree(tp, pag, ip->i_ino, &xic);
if (error)
- return error;
+ goto out;
/*
* Free any local-format data sitting around before we reset the
@@ -2626,7 +2594,7 @@ xfs_ifree(
VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
ip->i_diflags = 0;
- ip->i_diflags2 = ip->i_mount->m_ino_geo.new_diflags2;
+ ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
ip->i_forkoff = 0; /* mark the attr fork not in use */
ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
@@ -2645,8 +2613,9 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (xic.deleted)
- error = xfs_ifree_cluster(ip, tp, &xic);
-
+ error = xfs_ifree_cluster(tp, pag, ip, &xic);
+out:
+ xfs_perag_put(pag);
return error;
}
@@ -2664,7 +2633,7 @@ xfs_iunpin(
trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
/* Give the log a push to start the unpinning I/O */
- xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
+ xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
}
@@ -2794,6 +2763,19 @@ xfs_remove(
error = xfs_droplink(tp, ip);
if (error)
goto out_trans_cancel;
+
+ /*
+ * Point the unlinked child directory's ".." entry to the root
+ * directory to eliminate back-references to inodes that may
+ * get freed before the child directory is closed. If the fs
+ * gets shrunk, this can lead to dirent inode validation errors.
+ */
+ if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
+ error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
+ tp->t_mountp->m_sb.sb_rootino, 0);
+ if (error)
+ return error;
+ }
} else {
/*
* When removing a non-directory we need to log the parent
@@ -3250,8 +3232,13 @@ xfs_rename(
* in future.
*/
if (wip) {
+ struct xfs_perag *pag;
+
ASSERT(VFS_I(wip)->i_nlink == 0);
- error = xfs_iunlink_remove(tp, wip);
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
+ error = xfs_iunlink_remove(tp, pag, wip);
+ xfs_perag_put(pag);
if (error)
goto out_trans_cancel;
@@ -3673,16 +3660,16 @@ int
xfs_log_force_inode(
struct xfs_inode *ip)
{
- xfs_lsn_t lsn = 0;
+ xfs_csn_t seq = 0;
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (xfs_ipincount(ip))
- lsn = ip->i_itemp->ili_last_lsn;
+ seq = ip->i_itemp->ili_commit_seq;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- if (!lsn)
+ if (!seq)
return 0;
- return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
+ return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
}
/*
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ca826cfba91c..4b6703dbffb8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -431,11 +431,10 @@ void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
-int xfs_dir_ialloc(struct user_namespace *mnt_userns,
- struct xfs_trans **tpp, struct xfs_inode *dp,
- umode_t mode, xfs_nlink_t nlink, dev_t dev,
- prid_t prid, bool need_xattr,
- struct xfs_inode **ipp);
+int xfs_init_new_inode(struct user_namespace *mnt_userns, struct xfs_trans *tp,
+ struct xfs_inode *pip, xfs_ino_t ino, umode_t mode,
+ xfs_nlink_t nlink, dev_t rdev, prid_t prid, bool init_xattrs,
+ struct xfs_inode **ipp);
static inline int
xfs_itruncate_extents(
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6764d12342da..35de30849fcc 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -28,6 +28,20 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
return container_of(lip, struct xfs_inode_log_item, ili_item);
}
+/*
+ * The logged size of an inode fork is always the current size of the inode
+ * fork. This means that when an inode fork is relogged, the size of the logged
+ * region is determined by the current state, not the combination of the
+ * previously logged state + the current state. This relogging behaviour
+ * differs from most other log items, which retain the size of the previously
+ * logged changes when smaller regions are relogged.
+ *
+ * Hence for operations that remove data from the inode fork (e.g. shortform
+ * dir/attr remove, extent format extent removal, etc.), the size of the
+ * relogged inode gets -smaller- rather than staying the same as the previously
+ * logged size, and this can result in the committing transaction reducing the
+ * amount of space being consumed by the CIL.
STATIC void
xfs_inode_item_data_fork_size(
struct xfs_inode_log_item *iip,
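The comment above describes why relogging an inode fork at its current size can hand space back to the CIL when fork data is removed. The following is a minimal user-space sketch of that accounting idea only; the structure and helper names are invented for illustration and are not the kernel's CIL code.

/*
 * Illustrative sketch: with "current size" relogging, the space an item
 * consumes in the checkpoint tracks its current formatted size, so a
 * shrinking inode fork reduces the space the CIL is using.
 */
#include <stdio.h>

struct cil_item {
	int logged_size;	/* space currently accounted to the CIL */
};

/* Hypothetical helper: relog an item at its new formatted size. */
static int relog_item(struct cil_item *item, int new_size, long *cil_used)
{
	int diff = new_size - item->logged_size;

	*cil_used += diff;		/* may be negative: the CIL shrinks */
	item->logged_size = new_size;
	return diff;
}

int main(void)
{
	struct cil_item inode_fork = { .logged_size = 0 };
	long cil_used = 0;

	relog_item(&inode_fork, 512, &cil_used);	/* first log: +512 */
	relog_item(&inode_fork, 128, &cil_used);	/* shortform remove: -384 */
	printf("CIL space used: %ld\n", cil_used);	/* prints 128, not 512 */
	return 0;
}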
@@ -629,9 +643,9 @@ xfs_inode_item_committed(
STATIC void
xfs_inode_item_committing(
struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
+ xfs_csn_t seq)
{
- INODE_ITEM(lip)->ili_last_lsn = commit_lsn;
+ INODE_ITEM(lip)->ili_commit_seq = seq;
return xfs_inode_item_release(lip);
}
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 4b926e32831c..403b45ab9aa2 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -33,7 +33,7 @@ struct xfs_inode_log_item {
unsigned int ili_fields; /* fields to be logged */
unsigned int ili_fsync_fields; /* logged since last fsync */
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
- xfs_lsn_t ili_last_lsn; /* lsn at last transaction */
+ xfs_csn_t ili_commit_seq; /* last transaction commit */
};
static inline int xfs_inode_clean(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
index 7b79518b6c20..e0072a6cd2d3 100644
--- a/fs/xfs/xfs_inode_item_recover.c
+++ b/fs/xfs/xfs_inode_item_recover.c
@@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts(
STATIC void
xfs_log_dinode_to_disk(
struct xfs_log_dinode *from,
- struct xfs_dinode *to)
+ struct xfs_dinode *to,
+ xfs_lsn_t lsn)
{
to->di_magic = cpu_to_be16(from->di_magic);
to->di_mode = cpu_to_be16(from->di_mode);
@@ -182,7 +183,7 @@ xfs_log_dinode_to_disk(
to->di_flags2 = cpu_to_be64(from->di_flags2);
to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to->di_ino = cpu_to_be64(from->di_ino);
- to->di_lsn = cpu_to_be64(from->di_lsn);
+ to->di_lsn = cpu_to_be64(lsn);
memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
uuid_copy(&to->di_uuid, &from->di_uuid);
to->di_flushiter = 0;
@@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2(
}
/*
- * If the inode has an LSN in it, recover the inode only if it's less
- * than the lsn of the transaction we are replaying. Note: we still
- * need to replay an owner change even though the inode is more recent
- * than the transaction as there is no guarantee that all the btree
- * blocks are more recent than this transaction, too.
+ * If the inode has an LSN in it, recover the inode only if the on-disk
+ * inode's LSN is older than the lsn of the transaction we are
+ * replaying. We can have multiple checkpoints with the same start LSN,
+ * so the current LSN being equal to the on-disk LSN doesn't necessarily
+ * mean that the on-disk inode is more recent than the change being
+ * replayed.
+ *
+ * We must check the current_lsn against the on-disk inode here because
+ * we can't trust the log dinode to contain a valid LSN (see the comment
+ * below before replaying the log dinode for details).
+ *
+ * Note: we still need to replay an owner change even though the inode
+ * is more recent than the transaction as there is no guarantee that all
+ * the btree blocks are more recent than this transaction, too.
*/
if (dip->di_version >= 3) {
xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) {
trace_xfs_log_recover_inode_skip(log, in_f);
error = 0;
goto out_owner_change;
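The hunk above relaxes the recovery skip test from ">= 0" to "> 0" because two checkpoints can share the same start LSN. The stand-alone sketch below models the idea; the cycle/block packing and the comparison helper are assumptions based on the usual LSN layout, not the kernel's actual macros.

/*
 * Illustrative sketch: an LSN packs a cycle number in the high 32 bits and a
 * block number in the low 32 bits. Recovery should skip replaying into a
 * buffer/inode only when its on-disk LSN is strictly newer than the LSN of
 * the checkpoint being replayed.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lsn_t;

static lsn_t make_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

/* returns -1/0/+1; comparing the packed value orders by cycle, then block */
static int lsn_cmp(lsn_t a, lsn_t b)
{
	if (a == b)
		return 0;
	return a < b ? -1 : 1;
}

int main(void)
{
	lsn_t disk = make_lsn(7, 2048);		/* LSN stamped in the inode */
	lsn_t replay = make_lsn(7, 2048);	/* checkpoint being replayed */

	/*
	 * The old ">= 0" rule would skip replay here even though the two
	 * checkpoints share a start LSN; the new rule only skips when the
	 * on-disk LSN is strictly newer.
	 */
	printf("skip replay? %s\n", lsn_cmp(disk, replay) > 0 ? "yes" : "no");
	return 0;
}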
@@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2(
goto out_release;
}
- /* recover the log dinode inode into the on disk inode */
- xfs_log_dinode_to_disk(ldip, dip);
+ /*
+ * Recover the log dinode inode into the on disk inode.
+ *
+ * The LSN in the log dinode is garbage - it can be zero or reflect
+ * stale in-memory runtime state that isn't coherent with the changes
+ * logged in this transaction or the changes written to the on-disk
+ * inode. Hence we write the current LSN into the inode because that
+ * matches what xfs_iflush() would write into the inode when flushing
+ * the changes in this transaction.
+ */
+ xfs_log_dinode_to_disk(ldip, dip, current_lsn);
fields = in_f->ilf_fields;
if (fields & XFS_ILOG_DEV)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 7b1796cede10..16039ea10ac9 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1065,7 +1065,24 @@ xfs_fill_fsxattr(
fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
- fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
+ fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
+ /*
+ * Don't let a misaligned extent size hint on a directory
+ * escape to userspace if it won't pass the setattr checks
+ * later.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
+ fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
+ FS_XFLAG_EXTSZINHERIT);
+ fa->fsx_extsize = 0;
+ } else {
+ fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ }
+ }
+
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
fa->fsx_projid = ip->i_projid;
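The change above only reports an inherited extent size hint to userspace when it is an integer multiple of the realtime extent size, since a misaligned hint would fail the later setattr checks. A trivial stand-alone sketch of the alignment check, with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rextsize = 16;		/* rt extent size, in fs blocks */
	uint32_t extsize_hint = 24;	/* inherited hint, in fs blocks */

	if (extsize_hint % rextsize)
		printf("hide hint: %u is not a multiple of %u\n",
		       extsize_hint, rextsize);
	else
		printf("report hint: %u blocks\n", extsize_hint);
	return 0;
}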
@@ -1292,10 +1309,10 @@ xfs_ioctl_setattr_check_extsize(
new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
/*
- * Inode verifiers on older kernels don't check that the extent size
- * hint is an integer multiple of the rt extent size on a directory
- * with both rtinherit and extszinherit flags set. Don't let sysadmins
- * misconfigure directories.
+ * Inode verifiers do not check that the extent size hint is an integer
+ * multiple of the rt extent size on a directory with both rtinherit
+ * and extszinherit flags set. Don't let sysadmins misconfigure
+ * directories.
*/
if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
(new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
@@ -1875,7 +1892,7 @@ out:
static inline int
xfs_fs_eofblocks_from_user(
struct xfs_fs_eofblocks *src,
- struct xfs_eofblocks *dst)
+ struct xfs_icwalk *dst)
{
if (src->eof_version != XFS_EOFBLOCKS_VERSION)
return -EINVAL;
@@ -1887,21 +1904,32 @@ xfs_fs_eofblocks_from_user(
memchr_inv(src->pad64, 0, sizeof(src->pad64)))
return -EINVAL;
- dst->eof_flags = src->eof_flags;
- dst->eof_prid = src->eof_prid;
- dst->eof_min_file_size = src->eof_min_file_size;
-
- dst->eof_uid = INVALID_UID;
+ dst->icw_flags = 0;
+ if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
+ dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
+ if (src->eof_flags & XFS_EOF_FLAGS_UID)
+ dst->icw_flags |= XFS_ICWALK_FLAG_UID;
+ if (src->eof_flags & XFS_EOF_FLAGS_GID)
+ dst->icw_flags |= XFS_ICWALK_FLAG_GID;
+ if (src->eof_flags & XFS_EOF_FLAGS_PRID)
+ dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
+ if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
+ dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;
+
+ dst->icw_prid = src->eof_prid;
+ dst->icw_min_file_size = src->eof_min_file_size;
+
+ dst->icw_uid = INVALID_UID;
if (src->eof_flags & XFS_EOF_FLAGS_UID) {
- dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid);
- if (!uid_valid(dst->eof_uid))
+ dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
+ if (!uid_valid(dst->icw_uid))
return -EINVAL;
}
- dst->eof_gid = INVALID_GID;
+ dst->icw_gid = INVALID_GID;
if (src->eof_flags & XFS_EOF_FLAGS_GID) {
- dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid);
- if (!gid_valid(dst->eof_gid))
+ dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
+ if (!gid_valid(dst->icw_gid))
return -EINVAL;
}
return 0;
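xfs_fs_eofblocks_from_user() now translates each legacy eof_flags bit into the internal icw_flags namespace explicitly, so the UAPI bit values no longer need to match the in-kernel ones. The sketch below shows the same translate-and-reject-unknown-bits pattern in a table-driven form; every flag name and value here is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define LEGACY_SYNC	(1u << 0)
#define LEGACY_UID	(1u << 1)

#define NEW_SYNC	(1u << 4)
#define NEW_UID		(1u << 7)

static const struct { uint32_t legacy, internal; } flag_map[] = {
	{ LEGACY_SYNC, NEW_SYNC },
	{ LEGACY_UID,  NEW_UID },
};

static int translate_flags(uint32_t legacy, uint32_t *internal)
{
	uint32_t out = 0, seen = 0;
	size_t i;

	for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
		if (legacy & flag_map[i].legacy) {
			out |= flag_map[i].internal;
			seen |= flag_map[i].legacy;
		}
	}
	if (legacy & ~seen)
		return -1;	/* unknown bit: reject, like -EINVAL */
	*internal = out;
	return 0;
}

int main(void)
{
	uint32_t icw;

	if (translate_flags(LEGACY_SYNC | LEGACY_UID, &icw) == 0)
		printf("internal flags: 0x%x\n", icw);
	return 0;
}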
@@ -2164,8 +2192,8 @@ xfs_file_ioctl(
return xfs_errortag_clearall(mp);
case XFS_IOC_FREE_EOFBLOCKS: {
- struct xfs_fs_eofblocks eofb;
- struct xfs_eofblocks keofb;
+ struct xfs_fs_eofblocks eofb;
+ struct xfs_icwalk icw;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -2176,14 +2204,14 @@ xfs_file_ioctl(
if (copy_from_user(&eofb, arg, sizeof(eofb)))
return -EFAULT;
- error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
+ error = xfs_fs_eofblocks_from_user(&eofb, &icw);
if (error)
return error;
- trace_xfs_ioc_free_eofblocks(mp, &keofb, _RET_IP_);
+ trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);
sb_start_write(mp->m_super);
- error = xfs_blockgc_free_space(mp, &keofb);
+ error = xfs_blockgc_free_space(mp, &icw);
sb_end_write(mp->m_super);
return error;
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index dfe24b7f26e5..93c082db04b7 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -543,7 +543,7 @@ xfs_stat_blksize(
* always return the realtime extent size.
*/
if (XFS_IS_REALTIME_INODE(ip))
- return xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
+ return XFS_FSB_TO_B(mp, xfs_get_extsz_hint(ip));
/*
* Allow large block sizes to be reported to userspace programs if the
@@ -560,7 +560,7 @@ xfs_stat_blksize(
*/
if (mp->m_flags & XFS_MOUNT_LARGEIO) {
if (mp->m_swidth)
- return mp->m_swidth << mp->m_sb.sb_blocklog;
+ return XFS_FSB_TO_B(mp, mp->m_swidth);
if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
return 1U << mp->m_allocsize_log;
}
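The two hunks above replace open-coded shifts by sb_blocklog with XFS_FSB_TO_B(). Assuming the macro's usual definition (a byte conversion by shifting with the block size log2), the reported values are unchanged; a tiny worked example with assumed numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sb_blocklog = 12;	/* 4096-byte filesystem blocks */
	uint64_t extsz_fsb = 16;	/* extent size hint, in fs blocks */

	uint64_t bytes_shift = extsz_fsb << sb_blocklog;
	uint64_t bytes_mult = extsz_fsb * (1ULL << sb_blocklog);

	/* both print 65536 */
	printf("%llu == %llu\n", (unsigned long long)bytes_shift,
	       (unsigned long long)bytes_mult);
	return 0;
}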
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index c4a340f1f1e1..917d51eefee3 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -21,6 +21,7 @@
#include "xfs_health.h"
#include "xfs_trans.h"
#include "xfs_pwork.h"
+#include "xfs_ag.h"
/*
* Walking Inodes in the Filesystem
@@ -51,6 +52,7 @@ struct xfs_iwalk_ag {
struct xfs_mount *mp;
struct xfs_trans *tp;
+ struct xfs_perag *pag;
/* Where do we start the traversal? */
xfs_ino_t startino;
@@ -90,7 +92,7 @@ struct xfs_iwalk_ag {
STATIC void
xfs_iwalk_ichunk_ra(
struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_inobt_rec_incore *irec)
{
struct xfs_ino_geometry *igeo = M_IGEO(mp);
@@ -106,7 +108,7 @@ xfs_iwalk_ichunk_ra(
imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
if (imask & ~irec->ir_free) {
- xfs_btree_reada_bufs(mp, agno, agbno,
+ xfs_btree_reada_bufs(mp, pag->pag_agno, agbno,
igeo->blocks_per_cluster,
&xfs_inode_buf_ops);
}
@@ -174,26 +176,25 @@ xfs_iwalk_free(
/* For each inuse inode in each cached inobt record, call our function. */
STATIC int
xfs_iwalk_ag_recs(
- struct xfs_iwalk_ag *iwag)
+ struct xfs_iwalk_ag *iwag)
{
- struct xfs_mount *mp = iwag->mp;
- struct xfs_trans *tp = iwag->tp;
- xfs_ino_t ino;
- unsigned int i, j;
- xfs_agnumber_t agno;
- int error;
+ struct xfs_mount *mp = iwag->mp;
+ struct xfs_trans *tp = iwag->tp;
+ struct xfs_perag *pag = iwag->pag;
+ xfs_ino_t ino;
+ unsigned int i, j;
+ int error;
- agno = XFS_INO_TO_AGNO(mp, iwag->startino);
for (i = 0; i < iwag->nr_recs; i++) {
struct xfs_inobt_rec_incore *irec = &iwag->recs[i];
- trace_xfs_iwalk_ag_rec(mp, agno, irec);
+ trace_xfs_iwalk_ag_rec(mp, pag->pag_agno, irec);
if (xfs_pwork_want_abort(&iwag->pwork))
return 0;
if (iwag->inobt_walk_fn) {
- error = iwag->inobt_walk_fn(mp, tp, agno, irec,
+ error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec,
iwag->data);
if (error)
return error;
@@ -211,7 +212,8 @@ xfs_iwalk_ag_recs(
continue;
/* Otherwise call our function. */
- ino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino + j);
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
+ irec->ir_startino + j);
error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
if (error)
return error;
@@ -257,7 +259,6 @@ xfs_iwalk_del_inobt(
STATIC int
xfs_iwalk_ag_start(
struct xfs_iwalk_ag *iwag,
- xfs_agnumber_t agno,
xfs_agino_t agino,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp,
@@ -265,12 +266,13 @@ xfs_iwalk_ag_start(
{
struct xfs_mount *mp = iwag->mp;
struct xfs_trans *tp = iwag->tp;
+ struct xfs_perag *pag = iwag->pag;
struct xfs_inobt_rec_incore *irec;
int error;
/* Set up a fresh cursor and empty the inobt cache. */
iwag->nr_recs = 0;
- error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
+ error = xfs_inobt_cur(mp, tp, pag, XFS_BTNUM_INO, curpp, agi_bpp);
if (error)
return error;
@@ -304,7 +306,7 @@ xfs_iwalk_ag_start(
if (XFS_IS_CORRUPT(mp, *has_more != 1))
return -EFSCORRUPTED;
- iwag->lastino = XFS_AGINO_TO_INO(mp, agno,
+ iwag->lastino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
irec->ir_startino + XFS_INODES_PER_CHUNK - 1);
/*
@@ -345,7 +347,6 @@ out_advance:
STATIC int
xfs_iwalk_run_callbacks(
struct xfs_iwalk_ag *iwag,
- xfs_agnumber_t agno,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp,
int *has_more)
@@ -376,7 +377,7 @@ xfs_iwalk_run_callbacks(
return 0;
/* ...and recreate the cursor just past where we left off. */
- error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
+ error = xfs_inobt_cur(mp, tp, iwag->pag, XFS_BTNUM_INO, curpp, agi_bpp);
if (error)
return error;
@@ -390,17 +391,17 @@ xfs_iwalk_ag(
{
struct xfs_mount *mp = iwag->mp;
struct xfs_trans *tp = iwag->tp;
+ struct xfs_perag *pag = iwag->pag;
struct xfs_buf *agi_bp = NULL;
struct xfs_btree_cur *cur = NULL;
- xfs_agnumber_t agno;
xfs_agino_t agino;
int has_more;
int error = 0;
/* Set up our cursor at the right place in the inode btree. */
- agno = XFS_INO_TO_AGNO(mp, iwag->startino);
+ ASSERT(pag->pag_agno == XFS_INO_TO_AGNO(mp, iwag->startino));
agino = XFS_INO_TO_AGINO(mp, iwag->startino);
- error = xfs_iwalk_ag_start(iwag, agno, agino, &cur, &agi_bp, &has_more);
+ error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more);
while (!error && has_more) {
struct xfs_inobt_rec_incore *irec;
@@ -417,7 +418,7 @@ xfs_iwalk_ag(
break;
/* Make sure that we always move forward. */
- rec_fsino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino);
+ rec_fsino = XFS_AGINO_TO_INO(mp, pag->pag_agno, irec->ir_startino);
if (iwag->lastino != NULLFSINO &&
XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
error = -EFSCORRUPTED;
@@ -438,7 +439,7 @@ xfs_iwalk_ag(
* walking the inodes.
*/
if (iwag->iwalk_fn)
- xfs_iwalk_ichunk_ra(mp, agno, irec);
+ xfs_iwalk_ichunk_ra(mp, pag, irec);
/*
* If there's space in the buffer for more records, increment
@@ -458,15 +459,14 @@ xfs_iwalk_ag(
* we would be if we had been able to increment like above.
*/
ASSERT(has_more);
- error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp,
- &has_more);
+ error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
}
if (iwag->nr_recs == 0 || error)
goto out;
/* Walk the unprocessed records in the cache. */
- error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp, &has_more);
+ error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
out:
xfs_iwalk_del_inobt(tp, &cur, &agi_bp, error);
@@ -555,6 +555,7 @@ xfs_iwalk(
.pwork = XFS_PWORK_SINGLE_THREADED,
.lastino = NULLFSINO,
};
+ struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -565,15 +566,19 @@ xfs_iwalk(
if (error)
return error;
- for (; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
+ iwag.pag = pag;
error = xfs_iwalk_ag(&iwag);
if (error)
break;
iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
+ iwag.pag = NULL;
}
+ if (iwag.pag)
+ xfs_perag_put(pag);
xfs_iwalk_free(&iwag);
return error;
}
@@ -598,6 +603,7 @@ xfs_iwalk_ag_work(
error = xfs_iwalk_ag(iwag);
xfs_iwalk_free(iwag);
out:
+ xfs_perag_put(iwag->pag);
kmem_free(iwag);
return error;
}
@@ -617,6 +623,7 @@ xfs_iwalk_threaded(
void *data)
{
struct xfs_pwork_ctl pctl;
+ struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -627,7 +634,7 @@ xfs_iwalk_threaded(
if (error)
return error;
- for (; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
struct xfs_iwalk_ag *iwag;
if (xfs_pwork_ctl_want_abort(&pctl))
@@ -635,17 +642,25 @@ xfs_iwalk_threaded(
iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
iwag->mp = mp;
+
+ /*
+ * perag is being handed off to async work, so take another
+ * reference for the async work to release.
+ */
+ atomic_inc(&pag->pag_ref);
+ iwag->pag = pag;
iwag->iwalk_fn = iwalk_fn;
iwag->data = data;
iwag->startino = startino;
iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
iwag->lastino = NULLFSINO;
xfs_pwork_queue(&pctl, &iwag->pwork);
- startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
+ startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
}
-
+ if (pag)
+ xfs_perag_put(pag);
if (polled)
xfs_pwork_poll(&pctl);
return xfs_pwork_destroy(&pctl);
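The comment above explains why xfs_iwalk_threaded() bumps the perag reference count before queueing each per-AG work item: the worker releases that reference when it finishes. The pthreads sketch below models the same handoff pattern in user space with a plain atomic refcount; all names are illustrative and nothing here is kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pag {
	atomic_int ref;
	int agno;
};

static void pag_put(struct pag *p)
{
	/* last reference frees the object, whichever side drops it */
	if (atomic_fetch_sub(&p->ref, 1) == 1)
		free(p);
}

static void *walk_ag_work(void *arg)
{
	struct pag *p = arg;

	printf("walking AG %d\n", p->agno);
	pag_put(p);		/* worker releases the handed-off reference */
	return NULL;
}

int main(void)
{
	struct pag *p = calloc(1, sizeof(*p));
	pthread_t t;

	atomic_store(&p->ref, 1);	/* submitter's own reference */
	atomic_fetch_add(&p->ref, 1);	/* extra reference for the worker */
	pthread_create(&t, NULL, walk_ag_work, p);

	pag_put(p);			/* submitter is done with it */
	pthread_join(t, NULL);
	return 0;
}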
@@ -715,6 +730,7 @@ xfs_inobt_walk(
.pwork = XFS_PWORK_SINGLE_THREADED,
.lastino = NULLFSINO,
};
+ struct xfs_perag *pag;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -725,15 +741,19 @@ xfs_inobt_walk(
if (error)
return error;
- for (; agno < mp->m_sb.sb_agcount; agno++) {
+ for_each_perag_from(mp, agno, pag) {
+ iwag.pag = pag;
error = xfs_iwalk_ag(&iwag);
if (error)
break;
- iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
+ iwag.startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
break;
+ iwag.pag = NULL;
}
+ if (iwag.pag)
+ xfs_perag_put(pag);
xfs_iwalk_free(&iwag);
return error;
}
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 7688663b9773..c174262a074e 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -196,6 +196,8 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
char *data, unsigned int op);
+void xfs_flush_bdev_async(struct bio *bio, struct block_device *bdev,
+ struct completion *done);
#define ASSERT_ALWAYS(expr) \
(likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a002425377b5..60ac5fd63f1e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -78,13 +78,12 @@ xlog_verify_iclog(
STATIC void
xlog_verify_tail_lsn(
struct xlog *log,
- struct xlog_in_core *iclog,
- xfs_lsn_t tail_lsn);
+ struct xlog_in_core *iclog);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
-#define xlog_verify_tail_lsn(a,b,c)
+#define xlog_verify_tail_lsn(a,b)
#endif
STATIC int
@@ -487,67 +486,81 @@ out_error:
return error;
}
-static bool
-__xlog_state_release_iclog(
- struct xlog *log,
- struct xlog_in_core *iclog)
-{
- lockdep_assert_held(&log->l_icloglock);
-
- if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
- /* update tail before writing to iclog */
- xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
- iclog->ic_state = XLOG_STATE_SYNCING;
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
- xlog_verify_tail_lsn(log, iclog, tail_lsn);
- /* cycle incremented when incrementing curr_block */
- return true;
- }
-
- ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
- return false;
-}
-
/*
 * Flush iclog to disk if this is the last reference to the given iclog and
 * it is in the WANT_SYNC state.
+ *
+ * If the caller passes in a non-zero @old_tail_lsn and the current log tail
+ * does not match, there may be metadata on disk that must be persisted before
+ * this iclog is written. To satisfy that requirement, set the
+ * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
+ * log tail value.
+ *
+ * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
+ * log tail is updated correctly. NEED_FUA indicates that the iclog will be
+ * written to stable storage, and implies that a commit record is contained
+ * within the iclog. We need to ensure that the log tail does not move beyond
+ * the tail that the first commit record in the iclog ordered against, otherwise
+ * correct recovery of that checkpoint becomes dependent on future operations
+ * performed on this iclog.
+ *
+ * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
+ * current tail into the iclog. Once the iclog tail is set, future operations must
+ * not modify it, otherwise they potentially violate ordering constraints for
+ * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
+ * the iclog will get zeroed on activation of the iclog after sync, so we
+ * always capture the tail lsn on the iclog on the first NEED_FUA release
+ * regardless of the number of active reference counts on this iclog.
*/
-static int
+
+int
xlog_state_release_iclog(
struct xlog *log,
- struct xlog_in_core *iclog)
+ struct xlog_in_core *iclog,
+ xfs_lsn_t old_tail_lsn)
{
+ xfs_lsn_t tail_lsn;
lockdep_assert_held(&log->l_icloglock);
+ trace_xlog_iclog_release(iclog, _RET_IP_);
if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
- if (atomic_dec_and_test(&iclog->ic_refcnt) &&
- __xlog_state_release_iclog(log, iclog)) {
- spin_unlock(&log->l_icloglock);
- xlog_sync(log, iclog);
- spin_lock(&log->l_icloglock);
- }
+ /*
+ * Grabbing the current log tail needs to be atomic w.r.t. the writing
+ * of the tail LSN into the iclog so we guarantee that the log tail does
+ * not move between deciding if a cache flush is required and writing
+ * the LSN into the iclog below.
+ */
+ if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ tail_lsn = xlog_assign_tail_lsn(log->l_mp);
- return 0;
-}
+ if (old_tail_lsn && tail_lsn != old_tail_lsn)
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
-void
-xfs_log_release_iclog(
- struct xlog_in_core *iclog)
-{
- struct xlog *log = iclog->ic_log;
- bool sync = false;
+ if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
+ !iclog->ic_header.h_tail_lsn)
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ }
- if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
- if (iclog->ic_state != XLOG_STATE_IOERROR)
- sync = __xlog_state_release_iclog(log, iclog);
- spin_unlock(&log->l_icloglock);
+ if (!atomic_dec_and_test(&iclog->ic_refcnt))
+ return 0;
+
+ if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ return 0;
}
- if (sync)
- xlog_sync(log, iclog);
+ iclog->ic_state = XLOG_STATE_SYNCING;
+ if (!iclog->ic_header.h_tail_lsn)
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ xlog_verify_tail_lsn(log, iclog);
+ trace_xlog_iclog_syncing(iclog, _RET_IP_);
+
+ spin_unlock(&log->l_icloglock);
+ xlog_sync(log, iclog);
+ spin_lock(&log->l_icloglock);
+ return 0;
}
/*
@@ -770,6 +783,9 @@ xfs_log_mount_finish(
if (readonly)
mp->m_flags |= XFS_MOUNT_RDONLY;
+ /* Make sure the log is dead if we're returning failure. */
+ ASSERT(!error || (mp->m_log->l_flags & XLOG_IO_ERROR));
+
return error;
}
@@ -786,16 +802,34 @@ xfs_log_mount_cancel(
}
/*
- * Wait for the iclog to be written disk, or return an error if the log has been
- * shut down.
+ * Flush out the iclog to disk ensuring that device caches are flushed and
+ * the iclog hits stable storage before any completion waiters are woken.
*/
-static int
+static inline int
+xlog_force_iclog(
+ struct xlog_in_core *iclog)
+{
+ atomic_inc(&iclog->ic_refcnt);
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+ if (iclog->ic_state == XLOG_STATE_ACTIVE)
+ xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
+ return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+}
+
+/*
+ * Wait for the iclog and all prior iclogs to be written to disk as required by the
+ * log force state machine. Waiting on ic_force_wait ensures iclog completions
+ * have been ordered and callbacks run before we are woken here, hence
+ * guaranteeing that all the iclogs up to this one are on stable storage.
+ */
+int
xlog_wait_on_iclog(
struct xlog_in_core *iclog)
__releases(iclog->ic_log->l_icloglock)
{
struct xlog *log = iclog->ic_log;
+ trace_xlog_iclog_wait_on(iclog, _RET_IP_);
if (!XLOG_FORCED_SHUTDOWN(log) &&
iclog->ic_state != XLOG_STATE_ACTIVE &&
iclog->ic_state != XLOG_STATE_DIRTY) {
@@ -818,9 +852,7 @@ xlog_wait_on_iclog(
static int
xlog_write_unmount_record(
struct xlog *log,
- struct xlog_ticket *ticket,
- xfs_lsn_t *lsn,
- uint flags)
+ struct xlog_ticket *ticket)
{
struct xfs_unmount_log_format ulf = {
.magic = XLOG_UNMOUNT_TYPE,
@@ -837,7 +869,8 @@ xlog_write_unmount_record(
/* account for space used by record data */
ticket->t_curr_res -= sizeof(ulf);
- return xlog_write(log, &vec, ticket, lsn, NULL, flags, false);
+
+ return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
}
/*
@@ -851,15 +884,13 @@ xlog_unmount_write(
struct xfs_mount *mp = log->l_mp;
struct xlog_in_core *iclog;
struct xlog_ticket *tic = NULL;
- xfs_lsn_t lsn;
- uint flags = XLOG_UNMOUNT_TRANS;
int error;
error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
if (error)
goto out_err;
- error = xlog_write_unmount_record(log, tic, &lsn, flags);
+ error = xlog_write_unmount_record(log, tic);
/*
* At this point, we're umounting anyway, so there's no point in
* transitioning log state to IOERROR. Just continue...
@@ -870,13 +901,7 @@ out_err:
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- if (iclog->ic_state == XLOG_STATE_ACTIVE)
- xlog_state_switch_iclogs(log, iclog, 0);
- else
- ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
- iclog->ic_state == XLOG_STATE_IOERROR);
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_force_iclog(iclog);
xlog_wait_on_iclog(iclog);
if (tic) {
@@ -1401,6 +1426,11 @@ xlog_alloc_log(
xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
+ if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
+ log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
+ else
+ log->l_iclog_roundoff = BBSIZE;
+
xlog_grant_head_init(&log->l_reserve_head);
xlog_grant_head_init(&log->l_write_head);
@@ -1479,7 +1509,6 @@ xlog_alloc_log(
iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_log = log;
atomic_set(&iclog->ic_refcnt, 0);
- spin_lock_init(&iclog->ic_callback_lock);
INIT_LIST_HEAD(&iclog->ic_callbacks);
iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
@@ -1546,8 +1575,7 @@ xlog_commit_record(
if (XLOG_FORCED_SHUTDOWN(log))
return -EIO;
- error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS,
- false);
+ error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS);
if (error)
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
return error;
@@ -1753,10 +1781,10 @@ xlog_write_iclog(
struct xlog *log,
struct xlog_in_core *iclog,
uint64_t bno,
- unsigned int count,
- bool need_flush)
+ unsigned int count)
{
ASSERT(bno < log->l_logBBsize);
+ trace_xlog_iclog_write(iclog, _RET_IP_);
/*
* We lock the iclogbufs here so that we can serialise against I/O
@@ -1792,10 +1820,22 @@ xlog_write_iclog(
* writeback throttle from throttling log writes behind background
* metadata writeback and causing priority inversions.
*/
- iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC |
- REQ_IDLE | REQ_FUA;
- if (need_flush)
+ iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
+ if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
+ /*
+ * For external log devices, we also need to flush the data
+ * device cache first to ensure all metadata writeback covered
+ * by the LSN in this iclog is on stable storage. This is slow,
+ * but it *must* complete before we issue the external log IO.
+ */
+ if (log->l_targ != log->l_mp->m_ddev_targp)
+ blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
+ }
+ if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
+ iclog->ic_bio.bi_opf |= REQ_FUA;
+
+ iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
@@ -1854,29 +1894,15 @@ xlog_calc_iclog_size(
uint32_t *roundoff)
{
uint32_t count_init, count;
- bool use_lsunit;
-
- use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
- log->l_mp->m_sb.sb_logsunit > 1;
/* Add for LR header */
count_init = log->l_iclog_hsize + iclog->ic_offset;
+ count = roundup(count_init, log->l_iclog_roundoff);
- /* Round out the log write size */
- if (use_lsunit) {
- /* we have a v2 stripe unit to use */
- count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
- } else {
- count = BBTOB(BTOBB(count_init));
- }
-
- ASSERT(count >= count_init);
*roundoff = count - count_init;
- if (use_lsunit)
- ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit);
- else
- ASSERT(*roundoff < BBTOB(1));
+ ASSERT(count >= count_init);
+ ASSERT(*roundoff < log->l_iclog_roundoff);
return count;
}
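With l_iclog_roundoff cached at mount time, xlog_calc_iclog_size() above collapses to a single roundup(). A small worked example with assumed values (512-byte basic blocks versus a 32k log stripe unit) shows the padding that results:

#include <stdint.h>
#include <stdio.h>

#define BBSIZE 512u

static uint32_t round_up_to(uint32_t x, uint32_t to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	uint32_t header_and_data = 5000;	/* l_iclog_hsize + ic_offset */
	uint32_t roundoff_v1 = BBSIZE;		/* no v2 log stripe unit */
	uint32_t roundoff_v2 = 32768;		/* sb_logsunit, e.g. 32k */

	printf("v1 write size: %u (pad %u)\n",
	       round_up_to(header_and_data, roundoff_v1),
	       round_up_to(header_and_data, roundoff_v1) - header_and_data);
	printf("v2 write size: %u (pad %u)\n",
	       round_up_to(header_and_data, roundoff_v2),
	       round_up_to(header_and_data, roundoff_v2) - header_and_data);
	return 0;
}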
@@ -1912,9 +1938,9 @@ xlog_sync(
unsigned int roundoff; /* roundoff to BB or stripe */
uint64_t bno;
unsigned int size;
- bool need_flush = true, split = false;
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
+ trace_xlog_iclog_sync(iclog, _RET_IP_);
count = xlog_calc_iclog_size(log, iclog, &roundoff);
@@ -1937,10 +1963,8 @@ xlog_sync(
bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
/* Do we need to split this write into 2 parts? */
- if (bno + BTOBB(count) > log->l_logBBsize) {
+ if (bno + BTOBB(count) > log->l_logBBsize)
xlog_split_iclog(log, &iclog->ic_header, bno, count);
- split = true;
- }
/* calculate the checksum */
iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
@@ -1961,22 +1985,8 @@ xlog_sync(
be64_to_cpu(iclog->ic_header.h_lsn));
}
#endif
-
- /*
- * Flush the data device before flushing the log to make sure all meta
- * data written back from the AIL actually made it to disk before
- * stamping the new log tail LSN into the log buffer. For an external
- * log we need to issue the flush explicitly, and unfortunately
- * synchronously here; for an internal log we can simply use the block
- * layer state machine for preflushes.
- */
- if (log->l_targ != log->l_mp->m_ddev_targp || split) {
- xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
- need_flush = false;
- }
-
xlog_verify_iclog(log, iclog, count);
- xlog_write_iclog(log, iclog, bno, count, need_flush);
+ xlog_write_iclog(log, iclog, bno, count);
}
/*
@@ -2158,13 +2168,16 @@ static int
xlog_write_calc_vec_length(
struct xlog_ticket *ticket,
struct xfs_log_vec *log_vector,
- bool need_start_rec)
+ uint optype)
{
struct xfs_log_vec *lv;
- int headers = need_start_rec ? 1 : 0;
+ int headers = 0;
int len = 0;
int i;
+ if (optype & XLOG_START_TRANS)
+ headers++;
+
for (lv = log_vector; lv; lv = lv->lv_next) {
/* we don't write ordered log vectors */
if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
@@ -2332,7 +2345,7 @@ xlog_write_copy_finish(
return 0;
release_iclog:
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, 0);
spin_unlock(&log->l_icloglock);
return error;
}
@@ -2384,8 +2397,7 @@ xlog_write(
struct xlog_ticket *ticket,
xfs_lsn_t *start_lsn,
struct xlog_in_core **commit_iclog,
- uint flags,
- bool need_start_rec)
+ uint optype)
{
struct xlog_in_core *iclog = NULL;
struct xfs_log_vec *lv = log_vector;
@@ -2413,8 +2425,9 @@ xlog_write(
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
- len = xlog_write_calc_vec_length(ticket, log_vector, need_start_rec);
- *start_lsn = 0;
+ len = xlog_write_calc_vec_length(ticket, log_vector, optype);
+ if (start_lsn)
+ *start_lsn = 0;
while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
void *ptr;
int log_offset;
@@ -2427,8 +2440,8 @@ xlog_write(
ASSERT(log_offset <= iclog->ic_size - 1);
ptr = iclog->ic_datap + log_offset;
- /* start_lsn is the first lsn written to. That's all we need. */
- if (!*start_lsn)
+ /* Start_lsn is the first lsn written to. */
+ if (start_lsn && !*start_lsn)
*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
/*
@@ -2441,6 +2454,7 @@ xlog_write(
int copy_len;
int copy_off;
bool ordered = false;
+ bool wrote_start_rec = false;
/* ordered log vectors have no regions to write */
if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
@@ -2458,13 +2472,15 @@ xlog_write(
* write a start record. Only do this for the first
* iclog we write to.
*/
- if (need_start_rec) {
+ if (optype & XLOG_START_TRANS) {
xlog_write_start_rec(ptr, ticket);
xlog_write_adv_cnt(&ptr, &len, &log_offset,
sizeof(struct xlog_op_header));
+ optype &= ~XLOG_START_TRANS;
+ wrote_start_rec = true;
}
- ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
+ ophdr = xlog_write_setup_ophdr(log, ptr, ticket, optype);
if (!ophdr)
return -EIO;
@@ -2495,14 +2511,13 @@ xlog_write(
}
copy_len += sizeof(struct xlog_op_header);
record_cnt++;
- if (need_start_rec) {
+ if (wrote_start_rec) {
copy_len += sizeof(struct xlog_op_header);
record_cnt++;
- need_start_rec = false;
}
data_cnt += contwr ? copy_len : 0;
- error = xlog_write_copy_finish(log, iclog, flags,
+ error = xlog_write_copy_finish(log, iclog, optype,
&record_cnt, &data_cnt,
&partial_copy,
&partial_copy_len,
@@ -2546,10 +2561,10 @@ next_lv:
spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
if (commit_iclog) {
- ASSERT(flags & XLOG_COMMIT_TRANS);
+ ASSERT(optype & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
} else {
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, 0);
}
spin_unlock(&log->l_icloglock);
@@ -2562,6 +2577,7 @@ xlog_state_activate_iclog(
int *iclogs_changed)
{
ASSERT(list_empty_careful(&iclog->ic_callbacks));
+ trace_xlog_iclog_activate(iclog, _RET_IP_);
/*
* If the number of ops in this iclog indicate it just contains the
@@ -2586,6 +2602,7 @@ xlog_state_activate_iclog(
memset(iclog->ic_header.h_cycle_data, 0,
sizeof(iclog->ic_header.h_cycle_data));
iclog->ic_header.h_lsn = 0;
+ iclog->ic_header.h_tail_lsn = 0;
}
/*
@@ -2652,6 +2669,8 @@ xlog_state_clean_iclog(
{
int iclogs_changed = 0;
+ trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
+
dirty_iclog->ic_state = XLOG_STATE_DIRTY;
xlog_state_activate_iclogs(log, &iclogs_changed);
@@ -2711,6 +2730,7 @@ xlog_state_set_callback(
struct xlog_in_core *iclog,
xfs_lsn_t header_lsn)
{
+ trace_xlog_iclog_callback(iclog, _RET_IP_);
iclog->ic_state = XLOG_STATE_CALLBACK;
ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
@@ -2776,43 +2796,6 @@ xlog_state_iodone_process_iclog(
}
}
-/*
- * Keep processing entries in the iclog callback list until we come around and
- * it is empty. We need to atomically see that the list is empty and change the
- * state to DIRTY so that we don't miss any more callbacks being added.
- *
- * This function is called with the icloglock held and returns with it held. We
- * drop it while running callbacks, however, as holding it over thousands of
- * callbacks is unnecessary and causes excessive contention if we do.
- */
-static void
-xlog_state_do_iclog_callbacks(
- struct xlog *log,
- struct xlog_in_core *iclog)
- __releases(&log->l_icloglock)
- __acquires(&log->l_icloglock)
-{
- spin_unlock(&log->l_icloglock);
- spin_lock(&iclog->ic_callback_lock);
- while (!list_empty(&iclog->ic_callbacks)) {
- LIST_HEAD(tmp);
-
- list_splice_init(&iclog->ic_callbacks, &tmp);
-
- spin_unlock(&iclog->ic_callback_lock);
- xlog_cil_process_committed(&tmp);
- spin_lock(&iclog->ic_callback_lock);
- }
-
- /*
- * Pick up the icloglock while still holding the callback lock so we
- * serialise against anyone trying to add more callbacks to this iclog
- * now we've finished processing.
- */
- spin_lock(&log->l_icloglock);
- spin_unlock(&iclog->ic_callback_lock);
-}
-
STATIC void
xlog_state_do_callback(
struct xlog *log)
@@ -2841,6 +2824,8 @@ xlog_state_do_callback(
repeats++;
do {
+ LIST_HEAD(cb_list);
+
if (xlog_state_iodone_process_iclog(log, iclog,
&ioerror))
break;
@@ -2850,13 +2835,15 @@ xlog_state_do_callback(
iclog = iclog->ic_next;
continue;
}
+ list_splice_init(&iclog->ic_callbacks, &cb_list);
+ spin_unlock(&log->l_icloglock);
- /*
- * Running callbacks will drop the icloglock which means
- * we'll have to run at least one more complete loop.
- */
+ trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
+ xlog_cil_process_committed(&cb_list);
+ trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
cycled_icloglock = true;
- xlog_state_do_iclog_callbacks(log, iclog);
+
+ spin_lock(&log->l_icloglock);
if (XLOG_FORCED_SHUTDOWN(log))
wake_up_all(&iclog->ic_force_wait);
else
@@ -2902,6 +2889,7 @@ xlog_state_done_syncing(
spin_lock(&log->l_icloglock);
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
+ trace_xlog_iclog_sync_done(iclog, _RET_IP_);
/*
* If we got an error, either on the first buffer, or in the case of
@@ -2975,6 +2963,8 @@ restart:
atomic_inc(&iclog->ic_refcnt); /* prevents sync */
log_offset = iclog->ic_offset;
+ trace_xlog_iclog_get_space(iclog, _RET_IP_);
+
/* On the 1st write to an iclog, figure out lsn. This works
* if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
* committing to. If the offset is set, that's how many blocks
@@ -3013,7 +3003,7 @@ restart:
* reference to the iclog.
*/
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, 0);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -3140,6 +3130,7 @@ xlog_state_switch_iclogs(
{
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
assert_spin_locked(&log->l_icloglock);
+ trace_xlog_iclog_switch(iclog, _RET_IP_);
if (!eventual_size)
eventual_size = iclog->ic_offset;
@@ -3152,9 +3143,8 @@ xlog_state_switch_iclogs(
log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
/* Round up to next log-sunit */
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
- log->l_mp->m_sb.sb_logsunit > 1) {
- uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
+ if (log->l_iclog_roundoff > BBSIZE) {
+ uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
}
@@ -3178,6 +3168,35 @@ xlog_state_switch_iclogs(
}
/*
+ * Force the iclog to disk and check if the iclog has been completed before
+ * xlog_force_iclog() returns. This can happen on synchronous (e.g.
+ * pmem) or fast async storage because we drop the icloglock to issue the IO.
+ * If completion has already occurred, tell the caller so that it can avoid an
+ * unnecessary wait on the iclog.
+ */
+static int
+xlog_force_and_check_iclog(
+ struct xlog_in_core *iclog,
+ bool *completed)
+{
+ xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ int error;
+
+ *completed = false;
+ error = xlog_force_iclog(iclog);
+ if (error)
+ return error;
+
+ /*
+ * If the iclog has already been completed and reused the header LSN
+ * will have been rewritten by completion
+ */
+ if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+ *completed = true;
+ return 0;
+}
+
+/*
* Write out all data in the in-core log as of this exact moment in time.
*
* Data may be written to the in-core log during this call. However,
@@ -3211,7 +3230,6 @@ xfs_log_force(
{
struct xlog *log = mp->m_log;
struct xlog_in_core *iclog;
- xfs_lsn_t lsn;
XFS_STATS_INC(mp, xs_log_force);
trace_xfs_log_force(mp, 0, _RET_IP_);
@@ -3223,6 +3241,8 @@ xfs_log_force(
if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
+ trace_xlog_iclog_force(iclog, _RET_IP_);
+
if (iclog->ic_state == XLOG_STATE_DIRTY ||
(iclog->ic_state == XLOG_STATE_ACTIVE &&
atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
@@ -3237,39 +3257,33 @@ xfs_log_force(
iclog = iclog->ic_prev;
} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
if (atomic_read(&iclog->ic_refcnt) == 0) {
- /*
- * We are the only one with access to this iclog.
- *
- * Flush it out now. There should be a roundoff of zero
- * to show that someone has already taken care of the
- * roundoff from the previous sync.
- */
- atomic_inc(&iclog->ic_refcnt);
- lsn = be64_to_cpu(iclog->ic_header.h_lsn);
- xlog_state_switch_iclogs(log, iclog, 0);
- if (xlog_state_release_iclog(log, iclog))
+ /* We have exclusive access to this iclog. */
+ bool completed;
+
+ if (xlog_force_and_check_iclog(iclog, &completed))
goto out_error;
- if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+ if (completed)
goto out_unlock;
} else {
/*
- * Someone else is writing to this iclog.
- *
- * Use its call to flush out the data. However, the
- * other thread may not force out this LR, so we mark
- * it WANT_SYNC.
+ * Someone else is still writing to this iclog, so we
+ * need to ensure that when they release the iclog it
+ * gets synced immediately as we may be waiting on it.
*/
xlog_state_switch_iclogs(log, iclog, 0);
}
- } else {
- /*
- * If the head iclog is not active nor dirty, we just attach
- * ourselves to the head and go to sleep if necessary.
- */
- ;
}
+ /*
+ * The iclog we are about to wait on may contain the checkpoint pushed
+ * by the above xlog_cil_force() call, but it may not have been pushed
+ * to disk yet. Like the ACTIVE case above, we need to make sure caches
+ * are flushed when this iclog is written.
+ */
+ if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+
if (flags & XFS_LOG_SYNC)
return xlog_wait_on_iclog(iclog);
out_unlock:
@@ -3281,15 +3295,15 @@ out_error:
}
static int
-__xfs_log_force_lsn(
- struct xfs_mount *mp,
+xlog_force_lsn(
+ struct xlog *log,
xfs_lsn_t lsn,
uint flags,
int *log_flushed,
bool already_slept)
{
- struct xlog *log = mp->m_log;
struct xlog_in_core *iclog;
+ bool completed;
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
@@ -3297,12 +3311,14 @@ __xfs_log_force_lsn(
goto out_error;
while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
+ trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
iclog = iclog->ic_next;
if (iclog == log->l_iclog)
goto out_unlock;
}
- if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ switch (iclog->ic_state) {
+ case XLOG_STATE_ACTIVE:
/*
* We sleep here if we haven't already slept (e.g. this is the
* first time we've looked at the correct iclog buf) and the
@@ -3321,18 +3337,35 @@ __xfs_log_force_lsn(
if (!already_slept &&
(iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
- XFS_STATS_INC(mp, xs_log_force_sleep);
-
xlog_wait(&iclog->ic_prev->ic_write_wait,
&log->l_icloglock);
return -EAGAIN;
}
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_switch_iclogs(log, iclog, 0);
- if (xlog_state_release_iclog(log, iclog))
+ if (xlog_force_and_check_iclog(iclog, &completed))
goto out_error;
if (log_flushed)
*log_flushed = 1;
+ if (completed)
+ goto out_unlock;
+ break;
+ case XLOG_STATE_WANT_SYNC:
+ /*
+ * This iclog may contain the checkpoint pushed by the
+ * xlog_cil_force_seq() call, but there are other writers still
+		 * accessing it, so it hasn't been pushed to disk yet. Like the
+ * ACTIVE case above, we need to make sure caches are flushed
+ * when this iclog is written.
+ */
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+ break;
+ default:
+ /*
+ * The entire checkpoint was written by the CIL force and is on
+ * its way to disk already. It will be stable when it
+ * completes, so we don't need to manipulate caches here at all.
+ * We just need to wait for completion if necessary.
+ */
+ break;
}
if (flags & XFS_LOG_SYNC)
@@ -3360,25 +3393,29 @@ out_error:
* to disk, that thread will wake up all threads waiting on the queue.
*/
int
-xfs_log_force_lsn(
+xfs_log_force_seq(
struct xfs_mount *mp,
- xfs_lsn_t lsn,
+ xfs_csn_t seq,
uint flags,
int *log_flushed)
{
+ struct xlog *log = mp->m_log;
+ xfs_lsn_t lsn;
int ret;
- ASSERT(lsn != 0);
+ ASSERT(seq != 0);
XFS_STATS_INC(mp, xs_log_force);
- trace_xfs_log_force(mp, lsn, _RET_IP_);
+ trace_xfs_log_force(mp, seq, _RET_IP_);
- lsn = xlog_cil_force_lsn(mp->m_log, lsn);
+ lsn = xlog_cil_force_seq(log, seq);
if (lsn == NULLCOMMITLSN)
return 0;
- ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
- if (ret == -EAGAIN)
- ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
+ ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
+ if (ret == -EAGAIN) {
+ XFS_STATS_INC(mp, xs_log_force_sleep);
+ ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
+ }
return ret;
}
@@ -3407,12 +3444,11 @@ xfs_log_ticket_get(
* Figure out the total log space unit (in bytes) that would be
* required for a log ticket.
*/
-int
-xfs_log_calc_unit_res(
- struct xfs_mount *mp,
+static int
+xlog_calc_unit_res(
+ struct xlog *log,
int unit_bytes)
{
- struct xlog *log = mp->m_log;
int iclog_space;
uint num_headers;
@@ -3488,18 +3524,20 @@ xfs_log_calc_unit_res(
/* for commit-rec LR header - note: padding will subsume the ophdr */
unit_bytes += log->l_iclog_hsize;
- /* for roundoff padding for transaction data and one for commit record */
- if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
- /* log su roundoff */
- unit_bytes += 2 * mp->m_sb.sb_logsunit;
- } else {
- /* BB roundoff */
- unit_bytes += 2 * BBSIZE;
- }
+ /* roundoff padding for transaction data and one for commit record */
+ unit_bytes += 2 * log->l_iclog_roundoff;
return unit_bytes;
}
+int
+xfs_log_calc_unit_res(
+ struct xfs_mount *mp,
+ int unit_bytes)
+{
+ return xlog_calc_unit_res(mp->m_log, unit_bytes);
+}
+
/*
* Allocate and initialise a new log ticket.
*/
@@ -3516,7 +3554,7 @@ xlog_ticket_alloc(
tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL);
- unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
+ unit_res = xlog_calc_unit_res(log, unit_bytes);
atomic_set(&tic->t_ref, 1);
tic->t_task = current;
@@ -3600,10 +3638,10 @@ xlog_verify_grant_tail(
STATIC void
xlog_verify_tail_lsn(
struct xlog *log,
- struct xlog_in_core *iclog,
- xfs_lsn_t tail_lsn)
+ struct xlog_in_core *iclog)
{
- int blocks;
+ xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
+ int blocks;
if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
blocks =
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 044e02cb8921..813b972e9788 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -106,7 +106,7 @@ struct xfs_item_ops;
struct xfs_trans;
int xfs_log_force(struct xfs_mount *mp, uint flags);
-int xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags,
+int xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
int *log_forced);
int xfs_log_mount(struct xfs_mount *mp,
struct xfs_buftarg *log_target,
@@ -117,7 +117,6 @@ void xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
-void xfs_log_release_iclog(struct xlog_in_core *iclog);
int xfs_log_reserve(struct xfs_mount *mp,
int length,
int count,
@@ -132,8 +131,6 @@ bool xfs_log_writable(struct xfs_mount *mp);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);
-void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_lsn_t *commit_lsn, bool regrant);
void xlog_cil_process_committed(struct list_head *list);
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index b0ef071b3cb5..4c44bc3786c0 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -654,8 +654,11 @@ xlog_cil_push_work(
struct xfs_trans_header thdr;
struct xfs_log_iovec lhdr;
struct xfs_log_vec lvhdr = { NULL };
+ xfs_lsn_t preflush_tail_lsn;
xfs_lsn_t commit_lsn;
- xfs_lsn_t push_seq;
+ xfs_csn_t push_seq;
+ struct bio bio;
+ DECLARE_COMPLETION_ONSTACK(bdev_flush);
new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
@@ -668,9 +671,14 @@ xlog_cil_push_work(
ASSERT(push_seq <= ctx->sequence);
/*
- * Wake up any background push waiters now this context is being pushed.
+ * As we are about to switch to a new, empty CIL context, we no longer
+ * need to throttle tasks on CIL space overruns. Wake any waiters that
+ * the hard push throttle may have caught so they can start committing
+	 * to the new context. The cil->xc_push_lock provides the serialisation
+ * necessary for safely using the lockless waitqueue_active() check in
+ * this context.
*/
- if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+ if (waitqueue_active(&cil->xc_push_wait))
wake_up_all(&cil->xc_push_wait);
/*
@@ -719,10 +727,27 @@ xlog_cil_push_work(
spin_unlock(&cil->xc_push_lock);
/*
- * pull all the log vectors off the items in the CIL, and
- * remove the items from the CIL. We don't need the CIL lock
- * here because it's only needed on the transaction commit
- * side which is currently locked out by the flush lock.
+ * The CIL is stable at this point - nothing new will be added to it
+ * because we hold the flush lock exclusively. Hence we can now issue
+ * a cache flush to ensure all the completed metadata in the journal we
+ * are about to overwrite is on stable storage.
+ *
+ * Because we are issuing this cache flush before we've written the
+ * tail lsn to the iclog, we can have metadata IO completions move the
+ * tail forwards between the completion of this flush and the iclog
+ * being written. In this case, we need to re-issue the cache flush
+ * before the iclog write. To detect whether the log tail moves, sample
+ * the tail LSN *before* we issue the flush.
+ */
+ preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
+ xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
+ &bdev_flush);
+
+ /*
+ * Pull all the log vectors off the items in the CIL, and remove the
+ * items from the CIL. We don't need the CIL lock here because it's only
+ * needed on the transaction commit side which is currently locked out
+ * by the flush lock.
*/
lv = NULL;
num_iovecs = 0;
@@ -772,7 +797,7 @@ xlog_cil_push_work(
* that higher sequences will wait for us to write out a commit record
* before they do.
*
- * xfs_log_force_lsn requires us to mirror the new sequence into the cil
+ * xfs_log_force_seq requires us to mirror the new sequence into the cil
* structure atomically with the addition of this sequence to the
* committing list. This also ensures that we can do unlocked checks
* against the current sequence in log forces without risking
@@ -806,7 +831,14 @@ xlog_cil_push_work(
lvhdr.lv_iovecp = &lhdr;
lvhdr.lv_next = ctx->lv_chain;
- error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0, true);
+ /*
+ * Before we format and submit the first iclog, we have to ensure that
+ * the metadata writeback ordering cache flush is complete.
+ */
+ wait_for_completion(&bdev_flush);
+
+ error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL,
+ XLOG_START_TRANS);
if (error)
goto out_abort_free_ticket;
@@ -850,15 +882,21 @@ restart:
xfs_log_ticket_ungrant(log, tic);
- spin_lock(&commit_iclog->ic_callback_lock);
+ /*
+ * Once we attach the ctx to the iclog, a shutdown can process the
+ * iclog, run the callbacks and free the ctx. The only thing preventing
+ * this potential UAF situation here is that we are holding the
+ * icloglock. Hence we cannot access the ctx once we have attached the
+ * callbacks and dropped the icloglock.
+ */
+ spin_lock(&log->l_icloglock);
if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
- spin_unlock(&commit_iclog->ic_callback_lock);
+ spin_unlock(&log->l_icloglock);
goto out_abort;
}
ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
- spin_unlock(&commit_iclog->ic_callback_lock);
/*
* now the checkpoint commit is complete and we've attached the
@@ -870,8 +908,50 @@ restart:
wake_up_all(&cil->xc_commit_wait);
spin_unlock(&cil->xc_push_lock);
- /* release the hounds! */
- xfs_log_release_iclog(commit_iclog);
+ /*
+ * If the checkpoint spans multiple iclogs, wait for all previous iclogs
+ * to complete before we submit the commit_iclog. We can't use state
+ * checks for this - ACTIVE can be either a past completed iclog or a
+ * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
+ * past or future iclog awaiting IO or ordered IO completion to be run.
+	 * In the latter case, if it's a future iclog and we wait on it, then we
+ * will hang because it won't get processed through to ic_force_wait
+ * wakeup until this commit_iclog is written to disk. Hence we use the
+ * iclog header lsn and compare it to the commit lsn to determine if we
+ * need to wait on iclogs or not.
+ *
+ * NOTE: It is not safe to reference the ctx after this check as we drop
+ * the icloglock if we have to wait for completion of other iclogs.
+ */
+ if (ctx->start_lsn != commit_lsn) {
+ xfs_lsn_t plsn;
+
+ plsn = be64_to_cpu(commit_iclog->ic_prev->ic_header.h_lsn);
+ if (plsn && XFS_LSN_CMP(plsn, commit_lsn) < 0) {
+ /*
+ * Waiting on ic_force_wait orders the completion of
+ * iclogs older than ic_prev. Hence we only need to wait
+ * on the most recent older iclog here.
+ */
+ xlog_wait_on_iclog(commit_iclog->ic_prev);
+ spin_lock(&log->l_icloglock);
+ }
+
+ /*
+ * We need to issue a pre-flush so that the ordering for this
+ * checkpoint is correctly preserved down to stable storage.
+ */
+ commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+ }
+
+ /*
+ * The commit iclog must be written to stable storage to guarantee
+ * journal IO vs metadata writeback IO is correctly ordered on stable
+ * storage.
+ */
+ commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
+ xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
+ spin_unlock(&log->l_icloglock);
return;
out_skip:
@@ -907,7 +987,7 @@ xlog_cil_push_background(
ASSERT(!list_empty(&cil->xc_cil));
/*
- * don't do a background push if we haven't used up all the
+ * Don't do a background push if we haven't used up all the
* space available yet.
*/
if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
@@ -931,9 +1011,16 @@ xlog_cil_push_background(
/*
* If we are well over the space limit, throttle the work that is being
- * done until the push work on this context has begun.
+ * done until the push work on this context has begun. Enforce the hard
+ * throttle on all transaction commits once it has been activated, even
+ * if the committing transactions have resulted in the space usage
+ * dipping back down under the hard limit.
+ *
+	 * The cil->xc_push_lock provides the serialisation necessary for safely
+ * using the lockless waitqueue_active() check in this context.
*/
- if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
+ if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
+ waitqueue_active(&cil->xc_push_wait)) {
trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
ASSERT(cil->xc_ctx->space_used < log->l_logsize);
xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
@@ -1008,16 +1095,14 @@ xlog_cil_empty(
* allowed again.
*/
void
-xfs_log_commit_cil(
- struct xfs_mount *mp,
+xlog_cil_commit(
+ struct xlog *log,
struct xfs_trans *tp,
- xfs_lsn_t *commit_lsn,
+ xfs_csn_t *commit_seq,
bool regrant)
{
- struct xlog *log = mp->m_log;
struct xfs_cil *cil = log->l_cilp;
struct xfs_log_item *lip, *next;
- xfs_lsn_t xc_commit_lsn;
/*
* Do all necessary memory allocation before we lock the CIL.
@@ -1031,10 +1116,6 @@ xfs_log_commit_cil(
xlog_cil_insert_items(log, tp);
- xc_commit_lsn = cil->xc_ctx->sequence;
- if (commit_lsn)
- *commit_lsn = xc_commit_lsn;
-
if (regrant && !XLOG_FORCED_SHUTDOWN(log))
xfs_log_ticket_regrant(log, tp->t_ticket);
else
@@ -1057,8 +1138,10 @@ xfs_log_commit_cil(
list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
xfs_trans_del_item(lip);
if (lip->li_ops->iop_committing)
- lip->li_ops->iop_committing(lip, xc_commit_lsn);
+ lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
}
+ if (commit_seq)
+ *commit_seq = cil->xc_ctx->sequence;
/* xlog_cil_push_background() releases cil->xc_ctx_lock */
xlog_cil_push_background(log);
@@ -1075,9 +1158,9 @@ xfs_log_commit_cil(
* iclog flush is necessary following this call.
*/
xfs_lsn_t
-xlog_cil_force_lsn(
+xlog_cil_force_seq(
struct xlog *log,
- xfs_lsn_t sequence)
+ xfs_csn_t sequence)
{
struct xfs_cil *cil = log->l_cilp;
struct xfs_cil_ctx *ctx;
@@ -1173,21 +1256,17 @@ bool
xfs_log_item_in_current_chkpt(
struct xfs_log_item *lip)
{
- struct xfs_cil_ctx *ctx;
+ struct xfs_cil_ctx *ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
if (list_empty(&lip->li_cil))
return false;
- ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
-
/*
* li_seq is written on the first commit of a log item to record the
* first checkpoint it is written to. Hence if it is different to the
* current sequence, we're in a new checkpoint.
*/
- if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
- return false;
- return true;
+ return lip->li_seq == ctx->sequence;
}
/*
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 1c6fdbf3d506..f3e79a45d60a 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -50,6 +50,26 @@ enum xlog_iclog_state {
XLOG_STATE_IOERROR, /* IO error happened in sync'ing log */
};
+#define XLOG_STATE_STRINGS \
+ { XLOG_STATE_ACTIVE, "XLOG_STATE_ACTIVE" }, \
+ { XLOG_STATE_WANT_SYNC, "XLOG_STATE_WANT_SYNC" }, \
+ { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \
+ { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \
+ { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \
+ { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \
+ { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" }
+
+/*
+ * In core log flags
+ */
+#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
+#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
+
+#define XLOG_ICL_STRINGS \
+ { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \
+ { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" }
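The two flag definitions above only record intent; per their comments the iclog wants REQ_PREFLUSH and REQ_FUA on the log I/O. A minimal sketch of how a bio-based submission path might consume them (illustration only; the xlog_write_iclog() side of this series is not shown in this hunk):

	#include <linux/bio.h>

	/* Sketch only: translate in-core iclog flags into bio op flags at submission. */
	static void example_apply_iclog_flags(struct xlog_in_core *iclog, struct bio *bio)
	{
		if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
			bio->bi_opf |= REQ_PREFLUSH;	/* flush volatile device caches first */
		if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
			bio->bi_opf |= REQ_FUA;		/* make the log write itself stable */
	}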
+
+
/*
* Log ticket flags
*/
@@ -201,10 +221,8 @@ typedef struct xlog_in_core {
u32 ic_size;
u32 ic_offset;
enum xlog_iclog_state ic_state;
+ unsigned int ic_flags;
char *ic_datap; /* pointer to iclog data */
-
- /* Callback structures need their own cacheline */
- spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
struct list_head ic_callbacks;
/* reference counts need their own cacheline */
@@ -230,7 +248,7 @@ struct xfs_cil;
struct xfs_cil_ctx {
struct xfs_cil *cil;
- xfs_lsn_t sequence; /* chkpt sequence # */
+ xfs_csn_t sequence; /* chkpt sequence # */
xfs_lsn_t start_lsn; /* first LSN of chkpt commit */
xfs_lsn_t commit_lsn; /* chkpt commit record lsn */
struct xlog_ticket *ticket; /* chkpt ticket */
@@ -268,10 +286,10 @@ struct xfs_cil {
struct xfs_cil_ctx *xc_ctx;
spinlock_t xc_push_lock ____cacheline_aligned_in_smp;
- xfs_lsn_t xc_push_seq;
+ xfs_csn_t xc_push_seq;
struct list_head xc_committing;
wait_queue_head_t xc_commit_wait;
- xfs_lsn_t xc_current_sequence;
+ xfs_csn_t xc_current_sequence;
struct work_struct xc_push_work;
wait_queue_head_t xc_push_wait; /* background push throttle */
} ____cacheline_aligned_in_smp;
@@ -436,6 +454,8 @@ struct xlog {
#endif
 	/* log recovery lsn tracking (for buffer submission) */
xfs_lsn_t l_recovery_lsn;
+
+ uint32_t l_iclog_roundoff;/* padding roundoff */
};
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
@@ -478,13 +498,15 @@ void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void xlog_print_trans(struct xfs_trans *);
int xlog_write(struct xlog *log, struct xfs_log_vec *log_vector,
struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
- struct xlog_in_core **commit_iclog, uint flags,
- bool need_start_rec);
+ struct xlog_in_core **commit_iclog, uint optype);
int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
struct xlog_in_core **iclog, xfs_lsn_t *lsn);
void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+ xfs_lsn_t log_tail_lsn);
+
/*
* When we crack an atomic LSN, we sample it first so that the value will not
* change while we are cracking it into the component values. This means we
@@ -547,19 +569,18 @@ int xlog_cil_init(struct xlog *log);
void xlog_cil_init_post_recovery(struct xlog *log);
void xlog_cil_destroy(struct xlog *log);
bool xlog_cil_empty(struct xlog *log);
+void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
+ xfs_csn_t *commit_seq, bool regrant);
/*
* CIL force routines
*/
-xfs_lsn_t
-xlog_cil_force_lsn(
- struct xlog *log,
- xfs_lsn_t sequence);
+xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
static inline void
xlog_cil_force(struct xlog *log)
{
- xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
+ xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}
/*
@@ -582,6 +603,8 @@ xlog_wait(
remove_wait_queue(wq, &wait);
}
+int xlog_wait_on_iclog(struct xlog_in_core *iclog);
+
/*
* The LSN is valid so long as it is behind the current LSN. If it isn't, this
* means that the next log record that includes this metadata could have a
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index e5dd1c0c2f03..1721fce2ec94 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -25,6 +25,7 @@
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
+#include "xfs_ag.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -2457,8 +2458,10 @@ xlog_finish_defer_ops(
error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
- if (error)
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return error;
+ }
/*
* Transfer to this new transaction all the dfops we captured
@@ -2741,21 +2744,17 @@ STATIC void
xlog_recover_process_iunlinks(
struct xlog *log)
{
- xfs_mount_t *mp;
- xfs_agnumber_t agno;
- xfs_agi_t *agi;
- struct xfs_buf *agibp;
- xfs_agino_t agino;
- int bucket;
- int error;
-
- mp = log->l_mp;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+ struct xfs_agi *agi;
+ struct xfs_buf *agibp;
+ xfs_agino_t agino;
+ int bucket;
+ int error;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- /*
- * Find the agi for this ag.
- */
- error = xfs_read_agi(mp, NULL, agno, &agibp);
+ for_each_perag(mp, agno, pag) {
+ error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
if (error) {
/*
* AGI is b0rked. Don't process it.
@@ -2781,7 +2780,7 @@ xlog_recover_process_iunlinks(
agino = be32_to_cpu(agi->agi_unlinked[bucket]);
while (agino != NULLAGINO) {
agino = xlog_recover_process_one_iunlink(mp,
- agno, agino, bucket);
+ pag->pag_agno, agino, bucket);
cond_resched();
}
}
@@ -3452,6 +3451,7 @@ xlog_recover_finish(
* this) before we get around to xfs_log_mount_cancel.
*/
xlog_recover_cancel_intents(log);
+ xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
xfs_alert(log->l_mp, "Failed to recover intents");
return error;
}
@@ -3493,27 +3493,28 @@ xlog_recover_cancel(
*/
STATIC void
xlog_recover_check_summary(
- struct xlog *log)
+ struct xlog *log)
{
- xfs_mount_t *mp;
- struct xfs_buf *agfbp;
- struct xfs_buf *agibp;
- xfs_agnumber_t agno;
- uint64_t freeblks;
- uint64_t itotal;
- uint64_t ifree;
- int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_perag *pag;
+ struct xfs_buf *agfbp;
+ struct xfs_buf *agibp;
+ xfs_agnumber_t agno;
+ uint64_t freeblks;
+ uint64_t itotal;
+ uint64_t ifree;
+ int error;
mp = log->l_mp;
freeblks = 0LL;
itotal = 0LL;
ifree = 0LL;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
+ for_each_perag(mp, agno, pag) {
+ error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp);
if (error) {
xfs_alert(mp, "%s agf read failed agno %d error %d",
- __func__, agno, error);
+ __func__, pag->pag_agno, error);
} else {
struct xfs_agf *agfp = agfbp->b_addr;
@@ -3522,10 +3523,10 @@ xlog_recover_check_summary(
xfs_buf_relse(agfbp);
}
- error = xfs_read_agi(mp, NULL, agno, &agibp);
+ error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
if (error) {
xfs_alert(mp, "%s agi read failed agno %d error %d",
- __func__, agno, error);
+ __func__, pag->pag_agno, error);
} else {
struct xfs_agi *agi = agibp->b_addr;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bdfee1943796..d0755494597f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -32,6 +32,7 @@
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
+#include "xfs_ag.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
@@ -119,41 +120,6 @@ xfs_uuid_unmount(
mutex_unlock(&xfs_uuid_table_mutex);
}
-
-STATIC void
-__xfs_free_perag(
- struct rcu_head *head)
-{
- struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
-
- ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
- ASSERT(atomic_read(&pag->pag_ref) == 0);
- kmem_free(pag);
-}
-
-/*
- * Free up the per-ag resources associated with the mount structure.
- */
-STATIC void
-xfs_free_perag(
- xfs_mount_t *mp)
-{
- xfs_agnumber_t agno;
- struct xfs_perag *pag;
-
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- spin_lock(&mp->m_perag_lock);
- pag = radix_tree_delete(&mp->m_perag_tree, agno);
- spin_unlock(&mp->m_perag_lock);
- ASSERT(pag);
- ASSERT(atomic_read(&pag->pag_ref) == 0);
- cancel_delayed_work_sync(&pag->pag_blockgc_work);
- xfs_iunlink_destroy(pag);
- xfs_buf_hash_destroy(pag);
- call_rcu(&pag->rcu_head, __xfs_free_perag);
- }
-}
-
/*
* Check size of device based on the (data/realtime) block count.
* Note: this check is used by the growfs code as well as mount.
@@ -172,96 +138,6 @@ xfs_sb_validate_fsb_count(
return 0;
}
-int
-xfs_initialize_perag(
- xfs_mount_t *mp,
- xfs_agnumber_t agcount,
- xfs_agnumber_t *maxagi)
-{
- xfs_agnumber_t index;
- xfs_agnumber_t first_initialised = NULLAGNUMBER;
- xfs_perag_t *pag;
- int error = -ENOMEM;
-
- /*
- * Walk the current per-ag tree so we don't try to initialise AGs
- * that already exist (growfs case). Allocate and insert all the
- * AGs we don't find ready for initialisation.
- */
- for (index = 0; index < agcount; index++) {
- pag = xfs_perag_get(mp, index);
- if (pag) {
- xfs_perag_put(pag);
- continue;
- }
-
- pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
- if (!pag) {
- error = -ENOMEM;
- goto out_unwind_new_pags;
- }
- pag->pag_agno = index;
- pag->pag_mount = mp;
- spin_lock_init(&pag->pag_ici_lock);
- INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
- INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
-
- error = xfs_buf_hash_init(pag);
- if (error)
- goto out_free_pag;
- init_waitqueue_head(&pag->pagb_wait);
- spin_lock_init(&pag->pagb_lock);
- pag->pagb_count = 0;
- pag->pagb_tree = RB_ROOT;
-
- error = radix_tree_preload(GFP_NOFS);
- if (error)
- goto out_hash_destroy;
-
- spin_lock(&mp->m_perag_lock);
- if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
- WARN_ON_ONCE(1);
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
- error = -EEXIST;
- goto out_hash_destroy;
- }
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
- /* first new pag is fully initialized */
- if (first_initialised == NULLAGNUMBER)
- first_initialised = index;
- error = xfs_iunlink_init(pag);
- if (error)
- goto out_hash_destroy;
- spin_lock_init(&pag->pag_state_lock);
- }
-
- index = xfs_set_inode_alloc(mp, agcount);
-
- if (maxagi)
- *maxagi = index;
-
- mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
- return 0;
-
-out_hash_destroy:
- xfs_buf_hash_destroy(pag);
-out_free_pag:
- kmem_free(pag);
-out_unwind_new_pags:
- /* unwind any prior newly initialized pags */
- for (index = first_initialised; index < agcount; index++) {
- pag = radix_tree_delete(&mp->m_perag_tree, index);
- if (!pag)
- break;
- xfs_buf_hash_destroy(pag);
- xfs_iunlink_destroy(pag);
- kmem_free(pag);
- }
- return error;
-}
-
/*
* xfs_readsb
*
@@ -983,9 +859,17 @@ xfs_mountfs(
/*
* Finish recovering the file system. This part needed to be delayed
* until after the root and real-time bitmap inodes were consistently
- * read in.
+ * read in. Temporarily create per-AG space reservations for metadata
+ * btree shape changes because space freeing transactions (for inode
+ * inactivation) require the per-AG reservation in lieu of reserving
+ * blocks.
*/
+ error = xfs_fs_reserve_ag_blocks(mp);
+ if (error && error == -ENOSPC)
+ xfs_warn(mp,
+ "ENOSPC reserving per-AG metadata pool, log recovery may fail.");
error = xfs_log_mount_finish(mp);
+ xfs_fs_unreserve_ag_blocks(mp);
if (error) {
xfs_warn(mp, "log mount finish failed");
goto out_rtunmount;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bb67274ee23f..c78b63fe779a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -12,6 +12,7 @@ struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
+struct xfs_perag;
/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
@@ -297,117 +298,12 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
-/* per-AG block reservation data structures*/
-struct xfs_ag_resv {
- /* number of blocks originally reserved here */
- xfs_extlen_t ar_orig_reserved;
- /* number of blocks reserved here */
- xfs_extlen_t ar_reserved;
- /* number of blocks originally asked for */
- xfs_extlen_t ar_asked;
-};
-
-/*
- * Per-ag incore structure, copies of information in agf and agi, to improve the
- * performance of allocation group selection.
- */
-typedef struct xfs_perag {
- struct xfs_mount *pag_mount; /* owner filesystem */
- xfs_agnumber_t pag_agno; /* AG this structure belongs to */
- atomic_t pag_ref; /* perag reference count */
- char pagf_init; /* this agf's entry is initialized */
- char pagi_init; /* this agi's entry is initialized */
- char pagf_metadata; /* the agf is preferred to be metadata */
- char pagi_inodeok; /* The agi is ok for inodes */
- uint8_t pagf_levels[XFS_BTNUM_AGF];
- /* # of levels in bno & cnt btree */
- bool pagf_agflreset; /* agfl requires reset before use */
- uint32_t pagf_flcount; /* count of blocks in freelist */
- xfs_extlen_t pagf_freeblks; /* total free blocks */
- xfs_extlen_t pagf_longest; /* longest free space */
- uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
- xfs_agino_t pagi_freecount; /* number of free inodes */
- xfs_agino_t pagi_count; /* number of allocated inodes */
-
- /*
- * Inode allocation search lookup optimisation.
- * If the pagino matches, the search for new inodes
- * doesn't need to search the near ones again straight away
- */
- xfs_agino_t pagl_pagino;
- xfs_agino_t pagl_leftrec;
- xfs_agino_t pagl_rightrec;
-
- /*
- * Bitsets of per-ag metadata that have been checked and/or are sick.
- * Callers should hold pag_state_lock before accessing this field.
- */
- uint16_t pag_checked;
- uint16_t pag_sick;
- spinlock_t pag_state_lock;
-
- spinlock_t pagb_lock; /* lock for pagb_tree */
- struct rb_root pagb_tree; /* ordered tree of busy extents */
- unsigned int pagb_gen; /* generation count for pagb_tree */
- wait_queue_head_t pagb_wait; /* woken when pagb_gen changes */
-
- atomic_t pagf_fstrms; /* # of filestreams active in this AG */
-
- spinlock_t pag_ici_lock; /* incore inode cache lock */
- struct radix_tree_root pag_ici_root; /* incore inode cache root */
- int pag_ici_reclaimable; /* reclaimable inodes */
- unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
-
- /* buffer cache index */
- spinlock_t pag_buf_lock; /* lock for pag_buf_hash */
- struct rhashtable pag_buf_hash;
-
- /* for rcu-safe freeing */
- struct rcu_head rcu_head;
- int pagb_count; /* pagb slots in use */
-
- /* Blocks reserved for all kinds of metadata. */
- struct xfs_ag_resv pag_meta_resv;
- /* Blocks reserved for the reverse mapping btree. */
- struct xfs_ag_resv pag_rmapbt_resv;
-
- /* background prealloc block trimming */
- struct delayed_work pag_blockgc_work;
-
- /* reference count */
- uint8_t pagf_refcount_level;
-
- /*
- * Unlinked inode information. This incore information reflects
- * data stored in the AGI, so callers must hold the AGI buffer lock
- * or have some other means to control concurrency.
- */
- struct rhashtable pagi_unlinked_hash;
-} xfs_perag_t;
-
-static inline struct xfs_ag_resv *
-xfs_perag_resv(
- struct xfs_perag *pag,
- enum xfs_ag_resv_type type)
-{
- switch (type) {
- case XFS_AG_RESV_METADATA:
- return &pag->pag_meta_resv;
- case XFS_AG_RESV_RMAPBT:
- return &pag->pag_rmapbt_resv;
- default:
- return NULL;
- }
-}
-
-int xfs_buf_hash_init(xfs_perag_t *pag);
-void xfs_buf_hash_destroy(xfs_perag_t *pag);
+int xfs_buf_hash_init(struct xfs_perag *pag);
+void xfs_buf_hash_destroy(struct xfs_perag *pag);
extern void xfs_uuid_table_free(void);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
-extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
- xfs_agnumber_t *maxagi);
extern void xfs_unmountfs(xfs_mount_t *);
extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 4bf949a89d0d..fe341f3fd419 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -23,6 +23,8 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
+#include "xfs_ag.h"
+#include "xfs_ialloc.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -787,8 +789,12 @@ xfs_qm_qino_alloc(
return error;
if (need_alloc) {
- error = xfs_dir_ialloc(&init_user_ns, &tp, NULL, S_IFREG, 1, 0,
- 0, false, ipp);
+ xfs_ino_t ino;
+
+ error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
+ if (!error)
+ error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
+ S_IFREG, 1, 0, 0, false, ipp);
if (error) {
xfs_trans_cancel(tp);
return error;
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index e3dabab44097..ebbb484c49dc 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -142,7 +142,6 @@ extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
/* dquot stuff */
extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
-extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
/* quota ops */
extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 11f1e2fbf22f..13a56e1ea15c 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -201,7 +201,8 @@ xfs_qm_scall_quotaoff(
* depend on the quota inodes (and other things) being valid as long as
* we keep the lock(s).
*/
- xfs_qm_dqrele_all_inodes(mp, flags);
+ error = xfs_dqrele_all_inodes(mp, flags);
+ ASSERT(!error);
/*
* Next we make the changes in the quota flag in the mount struct.
@@ -747,54 +748,3 @@ xfs_qm_scall_getquota_next(
xfs_qm_dqput(dqp);
return error;
}
-
-STATIC int
-xfs_dqrele_inode(
- struct xfs_inode *ip,
- void *args)
-{
- uint *flags = args;
-
- /* skip quota inodes */
- if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
- ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
- ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
- ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_gdquot == NULL);
- ASSERT(ip->i_pdquot == NULL);
- return 0;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
- xfs_qm_dqrele(ip->i_udquot);
- ip->i_udquot = NULL;
- }
- if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
- xfs_qm_dqrele(ip->i_gdquot);
- ip->i_gdquot = NULL;
- }
- if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
- xfs_qm_dqrele(ip->i_pdquot);
- ip->i_pdquot = NULL;
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return 0;
-}
-
-
-/*
- * Go thru all the inodes in the file system, releasing their dquots.
- *
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff.
- */
-void
-xfs_qm_dqrele_all_inodes(
- struct xfs_mount *mp,
- uint flags)
-{
- ASSERT(mp->m_quotainfo);
- xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
- &flags, XFS_ICI_NO_TAG);
-}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 060695d6d56a..c256104772cb 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -27,7 +27,7 @@
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
-#include "xfs_sb.h"
+#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
@@ -144,7 +144,7 @@ xfs_reflink_find_shared(
if (error)
return error;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agbp->b_pag);
error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
find_end_of_shared);
@@ -755,16 +755,19 @@ int
xfs_reflink_recover_cow(
struct xfs_mount *mp)
{
+ struct xfs_perag *pag;
xfs_agnumber_t agno;
int error = 0;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- error = xfs_refcount_recover_cow_leftovers(mp, agno);
- if (error)
+ for_each_perag(mp, agno, pag) {
+ error = xfs_refcount_recover_cow_leftovers(mp, pag);
+ if (error) {
+ xfs_perag_put(pag);
break;
+ }
}
return error;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 4e7be6b4ca8e..699066fb9052 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -923,16 +923,41 @@ xfs_growfs_rt(
uint8_t *rsum_cache; /* old summary cache */
sbp = &mp->m_sb;
- /*
- * Initial error checking.
- */
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
- (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
- (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
+
+ /* Needs to have been mounted with an rt device. */
+ if (!XFS_IS_REALTIME_MOUNT(mp))
+ return -EINVAL;
+ /*
+ * Mount should fail if the rt bitmap/summary files don't load, but
+ * we'll check anyway.
+ */
+ if (!mp->m_rbmip || !mp->m_rsumip)
+ return -EINVAL;
+
+ /* Shrink not supported. */
+ if (in->newblocks <= sbp->sb_rblocks)
+ return -EINVAL;
+
+ /* Can only change rt extent size when adding rt volume. */
+ if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize)
+ return -EINVAL;
+
+ /* Range check the extent size. */
+ if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
+ XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
return -EINVAL;
- if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks)))
+
+ /* Unsupported realtime features. */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) ||
+ xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ nrblocks = in->newblocks;
+ error = xfs_sb_validate_fsb_count(sbp, nrblocks);
+ if (error)
return error;
/*
* Read in the last block of the device, make sure it exists.
@@ -996,7 +1021,8 @@ xfs_growfs_rt(
((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
bmbno < nrbmblocks;
bmbno++) {
- xfs_trans_t *tp;
+ struct xfs_trans *tp;
+ xfs_rfsblock_t nrblocks_step;
*nmp = *mp;
nsbp = &nmp->m_sb;
@@ -1005,10 +1031,9 @@ xfs_growfs_rt(
*/
nsbp->sb_rextsize = in->extsize;
nsbp->sb_rbmblocks = bmbno + 1;
- nsbp->sb_rblocks =
- XFS_RTMIN(nrblocks,
- nsbp->sb_rbmblocks * NBBY *
- nsbp->sb_blocksize * nsbp->sb_rextsize);
+ nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
+ nsbp->sb_rextsize;
+ nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
nsbp->sb_rextents = nsbp->sb_rblocks;
do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
ASSERT(nsbp->sb_rextents != 0);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index a2dab05332ac..2c9e26a44546 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -36,6 +36,7 @@
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
+#include "xfs_ag.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
@@ -339,13 +340,6 @@ xfs_blkdev_put(
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
-void
-xfs_blkdev_issue_flush(
- xfs_buftarg_t *buftarg)
-{
- blkdev_issue_flush(buftarg->bt_bdev);
-}
-
STATIC void
xfs_close_devices(
struct xfs_mount *mp)
@@ -667,7 +661,7 @@ xfs_fs_destroy_inode(
* reclaim path handles this more efficiently than we can here, so
* simply let background reclaim tear down all inodes.
*/
- xfs_inode_set_reclaim_tag(ip);
+ xfs_inode_mark_reclaimable(ip);
}
static void
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index d2b40dc60dfc..167d23f92ffe 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -87,7 +87,6 @@ struct xfs_buftarg;
struct block_device;
extern void xfs_flush_inodes(struct xfs_mount *mp);
-extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
xfs_agnumber_t agcount);
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 99fbec32c10a..1525636f4065 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -21,6 +21,7 @@
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
+#include "xfs_ialloc.h"
/* ----- Kernel only functions below ----- */
int
@@ -161,6 +162,7 @@ xfs_symlink(
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
uint resblks;
+ xfs_ino_t ino;
*ipp = NULL;
@@ -223,8 +225,11 @@ xfs_symlink(
/*
* Allocate an inode for the symlink.
*/
- error = xfs_dir_ialloc(mnt_userns, &tp, dp, S_IFLNK | (mode & ~S_IFMT),
- 1, 0, prid, false, &ip);
+ error = xfs_dialloc(&tp, dp->i_ino, S_IFLNK, &ino);
+ if (!error)
+ error = xfs_init_new_inode(mnt_userns, tp, dp, ino,
+ S_IFLNK | (mode & ~S_IFMT), 1, 0, prid,
+ false, &ip);
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 9b8d703dc9fd..7e01e00550ac 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -30,6 +30,8 @@
#include "xfs_fsmap.h"
#include "xfs_btree_staging.h"
#include "xfs_icache.h"
+#include "xfs_ag.h"
+#include "xfs_ag_resv.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 808ae337b222..19260291ff8b 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -24,6 +24,7 @@ struct xlog_ticket;
struct xlog_recover;
struct xlog_recover_item;
struct xlog_rec_header;
+struct xlog_in_core;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
@@ -37,7 +38,7 @@ struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;
struct xfs_dqtrx;
-struct xfs_eofblocks;
+struct xfs_icwalk;
#define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \
@@ -153,10 +154,8 @@ DEFINE_EVENT(xfs_perag_class, name, \
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_blockgc);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_blockgc);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
DECLARE_EVENT_CLASS(xfs_ag_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
@@ -632,8 +631,8 @@ DEFINE_EVENT(xfs_inode_class, name, \
TP_PROTO(struct xfs_inode *ip), \
TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
-DEFINE_INODE_EVENT(xfs_iget_reclaim);
-DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
+DEFINE_INODE_EVENT(xfs_iget_recycle);
+DEFINE_INODE_EVENT(xfs_iget_recycle_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);
@@ -1914,7 +1913,6 @@ DEFINE_ATTR_EVENT(xfs_attr_leaf_add);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_old);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_new);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_work);
-DEFINE_ATTR_EVENT(xfs_attr_leaf_addname);
DEFINE_ATTR_EVENT(xfs_attr_leaf_create);
DEFINE_ATTR_EVENT(xfs_attr_leaf_compact);
DEFINE_ATTR_EVENT(xfs_attr_leaf_get);
@@ -1944,7 +1942,6 @@ DEFINE_ATTR_EVENT(xfs_attr_refillstate);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_get);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_set);
-DEFINE_ATTR_EVENT(xfs_attr_rmtval_remove);
#define DEFINE_DA_EVENT(name) \
DEFINE_EVENT(xfs_da_class, name, \
@@ -3730,7 +3727,7 @@ TRACE_EVENT(xfs_btree_commit_afakeroot,
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
- __entry->agno = cur->bc_ag.agno;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = cur->bc_ag.afake->af_root;
__entry->levels = cur->bc_ag.afake->af_levels;
__entry->blocks = cur->bc_ag.afake->af_blocks;
@@ -3845,7 +3842,7 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
} else {
- __entry->agno = cur->bc_ag.agno;
+ __entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = be32_to_cpu(ptr->s);
}
__entry->nr_records = nr_records;
@@ -3887,10 +3884,10 @@ DEFINE_EVENT(xfs_timestamp_range_class, name, \
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_inode_timestamp_range);
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_quota_expiry_range);
-DECLARE_EVENT_CLASS(xfs_eofblocks_class,
- TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb,
+DECLARE_EVENT_CLASS(xfs_icwalk_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw,
unsigned long caller_ip),
- TP_ARGS(mp, eofb, caller_ip),
+ TP_ARGS(mp, icw, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(__u32, flags)
@@ -3898,35 +3895,100 @@ DECLARE_EVENT_CLASS(xfs_eofblocks_class,
__field(uint32_t, gid)
__field(prid_t, prid)
__field(__u64, min_file_size)
+ __field(long, scan_limit)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
- __entry->flags = eofb ? eofb->eof_flags : 0;
- __entry->uid = eofb ? from_kuid(mp->m_super->s_user_ns,
- eofb->eof_uid) : 0;
- __entry->gid = eofb ? from_kgid(mp->m_super->s_user_ns,
- eofb->eof_gid) : 0;
- __entry->prid = eofb ? eofb->eof_prid : 0;
- __entry->min_file_size = eofb ? eofb->eof_min_file_size : 0;
+ __entry->flags = icw ? icw->icw_flags : 0;
+ __entry->uid = icw ? from_kuid(mp->m_super->s_user_ns,
+ icw->icw_uid) : 0;
+ __entry->gid = icw ? from_kgid(mp->m_super->s_user_ns,
+ icw->icw_gid) : 0;
+ __entry->prid = icw ? icw->icw_prid : 0;
+ __entry->min_file_size = icw ? icw->icw_min_file_size : 0;
+ __entry->scan_limit = icw ? icw->icw_scan_limit : 0;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu caller %pS",
+ TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu scan_limit %ld caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->flags,
__entry->uid,
__entry->gid,
__entry->prid,
__entry->min_file_size,
+ __entry->scan_limit,
(char *)__entry->caller_ip)
);
-#define DEFINE_EOFBLOCKS_EVENT(name) \
-DEFINE_EVENT(xfs_eofblocks_class, name, \
- TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb, \
+#define DEFINE_ICWALK_EVENT(name) \
+DEFINE_EVENT(xfs_icwalk_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw, \
unsigned long caller_ip), \
- TP_ARGS(mp, eofb, caller_ip))
-DEFINE_EOFBLOCKS_EVENT(xfs_ioc_free_eofblocks);
-DEFINE_EOFBLOCKS_EVENT(xfs_blockgc_free_space);
+ TP_ARGS(mp, icw, caller_ip))
+DEFINE_ICWALK_EVENT(xfs_ioc_free_eofblocks);
+DEFINE_ICWALK_EVENT(xfs_blockgc_free_space);
+
+TRACE_DEFINE_ENUM(XLOG_STATE_ACTIVE);
+TRACE_DEFINE_ENUM(XLOG_STATE_WANT_SYNC);
+TRACE_DEFINE_ENUM(XLOG_STATE_SYNCING);
+TRACE_DEFINE_ENUM(XLOG_STATE_DONE_SYNC);
+TRACE_DEFINE_ENUM(XLOG_STATE_CALLBACK);
+TRACE_DEFINE_ENUM(XLOG_STATE_DIRTY);
+TRACE_DEFINE_ENUM(XLOG_STATE_IOERROR);
+
+DECLARE_EVENT_CLASS(xlog_iclog_class,
+ TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip),
+ TP_ARGS(iclog, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(uint32_t, state)
+ __field(int32_t, refcount)
+ __field(uint32_t, offset)
+ __field(uint32_t, flags)
+ __field(unsigned long long, lsn)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = iclog->ic_log->l_mp->m_super->s_dev;
+ __entry->state = iclog->ic_state;
+ __entry->refcount = atomic_read(&iclog->ic_refcnt);
+ __entry->offset = iclog->ic_offset;
+ __entry->flags = iclog->ic_flags;
+ __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
+ __entry->refcount,
+ __entry->offset,
+ __entry->lsn,
+ __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
+ (char *)__entry->caller_ip)
+
+);
+
+#define DEFINE_ICLOG_EVENT(name) \
+DEFINE_EVENT(xlog_iclog_class, name, \
+ TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), \
+ TP_ARGS(iclog, caller_ip))
+
+DEFINE_ICLOG_EVENT(xlog_iclog_activate);
+DEFINE_ICLOG_EVENT(xlog_iclog_clean);
+DEFINE_ICLOG_EVENT(xlog_iclog_callback);
+DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_start);
+DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_done);
+DEFINE_ICLOG_EVENT(xlog_iclog_force);
+DEFINE_ICLOG_EVENT(xlog_iclog_force_lsn);
+DEFINE_ICLOG_EVENT(xlog_iclog_get_space);
+DEFINE_ICLOG_EVENT(xlog_iclog_release);
+DEFINE_ICLOG_EVENT(xlog_iclog_switch);
+DEFINE_ICLOG_EVENT(xlog_iclog_sync);
+DEFINE_ICLOG_EVENT(xlog_iclog_syncing);
+DEFINE_ICLOG_EVENT(xlog_iclog_sync_done);
+DEFINE_ICLOG_EVENT(xlog_iclog_want_sync);
+DEFINE_ICLOG_EVENT(xlog_iclog_wait_on);
+DEFINE_ICLOG_EVENT(xlog_iclog_write);
#endif /* _TRACE_XFS_H */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 586f2992b789..87bffd12c20c 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -839,7 +839,7 @@ __xfs_trans_commit(
bool regrant)
{
struct xfs_mount *mp = tp->t_mountp;
- xfs_lsn_t commit_lsn = -1;
+ xfs_csn_t commit_seq = 0;
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
@@ -881,7 +881,7 @@ __xfs_trans_commit(
xfs_trans_apply_sb_deltas(tp);
xfs_trans_apply_dquot_deltas(tp);
- xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
+ xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
xfs_trans_free(tp);
@@ -890,7 +890,7 @@ __xfs_trans_commit(
* log out now and wait for it.
*/
if (sync) {
- error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
+ error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
XFS_STATS_INC(mp, xs_trans_sync);
} else {
XFS_STATS_INC(mp, xs_trans_async);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ee42d98d9011..50da47f23a07 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -43,7 +43,7 @@ struct xfs_log_item {
struct list_head li_cil; /* CIL pointers */
struct xfs_log_vec *li_lv; /* active log vector */
struct xfs_log_vec *li_lv_shadow; /* standby vector */
- xfs_lsn_t li_seq; /* CIL commit seq */
+ xfs_csn_t li_seq; /* CIL commit seq */
};
/*
@@ -69,7 +69,7 @@ struct xfs_item_ops {
void (*iop_pin)(struct xfs_log_item *);
void (*iop_unpin)(struct xfs_log_item *, int remove);
uint (*iop_push)(struct xfs_log_item *, struct list_head *);
- void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t commit_lsn);
+ void (*iop_committing)(struct xfs_log_item *lip, xfs_csn_t seq);
void (*iop_release)(struct xfs_log_item *);
xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
int (*iop_recover)(struct xfs_log_item *lip,
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index dbf03635869c..70055d486bf7 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -705,9 +705,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
return 0;
bio = bio_alloc(GFP_NOFS, nr_pages);
- if (!bio)
- return -ENOMEM;
-
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = zi->i_zsector;
bio->bi_write_hint = iocb->ki_hint;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c6bd4f9a80ba..13d93371790e 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -592,6 +592,9 @@ struct acpi_pci_root {
bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
+int acpi_iommu_fwspec_init(struct device *dev, u32 id,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops);
int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
u64 *size);
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
@@ -704,11 +707,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
- *
- * FIXME: Due to above requirement there is a window that may invalidate @adev
- * and next iteration will use a dangling pointer, e.g. in the case of a
- * hotplug event. That said, the caller should ensure that this will never
- * happen.
*/
#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \
@@ -722,7 +720,8 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
static inline void acpi_dev_put(struct acpi_device *adev)
{
- put_device(&adev->dev);
+ if (adev)
+ put_device(&adev->dev);
}
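With the NULL check added above, acpi_dev_put() is safe to call on a failed lookup, so cleanup can share one exit path. A hedged sketch (the HID string and function name are hypothetical):

	#include <linux/acpi.h>

	/* Sketch only: a single cleanup path works whether or not the lookup succeeded. */
	static int example_find_companion(void)
	{
		struct acpi_device *adev;
		int ret = 0;

		adev = acpi_dev_get_first_match_dev("HYPO0001", NULL, -1);
		if (!adev)
			ret = -ENODEV;
		/* ... otherwise use adev ... */
		acpi_dev_put(adev);	/* no-op when the lookup returned NULL */
		return ret;
	}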
struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index bafc51f483c4..edb0e2a602a8 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -18,7 +18,8 @@
#endif
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
#ifdef CONFIG_BUG
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
new file mode 100644
index 000000000000..a53116b8c57e
--- /dev/null
+++ b/include/asm-generic/logic_io.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef _LOGIC_IO_H
+#define _LOGIC_IO_H
+#include <linux/types.h>
+
+/* include this file into asm/io.h */
+
+#ifdef CONFIG_INDIRECT_IOMEM
+
+#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK
+/*
+ * If you want emulated IO memory to fall back to 'normal' IO memory
+ * if a region wasn't registered as emulated, then you need to have
+ * all of the real_* functions implemented.
+ */
+#if !defined(real_ioremap) || !defined(real_iounmap) || \
+ !defined(real_raw_readb) || !defined(real_raw_writeb) || \
+ !defined(real_raw_readw) || !defined(real_raw_writew) || \
+ !defined(real_raw_readl) || !defined(real_raw_writel) || \
+ (defined(CONFIG_64BIT) && \
+ (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \
+ !defined(real_memset_io) || \
+ !defined(real_memcpy_fromio) || \
+ !defined(real_memcpy_toio)
+#error "Must provide fallbacks for real IO memory access"
+#endif /* defined ... */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+#define iounmap iounmap
+void iounmap(void __iomem *addr);
+
+#define __raw_readb __raw_readb
+u8 __raw_readb(const volatile void __iomem *addr);
+
+#define __raw_readw __raw_readw
+u16 __raw_readw(const volatile void __iomem *addr);
+
+#define __raw_readl __raw_readl
+u32 __raw_readl(const volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+u64 __raw_readq(const volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define __raw_writeb __raw_writeb
+void __raw_writeb(u8 value, volatile void __iomem *addr);
+
+#define __raw_writew __raw_writew
+void __raw_writew(u16 value, volatile void __iomem *addr);
+
+#define __raw_writel __raw_writel
+void __raw_writel(u32 value, volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+void __raw_writeq(u64 value, volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define memset_io memset_io
+void memset_io(volatile void __iomem *addr, int value, size_t size);
+
+#define memcpy_fromio memcpy_fromio
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size);
+
+#define memcpy_toio memcpy_toio
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size);
+
+#endif /* CONFIG_INDIRECT_IOMEM */
+#endif /* _LOGIC_IO_H */
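The header above is meant to be included from an architecture's asm/io.h, and with CONFIG_INDIRECT_IOMEM_FALLBACK it refuses to build unless all the real_* accessors are already defined. A hedged sketch of the arch side (every my_arch_* name is hypothetical; real_raw_readq/writeq would also be needed on 64-bit):

	/* Sketch only: hypothetical arch asm/io.h wiring up the fallback accessors. */
	#define real_ioremap(offset, size)		my_arch_ioremap(offset, size)
	#define real_iounmap(addr)			my_arch_iounmap(addr)
	#define real_raw_readb(addr)			my_arch_readb(addr)
	#define real_raw_writeb(val, addr)		my_arch_writeb(val, addr)
	#define real_raw_readw(addr)			my_arch_readw(addr)
	#define real_raw_writew(val, addr)		my_arch_writew(val, addr)
	#define real_raw_readl(addr)			my_arch_readl(addr)
	#define real_raw_writel(val, addr)		my_arch_writel(val, addr)
	#define real_memset_io(addr, value, size)	my_arch_memset_io(addr, value, size)
	#define real_memcpy_fromio(buf, addr, size)	my_arch_memcpy_fromio(buf, addr, size)
	#define real_memcpy_toio(addr, buf, size)	my_arch_memcpy_toio(addr, buf, size)

	#include <asm-generic/logic_io.h>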
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 2f6b1befb129..03b7dae47dd4 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -41,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
#define __p4d(x) ((p4d_t) { __pgd(x) })
#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
-#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
/*
* allocating and freeing a p4d is trivial: the 1-entry p4d is
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 3e13acd019ae..10789cf51d16 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -51,7 +51,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
#define __pmd(x) ((pmd_t) { __pud(x) } )
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
-#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud })))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index a9d751fbda9e..eb70c6d7ceff 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -49,7 +49,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
#define __pud(x) ((pud_t) { __p4d(x) })
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
-#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
+#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 4973328f3c6e..a0b2f270dddc 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -19,7 +19,7 @@ __get_user_fn(size_t size, const void __user *from, void *to)
switch (size) {
case 1:
- *(u8 *)to = get_unaligned((u8 __force *)from);
+ *(u8 *)to = *((u8 __force *)from);
return 0;
case 2:
*(u16 *)to = get_unaligned((u16 __force *)from);
@@ -45,7 +45,7 @@ __put_user_fn(size_t size, void __user *to, void *from)
switch (size) {
case 1:
- put_unaligned(*(u8 *)from, (u8 __force *)to);
+ *(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
put_unaligned(*(u16 *)from, (u16 __force *)to);
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 374c940e9be1..1c4242416c9f 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -6,31 +6,124 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
+#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
#endif /* __ASM_GENERIC_UNALIGNED_H */
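As a usage sketch (the two helper functions below are hypothetical, not part of the patch): the __packed-struct based accessors let callers read and write multi-byte fields at arbitrary byte offsets without caring about buffer alignment or CPU endianness.

static u32 parse_frame_len(const u8 *buf)
{
	/* buf may point anywhere inside a packet; wire format is little endian */
	return get_unaligned_le32(buf);
}

static void write_be24_tag(u8 *buf, u32 tag)
{
	/* stores the low 24 bits of tag, most significant byte first */
	put_unaligned_be24(tag, buf);
}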
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 17325416e2de..62669b36a772 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -586,6 +586,7 @@
NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
TEXT_CFI_JT \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index c837d0775474..7af08174a721 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -81,12 +81,7 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
- * PageSlab cannot be optimised away per se due to
- * use of volatile pointer.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
- flush_dcache_page(page);
+ flush_dcache_page(page);
}
if (more && walk->offset >= walk->sg->offset + walk->sg->length)
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 10100a4bbe2a..afb27cb6a7bd 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_MAJOR 226
/**
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 3acebe937bfc..3d00c98b9654 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -186,7 +186,6 @@
#define CLK_INFRA_PMICWRAP 11
#define CLK_INFRA_CLK_13M 12
#define CLK_INFRA_CA53SEL 13
-#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */
#define CLK_INFRA_CA72SEL 14
#define CLK_INFRA_NR_CLK 15
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
index 1d8986563fc5..0728ad07ff7a 100644
--- a/include/dt-bindings/clock/r9a07g044-cpg.h
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -32,58 +32,188 @@
#define R9A07G044_OSCCLK 21
/* R9A07G044 Module Clocks */
-#define R9A07G044_CLK_GIC600 0
-#define R9A07G044_CLK_IA55 1
-#define R9A07G044_CLK_SYC 2
-#define R9A07G044_CLK_DMAC 3
-#define R9A07G044_CLK_SYSC 4
-#define R9A07G044_CLK_MTU 5
-#define R9A07G044_CLK_GPT 6
-#define R9A07G044_CLK_ETH0 7
-#define R9A07G044_CLK_ETH1 8
-#define R9A07G044_CLK_I2C0 9
-#define R9A07G044_CLK_I2C1 10
-#define R9A07G044_CLK_I2C2 11
-#define R9A07G044_CLK_I2C3 12
-#define R9A07G044_CLK_SCIF0 13
-#define R9A07G044_CLK_SCIF1 14
-#define R9A07G044_CLK_SCIF2 15
-#define R9A07G044_CLK_SCIF3 16
-#define R9A07G044_CLK_SCIF4 17
-#define R9A07G044_CLK_SCI0 18
-#define R9A07G044_CLK_SCI1 19
-#define R9A07G044_CLK_GPIO 20
-#define R9A07G044_CLK_SDHI0 21
-#define R9A07G044_CLK_SDHI1 22
-#define R9A07G044_CLK_USB0 23
-#define R9A07G044_CLK_USB1 24
-#define R9A07G044_CLK_CANFD 25
-#define R9A07G044_CLK_SSI0 26
-#define R9A07G044_CLK_SSI1 27
-#define R9A07G044_CLK_SSI2 28
-#define R9A07G044_CLK_SSI3 29
-#define R9A07G044_CLK_MHU 30
-#define R9A07G044_CLK_OSTM0 31
-#define R9A07G044_CLK_OSTM1 32
-#define R9A07G044_CLK_OSTM2 33
-#define R9A07G044_CLK_WDT0 34
-#define R9A07G044_CLK_WDT1 35
-#define R9A07G044_CLK_WDT2 36
-#define R9A07G044_CLK_WDT_PON 37
-#define R9A07G044_CLK_GPU 38
-#define R9A07G044_CLK_ISU 39
-#define R9A07G044_CLK_H264 40
-#define R9A07G044_CLK_CRU 41
-#define R9A07G044_CLK_MIPI_DSI 42
-#define R9A07G044_CLK_LCDC 43
-#define R9A07G044_CLK_SRC 44
-#define R9A07G044_CLK_RSPI0 45
-#define R9A07G044_CLK_RSPI1 46
-#define R9A07G044_CLK_RSPI2 47
-#define R9A07G044_CLK_ADC 48
-#define R9A07G044_CLK_TSU_PCLK 49
-#define R9A07G044_CLK_SPI 50
-#define R9A07G044_CLK_MIPI_DSI_V 51
-#define R9A07G044_CLK_MIPI_DSI_PIN 52
+#define R9A07G044_CA55_SCLK 0
+#define R9A07G044_CA55_PCLK 1
+#define R9A07G044_CA55_ATCLK 2
+#define R9A07G044_CA55_GICCLK 3
+#define R9A07G044_CA55_PERICLK 4
+#define R9A07G044_CA55_ACLK 5
+#define R9A07G044_CA55_TSCLK 6
+#define R9A07G044_GIC600_GICCLK 7
+#define R9A07G044_IA55_CLK 8
+#define R9A07G044_IA55_PCLK 9
+#define R9A07G044_MHU_PCLK 10
+#define R9A07G044_SYC_CNT_CLK 11
+#define R9A07G044_DMAC_ACLK 12
+#define R9A07G044_DMAC_PCLK 13
+#define R9A07G044_OSTM0_PCLK 14
+#define R9A07G044_OSTM1_PCLK 15
+#define R9A07G044_OSTM2_PCLK 16
+#define R9A07G044_MTU_X_MCK_MTU3 17
+#define R9A07G044_POE3_CLKM_POE 18
+#define R9A07G044_GPT_PCLK 19
+#define R9A07G044_POEG_A_CLKP 20
+#define R9A07G044_POEG_B_CLKP 21
+#define R9A07G044_POEG_C_CLKP 22
+#define R9A07G044_POEG_D_CLKP 23
+#define R9A07G044_WDT0_PCLK 24
+#define R9A07G044_WDT0_CLK 25
+#define R9A07G044_WDT1_PCLK 26
+#define R9A07G044_WDT1_CLK 27
+#define R9A07G044_WDT2_PCLK 28
+#define R9A07G044_WDT2_CLK 29
+#define R9A07G044_SPI_CLK2 30
+#define R9A07G044_SPI_CLK 31
+#define R9A07G044_SDHI0_IMCLK 32
+#define R9A07G044_SDHI0_IMCLK2 33
+#define R9A07G044_SDHI0_CLK_HS 34
+#define R9A07G044_SDHI0_ACLK 35
+#define R9A07G044_SDHI1_IMCLK 36
+#define R9A07G044_SDHI1_IMCLK2 37
+#define R9A07G044_SDHI1_CLK_HS 38
+#define R9A07G044_SDHI1_ACLK 39
+#define R9A07G044_GPU_CLK 40
+#define R9A07G044_GPU_AXI_CLK 41
+#define R9A07G044_GPU_ACE_CLK 42
+#define R9A07G044_ISU_ACLK 43
+#define R9A07G044_ISU_PCLK 44
+#define R9A07G044_H264_CLK_A 45
+#define R9A07G044_H264_CLK_P 46
+#define R9A07G044_CRU_SYSCLK 47
+#define R9A07G044_CRU_VCLK 48
+#define R9A07G044_CRU_PCLK 49
+#define R9A07G044_CRU_ACLK 50
+#define R9A07G044_MIPI_DSI_PLLCLK 51
+#define R9A07G044_MIPI_DSI_SYSCLK 52
+#define R9A07G044_MIPI_DSI_ACLK 53
+#define R9A07G044_MIPI_DSI_PCLK 54
+#define R9A07G044_MIPI_DSI_VCLK 55
+#define R9A07G044_MIPI_DSI_LPCLK 56
+#define R9A07G044_LCDC_CLK_A 57
+#define R9A07G044_LCDC_CLK_P 58
+#define R9A07G044_LCDC_CLK_D 59
+#define R9A07G044_SSI0_PCLK2 60
+#define R9A07G044_SSI0_PCLK_SFR 61
+#define R9A07G044_SSI1_PCLK2 62
+#define R9A07G044_SSI1_PCLK_SFR 63
+#define R9A07G044_SSI2_PCLK2 64
+#define R9A07G044_SSI2_PCLK_SFR 65
+#define R9A07G044_SSI3_PCLK2 66
+#define R9A07G044_SSI3_PCLK_SFR 67
+#define R9A07G044_SRC_CLKP 68
+#define R9A07G044_USB_U2H0_HCLK 69
+#define R9A07G044_USB_U2H1_HCLK 70
+#define R9A07G044_USB_U2P_EXR_CPUCLK 71
+#define R9A07G044_USB_PCLK 72
+#define R9A07G044_ETH0_CLK_AXI 73
+#define R9A07G044_ETH0_CLK_CHI 74
+#define R9A07G044_ETH1_CLK_AXI 75
+#define R9A07G044_ETH1_CLK_CHI 76
+#define R9A07G044_I2C0_PCLK 77
+#define R9A07G044_I2C1_PCLK 78
+#define R9A07G044_I2C2_PCLK 79
+#define R9A07G044_I2C3_PCLK 80
+#define R9A07G044_SCIF0_CLK_PCK 81
+#define R9A07G044_SCIF1_CLK_PCK 82
+#define R9A07G044_SCIF2_CLK_PCK 83
+#define R9A07G044_SCIF3_CLK_PCK 84
+#define R9A07G044_SCIF4_CLK_PCK 85
+#define R9A07G044_SCI0_CLKP 86
+#define R9A07G044_SCI1_CLKP 87
+#define R9A07G044_IRDA_CLKP 88
+#define R9A07G044_RSPI0_CLKB 89
+#define R9A07G044_RSPI1_CLKB 90
+#define R9A07G044_RSPI2_CLKB 91
+#define R9A07G044_CANFD_PCLK 92
+#define R9A07G044_GPIO_HCLK 93
+#define R9A07G044_ADC_ADCLK 94
+#define R9A07G044_ADC_PCLK 95
+#define R9A07G044_TSU_PCLK 96
+
+/* R9A07G044 Resets */
+#define R9A07G044_CA55_RST_1_0 0
+#define R9A07G044_CA55_RST_1_1 1
+#define R9A07G044_CA55_RST_3_0 2
+#define R9A07G044_CA55_RST_3_1 3
+#define R9A07G044_CA55_RST_4 4
+#define R9A07G044_CA55_RST_5 5
+#define R9A07G044_CA55_RST_6 6
+#define R9A07G044_CA55_RST_7 7
+#define R9A07G044_CA55_RST_8 8
+#define R9A07G044_CA55_RST_9 9
+#define R9A07G044_CA55_RST_10 10
+#define R9A07G044_CA55_RST_11 11
+#define R9A07G044_CA55_RST_12 12
+#define R9A07G044_GIC600_GICRESET_N 13
+#define R9A07G044_GIC600_DBG_GICRESET_N 14
+#define R9A07G044_IA55_RESETN 15
+#define R9A07G044_MHU_RESETN 16
+#define R9A07G044_DMAC_ARESETN 17
+#define R9A07G044_DMAC_RST_ASYNC 18
+#define R9A07G044_SYC_RESETN 19
+#define R9A07G044_OSTM0_PRESETZ 20
+#define R9A07G044_OSTM1_PRESETZ 21
+#define R9A07G044_OSTM2_PRESETZ 22
+#define R9A07G044_MTU_X_PRESET_MTU3 23
+#define R9A07G044_POE3_RST_M_REG 24
+#define R9A07G044_GPT_RST_C 25
+#define R9A07G044_POEG_A_RST 26
+#define R9A07G044_POEG_B_RST 27
+#define R9A07G044_POEG_C_RST 28
+#define R9A07G044_POEG_D_RST 29
+#define R9A07G044_WDT0_PRESETN 30
+#define R9A07G044_WDT1_PRESETN 31
+#define R9A07G044_WDT2_PRESETN 32
+#define R9A07G044_SPI_RST 33
+#define R9A07G044_SDHI0_IXRST 34
+#define R9A07G044_SDHI1_IXRST 35
+#define R9A07G044_GPU_RESETN 36
+#define R9A07G044_GPU_AXI_RESETN 37
+#define R9A07G044_GPU_ACE_RESETN 38
+#define R9A07G044_ISU_ARESETN 39
+#define R9A07G044_ISU_PRESETN 40
+#define R9A07G044_H264_X_RESET_VCP 41
+#define R9A07G044_H264_CP_PRESET_P 42
+#define R9A07G044_CRU_CMN_RSTB 43
+#define R9A07G044_CRU_PRESETN 44
+#define R9A07G044_CRU_ARESETN 45
+#define R9A07G044_MIPI_DSI_CMN_RSTB 46
+#define R9A07G044_MIPI_DSI_ARESET_N 47
+#define R9A07G044_MIPI_DSI_PRESET_N 48
+#define R9A07G044_LCDC_RESET_N 49
+#define R9A07G044_SSI0_RST_M2_REG 50
+#define R9A07G044_SSI1_RST_M2_REG 51
+#define R9A07G044_SSI2_RST_M2_REG 52
+#define R9A07G044_SSI3_RST_M2_REG 53
+#define R9A07G044_SRC_RST 54
+#define R9A07G044_USB_U2H0_HRESETN 55
+#define R9A07G044_USB_U2H1_HRESETN 56
+#define R9A07G044_USB_U2P_EXL_SYSRST 57
+#define R9A07G044_USB_PRESETN 58
+#define R9A07G044_ETH0_RST_HW_N 59
+#define R9A07G044_ETH1_RST_HW_N 60
+#define R9A07G044_I2C0_MRST 61
+#define R9A07G044_I2C1_MRST 62
+#define R9A07G044_I2C2_MRST 63
+#define R9A07G044_I2C3_MRST 64
+#define R9A07G044_SCIF0_RST_SYSTEM_N 65
+#define R9A07G044_SCIF1_RST_SYSTEM_N 66
+#define R9A07G044_SCIF2_RST_SYSTEM_N 67
+#define R9A07G044_SCIF3_RST_SYSTEM_N 68
+#define R9A07G044_SCIF4_RST_SYSTEM_N 69
+#define R9A07G044_SCI0_RST 70
+#define R9A07G044_SCI1_RST 71
+#define R9A07G044_IRDA_RST 72
+#define R9A07G044_RSPI0_RST 73
+#define R9A07G044_RSPI1_RST 74
+#define R9A07G044_RSPI2_RST 75
+#define R9A07G044_CANFD_RSTP_N 76
+#define R9A07G044_CANFD_RSTC_N 77
+#define R9A07G044_GPIO_RSTN 78
+#define R9A07G044_GPIO_PORT_RESETN 79
+#define R9A07G044_GPIO_SPARE_RESETN 80
+#define R9A07G044_ADC_PRESETN 81
+#define R9A07G044_ADC_ADRST_N 82
+#define R9A07G044_TSU_PRESETN 83
#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
diff --git a/include/dt-bindings/interconnect/qcom,sc7280.h b/include/dt-bindings/interconnect/qcom,sc7280.h
new file mode 100644
index 000000000000..21b000443999
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc7280.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC7280 interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_PCIE_0 4
+#define MASTER_PCIE_1 5
+#define MASTER_SDCC_1 6
+#define MASTER_SDCC_2 7
+#define MASTER_SDCC_4 8
+#define MASTER_UFS_MEM 9
+#define MASTER_USB2 10
+#define MASTER_USB3_0 11
+#define SLAVE_A1NOC_SNOC 12
+#define SLAVE_ANOC_PCIE_GEM_NOC 13
+#define SLAVE_SERVICE_A1NOC 14
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_A2NOC_CFG 1
+#define MASTER_CNOC_A2NOC 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define SLAVE_A2NOC_SNOC 6
+#define SLAVE_SERVICE_A2NOC 7
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_CNOC3_CNOC2 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_CAMERA_CFG 4
+#define SLAVE_CLK_CTL 5
+#define SLAVE_CDSP_CFG 6
+#define SLAVE_RBCPR_CX_CFG 7
+#define SLAVE_RBCPR_MX_CFG 8
+#define SLAVE_CRYPTO_0_CFG 9
+#define SLAVE_CX_RDPM 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_DISPLAY_CFG 12
+#define SLAVE_GFX3D_CFG 13
+#define SLAVE_HWKM 14
+#define SLAVE_IMEM_CFG 15
+#define SLAVE_IPA_CFG 16
+#define SLAVE_IPC_ROUTER_CFG 17
+#define SLAVE_LPASS 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PKA_WRAPPER_CFG 25
+#define SLAVE_PMU_WRAPPER_CFG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_SDCC_1 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SECURITY 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB2 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_CNOC2_CNOC3 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_SNOC_CFG 46
+
+#define MASTER_CNOC2_CNOC3 0
+#define MASTER_GEM_NOC_CNOC 1
+#define MASTER_GEM_NOC_PCIE_SNOC 2
+#define SLAVE_AOSS 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC3_CNOC2 5
+#define SLAVE_CNOC_A2NOC 6
+#define SLAVE_DDRSS_CFG 7
+#define SLAVE_BOOT_IMEM 8
+#define SLAVE_IMEM 9
+#define SLAVE_PIMEM 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_MSS_PROC_MS_MPU_CFG 11
+#define SLAVE_MCDMA_MS_MPU_CFG 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define SLAVE_LPASS_CORE_CFG 1
+#define SLAVE_LPASS_LPI_CFG 2
+#define SLAVE_LPASS_MPU_CFG 3
+#define SLAVE_LPASS_TOP_CFG 4
+#define SLAVE_SERVICES_LPASS_AML_NOC 5
+#define SLAVE_SERVICE_LPASS_AG_NOC 6
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_PIMEM 3
+#define MASTER_GIC 4
+#define SLAVE_SNOC_GEM_NOC_GC 5
+#define SLAVE_SNOC_GEM_NOC_SF 6
+#define SLAVE_SERVICE_SNOC 7
+
+#endif
diff --git a/include/dt-bindings/leds/rt4831-backlight.h b/include/dt-bindings/leds/rt4831-backlight.h
new file mode 100644
index 000000000000..125c6351bba0
--- /dev/null
+++ b/include/dt-bindings/leds/rt4831-backlight.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for rt4831 backlight bindings.
+ *
+ * Copyright (C) 2020, Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#ifndef _DT_BINDINGS_RT4831_BACKLIGHT_H
+#define _DT_BINDINGS_RT4831_BACKLIGHT_H
+
+#define RT4831_BLOVPLVL_17V 0
+#define RT4831_BLOVPLVL_21V 1
+#define RT4831_BLOVPLVL_25V 2
+#define RT4831_BLOVPLVL_29V 3
+
+#define RT4831_BLED_CH1EN (1 << 0)
+#define RT4831_BLED_CH2EN (1 << 1)
+#define RT4831_BLED_CH3EN (1 << 2)
+#define RT4831_BLED_CH4EN (1 << 3)
+#define RT4831_BLED_ALLCHEN ((1 << 4) - 1)
+
+#endif /* _DT_BINDINGS_RT4831_BACKLIGHT_H */
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index 4c23eefed5f3..eb91a6c05b71 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -29,5 +29,6 @@
#define IPCC_CLIENT_PCIE1 14
#define IPCC_CLIENT_PCIE2 15
#define IPCC_CLIENT_SPSS 16
+#define IPCC_CLIENT_WPSS 24
#endif
diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h
new file mode 100644
index 000000000000..eca9448df228
--- /dev/null
+++ b/include/dt-bindings/mfd/qcom-pm8008.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H
+#define __DT_BINDINGS_MFD_QCOM_PM8008_H
+
+/* PM8008 IRQ numbers */
+#define PM8008_IRQ_MISC_UVLO 0
+#define PM8008_IRQ_MISC_OVLO 1
+#define PM8008_IRQ_MISC_OTST2 2
+#define PM8008_IRQ_MISC_OTST3 3
+#define PM8008_IRQ_MISC_LDO_OCP 4
+#define PM8008_IRQ_TEMP_ALARM 5
+#define PM8008_IRQ_GPIO1 6
+#define PM8008_IRQ_GPIO2 7
+
+#endif
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 0359bfdc9119..93064c750c8c 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -1,7 +1,7 @@
/*
* This header provides constants for hisilicon pinctrl bindings.
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
* Copyright (c) 2015 Linaro Limited.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/dt-bindings/power/imx8mm-power.h b/include/dt-bindings/power/imx8mm-power.h
new file mode 100644
index 000000000000..fc9c2e16aadc
--- /dev/null
+++ b/include/dt-bindings/power/imx8mm-power.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MM_POWER_H__
+#define __DT_BINDINGS_IMX8MM_POWER_H__
+
+#define IMX8MM_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MM_POWER_DOMAIN_PCIE 1
+#define IMX8MM_POWER_DOMAIN_OTG1 2
+#define IMX8MM_POWER_DOMAIN_OTG2 3
+#define IMX8MM_POWER_DOMAIN_GPUMIX 4
+#define IMX8MM_POWER_DOMAIN_GPU 5
+#define IMX8MM_POWER_DOMAIN_VPUMIX 6
+#define IMX8MM_POWER_DOMAIN_VPUG1 7
+#define IMX8MM_POWER_DOMAIN_VPUG2 8
+#define IMX8MM_POWER_DOMAIN_VPUH1 9
+#define IMX8MM_POWER_DOMAIN_DISPMIX 10
+#define IMX8MM_POWER_DOMAIN_MIPI 11
+
+#endif
diff --git a/include/dt-bindings/power/imx8mn-power.h b/include/dt-bindings/power/imx8mn-power.h
new file mode 100644
index 000000000000..102ee85a9b62
--- /dev/null
+++ b/include/dt-bindings/power/imx8mn-power.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Compass Electronics Group, LLC
+ */
+
+#ifndef __DT_BINDINGS_IMX8MN_POWER_H__
+#define __DT_BINDINGS_IMX8MN_POWER_H__
+
+#define IMX8MN_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MN_POWER_DOMAIN_OTG1 1
+#define IMX8MN_POWER_DOMAIN_GPUMIX 2
+#define IMX8MN_POWER_DOMAIN_DISPMIX 3
+#define IMX8MN_POWER_DOMAIN_MIPI 4
+
+#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index eedb5d94c020..8b5708bb9671 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -81,6 +81,19 @@
#define SC7280_LCX 7
#define SC7280_MSS 8
+/* SC8180X Power Domain Indexes */
+#define SC8180X_CX 0
+#define SC8180X_CX_AO 1
+#define SC8180X_EBI 2
+#define SC8180X_GFX 3
+#define SC8180X_LCX 4
+#define SC8180X_LMX 5
+#define SC8180X_MMCX 6
+#define SC8180X_MMCX_AO 7
+#define SC8180X_MSS 8
+#define SC8180X_MX 9
+#define SC8180X_MX_AO 10
+
/* SDM845 Power Domain performance levels */
#define RPMH_REGULATOR_LEVEL_RETENTION 16
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
@@ -95,6 +108,14 @@
#define RPMH_REGULATOR_LEVEL_TURBO 384
#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+/* MDM9607 Power Domains */
+#define MDM9607_VDDCX 0
+#define MDM9607_VDDCX_AO 1
+#define MDM9607_VDDCX_VFL 2
+#define MDM9607_VDDMX 3
+#define MDM9607_VDDMX_AO 4
+#define MDM9607_VDDMX_VFL 5
+
/* MSM8939 Power Domains */
#define MSM8939_VDDMDCX 0
#define MSM8939_VDDMDCX_AO 1
diff --git a/include/dt-bindings/power/rk3568-power.h b/include/dt-bindings/power/rk3568-power.h
new file mode 100644
index 000000000000..6cc1af1a9d26
--- /dev/null
+++ b/include/dt-bindings/power/rk3568-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3568_POWER_H__
+#define __DT_BINDINGS_POWER_RK3568_POWER_H__
+
+/* VD_CORE */
+#define RK3568_PD_CPU_0 0
+#define RK3568_PD_CPU_1 1
+#define RK3568_PD_CPU_2 2
+#define RK3568_PD_CPU_3 3
+#define RK3568_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3568_PD_PMU 5
+
+/* VD_NPU */
+#define RK3568_PD_NPU 6
+
+/* VD_GPU */
+#define RK3568_PD_GPU 7
+
+/* VD_LOGIC */
+#define RK3568_PD_VI 8
+#define RK3568_PD_VO 9
+#define RK3568_PD_RGA 10
+#define RK3568_PD_VPU 11
+#define RK3568_PD_CENTER 12
+#define RK3568_PD_RKVDEC 13
+#define RK3568_PD_RKVENC 14
+#define RK3568_PD_PIPE 15
+#define RK3568_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index f64b5d2e6efd..66c21ab03eef 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -129,6 +129,8 @@
#define TX_CODEC_DMA_TX_5 124
#define RX_CODEC_DMA_RX_6 125
#define RX_CODEC_DMA_RX_7 126
+#define QUINARY_MI2S_RX 127
+#define QUINARY_MI2S_TX 128
#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 524d4789af22..24b40e5c160b 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -97,6 +97,9 @@ struct kunit;
/* Maximum size of parameter description string. */
#define KUNIT_PARAM_DESC_SIZE 128
+/* Maximum size of a status comment. */
+#define KUNIT_STATUS_COMMENT_SIZE 256
+
/*
* TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a
* sub-subtest. See the "Subtests" section in
@@ -106,6 +109,18 @@ struct kunit;
#define KUNIT_SUBSUBTEST_INDENT " "
/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+ KUNIT_SUCCESS,
+ KUNIT_FAILURE,
+ KUNIT_SKIPPED,
+};
+
+/**
* struct kunit_case - represents an individual test case.
*
* @run_case: the function representing the actual test case.
@@ -148,13 +163,20 @@ struct kunit_case {
const void* (*generate_params)(const void *prev, char *desc);
/* private: internal use only. */
- bool success;
+ enum kunit_status status;
char *log;
};
-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
{
- return status ? "ok" : "not ok";
+ switch (status) {
+ case KUNIT_SKIPPED:
+ case KUNIT_SUCCESS:
+ return "ok";
+ case KUNIT_FAILURE:
+ return "not ok";
+ }
+ return "invalid";
}
/**
@@ -212,6 +234,7 @@ struct kunit_suite {
struct kunit_case *test_cases;
/* private: internal use only */
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
struct dentry *debugfs;
char *log;
};
@@ -245,19 +268,21 @@ struct kunit {
* be read after the test case finishes once all threads associated
* with the test case have terminated.
*/
- bool success; /* Read only after test_case finishes! */
spinlock_t lock; /* Guards all mutable test state. */
+ enum kunit_status status; /* Read only after test_case finishes! */
/*
* Because resources is a list that may be updated multiple times (with
* new resources) from any thread associated with a test case, we must
* protect it with some type of lock.
*/
struct list_head resources; /* Protected by lock. */
+
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
};
static inline void kunit_set_failure(struct kunit *test)
{
- WRITE_ONCE(test->success, false);
+ WRITE_ONCE(test->status, KUNIT_FAILURE);
}
void kunit_init_test(struct kunit *test, const char *name, char *log);
@@ -348,7 +373,7 @@ static inline int kunit_run_all_tests(void)
#define kunit_suite_for_each_test_case(suite, test_case) \
for (test_case = suite->test_cases; test_case->run_case; test_case++)
-bool kunit_suite_has_succeeded(struct kunit_suite *suite);
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
/*
* Like kunit_alloc_resource() below, but returns the struct kunit_resource
@@ -578,16 +603,30 @@ static inline int kunit_destroy_named_resource(struct kunit *test,
void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
/**
- * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
+ * kunit_kmalloc_array() - Like kmalloc_array() except the allocation is *test managed*.
* @test: The test context object.
+ * @n: number of elements.
* @size: The size in bytes of the desired memory.
* @gfp: flags passed to underlying kmalloc().
*
- * Just like `kmalloc(...)`, except the allocation is managed by the test case
+ * Just like `kmalloc_array(...)`, except the allocation is managed by the test case
* and is automatically cleaned up after the test case concludes. See &struct
* kunit_resource for more information.
*/
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+
+/**
+ * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
+ * @test: The test context object.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kmalloc() and kunit_kmalloc_array() for more information.
+ */
+static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc_array(test, 1, size, gfp);
+}
/**
* kunit_kfree() - Like kfree except for allocations managed by KUnit.
@@ -602,16 +641,66 @@ void kunit_kfree(struct kunit *test, const void *ptr);
* @size: The size in bytes of the desired memory.
* @gfp: flags passed to underlying kmalloc().
*
- * See kzalloc() and kunit_kmalloc() for more information.
+ * See kzalloc() and kunit_kmalloc_array() for more information.
*/
static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
{
return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
}
+/**
+ * kunit_kcalloc() - Just like kunit_kmalloc_array(), but zeroes the allocation.
+ * @test: The test context object.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kcalloc() and kunit_kmalloc_array() for more information.
+ */
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+{
+ return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+}
+
void kunit_cleanup(struct kunit *test);
-void kunit_log_append(char *log, const char *fmt, ...);
+void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...);
+
+/**
+ * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Marks the test as skipped. @fmt is output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
+ */
+#define kunit_mark_skipped(test_or_suite, fmt, ...) \
+ do { \
+ WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
+ scnprintf((test_or_suite)->status_comment, \
+ KUNIT_STATUS_COMMENT_SIZE, \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * kunit_skip() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution is halted after kunit_skip() is called.
+ */
+#define kunit_skip(test_or_suite, fmt, ...) \
+ do { \
+ kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
+ kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
+ } while (0)
/*
* printk and log to per-test or per-suite log buffer. Logging only done
@@ -776,7 +865,6 @@ void kunit_do_assertion(struct kunit *test,
do { \
typeof(left) __left = (left); \
typeof(right) __right = (right); \
- ((void)__typecheck(__left, __right)); \
\
KUNIT_ASSERTION(test, \
__left op __right, \
@@ -1130,8 +1218,8 @@ do { \
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
+ const char *__left = (left); \
+ const char *__right = (right); \
\
KUNIT_ASSERTION(test, \
strcmp(__left, __right) op 0, \
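A hypothetical test case combining the new skip and array-allocation helpers; CONFIG_SOME_FEATURE and the function name are placeholders, only kunit_skip(), kunit_kcalloc() and the assertion macros come from the API above.

static void example_feature_test(struct kunit *test)
{
	int *buf;

	if (!IS_ENABLED(CONFIG_SOME_FEATURE))
		kunit_skip(test, "CONFIG_SOME_FEATURE is not enabled");

	/* test-managed, zeroed allocation; freed automatically when the case ends */
	buf = kunit_kcalloc(test, 16, sizeof(*buf), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
	KUNIT_EXPECT_EQ(test, buf[0], 0);
}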
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b338613fb536..72e4f7fd268c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -260,9 +260,12 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
+void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size);
#else
static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
+static inline void
+acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { }
#endif
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
@@ -551,8 +554,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
-#define OSC_SB_PRM_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_PRM_SUPPORT 0x00200000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 1a12baa58e40..f1f0842a2cb2 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -34,9 +34,8 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
-void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
- const u32 *id_in);
+int iort_dma_get_ranges(struct device *dev, u64 *size);
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
@@ -48,11 +47,10 @@ static inline struct irq_domain *iort_get_device_domain(
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
-static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
- u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure_id(
- struct device *dev, const u32 *id_in)
-{ return NULL; }
+static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
+{ return -ENODEV; }
+static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
+{ return -ENODEV; }
static inline
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h
new file mode 100644
index 000000000000..1eb8ee5b0e5f
--- /dev/null
+++ b/include/linux/acpi_viot.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_VIOT_H__
+#define __ACPI_VIOT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_VIOT
+void __init acpi_viot_init(void);
+int viot_iommu_configure(struct device *dev);
+#else
+static inline void acpi_viot_init(void) {}
+static inline int viot_iommu_configure(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ACPI_VIOT_H__ */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 11e555cfaecb..f180240dc95f 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -37,6 +37,7 @@ bool topology_scale_freq_invariant(void);
enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
+ SCALE_FREQ_SOURCE_CPPC,
};
struct scale_freq_data {
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
new file mode 100644
index 000000000000..505c679b6a9b
--- /dev/null
+++ b/include/linux/arm_ffa.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_ARM_FFA_H
+#define _LINUX_ARM_FFA_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+/* FFA Bus/Device/Driver related */
+struct ffa_device {
+ int vm_id;
+ bool mode_32bit;
+ uuid_t uuid;
+ struct device dev;
+};
+
+#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
+
+struct ffa_device_id {
+ uuid_t uuid;
+};
+
+struct ffa_driver {
+ const char *name;
+ int (*probe)(struct ffa_device *sdev);
+ void (*remove)(struct ffa_device *sdev);
+ const struct ffa_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
+
+static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
+{
+ fdev->dev.driver_data = data;
+}
+
+#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id);
+void ffa_device_unregister(struct ffa_device *ffa_dev);
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name);
+void ffa_driver_unregister(struct ffa_driver *driver);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev);
+
+#else
+static inline
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
+{
+ return NULL;
+}
+
+static inline void ffa_device_unregister(struct ffa_device *dev) {}
+
+static inline int
+ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
+
+static inline
+bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
+
+static inline
+const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_ARM_FFA_TRANSPORT */
+
+#define ffa_register(driver) \
+ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define ffa_unregister(driver) \
+ ffa_driver_unregister(driver)
+
+/**
+ * module_ffa_driver() - Helper macro for registering a psa_ffa driver
+ * @__ffa_driver: ffa_driver structure
+ *
+ * Helper macro for psa_ffa drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_ffa_driver(__ffa_driver) \
+ module_driver(__ffa_driver, ffa_register, ffa_unregister)
+
+/* FFA transport related */
+struct ffa_partition_info {
+ u16 id;
+ u16 exec_ctxt;
+/* partition supports receipt of direct requests */
+#define FFA_PARTITION_DIRECT_RECV BIT(0)
+/* partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_SEND BIT(1)
+/* partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+ u32 properties;
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
+struct ffa_send_direct_data {
+ unsigned long data0; /* w3/x3 */
+ unsigned long data1; /* w4/x4 */
+ unsigned long data2; /* w5/x5 */
+ unsigned long data3; /* w6/x6 */
+ unsigned long data4; /* w7/x7 */
+};
+
+struct ffa_mem_region_addr_range {
+ /* The base IPA of the constituent memory region, aligned to 4 kiB */
+ u64 address;
+ /* The number of 4 kiB pages in the constituent memory region. */
+ u32 pg_cnt;
+ u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+ /*
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `struct ffa_mem_region_addr_range`.
+ */
+ u32 total_pg_cnt;
+ /* The number of constituents included in this memory region range */
+ u32 addr_range_cnt;
+ u64 reserved;
+ /** An array of `addr_range_cnt` memory region constituents. */
+ struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+ /* The ID of the VM to which the memory is being given or shared. */
+ u16 receiver;
+ /*
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+#define FFA_MEM_EXEC BIT(3)
+#define FFA_MEM_NO_EXEC BIT(2)
+#define FFA_MEM_RW BIT(1)
+#define FFA_MEM_RO BIT(0)
+ u8 attrs;
+ /*
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+ u8 flag;
+ u32 composite_off;
+ /*
+ * Offset in bytes from the start of the outer `ffa_memory_region` to
+ * an `struct ffa_mem_region_addr_range`.
+ */
+ u64 reserved;
+};
+
+struct ffa_mem_region {
+ /* The ID of the VM/owner which originally sent the memory region */
+ u16 sender_id;
+#define FFA_MEM_NORMAL BIT(5)
+#define FFA_MEM_DEVICE BIT(4)
+
+#define FFA_MEM_WRITE_BACK (3 << 2)
+#define FFA_MEM_NON_CACHEABLE (1 << 2)
+
+#define FFA_DEV_nGnRnE (0 << 2)
+#define FFA_DEV_nGnRE (1 << 2)
+#define FFA_DEV_nGRE (2 << 2)
+#define FFA_DEV_GRE (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE (0)
+#define FFA_MEM_OUTER_SHAREABLE (2)
+#define FFA_MEM_INNER_SHAREABLE (3)
+ u8 attributes;
+ u8 reserved_0;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
+ /* Flags to control behaviour of the transaction. */
+ u32 flags;
+#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h) \
+ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+ /*
+ * A globally-unique ID assigned by the hypervisor for a region
+ * of memory being sent between VMs.
+ */
+ u64 handle;
+ /*
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ u64 tag;
+ u32 reserved_1;
+ /*
+ * The number of `ffa_mem_region_attributes` entries included in this
+ * transaction.
+ */
+ u32 ep_count;
+ /*
+ * An array of endpoint memory access descriptors.
+ * Each one specifies a memory region offset, an endpoint and the
+ * attributes with which this memory region should be mapped in that
+ * endpoint's page table.
+ */
+ struct ffa_mem_region_attributes ep_mem_access[];
+};
+
+#define COMPOSITE_OFFSET(x) \
+ (offsetof(struct ffa_mem_region, ep_mem_access[x]))
+#define CONSTITUENTS_OFFSET(x) \
+ (offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
+ (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+struct ffa_mem_ops_args {
+ bool use_txbuf;
+ u32 nattrs;
+ u32 flags;
+ u64 tag;
+ u64 g_handle;
+ struct scatterlist *sg;
+ struct ffa_mem_region_attributes *attrs;
+};
+
+struct ffa_dev_ops {
+ u32 (*api_version_get)(void);
+ int (*partition_info_get)(const char *uuid_str,
+ struct ffa_partition_info *buffer);
+ void (*mode_32bit_set)(struct ffa_device *dev);
+ int (*sync_send_receive)(struct ffa_device *dev,
+ struct ffa_send_direct_data *data);
+ int (*memory_reclaim)(u64 g_handle, u32 flags);
+ int (*memory_share)(struct ffa_device *dev,
+ struct ffa_mem_ops_args *args);
+};
+
+#endif /* _LINUX_ARM_FFA_H */
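A minimal FFA client driver sketch against the bus API above; the UUID, driver name and message payload are made up, and error handling is reduced to the essentials.

static const struct ffa_device_id example_ffa_ids[] = {
	{ UUID_INIT(0x12345678, 0x1234, 0x5678,
		    0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0) },
	{}
};

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	const struct ffa_dev_ops *ops = ffa_dev_ops_get(ffa_dev);
	struct ffa_send_direct_data data = { .data0 = 0x1 };

	if (!ops)
		return -ENODEV;

	/* one synchronous direct request/response exchange with the partition */
	return ops->sync_send_receive(ffa_dev, &data);
}

static void example_ffa_remove(struct ffa_device *ffa_dev)
{
}

static struct ffa_driver example_ffa_driver = {
	.name		= "example-ffa",
	.probe		= example_ffa_probe,
	.remove		= example_ffa_remove,
	.id_table	= example_ffa_ids,
};
module_ffa_driver(example_ffa_driver);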
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
index 4cc40201273e..83ad775ad0aa 100644
--- a/include/linux/ascii85.h
+++ b/include/linux/ascii85.h
@@ -8,7 +8,8 @@
#ifndef _ASCII85_H_
#define _ASCII85_H_
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/types.h>
#define ASCII85_BUFSZ 6
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index b9f3c246c3c9..37048438872c 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -30,6 +30,8 @@
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
+#define FC_APPID_LEN 129
+
#ifdef CONFIG_BLK_CGROUP
@@ -55,6 +57,9 @@ struct blkcg {
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
struct list_head all_blkcgs_node;
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
+ char fc_app_id[FC_APPID_LEN];
+#endif
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
#endif
@@ -660,4 +665,62 @@ static inline void blk_cgroup_bio_start(struct bio *bio) { }
#endif /* CONFIG_BLOCK */
#endif /* CONFIG_BLK_CGROUP */
+
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
+/*
+ * Sets the fc_app_id field associated with the blkcg
+ * @app_id: application identifier
+ * @cgrp_id: cgroup id
+ * @app_id_len: size of application identifier
+ */
+static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
+{
+ struct cgroup *cgrp;
+ struct cgroup_subsys_state *css;
+ struct blkcg *blkcg;
+ int ret = 0;
+
+ if (app_id_len > FC_APPID_LEN)
+ return -EINVAL;
+
+ cgrp = cgroup_get_from_id(cgrp_id);
+ if (!cgrp)
+ return -ENOENT;
+ css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
+ if (!css) {
+ ret = -ENOENT;
+ goto out_cgrp_put;
+ }
+ blkcg = css_to_blkcg(css);
+ /*
+ * There is a slight race condition on setting the appid.
+ * Worst case an I/O may not find the right id.
+ * This is no different from the I/O we let pass while obtaining
+ * the vmid from the fabric.
+ * Adding the overhead of a lock is not necessary.
+ */
+ strlcpy(blkcg->fc_app_id, app_id, app_id_len);
+ css_put(css);
+out_cgrp_put:
+ cgroup_put(cgrp);
+ return ret;
+}
+
+/**
+ * blkcg_get_fc_appid - get the fc app identifier associated with a bio
+ * @bio: target bio
+ *
+ * On success return the fc_app_id, on failure return NULL
+ */
+static inline char *blkcg_get_fc_appid(struct bio *bio)
+{
+ if (bio && bio->bi_blkg &&
+ (bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
+ return bio->bi_blkg->blkcg->fc_app_id;
+ return NULL;
+}
+#else
+static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
+static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
+#endif /*CONFIG_BLK_CGROUP_FC_APPID*/
#endif /* _BLK_CGROUP_H */
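For illustration only (the function below is hypothetical): an FC low-level driver would query the identifier on its I/O path and copy it into whatever fabric-specific tagging it performs.

static void example_fc_tag_io(struct bio *bio)
{
	const char *app_id = blkcg_get_fc_appid(bio);

	if (app_id)
		pr_debug("tagging FC I/O with appid %s\n", app_id);
	/* otherwise submit the I/O untagged */
}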
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index fd2de2b422ed..1d18447ebebc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -439,8 +439,6 @@ enum {
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
void *queuedata);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
- void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b05f2fd495fc..290f9061b29a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -351,9 +351,6 @@ enum req_opf {
/* reset all the zone present on the device */
REQ_OP_ZONE_RESET_ALL = 17,
- /* SCSI passthrough using struct scsi_request */
- REQ_OP_SCSI_IN = 32,
- REQ_OP_SCSI_OUT = 33,
/* Driver private requests */
REQ_OP_DRV_IN = 34,
REQ_OP_DRV_OUT = 35,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 103acc5228e7..d3afea47ade6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -57,7 +57,7 @@ struct blk_keyslot_manager;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 5
+#define BLKCG_MAX_POLS 6
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
@@ -240,42 +240,15 @@ struct request {
void *end_io_data;
};
-static inline bool blk_op_is_scsi(unsigned int op)
-{
- return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
-}
-
-static inline bool blk_op_is_private(unsigned int op)
+static inline bool blk_op_is_passthrough(unsigned int op)
{
+ op &= REQ_OP_MASK;
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-static inline bool blk_rq_is_scsi(struct request *rq)
-{
- return blk_op_is_scsi(req_op(rq));
-}
-
-static inline bool blk_rq_is_private(struct request *rq)
-{
- return blk_op_is_private(req_op(rq));
-}
-
static inline bool blk_rq_is_passthrough(struct request *rq)
{
- return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
-}
-
-static inline bool bio_is_passthrough(struct bio *bio)
-{
- unsigned op = bio_op(bio);
-
- return blk_op_is_scsi(op) || blk_op_is_private(op);
-}
-
-static inline bool blk_op_is_passthrough(unsigned int op)
-{
- return (blk_op_is_scsi(op & REQ_OP_MASK) ||
- blk_op_is_private(op & REQ_OP_MASK));
+ return blk_op_is_passthrough(req_op(rq));
}
static inline unsigned short req_get_ioprio(struct request *req)
@@ -936,10 +909,12 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *,
gfp_t);
-extern void blk_execute_rq(struct gendisk *, struct request *, int);
extern void blk_execute_rq_nowait(struct gendisk *,
struct request *, int, rq_end_io_fn *);
+blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
+ int at_head);
+
/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 2696eb0fc149..abe089c27529 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -16,6 +16,26 @@
#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
+/**
+ * xbc_calc_checksum() - Calculate checksum of bootconfig
+ * @data: Bootconfig data.
+ * @size: The size of the bootconfig data.
+ *
+ * Calculate the checksum value of the bootconfig data.
+ * The checksum will be used with the BOOTCONFIG_MAGIC and the size for
+ * embedding the bootconfig in the initrd image.
+ */
+static inline __init u32 xbc_calc_checksum(void *data, u32 size)
+{
+ unsigned char *p = data;
+ u32 ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
+
/* XBC tree node */
struct xbc_node {
u16 next;
@@ -71,7 +91,7 @@ static inline __init bool xbc_node_is_key(struct xbc_node *node)
*/
static inline __init bool xbc_node_is_array(struct xbc_node *node)
{
- return xbc_node_is_value(node) && node->next != 0;
+ return xbc_node_is_value(node) && node->child != 0;
}
/**
@@ -80,6 +100,8 @@ static inline __init bool xbc_node_is_array(struct xbc_node *node)
*
* Test the @node is a leaf key node which is a key node and has a value node
* or no child. Returns true if it is a leaf node, or false if not.
+ * Note that the leaf node can have subkey nodes in addition to the
+ * value node.
*/
static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
{
@@ -130,6 +152,23 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
}
/**
+ * xbc_node_get_subkey() - Return the first subkey node if exists
+ * @node: Parent node
+ *
+ * Return the first subkey node of the @node. If the @node has no child
+ * or only value node, this will return NULL.
+ */
+static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
+{
+ struct xbc_node *child = xbc_node_get_child(node);
+
+ if (child && xbc_node_is_value(child))
+ return xbc_node_get_next(child);
+ else
+ return child;
+}
+
+/**
* xbc_array_for_each_value() - Iterate value nodes on an array
 * @anode: An XBC arrayed value node
* @value: A value
@@ -140,7 +179,7 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
*/
#define xbc_array_for_each_value(anode, value) \
for (value = xbc_node_get_data(anode); anode != NULL ; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
@@ -149,12 +188,25 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @child: Iterated XBC node.
*
 * Iterate child nodes of @parent. Each child node is stored in @child.
+ * The @child can be a mixture of a value node and subkey nodes.
*/
#define xbc_node_for_each_child(parent, child) \
for (child = xbc_node_get_child(parent); child != NULL ; \
child = xbc_node_get_next(child))
/**
+ * xbc_node_for_each_subkey() - Iterate child subkey nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate subkey nodes of @parent. Each child node is stored in @child.
+ * The @child is only the subkey node.
+ */
+#define xbc_node_for_each_subkey(parent, child) \
+ for (child = xbc_node_get_subkey(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
 * xbc_node_for_each_array_value() - Iterate array entries of a given key
* @node: An XBC node.
* @key: A key string searched under @node
@@ -162,16 +214,16 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @value: Iterated value of array entry.
*
* Iterate array entries of given @key under @node. Each array entry node
- * is stroed to @anode and @value. If the @node doesn't have @key node,
+ * is stored to @anode and @value. If the @node doesn't have @key node,
* it does nothing.
* Note that even if the found key node has only one value (not array)
- * this executes block once. Hoever, if the found key node has no value
+ * this executes block once. However, if the found key node has no value
* (key-only node), this does nothing. So don't use this for testing the
* key-value pair existence.
*/
#define xbc_node_for_each_array_value(node, key, anode, value) \
for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
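A usage sketch for the new subkey iterator, assuming a hypothetical "ftrace.event" key; it visits only subkey children and skips any value node, as described for xbc_node_get_subkey() above.

static int __init example_dump_subkeys(void)
{
	struct xbc_node *node = xbc_find_node("ftrace.event");
	struct xbc_node *subkey;

	if (node)
		xbc_node_for_each_subkey(node, subkey)
			pr_info("bootconfig subkey: %s\n",
				xbc_node_get_data(subkey));
	return 0;
}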
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
new file mode 100644
index 000000000000..2bc8b1f69c93
--- /dev/null
+++ b/include/linux/bootmem_info.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BOOTMEM_INFO_H
+#define __LINUX_BOOTMEM_INFO_H
+
+#include <linux/mm.h>
+
+/*
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
+ */
+enum {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+
+void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type);
+void put_page_bootmem(struct page *page);
+
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy will be marked reserved already in the memmap. For those
+ * pages, we can call this function to free it to buddy allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ unsigned long magic = (unsigned long)page->freelist;
+
+ /*
+ * The reserve_bootmem_region sets the reserved flag on bootmem
+ * pages.
+ */
+ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
+
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ VM_BUG_ON_PAGE(1, page);
+}
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline void put_page_bootmem(struct page *page)
+{
+}
+
+static inline void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type)
+{
+}
+
+static inline void free_bootmem_page(struct page *page)
+{
+ free_reserved_page(page);
+}
+#endif
+
+#endif /* __LINUX_BOOTMEM_INFO_H */
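As a rough usage sketch of free_bootmem_page() above, a caller that knows a run of struct pages was allocated from memblock could hand them back to the buddy allocator as below; the range-walking helper is invented for the example and is not part of this patch.

#include <linux/bootmem_info.h>

/* Hypothetical helper: release a range of memblock-backed pages. */
static void example_free_bootmem_range(struct page *start, struct page *end)
{
	struct page *page;

	for (page = start; page < end; page++)
		free_bootmem_page(page);	/* put_page_bootmem() or free_reserved_page() */
}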
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 8b77d08d4b47..6c9b10d82c80 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void)
{
int i;
- for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
- if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+ for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+ if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
continue;
this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f309fc1509f2..e8e2b0393ca9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -780,6 +780,7 @@ struct bpf_jit_poke_descriptor {
void *tailcall_target;
void *tailcall_bypass;
void *bypass_addr;
+ void *aux;
union {
struct {
struct bpf_map *map;
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index a9db1eae6796..ae3ac3a2018c 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -134,4 +134,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e774ecc1cd1f..828d08afeee0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -340,8 +340,8 @@ struct bpf_insn_aux_data {
};
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int sanitize_stack_off; /* stack slot to be cleared */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
@@ -414,6 +414,7 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ bool explore_alu_limits;
bool allow_ptr_leaks;
bool allow_uninit_stack;
bool allow_ptr_to_map_access;
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 40232f90db6e..3b7a0ff4642f 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -8,5 +8,13 @@
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
#endif
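A hedged sketch of the new buffer-based parser; the caller below and its note buffer are hypothetical, and it assumes build_id_parse_buf() returns 0 on success. The "%20phN" format used here matches what the crash_core change later in this patch emits for VMCOREINFO.

#include <linux/buildid.h>
#include <linux/printk.h>

static int example_report_build_id(const void *notes, u32 notes_len)
{
	/* BUILD_ID_SIZE_MAX is 20 bytes, i.e. a SHA-1 style GNU build ID. */
	unsigned char build_id[BUILD_ID_SIZE_MAX];
	int ret;

	ret = build_id_parse_buf(notes, build_id, notes_len);
	if (ret)
		return ret;

	pr_info("build id: %20phN\n", build_id);
	return 0;
}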
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 2cc237e3e8b3..7bf60454a313 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -698,6 +698,7 @@ static inline void cgroup_kthread_ready(void)
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -750,6 +751,11 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
+
+static inline struct cgroup *cgroup_get_from_id(u64 id)
+{
+ return NULL;
+}
#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 162a2e5546a3..d83b829305c0 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -629,6 +629,12 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val);
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4221888bdcd6..c24098c7acca 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -35,12 +35,12 @@ enum compact_result {
COMPACT_CONTINUE,
/*
- * The full zone was compacted scanned but wasn't successfull to compact
+ * The full zone was compacted scanned but wasn't successful to compact
* suitable pages.
*/
COMPACT_COMPLETE,
/*
- * direct compaction has scanned part of the zone but wasn't successfull
+ * direct compaction has scanned part of the zone but wasn't successful
* to compact suitable pages.
*/
COMPACT_PARTIAL_SKIPPED,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 8855b1b702b2..c270124e4402 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -532,8 +532,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
&__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
} while (0);
/*
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index adbe76b203e2..49b0ac8b6fd3 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -13,6 +13,12 @@
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
+/*
+ * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+ * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+ * to avoid adding redundant attributes in other configurations.
+ */
+
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
@@ -46,6 +52,17 @@
#endif
/*
+ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
+ * with no_sanitize("coverage"). Prior versions of Clang support coverage
+ * instrumentation, but cannot be queried for support by the preprocessor.
+ */
+#if __has_feature(coverage_sanitizer)
+#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
+#else
+#define __no_sanitize_coverage
+#endif
+
+/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
* __has_builtin allowing us to avoid awkward version
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 5d97ef738a57..cb9217fc60af 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -122,6 +122,12 @@
#define __no_sanitize_undefined
#endif
+#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
+#define __no_sanitize_coverage __attribute__((no_sanitize_coverage))
+#else
+#define __no_sanitize_coverage
+#endif
+
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d509169860f1..e4ea86fc584d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -210,7 +210,7 @@ struct ftrace_likely_data {
/* Section for code which can't be instrumented at all */
#define noinstr \
noinline notrace __attribute((__section__(".noinstr.text"))) \
- __no_kcsan __no_sanitize_address __no_profile
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage
#endif /* __KERNEL__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 353969c7acd3..9fd719475fcd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -331,15 +331,6 @@ struct cpufreq_driver {
unsigned long capacity);
/*
- * Caches and returns the lowest driver-supported frequency greater than
- * or equal to the target frequency, subject to any driver limitations.
- * Does not set the frequency. Only to be implemented for drivers with
- * target().
- */
- unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
- unsigned int target_freq);
-
- /*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
*
@@ -371,7 +362,6 @@ struct cpufreq_driver {
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
int (*exit)(struct cpufreq_policy *policy);
- void (*stop_cpu)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 47e13582d9fc..f39b34b13871 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -171,7 +171,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
- CPUHP_AP_PERF_S390_CFD_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bfc4690de4f4..f3689a52bfd0 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -259,7 +259,7 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
+ * @mask: the cpumask pointer
* @start: the start location
*
* The implementation does not assume any bit in @mask is set (including @start).
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 206bde8308b2..de62a722431e 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -38,8 +38,12 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OSRELEASE(value) \
vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_BUILD_ID(value) \
- vmcoreinfo_append_str("BUILD-ID=%s\n", value)
+#define VMCOREINFO_BUILD_ID() \
+ ({ \
+ static_assert(sizeof(vmlinux_build_id) == 20); \
+ vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \
+ })
+
#define VMCOREINFO_PAGESIZE(value) \
vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
@@ -69,10 +73,6 @@ extern unsigned char *vmcoreinfo_data;
extern size_t vmcoreinfo_size;
extern u32 *vmcoreinfo_note;
-/* raw contents of kernel .notes section */
-extern const void __start_notes __weak;
-extern const void __stop_notes __weak;
-
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 1fdb4343af9c..c869f1e73d75 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -112,8 +112,8 @@ void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
u32 *value);
void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
u64 *value);
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
@@ -126,8 +126,8 @@ void debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value);
void debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value);
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent, char **value);
@@ -266,13 +266,9 @@ static inline void debugfs_create_u32(const char *name, umode_t mode,
static inline void debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_ulong(const char *name,
- umode_t mode,
- struct dentry *parent,
- unsigned long *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value) { }
static inline void debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value) { }
@@ -295,12 +291,8 @@ static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
atomic_t *value)
{ }
-static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent,
- bool *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value) { }
static inline void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent,
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8d2dde23e9fb..32444686b6ff 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -18,7 +18,7 @@ enum debug_obj_state {
struct debug_obj_descr;
/**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @astate: current active state
diff --git a/include/linux/device.h b/include/linux/device.h
index 4cd200f8b47a..65d84b67b024 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -165,21 +165,12 @@ void device_remove_bin_file(struct device *dev,
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
-#else
-void *devres_alloc_node(dr_release_t release, size_t size,
- gfp_t gfp, int nid) __malloc;
-static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
-}
-#endif
void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
@@ -351,6 +342,22 @@ enum dl_dev_state {
};
/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable are determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ * device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+ DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+ DEVICE_REMOVABLE_UNKNOWN,
+ DEVICE_FIXED,
+ DEVICE_REMOVABLE,
+};
+
+/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
@@ -400,6 +407,7 @@ struct dev_links_info {
* @em_pd: device's energy model performance domain
* @pins: For device pin management.
* See Documentation/driver-api/pin-control.rst for details.
+ * @msi_lock: Lock to protect MSI mask cache and mask register
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
@@ -431,6 +439,9 @@ struct dev_links_info {
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
* @iommu: Per device generic IOMMU runtime data
+ * @removable: Whether the device can be removed from the system. This
+ * should be set by the subsystem / bus driver that discovered
+ * the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
@@ -496,6 +507,7 @@ struct device {
struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
+ raw_spinlock_t msi_lock;
struct list_head msi_list;
#endif
#ifdef CONFIG_DMA_OPS
@@ -544,6 +556,8 @@ struct device {
struct iommu_group *iommu_group;
struct dev_iommu *iommu;
+ enum device_removable removable;
+
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
@@ -780,6 +794,22 @@ static inline bool dev_has_sync_state(struct device *dev)
return false;
}
+static inline void dev_set_removable(struct device *dev,
+ enum device_removable removable)
+{
+ dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+ return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+ return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
+
/*
* High level routines for use by the bus drivers
*/
@@ -846,6 +876,8 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
+int __must_check device_driver_attach(struct device_driver *drv,
+ struct device *dev);
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
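A minimal sketch of how a discovering bus driver might use the new removable attribute; the enumeration helper and the hotplug_port flag are illustrative only and not taken from this patch.

#include <linux/device.h>

static void example_mark_removable(struct device *dev, bool hotplug_port)
{
	/* The subsystem/bus that discovered the device decides the policy. */
	dev_set_removable(dev, hotplug_port ? DEVICE_REMOVABLE : DEVICE_FIXED);

	if (dev_removable_is_valid(dev) && dev_is_removable(dev))
		dev_info(dev, "device may be removed by the user\n");
}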
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 6e75a2d689b4..758ca4694257 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -19,7 +19,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
/* The DMA API isn't _quite_ the whole story, though... */
/*
@@ -50,7 +50,7 @@ struct msi_msg;
struct device;
static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size)
+ u64 dma_limit)
{
}
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 562b885cf9c3..e1ca2080a1ff 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -212,7 +212,7 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
}
/**
- * dma_resv_exclusive - return the object's exclusive fence
+ * dma_resv_excl_fence - return the object's exclusive fence
* @obj: the reservation object
*
* Returns the exclusive fence (if any). Caller must either hold the objects
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 004736b6a9c8..93c3ca5fdafd 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -230,12 +230,6 @@ enum sum_check_flags {
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
- * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @memcpy_count: transaction counter
- * @bytes_transferred: byte counter
- */
-
-/**
* enum dma_desc_metadata_mode - per descriptor metadata mode types supported
* @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
* client driver and it is attached (via the dmaengine_desc_attach_metadata()
@@ -291,6 +285,11 @@ enum dma_desc_metadata_mode {
DESC_METADATA_ENGINE = BIT(1),
};
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 99580c22f91a..34c2175e6a1e 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -10,6 +10,9 @@ struct eeprom_93xx46_platform_data {
#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
#define EE_READONLY 0x08 /* forbid writing */
+#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */
+#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */
+#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */
unsigned int quirks;
/* Single word read transfers only; no sequential read. */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 29dbb603bc91..232daaec56e4 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -758,6 +758,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
enum ethtool_link_mode_bit_indices link_mode);
/**
+ * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller
+ * is responsible for freeing the memory of @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to a pointer that receives the vclock index array
+ *
+ * Return: the number of phc vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to start of string to update
* @fmt: Format of string to write
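A short usage sketch for ethtool_get_phc_vclocks(); it assumes the function allocates the index array and returns its length (or an error), so the caller frees it as the kernel-doc above requires. The printing helper is illustrative.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

static void example_show_phc_vclocks(struct net_device *dev)
{
	int *vclock_index = NULL;
	int i, num;

	num = ethtool_get_phc_vclocks(dev, &vclock_index);
	for (i = 0; i < num; i++)
		netdev_info(dev, "phc vclock index %d\n", vclock_index[i]);

	kfree(vclock_index);	/* caller owns the array */
}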
diff --git a/include/linux/export.h b/include/linux/export.h
index 6271a5d9c988..27d848712b90 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -140,7 +140,12 @@ struct kernel_symbol {
#define ___cond_export_sym(sym, sec, ns, enabled) \
__cond_export_sym_##enabled(sym, sec, ns)
#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+
+#ifdef __GENKSYMS__
+#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
+#else
#define __cond_export_sym_0(sym, sec, ns) /* nothing */
+#endif
#else
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 5487a80617a3..d445150c5350 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -34,6 +34,7 @@
#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
#define F2FS_MAX_QUOTAS 3
@@ -229,6 +230,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
+#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 472f97074da0..83b896044e79 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -73,6 +73,11 @@ struct ctl_table_header;
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 84e346ae766e..25109192cebe 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -6,8 +6,8 @@
#include <linux/compiler.h>
#include <linux/gfp.h>
-#define FW_ACTION_NOHOTPLUG 0
-#define FW_ACTION_HOTPLUG 1
+#define FW_ACTION_NOUEVENT 0
+#define FW_ACTION_UEVENT 1
struct firmware {
size_t size;
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 0b08ac20ab16..a6b4c07858cc 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -13,6 +13,5 @@
#include <linux/io.h>
int alt_pr_register(struct device *dev, void __iomem *reg_base);
-void alt_pr_unregister(struct device *dev);
#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 817600a32c93..6c3c28806ff1 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -11,7 +11,7 @@ struct fpga_bridge;
/**
* struct fpga_bridge_ops - ops for low level FPGA bridge drivers
* @enable_show: returns the FPGA bridge's status
- * @enable_set: set a FPGA bridge as enabled or disabled
+ * @enable_set: set an FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*/
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 2bc3030a69e5..ec2cd8bfceb0 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -75,7 +75,7 @@ enum fpga_mgr_states {
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
- * struct fpga_image_info - information specific to a FPGA image
+ * struct fpga_image_info - information specific to an FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 02bf57e6f6e2..640574294216 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2768,8 +2768,14 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
-extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
+static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
+ const char *name, int flags, umode_t mode)
+{
+ return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
+ name, flags, mode);
+}
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern struct file * open_with_fake_path(const struct path *, int,
struct inode*, const struct cred *);
@@ -3241,11 +3247,8 @@ ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
/* fs/block_dev.c */
-extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
-extern void block_sync_page(struct page *page);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 37e1e8f7f08d..6b54982fc5f3 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -139,6 +139,9 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
/*
* sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 0abd9a1d2852..f6faa31289ba 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -7,12 +7,21 @@ extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+extern bool trace_osnoise_callback_enabled;
+extern void trace_osnoise_callback(bool enter);
+#endif
+
static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(true);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(true);
+#endif
}
static inline void ftrace_nmi_exit(void)
@@ -21,6 +30,10 @@ static inline void ftrace_nmi_exit(void)
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(false);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(false);
+#endif
}
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index 334dd928042b..a9f7b7faf57b 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -37,6 +37,9 @@ struct regmap;
* offset to a register/bitmask pair. If not
* given the default gpio_regmap_simple_xlate()
* is used.
+ * @drvdata: (Optional) Pointer to driver specific data which is
+ * not used by gpio-regmap but is provided "as is" to the
+ * driver callback(s).
*
* The ->reg_mask_xlate translates a given base address and GPIO offset to
* register and mask pair. The base address is one of the given register
@@ -78,13 +81,14 @@ struct gpio_regmap_config {
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask);
+
+ void *drvdata;
};
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
void gpio_regmap_unregister(struct gpio_regmap *gpio);
struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
const struct gpio_regmap_config *config);
-void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data);
void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
#endif /* _LINUX_GPIO_REGMAP_H */
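With gpio_regmap_set_drvdata() removed, driver data is now handed over at registration time through the config. A skeleton sketch follows; the private struct is hypothetical and the register-layout fields a real driver must fill are omitted for brevity.

#include <linux/err.h>
#include <linux/gpio/regmap.h>

struct example_gpio_priv {
	int bank;
};

static int example_gpio_register(struct device *dev, struct regmap *regmap,
				 struct example_gpio_priv *priv)
{
	struct gpio_regmap_config config = {
		.parent = dev,
		.regmap = regmap,
		.ngpio = 32,
		.drvdata = priv,	/* later read back via gpio_regmap_get_drvdata() */
	};

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
}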
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 8c6e8e996c87..d9a606a9fc64 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -318,14 +318,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
VM_BUG_ON(offset + len > PAGE_SIZE);
memcpy(to + offset, from, len);
+ flush_dcache_page(page);
kunmap_local(to);
}
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
- char *addr = kmap_atomic(page);
+ char *addr = kmap_local_page(page);
memset(addr + offset, 0, len);
- kunmap_atomic(addr);
+ flush_dcache_page(page);
+ kunmap_local(addr);
}
#endif /* _LINUX_HIGHMEM_H */
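On the consumer side the change is transparent; a small sketch, with invented buffer names, showing that callers no longer need their own flush_dcache_page() after filling a page through these helpers:

#include <linux/highmem.h>

static void example_fill_page(struct page *page, const void *src, size_t len)
{
	/* Both helpers now flush the dcache themselves. */
	memcpy_to_page(page, 0, src, len);
	memzero_page(page, len, PAGE_SIZE - len);
}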
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 866a0fa104c4..2fd2e91d5107 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -113,7 +113,7 @@ int hmm_range_fault(struct hmm_range *range);
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
*
* When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait forever, 1000ms i.e. 1s sounds like a long time to
* wait already.
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2a8ebe6c222e..f123e15d966e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -10,8 +10,8 @@
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma);
@@ -24,7 +24,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
}
#endif
-vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd,
unsigned int flags);
@@ -115,9 +115,34 @@ extern struct kobj_attribute shmem_enabled_attr;
extern unsigned long transparent_hugepage_flags;
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ unsigned long haddr)
+{
+ /* Don't have to check pgoff for anonymous vma */
+ if (!vma_is_anonymous(vma)) {
+ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ HPAGE_PMD_NR))
+ return false;
+ }
+
+ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ return false;
+ return true;
+}
+
+static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ /* Explicitly disabled through madvise. */
+ if ((vm_flags & VM_NOHUGEPAGE) ||
+ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ return false;
+ return true;
+}
+
/*
* to be used on vmas which are known to support THP.
- * Use transparent_hugepage_enabled otherwise
+ * Use transparent_hugepage_active otherwise
*/
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
@@ -128,15 +153,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
return false;
- if (vma->vm_flags & VM_NOHUGEPAGE)
+ if (!transhuge_vma_enabled(vma, vma->vm_flags))
return false;
if (vma_is_temporary_stack(vma))
return false;
- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- return false;
-
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
return true;
@@ -150,24 +172,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
return false;
}
-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
-
-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
-
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
-{
- /* Don't have to check pgoff for anonymous vma */
- if (!vma_is_anonymous(vma)) {
- if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
- (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
- return false;
- }
-
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
- return false;
- return true;
-}
+bool transparent_hugepage_active(struct vm_area_struct *vma);
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -283,7 +288,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags, struct dev_pagemap **pgmap);
-vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;
@@ -354,7 +359,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
return false;
}
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
{
return false;
}
@@ -365,6 +370,12 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
return false;
}
+static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ return false;
+}
+
static inline void prep_transhuge_page(struct page *page) {}
static inline bool is_transparent_hugepage(struct page *page)
@@ -430,8 +441,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
- pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
return 0;
}
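To make the new alignment test in transhuge_vma_suitable() concrete: with 4 KiB pages and 2 MiB PMDs, HPAGE_PMD_NR is 512. A file-backed VMA whose vm_start corresponds to virtual pfn 0x1200 and whose vm_pgoff is 0x200 gives 0x1200 - 0x200 = 0x1000, a multiple of 512 (0x200), so a PMD-sized mapping can line up with the file and the helper may return true (subject to the haddr range check). With vm_pgoff = 0x1ff the difference is 0x1001, which is not 512-aligned, so the helper returns false. (The numbers are only an illustration of the IS_ALIGNED() check above.)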
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8ba79dc64ab8..f7ca1a3870ea 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -29,12 +29,29 @@ typedef struct { unsigned long pd; } hugepd_t;
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * For a HugeTLB page, there is more metadata to save in the struct page, but
+ * the head struct page cannot meet our needs, so we have to abuse other tail
+ * struct pages to store the metadata. To avoid conflicts caused by
+ * subsequent use of more tail struct pages, we gather these discrete indexes
+ * of tail struct pages here.
+ */
+enum {
+ SUBPAGE_INDEX_SUBPOOL = 1, /* reuse page->private */
+#ifdef CONFIG_CGROUP_HUGETLB
+ SUBPAGE_INDEX_CGROUP, /* reuse page->private */
+ SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */
+ __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
+#endif
+ __NR_USED_SUBPAGE,
+};
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
@@ -68,7 +85,7 @@ struct resv_map {
* by a resv_map's lock. The set of regions within the resv_map represent
* reservations for huge pages, or huge pages that have already been
* instantiated within the map. The from and to elements are huge page
- * indicies into the associated mapping. from indicates the starting index
+ * indices into the associated mapping. from indicates the starting index
* of the region. to represents the first index past the end of the region.
*
* For example, a file region structure with from == 0 and to == 4 represents
@@ -515,12 +532,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* modifications require hugetlb_lock.
* HPG_freed - Set when page is on the free lists.
* Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
*/
enum hugetlb_page_flags {
HPG_restore_reserve = 0,
HPG_migratable,
HPG_temporary,
HPG_freed,
+ HPG_vmemmap_optimized,
__NR_HPAGEFLAGS,
};
@@ -566,6 +585,7 @@ HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
#ifdef CONFIG_HUGETLB_PAGE
@@ -588,6 +608,9 @@ struct hstate {
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+ unsigned int nr_free_vmemmap_pages;
+#endif
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
struct cftype cgroup_files_dfl[7];
@@ -635,13 +658,13 @@ extern unsigned int default_hstate_idx;
*/
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
- return (struct hugepage_subpool *)(hpage+1)->private;
+ return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}
static inline void hugetlb_set_page_subpool(struct page *hpage,
struct hugepage_subpool *subpool)
{
- set_page_private(hpage+1, (unsigned long)subpool);
+ set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}
static inline struct hstate *hstate_file(struct file *f)
@@ -718,8 +741,8 @@ static inline void arch_clear_hugepage_flags(struct page *page) { }
#endif
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
{
return entry;
}
@@ -774,7 +797,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
* It determines whether or not a huge page should be placed on
* movable zone or not. Movability of any huge page should be
* required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
* it is not migratable to start with. Also the size of the huge
* page should be large enough to be placed under a movable zone
* and still feasible enough to be migratable. Just the presence
@@ -875,6 +898,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+{
+ return NULL;
+}
+
static inline int isolate_or_dissolve_huge_page(struct page *page,
struct list_head *list)
{
@@ -1028,6 +1056,12 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
}
#endif /* CONFIG_HUGETLB_PAGE */
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+extern bool hugetlb_free_vmemmap_enabled;
+#else
+#define hugetlb_free_vmemmap_enabled false
+#endif
+
static inline spinlock_t *huge_pte_lock(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
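For illustration, the accessors above now index explicitly into the tail pages named by the enum: hpage[SUBPAGE_INDEX_SUBPOOL] (the first tail page) carries the subpool pointer in its page_private field. A tiny, hypothetical reader might look like this:

#include <linux/hugetlb.h>
#include <linux/printk.h>

static void example_show_subpool(struct page *head)
{
	struct hugepage_subpool *spool = hugetlb_page_subpool(head);

	if (spool)
		pr_info("huge page %px: subpool max_hpages=%ld\n",
			head, spool->max_hpages);
}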
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 0bff345c4bc6..0b8d1fdda3a1 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -21,15 +21,16 @@ struct hugetlb_cgroup;
struct resv_map;
struct file_region;
+#ifdef CONFIG_CGROUP_HUGETLB
/*
* Minimum page order trackable by hugetlb cgroup.
* At least 4 pages are necessary for all the tracking information.
- * The second tail page (hpage[2]) is the fault usage cgroup.
- * The third tail page (hpage[3]) is the reservation usage cgroup.
+ * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
+ * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
+ * is the reservation usage cgroup.
*/
-#define HUGETLB_CGROUP_MIN_ORDER 2
+#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
-#ifdef CONFIG_CGROUP_HUGETLB
enum hugetlb_memory_event {
HUGETLB_MAX,
HUGETLB_NR_MEMORY_EVENTS,
@@ -66,9 +67,9 @@ __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
if (rsvd)
- return (struct hugetlb_cgroup *)page[3].private;
+ return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
else
- return (struct hugetlb_cgroup *)page[2].private;
+ return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
}
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
@@ -90,9 +91,11 @@ static inline int __set_hugetlb_cgroup(struct page *page,
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return -1;
if (rsvd)
- page[3].private = (unsigned long)h_cg;
+ set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
+ (unsigned long)h_cg);
else
- page[2].private = (unsigned long)h_cg;
+ set_page_private(page + SUBPAGE_INDEX_CGROUP,
+ (unsigned long)h_cg);
return 0;
}
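Worked out with the indexes added in hugetlb.h: SUBPAGE_INDEX_CGROUP_RSVD is 3, so order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1) = order_base_2(4) = 2, and HUGETLB_CGROUP_MIN_ORDER remains 2 (an order-2 page has the 4 struct pages the comment asks for); the value is now derived from the enum instead of being hard-coded.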
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e8f2ac8c9c3d..3eb60a2e9e61 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -15,6 +15,7 @@
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/rtmutex.h>
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
@@ -147,6 +148,7 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count);
s32 i2c_smbus_read_byte(const struct i2c_client *client);
s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
@@ -343,7 +345,6 @@ struct i2c_client {
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-struct i2c_client *i2c_verify_client(struct device *dev);
struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
@@ -477,6 +478,13 @@ i2c_new_ancillary_device(struct i2c_client *client,
u16 default_addr);
void i2c_unregister_device(struct i2c_client *client);
+
+struct i2c_client *i2c_verify_client(struct device *dev);
+#else
+static inline struct i2c_client *i2c_verify_client(struct device *dev)
+{
+ return NULL;
+}
#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -729,6 +737,7 @@ struct i2c_adapter {
const struct i2c_adapter_quirks *quirks;
struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7199280d89ca..c525fd51652f 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -26,6 +26,7 @@ struct ad_sd_calib_data {
};
struct ad_sigma_delta;
+struct device;
struct iio_dev;
/**
@@ -132,8 +133,7 @@ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
struct spi_device *spi, const struct ad_sigma_delta_info *info);
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index 7ce8a8adad58..c582e1a14232 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -77,7 +77,7 @@ struct cros_ec_sensors_core_state {
u16 scale;
} calib[CROS_EC_SENSOR_MAX_AXIS];
s8 sign[CROS_EC_SENSOR_MAX_AXIS];
- u8 samples[CROS_EC_SAMPLE_SIZE];
+ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data);
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 33e939977444..8bdbaf3f3796 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/irqreturn.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
@@ -20,6 +21,8 @@
#include <linux/platform_data/st_sensors_pdata.h>
+#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+
/*
* Buffer size max case: 2bytes per channel, 3 channels in total +
* 8bytes timestamp channel (s64)
@@ -46,8 +49,8 @@
#define ST_SENSORS_MAX_NAME 17
#define ST_SENSORS_MAX_4WAI 8
-#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
- ch2, s, endian, rbits, sbits, addr) \
+#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, ext) \
{ \
.type = device_type, \
.modified = mod, \
@@ -63,8 +66,14 @@
.storagebits = sbits, \
.endianness = endian, \
}, \
+ .ext_info = ext, \
}
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+ ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, NULL)
+
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
st_sensors_sysfs_sampling_frequency_avail)
@@ -213,6 +222,7 @@ struct st_sensor_settings {
* struct st_sensor_data - ST sensor device status
* @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
+ * @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
* @vdd: Pointer to sensor's Vdd power supply
@@ -232,7 +242,7 @@ struct st_sensor_settings {
struct st_sensor_data {
struct device *dev;
struct iio_trigger *trig;
- struct iio_mount_matrix *mount_matrix;
+ struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
struct regulator *vdd;
@@ -317,4 +327,24 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
+/* Accelerometer */
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
+int st_accel_common_probe(struct iio_dev *indio_dev);
+void st_accel_common_remove(struct iio_dev *indio_dev);
+
+/* Gyroscope */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+void st_gyro_common_remove(struct iio_dev *indio_dev);
+
+/* Magnetometer */
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
+int st_magn_common_probe(struct iio_dev *indio_dev);
+void st_magn_common_remove(struct iio_dev *indio_dev);
+
+/* Pressure */
+const struct st_sensor_settings *st_press_get_settings(const char *name);
+int st_press_common_probe(struct iio_dev *indio_dev);
+void st_press_common_remove(struct iio_dev *indio_dev);
+
#endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 32addd5e790e..c9504e9da571 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -6,6 +6,10 @@
/**
* struct iio_dev_opaque - industrial I/O device opaque information
* @indio_dev: public industrial I/O device information
+ * @id: used to identify device internally
+ * @driver_module: used to make it harder to undercut users
+ * @info_exist_lock: lock to prevent use during removal
+ * @trig_readonly: mark the current trigger immutable
* @event_interface: event chrdevs associated with interrupt lines
* @attached_buffers: array of buffers statically attached by the driver
* @attached_buffers_cnt: number of buffers in the array of statically attached buffers
@@ -19,6 +23,10 @@
* @groupcounter: index of next attribute group
* @legacy_scan_el_group: attribute group for legacy scan elements attribute group
* @legacy_buffer_group: attribute group for legacy buffer attributes group
+ * @scan_index_timestamp: cache of the index to the timestamp
+ * @clock_id: timestamping clock posix identifier
+ * @chrdev: associated character device
+ * @flags: file ops related flags including busy flag.
* @debugfs_dentry: device specific debugfs dentry
* @cached_reg_addr: cached register address for debugfs reads
* @read_buf: read buffer to be used for the initial reg read
@@ -26,6 +34,10 @@
*/
struct iio_dev_opaque {
struct iio_dev indio_dev;
+ int id;
+ struct module *driver_module;
+ struct mutex info_exist_lock;
+ bool trig_readonly;
struct iio_event_interface *event_interface;
struct iio_buffer **attached_buffers;
unsigned int attached_buffers_cnt;
@@ -38,6 +50,12 @@ struct iio_dev_opaque {
int groupcounter;
struct attribute_group legacy_scan_el_group;
struct attribute_group legacy_buffer_group;
+
+ unsigned int scan_index_timestamp;
+ clockid_t clock_id;
+ struct cdev chrdev;
+ unsigned long flags;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_dentry;
unsigned cached_reg_addr;
@@ -46,7 +64,7 @@ struct iio_dev_opaque {
#endif
};
-#define to_iio_dev_opaque(indio_dev) \
- container_of(indio_dev, struct iio_dev_opaque, indio_dev)
+#define to_iio_dev_opaque(_indio_dev) \
+ container_of((_indio_dev), struct iio_dev_opaque, indio_dev)
#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f2d65e2e88b6..324561b7a5e8 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -127,8 +127,7 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int iio_read_mount_matrix(struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
@@ -488,8 +487,6 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @id: [INTERN] used to identify device internally
- * @driver_module: [INTERN] used to make it harder to undercut users
* @modes: [DRIVER] operating modes supported by device
* @currentmode: [DRIVER] current operating mode
* @dev: [DRIVER] device structure, should be assigned a parent
@@ -503,9 +500,7 @@ struct iio_buffer_setup_ops {
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
- * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -513,19 +508,12 @@ struct iio_buffer_setup_ops {
* @name: [DRIVER] name of the device.
* @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
- * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
- * @chrdev: [INTERN] associated character device
- * @flags: [INTERN] file ops related flags including busy flag.
* @priv: [DRIVER] reference to driver's private information
* **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
- int id;
- struct module *driver_module;
-
int modes;
int currentmode;
struct device dev;
@@ -538,9 +526,7 @@ struct iio_dev {
unsigned masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
- unsigned scan_index_timestamp;
struct iio_trigger *trig;
- bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
@@ -550,15 +536,13 @@ struct iio_dev {
const char *name;
const char *label;
const struct iio_info *info;
- clockid_t clock_id;
- struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
- struct cdev chrdev;
- unsigned long flags;
void *priv;
};
+int iio_device_id(struct iio_dev *indio_dev);
+
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
@@ -602,15 +586,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
put_device(&indio_dev->dev);
}
-/**
- * iio_device_get_clock() - Retrieve current timestamping clock for the device
- * @indio_dev: IIO device structure containing the device
- */
-static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
-{
- return indio_dev->clock_id;
-}
-
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
/**
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index f9b728d490b1..cf49997d5903 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -55,6 +55,7 @@ struct adis_timeout {
* this should be the minimum size supported by the device.
* @burst_max_len: Holds the maximum burst size when the device supports
* more than one burst mode with different sizes
+ * @burst_max_speed_hz: Maximum spi speed that can be used in burst mode
*/
struct adis_data {
unsigned int read_delay;
@@ -83,6 +84,7 @@ struct adis_data {
unsigned int burst_reg_cmd;
unsigned int burst_len;
unsigned int burst_max_len;
+ unsigned int burst_max_speed_hz;
};
/**
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 53aa0343bf69..aaf4f1b4c277 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -41,7 +41,7 @@ struct in_device {
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
index 77582ae1745a..ee1d44545f30 100644
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ b/include/linux/input/cy8ctmg110_pdata.h
@@ -5,7 +5,6 @@
struct cy8ctmg110_pdata
{
int reset_pin; /* Reset pin is wired to this GPIO (optional) */
- int irq_pin; /* IRQ pin is wired to this GPIO */
};
#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
deleted file mode 100644
index 118b9af6e01a..000000000000
--- a/include/linux/input/cyttsp.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP_H_
-#define _CYTTSP_H_
-
-#define CY_SPI_NAME "cyttsp-spi"
-#define CY_I2C_NAME "cyttsp-i2c"
-/* Active Power state scanning/processing refresh interval */
-#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
-/* touch timeout for the Active power */
-#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
-/* Low Power state scanning/processing refresh interval */
-#define CY_LP_INTRVL_DFLT 0x0A /* ms */
-/* Active distance in pixels for a gesture to be reported */
-#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-
-#endif /* _CYTTSP_H_ */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 03faf20a6817..d0fa0b31994d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -537,7 +537,7 @@ struct context_entry {
struct dmar_domain {
int nid; /* node id */
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
+ unsigned int iommu_refcnt[DMAR_UNITS_SUPPORTED];
/* Refcount of devices per iommu */
@@ -546,7 +546,10 @@ struct dmar_domain {
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
- bool has_iotlb_device;
+ u8 has_iotlb_device: 1;
+ u8 iommu_coherency: 1; /* indicate coherency of iommu access */
+ u8 iommu_snooping: 1; /* indicate snooping control feature */
+
struct list_head devices; /* all devices' list */
struct list_head subdevices; /* all subdevices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
@@ -558,10 +561,6 @@ struct dmar_domain {
int agaw;
int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
@@ -606,6 +605,8 @@ struct intel_iommu {
struct completion prq_complete;
struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
+ struct iopf_queue *iopf_queue;
+ unsigned char iopfq_name[16];
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -619,6 +620,7 @@ struct intel_iommu {
u32 flags; /* Software defined flags */
struct dmar_drhd_unit *drhd;
+ void *perf_statistic;
};
/* Per subdevice private data */
@@ -776,6 +778,7 @@ struct intel_svm_dev {
struct device *dev;
struct intel_iommu *iommu;
struct iommu_sva sva;
+ unsigned long prq_seq_number;
u32 pasid;
int users;
u16 did;
@@ -791,7 +794,6 @@ struct intel_svm {
u32 pasid;
int gpasid; /* In case that guest PASID is different from host PASID */
struct list_head devs;
- struct list_head list;
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
@@ -827,4 +829,32 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
#define intel_iommu_enabled (0)
#endif
+static inline const char *decode_prq_descriptor(char *str, size_t size,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3)
+{
+ char *buf = str;
+ int bytes;
+
+ bytes = snprintf(buf, size,
+ "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
+ FIELD_GET(GENMASK_ULL(31, 16), dw0),
+ FIELD_GET(GENMASK_ULL(63, 12), dw1),
+ dw1 & BIT_ULL(0) ? 'r' : '-',
+ dw1 & BIT_ULL(1) ? 'w' : '-',
+ dw0 & BIT_ULL(52) ? 'x' : '-',
+ dw0 & BIT_ULL(53) ? 'p' : '-',
+ dw1 & BIT_ULL(2) ? 'l' : '-',
+ FIELD_GET(GENMASK_ULL(51, 32), dw0),
+ FIELD_GET(GENMASK_ULL(11, 3), dw1));
+
+ /* Private Data */
+ if (dw0 & BIT_ULL(9)) {
+ size -= bytes;
+ buf += bytes;
+ snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
+ }
+
+ return str;
+}
+
#endif
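For context, a caller would typically feed the four descriptor words into the helper when emitting a trace or log line; the snippet below is only an illustration (buffer size chosen generously, names hypothetical):

static void report_prq(u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{
	char buf[256];

	pr_info("prq: %s\n",
		decode_prq_descriptor(buf, sizeof(buf), dw0, dw1, dw2, dw3));
}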
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 25e2b4e80502..aee8ff4739b1 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
/* Get the device * from ishtp device instance */
struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
/* Trace interface for clients */
ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
/* Get device pointer of PCI device for DMA acces */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8e9a9ae471a6..c8293c817646 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -569,6 +569,7 @@ struct irq_chip {
* IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
* IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs
* in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
@@ -581,6 +582,7 @@ enum {
IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
IRQCHIP_SUPPORTS_NMI = (1 << 8),
IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
};
#include <linux/irqdesc.h>
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 6cc035321562..fd933c45281a 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -918,11 +918,11 @@ struct journal_s
struct shrinker j_shrinker;
/**
- * @j_jh_shrink_count:
+ * @j_checkpoint_jh_count:
*
* Number of journal buffers on the checkpoint list. [j_list_lock]
*/
- struct percpu_counter j_jh_shrink_count;
+ struct percpu_counter j_checkpoint_jh_count;
/**
* @j_shrink_transaction:
@@ -1556,8 +1556,6 @@ extern int jbd2_journal_set_features
(journal_t *, unsigned long, unsigned long, unsigned long);
extern void jbd2_journal_clear_features
(journal_t *, unsigned long, unsigned long, unsigned long);
-extern int jbd2_journal_register_shrinker(journal_t *journal);
-extern void jbd2_journal_unregister_shrinker(journal_t *journal);
extern int jbd2_journal_load (journal_t *journal);
extern int jbd2_journal_destroy (journal_t *);
extern int jbd2_journal_recover (journal_t *journal);
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 465060acc981..a1d6fc82d7f0 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,6 +7,7 @@
#define _LINUX_KALLSYMS_H
#include <linux/errno.h>
+#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/mm.h>
@@ -15,8 +16,10 @@
#include <asm/sections.h>
#define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
- 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+ (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+ (BUILD_ID_SIZE_MAX * 2) + 1)
struct cred;
struct module;
@@ -91,8 +94,10 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
@@ -128,6 +133,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
{
*buffer = '\0';
@@ -140,6 +151,12 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int lookup_symbol_name(unsigned long addr, char *symname)
{
return -ERANGE;
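The new *_build_id() variants follow the calling convention of their plain counterparts, with KSYM_SYMBOL_LEN (extended above) still bounding the output; a hypothetical caller:

static void print_sym(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	/* like sprint_symbol(), but appends the module build ID when known */
	sprint_symbol_build_id(sym, addr);
	pr_info("%s\n", sym);
}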
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5310e217bd74..dd874a1ee862 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -3,6 +3,7 @@
#define _LINUX_KASAN_H
#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index cc8fa109cfa3..20d1079e92b4 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -51,7 +51,8 @@
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
- * otherwise.
+ * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in
+ * autoconf.h.
*/
#define IS_MODULE(option) __is_defined(option##_MODULE)
@@ -66,7 +67,8 @@
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
+ * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in
+ * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1".
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
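Both macros expand to the integer constants 0 or 1, so they can be used in ordinary C conditionals and the dead branch is discarded at compile time. A minimal sketch (CONFIG_FOO is a placeholder symbol):

void foo_announce(void)
{
	if (IS_ENABLED(CONFIG_FOO))	/* true for CONFIG_FOO=y or =m */
		pr_info("foo support is compiled in\n");

	if (IS_MODULE(CONFIG_FOO))	/* true only for CONFIG_FOO=m */
		pr_info("foo is built as a module\n");
}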
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index da676cdbd727..86c0f1d18998 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -11,14 +11,11 @@ enum kcore_type {
KCORE_RAM,
KCORE_VMEMMAP,
KCORE_USER,
- KCORE_OTHER,
- KCORE_REMAP,
};
struct kcore_list {
struct list_head list;
unsigned long addr;
- unsigned long vaddr;
size_t size;
int type;
};
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f2ad8a53f71f..1b2f0a7e00d6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -10,10 +10,12 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bitops.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/typecheck.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
#include <linux/static_call_types.h>
@@ -84,7 +86,6 @@
#define lower_16_bits(n) ((u16)((n) & 0xffff))
struct completion;
-struct pt_regs;
struct user;
#ifdef CONFIG_PREEMPT_VOLUNTARY
@@ -189,159 +190,9 @@ void __might_fault(const char *file, int line);
static inline void might_fault(void) { }
#endif
-extern struct atomic_notifier_head panic_notifier_list;
-extern long (*panic_blink)(int state);
-__printf(1, 2)
-void panic(const char *fmt, ...) __noreturn __cold;
-void nmi_panic(struct pt_regs *regs, const char *msg);
-extern void oops_enter(void);
-extern void oops_exit(void);
-extern bool oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Preferred over simple_strtoul(). Return code must be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
- */
- if (sizeof(unsigned long) == sizeof(unsigned long long) &&
- __alignof__(unsigned long) == __alignof__(unsigned long long))
- return kstrtoull(s, base, (unsigned long long *)res);
- else
- return _kstrtoul(s, base, res);
-}
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Preferred over simple_strtol(). Return code must be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(long, long long) = 0.
- */
- if (sizeof(long) == sizeof(long long) &&
- __alignof__(long) == __alignof__(long long))
- return kstrtoll(s, base, (long long *)res);
- else
- return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
- return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
- return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
- return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
- return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
- return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
- return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
- return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
- return kstrtoint_from_user(s, count, base, res);
-}
-
-/*
- * Use kstrto<foo> instead.
- *
- * NOTE: simple_strto<foo> does not check for the range overflow and,
- * depending on the input, may give interesting results.
- *
- * Use these functions if and only if you cannot use kstrto<foo>, because
- * the conversion ends on the first non-digit character, which may be far
- * beyond the supported range. It might be useful to parse the strings like
- * 10x50 or 12:21 without altering original string or temporary buffer in use.
- * Keep in mind above caveat.
- */
-
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
-
extern int num_to_str(char *buf, int size,
unsigned long long num, unsigned int width);
@@ -384,52 +235,8 @@ extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-#ifdef CONFIG_SMP
-extern unsigned int sysctl_oops_all_cpu_backtrace;
-#else
-#define sysctl_oops_all_cpu_backtrace 0
-#endif /* CONFIG_SMP */
-
extern void bust_spinlocks(int yes);
-extern int panic_timeout;
-extern unsigned long panic_print;
-extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
-extern int panic_on_warn;
-extern unsigned long panic_on_taint;
-extern bool panic_on_taint_nousertaint;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_max_rcu_stall_to_panic;
-extern int sysctl_panic_on_stackoverflow;
-
-extern bool crash_kexec_post_notifiers;
-/*
- * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
- * holds a CPU number which is executing panic() currently. A value of
- * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
- */
-extern atomic_t panic_cpu;
-#define PANIC_CPU_INVALID -1
-
-/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
-static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
-{
- if (panic_timeout == arch_default_timeout)
- panic_timeout = timeout;
-}
-extern const char *print_tainted(void);
-enum lockdep_ok {
- LOCKDEP_STILL_OK,
- LOCKDEP_NOW_UNRELIABLE
-};
-extern void add_taint(unsigned flag, enum lockdep_ok);
-extern int test_taint(unsigned flag);
-extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
@@ -448,36 +255,6 @@ extern enum system_states {
SYSTEM_SUSPEND,
} system_state;
-/* This cannot be an enum because some may be used in assembly source. */
-#define TAINT_PROPRIETARY_MODULE 0
-#define TAINT_FORCED_MODULE 1
-#define TAINT_CPU_OUT_OF_SPEC 2
-#define TAINT_FORCED_RMMOD 3
-#define TAINT_MACHINE_CHECK 4
-#define TAINT_BAD_PAGE 5
-#define TAINT_USER 6
-#define TAINT_DIE 7
-#define TAINT_OVERRIDDEN_ACPI_TABLE 8
-#define TAINT_WARN 9
-#define TAINT_CRAP 10
-#define TAINT_FIRMWARE_WORKAROUND 11
-#define TAINT_OOT_MODULE 12
-#define TAINT_UNSIGNED_MODULE 13
-#define TAINT_SOFTLOCKUP 14
-#define TAINT_LIVEPATCH 15
-#define TAINT_AUX 16
-#define TAINT_RANDSTRUCT 17
-#define TAINT_FLAGS_COUNT 18
-#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
-
-struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
-};
-
-extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
-
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index a70d1ea03532..3fe6dd8a18c1 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate;
static __always_inline bool is_kfence_address(const void *addr)
{
/*
- * The non-NULL check is required in case the __kfence_pool pointer was
- * never initialized; keep it in the slow-path after the range-check.
+ * The __kfence_pool != NULL check is required to deal with the case
+ * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+ * the slow-path after the range-check!
*/
- return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
}
/**
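To see why the trailing check is against __kfence_pool rather than addr, consider the degenerate case sketched below (plain userspace C, names invented; pool stands in for __kfence_pool):

#include <stdbool.h>

#define POOL_SIZE	(2UL * 1024 * 1024)
static char *pool;	/* NULL until the pool is initialized */

static bool in_pool(const void *addr)
{
	/* with pool == NULL the subtraction yields the raw pointer value,
	 * so any addr < POOL_SIZE would pass the range check; the final
	 * '&& pool' is what rejects that case */
	return (unsigned long)((const char *)addr - pool) < POOL_SIZE && pool;
}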
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 392a3670944c..258cdde8d356 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -105,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
*/
/**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
*
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
@@ -229,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
- * kgdb_arch_late - Perform any architecture specific initalization.
+ * kgdb_arch_late - Perform any architecture specific initialization.
*
- * This function will handle the late initalization of any
+ * This function will handle the late initialization of any
* architecture specific callbacks. This is an optional function for
* handling things like late initialization of hw breakpoints. The
* default implementation does nothing.
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 523ffc7bc3a8..e4f3bfe08757 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -399,7 +399,9 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
void *alloc_insn_page(void);
-void free_insn_page(void *page);
+
+void *alloc_optinsn_page(void);
+void free_optinsn_page(void *page);
int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
new file mode 100644
index 000000000000..529974e22ea7
--- /dev/null
+++ b/include/linux/kstrtox.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTRTOX_H
+#define _LINUX_KSTRTOX_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+*/
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
+
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for the range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. It might be useful to parse the strings like
+ * 10x50 or 12:21 without altering original string or temporary buffer in use.
+ * Keep in mind above caveat.
+ */
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+
+static inline int strtobool(const char *s, bool *res)
+{
+ return kstrtobool(s, res);
+}
+
+#endif /* _LINUX_KSTRTOX_H */
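Typical call sites look like the following sketch (sysfs-style parsing; function names hypothetical). The return value must be checked because every converter is marked __must_check:

static int parse_threshold(const char *buf, unsigned long *out)
{
	return kstrtoul(buf, 10, out);		/* decimal only */
}

static int parse_enable(const char *buf, bool *out)
{
	return kstrtobool(buf, out);		/* accepts 0/1, y/n, on/off */
}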
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 89b69e645ac7..7074aa9af525 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -278,6 +278,7 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
+void nvdimm_delete(struct nvdimm *nvdimm);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 9dcaa3e582c9..1b5fceb565df 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -146,7 +146,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
@@ -172,7 +172,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
diff --git a/include/linux/litex.h b/include/linux/litex.h
index 5ea9ccf5cce4..f2edb86d5f44 100644
--- a/include/linux/litex.h
+++ b/include/linux/litex.h
@@ -11,18 +11,6 @@
#include <linux/io.h>
-/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */
-#if defined(CONFIG_LITEX_SUBREG_SIZE) && \
- (CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4)
-#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE
-#else
-#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1!
-#endif
-#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8)
-
-/* LiteX subregisters of any width are always aligned on a 4-byte boundary */
-#define LITEX_SUBREG_ALIGN 0x4
-
static inline void _write_litex_subregister(u32 val, void __iomem *addr)
{
writel((u32 __force)cpu_to_le32(val), addr);
@@ -42,115 +30,54 @@ static inline u32 _read_litex_subregister(void __iomem *addr)
* 32-bit wide logical CSR will be laid out as four 32-bit physical
* subregisters, each one containing one byte of meaningful data.
*
- * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
- */
-
-/* number of LiteX subregisters needed to store a register of given reg_size */
-#define _litex_num_subregs(reg_size) \
- (((reg_size) - 1) / LITEX_SUBREG_SIZE + 1)
-
-/*
- * since the number of 4-byte aligned subregisters required to store a single
- * LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the
- * next adjacent LiteX CSR register w.r.t. the offset of the current one also
- * depends on how many subregisters the latter is spread across
- */
-#define _next_reg_off(off, size) \
- ((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN)
-
-/*
- * The purpose of `_litex_[set|get]_reg()` is to implement the logic of
- * writing to/reading from the LiteX CSR in a single place that can be then
- * reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()`
- * accessors for the appropriate data width.
- * NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly
- * discouraged, as they perform no error checking on the requested data width!
- */
-
-/**
- * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register)
- * @reg: Address of the CSR
- * @reg_size: The width of the CSR expressed in the number of bytes
- * @val: Value to be written to the CSR
+ * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which
+ * means that only larger-than-32-bit CSRs will be split across multiple
+ * subregisters (e.g., a 64-bit CSR will be spread across two consecutive
+ * 32-bit subregisters).
*
- * This function splits a single (possibly multi-byte) LiteX CSR write into
- * a series of subregister writes with a proper offset.
- * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
- */
-static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val)
-{
- u8 shift = _litex_num_subregs(reg_size) * LITEX_SUBREG_SIZE_BIT;
-
- while (shift > 0) {
- shift -= LITEX_SUBREG_SIZE_BIT;
- _write_litex_subregister(val >> shift, reg);
- reg += LITEX_SUBREG_ALIGN;
- }
-}
-
-/**
- * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register)
- * @reg: Address of the CSR
- * @reg_size: The width of the CSR expressed in the number of bytes
- *
- * Return: Value read from the CSR
- *
- * This function generates a series of subregister reads with a proper offset
- * and joins their results into a single (possibly multi-byte) LiteX CSR value.
- * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
*/
-static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size)
-{
- u64 r;
- u8 i;
-
- r = _read_litex_subregister(reg);
- for (i = 1; i < _litex_num_subregs(reg_size); i++) {
- r <<= LITEX_SUBREG_SIZE_BIT;
- reg += LITEX_SUBREG_ALIGN;
- r |= _read_litex_subregister(reg);
- }
- return r;
-}
static inline void litex_write8(void __iomem *reg, u8 val)
{
- _litex_set_reg(reg, sizeof(u8), val);
+ _write_litex_subregister(val, reg);
}
static inline void litex_write16(void __iomem *reg, u16 val)
{
- _litex_set_reg(reg, sizeof(u16), val);
+ _write_litex_subregister(val, reg);
}
static inline void litex_write32(void __iomem *reg, u32 val)
{
- _litex_set_reg(reg, sizeof(u32), val);
+ _write_litex_subregister(val, reg);
}
static inline void litex_write64(void __iomem *reg, u64 val)
{
- _litex_set_reg(reg, sizeof(u64), val);
+ _write_litex_subregister(val >> 32, reg);
+ _write_litex_subregister(val, reg + 4);
}
static inline u8 litex_read8(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u8));
+ return _read_litex_subregister(reg);
}
static inline u16 litex_read16(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u16));
+ return _read_litex_subregister(reg);
}
static inline u32 litex_read32(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u32));
+ return _read_litex_subregister(reg);
}
static inline u64 litex_read64(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u64));
+ return ((u64)_read_litex_subregister(reg) << 32) |
+ _read_litex_subregister(reg + 4);
}
#endif /* _LINUX_LITEX_H */
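With the fixed 32-bit CSR bus, only CSRs wider than 32 bits span more than one subregister; a driver reads such a register through the 64-bit accessor, as in this hypothetical snippet (register offset invented):

#define MY_TIMER_VALUE	0x10	/* 64-bit CSR: two consecutive subregisters */

static u64 my_timer_read(void __iomem *base)
{
	/* litex_read64() issues the high/low 32-bit reads shown above */
	return litex_read64(base + MY_TIMER_VALUE);
}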
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7ab9f264313f..a98309c0121c 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -109,11 +109,5 @@ int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index e709fe5924f2..5ae766f26e04 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -37,12 +37,7 @@ int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
+
extern const struct rpc_version nlm_version4;
#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h
new file mode 100644
index 000000000000..3fa65c964379
--- /dev/null
+++ b/include/linux/logic_iomem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef __LOGIC_IOMEM_H
+#define __LOGIC_IOMEM_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/**
+ * struct logic_iomem_ops - emulated IO memory ops
+ * @read: read an 8, 16, 32 or 64 bit quantity from the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @write: write an 8, 16 32 or 64 bit quantity to the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @set: optional, for memset_io()
+ * @copy_from: optional, for memcpy_fromio()
+ * @copy_to: optional, for memcpy_toio()
+ * @unmap: optional, this region is getting unmapped
+ */
+struct logic_iomem_ops {
+ unsigned long (*read)(void *priv, unsigned int offset, int size);
+ void (*write)(void *priv, unsigned int offset, int size,
+ unsigned long val);
+
+ void (*set)(void *priv, unsigned int offset, u8 value, int size);
+ void (*copy_from)(void *priv, void *buffer, unsigned int offset,
+ int size);
+ void (*copy_to)(void *priv, unsigned int offset, const void *buffer,
+ int size);
+
+ void (*unmap)(void *priv);
+};
+
+/**
+ * struct logic_iomem_region_ops - ops for an IO memory handler
+ * @map: map a range in the registered IO memory region, must
+ * fill *ops with the ops and may fill *priv to be passed
+ * to the ops. The offset is given as the offset into the
+ * registered resource region.
+ * The return value is negative for errors, or >= 0 for
+ * success. On success, the return value is added to the
+ * offset for later ops, to allow for partial mappings.
+ */
+struct logic_iomem_region_ops {
+ long (*map)(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv);
+};
+
+/**
+ * logic_iomem_add_region - register an IO memory region
+ * @resource: the resource description for this region
+ * @ops: the IO memory mapping ops for this resource
+ */
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops);
+
+#endif /* __LOGIC_IOMEM_H */
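A minimal backend might look like the sketch below: it emulates the whole registered resource with a RAM buffer and provides only the mandatory read/write ops (little-endian layout assumed for brevity; all names are hypothetical):

static u8 backing_buffer[65536];

static unsigned long ram_read(void *priv, unsigned int offset, int size)
{
	unsigned long val = 0;

	memcpy(&val, (u8 *)priv + offset, size);
	return val;
}

static void ram_write(void *priv, unsigned int offset, int size,
		      unsigned long val)
{
	memcpy((u8 *)priv + offset, &val, size);
}

static const struct logic_iomem_ops ram_ops = {
	.read	= ram_read,
	.write	= ram_write,
};

static long ram_map(unsigned long offset, size_t size,
		    const struct logic_iomem_ops **ops, void **priv)
{
	*ops = &ram_ops;
	*priv = backing_buffer + offset;
	return 0;	/* no additional offset for later ops */
}

static const struct logic_iomem_region_ops ram_region_ops = {
	.map = ram_map,
};

The region would then be registered with logic_iomem_add_region(&my_resource, &ram_region_ops), where my_resource describes the emulated IO range.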
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 429d67d815ce..07add7882a5d 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
Because of this later property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
- backend storage uppon next resync, if we don't get it right).
+ backend storage upon next resync, if we don't get it right).
What for?
@@ -152,7 +152,7 @@ struct lc_element {
* for paranoia, and for "lc_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
- * it needs to become arch independend u64 */
+ * it needs to become an architecture independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
@@ -263,7 +263,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
*
* Allows (expects) the set to be "dirty". Note that the reference counts and
* order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
*/
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -275,7 +275,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
+ * still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index acee44b9db26..0f06c2287b52 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -22,14 +22,10 @@
#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
-/* PHY IDs and mask for Alaska 10G PHYs */
-#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8
-#define MARVELL_PHY_ID_88X3310 0x002b09a0
-#define MARVELL_PHY_ID_88X3340 0x002b09a8
-
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
deleted file mode 100644
index 593602fc9317..000000000000
--- a/include/linux/max17040_battery.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- */
-
-#ifndef __MAX17040_BATTERY_H_
-#define __MAX17040_BATTERY_H_
-
-struct max17040_platform_data {
- int (*battery_online)(void);
- int (*charger_online)(void);
- int (*charger_enable)(void);
-};
-
-#endif
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 71dd10a3d928..f6efb16f9d1b 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -120,7 +120,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
__mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void mcb_unregister_driver(struct mcb_driver *driver);
#define module_mcb_driver(__mcb_driver) \
- module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver)
extern void mcb_bus_add_devices(const struct mcb_bus *bus);
extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 1fb34ea394ad..3a38598c2605 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -55,6 +55,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype);
* register the device to mdev module.
*
* @owner: The module owner.
+ * @device_driver: Which device driver to probe() on newly created devices
* @dev_attr_groups: Attributes of the parent device.
* @mdev_attr_groups: Attributes of the mediated device.
* @supported_type_groups: Attributes to define supported types. It is mandatory
@@ -103,6 +104,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype);
**/
struct mdev_parent_ops {
struct module *owner;
+ struct mdev_driver *device_driver;
const struct attribute_group **dev_attr_groups;
const struct attribute_group **mdev_attr_groups;
struct attribute_group **supported_type_groups;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 552309342c38..4a53c3ca86bd 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -30,7 +30,9 @@ extern unsigned long long max_possible_pfn;
* @MEMBLOCK_NONE: no special request
* @MEMBLOCK_HOTPLUG: hotpluggable region
* @MEMBLOCK_MIRROR: mirrored region
- * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for further details
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
@@ -207,7 +209,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_NONE, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
/**
* for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -218,7 +220,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range_rev(i, p_start, p_end) \
__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_NONE, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
/**
* for_each_reserved_mem_range - iterate over all reserved memblock areas
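The iteration interface itself is unchanged for callers; a typical walk still looks like the sketch below (function name hypothetical), the difference being that hotpluggable regions are now visited as well:

static void __init count_usable_memory(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_mem_range(i, &start, &end)
		total += end - start;

	pr_info("memblock: %pa byte(s) of memory\n", &total);
}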
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bfe5c486f4ad..24797929d8a1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
+ *min = *low = 0;
+
if (mem_cgroup_disabled())
- return 0;
+ return;
/*
* There is no reclaim protection applied to a targeted reclaim.
@@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
*
*/
if (root == memcg)
- return 0;
-
- if (in_low_reclaim)
- return READ_ONCE(memcg->memory.emin);
+ return;
- return max(READ_ONCE(memcg->memory.emin),
- READ_ONCE(memcg->memory.elow));
+ *min = READ_ONCE(memcg->memory.emin);
+ *low = READ_ONCE(memcg->memory.elow);
}
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
- return 0;
+ *min = *low = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
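Callers now receive both thresholds instead of a single value whose meaning depended on in_low_reclaim; a simplified, hypothetical reclaim-side sketch:

static bool below_protection(struct mem_cgroup *root, struct mem_cgroup *memcg,
			     unsigned long usage, bool in_low_reclaim)
{
	unsigned long min, low;

	mem_cgroup_protection(root, memcg, &min, &low);

	/* pick the bound relevant to the current reclaim pass */
	return usage < (in_low_reclaim ? min : max(min, low));
}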
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 28f32fd00fe9..a7fd2c3ccb77 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -18,18 +18,6 @@ struct vmem_altmap;
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
-/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
- */
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
-
/* Types for control the zone type of onlined and offlined memory */
enum {
/* Offline the memory. */
@@ -222,17 +210,6 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
void get_online_mems(void);
void put_online_mems(void);
@@ -260,10 +237,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f1c74df264d..0aaf91b496e2 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -46,11 +46,8 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+ nodemask_t nodes; /* interleave/bind/prefer */
+
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
@@ -150,7 +147,7 @@ extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 45a79da89c5f..c0e9d35889e8 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,7 +26,7 @@ struct vmem_altmap {
};
/*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types each having a different
* usage.
*
* MEMORY_DEVICE_PRIVATE:
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index b06171322178..af5d97239c0d 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -2,7 +2,7 @@
/*
* Device driver for regulators in hi655x IC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index 5640e6088fe6..4c895072d91b 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -222,31 +222,20 @@ enum lp87565_device_type {
#define LP87565_GPIO2_SEL BIT(1)
#define LP87565_GPIO1_SEL BIT(0)
-#define LP87565_GOIO3_OD BIT(6)
-#define LP87565_GOIO2_OD BIT(5)
-#define LP87565_GOIO1_OD BIT(4)
-#define LP87565_GOIO3_DIR BIT(2)
-#define LP87565_GOIO2_DIR BIT(1)
-#define LP87565_GOIO1_DIR BIT(0)
-
-#define LP87565_GOIO3_IN BIT(2)
-#define LP87565_GOIO2_IN BIT(1)
-#define LP87565_GOIO1_IN BIT(0)
-
-#define LP87565_GOIO3_OUT BIT(2)
-#define LP87565_GOIO2_OUT BIT(1)
-#define LP87565_GOIO1_OUT BIT(0)
-
-enum LP87565_regulator_id {
- /* BUCK's */
- LP87565_BUCK_0,
- LP87565_BUCK_1,
- LP87565_BUCK_2,
- LP87565_BUCK_3,
- LP87565_BUCK_10,
- LP87565_BUCK_23,
- LP87565_BUCK_3210,
-};
+#define LP87565_GPIO3_OD BIT(6)
+#define LP87565_GPIO2_OD BIT(5)
+#define LP87565_GPIO1_OD BIT(4)
+#define LP87565_GPIO3_DIR BIT(2)
+#define LP87565_GPIO2_DIR BIT(1)
+#define LP87565_GPIO1_DIR BIT(0)
+
+#define LP87565_GPIO3_IN BIT(2)
+#define LP87565_GPIO2_IN BIT(1)
+#define LP87565_GPIO1_IN BIT(0)
+
+#define LP87565_GPIO3_OUT BIT(2)
+#define LP87565_GPIO2_OUT BIT(1)
+#define LP87565_GPIO1_OUT BIT(0)
/**
* struct LP87565 - state holder for the LP87565 driver
@@ -263,5 +252,6 @@ struct lp87565 {
u8 rev;
u8 dev_type;
struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
};
#endif /* __LINUX_MFD_LP87565_H */
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
index 2ad0b312aa28..201139b12140 100644
--- a/include/linux/mfd/mt6358/registers.h
+++ b/include/linux/mfd/mt6358/registers.h
@@ -8,6 +8,8 @@
/* PMIC Registers */
#define MT6358_SWCID 0xa
+#define MT6358_TOPSTATUS 0x28
+#define MT6358_TOP_RST_MISC 0x14c
#define MT6358_MISC_TOP_INT_CON0 0x188
#define MT6358_MISC_TOP_INT_STATUS0 0x194
#define MT6358_TOP_INT_STATUS0 0x19e
diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h
deleted file mode 100644
index ea1304035d4d..000000000000
--- a/include/linux/mfd/mt6360.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2020 MediaTek Inc.
- */
-
-#ifndef __MT6360_H__
-#define __MT6360_H__
-
-#include <linux/regmap.h>
-
-enum {
- MT6360_SLAVE_PMU = 0,
- MT6360_SLAVE_PMIC,
- MT6360_SLAVE_LDO,
- MT6360_SLAVE_TCPC,
- MT6360_SLAVE_MAX,
-};
-
-#define MT6360_PMU_SLAVEID (0x34)
-#define MT6360_PMIC_SLAVEID (0x1A)
-#define MT6360_LDO_SLAVEID (0x64)
-#define MT6360_TCPC_SLAVEID (0x4E)
-
-struct mt6360_pmu_data {
- struct i2c_client *i2c[MT6360_SLAVE_MAX];
- struct device *dev;
- struct regmap *regmap;
- struct regmap_irq_chip_data *irq_data;
- unsigned int chip_rev;
-};
-
-/* PMU register defininition */
-#define MT6360_PMU_DEV_INFO (0x00)
-#define MT6360_PMU_CORE_CTRL1 (0x01)
-#define MT6360_PMU_RST1 (0x02)
-#define MT6360_PMU_CRCEN (0x03)
-#define MT6360_PMU_RST_PAS_CODE1 (0x04)
-#define MT6360_PMU_RST_PAS_CODE2 (0x05)
-#define MT6360_PMU_CORE_CTRL2 (0x06)
-#define MT6360_PMU_TM_PAS_CODE1 (0x07)
-#define MT6360_PMU_TM_PAS_CODE2 (0x08)
-#define MT6360_PMU_TM_PAS_CODE3 (0x09)
-#define MT6360_PMU_TM_PAS_CODE4 (0x0A)
-#define MT6360_PMU_IRQ_IND (0x0B)
-#define MT6360_PMU_IRQ_MASK (0x0C)
-#define MT6360_PMU_IRQ_SET (0x0D)
-#define MT6360_PMU_SHDN_CTRL (0x0E)
-#define MT6360_PMU_TM_INF (0x0F)
-#define MT6360_PMU_I2C_CTRL (0x10)
-#define MT6360_PMU_CHG_CTRL1 (0x11)
-#define MT6360_PMU_CHG_CTRL2 (0x12)
-#define MT6360_PMU_CHG_CTRL3 (0x13)
-#define MT6360_PMU_CHG_CTRL4 (0x14)
-#define MT6360_PMU_CHG_CTRL5 (0x15)
-#define MT6360_PMU_CHG_CTRL6 (0x16)
-#define MT6360_PMU_CHG_CTRL7 (0x17)
-#define MT6360_PMU_CHG_CTRL8 (0x18)
-#define MT6360_PMU_CHG_CTRL9 (0x19)
-#define MT6360_PMU_CHG_CTRL10 (0x1A)
-#define MT6360_PMU_CHG_CTRL11 (0x1B)
-#define MT6360_PMU_CHG_CTRL12 (0x1C)
-#define MT6360_PMU_CHG_CTRL13 (0x1D)
-#define MT6360_PMU_CHG_CTRL14 (0x1E)
-#define MT6360_PMU_CHG_CTRL15 (0x1F)
-#define MT6360_PMU_CHG_CTRL16 (0x20)
-#define MT6360_PMU_CHG_AICC_RESULT (0x21)
-#define MT6360_PMU_DEVICE_TYPE (0x22)
-#define MT6360_PMU_QC_CONTROL1 (0x23)
-#define MT6360_PMU_QC_CONTROL2 (0x24)
-#define MT6360_PMU_QC30_CONTROL1 (0x25)
-#define MT6360_PMU_QC30_CONTROL2 (0x26)
-#define MT6360_PMU_USB_STATUS1 (0x27)
-#define MT6360_PMU_QC_STATUS1 (0x28)
-#define MT6360_PMU_QC_STATUS2 (0x29)
-#define MT6360_PMU_CHG_PUMP (0x2A)
-#define MT6360_PMU_CHG_CTRL17 (0x2B)
-#define MT6360_PMU_CHG_CTRL18 (0x2C)
-#define MT6360_PMU_CHRDET_CTRL1 (0x2D)
-#define MT6360_PMU_CHRDET_CTRL2 (0x2E)
-#define MT6360_PMU_DPDN_CTRL (0x2F)
-#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30)
-#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31)
-#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32)
-#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33)
-#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34)
-#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35)
-#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36)
-#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37)
-#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38)
-#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39)
-#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A)
-#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B)
-#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C)
-#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D)
-#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E)
-#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F)
-#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40)
-#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41)
-#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42)
-#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43)
-#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44)
-#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45)
-#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46)
-#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47)
-#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48)
-#define MT6360_PMU_BC12_CTRL (0x49)
-#define MT6360_PMU_CHG_STAT (0x4A)
-#define MT6360_PMU_RESV1 (0x4B)
-#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E)
-#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F)
-#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50)
-#define MT6360_PMU_TYPEC_OTP_CTRL (0x51)
-#define MT6360_PMU_ADC_BAT_DATA_H (0x52)
-#define MT6360_PMU_ADC_BAT_DATA_L (0x53)
-#define MT6360_PMU_IMID_BACKBST_ON (0x54)
-#define MT6360_PMU_IMID_BACKBST_OFF (0x55)
-#define MT6360_PMU_ADC_CONFIG (0x56)
-#define MT6360_PMU_ADC_EN2 (0x57)
-#define MT6360_PMU_ADC_IDLE_T (0x58)
-#define MT6360_PMU_ADC_RPT_1 (0x5A)
-#define MT6360_PMU_ADC_RPT_2 (0x5B)
-#define MT6360_PMU_ADC_RPT_3 (0x5C)
-#define MT6360_PMU_ADC_RPT_ORG1 (0x5D)
-#define MT6360_PMU_ADC_RPT_ORG2 (0x5E)
-#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F)
-#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60)
-#define MT6360_PMU_CHG_CTRL19 (0x61)
-#define MT6360_PMU_VDDASUPPLY (0x62)
-#define MT6360_PMU_BC12_MANUAL (0x63)
-#define MT6360_PMU_CHGDET_FUNC (0x64)
-#define MT6360_PMU_FOD_CTRL (0x65)
-#define MT6360_PMU_CHG_CTRL20 (0x66)
-#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67)
-#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68)
-#define MT6360_PMU_RESV2 (0x69)
-#define MT6360_PMU_USBID_CTRL1 (0x6D)
-#define MT6360_PMU_USBID_CTRL2 (0x6E)
-#define MT6360_PMU_USBID_CTRL3 (0x6F)
-#define MT6360_PMU_FLED_CFG (0x70)
-#define MT6360_PMU_RESV3 (0x71)
-#define MT6360_PMU_FLED1_CTRL (0x72)
-#define MT6360_PMU_FLED_STRB_CTRL (0x73)
-#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74)
-#define MT6360_PMU_FLED1_TOR_CTRL (0x75)
-#define MT6360_PMU_FLED2_CTRL (0x76)
-#define MT6360_PMU_RESV4 (0x77)
-#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78)
-#define MT6360_PMU_FLED2_TOR_CTRL (0x79)
-#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A)
-#define MT6360_PMU_FLED_VMID_RTM (0x7B)
-#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C)
-#define MT6360_PMU_FLED_PWSEL (0x7D)
-#define MT6360_PMU_FLED_EN (0x7E)
-#define MT6360_PMU_FLED_Hidden1 (0x7F)
-#define MT6360_PMU_RGB_EN (0x80)
-#define MT6360_PMU_RGB1_ISNK (0x81)
-#define MT6360_PMU_RGB2_ISNK (0x82)
-#define MT6360_PMU_RGB3_ISNK (0x83)
-#define MT6360_PMU_RGB_ML_ISNK (0x84)
-#define MT6360_PMU_RGB1_DIM (0x85)
-#define MT6360_PMU_RGB2_DIM (0x86)
-#define MT6360_PMU_RGB3_DIM (0x87)
-#define MT6360_PMU_RESV5 (0x88)
-#define MT6360_PMU_RGB12_Freq (0x89)
-#define MT6360_PMU_RGB34_Freq (0x8A)
-#define MT6360_PMU_RGB1_Tr (0x8B)
-#define MT6360_PMU_RGB1_Tf (0x8C)
-#define MT6360_PMU_RGB1_TON_TOFF (0x8D)
-#define MT6360_PMU_RGB2_Tr (0x8E)
-#define MT6360_PMU_RGB2_Tf (0x8F)
-#define MT6360_PMU_RGB2_TON_TOFF (0x90)
-#define MT6360_PMU_RGB3_Tr (0x91)
-#define MT6360_PMU_RGB3_Tf (0x92)
-#define MT6360_PMU_RGB3_TON_TOFF (0x93)
-#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94)
-#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95)
-#define MT6360_PMU_RESV6 (0x97)
-#define MT6360_PMU_SPARE1 (0x9A)
-#define MT6360_PMU_SPARE2 (0xA0)
-#define MT6360_PMU_SPARE3 (0xB0)
-#define MT6360_PMU_SPARE4 (0xC0)
-#define MT6360_PMU_CHG_IRQ1 (0xD0)
-#define MT6360_PMU_CHG_IRQ2 (0xD1)
-#define MT6360_PMU_CHG_IRQ3 (0xD2)
-#define MT6360_PMU_CHG_IRQ4 (0xD3)
-#define MT6360_PMU_CHG_IRQ5 (0xD4)
-#define MT6360_PMU_CHG_IRQ6 (0xD5)
-#define MT6360_PMU_QC_IRQ (0xD6)
-#define MT6360_PMU_FOD_IRQ (0xD7)
-#define MT6360_PMU_BASE_IRQ (0xD8)
-#define MT6360_PMU_FLED_IRQ1 (0xD9)
-#define MT6360_PMU_FLED_IRQ2 (0xDA)
-#define MT6360_PMU_RGB_IRQ (0xDB)
-#define MT6360_PMU_BUCK1_IRQ (0xDC)
-#define MT6360_PMU_BUCK2_IRQ (0xDD)
-#define MT6360_PMU_LDO_IRQ1 (0xDE)
-#define MT6360_PMU_LDO_IRQ2 (0xDF)
-#define MT6360_PMU_CHG_STAT1 (0xE0)
-#define MT6360_PMU_CHG_STAT2 (0xE1)
-#define MT6360_PMU_CHG_STAT3 (0xE2)
-#define MT6360_PMU_CHG_STAT4 (0xE3)
-#define MT6360_PMU_CHG_STAT5 (0xE4)
-#define MT6360_PMU_CHG_STAT6 (0xE5)
-#define MT6360_PMU_QC_STAT (0xE6)
-#define MT6360_PMU_FOD_STAT (0xE7)
-#define MT6360_PMU_BASE_STAT (0xE8)
-#define MT6360_PMU_FLED_STAT1 (0xE9)
-#define MT6360_PMU_FLED_STAT2 (0xEA)
-#define MT6360_PMU_RGB_STAT (0xEB)
-#define MT6360_PMU_BUCK1_STAT (0xEC)
-#define MT6360_PMU_BUCK2_STAT (0xED)
-#define MT6360_PMU_LDO_STAT1 (0xEE)
-#define MT6360_PMU_LDO_STAT2 (0xEF)
-#define MT6360_PMU_CHG_MASK1 (0xF0)
-#define MT6360_PMU_CHG_MASK2 (0xF1)
-#define MT6360_PMU_CHG_MASK3 (0xF2)
-#define MT6360_PMU_CHG_MASK4 (0xF3)
-#define MT6360_PMU_CHG_MASK5 (0xF4)
-#define MT6360_PMU_CHG_MASK6 (0xF5)
-#define MT6360_PMU_QC_MASK (0xF6)
-#define MT6360_PMU_FOD_MASK (0xF7)
-#define MT6360_PMU_BASE_MASK (0xF8)
-#define MT6360_PMU_FLED_MASK1 (0xF9)
-#define MT6360_PMU_FLED_MASK2 (0xFA)
-#define MT6360_PMU_FAULTB_MASK (0xFB)
-#define MT6360_PMU_BUCK1_MASK (0xFC)
-#define MT6360_PMU_BUCK2_MASK (0xFD)
-#define MT6360_PMU_LDO_MASK1 (0xFE)
-#define MT6360_PMU_LDO_MASK2 (0xFF)
-#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2)
-
-/* MT6360_PMU_IRQ_SET */
-#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1)
-#define MT6360_IRQ_RETRIG BIT(2)
-
-#define CHIP_VEN_MASK (0xF0)
-#define CHIP_VEN_MT6360 (0x50)
-#define CHIP_REV_MASK (0x0F)
-
-#endif /* __MT6360_H__ */
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index e07f6e61cd38..a96e6d43ca06 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -437,6 +437,87 @@ enum rk809_reg_id {
#define RK817_RTC_COMP_LSB_REG 0x10
#define RK817_RTC_COMP_MSB_REG 0x11
+/* RK817 Codec Registers */
+#define RK817_CODEC_DTOP_VUCTL 0x12
+#define RK817_CODEC_DTOP_VUCTIME 0x13
+#define RK817_CODEC_DTOP_LPT_SRST 0x14
+#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15
+#define RK817_CODEC_AREF_RTCFG0 0x16
+#define RK817_CODEC_AREF_RTCFG1 0x17
+#define RK817_CODEC_AADC_CFG0 0x18
+#define RK817_CODEC_AADC_CFG1 0x19
+#define RK817_CODEC_DADC_VOLL 0x1a
+#define RK817_CODEC_DADC_VOLR 0x1b
+#define RK817_CODEC_DADC_SR_ACL0 0x1e
+#define RK817_CODEC_DADC_ALC1 0x1f
+#define RK817_CODEC_DADC_ALC2 0x20
+#define RK817_CODEC_DADC_NG 0x21
+#define RK817_CODEC_DADC_HPF 0x22
+#define RK817_CODEC_DADC_RVOLL 0x23
+#define RK817_CODEC_DADC_RVOLR 0x24
+#define RK817_CODEC_AMIC_CFG0 0x27
+#define RK817_CODEC_AMIC_CFG1 0x28
+#define RK817_CODEC_DMIC_PGA_GAIN 0x29
+#define RK817_CODEC_DMIC_LMT1 0x2a
+#define RK817_CODEC_DMIC_LMT2 0x2b
+#define RK817_CODEC_DMIC_NG1 0x2c
+#define RK817_CODEC_DMIC_NG2 0x2d
+#define RK817_CODEC_ADAC_CFG0 0x2e
+#define RK817_CODEC_ADAC_CFG1 0x2f
+#define RK817_CODEC_DDAC_POPD_DACST 0x30
+#define RK817_CODEC_DDAC_VOLL 0x31
+#define RK817_CODEC_DDAC_VOLR 0x32
+#define RK817_CODEC_DDAC_SR_LMT0 0x35
+#define RK817_CODEC_DDAC_LMT1 0x36
+#define RK817_CODEC_DDAC_LMT2 0x37
+#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38
+#define RK817_CODEC_DDAC_RVOLL 0x39
+#define RK817_CODEC_DDAC_RVOLR 0x3a
+#define RK817_CODEC_AHP_ANTI0 0x3b
+#define RK817_CODEC_AHP_ANTI1 0x3c
+#define RK817_CODEC_AHP_CFG0 0x3d
+#define RK817_CODEC_AHP_CFG1 0x3e
+#define RK817_CODEC_AHP_CP 0x3f
+#define RK817_CODEC_ACLASSD_CFG1 0x40
+#define RK817_CODEC_ACLASSD_CFG2 0x41
+#define RK817_CODEC_APLL_CFG0 0x42
+#define RK817_CODEC_APLL_CFG1 0x43
+#define RK817_CODEC_APLL_CFG2 0x44
+#define RK817_CODEC_APLL_CFG3 0x45
+#define RK817_CODEC_APLL_CFG4 0x46
+#define RK817_CODEC_APLL_CFG5 0x47
+#define RK817_CODEC_DI2S_CKM 0x48
+#define RK817_CODEC_DI2S_RSD 0x49
+#define RK817_CODEC_DI2S_RXCR1 0x4a
+#define RK817_CODEC_DI2S_RXCR2 0x4b
+#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c
+#define RK817_CODEC_DI2S_TXCR1 0x4d
+#define RK817_CODEC_DI2S_TXCR2 0x4e
+#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f
+
+/* RK817_CODEC_DI2S_CKM */
+#define RK817_I2S_MODE_MASK (0x1 << 0)
+#define RK817_I2S_MODE_MST (0x1 << 0)
+#define RK817_I2S_MODE_SLV (0x0 << 0)
+
+/* RK817_CODEC_DDAC_MUTE_MIXCTL */
+#define DACMT_MASK (0x1 << 0)
+#define DACMT_ENABLE (0x1 << 0)
+#define DACMT_DISABLE (0x0 << 0)
+
+/* RK817_CODEC_DI2S_RXCR2 */
+#define VDW_RX_24BITS (0x17)
+#define VDW_RX_16BITS (0x0f)
+
+/* RK817_CODEC_DI2S_TXCR2 */
+#define VDW_TX_24BITS (0x17)
+#define VDW_TX_16BITS (0x0f)
+
+/* RK817_CODEC_AMIC_CFG0 */
+#define MIC_DIFF_MASK (0x1 << 7)
+#define MIC_DIFF_DIS (0x0 << 7)
+#define MIC_DIFF_EN (0x1 << 7)
+
#define RK817_POWER_EN_REG(i) (0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
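Reviewer note: a minimal sketch of driving the new codec bitfields above via the PMIC regmap; the assumption that the codec reuses the rk808 regmap handle ("rk808->regmap") is mine, not stated in this hunk:

	/* Put the I2S interface into slave mode and unmute the DAC (sketch). */
	regmap_update_bits(rk808->regmap, RK817_CODEC_DI2S_CKM,
			   RK817_I2S_MODE_MASK, RK817_I2S_MODE_SLV);
	regmap_update_bits(rk808->regmap, RK817_CODEC_DDAC_MUTE_MIXCTL,
			   DACMT_MASK, DACMT_DISABLE);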
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index 2d1895c3efbf..40a0c2dfb80f 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -200,13 +200,13 @@ enum rt5033_reg {
#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U
#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U
-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 21
/* RT5033 regulator LDO output voltage uV */
#define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U
#define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U
#define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U
-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 19
/* RT5033 regulator SAFE LDO output voltage uV */
#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U
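Reviewer note: the corrected step counts follow directly from the min/max/step values above; a quick check of the arithmetic:

	/* BUCK: (3000000 - 1000000) / 100000 + 1 = 21 selectable voltages */
	/* LDO:  (3000000 - 1200000) / 100000 + 1 = 19 selectable voltages */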
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index f1631a39acfc..f92fe090473d 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -67,11 +67,8 @@ struct sec_pmic_dev {
struct i2c_client *i2c;
unsigned long device_type;
- int irq_base;
int irq;
struct regmap_irq_chip_data *irq_data;
-
- bool wakeup;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -81,15 +78,8 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
- int device_type;
int num_regulators;
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- bool wakeup;
- bool buck_voltage_lock;
-
int buck_gpios[3];
int buck_ds[3];
unsigned int buck2_voltage[8];
@@ -99,35 +89,12 @@ struct sec_platform_data {
unsigned int buck4_voltage[8];
bool buck4_gpiodvs;
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
int buck_ramp_delay;
- int buck2_ramp_delay;
- int buck34_ramp_delay;
- int buck5_ramp_delay;
- int buck16_ramp_delay;
- int buck7810_ramp_delay;
- int buck9_ramp_delay;
- int buck24_ramp_delay;
- int buck3_ramp_delay;
- int buck7_ramp_delay;
- int buck8910_ramp_delay;
-
- bool buck1_ramp_enable;
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
- bool buck6_ramp_enable;
int buck2_init;
int buck3_init;
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
index bb8d2e276668..76a943c83c63 100644
--- a/include/linux/mfd/wcd934x/registers.h
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -18,6 +18,8 @@
#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
@@ -103,21 +105,58 @@
#define WCD934X_ANA_AMIC3 0x0610
#define WCD934X_ANA_AMIC4 0x0611
#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD934X_MBHC_L_DET_EN BIT(7)
+#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD934X_ANA_MBHC_BIAS_EN BIT(0)
#define WCD934X_ANA_MBHC_ZDET 0x0616
#define WCD934X_ANA_MBHC_RESULT_1 0x0617
#define WCD934X_ANA_MBHC_RESULT_2 0x0618
#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MBHC_BTN0 0x061a
+#define WCD934X_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MBHC_BTN1 0x061b
+#define WCD934X_ANA_MBHC_BTN2 0x061c
+#define WCD934X_ANA_MBHC_BTN3 0x061d
+#define WCD934X_ANA_MBHC_BTN4 0x061e
+#define WCD934X_ANA_MBHC_BTN5 0x061f
+#define WCD934X_ANA_MBHC_BTN6 0x0620
+#define WCD934X_ANA_MBHC_BTN7 0x0621
+#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
#define WCD934X_ANA_MICB1 0x0622
#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_MICB_DISABLE 0
+#define WCD934X_MICB_ENABLE 1
+#define WCD934X_MICB_PULL_UP 2
+#define WCD934X_MICB_PULL_DOWN 3
#define WCD934X_ANA_MICB_PULL_UP 0x80
#define WCD934X_ANA_MICB_ENABLE 0x40
#define WCD934X_ANA_MICB_DISABLE 0x0
#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB2_ENABLE BIT(6)
+#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB2_RAMP 0x0624
+#define WCD934X_RAMP_EN_MASK BIT(7)
+#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
#define WCD934X_ANA_MICB3 0x0625
#define WCD934X_ANA_MICB4 0x0626
#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MBHC_CTL_CLK 0x0656
+#define WCD934X_MBHC_CTL_BCS 0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1 0x065b
#define WCD934X_MICB1_TEST_CTL_1 0x066b
#define WCD934X_MICB1_TEST_CTL_2 0x066c
#define WCD934X_MICB2_TEST_CTL_1 0x066e
@@ -141,7 +180,11 @@
#define WCD934X_HPH_CNP_WG_CTL 0x06cc
#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_CNP_WG_TIME 0x06cd
#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_PA_CTL2 0x06d2
+#define WCD934X_HPHPA_GND_R_MASK BIT(6)
+#define WCD934X_HPHPA_GND_L_MASK BIT(4)
#define WCD934X_HPH_L_EN 0x06d3
#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
@@ -152,6 +195,8 @@
#define WCD934X_HPH_OCP_DET_MASK BIT(0)
#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_HPH_R_ATEST 0x06d8
+#define WCD934X_HPHPA_GND_OVR_MASK BIT(1)
#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
@@ -172,7 +217,19 @@
#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_1 0x0720
+#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD935X_MBHC_CTL_RCO_EN BIT(7)
#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722
+#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6)
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723
+#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726
#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
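Reviewer note: a minimal sketch of how an MBHC consumer could use the new mechanical-detection bits; the "wcd->regmap" handle name is an assumption:

	/* Enable left-channel insertion detection on the MBHC block (sketch). */
	regmap_update_bits(wcd->regmap, WCD934X_ANA_MBHC_MECH,
			   WCD934X_MBHC_L_DET_EN_MASK, WCD934X_MBHC_L_DET_EN);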
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 944aa3aa3035..5e08468854db 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -719,8 +719,13 @@ void mhi_device_put(struct mhi_device *mhi_dev);
* host and device execution environments match and
* channels are in a DISABLED state.
* @mhi_dev: Device associated with the channels
+ * @flags: MHI channel flags
*/
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev,
+ unsigned int flags);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
/**
* mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
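Reviewer note: every existing caller gains the extra argument; a client that wants the core to pre-allocate and queue inbound buffers would now do roughly the following (sketch, surrounding client code assumed):

	ret = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
	if (ret)
		return ret;
	/* Clients that manage their own RX buffers pass 0 for flags. */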
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4bb4e519e3f5..23dadf7aeba8 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -76,7 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
-
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
@@ -95,14 +94,9 @@ static inline void __ClearPageMovable(struct page *page)
#endif
#ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
#else
-static inline bool pmd_trans_migrating(pmd_t pmd)
-{
- return false;
-}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
@@ -110,24 +104,6 @@ static inline int migrate_misplaced_page(struct page *page,
}
#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
-{
- return -EAGAIN;
-}
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
-
-
#ifdef CONFIG_MIGRATION
/*
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1efe37466969..25a8be58d289 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1044,8 +1044,7 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
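Reviewer note: callers that previously received the IRQ number simply drop the last argument; a converted call site would look roughly like this (sketch):

	int eqn;

	err = mlx5_vector2eqn(mdev, vector, &eqn);
	if (err)
		return err;
	/* The irqn out-parameter is gone; the IRQ must be looked up elsewhere if needed. */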
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b4546e29e561..b0009aa3647f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1512,7 +1512,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
+ u8 reserved_at_248[0x2];
+ u8 umem_uid_0[0x1];
+ u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 98b56b75c625..1a9c9d94cb59 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -11,13 +11,15 @@ enum {
};
enum {
- MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps?
- MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
};
enum {
- MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
- MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
};
struct mlx5_ifc_virtio_q_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b8bc39237dac..7ca22e6e694a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,7 +145,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
/* This function must be updated when the size of struct page grows above 80
* or reduces below 56. The idea that compiler optimizes out switch()
* statement, and only leaves move/store instructions. Also the compiler can
- * combine write statments if they are both assignments and can be reordered,
+ * combine write statements if they are both assignments and can be reordered,
* this can result in several of the writes here being dropped.
*/
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
@@ -238,6 +238,9 @@ int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+void setup_initial_init_mm(void *start_code, void *end_code,
+ void *end_data, void *brk);
+
/*
* Linux kernel virtual memory manager primitives.
* The idea being to have a "virtual" mm in the same way
@@ -540,7 +543,12 @@ struct vm_fault {
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
- pte_t orig_pte; /* Value of PTE at the time of fault */
+ union {
+ pte_t orig_pte; /* Value of PTE at the time of fault */
+ pmd_t orig_pmd; /* Value of PMD at the time of fault,
+ * used by PMD fault only.
+ */
+ };
struct page *cow_page; /* Page handler may use for COW fault */
struct page *page; /* ->fault handlers should return a
@@ -898,6 +906,7 @@ void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
+void copy_huge_page(struct page *dst, struct page *src);
/*
* Compound pages have a destructor function. Provide a
@@ -3067,6 +3076,11 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
}
#endif
+int vmemmap_remap_free(unsigned long start, unsigned long end,
+ unsigned long reuse);
+int vmemmap_remap_alloc(unsigned long start, unsigned long end,
+ unsigned long reuse, gfp_t gfp_mask);
+
void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
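Reviewer note: with the new union in struct vm_fault, PMD fault paths read the snapshotted value straight from the fault context instead of re-deriving it; a sketch of the intended use inside a huge-PMD fault handler (handler context assumed):

	/* Bail out if the PMD changed under us since the fault was taken. */
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		return 0;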
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d33d97c69da9..52bbd2b7cb46 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -404,7 +404,7 @@ struct mm_struct {
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
+ /* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 1a6a9eb6d3fa..45fc2c81e370 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -33,7 +33,7 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
* access flags). User should soft dirty the page in the end callback to make
- * sure that anyone relying on soft dirtyness catch pages that might be written
+ * sure that anyone relying on soft dirtiness catch pages that might be written
* through non CPU mappings.
*
* @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
@@ -41,7 +41,12 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
* a device driver to possibly ignore the invalidation if the
- * migrate_pgmap_owner field matches the driver's device private pgmap owner.
+ * owner field matches the driver's device private pgmap owner.
+ *
+ * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
+ * longer have exclusive access to the page. When sent during creation of an
+ * exclusive range the owner will be initialised to the value provided by the
+ * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -51,6 +56,7 @@ enum mmu_notifier_event {
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
MMU_NOTIFY_MIGRATE,
+ MMU_NOTIFY_EXCLUSIVE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -161,7 +167,7 @@ struct mmu_notifier_ops {
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
- * droppped on invalidate_range_end() then the driver itself
+ * dropped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
@@ -190,7 +196,7 @@ struct mmu_notifier_ops {
* If invalidate_range() is used to manage a non-CPU TLB with
* shared page-tables, it not necessary to implement the
* invalidate_range_start()/end() notifiers, as
- * invalidate_range() alread catches the points in time when an
+ * invalidate_range() already catches the points in time when an
* external TLB range needs to be flushed. For more in depth
* discussion on this see Documentation/vm/mmu_notifier.rst
*
@@ -269,7 +275,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
- void *migrate_pgmap_owner;
+ void *owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -363,7 +369,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
* mmu_interval_read_retry() will return true.
*
* False is not reliable and only suggests a collision may not have
- * occured. It can be called many times and does not have to hold the user
+ * occurred. It can be called many times and does not have to hold the user
* provided lock.
*
* This call can be used as part of loops and other expensive operations to
@@ -521,14 +527,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->flags = flags;
}
-static inline void mmu_notifier_range_init_migrate(
- struct mmu_notifier_range *range, unsigned int flags,
+static inline void mmu_notifier_range_init_owner(
+ struct mmu_notifier_range *range,
+ enum mmu_notifier_event event, unsigned int flags,
struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long start, unsigned long end, void *pgmap)
+ unsigned long start, unsigned long end, void *owner)
{
- mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
- start, end);
- range->migrate_pgmap_owner = pgmap;
+ mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
+ range->owner = owner;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
@@ -655,8 +661,8 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
-#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
- pgmap) \
+#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
+ end, owner) \
_mmu_notifier_range_init(range, start, end)
static inline bool
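Reviewer note: callers of the old migrate-specific helper convert by passing the event explicitly; for example (sketch):

	/* Old: mmu_notifier_range_init_migrate(&range, 0, vma, mm, start, end, pgmap_owner); */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
				      vma, mm, start, end, pgmap_owner);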
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 265a32e1ff74..fcb535560028 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,7 +114,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
/*
- * Add a wild amount of padding here to ensure datas fall into separate
+ * Add a wild amount of padding here to ensure data fall into separate
* cachelines. There are very few zone structures in the machine, so space
* consumption is not a concern here.
*/
@@ -1064,7 +1064,10 @@ extern char numa_zonelist_order[];
#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
-#define NODE_DATA(nid) (&contig_page_data)
+static inline struct pglist_data *NODE_DATA(int nid)
+{
+ return &contig_page_data;
+}
#define NODE_MEM_MAP(nid) mem_map
#else /* CONFIG_NUMA */
@@ -1445,10 +1448,30 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
+/**
+ * pfn_valid - check if there is a valid memory map entry for a PFN
+ * @pfn: the page frame number to check
+ *
+ * Check if there is a valid memory map entry aka struct page for the @pfn.
+ * Note that availability of the memory map entry does not imply that
+ * there is actual usable memory at that @pfn. The struct page may
+ * represent a hole or an unusable page frame.
+ *
+ * Return: 1 for PFNs that have memory map entries and 0 otherwise
+ */
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Else it might lead to false positives when
+ * some of the upper bits are set, but the lower bits
+ * match a valid pfn.
+ */
+ if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
+ return 0;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __nr_to_section(pfn_to_section_nr(pfn));
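Reviewer note: the new round-trip check rejects PFNs whose upper PAGE_SHIFT bits are set, since those bits are silently lost when the PFN is converted to a physical address and back; a worked example with illustrative values:

	/* 64-bit, PAGE_SHIFT == 12: pfn = (1UL << 52) | 0x1234                    */
	/* PFN_PHYS(pfn) = pfn << 12  -> bit 52 is shifted past bit 63 and dropped */
	/* PHYS_PFN(...) = ... >> 12  -> 0x1234, which != pfn, so return 0         */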
diff --git a/include/linux/module.h b/include/linux/module.h
index 8100bb477d86..8a298d820dbc 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/stat.h>
+#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
@@ -369,6 +370,11 @@ struct module {
/* Unique handle for this module */
char name[MODULE_NAME_LEN];
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ /* Module build ID */
+ unsigned char build_id[BUILD_ID_SIZE_MAX];
+#endif
+
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
@@ -636,7 +642,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr);
const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
- char **modname,
+ char **modname, const unsigned char **modbuildid,
char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
@@ -740,6 +746,7 @@ static inline const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname,
+ const unsigned char **modbuildid,
char *namebuf)
{
return NULL;
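Reviewer note: both prototypes gain the modbuildid out-parameter, so in-tree callers (kallsyms etc.) have to be updated in the same series; a caller that does not need the build ID would presumably pass NULL, as the stub above simply ignores the argument (sketch, local names assumed):

	const unsigned char *buildid = NULL;

	name = module_address_lookup(addr, &size, &offset, &modname,
				     &buildid, namebuf);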
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 3e5358f4de2f..eb0d1c1db208 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -200,7 +200,7 @@ struct mpi_ec_ctx {
unsigned int nbits; /* Number of bits. */
/* Domain parameters. Note that they may not all be set and if set
- * the MPIs may be flaged as constant.
+ * the MPIs may be flagged as constant.
*/
MPI p; /* Prime specifying the field GF(p). */
MPI a; /* First coefficient of the Weierstrass equation. */
@@ -267,7 +267,7 @@ int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
/**
* mpi_get_size() - returns max size required to store the number
*
- * @a: A multi precision integer for which we want to allocate a bufer
+ * @a: A multi precision integer for which we want to allocate a buffer
*
* Return: size required to store the number
*/
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6aff469e511d..e8bdcb83172b 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -233,7 +233,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a89955f3cbc8..88227044fc86 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -380,6 +380,8 @@ struct mtd_info {
int usecount;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
+ struct nvmem_device *otp_user_nvmem;
+ struct nvmem_device *otp_factory_nvmem;
/*
* Parent device from the MTD partition point of view.
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..a7376f9beddf 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -11,6 +11,7 @@
#define __LINUX_MTD_ONFI_H
#include <linux/types.h>
+#include <linux/bitfield.h>
/* ONFI version bits */
#define ONFI_VERSION_1_0 BIT(1)
@@ -24,17 +25,22 @@
#define ONFI_VERSION_4_0 BIT(9)
/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+#define ONFI_FEATURE_16_BIT_BUS BIT(0)
+#define ONFI_FEATURE_NV_DDR BIT(5)
+#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7)
/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+#define ONFI_DATA_INTERFACE_SDR 0
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5)
+#define ONFI_TIMING_MODE_0 BIT(0)
+#define ONFI_TIMING_MODE_1 BIT(1)
+#define ONFI_TIMING_MODE_2 BIT(2)
+#define ONFI_TIMING_MODE_3 BIT(3)
+#define ONFI_TIMING_MODE_4 BIT(4)
+#define ONFI_TIMING_MODE_5 BIT(5)
+#define ONFI_TIMING_MODE_UNKNOWN BIT(6)
+#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x))
/* ONFI feature number/address */
#define ONFI_FEATURE_NUMBER 256
@@ -49,7 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
/* rev info and features block */
@@ -93,14 +99,15 @@ struct nand_onfi_params {
/* electrical parameter block */
u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
+ __le16 sdr_timing_modes;
__le16 program_cache_timing_mode;
__le16 t_prog;
__le16 t_bers;
__le16 t_r;
__le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
+ u8 nvddr_timing_modes;
+ u8 nvddr2_timing_modes;
+ u8 nvddr_nvddr2_features;
__le16 clk_pin_capacitance_typ;
__le16 io_pin_capacitance_typ;
__le16 input_pin_capacitance_typ;
@@ -160,7 +167,9 @@ struct onfi_ext_param_page {
* @tBERS: Block erase time
* @tR: Page read time
* @tCCS: Change column setup time
- * @async_timing_mode: Supported asynchronous timing mode
+ * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only)
+ * @sdr_timing_modes: Supported asynchronous/SDR timing modes
+ * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
*/
@@ -170,7 +179,9 @@ struct onfi_params {
u16 tBERS;
u16 tR;
u16 tCCS;
- u16 async_timing_mode;
+ bool fast_tCAD;
+ u16 sdr_timing_modes;
+ u16 nvddr_timing_modes;
u16 vendor_revision;
u8 vendor[88];
};
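Reviewer note: the new FIELD_GET-based helper extracts the timing-mode index from the low nibble of a SET/GET FEATURES payload byte; roughly (sketch, buffer name assumed):

	/* ONFI_TIMING_MODE_PARAM() returns the 0..15 mode index from the feature byte. */
	unsigned int mode = ONFI_TIMING_MODE_PARAM(feature[0]);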
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 29df2f43dcb5..b2f9dd3cbd69 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
struct nand_chip;
+struct gpio_desc;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -385,8 +386,8 @@ struct nand_ecc_ctrl {
* This struct defines the timing requirements of a SDR NAND chip.
* These information can be found in every NAND datasheets and the timings
* meaning are described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
+ * (chapter 4.15 Timing Parameters)
*
* All these timings are expressed in picoseconds.
*
@@ -472,11 +473,127 @@ struct nand_sdr_timings {
};
/**
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet and the meaning of the
+ * timings is described in the ONFI specifications:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tAC_min: Access window of DQ[7:0] from CLK
+ * @tAC_max: Access window of DQ[7:0] from CLK
+ * @tADL_min: ALE to data loading time
+ * @tCAD_min: Command, Address, Data delay
+ * @tCAH_min: Command/Address DQ hold time
+ * @tCALH_min: W/R_n, CLE and ALE hold time
+ * @tCALS_min: W/R_n, CLE and ALE setup time
+ * @tCAS_min: Command/address DQ setup time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCK_min: Average clock cycle time
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDQSCK_min: Start of the access window of DQS from CLK
+ * @tDQSCK_max: End of the access window of DQS from CLK
+ * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
+ * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
+ * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
+ * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
+ * @tDS_min: Data setup time
+ * @tDSC_min: DQS cycle time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tQHS_max: Data hold skew factor
+ * @tRHW_min: Data output cycle to command, address, or data input cycle
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWHR_min: WE# high to RE# low
+ * @tWRCK_min: W/R_n low to data output cycle
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_nvddr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tAC_min;
+ u32 tAC_max;
+ u32 tADL_min;
+ u32 tCAD_min;
+ u32 tCAH_min;
+ u32 tCALH_min;
+ u32 tCALS_min;
+ u32 tCAS_min;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCK_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDQSCK_min;
+ u32 tDQSCK_max;
+ u32 tDQSD_min;
+ u32 tDQSD_max;
+ u32 tDQSHZ_max;
+ u32 tDQSQ_max;
+ u32 tDS_min;
+ u32 tDSC_min;
+ u32 tFEAT_max;
+ u32 tITC_max;
+ u32 tQHS_max;
+ u32 tRHW_min;
+ u32 tRR_min;
+ u32 tRST_max;
+ u32 tWB_max;
+ u32 tWHR_min;
+ u32 tWRCK_min;
+ u32 tWW_min;
+};
+
+/*
+ * While timings related to the data interface itself are mostly different
+ * between SDR and NV-DDR, timings related to the internal chip behavior are
+ * common. IOW, the following entries which describe the internal delays have
+ * the same definition and are shared in both SDR and NV-DDR timing structures:
+ * - tADL_min
+ * - tBERS_max
+ * - tCCS_min
+ * - tFEAT_max
+ * - tPROG_max
+ * - tR_max
+ * - tRR_min
+ * - tRST_max
+ * - tWB_max
+ *
+ * The below macros return the value of a given timing, no matter the interface.
+ */
+#define NAND_COMMON_TIMING_PS(conf, timing_name) \
+ nand_interface_is_sdr(conf) ? \
+ nand_get_sdr_timings(conf)->timing_name : \
+ nand_get_nvddr_timings(conf)->timing_name
+
+#define NAND_COMMON_TIMING_MS(conf, timing_name) \
+ PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+#define NAND_COMMON_TIMING_NS(conf, timing_name) \
+ PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+/**
* enum nand_interface_type - NAND interface type
* @NAND_SDR_IFACE: Single Data Rate interface
+ * @NAND_NVDDR_IFACE: Double Data Rate interface
*/
enum nand_interface_type {
NAND_SDR_IFACE,
+ NAND_NVDDR_IFACE,
};
/**
@@ -485,6 +602,7 @@ enum nand_interface_type {
* @timings: The timing information
* @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
*/
struct nand_interface_config {
enum nand_interface_type type;
@@ -492,24 +610,56 @@ struct nand_interface_config {
unsigned int mode;
union {
struct nand_sdr_timings sdr;
+ struct nand_nvddr_timings nvddr;
};
} timings;
};
/**
+ * nand_interface_is_sdr - get the interface type
+ * @conf: The data interface
+ */
+static bool nand_interface_is_sdr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_SDR_IFACE;
+}
+
+/**
+ * nand_interface_is_nvddr - get the interface type
+ * @conf: The data interface
+ */
+static bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_NVDDR_IFACE;
+}
+
+/**
* nand_get_sdr_timings - get SDR timing from data interface
* @conf: The data interface
*/
static inline const struct nand_sdr_timings *
nand_get_sdr_timings(const struct nand_interface_config *conf)
{
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return ERR_PTR(-EINVAL);
return &conf->timings.sdr;
}
/**
+ * nand_get_nvddr_timings - get NV-DDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_nvddr_timings *
+nand_get_nvddr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_nvddr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.nvddr;
+}
+
+/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
@@ -1413,7 +1563,6 @@ void nand_cleanup(struct nand_chip *chip);
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
-struct gpio_desc;
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
unsigned long timeout_ms);
@@ -1446,4 +1595,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip)
return chip->data_buf;
}
+/* Parse the gpio-cs property */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array);
+
#endif /* __LINUX_MTD_RAWNAND_H */
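Reviewer note: a minimal sketch of how a controller driver can stay interface-agnostic with the new helpers, assuming it only needs timings that are shared between SDR and NV-DDR (local variable names assumed):

	/* Works for both SDR and NV-DDR configurations. */
	u32 tWB_ns = NAND_COMMON_TIMING_NS(conf, tWB_max);

	if (nand_interface_is_nvddr(conf))
		cycle_ps = nand_get_nvddr_timings(conf)->tCK_min;
	else
		cycle_ps = nand_get_sdr_timings(conf)->tWC_min;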
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 98ed91b529ea..f67457748ed8 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -383,6 +383,7 @@ struct spi_nor_flash_parameter;
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
* @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @sfdp: the SFDP data of the flash
* @controller_ops: SPI NOR controller driver specific operations.
* @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
@@ -412,6 +413,7 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
enum spi_nor_cmd_ext cmd_ext_type;
+ struct sfdp *sfdp;
const struct spi_nor_controller_ops *controller_ops;
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 47e5679b48e1..000b126acfb6 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -918,12 +918,4 @@
extern void mv64340_irq_init(unsigned int base);
-/* Watchdog Platform Device, Driver Data */
-#define MV64x60_WDT_NAME "mv64x60_wdt"
-
-struct mv64x60_wdt_pdata {
- int timeout; /* watchdog expiry in seconds, default 10 */
- int bus_clk; /* bus clock in MHz, default 133 */
-};
-
#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
deleted file mode 100644
index 90a803aa42e8..000000000000
--- a/include/linux/n_r3964.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-#ifndef __LINUX_N_R3964_H__
-#define __LINUX_N_R3964_H__
-
-
-#include <linux/param.h>
-#include <uapi/linux/n_r3964.h>
-
-/*
- * Common ascii handshake characters:
- */
-
-#define STX 0x02
-#define ETX 0x03
-#define DLE 0x10
-#define NAK 0x15
-
-/*
- * Timeouts (from milliseconds to jiffies)
- */
-
-#define R3964_TO_QVZ ((550)*HZ/1000)
-#define R3964_TO_ZVZ ((220)*HZ/1000)
-#define R3964_TO_NO_BUF ((400)*HZ/1000)
-#define R3964_NO_TX_ROOM ((100)*HZ/1000)
-#define R3964_TO_RX_PANIC ((4000)*HZ/1000)
-#define R3964_MAX_RETRIES 5
-
-
-enum { R3964_IDLE,
- R3964_TX_REQUEST, R3964_TRANSMITTING,
- R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK,
- R3964_WAIT_FOR_RX_BUF,
- R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT
- };
-
-/*
- * All open file-handles are 'clients' and are stored in a linked list:
- */
-
-struct r3964_message;
-
-struct r3964_client_info {
- spinlock_t lock;
- struct pid *pid;
- unsigned int sig_flags;
-
- struct r3964_client_info *next;
-
- struct r3964_message *first_msg;
- struct r3964_message *last_msg;
- struct r3964_block_header *next_block_to_read;
- int msg_count;
-};
-
-
-
-struct r3964_block_header;
-
-/* internal version of client_message: */
-struct r3964_message {
- int msg_id;
- int arg;
- int error_code;
- struct r3964_block_header *block;
- struct r3964_message *next;
-};
-
-/*
- * Header of received block in rx_buf/tx_buf:
- */
-
-struct r3964_block_header
-{
- unsigned int length; /* length in chars without header */
- unsigned char *data; /* usually data is located
- immediately behind this struct */
- unsigned int locks; /* only used in rx_buffer */
-
- struct r3964_block_header *next;
- struct r3964_client_info *owner; /* =NULL in rx_buffer */
-};
-
-/*
- * If rx_buf hasn't enough space to store R3964_MTU chars,
- * we will reject all incoming STX-requests by sending NAK.
- */
-
-#define RX_BUF_SIZE 4000
-#define TX_BUF_SIZE 4000
-#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100
-
-#define R3964_PARITY 0x0001
-#define R3964_FRAME 0x0002
-#define R3964_OVERRUN 0x0004
-#define R3964_UNKNOWN 0x0008
-#define R3964_BREAK 0x0010
-#define R3964_CHECKSUM 0x0020
-#define R3964_ERROR 0x003f
-#define R3964_BCC 0x4000
-#define R3964_DEBUG 0x8000
-
-
-struct r3964_info {
- spinlock_t lock;
- struct tty_struct *tty;
- unsigned char priority;
- unsigned char *rx_buf; /* ring buffer */
- unsigned char *tx_buf;
-
- struct r3964_block_header *rx_first;
- struct r3964_block_header *rx_last;
- struct r3964_block_header *tx_first;
- struct r3964_block_header *tx_last;
- unsigned int tx_position;
- unsigned int rx_position;
- unsigned char last_rx;
- unsigned char bcc;
- unsigned int blocks_in_rx_queue;
-
- struct mutex read_lock; /* serialize r3964_read */
-
- struct r3964_client_info *firstClient;
- unsigned int state;
- unsigned int flags;
-
- struct timer_list tmr;
- int nRetry;
-};
-
-#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index b9605b2b46e7..be9a2b349ca7 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -36,9 +36,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_JUMPED 0x1000
-#define LOOKUP_ROOT 0x2000
-#define LOOKUP_ROOT_GRABBED 0x0008
/* Scoping flags for lookup. */
#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 10279c4830ac..ada1296c87d5 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -196,6 +196,9 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE (1<<20)
+
/* The max revision number supported by any set type + 1 */
#define IPSET_REVISION_MAX 9
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ffba254d2098..ce6474594872 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -84,6 +84,7 @@ struct nfs_open_context {
#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
+#define NFS_CONTEXT_FILE_OPEN (4)
int error;
struct list_head list;
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
index f5ba0fbff72f..222ae8883e85 100644
--- a/include/linux/nfs_ssc.h
+++ b/include/linux/nfs_ssc.h
@@ -8,6 +8,7 @@
*/
#include <linux/nfs_fs.h>
+#include <linux/sunrpc/svc.h>
extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
@@ -52,6 +53,19 @@ static inline void nfs42_ssc_close(struct file *filep)
if (nfs_ssc_client_tbl.ssc_nfs4_ops)
(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
}
+
+struct nfsd4_ssc_umount_item {
+ struct list_head nsui_list;
+ bool nsui_busy;
+ /*
+ * nsui_refcnt inited to 2, 1 on list and 1 for consumer. Entry
+ * is removed when refcnt drops to 1 and nsui_expire expires.
+ */
+ refcount_t nsui_refcnt;
+ unsigned long nsui_expire;
+ struct vfsmount *nsui_vfsmount;
+ char nsui_ipaddr[RPC_MAX_ADDRBUFLEN];
+};
#endif
/*
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 717ecc87c9e7..e9698b6278a5 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -277,6 +277,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
const struct cred *cred;
+ struct pnfs_layout_hdr *lo;
gfp_t gfp_flags;
};
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index ac398e143c9a..567c3ddba2c4 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -119,7 +119,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
* are called in both __init and non-__init functions, if they are not
- * inlined we will end up with a section mis-match error (of the type of
+ * inlined we will end up with a section mismatch error (of the type of
* freeable items not being freed). So we must use __always_inline here
* to fix the problem. If other functions in the future also end up in
* this situation they will also need to be annotated as __always_inline
@@ -515,7 +515,7 @@ static inline int node_random(const nodemask_t *mask)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.
+ * For nodemask scratch area.
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
@@ -528,7 +528,7 @@ static inline int node_random(const nodemask_t *mask)
#define NODEMASK_FREE(m) do {} while (0)
#endif
-/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index e162b757b6d5..104505e9028f 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -25,6 +25,7 @@ enum nvmem_type {
NVMEM_TYPE_EEPROM,
NVMEM_TYPE_OTP,
NVMEM_TYPE_BATTERY_BACKED,
+ NVMEM_TYPE_FRAM,
};
#define NVMEM_DEVID_NONE (-1)
@@ -57,6 +58,7 @@ struct nvmem_keepout {
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
* @root_only: Device is accessibly to root only.
+ * @of_node: If given, this will be used instead of the parent's of_node.
* @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
@@ -86,6 +88,7 @@ struct nvmem_config {
enum nvmem_type type;
bool read_only;
bool root_only;
+ struct device_node *of_node;
bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
diff --git a/include/linux/of.h b/include/linux/of.h
index d8db8d3592fd..9c2e71e202d1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1329,6 +1329,12 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
+#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
@@ -1338,10 +1344,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
- static const struct of_device_id __of_table_##name \
- __attribute__((unused)) \
- = { .compatible = compat, \
- .data = (fn == (fn_type)NULL) ? fn : fn }
+ _OF_DECLARE_STUB(table, name, compat, fn, fn_type)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 88bc943405cd..45598dbec269 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -51,8 +51,8 @@ void __iomem *of_io_request_and_map(struct device_node *device,
* the address space flags too. The PCI version uses a BAR number
* instead of an absolute index
*/
-extern const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags);
+extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -61,6 +61,11 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -75,8 +80,8 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags)
+static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags)
{
return NULL;
}
@@ -100,6 +105,19 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
@@ -124,32 +142,16 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
#endif
#define of_range_parser_init of_pci_range_parser_init
-#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
-extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
- u64 *size, unsigned int *flags);
-extern int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res);
-#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
-static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
+static inline const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags)
{
- return -ENOSYS;
+ return __of_get_address(dev, index, -1, size, flags);
}
-static inline const __be32 *of_get_pci_address(struct device_node *dev,
- int bar_no, u64 *size, unsigned int *flags)
-{
- return NULL;
-}
-static inline int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
+static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags)
{
- return -ENOSYS;
+ return __of_get_address(dev, -1, bar_no, size, flags);
}
-#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
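
Both accessors above now collapse into __of_get_address(), with -1 selecting the unused index or BAR argument. A minimal sketch of a hypothetical caller (example_first_reg_addr() is illustrative, not part of this patch):

    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Hypothetical helper: fetch and translate a node's first "reg" entry.
     * of_get_address() is now a wrapper around __of_get_address(dev, 0, -1, ...). */
    static u64 example_first_reg_addr(struct device_node *np, u64 *size)
    {
        const __be32 *reg;
        unsigned int flags;

        reg = of_get_address(np, 0, size, &flags);
        if (!reg)
            return OF_BAD_ADDR;

        return of_translate_address(np, reg);
    }
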
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 16f4b3e87f20..55c1eb300a86 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -2,29 +2,18 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
-#include <linux/device.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
+struct device;
+struct device_node;
+struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size);
-
extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np,
const u32 *id);
#else
-static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size)
-{
- return -EINVAL;
-}
-
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np,
const u32 *id)
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 8216a4156263..4de2a24cadc9 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -27,11 +27,11 @@ struct reserved_mem_ops {
typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
+#ifdef CONFIG_OF_RESERVED_MEM
+
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
-#ifdef CONFIG_OF_RESERVED_MEM
-
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
int of_reserved_mem_device_init_by_name(struct device *dev,
@@ -39,11 +39,12 @@ int of_reserved_mem_device_init_by_name(struct device *dev,
const char *name);
void of_reserved_mem_device_release(struct device *dev);
-void fdt_init_reserved_mem(void);
-void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size);
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
#else
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
@@ -59,9 +60,6 @@ static inline int of_reserved_mem_device_init_by_name(struct device *dev,
static inline void of_reserved_mem_device_release(struct device *pdev) { }
-static inline void fdt_init_reserved_mem(void) { }
-static inline void fdt_reserved_mem_save_node(unsigned long node,
- const char *uname, phys_addr_t base, phys_addr_t size) { }
static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
return NULL;
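
With the _OF_DECLARE_STUB() fallback above, RESERVEDMEM_OF_DECLARE() users keep compiling when CONFIG_OF_RESERVED_MEM is disabled. A minimal sketch of such a user (the compatible string and init function are hypothetical):

    #include <linux/init.h>
    #include <linux/of_reserved_mem.h>
    #include <linux/printk.h>

    /* Hypothetical reserved-memory setup hook. */
    static int __init example_rmem_init(struct reserved_mem *rmem)
    {
        pr_info("example rmem at %pa, size %pa\n", &rmem->base, &rmem->size);
        return 0;
    }
    RESERVEDMEM_OF_DECLARE(example_rmem, "vendor,example-rmem", example_rmem_init);
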
diff --git a/include/linux/once.h b/include/linux/once.h
index 9225ee6d96c7..ae6f4eb41cbe 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -7,7 +7,7 @@
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags);
+ unsigned long *flags, struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
- &___flags); \
+ &___flags, THIS_MODULE); \
} \
} \
___ret; \
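
The extra THIS_MODULE argument lets __do_once_done() pin the module whose static key it is about to disable. A hypothetical caller is unchanged (names below are illustrative):

    #include <linux/module.h>
    #include <linux/once.h>

    static u32 example_seed;

    static void example_init_seed(u32 *seed)
    {
        *seed = 42;    /* placeholder one-time initialization */
    }

    static void example_use_seed(void)
    {
        /* The callback runs at most once; later calls are patched out
         * via the static key, now torn down with the module pinned. */
        DO_ONCE(example_init_seed, &example_seed);
    }
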
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 458696550028..5922031ffab6 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -704,6 +704,18 @@ PAGEFLAG_FALSE(DoubleMap)
#endif
/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
+{
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
+}
+
+/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
* sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
@@ -766,9 +778,19 @@ PAGE_TYPE_OPS(Buddy, buddy)
* relies on this feature is aware that re-onlining the memory block will
* require to re-set the pages PageOffline() and not giving them to the
* buddy via online_page_callback_t.
+ *
+ * There are drivers that mark a page PageOffline() and expect there won't be
+ * any further access to page content. PFN walkers that read content of random
+ * pages should check PageOffline() and synchronize with such drivers using
+ * page_offline_freeze()/page_offline_thaw().
*/
PAGE_TYPE_OPS(Offline, offline)
+extern void page_offline_freeze(void);
+extern void page_offline_thaw(void);
+extern void page_offline_begin(void);
+extern void page_offline_end(void);
+
/*
* Marks pages in use as page tables.
*/
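
A rough sketch of how a PFN walker might use the new helper together with the PageOffline() guidance above (the walker itself is hypothetical):

    #include <linux/mm.h>
    #include <linux/page-flags.h>

    /* Hypothetical guard used before reading a page's contents. */
    static bool example_page_readable(struct page *page)
    {
        /* Best-effort and racy by nature, as documented above. */
        if (is_page_hwpoison(page))
            return false;

        /* Drivers may reuse PageOffline() pages; readers are expected to
         * coordinate via page_offline_freeze()/page_offline_thaw(). */
        if (PageOffline(page))
            return false;

        return true;
    }
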
diff --git a/include/linux/panic.h b/include/linux/panic.h
new file mode 100644
index 000000000000..f5844908a089
--- /dev/null
+++ b/include/linux/panic.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_H
+#define _LINUX_PANIC_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+extern long (*panic_blink)(int state);
+__printf(1, 2)
+void panic(const char *fmt, ...) __noreturn __cold;
+void nmi_panic(struct pt_regs *regs, const char *msg);
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern bool oops_may_print(void);
+
+#ifdef CONFIG_SMP
+extern unsigned int sysctl_oops_all_cpu_backtrace;
+#else
+#define sysctl_oops_all_cpu_backtrace 0
+#endif /* CONFIG_SMP */
+
+extern int panic_timeout;
+extern unsigned long panic_print;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+extern int panic_on_warn;
+
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
+
+extern int sysctl_panic_on_rcu_stall;
+extern int sysctl_max_rcu_stall_to_panic;
+extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
+/*
+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
+ * holds a CPU number which is executing panic() currently. A value of
+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
+ */
+extern atomic_t panic_cpu;
+#define PANIC_CPU_INVALID -1
+
+/*
+ * Only to be used by arch init code. If the user over-wrote the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
+
+/* This cannot be an enum because some may be used in assembly source. */
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_CPU_OUT_OF_SPEC 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+#define TAINT_FIRMWARE_WORKAROUND 11
+#define TAINT_OOT_MODULE 12
+#define TAINT_UNSIGNED_MODULE 13
+#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
+#define TAINT_AUX 16
+#define TAINT_RANDSTRUCT 17
+#define TAINT_FLAGS_COUNT 18
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
+
+struct taint_flag {
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
+ bool module; /* also show as a per-module taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
+
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE,
+};
+
+extern const char *print_tainted(void);
+extern void add_taint(unsigned flag, enum lockdep_ok);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+
+#endif /* _LINUX_PANIC_H */
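
The declarations above move into a dedicated header, so callers are unaffected. A hypothetical user of the taint API, purely illustrative:

    #include <linux/panic.h>
    #include <linux/printk.h>

    static void example_apply_firmware_quirk(bool broken)
    {
        if (!broken)
            return;

        /* Taint the kernel but tell lockdep its state is still reliable. */
        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

        if (test_taint(TAINT_FIRMWARE_WORKAROUND))
            pr_warn("tainted: %s\n", print_tainted());
    }
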
diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h
new file mode 100644
index 000000000000..41e32483d7a7
--- /dev/null
+++ b/include/linux/panic_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_NOTIFIERS_H
+#define _LINUX_PANIC_NOTIFIERS_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+extern struct atomic_notifier_head panic_notifier_list;
+
+extern bool crash_kexec_post_notifiers;
+
+#endif /* _LINUX_PANIC_NOTIFIERS_H */
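
Code that only needs the notifier chain can now include this small header. A minimal registration sketch (the callback is hypothetical):

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <linux/panic_notifier.h>

    static int example_panic_cb(struct notifier_block *nb, unsigned long action,
                                void *data)
    {
        /* 'data' carries the panic message string. */
        return NOTIFY_DONE;
    }

    static struct notifier_block example_panic_nb = {
        .notifier_call = example_panic_cb,
    };

    static int __init example_panic_init(void)
    {
        return atomic_notifier_chain_register(&panic_notifier_list,
                                              &example_panic_nb);
    }
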
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index fbdadd4d8377..adea5a4771cf 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -55,6 +55,7 @@ struct pci_ecam_ops {
struct pci_config_window {
struct resource res;
struct resource busr;
+ unsigned int bus_shift;
void *priv;
const struct pci_ecam_ops *ops;
union {
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index 662881335c7e..3e2140d7e31d 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/**
+/*
* PCI Endpoint ConfigFS header file
*
* Copyright (C) 2017 Texas Instruments
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index b82c9b100e97..50a649d33e68 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Controller* (EPC) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -58,6 +58,7 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
* @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
+ * @get_features: ops to get the features supported by the EPC
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
@@ -150,6 +151,8 @@ struct pci_epc {
/**
* struct pci_epc_features - features supported by a EPC device per function
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @core_init_notifier: indicate cores that can notify about their availability
+ * for initialization
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
* @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 6833e2160ef1..2debc27ba95e 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Function* (EPF) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -102,6 +102,8 @@ struct pci_epf_driver {
* @phys_addr: physical address that should be mapped to the BAR
* @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
+ * @barno: BAR number
+ * @flags: flags that are set for the BAR
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
@@ -118,6 +120,7 @@ struct pci_epf_bar {
* @header: represents standard configuration header
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
+ * @msix_interrupts: number of MSI-X interrupts required by this function
* @func_no: unique function number within this endpoint device
* @epc: the EPC device to which this EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d1f0916c6a37..540b377ca8f6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -497,7 +497,7 @@ struct pci_dev {
u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
- struct pci_p2pdma *p2pdma;
+ struct pci_p2pdma __rcu *p2pdma;
#endif
u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
@@ -1624,6 +1624,9 @@ void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+int pci_dev_trylock(struct pci_dev *dev);
+void pci_dev_unlock(struct pci_dev *dev);
+
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
* a PCI domain is defined to be a set of PCI buses which share
@@ -1775,6 +1778,10 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
+static inline int pci_register_io_range(struct fwnode_handle *fwnode,
+ phys_addr_t addr, resource_size_t size)
+{ return -EINVAL; }
+
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
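
pci_dev_trylock()/pci_dev_unlock() become part of the public PCI API here. A hedged sketch of a caller that avoids sleeping on the device lock (the reset path shown is illustrative):

    #include <linux/errno.h>
    #include <linux/pci.h>

    static int example_try_reset(struct pci_dev *pdev)
    {
        int ret = -EAGAIN;

        /* pci_dev_trylock() returns non-zero when the lock was acquired. */
        if (pci_dev_trylock(pdev)) {
            ret = pci_reset_function_locked(pdev);
            pci_dev_unlock(pdev);
        }
        return ret;
    }
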
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index b482e42d7153..2dac431d94ac 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -50,6 +50,8 @@ struct hotplug_slot_ops {
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
+ * @slot_list: internal list used to track hotplug PCI slots
+ * @pci_slot: represents a physical slot
* @owner: The module owner of this structure
* @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index dff7040f629a..af1071535de8 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -412,7 +412,7 @@ do { \
* instead.
*
* If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
* because the execution thread was rescheduled on another processor or an
* interrupt occurred and the same percpu variable was modified from the
* interrupt context.
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 16c35a728b4c..ae16a9856305 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -213,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
*
* This function is safe to call as long as @ref is between init and exit.
*/
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c32600c9e1ad..e24d2c992b11 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -29,6 +29,24 @@
#endif
/*
+ * This defines the first usable user address. Platforms
+ * can override its value with custom FIRST_USER_ADDRESS
+ * defined in their respective <asm/pgtable.h>.
+ */
+#ifndef FIRST_USER_ADDRESS
+#define FIRST_USER_ADDRESS 0UL
+#endif
+
+/*
+ * This defines the generic helper for accessing PMD page
+ * table page. Although platforms can still override this
+ * via their respective <asm/pgtable.h>.
+ */
+#ifndef pmd_pgtable
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+/*
* A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
*
* The pXx_index() functions return the index of the entry in the page
@@ -88,7 +106,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+ return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif
@@ -96,7 +114,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+ return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 0ed434d02196..f3286f4cd306 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -125,7 +125,7 @@ struct phy_ops {
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
- * @max_link_rate: Maximum link rate supported by PHY (in Mbps)
+ * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer)
* @mode: PHY mode
*/
struct phy_attrs {
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 71d956935405..3a35e74cdc61 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef PHY_TEGRA_XUSB_H
@@ -8,6 +8,7 @@
struct tegra_xusb_padctl;
struct device;
+enum usb_device_speed;
struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
@@ -23,4 +24,11 @@ int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port);
+int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy);
+
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 5d2705f1d01c..fc5642431b92 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -48,6 +48,7 @@ struct pipe_buffer {
* @files: number of struct file referring this pipe (protected by ->i_lock)
* @r_counter: reader counter
* @w_counter: writer counter
+ * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
@@ -70,6 +71,7 @@ struct pipe_inode_info {
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
+ unsigned int poll_usage;
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 2955ba976048..6beb26b7151d 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -44,10 +44,6 @@ static inline bool arch_pkeys_enabled(void)
return false;
}
-static inline void copy_init_pkru_to_fpregs(void)
-{
-}
-
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
#endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h
deleted file mode 100644
index 0e0d3df9bf72..000000000000
--- a/include/linux/pl353-smc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ARM PL353 SMC Driver Header
- *
- * Copyright (C) 2012 - 2018 Xilinx, Inc
- */
-
-#ifndef __LINUX_PL353_SMC_H
-#define __LINUX_PL353_SMC_H
-
-enum pl353_smc_ecc_mode {
- PL353_SMC_ECCMODE_BYPASS = 0,
- PL353_SMC_ECCMODE_APB = 1,
- PL353_SMC_ECCMODE_MEM = 2
-};
-
-enum pl353_smc_mem_width {
- PL353_SMC_MEM_WIDTH_8 = 0,
- PL353_SMC_MEM_WIDTH_16 = 1
-};
-
-u32 pl353_smc_get_ecc_val(int ecc_reg);
-bool pl353_smc_ecc_is_busy(void);
-int pl353_smc_get_nand_int_status_raw(void);
-void pl353_smc_clr_nand_int(void);
-int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode);
-int pl353_smc_set_ecc_pg_size(unsigned int pg_sz);
-int pl353_smc_set_buswidth(unsigned int bw);
-void pl353_smc_set_cycles(u32 timings[]);
-#endif
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
deleted file mode 100644
index 725602d9df91..000000000000
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MACH_MXC_SDMA_H__
-#define __MACH_MXC_SDMA_H__
-
-/**
- * struct sdma_script_start_addrs - SDMA script start pointers
- *
- * start addresses of the different functions in the physical
- * address space of the SDMA engine.
- */
-struct sdma_script_start_addrs {
- s32 ap_2_ap_addr;
- s32 ap_2_bp_addr;
- s32 ap_2_ap_fixed_addr;
- s32 bp_2_ap_addr;
- s32 loopback_on_dsp_side_addr;
- s32 mcu_interrupt_only_addr;
- s32 firi_2_per_addr;
- s32 firi_2_mcu_addr;
- s32 per_2_firi_addr;
- s32 mcu_2_firi_addr;
- s32 uart_2_per_addr;
- s32 uart_2_mcu_addr;
- s32 per_2_app_addr;
- s32 mcu_2_app_addr;
- s32 per_2_per_addr;
- s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_addr;
- s32 per_2_shp_addr;
- s32 mcu_2_shp_addr;
- s32 ata_2_mcu_addr;
- s32 mcu_2_ata_addr;
- s32 app_2_per_addr;
- s32 app_2_mcu_addr;
- s32 shp_2_per_addr;
- s32 shp_2_mcu_addr;
- s32 mshc_2_mcu_addr;
- s32 mcu_2_mshc_addr;
- s32 spdif_2_mcu_addr;
- s32 mcu_2_spdif_addr;
- s32 asrc_2_mcu_addr;
- s32 ext_mem_2_ipu_addr;
- s32 descrambler_addr;
- s32 dptc_dvfs_addr;
- s32 utra_addr;
- s32 ram_code_start_addr;
- /* End of v1 array */
- s32 mcu_2_ssish_addr;
- s32 ssish_2_mcu_addr;
- s32 hdmi_dma_addr;
- /* End of v2 array */
- s32 zcanfd_2_mcu_addr;
- s32 zqspi_2_mcu_addr;
- s32 mcu_2_ecspi_addr;
- /* End of v3 array */
- s32 mcu_2_zqspi_addr;
- /* End of v4 array */
-};
-
-#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/pata_ixp4xx_cf.h b/include/linux/platform_data/pata_ixp4xx_cf.h
new file mode 100644
index 000000000000..601ba97fef57
--- /dev/null
+++ b/include/linux/platform_data/pata_ixp4xx_cf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_PATA_IXP4XX_H
+#define __PLATFORM_DATA_PATA_IXP4XX_H
+
+#include <linux/types.h>
+
+/*
+ * This structure provides a means for the board setup code
+ * to give information to the pata_ixp4xx driver. It is
+ * passed as platform_data.
+ */
+struct ixp4xx_pata_data {
+ volatile u32 *cs0_cfg;
+ volatile u32 *cs1_cfg;
+ unsigned long cs0_bits;
+ unsigned long cs1_bits;
+ void __iomem *cs0;
+ void __iomem *cs1;
+};
+
+#endif
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index e40b28ca892e..897051e51b78 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -13,8 +13,9 @@
/**
* struct st_sensors_platform_data - Platform data for the ST sensors
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
- * Available only for accelerometer and pressure sensors.
+ * Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * Magnetometer DRDY is supported only on LSM9DS0.
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index cd81e060863c..ed42ea9f60ba 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -66,9 +66,6 @@ extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
extern void __iomem *
-devm_platform_ioremap_resource_wc(struct platform_device *pdev,
- unsigned int index);
-extern void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name);
extern int platform_get_irq(struct platform_device *, unsigned int);
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
deleted file mode 100644
index b8fac96f05aa..000000000000
--- a/include/linux/pm2301_charger.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PM2301 charger driver.
- *
- * Copyright (C) 2012 ST Ericsson Corporation
- *
- * Contact: Olivier LAUNAY (olivier.launay@stericsson.com
- */
-
-#ifndef __LINUX_PM2301_H
-#define __LINUX_PM2301_H
-
-/**
- * struct pm2xxx_bm_charger_parameters - Charger specific parameters
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct pm2xxx_bm_charger_parameters {
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct pm2xxx_bm_data - pm2xxx battery management data
- * @enable_overshoot flag to enable VBAT overshoot control
- * @chg_params charger parameters
- */
-struct pm2xxx_bm_data {
- bool enable_overshoot;
- const struct pm2xxx_bm_charger_parameters *chg_params;
-};
-
-struct pm2xxx_charger_platform_data {
- char **supplied_to;
- size_t num_supplicants;
- int i2c_bus;
- const char *label;
- int gpio_irq_number;
- unsigned int lpn_gpio;
- int irq_type;
-};
-
-struct pm2xxx_platform_data {
- struct pm2xxx_charger_platform_data *wall_charger;
- struct pm2xxx_bm_data *battery;
-};
-
-#endif /* __LINUX_PM2301_H */
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
deleted file mode 100644
index 51976b52f373..000000000000
--- a/include/linux/power/ab8500.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson 2013
- * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
- */
-
-#ifndef PWR_AB8500_H
-#define PWR_AB8500_H
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
-extern const int ab8500_temp_tbl_a_size;
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
-extern const int ab8500_temp_tbl_b_size;
-
-#endif /* PWR_AB8500_H */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index aba237c0b3a2..71fac9237725 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -11,7 +11,10 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
+#define PTP_CLOCK_NAME_LEN 32
/**
* struct ptp_clock_request - request PTP clock event
*
@@ -134,7 +137,7 @@ struct ptp_system_timestamp {
struct ptp_clock_info {
struct module *owner;
- char name[16];
+ char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
@@ -304,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
*/
void ptp_cancel_worker_sync(struct ptp_clock *ptp);
+/**
+ * ptp_get_vclocks_index() - get all vclocks index on pclock, and
+ * caller is responsible to free memory
+ * of vclock_index
+ *
+ * @pclock_index: phc index of ptp pclock.
+ * @vclock_index: pointer to pointer of vclock index.
+ *
+ * return number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert timestamp to a ptp vclock time
+ *
+ * @hwtstamps: skb_shared_hwtstamps structure pointer
+ * @vclock_index: phc index of ptp vclock.
+ */
+void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -323,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index)
+{ }
#endif
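
A minimal sketch of a consumer of the two new calls (the enumeration loop is hypothetical; the kfree() reflects the ownership rule stated in the kernel-doc above):

    #include <linux/printk.h>
    #include <linux/ptp_clock_kernel.h>
    #include <linux/slab.h>

    static void example_list_vclocks(int pclock_index)
    {
        int *vclock_index = NULL;
        int i, num;

        num = ptp_get_vclocks_index(pclock_index, &vclock_index);
        for (i = 0; i < num; i++)
            pr_info("vclock %d uses phc index %d\n", i, vclock_index[i]);

        kfree(vclock_index);    /* caller frees the array */
    }
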
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 5bb90af4997e..a0b7e43049d5 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -54,12 +54,17 @@ enum {
* @duty_cycle: PWM duty cycle (in nanoseconds)
* @polarity: PWM polarity
* @enabled: PWM enabled status
+ * @usage_power: If set, the PWM driver is only required to maintain the power
+ * output but has more freedom regarding signal form.
+ * If supported, the signal can be optimized, for example to
+ * improve EMI by phase shifting individual channels.
*/
struct pwm_state {
u64 period;
u64 duty_cycle;
enum pwm_polarity polarity;
bool enabled;
+ bool usage_power;
};
/**
@@ -188,6 +193,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
state->period = args.period;
state->polarity = args.polarity;
state->duty_cycle = 0;
+ state->usage_power = false;
}
/**
@@ -399,6 +405,9 @@ void *pwm_get_chip_data(struct pwm_device *pwm);
int pwmchip_add(struct pwm_chip *chip);
int pwmchip_remove(struct pwm_chip *chip);
+
+int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
+
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
@@ -417,7 +426,6 @@ struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
-void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
#else
static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
{
@@ -524,10 +532,6 @@ devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
{
return ERR_PTR(-ENODEV);
}
-
-static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
-{
-}
#endif
static inline void pwm_apply_args(struct pwm_device *pwm)
@@ -558,6 +562,7 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
state.enabled = false;
state.polarity = pwm->args.polarity;
state.period = pwm->args.period;
+ state.usage_power = false;
pwm_apply_state(pwm, &state);
}
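
A hypothetical consumer opting in to the new usage_power hint; only the delivered power must be preserved, so the lowlevel driver may, for example, phase-shift channels:

    #include <linux/pwm.h>

    static int example_set_half_power(struct pwm_device *pwm)
    {
        struct pwm_state state;

        pwm_init_state(pwm, &state);    /* also clears usage_power now */
        state.duty_cycle = state.period / 2;
        state.enabled = true;
        state.usage_power = true;       /* signal form may be optimized */
        return pwm_apply_state(pwm, &state);
    }
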
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d7895b81264e..d9680b798b21 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(".data.unlikely") __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
@@ -373,7 +373,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define unrcu_pointer(p) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
- rcu_check_sparse(p, __rcu); \
+ rcu_check_sparse(p, __rcu); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
@@ -532,7 +532,12 @@ do { \
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu, and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
@@ -543,6 +548,11 @@ do { \
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu, and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
@@ -634,6 +644,12 @@ do { \
* sections, invocation of the corresponding RCU callback is deferred
* until after the all the other CPUs exit their critical sections.
*
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() is guaranteed to be waited for.
+ *
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -686,33 +702,12 @@ static __always_inline void rcu_read_lock(void)
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -728,9 +723,11 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -763,9 +760,12 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 35e0be326ffc..953e70fafe38 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -86,7 +86,6 @@ static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
(is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index b89b54130f49..53209d669400 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -49,7 +49,6 @@ void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
bool rcu_is_idle_cpu(int cpu);
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 8b795b544f75..a5b37bc10865 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -243,7 +243,7 @@ struct fw_rsc_trace {
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
- * @notifyid is a unique rproc-wide notify index for this vring. This notify
+ * @notifyid: a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
* @pa: physical address
@@ -266,18 +266,18 @@ struct fw_rsc_vdev_vring {
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
- * @notifyid is a unique rproc-wide notify index for this vdev. This notify
+ * @notifyid: a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote processor, to let it know that the
* status/features of this vdev have changes.
- * @dfeatures specifies the virtio device features supported by the firmware
- * @gfeatures is a place holder used by the host to write back the
+ * @dfeatures: specifies the virtio device features supported by the firmware
+ * @gfeatures: a place holder used by the host to write back the
* negotiated features that are supported by both sides.
- * @config_len is the size of the virtio config space of this vdev. The config
+ * @config_len: the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
- * @status is a place holder where the host will indicate its virtio progress.
- * @num_of_vrings indicates how many vrings are described in this vdev header
+ * @status: a place holder where the host will indicate its virtio progress.
+ * @num_of_vrings: indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
- * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
+ * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote processors
@@ -287,16 +287,17 @@ struct fw_rsc_vdev_vring {
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
- * Note: unlike virtualization systems, the term 'host' here means
- * the Linux side which is running remoteproc to control the remote
- * processors. We use the name 'gfeatures' to comply with virtio's terms,
- * though there isn't really any virtualized guest OS here: it's the host
- * which is responsible for negotiating the final features.
- * Yeah, it's a bit confusing.
- *
- * Note: immediately following this structure is the virtio config space for
- * this vdev (which is specific to the vdev; for more info, read the virtio
- * spec). the size of the config space is specified by @config_len.
+ * Note:
+ * 1. unlike virtualization systems, the term 'host' here means
+ * the Linux side which is running remoteproc to control the remote
+ * processors. We use the name 'gfeatures' to comply with virtio's terms,
+ * though there isn't really any virtualized guest OS here: it's the host
+ * which is responsible for negotiating the final features.
+ * Yeah, it's a bit confusing.
+ *
+ * 2. immediately following this structure is the virtio config space for
+ * this vdev (which is specific to the vdev; for more info, read the virtio
+ * spec). The size of the config space is specified by @config_len.
*/
struct fw_rsc_vdev {
u32 id;
@@ -440,7 +441,7 @@ enum rproc_state {
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
* @RPROC_WATCHDOG: watchdog bite
- * @RPROC_FATAL_ERROR fatal error
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -457,9 +458,9 @@ enum rproc_crash_type {
* enum rproc_dump_mechanism - Coredump options for core
* @RPROC_COREDUMP_DISABLED: Don't perform any dump
* @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with
- recovery
+ * recovery
* @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
- recovery until all segments are read
+ * recovery until all segments are read
*/
enum rproc_dump_mechanism {
RPROC_COREDUMP_DISABLED,
@@ -475,6 +476,7 @@ enum rproc_dump_mechanism {
* @priv: private data associated with the dump_segment
* @dump: custom dump function to fill device memory segment associated
* with coredump
+ * @offset: offset of the segment
*/
struct rproc_dump_segment {
struct list_head node;
@@ -524,7 +526,9 @@ struct rproc_dump_segment {
* @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
- * @char_dev: character device of the rproc
+ * @elf_class: firmware ELF class
+ * @elf_machine: firmware ELF machine
+ * @cdev: character device of the rproc
* @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release
*/
struct rproc {
@@ -613,10 +617,10 @@ struct rproc_vring {
* struct rproc_vdev - remoteproc state for a supported virtio device
* @refcount: reference counter for the vdev and vring allocations
* @subdev: handle for registering the vdev as a rproc subdevice
+ * @dev: device struct used for reference count semantics
* @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
- * @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
* @index: vdev position versus other vdev declared in resource table
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ec35814e0bbb..0fa4f60e1186 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -79,6 +79,7 @@ struct reset_controller_dev {
unsigned int nr_resets;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
@@ -88,5 +89,26 @@ int devm_reset_controller_register(struct device *dev,
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
unsigned int num_entries);
+#else
+static inline int reset_controller_register(struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_unregister(struct reset_controller_dev *rcdev)
+{
+}
+
+static inline int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries)
+{
+}
+#endif
#endif
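
With the stubs above, a provider no longer needs #ifdef CONFIG_RESET_CONTROLLER around registration. A short hypothetical sketch:

    #include <linux/reset-controller.h>

    /* Compiles and returns 0 even when CONFIG_RESET_CONTROLLER is disabled. */
    static int example_register_resets(struct device *dev,
                                       struct reset_controller_dev *rcdev)
    {
        return devm_reset_controller_register(dev, rcdev);
    }
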
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8d04e7deedc6..c976cc6de257 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -86,9 +86,6 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_MIGRATION = 0x1, /* migration mode */
- TTU_MUNLOCK = 0x2, /* munlock mode */
-
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
@@ -98,7 +95,6 @@ enum ttu_flags {
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
};
#ifdef CONFIG_MMU
@@ -195,7 +191,12 @@ static inline void page_dup_rmap(struct page *page, bool compound)
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_migrate(struct page *page, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
+
+int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct page **pages,
+ void *arg);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
@@ -240,7 +241,7 @@ int page_mkclean(struct page *);
* called in munlock()/munmap() path to check for other vmas holding
* the page mlocked.
*/
-void try_to_munlock(struct page *);
+void page_mlock(struct page *page);
void remove_migration_ptes(struct page *old, struct page *new, bool locked);
@@ -290,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+{
+}
static inline int page_mkclean(struct page *page)
{
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6f70572b2938..ecf87484814f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -474,7 +474,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
- * long as an interation is enclosed between start and stop, the user
+ * long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c9cf678c347d..b9126fe06c3f 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -538,6 +538,17 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
+static inline int __on_sig_stack(unsigned long sp)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return sp >= current->sas_ss_sp &&
+ sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+ return sp > current->sas_ss_sp &&
+ sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
+}
+
/*
* True if we are on the alternate signal stack.
*/
@@ -555,13 +566,7 @@ static inline int on_sig_stack(unsigned long sp)
if (current->sas_ss_flags & SS_AUTODISARM)
return 0;
-#ifdef CONFIG_STACK_GROWSUP
- return sp >= current->sas_ss_sp &&
- sp - current->sas_ss_sp < current->sas_ss_size;
-#else
- return sp > current->sas_ss_sp &&
- sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
+ return __on_sig_stack(sp);
}
static inline int sas_ss_flags(unsigned long sp)
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 79d0a1237e6c..80e781c51ddc 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -101,6 +101,10 @@ struct scmi_clk_proto_ops {
* to sustained performance level mapping
* @est_power_get: gets the estimated power cost for a given performance domain
* at a given frequency
+ * @fast_switch_possible: indicates if fast DVFS switching is possible or not
+ * for a given device
+ * @power_scale_mw_get: indicates if the power values provided are in milliWatts
+ * or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -153,7 +157,7 @@ struct scmi_power_proto_ops {
};
/**
- * scmi_sensor_reading - represent a timestamped read
+ * struct scmi_sensor_reading - represent a timestamped read
*
* Used by @reading_get_timestamped method.
*
@@ -167,7 +171,7 @@ struct scmi_sensor_reading {
};
/**
- * scmi_range_attrs - specifies a sensor or axis values' range
+ * struct scmi_range_attrs - specifies a sensor or axis values' range
* @min_range: The minimum value which can be represented by the sensor/axis.
* @max_range: The maximum value which can be represented by the sensor/axis.
*/
@@ -177,7 +181,7 @@ struct scmi_range_attrs {
};
/**
- * scmi_sensor_axis_info - describes one sensor axes
+ * struct scmi_sensor_axis_info - describes one sensor axes
* @id: The axes ID.
* @type: Axes type. Chosen amongst one of @enum scmi_sensor_class.
* @scale: Power-of-10 multiplier applied to the axis unit.
@@ -205,8 +209,8 @@ struct scmi_sensor_axis_info {
};
/**
- * scmi_sensor_intervals_info - describes number and type of available update
- * intervals
+ * struct scmi_sensor_intervals_info - describes number and type of available
+ * update intervals
* @segmented: Flag for segmented intervals' representation. When True there
* will be exactly 3 intervals in @desc, with each entry
* representing a member of a segment in this order:
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index afbf8037d8db..d2176a56828a 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -51,6 +51,14 @@ struct scpi_sensor_info {
* OPP is an index to the list return by @dvfs_get_info
* @dvfs_get_info: returns the DVFS capabilities of the given power
* domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: get the list of capabilities for the sensors
+ * @sensor_get_info: get the information of the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
*/
struct scpi_ops {
u32 (*get_version)(void);
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
new file mode 100644
index 000000000000..21c3771e6a56
--- /dev/null
+++ b/include/linux/secretmem.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_SECRETMEM_H
+#define _LINUX_SECRETMEM_H
+
+#ifdef CONFIG_SECRETMEM
+
+extern const struct address_space_operations secretmem_aops;
+
+static inline bool page_is_secretmem(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * Using page_mapping() is quite slow because of the actual call
+ * instruction and repeated compound_head(page) inside the
+ * page_mapping() function.
+ * We know that secretmem pages are not compound and LRU so we can
+ * save a couple of cycles here.
+ */
+ if (PageCompound(page) || !PageLRU(page))
+ return false;
+
+ mapping = (struct address_space *)
+ ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+
+ if (mapping != page->mapping)
+ return false;
+
+ return mapping->a_ops == &secretmem_aops;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma);
+bool secretmem_active(void);
+
+#else
+
+static inline bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool page_is_secretmem(struct page *page)
+{
+ return false;
+}
+
+static inline bool secretmem_active(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SECRETMEM */
+
+#endif /* _LINUX_SECRETMEM_H */
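
A sketch of the intended use: generic code that follows page pointers bails out on secretmem pages, whose contents are removed from the direct map (the caller below is hypothetical):

    #include <linux/mm.h>
    #include <linux/secretmem.h>

    static bool example_may_touch_contents(struct page *page)
    {
        /* Cheap check first; secretmem page contents must not be read here. */
        if (page_is_secretmem(page))
            return false;
        return true;
    }
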
diff --git a/include/linux/security.h b/include/linux/security.h
index 24eda04221e9..5b7288521300 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -120,10 +120,11 @@ enum lockdown_reason {
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
- LOCKDOWN_BPF_READ,
+ LOCKDOWN_BPF_READ_KERNEL,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
LOCKDOWN_XMON_RW,
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 723b1fa1177e..dd99569595fd 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -126,8 +126,16 @@ void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num
void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
unsigned long long v, unsigned int width);
+void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
+ unsigned int flags, const char *esc);
+
+static inline void seq_escape_str(struct seq_file *m, const char *src,
+ unsigned int flags, const char *esc)
+{
+ seq_escape_mem(m, src, strlen(src), flags, esc);
+}
+
void seq_escape(struct seq_file *m, const char *s, const char *esc);
-void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz);
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
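
seq_escape_str() is a thin wrapper over the new seq_escape_mem(). A hypothetical show() callback; the flag and escape set are illustrative choices from string_helpers.h:

    #include <linux/seq_file.h>
    #include <linux/string_helpers.h>

    static int example_show(struct seq_file *m, void *v)
    {
        const char *name = m->private;    /* untrusted string */

        seq_puts(m, "name=");
        seq_escape_str(m, name, ESCAPE_OCTAL, " \t\n\\");
        seq_putc(m, '\n');
        return 0;
    }
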
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 9e655055112d..5db211f43b29 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -146,7 +146,7 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
return container_of(up, struct uart_8250_port, port);
}
-int serial8250_register_8250_port(struct uart_8250_port *);
+int serial8250_register_8250_port(const struct uart_8250_port *);
void serial8250_unregister_port(int line);
void serial8250_suspend_port(int line);
void serial8250_resume_port(int line);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d7ed00f1594e..c58cc142d23f 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -408,7 +408,8 @@ int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_match_port(struct uart_port *port1, struct uart_port *port2);
+bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2);
/*
* Power Management
@@ -428,7 +429,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if ((tty && tty->stopped) || port->hw_stopped)
+ if ((tty && tty->flow.stopped) || port->hw_stopped)
return 1;
return 0;
}
@@ -517,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
{
@@ -530,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
spin_unlock(&port->lock);
}
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&port->lock, flags);
+}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
/*
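A hedged driver fragment (not from the patch) showing how the new uart_unlock_and_check_sysrq_irqrestore() pairs with spin_lock_irqsave() on port->lock; the surrounding interrupt handler is hypothetical.

#include <linux/serial_core.h>

static void example_irq_done(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* ... service RX/TX FIFOs, possibly latching a sysrq character ... */
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
}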
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index fe1aa4e54680..f36be5166c19 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -28,7 +28,19 @@ static inline bool kernel_page_present(struct page *page)
{
return true;
}
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. ARM64 can disable direct map modifications at
+ * boot time. Let them override this query.

+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+ return true;
+}
+#define can_set_direct_map can_set_direct_map
#endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
#ifndef set_mce_nospec
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index aa77dcd1646f..8e775ce517bb 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -122,21 +122,18 @@ static inline bool shmem_file(struct file *file)
extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
+#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
-extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
+ bool zeropage,
struct page **pagep);
-extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr);
-#else
-#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
- dst_addr) ({ BUG(); 0; })
-#endif
+#else /* !CONFIG_SHMEM */
+#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
+ src_addr, zeropage, pagep) ({ BUG(); 0; })
+#endif /* CONFIG_SHMEM */
+#endif /* CONFIG_USERFAULTFD */
#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 1eac79ce57d4..9814fff58a69 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,7 +4,7 @@
/*
* This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 5160fd45e5ca..3454c7ff0778 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -462,8 +462,6 @@ int __save_altstack(stack_t __user *, unsigned long);
unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
} while (0);
#ifdef CONFIG_PROC_FS
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 96f319099744..14ab0c0bc924 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ kfree_skb(skb);
+}
+
+static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
+{
+ if (msg->skb)
+ sock_drop(psock->sk, msg->skb);
+ kfree(msg);
+}
+
static inline void sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
spin_lock_bh(&psock->ingress_lock);
- list_add_tail(&msg->list, &psock->ingress_msg);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ list_add_tail(&msg->list, &psock->ingress_msg);
+ else
+ drop_sk_msg(psock, msg);
spin_unlock_bh(&psock->ingress_lock);
}
@@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
psock->psock_update_sk_prot(sk, psock, true);
}
-static inline void sk_psock_set_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- return test_bit(bit, &psock->state);
-}
-
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
struct sk_psock *psock;
diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h
new file mode 100644
index 000000000000..88bd8de0e803
--- /dev/null
+++ b/include/linux/soc/ixp4xx/cpu.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IXP4XX cpu type detection
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ */
+
+#ifndef __SOC_IXP4XX_CPU_H__
+#define __SOC_IXP4XX_CPU_H__
+
+#include <linux/io.h>
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/* Processor id value in CP15 Register 0 */
+#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */
+#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0
+
+#define IXP43X_PROCESSOR_ID_VALUE 0x69054040
+#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0
+
+#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */
+#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0
+
+/* "fuse" bits of IXP_EXP_CFG2 */
+/* All IXP4xx CPUs */
+#define IXP4XX_FEATURE_RCOMP (1 << 0)
+#define IXP4XX_FEATURE_USB_DEVICE (1 << 1)
+#define IXP4XX_FEATURE_HASH (1 << 2)
+#define IXP4XX_FEATURE_AES (1 << 3)
+#define IXP4XX_FEATURE_DES (1 << 4)
+#define IXP4XX_FEATURE_HDLC (1 << 5)
+#define IXP4XX_FEATURE_AAL (1 << 6)
+#define IXP4XX_FEATURE_HSS (1 << 7)
+#define IXP4XX_FEATURE_UTOPIA (1 << 8)
+#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9)
+#define IXP4XX_FEATURE_NPEC_ETH (1 << 10)
+#define IXP4XX_FEATURE_RESET_NPEA (1 << 11)
+#define IXP4XX_FEATURE_RESET_NPEB (1 << 12)
+#define IXP4XX_FEATURE_RESET_NPEC (1 << 13)
+#define IXP4XX_FEATURE_PCI (1 << 14)
+#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16)
+#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22)
+#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \
+ IXP4XX_FEATURE_USB_DEVICE | \
+ IXP4XX_FEATURE_HASH | \
+ IXP4XX_FEATURE_AES | \
+ IXP4XX_FEATURE_DES | \
+ IXP4XX_FEATURE_HDLC | \
+ IXP4XX_FEATURE_AAL | \
+ IXP4XX_FEATURE_HSS | \
+ IXP4XX_FEATURE_UTOPIA | \
+ IXP4XX_FEATURE_NPEB_ETH0 | \
+ IXP4XX_FEATURE_NPEC_ETH | \
+ IXP4XX_FEATURE_RESET_NPEA | \
+ IXP4XX_FEATURE_RESET_NPEB | \
+ IXP4XX_FEATURE_RESET_NPEC | \
+ IXP4XX_FEATURE_PCI | \
+ IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \
+ IXP4XX_FEATURE_XSCALE_MAX_FREQ)
+
+
+/* IXP43x/46x CPUs */
+#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15)
+#define IXP4XX_FEATURE_USB_HOST (1 << 18)
+#define IXP4XX_FEATURE_NPEA_ETH (1 << 19)
+#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \
+ IXP4XX_FEATURE_ECC_TIMESYNC | \
+ IXP4XX_FEATURE_USB_HOST | \
+ IXP4XX_FEATURE_NPEA_ETH)
+
+/* IXP46x CPU (including IXP455) only */
+#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20)
+#define IXP4XX_FEATURE_RSA (1 << 21)
+#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \
+ IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \
+ IXP4XX_FEATURE_RSA)
+
+#ifdef CONFIG_ARCH_IXP4XX
+#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \
+ IXP43X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \
+ IXP46X_PROCESSOR_ID_VALUE)
+
+u32 ixp4xx_read_feature_bits(void);
+void ixp4xx_write_feature_bits(u32 value);
+#else
+#define cpu_is_ixp42x_rev_a0() 0
+#define cpu_is_ixp42x() 0
+#define cpu_is_ixp43x() 0
+#define cpu_is_ixp46x() 0
+static inline u32 ixp4xx_read_feature_bits(void)
+{
+ return 0;
+}
+static inline void ixp4xx_write_feature_bits(u32 value)
+{
+}
+#endif
+
+#endif /* __SOC_IXP4XX_CPU_H__ */
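A hypothetical consumer (not part of the patch) of the relocated CPU-type and fuse helpers; the function name is invented for the example.

#include <linux/soc/ixp4xx/cpu.h>

/* USB host support is absent on IXP42x; check both CPU type and fuse bit. */
static bool example_has_usb_host(void)
{
	if (!cpu_is_ixp43x() && !cpu_is_ixp46x())
		return false;
	return ixp4xx_read_feature_bits() & IXP4XX_FEATURE_USB_HOST;
}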
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 63ad8cddad14..652c0158baac 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -14,6 +14,7 @@ struct qcom_smem_state_ops {
#ifdef CONFIG_QCOM_SMEM_STATE
struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
void qcom_smem_state_put(struct qcom_smem_state *);
int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value);
@@ -29,6 +30,13 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
return ERR_PTR(-EINVAL);
}
+static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void qcom_smem_state_put(struct qcom_smem_state *state)
{
}
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index fc9250fb3133..aa840ed043e1 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -611,12 +611,6 @@
#define EXYNOS5420_FSYS2_OPTION 0x4168
#define EXYNOS5420_PSGEN_OPTION 0x4188
-/* For EXYNOS_CENTRAL_SEQ_OPTION */
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1	BIT(17)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
-
#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ced07f8fde87..a48ac3e77301 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -612,6 +612,7 @@ struct sdw_bus_params {
* @update_status: Update Slave status
* @bus_config: Update the bus config for Slave
* @port_prep: Prepare the port with parameters
+ * @clk_stop: handle imp-def sequences before and after prepare and de-prepare
*/
struct sdw_slave_ops {
int (*read_prop)(struct sdw_slave *sdw);
@@ -624,7 +625,6 @@ struct sdw_slave_ops {
int (*port_prep)(struct sdw_slave *slave,
struct sdw_prepare_ch *prepare_ch,
enum sdw_port_prep_ops pre_ops);
- int (*get_clk_stop_mode)(struct sdw_slave *slave);
int (*clk_stop)(struct sdw_slave *slave,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type);
@@ -675,7 +675,6 @@ struct sdw_slave {
struct list_head node;
struct completion port_ready[SDW_MAX_PORTS];
unsigned int m_port_map[SDW_MAX_PORTS];
- enum sdw_clk_stop_mode curr_clk_stop_mode;
u16 dev_num;
u16 dev_num_sticky;
bool probed;
@@ -1040,7 +1039,10 @@ int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
-int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+
int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 3a5446ac014a..1ebea7764011 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -58,7 +58,7 @@ struct sdw_intel_acpi_info {
u32 link_mask;
};
-struct sdw_intel_link_res;
+struct sdw_intel_link_dev;
/* Intel clock-stop/pm_runtime quirk definitions */
@@ -109,7 +109,7 @@ struct sdw_intel_slave_id {
* Controller
* @num_slaves: total number of devices exposed across all enabled links
* @handle: ACPI parent handle
- * @links: information for each link (controller-specific and kept
+ * @ldev: information for each link (controller-specific and kept
* opaque here)
* @ids: array of slave_id, representing Slaves exposed across all enabled
* links
@@ -123,7 +123,7 @@ struct sdw_intel_ctx {
u32 link_mask;
int num_slaves;
acpi_handle handle;
- struct sdw_intel_link_res *links;
+ struct sdw_intel_link_dev **ldev;
struct sdw_intel_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a0895bbf71ce..e6011a9975af 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..cb1f4351e8ba 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -82,9 +82,7 @@ struct srcu_struct {
/* callback for the barrier */
/* operation. */
struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
diff --git a/include/linux/stm.h b/include/linux/stm.h
index c6f577ab6f21..3b22689512be 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -57,7 +57,7 @@ struct stm_device;
*
* Normally, an STM device will have a range of masters available to software
* and the rest being statically assigned to various hardware trace sources.
- * The former is defined by the the range [@sw_start..@sw_end] of the device
+ * The former is defined by the range [@sw_start..@sw_end] of the device
* description. That is, the lowest master that can be allocated to software
 writers is @sw_start and data from this writer will appear as @sw_start
* master in the STP stream.
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d5ae621d66ba..a6f03b36fc4f 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -115,7 +115,9 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
+ struct mutex lock;
int enable;
+ u32 btr_reserve[2];
u32 btr_offset[2];
u32 btr[2];
u32 ctr[2];
diff --git a/include/linux/string.h b/include/linux/string.h
index 9521d8cab18e..b48d2d28e0b1 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
-
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
@@ -184,12 +183,6 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
-extern int kstrtobool(const char *s, bool *res);
-static inline int strtobool(const char *s, bool *res)
-{
- return kstrtobool(s, res);
-}
-
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index fa06dcdc481e..68189c4a2eb1 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
+#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/types.h>
@@ -18,13 +19,15 @@ enum string_size_units {
void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
-#define UNESCAPE_SPACE 0x01
-#define UNESCAPE_OCTAL 0x02
-#define UNESCAPE_HEX 0x04
-#define UNESCAPE_SPECIAL 0x08
+#define UNESCAPE_SPACE BIT(0)
+#define UNESCAPE_OCTAL BIT(1)
+#define UNESCAPE_HEX BIT(2)
+#define UNESCAPE_SPECIAL BIT(3)
#define UNESCAPE_ANY \
(UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
+#define UNESCAPE_ALL_MASK GENMASK(3, 0)
+
int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
static inline int string_unescape_inplace(char *buf, unsigned int flags)
@@ -42,22 +45,24 @@ static inline int string_unescape_any_inplace(char *buf)
return string_unescape_any(buf, buf, 0);
}
-#define ESCAPE_SPACE 0x01
-#define ESCAPE_SPECIAL 0x02
-#define ESCAPE_NULL 0x04
-#define ESCAPE_OCTAL 0x08
+#define ESCAPE_SPACE BIT(0)
+#define ESCAPE_SPECIAL BIT(1)
+#define ESCAPE_NULL BIT(2)
+#define ESCAPE_OCTAL BIT(3)
#define ESCAPE_ANY \
(ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
-#define ESCAPE_NP 0x10
+#define ESCAPE_NP BIT(4)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
-#define ESCAPE_HEX 0x20
+#define ESCAPE_HEX BIT(5)
+#define ESCAPE_NA BIT(6)
+#define ESCAPE_NAP BIT(7)
+#define ESCAPE_APPEND BIT(8)
+
+#define ESCAPE_ALL_MASK GENMASK(8, 0)
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only);
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz);
-
static inline int string_escape_mem_any_np(const char *src, size_t isz,
char *dst, size_t osz, const char *only)
{
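A hedged example (not part of the patch) of calling string_escape_mem() with the BIT()-based flags defined above; the buffer contents and wrapper are made up.

#include <linux/string_helpers.h>

static int example_escape(char *dst, size_t dstsz)
{
	static const char src[] = "tab\there";

	/* Returns the number of characters that would be written to dst. */
	return string_escape_mem(src, sizeof(src) - 1, dst, dstsz,
				 ESCAPE_ANY_NP, NULL);
}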
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index d0965e2997b0..b134b2b3371c 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -14,6 +14,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/kstrtox.h>
#include <linux/proc_fs.h>
/*
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 02e7a5863d28..8b5d5c97553e 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -29,6 +29,7 @@
#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
+struct rpc_sysfs_client;
/*
* The high-level client handle
@@ -71,6 +72,7 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
+ struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */
/* cl_work is only needed after cl_xpi is no longer used,
* and that are of similar size
*/
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index df696efdd675..a237b8dbf608 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -121,6 +121,7 @@ struct rpc_task_setup {
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
@@ -139,6 +140,7 @@ struct rpc_task_setup {
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
+#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 61b622e334ee..c8c39f22d3b1 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -53,6 +53,7 @@ enum rpc_display_format_t {
struct rpc_task;
struct rpc_xprt;
+struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
@@ -182,9 +183,11 @@ enum xprt_transports {
XPRT_TRANSPORT_LOCAL = 257,
};
+struct rpc_sysfs_xprt;
struct rpc_xprt {
struct kref kref; /* Reference count */
const struct rpc_xprt_ops *ops; /* transport methods */
+ unsigned int id; /* transport id */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
@@ -288,6 +291,9 @@ struct rpc_xprt {
atomic_t inject_disconnect;
#endif
struct rcu_head rcu;
+ const struct xprt_class *xprt_class;
+ struct rpc_sysfs_xprt *xprt_sysfs;
+	bool main; /* mark if this is the 1st transport */
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -370,6 +376,7 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
void xprt_free(struct rpc_xprt *);
void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
+void xprt_cleanup_ids(void);
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
@@ -408,6 +415,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void xprt_unlock_connect(struct rpc_xprt *, void *);
+void xprt_release_write(struct rpc_xprt *, struct rpc_task *);
/*
* Reserved bit positions in xprt->state
@@ -419,6 +427,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
+#define XPRT_OFFLINE (7)
+#define XPRT_REMOVE (8)
#define XPRT_CONGESTED (9)
#define XPRT_CWND_WAIT (10)
#define XPRT_WRITE_SPACE (11)
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index c6cce3fbf29d..b19addc8b715 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -10,10 +10,12 @@
#define _NET_SUNRPC_XPRTMULTIPATH_H
struct rpc_xprt_iter_ops;
+struct rpc_sysfs_xprt_switch;
struct rpc_xprt_switch {
spinlock_t xps_lock;
struct kref xps_kref;
+ unsigned int xps_id;
unsigned int xps_nxprts;
unsigned int xps_nactive;
atomic_long_t xps_queuelen;
@@ -23,6 +25,7 @@ struct rpc_xprt_switch {
const struct rpc_xprt_iter_ops *xps_iter_ops;
+ struct rpc_sysfs_xprt_switch *xps_sysfs;
struct rcu_head xps_rcu;
};
@@ -71,4 +74,7 @@ extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
const struct sockaddr *sap);
+
+extern void xprt_multipath_cleanup_ids(void);
+
#endif
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 3c1423ee74b4..8c2a712cb242 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -10,6 +10,7 @@
int init_socket_xprt(void);
void cleanup_socket_xprt(void);
+unsigned short get_srcport(struct rpc_xprt *);
#define RPC_MIN_RESVPORT (1U)
#define RPC_MAX_RESVPORT (65535U)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 49b1dd2c100b..6f5a43251593 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -62,12 +62,17 @@ static inline int current_is_kswapd(void)
* migrate part of a process memory to device memory.
*
* When a page is migrated from CPU to device, we set the CPU page table entry
- * to a special SWP_DEVICE_* entry.
+ * to a special SWP_DEVICE_{READ|WRITE} entry.
+ *
+ * When a page is mapped by the device for exclusive access we set the CPU page
+ * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 2
+#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
+#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
+#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
@@ -537,7 +542,11 @@ static inline void put_swap_device(struct swap_info_struct *si)
{
}
-#define swap_address_space(entry) (NULL)
+static inline struct address_space *swap_address_space(swp_entry_t entry)
+{
+ return NULL;
+}
+
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
@@ -560,8 +569,8 @@ static inline void show_swap_cache_info(void)
{
}
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
+/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
+#define free_swap_and_cache(e) is_pfn_swap_entry(e)
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 5907205c712c..d356ab4047f7 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -107,10 +107,14 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
}
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
- return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
- page_to_pfn(page));
+ return swp_entry(SWP_DEVICE_READ, offset);
+}
+
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
+{
+ return swp_entry(SWP_DEVICE_WRITE, offset);
}
static inline bool is_device_private_entry(swp_entry_t entry)
@@ -119,33 +123,40 @@ static inline bool is_device_private_entry(swp_entry_t entry)
return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
- *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
+ return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
- return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
+ return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
- return swp_offset(entry);
+ return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
+}
+
+static inline bool is_device_exclusive_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
+ swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
- return pfn_to_page(swp_offset(entry));
+ return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
+ return swp_entry(0, 0);
}
static inline bool is_device_private_entry(swp_entry_t entry)
@@ -153,61 +164,52 @@ static inline bool is_device_private_entry(swp_entry_t entry)
return false;
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
return false;
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
- return NULL;
+ return swp_entry(0, 0);
}
-#endif /* CONFIG_DEVICE_PRIVATE */
-#ifdef CONFIG_MIGRATION
-static inline swp_entry_t make_migration_entry(struct page *page, int write)
+static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
- BUG_ON(!PageLocked(compound_head(page)));
+ return false;
+}
- return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
- page_to_pfn(page));
+static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
+{
+ return false;
}
+#endif /* CONFIG_DEVICE_PRIVATE */
+#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline int is_write_migration_entry(swp_entry_t entry)
+static inline int is_writable_migration_entry(swp_entry_t entry)
{
return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
- return swp_offset(entry);
+ return swp_entry(SWP_MIGRATION_READ, offset);
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- struct page *p = pfn_to_page(swp_offset(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(!PageLocked(compound_head(p)));
- return p;
-}
-
-static inline void make_migration_entry_read(swp_entry_t *entry)
-{
- *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+ return swp_entry(SWP_MIGRATION_WRITE, offset);
}
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
@@ -217,37 +219,58 @@ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
struct mm_struct *mm, pte_t *pte);
#else
-
-#define make_migration_entry(page, write) swp_entry(0, 0)
-static inline int is_migration_entry(swp_entry_t swp)
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline int is_migration_entry(swp_entry_t swp)
{
- return NULL;
+ return 0;
}
-static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
struct mm_struct *mm, pte_t *pte) { }
-static inline int is_write_migration_entry(swp_entry_t entry)
+static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
}
#endif
+static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset(entry));
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(is_migration_entry(entry) && !PageLocked(p));
+
+ return p;
+}
+
+/*
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+ * in the swap offset. They are used to represent unaddressable device memory
+ * and to restrict access to a page undergoing migration.
+ */
+static inline bool is_pfn_swap_entry(swp_entry_t entry)
+{
+ return is_migration_entry(entry) || is_device_private_entry(entry) ||
+ is_device_exclusive_entry(entry);
+}
+
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
@@ -265,6 +288,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
if (pmd_swp_soft_dirty(pmd))
pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
arch_entry = __pmd_to_swp_entry(pmd);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
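A hedged sketch (not from the patch) of how the renamed helpers compose: a migration entry built from a locked page's pfn can be turned back into its struct page with the new pfn_swap_entry_to_page(); the wrapper function is invented for the example.

#include <linux/mm.h>
#include <linux/swapops.h>

static struct page *example_round_trip(struct page *page)
{
	swp_entry_t entry;

	/* The page is assumed to be locked, as migration entries require. */
	entry = make_readable_migration_entry(page_to_pfn(page));
	return pfn_swap_entry_to_page(entry);	/* yields the same page */
}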
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 586128d5c3b8..69c9a7010081 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1050,6 +1050,7 @@ asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr _
asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type,
const void __user *rule_attr, __u32 flags);
asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags);
+asmlinkage long sys_memfd_secret(unsigned int flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index d76a1ddf83a3..a12556a4b93a 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -162,6 +162,12 @@ static const struct attribute_group _name##_group = { \
}; \
__ATTRIBUTE_GROUPS(_name)
+#define BIN_ATTRIBUTE_GROUPS(_name) \
+static const struct attribute_group _name##_group = { \
+ .bin_attrs = _name##_attrs, \
+}; \
+__ATTRIBUTE_GROUPS(_name)
+
struct file;
struct vm_area_struct;
struct address_space;
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 54269e47ac9a..3ebfea0781f1 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -27,6 +27,7 @@
#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */
+#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */
struct device;
struct tee_device;
@@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev);
* @returns a pointer to 'struct tee_shm'
*/
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
/**
* tee_shm_register() - Register shared memory buffer
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 157762db9d4b..0999f6317978 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,6 +9,7 @@
#define _LINUX_THREAD_INFO_H
#include <linux/types.h>
+#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 4118a97e62fb..fda13c9d1256 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
#define del_singleshot_timer_sync(t) del_timer_sync(t)
-extern bool timer_curr_running(struct timer_list *timer);
-
extern void init_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
diff --git a/include/linux/trace.h b/include/linux/trace.h
index be1e130ed87c..bf169612ffe1 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -41,6 +41,13 @@ int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
struct trace_array *trace_array_get_by_name(const char *name);
int trace_array_destroy(struct trace_array *tr);
+
+/* For osnoise tracer */
+int osnoise_arch_register(void);
+void osnoise_arch_unregister(void);
+void osnoise_trace_irq_entry(int id);
+void osnoise_trace_irq_exit(int id, const char *desc);
+
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_H */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 13f65420f188..ab58696d0ddd 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -41,7 +41,17 @@ extern int
tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
int prio);
extern int
+tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
+ int prio);
+extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
+static inline int
+tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
+ void *data)
+{
+ return tracepoint_probe_register_prio_may_exist(tp, probe, data,
+ TRACEPOINT_DEFAULT_PRIO);
+}
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
void *priv);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e5d6b1f28823..19dc1097e09c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -243,20 +243,29 @@ struct tty_port {
#define TTY_PORT_KOPENED 5 /* device exclusively opened by
kernel */
-/*
- * Where all of the state associated with a tty is kept while the tty
- * is open. Since the termios state should be kept even if the tty
- * has been closed --- for things like the baud rate, etc --- it is
- * not stored here, but rather a pointer to the real state is stored
- * here. Possible the winsize structure should have the same
- * treatment, but (1) the default 80x24 is usually right and (2) it's
- * most often used by a windowing system, which will set the correct
- * size each time the window is created or resized anyway.
- * - TYT, 9/14/92
- */
-
struct tty_operations;
+/**
+ * struct tty_struct - state associated with a tty while open
+ *
+ * @flow.lock: lock for flow members
+ * @flow.stopped: tty stopped/started by tty_stop/tty_start
+ * @flow.tco_stopped: tty stopped/started by TCOOFF/TCOON ioctls (it has
+ *		     precedence over @flow.stopped)
+ * @flow.unused: alignment for Alpha, so that no members other than @flow.* are
+ * modified by the same 64b word store. The @flow's __aligned is
+ * there for the very same reason.
+ * @ctrl.lock: lock for ctrl members
+ * @ctrl.pgrp: process group of this tty (setpgrp(2))
+ * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
+ * @ctrl.lock and legacy mutex, readers must use at least one of
+ * them.
+ * @ctrl.pktstatus: packet mode status (bitwise OR of TIOCPKT_* constants)
+ * @ctrl.packet: packet mode enabled
+ *
+ * All of the state associated with a tty while the tty is open. Persistent
+ * storage for tty devices is referenced here as @port in struct tty_port.
+ */
struct tty_struct {
int magic;
struct kref kref;
@@ -274,27 +283,30 @@ struct tty_struct {
struct mutex throttle_mutex;
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
- spinlock_t ctrl_lock;
- spinlock_t flow_lock;
/* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
char name[64];
- struct pid *pgrp; /* Protected by ctrl lock */
- /*
- * Writes protected by both ctrl lock and legacy mutex, readers must use
- * at least one of them.
- */
- struct pid *session;
unsigned long flags;
int count;
struct winsize winsize; /* winsize_mutex */
- unsigned long stopped:1, /* flow_lock */
- flow_stopped:1,
- unused:BITS_PER_LONG - 2;
+
+ struct {
+ spinlock_t lock;
+ bool stopped;
+ bool tco_stopped;
+ unsigned long unused[0];
+ } __aligned(sizeof(unsigned long)) flow;
+
+ struct {
+ spinlock_t lock;
+ struct pid *pgrp;
+ struct pid *session;
+ unsigned char pktstatus;
+ bool packet;
+ unsigned long unused[0];
+ } __aligned(sizeof(unsigned long)) ctrl;
+
int hw_stopped;
- unsigned long ctrl_status:8, /* ctrl_lock */
- packet:1,
- unused_ctrl:BITS_PER_LONG - 9;
unsigned int receive_room; /* Bytes free for queue */
int flow_change;
@@ -446,10 +458,9 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
extern void tty_write_message(struct tty_struct *tty, char *msg);
extern int tty_send_xchar(struct tty_struct *tty, char ch);
extern int tty_put_char(struct tty_struct *tty, unsigned char c);
-extern int tty_chars_in_buffer(struct tty_struct *tty);
-extern int tty_write_room(struct tty_struct *tty);
+extern unsigned int tty_chars_in_buffer(struct tty_struct *tty);
+extern unsigned int tty_write_room(struct tty_struct *tty);
extern void tty_driver_flush_buffer(struct tty_struct *tty);
-extern void tty_throttle(struct tty_struct *tty);
extern void tty_unthrottle(struct tty_struct *tty);
extern int tty_throttle_safe(struct tty_struct *tty);
extern int tty_unthrottle_safe(struct tty_struct *tty);
@@ -484,6 +495,9 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
return tty_termios_baud_rate(&tty->termios);
}
+unsigned char tty_get_char_size(unsigned int cflag);
+unsigned char tty_get_frame_size(unsigned int cflag);
+
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
@@ -624,11 +638,11 @@ static inline int tty_port_users(struct tty_port *port)
return port->count + port->blocked_open;
}
-extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
-extern int tty_unregister_ldisc(int disc);
+extern int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc);
+extern void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc);
extern int tty_set_ldisc(struct tty_struct *tty, int disc);
extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- char *f, int count);
+ const char *f, int count);
/* n_tty.c */
extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 2f719b471d52..448f8ee6db6e 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -89,7 +89,7 @@
*
* Note: Do not call this function directly, call tty_driver_flush_chars
*
- * int (*write_room)(struct tty_struct *tty);
+ * unsigned int (*write_room)(struct tty_struct *tty);
*
 * This routine returns the number of characters the tty driver
* will accept for queuing to be written. This number is subject
@@ -136,7 +136,7 @@
* the line discipline are close to full, and it should somehow
* signal that no more characters should be sent to the tty.
*
- * Optional: Always invoke via tty_throttle(), called under the
+ * Optional: Always invoke via tty_throttle_safe(), called under the
* termios lock.
*
* void (*unthrottle)(struct tty_struct * tty);
@@ -153,7 +153,7 @@
* This routine notifies the tty driver that it should stop
* outputting characters to the tty device.
*
- * Called with ->flow_lock held. Serialized with start() method.
+ * Called with ->flow.lock held. Serialized with start() method.
*
* Optional:
*
@@ -164,7 +164,7 @@
 * This routine notifies the tty driver that it should resume sending
* characters to the tty device.
*
- * Called with ->flow_lock held. Serialized with stop() method.
+ * Called with ->flow.lock held. Serialized with stop() method.
*
* Optional:
*
@@ -256,8 +256,8 @@ struct tty_operations {
const unsigned char *buf, int count);
int (*put_char)(struct tty_struct *tty, unsigned char ch);
void (*flush_chars)(struct tty_struct *tty);
- int (*write_room)(struct tty_struct *tty);
- int (*chars_in_buffer)(struct tty_struct *tty);
+ unsigned int (*write_room)(struct tty_struct *tty);
+ unsigned int (*chars_in_buffer)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
long (*compat_ioctl)(struct tty_struct *tty,
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 767f62086bd9..67d78dc553e1 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -2,8 +2,10 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
+#include <linux/tty.h>
+
extern int tty_buffer_set_limit(struct tty_port *port, int limit);
-extern int tty_buffer_space_avail(struct tty_port *port);
+extern unsigned int tty_buffer_space_avail(struct tty_port *port);
extern int tty_buffer_request_room(struct tty_port *port, size_t size);
extern int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 31284b55bd4f..fbe9de278629 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -201,15 +201,13 @@ struct tty_ldisc_ops {
* The following routines are called from below.
*/
void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
- char *fp, int count);
+ const char *fp, int count);
void (*write_wakeup)(struct tty_struct *);
void (*dcd_change)(struct tty_struct *, unsigned int);
int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
- char *fp, int count);
+ const char *fp, int count);
struct module *owner;
-
- int refcount;
};
struct tty_ldisc {
diff --git a/include/linux/uio.h b/include/linux/uio.h
index d3ec87706d75..82c3c3e819e0 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -19,21 +19,17 @@ struct kvec {
enum iter_type {
/* iter types */
- ITER_IOVEC = 4,
- ITER_KVEC = 8,
- ITER_BVEC = 16,
- ITER_PIPE = 32,
- ITER_DISCARD = 64,
- ITER_XARRAY = 128,
+ ITER_IOVEC,
+ ITER_KVEC,
+ ITER_BVEC,
+ ITER_PIPE,
+ ITER_XARRAY,
+ ITER_DISCARD,
};
struct iov_iter {
- /*
- * Bit 0 is the read/write bit, set if we're writing.
- * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
- * the caller isn't expecting to drop a page reference when done.
- */
- unsigned int type;
+ u8 iter_type;
+ bool data_source;
size_t iov_offset;
size_t count;
union {
@@ -55,7 +51,7 @@ struct iov_iter {
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
- return i->type & ~(READ | WRITE);
+ return i->iter_type;
}
static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -90,7 +86,7 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i)
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
- return i->type & (READ | WRITE);
+ return i->data_source ? WRITE : READ;
}
/*
@@ -119,11 +115,11 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
};
}
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+ size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -132,9 +128,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
@@ -157,10 +151,11 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full(addr, bytes, i);
+ size_t copied = copy_from_iter(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
static __always_inline __must_check
@@ -175,10 +170,11 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full_nocache(addr, bytes, i);
+ size_t copied = copy_from_iter_nocache(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
@@ -278,7 +274,17 @@ struct csum_state {
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+static __always_inline __must_check
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i);
@@ -294,8 +300,4 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
int import_single_range(int type, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i);
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context);
-
#endif
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
deleted file mode 100644
index 167aa849c0ce..000000000000
--- a/include/linux/unaligned/access_ok.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
-#define _LINUX_UNALIGNED_ACCESS_OK_H
-
-#include <linux/kernel.h>
-#include <asm/byteorder.h>
-
-static __always_inline u16 get_unaligned_le16(const void *p)
-{
- return le16_to_cpup((__le16 *)p);
-}
-
-static __always_inline u32 get_unaligned_le32(const void *p)
-{
- return le32_to_cpup((__le32 *)p);
-}
-
-static __always_inline u64 get_unaligned_le64(const void *p)
-{
- return le64_to_cpup((__le64 *)p);
-}
-
-static __always_inline u16 get_unaligned_be16(const void *p)
-{
- return be16_to_cpup((__be16 *)p);
-}
-
-static __always_inline u32 get_unaligned_be32(const void *p)
-{
- return be32_to_cpup((__be32 *)p);
-}
-
-static __always_inline u64 get_unaligned_be64(const void *p)
-{
- return be64_to_cpup((__be64 *)p);
-}
-
-static __always_inline void put_unaligned_le16(u16 val, void *p)
-{
- *((__le16 *)p) = cpu_to_le16(val);
-}
-
-static __always_inline void put_unaligned_le32(u32 val, void *p)
-{
- *((__le32 *)p) = cpu_to_le32(val);
-}
-
-static __always_inline void put_unaligned_le64(u64 val, void *p)
-{
- *((__le64 *)p) = cpu_to_le64(val);
-}
-
-static __always_inline void put_unaligned_be16(u16 val, void *p)
-{
- *((__be16 *)p) = cpu_to_be16(val);
-}
-
-static __always_inline void put_unaligned_be32(u32 val, void *p)
-{
- *((__be32 *)p) = cpu_to_be32(val);
-}
-
-static __always_inline void put_unaligned_be64(u64 val, void *p)
-{
- *((__be64 *)p) = cpu_to_be64(val);
-}
-
-#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
deleted file mode 100644
index c43ff5918c8a..000000000000
--- a/include/linux/unaligned/be_byteshift.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
-#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
-
-#include <linux/types.h>
-
-static inline u16 __get_unaligned_be16(const u8 *p)
-{
- return p[0] << 8 | p[1];
-}
-
-static inline u32 __get_unaligned_be32(const u8 *p)
-{
- return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
-}
-
-static inline u64 __get_unaligned_be64(const u8 *p)
-{
- return (u64)__get_unaligned_be32(p) << 32 |
- __get_unaligned_be32(p + 4);
-}
-
-static inline void __put_unaligned_be16(u16 val, u8 *p)
-{
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void __put_unaligned_be32(u32 val, u8 *p)
-{
- __put_unaligned_be16(val >> 16, p);
- __put_unaligned_be16(val, p + 2);
-}
-
-static inline void __put_unaligned_be64(u64 val, u8 *p)
-{
- __put_unaligned_be32(val >> 32, p);
- __put_unaligned_be32(val, p + 4);
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_be16(p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_be32(p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_be64(p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_be16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_be32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_be64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h
deleted file mode 100644
index 7164214a4ba1..000000000000
--- a/include/linux/unaligned/be_memmove.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
-#define _LINUX_UNALIGNED_BE_MEMMOVE_H
-
-#include <linux/unaligned/memmove.h>
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_memmove16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_memmove32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_memmove64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_memmove16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_memmove32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_memmove64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h
deleted file mode 100644
index 15ea503a13fc..000000000000
--- a/include/linux/unaligned/be_struct.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
-#define _LINUX_UNALIGNED_BE_STRUCT_H
-
-#include <linux/unaligned/packed_struct.h>
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return __get_unaligned_cpu16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return __get_unaligned_cpu32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return __get_unaligned_cpu64((const u8 *)p);
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- __put_unaligned_cpu16(val, p);
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
- __put_unaligned_cpu32(val, p);
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
- __put_unaligned_cpu64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
deleted file mode 100644
index 303289492859..000000000000
--- a/include/linux/unaligned/generic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_GENERIC_H
-#define _LINUX_UNALIGNED_GENERIC_H
-
-#include <linux/types.h>
-
-/*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
- */
-extern void __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
- __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
- __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
- __bad_unaligned_access_size())))); \
- }))
-
-#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
- __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
- __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
- __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
- __bad_unaligned_access_size())))); \
- }))
-
-#define __put_unaligned_le(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_le16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_le32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_le64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-#define __put_unaligned_be(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_be16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_be32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_be64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-static inline u32 __get_unaligned_be24(const u8 *p)
-{
- return p[0] << 16 | p[1] << 8 | p[2];
-}
-
-static inline u32 get_unaligned_be24(const void *p)
-{
- return __get_unaligned_be24(p);
-}
-
-static inline u32 __get_unaligned_le24(const u8 *p)
-{
- return p[0] | p[1] << 8 | p[2] << 16;
-}
-
-static inline u32 get_unaligned_le24(const void *p)
-{
- return __get_unaligned_le24(p);
-}
-
-static inline void __put_unaligned_be24(const u32 val, u8 *p)
-{
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void put_unaligned_be24(const u32 val, void *p)
-{
- __put_unaligned_be24(val, p);
-}
-
-static inline void __put_unaligned_le24(const u32 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
-}
-
-static inline void put_unaligned_le24(const u32 val, void *p)
-{
- __put_unaligned_le24(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_GENERIC_H */
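Among the pieces removed with generic.h are the 3-byte accessors used by a few subsystems. A quick user-space model of what they compute, handy for sanity-checking the byte order; only the two shift expressions come from the header above, the scaffolding is illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_unaligned_be24(const uint8_t *p)
{
        return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

static uint32_t get_unaligned_le24(const uint8_t *p)
{
        return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16;
}

int main(void)
{
        const uint8_t buf[] = { 0x12, 0x34, 0x56 };

        /* big-endian reads 0x123456, little-endian reads 0x563412 */
        printf("be24=0x%06x le24=0x%06x\n",
               (unsigned)get_unaligned_be24(buf),
               (unsigned)get_unaligned_le24(buf));
        return 0;
}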
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
deleted file mode 100644
index 2248dcb0df76..000000000000
--- a/include/linux/unaligned/le_byteshift.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
-#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
-
-#include <linux/types.h>
-
-static inline u16 __get_unaligned_le16(const u8 *p)
-{
- return p[0] | p[1] << 8;
-}
-
-static inline u32 __get_unaligned_le32(const u8 *p)
-{
- return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
-}
-
-static inline u64 __get_unaligned_le64(const u8 *p)
-{
- return (u64)__get_unaligned_le32(p + 4) << 32 |
- __get_unaligned_le32(p);
-}
-
-static inline void __put_unaligned_le16(u16 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
-}
-
-static inline void __put_unaligned_le32(u32 val, u8 *p)
-{
- __put_unaligned_le16(val >> 16, p + 2);
- __put_unaligned_le16(val, p);
-}
-
-static inline void __put_unaligned_le64(u64 val, u8 *p)
-{
- __put_unaligned_le32(val >> 32, p + 4);
- __put_unaligned_le32(val, p);
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_le16(p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_le32(p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_le64(p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_le16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_le32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_le64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
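The byte-shift flavour above never performs an unaligned CPU load at all; it assembles the value one byte at a time, so it is safe on strict-alignment machines in either byte order. A standalone round-trip check built from the same formulas, with the misaligned offset chosen arbitrarily:

#include <assert.h>
#include <stdint.h>

static void put_unaligned_le32(uint32_t val, uint8_t *p)
{
        p[0] = val;
        p[1] = val >> 8;
        p[2] = val >> 16;
        p[3] = val >> 24;
}

static uint32_t get_unaligned_le32(const uint8_t *p)
{
        return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 |
               (uint32_t)p[3] << 24;
}

int main(void)
{
        uint8_t buf[8] = { 0 };

        put_unaligned_le32(0xdeadbeef, buf + 3);        /* misaligned on purpose */
        assert(get_unaligned_le32(buf + 3) == 0xdeadbeef);
        return 0;
}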
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h
deleted file mode 100644
index 9202e864d026..000000000000
--- a/include/linux/unaligned/le_memmove.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
-#define _LINUX_UNALIGNED_LE_MEMMOVE_H
-
-#include <linux/unaligned/memmove.h>
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_memmove16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_memmove32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_memmove64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_memmove16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_memmove32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_memmove64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h
deleted file mode 100644
index 9977987883a6..000000000000
--- a/include/linux/unaligned/le_struct.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
-#define _LINUX_UNALIGNED_LE_STRUCT_H
-
-#include <linux/unaligned/packed_struct.h>
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return __get_unaligned_cpu16((const u8 *)p);
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return __get_unaligned_cpu32((const u8 *)p);
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return __get_unaligned_cpu64((const u8 *)p);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- __put_unaligned_cpu16(val, p);
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
- __put_unaligned_cpu32(val, p);
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
- __put_unaligned_cpu64(val, p);
-}
-
-#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h
deleted file mode 100644
index ac71b53bc6dc..000000000000
--- a/include/linux/unaligned/memmove.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_UNALIGNED_MEMMOVE_H
-#define _LINUX_UNALIGNED_MEMMOVE_H
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-static inline u16 __get_unaligned_memmove16(const void *p)
-{
- u16 tmp;
- memmove(&tmp, p, 2);
- return tmp;
-}
-
-static inline u32 __get_unaligned_memmove32(const void *p)
-{
- u32 tmp;
- memmove(&tmp, p, 4);
- return tmp;
-}
-
-static inline u64 __get_unaligned_memmove64(const void *p)
-{
- u64 tmp;
- memmove(&tmp, p, 8);
- return tmp;
-}
-
-static inline void __put_unaligned_memmove16(u16 val, void *p)
-{
- memmove(p, &val, 2);
-}
-
-static inline void __put_unaligned_memmove32(u32 val, void *p)
-{
- memmove(p, &val, 4);
-}
-
-static inline void __put_unaligned_memmove64(u64 val, void *p)
-{
- memmove(p, &val, 8);
-}
-
-#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
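The memmove variants instead copy through a properly aligned temporary, so the compiler never sees an unaligned dereference; as the comment above notes, memmove (rather than memcpy) was used so gcc would not substitute a __builtin_memcpy of its own. A user-space sketch of the same trick, returning native-endian values:

#include <stdint.h>
#include <string.h>

static uint32_t get_unaligned_memmove32(const void *p)
{
        uint32_t tmp;

        memmove(&tmp, p, sizeof(tmp));  /* tmp itself is naturally aligned */
        return tmp;                     /* value is in native byte order */
}

static void put_unaligned_memmove32(uint32_t val, void *p)
{
        memmove(p, &val, sizeof(val));
}

int main(void)
{
        unsigned char buf[8] = { 0 };

        put_unaligned_memmove32(0x01020304, buf + 1);
        return get_unaligned_memmove32(buf + 1) == 0x01020304 ? 0 : 1;
}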
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eaae24217e8a..7ccaa76a9a96 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -473,12 +473,6 @@ struct usb_dev_state;
struct usb_tt;
-enum usb_device_removable {
- USB_DEVICE_REMOVABLE_UNKNOWN = 0,
- USB_DEVICE_REMOVABLE,
- USB_DEVICE_FIXED,
-};
-
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -703,7 +697,6 @@ struct usb_device {
#endif
struct wusb_dev *wusb_dev;
int slot_id;
- enum usb_device_removable removable;
struct usb2_lpm_parameters l1_params;
struct usb3_lpm_parameters u1_params;
struct usb3_lpm_parameters u2_params;
@@ -1485,7 +1478,7 @@ typedef void (*usb_complete_t)(struct urb *);
*
* Note that transfer_buffer must still be set if the controller
* does not support DMA (as indicated by hcd_uses_dma()) and when talking
- * to root hub. If you have to trasfer between highmem zone and the device
+ * to root hub. If you have to transfer between highmem zone and the device
* on such controller, create a bounce buffer or bail out with an error.
* If transfer_buffer cannot be set (is in highmem) and the controller is DMA
* capable, assign NULL to it, so that usbmon knows not to use the value.
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c71150f2c639..9d2762279286 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -271,7 +271,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
* @bConfigurationValue: Copied into configuration descriptor.
* @iConfiguration: Copied into configuration descriptor.
* @bmAttributes: Copied into configuration descriptor.
- * @MaxPower: Power consumtion in mA. Used to compute bMaxPower in the
+ * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the
* configuration descriptor after considering the bus speed.
* @cdev: assigned by @usb_add_config() before calling @bind(); this is
* the device associated with this configuration.
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index ee04ef214ce8..75c7538e350a 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -197,7 +197,7 @@ struct usb_ep_caps {
* @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
- * @caps:The structure describing types and directions supported by endoint.
+ * @caps:The structure describing types and directions supported by endpoint.
* @enabled: The current endpoint enabled/disabled state.
* @claimed: True if this endpoint is claimed by a function.
* @maxpacket:The maximum packet size used on this endpoint. The initial
@@ -325,6 +325,7 @@ struct usb_gadget_ops {
void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed);
void (*udc_set_ssp_rate)(struct usb_gadget *gadget,
enum usb_ssp_rate rate);
+ void (*udc_async_callbacks)(struct usb_gadget *gadget, bool enable);
struct usb_ep *(*match_ep)(struct usb_gadget *,
struct usb_endpoint_descriptor *,
struct usb_ss_ep_comp_descriptor *);
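The new ->udc_async_callbacks() hook lets the gadget core tell a UDC driver whether the function driver is ready to receive asynchronous callbacks such as setup and disconnect. A hedged sketch of a typical implementation; apart from struct usb_gadget and the hook signature shown above, every name here is hypothetical:

struct example_udc {                            /* hypothetical private data */
        struct usb_gadget       gadget;
        spinlock_t              lock;
        bool                    async_callbacks;
};

#define gadget_to_example(g)    container_of((g), struct example_udc, gadget)

static void example_udc_async_callbacks(struct usb_gadget *gadget, bool enable)
{
        struct example_udc *udc = gadget_to_example(gadget);
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);
        udc->async_callbacks = enable;  /* consulted before forwarding events */
        spin_unlock_irqrestore(&udc->lock, flags);
}

A driver would then test the flag before invoking the gadget driver's callbacks.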
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 96281cd50ff6..548a028f2dab 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -59,7 +59,7 @@
* USB Host Controller Driver (usb_hcd) framework
*
* Since "struct usb_bus" is so thin, you can't share much code in it.
- * This framework is a layer over that, and should be more sharable.
+ * This framework is a layer over that, and should be more shareable.
*/
/*-------------------------------------------------------------------------*/
@@ -299,7 +299,7 @@ struct hc_driver {
* (optional) these hooks allow an HCD to override the default DMA
* mapping and unmapping routines. In general, they shouldn't be
* necessary unless the host controller has special DMA requirements,
- * such as alignment contraints. If these are not specified, the
+ * such as alignment constraints. If these are not specified, the
* general usb_hcd_(un)?map_urb_for_dma functions will be used instead
* (and it may be a good idea to call these functions in your HCD
* implementation)
@@ -409,7 +409,10 @@ struct hc_driver {
int (*find_raw_port_number)(struct usb_hcd *, int);
/* Call for power on/off the port if necessary */
int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
-
+ /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+ int (*submit_single_step_set_feature)(struct usb_hcd *,
+ struct urb *, int);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -474,6 +477,14 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
+#ifdef CONFIG_USB_HCD_TEST_MODE
+extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port);
+#else
+static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ return 0;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
#ifdef CONFIG_USB_PCI
struct pci_dev;
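ehset_single_step_set_feature() and the matching hc_driver hook expose the USB 2.0 Embedded Host "single step SET_FEATURE" compliance test; the inline stub above keeps callers buildable when CONFIG_USB_HCD_TEST_MODE is off. A hedged sketch of a port test-mode dispatcher; example_run_port_test is hypothetical, the two identifiers it uses come from this hunk:

static int example_run_port_test(struct usb_hcd *hcd, int port, int test_mode)
{
        switch (test_mode) {
        case EHSET_TEST_SINGLE_STEP_SET_FEATURE:
                /* stubbed to return 0 when CONFIG_USB_HCD_TEST_MODE is off */
                return ehset_single_step_set_feature(hcd, port);
        default:
                return -EINVAL;
        }
}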
diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h
deleted file mode 100644
index b75ded28db81..000000000000
--- a/include/linux/usb/isp1760.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * board initialization should put one of these into dev->platform_data
- * and place the isp1760 onto platform_bus named "isp1760-hcd".
- */
-
-#ifndef __LINUX_USB_ISP1760_H
-#define __LINUX_USB_ISP1760_H
-
-struct isp1760_platform_data {
- unsigned is_isp1761:1; /* Chip is ISP1761 */
- unsigned bus_width_16:1; /* 16/32-bit data bus width */
- unsigned port1_otg:1; /* Port 1 supports OTG */
- unsigned analog_oc:1; /* Analog overcurrent */
- unsigned dack_polarity_high:1; /* DACK active high */
- unsigned dreq_polarity_high:1; /* DREQ active high */
-};
-
-#endif /* __LINUX_USB_ISP1760_H */
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index e78eb577d0fa..784659d4dc99 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -98,7 +98,7 @@ enum otg_fsm_timer {
* @b_bus_req: TRUE during the time that the Application running on the
* B-device wants to use the bus
*
- * Auxilary inputs (OTG v1.3 only. Obsolete now.)
+ * Auxiliary inputs (OTG v1.3 only. Obsolete now.)
* @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD
* @b_bus_suspend: TRUE when the A-device detects that the B-device has put
* the bus into suspend
@@ -153,7 +153,7 @@ struct otg_fsm {
int a_bus_req;
int b_bus_req;
- /* Auxilary inputs */
+ /* Auxiliary inputs */
int a_sess_vld;
int b_bus_resume;
int b_bus_suspend;
@@ -177,7 +177,7 @@ struct otg_fsm {
int a_bus_req_inf;
int a_clr_err_inf;
int b_bus_req_inf;
- /* Auxilary informative variables */
+ /* Auxiliary informative variables */
int a_suspend_req_inf;
/* Timeout indicator for timers */
@@ -196,6 +196,7 @@ struct otg_fsm {
struct mutex lock;
u8 *host_req_flag;
struct delayed_work hnp_polling_work;
+ bool hnp_work_inited;
bool state_changed;
};
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 69f1b6328532..7ceeecbb9e02 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -125,7 +125,7 @@ enum usb_dr_mode {
* @dev: Pointer to the given device
*
* The function gets phy interface string from property 'dr_mode',
- * and returns the correspondig enum usb_dr_mode
+ * and returns the corresponding enum usb_dr_mode
*/
extern enum usb_dr_mode usb_get_dr_mode(struct device *dev);
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 5e4c497f54d6..eeb7c2157c72 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -32,7 +32,7 @@
#define USB_QUIRK_DELAY_INIT BIT(6)
/*
- * For high speed and super speed interupt endpoints, the USB 2.0 and
+ * For high speed and super speed interrupt endpoints, the USB 2.0 and
* USB 3.0 spec require the interval in microframes
* (1 microframe = 125 microseconds) to be calculated as
* interval = 2 ^ (bInterval-1).
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index 0164fed31b06..031f148ab373 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -65,6 +65,7 @@ void usb_role_switch_unregister(struct usb_role_switch *sw);
void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data);
void *usb_role_switch_get_drvdata(struct usb_role_switch *sw);
+const char *usb_role_string(enum usb_role role);
#else
static inline int usb_role_switch_set_role(struct usb_role_switch *sw,
enum usb_role role)
@@ -109,6 +110,11 @@ static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw)
return NULL;
}
+static inline const char *usb_role_string(enum usb_role role)
+{
+ return "unknown";
+}
+
#endif
#endif /* __LINUX_USB_ROLE_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 8c63fa9bfc74..16ea5a4cc586 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -276,7 +276,7 @@ struct usb_serial_driver {
int (*write)(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
/* Called only by the tty layer */
- int (*write_room)(struct tty_struct *tty);
+ unsigned int (*write_room)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
@@ -284,7 +284,7 @@ struct usb_serial_driver {
void (*set_termios)(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
- int (*chars_in_buffer)(struct tty_struct *tty);
+ unsigned int (*chars_in_buffer)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, long timeout);
bool (*tx_empty)(struct usb_serial_port *port);
void (*throttle)(struct tty_struct *tty);
@@ -347,8 +347,8 @@ int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *por
const unsigned char *buf, int count);
void usb_serial_generic_close(struct usb_serial_port *port);
int usb_serial_generic_resume(struct usb_serial *serial);
-int usb_serial_generic_write_room(struct tty_struct *tty);
-int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+unsigned int usb_serial_generic_write_room(struct tty_struct *tty);
+unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout);
void usb_serial_generic_read_bulk_callback(struct urb *urb);
void usb_serial_generic_write_bulk_callback(struct urb *urb);
@@ -395,7 +395,7 @@ static inline void usb_serial_debug_data(struct device *dev,
}
/*
- * Macro for reporting errors in write path to avoid inifinite loop
+ * Macro for reporting errors in write path to avoid infinite loop
* when port is used as a console.
*/
#define dev_err_console(usport, fmt, ...) \
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 42fcfbe10590..bffc8d3e14ad 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -66,6 +66,8 @@ enum tcpm_transmit_type {
* For example, some tcpcs may include BC1.2 charger detection
* and use that in this case.
* @set_cc: Called to set value of CC pins
+ * @apply_rc: Optional; Needed to move TCPCI based chipset to APPLY_RC state
+ * as stated by the TCPCI specification.
* @get_cc: Called to read current CC pin values
* @set_polarity:
* Called to set polarity
@@ -120,6 +122,8 @@ struct tcpc_dev {
int (*get_vbus)(struct tcpc_dev *dev);
int (*get_current_limit)(struct tcpc_dev *dev);
int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
+ int (*apply_rc)(struct tcpc_dev *dev, enum typec_cc_status cc,
+ enum typec_cc_polarity polarity);
int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
enum typec_cc_status *cc2);
int (*set_polarity)(struct tcpc_dev *dev,
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index fc4c7edb2e8a..cfb916cccd31 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -97,7 +97,7 @@ enum {
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
-/* Helper for setting/getting the pin assignement value to the configuration */
+/* Helper for setting/getting the pin assignment value to the configuration */
#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 794d1538b8ba..331d2ccf0bcc 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -53,6 +53,11 @@ enum mcopy_atomic_mode {
MCOPY_ATOMIC_CONTINUE,
};
+extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, struct page *page,
+ bool newly_allocated, bool wp_copy);
+
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
bool *mmap_changing, __u64 mode);
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index f311d227aa1b..8cfe49d201dd 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -28,13 +28,34 @@ struct vdpa_notification_area {
};
/**
- * struct vdpa_vq_state - vDPA vq_state definition
+ * struct vdpa_vq_state_split - vDPA split virtqueue state
* @avail_index: available index
*/
-struct vdpa_vq_state {
+struct vdpa_vq_state_split {
u16 avail_index;
};
+/**
+ * struct vdpa_vq_state_packed - vDPA packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vdpa_vq_state_packed {
+ u16 last_avail_counter:1;
+ u16 last_avail_idx:15;
+ u16 last_used_counter:1;
+ u16 last_used_idx:15;
+};
+
+struct vdpa_vq_state {
+ union {
+ struct vdpa_vq_state_split split;
+ struct vdpa_vq_state_packed packed;
+ };
+};
+
struct vdpa_mgmt_dev;
/**
@@ -256,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
size_t size, const char *name);
+/**
+ * vdpa_alloc_device - allocate and initilaize a vDPA device
+ *
+ * @dev_struct: the type of the parent structure
+ * @member: the name of struct vdpa_device within the @dev_struct
+ * @parent: the parent device
+ * @config: the bus operations that is supported by this device
+ * @name: name of the vdpa device
+ *
+ * Return allocated data structure or ERR_PTR upon error
+ */
#define vdpa_alloc_device(dev_struct, member, parent, config, name) \
container_of(__vdpa_alloc_device( \
parent, config, \
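Because vdpa_vq_state is now a union, a parent driver reports either split- or packed-ring state depending on which ring format was negotiated. A standalone sketch of the new layout with purely illustrative values, mirroring the structs added above (C11 anonymous union):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vdpa_vq_state_split {
        uint16_t avail_index;
};

struct vdpa_vq_state_packed {
        uint16_t last_avail_counter:1;
        uint16_t last_avail_idx:15;
        uint16_t last_used_counter:1;
        uint16_t last_used_idx:15;
};

struct vdpa_vq_state {
        union {
                struct vdpa_vq_state_split split;
                struct vdpa_vq_state_packed packed;
        };
};

int main(void)
{
        struct vdpa_vq_state state = { 0 };
        bool packed_ring = true;        /* e.g. a packed-ring feature was negotiated */

        if (packed_ring) {
                state.packed.last_avail_counter = 1;    /* wrap counters start at 1 */
                state.packed.last_used_counter = 1;
        } else {
                state.split.avail_index = 0;
        }

        printf("avail counter %d\n", state.packed.last_avail_counter);
        return 0;
}

The 1-bit wrap counters paired with 15-bit indices follow the packed virtqueue format, where each index travels with its ring wrap counter.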
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b1894e0323fa..41edbc01ffa4 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -110,6 +110,7 @@ struct virtio_device {
bool config_enabled;
bool config_change_pending;
spinlock_t config_lock;
+ spinlock_t vqs_list_lock; /* Protects VQs list access */
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index 6a95b58fd0f4..eb2bd9b4077d 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -79,6 +79,7 @@ static inline void vp_iowrite64_twopart(u64 val,
}
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
+u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
u64 features);
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index bfaaf0b6fa76..2644425b6dce 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -29,7 +29,7 @@ struct notifier_block; /* in notifier.h */
#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */
/*
- * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
*
* If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
* shadow memory has been mapped. It's used to handle allocation errors so that
@@ -104,6 +104,21 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
}
#endif
+#ifndef arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ return PAGE_SIZE;
+}
+#endif
+
+#ifndef arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ return PAGE_SHIFT;
+}
+#endif
+
/*
* Highlevel APIs for driver use
*/
@@ -232,7 +247,7 @@ static inline void set_vm_flush_reset_perms(void *addr)
extern long vread(char *buf, char *addr, unsigned long count);
/*
- * Internals. Dont't use..
+ * Internals. Don't use..
*/
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
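The two new arch_vmap_pte_* hooks let an architecture map more than PAGE_SIZE per PTE step (for example with contiguous-PTE hints) while generic code keeps the PAGE_SIZE/PAGE_SHIFT defaults shown above. A hedged sketch of what an override might look like; the 64KiB threshold is purely illustrative:

#define EXAMPLE_CONT_PTE_SHIFT  16      /* hypothetical 64KiB contiguous range */

static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
        if (size >= (1UL << EXAMPLE_CONT_PTE_SHIFT))
                return EXAMPLE_CONT_PTE_SHIFT;

        return PAGE_SHIFT;
}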
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 84db7b8f912f..212892cf9822 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -14,6 +14,7 @@
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 94e7a315479c..0da94a6dee15 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -166,7 +166,6 @@ int vt_get_kbd_mode_bit(int console, int bit);
void vt_set_kbd_mode_bit(int console, int bit);
void vt_clr_kbd_mode_bit(int console, int bit);
void vt_set_led_state(int console, int leds);
-void vt_set_led_state(int console, int leds);
void vt_kbd_con_start(int console);
void vt_kbd_con_stop(int console);
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
deleted file mode 100644
index b1eaf6e31735..000000000000
--- a/include/linux/zbud.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ZBUD_H_
-#define _ZBUD_H_
-
-#include <linux/types.h>
-
-struct zbud_pool;
-
-struct zbud_ops {
- int (*evict)(struct zbud_pool *pool, unsigned long handle);
-};
-
-struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
-void zbud_destroy_pool(struct zbud_pool *pool);
-int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
-void zbud_free(struct zbud_pool *pool, unsigned long handle);
-int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
-void *zbud_map(struct zbud_pool *pool, unsigned long handle);
-void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
-u64 zbud_get_pool_size(struct zbud_pool *pool);
-
-#endif /* _ZBUD_H_ */
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 143568d64b20..4b57bbba588a 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -338,7 +338,7 @@ do { \
FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \
break; \
} \
- /* FALLTHRU */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index 14cfd036268a..e3e770f76f34 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -19,7 +19,7 @@ enum rpcif_data_dir {
RPCIF_DATA_OUT,
};
-struct rpcif_op {
+struct rpcif_op {
struct {
u8 buswidth;
u8 opcode;
@@ -57,7 +57,7 @@ struct rpcif_op {
} data;
};
-struct rpcif {
+struct rpcif {
struct device *dev;
void __iomem *dirmap;
struct regmap *regmap;
@@ -76,7 +76,7 @@ struct rpcif {
u32 ddr; /* DRDRENR or SMDRENR */
};
-int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
+int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
void rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
size_t *len);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a53e94459ecd..db4312e44d47 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1230,6 +1230,7 @@ struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
+void hci_cleanup_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 15335732e166..625d9c72dee3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -201,6 +201,11 @@ struct bond_up_slave {
*/
#define BOND_LINK_NOCHANGE -1
+struct bond_ipsec {
+ struct list_head list;
+ struct xfrm_state *xs;
+};
+
/*
* Here are the locking policies for the two bonding locks:
* Get rcu_read_lock when reading or RTNL when writing slave list.
@@ -249,7 +254,9 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
#ifdef CONFIG_XFRM_OFFLOAD
- struct xfrm_state *xs;
+ struct list_head ipsec_list;
+ /* protecting ipsec_list */
+ spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
};
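Switching from a single xs pointer to ipsec_list means each offloaded xfrm_state is now tracked as a bond_ipsec entry under ipsec_lock. A hedged sketch of the corresponding add path; the struct bonding and struct bond_ipsec fields come from this hunk, the function itself and its error handling are illustrative:

static int example_bond_ipsec_add(struct bonding *bond, struct xfrm_state *xs)
{
        struct bond_ipsec *ipsec;

        ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
        if (!ipsec)
                return -ENOMEM;

        ipsec->xs = xs;
        spin_lock_bh(&bond->ipsec_lock);
        list_add(&ipsec->list, &bond->ipsec_list);
        spin_unlock_bh(&bond->ipsec_lock);
        return 0;
}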
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 73af4a64a599..40296ed976a9 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void)
static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return sk->sk_ll_usec && !signal_pending(current);
+ return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}
bool sk_busy_loop_end(void *p, unsigned long start_time);
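The READ_ONCE() annotation assumes the matching writer updates sk_ll_usec with WRITE_ONCE(), so a lockless reader gets one untorn load instead of a racy plain access. A simplified user-space model of that pairing; the EX_ macros only approximate the kernel's definitions:

#include <stdio.h>

#define EX_READ_ONCE(x)         (*(const volatile __typeof__(x) *)&(x))
#define EX_WRITE_ONCE(x, v)     (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned int sk_ll_usec;         /* stand-in for sk->sk_ll_usec */

static int sk_can_busy_loop(void)
{
        return EX_READ_ONCE(sk_ll_usec) != 0;   /* one load, never re-read */
}

int main(void)
{
        EX_WRITE_ONCE(sk_ll_usec, 50);          /* e.g. a busy-poll setter */
        printf("busy loop on: %d\n", sk_can_busy_loop());
        return 0;
}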
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
deleted file mode 100644
index 552cf68d28d2..000000000000
--- a/include/net/caif/caif_hsi.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / daniel.martensson@stericsson.com
- * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
- */
-
-#ifndef CAIF_HSI_H_
-#define CAIF_HSI_H_
-
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_device.h>
-#include <linux/atomic.h>
-
-/*
- * Maximum number of CAIF frames that can reside in the same HSI frame.
- */
-#define CFHSI_MAX_PKTS 15
-
-/*
- * Maximum number of bytes used for the frame that can be embedded in the
- * HSI descriptor.
- */
-#define CFHSI_MAX_EMB_FRM_SZ 96
-
-/*
- * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFHSI_DBG_PREFILL 0
-
-/* Structure describing a HSI packet descriptor. */
-#pragma pack(1) /* Byte alignment. */
-struct cfhsi_desc {
- u8 header;
- u8 offset;
- u16 cffrm_len[CFHSI_MAX_PKTS];
- u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
-};
-#pragma pack() /* Default alignment. */
-
-/* Size of the complete HSI packet descriptor. */
-#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
-
-/*
- * Size of the complete HSI packet descriptor excluding the optional embedded
- * CAIF frame.
- */
-#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
-
-/*
- * Maximum bytes transferred in one transfer.
- */
-#define CFHSI_MAX_CAIF_FRAME_SZ 4096
-
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
-
-/* Size of the complete HSI TX buffer. */
-#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Size of the complete HSI RX buffer. */
-#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Bitmasks for the HSI descriptor. */
-#define CFHSI_PIGGY_DESC (0x01 << 7)
-
-#define CFHSI_TX_STATE_IDLE 0
-#define CFHSI_TX_STATE_XFER 1
-
-#define CFHSI_RX_STATE_DESC 0
-#define CFHSI_RX_STATE_PAYLOAD 1
-
-/* Bitmasks for power management. */
-#define CFHSI_WAKE_UP 0
-#define CFHSI_WAKE_UP_ACK 1
-#define CFHSI_WAKE_DOWN_ACK 2
-#define CFHSI_AWAKE 3
-#define CFHSI_WAKELOCK_HELD 4
-#define CFHSI_SHUTDOWN 5
-#define CFHSI_FLUSH_FIFO 6
-
-#ifndef CFHSI_INACTIVITY_TOUT
-#define CFHSI_INACTIVITY_TOUT (1 * HZ)
-#endif /* CFHSI_INACTIVITY_TOUT */
-
-#ifndef CFHSI_WAKE_TOUT
-#define CFHSI_WAKE_TOUT (3 * HZ)
-#endif /* CFHSI_WAKE_TOUT */
-
-#ifndef CFHSI_MAX_RX_RETRIES
-#define CFHSI_MAX_RX_RETRIES (10 * HZ)
-#endif
-
-/* Structure implemented by the CAIF HSI driver. */
-struct cfhsi_cb_ops {
- void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
- void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
-};
-
-/* Structure implemented by HSI device. */
-struct cfhsi_ops {
- int (*cfhsi_up) (struct cfhsi_ops *dev);
- int (*cfhsi_down) (struct cfhsi_ops *dev);
- int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
- int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
- int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
- int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
- int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
- int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
- struct cfhsi_cb_ops *cb_ops;
-};
-
-/* Structure holds status of received CAIF frames processing */
-struct cfhsi_rx_state {
- int state;
- int nfrms;
- int pld_len;
- int retries;
- bool piggy_desc;
-};
-
-/* Priority mapping */
-enum {
- CFHSI_PRIO_CTL = 0,
- CFHSI_PRIO_VI,
- CFHSI_PRIO_VO,
- CFHSI_PRIO_BEBK,
- CFHSI_PRIO_LAST,
-};
-
-struct cfhsi_config {
- u32 inactivity_timeout;
- u32 aggregation_timeout;
- u32 head_align;
- u32 tail_align;
- u32 q_high_mark;
- u32 q_low_mark;
-};
-
-/* Structure implemented by CAIF HSI drivers. */
-struct cfhsi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead[CFHSI_PRIO_LAST];
- struct cfhsi_cb_ops cb_ops;
- struct cfhsi_ops *ops;
- int tx_state;
- struct cfhsi_rx_state rx_state;
- struct cfhsi_config cfg;
- int rx_len;
- u8 *rx_ptr;
- u8 *tx_buf;
- u8 *rx_buf;
- u8 *rx_flip_buf;
- spinlock_t lock;
- int flow_off_sent;
- struct list_head list;
- struct work_struct wake_up_work;
- struct work_struct wake_down_work;
- struct work_struct out_of_sync_work;
- struct workqueue_struct *wq;
- wait_queue_head_t wake_up_wait;
- wait_queue_head_t wake_down_wait;
- wait_queue_head_t flush_fifo_wait;
- struct timer_list inactivity_timer;
- struct timer_list rx_slowpath_timer;
-
- /* TX aggregation */
- int aggregation_len;
- struct timer_list aggregation_timer;
-
- unsigned long bits;
-};
-extern struct platform_driver cfhsi_driver;
-
-/**
- * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
- * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
- * taking the HSI wakeline down, in milliseconds.
- * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
- * enum ifla_caif_hsi is used to specify the configuration attributes.
- */
-enum ifla_caif_hsi {
- __IFLA_CAIF_HSI_UNSPEC,
- __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- __IFLA_CAIF_HSI_HEAD_ALIGN,
- __IFLA_CAIF_HSI_TAIL_ALIGN,
- __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- __IFLA_CAIF_HSI_QLOW_WATERMARK,
- __IFLA_CAIF_HSI_MAX
-};
-
-struct cfhsi_ops *cfhsi_get_ops(void);
-
-#endif /* CAIF_HSI_H_ */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 0d05b9e8690b..5b96d5bd6e54 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -80,16 +80,18 @@ static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
return csum16_add(csum, ~addend);
}
-static inline __wsum
-csum_block_add(__wsum csum, __wsum csum2, int offset)
+static inline __wsum csum_shift(__wsum sum, int offset)
{
- u32 sum = (__force u32)csum2;
-
/* rotate sum to align it with a 16b boundary */
if (offset & 1)
- sum = ror32(sum, 8);
+ return (__force __wsum)ror32((__force u32)sum, 8);
+ return sum;
+}
- return csum_add(csum, (__force __wsum)sum);
+static inline __wsum
+csum_block_add(__wsum csum, __wsum csum2, int offset)
+{
+ return csum_add(csum, csum_shift(csum2, offset));
}
static inline __wsum
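csum_block_add() folds a partial Internet checksum that was computed starting at byte offset into a running sum; when the offset is odd, the contribution sits one byte off the 16-bit word grid, which the 8-bit rotation in the new csum_shift() corrects. A user-space model of the two helpers, with csum_add's carry folding simplified:

#include <stdint.h>

static uint32_t csum_add(uint32_t a, uint32_t b)
{
        uint64_t sum = (uint64_t)a + b;

        return (uint32_t)(sum + (sum >> 32));   /* fold the carry back in */
}

static uint32_t csum_shift(uint32_t sum, int offset)
{
        /* rotate by one byte so the 16-bit words line up again */
        if (offset & 1)
                return (sum >> 8) | (sum << 24);
        return sum;
}

static uint32_t csum_block_add(uint32_t csum, uint32_t csum2, int offset)
{
        return csum_add(csum, csum_shift(csum2, offset));
}

Factoring the rotation into csum_shift() lets other call sites reuse the odd-offset handling without duplicating it.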
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 56cb3c38569a..14efa0ded75d 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -45,7 +45,9 @@ skb_tunnel_info(const struct sk_buff *skb)
return &md_dst->u.tun_info;
dst = skb_dst(skb);
- if (dst && dst->lwtstate)
+ if (dst && dst->lwtstate &&
+ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
+ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
return lwt_tun_info(dst->lwtstate);
return NULL;
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 69c9eabf8325..1b9d75aedb22 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
}
/**
- * flow_action_has_one_action() - check if exactly one action is present
+ * flow_offload_has_one_action() - check if exactly one action is present
* @action: tc filter flow offload action
*
* Returns true if exactly one action is present.
@@ -319,14 +319,12 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
if (flow_offload_has_one_action(action))
return true;
- if (action) {
- flow_action_for_each(i, action_entry, action) {
- if (i && action_entry->hw_stats != last_hw_stats) {
- NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
- return false;
- }
- last_hw_stats = action_entry->hw_stats;
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats != last_hw_stats) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
}
+ last_hw_stats = action_entry->hw_stats;
}
return true;
}
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f14149df5a65..0bf09a9bca4e 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -263,9 +263,9 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
{
- int mtu;
+ unsigned int mtu;
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index c0f0a13ed818..49aa79c7b278 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -15,9 +15,11 @@
#include <linux/if_ether.h>
/* Lengths of frame formats */
-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
-#define LLC_PDU_LEN_S 4
-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
/* Known SAP addresses */
#define LLC_GLOBAL_SAP 0xFF
#define LLC_NULL_SAP 0x00 /* not network-layer visible */
@@ -50,9 +52,10 @@
#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
#define LLC_PDU_TYPE_MASK 0x03
-#define LLC_PDU_TYPE_I 0 /* first bit */
-#define LLC_PDU_TYPE_S 1 /* first two bits */
-#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
#define LLC_PDU_TYPE_IS_I(pdu) \
((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
u8 ssap, u8 dsap, u8 cr)
{
- const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+ int hlen = 4; /* default value for I and S types */
struct llc_pdu_un *pdu;
+ switch (type) {
+ case LLC_PDU_TYPE_U:
+ hlen = 3;
+ break;
+ case LLC_PDU_TYPE_U_XID:
+ hlen = 6;
+ break;
+ }
+
skb_push(skb, hlen);
skb_reset_network_header(skb);
pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
xid_info->type = svcs_supported;
xid_info->rw = rx_window << 1; /* size of receive window */
- skb_put(skb, sizeof(struct llc_xid_info));
+
+ /* no need to push/put since llc_pdu_header_init() has already
+ * pushed 3 + 3 bytes
+ */
}
/**
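The new LLC_PDU_TYPE_U_XID path reserves the XID payload together with the 3-byte U-format header, which is why llc_pdu_header_init() now pushes 6 bytes and llc_pdu_init_as_xid_cmd() no longer calls skb_put(). A small restatement of the sizing; the field names are taken from the XID helper shown above, the packed attribute is an assumption:

#include <stdint.h>

struct llc_xid_info {           /* fields as used by llc_pdu_init_as_xid_cmd() */
        uint8_t fmt_id;         /* LLC_XID_FMT_ID, 0x81 */
        uint8_t type;           /* services supported */
        uint8_t rw;             /* receive window << 1 */
} __attribute__((packed));

#define LLC_PDU_LEN_U           3
#define LLC_PDU_LEN_U_XID       (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))  /* == 6 */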
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index cb580b06152f..8b5af683a818 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -105,7 +105,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts);
@@ -227,9 +227,10 @@ static inline bool mptcp_established_options(struct sock *sk,
return false;
}
-static inline void mptcp_incoming_options(struct sock *sk,
+static inline bool mptcp_incoming_options(struct sock *sk,
struct sk_buff *skb)
{
+ return true;
}
static inline void mptcp_skb_ext_move(struct sk_buff *to,
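mptcp_incoming_options() now reports failure, so a TCP receive-path caller can discard a segment whose MPTCP options cannot be processed instead of continuing with inconsistent state; the inline stub keeps returning true so plain TCP sockets are unaffected. A hedged sketch of a caller honouring the new return value, with the drop handling illustrative rather than taken from this patch:

static void example_data_queue(struct sock *sk, struct sk_buff *skb)
{
        if (!mptcp_incoming_options(sk, skb)) {
                __kfree_skb(skb);       /* options rejected: drop the segment */
                return;
        }
        /* ... continue normal receive processing ... */
}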
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 09f2efea0b97..13807ea94cd2 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
void nf_conntrack_proto_pernet_init(struct net *net);
-void nf_conntrack_proto_pernet_fini(struct net *net);
int nf_conntrack_proto_init(void);
void nf_conntrack_proto_fini(void);
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index c3094b83a525..fefd38db95b3 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -27,9 +27,9 @@ struct nf_tcp_net {
u8 tcp_loose;
u8 tcp_be_liberal;
u8 tcp_max_retrans;
+ u8 tcp_ignore_invalid_rst;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
unsigned int offload_timeout;
- unsigned int offload_pickup;
#endif
};
@@ -43,7 +43,6 @@ struct nf_udp_net {
unsigned int timeouts[UDP_CT_MAX];
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
unsigned int offload_timeout;
- unsigned int offload_pickup;
#endif
};
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index e946366e8ba5..1f4e1816fd36 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -75,6 +75,7 @@ struct netns_xfrm {
#endif
spinlock_t xfrm_state_lock;
seqcount_spinlock_t xfrm_state_hash_generation;
+ seqcount_spinlock_t xfrm_policy_hash_generation;
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ec7823921bd2..298a8d10168b 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -337,6 +337,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
* struct tcf_pkt_info - packet information
+ *
+ * @ptr: start of the pkt data
+ * @nexthdr: offset of the next header
*/
struct tcf_pkt_info {
unsigned char * ptr;
@@ -355,6 +358,7 @@ struct tcf_ematch_ops;
* @ops: the operations lookup table of the corresponding ematch module
* @datalen: length of the ematch specific configuration data
* @data: ematch specific data
+ * @net: the network namespace
*/
struct tcf_ematch {
struct tcf_ematch_ops * ops;
diff --git a/include/net/psample.h b/include/net/psample.h
index e328c5127757..0509d2d6be67 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num);
void psample_group_take(struct psample_group *group);
void psample_group_put(struct psample_group *group);
+struct sk_buff;
+
#if IS_ENABLED(CONFIG_PSAMPLE)
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 265fffa33dad..5859e0a16a58 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -360,8 +360,7 @@ enum {
#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
* Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
* addresses.
*/
@@ -369,7 +368,6 @@ enum {
((htonl(INADDR_BROADCAST) == a) || \
ipv4_is_multicast(a) || \
ipv4_is_zeronet(a) || \
- ipv4_is_test_198(a) || \
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 32fc4a309df5..651bba654d77 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -984,6 +984,7 @@ struct sctp_transport {
} cacc;
struct {
+ __u32 last_rtx_chunks;
__u16 pmtu;
__u16 probe_size;
__u16 probe_high;
@@ -1024,8 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
-void sctp_transport_pl_send(struct sctp_transport *t);
-void sctp_transport_pl_recv(struct sctp_transport *t);
+bool sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
/* This is the structure we use to queue packets as they come into
diff --git a/include/net/sock.h b/include/net/sock.h
index 8bdd80027ffb..f23cb259b0e2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -316,7 +316,9 @@ struct bpf_local_storage;
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
- * @sk_tsflags: SO_TIMESTAMPING socket options
+ * @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
+ * for timestamping
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
@@ -493,6 +495,7 @@ struct sock {
seqlock_t sk_stamp_seq;
#endif
u16 sk_tsflags;
+ int sk_bind_phc;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
@@ -2755,7 +2758,8 @@ void sock_def_readable(struct sock *sk);
int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
-int sock_set_timestamping(struct sock *sk, int optname, int val);
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping);
void sock_enable_timestamps(struct sock *sk);
void sock_no_linger(struct sock *sk);
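sock_set_timestamping() now receives a struct so_timestamping, pairing the existing flag word with the new sk_bind_phc index so hardware timestamps can be bound to one PTP clock. A hedged user-space sketch of the matching setsockopt() call, assuming the so_timestamping layout and SOF_TIMESTAMPING_BIND_PHC flag exported through linux/net_tstamp.h in the same series; the PHC index 0 is illustrative:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

int example_bind_phc(int fd)
{
        struct so_timestamping cfg = {
                .flags = SOF_TIMESTAMPING_TX_HARDWARE |
                         SOF_TIMESTAMPING_RAW_HARDWARE |
                         SOF_TIMESTAMPING_BIND_PHC,
                .bind_phc = 0,                  /* PHC index, illustrative */
        };

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &cfg, sizeof(cfg));
}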
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e668f1bf780d..784d5c3ef1c5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -686,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
@@ -1705,7 +1709,6 @@ struct tcp_fastopen_context {
struct rcu_head rcu;
};
-extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
index 9e273fed0a85..00191695233a 100644
--- a/include/scsi/fc/fc_ms.h
+++ b/include/scsi/fc/fc_ms.h
@@ -25,6 +25,12 @@
#define FC_FDMI_SUBTYPE 0x10 /* fs_ct_hdr.ct_fs_subtype */
/*
+ * Management server FDMI specifications.
+ */
+#define FDMI_V1 1 /* FDMI version 1 specifications */
+#define FDMI_V2 2 /* FDMI version 2 specifications */
+
+/*
* Management server FDMI Requests.
*/
enum fc_fdmi_req {
@@ -57,22 +63,36 @@ enum fc_fdmi_hba_attr_type {
FC_FDMI_HBA_ATTR_FIRMWAREVERSION = 0x0009,
FC_FDMI_HBA_ATTR_OSNAMEVERSION = 0x000A,
FC_FDMI_HBA_ATTR_MAXCTPAYLOAD = 0x000B,
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME = 0x000C,
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO = 0x000D,
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS = 0x000E,
+ FC_FDMI_HBA_ATTR_FABRICNAME = 0x000F,
+ FC_FDMI_HBA_ATTR_BIOSVERSION = 0x0010,
+ FC_FDMI_HBA_ATTR_BIOSSTATE = 0x0011,
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER = 0x00E0,
};
/*
* HBA Attribute Length
*/
#define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
-#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80
-#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80
-#define FC_FDMI_HBA_ATTR_MODEL_LEN 256
-#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
-#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 256
-#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 256
+#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
+#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
+#define FC_FDMI_HBA_ATTR_MODEL_LEN 64
+#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 64
+#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 128
#define FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN 4
+#define FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN 64
+#define FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN 4
+#define FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN 4
+#define FC_FDMI_HBA_ATTR_FABRICNAME_LEN 8
+#define FC_FDMI_HBA_ATTR_BIOSVERSION_LEN 64
+#define FC_FDMI_HBA_ATTR_BIOSSTATE_LEN 4
+#define FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN 8
/*
* Port Attribute Type
@@ -84,6 +104,16 @@ enum fc_fdmi_port_attr_type {
FC_FDMI_PORT_ATTR_MAXFRAMESIZE = 0x0004,
FC_FDMI_PORT_ATTR_OSDEVICENAME = 0x0005,
FC_FDMI_PORT_ATTR_HOSTNAME = 0x0006,
+ FC_FDMI_PORT_ATTR_NODENAME = 0x0007,
+ FC_FDMI_PORT_ATTR_PORTNAME = 0x0008,
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME = 0x0009,
+ FC_FDMI_PORT_ATTR_PORTTYPE = 0x000A,
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC = 0x000B,
+ FC_FDMI_PORT_ATTR_FABRICNAME = 0x000C,
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE = 0x000D,
+ FC_FDMI_PORT_ATTR_PORTSTATE = 0x101,
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS = 0x102,
+ FC_FDMI_PORT_ATTR_PORTID = 0x103,
};
/*
@@ -95,6 +125,17 @@ enum fc_fdmi_port_attr_type {
#define FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN 4
#define FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN 256
#define FC_FDMI_PORT_ATTR_HOSTNAME_LEN 256
+#define FC_FDMI_PORT_ATTR_NODENAME_LEN 8
+#define FC_FDMI_PORT_ATTR_PORTNAME_LEN 8
+#define FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN 256
+#define FC_FDMI_PORT_ATTR_PORTTYPE_LEN 4
+#define FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN 4
+#define FC_FDMI_PORT_ATTR_FABRICNAME_LEN 8
+#define FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN 32
+#define FC_FDMI_PORT_ATTR_PORTSTATE_LEN 4
+#define FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN 4
+#define FC_FDMI_PORT_ATTR_PORTID_LEN 4
+
/*
* HBA Attribute ID
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index b71b5c4f418c..7b192d88f186 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -52,7 +52,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2)
}
/*
- * useful common(control and data pathes) macro
+ * useful common(control and data paths) macro
*/
#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
#define hton24(p, v) { \
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 9b87e1a1c646..eeb8d689ff6b 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -399,7 +399,7 @@ struct fc_seq {
* @sid: Source FCID
* @did: Destination FCID
* @esb_stat: ESB exchange status
- * @r_a_tov: Resouce allocation time out value (in msecs)
+ * @r_a_tov: Resource allocation time out value (in msecs)
* @seq_id: The next sequence ID to use
* @encaps: encapsulation information for lower-level driver
* @f_ctl: F_CTL flags for the sequence
@@ -668,7 +668,7 @@ enum fc_lport_event {
* @wwnn: World Wide Node Name
* @service_params: Common service parameters
* @e_d_tov: Error detection timeout value
- * @r_a_tov: Resouce allocation timeout value
+ * @r_a_tov: Resource allocation timeout value
* @rnid_gen: RNID information
* @sg_supp: Indicates if scatter gather is supported
* @seq_offload: Indicates if sequence offload is supported
@@ -841,7 +841,7 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
/**
* lport_priv() - Return the private data from a local port
- * @lport: The local port whose private data is to be retreived
+ * @lport: The local port whose private data is to be retrieved
*/
static inline void *lport_priv(const struct fc_lport *lport)
{
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 02f966e9358f..4ee233e5a6ff 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -145,6 +145,13 @@ static inline void* iscsi_next_hdr(struct iscsi_task *task)
return (void*)task->hdr + task->hdr_len;
}
+static inline bool iscsi_task_is_completed(struct iscsi_task *task)
+{
+ return task->state == ISCSI_TASK_COMPLETED ||
+ task->state == ISCSI_TASK_ABRT_TMF ||
+ task->state == ISCSI_TASK_ABRT_SESS_RECOV;
+}
+
/* Connection's states */
enum {
ISCSI_CONN_INITIAL_STAGE,
@@ -195,12 +202,6 @@ struct iscsi_conn {
unsigned long suspend_tx; /* suspend Tx */
unsigned long suspend_rx; /* suspend Rx */
- /* abort */
- wait_queue_head_t ehwait; /* used in eh_abort() */
- struct iscsi_tm tmhdr;
- struct timer_list tmf_timer;
- int tmf_state; /* see TMF_INITIAL, etc.*/
-
/* negotiated params */
unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
unsigned max_xmit_dlength; /* target_max_recv_dsl */
@@ -270,6 +271,12 @@ struct iscsi_session {
* and recv lock.
*/
struct mutex eh_mutex;
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+ struct timer_list tmf_timer;
+ int tmf_state; /* see TMF_INITIAL, etc.*/
+ struct iscsi_task *running_aborted_task;
/* iSCSI session-wide sequencing */
uint32_t cmdsn;
@@ -424,6 +431,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *);
extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
int);
+extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active);
extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 9271d7a49b90..6fe125a71b60 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -474,10 +474,16 @@ enum service_response {
};
enum exec_status {
- /* The SAM_STAT_.. codes fit in the lower 6 bits, alias some of
- * them here to silence 'case value not in enumerated type' warnings
+ /*
+ * Values 0..0x7f are used to return the SAM_STAT_* codes. To avoid
+ * 'case value not in enumerated type' compiler warnings every value
+ * returned through the exec_status enum needs an alias with the SAS_
+ * prefix here.
*/
- __SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
+ SAS_SAM_STAT_GOOD = SAM_STAT_GOOD,
+ SAS_SAM_STAT_BUSY = SAM_STAT_BUSY,
+ SAS_SAM_STAT_TASK_ABORTED = SAM_STAT_TASK_ABORTED,
+ SAS_SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
SAS_DEV_NO_RESPONSE = 0x80,
SAS_DATA_UNDERRUN,
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 246ced401683..3e46859774c8 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
+#include <scsi/scsi_status.h>
struct scsi_cmnd;
@@ -30,32 +31,6 @@ enum scsi_timeouts {
*/
#define SCAN_WILD_CARD ~0
-/** scsi_status_is_good - check the status return.
- *
- * @status: the status passed up from the driver (including host and
- * driver components)
- *
- * This returns true for known good conditions that may be treated as
- * command completed normally
- */
-static inline int scsi_status_is_good(int status)
-{
- /*
- * FIXME: bit0 is listed as reserved in SCSI-2, but is
- * significant in SCSI-3. For now, we follow the SCSI-2
- * behaviour and ignore reserved bits.
- */
- status &= 0xfe;
- return ((status == SAM_STAT_GOOD) ||
- (status == SAM_STAT_CONDITION_MET) ||
- /* Next two "intermediate" statuses are obsolete in SAM-4 */
- (status == SAM_STAT_INTERMEDIATE) ||
- (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
- /* FIXME: this is obsolete in SAM-3 */
- (status == SAM_STAT_COMMAND_TERMINATED));
-}
-
-
/*
* standard mode-select header prepended to all mode-select commands
*/
@@ -88,94 +63,31 @@ static inline int scsi_is_wlun(u64 lun)
return (lun & 0xff00) == SCSI_W_LUN_BASE;
}
+/**
+ * scsi_status_is_check_condition - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true if the status code is SAM_STAT_CHECK_CONDITION.
+ */
+static inline int scsi_status_is_check_condition(int status)
+{
+ if (status < 0)
+ return false;
+ status &= 0xfe;
+ return status == SAM_STAT_CHECK_CONDITION;
+}
/*
- * MESSAGE CODES
+ * Extended message codes.
*/
-
-#define COMMAND_COMPLETE 0x00
-#define EXTENDED_MESSAGE 0x01
#define EXTENDED_MODIFY_DATA_POINTER 0x00
#define EXTENDED_SDTR 0x01
#define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */
#define EXTENDED_WDTR 0x03
#define EXTENDED_PPR 0x04
#define EXTENDED_MODIFY_BIDI_DATA_PTR 0x05
-#define SAVE_POINTERS 0x02
-#define RESTORE_POINTERS 0x03
-#define DISCONNECT 0x04
-#define INITIATOR_ERROR 0x05
-#define ABORT_TASK_SET 0x06
-#define MESSAGE_REJECT 0x07
-#define NOP 0x08
-#define MSG_PARITY_ERROR 0x09
-#define LINKED_CMD_COMPLETE 0x0a
-#define LINKED_FLG_CMD_COMPLETE 0x0b
-#define TARGET_RESET 0x0c
-#define ABORT_TASK 0x0d
-#define CLEAR_TASK_SET 0x0e
-#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
-#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
-#define TERMINATE_IO_PROC 0x11 /* SCSI-II only */
-#define CLEAR_ACA 0x16
-#define LOGICAL_UNIT_RESET 0x17
-#define SIMPLE_QUEUE_TAG 0x20
-#define HEAD_OF_QUEUE_TAG 0x21
-#define ORDERED_QUEUE_TAG 0x22
-#define IGNORE_WIDE_RESIDUE 0x23
-#define ACA 0x24
-#define QAS_REQUEST 0x55
-
-/* Old SCSI2 names, don't use in new code */
-#define BUS_DEVICE_RESET TARGET_RESET
-#define ABORT ABORT_TASK_SET
-
-/*
- * Host byte codes
- */
-
-#define DID_OK 0x00 /* NO error */
-#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
-#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
-#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
-#define DID_BAD_TARGET 0x04 /* BAD target. */
-#define DID_ABORT 0x05 /* Told to abort for some other reason */
-#define DID_PARITY 0x06 /* Parity error */
-#define DID_ERROR 0x07 /* Internal error */
-#define DID_RESET 0x08 /* Reset by somebody. */
-#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
-#define DID_PASSTHROUGH 0x0a /* Force command past mid-layer */
-#define DID_SOFT_ERROR 0x0b /* The low level driver just wish a retry */
-#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */
-#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also
- * without decrementing the retry count */
-#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution
- * and the driver blocked the port to
- * recover the link. Transport class will
- * retry or fail IO */
-#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */
-#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on
- * other paths */
-#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other
- * paths might yield different results */
-#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */
-#define DID_MEDIUM_ERROR 0x13 /* Medium error */
-#define DID_TRANSPORT_MARGINAL 0x14 /* Transport marginal errors */
-#define DRIVER_OK 0x00 /* Driver status */
-
-/*
- * These indicate the error that occurred, and what is available.
- */
-
-#define DRIVER_BUSY 0x01
-#define DRIVER_SOFT 0x02
-#define DRIVER_MEDIA 0x03
-#define DRIVER_ERROR 0x04
-
-#define DRIVER_INVALID 0x05
-#define DRIVER_TIMEOUT 0x06
-#define DRIVER_HARD 0x07
-#define DRIVER_SENSE 0x08
/*
* Internal return values.
@@ -206,14 +118,10 @@ enum scsi_disposition {
* These are set by:
*
* status byte = set from target device
- * msg_byte = return status from host adapter itself.
+ * msg_byte (unused)
* host_byte = set by low-level driver to indicate status.
- * driver_byte = set by mid-level.
*/
-#define status_byte(result) (((result) >> 1) & 0x7f)
-#define msg_byte(result) (((result) >> 8) & 0xff)
#define host_byte(result) (((result) >> 16) & 0xff)
-#define driver_byte(result) (((result) >> 24) & 0xff)
#define sense_class(sense) (((sense) >> 4) & 0x7)
#define sense_error(sense) ((sense) & 0xf)
@@ -277,4 +185,35 @@ enum scsi_disposition {
/* Used to obtain the PCI location of a device */
#define SCSI_IOCTL_GET_PCI 0x5387
+/** scsi_status_is_good - check the status return.
+ *
+ * @status: the status passed up from the driver (including host and
+ * driver components)
+ *
+ * This returns true for known good conditions that may be treated as
+ * command completed normally
+ */
+static inline bool scsi_status_is_good(int status)
+{
+ if (status < 0)
+ return false;
+
+ if (host_byte(status) == DID_NO_CONNECT)
+ return false;
+
+ /*
+ * FIXME: bit0 is listed as reserved in SCSI-2, but is
+ * significant in SCSI-3. For now, we follow the SCSI-2
+ * behaviour and ignore reserved bits.
+ */
+ status &= 0xfe;
+ return ((status == SAM_STAT_GOOD) ||
+ (status == SAM_STAT_CONDITION_MET) ||
+ /* Next two "intermediate" statuses are obsolete in SAM-4 */
+ (status == SAM_STAT_INTERMEDIATE) ||
+ (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
+ /* FIXME: this is obsolete in SAM-3 */
+ (status == SAM_STAT_COMMAND_TERMINATED));
+}
+
#endif /* _SCSI_SCSI_H */
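A minimal sketch of how a caller might use the relocated helpers above; the function name and the packed 'result' value are illustrative only, not part of this patch:

	static void example_check_result(int result)
	{
		if (host_byte(result) != DID_OK)
			return;				/* transport-level failure */

		if (scsi_status_is_good(result))
			return;				/* command completed normally */

		if (scsi_status_is_check_condition(result)) {
			/* sense data should be examined here */
		}
	}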
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
index 6b8128005af8..9b1f0f424a79 100644
--- a/include/scsi/scsi_bsg_iscsi.h
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -84,7 +84,7 @@ struct iscsi_bsg_reply {
*/
uint32_t result;
- /* If there was reply_payload, how much was recevied ? */
+ /* If there was reply_payload, how much was received ? */
uint32_t reply_payload_rcv_len;
union {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index fed024f4c02a..779a59fe8676 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -315,9 +315,9 @@ static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
cmd->result = (cmd->result & 0xffffff00) | status;
}
-static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
+static inline u8 get_status_byte(struct scsi_cmnd *cmd)
{
- cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+ return cmd->result & 0xff;
}
static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
@@ -325,9 +325,36 @@ static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}
-static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+static inline u8 get_host_byte(struct scsi_cmnd *cmd)
{
- cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
+ return (cmd->result >> 16) & 0xff;
+}
+
+/**
+ * scsi_msg_to_host_byte() - translate message byte
+ * @cmd: the SCSI command
+ * @msg: the SCSI parallel message byte to translate
+ *
+ * Translate the SCSI parallel message byte to a matching
+ * host byte setting. A message of COMMAND_COMPLETE indicates
+ * a successful command execution; any other message indicates
+ * an error. As the messages themselves only have a meaning
+ * for the SCSI parallel protocol this function translates
+ * them into a matching host byte value for SCSI EH.
+ */
+static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg)
+{
+ switch (msg) {
+ case COMMAND_COMPLETE:
+ break;
+ case ABORT_TASK_SET:
+ set_host_byte(cmd, DID_ABORT);
+ break;
+ case TARGET_RESET:
+ set_host_byte(cmd, DID_RESET);
+ break;
+ default:
+ set_host_byte(cmd, DID_ERROR);
+ break;
+ }
}
static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
@@ -341,4 +368,7 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
return xfer_len;
}
+extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc,
+ u8 key, u8 asc, u8 ascq);
+
#endif /* _SCSI_SCSI_CMND_H */
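A hedged sketch of the completion-path pattern the new helper is intended for in a parallel-SCSI LLD; the function and the 'msg'/'sam_status' values are hypothetical and replace the removed set_msg_byte() usage:

	static void example_complete(struct scsi_cmnd *cmd, u8 msg, u8 sam_status)
	{
		set_status_byte(cmd, sam_status);
		/* COMMAND_COMPLETE leaves the host byte untouched; anything else
		 * is mapped to DID_ABORT, DID_RESET or DID_ERROR. */
		scsi_msg_to_host_byte(cmd, msg);
		/* ... then complete the command as usual ... */
	}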
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index d0bf88d77f02..75363707b73f 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -764,7 +764,7 @@ extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
- int status);
+ enum scsi_host_status status);
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
struct device *dev)
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index c36860111932..f017843a8124 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -190,39 +190,21 @@ struct scsi_varlen_cdb_hdr {
* SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
* T10/1561-D Revision 4 Draft dated 7th November 2002.
*/
-#define SAM_STAT_GOOD 0x00
-#define SAM_STAT_CHECK_CONDITION 0x02
-#define SAM_STAT_CONDITION_MET 0x04
-#define SAM_STAT_BUSY 0x08
-#define SAM_STAT_INTERMEDIATE 0x10
-#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-#define SAM_STAT_TASK_SET_FULL 0x28
-#define SAM_STAT_ACA_ACTIVE 0x30
-#define SAM_STAT_TASK_ABORTED 0x40
-
-/*
- * Status codes. These are deprecated as they are shifted 1 bit right
- * from those found in the SCSI standards. This causes confusion for
- * applications that are ported to several OSes. Prefer SAM Status codes
- * above.
- */
-
-#define GOOD 0x00
-#define CHECK_CONDITION 0x01
-#define CONDITION_GOOD 0x02
-#define BUSY 0x04
-#define INTERMEDIATE_GOOD 0x08
-#define INTERMEDIATE_C_GOOD 0x0a
-#define RESERVATION_CONFLICT 0x0c
-#define COMMAND_TERMINATED 0x11
-#define QUEUE_FULL 0x14
-#define ACA_ACTIVE 0x18
-#define TASK_ABORTED 0x20
-
-#define STATUS_MASK 0xfe
+enum sam_status {
+ SAM_STAT_GOOD = 0x00,
+ SAM_STAT_CHECK_CONDITION = 0x02,
+ SAM_STAT_CONDITION_MET = 0x04,
+ SAM_STAT_BUSY = 0x08,
+ SAM_STAT_INTERMEDIATE = 0x10,
+ SAM_STAT_INTERMEDIATE_CONDITION_MET = 0x14,
+ SAM_STAT_RESERVATION_CONFLICT = 0x18,
+ SAM_STAT_COMMAND_TERMINATED = 0x22, /* obsolete in SAM-3 */
+ SAM_STAT_TASK_SET_FULL = 0x28,
+ SAM_STAT_ACA_ACTIVE = 0x30,
+ SAM_STAT_TASK_ABORTED = 0x40,
+};
+#define STATUS_MASK 0xfe
/*
* SENSE KEYS
*/
@@ -341,4 +323,16 @@ enum zbc_zone_cond {
ZBC_ZONE_COND_OFFLINE = 0xf,
};
+/* Version descriptor values for INQUIRY */
+enum scsi_version_descriptor {
+ SCSI_VERSION_DESCRIPTOR_FCP4 = 0x0a40,
+ SCSI_VERSION_DESCRIPTOR_ISCSI = 0x0960,
+ SCSI_VERSION_DESCRIPTOR_SAM5 = 0x00a0,
+ SCSI_VERSION_DESCRIPTOR_SAS3 = 0x0c60,
+ SCSI_VERSION_DESCRIPTOR_SBC3 = 0x04c0,
+ SCSI_VERSION_DESCRIPTOR_SBP3 = 0x0980,
+ SCSI_VERSION_DESCRIPTOR_SPC4 = 0x0460,
+ SCSI_VERSION_DESCRIPTOR_SRP = 0x0940
+};
+
#endif /* _SCSI_PROTO_H_ */
diff --git a/include/scsi/scsi_status.h b/include/scsi/scsi_status.h
new file mode 100644
index 000000000000..31d30cee1869
--- /dev/null
+++ b/include/scsi/scsi_status.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SCSI_SCSI_STATUS_H
+#define _SCSI_SCSI_STATUS_H
+
+#include <linux/types.h>
+#include <scsi/scsi_proto.h>
+
+/* Message codes. */
+enum scsi_msg_byte {
+ COMMAND_COMPLETE = 0x00,
+ EXTENDED_MESSAGE = 0x01,
+ SAVE_POINTERS = 0x02,
+ RESTORE_POINTERS = 0x03,
+ DISCONNECT = 0x04,
+ INITIATOR_ERROR = 0x05,
+ ABORT_TASK_SET = 0x06,
+ MESSAGE_REJECT = 0x07,
+ NOP = 0x08,
+ MSG_PARITY_ERROR = 0x09,
+ LINKED_CMD_COMPLETE = 0x0a,
+ LINKED_FLG_CMD_COMPLETE = 0x0b,
+ TARGET_RESET = 0x0c,
+ ABORT_TASK = 0x0d,
+ CLEAR_TASK_SET = 0x0e,
+ INITIATE_RECOVERY = 0x0f, /* SCSI-II only */
+ RELEASE_RECOVERY = 0x10, /* SCSI-II only */
+ TERMINATE_IO_PROC = 0x11, /* SCSI-II only */
+ CLEAR_ACA = 0x16,
+ LOGICAL_UNIT_RESET = 0x17,
+ SIMPLE_QUEUE_TAG = 0x20,
+ HEAD_OF_QUEUE_TAG = 0x21,
+ ORDERED_QUEUE_TAG = 0x22,
+ IGNORE_WIDE_RESIDUE = 0x23,
+ ACA = 0x24,
+ QAS_REQUEST = 0x55,
+
+ /* Old SCSI2 names, don't use in new code */
+ BUS_DEVICE_RESET = TARGET_RESET,
+ ABORT = ABORT_TASK_SET,
+};
+
+/* Host byte codes. */
+enum scsi_host_status {
+ DID_OK = 0x00, /* NO error */
+ DID_NO_CONNECT = 0x01, /* Couldn't connect before timeout period */
+ DID_BUS_BUSY = 0x02, /* BUS stayed busy through time out period */
+ DID_TIME_OUT = 0x03, /* TIMED OUT for other reason */
+ DID_BAD_TARGET = 0x04, /* BAD target. */
+ DID_ABORT = 0x05, /* Told to abort for some other reason */
+ DID_PARITY = 0x06, /* Parity error */
+ DID_ERROR = 0x07, /* Internal error */
+ DID_RESET = 0x08, /* Reset by somebody. */
+ DID_BAD_INTR = 0x09, /* Got an interrupt we weren't expecting. */
+ DID_PASSTHROUGH = 0x0a, /* Force command past mid-layer */
+ DID_SOFT_ERROR = 0x0b, /* The low level driver just wishes a retry */
+ DID_IMM_RETRY = 0x0c, /* Retry without decrementing retry count */
+ DID_REQUEUE = 0x0d, /* Requeue command (no immediate retry) also
+ * without decrementing the retry count */
+ DID_TRANSPORT_DISRUPTED = 0x0e, /* Transport error disrupted execution
+ * and the driver blocked the port to
+ * recover the link. Transport class will
+ * retry or fail IO */
+ DID_TRANSPORT_FAILFAST = 0x0f, /* Transport class fastfailed the io */
+ DID_TARGET_FAILURE = 0x10, /* Permanent target failure, do not retry on
+ * other paths */
+ DID_NEXUS_FAILURE = 0x11, /* Permanent nexus failure, retry on other
+ * paths might yield different results */
+ DID_ALLOC_FAILURE = 0x12, /* Space allocation on the device failed */
+ DID_MEDIUM_ERROR = 0x13, /* Medium error */
+ DID_TRANSPORT_MARGINAL = 0x14, /* Transport marginal errors */
+};
+
+#endif /* _SCSI_SCSI_STATUS_H */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 14214ee121ad..e80a7c542c88 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -517,10 +517,11 @@ enum fc_host_event_code {
* managed by the transport w/o driver interaction.
*/
+#define FC_VENDOR_IDENTIFIER 8
#define FC_FC4_LIST_SIZE 32
#define FC_SYMBOLIC_NAME_SIZE 256
#define FC_VERSION_STRING_SIZE 64
-#define FC_SERIAL_NUMBER_SIZE 80
+#define FC_SERIAL_NUMBER_SIZE 64
struct fc_host_attrs {
/* Fixed Attributes */
@@ -532,6 +533,10 @@ struct fc_host_attrs {
u32 supported_speeds;
u32 maxframe_size;
u16 max_npiv_vports;
+ u32 max_ct_payload;
+ u32 num_ports;
+ u32 num_discovered_ports;
+ u32 bootbios_state;
char serial_number[FC_SERIAL_NUMBER_SIZE];
char manufacturer[FC_SERIAL_NUMBER_SIZE];
char model[FC_SYMBOLIC_NAME_SIZE];
@@ -540,6 +545,9 @@ struct fc_host_attrs {
char driver_version[FC_VERSION_STRING_SIZE];
char firmware_version[FC_VERSION_STRING_SIZE];
char optionrom_version[FC_VERSION_STRING_SIZE];
+ char vendor_identifier[FC_VENDOR_IDENTIFIER];
+ char bootbios_version[FC_SYMBOLIC_NAME_SIZE];
+
/* Dynamic Attributes */
u32 port_id;
@@ -573,6 +581,9 @@ struct fc_host_attrs {
/* bsg support */
struct request_queue *rqst_q;
+
+ /* FDMI support version */
+ u8 fdmi_version;
};
#define shost_to_fc_host(x) \
@@ -652,6 +663,18 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
+#define fc_host_max_ct_payload(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->max_ct_payload)
+#define fc_host_vendor_identifier(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->vendor_identifier)
+#define fc_host_num_discovered_ports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->num_discovered_ports)
+#define fc_host_num_ports(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->num_ports)
+#define fc_host_bootbios_version(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->bootbios_version)
+#define fc_host_bootbios_state(x) \
+ (((struct fc_host_attrs *)(x)->shost_data)->bootbios_state)
/* The functions by which the transport class and the driver communicate */
struct fc_function_template {
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index fc5a39839b4b..c5d7810fd792 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -82,6 +82,7 @@ struct iscsi_transport {
void (*destroy_session) (struct iscsi_cls_session *session);
struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
uint32_t cid);
+ void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active);
int (*bind_conn) (struct iscsi_cls_session *session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_eph, int is_leading);
@@ -196,15 +197,23 @@ enum iscsi_connection_state {
ISCSI_CONN_BOUND,
};
+#define ISCSI_CLS_CONN_BIT_CLEANUP 1
+
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
- struct list_head conn_list_err; /* item in connlist_err */
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
+ /*
+ * This protects the conn startup and binding/unbinding of the ep to
+ * the conn. Unbinding includes ep_disconnect and stop_conn.
+ */
struct mutex ep_mutex;
struct iscsi_endpoint *ep;
+ unsigned long flags;
+ struct work_struct cleanup_work;
+
struct device dev; /* sysfs transport/container device */
enum iscsi_connection_state state;
};
@@ -434,6 +443,8 @@ extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
int dd_size, uint32_t cid);
+extern void iscsi_put_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_get_conn(struct iscsi_cls_conn *conn);
extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
extern void iscsi_unblock_session(struct iscsi_cls_session *session);
extern void iscsi_block_session(struct iscsi_cls_session *session);
@@ -441,6 +452,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+extern void iscsi_put_endpoint(struct iscsi_endpoint *ep);
extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
struct iscsi_transport *t,
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 7327e12f3373..843cefb8efce 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -131,6 +131,39 @@ struct compat_sg_io_hdr {
#define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */
#define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */
+/*
+ * Obsolete DRIVER_SENSE driver byte
+ *
+ * Originally the SCSI midlayer would set the DRIVER_SENSE driver byte when
+ * a sense code was generated and a sense buffer was allocated.
+ * However, as nowadays every SCSI command has a sense code allocated, this
+ * distinction became moot as one could check the sense buffer directly.
+ * Consequently this byte is not set anymore from the midlayer, but SG will
+ * keep setting this byte to be compatible with previous releases.
+ */
+#define DRIVER_SENSE 0x08
+/* Obsolete driver_byte() declaration */
+#define driver_byte(result) (((result) >> 24) & 0xff)
+
+/*
+ * Original linux SCSI Status codes. They are shifted 1 bit right
+ * from those found in the SCSI standards.
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+#define ACA_ACTIVE 0x18
+#define TASK_ABORTED 0x20
+
+/* Obsolete status_byte() declaration */
+#define status_byte(result) (((result) >> 1) & 0x7f)
typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */
@@ -145,7 +178,7 @@ typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */
typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */
- char orphan; /* 0 -> normal request, 1 -> from interruped SG_IO */
+ char orphan; /* 0 -> normal request, 1 -> from interrupted SG_IO */
char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */
char problem; /* 0 -> no problem detected, 1 -> error to report */
int pack_id; /* pack_id associated with request */
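An illustrative check of the legacy macros kept here for compatibility; the packed 'result' value is made up for the example (driver byte in bits 24-31, SAM status in bits 0-7):

	int result = (DRIVER_SENSE << 24) | 0x02;	/* 0x02 == SAM_STAT_CHECK_CONDITION */

	if (driver_byte(result) & DRIVER_SENSE) {
		/* sense data was captured */
	}
	if (status_byte(result) == CHECK_CONDITION) {	/* 0x02 >> 1 == 0x01 */
		/* device reported a check condition */
	}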
diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h
index 42d6aeb951fa..0bf610acafd0 100644
--- a/include/soc/imx/cpu.h
+++ b/include/soc/imx/cpu.h
@@ -9,6 +9,7 @@
#define MXC_CPU_MX27 27
#define MXC_CPU_MX31 31
#define MXC_CPU_MX35 35
+#define MXC_CPU_MX50 50
#define MXC_CPU_MX51 51
#define MXC_CPU_MX53 53
#define MXC_CPU_IMX6SL 0x60
diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h
index 98027a76ce3d..af41ad80ec21 100644
--- a/include/soc/tegra/common.h
+++ b/include/soc/tegra/common.h
@@ -6,6 +6,37 @@
#ifndef __SOC_TEGRA_COMMON_H__
#define __SOC_TEGRA_COMMON_H__
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * Tegra SoC core device OPP table configuration
+ *
+ * @init_state: pre-initialize OPP state of a device
+ */
+struct tegra_core_opp_params {
+ bool init_state;
+};
+
+#ifdef CONFIG_ARCH_TEGRA
bool soc_is_tegra(void);
+int devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params);
+#else
+static inline bool soc_is_tegra(void)
+{
+ return false;
+}
+
+static inline int
+devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __SOC_TEGRA_COMMON_H__ */
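A hedged probe-time sketch; 'example_probe' is hypothetical, and treating -ENODEV as "OPP support not available" follows the stub above:

	static int example_probe(struct device *dev)
	{
		struct tegra_core_opp_params opp_params = {
			.init_state = true,		/* pre-initialize the OPP state */
		};
		int err;

		err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
		if (err && err != -ENODEV)
			return err;			/* real failure */

		return 0;				/* -ENODEV: no OPP table/support */
	}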
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 78cbc787a4dc..990701f788bc 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -52,14 +52,28 @@ struct tegra_sku_info {
enum tegra_revision revision;
};
+#ifdef CONFIG_ARCH_TEGRA
+extern struct tegra_sku_info tegra_sku_info;
u32 tegra_read_straps(void);
u32 tegra_read_ram_code(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
-
-#ifdef CONFIG_ARCH_TEGRA
-extern struct tegra_sku_info tegra_sku_info;
#else
static struct tegra_sku_info tegra_sku_info __maybe_unused;
+
+static inline u32 tegra_read_straps(void)
+{
+ return 0;
+}
+
+static inline u32 tegra_read_ram_code(void)
+{
+ return 0;
+}
+
+static inline int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+ return -ENODEV;
+}
#endif
struct device *tegra_soc_device_register(void);
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index d2fbe6a8b25b..1066b1194a5a 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
+#include <linux/irq.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
@@ -17,34 +18,48 @@ struct clk;
struct device;
struct page;
-struct tegra_smmu_enable {
- unsigned int reg;
- unsigned int bit;
-};
-
struct tegra_mc_timing {
unsigned long rate;
u32 *emem_data;
};
-/* latency allowance */
-struct tegra_mc_la {
- unsigned int reg;
- unsigned int shift;
- unsigned int mask;
- unsigned int def;
-};
-
struct tegra_mc_client {
unsigned int id;
const char *name;
- unsigned int swgroup;
+ /*
+ * For Tegra210 and earlier, this is the SWGROUP ID used for IOVA translations in the
+ * Tegra SMMU, whereas on Tegra186 and later this is the ID used to override the ARM SMMU
+ * stream ID used for IOVA translations for the given memory client.
+ */
+ union {
+ unsigned int swgroup;
+ unsigned int sid;
+ };
unsigned int fifo_size;
- struct tegra_smmu_enable smmu;
- struct tegra_mc_la la;
+ struct {
+ /* Tegra SMMU enable (Tegra210 and earlier) */
+ struct {
+ unsigned int reg;
+ unsigned int bit;
+ } smmu;
+
+ /* latency allowance */
+ struct {
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int def;
+ } la;
+
+ /* stream ID overrides (Tegra186 and later) */
+ struct {
+ unsigned int override;
+ unsigned int security;
+ } sid;
+ } regs;
};
struct tegra_smmu_swgroup {
@@ -155,6 +170,19 @@ struct tegra_mc_icc_ops {
void *data);
};
+struct tegra_mc_ops {
+ /*
+ * @probe: Callback to set up SoC-specific bits of the memory controller. This is called
+ * after basic, common set up that is done by the SoC-agnostic bits.
+ */
+ int (*probe)(struct tegra_mc *mc);
+ void (*remove)(struct tegra_mc *mc);
+ int (*suspend)(struct tegra_mc *mc);
+ int (*resume)(struct tegra_mc *mc);
+ irqreturn_t (*handle_irq)(int irq, void *data);
+ int (*probe_device)(struct tegra_mc *mc, struct device *dev);
+};
+
struct tegra_mc_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -176,8 +204,7 @@ struct tegra_mc_soc {
unsigned int num_resets;
const struct tegra_mc_icc_ops *icc_ops;
-
- int (*init)(struct tegra_mc *mc);
+ const struct tegra_mc_ops *ops;
};
struct tegra_mc {
@@ -210,12 +237,19 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
#ifdef CONFIG_TEGRA_MC
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev);
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
#else
static inline struct tegra_mc *
devm_tegra_memory_controller_get(struct device *dev)
{
return ERR_PTR(-ENODEV);
}
+
+static inline int
+tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ return -ENODEV;
+}
#endif
#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 361cb64246f7..d186bccd125d 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -171,6 +171,8 @@ int tegra_io_rail_power_off(unsigned int id);
void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
+bool tegra_pmc_core_domain_state_synced(void);
+
#else
static inline int tegra_powergate_power_on(unsigned int id)
{
@@ -227,6 +229,11 @@ static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
{
}
+static inline bool tegra_pmc_core_domain_state_synced(void)
+{
+ return false;
+}
+
#endif /* CONFIG_SOC_TEGRA_PMC */
#if defined(CONFIG_SOC_TEGRA_PMC) && defined(CONFIG_PM_SLEEP)
diff --git a/include/sound/core.h b/include/sound/core.h
index 1f9aef0adbc9..c4ade121727d 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -128,7 +128,9 @@ struct snd_card {
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
+ atomic_t power_ref;
wait_queue_head_t power_sleep;
+ wait_queue_head_t power_ref_sleep;
#endif
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -142,21 +144,61 @@ struct snd_card {
#ifdef CONFIG_PM
static inline unsigned int snd_power_get_state(struct snd_card *card)
{
- return card->power_state;
+ return READ_ONCE(card->power_state);
}
static inline void snd_power_change_state(struct snd_card *card, unsigned int state)
{
- card->power_state = state;
+ WRITE_ONCE(card->power_state, state);
wake_up(&card->power_sleep);
}
+/**
+ * snd_power_ref - Take the reference count for power control
+ * @card: sound card object
+ *
+ * The power_ref reference of the card is used to block the
+ * snd_power_sync_ref() operation. This function increments the reference.
+ * The counterpart snd_power_unref() has to be called appropriately later.
+ */
+static inline void snd_power_ref(struct snd_card *card)
+{
+ atomic_inc(&card->power_ref);
+}
+
+/**
+ * snd_power_unref - Release the reference count for power control
+ * @card: sound card object
+ */
+static inline void snd_power_unref(struct snd_card *card)
+{
+ if (atomic_dec_and_test(&card->power_ref))
+ wake_up(&card->power_ref_sleep);
+}
+
+/**
+ * snd_power_sync_ref - wait until the card power_ref is freed
+ * @card: sound card object
+ *
+ * This function is used to synchronize with the pending power_ref being
+ * released.
+ */
+static inline void snd_power_sync_ref(struct snd_card *card)
+{
+ wait_event(card->power_ref_sleep, !atomic_read(&card->power_ref));
+}
+
/* init.c */
-int snd_power_wait(struct snd_card *card, unsigned int power_state);
+int snd_power_wait(struct snd_card *card);
+int snd_power_ref_and_wait(struct snd_card *card);
#else /* ! CONFIG_PM */
-static inline int snd_power_wait(struct snd_card *card, unsigned int state) { return 0; }
+static inline int snd_power_wait(struct snd_card *card) { return 0; }
+static inline void snd_power_ref(struct snd_card *card) {}
+static inline void snd_power_unref(struct snd_card *card) {}
+static inline int snd_power_ref_and_wait(struct snd_card *card) { return 0; }
+static inline void snd_power_sync_ref(struct snd_card *card) {}
#define snd_power_get_state(card) ({ (void)(card); SNDRV_CTL_POWER_D0; })
#define snd_power_change_state(card, state) do { (void)(card); } while (0)
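A sketch of the intended calling pattern, assumed from the kernel-doc above rather than taken from this hunk: hold the reference while touching the hardware so that snd_power_sync_ref() is blocked, and drop it unconditionally afterwards:

	static int example_do_io(struct snd_card *card)
	{
		int err;

		err = snd_power_ref_and_wait(card);	/* take a ref and wait for D0 */
		if (!err) {
			/* ... access the device ... */
		}
		snd_power_unref(card);			/* lets snd_power_sync_ref() proceed */
		return err;
	}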
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 4b3a1d374b90..4fc733c8c570 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -65,13 +65,23 @@ struct hdmi_codec_ops {
/*
* Configures HDMI-encoder for audio stream.
- * Mandatory
+ * Having either prepare or hw_params is mandatory.
*/
int (*hw_params)(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
/*
+ * Configures HDMI-encoder for audio stream. Can be called
+ * multiple times for each setup.
+ *
+ * Having either prepare or hw_params is mandatory.
+ */
+ int (*prepare)(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /*
* Shuts down the audio stream.
* Mandatory
*/
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 5daa937684a4..44d87775b352 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -12,6 +12,7 @@
#include <asm/page.h>
struct device;
+struct vm_area_struct;
/*
* buffer device info
@@ -64,84 +65,19 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * Scatter-Gather generic device pages
- */
-void *snd_malloc_sgbuf_pages(struct device *device,
- size_t size, struct snd_dma_buffer *dmab,
- size_t *res_size);
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);
-
-struct snd_sg_page {
- void *buf;
- dma_addr_t addr;
-};
-
-struct snd_sg_buf {
- int size; /* allocated byte size */
- int pages; /* allocated pages */
- int tblsize; /* allocated table size */
- struct snd_sg_page *table; /* address table */
- struct page **page_table; /* page table (for vmap/vunmap) */
- struct device *dev;
-};
-
-/*
- * return the physical address at the corresponding offset
- */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- struct snd_sg_buf *sgbuf = dmab->private_data;
- dma_addr_t addr;
-
- if (!sgbuf)
- return dmab->addr + offset;
- addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
- addr &= ~((dma_addr_t)PAGE_SIZE - 1);
- return addr + offset % PAGE_SIZE;
-}
-
-/*
- * return the virtual address at the corresponding offset
- */
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- struct snd_sg_buf *sgbuf = dmab->private_data;
-
- if (!sgbuf)
- return dmab->area + offset;
- return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
-}
-
-unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
- unsigned int ofs, unsigned int size);
-#else
-/* non-SG versions */
-static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- return dmab->addr + offset;
-}
-
-static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- return dmab->area + offset;
-}
-
-#define snd_sgbuf_get_chunk_size(dmab, ofs, size) (size)
-
-#endif /* CONFIG_SND_DMA_SGBUF */
-
/* allocate/release a buffer */
int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
struct snd_dma_buffer *dmab);
int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
struct snd_dma_buffer *dmab);
void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area);
+
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size);
#endif /* __SOUND_MEMALLOC_H */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 2e1200d17d0c..938f36050a5e 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1066,6 +1066,7 @@ void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
void snd_pcm_set_sync(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream);
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
void *buf, bool interleaved,
@@ -1253,14 +1254,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * SG-buffer handling
- */
-#define snd_pcm_substream_sgbuf(substream) \
- snd_pcm_get_dma_buf(substream)->private_data
-#endif /* SND_DMA_SGBUF */
-
/**
* snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
* @substream: PCM substream
@@ -1273,17 +1266,6 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
}
/**
- * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
- * @substream: PCM substream
- * @ofs: byte offset
- */
-static inline void *
-snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
-{
- return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
-}
-
-/**
* snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
* contig. page from the given size
* @substream: PCM substream
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0939aa45e2fe..64e84441cde1 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -4,6 +4,14 @@
#include <linux/types.h>
+int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len);
+
+int snd_pcm_fill_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
+ size_t len);
+
+int snd_pcm_fill_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len);
+
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len);
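A hedged usage sketch of the new helpers; the 4-byte channel-status buffer and the surrounding 'runtime' context are assumptions:

	u8 cs[4];
	int err;

	err = snd_pcm_create_iec958_consumer_default(cs, sizeof(cs));
	if (err < 0)
		return err;

	/* later, once the runtime parameters are known */
	err = snd_pcm_fill_iec958_consumer(runtime, cs, sizeof(cs));
	if (err < 0)
		return err;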
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 334842daa904..989e1517332d 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -81,6 +81,8 @@ struct snd_rawmidi_substream {
bool opened; /* open flag */
bool append; /* append flag (merge more streams) */
bool active_sensing; /* send active sensing when close */
+ unsigned int framing; /* whether to frame input data */
+ unsigned int clock_type; /* clock source to use for input framing */
int use_count; /* use counter (for output) */
size_t bytes;
struct snd_rawmidi *rmidi;
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 0bc29c4516e7..0dcb361a98bb 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -36,6 +36,22 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_MSB SND_SOC_DAIFMT_LEFT_J
#define SND_SOC_DAIFMT_LSB SND_SOC_DAIFMT_RIGHT_J
+/* Describes the possible PCM format */
+/*
+ * use SND_SOC_DAI_FORMAT_xx as each shift.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT 0
+#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_I2S (1 << SND_SOC_DAI_FORMAT_I2S)
+#define SND_SOC_POSSIBLE_DAIFMT_RIGHT_J (1 << SND_SOC_DAI_FORMAT_RIGHT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_LEFT_J (1 << SND_SOC_DAI_FORMAT_LEFT_J)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_A (1 << SND_SOC_DAI_FORMAT_DSP_A)
+#define SND_SOC_POSSIBLE_DAIFMT_DSP_B (1 << SND_SOC_DAI_FORMAT_DSP_B)
+#define SND_SOC_POSSIBLE_DAIFMT_AC97 (1 << SND_SOC_DAI_FORMAT_AC97)
+#define SND_SOC_POSSIBLE_DAIFMT_PDM (1 << SND_SOC_DAI_FORMAT_PDM)
+
/*
* DAI Clock gating.
*
@@ -45,6 +61,17 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */
#define SND_SOC_DAIFMT_GATED (0 << 4) /* clock is gated */
+/* Describes the possible PCM format */
+/*
+ * define GATED -> CONT. GATED will be selected if both are selected.
+ * see
+ * snd_soc_runtime_get_dai_fmt()
+ */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT 16
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_GATED (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CONT (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT)
+
/*
* DAI hardware signal polarity.
*
@@ -71,6 +98,14 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_IB_NF (3 << 8) /* invert BCLK + nor FRM */
#define SND_SOC_DAIFMT_IB_IF (4 << 8) /* invert BCLK + FRM */
+/* Describes the possible PCM format */
+#define SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT 32
+#define SND_SOC_POSSIBLE_DAIFMT_INV_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_NB_NF (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_NB_IF (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_IB_NF (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_IB_IF (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT)
+
/*
* DAI hardware clock providers/consumers
*
@@ -89,6 +124,14 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC
#define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC
+/* Describes the possible PCM format */
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48
+#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFP (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFP (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFC (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFC (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
+
#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
#define SND_SOC_DAIFMT_INV_MASK 0x0f00
@@ -131,6 +174,8 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio);
/* Digital Audio interface formatting */
+int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd);
+u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority);
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -292,6 +337,16 @@ struct snd_soc_dai_ops {
snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
struct snd_soc_dai *);
+ /*
+ * Format list for auto selection.
+ * Format will be increased if priority format was
+ * not selected.
+ * see
+ * snd_soc_dai_get_fmt()
+ */
+ u64 *auto_selectable_formats;
+ int num_auto_selectable_formats;
+
/* bit field */
unsigned int no_capture_mute:1;
};
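A hypothetical DAI driver advertising auto-selectable formats through the new fields; the names and the chosen bits are illustrative:

	static u64 example_dai_formats[] = {
		/* priority 0: the formats the DAI prefers */
		SND_SOC_POSSIBLE_DAIFMT_I2S |
		SND_SOC_POSSIBLE_DAIFMT_LEFT_J,
	};

	static const struct snd_soc_dai_ops example_dai_ops = {
		.auto_selectable_formats	= example_dai_formats,
		.num_auto_selectable_formats	= ARRAY_SIZE(example_dai_formats),
	};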
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 328cf763d9b4..4afd667e124c 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -54,7 +54,7 @@ struct snd_soc_dobj_control {
/* dynamic widget object */
struct snd_soc_dobj_widget {
- unsigned int kcontrol_type; /* kcontrol type: mixer, enum, bytes */
+ unsigned int *kcontrol_type; /* kcontrol type: mixer, enum, bytes */
};
/* generic dynamic object - all dynamic objects belong to this struct */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index e746da996351..8e6dd8a257c5 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -712,6 +712,12 @@ struct snd_soc_dai_link {
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int ignore:1;
+ /* This flag will reorder the stop sequence. When enabled, the
+ * DMA controller stop sequence will be invoked first, followed by
+ * the CPU DAI driver stop sequence.
+ */
+ unsigned int stop_dma_first:1;
+
#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
#endif
@@ -1232,10 +1238,23 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname);
-unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
- const char *prefix,
- struct device_node **bitclkmaster,
- struct device_node **framemaster);
+
+unsigned int snd_soc_daifmt_clock_provider_fliped(unsigned int dai_fmt);
+unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame);
+
+unsigned int snd_soc_daifmt_parse_format(struct device_node *np, const char *prefix);
+unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
+ const char *prefix,
+ struct device_node **bitclkmaster,
+ struct device_node **framemaster);
+#define snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix) \
+ snd_soc_daifmt_parse_clock_provider_raw(np, prefix, NULL, NULL)
+#define snd_soc_daifmt_parse_clock_provider_as_phandle \
+ snd_soc_daifmt_parse_clock_provider_raw
+#define snd_soc_daifmt_parse_clock_provider_as_flag(np, prefix) \
+ snd_soc_daifmt_clock_provider_from_bitmap( \
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix))
+
int snd_soc_get_dai_id(struct device_node *ep);
int snd_soc_get_dai_name(const struct of_phandle_args *args,
const char **dai_name);
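A hedged machine-driver fragment showing how a call to the removed snd_soc_of_parse_daifmt() might be replaced with the new split helpers; 'np' and 'prefix' are assumed to be the usual device_node pointer and property prefix:

	unsigned int fmt;

	fmt  = snd_soc_daifmt_parse_format(np, prefix);
	fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np, prefix);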
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index d1f7d2a45354..85c16c266eac 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -326,6 +326,7 @@ struct t10_wwn {
char model[INQUIRY_MODEL_LEN + 1];
char revision[INQUIRY_REVISION_LEN + 1];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+ u32 company_id;
spinlock_t t10_vpd_lock;
struct se_device *t10_dev;
struct config_group t10_wwn_group;
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 3ccf591b2374..9f73ed2cf061 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -174,6 +174,34 @@ enum afs_vl_operation {
afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
};
+enum afs_cm_operation {
+ afs_CB_CallBack = 204, /* AFS break callback promises */
+ afs_CB_InitCallBackState = 205, /* AFS initialise callback state */
+ afs_CB_Probe = 206, /* AFS probe client */
+ afs_CB_GetLock = 207, /* AFS get contents of CM lock table */
+ afs_CB_GetCE = 208, /* AFS get cache file description */
+ afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */
+ afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */
+ afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */
+ afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */
+};
+
+enum yfs_cm_operation {
+ yfs_CB_Probe = 206, /* YFS probe client */
+ yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */
+ yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */
+ yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */
+ yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */
+ yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */
+ yfs_CB_GetServerPrefs = 215,
+ yfs_CB_GetCellServDV = 216,
+ yfs_CB_GetLocalCell = 217,
+ yfs_CB_GetCacheConfig = 218,
+ yfs_CB_GetCellByNum = 65537,
+ yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */
+ yfs_CB_CallBack = 64204,
+};
+
enum afs_edit_dir_op {
afs_edit_dir_create,
afs_edit_dir_create_error,
@@ -436,6 +464,32 @@ enum afs_cb_break_reason {
EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
+#define afs_cm_operations \
+ EM(afs_CB_CallBack, "CB.CallBack") \
+ EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \
+ EM(afs_CB_Probe, "CB.Probe") \
+ EM(afs_CB_GetLock, "CB.GetLock") \
+ EM(afs_CB_GetCE, "CB.GetCE") \
+ EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \
+ EM(afs_CB_GetXStats, "CB.GetXStats") \
+ EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \
+ E_(afs_CB_ProbeUuid, "CB.ProbeUuid")
+
+#define yfs_cm_operations \
+ EM(yfs_CB_Probe, "YFSCB.Probe") \
+ EM(yfs_CB_GetLock, "YFSCB.GetLock") \
+ EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \
+ EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \
+ EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \
+ EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \
+ EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \
+ EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \
+ EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \
+ EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \
+ EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \
+ EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
+ E_(yfs_CB_CallBack, "YFSCB.CallBack")
+
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@@ -569,6 +623,8 @@ afs_server_traces;
afs_cell_traces;
afs_fs_operations;
afs_vl_operations;
+afs_cm_operations;
+yfs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
@@ -649,20 +705,21 @@ TRACE_EVENT(afs_cb_call,
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(const char *, name )
__field(u32, op )
+ __field(u16, service_id )
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->name = call->type->name;
__entry->op = call->operation_ID;
+ __entry->service_id = call->service_id;
),
- TP_printk("c=%08x %s o=%u",
+ TP_printk("c=%08x %s",
__entry->call,
- __entry->name,
- __entry->op)
+ __entry->service_id == 2501 ?
+ __print_symbolic(__entry->op, yfs_cm_operations) :
+ __print_symbolic(__entry->op, afs_cm_operations))
);
TRACE_EVENT(afs_call,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index c7237317a8b9..b671b1f2ce0f 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1092,7 +1092,7 @@ TRACE_EVENT(btrfs_trigger_flush,
__entry->flags = flags;
__entry->bytes = bytes;
__entry->flush = flush;
- __assign_str(reason, reason)
+ __assign_str(reason, reason);
),
TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 64e92d56c6a8..3963e79ca7b4 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -23,8 +23,8 @@ DECLARE_EVENT_CLASS(dma_fence,
),
TP_fast_assign(
- __assign_str(driver, fence->ops->get_driver_name(fence))
- __assign_str(timeline, fence->ops->get_timeline_name(fence))
+ __assign_str(driver, fence->ops->get_driver_name(fence));
+ __assign_str(timeline, fence->ops->get_timeline_name(fence));
__entry->context = fence->context;
__entry->seqno = fence->seqno;
),
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
index d233f2916584..e5c1ca6d16ee 100644
--- a/include/trace/events/intel_iommu.h
+++ b/include/trace/events/intel_iommu.h
@@ -15,6 +15,8 @@
#include <linux/tracepoint.h>
#include <linux/intel-iommu.h>
+#define MSG_MAX 256
+
TRACE_EVENT(qi_submit,
TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
@@ -51,6 +53,41 @@ TRACE_EVENT(qi_submit,
__entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
)
);
+
+TRACE_EVENT(prq_report,
+ TP_PROTO(struct intel_iommu *iommu, struct device *dev,
+ u64 dw0, u64 dw1, u64 dw2, u64 dw3,
+ unsigned long seq),
+
+ TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
+
+ TP_STRUCT__entry(
+ __field(u64, dw0)
+ __field(u64, dw1)
+ __field(u64, dw2)
+ __field(u64, dw3)
+ __field(unsigned long, seq)
+ __string(iommu, iommu->name)
+ __string(dev, dev_name(dev))
+ __dynamic_array(char, buff, MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __entry->dw0 = dw0;
+ __entry->dw1 = dw1;
+ __entry->dw2 = dw2;
+ __entry->dw3 = dw3;
+ __entry->seq = seq;
+ __assign_str(iommu, iommu->name);
+ __assign_str(dev, dev_name(dev));
+ ),
+
+ TP_printk("%s/%s seq# %ld: %s",
+ __get_str(iommu), __get_str(dev), __entry->seq,
+ decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
+ __entry->dw1, __entry->dw2, __entry->dw3)
+ )
+);
#endif /* _TRACE_INTEL_IOMMU_H */
/* This part must be outside protection */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 390270e00a1d..f160484afc5c 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -48,7 +48,9 @@
{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
+ {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
+ {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \
+ {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 2399073c3afc..78c448c6ab4c 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(net_dev_template,
__assign_str(name, skb->dev->name);
),
- TP_printk("dev=%s skbaddr=%p len=%u",
+ TP_printk("dev=%s skbaddr=%px len=%u",
__get_str(name), __entry->skbaddr, __entry->len)
)
diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h
new file mode 100644
index 000000000000..82f741ec0f57
--- /dev/null
+++ b/include/trace/events/osnoise.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM osnoise
+
+#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _OSNOISE_TRACE_H
+
+#include <linux/tracepoint.h>
+TRACE_EVENT(thread_noise,
+
+ TP_PROTO(struct task_struct *t, u64 start, u64 duration),
+
+ TP_ARGS(t, start, duration),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN)
+ __field( u64, start )
+ __field( u64, duration)
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __entry->pid = t->pid;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%8s:%d start %llu.%09u duration %llu ns",
+ __entry->comm,
+ __entry->pid,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(softirq_noise,
+
+ TP_PROTO(int vector, u64 start, u64 duration),
+
+ TP_ARGS(vector, start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%8s:%d start %llu.%09u duration %llu ns",
+ show_softirq_name(__entry->vector),
+ __entry->vector,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(irq_noise,
+
+ TP_PROTO(int vector, const char *desc, u64 start, u64 duration),
+
+ TP_ARGS(vector, desc, start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __string( desc, desc )
+ __field( int, vector )
+
+ ),
+
+ TP_fast_assign(
+ __assign_str(desc, desc);
+ __entry->vector = vector;
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("%s:%d start %llu.%09u duration %llu ns",
+ __get_str(desc),
+ __entry->vector,
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(nmi_noise,
+
+ TP_PROTO(u64 start, u64 duration),
+
+ TP_ARGS(start, duration),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->duration = duration;
+ ),
+
+ TP_printk("start %llu.%09u duration %llu ns",
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration)
+);
+
+TRACE_EVENT(sample_threshold,
+
+ TP_PROTO(u64 start, u64 duration, u64 interference),
+
+ TP_ARGS(start, duration, interference),
+
+ TP_STRUCT__entry(
+ __field( u64, start )
+ __field( u64, duration)
+ __field( u64, interference)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->duration = duration;
+ __entry->interference = interference;
+ ),
+
+ TP_printk("start %llu.%09u duration %llu ns interference %llu",
+ __print_ns_to_secs(__entry->start),
+ __print_ns_without_secs(__entry->start),
+ __entry->duration,
+ __entry->interference)
+);
+
+#endif /* _TRACE_OSNOISE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 330d32d84485..c3006c6b4a87 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -41,11 +41,37 @@ TRACE_EVENT(qdisc_dequeue,
__entry->txq_state = txq->state;
),
- TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p",
+ TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%px",
__entry->ifindex, __entry->handle, __entry->parent,
__entry->txq_state, __entry->packets, __entry->skbaddr )
);
+TRACE_EVENT(qdisc_enqueue,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
+
+ TP_ARGS(qdisc, txq, skb),
+
+ TP_STRUCT__entry(
+ __field(struct Qdisc *, qdisc)
+ __field(void *, skbaddr)
+ __field(int, ifindex)
+ __field(u32, handle)
+ __field(u32, parent)
+ ),
+
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->skbaddr = skb;
+ __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ ),
+
+ TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px",
+ __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
+);
+
TRACE_EVENT(qdisc_reset,
TP_PROTO(struct Qdisc *q),
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 6768b64bc738..670e41783edd 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
* "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ * "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
* "WokeEmpty": rcuo CB kthread woke to find empty list.
*/
TRACE_EVENT_RCU(rcu_nocb_wake,
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index ffdbe6f85da8..b2a2672e6632 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -152,7 +152,7 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class,
TP_fast_assign(
__entry->cred = gc;
__entry->service = gc->gc_service;
- __assign_str(principal, gc->gc_principal)
+ __assign_str(principal, gc->gc_principal);
),
TP_printk("cred=%p service=%s principal='%s'",
@@ -535,7 +535,7 @@ TRACE_EVENT(rpcgss_upcall_msg,
),
TP_fast_assign(
- __assign_str(msg, buf)
+ __assign_str(msg, buf);
),
TP_printk("msg='%s'", __get_str(msg))
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1eca2305ca42..94640482cfe7 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
- __field( int, success )
__field( int, target_cpu )
),
@@ -156,7 +155,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
- __entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index f624969a4f14..370ade0d4093 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -124,50 +124,6 @@
scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
-#define scsi_driverbyte_name(result) { result, #result }
-#define show_driverbyte_name(val) \
- __print_symbolic(val, \
- scsi_driverbyte_name(DRIVER_OK), \
- scsi_driverbyte_name(DRIVER_BUSY), \
- scsi_driverbyte_name(DRIVER_SOFT), \
- scsi_driverbyte_name(DRIVER_MEDIA), \
- scsi_driverbyte_name(DRIVER_ERROR), \
- scsi_driverbyte_name(DRIVER_INVALID), \
- scsi_driverbyte_name(DRIVER_TIMEOUT), \
- scsi_driverbyte_name(DRIVER_HARD), \
- scsi_driverbyte_name(DRIVER_SENSE))
-
-#define scsi_msgbyte_name(result) { result, #result }
-#define show_msgbyte_name(val) \
- __print_symbolic(val, \
- scsi_msgbyte_name(COMMAND_COMPLETE), \
- scsi_msgbyte_name(EXTENDED_MESSAGE), \
- scsi_msgbyte_name(SAVE_POINTERS), \
- scsi_msgbyte_name(RESTORE_POINTERS), \
- scsi_msgbyte_name(DISCONNECT), \
- scsi_msgbyte_name(INITIATOR_ERROR), \
- scsi_msgbyte_name(ABORT_TASK_SET), \
- scsi_msgbyte_name(MESSAGE_REJECT), \
- scsi_msgbyte_name(NOP), \
- scsi_msgbyte_name(MSG_PARITY_ERROR), \
- scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
- scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
- scsi_msgbyte_name(TARGET_RESET), \
- scsi_msgbyte_name(ABORT_TASK), \
- scsi_msgbyte_name(CLEAR_TASK_SET), \
- scsi_msgbyte_name(INITIATE_RECOVERY), \
- scsi_msgbyte_name(RELEASE_RECOVERY), \
- scsi_msgbyte_name(CLEAR_ACA), \
- scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
- scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
- scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
- scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
- scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
- scsi_msgbyte_name(ACA), \
- scsi_msgbyte_name(QAS_REQUEST), \
- scsi_msgbyte_name(BUS_DEVICE_RESET), \
- scsi_msgbyte_name(ABORT))
-
#define scsi_statusbyte_name(result) { result, #result }
#define show_statusbyte_name(val) \
__print_symbolic(val, \
@@ -327,9 +283,9 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- show_driverbyte_name(((__entry->result) >> 24) & 0xff),
+ "DRIVER_OK",
show_hostbyte_name(((__entry->result) >> 16) & 0xff),
- show_msgbyte_name(((__entry->result) >> 8) & 0xff),
+ "COMMAND_COMPLETE",
show_statusbyte_name(__entry->result & 0xff))
);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index d02e01a27b69..861f199896c6 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -154,8 +154,8 @@ TRACE_EVENT(rpc_clnt_new,
__entry->client_id = clnt->cl_clid;
__assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
- __assign_str(program, program)
- __assign_str(server, server)
+ __assign_str(program, program);
+ __assign_str(server, server);
),
TP_printk("client=%u peer=[%s]:%s program=%s server=%s",
@@ -180,8 +180,8 @@ TRACE_EVENT(rpc_clnt_new_err,
TP_fast_assign(
__entry->error = error;
- __assign_str(program, program)
- __assign_str(server, server)
+ __assign_str(program, program);
+ __assign_str(server, server);
),
TP_printk("program=%s server=%s error=%d",
@@ -284,8 +284,8 @@ TRACE_EVENT(rpc_request,
__entry->client_id = task->tk_client->cl_clid;
__entry->version = task->tk_client->cl_vers;
__entry->async = RPC_IS_ASYNC(task);
- __assign_str(progname, task->tk_client->cl_program->name)
- __assign_str(procname, rpc_proc_name(task))
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(procname, rpc_proc_name(task));
),
TP_printk("task:%u@%u %sv%d %s (%ssync)",
@@ -494,10 +494,10 @@ DECLARE_EVENT_CLASS(rpc_reply_event,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
- __assign_str(progname, task->tk_client->cl_program->name)
+ __assign_str(progname, task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procname, rpc_proc_name(task))
- __assign_str(servername, task->tk_xprt->servername)
+ __assign_str(procname, rpc_proc_name(task));
+ __assign_str(servername, task->tk_xprt->servername);
),
TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s",
@@ -622,8 +622,8 @@ TRACE_EVENT(rpc_stats_latency,
__entry->task_id = task->tk_pid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->version = task->tk_client->cl_vers;
- __assign_str(progname, task->tk_client->cl_program->name)
- __assign_str(procname, rpc_proc_name(task))
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __assign_str(procname, rpc_proc_name(task));
__entry->backlog = ktime_to_us(backlog);
__entry->rtt = ktime_to_us(rtt);
__entry->execute = ktime_to_us(execute);
@@ -669,15 +669,15 @@ TRACE_EVENT(rpc_xdr_overflow,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
} else {
__entry->task_id = 0;
__entry->client_id = 0;
- __assign_str(progname, "unknown")
+ __assign_str(progname, "unknown");
__entry->version = 0;
- __assign_str(procedure, "unknown")
+ __assign_str(procedure, "unknown");
}
__entry->requested = requested;
__entry->end = xdr->end;
@@ -735,9 +735,9 @@ TRACE_EVENT(rpc_xdr_alignment,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
__entry->offset = offset;
__entry->copied = copied;
@@ -1107,9 +1107,9 @@ TRACE_EVENT(xprt_retransmit,
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->ntrans = rqst->rq_ntrans;
__assign_str(progname,
- task->tk_client->cl_program->name)
+ task->tk_client->cl_program->name);
__entry->version = task->tk_client->cl_vers;
- __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
),
TP_printk(
@@ -1842,7 +1842,7 @@ TRACE_EVENT(svc_xprt_accept,
TP_fast_assign(
__assign_str(addr, xprt->xpt_remotebuf);
- __assign_str(protocol, xprt->xpt_class->xcl_name)
+ __assign_str(protocol, xprt->xpt_class->xcl_name);
__assign_str(service, service);
),
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 1cb6f1afba0e..599739ee7b20 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -246,6 +246,26 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init,
int dev_state, int link_state),
TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs,
+ int dev_state, int link_state),
+ TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
TRACE_EVENT(ufshcd_command,
TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, int transfer_len, u32 intr,
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 00d1180527d8..88faf2400ec2 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -423,47 +423,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active,
show_reclaim_flags(__entry->reclaim_flags))
);
-TRACE_EVENT(mm_vmscan_inactive_list_is_low,
-
- TP_PROTO(int nid, int reclaim_idx,
- unsigned long total_inactive, unsigned long inactive,
- unsigned long total_active, unsigned long active,
- unsigned long ratio, int file),
-
- TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file),
-
- TP_STRUCT__entry(
- __field(int, nid)
- __field(int, reclaim_idx)
- __field(unsigned long, total_inactive)
- __field(unsigned long, inactive)
- __field(unsigned long, total_active)
- __field(unsigned long, active)
- __field(unsigned long, ratio)
- __field(int, reclaim_flags)
- ),
-
- TP_fast_assign(
- __entry->nid = nid;
- __entry->reclaim_idx = reclaim_idx;
- __entry->total_inactive = total_inactive;
- __entry->inactive = inactive;
- __entry->total_active = total_active;
- __entry->active = active;
- __entry->ratio = ratio;
- __entry->reclaim_flags = trace_reclaim_flags(file) &
- RECLAIM_WB_LRU;
- ),
-
- TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s",
- __entry->nid,
- __entry->reclaim_idx,
- __entry->total_inactive, __entry->inactive,
- __entry->total_active, __entry->active,
- __entry->ratio,
- show_reclaim_flags(__entry->reclaim_flags))
-);
-
TRACE_EVENT(mm_vmscan_node_reclaim_begin,
TP_PROTO(int nid, int order, gfp_t gfp_flags),
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 1efa463c4979..840d1ba84cf5 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -36,7 +36,8 @@
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
- EMe(WB_REASON_FORKER_THREAD, "forker_thread")
+ EM( WB_REASON_FORKER_THREAD, "forker_thread") \
+ EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush")
WB_WORK_REASON
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 8268bf747d6f..acc17194c160 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -358,6 +358,21 @@ TRACE_MAKE_SYSTEM_STR();
trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ do_div(____val, NSEC_PER_SEC); \
+ ____val; \
+ })
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ (u32) do_div(____val, NSEC_PER_SEC); \
+ })
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
@@ -736,6 +751,16 @@ static inline void ftrace_test_probe_##call(void) \
#undef __print_array
#undef __print_hex_dump
+/*
+ * The below is not executed in the kernel. It is only what is
+ * displayed in the print format for userspace to parse.
+ */
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(val) (val) / 1000000000UL
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(val) (val) % 1000000000UL
+
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
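
The two helpers above exist so TP_printk() can render a nanosecond value as "seconds.nanoseconds" without open-coding 64-bit division in the format string; the kernel-side versions go through do_div() because plain 64-bit division is not available on every 32-bit target. A minimal userspace sketch of the same split (illustrative only, not part of the patch; ordinary C division stands in for do_div()):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
    	uint64_t ns   = 1626300123456789ULL;	/* example timestamp */
    	uint64_t secs = ns / NSEC_PER_SEC;	/* what __print_ns_to_secs() yields */
    	uint32_t rem  = ns % NSEC_PER_SEC;	/* what __print_ns_without_secs() yields */

    	printf("%llu.%09u\n", (unsigned long long)secs, rem);
    	return 0;
    }
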
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index f94f65d429be..1567a3294c3d 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -72,6 +72,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
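
MADV_POPULATE_READ and MADV_POPULATE_WRITE let userspace prefault a mapping's page tables up front instead of paying for faults on first access. A hedged sketch of how the new advice values are used (illustrative only, not part of the patch; the fallback define covers libcs whose headers predate the constant, and older kernels simply fail the call with EINVAL):

    #include <sys/mman.h>
    #include <stdio.h>

    #ifndef MADV_POPULATE_READ
    #define MADV_POPULATE_READ 22
    #endif

    int main(void)
    {
    	size_t len = 4096 * 16;
    	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	if (p == MAP_FAILED) {
    		perror("mmap");
    		return 1;
    	}
    	/* Prefault the whole range for reading; EINVAL on pre-5.14 kernels. */
    	if (madvise(p, len, MADV_POPULATE_READ))
    		perror("madvise(MADV_POPULATE_READ)");
    	munmap(p, len);
    	return 0;
    }
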
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index f211961ce1da..a9d6fcd95f42 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -873,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
#define __NR_landlock_restrict_self 446
__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+
#undef __NR_syscalls
-#define __NR_syscalls 447
+#define __NR_syscalls 448
/*
* 32 bit systems traditionally used different
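
memfd_secret() is only wired up on architectures that opt in via __ARCH_WANT_MEMFD_SECRET, and there is no libc wrapper, so callers go through syscall(). An illustrative sketch (not part of the patch; the fallback number is the generic one added above and may differ per architecture, and some configurations additionally require enabling secretmem at boot):

    #include <sys/syscall.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <stdio.h>

    #ifndef __NR_memfd_secret
    #define __NR_memfd_secret 447	/* generic table; arch-specific tables may differ */
    #endif

    int main(void)
    {
    	int fd = syscall(__NR_memfd_secret, 0);

    	if (fd < 0) {
    		perror("memfd_secret");	/* ENOSYS if the arch/kernel lacks it */
    		return 1;
    	}
    	if (ftruncate(fd, 4096) == 0) {
    		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    			       MAP_SHARED, fd, 0);
    		if (p != MAP_FAILED)
    			munmap(p, 4096);	/* pages are unmapped from the kernel's direct map */
    	}
    	close(fd);
    	return 0;
    }
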
diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h
index abe5f2b6581b..c7e502bf5a6f 100644
--- a/include/uapi/linux/auxvec.h
+++ b/include/uapi/linux/auxvec.h
@@ -33,5 +33,8 @@
#define AT_EXECFN 31 /* filename of program */
+#ifndef AT_MINSIGSTKSZ
+#define AT_MINSIGSTKSZ 51 /* minimal stack size for signal delivery */
+#endif
#endif /* _UAPI_LINUX_AUXVEC_H */
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index 3155382dfc9b..f6e8a005b113 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -29,6 +29,18 @@
___C(GET_LSA, "Get Label Storage Area"), \
___C(GET_HEALTH_INFO, "Get Health Info"), \
___C(GET_LOG, "Get Log"), \
+ ___C(SET_PARTITION_INFO, "Set Partition Information"), \
+ ___C(SET_LSA, "Set Label Storage Area"), \
+ ___C(GET_ALERT_CONFIG, "Get Alert Configuration"), \
+ ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \
+ ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \
+ ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \
+ ___C(GET_POISON, "Get Poison List"), \
+ ___C(INJECT_POISON, "Inject Poison"), \
+ ___C(CLEAR_POISON, "Clear Poison"), \
+ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
+ ___C(SCAN_MEDIA, "Scan Media"), \
+ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \
___C(MAX, "invalid / last command")
#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index c7135c9c37a5..b3b93710eff7 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -46,6 +46,7 @@ enum {
ETHTOOL_MSG_FEC_SET,
ETHTOOL_MSG_MODULE_EEPROM_GET,
ETHTOOL_MSG_STATS_GET,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -88,6 +89,7 @@ enum {
ETHTOOL_MSG_FEC_NTF,
ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
ETHTOOL_MSG_STATS_GET_REPLY,
+ ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -440,6 +442,19 @@ enum {
ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
};
+/* PHC VCLOCKS */
+
+enum {
+ ETHTOOL_A_PHC_VCLOCKS_UNSPEC,
+ ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */
+ ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PHC_VCLOCKS_CNT,
+ ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1)
+};
+
/* CABLE TEST */
enum {
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 271ae90a9bb7..36ed092227fa 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -181,6 +181,9 @@
* - add FUSE_OPEN_KILL_SUIDGID
* - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT
* - add FUSE_SETXATTR_ACL_KILL_SGID
+ *
+ * 7.34
+ * - add FUSE_SYNCFS
*/
#ifndef _LINUX_FUSE_H
@@ -216,7 +219,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 33
+#define FUSE_KERNEL_MINOR_VERSION 34
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -509,6 +512,7 @@ enum fuse_opcode {
FUSE_COPY_FILE_RANGE = 47,
FUSE_SETUPMAPPING = 48,
FUSE_REMOVEMAPPING = 49,
+ FUSE_SYNCFS = 50,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -971,4 +975,8 @@ struct fuse_removemapping_one {
#define FUSE_REMOVEMAPPING_MAX_ENTRY \
(PAGE_SIZE / sizeof(struct fuse_removemapping_one))
+struct fuse_syncfs_in {
+ uint64_t padding;
+};
+
#endif /* _LINUX_FUSE_H */
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index e33997b4d750..edc346a77c91 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _USR_IDXD_H_
#define _USR_IDXD_H_
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index f3956fc11de6..35687dcb1a42 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -97,5 +97,6 @@
#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */
#define Z3FOLD_MAGIC 0x33
#define PPC_CMM_MAGIC 0xc7571590
+#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 4832fd0b5642..19a00bc7fe86 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -60,7 +60,6 @@ enum {
* are never OR'ed into the mode in mempolicy API arguments.
*/
#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
-#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
diff --git a/include/uapi/linux/n_r3964.h b/include/uapi/linux/n_r3964.h
deleted file mode 100644
index 6bbd18520f30..000000000000
--- a/include/uapi/linux/n_r3964.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-
-#ifndef _UAPI__LINUX_N_R3964_H__
-#define _UAPI__LINUX_N_R3964_H__
-
-/* line disciplines for r3964 protocol */
-
-
-/*
- * Ioctl-commands
- */
-
-#define R3964_ENABLE_SIGNALS 0x5301
-#define R3964_SETPRIORITY 0x5302
-#define R3964_USE_BCC 0x5303
-#define R3964_READ_TELEGRAM 0x5304
-
-/* Options for R3964_SETPRIORITY */
-#define R3964_MASTER 0
-#define R3964_SLAVE 1
-
-/* Options for R3964_ENABLE_SIGNALS */
-#define R3964_SIG_ACK 0x0001
-#define R3964_SIG_DATA 0x0002
-#define R3964_SIG_ALL 0x000f
-#define R3964_SIG_NONE 0x0000
-#define R3964_USE_SIGIO 0x1000
-
-/*
- * r3964 operation states:
- */
-
-/* types for msg_id: */
-enum {R3964_MSG_ACK=1, R3964_MSG_DATA };
-
-#define R3964_MAX_MSG_COUNT 32
-
-/* error codes for client messages */
-#define R3964_OK 0 /* no error. */
-#define R3964_TX_FAIL -1 /* transmission error, block NOT sent */
-#define R3964_OVERFLOW -2 /* msg queue overflow */
-
-/* the client gets this struct when calling read(fd,...): */
-struct r3964_client_message {
- int msg_id;
- int arg;
- int error_code;
-};
-
-#define R3964_MTU 256
-
-
-
-#endif /* _UAPI__LINUX_N_R3964_H__ */
diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h
index c5d0ef7aa7d5..2d0b90964227 100644
--- a/include/uapi/linux/nbd-netlink.h
+++ b/include/uapi/linux/nbd-netlink.h
@@ -35,6 +35,7 @@ enum {
NBD_ATTR_SOCKETS,
NBD_ATTR_DEAD_CONN_TIMEOUT,
NBD_ATTR_DEVICE_LIST,
+ NBD_ATTR_BACKEND_IDENTIFIER,
__NBD_ATTR_MAX,
};
#define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1)
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index dc8b72201f6c..00a60695fa53 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -66,8 +66,11 @@ enum {
#define NUD_NONE 0x00
/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
- and make no address resolution or NUD.
- NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * and make no address resolution or NUD.
+ * NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
*/
struct nda_cacheinfo {
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 7ed0b3d1c00a..fcc61c73a666 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* for SO_TIMESTAMPING */
-/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
+/* SO_TIMESTAMPING flags */
enum {
SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
@@ -30,8 +30,9 @@ enum {
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
+ SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
@@ -47,6 +48,18 @@ enum {
SOF_TIMESTAMPING_TX_ACK)
/**
+ * struct so_timestamping - SO_TIMESTAMPING parameter
+ *
+ * @flags: SO_TIMESTAMPING flags
+ * @bind_phc: Index of PTP virtual clock bound to sock. This is available
+ * if flag SOF_TIMESTAMPING_BIND_PHC is set.
+ */
+struct so_timestamping {
+ int flags;
+ int bind_phc;
+};
+
+/**
* struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
* @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP
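
With SOF_TIMESTAMPING_BIND_PHC set, SO_TIMESTAMPING accepts a struct so_timestamping rather than a bare integer, carrying the index of the PTP virtual clock the socket should be bound to. A hedged userspace sketch (illustrative only; the fallback definitions cover headers that predate this change, and the SO_TIMESTAMPING value shown is the one used on most architectures):

    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef SO_TIMESTAMPING
    #define SO_TIMESTAMPING 37			/* value on most architectures */
    #endif
    #ifndef SOF_TIMESTAMPING_BIND_PHC
    #define SOF_TIMESTAMPING_BIND_PHC (1 << 15)
    struct so_timestamping {
    	int flags;
    	int bind_phc;
    };
    #endif

    int main(void)
    {
    	struct so_timestamping ts = {
    		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
    			 SOF_TIMESTAMPING_RX_HARDWARE |
    			 SOF_TIMESTAMPING_RAW_HARDWARE |
    			 SOF_TIMESTAMPING_BIND_PHC,
    		.bind_phc = 0,	/* index of the PTP virtual clock to bind */
    	};
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);

    	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
    				 &ts, sizeof(ts)) < 0)
    		perror("SO_TIMESTAMPING with BIND_PHC");
    	if (fd >= 0)
    		close(fd);
    	return 0;
    }
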
diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h
index 912ec60b26b0..bbcd285b22e1 100644
--- a/include/uapi/linux/netfilter/nfnetlink_hook.h
+++ b/include/uapi/linux/netfilter/nfnetlink_hook.h
@@ -43,6 +43,15 @@ enum nfnl_hook_chain_info_attributes {
};
#define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
+enum nfnl_hook_chain_desc_attributes {
+ NFNLA_CHAIN_UNSPEC,
+ NFNLA_CHAIN_TABLE,
+ NFNLA_CHAIN_FAMILY,
+ NFNLA_CHAIN_NAME,
+ __NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
/**
* enum nfnl_hook_chaintype - chain type
*
diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h
index 45c8d3b027e0..0af9c113d665 100644
--- a/include/uapi/linux/netfilter/nfnetlink_log.h
+++ b/include/uapi/linux/netfilter/nfnetlink_log.h
@@ -61,7 +61,7 @@ enum nfulnl_attr_type {
NFULA_HWTYPE, /* hardware type */
NFULA_HWHEADER, /* hardware header */
NFULA_HWLEN, /* hardware header length */
- NFULA_CT, /* nf_conntrack_netlink.h */
+ NFULA_CT, /* nfnetlink_conntrack.h */
NFULA_CT_INFO, /* enum ip_conntrack_info */
NFULA_VLAN, /* nested attribute: packet vlan info */
NFULA_L2HDR, /* full L2 header */
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index bcb2cb5d40b9..aed90c4df0c8 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -51,11 +51,11 @@ enum nfqnl_attr_type {
NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFQA_HWADDR, /* nfqnl_msg_packet_hw */
NFQA_PAYLOAD, /* opaque data payload */
- NFQA_CT, /* nf_conntrack_netlink.h */
+ NFQA_CT, /* nfnetlink_conntrack.h */
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
- NFQA_EXP, /* nf_conntrack_netlink.h */
+ NFQA_EXP, /* nfnetlink_conntrack.h */
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
NFQA_SECCTX, /* security context string */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index c3ab4c826297..f9c1af8d141b 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/**
+/*
* pcitest.h - PCI test uapi defines
*
* Copyright (C) 2017 Texas Instruments
diff --git a/include/uapi/linux/raw.h b/include/uapi/linux/raw.h
deleted file mode 100644
index 47874919d0b9..000000000000
--- a/include/uapi/linux/raw.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_RAW_H
-#define __LINUX_RAW_H
-
-#include <linux/types.h>
-
-#define RAW_SETBIND _IO( 0xac, 0 )
-#define RAW_GETBIND _IO( 0xac, 1 )
-
-struct raw_config_request
-{
- int raw_minor;
- __u64 block_major;
- __u64 block_minor;
-};
-
-#endif /* __LINUX_RAW_H */
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 650480f41f1d..05b31d60acf6 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -31,7 +31,8 @@
UFFD_FEATURE_MISSING_SHMEM | \
UFFD_FEATURE_SIGBUS | \
UFFD_FEATURE_THREAD_ID | \
- UFFD_FEATURE_MINOR_HUGETLBFS)
+ UFFD_FEATURE_MINOR_HUGETLBFS | \
+ UFFD_FEATURE_MINOR_SHMEM)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -185,6 +186,9 @@ struct uffdio_api {
* UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults
* can be intercepted (via REGISTER_MODE_MINOR) for
* hugetlbfs-backed pages.
+ *
+ * UFFD_FEATURE_MINOR_SHMEM indicates the same support as
+ * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -196,6 +200,7 @@ struct uffdio_api {
#define UFFD_FEATURE_SIGBUS (1<<7)
#define UFFD_FEATURE_THREAD_ID (1<<8)
#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9)
+#define UFFD_FEATURE_MINOR_SHMEM (1<<10)
__u64 features;
__u64 ioctls;
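
UFFD_FEATURE_MINOR_SHMEM is negotiated the same way as the hugetlbfs variant: userspace requests it during the UFFDIO_API handshake and then registers ranges with UFFDIO_REGISTER_MODE_MINOR. A minimal sketch of the handshake (illustrative only; the feature-bit fallback covers older headers, and kernels without the feature fail the ioctl with EINVAL):

    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef UFFD_FEATURE_MINOR_SHMEM
    #define UFFD_FEATURE_MINOR_SHMEM (1 << 10)
    #endif

    int main(void)
    {
    	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    	struct uffdio_api api;

    	if (fd < 0) {
    		perror("userfaultfd");
    		return 1;
    	}
    	memset(&api, 0, sizeof(api));
    	api.api = UFFD_API;
    	api.features = UFFD_FEATURE_MINOR_SHMEM;	/* request minor faults on shmem */
    	if (ioctl(fd, UFFDIO_API, &api))
    		perror("UFFDIO_API");	/* EINVAL if the kernel lacks the feature */
    	close(fd);
    	return 0;
    }
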
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 4fe842c3a3a9..70a8057ad4bb 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -57,4 +57,16 @@
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
#define VIRTIO_ID_BT 40 /* virtio bluetooth */
+/*
+ * Virtio Transitional IDs
+ */
+
+#define VIRTIO_TRANS_ID_NET 1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK 1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON 1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE 1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI 1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG 1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P 1009 /* transitional virtio 9p console */
+
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_pcidev.h b/include/uapi/linux/virtio_pcidev.h
new file mode 100644
index 000000000000..89daa88bcfef
--- /dev/null
+++ b/include/uapi/linux/virtio_pcidev.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef _UAPI_LINUX_VIRTIO_PCIDEV_H
+#define _UAPI_LINUX_VIRTIO_PCIDEV_H
+#include <linux/types.h>
+
+/**
+ * enum virtio_pcidev_ops - virtual PCI device operations
+ * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_BAR_READ: read BAR mem/pio, size can be variable;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_BAR_WRITE: write BAR mem/pio, size can be variable;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
+ * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)
+ * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for
+ * the number
+ * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports
+ * the 16- or 32-bit write that would otherwise be done into memory,
+ * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above
+ * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be
+ * all zeroes) to signal the PME# pin.
+ */
+enum virtio_pcidev_ops {
+ VIRTIO_PCIDEV_OP_RESERVED = 0,
+ VIRTIO_PCIDEV_OP_CFG_READ,
+ VIRTIO_PCIDEV_OP_CFG_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_READ,
+ VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_MEMSET,
+ VIRTIO_PCIDEV_OP_INT,
+ VIRTIO_PCIDEV_OP_MSI,
+ VIRTIO_PCIDEV_OP_PME,
+};
+
+/**
+ * struct virtio_pcidev_msg - virtio PCI device operation
+ * @op: the operation to do
+ * @bar: the bar (only with BAR read/write messages)
+ * @reserved: reserved
+ * @size: the size of the read/write (in bytes)
+ * @addr: the address to read/write
+ * @data: the data, normally @size long, but just one byte for
+ * %VIRTIO_PCIDEV_OP_MMIO_MEMSET
+ *
+ * Note: the fields are all in native (CPU) endian, however, the
+ * @data values will often be in little endian (see the ops above.)
+ */
+struct virtio_pcidev_msg {
+ __u8 op;
+ __u8 bar;
+ __u16 reserved;
+ __u32 size;
+ __u64 addr;
+ __u8 data[];
+};
+
+#endif /* _UAPI_LINUX_VIRTIO_PCIDEV_H */
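
The new header describes the message format used to tunnel PCI accesses over virtio. An illustrative sketch of building a config-space read request with the trailing flexible data[] buffer sized for the reply (not part of the patch; make_cfg_read() is a hypothetical helper, and the header is assumed to be installed as a UAPI include):

    #include <linux/virtio_pcidev.h>
    #include <stdlib.h>

    static struct virtio_pcidev_msg *make_cfg_read(__u64 offset, __u32 size)
    {
    	struct virtio_pcidev_msg *msg = calloc(1, sizeof(*msg) + size);

    	if (!msg)
    		return NULL;
    	msg->op   = VIRTIO_PCIDEV_OP_CFG_READ;
    	msg->size = size;	/* 1, 2, 4 or 8 for config space */
    	msg->addr = offset;	/* offset into config space */
    	return msg;		/* the device fills msg->data (little endian) */
    }
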
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 6d2d34c9f375..a47a731e4527 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -313,6 +313,7 @@ enum hl_device_status {
* HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
* HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
* HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
+ * HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -331,6 +332,7 @@ enum hl_device_status {
#define HL_INFO_TOTAL_ENERGY 15
#define HL_INFO_PLL_FREQUENCY 16
#define HL_INFO_POWER 17
+#define HL_INFO_OPEN_STATS 18
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -445,6 +447,16 @@ struct hl_pll_frequency_info {
};
/**
+ * struct hl_open_stats_info - device open statistics information
+ * @open_counter: ever growing counter, increased on each successful dev open
+ * @last_open_period_ms: duration (ms) device was open last time
+ */
+struct hl_open_stats_info {
+ __u64 open_counter;
+ __u64 last_open_period_ms;
+};
+
+/**
* struct hl_power_info - power information
* @power: power consumption
*/
@@ -664,6 +676,7 @@ struct hl_cs_chunk {
#define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80
#define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100
#define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200
+#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400
#define HL_CS_STATUS_SUCCESS 0
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index 26b638a7ad97..a7085e092d34 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
/*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 535a7229e1d9..d17c061950df 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -710,7 +710,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -736,12 +736,38 @@ struct snd_rawmidi_info {
unsigned char reserved[64]; /* reserved for future use */
};
+#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0
+#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3
+#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3)
+
+#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16
+
+struct snd_rawmidi_framing_tstamp {
+ /* For now, frame_type is always 0. Midi 2.0 is expected to add new
+ * types here. Applications are expected to skip unknown frame types.
+ */
+ __u8 frame_type;
+ __u8 length; /* number of valid bytes in data field */
+ __u8 reserved[2];
+ __u32 tv_nsec; /* nanoseconds */
+ __u64 tv_sec; /* seconds */
+ __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH];
+} __packed;
+
struct snd_rawmidi_params {
int stream;
size_t buffer_size; /* queue size in bytes */
size_t avail_min; /* minimum avail bytes for wakeup */
unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
- unsigned char reserved[16]; /* reserved for future use */
+ unsigned int mode; /* For input data only, frame incoming data */
+ unsigned char reserved[12]; /* reserved for future use */
};
#ifndef __KERNEL__
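
Once an input stream is switched to SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP via the new mode field in snd_rawmidi_params, read() returns a sequence of fixed-size snd_rawmidi_framing_tstamp records rather than raw MIDI bytes. A hedged sketch of walking such a buffer (illustrative only; dump_frames() is a hypothetical helper and the installed UAPI headers are assumed to be new enough to provide the structure):

    #include <sound/asound.h>
    #include <stdio.h>
    #include <string.h>

    static void dump_frames(const unsigned char *buf, size_t len)
    {
    	struct snd_rawmidi_framing_tstamp f;

    	while (len >= sizeof(f)) {
    		memcpy(&f, buf, sizeof(f));
    		if (f.frame_type == 0)	/* only frame type 0 is defined so far */
    			printf("%llu.%09u: %u MIDI bytes\n",
    			       (unsigned long long)f.tv_sec, f.tv_nsec,
    			       (unsigned)f.length);
    		buf += sizeof(f);
    		len -= sizeof(f);
    	}
    }
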
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 2af7a1cd6658..b39cdbc522ec 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -1,21 +1,53 @@
-/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
* ring.h
*
* Shared producer-consumer ring macros.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Tim Deegan and Andrew Warfield November 2004.
*/
#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__
+/*
+ * When #include'ing this header, you need to provide the following
+ * declaration upfront:
+ * - standard integers types (uint8_t, uint16_t, etc)
+ * They are provided by stdint.h of the standard headers.
+ *
+ * In addition, if you intend to use the FLEX macros, you also need to
+ * provide the following, before invoking the FLEX macros:
+ * - size_t
+ * - memcpy
+ * - grant_ref_t
+ * These declarations are provided by string.h of the standard headers,
+ * and grant_table.h from the Xen public headers.
+ */
+
#include <xen/interface/grant_table.h>
typedef unsigned int RING_IDX;
/* Round a 32-bit unsigned constant down to the nearest power of two. */
-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
-#define __CONST_RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
- sizeof(((struct _s##_sring *)0)->ring[0])))
-
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
/*
* The same for passing in an actual pointer instead of a name tag.
*/
-#define __RING_SIZE(_s, _sz) \
- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
* To make a new ring datatype, you need to have two message structures,
- * let's say struct request, and struct response already defined.
+ * let's say request_t, and response_t already defined.
*
* In a header where you want the ring datatype declared, you then do:
*
- * DEFINE_RING_TYPES(mytag, struct request, struct response);
+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
*
- * struct mytag_sring - The shared ring.
- * struct mytag_front_ring - The 'front' half of the ring.
- * struct mytag_back_ring - The 'back' half of the ring.
+ * mytag_sring_t - The shared ring.
+ * mytag_front_ring_t - The 'front' half of the ring.
+ * mytag_back_ring_t - The 'back' half of the ring.
*
* To initialize a ring in your code you need to know the location and size
* of the shared memory area (PAGE_SIZE, for instance). To initialise
* the front half:
*
- * struct mytag_front_ring front_ring;
- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
- * PAGE_SIZE);
+ * mytag_front_ring_t front_ring;
+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly (note that only the front
* initializes the shared ring):
*
- * struct mytag_back_ring back_ring;
- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
- * PAGE_SIZE);
+ * mytag_back_ring_t back_ring;
+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*/
-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
- \
-/* Shared ring entry */ \
-union __name##_sring_entry { \
- __req_t req; \
- __rsp_t rsp; \
-}; \
- \
-/* Shared ring page */ \
-struct __name##_sring { \
- RING_IDX req_prod, req_event; \
- RING_IDX rsp_prod, rsp_event; \
- uint8_t pad[48]; \
- union __name##_sring_entry ring[1]; /* variable-length */ \
-}; \
- \
-/* "Front" end's private variables */ \
-struct __name##_front_ring { \
- RING_IDX req_prod_pvt; \
- RING_IDX rsp_cons; \
- unsigned int nr_ents; \
- struct __name##_sring *sring; \
-}; \
- \
-/* "Back" end's private variables */ \
-struct __name##_back_ring { \
- RING_IDX rsp_prod_pvt; \
- RING_IDX req_cons; \
- unsigned int nr_ents; \
- struct __name##_sring *sring; \
-};
-
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+ __req_t req; \
+ __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+ RING_IDX req_prod, req_event; \
+ RING_IDX rsp_prod, rsp_event; \
+ uint8_t __pad[48]; \
+ union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+ RING_IDX req_prod_pvt; \
+ RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+ RING_IDX rsp_prod_pvt; \
+ RING_IDX req_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
/*
* Macros for manipulating rings.
*
@@ -119,94 +148,99 @@ struct __name##_back_ring { \
*/
/* Initialising empty rings */
-#define SHARED_RING_INIT(_s) do { \
- (_s)->req_prod = (_s)->rsp_prod = 0; \
- (_s)->req_event = (_s)->rsp_event = 1; \
- memset((_s)->pad, 0, sizeof((_s)->pad)); \
+#define SHARED_RING_INIT(_s) do { \
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
+ (_s)->req_event = (_s)->rsp_event = 1; \
+ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
-#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
- (_r)->req_prod_pvt = (_i); \
- (_r)->rsp_cons = (_i); \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->req_prod_pvt = (_i); \
+ (_r)->rsp_cons = (_i); \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
} while (0)
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
-#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
- (_r)->rsp_prod_pvt = (_i); \
- (_r)->req_cons = (_i); \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->rsp_prod_pvt = (_i); \
+ (_r)->req_cons = (_i); \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
/* How big is this ring? */
-#define RING_SIZE(_r) \
+#define RING_SIZE(_r) \
((_r)->nr_ents)
/* Number of free requests (for use on front side only). */
-#define RING_FREE_REQUESTS(_r) \
+#define RING_FREE_REQUESTS(_r) \
(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
/* Test if there is an empty slot available on the front ring.
* (This is only meaningful from the front. )
*/
-#define RING_FULL(_r) \
+#define RING_FULL(_r) \
(RING_FREE_REQUESTS(_r) == 0)
/* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
- ({ \
- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
- unsigned int rsp = RING_SIZE(_r) - \
- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
- req < rsp ? req : rsp; \
- })
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+})
/* Direct access to individual ring elements, by index. */
-#define RING_GET_REQUEST(_r, _idx) \
+#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
/*
- * Get a local copy of a request.
+ * Get a local copy of a request/response.
*
- * Use this in preference to RING_GET_REQUEST() so all processing is
+ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
- * to be ineffective where _req is a struct which consists of only bitfields.
+ * to be ineffective where dest is a struct which consists of only bitfields.
*/
-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
- /* Use volatile to force the copy into _req. */ \
- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+#define RING_COPY_(type, r, idx, dest) do { \
+ /* Use volatile to force the copy into dest. */ \
+ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)
-#define RING_GET_RESPONSE(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
+#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
/* Loop termination condition: Would the specified index overflow the ring? */
-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
/* Ill-behaved frontend determination: Can there be this many requests? */
-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+/* Ill-behaved backend determination: Can there be this many responses? */
+#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
-#define RING_PUSH_REQUESTS(_r) do { \
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+#define RING_PUSH_REQUESTS(_r) do { \
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)
-#define RING_PUSH_RESPONSES(_r) do { \
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+#define RING_PUSH_RESPONSES(_r) do { \
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
/*
@@ -239,40 +273,40 @@ struct __name##_back_ring { \
* field appropriately.
*/
-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
- RING_IDX __old = (_r)->sring->req_prod; \
- RING_IDX __new = (_r)->req_prod_pvt; \
- virt_wmb(); /* back sees requests /before/ updated producer index */ \
- (_r)->sring->req_prod = __new; \
- virt_mb(); /* back sees new requests /before/ we check req_event */ \
- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
- (RING_IDX)(__new - __old)); \
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->req_prod; \
+ RING_IDX __new = (_r)->req_prod_pvt; \
+ virt_wmb(); /* back sees requests /before/ updated producer index */\
+ (_r)->sring->req_prod = __new; \
+ virt_mb(); /* back sees new requests /before/ we check req_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+ (RING_IDX)(__new - __old)); \
} while (0)
-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
- RING_IDX __old = (_r)->sring->rsp_prod; \
- RING_IDX __new = (_r)->rsp_prod_pvt; \
- virt_wmb(); /* front sees responses /before/ updated producer index */ \
- (_r)->sring->rsp_prod = __new; \
- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
- (RING_IDX)(__new - __old)); \
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->rsp_prod; \
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
+ virt_wmb(); /* front sees resps /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = __new; \
+ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+ (RING_IDX)(__new - __old)); \
} while (0)
-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
- if (_work_to_do) break; \
- (_r)->sring->req_event = (_r)->req_cons + 1; \
- virt_mb(); \
- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
+ virt_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)
-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
- if (_work_to_do) break; \
- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
- virt_mb(); \
- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+ virt_mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
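
Besides the whitespace realignment, the rework adds RING_COPY_RESPONSE() and RING_RESPONSE_PROD_OVERFLOW(), giving frontends the same copy-then-validate pattern backends already had for requests. An illustrative sketch of a frontend consuming responses with them (not part of the patch; mytag, rsp_t and handle_response() are hypothetical names taken from the DEFINE_RING_TYPES example in the header comment):

    /* Assumes DEFINE_RING_TYPES(mytag, req_t, rsp_t) was used elsewhere. */
    static void consume_responses(struct mytag_front_ring *ring)
    {
    	RING_IDX cons = ring->rsp_cons;
    	RING_IDX prod = ring->sring->rsp_prod;

    	virt_rmb();	/* see rsp_prod before reading the entries it covers */
    	if (RING_RESPONSE_PROD_OVERFLOW(ring, prod))
    		return;	/* ill-behaved backend: more responses than ring slots */
    	while (cons != prod) {
    		rsp_t rsp;

    		RING_COPY_RESPONSE(ring, cons, &rsp);	/* local copy, backend can't race it */
    		/* handle_response(&rsp); */
    		cons++;
    	}
    	ring->rsp_cons = cons;
    }
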
diff --git a/init/Makefile b/init/Makefile
index 6bc37f64b361..2846113677ee 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -27,11 +27,11 @@ $(obj)/version.o: include/generated/compile.h
# mkcompile_h will make sure to only update the
# actual file if its content has changed.
- chk_compile.h = :
- quiet_chk_compile.h = echo ' CHK $@'
-silent_chk_compile.h = :
-include/generated/compile.h: FORCE
- @$($(quiet)chk_compile.h)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+quiet_cmd_compile.h = CHK $@
+ cmd_compile.h = \
+ $(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" \
"$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
+
+include/generated/compile.h: FORCE
+ $(call cmd,compile.h)
diff --git a/init/main.c b/init/main.c
index 359358500e54..8d97aba78c3a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -42,8 +42,10 @@
#include <linux/profile.h>
#include <linux/kfence.h>
#include <linux/rcupdate.h>
+#include <linux/srcu.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
+#include <linux/buildid.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
@@ -386,16 +388,6 @@ static char * __init xbc_make_cmdline(const char *key)
return new_cmdline;
}
-static u32 boot_config_checksum(unsigned char *p, u32 size)
-{
- u32 ret = 0;
-
- while (size--)
- ret += *p++;
-
- return ret;
-}
-
static int __init bootconfig_params(char *param, char *val,
const char *unused, void *arg)
{
@@ -405,6 +397,12 @@ static int __init bootconfig_params(char *param, char *val,
return 0;
}
+static int __init warn_bootconfig(char *str)
+{
+ /* The 'bootconfig' has been handled by bootconfig_params(). */
+ return 0;
+}
+
static void __init setup_boot_config(void)
{
static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
@@ -439,7 +437,7 @@ static void __init setup_boot_config(void)
return;
}
- if (boot_config_checksum((unsigned char *)data, size) != csum) {
+ if (xbc_calc_checksum(data, size) != csum) {
pr_err("bootconfig checksum failed\n");
return;
}
@@ -483,9 +481,8 @@ static int __init warn_bootconfig(char *str)
pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
return 0;
}
-early_param("bootconfig", warn_bootconfig);
-
#endif
+early_param("bootconfig", warn_bootconfig);
/* Change NUL term back to "=", to make "param" the whole string. */
static void __init repair_env_string(char *param, char *val)
@@ -873,6 +870,47 @@ void __init __weak arch_call_rest_init(void)
rest_init();
}
+static void __init print_unknown_bootoptions(void)
+{
+ char *unknown_options;
+ char *end;
+ const char *const *p;
+ size_t len;
+
+ if (panic_later || (!argv_init[1] && !envp_init[2]))
+ return;
+
+ /*
+ * Determine how many options we have to print out, plus a space
+ * before each
+ */
+ len = 1; /* null terminator */
+ for (p = &argv_init[1]; *p; p++) {
+ len++;
+ len += strlen(*p);
+ }
+ for (p = &envp_init[2]; *p; p++) {
+ len++;
+ len += strlen(*p);
+ }
+
+ unknown_options = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!unknown_options) {
+ pr_err("%s: Failed to allocate %zu bytes\n",
+ __func__, len);
+ return;
+ }
+ end = unknown_options;
+
+ for (p = &argv_init[1]; *p; p++)
+ end += sprintf(end, " %s", *p);
+ for (p = &envp_init[2]; *p; p++)
+ end += sprintf(end, " %s", *p);
+
+ pr_notice("Unknown command line parameters:%s\n", unknown_options);
+ memblock_free(__pa(unknown_options), len);
+}
+
asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
{
char *command_line;
@@ -881,6 +919,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
set_task_stack_end_magic(&init_task);
smp_setup_processor_id();
debug_objects_early_init();
+ init_vmlinux_build_id();
cgroup_init_early();
@@ -914,6 +953,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
static_command_line, __start___param,
__stop___param - __start___param,
-1, -1, NULL, &unknown_bootoption);
+ print_unknown_bootoptions();
if (!IS_ERR_OR_NULL(after_dashes))
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
NULL, set_init_arg);
@@ -976,6 +1016,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
tick_init();
rcu_init_nohz();
init_timers();
+ srcu_init();
hrtimers_init();
softirq_init();
timekeeping_init();
diff --git a/ipc/msg.c b/ipc/msg.c
index 6e6c8e0c9380..6810276d6bb9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -130,7 +130,7 @@ static void msg_rcu_free(struct rcu_head *head)
struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
security_msg_queue_free(&msq->q_perm);
- kvfree(msq);
+ kfree(msq);
}
/**
@@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
key_t key = params->key;
int msgflg = params->flg;
- msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+ msq = kmalloc(sizeof(*msq), GFP_KERNEL);
if (unlikely(!msq))
return -ENOMEM;
@@ -157,7 +157,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(&msq->q_perm);
if (retval) {
- kvfree(msq);
+ kfree(msq);
return retval;
}
diff --git a/ipc/sem.c b/ipc/sem.c
index bf534c74293e..971e75d28364 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -217,6 +217,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
* this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
* is inside a spin_lock() and after a write from 0 to non-zero a
* spin_lock()+spin_unlock() is done.
+ * To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
+ * READ_ONCE()/WRITE_ONCE() is used.
*
* 2) queue.status: (SEM_BARRIER_2)
* Initialization is done while holding sem_lock(), so no further barrier is
@@ -342,10 +344,10 @@ static void complexmode_enter(struct sem_array *sma)
* Nothing to do, just reset the
* counter until we return to simple mode.
*/
- sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+ WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
return;
}
- sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+ WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
for (i = 0; i < sma->sem_nsems; i++) {
sem = &sma->sems[i];
@@ -371,7 +373,8 @@ static void complexmode_tryleave(struct sem_array *sma)
/* See SEM_BARRIER_1 for purpose/pairing */
smp_store_release(&sma->use_global_lock, 0);
} else {
- sma->use_global_lock--;
+ WRITE_ONCE(sma->use_global_lock,
+ sma->use_global_lock-1);
}
}
@@ -412,7 +415,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
* Initial check for use_global_lock. Just an optimization,
* no locking, no memory barrier.
*/
- if (!sma->use_global_lock) {
+ if (!READ_ONCE(sma->use_global_lock)) {
/*
* It appears that no complex operation is around.
* Acquire the per-semaphore lock.
@@ -1154,7 +1157,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
/* Wake up all pending processes and let them fail with EIDRM. */
@@ -1937,7 +1940,8 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
rcu_read_unlock();
/* step 2: allocate new undo structure */
- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ new = kvzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems,
+ GFP_KERNEL);
if (!new) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
@@ -1949,7 +1953,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
- kfree(new);
+ kvfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
@@ -1960,7 +1964,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
*/
un = lookup_undo(ulp, semid);
if (un) {
- kfree(new);
+ kvfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
@@ -2420,7 +2424,7 @@ void exit_sem(struct task_struct *tsk)
rcu_read_unlock();
wake_up_q(&wake_q);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
kfree(ulp);
}
@@ -2435,7 +2439,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
/*
* The proc interface isn't aware of sem_lock(), it calls
- * ipc_lock_object() directly (in sysvipc_find_ipc).
+ * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
+ * (in sysvipc_find_ipc)
* In order to stay compatible with sem_lock(), we must
* enter / leave complex_mode.
*/
diff --git a/ipc/shm.c b/ipc/shm.c
index 003234fbbd17..748933e376ca 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -222,7 +222,7 @@ static void shm_rcu_free(struct rcu_head *head)
struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
shm_perm);
security_shm_free(&shp->shm_perm);
- kvfree(shp);
+ kfree(shp);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
@@ -619,7 +619,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
- shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
+ shp = kmalloc(sizeof(*shp), GFP_KERNEL);
if (unlikely(!shp))
return -ENOMEM;
@@ -630,7 +630,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_perm.security = NULL;
error = security_shm_alloc(&shp->shm_perm);
if (error) {
- kvfree(shp);
+ kfree(shp);
return error;
}
diff --git a/ipc/util.c b/ipc/util.c
index cfa0045e748d..0027e47626b7 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -64,6 +64,7 @@
#include <linux/memory.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>
+#include <linux/log2.h>
#include <asm/unistd.h>
@@ -451,6 +452,41 @@ static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
}
/**
+ * ipc_search_maxidx - search for the highest assigned index
+ * @ids: ipc identifier set
+ * @limit: known upper limit for highest assigned index
+ *
+ * The function determines the highest assigned index in @ids. It is intended
+ * to be called when ids->max_idx needs to be updated.
+ * Updating ids->max_idx is necessary when the current highest index ipc
+ * object is deleted.
+ * If no ipc object is allocated, then -1 is returned.
+ *
+ * ipc_ids.rwsem needs to be held by the caller.
+ */
+static int ipc_search_maxidx(struct ipc_ids *ids, int limit)
+{
+ int tmpidx;
+ int i;
+ int retval;
+
+ i = ilog2(limit+1);
+
+ retval = 0;
+ for (; i >= 0; i--) {
+ tmpidx = retval | (1<<i);
+ /*
+ * "0" is a possible index value, thus search using
+ * e.g. 15,7,3,1,0 instead of 16,8,4,2,1.
+ */
+ tmpidx = tmpidx-1;
+ if (idr_get_next(&ids->ipcs_idr, &tmpidx))
+ retval |= (1<<i);
+ }
+ return retval - 1;
+}
+
+/**
* ipc_rmid - remove an ipc identifier
* @ids: ipc identifier set
* @ipcp: ipc perm structure containing the identifier to remove
@@ -468,11 +504,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
ipcp->deleted = true;
if (unlikely(idx == ids->max_idx)) {
- do {
- idx--;
- if (idx == -1)
- break;
- } while (!idr_find(&ids->ipcs_idr, idx));
+ idx = ids->max_idx-1;
+ if (idx >= 0)
+ idx = ipc_search_maxidx(ids, idx);
ids->max_idx = idx;
}
}
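
The new ipc_search_maxidx() above replaces a potentially long downward linear scan with one probe per bit: starting from the most significant bit of the known limit it asks, via idr_get_next(), whether any allocated index lies at or above a candidate value, and keeps the bit only if so. A small, hedged user-space model of the same search follows (runnable as ordinary C; any_at_or_above() stands in for idr_get_next()).

	#include <stdio.h>
	#include <stdbool.h>

	#define NIDX 64
	static bool allocated[NIDX];

	/* Model of idr_get_next(): is any index >= start allocated? */
	static bool any_at_or_above(int start)
	{
		for (int i = start; i < NIDX; i++)
			if (allocated[i])
				return true;
		return false;
	}

	static int search_maxidx(int limit)
	{
		int retval = 0;

		for (int i = 31 - __builtin_clz(limit + 1); i >= 0; i--) {
			/* "0" is a valid index, so probe 15,7,3,1,0 not 16,8,4,2,1. */
			int tmpidx = (retval | (1 << i)) - 1;

			if (any_at_or_above(tmpidx))
				retval |= 1 << i;
		}
		return retval - 1;	/* -1 when no index is allocated at all */
	}

	int main(void)
	{
		allocated[0] = allocated[3] = allocated[41] = true;
		printf("max idx = %d\n", search_maxidx(NIDX - 1));	/* prints 41 */
		return 0;
	}

The probe count stays O(log limit) no matter how sparse the id space is, which keeps the ipc_rmid() path above cheap even after deleting the highest-index object in a large set.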
diff --git a/ipc/util.h b/ipc/util.h
index 5766c61aed0e..2dd7ce0416d8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -145,6 +145,9 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
* ipc_get_maxidx - get the highest assigned index
* @ids: ipc identifier set
*
+ * The function returns the highest assigned index for @ids. The function
+ * doesn't scan the idr tree, it uses a cached value.
+ *
* Called with ipc_ids.rwsem held for reading.
*/
static inline int ipc_get_maxidx(struct ipc_ids *ids)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 034ad93a1ad7..0a28a8095d3e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -32,6 +32,8 @@
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
+
+#include <asm/barrier.h>
#include <asm/unaligned.h>
/* Registers */
@@ -1360,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
}
/**
- * __bpf_prog_run - run eBPF program on a given context
+ * ___bpf_prog_run - run eBPF program on a given context
* @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
* @insn: is the array of eBPF instructions
*
* Decode and execute eBPF instructions.
+ *
+ * Return: whatever value is in %BPF_R0 at program exit
*/
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
{
@@ -1377,6 +1381,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
/* Non-UAPI available opcodes. */
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+ [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1621,7 +1626,21 @@ out:
COND_JMP(s, JSGE, >=)
COND_JMP(s, JSLE, <=)
#undef COND_JMP
- /* STX and ST and LDX*/
+ /* ST, STX and LDX*/
+ ST_NOSPEC:
+ /* Speculation barrier for mitigating Speculative Store Bypass.
+ * In case of arm64, we rely on the firmware mitigation as
+ * controlled via the ssbd kernel parameter. Whenever the
+ * mitigation is enabled, it works for all of the kernel code
+ * with no need to provide any additional instructions here.
+ * In case of x86, we use 'lfence' insn for mitigation. We
+ * reuse preexisting logic from Spectre v1 mitigation that
+ * happens to produce the required code on x86 for v4 as well.
+ */
+#ifdef CONFIG_X86
+ barrier_nospec();
+#endif
+ CONT;
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
@@ -1861,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
*
* Try to JIT eBPF program, if JIT is not available, use interpreter.
* The BPF program will be executed via BPF_PROG_RUN() macro.
+ *
+ * Return: the &fp argument along with &err set to 0 for success or
+ * a negative errno code on failure
*/
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
@@ -2236,8 +2258,14 @@ static void bpf_prog_free_deferred(struct work_struct *work)
#endif
if (aux->dst_trampoline)
bpf_trampoline_put(aux->dst_trampoline);
- for (i = 0; i < aux->func_cnt; i++)
+ for (i = 0; i < aux->func_cnt; i++) {
+ /* We can just unlink the subprog poke descriptor table as
+ * it was originally linked to the main program and is also
+ * released along with it.
+ */
+ aux->func[i]->aux->poke_tab = NULL;
bpf_jit_free(aux->func[i]);
+ }
if (aux->func_cnt) {
kfree(aux->func);
bpf_prog_unlock_free(aux->prog);
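
The ST_NOSPEC handler added above gives the verifier a single instruction it can emit wherever a Spectre v4 (speculative store bypass) barrier is needed. The sketch below shows the instruction encoding as assumed here, reconstructed from the 0xc0 mode visible in the disassembler hunk further down rather than quoted from the patch.

	/* Assumed encoding: class BPF_ST with the non-UAPI mode 0xc0 and every
	 * other field zero -- a pure barrier with no memory operand. */
	#define BPF_NOSPEC	0xc0

	#define BPF_ST_NOSPEC()					\
		((struct bpf_insn) {				\
			.code    = BPF_ST | BPF_NOSPEC,		\
			.dst_reg = 0,				\
			.src_reg = 0,				\
			.off     = 0,				\
			.imm     = 0 })

Per the comment in the hunk, the interpreter maps the opcode to barrier_nospec() (an lfence-based fence) on x86 and leaves it as a no-op on arm64, where the firmware ssbd mitigation already covers interpreted code.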
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 2546dafd6672..fdc20892837c 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -558,7 +558,8 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
for (i = 0; i < map->max_entries; i++) {
- dst = READ_ONCE(dtab->netdev_map[i]);
+ dst = rcu_dereference_check(dtab->netdev_map[i],
+ rcu_read_lock_bh_held());
if (!is_valid_dst(dst, xdp, exclude_ifindex))
continue;
@@ -654,7 +655,8 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
for (i = 0; i < map->max_entries; i++) {
- dst = READ_ONCE(dtab->netdev_map[i]);
+ dst = rcu_dereference_check(dtab->netdev_map[i],
+ rcu_read_lock_bh_held());
if (!dst || dst->dev->ifindex == exclude_ifindex)
continue;
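
The devmap hunks above replace READ_ONCE() with rcu_dereference_check() so lockdep can validate the access: the map slots are RCU-protected and may legitimately be read with either rcu_read_lock() or rcu_read_lock_bh() held (the latter in the XDP path). A hedged sketch of the general pattern, with illustrative names:

	struct dev_slot {
		struct net_device __rcu *dev;	/* written with rcu_assign_pointer() */
	};

	static struct net_device *slot_deref(struct dev_slot *slot)
	{
		/* Passes the lockdep check if either plain RCU or
		 * BH-disabled RCU is held by the caller. */
		return rcu_dereference_check(slot->dev, rcu_read_lock_bh_held());
	}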
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index bbfc6bb79240..ca3cd9aaa6ce 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
verbose(cbs->private_data, "BUG_%02x\n", insn->code);
}
} else if (class == BPF_ST) {
- if (BPF_MODE(insn->code) != BPF_MEM) {
+ if (BPF_MODE(insn->code) == BPF_MEM) {
+ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+ insn->code,
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ insn->dst_reg,
+ insn->off, insn->imm);
+ } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+ verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+ } else {
verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
- return;
}
- verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
- insn->code,
- bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
- insn->dst_reg,
- insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 72c58cc516a3..9c011f3a2687 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1565,8 +1565,8 @@ alloc:
/* We cannot do copy_from_user or copy_to_user inside
* the rcu_read_lock. Allocate enough space here.
*/
- keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
- values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
+ keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
+ values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
if (!keys || !values) {
ret = -ENOMEM;
goto after_loop;
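
The hashtab.c change above is about integer-overflow safety rather than allocation size: key_size * bucket_size is influenced by user-controlled map parameters, and kvmalloc_array() rejects products that would wrap around instead of allocating a short buffer. A hedged user-space model of the check (ordinary C; the helper name is hypothetical):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void *xmalloc_array(size_t n, size_t size)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;	/* product overflows: refuse, like kvmalloc_array() */
		return malloc(n * size);
	}

	int main(void)
	{
		/* Two 2^33 factors wrap a 64-bit size_t; the checked helper
		 * catches it instead of handing back a too-small buffer. */
		void *p = xmalloc_array((size_t)1 << 33, (size_t)1 << 33);

		printf("%s\n", p ? "allocated (bug!)" : "overflow rejected");
		free(p);
		return 0;
	}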
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 62cf00383910..55f83ea09dae 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -353,9 +353,15 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
- struct cgroup *cgrp = task_dfl_cgroup(current);
+ struct cgroup *cgrp;
+ u64 cgrp_id;
- return cgroup_id(cgrp);
+ rcu_read_lock();
+ cgrp = task_dfl_cgroup(current);
+ cgrp_id = cgroup_id(cgrp);
+ rcu_read_unlock();
+
+ return cgrp_id;
}
const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
@@ -366,13 +372,17 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
- struct cgroup *cgrp = task_dfl_cgroup(current);
+ struct cgroup *cgrp;
struct cgroup *ancestor;
+ u64 cgrp_id;
+ rcu_read_lock();
+ cgrp = task_dfl_cgroup(current);
ancestor = cgroup_ancestor(cgrp, ancestor_level);
- if (!ancestor)
- return 0;
- return cgroup_id(ancestor);
+ cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
+ rcu_read_unlock();
+
+ return cgrp_id;
}
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
@@ -397,8 +407,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
void *ptr;
int i;
- for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
- if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+ for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+ if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
continue;
storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
@@ -1070,12 +1080,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
NULL : &bpf_probe_read_kernel_str_proto;
case BPF_FUNC_snprintf_btf:
return &bpf_snprintf_btf_proto;
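
The cgroup-id helpers above are reworked so the RCU-protected cgroup pointer is never used outside the read-side critical section: the 64-bit id is copied out while the pointer is still guaranteed valid, and only the plain value is returned after rcu_read_unlock(). A hedged restatement of the pattern with an illustrative function name:

	static u64 current_cgroup_id_sketch(void)
	{
		struct cgroup *cgrp;
		u64 id;

		rcu_read_lock();
		cgrp = task_dfl_cgroup(current);	/* pointer only stable under RCU */
		id = cgroup_id(cgrp);			/* copy the value out ...       */
		rcu_read_unlock();

		return id;				/* ... and return it unlocked   */
	}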
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index be38bb930bf1..381d3d6f24bc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2610,6 +2610,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0)
reg = &cur->regs[value_regno];
+ if (!env->bypass_spec_v4) {
+ bool sanitize = reg && is_spillable_regtype(reg->type);
+
+ for (i = 0; i < size; i++) {
+ if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+ sanitize = true;
+ break;
+ }
+ }
+
+ if (sanitize)
+ env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+ }
if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
@@ -2632,47 +2645,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
-
if (state != cur && reg->type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
-
- if (!env->bypass_spec_v4) {
- bool sanitize = false;
-
- if (state->stack[spi].slot_type[0] == STACK_SPILL &&
- register_is_const(&state->stack[spi].spilled_ptr))
- sanitize = true;
- for (i = 0; i < BPF_REG_SIZE; i++)
- if (state->stack[spi].slot_type[i] == STACK_MISC) {
- sanitize = true;
- break;
- }
- if (sanitize) {
- int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
- int soff = (-spi - 1) * BPF_REG_SIZE;
-
- /* detected reuse of integer stack slot with a pointer
- * which means either llvm is reusing stack slot or
- * an attacker is trying to exploit CVE-2018-3639
- * (speculative store bypass)
- * Have to sanitize that slot with preemptive
- * store of zero.
- */
- if (*poff && *poff != soff) {
- /* disallow programs where single insn stores
- * into two different stack slots, since verifier
- * cannot sanitize them
- */
- verbose(env,
- "insn %d cannot access two stack slots fp%d and fp%d",
- insn_idx, *poff, soff);
- return -EINVAL;
- }
- *poff = soff;
- }
- }
save_register_state(state, spi, reg);
} else {
u8 type = STACK_MISC;
@@ -3677,6 +3653,8 @@ continue_func:
if (tail_call_reachable)
for (j = 0; j < frame; j++)
subprog[ret_prog[j]].tail_call_reachable = true;
+ if (subprog[0].tail_call_reachable)
+ env->prog->aux->tail_call_reachable = true;
/* end of for() loop means the last insn of the 'subprog'
* was reached. Doesn't matter whether it was JA or EXIT
@@ -6559,6 +6537,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+ /* Limit pruning on unknown scalars to enable deep search for
+ * potential masking differences from other program paths.
+ */
+ if (!off_is_imm)
+ env->explore_alu_limits = true;
}
err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -9934,8 +9918,8 @@ next:
}
/* Returns true if (rold safe implies rcur safe) */
-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
- struct bpf_id_pair *idmap)
+static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+ struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
{
bool equal;
@@ -9961,6 +9945,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
switch (rold->type) {
case SCALAR_VALUE:
+ if (env->explore_alu_limits)
+ return false;
if (rcur->type == SCALAR_VALUE) {
if (!rold->precise && !rcur->precise)
return true;
@@ -10051,9 +10037,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
}
-static bool stacksafe(struct bpf_func_state *old,
- struct bpf_func_state *cur,
- struct bpf_id_pair *idmap)
+static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+ struct bpf_func_state *cur, struct bpf_id_pair *idmap)
{
int i, spi;
@@ -10098,9 +10083,8 @@ static bool stacksafe(struct bpf_func_state *old,
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
continue;
- if (!regsafe(&old->stack[spi].spilled_ptr,
- &cur->stack[spi].spilled_ptr,
- idmap))
+ if (!regsafe(env, &old->stack[spi].spilled_ptr,
+ &cur->stack[spi].spilled_ptr, idmap))
/* when explored and current stack slot are both storing
* spilled registers, check that stored pointers types
* are the same as well.
@@ -10157,10 +10141,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
for (i = 0; i < MAX_BPF_REG; i++)
- if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+ if (!regsafe(env, &old->regs[i], &cur->regs[i],
+ env->idmap_scratch))
return false;
- if (!stacksafe(old, cur, env->idmap_scratch))
+ if (!stacksafe(env, old, cur, env->idmap_scratch))
return false;
if (!refsafe(old, cur))
@@ -11678,6 +11663,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
if (aux_data[i].seen)
continue;
memcpy(insn + i, &trap, sizeof(trap));
+ aux_data[i].zext_dst = false;
}
}
@@ -11904,35 +11890,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
bpf_convert_ctx_access_t convert_ctx_access;
+ bool ctx_access;
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
- insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
+ insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
type = BPF_READ;
- else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
- insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
- insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
- insn->code == (BPF_STX | BPF_MEM | BPF_DW))
+ ctx_access = true;
+ } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
type = BPF_WRITE;
- else
+ ctx_access = BPF_CLASS(insn->code) == BPF_STX;
+ } else {
continue;
+ }
if (type == BPF_WRITE &&
- env->insn_aux_data[i + delta].sanitize_stack_off) {
+ env->insn_aux_data[i + delta].sanitize_stack_spill) {
struct bpf_insn patch[] = {
- /* Sanitize suspicious stack slot with zero.
- * There are no memory dependencies for this store,
- * since it's only using frame pointer and immediate
- * constant of zero
- */
- BPF_ST_MEM(BPF_DW, BPF_REG_FP,
- env->insn_aux_data[i + delta].sanitize_stack_off,
- 0),
- /* the original STX instruction will immediately
- * overwrite the same stack slot with appropriate value
- */
*insn,
+ BPF_ST_NOSPEC(),
};
cnt = ARRAY_SIZE(patch);
@@ -11946,6 +11930,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
continue;
}
+ if (!ctx_access)
+ continue;
+
switch (env->insn_aux_data[i + delta].ptr_type) {
case PTR_TO_CTX:
if (!ops->convert_ctx_access)
@@ -12121,33 +12108,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
goto out_free;
func[i]->is_func = 1;
func[i]->aux->func_idx = i;
- /* the btf and func_info will be freed only at prog->aux */
+ /* Below members will be freed only at prog->aux */
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;
+ func[i]->aux->poke_tab = prog->aux->poke_tab;
+ func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
for (j = 0; j < prog->aux->size_poke_tab; j++) {
- u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
- int ret;
+ struct bpf_jit_poke_descriptor *poke;
- if (!(insn_idx >= subprog_start &&
- insn_idx <= subprog_end))
- continue;
-
- ret = bpf_jit_add_poke_descriptor(func[i],
- &prog->aux->poke_tab[j]);
- if (ret < 0) {
- verbose(env, "adding tail call poke descriptor failed\n");
- goto out_free;
- }
-
- func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
-
- map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
- ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
- if (ret < 0) {
- verbose(env, "tracking tail call prog failed\n");
- goto out_free;
- }
+ poke = &prog->aux->poke_tab[j];
+ if (poke->insn_idx < subprog_end &&
+ poke->insn_idx >= subprog_start)
+ poke->aux = func[i]->aux;
}
/* Use bpf_prog_F_tag to indicate functions in stack traces.
@@ -12178,18 +12151,6 @@ static int jit_subprogs(struct bpf_verifier_env *env)
cond_resched();
}
- /* Untrack main program's aux structs so that during map_poke_run()
- * we will not stumble upon the unfilled poke descriptors; each
- * of the main program's poke descs got distributed across subprogs
- * and got tracked onto map, so we are sure that none of them will
- * be missed after the operation below
- */
- for (i = 0; i < prog->aux->size_poke_tab; i++) {
- map_ptr = prog->aux->poke_tab[i].tail_call.map;
-
- map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
- }
-
/* at this point all bpf functions were successfully JITed
* now populate all bpf_calls with correct addresses and
* run last pass of JIT
@@ -12267,14 +12228,22 @@ static int jit_subprogs(struct bpf_verifier_env *env)
bpf_prog_jit_attempt_done(prog);
return 0;
out_free:
+ /* We failed JIT'ing, so at this point we need to unregister poke
+ * descriptors from subprogs, so that kernel is not attempting to
+ * patch it anymore as we're freeing the subprog JIT memory.
+ */
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ map_ptr = prog->aux->poke_tab[i].tail_call.map;
+ map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+ }
+ /* At this point we're guaranteed that poke descriptors are not
+ * live anymore. We can just unlink its descriptor table as it's
+ * released with the main prog.
+ */
for (i = 0; i < env->subprog_cnt; i++) {
if (!func[i])
continue;
-
- for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
- map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
- map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
- }
+ func[i]->aux->poke_tab = NULL;
bpf_jit_free(func[i]);
}
kfree(func);
@@ -12768,37 +12737,6 @@ static void free_states(struct bpf_verifier_env *env)
}
}
-/* The verifier is using insn_aux_data[] to store temporary data during
- * verification and to store information for passes that run after the
- * verification like dead code sanitization. do_check_common() for subprogram N
- * may analyze many other subprograms. sanitize_insn_aux_data() clears all
- * temporary data after do_check_common() finds that subprogram N cannot be
- * verified independently. pass_cnt counts the number of times
- * do_check_common() was run and insn->aux->seen tells the pass number
- * insn_aux_data was touched. These variables are compared to clear temporary
- * data from failed pass. For testing and experiments do_check_common() can be
- * run multiple times even when prior attempt to verify is unsuccessful.
- *
- * Note that special handling is needed on !env->bypass_spec_v1 if this is
- * ever called outside of error path with subsequent program rejection.
- */
-static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
-{
- struct bpf_insn *insn = env->prog->insnsi;
- struct bpf_insn_aux_data *aux;
- int i, class;
-
- for (i = 0; i < env->prog->len; i++) {
- class = BPF_CLASS(insn[i].code);
- if (class != BPF_LDX && class != BPF_STX)
- continue;
- aux = &env->insn_aux_data[i];
- if (aux->seen != env->pass_cnt)
- continue;
- memset(aux, 0, offsetof(typeof(*aux), orig_idx));
- }
-}
-
static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -12875,9 +12813,6 @@ out:
if (!ret && pop_log)
bpf_vlog_reset(&env->log, 0);
free_states(env);
- if (ret)
- /* clean aux data in case subprog was rejected */
- sanitize_insn_aux_data(env);
return ret;
}
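
The verifier changes above replace the old per-slot zero-store sanitization with the new barrier instruction: when insn_aux_data marks a stack spill as needing Spectre v4 sanitization, convert_ctx_accesses() keeps the original store and appends a BPF_ST_NOSPEC(). A hedged illustration of a spill such a pass would rewrite, as a hypothetical fragment of an instruction array built with the standard macros:

	/* Before rewriting: spill a spillable register (e.g. a pointer in r0). */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),	/* *(u64 *)(fp - 8) = r0 */

	/* After rewriting (conceptually): the store stays, a barrier follows. */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ST_NOSPEC(),

Because the slot contents no longer need to be pre-zeroed, the single-insn/two-slot restriction of the old scheme and the sanitize_insn_aux_data() cleanup it required can both go away, which is what the removals above do.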
diff --git a/kernel/cfi.c b/kernel/cfi.c
index e17a56639766..9594cfd1cf2c 100644
--- a/kernel/cfi.c
+++ b/kernel/cfi.c
@@ -248,9 +248,9 @@ static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
{
cfi_check_fn fn;
- rcu_read_lock_sched();
+ rcu_read_lock_sched_notrace();
fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
- rcu_read_unlock_sched();
+ rcu_read_unlock_sched_notrace();
return fn;
}
@@ -269,11 +269,11 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
cfi_check_fn fn = NULL;
struct module *mod;
- rcu_read_lock_sched();
+ rcu_read_lock_sched_notrace();
mod = __module_address(ptr);
if (mod)
fn = mod->cfi_check;
- rcu_read_unlock_sched();
+ rcu_read_unlock_sched_notrace();
return fn;
}
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index ee93b6e89587..de2c432dee20 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -911,13 +911,11 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
if (opt == -ENOPARAM) {
- if (strcmp(param->key, "source") == 0) {
- if (fc->source)
- return invalf(fc, "Multiple sources not supported");
- fc->source = param->string;
- param->string = NULL;
- return 0;
- }
+ int ret;
+
+ ret = vfs_parse_fs_param_source(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
for_each_subsys(ss, i) {
if (strcmp(param->key, ss->legacy_name))
continue;
@@ -1223,9 +1221,7 @@ int cgroup1_get_tree(struct fs_context *fc)
ret = cgroup_do_get_tree(fc);
if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
- struct super_block *sb = fc->root->d_sb;
- dput(fc->root);
- deactivate_locked_super(sb);
+ fc_drop_locked(fc);
ret = 1;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index ea5b0f01d2b3..3a0161c21b6b 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5887,6 +5887,31 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
}
/*
+ * cgroup_get_from_id : get the cgroup associated with cgroup id
+ * @id: cgroup id
+ * On success return the cgrp, on failure return NULL
+ */
+struct cgroup *cgroup_get_from_id(u64 id)
+{
+ struct kernfs_node *kn;
+ struct cgroup *cgrp = NULL;
+
+ mutex_lock(&cgroup_mutex);
+ kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
+ if (!kn)
+ goto out_unlock;
+
+ cgrp = kn->priv;
+ if (cgroup_is_dead(cgrp) || !cgroup_tryget(cgrp))
+ cgrp = NULL;
+ kernfs_put(kn);
+out_unlock:
+ mutex_unlock(&cgroup_mutex);
+ return cgrp;
+}
+EXPORT_SYMBOL_GPL(cgroup_get_from_id);
+
+/*
* proc_cgroup_show()
* - Print task's cgroup paths into seq_file, one line for each hierarchy
* - Used for /proc/<pid>/cgroup.
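
A hedged usage sketch for the new cgroup_get_from_id() exported above: on success the caller holds the reference taken by cgroup_tryget() and is expected to drop it; NULL means the id does not resolve to a live cgroup on the default hierarchy.

	struct cgroup *cgrp;

	cgrp = cgroup_get_from_id(id);		/* id: a 64-bit kernfs node id */
	if (cgrp) {
		/* ... use cgrp ... */
		cgroup_put(cgrp);		/* pairs with the tryget on success */
	}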
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 7f0e58917432..b264ab5652ba 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -347,19 +347,20 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
}
static struct cgroup_rstat_cpu *
-cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
struct cgroup_rstat_cpu *rstatc;
rstatc = get_cpu_ptr(cgrp->rstat_cpu);
- u64_stats_update_begin(&rstatc->bsync);
+ *flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
return rstatc;
}
static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
- struct cgroup_rstat_cpu *rstatc)
+ struct cgroup_rstat_cpu *rstatc,
+ unsigned long flags)
{
- u64_stats_update_end(&rstatc->bsync);
+ u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
cgroup_rstat_updated(cgrp, smp_processor_id());
put_cpu_ptr(rstatc);
}
@@ -367,18 +368,20 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
struct cgroup_rstat_cpu *rstatc;
+ unsigned long flags;
- rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+ rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
- cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+ cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}
void __cgroup_account_cputime_field(struct cgroup *cgrp,
enum cpu_usage_stat index, u64 delta_exec)
{
struct cgroup_rstat_cpu *rstatc;
+ unsigned long flags;
- rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+ rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
switch (index) {
case CPUTIME_USER:
@@ -394,7 +397,7 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
break;
}
- cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+ cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}
/*
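
The rstat.c conversion above exists because __cgroup_account_cputime*() can run in both task and interrupt context: on 32-bit kernels the u64_stats begin/end pair is a seqcount write section, and a writer interrupted by a second writer on the same CPU corrupts the sequence, so interrupts are disabled across the update. A hedged sketch of the pattern with an illustrative counter:

	struct counter {
		struct u64_stats_sync sync;
		u64 value;
	};

	static void counter_add(struct counter *c, u64 delta)
	{
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&c->sync);
		c->value += delta;
		u64_stats_update_end_irqrestore(&c->sync, flags);
	}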
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index da449c1cdca7..eb53f5ec62c9 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -4,6 +4,7 @@
* Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
*/
+#include <linux/buildid.h>
#include <linux/crash_core.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
@@ -378,53 +379,6 @@ phys_addr_t __weak paddr_vmcoreinfo_note(void)
}
EXPORT_SYMBOL(paddr_vmcoreinfo_note);
-#define NOTES_SIZE (&__stop_notes - &__start_notes)
-#define BUILD_ID_MAX SHA1_DIGEST_SIZE
-#define NT_GNU_BUILD_ID 3
-
-struct elf_note_section {
- struct elf_note n_hdr;
- u8 n_data[];
-};
-
-/*
- * Add build ID from .notes section as generated by the GNU ld(1)
- * or LLVM lld(1) --build-id option.
- */
-static void add_build_id_vmcoreinfo(void)
-{
- char build_id[BUILD_ID_MAX * 2 + 1];
- int n_remain = NOTES_SIZE;
-
- while (n_remain >= sizeof(struct elf_note)) {
- const struct elf_note_section *note_sec =
- &__start_notes + NOTES_SIZE - n_remain;
- const u32 n_namesz = note_sec->n_hdr.n_namesz;
-
- if (note_sec->n_hdr.n_type == NT_GNU_BUILD_ID &&
- n_namesz != 0 &&
- !strcmp((char *)&note_sec->n_data[0], "GNU")) {
- if (note_sec->n_hdr.n_descsz <= BUILD_ID_MAX) {
- const u32 n_descsz = note_sec->n_hdr.n_descsz;
- const u8 *s = &note_sec->n_data[n_namesz];
-
- s = PTR_ALIGN(s, 4);
- bin2hex(build_id, s, n_descsz);
- build_id[2 * n_descsz] = '\0';
- VMCOREINFO_BUILD_ID(build_id);
- return;
- }
- pr_warn("Build ID is too large to include in vmcoreinfo: %u > %u\n",
- note_sec->n_hdr.n_descsz,
- BUILD_ID_MAX);
- return;
- }
- n_remain -= sizeof(struct elf_note) +
- ALIGN(note_sec->n_hdr.n_namesz, 4) +
- ALIGN(note_sec->n_hdr.n_descsz, 4);
- }
-}
-
static int __init crash_save_vmcoreinfo_init(void)
{
vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL);
@@ -443,7 +397,7 @@ static int __init crash_save_vmcoreinfo_init(void)
}
VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
- add_build_id_vmcoreinfo();
+ VMCOREINFO_BUILD_ID();
VMCOREINFO_PAGESIZE(PAGE_SIZE);
VMCOREINFO_SYMBOL(init_uts_ns);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 4708aec492df..b4aa6bb6b2bd 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -1032,12 +1032,13 @@ dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
/*
* Take the following action on reboot notify depending on value:
* 1 == Enter debugger
- * 0 == [the default] detatch debug client
+ * 0 == [the default] detach debug client
* -1 == Do nothing... and use this until the board resets
*/
switch (kgdbreboot) {
case 1:
kgdb_breakpoint();
+ goto done;
case -1:
goto done;
}
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index 8372897402f4..b6f28fad4307 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -1045,8 +1045,8 @@ int gdb_serial_stub(struct kgdb_state *ks)
gdb_cmd_detachkill(ks);
return DBG_PASS_EVENT;
}
-#endif
fallthrough;
+#endif
case 'C': /* Exception passing */
tmp = gdb_cmd_exception_pass(ks);
if (tmp > 0)
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 622410c45da1..d8ee5647b732 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -253,7 +253,7 @@ static char *kdballocenv(size_t bytes)
* Parameters:
* match A character string representing a numeric value
* Outputs:
- * *value the unsigned long represntation of the env variable 'match'
+ * *value the unsigned long representation of the env variable 'match'
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
@@ -356,7 +356,7 @@ static void kdb_printenv(void)
* Parameters:
* arg A character string representing a numeric value
* Outputs:
- * *value the unsigned long represntation of arg.
+ * *value the unsigned long representation of arg.
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
@@ -470,7 +470,7 @@ static int kdb_check_regs(void)
* symbol name, and offset to the caller.
*
* The argument may consist of a numeric value (decimal or
- * hexidecimal), a symbol name, a register name (preceded by the
+ * hexadecimal), a symbol name, a register name (preceded by the
* percent sign), an environment variable with a numeric value
* (preceded by a dollar sign) or a simple arithmetic expression
* consisting of a symbol name, +/-, and a numeric constant value
@@ -894,7 +894,7 @@ static void parse_grep(const char *str)
* Limited to 20 tokens.
*
* Real rudimentary tokenization. Basically only whitespace
- * is considered a token delimeter (but special consideration
+ * is considered a token delimiter (but special consideration
* is taken of the '=' sign as used by the 'set' command).
*
* The algorithm used to tokenize the input string relies on
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index ccbed9089808..170c69aedebb 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -64,7 +64,7 @@
/*
* KDB_MAXBPT describes the total number of breakpoints
- * supported by this architecure.
+ * supported by this architecture.
*/
#define KDB_MAXBPT 16
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 5b5b6c7ec7f2..794e76b03b34 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -111,7 +111,7 @@ static int dma_assign_coherent_memory(struct device *dev,
* Declare a region of memory to be handed out by dma_alloc_coherent() when it
* is asked for coherent memory for this device. This shall only be used
* from platform code, usually based on the device tree description.
- *
+ *
* phys_addr is the CPU physical address to which the memory is currently
* assigned (this will be ioremapped so the CPU can access the region).
*
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 14de1271463f..dadae6255d05 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -566,11 +566,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
if (rc == -ENOMEM) {
pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
+ } else if (rc == -EEXIST) {
+ pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
-
- /* TODO: report -EEXIST errors here as overlapping mappings are
- * not supported by the DMA API
- */
}
static int dma_debug_create_entries(gfp_t gfp)
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 910ae69cae77..af4a6ef48ce0 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -5,6 +5,13 @@
*/
#include <linux/dma-map-ops.h>
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+ if (is_vmalloc_addr(cpu_addr))
+ return vmalloc_to_page(cpu_addr);
+ return virt_to_page(cpu_addr);
+}
+
/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page = dma_common_vaddr_to_page(cpu_addr);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
+ struct page *page = dma_common_vaddr_to_page(cpu_addr);
int ret = -ENXIO;
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;
return remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+ page_to_pfn(page) + vma->vm_pgoff,
user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
return -ENXIO;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 464917096e73..1cb1f9b8392e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11917,6 +11917,37 @@ again:
return gctx;
}
+static bool
+perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
+{
+ unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
+ bool is_capable = perfmon_capable();
+
+ if (attr->sigtrap) {
+ /*
+ * perf_event_attr::sigtrap sends signals to the other task.
+ * Require the current task to also have CAP_KILL.
+ */
+ rcu_read_lock();
+ is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
+ rcu_read_unlock();
+
+ /*
+ * If the required capabilities aren't available, checks for
+ * ptrace permissions: upgrade to ATTACH, since sending signals
+ * can effectively change the target task.
+ */
+ ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
+ }
+
+ /*
+ * Preserve ptrace permission check for backwards compatibility. The
+ * ptrace check also includes checks that the current task and other
+ * task have matching uids, and is therefore not done here explicitly.
+ */
+ return is_capable || ptrace_may_access(task, ptrace_mode);
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -12163,15 +12194,13 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_file;
/*
- * Preserve ptrace permission check for backwards compatibility.
- *
* We must hold exec_update_lock across this and any potential
* perf_install_in_context() call for this new event to
* serialize against exec() altering our credentials (and the
* perf_event_exit_task() that could imply).
*/
err = -EACCES;
- if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ if (!perf_check_permission(&attr, task))
goto err_cred;
}
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 34a1dc2abc7d..1966a749e0d9 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -56,9 +56,7 @@ if [ -f kernel/kheaders.md5 ] &&
exit
fi
-if [ "${quiet}" != "silent_" ]; then
- echo " GEN $tarfile"
-fi
+echo " GEN $tarfile"
rm -rf $cpio_dir
mkdir $cpio_dir
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index b0ce8b3f3822..9888e2bc8c76 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
+#include <linux/panic_notifier.h>
#include <linux/sysctl.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 7f04c7d8296e..a98bcfc4be7b 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
} else {
switch (__irq_startup_managed(desc, aff, force)) {
case IRQ_STARTUP_NORMAL:
+ if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
+ irq_setup_affinity(desc);
ret = __irq_startup(desc);
- irq_setup_affinity(desc);
+ if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
+ irq_setup_affinity(desc);
break;
case IRQ_STARTUP_MANAGED:
irq_do_set_affinity(d, aff, false);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index f4dd5186858a..fadb93766020 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -682,7 +682,6 @@ EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
* usually for a root interrupt controller
* @domain: The domain where to perform the lookup
* @hwirq: The HW irq number to convert to a logical one
- * @lookup: Whether to perform the domain lookup or not
* @regs: Register file coming from the low-level handling code
*
* Returns: 0 on success, or -EINVAL if conversion has failed
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index c41965e348b5..85df3ca03efe 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -476,11 +476,6 @@ skip_activate:
return 0;
cleanup:
- for_each_msi_vector(desc, i, dev) {
- irq_data = irq_domain_get_irq_data(domain, i);
- if (irqd_is_activated(irq_data))
- irq_domain_deactivate_irq(irq_data);
- }
msi_domain_free_irqs(domain, dev);
return ret;
}
@@ -505,7 +500,15 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
+ struct irq_data *irq_data;
struct msi_desc *desc;
+ int i;
+
+ for_each_msi_vector(desc, i, dev) {
+ irq_data = irq_domain_get_irq_data(domain, i);
+ if (irqd_is_activated(irq_data))
+ irq_domain_deactivate_irq(irq_data);
+ }
for_each_msi_entry(desc, dev) {
/*
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index d309d6fbf5bd..4d2a702d7aa9 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -453,6 +453,11 @@ static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
*/
index = irq_timings_interval_index(interval);
+ if (index > PREDICTION_BUFFER_SIZE - 1) {
+ irqs->count = 0;
+ return;
+ }
+
/*
* Store the index as an element of the pattern in another
* circular array.
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bdb0681bece8..b156e152d6b4 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -316,14 +316,16 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end)
}
static int __jump_label_text_reserved(struct jump_entry *iter_start,
- struct jump_entry *iter_stop, void *start, void *end)
+ struct jump_entry *iter_stop, void *start, void *end, bool init)
{
struct jump_entry *iter;
iter = iter_start;
while (iter < iter_stop) {
- if (addr_conflict(iter, start, end))
- return 1;
+ if (init || !jump_entry_is_init(iter)) {
+ if (addr_conflict(iter, start, end))
+ return 1;
+ }
iter++;
}
@@ -562,7 +564,7 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
ret = __jump_label_text_reserved(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries,
- start, end);
+ start, end, mod->state == MODULE_STATE_COMING);
module_put(mod);
@@ -788,8 +790,9 @@ early_initcall(jump_label_init_module);
*/
int jump_label_text_reserved(void *start, void *end)
{
+ bool init = system_state < SYSTEM_RUNNING;
int ret = __jump_label_text_reserved(__start___jump_table,
- __stop___jump_table, start, end);
+ __stop___jump_table, start, end, init);
if (ret)
return ret;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index c851ca0ed357..0ba87982d017 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -25,7 +25,10 @@
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
/*
* These will be re-linked against their real values
@@ -297,21 +300,14 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
get_symbol_pos(addr, symbolsize, offset);
return 1;
}
- return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
+ return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||
!!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}
-/*
- * Lookup an address
- * - modname is set to NULL if it's in the kernel.
- * - We guarantee that the returned name is valid until we reschedule even if.
- * It resides in a module.
- * - We also guarantee that modname will be valid until rescheduled.
- */
-const char *kallsyms_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname, char *namebuf)
+static const char *kallsyms_lookup_buildid(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset, char **modname,
+ const unsigned char **modbuildid, char *namebuf)
{
const char *ret;
@@ -327,6 +323,8 @@ const char *kallsyms_lookup(unsigned long addr,
namebuf, KSYM_NAME_LEN);
if (modname)
*modname = NULL;
+ if (modbuildid)
+ *modbuildid = NULL;
ret = namebuf;
goto found;
@@ -334,7 +332,7 @@ const char *kallsyms_lookup(unsigned long addr,
/* See if it's in a module or a BPF JITed image. */
ret = module_address_lookup(addr, symbolsize, offset,
- modname, namebuf);
+ modname, modbuildid, namebuf);
if (!ret)
ret = bpf_address_lookup(addr, symbolsize,
offset, modname, namebuf);
@@ -348,6 +346,22 @@ found:
return ret;
}
+/*
+ * Lookup an address
+ * - modname is set to NULL if it's in the kernel.
+ * - We guarantee that the returned name is valid until we reschedule even if.
+ * It resides in a module.
+ * - We also guarantee that modname will be valid until rescheduled.
+ */
+const char *kallsyms_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, char *namebuf)
+{
+ return kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
+ NULL, namebuf);
+}
+
int lookup_symbol_name(unsigned long addr, char *symname)
{
int res;
@@ -404,15 +418,17 @@ found:
/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
- int symbol_offset, int add_offset)
+ int symbol_offset, int add_offset, int add_buildid)
{
char *modname;
+ const unsigned char *buildid;
const char *name;
unsigned long offset, size;
int len;
address += symbol_offset;
- name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
+ name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
+ buffer);
if (!name)
return sprintf(buffer, "0x%lx", address - symbol_offset);
@@ -424,8 +440,19 @@ static int __sprint_symbol(char *buffer, unsigned long address,
if (add_offset)
len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
- if (modname)
- len += sprintf(buffer + len, " [%s]", modname);
+ if (modname) {
+ len += sprintf(buffer + len, " [%s", modname);
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+ if (add_buildid && buildid) {
+ /* build ID should match length of sprintf */
+#if IS_ENABLED(CONFIG_MODULES)
+ static_assert(sizeof(typeof_member(struct module, build_id)) == 20);
+#endif
+ len += sprintf(buffer + len, " %20phN", buildid);
+ }
+#endif
+ len += sprintf(buffer + len, "]");
+ }
return len;
}
@@ -443,11 +470,28 @@ static int __sprint_symbol(char *buffer, unsigned long address,
*/
int sprint_symbol(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, 0, 1);
+ return __sprint_symbol(buffer, address, 0, 1, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol);
/**
+ * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function looks up a kernel symbol with @address and stores its name,
+ * offset, size, module name and module build ID to @buffer if possible. If no
+ * symbol was found, just saves its @address as is.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ return __sprint_symbol(buffer, address, 0, 1, 1);
+}
+EXPORT_SYMBOL_GPL(sprint_symbol_build_id);
+
+/**
* sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
* @buffer: buffer to be stored
* @address: address to lookup
@@ -460,7 +504,7 @@ EXPORT_SYMBOL_GPL(sprint_symbol);
*/
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, 0, 0);
+ return __sprint_symbol(buffer, address, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
@@ -480,7 +524,27 @@ EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
*/
int sprint_backtrace(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, -1, 1);
+ return __sprint_symbol(buffer, address, -1, 1, 0);
+}
+
+/**
+ * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function is for stack backtrace and does the same thing as
+ * sprint_symbol() but with modified/decreased @address. If there is a
+ * tail-call to the function marked "noreturn", gcc optimized out code after
+ * the call so that the stack-saved return address could point outside of the
+ * caller. This function ensures that kallsyms will find the original caller
+ * by decreasing @address. This function also appends the module build ID to
+ * the @buffer if @address is within a kernel module.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_backtrace_build_id(char *buffer, unsigned long address)
+{
+ return __sprint_symbol(buffer, address, -1, 1, 1);
}
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 45c821d4e8bd..26709ea65c71 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -380,9 +380,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
if (consumed) {
kcsan_save_irqtrace(current);
- kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
- KCSAN_REPORT_CONSUMED_WATCHPOINT,
- watchpoint - watchpoints);
+ kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
kcsan_restore_irqtrace(current);
} else {
/*
@@ -407,12 +405,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
atomic_long_t *watchpoint;
- union {
- u8 _1;
- u16 _2;
- u32 _4;
- u64 _8;
- } expect_value;
+ u64 old, new, diff;
unsigned long access_mask;
enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
unsigned long ua_flags = user_access_save();
@@ -468,19 +461,19 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* Read the current value, to later check and infer a race if the data
* was modified via a non-instrumented access, e.g. from a device.
*/
- expect_value._8 = 0;
+ old = 0;
switch (size) {
case 1:
- expect_value._1 = READ_ONCE(*(const u8 *)ptr);
+ old = READ_ONCE(*(const u8 *)ptr);
break;
case 2:
- expect_value._2 = READ_ONCE(*(const u16 *)ptr);
+ old = READ_ONCE(*(const u16 *)ptr);
break;
case 4:
- expect_value._4 = READ_ONCE(*(const u32 *)ptr);
+ old = READ_ONCE(*(const u32 *)ptr);
break;
case 8:
- expect_value._8 = READ_ONCE(*(const u64 *)ptr);
+ old = READ_ONCE(*(const u64 *)ptr);
break;
default:
break; /* ignore; we do not diff the values */
@@ -506,33 +499,30 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* racy access.
*/
access_mask = get_ctx()->access_mask;
+ new = 0;
switch (size) {
case 1:
- expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
- if (access_mask)
- expect_value._1 &= (u8)access_mask;
+ new = READ_ONCE(*(const u8 *)ptr);
break;
case 2:
- expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
- if (access_mask)
- expect_value._2 &= (u16)access_mask;
+ new = READ_ONCE(*(const u16 *)ptr);
break;
case 4:
- expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
- if (access_mask)
- expect_value._4 &= (u32)access_mask;
+ new = READ_ONCE(*(const u32 *)ptr);
break;
case 8:
- expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
- if (access_mask)
- expect_value._8 &= (u64)access_mask;
+ new = READ_ONCE(*(const u64 *)ptr);
break;
default:
break; /* ignore; we do not diff the values */
}
+ diff = old ^ new;
+ if (access_mask)
+ diff &= access_mask;
+
/* Were we able to observe a value-change? */
- if (expect_value._8 != 0)
+ if (diff != 0)
value_change = KCSAN_VALUE_CHANGE_TRUE;
/* Check if this access raced with another. */
@@ -566,8 +556,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
- kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
- watchpoint - watchpoints);
+ kcsan_report_known_origin(ptr, size, type, value_change,
+ watchpoint - watchpoints,
+ old, new, access_mask);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
@@ -576,9 +567,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
- kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
- KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
- watchpoint - watchpoints);
+ kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
}
/*
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 9881099d4179..f36e25c497ed 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -116,30 +116,27 @@ enum kcsan_value_change {
KCSAN_VALUE_CHANGE_TRUE,
};
-enum kcsan_report_type {
- /*
- * The thread that set up the watchpoint and briefly stalled was
- * signalled that another thread triggered the watchpoint.
- */
- KCSAN_REPORT_RACE_SIGNAL,
-
- /*
- * A thread found and consumed a matching watchpoint.
- */
- KCSAN_REPORT_CONSUMED_WATCHPOINT,
+/*
+ * The calling thread hit and consumed a watchpoint: set the access information
+ * to be consumed by the reporting thread. No report is printed yet.
+ */
+void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
+ int watchpoint_idx);
- /*
- * No other thread was observed to race with the access, but the data
- * value before and after the stall differs.
- */
- KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
-};
+/*
+ * The calling thread observed that the watchpoint it set up was hit and
+ * consumed: print the full report based on information set by the racing
+ * thread.
+ */
+void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change, int watchpoint_idx,
+ u64 old, u64 new, u64 mask);
/*
- * Print a race report from thread that encountered the race.
+ * No other thread was observed to race with the access, but the data value
+ * before and after the stall differs. Reports a race of "unknown origin".
*/
-extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
- enum kcsan_value_change value_change,
- enum kcsan_report_type type, int watchpoint_idx);
+void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
+ u64 old, u64 new, u64 mask);
#endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 56016e8e7461..21137929d428 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -325,13 +325,10 @@ static void print_verbose_info(struct task_struct *task)
print_irqtrace_events(task);
}
-/*
- * Returns true if a report was generated, false otherwise.
- */
-static bool print_report(enum kcsan_value_change value_change,
- enum kcsan_report_type type,
+static void print_report(enum kcsan_value_change value_change,
const struct access_info *ai,
- const struct other_info *other_info)
+ const struct other_info *other_info,
+ u64 old, u64 new, u64 mask)
{
unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
@@ -344,25 +341,24 @@ static bool print_report(enum kcsan_value_change value_change,
* Must check report filter rules before starting to print.
*/
if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
- return false;
+ return;
- if (type == KCSAN_REPORT_RACE_SIGNAL) {
+ if (other_info) {
other_skipnr = get_stack_skipnr(other_info->stack_entries,
other_info->num_stack_entries);
other_frame = other_info->stack_entries[other_skipnr];
/* @value_change is only known for the other thread */
if (skip_report(value_change, other_frame))
- return false;
+ return;
}
if (rate_limit_report(this_frame, other_frame))
- return false;
+ return;
/* Print report header. */
pr_err("==================================================================\n");
- switch (type) {
- case KCSAN_REPORT_RACE_SIGNAL: {
+ if (other_info) {
int cmp;
/*
@@ -374,22 +370,15 @@ static bool print_report(enum kcsan_value_change value_change,
get_bug_type(ai->access_type | other_info->ai.access_type),
(void *)(cmp < 0 ? other_frame : this_frame),
(void *)(cmp < 0 ? this_frame : other_frame));
- } break;
-
- case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+ } else {
pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
(void *)this_frame);
- break;
-
- default:
- BUG();
}
pr_err("\n");
/* Print information about the racing accesses. */
- switch (type) {
- case KCSAN_REPORT_RACE_SIGNAL:
+ if (other_info) {
pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
get_access_type(other_info->ai.access_type), other_info->ai.ptr,
other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
@@ -407,16 +396,10 @@ static bool print_report(enum kcsan_value_change value_change,
pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
get_access_type(ai->access_type), ai->ptr, ai->size,
get_thread_desc(ai->task_pid), ai->cpu_id);
- break;
-
- case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+ } else {
pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
get_access_type(ai->access_type), ai->ptr, ai->size,
get_thread_desc(ai->task_pid), ai->cpu_id);
- break;
-
- default:
- BUG();
}
/* Print stack trace of this thread. */
stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
@@ -425,24 +408,41 @@ static bool print_report(enum kcsan_value_change value_change,
if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
print_verbose_info(current);
+ /* Print observed value change. */
+ if (ai->size <= 8) {
+ int hex_len = ai->size * 2;
+ u64 diff = old ^ new;
+
+ if (mask)
+ diff &= mask;
+ if (diff) {
+ pr_err("\n");
+ pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
+ hex_len, old, hex_len, new);
+ if (mask) {
+ pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
+ hex_len, diff, hex_len, mask);
+ }
+ }
+ }
+
/* Print report footer. */
pr_err("\n");
pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
dump_stack_print_info(KERN_DEFAULT);
pr_err("==================================================================\n");
- return true;
+ if (panic_on_warn)
+ panic("panic_on_warn set ...\n");
}
static void release_report(unsigned long *flags, struct other_info *other_info)
{
- if (other_info)
- /*
- * Use size to denote valid/invalid, since KCSAN entirely
- * ignores 0-sized accesses.
- */
- other_info->ai.size = 0;
-
+ /*
+ * Use size to denote valid/invalid, since KCSAN entirely ignores
+ * 0-sized accesses.
+ */
+ other_info->ai.size = 0;
raw_spin_unlock_irqrestore(&report_lock, *flags);
}
@@ -575,48 +575,42 @@ discard:
return false;
}
-/*
- * Depending on the report type either sets @other_info and returns false, or
- * awaits @other_info and returns true. If @other_info is not required for the
- * report type, simply acquires @report_lock and returns true.
- */
-static noinline bool prepare_report(unsigned long *flags,
- enum kcsan_report_type type,
- const struct access_info *ai,
- struct other_info *other_info)
+static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
+ int access_type)
{
- switch (type) {
- case KCSAN_REPORT_CONSUMED_WATCHPOINT:
- prepare_report_producer(flags, ai, other_info);
- return false;
- case KCSAN_REPORT_RACE_SIGNAL:
- return prepare_report_consumer(flags, ai, other_info);
- default:
- /* @other_info not required; just acquire @report_lock. */
- raw_spin_lock_irqsave(&report_lock, *flags);
- return true;
- }
-}
-
-void kcsan_report(const volatile void *ptr, size_t size, int access_type,
- enum kcsan_value_change value_change,
- enum kcsan_report_type type, int watchpoint_idx)
-{
- unsigned long flags = 0;
- const struct access_info ai = {
+ return (struct access_info) {
.ptr = ptr,
.size = size,
.access_type = access_type,
.task_pid = in_task() ? task_pid_nr(current) : -1,
.cpu_id = raw_smp_processor_id()
};
- struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
- ? NULL : &other_infos[watchpoint_idx];
+}
+
+void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
+ int watchpoint_idx)
+{
+ const struct access_info ai = prepare_access_info(ptr, size, access_type);
+ unsigned long flags;
kcsan_disable_current();
- if (WARN_ON(watchpoint_idx < 0 || watchpoint_idx >= ARRAY_SIZE(other_infos)))
- goto out;
+ lockdep_off(); /* See kcsan_report_known_origin(). */
+
+ prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);
+
+ lockdep_on();
+ kcsan_enable_current();
+}
+
+void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change, int watchpoint_idx,
+ u64 old, u64 new, u64 mask)
+{
+ const struct access_info ai = prepare_access_info(ptr, size, access_type);
+ struct other_info *other_info = &other_infos[watchpoint_idx];
+ unsigned long flags = 0;
+ kcsan_disable_current();
/*
* Because we may generate reports when we're in scheduler code, the use
* of printk() could deadlock. Until such time that all printing code
@@ -626,22 +620,35 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
*/
lockdep_off();
- if (prepare_report(&flags, type, &ai, other_info)) {
- /*
- * Never report if value_change is FALSE, only if we it is
- * either TRUE or MAYBE. In case of MAYBE, further filtering may
- * be done once we know the full stack trace in print_report().
- */
- bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
- print_report(value_change, type, &ai, other_info);
+ if (!prepare_report_consumer(&flags, &ai, other_info))
+ goto out;
+ /*
+ * Never report if value_change is FALSE, only when it is
+ * either TRUE or MAYBE. In case of MAYBE, further filtering may
+ * be done once we know the full stack trace in print_report().
+ */
+ if (value_change != KCSAN_VALUE_CHANGE_FALSE)
+ print_report(value_change, &ai, other_info, old, new, mask);
- if (reported && panic_on_warn)
- panic("panic_on_warn set ...\n");
+ release_report(&flags, other_info);
+out:
+ lockdep_on();
+ kcsan_enable_current();
+}
- release_report(&flags, other_info);
- }
+void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
+ u64 old, u64 new, u64 mask)
+{
+ const struct access_info ai = prepare_access_info(ptr, size, access_type);
+ unsigned long flags;
+
+ kcsan_disable_current();
+ lockdep_off(); /* See kcsan_report_known_origin(). */
+
+ raw_spin_lock_irqsave(&report_lock, flags);
+ print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
+ raw_spin_unlock_irqrestore(&report_lock, flags);
lockdep_on();
-out:
kcsan_enable_current();
}
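
For reference, a minimal caller-side sketch of the three specialized entry points that replace the old enum-driven kcsan_report() (illustrative only: the wrapper functions and their placement are assumptions, while the kcsan_report_*() signatures are taken from the hunk above):

/*
 * Sketch, not part of the patch: the producer merely stashes its access
 * information for the consumer, while the consumer and the unknown-origin
 * path print the report directly under the report lock.
 */
static void example_producer(const volatile void *ptr, size_t size,
			     int access_type, int watchpoint_idx)
{
	/* Another thread consumed our watchpoint: record this access for it. */
	kcsan_report_set_info(ptr, size, access_type, watchpoint_idx);
}

static void example_consumer(const volatile void *ptr, size_t size,
			     int access_type, int watchpoint_idx,
			     u64 old, u64 new, u64 mask)
{
	/* We own the watchpoint: print the full report, including the values. */
	kcsan_report_known_origin(ptr, size, access_type, KCSAN_VALUE_CHANGE_MAYBE,
				  watchpoint_idx, old, new, mask);
}

static void example_unknown(const volatile void *ptr, size_t size,
			    int access_type, u64 old, u64 new, u64 mask)
{
	/* The value changed, but the racing thread was never observed. */
	kcsan_report_unknown_origin(ptr, size, access_type, old, new, mask);
}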
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index f099baee3578..4b34a9aa32bc 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -26,6 +26,7 @@
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
+#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e41385afe79d..790a573bbe00 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -35,6 +35,7 @@
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
+#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <asm/sections.h>
@@ -106,7 +107,7 @@ void __weak *alloc_insn_page(void)
return module_alloc(PAGE_SIZE);
}
-void __weak free_insn_page(void *page)
+static void free_insn_page(void *page)
{
module_memfree(page);
}
@@ -321,11 +322,21 @@ int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
}
#ifdef CONFIG_OPTPROBES
+void __weak *alloc_optinsn_page(void)
+{
+ return alloc_insn_page();
+}
+
+void __weak free_optinsn_page(void *page)
+{
+ free_insn_page(page);
+}
+
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
- .alloc = alloc_insn_page,
- .free = free_insn_page,
+ .alloc = alloc_optinsn_page,
+ .free = free_optinsn_page,
.sym = KPROBE_OPTINSN_PAGE_SYM,
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
@@ -1551,6 +1562,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (!kernel_text_address((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
+ static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;
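
The weak alloc_optinsn_page()/free_optinsn_page() hooks introduced above default to the regular instruction-slot allocator; a hedged sketch of how an architecture might override them (hypothetical, not part of this patch):

/*
 * Hypothetical arch-side override: give optimized-probe trampolines their
 * own allocator while leaving the regular insn slots untouched.
 */
void *alloc_optinsn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void free_optinsn_page(void *page)
{
	module_memfree(page);
}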
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e97d08001437..bf1c00c881e4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6506,6 +6506,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
struct task_struct *curr = current;
+ int dl = READ_ONCE(debug_locks);
/* Note: the following can be executed concurrently, so be careful. */
pr_warn("\n");
@@ -6515,11 +6516,12 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
pr_warn("-----------------------------\n");
pr_warn("%s:%d %s!\n", file, line, s);
pr_warn("\nother info that might help us debug this:\n\n");
- pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+ pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
!rcu_lockdep_current_cpu_online()
? "RCU used illegally from offline CPU!\n"
: "",
- rcu_scheduler_active, debug_locks);
+ rcu_scheduler_active, dl,
+ dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");
/*
* If a CPU is in the RCU-free window in idle (ie: in the section
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 806978314496..b8d9a050c337 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -70,26 +70,28 @@ static int l_show(struct seq_file *m, void *v)
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
-#ifdef CONFIG_PROVE_LOCKING
- seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
- seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
-#endif
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
+ seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
- get_usage_chars(class, usage);
- seq_printf(m, " %s", usage);
+ get_usage_chars(class, usage);
+ seq_printf(m, " %s", usage);
+ }
seq_printf(m, ": ");
print_name(m, class);
seq_puts(m, "\n");
- list_for_each_entry(entry, &class->locks_after, entry) {
- if (entry->distance == 1) {
- seq_printf(m, " -> [%p] ", entry->class->key);
- print_name(m, entry->class);
- seq_puts(m, "\n");
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ list_for_each_entry(entry, &class->locks_after, entry) {
+ if (entry->distance == 1) {
+ seq_printf(m, " -> [%p] ", entry->class->key);
+ print_name(m, entry->class);
+ seq_puts(m, "\n");
+ }
}
+ seq_puts(m, "\n");
}
- seq_puts(m, "\n");
return 0;
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b5d9bb5202c6..ad0db322ed3b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -343,7 +343,7 @@ static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
enum rtmutex_chainwalk chwalk)
{
- if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX))
+ if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
return waiter != NULL;
return chwalk == RT_MUTEX_FULL_CHAINWALK;
}
diff --git a/kernel/module.c b/kernel/module.c
index 927d46cb8eb9..ed13917ea5f3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -13,6 +13,7 @@
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
+#include <linux/buildid.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
@@ -1018,8 +1019,7 @@ void __symbol_put(const char *symbol)
};
preempt_disable();
- if (!find_symbol(&fsa))
- BUG();
+ BUG_ON(!find_symbol(&fsa));
module_put(fsa.owner);
preempt_enable();
}
@@ -1466,6 +1466,13 @@ resolve_symbol_wait(struct module *mod,
return ksym;
}
+#ifdef CONFIG_KALLSYMS
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+ return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+#endif
+
/*
* /sys/module/foo/sections stuff
* J. Corbet <corbet@lwn.net>
@@ -1473,11 +1480,6 @@ resolve_symbol_wait(struct module *mod,
#ifdef CONFIG_SYSFS
#ifdef CONFIG_KALLSYMS
-static inline bool sect_empty(const Elf_Shdr *sect)
-{
- return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
-}
-
struct module_sect_attr {
struct bin_attribute battr;
unsigned long address;
@@ -2798,6 +2800,26 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
}
#endif /* CONFIG_KALLSYMS */
+#if IS_ENABLED(CONFIG_KALLSYMS) && IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+static void init_build_id(struct module *mod, const struct load_info *info)
+{
+ const Elf_Shdr *sechdr;
+ unsigned int i;
+
+ for (i = 0; i < info->hdr->e_shnum; i++) {
+ sechdr = &info->sechdrs[i];
+ if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE &&
+ !build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id,
+ sechdr->sh_size))
+ break;
+ }
+}
+#else
+static void init_build_id(struct module *mod, const struct load_info *info)
+{
+}
+#endif
+
static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
{
if (!debug)
@@ -4022,6 +4044,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
goto free_arch_cleanup;
}
+ init_build_id(mod, info);
dynamic_debug_setup(mod, info->debug, info->num_debug);
/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
@@ -4255,6 +4278,7 @@ const char *module_address_lookup(unsigned long addr,
unsigned long *size,
unsigned long *offset,
char **modname,
+ const unsigned char **modbuildid,
char *namebuf)
{
const char *ret = NULL;
@@ -4265,6 +4289,13 @@ const char *module_address_lookup(unsigned long addr,
if (mod) {
if (modname)
*modname = mod->name;
+ if (modbuildid) {
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+ *modbuildid = mod->build_id;
+#else
+ *modbuildid = NULL;
+#endif
+ }
ret = find_kallsyms_symbol(mod, addr, size, offset);
}
@@ -4425,9 +4456,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
ret = fn(data, kallsyms_symbol_name(kallsyms, i),
mod, kallsyms_symbol_value(sym));
if (ret != 0)
- break;
+ goto out;
}
}
+out:
mutex_unlock(&module_mutex);
return ret;
}
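
module_address_lookup() now optionally reports the module's build ID; a hedged caller sketch (the wrapper function and print format are assumptions for illustration, and callers that do not need the build ID can simply pass NULL):

static void example_symbolize(unsigned long addr)
{
	char namebuf[KSYM_NAME_LEN];
	const unsigned char *buildid = NULL;
	unsigned long size, offset;
	char *modname = NULL;
	const char *sym;

	sym = module_address_lookup(addr, &size, &offset, &modname,
				    &buildid, namebuf);
	/* buildid stays NULL unless CONFIG_STACKTRACE_BUILD_ID is enabled. */
	if (sym && buildid)
		pr_info("%s+%#lx/%#lx [%s %20phN]\n",
			sym, offset, size, modname, buildid);
}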
diff --git a/kernel/panic.c b/kernel/panic.c
index 332736a72a58..edad89660a2b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -23,6 +23,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
+#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index da0b41914177..559acef3fddb 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -31,6 +31,7 @@
#include <linux/genhd.h>
#include <linux/ktime.h>
#include <linux/security.h>
+#include <linux/secretmem.h>
#include <trace/events/power.h>
#include "power.h"
@@ -81,7 +82,9 @@ void hibernate_release(void)
bool hibernation_available(void)
{
- return nohibernate == 0 && !security_locked_down(LOCKDOWN_HIBERNATION);
+ return nohibernate == 0 &&
+ !security_locked_down(LOCKDOWN_HIBERNATION) &&
+ !secretmem_active();
}
/**
diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug
index 1942c1f1bb65..4fd64999300f 100644
--- a/kernel/rcu/Kconfig.debug
+++ b/kernel/rcu/Kconfig.debug
@@ -116,7 +116,7 @@ config RCU_EQS_DEBUG
config RCU_STRICT_GRACE_PERIOD
bool "Provide debug RCU implementation with short grace periods"
- depends on DEBUG_KERNEL && RCU_EXPERT
+ depends on DEBUG_KERNEL && RCU_EXPERT && NR_CPUS <= 4
default n
select PREEMPT_COUNT if PREEMPT=n
help
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index bf0827d4b659..24b5f2c2de87 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -308,6 +308,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
}
}
+extern void rcu_init_geometry(void);
+
/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
@@ -422,12 +424,6 @@ do { \
#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */
-#ifdef CONFIG_SRCU
-void srcu_init(void);
-#else /* #ifdef CONFIG_SRCU */
-static inline void srcu_init(void) { }
-#endif /* #else #ifdef CONFIG_SRCU */
-
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
@@ -441,7 +437,11 @@ bool rcu_gp_is_expedited(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
+#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+static inline void show_rcu_tasks_gp_kthreads(void) {}
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */
@@ -519,6 +519,7 @@ static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
+static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
@@ -527,6 +528,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
+bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 194b9c145c40..40ef5417d954 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -245,12 +245,6 @@ static const char *rcu_torture_writer_state_getname(void)
return rcu_torture_writer_state_names[i];
}
-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_PREEMPT_RT)
-# define rcu_can_boost() 1
-#else
-# define rcu_can_boost() 0
-#endif
-
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
@@ -331,6 +325,7 @@ struct rcu_torture_ops {
void (*read_delay)(struct torture_random_state *rrsp,
struct rt_read_seg *rtrsp);
void (*readunlock)(int idx);
+ int (*readlock_held)(void);
unsigned long (*get_gp_seq)(void);
unsigned long (*gp_diff)(unsigned long new, unsigned long old);
void (*deferred_free)(struct rcu_torture *p);
@@ -345,6 +340,7 @@ struct rcu_torture_ops {
void (*fqs)(void);
void (*stats)(void);
void (*gp_kthread_dbg)(void);
+ bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
int (*stall_dur)(void);
int irq_capable;
int can_boost;
@@ -359,6 +355,11 @@ static struct rcu_torture_ops *cur_ops;
* Definitions for rcu torture testing.
*/
+static int torture_readlock_not_held(void)
+{
+ return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
+}
+
static int rcu_torture_read_lock(void) __acquires(RCU)
{
rcu_read_lock();
@@ -483,30 +484,32 @@ static void rcu_sync_torture_init(void)
}
static struct rcu_torture_ops rcu_ops = {
- .ttype = RCU_FLAVOR,
- .init = rcu_sync_torture_init,
- .readlock = rcu_torture_read_lock,
- .read_delay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .get_gp_seq = rcu_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .deferred_free = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .exp_sync = synchronize_rcu_expedited,
- .get_gp_state = get_state_synchronize_rcu,
- .start_gp_poll = start_poll_synchronize_rcu,
- .poll_gp_state = poll_state_synchronize_rcu,
- .cond_sync = cond_synchronize_rcu,
- .call = call_rcu,
- .cb_barrier = rcu_barrier,
- .fqs = rcu_force_quiescent_state,
- .stats = NULL,
- .gp_kthread_dbg = show_rcu_gp_kthreads,
- .stall_dur = rcu_jiffies_till_stall_check,
- .irq_capable = 1,
- .can_boost = rcu_can_boost(),
- .extendables = RCUTORTURE_MAX_EXTEND,
- .name = "rcu"
+ .ttype = RCU_FLAVOR,
+ .init = rcu_sync_torture_init,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .readlock_held = torture_readlock_not_held,
+ .get_gp_seq = rcu_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
+ .deferred_free = rcu_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .exp_sync = synchronize_rcu_expedited,
+ .get_gp_state = get_state_synchronize_rcu,
+ .start_gp_poll = start_poll_synchronize_rcu,
+ .poll_gp_state = poll_state_synchronize_rcu,
+ .cond_sync = cond_synchronize_rcu,
+ .call = call_rcu,
+ .cb_barrier = rcu_barrier,
+ .fqs = rcu_force_quiescent_state,
+ .stats = NULL,
+ .gp_kthread_dbg = show_rcu_gp_kthreads,
+ .check_boost_failed = rcu_check_boost_fail,
+ .stall_dur = rcu_jiffies_till_stall_check,
+ .irq_capable = 1,
+ .can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
+ .extendables = RCUTORTURE_MAX_EXTEND,
+ .name = "rcu"
};
/*
@@ -540,6 +543,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
+ .readlock_held = torture_readlock_not_held,
.get_gp_seq = rcu_no_completed,
.deferred_free = rcu_busted_torture_deferred_free,
.sync = synchronize_rcu_busted,
@@ -589,6 +593,11 @@ static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
srcu_read_unlock(srcu_ctlp, idx);
}
+static int torture_srcu_read_lock_held(void)
+{
+ return srcu_read_lock_held(srcu_ctlp);
+}
+
static unsigned long srcu_torture_completed(void)
{
return srcu_batches_completed(srcu_ctlp);
@@ -646,6 +655,7 @@ static struct rcu_torture_ops srcu_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
+ .readlock_held = torture_srcu_read_lock_held,
.get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
@@ -681,6 +691,7 @@ static struct rcu_torture_ops srcud_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
+ .readlock_held = torture_srcu_read_lock_held,
.get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
@@ -700,6 +711,7 @@ static struct rcu_torture_ops busted_srcud_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = srcu_torture_read_unlock,
+ .readlock_held = torture_srcu_read_lock_held,
.get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
@@ -787,6 +799,7 @@ static struct rcu_torture_ops trivial_ops = {
.readlock = rcu_torture_read_lock_trivial,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock_trivial,
+ .readlock_held = torture_readlock_not_held,
.get_gp_seq = rcu_no_completed,
.sync = synchronize_rcu_trivial,
.exp_sync = synchronize_rcu_trivial,
@@ -850,6 +863,7 @@ static struct rcu_torture_ops tasks_tracing_ops = {
.readlock = tasks_tracing_torture_read_lock,
.read_delay = srcu_read_delay, /* just reuse srcu's version. */
.readunlock = tasks_tracing_torture_read_unlock,
+ .readlock_held = rcu_read_lock_trace_held,
.get_gp_seq = rcu_no_completed,
.deferred_free = rcu_tasks_tracing_torture_deferred_free,
.sync = synchronize_rcu_tasks_trace,
@@ -871,32 +885,13 @@ static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
return cur_ops->gp_diff(new, old);
}
-static bool __maybe_unused torturing_tasks(void)
-{
- return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
-}
-
/*
* RCU torture priority-boost testing. Runs one real-time thread per
- * CPU for moderate bursts, repeatedly registering RCU callbacks and
- * spinning waiting for them to be invoked. If a given callback takes
- * too long to be invoked, we assume that priority inversion has occurred.
+ * CPU for moderate bursts, repeatedly starting grace periods and waiting
+ * for them to complete. If a given grace period takes too long, we assume
+ * that priority inversion has occurred.
*/
-struct rcu_boost_inflight {
- struct rcu_head rcu;
- int inflight;
-};
-
-static void rcu_torture_boost_cb(struct rcu_head *head)
-{
- struct rcu_boost_inflight *rbip =
- container_of(head, struct rcu_boost_inflight, rcu);
-
- /* Ensure RCU-core accesses precede clearing ->inflight */
- smp_store_release(&rbip->inflight, 0);
-}
-
static int old_rt_runtime = -1;
static void rcu_torture_disable_rt_throttle(void)
@@ -923,49 +918,68 @@ static void rcu_torture_enable_rt_throttle(void)
old_rt_runtime = -1;
}
-static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
+ int cpu;
static int dbg_done;
-
- if (end - start > test_boost_duration * HZ - HZ / 2) {
+ unsigned long end = jiffies;
+ bool gp_done;
+ unsigned long j;
+ static unsigned long last_persist;
+ unsigned long lp;
+ unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
+
+ if (end - *start > mininterval) {
+ // Recheck after checking time to avoid false positives.
+ smp_mb(); // Time check before grace-period check.
+ if (cur_ops->poll_gp_state(gp_state))
+ return false; // passed, though perhaps just barely
+ if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
+ // At most one persisted message per boost test.
+ j = jiffies;
+ lp = READ_ONCE(last_persist);
+ if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
+ pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
+ return false; // passed on a technicality
+ }
VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
n_rcu_torture_boost_failure++;
- if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg)
+ if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
+ pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
+ current->rt_priority, gp_state, end - *start);
cur_ops->gp_kthread_dbg();
+ // Recheck after print to flag grace period ending during splat.
+ gp_done = cur_ops->poll_gp_state(gp_state);
+ pr_info("Boost inversion: GP %lu %s.\n", gp_state,
+ gp_done ? "ended already" : "still pending");
- return true; /* failed */
+ }
+
+ return true; // failed
+ } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
+ *start = jiffies;
}
- return false; /* passed */
+ return false; // passed
}
static int rcu_torture_boost(void *arg)
{
- unsigned long call_rcu_time;
unsigned long endtime;
+ unsigned long gp_state;
+ unsigned long gp_state_time;
unsigned long oldstarttime;
- struct rcu_boost_inflight rbi = { .inflight = 0 };
VERBOSE_TOROUT_STRING("rcu_torture_boost started");
/* Set real-time priority. */
sched_set_fifo_low(current);
- init_rcu_head_on_stack(&rbi.rcu);
/* Each pass through the following loop does one boost-test cycle. */
do {
bool failed = false; // Test failed already in this test interval
- bool firsttime = true;
+ bool gp_initiated = false;
- /* Increment n_rcu_torture_boosts once per boost-test */
- while (!kthread_should_stop()) {
- if (mutex_trylock(&boost_mutex)) {
- n_rcu_torture_boosts++;
- mutex_unlock(&boost_mutex);
- break;
- }
- schedule_timeout_uninterruptible(1);
- }
if (kthread_should_stop())
goto checkwait;
@@ -979,33 +993,33 @@ static int rcu_torture_boost(void *arg)
goto checkwait;
}
- /* Do one boost-test interval. */
+ // Do one boost-test interval.
endtime = oldstarttime + test_boost_duration * HZ;
while (time_before(jiffies, endtime)) {
- /* If we don't have a callback in flight, post one. */
- if (!smp_load_acquire(&rbi.inflight)) {
- /* RCU core before ->inflight = 1. */
- smp_store_release(&rbi.inflight, 1);
- cur_ops->call(&rbi.rcu, rcu_torture_boost_cb);
- /* Check if the boost test failed */
- if (!firsttime && !failed)
- failed = rcu_torture_boost_failed(call_rcu_time, jiffies);
- call_rcu_time = jiffies;
- firsttime = false;
+ // Has current GP gone too long?
+ if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
+ failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
+ // If we don't have a grace period in flight, start one.
+ if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
+ gp_state = cur_ops->start_gp_poll();
+ gp_initiated = true;
+ gp_state_time = jiffies;
}
- if (stutter_wait("rcu_torture_boost"))
+ if (stutter_wait("rcu_torture_boost")) {
sched_set_fifo_low(current);
+ // If the grace period already ended,
+ // we don't know when that happened, so
+ // start over.
+ if (cur_ops->poll_gp_state(gp_state))
+ gp_initiated = false;
+ }
if (torture_must_stop())
goto checkwait;
}
- /*
- * If boost never happened, then inflight will always be 1, in
- * this case the boost check would never happen in the above
- * loop so do another one here.
- */
- if (!firsttime && !failed && smp_load_acquire(&rbi.inflight))
- rcu_torture_boost_failed(call_rcu_time, jiffies);
+ // In case the grace period extended beyond the end of the loop.
+ if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
+ rcu_torture_boost_failed(gp_state, &gp_state_time);
/*
* Set the start time of the next test interval.
@@ -1014,11 +1028,12 @@ static int rcu_torture_boost(void *arg)
* interval. Besides, we are running at RT priority,
* so delays should be relatively rare.
*/
- while (oldstarttime == boost_starttime &&
- !kthread_should_stop()) {
+ while (oldstarttime == boost_starttime && !kthread_should_stop()) {
if (mutex_trylock(&boost_mutex)) {
- boost_starttime = jiffies +
- test_boost_interval * HZ;
+ if (oldstarttime == boost_starttime) {
+ boost_starttime = jiffies + test_boost_interval * HZ;
+ n_rcu_torture_boosts++;
+ }
mutex_unlock(&boost_mutex);
break;
}
@@ -1030,15 +1045,11 @@ checkwait: if (stutter_wait("rcu_torture_boost"))
sched_set_fifo_low(current);
} while (!torture_must_stop());
- while (smp_load_acquire(&rbi.inflight))
- schedule_timeout_uninterruptible(1); // rcu_barrier() deadlocks.
-
/* Clean up and exit. */
- while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
+ while (!kthread_should_stop()) {
torture_shutdown_absorb("rcu_torture_boost");
schedule_timeout_uninterruptible(1);
}
- destroy_rcu_head_on_stack(&rbi.rcu);
torture_kthread_stopping("rcu_torture_boost");
return 0;
}
@@ -1553,11 +1564,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
- rcu_read_lock_bh_held() ||
- rcu_read_lock_sched_held() ||
- srcu_read_lock_held(srcu_ctlp) ||
- rcu_read_lock_trace_held() ||
- torturing_tasks());
+ !cur_ops->readlock_held || cur_ops->readlock_held());
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
@@ -1861,48 +1868,49 @@ rcu_torture_stats(void *arg)
torture_shutdown_absorb("rcu_torture_stats");
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_stats");
-
- {
- struct rcu_head *rhp;
- struct kmem_cache *kcp;
- static int z;
-
- kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
- rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
- pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
- pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
- mem_dump_obj(ZERO_SIZE_PTR);
- pr_alert("mem_dump_obj(NULL):");
- mem_dump_obj(NULL);
- pr_alert("mem_dump_obj(%px):", &rhp);
- mem_dump_obj(&rhp);
- pr_alert("mem_dump_obj(%px):", rhp);
- mem_dump_obj(rhp);
- pr_alert("mem_dump_obj(%px):", &rhp->func);
- mem_dump_obj(&rhp->func);
- pr_alert("mem_dump_obj(%px):", &z);
- mem_dump_obj(&z);
- kmem_cache_free(kcp, rhp);
- kmem_cache_destroy(kcp);
- rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
- pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
- pr_alert("mem_dump_obj(kmalloc %px):", rhp);
- mem_dump_obj(rhp);
- pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
- mem_dump_obj(&rhp->func);
- kfree(rhp);
- rhp = vmalloc(4096);
- pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
- pr_alert("mem_dump_obj(vmalloc %px):", rhp);
- mem_dump_obj(rhp);
- pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
- mem_dump_obj(&rhp->func);
- vfree(rhp);
- }
-
return 0;
}
+/* Test mem_dump_obj() and friends. */
+static void rcu_torture_mem_dump_obj(void)
+{
+ struct rcu_head *rhp;
+ struct kmem_cache *kcp;
+ static int z;
+
+ kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
+ rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
+ pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
+ pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
+ mem_dump_obj(ZERO_SIZE_PTR);
+ pr_alert("mem_dump_obj(NULL):");
+ mem_dump_obj(NULL);
+ pr_alert("mem_dump_obj(%px):", &rhp);
+ mem_dump_obj(&rhp);
+ pr_alert("mem_dump_obj(%px):", rhp);
+ mem_dump_obj(rhp);
+ pr_alert("mem_dump_obj(%px):", &rhp->func);
+ mem_dump_obj(&rhp->func);
+ pr_alert("mem_dump_obj(%px):", &z);
+ mem_dump_obj(&z);
+ kmem_cache_free(kcp, rhp);
+ kmem_cache_destroy(kcp);
+ rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+ pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
+ pr_alert("mem_dump_obj(kmalloc %px):", rhp);
+ mem_dump_obj(rhp);
+ pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
+ mem_dump_obj(&rhp->func);
+ kfree(rhp);
+ rhp = vmalloc(4096);
+ pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
+ pr_alert("mem_dump_obj(vmalloc %px):", rhp);
+ mem_dump_obj(rhp);
+ pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
+ mem_dump_obj(&rhp->func);
+ vfree(rhp);
+}
+
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
@@ -2634,7 +2642,7 @@ static bool rcu_torture_can_boost(void)
if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
return false;
- if (!cur_ops->call)
+ if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
return false;
prio = rcu_get_gp_kthreads_prio();
@@ -2642,7 +2650,7 @@ static bool rcu_torture_can_boost(void)
return false;
if (prio < 2) {
- if (boost_warn_once == 1)
+ if (boost_warn_once == 1)
return false;
pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
@@ -2818,6 +2826,8 @@ rcu_torture_cleanup(void)
if (cur_ops->cleanup != NULL)
cur_ops->cleanup();
+ rcu_torture_mem_dump_obj();
+
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
if (err_segs_recorded) {
@@ -3120,6 +3130,21 @@ rcu_torture_init(void)
if (firsterr < 0)
goto unwind;
rcutor_hp = firsterr;
+
+ // Testing RCU priority boosting requires rcutorture do
+ // some serious abuse. Counter this by running ksoftirqd
+ // at higher priority.
+ if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
+ for_each_online_cpu(cpu) {
+ struct sched_param sp;
+ struct task_struct *t;
+
+ t = per_cpu(ksoftirqd, cpu);
+ WARN_ON_ONCE(!t);
+ sp.sched_priority = 2;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ }
+ }
}
shutdown_jiffies = jiffies + shutdown_secs * HZ;
firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
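
The reworked boost test above drives grace periods through the polled interface instead of posting callbacks; a minimal sketch of that polling pattern using the generic RCU API (illustrative only):

static void example_poll_one_gp(void)
{
	unsigned long gp_state = start_poll_synchronize_rcu();

	/* A grace period is now in flight; poll until it completes. */
	while (!poll_state_synchronize_rcu(gp_state))
		schedule_timeout_uninterruptible(1);
}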
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 02dd9767b559..d998a76fb542 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -362,6 +362,111 @@ static struct ref_scale_ops rwsem_ops = {
.name = "rwsem"
};
+// Definitions for global spinlock
+static DEFINE_SPINLOCK(test_lock);
+
+static void ref_lock_section(const int nloops)
+{
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ spin_lock(&test_lock);
+ spin_unlock(&test_lock);
+ }
+ preempt_enable();
+}
+
+static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
+{
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ spin_lock(&test_lock);
+ un_delay(udl, ndl);
+ spin_unlock(&test_lock);
+ }
+ preempt_enable();
+}
+
+static struct ref_scale_ops lock_ops = {
+ .readsection = ref_lock_section,
+ .delaysection = ref_lock_delay_section,
+ .name = "lock"
+};
+
+// Definitions for global irq-save spinlock
+
+static void ref_lock_irq_section(const int nloops)
+{
+ unsigned long flags;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ spin_lock_irqsave(&test_lock, flags);
+ spin_unlock_irqrestore(&test_lock, flags);
+ }
+ preempt_enable();
+}
+
+static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
+{
+ unsigned long flags;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ spin_lock_irqsave(&test_lock, flags);
+ un_delay(udl, ndl);
+ spin_unlock_irqrestore(&test_lock, flags);
+ }
+ preempt_enable();
+}
+
+static struct ref_scale_ops lock_irq_ops = {
+ .readsection = ref_lock_irq_section,
+ .delaysection = ref_lock_irq_delay_section,
+ .name = "lock-irq"
+};
+
+// Definitions for acquire-release.
+static DEFINE_PER_CPU(unsigned long, test_acqrel);
+
+static void ref_acqrel_section(const int nloops)
+{
+ unsigned long x;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
+ smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
+ }
+ preempt_enable();
+}
+
+static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
+{
+ unsigned long x;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
+ un_delay(udl, ndl);
+ smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
+ }
+ preempt_enable();
+}
+
+static struct ref_scale_ops acqrel_ops = {
+ .readsection = ref_acqrel_section,
+ .delaysection = ref_acqrel_delay_section,
+ .name = "acqrel"
+};
+
static void rcu_scale_one_reader(void)
{
if (readdelay <= 0)
@@ -382,13 +487,13 @@ ref_scale_reader(void *arg)
s64 duration;
VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
- set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+ WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
set_user_nice(current, MAX_NICE);
atomic_inc(&n_init);
if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
repeat:
- VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());
+ VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());
// Wait for signal that this reader can start.
wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
@@ -398,7 +503,7 @@ repeat:
goto end;
// Make sure that the CPU is affinitized appropriately during testing.
- WARN_ON_ONCE(smp_processor_id() != me);
+ WARN_ON_ONCE(raw_smp_processor_id() != me);
WRITE_ONCE(rt->start_reader, 0);
if (!atomic_dec_return(&n_started))
@@ -653,8 +758,8 @@ ref_scale_init(void)
long i;
int firsterr = 0;
static struct ref_scale_ops *scale_ops[] = {
- &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops,
- &refcnt_ops, &rwlock_ops, &rwsem_ops,
+ &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
+ &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
};
if (!torture_init_begin(scale_type, verbose))
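
The new lock, lock-irq, and acqrel read-side variants are selected through the existing scale_type parameter, so they need no code changes to exercise, e.g. booting with refscale.scale_type=lock-irq or loading the module with modprobe refscale scale_type=acqrel (parameter names as used by this test; the invocations are illustrative).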
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index e26547b34ad3..6833d8887181 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -80,7 +80,7 @@ do { \
* srcu_read_unlock() running against them. So if the is_static parameter
* is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
*/
-static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
int cpu;
int i;
@@ -90,6 +90,9 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
struct srcu_node *snp;
struct srcu_node *snp_first;
+ /* Initialize geometry if it has not already been initialized. */
+ rcu_init_geometry();
+
/* Work out the overall tree geometry. */
ssp->level[0] = &ssp->node[0];
for (i = 1; i < rcu_num_lvls; i++)
@@ -148,14 +151,6 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
sdp->ssp = ssp;
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
- if (is_static)
- continue;
-
- /* Dynamically allocated, better be no srcu_read_locks()! */
- for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
- sdp->srcu_lock_count[i] = 0;
- sdp->srcu_unlock_count[i] = 0;
- }
}
}
@@ -179,7 +174,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
ssp->sda = alloc_percpu(struct srcu_data);
if (!ssp->sda)
return -ENOMEM;
- init_srcu_struct_nodes(ssp, is_static);
+ init_srcu_struct_nodes(ssp);
ssp->srcu_gp_seq_needed_exp = 0;
ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
@@ -777,9 +772,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
spin_unlock_irqrestore_rcu_node(sdp, flags);
/*
- * No local callbacks, so probabalistically probe global state.
+ * No local callbacks, so probabilistically probe global state.
* Exact information would require acquiring locks, which would
- * kill scalability, hence the probabalistic nature of the probe.
+ * kill scalability, hence the probabilistic nature of the probe.
*/
/* First, see if enough time has passed since the last GP. */
@@ -1000,6 +995,9 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
* synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
* passed the same srcu_struct structure.
*
+ * Implementation of these memory-ordering guarantees is similar to
+ * that of synchronize_rcu().
+ *
* If SRCU is likely idle, expedite the first request. This semantic
* was provided by Classic SRCU, and is relied upon by its users, so TREE
* SRCU must also provide it. Note that detecting idleness is heuristic
@@ -1392,11 +1390,15 @@ void __init srcu_init(void)
{
struct srcu_struct *ssp;
+ /*
+ * Once that is set, call_srcu() can follow the normal path and
+ * queue delayed work. This must follow RCU workqueues creation
+ * and timers initialization.
+ */
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
work.work.entry);
- check_init_srcu_struct(ssp);
list_del_init(&ssp->work.work.entry);
queue_work(rcu_gp_wq, &ssp->work.work);
}
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index d4558ab7a07d..33d896d85902 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -94,9 +94,9 @@ static void rcu_sync_func(struct rcu_head *rhp)
rcu_sync_call(rsp);
} else {
/*
- * We're at least a GP after the last rcu_sync_exit(); eveybody
+ * We're at least a GP after the last rcu_sync_exit(); everybody
* will now have observed the write side critical section.
- * Let 'em rip!.
+ * Let 'em rip!
*/
WRITE_ONCE(rsp->gp_state, GP_IDLE);
}
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 350ebf5051f9..8536c55df514 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -23,7 +23,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
* struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
* @cbs_head: Head of callback list.
* @cbs_tail: Tail pointer for callback list.
- * @cbs_wq: Wait queue allowning new callback to get kthread's attention.
+ * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
* @cbs_lock: Lock protecting callback list.
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
* @gp_func: This flavor's grace-period-wait function.
@@ -377,6 +377,46 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs. If this is required, per-CPU callback lists
// will be needed.
+//
+// The implementation uses rcu_tasks_wait_gp(), which relies on function
+// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
+// function sets these function pointers up so that rcu_tasks_wait_gp()
+// invokes these functions in this order:
+//
+// rcu_tasks_pregp_step():
+// Invokes synchronize_rcu() in order to wait for all in-flight
+// t->on_rq and t->nvcsw transitions to complete. This works because
+// all such transitions are carried out with interrupts disabled.
+// rcu_tasks_pertask(), invoked on every non-idle task:
+// For every runnable non-idle task other than the current one, use
+// get_task_struct() to pin down that task, snapshot that task's
+// number of voluntary context switches, and add that task to the
+// holdout list.
+// rcu_tasks_postscan():
+// Invoke synchronize_srcu() to ensure that all tasks that were
+// in the process of exiting (and which thus might not know to
+// synchronize with this RCU Tasks grace period) have completed
+// exiting.
+// check_all_holdout_tasks(), repeatedly until holdout list is empty:
+// Scans the holdout list, attempting to identify a quiescent state
+// for each task on the list. If there is a quiescent state, the
+// corresponding task is removed from the holdout list.
+// rcu_tasks_postgp():
+// Invokes synchronize_rcu() in order to ensure that all prior
+// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
+// to have happened before the end of this RCU Tasks grace period.
+// Again, this works because all such transitions are carried out
+// with interrupts disabled.
+//
+// For each exiting task, the exit_tasks_rcu_start() and
+// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
+// read-side critical sections waited for by rcu_tasks_postscan().
+//
+// Pre-grace-period update-side code is ordered before the grace period
+// via the ->cbs_lock and the smp_mb__after_spinlock(). Pre-grace-period
+// read-side code is ordered before the grace period via the
+// synchronize_rcu() call in rcu_tasks_pregp_step() and by the scheduler's
+// locks and interrupt disabling.
/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
@@ -504,7 +544,7 @@ DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
* or transition to usermode execution. As such, there are no read-side
* primitives analogous to rcu_read_lock() and rcu_read_unlock() because
* this primitive is intended to determine that all tasks have passed
- * through a safe state, not so much for data-strcuture synchronization.
+ * through a safe state, not so much for data-structure synchronization.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
@@ -605,8 +645,13 @@ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
// passing an empty function to schedule_on_each_cpu(). This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching
// of concurrent calls to the synchronous synchronize_rcu_rude() API.
-// This sends IPIs far and wide and induces otherwise unnecessary context
-// switches on all online CPUs, whether idle or not.
+// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
+// and induces otherwise unnecessary context switches on all online CPUs,
+// whether idle or not.
+//
+// Callback handling is provided by the rcu_tasks_kthread() function.
+//
+// Ordering is provided by the scheduler's context-switch code.
// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
@@ -637,7 +682,7 @@ DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
* there are no read-side primitives analogous to rcu_read_lock() and
* rcu_read_unlock() because this primitive is intended to determine
* that all tasks have passed through a safe state, not so much for
- * data-strcuture synchronization.
+ * data-structure synchronization.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
@@ -908,10 +953,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
in_qs = likely(!t->trc_reader_nesting);
}
- // Mark as checked. Because this is called from the grace-period
- // kthread, also remove the task from the holdout list.
+ // Mark as checked so that the grace-period kthread will
+ // remove it from the holdout list.
t->trc_reader_checked = true;
- trc_del_holdout(t);
if (in_qs)
return true; // Already in quiescent state, done!!!
@@ -938,7 +982,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
// The current task had better be in a quiescent state.
if (t == current) {
t->trc_reader_checked = true;
- trc_del_holdout(t);
WARN_ON_ONCE(t->trc_reader_nesting);
return;
}
@@ -1163,7 +1206,7 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t)
* there are no read-side primitives analogous to rcu_read_lock() and
* rcu_read_unlock() because this primitive is intended to determine
* that all tasks have passed through a safe state, not so much for
- * data-strcuture synchronization.
+ * data-structure synchronization.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
@@ -1356,5 +1399,4 @@ void __init rcu_init_tasks_generic(void)
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
-void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
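
The long comment added above walks through the RCU Tasks grace-period steps; for completeness, a minimal update-side usage sketch of this flavor (illustrative; the structure, field, and callback names are assumptions):

struct example_obj {
	struct rcu_head rh;
	int payload;
};

static void example_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static void example_retire(struct example_obj *p)
{
	/* Invoked only after all tasks have passed through a quiescent state. */
	call_rcu_tasks(&p->rh, example_free_cb);
}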
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index c8a029fbb114..340b3f8b090d 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -221,5 +221,4 @@ void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
rcu_early_boot_tests();
- srcu_init();
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e78b2430c16..51f24ecd94b2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -32,6 +32,8 @@
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
+#include <linux/panic.h>
+#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
@@ -186,6 +188,17 @@ module_param(rcu_unlock_delay, int, 0444);
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);
+// A page shrinker can ask for pages to be freed to make them
+// available for other parts of the system. This usually happens
+// under low memory conditions, and in that case we should also
+// defer page-cache filling for a short time period.
+//
+// The default value is 5 seconds, which is long enough to reduce
+// interference with the shrinker while it asks other systems to
+// drain their caches.
+static int rcu_delay_page_cache_fill_msec = 5000;
+module_param(rcu_delay_page_cache_fill_msec, int, 0444);
+
/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
@@ -202,7 +215,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
* the need for long delays to increase some race probabilities with the
* need for fast grace periods to increase other race probabilities.
*/
-#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
+#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
/*
* Compute the mask of online CPUs for the specified rcu_node structure.
@@ -242,6 +255,7 @@ void rcu_softirq_qs(void)
{
rcu_qs();
rcu_preempt_deferred_qs(current);
+ rcu_tasks_qs(current, false);
}
/*
@@ -833,28 +847,6 @@ void noinstr rcu_irq_exit(void)
rcu_nmi_exit();
}
-/**
- * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
- * towards in kernel preemption
- *
- * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
- * from RCU point of view. Invoked from return from interrupt before kernel
- * preemption.
- */
-void rcu_irq_exit_preempt(void)
-{
- lockdep_assert_irqs_disabled();
- rcu_nmi_exit();
-
- RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
- "RCU dynticks_nesting counter underflow/zero!");
- RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
- DYNTICK_IRQ_NONIDLE,
- "Bad RCU dynticks_nmi_nesting counter\n");
- RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
- "RCU in extended quiescent state!");
-}
-
#ifdef CONFIG_PROVE_RCU
/**
* rcu_irq_exit_check_preempt - Validate that scheduling is possible
@@ -959,7 +951,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
*/
void noinstr rcu_user_exit(void)
{
- rcu_eqs_exit(1);
+ rcu_eqs_exit(true);
}
/**
@@ -1225,7 +1217,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
/*
- * We are reporting a quiescent state on behalf of some other CPU, so
+ * When trying to report a quiescent state on behalf of some other CPU,
* it is our responsibility to check for and handle potential overflow
* of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
* After all, the CPU might be in deep idle state, and thus executing no
@@ -2048,7 +2040,7 @@ static void rcu_gp_fqs_loop(void)
/*
* Clean up after the old grace period.
*/
-static void rcu_gp_cleanup(void)
+static noinline void rcu_gp_cleanup(void)
{
int cpu;
bool needgp = false;
@@ -2489,7 +2481,7 @@ int rcutree_dead_cpu(unsigned int cpu)
/*
* Invoke any RCU callbacks that have made it to the end of their grace
- * period. Thottle as specified by rdp->blimit.
+ * period. Throttle as specified by rdp->blimit.
*/
static void rcu_do_batch(struct rcu_data *rdp)
{
@@ -2629,7 +2621,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
* state, for example, user mode or idle loop. It also schedules RCU
* core processing. If the current grace period has gone on too long,
* it will ask the scheduler to manufacture a context switch for the sole
- * purpose of providing a providing the needed quiescent state.
+ * purpose of providing the needed quiescent state.
*/
void rcu_sched_clock_irq(int user)
{
@@ -2911,7 +2903,6 @@ static int __init rcu_spawn_core_kthreads(void)
"%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
return 0;
}
-early_initcall(rcu_spawn_core_kthreads);
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
@@ -3082,12 +3073,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
* period elapses, in other words after all pre-existing RCU read-side
* critical sections have completed. However, the callback function
* might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
- * may be nested. In addition, regions of code across which interrupts,
- * preemption, or softirqs have been disabled also serve as RCU read-side
- * critical sections. This includes hardware interrupt handlers, softirq
- * handlers, and NMI handlers.
+ * that started after call_rcu() was invoked.
+ *
+ * RCU read-side critical sections are delimited by rcu_read_lock()
+ * and rcu_read_unlock(), and may be nested. In addition, but only in
+ * v5.0 and later, regions of code across which interrupts, preemption,
+ * or softirqs have been disabled also serve as RCU read-side critical
+ * sections. This includes hardware interrupt handlers, softirq handlers,
+ * and NMI handlers.
*
* Note that all CPUs must agree that the grace period extended beyond
* all pre-existing RCU read-side critical section. On systems with more
@@ -3107,6 +3100,9 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
* between the call to call_rcu() and the invocation of "func()" -- even
* if CPU A and CPU B are the same CPU (but again only if the system has
* more than one CPU).
+ *
+ * Implementation of these memory-ordering guarantees is described here:
+ * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
*/
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
@@ -3171,6 +3167,7 @@ struct kfree_rcu_cpu_work {
* Even though it is lockless an access has to be protected by the
* per-cpu lock.
* @page_cache_work: A work to refill the cache when it is empty
+ * @backoff_page_cache_fill: Delay cache refills
* @work_in_progress: Indicates that page_cache_work is running
* @hrtimer: A hrtimer for scheduling a page_cache_work
* @nr_bkv_objs: number of allocated objects at @bkvcache.
@@ -3190,7 +3187,8 @@ struct kfree_rcu_cpu {
bool initialized;
int count;
- struct work_struct page_cache_work;
+ struct delayed_work page_cache_work;
+ atomic_t backoff_page_cache_fill;
atomic_t work_in_progress;
struct hrtimer hrtimer;
@@ -3237,7 +3235,7 @@ get_cached_bnode(struct kfree_rcu_cpu *krcp)
if (!krcp->nr_bkv_objs)
return NULL;
- krcp->nr_bkv_objs--;
+ WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
return (struct kvfree_rcu_bulk_data *)
llist_del_first(&krcp->bkvcache);
}
@@ -3251,14 +3249,33 @@ put_cached_bnode(struct kfree_rcu_cpu *krcp,
return false;
llist_add((struct llist_node *) bnode, &krcp->bkvcache);
- krcp->nr_bkv_objs++;
+ WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
return true;
+}
+
+static int
+drain_page_cache(struct kfree_rcu_cpu *krcp)
+{
+ unsigned long flags;
+ struct llist_node *page_list, *pos, *n;
+ int freed = 0;
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ page_list = llist_del_all(&krcp->bkvcache);
+ WRITE_ONCE(krcp->nr_bkv_objs, 0);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+ llist_for_each_safe(pos, n, page_list) {
+ free_page((unsigned long)pos);
+ freed++;
+ }
+
+ return freed;
}
/*
* This function is invoked in workqueue context after a grace period.
- * It frees all the objects queued on ->bhead_free or ->head_free.
+ * It frees all the objects queued on ->bkvhead_free or ->head_free.
*/
static void kfree_rcu_work(struct work_struct *work)
{
@@ -3285,7 +3302,7 @@ static void kfree_rcu_work(struct work_struct *work)
krwp->head_free = NULL;
raw_spin_unlock_irqrestore(&krcp->lock, flags);
- // Handle two first channels.
+ // Handle the first two channels.
for (i = 0; i < FREE_N_CHANNELS; i++) {
for (; bkvhead[i]; bkvhead[i] = bnext) {
bnext = bkvhead[i]->next;
@@ -3323,9 +3340,11 @@ static void kfree_rcu_work(struct work_struct *work)
}
/*
- * Emergency case only. It can happen under low memory
- * condition when an allocation gets failed, so the "bulk"
- * path can not be temporary maintained.
+	 * This is used when the "bulk" path cannot be used for the
+	 * double-argument form of kvfree_rcu().  This happens when the
+ * page-cache is empty, which means that objects are instead
+ * queued on a linked list through their rcu_head structures.
+ * This list is named "Channel 3".
*/
for (; head; head = next) {
unsigned long offset = (unsigned long)head->func;
@@ -3345,34 +3364,31 @@ static void kfree_rcu_work(struct work_struct *work)
}
/*
- * Schedule the kfree batch RCU work to run in workqueue context after a GP.
- *
- * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
- * timeout has been reached.
+ * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
*/
-static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
+static void kfree_rcu_monitor(struct work_struct *work)
{
- struct kfree_rcu_cpu_work *krwp;
- bool repeat = false;
+ struct kfree_rcu_cpu *krcp = container_of(work,
+ struct kfree_rcu_cpu, monitor_work.work);
+ unsigned long flags;
int i, j;
- lockdep_assert_held(&krcp->lock);
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ // Attempt to start a new batch.
for (i = 0; i < KFREE_N_BATCHES; i++) {
- krwp = &(krcp->krw_arr[i]);
+ struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
- /*
- * Try to detach bkvhead or head and attach it over any
- * available corresponding free channel. It can be that
- * a previous RCU batch is in progress, it means that
- * immediately to queue another one is not possible so
- * return false to tell caller to retry.
- */
+		// Try to detach bkvhead or head and attach it over any
+		// available corresponding free channel. It can be that
+		// a previous RCU batch is still in progress, in which
+		// case another one cannot be queued immediately, so the
+		// monitor work is rearmed instead.
if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
(krcp->head && !krwp->head_free)) {
- // Channel 1 corresponds to SLAB ptrs.
- // Channel 2 corresponds to vmalloc ptrs.
+ // Channel 1 corresponds to the SLAB-pointer bulk path.
+ // Channel 2 corresponds to the vmalloc-pointer bulk path.
for (j = 0; j < FREE_N_CHANNELS; j++) {
if (!krwp->bkvhead_free[j]) {
krwp->bkvhead_free[j] = krcp->bkvhead[j];
@@ -3380,7 +3396,8 @@ static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
}
}
- // Channel 3 corresponds to emergency path.
+ // Channel 3 corresponds to both SLAB and vmalloc
+ // objects queued on the linked list.
if (!krwp->head_free) {
krwp->head_free = krcp->head;
krcp->head = NULL;
@@ -3388,65 +3405,35 @@ static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
WRITE_ONCE(krcp->count, 0);
- /*
- * One work is per one batch, so there are three
- * "free channels", the batch can handle. It can
- * be that the work is in the pending state when
- * channels have been detached following by each
- * other.
- */
+ // One work item handles one batch, and each batch can handle
+ // the three "free channels". The work item may already be in
+ // the pending state when the channels have been detached one
+ // after another.
queue_rcu_work(system_wq, &krwp->rcu_work);
}
-
- // Repeat if any "free" corresponding channel is still busy.
- if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
- repeat = true;
}
- return !repeat;
-}
-
-static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
- unsigned long flags)
-{
- // Attempt to start a new batch.
- krcp->monitor_todo = false;
- if (queue_kfree_rcu_work(krcp)) {
- // Success! Our job is done here.
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
- return;
- }
+ // If there is nothing to detach, our job is done here. If at
+ // least one of the channels is still busy, rearm the work to
+ // make another attempt, because the previous batches are still
+ // in progress.
+ if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
+ krcp->monitor_todo = false;
+ else
+ schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
- // Previous RCU batch still in progress, try again later.
- krcp->monitor_todo = true;
- schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
-/*
- * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
- * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
- */
-static void kfree_rcu_monitor(struct work_struct *work)
-{
- unsigned long flags;
- struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
- monitor_work.work);
-
- raw_spin_lock_irqsave(&krcp->lock, flags);
- if (krcp->monitor_todo)
- kfree_rcu_drain_unlock(krcp, flags);
- else
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
-}
-
static enum hrtimer_restart
schedule_page_work_fn(struct hrtimer *t)
{
struct kfree_rcu_cpu *krcp =
container_of(t, struct kfree_rcu_cpu, hrtimer);
- queue_work(system_highpri_wq, &krcp->page_cache_work);
+ queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
return HRTIMER_NORESTART;
}
@@ -3455,12 +3442,16 @@ static void fill_page_cache_func(struct work_struct *work)
struct kvfree_rcu_bulk_data *bnode;
struct kfree_rcu_cpu *krcp =
container_of(work, struct kfree_rcu_cpu,
- page_cache_work);
+ page_cache_work.work);
unsigned long flags;
+ int nr_pages;
bool pushed;
int i;
- for (i = 0; i < rcu_min_cached_objs; i++) {
+ nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
+ 1 : rcu_min_cached_objs;
+
+ for (i = 0; i < nr_pages; i++) {
bnode = (struct kvfree_rcu_bulk_data *)
__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -3477,6 +3468,7 @@ static void fill_page_cache_func(struct work_struct *work)
}
atomic_set(&krcp->work_in_progress, 0);
+ atomic_set(&krcp->backoff_page_cache_fill, 0);
}
static void
@@ -3484,10 +3476,15 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
{
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
!atomic_xchg(&krcp->work_in_progress, 1)) {
- hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- krcp->hrtimer.function = schedule_page_work_fn;
- hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+ if (atomic_read(&krcp->backoff_page_cache_fill)) {
+ queue_delayed_work(system_wq,
+ &krcp->page_cache_work,
+ msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
+ } else {
+ hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ krcp->hrtimer.function = schedule_page_work_fn;
+ hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+ }
}
}
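Taken together with the shrinker hunk further down, the backoff logic above is intended to work roughly as follows; a sketch only, with a hypothetical helper name:

static void example_slow_refill(struct kfree_rcu_cpu *krcp)
{
        /* What kfree_rcu_shrink_count() does under memory pressure. */
        atomic_set(&krcp->backoff_page_cache_fill, 1);

        /* The next refill request then takes the delayed-work path above,
         * and fill_page_cache_func() allocates a single page instead of
         * rcu_min_cached_objs pages, after which the flag is cleared. */
        run_page_cache_worker(krcp);
}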
@@ -3552,11 +3549,11 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
}
/*
- * Queue a request for lazy invocation of appropriate free routine after a
- * grace period. Please note there are three paths are maintained, two are the
- * main ones that use array of pointers interface and third one is emergency
- * one, that is used only when the main path can not be maintained temporary,
- * due to memory pressure.
+ * Queue a request for lazy invocation of the appropriate free routine
+ * after a grace period. Please note that three paths are maintained,
+ * two for the common case using arrays of pointers and a third one that
+ * is used only when the main paths cannot be used, for example, due to
+ * memory pressure.
*
* Each kvfree_call_rcu() request is added to a batch. The batch will be drained
* every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
@@ -3645,6 +3642,8 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
count += READ_ONCE(krcp->count);
+ count += READ_ONCE(krcp->nr_bkv_objs);
+ atomic_set(&krcp->backoff_page_cache_fill, 1);
}
return count;
@@ -3654,18 +3653,14 @@ static unsigned long
kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
int cpu, freed = 0;
- unsigned long flags;
for_each_possible_cpu(cpu) {
int count;
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
count = krcp->count;
- raw_spin_lock_irqsave(&krcp->lock, flags);
- if (krcp->monitor_todo)
- kfree_rcu_drain_unlock(krcp, flags);
- else
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
+ count += drain_page_cache(krcp);
+ kfree_rcu_monitor(&krcp->monitor_work.work);
sc->nr_to_scan -= count;
freed += count;
@@ -3693,7 +3688,8 @@ void __init kfree_rcu_scheduler_running(void)
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
raw_spin_lock_irqsave(&krcp->lock, flags);
- if (!krcp->head || krcp->monitor_todo) {
+ if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
+ krcp->monitor_todo) {
raw_spin_unlock_irqrestore(&krcp->lock, flags);
continue;
}
@@ -3750,10 +3746,12 @@ static int rcu_blocking_is_gp(void)
* read-side critical sections have completed. Note, however, that
* upon return from synchronize_rcu(), the caller might well be executing
* concurrently with new RCU read-side critical sections that began while
- * synchronize_rcu() was waiting. RCU read-side critical sections are
- * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
- * In addition, regions of code across which interrupts, preemption, or
- * softirqs have been disabled also serve as RCU read-side critical
+ * synchronize_rcu() was waiting.
+ *
+ * RCU read-side critical sections are delimited by rcu_read_lock()
+ * and rcu_read_unlock(), and may be nested. In addition, but only in
+ * v5.0 and later, regions of code across which interrupts, preemption,
+ * or softirqs have been disabled also serve as RCU read-side critical
* sections. This includes hardware interrupt handlers, softirq handlers,
* and NMI handlers.
*
@@ -3774,6 +3772,9 @@ static int rcu_blocking_is_gp(void)
* to have executed a full memory barrier during the execution of
* synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
* again only if the system has more than one CPU).
+ *
+ * Implementation of these memory-ordering guarantees is described here:
+ * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
*/
void synchronize_rcu(void)
{
@@ -3844,11 +3845,11 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
/**
* poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
*
- * @oldstate: return from call to get_state_synchronize_rcu() or start_poll_synchronize_rcu()
+ * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
*
* If a full RCU grace period has elapsed since the earlier call from
* which oldstate was obtained, return @true, otherwise return @false.
- * If @false is returned, it is the caller's responsibilty to invoke this
+ * If @false is returned, it is the caller's responsibility to invoke this
* function later on until it does return @true. Alternatively, the caller
* can explicitly wait for a grace period, for example, by passing @oldstate
* to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
@@ -3860,6 +3861,11 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
* (many hours even on 32-bit systems) should check them occasionally
* and either refresh them or set a flag indicating that the grace period
* has completed.
+ *
+ * This function provides the same memory-ordering guarantees that
+ * would be provided by a synchronize_rcu() that was invoked at the call
+ * to the function that provided @oldstate, and that returned at the end
+ * of this function.
*/
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
@@ -3874,7 +3880,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
/**
* cond_synchronize_rcu - Conditionally wait for an RCU grace period
*
- * @oldstate: return value from earlier call to get_state_synchronize_rcu()
+ * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
*
* If a full RCU grace period has elapsed since the earlier call to
* get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
@@ -3884,6 +3890,11 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
* counter wrap is harmless. If the counter wraps, we have waited for
* more than 2 billion grace periods (and way more on a 64-bit system!),
* so waiting for one additional grace period should be just fine.
+ *
+ * This function provides the same memory-ordering guarantees that
+ * would be provided by a synchronize_rcu() that was invoked at the call
+ * to the function that provided @oldstate, and that returned at the end
+ * of this function.
*/
void cond_synchronize_rcu(unsigned long oldstate)
{
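As a usage illustration of the polled grace-period API whose kernel-doc is extended above; a sketch only, with hypothetical helper names:

static unsigned long example_gp_cookie;

static void example_begin_update(void)
{
        /* Snapshot the grace-period state and ensure a GP is started. */
        example_gp_cookie = start_poll_synchronize_rcu();
}

static void example_finish_update(void)
{
        /* Fast path: a full grace period has already elapsed. */
        if (poll_state_synchronize_rcu(example_gp_cookie))
                return;

        /* Otherwise block, with the same ordering guarantees as a
         * synchronize_rcu() spanning both calls. */
        cond_synchronize_rcu(example_gp_cookie);
}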
@@ -3911,7 +3922,7 @@ static int rcu_pending(int user)
check_cpu_stall(rdp);
/* Does this CPU need a deferred NOCB wakeup? */
- if (rcu_nocb_need_deferred_wakeup(rdp))
+ if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
return 1;
/* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
@@ -4094,7 +4105,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier);
/*
* Propagate ->qsinitmask bits up the rcu_node tree to account for the
* first CPU in a given leaf rcu_node structure coming online. The caller
- * must hold the corresponding leaf rcu_node ->lock with interrrupts
+ * must hold the corresponding leaf rcu_node ->lock with interrupts
* disabled.
*/
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
@@ -4189,7 +4200,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- rcu_prepare_kthreads(cpu);
+ rcu_spawn_one_boost_kthread(rnp);
rcu_spawn_cpu_nocb_kthread(cpu);
WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
@@ -4472,6 +4483,7 @@ static int __init rcu_spawn_gp_kthread(void)
wake_up_process(t);
rcu_spawn_nocb_kthreads();
rcu_spawn_boost_kthreads();
+ rcu_spawn_core_kthreads();
return 0;
}
early_initcall(rcu_spawn_gp_kthread);
@@ -4582,11 +4594,25 @@ static void __init rcu_init_one(void)
* replace the definitions in tree.h because those are needed to size
* the ->node array in the rcu_state structure.
*/
-static void __init rcu_init_geometry(void)
+void rcu_init_geometry(void)
{
ulong d;
int i;
+ static unsigned long old_nr_cpu_ids;
int rcu_capacity[RCU_NUM_LVLS];
+ static bool initialized;
+
+ if (initialized) {
+ /*
+ * Warn if setup_nr_cpu_ids() had not yet been invoked,
+ * unless nr_cpu_ids == NR_CPUS, in which case who cares?
+ */
+ WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
+ return;
+ }
+
+ old_nr_cpu_ids = nr_cpu_ids;
+ initialized = true;
/*
* Initialize any unspecified boot parameters.
@@ -4687,6 +4713,18 @@ static void __init kfree_rcu_batch_init(void)
int cpu;
int i;
+ /* Clamp the interval to [0:100] seconds. */
+ if (rcu_delay_page_cache_fill_msec < 0 ||
+ rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
+
+ rcu_delay_page_cache_fill_msec =
+ clamp(rcu_delay_page_cache_fill_msec, 0,
+ (int) (100 * MSEC_PER_SEC));
+
+ pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
+ rcu_delay_page_cache_fill_msec);
+ }
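As a concrete illustration of the clamping above, with a hypothetical out-of-range boot-parameter value:

        /*
         * Example only: booting with
         *   rcutree.rcu_delay_page_cache_fill_msec=200000
         * exceeds the 100 * MSEC_PER_SEC upper bound, so the value is
         * clamped to 100000 and the kernel logs:
         *   Adjusting rcutree.rcu_delay_page_cache_fill_msec to 100000 ms.
         */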
+
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
@@ -4696,7 +4734,7 @@ static void __init kfree_rcu_batch_init(void)
}
INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
- INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
+ INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
krcp->initialized = true;
}
if (register_shrinker(&kfree_rcu_shrinker))
@@ -4730,12 +4768,11 @@ void __init rcu_init(void)
rcutree_online_cpu(cpu);
}
- /* Create workqueue for expedited GPs and for Tree SRCU. */
+ /* Create workqueue for Tree SRCU and for expedited GPs. */
rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
WARN_ON(!rcu_gp_wq);
rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
WARN_ON(!rcu_par_gp_wq);
- srcu_init();
/* Fill in default value for rcutree.qovld boot parameter. */
/* -After- the rcu_node ->lock fields are initialized! */
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 71821d59d95c..305cf6aeb408 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -115,6 +115,7 @@ struct rcu_node {
/* boosting for this rcu_node structure. */
unsigned int boost_kthread_status;
/* State of boost_kthread_task for tracing. */
+ unsigned long n_boosts; /* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
struct swait_queue_head nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */
@@ -153,7 +154,7 @@ struct rcu_data {
unsigned long gp_seq; /* Track rsp->gp_seq counter. */
unsigned long gp_seq_needed; /* Track furthest future GP request. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
- bool core_needs_qs; /* Core waits for quiesc state. */
+ bool core_needs_qs; /* Core waits for quiescent state. */
bool beenonline; /* CPU online at least once. */
bool gpwrap; /* Possible ->gp_seq wrap. */
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
@@ -218,7 +219,6 @@ struct rcu_data {
/* The following fields are used by GP kthread, hence own cacheline. */
raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
- struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */
u8 nocb_gp_sleep; /* Is the nocb GP thread asleep? */
u8 nocb_gp_bypass; /* Found a bypass on last scan? */
u8 nocb_gp_gp; /* GP to wait for on last scan? */
@@ -257,10 +257,10 @@ struct rcu_data {
};
/* Values for nocb_defer_wakeup field in struct rcu_data. */
-#define RCU_NOCB_WAKE_OFF -1
#define RCU_NOCB_WAKE_NOT 0
-#define RCU_NOCB_WAKE 1
-#define RCU_NOCB_WAKE_FORCE 2
+#define RCU_NOCB_WAKE_BYPASS 1
+#define RCU_NOCB_WAKE 2
+#define RCU_NOCB_WAKE_FORCE 3
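A small sketch of how the renumbered deferral levels compose; it mirrors the rcu_nocb_need_deferred_wakeup() change later in this patch, and the function name here is hypothetical:

static bool example_deferred_wakeup_pending(struct rcu_data *rdp_gp, int level)
{
        /* Levels are ordered NOT (0) < BYPASS (1) < WAKE (2) < FORCE (3),
         * so a stronger deferred wakeup also satisfies any weaker check. */
        return READ_ONCE(rdp_gp->nocb_defer_wakeup) >= level;
}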
#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
/* For jiffies_till_first_fqs and */
@@ -417,8 +417,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
+static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static void __init rcu_spawn_boost_kthreads(void);
-static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
@@ -434,7 +434,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
bool *was_alldone, unsigned long flags);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
unsigned long flags);
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 4d6962048c30..de1dc3bb7f70 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -33,10 +33,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
return false;
}
-static inline bool rcu_running_nocb_timer(struct rcu_data *rdp)
-{
- return (timer_curr_running(&rdp->nocb_timer) && !in_irq());
-}
#else
static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
@@ -48,11 +44,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
return false;
}
-static inline bool rcu_running_nocb_timer(struct rcu_data *rdp)
-{
- return false;
-}
-
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
@@ -72,8 +63,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
rcu_lockdep_is_held_nocb(rdp) ||
(rdp == this_cpu_ptr(&rcu_data) &&
!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) ||
- rcu_current_is_nocb_kthread(rdp) ||
- rcu_running_nocb_timer(rdp)),
+ rcu_current_is_nocb_kthread(rdp)),
"Unsafe read of RCU_NOCB offloaded state"
);
@@ -1098,6 +1088,7 @@ static int rcu_boost(struct rcu_node *rnp)
/* Lock only for side effect: boosts task t's priority. */
rt_mutex_lock(&rnp->boost_mtx);
rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
+ rnp->n_boosts++;
return READ_ONCE(rnp->exp_tasks) != NULL ||
READ_ONCE(rnp->boost_tasks) != NULL;
@@ -1197,22 +1188,16 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
*/
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
- int rnp_index = rnp - rcu_get_root();
unsigned long flags;
+ int rnp_index = rnp - rcu_get_root();
struct sched_param sp;
struct task_struct *t;
- if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
- return;
-
- if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
+ if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
return;
rcu_state.boost = 1;
- if (rnp->boost_kthread_task != NULL)
- return;
-
t = kthread_create(rcu_boost_kthread, (void *)rnp,
"rcub/%d", rnp_index);
if (WARN_ON_ONCE(IS_ERR(t)))
@@ -1264,17 +1249,8 @@ static void __init rcu_spawn_boost_kthreads(void)
struct rcu_node *rnp;
rcu_for_each_leaf_node(rnp)
- rcu_spawn_one_boost_kthread(rnp);
-}
-
-static void rcu_prepare_kthreads(int cpu)
-{
- struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
- struct rcu_node *rnp = rdp->mynode;
-
- /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
- if (rcu_scheduler_fully_active)
- rcu_spawn_one_boost_kthread(rnp);
+ if (rcu_rnp_online_cpus(rnp))
+ rcu_spawn_one_boost_kthread(rnp);
}
#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1294,15 +1270,15 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}
-static void __init rcu_spawn_boost_kthreads(void)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}
-static void rcu_prepare_kthreads(int cpu)
+static void __init rcu_spawn_boost_kthreads(void)
{
}
@@ -1535,13 +1511,10 @@ static void rcu_cleanup_after_idle(void)
static int __init rcu_nocb_setup(char *str)
{
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
- if (!strcasecmp(str, "all")) /* legacy: use "0-N" instead */
+ if (cpulist_parse(str, rcu_nocb_mask)) {
+ pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
cpumask_setall(rcu_nocb_mask);
- else
- if (cpulist_parse(str, rcu_nocb_mask)) {
- pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
- cpumask_setall(rcu_nocb_mask);
- }
+ }
return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);
@@ -1692,56 +1665,78 @@ bool rcu_is_nocb_cpu(int cpu)
return false;
}
-/*
- * Kick the GP kthread for this NOCB group. Caller holds ->nocb_lock
- * and this function releases it.
- */
-static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
- unsigned long flags)
- __releases(rdp->nocb_lock)
+static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+ struct rcu_data *rdp,
+ bool force, unsigned long flags)
+ __releases(rdp_gp->nocb_gp_lock)
{
bool needwake = false;
- struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
- lockdep_assert_held(&rdp->nocb_lock);
if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("AlreadyAwake"));
return false;
}
- if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
- WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
- del_timer(&rdp->nocb_timer);
+ if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+ WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+ del_timer(&rdp_gp->nocb_timer);
}
- rcu_nocb_unlock_irqrestore(rdp, flags);
- raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
needwake = true;
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
}
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
- if (needwake)
+ if (needwake) {
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
wake_up_process(rdp_gp->nocb_gp_kthread);
+ }
return needwake;
}
/*
+ * Kick the GP kthread for this NOCB group.
+ */
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
+{
+ unsigned long flags;
+ struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+ raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+ return __wake_nocb_gp(rdp_gp, rdp, force, flags);
+}
+
+/*
* Arrange to wake the GP kthread for this NOCB group at some future
* time when it is safe to do so.
*/
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
const char *reason)
{
- if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_OFF)
- return;
- if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
- mod_timer(&rdp->nocb_timer, jiffies + 1);
- if (rdp->nocb_defer_wakeup < waketype)
- WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
+ unsigned long flags;
+ struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+ raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
+ /*
+ * Bypass wakeup overrides previous deferments. In case
+ * of callback storm, no need to wake up too early.
+ */
+ if (waketype == RCU_NOCB_WAKE_BYPASS) {
+ mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
+ WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+ } else {
+ if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
+ mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
+ if (rdp_gp->nocb_defer_wakeup < waketype)
+ WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+ }
+
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}
@@ -1940,7 +1935,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
}
/*
- * Awaken the no-CBs grace-period kthead if needed, either due to it
+ * Awaken the no-CBs grace-period kthread if needed, either due to it
* legitimately being asleep or due to overload conditions.
*
* If warranted, also wake up the kthread servicing this CPUs queues.
@@ -1968,13 +1963,14 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
rdp->qlen_last_fqs_check = len;
if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
- wake_nocb_gp(rdp, false, flags);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ wake_nocb_gp(rdp, false);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeEmpty"));
} else {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
TPS("WakeEmptyIsDeferred"));
- rcu_nocb_unlock_irqrestore(rdp, flags);
}
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
/* ... or if many callbacks queued. */
@@ -1989,10 +1985,14 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
smp_mb(); /* Enqueue before timer_pending(). */
if ((rdp->nocb_cb_sleep ||
!rcu_segcblist_ready_cbs(&rdp->cblist)) &&
- !timer_pending(&rdp->nocb_bypass_timer))
+ !timer_pending(&rdp->nocb_timer)) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
TPS("WakeOvfIsDeferred"));
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ } else {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+ }
} else {
rcu_nocb_unlock_irqrestore(rdp, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
@@ -2000,18 +2000,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
return;
}
-/* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
-static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
-{
- unsigned long flags;
- struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
-
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
- rcu_nocb_lock_irqsave(rdp, flags);
- smp_mb__after_spinlock(); /* Timer expire before wakeup. */
- __call_rcu_nocb_wake(rdp, true, flags);
-}
-
/*
* Check if we ignore this rdp.
*
@@ -2118,11 +2106,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
bypass = true;
}
rnp = rdp->mynode;
- if (bypass) { // Avoid race with first bypass CB.
- WRITE_ONCE(my_rdp->nocb_defer_wakeup,
- RCU_NOCB_WAKE_NOT);
- del_timer(&my_rdp->nocb_timer);
- }
+
// Advance callbacks if helpful and low contention.
needwake_gp = false;
if (!rcu_segcblist_restempty(&rdp->cblist,
@@ -2168,12 +2152,12 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
my_rdp->nocb_gp_bypass = bypass;
my_rdp->nocb_gp_gp = needwait_gp;
my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
+
if (bypass && !rcu_nocb_poll) {
// At least one child with non-empty ->nocb_bypass, so set
// timer in order to avoid stranding its callbacks.
- raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
- mod_timer(&my_rdp->nocb_bypass_timer, j + 2);
- raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
+ wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
+ TPS("WakeBypassIsDeferred"));
}
if (rcu_nocb_poll) {
/* Polling, so trace if first poll in the series. */
@@ -2197,8 +2181,10 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
}
if (!rcu_nocb_poll) {
raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
- if (bypass)
- del_timer(&my_rdp->nocb_bypass_timer);
+ if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+ WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+ del_timer(&my_rdp->nocb_timer);
+ }
WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
}
@@ -2334,25 +2320,27 @@ static int rcu_nocb_cb_kthread(void *arg)
}
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
- return READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT;
+ return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}
/* Do a deferred wakeup of rcu_nocb_kthread(). */
-static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
+ struct rcu_data *rdp, int level,
+ unsigned long flags)
+ __releases(rdp_gp->nocb_gp_lock)
{
- unsigned long flags;
int ndw;
int ret;
- rcu_nocb_lock_irqsave(rdp, flags);
- if (!rcu_nocb_need_deferred_wakeup(rdp)) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
+ if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
return false;
}
- ndw = READ_ONCE(rdp->nocb_defer_wakeup);
- ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+
+ ndw = rdp_gp->nocb_defer_wakeup;
+ ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
return ret;
@@ -2361,9 +2349,15 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
+ unsigned long flags;
struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
- do_nocb_deferred_wakeup_common(rdp);
+ WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
+
+ raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
+ smp_mb__after_spinlock(); /* Timer expire before wakeup. */
+ do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}
/*
@@ -2373,9 +2367,14 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
*/
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
- if (rcu_nocb_need_deferred_wakeup(rdp))
- return do_nocb_deferred_wakeup_common(rdp);
- return false;
+ unsigned long flags;
+ struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+ if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
+ return false;
+
+ raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+ return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}
void rcu_nocb_flush_deferred_wakeup(void)
@@ -2443,17 +2442,15 @@ static long rcu_nocb_rdp_deoffload(void *arg)
swait_event_exclusive(rdp->nocb_state_wq,
!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
SEGCBLIST_KTHREAD_GP));
- rcu_nocb_lock_irqsave(rdp, flags);
- /* Make sure nocb timer won't stay around */
- WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_OFF);
- rcu_nocb_unlock_irqrestore(rdp, flags);
- del_timer_sync(&rdp->nocb_timer);
-
/*
- * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY with CB unlocked
- * and IRQs disabled but let's be paranoid.
+ * Lock one last time to acquire latest callback updates from kthreads
+ * so we can later handle callbacks locally without locking.
*/
rcu_nocb_lock_irqsave(rdp, flags);
+ /*
+ * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
+ * lock is released but how about being paranoid for once?
+ */
rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
/*
* With SEGCBLIST_SOFTIRQ_ONLY, we can't use
@@ -2473,10 +2470,6 @@ int rcu_nocb_cpu_deoffload(int cpu)
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
int ret = 0;
- if (rdp == rdp->nocb_gp_rdp) {
- pr_info("Can't deoffload an rdp GP leader (yet)\n");
- return -EINVAL;
- }
mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock();
if (rcu_rdp_is_offloaded(rdp)) {
@@ -2517,8 +2510,7 @@ static long rcu_nocb_rdp_offload(void *arg)
* SEGCBLIST_SOFTIRQ_ONLY mode.
*/
raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
- /* Re-enable nocb timer */
- WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+
/*
* We didn't take the nocb lock while working on the
* rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
@@ -2626,7 +2618,6 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
raw_spin_lock_init(&rdp->nocb_bypass_lock);
raw_spin_lock_init(&rdp->nocb_gp_lock);
timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
- timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
rcu_cblist_init(&rdp->nocb_bypass);
}
@@ -2785,13 +2776,12 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
struct rcu_node *rnp = rdp->mynode;
- pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
+ pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
rdp->cpu,
"kK"[!!rdp->nocb_gp_kthread],
"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
"dD"[!!rdp->nocb_defer_wakeup],
"tT"[timer_pending(&rdp->nocb_timer)],
- "bB"[timer_pending(&rdp->nocb_bypass_timer)],
"sS"[!!rdp->nocb_gp_sleep],
".W"[swait_active(&rdp->nocb_gp_wq)],
".W"[swait_active(&rnp->nocb_gp_wq[0])],
@@ -2812,7 +2802,6 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
char bufr[20];
struct rcu_segcblist *rsclp = &rdp->cblist;
bool waslocked;
- bool wastimer;
bool wassleep;
if (rdp->nocb_gp_rdp == rdp)
@@ -2849,15 +2838,13 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
return;
waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
- wastimer = timer_pending(&rdp->nocb_bypass_timer);
wassleep = swait_active(&rdp->nocb_gp_wq);
- if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep)
- return; /* Nothing untowards. */
+ if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
+ return; /* Nothing untoward. */
- pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n",
+ pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
"lL"[waslocked],
"dD"[!!rdp->nocb_defer_wakeup],
- "tT"[wastimer],
"sS"[!!rdp->nocb_gp_sleep],
".W"[wassleep]);
}
@@ -2922,7 +2909,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
return false;
}
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index acb2288063b5..6c76988cc019 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -314,6 +314,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
* tasks blocked within RCU read-side critical sections.
*/
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+ __releases(rnp->lock)
{
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return 0;
@@ -717,6 +718,63 @@ static void check_cpu_stall(struct rcu_data *rdp)
/*
+ * Check to see if a failure to end RCU priority inversion was due to
+ * a CPU not passing through a quiescent state. When this happens, there
+ * is nothing that RCU priority boosting can do to help, so we shouldn't
+ * count this as an RCU priority boosting failure. A return of true says
+ * RCU priority boosting is to blame, and false says otherwise. If false
+ * is returned, the first of the CPUs to blame is stored through cpup.
+ * If there was no CPU blocking the current grace period, but also nothing
+ * in need of being boosted, *cpup is set to -1. This can happen in case
+ * of vCPU preemption while the last CPU is reporting its quiescent state,
+ * for example.
+ *
+ * If cpup is NULL, then a lockless quick check is carried out, suitable
+ * for high-rate usage. On the other hand, if cpup is non-NULL, each
+ * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
+ */
+bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
+{
+ bool atb = false;
+ int cpu;
+ unsigned long flags;
+ struct rcu_node *rnp;
+
+ rcu_for_each_leaf_node(rnp) {
+ if (!cpup) {
+ if (READ_ONCE(rnp->qsmask)) {
+ return false;
+ } else {
+ if (READ_ONCE(rnp->gp_tasks))
+ atb = true;
+ continue;
+ }
+ }
+ *cpup = -1;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ if (rnp->gp_tasks)
+ atb = true;
+ if (!rnp->qsmask) {
+ // No CPUs without quiescent states for this rnp.
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ continue;
+ }
+ // Find the first holdout CPU.
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ *cpup = cpu;
+ return false;
+ }
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+ // Can't blame CPUs, so must blame RCU priority boosting.
+ return atb;
+}
+EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
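A hypothetical caller sketch for rcu_check_boost_fail(), not part of this patch, showing the intended interpretation of the return value and *cpup:

static void example_report_boost_failure(unsigned long gp_state)
{
        int cpu;

        if (rcu_check_boost_fail(gp_state, &cpu)) {
                pr_err("RCU priority boosting failed to resolve the stall\n");
        } else if (cpu >= 0) {
                pr_err("CPU %d has not passed through a quiescent state\n", cpu);
        } else {
                /* cpu == -1: nothing blocked the grace period and nothing
                 * needed boosting, e.g. a vCPU was preempted while the last
                 * CPU was reporting its quiescent state. */
        }
}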
+
+/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
@@ -726,6 +784,7 @@ void show_rcu_gp_kthreads(void)
unsigned long j;
unsigned long ja;
unsigned long jr;
+ unsigned long js;
unsigned long jw;
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -734,21 +793,30 @@ void show_rcu_gp_kthreads(void)
j = jiffies;
ja = j - data_race(rcu_state.gp_activity);
jr = j - data_race(rcu_state.gp_req_activity);
+ js = j - data_race(rcu_state.gp_start);
jw = j - data_race(rcu_state.gp_wake_time);
- pr_info("%s: wait state: %s(%d) ->state: %#x delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
+ pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
rcu_state.name, gp_state_getname(rcu_state.gp_state),
- rcu_state.gp_state, t ? t->__state : 0x1ffff,
- ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
+ rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
+ js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
(long)data_race(rcu_state.gp_seq),
(long)data_race(rcu_get_root()->gp_seq_needed),
+ data_race(rcu_state.gp_max),
data_race(rcu_state.gp_flags));
rcu_for_each_node_breadth_first(rnp) {
- if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
- READ_ONCE(rnp->gp_seq_needed)))
+ if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
+ !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
+ !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
continue;
- pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
- rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
- (long)data_race(rnp->gp_seq_needed));
+ pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
+ rnp->grplo, rnp->grphi,
+ (long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
+ data_race(rnp->qsmask),
+ ".b"[!!data_race(rnp->boost_kthread_task)],
+ ".B"[!!data_race(rnp->boost_tasks)],
+ ".E"[!!data_race(rnp->exp_tasks)],
+ ".G"[!!data_race(rnp->gp_tasks)],
+ data_race(rnp->n_boosts));
if (!rcu_is_leaf_node(rnp))
continue;
for_each_leaf_node_possible_cpu(rnp, cpu) {
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index b95ae86c40a7..c21b38cc25e9 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
noinstr int notrace debug_lockdep_rcu_enabled(void)
{
- return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
+ return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -524,6 +524,7 @@ static void test_callback(struct rcu_head *r)
}
DEFINE_STATIC_SRCU(early_srcu);
+static unsigned long early_srcu_cookie;
struct early_boot_kfree_rcu {
struct rcu_head rh;
@@ -536,8 +537,10 @@ static void early_boot_test_call_rcu(void)
struct early_boot_kfree_rcu *rhp;
call_rcu(&head, test_callback);
- if (IS_ENABLED(CONFIG_SRCU))
+ if (IS_ENABLED(CONFIG_SRCU)) {
+ early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
call_srcu(&early_srcu, &shead, test_callback);
+ }
rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
if (!WARN_ON_ONCE(!rhp))
kfree_rcu(rhp, rh);
@@ -563,6 +566,7 @@ static int rcu_verify_early_boot_tests(void)
if (IS_ENABLED(CONFIG_SRCU)) {
early_boot_test_counter++;
srcu_barrier(&early_srcu);
+ WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
}
}
if (rcu_self_test_counter != early_boot_test_counter) {
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 2377cbb32474..29e8fc5d91a7 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -405,15 +405,15 @@ static int scftorture_invoker(void *arg)
VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
cpu = scfp->cpu % nr_cpu_ids;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
set_user_nice(current, MAX_NICE);
if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
- VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());
+ VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());
// Make sure that the CPU is affinitized appropriately during testing.
- curcpu = smp_processor_id();
+ curcpu = raw_smp_processor_id();
WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
"%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
__func__, scfp->cpu, curcpu, nr_cpu_ids);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cf16f8fda9a6..20ffcc044134 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1981,12 +1981,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
dequeue_task(rq, p, flags);
}
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
{
- return p->static_prio;
+ int prio;
+
+ if (dl_policy(policy))
+ prio = MAX_DL_PRIO - 1;
+ else if (rt_policy(policy))
+ prio = MAX_RT_PRIO - 1 - rt_prio;
+ else
+ prio = NICE_TO_PRIO(nice);
+
+ return prio;
}
/*
@@ -1998,15 +2004,7 @@ static inline int __normal_prio(struct task_struct *p)
*/
static inline int normal_prio(struct task_struct *p)
{
- int prio;
-
- if (task_has_dl_policy(p))
- prio = MAX_DL_PRIO-1;
- else if (task_has_rt_policy(p))
- prio = MAX_RT_PRIO-1 - p->rt_priority;
- else
- prio = __normal_prio(p);
- return prio;
+ return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}
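A quick numeric check of the mapping above, assuming the usual values of the constants (MAX_DL_PRIO == 0, MAX_RT_PRIO == 100, NICE_TO_PRIO(n) == 120 + n); illustration only:

        /*
         * __normal_prio(SCHED_DEADLINE,  0,  0) ->  -1  (MAX_DL_PRIO - 1)
         * __normal_prio(SCHED_FIFO,     10,  0) ->  89  (MAX_RT_PRIO - 1 - 10)
         * __normal_prio(SCHED_NORMAL,    0,  0) -> 120  (NICE_TO_PRIO(0))
         * __normal_prio(SCHED_NORMAL,    0, -5) -> 115  (NICE_TO_PRIO(-5))
         */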
/*
@@ -4099,7 +4097,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
} else if (PRIO_TO_NICE(p->static_prio) < 0)
p->static_prio = NICE_TO_PRIO(0);
- p->prio = p->normal_prio = __normal_prio(p);
+ p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
/*
@@ -6341,6 +6339,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
}
EXPORT_SYMBOL(default_wake_function);
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+ if (dl_prio(prio))
+ p->sched_class = &dl_sched_class;
+ else if (rt_prio(prio))
+ p->sched_class = &rt_sched_class;
+ else
+ p->sched_class = &fair_sched_class;
+
+ p->prio = prio;
+}
+
#ifdef CONFIG_RT_MUTEXES
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
@@ -6456,22 +6466,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
} else {
p->dl.pi_se = &p->dl;
}
- p->sched_class = &dl_sched_class;
} else if (rt_prio(prio)) {
if (dl_prio(oldprio))
p->dl.pi_se = &p->dl;
if (oldprio < prio)
queue_flag |= ENQUEUE_HEAD;
- p->sched_class = &rt_sched_class;
} else {
if (dl_prio(oldprio))
p->dl.pi_se = &p->dl;
if (rt_prio(oldprio))
p->rt.timeout = 0;
- p->sched_class = &fair_sched_class;
}
- p->prio = prio;
+ __setscheduler_prio(p, prio);
if (queued)
enqueue_task(rq, p, queue_flag);
@@ -6824,35 +6831,6 @@ static void __setscheduler_params(struct task_struct *p,
set_load_weight(p, true);
}
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
- const struct sched_attr *attr, bool keep_boost)
-{
- /*
- * If params can't change scheduling class changes aren't allowed
- * either.
- */
- if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
- return;
-
- __setscheduler_params(p, attr);
-
- /*
- * Keep a potential priority boosting if called from
- * sched_setscheduler().
- */
- p->prio = normal_prio(p);
- if (keep_boost)
- p->prio = rt_effective_prio(p, p->prio);
-
- if (dl_prio(p->prio))
- p->sched_class = &dl_sched_class;
- else if (rt_prio(p->prio))
- p->sched_class = &rt_sched_class;
- else
- p->sched_class = &fair_sched_class;
-}
-
/*
* Check the target process has a UID that matches the current process's:
*/
@@ -6873,10 +6851,8 @@ static int __sched_setscheduler(struct task_struct *p,
const struct sched_attr *attr,
bool user, bool pi)
{
- int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
- MAX_RT_PRIO - 1 - attr->sched_priority;
- int retval, oldprio, oldpolicy = -1, queued, running;
- int new_effective_prio, policy = attr->sched_policy;
+ int oldpolicy = -1, policy = attr->sched_policy;
+ int retval, oldprio, newprio, queued, running;
const struct sched_class *prev_class;
struct callback_head *head;
struct rq_flags rf;
@@ -7074,6 +7050,7 @@ change:
p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio;
+ newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
if (pi) {
/*
* Take priority boosted tasks into account. If the new
@@ -7082,8 +7059,8 @@ change:
* the runqueue. This will be done when the task deboost
* itself.
*/
- new_effective_prio = rt_effective_prio(p, newprio);
- if (new_effective_prio == oldprio)
+ newprio = rt_effective_prio(p, newprio);
+ if (newprio == oldprio)
queue_flags &= ~DEQUEUE_MOVE;
}
@@ -7096,7 +7073,10 @@ change:
prev_class = p->sched_class;
- __setscheduler(rq, p, attr, pi);
+ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+ __setscheduler_params(p, attr);
+ __setscheduler_prio(p, newprio);
+ }
__setscheduler_uclamp(p, attr);
if (queued) {
@@ -7182,6 +7162,7 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, false, true);
}
+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fb469b26b00a..44c452072a1b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3037,8 +3037,9 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ u32 divider = get_pelt_divider(&se->avg);
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
- sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
}
#else
static inline void
@@ -5081,7 +5082,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
struct hrtimer *refresh_timer = &cfs_b->period_timer;
- u64 remaining;
+ s64 remaining;
/* if the call-back is running a quota refresh is already occurring */
if (hrtimer_callback_running(refresh_timer))
@@ -5089,7 +5090,7 @@ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
/* is a quota refresh about to occur? */
remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
- if (remaining < min_expire)
+ if (remaining < (s64)min_expire)
return 1;
return 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c80d42e9589b..14a41a243f7b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2818,20 +2818,27 @@ static __always_inline
unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
struct task_struct *p)
{
- unsigned long min_util;
- unsigned long max_util;
+ unsigned long min_util = 0;
+ unsigned long max_util = 0;
if (!static_branch_likely(&sched_uclamp_used))
return util;
- min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
- max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
-
if (p) {
- min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
- max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
+ min_util = uclamp_eff_value(p, UCLAMP_MIN);
+ max_util = uclamp_eff_value(p, UCLAMP_MAX);
+
+ /*
+ * Ignore last runnable task's max clamp, as this task will
+ * reset it. Similarly, no need to read the rq's min clamp.
+ */
+ if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+ goto out;
}
+ min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+ max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+out:
/*
* Since CPU's {min,max}_util clamps are MAX aggregated considering
* RUNNABLE tasks with _different_ clamps, we can end up with an
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 057e17f3215d..6469eca8078c 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -602,7 +602,7 @@ static inline void seccomp_sync_threads(unsigned long flags)
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
atomic_set(&thread->seccomp.filter_count,
- atomic_read(&thread->seccomp.filter_count));
+ atomic_read(&caller->seccomp.filter_count));
/*
* Don't let an unprivileged task work around
diff --git a/kernel/signal.c b/kernel/signal.c
index de0920353d30..a3229add4455 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -426,18 +426,30 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
rcu_read_lock();
ucounts = task_ucounts(t);
sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
- if (sigpending == 1)
- ucounts = get_ucounts(ucounts);
+ switch (sigpending) {
+ case 1:
+ if (likely(get_ucounts(ucounts)))
+ break;
+ fallthrough;
+ case LONG_MAX:
+ /*
+ * We need to decrease the ucount in the userns tree on any
+ * failure, to avoid leaking counts.
+ */
+ dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
+ rcu_read_unlock();
+ return NULL;
+ }
rcu_read_unlock();
- if (override_rlimit || (sigpending < LONG_MAX && sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
} else {
print_dropped_signal(sig);
}
if (unlikely(q == NULL)) {
- if (ucounts && dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
+ if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
put_ucounts(ucounts);
} else {
INIT_LIST_HEAD(&q->list);
@@ -2830,6 +2842,8 @@ static void signal_delivered(struct ksignal *ksig, int stepping)
if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
sigaddset(&blocked, ksig->sig);
set_current_blocked(&blocked);
+ if (current->sas_ss_flags & SS_AUTODISARM)
+ sas_ss_reset(current);
tracehook_signal_handler(stepping);
}
@@ -4148,11 +4162,7 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
__put_user(t->sas_ss_flags, &uss->ss_flags) |
__put_user(t->sas_ss_size, &uss->ss_size);
- if (err)
- return err;
- if (t->sas_ss_flags & SS_AUTODISARM)
- sas_ss_reset(t);
- return 0;
+ return err;
}
#ifdef CONFIG_COMPAT
@@ -4207,11 +4217,7 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
&uss->ss_sp) |
__put_user(t->sas_ss_flags, &uss->ss_flags) |
__put_user(t->sas_ss_size, &uss->ss_size);
- if (err)
- return err;
- if (t->sas_ss_flags & SS_AUTODISARM)
- sas_ss_reset(t);
- return 0;
+ return err;
}
#endif
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index e4163042c4d6..cf6acab78538 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -47,7 +47,7 @@ void __init idle_thread_set_boot_cpu(void)
*
* Creates the thread if it does not exist.
*/
-static inline void idle_init(unsigned int cpu)
+static __always_inline void idle_init(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
diff --git a/kernel/static_call.c b/kernel/static_call.c
index 723fcc9d20db..43ba0b1e0edb 100644
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -292,13 +292,15 @@ static int addr_conflict(struct static_call_site *site, void *start, void *end)
static int __static_call_text_reserved(struct static_call_site *iter_start,
struct static_call_site *iter_stop,
- void *start, void *end)
+ void *start, void *end, bool init)
{
struct static_call_site *iter = iter_start;
while (iter < iter_stop) {
- if (addr_conflict(iter, start, end))
- return 1;
+ if (init || !static_call_is_init(iter)) {
+ if (addr_conflict(iter, start, end))
+ return 1;
+ }
iter++;
}
@@ -324,7 +326,7 @@ static int __static_call_mod_text_reserved(void *start, void *end)
ret = __static_call_text_reserved(mod->static_call_sites,
mod->static_call_sites + mod->num_static_call_sites,
- start, end);
+ start, end, mod->state == MODULE_STATE_COMING);
module_put(mod);
@@ -459,8 +461,9 @@ static inline int __static_call_mod_text_reserved(void *start, void *end)
int static_call_text_reserved(void *start, void *end)
{
+ bool init = system_state < SYSTEM_RUNNING;
int ret = __static_call_text_reserved(__start_static_call_sites,
- __stop_static_call_sites, start, end);
+ __stop_static_call_sites, start, end, init);
if (ret)
return ret;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index dad4d994641e..30971b1dd4a9 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -358,6 +358,8 @@ COND_SYSCALL(pkey_mprotect);
COND_SYSCALL(pkey_alloc);
COND_SYSCALL(pkey_free);
+/* memfd_secret */
+COND_SYSCALL(memfd_secret);
/*
* Architecture specific weak syscall entries.
diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c
index ccb78509f1a8..664ded05dd7a 100644
--- a/kernel/sysctl-test.c
+++ b/kernel/sysctl-test.c
@@ -49,7 +49,7 @@ static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table,
KUNIT_PROC_READ, buffer, &len,
&pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
/*
* See above.
@@ -58,7 +58,7 @@ static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table,
KUNIT_PROC_WRITE, buffer, &len,
&pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
}
/*
@@ -95,7 +95,7 @@ static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table,
KUNIT_PROC_READ, buffer, &len,
&pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
/*
* See previous comment.
@@ -104,7 +104,7 @@ static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table,
KUNIT_PROC_WRITE, buffer, &len,
&pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
}
/*
@@ -135,11 +135,11 @@ static void sysctl_test_api_dointvec_table_len_is_zero(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
&len, &pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer,
&len, &pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
}
/*
@@ -174,7 +174,7 @@ static void sysctl_test_api_dointvec_table_read_but_position_set(
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer,
&len, &pos));
- KUNIT_EXPECT_EQ(test, (size_t)0, len);
+ KUNIT_EXPECT_EQ(test, 0, len);
}
/*
@@ -203,7 +203,7 @@ static void sysctl_test_dointvec_read_happy_single_positive(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
user_buffer, &len, &pos));
- KUNIT_ASSERT_EQ(test, (size_t)3, len);
+ KUNIT_ASSERT_EQ(test, 3, len);
buffer[len] = '\0';
/* And we read 13 back out. */
KUNIT_EXPECT_STREQ(test, "13\n", buffer);
@@ -233,9 +233,9 @@ static void sysctl_test_dointvec_read_happy_single_negative(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ,
user_buffer, &len, &pos));
- KUNIT_ASSERT_EQ(test, (size_t)4, len);
+ KUNIT_ASSERT_EQ(test, 4, len);
buffer[len] = '\0';
- KUNIT_EXPECT_STREQ(test, "-16\n", (char *)buffer);
+ KUNIT_EXPECT_STREQ(test, "-16\n", buffer);
}
/*
@@ -265,7 +265,7 @@ static void sysctl_test_dointvec_write_happy_single_positive(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
user_buffer, &len, &pos));
KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len);
- KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos);
+ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, pos);
KUNIT_EXPECT_EQ(test, 9, *((int *)table.data));
}
@@ -295,7 +295,7 @@ static void sysctl_test_dointvec_write_happy_single_negative(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE,
user_buffer, &len, &pos));
KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len);
- KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos);
+ KUNIT_EXPECT_EQ(test, sizeof(input) - 1, pos);
KUNIT_EXPECT_EQ(test, -9, *((int *)table.data));
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e1aa24e1545c..272f4a272f8c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,6 +27,7 @@
#include <linux/sysctl.h>
#include <linux/bitmap.h>
#include <linux/signal.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
@@ -1495,7 +1496,6 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int err = 0;
- bool first = 1;
size_t left = *lenp;
unsigned long bitmap_len = table->maxlen;
unsigned long *bitmap = *(unsigned long **) table->data;
@@ -1580,12 +1580,12 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
}
bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1);
- first = 0;
proc_skip_char(&p, &left, '\n');
}
left += skipped;
} else {
unsigned long bit_a, bit_b = 0;
+ bool first = 1;
while (left) {
bit_a = find_next_bit(bitmap, bitmap_len, bit_b);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 29a5e54e6e10..517be7fd175e 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
if (!p)
goto out;
+ /* Protect timer list r/w in arm_timer() */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL))
+ goto out;
+
/*
* Fetch the current sample and update the timer's expiry time.
*/
@@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
bump_cpu_timer(timer, now);
- /* Protect timer list r/w in arm_timer() */
- sighand = lock_task_sighand(p, &flags);
- if (unlikely(sighand == NULL))
- goto out;
-
/*
* Now re-arm for the new expiry time.
*/
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 467087d7bdb6..e3d2c23c413d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -207,6 +207,7 @@ struct timer_base {
unsigned int cpu;
bool next_expiry_recalc;
bool is_idle;
+ bool timers_pending;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
struct hlist_head vectors[WHEEL_SIZE];
} ____cacheline_aligned;
@@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
* can reevaluate the wheel:
*/
base->next_expiry = bucket_expiry;
+ base->timers_pending = true;
base->next_expiry_recalc = false;
trigger_dyntick_cpu(base, timer);
}
@@ -1237,20 +1239,6 @@ int try_to_del_timer_sync(struct timer_list *timer)
}
EXPORT_SYMBOL(try_to_del_timer_sync);
-bool timer_curr_running(struct timer_list *timer)
-{
- int i;
-
- for (i = 0; i < NR_BASES; i++) {
- struct timer_base *base = this_cpu_ptr(&timer_bases[i]);
-
- if (base->running_timer == timer)
- return true;
- }
-
- return false;
-}
-
#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
@@ -1277,8 +1265,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
static void timer_sync_wait_running(struct timer_base *base)
{
if (atomic_read(&base->timer_waiters)) {
+ raw_spin_unlock_irq(&base->lock);
spin_unlock(&base->expiry_lock);
spin_lock(&base->expiry_lock);
+ raw_spin_lock_irq(&base->lock);
}
}
@@ -1469,14 +1459,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
if (timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
call_timer_fn(timer, fn, baseclk);
- base->running_timer = NULL;
raw_spin_lock(&base->lock);
+ base->running_timer = NULL;
} else {
raw_spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, baseclk);
+ raw_spin_lock_irq(&base->lock);
base->running_timer = NULL;
timer_sync_wait_running(base);
- raw_spin_lock_irq(&base->lock);
}
}
}
@@ -1596,6 +1586,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
}
base->next_expiry_recalc = false;
+ base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
return next;
}
@@ -1647,7 +1638,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
- bool is_max_delta;
/*
* Pretend that there is no timer pending if the cpu is offline.
@@ -1660,7 +1650,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (base->next_expiry_recalc)
base->next_expiry = __next_timer_interrupt(base);
nextevt = base->next_expiry;
- is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
/*
* We have a fresh next event. Check whether we can forward the
@@ -1678,7 +1667,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
expires = basem;
base->is_idle = false;
} else {
- if (!is_max_delta)
+ if (base->timers_pending)
expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
* If we expect to sleep more than a tick, mark the base idle.
@@ -1961,6 +1950,7 @@ int timers_prepare_cpu(unsigned int cpu)
base = per_cpu_ptr(&timer_bases[b], cpu);
base->clk = jiffies;
base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+ base->timers_pending = false;
base->is_idle = false;
}
return 0;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 7fa82778c3e6..3ee23f4d437f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -219,6 +219,11 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
depends on DYNAMIC_FTRACE_WITH_REGS
depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+config DYNAMIC_FTRACE_WITH_ARGS
+ def_bool y
+ depends on DYNAMIC_FTRACE
+ depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
@@ -356,6 +361,68 @@ config HWLAT_TRACER
file. Every time a latency is greater than tracing_thresh, it will
be recorded into the ring buffer.
+config OSNOISE_TRACER
+ bool "OS Noise tracer"
+ select GENERIC_TRACER
+ help
+ In the context of high-performance computing (HPC), the Operating
+ System Noise (osnoise) refers to the interference experienced by an
+ application due to activities inside the operating system. In the
+ context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
+ can add noise to the system. Moreover, hardware-related jobs can
+ also cause noise, for example, via SMIs.
+
+ The osnoise tracer leverages the hwlat_detector by running a similar
+ loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
+ the sources of osnoise to occur during its execution. The osnoise
+ tracer takes note of the entry and exit points of any source of
+ interference, keeping a per-cpu counter for each source. The
+ counters for NMIs, IRQs, SoftIRQs, and threads are incremented any
+ time the tool observes the corresponding entry event. When noise
+ happens without any interference from the operating system level,
+ the hardware noise counter is incremented instead, pointing to a
+ hardware-related noise. In this way, osnoise can account for any
+ source of interference. At the end of the period, the osnoise
+ tracer prints the sum of all noise, the max single noise, the
+ percentage of CPU time available for the thread, and the counters
+ for the noise sources.
+
+ In addition to the tracer, a set of tracepoints was added to
+ help identify the source of the osnoise.
+
+ The output will appear in the trace and trace_pipe files.
+
+ To enable this tracer, echo "osnoise" into the current_tracer
+ file.
+
+config TIMERLAT_TRACER
+ bool "Timerlat tracer"
+ select OSNOISE_TRACER
+ select GENERIC_TRACER
+ help
+ The timerlat tracer aims to help preemptive kernel developers
+ find sources of wakeup latencies of real-time threads.
+
+ The tracer creates a per-cpu kernel thread with real-time priority.
+ The tracer thread sets a periodic timer to wake itself up, and goes
+ to sleep waiting for the timer to fire. On wakeup, the thread
+ then computes a wakeup latency value as the difference between
+ the current time and the absolute time that the timer was set
+ to expire.
+
+ The tracer prints two lines at every activation. The first is the
+ timer latency observed in hardirq context before the
+ activation of the thread. The second is the timer latency observed
+ by the thread, which is the same level that cyclictest reports. The
+ ACTIVATION ID field relates the IRQ execution to its
+ respective thread execution.
+
+ The tracer is built on top of the osnoise tracer, and the osnoise:
+ events can be used to trace the source of interference from NMIs,
+ IRQs and other threads. It also enables capturing the stacktrace
+ in IRQ context, which helps to identify the code path that can
+ delay the thread.
+
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
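As the two Kconfig help texts above note, the osnoise and timerlat tracers
are enabled by echoing their name into the current_tracer file, and the
results show up in the trace and trace_pipe files. A minimal userspace
sketch of that flow; the tracefs mount point (/sys/kernel/tracing), the
choice of trace_pipe, and the ten-line read limit are illustrative
assumptions, not part of this patch:

	#include <stdio.h>

	#define TRACEFS "/sys/kernel/tracing"

	/* Write a short string into a tracefs control file. */
	static int write_str(const char *file, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), TRACEFS "/%s", file);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		char line[1024];
		FILE *trace;
		int i;

		if (write_str("current_tracer", "osnoise"))
			return 1;
		if (write_str("tracing_on", "1"))
			return 1;

		/* Read a few samples; the same data is also in the trace file. */
		trace = fopen(TRACEFS "/trace_pipe", "r");
		if (!trace)
			return 1;
		for (i = 0; i < 10 && fgets(line, sizeof(line), trace); i++)
			fputs(line, stdout);
		fclose(trace);
		return 0;
	}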
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index b28d3e5013cd..b1c47ccf4f73 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
+obj-$(CONFIG_OSNOISE_TRACER) += trace_osnoise.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 64bd2d84367f..fdd14072fc3b 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_numa_node_id_proto;
case BPF_FUNC_perf_event_read:
return &bpf_perf_event_read_proto;
- case BPF_FUNC_probe_write_user:
- return bpf_get_probe_write_proto();
case BPF_FUNC_current_task_under_cgroup:
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_write_user:
+ return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+ NULL : bpf_get_probe_write_proto();
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
case BPF_FUNC_probe_read:
- return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
NULL : &bpf_probe_read_compat_proto;
case BPF_FUNC_probe_read_str:
- return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
@@ -1842,7 +1843,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
if (prog->aux->max_tp_access > btp->writable_size)
return -EINVAL;
- return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
+ return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
+ prog);
}
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 72ef4dccbcc4..7b180f61e6d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4212,8 +4212,7 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
if (!func) /* warn? */
continue;
- list_del(&ftrace_mod->list);
- list_add(&ftrace_mod->list, &process_mods);
+ list_move(&ftrace_mod->list, &process_mods);
/* Use the newly allocated func, as it may be "*" */
kfree(ftrace_mod->func);
@@ -5986,7 +5985,8 @@ ftrace_graph_release(struct inode *inode, struct file *file)
* infrastructure to do the synchronization, thus we must do it
* ourselves.
*/
- synchronize_rcu_tasks_rude();
+ if (old_hash != EMPTY_HASH)
+ synchronize_rcu_tasks_rude();
free_ftrace_hash(old_hash);
}
@@ -7545,7 +7545,7 @@ int ftrace_is_dead(void)
*/
int register_ftrace_function(struct ftrace_ops *ops)
{
- int ret = -1;
+ int ret;
ftrace_ops_init(ops);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c0ee6484990..e592d1df6f88 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3391,7 +3391,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
case RINGBUF_TYPE_PADDING:
if (event->time_delta == 1)
break;
- /* fall through */
+ fallthrough;
case RINGBUF_TYPE_DATA:
ts += event->time_delta;
break;
@@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
if (unlikely(!head))
return true;
- return reader->read == rb_page_commit(reader) &&
- (commit == reader ||
- (commit == head &&
- head->read == rb_page_commit(commit)));
+ /* Reader should exhaust content in reader page */
+ if (reader->read != rb_page_commit(reader))
+ return false;
+
+ /*
+ * If writers are committing on the reader page then, since all
+ * committed content has already been read, the ring buffer is empty.
+ */
+ if (commit == reader)
+ return true;
+
+ /*
+ * If writers are committing on a page other than reader page
+ * and head page, there should always be content to read.
+ */
+ if (commit != head)
+ return false;
+
+ /*
+ * Writers are committing on the head page; we just need to check
+ * whether any data has been committed there, and the reader will
+ * swap the reader page with the head page when it needs to read.
+ */
+ return rb_page_commit(commit) == 0;
}
/**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d23a09d3eb37..a1adb29ef5c1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
+#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
@@ -86,6 +87,7 @@ void __init disable_tracing_selftest(const char *reason)
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
+static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
@@ -196,12 +198,12 @@ __setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
- if (*str++ != '=' || !*str) {
+ if (*str++ != '=' || !*str || !strcmp("1", str)) {
ftrace_dump_on_oops = DUMP_ALL;
return 1;
}
- if (!strcmp("orig_cpu", str)) {
+ if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
ftrace_dump_on_oops = DUMP_ORIG;
return 1;
}
@@ -256,6 +258,13 @@ static int __init set_tracepoint_printk(char *str)
}
__setup("tp_printk", set_tracepoint_printk);
+static int __init set_tracepoint_printk_stop(char *str)
+{
+ tracepoint_printk_stop_on_boot = true;
+ return 1;
+}
+__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
+
unsigned long long ns2usecs(u64 nsec)
{
nsec += 500;
@@ -1682,8 +1691,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
unsigned long __read_mostly tracing_thresh;
static const struct file_operations tracing_max_lat_fops;
-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- defined(CONFIG_FSNOTIFY)
+#ifdef LATENCY_FS_NOTIFY
static struct workqueue_struct *fsnotify_wq;
@@ -2184,8 +2192,15 @@ void tracing_reset_all_online_cpus(void)
}
}
+/*
+ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
+ * is the tgid last observed corresponding to pid=i.
+ */
static int *tgid_map;
+/* The maximum valid index into tgid_map. */
+static size_t tgid_map_max;
+
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -2458,24 +2473,41 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}
+static int *trace_find_tgid_ptr(int pid)
+{
+ /*
+ * Pairs with the smp_store_release in set_tracer_flag() to ensure that
+ * if we observe a non-NULL tgid_map then we also observe the correct
+ * tgid_map_max.
+ */
+ int *map = smp_load_acquire(&tgid_map);
+
+ if (unlikely(!map || pid > tgid_map_max))
+ return NULL;
+
+ return &map[pid];
+}
+
int trace_find_tgid(int pid)
{
- if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
- return 0;
+ int *ptr = trace_find_tgid_ptr(pid);
- return tgid_map[pid];
+ return ptr ? *ptr : 0;
}
static int trace_save_tgid(struct task_struct *tsk)
{
+ int *ptr;
+
/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;
- if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
+ ptr = trace_find_tgid_ptr(tsk->pid);
+ if (!ptr)
return 0;
- tgid_map[tsk->pid] = tsk->tgid;
+ *ptr = tsk->tgid;
return 1;
}
@@ -2729,9 +2761,45 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
if (!tr->no_filter_buffering_ref &&
(trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
(entry = this_cpu_read(trace_buffered_event))) {
- /* Try to use the per cpu buffer first */
+ /*
+ * Filtering is on, so try to use the per cpu buffer first.
+ * This buffer will simulate a ring_buffer_event,
+ * where the type_len is zero and the array[0] will
+ * hold the full length.
+ * (see include/linux/ring_buffer.h for details on
+ * how the ring_buffer_event is structured).
+ *
+ * Using a temp buffer during filtering and copying it
+ * on a matched filter is quicker than writing directly
+ * into the ring buffer and then discarding it when
+ * it doesn't match. That is because the discard
+ * requires several atomic operations to get right.
+ * Copying on match and doing nothing on a failed match
+ * is still quicker than not copying on match but having
+ * to discard out of the ring buffer on a failed match.
+ */
+ int max_len = PAGE_SIZE - struct_size(entry, array, 1);
+
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
+
+ /*
+ * Preemption is disabled, but interrupts and NMIs
+ * can still come in now. If that happens after
+ * the above increment, then it will have to go
+ * back to the old method of allocating the event
+ * on the ring buffer, and if the filter fails, it
+ * will have to call ring_buffer_discard_commit()
+ * to remove it.
+ *
+ * Need to also check the unlikely case that the
+ * length is bigger than the temp buffer size.
+ * If that happens, then the reserve is pretty much
+ * guaranteed to fail, as the ring buffer currently
+ * only allows events less than a page. But that may
+ * change in the future, so let the ring buffer reserve
+ * handle the failure in that case.
+ */
+ if (val == 1 && likely(len <= max_len)) {
trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
return entry;
@@ -2829,14 +2897,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
+ enum event_trigger_type tt = ETT_NONE;
+ struct trace_event_file *file = fbuffer->trace_file;
+
+ if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
+ fbuffer->entry, &tt))
+ goto discard;
+
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
if (static_branch_unlikely(&trace_event_exports_enabled))
ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
- event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
- fbuffer->event, fbuffer->entry,
- fbuffer->trace_ctx, fbuffer->regs);
+
+ trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
+ fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
+
+discard:
+ if (tt)
+ event_triggers_post_call(file, tt);
+
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
@@ -5171,6 +5251,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
+ int *map;
+
if ((mask == TRACE_ITER_RECORD_TGID) ||
(mask == TRACE_ITER_RECORD_CMD))
lockdep_assert_held(&event_mutex);
@@ -5193,10 +5275,19 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
trace_event_enable_cmd_record(enabled);
if (mask == TRACE_ITER_RECORD_TGID) {
- if (!tgid_map)
- tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
- sizeof(*tgid_map),
- GFP_KERNEL);
+ if (!tgid_map) {
+ tgid_map_max = pid_max;
+ map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
+ GFP_KERNEL);
+
+ /*
+ * Pairs with smp_load_acquire() in
+ * trace_find_tgid_ptr() to ensure that if it observes
+ * the tgid_map we just allocated then it also observes
+ * the corresponding tgid_map_max value.
+ */
+ smp_store_release(&tgid_map, map);
+ }
if (!tgid_map) {
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
return -ENOMEM;
@@ -5530,6 +5621,10 @@ static const char readme_msg[] =
"\t [:name=histname1]\n"
"\t [:<handler>.<action>]\n"
"\t [if <filter>]\n\n"
+ "\t Note, special fields can be used as well:\n"
+ "\t common_timestamp - to record current timestamp\n"
+ "\t common_cpu - to record the CPU the event happened on\n"
+ "\n"
"\t When a matching event is hit, an entry is added to a hash\n"
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"
@@ -5608,37 +5703,16 @@ static const struct file_operations tracing_readme_fops = {
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
- int *ptr = v;
-
- if (*pos || m->count)
- ptr++;
-
- (*pos)++;
+ int pid = ++(*pos);
- for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
- if (trace_find_tgid(*ptr))
- return ptr;
- }
-
- return NULL;
+ return trace_find_tgid_ptr(pid);
}
static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
- void *v;
- loff_t l = 0;
-
- if (!tgid_map)
- return NULL;
-
- v = &tgid_map[0];
- while (l <= *pos) {
- v = saved_tgids_next(m, v, &l);
- if (!v)
- return NULL;
- }
+ int pid = *pos;
- return v;
+ return trace_find_tgid_ptr(pid);
}
static void saved_tgids_stop(struct seq_file *m, void *v)
@@ -5647,9 +5721,14 @@ static void saved_tgids_stop(struct seq_file *m, void *v)
static int saved_tgids_show(struct seq_file *m, void *v)
{
- int pid = (int *)v - tgid_map;
+ int *entry = (int *)v;
+ int pid = entry - tgid_map;
+ int tgid = *entry;
+
+ if (tgid == 0)
+ return SEQ_SKIP;
- seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
+ seq_printf(m, "%d %d\n", pid, tgid);
return 0;
}
@@ -6134,7 +6213,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
unsigned long size, int cpu_id)
{
- int ret = size;
+ int ret;
mutex_lock(&trace_types_lock);
@@ -7528,6 +7607,91 @@ static const struct file_operations snapshot_raw_fops = {
#endif /* CONFIG_TRACER_SNAPSHOT */
+/*
+ * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
+ * @filp: The active open file structure
+ * @ubuf: The user-space provided buffer containing the value to write
+ * @cnt: The maximum number of bytes to write
+ * @ppos: The current "file" position
+ *
+ * This function implements the write interface for a struct trace_min_max_param.
+ * The filp->private_data must point to a trace_min_max_param structure that
+ * defines where to write the value, the min and the max acceptable values,
+ * and a lock to protect the write.
+ */
+static ssize_t
+trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_min_max_param *param = filp->private_data;
+ u64 val;
+ int err;
+
+ if (!param)
+ return -EFAULT;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &val);
+ if (err)
+ return err;
+
+ if (param->lock)
+ mutex_lock(param->lock);
+
+ if (param->min && val < *param->min)
+ err = -EINVAL;
+
+ if (param->max && val > *param->max)
+ err = -EINVAL;
+
+ if (!err)
+ *param->val = val;
+
+ if (param->lock)
+ mutex_unlock(param->lock);
+
+ if (err)
+ return err;
+
+ return cnt;
+}
+
+/*
+ * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function implements the read interface for a struct trace_min_max_param.
+ * The filp->private_data must point to a trace_min_max_param struct with valid
+ * data.
+ */
+static ssize_t
+trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_min_max_param *param = filp->private_data;
+ char buf[U64_STR_SIZE];
+ int len;
+ u64 val;
+
+ if (!param)
+ return -EFAULT;
+
+ val = *param->val;
+
+ if (cnt > sizeof(buf))
+ cnt = sizeof(buf);
+
+ len = snprintf(buf, sizeof(buf), "%llu\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+const struct file_operations trace_min_max_fops = {
+ .open = tracing_open_generic,
+ .read = trace_min_max_read,
+ .write = trace_min_max_write,
+};
+
#define TRACING_LOG_ERRS_MAX 8
#define TRACING_LOG_LOC_MAX 128
@@ -8983,8 +9147,10 @@ static int trace_array_create_dir(struct trace_array *tr)
return -EINVAL;
ret = event_trace_add_tracer(tr->dir, tr);
- if (ret)
+ if (ret) {
tracefs_remove(tr->dir);
+ return ret;
+ }
init_tracer_tracefs(tr, tr->dir);
__update_tracer_options(tr);
@@ -9531,6 +9697,8 @@ static __init int tracer_init_tracefs(void)
return 0;
}
+fs_initcall(tracer_init_tracefs);
+
static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
{
@@ -9951,7 +10119,7 @@ void __init trace_init(void)
trace_event_init();
}
-__init static int clear_boot_tracer(void)
+__init static void clear_boot_tracer(void)
{
/*
* The default tracer at boot buffer is an init section.
@@ -9961,26 +10129,21 @@ __init static int clear_boot_tracer(void)
* about to be freed.
*/
if (!default_bootup_tracer)
- return 0;
+ return;
printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
default_bootup_tracer);
default_bootup_tracer = NULL;
-
- return 0;
}
-fs_initcall(tracer_init_tracefs);
-late_initcall_sync(clear_boot_tracer);
-
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__init static int tracing_set_default_clock(void)
+__init static void tracing_set_default_clock(void)
{
/* sched_clock_stable() is determined in late_initcall */
if (!trace_boot_clock && !sched_clock_stable()) {
if (security_locked_down(LOCKDOWN_TRACEFS)) {
pr_warn("Can not set tracing clock due to lockdown\n");
- return -EPERM;
+ return;
}
printk(KERN_WARNING
@@ -9990,8 +10153,21 @@ __init static int tracing_set_default_clock(void)
"on the kernel command line\n");
tracing_set_clock(&global_trace, "global");
}
+}
+#else
+static inline void tracing_set_default_clock(void) { }
+#endif
+__init static int late_trace_init(void)
+{
+ if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
+ static_key_disable(&tracepoint_printk_key.key);
+ tracepoint_printk = 0;
+ }
+
+ tracing_set_default_clock();
+ clear_boot_tracer();
return 0;
}
-late_initcall_sync(tracing_set_default_clock);
-#endif
+
+late_initcall_sync(late_trace_init);
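The tgid_map change above publishes the newly allocated map with
smp_store_release() and reads it with smp_load_acquire(), so a reader that
sees the non-NULL pointer is also guaranteed to see the matching
tgid_map_max. The same publication idiom can be sketched with C11 atomics
outside the kernel; the names below are illustrative, not part of the patch:

	#include <stdatomic.h>
	#include <stdlib.h>

	/* Illustrative stand-ins for tgid_map / tgid_map_max. */
	static int *_Atomic map;	/* published pointer */
	static size_t map_max;		/* plain data guarded by the release */

	/* Writer: fill in the bound first, then publish the pointer. */
	static int publish_map(size_t max)
	{
		int *m = calloc(max + 1, sizeof(*m));

		if (!m)
			return -1;
		map_max = max;
		/* Release: the map_max store above is visible to acquirers. */
		atomic_store_explicit(&map, m, memory_order_release);
		return 0;
	}

	/* Reader: acquire the pointer; only then is map_max safe to use. */
	static int *find_slot(size_t idx)
	{
		int *m = atomic_load_explicit(&map, memory_order_acquire);

		if (!m || idx > map_max)
			return NULL;
		return &m[idx];
	}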
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d5d8c088a55d..4a0e693000c6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,6 +45,8 @@ enum trace_type {
TRACE_BLK,
TRACE_BPUTS,
TRACE_HWLAT,
+ TRACE_OSNOISE,
+ TRACE_TIMERLAT,
TRACE_RAW_DATA,
TRACE_FUNC_REPEATS,
@@ -290,7 +292,8 @@ struct trace_array {
struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
+#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+ || defined(CONFIG_OSNOISE_TRACER)
unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
struct dentry *d_max_latency;
@@ -438,6 +441,8 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
+ IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
+ IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
@@ -668,15 +673,15 @@ void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- defined(CONFIG_FSNOTIFY)
+#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+ || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY)
+#define LATENCY_FS_NOTIFY
+#endif
+#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
-
#else
-
static inline void latency_fsnotify(struct trace_array *tr) { }
-
#endif
#ifdef CONFIG_STACKTRACE
@@ -1384,38 +1389,6 @@ event_trigger_unlock_commit(struct trace_event_file *file,
event_triggers_post_call(file, tt);
}
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer associated with the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @trace_ctx: The tracing context flags.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
- struct trace_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned int trace_ctx,
- struct pt_regs *regs)
-{
- enum event_trigger_type tt = ETT_NONE;
-
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit_regs(file->tr, buffer, event,
- trace_ctx, regs);
-
- if (tt)
- event_triggers_post_call(file, tt);
-}
-
#define FILTER_PRED_INVALID ((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT (1 << 15)
#define FILTER_PRED_FOLD (1 << 15)
@@ -1945,4 +1918,22 @@ static inline bool is_good_name(const char *name)
return true;
}
+/*
+ * This is a generic way to read and write a u64 value through a file in
+ * tracefs.
+ *
+ * The value is stored in the variable pointed to by *val. The value needs
+ * to be at least *min and at most *max. The write is protected by an
+ * existing *lock.
+ */
+struct trace_min_max_param {
+ struct mutex *lock;
+ u64 *val;
+ u64 *min;
+ u64 *max;
+};
+
+#define U64_STR_SIZE 24 /* 20 digits max */
+
+extern const struct file_operations trace_min_max_fops;
+
#endif /* _LINUX_KERNEL_TRACE_H */
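The new trace_min_max_param / trace_min_max_fops pair gives tracers a
generic, bounded u64 tracefs file; trace_hwlat.c uses it for its width and
window files later in this series. A sketch of how another tracer inside
kernel/trace might wire one up; every name other than the new API is an
illustrative assumption:

	#include <linux/mutex.h>
	#include <linux/errno.h>
	#include <linux/tracefs.h>
	#include "trace.h"

	/* Hypothetical tracer knob: a period bounded to [1, 1000000] us. */
	static DEFINE_MUTEX(example_lock);
	static u64 example_period = 1000;
	static u64 example_period_min = 1;
	static u64 example_period_max = 1000000;

	static struct trace_min_max_param example_period_param = {
		.lock	= &example_lock,
		.val	= &example_period,
		.min	= &example_period_min,
		.max	= &example_period_max,
	};

	/* Called from the tracer's init path; out-of-range writes get -EINVAL. */
	static int example_init_tracefs(void)
	{
		struct dentry *d;

		d = tracefs_create_file("example_period", 0644, NULL,
					&example_period_param,
					&trace_min_max_fops);
		return d ? 0 : -ENOMEM;
	}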
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a82f03f385f8..94ef2d099e32 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -225,14 +225,37 @@ static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *gnode, *enode;
+ bool enable, enable_all = false;
+ const char *data;
node = xbc_node_find_child(node, "event");
if (!node)
return;
/* per-event key starts with "event.GROUP.EVENT" */
- xbc_node_for_each_child(node, gnode)
- xbc_node_for_each_child(gnode, enode)
+ xbc_node_for_each_child(node, gnode) {
+ data = xbc_node_get_data(gnode);
+ if (!strcmp(data, "enable")) {
+ enable_all = true;
+ continue;
+ }
+ enable = false;
+ xbc_node_for_each_child(gnode, enode) {
+ data = xbc_node_get_data(enode);
+ if (!strcmp(data, "enable")) {
+ enable = true;
+ continue;
+ }
trace_boot_init_one_event(tr, gnode, enode);
+ }
+ /* Event enablement must be done after event settings */
+ if (enable) {
+ data = xbc_node_get_data(gnode);
+ trace_array_set_clr_event(tr, data, NULL, true);
+ }
+ }
+ /* Ditto */
+ if (enable_all)
+ trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 251c819cf0c5..cd41e863b51c 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -360,3 +360,44 @@ FTRACE_ENTRY(func_repeats, func_repeats_entry,
__entry->count,
FUNC_REPEATS_GET_DELTA_TS(__entry))
);
+
+FTRACE_ENTRY(osnoise, osnoise_entry,
+
+ TRACE_OSNOISE,
+
+ F_STRUCT(
+ __field( u64, noise )
+ __field( u64, runtime )
+ __field( u64, max_sample )
+ __field( unsigned int, hw_count )
+ __field( unsigned int, nmi_count )
+ __field( unsigned int, irq_count )
+ __field( unsigned int, softirq_count )
+ __field( unsigned int, thread_count )
+ ),
+
+ F_printk("noise:%llu\tmax_sample:%llu\thw:%u\tnmi:%u\tirq:%u\tsoftirq:%u\tthread:%u\n",
+ __entry->noise,
+ __entry->max_sample,
+ __entry->hw_count,
+ __entry->nmi_count,
+ __entry->irq_count,
+ __entry->softirq_count,
+ __entry->thread_count)
+);
+
+FTRACE_ENTRY(timerlat, timerlat_entry,
+
+ TRACE_TIMERLAT,
+
+ F_STRUCT(
+ __field( unsigned int, seqnum )
+ __field( int, context )
+ __field( u64, timer_latency )
+ ),
+
+ F_printk("seq:%u\tcontext:%d\ttimer_latency:%llu\n",
+ __entry->seqnum,
+ __entry->context,
+ __entry->timer_latency)
+);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index c1abd63f1d6c..a48aa2a2875b 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -65,7 +65,8 @@
C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
C(EMPTY_SORT_FIELD, "Empty sort field"), \
C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
- C(INVALID_SORT_FIELD, "Sort field must be a key or a val"),
+ C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
+ C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),
#undef C
#define C(a, b) HIST_ERR_##a
@@ -1111,7 +1112,7 @@ static const char *hist_field_name(struct hist_field *field,
field->flags & HIST_FIELD_FL_ALIAS)
field_name = hist_field_name(field->operands[0], ++level);
else if (field->flags & HIST_FIELD_FL_CPU)
- field_name = "cpu";
+ field_name = "common_cpu";
else if (field->flags & HIST_FIELD_FL_EXPR ||
field->flags & HIST_FIELD_FL_VAR_REF) {
if (field->system) {
@@ -1555,6 +1556,13 @@ static int contains_operator(char *str)
switch (*op) {
case '-':
+ /*
+ * Unfortunately, the modifier ".sym-offset"
+ * can confuse things.
+ */
+ if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
+ return FIELD_OP_NONE;
+
if (*str == '-')
field_op = FIELD_OP_UNARY_MINUS;
else
@@ -1682,7 +1690,9 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
if (WARN_ON_ONCE(!field))
goto out;
- if (is_string_field(field)) {
+ /* Pointers to strings are just pointers and dangerous to dereference */
+ if (is_string_field(field) &&
+ (field->filter_type != FILTER_PTR_STRING)) {
flags |= HIST_FIELD_FL_STRING;
hist_field->size = MAX_FILTER_STR_VAL;
@@ -1982,14 +1992,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
- } else if (strcmp(field_name, "cpu") == 0)
+ } else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
else {
field = trace_find_event_field(file->event_call, field_name);
if (!field || !field->size) {
- hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
- field = ERR_PTR(-EINVAL);
- goto out;
+ /*
+ * For backward compatibility, if field_name
+ * was "cpu", then we treat this the same as
+ * common_cpu.
+ */
+ if (strcmp(field_name, "cpu") == 0) {
+ *flags |= HIST_FIELD_FL_CPU;
+ } else {
+ hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
+ errpos(field_name));
+ field = ERR_PTR(-EINVAL);
+ goto out;
+ }
}
}
out:
@@ -2137,6 +2157,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
ret = PTR_ERR(operand1);
goto free;
}
+ if (operand1->flags & HIST_FIELD_FL_STRING) {
+ /* A string type cannot be the operand of a unary operator. */
+ hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+ destroy_hist_field(operand1, 0);
+ ret = -EINVAL;
+ goto free;
+ }
expr->flags |= operand1->flags &
(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
@@ -2238,6 +2265,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
operand1 = NULL;
goto free;
}
+ if (operand1->flags & HIST_FIELD_FL_STRING) {
+ hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
+ ret = -EINVAL;
+ goto free;
+ }
/* rest of string could be another expression e.g. b+c in a+b+c */
operand_flags = 0;
@@ -2247,6 +2279,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
operand2 = NULL;
goto free;
}
+ if (operand2->flags & HIST_FIELD_FL_STRING) {
+ hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+ ret = -EINVAL;
+ goto free;
+ }
ret = check_expr_operands(file->tr, operand1, operand2);
if (ret)
@@ -2268,6 +2305,10 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
expr->operands[0] = operand1;
expr->operands[1] = operand2;
+
+ /* The operand sizes should be the same, so just pick one */
+ expr->size = operand1->size;
+
expr->operator = field_op;
expr->name = expr_str(expr, 0);
expr->type = kstrdup(operand1->type, GFP_KERNEL);
@@ -2434,12 +2475,12 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
char *subsys_name, char *event_name, char *field_name)
{
struct trace_array *tr = target_hist_data->event_file->tr;
- struct hist_field *event_var = ERR_PTR(-EINVAL);
struct hist_trigger_data *hist_data;
unsigned int i, n, first = true;
struct field_var_hist *var_hist;
struct trace_event_file *file;
struct hist_field *key_field;
+ struct hist_field *event_var;
char *saved_filter;
char *cmd;
int ret;
@@ -3389,6 +3430,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
event = data->match_data.event;
}
+ if (!event)
+ goto free;
/*
* At this point, we're looking at a field on another
* event. Because we can't modify a hist trigger on
@@ -4488,8 +4531,6 @@ static inline void add_to_key(char *compound_key, void *key,
field = key_field->field;
if (field->filter_type == FILTER_DYN_STRING)
size = *(u32 *)(rec + field->offset) >> 16;
- else if (field->filter_type == FILTER_PTR_STRING)
- size = strlen(key);
else if (field->filter_type == FILTER_STATIC_STRING)
size = field->size;
@@ -5078,7 +5119,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
seq_printf(m, "%s=", hist_field->var.name);
if (hist_field->flags & HIST_FIELD_FL_CPU)
- seq_puts(m, "cpu");
+ seq_puts(m, "common_cpu");
else if (field_name) {
if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
hist_field->flags & HIST_FIELD_FL_ALIAS)
@@ -5232,6 +5273,7 @@ static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
cmd = hist_data->field_var_hists[i]->cmd;
ret = event_hist_trigger_func(&trigger_hist_cmd, file,
"!hist", "hist", cmd);
+ WARN_ON_ONCE(ret < 0);
}
}
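The hist changes above make common_cpu the canonical name of the CPU field
(with "cpu" kept as a backward-compatible alias) and reject string operands
in expressions. From userspace the new field is used like any other hist
key; a sketch, where the tracefs path and the sched_switch event are
assumptions chosen only for illustration:

	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/kernel/tracing/events/sched/sched_switch/trigger";
		FILE *f = fopen(path, "w");

		if (!f)
			return 1;
		/* Count hits per CPU; results appear in the event's "hist" file. */
		fprintf(f, "hist:keys=common_cpu:vals=hitcount\n");
		fclose(f);
		return 0;
	}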
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 2ac75eb6aa86..9315fc03e303 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -893,15 +893,13 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
dyn_event_init(&event->devent, &synth_event_ops);
for (i = 0, j = 0; i < n_fields; i++) {
+ fields[i]->field_pos = i;
event->fields[i] = fields[i];
- if (fields[i]->is_dynamic) {
- event->dynamic_fields[j] = fields[i];
- event->dynamic_fields[j]->field_pos = i;
+ if (fields[i]->is_dynamic)
event->dynamic_fields[j++] = fields[i];
- event->n_dynamic_fields++;
- }
}
+ event->n_dynamic_fields = j;
event->n_fields = n_fields;
out:
return event;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index b8bfa8505b7b..cf84d0f6583a 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -916,7 +916,8 @@ void unpause_named_trigger(struct event_trigger_data *data)
/**
* set_named_trigger_data - Associate common named trigger data
- * @data: The trigger data of a named trigger to unpause
+ * @data: The trigger data to associate
+ * @named_data: The common named trigger to be associated
*
* Named triggers are sets of triggers that share a common set of
* trigger data. The first named trigger registered with a given name
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 632ef88131a9..14f46aae1981 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -34,7 +34,7 @@
* Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
* Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
*
- * Includes useful feedback from Clark Williams <clark@redhat.com>
+ * Includes useful feedback from Clark Williams <williams@redhat.com>
*
*/
#include <linux/kthread.h>
@@ -54,20 +54,33 @@ static struct trace_array *hwlat_trace;
#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
-/* sampling thread*/
-static struct task_struct *hwlat_kthread;
-
static struct dentry *hwlat_sample_width; /* sample width us */
static struct dentry *hwlat_sample_window; /* sample window us */
+static struct dentry *hwlat_thread_mode; /* hwlat thread mode */
+
+enum {
+ MODE_NONE = 0,
+ MODE_ROUND_ROBIN,
+ MODE_PER_CPU,
+ MODE_MAX
+};
+static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" };
/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;
-/* NMI timestamp counters */
-static u64 nmi_ts_start;
-static u64 nmi_total_ts;
-static int nmi_count;
-static int nmi_cpu;
+/* runtime kthread data */
+struct hwlat_kthread_data {
+ struct task_struct *kthread;
+ /* NMI timestamp counters */
+ u64 nmi_ts_start;
+ u64 nmi_total_ts;
+ int nmi_count;
+ int nmi_cpu;
+};
+
+struct hwlat_kthread_data hwlat_single_cpu_data;
+DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data);
/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;
@@ -96,11 +109,24 @@ static struct hwlat_data {
u64 sample_window; /* total sampling window (on+off) */
u64 sample_width; /* active sampling portion of window */
+ int thread_mode; /* thread mode */
+
} hwlat_data = {
.sample_window = DEFAULT_SAMPLE_WINDOW,
.sample_width = DEFAULT_SAMPLE_WIDTH,
+ .thread_mode = MODE_ROUND_ROBIN
};
+static struct hwlat_kthread_data *get_cpu_data(void)
+{
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ return this_cpu_ptr(&hwlat_per_cpu_data);
+ else
+ return &hwlat_single_cpu_data;
+}
+
+static bool hwlat_busy;
+
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
@@ -136,7 +162,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
void trace_hwlat_callback(bool enter)
{
- if (smp_processor_id() != nmi_cpu)
+ struct hwlat_kthread_data *kdata = get_cpu_data();
+
+ if (!kdata->kthread)
return;
/*
@@ -145,15 +173,24 @@ void trace_hwlat_callback(bool enter)
*/
if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
if (enter)
- nmi_ts_start = time_get();
+ kdata->nmi_ts_start = time_get();
else
- nmi_total_ts += time_get() - nmi_ts_start;
+ kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;
}
if (enter)
- nmi_count++;
+ kdata->nmi_count++;
}
+/*
+ * hwlat_err - report a hwlat error.
+ */
+#define hwlat_err(msg) ({ \
+ struct trace_array *tr = hwlat_trace; \
+ \
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
+})
+
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
*
@@ -163,6 +200,7 @@ void trace_hwlat_callback(bool enter)
*/
static int get_sample(void)
{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
struct trace_array *tr = hwlat_trace;
struct hwlat_sample s;
time_type start, t1, t2, last_t2;
@@ -175,9 +213,8 @@ static int get_sample(void)
do_div(thresh, NSEC_PER_USEC); /* modifies interval value */
- nmi_cpu = smp_processor_id();
- nmi_total_ts = 0;
- nmi_count = 0;
+ kdata->nmi_total_ts = 0;
+ kdata->nmi_count = 0;
/* Make sure NMIs see this first */
barrier();
@@ -197,7 +234,7 @@ static int get_sample(void)
outer_diff = time_to_us(time_sub(t1, last_t2));
/* This shouldn't happen */
if (outer_diff < 0) {
- pr_err(BANNER "time running backwards\n");
+ hwlat_err(BANNER "time running backwards\n");
goto out;
}
if (outer_diff > outer_sample)
@@ -209,7 +246,7 @@ static int get_sample(void)
/* Check for possible overflows */
if (total < last_total) {
- pr_err("Time total overflowed\n");
+ hwlat_err("Time total overflowed\n");
break;
}
last_total = total;
@@ -225,7 +262,7 @@ static int get_sample(void)
/* This shouldn't happen */
if (diff < 0) {
- pr_err(BANNER "time running backwards\n");
+ hwlat_err(BANNER "time running backwards\n");
goto out;
}
@@ -247,15 +284,15 @@ static int get_sample(void)
ret = 1;
/* We read in microseconds */
- if (nmi_total_ts)
- do_div(nmi_total_ts, NSEC_PER_USEC);
+ if (kdata->nmi_total_ts)
+ do_div(kdata->nmi_total_ts, NSEC_PER_USEC);
hwlat_data.count++;
s.seqnum = hwlat_data.count;
s.duration = sample;
s.outer_duration = outer_sample;
- s.nmi_total_ts = nmi_total_ts;
- s.nmi_count = nmi_count;
+ s.nmi_total_ts = kdata->nmi_total_ts;
+ s.nmi_count = kdata->nmi_count;
s.count = count;
trace_hwlat_sample(&s);
@@ -273,7 +310,6 @@ out:
}
static struct cpumask save_cpumask;
-static bool disable_migrate;
static void move_to_next_cpu(void)
{
@@ -281,26 +317,24 @@ static void move_to_next_cpu(void)
struct trace_array *tr = hwlat_trace;
int next_cpu;
- if (disable_migrate)
- return;
/*
* If for some reason the user modifies the CPU affinity
* of this thread, then stop migrating for the duration
* of the current test.
*/
if (!cpumask_equal(current_mask, current->cpus_ptr))
- goto disable;
+ goto change_mode;
get_online_cpus();
cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
- next_cpu = cpumask_next(smp_processor_id(), current_mask);
+ next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
put_online_cpus();
if (next_cpu >= nr_cpu_ids)
next_cpu = cpumask_first(current_mask);
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
- goto disable;
+ goto change_mode;
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);
@@ -308,8 +342,9 @@ static void move_to_next_cpu(void)
sched_setaffinity(0, current_mask);
return;
- disable:
- disable_migrate = true;
+ change_mode:
+ hwlat_data.thread_mode = MODE_NONE;
+ pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
}
/*
@@ -328,7 +363,8 @@ static int kthread_fn(void *data)
while (!kthread_should_stop()) {
- move_to_next_cpu();
+ if (hwlat_data.thread_mode == MODE_ROUND_ROBIN)
+ move_to_next_cpu();
local_irq_disable();
get_sample();
@@ -351,178 +387,380 @@ static int kthread_fn(void *data)
return 0;
}
-/**
- * start_kthread - Kick off the hardware latency sampling/detector kthread
+/*
+ * stop_single_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static void stop_single_kthread(void)
+{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
+ struct task_struct *kthread;
+
+ get_online_cpus();
+ kthread = kdata->kthread;
+
+ if (!kthread)
+ goto out_put_cpus;
+
+ kthread_stop(kthread);
+ kdata->kthread = NULL;
+
+out_put_cpus:
+ put_online_cpus();
+}
+
+
+/*
+ * start_single_kthread - Kick off the hardware latency sampling/detector kthread
*
* This starts the kernel thread that will sit and sample the CPU timestamp
* counter (TSC or similar) and look for potential hardware latencies.
*/
-static int start_kthread(struct trace_array *tr)
+static int start_single_kthread(struct trace_array *tr)
{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
struct cpumask *current_mask = &save_cpumask;
struct task_struct *kthread;
int next_cpu;
- if (hwlat_kthread)
- return 0;
-
- /* Just pick the first CPU on first iteration */
get_online_cpus();
- cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
- put_online_cpus();
- next_cpu = cpumask_first(current_mask);
+ if (kdata->kthread)
+ goto out_put_cpus;
kthread = kthread_create(kthread_fn, NULL, "hwlatd");
if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n");
+ put_online_cpus();
return -ENOMEM;
}
- cpumask_clear(current_mask);
- cpumask_set_cpu(next_cpu, current_mask);
+ /* Just pick the first CPU on first iteration */
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+
+ if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) {
+ next_cpu = cpumask_first(current_mask);
+ cpumask_clear(current_mask);
+ cpumask_set_cpu(next_cpu, current_mask);
+
+ }
+
sched_setaffinity(kthread->pid, current_mask);
- hwlat_kthread = kthread;
+ kdata->kthread = kthread;
wake_up_process(kthread);
+out_put_cpus:
+ put_online_cpus();
return 0;
}
-/**
- * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
+/*
+ * stop_cpu_kthread - Stop a hwlat cpu kthread
+ */
+static void stop_cpu_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+
+ kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
+ if (kthread)
+ kthread_stop(kthread);
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+}
+
+/*
+ * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthreads to stop
*
- * This kicks the running hardware latency sampling/detector kernel thread and
+ * This kicks the running hardware latency sampling/detector kernel threads and
* tells it to stop sampling now. Use this on unload and at system shutdown.
*/
-static void stop_kthread(void)
+static void stop_per_cpu_kthreads(void)
{
- if (!hwlat_kthread)
- return;
- kthread_stop(hwlat_kthread);
- hwlat_kthread = NULL;
+ unsigned int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ stop_cpu_kthread(cpu);
+ put_online_cpus();
}
/*
- * hwlat_read - Wrapper read function for reading both window and width
- * @filp: The active open file structure
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a generic read implementation for the global state
- * "hwlat_data" structure filesystem entries.
+ * start_cpu_kthread - Start a hwlat cpu kthread
*/
-static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int start_cpu_kthread(unsigned int cpu)
{
- char buf[U64STR_SIZE];
- u64 *entry = filp->private_data;
- u64 val;
- int len;
+ struct task_struct *kthread;
+ char comm[24];
- if (!entry)
- return -EFAULT;
+ snprintf(comm, 24, "hwlatd/%d", cpu);
+
+ kthread = kthread_create_on_cpu(kthread_fn, NULL, cpu, comm);
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ return -ENOMEM;
+ }
- if (cnt > sizeof(buf))
- cnt = sizeof(buf);
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
+ wake_up_process(kthread);
+
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void hwlat_hotplug_workfn(struct work_struct *dummy)
+{
+ struct trace_array *tr = hwlat_trace;
+ unsigned int cpu = smp_processor_id();
+
+ mutex_lock(&trace_types_lock);
+ mutex_lock(&hwlat_data.lock);
+ get_online_cpus();
+
+ if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
+ goto out_unlock;
- val = *entry;
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
+ goto out_unlock;
- len = snprintf(buf, sizeof(buf), "%llu\n", val);
+ start_cpu_kthread(cpu);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+out_unlock:
+ put_online_cpus();
+ mutex_unlock(&hwlat_data.lock);
+ mutex_unlock(&trace_types_lock);
}
-/**
- * hwlat_width_write - Write function for "width" entry
- * @filp: The active open file structure
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in @file
+static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn);
+
+/*
+ * hwlat_cpu_init - CPU hotplug online callback function
+ */
+static int hwlat_cpu_init(unsigned int cpu)
+{
+ schedule_work_on(cpu, &hwlat_hotplug_work);
+ return 0;
+}
+
+/*
+ * hwlat_cpu_die - CPU hotplug offline callback function
+ */
+static int hwlat_cpu_die(unsigned int cpu)
+{
+ stop_cpu_kthread(cpu);
+ return 0;
+}
+
+static void hwlat_init_hotplug_support(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online",
+ hwlat_cpu_init, hwlat_cpu_die);
+ if (ret < 0)
+ pr_warn(BANNER "Error to init cpu hotplug support\n");
+
+ return;
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void hwlat_init_hotplug_support(void)
+{
+ return;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads
*
- * This function provides a write implementation for the "width" interface
- * to the hardware latency detector. It can be used to configure
- * for how many us of the total window us we will actively sample for any
- * hardware-induced latency periods. Obviously, it is not possible to
- * sample constantly and have the system respond to a sample reader, or,
- * worse, without having the system appear to have gone out to lunch. It
- * is enforced that width is less that the total window size.
+ * This starts the kernel threads that will sit on potentially all cpus and
+ * sample the CPU timestamp counter (TSC or similar) and look for potential
+ * hardware latencies.
*/
-static ssize_t
-hwlat_width_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int start_per_cpu_kthreads(struct trace_array *tr)
{
- u64 val;
- int err;
+ struct cpumask *current_mask = &save_cpumask;
+ unsigned int cpu;
+ int retval;
- err = kstrtoull_from_user(ubuf, cnt, 10, &val);
- if (err)
- return err;
+ get_online_cpus();
+ /*
+ * Run only on CPUs in which hwlat is allowed to run.
+ */
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+
+ for_each_online_cpu(cpu)
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_cpu_kthread(cpu);
+ if (retval)
+ goto out_error;
+ }
+ put_online_cpus();
+
+ return 0;
+
+out_error:
+ put_online_cpus();
+ stop_per_cpu_kthreads();
+ return retval;
+}
+
+static void *s_mode_start(struct seq_file *s, loff_t *pos)
+{
+ int mode = *pos;
mutex_lock(&hwlat_data.lock);
- if (val < hwlat_data.sample_window)
- hwlat_data.sample_width = val;
+
+ if (mode >= MODE_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ int mode = ++(*pos);
+
+ if (mode >= MODE_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static int s_mode_show(struct seq_file *s, void *v)
+{
+ loff_t *pos = v;
+ int mode = *pos;
+
+ if (mode == hwlat_data.thread_mode)
+ seq_printf(s, "[%s]", thread_mode_str[mode]);
else
- err = -EINVAL;
- mutex_unlock(&hwlat_data.lock);
+ seq_printf(s, "%s", thread_mode_str[mode]);
- if (err)
- return err;
+ if (mode != MODE_MAX)
+ seq_puts(s, " ");
+
+ return 0;
+}
- return cnt;
+static void s_mode_stop(struct seq_file *s, void *v)
+{
+ seq_puts(s, "\n");
+ mutex_unlock(&hwlat_data.lock);
}
+static const struct seq_operations thread_mode_seq_ops = {
+ .start = s_mode_start,
+ .next = s_mode_next,
+ .show = s_mode_show,
+ .stop = s_mode_stop
+};
+
+static int hwlat_mode_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &thread_mode_seq_ops);
+};
+
+static void hwlat_tracer_start(struct trace_array *tr);
+static void hwlat_tracer_stop(struct trace_array *tr);
+
/**
- * hwlat_window_write - Write function for "window" entry
+ * hwlat_mode_write - Write function for "mode" entry
* @filp: The active open file structure
* @ubuf: The user buffer that contains the value to write
* @cnt: The maximum number of bytes to write to "file"
* @ppos: The current position in @file
*
- * This function provides a write implementation for the "window" interface
- * to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to write a new total window size. It
- * is enforced that any value written must be greater than the sample width
- * size, or an error results.
+ * This function provides a write implementation for the "mode" interface
+ * to the hardware latency detector. hwlatd has different operation modes.
+ * The "none" sets the allowed cpumask for a single hwlatd thread at the
+ * startup and lets the scheduler handle the migration. The default mode is
+ * the "round-robin" one, in which a single hwlatd thread runs, migrating
+ * among the allowed CPUs in a round-robin fashion. The "per-cpu" mode
+ * creates one hwlatd thread per allowed CPU.
*/
-static ssize_t
-hwlat_window_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
{
- u64 val;
- int err;
+ struct trace_array *tr = hwlat_trace;
+ const char *mode;
+ char buf[64];
+ int ret, i;
- err = kstrtoull_from_user(ubuf, cnt, 10, &val);
- if (err)
- return err;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ mode = strstrip(buf);
+
+ ret = -EINVAL;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop
+ * and hwlat_busy.
+ */
+ mutex_lock(&trace_types_lock);
+ if (hwlat_busy)
+ hwlat_tracer_stop(tr);
mutex_lock(&hwlat_data.lock);
- if (hwlat_data.sample_width < val)
- hwlat_data.sample_window = val;
- else
- err = -EINVAL;
+
+ for (i = 0; i < MODE_MAX; i++) {
+ if (strcmp(mode, thread_mode_str[i]) == 0) {
+ hwlat_data.thread_mode = i;
+ ret = cnt;
+ }
+ }
+
mutex_unlock(&hwlat_data.lock);
- if (err)
- return err;
+ if (hwlat_busy)
+ hwlat_tracer_start(tr);
+ mutex_unlock(&trace_types_lock);
+
+ *ppos += cnt;
- return cnt;
+
+ return ret;
}
-static const struct file_operations width_fops = {
- .open = tracing_open_generic,
- .read = hwlat_read,
- .write = hwlat_width_write,
+/*
+ * The width parameter is read/write using the generic trace_min_max_param
+ * method. The *val is protected by the hwlat_data lock and is upper
+ * bounded by the window parameter.
+ */
+static struct trace_min_max_param hwlat_width = {
+ .lock = &hwlat_data.lock,
+ .val = &hwlat_data.sample_width,
+ .max = &hwlat_data.sample_window,
+ .min = NULL,
};
-static const struct file_operations window_fops = {
- .open = tracing_open_generic,
- .read = hwlat_read,
- .write = hwlat_window_write,
+/*
+ * The window parameter is read/write using the generic trace_min_max_param
+ * method. The *val is protected by the hwlat_data lock and is lower
+ * bounded by the width parameter.
+ */
+static struct trace_min_max_param hwlat_window = {
+ .lock = &hwlat_data.lock,
+ .val = &hwlat_data.sample_window,
+ .max = NULL,
+ .min = &hwlat_data.sample_width,
};
+static const struct file_operations thread_mode_fops = {
+ .open = hwlat_mode_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = hwlat_mode_write
+};
/**
* init_tracefs - A function to initialize the tracefs interface files
*
@@ -546,18 +784,25 @@ static int init_tracefs(void)
hwlat_sample_window = tracefs_create_file("window", 0640,
top_dir,
- &hwlat_data.sample_window,
- &window_fops);
+ &hwlat_window,
+ &trace_min_max_fops);
if (!hwlat_sample_window)
goto err;
hwlat_sample_width = tracefs_create_file("width", 0644,
top_dir,
- &hwlat_data.sample_width,
- &width_fops);
+ &hwlat_width,
+ &trace_min_max_fops);
if (!hwlat_sample_width)
goto err;
+ hwlat_thread_mode = trace_create_file("mode", 0644,
+ top_dir,
+ NULL,
+ &thread_mode_fops);
+ if (!hwlat_thread_mode)
+ goto err;
+
return 0;
err:
@@ -569,18 +814,22 @@ static void hwlat_tracer_start(struct trace_array *tr)
{
int err;
- err = start_kthread(tr);
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ err = start_per_cpu_kthreads(tr);
+ else
+ err = start_single_kthread(tr);
if (err)
pr_err(BANNER "Cannot start hwlat kthread\n");
}
static void hwlat_tracer_stop(struct trace_array *tr)
{
- stop_kthread();
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ stop_per_cpu_kthreads();
+ else
+ stop_single_kthread();
}
-static bool hwlat_busy;
-
static int hwlat_tracer_init(struct trace_array *tr)
{
/* Only allow one instance to enable this */
@@ -589,7 +838,6 @@ static int hwlat_tracer_init(struct trace_array *tr)
hwlat_trace = tr;
- disable_migrate = false;
hwlat_data.count = 0;
tr->max_latency = 0;
save_tracing_thresh = tracing_thresh;
@@ -608,7 +856,7 @@ static int hwlat_tracer_init(struct trace_array *tr)
static void hwlat_tracer_reset(struct trace_array *tr)
{
- stop_kthread();
+ hwlat_tracer_stop(tr);
/* the tracing threshold is static between runs */
last_tracing_thresh = tracing_thresh;
@@ -637,6 +885,8 @@ __init static int init_hwlat_tracer(void)
if (ret)
return ret;
+ hwlat_init_hotplug_support();
+
init_tracefs();
return 0;
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
new file mode 100644
index 000000000000..b61eefe5ccf5
--- /dev/null
+++ b/kernel/trace/trace_osnoise.c
@@ -0,0 +1,2113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OS Noise Tracer: computes the OS Noise suffered by a running thread.
+ * Timerlat Tracer: measures the wakeup latency of a timer-triggered IRQ and thread.
+ *
+ * Based on "hwlat_detector" tracer by:
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
+ * With feedback from Clark Williams <williams@redhat.com>
+ *
+ * And also based on the rtsl tracer presented on:
+ * DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux
+ * scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems
+ * (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020.
+ *
+ * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. <bristot@redhat.com>
+ */
+
+#include <linux/kthread.h>
+#include <linux/tracefs.h>
+#include <linux/uaccess.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/sched/clock.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/sched.h>
+#include "trace.h"
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/trace/irq_vectors.h>
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#include <trace/events/irq.h>
+#include <trace/events/sched.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/osnoise.h>
+
+static struct trace_array *osnoise_trace;
+
+/*
+ * Default values.
+ */
+#define BANNER "osnoise: "
+#define DEFAULT_SAMPLE_PERIOD 1000000 /* 1s */
+#define DEFAULT_SAMPLE_RUNTIME 1000000 /* 1s */
+
+#define DEFAULT_TIMERLAT_PERIOD 1000 /* 1ms */
+#define DEFAULT_TIMERLAT_PRIO 95 /* FIFO 95 */
+
+/*
+ * NMI runtime info.
+ */
+struct osn_nmi {
+ u64 count;
+ u64 delta_start;
+};
+
+/*
+ * IRQ runtime info.
+ */
+struct osn_irq {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+#define IRQ_CONTEXT 0
+#define THREAD_CONTEXT 1
+/*
+ * softirq runtime info.
+ */
+struct osn_softirq {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+/*
+ * thread runtime info.
+ */
+struct osn_thread {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+/*
+ * Runtime information: this structure saves the runtime information used by
+ * one sampling thread.
+ */
+struct osnoise_variables {
+ struct task_struct *kthread;
+ bool sampling;
+ pid_t pid;
+ struct osn_nmi nmi;
+ struct osn_irq irq;
+ struct osn_softirq softirq;
+ struct osn_thread thread;
+ local_t int_counter;
+};
+
+/*
+ * Per-cpu runtime information.
+ */
+DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
+
+/*
+ * this_cpu_osn_var - Return the per-cpu osnoise_variables of the current CPU
+ */
+static inline struct osnoise_variables *this_cpu_osn_var(void)
+{
+ return this_cpu_ptr(&per_cpu_osnoise_var);
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * Runtime information for the timer mode.
+ */
+struct timerlat_variables {
+ struct task_struct *kthread;
+ struct hrtimer timer;
+ u64 rel_period;
+ u64 abs_period;
+ bool tracing_thread;
+ u64 count;
+};
+
+DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
+
+/*
+ * this_cpu_tmr_var - Return the per-cpu timerlat_variables of the current CPU
+ */
+static inline struct timerlat_variables *this_cpu_tmr_var(void)
+{
+ return this_cpu_ptr(&per_cpu_timerlat_var);
+}
+
+/*
+ * tlat_var_reset - Reset the values of the per-cpu timerlat_variables
+ */
+static inline void tlat_var_reset(void)
+{
+ struct timerlat_variables *tlat_var;
+ int cpu;
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
+ memset(tlat_var, 0, sizeof(*tlat_var));
+ }
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+#define tlat_var_reset() do {} while (0)
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * osn_var_reset - Reset the values of the per-cpu osnoise_variables
+ */
+static inline void osn_var_reset(void)
+{
+ struct osnoise_variables *osn_var;
+ int cpu;
+
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
+ memset(osn_var, 0, sizeof(*osn_var));
+ }
+}
+
+/*
+ * osn_var_reset_all - Reset the value of all per-cpu osnoise_variables
+ */
+static inline void osn_var_reset_all(void)
+{
+ osn_var_reset();
+ tlat_var_reset();
+}
+
+/*
+ * Tells NMIs to call back to the osnoise tracer to record timestamps.
+ */
+bool trace_osnoise_callback_enabled;
+
+/*
+ * osnoise sample structure definition. Used to store the statistics of a
+ * sample run.
+ */
+struct osnoise_sample {
+ u64 runtime; /* runtime */
+ u64 noise; /* noise */
+ u64 max_sample; /* max single noise sample */
+ int hw_count; /* # HW (incl. hypervisor) interference */
+ int nmi_count; /* # NMIs during this sample */
+ int irq_count; /* # IRQs during this sample */
+ int softirq_count; /* # softirqs during this sample */
+ int thread_count; /* # threads during this sample */
+};
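+
+/*
+ * These fields feed the RUNTIME, NOISE, % OF CPU AVAILABLE and MAX SINGLE
+ * NOISE columns, plus the HW/NMI/IRQ/SIRQ/THREAD interference counters,
+ * printed by print_osnoise_headers() and trace_osnoise_print().
+ */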
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat sample structure definition. Used to store the statistics of
+ * a sample run.
+ */
+struct timerlat_sample {
+ u64 timer_latency; /* timer_latency */
+ unsigned int seqnum; /* unique sequence */
+ int context; /* timer context */
+};
+#endif
+
+/*
+ * Protect the interface.
+ */
+struct mutex interface_lock;
+
+/*
+ * Tracer data.
+ */
+static struct osnoise_data {
+ u64 sample_period; /* total sampling period */
+ u64 sample_runtime; /* active sampling portion of period */
+ u64 stop_tracing; /* stop trace in the internal operation (loop/irq) */
+ u64 stop_tracing_total; /* stop trace in the final operation (report/thread) */
+#ifdef CONFIG_TIMERLAT_TRACER
+ u64 timerlat_period; /* timerlat period */
+ u64 print_stack; /* print IRQ stack if total > */
+ int timerlat_tracer; /* timerlat tracer */
+#endif
+ bool tainted; /* inform users and developers about a problem */
+} osnoise_data = {
+ .sample_period = DEFAULT_SAMPLE_PERIOD,
+ .sample_runtime = DEFAULT_SAMPLE_RUNTIME,
+ .stop_tracing = 0,
+ .stop_tracing_total = 0,
+#ifdef CONFIG_TIMERLAT_TRACER
+ .print_stack = 0,
+ .timerlat_period = DEFAULT_TIMERLAT_PERIOD,
+ .timerlat_tracer = 0,
+#endif
+};
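+
+/*
+ * Note: with the defaults above, sample_period == sample_runtime, so
+ * osnoise_main() computes a zero interval between windows and samples
+ * continuously, only yielding via cond_resched_tasks_rcu_qs().
+ */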
+
+/*
+ * Boolean variable used to inform that the tracer is currently sampling.
+ */
+static bool osnoise_busy;
+
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * Print the osnoise header info.
+ */
+static void print_osnoise_headers(struct seq_file *s)
+{
+ if (osnoise_data.tainted)
+ seq_puts(s, "# osnoise is tainted!\n");
+
+ seq_puts(s, "# _-------=> irqs-off\n");
+ seq_puts(s, "# / _------=> need-resched\n");
+ seq_puts(s, "# | / _-----=> need-resched-lazy\n");
+ seq_puts(s, "# || / _----=> hardirq/softirq\n");
+ seq_puts(s, "# ||| / _---=> preempt-depth\n");
+ seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
+ seq_puts(s, "# ||||| / _-=> migrate-disable\n");
+
+ seq_puts(s, "# |||||| / ");
+ seq_puts(s, " MAX\n");
+
+ seq_puts(s, "# ||||| / ");
+ seq_puts(s, " SINGLE Interference counters:\n");
+
+ seq_puts(s, "# ||||||| RUNTIME ");
+ seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
+
+ seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP IN US ");
+ seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
+
+ seq_puts(s, "# | | | ||||||| | | ");
+ seq_puts(s, " | | | | | | | |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_osnoise_headers(struct seq_file *s)
+{
+ if (osnoise_data.tainted)
+ seq_puts(s, "# osnoise is tainted!\n");
+
+ seq_puts(s, "# _-----=> irqs-off\n");
+ seq_puts(s, "# / _----=> need-resched\n");
+ seq_puts(s, "# | / _---=> hardirq/softirq\n");
+ seq_puts(s, "# || / _--=> preempt-depth ");
+ seq_puts(s, " MAX\n");
+
+ seq_puts(s, "# || / ");
+ seq_puts(s, " SINGLE Interference counters:\n");
+
+ seq_puts(s, "# |||| RUNTIME ");
+ seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
+
+ seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP IN US ");
+ seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
+
+ seq_puts(s, "# | | | |||| | | ");
+ seq_puts(s, " | | | | | | | |\n");
+}
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * osnoise_taint - report an osnoise error.
+ */
+#define osnoise_taint(msg) ({ \
+ struct trace_array *tr = osnoise_trace; \
+ \
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
+ osnoise_data.tainted = true; \
+})
+
+/*
+ * Record an osnoise_sample into the tracer buffer.
+ */
+static void trace_osnoise_sample(struct osnoise_sample *sample)
+{
+ struct trace_array *tr = osnoise_trace;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct trace_event_call *call = &event_osnoise;
+ struct ring_buffer_event *event;
+ struct osnoise_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),
+ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->runtime = sample->runtime;
+ entry->noise = sample->noise;
+ entry->max_sample = sample->max_sample;
+ entry->hw_count = sample->hw_count;
+ entry->nmi_count = sample->nmi_count;
+ entry->irq_count = sample->irq_count;
+ entry->softirq_count = sample->softirq_count;
+ entry->thread_count = sample->thread_count;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * Print the timerlat header info.
+ */
+#ifdef CONFIG_PREEMPT_RT
+static void print_timerlat_headers(struct seq_file *s)
+{
+ seq_puts(s, "# _-------=> irqs-off\n");
+ seq_puts(s, "# / _------=> need-resched\n");
+ seq_puts(s, "# | / _-----=> need-resched-lazy\n");
+ seq_puts(s, "# || / _----=> hardirq/softirq\n");
+ seq_puts(s, "# ||| / _---=> preempt-depth\n");
+ seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
+ seq_puts(s, "# ||||| / _-=> migrate-disable\n");
+ seq_puts(s, "# |||||| /\n");
+ seq_puts(s, "# ||||||| ACTIVATION\n");
+ seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP ID ");
+ seq_puts(s, " CONTEXT LATENCY\n");
+ seq_puts(s, "# | | | ||||||| | | ");
+ seq_puts(s, " | |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_timerlat_headers(struct seq_file *s)
+{
+ seq_puts(s, "# _-----=> irqs-off\n");
+ seq_puts(s, "# / _----=> need-resched\n");
+ seq_puts(s, "# | / _---=> hardirq/softirq\n");
+ seq_puts(s, "# || / _--=> preempt-depth\n");
+ seq_puts(s, "# || /\n");
+ seq_puts(s, "# |||| ACTIVATION\n");
+ seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP ID ");
+ seq_puts(s, " CONTEXT LATENCY\n");
+ seq_puts(s, "# | | | |||| | | ");
+ seq_puts(s, " | |\n");
+}
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * Record a timerlat_sample into the tracer buffer.
+ */
+static void trace_timerlat_sample(struct timerlat_sample *sample)
+{
+ struct trace_array *tr = osnoise_trace;
+ struct trace_event_call *call = &event_osnoise;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct timerlat_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry),
+ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->seqnum = sample->seqnum;
+ entry->context = sample->context;
+ entry->timer_latency = sample->timer_latency;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+#define MAX_CALLS 256
+
+/*
+ * Stack traces will take place only at IRQ level, so there is no need
+ * to control nesting here.
+ */
+struct trace_stack {
+ int stack_size;
+ int nr_entries;
+ unsigned long calls[MAX_CALLS];
+};
+
+static DEFINE_PER_CPU(struct trace_stack, trace_stack);
+
+/*
+ * timerlat_save_stack - save a stack trace without printing
+ *
+ * Save the current stack trace without printing. The
+ * stack will be printed later, after the end of the measurement.
+ */
+static void timerlat_save_stack(int skip)
+{
+ unsigned int size, nr_entries;
+ struct trace_stack *fstack;
+
+ fstack = this_cpu_ptr(&trace_stack);
+
+ size = ARRAY_SIZE(fstack->calls);
+
+ nr_entries = stack_trace_save(fstack->calls, size, skip);
+
+ fstack->stack_size = nr_entries * sizeof(unsigned long);
+ fstack->nr_entries = nr_entries;
+
+ return;
+
+}
+/*
+ * timerlat_dump_stack - dump a stack trace previously saved
+ *
+ * Dump a saved stack trace into the trace buffer.
+ */
+static void timerlat_dump_stack(void)
+{
+ struct trace_event_call *call = &event_osnoise;
+ struct trace_array *tr = osnoise_trace;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct trace_stack *fstack;
+ struct stack_entry *entry;
+ unsigned int size;
+
+ preempt_disable_notrace();
+ fstack = this_cpu_ptr(&trace_stack);
+ size = fstack->stack_size;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size,
+ tracing_gen_ctx());
+ if (!event)
+ goto out;
+
+ entry = ring_buffer_event_data(event);
+
+ memcpy(&entry->caller, fstack->calls, size);
+ entry->size = fstack->nr_entries;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+
+out:
+ preempt_enable_notrace();
+}
+#else
+#define timerlat_dump_stack() do {} while (0)
+#define timerlat_save_stack(a) do {} while (0)
+#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * Macros to encapsulate the time capturing infrastructure.
+ */
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
+
+/*
+ * cond_move_irq_delta_start - Forward the delta_start of a running IRQ
+ *
+ * If an IRQ is preempted by an NMI, its delta_start is pushed forward
+ * to discount the NMI interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->irq.delta_start)
+ osn_var->irq.delta_start += duration;
+}
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * cond_move_softirq_delta_start - Forward the delta_start of a running softirq.
+ *
+ * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed
+ * forward to discount the interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->softirq.delta_start)
+ osn_var->softirq.delta_start += duration;
+}
+#else /* CONFIG_PREEMPT_RT */
+#define cond_move_softirq_delta_start(osn_var, duration) do {} while (0)
+#endif
+
+/*
+ * cond_move_thread_delta_start - Forward the delta_start of a running thread
+ *
+ * If a noisy thread is preempted by a softirq, IRQ or NMI, its delta_start
+ * is pushed forward to discount the interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->thread.delta_start)
+ osn_var->thread.delta_start += duration;
+}
+
+/*
+ * get_int_safe_duration - Get the duration of a window
+ *
+ * The irq, softirq and thread variables need to have their duration without
+ * the interference from higher priority interrupts. Instead of keeping a
+ * variable to discount the interrupt interference from these variables, the
+ * starting time of these variables are pushed forward with the interrupt's
+ * duration. In this way, a single variable is used to:
+ *
+ * - Know if a given window is being measured.
+ * - Account its duration.
+ * - Discount the interference.
+ *
+ * To avoid getting inconsistent values, e.g.,:
+ *
+ * now = time_get()
+ * ---> interrupt!
+ * delta_start -= int duration;
+ * <---
+ * duration = now - delta_start;
+ *
+ * result: negative duration if the variable duration before the
+ * interrupt was smaller than the interrupt execution.
+ *
+ * A counter of interrupts is used. If the counter increased, try
+ * to capture an interference safe duration.
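+ *
+ * Worked example (illustrative numbers): a thread window starts with
+ * delta_start = 100us. A 10us IRQ preempts the thread and, at IRQ exit,
+ * cond_move_thread_delta_start() pushes delta_start to 110us. When the
+ * window ends at now = 150us, the computed duration is 150 - 110 = 40us,
+ * i.e., the thread noise with the IRQ interference already discounted.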
+ */
+static inline s64
+get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start)
+{
+ u64 int_counter, now;
+ s64 duration;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ now = time_get();
+ duration = (now - *delta_start);
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ /*
+ * This is evidence of a race condition that caused
+ * a value to be "discounted" too much.
+ */
+ if (duration < 0)
+ osnoise_taint("Negative duration!\n");
+
+ *delta_start = 0;
+
+ return duration;
+}
+
+/*
+ * set_int_safe_time - Save the current time on *time, aware of interference
+ *
+ * Get the time, taking into consideration a possible interference from
+ * higher priority interrupts.
+ *
+ * See get_int_safe_duration() for an explanation.
+ */
+static u64
+set_int_safe_time(struct osnoise_variables *osn_var, u64 *time)
+{
+ u64 int_counter;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ *time = time_get();
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ return int_counter;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * copy_int_safe_time - Copy *src into *dst aware of interference
+ */
+static u64
+copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src)
+{
+ u64 int_counter;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ *dst = *src;
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ return int_counter;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * trace_osnoise_callback - NMI entry/exit callback
+ *
+ * This function is called at NMI entry and exit. The bool enter
+ * distinguishes between either case. This function is used to note an NMI
+ * occurrence, compute the noise caused by the NMI, and to remove the noise
+ * it is potentially causing on other interference variables.
+ */
+void trace_osnoise_callback(bool enter)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ u64 duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ /*
+ * Currently trace_clock_local() calls sched_clock() and the
+ * generic version is not NMI safe.
+ */
+ if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
+ if (enter) {
+ osn_var->nmi.delta_start = time_get();
+ local_inc(&osn_var->int_counter);
+ } else {
+ duration = time_get() - osn_var->nmi.delta_start;
+
+ trace_nmi_noise(osn_var->nmi.delta_start, duration);
+
+ cond_move_irq_delta_start(osn_var, duration);
+ cond_move_softirq_delta_start(osn_var, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+ }
+ }
+
+ if (enter)
+ osn_var->nmi.count++;
+}
+
+/*
+ * osnoise_trace_irq_entry - Note the starting of an IRQ
+ *
+ * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs,
+ * it is safe to use a single variable (osn_var->irq) to save the statistics.
+ * The arrival_time is used to report... the arrival time. The delta_start
+ * is used to compute the duration at the IRQ exit handler. See
+ * cond_move_irq_delta_start().
+ */
+void osnoise_trace_irq_entry(int id)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (!osn_var->sampling)
+ return;
+ /*
+ * This value will be used in the report, but not to compute
+ * the execution time, so it is safe to get it unsafe.
+ */
+ osn_var->irq.arrival_time = time_get();
+ set_int_safe_time(osn_var, &osn_var->irq.delta_start);
+ osn_var->irq.count++;
+
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * osnoise_trace_irq_exit - Note the end of an IRQ, save data and trace
+ *
+ * Computes the duration of the IRQ noise and traces it. It also discounts the
+ * interference from other noise sources that could currently be accounted.
+ */
+void osnoise_trace_irq_exit(int id, const char *desc)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ int duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
+ trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration);
+ osn_var->irq.arrival_time = 0;
+ cond_move_softirq_delta_start(osn_var, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+}
+
+/*
+ * trace_irqentry_callback - Callback to the irq:irq_entry traceevent
+ *
+ * Used to note the starting of an IRQ occurrence.
+ */
+static void trace_irqentry_callback(void *data, int irq,
+ struct irqaction *action)
+{
+ osnoise_trace_irq_entry(irq);
+}
+
+/*
+ * trace_irqexit_callback - Callback to the irq:irq_exit traceevent
+ *
+ * Used to note the end of an IRQ occurrence.
+ */
+static void trace_irqexit_callback(void *data, int irq,
+ struct irqaction *action, int ret)
+{
+ osnoise_trace_irq_exit(irq, action->name);
+}
+
+/*
+ * arch specific register function.
+ */
+int __weak osnoise_arch_register(void)
+{
+ return 0;
+}
+
+/*
+ * arch specific unregister function.
+ */
+void __weak osnoise_arch_unregister(void)
+{
+ return;
+}
+
+/*
+ * hook_irq_events - Hook IRQ handling events
+ *
+ * This function hooks the IRQ related callbacks to the respective trace
+ * events.
+ */
+static int hook_irq_events(void)
+{
+ int ret;
+
+ ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+ if (ret)
+ goto out_unregister_entry;
+
+ ret = osnoise_arch_register();
+ if (ret)
+ goto out_irq_exit;
+
+ return 0;
+
+out_irq_exit:
+ unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+out_unregister_entry:
+ unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * unhook_irq_events - Unhook IRQ handling events
+ *
+ * This function unhooks the IRQ related callbacks from the respective trace
+ * events.
+ */
+static void unhook_irq_events(void)
+{
+ osnoise_arch_unregister();
+ unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+ unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+}
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * trace_softirq_entry_callback - Note the starting of a softirq
+ *
+ * Save the starting time of a softirq. As softirqs are non-preemptive to
+ * other softirqs, it is safe to use a single variable (osn_var->softirq)
+ * to save the statistics. The arrival_time is used to report... the
+ * arrival time. The delta_start is used to compute the duration at the
+ * softirq exit handler. See cond_move_softirq_delta_start().
+ */
+static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (!osn_var->sampling)
+ return;
+ /*
+ * This value will be used in the report, but not to compute
+ * the execution time, so it is safe to get it unsafe.
+ */
+ osn_var->softirq.arrival_time = time_get();
+ set_int_safe_time(osn_var, &osn_var->softirq.delta_start);
+ osn_var->softirq.count++;
+
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * trace_softirq_exit_callback - Note the end of a softirq
+ *
+ * Computes the duration of the softirq noise and traces it. It also discounts
+ * the interference from other noise sources that could currently be accounted.
+ */
+static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ int duration;
+
+ if (!osn_var->sampling)
+ return;
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ /*
+ * If timerlat is enabled, but the timer IRQ handler has not
+ * run yet to set tracing_thread, do not trace.
+ */
+ if (unlikely(osnoise_data.timerlat_tracer)) {
+ struct timerlat_variables *tlat_var;
+ tlat_var = this_cpu_tmr_var();
+ if (!tlat_var->tracing_thread) {
+ osn_var->softirq.arrival_time = 0;
+ osn_var->softirq.delta_start = 0;
+ return;
+ }
+ }
+#endif
+
+ duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start);
+ trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+ osn_var->softirq.arrival_time = 0;
+}
+
+/*
+ * hook_softirq_events - Hook softirq handling events
+ *
+ * This function hooks the softirq related callbacks to the respective trace
+ * events.
+ */
+static int hook_softirq_events(void)
+{
+ int ret;
+
+ ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL);
+ if (ret)
+ goto out_unreg_entry;
+
+ return 0;
+
+out_unreg_entry:
+ unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * unhook_softirq_events - Unhook softirq handling events
+ *
+ * This function unhooks the softirq related callbacks from the respective trace
+ * events.
+ */
+static void unhook_softirq_events(void)
+{
+ unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+ unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL);
+}
+#else /* CONFIG_PREEMPT_RT */
+/*
+ * softirqs run as threads in PREEMPT_RT mode.
+ */
+static int hook_softirq_events(void)
+{
+ return 0;
+}
+static void unhook_softirq_events(void)
+{
+}
+#endif
+
+/*
+ * thread_entry - Record the starting of a thread noise window
+ *
+ * It saves the context switch time for a noisy thread, and increments
+ * the interference counters.
+ */
+static void
+thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
+{
+ if (!osn_var->sampling)
+ return;
+ /*
+ * The arrival time will be used in the report, but not to compute
+ * the execution time, so it is safe to get it unsafe.
+ */
+ osn_var->thread.arrival_time = time_get();
+
+ set_int_safe_time(osn_var, &osn_var->thread.delta_start);
+
+ osn_var->thread.count++;
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * thread_exit - Report the end of a thread noise window
+ *
+ * It computes the total noise from a thread, tracing if needed.
+ */
+static void
+thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
+{
+ int duration;
+
+ if (!osn_var->sampling)
+ return;
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ if (osnoise_data.timerlat_tracer) {
+ struct timerlat_variables *tlat_var;
+ tlat_var = this_cpu_tmr_var();
+ if (!tlat_var->tracing_thread) {
+ osn_var->thread.delta_start = 0;
+ osn_var->thread.arrival_time = 0;
+ return;
+ }
+ }
+#endif
+
+ duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
+
+ trace_thread_noise(t, osn_var->thread.arrival_time, duration);
+
+ osn_var->thread.arrival_time = 0;
+}
+
+/*
+ * trace_sched_switch - sched:sched_switch trace event handler
+ *
+ * This function is hooked to the sched:sched_switch trace event, and it is
+ * used to record the beginning and to report the end of a thread noise window.
+ */
+static void
+trace_sched_switch_callback(void *data, bool preempt, struct task_struct *p,
+ struct task_struct *n)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (p->pid != osn_var->pid)
+ thread_exit(osn_var, p);
+
+ if (n->pid != osn_var->pid)
+ thread_entry(osn_var, n);
+}
+
+/*
+ * hook_thread_events - Hook the instrumentation for thread noise
+ *
+ * Hook the osnoise tracer callbacks to handle the noise from other
+ * threads on the necessary kernel events.
+ */
+static int hook_thread_events(void)
+{
+ int ret;
+
+ ret = register_trace_sched_switch(trace_sched_switch_callback, NULL);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * unhook_thread_events - Unhook the instrumentation for thread noise
+ *
+ * Unhook the osnoise tracer callbacks that handle the noise from other
+ * threads on the necessary kernel events.
+ */
+static void unhook_thread_events(void)
+{
+ unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
+}
+
+/*
+ * save_osn_sample_stats - Save the osnoise_sample statistics
+ *
+ * Save the osnoise_sample statistics before the sampling phase. These
+ * values will be used later to compute the diff between the statistics
+ * before and after the osnoise sampling.
+ */
+static void
+save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
+{
+ s->nmi_count = osn_var->nmi.count;
+ s->irq_count = osn_var->irq.count;
+ s->softirq_count = osn_var->softirq.count;
+ s->thread_count = osn_var->thread.count;
+}
+
+/*
+ * diff_osn_sample_stats - Compute the osnoise_sample statistics
+ *
+ * After a sample period, compute the difference on the osnoise_sample
+ * statistics. The struct osnoise_sample *s contains the statistics saved via
+ * save_osn_sample_stats() before the osnoise sampling.
+ */
+static void
+diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
+{
+ s->nmi_count = osn_var->nmi.count - s->nmi_count;
+ s->irq_count = osn_var->irq.count - s->irq_count;
+ s->softirq_count = osn_var->softirq.count - s->softirq_count;
+ s->thread_count = osn_var->thread.count - s->thread_count;
+}
+
+/*
+ * osnoise_stop_tracing - Stop tracing and the tracer.
+ */
+static __always_inline void osnoise_stop_tracing(void)
+{
+ struct trace_array *tr = osnoise_trace;
+
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+ "stop tracing hit on cpu %d\n", smp_processor_id());
+
+ tracer_tracing_off(tr);
+}
+
+/*
+ * run_osnoise - Sample the time and look for osnoise
+ *
+ * Used to capture the time, looking for potential osnoise latency repeatedly.
+ * Different from hwlat_detector, it is called with preemption and interrupts
+ * enabled. This allows irqs, softirqs and threads to run, interfering with the
+ * osnoise sampling thread, as they would with a regular thread.
+ */
+static int run_osnoise(void)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct trace_array *tr = osnoise_trace;
+ u64 start, sample, last_sample;
+ u64 last_int_count, int_count;
+ s64 noise = 0, max_noise = 0;
+ s64 total, last_total = 0;
+ struct osnoise_sample s;
+ unsigned int threshold;
+ u64 runtime, stop_in;
+ u64 sum_noise = 0;
+ int hw_count = 0;
+ int ret = -1;
+
+ /*
+ * Considers the current thread as the workload.
+ */
+ osn_var->pid = current->pid;
+
+ /*
+ * Save the current stats for the diff
+ */
+ save_osn_sample_stats(osn_var, &s);
+
+ /*
+ * If the threshold (tracing_thresh, in ns) is 0, use the default value of 5 us.
+ */
+ threshold = tracing_thresh ? : 5000;
+
+ /*
+ * Make sure NMIs see sampling first
+ */
+ osn_var->sampling = true;
+ barrier();
+
+ /*
+ * Transform the *_us config to nanoseconds to avoid the
+ * division on the main loop.
+ */
+ runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;
+ stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
+
+ /*
+ * Start timestamp
+ */
+ start = time_get();
+
+ /*
+ * "previous" loop.
+ */
+ last_int_count = set_int_safe_time(osn_var, &last_sample);
+
+ do {
+ /*
+ * Get sample!
+ */
+ int_count = set_int_safe_time(osn_var, &sample);
+
+ noise = time_sub(sample, last_sample);
+
+ /*
+ * This shouldn't happen.
+ */
+ if (noise < 0) {
+ osnoise_taint("negative noise!");
+ goto out;
+ }
+
+ /*
+ * Sample runtime.
+ */
+ total = time_sub(sample, start);
+
+ /*
+ * Check for possible overflows.
+ */
+ if (total < last_total) {
+ osnoise_taint("total overflow!");
+ break;
+ }
+
+ last_total = total;
+
+ if (noise >= threshold) {
+ int interference = int_count - last_int_count;
+
+ if (noise > max_noise)
+ max_noise = noise;
+
+ if (!interference)
+ hw_count++;
+
+ sum_noise += noise;
+
+ trace_sample_threshold(last_sample, noise, interference);
+
+ if (osnoise_data.stop_tracing)
+ if (noise > stop_in)
+ osnoise_stop_tracing();
+ }
+
+ /*
+ * For the non-preemptive kernel config: let threads run, if
+ * they so wish.
+ */
+ cond_resched();
+
+ last_sample = sample;
+ last_int_count = int_count;
+
+ } while (total < runtime && !kthread_should_stop());
+
+ /*
+ * Make the stores from the sampling loop above visible to interrupts.
+ */
+ barrier();
+
+ osn_var->sampling = false;
+
+ /*
+ * Make sure sampling data is no longer updated.
+ */
+ barrier();
+
+ /*
+ * Save noise info.
+ */
+ s.noise = time_to_us(sum_noise);
+ s.runtime = time_to_us(total);
+ s.max_sample = time_to_us(max_noise);
+ s.hw_count = hw_count;
+
+ /* Save interference stats info */
+ diff_osn_sample_stats(osn_var, &s);
+
+ trace_osnoise_sample(&s);
+
+ /* Keep a running maximum ever recorded osnoise "latency" */
+ if (max_noise > tr->max_latency) {
+ tr->max_latency = max_noise;
+ latency_fsnotify(tr);
+ }
+
+ if (osnoise_data.stop_tracing_total)
+ if (s.noise > osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+
+ return 0;
+out:
+ return ret;
+}
+
+static struct cpumask osnoise_cpumask;
+static struct cpumask save_cpumask;
+
+/*
+ * osnoise_main - The osnoise detection kernel thread
+ *
+ * Calls run_osnoise() function to measure the osnoise for the configured runtime,
+ * every period.
+ */
+static int osnoise_main(void *data)
+{
+ u64 interval;
+
+ while (!kthread_should_stop()) {
+
+ run_osnoise();
+
+ mutex_lock(&interface_lock);
+ interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
+ mutex_unlock(&interface_lock);
+
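+ /*
+ * interval is in microseconds here; do_div() converts it to
+ * milliseconds for msleep_interruptible() below.
+ */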
+ do_div(interval, USEC_PER_MSEC);
+
+ /*
+ * Unlike hwlat_detector, the osnoise tracer can run
+ * without a pause because preemption is on.
+ */
+ if (interval < 1) {
+ /* Let synchronize_rcu_tasks() make progress */
+ cond_resched_tasks_rcu_qs();
+ continue;
+ }
+
+ if (msleep_interruptible(interval))
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat_irq - hrtimer handler for timerlat.
+ */
+static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct trace_array *tr = osnoise_trace;
+ struct timerlat_variables *tlat;
+ struct timerlat_sample s;
+ u64 now;
+ u64 diff;
+
+ /*
+ * I am not sure if the timer was armed for this CPU. So, get
+ * the timerlat struct from the timer itself, not from this
+ * CPU.
+ */
+ tlat = container_of(timer, struct timerlat_variables, timer);
+
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+
+ /*
+ * Enable the osnoise: events for thread and softirq.
+ */
+ tlat->tracing_thread = true;
+
+ osn_var->thread.arrival_time = time_get();
+
+ /*
+ * A hardirq is running: the timer IRQ. It is for sure preempting
+ * a thread, and potentially preempting a softirq.
+ *
+ * At this point, it is not interesting to know the duration of the
+ * preempted thread (and maybe softirq), but how much time they will
+ * delay the beginning of the execution of the timer thread.
+ *
+ * To get the correct (net) delay added by the softirq, its delta_start
+ * is set as the IRQ one. In this way, at the return of the IRQ, the delta
+ * start of the softirq will be zeroed, accounting then only the time
+ * after that.
+ *
+ * The thread follows the same principle. However, if a softirq is
+ * running, the thread needs to receive the softirq delta_start. The
+ * reason is that the softirq will be the last to be unfolded,
+ * resetting the thread delay to zero.
+ */
+#ifndef CONFIG_PREEMPT_RT
+ if (osn_var->softirq.delta_start) {
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
+ &osn_var->softirq.delta_start);
+
+ copy_int_safe_time(osn_var, &osn_var->softirq.delta_start,
+ &osn_var->irq.delta_start);
+ } else {
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
+ &osn_var->irq.delta_start);
+ }
+#else /* CONFIG_PREEMPT_RT */
+ /*
+ * The softirqs run as threads on RT, so there is no need
+ * to keep track of them.
+ */
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start, &osn_var->irq.delta_start);
+#endif /* CONFIG_PREEMPT_RT */
+
+ /*
+ * Compare the current time with the expected time.
+ */
+ diff = now - tlat->abs_period;
+
+ tlat->count++;
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = IRQ_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+ /* Keep a running maximum ever recorded os noise "latency" */
+ if (diff > tr->max_latency) {
+ tr->max_latency = diff;
+ latency_fsnotify(tr);
+ }
+
+ if (osnoise_data.stop_tracing)
+ if (time_to_us(diff) >= osnoise_data.stop_tracing)
+ osnoise_stop_tracing();
+
+ wake_up_process(tlat->kthread);
+
+ if (osnoise_data.print_stack)
+ timerlat_save_stack(0);
+
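+ /*
+ * Do not re-arm the timer here: the woken timerlat thread re-arms it
+ * in wait_next_period().
+ */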
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * wait_next_period - Wait for the next period for timerlat
+ */
+static int wait_next_period(struct timerlat_variables *tlat)
+{
+ ktime_t next_abs_period, now;
+ u64 rel_period = osnoise_data.timerlat_period * 1000;
+
+ now = hrtimer_cb_get_time(&tlat->timer);
+ next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
+
+ /*
+ * Save the next abs_period.
+ */
+ tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
+
+ /*
+ * If the new abs_period is in the past, skip the activation.
+ */
+ while (ktime_compare(now, next_abs_period) > 0) {
+ next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
+ tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD);
+ schedule();
+ return 1;
+}
+
+/*
+ * timerlat_main - Timerlat main
+ */
+static int timerlat_main(void *data)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct timerlat_variables *tlat = this_cpu_tmr_var();
+ struct timerlat_sample s;
+ struct sched_param sp;
+ u64 now, diff;
+
+ /*
+ * Make the thread RT; that is how cyclictest is usually used.
+ */
+ sp.sched_priority = DEFAULT_TIMERLAT_PRIO;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+ tlat->count = 0;
+ tlat->tracing_thread = false;
+
+ hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+ tlat->timer.function = timerlat_irq;
+ tlat->kthread = current;
+ osn_var->pid = current->pid;
+ /*
+ * Annotate the arrival time.
+ */
+ tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
+
+ wait_next_period(tlat);
+
+ osn_var->sampling = 1;
+
+ while (!kthread_should_stop()) {
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+ diff = now - tlat->abs_period;
+
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = THREAD_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+#ifdef CONFIG_STACKTRACE
+ if (osnoise_data.print_stack)
+ if (osnoise_data.print_stack <= time_to_us(diff))
+ timerlat_dump_stack();
+#endif /* CONFIG_STACKTRACE */
+
+ tlat->tracing_thread = false;
+ if (osnoise_data.stop_tracing_total)
+ if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+
+ wait_next_period(tlat);
+ }
+
+ hrtimer_cancel(&tlat->timer);
+ return 0;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * stop_kthread - stop a workload thread
+ */
+static void stop_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+
+ kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
+ if (kthread)
+ kthread_stop(kthread);
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+}
+
+/*
+ * stop_per_cpu_kthreads - Stop per-cpu threads
+ *
+ * Stop the osnoise sampling threads. Use this on unload and at system
+ * shutdown.
+ */
+static void stop_per_cpu_kthreads(void)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu)
+ stop_kthread(cpu);
+
+ put_online_cpus();
+}
+
+/*
+ * start_kthread - Start a workload thread
+ */
+static int start_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+ void *main = osnoise_main;
+ char comm[24];
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ if (osnoise_data.timerlat_tracer) {
+ snprintf(comm, 24, "timerlat/%d", cpu);
+ main = timerlat_main;
+ } else {
+ snprintf(comm, 24, "osnoise/%d", cpu);
+ }
+#else
+ snprintf(comm, 24, "osnoise/%d", cpu);
+#endif
+ kthread = kthread_create_on_cpu(main, NULL, cpu, comm);
+
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ stop_per_cpu_kthreads();
+ return -ENOMEM;
+ }
+
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
+ wake_up_process(kthread);
+
+ return 0;
+}
+
+/*
+ * start_per_cpu_kthreads - Kick off the per-cpu osnoise sampling kthreads
+ *
+ * This starts the kernel threads that will look for osnoise on the
+ * allowed cpus.
+ */
+static int start_per_cpu_kthreads(struct trace_array *tr)
+{
+ struct cpumask *current_mask = &save_cpumask;
+ int retval;
+ int cpu;
+
+ get_online_cpus();
+ /*
+ * Run only on CPUs in which trace and osnoise are allowed to run.
+ */
+ cpumask_and(current_mask, tr->tracing_cpumask, &osnoise_cpumask);
+ /*
+ * And the CPU is online.
+ */
+ cpumask_and(current_mask, cpu_online_mask, current_mask);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_kthread(cpu);
+ if (retval) {
+ stop_per_cpu_kthreads();
+ return retval;
+ }
+ }
+
+ put_online_cpus();
+
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void osnoise_hotplug_workfn(struct work_struct *dummy)
+{
+ struct trace_array *tr = osnoise_trace;
+ unsigned int cpu = smp_processor_id();
+
+ mutex_lock(&trace_types_lock);
+
+ if (!osnoise_busy)
+ goto out_unlock_trace;
+
+ mutex_lock(&interface_lock);
+ get_online_cpus();
+
+ if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
+ goto out_unlock;
+
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
+ goto out_unlock;
+
+ start_kthread(cpu);
+
+out_unlock:
+ put_online_cpus();
+ mutex_unlock(&interface_lock);
+out_unlock_trace:
+ mutex_unlock(&trace_types_lock);
+}
+
+static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
+
+/*
+ * osnoise_cpu_init - CPU hotplug online callback function
+ */
+static int osnoise_cpu_init(unsigned int cpu)
+{
+ schedule_work_on(cpu, &osnoise_hotplug_work);
+ return 0;
+}
+
+/*
+ * osnoise_cpu_die - CPU hotplug offline callback function
+ */
+static int osnoise_cpu_die(unsigned int cpu)
+{
+ stop_kthread(cpu);
+ return 0;
+}
+
+static void osnoise_init_hotplug_support(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online",
+ osnoise_cpu_init, osnoise_cpu_die);
+ if (ret < 0)
+ pr_warn(BANNER "Failed to init cpu hotplug support\n");
+
+ return;
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void osnoise_init_hotplug_support(void)
+{
+ return;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * osnoise_cpus_read - Read function for reading the "cpus" file
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @count: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * Prints the "cpus" output into the user-provided buffer.
+ */
+static ssize_t
+osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ char *mask_str;
+ int len;
+
+ mutex_lock(&interface_lock);
+
+ len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
+ mask_str = kmalloc(len, GFP_KERNEL);
+ if (!mask_str) {
+ count = -ENOMEM;
+ goto out_unlock;
+ }
+
+ len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
+ if (len >= count) {
+ count = -EINVAL;
+ goto out_free;
+ }
+
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+out_free:
+ kfree(mask_str);
+out_unlock:
+ mutex_unlock(&interface_lock);
+
+ return count;
+}
+
+static void osnoise_tracer_start(struct trace_array *tr);
+static void osnoise_tracer_stop(struct trace_array *tr);
+
+/*
+ * osnoise_cpus_write - Write function for "cpus" entry
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @count: The maximum number of bytes to write to "file"
+ * @ppos: The current position in @file
+ *
+ * This function provides a write implementation for the "cpus"
+ * interface to the osnoise trace. By default, it lists all CPUs,
+ * in this way, allowing osnoise threads to run on any online CPU
+ * of the system. It serves to restrict the execution of osnoise to the
+ * set of CPUs written via this interface. Note that osnoise also
+ * respects the "tracing_cpumask." Hence, osnoise threads will run only
+ * on the set of CPUs allowed here AND on "tracing_cpumask." Why not
+ * have just "tracing_cpumask?" Because the user might be interested
+ * in tracing what is running on other CPUs. For instance, one might
+ * run osnoise in one HT CPU while observing what is running on the
+ * sibling HT CPU.
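+ *
+ * Illustrative usage, assuming tracefs is mounted at /sys/kernel/tracing:
+ *
+ *   # echo 0-3 > /sys/kernel/tracing/osnoise/cpus
+ *
+ * restricts the osnoise workload threads to CPUs 0-3 (further limited by
+ * "tracing_cpumask" and by which of those CPUs are online).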
+ */
+static ssize_t
+osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ struct trace_array *tr = osnoise_trace;
+ cpumask_var_t osnoise_cpumask_new;
+ int running, err;
+ char buf[256];
+
+ if (count >= 256)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, count))
+ return -EFAULT;
+
+ /* count < sizeof(buf) was checked above; NUL-terminate before parsing */
+ buf[count] = '\0';
+
+ if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = cpulist_parse(buf, osnoise_cpumask_new);
+ if (err)
+ goto err_free;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop
+ * and osnoise_busy.
+ */
+ mutex_lock(&trace_types_lock);
+ running = osnoise_busy;
+ if (running)
+ osnoise_tracer_stop(tr);
+
+ mutex_lock(&interface_lock);
+ /*
+ * osnoise_cpumask is read by CPU hotplug operations.
+ */
+ get_online_cpus();
+
+ cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
+
+ put_online_cpus();
+ mutex_unlock(&interface_lock);
+
+ if (running)
+ osnoise_tracer_start(tr);
+ mutex_unlock(&trace_types_lock);
+
+ free_cpumask_var(osnoise_cpumask_new);
+ return count;
+
+err_free:
+ free_cpumask_var(osnoise_cpumask_new);
+
+ return err;
+}
+
+/*
+ * osnoise/runtime_us: cannot be greater than the period.
+ */
+static struct trace_min_max_param osnoise_runtime = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.sample_runtime,
+ .max = &osnoise_data.sample_period,
+ .min = NULL,
+};
+
+/*
+ * osnoise/period_us: cannot be smaller than the runtime.
+ */
+static struct trace_min_max_param osnoise_period = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.sample_period,
+ .max = NULL,
+ .min = &osnoise_data.sample_runtime,
+};
+
+/*
+ * osnoise/stop_tracing_us: no limit.
+ */
+static struct trace_min_max_param osnoise_stop_tracing_in = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.stop_tracing,
+ .max = NULL,
+ .min = NULL,
+};
+
+/*
+ * osnoise/stop_tracing_total_us: no limit.
+ */
+static struct trace_min_max_param osnoise_stop_tracing_total = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.stop_tracing_total,
+ .max = NULL,
+ .min = NULL,
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * osnoise/print_stack: print the stacktrace of the IRQ handler if the total
+ * latency is higher than val.
+ */
+static struct trace_min_max_param osnoise_print_stack = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.print_stack,
+ .max = NULL,
+ .min = NULL,
+};
+
+/*
+ * osnoise/timerlat_period: min 100 us, max 1 s
+ */
+u64 timerlat_min_period = 100;
+u64 timerlat_max_period = 1000000;
+static struct trace_min_max_param timerlat_period = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.timerlat_period,
+ .max = &timerlat_max_period,
+ .min = &timerlat_min_period,
+};
+#endif
+
+static const struct file_operations cpus_fops = {
+ .open = tracing_open_generic,
+ .read = osnoise_cpus_read,
+ .write = osnoise_cpus_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * init_tracefs - A function to initialize the tracefs interface files
+ *
+ * This function creates entries in tracefs for "osnoise" and "timerlat".
+ * It creates these directories in the tracing directory, and within that
+ * directory the user can change and view the configs.
+ */
+static int init_tracefs(void)
+{
+ struct dentry *top_dir;
+ struct dentry *tmp;
+ int ret;
+
+ ret = tracing_init_dentry();
+ if (ret)
+ return -ENOMEM;
+
+ top_dir = tracefs_create_dir("osnoise", NULL);
+ if (!top_dir)
+ return 0;
+
+ tmp = tracefs_create_file("period_us", 0640, top_dir,
+ &osnoise_period, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("runtime_us", 0644, top_dir,
+ &osnoise_runtime, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("stop_tracing_us", 0640, top_dir,
+ &osnoise_stop_tracing_in, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("stop_tracing_total_us", 0640, top_dir,
+ &osnoise_stop_tracing_total, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = trace_create_file("cpus", 0644, top_dir, NULL, &cpus_fops);
+ if (!tmp)
+ goto err;
+#ifdef CONFIG_TIMERLAT_TRACER
+#ifdef CONFIG_STACKTRACE
+ tmp = tracefs_create_file("print_stack", 0640, top_dir,
+ &osnoise_print_stack, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+#endif
+
+ tmp = tracefs_create_file("timerlat_period_us", 0640, top_dir,
+ &timerlat_period, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+#endif
+
+ return 0;
+
+err:
+ tracefs_remove(top_dir);
+ return -ENOMEM;
+}
+
+static int osnoise_hook_events(void)
+{
+ int retval;
+
+ /*
+ * Trace is already hooked, we are re-enabling from
+ * a stop_tracing_*.
+ */
+ if (trace_osnoise_callback_enabled)
+ return 0;
+
+ retval = hook_irq_events();
+ if (retval)
+ return -EINVAL;
+
+ retval = hook_softirq_events();
+ if (retval)
+ goto out_unhook_irq;
+
+ retval = hook_thread_events();
+ /*
+ * All fine!
+ */
+ if (!retval)
+ return 0;
+
+ unhook_softirq_events();
+out_unhook_irq:
+ unhook_irq_events();
+ return -EINVAL;
+}
+
+static int __osnoise_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ osn_var_reset_all();
+
+ retval = osnoise_hook_events();
+ if (retval)
+ return retval;
+ /*
+ * Make sure NMIs see reseted values.
+ */
+ barrier();
+ trace_osnoise_callback_enabled = true;
+
+ retval = start_per_cpu_kthreads(tr);
+ if (retval) {
+ unhook_irq_events();
+ return retval;
+ }
+
+ osnoise_busy = true;
+
+ return 0;
+}
+
+static void osnoise_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ if (osnoise_busy)
+ return;
+
+ retval = __osnoise_tracer_start(tr);
+ if (retval)
+ pr_err(BANNER "Error starting osnoise tracer\n");
+
+}
+
+static void osnoise_tracer_stop(struct trace_array *tr)
+{
+ if (!osnoise_busy)
+ return;
+
+ trace_osnoise_callback_enabled = false;
+ barrier();
+
+ stop_per_cpu_kthreads();
+
+ unhook_irq_events();
+ unhook_softirq_events();
+ unhook_thread_events();
+
+ osnoise_busy = false;
+}
+
+static int osnoise_tracer_init(struct trace_array *tr)
+{
+
+ /* Only allow one instance to enable this */
+ if (osnoise_busy)
+ return -EBUSY;
+
+ osnoise_trace = tr;
+ tr->max_latency = 0;
+
+ osnoise_tracer_start(tr);
+
+ return 0;
+}
+
+static void osnoise_tracer_reset(struct trace_array *tr)
+{
+ osnoise_tracer_stop(tr);
+}
+
+static struct tracer osnoise_tracer __read_mostly = {
+ .name = "osnoise",
+ .init = osnoise_tracer_init,
+ .reset = osnoise_tracer_reset,
+ .start = osnoise_tracer_start,
+ .stop = osnoise_tracer_stop,
+ .print_header = print_osnoise_headers,
+ .allow_instances = true,
+};
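+
+/*
+ * Illustrative usage once the tracer is registered, assuming tracefs is
+ * mounted at /sys/kernel/tracing:
+ *
+ *   # echo osnoise > /sys/kernel/tracing/current_tracer
+ */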
+
+#ifdef CONFIG_TIMERLAT_TRACER
+static void timerlat_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ if (osnoise_busy)
+ return;
+
+ osnoise_data.timerlat_tracer = 1;
+
+ retval = __osnoise_tracer_start(tr);
+ if (retval)
+ goto out_err;
+
+ return;
+out_err:
+ pr_err(BANNER "Error starting timerlat tracer\n");
+}
+
+static void timerlat_tracer_stop(struct trace_array *tr)
+{
+ int cpu;
+
+ if (!osnoise_busy)
+ return;
+
+ for_each_online_cpu(cpu)
+ per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
+
+ osnoise_tracer_stop(tr);
+
+ osnoise_data.timerlat_tracer = 0;
+}
+
+static int timerlat_tracer_init(struct trace_array *tr)
+{
+ /* Only allow one instance to enable this */
+ if (osnoise_busy)
+ return -EBUSY;
+
+ osnoise_trace = tr;
+
+ tr->max_latency = 0;
+
+ timerlat_tracer_start(tr);
+
+ return 0;
+}
+
+static void timerlat_tracer_reset(struct trace_array *tr)
+{
+ timerlat_tracer_stop(tr);
+}
+
+static struct tracer timerlat_tracer __read_mostly = {
+ .name = "timerlat",
+ .init = timerlat_tracer_init,
+ .reset = timerlat_tracer_reset,
+ .start = timerlat_tracer_start,
+ .stop = timerlat_tracer_stop,
+ .print_header = print_timerlat_headers,
+ .allow_instances = true,
+};
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+__init static int init_osnoise_tracer(void)
+{
+ int ret;
+
+ mutex_init(&interface_lock);
+
+ cpumask_copy(&osnoise_cpumask, cpu_all_mask);
+
+ ret = register_tracer(&osnoise_tracer);
+ if (ret) {
+ pr_err(BANNER "Error registering osnoise!\n");
+ return ret;
+ }
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ ret = register_tracer(&timerlat_tracer);
+ if (ret) {
+ pr_err(BANNER "Error registering timerlat\n");
+ return ret;
+ }
+#endif
+ osnoise_init_hotplug_support();
+
+ init_tracefs();
+
+ return 0;
+}
+late_initcall(init_osnoise_tracer);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d0368a569bfa..a0bf446bb034 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1202,7 +1202,6 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
return trace_handle_return(s);
}
-
static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
struct trace_event *event)
@@ -1232,6 +1231,122 @@ static struct trace_event trace_hwlat_event = {
.funcs = &trace_hwlat_funcs,
};
+/* TRACE_OSNOISE */
+static enum print_line_t
+trace_osnoise_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct osnoise_entry *field;
+ u64 ratio, ratio_dec;
+ u64 net_runtime;
+
+ trace_assign_type(field, entry);
+
+ /*
+ * compute the available % of cpu time.
+ */
+ net_runtime = field->runtime - field->noise;
+ ratio = net_runtime * 10000000;
+ do_div(ratio, field->runtime);
+ ratio_dec = do_div(ratio, 100000);
+
+ trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
+ field->runtime,
+ field->noise,
+ ratio, ratio_dec,
+ field->max_sample);
+
+ trace_seq_printf(s, " %6u", field->hw_count);
+ trace_seq_printf(s, " %6u", field->nmi_count);
+ trace_seq_printf(s, " %6u", field->irq_count);
+ trace_seq_printf(s, " %6u", field->softirq_count);
+ trace_seq_printf(s, " %6u", field->thread_count);
+
+ trace_seq_putc(s, '\n');
+
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_osnoise_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct osnoise_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
+ field->runtime,
+ field->noise,
+ field->max_sample,
+ field->hw_count,
+ field->nmi_count,
+ field->irq_count,
+ field->softirq_count,
+ field->thread_count);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_osnoise_funcs = {
+ .trace = trace_osnoise_print,
+ .raw = trace_osnoise_raw,
+};
+
+static struct trace_event trace_osnoise_event = {
+ .type = TRACE_OSNOISE,
+ .funcs = &trace_osnoise_funcs,
+};
+
+/* TRACE_TIMERLAT */
+static enum print_line_t
+trace_timerlat_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct timerlat_entry *field;
+
+ trace_assign_type(field, entry);
+
+ trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
+ field->seqnum,
+ field->context ? "thread" : "irq",
+ field->timer_latency);
+
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_timerlat_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct timerlat_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%u %d %llu\n",
+ field->seqnum,
+ field->context,
+ field->timer_latency);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_timerlat_funcs = {
+ .trace = trace_timerlat_print,
+ .raw = trace_timerlat_raw,
+};
+
+static struct trace_event trace_timerlat_event = {
+ .type = TRACE_TIMERLAT,
+ .funcs = &trace_timerlat_funcs,
+};
+
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
@@ -1442,6 +1557,8 @@ static struct trace_event *events[] __initdata = {
&trace_bprint_event,
&trace_print_event,
&trace_hwlat_event,
+ &trace_osnoise_event,
+ &trace_timerlat_event,
&trace_raw_data_event,
&trace_func_repeats_event,
NULL
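trace_osnoise_print() derives the "available CPU" column from integer math only: the net runtime is scaled by 10,000,000 before the first do_div(), and a second do_div() by 100,000 splits the result into the integer percent and a five-digit fraction. A userspace sketch of the same arithmetic (plain division standing in for the kernel's do_div()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t runtime = 1000000, noise = 1250;	/* sample values, in us */
	uint64_t net = runtime - noise;
	uint64_t ratio = net * 10000000 / runtime;	/* available time in 1e-7 units */
	uint64_t frac = ratio % 100000;			/* five decimal digits */

	ratio /= 100000;				/* integer percent */
	printf("%3llu.%05llu%%\n", (unsigned long long)ratio,
	       (unsigned long long)frac);		/* prints " 99.87500%" */
	return 0;
}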
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e5778d1d7a5b..2402de520eca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,9 +26,9 @@ static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
-static int wakeup_rt;
-static int wakeup_dl;
-static int tracing_dl = 0;
+static bool wakeup_rt;
+static bool wakeup_dl;
+static bool tracing_dl;
static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -498,7 +498,7 @@ static void __wakeup_reset(struct trace_array *tr)
{
wakeup_cpu = -1;
wakeup_prio = -1;
- tracing_dl = 0;
+ tracing_dl = false;
if (wakeup_task)
put_task_struct(wakeup_task);
@@ -572,9 +572,9 @@ probe_wakeup(void *ignore, struct task_struct *p)
* another task until the first one wakes up.
*/
if (dl_task(p))
- tracing_dl = 1;
+ tracing_dl = true;
else
- tracing_dl = 0;
+ tracing_dl = false;
wakeup_task = get_task_struct(p);
@@ -685,8 +685,8 @@ static int wakeup_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 0;
+ wakeup_dl = false;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
@@ -695,8 +695,8 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 1;
+ wakeup_dl = false;
+ wakeup_rt = true;
return __wakeup_tracer_init(tr);
}
@@ -705,8 +705,8 @@ static int wakeup_dl_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 1;
- wakeup_rt = 0;
+ wakeup_dl = true;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h
index 6e146b959dcd..4007fe95cf42 100644
--- a/kernel/trace/trace_synth.h
+++ b/kernel/trace/trace_synth.h
@@ -14,10 +14,10 @@ struct synth_field {
char *name;
size_t size;
unsigned int offset;
+ unsigned int field_pos;
bool is_signed;
bool is_string;
bool is_dynamic;
- bool field_pos;
};
struct synth_event {
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 9f478d29b926..efd14c79fab4 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -15,12 +15,57 @@
#include <linux/sched/task.h>
#include <linux/static_key.h>
+enum tp_func_state {
+ TP_FUNC_0,
+ TP_FUNC_1,
+ TP_FUNC_2,
+ TP_FUNC_N,
+};
+
extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
+enum tp_transition_sync {
+ TP_TRANSITION_SYNC_1_0_1,
+ TP_TRANSITION_SYNC_N_2_1,
+
+ _NR_TP_TRANSITION_SYNC,
+};
+
+struct tp_transition_snapshot {
+ unsigned long rcu;
+ unsigned long srcu;
+ bool ongoing;
+};
+
+/* Protected by tracepoints_mutex */
+static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
+
+static void tp_rcu_get_state(enum tp_transition_sync sync)
+{
+ struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+ /* Keep the latest get_state snapshot. */
+ snapshot->rcu = get_state_synchronize_rcu();
+ snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
+ snapshot->ongoing = true;
+}
+
+static void tp_rcu_cond_sync(enum tp_transition_sync sync)
+{
+ struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
+
+ if (!snapshot->ongoing)
+ return;
+ cond_synchronize_rcu(snapshot->rcu);
+ if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
+ synchronize_srcu(&tracepoint_srcu);
+ snapshot->ongoing = false;
+}
+
/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;
@@ -246,26 +291,29 @@ static void *func_remove(struct tracepoint_func **funcs,
return old;
}
-static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
+/*
+ * Count the number of functions (enum tp_func_state) in a tp_funcs array.
+ */
+static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
+{
+ if (!tp_funcs)
+ return TP_FUNC_0;
+ if (!tp_funcs[1].func)
+ return TP_FUNC_1;
+ if (!tp_funcs[2].func)
+ return TP_FUNC_2;
+ return TP_FUNC_N; /* 3 or more */
+}
+
+static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
void *func = tp->iterator;
/* Synthetic events do not have static call sites */
if (!tp->static_call_key)
return;
-
- if (!tp_funcs[1].func) {
+ if (nr_func_state(tp_funcs) == TP_FUNC_1)
func = tp_funcs[0].func;
- /*
- * If going from the iterator back to a single caller,
- * we need to synchronize with __DO_TRACE to make sure
- * that the data passed to the callback is the one that
- * belongs to that callback.
- */
- if (sync)
- tracepoint_synchronize_unregister();
- }
-
__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
@@ -273,7 +321,8 @@ static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func
* Add the probe function to a tracepoint.
*/
static int tracepoint_add_func(struct tracepoint *tp,
- struct tracepoint_func *func, int prio)
+ struct tracepoint_func *func, int prio,
+ bool warn)
{
struct tracepoint_func *old, *tp_funcs;
int ret;
@@ -288,7 +337,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
lockdep_is_held(&tracepoints_mutex));
old = func_add(&tp_funcs, func, prio);
if (IS_ERR(old)) {
- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
@@ -298,9 +347,41 @@ static int tracepoint_add_func(struct tracepoint *tp,
* a pointer to it. This array is referenced by __DO_TRACE from
* include/linux/tracepoint.h using rcu_dereference_sched().
*/
- rcu_assign_pointer(tp->funcs, tp_funcs);
- tracepoint_update_call(tp, tp_funcs, false);
- static_key_enable(&tp->key);
+ switch (nr_func_state(tp_funcs)) {
+ case TP_FUNC_1: /* 0->1 */
+ /*
+ * Make sure new static func never uses old data after a
+ * 1->0->1 transition sequence.
+ */
+ tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
+ /* Set static call to first function */
+ tracepoint_update_call(tp, tp_funcs);
+ /* Both iterator and static call handle NULL tp->funcs */
+ rcu_assign_pointer(tp->funcs, tp_funcs);
+ static_key_enable(&tp->key);
+ break;
+ case TP_FUNC_2: /* 1->2 */
+ /* Set iterator static call */
+ tracepoint_update_call(tp, tp_funcs);
+ /*
+ * Iterator callback installed before updating tp->funcs.
+ * Requires ordering between RCU assign/dereference and
+ * static call update/call.
+ */
+ fallthrough;
+ case TP_FUNC_N: /* N->N+1 (N>1) */
+ rcu_assign_pointer(tp->funcs, tp_funcs);
+ /*
+ * Make sure static func never uses incorrect data after a
+ * N->...->2->1 (N>1) transition sequence.
+ */
+ if (tp_funcs[0].data != old[0].data)
+ tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
release_probes(old);
return 0;
@@ -327,23 +408,84 @@ static int tracepoint_remove_func(struct tracepoint *tp,
/* Failed allocating new tp_funcs, replaced func with stub */
return 0;
- if (!tp_funcs) {
+ switch (nr_func_state(tp_funcs)) {
+ case TP_FUNC_0: /* 1->0 */
/* Removed last function */
if (tp->unregfunc && static_key_enabled(&tp->key))
tp->unregfunc();
static_key_disable(&tp->key);
+ /* Set iterator static call */
+ tracepoint_update_call(tp, tp_funcs);
+ /* Both iterator and static call handle NULL tp->funcs */
+ rcu_assign_pointer(tp->funcs, NULL);
+ /*
+ * Make sure new static func never uses old data after a
+ * 1->0->1 transition sequence.
+ */
+ tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
+ break;
+ case TP_FUNC_1: /* 2->1 */
rcu_assign_pointer(tp->funcs, tp_funcs);
- } else {
+ /*
+ * Make sure static func never uses incorrect data after a
+ * N->...->2->1 (N>2) transition sequence. If the first
+ * element's data has changed, then force the synchronization
+ * to prevent current readers that have loaded the old data
+ * from calling the new function.
+ */
+ if (tp_funcs[0].data != old[0].data)
+ tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+ tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
+ /* Set static call to first function */
+ tracepoint_update_call(tp, tp_funcs);
+ break;
+ case TP_FUNC_2: /* N->N-1 (N>2) */
+ fallthrough;
+ case TP_FUNC_N:
rcu_assign_pointer(tp->funcs, tp_funcs);
- tracepoint_update_call(tp, tp_funcs,
- tp_funcs[0].func != old[0].func);
+ /*
+ * Make sure static func never uses incorrect data after a
+ * N->...->2->1 (N>2) transition sequence.
+ */
+ if (tp_funcs[0].data != old[0].data)
+ tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
release_probes(old);
return 0;
}
/**
+ * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
+ * @tp: tracepoint
+ * @probe: probe handler
+ * @data: tracepoint data
+ * @prio: priority of this function over other registered functions
+ *
+ * Same as tracepoint_probe_register_prio() except that it will not warn
+ * if the tracepoint is already registered.
+ */
+int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
+ void *data, int prio)
+{
+ struct tracepoint_func tp_func;
+ int ret;
+
+ mutex_lock(&tracepoints_mutex);
+ tp_func.func = probe;
+ tp_func.data = data;
+ tp_func.prio = prio;
+ ret = tracepoint_add_func(tp, &tp_func, prio, false);
+ mutex_unlock(&tracepoints_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
+
+/**
* tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
* @tp: tracepoint
* @probe: probe handler
@@ -366,7 +508,7 @@ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
tp_func.func = probe;
tp_func.data = data;
tp_func.prio = prio;
- ret = tracepoint_add_func(tp, &tp_func, prio);
+ ret = tracepoint_add_func(tp, &tp_func, prio, true);
mutex_unlock(&tracepoints_mutex);
return ret;
}
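The new warn flag only matters to callers that expect the probe to possibly be attached already: func_add() then returns -EEXIST and, with warn == false, no WARN_ON_ONCE fires. A hedged usage sketch of the exported helper (probe name, prototype and caller are illustrative, not part of the patch):

#include <linux/tracepoint.h>

static void my_probe(void *data, void *arg)
{
	/* ... handle the event ... */
}

static int attach_probe(struct tracepoint *tp, void *data)
{
	int ret;

	ret = tracepoint_probe_register_prio_may_exist(tp, my_probe, data,
						       TRACEPOINT_DEFAULT_PRIO);
	if (ret == -EEXIST)
		return 0;	/* already attached; silently accept it */
	return ret;
}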
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 87799e2379bd..bb51849e6375 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -58,14 +58,17 @@ static struct ctl_table_root set_root = {
.permissions = set_permissions,
};
-#define UCOUNT_ENTRY(name) \
- { \
- .procname = name, \
- .maxlen = sizeof(int), \
- .mode = 0644, \
- .proc_handler = proc_dointvec_minmax, \
- .extra1 = SYSCTL_ZERO, \
- .extra2 = SYSCTL_INT_MAX, \
+static long ue_zero = 0;
+static long ue_int_max = INT_MAX;
+
+#define UCOUNT_ENTRY(name) \
+ { \
+ .procname = name, \
+ .maxlen = sizeof(long), \
+ .mode = 0644, \
+ .proc_handler = proc_doulongvec_minmax, \
+ .extra1 = &ue_zero, \
+ .extra2 = &ue_int_max, \
}
static struct ctl_table user_table[] = {
UCOUNT_ENTRY("max_user_namespaces"),
@@ -160,6 +163,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
struct ucounts *ucounts, *new;
+ long overflow;
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@@ -184,8 +188,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
return new;
}
}
+ overflow = atomic_add_negative(1, &ucounts->count);
spin_unlock_irq(&ucounts_lock);
- ucounts = get_ucounts(ucounts);
+ if (overflow) {
+ put_ucounts(ucounts);
+ return NULL;
+ }
return ucounts;
}
@@ -193,8 +201,7 @@ void put_ucounts(struct ucounts *ucounts)
{
unsigned long flags;
- if (atomic_dec_and_test(&ucounts->count)) {
- spin_lock_irqsave(&ucounts_lock, flags);
+ if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
hlist_del_init(&ucounts->node);
spin_unlock_irqrestore(&ucounts_lock, flags);
kfree(ucounts);
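alloc_ucounts() now catches reference-count overflow by letting the increment run into the sign bit: atomic_add_negative() reports whether the post-increment value is negative, and on overflow the new reference is dropped and NULL is returned instead of handing out a wrapped count. A standalone sketch of the check (a userspace stand-in, not the kernel atomic):

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

/* Mimics atomic_add_negative(): wrap like a 32-bit atomic_t and report
 * whether the result has the sign bit set. */
static bool add_negative(unsigned int *v, unsigned int i)
{
	*v += i;			/* unsigned wrap is well defined */
	return *v > INT_MAX;		/* i.e. negative as a signed value */
}

int main(void)
{
	unsigned int count = INT_MAX;	/* refcount already saturated */

	if (add_negative(&count, 1))
		puts("overflow: back out the reference and fail the allocation");
	return 0;
}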
diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c
index bb7bb3b478ab..9dae1f648713 100644
--- a/kernel/usermode_driver.c
+++ b/kernel/usermode_driver.c
@@ -26,7 +26,7 @@ static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *na
if (IS_ERR(mnt))
return mnt;
- file = file_open_root(mnt->mnt_root, mnt, name, O_CREAT | O_WRONLY, 0700);
+ file = file_open_root_mnt(mnt, name, O_CREAT | O_WRONLY, 0700);
if (IS_ERR(file)) {
mntput(mnt);
return ERR_CAST(file);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 50142fc08902..f148eacda55a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
unbound_release_work);
struct workqueue_struct *wq = pwq->wq;
struct worker_pool *pool = pwq->pool;
- bool is_last;
+ bool is_last = false;
- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
- return;
+ /*
+ * when @pwq is not linked, it doesn't hold any reference to the
+ * @wq, and @wq is invalid to access.
+ */
+ if (!list_empty(&pwq->pwqs_node)) {
+ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+ return;
- mutex_lock(&wq->mutex);
- list_del_rcu(&pwq->pwqs_node);
- is_last = list_empty(&wq->pwqs);
- mutex_unlock(&wq->mutex);
+ mutex_lock(&wq->mutex);
+ list_del_rcu(&pwq->pwqs_node);
+ is_last = list_empty(&wq->pwqs);
+ mutex_unlock(&wq->mutex);
+ }
mutex_lock(&wq_pool_mutex);
put_unbound_pool(pool);
diff --git a/lib/Kconfig b/lib/Kconfig
index ac3b30697b2b..5c9c0687f76d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -102,6 +102,20 @@ config INDIRECT_PIO
When in doubt, say N.
+config INDIRECT_IOMEM
+ bool
+ help
+ This is selected by other options/architectures to provide the
+ emulated iomem accessors.
+
+config INDIRECT_IOMEM_FALLBACK
+ bool
+ depends on INDIRECT_IOMEM
+ help
+ If INDIRECT_IOMEM is selected, this enables falling back to plain
+ mmio accesses when the IO memory address is not a registered
+ emulated region.
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -669,9 +683,6 @@ config PARMAN
config OBJAGG
tristate "objagg" if COMPILE_TEST
-config STRING_SELFTEST
- tristate "Test string functions"
-
endmenu
config GENERIC_IOREMAP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1c9857fdb1a0..5ddd575159fb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -35,6 +35,17 @@ config PRINTK_CALLER
no option to enable/disable at the kernel command line parameter or
sysfs interface.
+config STACKTRACE_BUILD_ID
+ bool "Show build ID information in stacktraces"
+ depends on PRINTK
+ help
+ Selecting this option adds build ID information for symbols in
+ stacktraces printed with the printk format '%p[SR]b'.
+
+ This option is intended for distros where debuginfo is not easily
+ accessible but can be downloaded given the build ID of the vmlinux or
+ kernel module where the function is located.
+
config CONSOLE_LOGLEVEL_DEFAULT
int "Default console loglevel (1-15)"
range 1 15
@@ -313,9 +324,6 @@ config DEBUG_INFO_BTF
config PAHOLE_HAS_SPLIT_BTF
def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
-config PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT
- def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "122")
-
config DEBUG_INFO_BTF_MODULES
def_bool y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
@@ -403,8 +411,8 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
-config DEBUG_FORCE_FUNCTION_ALIGN_32B
- bool "Force all function address 32B aligned" if EXPERT
+config DEBUG_FORCE_FUNCTION_ALIGN_64B
+ bool "Force all function address 64B aligned" if EXPERT
help
There are cases that a commit from one domain changes the function
address alignment of other domains, and cause magic performance
@@ -1282,7 +1290,7 @@ config PROVE_RAW_LOCK_NESTING
option expect lockdep splats until these problems have been fully
addressed which is work in progress. This config switch allows to
identify and analyze these problems. It will be removed and the
- check permanentely enabled once the main issues have been fixed.
+ check permanently enabled once the main issues have been fixed.
If unsure, select N.
@@ -1448,7 +1456,7 @@ config DEBUG_LOCKING_API_SELFTESTS
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
are detected by debugging mechanisms or not. (if you disable
- lock debugging then those bugs wont be detected of course.)
+ lock debugging then those bugs won't be detected of course.)
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
@@ -1928,7 +1936,7 @@ config FAIL_IO_TIMEOUT
thus exercising the error handling.
Only works with drivers that use the generic timeout handling,
- for others it wont do anything.
+ for others it won't do anything.
config FAIL_FUTEX
bool "Fault-injection capability for futexes"
@@ -2049,8 +2057,9 @@ config LKDTM
Documentation/fault-injection/provoke-crashes.rst
config TEST_LIST_SORT
- tristate "Linked list sorting test"
- depends on DEBUG_KERNEL || m
+ tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
help
Enable this to turn on 'list_sort()' function test. This test is
executed only once during system boot (so affects only boot time),
@@ -2171,6 +2180,9 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
+config STRING_SELFTEST
+ tristate "Test string functions at runtime"
+
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
@@ -2446,6 +2458,18 @@ config SLUB_KUNIT_TEST
If unsure, say N.
+config RATIONAL_KUNIT_TEST
+ tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select RATIONAL
+ default KUNIT_ALL_TESTS
+ help
+ This builds the rational math unit test.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
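TEST_LIST_SORT and the new RATIONAL_KUNIT_TEST both follow the usual KUnit conventions (depends on KUNIT, default KUNIT_ALL_TESTS). For reference, a minimal KUnit suite is registered like the sketch below; the names are generic, not the actual list_sort or rational tests:

#include <kunit/test.h>

static void example_add_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 2 + 2, 4);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_add_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);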
diff --git a/lib/Makefile b/lib/Makefile
index 6d765d5fb8ac..5efd1b435a37 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -148,6 +148,8 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
lib-y += logic_pio.o
+lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
index 41e71aae3ef6..27bbe891714f 100644
--- a/lib/asn1_encoder.c
+++ b/lib/asn1_encoder.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(asn1_encode_oid);
/**
* asn1_encode_length() - encode a length to follow an ASN.1 tag
* @data: pointer to encode at
- * @data_len: pointer to remaning length (adjusted by routine)
+ * @data_len: pointer to remaining length (adjusted by routine)
* @len: length to encode
*
* This routine can encode lengths up to 65535 using the ASN.1 rules.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 74ceb02f45e3..9401d39e4722 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -581,6 +581,14 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
{
unsigned int lastbit = r->nbits - 1;
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
str = bitmap_getnum(str, &r->start, lastbit);
if (IS_ERR(str))
return str;
@@ -595,6 +603,7 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
if (IS_ERR(str))
return str;
+check_pattern:
if (end_of_region(*str))
goto no_pattern;
@@ -784,8 +793,6 @@ int bitmap_parse(const char *start, unsigned int buflen,
}
EXPORT_SYMBOL(bitmap_parse);
-
-#ifdef CONFIG_NUMA
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
@@ -894,6 +901,7 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
}
}
+EXPORT_SYMBOL(bitmap_remap);
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
@@ -931,7 +939,9 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
else
return bitmap_ord_to_pos(new, n % w, bits);
}
+EXPORT_SYMBOL(bitmap_bitremap);
+#ifdef CONFIG_NUMA
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
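With the "all" keyword accepted by bitmap_parse_region(), a list string no longer needs an explicit range to apply a group pattern to the whole bitmap. A sketch of how a caller would exercise it through bitmap_parselist(), assuming kernel context (the wrapper function and values are illustrative):

#include <linux/bitmap.h>

static int demo_parse_all_pattern(void)
{
	DECLARE_BITMAP(mask, 16);

	/* "all" covers bits 0-15; ":2/4" keeps the first 2 bits of every 4,
	 * so bits 0-1, 4-5, 8-9 and 12-13 end up set. */
	return bitmap_parselist("all:2/4", mask, 16);
}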
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 9f8c70a98fcf..927017431fb6 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -156,7 +156,7 @@ xbc_node_find_child(struct xbc_node *parent, const char *key)
struct xbc_node *node;
if (parent)
- node = xbc_node_get_child(parent);
+ node = xbc_node_get_subkey(parent);
else
node = xbc_root_node();
@@ -164,7 +164,7 @@ xbc_node_find_child(struct xbc_node *parent, const char *key)
if (!xbc_node_match_prefix(node, &key))
node = xbc_node_get_next(node);
else if (*key != '\0')
- node = xbc_node_get_child(node);
+ node = xbc_node_get_subkey(node);
else
break;
}
@@ -274,6 +274,8 @@ int __init xbc_node_compose_key_after(struct xbc_node *root,
struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
struct xbc_node *node)
{
+ struct xbc_node *next;
+
if (unlikely(!xbc_data))
return NULL;
@@ -282,6 +284,13 @@ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
if (!node)
node = xbc_nodes;
} else {
+ /* Leaf node may have a subkey */
+ next = xbc_node_get_subkey(node);
+ if (next) {
+ node = next;
+ goto found;
+ }
+
if (node == root) /* @root was a leaf, no child node. */
return NULL;
@@ -296,6 +305,7 @@ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
node = xbc_node_get_next(node);
}
+found:
while (node && !xbc_node_is_leaf(node))
node = xbc_node_get_child(node);
@@ -367,18 +377,28 @@ static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
return node;
}
-static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
+{
+ while (node->child)
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+static struct xbc_node * __init __xbc_add_sibling(char *data, u32 flag, bool head)
{
struct xbc_node *sib, *node = xbc_add_node(data, flag);
if (node) {
if (!last_parent) {
+ /* Ignore @head in this case */
node->parent = XBC_NODE_MAX;
sib = xbc_last_sibling(xbc_nodes);
sib->next = xbc_node_index(node);
} else {
node->parent = xbc_node_index(last_parent);
- if (!last_parent->child) {
+ if (!last_parent->child || head) {
+ node->next = last_parent->child;
last_parent->child = xbc_node_index(node);
} else {
sib = xbc_node_get_child(last_parent);
@@ -392,6 +412,16 @@ static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
return node;
}
+static inline struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+{
+ return __xbc_add_sibling(data, flag, false);
+}
+
+static inline struct xbc_node * __init xbc_add_head_sibling(char *data, u32 flag)
+{
+ return __xbc_add_sibling(data, flag, true);
+}
+
static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
{
struct xbc_node *node = xbc_add_sibling(data, flag);
@@ -517,17 +547,20 @@ static int __init xbc_parse_array(char **__v)
char *next;
int c = 0;
+ if (last_parent->child)
+ last_parent = xbc_node_get_child(last_parent);
+
do {
c = __xbc_parse_value(__v, &next);
if (c < 0)
return c;
- node = xbc_add_sibling(*__v, XBC_VALUE);
+ node = xbc_add_child(*__v, XBC_VALUE);
if (!node)
return -ENOMEM;
*__v = next;
} while (c == ',');
- node->next = 0;
+ node->child = 0;
return c;
}
@@ -557,8 +590,9 @@ static int __init __xbc_add_key(char *k)
node = find_match_node(xbc_nodes, k);
else {
child = xbc_node_get_child(last_parent);
+ /* Since the value node is the first child, skip it. */
if (child && xbc_node_is_value(child))
- return xbc_parse_error("Subkey is mixed with value", k);
+ child = xbc_node_get_next(child);
node = find_match_node(child, k);
}
@@ -601,23 +635,29 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (ret)
return ret;
- child = xbc_node_get_child(last_parent);
- if (child) {
- if (xbc_node_is_key(child))
- return xbc_parse_error("Value is mixed with subkey", v);
- else if (op == '=')
- return xbc_parse_error("Value is redefined", v);
- }
-
c = __xbc_parse_value(&v, &next);
if (c < 0)
return c;
- if (op == ':' && child) {
- xbc_init_node(child, v, XBC_VALUE);
- } else if (!xbc_add_sibling(v, XBC_VALUE))
+ child = xbc_node_get_child(last_parent);
+ if (child && xbc_node_is_value(child)) {
+ if (op == '=')
+ return xbc_parse_error("Value is redefined", v);
+ if (op == ':') {
+ unsigned short nidx = child->next;
+
+ xbc_init_node(child, v, XBC_VALUE);
+ child->next = nidx; /* keep subkeys */
+ goto array;
+ }
+ /* op must be '+' */
+ last_parent = xbc_last_child(child);
+ }
+ /* The value node should always be the first child */
+ if (!xbc_add_head_sibling(v, XBC_VALUE))
return -ENOMEM;
+array:
if (c == ',') { /* Array */
c = xbc_parse_array(&next);
if (c < 0)
diff --git a/lib/buildid.c b/lib/buildid.c
index 6156997c3895..dfc62625cae4 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -1,36 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/buildid.h>
+#include <linux/cache.h>
#include <linux/elf.h>
+#include <linux/kernel.h>
#include <linux/pagemap.h>
#define BUILD_ID 3
+
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
-static inline int parse_build_id(void *page_addr,
- unsigned char *build_id,
- __u32 *size,
- void *note_start,
- Elf32_Word note_size)
+static int parse_build_id_buf(unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
{
Elf32_Word note_offs = 0, new_offs;
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
-
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
-
while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
if (nhdr->n_type == BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
+ !strcmp((char *)(nhdr + 1), "GNU") &&
nhdr->n_descsz > 0 &&
nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
memcpy(build_id,
@@ -49,11 +44,29 @@ static inline int parse_build_id(void *page_addr,
break;
note_offs = new_offs;
}
+
return -EINVAL;
}
+static inline int parse_build_id(const void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ return parse_build_id_buf(build_id, size, note_start, note_size);
+}
+
/* Parse build ID from 32-bit ELF */
-static int get_build_id_32(void *page_addr, unsigned char *build_id,
+static int get_build_id_32(const void *page_addr, unsigned char *build_id,
__u32 *size)
{
Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
@@ -78,7 +91,7 @@ static int get_build_id_32(void *page_addr, unsigned char *build_id,
}
/* Parse build ID from 64-bit ELF */
-static int get_build_id_64(void *page_addr, unsigned char *build_id,
+static int get_build_id_64(const void *page_addr, unsigned char *build_id,
__u32 *size)
{
Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
@@ -108,7 +121,7 @@ static int get_build_id_64(void *page_addr, unsigned char *build_id,
* @build_id: buffer to store build id, at least BUILD_ID_SIZE long
* @size: returns actual build id size in case of success
*
- * Returns 0 on success, otherwise error (< 0).
+ * Return: 0 on success, -EINVAL otherwise
*/
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size)
@@ -147,3 +160,32 @@ out:
put_page(page);
return ret;
}
+
+/**
+ * build_id_parse_buf - Get build ID from a buffer
+ * @buf: Elf note section(s) to parse
+ * @buf_size: Size of @buf in bytes
+ * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
+{
+ return parse_build_id_buf(build_id, NULL, buf, buf_size);
+}
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
+
+/**
+ * init_vmlinux_build_id - Compute and stash the running kernel's build ID
+ */
+void __init init_vmlinux_build_id(void)
+{
+ extern const void __start_notes __weak;
+ extern const void __stop_notes __weak;
+ unsigned int size = &__stop_notes - &__start_notes;
+
+ build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
+}
+#endif
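parse_build_id_buf() walks ordinary ELF note records: each is an Elf32_Nhdr followed by the 4-byte-aligned name and descriptor, and the GNU build-id note is the one with n_type == 3 and name "GNU". The offset stepping it relies on can be written out as below (a layout sketch using the userspace elf.h types, not a new kernel API):

#include <elf.h>
#include <stddef.h>

/* Offset of the note record that follows the one at 'off'. */
static size_t next_note(const Elf32_Nhdr *nhdr, size_t off)
{
	return off + sizeof(*nhdr) +
	       ((nhdr->n_namesz + 3) & ~3u) +	/* "GNU\0" -> 4 bytes */
	       ((nhdr->n_descsz + 3) & ~3u);	/* build ID, 20 bytes for SHA-1 */
}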
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index 018bfc8113c4..a72a2c16066e 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
n, e[0], r[0]);
p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
- KUNIT_EXPECT_PTR_EQ_MSG(test, p, (int *)0, "in test %u at %u out of bound", n, p - r);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
}
static void cmdline_test_range(struct kunit *test)
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index c72c865032fa..3518e7394eca 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -80,7 +80,7 @@
/* This is what we know about each Huffman coding group */
struct group_data {
- /* We have an extra slot at the end of limit[] for a sentinal value. */
+ /* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
@@ -337,7 +337,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
- limit[maxLen+1] = INT_MAX; /* Sentinal value for
+ limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
@@ -385,7 +385,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
- };
+ }
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index c0cfcfd486be..e6327391b6b6 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -112,6 +112,9 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
error("data corrupted");
goto exit_2;
}
+ } else if (size < 4) {
+ /* empty or end-of-file */
+ goto exit_3;
}
chunksize = get_unaligned_le32(inp);
@@ -125,6 +128,10 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
continue;
}
+ if (!fill && chunksize == 0) {
+ /* empty or end-of-file */
+ goto exit_3;
+ }
if (posp)
*posp += 4;
@@ -184,6 +191,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
}
}
+exit_3:
ret = 0;
exit_2:
if (!input)
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 1f439a622076..64c1358500ce 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -43,7 +43,6 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
int l;
u8 *parse = input;
u8 *end = input + in_len;
- u8 level = 0;
u16 version;
/*
@@ -65,7 +64,7 @@ STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
version = get_unaligned_be16(parse);
parse += 7;
if (version >= 0x0940)
- level = *parse++;
+ parse++;
if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
parse += 8; /* flags + filter info */
else
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 25d59a95bd66..a2f38e23004a 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -23,7 +23,7 @@
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
- * The structure of the .xz file in case of a compresed kernel is as follows.
+ * The structure of the .xz file in case of a compressed kernel is as follows.
* Sizes (as bytes) of the fields are in parenthesis.
*
* Stream Header (12)
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
index 790abc472f5b..6b629ab31c1e 100644
--- a/lib/decompress_unzstd.c
+++ b/lib/decompress_unzstd.c
@@ -16,7 +16,7 @@
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
- * The structure of the .zst file in case of a compresed kernel is as follows.
+ * The structure of the .zst file in case of a compressed kernel is as follows.
* Maximum sizes (as bytes) of the fields are in parenthesis.
*
* Frame Header: (18)
@@ -56,7 +56,7 @@
/*
* Preboot environments #include "path/to/decompress_unzstd.c".
* All of the source files we depend on must be #included.
- * zstd's only source dependeny is xxhash, which has no source
+ * zstd's only source dependency is xxhash, which has no source
* dependencies.
*
* When UNZSTD_PREBOOT is defined we declare __decompress(), which is
diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c
index c0d67c541849..60be9e24bd57 100644
--- a/lib/devmem_is_allowed.c
+++ b/lib/devmem_is_allowed.c
@@ -19,7 +19,7 @@
*/
int devmem_is_allowed(unsigned long pfn)
{
- if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+ if (iomem_is_exclusive(PFN_PHYS(pfn)))
return 0;
if (!page_is_ram(pfn))
return 1;
diff --git a/lib/devres.c b/lib/devres.c
index 4679dbb1bf5f..b0e1c6702c71 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -157,8 +157,10 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
dev_name(dev), res->name);
else
pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!pretty_name)
+ if (!pretty_name) {
+ dev_err(dev, "can't generate pretty name for resource %pR\n", res);
return IOMEM_ERR_PTR(-ENOMEM);
+ }
if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
@@ -353,7 +355,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* detach.
*
* This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succed once
+ * be safely called without context and guaranteed to succeed once
* allocated.
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 27f16872320d..cd3387bb34e5 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/buildid.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -36,6 +37,14 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
va_end(args);
}
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+#define BUILD_ID_FMT " %20phN"
+#define BUILD_ID_VAL vmlinux_build_id
+#else
+#define BUILD_ID_FMT "%s"
+#define BUILD_ID_VAL ""
+#endif
+
/**
* dump_stack_print_info - print generic debug info for dump_stack()
* @log_lvl: log level
@@ -45,13 +54,13 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(), current->pid, current->comm,
kexec_crash_loaded() ? "Kdump: loaded " : "",
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version, BUILD_ID_VAL);
if (dump_stack_arch_desc_str[0] != '\0')
printk("%sHardware name: %s\n",
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 641767b0dce2..cb5abb42c16a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -991,7 +991,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val,
ddebug_exec_queries((val ? val : "+p"), modname);
- return 0; /* query failure shouldnt stop module load */
+ return 0; /* query failure shouldn't stop module load */
}
/* handle both dyndbg and $module.dyndbg params at boot */
@@ -1117,9 +1117,9 @@ static int __init dynamic_debug_init(void)
goto out_err;
ddebug_init_success = 1;
- vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in __dyndbg section\n",
- modct, entries, (int)(modct * sizeof(struct ddebug_table)),
- (int)(entries * sizeof(struct _ddebug)));
+ vpr_info("%d prdebugs in %d modules, %d KiB in ddebug tables, %d kiB in __dyndbg section\n",
+ entries, modct, (int)((modct * sizeof(struct ddebug_table)) >> 10),
+ (int)((entries * sizeof(struct _ddebug)) >> 10));
/* apply ddebug_query boot param, dont unload tables on err */
if (ddebug_setup_string[0] != '\0') {
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b1678ed0017c..ae98ca17982e 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -3,7 +3,7 @@
/* */
/* Font file generated by cpi2fnt */
/* ------------------------------ */
-/* Combined with the alpha-numeric */
+/* Combined with the alphanumeric */
/* portion of Greg Harp's old PEARL */
/* font (from earlier versions of */
/* linux-m86k) by John Shifflett */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c701b7a187f2..e23123ae3a13 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -16,170 +16,137 @@
#define PIPE_PARANOIA /* for now */
-#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
- size_t left; \
- size_t wanted = n; \
- __p = i->iov; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } else { \
- left = 0; \
- } \
- while (unlikely(!left && n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted - n; \
-}
-
-#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
- size_t wanted = n; \
- __p = i->kvec; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- (void)(STEP); \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } \
- while (unlikely(n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- (void)(STEP); \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted; \
-}
-
-#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
- struct bvec_iter __start; \
- __start.bi_size = n; \
- __start.bi_bvec_done = skip; \
- __start.bi_idx = 0; \
- for_each_bvec(__v, i->bvec, __bi, __start) { \
- (void)(STEP); \
- } \
-}
-
-#define iterate_xarray(i, n, __v, skip, STEP) { \
+/* covers iovec and kvec alike */
+#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
+ size_t off = 0; \
+ size_t skip = i->iov_offset; \
+ do { \
+ len = min(n, __p->iov_len - skip); \
+ if (likely(len)) { \
+ base = __p->iov_base + skip; \
+ len -= (STEP); \
+ off += len; \
+ skip += len; \
+ n -= len; \
+ if (skip < __p->iov_len) \
+ break; \
+ } \
+ __p++; \
+ skip = 0; \
+ } while (n); \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_bvec(i, n, base, len, off, p, STEP) { \
+ size_t off = 0; \
+ unsigned skip = i->iov_offset; \
+ while (n) { \
+ unsigned offset = p->bv_offset + skip; \
+ unsigned left; \
+ void *kaddr = kmap_local_page(p->bv_page + \
+ offset / PAGE_SIZE); \
+ base = kaddr + offset % PAGE_SIZE; \
+ len = min(min(n, (size_t)(p->bv_len - skip)), \
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ off += len; \
+ skip += len; \
+ if (skip == p->bv_len) { \
+ skip = 0; \
+ p++; \
+ } \
+ n -= len; \
+ if (left) \
+ break; \
+ } \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_xarray(i, n, base, len, __off, STEP) { \
+ __label__ __out; \
+ size_t __off = 0; \
struct page *head = NULL; \
- size_t wanted = n, seg, offset; \
- loff_t start = i->xarray_start + skip; \
- pgoff_t index = start >> PAGE_SHIFT; \
+ loff_t start = i->xarray_start + i->iov_offset; \
+ unsigned offset = start % PAGE_SIZE; \
+ pgoff_t index = start / PAGE_SIZE; \
int j; \
\
XA_STATE(xas, i->xarray, index); \
\
- rcu_read_lock(); \
- xas_for_each(&xas, head, ULONG_MAX) { \
- if (xas_retry(&xas, head)) \
- continue; \
- if (WARN_ON(xa_is_value(head))) \
- break; \
- if (WARN_ON(PageHuge(head))) \
- break; \
+ rcu_read_lock(); \
+ xas_for_each(&xas, head, ULONG_MAX) { \
+ unsigned left; \
+ if (xas_retry(&xas, head)) \
+ continue; \
+ if (WARN_ON(xa_is_value(head))) \
+ break; \
+ if (WARN_ON(PageHuge(head))) \
+ break; \
for (j = (head->index < index) ? index - head->index : 0; \
- j < thp_nr_pages(head); j++) { \
- __v.bv_page = head + j; \
- offset = (i->xarray_start + skip) & ~PAGE_MASK; \
- seg = PAGE_SIZE - offset; \
- __v.bv_offset = offset; \
- __v.bv_len = min(n, seg); \
- (void)(STEP); \
- n -= __v.bv_len; \
- skip += __v.bv_len; \
- if (n == 0) \
- break; \
- } \
- if (n == 0) \
- break; \
- } \
- rcu_read_unlock(); \
- n = wanted - n; \
-}
-
-#define iterate_all_kinds(i, n, v, I, B, K, X) { \
- if (likely(n)) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- } else if (unlikely(i->type & ITER_XARRAY)) { \
- struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)); \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
+ j < thp_nr_pages(head); j++) { \
+ void *kaddr = kmap_local_page(head + j); \
+ base = kaddr + offset; \
+ len = PAGE_SIZE - offset; \
+ len = min(n, len); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ __off += len; \
+ n -= len; \
+ if (left || n == 0) \
+ goto __out; \
+ offset = 0; \
} \
} \
+__out: \
+ rcu_read_unlock(); \
+ i->iov_offset += __off; \
+ n = __off; \
}
-#define iterate_and_advance(i, n, v, I, B, K, X) { \
+#define __iterate_and_advance(i, n, base, len, off, I, K) { \
if (unlikely(i->count < n)) \
n = i->count; \
- if (i->count) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
+ if (likely(n)) { \
+ if (likely(iter_is_iovec(i))) { \
+ const struct iovec *iov = i->iov; \
+ void __user *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ iov, (I)) \
+ i->nr_segs -= iov - i->iov; \
+ i->iov = iov; \
+ } else if (iov_iter_is_bvec(i)) { \
const struct bio_vec *bvec = i->bvec; \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
- i->nr_segs -= i->bvec - bvec; \
- skip = __bi.bi_bvec_done; \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- if (skip == kvec->iov_len) { \
- kvec++; \
- skip = 0; \
- } \
+ void *base; \
+ size_t len; \
+ iterate_bvec(i, n, base, len, off, \
+ bvec, (K)) \
+ i->nr_segs -= bvec - i->bvec; \
+ i->bvec = bvec; \
+ } else if (iov_iter_is_kvec(i)) { \
+ const struct kvec *kvec = i->kvec; \
+ void *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ kvec, (K)) \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
- } else if (unlikely(i->type & ITER_DISCARD)) { \
- skip += n; \
- } else if (unlikely(i->type & ITER_XARRAY)) { \
- struct bio_vec v; \
- iterate_xarray(i, n, v, skip, (X)) \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- if (skip == iov->iov_len) { \
- iov++; \
- skip = 0; \
- } \
- i->nr_segs -= iov - i->iov; \
- i->iov = iov; \
+ } else if (iov_iter_is_xarray(i)) { \
+ void *base; \
+ size_t len; \
+ iterate_xarray(i, n, base, len, off, \
+ (K)) \
} \
i->count -= n; \
- i->iov_offset = skip; \
} \
}
+#define iterate_and_advance(i, n, base, len, off, I, K) \
+ __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
{
@@ -469,19 +436,25 @@ out:
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
* because it is an invalid address).
*/
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
{
- size_t skip = i->iov_offset;
- const struct iovec *iov;
- int err;
- struct iovec v;
+ if (iter_is_iovec(i)) {
+ const struct iovec *p;
+ size_t skip;
- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
- iterate_iovec(i, bytes, v, iov, skip, ({
- err = fault_in_pages_readable(v.iov_base, v.iov_len);
+ if (bytes > i->count)
+ bytes = i->count;
+ for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
+ size_t len = min(bytes, p->iov_len - skip);
+ int err;
+
+ if (unlikely(!len))
+ continue;
+ err = fault_in_pages_readable(p->iov_base + skip, len);
if (unlikely(err))
- return err;
- 0;}))
+ return err;
+ bytes -= len;
+ }
}
return 0;
}
@@ -492,19 +465,14 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- direction &= READ | WRITE;
-
- /* It will get better. Eventually... */
- if (uaccess_kernel()) {
- i->type = ITER_KVEC | direction;
- i->kvec = (struct kvec *)iov;
- } else {
- i->type = ITER_IOVEC | direction;
- i->iov = iov;
- }
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_IOVEC,
+ .data_source = direction,
+ .iov = iov,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_init);
@@ -613,55 +581,45 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- struct csum_state *csstate,
- struct iov_iter *i)
+ struct iov_iter *i, __wsum *sump)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
- __wsum sum = csstate->csum;
- size_t off = csstate->off;
+ __wsum sum = *sump;
+ size_t off = 0;
unsigned int i_head;
- size_t n, r;
+ size_t r;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &r);
- if (unlikely(!n))
- return 0;
- do {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
- char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
- sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
- kunmap_atomic(p);
+ bytes = push_pipe(i, bytes, &i_head, &r);
+ while (bytes) {
+ size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
+ kunmap_local(p);
i->head = i_head;
i->iov_offset = r + chunk;
- n -= chunk;
+ bytes -= chunk;
off += chunk;
- addr += chunk;
r = 0;
i_head++;
- } while (n);
- i->count -= bytes;
- csstate->csum = sum;
- csstate->off = off;
- return bytes;
+ }
+ *sump = sum;
+ i->count -= off;
+ return off;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyout(base, addr + off, len),
+ memcpy(base, addr + off, len)
)
return bytes;
@@ -678,19 +636,6 @@ static int copyout_mc(void __user *to, const void *from, size_t n)
return n;
}
-static unsigned long copy_mc_to_page(struct page *page, size_t offset,
- const char *from, size_t len)
-{
- unsigned long ret;
- char *to;
-
- to = kmap_atomic(page);
- ret = copy_mc_to_kernel(to + offset, from, len);
- kunmap_atomic(to);
-
- return ret;
-}
-
static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
@@ -702,25 +647,23 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &off);
- if (unlikely(!n))
- return 0;
- do {
+ n = push_pipe(i, bytes, &i_head, &off);
+ while (n) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
unsigned long rem;
-
- rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
- off, addr, chunk);
+ rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
+ chunk -= rem;
+ kunmap_local(p);
i->head = i_head;
- i->iov_offset = off + chunk - rem;
- xfer += chunk - rem;
+ i->iov_offset = off + chunk;
+ xfer += chunk;
if (rem)
break;
n -= chunk;
- addr += chunk;
off = 0;
i_head++;
- } while (n);
+ }
i->count -= xfer;
return xfer;
}
@@ -750,46 +693,13 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- const char *from = addr;
- unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
-
if (unlikely(iov_iter_is_pipe(i)))
return copy_mc_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
- ({
- rem = copy_mc_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
- - v.iov_len, v.iov_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = copy_mc_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- rcu_read_unlock();
- i->iov_offset += bytes;
- i->count -= bytes;
- return bytes;
- }
- })
+ __iterate_and_advance(i, bytes, base, len, off,
+ copyout_mc(base, addr + off, len),
+ copy_mc_to_kernel(base, addr + off, len)
)
return bytes;
@@ -799,70 +709,30 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
if (iter_is_iovec(i))
might_fault();
- iterate_and_advance(i, bytes, v,
- copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(addr + off, base, len),
+ memcpy(addr + off, base, len)
)
return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
-
- if (iter_is_iovec(i))
- might_fault();
- iterate_all_kinds(i, bytes, v, ({
- if (copyin((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full);
-
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v,
- __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_inatomic_nocache(addr + off, base, len),
+ memcpy(addr + off, base, len)
)
return bytes;
@@ -886,20 +756,13 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
- char *to = addr;
if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v,
- __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_flushcache(addr + off, base, len),
+ memcpy_flushcache(addr + off, base, len)
)
return bytes;
@@ -907,32 +770,6 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
-{
- char *to = addr;
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
- )
-
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(_copy_from_iter_full_nocache);
-
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
@@ -957,22 +794,51 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
return false;
}
+static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ if (likely(iter_is_iovec(i)))
+ return copy_page_to_iter_iovec(page, offset, bytes, i);
+ if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ void *kaddr = kmap_local_page(page);
+ size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
+ kunmap_local(kaddr);
+ return wanted;
+ }
+ if (iov_iter_is_pipe(i))
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
+ if (unlikely(iov_iter_is_discard(i))) {
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ i->count -= bytes;
+ return bytes;
+ }
+ WARN_ON(1);
+ return 0;
+}
+
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ size_t res = 0;
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else if (unlikely(iov_iter_is_discard(i)))
- return bytes;
- else if (likely(!iov_iter_is_pipe(i)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- else
- return copy_page_to_iter_pipe(page, offset, bytes, i);
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ size_t n = __copy_page_to_iter(page, offset,
+ min(bytes, (size_t)PAGE_SIZE - offset), i);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
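
Editor's note: the reworked copy_page_to_iter() above splits one call across the subpages of a compound page, so callers no longer have to worry about PAGE_SIZE boundaries themselves. A minimal caller sketch, assuming a kernel context; the helper name and buffer are hypothetical and only APIs visible in this patch are used:

    #include <linux/uio.h>

    /* Copy the first @len bytes of @page into a kernel buffer via an iterator. */
    static int example_read_page(struct page *page, void *buf, size_t len)
    {
    	struct kvec kv = { .iov_base = buf, .iov_len = len };
    	struct iov_iter iter;

    	/* READ: data flows from the page into the iterator's buffer. */
    	iov_iter_kvec(&iter, READ, &kv, 1, len);
    	return copy_page_to_iter(page, 0, len, &iter) == len ? 0 : -EFAULT;
    }
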
@@ -981,17 +847,16 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return 0;
- }
- if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
- void *kaddr = kmap_atomic(page);
+ if (likely(iter_is_iovec(i)))
+ return copy_page_from_iter_iovec(page, offset, bytes, i);
+ if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ void *kaddr = kmap_local_page(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
return wanted;
- } else
- return copy_page_from_iter_iovec(page, offset, bytes, i);
+ }
+ WARN_ON(1);
+ return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
@@ -1011,7 +876,9 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
+ char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ memset(p + off, 0, chunk);
+ kunmap_local(p);
i->head = i_head;
i->iov_offset = off + chunk;
n -= chunk;
@@ -1026,19 +893,17 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
if (unlikely(iov_iter_is_pipe(i)))
return pipe_zero(bytes, i);
- iterate_and_advance(i, bytes, v,
- clear_user(v.iov_base, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, count,
+ clear_user(base, len),
+ memset(base, 0, len)
)
return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
+ struct iov_iter *i)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
if (unlikely(!page_copy_sane(page, offset, bytes))) {
@@ -1050,18 +915,14 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
WARN_ON(1);
return 0;
}
- iterate_all_kinds(i, bytes, v,
- copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len)
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(p + off, base, len),
+ memcpy(p + off, base, len)
)
kunmap_atomic(kaddr);
return bytes;
}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+EXPORT_SYMBOL(copy_page_from_iter_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
@@ -1092,8 +953,6 @@ static inline void pipe_truncate(struct iov_iter *i)
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- if (unlikely(i->count < size))
- size = i->count;
if (size) {
struct pipe_buffer *buf;
unsigned int p_mask = pipe->ring_size - 1;
@@ -1132,27 +991,42 @@ static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
i->iov_offset = bi.bi_bvec_done;
}
-void iov_iter_advance(struct iov_iter *i, size_t size)
+static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- pipe_advance(i, size);
- return;
- }
- if (unlikely(iov_iter_is_discard(i))) {
- i->count -= size;
+ const struct iovec *iov, *end;
+
+ if (!i->count)
return;
+ i->count -= size;
+
+ size += i->iov_offset; // from beginning of current segment
+ for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
+ if (likely(size < iov->iov_len))
+ break;
+ size -= iov->iov_len;
}
- if (unlikely(iov_iter_is_xarray(i))) {
- size = min(size, i->count);
+ i->iov_offset = size;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+ if (unlikely(i->count < size))
+ size = i->count;
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
+ /* iovec and kvec have identical layouts */
+ iov_iter_iovec_advance(i, size);
+ } else if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ } else if (iov_iter_is_pipe(i)) {
+ pipe_advance(i, size);
+ } else if (unlikely(iov_iter_is_xarray(i))) {
i->iov_offset += size;
i->count -= size;
- return;
- }
- if (iov_iter_is_bvec(i)) {
- iov_iter_bvec_advance(i, size);
- return;
+ } else if (iov_iter_is_discard(i)) {
+ i->count -= size;
}
- iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
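
Editor's note: iov_iter_advance() now clamps @size to i->count up front and dispatches per iterator type instead of going through iterate_and_advance(). A short usage sketch, assuming a hypothetical header length that a caller wants to skip before copying out the payload:

    #include <linux/uio.h>

    /* Skip @hdr_len bytes of the source iterator, then copy the payload. */
    static size_t example_copy_payload(void *dst, size_t len,
    				   struct iov_iter *from, size_t hdr_len)
    {
    	iov_iter_advance(from, hdr_len);	/* never moves past i->count */
    	return copy_from_iter(dst, len, from);
    }
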
@@ -1234,16 +1108,13 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i)))
- return i->count; // it is a silly place, anyway
- if (i->nr_segs == 1)
- return i->count;
- if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
- return i->count;
- if (iov_iter_is_bvec(i))
- return min(i->count, i->bvec->bv_len - i->iov_offset);
- else
- return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (i->nr_segs > 1) {
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return min(i->count, i->iov->iov_len - i->iov_offset);
+ if (iov_iter_is_bvec(i))
+ return min(i->count, i->bvec->bv_len - i->iov_offset);
+ }
+ return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
@@ -1252,11 +1123,14 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_KVEC | (direction & (READ | WRITE));
- i->kvec = kvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_KVEC,
+ .data_source = direction,
+ .kvec = kvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_kvec);
@@ -1265,11 +1139,14 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
- i->type = ITER_BVEC | (direction & (READ | WRITE));
- i->bvec = bvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
+ *i = (struct iov_iter){
+ .iter_type = ITER_BVEC,
+ .data_source = direction,
+ .bvec = bvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_bvec);
@@ -1279,12 +1156,15 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
{
BUG_ON(direction != READ);
WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
- i->type = ITER_PIPE | READ;
- i->pipe = pipe;
- i->head = pipe->head;
- i->iov_offset = 0;
- i->count = count;
- i->start_head = i->head;
+ *i = (struct iov_iter){
+ .iter_type = ITER_PIPE,
+ .data_source = false,
+ .pipe = pipe,
+ .head = pipe->head,
+ .start_head = pipe->head,
+ .iov_offset = 0,
+ .count = count
+ };
}
EXPORT_SYMBOL(iov_iter_pipe);
@@ -1305,11 +1185,14 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
struct xarray *xarray, loff_t start, size_t count)
{
BUG_ON(direction & ~1);
- i->type = ITER_XARRAY | (direction & (READ | WRITE));
- i->xarray = xarray;
- i->xarray_start = start;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter) {
+ .iter_type = ITER_XARRAY,
+ .data_source = direction,
+ .xarray = xarray,
+ .xarray_start = start,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_xarray);
@@ -1325,56 +1208,103 @@ EXPORT_SYMBOL(iov_iter_xarray);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
BUG_ON(direction != READ);
- i->type = ITER_DISCARD | READ;
- i->count = count;
- i->iov_offset = 0;
+ *i = (struct iov_iter){
+ .iter_type = ITER_DISCARD,
+ .data_source = false,
+ .count = count,
+ .iov_offset = 0
+ };
}
EXPORT_SYMBOL(iov_iter_discard);
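
Editor's note: iov_iter_discard() sets up a sink iterator: data "copied" to it is counted and dropped, never stored. A sketch of draining bytes that a producer insists on writing somewhere, with hypothetical names:

    #include <linux/uio.h>

    /* Throw away @count bytes without providing a real buffer. */
    static void example_drain(const void *src, size_t count)
    {
    	struct iov_iter sink;

    	iov_iter_discard(&sink, READ, count);	/* READ is the only allowed direction */
    	copy_to_iter(src, count, &sink);	/* bytes are advanced, not stored */
    }
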
-unsigned long iov_iter_alignment(const struct iov_iter *i)
+static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->iov[k].iov_len - skip;
+ if (len) {
+ res |= (unsigned long)i->iov[k].iov_base + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ }
+ return res;
+}
- if (unlikely(iov_iter_is_pipe(i))) {
+static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
+{
+ unsigned res = 0;
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+ res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ return res;
+}
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_alignment_iovec(i);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_alignment_bvec(i);
+
+ if (iov_iter_is_pipe(i)) {
unsigned int p_mask = i->pipe->ring_size - 1;
+ size_t size = i->count;
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
return size | i->iov_offset;
return size;
}
- if (unlikely(iov_iter_is_xarray(i)))
+
+ if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
- iterate_all_kinds(i, size, v,
- (res |= (unsigned long)v.iov_base | v.iov_len, 0),
- res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len,
- res |= v.bv_offset | v.bv_len
- )
- return res;
+
+ return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
+ unsigned long v = 0;
size_t size = i->count;
+ unsigned k;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
+ if (WARN_ON(!iter_is_iovec(i)))
return ~0U;
- }
- iterate_all_kinds(i, size, v,
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0), 0),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0))
- );
+ for (k = 0; k < i->nr_segs; k++) {
+ if (i->iov[k].iov_len) {
+ unsigned long base = (unsigned long)i->iov[k].iov_base;
+ if (v) // if not the first one
+ res |= base | v; // this start | previous end
+ v = base + i->iov[k].iov_len;
+ if (size <= i->iov[k].iov_len)
+ break;
+ size -= i->iov[k].iov_len;
+ }
+ }
return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
@@ -1409,9 +1339,6 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
unsigned int iter_head, npages;
size_t capacity;
- if (!maxsize)
- return 0;
-
if (!sanity(i))
return -EFAULT;
@@ -1492,29 +1419,67 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
return actual;
}
+/* must be called on a non-empty ITER_IOVEC iterator */
+static unsigned long first_iovec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start,
+ size_t maxsize, unsigned maxpages)
+{
+ size_t skip;
+ long k;
+
+ for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
+ unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
+ size_t len = i->iov[k].iov_len - skip;
+
+ if (unlikely(!len))
+ continue;
+ if (len > maxsize)
+ len = maxsize;
+ len += (*start = addr % PAGE_SIZE);
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ *size = len;
+ return addr & PAGE_MASK;
+ }
+ BUG(); // if it had been empty, we wouldn't get called
+}
+
+/* must be called on a non-empty ITER_BVEC iterator */
+static struct page *first_bvec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start,
+ size_t maxsize, unsigned maxpages)
+{
+ struct page *page;
+ size_t skip = i->iov_offset, len;
+
+ len = i->bvec->bv_len - skip;
+ if (len > maxsize)
+ len = maxsize;
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ len += (*start = skip % PAGE_SIZE);
+ if (len > maxpages * PAGE_SIZE)
+ len = maxpages * PAGE_SIZE;
+ *size = len;
+ return page;
+}
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
+ size_t len;
+ int n, res;
+
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_xarray(i)))
- return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (likely(iter_is_iovec(i))) {
+ unsigned long addr;
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- addr &= ~(PAGE_SIZE - 1);
+ addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
n = DIV_ROUND_UP(len, PAGE_SIZE);
res = get_user_pages_fast(addr, n,
iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
@@ -1522,17 +1487,21 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- get_page(*pages = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- }),
- 0
- )
- return 0;
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page *page;
+
+ page = first_bvec_segment(i, &len, start, maxsize, maxpages);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ while (n--)
+ get_page(*pages++ = page++);
+ return len - *start;
+ }
+ if (iov_iter_is_pipe(i))
+ return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
+ return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
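
Editor's note: iov_iter_get_pages() now has one straight-line path per iterator type, with first_iovec_segment()/first_bvec_segment() locating the first non-empty segment. A sketch of grabbing the first page of a user-backed (ITER_IOVEC) iterator; the helper name is hypothetical and the caller is assumed to release the page with put_page() once done:

    #include <linux/uio.h>
    #include <linux/mm.h>

    /* Returns how many bytes start at @*offset inside @*page, or an error. */
    static ssize_t example_pin_first_page(struct iov_iter *iter,
    				      struct page **page, size_t *offset)
    {
    	ssize_t bytes = iov_iter_get_pages(iter, page, PAGE_SIZE, 1, offset);

    	if (bytes <= 0)
    		return bytes ? bytes : -EFAULT;
    	return bytes;	/* caller must put_page(*page) when finished */
    }
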
@@ -1549,9 +1518,6 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
unsigned int iter_head, npages;
ssize_t n;
- if (!maxsize)
- return 0;
-
if (!sanity(i))
return -EFAULT;
@@ -1624,24 +1590,18 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
+ size_t len;
+ int n, res;
if (maxsize > i->count)
maxsize = i->count;
+ if (!maxsize)
+ return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return pipe_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_xarray(i)))
- return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
- if (unlikely(iov_iter_is_discard(i)))
- return -EFAULT;
-
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
+ if (likely(iter_is_iovec(i))) {
+ unsigned long addr;
- addr &= ~(PAGE_SIZE - 1);
+ addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
n = DIV_ROUND_UP(len, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
@@ -1654,61 +1614,42 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- *pages = p = get_pages_array(1);
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page *page;
+
+ page = first_bvec_segment(i, &len, start, maxsize, ~0U);
+ n = DIV_ROUND_UP(len, PAGE_SIZE);
+ *pages = p = get_pages_array(n);
if (!p)
return -ENOMEM;
- get_page(*p = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- }), 0
- )
- return 0;
+ while (n--)
+ get_page(*p++ = page++);
+ return len - *start;
+ }
+ if (iov_iter_is_pipe(i))
+ return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
+ return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
- char *to = addr;
__wsum sum, next;
- size_t off = 0;
sum = *csum;
if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
- iterate_and_advance(i, bytes, v, ({
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len);
- if (next) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- next ? 0 : v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
+ iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_from_user(base, addr + off, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
}), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
+ sum = csum_and_memcpy(addr + off, base, len, sum, off);
})
)
*csum = sum;
@@ -1716,104 +1657,30 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len);
- if (!next)
- return false;
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len,
- sum, off);
- off += v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
- p + v.bv_offset, v.bv_len,
- sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- })
- )
- *csum = sum;
- iov_iter_advance(i, bytes);
- return true;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
struct csum_state *csstate = _csstate;
- const char *from = addr;
__wsum sum, next;
- size_t off;
-
- if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = csstate->csum;
- off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
- iterate_and_advance(i, bytes, v, ({
- next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len);
- if (next) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- next ? 0 : v.iov_len;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
- }),({
- sum = csum_and_memcpy(v.iov_base,
- (from += v.iov_len) - v.iov_len,
- v.iov_len, sum, off);
- off += v.iov_len;
+
+ sum = csum_shift(csstate->csum, csstate->off);
+ if (unlikely(iov_iter_is_pipe(i)))
+ bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
+ else iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_to_user(addr + off, base, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
}), ({
- char *p = kmap_atomic(v.bv_page);
- sum = csum_and_memcpy(p + v.bv_offset,
- (from += v.bv_len) - v.bv_len,
- v.bv_len, sum, off);
- kunmap_atomic(p);
- off += v.bv_len;
+ sum = csum_and_memcpy(base, addr + off, len, sum, off);
})
)
- csstate->csum = sum;
- csstate->off = off;
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
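
Editor's note: csum_and_copy_to_iter() now keeps the running checksum rotated with csum_shift() instead of threading the offset through every step. A caller sketch, assuming only the struct csum_state fields (.csum, .off) referenced in the hunk above; the helper name and zero initial checksum are hypothetical:

    #include <linux/uio.h>

    /* Checksum a payload while copying it to a (non-discard) iterator. */
    static size_t example_csum_copy(const void *src, size_t len, struct iov_iter *to)
    {
    	struct csum_state csstate = { .csum = 0, .off = 0 };
    	size_t copied = csum_and_copy_to_iter(src, len, &csstate, to);

    	/* csstate.csum now covers @copied bytes; csstate.off advanced by them. */
    	return copied;
    }
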
@@ -1837,19 +1704,56 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
-int iov_iter_npages(const struct iov_iter *i, int maxpages)
+static int iov_npages(const struct iov_iter *i, int maxpages)
{
- size_t size = i->count;
+ size_t skip = i->iov_offset, size = i->count;
+ const struct iovec *p;
int npages = 0;
- if (!size)
- return 0;
- if (unlikely(iov_iter_is_discard(i)))
- return 0;
+ for (p = i->iov; size; skip = 0, p++) {
+ unsigned offs = offset_in_page(p->iov_base + skip);
+ size_t len = min(p->iov_len - skip, size);
- if (unlikely(iov_iter_is_pipe(i))) {
- struct pipe_inode_info *pipe = i->pipe;
+ if (len) {
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ }
+ return npages;
+}
+
+static int bvec_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t skip = i->iov_offset, size = i->count;
+ const struct bio_vec *p;
+ int npages = 0;
+
+ for (p = i->bvec; size; skip = 0, p++) {
+ unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
+ size_t len = min(p->bv_len - skip, size);
+
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ return npages;
+}
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+ if (unlikely(!i->count))
+ return 0;
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_npages(i, maxpages);
+ if (iov_iter_is_bvec(i))
+ return bvec_npages(i, maxpages);
+ if (iov_iter_is_pipe(i)) {
unsigned int iter_head;
+ int npages;
size_t off;
if (!sanity(i))
@@ -1857,44 +1761,15 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
data_start(i, &iter_head, &off);
/* some of this one + all after this one */
- npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
- if (npages >= maxpages)
- return maxpages;
- } else if (unlikely(iov_iter_is_xarray(i))) {
- unsigned offset;
-
- offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
-
- npages = 1;
- if (size > PAGE_SIZE - offset) {
- size -= PAGE_SIZE - offset;
- npages += size >> PAGE_SHIFT;
- size &= ~PAGE_MASK;
- if (size)
- npages++;
- }
- if (npages >= maxpages)
- return maxpages;
- } else iterate_all_kinds(i, size, v, ({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- 0;}),({
- npages++;
- if (npages >= maxpages)
- return maxpages;
- }),({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- }),
- 0
- )
- return npages;
+ npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
+ return min(npages, maxpages);
+ }
+ if (iov_iter_is_xarray(i)) {
+ unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
+ int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
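
Editor's note: the per-segment page count in iov_npages()/bvec_npages() above is DIV_ROUND_UP(offs + len, PAGE_SIZE); for example, a 5000-byte segment that starts 3000 bytes into a 4 KiB page touches DIV_ROUND_UP(3000 + 5000, 4096) = 2 pages.
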
@@ -2093,30 +1968,3 @@ int import_single_range(int rw, void __user *buf, size_t len,
return 0;
}
EXPORT_SYMBOL(import_single_range);
-
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context)
-{
- struct kvec w;
- int err = -EINVAL;
- if (!bytes)
- return 0;
-
- iterate_all_kinds(i, bytes, v, -EINVAL, ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;}), ({
- w = v;
- err = f(&w, context);}), ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;})
- )
- return err;
-}
-EXPORT_SYMBOL(iov_iter_for_each_range);
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 70dab9ac7827..12f5a347aa13 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -415,7 +415,7 @@ static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
)
/*
- * __kfifo_poke_n internal helper function for storeing the length of
+ * __kfifo_poke_n internal helper function for storing the length of
* the record into the fifo
*/
static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 0b5fe8b41173..059b8b00dc53 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -14,11 +14,12 @@
*/
#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
+#include <linux/math64.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+
#include "kstrtox.h"
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 9214c493d8b7..b71db0abc12b 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -64,7 +64,7 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
debugfs_print_result(seq, suite, test_case);
seq_printf(seq, "%s %d - %s\n",
- kunit_status_to_string(success), 1, suite->name);
+ kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 15832ed44668..acd1de436f59 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/reboot.h>
#include <kunit/test.h>
#include <linux/glob.h>
#include <linux/moduleparam.h>
@@ -13,13 +14,17 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
#if IS_BUILTIN(CONFIG_KUNIT)
-static char *filter_glob;
-module_param(filter_glob, charp, 0);
+static char *filter_glob_param;
+module_param_named(filter_glob, filter_glob_param, charp, 0);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites run at boot-time, e.g. list*");
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
static struct kunit_suite * const *
-kunit_filter_subsuite(struct kunit_suite * const * const subsuite)
+kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
+ const char *filter_glob)
{
int i, n = 0;
struct kunit_suite **filtered;
@@ -52,19 +57,14 @@ struct suite_set {
struct kunit_suite * const * const *end;
};
-static struct suite_set kunit_filter_suites(void)
+static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
+ const char *filter_glob)
{
int i;
struct kunit_suite * const **copy, * const *filtered_subsuite;
struct suite_set filtered;
- const size_t max = __kunit_suites_end - __kunit_suites_start;
-
- if (!filter_glob) {
- filtered.start = __kunit_suites_start;
- filtered.end = __kunit_suites_end;
- return filtered;
- }
+ const size_t max = suite_set->end - suite_set->start;
copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
filtered.start = copy;
@@ -74,7 +74,7 @@ static struct suite_set kunit_filter_suites(void)
}
for (i = 0; i < max; ++i) {
- filtered_subsuite = kunit_filter_subsuite(__kunit_suites_start[i]);
+ filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], filter_glob);
if (filtered_subsuite)
*copy++ = filtered_subsuite;
}
@@ -82,6 +82,20 @@ static struct suite_set kunit_filter_suites(void)
return filtered;
}
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+
+}
+
static void kunit_print_tap_header(struct suite_set *suite_set)
{
struct kunit_suite * const * const *suites, * const *subsuite;
@@ -98,21 +112,32 @@ static void kunit_print_tap_header(struct suite_set *suite_set)
int kunit_run_all_tests(void)
{
struct kunit_suite * const * const *suites;
+ struct suite_set suite_set = {
+ .start = __kunit_suites_start,
+ .end = __kunit_suites_end,
+ };
- struct suite_set suite_set = kunit_filter_suites();
+ if (filter_glob_param)
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
kunit_print_tap_header(&suite_set);
for (suites = suite_set.start; suites < suite_set.end; suites++)
__kunit_test_suites_init(*suites);
- if (filter_glob) { /* a copy was made of each array */
+ if (filter_glob_param) { /* a copy was made of each array */
for (suites = suite_set.start; suites < suite_set.end; suites++)
kfree(*suites);
kfree(suite_set.start);
}
+ kunit_handle_shutdown();
+
return 0;
}
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+#include "executor_test.c"
+#endif
+
#endif /* IS_BUILTIN(CONFIG_KUNIT) */
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
new file mode 100644
index 000000000000..cdbe54b16501
--- /dev/null
+++ b/lib/kunit/executor_test.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the KUnit executor.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/test.h>
+
+static void kfree_at_end(struct kunit *test, const void *to_free);
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name);
+
+static void filter_subsuite_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+ struct kunit_suite * const *filtered;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1");
+ subsuite[1] = alloc_fake_suite(test, "suite2");
+
+ /* Want: suite1, suite2, NULL -> suite2, NULL */
+ filtered = kunit_filter_subsuite(subsuite, "suite2*");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
+ kfree_at_end(test, filtered);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
+
+ KUNIT_EXPECT_FALSE(test, filtered[1]);
+}
+
+static void filter_subsuite_to_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+ struct kunit_suite * const *filtered;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1");
+ subsuite[1] = alloc_fake_suite(test, "suite2");
+
+ filtered = kunit_filter_subsuite(subsuite, "not_found");
+ kfree_at_end(test, filtered); /* just in case */
+
+ KUNIT_EXPECT_FALSE_MSG(test, filtered,
+ "should be NULL to indicate no match");
+}
+
+static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_set)
+{
+ struct kunit_suite * const * const *suites;
+
+ kfree_at_end(test, suite_set->start);
+ for (suites = suite_set->start; suites < suite_set->end; suites++)
+ kfree_at_end(test, *suites);
+}
+
+static void filter_suites_test(struct kunit *test)
+{
+ /* Suites per-file are stored as a NULL terminated array */
+ struct kunit_suite *subsuites[2][2] = {
+ {NULL, NULL},
+ {NULL, NULL},
+ };
+ /* Match the memory layout of suite_set */
+ struct kunit_suite * const * const suites[2] = {
+ subsuites[0], subsuites[1],
+ };
+
+ const struct suite_set suite_set = {
+ .start = suites,
+ .end = suites + 2,
+ };
+ struct suite_set filtered = {.start = NULL, .end = NULL};
+
+ /* Emulate two files, each having one suite */
+ subsuites[0][0] = alloc_fake_suite(test, "suite0");
+ subsuites[1][0] = alloc_fake_suite(test, "suite1");
+
+ /* Filter out suite1 */
+ filtered = kunit_filter_suites(&suite_set, "suite0");
+ kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */
+ KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
+}
+
+static struct kunit_case executor_test_cases[] = {
+ KUNIT_CASE(filter_subsuite_test),
+ KUNIT_CASE(filter_subsuite_to_empty_test),
+ KUNIT_CASE(filter_suites_test),
+ {}
+};
+
+static struct kunit_suite executor_test_suite = {
+ .name = "kunit_executor_test",
+ .test_cases = executor_test_cases,
+};
+
+kunit_test_suites(&executor_test_suite);
+
+/* Test helpers */
+
+static void kfree_res_free(struct kunit_resource *res)
+{
+ kfree(res->data);
+}
+
+/* Use the resource API to register a call to kfree(to_free).
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+static void kfree_at_end(struct kunit *test, const void *to_free)
+{
+ /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
+ if (IS_ERR_OR_NULL(to_free))
+ return;
+ kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+ (void *)to_free);
+}
+
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name)
+{
+ struct kunit_suite *suite;
+
+ /* We normally never expect to allocate suites, hence the non-const cast. */
+ suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
+ strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+
+ return suite;
+}
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index be1164ecc476..51099b0ca29c 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -41,6 +41,35 @@ static int example_test_init(struct kunit *test)
}
/*
+ * This test should always be skipped.
+ */
+static void example_skip_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should not see a line below.");
+
+ /* Skip (and abort) the test */
+ kunit_skip(test, "this test should be skipped");
+
+ /* This line should not execute */
+ KUNIT_FAIL(test, "You should not see this line.");
+}
+
+/*
+ * This test should always be marked skipped.
+ */
+static void example_mark_skipped_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should see a line below.");
+
+ /* Skip (but do not abort) the test */
+ kunit_mark_skipped(test, "this test should be skipped");
+
+ /* This line should run */
+ kunit_info(test, "You should see this line.");
+}
+/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
*/
@@ -52,6 +81,8 @@ static struct kunit_case example_test_cases[] = {
* test suite.
*/
KUNIT_CASE(example_simple_test),
+ KUNIT_CASE(example_skip_test),
+ KUNIT_CASE(example_mark_skipped_test),
{}
};
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 69f902440a0e..d69efcbed624 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
#endif
}
+static void kunit_status_set_failure_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+ kunit_set_failure(&fake);
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ /* Before: Should be SUCCESS with no comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+ /* Mark the test as skipped. */
+ kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+ /* After: Should be SKIPPED with our comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+ KUNIT_CASE(kunit_status_set_failure_test),
+ KUNIT_CASE(kunit_status_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+ .name = "kunit_status",
+ .test_cases = kunit_status_test_cases,
+};
+
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
- &kunit_log_test_suite);
+ &kunit_log_test_suite, &kunit_status_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
index fe98a00b75a9..5e94b623454f 100644
--- a/lib/kunit/string-stream.h
+++ b/lib/kunit/string-stream.h
@@ -35,9 +35,9 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
int __printf(2, 3) string_stream_add(struct string_stream *stream,
const char *fmt, ...);
-int string_stream_vadd(struct string_stream *stream,
- const char *fmt,
- va_list args);
+int __printf(2, 0) string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args);
char *string_stream_get_string(struct string_stream *stream);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 45f068864d76..d79ecb86ea57 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)
static void kunit_print_ok_not_ok(void *test_or_suite,
bool is_test,
- bool is_ok,
+ enum kunit_status status,
size_t test_number,
- const char *description)
+ const char *description,
+ const char *directive)
{
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit *test = is_test ? test_or_suite : NULL;
+ const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
/*
* We do not log the test suite results as doing so would
@@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
* representation.
*/
if (suite)
- pr_info("%s %zd - %s\n",
- kunit_status_to_string(is_ok),
- test_number, description);
+ pr_info("%s %zd - %s%s%s\n",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ (status == KUNIT_SKIPPED) ? directive : "");
else
- kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
- kunit_status_to_string(is_ok),
- test_number, description);
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ (status == KUNIT_SKIPPED) ? directive : "");
}
-bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
+ enum kunit_status status = KUNIT_SKIPPED;
kunit_suite_for_each_test_case(suite, test_case) {
- if (!test_case->success)
- return false;
+ if (test_case->status == KUNIT_FAILURE)
+ return KUNIT_FAILURE;
+ else if (test_case->status == KUNIT_SUCCESS)
+ status = KUNIT_SUCCESS;
}
- return true;
+ return status;
}
EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
@@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
- suite->name);
+ suite->name,
+ suite->status_comment);
}
unsigned int kunit_test_case_num(struct kunit_suite *suite,
@@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
test->log = log;
if (test->log)
test->log[0] = '\0';
- test->success = true;
+ test->status = KUNIT_SUCCESS;
+ test->status_comment[0] = '\0';
}
EXPORT_SYMBOL_GPL(kunit_init_test);
@@ -376,7 +386,11 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
context.test_case = test_case;
kunit_try_catch_run(try_catch, &context);
- test_case->success = test->success;
+ /* Propagate the parameter result to the test case. */
+ if (test->status == KUNIT_FAILURE)
+ test_case->status = KUNIT_FAILURE;
+ else if (test_case->status != KUNIT_FAILURE && test->status == KUNIT_SUCCESS)
+ test_case->status = KUNIT_SUCCESS;
}
int kunit_run_tests(struct kunit_suite *suite)
@@ -388,7 +402,7 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
- bool test_success = true;
+ test_case->status = KUNIT_SKIPPED;
if (test_case->generate_params) {
/* Get initial param. */
@@ -398,7 +412,6 @@ int kunit_run_tests(struct kunit_suite *suite)
do {
kunit_run_case_catch_errors(suite, test_case, &test);
- test_success &= test_case->success;
if (test_case->generate_params) {
if (param_desc[0] == '\0') {
@@ -410,7 +423,7 @@ int kunit_run_tests(struct kunit_suite *suite)
KUNIT_SUBTEST_INDENT
"# %s: %s %d - %s",
test_case->name,
- kunit_status_to_string(test.success),
+ kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);
/* Get next param. */
@@ -420,9 +433,10 @@ int kunit_run_tests(struct kunit_suite *suite)
}
} while (test.param_value);
- kunit_print_ok_not_ok(&test, true, test_success,
+ kunit_print_ok_not_ok(&test, true, test_case->status,
kunit_test_case_num(suite, test_case),
- test_case->name);
+ test_case->name,
+ test.status_comment);
}
kunit_print_subtest_end(suite);
@@ -434,6 +448,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
+ suite->status_comment[0] = '\0';
}
int __kunit_test_suites_init(struct kunit_suite * const * const suites)
@@ -576,41 +591,43 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
}
EXPORT_SYMBOL_GPL(kunit_destroy_resource);
-struct kunit_kmalloc_params {
+struct kunit_kmalloc_array_params {
+ size_t n;
size_t size;
gfp_t gfp;
};
-static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+static int kunit_kmalloc_array_init(struct kunit_resource *res, void *context)
{
- struct kunit_kmalloc_params *params = context;
+ struct kunit_kmalloc_array_params *params = context;
- res->data = kmalloc(params->size, params->gfp);
+ res->data = kmalloc_array(params->n, params->size, params->gfp);
if (!res->data)
return -ENOMEM;
return 0;
}
-static void kunit_kmalloc_free(struct kunit_resource *res)
+static void kunit_kmalloc_array_free(struct kunit_resource *res)
{
kfree(res->data);
}
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- struct kunit_kmalloc_params params = {
+ struct kunit_kmalloc_array_params params = {
.size = size,
+ .n = n,
.gfp = gfp
};
return kunit_alloc_resource(test,
- kunit_kmalloc_init,
- kunit_kmalloc_free,
+ kunit_kmalloc_array_init,
+ kunit_kmalloc_array_free,
gfp,
&params);
}
-EXPORT_SYMBOL_GPL(kunit_kmalloc);
+EXPORT_SYMBOL_GPL(kunit_kmalloc_array);
void kunit_kfree(struct kunit *test, const void *ptr)
{
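
Editor's note: the allocation resource is renamed to kunit_kmalloc_array(); kunit_kmalloc() presumably remains as a one-element wrapper in the header (not shown in this hunk). A minimal test-body sketch; the test name and element count are hypothetical:

    #include <kunit/test.h>

    static void example_alloc_test(struct kunit *test)
    {
    	int *vals;

    	/* Freed automatically when the test finishes, like kunit_kmalloc(). */
    	vals = kunit_kmalloc_array(test, 16, sizeof(*vals), GFP_KERNEL);
    	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vals);
    }
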
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 1e1e37762799..0fb59e92ca2d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -104,7 +104,7 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* @head: the list to sort
* @cmp: the elements comparison function
*
- * The comparison funtion @cmp must return > 0 if @a should sort after
+ * The comparison function @cmp must return > 0 if @a should sort after
* @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
* sort before @b *or* their original order should be preserved. It is
* always called with the element that came first in the input in @a,
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
new file mode 100644
index 000000000000..b76b92dd0f1f
--- /dev/null
+++ b/lib/logic_iomem.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/logic_iomem.h>
+
+struct logic_iomem_region {
+ const struct resource *res;
+ const struct logic_iomem_region_ops *ops;
+ struct list_head list;
+};
+
+struct logic_iomem_area {
+ const struct logic_iomem_ops *ops;
+ void *priv;
+};
+
+#define AREA_SHIFT 24
+#define MAX_AREA_SIZE (1 << AREA_SHIFT)
+#define MAX_AREAS ((1ULL<<32) / MAX_AREA_SIZE)
+#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
+#define AREA_MASK (MAX_AREA_SIZE - 1)
+#ifdef CONFIG_64BIT
+#define IOREMAP_BIAS 0xDEAD000000000000UL
+#define IOREMAP_MASK 0xFFFFFFFF00000000UL
+#else
+#define IOREMAP_BIAS 0
+#define IOREMAP_MASK 0
+#endif
+
+static DEFINE_MUTEX(regions_mtx);
+static LIST_HEAD(regions_list);
+static struct logic_iomem_area mapped_areas[MAX_AREAS];
+
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops)
+{
+ struct logic_iomem_region *rreg;
+ int err;
+
+ if (WARN_ON(!resource || !ops))
+ return -EINVAL;
+
+ if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
+ return -EINVAL;
+
+ rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
+ if (!rreg)
+ return -ENOMEM;
+
+ err = request_resource(&iomem_resource, resource);
+ if (err) {
+ kfree(rreg);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&regions_mtx);
+ rreg->res = resource;
+ rreg->ops = ops;
+ list_add_tail(&rreg->list, &regions_list);
+ mutex_unlock(&regions_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(logic_iomem_add_region);
+
+#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
+{
+ WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
+ (unsigned long long)offset, size);
+ return NULL;
+}
+
+static void real_iounmap(void __iomem *addr)
+{
+ WARN(1, "invalid iounmap for addr 0x%llx\n",
+ (unsigned long long)addr);
+}
+#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+
+void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ void __iomem *ret = NULL;
+ struct logic_iomem_region *rreg, *found = NULL;
+ int i;
+
+ mutex_lock(&regions_mtx);
+ list_for_each_entry(rreg, &regions_list, list) {
+ if (rreg->res->start > offset)
+ continue;
+ if (rreg->res->end < offset + size - 1)
+ continue;
+ found = rreg;
+ break;
+ }
+
+ if (!found)
+ goto out;
+
+ for (i = 0; i < MAX_AREAS; i++) {
+ long offs;
+
+ if (mapped_areas[i].ops)
+ continue;
+
+ offs = rreg->ops->map(offset - found->res->start,
+ size, &mapped_areas[i].ops,
+ &mapped_areas[i].priv);
+ if (offs < 0) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ if (WARN_ON(!mapped_areas[i].ops)) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
+ break;
+ }
+out:
+ mutex_unlock(&regions_mtx);
+ if (ret)
+ return ret;
+ return real_ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+static inline struct logic_iomem_area *
+get_area(const volatile void __iomem *addr)
+{
+ unsigned long a = (unsigned long)addr;
+ unsigned int idx;
+
+ if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
+ return NULL;
+
+ idx = (a & AREA_BITS) >> AREA_SHIFT;
+
+ if (mapped_areas[idx].ops)
+ return &mapped_areas[idx];
+
+ return NULL;
+}
+
+void iounmap(void __iomem *addr)
+{
+ struct logic_iomem_area *area = get_area(addr);
+
+ if (!area) {
+ real_iounmap(addr);
+ return;
+ }
+
+ if (area->ops->unmap)
+ area->ops->unmap(area->priv);
+
+ mutex_lock(&regions_mtx);
+ area->ops = NULL;
+ area->priv = NULL;
+ mutex_unlock(&regions_mtx);
+}
+EXPORT_SYMBOL(iounmap);
+
+#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#define MAKE_FALLBACK(op, sz) \
+static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid read" #op " at address %llx\n", \
+ (unsigned long long)addr); \
+ return (u ## sz)~0ULL; \
+} \
+ \
+static void real_raw_write ## op(u ## sz val, volatile void __iomem *addr)	\
+{									\
+	WARN(1, "Invalid write" #op " of 0x%llx at address %llx\n",	\
+ (unsigned long long)val, (unsigned long long)addr); \
+} \
+
+MAKE_FALLBACK(b, 8);
+MAKE_FALLBACK(w, 16);
+MAKE_FALLBACK(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_FALLBACK(q, 64);
+#endif
+
+static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ WARN(1, "Invalid memset_io at address 0x%llx\n",
+ (unsigned long long)addr);
+}
+
+static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
+ (unsigned long long)addr);
+
+ memset(buffer, 0xff, size);
+}
+
+static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
+ (unsigned long long)addr);
+}
+#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+
+#define MAKE_OP(op, sz) \
+u##sz __raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) \
+ return real_raw_read ## op(addr); \
+ \
+ return (u ## sz) area->ops->read(area->priv, \
+ (unsigned long)addr & AREA_MASK,\
+ sz / 8); \
+} \
+EXPORT_SYMBOL(__raw_read ## op); \
+ \
+void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) { \
+ real_raw_write ## op(val, addr); \
+ return; \
+ } \
+ \
+ area->ops->write(area->priv, \
+ (unsigned long)addr & AREA_MASK, \
+ sz / 8, val); \
+} \
+EXPORT_SYMBOL(__raw_write ## op)
+
+MAKE_OP(b, 8);
+MAKE_OP(w, 16);
+MAKE_OP(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_OP(q, 64);
+#endif
+
+void memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memset_io(addr, value, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->set) {
+ area->ops->set(area->priv, start, value, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, value);
+}
+EXPORT_SYMBOL(memset_io);
+
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_fromio(buffer, addr, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_from) {
+ area->ops->copy_from(area->priv, buffer, start, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ buf[offs] = area->ops->read(area->priv, start + offs, 1);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ const u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_toio(addr, buffer, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_to) {
+ area->ops->copy_to(area->priv, start, buffer, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, buf[offs]);
+}
+EXPORT_SYMBOL(memcpy_toio);
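
Editor's note: logic_iomem routes the MMIO accessors above through per-region callbacks. A sketch of a trivial RAM-backed backend; the callback prototypes are assumptions inferred from the call sites in this file (the authoritative ones live in <linux/logic_iomem.h>), and the resource range and names are hypothetical:

    #include <linux/logic_iomem.h>
    #include <linux/slab.h>
    #include <linux/ioport.h>

    static unsigned long buf_read(void *priv, unsigned int offset, int size)
    {
    	unsigned long val = 0;

    	memcpy(&val, (u8 *)priv + offset, size);	/* native-endian scratch buffer */
    	return val;
    }

    static void buf_write(void *priv, unsigned int offset, int size,
    		      unsigned long val)
    {
    	memcpy((u8 *)priv + offset, &val, size);
    }

    static const struct logic_iomem_ops buf_ops = {
    	.read	= buf_read,
    	.write	= buf_write,
    	/* a real backend would also provide .unmap to free @priv */
    };

    static long buf_map(unsigned long offset, size_t size,
    		    const struct logic_iomem_ops **ops, void **priv)
    {
    	*priv = kzalloc(size, GFP_KERNEL);
    	if (!*priv)
    		return -ENOMEM;
    	*ops = &buf_ops;
    	return 0;	/* offset of this mapping inside the area */
    }

    static const struct logic_iomem_region_ops buf_region_ops = {
    	.map = buf_map,
    };

    static struct resource buf_res = {
    	.name	= "example-logic-iomem",	/* hypothetical region */
    	.start	= 0x10000000,
    	.end	= 0x10000fff,
    	.flags	= IORESOURCE_MEM,
    };

    /* After logic_iomem_add_region(&buf_res, &buf_region_ops), an
     * ioremap(0x10000000, ...) is served by buf_read()/buf_write()
     * instead of real MMIO.
     */
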
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 8a7724a6ce2f..926f4823d5ea 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -481,7 +481,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
-int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest,
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 7456edb864fc..bfac26ddfc22 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
new file mode 100644
index 000000000000..01611ddff420
--- /dev/null
+++ b/lib/math/rational-test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#include <linux/rational.h>
+
+struct rational_test_param {
+ unsigned long num, den;
+ unsigned long max_num, max_den;
+ unsigned long exp_num, exp_den;
+
+ const char *name;
+};
+
+static const struct rational_test_param test_parameters[] = {
+ { 1230, 10, 100, 20, 100, 1, "Exceeds bounds, semi-convergent term > 1/2 last term" },
+ { 34567,100, 120, 20, 120, 1, "Exceeds bounds, semi-convergent term < 1/2 last term" },
+ { 1, 30, 100, 10, 0, 1, "Closest to zero" },
+ { 1, 19, 100, 10, 1, 10, "Closest to smallest non-zero" },
+ { 27,32, 16, 16, 11, 13, "Use convergent" },
+ { 1155, 7735, 255, 255, 33, 221, "Exact answer" },
+ { 87, 32, 70, 32, 68, 25, "Semiconvergent, numerator limit" },
+ { 14533, 4626, 15000, 2400, 7433, 2366, "Semiconvergent, denominator limit" },
+};
+
+static void get_desc(const struct rational_test_param *param, char *desc)
+{
+ strscpy(desc, param->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+/* Creates function rational_gen_params */
+KUNIT_ARRAY_PARAM(rational, test_parameters, get_desc);
+
+static void rational_test(struct kunit *test)
+{
+ const struct rational_test_param *param = (const struct rational_test_param *)test->param_value;
+ unsigned long n = 0, d = 0;
+
+ rational_best_approximation(param->num, param->den, param->max_num, param->max_den, &n, &d);
+ KUNIT_EXPECT_EQ(test, n, param->exp_num);
+ KUNIT_EXPECT_EQ(test, d, param->exp_den);
+}
+
+static struct kunit_case rational_test_cases[] = {
+ KUNIT_CASE_PARAM(rational_test, rational_gen_params),
+ {}
+};
+
+static struct kunit_suite rational_test_suite = {
+ .name = "rational",
+ .test_cases = rational_test_cases,
+};
+
+kunit_test_suites(&rational_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/math/rational.c b/lib/math/rational.c
index 9781d521963d..c0ab51d8fbb9 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/minmax.h>
+#include <linux/limits.h>
/*
* calculate best rational approximation for a given fraction
@@ -78,13 +79,18 @@ void rational_best_approximation(
* found below as 't'.
*/
if ((n2 > max_numerator) || (d2 > max_denominator)) {
- unsigned long t = min((max_numerator - n0) / n1,
- (max_denominator - d0) / d1);
+ unsigned long t = ULONG_MAX;
- /* This tests if the semi-convergent is closer
- * than the previous convergent.
+ if (d1)
+ t = (max_denominator - d0) / d1;
+ if (n1)
+ t = min(t, (max_numerator - n0) / n1);
+
+ /* This tests if the semi-convergent is closer than the previous
+ * convergent. If d1 is zero there is no previous convergent as this
+ * is the 1st iteration, so always choose the semi-convergent.
*/
- if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
n1 = n0 + t * n1;
d1 = d0 + t * d1;
}
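
Editor's note: the added !d1 check makes the first iteration always accept the semi-convergent, which the new "Closest to zero" row in the KUnit parameter table above exercises. A usage sketch mirroring that row (all values taken from the test parameters, nothing new):

    #include <linux/rational.h>

    /* 1/30 bounded by max_num = 100, max_den = 10 should now yield 0/1. */
    static void example_best_approximation(void)
    {
    	unsigned long n, d;

    	rational_best_approximation(1, 30, 100, 10, &n, &d);
    	/* n == 0, d == 1 per the "Closest to zero" test case above. */
    }
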
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index afbd99987cf8..b6fa1d08fb55 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -48,8 +48,8 @@
/* Define auxiliary asm macros.
*
- * 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
- * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+ * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
* word product in HIGH_PROD and LOW_PROD.
*
* 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 7ea225b2204f..39c4c6731094 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -234,11 +234,11 @@ static int count_lzeros(MPI a)
}
/**
- * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
+ * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
* @a: a multi precision integer
- * @buf: bufer to which the output will be written to. Needs to be at
- * leaset mpi_get_size(a) long.
+ * @buf: buffer to which the output will be written to. Needs to be at
+ * least mpi_get_size(a) long.
* @buf_len: size of the buf.
* @nbytes: receives the actual length of the data written on success and
* the data to-be-written on -EOVERFLOW in case buf_len was too
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 3c63710c20c6..9a75ca3f7edf 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(mpi_const);
/****************
* Note: It was a bad idea to use the number of limbs to allocate
* because on a alpha the limbs are large but we normally need
- * integers of n bits - So we should chnage this to bits (or bytes).
+ * integers of n bits - So we should change this to bits (or bytes).
*
* But mpi_alloc is used in a lot of places :-)
*/
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 1d051ef66afe..86029ad5ead4 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -619,7 +619,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation depends on the validate flags passed, see
* &enum netlink_validation for more details on that.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
@@ -633,7 +633,7 @@ int __nla_validate(const struct nlattr *head, int len, int maxtype,
EXPORT_SYMBOL(__nla_validate);
/**
- * nla_policy_len - Determin the max. length of a policy
+ * nla_policy_len - Determine the max. length of a policy
* @policy: policy to use
* @n: number of policies
*
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 3dfaa836e7c5..e592d48b1974 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(parse_OID);
* @bufsize: The size of the buffer
*
* The OID is rendered into the buffer in "a.b.c.d" format and the number of
- * bytes is returned. -EBADMSG is returned if the data could not be intepreted
+ * bytes is returned. -EBADMSG is returned if the data could not be interpreted
* and -ENOBUFS if the buffer was too small.
*/
int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
diff --git a/lib/once.c b/lib/once.c
index 8b7d6235217e..59149bf3bfb4 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -3,10 +3,12 @@
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
+#include <linux/module.h>
struct once_work {
struct work_struct work;
struct static_key_true *key;
+ struct module *module;
};
static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
work = container_of(w, struct once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_branch_disable(work->key);
+ module_put(work->module);
kfree(work);
}
-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;
@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
INIT_WORK(&w->work, once_deferred);
w->key = key;
+ w->module = mod;
+ __module_get(mod);
schedule_work(&w->work);
}
@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
EXPORT_SYMBOL(__do_once_start);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags)
+ unsigned long *flags, struct module *mod)
__releases(once_lock)
{
*done = true;
spin_unlock_irqrestore(&once_lock, *flags);
- once_disable_jump(once_key);
+ once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
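
The new module argument exists so the deferred static-branch disable can no longer run after the owning module has been unloaded: __do_once_done() now pins the module until the work item completes, and the companion header change (not shown in this hunk) has DO_ONCE() pass THIS_MODULE. Callers are unaffected; a hedged usage sketch with a hypothetical helper:

    #include <linux/once.h>
    #include <linux/random.h>
    #include <linux/types.h>

    static u32 example_secret;

    static u32 example_get_secret(void)
    {
            /*
             * Seeds example_secret at most once.  With this fix the deferred
             * work that disables the underlying static key holds a reference
             * on the owning module, so unloading the module right after the
             * first call can no longer race with the key patching.
             */
            get_random_once(&example_secret, sizeof(example_secret));
            return example_secret;
    }
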
diff --git a/lib/parser.c b/lib/parser.c
index f1a6d90b8c34..bcb23484100e 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -6,6 +6,7 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/kstrtox.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index e5d4b3b2af81..6e77eb6d8e72 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -82,7 +82,7 @@ pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)
* @bytes_to_move: number of bytes to move the offset forward by
*
* Check that there is enough space past the current offset, and then move the
- * offset forward by this ammount.
+ * offset forward by this amount.
*
* Returns: zero on success, or -EFAULT if the image is too small to fit the
* expected length.
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index 4eb29f365ece..d9d1c33aebda 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -385,7 +385,7 @@ static void test_bc(struct rs_control *rs, int len, int errs,
/*
* We check that the returned word is actually a
- * codeword. The obious way to do this would be to
+ * codeword. The obvious way to do this would be to
* compute the syndrome, but we don't want to replicate
* that code here. However, all the codes are in
* systematic form, and therefore we can encode the
diff --git a/lib/refcount.c b/lib/refcount.c
index ebac8b7d15a7..a207a8f22b3c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
* @flags: saved IRQ-flags if the is acquired
*
* Same as refcount_dec_and_lock() above except that the spinlock is acquired
- * with disabled interupts.
+ * with disabled interrupts.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c949c1e3b87c..e12bbfb240b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -703,7 +703,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
*
* Returns zero if successful.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
*
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 47b3691058eb..b25db9be938a 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -583,7 +583,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
/*
* Once the clear bit is set, the bit may be allocated out.
*
- * Orders READ/WRITE on the asssociated instance(such as request
+ * Orders READ/WRITE on the associated instance(such as request
* of blk_mq) by this bit for avoiding race with re-allocation,
* and its pair is the memory barrier implied in __sbitmap_get_word.
*
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a59778946404..27efa6178153 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(sg_next);
* @sg: The scatterlist
*
* Description:
- * Allows to know how many entries are in sg, taking into acount
+ * Allows to know how many entries are in sg, taking into account
* chaining as well
*
**/
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(sg_nents);
*
* Description:
* Determines the number of entries in sg that are required to meet
- * the supplied length, taking into acount chaining as well
+ * the supplied length, taking into account chaining as well
*
* Returns:
* the number of sg entries needed, negative error on failure
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 707453f5d58e..0a68f7aa85d6 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -229,8 +229,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
WARN_ON(s->size == 0);
+ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
+
while (len) {
- start_len = min(len, HEX_CHARS - 1);
+ start_len = min(len, MAX_MEMHEX_BYTES);
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < start_len; i++) {
#else
@@ -243,12 +245,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
break;
/* j increments twice per loop */
- len -= j / 2;
hex[j++] = ' ';
seq_buf_putmem(s, hex, j);
if (seq_buf_has_overflowed(s))
return -1;
+
+ len -= start_len;
+ data += start_len;
}
return 0;
}
@@ -285,7 +289,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
}
/**
- * seq_buf_to_user - copy the squence buffer to user space
+ * seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
* @cnt: The amount to copy
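
The old loop clamped each chunk to HEX_CHARS - 1 source bytes, which expands to roughly twice that many hex digits and can overrun the hex[] scratch buffer; the replacement clamps to MAX_MEMHEX_BYTES and advances both len and data by the bytes actually consumed. The BUILD_BUG_ON added above pins the invariant that a full chunk (two digits per byte) plus the trailing space still fits in HEX_CHARS. A condensed userspace sketch of the corrected bookkeeping, with assumed constant values and only the little-endian byte order:

    #include <stdio.h>

    #define MAX_MEMHEX_BYTES 8u                     /* assumed kernel value */
    #define HEX_CHARS (MAX_MEMHEX_BYTES * 2 + 1)

    static void putmem_hex(const void *mem, unsigned int len)
    {
            const unsigned char *data = mem;
            char hex[HEX_CHARS + 1];                /* +1 for sprintf's NUL */
            unsigned int start_len, i, j;

            while (len) {
                    start_len = len < MAX_MEMHEX_BYTES ? len : MAX_MEMHEX_BYTES;
                    /* little-endian: dump the chunk most-significant byte first */
                    for (i = start_len, j = 0; i-- > 0; )
                            j += sprintf(&hex[j], "%02x", data[i]);
                    hex[j++] = ' ';
                    fwrite(hex, 1, j, stdout);
                    len -= start_len;               /* the corrected bookkeeping */
                    data += start_len;
            }
            putchar('\n');
    }

    int main(void)
    {
            putmem_hex("0123456789", 10);           /* two chunks: 8 bytes, then 2 */
            return 0;
    }
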
diff --git a/lib/sort.c b/lib/sort.c
index 3ad454411997..aa18153864d2 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -51,7 +51,7 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
* which basically all CPUs have, to minimize loop overhead computations.
*
* For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
- * bottom of the loop, even though the zero flag is stil valid from the
+ * bottom of the loop, even though the zero flag is still valid from the
* subtract (since the intervening mov instructions don't alter the flags).
* Gcc 8.1.0 doesn't have that problem.
*/
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index df9179f4f441..0a2e417f83cb 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -11,7 +11,7 @@
* Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
* and free stacks repeat a lot, we save about 100x space.
* Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguos memory allocation.
+ * another in a contiguous memory allocation.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
diff --git a/lib/string.c b/lib/string.c
index 7548eb715ddb..77bd0b1d3296 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -977,7 +977,7 @@ void *memscan(void *addr, int c, size_t size)
unsigned char *p = addr;
while (size) {
- if (*p == c)
+ if (*p == (unsigned char)c)
return (void *)p;
p++;
size--;
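
The added cast makes memscan() treat @c the way memchr() does: the int argument is reduced to an unsigned char before comparing, so a value outside 0..255 (or a negative char promoted through int) still matches the intended byte. A small userspace illustration of the difference, using hypothetical data:

    #include <stdio.h>
    #include <stddef.h>

    /* Old behaviour: compares the full int, so 0x1ff never equals any byte. */
    static void *memscan_old(void *addr, int c, size_t size)
    {
            unsigned char *p = addr;

            while (size) {
                    if (*p == c)
                            return p;
                    p++;
                    size--;
            }
            return p;
    }

    /* Fixed behaviour: reduce c to a byte first, matching memchr() semantics. */
    static void *memscan_new(void *addr, int c, size_t size)
    {
            unsigned char *p = addr;

            while (size) {
                    if (*p == (unsigned char)c)
                            return p;
                    p++;
                    size--;
            }
            return p;
    }

    int main(void)
    {
            char buf[] = "abc\xffxyz";

            /* 0x1ff truncates to 0xff; only the fixed version finds it at offset 3 */
            printf("old: offset %td\n", (char *)memscan_old(buf, 0x1ff, 7) - buf);
            printf("new: offset %td\n", (char *)memscan_new(buf, 0x1ff, 7) - buf);
            return 0;
    }
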
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 7f2d5fbaf243..5a35c7e16e96 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -452,18 +452,20 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* The process of escaping byte buffer includes several parts. They are applied
* in the following sequence.
*
- * 1. The character is matched to the printable class, if asked, and in
- * case of match it passes through to the output.
- * 2. The character is not matched to the one from @only string and thus
+ * 1. The character is not matched to the one from @only string and thus
* must go as-is to the output.
- * 3. The character is checked if it falls into the class given by @flags.
+ * 2. The character is matched to the printable and ASCII classes, if asked,
+ * and in case of match it passes through to the output.
+ * 3. The character is matched to the printable or ASCII class, if asked,
+ * and in case of match it passes through to the output.
+ * 4. The character is checked if it falls into the class given by @flags.
* %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
* character. Note that they actually can't go together, otherwise
* %ESCAPE_HEX will be ignored.
*
* Caller must provide valid source and destination pointers. Be aware that
* destination buffer will not be NULL-terminated, thus caller have to append
- * it if needs. The supported flags are::
+ * it if needs. The supported flags are::
*
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
@@ -482,11 +484,27 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* %ESCAPE_ANY:
* all previous together
* %ESCAPE_NP:
- * escape only non-printable characters (checked by isprint)
+ * escape only non-printable characters, checked by isprint()
* %ESCAPE_ANY_NP:
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
+ * %ESCAPE_NA:
+ * escape only non-ascii characters, checked by isascii()
+ * %ESCAPE_NAP:
+ * escape only non-printable or non-ascii characters
+ * %ESCAPE_APPEND:
+ * append characters from @only to be escaped by the given classes
+ *
+ * %ESCAPE_APPEND would help to pass additional characters to the escaped, when
+ * one of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
+ *
+ * One notable caveat, the %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA have the
+ * higher priority than the rest of the flags (%ESCAPE_NAP is the highest).
+ * It doesn't make much sense to use either of them without %ESCAPE_OCTAL
+ * or %ESCAPE_HEX, because they cover most of the other character classes.
+ * %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
+ * the above.
*
* Return:
* The total size of the escaped output that would be generated for
@@ -500,67 +518,69 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
char *p = dst;
char *end = p + osz;
bool is_dict = only && *only;
+ bool is_append = flags & ESCAPE_APPEND;
while (isz--) {
unsigned char c = *src++;
+ bool in_dict = is_dict && strchr(only, c);
/*
* Apply rules in the following sequence:
- * - the character is printable, when @flags has
- * %ESCAPE_NP bit set
* - the @only string is supplied and does not contain a
* character under question
+ * - the character is printable and ASCII, when @flags has
+ * %ESCAPE_NAP bit set
+ * - the character is printable, when @flags has
+ * %ESCAPE_NP bit set
+ * - the character is ASCII, when @flags has
+ * %ESCAPE_NA bit set
* - the character doesn't fall into a class of symbols
* defined by given @flags
* In these cases we just pass through a character to the
* output buffer.
+ *
+ * When %ESCAPE_APPEND is passed, the characters from @only
+ * have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
+ * %ESCAPE_NA cases.
*/
- if ((flags & ESCAPE_NP && isprint(c)) ||
- (is_dict && !strchr(only, c))) {
- /* do nothing */
- } else {
- if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
- continue;
+ if (!(is_append || in_dict) && is_dict &&
+ escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
+ flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_NULL && escape_null(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isprint(c) &&
+ flags & ESCAPE_NP && escape_passthrough(c, &p, end))
+ continue;
- /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
- if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
- continue;
+ if (!(is_append && in_dict) && isascii(c) &&
+ flags & ESCAPE_NA && escape_passthrough(c, &p, end))
+ continue;
- if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
- continue;
- }
+ if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
+ continue;
- escape_passthrough(c, &p, end);
- }
+ if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
+ continue;
- return p - dst;
-}
-EXPORT_SYMBOL(string_escape_mem);
+ if (flags & ESCAPE_NULL && escape_null(c, &p, end))
+ continue;
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz)
-{
- char *p = dst;
- char *end = p + osz;
+ /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
+ if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
+ continue;
- while (isz--) {
- unsigned char c = *src++;
+ if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
+ continue;
- if (!isprint(c) || !isascii(c) || c == '"' || c == '\\')
- escape_hex(c, &p, end);
- else
- escape_passthrough(c, &p, end);
+ escape_passthrough(c, &p, end);
}
return p - dst;
}
-EXPORT_SYMBOL(string_escape_mem_ascii);
+EXPORT_SYMBOL(string_escape_mem);
/*
* Return an allocated string that has been escaped of special characters
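
As a usage illustration of the reworked flag handling, here is a hedged caller sketch (hypothetical input and buffer sizes; the signature is the existing string_escape_mem() one shown in the hunk header above):

    #include <linux/string_helpers.h>

    static int example_escape(char *dst, size_t osz)
    {
            const char src[] = "caf\xc3\xa9\tok";

            /*
             * Escape anything that is not printable ASCII as \xHH, and in
             * addition escape TAB even though it is ASCII: ESCAPE_APPEND
             * widens the ESCAPE_NA class by the characters in @only.
             *
             * dst is not NUL-terminated; the return value is the total size
             * the escaped output would need, which may exceed osz.
             */
            return string_escape_mem(src, sizeof(src) - 1, dst, osz,
                                     ESCAPE_HEX | ESCAPE_NA | ESCAPE_APPEND,
                                     "\t");
    }
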
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 10360d4ea273..2185d71704f0 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -19,7 +19,7 @@ static __init bool test_string_check_buf(const char *name, unsigned int flags,
if (q_real == q_test && !memcmp(out_test, out_real, q_test))
return true;
- pr_warn("Test '%s' failed: flags = %u\n", name, flags);
+ pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
in, p, true);
@@ -136,7 +136,7 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_SPACE | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\\h\\\"\a\e\\",
.s1 = {{
@@ -150,7 +150,7 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_SPECIAL | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
@@ -201,12 +201,26 @@ static const struct test_string_2 escape0[] __initconst = {{
.flags = ESCAPE_NP | ESCAPE_HEX,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\\220\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
-#define TEST_STRING_2_DICT_1 "b\\ \t\r"
+#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
static const struct test_string_2 escape1[] __initconst = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
@@ -216,16 +230,40 @@ static const struct test_string_2 escape1[] __initconst = {{
.out = "\f\\x5c\\x20\n\\x0d\\x09\v",
.flags = ESCAPE_HEX,
},{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\014\\134\\040\\012\\015\\011\\013",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
- .in = "\\h\\\"\a\e\\",
+ .in = "\\h\\\"\a\xCF\e\\",
.s1 = {{
- .out = "\\134h\\134\"\a\e\\134",
+ .out = "\\134h\\134\"\a\\317\e\\134",
.flags = ESCAPE_OCTAL,
},{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\134h\\134\"\\007\\317\\033\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
/* terminator */
- }},
+ }}
},{
.in = "\eb \\C\007\"\x90\r]",
.s1 = {{
@@ -233,7 +271,89 @@ static const struct test_string_2 escape1[] __initconst = {{
.flags = ESCAPE_OCTAL,
},{
/* terminator */
- }},
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ /* terminator */
+ }}
},{
/* terminator */
}};
@@ -290,7 +410,7 @@ test_string_escape_overflow(const char *in, int p, unsigned int flags, const cha
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %u, osz = 0, expected %d, got %d\n",
+ pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
name, flags, q_test, q_real);
}
@@ -315,8 +435,13 @@ static __init void test_string_escape(const char *name,
/* NULL injection */
if (flags & ESCAPE_NULL) {
in[p++] = '\0';
- out_test[q_test++] = '\\';
- out_test[q_test++] = '0';
+ /* '\0' passes isascii() test */
+ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) {
+ out_test[q_test++] = '\0';
+ } else {
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
}
/* Don't try strings that have no output */
@@ -459,17 +584,17 @@ static int __init test_string_helpers_init(void)
unsigned int i;
pr_info("Running tests...\n");
- for (i = 0; i < UNESCAPE_ANY + 1; i++)
+ for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
get_random_int() % (UNESCAPE_ANY + 1), true);
/* Without dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
- for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
/* Test string_get_size() */
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 9cd575583180..4ea73f5aed41 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -366,6 +366,13 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0},
{0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0},
+ {0, "all", &exp1[8 * step], 32, 0},
+ {0, "0, 1, all, ", &exp1[8 * step], 32, 0},
+ {0, "all:1/2", &exp1[4 * step], 32, 0},
+ {0, "ALL:1/2", &exp1[4 * step], 32, 0},
+ {-EINVAL, "al", NULL, 8, 0},
+ {-EINVAL, "alll", NULL, 8, 0},
+
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
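
The new test vectors track an extension to bitmap_parselist() that accepts "all" as an alias for the whole region, optionally combined with the existing group/step suffix, while rejecting near-misses such as "al" or "alll". A hedged caller sketch (hypothetical helper; bitmap_parselist() itself is long-standing API):

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    static int example_parse(const char *arg)
    {
            DECLARE_BITMAP(mask, 32);
            int ret;

            /* "all:1/2" now selects every second bit of the whole 0..31 range */
            ret = bitmap_parselist(arg, mask, 32);
            if (ret)
                    return ret;             /* e.g. -EINVAL for "al" or "alll" */

            return bitmap_weight(mask, 32);
    }
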
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index 471141ddd691..3b7bcbee84db 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -15,7 +15,7 @@
* get_count_order/long
*/
-/* use an enum because thats the most common BITMAP usage */
+/* use an enum because that's the most common BITMAP usage */
enum bitops_fun {
BITOPS_4 = 4,
BITOPS_7 = 7,
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 4dc4dcbecd12..d500320778c7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1095,7 +1095,7 @@ static struct bpf_test tests[] = {
{
"RET_A",
.u.insns = {
- /* check that unitialized X and A contain zeros */
+ /* check that uninitialized X and A contain zeros */
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b6fe89add9fe..1bccd6cd5f48 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -260,8 +260,8 @@ static ssize_t config_show(struct device *dev,
len += scnprintf(buf + len, PAGE_SIZE - len,
"send_uevent:\t\t%s\n",
test_fw_config->send_uevent ?
- "FW_ACTION_HOTPLUG" :
- "FW_ACTION_NOHOTPLUG");
+ "FW_ACTION_UEVENT" :
+ "FW_ACTION_NOUEVENT");
len += scnprintf(buf + len, PAGE_SIZE - len,
"into_buf:\t\t%s\n",
test_fw_config->into_buf ? "true" : "false");
@@ -729,7 +729,7 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
test_firmware = NULL;
- rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
+ rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
dev, GFP_KERNEL, NULL,
trigger_async_request_cb);
if (rc) {
@@ -938,8 +938,8 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
pr_info("batched loading '%s' custom fallback mechanism %u times\n",
test_fw_config->name, test_fw_config->num_requests);
- send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
- FW_ACTION_NOHOTPLUG;
+ send_uevent = test_fw_config->send_uevent ? FW_ACTION_UEVENT :
+ FW_ACTION_NOUEVENT;
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 15f2e2db77bc..c259842f6d44 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -25,6 +25,7 @@
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
+#include <linux/rmap.h>
#include "test_hmm_uapi.h"
@@ -46,6 +47,7 @@ struct dmirror_bounce {
unsigned long cpages;
};
+#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL
/*
@@ -218,7 +220,7 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
* the invalidation is handled as part of the migration process.
*/
if (range->event == MMU_NOTIFY_MIGRATE &&
- range->migrate_pgmap_owner == dmirror->mdevice)
+ range->owner == dmirror->mdevice)
return true;
if (mmu_notifier_range_blockable(range))
@@ -619,6 +621,52 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
}
}
+static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int dmirror_atomic_map(unsigned long start, unsigned long end,
+ struct page **pages, struct dmirror *dmirror)
+{
+ unsigned long pfn, mapped = 0;
+ int i;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
+ void *entry;
+
+ if (!pages[i])
+ continue;
+
+ entry = pages[i];
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+
+ mapped++;
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return mapped;
+}
+
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
struct dmirror *dmirror)
{
@@ -661,6 +709,72 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
return 0;
}
+static int dmirror_exclusive(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct page *pages[64];
+ struct dmirror_bounce bounce;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long mapped;
+ int i;
+
+ if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
+ next = end;
+ else
+ next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+
+ ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
+ mapped = dmirror_atomic_map(addr, next, pages, dmirror);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ if (addr + (mapped << PAGE_SHIFT) < next) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return -EBUSY;
+ }
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
static int dmirror_migrate(struct dmirror *dmirror,
struct hmm_dmirror_cmd *cmd)
{
@@ -948,6 +1062,15 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_migrate(dmirror, &cmd);
break;
+ case HMM_DMIRROR_EXCLUSIVE:
+ ret = dmirror_exclusive(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_CHECK_EXCLUSIVE:
+ ret = dmirror_check_atomic(dmirror, cmd.addr,
+ cmd.addr + (cmd.npages << PAGE_SHIFT));
+ break;
+
case HMM_DMIRROR_SNAPSHOT:
ret = dmirror_snapshot(dmirror, &cmd);
break;
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index 670b4ef2a5b6..f14dea5dcd06 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -33,6 +33,8 @@ struct hmm_dmirror_cmd {
#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 44e08f4d9c52..8f7b0b2f6e11 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -110,17 +110,13 @@ static void kasan_test_exit(struct kunit *test)
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
- if (!IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " required"); \
- return; \
- } \
+ if (!IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=y"); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
- if (IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " enabled"); \
- return; \
- } \
+ if (IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=n"); \
} while (0)
static void kmalloc_oob_right(struct kunit *test)
@@ -655,7 +651,7 @@ static void kasan_global_oob(struct kunit *test)
{
/*
* Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
- * from failing here and panicing the kernel, access the array via a
+ * from failing here and panicking the kernel, access the array via a
* volatile pointer, which will prevent the compiler from being able to
* determine the array bounds.
*
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 38c250fbace3..ce1589391413 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -286,7 +286,7 @@ static int tally_work_test(struct kmod_test_device_info *info)
* If this ran it means *all* tasks were created fine and we
* are now just collecting results.
*
- * Only propagate errors, do not override with a subsequent sucess case.
+ * Only propagate errors, do not override with a subsequent success case.
*/
static void tally_up_work(struct kmod_test_device *test_dev)
{
@@ -543,7 +543,7 @@ static int trigger_config_run(struct kmod_test_device *test_dev)
* wrong with the setup of the test. If the test setup went fine
* then userspace must just check the result of config->test_result.
* One issue with relying on the return from a call in the kernel
- * is if the kernel returns a possitive value using this trigger
+ * is if the kernel returns a positive value using this trigger
* will not return the value to userspace, it would be lost.
*
* By not relying on capturing the return value of tests we are using
@@ -585,7 +585,7 @@ trigger_config_store(struct device *dev,
* Note: any return > 0 will be treated as success
* and the error value will not be available to userspace.
* Do not rely on trying to send to userspace a test value
- * return value as possitive return errors will be lost.
+ * return value as positive return errors will be lost.
*/
if (WARN_ON(ret > 0))
return -EINVAL;
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 00daaf23316f..ade7a1ea0c8e 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "list_sort_test: " fmt
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
@@ -23,68 +23,52 @@ struct debug_el {
struct list_head list;
unsigned int poison2;
int value;
- unsigned serial;
+ unsigned int serial;
};
-/* Array, containing pointers to all elements in the test list */
-static struct debug_el **elts __initdata;
-
-static int __init check(struct debug_el *ela, struct debug_el *elb)
+static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
{
- if (ela->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", ela->serial);
- return -EINVAL;
- }
- if (elb->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", elb->serial);
- return -EINVAL;
- }
- if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- pr_err("error: phantom element\n");
- return -EINVAL;
- }
- if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
- return -EINVAL;
- }
- if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
- return -EINVAL;
- }
- return 0;
+ struct debug_el **elts = test->priv;
+
+ KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+ KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
+
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
+
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
}
-static int __init cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
+/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
+static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
struct debug_el *ela, *elb;
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
- check(ela, elb);
+ check(priv, ela, elb);
return ela->value - elb->value;
}
-static int __init list_sort_test(void)
+static void list_sort_test(struct kunit *test)
{
- int i, count = 1, err = -ENOMEM;
- struct debug_el *el;
+ int i, count = 1;
+ struct debug_el *el, **elts;
struct list_head *cur;
LIST_HEAD(head);
- pr_debug("start testing list_sort()\n");
-
- elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
- if (!elts)
- return err;
+ elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
+ test->priv = elts;
for (i = 0; i < TEST_LIST_LEN; i++) {
- el = kmalloc(sizeof(*el), GFP_KERNEL);
- if (!el)
- goto exit;
+ el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
el->value = prandom_u32() % (TEST_LIST_LEN / 3);
@@ -95,55 +79,44 @@ static int __init list_sort_test(void)
list_add_tail(&el->list, &head);
}
- list_sort(NULL, &head, cmp);
+ list_sort(test, &head, cmp);
- err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
- if (cur->next->prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
+ "list is corrupted");
- cmp_result = cmp(NULL, cur, cur->next);
- if (cmp_result > 0) {
- pr_err("error: list is not sorted\n");
- goto exit;
- }
+ cmp_result = cmp(test, cur, cur->next);
+ KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
- if (cmp_result == 0 && el->serial >= el1->serial) {
- pr_err("error: order of equivalent elements not "
- "preserved\n");
- goto exit;
+ if (cmp_result == 0) {
+ KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
+ "order of equivalent elements not preserved");
}
- if (check(el, el1)) {
- pr_err("error: element check failed\n");
- goto exit;
- }
+ check(test, el, el1);
count++;
}
- if (head.prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
+ KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
+ KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
+ "list length changed after sorting!");
+}
- if (count != TEST_LIST_LEN) {
- pr_err("error: bad list length %d", count);
- goto exit;
- }
+static struct kunit_case list_sort_cases[] = {
+ KUNIT_CASE(list_sort_test),
+ {}
+};
+
+static struct kunit_suite list_sort_suite = {
+ .name = "list_sort",
+ .test_cases = list_sort_cases,
+};
+
+kunit_test_suites(&list_sort_suite);
- err = 0;
-exit:
- for (i = 0; i < TEST_LIST_LEN; i++)
- kfree(elts[i]);
- kfree(elts);
- return err;
-}
-module_init(list_sort_test);
MODULE_LICENSE("GPL");
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index 48ff5747a4da..84fe09eaf55e 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -600,7 +600,7 @@ static void __init numbers_prefix_overflow(void)
/*
* 0x prefix in a field of width 2 using %i conversion: first field
* converts to 0. Next field scan starts at the character after "0x",
- * which will convert if can be intepreted as decimal but will fail
+ * which will convert if can be interpreted as decimal but will fail
* if it contains any hex digits (since no 0x prefix).
*/
test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
diff --git a/lib/test_string.c b/lib/test_string.c
index 7b31f4a505bf..9dfd6f52de92 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -179,6 +179,10 @@ static __init int strnchr_selftest(void)
return 0;
}
+static __exit void string_selftest_remove(void)
+{
+}
+
static __init int string_selftest_init(void)
{
int test, subtest;
@@ -216,4 +220,5 @@ fail:
}
module_init(string_selftest_init);
+module_exit(string_selftest_remove);
MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e5c7afbf7405..26c83943748a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -86,6 +86,7 @@ static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
*
* This function has caveats. Please use kstrtoull instead.
*/
+noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
return simple_strntoull(cp, INT_MAX, endp, base);
@@ -992,8 +993,12 @@ char *symbol_string(char *buf, char *end, void *ptr,
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
- if (*fmt == 'B')
+ if (*fmt == 'B' && fmt[1] == 'b')
+ sprint_backtrace_build_id(sym, value);
+ else if (*fmt == 'B')
sprint_backtrace(sym, value);
+ else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
+ sprint_symbol_build_id(sym, value);
else if (*fmt != 's')
sprint_symbol(sym, value);
else
@@ -2262,9 +2267,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
* - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - 'S[R]b' as above with module build ID (for use in backtraces)
* - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
* %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
+ * - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
@@ -3416,7 +3423,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
while (*fmt) {
/* skip any white space in format */
- /* white space in format matchs any amount of
+ /* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
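
For reference, the new 'b' suffix combines with the existing symbol specifiers; a hedged usage sketch with a hypothetical helper:

    #include <linux/printk.h>

    static void report_addr(void *addr)
    {
            /*
             * 'S' prints symbol+offset; the new 'b' suffix appends the module
             * build ID so the matching debuginfo can be located later.  'SRb'
             * additionally runs the value through __builtin_extract_return_addr().
             */
            pr_err("faulting function: %pSb\n", addr);
            pr_err("return address:    %pSRb\n", __builtin_return_address(0));
    }
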
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index 72ddac6ef2ec..ef449e97d1a1 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -422,7 +422,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
/*
* Flush pending already filtered data to the output buffer. Return
- * immediatelly if we couldn't flush everything, or if the next
+ * immediately if we couldn't flush everything, or if the next
* filter in the chain had already returned XZ_STREAM_END.
*/
if (s->temp.filtered > 0) {
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index ca2603abee08..7a6781e3f47b 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -147,8 +147,8 @@ struct lzma_dec {
/*
* LZMA properties or related bit masks (number of literal
- * context bits, a mask dervied from the number of literal
- * position bits, and a mask dervied from the number
+ * context bits, a mask derived from the number of literal
+ * position bits, and a mask derived from the number
* position bits)
*/
uint32_t lc;
@@ -484,7 +484,7 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
}
/*
- * Decode one bit. In some versions, this function has been splitted in three
+ * Decode one bit. In some versions, this function has been split in three
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
@@ -761,7 +761,7 @@ static bool lzma_main(struct xz_dec_lzma2 *s)
}
/*
- * Reset the LZMA decoder and range decoder state. Dictionary is nore reset
+ * Reset the LZMA decoder and range decoder state. Dictionary is not reset
* here, because LZMA state may be reset without resetting the dictionary.
*/
static void lzma_reset(struct xz_dec_lzma2 *s)
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index ed1f3df27260..f19c4fbe1be7 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -15,7 +15,7 @@ union uu {
unsigned char b[2];
};
-/* Endian independed version */
+/* Endian independent version */
static inline unsigned short
get_unaligned16(const unsigned short *p)
{
diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
index 2143da28d952..923218d12e28 100644
--- a/lib/zstd/huf.h
+++ b/lib/zstd/huf.h
@@ -134,7 +134,7 @@ typedef enum {
HUF_repeat_none, /**< Cannot use the previous table */
HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
4}X_repeat */
- HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */
+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} HUF_repeat;
/** HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
diff --git a/mm/Kconfig b/mm/Kconfig
index ded98fb859ab..40a9bfcd5062 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -96,6 +96,9 @@ config HAVE_FAST_GUP
depends on MMU
bool
+config HOLES_IN_ZONE
+ bool
+
# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
@@ -671,6 +674,7 @@ config ZPOOL
config ZBUD
tristate "Low (Up to 2x) density storage for compressed pages"
+ depends on ZPOOL
help
A special purpose allocator for storing compressed pages.
It is designed to store up to two compressed pages per physical
@@ -757,6 +761,18 @@ config ARCH_HAS_CACHE_LINE_SIZE
config ARCH_HAS_PTE_DEVMAP
bool
+config ARCH_HAS_ZONE_DMA_SET
+ bool
+
+config ZONE_DMA
+ bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
+ default y if ARM64 || X86
+
+config ZONE_DMA32
+ bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
+ depends on !X86_32
+ default y if ARM64
+
config ZONE_DEVICE
bool "Device memory (pmem, HMM, etc...) hotplug support"
depends on MEMORY_HOTPLUG
@@ -869,4 +885,8 @@ config KMAP_LOCAL
# struct io_mapping based helper. Selected by drivers that need them
config IO_MAPPING
bool
+
+config SECRETMEM
+ def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED
+
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index bf71e295e9f6..e3436741d539 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_FRONTSWAP) += frontswap.o
obj-$(CONFIG_ZSWAP) += zswap.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
+obj-$(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP) += hugetlb_vmemmap.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
@@ -112,6 +113,7 @@ obj-$(CONFIG_CMA) += cma.o
obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
+obj-$(CONFIG_SECRETMEM) += secretmem.o
obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
@@ -125,3 +127,4 @@ obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
obj-$(CONFIG_IO_MAPPING) += io-mapping.o
+obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 271f2ca862c8..f5561ea7d90a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -398,12 +398,12 @@ static void cgwb_release_workfn(struct work_struct *work)
blkcg_unpin_online(blkcg);
fprop_local_destroy_percpu(&wb->memcg_completions);
- percpu_ref_exit(&wb->refcnt);
spin_lock_irq(&cgwb_lock);
list_del(&wb->offline_node);
spin_unlock_irq(&cgwb_lock);
+ percpu_ref_exit(&wb->refcnt);
wb_exit(wb);
WARN_ON_ONCE(!list_empty(&wb->b_attached));
kfree_rcu(wb, rcu);
diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c
new file mode 100644
index 000000000000..5b152dba7344
--- /dev/null
+++ b/mm/bootmem_info.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bootmem core functions.
+ *
+ * Copyright (c) 2020, Bytedance.
+ *
+ * Author: Muchun Song <songmuchun@bytedance.com>
+ *
+ */
+#include <linux/mm.h>
+#include <linux/compiler.h>
+#include <linux/memblock.h>
+#include <linux/bootmem_info.h>
+#include <linux/memory_hotplug.h>
+
+void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
+{
+ page->freelist = (void *)type;
+ SetPagePrivate(page);
+ set_page_private(page, info);
+ page_ref_inc(page);
+}
+
+void put_page_bootmem(struct page *page)
+{
+ unsigned long type;
+
+ type = (unsigned long) page->freelist;
+ BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
+ type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
+
+ if (page_ref_dec_return(page) == 1) {
+ page->freelist = NULL;
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ INIT_LIST_HEAD(&page->lru);
+ free_reserved_page(page);
+ }
+}
+
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+ unsigned long mapsize, section_nr, i;
+ struct mem_section *ms;
+ struct page *page, *memmap;
+ struct mem_section_usage *usage;
+
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+
+ /* Get section's memmap address */
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+ /*
+ * Get page for the memmap's phys address
+ * XXX: need more consideration for sparse_vmemmap...
+ */
+ page = virt_to_page(memmap);
+ mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+ mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+ /* remember memmap's page */
+ for (i = 0; i < mapsize; i++, page++)
+ get_page_bootmem(section_nr, page, SECTION_INFO);
+
+ usage = ms->usage;
+ page = virt_to_page(usage);
+
+ mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
+
+ for (i = 0; i < mapsize; i++, page++)
+ get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
+
+}
+#else /* CONFIG_SPARSEMEM_VMEMMAP */
+static void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+ unsigned long mapsize, section_nr, i;
+ struct mem_section *ms;
+ struct page *page, *memmap;
+ struct mem_section_usage *usage;
+
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+ register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
+
+ usage = ms->usage;
+ page = virt_to_page(usage);
+
+ mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
+
+ for (i = 0; i < mapsize; i++, page++)
+ get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+ unsigned long i, pfn, end_pfn, nr_pages;
+ int node = pgdat->node_id;
+ struct page *page;
+
+ nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
+ page = virt_to_page(pgdat);
+
+ for (i = 0; i < nr_pages; i++, page++)
+ get_page_bootmem(node, page, NODE_INFO);
+
+ pfn = pgdat->node_start_pfn;
+ end_pfn = pgdat_end_pfn(pgdat);
+
+ /* register section info */
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ /*
+ * Some platforms can assign the same pfn to multiple nodes - on
+ * node0 as well as nodeN. To avoid registering a pfn against
+ * multiple nodes we check that this pfn does not already
+ * reside in some other nodes.
+ */
+ if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
+ register_page_bootmem_info_section(pfn);
+ }
+}
diff --git a/mm/compaction.c b/mm/compaction.c
index 3a509fbf2bea..621508e0ecd5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1297,8 +1297,7 @@ move_freelist_head(struct list_head *freelist, struct page *freepage)
if (!list_is_last(freelist, &freepage->lru)) {
list_cut_before(&sublist, freelist, &freepage->lru);
- if (!list_empty(&sublist))
- list_splice_tail(&sublist, freelist);
+ list_splice_tail(&sublist, freelist);
}
}
@@ -1315,8 +1314,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage)
if (!list_is_first(freelist, &freepage->lru)) {
list_cut_position(&sublist, freelist, &freepage->lru);
- if (!list_empty(&sublist))
- list_splice_tail(&sublist, freelist);
+ list_splice_tail(&sublist, freelist);
}
}
@@ -1380,7 +1378,7 @@ static int next_search_order(struct compact_control *cc, int order)
static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
- unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
+ unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
unsigned int nr_scanned = 0;
unsigned long low_pfn, min_pfn, highest = 0;
unsigned long nr_isolated = 0;
@@ -1492,11 +1490,11 @@ fast_isolate_freepages(struct compact_control *cc)
spin_unlock_irqrestore(&cc->zone->lock, flags);
/*
- * Smaller scan on next order so the total scan ig related
+ * Smaller scan on next order so the total scan is related
* to freelist_scan_limit.
*/
if (order_scanned >= limit)
- limit = min(1U, limit >> 1);
+ limit = max(1U, limit >> 1);
}
if (!page) {
@@ -2722,9 +2720,9 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
-static ssize_t sysfs_compact_node(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t compact_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nid = dev->id;
@@ -2737,7 +2735,7 @@ static ssize_t sysfs_compact_node(struct device *dev,
return count;
}
-static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
+static DEVICE_ATTR_WO(compact);
int compaction_register_node(struct node *node)
{
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 92bfc37300df..1c922691aa61 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -91,7 +91,7 @@ static void __init pte_advanced_tests(struct mm_struct *mm,
unsigned long pfn, unsigned long vaddr,
pgprot_t prot)
{
- pte_t pte = pfn_pte(pfn, prot);
+ pte_t pte;
/*
* Architectures optimize set_pte_at by avoiding TLB flush.
@@ -248,29 +248,6 @@ static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
WARN_ON(!pmd_leaf(pmd));
}
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
-{
- pmd_t pmd;
-
- if (!arch_vmap_pmd_supported(prot))
- return;
-
- pr_debug("Validating PMD huge\n");
- /*
- * X86 defined pmd_set_huge() verifies that the given
- * PMD is not a populated non-leaf entry.
- */
- WRITE_ONCE(*pmdp, __pmd(0));
- WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
- WARN_ON(!pmd_clear_huge(pmdp));
- pmd = READ_ONCE(*pmdp);
- WARN_ON(!pmd_none(pmd));
-}
-#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
pmd_t pmd;
@@ -395,30 +372,6 @@ static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
pud = pud_mkhuge(pud);
WARN_ON(!pud_leaf(pud));
}
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
-{
- pud_t pud;
-
- if (!arch_vmap_pud_supported(prot))
- return;
-
- pr_debug("Validating PUD huge\n");
- /*
- * X86 defined pud_set_huge() verifies that the given
- * PUD is not a populated non-leaf entry.
- */
- WRITE_ONCE(*pudp, __pud(0));
- WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
- WARN_ON(!pud_clear_huge(pudp));
- pud = READ_ONCE(*pudp);
- WARN_ON(!pud_none(pud));
-}
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
-#endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
@@ -428,9 +381,6 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
-static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
-{
-}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
@@ -449,14 +399,51 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
+ pmd_t pmd;
+
+ if (!arch_vmap_pmd_supported(prot))
+ return;
+
+ pr_debug("Validating PMD huge\n");
+ /*
+ * X86 defined pmd_set_huge() verifies that the given
+ * PMD is not a populated non-leaf entry.
+ */
+ WRITE_ONCE(*pmdp, __pmd(0));
+ WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
+ WARN_ON(!pmd_clear_huge(pmdp));
+ pmd = READ_ONCE(*pmdp);
+ WARN_ON(!pmd_none(pmd));
}
+
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
+ pud_t pud;
+
+ if (!arch_vmap_pud_supported(prot))
+ return;
+
+ pr_debug("Validating PUD huge\n");
+ /*
+ * X86 defined pud_set_huge() verifies that the given
+ * PUD is not a populated non-leaf entry.
+ */
+ WRITE_ONCE(*pudp, __pud(0));
+ WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
+ WARN_ON(!pud_clear_huge(pudp));
+ pud = READ_ONCE(*pudp);
+ WARN_ON(!pud_none(pud));
}
-static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
+static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
@@ -791,12 +778,12 @@ static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
-#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
-#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
@@ -856,17 +843,17 @@ static void __init swap_migration_tests(void)
* locked, otherwise it stumbles upon a BUG_ON().
*/
__SetPageLocked(page);
- swp = make_migration_entry(page, 1);
+ swp = make_writable_migration_entry(page_to_pfn(page));
WARN_ON(!is_migration_entry(swp));
- WARN_ON(!is_write_migration_entry(swp));
+ WARN_ON(!is_writable_migration_entry(swp));
- make_migration_entry_read(&swp);
+ swp = make_readable_migration_entry(swp_offset(swp));
WARN_ON(!is_migration_entry(swp));
- WARN_ON(is_write_migration_entry(swp));
+ WARN_ON(is_writable_migration_entry(swp));
- swp = make_migration_entry(page, 0);
+ swp = make_readable_migration_entry(page_to_pfn(page));
WARN_ON(!is_migration_entry(swp));
- WARN_ON(is_write_migration_entry(swp));
+ WARN_ON(is_writable_migration_entry(swp));
__ClearPageLocked(page);
__free_page(page);
}
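The swap_migration_tests() hunk above shows the conversion applied throughout this series: make_migration_entry(page, write) and make_migration_entry_read() give way to explicit writable/readable constructors keyed by pfn or swap offset. A minimal sketch of the new pattern, using only helpers visible in these hunks (the wrapper name downgrade_to_readable() is hypothetical, not part of the patch):

    #include <linux/swapops.h>

    /* Hypothetical helper: demote a writable migration entry to read-only. */
    static void downgrade_to_readable(swp_entry_t *entry)
    {
    	/* Old API (removed): make_migration_entry_read(entry); */
    	if (is_writable_migration_entry(*entry))
    		*entry = make_readable_migration_entry(swp_offset(*entry));
    }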
diff --git a/mm/filemap.c b/mm/filemap.c
index ac82a93d4f38..d1458ecf2f51 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3642,10 +3642,6 @@ again:
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
- *
- * Not only is this an optimisation, but it is also required
- * to check that the address is actually valid, when atomic
- * usercopies are used, below.
*/
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
status = -EFAULT;
@@ -3665,33 +3661,31 @@ again:
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
- copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+ copied = copy_page_from_iter_atomic(page, offset, bytes, i);
flush_dcache_page(page);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
page, fsdata);
- if (unlikely(status < 0))
- break;
- copied = status;
-
+ if (unlikely(status != copied)) {
+ iov_iter_revert(i, copied - max(status, 0L));
+ if (unlikely(status < 0))
+ break;
+ }
cond_resched();
- iov_iter_advance(i, copied);
- if (unlikely(copied == 0)) {
+ if (unlikely(status == 0)) {
/*
- * If we were unable to copy any data at all, we must
- * fall back to a single segment length write.
- *
- * If we didn't fallback here, we could livelock
- * because not all segments in the iov can be copied at
- * once without a pagefault.
+ * A short copy made ->write_end() reject the
+ * thing entirely. Might be memory poisoning
+ * halfway through, might be a race with munmap,
+ * might be severe memory pressure.
*/
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_single_seg_count(i));
+ if (copied)
+ bytes = copied;
goto again;
}
- pos += copied;
- written += copied;
+ pos += status;
+ written += status;
balance_dirty_pages_ratelimited(mapping);
} while (iov_iter_count(i));
diff --git a/mm/gup.c b/mm/gup.c
index 8651309f8ec3..b94717977d17 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -10,6 +10,7 @@
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/secretmem.h>
#include <linux/sched/signal.h>
#include <linux/rwsem.h>
@@ -855,6 +856,9 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
struct follow_page_context ctx = { NULL };
struct page *page;
+ if (vma_is_secretmem(vma))
+ return NULL;
+
page = follow_page_mask(vma, address, foll_flags, &ctx);
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
@@ -988,6 +992,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
return -EOPNOTSUPP;
+ if (vma_is_secretmem(vma))
+ return -EFAULT;
+
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
@@ -1501,6 +1508,67 @@ long populate_vma_page_range(struct vm_area_struct *vma,
}
/*
+ * faultin_vma_page_range() - populate (prefault) page tables inside the
+ * given VMA range readable/writable
+ *
+ * This takes care of mlocking the pages, too, if VM_LOCKED is set.
+ *
+ * @vma: target vma
+ * @start: start address
+ * @end: end address
+ * @write: whether to prefault readable or writable
+ * @locked: whether the mmap_lock is still held
+ *
+ * Returns either the number of processed pages in the vma, or a negative error
+ * code on error (see __get_user_pages()).
+ *
+ * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
+ * covered by the VMA.
+ *
+ * If @locked is NULL, it may be held for read or write and will be unperturbed.
+ *
+ * If @locked is non-NULL, it must be held for read only and may be released. If
+ * it's released, *@locked will be set to 0.
+ */
+long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, bool write, int *locked)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long nr_pages = (end - start) / PAGE_SIZE;
+ int gup_flags;
+
+ VM_BUG_ON(!PAGE_ALIGNED(start));
+ VM_BUG_ON(!PAGE_ALIGNED(end));
+ VM_BUG_ON_VMA(start < vma->vm_start, vma);
+ VM_BUG_ON_VMA(end > vma->vm_end, vma);
+ mmap_assert_locked(mm);
+
+ /*
+ * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
+ * the page dirty with FOLL_WRITE -- which doesn't make a
+ * difference with !FOLL_FORCE, because the page is writable
+ * in the page table.
+ * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
+ * a poisoned page.
+ * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT.
+ * !FOLL_FORCE: Require proper access permissions.
+ */
+ gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON;
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
+ /*
+ * We want to report -EINVAL instead of -EFAULT for any permission
+ * problems or incompatible mappings.
+ */
+ if (check_vma_flags(vma, gup_flags))
+ return -EINVAL;
+
+ return __get_user_pages(mm, start, nr_pages, gup_flags,
+ NULL, NULL, locked);
+}
+
+/*
* __mm_populate - populate and/or mlock pages within a range of address space.
*
* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
@@ -2112,6 +2180,11 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
if (!head)
goto pte_unmap;
+ if (unlikely(page_is_secretmem(page))) {
+ put_compound_head(head, 1, flags);
+ goto pte_unmap;
+ }
+
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_compound_head(head, 1, flags);
goto pte_unmap;
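faultin_vma_page_range(), added above, has no caller in this excerpt; a hypothetical caller written against the contract spelled out in its comment (page-aligned range fully inside the VMA, mmap_lock held, *locked following the usual GUP convention) could look like this sketch:

    /* Hypothetical caller (not from this patch): prefault a whole VMA for write. */
    static long prefault_whole_vma(struct vm_area_struct *vma)
    {
    	int locked = 1;

    	mmap_assert_locked(vma->vm_mm);
    	/* vm_start/vm_end are page aligned and cover the range by definition. */
    	return faultin_vma_page_range(vma, vma->vm_start, vma->vm_end,
    				      true, &locked);
    }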
diff --git a/mm/hmm.c b/mm/hmm.c
index 943cb2ba4442..fad6be2bf072 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -26,6 +26,8 @@
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
+#include "internal.h"
+
struct hmm_vma_walk {
struct hmm_range *range;
unsigned long last;
@@ -214,7 +216,7 @@ static inline bool hmm_is_device_private_entry(struct hmm_range *range,
swp_entry_t entry)
{
return is_device_private_entry(entry) &&
- device_private_entry_to_page(entry)->pgmap->owner ==
+ pfn_swap_entry_to_page(entry)->pgmap->owner ==
range->dev_private_owner;
}
@@ -255,10 +257,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
*/
if (hmm_is_device_private_entry(range, entry)) {
cpu_flags = HMM_PFN_VALID;
- if (is_write_device_private_entry(entry))
+ if (is_writable_device_private_entry(entry))
cpu_flags |= HMM_PFN_WRITE;
- *hmm_pfn = device_private_entry_to_pfn(entry) |
- cpu_flags;
+ *hmm_pfn = swp_offset(entry) | cpu_flags;
return 0;
}
@@ -272,6 +273,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
if (!non_swap_entry(entry))
goto fault;
+ if (is_device_exclusive_entry(entry))
+ goto fault;
+
if (is_migration_entry(entry)) {
pte_unmap(ptep);
hmm_vma_walk->last = addr;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6d2a0119fc58..afff3ac87067 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -64,7 +64,14 @@ static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
-bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool file_thp_enabled(struct vm_area_struct *vma)
+{
+ return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
+ !inode_is_open_for_write(vma->vm_file->f_inode) &&
+ (vma->vm_flags & VM_EXEC);
+}
+
+bool transparent_hugepage_active(struct vm_area_struct *vma)
{
/* The addr is used to check if the vma size fits */
unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
@@ -75,6 +82,8 @@ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
return __transparent_hugepage_enabled(vma);
if (vma_is_shmem(vma))
return shmem_huge_enabled(vma);
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
+ return file_thp_enabled(vma);
return false;
}
@@ -1017,7 +1026,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma)
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
@@ -1026,7 +1035,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
int ret = -ENOMEM;
/* Skip if can be re-fill on fault */
- if (!vma_is_anonymous(vma))
+ if (!vma_is_anonymous(dst_vma))
return 0;
pgtable = pte_alloc_one(dst_mm);
@@ -1040,29 +1049,26 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = -EAGAIN;
pmd = *src_pmd;
- /*
- * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
- * does not have the VM_UFFD_WP, which means that the uffd
- * fork event is not enabled.
- */
- if (!(vma->vm_flags & VM_UFFD_WP))
- pmd = pmd_clear_uffd_wp(pmd);
-
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
if (unlikely(is_swap_pmd(pmd))) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
VM_BUG_ON(!is_pmd_migration_entry(pmd));
- if (is_write_migration_entry(entry)) {
- make_migration_entry_read(&entry);
+ if (is_writable_migration_entry(entry)) {
+ entry = make_readable_migration_entry(
+ swp_offset(entry));
pmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*src_pmd))
pmd = pmd_swp_mksoft_dirty(pmd);
+ if (pmd_swp_uffd_wp(*src_pmd))
+ pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+ if (!userfaultfd_wp(dst_vma))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
goto out_unlock;
@@ -1079,17 +1085,13 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* a page table.
*/
if (is_huge_zero_pmd(pmd)) {
- struct page *zero_page;
/*
* get_huge_zero_page() will never allocate a new page here,
* since we already have a zero page to copy. It just takes a
* reference.
*/
- zero_page = mm_get_huge_zero_page(dst_mm);
- set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
- zero_page);
- ret = 0;
- goto out_unlock;
+ mm_get_huge_zero_page(dst_mm);
+ goto out_zero_page;
}
src_page = pmd_page(pmd);
@@ -1102,21 +1104,23 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* best effort that the pinned pages won't be replaced by another
* random page during the coming copy-on-write.
*/
- if (unlikely(page_needs_cow_for_dma(vma, src_page))) {
+ if (unlikely(page_needs_cow_for_dma(src_vma, src_page))) {
pte_free(dst_mm, pgtable);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
- __split_huge_pmd(vma, src_pmd, addr, false, NULL);
+ __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
return -EAGAIN;
}
get_page(src_page);
page_dup_rmap(src_page, true);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+out_zero_page:
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
-
pmdp_set_wrprotect(src_mm, addr, src_pmd);
+ if (!userfaultfd_wp(dst_vma))
+ pmd = pmd_clear_uffd_wp(pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1254,11 +1258,12 @@ unlock:
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
+void huge_pmd_set_accessed(struct vm_fault *vmf)
{
pmd_t entry;
unsigned long haddr;
bool write = vmf->flags & FAULT_FLAG_WRITE;
+ pmd_t orig_pmd = vmf->orig_pmd;
vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
@@ -1275,11 +1280,12 @@ unlock:
spin_unlock(vmf->ptl);
}
-vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ pmd_t orig_pmd = vmf->orig_pmd;
vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
VM_BUG_ON_VMA(!vma->anon_vma, vma);
@@ -1415,96 +1421,25 @@ out:
}
/* NUMA hinting page fault entry point for trans huge pmds */
-vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct anon_vma *anon_vma = NULL;
+ pmd_t oldpmd = vmf->orig_pmd;
+ pmd_t pmd;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
+ int page_nid = NUMA_NO_NODE;
int target_nid, last_cpupid = -1;
- bool page_locked;
bool migrated = false;
- bool was_writable;
+ bool was_writable = pmd_savedwrite(oldpmd);
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(pmd, *vmf->pmd)))
- goto out_unlock;
-
- /*
- * If there are potential migrations, wait for completion and retry
- * without disrupting NUMA hinting information. Do not relock and
- * check_same as the page may no longer be mapped.
- */
- if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
- page = pmd_page(*vmf->pmd);
- if (!get_page_unless_zero(page))
- goto out_unlock;
+ if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
spin_unlock(vmf->ptl);
- put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
goto out;
}
- page = pmd_page(pmd);
- BUG_ON(is_huge_zero_page(page));
- page_nid = page_to_nid(page);
- last_cpupid = page_cpupid_last(page);
- count_vm_numa_event(NUMA_HINT_FAULTS);
- if (page_nid == this_nid) {
- count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
- flags |= TNF_FAULT_LOCAL;
- }
-
- /* See similar comment in do_numa_page for explanation */
- if (!pmd_savedwrite(pmd))
- flags |= TNF_NO_GROUP;
-
- /*
- * Acquire the page lock to serialise THP migrations but avoid dropping
- * page_table_lock if at all possible
- */
- page_locked = trylock_page(page);
- target_nid = mpol_misplaced(page, vma, haddr);
- /* Migration could have started since the pmd_trans_migrating check */
- if (!page_locked) {
- page_nid = NUMA_NO_NODE;
- if (!get_page_unless_zero(page))
- goto out_unlock;
- spin_unlock(vmf->ptl);
- put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
- goto out;
- } else if (target_nid == NUMA_NO_NODE) {
- /* There are no parallel migrations and page is in the right
- * node. Clear the numa hinting info in this pmd.
- */
- goto clear_pmdnuma;
- }
-
- /*
- * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
- * to serialises splits
- */
- get_page(page);
- spin_unlock(vmf->ptl);
- anon_vma = page_lock_anon_vma_read(page);
-
- /* Confirm the PMD did not change while page_table_lock was released */
- spin_lock(vmf->ptl);
- if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
- unlock_page(page);
- put_page(page);
- page_nid = NUMA_NO_NODE;
- goto out_unlock;
- }
-
- /* Bail if we fail to protect against THP splits for any reason */
- if (unlikely(!anon_vma)) {
- put_page(page);
- page_nid = NUMA_NO_NODE;
- goto clear_pmdnuma;
- }
-
/*
* Since we took the NUMA fault, we must have observed the !accessible
* bit. Make sure all other CPUs agree with that, to avoid them
@@ -1531,43 +1466,58 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
haddr + HPAGE_PMD_SIZE);
}
- /*
- * Migrate the THP to the requested node, returns with page unlocked
- * and access rights restored.
- */
+ pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ page = vm_normal_page_pmd(vma, haddr, pmd);
+ if (!page)
+ goto out_map;
+
+ /* See similar comment in do_numa_page for explanation */
+ if (!was_writable)
+ flags |= TNF_NO_GROUP;
+
+ page_nid = page_to_nid(page);
+ last_cpupid = page_cpupid_last(page);
+ target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
+ &flags);
+
+ if (target_nid == NUMA_NO_NODE) {
+ put_page(page);
+ goto out_map;
+ }
+
spin_unlock(vmf->ptl);
- migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
- vmf->pmd, pmd, vmf->address, page, target_nid);
+ migrated = migrate_misplaced_page(page, vma, target_nid);
if (migrated) {
flags |= TNF_MIGRATED;
page_nid = target_nid;
- } else
+ } else {
flags |= TNF_MIGRATE_FAIL;
-
- goto out;
-clear_pmdnuma:
- BUG_ON(!PageLocked(page));
- was_writable = pmd_savedwrite(pmd);
- pmd = pmd_modify(pmd, vma->vm_page_prot);
- pmd = pmd_mkyoung(pmd);
- if (was_writable)
- pmd = pmd_mkwrite(pmd);
- set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
- update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
- unlock_page(page);
-out_unlock:
- spin_unlock(vmf->ptl);
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ spin_unlock(vmf->ptl);
+ goto out;
+ }
+ goto out_map;
+ }
out:
- if (anon_vma)
- page_unlock_anon_vma_read(anon_vma);
-
if (page_nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
flags);
return 0;
+
+out_map:
+ /* Restore the PMD */
+ pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_mkyoung(pmd);
+ if (was_writable)
+ pmd = pmd_mkwrite(pmd);
+ set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+ update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ spin_unlock(vmf->ptl);
+ goto out;
}
/*
@@ -1604,7 +1554,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* If other processes are mapping this page, we couldn't discard
* the page unless they all do MADV_FREE so let's skip the page.
*/
- if (page_mapcount(page) != 1)
+ if (total_mapcount(page) != 1)
goto out;
if (!trylock_page(page))
@@ -1677,12 +1627,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
- if (is_huge_zero_pmd(orig_pmd))
- tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
} else if (is_huge_zero_pmd(orig_pmd)) {
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
- tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
} else {
struct page *page = NULL;
int flush_needed = 1;
@@ -1697,7 +1644,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
@@ -1796,6 +1743,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
* Returns
* - 0 if PMD could not be locked
* - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
+ * or if prot_numa but THP migration is not supported
* - HPAGE_PMD_NR if protections changed and TLB flush necessary
*/
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
@@ -1810,6 +1758,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+ if (prot_numa && !thp_migration_supported())
+ return 1;
+
ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl)
return 0;
@@ -1822,16 +1773,19 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry = pmd_to_swp_entry(*pmd);
VM_BUG_ON(!is_pmd_migration_entry(*pmd));
- if (is_write_migration_entry(entry)) {
+ if (is_writable_migration_entry(entry)) {
pmd_t newpmd;
/*
* A protection check is difficult so
* just be safe and disable write
*/
- make_migration_entry_read(&entry);
+ entry = make_readable_migration_entry(
+ swp_offset(entry));
newpmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*pmd))
newpmd = pmd_swp_mksoft_dirty(newpmd);
+ if (pmd_swp_uffd_wp(*pmd))
+ newpmd = pmd_swp_mkuffd_wp(newpmd);
set_pmd_at(mm, addr, pmd, newpmd);
}
goto unlock;
@@ -2060,7 +2014,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
} else {
page = pmd_page(old_pmd);
if (!PageDirty(page) && pmd_dirty(old_pmd))
@@ -2114,8 +2068,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
- page = migration_entry_to_page(entry);
- write = is_write_migration_entry(entry);
+ page = pfn_swap_entry_to_page(entry);
+ write = is_writable_migration_entry(entry);
young = false;
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
@@ -2147,7 +2101,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (freeze || pmd_migration) {
swp_entry_t swp_entry;
- swp_entry = make_migration_entry(page + i, write);
+ if (write)
+ swp_entry = make_writable_migration_entry(
+ page_to_pfn(page + i));
+ else
+ swp_entry = make_readable_migration_entry(
+ page_to_pfn(page + i));
entry = swp_entry_to_pte(swp_entry);
if (soft_dirty)
entry = pte_swp_mksoft_dirty(entry);
@@ -2350,15 +2309,20 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page)
{
- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
- TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
+ enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
+ TTU_SYNC;
VM_BUG_ON_PAGE(!PageHead(page), page);
+ /*
+ * Anon pages need migration entries to preserve them, but file
+ * pages can simply be left unmapped, then faulted back on demand.
+ * If that is ever changed (perhaps for mlock), update remap_page().
+ */
if (PageAnon(page))
- ttu_flags |= TTU_SPLIT_FREEZE;
-
- try_to_unmap(page, ttu_flags);
+ try_to_migrate(page, ttu_flags);
+ else
+ try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK);
VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}
@@ -2366,6 +2330,10 @@ static void unmap_page(struct page *page)
static void remap_page(struct page *page, unsigned int nr)
{
int i;
+
+ /* If unmap_page() uses try_to_migrate() on file, remove this check */
+ if (!PageAnon(page))
+ return;
if (PageTransHuge(page)) {
remove_migration_ptes(page, page, true);
} else {
@@ -2870,7 +2838,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
/* Take pin on all head pages to avoid freeing them under us */
list_for_each_safe(pos, next, &ds_queue->split_queue) {
- page = list_entry((void *)pos, struct page, mapping);
+ page = list_entry((void *)pos, struct page, deferred_list);
page = compound_head(page);
if (get_page_unless_zero(page)) {
list_move(page_deferred_list(page), &list);
@@ -2885,7 +2853,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
list_for_each_safe(pos, next, &list) {
- page = list_entry((void *)pos, struct page, mapping);
+ page = list_entry((void *)pos, struct page, deferred_list);
if (!trylock_page(page))
goto next;
/* split_huge_page() removes page from list on success */
@@ -3144,7 +3112,7 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
tok = strsep(&buf, ",");
if (tok) {
- strncpy(file_path, tok, MAX_INPUT_BUF_SZ);
+ strcpy(file_path, tok);
} else {
ret = -EINVAL;
goto out;
@@ -3214,7 +3182,10 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
if (pmd_dirty(pmdval))
set_page_dirty(page);
- entry = make_migration_entry(page, pmd_write(pmdval));
+ if (pmd_write(pmdval))
+ entry = make_writable_migration_entry(page_to_pfn(page));
+ else
+ entry = make_readable_migration_entry(page_to_pfn(page));
pmdswp = swp_entry_to_pmd(entry);
if (pmd_soft_dirty(pmdval))
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
@@ -3240,8 +3211,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
- if (is_write_migration_entry(entry))
+ if (is_writable_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
+ if (pmd_swp_uffd_wp(*pvmw->pmd))
+ pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
if (PageAnon(new))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 103f1187043f..8ea35ba6699f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -30,6 +30,7 @@
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
+#include <linux/migrate.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -41,6 +42,7 @@
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
+#include "hugetlb_vmemmap.h"
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
@@ -1318,8 +1320,6 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}
-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
-static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
@@ -1375,7 +1375,40 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
h->nr_huge_pages_node[nid]--;
}
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void add_hugetlb_page(struct hstate *h, struct page *page,
+ bool adjust_surplus)
+{
+ int zeroed;
+ int nid = page_to_nid(page);
+
+ VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
+
+ lockdep_assert_held(&hugetlb_lock);
+
+ INIT_LIST_HEAD(&page->lru);
+ h->nr_huge_pages++;
+ h->nr_huge_pages_node[nid]++;
+
+ if (adjust_surplus) {
+ h->surplus_huge_pages++;
+ h->surplus_huge_pages_node[nid]++;
+ }
+
+ set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+ set_page_private(page, 0);
+ SetHPageVmemmapOptimized(page);
+
+ /*
+ * This page is now managed by the hugetlb allocator and has
+ * no users -- drop the last reference.
+ */
+ zeroed = put_page_testzero(page);
+ VM_BUG_ON_PAGE(!zeroed, page);
+ arch_clear_hugepage_flags(page);
+ enqueue_huge_page(h, page);
+}
+
+static void __update_and_free_page(struct hstate *h, struct page *page)
{
int i;
struct page *subpage = page;
@@ -1383,6 +1416,18 @@ static void update_and_free_page(struct hstate *h, struct page *page)
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return;
+ if (alloc_huge_page_vmemmap(h, page)) {
+ spin_lock_irq(&hugetlb_lock);
+ /*
+ * If we cannot allocate vmemmap pages, just refuse to free the
+ * page, put the page back on the hugetlb free list, and treat it
+ * as a surplus page.
+ */
+ add_hugetlb_page(h, page, true);
+ spin_unlock_irq(&hugetlb_lock);
+ return;
+ }
+
for (i = 0; i < pages_per_huge_page(h);
i++, subpage = mem_map_next(subpage, page, i)) {
subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -1398,12 +1443,79 @@ static void update_and_free_page(struct hstate *h, struct page *page)
}
}
+/*
+ * Because update_and_free_page() can be called under any context, we cannot
+ * use GFP_KERNEL to allocate vmemmap pages. Instead, we defer the actual
+ * freeing to a workqueue so that the vmemmap pages can be allocated without
+ * resorting to GFP_ATOMIC.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to be
+ * freed and frees them one-by-one. As the page->mapping pointer is going
+ * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
+ * structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+ struct llist_node *node;
+
+ node = llist_del_all(&hpage_freelist);
+
+ while (node) {
+ struct page *page;
+ struct hstate *h;
+
+ page = container_of((struct address_space **)node,
+ struct page, mapping);
+ node = node->next;
+ page->mapping = NULL;
+ /*
+ * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
+ * is going to trigger because a previous call to
+ * remove_hugetlb_page() will set_compound_page_dtor(page,
+ * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
+ */
+ h = size_to_hstate(page_size(page));
+
+ __update_and_free_page(h, page);
+
+ cond_resched();
+ }
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+static inline void flush_free_hpage_work(struct hstate *h)
+{
+ if (free_vmemmap_pages_per_hpage(h))
+ flush_work(&free_hpage_work);
+}
+
+static void update_and_free_page(struct hstate *h, struct page *page,
+ bool atomic)
+{
+ if (!HPageVmemmapOptimized(page) || !atomic) {
+ __update_and_free_page(h, page);
+ return;
+ }
+
+ /*
+ * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
+ *
+ * Only call schedule_work() if hpage_freelist was previously
+ * empty. Otherwise, schedule_work() has already been called but the
+ * workfn hasn't retrieved the list yet.
+ */
+ if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
+ schedule_work(&free_hpage_work);
+}
+
static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
struct page *page, *t_page;
list_for_each_entry_safe(page, t_page, list, lru) {
- update_and_free_page(h, page);
+ update_and_free_page(h, page, false);
cond_resched();
}
}
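The llist plus workqueue combination above is a general pattern for moving frees out of atomic context; a stripped-down, self-contained sketch of the same idea (every name here -- struct deferred_item, defer_free(), deferred_free_work -- is hypothetical and not part of the patch):

    #include <linux/llist.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/sched.h>

    struct deferred_item {
    	struct llist_node lnode;
    	/* payload ... */
    };

    static LLIST_HEAD(deferred_items);

    static void deferred_free_workfn(struct work_struct *work)
    {
    	struct llist_node *node = llist_del_all(&deferred_items);

    	while (node) {
    		struct deferred_item *item =
    			container_of(node, struct deferred_item, lnode);

    		node = node->next;
    		kfree(item);	/* runs in sleepable workqueue context */
    		cond_resched();
    	}
    }
    static DECLARE_WORK(deferred_free_work, deferred_free_workfn);

    static void defer_free(struct deferred_item *item)
    {
    	/* Kick the work item only when the list transitions from empty. */
    	if (llist_add(&item->lnode, &deferred_items))
    		schedule_work(&deferred_free_work);
    }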
@@ -1470,12 +1582,12 @@ void free_huge_page(struct page *page)
if (HPageTemporary(page)) {
remove_hugetlb_page(h, page, false);
spin_unlock_irqrestore(&hugetlb_lock, flags);
- update_and_free_page(h, page);
+ update_and_free_page(h, page, true);
} else if (h->surplus_huge_pages_node[nid]) {
/* remove the page from active list */
remove_hugetlb_page(h, page, true);
spin_unlock_irqrestore(&hugetlb_lock, flags);
- update_and_free_page(h, page);
+ update_and_free_page(h, page, true);
} else {
arch_clear_hugepage_flags(page);
enqueue_huge_page(h, page);
@@ -1493,8 +1605,9 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
h->nr_huge_pages_node[nid]++;
}
-static void __prep_new_huge_page(struct page *page)
+static void __prep_new_huge_page(struct hstate *h, struct page *page)
{
+ free_huge_page_vmemmap(h, page);
INIT_LIST_HEAD(&page->lru);
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
hugetlb_set_page_subpool(page, NULL);
@@ -1504,15 +1617,15 @@ static void __prep_new_huge_page(struct page *page)
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
- __prep_new_huge_page(page);
+ __prep_new_huge_page(h, page);
spin_lock_irq(&hugetlb_lock);
__prep_account_new_huge_page(h, nid);
spin_unlock_irq(&hugetlb_lock);
}
-static void prep_compound_gigantic_page(struct page *page, unsigned int order)
+static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
{
- int i;
+ int i, j;
int nr_pages = 1 << order;
struct page *p = page + 1;
@@ -1534,11 +1647,48 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
* after get_user_pages().
*/
__ClearPageReserved(p);
+ /*
+ * Subtle and very unlikely
+ *
+ * Gigantic 'page allocators' such as memblock or cma will
+ * return a set of pages with each page ref counted. We need
+ * to turn this set of pages into a compound page with tail
+ * page ref counts set to zero. Code such as speculative page
+ * cache adding could take a ref on a 'to be' tail page.
+ * We need to respect any increased ref count, and only set
+ * the ref count to zero if count is currently 1. If count
+ * is not 1, we call synchronize_rcu() in the hope that an RCU
+ * grace period will cause the ref count to drop, and then we retry.
+ * If count is still inflated on retry we return an error and
+ * must discard the pages.
+ */
+ if (!page_ref_freeze(p, 1)) {
+ pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
+ synchronize_rcu();
+ if (!page_ref_freeze(p, 1))
+ goto out_error;
+ }
set_page_count(p, 0);
set_compound_head(p, page);
}
atomic_set(compound_mapcount_ptr(page), -1);
atomic_set(compound_pincount_ptr(page), 0);
+ return true;
+
+out_error:
+ /* undo tail page modifications made above */
+ p = page + 1;
+ for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
+ clear_compound_head(p);
+ set_page_refcounted(p);
+ }
+ /* need to clear PG_reserved on remaining tail pages */
+ for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
+ __ClearPageReserved(p);
+ set_compound_order(page, 0);
+ page[1].compound_nr = 0;
+ __ClearPageHead(page);
+ return false;
}
/*
@@ -1658,7 +1808,9 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
nodemask_t *node_alloc_noretry)
{
struct page *page;
+ bool retry = false;
+retry:
if (hstate_is_gigantic(h))
page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
else
@@ -1667,8 +1819,21 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
if (!page)
return NULL;
- if (hstate_is_gigantic(h))
- prep_compound_gigantic_page(page, huge_page_order(h));
+ if (hstate_is_gigantic(h)) {
+ if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
+ /*
+ * Rare failure to convert pages to compound page.
+ * Free pages and try again - ONCE!
+ */
+ free_gigantic_page(page, huge_page_order(h));
+ if (!retry) {
+ retry = true;
+ goto retry;
+ }
+ pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
+ return NULL;
+ }
+ }
prep_new_huge_page(h, page, page_to_nid(page));
return page;
@@ -1737,10 +1902,14 @@ static struct page *remove_pool_huge_page(struct hstate *h,
* nothing for in-use hugepages and non-hugepages.
* This function returns values like below:
*
- * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
- * (allocated or reserved.)
- * 0: successfully dissolved free hugepages or the page is not a
- * hugepage (considered as already dissolved)
+ * -ENOMEM: failed to allocate the vmemmap pages needed to free the
+ * hugepage; this can happen when the system is under memory pressure
+ * and the feature of freeing unused vmemmap pages associated with
+ * each hugetlb page is enabled.
+ * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
+ * (allocated or reserved.)
+ * 0: successfully dissolved free hugepages or the page is not a
+ * hugepage (considered as already dissolved)
*/
int dissolve_free_huge_page(struct page *page)
{
@@ -1782,19 +1951,38 @@ retry:
goto retry;
}
- /*
- * Move PageHWPoison flag from head page to the raw error page,
- * which makes any subpages rather than the error page reusable.
- */
- if (PageHWPoison(head) && page != head) {
- SetPageHWPoison(page);
- ClearPageHWPoison(head);
- }
remove_hugetlb_page(h, head, false);
h->max_huge_pages--;
spin_unlock_irq(&hugetlb_lock);
- update_and_free_page(h, head);
- return 0;
+
+ /*
+ * Normally update_and_free_page will allocate required vmemmap
+ * before freeing the page. update_and_free_page will fail to
+ * free the page if it cannot allocate required vmemmap. We
+ * need to adjust max_huge_pages if the page is not freed.
+ * Attempt to allocate vmemmap here so that we can take
+ * appropriate action on failure.
+ */
+ rc = alloc_huge_page_vmemmap(h, head);
+ if (!rc) {
+ /*
+ * Move PageHWPoison flag from head page to the raw
+ * error page, which makes any subpages rather than
+ * the error page reusable.
+ */
+ if (PageHWPoison(head) && page != head) {
+ SetPageHWPoison(page);
+ ClearPageHWPoison(head);
+ }
+ update_and_free_page(h, head, false);
+ } else {
+ spin_lock_irq(&hugetlb_lock);
+ add_hugetlb_page(h, head, false);
+ h->max_huge_pages++;
+ spin_unlock_irq(&hugetlb_lock);
+ }
+
+ return rc;
}
out:
spin_unlock_irq(&hugetlb_lock);
@@ -2288,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
if (!rc) {
/*
* This indicates there is an entry in the reserve map
- * added by alloc_huge_page. We know it was added
+ * not added by alloc_huge_page. We know it was added
* before the alloc_huge_page call, otherwise
* HPageRestoreReserve would be set on the page.
* Remove the entry so that a subsequent allocation
@@ -2351,14 +2539,15 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
/*
* Before dissolving the page, we need to allocate a new one for the
- * pool to remain stable. Using alloc_buddy_huge_page() allows us to
- * not having to deal with prep_new_huge_page() and avoids dealing of any
- * counters. This simplifies and let us do the whole thing under the
- * lock.
+ * pool to remain stable. Here, we allocate the page and 'prep' it
+ * by doing everything but actually updating counters and adding to
+ * the pool. This simplifies things and lets us do most of the processing
+ * under the lock.
*/
new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
if (!new_page)
return -ENOMEM;
+ __prep_new_huge_page(h, new_page);
retry:
spin_lock_irq(&hugetlb_lock);
@@ -2397,14 +2586,9 @@ retry:
remove_hugetlb_page(h, old_page, false);
/*
- * new_page needs to be initialized with the standard hugetlb
- * state. This is normally done by prep_new_huge_page() but
- * that takes hugetlb_lock which is already held so we need to
- * open code it here.
* Reference count trick is needed because allocator gives us
* referenced page but the pool requires pages with 0 refcount.
*/
- __prep_new_huge_page(new_page);
__prep_account_new_huge_page(h, nid);
page_ref_dec(new_page);
enqueue_huge_page(h, new_page);
@@ -2413,14 +2597,14 @@ retry:
* Pages have been replaced, we can safely free the old one.
*/
spin_unlock_irq(&hugetlb_lock);
- update_and_free_page(h, old_page);
+ update_and_free_page(h, old_page, false);
}
return ret;
free_new:
spin_unlock_irq(&hugetlb_lock);
- __free_pages(new_page, huge_page_order(h));
+ update_and_free_page(h, new_page, false);
return ret;
}
@@ -2625,16 +2809,10 @@ found:
return 1;
}
-static void __init prep_compound_huge_page(struct page *page,
- unsigned int order)
-{
- if (unlikely(order > (MAX_ORDER - 1)))
- prep_compound_gigantic_page(page, order);
- else
- prep_compound_page(page, order);
-}
-
-/* Put bootmem huge pages into the standard lists after mem_map is up */
+/*
+ * Put bootmem huge pages into the standard lists after mem_map is up.
+ * Note: This only applies to gigantic (order > MAX_ORDER) pages.
+ */
static void __init gather_bootmem_prealloc(void)
{
struct huge_bootmem_page *m;
@@ -2643,20 +2821,23 @@ static void __init gather_bootmem_prealloc(void)
struct page *page = virt_to_page(m);
struct hstate *h = m->hstate;
+ VM_BUG_ON(!hstate_is_gigantic(h));
WARN_ON(page_count(page) != 1);
- prep_compound_huge_page(page, huge_page_order(h));
- WARN_ON(PageReserved(page));
- prep_new_huge_page(h, page, page_to_nid(page));
- put_page(page); /* free it into the hugepage allocator */
+ if (prep_compound_gigantic_page(page, huge_page_order(h))) {
+ WARN_ON(PageReserved(page));
+ prep_new_huge_page(h, page, page_to_nid(page));
+ put_page(page); /* add to the hugepage allocator */
+ } else {
+ free_gigantic_page(page, huge_page_order(h));
+ pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
+ }
/*
- * If we had gigantic hugepages allocated at boot time, we need
- * to restore the 'stolen' pages to totalram_pages in order to
- * fix confusing memory reports from free(1) and another
- * side-effects, like CommitLimit going negative.
+ * We need to restore the 'stolen' pages to totalram_pages
+ * in order to fix confusing memory reports from free(1) and
+ * other side-effects, like CommitLimit going negative.
*/
- if (hstate_is_gigantic(h))
- adjust_managed_page_count(page, pages_per_huge_page(h));
+ adjust_managed_page_count(page, pages_per_huge_page(h));
cond_resched();
}
}
@@ -2834,6 +3015,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* pages in hstate via the proc/sysfs interfaces.
*/
mutex_lock(&h->resize_lock);
+ flush_free_hpage_work(h);
spin_lock_irq(&hugetlb_lock);
/*
@@ -2943,6 +3125,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
/* free the pages after dropping lock */
spin_unlock_irq(&hugetlb_lock);
update_and_free_pages_bulk(h, &page_list);
+ flush_free_hpage_work(h);
spin_lock_irq(&hugetlb_lock);
while (count < persistent_huge_pages(h)) {
@@ -3450,6 +3633,7 @@ void __init hugetlb_add_hstate(unsigned int order)
h->next_nid_to_free = first_memory_node;
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
+ hugetlb_vmemmap_init(h);
parsed_hstate = h;
}
@@ -3924,6 +4108,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
int writable)
{
pte_t entry;
+ unsigned int shift = huge_page_shift(hstate_vma(vma));
if (writable) {
entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
@@ -3934,7 +4119,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
- entry = arch_make_huge_pte(entry, vma, page, writable);
+ entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
return entry;
}
@@ -4057,12 +4242,13 @@ again:
is_hugetlb_entry_hwpoisoned(entry))) {
swp_entry_t swp_entry = pte_to_swp_entry(entry);
- if (is_write_migration_entry(swp_entry) && cow) {
+ if (is_writable_migration_entry(swp_entry) && cow) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
- make_migration_entry_read(&swp_entry);
+ swp_entry = make_readable_migration_entry(
+ swp_offset(swp_entry));
entry = swp_entry_to_pte(swp_entry);
set_huge_swap_pte_at(src, addr, src_pte,
entry, sz);
@@ -4474,7 +4660,9 @@ retry_avoidcopy:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(&range);
out_release_all:
- restore_reserve_on_error(h, vma, haddr, new_page);
+ /* No restore in case of successful pagetable update (Break COW) */
+ if (new_page != old_page)
+ restore_reserve_on_error(h, vma, haddr, new_page);
put_page(new_page);
out_release_old:
put_page(old_page);
@@ -4590,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
- bool new_page = false;
+ bool new_page, new_pagecache_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -4613,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
goto out;
retry:
+ new_page = false;
page = find_lock_page(mapping, idx);
if (!page) {
/* Check for page in userfault range */
@@ -4656,6 +4845,7 @@ retry:
goto retry;
goto out;
}
+ new_pagecache_page = true;
} else {
lock_page(page);
if (unlikely(anon_vma_prepare(vma))) {
@@ -4740,7 +4930,9 @@ backout:
spin_unlock(ptl);
backout_unlocked:
unlock_page(page);
- restore_reserve_on_error(h, vma, haddr, page);
+ /* restore reserve for newly allocated pages not in page cache */
+ if (new_page && !new_pagecache_page)
+ restore_reserve_on_error(h, vma, haddr, page);
put_page(page);
goto out;
}
@@ -4939,19 +5131,17 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
struct page **pagep)
{
bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
- struct address_space *mapping;
- pgoff_t idx;
+ struct hstate *h = hstate_vma(dst_vma);
+ struct address_space *mapping = dst_vma->vm_file->f_mapping;
+ pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
unsigned long size;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
- struct hstate *h = hstate_vma(dst_vma);
pte_t _dst_pte;
spinlock_t *ptl;
- int ret;
+ int ret = -ENOMEM;
struct page *page;
int writable;
-
- mapping = dst_vma->vm_file->f_mapping;
- idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+ bool new_pagecache_page = false;
if (is_continue) {
ret = -EFAULT;
@@ -4981,12 +5171,44 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
/* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
ret = -ENOENT;
+ /* Free the allocated page which may have
+ * consumed a reservation.
+ */
+ restore_reserve_on_error(h, dst_vma, dst_addr, page);
+ put_page(page);
+
+ /* Allocate a temporary page to hold the copied
+ * contents.
+ */
+ page = alloc_huge_page_vma(h, dst_vma, dst_addr);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
*pagep = page;
- /* don't free the page */
+ /* Set the outparam pagep and return to the caller to
+ * copy the contents outside the lock. Don't free the
+ * page.
+ */
goto out;
}
} else {
- page = *pagep;
+ if (vm_shared &&
+ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+ put_page(*pagep);
+ ret = -EEXIST;
+ *pagep = NULL;
+ goto out;
+ }
+
+ page = alloc_huge_page(dst_vma, dst_addr, 0);
+ if (IS_ERR(page)) {
+ ret = -ENOMEM;
+ *pagep = NULL;
+ goto out;
+ }
+ copy_huge_page(page, *pagep);
+ put_page(*pagep);
*pagep = NULL;
}
@@ -5013,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
+ new_pagecache_page = true;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5076,7 +5299,8 @@ out_release_unlock:
if (vm_shared || is_continue)
unlock_page(page);
out_release_nounlock:
- restore_reserve_on_error(h, dst_vma, dst_addr, page);
+ if (!new_pagecache_page)
+ restore_reserve_on_error(h, dst_vma, dst_addr, page);
put_page(page);
goto out;
}
@@ -5225,8 +5449,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
continue;
}
- refs = min3(pages_per_huge_page(h) - pfn_offset,
- (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+ /* vaddr may not be aligned to PAGE_SIZE */
+ refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+ (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
if (pages || vmas)
record_subpages_vmas(mem_map_offset(page, pfn_offset),
@@ -5318,10 +5543,11 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
if (unlikely(is_hugetlb_entry_migration(pte))) {
swp_entry_t entry = pte_to_swp_entry(pte);
- if (is_write_migration_entry(entry)) {
+ if (is_writable_migration_entry(entry)) {
pte_t newpte;
- make_migration_entry_read(&entry);
+ entry = make_readable_migration_entry(
+ swp_offset(entry));
newpte = swp_entry_to_pte(entry);
set_huge_swap_pte_at(mm, address, ptep,
newpte, huge_page_size(h));
@@ -5332,10 +5558,11 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
}
if (!huge_pte_none(pte)) {
pte_t old_pte;
+ unsigned int shift = huge_page_shift(hstate_vma(vma));
old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
- pte = arch_make_huge_pte(pte, vma, NULL, 0);
+ pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
pages++;
}
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
new file mode 100644
index 000000000000..c540c21e26f5
--- /dev/null
+++ b/mm/hugetlb_vmemmap.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Free some vmemmap pages of HugeTLB
+ *
+ * Copyright (c) 2020, Bytedance. All rights reserved.
+ *
+ * Author: Muchun Song <songmuchun@bytedance.com>
+ *
+ * The struct page structures (page structs) are used to describe a physical
+ * page frame. By default, there is a one-to-one mapping from a page frame to
+ * its corresponding page struct.
+ *
+ * HugeTLB pages consist of multiple base page size pages and are supported by
+ * many architectures. See hugetlbpage.rst in the Documentation directory for
+ * more details. On the x86-64 architecture, HugeTLB pages of size 2MB and 1GB
+ * are currently supported. Since the base page size on x86 is 4KB, a 2MB
+ * HugeTLB page consists of 512 base pages and a 1GB HugeTLB page consists of
+ * 4096 base pages. For each base page, there is a corresponding page struct.
+ *
+ * Within the HugeTLB subsystem, only the first 4 page structs are used to
+ * contain unique information about a HugeTLB page. __NR_USED_SUBPAGE provides
+ * this upper limit. The only 'useful' information in the remaining page structs
+ * is the compound_head field, and this field is the same for all tail pages.
+ *
+ * By removing redundant page structs for HugeTLB pages, memory can be returned
+ * to the buddy allocator for other uses.
+ *
+ * Different architectures support different HugeTLB pages. For example, the
+ * following table lists the HugeTLB page sizes supported by the x86 and arm64
+ * architectures. Because arm64 supports 4k, 16k, and 64k base pages and
+ * supports contiguous entries, it supports many HugeTLB page sizes.
+ *
+ * +--------------+-----------+-----------------------------------------------+
+ * | Architecture | Page Size | HugeTLB Page Size |
+ * +--------------+-----------+-----------+-----------+-----------+-----------+
+ * | x86-64 | 4KB | 2MB | 1GB | | |
+ * +--------------+-----------+-----------+-----------+-----------+-----------+
+ * | | 4KB | 64KB | 2MB | 32MB | 1GB |
+ * | +-----------+-----------+-----------+-----------+-----------+
+ * | arm64 | 16KB | 2MB | 32MB | 1GB | |
+ * | +-----------+-----------+-----------+-----------+-----------+
+ * | | 64KB | 2MB | 512MB | 16GB | |
+ * +--------------+-----------+-----------+-----------+-----------+-----------+
+ *
+ * When the system boots up, every HugeTLB page is described by more than one
+ * page of struct page structs, whose size is (unit: pages):
+ *
+ * struct_size = HugeTLB_Size / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
+ *
+ * Where HugeTLB_Size is the size of the HugeTLB page. We know that the size
+ * of the HugeTLB page is always n times PAGE_SIZE. So we can get the following
+ * relationship.
+ *
+ * HugeTLB_Size = n * PAGE_SIZE
+ *
+ * Then,
+ *
+ * struct_size = n * PAGE_SIZE / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
+ * = n * sizeof(struct page) / PAGE_SIZE
+ *
+ * We can use huge mapping at the pud/pmd level for the HugeTLB page.
+ *
+ * For the HugeTLB page of the pmd level mapping, then
+ *
+ * struct_size = n * sizeof(struct page) / PAGE_SIZE
+ * = PAGE_SIZE / sizeof(pte_t) * sizeof(struct page) / PAGE_SIZE
+ * = sizeof(struct page) / sizeof(pte_t)
+ * = 64 / 8
+ * = 8 (pages)
+ *
+ * Where n is the number of pte entries that one page can contain. So the value of
+ * n is (PAGE_SIZE / sizeof(pte_t)).
+ *
+ * This optimization only supports 64-bit systems, so the value of sizeof(pte_t)
+ * is 8. This optimization is also applicable only when the size of struct page
+ * is a power of two. In most cases, the size of struct page is 64 bytes (e.g.
+ * x86-64 and arm64). So if we use pmd level mapping for a HugeTLB page, the
+ * size of its struct page structs is 8 page frames, whose size depends on the
+ * size of the base page.
+ *
+ * For the HugeTLB page of the pud level mapping, then
+ *
+ * struct_size = PAGE_SIZE / sizeof(pmd_t) * struct_size(pmd)
+ * = PAGE_SIZE / 8 * 8 (pages)
+ * = PAGE_SIZE (pages)
+ *
+ * Where the struct_size(pmd) is the size of the struct page structs of a
+ * HugeTLB page of the pmd level mapping.
+ *
+ * E.g.: the struct page structs of a 2MB HugeTLB page on x86_64 occupy 8 page
+ * frames, while those of a 1GB HugeTLB page occupy 4096.
+ *
+ * Next, we take the pmd level mapping of the HugeTLB page as an example to
+ * show the internal implementation of this optimization. There are 8 pages of
+ * struct page structs associated with a HugeTLB page which is pmd mapped.
+ *
+ * Here is how things look before optimization.
+ *
+ * HugeTLB struct pages(8 pages) page frame(8 pages)
+ * +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
+ * | | | 0 | -------------> | 0 |
+ * | | +-----------+ +-----------+
+ * | | | 1 | -------------> | 1 |
+ * | | +-----------+ +-----------+
+ * | | | 2 | -------------> | 2 |
+ * | | +-----------+ +-----------+
+ * | | | 3 | -------------> | 3 |
+ * | | +-----------+ +-----------+
+ * | | | 4 | -------------> | 4 |
+ * | PMD | +-----------+ +-----------+
+ * | level | | 5 | -------------> | 5 |
+ * | mapping | +-----------+ +-----------+
+ * | | | 6 | -------------> | 6 |
+ * | | +-----------+ +-----------+
+ * | | | 7 | -------------> | 7 |
+ * | | +-----------+ +-----------+
+ * | |
+ * | |
+ * | |
+ * +-----------+
+ *
+ * The value of page->compound_head is the same for all tail pages. The first
+ * page of page structs (page 0) associated with the HugeTLB page contains the 4
+ * page structs necessary to describe the HugeTLB. The only use of the remaining
+ * pages of page structs (page 1 to page 7) is to point to page->compound_head.
+ * Therefore, we can remap pages 2 to 7 to page 1. Only 2 pages of page structs
+ * will be used for each HugeTLB page. This will allow us to free the remaining
+ * 6 pages to the buddy allocator.
+ *
+ * Here is how things look after remapping.
+ *
+ * HugeTLB struct pages(8 pages) page frame(8 pages)
+ * +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+
+ * | | | 0 | -------------> | 0 |
+ * | | +-----------+ +-----------+
+ * | | | 1 | -------------> | 1 |
+ * | | +-----------+ +-----------+
+ * | | | 2 | ----------------^ ^ ^ ^ ^ ^
+ * | | +-----------+ | | | | |
+ * | | | 3 | ------------------+ | | | |
+ * | | +-----------+ | | | |
+ * | | | 4 | --------------------+ | | |
+ * | PMD | +-----------+ | | |
+ * | level | | 5 | ----------------------+ | |
+ * | mapping | +-----------+ | |
+ * | | | 6 | ------------------------+ |
+ * | | +-----------+ |
+ * | | | 7 | --------------------------+
+ * | | +-----------+
+ * | |
+ * | |
+ * | |
+ * +-----------+
+ *
+ * When a HugeTLB is freed to the buddy system, we should allocate 6 pages for
+ * vmemmap pages and restore the previous mapping relationship.
+ *
+ * For a HugeTLB page of the pud level mapping, the situation is similar to the
+ * former. We can also use this approach to free (PAGE_SIZE - 2) vmemmap pages.
+ *
+ * Apart from the HugeTLB page of the pmd/pud level mapping, some architectures
+ * (e.g. aarch64) provide a contiguous bit in the translation table entries
+ * that hints to the MMU to indicate that it is one of a contiguous set of
+ * entries that can be cached in a single TLB entry.
+ *
+ * The contiguous bit is used to increase the mapping size at the pmd and pte
+ * (last) level. So this type of HugeTLB page can be optimized only when the
+ * size of its struct page structs is greater than 2 pages.
+ */
+#define pr_fmt(fmt) "HugeTLB: " fmt
+
+#include "hugetlb_vmemmap.h"
+
+/*
+ * There are a lot of struct page structures associated with each HugeTLB page.
+ * For tail pages, the value of compound_head is the same. So we can reuse the
+ * first page of tail page structures. We map the virtual addresses of the remaining
+ * pages of tail page structures to the first tail page struct, and then free
+ * these page frames. Therefore, we need to reserve two pages as vmemmap areas.
+ */
+#define RESERVE_VMEMMAP_NR 2U
+#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
+
+bool hugetlb_free_vmemmap_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);
+
+static int __init early_hugetlb_free_vmemmap_param(char *buf)
+{
+ /* We cannot optimize if a "struct page" crosses page boundaries. */
+ if (!is_power_of_2(sizeof(struct page))) {
+ pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
+ return 0;
+ }
+
+ if (!buf)
+ return -EINVAL;
+
+ if (!strcmp(buf, "on"))
+ hugetlb_free_vmemmap_enabled = true;
+ else if (!strcmp(buf, "off"))
+ hugetlb_free_vmemmap_enabled = false;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
+
+static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
+{
+ return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
+}
+
+/*
+ * Previously discarded vmemmap pages will be allocated and remapped
+ * after this function returns zero.
+ */
+int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+ int ret;
+ unsigned long vmemmap_addr = (unsigned long)head;
+ unsigned long vmemmap_end, vmemmap_reuse;
+
+ if (!HPageVmemmapOptimized(head))
+ return 0;
+
+ vmemmap_addr += RESERVE_VMEMMAP_SIZE;
+ vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
+ vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+ /*
+ * The pages which the vmemmap virtual address range [@vmemmap_addr,
+ * @vmemmap_end) are mapped to are freed to the buddy allocator, and
+ * the range is mapped to the page which @vmemmap_reuse is mapped to.
+ * When a HugeTLB page is freed to the buddy allocator, previously
+ * discarded vmemmap pages must be allocated and remapped.
+ */
+ ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
+ GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
+
+ if (!ret)
+ ClearHPageVmemmapOptimized(head);
+
+ return ret;
+}
+
+void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+ unsigned long vmemmap_addr = (unsigned long)head;
+ unsigned long vmemmap_end, vmemmap_reuse;
+
+ if (!free_vmemmap_pages_per_hpage(h))
+ return;
+
+ vmemmap_addr += RESERVE_VMEMMAP_SIZE;
+ vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
+ vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+
+ /*
+ * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
+ * to the page which @vmemmap_reuse is mapped to, then free the pages
+ * which the range [@vmemmap_addr, @vmemmap_end) is mapped to.
+ */
+ if (!vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
+ SetHPageVmemmapOptimized(head);
+}
+
+void __init hugetlb_vmemmap_init(struct hstate *h)
+{
+ unsigned int nr_pages = pages_per_huge_page(h);
+ unsigned int vmemmap_pages;
+
+ /*
+ * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
+ * page structs that can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP,
+ * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page.
+ */
+ BUILD_BUG_ON(__NR_USED_SUBPAGE >=
+ RESERVE_VMEMMAP_SIZE / sizeof(struct page));
+
+ if (!hugetlb_free_vmemmap_enabled)
+ return;
+
+ vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
+ /*
+ * The head page and the first tail page are not to be freed to the buddy
+ * allocator; the other pages are remapped to the first tail page, so they
+ * can be freed.
+ *
+ * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
+ * on some architectures (e.g. aarch64). See Documentation/arm64/
+ * hugetlbpage.rst for more details.
+ */
+ if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
+ h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
+
+ pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
+ h->name);
+}
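As a rough worked example of the accounting above -- assuming 4 KiB base pages and a 64-byte struct page, neither of which is guaranteed by this patch -- a 2 MiB HugeTLB page covers 512 base pages, so its struct pages occupy 512 * 64 = 32768 bytes, i.e. 8 vmemmap pages; RESERVE_VMEMMAP_NR keeps 2 of them and the remaining 6 can be returned to the buddy allocator, matching the figure used in the file comment. For a 1 GiB (PUD-level) page the same arithmetic gives 4096 vmemmap pages, of which PAGE_SIZE - 2 = 4094 are freeable. The feature is toggled at boot with hugetlb_free_vmemmap=on/off and defaults to CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, as the early_param above shows. A minimal, illustrative-only C sketch of the calculation (the example_* name is made up, this is not kernel code):

	/*
	 * Number of freeable vmemmap pages for one HugeTLB page, assuming
	 * 4 KiB base pages and sizeof(struct page) == 64.
	 */
	static unsigned int example_freeable_vmemmap_pages(unsigned int pages_per_hpage)
	{
		unsigned int vmemmap_pages = (pages_per_hpage * 64) >> 12;

		return vmemmap_pages > 2 ? vmemmap_pages - 2 : 0;
	}

	/* example_freeable_vmemmap_pages(512) == 6; example_freeable_vmemmap_pages(262144) == 4094 */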
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
new file mode 100644
index 000000000000..cb2bef8f9e73
--- /dev/null
+++ b/mm/hugetlb_vmemmap.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Free some vmemmap pages of HugeTLB
+ *
+ * Copyright (c) 2020, Bytedance. All rights reserved.
+ *
+ * Author: Muchun Song <songmuchun@bytedance.com>
+ */
+#ifndef _LINUX_HUGETLB_VMEMMAP_H
+#define _LINUX_HUGETLB_VMEMMAP_H
+#include <linux/hugetlb.h>
+
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
+void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+void hugetlb_vmemmap_init(struct hstate *h);
+
+/*
+ * How many vmemmap pages associated with a HugeTLB page can be freed to the
+ * buddy allocator.
+ */
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+ return h->nr_free_vmemmap_pages;
+}
+#else
+static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+ return 0;
+}
+
+static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
+
+static inline void hugetlb_vmemmap_init(struct hstate *h)
+{
+}
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+ return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* _LINUX_HUGETLB_VMEMMAP_H */
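Because the #else branch above provides empty stubs, callers can invoke these helpers unconditionally. A hedged sketch of the intended call pattern (heavily simplified; the example_* functions are made up for illustration, while the real call sites live in mm/hugetlb.c and handle the error paths and pool bookkeeping omitted here):

	/* When a HugeTLB page leaves the pool: restore its vmemmap first. */
	static void example_update_and_free_page(struct hstate *h, struct page *page)
	{
		if (alloc_huge_page_vmemmap(h, page))
			return;	/* could not restore vmemmap; the page cannot be freed now */
		/* ... dissolve the compound page and give it back to the buddy allocator ... */
	}

	/* When a page becomes a HugeTLB page: its tail vmemmap can be freed. */
	static void example_prep_new_huge_page(struct hstate *h, struct page *page)
	{
		free_huge_page_vmemmap(h, page);
		/* ... the rest of the usual HugeTLB preparation ... */
	}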
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 153162669f80..b4a6f38fb51d 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -40,3 +40,12 @@ struct mm_struct init_mm = {
.cpu_bitmap = CPU_BITS_NONE,
INIT_MM_CONTEXT(init_mm)
};
+
+void setup_initial_init_mm(void *start_code, void *end_code,
+ void *end_data, void *brk)
+{
+ init_mm.start_code = (unsigned long)start_code;
+ init_mm.end_code = (unsigned long)end_code;
+ init_mm.end_data = (unsigned long)end_data;
+ init_mm.brk = (unsigned long)brk;
+}
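The new helper lets architectures set the init_mm code/data/brk bounds with one call instead of assigning the four fields by hand. A sketch of the kind of caller it targets (example_setup_arch() is made up; the symbols are the usual linker-provided section markers, and the actual call sites live in each architecture's setup code):

	extern char _text[], _etext[], _edata[], _end[];

	void __init example_setup_arch(void)
	{
		setup_initial_init_mm(_text, _etext, _edata, _end);
	}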
diff --git a/mm/internal.h b/mm/internal.h
index 6ec2cea9926b..31ff935b2547 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -274,11 +274,10 @@ isolate_freepages_range(struct compact_control *cc,
int
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
+#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool only_stealable, bool *can_steal);
-#endif
-
/*
* This function returns the order of a free page in the buddy system. In
* general, page_zone(page)->lock must be held by the caller to prevent the
@@ -344,7 +343,10 @@ void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end, int *nonblocking);
+ unsigned long start, unsigned long end, int *locked);
+extern long faultin_vma_page_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ bool write, int *locked);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
@@ -358,6 +360,9 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);
+extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
+ unsigned long len);
+
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
* we want to unconditionally remove a page from the pagecache -- e.g.,
@@ -369,23 +374,6 @@ extern unsigned int munlock_vma_page(struct page *page);
*/
extern void clear_page_mlock(struct page *page);
-/*
- * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
- * (because that does not go through the full procedure of migration ptes):
- * to migrate the Mlocked page flag; update statistics.
- */
-static inline void mlock_migrate_page(struct page *newpage, struct page *page)
-{
- if (TestClearPageMlocked(page)) {
- int nr_pages = thp_nr_pages(page);
-
- /* Holding pmd lock, no change in irq context: __mod is safe */
- __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
- SetPageMlocked(newpage);
- __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
- }
-}
-
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/*
@@ -461,7 +449,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
-static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
@@ -672,4 +659,7 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
void vunmap_range_noflush(unsigned long start, unsigned long end);
+int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, int page_nid, int *flags);
+
#endif /* __MM_INTERNAL_H */
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 98e3059bfea4..d739cdd1621a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_KASAN_HW_TAGS
#include <linux/static_key.h>
+#include "../slab.h"
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
+ /*
+ * Explicitly initialize the memory with the precise object size to
+ * avoid overwriting the SLAB redzone. This disables initialization in
+ * the arch code and may thus lead to a performance penalty. The penalty
+ * is accepted since SLAB redzones aren't enabled in production builds.
+ */
+ if (__slub_debug_enabled() &&
+ init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+ init = false;
+ memzero_explicit((void *)addr, size);
+ }
size = round_up(size, KASAN_GRANULE_SIZE);
hw_set_mem_tag_range((void *)addr, size, tag, init);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4d21ac44d5d3..575c685aa642 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -636,7 +636,7 @@ static void toggle_allocation_gate(struct work_struct *work)
/* Disable static key and reset timer. */
static_branch_disable(&kfence_allocation_key);
#endif
- queue_delayed_work(system_power_efficient_wq, &kfence_timer,
+ queue_delayed_work(system_unbound_wq, &kfence_timer,
msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
@@ -666,7 +666,7 @@ void __init kfence_init(void)
}
WRITE_ONCE(kfence_enabled, true);
- queue_delayed_work(system_power_efficient_wq, &kfence_timer, 0);
+ queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
(void *)(__kfence_pool + KFENCE_POOL_SIZE));
@@ -734,6 +734,22 @@ void kfence_shutdown_cache(struct kmem_cache *s)
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
/*
+ * Perform size check before switching kfence_allocation_gate, so that
+ * we don't disable KFENCE without making an allocation.
+ */
+ if (size > PAGE_SIZE)
+ return NULL;
+
+ /*
+ * Skip allocations from non-default zones, including DMA. We cannot
+ * guarantee that pages in the KFENCE pool will have the requested
+ * properties (e.g. reside in DMAable memory).
+ */
+ if ((flags & GFP_ZONEMASK) ||
+ (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+ return NULL;
+
+ /*
* allocation_gate only needs to become non-zero, so it doesn't make
* sense to continue writing to it and pay the associated contention
* cost, in case we have a large number of concurrent allocations.
@@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
if (!READ_ONCE(kfence_enabled))
return NULL;
- if (size > PAGE_SIZE)
- return NULL;
-
return kfence_guarded_alloc(s, size, flags);
}
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 7f24b9bcb2ec..942cbc16ad26 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -852,7 +852,7 @@ static void kfence_test_exit(void)
tracepoint_synchronize_unregister();
}
-late_initcall(kfence_test_init);
+late_initcall_sync(kfence_test_init);
module_exit(kfence_test_exit);
MODULE_LICENSE("GPL v2");
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6c0185fdd815..b0412be08fa2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -442,9 +442,7 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
static bool hugepage_vma_check(struct vm_area_struct *vma,
unsigned long vm_flags)
{
- /* Explicitly disabled through madvise. */
- if ((vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (!transhuge_vma_enabled(vma, vm_flags))
return false;
/* Enabled via shmem mount options or sysfs settings. */
@@ -459,7 +457,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
/* Read-only file mappings need to be aligned for THP to work. */
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
- (vm_flags & VM_DENYWRITE)) {
+ !inode_is_open_for_write(vma->vm_file->f_inode) &&
+ (vm_flags & VM_EXEC)) {
return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
HPAGE_PMD_NR);
}
@@ -1864,6 +1863,19 @@ out_unlock:
else {
__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
filemap_nr_thps_inc(mapping);
+ /*
+ * Paired with smp_mb() in do_dentry_open() to ensure
+ * i_writecount is up to date and the update to nr_thps is
+ * visible. Ensures the page cache will be truncated if the
+ * file is opened writable.
+ */
+ smp_mb();
+ if (inode_is_open_for_write(mapping->host)) {
+ result = SCAN_FAIL;
+ __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
+ filemap_nr_thps_dec(mapping);
+ goto xa_locked;
+ }
}
if (nr_none) {
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 228a2fbe0657..73d46d16d575 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
kasan_disable_current();
warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
- HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+ HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
kasan_enable_current();
}
@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
kasan_disable_current();
kcsan_disable_current();
- object->checksum = crc32(0, (void *)object->pointer, object->size);
+ object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
kasan_enable_current();
kcsan_enable_current();
@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
break;
kasan_disable_current();
- pointer = *ptr;
+ pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
kasan_enable_current();
untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
diff --git a/mm/madvise.c b/mm/madvise.c
index 63e489e5bfdb..5c065bc8b5f6 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -53,6 +53,8 @@ static int madvise_need_mmap_write(int behavior)
case MADV_COLD:
case MADV_PAGEOUT:
case MADV_FREE:
+ case MADV_POPULATE_READ:
+ case MADV_POPULATE_WRITE:
return 0;
default:
/* be safe, default to 1. list exceptions explicitly */
@@ -822,6 +824,63 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
return -EINVAL;
}
+static long madvise_populate(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end,
+ int behavior)
+{
+ const bool write = behavior == MADV_POPULATE_WRITE;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long tmp_end;
+ int locked = 1;
+ long pages;
+
+ *prev = vma;
+
+ while (start < end) {
+ /*
+ * We might have temporarily dropped the lock. For example,
+ * our VMA might have been split.
+ */
+ if (!vma || start >= vma->vm_end) {
+ vma = find_vma(mm, start);
+ if (!vma || start < vma->vm_start)
+ return -ENOMEM;
+ }
+
+ tmp_end = min_t(unsigned long, end, vma->vm_end);
+ /* Populate (prefault) page tables readable/writable. */
+ pages = faultin_vma_page_range(vma, start, tmp_end, write,
+ &locked);
+ if (!locked) {
+ mmap_read_lock(mm);
+ locked = 1;
+ *prev = NULL;
+ vma = NULL;
+ }
+ if (pages < 0) {
+ switch (pages) {
+ case -EINTR:
+ return -EINTR;
+ case -EINVAL: /* Incompatible mappings / permissions. */
+ return -EINVAL;
+ case -EHWPOISON:
+ return -EHWPOISON;
+ case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+ return -EFAULT;
+ default:
+ pr_warn_once("%s: unhandled return value: %ld\n",
+ __func__, pages);
+ fallthrough;
+ case -ENOMEM:
+ return -ENOMEM;
+ }
+ }
+ start += pages * PAGE_SIZE;
+ }
+ return 0;
+}
+
/*
* Application wants to free up the pages and associated backing store.
* This is effectively punching a hole into the middle of a file.
@@ -935,6 +994,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
case MADV_FREE:
case MADV_DONTNEED:
return madvise_dontneed_free(vma, prev, start, end, behavior);
+ case MADV_POPULATE_READ:
+ case MADV_POPULATE_WRITE:
+ return madvise_populate(vma, prev, start, end, behavior);
default:
return madvise_behavior(vma, prev, start, end, behavior);
}
@@ -955,6 +1017,8 @@ madvise_behavior_valid(int behavior)
case MADV_FREE:
case MADV_COLD:
case MADV_PAGEOUT:
+ case MADV_POPULATE_READ:
+ case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
case MADV_MERGEABLE:
case MADV_UNMERGEABLE:
@@ -1042,6 +1106,10 @@ process_madvise_behavior_valid(int behavior)
* easily if memory pressure happens.
* MADV_PAGEOUT - the application is not expected to use this memory soon,
* page out the pages in this range immediately.
+ * MADV_POPULATE_READ - populate (prefault) page tables readable by
+ * triggering read faults if required
+ * MADV_POPULATE_WRITE - populate (prefault) page tables writable by
+ * triggering write faults if required
*
* return values:
* zero - success
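A hedged userspace sketch of the new advice values added above (prefault_writable() is an illustrative name, error handling is trimmed, and MADV_POPULATE_READ/MADV_POPULATE_WRITE must come from sufficiently new uapi headers, hence the fallback):

	#include <stddef.h>
	#include <sys/mman.h>

	/* Prefault an anonymous mapping so that later writes take no page faults. */
	static int prefault_writable(void *addr, size_t len)
	{
	#ifdef MADV_POPULATE_WRITE
		return madvise(addr, len, MADV_POPULATE_WRITE);
	#else
		return -1;	/* headers predate MADV_POPULATE_* */
	#endif
	}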
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
index b890854ec761..ea734f248fce 100644
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -317,7 +317,7 @@ EXPORT_SYMBOL_GPL(wp_shared_mapping_range);
* pfn_mkwrite(). And then after a TLB flush following the write-protection
* pick up all dirty bits.
*
- * Note: This function currently skips transhuge page-table entries, since
+ * This function currently skips transhuge page-table entries, since
* it's intended for dirty-tracking on the PTE level. It will warn on
* encountering transhuge dirty entries, though, and can easily be extended
* to handle them as well.
diff --git a/mm/memblock.c b/mm/memblock.c
index 123feef5259d..de7b553baa50 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -182,6 +182,8 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
{
unsigned long i;
+ memblock_cap_size(base, &size);
+
for (i = 0; i < type->cnt; i++)
if (memblock_addrs_overlap(base, size, type->regions[i].base,
type->regions[i].size))
@@ -906,6 +908,11 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region
* @size: the size of the region
*
+ * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
+ * direct mapping of the physical memory. These regions will still be
+ * covered by the memory map. The struct pages representing NOMAP memory
+ * frames in the memory map will be PageReserved().
+ *
* Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
@@ -940,7 +947,8 @@ static bool should_skip_region(struct memblock_type *type,
return true;
/* skip hotpluggable memory regions if needed */
- if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
+ !(flags & MEMBLOCK_HOTPLUG))
return true;
/* if we want mirror memory skip non-mirror memory regions */
@@ -1794,7 +1802,6 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
*/
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
- memblock_cap_size(base, &size);
return memblock_overlaps_region(&memblock.reserved, base, size);
}
@@ -1941,14 +1948,13 @@ static void __init free_unused_memmap(void)
* due to SPARSEMEM sections which aren't present.
*/
start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#else
+#endif
/*
- * Align down here since the VM subsystem insists that the
- * memmap entries are valid from the bank start aligned to
- * MAX_ORDER_NR_PAGES.
+ * Align down here since many operations in the VM subsystem
+ * presume that there are no holes in the memory map inside
+ * a pageblock.
*/
- start = round_down(start, MAX_ORDER_NR_PAGES);
-#endif
+ start = round_down(start, pageblock_nr_pages);
/*
* If we had a previous bank, and there is a space
@@ -1958,16 +1964,18 @@ static void __init free_unused_memmap(void)
free_memmap(prev_end, start);
/*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
+ * Align up here since many operations in the VM subsystem
+ * presume that there are no holes in the memory map inside
+ * a pageblock.
*/
- prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+ prev_end = ALIGN(end, pageblock_nr_pages);
}
#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+ if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
+ prev_end = ALIGN(end, pageblock_nr_pages);
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+ }
#endif
}
@@ -2002,6 +2010,26 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
return end_pfn - start_pfn;
}
+static void __init memmap_init_reserved_pages(void)
+{
+ struct memblock_region *region;
+ phys_addr_t start, end;
+ u64 i;
+
+ /* initialize struct pages for the reserved regions */
+ for_each_reserved_mem_range(i, &start, &end)
+ reserve_bootmem_region(start, end);
+
+ /* and also treat struct pages for the NOMAP regions as PageReserved */
+ for_each_mem_region(region) {
+ if (memblock_is_nomap(region)) {
+ start = region->base;
+ end = start + region->size;
+ reserve_bootmem_region(start, end);
+ }
+ }
+}
+
static unsigned long __init free_low_memory_core_early(void)
{
unsigned long count = 0;
@@ -2010,8 +2038,7 @@ static unsigned long __init free_low_memory_core_early(void)
memblock_clear_hotplug(0, -1);
- for_each_reserved_mem_range(i, &start, &end)
- reserve_bootmem_region(start, end);
+ memmap_init_reserved_pages();
/*
* We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b80aae448a49..702a81dfe72d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
stock->cached_pgdat = pgdat;
} else if (stock->cached_pgdat != pgdat) {
/* Flush the existing cached vmstat data */
+ struct pglist_data *oldpg = stock->cached_pgdat;
+
if (stock->nr_slab_reclaimable_b) {
- mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+ mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
stock->nr_slab_reclaimable_b);
stock->nr_slab_reclaimable_b = 0;
}
if (stock->nr_slab_unreclaimable_b) {
- mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+ mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
stock->nr_slab_unreclaimable_b);
stock->nr_slab_unreclaimable_b = 0;
}
@@ -3574,7 +3576,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
unsigned long val;
if (mem_cgroup_is_root(memcg)) {
- cgroup_rstat_flush(memcg->css.cgroup);
+ /* mem_cgroup_threshold() calls here from irqsafe context */
+ cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
val = memcg_page_state(memcg, NR_FILE_PAGES) +
memcg_page_state(memcg, NR_ANON_MAPPED);
if (swap)
@@ -5537,7 +5540,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
* as special swap entry in the CPU page table.
*/
if (is_device_private_entry(ent)) {
- page = device_private_entry_to_page(ent);
+ page = pfn_swap_entry_to_page(ent);
/*
* MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have
* a refcount of 1 when free (unlike normal page)
@@ -6644,7 +6647,7 @@ static unsigned long effective_protection(unsigned long usage,
}
/**
- * mem_cgroup_protected - check if memory consumption is in the normal range
+ * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
* @root: the top ancestor of the sub-tree being checked
* @memcg: the memory cgroup to check
*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e5a1531f7f4e..470400cc7513 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -66,6 +66,19 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
+static bool __page_handle_poison(struct page *page)
+{
+ bool ret;
+
+ zone_pcp_disable(page_zone(page));
+ ret = dissolve_free_huge_page(page);
+ if (!ret)
+ ret = take_page_off_buddy(page);
+ zone_pcp_enable(page_zone(page));
+
+ return ret;
+}
+
static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
if (hugepage_or_freepage) {
@@ -73,7 +86,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
* Doing this check for free pages is also fine since dissolve_free_huge_page
* returns 0 for non-hugetlb pages as well.
*/
- if (dissolve_free_huge_page(page) || !take_page_off_buddy(page))
+ if (!__page_handle_poison(page))
/*
* We could fail to take off the target page from buddy
* for example due to racy page allocation, but that's
@@ -985,7 +998,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
*/
if (PageAnon(hpage))
put_page(hpage);
- if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+ if (__page_handle_poison(p)) {
page_ref_inc(p);
res = MF_RECOVERED;
}
@@ -1133,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page)
* unexpected races caused by taking a page refcount.
*/
if (!HWPoisonHandlable(head))
- return 0;
+ return -EBUSY;
if (PageTransHuge(head)) {
/*
@@ -1186,9 +1199,15 @@ try_again:
}
goto out;
} else if (ret == -EBUSY) {
- /* We raced with freeing huge page to buddy, retry. */
- if (pass++ < 3)
+ /*
+ * We raced with (possibly temporary) unhandlable
+ * page, retry.
+ */
+ if (pass++ < 3) {
+ shake_page(p, 1);
goto try_again;
+ }
+ ret = -EIO;
goto out;
}
}
@@ -1253,10 +1272,10 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
int flags, struct page **hpagep)
{
- enum ttu_flags ttu = TTU_IGNORE_MLOCK;
+ enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
struct address_space *mapping;
LIST_HEAD(tokill);
- bool unmap_success = true;
+ bool unmap_success;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
bool mlocked = PageMlocked(hpage);
@@ -1319,7 +1338,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
if (!PageHuge(hpage)) {
- unmap_success = try_to_unmap(hpage, ttu);
+ try_to_unmap(hpage, ttu);
} else {
if (!PageAnon(hpage)) {
/*
@@ -1327,21 +1346,20 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* could potentially call huge_pmd_unshare. Because of
* this, take semaphore in write mode here and set
* TTU_RMAP_LOCKED to indicate we have taken the lock
- * at this higer level.
+ * at this higher level.
*/
mapping = hugetlb_page_mapping_lock_write(hpage);
if (mapping) {
- unmap_success = try_to_unmap(hpage,
- ttu|TTU_RMAP_LOCKED);
+ try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
i_mmap_unlock_write(mapping);
- } else {
+ } else
pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
- unmap_success = false;
- }
} else {
- unmap_success = try_to_unmap(hpage, ttu);
+ try_to_unmap(hpage, ttu);
}
}
+
+ unmap_success = !page_mapped(hpage);
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
@@ -1446,7 +1464,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
}
unlock_page(head);
res = MF_FAILED;
- if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) {
+ if (__page_handle_poison(p)) {
page_ref_inc(p);
res = MF_RECOVERED;
}
diff --git a/mm/memory.c b/mm/memory.c
index 48c4576df898..25fc46e87214 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -699,6 +699,68 @@ out:
}
#endif
+static void restore_exclusive_pte(struct vm_area_struct *vma,
+ struct page *page, unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte;
+ swp_entry_t entry;
+
+ pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
+ if (pte_swp_soft_dirty(*ptep))
+ pte = pte_mksoft_dirty(pte);
+
+ entry = pte_to_swp_entry(*ptep);
+ if (pte_swp_uffd_wp(*ptep))
+ pte = pte_mkuffd_wp(pte);
+ else if (is_writable_device_exclusive_entry(entry))
+ pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+
+ set_pte_at(vma->vm_mm, address, ptep, pte);
+
+ /*
+ * No need to take a page reference as one was already
+ * created when the swap entry was made.
+ */
+ if (PageAnon(page))
+ page_add_anon_rmap(page, vma, address, false);
+ else
+ /*
+ * Currently device exclusive access only supports anonymous
+ * memory so the entry shouldn't point to a filebacked page.
+ */
+ WARN_ON_ONCE(!PageAnon(page));
+
+ if (vma->vm_flags & VM_LOCKED)
+ mlock_vma_page(page);
+
+ /*
+ * No need to invalidate - it was non-present before. However
+ * secondary CPUs may have mappings that need invalidating.
+ */
+ update_mmu_cache(vma, address, ptep);
+}
+
+/*
+ * Tries to restore an exclusive pte if the page lock can be acquired without
+ * sleeping.
+ */
+static int
+try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ swp_entry_t entry = pte_to_swp_entry(*src_pte);
+ struct page *page = pfn_swap_entry_to_page(entry);
+
+ if (trylock_page(page)) {
+ restore_exclusive_pte(vma, page, addr, src_pte);
+ unlock_page(page);
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
@@ -707,17 +769,17 @@ out:
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
- unsigned long addr, int *rss)
+ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
- unsigned long vm_flags = vma->vm_flags;
+ unsigned long vm_flags = dst_vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
swp_entry_t entry = pte_to_swp_entry(pte);
if (likely(!non_swap_entry(entry))) {
if (swap_duplicate(entry) < 0)
- return entry.val;
+ return -EIO;
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
@@ -729,17 +791,18 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
rss[MM_SWAPENTS]++;
} else if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
rss[mm_counter(page)]++;
- if (is_write_migration_entry(entry) &&
+ if (is_writable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
- make_migration_entry_read(&entry);
+ entry = make_readable_migration_entry(
+ swp_offset(entry));
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(*src_pte))
pte = pte_swp_mksoft_dirty(pte);
@@ -748,7 +811,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
set_pte_at(src_mm, addr, src_pte, pte);
}
} else if (is_device_private_entry(entry)) {
- page = device_private_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
/*
* Update rss count even for unaddressable pages, as
@@ -770,15 +833,29 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* when a device driver is involved (you cannot easily
* save and restore device driver state).
*/
- if (is_write_device_private_entry(entry) &&
+ if (is_writable_device_private_entry(entry) &&
is_cow_mapping(vm_flags)) {
- make_device_private_entry_read(&entry);
+ entry = make_readable_device_private_entry(
+ swp_offset(entry));
pte = swp_entry_to_pte(entry);
if (pte_swp_uffd_wp(*src_pte))
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
+ } else if (is_device_exclusive_entry(entry)) {
+ /*
+ * Make device exclusive entries present by restoring the
+ * original entry then copying as for a present pte. Device
+ * exclusive entries currently only support private writable
+ * (ie. COW) mappings.
+ */
+ VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
+ if (try_restore_exclusive_pte(src_pte, src_vma, addr))
+ return -EBUSY;
+ return -ENOENT;
}
+ if (!userfaultfd_wp(dst_vma))
+ pte = pte_swp_clear_uffd_wp(pte);
set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
}
@@ -844,6 +921,9 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
/* All done, just insert the new page copy in the child */
pte = mk_pte(new_page, dst_vma->vm_page_prot);
pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
+ if (userfaultfd_pte_wp(dst_vma, *src_pte))
+ /* Uffd-wp needs to be delivered to dest pte as well */
+ pte = pte_wrprotect(pte_mkuffd_wp(pte));
set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
return 0;
}
@@ -893,12 +973,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
- /*
- * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
- * does not have the VM_UFFD_WP, which means that the uffd
- * fork event is not enabled.
- */
- if (!(vm_flags & VM_UFFD_WP))
+ if (!userfaultfd_wp(dst_vma))
pte = pte_clear_uffd_wp(pte);
set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
@@ -971,13 +1046,25 @@ again:
continue;
}
if (unlikely(!pte_present(*src_pte))) {
- entry.val = copy_nonpresent_pte(dst_mm, src_mm,
- dst_pte, src_pte,
- src_vma, addr, rss);
- if (entry.val)
+ ret = copy_nonpresent_pte(dst_mm, src_mm,
+ dst_pte, src_pte,
+ dst_vma, src_vma,
+ addr, rss);
+ if (ret == -EIO) {
+ entry = pte_to_swp_entry(*src_pte);
break;
- progress += 8;
- continue;
+ } else if (ret == -EBUSY) {
+ break;
+ } else if (!ret) {
+ progress += 8;
+ continue;
+ }
+
+ /*
+ * Device exclusive entry restored, continue by copying
+ * the now present pte.
+ */
+ WARN_ON_ONCE(ret != -ENOENT);
}
/* copy_present_pte() will clear `*prealloc' if consumed */
ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
@@ -1008,20 +1095,26 @@ again:
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
- if (entry.val) {
+ if (ret == -EIO) {
+ VM_WARN_ON_ONCE(!entry.val);
if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
ret = -ENOMEM;
goto out;
}
entry.val = 0;
- } else if (ret) {
- WARN_ON_ONCE(ret != -EAGAIN);
+ } else if (ret == -EBUSY) {
+ goto out;
+ } else if (ret == -EAGAIN) {
prealloc = page_copy_prealloc(src_mm, src_vma, addr);
if (!prealloc)
return -ENOMEM;
- /* We've captured and resolved the error. Reset, try again. */
- ret = 0;
+ } else if (ret) {
+ VM_WARN_ON_ONCE(1);
}
+
+ /* We've captured and resolved the error. Reset, try again. */
+ ret = 0;
+
if (addr != end)
goto again;
out:
@@ -1050,8 +1143,8 @@ copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
|| pmd_devmap(*src_pmd)) {
int err;
VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
- err = copy_huge_pmd(dst_mm, src_mm,
- dst_pmd, src_pmd, addr, src_vma);
+ err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
+ addr, dst_vma, src_vma);
if (err == -ENOMEM)
return -ENOMEM;
if (!err)
@@ -1278,8 +1371,9 @@ again:
}
entry = pte_to_swp_entry(ptent);
- if (is_device_private_entry(entry)) {
- struct page *page = device_private_entry_to_page(entry);
+ if (is_device_private_entry(entry) ||
+ is_device_exclusive_entry(entry)) {
+ struct page *page = pfn_swap_entry_to_page(entry);
if (unlikely(details && details->check_mapping)) {
/*
@@ -1294,7 +1388,10 @@ again:
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
rss[mm_counter(page)]--;
- page_remove_rmap(page, false);
+
+ if (is_device_private_entry(entry))
+ page_remove_rmap(page, false);
+
put_page(page);
continue;
}
@@ -1308,7 +1405,7 @@ again:
else if (is_migration_entry(entry)) {
struct page *page;
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
rss[mm_counter(page)]--;
}
if (unlikely(!free_swap_and_cache(entry)))
@@ -3343,6 +3440,34 @@ void unmap_mapping_range(struct address_space *mapping,
EXPORT_SYMBOL(unmap_mapping_range);
/*
+ * Restore a potential device exclusive pte to a working pte entry
+ */
+static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct vm_area_struct *vma = vmf->vma;
+ struct mmu_notifier_range range;
+
+ if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+ return VM_FAULT_RETRY;
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+ vma->vm_mm, vmf->address & PAGE_MASK,
+ (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
+ mmu_notifier_invalidate_range_start(&range);
+
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
+ &vmf->ptl);
+ if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
+ restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
+
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ unlock_page(page);
+
+ mmu_notifier_invalidate_range_end(&range);
+ return 0;
+}
+
+/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
@@ -3370,8 +3495,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (is_migration_entry(entry)) {
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
+ } else if (is_device_exclusive_entry(entry)) {
+ vmf->page = pfn_swap_entry_to_page(entry);
+ ret = remove_device_exclusive_entry(vmf);
} else if (is_device_private_entry(entry)) {
- vmf->page = device_private_entry_to_page(entry);
+ vmf->page = pfn_swap_entry_to_page(entry);
ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
@@ -3898,8 +4026,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
return ret;
}
- if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+ if (vmf->prealloc_pte) {
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (likely(pmd_none(*vmf->pmd))) {
+ mm_inc_nr_ptes(vma->vm_mm);
+ pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
+ vmf->prealloc_pte = NULL;
+ }
+ spin_unlock(vmf->ptl);
+ } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
return VM_FAULT_OOM;
+ }
}
/* See comment in handle_pte_fault() */
@@ -4025,9 +4162,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
* something).
*/
if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
- ret = do_fault_around(vmf);
- if (ret)
- return ret;
+ if (likely(!userfaultfd_minor(vmf->vma))) {
+ ret = do_fault_around(vmf);
+ if (ret)
+ return ret;
+ }
}
ret = __do_fault(vmf);
@@ -4172,9 +4311,8 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
return ret;
}
-static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, int page_nid,
- int *flags)
+int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, int page_nid, int *flags)
{
get_page(page);
@@ -4295,12 +4433,12 @@ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
}
/* `inline' is required to avoid gcc 4.1.2 build error */
-static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
+static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
{
if (vma_is_anonymous(vmf->vma)) {
- if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
+ if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
return handle_userfault(vmf, VM_UFFD_WP);
- return do_huge_pmd_wp_page(vmf, orig_pmd);
+ return do_huge_pmd_wp_page(vmf);
}
if (vmf->vma->vm_ops->huge_fault) {
vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
@@ -4527,26 +4665,26 @@ retry_pud:
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
- pmd_t orig_pmd = *vmf.pmd;
+ vmf.orig_pmd = *vmf.pmd;
barrier();
- if (unlikely(is_swap_pmd(orig_pmd))) {
+ if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
- !is_pmd_migration_entry(orig_pmd));
- if (is_pmd_migration_entry(orig_pmd))
+ !is_pmd_migration_entry(vmf.orig_pmd));
+ if (is_pmd_migration_entry(vmf.orig_pmd))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}
- if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
- if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
- return do_huge_pmd_numa_page(&vmf, orig_pmd);
+ if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
+ if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
+ return do_huge_pmd_numa_page(&vmf);
- if (dirty && !pmd_write(orig_pmd)) {
- ret = wp_huge_pmd(&vmf, orig_pmd);
+ if (dirty && !pmd_write(vmf.orig_pmd)) {
+ ret = wp_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
- huge_pmd_set_accessed(&vmf, orig_pmd);
+ huge_pmd_set_accessed(&vmf);
return 0;
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 974a565797d8..8cb75b26ea4f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -154,122 +154,6 @@ static void release_memory_resource(struct resource *res)
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-void get_page_bootmem(unsigned long info, struct page *page,
- unsigned long type)
-{
- page->freelist = (void *)type;
- SetPagePrivate(page);
- set_page_private(page, info);
- page_ref_inc(page);
-}
-
-void put_page_bootmem(struct page *page)
-{
- unsigned long type;
-
- type = (unsigned long) page->freelist;
- BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
- type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
-
- if (page_ref_dec_return(page) == 1) {
- page->freelist = NULL;
- ClearPagePrivate(page);
- set_page_private(page, 0);
- INIT_LIST_HEAD(&page->lru);
- free_reserved_page(page);
- }
-}
-
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void register_page_bootmem_info_section(unsigned long start_pfn)
-{
- unsigned long mapsize, section_nr, i;
- struct mem_section *ms;
- struct page *page, *memmap;
- struct mem_section_usage *usage;
-
- section_nr = pfn_to_section_nr(start_pfn);
- ms = __nr_to_section(section_nr);
-
- /* Get section's memmap address */
- memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-
- /*
- * Get page for the memmap's phys address
- * XXX: need more consideration for sparse_vmemmap...
- */
- page = virt_to_page(memmap);
- mapsize = sizeof(struct page) * PAGES_PER_SECTION;
- mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
-
- /* remember memmap's page */
- for (i = 0; i < mapsize; i++, page++)
- get_page_bootmem(section_nr, page, SECTION_INFO);
-
- usage = ms->usage;
- page = virt_to_page(usage);
-
- mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
-
- for (i = 0; i < mapsize; i++, page++)
- get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
-
-}
-#else /* CONFIG_SPARSEMEM_VMEMMAP */
-static void register_page_bootmem_info_section(unsigned long start_pfn)
-{
- unsigned long mapsize, section_nr, i;
- struct mem_section *ms;
- struct page *page, *memmap;
- struct mem_section_usage *usage;
-
- section_nr = pfn_to_section_nr(start_pfn);
- ms = __nr_to_section(section_nr);
-
- memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-
- register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
-
- usage = ms->usage;
- page = virt_to_page(usage);
-
- mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
-
- for (i = 0; i < mapsize; i++, page++)
- get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
-}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
-
-void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
- unsigned long i, pfn, end_pfn, nr_pages;
- int node = pgdat->node_id;
- struct page *page;
-
- nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
- page = virt_to_page(pgdat);
-
- for (i = 0; i < nr_pages; i++, page++)
- get_page_bootmem(node, page, NODE_INFO);
-
- pfn = pgdat->node_start_pfn;
- end_pfn = pgdat_end_pfn(pgdat);
-
- /* register section info */
- for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- /*
- * Some platforms can assign the same pfn to multiple nodes - on
- * node0 as well as nodeN. To avoid registering a pfn against
- * multiple nodes we check that this pfn does not already
- * reside in some other nodes.
- */
- if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
- register_page_bootmem_info_section(pfn);
- }
-}
-#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
-
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
const char *reason)
{
@@ -445,7 +329,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
unsigned long pfn;
int nid = zone_to_nid(zone);
- zone_span_writelock(zone);
if (zone->zone_start_pfn == start_pfn) {
/*
* If the section is smallest section in the zone, it need
@@ -478,7 +361,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
zone->spanned_pages = 0;
}
}
- zone_span_writeunlock(zone);
}
static void update_pgdat_span(struct pglist_data *pgdat)
@@ -515,7 +397,7 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
{
const unsigned long end_pfn = start_pfn + nr_pages;
struct pglist_data *pgdat = zone->zone_pgdat;
- unsigned long pfn, cur_nr_pages, flags;
+ unsigned long pfn, cur_nr_pages;
/* Poison struct pages because they are now uninitialized again. */
for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
@@ -540,10 +422,8 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
clear_zone_contiguous(zone);
- pgdat_resize_lock(zone->zone_pgdat, &flags);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
update_pgdat_span(pgdat);
- pgdat_resize_unlock(zone->zone_pgdat, &flags);
set_zone_contiguous(zone);
}
@@ -750,19 +630,13 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
{
struct pglist_data *pgdat = zone->zone_pgdat;
int nid = pgdat->node_id;
- unsigned long flags;
clear_zone_contiguous(zone);
- /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
- pgdat_resize_lock(pgdat, &flags);
- zone_span_writelock(zone);
if (zone_is_empty(zone))
init_currently_empty_zone(zone, start_pfn, nr_pages);
resize_zone_range(zone, start_pfn, nr_pages);
- zone_span_writeunlock(zone);
resize_pgdat_range(pgdat, start_pfn, nr_pages);
- pgdat_resize_unlock(pgdat, &flags);
/*
* Subsection population requires care in pfn_to_online_page().
@@ -852,12 +726,8 @@ struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
*/
void adjust_present_page_count(struct zone *zone, long nr_pages)
{
- unsigned long flags;
-
zone->present_pages += nr_pages;
- pgdat_resize_lock(zone->zone_pgdat, &flags);
zone->zone_pgdat->node_present_pages += nr_pages;
- pgdat_resize_unlock(zone->zone_pgdat, &flags);
}
int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
@@ -913,7 +783,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
/*
* {on,off}lining is constrained to full memory sections (or more
- * precisly to memory blocks from the user space POV).
+ * precisely to memory blocks from the user space POV).
* memmap_on_memory is an exception because it reserves initial part
* of the physical memory space for vmemmaps. That space is pageblock
* aligned.
@@ -1072,8 +942,8 @@ static void rollback_node_hotadd(int nid)
}
-/**
- * try_online_node - online a node if offlined
+/*
+ * __try_online_node - online a node if offlined
* @nid: the node ID
* @set_node_online: Whether we want to online the node
* called by cpu_up() to online a node without onlined memory.
@@ -1172,6 +1042,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
* populate a single PMD.
*/
return memmap_on_memory &&
+ !hugetlb_free_vmemmap_enabled &&
IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
size == memory_block_size_bytes() &&
IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
@@ -1521,6 +1392,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
struct page *page, *head;
int ret = 0;
LIST_HEAD(source);
+ static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
if (!pfn_valid(pfn))
@@ -1567,8 +1440,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
page_is_file_lru(page));
} else {
- pr_warn("failed to isolate pfn %lx\n", pfn);
- dump_page(page, "isolation failed");
+ if (__ratelimit(&migrate_rs)) {
+ pr_warn("failed to isolate pfn %lx\n", pfn);
+ dump_page(page, "isolation failed");
+ }
}
put_page(page);
}
@@ -1597,9 +1472,11 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
if (ret) {
list_for_each_entry(page, &source, lru) {
- pr_warn("migrating pfn %lx failed ret:%d ",
- page_to_pfn(page), ret);
- dump_page(page, "migration failure");
+ if (__ratelimit(&migrate_rs)) {
+ pr_warn("migrating pfn %lx failed ret:%d\n",
+ page_to_pfn(page), ret);
+ dump_page(page, "migration failure");
+ }
}
putback_movable_pages(&source);
}
@@ -1703,7 +1580,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
/*
* {on,off}lining is constrained to full memory sections (or more
- * precisly to memory blocks from the user space POV).
+ * precisely to memory blocks from the user space POV).
* memmap_on_memory is an exception because it reserves initial part
* of the physical memory space for vmemmaps. That space is pageblock
* aligned.
@@ -2031,7 +1908,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
}
/**
- * remove_memory
+ * __remove_memory - Remove memory if every memory block is offline
* @nid: the node ID
* @start: physical address of the region to remove
* @size: size of the region to remove
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b5d95bf1025d..e32360e90274 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -121,8 +121,7 @@ enum zone_type policy_zone = 0;
*/
static struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
- .mode = MPOL_PREFERRED,
- .flags = MPOL_F_LOCAL,
+ .mode = MPOL_LOCAL,
};
static struct mempolicy preferred_node_policy[MAX_NUMNODES];
@@ -194,18 +193,17 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
- pol->v.nodes = *nodes;
+ pol->nodes = *nodes;
return 0;
}
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
- if (!nodes)
- pol->flags |= MPOL_F_LOCAL; /* local allocation */
- else if (nodes_empty(*nodes))
- return -EINVAL; /* no allowed nodes */
- else
- pol->v.preferred_node = first_node(*nodes);
+ if (nodes_empty(*nodes))
+ return -EINVAL;
+
+ nodes_clear(pol->nodes);
+ node_set(first_node(*nodes), pol->nodes);
return 0;
}
@@ -213,15 +211,14 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
if (nodes_empty(*nodes))
return -EINVAL;
- pol->v.nodes = *nodes;
+ pol->nodes = *nodes;
return 0;
}
/*
* mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
* any, for the new policy. mpol_new() has already validated the nodes
- * parameter with respect to the policy mode and flags. But, we need to
- * handle an empty nodemask with MPOL_PREFERRED here.
+ * parameter with respect to the policy mode and flags.
*
* Must be called holding task's alloc_lock to protect task's mems_allowed
* and mempolicy. May also be called holding the mmap_lock for write.
@@ -231,33 +228,31 @@ static int mpol_set_nodemask(struct mempolicy *pol,
{
int ret;
- /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
- if (pol == NULL)
+ /*
+ * Default (pol==NULL) and local memory policies are not
+ * subject to any remapping. They also do not need any special
+ * constructor.
+ */
+ if (!pol || pol->mode == MPOL_LOCAL)
return 0;
+
/* Check N_MEMORY */
nodes_and(nsc->mask1,
cpuset_current_mems_allowed, node_states[N_MEMORY]);
VM_BUG_ON(!nodes);
- if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
- nodes = NULL; /* explicit local allocation */
- else {
- if (pol->flags & MPOL_F_RELATIVE_NODES)
- mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
- else
- nodes_and(nsc->mask2, *nodes, nsc->mask1);
- if (mpol_store_user_nodemask(pol))
- pol->w.user_nodemask = *nodes;
- else
- pol->w.cpuset_mems_allowed =
- cpuset_current_mems_allowed;
- }
+ if (pol->flags & MPOL_F_RELATIVE_NODES)
+ mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
+ else
+ nodes_and(nsc->mask2, *nodes, nsc->mask1);
- if (nodes)
- ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
+ if (mpol_store_user_nodemask(pol))
+ pol->w.user_nodemask = *nodes;
else
- ret = mpol_ops[pol->mode].create(pol, NULL);
+ pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
+
+ ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
return ret;
}
@@ -290,13 +285,14 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
if (((flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES)))
return ERR_PTR(-EINVAL);
+
+ mode = MPOL_LOCAL;
}
} else if (mode == MPOL_LOCAL) {
if (!nodes_empty(*nodes) ||
(flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES))
return ERR_PTR(-EINVAL);
- mode = MPOL_PREFERRED;
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -330,7 +326,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
else if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
else {
- nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
+ nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
*nodes);
pol->w.cpuset_mems_allowed = *nodes;
}
@@ -338,31 +334,13 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
if (nodes_empty(tmp))
tmp = *nodes;
- pol->v.nodes = tmp;
+ pol->nodes = tmp;
}
static void mpol_rebind_preferred(struct mempolicy *pol,
const nodemask_t *nodes)
{
- nodemask_t tmp;
-
- if (pol->flags & MPOL_F_STATIC_NODES) {
- int node = first_node(pol->w.user_nodemask);
-
- if (node_isset(node, *nodes)) {
- pol->v.preferred_node = node;
- pol->flags &= ~MPOL_F_LOCAL;
- } else
- pol->flags |= MPOL_F_LOCAL;
- } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
- mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
- pol->v.preferred_node = first_node(tmp);
- } else if (!(pol->flags & MPOL_F_LOCAL)) {
- pol->v.preferred_node = node_remap(pol->v.preferred_node,
- pol->w.cpuset_mems_allowed,
- *nodes);
- pol->w.cpuset_mems_allowed = *nodes;
- }
+ pol->w.cpuset_mems_allowed = *nodes;
}
/*
@@ -376,7 +354,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol)
return;
- if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
+ if (!mpol_store_user_nodemask(pol) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
@@ -427,6 +405,9 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
.create = mpol_new_bind,
.rebind = mpol_rebind_nodemask,
},
+ [MPOL_LOCAL] = {
+ .rebind = mpol_rebind_default,
+ },
};
static int migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -458,7 +439,8 @@ static inline bool queue_pages_required(struct page *page,
/*
* queue_pages_pmd() has four possible return values:
- * 0 - pages are placed on the right node or queued successfully.
+ * 0 - pages are placed on the right node or queued successfully, or
+ * a special page is met, i.e. the huge zero page.
* 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
* specified.
* 2 - THP was split.
@@ -482,8 +464,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
- __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
- ret = 2;
+ walk->action = ACTION_CONTINUE;
goto out;
}
if (!queue_pages_required(page, qp))
@@ -510,7 +491,8 @@ out:
* and move them to the pagelist if they do.
*
* queue_pages_pte_range() has three possible return values:
- * 0 - pages are placed on the right node or queued successfully.
+ * 0 - pages are placed on the right node or queued successfully, or
+ * a special page is met, i.e. the zero page.
* 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
* specified.
* -EIO - only MPOL_MF_STRICT was specified and an existing page was already
@@ -917,12 +899,11 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
switch (p->mode) {
case MPOL_BIND:
case MPOL_INTERLEAVE:
- *nodes = p->v.nodes;
- break;
case MPOL_PREFERRED:
- if (!(p->flags & MPOL_F_LOCAL))
- node_set(p->v.preferred_node, *nodes);
- /* else return empty node mask for local allocation */
+ *nodes = p->nodes;
+ break;
+ case MPOL_LOCAL:
+ /* return empty node mask for local allocation */
break;
default:
BUG();
@@ -1007,7 +988,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
- *policy = next_node_in(current->il_prev, pol->v.nodes);
+ *policy = next_node_in(current->il_prev, pol->nodes);
} else {
err = -EINVAL;
goto out;
@@ -1460,26 +1441,38 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
+/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
+static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
+{
+ *flags = *mode & MPOL_MODE_FLAGS;
+ *mode &= ~MPOL_MODE_FLAGS;
+ if ((unsigned int)(*mode) >= MPOL_MAX)
+ return -EINVAL;
+ if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
+ return -EINVAL;
+
+ return 0;
+}
+
static long kernel_mbind(unsigned long start, unsigned long len,
unsigned long mode, const unsigned long __user *nmask,
unsigned long maxnode, unsigned int flags)
{
+ unsigned short mode_flags;
nodemask_t nodes;
+ int lmode = mode;
int err;
- unsigned short mode_flags;
start = untagged_addr(start);
- mode_flags = mode & MPOL_MODE_FLAGS;
- mode &= ~MPOL_MODE_FLAGS;
- if (mode >= MPOL_MAX)
- return -EINVAL;
- if ((mode_flags & MPOL_F_STATIC_NODES) &&
- (mode_flags & MPOL_F_RELATIVE_NODES))
- return -EINVAL;
+ err = sanitize_mpol_flags(&lmode, &mode_flags);
+ if (err)
+ return err;
+
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
- return do_mbind(start, len, mode, mode_flags, &nodes, flags);
+
+ return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}
SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
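The new sanitize_mpol_flags() helper gives mbind() and set_mempolicy() one shared check: the mode flags are split off, the remaining mode must be a known policy, and MPOL_F_STATIC_NODES and MPOL_F_RELATIVE_NODES stay mutually exclusive. A minimal userspace sketch of what passes and what fails this check (node numbers and the maxnode value are illustrative; link against libnuma for the syscall wrappers):

#define _GNU_SOURCE
#include <numaif.h>
#include <stdio.h>

int main(void)
{
        unsigned long nodemask = 1UL << 0;      /* node 0 only; illustrative */
        unsigned long maxnode = 8 * sizeof(nodemask);

        /* Accepted: a mode plus one nodemask-interpretation flag. */
        if (set_mempolicy(MPOL_BIND | MPOL_F_STATIC_NODES, &nodemask, maxnode))
                perror("set_mempolicy");

        /* Rejected with EINVAL by sanitize_mpol_flags(): both flags at once. */
        if (set_mempolicy(MPOL_BIND | MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES,
                          &nodemask, maxnode))
                perror("set_mempolicy (expected EINVAL)");

        return 0;
}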
@@ -1493,20 +1486,20 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
unsigned long maxnode)
{
- int err;
+ unsigned short mode_flags;
nodemask_t nodes;
- unsigned short flags;
+ int lmode = mode;
+ int err;
+
+ err = sanitize_mpol_flags(&lmode, &mode_flags);
+ if (err)
+ return err;
- flags = mode & MPOL_MODE_FLAGS;
- mode &= ~MPOL_MODE_FLAGS;
- if ((unsigned int)mode >= MPOL_MAX)
- return -EINVAL;
- if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
- return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
- return do_set_mempolicy(mode, flags, &nodes);
+
+ return do_set_mempolicy(lmode, mode_flags, &nodes);
}
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
@@ -1863,14 +1856,14 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
/*
- * if policy->v.nodes has movable memory only,
+ * if policy->nodes has movable memory only,
* we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
*
- * policy->v.nodes is intersect with node_states[N_MEMORY].
+ * policy->nodes is intersect with node_states[N_MEMORY].
* so if the following test fails, it implies
- * policy->v.nodes has movable memory only.
+ * policy->nodes has movable memory only.
*/
- if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
+ if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
dynamic_policy_zone = ZONE_MOVABLE;
return zone >= dynamic_policy_zone;
@@ -1885,8 +1878,8 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
/* Lower zones don't get a nodemask applied for MPOL_BIND */
if (unlikely(policy->mode == MPOL_BIND) &&
apply_policy_zone(policy, gfp_zone(gfp)) &&
- cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
- return &policy->v.nodes;
+ cpuset_nodemask_valid_mems_allowed(&policy->nodes))
+ return &policy->nodes;
return NULL;
}
@@ -1894,9 +1887,9 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
- if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
- nd = policy->v.preferred_node;
- else {
+ if (policy->mode == MPOL_PREFERRED) {
+ nd = first_node(policy->nodes);
+ } else {
/*
* __GFP_THISNODE shouldn't even be used with the bind policy
* because we might easily break the expectation to stay on the
@@ -1914,7 +1907,7 @@ static unsigned interleave_nodes(struct mempolicy *policy)
unsigned next;
struct task_struct *me = current;
- next = next_node_in(me->il_prev, policy->v.nodes);
+ next = next_node_in(me->il_prev, policy->nodes);
if (next < MAX_NUMNODES)
me->il_prev = next;
return next;
@@ -1933,15 +1926,12 @@ unsigned int mempolicy_slab_node(void)
return node;
policy = current->mempolicy;
- if (!policy || policy->flags & MPOL_F_LOCAL)
+ if (!policy)
return node;
switch (policy->mode) {
case MPOL_PREFERRED:
- /*
- * handled MPOL_F_LOCAL above
- */
- return policy->v.preferred_node;
+ return first_node(policy->nodes);
case MPOL_INTERLEAVE:
return interleave_nodes(policy);
@@ -1957,9 +1947,11 @@ unsigned int mempolicy_slab_node(void)
enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
- &policy->v.nodes);
+ &policy->nodes);
return z->zone ? zone_to_nid(z->zone) : node;
}
+ case MPOL_LOCAL:
+ return node;
default:
BUG();
@@ -1968,12 +1960,12 @@ unsigned int mempolicy_slab_node(void)
/*
* Do static interleaving for a VMA with known offset @n. Returns the n'th
- * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
+ * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
* number of present nodes.
*/
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
- unsigned nnodes = nodes_weight(pol->v.nodes);
+ unsigned nnodes = nodes_weight(pol->nodes);
unsigned target;
int i;
int nid;
@@ -1981,9 +1973,9 @@ static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
if (!nnodes)
return numa_node_id();
target = (unsigned int)n % nnodes;
- nid = first_node(pol->v.nodes);
+ nid = first_node(pol->nodes);
for (i = 0; i < target; i++)
- nid = next_node(nid, pol->v.nodes);
+ nid = next_node(nid, pol->nodes);
return nid;
}
@@ -2039,7 +2031,7 @@ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
} else {
nid = policy_node(gfp_flags, *mpol, numa_node_id());
if ((*mpol)->mode == MPOL_BIND)
- *nodemask = &(*mpol)->v.nodes;
+ *nodemask = &(*mpol)->nodes;
}
return nid;
}
@@ -2063,7 +2055,6 @@ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
struct mempolicy *mempolicy;
- int nid;
if (!(mask && current->mempolicy))
return false;
@@ -2072,16 +2063,13 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
mempolicy = current->mempolicy;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
- if (mempolicy->flags & MPOL_F_LOCAL)
- nid = numa_node_id();
- else
- nid = mempolicy->v.preferred_node;
- init_nodemask_of_node(mask, nid);
- break;
-
case MPOL_BIND:
case MPOL_INTERLEAVE:
- *mask = mempolicy->v.nodes;
+ *mask = mempolicy->nodes;
+ break;
+
+ case MPOL_LOCAL:
+ init_nodemask_of_node(mask, numa_node_id());
break;
default:
@@ -2094,16 +2082,16 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
#endif
/*
- * mempolicy_nodemask_intersects
+ * mempolicy_in_oom_domain
*
- * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
- * policy. Otherwise, check for intersection between mask and the policy
- * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
- * policy, always return true since it may allocate elsewhere on fallback.
+ * If tsk's mempolicy is "bind", check for intersection between mask and
+ * the policy nodemask. Otherwise, return true for all other policies
+ * including "interleave", as a tsk with "interleave" policy may have
+ * memory allocated from all nodes in system.
*
* Takes task_lock(tsk) to prevent freeing of its mempolicy.
*/
-bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask)
{
struct mempolicy *mempolicy;
@@ -2111,29 +2099,13 @@ bool mempolicy_nodemask_intersects(struct task_struct *tsk,
if (!mask)
return ret;
+
task_lock(tsk);
mempolicy = tsk->mempolicy;
- if (!mempolicy)
- goto out;
-
- switch (mempolicy->mode) {
- case MPOL_PREFERRED:
- /*
- * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
- * allocate from, they may fallback to other nodes when oom.
- * Thus, it's possible for tsk to have allocated memory from
- * nodes in mask.
- */
- break;
- case MPOL_BIND:
- case MPOL_INTERLEAVE:
- ret = nodes_intersects(mempolicy->v.nodes, *mask);
- break;
- default:
- BUG();
- }
-out:
+ if (mempolicy && mempolicy->mode == MPOL_BIND)
+ ret = nodes_intersects(mempolicy->nodes, *mask);
task_unlock(tsk);
+
return ret;
}
@@ -2204,8 +2176,8 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* If the policy is interleave, or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
- if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
- hpage_node = pol->v.preferred_node;
+ if (pol->mode == MPOL_PREFERRED)
+ hpage_node = first_node(pol->nodes);
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
@@ -2338,12 +2310,10 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
switch (a->mode) {
case MPOL_BIND:
case MPOL_INTERLEAVE:
- return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
- /* a's ->flags is the same as b's */
- if (a->flags & MPOL_F_LOCAL)
- return true;
- return a->v.preferred_node == b->v.preferred_node;
+ return !!nodes_equal(a->nodes, b->nodes);
+ case MPOL_LOCAL:
+ return true;
default:
BUG();
return false;
@@ -2481,16 +2451,17 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
break;
case MPOL_PREFERRED:
- if (pol->flags & MPOL_F_LOCAL)
- polnid = numa_node_id();
- else
- polnid = pol->v.preferred_node;
+ polnid = first_node(pol->nodes);
+ break;
+
+ case MPOL_LOCAL:
+ polnid = numa_node_id();
break;
case MPOL_BIND:
/* Optimize placement among multiple nodes via NUMA balancing */
if (pol->flags & MPOL_F_MORON) {
- if (node_isset(thisnid, pol->v.nodes))
+ if (node_isset(thisnid, pol->nodes))
break;
goto out;
}
@@ -2501,12 +2472,12 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
* else select nearest allowed node, if any.
* If no allowed nodes, use current [!misplaced].
*/
- if (node_isset(curnid, pol->v.nodes))
+ if (node_isset(curnid, pol->nodes))
goto out;
z = first_zones_zonelist(
node_zonelist(numa_node_id(), GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
- &pol->v.nodes);
+ &pol->nodes);
polnid = zone_to_nid(z->zone);
break;
@@ -2709,7 +2680,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
vma->vm_pgoff,
sz, npol ? npol->mode : -1,
npol ? npol->flags : -1,
- npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
+ npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
if (npol) {
new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
@@ -2807,7 +2778,7 @@ void __init numa_policy_init(void)
.refcnt = ATOMIC_INIT(1),
.mode = MPOL_PREFERRED,
.flags = MPOL_F_MOF | MPOL_F_MORON,
- .v = { .preferred_node = nid, },
+ .nodes = nodemask_of_node(nid),
};
}
@@ -2851,9 +2822,6 @@ void numa_default_policy(void)
* Parse and format mempolicy from/to strings
*/
-/*
- * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
- */
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
@@ -2931,7 +2899,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
*/
if (nodelist)
goto out;
- mode = MPOL_PREFERRED;
break;
case MPOL_DEFAULT:
/*
@@ -2970,12 +2937,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
* Save nodes for mpol_to_str() to show the tmpfs mount options
* for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
*/
- if (mode != MPOL_PREFERRED)
- new->v.nodes = nodes;
- else if (nodelist)
- new->v.preferred_node = first_node(nodes);
- else
- new->flags |= MPOL_F_LOCAL;
+ if (mode != MPOL_PREFERRED) {
+ new->nodes = nodes;
+ } else if (nodelist) {
+ nodes_clear(new->nodes);
+ node_set(first_node(nodes), new->nodes);
+ } else {
+ new->mode = MPOL_LOCAL;
+ }
/*
* Save nodes for contextualization: this will be used to "clone"
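mpol_parse_str() is also the parser for the tmpfs "mpol=" mount option, so after this change the string "local" produces a policy whose mode is genuinely MPOL_LOCAL rather than MPOL_PREFERRED with MPOL_F_LOCAL set. A hedged userspace sketch of exercising that path (the mount point is illustrative and the call needs CAP_SYS_ADMIN):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* "mpol=local" is handed to mpol_parse_str(); other accepted forms
         * include "mpol=prefer:0" and "mpol=interleave:0-3". */
        if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0, "mpol=local"))
                perror("mount");
        return 0;
}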
@@ -3021,16 +2990,12 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
switch (mode) {
case MPOL_DEFAULT:
+ case MPOL_LOCAL:
break;
case MPOL_PREFERRED:
- if (flags & MPOL_F_LOCAL)
- mode = MPOL_LOCAL;
- else
- node_set(pol->v.preferred_node, nodes);
- break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
- nodes = pol->v.nodes;
+ nodes = pol->nodes;
break;
default:
WARN_ON_ONCE(1);
diff --git a/mm/migrate.c b/mm/migrate.c
index 380ca57b9031..7e240437e7d9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -210,13 +210,18 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
* Recheck VMA as permissions can change since migration started
*/
entry = pte_to_swp_entry(*pvmw.pte);
- if (is_write_migration_entry(entry))
+ if (is_writable_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(*pvmw.pte))
pte = pte_mkuffd_wp(pte);
if (unlikely(is_device_private_page(new))) {
- entry = make_device_private_entry(new, pte_write(pte));
+ if (pte_write(pte))
+ entry = make_writable_device_private_entry(
+ page_to_pfn(new));
+ else
+ entry = make_readable_device_private_entry(
+ page_to_pfn(new));
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(*pvmw.pte))
pte = pte_swp_mksoft_dirty(pte);
@@ -226,8 +231,10 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
+ unsigned int shift = huge_page_shift(hstate_vma(vma));
+
pte = pte_mkhuge(pte);
- pte = arch_make_huge_pte(pte, vma, new, 0);
+ pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, pvmw.address);
@@ -294,7 +301,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
if (!is_migration_entry(entry))
goto out;
- page = migration_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
page = compound_head(page);
/*
@@ -335,7 +342,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
ptl = pmd_lock(mm, pmd);
if (!is_pmd_migration_entry(*pmd))
goto unlock;
- page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
+ page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
if (!get_page_unless_zero(page))
goto unlock;
spin_unlock(ptl);
@@ -530,54 +537,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
}
/*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page. We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
- int nr_pages)
-{
- int i;
- struct page *dst_base = dst;
- struct page *src_base = src;
-
- for (i = 0; i < nr_pages; ) {
- cond_resched();
- copy_highpage(dst, src);
-
- i++;
- dst = mem_map_next(dst, dst_base, i);
- src = mem_map_next(src, src_base, i);
- }
-}
-
-static void copy_huge_page(struct page *dst, struct page *src)
-{
- int i;
- int nr_pages;
-
- if (PageHuge(src)) {
- /* hugetlbfs page */
- struct hstate *h = page_hstate(src);
- nr_pages = pages_per_huge_page(h);
-
- if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
- __copy_gigantic_page(dst, src, nr_pages);
- return;
- }
- } else {
- /* thp page */
- BUG_ON(!PageTransHuge(src));
- nr_pages = thp_nr_pages(src);
- }
-
- for (i = 0; i < nr_pages; i++) {
- cond_resched();
- copy_highpage(dst + i, src + i);
- }
-}
-
-/*
* Copy the page to its new location
*/
void migrate_page_states(struct page *newpage, struct page *page)
@@ -626,7 +585,10 @@ void migrate_page_states(struct page *newpage, struct page *page)
if (PageSwapCache(page))
ClearPageSwapCache(page);
ClearPagePrivate(page);
- set_page_private(page, 0);
+
+ /* page->private contains hugetlb specific flags */
+ if (!PageHuge(page))
+ set_page_private(page, 0);
/*
* If any waiters have accumulated on the new page then
@@ -1099,7 +1061,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
/* Establish migration ptes */
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
- try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK);
+ try_to_migrate(page, 0);
page_was_mapped = 1;
}
@@ -1288,7 +1250,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* page_mapping() set, hugetlbfs specific move page routine will not
* be called and we could leak usage counts for subpools.
*/
- if (page_private(hpage) && !page_mapping(hpage)) {
+ if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
rc = -EBUSY;
goto out_unlock;
}
@@ -1301,7 +1263,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (page_mapped(hpage)) {
bool mapping_locked = false;
- enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK;
+ enum ttu_flags ttu = 0;
if (!PageAnon(hpage)) {
/*
@@ -1318,7 +1280,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
ttu |= TTU_RMAP_LOCKED;
}
- try_to_unmap(hpage, ttu);
+ try_to_migrate(hpage, ttu);
page_was_mapped = 1;
if (mapping_locked)
@@ -1418,6 +1380,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
int swapwrite = current->flags & PF_SWAPWRITE;
int rc, nr_subpages;
LIST_HEAD(ret_pages);
+ bool nosplit = (reason == MR_NUMA_MISPLACED);
trace_mm_migrate_pages_start(mode, reason);
@@ -1489,8 +1452,9 @@ retry:
/*
* When memory is low, don't bother to try to migrate
* other pages, just exit.
+ * THP NUMA faulting doesn't split THP to retry.
*/
- if (is_thp) {
+ if (is_thp && !nosplit) {
if (!try_split_thp(page, &page2, from)) {
nr_thp_split++;
goto retry;
@@ -2043,12 +2007,33 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
return newpage;
}
+static struct page *alloc_misplaced_dst_page_thp(struct page *page,
+ unsigned long data)
+{
+ int nid = (int) data;
+ struct page *newpage;
+
+ newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
+ HPAGE_PMD_ORDER);
+ if (!newpage)
+ goto out;
+
+ prep_transhuge_page(newpage);
+
+out:
+ return newpage;
+}
+
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
+ /* Do not migrate THP mapped by multiple processes */
+ if (PageTransHuge(page) && total_mapcount(page) > 1)
+ return 0;
+
/* Avoid migrating to a node that is nearly full */
if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
return 0;
@@ -2056,18 +2041,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
if (isolate_lru_page(page))
return 0;
- /*
- * migrate_misplaced_transhuge_page() skips page migration's usual
- * check on page_count(), so we must do it here, now that the page
- * has been isolated: a GUP pin, or any other pin, prevents migration.
- * The expected page count is 3: 1 for page's mapcount and 1 for the
- * caller's pin and 1 for the reference taken by isolate_lru_page().
- */
- if (PageTransHuge(page) && page_count(page) != 3) {
- putback_lru_page(page);
- return 0;
- }
-
page_lru = page_is_file_lru(page);
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
thp_nr_pages(page));
@@ -2081,12 +2054,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 1;
}
-bool pmd_trans_migrating(pmd_t pmd)
-{
- struct page *page = pmd_page(pmd);
- return PageLocked(page);
-}
-
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
@@ -2099,6 +2066,21 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
int isolated;
int nr_remaining;
LIST_HEAD(migratepages);
+ new_page_t *new;
+ bool compound;
+ int nr_pages = thp_nr_pages(page);
+
+ /*
+ * A PTE-mapped THP or a HugeTLB page can't reach here, so the page is
+ * either a base page or a THP, and if it is a THP it must be the head
+ * page.
+ */
+ compound = PageTransHuge(page);
+
+ if (compound)
+ new = alloc_misplaced_dst_page_thp;
+ else
+ new = alloc_misplaced_dst_page;
/*
* Don't migrate file pages that are mapped in multiple processes
@@ -2120,19 +2102,18 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
goto out;
list_add(&page->lru, &migratepages);
- nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
- NULL, node, MIGRATE_ASYNC,
- MR_NUMA_MISPLACED);
+ nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
+ MIGRATE_ASYNC, MR_NUMA_MISPLACED);
if (nr_remaining) {
if (!list_empty(&migratepages)) {
list_del(&page->lru);
- dec_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
+ mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
+ page_is_file_lru(page), -nr_pages);
putback_lru_page(page);
}
isolated = 0;
} else
- count_vm_numa_event(NUMA_PAGE_MIGRATE);
+ count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
BUG_ON(!list_empty(&migratepages));
return isolated;
@@ -2141,141 +2122,6 @@ out:
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
-
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-/*
- * Migrates a THP to a given target node. page must be locked and is unlocked
- * before returning.
- */
-int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
-{
- spinlock_t *ptl;
- pg_data_t *pgdat = NODE_DATA(node);
- int isolated = 0;
- struct page *new_page = NULL;
- int page_lru = page_is_file_lru(page);
- unsigned long start = address & HPAGE_PMD_MASK;
-
- new_page = alloc_pages_node(node,
- (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
- HPAGE_PMD_ORDER);
- if (!new_page)
- goto out_fail;
- prep_transhuge_page(new_page);
-
- isolated = numamigrate_isolate_page(pgdat, page);
- if (!isolated) {
- put_page(new_page);
- goto out_fail;
- }
-
- /* Prepare a page as a migration target */
- __SetPageLocked(new_page);
- if (PageSwapBacked(page))
- __SetPageSwapBacked(new_page);
-
- /* anon mapping, we can simply copy page->mapping to the new page: */
- new_page->mapping = page->mapping;
- new_page->index = page->index;
- /* flush the cache before copying using the kernel virtual address */
- flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
- migrate_page_copy(new_page, page);
- WARN_ON(PageLRU(new_page));
-
- /* Recheck the target PMD */
- ptl = pmd_lock(mm, pmd);
- if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
- spin_unlock(ptl);
-
- /* Reverse changes made by migrate_page_copy() */
- if (TestClearPageActive(new_page))
- SetPageActive(page);
- if (TestClearPageUnevictable(new_page))
- SetPageUnevictable(page);
-
- unlock_page(new_page);
- put_page(new_page); /* Free it */
-
- /* Retake the callers reference and putback on LRU */
- get_page(page);
- putback_lru_page(page);
- mod_node_page_state(page_pgdat(page),
- NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
-
- goto out_unlock;
- }
-
- entry = mk_huge_pmd(new_page, vma->vm_page_prot);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-
- /*
- * Overwrite the old entry under pagetable lock and establish
- * the new PTE. Any parallel GUP will either observe the old
- * page blocking on the page lock, block on the page table
- * lock or observe the new page. The SetPageUptodate on the
- * new page and page_add_new_anon_rmap guarantee the copy is
- * visible before the pagetable update.
- */
- page_add_anon_rmap(new_page, vma, start, true);
- /*
- * At this point the pmd is numa/protnone (i.e. non present) and the TLB
- * has already been flushed globally. So no TLB can be currently
- * caching this non present pmd mapping. There's no need to clear the
- * pmd before doing set_pmd_at(), nor to flush the TLB after
- * set_pmd_at(). Clearing the pmd here would introduce a race
- * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
- * mmap_lock for reading. If the pmd is set to NULL at any given time,
- * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
- * pmd.
- */
- set_pmd_at(mm, start, pmd, entry);
- update_mmu_cache_pmd(vma, address, &entry);
-
- page_ref_unfreeze(page, 2);
- mlock_migrate_page(new_page, page);
- page_remove_rmap(page, true);
- set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
-
- spin_unlock(ptl);
-
- /* Take an "isolate" reference and put new page on the LRU. */
- get_page(new_page);
- putback_lru_page(new_page);
-
- unlock_page(new_page);
- unlock_page(page);
- put_page(page); /* Drop the rmap reference */
- put_page(page); /* Drop the LRU isolation reference */
-
- count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
- count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
-
- mod_node_page_state(page_pgdat(page),
- NR_ISOLATED_ANON + page_lru,
- -HPAGE_PMD_NR);
- return isolated;
-
-out_fail:
- count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
- ptl = pmd_lock(mm, pmd);
- if (pmd_same(*pmd, entry)) {
- entry = pmd_modify(entry, vma->vm_page_prot);
- set_pmd_at(mm, start, pmd, entry);
- update_mmu_cache_pmd(vma, address, &entry);
- }
- spin_unlock(ptl);
-
-out_unlock:
- unlock_page(page);
- put_page(page);
- return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEVICE_PRIVATE
@@ -2400,7 +2246,7 @@ again:
if (!is_device_private_entry(entry))
goto next;
- page = device_private_entry_to_page(entry);
+ page = pfn_swap_entry_to_page(entry);
if (!(migrate->flags &
MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
page->pgmap->owner != migrate->pgmap_owner)
@@ -2408,7 +2254,7 @@ again:
mpfn = migrate_pfn(page_to_pfn(page)) |
MIGRATE_PFN_MIGRATE;
- if (is_write_device_private_entry(entry))
+ if (is_writable_device_private_entry(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
@@ -2454,8 +2300,12 @@ again:
ptep_get_and_clear(mm, addr, ptep);
/* Setup special migration page table entry */
- entry = make_migration_entry(page, mpfn &
- MIGRATE_PFN_WRITE);
+ if (mpfn & MIGRATE_PFN_WRITE)
+ entry = make_writable_migration_entry(
+ page_to_pfn(page));
+ else
+ entry = make_readable_migration_entry(
+ page_to_pfn(page));
swp_pte = swp_entry_to_pte(entry);
if (pte_present(pte)) {
if (pte_soft_dirty(pte))
@@ -2518,8 +2368,8 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
* that the registered device driver can skip invalidating device
* private page mappings that won't be migrated.
*/
- mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
- migrate->vma->vm_mm, migrate->start, migrate->end,
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
+ migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
migrate->pgmap_owner);
mmu_notifier_invalidate_range_start(&range);
@@ -2704,7 +2554,6 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
*/
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
- int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;
const unsigned long npages = migrate->npages;
const unsigned long start = migrate->start;
unsigned long addr, i, restore = 0;
@@ -2716,7 +2565,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
continue;
if (page_mapped(page)) {
- try_to_unmap(page, flags);
+ try_to_migrate(page, 0);
if (page_mapped(page))
goto restore;
}
@@ -2928,7 +2777,12 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
if (is_device_private_page(page)) {
swp_entry_t swp_entry;
- swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
+ if (vma->vm_flags & VM_WRITE)
+ swp_entry = make_writable_device_private_entry(
+ page_to_pfn(page));
+ else
+ swp_entry = make_readable_device_private_entry(
+ page_to_pfn(page));
entry = swp_entry_to_pte(swp_entry);
} else {
/*
@@ -3025,9 +2879,9 @@ void migrate_vma_pages(struct migrate_vma *migrate)
if (!notified) {
notified = true;
- mmu_notifier_range_init_migrate(&range, 0,
- migrate->vma, migrate->vma->vm_mm,
- addr, migrate->end,
+ mmu_notifier_range_init_owner(&range,
+ MMU_NOTIFY_MIGRATE, 0, migrate->vma,
+ migrate->vma->vm_mm, addr, migrate->end,
migrate->pgmap_owner);
mmu_notifier_invalidate_range_start(&range);
}
diff --git a/mm/mlock.c b/mm/mlock.c
index e338ebc4ad29..16d2ee160d43 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -23,6 +23,7 @@
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
+#include <linux/secretmem.h>
#include "internal.h"
@@ -108,7 +109,7 @@ void mlock_vma_page(struct page *page)
/*
* Finish munlock after successful page isolation
*
- * Page must be locked. This is a wrapper for try_to_munlock()
+ * Page must be locked. This is a wrapper for page_mlock()
* and putback_lru_page() with munlock accounting.
*/
static void __munlock_isolated_page(struct page *page)
@@ -118,7 +119,7 @@ static void __munlock_isolated_page(struct page *page)
* and we don't need to check all the other vmas.
*/
if (page_mapcount(page) > 1)
- try_to_munlock(page);
+ page_mlock(page);
/* Did try_to_unlock() succeed or punt? */
if (!PageMlocked(page))
@@ -158,7 +159,7 @@ static void __munlock_isolation_failed(struct page *page)
* munlock()ed or munmap()ed, we want to check whether other vmas hold the
* page locked so that we can leave it on the unevictable lru list and not
* bother vmscan with it. However, to walk the page's rmap list in
- * try_to_munlock() we must isolate the page from the LRU. If some other
+ * page_mlock() we must isolate the page from the LRU. If some other
* task has removed the page from the LRU, we won't be able to do that.
* So we clear the PageMlocked as we might not get another chance. If we
* can't isolate the page, we leave it for putback_lru_page() and vmscan
@@ -168,7 +169,7 @@ unsigned int munlock_vma_page(struct page *page)
{
int nr_pages;
- /* For try_to_munlock() and to serialize with page migration */
+ /* For page_mlock() and to serialize with page migration */
BUG_ON(!PageLocked(page));
VM_BUG_ON_PAGE(PageTail(page), page);
@@ -205,7 +206,7 @@ static int __mlock_posix_error_return(long retval)
*
* The fast path is available only for evictable pages with single mapping.
* Then we can bypass the per-cpu pvec and get better performance.
- * when mapcount > 1 we need try_to_munlock() which can fail.
+ * when mapcount > 1 we need page_mlock() which can fail.
* when !page_evictable(), we need the full redo logic of putback_lru_page to
* avoid leaving evictable page in unevictable list.
*
@@ -414,7 +415,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
*
* We don't save and restore VM_LOCKED here because pages are
* still on lru. In unmap path, pages might be scanned by reclaim
- * and re-mlocked by try_to_{munlock|unmap} before we unmap and
+ * and re-mlocked by page_mlock/try_to_unmap before we unmap and
* free them. This will result in freeing mlocked pages.
*/
void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -503,7 +504,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
- vma_is_dax(vma))
+ vma_is_dax(vma) || vma_is_secretmem(vma))
/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index aa9de981b659..ca54d36d203a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1352,9 +1352,8 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
return hint;
}
-static inline int mlock_future_check(struct mm_struct *mm,
- unsigned long flags,
- unsigned long len)
+int mlock_future_check(struct mm_struct *mm, unsigned long flags,
+ unsigned long len)
{
unsigned long locked, lock_limit;
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 2ae3f33b85b1..1854850b4b89 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -153,6 +153,37 @@ static inline void put_memcg_path_buf(void)
rcu_read_unlock();
}
+#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
+ do { \
+ const char *memcg_path; \
+ local_lock(&memcg_paths.lock); \
+ memcg_path = get_mm_memcg_path(mm); \
+ trace_mmap_lock_##type(mm, \
+ memcg_path != NULL ? memcg_path : "", \
+ ##__VA_ARGS__); \
+ if (likely(memcg_path != NULL)) \
+ put_memcg_path_buf(); \
+ local_unlock(&memcg_paths.lock); \
+ } while (0)
+
+#else /* !CONFIG_MEMCG */
+
+int trace_mmap_lock_reg(void)
+{
+ return 0;
+}
+
+void trace_mmap_lock_unreg(void)
+{
+}
+
+#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
+ trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)
+
+#endif /* CONFIG_MEMCG */
+
+#ifdef CONFIG_TRACING
+#ifdef CONFIG_MEMCG
/*
* Write the given mm_struct's memcg path to a percpu buffer, and return a
* pointer to it. If the path cannot be determined, or no buffer was available
@@ -187,33 +218,6 @@ out:
return buf;
}
-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
- do { \
- const char *memcg_path; \
- local_lock(&memcg_paths.lock); \
- memcg_path = get_mm_memcg_path(mm); \
- trace_mmap_lock_##type(mm, \
- memcg_path != NULL ? memcg_path : "", \
- ##__VA_ARGS__); \
- if (likely(memcg_path != NULL)) \
- put_memcg_path_buf(); \
- local_unlock(&memcg_paths.lock); \
- } while (0)
-
-#else /* !CONFIG_MEMCG */
-
-int trace_mmap_lock_reg(void)
-{
- return 0;
-}
-
-void trace_mmap_lock_unreg(void)
-{
-}
-
-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
- trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)
-
#endif /* CONFIG_MEMCG */
/*
@@ -239,3 +243,4 @@ void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
+#endif /* CONFIG_TRACING */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e7a443157988..883e2cc85cad 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -143,26 +143,36 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry = pte_to_swp_entry(oldpte);
pte_t newpte;
- if (is_write_migration_entry(entry)) {
+ if (is_writable_migration_entry(entry)) {
/*
* A protection check is difficult so
* just be safe and disable write
*/
- make_migration_entry_read(&entry);
+ entry = make_readable_migration_entry(
+ swp_offset(entry));
newpte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(oldpte))
newpte = pte_swp_mksoft_dirty(newpte);
if (pte_swp_uffd_wp(oldpte))
newpte = pte_swp_mkuffd_wp(newpte);
- } else if (is_write_device_private_entry(entry)) {
+ } else if (is_writable_device_private_entry(entry)) {
/*
* We do not preserve soft-dirtiness. See
* copy_one_pte() for explanation.
*/
- make_device_private_entry_read(&entry);
+ entry = make_readable_device_private_entry(
+ swp_offset(entry));
newpte = swp_entry_to_pte(entry);
if (pte_swp_uffd_wp(oldpte))
newpte = pte_swp_mkuffd_wp(newpte);
+ } else if (is_writable_device_exclusive_entry(entry)) {
+ entry = make_readable_device_exclusive_entry(
+ swp_offset(entry));
+ newpte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(oldpte))
+ newpte = pte_swp_mksoft_dirty(newpte);
+ if (pte_swp_uffd_wp(oldpte))
+ newpte = pte_swp_mkuffd_wp(newpte);
} else {
newpte = oldpte;
}
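All three branches above follow the same pattern: take the writable pfn-style swap entry apart with swp_offset() and rebuild it as the read-only variant of the same type, then re-apply the soft-dirty and uffd-wp bits that survive the conversion. Condensed into one illustrative helper (the wrapper function is a sketch, not something this patch adds; the is_writable_*/make_readable_* helpers are the ones introduced by this series):

/* Sketch only: downgrade any writable pfn-style swap entry to read-only. */
static swp_entry_t downgrade_swp_entry(swp_entry_t entry)
{
        if (is_writable_migration_entry(entry))
                return make_readable_migration_entry(swp_offset(entry));
        if (is_writable_device_private_entry(entry))
                return make_readable_device_private_entry(swp_offset(entry));
        if (is_writable_device_exclusive_entry(entry))
                return make_readable_device_exclusive_entry(swp_offset(entry));
        return entry;
}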
diff --git a/mm/mremap.c b/mm/mremap.c
index a369a6100698..5989d3990020 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,7 +25,8 @@
#include <linux/userfaultfd_k.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+#include <asm/pgalloc.h>
#include "internal.h"
@@ -209,6 +210,15 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
drop_rmap_locks(vma);
}
+#ifndef arch_supports_page_table_move
+#define arch_supports_page_table_move arch_supports_page_table_move
+static inline bool arch_supports_page_table_move(void)
+{
+ return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
+ IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
+}
+#endif
+
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
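arch_supports_page_table_move() is deliberately wrapped in #ifndef so an architecture can supply its own answer from its pgtable headers; the generic default simply reflects whether HAVE_MOVE_PMD/HAVE_MOVE_PUD are enabled. A sketch of what a per-arch override could look like (the radix_enabled() condition is an assumption chosen for illustration, not part of this patch):

/* Hypothetical arch header override (sketch): */
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
        /* e.g. only permit PMD/PUD-level moves under a particular MMU mode */
        return radix_enabled();
}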
@@ -217,6 +227,8 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
struct mm_struct *mm = vma->vm_mm;
pmd_t pmd;
+ if (!arch_supports_page_table_move())
+ return false;
/*
* The destination pmd shouldn't be established, free_pgtables()
* should have released it.
@@ -258,8 +270,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pmd_none(*new_pmd));
- /* Set the new pmd */
- set_pmd_at(mm, new_addr, new_pmd, pmd);
+ pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
@@ -276,7 +287,7 @@ static inline bool move_normal_pmd(struct vm_area_struct *vma,
}
#endif
-#ifdef CONFIG_HAVE_MOVE_PUD
+#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
@@ -284,6 +295,8 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
struct mm_struct *mm = vma->vm_mm;
pud_t pud;
+ if (!arch_supports_page_table_move())
+ return false;
/*
* The destination pud shouldn't be established, free_pgtables()
* should have released it.
@@ -306,8 +319,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pud_none(*new_pud));
- /* Set the new pud */
- set_pud_at(mm, new_addr, new_pud, pud);
+ pud_populate(mm, new_pud, pud_pgtable(pud));
flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
@@ -324,10 +336,61 @@ static inline bool move_normal_pud(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+{
+ spinlock_t *old_ptl, *new_ptl;
+ struct mm_struct *mm = vma->vm_mm;
+ pud_t pud;
+
+ /*
+ * The destination pud shouldn't be established, free_pgtables()
+ * should have released it.
+ */
+ if (WARN_ON_ONCE(!pud_none(*new_pud)))
+ return false;
+
+ /*
+ * We don't have to worry about the ordering of src and dst
+ * ptlocks because exclusive mmap_lock prevents deadlock.
+ */
+ old_ptl = pud_lock(vma->vm_mm, old_pud);
+ new_ptl = pud_lockptr(mm, new_pud);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+ /* Clear the pud */
+ pud = *old_pud;
+ pud_clear(old_pud);
+
+ VM_BUG_ON(!pud_none(*new_pud));
+
+ /* Set the new pud */
+ /* mark soft_dirty when we add pud level soft dirty support */
+ set_pud_at(mm, new_addr, new_pud, pud);
+ flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
+
+ return true;
+}
+#else
+static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+{
+ WARN_ON_ONCE(1);
+ return false;
+
+}
+#endif
+
enum pgt_entry {
NORMAL_PMD,
HPAGE_PMD,
NORMAL_PUD,
+ HPAGE_PUD,
};
/*
@@ -347,6 +410,7 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry,
mask = PMD_MASK;
size = PMD_SIZE;
break;
+ case HPAGE_PUD:
case NORMAL_PUD:
mask = PUD_MASK;
size = PUD_SIZE;
@@ -395,6 +459,12 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
move_huge_pmd(vma, old_addr, new_addr, old_entry,
new_entry);
break;
+ case HPAGE_PUD:
+ moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ move_huge_pud(vma, old_addr, new_addr, old_entry,
+ new_entry);
+ break;
+
default:
WARN_ON_ONCE(1);
break;
@@ -414,6 +484,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long extent, old_end;
struct mmu_notifier_range range;
pmd_t *old_pmd, *new_pmd;
+ pud_t *old_pud, *new_pud;
old_end = old_addr + len;
flush_cache_range(vma, old_addr, old_end);
@@ -429,17 +500,24 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
* PUD level if possible.
*/
extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
- if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
- pud_t *old_pud, *new_pud;
- old_pud = get_old_pud(vma->vm_mm, old_addr);
- if (!old_pud)
+ old_pud = get_old_pud(vma->vm_mm, old_addr);
+ if (!old_pud)
+ continue;
+ new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
+ if (!new_pud)
+ break;
+ if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
+ if (extent == HPAGE_PUD_SIZE) {
+ move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
+ old_pud, new_pud, need_rmap_locks);
+ /* We ignore and continue on error? */
continue;
- new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
- if (!new_pud)
- break;
+ }
+ } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
+
if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
- old_pud, new_pud, need_rmap_locks))
+ old_pud, new_pud, true))
continue;
}
@@ -466,7 +544,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
* moving at the PMD level if possible.
*/
if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
- old_pmd, new_pmd, need_rmap_locks))
+ old_pmd, new_pmd, true))
continue;
}
diff --git a/mm/nommu.c b/mm/nommu.c
index affda71641ca..3a93d4054810 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -223,7 +223,7 @@ long vread(char *buf, char *addr, unsigned long count)
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
+ return __vmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
@@ -241,7 +241,7 @@ EXPORT_SYMBOL(vmalloc);
*/
void *vzalloc(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
@@ -1501,7 +1501,6 @@ erase_whole_vma:
delete_vma(mm, vma);
return 0;
}
-EXPORT_SYMBOL(do_munmap);
int vm_munmap(unsigned long addr, size_t len)
{
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index eefd3f5fde46..c729a4c4a1ac 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -104,7 +104,7 @@ static bool oom_cpuset_eligible(struct task_struct *start,
* mempolicy intersects current, otherwise it may be
* needlessly killed.
*/
- ret = mempolicy_nodemask_intersects(tsk, mask);
+ ret = mempolicy_in_oom_domain(tsk, mask);
} else {
/*
* This is not a mempolicy constrained oom, so only
@@ -922,7 +922,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
continue;
}
/*
- * No kthead_use_mm() user needs to read from the userspace so
+ * No kthread_use_mm() user needs to read from the userspace so
* we are ok to reap it.
*/
if (unlikely(p->flags & PF_KTHREAD))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0817d88383d5..eeb3a9cb36bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -124,17 +124,6 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
struct pagesets {
local_lock_t lock;
-#if defined(CONFIG_DEBUG_INFO_BTF) && \
- !defined(CONFIG_DEBUG_LOCK_ALLOC) && \
- !defined(CONFIG_PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT)
- /*
- * pahole 1.21 and earlier gets confused by zero-sized per-CPU
- * variables and produces invalid BTF. Ensure that
- * sizeof(struct pagesets) != 0 for older versions of pahole.
- */
- char __pahole_hack;
- #warning "pahole too old to support zero-sized struct pagesets"
-#endif
};
static DEFINE_PER_CPU(struct pagesets, pagesets) = {
.lock = INIT_LOCAL_LOCK(lock),
@@ -749,7 +738,6 @@ void prep_compound_page(struct page *page, unsigned int order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- set_page_count(p, 0);
p->mapping = TAIL_MAPPING;
set_compound_head(p, page);
}
@@ -852,21 +840,24 @@ void init_mem_debugging_and_hardening(void)
}
#endif
- if (_init_on_alloc_enabled_early) {
- if (page_poisoning_requested)
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_alloc\n");
- else
- static_branch_enable(&init_on_alloc);
- }
- if (_init_on_free_enabled_early) {
- if (page_poisoning_requested)
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_free\n");
- else
- static_branch_enable(&init_on_free);
+ if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+ page_poisoning_requested) {
+ pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ "will take precedence over init_on_alloc and init_on_free\n");
+ _init_on_alloc_enabled_early = false;
+ _init_on_free_enabled_early = false;
}
+ if (_init_on_alloc_enabled_early)
+ static_branch_enable(&init_on_alloc);
+ else
+ static_branch_disable(&init_on_alloc);
+
+ if (_init_on_free_enabled_early)
+ static_branch_enable(&init_on_free);
+ else
+ static_branch_disable(&init_on_free);
+
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!debug_pagealloc_enabled())
return;
@@ -3193,7 +3184,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
int cpu;
/*
- * Allocate in the BSS so we wont require allocation in
+ * Allocate in the BSS so we won't require allocation in
* direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
*/
static cpumask_t cpus_with_pcps;
@@ -3462,19 +3453,10 @@ void free_unref_page_list(struct list_head *list)
* comment in free_unref_page.
*/
migratetype = get_pcppage_migratetype(page);
- if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
- if (unlikely(is_migrate_isolate(migratetype))) {
- list_del(&page->lru);
- free_one_page(page_zone(page), page, pfn, 0,
- migratetype, FPI_NONE);
- continue;
- }
-
- /*
- * Non-isolated types over MIGRATE_PCPTYPES get added
- * to the MIGRATE_MOVABLE pcp list.
- */
- set_pcppage_migratetype(page, MIGRATE_MOVABLE);
+ if (unlikely(is_migrate_isolate(migratetype))) {
+ list_del(&page->lru);
+ free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+ continue;
}
set_page_private(page, pfn);
@@ -3484,7 +3466,15 @@ void free_unref_page_list(struct list_head *list)
list_for_each_entry_safe(page, next, list, lru) {
pfn = page_private(page);
set_page_private(page, 0);
+
+ /*
+ * Non-isolated types over MIGRATE_PCPTYPES get added
+ * to the MIGRATE_MOVABLE pcp list.
+ */
migratetype = get_pcppage_migratetype(page);
+ if (unlikely(migratetype >= MIGRATE_PCPTYPES))
+ migratetype = MIGRATE_MOVABLE;
+
trace_mm_page_free_batched(page);
free_unref_page_commit(page, pfn, migratetype, 0);
@@ -5233,9 +5223,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
unsigned int alloc_flags = ALLOC_WMARK_LOW;
int nr_populated = 0, nr_account = 0;
- if (unlikely(nr_pages <= 0))
- return 0;
-
/*
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
@@ -5243,19 +5230,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* No pages requested? */
+ if (unlikely(nr_pages <= 0))
+ goto out;
+
/* Already populated array? */
if (unlikely(page_array && nr_pages - nr_populated == 0))
- return nr_populated;
+ goto out;
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
+#ifdef CONFIG_PAGE_OWNER
+ /*
+ * PAGE_OWNER may recurse into the allocator to allocate space to
+ * save the stack with pagesets.lock held. Releasing/reacquiring
+ * removes much of the performance benefit of bulk allocation, so
+ * force the caller to allocate one page at a time: that performs
+ * about as well, without adding complexity to the bulk allocator.
+ */
+ if (static_branch_unlikely(&page_owner_inited))
+ goto failed;
+#endif
+
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
gfp &= gfp_allowed_mask;
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
- return 0;
+ goto out;
gfp = alloc_gfp;
/* Find an allowed local zone that meets the low watermark. */
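Moving the nr_pages <= 0 check after the populated-slot scan means the function now reports already-populated slots instead of bailing out with 0, and the new out label gives every early exit the same return value. A hedged sketch of the array-based calling convention (alloc_pages_bulk_array() is the existing wrapper around this function; the count of eight is illustrative):

#include <linux/errno.h>
#include <linux/gfp.h>

static int fill_eight_pages(struct page **pages /* array of 8 NULL slots */)
{
        unsigned long filled;

        /* Returns how many slots of pages[] are populated on return,
         * which may be fewer than requested under memory pressure. */
        filled = alloc_pages_bulk_array(GFP_KERNEL, 8, pages);

        return filled == 8 ? 0 : -ENOMEM;
}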
@@ -5323,6 +5326,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+out:
return nr_populated;
failed_irq:
@@ -5338,7 +5342,7 @@ failed:
nr_populated++;
}
- return nr_populated;
+ goto out;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index a4435311754b..f7b331081791 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -41,7 +41,8 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
/* Handle un-addressable ZONE_DEVICE memory */
entry = pte_to_swp_entry(*pvmw->pte);
- if (!is_device_private_entry(entry))
+ if (!is_device_private_entry(entry) &&
+ !is_device_exclusive_entry(entry))
return false;
} else if (!pte_present(*pvmw->pte))
return false;
@@ -93,19 +94,21 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
return false;
entry = pte_to_swp_entry(*pvmw->pte);
- if (!is_migration_entry(entry))
+ if (!is_migration_entry(entry) &&
+ !is_device_exclusive_entry(entry))
return false;
- pfn = migration_entry_to_pfn(entry);
+ pfn = swp_offset(entry);
} else if (is_swap_pte(*pvmw->pte)) {
swp_entry_t entry;
/* Handle un-addressable ZONE_DEVICE memory */
entry = pte_to_swp_entry(*pvmw->pte);
- if (!is_device_private_entry(entry))
+ if (!is_device_private_entry(entry) &&
+ !is_device_exclusive_entry(entry))
return false;
- pfn = device_private_entry_to_pfn(entry);
+ pfn = swp_offset(entry);
} else {
if (!pte_present(*pvmw->pte))
return false;
@@ -233,7 +236,7 @@ restart:
return not_found(pvmw);
entry = pmd_to_swp_entry(pmde);
if (!is_migration_entry(entry) ||
- migration_entry_to_page(entry) != page)
+ pfn_swap_entry_to_page(entry) != page)
return not_found(pvmw);
return true;
}
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index c9d529dc7651..fe31aa19db81 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -32,6 +32,12 @@
#include <linux/log2.h>
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ /* nothing */
+}
+
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
int page_start, int page_end, gfp_t gfp)
{
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ee5d89fcd66f..2054c9213c43 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -303,6 +303,9 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
* For each cpu, depopulate and unmap pages [@page_start,@page_end)
* from @chunk.
*
+ * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the
+ * region back to vmalloc(), which will lazily flush the TLB.
+ *
* CONTEXT:
* pcpu_alloc_mutex.
*/
@@ -324,8 +327,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
pcpu_unmap_pages(chunk, pages, page_start, page_end);
- /* no need to flush tlb, vmalloc will handle it lazily */
-
pcpu_free_pages(chunk, pages, page_start, page_end);
}
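The vmalloc-backed implementation of pcpu_post_unmap_tlb_flush() falls outside these hunks; presumably it issues the flush that pcpu_depopulate_chunk() no longer gets for free from vmalloc. A sketch of what the percpu-vm.c version is expected to look like, assuming the usual chunk-address helpers:

/* Sketch (assumption): flush the kernel mapping for the depopulated range. */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        flush_tlb_kernel_range(
                pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}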
diff --git a/mm/percpu.c b/mm/percpu.c
index b4cebeca4c0c..7f2e0151c4e2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1572,6 +1572,7 @@ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
*
* pcpu_populate_chunk - populate the specified range of a chunk
* pcpu_depopulate_chunk - depopulate the specified range of a chunk
+ * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
* pcpu_create_chunk - create a new chunk
* pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
* pcpu_addr_to_page - translate address to physical address
@@ -1581,6 +1582,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
int page_start, int page_end, gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
int page_start, int page_end);
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
@@ -2137,11 +2140,12 @@ static void pcpu_reclaim_populated(void)
{
struct pcpu_chunk *chunk;
struct pcpu_block_md *block;
+ int freed_page_start, freed_page_end;
int i, end;
+ bool reintegrate;
lockdep_assert_held(&pcpu_lock);
-restart:
/*
* Once a chunk is isolated to the to_depopulate list, the chunk is no
* longer discoverable to allocations whom may populate pages. The only
@@ -2157,6 +2161,9 @@ restart:
* Scan chunk's pages in the reverse order to keep populated
* pages close to the beginning of the chunk.
*/
+ freed_page_start = chunk->nr_pages;
+ freed_page_end = 0;
+ reintegrate = false;
for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
/* no more work to do */
if (chunk->nr_empty_pop_pages == 0)
@@ -2164,8 +2171,8 @@ restart:
/* reintegrate chunk to prevent atomic alloc failures */
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
- pcpu_reintegrate_chunk(chunk);
- goto restart;
+ reintegrate = true;
+ goto end_chunk;
}
/*
@@ -2194,16 +2201,29 @@ restart:
spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, i + 1, end + 1);
+ freed_page_start = min(freed_page_start, i + 1);
+ freed_page_end = max(freed_page_end, end + 1);
/* reset the range and continue */
end = -1;
}
- if (chunk->free_bytes == pcpu_unit_size)
+end_chunk:
+ /* batch tlb flush per chunk to amortize cost */
+ if (freed_page_start < freed_page_end) {
+ spin_unlock_irq(&pcpu_lock);
+ pcpu_post_unmap_tlb_flush(chunk,
+ freed_page_start,
+ freed_page_end);
+ cond_resched();
+ spin_lock_irq(&pcpu_lock);
+ }
+
+ if (reintegrate || chunk->free_bytes == pcpu_unit_size)
pcpu_reintegrate_chunk(chunk);
else
- list_move(&chunk->list,
- &pcpu_chunk_lists[pcpu_sidelined_slot]);
+ list_move_tail(&chunk->list,
+ &pcpu_chunk_lists[pcpu_sidelined_slot]);
}
}
diff --git a/mm/rmap.c b/mm/rmap.c
index e05c300048e6..b9eb5c12f3fe 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1405,24 +1405,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
- * try_to_unmap() may return false when it is about to become true,
+ * try_to_unmap() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
if (flags & TTU_SYNC)
pvmw.flags = PVMW_SYNC;
- /* munlock has nothing to gain from examining un-locked vmas */
- if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
- return true;
-
- if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
- is_zone_device_page(page) && !is_device_private_page(page))
- return true;
-
- if (flags & TTU_SPLIT_HUGE_PMD) {
- split_huge_pmd_address(vma, address,
- flags & TTU_SPLIT_FREEZE, page);
- }
+ if (flags & TTU_SPLIT_HUGE_PMD)
+ split_huge_pmd_address(vma, address, false, page);
/*
* For THP, we have to assume the worse case ie pmd for invalidation.
@@ -1447,37 +1437,23 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
- /* PMD-mapped THP migration entry */
- if (!pvmw.pte && (flags & TTU_MIGRATION)) {
- VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
-
- set_pmd_migration_entry(&pvmw, page);
- continue;
- }
-#endif
-
/*
* If the page is mlock()d, we cannot swap it out.
- * If it's recently referenced (perhaps page_referenced
- * skipped over this mm) then we should reactivate it.
*/
- if (!(flags & TTU_IGNORE_MLOCK)) {
- if (vma->vm_flags & VM_LOCKED) {
- /* PTE-mapped THP are never mlocked */
- if (!PageTransCompound(page)) {
- /*
- * Holding pte lock, we do *not* need
- * mmap_lock here
- */
- mlock_vma_page(page);
- }
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
- if (flags & TTU_MUNLOCK)
- continue;
+ if (!(flags & TTU_IGNORE_MLOCK) &&
+ (vma->vm_flags & VM_LOCKED)) {
+ /*
+ * PTE-mapped THP are never marked as mlocked: so do
+ * not set it on a DoubleMap THP, nor on an Anon THP
+ * (which may still be PTE-mapped after DoubleMap was
+ * cleared). But stop unmapping even in those cases.
+ */
+ if (!PageTransCompound(page) || (PageHead(page) &&
+ !PageDoubleMap(page) && !PageAnon(page)))
+ mlock_vma_page(page);
+ page_vma_mapped_walk_done(&pvmw);
+ ret = false;
+ break;
}
/* Unexpected PMD-mapped THP? */
@@ -1520,46 +1496,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
}
- if (IS_ENABLED(CONFIG_MIGRATION) &&
- (flags & TTU_MIGRATION) &&
- is_zone_device_page(page)) {
- swp_entry_t entry;
- pte_t swp_pte;
-
- pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
-
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- entry = make_migration_entry(page, 0);
- swp_pte = swp_entry_to_pte(entry);
-
- /*
- * pteval maps a zone device page and is therefore
- * a swap pte.
- */
- if (pte_swp_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_swp_uffd_wp(pteval))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
- set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
- /*
- * No need to invalidate here it will synchronize on
- * against the special swap migration pte.
- *
- * The assignment to subpage above was computed from a
- * swap PTE which results in an invalid pointer.
- * Since only PAGE_SIZE pages can currently be
- * migrated, just set it to page. This will need to be
- * changed when hugepage migrations to device private
- * memory are supported.
- */
- subpage = page;
- goto discard;
- }
-
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
if (should_defer_flush(mm, flags)) {
@@ -1612,35 +1548,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
- } else if (IS_ENABLED(CONFIG_MIGRATION) &&
- (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
- swp_entry_t entry;
- pte_t swp_pte;
-
- if (arch_unmap_one(mm, vma, address, pteval) < 0) {
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
-
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- entry = make_migration_entry(subpage,
- pte_write(pteval));
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
- set_pte_at(mm, address, pvmw.pte, swp_pte);
- /*
- * No need to invalidate here it will synchronize on
- * against the special swap migration pte.
- */
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(subpage) };
pte_t swp_pte;
@@ -1756,9 +1663,10 @@ static int page_not_mapped(struct page *page)
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
*
- * If unmap is successful, return true. Otherwise, false.
+ * It is the caller's responsibility to check if the page is still
+ * mapped when needed (use TTU_SYNC to prevent accounting races).
*/
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
{
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
@@ -1767,6 +1675,274 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
.anon_lock = page_lock_anon_vma_read,
};
+ if (flags & TTU_RMAP_LOCKED)
+ rmap_walk_locked(page, &rwc);
+ else
+ rmap_walk(page, &rwc);
+}
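
Since try_to_unmap() now returns void, a caller that still cares about the outcome is expected to test page_mapped() itself afterwards. A minimal caller sketch, assuming a vmscan-style pageout path (the flags and label below are illustrative, not taken from this diff):

	try_to_unmap(page, TTU_BATCH_FLUSH | TTU_SYNC);
	if (page_mapped(page))
		goto keep_locked;	/* some ptes still map the page */
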
+
+/*
+ * @arg: enum ttu_flags will be passed to this argument.
+ *
+ * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
+ * containing migration entries.
+ */
+static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, void *arg)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ };
+ pte_t pteval;
+ struct page *subpage;
+ bool ret = true;
+ struct mmu_notifier_range range;
+ enum ttu_flags flags = (enum ttu_flags)(long)arg;
+
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_migrate() may return before page_mapped() has become false,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ if (flags & TTU_SYNC)
+ pvmw.flags = PVMW_SYNC;
+
+ /*
+ * unmap_page() in mm/huge_memory.c is the only user of migration with
+ * TTU_SPLIT_HUGE_PMD and it wants to freeze.
+ */
+ if (flags & TTU_SPLIT_HUGE_PMD)
+ split_huge_pmd_address(vma, address, true, page);
+
+ /*
+ * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
+ * For hugetlb, it could be much worse if we need to do pud
+ * invalidation in the case of pmd sharing.
+ *
+ * Note that the page cannot be freed in this function, as the caller of
+ * try_to_unmap() must hold a reference on the page.
+ */
+ range.end = PageKsm(page) ?
+ address + PAGE_SIZE : vma_address_end(page, vma);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ address, range.end);
+ if (PageHuge(page)) {
+ /*
+ * If sharing is possible, start and end will be adjusted
+ * accordingly.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &range.start,
+ &range.end);
+ }
+ mmu_notifier_invalidate_range_start(&range);
+
+ while (page_vma_mapped_walk(&pvmw)) {
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+ /* PMD-mapped THP migration entry */
+ if (!pvmw.pte) {
+ VM_BUG_ON_PAGE(PageHuge(page) ||
+ !PageTransCompound(page), page);
+
+ set_pmd_migration_entry(&pvmw, page);
+ continue;
+ }
+#endif
+
+ /* Unexpected PMD-mapped THP? */
+ VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+ subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ address = pvmw.address;
+
+ if (PageHuge(page) && !PageAnon(page)) {
+ /*
+ * To call huge_pmd_unshare, i_mmap_rwsem must be
+ * held in write mode. Caller needs to explicitly
+ * do this outside rmap routines.
+ */
+ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+ if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+ /*
+ * huge_pmd_unshare unmapped an entire PMD
+ * page. There is no way of knowing exactly
+ * which PMDs may be cached for this mm, so
+ * we must flush them all. start/end were
+ * already adjusted above to cover this range.
+ */
+ flush_cache_range(vma, range.start, range.end);
+ flush_tlb_range(vma, range.start, range.end);
+ mmu_notifier_invalidate_range(mm, range.start,
+ range.end);
+
+ /*
+ * The ref count of the PMD page was dropped
+ * which is part of the way map counting
+ * is done for shared PMDs. Return 'true'
+ * here. When there is no other sharing,
+ * huge_pmd_unshare returns false and we will
+ * unmap the actual page and drop map count
+ * to zero.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ }
+
+ /* Nuke the page table entry. */
+ flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+ pteval = ptep_clear_flush(vma, address, pvmw.pte);
+
+ /* Move the dirty bit to the page. Now the pte is gone. */
+ if (pte_dirty(pteval))
+ set_page_dirty(page);
+
+ /* Update high watermark before we lower rss */
+ update_hiwater_rss(mm);
+
+ if (is_zone_device_page(page)) {
+ swp_entry_t entry;
+ pte_t swp_pte;
+
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ entry = make_readable_migration_entry(
+ page_to_pfn(page));
+ swp_pte = swp_entry_to_pte(entry);
+
+ /*
+ * pteval maps a zone device page and is therefore
+ * a swap pte.
+ */
+ if (pte_swp_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_swp_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+ /*
+ * No need to invalidate here, it will synchronize
+ * against the special swap migration pte.
+ *
+ * The assignment to subpage above was computed from a
+ * swap PTE which results in an invalid pointer.
+ * Since only PAGE_SIZE pages can currently be
+ * migrated, just set it to page. This will need to be
+ * changed when hugepage migrations to device private
+ * memory are supported.
+ */
+ subpage = page;
+ } else if (PageHWPoison(page)) {
+ pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
+ if (PageHuge(page)) {
+ hugetlb_count_sub(compound_nr(page), mm);
+ set_huge_swap_pte_at(mm, address,
+ pvmw.pte, pteval,
+ vma_mmu_pagesize(vma));
+ } else {
+ dec_mm_counter(mm, mm_counter(page));
+ set_pte_at(mm, address, pvmw.pte, pteval);
+ }
+
+ } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
+ /*
+ * The guest indicated that the page content is of no
+ * interest anymore. Simply discard the pte, vmscan
+ * will take care of the rest.
+ * A future reference will then fault in a new zero
+ * page. When userfaultfd is active, we must not drop
+ * this page though, as its main user (postcopy
+ * migration) will not expect userfaults on already
+ * copied pages.
+ */
+ dec_mm_counter(mm, mm_counter(page));
+ /* We have to invalidate as we cleared the pte */
+ mmu_notifier_invalidate_range(mm, address,
+ address + PAGE_SIZE);
+ } else {
+ swp_entry_t entry;
+ pte_t swp_pte;
+
+ if (arch_unmap_one(mm, vma, address, pteval) < 0) {
+ set_pte_at(mm, address, pvmw.pte, pteval);
+ ret = false;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ if (pte_write(pteval))
+ entry = make_writable_migration_entry(
+ page_to_pfn(subpage));
+ else
+ entry = make_readable_migration_entry(
+ page_to_pfn(subpage));
+
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ set_pte_at(mm, address, pvmw.pte, swp_pte);
+ /*
+ * No need to invalidate here, it will synchronize
+ * against the special swap migration pte.
+ */
+ }
+
+ /*
+ * No need to call mmu_notifier_invalidate_range(); it has been
+ * done above for all cases requiring it to happen under page
+ * table lock before mmu_notifier_invalidate_range_end().
+ *
+ * See Documentation/vm/mmu_notifier.rst
+ */
+ page_remove_rmap(subpage, PageHuge(page));
+ put_page(page);
+ }
+
+ mmu_notifier_invalidate_range_end(&range);
+
+ return ret;
+}
+
+/**
+ * try_to_migrate - try to replace all page table mappings with swap entries
+ * @page: the page to replace page table entries for
+ * @flags: action and flags
+ *
+ * Tries to remove all the page table entries which are mapping this page and
+ * replace them with special swap entries. Caller must hold the page lock.
+ */
+void try_to_migrate(struct page *page, enum ttu_flags flags)
+{
+ struct rmap_walk_control rwc = {
+ .rmap_one = try_to_migrate_one,
+ .arg = (void *)flags,
+ .done = page_not_mapped,
+ .anon_lock = page_lock_anon_vma_read,
+ };
+
+ /*
+ * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
+ * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
+ */
+ if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
+ TTU_SYNC)))
+ return;
+
+ if (is_zone_device_page(page) && !is_device_private_page(page))
+ return;
+
/*
* During exec, a temporary VMA is setup and later moved.
* The VMA is moved under the anon_vma lock but not the
@@ -1775,38 +1951,70 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
* locking requirements of exec(), migration skips
* temporary VMAs until after exec() completes.
*/
- if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
- && !PageKsm(page) && PageAnon(page))
+ if (!PageKsm(page) && PageAnon(page))
rwc.invalid_vma = invalid_migration_vma;
if (flags & TTU_RMAP_LOCKED)
rmap_walk_locked(page, &rwc);
else
rmap_walk(page, &rwc);
+}
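
A minimal sketch of how a migration caller might drive the new helper (names other than try_to_migrate() are assumptions; the real unmap-and-move path also deals with THP splitting and restoring mappings on failure):

	/* replace every mapping of @page with a migration swap entry */
	if (page_mapped(page))
		try_to_migrate(page, 0);

	if (!page_mapped(page))
		rc = move_page_contents(newpage, page);	/* hypothetical next step */
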
- /*
- * When racing against e.g. zap_pte_range() on another cpu,
- * in between its ptep_get_and_clear_full() and page_remove_rmap(),
- * try_to_unmap() may return false when it is about to become true,
- * if page table locking is skipped: use TTU_SYNC to wait for that.
- */
- return !page_mapcount(page);
+/*
+ * Walks the VMAs mapping a page and mlocks the page if any locked VMAs are
+ * found. Once one is found, the page is mlocked and the scan can be terminated.
+ */
+static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, void *unused)
+{
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ };
+
+ /* An unlocked vma doesn't have any pages to mlock, so continue the scan */
+ if (!(vma->vm_flags & VM_LOCKED))
+ return true;
+
+ while (page_vma_mapped_walk(&pvmw)) {
+ /*
+ * Need to recheck under the ptl to serialise with
+ * __munlock_pagevec_fill() after VM_LOCKED is cleared in
+ * munlock_vma_pages_range().
+ */
+ if (vma->vm_flags & VM_LOCKED) {
+ /*
+ * PTE-mapped THP are never marked as mlocked; but
+ * this function is never called on a DoubleMap THP,
+ * nor on an Anon THP (which may still be PTE-mapped
+ * after DoubleMap was cleared).
+ */
+ mlock_vma_page(page);
+ /*
+ * No need to scan further once the page is marked
+ * as mlocked.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ return false;
+ }
+ }
+
+ return true;
}
/**
- * try_to_munlock - try to munlock a page
- * @page: the page to be munlocked
+ * page_mlock - try to mlock a page
+ * @page: the page to be mlocked
*
- * Called from munlock code. Checks all of the VMAs mapping the page
- * to make sure nobody else has this page mlocked. The page will be
- * returned with PG_mlocked cleared if no other vmas have it mlocked.
+ * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
+ * the page if any are found. The page will be returned with PG_mlocked cleared
+ * if it is not mapped by any locked vmas.
*/
-
-void try_to_munlock(struct page *page)
+void page_mlock(struct page *page)
{
struct rmap_walk_control rwc = {
- .rmap_one = try_to_unmap_one,
- .arg = (void *)TTU_MUNLOCK,
+ .rmap_one = page_mlock_one,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
@@ -1815,9 +2023,199 @@ void try_to_munlock(struct page *page)
VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+ /* Anon THP are only marked as mlocked when singly mapped */
+ if (PageTransCompound(page) && PageAnon(page))
+ return;
+
rmap_walk(page, &rwc);
}
+#ifdef CONFIG_DEVICE_PRIVATE
+struct make_exclusive_args {
+ struct mm_struct *mm;
+ unsigned long address;
+ void *owner;
+ bool valid;
+};
+
+static bool page_make_device_exclusive_one(struct page *page,
+ struct vm_area_struct *vma, unsigned long address, void *priv)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct page_vma_mapped_walk pvmw = {
+ .page = page,
+ .vma = vma,
+ .address = address,
+ };
+ struct make_exclusive_args *args = priv;
+ pte_t pteval;
+ struct page *subpage;
+ bool ret = true;
+ struct mmu_notifier_range range;
+ swp_entry_t entry;
+ pte_t swp_pte;
+
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+ vma->vm_mm, address, min(vma->vm_end,
+ address + page_size(page)), args->owner);
+ mmu_notifier_invalidate_range_start(&range);
+
+ while (page_vma_mapped_walk(&pvmw)) {
+ /* Unexpected PMD-mapped THP? */
+ VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+ if (!pte_present(*pvmw.pte)) {
+ ret = false;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+
+ subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ address = pvmw.address;
+
+ /* Nuke the page table entry. */
+ flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+ pteval = ptep_clear_flush(vma, address, pvmw.pte);
+
+ /* Move the dirty bit to the page. Now the pte is gone. */
+ if (pte_dirty(pteval))
+ set_page_dirty(page);
+
+ /*
+ * Check that our target page is still mapped at the expected
+ * address.
+ */
+ if (args->mm == mm && args->address == address &&
+ pte_write(pteval))
+ args->valid = true;
+
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ if (pte_write(pteval))
+ entry = make_writable_device_exclusive_entry(
+ page_to_pfn(subpage));
+ else
+ entry = make_readable_device_exclusive_entry(
+ page_to_pfn(subpage));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+
+ set_pte_at(mm, address, pvmw.pte, swp_pte);
+
+ /*
+ * There is a reference on the page for the swap entry which has
+ * been removed, so shouldn't take another.
+ */
+ page_remove_rmap(subpage, false);
+ }
+
+ mmu_notifier_invalidate_range_end(&range);
+
+ return ret;
+}
+
+/**
+ * page_make_device_exclusive - mark the page exclusively owned by a device
+ * @page: the page to replace page table entries for
+ * @mm: the mm_struct where the page is expected to be mapped
+ * @address: address where the page is expected to be mapped
+ * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
+ *
+ * Tries to remove all the page table entries which are mapping this page and
+ * replace them with special device exclusive swap entries to grant a device
+ * exclusive access to the page. Caller must hold the page lock.
+ *
+ * Returns false if the page is still mapped, or if it could not be unmapped
+ * from the expected address. Otherwise returns true (success).
+ */
+static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm,
+ unsigned long address, void *owner)
+{
+ struct make_exclusive_args args = {
+ .mm = mm,
+ .address = address,
+ .owner = owner,
+ .valid = false,
+ };
+ struct rmap_walk_control rwc = {
+ .rmap_one = page_make_device_exclusive_one,
+ .done = page_not_mapped,
+ .anon_lock = page_lock_anon_vma_read,
+ .arg = &args,
+ };
+
+ /*
+ * Restrict to anonymous pages for now to avoid potential writeback
+ * issues. Also tail pages shouldn't be passed to rmap_walk so skip
+ * those.
+ */
+ if (!PageAnon(page) || PageTail(page))
+ return false;
+
+ rmap_walk(page, &rwc);
+
+ return args.valid && !page_mapcount(page);
+}
+
+/**
+ * make_device_exclusive_range() - Mark a range for exclusive use by a device
+ * @mm: mm_struct of the associated target process
+ * @start: start of the region to mark for exclusive device access
+ * @end: end address of region
+ * @pages: returns the pages which were successfully marked for exclusive access
+ * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
+ *
+ * Returns: number of pages found in the range by GUP. A page is marked for
+ * exclusive access only if the page pointer is non-NULL.
+ *
+ * This function finds ptes mapping page(s) to the given address range, locks
+ * them and replaces mappings with special swap entries preventing userspace CPU
+ * access. On fault these entries are replaced with the original mapping after
+ * calling MMU notifiers.
+ *
+ * A driver using this to program access from a device must use an mmu notifier
+ * critical section to hold a device-specific lock during programming. Once
+ * programming is complete it should drop the page lock and reference, after
+ * which point CPU access to the page will revoke the exclusive access.
+ */
+int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct page **pages,
+ void *owner)
+{
+ long npages = (end - start) >> PAGE_SHIFT;
+ long i;
+
+ npages = get_user_pages_remote(mm, start, npages,
+ FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
+ pages, NULL, NULL);
+ if (npages < 0)
+ return npages;
+
+ for (i = 0; i < npages; i++, start += PAGE_SIZE) {
+ if (!trylock_page(pages[i])) {
+ put_page(pages[i]);
+ pages[i] = NULL;
+ continue;
+ }
+
+ if (!page_make_device_exclusive(pages[i], mm, start, owner)) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ pages[i] = NULL;
+ }
+ }
+
+ return npages;
+}
+EXPORT_SYMBOL_GPL(make_device_exclusive_range);
+#endif
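
A hedged driver-side sketch of using make_device_exclusive_range() for a single page; the wrapper name, the device-programming step and the error code are assumptions, not part of this diff:

	static int driver_grab_page(struct mm_struct *mm, unsigned long addr,
				    void *owner)
	{
		struct page *page = NULL;
		int npages;

		npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
						     &page, owner);
		if (npages != 1 || !page)
			return -EBUSY;

		/*
		 * Program the device page tables here, inside the driver's
		 * mmu-notifier-serialised critical section.
		 */

		unlock_page(page);
		put_page(page);
		return 0;
	}
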
+
void __put_anon_vma(struct anon_vma *anon_vma)
{
struct anon_vma *root = anon_vma->root;
@@ -1858,7 +2256,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
*
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
@@ -1911,7 +2309,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
diff --git a/mm/secretmem.c b/mm/secretmem.c
new file mode 100644
index 000000000000..030f02ddc7c1
--- /dev/null
+++ b/mm/secretmem.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corporation, 2021
+ *
+ * Author: Mike Rapoport <rppt@linux.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/swap.h>
+#include <linux/mount.h>
+#include <linux/memfd.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/pseudo_fs.h>
+#include <linux/secretmem.h>
+#include <linux/set_memory.h>
+#include <linux/sched/signal.h>
+
+#include <uapi/linux/magic.h>
+
+#include <asm/tlbflush.h>
+
+#include "internal.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "secretmem: " fmt
+
+/*
+ * Define mode and flag masks to allow validation of the system call
+ * parameters.
+ */
+#define SECRETMEM_MODE_MASK (0x0)
+#define SECRETMEM_FLAGS_MASK SECRETMEM_MODE_MASK
+
+static bool secretmem_enable __ro_after_init;
+module_param_named(enable, secretmem_enable, bool, 0400);
+MODULE_PARM_DESC(secretmem_enable,
+ "Enable secretmem and memfd_secret(2) system call");
+
+static atomic_t secretmem_users;
+
+bool secretmem_active(void)
+{
+ return !!atomic_read(&secretmem_users);
+}
+
+static vm_fault_t secretmem_fault(struct vm_fault *vmf)
+{
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ pgoff_t offset = vmf->pgoff;
+ gfp_t gfp = vmf->gfp_mask;
+ unsigned long addr;
+ struct page *page;
+ int err;
+
+ if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
+ return vmf_error(-EINVAL);
+
+retry:
+ page = find_lock_page(mapping, offset);
+ if (!page) {
+ page = alloc_page(gfp | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ err = set_direct_map_invalid_noflush(page);
+ if (err) {
+ put_page(page);
+ return vmf_error(err);
+ }
+
+ __SetPageUptodate(page);
+ err = add_to_page_cache_lru(page, mapping, offset, gfp);
+ if (unlikely(err)) {
+ put_page(page);
+ /*
+ * If a split of a large page was required, it
+ * already happened when we marked the page invalid,
+ * which guarantees that this call won't fail.
+ */
+ set_direct_map_default_noflush(page);
+ if (err == -EEXIST)
+ goto retry;
+
+ return vmf_error(err);
+ }
+
+ addr = (unsigned long)page_address(page);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ }
+
+ vmf->page = page;
+ return VM_FAULT_LOCKED;
+}
+
+static const struct vm_operations_struct secretmem_vm_ops = {
+ .fault = secretmem_fault,
+};
+
+static int secretmem_release(struct inode *inode, struct file *file)
+{
+ atomic_dec(&secretmem_users);
+ return 0;
+}
+
+static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long len = vma->vm_end - vma->vm_start;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
+ return -EAGAIN;
+
+ vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
+ vma->vm_ops = &secretmem_vm_ops;
+
+ return 0;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+ return vma->vm_ops == &secretmem_vm_ops;
+}
+
+static const struct file_operations secretmem_fops = {
+ .release = secretmem_release,
+ .mmap = secretmem_mmap,
+};
+
+static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
+{
+ return false;
+}
+
+static int secretmem_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
+{
+ return -EBUSY;
+}
+
+static void secretmem_freepage(struct page *page)
+{
+ set_direct_map_default_noflush(page);
+ clear_highpage(page);
+}
+
+const struct address_space_operations secretmem_aops = {
+ .set_page_dirty = __set_page_dirty_no_writeback,
+ .freepage = secretmem_freepage,
+ .migratepage = secretmem_migratepage,
+ .isolate_page = secretmem_isolate_page,
+};
+
+static struct vfsmount *secretmem_mnt;
+
+static struct file *secretmem_file_create(unsigned long flags)
+{
+ struct file *file = ERR_PTR(-ENOMEM);
+ struct inode *inode;
+
+ inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
+ O_RDWR, &secretmem_fops);
+ if (IS_ERR(file))
+ goto err_free_inode;
+
+ mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
+ mapping_set_unevictable(inode->i_mapping);
+
+ inode->i_mapping->a_ops = &secretmem_aops;
+
+ /* pretend we are a normal file with zero size */
+ inode->i_mode |= S_IFREG;
+ inode->i_size = 0;
+
+ return file;
+
+err_free_inode:
+ iput(inode);
+ return file;
+}
+
+SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
+{
+ struct file *file;
+ int fd, err;
+
+ /* make sure local flags do not conflict with global fcntl.h */
+ BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
+
+ if (!secretmem_enable)
+ return -ENOSYS;
+
+ if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(flags & O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ file = secretmem_file_create(flags);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto err_put_fd;
+ }
+
+ file->f_flags |= O_LARGEFILE;
+
+ fd_install(fd, file);
+ atomic_inc(&secretmem_users);
+ return fd;
+
+err_put_fd:
+ put_unused_fd(fd);
+ return err;
+}
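
A minimal userspace sketch of the new syscall (assumes the kernel was booted with secretmem.enable=1 and that the libc has no memfd_secret() wrapper yet, hence the raw syscall; error handling elided):

	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int fd = syscall(__NR_memfd_secret, 0);
	ftruncate(fd, 4096);
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* pages faulted in through p are removed from the kernel direct map */
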
+
+static int secretmem_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type secretmem_fs = {
+ .name = "secretmem",
+ .init_fs_context = secretmem_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int secretmem_init(void)
+{
+ int ret = 0;
+
+ if (!secretmem_enable)
+ return ret;
+
+ secretmem_mnt = kern_mount(&secretmem_fs);
+ if (IS_ERR(secretmem_mnt))
+ ret = PTR_ERR(secretmem_mnt);
+
+ /* prevent secretmem mappings from ever getting PROT_EXEC */
+ secretmem_mnt->mnt_flags |= MNT_NOEXEC;
+
+ return ret;
+}
+fs_initcall(secretmem_init);
diff --git a/mm/shmem.c b/mm/shmem.c
index 6268b9b4e41a..dacda7463d54 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
- struct swap_info_struct *si;
- struct page *page = NULL;
+ struct page *page;
swp_entry_t swap;
int error;
@@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
swap = radix_to_swp_entry(*pagep);
*pagep = NULL;
- /* Prevent swapoff from happening to us. */
- si = get_swap_device(swap);
- if (!si) {
- error = EINVAL;
- goto failed;
- }
/* Look it up and read it in.. */
page = lookup_swap_cache(swap, NULL, 0);
if (!page) {
@@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
swap_free(swap);
*pagep = page;
- if (si)
- put_swap_device(si);
return 0;
failed:
if (!shmem_confirm_swap(mapping, index, swap))
@@ -1784,9 +1775,6 @@ unlock:
put_page(page);
}
- if (si)
- put_swap_device(si);
-
return error;
}
@@ -1797,7 +1785,7 @@ unlock:
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache.
*
- * vmf and fault_type are only supplied by shmem_fault:
+ * vma, vmf, and fault_type are only supplied by shmem_fault:
* otherwise they are NULL.
*/
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1832,6 +1820,16 @@ repeat:
page = pagecache_get_page(mapping, index,
FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
+
+ if (page && vma && userfaultfd_minor(vma)) {
+ if (!xa_is_value(page)) {
+ unlock_page(page);
+ put_page(page);
+ }
+ *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
+ return 0;
+ }
+
if (xa_is_value(page)) {
error = shmem_swapin_page(inode, index, &page,
sgp, gfp, vma, fault_type);
@@ -2352,27 +2350,25 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
return inode;
}
-static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- bool zeropage,
- struct page **pagep)
+#ifdef CONFIG_USERFAULTFD
+int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+ pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ bool zeropage,
+ struct page **pagep)
{
struct inode *inode = file_inode(dst_vma->vm_file);
struct shmem_inode_info *info = SHMEM_I(inode);
struct address_space *mapping = inode->i_mapping;
gfp_t gfp = mapping_gfp_mask(mapping);
pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
- spinlock_t *ptl;
void *page_kaddr;
struct page *page;
- pte_t _dst_pte, *dst_pte;
int ret;
- pgoff_t offset, max_off;
+ pgoff_t max_off;
- ret = -ENOMEM;
if (!shmem_inode_acct_block(inode, 1)) {
/*
* We may have got a page, returned -ENOENT triggering a retry,
@@ -2383,15 +2379,16 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
put_page(*pagep);
*pagep = NULL;
}
- goto out;
+ return -ENOMEM;
}
if (!*pagep) {
+ ret = -ENOMEM;
page = shmem_alloc_page(gfp, info, pgoff);
if (!page)
goto out_unacct_blocks;
- if (!zeropage) { /* mcopy_atomic */
+ if (!zeropage) { /* COPY */
page_kaddr = kmap_atomic(page);
ret = copy_from_user(page_kaddr,
(const void __user *)src_addr,
@@ -2401,11 +2398,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
/* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
*pagep = page;
- shmem_inode_unacct_blocks(inode, 1);
+ ret = -ENOENT;
/* don't free the page */
- return -ENOENT;
+ goto out_unacct_blocks;
}
- } else { /* mfill_zeropage_atomic */
+ } else { /* ZEROPAGE */
clear_highpage(page);
}
} else {
@@ -2413,15 +2410,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
*pagep = NULL;
}
- VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
+ VM_BUG_ON(PageLocked(page));
+ VM_BUG_ON(PageSwapBacked(page));
__SetPageLocked(page);
__SetPageSwapBacked(page);
__SetPageUptodate(page);
ret = -EFAULT;
- offset = linear_page_index(dst_vma, dst_addr);
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
- if (unlikely(offset >= max_off))
+ if (unlikely(pgoff >= max_off))
goto out_release;
ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
@@ -2429,32 +2426,10 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
if (ret)
goto out_release;
- _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
- if (dst_vma->vm_flags & VM_WRITE)
- _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
- else {
- /*
- * We don't set the pte dirty if the vma has no
- * VM_WRITE permission, so mark the page dirty or it
- * could be freed from under us. We could do it
- * unconditionally before unlock_page(), but doing it
- * only if VM_WRITE is not set is faster.
- */
- set_page_dirty(page);
- }
-
- dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
-
- ret = -EFAULT;
- max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
- if (unlikely(offset >= max_off))
- goto out_release_unlock;
-
- ret = -EEXIST;
- if (!pte_none(*dst_pte))
- goto out_release_unlock;
-
- lru_cache_add(page);
+ ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+ page, true, false);
+ if (ret)
+ goto out_delete_from_cache;
spin_lock_irq(&info->lock);
info->alloced++;
@@ -2462,50 +2437,19 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
shmem_recalc_inode(inode);
spin_unlock_irq(&info->lock);
- inc_mm_counter(dst_mm, mm_counter_file(page));
- page_add_file_rmap(page, false);
- set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(dst_vma, dst_addr, dst_pte);
- pte_unmap_unlock(dst_pte, ptl);
+ SetPageDirty(page);
unlock_page(page);
- ret = 0;
-out:
- return ret;
-out_release_unlock:
- pte_unmap_unlock(dst_pte, ptl);
- ClearPageDirty(page);
+ return 0;
+out_delete_from_cache:
delete_from_page_cache(page);
out_release:
unlock_page(page);
put_page(page);
out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1);
- goto out;
-}
-
-int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- struct page **pagep)
-{
- return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
- dst_addr, src_addr, false, pagep);
-}
-
-int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr)
-{
- struct page *page = NULL;
-
- return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
- dst_addr, 0, true, &page);
+ return ret;
}
+#endif /* CONFIG_USERFAULTFD */
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
@@ -4040,8 +3984,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
loff_t i_size;
pgoff_t off;
- if ((vma->vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (!transhuge_vma_enabled(vma, vma->vm_flags))
return false;
if (shmem_huge == SHMEM_HUGE_FORCE)
return true;
diff --git a/mm/slab.h b/mm/slab.h
index 7b60ef2f32c3..58c01a34e5b8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+ return static_branch_unlikely(&slub_debug_enabled);
+}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
+static inline bool __slub_debug_enabled(void)
+{
+ return false;
+}
#endif
/*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
*/
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
-#ifdef CONFIG_SLUB_DEBUG
- VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
- if (static_branch_unlikely(&slub_debug_enabled))
+ if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+ VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+ if (__slub_debug_enabled())
return s->flags & flags;
-#endif
return false;
}
@@ -339,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
continue;
page = virt_to_head_page(p[i]);
- objcgs = page_objcgs(page);
+ objcgs = page_objcgs_check(page);
if (!objcgs)
continue;
@@ -634,6 +641,7 @@ struct kmem_obj_info {
struct kmem_cache *kp_slab_cache;
void *kp_ret;
void *kp_stack[KS_ADDRS_COUNT];
+ void *kp_free_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
#endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c126e6f6b5a5..1c673c323baf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -575,7 +575,7 @@ EXPORT_SYMBOL_GPL(kmem_valid_obj);
* depends on the type of object and on how much debugging is enabled.
* For a slab-cache object, the fact that it is a slab object is printed,
* and, if available, the slab name, return address, and stack trace from
- * the allocation of that object.
+ * the allocation and last free path of that object.
*
* This function will splat if passed a pointer to a non-slab object.
* If you are not sure what type of object you have, you should instead
@@ -620,6 +620,16 @@ void kmem_dump_obj(void *object)
break;
pr_info(" %pS\n", kp.kp_stack[i]);
}
+
+ if (kp.kp_free_stack[0])
+ pr_cont(" Free path:\n");
+
+ for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
+ if (!kp.kp_free_stack[i])
+ break;
+ pr_info(" %pS\n", kp.kp_free_stack[i]);
+ }
+
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif
diff --git a/mm/slub.c b/mm/slub.c
index 3bc8b940c933..f77d8cd79ef7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -119,25 +119,11 @@
*/
#ifdef CONFIG_SLUB_DEBUG
-
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
-
-static inline bool __slub_debug_enabled(void)
-{
- return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else /* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
- return false;
-}
-
#endif /* CONFIG_SLUB_DEBUG */
static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -590,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
unsigned int length)
{
metadata_access_enable();
- print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
- 16, 1, addr, length, 1);
+ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+ 16, 1, kasan_reset_tag((void *)addr), length, 1);
metadata_access_disable();
}
@@ -1414,12 +1400,13 @@ check_slabs:
static int __init setup_slub_debug(char *str)
{
slab_flags_t flags;
+ slab_flags_t global_flags;
char *saved_str;
char *slab_list;
bool global_slub_debug_changed = false;
bool slab_list_specified = false;
- slub_debug = DEBUG_DEFAULT_FLAGS;
+ global_flags = DEBUG_DEFAULT_FLAGS;
if (*str++ != '=' || !*str)
/*
* No options specified. Switch on full debugging.
@@ -1431,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
str = parse_slub_debug_flags(str, &flags, &slab_list, true);
if (!slab_list) {
- slub_debug = flags;
+ global_flags = flags;
global_slub_debug_changed = true;
} else {
slab_list_specified = true;
@@ -1440,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
/*
* For backwards compatibility, a single list of flags with list of
- * slabs means debugging is only enabled for those slabs, so the global
- * slub_debug should be 0. We can extended that to multiple lists as
+ * slabs means debugging is only changed for those slabs, so the global
+ * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+ * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
* long as there is no option specifying flags without a slab list.
*/
if (slab_list_specified) {
if (!global_slub_debug_changed)
- slub_debug = 0;
+ global_flags = slub_debug;
slub_debug_string = saved_str;
}
out:
+ slub_debug = global_flags;
if (slub_debug != 0 || slub_debug_string)
static_branch_enable(&slub_debug_enabled);
else
@@ -3250,6 +3239,16 @@ struct detached_freelist {
struct kmem_cache *s;
};
+static inline void free_nonslab_page(struct page *page, void *object)
+{
+ unsigned int order = compound_order(page);
+
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
+ kfree_hook(object);
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+ __free_pages(page, order);
+}
+
/*
* This function progressively scans the array with free objects (with
* a limited look ahead) and extract objects belonging to the same
@@ -3286,9 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
if (!s) {
/* Handle kalloc'ed objects */
if (unlikely(!PageSlab(page))) {
- BUG_ON(!PageCompound(page));
- kfree_hook(object);
- __free_pages(page, compound_order(page));
+ free_nonslab_page(page, object);
p[size] = NULL; /* mark object processed */
return size;
}
@@ -4045,6 +4042,7 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
!(s->flags & SLAB_STORE_USER))
return;
#ifdef CONFIG_SLUB_DEBUG
+ objp = fixup_red_left(s, objp);
trackp = get_track(s, objp, TRACK_ALLOC);
kpp->kp_ret = (void *)trackp->addr;
#ifdef CONFIG_STACKTRACE
@@ -4053,6 +4051,13 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
if (!kpp->kp_stack[i])
break;
}
+
+ trackp = get_track(s, objp, TRACK_FREE);
+ for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+ kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
+ if (!kpp->kp_free_stack[i])
+ break;
+ }
#endif
#endif
}
@@ -4256,13 +4261,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
- unsigned int order = compound_order(page);
-
- BUG_ON(!PageCompound(page));
- kfree_hook(object);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(page, order);
+ free_nonslab_page(page, object);
return;
}
slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 16183d85a7d5..bdce883f9286 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -27,8 +27,362 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
+#include <linux/pgtable.h>
+#include <linux/bootmem_info.h>
+
#include <asm/dma.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/**
+ * struct vmemmap_remap_walk - walk vmemmap page table
+ *
+ * @remap_pte: called for each lowest-level entry (PTE).
+ * @nr_walked: the number of ptes walked so far.
+ * @reuse_page: the page which is reused for the tail vmemmap pages.
+ * @reuse_addr: the virtual address of the @reuse_page page.
+ * @vmemmap_pages: the list head of the vmemmap pages that can be freed
+ * or is mapped from.
+ */
+struct vmemmap_remap_walk {
+ void (*remap_pte)(pte_t *pte, unsigned long addr,
+ struct vmemmap_remap_walk *walk);
+ unsigned long nr_walked;
+ struct page *reuse_page;
+ unsigned long reuse_addr;
+ struct list_head *vmemmap_pages;
+};
+
+static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start,
+ struct vmemmap_remap_walk *walk)
+{
+ pmd_t __pmd;
+ int i;
+ unsigned long addr = start;
+ struct page *page = pmd_page(*pmd);
+ pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
+
+ if (!pgtable)
+ return -ENOMEM;
+
+ pmd_populate_kernel(&init_mm, &__pmd, pgtable);
+
+ for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
+ pte_t entry, *pte;
+ pgprot_t pgprot = PAGE_KERNEL;
+
+ entry = mk_pte(page + i, pgprot);
+ pte = pte_offset_kernel(&__pmd, addr);
+ set_pte_at(&init_mm, addr, pte, entry);
+ }
+
+ /* Make pte visible before pmd. See comment in __pte_alloc(). */
+ smp_wmb();
+ pmd_populate_kernel(&init_mm, pmd, pgtable);
+
+ flush_tlb_kernel_range(start, start + PMD_SIZE);
+
+ return 0;
+}
+
+static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+
+ /*
+ * The reuse_page is found 'first' in the table walk, before we start
+ * remapping (i.e. before calling @walk->remap_pte).
+ */
+ if (!walk->reuse_page) {
+ walk->reuse_page = pte_page(*pte);
+ /*
+ * Because the reuse address is part of the range that we are
+ * walking, skip the reuse address range.
+ */
+ addr += PAGE_SIZE;
+ pte++;
+ walk->nr_walked++;
+ }
+
+ for (; addr != end; addr += PAGE_SIZE, pte++) {
+ walk->remap_pte(pte, addr, walk);
+ walk->nr_walked++;
+ }
+}
+
+static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ if (pmd_leaf(*pmd)) {
+ int ret;
+
+ ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK, walk);
+ if (ret)
+ return ret;
+ }
+ next = pmd_addr_end(addr, end);
+ vmemmap_pte_range(pmd, addr, next, walk);
+ } while (pmd++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(p4d, addr);
+ do {
+ int ret;
+
+ next = pud_addr_end(addr, end);
+ ret = vmemmap_pmd_range(pud, addr, next, walk);
+ if (ret)
+ return ret;
+ } while (pud++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ int ret;
+
+ next = p4d_addr_end(addr, end);
+ ret = vmemmap_pud_range(p4d, addr, next, walk);
+ if (ret)
+ return ret;
+ } while (p4d++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int vmemmap_remap_range(unsigned long start, unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ unsigned long addr = start;
+ unsigned long next;
+ pgd_t *pgd;
+
+ VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+ VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
+ pgd = pgd_offset_k(addr);
+ do {
+ int ret;
+
+ next = pgd_addr_end(addr, end);
+ ret = vmemmap_p4d_range(pgd, addr, next, walk);
+ if (ret)
+ return ret;
+ } while (pgd++, addr = next, addr != end);
+
+ /*
+ * We only change the mapping of the vmemmap virtual address range
+ * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
+ * belongs to the range.
+ */
+ flush_tlb_kernel_range(start + PAGE_SIZE, end);
+
+ return 0;
+}
+
+/*
+ * Free a vmemmap page. A vmemmap page can be allocated from the memblock
+ * allocator or the buddy allocator. If the PG_reserved flag is set, it means
+ * that it was allocated from the memblock allocator, so free it via
+ * free_bootmem_page(). Otherwise, use __free_page().
+ */
+static inline void free_vmemmap_page(struct page *page)
+{
+ if (PageReserved(page))
+ free_bootmem_page(page);
+ else
+ __free_page(page);
+}
+
+/* Free a list of the vmemmap pages */
+static void free_vmemmap_page_list(struct list_head *list)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, list, lru) {
+ list_del(&page->lru);
+ free_vmemmap_page(page);
+ }
+}
+
+static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
+ struct vmemmap_remap_walk *walk)
+{
+ /*
+ * Remap the tail pages as read-only to catch illegal write operations
+ * to the tail pages.
+ */
+ pgprot_t pgprot = PAGE_KERNEL_RO;
+ pte_t entry = mk_pte(walk->reuse_page, pgprot);
+ struct page *page = pte_page(*pte);
+
+ list_add_tail(&page->lru, walk->vmemmap_pages);
+ set_pte_at(&init_mm, addr, pte, entry);
+}
+
+static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
+ struct vmemmap_remap_walk *walk)
+{
+ pgprot_t pgprot = PAGE_KERNEL;
+ struct page *page;
+ void *to;
+
+ BUG_ON(pte_page(*pte) != walk->reuse_page);
+
+ page = list_first_entry(walk->vmemmap_pages, struct page, lru);
+ list_del(&page->lru);
+ to = page_to_virt(page);
+ copy_page(to, (void *)walk->reuse_addr);
+
+ set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
+}
+
+/**
+ * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
+ * to the page which @reuse is mapped to, then free the vmemmap
+ * pages which the range was mapped to.
+ * @start: start address of the vmemmap virtual address range that we want
+ * to remap.
+ * @end: end address of the vmemmap virtual address range that we want to
+ * remap.
+ * @reuse: reuse address.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int vmemmap_remap_free(unsigned long start, unsigned long end,
+ unsigned long reuse)
+{
+ int ret;
+ LIST_HEAD(vmemmap_pages);
+ struct vmemmap_remap_walk walk = {
+ .remap_pte = vmemmap_remap_pte,
+ .reuse_addr = reuse,
+ .vmemmap_pages = &vmemmap_pages,
+ };
+
+ /*
+ * In order to make the remapping routine most efficient for huge pages,
+ * the vmemmap page table walking routine has the following rules
+ * (see vmemmap_pte_range() for more details):
+ *
+ * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
+ * should be contiguous.
+ * - The @reuse address is part of the range [@reuse, @end) that we are
+ * walking which is passed to vmemmap_remap_range().
+ * - The @reuse address is the first in the complete range.
+ *
+ * So we need to make sure that @start and @reuse meet the above rules.
+ */
+ BUG_ON(start - reuse != PAGE_SIZE);
+
+ mmap_write_lock(&init_mm);
+ ret = vmemmap_remap_range(reuse, end, &walk);
+ mmap_write_downgrade(&init_mm);
+
+ if (ret && walk.nr_walked) {
+ end = reuse + walk.nr_walked * PAGE_SIZE;
+ /*
+ * vmemmap_pages contains pages from the previous
+ * vmemmap_remap_range call which failed. These
+ * are pages which were removed from the vmemmap.
+ * They will be restored in the following call.
+ */
+ walk = (struct vmemmap_remap_walk) {
+ .remap_pte = vmemmap_restore_pte,
+ .reuse_addr = reuse,
+ .vmemmap_pages = &vmemmap_pages,
+ };
+
+ vmemmap_remap_range(reuse, end, &walk);
+ }
+ mmap_read_unlock(&init_mm);
+
+ free_vmemmap_page_list(&vmemmap_pages);
+
+ return ret;
+}
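
A minimal caller sketch showing the address layout that the BUG_ON() above enforces: the @reuse page sits exactly one page below @start, and the walk itself covers [@reuse, @end). All names below are hypothetical:

	unsigned long vmemmap_start = (unsigned long)head_vmemmap_page;
	unsigned long reuse = vmemmap_start + PAGE_SIZE;	/* page kept mapped */
	unsigned long start = reuse + PAGE_SIZE;		/* first page to free */
	unsigned long end = vmemmap_start + nr_vmemmap_pages * PAGE_SIZE;

	vmemmap_remap_free(start, end, reuse);	/* start - reuse == PAGE_SIZE */
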
+
+static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
+ gfp_t gfp_mask, struct list_head *list)
+{
+ unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
+ int nid = page_to_nid((struct page *)start);
+ struct page *page, *next;
+
+ while (nr_pages--) {
+ page = alloc_pages_node(nid, gfp_mask, 0);
+ if (!page)
+ goto out;
+ list_add_tail(&page->lru, list);
+ }
+
+ return 0;
+out:
+ list_for_each_entry_safe(page, next, list, lru)
+ __free_pages(page, 0);
+ return -ENOMEM;
+}
+
+/**
+ * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
+ * to the page which is from the @vmemmap_pages
+ * respectively.
+ * @start: start address of the vmemmap virtual address range that we want
+ * to remap.
+ * @end: end address of the vmemmap virtual address range that we want to
+ * remap.
+ * @reuse: reuse address.
+ * @gfp_mask: GFP flag for allocating vmemmap pages.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int vmemmap_remap_alloc(unsigned long start, unsigned long end,
+ unsigned long reuse, gfp_t gfp_mask)
+{
+ LIST_HEAD(vmemmap_pages);
+ struct vmemmap_remap_walk walk = {
+ .remap_pte = vmemmap_restore_pte,
+ .reuse_addr = reuse,
+ .vmemmap_pages = &vmemmap_pages,
+ };
+
+ /* See the comment in the vmemmap_remap_free(). */
+ BUG_ON(start - reuse != PAGE_SIZE);
+
+ if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
+ return -ENOMEM;
+
+ mmap_read_lock(&init_mm);
+ vmemmap_remap_range(reuse, end, &walk);
+ mmap_read_unlock(&init_mm);
+
+ return 0;
+}
/*
* Allocate a block of memory to be used to back the virtual memory map
diff --git a/mm/sparse.c b/mm/sparse.c
index 7272f7a1449d..6326cdf36c4f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/bootmem_info.h>
#include "internal.h"
#include <asm/dma.h>
diff --git a/mm/swap.c b/mm/swap.c
index 6c11db780467..19600430e536 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -554,7 +554,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
} else {
/*
* The page's writeback ends up during pagevec
- * We moves tha page into tail of inactive.
+ * We move that page to the tail of the inactive list.
*/
add_page_to_lru_list_tail(page, lruvec);
__count_vm_events(PGROTATED, nr_pages);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c56aa9ac050d..bc7cee6b2ec5 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!mask)
goto skip;
- /* Test swap type to make sure the dereference is safe */
- if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
- struct inode *inode = si->swap_file->f_mapping->host;
- if (inode_read_congested(inode))
- goto skip;
- }
-
do_poll = false;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e898c879a434..1e07d1c776f2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2967,7 +2967,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
return 0;
}
- /* swap partition endianess hack... */
+ /* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 63a73e164d55..0e2132834bc7 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -48,6 +48,78 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
return dst_vma;
}
+/*
+ * Install PTEs to map dst_addr (within dst_vma) to page.
+ *
+ * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
+ * and anon, and for both shared and private VMAs.
+ */
+int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, struct page *page,
+ bool newly_allocated, bool wp_copy)
+{
+ int ret;
+ pte_t _dst_pte, *dst_pte;
+ bool writable = dst_vma->vm_flags & VM_WRITE;
+ bool vm_shared = dst_vma->vm_flags & VM_SHARED;
+ bool page_in_cache = page->mapping;
+ spinlock_t *ptl;
+ struct inode *inode;
+ pgoff_t offset, max_off;
+
+ _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
+ if (page_in_cache && !vm_shared)
+ writable = false;
+ if (writable || !page_in_cache)
+ _dst_pte = pte_mkdirty(_dst_pte);
+ if (writable) {
+ if (wp_copy)
+ _dst_pte = pte_mkuffd_wp(_dst_pte);
+ else
+ _dst_pte = pte_mkwrite(_dst_pte);
+ }
+
+ dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+ if (vma_is_shmem(dst_vma)) {
+ /* serialize against truncate with the page table lock */
+ inode = dst_vma->vm_file->f_inode;
+ offset = linear_page_index(dst_vma, dst_addr);
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ ret = -EFAULT;
+ if (unlikely(offset >= max_off))
+ goto out_unlock;
+ }
+
+ ret = -EEXIST;
+ if (!pte_none(*dst_pte))
+ goto out_unlock;
+
+ if (page_in_cache)
+ page_add_file_rmap(page, false);
+ else
+ page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
+
+ /*
+ * Must happen after rmap, as mm_counter() checks mapping (via
+ * PageAnon()), which is set by __page_set_anon_rmap().
+ */
+ inc_mm_counter(dst_mm, mm_counter(page));
+
+ if (newly_allocated)
+ lru_cache_add_inactive_or_unevictable(page, dst_vma);
+
+ set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(dst_vma, dst_addr, dst_pte);
+ ret = 0;
+out_unlock:
+ pte_unmap_unlock(dst_pte, ptl);
+ return ret;
+}
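
Callers in this patch use the helper in the two ways sketched below (illustrative only; surrounding setup omitted): the UFFDIO_COPY path passes a freshly allocated page, while the UFFDIO_CONTINUE path passes a page that already sits in the shmem page cache.

	/* COPY: page was just allocated, so add it to the LRU */
	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true /* newly_allocated */, wp_copy);

	/* CONTINUE: page already lives in the page cache */
	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false /* newly_allocated */, wp_copy);
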
+
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
@@ -56,13 +128,9 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
struct page **pagep,
bool wp_copy)
{
- pte_t _dst_pte, *dst_pte;
- spinlock_t *ptl;
void *page_kaddr;
int ret;
struct page *page;
- pgoff_t offset, max_off;
- struct inode *inode;
if (!*pagep) {
ret = -ENOMEM;
@@ -99,43 +167,12 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
goto out_release;
- _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
- if (dst_vma->vm_flags & VM_WRITE) {
- if (wp_copy)
- _dst_pte = pte_mkuffd_wp(_dst_pte);
- else
- _dst_pte = pte_mkwrite(_dst_pte);
- }
-
- dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
- if (dst_vma->vm_file) {
- /* the shmem MAP_PRIVATE case requires checking the i_size */
- inode = dst_vma->vm_file->f_inode;
- offset = linear_page_index(dst_vma, dst_addr);
- max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
- ret = -EFAULT;
- if (unlikely(offset >= max_off))
- goto out_release_uncharge_unlock;
- }
- ret = -EEXIST;
- if (!pte_none(*dst_pte))
- goto out_release_uncharge_unlock;
-
- inc_mm_counter(dst_mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
- lru_cache_add_inactive_or_unevictable(page, dst_vma);
-
- set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(dst_vma, dst_addr, dst_pte);
-
- pte_unmap_unlock(dst_pte, ptl);
- ret = 0;
+ ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+ page, true, wp_copy);
+ if (ret)
+ goto out_release;
out:
return ret;
-out_release_uncharge_unlock:
- pte_unmap_unlock(dst_pte, ptl);
out_release:
put_page(page);
goto out;
@@ -176,6 +213,41 @@ out_unlock:
return ret;
}
+/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
+static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
+ pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ bool wp_copy)
+{
+ struct inode *inode = file_inode(dst_vma->vm_file);
+ pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+ struct page *page;
+ int ret;
+
+ ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
+ if (ret)
+ goto out;
+ if (!page) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+ page, false, wp_copy);
+ if (ret)
+ goto out_release;
+
+ unlock_page(page);
+ ret = 0;
+out:
+ return ret;
+out_release:
+ unlock_page(page);
+ put_page(page);
+ goto out;
+}
+
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
@@ -209,7 +281,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
unsigned long len,
enum mcopy_atomic_mode mode)
{
- int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
ssize_t err;
pte_t *dst_pte;
@@ -308,7 +379,6 @@ retry:
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
i_mmap_unlock_read(mapping);
- vm_alloc_shared = vm_shared;
cond_resched();
@@ -346,54 +416,8 @@ retry:
out_unlock:
mmap_read_unlock(dst_mm);
out:
- if (page) {
- /*
- * We encountered an error and are about to free a newly
- * allocated huge page.
- *
- * Reservation handling is very subtle, and is different for
- * private and shared mappings. See the routine
- * restore_reserve_on_error for details. Unfortunately, we
- * can not call restore_reserve_on_error now as it would
- * require holding mmap_lock.
- *
- * If a reservation for the page existed in the reservation
- * map of a private mapping, the map was modified to indicate
- * the reservation was consumed when the page was allocated.
- * We clear the HPageRestoreReserve flag now so that the global
- * reserve count will not be incremented in free_huge_page.
- * The reservation map will still indicate the reservation
- * was consumed and possibly prevent later page allocation.
- * This is better than leaking a global reservation. If no
- * reservation existed, it is still safe to clear
- * HPageRestoreReserve as no adjustments to reservation counts
- * were made during allocation.
- *
- * The reservation map for shared mappings indicates which
- * pages have reservations. When a huge page is allocated
- * for an address with a reservation, no change is made to
- * the reserve map. In this case HPageRestoreReserve will be
- * set to indicate that the global reservation count should be
- * incremented when the page is freed. This is the desired
- * behavior. However, when a huge page is allocated for an
- * address without a reservation a reservation entry is added
- * to the reservation map, and HPageRestoreReserve will not be
- * set. When the page is freed, the global reserve count will
- * NOT be incremented and it will appear as though we have
- * leaked reserved page. In this case, set HPageRestoreReserve
- * so that the global reserve count will be incremented to
- * match the reservation map entry which was created.
- *
- * Note that vm_alloc_shared is based on the flags of the vma
- * for which the page was originally allocated. dst_vma could
- * be different or NULL on error.
- */
- if (vm_alloc_shared)
- SetHPageRestoreReserve(page);
- else
- ClearHPageRestoreReserve(page);
+ if (page)
put_page(page);
- }
BUG_ON(copied < 0);
BUG_ON(err > 0);
BUG_ON(!copied && !err);
@@ -415,11 +439,16 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
unsigned long dst_addr,
unsigned long src_addr,
struct page **page,
- bool zeropage,
+ enum mcopy_atomic_mode mode,
bool wp_copy)
{
ssize_t err;
+ if (mode == MCOPY_ATOMIC_CONTINUE) {
+ return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+ wp_copy);
+ }
+
/*
* The normal page fault path for a shmem will invoke the
* fault, fill the hole in the file and COW it right away. The
@@ -431,7 +460,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
* and not in the radix tree.
*/
if (!(dst_vma->vm_flags & VM_SHARED)) {
- if (!zeropage)
+ if (mode == MCOPY_ATOMIC_NORMAL)
err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
dst_addr, src_addr, page,
wp_copy);
@@ -440,13 +469,10 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
dst_vma, dst_addr);
} else {
VM_WARN_ON_ONCE(wp_copy);
- if (!zeropage)
- err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
- dst_vma, dst_addr,
- src_addr, page);
- else
- err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
- dst_vma, dst_addr);
+ err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+ dst_addr, src_addr,
+ mode != MCOPY_ATOMIC_NORMAL,
+ page);
}
return err;
@@ -467,7 +493,6 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
long copied;
struct page *page;
bool wp_copy;
- bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);
/*
* Sanitize the command parameters:
@@ -530,7 +555,7 @@ retry:
if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
goto out_unlock;
- if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
+ if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
goto out_unlock;
/*
@@ -578,7 +603,7 @@ retry:
BUG_ON(pmd_trans_huge(*dst_pmd));
err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
- src_addr, &page, zeropage, wp_copy);
+ src_addr, &page, mcopy_mode, wp_copy);
cond_resched();
if (unlikely(err == -ENOENT)) {
diff --git a/mm/util.c b/mm/util.c
index a8bf17f18a81..9043d03750a7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -731,6 +731,16 @@ int __page_mapcount(struct page *page)
}
EXPORT_SYMBOL_GPL(__page_mapcount);
+void copy_huge_page(struct page *dst, struct page *src)
+{
+ unsigned i, nr = compound_nr(src);
+
+ for (i = 0; i < nr; i++) {
+ cond_resched();
+ copy_highpage(nth_page(dst, i), nth_page(src, i));
+ }
+}
+
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
@@ -983,7 +993,7 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
* depends on the type of object and on how much debugging is enabled.
* For example, for a slab-cache object, the slab name is printed, and,
* if available, the return address and stack trace from the allocation
- * of that object.
+ * and last free path of that object.
*/
void mem_dump_obj(void *object)
{
@@ -1010,3 +1020,43 @@ void mem_dump_obj(void *object)
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
+
+/*
+ * A driver might set a page logically offline -- PageOffline() -- and
+ * turn the page inaccessible in the hypervisor; after that, access to page
+ * content can be fatal.
+ *
+ * Some special PFN walkers -- e.g., /proc/kcore -- read content of random
+ * pages after checking PageOffline(); however, these PFN walkers can race
+ * with drivers that set PageOffline().
+ *
+ * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
+ * synchronize with such drivers, achieving that a page cannot be set
+ * PageOffline() while frozen.
+ *
+ * page_offline_begin()/page_offline_end() is used by drivers that care about
+ * such races when setting a page PageOffline().
+ */
+static DECLARE_RWSEM(page_offline_rwsem);
+
+void page_offline_freeze(void)
+{
+ down_read(&page_offline_rwsem);
+}
+
+void page_offline_thaw(void)
+{
+ up_read(&page_offline_rwsem);
+}
+
+void page_offline_begin(void)
+{
+ down_write(&page_offline_rwsem);
+}
+EXPORT_SYMBOL(page_offline_begin);
+
+void page_offline_end(void)
+{
+ up_write(&page_offline_rwsem);
+}
+EXPORT_SYMBOL(page_offline_end);
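The hunk above only adds the primitives; as a rough usage sketch (not part of this patch), a PFN walker would bracket its reads with page_offline_freeze()/page_offline_thaw() so that no page can newly become PageOffline() while it inspects page content, while a driver brackets the offline transition with page_offline_begin()/page_offline_end(). The walker_read_pages() and driver_offline_page() names below are purely illustrative; pfn_to_online_page() and __SetPageOffline() are assumed to be the usual kernel helpers:

static void walker_read_pages(unsigned long start_pfn, unsigned long nr_pfns)
{
	unsigned long pfn;

	page_offline_freeze();		/* no page can become PageOffline() now */
	for (pfn = start_pfn; pfn < start_pfn + nr_pfns; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		/* state is stable while frozen, so this check cannot race */
		if (!page || PageOffline(page))
			continue;
		/* ... read the page content safely here ... */
	}
	page_offline_thaw();
}

static void driver_offline_page(struct page *page)
{
	page_offline_begin();		/* waits for any frozen walkers to thaw */
	__SetPageOffline(page);
	/* ... make the page inaccessible in the hypervisor ... */
	page_offline_end();
}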
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b2ec7f751bd0..d5cd52805149 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
+#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
@@ -36,6 +37,7 @@
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
+#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
@@ -83,10 +85,11 @@ static void free_work(struct work_struct *w)
/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
+ unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
pte_t *pte;
u64 pfn;
+ unsigned long size = PAGE_SIZE;
pfn = phys_addr >> PAGE_SHIFT;
pte = pte_alloc_kernel_track(pmd, addr, mask);
@@ -94,9 +97,22 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return -ENOMEM;
do {
BUG_ON(!pte_none(*pte));
+
+#ifdef CONFIG_HUGETLB_PAGE
+ size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
+ if (size != PAGE_SIZE) {
+ pte_t entry = pfn_pte(pfn, prot);
+
+ entry = pte_mkhuge(entry);
+ entry = arch_make_huge_pte(entry, ilog2(size), 0);
+ set_huge_pte_at(&init_mm, addr, pte, entry);
+ pfn += PFN_DOWN(size);
+ continue;
+ }
+#endif
set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ } while (pte += PFN_DOWN(size), addr += size, addr != end);
*mask |= PGTBL_PTE_MODIFIED;
return 0;
}
@@ -145,7 +161,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
continue;
}
- if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask))
+ if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
return -ENOMEM;
} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
@@ -1592,6 +1608,7 @@ static DEFINE_MUTEX(vmap_purge_lock);
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
+#ifdef CONFIG_X86_64
/*
* called before a call to iounmap() if the caller wants vm_area_struct's
* immediately freed.
@@ -1600,6 +1617,7 @@ void set_iounmap_nonlazy(void)
{
atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
+#endif /* CONFIG_X86_64 */
/*
* Purges all lazily-freed vmap areas.
@@ -2912,8 +2930,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return NULL;
}
- if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
- arch_vmap_pmd_supported(prot)) {
+ if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
unsigned long size_per_node;
/*
@@ -2926,11 +2943,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
size_per_node = size;
if (node == NUMA_NO_NODE)
size_per_node /= num_online_nodes();
- if (size_per_node >= PMD_SIZE) {
+ if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
shift = PMD_SHIFT;
- align = max(real_align, 1UL << shift);
- size = ALIGN(real_size, 1UL << shift);
- }
+ else
+ shift = arch_vmap_pte_supported_shift(size_per_node);
+
+ align = max(real_align, 1UL << shift);
+ size = ALIGN(real_size, 1UL << shift);
}
again:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d7c3cb8688dd..eeae2f6bc532 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -100,9 +100,12 @@ struct scan_control {
unsigned int may_swap:1;
/*
- * Cgroups are not reclaimed below their configured memory.low,
- * unless we threaten to OOM. If any cgroups are skipped due to
- * memory.low and nothing was reclaimed, go back for memory.low.
+ * Cgroup memory below memory.low is protected as long as we
+ * don't threaten to OOM. If any cgroup is reclaimed at
+ * reduced force or passed over entirely due to its memory.low
+ * setting (memcg_low_skipped), and nothing is reclaimed as a
+ * result, then go back for one more cycle that reclaims the protected
+ * memory (memcg_low_reclaim) to avert OOM.
*/
unsigned int memcg_low_reclaim:1;
unsigned int memcg_low_skipped:1;
@@ -1499,7 +1502,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
if (unlikely(PageTransHuge(page)))
flags |= TTU_SPLIT_HUGE_PMD;
- if (!try_to_unmap(page, flags)) {
+ try_to_unmap(page, flags);
+ if (page_mapped(page)) {
stat->nr_unmap_fail += nr_pages;
if (!was_swapbacked && PageSwapBacked(page))
stat->nr_lazyfree_fail += nr_pages;
@@ -1701,6 +1705,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
unsigned int nr_reclaimed;
struct page *page, *next;
LIST_HEAD(clean_pages);
+ unsigned int noreclaim_flag;
list_for_each_entry_safe(page, next, page_list, lru) {
if (!PageHuge(page) && page_is_file_lru(page) &&
@@ -1711,8 +1716,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
}
}
+ /*
+ * We should be safe here since we are only dealing with file pages and
+ * we are not kswapd and therefore cannot write dirty file pages. But
+ * call memalloc_noreclaim_save() anyway, just in case these conditions
+ * change in the future.
+ */
+ noreclaim_flag = memalloc_noreclaim_save();
nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
&stat, true);
+ memalloc_noreclaim_restore(noreclaim_flag);
+
list_splice(&clean_pages, page_list);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-(long)nr_reclaimed);
@@ -1810,7 +1824,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
}
-/**
+/*
* Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
*
* lruvec->lru_lock is heavily contended. Some of the functions that
@@ -2306,6 +2320,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
LIST_HEAD(node_page_list);
struct reclaim_stat dummy_stat;
struct page *page;
+ unsigned int noreclaim_flag;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.priority = DEF_PRIORITY,
@@ -2314,6 +2329,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
.may_swap = 1,
};
+ noreclaim_flag = memalloc_noreclaim_save();
+
while (!list_empty(page_list)) {
page = lru_to_page(page_list);
if (nid == NUMA_NO_NODE) {
@@ -2350,6 +2367,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
}
}
+ memalloc_noreclaim_restore(noreclaim_flag);
+
return nr_reclaimed;
}
@@ -2521,15 +2540,14 @@ out:
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long lruvec_size;
+ unsigned long low, min;
unsigned long scan;
- unsigned long protection;
lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
- protection = mem_cgroup_protection(sc->target_mem_cgroup,
- memcg,
- sc->memcg_low_reclaim);
+ mem_cgroup_protection(sc->target_mem_cgroup, memcg,
+ &min, &low);
- if (protection) {
+ if (min || low) {
/*
* Scale a cgroup's reclaim pressure by proportioning
* its current usage to its memory.low or memory.min
@@ -2560,6 +2578,15 @@ out:
* hard protection.
*/
unsigned long cgroup_size = mem_cgroup_size(memcg);
+ unsigned long protection;
+
+ /* memory.low scaling, make sure we retry before OOM */
+ if (!sc->memcg_low_reclaim && low > min) {
+ protection = low;
+ sc->memcg_low_skipped = 1;
+ } else {
+ protection = min;
+ }
/* Avoid TOCTOU with earlier protection check */
cgroup_size = max(cgroup_size, protection);
@@ -4397,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
.may_swap = 1,
.reclaim_idx = gfp_zone(gfp_mask),
};
+ unsigned long pflags;
trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
sc.gfp_mask);
cond_resched();
+ psi_memstall_enter(&pflags);
fs_reclaim_acquire(sc.gfp_mask);
/*
* We need to be able to allocate from the reserves for RECLAIM_UNMAP
@@ -4426,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
current->flags &= ~PF_SWAPWRITE;
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(sc.gfp_mask);
+ psi_memstall_leave(&pflags);
trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
diff --git a/mm/workingset.c b/mm/workingset.c
index 4f7a306ce75a..5ba3e42446fa 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -168,8 +168,10 @@
* refault distance will immediately activate the refaulting page.
*/
+#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
- 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
+ WORKINGSET_SHIFT + NODES_SHIFT + \
+ MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK (~0UL >> EVICTION_SHIFT)
/*
@@ -189,7 +191,7 @@ static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
eviction &= EVICTION_MASK;
eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
- eviction = (eviction << 1) | workingset;
+ eviction = (eviction << WORKINGSET_SHIFT) | workingset;
return xa_mk_value(eviction);
}
@@ -201,8 +203,8 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
int memcgid, nid;
bool workingset;
- workingset = entry & 1;
- entry >>= 1;
+ workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
+ entry >>= WORKINGSET_SHIFT;
nid = entry & ((1UL << NODES_SHIFT) - 1);
entry >>= NODES_SHIFT;
memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
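pack_shadow() shifts the fields in from the low end (eviction counter first, then memcg id, node id, and finally the workingset bit), and unpack_shadow() masks them back off in the reverse order, now using the named WORKINGSET_SHIFT instead of a bare 1. A small self-contained sketch of that pack/unpack discipline follows; the field widths here are illustrative, not the kernel's configured shifts:

#include <assert.h>

#define WS_SHIFT   1
#define NID_SHIFT  6
#define CG_SHIFT  16

static unsigned long pack(unsigned long evict, unsigned cg, unsigned nid, unsigned ws)
{
	unsigned long e = evict;

	e = (e << CG_SHIFT)  | cg;	/* memcg id */
	e = (e << NID_SHIFT) | nid;	/* node id */
	e = (e << WS_SHIFT)  | ws;	/* workingset bit */
	return e;
}

static void unpack(unsigned long e, unsigned long *evict, unsigned *cg,
		   unsigned *nid, unsigned *ws)
{
	*ws  = e & ((1UL << WS_SHIFT) - 1);  e >>= WS_SHIFT;
	*nid = e & ((1UL << NID_SHIFT) - 1); e >>= NID_SHIFT;
	*cg  = e & ((1UL << CG_SHIFT) - 1);  e >>= CG_SHIFT;
	*evict = e;
}

int main(void)
{
	unsigned long evict;
	unsigned cg, nid, ws;

	unpack(pack(12345, 42, 3, 1), &evict, &cg, &nid, &ws);
	assert(evict == 12345 && cg == 42 && nid == 3 && ws == 1);
	return 0;
}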
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 7fe7adaaad01..b3c0577b8095 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -62,7 +62,7 @@
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
-#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
+#define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS)
#define BUDDY_MASK (0x3)
#define BUDDY_SHIFT 2
@@ -144,6 +144,8 @@ struct z3fold_header {
* @c_handle: cache for z3fold_buddy_slots allocation
* @ops: pointer to a structure of user defined operations specified at
* pool creation time.
+ * @zpool: zpool driver
+ * @zpool_ops: zpool operations structure with an evict callback
* @compact_wq: workqueue for page layout background optimization
* @release_wq: workqueue for safe page release
* @work: work_struct for safe page release
@@ -253,9 +255,8 @@ static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
spin_unlock(&zhdr->page_lock);
}
-
-static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
- bool lock)
+/* return locked z3fold page if it's not headless */
+static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
struct z3fold_buddy_slots *slots;
struct z3fold_header *zhdr;
@@ -269,13 +270,12 @@ static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
read_lock(&slots->lock);
addr = *(unsigned long *)handle;
zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
- if (lock)
- locked = z3fold_page_trylock(zhdr);
+ locked = z3fold_page_trylock(zhdr);
read_unlock(&slots->lock);
if (locked)
break;
cpu_relax();
- } while (lock);
+ } while (true);
} else {
zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
}
@@ -283,18 +283,6 @@ static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
return zhdr;
}
-/* Returns the z3fold page where a given handle is stored */
-static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
-{
- return __get_z3fold_header(h, false);
-}
-
-/* return locked z3fold page if it's not headless */
-static inline struct z3fold_header *get_z3fold_header(unsigned long h)
-{
- return __get_z3fold_header(h, true);
-}
-
static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
struct page *page = virt_to_page(zhdr);
@@ -998,7 +986,8 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
goto out_c;
spin_lock_init(&pool->lock);
spin_lock_init(&pool->stale_lock);
- pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
+ pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
+ __alignof__(struct list_head));
if (!pool->unbuddied)
goto out_pool;
for_each_possible_cpu(cpu) {
@@ -1059,6 +1048,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
destroy_workqueue(pool->compact_wq);
destroy_workqueue(pool->release_wq);
z3fold_unregister_migration(pool);
+ free_percpu(pool->unbuddied);
kfree(pool);
}
@@ -1382,7 +1372,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
if (zhdr->foreign_handles ||
test_and_set_bit(PAGE_CLAIMED, &page->private)) {
if (kref_put(&zhdr->refcount,
- release_z3fold_page))
+ release_z3fold_page_locked))
atomic64_dec(&pool->pages_nr);
else
z3fold_page_unlock(zhdr);
@@ -1803,8 +1793,11 @@ static int __init init_z3fold(void)
{
int ret;
- /* Make sure the z3fold header is not larger than the page size */
- BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
+ /*
+ * Make sure the z3fold header is not larger than the page size and
+ * that there is space remaining for its buddies.
+ */
+ BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
ret = z3fold_mount();
if (ret)
return ret;
diff --git a/mm/zbud.c b/mm/zbud.c
index 7ec5f27a68b0..6348932430b8 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -51,7 +51,6 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/zbud.h>
#include <linux/zpool.h>
/*****************
@@ -73,6 +72,12 @@
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
+struct zbud_pool;
+
+struct zbud_ops {
+ int (*evict)(struct zbud_pool *pool, unsigned long handle);
+};
+
/**
* struct zbud_pool - stores metadata for each zbud pool
* @lock: protects all pool fields and first|last_chunk fields of any
@@ -87,21 +92,27 @@
* @pages_nr: number of zbud pages in the pool.
* @ops: pointer to a structure of user defined operations specified at
* pool creation time.
+ * @zpool: zpool driver
+ * @zpool_ops: zpool operations structure with an evict callback
*
* This structure is allocated at pool creation time and maintains metadata
* pertaining to a particular zbud pool.
*/
struct zbud_pool {
spinlock_t lock;
- struct list_head unbuddied[NCHUNKS];
- struct list_head buddied;
+ union {
+ /*
+ * Reuse unbuddied[0] as buddied on the ground that
+ * unbuddied[0] is unused.
+ */
+ struct list_head buddied;
+ struct list_head unbuddied[NCHUNKS];
+ };
struct list_head lru;
u64 pages_nr;
const struct zbud_ops *ops;
-#ifdef CONFIG_ZPOOL
struct zpool *zpool;
const struct zpool_ops *zpool_ops;
-#endif
};
/*
@@ -121,104 +132,6 @@ struct zbud_header {
};
/*****************
- * zpool
- ****************/
-
-#ifdef CONFIG_ZPOOL
-
-static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
-{
- if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
- return pool->zpool_ops->evict(pool->zpool, handle);
- else
- return -ENOENT;
-}
-
-static const struct zbud_ops zbud_zpool_ops = {
- .evict = zbud_zpool_evict
-};
-
-static void *zbud_zpool_create(const char *name, gfp_t gfp,
- const struct zpool_ops *zpool_ops,
- struct zpool *zpool)
-{
- struct zbud_pool *pool;
-
- pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
- if (pool) {
- pool->zpool = zpool;
- pool->zpool_ops = zpool_ops;
- }
- return pool;
-}
-
-static void zbud_zpool_destroy(void *pool)
-{
- zbud_destroy_pool(pool);
-}
-
-static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
-{
- return zbud_alloc(pool, size, gfp, handle);
-}
-static void zbud_zpool_free(void *pool, unsigned long handle)
-{
- zbud_free(pool, handle);
-}
-
-static int zbud_zpool_shrink(void *pool, unsigned int pages,
- unsigned int *reclaimed)
-{
- unsigned int total = 0;
- int ret = -EINVAL;
-
- while (total < pages) {
- ret = zbud_reclaim_page(pool, 8);
- if (ret < 0)
- break;
- total++;
- }
-
- if (reclaimed)
- *reclaimed = total;
-
- return ret;
-}
-
-static void *zbud_zpool_map(void *pool, unsigned long handle,
- enum zpool_mapmode mm)
-{
- return zbud_map(pool, handle);
-}
-static void zbud_zpool_unmap(void *pool, unsigned long handle)
-{
- zbud_unmap(pool, handle);
-}
-
-static u64 zbud_zpool_total_size(void *pool)
-{
- return zbud_get_pool_size(pool) * PAGE_SIZE;
-}
-
-static struct zpool_driver zbud_zpool_driver = {
- .type = "zbud",
- .sleep_mapped = true,
- .owner = THIS_MODULE,
- .create = zbud_zpool_create,
- .destroy = zbud_zpool_destroy,
- .malloc = zbud_zpool_malloc,
- .free = zbud_zpool_free,
- .shrink = zbud_zpool_shrink,
- .map = zbud_zpool_map,
- .unmap = zbud_zpool_unmap,
- .total_size = zbud_zpool_total_size,
-};
-
-MODULE_ALIAS("zpool-zbud");
-#endif /* CONFIG_ZPOOL */
-
-/*****************
* Helpers
*****************/
/* Just to make the code easier to read */
@@ -304,7 +217,7 @@ static int num_free_chunks(struct zbud_header *zhdr)
* Return: pointer to the new zbud pool or NULL if the metadata allocation
* failed.
*/
-struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
+static struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
{
struct zbud_pool *pool;
int i;
@@ -328,7 +241,7 @@ struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
*
* The pool should be emptied before this function is called.
*/
-void zbud_destroy_pool(struct zbud_pool *pool)
+static void zbud_destroy_pool(struct zbud_pool *pool)
{
kfree(pool);
}
@@ -352,7 +265,7 @@ void zbud_destroy_pool(struct zbud_pool *pool)
* gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
* a new page.
*/
-int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
+static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
int chunks, i, freechunks;
@@ -427,7 +340,7 @@ found:
* only sets the first|last_chunks to 0. The page is actually freed
* once both buddies are evicted (see zbud_reclaim_page() below).
*/
-void zbud_free(struct zbud_pool *pool, unsigned long handle)
+static void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
struct zbud_header *zhdr;
int freechunks;
@@ -499,7 +412,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
* no pages to evict or an eviction handler is not registered, -EAGAIN if
* the retry limit was hit.
*/
-int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
+static int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
int i, ret, freechunks;
struct zbud_header *zhdr;
@@ -581,7 +494,7 @@ next:
*
* Returns: a pointer to the mapped allocation
*/
-void *zbud_map(struct zbud_pool *pool, unsigned long handle)
+static void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
return (void *)(handle);
}
@@ -591,7 +504,7 @@ void *zbud_map(struct zbud_pool *pool, unsigned long handle)
* @pool: pool in which the allocation resides
* @handle: handle associated with the allocation to be unmapped
*/
-void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
+static void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}
@@ -602,30 +515,120 @@ void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
* Returns: size in pages of the given pool. The pool lock need not be
* taken to access pages_nr.
*/
-u64 zbud_get_pool_size(struct zbud_pool *pool)
+static u64 zbud_get_pool_size(struct zbud_pool *pool)
{
return pool->pages_nr;
}
+/*****************
+ * zpool
+ ****************/
+
+static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
+{
+ if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
+ return pool->zpool_ops->evict(pool->zpool, handle);
+ else
+ return -ENOENT;
+}
+
+static const struct zbud_ops zbud_zpool_ops = {
+ .evict = zbud_zpool_evict
+};
+
+static void *zbud_zpool_create(const char *name, gfp_t gfp,
+ const struct zpool_ops *zpool_ops,
+ struct zpool *zpool)
+{
+ struct zbud_pool *pool;
+
+ pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
+ if (pool) {
+ pool->zpool = zpool;
+ pool->zpool_ops = zpool_ops;
+ }
+ return pool;
+}
+
+static void zbud_zpool_destroy(void *pool)
+{
+ zbud_destroy_pool(pool);
+}
+
+static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
+ unsigned long *handle)
+{
+ return zbud_alloc(pool, size, gfp, handle);
+}
+static void zbud_zpool_free(void *pool, unsigned long handle)
+{
+ zbud_free(pool, handle);
+}
+
+static int zbud_zpool_shrink(void *pool, unsigned int pages,
+ unsigned int *reclaimed)
+{
+ unsigned int total = 0;
+ int ret = -EINVAL;
+
+ while (total < pages) {
+ ret = zbud_reclaim_page(pool, 8);
+ if (ret < 0)
+ break;
+ total++;
+ }
+
+ if (reclaimed)
+ *reclaimed = total;
+
+ return ret;
+}
+
+static void *zbud_zpool_map(void *pool, unsigned long handle,
+ enum zpool_mapmode mm)
+{
+ return zbud_map(pool, handle);
+}
+static void zbud_zpool_unmap(void *pool, unsigned long handle)
+{
+ zbud_unmap(pool, handle);
+}
+
+static u64 zbud_zpool_total_size(void *pool)
+{
+ return zbud_get_pool_size(pool) * PAGE_SIZE;
+}
+
+static struct zpool_driver zbud_zpool_driver = {
+ .type = "zbud",
+ .sleep_mapped = true,
+ .owner = THIS_MODULE,
+ .create = zbud_zpool_create,
+ .destroy = zbud_zpool_destroy,
+ .malloc = zbud_zpool_malloc,
+ .free = zbud_zpool_free,
+ .shrink = zbud_zpool_shrink,
+ .map = zbud_zpool_map,
+ .unmap = zbud_zpool_unmap,
+ .total_size = zbud_zpool_total_size,
+};
+
+MODULE_ALIAS("zpool-zbud");
+
static int __init init_zbud(void)
{
/* Make sure the zbud header will fit in one chunk */
BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
pr_info("loaded\n");
-#ifdef CONFIG_ZPOOL
zpool_register_driver(&zbud_zpool_driver);
-#endif
return 0;
}
static void __exit exit_zbud(void)
{
-#ifdef CONFIG_ZPOOL
zpool_unregister_driver(&zbud_zpool_driver);
-#endif
-
pr_info("unloaded\n");
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 19b563bc6c48..68e8831068f4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1471,7 +1471,6 @@ static void obj_free(struct size_class *class, unsigned long obj)
unsigned int f_objidx;
void *vaddr;
- obj &= ~OBJ_ALLOCATED_TAG;
obj_to_location(obj, &f_page, &f_objidx);
f_offset = (class->size * f_objidx) & ~PAGE_MASK;
zspage = get_zspage(f_page);
@@ -2163,7 +2162,7 @@ static void async_free_zspage(struct work_struct *work)
VM_BUG_ON(fullness != ZS_EMPTY);
class = pool->size_class[class_idx];
spin_lock(&class->lock);
- __free_zspage(pool, pool->size_class[class_idx], zspage);
+ __free_zspage(pool, class, zspage);
spin_unlock(&class->lock);
}
};
diff --git a/mm/zswap.c b/mm/zswap.c
index 20763267a219..7944e3e57e78 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -967,6 +967,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
spin_unlock(&tree->lock);
BUG_ON(offset != entry->offset);
+ src = (u8 *)zhdr + sizeof(struct zswap_header);
+ if (!zpool_can_sleep_mapped(pool)) {
+ memcpy(tmp, src, entry->length);
+ src = tmp;
+ zpool_unmap_handle(pool, handle);
+ }
+
/* try to allocate swap cache page */
switch (zswap_get_swap_cache_page(swpentry, &page)) {
case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
@@ -982,17 +989,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
/* decompress */
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-
dlen = PAGE_SIZE;
- src = (u8 *)zhdr + sizeof(struct zswap_header);
-
- if (!zpool_can_sleep_mapped(pool)) {
-
- memcpy(tmp, src, entry->length);
- src = tmp;
-
- zpool_unmap_handle(pool, handle);
- }
mutex_lock(acomp_ctx->mutex);
sg_init_one(&input, src, entry->length);
@@ -1203,7 +1200,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
zswap_reject_alloc_fail++;
goto put_dstmem;
}
- buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
+ buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
memcpy(buf, &zhdr, hlen);
memcpy(buf + hlen, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle);
@@ -1427,18 +1424,11 @@ static int __init zswap_debugfs_init(void)
return 0;
}
-
-static void __exit zswap_debugfs_exit(void)
-{
- debugfs_remove_recursive(zswap_debugfs_root);
-}
#else
static int __init zswap_debugfs_init(void)
{
return 0;
}
-
-static void __exit zswap_debugfs_exit(void) { }
#endif
/*********************************
diff --git a/net/802/garp.c b/net/802/garp.c
index 400bd857e5f5..f6012f8e59f0 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
kfree(attr);
}
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct garp_attr *attr;
+
+ for (node = rb_first(&app->gid);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct garp_attr, node);
+ garp_attr_destroy(app, attr);
+ }
+}
+
static int garp_pdu_init(struct garp_applicant *app)
{
struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
spin_lock_bh(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+ garp_attr_destroy_all(app);
garp_pdu_queue(app);
spin_unlock_bh(&app->lock);
diff --git a/net/802/mrp.c b/net/802/mrp.c
index bea6e43d45a0..35e04cc5390c 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
kfree(attr);
}
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct mrp_attr *attr;
+
+ for (node = rb_first(&app->mad);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct mrp_attr, node);
+ mrp_attr_destroy(app, attr);
+ }
+}
+
static int mrp_pdu_init(struct mrp_applicant *app)
{
struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
+ mrp_attr_destroy_all(app);
mrp_pdu_queue(app);
spin_unlock_bh(&app->lock);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2560ed2f144d..e1a545c8a69f 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3996,14 +3996,10 @@ EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
- int id;
-
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
hci_dev_set_flag(hdev, HCI_UNREGISTER);
- id = hdev->id;
-
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@@ -4038,7 +4034,14 @@ void hci_unregister_dev(struct hci_dev *hdev)
}
device_del(&hdev->dev);
+ /* Actual cleanup is deferred until hci_cleanup_dev(). */
+ hci_dev_put(hdev);
+}
+EXPORT_SYMBOL(hci_unregister_dev);
+/* Cleanup HCI device */
+void hci_cleanup_dev(struct hci_dev *hdev)
+{
debugfs_remove_recursive(hdev->debugfs);
kfree_const(hdev->hw_info);
kfree_const(hdev->fw_info);
@@ -4063,11 +4066,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_blocked_keys_clear(hdev);
hci_dev_unlock(hdev);
- hci_dev_put(hdev);
-
- ida_simple_remove(&hci_index_ida, id);
+ ida_simple_remove(&hci_index_ida, hdev->id);
}
-EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b04a5a02ecf3..f1128c2134f0 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -59,6 +59,17 @@ struct hci_pinfo {
char comm[TASK_COMM_LEN];
};
+static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
+{
+ struct hci_dev *hdev = hci_pi(sk)->hdev;
+
+ if (!hdev)
+ return ERR_PTR(-EBADFD);
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return ERR_PTR(-EPIPE);
+ return hdev;
+}
+
void hci_sock_set_flag(struct sock *sk, int nr)
{
set_bit(nr, &hci_pi(sk)->flags);
@@ -759,19 +770,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
if (event == HCI_DEV_UNREG) {
struct sock *sk;
- /* Detach sockets from device */
+ /* Wake up sockets using this dead device */
read_lock(&hci_sk_list.lock);
sk_for_each(sk, &hci_sk_list.head) {
- lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
- hci_pi(sk)->hdev = NULL;
sk->sk_err = EPIPE;
- sk->sk_state = BT_OPEN;
sk->sk_state_change(sk);
-
- hci_dev_put(hdev);
}
- release_sock(sk);
}
read_unlock(&hci_sk_list.lock);
}
@@ -930,10 +935,10 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
unsigned long arg)
{
- struct hci_dev *hdev = hci_pi(sk)->hdev;
+ struct hci_dev *hdev = hci_hdev_from_sock(sk);
- if (!hdev)
- return -EBADFD;
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
@@ -1103,6 +1108,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
lock_sock(sk);
+ /* Allow detaching from dead device and attaching to alive device, if
+ * the caller wants to re-bind (instead of close) this socket in
+ * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
+ */
+ hdev = hci_pi(sk)->hdev;
+ if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+ hci_pi(sk)->hdev = NULL;
+ sk->sk_state = BT_OPEN;
+ hci_dev_put(hdev);
+ }
+ hdev = NULL;
+
if (sk->sk_state == BT_BOUND) {
err = -EALREADY;
goto done;
@@ -1379,9 +1396,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
lock_sock(sk);
- hdev = hci_pi(sk)->hdev;
- if (!hdev) {
- err = -EBADFD;
+ hdev = hci_hdev_from_sock(sk);
+ if (IS_ERR(hdev)) {
+ err = PTR_ERR(hdev);
goto done;
}
@@ -1743,9 +1760,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
goto done;
}
- hdev = hci_pi(sk)->hdev;
- if (!hdev) {
- err = -EBADFD;
+ hdev = hci_hdev_from_sock(sk);
+ if (IS_ERR(hdev)) {
+ err = PTR_ERR(hdev);
goto done;
}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 9874844a95a9..b69d88b88d2e 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -83,6 +83,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
static void bt_host_release(struct device *dev)
{
struct hci_dev *hdev = to_hci_dev(dev);
+
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ hci_cleanup_dev(hdev);
kfree(hdev);
module_put(THIS_MODULE);
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 8cb53e10a985..4e095746e002 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -809,7 +809,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
return sent;
}
-static int rfcomm_tty_write_room(struct tty_struct *tty)
+static unsigned int rfcomm_tty_write_room(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
int room = 0;
@@ -1012,7 +1012,7 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty)
rfcomm_dlc_unthrottle(dev->dlc);
}
-static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
+static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index aa47af349ba8..caa16bf30fb5 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -7,6 +7,7 @@
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
@@ -701,6 +702,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
void *data;
int ret;
+ if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
+ prog->expected_attach_type == BPF_XDP_CPUMAP)
+ return -EINVAL;
if (kattr->test.ctx_in || kattr->test.ctx_out)
return -EINVAL;
@@ -948,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
goto out;
}
}
+
+ rcu_read_lock_trace();
retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+ rcu_read_unlock_trace();
if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
err = -EFAULT;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2b862cffc03a..5dee30966ed3 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -780,7 +780,7 @@ int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
struct net_device *dst_dev;
dst_dev = dst ? dst->dev : br->dev;
- if (dst_dev != br_dev && dst_dev != dev)
+ if (dst_dev && dst_dev != dev)
continue;
err = br_fdb_replay_one(nb, fdb, dst_dev, action, ctx);
@@ -1019,7 +1019,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
struct net_bridge_port *p, const unsigned char *addr,
- u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
+ u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -1038,6 +1039,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
rcu_read_unlock();
local_bh_enable();
} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+ if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "FDB entry towards bridge must be permanent");
+ return -EINVAL;
+ }
err = br_fdb_external_learn_add(br, p, addr, vid, true);
} else {
spin_lock_bh(&br->hash_lock);
@@ -1110,9 +1116,11 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
/* VID was specified, so use it. */
- err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+ extack);
} else {
- err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+ extack);
if (err || !vg || !vg->num_vlans)
goto out;
@@ -1124,7 +1132,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!br_vlan_should_use(v))
continue;
err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
- nfea_tb);
+ nfea_tb, extack);
if (err)
goto out;
}
@@ -1281,6 +1289,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
if (swdev_notify)
flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+ if (!p)
+ flags |= BIT(BR_FDB_LOCAL);
+
fdb = fdb_create(br, p, addr, vid, flags);
if (!fdb) {
err = -ENOMEM;
@@ -1307,6 +1319,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
if (swdev_notify)
set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+ if (!p)
+ set_bit(BR_FDB_LOCAL, &fdb->flags);
+
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f7d2f472ae24..14cd6ef96111 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -562,7 +562,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
struct net_bridge_port *p;
int err = 0;
unsigned br_hr, dev_hr;
- bool changed_addr;
+ bool changed_addr, fdb_synced = false;
/* Don't allow bridging non-ethernet like devices. */
if ((dev->flags & IFF_LOOPBACK) ||
@@ -616,6 +616,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
err = dev_set_allmulti(dev, 1);
if (err) {
+ br_multicast_del_port(p);
kfree(p); /* kobject not yet init'd, manually free */
goto err1;
}
@@ -652,6 +653,19 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
list_add_rcu(&p->list, &br->port_list);
nbp_update_port_count(br);
+ if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
+ /* When updating the port count we also update all ports'
+ * promiscuous mode.
+ * A port leaving promiscuous mode normally gets the bridge's
+ * fdb synced to the unicast filter (if supported), however,
+ * `br_port_clear_promisc` does not distinguish between
+ * non-promiscuous ports and *new* ports, so we need to
+ * sync explicitly here.
+ */
+ fdb_synced = br_fdb_sync_static(br, p) == 0;
+ if (!fdb_synced)
+ netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
+ }
netdev_update_features(br->dev);
@@ -701,6 +715,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
return 0;
err7:
+ if (fdb_synced)
+ br_fdb_unsync_static(br, p);
list_del_rcu(&p->list);
br_fdb_delete_by_port(br, p, 0, 1);
nbp_update_port_count(br);
@@ -714,6 +730,7 @@ err4:
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
+ br_multicast_del_port(p);
kobject_put(&p->kobj);
dev_set_allmulti(dev, -1);
err1:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 53c3a9d80d9c..d0434dc8c03b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -3264,7 +3264,9 @@ static void br_multicast_pim(struct net_bridge *br,
pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
return;
+ spin_lock(&br->multicast_lock);
br_ip4_multicast_mark_router(br, port);
+ spin_unlock(&br->multicast_lock);
}
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
@@ -3275,7 +3277,9 @@ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
return -ENOMSG;
+ spin_lock(&br->multicast_lock);
br_ip4_multicast_mark_router(br, port);
+ spin_unlock(&br->multicast_lock);
return 0;
}
@@ -3343,7 +3347,9 @@ static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
return;
+ spin_lock(&br->multicast_lock);
br_ip6_multicast_mark_router(br, port);
+ spin_unlock(&br->multicast_lock);
}
static int br_multicast_ipv6_rcv(struct net_bridge *br,
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 8d033a75a766..fdbed3158555 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
skb = ip_fraglist_next(&iter);
}
+
+ if (!err)
+ return 0;
+
+ kfree_skb_list(iter.frag);
+
return err;
}
slow_path:
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 647554c9813b..e12fd3cad619 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
goto err;
ret = -EINVAL;
- if (unlikely(msg->msg_iter.iov->iov_base == NULL))
+ if (unlikely(msg->msg_iter.nr_segs == 0) ||
+ unlikely(msg->msg_iter.iov->iov_base == NULL))
goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index c3946c355882..bdc95bd7a851 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)
static bool j1939_session_deactivate(struct j1939_session *session)
{
+ struct j1939_priv *priv = session->priv;
bool active;
- j1939_session_list_lock(session->priv);
+ j1939_session_list_lock(priv);
+ /* This function should be called with a session ref-count of at
+ * least 2.
+ */
+ WARN_ON_ONCE(kref_read(&session->kref) < 2);
active = j1939_session_deactivate_locked(session);
- j1939_session_list_unlock(session->priv);
+ j1939_session_list_unlock(priv);
return active;
}
@@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
if (!session->transmission)
j1939_tp_schedule_txtimer(session, 0);
} else {
- j1939_tp_set_rxtimeout(session, 250);
+ j1939_tp_set_rxtimeout(session, 750);
}
session->last_cmd = 0xff;
consume_skb(se_skb);
diff --git a/net/can/raw.c b/net/can/raw.c
index ed4fcb7ab0c3..cd5a49380116 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
}
+ rtnl_lock();
lock_sock(sk);
- if (ro->bound && ro->ifindex)
+ if (ro->bound && ro->ifindex) {
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+ if (!dev) {
+ if (count > 1)
+ kfree(filter);
+ err = -ENODEV;
+ goto out_fil;
+ }
+ }
if (ro->bound) {
/* (try to) register the new filters */
@@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
dev_put(dev);
release_sock(sk);
+ rtnl_unlock();
break;
@@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
err_mask &= CAN_ERR_MASK;
+ rtnl_lock();
lock_sock(sk);
- if (ro->bound && ro->ifindex)
+ if (ro->bound && ro->ifindex) {
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ }
/* remove current error mask */
if (ro->bound) {
@@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
dev_put(dev);
release_sock(sk);
+ rtnl_unlock();
break;
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index d2b268a1838e..d38c9eadbe2f 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -58,12 +58,10 @@ struct ceph_auth_client *ceph_auth_init(const char *name,
const int *con_modes)
{
struct ceph_auth_client *ac;
- int ret;
- ret = -ENOMEM;
ac = kzalloc(sizeof(*ac), GFP_NOFS);
if (!ac)
- goto out;
+ return ERR_PTR(-ENOMEM);
mutex_init(&ac->mutex);
ac->negotiating = true;
@@ -78,9 +76,6 @@ struct ceph_auth_client *ceph_auth_init(const char *name,
dout("%s name '%s' preferred_mode %d fallback_mode %d\n", __func__,
ac->name, ac->preferred_mode, ac->fallback_mode);
return ac;
-
-out:
- return ERR_PTR(ret);
}
void ceph_auth_destroy(struct ceph_auth_client *ac)
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 097e9f8d87a7..77b5519bc45f 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -112,8 +112,8 @@ static int ceph_auth_none_create_authorizer(
auth->authorizer = (struct ceph_authorizer *) au;
auth->authorizer_buf = au->buf;
auth->authorizer_buf_len = au->buf_len;
- auth->authorizer_reply_buf = au->reply_buf;
- auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+ auth->authorizer_reply_buf = NULL;
+ auth->authorizer_reply_buf_len = 0;
return 0;
}
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 4158f064302e..bb121539e796 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -16,7 +16,6 @@ struct ceph_none_authorizer {
struct ceph_authorizer base;
char buf[128];
int buf_len;
- char reply_buf[0];
};
struct ceph_auth_none_info {
diff --git a/net/ceph/cls_lock_client.c b/net/ceph/cls_lock_client.c
index 17447c19d937..66136a4c1ce7 100644
--- a/net/ceph/cls_lock_client.c
+++ b/net/ceph/cls_lock_client.c
@@ -10,7 +10,9 @@
/**
* ceph_cls_lock - grab rados lock for object
- * @oid, @oloc: object to lock
+ * @osdc: OSD client instance
+ * @oid: object to lock
+ * @oloc: object to lock
* @lock_name: the name of the lock
* @type: lock type (CEPH_CLS_LOCK_EXCLUSIVE or CEPH_CLS_LOCK_SHARED)
* @cookie: user-defined identifier for this instance of the lock
@@ -82,7 +84,9 @@ EXPORT_SYMBOL(ceph_cls_lock);
/**
* ceph_cls_unlock - release rados lock for object
- * @oid, @oloc: object to lock
+ * @osdc: OSD client instance
+ * @oid: object to lock
+ * @oloc: object to lock
* @lock_name: the name of the lock
* @cookie: user-defined identifier for this instance of the lock
*/
@@ -130,7 +134,9 @@ EXPORT_SYMBOL(ceph_cls_unlock);
/**
* ceph_cls_break_lock - release rados lock for object for specified client
- * @oid, @oloc: object to lock
+ * @osdc: OSD client instance
+ * @oid: object to lock
+ * @oloc: object to lock
* @lock_name: the name of the lock
* @cookie: user-defined identifier for this instance of the lock
* @locker: current lock owner
diff --git a/net/core/dev.c b/net/core/dev.c
index c253c2aafe97..8f1a47ad6781 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -131,6 +131,7 @@
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
+#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
@@ -3844,6 +3845,18 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
}
}
+static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
+ struct sk_buff **to_free,
+ struct netdev_queue *txq)
+{
+ int rc;
+
+ rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
+ if (rc == NET_XMIT_SUCCESS)
+ trace_qdisc_enqueue(q, txq, skb);
+ return rc;
+}
+
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
@@ -3862,8 +3875,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* of q->seqlock to protect from racing with requeuing.
*/
if (unlikely(!nolock_qdisc_is_empty(q))) {
- rc = q->enqueue(skb, q, &to_free) &
- NET_XMIT_MASK;
+ rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
__qdisc_run(q);
qdisc_run_end(q);
@@ -3879,7 +3891,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return NET_XMIT_SUCCESS;
}
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
qdisc_run(q);
no_lock_out:
@@ -3923,7 +3935,7 @@ no_lock_out:
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -6008,6 +6020,19 @@ static void gro_list_prepare(const struct list_head *head,
diffs = memcmp(skb_mac_header(p),
skb_mac_header(skb),
maclen);
+
+ diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ if (!diffs) {
+ struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT);
+ struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT);
+
+ diffs |= (!!p_ext) ^ (!!skb_ext);
+ if (!diffs && unlikely(skb_ext))
+ diffs |= p_ext->chain ^ skb_ext->chain;
+ }
+#endif
+
NAPI_GRO_CB(p)->same_flow = !diffs;
}
}
@@ -6221,6 +6246,8 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
case GRO_MERGED_FREE:
if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
napi_skb_free_stolen_head(skb);
+ else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ __kfree_skb(skb);
else
__kfree_skb_defer(skb);
break;
@@ -6270,6 +6297,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
skb_ext_reset(skb);
+ nf_reset_ct(skb);
napi->skb = skb;
}
@@ -9684,14 +9712,17 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
struct net_device *dev;
int err, fd;
+ rtnl_lock();
dev = dev_get_by_index(net, attr->link_create.target_ifindex);
- if (!dev)
+ if (!dev) {
+ rtnl_unlock();
return -EINVAL;
+ }
link = kzalloc(sizeof(*link), GFP_USER);
if (!link) {
err = -ENOMEM;
- goto out_put_dev;
+ goto unlock;
}
bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
@@ -9701,14 +9732,14 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
- goto out_put_dev;
+ goto unlock;
}
- rtnl_lock();
err = dev_xdp_attach_link(dev, NULL, link);
rtnl_unlock();
if (err) {
+ link->dev = NULL;
bpf_link_cleanup(&link_primer);
goto out_put_dev;
}
@@ -9718,6 +9749,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
dev_put(dev);
return fd;
+unlock:
+ rtnl_unlock();
+
out_put_dev:
dev_put(dev);
return err;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 8fdd04f00fd7..85032626de24 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -9328,18 +9328,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
n = snprintf(name, len, "p%u", attrs->phys.port_number);
if (n < len && attrs->split)
n += snprintf(name + n, len - n, "s%u",
attrs->phys.split_subport_number);
- if (!attrs->split)
- n = snprintf(name, len, "p%u", attrs->phys.port_number);
- else
- n = snprintf(name, len, "p%us%u",
- attrs->phys.port_number,
- attrs->phys.split_subport_number);
-
break;
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
@@ -9381,6 +9373,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
attrs->pci_sf.sf);
break;
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ return -EOPNOTSUPP;
}
if (n >= len)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2aadbfc5193b..4b2415d34873 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1504,7 +1504,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
}
EXPORT_SYMBOL(flow_get_u32_dst);
-/* Sort the source and destination IP (and the ports if the IP are the same),
+/* Sort the source and destination IP and the ports,
* to have consistent hash within the two directions
*/
static inline void __flow_hash_consistentify(struct flow_keys *keys)
@@ -1515,11 +1515,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
addr_diff = (__force u32)keys->addrs.v4addrs.dst -
(__force u32)keys->addrs.v4addrs.src;
- if ((addr_diff < 0) ||
- (addr_diff == 0 &&
- ((__force u16)keys->ports.dst <
- (__force u16)keys->ports.src))) {
+ if (addr_diff < 0)
swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+ if ((__force u16)keys->ports.dst <
+ (__force u16)keys->ports.src) {
swap(keys->ports.src, keys->ports.dst);
}
break;
@@ -1527,13 +1527,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
addr_diff = memcmp(&keys->addrs.v6addrs.dst,
&keys->addrs.v6addrs.src,
sizeof(keys->addrs.v6addrs.dst));
- if ((addr_diff < 0) ||
- (addr_diff == 0 &&
- ((__force u16)keys->ports.dst <
- (__force u16)keys->ports.src))) {
+ if (addr_diff < 0) {
for (i = 0; i < 4; i++)
swap(keys->addrs.v6addrs.src.s6_addr32[i],
keys->addrs.v6addrs.dst.s6_addr32[i]);
+ }
+ if ((__force u16)keys->ports.dst <
+ (__force u16)keys->ports.src) {
swap(keys->ports.src, keys->ports.dst);
}
break;
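The change above decouples the two swaps: addresses are ordered first and the ports are then ordered on their own, rather than both being swapped under a single combined condition. A hedged userspace-style sketch of the resulting canonicalization (illustrative struct and helper names, plain integer compares instead of the kernel's __force casts) shows how both directions of a flow collapse onto the same key:

#include <stdint.h>
#include <stdio.h>

struct key { uint32_t saddr, daddr; uint16_t sport, dport; };

static void consistentify(struct key *k)
{
	/* order the addresses, then order the ports independently */
	if ((int32_t)(k->daddr - k->saddr) < 0) {
		uint32_t a = k->saddr; k->saddr = k->daddr; k->daddr = a;
	}
	if (k->dport < k->sport) {
		uint16_t p = k->sport; k->sport = k->dport; k->dport = p;
	}
}

int main(void)
{
	struct key fwd = { 0x0a000002, 0x0a000001, 80, 443 };
	struct key rev = { 0x0a000001, 0x0a000002, 443, 80 };

	consistentify(&fwd);
	consistentify(&rev);
	printf("%s\n", (fwd.saddr == rev.saddr && fwd.daddr == rev.daddr &&
			fwd.sport == rev.sport && fwd.dport == rev.dport) ?
		       "same key" : "different key");	/* prints "same key" */
	return 0;
}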
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 75431ca9300f..1a455847da54 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -158,7 +158,7 @@ static void linkwatch_do_dev(struct net_device *dev)
clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
rfc2863_policy(dev);
- if (dev->flags & IFF_UP && netif_device_present(dev)) {
+ if (dev->flags & IFF_UP) {
if (netif_carrier_ok(dev))
dev_activate(dev);
else
@@ -204,7 +204,8 @@ static void __linkwatch_run_queue(int urgent_only)
dev = list_first_entry(&wrk, struct net_device, link_watch_list);
list_del_init(&dev->link_watch_list);
- if (urgent_only && !linkwatch_urgent_event(dev)) {
+ if (!netif_device_present(dev) ||
+ (urgent_only && !linkwatch_urgent_event(dev))) {
list_add_tail(&dev->link_watch_list, &lweventlist);
continue;
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0a6b04714558..edfc0f8011f8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -430,7 +430,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
ip6h = ipv6_hdr(skb);
/* ip6h->version = 6; ip6h->priority = 0; */
- put_unaligned(0x60, (unsigned char *)ip6h);
+ *(unsigned char *)ip6h = 0x60;
ip6h->flow_lbl[0] = 0;
ip6h->flow_lbl[1] = 0;
ip6h->flow_lbl[2] = 0;
@@ -458,7 +458,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
iph = ip_hdr(skb);
/* iph->version = 4; iph->ihl = 5; */
- put_unaligned(0x45, (unsigned char *)iph);
+ *(unsigned char *)iph = 0x45;
iph->tos = 0;
put_unaligned(htons(ip_len), &(iph->tot_len));
iph->id = htons(atomic_inc_return(&ip_ident));
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5e4eb45b139c..8ab7b402244c 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -634,7 +634,15 @@ bool page_pool_return_skb_page(struct page *page)
struct page_pool *pp;
page = compound_head(page);
- if (unlikely(page->pp_magic != PP_SIGNATURE))
+
+ /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
+ * in order to preserve any existing bits, such as bit 0 for the
+ * head page of compound page and bit 1 for pfmemalloc page, so
+ * mask those bits for freeing side when doing below checking,
+ * and page_is_pfmemalloc() is checked in __page_pool_put_page()
+ * to avoid recycling the pfmemalloc page.
+ */
+ if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
return false;
pp = page->pp;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 12aabcda6db2..fc7942c0dddc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -663,7 +663,7 @@ static void skb_release_data(struct sk_buff *skb)
if (skb->cloned &&
atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
&shinfo->dataref))
- return;
+ goto exit;
skb_zcopy_clear(skb, true);
@@ -674,6 +674,17 @@ static void skb_release_data(struct sk_buff *skb)
kfree_skb_list(shinfo->frag_list);
skb_free_head(skb);
+exit:
+ /* When we clone an SKB we copy the recycling bit. The pp_recycle
+ * bit is only set on the head though, so in order to avoid races
+ * while trying to recycle fragments on __skb_frag_unref() we need
+ * to make one SKB responsible for triggering the recycle path.
+ * So disable the recycling bit if an SKB is cloned and we have
+ * additional references to the fragmented part of the SKB.
+ * Eventually the last SKB will have the recycling bit set and its
+ * dataref set to 0, which will trigger the recycling.
+ */
+ skb->pp_recycle = 0;
}
/*
@@ -943,6 +954,7 @@ void __kfree_skb_defer(struct sk_buff *skb)
void napi_skb_free_stolen_head(struct sk_buff *skb)
{
+ nf_reset_ct(skb);
skb_dst_drop(skb);
skb_ext_put(skb);
napi_skb_cache_put(skb);
@@ -3010,8 +3022,11 @@ skb_zerocopy_headlen(const struct sk_buff *from)
if (!from->head_frag ||
skb_headlen(from) < L1_CACHE_BYTES ||
- skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
hlen = skb_headlen(from);
+ if (!hlen)
+ hlen = from->len;
+ }
if (skb_has_frag_list(from))
hlen = from->len;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 9b6160a191f8..2d6249b28928 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -508,10 +508,8 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
if (skb_linearize(skb))
return -EAGAIN;
num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
- if (unlikely(num_sge < 0)) {
- kfree(msg);
+ if (unlikely(num_sge < 0))
return num_sge;
- }
copied = skb->len;
msg->sg.start = 0;
@@ -530,6 +528,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
struct sock *sk = psock->sk;
struct sk_msg *msg;
+ int err;
/* If we are receiving on the same sock skb->sk is already assigned,
* skip memory accounting and owner transition seeing it already set
@@ -548,7 +547,10 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
* into user buffers.
*/
skb_set_owner_r(skb, sk);
- return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+ err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+ if (err < 0)
+ kfree(msg);
+ return err;
}
/* Puts an skb on the ingress queue of the socket already assigned to the
@@ -559,12 +561,16 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
{
struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
struct sock *sk = psock->sk;
+ int err;
if (unlikely(!msg))
return -EAGAIN;
sk_msg_init(msg);
skb_set_owner_r(skb, sk);
- return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+ err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+ if (err < 0)
+ kfree(msg);
+ return err;
}
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
@@ -578,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
return sk_psock_skb_ingress(psock, skb);
}
-static void sock_drop(struct sock *sk, struct sk_buff *skb)
+static void sk_psock_skb_state(struct sk_psock *psock,
+ struct sk_psock_work_state *state,
+ struct sk_buff *skb,
+ int len, int off)
{
- sk_drops_add(sk, skb);
- kfree_skb(skb);
+ spin_lock_bh(&psock->ingress_lock);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ state->skb = skb;
+ state->len = len;
+ state->off = off;
+ } else {
+ sock_drop(psock->sk, skb);
+ }
+ spin_unlock_bh(&psock->ingress_lock);
}
static void sk_psock_backlog(struct work_struct *work)
{
struct sk_psock *psock = container_of(work, struct sk_psock, work);
struct sk_psock_work_state *state = &psock->work_state;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool ingress;
u32 len, off;
int ret;
mutex_lock(&psock->work_mutex);
- if (state->skb) {
+ if (unlikely(state->skb)) {
+ spin_lock_bh(&psock->ingress_lock);
skb = state->skb;
len = state->len;
off = state->off;
state->skb = NULL;
- goto start;
+ spin_unlock_bh(&psock->ingress_lock);
}
+ if (skb)
+ goto start;
while ((skb = skb_dequeue(&psock->ingress_skb))) {
len = skb->len;
@@ -615,9 +634,8 @@ start:
len, ingress);
if (ret <= 0) {
if (ret == -EAGAIN) {
- state->skb = skb;
- state->len = len;
- state->off = off;
+ sk_psock_skb_state(psock, state, skb,
+ len, off);
goto end;
}
/* Hard errors break pipe and stop xmit. */
@@ -716,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
skb_bpf_redirect_clear(skb);
sock_drop(psock->sk, skb);
}
+ kfree_skb(psock->work_state.skb);
+ /* We NULL the skb here to ensure that calls to sk_psock_backlog()
+ * do not pick up the freed skb.
+ */
+ psock->work_state.skb = NULL;
__sk_psock_purge_ingress_msg(psock);
}
@@ -767,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work)
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
- sk_psock_stop(psock, false);
-
write_lock_bh(&sk->sk_callback_lock);
sk_psock_restore_proto(sk, psock);
rcu_assign_sk_user_data(sk, NULL);
@@ -778,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
sk_psock_stop_verdict(sk, psock);
write_unlock_bh(&sk->sk_callback_lock);
+ sk_psock_stop(psock, false);
+
INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
queue_rcu_work(system_wq, &psock->rwork);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index ba1c0f75cd45..a3eea6e0b30a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,6 +139,8 @@
#include <net/tcp.h>
#include <net/busy_poll.h>
+#include <linux/ethtool.h>
+
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
@@ -810,8 +812,47 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
}
}
-int sock_set_timestamping(struct sock *sk, int optname, int val)
+static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
+{
+ struct net *net = sock_net(sk);
+ struct net_device *dev = NULL;
+ bool match = false;
+ int *vclock_index;
+ int i, num;
+
+ if (sk->sk_bound_dev_if)
+ dev = dev_get_by_index(net, sk->sk_bound_dev_if);
+
+ if (!dev) {
+ pr_err("%s: sock not bind to device\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ num = ethtool_get_phc_vclocks(dev, &vclock_index);
+ for (i = 0; i < num; i++) {
+ if (*(vclock_index + i) == phc_index) {
+ match = true;
+ break;
+ }
+ }
+
+ if (num > 0)
+ kfree(vclock_index);
+
+ if (!match)
+ return -EINVAL;
+
+ sk->sk_bind_phc = phc_index;
+
+ return 0;
+}
+
+int sock_set_timestamping(struct sock *sk, int optname,
+ struct so_timestamping timestamping)
{
+ int val = timestamping.flags;
+ int ret;
+
if (val & ~SOF_TIMESTAMPING_MASK)
return -EINVAL;
@@ -832,6 +873,12 @@ int sock_set_timestamping(struct sock *sk, int optname, int val)
!(val & SOF_TIMESTAMPING_OPT_TSONLY))
return -EINVAL;
+ if (val & SOF_TIMESTAMPING_BIND_PHC) {
+ ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
+ if (ret)
+ return ret;
+ }
+
sk->sk_tsflags = val;
sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
@@ -907,6 +954,7 @@ EXPORT_SYMBOL(sock_set_mark);
int sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
+ struct so_timestamping timestamping;
struct sock_txtime sk_txtime;
struct sock *sk = sock->sk;
int val;
@@ -1068,12 +1116,22 @@ set_sndbuf:
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
case SO_TIMESTAMPNS_NEW:
- sock_set_timestamp(sk, valbool, optname);
+ sock_set_timestamp(sk, optname, valbool);
break;
case SO_TIMESTAMPING_NEW:
case SO_TIMESTAMPING_OLD:
- ret = sock_set_timestamping(sk, optname, val);
+ if (optlen == sizeof(timestamping)) {
+ if (copy_from_sockptr(&timestamping, optval,
+ sizeof(timestamping))) {
+ ret = -EFAULT;
+ break;
+ }
+ } else {
+ memset(&timestamping, 0, sizeof(timestamping));
+ timestamping.flags = val;
+ }
+ ret = sock_set_timestamping(sk, optname, timestamping);
break;
case SO_RCVLOWAT:
@@ -1201,7 +1259,7 @@ set_sndbuf:
if (val < 0)
ret = -EINVAL;
else
- sk->sk_ll_usec = val;
+ WRITE_ONCE(sk->sk_ll_usec, val);
}
break;
case SO_PREFER_BUSY_POLL:
@@ -1348,6 +1406,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
struct __kernel_old_timeval tm;
struct __kernel_sock_timeval stm;
struct sock_txtime txtime;
+ struct so_timestamping timestamping;
} v;
int lv = sizeof(int);
@@ -1451,7 +1510,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_TIMESTAMPING_OLD:
- v.val = sk->sk_tsflags;
+ lv = sizeof(v.timestamping);
+ v.timestamping.flags = sk->sk_tsflags;
+ v.timestamping.bind_phc = sk->sk_bind_phc;
break;
case SO_RCVTIMEO_OLD:
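
With this change SO_TIMESTAMPING accepts either the legacy int of flags or the larger struct so_timestamping carrying a PHC vclock index, and the getsockopt() path reports both fields back. A hedged userspace sketch, assuming uapi headers from a kernel with this series (struct so_timestamping, SOF_TIMESTAMPING_BIND_PHC) and a socket already bound to a device with SO_BINDTODEVICE, as sock_timestamping_bind_phc() requires:

#include <string.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>	/* struct so_timestamping, SOF_TIMESTAMPING_* */

/* Enable hardware RX timestamps on 'fd' and bind them to PHC vclock
 * 'vclock_index'; the kernel keeps accepting the plain int form when
 * optlen != sizeof(struct so_timestamping).
 */
static int bind_phc_timestamping(int fd, int vclock_index)
{
	struct so_timestamping ts;

	memset(&ts, 0, sizeof(ts));
	ts.flags = SOF_TIMESTAMPING_RX_HARDWARE |
		   SOF_TIMESTAMPING_RAW_HARDWARE |
		   SOF_TIMESTAMPING_BIND_PHC;
	ts.bind_phc = vclock_index;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
}
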
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 9cc9d1ee6cdb..c5c1d2b8045e 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -41,9 +41,9 @@ extern bool dccp_debug;
#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
#else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...) do {} while (0)
+#define dccp_pr_debug_cat(format, a...) do {} while (0)
+#define dccp_debug(format, a...) do {} while (0)
#endif
extern struct inet_hashinfo dccp_hashinfo;
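
Turning the disabled debug macros into do {} while (0) keeps each call site a single, well-formed statement; with an empty expansion, "if (cond) dccp_pr_debug(...);" collapses to "if (cond) ;", which trips -Wempty-body. A small generic illustration (not DCCP code):

#include <stdio.h>

#define LOG_EMPTY(fmt, ...)			/* expands to nothing */
#define LOG_STUB(fmt, ...)  do {} while (0)	/* expands to one real statement */

int main(void)
{
	int err = 1;

	if (err)
		LOG_EMPTY("error %d\n", err);	/* becomes "if (err) ;" -> -Wempty-body warning */

	if (err)
		LOG_STUB("error %d\n", err);	/* silent, well-formed no-op */

	printf("done\n");
	return 0;
}
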
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 5dbd45dc35ad..dc92a67baea3 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -816,7 +816,7 @@ static int dn_auto_bind(struct socket *sock)
static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
{
struct dn_scp *scp = DN_SK(sk);
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
if (scp->state != DN_CR)
@@ -826,11 +826,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
dn_send_conn_conf(sk, allocation);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ add_wait_queue(sk_sleep(sk), &wait);
for(;;) {
release_sock(sk);
if (scp->state == DN_CC)
- *timeo = schedule_timeout(*timeo);
+ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
lock_sock(sk);
err = 0;
if (scp->state == DN_RUN)
@@ -844,9 +844,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CC) {
@@ -858,7 +857,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
static int dn_wait_run(struct sock *sk, long *timeo)
{
struct dn_scp *scp = DN_SK(sk);
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err = 0;
if (scp->state == DN_RUN)
@@ -867,11 +866,11 @@ static int dn_wait_run(struct sock *sk, long *timeo)
if (!*timeo)
return -EALREADY;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ add_wait_queue(sk_sleep(sk), &wait);
for(;;) {
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
- *timeo = schedule_timeout(*timeo);
+ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
lock_sock(sk);
err = 0;
if (scp->state == DN_RUN)
@@ -885,9 +884,8 @@ static int dn_wait_run(struct sock *sk, long *timeo)
err = -ETIMEDOUT;
if (!*timeo)
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
out:
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
@@ -1032,16 +1030,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
{
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sk_buff *skb = NULL;
int err = 0;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ add_wait_queue(sk_sleep(sk), &wait);
for(;;) {
release_sock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
- *timeo = schedule_timeout(*timeo);
+ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
skb = skb_dequeue(&sk->sk_receive_queue);
}
lock_sock(sk);
@@ -1056,9 +1054,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk_sleep(sk), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
return skb == NULL ? ERR_PTR(err) : skb;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ffbba1e71551..23be8e01026b 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1808,6 +1808,7 @@ void dsa_slave_setup_tagger(struct net_device *slave)
struct dsa_slave_priv *p = netdev_priv(slave);
const struct dsa_port *cpu_dp = dp->cpu_dp;
struct net_device *master = cpu_dp->master;
+ const struct dsa_switch *ds = dp->ds;
slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
@@ -1819,6 +1820,14 @@ void dsa_slave_setup_tagger(struct net_device *slave)
slave->needed_tailroom += master->needed_tailroom;
p->xmit = cpu_dp->tag_ops->xmit;
+
+ slave->features = master->vlan_features | NETIF_F_HW_TC;
+ if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
+ slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ slave->hw_features |= NETIF_F_HW_TC;
+ slave->features |= NETIF_F_LLTX;
+ if (slave->needed_tailroom)
+ slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
}
static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
@@ -1881,11 +1890,6 @@ int dsa_slave_create(struct dsa_port *port)
if (slave_dev == NULL)
return -ENOMEM;
- slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
- if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
- slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- slave_dev->hw_features |= NETIF_F_HW_TC;
- slave_dev->features |= NETIF_F_LLTX;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
if (!is_zero_ether_addr(port->mac))
ether_addr_copy(slave_dev->dev_addr, port->mac);
@@ -2287,8 +2291,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
+ struct switchdev_notifier_fdb_info info = {};
struct dsa_switch *ds = switchdev_work->ds;
- struct switchdev_notifier_fdb_info info;
struct dsa_port *dp;
if (!dsa_is_user_port(ds, switchdev_work->port))
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index af71b8638098..5ece05dfd8f2 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -113,11 +113,11 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
int err, port;
if (dst->index == info->tree_index && ds->index == info->sw_index &&
- ds->ops->port_bridge_join)
+ ds->ops->port_bridge_leave)
ds->ops->port_bridge_leave(ds, info->port, info->br);
if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
- ds->ops->crosschip_bridge_join)
+ ds->ops->crosschip_bridge_leave)
ds->ops->crosschip_bridge_leave(ds, info->tree_index,
info->sw_index, info->port,
info->br);
@@ -427,7 +427,7 @@ static int dsa_switch_lag_join(struct dsa_switch *ds,
info->port, info->lag,
info->info);
- return 0;
+ return -EOPNOTSUPP;
}
static int dsa_switch_lag_leave(struct dsa_switch *ds,
@@ -440,7 +440,7 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
return ds->ops->crosschip_lag_leave(ds, info->sw_index,
info->port, info->lag);
- return 0;
+ return -EOPNOTSUPP;
}
static int dsa_switch_mdb_add(struct dsa_switch *ds,
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 53565f48934c..a201ccf2435d 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -53,6 +53,9 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
u8 *tag;
u8 *addr;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+ return NULL;
+
/* Tag encoding */
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
addr = skb_mac_header(skb);
@@ -114,6 +117,9 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
u8 *addr;
u16 val;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+ return NULL;
+
/* Tag encoding */
tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
addr = skb_mac_header(skb);
@@ -164,6 +170,9 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
u8 *addr;
u8 *tag;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+ return NULL;
+
/* Tag encoding */
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
addr = skb_mac_header(skb);
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 723c9a8a8cdf..0a19470efbfb 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
- tunnels.o fec.o eeprom.o stats.o
+ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index f9dcbad84788..c63e0739dc6a 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -4,6 +4,7 @@
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/rtnetlink.h>
+#include <linux/ptp_clock_kernel.h>
#include "common.h"
@@ -397,6 +398,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
[const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats",
[const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo",
[const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
+ [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc",
};
static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
@@ -554,6 +556,18 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
return 0;
}
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
+{
+ struct ethtool_ts_info info = { };
+ int num = 0;
+
+ if (!__ethtool_get_ts_info(dev, &info))
+ num = ptp_get_vclocks_index(info.phc_index, vclock_index);
+
+ return num;
+}
+EXPORT_SYMBOL(ethtool_get_phc_vclocks);
+
const struct ethtool_phy_ops *ethtool_phy_ops;
void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index a7346346114f..73e0f5b626bf 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -248,6 +248,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops,
[ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops,
[ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops,
+ [ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -958,6 +959,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_stats_get_policy,
.maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_phc_vclocks_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 3e25a47fd482..3fc395c86702 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -347,6 +347,7 @@ extern const struct ethnl_request_ops ethnl_tsinfo_request_ops;
extern const struct ethnl_request_ops ethnl_fec_request_ops;
extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops;
extern const struct ethnl_request_ops ethnl_stats_request_ops;
+extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
@@ -382,6 +383,7 @@ extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1];
extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
+extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1];
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c
new file mode 100644
index 000000000000..637b2f5297d5
--- /dev/null
+++ b/net/ethtool/phc_vclocks.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 NXP
+ */
+#include "netlink.h"
+#include "common.h"
+
+struct phc_vclocks_req_info {
+ struct ethnl_req_info base;
+};
+
+struct phc_vclocks_reply_data {
+ struct ethnl_reply_data base;
+ int num;
+ int *index;
+};
+
+#define PHC_VCLOCKS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct phc_vclocks_reply_data, base)
+
+const struct nla_policy ethnl_phc_vclocks_get_policy[] = {
+ [ETHTOOL_A_PHC_VCLOCKS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ data->num = ethtool_get_phc_vclocks(dev, &data->index);
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int phc_vclocks_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct phc_vclocks_reply_data *data =
+ PHC_VCLOCKS_REPDATA(reply_base);
+ int len = 0;
+
+ if (data->num > 0) {
+ len += nla_total_size(sizeof(u32));
+ len += nla_total_size(sizeof(s32) * data->num);
+ }
+
+ return len;
+}
+
+static int phc_vclocks_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct phc_vclocks_reply_data *data =
+ PHC_VCLOCKS_REPDATA(reply_base);
+
+ if (data->num <= 0)
+ return 0;
+
+ if (nla_put_u32(skb, ETHTOOL_A_PHC_VCLOCKS_NUM, data->num) ||
+ nla_put(skb, ETHTOOL_A_PHC_VCLOCKS_INDEX,
+ sizeof(s32) * data->num, data->index))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static void phc_vclocks_cleanup_data(struct ethnl_reply_data *reply_base)
+{
+ const struct phc_vclocks_reply_data *data =
+ PHC_VCLOCKS_REPDATA(reply_base);
+
+ kfree(data->index);
+}
+
+const struct ethnl_request_ops ethnl_phc_vclocks_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+ .reply_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PHC_VCLOCKS_HEADER,
+ .req_info_size = sizeof(struct phc_vclocks_req_info),
+ .reply_data_size = sizeof(struct phc_vclocks_reply_data),
+
+ .prepare_data = phc_vclocks_prepare_data,
+ .reply_size = phc_vclocks_reply_size,
+ .fill_reply = phc_vclocks_fill_reply,
+ .cleanup_data = phc_vclocks_cleanup_data,
+};
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index a45a0401adc5..c25f7617770c 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -984,6 +984,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
.sendpage = sock_no_sendpage,
};
+static void ieee802154_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
/* Create a socket. Initialise the socket, blank the addresses
* set the state.
*/
@@ -1024,7 +1029,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
sock->ops = ops;
sock_init_data(sock, sk);
- /* FIXME: sk->sk_destruct */
+ sk->sk_destruct = ieee802154_sock_destruct;
sk->sk_family = PF_IEEE802154;
/* Checksums on by default */
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a933bd6345b1..9fe13e4f5d08 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1376,7 +1376,7 @@ static void nl_fib_input(struct sk_buff *skb)
portid = NETLINK_CB(skb).portid; /* netlink portid */
NETLINK_CB(skb).portid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
- netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
+ nlmsg_unicast(net->ipv4.fibnl, skb, portid);
}
static int __net_init nl_fib_lookup_init(struct net *net)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6b3c558a4f23..00576bae183d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -803,10 +803,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
static void igmp_ifc_timer_expire(struct timer_list *t)
{
struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
+ u32 mr_ifc_count;
igmpv3_send_cr(in_dev);
- if (in_dev->mr_ifc_count) {
- in_dev->mr_ifc_count--;
+restart:
+ mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
+
+ if (mr_ifc_count) {
+ if (cmpxchg(&in_dev->mr_ifc_count,
+ mr_ifc_count,
+ mr_ifc_count - 1) != mr_ifc_count)
+ goto restart;
igmp_ifc_start_timer(in_dev,
unsolicited_report_interval(in_dev));
}
@@ -818,7 +825,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
struct net *net = dev_net(in_dev->dev);
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
return;
- in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
igmp_ifc_start_timer(in_dev, 1);
}
@@ -957,7 +964,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
in_dev->mr_qri;
}
/* cancel the interface change timer */
- in_dev->mr_ifc_count = 0;
+ WRITE_ONCE(in_dev->mr_ifc_count, 0);
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
/* clear deleted report items */
@@ -1724,7 +1731,7 @@ void ip_mc_down(struct in_device *in_dev)
igmp_group_dropped(pmc);
#ifdef CONFIG_IP_MULTICAST
- in_dev->mr_ifc_count = 0;
+ WRITE_ONCE(in_dev->mr_ifc_count, 0);
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
in_dev->mr_gq_running = 0;
@@ -1941,7 +1948,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
- in_dev->mr_ifc_count = pmc->crcount;
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(pmc->interface);
@@ -2120,7 +2127,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
/* else no filters; keep old mode for reports */
pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
- in_dev->mr_ifc_count = pmc->crcount;
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(in_dev);
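
The timer handler now decrements mr_ifc_count with a cmpxchg() retry loop, so a concurrent WRITE_ONCE(..., 0) from igmp_ifc_event() or ip_mc_down() can neither be lost nor turned into an underflow. The same compare-and-swap pattern in generic form, sketched with C11 atomics rather than the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *cnt only if it is still non-zero, retrying whenever another
 * context changed it between the load and the update. Returns true when a
 * decrement happened, i.e. when the caller should re-arm its timer.
 */
static bool dec_if_nonzero(atomic_uint *cnt)
{
	unsigned int old = atomic_load(cnt);

	while (old != 0) {
		/* On failure, 'old' is refreshed with the current value. */
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return true;
	}
	return false;	/* someone already cancelled the countdown */
}
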
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e65f4ef024a4..ef7897226f08 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -580,10 +580,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
nlmsg_free(rep);
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
out:
if (sk)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index f6cc26de5ed3..be75b409445c 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
}
dev->needed_headroom = t_hlen + hlen;
- mtu -= t_hlen;
+ mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);
if (mtu < IPV4_MIN_MTU)
mtu = IPV4_MIN_MTU;
@@ -348,6 +348,9 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP_MAX_MTU - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ dev->max_mtu -= dev->hard_header_len;
+
ip_tunnel_add(itn, nt);
return nt;
@@ -387,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
- skb_reset_network_header(skb);
+ skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
@@ -489,11 +492,14 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
pkt_size = skb->len - tunnel_hlen;
+ pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
- if (df)
+ if (df) {
mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
- else
+ mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
+ } else {
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ }
if (skb_valid_dst(skb))
skb_dst_update_pmtu_no_confirm(skb, mtu);
@@ -972,6 +978,9 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int max_mtu = IP_MAX_MTU - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ max_mtu -= dev->hard_header_len;
+
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
@@ -1149,6 +1158,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
if (tb[IFLA_MTU]) {
unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
+ if (dev->type == ARPHRD_ETHER)
+ max -= dev->hard_header_len;
+
mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
}
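
For ARPHRD_ETHER tunnels such as gretap the inner Ethernet header is carried inside the tunnel payload, so every MTU bound now subtracts dev->hard_header_len as well. A rough worked example with assumed header sizes (plain IPv4 GRE with no key/csum/seq options):

#include <stdio.h>

int main(void)
{
	int ip_max_mtu = 65535;		/* IP_MAX_MTU */
	int t_hlen = 20 + 4;		/* outer iphdr + minimal GRE header (assumed) */
	int hard_header_len = 14;	/* inner Ethernet header, ETH_HLEN */

	/* Before the fix only t_hlen was subtracted; for an Ethernet-type
	 * tunnel the inner L2 header must come out of the budget too.
	 */
	printf("max_mtu = %d\n", ip_max_mtu - t_hlen - hard_header_len);
	return 0;
}
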
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7b12a40dd465..2dda856ca260 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2119,7 +2119,7 @@ int ip_mr_input(struct sk_buff *skb)
raw_rcv(mroute_sk, skb);
return 0;
}
- }
+ }
}
/* already under rcu_read_lock() */
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index 1b5b8af27aaf..ccacbde30a2c 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -119,11 +119,8 @@ static int raw_diag_dump_one(struct netlink_callback *cb,
return err;
}
- err = netlink_unicast(net->diag_nlsk, rep,
- NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
return err;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d5ab5f243640..8cb44040ec68 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1375,6 +1375,9 @@ new_segment:
}
pfrag->offset += copy;
} else {
+ if (!sk_wmem_schedule(sk, copy))
+ goto wait_for_space;
+
err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
if (err == -EMSGSIZE || err == -EEXIST) {
tcp_mark_push(tp, skb);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 6ea3dc2e4219..6274462b86b4 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk)
bbr->prior_cwnd = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
bbr->rtt_cnt = 0;
- bbr->next_rtt_delivered = 0;
+ bbr->next_rtt_delivered = tp->delivered;
bbr->prev_ca_state = TCP_CA_Open;
bbr->packet_conservation = 0;
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index f26916a62f25..d3e9386b493e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -503,7 +503,7 @@ static int __init tcp_bpf_v4_build_proto(void)
tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
return 0;
}
-core_initcall(tcp_bpf_v4_build_proto);
+late_initcall(tcp_bpf_v4_build_proto);
static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 47c32604d38f..25fa4c01a17f 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -507,8 +507,18 @@ void tcp_fastopen_active_disable(struct sock *sk)
{
struct net *net = sock_net(sk);
+ if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
+ return;
+
+ /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
+ WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);
+
+ /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
+ * We want net->ipv4.tfo_active_disable_stamp to be updated first.
+ */
+ smp_mb__before_atomic();
atomic_inc(&net->ipv4.tfo_active_disable_times);
- net->ipv4.tfo_active_disable_stamp = jiffies;
+
NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
@@ -519,17 +529,27 @@ void tcp_fastopen_active_disable(struct sock *sk)
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
- int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
unsigned long timeout;
+ int tfo_da_times;
int multiplier;
+ if (!tfo_bh_timeout)
+ return false;
+
+ tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
if (!tfo_da_times)
return false;
+ /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
+ smp_rmb();
+
/* Limit timeout to max: 2^6 * initial timeout */
multiplier = 1 << min(tfo_da_times - 1, 6);
- timeout = multiplier * tfo_bh_timeout * HZ;
- if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
+
+ /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
+ timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
+ multiplier * tfo_bh_timeout * HZ;
+ if (time_before(jiffies, timeout))
return true;
/* Mark check bit so we can check for successful active TFO
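
The barriers here pair a publish on the write side (store the stamp, smp_mb__before_atomic(), then bump the counter) with a consume on the read side (read the counter, smp_rmb(), then read the stamp), so a reader that observes a non-zero counter is guaranteed to see the updated timestamp. A generic sketch of that pattern using C11 acquire/release atomics as a stand-in for the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static _Atomic time_t disable_stamp;	/* payload: when the blackhole was seen */
static atomic_int disable_times;	/* counter: how often it was seen */

/* Writer: publish the payload first, then make the counter bump a release
 * so readers ordered after it also see the new stamp.
 */
static void record_blackhole(void)
{
	atomic_store_explicit(&disable_stamp, time(NULL), memory_order_relaxed);
	atomic_fetch_add_explicit(&disable_times, 1, memory_order_release);
}

/* Reader: only look at the payload after an acquire load of the counter. */
static bool blackhole_active(time_t now, time_t timeout)
{
	if (atomic_load_explicit(&disable_times, memory_order_acquire) == 0)
		return false;

	return now < atomic_load_explicit(&disable_stamp,
					  memory_order_relaxed) + timeout;
}
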
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6ca5a1f3b59..149ceb5c94ff 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4247,6 +4247,9 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
{
trace_tcp_receive_reset(sk);
+ /* mptcp can't tell us to ignore reset pkts,
+ * so just ignore the return value of mptcp_incoming_options().
+ */
if (sk_is_mptcp(sk))
mptcp_incoming_options(sk, skb);
@@ -4941,8 +4944,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
bool fragstolen;
int eaten;
- if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb);
+ /* If a subflow has been reset, the packet should not continue
+ * to be processed; drop it.
+ */
+ if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
+ __kfree_skb(skb);
+ return;
+ }
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
__kfree_skb(skb);
@@ -5922,8 +5930,8 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
tp->snd_cwnd_stamp = tcp_jiffies32;
- icsk->icsk_ca_initialized = 0;
bpf_skops_established(sk, bpf_op, skb);
+ /* Initialize congestion control unless BPF initialized it already: */
if (!icsk->icsk_ca_initialized)
tcp_init_congestion_control(sk);
tcp_init_buffer_space(sk);
@@ -6523,8 +6531,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_CLOSING:
case TCP_LAST_ACK:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb);
+ /* If a subflow has been reset, the packet should not
+ * continue to be processed; drop it.
+ */
+ if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
+ goto discard;
break;
}
fallthrough;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e66ad6bfe808..a692626c19e4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -342,7 +342,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
- mtu = tcp_sk(sk)->mtu_info;
+ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
@@ -546,7 +546,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
if (sk->sk_state == TCP_LISTEN)
goto out;
- tp->mtu_info = info;
+ WRITE_ONCE(tp->mtu_info, info);
if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
} else {
@@ -2965,7 +2965,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_comp_sack_nr = 44;
net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
- net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
+ net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
/* Reno is always built in */
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index e09147ac9a99..fc61cd3fea65 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -298,6 +298,9 @@ int tcp_gro_complete(struct sk_buff *skb)
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ if (skb->encapsulation)
+ skb->inner_transport_header = skb->transport_header;
+
return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bde781f46b41..29553fce8502 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1732,6 +1732,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
return __tcp_mtu_to_mss(sk, pmtu) -
(tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
}
+EXPORT_SYMBOL(tcp_mtu_to_mss);
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 62682807b4b2..1a742b710e54 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -645,10 +645,12 @@ static struct sock *__udp4_lib_err_encap(struct net *net,
const struct iphdr *iph,
struct udphdr *uh,
struct udp_table *udptable,
+ struct sock *sk,
struct sk_buff *skb, u32 info)
{
+ int (*lookup)(struct sock *sk, struct sk_buff *skb);
int network_offset, transport_offset;
- struct sock *sk;
+ struct udp_sock *up;
network_offset = skb_network_offset(skb);
transport_offset = skb_transport_offset(skb);
@@ -659,18 +661,28 @@ static struct sock *__udp4_lib_err_encap(struct net *net,
/* Transport header needs to point to the UDP header */
skb_set_transport_header(skb, iph->ihl << 2);
+ if (sk) {
+ up = udp_sk(sk);
+
+ lookup = READ_ONCE(up->encap_err_lookup);
+ if (lookup && lookup(sk, skb))
+ sk = NULL;
+
+ goto out;
+ }
+
sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
iph->saddr, uh->dest, skb->dev->ifindex, 0,
udptable, NULL);
if (sk) {
- int (*lookup)(struct sock *sk, struct sk_buff *skb);
- struct udp_sock *up = udp_sk(sk);
+ up = udp_sk(sk);
lookup = READ_ONCE(up->encap_err_lookup);
if (!lookup || lookup(sk, skb))
sk = NULL;
}
+out:
if (!sk)
sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
@@ -707,15 +719,16 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
iph->saddr, uh->source, skb->dev->ifindex,
inet_sdif(skb), udptable, NULL);
+
if (!sk || udp_sk(sk)->encap_type) {
/* No socket for error: try tunnels before discarding */
- sk = ERR_PTR(-ENOENT);
if (static_branch_unlikely(&udp_encap_needed_key)) {
- sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
+ sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
info);
if (!sk)
return 0;
- }
+ } else
+ sk = ERR_PTR(-ENOENT);
if (IS_ERR(sk)) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
@@ -1102,7 +1115,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
ipcm_init_sk(&ipc, inet);
- ipc.gso_size = up->gso_size;
+ ipc.gso_size = READ_ONCE(up->gso_size);
if (msg->msg_controllen) {
err = udp_cmsg_send(sk, msg, &ipc.gso_size);
@@ -2695,7 +2708,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_SEGMENT:
if (val < 0 || val > USHRT_MAX)
return -EINVAL;
- up->gso_size = val;
+ WRITE_ONCE(up->gso_size, val);
break;
case UDP_GRO:
@@ -2790,7 +2803,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
break;
case UDP_SEGMENT:
- val = up->gso_size;
+ val = READ_ONCE(up->gso_size);
break;
case UDP_GRO:
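
up->gso_size is switched to READ_ONCE()/WRITE_ONCE() because the UDP_SEGMENT option can be rewritten by setsockopt() while udp_sendmsg() reads it locklessly. For reference, a hedged userspace sketch of the option itself (UDP GSO); the UDP_SEGMENT fallback define mirrors what the kernel selftests do for older libc headers:

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from linux/udp.h, absent from older libc headers */
#endif

/* Ask the kernel to segment large sendmsg() payloads on 'fd' into
 * 1200-byte UDP datagrams; setting 0 disables UDP GSO again.
 */
static int enable_udp_gso(int fd)
{
	int gso_size = 1200;

	return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}
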
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
index 45b8782aec0c..9f5a5cdc38e6 100644
--- a/net/ipv4/udp_bpf.c
+++ b/net/ipv4/udp_bpf.c
@@ -134,7 +134,7 @@ static int __init udp_bpf_v4_build_proto(void)
udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
return 0;
}
-core_initcall(udp_bpf_v4_build_proto);
+late_initcall(udp_bpf_v4_build_proto);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index b2cee9a307d4..1ed8c4d78e5c 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -77,10 +77,8 @@ static int udp_dump_one(struct udp_table *tbl,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
if (sk)
sock_put(sk);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 54e06b88af69..1380a6b6f4ff 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -525,8 +525,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
(sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
- pp = call_gro_receive(udp_gro_receive_segment, head, skb);
- return pp;
+ return call_gro_receive(udp_gro_receive_segment, head, skb);
+
+ /* no GRO, be sure to flush the current packet */
+ goto out;
}
if (NAPI_GRO_CB(skb)->encap_mark ||
@@ -622,6 +624,10 @@ static int udp_gro_complete_segment(struct sk_buff *skb)
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+
+ if (skb->encapsulation)
+ skb->inner_transport_header = skb->transport_header;
+
return 0;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 984050f35c61..8e6ca9ad6812 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
+ unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ int delta = hh_len - skb_headroom(skb);
const struct in6_addr *nexthop;
struct neighbour *neigh;
int ret;
+ /* Be paranoid, rather than too clever. */
+ if (unlikely(delta > 0) && dev->header_ops) {
+ /* pskb_expand_head() might crash, if skb is shared */
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (likely(nskb)) {
+ if (skb->sk)
+ skb_set_owner_w(nskb, skb->sk);
+ consume_skb(skb);
+ } else {
+ kfree_skb(skb);
+ }
+ skb = nskb;
+ }
+ if (skb &&
+ pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ skb = NULL;
+ }
+ if (!skb) {
+ IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+ return -ENOMEM;
+ }
+ }
+
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
@@ -479,7 +507,9 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
+ if (!net->ipv6.devconf_all->disable_policy &&
+ !idev->cnf.disable_policy &&
+ !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}
@@ -519,9 +549,10 @@ int ip6_forward(struct sk_buff *skb)
if (net->ipv6.devconf_all->proxy_ndp &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
- if (proxied > 0)
+ if (proxied > 0) {
+ hdr->hop_limit--;
return ip6_input(skb);
- else if (proxied < 0) {
+ } else if (proxied < 0) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7b756a7dc036..b6ddf23d3833 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3769,7 +3769,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
err = PTR_ERR(rt->fib6_metrics);
/* Do not leave garbage there. */
rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
- goto out;
+ goto out_free;
}
if (cfg->fc_flags & RTF_ADDRCONF)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 578ab6305c3f..0ce52d46e4f8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -348,11 +348,20 @@ failure:
static void tcp_v6_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
+ u32 mtu;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
- dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
+ mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
+
+ /* Drop requests trying to increase our current mss.
+ * The check done in __ip6_rt_update_pmtu() is too late.
+ */
+ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
+ return;
+
+ dst = inet6_csk_update_pmtu(sk, mtu);
if (!dst)
return;
@@ -433,6 +442,8 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
if (type == ICMPV6_PKT_TOOBIG) {
+ u32 mtu = ntohl(info);
+
/* We are not interested in TCP_LISTEN and open_requests
* (SYN-ACKs send out by Linux are always <576bytes so
* they should go through unfragmented).
@@ -443,7 +454,11 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!ip6_sk_accept_pmtu(sk))
goto out;
- tp->mtu_info = ntohl(info);
+ if (mtu < IPV6_MIN_MTU)
+ goto out;
+
+ WRITE_ONCE(tp->mtu_info, mtu);
+
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
@@ -540,7 +555,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
+ err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
tclass, sk->sk_priority);
rcu_read_unlock();
err = net_xmit_eval(err);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 368972dbd919..c5e15e94bb00 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -502,12 +502,14 @@ static struct sock *__udp6_lib_err_encap(struct net *net,
const struct ipv6hdr *hdr, int offset,
struct udphdr *uh,
struct udp_table *udptable,
+ struct sock *sk,
struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, __be32 info)
{
+ int (*lookup)(struct sock *sk, struct sk_buff *skb);
int network_offset, transport_offset;
- struct sock *sk;
+ struct udp_sock *up;
network_offset = skb_network_offset(skb);
transport_offset = skb_transport_offset(skb);
@@ -518,18 +520,28 @@ static struct sock *__udp6_lib_err_encap(struct net *net,
/* Transport header needs to point to the UDP header */
skb_set_transport_header(skb, offset);
+ if (sk) {
+ up = udp_sk(sk);
+
+ lookup = READ_ONCE(up->encap_err_lookup);
+ if (lookup && lookup(sk, skb))
+ sk = NULL;
+
+ goto out;
+ }
+
sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
&hdr->saddr, uh->dest,
inet6_iif(skb), 0, udptable, skb);
if (sk) {
- int (*lookup)(struct sock *sk, struct sk_buff *skb);
- struct udp_sock *up = udp_sk(sk);
+ up = udp_sk(sk);
lookup = READ_ONCE(up->encap_err_lookup);
if (!lookup || lookup(sk, skb))
sk = NULL;
}
+out:
if (!sk) {
sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
offset, info));
@@ -558,16 +570,17 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+
if (!sk || udp_sk(sk)->encap_type) {
/* No socket for error: try tunnels before discarding */
- sk = ERR_PTR(-ENOENT);
if (static_branch_unlikely(&udpv6_encap_needed_key)) {
sk = __udp6_lib_err_encap(net, hdr, offset, uh,
- udptable, skb,
+ udptable, sk, skb,
opt, type, code, info);
if (!sk)
return 0;
- }
+ } else
+ sk = ERR_PTR(-ENOENT);
if (IS_ERR(sk)) {
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
@@ -1296,7 +1309,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
ipcm6_init(&ipc6);
- ipc6.gso_size = up->gso_size;
+ ipc6.gso_size = READ_ONCE(up->gso_size);
ipc6.sockc.tsflags = sk->sk_tsflags;
ipc6.sockc.mark = sk->sk_mark;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 57fa27c1cdf9..d0d280077721 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -49,7 +49,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
- int mtu;
+ unsigned int mtu;
bool toobig;
#ifdef CONFIG_NETFILTER
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 349c6ac3313f..e6795d5a546a 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1635,14 +1635,16 @@ struct iucv_message_pending {
u8 iptype;
u32 ipmsgid;
u32 iptrgcls;
- union {
- u32 iprmmsg1_u32;
- u8 iprmmsg1[4];
- } ln1msg1;
- union {
- u32 ipbfln1f;
- u8 iprmmsg2[4];
- } ln1msg2;
+ struct {
+ union {
+ u32 iprmmsg1_u32;
+ u8 iprmmsg1[4];
+ } ln1msg1;
+ union {
+ u32 ipbfln1f;
+ u8 iprmmsg2[4];
+ } ln1msg2;
+ } rmmsg;
u32 res1[3];
u32 ipbfln2f;
u8 ippollfg;
@@ -1660,10 +1662,10 @@ static void iucv_message_pending(struct iucv_irq_data *data)
msg.id = imp->ipmsgid;
msg.class = imp->iptrgcls;
if (imp->ipflags1 & IUCV_IPRMDATA) {
- memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
+ memcpy(msg.rmmsg, &imp->rmmsg, 8);
msg.length = 8;
} else
- msg.length = imp->ln1msg2.ipbfln1f;
+ msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
msg.reply_size = imp->ipbfln2f;
path->handler->message_pending(path, &msg);
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7180979114e4..ac5cadd02cfa 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
{
u8 rc = LLC_PDU_LEN_U;
- if (addr->sllc_test || addr->sllc_xid)
+ if (addr->sllc_test)
rc = LLC_PDU_LEN_U;
+ else if (addr->sllc_xid)
+ /* We need to expand the header to sizeof(struct llc_xid_info)
+ * since llc_pdu_init_as_xid_cmd() fills bytes 4, 5 and 6 of the LLC
+ * header as the XID PDU. In llc_ui_sendmsg() we reserve the header
+ * size and then fill all remaining space with user data. If we don't
+ * reserve these bytes, llc_pdu_init_as_xid_cmd() will overwrite the
+ * user data.
+ */
+ rc = LLC_PDU_LEN_U_XID;
else if (sk->sk_type == SOCK_STREAM)
rc = LLC_PDU_LEN_I;
return rc;
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index b554f26c68ee..79d1cef8f15a 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
int rc;
- llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+ llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 84cc7733ea66..4e6f11e63df3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
struct vif_params *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
int ret;
ret = ieee80211_if_change_type(sdata, type);
@@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
ieee80211_check_fast_rx_iface(sdata);
} else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ if (params->use_4addr == ifmgd->use_4addr)
+ return 0;
+
sdata->u.mgd.use_4addr = params->use_4addr;
+ if (!ifmgd->associated)
+ return 0;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ if (sta)
+ drv_sta_set_4addr(local, sdata, &sta->sta,
+ params->use_4addr);
+ mutex_unlock(&local->sta_mtx);
+
+ if (params->use_4addr)
+ ieee80211_send_4addr_nullfunc(local, sdata);
}
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 22549b95d1aa..30ce6d2ec7ce 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2201,6 +2201,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave);
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 05f4c3c72619..fcae76ddd586 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -260,6 +260,8 @@ static void ieee80211_restart_work(struct work_struct *work)
flush_work(&local->radar_detected_work);
rtnl_lock();
+ /* we might do interface manipulations, so we need both locks */
+ wiphy_lock(local->hw.wiphy);
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
"%s called with hardware scan in progress\n", __func__);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a00f11a33699..c0ea3b1aa9e1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1095,8 +1095,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
ieee80211_tx_skb(sdata, skb);
}
-static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
struct sk_buff *skb;
struct ieee80211_hdr *nullfunc;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 771921c057e8..2563473b5cf1 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -730,7 +730,8 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local,
* Need to make a copy and possibly remove radiotap header
* and FCS from the original.
*/
- skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
+ skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
+ 0, GFP_ATOMIC);
if (!skb)
return NULL;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e96981144358..8509778ff31f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1147,6 +1147,29 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
return queued;
}
+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ struct rate_control_ref *ref = sdata->local->rate_ctrl;
+ u16 tid;
+
+ if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+ return;
+
+ if (!sta || !sta->sta.ht_cap.ht_supported ||
+ !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+ skb->protocol == sdata->control_port_protocol)
+ return;
+
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ if (likely(sta->ampdu_mlme.tid_tx[tid]))
+ return;
+
+ ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
/*
* initialises @tx
* pass %NULL for the station if unknown, a valid pointer if known
@@ -1160,6 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool aggr_check = false;
int tid;
memset(tx, 0, sizeof(*tx));
@@ -1188,8 +1212,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
- if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+ if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
tx->sta = sta_info_get(sdata, hdr->addr1);
+ aggr_check = true;
+ }
}
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
@@ -1199,8 +1225,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_tx *tid_tx;
tid = ieee80211_get_tid(hdr);
-
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx && aggr_check) {
+ ieee80211_aggr_check(sdata, tx->sta, skb);
+ tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+ }
+
if (tid_tx) {
bool queued;
@@ -4120,29 +4150,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-static void
-ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta,
- struct sk_buff *skb)
-{
- struct rate_control_ref *ref = sdata->local->rate_ctrl;
- u16 tid;
-
- if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
- return;
-
- if (!sta || !sta->sta.ht_cap.ht_supported ||
- !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
- skb->protocol == sdata->control_port_protocol)
- return;
-
- tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- if (likely(sta->ampdu_mlme.tid_tx[tid]))
- return;
-
- ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
-}
-
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags,
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 52ea2517e856..ff2cc0e3273d 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -44,6 +44,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
+ SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 193466c9b549..0663cb12b448 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -37,6 +37,7 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */
MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
+ MPTCP_MIB_RCVPRUNED, /* Incoming packet dropped due to memory limit */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 8f88ddeab6a2..f48eb6315bbb 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -57,10 +57,8 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
sock_put(sk);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index b5850afea343..7adcbc1f7d49 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -885,20 +885,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
return subflow->mp_capable;
}
- if (mp_opt->dss && mp_opt->use_ack) {
+ if ((mp_opt->dss && mp_opt->use_ack) ||
+ (mp_opt->add_addr && !mp_opt->echo)) {
/* subflows are fully established as soon as we get any
- * additional ack.
+ * additional ack, including ADD_ADDR.
*/
subflow->fully_established = 1;
WRITE_ONCE(msk->fully_established, true);
goto fully_established;
}
- if (mp_opt->add_addr) {
- WRITE_ONCE(msk->fully_established, true);
- return true;
- }
-
/* If the first established packet does not contain MP_CAPABLE + data
* then fallback to TCP. Fallback scenarios requires a reset for
* MP_JOIN subflows.
@@ -1035,7 +1031,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
return hmac == mp_opt->ahmac;
}
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+/* Return false if a subflow has been reset, else return true */
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
@@ -1053,12 +1050,16 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
__mptcp_check_push(subflow->conn, sk);
__mptcp_data_acked(subflow->conn);
mptcp_data_unlock(subflow->conn);
- return;
+ return true;
}
mptcp_get_options(sk, skb, &mp_opt);
+
+ /* The subflow can be in close state only if check_fully_established()
+ * just sent a reset. If so, tell the caller to ignore the current packet.
+ */
if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
- return;
+ return sk->sk_state != TCP_CLOSE;
if (mp_opt.fastclose &&
msk->local_key == mp_opt.rcvr_key) {
@@ -1100,7 +1101,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
}
if (!mp_opt.dss)
- return;
+ return true;
/* we can't wait for recvmsg() to update the ack_seq, otherwise
* monodirectional flows will get stuck
@@ -1119,12 +1120,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
schedule_work(&msk->work))
sock_hold(subflow->conn);
- return;
+ return true;
}
mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
if (!mpext)
- return;
+ return true;
memset(mpext, 0, sizeof(*mpext));
@@ -1153,6 +1154,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if (mpext->csum_reqd)
mpext->csum = mp_opt.csum;
}
+
+ return true;
}
static void mptcp_set_rwin(const struct tcp_sock *tp)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index d2591ebf01d9..7b3794459783 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -27,7 +27,6 @@ struct mptcp_pm_addr_entry {
struct mptcp_addr_info addr;
u8 flags;
int ifindex;
- struct rcu_head rcu;
struct socket *lsk;
};
@@ -1136,36 +1135,12 @@ next:
return 0;
}
-struct addr_entry_release_work {
- struct rcu_work rwork;
- struct mptcp_pm_addr_entry *entry;
-};
-
-static void mptcp_pm_release_addr_entry(struct work_struct *work)
+/* caller must ensure the RCU grace period is already elapsed */
+static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
{
- struct addr_entry_release_work *w;
- struct mptcp_pm_addr_entry *entry;
-
- w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork);
- entry = w->entry;
- if (entry) {
- if (entry->lsk)
- sock_release(entry->lsk);
- kfree(entry);
- }
- kfree(w);
-}
-
-static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry)
-{
- struct addr_entry_release_work *w;
-
- w = kmalloc(sizeof(*w), GFP_ATOMIC);
- if (w) {
- INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry);
- w->entry = entry;
- queue_rcu_work(system_wq, &w->rwork);
- }
+ if (entry->lsk)
+ sock_release(entry->lsk);
+ kfree(entry);
}
static int mptcp_nl_remove_id_zero_address(struct net *net,
@@ -1245,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
spin_unlock_bh(&pernet->lock);
mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
- mptcp_pm_free_addr_entry(entry);
+ synchronize_rcu();
+ __mptcp_pm_release_addr_entry(entry);
return ret;
}
@@ -1298,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
}
}
+/* caller must ensure the RCU grace period is already elapsed */
static void __flush_addrs(struct list_head *list)
{
while (!list_empty(list)) {
@@ -1306,7 +1283,7 @@ static void __flush_addrs(struct list_head *list)
cur = list_entry(list->next,
struct mptcp_pm_addr_entry, list);
list_del_rcu(&cur->list);
- mptcp_pm_free_addr_entry(cur);
+ __mptcp_pm_release_addr_entry(cur);
}
}
@@ -1330,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
spin_unlock_bh(&pernet->lock);
mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
+ synchronize_rcu();
__flush_addrs(&free_list);
return 0;
}
@@ -1940,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
/* net is removed from namespace list, can't race with
- * other modifiers
+ * other modifiers; also, netns core has already waited for an
+ * RCU grace period.
*/
__flush_addrs(&pernet->local_addr_list);
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7a5afa8c6866..a88924947815 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -474,7 +474,7 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
bool cleanup, rx_empty;
cleanup = (space > 0) && (space >= (old_space << 1));
- rx_empty = !atomic_read(&sk->sk_rmem_alloc);
+ rx_empty = !__mptcp_rmem(sk);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -720,8 +720,10 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
sk_rbuf = ssk_rbuf;
/* over limit? can't append more skbs to msk; also, no need to wake up */
- if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
+ if (__mptcp_rmem(sk) > sk_rbuf) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
return;
+ }
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
@@ -1754,7 +1756,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
if (!(flags & MSG_PEEK)) {
/* we will bulk release the skb memory later */
skb->destructor = NULL;
- msk->rmem_released += skb->truesize;
+ WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
__skb_unlink(skb, &msk->receive_queue);
__kfree_skb(skb);
}
@@ -1873,7 +1875,7 @@ static void __mptcp_update_rmem(struct sock *sk)
atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
sk_mem_uncharge(sk, msk->rmem_released);
- msk->rmem_released = 0;
+ WRITE_ONCE(msk->rmem_released, 0);
}
static void __mptcp_splice_receive_queue(struct sock *sk)
@@ -2380,7 +2382,7 @@ static int __mptcp_init_sock(struct sock *sk)
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
msk->wmem_reserved = 0;
- msk->rmem_released = 0;
+ WRITE_ONCE(msk->rmem_released, 0);
msk->tx_pending_data = 0;
msk->first = NULL;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 426ed80fe72f..0f0c026c5f8b 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -296,9 +296,17 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
return (struct mptcp_sock *)sk;
}
+/* the msk socket doesn't use the backlog; also account for the bulk
+ * free memory
+ */
+static inline int __mptcp_rmem(const struct sock *sk)
+{
+ return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released);
+}
+
static inline int __mptcp_space(const struct sock *sk)
{
- return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_released);
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk));
}
static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
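
__mptcp_rmem() subtracts the bulk-released-but-not-yet-uncharged memory from sk_rmem_alloc, and __mptcp_space() now derives the advertised space from sk_rcvbuf instead of tcp_space(). A minimal arithmetic sketch with made-up numbers (user space; tcp_win_from_space() is approximated by an identity function here):

#include <stdio.h>

static int win_from_space(int space) { return space; }  /* simplification of tcp_win_from_space() */

int main(void)
{
	int sk_rmem_alloc = 256 * 1024;  /* charged receive memory */
	int rmem_released =  64 * 1024;  /* bulk-freed, not yet uncharged */
	int sk_rcvbuf     = 512 * 1024;

	int mptcp_rmem  = sk_rmem_alloc - rmem_released;          /* __mptcp_rmem()  */
	int mptcp_space = win_from_space(sk_rcvbuf - mptcp_rmem); /* __mptcp_space() */

	printf("effective rmem: %d, advertised space: %d\n", mptcp_rmem, mptcp_space);
	return 0;
}
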
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 092d1f635d27..8c03afac5ca0 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -157,19 +157,7 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow = lock_sock_fast(ssk);
- switch (optname) {
- case SO_TIMESTAMP_OLD:
- case SO_TIMESTAMP_NEW:
- case SO_TIMESTAMPNS_OLD:
- case SO_TIMESTAMPNS_NEW:
- sock_set_timestamp(sk, optname, !!val);
- break;
- case SO_TIMESTAMPING_NEW:
- case SO_TIMESTAMPING_OLD:
- sock_set_timestamping(sk, optname, val);
- break;
- }
-
+ sock_set_timestamp(sk, optname, !!val);
unlock_sock_fast(ssk, slow);
}
@@ -178,7 +166,8 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam
}
static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
- sockptr_t optval, unsigned int optlen)
+ sockptr_t optval,
+ unsigned int optlen)
{
int val, ret;
@@ -205,14 +194,56 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
case SO_TIMESTAMPNS_NEW:
- case SO_TIMESTAMPING_OLD:
- case SO_TIMESTAMPING_NEW:
return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val);
}
return -ENOPROTOOPT;
}
+static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk,
+ int optname,
+ sockptr_t optval,
+ unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ struct so_timestamping timestamping;
+ int ret;
+
+ if (optlen == sizeof(timestamping)) {
+ if (copy_from_sockptr(&timestamping, optval,
+ sizeof(timestamping)))
+ return -EFAULT;
+ } else if (optlen == sizeof(int)) {
+ memset(&timestamping, 0, sizeof(timestamping));
+
+ if (copy_from_sockptr(&timestamping.flags, optval, sizeof(int)))
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
+ KERNEL_SOCKPTR(&timestamping),
+ sizeof(timestamping));
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow = lock_sock_fast(ssk);
+
+ sock_set_timestamping(sk, optname, timestamping);
+ unlock_sock_fast(ssk, slow);
+ }
+
+ release_sock(sk);
+
+ return 0;
+}
+
static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval,
unsigned int optlen)
{
@@ -299,9 +330,12 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
case SO_TIMESTAMP_NEW:
case SO_TIMESTAMPNS_OLD:
case SO_TIMESTAMPNS_NEW:
+ return mptcp_setsockopt_sol_socket_int(msk, optname, optval,
+ optlen);
case SO_TIMESTAMPING_OLD:
case SO_TIMESTAMPING_NEW:
- return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen);
+ return mptcp_setsockopt_sol_socket_timestamping(msk, optname,
+ optval, optlen);
case SO_LINGER:
return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen);
case SO_RCVLOWAT:
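
With the handler split, SO_TIMESTAMPING on an MPTCP socket goes through sock_setsockopt() with either the legacy int flags or the full struct so_timestamping. A hedged user-space sketch of both call forms (assumes kernel headers new enough to provide struct so_timestamping; IPPROTO_MPTCP is defined as a fallback; error handling is minimal):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/net_tstamp.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* legacy form: a plain int of SOF_TIMESTAMPING_* flags */
	int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
		perror("setsockopt(int)");

	/* new form: struct so_timestamping (bind_phc is only consumed when
	 * SOF_TIMESTAMPING_BIND_PHC is set) */
	struct so_timestamping ts = { .flags = flags, .bind_phc = 0 };
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts)))
		perror("setsockopt(struct so_timestamping)");

	return 0;
}
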
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 66d0b1893d26..966f777d35ce 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -214,11 +214,6 @@ again:
ntohs(inet_sk(sk_listener)->inet_sport),
ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
- sock_put((struct sock *)subflow_req->msk);
- mptcp_token_destroy_request(req);
- tcp_request_sock_ops.destructor(req);
- subflow_req->msk = NULL;
- subflow_req->mp_join = 0;
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
return -EPERM;
}
@@ -230,6 +225,8 @@ again:
if (unlikely(req->syncookie)) {
if (mptcp_can_accept_new_subflow(subflow_req->msk))
subflow_init_req_cookie_join_save(subflow_req, skb);
+ else
+ return -EPERM;
}
pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
@@ -269,9 +266,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
return -EINVAL;
- if (mptcp_can_accept_new_subflow(subflow_req->msk))
- subflow_req->mp_join = 1;
-
+ subflow_req->mp_join = 1;
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
}
diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c
index abe0fd099746..37127781aee9 100644
--- a/net/mptcp/syncookies.c
+++ b/net/mptcp/syncookies.c
@@ -37,7 +37,21 @@ static spinlock_t join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp
static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net)
{
- u32 i = skb_get_hash(skb) ^ net_hash_mix(net);
+ static u32 mptcp_join_hash_secret __read_mostly;
+ struct tcphdr *th = tcp_hdr(skb);
+ u32 seq, i;
+
+ net_get_random_once(&mptcp_join_hash_secret,
+ sizeof(mptcp_join_hash_secret));
+
+ if (th->syn)
+ seq = TCP_SKB_CB(skb)->seq;
+ else
+ seq = TCP_SKB_CB(skb)->seq - 1;
+
+ i = jhash_3words(seq, net_hash_mix(net),
+ (__force __u32)th->source << 16 | (__force __u32)th->dest,
+ mptcp_join_hash_secret);
return i % ARRAY_SIZE(join_entries);
}
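
The rework stops deriving the bucket from skb->hash and instead hashes the (SYN-adjusted) TCP sequence number, the netns hash, the port pair packed into one 32-bit word, and a boot-time random secret. A user-space sketch of the input packing (ports are shown in host order for readability, and the mixer is a stand-in, not jhash_3words()):

#include <stdio.h>
#include <stdint.h>

static uint32_t mix(uint32_t a, uint32_t b, uint32_t c, uint32_t secret)
{
	uint32_t h = secret;            /* stand-in mixer, NOT jhash_3words() */

	h ^= a + 0x9e3779b9 + (h << 6) + (h >> 2);
	h ^= b + 0x9e3779b9 + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b9 + (h << 6) + (h >> 2);
	return h;
}

int main(void)
{
	uint32_t seq      = 0x11223344;  /* TCP_SKB_CB(skb)->seq, minus 1 when not a SYN */
	uint32_t net_hash = 0xdeadbeef;  /* net_hash_mix(net) */
	uint16_t sport    = 45678, dport = 443;
	uint32_t ports    = ((uint32_t)sport << 16) | dport;  /* th->source << 16 | th->dest */
	uint32_t secret   = 0xc0ffee00;  /* net_get_random_once() secret */
	unsigned int slots = 1024;       /* stand-in for ARRAY_SIZE(join_entries) */

	printf("bucket = %u\n", mix(seq, net_hash, ports, secret) % slots);
	return 0;
}
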
diff --git a/net/ncsi/Kconfig b/net/ncsi/Kconfig
index 93309081f5a4..ea1dd32b6b1f 100644
--- a/net/ncsi/Kconfig
+++ b/net/ncsi/Kconfig
@@ -17,3 +17,9 @@ config NCSI_OEM_CMD_GET_MAC
help
This allows getting the MAC address from NCSI firmware and setting it back
to the controller.
+config NCSI_OEM_CMD_KEEP_PHY
+ bool "Keep PHY Link up"
+ depends on NET_NCSI
+ help
+ This allows keeping the PHY link up and prevents any channel resets
+ during host load.
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index cbbb0de4750a..0b6cfd3b31e0 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -78,6 +78,9 @@ enum {
/* OEM Vendor Manufacture ID */
#define NCSI_OEM_MFR_MLX_ID 0x8119
#define NCSI_OEM_MFR_BCM_ID 0x113d
+#define NCSI_OEM_MFR_INTEL_ID 0x157
+/* Intel specific OEM command */
+#define NCSI_OEM_INTEL_CMD_KEEP_PHY 0x20 /* CMD ID for Keep PHY up */
/* Broadcom specific OEM Command */
#define NCSI_OEM_BCM_CMD_GMA 0x01 /* CMD ID for Get MAC */
/* Mellanox specific OEM Command */
@@ -86,6 +89,7 @@ enum {
#define NCSI_OEM_MLX_CMD_SMAF 0x01 /* CMD ID for Set MC Affinity */
#define NCSI_OEM_MLX_CMD_SMAF_PARAM 0x07 /* Parameter for SMAF */
/* OEM Command payload lengths*/
+#define NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN 7
#define NCSI_OEM_BCM_CMD_GMA_LEN 12
#define NCSI_OEM_MLX_CMD_GMA_LEN 8
#define NCSI_OEM_MLX_CMD_SMAF_LEN 60
@@ -271,6 +275,7 @@ enum {
ncsi_dev_state_probe_mlx_gma,
ncsi_dev_state_probe_mlx_smaf,
ncsi_dev_state_probe_cis,
+ ncsi_dev_state_probe_keep_phy,
ncsi_dev_state_probe_gvi,
ncsi_dev_state_probe_gc,
ncsi_dev_state_probe_gls,
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index ca04b6df1341..89c7742cd72e 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -689,6 +689,35 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
return 0;
}
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+
+static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+{
+ unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
+ int ret = 0;
+
+ nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
+
+ memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
+ *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
+
+ data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
+
+ /* PHY Link up attribute */
+ data[6] = 0x1;
+
+ nca->data = data;
+
+ ret = ncsi_xmit_cmd(nca);
+ if (ret)
+ netdev_err(nca->ndp->ndev.dev,
+ "NCSI: Failed to transmit cmd 0x%x during configure\n",
+ nca->type);
+ return ret;
+}
+
+#endif
+
#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
/* NCSI OEM Command APIs */
@@ -700,7 +729,7 @@ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
- *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
+ *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
data[5] = NCSI_OEM_BCM_CMD_GMA;
nca->data = data;
@@ -724,7 +753,7 @@ static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
memset(&u, 0, sizeof(u));
- u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+ u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
@@ -747,7 +776,7 @@ static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
int ret = 0;
memset(&u, 0, sizeof(u));
- u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+ u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
@@ -1392,7 +1421,23 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
}
nd->state = ncsi_dev_state_probe_gvi;
+ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+ nd->state = ncsi_dev_state_probe_keep_phy;
+ break;
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+ case ncsi_dev_state_probe_keep_phy:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_OEM;
+ nca.package = ndp->active_package->id;
+ nca.channel = 0;
+ ret = ncsi_oem_keep_phy_intel(&nca);
+ if (ret)
+ goto error;
+
+ nd->state = ncsi_dev_state_probe_gvi;
break;
+#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
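
For reference, the 7-byte payload built by ncsi_oem_keep_phy_intel() carries the Intel manufacturer ID big-endian in bytes 0-3, the Keep-PHY command id in byte 4, and the link-up attribute in byte 6. A small user-space sketch reproducing that layout (for a 32-bit value, htonl() here has the same effect as the kernel's ntohl() cast):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char data[7] = { 0 };
	uint32_t mfr_be = htonl(0x157);  /* NCSI_OEM_MFR_INTEL_ID, big-endian on the wire */

	memcpy(data, &mfr_be, sizeof(mfr_be));
	data[4] = 0x20;                  /* NCSI_OEM_INTEL_CMD_KEEP_PHY */
	data[6] = 0x01;                  /* PHY link-up attribute */

	for (size_t i = 0; i < sizeof(data); i++)
		printf("%02x ", data[i]);
	printf("\n");                    /* expected: 00 00 01 57 20 00 01 */
	return 0;
}
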
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 888ccc2d4e34..d48374894817 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -403,7 +403,7 @@ static int ncsi_rsp_handler_ev(struct ncsi_request *nr)
/* Update to VLAN mode */
cmd = (struct ncsi_cmd_ev_pkt *)skb_network_header(nr->cmd);
ncm->enable = 1;
- ncm->data[0] = ntohl(cmd->mode);
+ ncm->data[0] = ntohl((__force __be32)cmd->mode);
return 0;
}
@@ -699,12 +699,19 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
return 0;
}
+/* Response handler for Intel card */
+static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
+{
+ return 0;
+}
+
static struct ncsi_rsp_oem_handler {
unsigned int mfr_id;
int (*handler)(struct ncsi_request *nr);
} ncsi_rsp_oem_handlers[] = {
{ NCSI_OEM_MFR_MLX_ID, ncsi_rsp_handler_oem_mlx },
- { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm }
+ { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm },
+ { NCSI_OEM_MFR_INTEL_ID, ncsi_rsp_handler_oem_intel }
};
/* Response handler for OEM command */
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index d1bef23fd4f5..dd30c03d5a23 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -132,8 +132,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
- if (ip > ip_to)
+ if (ip > ip_to) {
+ if (ip_to == 0)
+ return -IPSET_ERR_HASH_ELEM;
swap(ip, ip_to);
+ }
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
@@ -144,6 +147,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+ /* 64bit division is not allowed on 32bit */
+ if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+ return -ERANGE;
+
if (retried) {
ip = ntohl(h->next.ip);
e.ip = htonl(ip);
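
The added check bounds how many elements a single add request may expand to: the size of the IP range is shifted down by the set's netmask (a shift rather than a division, since plain 64-bit division needs libgcc helpers on 32-bit) and compared against IPSET_MAX_RANGE. A small arithmetic sketch (user space; MAX_RANGE below is a placeholder for the real IPSET_MAX_RANGE constant):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ip       = 0x0a000000;  /* 10.0.0.0 */
	uint32_t ip_to    = 0x0a0fffff;  /* 10.15.255.255 */
	unsigned int netmask = 24;       /* set created with netmask 24 */
	uint64_t max_range = 1u << 20;   /* placeholder for IPSET_MAX_RANGE */

	/* number of /netmask-sized elements the range expands to */
	uint64_t elements = ((uint64_t)ip_to - ip + 1) >> (32 - netmask);

	printf("elements=%llu -> %s\n", (unsigned long long)elements,
	       elements > max_range ? "rejected with -ERANGE" : "accepted");
	return 0;
}
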
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index 18346d18aa16..153de3457423 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -121,6 +121,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e.mark &= h->markmask;
+ if (e.mark == 0 && e.ip == 0)
+ return -IPSET_ERR_HASH_ELEM;
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
@@ -133,8 +135,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
- if (ip > ip_to)
+ if (ip > ip_to) {
+ if (e.mark == 0 && ip_to == 0)
+ return -IPSET_ERR_HASH_ELEM;
swap(ip, ip_to);
+ }
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
@@ -143,6 +148,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
ip_set_mask_from_to(ip, ip_to, cidr);
}
+ if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+ return -ERANGE;
+
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index e1ca11196515..7303138e46be 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -173,6 +173,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(port, port_to);
}
+ if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+ return -ERANGE;
+
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index ab179e064597..334fb1ad0e86 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -180,6 +180,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(port, port_to);
}
+ if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+ return -ERANGE;
+
if (retried)
ip = ntohl(h->next.ip);
for (; ip <= ip_to; ip++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 8f075b44cf64..7df94f437f60 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -253,6 +253,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(port, port_to);
}
+ if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+ return -ERANGE;
+
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c1a11f041ac6..1422739d9aa2 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -140,7 +140,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem e = { .cidr = HOST_MASK };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip = 0, ip_to = 0;
+ u32 ip = 0, ip_to = 0, ipn, n = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -188,6 +188,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
}
+ ipn = ip;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+ n++;
+ } while (ipn++ < ip_to);
+
+ if (n > IPSET_MAX_RANGE)
+ return -ERANGE;
+
if (retried)
ip = ntohl(h->next.ip);
do {
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index ddd51c2e1cb3..9810f5bf63f5 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 ip = 0, ip_to = 0;
+ u32 ip = 0, ip_to = 0, ipn, n = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -256,6 +256,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr);
}
+ ipn = ip;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+ n++;
+ } while (ipn++ < ip_to);
+
+ if (n > IPSET_MAX_RANGE)
+ return -ERANGE;
if (retried)
ip = ntohl(h->next.ip);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 6532f0505e66..3d09eefe998a 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -168,7 +168,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0;
- u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+ u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+ u64 n = 0, m = 0;
int ret;
if (tb[IPSET_ATTR_LINENO])
@@ -244,6 +245,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}
+ ipn = ip;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+ n++;
+ } while (ipn++ < ip_to);
+ ipn = ip2_from;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+ m++;
+ } while (ipn++ < ip2_to);
+
+ if (n*m > IPSET_MAX_RANGE)
+ return -ERANGE;
if (retried) {
ip = ntohl(h->next.ip[0]);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ec1564a1cb5a..09cf72eb37f8 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -158,7 +158,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
- u32 port, port_to, p = 0, ip = 0, ip_to = 0;
+ u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
+ u64 n = 0;
bool with_ports = false;
u8 cidr;
int ret;
@@ -235,6 +236,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
}
+ ipn = ip;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
+ n++;
+ } while (ipn++ < ip_to);
+
+ if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
+ return -ERANGE;
if (retried) {
ip = ntohl(h->next.ip);
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 0e91d1e82f1c..19bcdb3141f6 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -182,7 +182,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, p = 0, port, port_to;
- u32 ip2_from = 0, ip2_to = 0, ip2;
+ u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
+ u64 n = 0, m = 0;
bool with_ports = false;
int ret;
@@ -284,6 +285,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
} else {
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
}
+ ipn = ip;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+ n++;
+ } while (ipn++ < ip_to);
+ ipn = ip2_from;
+ do {
+ ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+ m++;
+ } while (ipn++ < ip2_to);
+
+ if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
+ return -ERANGE;
if (retried) {
ip = ntohl(h->next.ip[0]);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 96ba19fc8155..d31dbccbe7bd 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
struct conntrack_gc_work {
struct delayed_work dwork;
- u32 last_bucket;
+ u32 next_bucket;
bool exiting;
bool early_drop;
- long next_gc_run;
};
static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV 128u
-/* upper bound of full table scan */
-#define GC_MAX_SCAN_JIFFIES (16u * HZ)
-/* desired ratio of entries found to be expired */
-#define GC_EVICT_RATIO 50u
+#define GC_SCAN_INTERVAL (120u * HZ)
+#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
static struct conntrack_gc_work conntrack_gc_work;
@@ -149,7 +144,15 @@ static void nf_conntrack_all_lock(void)
spin_lock(&nf_conntrack_locks_all_lock);
- nf_conntrack_locks_all = true;
+ /* For nf_conntrack_locks_all, only the latest time when another
+ * CPU will see an update is controlled by the "release" of the
+ * spin_lock below.
+ * The earliest time is not controlled, and thus KCSAN could detect
+ * a race when nf_conntrack_lock() reads the variable.
+ * WRITE_ONCE() is used to ensure the compiler will not
+ * optimize the write.
+ */
+ WRITE_ONCE(nf_conntrack_locks_all, true);
for (i = 0; i < CONNTRACK_LOCKS; i++) {
spin_lock(&nf_conntrack_locks[i]);
@@ -662,8 +665,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
return false;
tstamp = nf_conn_tstamp_find(ct);
- if (tstamp && tstamp->stop == 0)
+ if (tstamp) {
+ s32 timeout = ct->timeout - nfct_time_stamp;
+
tstamp->stop = ktime_get_real_ns();
+ if (timeout < 0)
+ tstamp->stop -= jiffies_to_nsecs(-timeout);
+ }
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {
@@ -1350,17 +1358,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
static void gc_worker(struct work_struct *work)
{
- unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
- unsigned int i, goal, buckets = 0, expired_count = 0;
- unsigned int nf_conntrack_max95 = 0;
+ unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+ unsigned int i, hashsz, nf_conntrack_max95 = 0;
+ unsigned long next_run = GC_SCAN_INTERVAL;
struct conntrack_gc_work *gc_work;
- unsigned int ratio, scanned = 0;
- unsigned long next_run;
-
gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
- goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
- i = gc_work->last_bucket;
+ i = gc_work->next_bucket;
if (gc_work->early_drop)
nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
@@ -1368,15 +1372,15 @@ static void gc_worker(struct work_struct *work)
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
- unsigned int hashsz;
struct nf_conn *tmp;
- i++;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hashsz);
- if (i >= hashsz)
- i = 0;
+ if (i >= hashsz) {
+ rcu_read_unlock();
+ break;
+ }
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
struct nf_conntrack_net *cnet;
@@ -1384,7 +1388,6 @@ static void gc_worker(struct work_struct *work)
tmp = nf_ct_tuplehash_to_ctrack(h);
- scanned++;
if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
nf_ct_offload_timeout(tmp);
continue;
@@ -1392,7 +1395,6 @@ static void gc_worker(struct work_struct *work)
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
- expired_count++;
continue;
}
@@ -1425,7 +1427,14 @@ static void gc_worker(struct work_struct *work)
*/
rcu_read_unlock();
cond_resched();
- } while (++buckets < goal);
+ i++;
+
+ if (time_after(jiffies, end_time) && i < hashsz) {
+ gc_work->next_bucket = i;
+ next_run = 0;
+ break;
+ }
+ } while (i < hashsz);
if (gc_work->exiting)
return;
@@ -1436,40 +1445,17 @@ static void gc_worker(struct work_struct *work)
*
* This worker is only here to reap expired entries when the system went
* idle after a busy period.
- *
- * The heuristics below are supposed to balance conflicting goals:
- *
- * 1. Minimize time until we notice a stale entry
- * 2. Maximize scan intervals to not waste cycles
- *
- * Normally, expire ratio will be close to 0.
- *
- * As soon as a sizeable fraction of the entries have expired
- * increase scan frequency.
*/
- ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio > GC_EVICT_RATIO) {
- gc_work->next_gc_run = min_interval;
- } else {
- unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
-
- BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
-
- gc_work->next_gc_run += min_interval;
- if (gc_work->next_gc_run > max)
- gc_work->next_gc_run = max;
+ if (next_run) {
+ gc_work->early_drop = false;
+ gc_work->next_bucket = 0;
}
-
- next_run = gc_work->next_gc_run;
- gc_work->last_bucket = i;
- gc_work->early_drop = false;
queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
- gc_work->next_gc_run = HZ;
gc_work->exiting = false;
}
@@ -2457,7 +2443,6 @@ i_see_dead_people:
}
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_conntrack_proto_pernet_fini(net);
nf_conntrack_ecache_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
free_percpu(net->ct.stat);
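
The rewritten gc_worker() replaces the ratio-based heuristics with a plain time budget: it scans buckets for at most GC_SCAN_MAX_DURATION, remembers where it stopped, and reschedules itself immediately when it ran out of budget, otherwise after GC_SCAN_INTERVAL. A user-space sketch of that pattern (table size, timing and scheduling are simplified stand-ins):

#include <stdio.h>
#include <time.h>

#define TABLE_SIZE     65536
#define BUDGET_MS      10      /* stand-in for GC_SCAN_MAX_DURATION */
#define SCAN_INTERVAL  120     /* seconds, stand-in for GC_SCAN_INTERVAL */

static unsigned int next_bucket;

static double elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1e3 +
	       (now.tv_nsec - start->tv_nsec) / 1e6;
}

static void gc_pass(void)
{
	struct timespec start;
	unsigned int i = next_bucket;
	long next_delay = SCAN_INTERVAL;

	clock_gettime(CLOCK_MONOTONIC, &start);

	while (i < TABLE_SIZE) {
		/* ... reap expired entries in bucket i ... */
		i++;
		if (elapsed_ms(&start) > BUDGET_MS && i < TABLE_SIZE) {
			next_bucket = i;   /* resume here on the next run */
			next_delay = 0;    /* reschedule immediately */
			break;
		}
	}
	if (next_delay)
		next_bucket = 0;           /* finished a full pass */

	printf("stopped at bucket %u, next run in %lds\n", next_bucket, next_delay);
}

int main(void)
{
	gc_pass();
	return 0;
}
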
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4e1a9dba7077..e81af33b233b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -218,6 +218,7 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
if (!help)
return 0;
+ rcu_read_lock();
helper = rcu_dereference(help->helper);
if (!helper)
goto out;
@@ -233,9 +234,11 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
nla_nest_end(skb, nest_helper);
out:
+ rcu_read_unlock();
return 0;
nla_put_failure:
+ rcu_read_unlock();
return -1;
}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 55647409a9be..8f7a9837349c 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -697,13 +697,6 @@ void nf_conntrack_proto_pernet_init(struct net *net)
#endif
}
-void nf_conntrack_proto_pernet_fini(struct net *net)
-{
-#ifdef CONFIG_NF_CT_PROTO_GRE
- nf_ct_gre_keymap_flush(net);
-#endif
-}
-
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index db11e403d818..728eeb0aea87 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -55,19 +55,6 @@ static inline struct nf_gre_net *gre_pernet(struct net *net)
return &net->ct.nf_ct_proto.gre;
}
-void nf_ct_gre_keymap_flush(struct net *net)
-{
- struct nf_gre_net *net_gre = gre_pernet(net);
- struct nf_ct_gre_keymap *km, *tmp;
-
- spin_lock_bh(&keymap_lock);
- list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) {
- list_del_rcu(&km->list);
- kfree_rcu(km, rcu);
- }
- spin_unlock_bh(&keymap_lock);
-}
-
static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
const struct nf_conntrack_tuple *t)
{
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index f7e8baf59b51..af5115e127cf 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -823,6 +823,22 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
+static bool tcp_can_early_drop(const struct nf_conn *ct)
+{
+ switch (ct->proto.tcp.state) {
+ case TCP_CONNTRACK_FIN_WAIT:
+ case TCP_CONNTRACK_LAST_ACK:
+ case TCP_CONNTRACK_TIME_WAIT:
+ case TCP_CONNTRACK_CLOSE:
+ case TCP_CONNTRACK_CLOSE_WAIT:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -1030,10 +1046,30 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
if (index != TCP_RST_SET)
break;
- if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
+ /* If we are closing, tuple might have been re-used already.
+ * last_index, last_ack, and all other ct fields used for
+ * sequence/window validation are outdated in that case.
+ *
+ * As the conntrack can already be expired by GC under pressure,
+ * just skip validation checks.
+ */
+ if (tcp_can_early_drop(ct))
+ goto in_window;
+
+ /* td_maxack might be outdated if we let a SYN through earlier */
+ if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
+ ct->proto.tcp.last_index != TCP_SYN_SET) {
u32 seq = ntohl(th->seq);
- if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
+ /* If we are not in established state and SEQ=0 this is most
+ * likely an answer to a SYN we let go through above (last_index
+ * can be updated due to out-of-order ACKs).
+ */
+ if (seq == 0 && !nf_conntrack_tcp_established(ct))
+ break;
+
+ if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
+ !tn->tcp_ignore_invalid_rst) {
/* Invalid RST */
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
@@ -1134,6 +1170,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
+
+ if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
+ /* do not renew timeout on SYN retransmit.
+ *
+ * Else port reuse by client or NAT middlebox can keep
+ * entry alive indefinitely (including nat info).
+ */
+ return NF_ACCEPT;
+ }
+
/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
* pickup with loose=1. Avoid large ESTABLISHED timeout.
*/
@@ -1155,22 +1201,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-static bool tcp_can_early_drop(const struct nf_conn *ct)
-{
- switch (ct->proto.tcp.state) {
- case TCP_CONNTRACK_FIN_WAIT:
- case TCP_CONNTRACK_LAST_ACK:
- case TCP_CONNTRACK_TIME_WAIT:
- case TCP_CONNTRACK_CLOSE:
- case TCP_CONNTRACK_CLOSE_WAIT:
- return true;
- default:
- break;
- }
-
- return false;
-}
-
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
@@ -1437,6 +1467,9 @@ void nf_conntrack_tcp_init_net(struct net *net)
*/
tn->tcp_be_liberal = 0;
+ /* If it's non-zero, we turn off RST sequence number check */
+ tn->tcp_ignore_invalid_rst = 0;
+
/* Max number of the retransmitted packets without receiving an (acceptable)
* ACK from the destination. If this number is reached, a shorter timer
* will be started.
@@ -1445,7 +1478,6 @@ void nf_conntrack_tcp_init_net(struct net *net)
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
tn->offload_timeout = 30 * HZ;
- tn->offload_pickup = 120 * HZ;
#endif
}
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 698fee49e732..f8e3c0d2602f 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -271,7 +271,6 @@ void nf_conntrack_udp_init_net(struct net *net)
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
un->offload_timeout = 30 * HZ;
- un->offload_pickup = 30 * HZ;
#endif
}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f57a951c9b5e..e84b499b7bfa 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -575,16 +575,15 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
- NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP,
#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
- NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP,
#endif
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
@@ -775,12 +774,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = {
- .procname = "nf_flowtable_tcp_pickup",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
#endif
[NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
.procname = "nf_conntrack_tcp_loose",
@@ -798,6 +791,14 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ [NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
+ .procname = "nf_conntrack_tcp_ignore_invalid_rst",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
[NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = {
.procname = "nf_conntrack_tcp_max_retrans",
.maxlen = sizeof(u8),
@@ -823,12 +824,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = {
- .procname = "nf_flowtable_udp_pickup",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
#endif
[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
.procname = "nf_conntrack_icmp_timeout",
@@ -1004,11 +999,11 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
XASSIGN(LOOSE, &tn->tcp_loose);
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
+ XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
#undef XASSIGN
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
- table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup;
#endif
}
@@ -1101,7 +1096,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
- table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup;
#endif
nf_conntrack_standalone_init_tcp_sysctl(net, table);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 1e50908b1b7e..8788b519255e 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -183,7 +183,7 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
const struct nf_conntrack_l4proto *l4proto;
struct net *net = nf_ct_net(ct);
int l4num = nf_ct_protonum(ct);
- unsigned int timeout;
+ s32 timeout;
l4proto = nf_ct_l4proto_find(l4num);
if (!l4proto)
@@ -192,15 +192,20 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
if (l4num == IPPROTO_TCP) {
struct nf_tcp_net *tn = nf_tcp_pernet(net);
- timeout = tn->offload_pickup;
+ timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+ timeout -= tn->offload_timeout;
} else if (l4num == IPPROTO_UDP) {
struct nf_udp_net *tn = nf_udp_pernet(net);
- timeout = tn->offload_pickup;
+ timeout = tn->timeouts[UDP_CT_REPLIED];
+ timeout -= tn->offload_timeout;
} else {
return;
}
+ if (timeout < 0)
+ timeout = 0;
+
if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
ct->timeout = nfct_time_stamp + timeout;
}
@@ -331,7 +336,11 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
- flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+ u32 timeout;
+
+ timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+ if (READ_ONCE(flow->timeout) != timeout)
+ WRITE_ONCE(flow->timeout, timeout);
if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
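
With the nf_flowtable_{tcp,udp}_pickup sysctls gone, a flow falling out of hardware offload gets the protocol's established/replied timeout minus the offload timeout, clamped at zero. A quick arithmetic sketch with illustrative (not default) values:

#include <stdio.h>

int main(void)
{
	int established_timeout = 3600;  /* illustrative, not the kernel default */
	int offload_timeout     = 30;

	int timeout = established_timeout - offload_timeout;
	if (timeout < 0)
		timeout = 0;

	printf("remaining conntrack timeout after offload teardown: %d s\n", timeout);
	return 0;
}
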
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 390d4466567f..081437dd75b7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3446,7 +3446,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return 0;
err_destroy_flow_rule:
- nft_flow_rule_destroy(flow);
+ if (flow)
+ nft_flow_rule_destroy(flow);
err_release_rule:
nf_tables_rule_release(&ctx, rule);
err_release_expr:
@@ -8444,6 +8445,16 @@ static int nf_tables_commit_audit_alloc(struct list_head *adl,
return 0;
}
+static void nf_tables_commit_audit_free(struct list_head *adl)
+{
+ struct nft_audit_data *adp, *adn;
+
+ list_for_each_entry_safe(adp, adn, adl, list) {
+ list_del(&adp->list);
+ kfree(adp);
+ }
+}
+
static void nf_tables_commit_audit_collect(struct list_head *adl,
struct nft_table *table, u32 op)
{
@@ -8508,6 +8519,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
if (ret) {
nf_tables_commit_chain_prepare_cancel(net);
+ nf_tables_commit_audit_free(&adl);
return ret;
}
if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -8517,6 +8529,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
ret = nf_tables_commit_chain_prepare(net, chain);
if (ret < 0) {
nf_tables_commit_chain_prepare_cancel(net);
+ nf_tables_commit_audit_free(&adl);
return ret;
}
}
diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c
index 50b4e3c9347a..f554e2ea32ee 100644
--- a/net/netfilter/nfnetlink_hook.c
+++ b/net/netfilter/nfnetlink_hook.c
@@ -89,11 +89,15 @@ static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
if (!nest2)
goto cancel_nest;
- ret = nla_put_string(nlskb, NFTA_CHAIN_TABLE, chain->table->name);
+ ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
if (ret)
goto cancel_nest;
- ret = nla_put_string(nlskb, NFTA_CHAIN_NAME, chain->name);
+ ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
+ if (ret)
+ goto cancel_nest;
+
+ ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
if (ret)
goto cancel_nest;
@@ -109,18 +113,19 @@ cancel_nest:
static int nfnl_hook_dump_one(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
const struct nf_hook_ops *ops,
- unsigned int seq)
+ int family, unsigned int seq)
{
u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
unsigned int portid = NETLINK_CB(nlskb).portid;
struct nlmsghdr *nlh;
int ret = -EMSGSIZE;
+ u32 hooknum;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
char *module_name;
#endif
nlh = nfnl_msg_put(nlskb, portid, seq, event,
- NLM_F_MULTI, ops->pf, NFNETLINK_V0, 0);
+ NLM_F_MULTI, family, NFNETLINK_V0, 0);
if (!nlh)
goto nla_put_failure;
@@ -135,6 +140,7 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
if (module_name) {
char *end;
+ *module_name = '\0';
module_name += 2;
end = strchr(module_name, ']');
if (end) {
@@ -151,7 +157,12 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
goto nla_put_failure;
#endif
- ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(ops->hooknum));
+ if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS)
+ hooknum = NF_NETDEV_INGRESS;
+ else
+ hooknum = ops->hooknum;
+
+ ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
if (ret)
goto nla_put_failure;
@@ -174,7 +185,9 @@ static const struct nf_hook_entries *
nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
{
const struct nf_hook_entries *hook_head = NULL;
+#ifdef CONFIG_NETFILTER_INGRESS
struct net_device *netdev;
+#endif
switch (pf) {
case NFPROTO_IPV4:
@@ -257,7 +270,8 @@ static int nfnl_hook_dump(struct sk_buff *nlskb,
ops = nf_hook_entries_get_hook_ops(e);
for (; i < e->num_hook_entries; i++) {
- err = nfnl_hook_dump_one(nlskb, ctx, ops[i], cb->seq);
+ err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family,
+ cb->nlh->nlmsg_seq);
if (err)
break;
}
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
index 913ac45167f2..304e33cbed9b 100644
--- a/net/netfilter/nft_last.c
+++ b/net/netfilter/nft_last.c
@@ -23,15 +23,21 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
{
struct nft_last_priv *priv = nft_expr_priv(expr);
u64 last_jiffies;
+ u32 last_set = 0;
int err;
- if (tb[NFTA_LAST_MSECS]) {
+ if (tb[NFTA_LAST_SET]) {
+ last_set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
+ if (last_set == 1)
+ priv->last_set = 1;
+ }
+
+ if (last_set && tb[NFTA_LAST_MSECS]) {
err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
if (err < 0)
return err;
- priv->last_jiffies = jiffies + (unsigned long)last_jiffies;
- priv->last_set = 1;
+ priv->last_jiffies = jiffies - (unsigned long)last_jiffies;
}
return 0;
@@ -42,24 +48,30 @@ static void nft_last_eval(const struct nft_expr *expr,
{
struct nft_last_priv *priv = nft_expr_priv(expr);
- priv->last_jiffies = jiffies;
- priv->last_set = 1;
+ if (READ_ONCE(priv->last_jiffies) != jiffies)
+ WRITE_ONCE(priv->last_jiffies, jiffies);
+ if (READ_ONCE(priv->last_set) == 0)
+ WRITE_ONCE(priv->last_set, 1);
}
static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
+ unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
+ u32 last_set = READ_ONCE(priv->last_set);
__be64 msecs;
- if (time_before(jiffies, priv->last_jiffies))
- priv->last_set = 0;
+ if (time_before(jiffies, last_jiffies)) {
+ WRITE_ONCE(priv->last_set, 0);
+ last_set = 0;
+ }
- if (priv->last_set)
- msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+ if (last_set)
+ msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies);
else
msecs = 0;
- if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+ if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) ||
nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
goto nla_put_failure;
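
nft_last_eval() can run concurrently on many CPUs while nft_last_dump() reads the same fields, so the patch switches the shared last_jiffies/last_set fields to READ_ONCE()/WRITE_ONCE() accesses and only writes when a value actually changes. A user-space sketch of that access pattern (the macros below are simplified volatile-based stand-ins, not the kernel's full definitions):

#include <stdio.h>

#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long last_jiffies;
static unsigned int last_set;

static void last_eval(unsigned long now)
{
	/* avoid dirtying the cache line when nothing changed */
	if (READ_ONCE(last_jiffies) != now)
		WRITE_ONCE(last_jiffies, now);
	if (READ_ONCE(last_set) == 0)
		WRITE_ONCE(last_set, 1);
}

int main(void)
{
	last_eval(1000);
	printf("last_set=%u last_jiffies=%lu\n",
	       READ_ONCE(last_set), READ_ONCE(last_jiffies));
	return 0;
}
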
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 0840c635b752..be1595d6979d 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
break;
default:
- return -EAFNOSUPPORT;
+ if (tb[NFTA_NAT_REG_ADDR_MIN])
+ return -EAFNOSUPPORT;
+ break;
}
priv->family = family;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d233ac4a91b6..380f95aacdec 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2471,7 +2471,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
nlmsg_end(skb, rep);
- netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
+ nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
}
EXPORT_SYMBOL(netlink_ack);
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 9115f8a7dd45..a8da88db7893 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
- sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
- sock_put(sk);
- return;
+ goto out;
}
break;
@@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
nr_start_heartbeat(sk);
bh_unlock_sock(sk);
+out:
+ sock_put(sk);
}
static void nr_t2timer_expiry(struct timer_list *t)
@@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t)
nr_enquiry_response(sk);
}
bh_unlock_sock(sk);
+ sock_put(sk);
}
static void nr_t4timer_expiry(struct timer_list *t)
@@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t)
bh_lock_sock(sk);
nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
bh_unlock_sock(sk);
+ sock_put(sk);
}
static void nr_idletimer_expiry(struct timer_list *t)
@@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t)
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
+ sock_put(sk);
}
static void nr_t1timer_expiry(struct timer_list *t)
@@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
case NR_STATE_1:
if (nr->n2count == nr->n2) {
nr_disconnect(sk, ETIMEDOUT);
- bh_unlock_sock(sk);
- return;
+ goto out;
} else {
nr->n2count++;
nr_write_internal(sk, NR_CONNREQ);
@@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
case NR_STATE_2:
if (nr->n2count == nr->n2) {
nr_disconnect(sk, ETIMEDOUT);
- bh_unlock_sock(sk);
- return;
+ goto out;
} else {
nr->n2count++;
nr_write_internal(sk, NR_DISCREQ);
@@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
case NR_STATE_3:
if (nr->n2count == nr->n2) {
nr_disconnect(sk, ETIMEDOUT);
- bh_unlock_sock(sk);
- return;
+ goto out;
} else {
nr->n2count++;
nr_requeue_frames(sk);
@@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
}
nr_start_t1timer(sk);
+out:
bh_unlock_sock(sk);
+ sock_put(sk);
}
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index 1248faf4d6df..502e7a3f8948 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -308,7 +308,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
* Return Value: None
*/
static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
- char *flags, int count)
+ const char *flags, int count)
{
struct nci_uart *nu = (void *)tty->disc_data;
@@ -442,6 +442,7 @@ EXPORT_SYMBOL_GPL(nci_uart_set_config);
static struct tty_ldisc_ops nci_uart_ldisc = {
.owner = THIS_MODULE,
+ .num = N_NCI,
.name = "n_nci",
.open = nci_uart_tty_open,
.close = nci_uart_tty_close,
@@ -456,12 +457,12 @@ static struct tty_ldisc_ops nci_uart_ldisc = {
static int __init nci_uart_init(void)
{
- return tty_register_ldisc(N_NCI, &nci_uart_ldisc);
+ return tty_register_ldisc(&nci_uart_ldisc);
}
static void __exit nci_uart_exit(void)
{
- tty_unregister_ldisc(N_NCI);
+ tty_unregister_ldisc(&nci_uart_ldisc);
}
module_init(nci_uart_init);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index e586424d8b04..9713035b89e3 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -293,14 +293,14 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
}
/**
- * Parse vlan tag from vlan header.
+ * parse_vlan_tag - Parse vlan tag from vlan header.
* @skb: skb containing frame to parse
* @key_vh: pointer to parsed vlan tag
* @untag_vlan: should the vlan header be removed from the frame
*
- * Returns ERROR on memory error.
- * Returns 0 if it encounters a non-vlan or incomplete packet.
- * Returns 1 after successfully parsing vlan tag.
+ * Return: ERROR on memory error.
+ * %0 if it encounters a non-vlan or incomplete packet.
+ * %1 after successfully parsing vlan tag.
*/
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
bool untag_vlan)
@@ -532,6 +532,7 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
* L3 header
* @key: output flow key
*
+ * Return: %0 if successful, otherwise a negative errno value.
*/
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
@@ -748,8 +749,6 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
*
* The caller must ensure that skb->len >= ETH_HLEN.
*
- * Returns 0 if successful, otherwise a negative errno value.
- *
* Initializes @skb header fields as follows:
*
* - skb->mac_header: the L2 header.
@@ -764,6 +763,8 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
*
* - skb->protocol: the type of the data starting at skb->network_header.
* Equals to key->eth.type.
+ *
+ * Return: %0 if successful, otherwise a negative errno value.
*/
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c89c8da99f1a..d4a2db0b2299 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -670,13 +670,13 @@ static bool cmp_key(const struct sw_flow_key *key1,
{
const long *cp1 = (const long *)((const u8 *)key1 + key_start);
const long *cp2 = (const long *)((const u8 *)key2 + key_start);
- long diffs = 0;
int i;
for (i = key_start; i < key_end; i += sizeof(long))
- diffs |= *cp1++ ^ *cp2++;
+ if (*cp1++ ^ *cp2++)
+ return false;
- return diffs == 0;
+ return true;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
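
cmp_key() previously XOR-accumulated every word and tested the result once at the end; it now bails out on the first differing word, which shortens the common mismatch case. A toy user-space sketch of the new comparison (flat word arrays instead of the real sw_flow_key layout):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* toy comparison: return false on the first differing word, like the new cmp_key() */
static bool keys_equal(const unsigned long *a, const unsigned long *b, size_t nwords)
{
	for (size_t i = 0; i < nwords; i++)
		if (a[i] ^ b[i])
			return false;
	return true;
}

int main(void)
{
	unsigned long k1[4] = { 1, 2, 3, 4 };
	unsigned long k2[4] = { 1, 2, 9, 4 };

	printf("equal: %d\n", keys_equal(k1, k2, 4));  /* 0: differs at word 2 */
	return 0;
}
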
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 88deb5b41429..cf2ce5812489 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -507,6 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
}
skb->dev = vport->dev;
+ skb->tstamp = 0;
vport->ops->send(skb);
return;
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index fa611678af05..1dc955ca57d3 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -15,6 +15,7 @@ struct qrtr_mhi_dev {
struct qrtr_endpoint ep;
struct mhi_device *mhi_dev;
struct device *dev;
+ struct completion ready;
};
/* From MHI to QRTR */
@@ -50,6 +51,10 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
int rc;
+ rc = wait_for_completion_interruptible(&qdev->ready);
+ if (rc)
+ goto free_skb;
+
if (skb->sk)
sock_hold(skb->sk);
@@ -79,7 +84,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
int rc;
/* start channels */
- rc = mhi_prepare_for_transfer(mhi_dev);
+ rc = mhi_prepare_for_transfer(mhi_dev, 0);
if (rc)
return rc;
@@ -96,6 +101,15 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
if (rc)
return rc;
+ /* start channels */
+ rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+ if (rc) {
+ qrtr_endpoint_unregister(&qdev->ep);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+ return rc;
+ }
+
+ complete_all(&qdev->ready);
dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
return 0;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index e6f4a6202f82..171b7f3be6ef 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -518,8 +518,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
if (!ipc)
goto err;
- if (sock_queue_rcv_skb(&ipc->sk, skb))
+ if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+ qrtr_port_put(ipc);
goto err;
+ }
qrtr_port_put(ipc);
}
@@ -839,6 +841,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
ipc = qrtr_port_lookup(to->sq_port);
if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+ if (ipc)
+ qrtr_port_put(ipc);
kfree_skb(skb);
return -ENODEV;
}
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 9b6ffff72f2d..28c1b0022178 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
cpu_relax();
}
- ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+ ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
&off, PAGE_SIZE);
- if (unlikely(ret != ibmr->sg_len))
+ if (unlikely(ret != ibmr->sg_dma_len))
return ret < 0 ? ret : -EINVAL;
if (cmpxchg(&frmr->fr_state,
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index a656baa321fe..1b4b3514c94f 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -322,11 +322,22 @@ err_alloc:
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
+ struct flow_block_cb *block_cb, *tmp_cb;
struct tcf_ct_flow_table *ct_ft;
+ struct flow_block *block;
ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
rwork);
nf_flow_table_free(&ct_ft->nf_ft);
+
+ /* Remove any remaining callbacks before cleanup */
+ block = &ct_ft->nf_ft.flow_block;
+ down_write(&ct_ft->nf_ft.flow_block_lock);
+ list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ }
+ up_write(&ct_ft->nf_ft.flow_block_lock);
kfree(ct_ft);
module_put(THIS_MODULE);
@@ -1026,7 +1037,8 @@ do_nat:
/* This will take care of sending queued events
* even if the connection is already confirmed.
*/
- nf_conntrack_confirm(skb);
+ if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ goto drop;
}
if (!skip_add)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 7153c67f641e..2ef4cd2c848b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -273,6 +273,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
goto out;
}
+ /* All mirred/redirected skbs should clear previous ct info */
+ nf_reset_ct(skb2);
+
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
expects_nh = want_ingress || !m_mac_header_xmit;
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 81a1c67335be..8d17a543cc9f 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&d->tcf_tm);
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+ action = READ_ONCE(d->tcf_action);
+ if (unlikely(action == TC_ACT_SHOT))
+ goto drop;
+
+ if (!skb->dev || skb->dev->type != ARPHRD_ETHER)
+ return action;
+
/* XXX: if you are going to edit more fields beyond ethernet header
* (example when you add IP header replacement or vlan swap)
* then MAX_EDIT_LEN needs to change appropriately
@@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
if (unlikely(err)) /* best policy is to drop on the floor */
goto drop;
- action = READ_ONCE(d->tcf_action);
- if (unlikely(action == TC_ACT_SHOT))
- goto drop;
-
p = rcu_dereference_bh(d->skbmod_p);
flags = p->flags;
if (flags & SKBMOD_F_DMAC)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d73b5c5514a9..e3e79e9bd706 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2904,7 +2904,7 @@ replay:
break;
case RTM_GETCHAIN:
err = tc_chain_notify(chain, skb, n->nlmsg_seq,
- n->nlmsg_seq, n->nlmsg_type, true);
+ n->nlmsg_flags, n->nlmsg_type, true);
if (err < 0)
NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
break;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 5b274534264c..e9a8a2c86bbd 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r,
TCA_TCINDEX_POLICE);
}
+static void tcindex_free_perfect_hash(struct tcindex_data *cp);
+
static void tcindex_partial_destroy_work(struct work_struct *work)
{
struct tcindex_data *p = container_of(to_rcu_work(work),
@@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
rwork);
rtnl_lock();
- kfree(p->perfect);
+ if (p->perfect)
+ tcindex_free_perfect_hash(p);
kfree(p);
rtnl_unlock();
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 951542843cab..28af8b1e1bb1 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
skip_hash:
if (flow_override)
flow_hash = flow_override - 1;
- else if (use_skbhash)
+ else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
flow_hash = skb->hash;
if (host_override) {
dsthost_hash = host_override - 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d9ac60ffe927..a8dd06c74e31 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -913,7 +913,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
/* seqlock has the same scope of busylock, for NOLOCK qdisc */
spin_lock_init(&sch->seqlock);
- lockdep_set_class(&sch->busylock,
+ lockdep_set_class(&sch->seqlock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
seqcount_init(&sch->running);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 66fe2b82af9a..9c79374457a0 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -564,7 +564,7 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
/* if there's no entry, it means that the schedule didn't
* start yet, so force all gates to be open, this is in
* accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
- * "AdminGateSates"
+ * "AdminGateStates"
*/
gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
@@ -1739,8 +1739,6 @@ static void taprio_attach(struct Qdisc *sch)
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
- if (ntx < dev->real_num_tx_queues)
- qdisc_hash_add(qdisc, false);
} else {
old = dev_graft_qdisc(qdisc->dev_queue, sch);
qdisc_refcount_inc(sch);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 6f8319b828b0..db6b7373d16c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -857,12 +857,18 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
cur_key->key = key;
- if (replace) {
- list_del_init(&shkey->key_list);
- sctp_auth_shkey_release(shkey);
+ if (!replace) {
+ list_add(&cur_key->key_list, sh_keys);
+ return 0;
}
+
+ list_del_init(&shkey->key_list);
+ sctp_auth_shkey_release(shkey);
list_add(&cur_key->key_list, sh_keys);
+ if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+ sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+
return 0;
}
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 493fc01e5d2b..760b367644c1 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -284,10 +284,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
goto out;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
return err;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index eb3c2a34a31c..5ef86fdb1176 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1203,7 +1203,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;
- if (af->from_addr_param(&paddr, param, peer_port, 0))
+ if (!af->from_addr_param(&paddr, param, peer_port, 0))
return NULL;
return __sctp_lookup_association(net, laddr, &paddr, transportp);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e48dd909dee5..470dbdc27d58 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -100,8 +100,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
list_for_each_entry_safe(addr, temp,
&net->sctp.local_addr_list, list) {
if (addr->a.sa.sa_family == AF_INET6 &&
- ipv6_addr_equal(&addr->a.v6.sin6_addr,
- &ifa->addr)) {
+ ipv6_addr_equal(&addr->a.v6.sin6_addr,
+ &ifa->addr) &&
+ addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9032ce60d50e..4dfb5ea82b05 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -104,8 +104,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
if (asoc->param_flags & SPP_PMTUD_ENABLE)
sctp_assoc_sync_pmtu(asoc);
} else if (!sctp_transport_pl_enabled(tp) &&
- !sctp_transport_pmtu_check(tp)) {
- if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ asoc->param_flags & SPP_PMTUD_ENABLE) {
+ if (!sctp_transport_pmtu_check(tp))
sctp_assoc_sync_pmtu(asoc);
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 3c1fbf38f4f7..ec0f52567c16 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
retval = SCTP_SCOPE_LINK;
} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
retval = SCTP_SCOPE_PRIVATE;
} else {
retval = SCTP_SCOPE_GLOBAL;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 6c08e5048d38..b8fa8f1a7277 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1163,7 +1163,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
const struct sctp_transport *transport,
__u32 probe_size)
{
- struct sctp_sender_hb_info hbinfo;
+ struct sctp_sender_hb_info hbinfo = {};
struct sctp_chunk *retval;
retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 09a8f23ec709..32df65f68c12 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1109,12 +1109,12 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
if (!sctp_transport_pl_enabled(transport))
return SCTP_DISPOSITION_CONSUME;
- sctp_transport_pl_send(transport);
-
- reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
- if (!reply)
- return SCTP_DISPOSITION_NOMEM;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ if (sctp_transport_pl_send(transport)) {
+ reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
@@ -1274,8 +1274,7 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
!sctp_transport_pl_enabled(link))
return SCTP_DISPOSITION_DISCARD;
- sctp_transport_pl_recv(link);
- if (link->pl.state == SCTP_PL_COMPLETE)
+ if (sctp_transport_pl_recv(link))
return SCTP_DISPOSITION_CONSUME;
return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e64e01f61b11..6b937bfd4751 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4577,6 +4577,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
}
if (optlen > 0) {
+ /* Trim it to the biggest size sctp sockopt may need if necessary */
+ optlen = min_t(unsigned int, optlen,
+ PAGE_ALIGN(USHRT_MAX +
+ sizeof(__u16) * sizeof(struct sctp_reset_streams)));
kopt = memdup_sockptr(optval, optlen);
if (IS_ERR(kopt))
return PTR_ERR(kopt);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f23804f21c7..a3d3ca6dd63d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -258,16 +258,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
sctp_transport_pl_update(transport);
}
-void sctp_transport_pl_send(struct sctp_transport *t)
+bool sctp_transport_pl_send(struct sctp_transport *t)
{
- pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
- __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
-
- if (t->pl.probe_count < SCTP_MAX_PROBES) {
- t->pl.probe_count++;
- return;
- }
+ if (t->pl.probe_count < SCTP_MAX_PROBES)
+ goto out;
+ t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
+ t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
@@ -299,14 +296,27 @@ void sctp_transport_pl_send(struct sctp_transport *t)
sctp_assoc_sync_pmtu(t->asoc);
}
}
- t->pl.probe_count = 1;
+
+out:
+ if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
+ !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
+ t->pl.raise_count++;
+ return false;
+ }
+
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+ t->pl.probe_count++;
+ return true;
}
-void sctp_transport_pl_recv(struct sctp_transport *t)
+bool sctp_transport_pl_recv(struct sctp_transport *t)
{
pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
__func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+ t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
t->pl.pmtu = t->pl.probe_size;
t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
@@ -323,7 +333,7 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
if (!t->pl.probe_high) {
t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
SCTP_MAX_PLPMTU);
- return;
+ return false;
}
t->pl.probe_size += SCTP_PL_MIN_STEP;
if (t->pl.probe_size >= t->pl.probe_high) {
@@ -335,11 +345,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
sctp_assoc_sync_pmtu(t->asoc);
}
- } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
+ } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
/* Raise probe_size again after 30 * interval in Search Complete */
t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
t->pl.probe_size += SCTP_PL_MIN_STEP;
}
+
+ return t->pl.state == SCTP_PL_COMPLETE;
}
static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 898389611ae8..c038efc23ce3 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -795,7 +795,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
reason_code = SMC_CLC_DECL_NOSRVLINK;
goto connect_abort;
}
- smc->conn.lnk = link;
+ smc_switch_link_and_count(&smc->conn, link);
}
/* create send buffer and rmb */
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index cd0d7c908b2a..c160ff50c053 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -917,8 +917,8 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
return rc;
}
-static void smc_switch_link_and_count(struct smc_connection *conn,
- struct smc_link *to_lnk)
+void smc_switch_link_and_count(struct smc_connection *conn,
+ struct smc_link *to_lnk)
{
atomic_dec(&conn->lnk->conn_cnt);
conn->lnk = to_lnk;
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 6d6fd1397c87..c043ecdca5c4 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -97,6 +97,7 @@ struct smc_link {
unsigned long *wr_tx_mask; /* bit mask of used indexes */
u32 wr_tx_cnt; /* number of WR send buffers */
wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */
+ atomic_t wr_tx_refcnt; /* tx refs to link */
struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */
struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */
@@ -109,6 +110,7 @@ struct smc_link {
struct ib_reg_wr wr_reg; /* WR register memory region */
wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */
+ atomic_t wr_reg_refcnt; /* reg refs to link */
enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */
u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/
@@ -444,6 +446,8 @@ void smc_core_exit(void);
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
u8 link_idx, struct smc_init_info *ini);
void smcr_link_clear(struct smc_link *lnk, bool log);
+void smc_switch_link_and_count(struct smc_connection *conn,
+ struct smc_link *to_lnk);
int smcr_buf_map_lgr(struct smc_link *lnk);
int smcr_buf_reg_lgr(struct smc_link *lnk);
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 273eaf1bfe49..2e7560eba981 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -888,6 +888,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
if (!rc)
goto out;
out_clear_lnk:
+ lnk_new->state = SMC_LNK_INACTIVE;
smcr_link_clear(lnk_new, false);
out_reject:
smc_llc_cli_add_link_reject(qentry);
@@ -1184,6 +1185,7 @@ int smc_llc_srv_add_link(struct smc_link *link)
goto out_err;
return 0;
out_err:
+ link_new->state = SMC_LNK_INACTIVE;
smcr_link_clear(link_new, false);
return rc;
}
@@ -1286,10 +1288,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
del_llc->reason = 0;
smc_llc_send_message(lnk, &qentry->msg); /* response */
- if (smc_link_downing(&lnk_del->state)) {
- if (smc_switch_conns(lgr, lnk_del, false))
- smc_wr_tx_wait_no_pending_sends(lnk_del);
- }
+ if (smc_link_downing(&lnk_del->state))
+ smc_switch_conns(lgr, lnk_del, false);
smcr_link_clear(lnk_del, true);
active_links = smc_llc_active_link_count(lgr);
@@ -1805,8 +1805,6 @@ void smc_llc_link_clear(struct smc_link *link, bool log)
link->smcibdev->ibdev->name, link->ibport);
complete(&link->llc_testlink_resp);
cancel_delayed_work_sync(&link->llc_testlink_wrk);
- smc_wr_wakeup_reg_wait(link);
- smc_wr_wakeup_tx_wait(link);
}
/* register a new rtoken at the remote peer (for all links) */
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 289025cd545a..c79361dfcdfb 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
/* Wakeup sndbuf consumers from any context (IRQ or process)
* since there is more data to transmit; usable snd_wnd as max transmit
*/
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
struct smc_link *link = conn->lnk;
@@ -550,6 +550,22 @@ out_unlock:
return rc;
}
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+ struct smc_link *link = conn->lnk;
+ int rc = -ENOLINK;
+
+ if (!link)
+ return rc;
+
+ atomic_inc(&link->wr_tx_refcnt);
+ if (smc_link_usable(link))
+ rc = _smcr_tx_sndbuf_nonempty(conn);
+ if (atomic_dec_and_test(&link->wr_tx_refcnt))
+ wake_up_all(&link->wr_tx_wait);
+ return rc;
+}
+
static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index cbc73a7e4d59..a419e9af36b9 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -322,9 +322,12 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
if (rc)
return rc;
+ atomic_inc(&link->wr_reg_refcnt);
rc = wait_event_interruptible_timeout(link->wr_reg_wait,
(link->wr_reg_state != POSTED),
SMC_WR_REG_MR_WAIT_TIME);
+ if (atomic_dec_and_test(&link->wr_reg_refcnt))
+ wake_up_all(&link->wr_reg_wait);
if (!rc) {
/* timeout - terminate link */
smcr_link_down_cond_sched(link);
@@ -566,10 +569,15 @@ void smc_wr_free_link(struct smc_link *lnk)
return;
ibdev = lnk->smcibdev->ibdev;
+ smc_wr_wakeup_reg_wait(lnk);
+ smc_wr_wakeup_tx_wait(lnk);
+
if (smc_wr_tx_wait_no_pending_sends(lnk))
memset(lnk->wr_tx_mask, 0,
BITS_TO_LONGS(SMC_WR_BUF_CNT) *
sizeof(*lnk->wr_tx_mask));
+ wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
+ wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
if (lnk->wr_rx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -728,7 +736,9 @@ int smc_wr_create_link(struct smc_link *lnk)
memset(lnk->wr_tx_mask, 0,
BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
init_waitqueue_head(&lnk->wr_tx_wait);
+ atomic_set(&lnk->wr_tx_refcnt, 0);
init_waitqueue_head(&lnk->wr_reg_wait);
+ atomic_set(&lnk->wr_reg_refcnt, 0);
return rc;
dma_unmap:
diff --git a/net/socket.c b/net/socket.c
index bd9233da2497..0b2dad3bdf7f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -104,6 +104,7 @@
#include <linux/sockios.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>
+#include <linux/ptp_clock_kernel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
@@ -873,12 +874,18 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
- !skb_is_swtx_tstamp(skb, false_tstamp) &&
- ktime_to_timespec64_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
- empty = 0;
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
- !skb_is_err_queue(skb))
- put_ts_pktinfo(msg, skb);
+ !skb_is_swtx_tstamp(skb, false_tstamp)) {
+ if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ ptp_convert_timestamp(shhwtstamps, sk->sk_bind_phc);
+
+ if (ktime_to_timespec64_cond(shhwtstamps->hwtstamp,
+ tss.ts + 2)) {
+ empty = 0;
+
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ !skb_is_err_queue(skb))
+ put_ts_pktinfo(msg, skb);
+ }
}
if (!empty) {
if (sock_flag(sk, SOCK_TSTAMP_NEW))
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 9488600451e8..1c8de397d6ad 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -12,7 +12,7 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
auth.o auth_null.o auth_unix.o \
svc.o svcsock.o svcauth.o svcauth_unix.o \
addr.o rpcb_clnt.o timer.o xdr.o \
- sunrpc_syms.o cache.o rpc_pipe.o \
+ sunrpc_syms.o cache.o rpc_pipe.o sysfs.o \
svc_xprt.o \
xprtmultipath.o
sunrpc-$(CONFIG_SUNRPC_DEBUG) += debugfs.o
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 6dff64374bfe..a81be45f40d9 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1275,7 +1275,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
long long ctxh;
struct gss_api_mech *gm = NULL;
time64_t expiry;
- int status = -EINVAL;
+ int status;
memset(&rsci, 0, sizeof(rsci));
/* context handle */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 42623d6b8f0e..8b4de70e8ead 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -41,6 +41,7 @@
#include <trace/events/sunrpc.h>
#include "sunrpc.h"
+#include "sysfs.h"
#include "netns.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -327,6 +328,7 @@ err_auth:
out:
if (pipefs_sb)
rpc_put_sb_net(net);
+ rpc_sysfs_client_destroy(clnt);
rpc_clnt_debugfs_unregister(clnt);
return err;
}
@@ -410,6 +412,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
}
rpc_clnt_set_transport(clnt, xprt, timeout);
+ xprt->main = true;
xprt_iter_init(&clnt->cl_xpi, xps);
xprt_switch_put(xps);
@@ -423,6 +426,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
/* save the nodename */
rpc_clnt_set_nodename(clnt, nodename);
+ rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt));
err = rpc_client_register(clnt, args->authflavor, args->client_name);
if (err)
goto out_no_path;
@@ -733,6 +737,7 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt,
rpc_unregister_client(clnt);
__rpc_clnt_remove_pipedir(clnt);
+ rpc_sysfs_client_destroy(clnt);
rpc_clnt_debugfs_unregister(clnt);
/*
@@ -879,6 +884,7 @@ static void rpc_free_client_work(struct work_struct *work)
* so they cannot be called in rpciod, so they are handled separately
* here.
*/
+ rpc_sysfs_client_destroy(clnt);
rpc_clnt_debugfs_unregister(clnt);
rpc_free_clid(clnt);
rpc_clnt_remove_pipedir(clnt);
@@ -2100,6 +2106,30 @@ call_connect_status(struct rpc_task *task)
case -ENOTCONN:
case -EAGAIN:
case -ETIMEDOUT:
+ if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) &&
+ (task->tk_flags & RPC_TASK_MOVEABLE) &&
+ test_bit(XPRT_REMOVE, &xprt->state)) {
+ struct rpc_xprt *saved = task->tk_xprt;
+ struct rpc_xprt_switch *xps;
+
+ rcu_read_lock();
+ xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+ rcu_read_unlock();
+ if (xps->xps_nxprts > 1) {
+ long value;
+
+ xprt_release(task);
+ value = atomic_long_dec_return(&xprt->queuelen);
+ if (value == 0)
+ rpc_xprt_switch_remove_xprt(xps, saved);
+ xprt_put(saved);
+ task->tk_xprt = NULL;
+ task->tk_action = call_start;
+ }
+ xprt_switch_put(xps);
+ if (!task->tk_xprt)
+ return;
+ }
goto out_retry;
case -ENOBUFS:
rpc_delay(task, HZ >> 2);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 39ed0e0afe6d..c045f63d11fa 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -592,10 +592,20 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
struct rpc_task *task;
/*
+ * Service the privileged queue.
+ */
+ q = &queue->tasks[RPC_NR_PRIORITY - 1];
+ if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
+ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+ goto out;
+ }
+
+ /*
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
- if (!list_empty(q) && --queue->nr) {
+ if (!list_empty(q) && queue->nr) {
+ queue->nr--;
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
goto out;
}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 236fadc4a439..691c0000e9ea 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -24,6 +24,7 @@
#include <linux/sunrpc/xprtsock.h>
#include "sunrpc.h"
+#include "sysfs.h"
#include "netns.h"
unsigned int sunrpc_net_id;
@@ -103,6 +104,10 @@ init_sunrpc(void)
if (err)
goto out4;
+ err = rpc_sysfs_init();
+ if (err)
+ goto out5;
+
sunrpc_debugfs_init();
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
rpc_register_sysctl();
@@ -111,6 +116,8 @@ init_sunrpc(void)
init_socket_xprt(); /* clnt sock transport */
return 0;
+out5:
+ unregister_rpc_pipefs();
out4:
unregister_pernet_subsys(&sunrpc_net_ops);
out3:
@@ -124,7 +131,10 @@ out:
static void __exit
cleanup_sunrpc(void)
{
+ rpc_sysfs_exit();
rpc_cleanup_clids();
+ xprt_cleanup_ids();
+ xprt_multipath_cleanup_ids();
rpcauth_remove_module();
cleanup_socket_xprt();
svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
new file mode 100644
index 000000000000..64da3bfd28e6
--- /dev/null
+++ b/net/sunrpc/sysfs.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Anna Schumaker <Anna.Schumaker@Netapp.com>
+ */
+#include <linux/sunrpc/clnt.h>
+#include <linux/kobject.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/xprtsock.h>
+
+#include "sysfs.h"
+
+struct xprt_addr {
+ const char *addr;
+ struct rcu_head rcu;
+};
+
+static void free_xprt_addr(struct rcu_head *head)
+{
+ struct xprt_addr *addr = container_of(head, struct xprt_addr, rcu);
+
+ kfree(addr->addr);
+ kfree(addr);
+}
+
+static struct kset *rpc_sunrpc_kset;
+static struct kobject *rpc_sunrpc_client_kobj, *rpc_sunrpc_xprt_switch_kobj;
+
+static void rpc_sysfs_object_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_ns_type_operations *
+rpc_sysfs_object_child_ns_type(struct kobject *kobj)
+{
+ return &net_ns_type_operations;
+}
+
+static struct kobj_type rpc_sysfs_object_type = {
+ .release = rpc_sysfs_object_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = rpc_sysfs_object_child_ns_type,
+};
+
+static struct kobject *rpc_sysfs_object_alloc(const char *name,
+ struct kset *kset,
+ struct kobject *parent)
+{
+ struct kobject *kobj;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (kobj) {
+ kobj->kset = kset;
+ if (kobject_init_and_add(kobj, &rpc_sysfs_object_type,
+ parent, "%s", name) == 0)
+ return kobj;
+ kobject_put(kobj);
+ }
+ return NULL;
+}
+
+static inline struct rpc_xprt *
+rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj)
+{
+ struct rpc_sysfs_xprt *x = container_of(kobj,
+ struct rpc_sysfs_xprt, kobject);
+
+ return xprt_get(x->xprt);
+}
+
+static inline struct rpc_xprt_switch *
+rpc_sysfs_xprt_kobj_get_xprt_switch(struct kobject *kobj)
+{
+ struct rpc_sysfs_xprt *x = container_of(kobj,
+ struct rpc_sysfs_xprt, kobject);
+
+ return xprt_switch_get(x->xprt_switch);
+}
+
+static inline struct rpc_xprt_switch *
+rpc_sysfs_xprt_switch_kobj_get_xprt(struct kobject *kobj)
+{
+ struct rpc_sysfs_xprt_switch *x = container_of(kobj,
+ struct rpc_sysfs_xprt_switch, kobject);
+
+ return xprt_switch_get(x->xprt_switch);
+}
+
+static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ ssize_t ret;
+
+ if (!xprt)
+ return 0;
+ ret = sprintf(buf, "%s\n", xprt->address_strings[RPC_DISPLAY_ADDR]);
+ xprt_put(xprt);
+ return ret + 1;
+}
+
+static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ ssize_t ret;
+
+ if (!xprt)
+ return 0;
+
+ ret = sprintf(buf, "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n"
+ "max_num_slots=%u\nmin_num_slots=%u\nnum_reqs=%u\n"
+ "binding_q_len=%u\nsending_q_len=%u\npending_q_len=%u\n"
+ "backlog_q_len=%u\nmain_xprt=%d\nsrc_port=%u\n"
+ "tasks_queuelen=%ld\n",
+ xprt->last_used, xprt->cong, xprt->cwnd, xprt->max_reqs,
+ xprt->min_reqs, xprt->num_reqs, xprt->binding.qlen,
+ xprt->sending.qlen, xprt->pending.qlen,
+ xprt->backlog.qlen, xprt->main,
+ (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ?
+ get_srcport(xprt) : 0,
+ atomic_long_read(&xprt->queuelen));
+ xprt_put(xprt);
+ return ret + 1;
+}
+
+static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ ssize_t ret;
+ int locked, connected, connecting, close_wait, bound, binding,
+ closing, congested, cwnd_wait, write_space, offline, remove;
+
+ if (!xprt)
+ return 0;
+
+ if (!xprt->state) {
+ ret = sprintf(buf, "state=CLOSED\n");
+ } else {
+ locked = test_bit(XPRT_LOCKED, &xprt->state);
+ connected = test_bit(XPRT_CONNECTED, &xprt->state);
+ connecting = test_bit(XPRT_CONNECTING, &xprt->state);
+ close_wait = test_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ bound = test_bit(XPRT_BOUND, &xprt->state);
+ binding = test_bit(XPRT_BINDING, &xprt->state);
+ closing = test_bit(XPRT_CLOSING, &xprt->state);
+ congested = test_bit(XPRT_CONGESTED, &xprt->state);
+ cwnd_wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
+ write_space = test_bit(XPRT_WRITE_SPACE, &xprt->state);
+ offline = test_bit(XPRT_OFFLINE, &xprt->state);
+ remove = test_bit(XPRT_REMOVE, &xprt->state);
+
+ ret = sprintf(buf, "state=%s %s %s %s %s %s %s %s %s %s %s %s\n",
+ locked ? "LOCKED" : "",
+ connected ? "CONNECTED" : "",
+ connecting ? "CONNECTING" : "",
+ close_wait ? "CLOSE_WAIT" : "",
+ bound ? "BOUND" : "",
+			      binding ? "BINDING" : "",
+ closing ? "CLOSING" : "",
+ congested ? "CONGESTED" : "",
+ cwnd_wait ? "CWND_WAIT" : "",
+ write_space ? "WRITE_SPACE" : "",
+ offline ? "OFFLINE" : "",
+ remove ? "REMOVE" : "");
+ }
+
+ xprt_put(xprt);
+ return ret + 1;
+}
+
+static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt_switch *xprt_switch =
+ rpc_sysfs_xprt_switch_kobj_get_xprt(kobj);
+ ssize_t ret;
+
+ if (!xprt_switch)
+ return 0;
+ ret = sprintf(buf, "num_xprts=%u\nnum_active=%u\nqueue_len=%ld\n",
+ xprt_switch->xps_nxprts, xprt_switch->xps_nactive,
+ atomic_long_read(&xprt_switch->xps_queuelen));
+ xprt_switch_put(xprt_switch);
+ return ret + 1;
+}
+
+static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ struct sockaddr *saddr;
+ char *dst_addr;
+ int port;
+ struct xprt_addr *saved_addr;
+ size_t buf_len;
+
+ if (!xprt)
+ return 0;
+ if (!(xprt->xprt_class->ident == XPRT_TRANSPORT_TCP ||
+ xprt->xprt_class->ident == XPRT_TRANSPORT_RDMA)) {
+ xprt_put(xprt);
+ return -EOPNOTSUPP;
+ }
+
+ if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
+ count = -EINTR;
+ goto out_put;
+ }
+ saddr = (struct sockaddr *)&xprt->addr;
+ port = rpc_get_port(saddr);
+
+	/* buf_len is the len until the first occurrence of either
+ * '\n' or '\0'
+ */
+ buf_len = strcspn(buf, "\n");
+
+ dst_addr = kstrndup(buf, buf_len, GFP_KERNEL);
+ if (!dst_addr)
+ goto out_err;
+ saved_addr = kzalloc(sizeof(*saved_addr), GFP_KERNEL);
+ if (!saved_addr)
+ goto out_err_free;
+ saved_addr->addr =
+ rcu_dereference_raw(xprt->address_strings[RPC_DISPLAY_ADDR]);
+ rcu_assign_pointer(xprt->address_strings[RPC_DISPLAY_ADDR], dst_addr);
+ call_rcu(&saved_addr->rcu, free_xprt_addr);
+ xprt->addrlen = rpc_pton(xprt->xprt_net, buf, buf_len, saddr,
+ sizeof(*saddr));
+ rpc_set_port(saddr, port);
+
+ xprt_force_disconnect(xprt);
+out:
+ xprt_release_write(xprt, NULL);
+out_put:
+ xprt_put(xprt);
+ return count;
+out_err_free:
+ kfree(dst_addr);
+out_err:
+ count = -ENOMEM;
+ goto out;
+}
+
+static ssize_t rpc_sysfs_xprt_state_change(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ int offline = 0, online = 0, remove = 0;
+ struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
+
+ if (!xprt)
+ return 0;
+
+ if (!strncmp(buf, "offline", 7))
+ offline = 1;
+ else if (!strncmp(buf, "online", 6))
+ online = 1;
+ else if (!strncmp(buf, "remove", 6))
+ remove = 1;
+ else
+ return -EINVAL;
+
+ if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
+ count = -EINTR;
+ goto out_put;
+ }
+ if (xprt->main) {
+ count = -EINVAL;
+ goto release_tasks;
+ }
+ if (offline) {
+ set_bit(XPRT_OFFLINE, &xprt->state);
+ spin_lock(&xps->xps_lock);
+ xps->xps_nactive--;
+ spin_unlock(&xps->xps_lock);
+ } else if (online) {
+ clear_bit(XPRT_OFFLINE, &xprt->state);
+ spin_lock(&xps->xps_lock);
+ xps->xps_nactive++;
+ spin_unlock(&xps->xps_lock);
+ } else if (remove) {
+ if (test_bit(XPRT_OFFLINE, &xprt->state)) {
+ set_bit(XPRT_REMOVE, &xprt->state);
+ xprt_force_disconnect(xprt);
+ if (test_bit(XPRT_CONNECTED, &xprt->state)) {
+ if (!xprt->sending.qlen &&
+ !xprt->pending.qlen &&
+ !xprt->backlog.qlen &&
+ !atomic_long_read(&xprt->queuelen))
+ rpc_xprt_switch_remove_xprt(xps, xprt);
+ }
+ } else {
+ count = -EINVAL;
+ }
+ }
+
+release_tasks:
+ xprt_release_write(xprt, NULL);
+out_put:
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+ return count;
+}
+
+int rpc_sysfs_init(void)
+{
+ rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj);
+ if (!rpc_sunrpc_kset)
+ return -ENOMEM;
+ rpc_sunrpc_client_kobj =
+ rpc_sysfs_object_alloc("rpc-clients", rpc_sunrpc_kset, NULL);
+ if (!rpc_sunrpc_client_kobj)
+ goto err_client;
+ rpc_sunrpc_xprt_switch_kobj =
+ rpc_sysfs_object_alloc("xprt-switches", rpc_sunrpc_kset, NULL);
+ if (!rpc_sunrpc_xprt_switch_kobj)
+ goto err_switch;
+ return 0;
+err_switch:
+ kobject_put(rpc_sunrpc_client_kobj);
+ rpc_sunrpc_client_kobj = NULL;
+err_client:
+ kset_unregister(rpc_sunrpc_kset);
+ rpc_sunrpc_kset = NULL;
+ return -ENOMEM;
+}
+
+static void rpc_sysfs_client_release(struct kobject *kobj)
+{
+ struct rpc_sysfs_client *c;
+
+ c = container_of(kobj, struct rpc_sysfs_client, kobject);
+ kfree(c);
+}
+
+static void rpc_sysfs_xprt_switch_release(struct kobject *kobj)
+{
+ struct rpc_sysfs_xprt_switch *xprt_switch;
+
+ xprt_switch = container_of(kobj, struct rpc_sysfs_xprt_switch, kobject);
+ kfree(xprt_switch);
+}
+
+static void rpc_sysfs_xprt_release(struct kobject *kobj)
+{
+ struct rpc_sysfs_xprt *xprt;
+
+ xprt = container_of(kobj, struct rpc_sysfs_xprt, kobject);
+ kfree(xprt);
+}
+
+static const void *rpc_sysfs_client_namespace(struct kobject *kobj)
+{
+ return container_of(kobj, struct rpc_sysfs_client, kobject)->net;
+}
+
+static const void *rpc_sysfs_xprt_switch_namespace(struct kobject *kobj)
+{
+ return container_of(kobj, struct rpc_sysfs_xprt_switch, kobject)->net;
+}
+
+static const void *rpc_sysfs_xprt_namespace(struct kobject *kobj)
+{
+ return container_of(kobj, struct rpc_sysfs_xprt,
+ kobject)->xprt->xprt_net;
+}
+
+static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr,
+ 0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store);
+
+static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info,
+ 0444, rpc_sysfs_xprt_info_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state,
+ 0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change);
+
+static struct attribute *rpc_sysfs_xprt_attrs[] = {
+ &rpc_sysfs_xprt_dstaddr.attr,
+ &rpc_sysfs_xprt_info.attr,
+ &rpc_sysfs_xprt_change_state.attr,
+ NULL,
+};
+
+static struct kobj_attribute rpc_sysfs_xprt_switch_info =
+ __ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL);
+
+static struct attribute *rpc_sysfs_xprt_switch_attrs[] = {
+ &rpc_sysfs_xprt_switch_info.attr,
+ NULL,
+};
+
+static struct kobj_type rpc_sysfs_client_type = {
+ .release = rpc_sysfs_client_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = rpc_sysfs_client_namespace,
+};
+
+static struct kobj_type rpc_sysfs_xprt_switch_type = {
+ .release = rpc_sysfs_xprt_switch_release,
+ .default_attrs = rpc_sysfs_xprt_switch_attrs,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = rpc_sysfs_xprt_switch_namespace,
+};
+
+static struct kobj_type rpc_sysfs_xprt_type = {
+ .release = rpc_sysfs_xprt_release,
+ .default_attrs = rpc_sysfs_xprt_attrs,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .namespace = rpc_sysfs_xprt_namespace,
+};
+
+void rpc_sysfs_exit(void)
+{
+ kobject_put(rpc_sunrpc_client_kobj);
+ kobject_put(rpc_sunrpc_xprt_switch_kobj);
+ kset_unregister(rpc_sunrpc_kset);
+}
+
+static struct rpc_sysfs_client *rpc_sysfs_client_alloc(struct kobject *parent,
+ struct net *net,
+ int clid)
+{
+ struct rpc_sysfs_client *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p) {
+ p->net = net;
+ p->kobject.kset = rpc_sunrpc_kset;
+ if (kobject_init_and_add(&p->kobject, &rpc_sysfs_client_type,
+ parent, "clnt-%d", clid) == 0)
+ return p;
+ kobject_put(&p->kobject);
+ }
+ return NULL;
+}
+
+static struct rpc_sysfs_xprt_switch *
+rpc_sysfs_xprt_switch_alloc(struct kobject *parent,
+ struct rpc_xprt_switch *xprt_switch,
+ struct net *net,
+ gfp_t gfp_flags)
+{
+ struct rpc_sysfs_xprt_switch *p;
+
+ p = kzalloc(sizeof(*p), gfp_flags);
+ if (p) {
+ p->net = net;
+ p->kobject.kset = rpc_sunrpc_kset;
+ if (kobject_init_and_add(&p->kobject,
+ &rpc_sysfs_xprt_switch_type,
+ parent, "switch-%d",
+ xprt_switch->xps_id) == 0)
+ return p;
+ kobject_put(&p->kobject);
+ }
+ return NULL;
+}
+
+static struct rpc_sysfs_xprt *rpc_sysfs_xprt_alloc(struct kobject *parent,
+ struct rpc_xprt *xprt,
+ gfp_t gfp_flags)
+{
+ struct rpc_sysfs_xprt *p;
+
+ p = kzalloc(sizeof(*p), gfp_flags);
+ if (!p)
+ goto out;
+ p->kobject.kset = rpc_sunrpc_kset;
+ if (kobject_init_and_add(&p->kobject, &rpc_sysfs_xprt_type,
+ parent, "xprt-%d-%s", xprt->id,
+ xprt->address_strings[RPC_DISPLAY_PROTO]) == 0)
+ return p;
+ kobject_put(&p->kobject);
+out:
+ return NULL;
+}
+
+void rpc_sysfs_client_setup(struct rpc_clnt *clnt,
+ struct rpc_xprt_switch *xprt_switch,
+ struct net *net)
+{
+ struct rpc_sysfs_client *rpc_client;
+
+ rpc_client = rpc_sysfs_client_alloc(rpc_sunrpc_client_kobj,
+ net, clnt->cl_clid);
+ if (rpc_client) {
+ char name[] = "switch";
+ struct rpc_sysfs_xprt_switch *xswitch =
+ (struct rpc_sysfs_xprt_switch *)xprt_switch->xps_sysfs;
+ int ret;
+
+ clnt->cl_sysfs = rpc_client;
+ rpc_client->clnt = clnt;
+ rpc_client->xprt_switch = xprt_switch;
+ kobject_uevent(&rpc_client->kobject, KOBJ_ADD);
+ ret = sysfs_create_link_nowarn(&rpc_client->kobject,
+ &xswitch->kobject, name);
+ if (ret)
+ pr_warn("can't create link to %s in sysfs (%d)\n",
+ name, ret);
+ }
+}
+
+void rpc_sysfs_xprt_switch_setup(struct rpc_xprt_switch *xprt_switch,
+ struct rpc_xprt *xprt,
+ gfp_t gfp_flags)
+{
+ struct rpc_sysfs_xprt_switch *rpc_xprt_switch;
+ struct net *net;
+
+ if (xprt_switch->xps_net)
+ net = xprt_switch->xps_net;
+ else
+ net = xprt->xprt_net;
+ rpc_xprt_switch =
+ rpc_sysfs_xprt_switch_alloc(rpc_sunrpc_xprt_switch_kobj,
+ xprt_switch, net, gfp_flags);
+ if (rpc_xprt_switch) {
+ xprt_switch->xps_sysfs = rpc_xprt_switch;
+ rpc_xprt_switch->xprt_switch = xprt_switch;
+ rpc_xprt_switch->xprt = xprt;
+ kobject_uevent(&rpc_xprt_switch->kobject, KOBJ_ADD);
+ }
+}
+
+void rpc_sysfs_xprt_setup(struct rpc_xprt_switch *xprt_switch,
+ struct rpc_xprt *xprt,
+ gfp_t gfp_flags)
+{
+ struct rpc_sysfs_xprt *rpc_xprt;
+ struct rpc_sysfs_xprt_switch *switch_obj =
+ (struct rpc_sysfs_xprt_switch *)xprt_switch->xps_sysfs;
+
+ rpc_xprt = rpc_sysfs_xprt_alloc(&switch_obj->kobject, xprt, gfp_flags);
+ if (rpc_xprt) {
+ xprt->xprt_sysfs = rpc_xprt;
+ rpc_xprt->xprt = xprt;
+ rpc_xprt->xprt_switch = xprt_switch;
+ kobject_uevent(&rpc_xprt->kobject, KOBJ_ADD);
+ }
+}
+
+void rpc_sysfs_client_destroy(struct rpc_clnt *clnt)
+{
+ struct rpc_sysfs_client *rpc_client = clnt->cl_sysfs;
+
+ if (rpc_client) {
+ char name[] = "switch";
+
+ sysfs_remove_link(&rpc_client->kobject, name);
+ kobject_uevent(&rpc_client->kobject, KOBJ_REMOVE);
+ kobject_del(&rpc_client->kobject);
+ kobject_put(&rpc_client->kobject);
+ clnt->cl_sysfs = NULL;
+ }
+}
+
+void rpc_sysfs_xprt_switch_destroy(struct rpc_xprt_switch *xprt_switch)
+{
+ struct rpc_sysfs_xprt_switch *rpc_xprt_switch = xprt_switch->xps_sysfs;
+
+ if (rpc_xprt_switch) {
+ kobject_uevent(&rpc_xprt_switch->kobject, KOBJ_REMOVE);
+ kobject_del(&rpc_xprt_switch->kobject);
+ kobject_put(&rpc_xprt_switch->kobject);
+ xprt_switch->xps_sysfs = NULL;
+ }
+}
+
+void rpc_sysfs_xprt_destroy(struct rpc_xprt *xprt)
+{
+ struct rpc_sysfs_xprt *rpc_xprt = xprt->xprt_sysfs;
+
+ if (rpc_xprt) {
+ kobject_uevent(&rpc_xprt->kobject, KOBJ_REMOVE);
+ kobject_del(&rpc_xprt->kobject);
+ kobject_put(&rpc_xprt->kobject);
+ xprt->xprt_sysfs = NULL;
+ }
+}
diff --git a/net/sunrpc/sysfs.h b/net/sunrpc/sysfs.h
new file mode 100644
index 000000000000..6620cebd1037
--- /dev/null
+++ b/net/sunrpc/sysfs.h
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Anna Schumaker <Anna.Schumaker@Netapp.com>
+ */
+#ifndef __SUNRPC_SYSFS_H
+#define __SUNRPC_SYSFS_H
+
+struct rpc_sysfs_client {
+ struct kobject kobject;
+ struct net *net;
+ struct rpc_clnt *clnt;
+ struct rpc_xprt_switch *xprt_switch;
+};
+
+struct rpc_sysfs_xprt_switch {
+ struct kobject kobject;
+ struct net *net;
+ struct rpc_xprt_switch *xprt_switch;
+ struct rpc_xprt *xprt;
+};
+
+struct rpc_sysfs_xprt {
+ struct kobject kobject;
+ struct rpc_xprt *xprt;
+ struct rpc_xprt_switch *xprt_switch;
+};
+
+int rpc_sysfs_init(void);
+void rpc_sysfs_exit(void);
+
+void rpc_sysfs_client_setup(struct rpc_clnt *clnt,
+ struct rpc_xprt_switch *xprt_switch,
+ struct net *net);
+void rpc_sysfs_client_destroy(struct rpc_clnt *clnt);
+void rpc_sysfs_xprt_switch_setup(struct rpc_xprt_switch *xprt_switch,
+ struct rpc_xprt *xprt, gfp_t gfp_flags);
+void rpc_sysfs_xprt_switch_destroy(struct rpc_xprt_switch *xprt);
+void rpc_sysfs_xprt_setup(struct rpc_xprt_switch *xprt_switch,
+ struct rpc_xprt *xprt, gfp_t gfp_flags);
+void rpc_sysfs_xprt_destroy(struct rpc_xprt *xprt);
+
+#endif
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 3964ff74ee51..ca10ba2626f2 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1230,10 +1230,9 @@ static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
void *kaddr;
maxlen = xdr->buf->page_len;
- if (base >= maxlen) {
- base = maxlen;
- maxlen = 0;
- } else
+ if (base >= maxlen)
+ return 0;
+ else
maxlen -= base;
if (len > maxlen)
len = maxlen;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3509a7f139b9..fb6db09725c7 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -55,6 +55,7 @@
#include <trace/events/sunrpc.h>
#include "sunrpc.h"
+#include "sysfs.h"
/*
* Local variables
@@ -443,7 +444,7 @@ void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
-static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task != task)
return;
@@ -1746,6 +1747,30 @@ static void xprt_free_all_slots(struct rpc_xprt *xprt)
}
}
+static DEFINE_IDA(rpc_xprt_ids);
+
+void xprt_cleanup_ids(void)
+{
+ ida_destroy(&rpc_xprt_ids);
+}
+
+static int xprt_alloc_id(struct rpc_xprt *xprt)
+{
+ int id;
+
+ id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ xprt->id = id;
+ return 0;
+}
+
+static void xprt_free_id(struct rpc_xprt *xprt)
+{
+ ida_simple_remove(&rpc_xprt_ids, xprt->id);
+}
+
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_alloc)
@@ -1758,6 +1783,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
if (xprt == NULL)
goto out;
+ xprt_alloc_id(xprt);
xprt_init(xprt, net);
for (i = 0; i < num_prealloc; i++) {
@@ -1786,6 +1812,8 @@ void xprt_free(struct rpc_xprt *xprt)
{
put_net(xprt->xprt_net);
xprt_free_all_slots(xprt);
+ xprt_free_id(xprt);
+ rpc_sysfs_xprt_destroy(xprt);
kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 1b4073131c6f..c60820e45082 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -19,6 +19,8 @@
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/xprtmultipath.h>
+#include "sysfs.h"
+
typedef struct rpc_xprt *(*xprt_switch_find_xprt_t)(struct rpc_xprt_switch *xps,
const struct rpc_xprt *cur);
@@ -55,6 +57,7 @@ void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
if (xps->xps_net == xprt->xprt_net || xps->xps_net == NULL)
xprt_switch_add_xprt_locked(xps, xprt);
spin_unlock(&xps->xps_lock);
+ rpc_sysfs_xprt_setup(xps, xprt, GFP_KERNEL);
}
static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
@@ -62,7 +65,8 @@ static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
{
if (unlikely(xprt == NULL))
return;
- xps->xps_nactive--;
+ if (!test_bit(XPRT_OFFLINE, &xprt->state))
+ xps->xps_nactive--;
xps->xps_nxprts--;
if (xps->xps_nxprts == 0)
xps->xps_net = NULL;
@@ -86,6 +90,30 @@ void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
xprt_put(xprt);
}
+static DEFINE_IDA(rpc_xprtswitch_ids);
+
+void xprt_multipath_cleanup_ids(void)
+{
+ ida_destroy(&rpc_xprtswitch_ids);
+}
+
+static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags)
+{
+ int id;
+
+ id = ida_simple_get(&rpc_xprtswitch_ids, 0, 0, gfp_flags);
+ if (id < 0)
+ return id;
+
+ xps->xps_id = id;
+ return 0;
+}
+
+static void xprt_switch_free_id(struct rpc_xprt_switch *xps)
+{
+ ida_simple_remove(&rpc_xprtswitch_ids, xps->xps_id);
+}
+
/**
* xprt_switch_alloc - Allocate a new struct rpc_xprt_switch
* @xprt: pointer to struct rpc_xprt
@@ -103,12 +131,15 @@ struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt,
if (xps != NULL) {
spin_lock_init(&xps->xps_lock);
kref_init(&xps->xps_kref);
+ xprt_switch_alloc_id(xps, gfp_flags);
xps->xps_nxprts = xps->xps_nactive = 0;
atomic_long_set(&xps->xps_queuelen, 0);
xps->xps_net = NULL;
INIT_LIST_HEAD(&xps->xps_xprt_list);
xps->xps_iter_ops = &rpc_xprt_iter_singular;
+ rpc_sysfs_xprt_switch_setup(xps, xprt, gfp_flags);
xprt_switch_add_xprt_locked(xps, xprt);
+ rpc_sysfs_xprt_setup(xps, xprt, gfp_flags);
}
return xps;
@@ -136,6 +167,8 @@ static void xprt_switch_free(struct kref *kref)
struct rpc_xprt_switch, xps_kref);
xprt_switch_free_entries(xps);
+ rpc_sysfs_xprt_switch_destroy(xps);
+ xprt_switch_free_id(xps);
kfree_rcu(xps, xps_rcu);
}
@@ -198,7 +231,8 @@ void xprt_iter_default_rewind(struct rpc_xprt_iter *xpi)
static
bool xprt_is_active(const struct rpc_xprt *xprt)
{
- return kref_read(&xprt->kref) != 0;
+ return (kref_read(&xprt->kref) != 0 &&
+ !test_bit(XPRT_OFFLINE, &xprt->state));
}
static
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 5238bc829235..1e651447dc4e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -483,7 +483,7 @@ out_overflow:
* @iov: kvec to write
*
* Returns:
- * On succes, returns zero
+ * On success, returns zero
* %-E2BIG if the client-provided Write chunk is too small
* %-ENOMEM if a resource has been exhausted
* %-EIO if an rdma-rw error occurred
@@ -504,7 +504,7 @@ static int svc_rdma_iov_write(struct svc_rdma_write_info *info,
* @length: number of bytes to write
*
* Returns:
- * On succes, returns zero
+ * On success, returns zero
* %-E2BIG if the client-provided Write chunk is too small
* %-ENOMEM if a resource has been exhausted
* %-EIO if an rdma-rw error occurred
@@ -526,7 +526,7 @@ static int svc_rdma_pages_write(struct svc_rdma_write_info *info,
* @data: pointer to write arguments
*
* Returns:
- * On succes, returns zero
+ * On success, returns zero
* %-E2BIG if the client-provided Write chunk is too small
* %-ENOMEM if a resource has been exhausted
* %-EIO if an rdma-rw error occurred
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 19a49d26b1e4..9c2ffc67c0fd 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -73,6 +73,7 @@ unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;
+static struct xprt_class xprt_rdma;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -349,6 +350,7 @@ xprt_setup_rdma(struct xprt_create *args)
/* Ensure xprt->addr holds valid server TCP (not RDMA)
* address, for any side protocols which peek at it */
xprt->prot = IPPROTO_TCP;
+ xprt->xprt_class = &xprt_rdma;
xprt->addrlen = args->addrlen;
memcpy(&xprt->addr, sap, xprt->addrlen);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 316d04945587..e573dcecdd66 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -91,6 +91,11 @@ static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
static struct ctl_table_header *sunrpc_table_header;
+static struct xprt_class xs_local_transport;
+static struct xprt_class xs_udp_transport;
+static struct xprt_class xs_tcp_transport;
+static struct xprt_class xs_bc_tcp_transport;
+
/*
* FIXME: changing the UDP slot table size should also resize the UDP
* socket buffers for existing UDP transports
@@ -1648,6 +1653,13 @@ static int xs_get_srcport(struct sock_xprt *transport)
return port;
}
+unsigned short get_srcport(struct rpc_xprt *xprt)
+{
+ struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
+ return sock->srcport;
+}
+EXPORT_SYMBOL(get_srcport);
+
static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
if (transport->srcport != 0)
@@ -1689,7 +1701,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
err = kernel_bind(sock, (struct sockaddr *)&myaddr,
transport->xprt.addrlen);
if (err == 0) {
- transport->srcport = port;
+ if (transport->xprt.reuseport)
+ transport->srcport = port;
break;
}
last = port;
@@ -2779,6 +2792,7 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = 0;
+ xprt->xprt_class = &xs_local_transport;
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
@@ -2848,6 +2862,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_UDP;
+ xprt->xprt_class = &xs_udp_transport;
/* XXX: header size can vary due to auth type, IPv6, etc. */
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
@@ -2928,6 +2943,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_TCP;
+ xprt->xprt_class = &xs_tcp_transport;
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
@@ -3001,6 +3017,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_TCP;
+ xprt->xprt_class = &xs_bc_tcp_transport;
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->timeout = &xs_tcp_default_timeout;
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index e5c43d4d5a75..c9391d38de85 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -898,16 +898,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
if (unlikely(!aead))
return -ENOKEY;
- /* Cow skb data if needed */
- if (likely(!skb_cloned(skb) &&
- (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
- nsg = 1 + skb_shinfo(skb)->nr_frags;
- } else {
- nsg = skb_cow_data(skb, 0, &unused);
- if (unlikely(nsg < 0)) {
- pr_err("RX: skb_cow_data() returned %d\n", nsg);
- return nsg;
- }
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ pr_err("RX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
}
/* Allocate memory for the AEAD operation */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cf586840caeb..1b7a487c8841 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -913,7 +913,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
dnode, l->addr, dport, 0, 0);
if (!skb)
- return -ENOMEM;
+ return -ENOBUFS;
msg_set_dest_droppable(buf_msg(skb), true);
TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
skb_queue_tail(&l->wakeupq, skb);
@@ -1031,7 +1031,7 @@ void tipc_link_reset(struct tipc_link *l)
*
* Consumes the buffer chain.
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS or -ENOMEM
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
*/
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
@@ -1089,7 +1089,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
if (!_skb) {
kfree_skb(skb);
__skb_queue_purge(list);
- return -ENOMEM;
+ return -ENOBUFS;
}
__skb_queue_tail(transmq, skb);
tipc_link_set_skb_retransmit_time(skb, l);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 34a97ea36cc8..8754bd885169 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -158,6 +158,7 @@ static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -1515,8 +1516,13 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
rc = 0;
}
- if (unlikely(syn && !rc))
+ if (unlikely(syn && !rc)) {
tipc_set_sk_state(sk, TIPC_CONNECTING);
+ if (dlen && timeout) {
+ timeout = msecs_to_jiffies(timeout);
+ tipc_wait_for_connect(sock, &timeout);
+ }
+ }
return rc ? rc : dlen;
}
@@ -1564,7 +1570,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
return -EMSGSIZE;
/* Handle implicit connection setup */
- if (unlikely(dest)) {
+ if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
rc = __tipc_sendmsg(sock, m, dlen);
if (dlen && dlen == rc) {
tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
@@ -2646,7 +2652,7 @@ static int tipc_listen(struct socket *sock, int len)
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
/* True wake-one mechanism for incoming connections: only
@@ -2655,12 +2661,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
* anymore, the common case will execute the loop only once.
*/
for (;;) {
- prepare_to_wait_exclusive(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2672,7 +2678,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
if (signal_pending(current))
break;
}
- finish_wait(sk_sleep(sk), &wait);
return err;
}
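
Editor's aside: the hunks above replace prepare_to_wait_exclusive()/schedule_timeout() with DEFINE_WAIT_FUNC(wait, woken_wake_function) and wait_woken(), so a wakeup that races with queueing the waiter is not lost and the receive queue is re-checked after every wakeup. The sketch below is a rough userspace analogue of the same wait-with-timeout, re-check-the-condition pattern using POSIX condition variables; it is illustrative only, not the kernel wait_woken() machinery, and the names (queue_t, wait_for_item, producer) are hypothetical.

/*
 * Illustrative sketch: wait up to a timeout for an item, re-checking the
 * queue after every wakeup so spurious wakeups are not mistaken for data.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int items;
} queue_t;

static queue_t q = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void *producer(void *arg)
{
	(void)arg;
	usleep(100 * 1000);		/* arrive after 100 ms */
	pthread_mutex_lock(&q.lock);
	q.items++;
	pthread_cond_signal(&q.cond);	/* wake exactly one waiter */
	pthread_mutex_unlock(&q.lock);
	return NULL;
}

/* Wait up to timeout_ms for an item; 0 on success, -ETIMEDOUT otherwise. */
static int wait_for_item(queue_t *queue, long timeout_ms)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&queue->lock);
	while (queue->items == 0 && err == 0)
		err = pthread_cond_timedwait(&queue->cond, &queue->lock,
					     &deadline);
	if (queue->items > 0) {
		queue->items--;		/* consume the item */
		err = 0;
	}
	pthread_mutex_unlock(&queue->lock);
	return err ? -ETIMEDOUT : 0;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	printf("wait: %d\n", wait_for_item(&q, 1000));
	pthread_join(t, NULL);
	return 0;
}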
@@ -2689,9 +2694,10 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern)
{
struct sock *new_sk, *sk = sock->sk;
- struct sk_buff *buf;
struct tipc_sock *new_tsock;
+ struct msghdr m = {NULL,};
struct tipc_msg *msg;
+ struct sk_buff *buf;
long timeo;
int res;
@@ -2737,19 +2743,17 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
}
/*
- * Respond to 'SYN-' by discarding it & returning 'ACK'-.
- * Respond to 'SYN+' by queuing it on new socket.
+ * Respond to 'SYN-' by discarding it & returning 'ACK'.
+ * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
*/
if (!msg_data_sz(msg)) {
- struct msghdr m = {NULL,};
-
tsk_advance_rx_queue(sk);
- __tipc_sendstream(new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
skb_set_owner_r(buf, new_sk);
}
+ __tipc_sendstream(new_sock, &m, 0);
release_sock(new_sk);
exit:
release_sock(sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 23c92ad15c61..ba7ced947e51 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1526,6 +1526,53 @@ out:
return err;
}
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+ /*
+ * Garbage collection of unix sockets starts by selecting a set of
+ * candidate sockets which have reference only from being in flight
+ * (total_refs == inflight_refs). This condition is checked once during
+ * the candidate collection phase, and candidates are marked as such, so
+ * that non-candidates can later be ignored. While inflight_refs is
+ * protected by unix_gc_lock, total_refs (file count) is not, hence this
+ * is an instantaneous decision.
+ *
+ * Once a candidate, however, the socket must not be reinstalled into a
+ * file descriptor while the garbage collection is in progress.
+ *
+ * If the above conditions are met, then the directed graph of
+ * candidates (*) does not change while unix_gc_lock is held.
+ *
+ * Any operation that changes the file count through file descriptors
+ * (dup, close, sendmsg) does not change the graph since candidates are
+ * not installed in fds.
+ *
+ * Dequeuing a candidate via recvmsg would install it into an fd, but
+ * that takes unix_gc_lock to decrement the inflight count, so it's
+ * serialized with garbage collection.
+ *
+ * MSG_PEEK is special in that it does not change the inflight count,
+ * yet does install the socket into an fd. The following lock/unlock
+ * pair is to ensure serialization with garbage collection. It must be
+ * done between incrementing the file count and installing the file into
+ * an fd.
+ *
+ * If garbage collection starts after the barrier provided by the
+ * lock/unlock, then it will see the elevated refcount and not mark this
+ * as a candidate. If a garbage collection is already in progress
+ * before the file count was incremented, then the lock/unlock pair will
+ * ensure that garbage collection is finished before progressing to
+ * installing the fd.
+ *
+ * (*) A -> B where B is on the queue of A or B is on the queue of C
+ * which is on the queue of listening socket A.
+ */
+ spin_lock(&unix_gc_lock);
+ spin_unlock(&unix_gc_lock);
+}
+
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
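
Editor's aside: the comment added above describes an empty lock/unlock pair used purely as a barrier — any garbage-collection scan already holding unix_gc_lock finishes before the peeked file, whose count was just raised, is installed into an fd. Below is a rough userspace analogue of that barrier, assuming a hypothetical object with a plain reference count; it is an illustrative sketch, not the af_unix implementation.

/*
 * Illustrative sketch: serialize with a collector that holds gc_lock for
 * its whole candidate scan by taking and immediately dropping the lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	atomic_int refs;	/* file-count analogue */
};

/* Collector: scans candidates while holding gc_lock, as unix_gc() does. */
static void collect(struct obj *o)
{
	pthread_mutex_lock(&gc_lock);
	if (atomic_load(&o->refs) == 0)
		printf("candidate\n");	/* would be reaped here */
	pthread_mutex_unlock(&gc_lock);
}

/*
 * Peek path: raise the reference first, then serialize with any scan in
 * progress before handing the reference out.  A scan that started before
 * the increment finishes inside the lock; one that starts afterwards
 * sees the raised count and skips the object.
 */
static void peek(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
	pthread_mutex_lock(&gc_lock);	/* wait for a running scan ... */
	pthread_mutex_unlock(&gc_lock);	/* ... then proceed immediately */
	/* safe to install the reference (e.g. into an fd table) here */
}

int main(void)
{
	struct obj o = { .refs = 0 };

	peek(&o);
	collect(&o);
	return 0;
}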
@@ -2175,7 +2222,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+ unix_peek_fds(&scm, skb);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
@@ -2418,7 +2465,7 @@ unlock:
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+ unix_peek_fds(&scm, skb);
sk_peek_offset_fwd(sk, chunk);
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 9ff64f9df1f3..7e7d7f45685a 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -295,10 +295,8 @@ again:
goto again;
}
- err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
- MSG_DONTWAIT);
- if (err > 0)
- err = 0;
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
out:
if (sk)
sock_put(sk);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e0c2c992ad9c..4f7c99dfd16c 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -357,11 +357,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
static void virtio_vsock_reset_sock(struct sock *sk)
{
- lock_sock(sk);
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
sk->sk_state = TCP_CLOSE;
sk->sk_err = ECONNRESET;
sk_error_report(sk);
- release_sock(sk);
}
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 169ba8b72a63..081e7ae93cb1 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1079,6 +1079,9 @@ virtio_transport_recv_connected(struct sock *sk,
virtio_transport_recv_enqueue(vsk, pkt);
sk->sk_data_ready(sk);
return err;
+ case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+ virtio_transport_send_credit_update(vsk);
+ break;
case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
sk->sk_write_space(sk);
break;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 50eb405b0690..16c88beea48b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2351,7 +2351,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
for (band = state->band_start;
- band < NUM_NL80211_BANDS; band++) {
+ band < (state->split ?
+ NUM_NL80211_BANDS :
+ NL80211_BAND_60GHZ + 1);
+ band++) {
struct ieee80211_supported_band *sband;
/* omit higher bands for ancient software */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index f03c7ac8e184..7897b1478c3c 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1754,16 +1754,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
* be grouped with this beacon for updates ...
*/
if (!cfg80211_combine_bsses(rdev, new)) {
- kfree(new);
+ bss_ref_put(rdev, new);
goto drop;
}
}
if (rdev->bss_entries >= bss_entries_limit &&
!cfg80211_bss_expire_oldest(rdev)) {
- if (!list_empty(&new->hidden_list))
- list_del(&new->hidden_list);
- kfree(new);
+ bss_ref_put(rdev, new);
goto drop;
}
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
index a20aec9d7393..2bf269390163 100644
--- a/net/xfrm/xfrm_compat.c
+++ b/net/xfrm/xfrm_compat.c
@@ -298,8 +298,16 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
nla_for_each_attr(nla, attrs, len, remaining) {
- int err = xfrm_xlate64_attr(dst, nla);
+ int err;
+ switch (type) {
+ case XFRM_MSG_NEWSPDINFO:
+ err = xfrm_nla_cpy(dst, nla, nla_len(nla));
+ break;
+ default:
+ err = xfrm_xlate64_attr(dst, nla);
+ break;
+ }
if (err)
return err;
}
@@ -341,7 +349,8 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
/* Calculates len of translated 64-bit message. */
static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
- struct nlattr *attrs[XFRMA_MAX+1])
+ struct nlattr *attrs[XFRMA_MAX + 1],
+ int maxtype)
{
size_t len = nlmsg_len(src);
@@ -358,10 +367,20 @@ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
case XFRM_MSG_POLEXPIRE:
len += 8;
break;
+ case XFRM_MSG_NEWSPDINFO:
+ /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+ return len;
default:
break;
}
+ /* Unexpected for anything but XFRM_MSG_NEWSPDINFO; please
+ * correct both 64=>32-bit and 32=>64-bit translators to copy
+ * new attributes.
+ */
+ if (WARN_ON_ONCE(maxtype))
+ return len;
+
if (attrs[XFRMA_SA])
len += 4;
if (attrs[XFRMA_POLICY])
@@ -440,7 +459,8 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
struct nlattr *attrs[XFRMA_MAX+1],
- size_t size, u8 type, struct netlink_ext_ack *extack)
+ size_t size, u8 type, int maxtype,
+ struct netlink_ext_ack *extack)
{
size_t pos;
int i;
@@ -520,6 +540,25 @@ static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
}
pos = dst->nlmsg_len;
+ if (maxtype) {
+ /* attributes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+ WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO);
+
+ for (i = 1; i <= maxtype; i++) {
+ int err;
+
+ if (!attrs[i])
+ continue;
+
+ /* just copy - no need for translation */
+ err = xfrm_attr_cpy32(dst, &pos, attrs[i], size,
+ nla_len(attrs[i]), nla_len(attrs[i]));
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+
for (i = 1; i < XFRMA_MAX + 1; i++) {
int err;
@@ -564,7 +603,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
if (err < 0)
return ERR_PTR(err);
- len = xfrm_user_rcv_calculate_len64(h32, attrs);
+ len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype);
/* The message doesn't need translation */
if (len == nlmsg_len(h32))
return NULL;
@@ -574,7 +613,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
if (!h64)
return ERR_PTR(-ENOMEM);
- err = xfrm_xlate32(h64, h32, attrs, len, type, extack);
+ err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack);
if (err < 0) {
kvfree(h64);
return ERR_PTR(err);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 2e8afe078d61..cb40ff0ff28d 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -241,7 +241,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
break;
}
- WARN_ON(!pos);
+ WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
if (--pos->users)
return;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 827d84255021..7f881f5a5897 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -155,7 +155,6 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
__read_mostly;
static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -585,7 +584,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
return;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- write_seqcount_begin(&xfrm_policy_hash_generation);
+ write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
@@ -596,7 +595,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
net->xfrm.policy_bydst[dir].hmask = nhashmask;
- write_seqcount_end(&xfrm_policy_hash_generation);
+ write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
synchronize_rcu();
@@ -1245,7 +1244,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- write_seqcount_begin(&xfrm_policy_hash_generation);
+ write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
/* make sure that we can insert the indirect policies again before
* we start with destructive action.
@@ -1354,7 +1353,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
out_unlock:
__xfrm_policy_inexact_flush(net);
- write_seqcount_end(&xfrm_policy_hash_generation);
+ write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
mutex_unlock(&hash_resize_mutex);
@@ -2091,15 +2090,12 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
if (unlikely(!daddr || !saddr))
return NULL;
- retry:
- sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
rcu_read_lock();
-
- chain = policy_hash_direct(net, daddr, saddr, family, dir);
- if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
- rcu_read_unlock();
- goto retry;
- }
+ retry:
+ do {
+ sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
+ chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
@@ -2130,15 +2126,11 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
skip_inexact:
- if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
- rcu_read_unlock();
+ if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
goto retry;
- }
- if (ret && !xfrm_pol_hold_rcu(ret)) {
- rcu_read_unlock();
+ if (ret && !xfrm_pol_hold_rcu(ret))
goto retry;
- }
fail:
rcu_read_unlock();
@@ -4089,6 +4081,7 @@ static int __net_init xfrm_net_init(struct net *net)
/* Initialize the per-net locks here */
spin_lock_init(&net->xfrm.xfrm_state_lock);
spin_lock_init(&net->xfrm.xfrm_policy_lock);
+ seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
rv = xfrm_statistics_init(net);
@@ -4133,7 +4126,6 @@ void __init xfrm_init(void)
{
register_pernet_subsys(&xfrm_net_ops);
xfrm_dev_init();
- seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
xfrm_input_init();
#ifdef CONFIG_XFRM_ESPINTCP
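
Editor's aside: the xfrm_policy.c changes above move the hash-generation counter into struct net (a seqcount initialised against xfrm_policy_lock via seqcount_spinlock_init()) and restructure the lookup as a do/while seqcount retry. The sketch below is a simplified userspace seqlock-style reader/writer using C11 atomics, meant only to illustrate the retry pattern; the memory ordering is deliberately simplified (real concurrent use would also need barriers or atomic accesses for the data itself) and all names are hypothetical.

/*
 * Illustrative sketch: writers bump the sequence to odd before updating
 * and back to even afterwards; readers retry until they see a stable,
 * unchanged even sequence around their reads.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;		/* even: stable, odd: write in progress */
static int table[2];		/* data guarded by the sequence counter */

static void writer_update(int a, int b)
{
	atomic_fetch_add(&seq, 1);	/* sequence becomes odd */
	table[0] = a;
	table[1] = b;
	atomic_fetch_add(&seq, 1);	/* sequence becomes even again */
}

static void reader_lookup(int *a, int *b)
{
	unsigned int start;

	do {
		do {	/* wait for an even (stable) sequence number */
			start = atomic_load(&seq);
		} while (start & 1);

		*a = table[0];
		*b = table[1];
		/* retry if a writer ran while we were reading */
	} while (atomic_load(&seq) != start);
}

int main(void)
{
	int a, b;

	writer_update(1, 2);
	reader_lookup(&a, &b);
	printf("%d %d\n", a, b);
	return 0;
}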
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b47d613409b7..7aff641c717d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2811,6 +2811,16 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
err = link->doit(skb, nlh, attrs);
+ /* We need to free skb allocated in xfrm_alloc_compat() before
+ * returning from this function, because consume_skb() won't take
+ * care of frag_list since netlink destructor sets
+ * skb->head to NULL. (see netlink_skb_destructor())
+ */
+ if (skb_has_frag_list(skb)) {
+ kfree_skb(skb_shinfo(skb)->frag_list);
+ skb_shinfo(skb)->frag_list = NULL;
+ }
+
err:
kvfree(nlh64);
return err;
diff --git a/samples/Kconfig b/samples/Kconfig
index b5a1a7aa7e23..b0503ef058d3 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -154,14 +154,14 @@ config SAMPLE_UHID
config SAMPLE_VFIO_MDEV_MTTY
tristate "Build VFIO mtty example mediated device sample code -- loadable modules only"
- depends on VFIO_MDEV_DEVICE && m
+ depends on VFIO_MDEV && m
help
Build a virtual tty sample driver for use as a VFIO
mediated device
config SAMPLE_VFIO_MDEV_MDPY
tristate "Build VFIO mdpy example mediated device sample code -- loadable modules only"
- depends on VFIO_MDEV_DEVICE && m
+ depends on VFIO_MDEV && m
help
Build a virtual display sample driver for use as a VFIO
mediated device. It is a simple framebuffer and supports
@@ -178,7 +178,7 @@ config SAMPLE_VFIO_MDEV_MDPY_FB
config SAMPLE_VFIO_MDEV_MBOCHS
tristate "Build VFIO mdpy example mediated device sample code -- loadable modules only"
- depends on VFIO_MDEV_DEVICE && m
+ depends on VFIO_MDEV && m
select DMA_SHARED_BUFFER
help
Build a virtual display sample driver for use as a VFIO
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 520434ea966f..036998d11ded 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -331,6 +331,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -fno-asynchronous-unwind-tables \
-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
$(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 53e300f860bb..33d0bdebbed8 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -96,6 +96,7 @@ static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
+static u32 prog_id;
static bool opt_busy_poll;
static bool opt_reduced_cap;
@@ -461,6 +462,23 @@ static void *poller(void *arg)
return NULL;
}
+static void remove_xdp_program(void)
+{
+ u32 curr_prog_id = 0;
+
+ if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given interface\n");
+ else
+ printf("program on interface changed, not removing\n");
+}
+
static void int_exit(int sig)
{
benchmark_done = true;
@@ -471,6 +489,9 @@ static void __exit_with_error(int error, const char *file, const char *func,
{
fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
line, error, strerror(error));
+
+ if (opt_num_xsks > 1)
+ remove_xdp_program();
exit(EXIT_FAILURE);
}
@@ -490,6 +511,9 @@ static void xdpsock_cleanup(void)
if (write(sock, &cmd, sizeof(int)) < 0)
exit_with_error(errno);
}
+
+ if (opt_num_xsks > 1)
+ remove_xdp_program();
}
static void swap_mac_addresses(void *data)
@@ -857,6 +881,10 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
if (ret)
exit_with_error(-ret);
+ ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
+ if (ret)
+ exit_with_error(-ret);
+
xsk->app_stats.rx_empty_polls = 0;
xsk->app_stats.fill_fail_polls = 0;
xsk->app_stats.copy_tx_sendtos = 0;
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index 881ef9a7296f..6c0f229db36a 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -130,6 +130,7 @@ static struct class *mbochs_class;
static struct cdev mbochs_cdev;
static struct device mbochs_dev;
static int mbochs_used_mbytes;
+static const struct vfio_device_ops mbochs_dev_ops;
struct vfio_region_info_ext {
struct vfio_region_info base;
@@ -160,6 +161,7 @@ struct mbochs_dmabuf {
/* State of each mdev device */
struct mdev_state {
+ struct vfio_device vdev;
u8 *vconfig;
u64 bar_mask[3];
u32 memory_bar_mask;
@@ -425,11 +427,9 @@ static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
memcpy(buf, mdev_state->edid_blob + offset, count);
}
-static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
- loff_t pos, bool is_write)
+static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
+ size_t count, loff_t pos, bool is_write)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
- struct device *dev = mdev_dev(mdev);
struct page *pg;
loff_t poff;
char *map;
@@ -478,7 +478,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
put_page(pg);
} else {
- dev_dbg(dev, "%s: %s @0x%llx (unhandled)\n",
+ dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n",
__func__, is_write ? "WR" : "RD", pos);
ret = -1;
goto accessfailed;
@@ -493,9 +493,8 @@ accessfailed:
return ret;
}
-static int mbochs_reset(struct mdev_device *mdev)
+static int mbochs_reset(struct mdev_state *mdev_state)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
u32 size64k = mdev_state->memsize / (64 * 1024);
int i;
@@ -506,12 +505,13 @@ static int mbochs_reset(struct mdev_device *mdev)
return 0;
}
-static int mbochs_create(struct mdev_device *mdev)
+static int mbochs_probe(struct mdev_device *mdev)
{
const struct mbochs_type *type =
&mbochs_types[mdev_get_type_group_id(mdev)];
struct device *dev = mdev_dev(mdev);
struct mdev_state *mdev_state;
+ int ret = -ENOMEM;
if (type->mbytes + mbochs_used_mbytes > max_mbytes)
return -ENOMEM;
@@ -519,6 +519,7 @@ static int mbochs_create(struct mdev_device *mdev)
mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
if (mdev_state == NULL)
return -ENOMEM;
+ vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops);
mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
if (mdev_state->vconfig == NULL)
@@ -537,7 +538,6 @@ static int mbochs_create(struct mdev_device *mdev)
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
- mdev_set_drvdata(mdev, mdev_state);
INIT_LIST_HEAD(&mdev_state->dmabufs);
mdev_state->next_id = 1;
@@ -547,32 +547,38 @@ static int mbochs_create(struct mdev_device *mdev)
mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
mbochs_create_config_space(mdev_state);
- mbochs_reset(mdev);
+ mbochs_reset(mdev_state);
mbochs_used_mbytes += type->mbytes;
+
+ ret = vfio_register_group_dev(&mdev_state->vdev);
+ if (ret)
+ goto err_mem;
+ dev_set_drvdata(&mdev->dev, mdev_state);
return 0;
err_mem:
kfree(mdev_state->vconfig);
kfree(mdev_state);
- return -ENOMEM;
+ return ret;
}
-static int mbochs_remove(struct mdev_device *mdev)
+static void mbochs_remove(struct mdev_device *mdev)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
mbochs_used_mbytes -= mdev_state->type->mbytes;
- mdev_set_drvdata(mdev, NULL);
+ vfio_unregister_group_dev(&mdev_state->vdev);
kfree(mdev_state->pages);
kfree(mdev_state->vconfig);
kfree(mdev_state);
- return 0;
}
-static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
+static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
@@ -582,7 +588,7 @@ static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
if (count >= 4 && !(*ppos % 4)) {
u32 val;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -594,7 +600,7 @@ static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -606,7 +612,7 @@ static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -629,9 +635,11 @@ read_err:
return -EFAULT;
}
-static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
+static ssize_t mbochs_write(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
@@ -644,7 +652,7 @@ static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -656,7 +664,7 @@ static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -668,7 +676,7 @@ static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -754,9 +762,10 @@ static const struct vm_operations_struct mbochs_region_vm_ops = {
.fault = mbochs_region_vm_fault,
};
-static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+static int mbochs_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
return -EINVAL;
@@ -963,7 +972,7 @@ mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
{
struct mdev_state *mdev_state = dmabuf->mdev_state;
- struct device *dev = mdev_dev(mdev_state->mdev);
+ struct device *dev = mdev_state->vdev.dev;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *buf;
@@ -991,15 +1000,10 @@ static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
return 0;
}
-static int mbochs_get_region_info(struct mdev_device *mdev,
+static int mbochs_get_region_info(struct mdev_state *mdev_state,
struct vfio_region_info_ext *ext)
{
struct vfio_region_info *region_info = &ext->base;
- struct mdev_state *mdev_state;
-
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state)
- return -EINVAL;
if (region_info->index >= MBOCHS_NUM_REGIONS)
return -EINVAL;
@@ -1047,15 +1051,13 @@ static int mbochs_get_region_info(struct mdev_device *mdev,
return 0;
}
-static int mbochs_get_irq_info(struct mdev_device *mdev,
- struct vfio_irq_info *irq_info)
+static int mbochs_get_irq_info(struct vfio_irq_info *irq_info)
{
irq_info->count = 0;
return 0;
}
-static int mbochs_get_device_info(struct mdev_device *mdev,
- struct vfio_device_info *dev_info)
+static int mbochs_get_device_info(struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = MBOCHS_NUM_REGIONS;
@@ -1063,11 +1065,9 @@ static int mbochs_get_device_info(struct mdev_device *mdev,
return 0;
}
-static int mbochs_query_gfx_plane(struct mdev_device *mdev,
+static int mbochs_query_gfx_plane(struct mdev_state *mdev_state,
struct vfio_device_gfx_plane_info *plane)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
- struct device *dev = mdev_dev(mdev);
struct mbochs_dmabuf *dmabuf;
struct mbochs_mode mode;
int ret;
@@ -1121,18 +1121,16 @@ static int mbochs_query_gfx_plane(struct mdev_device *mdev,
done:
if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY &&
mdev_state->active_id != plane->dmabuf_id) {
- dev_dbg(dev, "%s: primary: %d => %d\n", __func__,
- mdev_state->active_id, plane->dmabuf_id);
+ dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n",
+ __func__, mdev_state->active_id, plane->dmabuf_id);
mdev_state->active_id = plane->dmabuf_id;
}
mutex_unlock(&mdev_state->ops_lock);
return 0;
}
-static int mbochs_get_gfx_dmabuf(struct mdev_device *mdev,
- u32 id)
+static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
struct mbochs_dmabuf *dmabuf;
mutex_lock(&mdev_state->ops_lock);
@@ -1154,9 +1152,11 @@ static int mbochs_get_gfx_dmabuf(struct mdev_device *mdev,
return dma_buf_fd(dmabuf->buf, 0);
}
-static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
- unsigned long arg)
+static long mbochs_ioctl(struct vfio_device *vdev, unsigned int cmd,
+ unsigned long arg)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
int ret = 0;
unsigned long minsz, outsz;
@@ -1173,7 +1173,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.argsz < minsz)
return -EINVAL;
- ret = mbochs_get_device_info(mdev, &info);
+ ret = mbochs_get_device_info(&info);
if (ret)
return ret;
@@ -1197,7 +1197,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (outsz > sizeof(info))
return -EINVAL;
- ret = mbochs_get_region_info(mdev, &info);
+ ret = mbochs_get_region_info(mdev_state, &info);
if (ret)
return ret;
@@ -1220,7 +1220,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
(info.index >= VFIO_PCI_NUM_IRQS))
return -EINVAL;
- ret = mbochs_get_irq_info(mdev, &info);
+ ret = mbochs_get_irq_info(&info);
if (ret)
return ret;
@@ -1243,7 +1243,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (plane.argsz < minsz)
return -EINVAL;
- ret = mbochs_query_gfx_plane(mdev, &plane);
+ ret = mbochs_query_gfx_plane(mdev_state, &plane);
if (ret)
return ret;
@@ -1260,19 +1260,19 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
- return mbochs_get_gfx_dmabuf(mdev, dmabuf_id);
+ return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id);
}
case VFIO_DEVICE_SET_IRQS:
return -EINVAL;
case VFIO_DEVICE_RESET:
- return mbochs_reset(mdev);
+ return mbochs_reset(mdev_state);
}
return -ENOTTY;
}
-static int mbochs_open(struct mdev_device *mdev)
+static int mbochs_open(struct vfio_device *vdev)
{
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@@ -1280,9 +1280,10 @@ static int mbochs_open(struct mdev_device *mdev)
return 0;
}
-static void mbochs_close(struct mdev_device *mdev)
+static void mbochs_close(struct vfio_device *vdev)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
struct mbochs_dmabuf *dmabuf, *tmp;
mutex_lock(&mdev_state->ops_lock);
@@ -1306,8 +1307,7 @@ static ssize_t
memory_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state = dev_get_drvdata(dev);
return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
}
@@ -1398,18 +1398,30 @@ static struct attribute_group *mdev_type_groups[] = {
NULL,
};
+static const struct vfio_device_ops mbochs_dev_ops = {
+ .open = mbochs_open,
+ .release = mbochs_close,
+ .read = mbochs_read,
+ .write = mbochs_write,
+ .ioctl = mbochs_ioctl,
+ .mmap = mbochs_mmap,
+};
+
+static struct mdev_driver mbochs_driver = {
+ .driver = {
+ .name = "mbochs",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = mdev_dev_groups,
+ },
+ .probe = mbochs_probe,
+ .remove = mbochs_remove,
+};
+
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
- .mdev_attr_groups = mdev_dev_groups,
+ .device_driver = &mbochs_driver,
.supported_type_groups = mdev_type_groups,
- .create = mbochs_create,
- .remove = mbochs_remove,
- .open = mbochs_open,
- .release = mbochs_close,
- .read = mbochs_read,
- .write = mbochs_write,
- .ioctl = mbochs_ioctl,
- .mmap = mbochs_mmap,
};
static const struct file_operations vd_fops = {
@@ -1434,11 +1446,15 @@ static int __init mbochs_dev_init(void)
cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));
+ ret = mdev_register_driver(&mbochs_driver);
+ if (ret)
+ goto err_cdev;
+
mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME);
if (IS_ERR(mbochs_class)) {
pr_err("Error: failed to register mbochs_dev class\n");
ret = PTR_ERR(mbochs_class);
- goto failed1;
+ goto err_driver;
}
mbochs_dev.class = mbochs_class;
mbochs_dev.release = mbochs_device_release;
@@ -1446,19 +1462,21 @@ static int __init mbochs_dev_init(void)
ret = device_register(&mbochs_dev);
if (ret)
- goto failed2;
+ goto err_class;
ret = mdev_register_device(&mbochs_dev, &mdev_fops);
if (ret)
- goto failed3;
+ goto err_device;
return 0;
-failed3:
+err_device:
device_unregister(&mbochs_dev);
-failed2:
+err_class:
class_destroy(mbochs_class);
-failed1:
+err_driver:
+ mdev_unregister_driver(&mbochs_driver);
+err_cdev:
cdev_del(&mbochs_cdev);
unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
return ret;
@@ -1470,6 +1488,7 @@ static void __exit mbochs_dev_exit(void)
mdev_unregister_device(&mbochs_dev);
device_unregister(&mbochs_dev);
+ mdev_unregister_driver(&mbochs_driver);
cdev_del(&mbochs_cdev);
unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
class_destroy(mbochs_class);
diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c
index e889c1cf8fd1..393c9df6f6a0 100644
--- a/samples/vfio-mdev/mdpy.c
+++ b/samples/vfio-mdev/mdpy.c
@@ -85,9 +85,11 @@ static struct class *mdpy_class;
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static u32 mdpy_count;
+static const struct vfio_device_ops mdpy_dev_ops;
/* State of each mdev device */
struct mdev_state {
+ struct vfio_device vdev;
u8 *vconfig;
u32 bar_mask;
struct mutex ops_lock;
@@ -162,11 +164,9 @@ static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
}
}
-static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
- loff_t pos, bool is_write)
+static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
+ size_t count, loff_t pos, bool is_write)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
- struct device *dev = mdev_dev(mdev);
int ret = 0;
mutex_lock(&mdev_state->ops_lock);
@@ -187,8 +187,9 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
memcpy(buf, mdev_state->memblk, count);
} else {
- dev_info(dev, "%s: %s @0x%llx (unhandled)\n",
- __func__, is_write ? "WR" : "RD", pos);
+ dev_info(mdev_state->vdev.dev,
+ "%s: %s @0x%llx (unhandled)\n", __func__,
+ is_write ? "WR" : "RD", pos);
ret = -1;
goto accessfailed;
}
@@ -202,9 +203,8 @@ accessfailed:
return ret;
}
-static int mdpy_reset(struct mdev_device *mdev)
+static int mdpy_reset(struct mdev_state *mdev_state)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
u32 stride, i;
/* initialize with gray gradient */
@@ -216,13 +216,14 @@ static int mdpy_reset(struct mdev_device *mdev)
return 0;
}
-static int mdpy_create(struct mdev_device *mdev)
+static int mdpy_probe(struct mdev_device *mdev)
{
const struct mdpy_type *type =
&mdpy_types[mdev_get_type_group_id(mdev)];
struct device *dev = mdev_dev(mdev);
struct mdev_state *mdev_state;
u32 fbsize;
+ int ret;
if (mdpy_count >= max_devices)
return -ENOMEM;
@@ -230,6 +231,7 @@ static int mdpy_create(struct mdev_device *mdev)
mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
if (mdev_state == NULL)
return -ENOMEM;
+ vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mdpy_dev_ops);
mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
if (mdev_state->vconfig == NULL) {
@@ -250,36 +252,42 @@ static int mdpy_create(struct mdev_device *mdev)
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
- mdev_set_drvdata(mdev, mdev_state);
-
mdev_state->type = type;
mdev_state->memsize = fbsize;
mdpy_create_config_space(mdev_state);
- mdpy_reset(mdev);
+ mdpy_reset(mdev_state);
mdpy_count++;
+
+ ret = vfio_register_group_dev(&mdev_state->vdev);
+ if (ret) {
+ kfree(mdev_state->vconfig);
+ kfree(mdev_state);
+ return ret;
+ }
+ dev_set_drvdata(&mdev->dev, mdev_state);
return 0;
}
-static int mdpy_remove(struct mdev_device *mdev)
+static void mdpy_remove(struct mdev_device *mdev)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
- struct device *dev = mdev_dev(mdev);
+ struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
- dev_info(dev, "%s\n", __func__);
+ dev_info(&mdev->dev, "%s\n", __func__);
- mdev_set_drvdata(mdev, NULL);
+ vfio_unregister_group_dev(&mdev_state->vdev);
vfree(mdev_state->memblk);
kfree(mdev_state->vconfig);
kfree(mdev_state);
mdpy_count--;
- return 0;
}
-static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
+static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
@@ -289,8 +297,8 @@ static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
if (count >= 4 && !(*ppos % 4)) {
u32 val;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
- *ppos, false);
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
if (ret <= 0)
goto read_err;
@@ -301,7 +309,7 @@ static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -313,7 +321,7 @@ static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -336,9 +344,11 @@ read_err:
return -EFAULT;
}
-static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
+static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
@@ -351,7 +361,7 @@ static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -363,7 +373,7 @@ static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -375,7 +385,7 @@ static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -393,9 +403,10 @@ write_err:
return -EFAULT;
}
-static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
return -EINVAL;
@@ -409,16 +420,10 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}
-static int mdpy_get_region_info(struct mdev_device *mdev,
+static int mdpy_get_region_info(struct mdev_state *mdev_state,
struct vfio_region_info *region_info,
u16 *cap_type_id, void **cap_type)
{
- struct mdev_state *mdev_state;
-
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state)
- return -EINVAL;
-
if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
region_info->index != MDPY_DISPLAY_REGION)
return -EINVAL;
@@ -447,15 +452,13 @@ static int mdpy_get_region_info(struct mdev_device *mdev,
return 0;
}
-static int mdpy_get_irq_info(struct mdev_device *mdev,
- struct vfio_irq_info *irq_info)
+static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
irq_info->count = 0;
return 0;
}
-static int mdpy_get_device_info(struct mdev_device *mdev,
- struct vfio_device_info *dev_info)
+static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
@@ -463,11 +466,9 @@ static int mdpy_get_device_info(struct mdev_device *mdev,
return 0;
}
-static int mdpy_query_gfx_plane(struct mdev_device *mdev,
+static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
struct vfio_device_gfx_plane_info *plane)
{
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
-
if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
VFIO_GFX_PLANE_TYPE_REGION))
@@ -496,14 +497,13 @@ static int mdpy_query_gfx_plane(struct mdev_device *mdev,
return 0;
}
-static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
+static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg)
{
int ret = 0;
unsigned long minsz;
- struct mdev_state *mdev_state;
-
- mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
@@ -518,7 +518,7 @@ static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.argsz < minsz)
return -EINVAL;
- ret = mdpy_get_device_info(mdev, &info);
+ ret = mdpy_get_device_info(&info);
if (ret)
return ret;
@@ -543,7 +543,7 @@ static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.argsz < minsz)
return -EINVAL;
- ret = mdpy_get_region_info(mdev, &info, &cap_type_id,
+ ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
&cap_type);
if (ret)
return ret;
@@ -567,7 +567,7 @@ static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
(info.index >= mdev_state->dev_info.num_irqs))
return -EINVAL;
- ret = mdpy_get_irq_info(mdev, &info);
+ ret = mdpy_get_irq_info(&info);
if (ret)
return ret;
@@ -590,7 +590,7 @@ static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (plane.argsz < minsz)
return -EINVAL;
- ret = mdpy_query_gfx_plane(mdev, &plane);
+ ret = mdpy_query_gfx_plane(mdev_state, &plane);
if (ret)
return ret;
@@ -604,12 +604,12 @@ static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
return -EINVAL;
case VFIO_DEVICE_RESET:
- return mdpy_reset(mdev);
+ return mdpy_reset(mdev_state);
}
return -ENOTTY;
}
-static int mdpy_open(struct mdev_device *mdev)
+static int mdpy_open(struct vfio_device *vdev)
{
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@@ -617,7 +617,7 @@ static int mdpy_open(struct mdev_device *mdev)
return 0;
}
-static void mdpy_close(struct mdev_device *mdev)
+static void mdpy_close(struct vfio_device *vdev)
{
module_put(THIS_MODULE);
}
@@ -626,8 +626,7 @@ static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
+ struct mdev_state *mdev_state = dev_get_drvdata(dev);
return sprintf(buf, "%dx%d\n",
mdev_state->type->width,
@@ -716,18 +715,30 @@ static struct attribute_group *mdev_type_groups[] = {
NULL,
};
+static const struct vfio_device_ops mdpy_dev_ops = {
+ .open = mdpy_open,
+ .release = mdpy_close,
+ .read = mdpy_read,
+ .write = mdpy_write,
+ .ioctl = mdpy_ioctl,
+ .mmap = mdpy_mmap,
+};
+
+static struct mdev_driver mdpy_driver = {
+ .driver = {
+ .name = "mdpy",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = mdev_dev_groups,
+ },
+ .probe = mdpy_probe,
+ .remove = mdpy_remove,
+};
+
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
- .mdev_attr_groups = mdev_dev_groups,
+ .device_driver = &mdpy_driver,
.supported_type_groups = mdev_type_groups,
- .create = mdpy_create,
- .remove = mdpy_remove,
- .open = mdpy_open,
- .release = mdpy_close,
- .read = mdpy_read,
- .write = mdpy_write,
- .ioctl = mdpy_ioctl,
- .mmap = mdpy_mmap,
};
static const struct file_operations vd_fops = {
@@ -752,11 +763,15 @@ static int __init mdpy_dev_init(void)
cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));
+ ret = mdev_register_driver(&mdpy_driver);
+ if (ret)
+ goto err_cdev;
+
mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
if (IS_ERR(mdpy_class)) {
pr_err("Error: failed to register mdpy_dev class\n");
ret = PTR_ERR(mdpy_class);
- goto failed1;
+ goto err_driver;
}
mdpy_dev.class = mdpy_class;
mdpy_dev.release = mdpy_device_release;
@@ -764,19 +779,21 @@ static int __init mdpy_dev_init(void)
ret = device_register(&mdpy_dev);
if (ret)
- goto failed2;
+ goto err_class;
ret = mdev_register_device(&mdpy_dev, &mdev_fops);
if (ret)
- goto failed3;
+ goto err_device;
return 0;
-failed3:
+err_device:
device_unregister(&mdpy_dev);
-failed2:
+err_class:
class_destroy(mdpy_class);
-failed1:
+err_driver:
+ mdev_unregister_driver(&mdpy_driver);
+err_cdev:
cdev_del(&mdpy_cdev);
unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
return ret;
@@ -788,6 +805,7 @@ static void __exit mdpy_dev_exit(void)
mdev_unregister_device(&mdpy_dev);
device_unregister(&mdpy_dev);
+ mdev_unregister_driver(&mdpy_driver);
cdev_del(&mdpy_cdev);
unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
class_destroy(mdpy_class);
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index b9b24be4abda..8b26fecc4afe 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -127,6 +127,7 @@ struct serial_port {
/* State of each mdev device */
struct mdev_state {
+ struct vfio_device vdev;
int irq_fd;
struct eventfd_ctx *intx_evtfd;
struct eventfd_ctx *msi_evtfd;
@@ -143,13 +144,14 @@ struct mdev_state {
int nr_ports;
};
-static struct mutex mdev_list_lock;
-static struct list_head mdev_devices_list;
+static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);
static const struct file_operations vd_fops = {
.owner = THIS_MODULE,
};
+static const struct vfio_device_ops mtty_dev_ops;
+
/* function prototypes */
static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
@@ -631,23 +633,16 @@ static void mdev_read_base(struct mdev_state *mdev_state)
}
}
-static ssize_t mdev_access(struct mdev_device *mdev, u8 *buf, size_t count,
+static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
loff_t pos, bool is_write)
{
- struct mdev_state *mdev_state;
unsigned int index;
loff_t offset;
int ret = 0;
- if (!mdev || !buf)
+ if (!buf)
return -EINVAL;
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state) {
- pr_err("%s mdev_state not found\n", __func__);
- return -EINVAL;
- }
-
mutex_lock(&mdev_state->ops_lock);
index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
@@ -708,14 +703,26 @@ accessfailed:
return ret;
}
-static int mtty_create(struct mdev_device *mdev)
+static int mtty_probe(struct mdev_device *mdev)
{
struct mdev_state *mdev_state;
int nr_ports = mdev_get_type_group_id(mdev) + 1;
+ int avail_ports = atomic_read(&mdev_avail_ports);
+ int ret;
+
+ do {
+ if (avail_ports < nr_ports)
+ return -ENOSPC;
+ } while (!atomic_try_cmpxchg(&mdev_avail_ports,
+ &avail_ports, avail_ports - nr_ports));
mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
- if (mdev_state == NULL)
+ if (mdev_state == NULL) {
+ atomic_add(nr_ports, &mdev_avail_ports);
return -ENOMEM;
+ }
+
+ vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mtty_dev_ops);
mdev_state->nr_ports = nr_ports;
mdev_state->irq_index = -1;
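
Editor's aside: the hunk above replaces the mdev_devices_list walk with a lock-free reservation against an atomic pool via atomic_try_cmpxchg(), backing the ports out again on every error path. A rough userspace analogue of that reserve/release pattern, using C11 atomic_compare_exchange_weak() and hypothetical names (MAX_PORTS, reserve_ports), is sketched below; it illustrates the technique only and is not the mtty code.

/*
 * Illustrative sketch: atomically take nr ports from a shared pool,
 * retrying when another thread changed the count under us.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 24

static atomic_int avail_ports = MAX_PORTS;

/* Take nr ports from the pool; false if not enough are left. */
static bool reserve_ports(int nr)
{
	int avail = atomic_load(&avail_ports);

	do {
		if (avail < nr)
			return false;
		/* on failure, avail is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(&avail_ports, &avail,
					       avail - nr));
	return true;
}

static void release_ports(int nr)
{
	atomic_fetch_add(&avail_ports, nr);
}

int main(void)
{
	printf("got 8: %d\n", reserve_ports(8));
	printf("got 20: %d\n", reserve_ports(20));	/* only 16 left */
	release_ports(8);
	return 0;
}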
@@ -726,63 +733,50 @@ static int mtty_create(struct mdev_device *mdev)
if (mdev_state->vconfig == NULL) {
kfree(mdev_state);
+ atomic_add(nr_ports, &mdev_avail_ports);
return -ENOMEM;
}
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
- mdev_set_drvdata(mdev, mdev_state);
mtty_create_config_space(mdev_state);
- mutex_lock(&mdev_list_lock);
- list_add(&mdev_state->next, &mdev_devices_list);
- mutex_unlock(&mdev_list_lock);
+ ret = vfio_register_group_dev(&mdev_state->vdev);
+ if (ret) {
+ kfree(mdev_state);
+ atomic_add(nr_ports, &mdev_avail_ports);
+ return ret;
+ }
+ dev_set_drvdata(&mdev->dev, mdev_state);
return 0;
}
-static int mtty_remove(struct mdev_device *mdev)
+static void mtty_remove(struct mdev_device *mdev)
{
- struct mdev_state *mds, *tmp_mds;
- struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
- int ret = -EINVAL;
-
- mutex_lock(&mdev_list_lock);
- list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
- if (mdev_state == mds) {
- list_del(&mdev_state->next);
- mdev_set_drvdata(mdev, NULL);
- kfree(mdev_state->vconfig);
- kfree(mdev_state);
- ret = 0;
- break;
- }
- }
- mutex_unlock(&mdev_list_lock);
+ struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
+ int nr_ports = mdev_state->nr_ports;
- return ret;
+ vfio_unregister_group_dev(&mdev_state->vdev);
+
+ kfree(mdev_state->vconfig);
+ kfree(mdev_state);
+ atomic_add(nr_ports, &mdev_avail_ports);
}
-static int mtty_reset(struct mdev_device *mdev)
+static int mtty_reset(struct mdev_state *mdev_state)
{
- struct mdev_state *mdev_state;
-
- if (!mdev)
- return -EINVAL;
-
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state)
- return -EINVAL;
-
pr_info("%s: called\n", __func__);
return 0;
}
-static ssize_t mtty_read(struct mdev_device *mdev, char __user *buf,
+static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
@@ -792,7 +786,7 @@ static ssize_t mtty_read(struct mdev_device *mdev, char __user *buf,
if (count >= 4 && !(*ppos % 4)) {
u32 val;
- ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -804,7 +798,7 @@ static ssize_t mtty_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
- ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -816,7 +810,7 @@ static ssize_t mtty_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
- ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
@@ -839,9 +833,11 @@ read_err:
return -EFAULT;
}
-static ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
+static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
unsigned int done = 0;
int ret;
@@ -854,7 +850,7 @@ static ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -866,7 +862,7 @@ static ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -878,7 +874,7 @@ static ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
@@ -896,19 +892,11 @@ write_err:
return -EFAULT;
}
-static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
+static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
unsigned int index, unsigned int start,
unsigned int count, void *data)
{
int ret = 0;
- struct mdev_state *mdev_state;
-
- if (!mdev)
- return -EINVAL;
-
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state)
- return -EINVAL;
mutex_lock(&mdev_state->ops_lock);
switch (index) {
@@ -1024,21 +1012,13 @@ static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
return ret;
}
-static int mtty_get_region_info(struct mdev_device *mdev,
+static int mtty_get_region_info(struct mdev_state *mdev_state,
struct vfio_region_info *region_info,
u16 *cap_type_id, void **cap_type)
{
unsigned int size = 0;
- struct mdev_state *mdev_state;
u32 bar_index;
- if (!mdev)
- return -EINVAL;
-
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state)
- return -EINVAL;
-
bar_index = region_info->index;
if (bar_index >= VFIO_PCI_NUM_REGIONS)
return -EINVAL;
@@ -1073,8 +1053,7 @@ static int mtty_get_region_info(struct mdev_device *mdev,
return 0;
}
-static int mtty_get_irq_info(struct mdev_device *mdev,
- struct vfio_irq_info *irq_info)
+static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
{
switch (irq_info->index) {
case VFIO_PCI_INTX_IRQ_INDEX:
@@ -1098,8 +1077,7 @@ static int mtty_get_irq_info(struct mdev_device *mdev,
return 0;
}
-static int mtty_get_device_info(struct mdev_device *mdev,
- struct vfio_device_info *dev_info)
+static int mtty_get_device_info(struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
@@ -1108,19 +1086,13 @@ static int mtty_get_device_info(struct mdev_device *mdev,
return 0;
}
-static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
+static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg)
{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
int ret = 0;
unsigned long minsz;
- struct mdev_state *mdev_state;
-
- if (!mdev)
- return -EINVAL;
-
- mdev_state = mdev_get_drvdata(mdev);
- if (!mdev_state)
- return -ENODEV;
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
@@ -1135,7 +1107,7 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.argsz < minsz)
return -EINVAL;
- ret = mtty_get_device_info(mdev, &info);
+ ret = mtty_get_device_info(&info);
if (ret)
return ret;
@@ -1160,7 +1132,7 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.argsz < minsz)
return -EINVAL;
- ret = mtty_get_region_info(mdev, &info, &cap_type_id,
+ ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
&cap_type);
if (ret)
return ret;
@@ -1184,7 +1156,7 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
(info.index >= mdev_state->dev_info.num_irqs))
return -EINVAL;
- ret = mtty_get_irq_info(mdev, &info);
+ ret = mtty_get_irq_info(&info);
if (ret)
return ret;
@@ -1218,25 +1190,25 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
return PTR_ERR(data);
}
- ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
+ ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
hdr.count, data);
kfree(ptr);
return ret;
}
case VFIO_DEVICE_RESET:
- return mtty_reset(mdev);
+ return mtty_reset(mdev_state);
}
return -ENOTTY;
}
-static int mtty_open(struct mdev_device *mdev)
+static int mtty_open(struct vfio_device *vdev)
{
pr_info("%s\n", __func__);
return 0;
}
-static void mtty_close(struct mdev_device *mdev)
+static void mtty_close(struct vfio_device *mdev)
{
pr_info("%s\n", __func__);
}
@@ -1308,14 +1280,9 @@ static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
char *buf)
{
- struct mdev_state *mds;
unsigned int ports = mtype_get_type_group_id(mtype) + 1;
- int used = 0;
-
- list_for_each_entry(mds, &mdev_devices_list, next)
- used += mds->nr_ports;
- return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
+ return sprintf(buf, "%d\n", atomic_read(&mdev_avail_ports) / ports);
}
static MDEV_TYPE_ATTR_RO(available_instances);
@@ -1351,18 +1318,31 @@ static struct attribute_group *mdev_type_groups[] = {
NULL,
};
+static const struct vfio_device_ops mtty_dev_ops = {
+ .name = "vfio-mtty",
+ .open = mtty_open,
+ .release = mtty_close,
+ .read = mtty_read,
+ .write = mtty_write,
+ .ioctl = mtty_ioctl,
+};
+
+static struct mdev_driver mtty_driver = {
+ .driver = {
+ .name = "mtty",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = mdev_dev_groups,
+ },
+ .probe = mtty_probe,
+ .remove = mtty_remove,
+};
+
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
+ .device_driver = &mtty_driver,
.dev_attr_groups = mtty_dev_groups,
- .mdev_attr_groups = mdev_dev_groups,
.supported_type_groups = mdev_type_groups,
- .create = mtty_create,
- .remove = mtty_remove,
- .open = mtty_open,
- .release = mtty_close,
- .read = mtty_read,
- .write = mtty_write,
- .ioctl = mtty_ioctl,
};
static void mtty_device_release(struct device *dev)
@@ -1393,12 +1373,16 @@ static int __init mtty_dev_init(void)
pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
+ ret = mdev_register_driver(&mtty_driver);
+ if (ret)
+ goto err_cdev;
+
mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
if (IS_ERR(mtty_dev.vd_class)) {
pr_err("Error: failed to register mtty_dev class\n");
ret = PTR_ERR(mtty_dev.vd_class);
- goto failed1;
+ goto err_driver;
}
mtty_dev.dev.class = mtty_dev.vd_class;
@@ -1407,28 +1391,22 @@ static int __init mtty_dev_init(void)
ret = device_register(&mtty_dev.dev);
if (ret)
- goto failed2;
+ goto err_class;
ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
if (ret)
- goto failed3;
-
- mutex_init(&mdev_list_lock);
- INIT_LIST_HEAD(&mdev_devices_list);
-
- goto all_done;
-
-failed3:
+ goto err_device;
+ return 0;
+err_device:
device_unregister(&mtty_dev.dev);
-failed2:
+err_class:
class_destroy(mtty_dev.vd_class);
-
-failed1:
+err_driver:
+ mdev_unregister_driver(&mtty_driver);
+err_cdev:
cdev_del(&mtty_dev.vd_cdev);
unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
-
-all_done:
return ret;
}
@@ -1439,6 +1417,7 @@ static void __exit mtty_dev_exit(void)
device_unregister(&mtty_dev.dev);
idr_destroy(&mtty_dev.vd_idr);
+ mdev_unregister_driver(&mtty_driver);
cdev_del(&mtty_dev.vd_cdev);
unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
class_destroy(mtty_dev.vd_class);
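For orientation only (not part of the patch text): after this conversion each mediated device is registered as its own vfio_device from the driver's probe callback instead of going through mdev_parent_ops. A simplified sketch of that pattern, assuming mdev_state embeds a struct vfio_device named vdev and that the vfio_init_group_dev()/vfio_register_group_dev() helpers of this kernel series are used (the real sample also accounts for port allocation, omitted here):

/* illustrative sketch, not the full probe from the patch */
static int mtty_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = kzalloc(sizeof(*mdev_state), GFP_KERNEL);
	if (!mdev_state)
		return -ENOMEM;

	vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mtty_dev_ops);

	ret = vfio_register_group_dev(&mdev_state->vdev);
	if (ret) {
		kfree(mdev_state);
		return ret;
	}
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;
}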
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 82dd1b65b7a8..f247e691562d 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -90,8 +90,13 @@ clean := -f $(srctree)/scripts/Makefile.clean obj
echo-cmd = $(if $($(quiet)cmd_$(1)),\
echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
+# sink stdout for 'make -s'
+ redirect :=
+ quiet_redirect :=
+silent_redirect := exec >/dev/null;
+
# printing commands
-cmd = @set -e; $(echo-cmd) $(cmd_$(1))
+cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(cmd_$(1))
###
# if_changed - execute command if any prerequisite is newer than
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 34d257653fb4..02197cb8e3a7 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -219,7 +219,6 @@ endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
ifdef CONFIG_STACK_VALIDATION
ifndef CONFIG_LTO_CLANG
-ifneq ($(SKIP_STACK_VALIDATION),1)
__objtool_obj := $(objtree)/tools/objtool/objtool
@@ -233,7 +232,6 @@ objtool_obj = $(if $(patsubst y%,, \
$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
$(__objtool_obj))
-endif # SKIP_STACK_VALIDATION
endif # CONFIG_LTO_CLANG
endif # CONFIG_STACK_VALIDATION
@@ -388,7 +386,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y)
cmd_update_lto_symversions = \
rm -f $@.symversions \
$(foreach n, $(filter-out FORCE,$^), \
- $(if $(wildcard $(n).symversions), \
+ $(if $(shell test -s $(n).symversions && echo y), \
; cat $(n).symversions >> $@.symversions))
else
cmd_update_lto_symversions = echo >/dev/null
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index a7883e455290..5e9b8057fb24 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -39,12 +39,10 @@ prelink-ext := .lto
# so let's now process the prelinked binary before we link the module.
ifdef CONFIG_STACK_VALIDATION
-ifneq ($(SKIP_STACK_VALIDATION),1)
cmd_ld_ko_o += \
$(objtree)/tools/objtool/objtool $(objtool_args) \
$(@:.ko=$(prelink-ext).o);
-endif # SKIP_STACK_VALIDATION
endif # CONFIG_STACK_VALIDATION
endif # CONFIG_LTO_CLANG
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 23697a6b1eaa..461d4221e4a4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1084,10 +1084,10 @@ sub is_maintained_obsolete {
sub is_SPDX_License_valid {
my ($license) = @_;
- return 1 if (!$tree || which("python") eq "" || !(-e "$root/scripts/spdxcheck.py") || !(-e "$gitroot"));
+ return 1 if (!$tree || which("python3") eq "" || !(-x "$root/scripts/spdxcheck.py") || !(-e "$gitroot"));
my $root_path = abs_path($root);
- my $status = `cd "$root_path"; echo "$license" | python scripts/spdxcheck.py -`;
+ my $status = `cd "$root_path"; echo "$license" | scripts/spdxcheck.py -`;
return 0 if ($status ne "");
return 1;
}
@@ -5361,9 +5361,13 @@ sub process {
}
}
-#goto labels aren't indented, allow a single space however
- if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and
- !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) {
+# check that goto labels aren't indented (allow a single space indentation)
+# and ignore bitfield definitions like foo:1
+# Strictly, labels can have whitespace after the identifier and before the :
+# but this is not allowed here as many ?: uses would appear to be labels
+ if ($sline =~ /^.\s+[A-Za-z_][A-Za-z\d_]*:(?!\s*\d+)/ &&
+ $sline !~ /^. [A-Za-z\d_][A-Za-z\d_]*:/ &&
+ $sline !~ /^.\s+default:/) {
if (WARN("INDENTED_LABEL",
"labels should not be indented\n" . $herecurr) &&
$fix) {
@@ -5458,7 +5462,7 @@ sub process {
# Return of what appears to be an errno should normally be negative
if ($sline =~ /\breturn(?:\s*\(+\s*|\s+)(E[A-Z]+)(?:\s*\)+\s*|\s*)[;:,]/) {
my $name = $1;
- if ($name ne 'EOF' && $name ne 'ERROR') {
+ if ($name ne 'EOF' && $name ne 'ERROR' && $name !~ /^EPOLL/) {
WARN("USE_NEGATIVE_ERRNO",
"return of an errno should typically be negative (ie: return -$1)\n" . $herecurr);
}
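Illustrative C only (not from the patch): the reworked label test keeps warning on indented goto labels while no longer tripping on bitfield widths, and the errno check now tolerates EPOLL* values, which are positive event masks rather than error numbers.

struct s {
	unsigned int hi:8,
		     lo:8;	/* continuation "lo:8" looks label-like, but the
				 * trailing width digits keep it from being flagged */
};

static int f(int err)
{
	if (err)
		goto out;
	return 0;
    out:			/* indented label: still flagged as INDENTED_LABEL */
	return -EINVAL;
}

static __poll_t f_poll(void)
{
	return EPOLLIN;		/* positive event mask, no USE_NEGATIVE_ERRNO warning */
}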
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index a18b47695f55..b7609958ee36 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -40,6 +40,10 @@ cat << EOF
#define __IGNORE_setrlimit /* setrlimit */
#endif
+#ifndef __ARCH_WANT_MEMFD_SECRET
+#define __IGNORE_memfd_secret
+#endif
+
/* Missing flags argument */
#define __IGNORE_renameat /* renameat2 */
diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl
index f67b125c5269..94cd49eff605 100755
--- a/scripts/checkversion.pl
+++ b/scripts/checkversion.pl
@@ -1,10 +1,10 @@
#! /usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
#
-# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION
-# without including <linux/version.h>, or cases of
-# including <linux/version.h> that don't need it.
-# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net>
+# checkversion finds uses of all macros in <linux/version.h>
+# where the source files do not #include <linux/version.h>; or cases
+# of including <linux/version.h> where it is not needed.
+# Copyright (C) 2003, Randy Dunlap <rdunlap@infradead.org>
use strict;
@@ -13,7 +13,8 @@ $| = 1;
my $debugging;
foreach my $file (@ARGV) {
- next if $file =~ "include/linux/version\.h";
+ next if $file =~ "include/generated/uapi/linux/version\.h";
+ next if $file =~ "usr/include/linux/version\.h";
# Open this file.
open( my $f, '<', $file )
or die "Can't open $file: $!\n";
@@ -41,8 +42,11 @@ foreach my $file (@ARGV) {
$iLinuxVersion = $. if m/^\s*#\s*include\s*<linux\/version\.h>/o;
}
- # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE
- if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) {
+ # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION,
+ # LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL
+ if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) ||
+ ($_ =~ /LINUX_VERSION_MAJOR/) || ($_ =~ /LINUX_VERSION_PATCHLEVEL/) ||
+ ($_ =~ /LINUX_VERSION_SUBLEVEL/)) {
$fUseVersion = 1;
last if $iLinuxVersion;
}
diff --git a/scripts/coccicheck b/scripts/coccicheck
index 65fee63aeadb..caba0bff6da7 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -87,7 +87,7 @@ else
fi
# Use only one thread per core by default if hyperthreading is enabled
- THREADS_PER_CORE=$(lscpu | grep "Thread(s) per core: " | tr -cd "[:digit:]")
+ THREADS_PER_CORE=$(LANG=C lscpu | grep "Thread(s) per core: " | tr -cd "[:digit:]")
if [ -z "$J" ]; then
NPROC=$(getconf _NPROCESSORS_ONLN)
if [ $THREADS_PER_CORE -gt 1 -a $NPROC -gt 4 ] ; then
diff --git a/scripts/coccinelle/api/kobj_to_dev.cocci b/scripts/coccinelle/api/kobj_to_dev.cocci
deleted file mode 100644
index cd5d31c6fe76..000000000000
--- a/scripts/coccinelle/api/kobj_to_dev.cocci
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-///
-/// Use kobj_to_dev() instead of container_of()
-///
-// Confidence: High
-// Copyright: (C) 2020 Denis Efremov ISPRAS
-// Options: --no-includes --include-headers
-//
-// Keywords: kobj_to_dev, container_of
-//
-
-virtual context
-virtual report
-virtual org
-virtual patch
-
-
-@r depends on !patch@
-expression ptr;
-symbol kobj;
-position p;
-@@
-
-* container_of(ptr, struct device, kobj)@p
-
-
-@depends on patch@
-expression ptr;
-@@
-
-- container_of(ptr, struct device, kobj)
-+ kobj_to_dev(ptr)
-
-
-@script:python depends on report@
-p << r.p;
-@@
-
-coccilib.report.print_report(p[0], "WARNING opportunity for kobj_to_dev()")
-
-@script:python depends on org@
-p << r.p;
-@@
-
-coccilib.org.print_todo(p[0], "WARNING opportunity for kobj_to_dev()")
diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci
index 168568386034..9b6e2037c2a9 100644
--- a/scripts/coccinelle/free/kfree.cocci
+++ b/scripts/coccinelle/free/kfree.cocci
@@ -22,9 +22,9 @@ position p1;
@@
(
-* kfree@p1(E)
+ kfree@p1(E)
|
-* kfree_sensitive@p1(E)
+ kfree_sensitive@p1(E)
)
@print expression@
@@ -66,9 +66,9 @@ position ok;
while (1) { ...
(
-* kfree@ok(E)
+ kfree@ok(E)
|
-* kfree_sensitive@ok(E)
+ kfree_sensitive@ok(E)
)
... when != break;
when != goto l;
@@ -84,9 +84,9 @@ position free.p1!=loop.ok,p2!={print.p,sz.p};
@@
(
-* kfree@p1(E,...)
+ kfree@p1(E,...)
|
-* kfree_sensitive@p1(E,...)
+ kfree_sensitive@p1(E,...)
)
...
(
diff --git a/scripts/coccinelle/misc/flexible_array.cocci b/scripts/coccinelle/misc/flexible_array.cocci
index 947fbaff82a9..f427fd68ed2d 100644
--- a/scripts/coccinelle/misc/flexible_array.cocci
+++ b/scripts/coccinelle/misc/flexible_array.cocci
@@ -51,21 +51,40 @@ position p : script:python() { relevant(p) };
};
)
+@only_field depends on patch@
+identifier name, array;
+type T;
+position q;
+@@
+
+(
+ struct name {@q
+ T array[0];
+ };
+|
+ struct {@q
+ T array[0];
+ };
+)
+
@depends on patch@
identifier name, array;
type T;
position p : script:python() { relevant(p) };
+// position @q with rule "only_field" simplifies
+// handling of bitfields, arrays, etc.
+position q != only_field.q;
@@
(
- struct name {
+ struct name {@q
...
T array@p[
- 0
];
};
|
- struct {
+ struct {@q
...
T array@p[
- 0
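The new only_field rule exists because a structure whose sole member is a zero-length array cannot simply be converted: C does not allow a flexible-array member to be the only named member of a struct. Roughly, in C terms (illustrative, not taken from the patch):

struct has_header {
	int len;
	u8 data[0];	/* candidate: can safely become "u8 data[];" */
};

struct only_array {
	u8 data[0];	/* matched by only_field and left alone: a lone
			 * flexible-array member would be invalid C */
};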
diff --git a/scripts/coccinelle/misc/irqf_oneshot.cocci b/scripts/coccinelle/misc/irqf_oneshot.cocci
index 7b48287b3dc1..9b6f404d07f2 100644
--- a/scripts/coccinelle/misc/irqf_oneshot.cocci
+++ b/scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -103,11 +103,11 @@ devm_request_threaded_irq@p(dev, irq, NULL, ...)
@script:python depends on org@
p << match.p;
@@
-msg = "ERROR: Threaded IRQ with no primary handler requested without IRQF_ONESHOT"
+msg = "WARNING: Threaded IRQ with no primary handler requested without IRQF_ONESHOT (unless it is nested IRQ)"
coccilib.org.print_todo(p[0],msg)
@script:python depends on report@
p << match.p;
@@
-msg = "ERROR: Threaded IRQ with no primary handler requested without IRQF_ONESHOT"
+msg = "WARNING: Threaded IRQ with no primary handler requested without IRQF_ONESHOT (unless it is nested IRQ)"
coccilib.report.print_report(p[0],msg)
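The downgrade from ERROR to WARNING reflects that a missing IRQF_ONESHOT is only a real bug for non-nested threaded interrupts. When the interrupt is not nested, the usual fix is simply to pass the flag; an illustrative call (handler, name and cookie are hypothetical):

ret = devm_request_threaded_irq(dev, irq, NULL, my_thread_fn,
				IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				"my-device", priv);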
diff --git a/scripts/coccinelle/misc/minmax.cocci b/scripts/coccinelle/misc/minmax.cocci
new file mode 100644
index 000000000000..fcf908b34f27
--- /dev/null
+++ b/scripts/coccinelle/misc/minmax.cocci
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+///
+/// Check for opencoded min(), max() implementations.
+/// Generated patches sometimes require adding a cast to fix compile warning.
+/// Warnings/patches scope intentionally limited to a function body.
+///
+// Confidence: Medium
+// Copyright: (C) 2021 Denis Efremov ISPRAS
+// Options: --no-includes --include-headers
+//
+// Keywords: min, max
+//
+
+
+virtual report
+virtual org
+virtual context
+virtual patch
+
+@rmax depends on !patch@
+identifier func;
+expression x, y;
+binary operator cmp = {>, >=};
+position p;
+@@
+
+func(...)
+{
+ <...
+* ((x) cmp@p (y) ? (x) : (y))
+ ...>
+}
+
+@rmaxif depends on !patch@
+identifier func;
+expression x, y;
+expression max_val;
+binary operator cmp = {>, >=};
+position p;
+@@
+
+func(...)
+{
+ <...
+* if ((x) cmp@p (y)) {
+* max_val = (x);
+* } else {
+* max_val = (y);
+* }
+ ...>
+}
+
+@rmin depends on !patch@
+identifier func;
+expression x, y;
+binary operator cmp = {<, <=};
+position p;
+@@
+
+func(...)
+{
+ <...
+* ((x) cmp@p (y) ? (x) : (y))
+ ...>
+}
+
+@rminif depends on !patch@
+identifier func;
+expression x, y;
+expression min_val;
+binary operator cmp = {<, <=};
+position p;
+@@
+
+func(...)
+{
+ <...
+* if ((x) cmp@p (y)) {
+* min_val = (x);
+* } else {
+* min_val = (y);
+* }
+ ...>
+}
+
+@pmax depends on patch@
+identifier func;
+expression x, y;
+binary operator cmp = {>=, >};
+@@
+
+func(...)
+{
+ <...
+- ((x) cmp (y) ? (x) : (y))
++ max(x, y)
+ ...>
+}
+
+@pmaxif depends on patch@
+identifier func;
+expression x, y;
+expression max_val;
+binary operator cmp = {>=, >};
+@@
+
+func(...)
+{
+ <...
+- if ((x) cmp (y)) {
+- max_val = x;
+- } else {
+- max_val = y;
+- }
++ max_val = max(x, y);
+ ...>
+}
+
+// Don't generate patches for errcode returns.
+@errcode depends on patch@
+position p;
+identifier func;
+expression x;
+binary operator cmp = {<, <=};
+@@
+
+func(...)
+{
+ <...
+ return ((x) cmp@p 0 ? (x) : 0);
+ ...>
+}
+
+@pmin depends on patch@
+identifier func;
+expression x, y;
+binary operator cmp = {<=, <};
+position p != errcode.p;
+@@
+
+func(...)
+{
+ <...
+- ((x) cmp@p (y) ? (x) : (y))
++ min(x, y)
+ ...>
+}
+
+@pminif depends on patch@
+identifier func;
+expression x, y;
+expression min_val;
+binary operator cmp = {<=, <};
+@@
+
+func(...)
+{
+ <...
+- if ((x) cmp (y)) {
+- min_val = x;
+- } else {
+- min_val = y;
+- }
++ min_val = min(x, y);
+ ...>
+}
+
+@script:python depends on report@
+p << rmax.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for max()")
+
+@script:python depends on org@
+p << rmax.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for max()")
+
+@script:python depends on report@
+p << rmaxif.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for max()")
+
+@script:python depends on org@
+p << rmaxif.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for max()")
+
+@script:python depends on report@
+p << rmin.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for min()")
+
+@script:python depends on org@
+p << rmin.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for min()")
+
+@script:python depends on report@
+p << rminif.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for min()")
+
+@script:python depends on org@
+p << rminif.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for min()")
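What the rules above flag and rewrite, expressed as plain C (illustrative; as the header notes, a cast may still be needed after conversion when the operand types differ):

/* before */
len = (a > b) ? a : b;
if (x < y)
	shortest = x;
else
	shortest = y;

/* after (patch mode) */
len = max(a, b);
shortest = min(x, y);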
diff --git a/scripts/coccinelle/misc/swap.cocci b/scripts/coccinelle/misc/swap.cocci
new file mode 100644
index 000000000000..c5e71b7ef7f5
--- /dev/null
+++ b/scripts/coccinelle/misc/swap.cocci
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+///
+/// Check for opencoded swap() implementation.
+///
+// Confidence: High
+// Copyright: (C) 2021 Denis Efremov ISPRAS
+// Options: --no-includes --include-headers
+//
+// Keywords: swap
+//
+
+virtual patch
+virtual org
+virtual report
+virtual context
+
+@rvar depends on !patch@
+identifier tmp;
+expression a, b;
+type T;
+position p;
+@@
+
+(
+* T tmp;
+|
+* T tmp = 0;
+|
+* T *tmp = NULL;
+)
+... when != tmp
+* tmp = a;
+* a = b;@p
+* b = tmp;
+... when != tmp
+
+@r depends on !patch@
+identifier tmp;
+expression a, b;
+position p != rvar.p;
+@@
+
+* tmp = a;
+* a = b;@p
+* b = tmp;
+
+@rpvar depends on patch@
+identifier tmp;
+expression a, b;
+type T;
+@@
+
+(
+- T tmp;
+|
+- T tmp = 0;
+|
+- T *tmp = NULL;
+)
+... when != tmp
+- tmp = a;
+- a = b;
+- b = tmp
++ swap(a, b)
+ ;
+... when != tmp
+
+@rp depends on patch@
+identifier tmp;
+expression a, b;
+@@
+
+- tmp = a;
+- a = b;
+- b = tmp
++ swap(a, b)
+ ;
+
+@depends on patch && (rpvar || rp)@
+@@
+
+(
+ for (...;...;...)
+- {
+ swap(...);
+- }
+|
+ while (...)
+- {
+ swap(...);
+- }
+|
+ if (...)
+- {
+ swap(...);
+- }
+)
+
+
+@script:python depends on report@
+p << r.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING opportunity for swap()")
+
+@script:python depends on org@
+p << r.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING opportunity for swap()")
+
+@script:python depends on report@
+p << rvar.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING opportunity for swap()")
+
+@script:python depends on org@
+p << rvar.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING opportunity for swap()")
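In C terms, the pattern being detected and the resulting rewrite look like this (illustrative only; the rule also drops the temporary once it becomes unused and unwraps now-single-statement braces):

/* before */
int tmp;

tmp = a;
a = b;
b = tmp;

/* after (patch mode) */
swap(a, b);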
diff --git a/scripts/coccinelle/misc/uninitialized_var.cocci b/scripts/coccinelle/misc/uninitialized_var.cocci
index 8fa845cefe11..69bbaae47e73 100644
--- a/scripts/coccinelle/misc/uninitialized_var.cocci
+++ b/scripts/coccinelle/misc/uninitialized_var.cocci
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
///
/// Please, don't reintroduce uninitialized_var().
-/// From Documentation/process/deprecated.rst:
+///
+/// From Documentation/process/deprecated.rst,
+/// commit 4b19bec97c88 ("docs: deprecated.rst: Add uninitialized_var()"):
/// For any compiler warnings about uninitialized variables, just add
/// an initializer. Using warning-silencing tricks is dangerous as it
/// papers over real bugs (or can in the future), and suppresses unrelated
@@ -11,6 +13,11 @@
/// obviously redundant, the compiler's dead-store elimination pass will make
/// sure there are no needless variable writes.
///
+/// Later, commit 3942ea7a10c9 ("deprecated.rst: Remove now removed
+/// uninitialized_var") removed this section because all initializations of
+/// this kind were cleaned-up from the kernel. This cocci rule checks that
+/// the macro is not explicitly or implicitly reintroduced.
+///
// Confidence: High
// Copyright: (C) 2020 Denis Efremov ISPRAS
// Options: --no-includes --include-headers
@@ -40,12 +47,10 @@ position p;
p << r.p;
@@
-coccilib.report.print_report(p[0],
- "WARNING this kind of initialization is deprecated (https://www.kernel.org/doc/html/latest/process/deprecated.html#uninitialized-var)")
+coccilib.report.print_report(p[0], "WARNING this kind of initialization is deprecated")
@script:python depends on org@
p << r.p;
@@
-coccilib.org.print_todo(p[0],
- "WARNING this kind of initialization is deprecated (https://www.kernel.org/doc/html/latest/process/deprecated.html#uninitialized-var)")
+coccilib.org.print_todo(p[0], "WARNING this kind of initialization is deprecated")
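The replacement recommended for the deprecated pattern is a plain initializer; illustrative example of the self-initialization the rule catches and its fix:

/* implicit reintroduction of uninitialized_var(), flagged by the rule */
int ret = ret;

/* preferred */
int ret = 0;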
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 90398347e366..5fbad61fe490 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -3,11 +3,10 @@
# (c) 2014, Sasha Levin <sasha.levin@oracle.com>
#set -x
-if [[ $# < 1 ]]; then
+usage() {
echo "Usage:"
- echo " $0 -r <release> | <vmlinux> [base path] [modules path]"
- exit 1
-fi
+ echo " $0 -r <release> | <vmlinux> [<base path>|auto] [<modules path>]"
+}
if [[ $1 == "-r" ]] ; then
vmlinux=""
@@ -24,6 +23,7 @@ if [[ $1 == "-r" ]] ; then
if [[ $vmlinux == "" ]] ; then
echo "ERROR! vmlinux image for release $release is not found" >&2
+ usage
exit 2
fi
else
@@ -31,12 +31,35 @@ else
basepath=${2-auto}
modpath=$3
release=""
+ debuginfod=
+
+ # Can we use debuginfod-find?
+ if type debuginfod-find >/dev/null 2>&1 ; then
+ debuginfod=${1-only}
+ fi
+
+ if [[ $vmlinux == "" && -z $debuginfod ]] ; then
+ echo "ERROR! vmlinux image must be specified" >&2
+ usage
+ exit 1
+ fi
fi
declare -A cache
declare -A modcache
find_module() {
+ if [[ -n $debuginfod ]] ; then
+ if [[ -n $modbuildid ]] ; then
+ debuginfod-find debuginfo $modbuildid && return
+ fi
+
+ # Only using debuginfod so don't try to find vmlinux module path
+ if [[ $debuginfod == "only" ]] ; then
+ return
+ fi
+ fi
+
if [[ "$modpath" != "" ]] ; then
for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
if readelf -WS "$fn" | grep -qwF .debug_line ; then
@@ -51,7 +74,7 @@ find_module() {
find_module && return
if [[ $release == "" ]] ; then
- release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" | sed -n 's/\$1 = "\(.*\)".*/\1/p')
+ release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" 2>/dev/null | sed -n 's/\$1 = "\(.*\)".*/\1/p')
fi
for dn in {/usr/lib/debug,}/lib/modules/$release ; do
@@ -105,7 +128,7 @@ parse_symbol() {
if [[ "${cache[$module,$name]+isset}" == "isset" ]]; then
local base_addr=${cache[$module,$name]}
else
- local base_addr=$(nm "$objfile" | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}')
+ local base_addr=$(nm "$objfile" 2>/dev/null | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}')
if [[ $base_addr == "" ]] ; then
# address not found
return
@@ -129,7 +152,7 @@ parse_symbol() {
if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
local code=${cache[$module,$address]}
else
- local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address")
+ local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
cache[$module,$address]=$code
fi
@@ -150,6 +173,27 @@ parse_symbol() {
symbol="$segment$name ($code)"
}
+debuginfod_get_vmlinux() {
+ local vmlinux_buildid=${1##* }
+
+ if [[ $vmlinux != "" ]]; then
+ return
+ fi
+
+ if [[ $vmlinux_buildid =~ ^[0-9a-f]+ ]]; then
+ vmlinux=$(debuginfod-find debuginfo $vmlinux_buildid)
+ if [[ $? -ne 0 ]] ; then
+ echo "ERROR! vmlinux image not found via debuginfod-find" >&2
+ usage
+ exit 2
+ fi
+ return
+ fi
+ echo "ERROR! Build ID for vmlinux not found. Try passing -r or specifying vmlinux" >&2
+ usage
+ exit 2
+}
+
decode_code() {
local scripts=`dirname "${BASH_SOURCE[0]}"`
@@ -157,6 +201,14 @@ decode_code() {
}
handle_line() {
+ if [[ $basepath == "auto" && $vmlinux != "" ]] ; then
+ module=""
+ symbol="kernel_init+0x0/0x0"
+ parse_symbol
+ basepath=${symbol#kernel_init (}
+ basepath=${basepath%/init/main.c:*)}
+ fi
+
local words
# Tokenize
@@ -182,16 +234,28 @@ handle_line() {
fi
done
+ if [[ ${words[$last]} =~ ^[0-9a-f]+\] ]]; then
+ words[$last-1]="${words[$last-1]} ${words[$last]}"
+ unset words[$last]
+ last=$(( $last - 1 ))
+ fi
+
if [[ ${words[$last]} =~ \[([^]]+)\] ]]; then
module=${words[$last]}
module=${module#\[}
module=${module%\]}
+ modbuildid=${module#* }
+ module=${module% *}
+ if [[ $modbuildid == $module ]]; then
+ modbuildid=
+ fi
symbol=${words[$last-1]}
unset words[$last-1]
else
# The symbol is the last element, process it
symbol=${words[$last]}
module=
+ modbuildid=
fi
unset words[$last]
@@ -201,14 +265,6 @@ handle_line() {
echo "${words[@]}" "$symbol $module"
}
-if [[ $basepath == "auto" ]] ; then
- module=""
- symbol="kernel_init+0x0/0x0"
- parse_symbol
- basepath=${symbol#kernel_init (}
- basepath=${basepath%/init/main.c:*)}
-fi
-
while read line; do
# Let's see if we have an address in the line
if [[ $line =~ \[\<([^]]+)\>\] ]] ||
@@ -218,6 +274,9 @@ while read line; do
# Is it a code line?
elif [[ $line == *Code:* ]]; then
decode_code "$line"
+ # Is it a version line?
+ elif [[ -n $debuginfod && $line =~ PID:\ [0-9]+\ Comm: ]]; then
+ debuginfod_get_vmlinux "$line"
else
# Nothing special in this line, show it as is
echo "$line"
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index bfa1ea8f5f98..5d84b44a2a2a 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -678,7 +678,7 @@ static void check_conf(struct menu *menu)
check_conf(child);
}
-static struct option long_opts[] = {
+static const struct option long_opts[] = {
{"help", no_argument, NULL, 'h'},
{"silent", no_argument, NULL, 's'},
{"oldaskconfig", no_argument, &input_mode_opt, oldaskconfig},
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 475faa15854e..36ef7b37fc5d 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -38,9 +38,7 @@ LDFLAGS_vmlinux="$3"
# Will be supressed by "make -s"
info()
{
- if [ "${quiet}" != "silent_" ]; then
- printf " %-7s %s\n" "${1}" "${2}"
- fi
+ printf " %-7s %s\n" "${1}" "${2}"
}
# Generate a linker script to ensure correct ordering of initcalls.
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index d22cf91212b0..319f92104f56 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -30,7 +30,12 @@ icc)
echo 16.0.3
;;
llvm)
- echo 10.0.1
+ # https://lore.kernel.org/r/YMtib5hKVyNknZt3@osiris/
+ if [ "$SRCARCH" = s390 ]; then
+ echo 13.0.0
+ else
+ echo 10.0.1
+ fi
;;
*)
echo "$1: unknown tool" >&2
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 4ae735039daf..6a2a04d92f42 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -9,8 +9,6 @@ PREEMPT_RT=$5
CC_VERSION="$6"
LD=$7
-vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-
# Do not expand names
set -f
@@ -70,19 +68,27 @@ UTS_VERSION="$(echo $UTS_VERSION $CONFIG_FLAGS $TIMESTAMP | cut -b -$UTS_LEN)"
# Only replace the real compile.h if the new one is different,
# in order to preserve the timestamp and avoid unnecessary
# recompilations.
-# We don't consider the file changed if only the date/time changed.
+# We don't consider the file changed if only the date/time changed,
+# unless KBUILD_BUILD_TIMESTAMP was explicitly set (e.g. for
+# reproducible builds with that value referring to a commit timestamp).
# A kernel config change will increase the generation number, thus
# causing compile.h to be updated (including date/time) due to the
# changed comment in the
# first line.
+if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then
+ IGNORE_PATTERN="UTS_VERSION"
+else
+ IGNORE_PATTERN="NOT_A_PATTERN_TO_BE_MATCHED"
+fi
+
if [ -r $TARGET ] && \
- grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \
- grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \
+ grep -v $IGNORE_PATTERN $TARGET > .tmpver.1 && \
+ grep -v $IGNORE_PATTERN .tmpcompile > .tmpver.2 && \
cmp -s .tmpver.1 .tmpver.2; then
rm -f .tmpcompile
else
- vecho " UPD $TARGET"
+ echo " UPD $TARGET"
mv -f .tmpcompile $TARGET
fi
rm -f .tmpver.1 .tmpver.2
diff --git a/scripts/mkmakefile b/scripts/mkmakefile
deleted file mode 100755
index 1cb174751429..000000000000
--- a/scripts/mkmakefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Generates a small Makefile used in the root of the output
-# directory, to allow make to be started from there.
-# The Makefile also allow for more convinient build of external modules
-
-# Usage
-# $1 - Kernel src directory
-
-if [ "${quiet}" != "silent_" ]; then
- echo " GEN Makefile"
-fi
-
-cat << EOF > Makefile
-# Automatically generated by $0: don't edit
-include $1/Makefile
-EOF
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 3e623ccc020b..270a7df898e2 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -660,8 +660,11 @@ static void handle_modversion(const struct module *mod,
unsigned int crc;
if (sym->st_shndx == SHN_UNDEF) {
- warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n",
- symname, mod->name, mod->is_vmlinux ? "" : ".ko");
+ warn("EXPORT symbol \"%s\" [%s%s] version ...\n"
+ "Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
+ symname, mod->name, mod->is_vmlinux ? "" : ".ko",
+ symname);
+
return;
}
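The extra hint line points at the usual fix: a symbol exported from assembly needs a C prototype visible to genksyms, conventionally placed in the architecture's asm-prototypes.h. A hypothetical example (path and symbol name illustrative):

/* arch/<arch>/include/asm/asm-prototypes.h */
#include <asm-generic/asm-prototypes.h>

/* prototype for a symbol defined in assembly and exported with EXPORT_SYMBOL */
long my_asm_helper(long arg);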
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index c17e48020ec3..8f6b13ae46bf 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -173,39 +173,6 @@ my $mcount_regex; # Find the call site to mcount (return offset)
my $mcount_adjust; # Address adjustment to mcount offset
my $alignment; # The .align value to use for $mcount_section
my $section_type; # Section header plus possible alignment command
-my $can_use_local = 0; # If we can use local function references
-
-# Shut up recordmcount if user has older objcopy
-my $quiet_recordmcount = ".tmp_quiet_recordmcount";
-my $print_warning = 1;
-$print_warning = 0 if ( -f $quiet_recordmcount);
-
-##
-# check_objcopy - whether objcopy supports --globalize-symbols
-#
-# --globalize-symbols came out in 2.17, we must test the version
-# of objcopy, and if it is less than 2.17, then we can not
-# record local functions.
-sub check_objcopy
-{
- open (IN, "$objcopy --version |") or die "error running $objcopy";
- while (<IN>) {
- if (/objcopy.*\s(\d+)\.(\d+)/) {
- $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17));
- last;
- }
- }
- close (IN);
-
- if (!$can_use_local && $print_warning) {
- print STDERR "WARNING: could not find objcopy version or version " .
- "is less than 2.17.\n" .
- "\tLocal function references are disabled.\n";
- open (QUIET, ">$quiet_recordmcount");
- printf QUIET "Disables the warning from recordmcount.pl\n";
- close QUIET;
- }
-}
if ($arch =~ /(x86(_64)?)|(i386)/) {
if ($bits == 64) {
@@ -434,8 +401,6 @@ if ($filename =~ m,^(.*)(\.\S),) {
my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
-check_objcopy();
-
#
# Step 1: find all the local (static functions) and weak symbols.
# 't' is local, 'w/W' is weak
@@ -473,11 +438,6 @@ sub update_funcs
# is this function static? If so, note this fact.
if (defined $locals{$ref_func}) {
-
- # only use locals if objcopy supports globalize-symbols
- if (!$can_use_local) {
- return;
- }
$convert{$ref_func} = 1;
}
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index db941f6d9591..6b54e46a0f12 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -59,32 +59,19 @@ scm_version()
fi
# If we are past a tagged commit (like
# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
- #
- # Ensure the abbreviated sha1 has exactly 12
- # hex characters, to make the output
- # independent of git version, local
- # core.abbrev settings and/or total number of
- # objects in the current repository - passing
- # --abbrev=12 ensures a minimum of 12, and the
- # awk substr() then picks the 'g' and first 12
- # hex chars.
- if atag="$(git describe --abbrev=12 2>/dev/null)"; then
- echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}'
-
- # If we don't have a tag at all we print -g{commitish},
- # again using exactly 12 hex chars.
- else
- head="$(echo $head | cut -c1-12)"
- printf '%s%s' -g $head
+ if atag="$(git describe 2>/dev/null)"; then
+ echo "$atag" | awk -F- '{printf("-%05d", $(NF-1))}'
fi
- fi
- # Is this git on svn?
- if git config --get svn-remote.svn.url >/dev/null; then
- printf -- '-svn%s' "$(git svn find-rev $head)"
+ # Add -g and exactly 12 hex chars.
+ printf '%s%s' -g "$(echo $head | cut -c1-12)"
fi
# Check for uncommitted changes.
+ # This script must avoid any write attempt to the source tree,
+ # which might be read-only.
+ # You cannot use 'git describe --dirty' because it tries to
+ # create .git/index.lock .
# First, with git-status, but --no-optional-locks is only
# supported in git >= 2.14, so fall back to git-diff-index if
# it fails. Note that git-diff-index does not refresh the
@@ -93,45 +80,9 @@ scm_version()
if {
git --no-optional-locks status -uno --porcelain 2>/dev/null ||
git diff-index --name-only HEAD
- } | grep -qvE '^(.. )?scripts/package'; then
+ } | read dummy; then
printf '%s' -dirty
fi
-
- # All done with git
- return
- fi
-
- # Check for mercurial and a mercurial repo.
- if test -d .hg && hgid=$(hg id 2>/dev/null); then
- # Do we have an tagged version? If so, latesttagdistance == 1
- if [ "$(hg log -r . --template '{latesttagdistance}')" = "1" ]; then
- id=$(hg log -r . --template '{latesttag}')
- printf '%s%s' -hg "$id"
- else
- tag=$(printf '%s' "$hgid" | cut -d' ' -f2)
- if [ -z "$tag" -o "$tag" = tip ]; then
- id=$(printf '%s' "$hgid" | sed 's/[+ ].*//')
- printf '%s%s' -hg "$id"
- fi
- fi
-
- # Are there uncommitted changes?
- # These are represented by + after the changeset id.
- case "$hgid" in
- *+|*+\ *) printf '%s' -dirty ;;
- esac
-
- # All done with mercurial
- return
- fi
-
- # Check for svn and a svn repo.
- if rev=$(LC_ALL=C svn info 2>/dev/null | grep '^Last Changed Rev'); then
- rev=$(echo $rev | awk '{print $NF}')
- printf -- '-svn%s' "$rev"
-
- # All done with svn
- return
fi
}
@@ -180,15 +131,16 @@ res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}"
if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then
# full scm version string
res="$res$(scm_version)"
-else
- # append a plus sign if the repository is not in a clean
- # annotated or signed tagged state (as git describe only
- # looks at signed or annotated tags - git tag -a/-s) and
- # LOCALVERSION= is not specified
- if test "${LOCALVERSION+set}" != "set"; then
- scm=$(scm_version --short)
- res="$res${scm:++}"
- fi
+elif [ "${LOCALVERSION+set}" != "set" ]; then
+ # If the variable LOCALVERSION is not set, append a plus
+ # sign if the repository is not in a clean annotated or
+ # signed tagged state (as git describe only looks at signed
+ # or annotated tags - git tag -a/-s).
+ #
+ # If the variable LOCALVERSION is set (including being set
+ # to an empty string), we don't want to append a plus sign.
+ scm=$(scm_version --short)
+ res="$res${scm:++}"
fi
echo "$res"
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 3e784cf9f401..ebd06ae642c9 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -44,7 +44,7 @@ def read_spdxdata(repo):
continue
exception = None
- for l in open(el.path).readlines():
+ for l in open(el.path, encoding="utf-8").readlines():
if l.startswith('Valid-License-Identifier:'):
lid = l.split(':')[1].strip().upper()
if lid in spdx.licenses:
diff --git a/scripts/syscallhdr.sh b/scripts/syscallhdr.sh
index 848ac2735115..22e34cd46b9b 100755
--- a/scripts/syscallhdr.sh
+++ b/scripts/syscallhdr.sh
@@ -69,7 +69,7 @@ guard=_UAPI_ASM_$(basename "$outfile" |
sed -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g')
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+$abis" "$infile" | sort -n | {
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+$abis" "$infile" | {
echo "#ifndef $guard"
echo "#define $guard"
echo
diff --git a/scripts/syscallnr.sh b/scripts/syscallnr.sh
new file mode 100644
index 000000000000..3aa29e0dcc52
--- /dev/null
+++ b/scripts/syscallnr.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Generate a syscall number header.
+#
+# Each line of the syscall table should have the following format:
+#
+# NR ABI NAME [NATIVE] [COMPAT]
+#
+# NR syscall number
+# ABI ABI name
+# NAME syscall name
+# NATIVE native entry point (optional)
+# COMPAT compat entry point (optional)
+set -e
+
+usage() {
+ echo >&2 "usage: $0 [--abis ABIS] [--prefix PREFIX] INFILE OUTFILE" >&2
+ echo >&2
+ echo >&2 " INFILE input syscall table"
+ echo >&2 " OUTFILE output header file"
+ echo >&2
+ echo >&2 "options:"
+ echo >&2 " --abis ABIS ABI(s) to handle (By default, all lines are handled)"
+ echo >&2 " --prefix PREFIX The prefix to the macro like __NR_<PREFIX><NAME>"
+ exit 1
+}
+
+# default unless specified by options
+abis=
+prefix=
+
+while [ $# -gt 0 ]
+do
+ case $1 in
+ --abis)
+ abis=$(echo "($2)" | tr ',' '|')
+ shift 2;;
+ --prefix)
+ prefix=$2
+ shift 2;;
+ -*)
+ echo "$1: unknown option" >&2
+ usage;;
+ *)
+ break;;
+ esac
+done
+
+if [ $# -ne 2 ]; then
+ usage
+fi
+
+infile="$1"
+outfile="$2"
+
+guard=_ASM_$(basename "$outfile" |
+ sed -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g')
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+$abis" "$infile" | sort -n | {
+ echo "#ifndef $guard"
+ echo "#define $guard"
+ echo
+
+ max=0
+ while read nr abi name native compat ; do
+ max=$nr
+ done
+
+ echo "#define __NR_${prefix}syscalls $(($max + 1))"
+ echo
+ echo "#endif /* $guard */"
+} > "$outfile"
diff --git a/scripts/syscalltbl.sh b/scripts/syscalltbl.sh
index aa6ab156301c..6abe143889ef 100755
--- a/scripts/syscalltbl.sh
+++ b/scripts/syscalltbl.sh
@@ -52,10 +52,15 @@ outfile="$2"
nxt=0
-grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | sort -n | {
+grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | {
while read nr abi name native compat ; do
+ if [ $nxt -gt $nr ]; then
+ echo "error: $infile: syscall table is not sorted or duplicates the same syscall number" >&2
+ exit 1
+ fi
+
while [ $nxt -lt $nr ]; do
echo "__SYSCALL($nxt, sys_ni_syscall)"
nxt=$((nxt + 1))
diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py
index 74f8aadfd4cb..7011fbe003ff 100755
--- a/scripts/tracing/draw_functrace.py
+++ b/scripts/tracing/draw_functrace.py
@@ -17,7 +17,7 @@ Usage:
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
- $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
+ $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
@@ -103,10 +103,10 @@ def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
- m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
+ m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
- return (m.group(1), m.group(2), m.group(3))
+ return (m.group(2), m.group(3), m.group(4))
def main():
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index dc345ac93205..4e1f96b216a8 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -304,7 +304,7 @@ static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
if (!inbounds(e, sizeof(u8)))
goto fail;
if (data)
- *data = get_unaligned((u8 *)e->pos);
+ *data = *((u8 *)e->pos);
e->pos += sizeof(u8);
return true;
}
diff --git a/security/security.c b/security/security.c
index 09533cbb7221..9ffa9e9c5c55 100644
--- a/security/security.c
+++ b/security/security.c
@@ -58,10 +58,11 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
[LOCKDOWN_DEBUGFS] = "debugfs access",
[LOCKDOWN_XMON_WR] = "xmon write access",
+ [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
[LOCKDOWN_INTEGRITY_MAX] = "integrity",
[LOCKDOWN_KCORE] = "/proc/kcore access",
[LOCKDOWN_KPROBES] = "use of kprobes",
- [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+ [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
[LOCKDOWN_PERF] = "unsafe use of perf",
[LOCKDOWN_TRACEFS] = "use of tracefs",
[LOCKDOWN_XMON_RW] = "xmon read and write access",
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index defc5ef35c66..0ae1b718194a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -874,7 +874,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
rc = sidtab_init(s);
if (rc) {
pr_err("SELinux: out of memory on SID table init\n");
- goto out;
+ return rc;
}
head = p->ocontexts[OCON_ISID];
@@ -885,7 +885,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
if (sid == SECSID_NULL) {
pr_err("SELinux: SID 0 was assigned a context.\n");
sidtab_destroy(s);
- goto out;
+ return -EINVAL;
}
/* Ignore initial SIDs unused by this kernel. */
@@ -897,12 +897,10 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
pr_err("SELinux: unable to load initial SID %s.\n",
name);
sidtab_destroy(s);
- goto out;
+ return rc;
}
}
- rc = 0;
-out:
- return rc;
+ return 0;
}
int policydb_class_isvalid(struct policydb *p, unsigned int class)
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index d9077e91382b..6ddf646cda65 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -520,7 +520,7 @@ static int ac97_bus_remove(struct device *dev)
struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/sound/aoa/soundbus/i2sbus/pcm.c b/sound/aoa/soundbus/i2sbus/pcm.c
index 1c8e8131a716..a9e502a6cdeb 100644
--- a/sound/aoa/soundbus/i2sbus/pcm.c
+++ b/sound/aoa/soundbus/i2sbus/pcm.c
@@ -918,10 +918,8 @@ i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card,
}
cii = kzalloc(sizeof(struct codec_info_item), GFP_KERNEL);
- if (!cii) {
- printk(KERN_DEBUG "i2sbus: failed to allocate cii\n");
+ if (!cii)
return -ENOMEM;
- }
/* use the private data to point to the codec info */
cii->sdev = soundbus_dev_get(dev);
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 6322e6392594..a67e6685b00c 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -47,9 +47,7 @@ static unsigned short pxa2xx_ac97_legacy_read(struct snd_ac97 *ac97,
static void pxa2xx_ac97_legacy_write(struct snd_ac97 *ac97,
unsigned short reg, unsigned short val)
{
- int __always_unused ret;
-
- ret = pxa2xx_ac97_write(ac97->num, reg, val);
+ pxa2xx_ac97_write(ac97->num, reg, val);
}
static const struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
diff --git a/sound/core/control.c b/sound/core/control.c
index 498e3701514a..a25c0d64d104 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -995,7 +995,10 @@ static int __snd_ctl_elem_info(struct snd_card *card,
#ifdef CONFIG_SND_DEBUG
info->access = 0;
#endif
- result = kctl->info(kctl, info);
+ result = snd_power_ref_and_wait(card);
+ if (!result)
+ result = kctl->info(kctl, info);
+ snd_power_unref(card);
if (result >= 0) {
snd_BUG_ON(info->access);
index_offset = snd_ctl_get_ioff(kctl, &info->id);
@@ -1042,9 +1045,6 @@ static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl,
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
- result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
- if (result < 0)
- return result;
result = snd_ctl_elem_info(ctl, &info);
if (result < 0)
return result;
@@ -1088,7 +1088,10 @@ static int snd_ctl_elem_read(struct snd_card *card,
if (!snd_ctl_skip_validation(&info))
fill_remaining_elem_value(control, &info, pattern);
- ret = kctl->get(kctl, control);
+ ret = snd_power_ref_and_wait(card);
+ if (!ret)
+ ret = kctl->get(kctl, control);
+ snd_power_unref(card);
if (ret < 0)
return ret;
if (!snd_ctl_skip_validation(&info) &&
@@ -1113,10 +1116,6 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
if (IS_ERR(control))
return PTR_ERR(control);
- result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
- if (result < 0)
- goto error;
-
down_read(&card->controls_rwsem);
result = snd_ctl_elem_read(card, control);
up_read(&card->controls_rwsem);
@@ -1154,7 +1153,10 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
}
snd_ctl_build_ioff(&control->id, kctl, index_offset);
- result = kctl->put(kctl, control);
+ result = snd_power_ref_and_wait(card);
+ if (!result)
+ result = kctl->put(kctl, control);
+ snd_power_unref(card);
if (result < 0) {
up_write(&card->controls_rwsem);
return result;
@@ -1183,10 +1185,6 @@ static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
return PTR_ERR(control);
card = file->card;
- result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
- if (result < 0)
- goto error;
-
result = snd_ctl_elem_write(card, file, control);
if (result < 0)
goto error;
@@ -1669,7 +1667,7 @@ static int call_tlv_handler(struct snd_ctl_file *file, int op_flag,
{SNDRV_CTL_TLV_OP_CMD, SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND},
};
struct snd_kcontrol_volatile *vd = &kctl->vd[snd_ctl_get_ioff(kctl, id)];
- int i;
+ int i, ret;
/* Check support of the request for this element. */
for (i = 0; i < ARRAY_SIZE(pairs); ++i) {
@@ -1687,7 +1685,11 @@ static int call_tlv_handler(struct snd_ctl_file *file, int op_flag,
vd->owner != NULL && vd->owner != file)
return -EPERM;
- return kctl->tlv.c(kctl, op_flag, size, buf);
+ ret = snd_power_ref_and_wait(file->card);
+ if (!ret)
+ ret = kctl->tlv.c(kctl, op_flag, size, buf);
+ snd_power_unref(file->card);
+ return ret;
}
static int read_tlv_buf(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id,
@@ -1815,11 +1817,7 @@ static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg
case SNDRV_CTL_IOCTL_POWER:
return -ENOPROTOOPT;
case SNDRV_CTL_IOCTL_POWER_STATE:
-#ifdef CONFIG_PM
- return put_user(card->power_state, ip) ? -EFAULT : 0;
-#else
return put_user(SNDRV_CTL_POWER_D0, ip) ? -EFAULT : 0;
-#endif
}
down_read(&snd_ioctl_rwsem);
list_for_each_entry(p, &snd_control_ioctls, list) {
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 1d708aab9c98..470dabc60aa0 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -96,9 +96,6 @@ static int snd_ctl_elem_info_compat(struct snd_ctl_file *ctl,
if (get_user(data->value.enumerated.item, &data32->value.enumerated.item))
goto error;
- err = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
- if (err < 0)
- goto error;
err = snd_ctl_elem_info(ctl, data);
if (err < 0)
goto error;
@@ -187,7 +184,10 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
return -ENOMEM;
}
info->id = *id;
- err = kctl->info(kctl, info);
+ err = snd_power_ref_and_wait(card);
+ if (!err)
+ err = kctl->info(kctl, info);
+ snd_power_unref(card);
up_read(&card->controls_rwsem);
if (err >= 0) {
err = info->type;
@@ -298,9 +298,6 @@ static int ctl_elem_read_user(struct snd_card *card,
if (err < 0)
goto error;
- err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
- if (err < 0)
- goto error;
err = snd_ctl_elem_read(card, data);
if (err < 0)
goto error;
@@ -326,9 +323,6 @@ static int ctl_elem_write_user(struct snd_ctl_file *file,
if (err < 0)
goto error;
- err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
- if (err < 0)
- goto error;
err = snd_ctl_elem_write(card, file, data);
if (err < 0)
goto error;
diff --git a/sound/core/control_led.c b/sound/core/control_led.c
index a90e31dbde61..764058cc345d 100644
--- a/sound/core/control_led.c
+++ b/sound/core/control_led.c
@@ -393,11 +393,11 @@ static void snd_ctl_led_dev_release(struct device *dev)
* sysfs
*/
-static ssize_t show_mode(struct device *dev,
+static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev);
- const char *str;
+ const char *str = NULL;
switch (led->mode) {
case MODE_FOLLOW_MUTE: str = "follow-mute"; break;
@@ -408,7 +408,8 @@ static ssize_t show_mode(struct device *dev,
return sprintf(buf, "%s\n", str);
}
-static ssize_t store_mode(struct device *dev, struct device_attribute *attr,
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev);
@@ -437,7 +438,7 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_brightness(struct device *dev,
+static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev);
@@ -445,8 +446,8 @@ static ssize_t show_brightness(struct device *dev,
return sprintf(buf, "%u\n", ledtrig_audio_get(led->trigger_type));
}
-static DEVICE_ATTR(mode, 0644, show_mode, store_mode);
-static DEVICE_ATTR(brightness, 0444, show_brightness, NULL);
+static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RO(brightness);
static struct attribute *snd_ctl_led_dev_attrs[] = {
&dev_attr_mode.attr,
@@ -580,22 +581,25 @@ static ssize_t set_led_id(struct snd_ctl_led_card *led_card, const char *buf, si
return count;
}
-static ssize_t parse_attach(struct device *dev, struct device_attribute *attr,
+static ssize_t attach_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev);
return set_led_id(led_card, buf, count, true);
}
-static ssize_t parse_detach(struct device *dev, struct device_attribute *attr,
+static ssize_t detach_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev);
return set_led_id(led_card, buf, count, false);
}
-static ssize_t ctl_reset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev);
int err;
@@ -608,8 +612,8 @@ static ssize_t ctl_reset(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t ctl_list(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t list_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev);
struct snd_card *card;
@@ -642,10 +646,10 @@ static ssize_t ctl_list(struct device *dev,
return buf2 - buf;
}
-static DEVICE_ATTR(attach, 0200, NULL, parse_attach);
-static DEVICE_ATTR(detach, 0200, NULL, parse_detach);
-static DEVICE_ATTR(reset, 0200, NULL, ctl_reset);
-static DEVICE_ATTR(list, 0444, ctl_list, NULL);
+static DEVICE_ATTR_WO(attach);
+static DEVICE_ATTR_WO(detach);
+static DEVICE_ATTR_WO(reset);
+static DEVICE_ATTR_RO(list);
static struct attribute *snd_ctl_led_card_attrs[] = {
&dev_attr_attach.attr,
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 264b8ea64bc2..e95fa275c289 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -195,7 +195,8 @@ static int snd_hwdep_dsp_status(struct snd_hwdep *hw,
return -ENXIO;
memset(&info, 0, sizeof(info));
info.dsp_loaded = hw->dsp_loaded;
- if ((err = hw->ops.dsp_status(hw, &info)) < 0)
+ err = hw->ops.dsp_status(hw, &info);
+ if (err < 0)
return err;
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
@@ -500,7 +501,8 @@ static void __init snd_hwdep_proc_init(void)
{
struct snd_info_entry *entry;
- if ((entry = snd_info_create_module_entry(THIS_MODULE, "hwdep", NULL)) != NULL) {
+ entry = snd_info_create_module_entry(THIS_MODULE, "hwdep", NULL);
+ if (entry) {
entry->c.text.read = snd_hwdep_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
diff --git a/sound/core/info_oss.c b/sound/core/info_oss.c
index 83900485dd8c..1ba887c7954e 100644
--- a/sound/core/info_oss.c
+++ b/sound/core/info_oss.c
@@ -31,7 +31,8 @@ int snd_oss_info_register(int dev, int num, char *string)
return -ENXIO;
mutex_lock(&strings);
if (string == NULL) {
- if ((x = snd_sndstat_strings[num][dev]) != NULL) {
+ x = snd_sndstat_strings[num][dev];
+ if (x) {
kfree(x);
x = NULL;
}
diff --git a/sound/core/init.c b/sound/core/init.c
index ef41f5b3a240..1490568efdb0 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -220,6 +220,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
mutex_init(&card->memory_mutex);
#ifdef CONFIG_PM
init_waitqueue_head(&card->power_sleep);
+ init_waitqueue_head(&card->power_ref_sleep);
+ atomic_set(&card->power_ref, 0);
#endif
init_waitqueue_head(&card->remove_sleep);
card->sync_irq = -1;
@@ -442,6 +444,7 @@ int snd_card_disconnect(struct snd_card *card)
#ifdef CONFIG_PM
wake_up(&card->power_sleep);
+ snd_power_sync_ref(card);
#endif
return 0;
}
@@ -662,17 +665,15 @@ void snd_card_set_id(struct snd_card *card, const char *nid)
}
EXPORT_SYMBOL(snd_card_set_id);
-static ssize_t
-card_id_show_attr(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct snd_card *card = container_of(dev, struct snd_card, card_dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", card->id);
}
-static ssize_t
-card_id_store_attr(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t id_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct snd_card *card = container_of(dev, struct snd_card, card_dev);
char buf1[sizeof(card->id)];
@@ -700,17 +701,16 @@ card_id_store_attr(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(id, 0644, card_id_show_attr, card_id_store_attr);
+static DEVICE_ATTR_RW(id);
-static ssize_t
-card_number_show_attr(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct snd_card *card = container_of(dev, struct snd_card, card_dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", card->number);
}
-static DEVICE_ATTR(number, 0444, card_number_show_attr, NULL);
+static DEVICE_ATTR_RO(number);
static struct attribute *card_dev_attrs[] = {
&dev_attr_id.attr,
@@ -770,7 +770,8 @@ int snd_card_register(struct snd_card *card)
card->registered = true;
}
- if ((err = snd_device_register_all(card)) < 0)
+ err = snd_device_register_all(card);
+ if (err < 0)
return err;
mutex_lock(&snd_card_mutex);
if (snd_cards[card->number]) {
@@ -813,7 +814,8 @@ static void snd_card_info_read(struct snd_info_entry *entry,
for (idx = count = 0; idx < SNDRV_CARDS; idx++) {
mutex_lock(&snd_card_mutex);
- if ((card = snd_cards[idx]) != NULL) {
+ card = snd_cards[idx];
+ if (card) {
count++;
snd_iprintf(buffer, "%2i [%-15s]: %s - %s\n",
idx,
@@ -837,7 +839,8 @@ void snd_card_info_read_oss(struct snd_info_buffer *buffer)
for (idx = count = 0; idx < SNDRV_CARDS; idx++) {
mutex_lock(&snd_card_mutex);
- if ((card = snd_cards[idx]) != NULL) {
+ card = snd_cards[idx];
+ if (card) {
count++;
snd_iprintf(buffer, "%s\n", card->longname);
}
@@ -859,7 +862,8 @@ static void snd_card_module_info_read(struct snd_info_entry *entry,
for (idx = 0; idx < SNDRV_CARDS; idx++) {
mutex_lock(&snd_card_mutex);
- if ((card = snd_cards[idx]) != NULL)
+ card = snd_cards[idx];
+ if (card)
snd_iprintf(buffer, "%2i %s\n",
idx, card->module->name);
mutex_unlock(&snd_card_mutex);
@@ -1002,21 +1006,28 @@ EXPORT_SYMBOL(snd_card_file_remove);
#ifdef CONFIG_PM
/**
- * snd_power_wait - wait until the power-state is changed.
- * @card: soundcard structure
- * @power_state: expected power state
+ * snd_power_ref_and_wait - wait until the card gets powered up
+ * @card: soundcard structure
+ *
+ * Take the power_ref reference count of the given card, and
+ * wait until the card gets powered up to SNDRV_CTL_POWER_D0 state.
+ * The refcount is down again while sleeping until power-up, hence this
+ * function can be used for syncing the floating control ops accesses,
+ * typically around calling control ops.
*
- * Waits until the power-state is changed.
+ * The caller needs to pull down the refcount via snd_power_unref() later
+ * no matter whether the error is returned from this function or not.
*
- * Return: Zero if successful, or a negative error code.
+ * Return: Zero if successful, or a negative error code.
*/
-int snd_power_wait(struct snd_card *card, unsigned int power_state)
+int snd_power_ref_and_wait(struct snd_card *card)
{
wait_queue_entry_t wait;
int result = 0;
+ snd_power_ref(card);
/* fastpath */
- if (snd_power_get_state(card) == power_state)
+ if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0)
return 0;
init_waitqueue_entry(&wait, current);
add_wait_queue(&card->power_sleep, &wait);
@@ -1025,13 +1036,33 @@ int snd_power_wait(struct snd_card *card, unsigned int power_state)
result = -ENODEV;
break;
}
- if (snd_power_get_state(card) == power_state)
+ if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0)
break;
+ snd_power_unref(card);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(30 * HZ);
+ snd_power_ref(card);
}
remove_wait_queue(&card->power_sleep, &wait);
return result;
}
+EXPORT_SYMBOL_GPL(snd_power_ref_and_wait);
+
+/**
+ * snd_power_wait - wait until the card gets powered up (old form)
+ * @card: soundcard structure
+ *
+ * Wait until the card gets powered up to SNDRV_CTL_POWER_D0 state.
+ *
+ * Return: Zero if successful, or a negative error code.
+ */
+int snd_power_wait(struct snd_card *card)
+{
+ int ret;
+
+ ret = snd_power_ref_and_wait(card);
+ snd_power_unref(card);
+ return ret;
+}
EXPORT_SYMBOL(snd_power_wait);
#endif /* CONFIG_PM */
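The callers converted in sound/core/control.c above follow the pattern this kerneldoc describes: take the power reference, perform the control op only if the wait succeeded, and always drop the reference afterwards. Schematically (mirroring the hunks above, no new API):

ret = snd_power_ref_and_wait(card);
if (!ret)
	ret = kctl->get(kctl, control);
snd_power_unref(card);	/* drop the ref even when the wait failed */
if (ret < 0)
	return ret;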
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 966bef5acc75..439a358ecfe9 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -15,99 +15,27 @@
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
+#include "memalloc_local.h"
-/*
- *
- * Bus-specific memory allocators
- *
- */
-
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
-{
- gfp_t gfp_flags;
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
- gfp_flags = GFP_KERNEL
- | __GFP_COMP /* compound page lets parts be mapped */
- | __GFP_NORETRY /* don't trigger OOM-killer */
- | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
- gfp_flags);
-#ifdef CONFIG_X86
- if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wc((unsigned long)dmab->area,
- PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
-{
-#ifdef CONFIG_X86
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
- set_memory_wb((unsigned long)dmab->area,
- PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
- dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
-}
-
-#ifdef CONFIG_GENERIC_ALLOCATOR
-/**
- * snd_malloc_dev_iram - allocate memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- * @size: number of bytes to allocate from the iram
- *
- * This function requires iram phandle provided via of_node
- */
-static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
+static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
+ gfp_t default_gfp)
{
- struct device *dev = dmab->dev.dev;
- struct gen_pool *pool = NULL;
-
- dmab->area = NULL;
- dmab->addr = 0;
-
- if (dev->of_node)
- pool = of_gen_pool_get(dev->of_node, "iram", 0);
-
- if (!pool)
- return;
-
- /* Assign the pool into private_data field */
- dmab->private_data = pool;
-
- dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
- PAGE_SIZE);
+ if (!dmab->dev.dev)
+ return default_gfp;
+ else
+ return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
-/**
- * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- */
-static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
+static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
- struct gen_pool *pool = dmab->private_data;
-
- if (pool && dmab->area)
- gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
-}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- * ALSA generic memory management
- *
- */
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
-static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
- gfp_t default_gfp)
-{
- if (!dev)
- return default_gfp;
- else
- return (__force gfp_t)(unsigned long)dev;
+ if (WARN_ON_ONCE(!ops || !ops->alloc))
+ return -EINVAL;
+ return ops->alloc(dmab, size);
}
/**
@@ -126,7 +54,7 @@ static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
- gfp_t gfp;
+ int err;
if (WARN_ON(!size))
return -ENXIO;
@@ -140,43 +68,10 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->area = NULL;
dmab->addr = 0;
dmab->private_data = NULL;
- switch (type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
- dmab->area = alloc_pages_exact(size, gfp);
- break;
- case SNDRV_DMA_TYPE_VMALLOC:
- gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
- dmab->area = __vmalloc(size, gfp);
- break;
-#ifdef CONFIG_HAS_DMA
-#ifdef CONFIG_GENERIC_ALLOCATOR
- case SNDRV_DMA_TYPE_DEV_IRAM:
- snd_malloc_dev_iram(dmab, size);
- if (dmab->area)
- break;
- /* Internal memory might have limited size and no enough space,
- * so if we fail to malloc, try to fetch memory traditionally.
- */
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- fallthrough;
-#endif /* CONFIG_GENERIC_ALLOCATOR */
- case SNDRV_DMA_TYPE_DEV:
- case SNDRV_DMA_TYPE_DEV_UC:
- snd_malloc_dev_pages(dmab, size);
- break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- snd_malloc_sgbuf_pages(device, size, dmab, NULL);
- break;
-#endif
- default:
- pr_err("snd-malloc: invalid device type %d\n", type);
- return -ENXIO;
- }
- if (! dmab->area)
+ err = __snd_dma_alloc_pages(dmab, size);
+ if (err < 0)
+ return err;
+ if (!dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
@@ -217,7 +112,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
-
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
@@ -226,32 +120,288 @@ EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
- switch (dmab->dev.type) {
- case SNDRV_DMA_TYPE_CONTINUOUS:
- free_pages_exact(dmab->area, dmab->bytes);
- break;
- case SNDRV_DMA_TYPE_VMALLOC:
- vfree(dmab->area);
- break;
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->free)
+ ops->free(dmab);
+}
+EXPORT_SYMBOL(snd_dma_free_pages);
+
+/**
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->mmap)
+ return ops->mmap(dmab, area);
+ else
+ return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
+
+/**
+ * snd_sgbuf_get_addr - return the physical address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_addr)
+ return ops->get_addr(dmab, offset);
+ else
+ return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_page)
+ return ops->get_page(dmab, offset);
+ else
+ return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
+ * on sg-buffer
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+ if (ops && ops->get_chunk_size)
+ return ops->get_chunk_size(dmab, ofs, size);
+ else
+ return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+/*
+ * Continuous pages allocator
+ */
+static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+
+ dmab->area = alloc_pages_exact(size, gfp);
+ return 0;
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+ free_pages_exact(dmab->area, dmab->bytes);
+}
+
+static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return remap_pfn_range(area, area->vm_start,
+ page_to_pfn(virt_to_page(dmab->area)),
+ area->vm_end - area->vm_start,
+ area->vm_page_prot);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+ .alloc = snd_dma_continuous_alloc,
+ .free = snd_dma_continuous_free,
+ .mmap = snd_dma_continuous_mmap,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
+
+ dmab->area = __vmalloc(size, gfp);
+ return 0;
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+ vfree(dmab->area);
+}
+
+static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return remap_vmalloc_range(area, dmab->area, 0);
+}
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
+ offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size)
+{
+ ofs %= PAGE_SIZE;
+ size += ofs;
+ if (size > PAGE_SIZE)
+ size = PAGE_SIZE;
+ return size - ofs;
+}
+
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+ .alloc = snd_dma_vmalloc_alloc,
+ .free = snd_dma_vmalloc_free,
+ .mmap = snd_dma_vmalloc_mmap,
+ .get_addr = snd_dma_vmalloc_get_addr,
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
#ifdef CONFIG_HAS_DMA
+/*
+ * IRAM allocator
+ */
#ifdef CONFIG_GENERIC_ALLOCATOR
- case SNDRV_DMA_TYPE_DEV_IRAM:
- snd_free_dev_iram(dmab);
- break;
+static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct device *dev = dmab->dev.dev;
+ struct gen_pool *pool;
+
+ if (dev->of_node) {
+ pool = of_gen_pool_get(dev->of_node, "iram", 0);
+ /* Assign the pool into private_data field */
+ dmab->private_data = pool;
+
+ dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
+ PAGE_SIZE);
+ if (dmab->area)
+ return 0;
+ }
+
+ /* Internal memory might have limited size and not enough space,
+ * so if the allocation fails, fall back to the regular device memory.
+ */
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+ return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+ struct gen_pool *pool = dmab->private_data;
+
+ if (pool && dmab->area)
+ gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return remap_pfn_range(area, area->vm_start,
+ dmab->addr >> PAGE_SHIFT,
+ area->vm_end - area->vm_start,
+ area->vm_page_prot);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+ .alloc = snd_dma_iram_alloc,
+ .free = snd_dma_iram_free,
+ .mmap = snd_dma_iram_mmap,
+};
#endif /* CONFIG_GENERIC_ALLOCATOR */
- case SNDRV_DMA_TYPE_DEV:
- case SNDRV_DMA_TYPE_DEV_UC:
- snd_free_dev_pages(dmab);
- break;
+
+/*
+ * Coherent device pages allocator
+ */
+static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ gfp_t gfp_flags;
+
+ gfp_flags = GFP_KERNEL
+ | __GFP_COMP /* compound page lets parts be mapped */
+ | __GFP_NORETRY /* don't trigger OOM-killer */
+ | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
+ dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+ gfp_flags);
+#ifdef CONFIG_X86
+ if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wc((unsigned long)dmab->area,
+ PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
+ return 0;
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+#ifdef CONFIG_X86
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wb((unsigned long)dmab->area,
+ PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ return dma_mmap_coherent(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+ .alloc = snd_dma_dev_alloc,
+ .free = snd_dma_dev_free,
+ .mmap = snd_dma_dev_mmap,
+};
+#endif /* CONFIG_HAS_DMA */
+
+/*
+ * Entry points
+ */
+static const struct snd_malloc_ops *dma_ops[] = {
+ [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+ [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+ [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+ [SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+ [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- snd_free_sgbuf_pages(dmab);
- break;
+ [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+ [SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
#endif
- default:
- pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
- }
+};
+
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+ if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+ dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+ return NULL;
+ return dma_ops[dmab->dev.type];
}
-EXPORT_SYMBOL(snd_dma_free_pages);
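
A sketch of how a driver might walk a buffer with the new type-agnostic accessors, e.g. to build hardware descriptors; my_program_buffer() and my_program_dma_descriptor() are hypothetical, only the snd_sgbuf_* helpers come from the code above:

    static void my_program_buffer(struct snd_dma_buffer *dmab, unsigned int bytes)
    {
            unsigned int ofs = 0;

            while (ofs < bytes) {
                    /* largest physically contiguous run starting at ofs */
                    unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes - ofs);
                    dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);

                    my_program_dma_descriptor(addr, chunk); /* hypothetical hardware hook */
                    ofs += chunk;
            }
    }

For non-SG buffer types the fallback branches above make this degrade gracefully: get_chunk_size() returns the requested size and get_addr() returns dmab->addr + offset.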
diff --git a/sound/core/memalloc_local.h b/sound/core/memalloc_local.h
new file mode 100644
index 000000000000..dbea7f2aed07
--- /dev/null
+++ b/sound/core/memalloc_local.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __MEMALLOC_LOCAL_H
+#define __MEMALLOC_LOCAL_H
+
+struct snd_malloc_ops {
+ int (*alloc)(struct snd_dma_buffer *dmab, size_t size);
+ void (*free)(struct snd_dma_buffer *dmab);
+ dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
+ struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
+ unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size);
+ int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
+};
+
+#ifdef CONFIG_SND_DMA_SGBUF
+extern const struct snd_malloc_ops snd_dma_sg_ops;
+#endif
+
+#endif /* __MEMALLOC_LOCAL_H */
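
The shape of a backend is small; a hypothetical allocator following the same pattern as the continuous/vmalloc/dev backends above might look like this (my_alloc/my_free and the kzalloc backing store are illustrative only, not a real ALSA buffer type):

    static int my_alloc(struct snd_dma_buffer *dmab, size_t size)
    {
            dmab->area = kzalloc(size, GFP_KERNEL); /* illustrative backing store */
            return 0;                               /* caller checks dmab->area for NULL */
    }

    static void my_free(struct snd_dma_buffer *dmab)
    {
            kfree(dmab->area);
    }

    static const struct snd_malloc_ops my_malloc_ops = {
            .alloc = my_alloc,
            .free  = my_free,
    };

Dispatch happens purely through the dma_ops[] table indexed by dmab->dev.type, so supporting a new buffer type amounts to one ops structure and one table entry.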
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index bec928327478..6a5abdd4271b 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -185,7 +185,8 @@ static int snd_mixer_oss_get_recsrc(struct snd_mixer_oss_file *fmixer)
if (mixer->put_recsrc && mixer->get_recsrc) { /* exclusive */
int err;
unsigned int index;
- if ((err = mixer->get_recsrc(fmixer, &index)) < 0)
+ err = mixer->get_recsrc(fmixer, &index);
+ if (err < 0)
return err;
result = 1 << index;
} else {
@@ -517,7 +518,8 @@ static void snd_mixer_oss_get_volume1_vol(struct snd_mixer_oss_file *fmixer,
if (numid == ID_UNKNOWN)
return;
down_read(&card->controls_rwsem);
- if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
+ kctl = snd_ctl_find_numid(card, numid);
+ if (!kctl) {
up_read(&card->controls_rwsem);
return;
}
@@ -555,7 +557,8 @@ static void snd_mixer_oss_get_volume1_sw(struct snd_mixer_oss_file *fmixer,
if (numid == ID_UNKNOWN)
return;
down_read(&card->controls_rwsem);
- if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
+ kctl = snd_ctl_find_numid(card, numid);
+ if (!kctl) {
up_read(&card->controls_rwsem);
return;
}
@@ -620,7 +623,8 @@ static void snd_mixer_oss_put_volume1_vol(struct snd_mixer_oss_file *fmixer,
if (numid == ID_UNKNOWN)
return;
down_read(&card->controls_rwsem);
- if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
+ kctl = snd_ctl_find_numid(card, numid);
+ if (!kctl) {
up_read(&card->controls_rwsem);
return;
}
@@ -636,7 +640,8 @@ static void snd_mixer_oss_put_volume1_vol(struct snd_mixer_oss_file *fmixer,
uctl->value.integer.value[0] = snd_mixer_oss_conv2(left, uinfo->value.integer.min, uinfo->value.integer.max);
if (uinfo->count > 1)
uctl->value.integer.value[1] = snd_mixer_oss_conv2(right, uinfo->value.integer.min, uinfo->value.integer.max);
- if ((res = kctl->put(kctl, uctl)) < 0)
+ res = kctl->put(kctl, uctl);
+ if (res < 0)
goto __unalloc;
if (res > 0)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
@@ -661,7 +666,8 @@ static void snd_mixer_oss_put_volume1_sw(struct snd_mixer_oss_file *fmixer,
if (numid == ID_UNKNOWN)
return;
down_read(&card->controls_rwsem);
- if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
+ kctl = snd_ctl_find_numid(card, numid);
+ if (!kctl) {
up_read(&card->controls_rwsem);
return;
}
@@ -681,7 +687,8 @@ static void snd_mixer_oss_put_volume1_sw(struct snd_mixer_oss_file *fmixer,
} else {
uctl->value.integer.value[0] = (left > 0 || right > 0) ? 1 : 0;
}
- if ((res = kctl->put(kctl, uctl)) < 0)
+ res = kctl->put(kctl, uctl);
+ if (res < 0)
goto __unalloc;
if (res > 0)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
@@ -809,9 +816,11 @@ static int snd_mixer_oss_get_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned
err = -ENOENT;
goto __unlock;
}
- if ((err = kctl->info(kctl, uinfo)) < 0)
+ err = kctl->info(kctl, uinfo);
+ if (err < 0)
goto __unlock;
- if ((err = kctl->get(kctl, uctl)) < 0)
+ err = kctl->get(kctl, uctl);
+ if (err < 0)
goto __unlock;
for (idx = 0; idx < 32; idx++) {
if (!(mixer->mask_recsrc & (1 << idx)))
@@ -860,7 +869,8 @@ static int snd_mixer_oss_put_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned
err = -ENOENT;
goto __unlock;
}
- if ((err = kctl->info(kctl, uinfo)) < 0)
+ err = kctl->info(kctl, uinfo);
+ if (err < 0)
goto __unlock;
for (idx = 0; idx < 32; idx++) {
if (!(mixer->mask_recsrc & (1 << idx)))
@@ -915,7 +925,8 @@ static int snd_mixer_oss_build_test(struct snd_mixer_oss *mixer, struct slot *sl
up_read(&card->controls_rwsem);
return -ENOMEM;
}
- if ((err = kcontrol->info(kcontrol, info)) < 0) {
+ err = kcontrol->info(kcontrol, info);
+ if (err < 0) {
up_read(&card->controls_rwsem);
kfree(info);
return err;
@@ -1036,7 +1047,10 @@ static int snd_mixer_oss_build_input(struct snd_mixer_oss *mixer,
if (snd_mixer_oss_build_test_all(mixer, ptr, &slot))
return 0;
down_read(&mixer->card->controls_rwsem);
- if (ptr->index == 0 && (kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0)) != NULL) {
+ kctl = NULL;
+ if (!ptr->index)
+ kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
+ if (kctl) {
struct snd_ctl_elem_info *uinfo;
uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL);
@@ -1343,9 +1357,10 @@ static int snd_mixer_oss_notify_handler(struct snd_card *card, int cmd)
if (mixer == NULL)
return -ENOMEM;
mutex_init(&mixer->reg_mutex);
- if ((err = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MIXER,
- card, 0,
- &snd_mixer_oss_f_ops, card)) < 0) {
+ err = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MIXER,
+ card, 0,
+ &snd_mixer_oss_f_ops, card);
+ if (err < 0) {
dev_err(card->dev,
"unable to register OSS mixer device %i:%i\n",
card->number, 0);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 86c39ee01aaa..82a818734a5f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -955,9 +955,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
if (!direct) {
/* add necessary plugins */
snd_pcm_oss_plugin_clear(substream);
- if ((err = snd_pcm_plug_format_plugins(substream,
- params,
- sparams)) < 0) {
+ err = snd_pcm_plug_format_plugins(substream, params, sparams);
+ if (err < 0) {
pcm_dbg(substream->pcm,
"snd_pcm_plug_format_plugins failed: %i\n", err);
snd_pcm_oss_plugin_clear(substream);
@@ -965,7 +964,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
}
if (runtime->oss.plugin_first) {
struct snd_pcm_plugin *plugin;
- if ((err = snd_pcm_plugin_build_io(substream, sparams, &plugin)) < 0) {
+ err = snd_pcm_plugin_build_io(substream, sparams, &plugin);
+ if (err < 0) {
pcm_dbg(substream->pcm,
"snd_pcm_plugin_build_io failed: %i\n", err);
snd_pcm_oss_plugin_clear(substream);
@@ -1011,7 +1011,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
sw_params->silence_size = frames;
}
- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_SW_PARAMS, sw_params)) < 0) {
+ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_SW_PARAMS, sw_params);
+ if (err < 0) {
pcm_dbg(substream->pcm, "SW_PARAMS failed: %i\n", err);
goto failure;
}
@@ -1573,7 +1574,8 @@ static int snd_pcm_oss_post(struct snd_pcm_oss_file *pcm_oss_file)
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream != NULL) {
- if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ err = snd_pcm_oss_make_ready(substream);
+ if (err < 0)
return err;
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
}
@@ -1645,7 +1647,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
goto __direct;
- if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ err = snd_pcm_oss_make_ready(substream);
+ if (err < 0)
return err;
atomic_inc(&runtime->oss.rw_ref);
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
@@ -1711,7 +1714,8 @@ unlock:
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
if (substream != NULL) {
- if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ err = snd_pcm_oss_make_ready(substream);
+ if (err < 0)
return err;
runtime = substream->runtime;
err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
@@ -1758,7 +1762,8 @@ static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file)
struct snd_pcm_substream *substream;
int err;
- if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
+ err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream);
+ if (err < 0)
return err;
return substream->runtime->oss.rate;
}
@@ -1795,7 +1800,8 @@ static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file)
struct snd_pcm_substream *substream;
int err;
- if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
+ err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream);
+ if (err < 0)
return err;
return substream->runtime->oss.channels;
}
@@ -1805,7 +1811,8 @@ static int snd_pcm_oss_get_block_size(struct snd_pcm_oss_file *pcm_oss_file)
struct snd_pcm_substream *substream;
int err;
- if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
+ err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream);
+ if (err < 0)
return err;
return substream->runtime->oss.period_bytes;
}
@@ -1820,7 +1827,8 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
const struct snd_mask *format_mask;
int fmt;
- if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
+ err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream);
+ if (err < 0)
return err;
if (atomic_read(&substream->mmap_count))
direct = 1;
@@ -1890,7 +1898,8 @@ static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file)
struct snd_pcm_substream *substream;
int err;
- if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
+ err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream);
+ if (err < 0)
return err;
return substream->runtime->oss.format;
}
@@ -2050,11 +2059,13 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
if (psubstream) {
- if ((err = snd_pcm_oss_make_ready(psubstream)) < 0)
+ err = snd_pcm_oss_make_ready(psubstream);
+ if (err < 0)
return err;
}
if (csubstream) {
- if ((err = snd_pcm_oss_make_ready(csubstream)) < 0)
+ err = snd_pcm_oss_make_ready(csubstream);
+ if (err < 0)
return err;
}
if (psubstream) {
@@ -2141,7 +2152,8 @@ static int snd_pcm_oss_get_odelay(struct snd_pcm_oss_file *pcm_oss_file)
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream == NULL)
return -EINVAL;
- if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ err = snd_pcm_oss_make_ready(substream);
+ if (err < 0)
return err;
runtime = substream->runtime;
if (runtime->oss.params || runtime->oss.prepare)
@@ -2168,7 +2180,8 @@ static int snd_pcm_oss_get_ptr(struct snd_pcm_oss_file *pcm_oss_file, int stream
substream = pcm_oss_file->streams[stream];
if (substream == NULL)
return -EINVAL;
- if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ err = snd_pcm_oss_make_ready(substream);
+ if (err < 0)
return err;
runtime = substream->runtime;
if (runtime->oss.params || runtime->oss.prepare) {
@@ -2239,9 +2252,11 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
return -EINVAL;
runtime = substream->runtime;
- if (runtime->oss.params &&
- (err = snd_pcm_oss_change_params(substream, false)) < 0)
- return err;
+ if (runtime->oss.params) {
+ err = snd_pcm_oss_change_params(substream, false);
+ if (err < 0)
+ return err;
+ }
info.fragsize = runtime->oss.period_bytes;
info.fragstotal = runtime->periods;
@@ -2601,7 +2616,8 @@ static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long
case SNDCTL_DSP_SPEED:
if (get_user(res, p))
return -EFAULT;
- if ((res = snd_pcm_oss_set_rate(pcm_oss_file, res))<0)
+ res = snd_pcm_oss_set_rate(pcm_oss_file, res);
+ if (res < 0)
return res;
return put_user(res, p);
case SOUND_PCM_READ_RATE:
@@ -2613,7 +2629,8 @@ static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long
if (get_user(res, p))
return -EFAULT;
res = res > 0 ? 2 : 1;
- if ((res = snd_pcm_oss_set_channels(pcm_oss_file, res)) < 0)
+ res = snd_pcm_oss_set_channels(pcm_oss_file, res);
+ if (res < 0)
return res;
return put_user(--res, p);
case SNDCTL_DSP_GETBLKSIZE:
@@ -2829,7 +2846,8 @@ static __poll_t snd_pcm_oss_poll(struct file *file, poll_table * wait)
snd_pcm_state_t ostate;
poll_wait(file, &runtime->sleep, wait);
snd_pcm_stream_lock_irq(csubstream);
- if ((ostate = runtime->status->state) != SNDRV_PCM_STATE_RUNNING ||
+ ostate = runtime->status->state;
+ if (ostate != SNDRV_PCM_STATE_RUNNING ||
snd_pcm_oss_capture_ready(csubstream))
mask |= EPOLLIN | EPOLLRDNORM;
snd_pcm_stream_unlock_irq(csubstream);
@@ -3043,7 +3061,8 @@ static void snd_pcm_oss_proc_init(struct snd_pcm *pcm)
struct snd_pcm_str *pstr = &pcm->streams[stream];
if (pstr->substream_count == 0)
continue;
- if ((entry = snd_info_create_card_entry(pcm->card, "oss", pstr->proc_root)) != NULL) {
+ entry = snd_info_create_card_entry(pcm->card, "oss", pstr->proc_root);
+ if (entry) {
entry->content = SNDRV_INFO_CONTENT_TEXT;
entry->mode = S_IFREG | 0644;
entry->c.text.read = snd_pcm_oss_proc_read;
@@ -3191,7 +3210,8 @@ static int __init alsa_pcm_oss_init(void)
adsp_map[i] = 1;
}
}
- if ((err = snd_pcm_notify(&snd_pcm_oss_notify, 0)) < 0)
+ err = snd_pcm_notify(&snd_pcm_oss_notify, 0);
+ if (err < 0)
return err;
return 0;
}
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index d5ca161d588c..061ba06bc926 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -59,7 +59,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
} else {
format = &plugin->dst_format;
}
- if ((width = snd_pcm_format_physical_width(format->format)) < 0)
+ width = snd_pcm_format_physical_width(format->format);
+ if (width < 0)
return width;
size = frames * format->channels * width;
if (snd_BUG_ON(size % 8))
@@ -572,7 +573,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_channels_buf(struct snd_pcm_substream *plu
}
v = plugin->buf_channels;
*channels = v;
- if ((width = snd_pcm_format_physical_width(format->format)) < 0)
+ width = snd_pcm_format_physical_width(format->format);
+ if (width < 0)
return width;
nchannels = format->channels;
if (snd_BUG_ON(plugin->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
@@ -600,16 +602,17 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
while (plugin) {
if (frames <= 0)
return frames;
- if ((next = plugin->next) != NULL) {
+ next = plugin->next;
+ if (next) {
snd_pcm_sframes_t frames1 = frames;
if (plugin->dst_frames) {
frames1 = plugin->dst_frames(plugin, frames);
if (frames1 <= 0)
return frames1;
}
- if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
+ err = next->client_channels(next, frames1, &dst_channels);
+ if (err < 0)
return err;
- }
if (err != frames1) {
frames = err;
if (plugin->src_frames) {
@@ -621,7 +624,8 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
} else
dst_channels = NULL;
pdprintf("write plugin: %s, %li\n", plugin->name, frames);
- if ((frames = plugin->transfer(plugin, src_channels, dst_channels, frames)) < 0)
+ frames = plugin->transfer(plugin, src_channels, dst_channels, frames);
+ if (frames < 0)
return frames;
src_channels = dst_channels;
plugin = next;
@@ -643,16 +647,18 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str
src_channels = NULL;
plugin = snd_pcm_plug_first(plug);
while (plugin && frames > 0) {
- if ((next = plugin->next) != NULL) {
- if ((err = plugin->client_channels(plugin, frames, &dst_channels)) < 0) {
+ next = plugin->next;
+ if (next) {
+ err = plugin->client_channels(plugin, frames, &dst_channels);
+ if (err < 0)
return err;
- }
frames = err;
} else {
dst_channels = dst_channels_final;
}
pdprintf("read plugin: %s, %li\n", plugin->name, frames);
- if ((frames = plugin->transfer(plugin, src_channels, dst_channels, frames)) < 0)
+ frames = plugin->transfer(plugin, src_channels, dst_channels, frames);
+ if (frames < 0)
return frames;
plugin = next;
src_channels = dst_channels;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index b163164a83ec..6fd3677685d7 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -1004,7 +1004,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
substream->pstr->substream_opened--;
}
-static ssize_t show_pcm_class(struct device *dev,
+static ssize_t pcm_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_pcm_str *pstr = container_of(dev, struct snd_pcm_str, dev);
@@ -1024,7 +1024,7 @@ static ssize_t show_pcm_class(struct device *dev,
return sprintf(buf, "%s\n", str);
}
-static DEVICE_ATTR(pcm_class, 0444, show_pcm_class, NULL);
+static DEVICE_ATTR_RO(pcm_class);
static struct attribute *pcm_dev_attrs[] = {
&dev_attr_pcm_class.attr,
NULL
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 590a46a9e78d..a59de24695ec 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -239,7 +239,8 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime;
int err;
- if (! (runtime = substream->runtime))
+ runtime = substream->runtime;
+ if (!runtime)
return -ENOTTY;
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -343,7 +344,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
- if ((ch = substream->runtime->channels) > 128)
+ ch = substream->runtime->channels;
+ if (ch > 128)
return -EINVAL;
if (get_user(buf, &data32->bufs) ||
get_user(frames, &data32->frames))
diff --git a/sound/core/pcm_iec958.c b/sound/core/pcm_iec958.c
index f9a211cc1f2c..7a1b816f67cc 100644
--- a/sound/core/pcm_iec958.c
+++ b/sound/core/pcm_iec958.c
@@ -9,41 +9,85 @@
#include <sound/pcm_params.h>
#include <sound/pcm_iec958.h>
-static int create_iec958_consumer(uint rate, uint sample_width,
- u8 *cs, size_t len)
+/**
+ * snd_pcm_create_iec958_consumer_default - create default consumer format IEC958 channel status
+ * @cs: channel status buffer, at least four bytes
+ * @len: length of channel status buffer
+ *
+ * Create the consumer format channel status data in @cs of maximum size
+ * @len. When relevant, the configuration-dependent bits will be set as
+ * unspecified.
+ *
+ * Drivers should then call either snd_pcm_fill_iec958_consumer() or
+ * snd_pcm_fill_iec958_consumer_hw_params() to replace these unspecified
+ * bits by their actual values.
+ *
+ * Drivers may wish to tweak the contents of the buffer after creation.
+ *
+ * Returns: length of buffer, or negative error code if something failed.
+ */
+int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len)
{
- unsigned int fs, ws;
-
if (len < 4)
return -EINVAL;
- switch (rate) {
- case 32000:
- fs = IEC958_AES3_CON_FS_32000;
- break;
- case 44100:
- fs = IEC958_AES3_CON_FS_44100;
- break;
- case 48000:
- fs = IEC958_AES3_CON_FS_48000;
- break;
- case 88200:
- fs = IEC958_AES3_CON_FS_88200;
- break;
- case 96000:
- fs = IEC958_AES3_CON_FS_96000;
- break;
- case 176400:
- fs = IEC958_AES3_CON_FS_176400;
- break;
- case 192000:
- fs = IEC958_AES3_CON_FS_192000;
- break;
- default:
+ memset(cs, 0, len);
+
+ cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
+ cs[1] = IEC958_AES1_CON_GENERAL;
+ cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
+ cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | IEC958_AES3_CON_FS_NOTID;
+
+ if (len > 4)
+ cs[4] = IEC958_AES4_CON_WORDLEN_NOTID;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(snd_pcm_create_iec958_consumer_default);
+
+static int fill_iec958_consumer(uint rate, uint sample_width,
+ u8 *cs, size_t len)
+{
+ if (len < 4)
return -EINVAL;
+
+ if ((cs[3] & IEC958_AES3_CON_FS) == IEC958_AES3_CON_FS_NOTID) {
+ unsigned int fs;
+
+ switch (rate) {
+ case 32000:
+ fs = IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ fs = IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ fs = IEC958_AES3_CON_FS_48000;
+ break;
+ case 88200:
+ fs = IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ fs = IEC958_AES3_CON_FS_96000;
+ break;
+ case 176400:
+ fs = IEC958_AES3_CON_FS_176400;
+ break;
+ case 192000:
+ fs = IEC958_AES3_CON_FS_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cs[3] &= ~IEC958_AES3_CON_FS;
+ cs[3] |= fs;
}
- if (len > 4) {
+ if (len > 4 &&
+ (cs[4] & IEC958_AES4_CON_WORDLEN) == IEC958_AES4_CON_WORDLEN_NOTID) {
+ unsigned int ws;
+
switch (sample_width) {
case 16:
ws = IEC958_AES4_CON_WORDLEN_20_16;
@@ -64,20 +108,57 @@ static int create_iec958_consumer(uint rate, uint sample_width,
default:
return -EINVAL;
}
- }
- memset(cs, 0, len);
+ cs[4] &= ~IEC958_AES4_CON_WORDLEN;
+ cs[4] |= ws;
+ }
- cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
- cs[1] = IEC958_AES1_CON_GENERAL;
- cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
- cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | fs;
+ return len;
+}
- if (len > 4)
- cs[4] = ws;
+/**
+ * snd_pcm_fill_iec958_consumer - Fill consumer format IEC958 channel status
+ * @runtime: pcm runtime structure with ->rate filled in
+ * @cs: channel status buffer, at least four bytes
+ * @len: length of channel status buffer
+ *
+ * Fill the unspecified bits in an IEC958 status bits array using the
+ * parameters of the PCM runtime @runtime.
+ *
+ * Drivers may wish to tweak the contents of the buffer after it has been
+ * filled.
+ *
+ * Returns: length of buffer, or negative error code if something failed.
+ */
+int snd_pcm_fill_iec958_consumer(struct snd_pcm_runtime *runtime,
+ u8 *cs, size_t len)
+{
+ return fill_iec958_consumer(runtime->rate,
+ snd_pcm_format_width(runtime->format),
+ cs, len);
+}
+EXPORT_SYMBOL_GPL(snd_pcm_fill_iec958_consumer);
- return len;
+/**
+ * snd_pcm_fill_iec958_consumer_hw_params - Fill consumer format IEC958 channel status
+ * @params: the hw_params instance for extracting rate and sample format
+ * @cs: channel status buffer, at least four bytes
+ * @len: length of channel status buffer
+ *
+ * Fill the unspecified bits in an IEC958 status bits array using the
+ * parameters of the PCM hardware parameters @params.
+ *
+ * Drivers may wish to tweak the contents of the buffer after it has been
+ * filled.
+ *
+ * Returns: length of buffer, or negative error code if something failed.
+ */
+int snd_pcm_fill_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len)
+{
+ return fill_iec958_consumer(params_rate(params), params_width(params), cs, len);
}
+EXPORT_SYMBOL_GPL(snd_pcm_fill_iec958_consumer_hw_params);
/**
* snd_pcm_create_iec958_consumer - create consumer format IEC958 channel status
@@ -95,9 +176,13 @@ static int create_iec958_consumer(uint rate, uint sample_width,
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len)
{
- return create_iec958_consumer(runtime->rate,
- snd_pcm_format_width(runtime->format),
- cs, len);
+ int ret;
+
+ ret = snd_pcm_create_iec958_consumer_default(cs, len);
+ if (ret < 0)
+ return ret;
+
+ return snd_pcm_fill_iec958_consumer(runtime, cs, len);
}
EXPORT_SYMBOL(snd_pcm_create_iec958_consumer);
@@ -117,7 +202,12 @@ EXPORT_SYMBOL(snd_pcm_create_iec958_consumer);
int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
u8 *cs, size_t len)
{
- return create_iec958_consumer(params_rate(params), params_width(params),
- cs, len);
+ int ret;
+
+ ret = snd_pcm_create_iec958_consumer_default(cs, len);
+ if (ret < 0)
+ return ret;
+
+ return fill_iec958_consumer(params_rate(params), params_width(params), cs, len);
}
EXPORT_SYMBOL(snd_pcm_create_iec958_consumer_hw_params);
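
A hedged sketch of the intended call sequence in a driver: build the default channel status once, optionally tweak it, then let the helper fill in the rate and word-length bits from hw_params. struct my_chip and its cs[] byte array (at least 4 bytes) are hypothetical:

    static int my_hw_params(struct snd_pcm_substream *substream,
                            struct snd_pcm_hw_params *params)
    {
            struct my_chip *chip = snd_pcm_substream_chip(substream);
            int ret;

            ret = snd_pcm_create_iec958_consumer_default(chip->cs, sizeof(chip->cs));
            if (ret < 0)
                    return ret;
            /* driver-specific tweaks of chip->cs could go here */
            ret = snd_pcm_fill_iec958_consumer_hw_params(params, chip->cs,
                                                         sizeof(chip->cs));
            return ret < 0 ? ret : 0;
    }

The two older entry points kept above simply chain these two steps, so existing callers keep their behaviour.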
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index b7e3d8f44511..7d5883432085 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1778,27 +1778,38 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
EXPORT_SYMBOL(snd_pcm_lib_ioctl);
/**
- * snd_pcm_period_elapsed - update the pcm status for the next period
- * @substream: the pcm substream instance
+ * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
+ * under acquired lock of PCM substream.
+ * @substream: the instance of pcm substream.
+ *
+ * This function is called when the batch of audio data frames as the same size as the period of
+ * buffer is already processed in audio data transmission.
+ *
+ * The call of function updates the status of runtime with the latest position of audio data
+ * transmission, checks overrun and underrun over buffer, awaken user processes from waiting for
+ * available audio data frames, sampling audio timestamp, and performs stop or drain the PCM
+ * substream according to configured threshold.
+ *
+ * The function is intended to use for the case that PCM driver operates audio data frames under
+ * acquired lock of PCM substream; e.g. in callback of any operation of &snd_pcm_ops in process
+ * context. In any interrupt context, it's preferrable to use ``snd_pcm_period_elapsed()`` instead
+ * since lock of PCM substream should be acquired in advance.
*
- * This function is called from the interrupt handler when the
- * PCM has processed the period size. It will update the current
- * pointer, wake up sleepers, etc.
+ * Developer should pay enough attention that some callbacks in &snd_pcm_ops are done by the call of
+ * function:
*
- * Even if more than one periods have elapsed since the last call, you
- * have to call this only once.
+ * - .pointer - to retrieve current position of audio data transmission by frame count or XRUN state.
+ * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
+ * - .get_time_info - to retrieve audio time stamp if needed.
+ *
+ * Even if more than one periods have elapsed since the last call, you have to call this only once.
*/
-void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
+void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
- unsigned long flags;
- if (snd_BUG_ON(!substream))
- return;
-
- snd_pcm_stream_lock_irqsave(substream, flags);
if (PCM_RUNTIME_CHECK(substream))
- goto _unlock;
+ return;
runtime = substream->runtime;
if (!snd_pcm_running(substream) ||
@@ -1811,7 +1822,30 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
#endif
_end:
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
- _unlock:
+}
+EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
+
+/**
+ * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
+ * PCM substream.
+ * @substream: the instance of PCM substream.
+ *
+ * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
+ * it acquires the lock of the PCM substream by itself.
+ *
+ * It is typically called from an IRQ handler to notify that a batch of audio data frames equal
+ * in size to the buffer period has been processed in the audio data transmission.
+ */
+void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
+{
+ unsigned long flags;
+
+ if (snd_BUG_ON(!substream))
+ return;
+
+ snd_pcm_stream_lock_irqsave(substream, flags);
+ snd_pcm_period_elapsed_under_stream_lock(substream);
snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
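
A short sketch of where each variant is meant to be called; my_irq_handler(), my_ack() and my_period_boundary_crossed() are hypothetical driver callbacks, only the two exported helpers come from the code above:

    #include <linux/interrupt.h>
    #include <sound/pcm.h>

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            struct snd_pcm_substream *substream = dev_id;

            /* interrupt context: this variant takes the stream lock itself */
            snd_pcm_period_elapsed(substream);
            return IRQ_HANDLED;
    }

    static int my_ack(struct snd_pcm_substream *substream)
    {
            /* process context with the stream lock already held by the PCM core,
             * so the _under_stream_lock variant must be used to avoid a deadlock
             */
            if (my_period_boundary_crossed(substream))
                    snd_pcm_period_elapsed_under_stream_lock(substream);
            return 0;
    }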
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index e3b3558aeab6..fe9689b8a6a6 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -65,11 +65,6 @@ void __snd_pcm_xrun(struct snd_pcm_substream *substream);
void snd_pcm_group_init(struct snd_pcm_group *group);
void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq);
-#ifdef CONFIG_SND_DMA_SGBUF
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-#endif
-
#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
/* loop over all PCM substreams */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 542a75babdee..d7621ed105bd 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -337,27 +337,6 @@ void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
}
EXPORT_SYMBOL(snd_pcm_set_managed_buffer_all);
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * snd_pcm_sgbuf_ops_page - get the page struct at the given offset
- * @substream: the pcm substream instance
- * @offset: the buffer offset
- *
- * Used as the page callback of PCM ops.
- *
- * Return: The page struct at the given buffer offset. %NULL on failure.
- */
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigned long offset)
-{
- struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
-
- unsigned int idx = offset >> PAGE_SHIFT;
- if (idx >= (unsigned int)sgbuf->pages)
- return NULL;
- return sgbuf->page_table[idx];
-}
-#endif /* CONFIG_SND_DMA_SGBUF */
-
/**
* snd_pcm_lib_malloc_pages - allocate the DMA buffer
* @substream: the substream to allocate the DMA buffer to
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 257d412eac5d..4866aed97aac 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -266,7 +266,8 @@ int snd_pcm_format_signed(snd_pcm_format_t format)
int val;
if (!valid_format(format))
return -EINVAL;
- if ((val = pcm_formats[(INT)format].signd) < 0)
+ val = pcm_formats[(INT)format].signd;
+ if (val < 0)
return -EINVAL;
return val;
}
@@ -314,7 +315,8 @@ int snd_pcm_format_little_endian(snd_pcm_format_t format)
int val;
if (!valid_format(format))
return -EINVAL;
- if ((val = pcm_formats[(INT)format].le) < 0)
+ val = pcm_formats[(INT)format].le;
+ if (val < 0)
return -EINVAL;
return val;
}
@@ -350,7 +352,8 @@ int snd_pcm_format_width(snd_pcm_format_t format)
int val;
if (!valid_format(format))
return -EINVAL;
- if ((val = pcm_formats[(INT)format].width) == 0)
+ val = pcm_formats[(INT)format].width;
+ if (!val)
return -EINVAL;
return val;
}
@@ -368,7 +371,8 @@ int snd_pcm_format_physical_width(snd_pcm_format_t format)
int val;
if (!valid_format(format))
return -EINVAL;
- if ((val = pcm_formats[(INT)format].phys) == 0)
+ val = pcm_formats[(INT)format].phys;
+ if (!val)
return -EINVAL;
return val;
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 8dbe86cf2e4f..71323d807dbf 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -246,12 +246,21 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
return false;
- if (substream->ops->mmap ||
- (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
- substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
+ if (substream->ops->mmap || substream->ops->page)
return true;
- return dma_can_mmap(substream->dma_buffer.dev.dev);
+ switch (substream->dma_buffer.dev.type) {
+ case SNDRV_DMA_TYPE_UNKNOWN:
+ /* we can't know the device, so just assume that the driver does
+ * everything right
+ */
+ return true;
+ case SNDRV_DMA_TYPE_CONTINUOUS:
+ case SNDRV_DMA_TYPE_VMALLOC:
+ return true;
+ default:
+ return dma_can_mmap(substream->dma_buffer.dev.dev);
+ }
}
static int constrain_mask_params(struct snd_pcm_substream *substream,
@@ -768,7 +777,8 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
- if ((usecs = period_to_usecs(runtime)) >= 0)
+ usecs = period_to_usecs(runtime);
+ if (usecs >= 0)
cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
usecs);
return 0;
@@ -2658,7 +2668,8 @@ int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
goto error;
}
- if ((err = substream->ops->open(substream)) < 0)
+ err = substream->ops->open(substream);
+ if (err < 0)
goto error;
substream->hw_opened = 1;
@@ -2799,6 +2810,10 @@ static int snd_pcm_release(struct inode *inode, struct file *file)
if (snd_BUG_ON(!substream))
return -ENXIO;
pcm = substream->pcm;
+
+ /* block until the device gets woken up as it may touch the hardware */
+ snd_power_wait(pcm->card);
+
mutex_lock(&pcm->open_mutex);
snd_pcm_release_substream(substream);
kfree(pcm_file);
@@ -3057,9 +3072,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
boundary = 0x7fffffff;
snd_pcm_stream_lock_irq(substream);
/* FIXME: we should consider the boundary for the sync from app */
- if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
- control->appl_ptr = scontrol.appl_ptr;
- else
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
+ err = pcm_lib_apply_appl_ptr(substream,
+ scontrol.appl_ptr);
+ if (err < 0) {
+ snd_pcm_stream_unlock_irq(substream);
+ return err;
+ }
+ } else
scontrol.appl_ptr = control->appl_ptr % boundary;
if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
control->avail_min = scontrol.avail_min;
@@ -3193,7 +3213,7 @@ static int snd_pcm_common_ioctl(struct file *file,
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
- res = snd_power_wait(substream->pcm->card, SNDRV_CTL_POWER_D0);
+ res = snd_power_wait(substream->pcm->card);
if (res < 0)
return res;
@@ -3638,24 +3658,6 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
}
#endif /* coherent mmap */
-static inline struct page *
-snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
-{
- void *vaddr = substream->runtime->dma_area + ofs;
-
- switch (substream->dma_buffer.dev.type) {
-#ifdef CONFIG_SND_DMA_SGBUF
- case SNDRV_DMA_TYPE_DEV_SG:
- case SNDRV_DMA_TYPE_DEV_UC_SG:
- return snd_pcm_sgbuf_ops_page(substream, ofs);
-#endif /* CONFIG_SND_DMA_SGBUF */
- case SNDRV_DMA_TYPE_VMALLOC:
- return vmalloc_to_page(vaddr);
- default:
- return virt_to_page(vaddr);
- }
-}
-
/*
* fault callback for mmapping a RAM page
*/
@@ -3676,8 +3678,10 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
if (substream->ops->page)
page = substream->ops->page(substream, offset);
+ else if (!snd_pcm_get_dma_buf(substream))
+ page = virt_to_page(runtime->dma_area + offset);
else
- page = snd_pcm_default_page_ops(substream, offset);
+ page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
if (!page)
return VM_FAULT_SIGBUS;
get_page(page);
@@ -3712,22 +3716,9 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-#ifdef CONFIG_GENERIC_ALLOCATOR
- if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
- area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
- return remap_pfn_range(area, area->vm_start,
- substream->dma_buffer.addr >> PAGE_SHIFT,
- area->vm_end - area->vm_start, area->vm_page_prot);
- }
-#endif /* CONFIG_GENERIC_ALLOCATOR */
- if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
- (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
- return dma_mmap_coherent(substream->dma_buffer.dev.dev,
- area,
- substream->runtime->dma_area,
- substream->runtime->dma_addr,
- substream->runtime->dma_bytes);
+ if (!substream->ops->page &&
+ !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
+ return 0;
/* mmap with fault handler */
area->vm_ops = &snd_pcm_vm_ops_data_fault;
return 0;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index aca00af93afe..6c0a4a67ad2e 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -680,9 +680,12 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
bool is_input)
{
char *newbuf, *oldbuf;
+ unsigned int framing = params->mode & SNDRV_RAWMIDI_MODE_FRAMING_MASK;
if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L)
return -EINVAL;
+ if (framing == SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP && (params->buffer_size & 0x1f) != 0)
+ return -EINVAL;
if (params->avail_min < 1 || params->avail_min > params->buffer_size)
return -EINVAL;
if (params->buffer_size != runtime->buffer_size) {
@@ -720,8 +723,24 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params *params)
{
+ unsigned int framing = params->mode & SNDRV_RAWMIDI_MODE_FRAMING_MASK;
+ unsigned int clock_type = params->mode & SNDRV_RAWMIDI_MODE_CLOCK_MASK;
+ int err;
+
+ if (framing == SNDRV_RAWMIDI_MODE_FRAMING_NONE && clock_type != SNDRV_RAWMIDI_MODE_CLOCK_NONE)
+ return -EINVAL;
+ else if (clock_type > SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW)
+ return -EINVAL;
+ if (framing > SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP)
+ return -EINVAL;
snd_rawmidi_drain_input(substream);
- return resize_runtime_buffer(substream->runtime, params, true);
+ err = resize_runtime_buffer(substream->runtime, params, true);
+ if (err < 0)
+ return err;
+
+ substream->framing = framing;
+ substream->clock_type = clock_type;
+ return 0;
}
EXPORT_SYMBOL(snd_rawmidi_input_params);
@@ -963,6 +982,62 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
return -ENOIOCTLCMD;
}
+static int receive_with_tstamp_framing(struct snd_rawmidi_substream *substream,
+ const unsigned char *buffer, int src_count, const struct timespec64 *tstamp)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ struct snd_rawmidi_framing_tstamp *dest_ptr;
+ struct snd_rawmidi_framing_tstamp frame = { .tv_sec = tstamp->tv_sec, .tv_nsec = tstamp->tv_nsec };
+ int dest_frames = 0;
+ int orig_count = src_count;
+ int frame_size = sizeof(struct snd_rawmidi_framing_tstamp);
+
+ BUILD_BUG_ON(frame_size != 0x20);
+ if (snd_BUG_ON((runtime->hw_ptr & 0x1f) != 0))
+ return -EINVAL;
+
+ while (src_count > 0) {
+ if ((int)(runtime->buffer_size - runtime->avail) < frame_size) {
+ runtime->xruns += src_count;
+ break;
+ }
+ if (src_count >= SNDRV_RAWMIDI_FRAMING_DATA_LENGTH)
+ frame.length = SNDRV_RAWMIDI_FRAMING_DATA_LENGTH;
+ else {
+ frame.length = src_count;
+ memset(frame.data, 0, SNDRV_RAWMIDI_FRAMING_DATA_LENGTH);
+ }
+ memcpy(frame.data, buffer, frame.length);
+ buffer += frame.length;
+ src_count -= frame.length;
+ dest_ptr = (struct snd_rawmidi_framing_tstamp *) (runtime->buffer + runtime->hw_ptr);
+ *dest_ptr = frame;
+ runtime->avail += frame_size;
+ runtime->hw_ptr += frame_size;
+ runtime->hw_ptr %= runtime->buffer_size;
+ dest_frames++;
+ }
+ return orig_count - src_count;
+}
+
+static struct timespec64 get_framing_tstamp(struct snd_rawmidi_substream *substream)
+{
+ struct timespec64 ts64 = {0, 0};
+
+ switch (substream->clock_type) {
+ case SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW:
+ ktime_get_raw_ts64(&ts64);
+ break;
+ case SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC:
+ ktime_get_ts64(&ts64);
+ break;
+ case SNDRV_RAWMIDI_MODE_CLOCK_REALTIME:
+ ktime_get_real_ts64(&ts64);
+ break;
+ }
+ return ts64;
+}
+
/**
* snd_rawmidi_receive - receive the input data from the device
* @substream: the rawmidi substream
@@ -977,6 +1052,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
const unsigned char *buffer, int count)
{
unsigned long flags;
+ struct timespec64 ts64 = get_framing_tstamp(substream);
int result = 0, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
@@ -987,8 +1063,11 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
"snd_rawmidi_receive: input is not active!!!\n");
return -EINVAL;
}
+
spin_lock_irqsave(&runtime->lock, flags);
- if (count == 1) { /* special case, faster code */
+ if (substream->framing == SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP) {
+ result = receive_with_tstamp_framing(substream, buffer, count, &ts64);
+ } else if (count == 1) { /* special case, faster code */
substream->bytes++;
if (runtime->avail < runtime->buffer_size) {
runtime->buffer[runtime->hw_ptr++] = buffer[0];
@@ -1541,6 +1620,8 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
struct snd_rawmidi_substream *substream;
struct snd_rawmidi_runtime *runtime;
unsigned long buffer_size, avail, xruns;
+ unsigned int clock_type;
+ static const char *clock_names[4] = { "none", "realtime", "monotonic", "monotonic raw" };
rmidi = entry->private_data;
snd_iprintf(buffer, "%s\n\n", rmidi->name);
@@ -1596,6 +1677,14 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Avail : %lu\n"
" Overruns : %lu\n",
buffer_size, avail, xruns);
+ if (substream->framing == SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP) {
+ clock_type = substream->clock_type >> SNDRV_RAWMIDI_MODE_CLOCK_SHIFT;
+ if (!snd_BUG_ON(clock_type >= ARRAY_SIZE(clock_names)))
+ snd_iprintf(buffer,
+ " Framing : tstamp\n"
+ " Clock type : %s\n",
+ clock_names[clock_type]);
+ }
}
}
}
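
A userspace-side sketch (not part of this file) of requesting timestamped framing on a rawmidi input via the extended params ioctl; it assumes the matching uapi update that adds the mode field and the SNDRV_RAWMIDI_MODE_* constants used above:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sound/asound.h>

    static int enable_tstamp_framing(int fd)
    {
            struct snd_rawmidi_params params;

            memset(&params, 0, sizeof(params));
            params.stream = SNDRV_RAWMIDI_STREAM_INPUT;
            params.buffer_size = 4096;      /* must be a multiple of 32 with framing */
            params.avail_min = 32;
            params.mode = SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP |
                          SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW;
            return ioctl(fd, SNDRV_RAWMIDI_IOCTL_PARAMS, &params);
    }

With this mode each read returns 32-byte struct snd_rawmidi_framing_tstamp records (timestamp, length, and up to SNDRV_RAWMIDI_FRAMING_DATA_LENGTH data bytes) instead of raw MIDI bytes, as filled in by receive_with_tstamp_framing() above.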
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index 7397130976d0..68a93443583c 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -13,7 +13,8 @@ struct snd_rawmidi_params32 {
u32 buffer_size;
u32 avail_min;
unsigned int no_active_sensing; /* avoid bit-field */
- unsigned char reserved[16];
+ unsigned int mode;
+ unsigned char reserved[12];
} __attribute__((packed));
static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
@@ -25,6 +26,7 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
if (get_user(params.stream, &src->stream) ||
get_user(params.buffer_size, &src->buffer_size) ||
get_user(params.avail_min, &src->avail_min) ||
+ get_user(params.mode, &src->mode) ||
get_user(val, &src->no_active_sensing))
return -EFAULT;
params.no_active_sensing = val;
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 250a92b18726..77c1214acd90 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -67,13 +67,16 @@ static int __init alsa_seq_oss_init(void)
{
int rc;
- if ((rc = register_device()) < 0)
+ rc = register_device();
+ if (rc < 0)
goto error;
- if ((rc = register_proc()) < 0) {
+ rc = register_proc();
+ if (rc < 0) {
unregister_device();
goto error;
}
- if ((rc = snd_seq_oss_create_client()) < 0) {
+ rc = snd_seq_oss_create_client();
+ if (rc < 0) {
unregister_proc();
unregister_device();
goto error;
@@ -133,7 +136,8 @@ odev_release(struct inode *inode, struct file *file)
{
struct seq_oss_devinfo *dp;
- if ((dp = file->private_data) == NULL)
+ dp = file->private_data;
+ if (!dp)
return 0;
mutex_lock(&register_mutex);
@@ -226,16 +230,18 @@ register_device(void)
int rc;
mutex_lock(&register_mutex);
- if ((rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER,
- NULL, 0,
- &seq_oss_f_ops, NULL)) < 0) {
+ rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER,
+ NULL, 0,
+ &seq_oss_f_ops, NULL);
+ if (rc < 0) {
pr_err("ALSA: seq_oss: can't register device seq\n");
mutex_unlock(&register_mutex);
return rc;
}
- if ((rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MUSIC,
- NULL, 0,
- &seq_oss_f_ops, NULL)) < 0) {
+ rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MUSIC,
+ NULL, 0,
+ &seq_oss_f_ops, NULL);
+ if (rc < 0) {
pr_err("ALSA: seq_oss: can't register device music\n");
snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0);
mutex_unlock(&register_mutex);
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 4534a154b8c8..0ee4a5081fd6 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -94,10 +94,10 @@ snd_seq_oss_create_client(void)
port_callback.event_input = receive_announce;
port->kernel = &port_callback;
- call_ctl(SNDRV_SEQ_IOCTL_CREATE_PORT, port);
- if ((system_port = port->addr.port) >= 0) {
+ if (call_ctl(SNDRV_SEQ_IOCTL_CREATE_PORT, port) >= 0) {
struct snd_seq_port_subscribe subs;
+ system_port = port->addr.port;
memset(&subs, 0, sizeof(subs));
subs.sender.client = SNDRV_SEQ_CLIENT_SYSTEM;
subs.sender.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
@@ -354,7 +354,8 @@ alloc_seq_queue(struct seq_oss_devinfo *dp)
qinfo.owner = system_client;
qinfo.locked = 1;
strcpy(qinfo.name, "OSS Sequencer Emulation");
- if ((rc = call_ctl(SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qinfo)) < 0)
+ rc = call_ctl(SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qinfo);
+ if (rc < 0)
return rc;
dp->queue = qinfo.queue;
return 0;
@@ -485,7 +486,8 @@ snd_seq_oss_system_info_read(struct snd_info_buffer *buf)
snd_iprintf(buf, "\nNumber of applications: %d\n", num_clients);
for (i = 0; i < num_clients; i++) {
snd_iprintf(buf, "\nApplication %d: ", i);
- if ((dp = client_table[i]) == NULL) {
+ dp = client_table[i];
+ if (!dp) {
snd_iprintf(buf, "*empty*\n");
continue;
}
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 3f82c196de46..1e3bf086f867 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -152,7 +152,8 @@ snd_seq_oss_midi_check_new_port(struct snd_seq_port_info *pinfo)
/*
* look for the identical slot
*/
- if ((mdev = find_slot(pinfo->addr.client, pinfo->addr.port)) != NULL) {
+ mdev = find_slot(pinfo->addr.client, pinfo->addr.port);
+ if (mdev) {
/* already exists */
snd_use_lock_free(&mdev->use_lock);
return 0;
@@ -218,7 +219,8 @@ snd_seq_oss_midi_check_exit_port(int client, int port)
unsigned long flags;
int index;
- if ((mdev = find_slot(client, port)) != NULL) {
+ mdev = find_slot(client, port);
+ if (mdev) {
spin_lock_irqsave(&register_lock, flags);
midi_devs[mdev->seq_device] = NULL;
spin_unlock_irqrestore(&register_lock, flags);
@@ -250,7 +252,8 @@ snd_seq_oss_midi_clear_all(void)
spin_lock_irqsave(&register_lock, flags);
for (i = 0; i < max_midi_devs; i++) {
- if ((mdev = midi_devs[i]) != NULL) {
+ mdev = midi_devs[i];
+ if (mdev) {
snd_midi_event_free(mdev->coder);
kfree(mdev);
midi_devs[i] = NULL;
@@ -318,7 +321,8 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
struct seq_oss_midi *mdev;
struct snd_seq_port_subscribe subs;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return -ENODEV;
/* already used? */
@@ -384,7 +388,8 @@ snd_seq_oss_midi_close(struct seq_oss_devinfo *dp, int dev)
struct seq_oss_midi *mdev;
struct snd_seq_port_subscribe subs;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return -ENODEV;
if (! mdev->opened || mdev->devinfo != dp) {
snd_use_lock_free(&mdev->use_lock);
@@ -421,7 +426,8 @@ snd_seq_oss_midi_filemode(struct seq_oss_devinfo *dp, int dev)
struct seq_oss_midi *mdev;
int mode;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return 0;
mode = 0;
@@ -443,7 +449,8 @@ snd_seq_oss_midi_reset(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_midi *mdev;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return;
if (! mdev->opened) {
snd_use_lock_free(&mdev->use_lock);
@@ -491,7 +498,8 @@ snd_seq_oss_midi_get_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_ad
{
struct seq_oss_midi *mdev;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return;
addr->client = mdev->client;
addr->port = mdev->port;
@@ -511,7 +519,8 @@ snd_seq_oss_midi_input(struct snd_seq_event *ev, int direct, void *private_data)
if (dp->readq == NULL)
return 0;
- if ((mdev = find_slot(ev->source.client, ev->source.port)) == NULL)
+ mdev = find_slot(ev->source.client, ev->source.port);
+ if (!mdev)
return 0;
if (! (mdev->opened & PERM_READ)) {
snd_use_lock_free(&mdev->use_lock);
@@ -623,7 +632,8 @@ snd_seq_oss_midi_putc(struct seq_oss_devinfo *dp, int dev, unsigned char c, stru
{
struct seq_oss_midi *mdev;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return -ENODEV;
if (snd_midi_event_encode_byte(mdev->coder, c, ev)) {
snd_seq_oss_fill_addr(dp, ev, mdev->client, mdev->port);
@@ -642,7 +652,8 @@ snd_seq_oss_midi_make_info(struct seq_oss_devinfo *dp, int dev, struct midi_info
{
struct seq_oss_midi *mdev;
- if ((mdev = get_mididev(dp, dev)) == NULL)
+ mdev = get_mididev(dp, dev);
+ if (!mdev)
return -ENXIO;
inf->device = dev;
inf->dev_type = 0; /* FIXME: ?? */
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 537d5f423e20..8a142fd54a19 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -132,7 +132,8 @@ snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int count,
}
/* insert queue */
- if ((err = insert_queue(dp, &rec, opt)) < 0)
+ err = insert_queue(dp, &rec, opt);
+ if (err < 0)
break;
result += ev_size;
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 722f5059b300..e3394919daa0 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -451,7 +451,8 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
if (info->is_midi)
return 0;
- if ((rec = get_synthdev(dp, dev)) == NULL)
+ rec = get_synthdev(dp, dev);
+ if (!rec)
return -ENXIO;
if (rec->oper.load_patch == NULL)
@@ -569,7 +570,8 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
info = get_synthinfo_nospec(dp, dev);
if (!info || info->is_midi)
return -ENXIO;
- if ((rec = get_synthdev(dp, dev)) == NULL)
+ rec = get_synthdev(dp, dev);
+ if (!rec)
return -ENXIO;
if (rec->oper.ioctl == NULL)
rc = -ENXIO;
@@ -619,7 +621,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
inf->device = dev;
strscpy(inf->name, minf.name, sizeof(inf->name));
} else {
- if ((rec = get_synthdev(dp, dev)) == NULL)
+ rec = get_synthdev(dp, dev);
+ if (!rec)
return -ENXIO;
inf->synth_type = rec->synth_type;
inf->synth_subtype = rec->synth_subtype;
diff --git a/sound/core/seq/oss/seq_oss_writeq.c b/sound/core/seq/oss/seq_oss_writeq.c
index 0a02a59103b4..3e3209ce53b1 100644
--- a/sound/core/seq/oss/seq_oss_writeq.c
+++ b/sound/core/seq/oss/seq_oss_writeq.c
@@ -27,7 +27,8 @@ snd_seq_oss_writeq_new(struct seq_oss_devinfo *dp, int maxlen)
struct seq_oss_writeq *q;
struct snd_seq_client_pool pool;
- if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL)
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
return NULL;
q->dp = dp;
q->maxlen = maxlen;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index b6a24fb5e76b..2e9d695d336c 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -416,7 +416,10 @@ static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
if (snd_BUG_ON(!client))
return -ENXIO;
- if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
+ if (!client->accept_input)
+ return -ENXIO;
+ fifo = client->data.user.fifo;
+ if (!fifo)
return -ENXIO;
if (atomic_read(&fifo->overflow) > 0) {
@@ -435,9 +438,9 @@ static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
int nonblock;
nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
- if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
+ err = snd_seq_fifo_cell_out(fifo, &cell, nonblock);
+ if (err < 0)
break;
- }
if (snd_seq_ev_is_variable(&cell->event)) {
struct snd_seq_event tmpev;
tmpev = cell->event;
@@ -970,7 +973,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
return err;
/* we got a cell. enqueue it. */
- if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
+ err = snd_seq_enqueue_event(cell, atomic, hop);
+ if (err < 0) {
snd_seq_cell_free(cell);
return err;
}
@@ -1312,7 +1316,8 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
- if ((callback = info->kernel) != NULL) {
+ callback = info->kernel;
+ if (callback) {
if (callback->owner)
port->owner = callback->owner;
port->private_data = callback->private_data;
@@ -1466,13 +1471,17 @@ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
struct snd_seq_client *receiver = NULL, *sender = NULL;
struct snd_seq_client_port *sport = NULL, *dport = NULL;
- if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
+ receiver = snd_seq_client_use_ptr(subs->dest.client);
+ if (!receiver)
goto __end;
- if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
+ sender = snd_seq_client_use_ptr(subs->sender.client);
+ if (!sender)
goto __end;
- if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
+ sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+ if (!sport)
goto __end;
- if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
+ dport = snd_seq_port_use_ptr(receiver, subs->dest.port);
+ if (!dport)
goto __end;
result = check_subscription_permission(client, sport, dport, subs);
@@ -1508,13 +1517,17 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
struct snd_seq_client *receiver = NULL, *sender = NULL;
struct snd_seq_client_port *sport = NULL, *dport = NULL;
- if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
+ receiver = snd_seq_client_use_ptr(subs->dest.client);
+ if (!receiver)
goto __end;
- if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
+ sender = snd_seq_client_use_ptr(subs->sender.client);
+ if (!sender)
goto __end;
- if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
+ sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+ if (!sport)
goto __end;
- if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
+ dport = snd_seq_port_use_ptr(receiver, subs->dest.port);
+ if (!dport)
goto __end;
result = check_subscription_permission(client, sport, dport, subs);
@@ -1926,9 +1939,11 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
struct snd_seq_client_port *sport = NULL;
result = -EINVAL;
- if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
+ sender = snd_seq_client_use_ptr(subs->sender.client);
+ if (!sender)
goto __end;
- if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
+ sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+ if (!sport)
goto __end;
result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
subs);
@@ -1955,9 +1970,11 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
struct list_head *p;
int i;
- if ((cptr = snd_seq_client_use_ptr(subs->root.client)) == NULL)
+ cptr = snd_seq_client_use_ptr(subs->root.client);
+ if (!cptr)
goto __end;
- if ((port = snd_seq_port_use_ptr(cptr, subs->root.port)) == NULL)
+ port = snd_seq_port_use_ptr(cptr, subs->root.port);
+ if (!port)
goto __end;
switch (subs->type) {
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index cd5a4cad8881..ac760b1e3d12 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -109,7 +109,8 @@ create_port(int idx, int type)
struct snd_seq_port_callback pcb;
struct snd_seq_dummy_port *rec;
- if ((rec = kzalloc(sizeof(*rec), GFP_KERNEL)) == NULL)
+ rec = kzalloc(sizeof(*rec), GFP_KERNEL);
+ if (!rec)
return NULL;
rec->client = my_client;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index eaaa8b5830bb..f8e02e98709a 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -143,7 +143,8 @@ static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
struct snd_seq_event_cell *cell;
- if ((cell = f->head) != NULL) {
+ cell = f->head;
+ if (cell) {
f->head = cell->next;
/* reset tail if this was the last element */
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index e245bb6ba533..b7aee23fc387 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -69,7 +69,8 @@ int snd_seq_dump_var_event(const struct snd_seq_event *event,
int len, err;
struct snd_seq_event_cell *cell;
- if ((len = get_var_len(event)) <= 0)
+ len = get_var_len(event);
+ if (len <= 0)
return len;
if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
@@ -133,7 +134,8 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
int len, newlen;
int err;
- if ((len = get_var_len(event)) < 0)
+ len = get_var_len(event);
+ if (len < 0)
return len;
newlen = len;
if (size_aligned > 0)
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 6825940ea2cf..4589aac09154 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -101,7 +101,8 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
if (snd_BUG_ON(!substream || !buf))
return -EINVAL;
runtime = substream->runtime;
- if ((tmp = runtime->avail) < count) {
+ tmp = runtime->avail;
+ if (tmp < count) {
if (printk_ratelimit())
pr_err("ALSA: seq_midi: MIDI output buffer overrun\n");
return -ENOMEM;
@@ -167,10 +168,11 @@ static int midisynth_subscribe(void *private_data, struct snd_seq_port_subscribe
struct snd_rawmidi_params params;
/* open midi port */
- if ((err = snd_rawmidi_kernel_open(msynth->card, msynth->device,
- msynth->subdevice,
- SNDRV_RAWMIDI_LFLG_INPUT,
- &msynth->input_rfile)) < 0) {
+ err = snd_rawmidi_kernel_open(msynth->card, msynth->device,
+ msynth->subdevice,
+ SNDRV_RAWMIDI_LFLG_INPUT,
+ &msynth->input_rfile);
+ if (err < 0) {
pr_debug("ALSA: seq_midi: midi input open failed!!!\n");
return err;
}
@@ -178,7 +180,8 @@ static int midisynth_subscribe(void *private_data, struct snd_seq_port_subscribe
memset(&params, 0, sizeof(params));
params.avail_min = 1;
params.buffer_size = input_buffer_size;
- if ((err = snd_rawmidi_input_params(msynth->input_rfile.input, &params)) < 0) {
+ err = snd_rawmidi_input_params(msynth->input_rfile.input, &params);
+ if (err < 0) {
snd_rawmidi_kernel_release(&msynth->input_rfile);
return err;
}
@@ -209,10 +212,11 @@ static int midisynth_use(void *private_data, struct snd_seq_port_subscribe *info
struct snd_rawmidi_params params;
/* open midi port */
- if ((err = snd_rawmidi_kernel_open(msynth->card, msynth->device,
- msynth->subdevice,
- SNDRV_RAWMIDI_LFLG_OUTPUT,
- &msynth->output_rfile)) < 0) {
+ err = snd_rawmidi_kernel_open(msynth->card, msynth->device,
+ msynth->subdevice,
+ SNDRV_RAWMIDI_LFLG_OUTPUT,
+ &msynth->output_rfile);
+ if (err < 0) {
pr_debug("ALSA: seq_midi: midi output open failed!!!\n");
return err;
}
@@ -220,7 +224,8 @@ static int midisynth_use(void *private_data, struct snd_seq_port_subscribe *info
params.avail_min = 1;
params.buffer_size = output_buffer_size;
params.no_active_sensing = 1;
- if ((err = snd_rawmidi_output_params(msynth->output_rfile.output, &params)) < 0) {
+ err = snd_rawmidi_output_params(msynth->output_rfile.output, &params);
+ if (err < 0) {
snd_rawmidi_kernel_release(&msynth->output_rfile);
return err;
}
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index b9c2ce2b8d5a..84d78630463e 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -514,10 +514,11 @@ static int check_and_subscribe_port(struct snd_seq_client *client,
return err;
}
-static void delete_and_unsubscribe_port(struct snd_seq_client *client,
- struct snd_seq_client_port *port,
- struct snd_seq_subscribers *subs,
- bool is_src, bool ack)
+/* called with grp->list_mutex held */
+static void __delete_and_unsubscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool ack)
{
struct snd_seq_port_subs_info *grp;
struct list_head *list;
@@ -525,7 +526,6 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
grp = is_src ? &port->c_src : &port->c_dest;
list = is_src ? &subs->src_list : &subs->dest_list;
- down_write(&grp->list_mutex);
write_lock_irq(&grp->list_lock);
empty = list_empty(list);
if (!empty)
@@ -535,6 +535,18 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
if (!empty)
unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool ack)
+{
+ struct snd_seq_port_subs_info *grp;
+
+ grp = is_src ? &port->c_src : &port->c_dest;
+ down_write(&grp->list_mutex);
+ __delete_and_unsubscribe_port(client, port, subs, is_src, ack);
up_write(&grp->list_mutex);
}
@@ -590,27 +602,30 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
struct snd_seq_client_port *dest_port,
struct snd_seq_port_subscribe *info)
{
- struct snd_seq_port_subs_info *src = &src_port->c_src;
+ struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
struct snd_seq_subscribers *subs;
int err = -ENOENT;
- down_write(&src->list_mutex);
+ /* always start from deleting the dest port for avoiding concurrent
+ * deletions
+ */
+ down_write(&dest->list_mutex);
/* look for the connection */
- list_for_each_entry(subs, &src->list_head, src_list) {
+ list_for_each_entry(subs, &dest->list_head, dest_list) {
if (match_subs_info(info, &subs->info)) {
- atomic_dec(&subs->ref_count); /* mark as not ready */
+ __delete_and_unsubscribe_port(dest_client, dest_port,
+ subs, false,
+ connector->number != dest_client->number);
err = 0;
break;
}
}
- up_write(&src->list_mutex);
+ up_write(&dest->list_mutex);
if (err < 0)
return err;
delete_and_unsubscribe_port(src_client, src_port, subs, true,
connector->number != src_client->number);
- delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
- connector->number != dest_client->number);
kfree(subs);
return 0;
}
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 13cfc2d47fa7..d6c02dea976c 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -222,7 +222,8 @@ struct snd_seq_queue *snd_seq_queue_find_name(char *name)
struct snd_seq_queue *q;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
- if ((q = queueptr(i)) != NULL) {
+ q = queueptr(i);
+ if (q) {
if (strncmp(q->name, name, sizeof(q->name)) == 0)
return q;
queuefree(q);
@@ -432,7 +433,8 @@ int snd_seq_queue_timer_open(int queueid)
if (queue == NULL)
return -EINVAL;
tmr = queue->timer;
- if ((result = snd_seq_timer_open(queue)) < 0) {
+ result = snd_seq_timer_open(queue);
+ if (result < 0) {
snd_seq_timer_defaults(tmr);
result = snd_seq_timer_open(queue);
}
@@ -548,7 +550,8 @@ void snd_seq_queue_client_leave(int client)
/* delete own queues from queue list */
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
- if ((q = queue_list_remove(i, client)) != NULL)
+ q = queue_list_remove(i, client);
+ if (q)
queue_delete(q);
}
@@ -556,7 +559,8 @@ void snd_seq_queue_client_leave(int client)
* they are not owned by this client
*/
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
- if ((q = queueptr(i)) == NULL)
+ q = queueptr(i);
+ if (!q)
continue;
if (test_bit(client, q->clients_bitmap)) {
snd_seq_prioq_leave(q->tickq, client, 0);
@@ -578,7 +582,8 @@ void snd_seq_queue_client_leave_cells(int client)
struct snd_seq_queue *q;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
- if ((q = queueptr(i)) == NULL)
+ q = queueptr(i);
+ if (!q)
continue;
snd_seq_prioq_leave(q->tickq, client, 0);
snd_seq_prioq_leave(q->timeq, client, 0);
@@ -593,7 +598,8 @@ void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
struct snd_seq_queue *q;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
- if ((q = queueptr(i)) == NULL)
+ q = queueptr(i);
+ if (!q)
continue;
if (test_bit(client, q->clients_bitmap) &&
(! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
@@ -724,7 +730,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
int owner;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
- if ((q = queueptr(i)) == NULL)
+ q = queueptr(i);
+ if (!q)
continue;
tmr = q->timer;
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 77d7037d1476..4abc38c70cae 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -482,10 +482,11 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
int err;
*rrmidi = NULL;
- if ((err = snd_rawmidi_new(card, "VirMidi", device,
- 16, /* may be configurable */
- 16, /* may be configurable */
- &rmidi)) < 0)
+ err = snd_rawmidi_new(card, "VirMidi", device,
+ 16, /* may be configurable */
+ 16, /* may be configurable */
+ &rmidi);
+ if (err < 0)
return err;
strcpy(rmidi->name, rmidi->id);
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 29ddb76187e5..232cf3f1bcb3 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -10,20 +10,34 @@
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
-
+#include "memalloc_local.h"
+
+struct snd_sg_page {
+ void *buf;
+ dma_addr_t addr;
+};
+
+struct snd_sg_buf {
+ int size; /* allocated byte size */
+ int pages; /* allocated pages */
+ int tblsize; /* allocated table size */
+ struct snd_sg_page *table; /* address table */
+ struct page **page_table; /* page table (for vmap/vunmap) */
+ struct device *dev;
+};
/* table entries are align to 32 */
#define SGBUF_TBL_ALIGN 32
#define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
+static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
struct snd_sg_buf *sgbuf = dmab->private_data;
struct snd_dma_buffer tmpb;
int i;
- if (! sgbuf)
- return -EINVAL;
+ if (!sgbuf)
+ return;
vunmap(dmab->area);
dmab->area = NULL;
@@ -45,15 +59,11 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
kfree(sgbuf->page_table);
kfree(sgbuf);
dmab->private_data = NULL;
-
- return 0;
}
#define MAX_ALLOC_PAGES 32
-void *snd_malloc_sgbuf_pages(struct device *device,
- size_t size, struct snd_dma_buffer *dmab,
- size_t *res_size)
+static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
struct snd_sg_buf *sgbuf;
unsigned int i, pages, chunk, maxpages;
@@ -63,18 +73,16 @@ void *snd_malloc_sgbuf_pages(struct device *device,
int type = SNDRV_DMA_TYPE_DEV;
pgprot_t prot = PAGE_KERNEL;
- dmab->area = NULL;
- dmab->addr = 0;
dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
- if (! sgbuf)
- return NULL;
+ if (!sgbuf)
+ return -ENOMEM;
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
prot = pgprot_noncached(PAGE_KERNEL);
#endif
}
- sgbuf->dev = device;
+ sgbuf->dev = dmab->dev.dev;
pages = snd_sgbuf_aligned_pages(size);
sgbuf->tblsize = sgbuf_align_table(pages);
table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
@@ -94,12 +102,10 @@ void *snd_malloc_sgbuf_pages(struct device *device,
if (chunk > maxpages)
chunk = maxpages;
chunk <<= PAGE_SHIFT;
- if (snd_dma_alloc_pages_fallback(type, device,
+ if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
goto _failed;
- if (!res_size)
- goto _failed;
size = sgbuf->pages * PAGE_SIZE;
break;
}
@@ -124,27 +130,42 @@ void *snd_malloc_sgbuf_pages(struct device *device,
dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
if (! dmab->area)
goto _failed;
- if (res_size)
- *res_size = sgbuf->size;
- return dmab->area;
+ return 0;
_failed:
- snd_free_sgbuf_pages(dmab); /* free the table */
- return NULL;
+ snd_dma_sg_free(dmab); /* free the table */
+ return -ENOMEM;
}
-/*
- * compute the max chunk size with continuous pages on sg-buffer
- */
-unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
- unsigned int ofs, unsigned int size)
+static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct snd_sg_buf *sgbuf = dmab->private_data;
+ dma_addr_t addr;
+
+ addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+ return addr + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
+ size_t offset)
+{
+ struct snd_sg_buf *sgbuf = dmab->private_data;
+ unsigned int idx = offset >> PAGE_SHIFT;
+
+ if (idx >= (unsigned int)sgbuf->pages)
+ return NULL;
+ return sgbuf->page_table[idx];
+}
+
+static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
+ unsigned int ofs,
+ unsigned int size)
{
struct snd_sg_buf *sg = dmab->private_data;
unsigned int start, end, pg;
- if (!sg)
- return size;
-
start = ofs >> PAGE_SHIFT;
end = (ofs + size - 1) >> PAGE_SHIFT;
/* check page continuity */
@@ -160,4 +181,11 @@ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
/* ok, all on continuous pages */
return size;
}
-EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+const struct snd_malloc_ops snd_dma_sg_ops = {
+ .alloc = snd_dma_sg_alloc,
+ .free = snd_dma_sg_free,
+ .get_addr = snd_dma_sg_get_addr,
+ .get_page = snd_dma_sg_get_page,
+ .get_chunk_size = snd_dma_sg_get_chunk_size,
+};
diff --git a/sound/core/sound.c b/sound/core/sound.c
index af89e51dd44a..df5571d98629 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -357,7 +357,8 @@ static void snd_minor_info_read(struct snd_info_entry *entry, struct snd_info_bu
mutex_lock(&sound_mutex);
for (minor = 0; minor < SNDRV_OS_MINORS; ++minor) {
- if (!(mptr = snd_minors[minor]))
+ mptr = snd_minors[minor];
+ if (!mptr)
continue;
if (mptr->card >= 0) {
if (mptr->device >= 0)
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 610f317bea9d..7ed0a2a91035 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -217,7 +217,8 @@ static void snd_minor_info_oss_read(struct snd_info_entry *entry,
mutex_lock(&sound_oss_mutex);
for (minor = 0; minor < SNDRV_OSS_MINORS; ++minor) {
- if (!(mptr = snd_oss_minors[minor]))
+ mptr = snd_oss_minors[minor];
+ if (!mptr)
continue;
if (mptr->card >= 0)
snd_iprintf(buffer, "%3i: [%i-%2i]: %s\n", minor,
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index 8c552e25805a..d0b55dbb411a 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -104,7 +104,8 @@ static int snd_mpu401_probe(struct platform_device *devptr)
err = snd_mpu401_create(&devptr->dev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -182,7 +183,8 @@ static int snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev,
err = snd_mpu401_create(&pnp_dev->dev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -227,7 +229,8 @@ static int __init alsa_card_mpu401_init(void)
{
int i, err;
- if ((err = platform_driver_register(&snd_mpu401_driver)) < 0)
+ err = platform_driver_register(&snd_mpu401_driver);
+ if (err < 0)
return err;
for (i = 0; i < SNDRV_CARDS; i++) {
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index 65982d6babfc..f435b9b4ae24 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -271,8 +271,11 @@ static int snd_mpu401_uart_input_open(struct snd_rawmidi_substream *substream)
int err;
mpu = substream->rmidi->private_data;
- if (mpu->open_input && (err = mpu->open_input(mpu)) < 0)
- return err;
+ if (mpu->open_input) {
+ err = mpu->open_input(mpu);
+ if (err < 0)
+ return err;
+ }
if (! test_bit(MPU401_MODE_BIT_OUTPUT, &mpu->mode)) {
if (snd_mpu401_do_reset(mpu) < 0)
goto error_out;
@@ -293,8 +296,11 @@ static int snd_mpu401_uart_output_open(struct snd_rawmidi_substream *substream)
int err;
mpu = substream->rmidi->private_data;
- if (mpu->open_output && (err = mpu->open_output(mpu)) < 0)
- return err;
+ if (mpu->open_output) {
+ err = mpu->open_output(mpu);
+ if (err < 0)
+ return err;
+ }
if (! test_bit(MPU401_MODE_BIT_INPUT, &mpu->mode)) {
if (snd_mpu401_do_reset(mpu) < 0)
goto error_out;
@@ -524,8 +530,9 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
info_flags |= MPU401_INFO_INPUT | MPU401_INFO_OUTPUT;
in_enable = (info_flags & MPU401_INFO_INPUT) ? 1 : 0;
out_enable = (info_flags & MPU401_INFO_OUTPUT) ? 1 : 0;
- if ((err = snd_rawmidi_new(card, "MPU-401U", device,
- out_enable, in_enable, &rmidi)) < 0)
+ err = snd_rawmidi_new(card, "MPU-401U", device,
+ out_enable, in_enable, &rmidi);
+ if (err < 0)
return err;
mpu = kzalloc(sizeof(*mpu), GFP_KERNEL);
if (!mpu) {
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index df4b7f9cd50f..0e95b08d34d6 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -566,7 +566,8 @@ static irqreturn_t snd_mtpav_irqh(int irq, void *dev_id)
*/
static int snd_mtpav_get_ISA(struct mtpav *mcard)
{
- if ((mcard->res_port = request_region(port, 3, "MotuMTPAV MIDI")) == NULL) {
+ mcard->res_port = request_region(port, 3, "MotuMTPAV MIDI");
+ if (!mcard->res_port) {
snd_printk(KERN_ERR "MTVAP port 0x%lx is busy\n", port);
return -EBUSY;
}
@@ -628,10 +629,11 @@ static int snd_mtpav_get_RAWMIDI(struct mtpav *mcard)
hwports = 8;
mcard->num_ports = hwports;
- if ((rval = snd_rawmidi_new(mcard->card, "MotuMIDI", 0,
- mcard->num_ports * 2 + MTPAV_PIDX_BROADCAST + 1,
- mcard->num_ports * 2 + MTPAV_PIDX_BROADCAST + 1,
- &mcard->rmidi)) < 0)
+ rval = snd_rawmidi_new(mcard->card, "MotuMIDI", 0,
+ mcard->num_ports * 2 + MTPAV_PIDX_BROADCAST + 1,
+ mcard->num_ports * 2 + MTPAV_PIDX_BROADCAST + 1,
+ &mcard->rmidi);
+ if (rval < 0)
return rval;
rawmidi = mcard->rmidi;
rawmidi->private_data = mcard;
@@ -744,7 +746,8 @@ static int __init alsa_card_mtpav_init(void)
{
int err;
- if ((err = platform_driver_register(&snd_mtpav_driver)) < 0)
+ err = platform_driver_register(&snd_mtpav_driver);
+ if (err < 0)
return err;
device = platform_device_register_simple(SND_MTPAV_DRIVER, -1, NULL, 0);
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index 322d530ab07b..d3bc9e8c407d 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -950,7 +950,8 @@ static int snd_mts64_probe(struct platform_device *pdev)
goto free_pardev;
}
- if ((err = snd_mts64_create(card, pardev, &mts)) < 0) {
+ err = snd_mts64_create(card, pardev, &mts);
+ if (err < 0) {
snd_printd("Cannot create main component\n");
goto release_pardev;
}
@@ -963,19 +964,22 @@ static int snd_mts64_probe(struct platform_device *pdev)
goto __err;
}
- if ((err = snd_mts64_rawmidi_create(card)) < 0) {
+ err = snd_mts64_rawmidi_create(card);
+ if (err < 0) {
snd_printd("Creating Rawmidi component failed\n");
goto __err;
}
/* init device */
- if ((err = mts64_device_init(p)) < 0)
+ err = mts64_device_init(p);
+ if (err < 0)
goto __err;
platform_set_drvdata(pdev, card);
/* At this point card will be usable */
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_printd("Cannot register card\n");
goto __err;
}
@@ -1031,7 +1035,8 @@ static int __init snd_mts64_module_init(void)
{
int err;
- if ((err = platform_driver_register(&snd_mts64_driver)) < 0)
+ err = platform_driver_register(&snd_mts64_driver);
+ if (err < 0)
return err;
if (parport_register_driver(&mts64_parport_driver) != 0) {
diff --git a/sound/drivers/opl3/opl3_lib.c b/sound/drivers/opl3/opl3_lib.c
index 9259522483c8..6c1f1cc092d8 100644
--- a/sound/drivers/opl3/opl3_lib.c
+++ b/sound/drivers/opl3/opl3_lib.c
@@ -243,7 +243,8 @@ static int snd_opl3_timer1_init(struct snd_opl3 * opl3, int timer_no)
tid.card = opl3->card->number;
tid.device = timer_no;
tid.subdevice = 0;
- if ((err = snd_timer_new(opl3->card, "AdLib timer #1", &tid, &timer)) >= 0) {
+ err = snd_timer_new(opl3->card, "AdLib timer #1", &tid, &timer);
+ if (err >= 0) {
strcpy(timer->name, "AdLib timer #1");
timer->private_data = opl3;
timer->hw = snd_opl3_timer1;
@@ -263,7 +264,8 @@ static int snd_opl3_timer2_init(struct snd_opl3 * opl3, int timer_no)
tid.card = opl3->card->number;
tid.device = timer_no;
tid.subdevice = 0;
- if ((err = snd_timer_new(opl3->card, "AdLib timer #2", &tid, &timer)) >= 0) {
+ err = snd_timer_new(opl3->card, "AdLib timer #2", &tid, &timer);
+ if (err >= 0) {
strcpy(timer->name, "AdLib timer #2");
timer->private_data = opl3;
timer->hw = snd_opl3_timer2;
@@ -348,7 +350,8 @@ int snd_opl3_new(struct snd_card *card,
spin_lock_init(&opl3->reg_lock);
spin_lock_init(&opl3->timer_lock);
- if ((err = snd_device_new(card, SNDRV_DEV_CODEC, opl3, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_CODEC, opl3, &ops);
+ if (err < 0) {
snd_opl3_free(opl3);
return err;
}
@@ -396,19 +399,23 @@ int snd_opl3_create(struct snd_card *card,
int err;
*ropl3 = NULL;
- if ((err = snd_opl3_new(card, hardware, &opl3)) < 0)
+ err = snd_opl3_new(card, hardware, &opl3);
+ if (err < 0)
return err;
if (! integrated) {
- if ((opl3->res_l_port = request_region(l_port, 2, "OPL2/3 (left)")) == NULL) {
+ opl3->res_l_port = request_region(l_port, 2, "OPL2/3 (left)");
+ if (!opl3->res_l_port) {
snd_printk(KERN_ERR "opl3: can't grab left port 0x%lx\n", l_port);
snd_device_free(card, opl3);
return -EBUSY;
}
- if (r_port != 0 &&
- (opl3->res_r_port = request_region(r_port, 2, "OPL2/3 (right)")) == NULL) {
- snd_printk(KERN_ERR "opl3: can't grab right port 0x%lx\n", r_port);
- snd_device_free(card, opl3);
- return -EBUSY;
+ if (r_port != 0) {
+ opl3->res_r_port = request_region(r_port, 2, "OPL2/3 (right)");
+ if (!opl3->res_r_port) {
+ snd_printk(KERN_ERR "opl3: can't grab right port 0x%lx\n", r_port);
+ snd_device_free(card, opl3);
+ return -EBUSY;
+ }
}
}
opl3->l_port = l_port;
@@ -423,7 +430,8 @@ int snd_opl3_create(struct snd_card *card,
break;
default:
opl3->command = &snd_opl2_command;
- if ((err = snd_opl3_detect(opl3)) < 0) {
+ err = snd_opl3_detect(opl3);
+ if (err < 0) {
snd_printd("OPL2/3 chip not detected at 0x%lx/0x%lx\n",
opl3->l_port, opl3->r_port);
snd_device_free(card, opl3);
@@ -449,11 +457,14 @@ int snd_opl3_timer_new(struct snd_opl3 * opl3, int timer1_dev, int timer2_dev)
{
int err;
- if (timer1_dev >= 0)
- if ((err = snd_opl3_timer1_init(opl3, timer1_dev)) < 0)
+ if (timer1_dev >= 0) {
+ err = snd_opl3_timer1_init(opl3, timer1_dev);
+ if (err < 0)
return err;
+ }
if (timer2_dev >= 0) {
- if ((err = snd_opl3_timer2_init(opl3, timer2_dev)) < 0) {
+ err = snd_opl3_timer2_init(opl3, timer2_dev);
+ if (err < 0) {
snd_device_free(opl3->card, opl3->timer1);
opl3->timer1 = NULL;
return err;
@@ -477,7 +488,8 @@ int snd_opl3_hwdep_new(struct snd_opl3 * opl3,
/* create hardware dependent device (direct FM) */
- if ((err = snd_hwdep_new(card, "OPL2/OPL3", device, &hw)) < 0) {
+ err = snd_hwdep_new(card, "OPL2/OPL3", device, &hw);
+ if (err < 0) {
snd_device_free(card, opl3);
return err;
}
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index eb23c55323ae..e1b69c65c3c8 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -180,8 +180,7 @@ static int opl3_get_voice(struct snd_opl3 *opl3, int instr_4op,
if (vp2->state == SNDRV_OPL3_ST_ON_2OP) {
/* kill two voices, EXPENSIVE */
bp++;
- voice_time = (voice_time > vp->time) ?
- voice_time : vp->time;
+ voice_time = max(voice_time, vp2->time);
}
} else {
/* allocate 2op voice */
diff --git a/sound/drivers/opl3/opl3_oss.c b/sound/drivers/opl3/opl3_oss.c
index c82c7c1c0714..7645365eec89 100644
--- a/sound/drivers/opl3/opl3_oss.c
+++ b/sound/drivers/opl3/opl3_oss.c
@@ -136,7 +136,8 @@ static int snd_opl3_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
if (snd_BUG_ON(!arg))
return -ENXIO;
- if ((err = snd_opl3_synth_setup(opl3)) < 0)
+ err = snd_opl3_synth_setup(opl3);
+ if (err < 0)
return err;
/* fill the argument data */
@@ -144,7 +145,8 @@ static int snd_opl3_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
arg->addr.client = opl3->oss_chset->client;
arg->addr.port = opl3->oss_chset->port;
- if ((err = snd_opl3_synth_use_inc(opl3)) < 0)
+ err = snd_opl3_synth_use_inc(opl3);
+ if (err < 0)
return err;
opl3->synth_mode = SNDRV_OPL3_MODE_SYNTH;
diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
index cd2a01b5e2e1..75de1299c3dc 100644
--- a/sound/drivers/opl3/opl3_seq.c
+++ b/sound/drivers/opl3/opl3_seq.c
@@ -92,7 +92,8 @@ static int snd_opl3_synth_use(void *private_data, struct snd_seq_port_subscribe
struct snd_opl3 *opl3 = private_data;
int err;
- if ((err = snd_opl3_synth_setup(opl3)) < 0)
+ err = snd_opl3_synth_setup(opl3);
+ if (err < 0)
return err;
if (use_internal_drums) {
@@ -107,7 +108,8 @@ static int snd_opl3_synth_use(void *private_data, struct snd_seq_port_subscribe
}
if (info->sender.client != SNDRV_SEQ_CLIENT_SYSTEM) {
- if ((err = snd_opl3_synth_use_inc(opl3)) < 0)
+ err = snd_opl3_synth_use_inc(opl3);
+ if (err < 0)
return err;
}
opl3->synth_mode = SNDRV_OPL3_MODE_SEQ;
@@ -227,7 +229,8 @@ static int snd_opl3_seq_probe(struct device *_dev)
if (client < 0)
return client;
- if ((err = snd_opl3_synth_create_port(opl3)) < 0) {
+ err = snd_opl3_synth_create_port(opl3);
+ if (err < 0) {
snd_seq_delete_kernel_client(client);
opl3->seq_client = -1;
return err;
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
index 2f4514ed47c5..52a656735365 100644
--- a/sound/drivers/portman2x4.c
+++ b/sound/drivers/portman2x4.c
@@ -749,7 +749,8 @@ static int snd_portman_probe(struct platform_device *pdev)
goto free_pardev;
}
- if ((err = portman_create(card, pardev, &pm)) < 0) {
+ err = portman_create(card, pardev, &pm);
+ if (err < 0) {
snd_printd("Cannot create main component\n");
goto release_pardev;
}
@@ -762,19 +763,22 @@ static int snd_portman_probe(struct platform_device *pdev)
goto __err;
}
- if ((err = snd_portman_rawmidi_create(card)) < 0) {
+ err = snd_portman_rawmidi_create(card);
+ if (err < 0) {
snd_printd("Creating Rawmidi component failed\n");
goto __err;
}
/* init device */
- if ((err = portman_device_init(pm)) < 0)
+ err = portman_device_init(pm);
+ if (err < 0)
goto __err;
platform_set_drvdata(pdev, card);
/* At this point card will be usable */
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_printd("Cannot register card\n");
goto __err;
}
@@ -831,7 +835,8 @@ static int __init snd_portman_module_init(void)
{
int err;
- if ((err = platform_driver_register(&snd_portman_driver)) < 0)
+ err = platform_driver_register(&snd_portman_driver);
+ if (err < 0)
return err;
if (parport_register_driver(&portman_parport_driver) != 0) {
diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c
index 6d5d1ca59ecf..da9983cba01c 100644
--- a/sound/drivers/serial-u16550.c
+++ b/sound/drivers/serial-u16550.c
@@ -783,7 +783,8 @@ static int snd_uart16550_create(struct snd_card *card,
int err;
- if ((uart = kzalloc(sizeof(*uart), GFP_KERNEL)) == NULL)
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart)
return -ENOMEM;
uart->adaptor = adaptor;
uart->card = card;
@@ -792,7 +793,8 @@ static int snd_uart16550_create(struct snd_card *card,
uart->base = iobase;
uart->drop_on_full = droponfull;
- if ((err = snd_uart16550_detect(uart)) <= 0) {
+ err = snd_uart16550_detect(uart);
+ if (err <= 0) {
printk(KERN_ERR "no UART detected at 0x%lx\n", iobase);
snd_uart16550_free(uart);
return -ENODEV;
@@ -818,7 +820,8 @@ static int snd_uart16550_create(struct snd_card *card,
uart->timer_running = 0;
/* Register device */
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, uart, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, uart, &ops);
+ if (err < 0) {
snd_uart16550_free(uart);
return err;
}
@@ -932,14 +935,10 @@ static int snd_serial_probe(struct platform_device *devptr)
strcpy(card->driver, "Serial");
strcpy(card->shortname, "Serial MIDI (UART16550A)");
- if ((err = snd_uart16550_create(card,
- port[dev],
- irq[dev],
- speed[dev],
- base[dev],
- adaptor[dev],
- droponfull[dev],
- &uart)) < 0)
+ err = snd_uart16550_create(card, port[dev], irq[dev], speed[dev],
+ base[dev], adaptor[dev], droponfull[dev],
+ &uart);
+ if (err < 0)
goto _err;
err = snd_uart16550_rmidi(uart, 0, outs[dev], ins[dev], &uart->rmidi);
@@ -952,7 +951,8 @@ static int snd_serial_probe(struct platform_device *devptr)
uart->base,
uart->irq);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto _err;
platform_set_drvdata(devptr, card);
@@ -992,7 +992,8 @@ static int __init alsa_card_serial_init(void)
{
int i, cards, err;
- if ((err = platform_driver_register(&snd_serial_driver)) < 0)
+ err = platform_driver_register(&snd_serial_driver);
+ if (err < 0)
return err;
cards = 0;
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index a22e5b1a5458..a10449af5a76 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -110,20 +110,25 @@ static int vx_transfer_end(struct vx_core *chip, int cmd)
{
int err;
- if ((err = vx_reset_chk(chip)) < 0)
+ err = vx_reset_chk(chip);
+ if (err < 0)
return err;
/* irq MESS_READ/WRITE_END */
- if ((err = vx_send_irq_dsp(chip, cmd)) < 0)
+ err = vx_send_irq_dsp(chip, cmd);
+ if (err < 0)
return err;
/* Wait CHK = 1 */
- if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0)
+ err = vx_wait_isr_bit(chip, ISR_CHK);
+ if (err < 0)
return err;
/* If error, Read RX */
- if ((err = vx_inb(chip, ISR)) & ISR_ERR) {
- if ((err = vx_wait_for_rx_full(chip)) < 0) {
+ err = vx_inb(chip, ISR);
+ if (err & ISR_ERR) {
+ err = vx_wait_for_rx_full(chip);
+ if (err < 0) {
snd_printd(KERN_DEBUG "transfer_end: error in rx_full\n");
return err;
}
@@ -232,7 +237,8 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
if (chip->chip_status & VX_STAT_IS_STALE)
return -EBUSY;
- if ((err = vx_reset_chk(chip)) < 0) {
+ err = vx_reset_chk(chip);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: vx_reset_chk error\n");
return err;
}
@@ -254,7 +260,8 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
rmh->Cmd[0] &= MASK_1_WORD_COMMAND;
/* Wait for TX empty */
- if ((err = vx_wait_isr_bit(chip, ISR_TX_EMPTY)) < 0) {
+ err = vx_wait_isr_bit(chip, ISR_TX_EMPTY);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: wait tx empty error\n");
return err;
}
@@ -265,18 +272,21 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
vx_outb(chip, TXL, rmh->Cmd[0] & 0xff);
/* Trigger irq MESSAGE */
- if ((err = vx_send_irq_dsp(chip, IRQ_MESSAGE)) < 0) {
+ err = vx_send_irq_dsp(chip, IRQ_MESSAGE);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: send IRQ_MESSAGE error\n");
return err;
}
/* Wait for CHK = 1 */
- if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0)
+ err = vx_wait_isr_bit(chip, ISR_CHK);
+ if (err < 0)
return err;
/* If error, get error value from RX */
if (vx_inb(chip, ISR) & ISR_ERR) {
- if ((err = vx_wait_for_rx_full(chip)) < 0) {
+ err = vx_wait_for_rx_full(chip);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: rx_full read error\n");
return err;
}
@@ -292,7 +302,8 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
if (rmh->LgCmd > 1) {
for (i = 1; i < rmh->LgCmd; i++) {
/* Wait for TX ready */
- if ((err = vx_wait_isr_bit(chip, ISR_TX_READY)) < 0) {
+ err = vx_wait_isr_bit(chip, ISR_TX_READY);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: tx_ready error\n");
return err;
}
@@ -303,13 +314,15 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
vx_outb(chip, TXL, rmh->Cmd[i] & 0xff);
/* Trigger irq MESS_READ_NEXT */
- if ((err = vx_send_irq_dsp(chip, IRQ_MESS_READ_NEXT)) < 0) {
+ err = vx_send_irq_dsp(chip, IRQ_MESS_READ_NEXT);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: IRQ_READ_NEXT error\n");
return err;
}
}
/* Wait for TX empty */
- if ((err = vx_wait_isr_bit(chip, ISR_TX_READY)) < 0) {
+ err = vx_wait_isr_bit(chip, ISR_TX_READY);
+ if (err < 0) {
snd_printd(KERN_DEBUG "vx_send_msg: TX_READY error\n");
return err;
}
@@ -362,17 +375,21 @@ int vx_send_rih_nolock(struct vx_core *chip, int cmd)
#if 0
printk(KERN_DEBUG "send_rih: cmd = 0x%x\n", cmd);
#endif
- if ((err = vx_reset_chk(chip)) < 0)
+ err = vx_reset_chk(chip);
+ if (err < 0)
return err;
/* send the IRQ */
- if ((err = vx_send_irq_dsp(chip, cmd)) < 0)
+ err = vx_send_irq_dsp(chip, cmd);
+ if (err < 0)
return err;
/* Wait CHK = 1 */
- if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0)
+ err = vx_wait_isr_bit(chip, ISR_CHK);
+ if (err < 0)
return err;
/* If error, read RX */
if (vx_inb(chip, ISR) & ISR_ERR) {
- if ((err = vx_wait_for_rx_full(chip)) < 0)
+ err = vx_wait_for_rx_full(chip);
+ if (err < 0)
return err;
err = vx_inb(chip, RXH) << 16;
err |= vx_inb(chip, RXM) << 8;
@@ -648,7 +665,8 @@ int snd_vx_dsp_boot(struct vx_core *chip, const struct firmware *boot)
vx_reset_board(chip, cold_reset);
vx_validate_irq(chip, 0);
- if ((err = snd_vx_load_boot_image(chip, boot)) < 0)
+ err = snd_vx_load_boot_image(chip, boot);
+ if (err < 0)
return err;
msleep(10);
@@ -678,7 +696,8 @@ int snd_vx_dsp_load(struct vx_core *chip, const struct firmware *dsp)
for (i = 0; i < dsp->size; i += 3) {
image = dsp->data + i;
/* Wait DSP ready for a new read */
- if ((err = vx_wait_isr_bit(chip, ISR_TX_EMPTY)) < 0) {
+ err = vx_wait_isr_bit(chip, ISR_TX_EMPTY);
+ if (err < 0) {
printk(KERN_ERR
"dsp loading error at position %d\n", i);
return err;
@@ -698,7 +717,8 @@ int snd_vx_dsp_load(struct vx_core *chip, const struct firmware *dsp)
msleep(200);
- if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0)
+ err = vx_wait_isr_bit(chip, ISR_CHK);
+ if (err < 0)
return err;
vx_toggle_dac_mute(chip, 0);
diff --git a/sound/drivers/vx/vx_hwdep.c b/sound/drivers/vx/vx_hwdep.c
index 01baa6d872e9..efbb644edba1 100644
--- a/sound/drivers/vx/vx_hwdep.c
+++ b/sound/drivers/vx/vx_hwdep.c
@@ -78,15 +78,19 @@ int snd_vx_setup_firmware(struct vx_core *chip)
/* ok, we reached to the last one */
/* create the devices if not built yet */
- if ((err = snd_vx_pcm_new(chip)) < 0)
+ err = snd_vx_pcm_new(chip);
+ if (err < 0)
return err;
- if ((err = snd_vx_mixer_new(chip)) < 0)
+ err = snd_vx_mixer_new(chip);
+ if (err < 0)
return err;
- if (chip->ops->add_controls)
- if ((err = chip->ops->add_controls(chip)) < 0)
+ if (chip->ops->add_controls) {
+ err = chip->ops->add_controls(chip);
+ if (err < 0)
return err;
+ }
chip->chip_status |= VX_STAT_DEVICE_INIT;
chip->chip_status |= VX_STAT_CHIP_INIT;
diff --git a/sound/drivers/vx/vx_mixer.c b/sound/drivers/vx/vx_mixer.c
index 13099f8c84d6..53d78eb13c53 100644
--- a/sound/drivers/vx/vx_mixer.c
+++ b/sound/drivers/vx/vx_mixer.c
@@ -910,7 +910,8 @@ int snd_vx_mixer_new(struct vx_core *chip)
temp = vx_control_output_level;
temp.index = i;
temp.tlv.p = chip->hw->output_level_db_scale;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
@@ -921,22 +922,26 @@ int snd_vx_mixer_new(struct vx_core *chip)
temp.index = i;
temp.name = "PCM Playback Volume";
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
temp = vx_control_output_switch;
temp.index = i;
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
temp = vx_control_monitor_gain;
temp.index = i;
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
temp = vx_control_monitor_switch;
temp.index = i;
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
for (i = 0; i < chip->hw->num_outs; i++) {
@@ -944,20 +949,25 @@ int snd_vx_mixer_new(struct vx_core *chip)
temp.index = i;
temp.name = "PCM Capture Volume";
temp.private_value = (i * 2) | (1 << 8);
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
/* Audio source */
- if ((err = snd_ctl_add(card, snd_ctl_new1(&vx_control_audio_src, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&vx_control_audio_src, chip));
+ if (err < 0)
return err;
/* clock mode */
- if ((err = snd_ctl_add(card, snd_ctl_new1(&vx_control_clock_mode, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&vx_control_clock_mode, chip));
+ if (err < 0)
return err;
/* IEC958 controls */
- if ((err = snd_ctl_add(card, snd_ctl_new1(&vx_control_iec958_mask, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&vx_control_iec958_mask, chip));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&vx_control_iec958, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&vx_control_iec958, chip));
+ if (err < 0)
return err;
/* VU, peak, saturation meters */
for (c = 0; c < 2; c++) {
@@ -968,7 +978,8 @@ int snd_vx_mixer_new(struct vx_core *chip)
temp = vx_control_saturation;
temp.index = i;
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
sprintf(name, "%s VU Meter", dir[c]);
@@ -976,14 +987,16 @@ int snd_vx_mixer_new(struct vx_core *chip)
temp.index = i;
temp.name = name;
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
sprintf(name, "%s Peak Meter", dir[c]);
temp = vx_control_peak_meter;
temp.index = i;
temp.name = name;
temp.private_value = val;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
}
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index daffda99b4f7..3924f5283745 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -341,10 +341,12 @@ static int vx_toggle_pipe(struct vx_core *chip, struct vx_pipe *pipe, int state)
}
}
- if ((err = vx_conf_pipe(chip, pipe)) < 0)
+ err = vx_conf_pipe(chip, pipe);
+ if (err < 0)
return err;
- if ((err = vx_send_irqa(chip)) < 0)
+ err = vx_send_irqa(chip);
+ if (err < 0)
return err;
/* If it completes successfully, wait for the pipes
@@ -680,8 +682,9 @@ static void vx_pcm_playback_transfer(struct vx_core *chip,
if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE))
return;
for (i = 0; i < nchunks; i++) {
- if ((err = vx_pcm_playback_transfer_chunk(chip, runtime, pipe,
- chip->ibl.size)) < 0)
+ err = vx_pcm_playback_transfer_chunk(chip, runtime, pipe,
+ chip->ibl.size);
+ if (err < 0)
return;
}
}
@@ -698,7 +701,8 @@ static void vx_pcm_playback_update(struct vx_core *chip,
struct snd_pcm_runtime *runtime = subs->runtime;
if (pipe->running && ! (chip->chip_status & VX_STAT_IS_STALE)) {
- if ((err = vx_update_pipe_position(chip, runtime, pipe)) < 0)
+ err = vx_update_pipe_position(chip, runtime, pipe);
+ if (err < 0)
return;
if (pipe->transferred >= (int)runtime->period_size) {
pipe->transferred %= runtime->period_size;
@@ -747,11 +751,13 @@ static int vx_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
pipe->running = 0;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if ((err = vx_toggle_pipe(chip, pipe, 0)) < 0)
+ err = vx_toggle_pipe(chip, pipe, 0);
+ if (err < 0)
return err;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if ((err = vx_toggle_pipe(chip, pipe, 1)) < 0)
+ err = vx_toggle_pipe(chip, pipe, 1);
+ if (err < 0)
return err;
break;
default:
@@ -792,13 +798,15 @@ static int vx_pcm_prepare(struct snd_pcm_substream *subs)
snd_printdd(KERN_DEBUG "reopen the pipe with data_mode = %d\n", data_mode);
vx_init_rmh(&rmh, CMD_FREE_PIPE);
vx_set_pipe_cmd_params(&rmh, 0, pipe->number, 0);
- if ((err = vx_send_msg(chip, &rmh)) < 0)
+ err = vx_send_msg(chip, &rmh);
+ if (err < 0)
return err;
vx_init_rmh(&rmh, CMD_RES_PIPE);
vx_set_pipe_cmd_params(&rmh, 0, pipe->number, pipe->channels);
if (data_mode)
rmh.Cmd[0] |= BIT_DATA_MODE;
- if ((err = vx_send_msg(chip, &rmh)) < 0)
+ err = vx_send_msg(chip, &rmh);
+ if (err < 0)
return err;
pipe->data_mode = data_mode;
}
@@ -810,7 +818,8 @@ static int vx_pcm_prepare(struct snd_pcm_substream *subs)
}
vx_set_clock(chip, runtime->rate);
- if ((err = vx_set_format(chip, pipe, runtime)) < 0)
+ err = vx_set_format(chip, pipe, runtime);
+ if (err < 0)
return err;
if (vx_is_pcmcia(chip)) {
@@ -1187,7 +1196,8 @@ int snd_vx_pcm_new(struct vx_core *chip)
unsigned int i;
int err;
- if ((err = vx_init_audio_io(chip)) < 0)
+ err = vx_init_audio_io(chip);
+ if (err < 0)
return err;
for (i = 0; i < chip->hw->num_codecs; i++) {
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 9897bd26a438..fd109bea4c53 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -18,8 +18,25 @@ config SND_DICE
select SND_HWDEP
select SND_FIREWIRE_LIB
help
- Say Y here to include support for many DACs based on the DICE
- chip family (DICE-II/Jr/Mini) which TC Applied Technologies produces.
+ Say Y here to include support for devices based on the DICE chip family
+ (DICE-II/TCD2210(Mini)/TCD2220(Jr.)) which TC Applied Technologies (TCAT) produced.
+ * Allen and Heath Zed R16
+ * Alesis iO 14/26 FireWire, MasterControl, MultiMix 8/12/16 FireWire
+ * Avid Mbox 3 Pro
+ * FlexRadio Systems FLEX-3000, FLEX-5000
+ * Focusrite Liquid Saffire 56
+ * Focusrite Saffire Pro 14, Pro 24, Pro 24 DSP, Pro 26, Pro 40(TCD2220)
+ * Harman Music Group Lexicon I-ONIX FW810S
+ * Loud Technologies Mackie Onyx Blackbird, Onyx 820i/1220i/1620i/1640i (latter models)
+ * M-Audio ProFire 610/2626
+ * Mytek Stereo192-DSD DAC
+ * Midas Klark Teknik VeniceF series
+ * PreSonus FireStudio, FireStudio Mobile, FireStudio Project, FireStudio Tube
+ * PreSonus StudioLive 16.4.2, 16.0.2, 24.4.2, 32.4.2
+ * Solid State Logic Duende Classic, Duende Mini
+ * TC Electronic Studio Konnekt 48, Konnekt 24D, Konnekt Live, Impact Twin
+ * TC Electronic Digital Konnekt x32, Desktop Konnekt 6
+ * Weiss Engineering ADC2, Vesta, Minerva, AFI1, DAC1, INT202, DAC202
To compile this driver as a module, choose M here: the module
will be called snd-dice.
@@ -38,7 +55,7 @@ config SND_OXFW
* Mackie(Loud) Onyx 1640i (former model)
* Mackie(Loud) Onyx Satellite
* Mackie(Loud) Tapco Link.Firewire
- * Mackie(Loud) d.4 pro
+ * Mackie(Loud) d.2 pro/d.4 pro (built-in FireWire card with OXFW971 ASIC)
* Mackie(Loud) U.420/U.420d
* TASCAM FireOne
* Stanton Controllers & Systems 1 Deck/Mixer
@@ -84,7 +101,7 @@ config SND_BEBOB
* PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
* BridgeCo RDAudio1/Audio5
* Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
- * Mackie d.2 (FireWire Option) and d.2 Pro
+ * Mackie d.2 (optional FireWire card with DM1000 ASIC)
* Stanton FinalScratch 2 (ScratchAmp)
* Tascam IF-FW/DM
* Behringer XENIX UFX 1204/1604
@@ -110,6 +127,7 @@ config SND_BEBOB
* M-Audio Ozonic/NRV10/ProfireLightBridge
* M-Audio FireWire 1814/ProjectMix IO
* Digidesign Mbox 2 Pro
+ * ToneWeal FW66
To compile this driver as a module, choose M here: the module
will be called snd-bebob.
@@ -148,12 +166,16 @@ config SND_FIREWIRE_MOTU
select SND_HWDEP
help
Say Y here to enable support for FireWire devices which MOTU produced:
+ * 828
+ * 896
* 828mk2
* Traveler
* Ultralite
* 8pre
* 828mk3 (FireWire only)
* 828mk3 (Hybrid)
+ * Ultralite mk3 (FireWire only)
+ * Ultralite mk3 (Hybrid)
* Audio Express
* 4pre
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index fea92e148790..d9c700f652bb 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -410,10 +410,10 @@ static unsigned int process_ir_ctx_payloads(struct amdtp_stream *s,
* @s: the AMDTP stream to initialize
* @unit: the target of the stream
* @dir: the direction of stream
- * @flags: the packet transmission method to use
+ * @flags: the details of the streaming protocol consist of cip_flags enumeration-constants.
*/
int amdtp_am824_init(struct amdtp_stream *s, struct fw_unit *unit,
- enum amdtp_stream_direction dir, enum cip_flags flags)
+ enum amdtp_stream_direction dir, unsigned int flags)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
diff --git a/sound/firewire/amdtp-am824.h b/sound/firewire/amdtp-am824.h
index 06d280783581..2b092b1061ba 100644
--- a/sound/firewire/amdtp-am824.h
+++ b/sound/firewire/amdtp-am824.h
@@ -45,5 +45,5 @@ void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct snd_rawmidi_substream *midi);
int amdtp_am824_init(struct amdtp_stream *s, struct fw_unit *unit,
- enum amdtp_stream_direction dir, enum cip_flags flags);
+ enum amdtp_stream_direction dir, unsigned int flags);
#endif
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index aa53c13b89d3..5fd2aeccdfc2 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -49,7 +49,7 @@ TRACE_EVENT(amdtp_packet,
__entry->data_blocks = data_blocks;
__entry->data_block_counter = data_block_counter,
__entry->packet_index = packet_index;
- __entry->irq = !!in_interrupt();
+ __entry->irq = !!in_softirq();
__entry->index = index;
),
TP_printk(
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 5805c5de39fb..9be2260e4ca2 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -20,7 +20,7 @@
#define CYCLES_PER_SECOND 8000
#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
-#define OHCI_MAX_SECOND 8
+#define OHCI_SECOND_MODULUS 8
/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
@@ -33,7 +33,8 @@
#define TAG_NO_CIP_HEADER 0
#define TAG_CIP 1
-/* common isochronous packet header parameters */
+// Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported.
+#define CIP_HEADER_QUADLETS 2
#define CIP_EOH_SHIFT 31
#define CIP_EOH (1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK 0x80000000
@@ -48,36 +49,46 @@
#define CIP_FMT_MASK 0x3f000000
#define CIP_FDF_MASK 0x00ff0000
#define CIP_FDF_SHIFT 16
+#define CIP_FDF_NO_DATA 0xff
#define CIP_SYT_MASK 0x0000ffff
#define CIP_SYT_NO_INFO 0xffff
+#define CIP_SYT_CYCLE_MODULUS 16
+#define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
+
+#define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM 0x10
#define AMDTP_FDF_NO_DATA 0xff
-// For iso header, tstamp and 2 CIP header.
-#define IR_CTX_HEADER_SIZE_CIP 16
// For iso header and tstamp.
-#define IR_CTX_HEADER_SIZE_NO_CIP 8
+#define IR_CTX_HEADER_DEFAULT_QUADLETS 2
+// Add nothing.
+#define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
+// Add two quadlets CIP header.
+#define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK 0x0000ffff
-#define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header.
+#define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
-static void pcm_period_work(struct work_struct *work);
+// The initial firmware of OXFW970 can postpone packet transmission while finishing an
+// asynchronous transaction. This module accepts at most 5 skipped cycles to avoid buffer
+// overrun. The actual device can skip more; in that case this module stops the packet streaming.
+#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
/**
* amdtp_stream_init - initialize an AMDTP stream structure
* @s: the AMDTP stream to initialize
* @unit: the target of the stream
* @dir: the direction of stream
- * @flags: the packet transmission method to use
+ * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants.
* @fmt: the value of fmt field in CIP header
* @process_ctx_payloads: callback handler to process payloads of isoc context
* @protocol_size: the size to allocate newly for protocol
*/
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
- enum amdtp_stream_direction dir, enum cip_flags flags,
+ enum amdtp_stream_direction dir, unsigned int flags,
unsigned int fmt,
amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
unsigned int protocol_size)
@@ -94,18 +105,13 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
s->flags = flags;
s->context = ERR_PTR(-1);
mutex_init(&s->mutex);
- INIT_WORK(&s->period_work, pcm_period_work);
s->packet_index = 0;
- init_waitqueue_head(&s->callback_wait);
- s->callbacked = false;
+ init_waitqueue_head(&s->ready_wait);
s->fmt = fmt;
s->process_ctx_payloads = process_ctx_payloads;
- if (dir == AMDTP_OUT_STREAM)
- s->ctx_data.rx.syt_override = -1;
-
return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
@@ -183,14 +189,13 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
unsigned int maximum_usec_per_period;
int err;
- hw->info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_JOINT_DUPLEX |
SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID;
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
- /* SNDRV_PCM_INFO_BATCH */
hw->periods_min = 2;
hw->periods_max = UINT_MAX;
@@ -287,22 +292,29 @@ int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
s->syt_interval = amdtp_syt_intervals[sfc];
// default buffering in the device.
- if (s->direction == AMDTP_OUT_STREAM) {
- s->ctx_data.rx.transfer_delay =
- TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
-
- if (s->flags & CIP_BLOCKING) {
- // additional buffering needed to adjust for no-data
- // packets.
- s->ctx_data.rx.transfer_delay +=
- TICKS_PER_SECOND * s->syt_interval / rate;
- }
- }
+ s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
+
+ // additional buffering needed to adjust for no-data packets.
+ if (s->flags & CIP_BLOCKING)
+ s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
+// The CIP header is processed in the context header, apart from the context payload.
+static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
+{
+ unsigned int multiplier;
+
+ if (s->flags & CIP_JUMBO_PAYLOAD)
+ multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
+ else
+ multiplier = 1;
+
+ return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
+}
+
/**
* amdtp_stream_get_max_payload - get the stream's packet size
* @s: the AMDTP stream
@@ -312,16 +324,14 @@ EXPORT_SYMBOL(amdtp_stream_set_parameters);
*/
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
- unsigned int multiplier = 1;
- unsigned int cip_header_size = 0;
+ unsigned int cip_header_size;
- if (s->flags & CIP_JUMBO_PAYLOAD)
- multiplier = 5;
if (!(s->flags & CIP_NO_HEADER))
- cip_header_size = sizeof(__be32) * 2;
+ cip_header_size = CIP_HEADER_SIZE;
+ else
+ cip_header_size = 0;
- return cip_header_size +
- s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
+ return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
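As a worked example of the sizing above (figures assumed for illustration, not taken from a specific device): a 48 kHz stream uses a SYT_INTERVAL of 8, so with 10 data block quadlets the context payload is 8 * 10 * sizeof(__be32) = 320 bytes per packet, or 5 * 320 = 1600 bytes when CIP_JUMBO_PAYLOAD reserves room for up to IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES skipped cycles; amdtp_stream_get_max_payload() then adds CIP_HEADER_SIZE (8 bytes) unless CIP_NO_HEADER is set.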
@@ -333,32 +343,46 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload);
*/
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
- cancel_work_sync(&s->period_work);
s->pcm_buffer_pointer = 0;
s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
-static unsigned int calculate_data_blocks(unsigned int *data_block_state,
- bool is_blocking, bool is_no_info,
- unsigned int syt_interval, enum cip_sfc sfc)
+static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
+ const unsigned int seq_size, unsigned int seq_tail,
+ unsigned int count)
{
- unsigned int data_blocks;
+ const unsigned int syt_interval = s->syt_interval;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ struct seq_desc *desc = descs + seq_tail;
- /* Blocking mode. */
- if (is_blocking) {
- /* This module generate empty packet for 'no data'. */
- if (is_no_info)
- data_blocks = 0;
+ if (desc->syt_offset != CIP_SYT_NO_INFO)
+ desc->data_blocks = syt_interval;
else
- data_blocks = syt_interval;
- /* Non-blocking mode. */
- } else {
+ desc->data_blocks = 0;
+
+ seq_tail = (seq_tail + 1) % seq_size;
+ }
+}
+
+static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
+ const unsigned int seq_size, unsigned int seq_tail,
+ unsigned int count)
+{
+ const enum cip_sfc sfc = s->sfc;
+ unsigned int state = s->ctx_data.rx.data_block_state;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ struct seq_desc *desc = descs + seq_tail;
+
if (!cip_sfc_is_base_44100(sfc)) {
// Sample_rate / 8000 is an integer, and precomputed.
- data_blocks = *data_block_state;
+ desc->data_blocks = state;
} else {
- unsigned int phase = *data_block_state;
+ unsigned int phase = state;
/*
* This calculates the number of data blocks per packet so that
@@ -370,18 +394,19 @@ static unsigned int calculate_data_blocks(unsigned int *data_block_state,
*/
if (sfc == CIP_SFC_44100)
/* 6 6 5 6 5 6 5 ... */
- data_blocks = 5 + ((phase & 1) ^
- (phase == 0 || phase >= 40));
+ desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
else
/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
- data_blocks = 11 * (sfc >> 1) + (phase == 0);
+ desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
if (++phase >= (80 >> (sfc >> 1)))
phase = 0;
- *data_block_state = phase;
+ state = phase;
}
+
+ seq_tail = (seq_tail + 1) % seq_size;
}
- return data_blocks;
+ s->ctx_data.rx.data_block_state = state;
}
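To make the non-blocking distribution above concrete (arithmetic only, no device data): at 44.1 kHz the bus carries 8000 packets per second but only 44100 events, an average of 5.5125 data blocks per packet, so the phase counter emits the pattern 6 6 5 6 5 6 5 ... and every 80-cycle window carries exactly 441 events. For rates outside the 44.1 kHz family the count is simply constant, e.g. 48000 / 8000 = 6 data blocks per packet.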
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
@@ -423,6 +448,149 @@ static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
return syt_offset;
}
+static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
+ const unsigned int seq_size, unsigned int seq_tail,
+ unsigned int count)
+{
+ const enum cip_sfc sfc = s->sfc;
+ unsigned int last = s->ctx_data.rx.last_syt_offset;
+ unsigned int state = s->ctx_data.rx.syt_offset_state;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ struct seq_desc *desc = descs + seq_tail;
+
+ desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
+
+ seq_tail = (seq_tail + 1) % seq_size;
+ }
+
+ s->ctx_data.rx.last_syt_offset = last;
+ s->ctx_data.rx.syt_offset_state = state;
+}
+
+static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
+ unsigned int transfer_delay)
+{
+ unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
+ unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
+ unsigned int syt_offset;
+
+ // Round up.
+ if (syt_cycle_lo < cycle_lo)
+ syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
+ syt_cycle_lo -= cycle_lo;
+
+	// Subtract the transfer delay so that the synchronization offset is not too large
+	// at transmission.
+ syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
+ if (syt_offset < transfer_delay)
+ syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
+
+ return syt_offset - transfer_delay;
+}
+
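A small numeric walk-through of compute_syt_offset() with assumed inputs: for syt = 0x3a20 and a packet scheduled at cycle 7985, cycle_lo = 7985 & 0x0f = 1 and syt_cycle_lo = 0x3, so the offset within the 16-cycle window is (3 - 1) * 3072 + 0x0a20 = 8736 ticks; with an assumed transfer_delay of 5000 ticks the stored syt_offset becomes 3736 ticks. If the subtraction would underflow, one extra window of CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE = 49152 ticks is added first.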
+// Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
+// Additionally, the sequence of tx packets is strictly checked for any discontinuity before
+// entries are filled into the queue. The calculation is therefore safe against overrun even
+// though it looks fragile.
+static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
+{
+ const unsigned int cache_size = s->ctx_data.tx.cache.size;
+ unsigned int cycles = s->ctx_data.tx.cache.tail;
+
+ if (cycles < head)
+ cycles += cache_size;
+ cycles -= head;
+
+ return cycles;
+}
+
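For instance (sizes assumed for illustration): with a 1024-entry cache, a tail of 10 and a head of 1000, the distance is 10 + 1024 - 1000 = 34 cached cycles; with a head of 4 it is simply 10 - 4 = 6. This wrap-around distance is what the replay logic later compares against the batch size and half the cache size.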
+static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsigned int desc_count)
+{
+ const unsigned int transfer_delay = s->transfer_delay;
+ const unsigned int cache_size = s->ctx_data.tx.cache.size;
+ struct seq_desc *cache = s->ctx_data.tx.cache.descs;
+ unsigned int cache_tail = s->ctx_data.tx.cache.tail;
+ bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
+ int i;
+
+ for (i = 0; i < desc_count; ++i) {
+ struct seq_desc *dst = cache + cache_tail;
+ const struct pkt_desc *src = descs + i;
+
+ if (aware_syt && src->syt != CIP_SYT_NO_INFO)
+ dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
+ else
+ dst->syt_offset = CIP_SYT_NO_INFO;
+ dst->data_blocks = src->data_blocks;
+
+ cache_tail = (cache_tail + 1) % cache_size;
+ }
+
+ s->ctx_data.tx.cache.tail = cache_tail;
+}
+
+static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
+{
+ struct seq_desc *descs = s->ctx_data.rx.seq.descs;
+ unsigned int seq_tail = s->ctx_data.rx.seq.tail;
+ const unsigned int seq_size = s->ctx_data.rx.seq.size;
+
+ pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);
+
+ if (s->flags & CIP_BLOCKING)
+ pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
+ else
+ pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);
+
+ s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
+}
+
+static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
+{
+ struct amdtp_stream *target = s->ctx_data.rx.replay_target;
+ const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
+ const unsigned int cache_size = target->ctx_data.tx.cache.size;
+ unsigned int cache_head = s->ctx_data.rx.cache_head;
+ struct seq_desc *descs = s->ctx_data.rx.seq.descs;
+ const unsigned int seq_size = s->ctx_data.rx.seq.size;
+ unsigned int seq_tail = s->ctx_data.rx.seq.tail;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ descs[seq_tail] = cache[cache_head];
+ seq_tail = (seq_tail + 1) % seq_size;
+ cache_head = (cache_head + 1) % cache_size;
+ }
+
+ s->ctx_data.rx.seq.tail = seq_tail;
+ s->ctx_data.rx.cache_head = cache_head;
+}
+
+static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
+{
+ struct amdtp_domain *d = s->domain;
+
+ if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
+ pool_ideal_seq_descs(s, count);
+ } else {
+ if (!d->replay.on_the_fly) {
+ pool_replayed_seq(s, count);
+ } else {
+ struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
+ const unsigned int cache_size = tx->ctx_data.tx.cache.size;
+ const unsigned int cache_head = s->ctx_data.rx.cache_head;
+ unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);
+
+ if (cached_cycles > count && cached_cycles > cache_size / 2)
+ pool_replayed_seq(s, count);
+ else
+ pool_ideal_seq_descs(s, count);
+ }
+ }
+}
+
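As an illustration of the on-the-fly decision above (cache size assumed): with a 96-entry tx cache, an rx context keeps generating the ideal sequence until more than half the cache (48 cycles) and more than the current batch of packets are cached, and only then switches to replaying the cached tx sequence.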
static void update_pcm_pointers(struct amdtp_stream *s,
struct snd_pcm_substream *pcm,
unsigned int frames)
@@ -437,18 +605,21 @@ static void update_pcm_pointers(struct amdtp_stream *s,
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
s->pcm_period_pointer -= pcm->runtime->period_size;
- queue_work(system_highpri_wq, &s->period_work);
- }
-}
-static void pcm_period_work(struct work_struct *work)
-{
- struct amdtp_stream *s = container_of(work, struct amdtp_stream,
- period_work);
- struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
-
- if (pcm)
- snd_pcm_period_elapsed(pcm);
+		// The program in the user process should periodically check the status of the
+		// intermediate buffer associated with the PCM substream and process the PCM frames
+		// in it, instead of receiving period-elapsed notification via poll wait.
+ if (!pcm->runtime->no_period_wakeup) {
+ if (in_softirq()) {
+ // In software IRQ context for 1394 OHCI.
+ snd_pcm_period_elapsed(pcm);
+ } else {
+				// In the process context of the ALSA PCM application, under the acquired
+				// lock of the PCM substream.
+ snd_pcm_period_elapsed_under_stream_lock(pcm);
+ }
+ }
+ }
}
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
@@ -505,7 +676,7 @@ static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
}
static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
- struct fw_iso_packet *params,
+ struct fw_iso_packet *params, unsigned int header_length,
unsigned int data_blocks,
unsigned int data_block_counter,
unsigned int syt, unsigned int index)
@@ -516,16 +687,15 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
params->payload_length = payload_length;
- if (!(s->flags & CIP_NO_HEADER)) {
+ if (header_length > 0) {
cip_header = (__be32 *)params->header;
generate_cip_header(s, cip_header, data_block_counter, syt);
- params->header_length = 2 * sizeof(__be32);
- payload_length += params->header_length;
+ params->header_length = header_length;
} else {
cip_header = NULL;
}
- trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
+ trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
data_block_counter, s->packet_index, index);
}
@@ -569,8 +739,7 @@ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
/* Calculate data blocks */
fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
- if (payload_length < sizeof(__be32) * 2 ||
- (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
+ if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
*data_blocks = 0;
} else {
unsigned int data_block_quadlets =
@@ -585,8 +754,7 @@ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
if (s->flags & CIP_WRONG_DBS)
data_block_quadlets = s->data_block_quadlets;
- *data_blocks = (payload_length / sizeof(__be32) - 2) /
- data_block_quadlets;
+ *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
}
/* Check data block counter continuity */
@@ -620,109 +788,163 @@ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
*data_block_counter = dbc;
- *syt = cip_header[1] & CIP_SYT_MASK;
+ if (!(s->flags & CIP_UNAWARE_SYT))
+ *syt = cip_header[1] & CIP_SYT_MASK;
return 0;
}
static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
const __be32 *ctx_header,
- unsigned int *payload_length,
unsigned int *data_blocks,
unsigned int *data_block_counter,
unsigned int *syt, unsigned int packet_index, unsigned int index)
{
+ unsigned int payload_length;
const __be32 *cip_header;
unsigned int cip_header_size;
- int err;
- *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
+ payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
if (!(s->flags & CIP_NO_HEADER))
- cip_header_size = 8;
+ cip_header_size = CIP_HEADER_SIZE;
else
cip_header_size = 0;
- if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
+ if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
dev_err(&s->unit->device,
"Detect jumbo payload: %04x %04x\n",
- *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
+ payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
return -EIO;
}
if (cip_header_size > 0) {
- cip_header = ctx_header + 2;
- err = check_cip_header(s, cip_header, *payload_length,
- data_blocks, data_block_counter, syt);
- if (err < 0)
- return err;
+ if (payload_length >= cip_header_size) {
+ int err;
+
+ cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
+ err = check_cip_header(s, cip_header, payload_length - cip_header_size,
+ data_blocks, data_block_counter, syt);
+ if (err < 0)
+ return err;
+ } else {
+			// Handle the cycle as if an empty packet arrived in it.
+ cip_header = NULL;
+ *data_blocks = 0;
+ *syt = 0;
+ }
} else {
cip_header = NULL;
- err = 0;
- *data_blocks = *payload_length / sizeof(__be32) /
- s->data_block_quadlets;
+ *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
*syt = 0;
if (*data_block_counter == UINT_MAX)
*data_block_counter = 0;
}
- trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
+ trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
*data_block_counter, packet_index, index);
- return err;
+ return 0;
}
// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
// it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
-static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
+static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}
-static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
+static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
cycle += addend;
- if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
- cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
+ if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
+ cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
return cycle;
}
+static int compare_ohci_cycle_count(u32 lval, u32 rval)
+{
+ if (lval == rval)
+ return 0;
+ else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
+ return -1;
+ else
+ return 1;
+}
+
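A quick check of the comparison above (values assumed): the OHCI second field wraps every 8 * 8000 = 64000 cycles, so compare_ohci_cycle_count(100, 63900) returns 1 (100 is treated as later, the 63800-cycle gap being read as a wrap-around), while compare_ohci_cycle_count(100, 200) returns -1 since 200 is only 100 cycles ahead.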
// Align to actual cycle count for the packet which is going to be scheduled.
// This module queued the same number of isochronous cycle as the size of queue
// to kip isochronous cycle, therefore it's OK to just increment the cycle by
// the size of queue for scheduled cycle.
-static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
- unsigned int queue_size)
+static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
+ unsigned int queue_size)
{
- u32 cycle = compute_cycle_count(ctx_header_tstamp);
- return increment_cycle_count(cycle, queue_size);
+ u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
+ return increment_ohci_cycle_count(cycle, queue_size);
}
static int generate_device_pkt_descs(struct amdtp_stream *s,
struct pkt_desc *descs,
const __be32 *ctx_header,
- unsigned int packets)
+ unsigned int packets,
+ unsigned int *desc_count)
{
+ unsigned int next_cycle = s->next_cycle;
unsigned int dbc = s->data_block_counter;
unsigned int packet_index = s->packet_index;
unsigned int queue_size = s->queue_size;
int i;
int err;
+ *desc_count = 0;
for (i = 0; i < packets; ++i) {
- struct pkt_desc *desc = descs + i;
+ struct pkt_desc *desc = descs + *desc_count;
unsigned int cycle;
- unsigned int payload_length;
+ bool lost;
unsigned int data_blocks;
unsigned int syt;
- cycle = compute_cycle_count(ctx_header[1]);
+ cycle = compute_ohci_cycle_count(ctx_header[1]);
+ lost = (next_cycle != cycle);
+ if (lost) {
+ if (s->flags & CIP_NO_HEADER) {
+ // Fireface skips transmission just for an isoc cycle corresponding
+ // to empty packet.
+ unsigned int prev_cycle = next_cycle;
+
+ next_cycle = increment_ohci_cycle_count(next_cycle, 1);
+ lost = (next_cycle != cycle);
+ if (!lost) {
+ // Prepare a description for the skipped cycle for
+ // sequence replay.
+ desc->cycle = prev_cycle;
+ desc->syt = 0;
+ desc->data_blocks = 0;
+ desc->data_block_counter = dbc;
+ desc->ctx_payload = NULL;
+ ++desc;
+ ++(*desc_count);
+ }
+ } else if (s->flags & CIP_JUMBO_PAYLOAD) {
+			// OXFW970 skips transmission for several isoc cycles during an
+			// asynchronous transaction. Sequence replay is impossible for
+			// that reason.
+ unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
+ IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
+ lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
+ }
+ if (lost) {
+ dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
+ next_cycle, cycle);
+ return -EIO;
+ }
+ }
- err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
- &data_blocks, &dbc, &syt, packet_index, i);
+ err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
+ packet_index, i);
if (err < 0)
return err;
@@ -735,12 +957,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
if (!(s->flags & CIP_DBC_IS_END_EVENT))
dbc = (dbc + desc->data_blocks) & 0xff;
- ctx_header +=
- s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
-
+ next_cycle = increment_ohci_cycle_count(next_cycle, 1);
+ ++(*desc_count);
+ ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
packet_index = (packet_index + 1) % queue_size;
}
+ s->next_cycle = next_cycle;
s->data_block_counter = dbc;
return 0;
@@ -757,29 +980,28 @@ static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
return syt & CIP_SYT_MASK;
}
-static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
- const __be32 *ctx_header, unsigned int packets,
- const struct seq_desc *seq_descs,
- unsigned int seq_size)
+static void generate_pkt_descs(struct amdtp_stream *s, const __be32 *ctx_header, unsigned int packets)
{
+ struct pkt_desc *descs = s->pkt_descs;
+ const struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
+ const unsigned int seq_size = s->ctx_data.rx.seq.size;
unsigned int dbc = s->data_block_counter;
- unsigned int seq_index = s->ctx_data.rx.seq_index;
+ unsigned int seq_head = s->ctx_data.rx.seq.head;
+ bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
int i;
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
unsigned int index = (s->packet_index + i) % s->queue_size;
- const struct seq_desc *seq = seq_descs + seq_index;
- unsigned int syt;
+ const struct seq_desc *seq = seq_descs + seq_head;
- desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
+ desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
+
+ if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
+ desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
+ else
+ desc->syt = CIP_SYT_NO_INFO;
- syt = seq->syt_offset;
- if (syt != CIP_SYT_NO_INFO) {
- syt = compute_syt(syt, desc->cycle,
- s->ctx_data.rx.transfer_delay);
- }
- desc->syt = syt;
desc->data_blocks = seq->data_blocks;
if (s->flags & CIP_DBC_IS_END_EVENT)
@@ -792,19 +1014,19 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
desc->ctx_payload = s->buffer.packets[index].buffer;
- seq_index = (seq_index + 1) % seq_size;
+ seq_head = (seq_head + 1) % seq_size;
++ctx_header;
}
s->data_block_counter = dbc;
- s->ctx_data.rx.seq_index = seq_index;
+ s->ctx_data.rx.seq.head = seq_head;
}
static inline void cancel_stream(struct amdtp_stream *s)
{
s->packet_index = -1;
- if (in_interrupt())
+ if (in_softirq())
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
@@ -822,16 +1044,17 @@ static void process_ctx_payloads(struct amdtp_stream *s,
update_pcm_pointers(s, pcm, pcm_frames);
}
-static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
- size_t header_length, void *header,
- void *private_data)
+static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
+ void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
const struct amdtp_domain *d = s->domain;
const __be32 *ctx_header = header;
- unsigned int events_per_period = s->ctx_data.rx.events_per_period;
+ const unsigned int events_per_period = d->events_per_period;
unsigned int event_count = s->ctx_data.rx.event_count;
+ unsigned int pkt_header_length;
unsigned int packets;
+ bool need_hw_irq;
int i;
if (s->packet_index < 0)
@@ -840,34 +1063,44 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
// Calculate the number of packets in buffer and check XRUN.
packets = header_length / sizeof(*ctx_header);
- generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
- d->seq_size);
+ pool_seq_descs(s, packets);
+
+ generate_pkt_descs(s, ctx_header, packets);
process_ctx_payloads(s, s->pkt_descs, packets);
+ if (!(s->flags & CIP_NO_HEADER))
+ pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
+ else
+ pkt_header_length = 0;
+
+ if (s == d->irq_target) {
+		// In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
+		// the tasks of the user process operating the ALSA PCM character device via
+		// ioctl(2) requests, instead of by the scheduled hardware IRQ of an IT context.
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+ need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
+ } else {
+ need_hw_irq = false;
+ }
+
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = s->pkt_descs + i;
- unsigned int syt;
struct {
struct fw_iso_packet params;
- __be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
+ __be32 header[CIP_HEADER_QUADLETS];
} template = { {0}, {0} };
bool sched_irq = false;
- if (s->ctx_data.rx.syt_override < 0)
- syt = desc->syt;
- else
- syt = s->ctx_data.rx.syt_override;
-
- build_it_pkt_header(s, desc->cycle, &template.params,
+ build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
desc->data_blocks, desc->data_block_counter,
- syt, i);
+ desc->syt, i);
if (s == s->domain->irq_target) {
event_count += desc->data_blocks;
if (event_count >= events_per_period) {
event_count -= events_per_period;
- sched_irq = true;
+ sched_irq = need_hw_irq;
}
}
@@ -880,13 +1113,99 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
s->ctx_data.rx.event_count = event_count;
}
-static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
- size_t header_length, void *header,
- void *private_data)
+static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+ const __be32 *ctx_header = header;
+ unsigned int packets;
+ unsigned int cycle;
+ int i;
+
+ if (s->packet_index < 0)
+ return;
+
+ packets = header_length / sizeof(*ctx_header);
+
+ cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
+ s->next_cycle = increment_ohci_cycle_count(cycle, 1);
+
+ for (i = 0; i < packets; ++i) {
+ struct fw_iso_packet params = {
+ .header_length = 0,
+ .payload_length = 0,
+ };
+ bool sched_irq = (s == d->irq_target && i == packets - 1);
+
+ if (queue_out_packet(s, &params, sched_irq) < 0) {
+ cancel_stream(s);
+ return;
+ }
+ }
+}
+
+static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
+static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
+ size_t header_length, void *header, void *private_data)
+{
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+ __be32 *ctx_header = header;
+ const unsigned int queue_size = s->queue_size;
+ unsigned int packets;
+ unsigned int offset;
+
+ if (s->packet_index < 0)
+ return;
+
+ packets = header_length / sizeof(*ctx_header);
+
+ offset = 0;
+ while (offset < packets) {
+ unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
+
+ if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
+ break;
+
+ ++offset;
+ }
+
+ if (offset > 0) {
+ unsigned int length = sizeof(*ctx_header) * offset;
+
+ skip_rx_packets(context, tstamp, length, ctx_header, private_data);
+ if (amdtp_streaming_error(s))
+ return;
+
+ ctx_header += offset;
+ header_length -= length;
+ }
+
+ if (offset < packets) {
+ s->ready_processing = true;
+ wake_up(&s->ready_wait);
+
+ process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
+ if (amdtp_streaming_error(s))
+ return;
+
+ if (s == d->irq_target)
+ s->context->callback.sc = irq_target_callback;
+ else
+ s->context->callback.sc = process_rx_packets;
+ }
+}
+
+static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
+ void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
__be32 *ctx_header = header;
unsigned int packets;
+ unsigned int desc_count;
int i;
int err;
@@ -896,14 +1215,20 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
// Calculate the number of packets in buffer and check XRUN.
packets = header_length / s->ctx_data.tx.ctx_header_size;
- err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
+ desc_count = 0;
+ err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
if (err < 0) {
if (err != -EAGAIN) {
cancel_stream(s);
return;
}
} else {
- process_ctx_payloads(s, s->pkt_descs, packets);
+ struct amdtp_domain *d = s->domain;
+
+ process_ctx_payloads(s, s->pkt_descs, desc_count);
+
+ if (d->replay.enable)
+ cache_seq(s, s->pkt_descs, desc_count);
}
for (i = 0; i < packets; ++i) {
@@ -916,79 +1241,194 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
}
}
-static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
+static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
+ void *header, void *private_data)
{
- struct amdtp_stream *irq_target = d->irq_target;
- unsigned int seq_tail = d->seq_tail;
- unsigned int seq_size = d->seq_size;
- unsigned int min_avail;
- struct amdtp_stream *s;
+ struct amdtp_stream *s = private_data;
+ const __be32 *ctx_header = header;
+ unsigned int packets;
+ unsigned int cycle;
+ int i;
- min_avail = d->seq_size;
- list_for_each_entry(s, &d->streams, list) {
- unsigned int seq_index;
- unsigned int avail;
+ if (s->packet_index < 0)
+ return;
+
+ packets = header_length / s->ctx_data.tx.ctx_header_size;
- if (s->direction == AMDTP_IN_STREAM)
- continue;
+ ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
+ cycle = compute_ohci_cycle_count(ctx_header[1]);
+ s->next_cycle = increment_ohci_cycle_count(cycle, 1);
- seq_index = s->ctx_data.rx.seq_index;
- avail = d->seq_tail;
- if (seq_index > avail)
- avail += d->seq_size;
- avail -= seq_index;
+ for (i = 0; i < packets; ++i) {
+ struct fw_iso_packet params = {0};
- if (avail < min_avail)
- min_avail = avail;
+ if (queue_in_packet(s, &params) < 0) {
+ cancel_stream(s);
+ return;
+ }
}
+}
- while (min_avail < packets) {
- struct seq_desc *desc = d->seq_descs + seq_tail;
+static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
+ size_t header_length, void *header, void *private_data)
+{
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+ __be32 *ctx_header;
+ unsigned int packets;
+ unsigned int offset;
- desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
- &d->syt_offset_state, irq_target->sfc);
- desc->data_blocks = calculate_data_blocks(&d->data_block_state,
- !!(irq_target->flags & CIP_BLOCKING),
- desc->syt_offset == CIP_SYT_NO_INFO,
- irq_target->syt_interval, irq_target->sfc);
+ if (s->packet_index < 0)
+ return;
- ++seq_tail;
- seq_tail %= seq_size;
+ packets = header_length / s->ctx_data.tx.ctx_header_size;
- ++min_avail;
+ offset = 0;
+ ctx_header = header;
+ while (offset < packets) {
+ unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
+
+ if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
+ break;
+
+ ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
+ ++offset;
}
- d->seq_tail = seq_tail;
+ ctx_header = header;
+
+ if (offset > 0) {
+ size_t length = s->ctx_data.tx.ctx_header_size * offset;
+
+ drop_tx_packets(context, tstamp, length, ctx_header, s);
+ if (amdtp_streaming_error(s))
+ return;
+
+ ctx_header += length / sizeof(*ctx_header);
+ header_length -= length;
+ }
+
+ if (offset < packets) {
+ s->ready_processing = true;
+ wake_up(&s->ready_wait);
+
+ process_tx_packets(context, tstamp, header_length, ctx_header, s);
+ if (amdtp_streaming_error(s))
+ return;
+
+ context->callback.sc = process_tx_packets;
+ }
}
-static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
- size_t header_length, void *header,
- void *private_data)
+static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
+ size_t header_length, void *header, void *private_data)
{
- struct amdtp_stream *irq_target = private_data;
- struct amdtp_domain *d = irq_target->domain;
- unsigned int packets = header_length / sizeof(__be32);
- struct amdtp_stream *s;
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+ __be32 *ctx_header;
+ unsigned int count;
+ unsigned int events;
+ int i;
+
+ if (s->packet_index < 0)
+ return;
+
+ count = header_length / s->ctx_data.tx.ctx_header_size;
+
+ // Attempt to detect any event in the batch of packets.
+ events = 0;
+ ctx_header = header;
+ for (i = 0; i < count; ++i) {
+ unsigned int payload_quads =
+ (be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
+ unsigned int data_blocks;
+
+ if (s->flags & CIP_NO_HEADER) {
+ data_blocks = payload_quads / s->data_block_quadlets;
+ } else {
+ __be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
+
+ if (payload_quads < CIP_HEADER_QUADLETS) {
+ data_blocks = 0;
+ } else {
+ payload_quads -= CIP_HEADER_QUADLETS;
+
+ if (s->flags & CIP_UNAWARE_SYT) {
+ data_blocks = payload_quads / s->data_block_quadlets;
+ } else {
+ u32 cip1 = be32_to_cpu(cip_headers[1]);
+
+				// A NODATA packet can include data blocks, but they are
+				// not available as events.
+ if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
+ data_blocks = 0;
+ else
+ data_blocks = payload_quads / s->data_block_quadlets;
+ }
+ }
+ }
- // Record enough entries with extra 3 cycles at least.
- pool_ideal_seq_descs(d, packets + 3);
+ events += data_blocks;
- out_stream_callback(context, tstamp, header_length, header, irq_target);
- if (amdtp_streaming_error(irq_target))
- goto error;
+ ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
+ }
+
+ drop_tx_packets(context, tstamp, header_length, header, s);
+
+ if (events > 0)
+ s->ctx_data.tx.event_starts = true;
+
+	// Decide the cycle count at which to begin processing packet content in IR contexts.
+ {
+ unsigned int stream_count = 0;
+ unsigned int event_starts_count = 0;
+ unsigned int cycle = UINT_MAX;
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (s->direction == AMDTP_IN_STREAM) {
+ ++stream_count;
+ if (s->ctx_data.tx.event_starts)
+ ++event_starts_count;
+ }
+ }
+
+ if (stream_count == event_starts_count) {
+ unsigned int next_cycle;
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (s->direction != AMDTP_IN_STREAM)
+ continue;
+
+ next_cycle = increment_ohci_cycle_count(s->next_cycle,
+ d->processing_cycle.tx_init_skip);
+ if (cycle == UINT_MAX ||
+ compare_ohci_cycle_count(next_cycle, cycle) > 0)
+ cycle = next_cycle;
+
+ s->context->callback.sc = process_tx_packets_intermediately;
+ }
+
+ d->processing_cycle.tx_start = cycle;
+ }
+ }
+}
+
+static void process_ctxs_in_domain(struct amdtp_domain *d)
+{
+ struct amdtp_stream *s;
list_for_each_entry(s, &d->streams, list) {
- if (s != irq_target && amdtp_stream_running(s)) {
+ if (s != d->irq_target && amdtp_stream_running(s))
fw_iso_context_flush_completions(s->context);
- if (amdtp_streaming_error(s))
- goto error;
- }
+
+ if (amdtp_streaming_error(s))
+ goto error;
}
return;
error:
- if (amdtp_stream_running(irq_target))
- cancel_stream(irq_target);
+ if (amdtp_stream_running(d->irq_target))
+ cancel_stream(d->irq_target);
list_for_each_entry(s, &d->streams, list) {
if (amdtp_stream_running(s))
@@ -996,37 +1436,99 @@ error:
}
}
-// this is executed one time.
+static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+
+ process_rx_packets(context, tstamp, header_length, header, private_data);
+ process_ctxs_in_domain(d);
+}
+
+static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
+ size_t header_length, void *header, void *private_data)
+{
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+
+ process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
+ process_ctxs_in_domain(d);
+}
+
+static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
+ size_t header_length, void *header, void *private_data)
+{
+ struct amdtp_stream *s = private_data;
+ struct amdtp_domain *d = s->domain;
+ bool ready_to_start;
+
+ skip_rx_packets(context, tstamp, header_length, header, private_data);
+ process_ctxs_in_domain(d);
+
+ if (d->replay.enable && !d->replay.on_the_fly) {
+ unsigned int rx_count = 0;
+ unsigned int rx_ready_count = 0;
+ struct amdtp_stream *rx;
+
+ list_for_each_entry(rx, &d->streams, list) {
+ struct amdtp_stream *tx;
+ unsigned int cached_cycles;
+
+ if (rx->direction != AMDTP_OUT_STREAM)
+ continue;
+ ++rx_count;
+
+ tx = rx->ctx_data.rx.replay_target;
+ cached_cycles = calculate_cached_cycle_count(tx, 0);
+ if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
+ ++rx_ready_count;
+ }
+
+ ready_to_start = (rx_count == rx_ready_count);
+ } else {
+ ready_to_start = true;
+ }
+
+	// Decide the cycle count at which to begin processing packet content in IT contexts. All
+	// IT contexts are expected to have started and received a callback by the time we reach here.
+ if (ready_to_start) {
+ unsigned int cycle = s->next_cycle;
+ list_for_each_entry(s, &d->streams, list) {
+ if (s->direction != AMDTP_OUT_STREAM)
+ continue;
+
+ if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
+ cycle = s->next_cycle;
+
+ if (s == d->irq_target)
+ s->context->callback.sc = irq_target_callback_intermediately;
+ else
+ s->context->callback.sc = process_rx_packets_intermediately;
+ }
+
+ d->processing_cycle.rx_start = cycle;
+ }
+}
+
+// This is executed once. For an in-stream, the first packet has arrived. For an out-stream, it is
+// prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
- const __be32 *ctx_header = header;
- u32 cycle;
-
- /*
- * For in-stream, first packet has come.
- * For out-stream, prepared to transmit first packet
- */
- s->callbacked = true;
- wake_up(&s->callback_wait);
+ struct amdtp_domain *d = s->domain;
if (s->direction == AMDTP_IN_STREAM) {
- cycle = compute_cycle_count(ctx_header[1]);
-
- context->callback.sc = in_stream_callback;
+ context->callback.sc = drop_tx_packets_initially;
} else {
- cycle = compute_it_cycle(*ctx_header, s->queue_size);
-
- if (s == s->domain->irq_target)
- context->callback.sc = irq_target_callback;
+ if (s == d->irq_target)
+ context->callback.sc = irq_target_callback_skip;
else
- context->callback.sc = out_stream_callback;
+ context->callback.sc = skip_rx_packets;
}
- s->start_cycle = cycle;
-
context->callback.sc(context, tstamp, header_length, header, s);
}
@@ -1035,8 +1537,6 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
* @s: the AMDTP stream to start
* @channel: the isochronous channel on the bus
* @speed: firewire speed code
- * @start_cycle: the isochronous cycle to start the context. Start immediately
- * if negative value is given.
* @queue_size: The number of packets in the queue.
* @idle_irq_interval: the interval to queue packet during initial state.
*
@@ -1045,8 +1545,7 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
* device can be started.
*/
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
- int start_cycle, unsigned int queue_size,
- unsigned int idle_irq_interval)
+ unsigned int queue_size, unsigned int idle_irq_interval)
{
bool is_irq_target = (s == s->domain->irq_target);
unsigned int ctx_header_size;
@@ -1075,27 +1574,21 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
}
// initialize packet buffer.
- max_ctx_payload_size = amdtp_stream_get_max_payload(s);
if (s->direction == AMDTP_IN_STREAM) {
dir = DMA_FROM_DEVICE;
type = FW_ISO_CONTEXT_RECEIVE;
- if (!(s->flags & CIP_NO_HEADER)) {
- max_ctx_payload_size -= 8;
+ if (!(s->flags & CIP_NO_HEADER))
ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
- } else {
+ else
ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
- }
} else {
dir = DMA_TO_DEVICE;
type = FW_ISO_CONTEXT_TRANSMIT;
ctx_header_size = 0; // No effect for IT context.
-
- if (!(s->flags & CIP_NO_HEADER))
- max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
+ max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
- err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
- max_ctx_payload_size, dir);
+ err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
s->queue_size = queue_size;
@@ -1116,6 +1609,50 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
if (s->direction == AMDTP_IN_STREAM) {
s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
s->ctx_data.tx.ctx_header_size = ctx_header_size;
+ s->ctx_data.tx.event_starts = false;
+
+ if (s->domain->replay.enable) {
+			// struct fw_iso_context.drop_overflow_headers is false, therefore it's
+			// possible to cache more entries than expected.
+ s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
+ queue_size * 3 / 2);
+ s->ctx_data.tx.cache.tail = 0;
+ s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
+ sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
+ if (!s->ctx_data.tx.cache.descs) {
+ err = -ENOMEM;
+ goto err_context;
+ }
+ }
+ } else {
+ static const struct {
+ unsigned int data_block;
+ unsigned int syt_offset;
+ } *entry, initial_state[] = {
+ [CIP_SFC_32000] = { 4, 3072 },
+ [CIP_SFC_48000] = { 6, 1024 },
+ [CIP_SFC_96000] = { 12, 1024 },
+ [CIP_SFC_192000] = { 24, 1024 },
+ [CIP_SFC_44100] = { 0, 67 },
+ [CIP_SFC_88200] = { 0, 67 },
+ [CIP_SFC_176400] = { 0, 67 },
+ };
+
+ s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
+ if (!s->ctx_data.rx.seq.descs) {
+ err = -ENOMEM;
+ goto err_context;
+ }
+ s->ctx_data.rx.seq.size = queue_size;
+ s->ctx_data.rx.seq.tail = 0;
+ s->ctx_data.rx.seq.head = 0;
+
+ entry = &initial_state[s->sfc];
+ s->ctx_data.rx.data_block_state = entry->data_block;
+ s->ctx_data.rx.syt_offset_state = entry->syt_offset;
+ s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
+
+ s->ctx_data.rx.event_count = 0;
}
if (s->flags & CIP_NO_HEADER)
@@ -1158,8 +1695,8 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
tag |= FW_ISO_CONTEXT_MATCH_TAG0;
- s->callbacked = false;
- err = fw_iso_context_start(s->context, start_cycle, 0, tag);
+ s->ready_processing = false;
+ err = fw_iso_context_start(s->context, -1, 0, tag);
if (err < 0)
goto err_pkt_descs;
@@ -1169,6 +1706,12 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
err_pkt_descs:
kfree(s->pkt_descs);
err_context:
+ if (s->direction == AMDTP_OUT_STREAM) {
+ kfree(s->ctx_data.rx.seq.descs);
+ } else {
+ if (s->domain->replay.enable)
+ kfree(s->ctx_data.tx.cache.descs);
+ }
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
err_buffer:
@@ -1191,28 +1734,12 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
{
struct amdtp_stream *irq_target = d->irq_target;
+	// Process isochronous packets queued up to the recent isochronous cycle to handle PCM frames.
if (irq_target && amdtp_stream_running(irq_target)) {
- // This function is called in software IRQ context of
- // period_work or process context.
- //
- // When the software IRQ context was scheduled by software IRQ
- // context of IT contexts, queued packets were already handled.
- // Therefore, no need to flush the queue in buffer furthermore.
- //
- // When the process context reach here, some packets will be
- // already queued in the buffer. These packets should be handled
- // immediately to keep better granularity of PCM pointer.
- //
- // Later, the process context will sometimes schedules software
- // IRQ context of the period_work. Then, no need to flush the
- // queue by the same reason as described in the above
- if (current_work() != &s->period_work) {
- // Queued packet should be processed without any kernel
- // preemption to keep latency against bus cycle.
- preempt_disable();
+		// In software IRQ context, the call dead-locks when disabling the tasklet
+		// synchronously.
+ if (!in_softirq())
fw_iso_context_flush_completions(irq_target->context);
- preempt_enable();
- }
}
return READ_ONCE(s->pcm_buffer_pointer);
@@ -1232,13 +1759,8 @@ int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
// Process isochronous packets for recent isochronous cycle to handle
// queued PCM frames.
- if (irq_target && amdtp_stream_running(irq_target)) {
- // Queued packet should be processed without any kernel
- // preemption to keep latency against bus cycle.
- preempt_disable();
+ if (irq_target && amdtp_stream_running(irq_target))
fw_iso_context_flush_completions(irq_target->context);
- preempt_enable();
- }
return 0;
}
@@ -1272,14 +1794,18 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
return;
}
- cancel_work_sync(&s->period_work);
fw_iso_context_stop(s->context);
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
iso_packets_buffer_destroy(&s->buffer, s->unit);
kfree(s->pkt_descs);
- s->callbacked = false;
+ if (s->direction == AMDTP_OUT_STREAM) {
+ kfree(s->ctx_data.rx.seq.descs);
+ } else {
+ if (s->domain->replay.enable)
+ kfree(s->ctx_data.tx.cache.descs);
+ }
mutex_unlock(&s->mutex);
}
@@ -1311,8 +1837,6 @@ int amdtp_domain_init(struct amdtp_domain *d)
d->events_per_period = 0;
- d->seq_descs = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
@@ -1355,26 +1879,49 @@ int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
-static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
+// Make a reference from each rx stream to a tx stream for sequence replay. When the number of tx
+// streams is less than the number of rx streams, the first tx stream is selected.
+static int make_association(struct amdtp_domain *d)
{
- int generation;
- int rcode;
- __be32 reg;
- u32 data;
-
- // This is a request to local 1394 OHCI controller and expected to
- // complete without any event waiting.
- generation = fw_card->generation;
- smp_rmb(); // node_id vs. generation.
- rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
- fw_card->node_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_CYCLE_TIME,
- &reg, sizeof(reg));
- if (rcode != RCODE_COMPLETE)
- return -EIO;
+ unsigned int dst_index = 0;
+ struct amdtp_stream *rx;
+
+ // Make association to replay target.
+ list_for_each_entry(rx, &d->streams, list) {
+ if (rx->direction == AMDTP_OUT_STREAM) {
+ unsigned int src_index = 0;
+ struct amdtp_stream *tx = NULL;
+ struct amdtp_stream *s;
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (s->direction == AMDTP_IN_STREAM) {
+ if (dst_index == src_index) {
+ tx = s;
+ break;
+ }
+
+ ++src_index;
+ }
+ }
+ if (!tx) {
+ // Select the first entry.
+ list_for_each_entry(s, &d->streams, list) {
+ if (s->direction == AMDTP_IN_STREAM) {
+ tx = s;
+ break;
+ }
+ }
+ // No target is available to replay sequence.
+ if (!tx)
+ return -EINVAL;
+ }
- data = be32_to_cpu(reg);
- *cur_cycle = data >> 12;
+ rx->ctx_data.rx.replay_target = tx;
+ rx->ctx_data.rx.cache_head = 0;
+
+ ++dst_index;
+ }
+ }
return 0;
}
@@ -1382,39 +1929,44 @@ static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
/**
* amdtp_domain_start - start sending packets for isoc context in the domain.
* @d: the AMDTP domain.
- * @ir_delay_cycle: the cycle delay to start all IR contexts.
+ * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of IR
+ * contexts.
+ * @replay_seq: whether to replay the sequence of packets in the IR context as the sequence of
+ * packets in the IT context.
+ * @replay_on_the_fly: transfer rx packets at the nominal frequency first, then begin to replay
+ * according to the arrival of events in tx packets.
*/
-int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
+ bool replay_on_the_fly)
{
- static const struct {
- unsigned int data_block;
- unsigned int syt_offset;
- } *entry, initial_state[] = {
- [CIP_SFC_32000] = { 4, 3072 },
- [CIP_SFC_48000] = { 6, 1024 },
- [CIP_SFC_96000] = { 12, 1024 },
- [CIP_SFC_192000] = { 24, 1024 },
- [CIP_SFC_44100] = { 0, 67 },
- [CIP_SFC_88200] = { 0, 67 },
- [CIP_SFC_176400] = { 0, 67 },
- };
unsigned int events_per_buffer = d->events_per_buffer;
unsigned int events_per_period = d->events_per_period;
- unsigned int idle_irq_interval;
unsigned int queue_size;
struct amdtp_stream *s;
- int cycle;
+ bool found = false;
int err;
+ if (replay_seq) {
+ err = make_association(d);
+ if (err < 0)
+ return err;
+ }
+ d->replay.enable = replay_seq;
+ d->replay.on_the_fly = replay_on_the_fly;
+
// Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) {
- if (s->direction == AMDTP_OUT_STREAM)
+ if (s->direction == AMDTP_OUT_STREAM) {
+ found = true;
break;
+ }
}
- if (!s)
+ if (!found)
return -ENXIO;
d->irq_target = s;
+ d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
+
// This is a case that AMDTP streams in domain run just for MIDI
// substream. Use the number of events equivalent to 10 msec as
// interval of hardware IRQ.
@@ -1426,82 +1978,24 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
amdtp_rate_table[d->irq_target->sfc]);
- d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
- if (!d->seq_descs)
- return -ENOMEM;
- d->seq_size = queue_size;
- d->seq_tail = 0;
-
- entry = &initial_state[s->sfc];
- d->data_block_state = entry->data_block;
- d->syt_offset_state = entry->syt_offset;
- d->last_syt_offset = TICKS_PER_CYCLE;
-
- if (ir_delay_cycle > 0) {
- struct fw_card *fw_card = fw_parent_device(s->unit)->card;
-
- err = get_current_cycle_time(fw_card, &cycle);
- if (err < 0)
- goto error;
-
- // No need to care overflow in cycle field because of enough
- // width.
- cycle += ir_delay_cycle;
-
- // Round up to sec field.
- if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
- unsigned int sec;
-
- // The sec field can overflow.
- sec = (cycle & 0xffffe000) >> 13;
- cycle = (++sec << 13) |
- ((cycle & 0x00001fff) / CYCLES_PER_SECOND);
- }
-
- // In OHCI 1394 specification, lower 2 bits are available for
- // sec field.
- cycle &= 0x00007fff;
- } else {
- cycle = -1;
- }
-
list_for_each_entry(s, &d->streams, list) {
- int cycle_match;
+ unsigned int idle_irq_interval = 0;
- if (s->direction == AMDTP_IN_STREAM) {
- cycle_match = cycle;
- } else {
- // IT context starts immediately.
- cycle_match = -1;
- s->ctx_data.rx.seq_index = 0;
+ if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
+ idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
+ amdtp_rate_table[d->irq_target->sfc]);
}
- if (s != d->irq_target) {
- err = amdtp_stream_start(s, s->channel, s->speed,
- cycle_match, queue_size, 0);
- if (err < 0)
- goto error;
- }
+		// Starts immediately, but the DMA context actually starts several hundred cycles later.
+ err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
+ if (err < 0)
+ goto error;
}
- s = d->irq_target;
- s->ctx_data.rx.events_per_period = events_per_period;
- s->ctx_data.rx.event_count = 0;
- s->ctx_data.rx.seq_index = 0;
-
- idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
- amdtp_rate_table[d->irq_target->sfc]);
- err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
- idle_irq_interval);
- if (err < 0)
- goto error;
-
return 0;
error:
list_for_each_entry(s, &d->streams, list)
amdtp_stream_stop(s);
- kfree(d->seq_descs);
- d->seq_descs = NULL;
return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
@@ -1526,8 +2020,5 @@ void amdtp_domain_stop(struct amdtp_domain *d)
d->events_per_period = 0;
d->irq_target = NULL;
-
- kfree(d->seq_descs);
- d->seq_descs = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index a3daa1f2c1c4..1f957c946c95 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -35,6 +35,8 @@
* @CIP_NO_HEADERS: a lack of headers in packets
* @CIP_UNALIGHED_DBC: Only for in-stream. The value of dbc is not alighed to
* the value of current SYT_INTERVAL; e.g. initial value is not zero.
+ * @CIP_UNAWARE_SYT: For an outgoing packet, the value in the SYT field of CIP is 0xffff.
+ * For an incoming packet, the value in the SYT field of CIP is not handled.
*/
enum cip_flags {
CIP_NONBLOCKING = 0x00,
@@ -48,6 +50,7 @@ enum cip_flags {
CIP_HEADER_WITHOUT_EOH = 0x80,
CIP_NO_HEADER = 0x100,
CIP_UNALIGHED_DBC = 0x200,
+ CIP_UNAWARE_SYT = 0x400,
};
/**
@@ -112,7 +115,8 @@ typedef unsigned int (*amdtp_stream_process_ctx_payloads_t)(
struct amdtp_domain;
struct amdtp_stream {
struct fw_unit *unit;
- enum cip_flags flags;
+	// A combination of cip_flags enumeration constants.
+ unsigned int flags;
enum amdtp_stream_direction direction;
struct mutex mutex;
@@ -134,19 +138,37 @@ struct amdtp_stream {
// Fixed interval of dbc between previos/current
// packets.
unsigned int dbc_interval;
+
+ // The device starts multiplexing events to the packet.
+ bool event_starts;
+
+ struct {
+ struct seq_desc *descs;
+ unsigned int size;
+ unsigned int tail;
+ } cache;
} tx;
struct {
- // To calculate CIP data blocks and tstamp.
- unsigned int transfer_delay;
- unsigned int seq_index;
-
// To generate CIP header.
unsigned int fdf;
- int syt_override;
// To generate constant hardware IRQ.
unsigned int event_count;
- unsigned int events_per_period;
+
+ // To calculate CIP data blocks and tstamp.
+ struct {
+ struct seq_desc *descs;
+ unsigned int size;
+ unsigned int tail;
+ unsigned int head;
+ } seq;
+
+ unsigned int data_block_state;
+ unsigned int syt_offset_state;
+ unsigned int last_syt_offset;
+
+ struct amdtp_stream *replay_target;
+ unsigned int cache_head;
} rx;
} ctx_data;
@@ -157,20 +179,21 @@ struct amdtp_stream {
unsigned int sph;
unsigned int fmt;
- /* Internal flags. */
+ // Internal flags.
+ unsigned int transfer_delay;
enum cip_sfc sfc;
unsigned int syt_interval;
/* For a PCM substream processing. */
struct snd_pcm_substream *pcm;
- struct work_struct period_work;
snd_pcm_uframes_t pcm_buffer_pointer;
unsigned int pcm_period_pointer;
- /* To wait for first packet. */
- bool callbacked;
- wait_queue_head_t callback_wait;
- u32 start_cycle;
+	// To start processing packet content at the same cycle in several contexts for
+	// each direction.
+ bool ready_processing;
+ wait_queue_head_t ready_wait;
+ unsigned int next_cycle;
/* For backends to process data blocks. */
void *protocol;
@@ -184,7 +207,7 @@ struct amdtp_stream {
};
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
- enum amdtp_stream_direction dir, enum cip_flags flags,
+ enum amdtp_stream_direction dir, unsigned int flags,
unsigned int fmt,
amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
unsigned int protocol_size);
@@ -259,21 +282,6 @@ static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
return sfc & 1;
}
-/**
- * amdtp_stream_wait_callback - sleep till callbacked or timeout
- * @s: the AMDTP stream
- * @timeout: msec till timeout
- *
- * If this function return false, the AMDTP stream should be stopped.
- */
-static inline bool amdtp_stream_wait_callback(struct amdtp_stream *s,
- unsigned int timeout)
-{
- return wait_event_timeout(s->callback_wait,
- s->callbacked,
- msecs_to_jiffies(timeout)) > 0;
-}
-
struct seq_desc {
unsigned int syt_offset;
unsigned int data_blocks;
@@ -287,13 +295,16 @@ struct amdtp_domain {
struct amdtp_stream *irq_target;
- struct seq_desc *seq_descs;
- unsigned int seq_size;
- unsigned int seq_tail;
+ struct {
+ unsigned int tx_init_skip;
+ unsigned int tx_start;
+ unsigned int rx_start;
+ } processing_cycle;
- unsigned int data_block_state;
- unsigned int syt_offset_state;
- unsigned int last_syt_offset;
+ struct {
+ bool enable:1;
+ bool on_the_fly:1;
+ } replay;
};
int amdtp_domain_init(struct amdtp_domain *d);
@@ -302,7 +313,8 @@ void amdtp_domain_destroy(struct amdtp_domain *d);
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
int channel, int speed);
-int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle);
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
+ bool replay_on_the_fly);
void amdtp_domain_stop(struct amdtp_domain *d);
static inline int amdtp_domain_set_events_per_period(struct amdtp_domain *d,
@@ -319,4 +331,25 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
struct amdtp_stream *s);
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s);
+/**
+ * amdtp_domain_wait_ready - sleep till ready to process packets or timeout
+ * @d: the AMDTP domain
+ * @timeout_ms: msec till timeout
+ *
+ * If this function returns false, the AMDTP domain should be stopped.
+ */
+static inline bool amdtp_domain_wait_ready(struct amdtp_domain *d, unsigned int timeout_ms)
+{
+ struct amdtp_stream *s;
+
+ list_for_each_entry(s, &d->streams, list) {
+ unsigned int j = msecs_to_jiffies(timeout_ms);
+
+ if (wait_event_interruptible_timeout(s->ready_wait, s->ready_processing, j) <= 0)
+ return false;
+ }
+
+ return true;
+}
+
#endif
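
The tx "cache" and rx "seq" members added above are ring buffers of struct seq_desc, indexed modulo their size by head/tail counters, and the replay_target/cache_head pair lets an rx stream re-use descriptors captured from a tx stream for media clock recovery. The standalone C sketch below only illustrates the bookkeeping this layout implies; the helper names, the ring size, and the sample values are assumptions for illustration, not the driver's actual code.

/*
 * Illustrative userspace sketch of a seq_desc ring buffer: descriptors from
 * received packets are pushed at the tail and replayed from the head.
 */
#include <stdio.h>
#include <stdlib.h>

struct seq_desc {
	unsigned int syt_offset;
	unsigned int data_blocks;
};

struct seq_ring {
	struct seq_desc *descs;
	unsigned int size;
	unsigned int head;
	unsigned int tail;
};

/* Store one descriptor captured from a tx packet. */
static void ring_push(struct seq_ring *r, unsigned int syt_offset, unsigned int data_blocks)
{
	r->descs[r->tail].syt_offset = syt_offset;
	r->descs[r->tail].data_blocks = data_blocks;
	r->tail = (r->tail + 1) % r->size;
}

/* Replay one descriptor into an rx sequence (media clock recovery). */
static struct seq_desc ring_pop(struct seq_ring *r)
{
	struct seq_desc d = r->descs[r->head];

	r->head = (r->head + 1) % r->size;
	return d;
}

int main(void)
{
	struct seq_ring ring = { .size = 8, .head = 0, .tail = 0 };
	unsigned int i;

	ring.descs = calloc(ring.size, sizeof(*ring.descs));
	if (!ring.descs)
		return 1;

	for (i = 0; i < 4; ++i)
		ring_push(&ring, 0x100 + i, 8);

	for (i = 0; i < 4; ++i) {
		struct seq_desc d = ring_pop(&ring);

		printf("syt_offset=0x%x data_blocks=%u\n", d.syt_offset, d.data_blocks);
	}

	free(ring.descs);
	return 0;
}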
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index daeecfa8b9aa..06a7ced218e2 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -40,14 +40,12 @@ static DECLARE_BITMAP(devices_used, SNDRV_CARDS);
#define VEN_EDIROL 0x000040ab
#define VEN_PRESONUS 0x00000a92
#define VEN_BRIDGECO 0x000007f5
-#define VEN_MACKIE1 0x0000000f
-#define VEN_MACKIE2 0x00000ff2
+#define VEN_MACKIE 0x00000ff2
#define VEN_STANTON 0x00001260
#define VEN_TASCAM 0x0000022e
#define VEN_BEHRINGER 0x00001564
#define VEN_APOGEE 0x000003db
#define VEN_ESI 0x00000f1b
-#define VEN_ACOUSTIC 0x00000002
#define VEN_CME 0x0000000a
#define VEN_PHONIC 0x00001496
#define VEN_LYNX 0x000019e5
@@ -56,14 +54,15 @@ static DECLARE_BITMAP(devices_used, SNDRV_CARDS);
#define VEN_TERRATEC 0x00000aac
#define VEN_YAMAHA 0x0000a0de
#define VEN_FOCUSRITE 0x0000130e
-#define VEN_MAUDIO1 0x00000d6c
-#define VEN_MAUDIO2 0x000007f5
+#define VEN_MAUDIO 0x00000d6c
#define VEN_DIGIDESIGN 0x00a07e
+#define OUI_SHOUYO 0x002327
#define MODEL_FOCUSRITE_SAFFIRE_BOTH 0x00000000
#define MODEL_MAUDIO_AUDIOPHILE_BOTH 0x00010060
#define MODEL_MAUDIO_FW1814 0x00010071
#define MODEL_MAUDIO_PROJECTMIX 0x00010091
+#define MODEL_MAUDIO_PROFIRELIGHTBRIDGE 0x000100a1
static int
name_device(struct snd_bebob *bebob)
@@ -74,7 +73,6 @@ name_device(struct snd_bebob *bebob)
u32 hw_id;
u32 data[2] = {0};
u32 revision;
- u32 version;
int err;
/* get vendor name from root directory */
@@ -107,12 +105,6 @@ name_device(struct snd_bebob *bebob)
if (err < 0)
goto end;
- err = snd_bebob_read_quad(bebob->unit, INFO_OFFSET_BEBOB_VERSION,
- &version);
- if (err < 0)
- goto end;
- bebob->version = version;
-
strcpy(bebob->card->driver, "BeBoB");
strcpy(bebob->card->shortname, model);
strcpy(bebob->card->mixername, model);
@@ -135,6 +127,9 @@ bebob_card_free(struct snd_card *card)
mutex_unlock(&devices_mutex);
snd_bebob_stream_destroy_duplex(bebob);
+
+ mutex_destroy(&bebob->mutex);
+ fw_unit_put(bebob->unit);
}
static const struct snd_bebob_spec *
@@ -162,16 +157,55 @@ check_audiophile_booted(struct fw_unit *unit)
return strncmp(name, "FW Audiophile Bootloader", 24) != 0;
}
-static void
-do_registration(struct work_struct *work)
+static int detect_quirks(struct snd_bebob *bebob, const struct ieee1394_device_id *entry)
+{
+ if (entry->vendor_id == VEN_MAUDIO) {
+ switch (entry->model_id) {
+ case MODEL_MAUDIO_PROFIRELIGHTBRIDGE:
+ // M-Audio ProFire Lightbridge has a quirk: it transfers packets with a
+ // discontinuous cycle or data block counter in the early stage of packet
+ // streaming. The number of cycles from the first packet with an event is variable.
+ bebob->quirks |= SND_BEBOB_QUIRK_INITIAL_DISCONTINUOUS_DBC;
+ break;
+ case MODEL_MAUDIO_FW1814:
+ case MODEL_MAUDIO_PROJECTMIX:
+ // At high sampling rates, the M-Audio special firmware transmits empty packets
+ // with the value of dbc incremented by 8.
+ bebob->quirks |= SND_BEBOB_QUIRK_WRONG_DBC;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int bebob_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_bebob *bebob =
- container_of(work, struct snd_bebob, dwork.work);
unsigned int card_index;
+ struct snd_card *card;
+ struct snd_bebob *bebob;
+ const struct snd_bebob_spec *spec;
int err;
- if (bebob->registered)
- return;
+ if (entry->vendor_id == VEN_FOCUSRITE &&
+ entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH)
+ spec = get_saffire_spec(unit);
+ else if (entry->vendor_id == VEN_MAUDIO &&
+ entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH &&
+ !check_audiophile_booted(unit))
+ spec = NULL;
+ else
+ spec = (const struct snd_bebob_spec *)entry->driver_data;
+
+ if (spec == NULL) {
+ // To boot up M-Audio models.
+ if (entry->vendor_id == VEN_MAUDIO || entry->vendor_id == VEN_BRIDGECO)
+ return snd_bebob_maudio_load_firmware(unit);
+ else
+ return -ENODEV;
+ }
mutex_lock(&devices_mutex);
for (card_index = 0; card_index < SNDRV_CARDS; card_index++) {
@@ -180,27 +214,40 @@ do_registration(struct work_struct *work)
}
if (card_index >= SNDRV_CARDS) {
mutex_unlock(&devices_mutex);
- return;
+ return -ENOENT;
}
- err = snd_card_new(&bebob->unit->device, index[card_index],
- id[card_index], THIS_MODULE, 0, &bebob->card);
+ err = snd_card_new(&unit->device, index[card_index], id[card_index], THIS_MODULE,
+ sizeof(*bebob), &card);
if (err < 0) {
mutex_unlock(&devices_mutex);
- return;
+ return err;
}
+ card->private_free = bebob_card_free;
set_bit(card_index, devices_used);
mutex_unlock(&devices_mutex);
- bebob->card->private_free = bebob_card_free;
- bebob->card->private_data = bebob;
+ bebob = card->private_data;
+ bebob->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, bebob);
+ bebob->card = card;
+ bebob->card_index = card_index;
+
+ bebob->spec = spec;
+ mutex_init(&bebob->mutex);
+ spin_lock_init(&bebob->lock);
+ init_waitqueue_head(&bebob->hwdep_wait);
err = name_device(bebob);
if (err < 0)
goto error;
+ err = detect_quirks(bebob, entry);
+ if (err < 0)
+ goto error;
+
if (bebob->spec == &maudio_special_spec) {
- if (bebob->entry->model_id == MODEL_MAUDIO_FW1814)
+ if (entry->model_id == MODEL_MAUDIO_FW1814)
err = snd_bebob_maudio_special_discover(bebob, true);
else
err = snd_bebob_maudio_special_discover(bebob, false);
@@ -230,80 +277,26 @@ do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(bebob->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- bebob->registered = true;
-
- return;
-error:
- snd_card_free(bebob->card);
- dev_info(&bebob->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int
-bebob_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
-{
- struct snd_bebob *bebob;
- const struct snd_bebob_spec *spec;
-
- if (entry->vendor_id == VEN_FOCUSRITE &&
- entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH)
- spec = get_saffire_spec(unit);
- else if (entry->vendor_id == VEN_MAUDIO1 &&
- entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH &&
- !check_audiophile_booted(unit))
- spec = NULL;
- else
- spec = (const struct snd_bebob_spec *)entry->driver_data;
-
- if (spec == NULL) {
- if (entry->vendor_id == VEN_MAUDIO1 ||
- entry->vendor_id == VEN_MAUDIO2)
- return snd_bebob_maudio_load_firmware(unit);
- else
- return -ENODEV;
- }
-
- /* Allocate this independent of sound card instance. */
- bebob = devm_kzalloc(&unit->device, sizeof(struct snd_bebob),
- GFP_KERNEL);
- if (!bebob)
- return -ENOMEM;
- bebob->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, bebob);
-
- bebob->entry = entry;
- bebob->spec = spec;
- mutex_init(&bebob->mutex);
- spin_lock_init(&bebob->lock);
- init_waitqueue_head(&bebob->hwdep_wait);
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&bebob->dwork, do_registration);
-
- if (entry->vendor_id != VEN_MAUDIO1 ||
- (entry->model_id != MODEL_MAUDIO_FW1814 &&
- entry->model_id != MODEL_MAUDIO_PROJECTMIX)) {
- snd_fw_schedule_registration(unit, &bebob->dwork);
- } else {
- /*
- * This is a workaround. This bus reset seems to have an effect
- * to make devices correctly handling transactions. Without
- * this, the devices have gap_count mismatch. This causes much
- * failure of transaction.
- *
- * Just after registration, user-land application receive
- * signals from dbus and starts I/Os. To avoid I/Os till the
- * future bus reset, registration is done in next update().
- */
- fw_schedule_bus_reset(fw_parent_device(bebob->unit)->card,
- false, true);
+ if (entry->vendor_id == VEN_MAUDIO &&
+ (entry->model_id == MODEL_MAUDIO_FW1814 || entry->model_id == MODEL_MAUDIO_PROJECTMIX)) {
+ // This is a workaround. This bus reset seems to make devices handle transactions
+ // correctly. Without it, the devices have a gap_count mismatch, which causes many
+ // transaction failures.
+ //
+ // Just after registration, user-land applications receive signals from dbus and
+ // start I/Os. To avoid I/Os until the future bus reset, registration is done in
+ // the next update().
+ fw_schedule_bus_reset(fw_parent_device(bebob->unit)->card, false, true);
}
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
/*
@@ -330,11 +323,7 @@ bebob_update(struct fw_unit *unit)
if (bebob == NULL)
return;
- /* Postpone a workqueue for deferred registration. */
- if (!bebob->registered)
- snd_fw_schedule_registration(unit, &bebob->dwork);
- else
- fcp_bus_reset(bebob->unit);
+ fcp_bus_reset(bebob->unit);
}
static void bebob_remove(struct fw_unit *unit)
@@ -344,20 +333,8 @@ static void bebob_remove(struct fw_unit *unit)
if (bebob == NULL)
return;
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&bebob->dwork);
-
- if (bebob->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(bebob->card);
- }
-
- mutex_destroy(&bebob->mutex);
- fw_unit_put(bebob->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(bebob->card);
}
static const struct snd_bebob_rate_spec normal_rate_spec = {
@@ -370,6 +347,22 @@ static const struct snd_bebob_spec spec_normal = {
.meter = NULL
};
+#define SPECIFIER_1394TA 0x00a02d
+
+// The immediate entry for version in the unit directory differs depending on the model:
+// * 0x010001
+// * 0x014001
+#define SND_BEBOB_DEV_ENTRY(vendor, model, data) \
+{ \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+ IEEE1394_MATCH_MODEL_ID | \
+ IEEE1394_MATCH_SPECIFIER_ID, \
+ .vendor_id = vendor, \
+ .model_id = model, \
+ .specifier_id = SPECIFIER_1394TA, \
+ .driver_data = (kernel_ulong_t)data \
+}
+
static const struct ieee1394_device_id bebob_id_table[] = {
/* Edirol, FA-66 */
SND_BEBOB_DEV_ENTRY(VEN_EDIROL, 0x00010049, &spec_normal),
@@ -386,9 +379,9 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* BridgeCo, Audio5 */
SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
/* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
- SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
- // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
- SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
+ SND_BEBOB_DEV_ENTRY(VEN_MACKIE, 0x00010065, &spec_normal),
+ // Mackie, d.2 (optional Firewire card with DM1000).
+ SND_BEBOB_DEV_ENTRY(VEN_MACKIE, 0x00010067, &spec_normal),
/* Stanton, ScratchAmp */
SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
/* Tascam, IF-FW DM */
@@ -410,17 +403,18 @@ static const struct ieee1394_device_id bebob_id_table[] = {
SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
/* ESI, Quatafire610 */
SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
- /* AcousticReality, eARMasterOne */
- SND_BEBOB_DEV_ENTRY(VEN_ACOUSTIC, 0x00000002, &spec_normal),
/* CME, MatrixKFW */
SND_BEBOB_DEV_ENTRY(VEN_CME, 0x00030000, &spec_normal),
- /* Phonic, Helix Board 12 MkII */
+ // Phonic Helix Board 12 FireWire MkII.
SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00050000, &spec_normal),
- /* Phonic, Helix Board 18 MkII */
+ // Phonic Helix Board 18 FireWire MkII.
SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00060000, &spec_normal),
- /* Phonic, Helix Board 24 MkII */
+ // Phonic Helix Board 24 FireWire MkII.
SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00070000, &spec_normal),
- /* Phonic, Helix Board 12 Universal/18 Universal/24 Universal */
+ // Phonic FireFly 808 FireWire.
+ SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00080000, &spec_normal),
+ // Phonic FireFly 202, 302, 808 Universal.
+ // Phonic Helix Board 12/18/24 FireWire, 12/18/24 Universal.
SND_BEBOB_DEV_ENTRY(VEN_PHONIC, 0x00000000, &spec_normal),
/* Lynx, Aurora 8/16 (LT-FW) */
SND_BEBOB_DEV_ENTRY(VEN_LYNX, 0x00000001, &spec_normal),
@@ -438,7 +432,8 @@ static const struct ieee1394_device_id bebob_id_table[] = {
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000007, &yamaha_terratec_spec),
/* TerraTec Electronic GmbH, EWS MIC2/MIC8 */
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000005, &spec_normal),
- /* Terratec Electronic GmbH, Aureon 7.1 Firewire */
+ // Terratec Electronic GmbH, Aureon 7.1 Firewire.
+ // AcousticReality, eAR Master One, Eroica, Figaro, and Ciaccona. Perhaps Terratec OEM.
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000002, &spec_normal),
/* Yamaha, GO44 */
SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000b, &yamaha_terratec_spec),
@@ -447,45 +442,35 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Focusrite, SaffirePro 26 I/O */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
/* Focusrite, SaffirePro 10 I/O */
- {
- // The combination of vendor_id and model_id is the same as the
- // same as the one of Liquid Saffire 56.
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID |
- IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .vendor_id = VEN_FOCUSRITE,
- .model_id = 0x000006,
- .specifier_id = 0x00a02d,
- .version = 0x010001,
- .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
- },
+ SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x000006, &saffirepro_10_spec),
/* Focusrite, Saffire(no label and LE) */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
&saffire_spec),
- /* M-Audio, Firewire 410 */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO2, 0x00010058, NULL), /* bootloader */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO2, 0x00010046, &maudio_fw410_spec),
+ // M-Audio, Firewire 410. The vendor field is left as BridgeCo. AG.
+ SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010058, NULL),
+ SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010046, &maudio_fw410_spec),
/* M-Audio, Firewire Audiophile */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, MODEL_MAUDIO_AUDIOPHILE_BOTH,
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, MODEL_MAUDIO_AUDIOPHILE_BOTH,
&maudio_audiophile_spec),
/* M-Audio, Firewire Solo */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x00010062, &maudio_solo_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, 0x00010062, &maudio_solo_spec),
/* M-Audio, Ozonic */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x0000000a, &maudio_ozonic_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, 0x0000000a, &maudio_ozonic_spec),
/* M-Audio NRV10 */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x00010081, &maudio_nrv10_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, 0x00010081, &maudio_nrv10_spec),
/* M-Audio, ProFireLightbridge */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x000100a1, &spec_normal),
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, MODEL_MAUDIO_PROFIRELIGHTBRIDGE, &spec_normal),
/* Firewire 1814 */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, 0x00010070, NULL), /* bootloader */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, MODEL_MAUDIO_FW1814,
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, 0x00010070, NULL), /* bootloader */
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, MODEL_MAUDIO_FW1814,
&maudio_special_spec),
/* M-Audio ProjectMix */
- SND_BEBOB_DEV_ENTRY(VEN_MAUDIO1, MODEL_MAUDIO_PROJECTMIX,
+ SND_BEBOB_DEV_ENTRY(VEN_MAUDIO, MODEL_MAUDIO_PROJECTMIX,
&maudio_special_spec),
/* Digidesign Mbox 2 Pro */
SND_BEBOB_DEV_ENTRY(VEN_DIGIDESIGN, 0x0000a9, &spec_normal),
+ // Toneweal FW66.
+ SND_BEBOB_DEV_ENTRY(OUI_SHOUYO, 0x020002, &spec_normal),
/* IDs are unknown but able to be supported */
/* Apogee, Mini-ME Firewire */
/* Apogee, Mini-DAC Firewire */
@@ -496,11 +481,6 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Infrasonic, Windy6 */
/* Mackie, Digital X Bus x.200 */
/* Mackie, Digital X Bus x.400 */
- /* Phonic, HB 12 */
- /* Phonic, HB 24 */
- /* Phonic, HB 18 */
- /* Phonic, FireFly 202 */
- /* Phonic, FireFly 302 */
/* Rolf Spuler, Firewire Guitar */
{}
};
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index 4e0ed84adbee..4d73ecb30d79 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -75,6 +75,11 @@ struct snd_bebob_spec {
const struct snd_bebob_meter_spec *meter;
};
+enum snd_bebob_quirk {
+ SND_BEBOB_QUIRK_INITIAL_DISCONTINUOUS_DBC = (1 << 0),
+ SND_BEBOB_QUIRK_WRONG_DBC = (1 << 1),
+};
+
struct snd_bebob {
struct snd_card *card;
struct fw_unit *unit;
@@ -83,11 +88,8 @@ struct snd_bebob {
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
-
- const struct ieee1394_device_id *entry;
const struct snd_bebob_spec *spec;
+ unsigned int quirks; // Combination of snd_bebob_quirk enumerations.
unsigned int midi_input_ports;
unsigned int midi_output_ports;
@@ -113,9 +115,6 @@ struct snd_bebob {
/* for M-Audio special devices */
void *maudio_special_quirk;
- /* For BeBoB version quirk. */
- unsigned int version;
-
struct amdtp_domain domain;
};
@@ -254,13 +253,4 @@ extern const struct snd_bebob_spec maudio_special_spec;
int snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814);
int snd_bebob_maudio_load_firmware(struct fw_unit *unit);
-#define SND_BEBOB_DEV_ENTRY(vendor, model, data) \
-{ \
- .match_flags = IEEE1394_MATCH_VENDOR_ID | \
- IEEE1394_MATCH_MODEL_ID, \
- .vendor_id = vendor, \
- .model_id = model, \
- .driver_data = (kernel_ulong_t)data \
-}
-
#endif
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index b612ee3e33b6..8629b14ded76 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -7,8 +7,7 @@
#include "./bebob.h"
-#define CALLBACK_TIMEOUT 2500
-#define FW_ISO_RESOURCE_DELAY 1000
+#define READY_TIMEOUT_MS 4000
/*
* NOTE;
@@ -402,12 +401,6 @@ static void break_both_connections(struct snd_bebob *bebob)
{
cmp_connection_break(&bebob->in_conn);
cmp_connection_break(&bebob->out_conn);
-
- // These models seem to be in transition state for a longer time. When
- // accessing in the state, any transactions is corrupted. In the worst
- // case, the device is going to reboot.
- if (bebob->version < 2)
- msleep(600);
}
static int start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
@@ -437,6 +430,7 @@ static int start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
static int init_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
{
+ unsigned int flags = CIP_BLOCKING;
enum amdtp_stream_direction dir_stream;
struct cmp_connection *conn;
enum cmp_direction dir_conn;
@@ -452,32 +446,21 @@ static int init_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
dir_conn = CMP_INPUT;
}
+ if (stream == &bebob->tx_stream) {
+ if (bebob->quirks & SND_BEBOB_QUIRK_WRONG_DBC)
+ flags |= CIP_EMPTY_HAS_WRONG_DBC;
+ }
+
err = cmp_connection_init(conn, bebob->unit, dir_conn, 0);
if (err < 0)
return err;
- err = amdtp_am824_init(stream, bebob->unit, dir_stream, CIP_BLOCKING);
+ err = amdtp_am824_init(stream, bebob->unit, dir_stream, flags);
if (err < 0) {
cmp_connection_destroy(conn);
return err;
}
- if (stream == &bebob->tx_stream) {
- // BeBoB v3 transfers packets with these qurks:
- // - In the beginning of streaming, the value of dbc is
- // incremented even if no data blocks are transferred.
- // - The value of dbc is reset suddenly.
- if (bebob->version > 2)
- bebob->tx_stream.flags |= CIP_EMPTY_HAS_WRONG_DBC |
- CIP_SKIP_DBC_ZERO_CHECK;
-
- // At high sampling rate, M-Audio special firmware transmits
- // empty packet with the value of dbc incremented by 8 but the
- // others are valid to IEC 61883-1.
- if (bebob->maudio_special_quirk)
- bebob->tx_stream.flags |= CIP_EMPTY_HAS_WRONG_DBC;
- }
-
return 0;
}
@@ -624,9 +607,8 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
if (!amdtp_stream_running(&bebob->rx_stream)) {
enum snd_bebob_clock_type src;
- struct amdtp_stream *master, *slave;
unsigned int curr_rate;
- unsigned int ir_delay_cycle;
+ unsigned int tx_init_skip_cycles;
if (bebob->maudio_special_quirk) {
err = bebob->spec->rate->get(bebob, &curr_rate);
@@ -638,36 +620,28 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
if (err < 0)
return err;
- if (src != SND_BEBOB_CLOCK_TYPE_SYT) {
- master = &bebob->tx_stream;
- slave = &bebob->rx_stream;
- } else {
- master = &bebob->rx_stream;
- slave = &bebob->tx_stream;
- }
-
- err = start_stream(bebob, master);
+ err = start_stream(bebob, &bebob->rx_stream);
if (err < 0)
goto error;
- err = start_stream(bebob, slave);
+ err = start_stream(bebob, &bebob->tx_stream);
if (err < 0)
goto error;
- // The device postpones start of transmission mostly for 1 sec
- // after receives packets firstly. For safe, IR context starts
- // 0.4 sec (=3200 cycles) later to version 1 or 2 firmware,
- // 2.0 sec (=16000 cycles) for version 3 firmware. This is
- // within 2.5 sec (=CALLBACK_TIMEOUT).
- // Furthermore, some devices transfer isoc packets with
- // discontinuous counter in the beginning of packet streaming.
- // The delay has an effect to avoid detection of this
- // discontinuity.
- if (bebob->version < 2)
- ir_delay_cycle = 3200;
+ if (!(bebob->quirks & SND_BEBOB_QUIRK_INITIAL_DISCONTINUOUS_DBC))
+ tx_init_skip_cycles = 0;
else
- ir_delay_cycle = 16000;
- err = amdtp_domain_start(&bebob->domain, ir_delay_cycle);
+ tx_init_skip_cycles = 16000;
+
+ // MEMO: Some devices start packet transmission well after establishment of the
+ // CMP connection. In the early stage of packet streaming, such a device transfers
+ // NODATA packets. After several hundred cycles, it begins to multiplex events into
+ // the packets with an adequate value in the syt field of the CIP header. Some
+ // devices are strict and generate a discontinuity in the sequence of tx packets
+ // when they receive an inadequate sequence of values in the syt field of the CIP
+ // header. In that case, the request to break the CMP connection is often corrupted,
+ // and any transaction results in an unrecoverable error, sometimes generating a
+ // bus reset.
+ err = amdtp_domain_start(&bebob->domain, tx_init_skip_cycles, true, false);
if (err < 0)
goto error;
@@ -684,10 +658,9 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
}
}
- if (!amdtp_stream_wait_callback(&bebob->rx_stream,
- CALLBACK_TIMEOUT) ||
- !amdtp_stream_wait_callback(&bebob->tx_stream,
- CALLBACK_TIMEOUT)) {
+ // Some devices postpone the start of transmission for roughly 1 sec after they
+ // first receive packets.
+ if (!amdtp_domain_wait_ready(&bebob->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto error;
}
@@ -883,6 +856,11 @@ static int detect_midi_ports(struct snd_bebob *bebob,
err = avc_bridgeco_get_plug_ch_count(bebob->unit, addr, &ch_count);
if (err < 0)
break;
+ // Yamaha GO44, GO46, Terratec Phase 24, and Phase x24 report 0 for the number of
+ // channels in external output plug 3 (MIDI type) even though they have a pair of
+ // physical MIDI jacks. As a workaround, assume one channel.
+ if (ch_count == 0)
+ ch_count = 1;
*midi_ports += ch_count;
}
@@ -961,12 +939,12 @@ int snd_bebob_stream_discover(struct snd_bebob *bebob)
if (err < 0)
goto end;
- err = detect_midi_ports(bebob, bebob->rx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_IN,
+ err = detect_midi_ports(bebob, bebob->tx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_IN,
plugs[2], &bebob->midi_input_ports);
if (err < 0)
goto end;
- err = detect_midi_ports(bebob, bebob->tx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_OUT,
+ err = detect_midi_ports(bebob, bebob->rx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_OUT,
plugs[3], &bebob->midi_output_ports);
if (err < 0)
goto end;
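
For a rough check of the timing constants used in bebob_stream.c above: IEEE 1394 schedules 8000 isochronous cycles per second, so the 16000-cycle tx skip applied for the initial-discontinuous-dbc quirk corresponds to about 2.0 seconds, well within READY_TIMEOUT_MS (4000 ms). The sketch below is only an illustrative back-of-the-envelope calculation, not driver code.

#include <stdio.h>

#define CYCLES_PER_SECOND	8000	/* IEEE 1394 isochronous cycle rate */

int main(void)
{
	unsigned int tx_init_skip_cycles = 16000;	/* value used for the quirk above */
	unsigned int ready_timeout_ms = 4000;		/* READY_TIMEOUT_MS */

	/* 16000 cycles / 8000 cycles-per-second = 2.0 seconds of skipped tx packets. */
	printf("tx skip: %.1f sec, ready timeout: %.1f sec\n",
	       (double)tx_init_skip_cycles / CYCLES_PER_SECOND,
	       ready_timeout_ms / 1000.0);
	return 0;
}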
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index c4dfe76500c2..f99e00083141 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -8,8 +8,8 @@
#include "dice.h"
-#define CALLBACK_TIMEOUT 200
-#define NOTIFICATION_TIMEOUT_MS (2 * MSEC_PER_SEC)
+#define READY_TIMEOUT_MS 200
+#define NOTIFICATION_TIMEOUT_MS 100
struct reg_params {
unsigned int count;
@@ -57,13 +57,9 @@ int snd_dice_stream_get_rate_mode(struct snd_dice *dice, unsigned int rate,
return -EINVAL;
}
-/*
- * This operation has an effect to synchronize GLOBAL_STATUS/GLOBAL_SAMPLE_RATE
- * to GLOBAL_STATUS. Especially, just after powering on, these are different.
- */
-static int ensure_phase_lock(struct snd_dice *dice, unsigned int rate)
+static int select_clock(struct snd_dice *dice, unsigned int rate)
{
- __be32 reg, nominal;
+ __be32 reg;
u32 data;
int i;
int err;
@@ -94,19 +90,8 @@ static int ensure_phase_lock(struct snd_dice *dice, unsigned int rate)
return err;
if (wait_for_completion_timeout(&dice->clock_accepted,
- msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0) {
- /*
- * Old versions of Dice firmware transfer no notification when
- * the same clock status as current one is set. In this case,
- * just check current clock status.
- */
- err = snd_dice_transaction_read_global(dice, GLOBAL_STATUS,
- &nominal, sizeof(nominal));
- if (err < 0)
- return err;
- if (!(be32_to_cpu(nominal) & STATUS_SOURCE_LOCKED))
- return -ETIMEDOUT;
- }
+ msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0)
+ return -ETIMEDOUT;
return 0;
}
@@ -304,7 +289,7 @@ int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate,
// Just after owning the unit (GLOBAL_OWNER), the unit can
// return invalid stream formats. Selecting clock parameters
// have an effect for the unit to refine it.
- err = ensure_phase_lock(dice, rate);
+ err = select_clock(dice, rate);
if (err < 0)
return err;
@@ -459,20 +444,17 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice)
goto error;
}
- err = amdtp_domain_start(&dice->domain, 0);
+ // MEMO: The device starts packet transmission immediately when enabled. Some
+ // devices are strict and generate a discontinuity in the sequence of tx packets
+ // when they receive an invalid sequence of presentation times in the CIP header.
+ // The sequence replay for media clock recovery can suppress this behaviour.
+ err = amdtp_domain_start(&dice->domain, 0, true, false);
if (err < 0)
goto error;
- for (i = 0; i < MAX_STREAMS; i++) {
- if ((i < tx_params.count &&
- !amdtp_stream_wait_callback(&dice->tx_stream[i],
- CALLBACK_TIMEOUT)) ||
- (i < rx_params.count &&
- !amdtp_stream_wait_callback(&dice->rx_stream[i],
- CALLBACK_TIMEOUT))) {
- err = -ETIMEDOUT;
- goto error;
- }
+ if (!amdtp_domain_wait_ready(&dice->domain, READY_TIMEOUT_MS)) {
+ err = -ETIMEDOUT;
+ goto error;
}
}
@@ -653,7 +635,7 @@ int snd_dice_stream_detect_current_formats(struct snd_dice *dice)
* invalid stream formats. Selecting clock parameters have an effect
* for the unit to refine it.
*/
- err = ensure_phase_lock(dice, rate);
+ err = select_clock(dice, rate);
if (err < 0)
return err;
diff --git a/sound/firewire/dice/dice-transaction.c b/sound/firewire/dice/dice-transaction.c
index 2c0dde29a024..92941ef83cd5 100644
--- a/sound/firewire/dice/dice-transaction.c
+++ b/sound/firewire/dice/dice-transaction.c
@@ -155,7 +155,7 @@ static void dice_notification(struct fw_card *card, struct fw_request *request,
fw_send_response(card, request, RCODE_COMPLETE);
- if (bits & NOTIFY_LOCK_CHG)
+ if (bits & NOTIFY_CLOCK_ACCEPTED)
complete(&dice->clock_accepted);
wake_up(&dice->hwdep_wait);
}
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 239d164b0eea..f75902bc8e74 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -135,22 +135,51 @@ static void dice_card_free(struct snd_card *card)
snd_dice_stream_destroy_duplex(dice);
snd_dice_transaction_destroy(dice);
+
+ mutex_destroy(&dice->mutex);
+ fw_unit_put(dice->unit);
}
-static void do_registration(struct work_struct *work)
+static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_dice *dice = container_of(work, struct snd_dice, dwork.work);
+ struct snd_card *card;
+ struct snd_dice *dice;
+ snd_dice_detect_formats_t detect_formats;
int err;
- if (dice->registered)
- return;
+ if (!entry->driver_data && entry->vendor_id != OUI_SSL) {
+ err = check_dice_category(unit);
+ if (err < 0)
+ return -ENODEV;
+ }
- err = snd_card_new(&dice->unit->device, -1, NULL, THIS_MODULE, 0,
- &dice->card);
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, sizeof(*dice), &card);
if (err < 0)
- return;
- dice->card->private_free = dice_card_free;
- dice->card->private_data = dice;
+ return err;
+ card->private_free = dice_card_free;
+
+ dice = card->private_data;
+ dice->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, dice);
+ dice->card = card;
+
+ if (!entry->driver_data)
+ detect_formats = snd_dice_stream_detect_current_formats;
+ else
+ detect_formats = (snd_dice_detect_formats_t)entry->driver_data;
+
+ // Below models are compliant to IEC 61883-1/6 and have no quirk at high sampling transfer
+ // frequency.
+ // * Avid M-Box 3 Pro
+ // * M-Audio Profire 610
+ // * M-Audio Profire 2626
+ if (entry->vendor_id == OUI_MAUDIO || entry->vendor_id == OUI_AVID)
+ dice->disable_double_pcm_frames = true;
+
+ spin_lock_init(&dice->lock);
+ mutex_init(&dice->mutex);
+ init_completion(&dice->clock_accepted);
+ init_waitqueue_head(&dice->hwdep_wait);
err = snd_dice_transaction_init(dice);
if (err < 0)
@@ -162,7 +191,7 @@ static void do_registration(struct work_struct *work)
dice_card_strings(dice);
- err = dice->detect_formats(dice);
+ err = detect_formats(dice);
if (err < 0)
goto error;
@@ -184,105 +213,34 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(dice->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- dice->registered = true;
-
- return;
-error:
- snd_card_free(dice->card);
- dev_info(&dice->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int dice_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *entry)
-{
- struct snd_dice *dice;
- int err;
-
- if (!entry->driver_data && entry->vendor_id != OUI_SSL) {
- err = check_dice_category(unit);
- if (err < 0)
- return -ENODEV;
- }
-
- /* Allocate this independent of sound card instance. */
- dice = devm_kzalloc(&unit->device, sizeof(struct snd_dice), GFP_KERNEL);
- if (!dice)
- return -ENOMEM;
- dice->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, dice);
-
- if (!entry->driver_data) {
- dice->detect_formats = snd_dice_stream_detect_current_formats;
- } else {
- dice->detect_formats =
- (snd_dice_detect_formats_t)entry->driver_data;
- }
-
- // Below models are compliant to IEC 61883-1/6 and have no quirk at high sampling transfer
- // frequency.
- // * Avid M-Box 3 Pro
- // * M-Audio Profire 610
- // * M-Audio Profire 2626
- if (entry->vendor_id == OUI_MAUDIO || entry->vendor_id == OUI_AVID)
- dice->disable_double_pcm_frames = true;
-
- spin_lock_init(&dice->lock);
- mutex_init(&dice->mutex);
- init_completion(&dice->clock_accepted);
- init_waitqueue_head(&dice->hwdep_wait);
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&dice->dwork, do_registration);
- snd_fw_schedule_registration(unit, &dice->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void dice_remove(struct fw_unit *unit)
{
struct snd_dice *dice = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&dice->dwork);
-
- if (dice->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(dice->card);
- }
-
- mutex_destroy(&dice->mutex);
- fw_unit_put(dice->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(dice->card);
}
static void dice_bus_reset(struct fw_unit *unit)
{
struct snd_dice *dice = dev_get_drvdata(&unit->device);
- /* Postpone a workqueue for deferred registration. */
- if (!dice->registered)
- snd_fw_schedule_registration(unit, &dice->dwork);
-
/* The handler address register becomes initialized. */
snd_dice_transaction_reinit(dice);
- /*
- * After registration, userspace can start packet streaming, then this
- * code block works fine.
- */
- if (dice->registered) {
- mutex_lock(&dice->mutex);
- snd_dice_stream_update_duplex(dice);
- mutex_unlock(&dice->mutex);
- }
+ mutex_lock(&dice->mutex);
+ snd_dice_stream_update_duplex(dice);
+ mutex_unlock(&dice->mutex);
}
#define DICE_INTERFACE 0x000001
diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
index 3c967d1b3605..fd440cc625f9 100644
--- a/sound/firewire/dice/dice.h
+++ b/sound/firewire/dice/dice.h
@@ -78,9 +78,6 @@ struct snd_dice {
spinlock_t lock;
struct mutex mutex;
- bool registered;
- struct delayed_work dwork;
-
/* Offsets for sub-addresses */
unsigned int global_offset;
unsigned int rx_offset;
@@ -93,7 +90,6 @@ struct snd_dice {
unsigned int rx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT];
unsigned int tx_midi_ports[MAX_STREAMS];
unsigned int rx_midi_ports[MAX_STREAMS];
- snd_dice_detect_formats_t detect_formats;
struct fw_address_handler notification_handler;
int owner_generation;
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index d613642a2ce3..59b86c8d89e1 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -396,16 +396,13 @@ int amdtp_dot_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
- enum cip_flags flags;
+ unsigned int flags = CIP_NONBLOCKING | CIP_UNAWARE_SYT;
// Use different mode between incoming/outgoing.
- if (dir == AMDTP_IN_STREAM) {
- flags = CIP_NONBLOCKING;
+ if (dir == AMDTP_IN_STREAM)
process_ctx_payloads = process_ir_ctx_payloads;
- } else {
- flags = CIP_BLOCKING;
+ else
process_ctx_payloads = process_it_ctx_payloads;
- }
return amdtp_stream_init(s, unit, dir, flags, CIP_FMT_AM,
process_ctx_payloads, sizeof(struct amdtp_dot));
diff --git a/sound/firewire/digi00x/digi00x-stream.c b/sound/firewire/digi00x/digi00x-stream.c
index 405d6903bfbc..a15f55b0dce3 100644
--- a/sound/firewire/digi00x/digi00x-stream.c
+++ b/sound/firewire/digi00x/digi00x-stream.c
@@ -7,7 +7,7 @@
#include "digi00x.h"
-#define CALLBACK_TIMEOUT 500
+#define READY_TIMEOUT_MS 200
const unsigned int snd_dg00x_stream_rates[SND_DG00X_RATE_COUNT] = {
[SND_DG00X_RATE_44100] = 44100,
@@ -375,14 +375,15 @@ int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x)
if (err < 0)
goto error;
- err = amdtp_domain_start(&dg00x->domain, 0);
+ // NOTE: The device doesn't start packet transmission until it receives any packet.
+ // It ignores the presentation time expressed by the value of the syt field of the
+ // CIP header in received packets. The sequence of the number of data blocks per
+ // packet is important for media clock recovery.
+ err = amdtp_domain_start(&dg00x->domain, 0, true, true);
if (err < 0)
goto error;
- if (!amdtp_stream_wait_callback(&dg00x->rx_stream,
- CALLBACK_TIMEOUT) ||
- !amdtp_stream_wait_callback(&dg00x->tx_stream,
- CALLBACK_TIMEOUT)) {
+ if (!amdtp_domain_wait_ready(&dg00x->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto error;
}
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index ab8408966ec3..995302808c27 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -47,23 +47,32 @@ static void dg00x_card_free(struct snd_card *card)
snd_dg00x_stream_destroy_duplex(dg00x);
snd_dg00x_transaction_unregister(dg00x);
+
+ mutex_destroy(&dg00x->mutex);
+ fw_unit_put(dg00x->unit);
}
-static void do_registration(struct work_struct *work)
+static int snd_dg00x_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_dg00x *dg00x =
- container_of(work, struct snd_dg00x, dwork.work);
+ struct snd_card *card;
+ struct snd_dg00x *dg00x;
int err;
- if (dg00x->registered)
- return;
-
- err = snd_card_new(&dg00x->unit->device, -1, NULL, THIS_MODULE, 0,
- &dg00x->card);
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, sizeof(*dg00x), &card);
if (err < 0)
- return;
- dg00x->card->private_free = dg00x_card_free;
- dg00x->card->private_data = dg00x;
+ return err;
+ card->private_free = dg00x_card_free;
+
+ dg00x = card->private_data;
+ dg00x->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, dg00x);
+ dg00x->card = card;
+
+ mutex_init(&dg00x->mutex);
+ spin_lock_init(&dg00x->lock);
+ init_waitqueue_head(&dg00x->hwdep_wait);
+
+ dg00x->is_console = entry->model_id == MODEL_CONSOLE;
err = name_card(dg00x);
if (err < 0)
@@ -91,85 +100,33 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(dg00x->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- dg00x->registered = true;
-
- return;
-error:
- snd_card_free(dg00x->card);
- dev_info(&dg00x->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int snd_dg00x_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *entry)
-{
- struct snd_dg00x *dg00x;
-
- /* Allocate this independent of sound card instance. */
- dg00x = devm_kzalloc(&unit->device, sizeof(struct snd_dg00x),
- GFP_KERNEL);
- if (!dg00x)
- return -ENOMEM;
-
- dg00x->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, dg00x);
-
- mutex_init(&dg00x->mutex);
- spin_lock_init(&dg00x->lock);
- init_waitqueue_head(&dg00x->hwdep_wait);
-
- dg00x->is_console = entry->model_id == MODEL_CONSOLE;
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&dg00x->dwork, do_registration);
- snd_fw_schedule_registration(unit, &dg00x->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void snd_dg00x_update(struct fw_unit *unit)
{
struct snd_dg00x *dg00x = dev_get_drvdata(&unit->device);
- /* Postpone a workqueue for deferred registration. */
- if (!dg00x->registered)
- snd_fw_schedule_registration(unit, &dg00x->dwork);
-
snd_dg00x_transaction_reregister(dg00x);
- /*
- * After registration, userspace can start packet streaming, then this
- * code block works fine.
- */
- if (dg00x->registered) {
- mutex_lock(&dg00x->mutex);
- snd_dg00x_stream_update_duplex(dg00x);
- mutex_unlock(&dg00x->mutex);
- }
+ mutex_lock(&dg00x->mutex);
+ snd_dg00x_stream_update_duplex(dg00x);
+ mutex_unlock(&dg00x->mutex);
}
static void snd_dg00x_remove(struct fw_unit *unit)
{
struct snd_dg00x *dg00x = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&dg00x->dwork);
-
- if (dg00x->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(dg00x->card);
- }
-
- mutex_destroy(&dg00x->mutex);
- fw_unit_put(dg00x->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(dg00x->card);
}
static const struct ieee1394_device_id snd_dg00x_id_table[] = {
diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h
index 129de8edd5ea..82b647d383c5 100644
--- a/sound/firewire/digi00x/digi00x.h
+++ b/sound/firewire/digi00x/digi00x.h
@@ -37,9 +37,6 @@ struct snd_dg00x {
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
-
struct amdtp_stream tx_stream;
struct fw_iso_resources tx_resources;
diff --git a/sound/firewire/fireface/amdtp-ff.c b/sound/firewire/fireface/amdtp-ff.c
index 119c0076b17a..98177b0666d3 100644
--- a/sound/firewire/fireface/amdtp-ff.c
+++ b/sound/firewire/fireface/amdtp-ff.c
@@ -168,6 +168,6 @@ int amdtp_ff_init(struct amdtp_stream *s, struct fw_unit *unit,
else
process_ctx_payloads = process_it_ctx_payloads;
- return amdtp_stream_init(s, unit, dir, CIP_NO_HEADER, 0,
+ return amdtp_stream_init(s, unit, dir, CIP_BLOCKING | CIP_UNAWARE_SYT | CIP_NO_HEADER, 0,
process_ctx_payloads, sizeof(struct amdtp_ff));
}
diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
index 5452115c0ef9..95bf405adb3d 100644
--- a/sound/firewire/fireface/ff-stream.c
+++ b/sound/firewire/fireface/ff-stream.c
@@ -7,7 +7,7 @@
#include "ff.h"
-#define CALLBACK_TIMEOUT_MS 200
+#define READY_TIMEOUT_MS 200
int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
enum snd_ff_stream_mode *mode)
@@ -199,14 +199,15 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (err < 0)
goto error;
- err = amdtp_domain_start(&ff->domain, 0);
+ // NOTE: The device doesn't transfer packets unless it receives any packet. The
+ // sequence of tx packets includes cycle skips corresponding to empty or NODATA
+ // packets in IEC 61883-1/6. The sequence of the number of data blocks per packet
+ // is important for media clock recovery.
+ err = amdtp_domain_start(&ff->domain, 0, true, true);
if (err < 0)
goto error;
- if (!amdtp_stream_wait_callback(&ff->rx_stream,
- CALLBACK_TIMEOUT_MS) ||
- !amdtp_stream_wait_callback(&ff->tx_stream,
- CALLBACK_TIMEOUT_MS)) {
+ if (!amdtp_domain_wait_ready(&ff->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto error;
}
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index bc39269415d2..7bf51d062021 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -42,22 +42,33 @@ static void ff_card_free(struct snd_card *card)
snd_ff_stream_destroy_duplex(ff);
snd_ff_transaction_unregister(ff);
+
+ mutex_destroy(&ff->mutex);
+ fw_unit_put(ff->unit);
}
-static void do_registration(struct work_struct *work)
+static int snd_ff_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_ff *ff = container_of(work, struct snd_ff, dwork.work);
+ struct snd_card *card;
+ struct snd_ff *ff;
int err;
- if (ff->registered)
- return;
-
- err = snd_card_new(&ff->unit->device, -1, NULL, THIS_MODULE, 0,
- &ff->card);
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, sizeof(*ff), &card);
if (err < 0)
- return;
- ff->card->private_free = ff_card_free;
- ff->card->private_data = ff;
+ return err;
+ card->private_free = ff_card_free;
+
+ ff = card->private_data;
+ ff->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, ff);
+ ff->card = card;
+
+ mutex_init(&ff->mutex);
+ spin_lock_init(&ff->lock);
+ init_waitqueue_head(&ff->hwdep_wait);
+
+ ff->unit_version = entry->version;
+ ff->spec = (const struct snd_ff_spec *)entry->driver_data;
err = snd_ff_transaction_register(ff);
if (err < 0)
@@ -83,76 +94,31 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(ff->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- ff->registered = true;
-
- return;
-error:
- snd_card_free(ff->card);
- dev_info(&ff->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int snd_ff_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *entry)
-{
- struct snd_ff *ff;
-
- ff = devm_kzalloc(&unit->device, sizeof(struct snd_ff), GFP_KERNEL);
- if (!ff)
- return -ENOMEM;
- ff->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, ff);
-
- mutex_init(&ff->mutex);
- spin_lock_init(&ff->lock);
- init_waitqueue_head(&ff->hwdep_wait);
-
- ff->unit_version = entry->version;
- ff->spec = (const struct snd_ff_spec *)entry->driver_data;
-
- /* Register this sound card later. */
- INIT_DEFERRABLE_WORK(&ff->dwork, do_registration);
- snd_fw_schedule_registration(unit, &ff->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void snd_ff_update(struct fw_unit *unit)
{
struct snd_ff *ff = dev_get_drvdata(&unit->device);
- /* Postpone a workqueue for deferred registration. */
- if (!ff->registered)
- snd_fw_schedule_registration(unit, &ff->dwork);
-
snd_ff_transaction_reregister(ff);
- if (ff->registered)
- snd_ff_stream_update_duplex(ff);
+ snd_ff_stream_update_duplex(ff);
}
static void snd_ff_remove(struct fw_unit *unit)
{
struct snd_ff *ff = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_work_sync(&ff->dwork.work);
-
- if (ff->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(ff->card);
- }
-
- mutex_destroy(&ff->mutex);
- fw_unit_put(ff->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(ff->card);
}
static const struct snd_ff_spec spec_ff800 = {
diff --git a/sound/firewire/fireface/ff.h b/sound/firewire/fireface/ff.h
index 705e7df4f929..0535f0b58b67 100644
--- a/sound/firewire/fireface/ff.h
+++ b/sound/firewire/fireface/ff.h
@@ -69,9 +69,6 @@ struct snd_ff {
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
-
enum snd_ff_unit_version unit_version;
const struct snd_ff_spec *spec;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index b1cc013a3540..ffb6dd796243 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -194,19 +194,19 @@ efw_card_free(struct snd_card *card)
snd_efw_stream_destroy_duplex(efw);
snd_efw_transaction_remove_instance(efw);
+
+ mutex_destroy(&efw->mutex);
+ fw_unit_put(efw->unit);
}
-static void
-do_registration(struct work_struct *work)
+static int efw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_efw *efw = container_of(work, struct snd_efw, dwork.work);
unsigned int card_index;
+ struct snd_card *card;
+ struct snd_efw *efw;
int err;
- if (efw->registered)
- return;
-
- /* check registered cards */
+ // check registered cards.
mutex_lock(&devices_mutex);
for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) {
if (!test_bit(card_index, devices_used) && enable[card_index])
@@ -214,26 +214,32 @@ do_registration(struct work_struct *work)
}
if (card_index >= SNDRV_CARDS) {
mutex_unlock(&devices_mutex);
- return;
+ return -ENOENT;
}
- err = snd_card_new(&efw->unit->device, index[card_index],
- id[card_index], THIS_MODULE, 0, &efw->card);
+ err = snd_card_new(&unit->device, index[card_index], id[card_index], THIS_MODULE,
+ sizeof(*efw), &card);
if (err < 0) {
mutex_unlock(&devices_mutex);
- return;
+ return err;
}
+ card->private_free = efw_card_free;
set_bit(card_index, devices_used);
mutex_unlock(&devices_mutex);
- efw->card->private_free = efw_card_free;
- efw->card->private_data = efw;
+ efw = card->private_data;
+ efw->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, efw);
+ efw->card = card;
+ efw->card_index = card_index;
+
+ mutex_init(&efw->mutex);
+ spin_lock_init(&efw->lock);
+ init_waitqueue_head(&efw->hwdep_wait);
- /* prepare response buffer */
- snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size,
- SND_EFW_RESPONSE_MAXIMUM_BYTES, 4096U);
- efw->resp_buf = devm_kzalloc(&efw->card->card_dev,
- snd_efw_resp_buf_size, GFP_KERNEL);
+ // prepare response buffer.
+ snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size, SND_EFW_RESPONSE_MAXIMUM_BYTES, 4096U);
+ efw->resp_buf = devm_kzalloc(&card->card_dev, snd_efw_resp_buf_size, GFP_KERNEL);
if (!efw->resp_buf) {
err = -ENOMEM;
goto error;
@@ -265,80 +271,48 @@ do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(efw->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- efw->registered = true;
-
- return;
-error:
- snd_card_free(efw->card);
- dev_info(&efw->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int
-efw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
-{
- struct snd_efw *efw;
-
- efw = devm_kzalloc(&unit->device, sizeof(struct snd_efw), GFP_KERNEL);
- if (efw == NULL)
- return -ENOMEM;
- efw->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, efw);
-
- mutex_init(&efw->mutex);
- spin_lock_init(&efw->lock);
- init_waitqueue_head(&efw->hwdep_wait);
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&efw->dwork, do_registration);
- snd_fw_schedule_registration(unit, &efw->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void efw_update(struct fw_unit *unit)
{
struct snd_efw *efw = dev_get_drvdata(&unit->device);
- /* Postpone a workqueue for deferred registration. */
- if (!efw->registered)
- snd_fw_schedule_registration(unit, &efw->dwork);
-
snd_efw_transaction_bus_reset(efw->unit);
- /*
- * After registration, userspace can start packet streaming, then this
- * code block works fine.
- */
- if (efw->registered) {
- mutex_lock(&efw->mutex);
- snd_efw_stream_update_duplex(efw);
- mutex_unlock(&efw->mutex);
- }
+ mutex_lock(&efw->mutex);
+ snd_efw_stream_update_duplex(efw);
+ mutex_unlock(&efw->mutex);
}
static void efw_remove(struct fw_unit *unit)
{
struct snd_efw *efw = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&efw->dwork);
-
- if (efw->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(efw->card);
- }
+ // Block till all of ALSA character devices are released.
+ snd_card_free(efw->card);
+}
- mutex_destroy(&efw->mutex);
- fw_unit_put(efw->unit);
+#define SPECIFIER_1394TA 0x00a02d
+#define VERSION_EFW 0x010000
+
+#define SND_EFW_DEV_ENTRY(vendor, model) \
+{ \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+ IEEE1394_MATCH_MODEL_ID | \
+ IEEE1394_MATCH_SPECIFIER_ID | \
+ IEEE1394_MATCH_VERSION, \
+ .vendor_id = vendor,\
+ .model_id = model, \
+ .specifier_id = SPECIFIER_1394TA, \
+ .version = VERSION_EFW, \
}
static const struct ieee1394_device_id efw_id_table[] = {
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 654e28a6669f..c8d5879efe28 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -65,9 +65,6 @@ struct snd_efw {
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
-
/* for transaction */
u32 seqnum;
bool resp_addr_changable;
@@ -181,7 +178,7 @@ struct snd_efw_phys_meters {
} __packed;
enum snd_efw_clock_source {
SND_EFW_CLOCK_SOURCE_INTERNAL = 0,
- SND_EFW_CLOCK_SOURCE_SYTMATCH = 1,
+ // Unused.
SND_EFW_CLOCK_SOURCE_WORDCLOCK = 2,
SND_EFW_CLOCK_SOURCE_SPDIF = 3,
SND_EFW_CLOCK_SOURCE_ADAT_1 = 4,
@@ -227,12 +224,4 @@ int snd_efw_get_multiplier_mode(unsigned int sampling_rate, unsigned int *mode);
int snd_efw_create_hwdep_device(struct snd_efw *efw);
-#define SND_EFW_DEV_ENTRY(vendor, model) \
-{ \
- .match_flags = IEEE1394_MATCH_VENDOR_ID | \
- IEEE1394_MATCH_MODEL_ID, \
- .vendor_id = vendor,\
- .model_id = model \
-}
-
#endif
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index 2206af0fef42..ac66f08acd6b 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -6,7 +6,7 @@
*/
#include "./fireworks.h"
-#define CALLBACK_TIMEOUT 100
+#define READY_TIMEOUT_MS 1000
static int init_stream(struct snd_efw *efw, struct amdtp_stream *stream)
{
@@ -29,7 +29,7 @@ static int init_stream(struct snd_efw *efw, struct amdtp_stream *stream)
if (err < 0)
return err;
- err = amdtp_am824_init(stream, efw->unit, s_dir, CIP_BLOCKING);
+ err = amdtp_am824_init(stream, efw->unit, s_dir, CIP_BLOCKING | CIP_UNAWARE_SYT);
if (err < 0) {
amdtp_stream_destroy(stream);
cmp_connection_destroy(conn);
@@ -264,6 +264,15 @@ int snd_efw_stream_start_duplex(struct snd_efw *efw)
return err;
if (!amdtp_stream_running(&efw->rx_stream)) {
+ unsigned int tx_init_skip_cycles;
+
+ // Audiofire 2/4 skip an isochronous cycle several thousand cycles after starting
+ // packet transmission.
+ if (efw->is_fireworks3 && !efw->is_af9)
+ tx_init_skip_cycles = 6000;
+ else
+ tx_init_skip_cycles = 0;
+
err = start_stream(efw, &efw->rx_stream, rate);
if (err < 0)
goto error;
@@ -272,15 +281,14 @@ int snd_efw_stream_start_duplex(struct snd_efw *efw)
if (err < 0)
goto error;
- err = amdtp_domain_start(&efw->domain, 0);
+ // NOTE: The device ignores the presentation time expressed by the value of the
+ // syt field of the CIP header in received packets. The sequence of the number of
+ // data blocks per packet is important for media clock recovery.
+ err = amdtp_domain_start(&efw->domain, tx_init_skip_cycles, true, false);
if (err < 0)
goto error;
- // Wait first callback.
- if (!amdtp_stream_wait_callback(&efw->rx_stream,
- CALLBACK_TIMEOUT) ||
- !amdtp_stream_wait_callback(&efw->tx_stream,
- CALLBACK_TIMEOUT)) {
+ if (!amdtp_domain_wait_ready(&efw->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto error;
}
diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c
index 85c4f4477c7f..e0a2337e8f27 100644
--- a/sound/firewire/lib.c
+++ b/sound/firewire/lib.c
@@ -67,38 +67,6 @@ int snd_fw_transaction(struct fw_unit *unit, int tcode,
}
EXPORT_SYMBOL(snd_fw_transaction);
-#define PROBE_DELAY_MS (2 * MSEC_PER_SEC)
-
-/**
- * snd_fw_schedule_registration - schedule work for sound card registration
- * @unit: an instance for unit on IEEE 1394 bus
- * @dwork: delayed work with callback function
- *
- * This function is not designed for general purposes. When new unit is
- * connected to IEEE 1394 bus, the bus is under bus-reset state because of
- * topological change. In this state, units tend to fail both of asynchronous
- * and isochronous communication. To avoid this problem, this function is used
- * to postpone sound card registration after the state. The callers must
- * set up instance of delayed work in advance.
- */
-void snd_fw_schedule_registration(struct fw_unit *unit,
- struct delayed_work *dwork)
-{
- u64 now, delay;
-
- now = get_jiffies_64();
- delay = fw_parent_device(unit)->card->reset_jiffies
- + msecs_to_jiffies(PROBE_DELAY_MS);
-
- if (time_after64(delay, now))
- delay -= now;
- else
- delay = 0;
-
- mod_delayed_work(system_wq, dwork, delay);
-}
-EXPORT_SYMBOL(snd_fw_schedule_registration);
-
MODULE_DESCRIPTION("FireWire audio helper functions");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL v2");
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index dc815dc3933e..664dfdb9e58d 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -23,7 +23,4 @@ static inline bool rcode_is_permanent_error(int rcode)
return rcode == RCODE_TYPE_ERROR || rcode == RCODE_ADDRESS_ERROR;
}
-void snd_fw_schedule_registration(struct fw_unit *unit,
- struct delayed_work *dwork);
-
#endif
diff --git a/sound/firewire/motu/Makefile b/sound/firewire/motu/Makefile
index 7c502d35103c..acdf66564fb0 100644
--- a/sound/firewire/motu/Makefile
+++ b/sound/firewire/motu/Makefile
@@ -3,5 +3,6 @@ CFLAGS_amdtp-motu.o := -I$(src)
snd-firewire-motu-objs := motu.o amdtp-motu.o motu-transaction.o motu-stream.o \
motu-proc.o motu-pcm.o motu-midi.o motu-hwdep.o \
- motu-protocol-v2.o motu-protocol-v3.o
+ motu-protocol-v2.o motu-protocol-v3.o \
+ motu-protocol-v1.o
obj-$(CONFIG_SND_FIREWIRE_MOTU) += snd-firewire-motu.o
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index edb31ac26868..5388b85fb60e 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -16,6 +16,14 @@
#define CIP_FMT_MOTU_TX_V3 0x22
#define MOTU_FDF_AM824 0x22
+#define TICKS_PER_CYCLE 3072
+#define CYCLES_PER_SECOND 8000
+#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
+
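+// The source packet header (SPH) carries a 25-bit presentation timestamp: a 13-bit cycle
+// count (0-7999) in bits 24-12 and a 12-bit offset within the cycle (0-3071 ticks) in bits
+// 11-0. For example, an SPH value of 0x01a2f123 decodes to cycle 0x1a2f (6703) and offset
+// 0x123 (291), i.e. tick 6703 * 3072 + 291 within the current second.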
+#define CIP_SPH_CYCLE_SHIFT 12
+#define CIP_SPH_CYCLE_MASK 0x01fff000
+#define CIP_SPH_OFFSET_MASK 0x00000fff
+
/*
* Nominally 3125 bytes/second, but the MIDI port's clock might be
* 1% too slow, and the bus clock 100 ppm too fast.
@@ -23,14 +31,6 @@
#define MIDI_BYTES_PER_SECOND 3093
struct amdtp_motu {
- /* For timestamp processing. */
- unsigned int quotient_ticks_per_event;
- unsigned int remainder_ticks_per_event;
- unsigned int next_ticks;
- unsigned int next_accumulated;
- unsigned int next_cycles;
- unsigned int next_seconds;
-
unsigned int pcm_chunks;
unsigned int pcm_byte_offset;
@@ -41,26 +41,16 @@ struct amdtp_motu {
int midi_db_count;
unsigned int midi_db_interval;
+
+ struct amdtp_motu_cache *cache;
};
int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
unsigned int midi_ports,
struct snd_motu_packet_format *formats)
{
- static const struct {
- unsigned int quotient_ticks_per_event;
- unsigned int remainder_ticks_per_event;
- } params[] = {
- [CIP_SFC_44100] = { 557, 123 },
- [CIP_SFC_48000] = { 512, 0 },
- [CIP_SFC_88200] = { 278, 282 },
- [CIP_SFC_96000] = { 256, 0 },
- [CIP_SFC_176400] = { 139, 141 },
- [CIP_SFC_192000] = { 128, 0 },
- };
struct amdtp_motu *p = s->protocol;
unsigned int pcm_chunks, data_chunks, data_block_quadlets;
- unsigned int delay;
unsigned int mode;
int i, err;
@@ -97,19 +87,6 @@ int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
p->midi_db_count = 0;
p->midi_db_interval = rate / MIDI_BYTES_PER_SECOND;
- /* IEEE 1394 bus requires. */
- delay = 0x2e00;
-
- /* For no-data or empty packets to adjust PCM sampling frequency. */
- delay += 8000 * 3072 * s->syt_interval / rate;
-
- p->next_seconds = 0;
- p->next_cycles = delay / 3072;
- p->quotient_ticks_per_event = params[s->sfc].quotient_ticks_per_event;
- p->remainder_ticks_per_event = params[s->sfc].remainder_ticks_per_event;
- p->next_ticks = delay % 3072;
- p->next_accumulated = 0;
-
return 0;
}
@@ -322,6 +299,34 @@ static void probe_tracepoints_events(struct amdtp_stream *s,
}
}
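+// Translate the SPH of every event in a received packet into an offset (in ticks) relative
+// to the start of the isochronous cycle in which the packet was transferred, and append the
+// offsets to the ring buffer consumed later by write_sph() in the IT context.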
+static void cache_event_offsets(struct amdtp_motu_cache *cache, const __be32 *buf,
+ unsigned int data_blocks, unsigned int data_block_quadlets)
+{
+ unsigned int *event_offsets = cache->event_offsets;
+ const unsigned int cache_size = cache->size;
+ unsigned int cache_tail = cache->tail;
+ unsigned int base_tick = cache->tx_cycle_count * TICKS_PER_CYCLE;
+ int i;
+
+ for (i = 0; i < data_blocks; ++i) {
+ u32 sph = be32_to_cpu(*buf);
+ unsigned int tick;
+
+ tick = ((sph & CIP_SPH_CYCLE_MASK) >> CIP_SPH_CYCLE_SHIFT) * TICKS_PER_CYCLE +
+ (sph & CIP_SPH_OFFSET_MASK);
+
+ if (tick < base_tick)
+ tick += TICKS_PER_SECOND;
+ event_offsets[cache_tail] = tick - base_tick;
+
+ cache_tail = (cache_tail + 1) % cache_size;
+ buf += data_block_quadlets;
+ }
+
+ cache->tail = cache_tail;
+ cache->tx_cycle_count = (cache->tx_cycle_count + 1) % CYCLES_PER_SECOND;
+}
+
static unsigned int process_ir_ctx_payloads(struct amdtp_stream *s,
const struct pkt_desc *descs,
unsigned int packets,
@@ -331,12 +336,17 @@ static unsigned int process_ir_ctx_payloads(struct amdtp_stream *s,
unsigned int pcm_frames = 0;
int i;
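+ // UINT_MAX is the initial (invalid) value; seed the cycle count with the cycle at which
+ // the IR context actually started.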
+ if (p->cache->tx_cycle_count == UINT_MAX)
+ p->cache->tx_cycle_count = (s->domain->processing_cycle.tx_start % CYCLES_PER_SECOND);
+
// For data block processing.
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = descs + i;
__be32 *buf = desc->ctx_payload;
unsigned int data_blocks = desc->data_blocks;
+ cache_event_offsets(p->cache, buf, data_blocks, s->data_block_quadlets);
+
if (pcm) {
read_pcm_s32(s, pcm, buf, data_blocks, pcm_frames);
pcm_frames += data_blocks;
@@ -354,46 +364,26 @@ static unsigned int process_ir_ctx_payloads(struct amdtp_stream *s,
return pcm_frames;
}
-static inline void compute_next_elapse_from_start(struct amdtp_motu *p)
-{
- p->next_accumulated += p->remainder_ticks_per_event;
- if (p->next_accumulated >= 441) {
- p->next_accumulated -= 441;
- p->next_ticks++;
- }
-
- p->next_ticks += p->quotient_ticks_per_event;
- if (p->next_ticks >= 3072) {
- p->next_ticks -= 3072;
- p->next_cycles++;
- }
-
- if (p->next_cycles >= 8000) {
- p->next_cycles -= 8000;
- p->next_seconds++;
- }
-
- if (p->next_seconds >= 128)
- p->next_seconds -= 128;
-}
-
-static void write_sph(struct amdtp_stream *s, __be32 *buffer,
- unsigned int data_blocks)
+static void write_sph(struct amdtp_motu_cache *cache, __be32 *buffer, unsigned int data_blocks,
+ unsigned int data_block_quadlets)
{
- struct amdtp_motu *p = s->protocol;
- unsigned int next_cycles;
- unsigned int i;
- u32 sph;
+ unsigned int *event_offsets = cache->event_offsets;
+ const unsigned int cache_size = cache->size;
+ unsigned int cache_head = cache->head;
+ unsigned int base_tick = cache->rx_cycle_count * TICKS_PER_CYCLE;
+ int i;
for (i = 0; i < data_blocks; i++) {
- next_cycles = (s->start_cycle + p->next_cycles) % 8000;
- sph = ((next_cycles << 12) | p->next_ticks) & 0x01ffffff;
+ unsigned int tick = (base_tick + event_offsets[cache_head]) % TICKS_PER_SECOND;
+ u32 sph = ((tick / TICKS_PER_CYCLE) << CIP_SPH_CYCLE_SHIFT) | (tick % TICKS_PER_CYCLE);
*buffer = cpu_to_be32(sph);
- compute_next_elapse_from_start(p);
-
- buffer += s->data_block_quadlets;
+ cache_head = (cache_head + 1) % cache_size;
+ buffer += data_block_quadlets;
}
+
+ cache->head = cache_head;
+ cache->rx_cycle_count = (cache->rx_cycle_count + 1) % CYCLES_PER_SECOND;
}
static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
@@ -405,6 +395,9 @@ static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
unsigned int pcm_frames = 0;
int i;
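+ // Likewise, UINT_MAX means the rx cycle count is not yet initialized; seed it with the
+ // cycle at which the IT context started.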
+ if (p->cache->rx_cycle_count == UINT_MAX)
+ p->cache->rx_cycle_count = (s->domain->processing_cycle.rx_start % CYCLES_PER_SECOND);
+
// For data block processing.
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = descs + i;
@@ -423,7 +416,7 @@ static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
// TODO: how to interact control messages between userspace?
- write_sph(s, buf, data_blocks);
+ write_sph(p->cache, buf, data_blocks, s->data_block_quadlets);
}
// For tracepoints.
@@ -436,11 +429,12 @@ static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
- const struct snd_motu_spec *spec)
+ const struct snd_motu_spec *spec, struct amdtp_motu_cache *cache)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
int fmt = CIP_FMT_MOTU;
- int flags = CIP_BLOCKING;
+ unsigned int flags = CIP_BLOCKING | CIP_UNAWARE_SYT;
+ struct amdtp_motu *p;
int err;
if (dir == AMDTP_IN_STREAM) {
@@ -478,9 +472,10 @@ int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
if (dir == AMDTP_OUT_STREAM) {
// Use fixed value for FDF field.
s->ctx_data.rx.fdf = MOTU_FDF_AM824;
- // Not used.
- s->ctx_data.rx.syt_override = 0xffff;
}
+ p = s->protocol;
+ p->cache = cache;
+
return 0;
}
diff --git a/sound/firewire/motu/motu-protocol-v1.c b/sound/firewire/motu/motu-protocol-v1.c
new file mode 100644
index 000000000000..f1d6a326dc07
--- /dev/null
+++ b/sound/firewire/motu/motu-protocol-v1.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// motu-protocol-v1.c - a part of driver for MOTU FireWire series
+//
+// Copyright (c) 2021 Takashi Sakamoto <o-takashi@sakamocchi.jp>
+//
+// Licensed under the terms of the GNU General Public License, version 2.
+
+#include "motu.h"
+
+#include <linux/delay.h>
+
+// Status register for MOTU 828 (0x'ffff'f000'0b00).
+//
+// 0xffff0000: ISOC_COMM_CONTROL_MASK in motu-stream.c.
+// 0x00008000: mode of optical input interface.
+// 0x00008000: for S/PDIF signal.
+// 0x00000000: disabled or for ADAT signal.
+// 0x00004000: mode of optical output interface.
+// 0x00004000: for S/PDIF signal.
+// 0x00000000: disabled or for ADAT signal.
+// 0x00003f00: monitor input mode.
+// 0x00000800: analog-1/2
+// 0x00001a00: analog-3/4
+// 0x00002c00: analog-5/6
+// 0x00003e00: analog-7/8
+// 0x00000000: analog-1
+// 0x00000900: analog-2
+// 0x00001200: analog-3
+// 0x00001b00: analog-4
+// 0x00002400: analog-5
+// 0x00002d00: analog-6
+// 0x00003600: analog-7
+// 0x00003f00: analog-8
+// 0x00000080: enable stream input.
+// 0x00000040: disable monitor input.
+// 0x00000008: enable main out.
+// 0x00000004: rate of sampling clock.
+// 0x00000004: 48.0 kHz
+// 0x00000000: 44.1 kHz
+// 0x00000023: source of sampling clock.
+// 0x00000003: source packet header (SPH)
+// 0x00000002: S/PDIF on optical/coaxial interface.
+// 0x00000021: ADAT on optical interface
+// 0x00000001: ADAT on Dsub 9pin
+// 0x00000000: internal
+
+#define CLK_828_STATUS_OFFSET 0x0b00
+#define CLK_828_STATUS_MASK 0x0000ffff
+#define CLK_828_STATUS_FLAG_OPT_IN_IFACE_IS_SPDIF 0x00008000
+#define CLK_828_STATUS_FLAG_OPT_OUT_IFACE_IS_SPDIF 0x00004000
+#define CLK_828_STATUS_FLAG_FETCH_PCM_FRAMES 0x00000080
+#define CLK_828_STATUS_FLAG_ENABLE_OUTPUT 0x00000008
+#define CLK_828_STATUS_FLAG_RATE_48000 0x00000004
+#define CLK_828_STATUS_MASK_SRC 0x00000023
+#define CLK_828_STATUS_FLAG_SRC_ADAT_ON_OPT 0x00000021
+#define CLK_828_STATUS_FLAG_SRC_SPH 0x00000003
+#define CLK_828_STATUS_FLAG_SRC_SPDIF 0x00000002
+#define CLK_828_STATUS_FLAG_SRC_ADAT_ON_DSUB 0x00000001
+#define CLK_828_STATUS_FLAG_SRC_INTERNAL 0x00000000
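+// Note that the clock source field is not a contiguous bitfield: it spans bits 0-1 and bit 5,
+// hence the 0x23 mask and the 0x21 value for ADAT on the optical interface.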
+
+// Status register for MOTU 896 (0x'ffff'f000'0b14).
+//
+// 0xf0000000: enable physical and stream input to DAC.
+// 0x80000000: disable
+// 0x40000000: disable
+// 0x20000000: enable (prior to the other bits)
+// 0x10000000: disable
+// 0x00000000: disable
+// 0x08000000: speed of word clock signal output on BNC interface.
+// 0x00000000: forced to the low rate (44.1/48.0 kHz).
+// 0x08000000: follows the system clock.
+// 0x04000000: something relevant to clock.
+// 0x03000000: enable output.
+// 0x02000000: once set, stays enabled unless the device voluntarily clears it.
+// 0x01000000: once set, stays enabled unless the device voluntarily clears it.
+// 0x00ffff00: monitor input mode.
+// 0x00000000: disabled
+// 0x00004800: analog-1/2
+// 0x00005a00: analog-3/4
+// 0x00006c00: analog-5/6
+// 0x00007e00: analog-7/8
+// 0x00104800: AES/EBU-1/2
+// 0x00004000: analog-1
+// 0x00004900: analog-2
+// 0x00005200: analog-3
+// 0x00005b00: analog-4
+// 0x00006400: analog-5
+// 0x00006d00: analog-6
+// 0x00007600: analog-7
+// 0x00007f00: analog-8
+// 0x00104000: AES/EBU-1
+// 0x00104900: AES/EBU-2
+// 0x00000060: sample rate conversion for AES/EBU input/output.
+// 0x00000000: None
+// 0x00000020: input signal is converted to system rate
+// 0x00000040: output is slaved to the input, ignoring the system rate
+// 0x00000060: output is double the system rate
+// 0x00000018: nominal rate of sampling clock.
+// 0x00000000: 44.1 kHz
+// 0x00000008: 48.0 kHz
+// 0x00000010: 88.2 kHz
+// 0x00000018: 96.0 kHz
+// 0x00000007: source of sampling clock.
+// 0x00000000: internal
+// 0x00000001: ADAT on optical interface
+// 0x00000002: AES/EBU on XLR
+// 0x00000003: source packet header (SPH)
+// 0x00000004: word clock on BNC
+// 0x00000005: ADAT on Dsub 9pin
+
+#define CLK_896_STATUS_OFFSET 0x0b14
+#define CLK_896_STATUS_FLAG_FETCH_ENABLE 0x20000000
+#define CLK_896_STATUS_FLAG_OUTPUT_ON 0x03000000
+#define CLK_896_STATUS_MASK_SRC 0x00000007
+#define CLK_896_STATUS_FLAG_SRC_INTERNAL 0x00000000
+#define CLK_896_STATUS_FLAG_SRC_ADAT_ON_OPT 0x00000001
+#define CLK_896_STATUS_FLAG_SRC_AESEBU 0x00000002
+#define CLK_896_STATUS_FLAG_SRC_SPH 0x00000003
+#define CLK_896_STATUS_FLAG_SRC_WORD 0x00000004
+#define CLK_896_STATUS_FLAG_SRC_ADAT_ON_DSUB 0x00000005
+#define CLK_896_STATUS_MASK_RATE 0x00000018
+#define CLK_896_STATUS_FLAG_RATE_44100 0x00000000
+#define CLK_896_STATUS_FLAG_RATE_48000 0x00000008
+#define CLK_896_STATUS_FLAG_RATE_88200 0x00000010
+#define CLK_896_STATUS_FLAG_RATE_96000 0x00000018
+
+static void parse_clock_rate_828(u32 data, unsigned int *rate)
+{
+ if (data & CLK_828_STATUS_FLAG_RATE_48000)
+ *rate = 48000;
+ else
+ *rate = 44100;
+}
+
+static int get_clock_rate_828(struct snd_motu *motu, unsigned int *rate)
+{
+ __be32 reg;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ parse_clock_rate_828(be32_to_cpu(reg), rate);
+
+ return 0;
+}
+
+static int parse_clock_rate_896(u32 data, unsigned int *rate)
+{
+ switch (data & CLK_896_STATUS_MASK_RATE) {
+ case CLK_896_STATUS_FLAG_RATE_44100:
+ *rate = 44100;
+ break;
+ case CLK_896_STATUS_FLAG_RATE_48000:
+ *rate = 48000;
+ break;
+ case CLK_896_STATUS_FLAG_RATE_88200:
+ *rate = 88200;
+ break;
+ case CLK_896_STATUS_FLAG_RATE_96000:
+ *rate = 96000;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int get_clock_rate_896(struct snd_motu *motu, unsigned int *rate)
+{
+ __be32 reg;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_896_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ return parse_clock_rate_896(be32_to_cpu(reg), rate);
+}
+
+int snd_motu_protocol_v1_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
+{
+ if (motu->spec == &snd_motu_spec_828)
+ return get_clock_rate_828(motu, rate);
+ else if (motu->spec == &snd_motu_spec_896)
+ return get_clock_rate_896(motu, rate);
+ else
+ return -ENXIO;
+}
+
+static int set_clock_rate_828(struct snd_motu *motu, unsigned int rate)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg) & CLK_828_STATUS_MASK;
+
+ data &= ~CLK_828_STATUS_FLAG_RATE_48000;
+ if (rate == 48000)
+ data |= CLK_828_STATUS_FLAG_RATE_48000;
+
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+}
+
+static int set_clock_rate_896(struct snd_motu *motu, unsigned int rate)
+{
+ unsigned int flag;
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_896_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
+
+ switch (rate) {
+ case 44100:
+ flag = CLK_896_STATUS_FLAG_RATE_44100;
+ break;
+ case 48000:
+ flag = CLK_896_STATUS_FLAG_RATE_48000;
+ break;
+ case 88200:
+ flag = CLK_896_STATUS_FLAG_RATE_88200;
+ break;
+ case 96000:
+ flag = CLK_896_STATUS_FLAG_RATE_96000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data &= ~CLK_896_STATUS_MASK_RATE;
+ data |= flag;
+
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, CLK_896_STATUS_OFFSET, &reg, sizeof(reg));
+}
+
+int snd_motu_protocol_v1_set_clock_rate(struct snd_motu *motu, unsigned int rate)
+{
+ if (motu->spec == &snd_motu_spec_828)
+ return set_clock_rate_828(motu, rate);
+ else if (motu->spec == &snd_motu_spec_896)
+ return set_clock_rate_896(motu, rate);
+ else
+ return -ENXIO;
+}
+
+static int get_clock_source_828(struct snd_motu *motu, enum snd_motu_clock_source *src)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg) & CLK_828_STATUS_MASK;
+
+ switch (data & CLK_828_STATUS_MASK_SRC) {
+ case CLK_828_STATUS_FLAG_SRC_ADAT_ON_OPT:
+ *src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
+ break;
+ case CLK_828_STATUS_FLAG_SRC_SPH:
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
+ break;
+ case CLK_828_STATUS_FLAG_SRC_SPDIF:
+ {
+ if (data & CLK_828_STATUS_FLAG_OPT_IN_IFACE_IS_SPDIF)
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+ else
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT;
+ break;
+ }
+ case CLK_828_STATUS_FLAG_SRC_ADAT_ON_DSUB:
+ *src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_DSUB;
+ break;
+ case CLK_828_STATUS_FLAG_SRC_INTERNAL:
+ *src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int get_clock_source_896(struct snd_motu *motu, enum snd_motu_clock_source *src)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_896_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
+
+ switch (data & CLK_896_STATUS_MASK_SRC) {
+ case CLK_896_STATUS_FLAG_SRC_INTERNAL:
+ *src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
+ break;
+ case CLK_896_STATUS_FLAG_SRC_ADAT_ON_OPT:
+ *src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
+ break;
+ case CLK_896_STATUS_FLAG_SRC_AESEBU:
+ *src = SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR;
+ break;
+ case CLK_896_STATUS_FLAG_SRC_SPH:
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
+ break;
+ case CLK_896_STATUS_FLAG_SRC_WORD:
+ *src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
+ break;
+ case CLK_896_STATUS_FLAG_SRC_ADAT_ON_DSUB:
+ *src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_DSUB;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+int snd_motu_protocol_v1_get_clock_source(struct snd_motu *motu, enum snd_motu_clock_source *src)
+{
+ if (motu->spec == &snd_motu_spec_828)
+ return get_clock_source_828(motu, src);
+ else if (motu->spec == &snd_motu_spec_896)
+ return get_clock_source_896(motu, src);
+ else
+ return -ENXIO;
+}
+
+static int switch_fetching_mode_828(struct snd_motu *motu, bool enable)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg) & CLK_828_STATUS_MASK;
+
+ data &= ~(CLK_828_STATUS_FLAG_FETCH_PCM_FRAMES | CLK_828_STATUS_FLAG_ENABLE_OUTPUT);
+ if (enable) {
+ // This transaction should be initiated after the device has received a batch of packets,
+ // since the device voluntarily mutes its outputs. As a workaround, sleep for
+ // 100 msec.
+ msleep(100);
+ data |= CLK_828_STATUS_FLAG_FETCH_PCM_FRAMES | CLK_828_STATUS_FLAG_ENABLE_OUTPUT;
+ }
+
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+}
+
+static int switch_fetching_mode_896(struct snd_motu *motu, bool enable)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, CLK_896_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
+
+ data &= ~CLK_896_STATUS_FLAG_FETCH_ENABLE;
+ if (enable)
+ data |= CLK_896_STATUS_FLAG_FETCH_ENABLE | CLK_896_STATUS_FLAG_OUTPUT_ON;
+
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, CLK_896_STATUS_OFFSET, &reg, sizeof(reg));
+}
+
+int snd_motu_protocol_v1_switch_fetching_mode(struct snd_motu *motu, bool enable)
+{
+ if (motu->spec == &snd_motu_spec_828)
+ return switch_fetching_mode_828(motu, enable);
+ else if (motu->spec == &snd_motu_spec_896)
+ return switch_fetching_mode_896(motu, enable);
+ else
+ return -ENXIO;
+}
+
+static int detect_packet_formats_828(struct snd_motu *motu)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ motu->tx_packet_formats.pcm_byte_offset = 4;
+ motu->tx_packet_formats.msg_chunks = 2;
+
+ motu->rx_packet_formats.pcm_byte_offset = 4;
+ motu->rx_packet_formats.msg_chunks = 0;
+
+ err = snd_motu_transaction_read(motu, CLK_828_STATUS_OFFSET, &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg) & CLK_828_STATUS_MASK;
+
+ // The number of chunks is simply reduced when S/PDIF is selected on the optical interface.
+ if (!(data & CLK_828_STATUS_FLAG_OPT_IN_IFACE_IS_SPDIF))
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+
+ if (!(data & CLK_828_STATUS_FLAG_OPT_OUT_IFACE_IS_SPDIF))
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+
+ return 0;
+}
+
+static int detect_packet_formats_896(struct snd_motu *motu)
+{
+ // 24-bit PCM frames follow the source packet header; there is no message chunk.
+ motu->tx_packet_formats.pcm_byte_offset = 4;
+ motu->rx_packet_formats.pcm_byte_offset = 4;
+
+ // No message chunk in data block.
+ motu->tx_packet_formats.msg_chunks = 0;
+ motu->rx_packet_formats.msg_chunks = 0;
+
+ // Always enable the optical interface for ADAT signal, since the device has no register
+ // to indicate the current configuration.
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+ motu->tx_packet_formats.pcm_chunks[1] += 8;
+
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+ motu->rx_packet_formats.pcm_chunks[1] += 8;
+
+ return 0;
+}
+
+int snd_motu_protocol_v1_cache_packet_formats(struct snd_motu *motu)
+{
+ memcpy(motu->tx_packet_formats.pcm_chunks, motu->spec->tx_fixed_pcm_chunks,
+ sizeof(motu->tx_packet_formats.pcm_chunks));
+ memcpy(motu->rx_packet_formats.pcm_chunks, motu->spec->rx_fixed_pcm_chunks,
+ sizeof(motu->rx_packet_formats.pcm_chunks));
+
+ if (motu->spec == &snd_motu_spec_828)
+ return detect_packet_formats_828(motu);
+ else if (motu->spec == &snd_motu_spec_896)
+ return detect_packet_formats_896(motu);
+ else
+ return 0;
+}
+
+const struct snd_motu_spec snd_motu_spec_828 = {
+ .name = "828",
+ .protocol_version = SND_MOTU_PROTOCOL_V1,
+ .tx_fixed_pcm_chunks = {10, 0, 0},
+ .rx_fixed_pcm_chunks = {10, 0, 0},
+};
+
+const struct snd_motu_spec snd_motu_spec_896 = {
+ .name = "896",
+ .tx_fixed_pcm_chunks = {10, 10, 0},
+ .rx_fixed_pcm_chunks = {10, 10, 0},
+};
diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
index e59e69ab1538..93d5df1ae550 100644
--- a/sound/firewire/motu/motu-protocol-v2.c
+++ b/sound/firewire/motu/motu-protocol-v2.c
@@ -12,6 +12,13 @@
#define V2_CLOCK_RATE_SHIFT 3
#define V2_CLOCK_SRC_MASK 0x00000007
#define V2_CLOCK_SRC_SHIFT 0
+#define V2_CLOCK_SRC_AESEBU_ON_XLR 0x07
+#define V2_CLOCK_SRC_ADAT_ON_DSUB 0x05
+#define V2_CLOCK_SRC_WORD_ON_BNC 0x04
+#define V2_CLOCK_SRC_SPH 0x03
+#define V2_CLOCK_SRC_SPDIF 0x02 // on either coaxial or optical
+#define V2_CLOCK_SRC_ADAT_ON_OPT 0x01
+#define V2_CLOCK_SRC_INTERNAL 0x00
#define V2_CLOCK_FETCH_ENABLE 0x02000000
#define V2_CLOCK_MODEL_SPECIFIC 0x04000000
@@ -78,64 +85,51 @@ int snd_motu_protocol_v2_set_clock_rate(struct snd_motu *motu,
sizeof(reg));
}
-static int detect_clock_source_optical_model(struct snd_motu *motu, u32 data,
- enum snd_motu_clock_source *src)
+static int get_clock_source(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
{
- switch (data) {
- case 0:
+ switch (data & V2_CLOCK_SRC_MASK) {
+ case V2_CLOCK_SRC_INTERNAL:
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
break;
- case 1:
+ case V2_CLOCK_SRC_ADAT_ON_OPT:
+ *src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
+ break;
+ case V2_CLOCK_SRC_SPDIF:
{
- __be32 reg;
-
- // To check the configuration of optical interface.
- int err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET,
- &reg, sizeof(reg));
- if (err < 0)
- return err;
-
- if (be32_to_cpu(reg) & 0x00000200)
- *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT;
- else
- *src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
+ bool support_iec60958_on_opt = (motu->spec == &snd_motu_spec_828mk2 ||
+ motu->spec == &snd_motu_spec_traveler);
+
+ if (!support_iec60958_on_opt) {
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+ } else {
+ __be32 reg;
+
+ // To check the configuration of optical interface.
+ int err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+
+ if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_SPDIF)
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT;
+ else
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+ }
break;
}
- case 2:
- *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
- break;
- case 3:
+ case V2_CLOCK_SRC_SPH:
*src = SND_MOTU_CLOCK_SOURCE_SPH;
break;
- case 4:
+ case V2_CLOCK_SRC_WORD_ON_BNC:
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
break;
- case 5:
+ case V2_CLOCK_SRC_ADAT_ON_DSUB:
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_DSUB;
break;
- default:
- *src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
- break;
- }
-
- return 0;
-}
-
-static int v2_detect_clock_source(struct snd_motu *motu, u32 data,
- enum snd_motu_clock_source *src)
-{
- switch (data) {
- case 0:
- *src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
- break;
- case 2:
- *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
- break;
- case 3:
- *src = SND_MOTU_CLOCK_SOURCE_SPH;
- break;
- case 4:
- *src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
+ case V2_CLOCK_SRC_AESEBU_ON_XLR:
+ *src = SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR;
break;
default:
*src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
@@ -145,17 +139,6 @@ static int v2_detect_clock_source(struct snd_motu *motu, u32 data,
return 0;
}
-static int get_clock_source(struct snd_motu *motu, u32 data,
- enum snd_motu_clock_source *src)
-{
- data &= V2_CLOCK_SRC_MASK;
- if (motu->spec == &snd_motu_spec_828mk2 ||
- motu->spec == &snd_motu_spec_traveler)
- return detect_clock_source_optical_model(motu, data, src);
- else
- return v2_detect_clock_source(motu, data, src);
-}
-
int snd_motu_protocol_v2_get_clock_source(struct snd_motu *motu,
enum snd_motu_clock_source *src)
{
@@ -235,59 +218,9 @@ int snd_motu_protocol_v2_switch_fetching_mode(struct snd_motu *motu,
}
}
-static int detect_packet_formats_828mk2(struct snd_motu *motu, u32 data)
-{
- if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
- V2_OPT_IFACE_MODE_ADAT) {
- motu->tx_packet_formats.pcm_chunks[0] += 8;
- motu->tx_packet_formats.pcm_chunks[1] += 4;
- }
-
- if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) ==
- V2_OPT_IFACE_MODE_ADAT) {
- motu->rx_packet_formats.pcm_chunks[0] += 8;
- motu->rx_packet_formats.pcm_chunks[1] += 4;
- }
-
- return 0;
-}
-
-static int detect_packet_formats_traveler(struct snd_motu *motu, u32 data)
-{
- if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
- V2_OPT_IFACE_MODE_ADAT) {
- motu->tx_packet_formats.pcm_chunks[0] += 8;
- motu->tx_packet_formats.pcm_chunks[1] += 4;
- }
-
- if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) ==
- V2_OPT_IFACE_MODE_ADAT) {
- motu->rx_packet_formats.pcm_chunks[0] += 8;
- motu->rx_packet_formats.pcm_chunks[1] += 4;
- }
-
- return 0;
-}
-
-static int detect_packet_formats_8pre(struct snd_motu *motu, u32 data)
-{
- if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
- V2_OPT_IFACE_MODE_ADAT) {
- motu->tx_packet_formats.pcm_chunks[0] += 8;
- motu->tx_packet_formats.pcm_chunks[1] += 8;
- }
-
- if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) ==
- V2_OPT_IFACE_MODE_ADAT) {
- motu->rx_packet_formats.pcm_chunks[0] += 8;
- motu->rx_packet_formats.pcm_chunks[1] += 8;
- }
-
- return 0;
-}
-
int snd_motu_protocol_v2_cache_packet_formats(struct snd_motu *motu)
{
+ bool has_two_opt_ifaces = (motu->spec == &snd_motu_spec_8pre);
__be32 reg;
u32 data;
int err;
@@ -311,14 +244,25 @@ int snd_motu_protocol_v2_cache_packet_formats(struct snd_motu *motu)
motu->spec->rx_fixed_pcm_chunks,
sizeof(motu->rx_packet_formats.pcm_chunks));
- if (motu->spec == &snd_motu_spec_828mk2)
- return detect_packet_formats_828mk2(motu, data);
- else if (motu->spec == &snd_motu_spec_traveler)
- return detect_packet_formats_traveler(motu, data);
- else if (motu->spec == &snd_motu_spec_8pre)
- return detect_packet_formats_8pre(motu, data);
- else
- return 0;
+ if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) == V2_OPT_IFACE_MODE_ADAT) {
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+
+ if (!has_two_opt_ifaces)
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
+ else
+ motu->tx_packet_formats.pcm_chunks[1] += 8;
+ }
+
+ if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) == V2_OPT_IFACE_MODE_ADAT) {
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+
+ if (!has_two_opt_ifaces)
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
+ else
+ motu->rx_packet_formats.pcm_chunks[1] += 8;
+ }
+
+ return 0;
}
const struct snd_motu_spec snd_motu_spec_828mk2 = {
@@ -353,6 +297,7 @@ const struct snd_motu_spec snd_motu_spec_8pre = {
.protocol_version = SND_MOTU_PROTOCOL_V2,
.flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_2ND_Q,
- .tx_fixed_pcm_chunks = {10, 6, 0},
- .rx_fixed_pcm_chunks = {10, 6, 0},
+ // Two dummy chunks are always at the end of the data block.
+ .tx_fixed_pcm_chunks = {10, 10, 0},
+ .rx_fixed_pcm_chunks = {6, 6, 0},
};
diff --git a/sound/firewire/motu/motu-protocol-v3.c b/sound/firewire/motu/motu-protocol-v3.c
index 4e6b0e449ee4..56e4504e7ec9 100644
--- a/sound/firewire/motu/motu-protocol-v3.c
+++ b/sound/firewire/motu/motu-protocol-v3.c
@@ -13,6 +13,12 @@
#define V3_CLOCK_RATE_MASK 0x0000ff00
#define V3_CLOCK_RATE_SHIFT 8
#define V3_CLOCK_SOURCE_MASK 0x000000ff
+#define V3_CLOCK_SRC_INTERNAL 0x00
+#define V3_CLOCK_SRC_WORD_ON_BNC 0x01
+#define V3_CLOCK_SRC_SPH 0x02
+#define V3_CLOCK_SRC_SPDIF_ON_COAX 0x10
+#define V3_CLOCK_SRC_OPT_IFACE_A 0x18
+#define V3_CLOCK_SRC_OPT_IFACE_B 0x19
#define V3_OPT_IFACE_MODE_OFFSET 0x0c94
#define V3_ENABLE_OPT_IN_IFACE_A 0x00000001
@@ -97,28 +103,37 @@ int snd_motu_protocol_v3_set_clock_rate(struct snd_motu *motu,
return 0;
}
-static int detect_clock_source_828mk3(struct snd_motu *motu, u32 data,
- enum snd_motu_clock_source *src)
+int snd_motu_protocol_v3_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src)
{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, V3_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg) & V3_CLOCK_SOURCE_MASK;
+
switch (data) {
- case 0x00:
+ case V3_CLOCK_SRC_INTERNAL:
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
break;
- case 0x01:
+ case V3_CLOCK_SRC_WORD_ON_BNC:
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
break;
- case 0x02:
+ case V3_CLOCK_SRC_SPH:
*src = SND_MOTU_CLOCK_SOURCE_SPH;
break;
- case 0x10:
+ case V3_CLOCK_SRC_SPDIF_ON_COAX:
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
break;
- case 0x18:
- case 0x19:
+ case V3_CLOCK_SRC_OPT_IFACE_A:
+ case V3_CLOCK_SRC_OPT_IFACE_B:
{
__be32 reg;
u32 options;
- int err;
err = snd_motu_transaction_read(motu,
V3_OPT_IFACE_MODE_OFFSET, &reg, sizeof(reg));
@@ -126,7 +141,7 @@ static int detect_clock_source_828mk3(struct snd_motu *motu, u32 data,
return err;
options = be32_to_cpu(reg);
- if (data == 0x18) {
+ if (data == V3_CLOCK_SRC_OPT_IFACE_A) {
if (options & V3_NO_ADAT_OPT_IN_IFACE_A)
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A;
else
@@ -137,7 +152,6 @@ static int detect_clock_source_828mk3(struct snd_motu *motu, u32 data,
else
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT_B;
}
-
break;
}
default:
@@ -148,49 +162,6 @@ static int detect_clock_source_828mk3(struct snd_motu *motu, u32 data,
return 0;
}
-static int v3_detect_clock_source(struct snd_motu *motu, u32 data,
- enum snd_motu_clock_source *src)
-{
- switch (data) {
- case 0x00:
- *src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
- break;
- case 0x01:
- *src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
- break;
- case 0x02:
- *src = SND_MOTU_CLOCK_SOURCE_SPH;
- break;
- case 0x10:
- *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
- break;
- default:
- *src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
- break;
- }
-
- return 0;
-}
-
-int snd_motu_protocol_v3_get_clock_source(struct snd_motu *motu,
- enum snd_motu_clock_source *src)
-{
- __be32 reg;
- u32 data;
- int err;
-
- err = snd_motu_transaction_read(motu, V3_CLOCK_STATUS_OFFSET, &reg,
- sizeof(reg));
- if (err < 0)
- return err;
- data = be32_to_cpu(reg) & V3_CLOCK_SOURCE_MASK;
-
- if (motu->spec == &snd_motu_spec_828mk3)
- return detect_clock_source_828mk3(motu, data, src);
- else
- return v3_detect_clock_source(motu, data, src);
-}
-
int snd_motu_protocol_v3_switch_fetching_mode(struct snd_motu *motu,
bool enable)
{
@@ -284,14 +255,14 @@ int snd_motu_protocol_v3_cache_packet_formats(struct snd_motu *motu)
motu->spec->rx_fixed_pcm_chunks,
sizeof(motu->rx_packet_formats.pcm_chunks));
- if (motu->spec == &snd_motu_spec_828mk3)
+ if (motu->spec == &snd_motu_spec_828mk3_fw || motu->spec == &snd_motu_spec_828mk3_hybrid)
return detect_packet_formats_828mk3(motu, data);
else
return 0;
}
-const struct snd_motu_spec snd_motu_spec_828mk3 = {
+const struct snd_motu_spec snd_motu_spec_828mk3_fw = {
.name = "828mk3",
.protocol_version = SND_MOTU_PROTOCOL_V3,
.flags = SND_MOTU_SPEC_RX_MIDI_3RD_Q |
@@ -300,6 +271,15 @@ const struct snd_motu_spec snd_motu_spec_828mk3 = {
.rx_fixed_pcm_chunks = {14, 14, 10},
};
+const struct snd_motu_spec snd_motu_spec_828mk3_hybrid = {
+ .name = "828mk3",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .flags = SND_MOTU_SPEC_RX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q,
+ .tx_fixed_pcm_chunks = {18, 18, 14},
+ .rx_fixed_pcm_chunks = {14, 14, 14}, // Four additional dummy chunks at the highest rates.
+};
+
const struct snd_motu_spec snd_motu_spec_ultralite_mk3 = {
.name = "UltraLiteMk3",
.protocol_version = SND_MOTU_PROTOCOL_V3,
diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
index 2028c5419f6f..9e6ca39ebd7f 100644
--- a/sound/firewire/motu/motu-stream.c
+++ b/sound/firewire/motu/motu-stream.c
@@ -7,7 +7,7 @@
#include "motu.h"
-#define CALLBACK_TIMEOUT 200
+#define READY_TIMEOUT_MS 200
#define ISOC_COMM_CONTROL_OFFSET 0x0b00
#define ISOC_COMM_CONTROL_MASK 0xffff0000
@@ -153,6 +153,9 @@ int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
fw_iso_resources_free(&motu->tx_resources);
fw_iso_resources_free(&motu->rx_resources);
+ kfree(motu->cache.event_offsets);
+ motu->cache.event_offsets = NULL;
+
err = snd_motu_protocol_set_clock_rate(motu, rate);
if (err < 0) {
dev_err(&motu->unit->device,
@@ -181,6 +184,15 @@ int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
fw_iso_resources_free(&motu->rx_resources);
return err;
}
+
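+ // Size the event offset cache from the syt interval and the buffer size so that it can
+ // always hold the offsets still waiting to be replayed by the IT context.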
+ motu->cache.size = motu->tx_stream.syt_interval * frames_per_buffer;
+ motu->cache.event_offsets = kcalloc(motu->cache.size, sizeof(*motu->cache.event_offsets),
+ GFP_KERNEL);
+ if (!motu->cache.event_offsets) {
+ fw_iso_resources_free(&motu->tx_resources);
+ fw_iso_resources_free(&motu->rx_resources);
+ return -ENOMEM;
+ }
}
return 0;
@@ -260,14 +272,19 @@ int snd_motu_stream_start_duplex(struct snd_motu *motu)
if (err < 0)
goto stop_streams;
- err = amdtp_domain_start(&motu->domain, 0);
+ motu->cache.tail = 0;
+ motu->cache.tx_cycle_count = UINT_MAX;
+ motu->cache.head = 0;
+ motu->cache.rx_cycle_count = UINT_MAX;
+
+ // NOTE: The device requires replay of both the sequence of the number of data
+ // blocks per packet and the sequence of source packet headers per data block as
+ // presentation time.
+ err = amdtp_domain_start(&motu->domain, 0, true, false);
if (err < 0)
goto stop_streams;
- if (!amdtp_stream_wait_callback(&motu->tx_stream,
- CALLBACK_TIMEOUT) ||
- !amdtp_stream_wait_callback(&motu->rx_stream,
- CALLBACK_TIMEOUT)) {
+ if (!amdtp_domain_wait_ready(&motu->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto stop_streams;
}
@@ -296,6 +313,9 @@ void snd_motu_stream_stop_duplex(struct snd_motu *motu)
fw_iso_resources_free(&motu->tx_resources);
fw_iso_resources_free(&motu->rx_resources);
+
+ kfree(motu->cache.event_offsets);
+ motu->cache.event_offsets = NULL;
}
}
@@ -317,7 +337,7 @@ static int init_stream(struct snd_motu *motu, struct amdtp_stream *s)
if (err < 0)
return err;
- err = amdtp_motu_init(s, motu->unit, dir, motu->spec);
+ err = amdtp_motu_init(s, motu->unit, dir, motu->spec, &motu->cache);
if (err < 0)
fw_iso_resources_destroy(resources);
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index a4929c1302dc..543136578c70 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -57,22 +57,31 @@ static void motu_card_free(struct snd_card *card)
snd_motu_transaction_unregister(motu);
snd_motu_stream_destroy_duplex(motu);
+
+ mutex_destroy(&motu->mutex);
+ fw_unit_put(motu->unit);
}
-static void do_registration(struct work_struct *work)
+static int motu_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_motu *motu = container_of(work, struct snd_motu, dwork.work);
+ struct snd_card *card;
+ struct snd_motu *motu;
int err;
- if (motu->registered)
- return;
-
- err = snd_card_new(&motu->unit->device, -1, NULL, THIS_MODULE, 0,
- &motu->card);
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, sizeof(*motu), &card);
if (err < 0)
- return;
- motu->card->private_free = motu_card_free;
- motu->card->private_data = motu;
+ return err;
+ card->private_free = motu_card_free;
+
+ motu = card->private_data;
+ motu->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, motu);
+ motu->card = card;
+
+ motu->spec = (const struct snd_motu_spec *)entry->driver_data;
+ mutex_init(&motu->mutex);
+ spin_lock_init(&motu->lock);
+ init_waitqueue_head(&motu->hwdep_wait);
name_card(motu);
@@ -103,71 +112,28 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(motu->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- motu->registered = true;
-
- return;
-error:
- snd_card_free(motu->card);
- dev_info(&motu->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int motu_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *entry)
-{
- struct snd_motu *motu;
-
- /* Allocate this independently of sound card instance. */
- motu = devm_kzalloc(&unit->device, sizeof(struct snd_motu), GFP_KERNEL);
- if (!motu)
- return -ENOMEM;
- motu->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, motu);
-
- motu->spec = (const struct snd_motu_spec *)entry->driver_data;
- mutex_init(&motu->mutex);
- spin_lock_init(&motu->lock);
- init_waitqueue_head(&motu->hwdep_wait);
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&motu->dwork, do_registration);
- snd_fw_schedule_registration(unit, &motu->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void motu_remove(struct fw_unit *unit)
{
struct snd_motu *motu = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&motu->dwork);
-
- if (motu->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(motu->card);
- }
-
- mutex_destroy(&motu->mutex);
- fw_unit_put(motu->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(motu->card);
}
static void motu_bus_update(struct fw_unit *unit)
{
struct snd_motu *motu = dev_get_drvdata(&unit->device);
- /* Postpone a workqueue for deferred registration. */
- if (!motu->registered)
- snd_fw_schedule_registration(unit, &motu->dwork);
-
/* The handler address register becomes initialized. */
snd_motu_transaction_reregister(motu);
}
@@ -184,13 +150,16 @@ static void motu_bus_update(struct fw_unit *unit)
}
static const struct ieee1394_device_id motu_id_table[] = {
+ SND_MOTU_DEV_ENTRY(0x000001, &snd_motu_spec_828),
+ SND_MOTU_DEV_ENTRY(0x000002, &snd_motu_spec_896),
SND_MOTU_DEV_ENTRY(0x000003, &snd_motu_spec_828mk2),
SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
SND_MOTU_DEV_ENTRY(0x00000d, &snd_motu_spec_ultralite),
SND_MOTU_DEV_ENTRY(0x00000f, &snd_motu_spec_8pre),
- SND_MOTU_DEV_ENTRY(0x000015, &snd_motu_spec_828mk3), // FireWire only.
+ SND_MOTU_DEV_ENTRY(0x000015, &snd_motu_spec_828mk3_fw), // FireWire only.
SND_MOTU_DEV_ENTRY(0x000019, &snd_motu_spec_ultralite_mk3), // FireWire only.
- SND_MOTU_DEV_ENTRY(0x000035, &snd_motu_spec_828mk3), // Hybrid.
+ SND_MOTU_DEV_ENTRY(0x000030, &snd_motu_spec_ultralite_mk3), // Hybrid.
+ SND_MOTU_DEV_ENTRY(0x000035, &snd_motu_spec_828mk3_hybrid), // Hybrid.
SND_MOTU_DEV_ENTRY(0x000033, &snd_motu_spec_audio_express),
SND_MOTU_DEV_ENTRY(0x000045, &snd_motu_spec_4pre),
{ }
diff --git a/sound/firewire/motu/motu.h b/sound/firewire/motu/motu.h
index 3d0236ee6716..73f36d1be515 100644
--- a/sound/firewire/motu/motu.h
+++ b/sound/firewire/motu/motu.h
@@ -39,15 +39,21 @@ struct snd_motu_packet_format {
unsigned char pcm_chunks[3];
};
+struct amdtp_motu_cache {
+ unsigned int *event_offsets;
+ unsigned int size;
+ unsigned int tail;
+ unsigned int tx_cycle_count;
+ unsigned int head;
+ unsigned int rx_cycle_count;
+};
+
struct snd_motu {
struct snd_card *card;
struct fw_unit *unit;
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
-
/* Model dependent information. */
const struct snd_motu_spec *spec;
@@ -70,6 +76,8 @@ struct snd_motu {
wait_queue_head_t hwdep_wait;
struct amdtp_domain domain;
+
+ struct amdtp_motu_cache cache;
};
enum snd_motu_spec_flags {
@@ -99,6 +107,7 @@ enum snd_motu_clock_source {
};
enum snd_motu_protocol_version {
+ SND_MOTU_PROTOCOL_V1,
SND_MOTU_PROTOCOL_V2,
SND_MOTU_PROTOCOL_V3,
};
@@ -106,25 +115,31 @@ enum snd_motu_protocol_version {
struct snd_motu_spec {
const char *const name;
enum snd_motu_protocol_version protocol_version;
- enum snd_motu_spec_flags flags;
+ // The combination of snd_motu_spec_flags enumeration-constants.
+ unsigned int flags;
unsigned char tx_fixed_pcm_chunks[3];
unsigned char rx_fixed_pcm_chunks[3];
};
+extern const struct snd_motu_spec snd_motu_spec_828;
+extern const struct snd_motu_spec snd_motu_spec_896;
+
extern const struct snd_motu_spec snd_motu_spec_828mk2;
extern const struct snd_motu_spec snd_motu_spec_traveler;
extern const struct snd_motu_spec snd_motu_spec_ultralite;
extern const struct snd_motu_spec snd_motu_spec_8pre;
-extern const struct snd_motu_spec snd_motu_spec_828mk3;
+extern const struct snd_motu_spec snd_motu_spec_828mk3_fw;
+extern const struct snd_motu_spec snd_motu_spec_828mk3_hybrid;
extern const struct snd_motu_spec snd_motu_spec_ultralite_mk3;
extern const struct snd_motu_spec snd_motu_spec_audio_express;
extern const struct snd_motu_spec snd_motu_spec_4pre;
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
- const struct snd_motu_spec *spec);
+ const struct snd_motu_spec *spec,
+ struct amdtp_motu_cache *cache);
int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
unsigned int midi_ports,
struct snd_motu_packet_format *formats);
@@ -160,6 +175,16 @@ int snd_motu_create_midi_devices(struct snd_motu *motu);
int snd_motu_create_hwdep_device(struct snd_motu *motu);
+int snd_motu_protocol_v1_get_clock_rate(struct snd_motu *motu,
+ unsigned int *rate);
+int snd_motu_protocol_v1_set_clock_rate(struct snd_motu *motu,
+ unsigned int rate);
+int snd_motu_protocol_v1_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src);
+int snd_motu_protocol_v1_switch_fetching_mode(struct snd_motu *motu,
+ bool enable);
+int snd_motu_protocol_v1_cache_packet_formats(struct snd_motu *motu);
+
int snd_motu_protocol_v2_get_clock_rate(struct snd_motu *motu,
unsigned int *rate);
int snd_motu_protocol_v2_set_clock_rate(struct snd_motu *motu,
@@ -187,6 +212,8 @@ static inline int snd_motu_protocol_get_clock_rate(struct snd_motu *motu,
return snd_motu_protocol_v2_get_clock_rate(motu, rate);
else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
return snd_motu_protocol_v3_get_clock_rate(motu, rate);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V1)
+ return snd_motu_protocol_v1_get_clock_rate(motu, rate);
else
return -ENXIO;
}
@@ -198,6 +225,8 @@ static inline int snd_motu_protocol_set_clock_rate(struct snd_motu *motu,
return snd_motu_protocol_v2_set_clock_rate(motu, rate);
else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
return snd_motu_protocol_v3_set_clock_rate(motu, rate);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V1)
+ return snd_motu_protocol_v1_set_clock_rate(motu, rate);
else
return -ENXIO;
}
@@ -209,6 +238,8 @@ static inline int snd_motu_protocol_get_clock_source(struct snd_motu *motu,
return snd_motu_protocol_v2_get_clock_source(motu, source);
else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
return snd_motu_protocol_v3_get_clock_source(motu, source);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V1)
+ return snd_motu_protocol_v1_get_clock_source(motu, source);
else
return -ENXIO;
}
@@ -220,6 +251,8 @@ static inline int snd_motu_protocol_switch_fetching_mode(struct snd_motu *motu,
return snd_motu_protocol_v2_switch_fetching_mode(motu, enable);
else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
return snd_motu_protocol_v3_switch_fetching_mode(motu, enable);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V1)
+ return snd_motu_protocol_v1_switch_fetching_mode(motu, enable);
else
return -ENXIO;
}
@@ -230,6 +263,8 @@ static inline int snd_motu_protocol_cache_packet_formats(struct snd_motu *motu)
return snd_motu_protocol_v2_cache_packet_formats(motu);
else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
return snd_motu_protocol_v3_cache_packet_formats(motu);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V1)
+ return snd_motu_protocol_v1_cache_packet_formats(motu);
else
return -ENXIO;
}
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 80c9dc13f1b5..fff18b5d4e05 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#define AVC_GENERIC_FRAME_MAXIMUM_BYTES 512
-#define CALLBACK_TIMEOUT 200
+#define READY_TIMEOUT_MS 200
/*
* According to datasheet of Oxford Semiconductor:
@@ -153,12 +153,30 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
struct cmp_connection *conn;
enum cmp_direction c_dir;
enum amdtp_stream_direction s_dir;
+ unsigned int flags = 0;
int err;
+ if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION))
+ flags |= CIP_NONBLOCKING;
+ else
+ flags |= CIP_BLOCKING;
+
+ // OXFW 970/971 has no function to generate playback timing according to the sequence
+ // of values in the syt field, thus the packets should include the NO_INFO value in the
+ // field. However, some models simply ignore data blocks in packets with NO_INFO when
+ // processing audio data.
+ if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET))
+ flags |= CIP_UNAWARE_SYT;
+
if (stream == &oxfw->tx_stream) {
conn = &oxfw->out_conn;
c_dir = CMP_OUTPUT;
s_dir = AMDTP_IN_STREAM;
+
+ if (oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD)
+ flags |= CIP_JUMBO_PAYLOAD;
+ if (oxfw->quirks & SND_OXFW_QUIRK_WRONG_DBS)
+ flags |= CIP_WRONG_DBS;
} else {
conn = &oxfw->in_conn;
c_dir = CMP_INPUT;
@@ -169,24 +187,12 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
if (err < 0)
return err;
- err = amdtp_am824_init(stream, oxfw->unit, s_dir, CIP_NONBLOCKING);
+ err = amdtp_am824_init(stream, oxfw->unit, s_dir, flags);
if (err < 0) {
cmp_connection_destroy(conn);
return err;
}
- /*
- * OXFW starts to transmit packets with non-zero dbc.
- * OXFW postpone transferring packets till handling any asynchronous
- * packets. As a result, next isochronous packet includes more data
- * blocks than IEC 61883-6 defines.
- */
- if (stream == &oxfw->tx_stream) {
- oxfw->tx_stream.flags |= CIP_JUMBO_PAYLOAD;
- if (oxfw->wrong_dbs)
- oxfw->tx_stream.flags |= CIP_WRONG_DBS;
- }
-
return 0;
}
@@ -338,6 +344,9 @@ int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw)
}
if (!amdtp_stream_running(&oxfw->rx_stream)) {
+ unsigned int tx_init_skip_cycles = 0;
+ bool replay_seq = false;
+
err = start_stream(oxfw, &oxfw->rx_stream);
if (err < 0) {
dev_err(&oxfw->unit->device,
@@ -353,26 +362,27 @@ int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw)
"fail to prepare tx stream: %d\n", err);
goto error;
}
+
+ if (oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD) {
+ // Just after the sampling transfer frequency is changed, packet transmission is
+ // skipped for many cycles.
+ tx_init_skip_cycles = 400;
+ } else {
+ replay_seq = true;
+ }
}
- err = amdtp_domain_start(&oxfw->domain, 0);
+ // NOTE: The device ignores the presentation time expressed by the syt field of the
+ // CIP header in received packets. The sequence of the number of data blocks per
+ // packet is what matters for media clock recovery.
+ err = amdtp_domain_start(&oxfw->domain, tx_init_skip_cycles, replay_seq, false);
if (err < 0)
goto error;
- // Wait first packet.
- if (!amdtp_stream_wait_callback(&oxfw->rx_stream,
- CALLBACK_TIMEOUT)) {
+ if (!amdtp_domain_wait_ready(&oxfw->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto error;
}
-
- if (oxfw->has_output) {
- if (!amdtp_stream_wait_callback(&oxfw->tx_stream,
- CALLBACK_TIMEOUT)) {
- err = -ETIMEDOUT;
- goto error;
- }
- }
}
return 0;
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 9eea25c46dc7..cb5b5e3a481b 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -23,6 +23,8 @@
#define OUI_APOGEE 0x0003db
#define MODEL_SATELLITE 0x00200f
+#define MODEL_SCS1M 0x001000
+#define MODEL_DUET_FW 0x01dddd
#define SPECIFIER_1394TA 0x00a02d
#define VERSION_AVC 0x010001
@@ -46,8 +48,6 @@ static bool detect_loud_models(struct fw_unit *unit)
"Onyx-i",
"Onyx 1640i",
"d.Pro",
- "Mackie Onyx Satellite",
- "Tapco LINK.firewire 4x6",
"U.420"};
char model[32];
int err;
@@ -60,7 +60,7 @@ static bool detect_loud_models(struct fw_unit *unit)
return match_string(models, ARRAY_SIZE(models), model) >= 0;
}
-static int name_card(struct snd_oxfw *oxfw)
+static int name_card(struct snd_oxfw *oxfw, const struct ieee1394_device_id *entry)
{
struct fw_device *fw_dev = fw_parent_device(oxfw->unit);
const struct compat_info *info;
@@ -88,10 +88,12 @@ static int name_card(struct snd_oxfw *oxfw)
goto end;
be32_to_cpus(&firmware);
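+ // Firmware versions with 0x970 in the upper 12 bits indicate an OXFW970-based device,
+ // which postpones isochronous packet transmission (see the comment in oxfw_id_table).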
+ if (firmware >> 20 == 0x970)
+ oxfw->quirks |= SND_OXFW_QUIRK_JUMBO_PAYLOAD;
+
/* to apply card definitions */
- if (oxfw->entry->vendor_id == VENDOR_GRIFFIN ||
- oxfw->entry->vendor_id == VENDOR_LACIE) {
- info = (const struct compat_info *)oxfw->entry->driver_data;
+ if (entry->vendor_id == VENDOR_GRIFFIN || entry->vendor_id == VENDOR_LACIE) {
+ info = (const struct compat_info *)entry->driver_data;
d = info->driver_name;
v = info->vendor_name;
m = info->model_name;
@@ -120,9 +122,12 @@ static void oxfw_card_free(struct snd_card *card)
if (oxfw->has_output || oxfw->has_input)
snd_oxfw_stream_destroy_duplex(oxfw);
+
+ mutex_destroy(&oxfw->mutex);
+ fw_unit_put(oxfw->unit);
}
-static int detect_quirks(struct snd_oxfw *oxfw)
+static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id *entry)
{
struct fw_device *fw_dev = fw_parent_device(oxfw->unit);
struct fw_csr_iterator it;
@@ -133,28 +138,37 @@ static int detect_quirks(struct snd_oxfw *oxfw)
* Add ALSA control elements for two models to keep compatibility to
* old firewire-speaker module.
*/
- if (oxfw->entry->vendor_id == VENDOR_GRIFFIN)
+ if (entry->vendor_id == VENDOR_GRIFFIN)
return snd_oxfw_add_spkr(oxfw, false);
- if (oxfw->entry->vendor_id == VENDOR_LACIE)
+ if (entry->vendor_id == VENDOR_LACIE)
return snd_oxfw_add_spkr(oxfw, true);
/*
* Stanton models supports asynchronous transactions for unique MIDI
* messages.
*/
- if (oxfw->entry->vendor_id == OUI_STANTON) {
- /* No physical MIDI ports. */
+ if (entry->vendor_id == OUI_STANTON) {
+ oxfw->quirks |= SND_OXFW_QUIRK_SCS_TRANSACTION;
+ if (entry->model_id == MODEL_SCS1M)
+ oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+
+ // No physical MIDI ports.
oxfw->midi_input_ports = 0;
oxfw->midi_output_ports = 0;
return snd_oxfw_scs1x_add(oxfw);
}
+ if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) {
+ oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION |
+ SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET;
+ }
+
/*
* TASCAM FireOne has physical control and requires a pair of additional
* MIDI ports.
*/
- if (oxfw->entry->vendor_id == VENDOR_TASCAM) {
+ if (entry->vendor_id == VENDOR_TASCAM) {
oxfw->midi_input_ports++;
oxfw->midi_output_ports++;
return 0;
@@ -175,27 +189,35 @@ static int detect_quirks(struct snd_oxfw *oxfw)
* value in 'dbs' field of CIP header against its format information.
*/
if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)
- oxfw->wrong_dbs = true;
+ oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;
return 0;
}
-static void do_registration(struct work_struct *work)
+static int oxfw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
- struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+ struct snd_card *card;
+ struct snd_oxfw *oxfw;
int err;
- if (oxfw->registered)
- return;
+ if (entry->vendor_id == VENDOR_LOUD && entry->model_id == 0 && !detect_loud_models(unit))
+ return -ENODEV;
- err = snd_card_new(&oxfw->unit->device, -1, NULL, THIS_MODULE, 0,
- &oxfw->card);
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, sizeof(*oxfw), &card);
if (err < 0)
- return;
- oxfw->card->private_free = oxfw_card_free;
- oxfw->card->private_data = oxfw;
+ return err;
+ card->private_free = oxfw_card_free;
- err = name_card(oxfw);
+ oxfw = card->private_data;
+ oxfw->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, oxfw);
+ oxfw->card = card;
+
+ mutex_init(&oxfw->mutex);
+ spin_lock_init(&oxfw->lock);
+ init_waitqueue_head(&oxfw->hwdep_wait);
+
+ err = name_card(oxfw, entry);
if (err < 0)
goto error;
@@ -203,7 +225,7 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = detect_quirks(oxfw);
+ err = detect_quirks(oxfw, entry);
if (err < 0)
goto error;
@@ -227,85 +249,38 @@ static void do_registration(struct work_struct *work)
goto error;
}
- err = snd_card_register(oxfw->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- oxfw->registered = true;
-
- return;
-error:
- snd_card_free(oxfw->card);
- dev_info(&oxfw->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int oxfw_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *entry)
-{
- struct snd_oxfw *oxfw;
-
- if (entry->vendor_id == VENDOR_LOUD && !detect_loud_models(unit))
- return -ENODEV;
-
- /* Allocate this independent of sound card instance. */
- oxfw = devm_kzalloc(&unit->device, sizeof(struct snd_oxfw), GFP_KERNEL);
- if (!oxfw)
- return -ENOMEM;
- oxfw->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, oxfw);
-
- oxfw->entry = entry;
- mutex_init(&oxfw->mutex);
- spin_lock_init(&oxfw->lock);
- init_waitqueue_head(&oxfw->hwdep_wait);
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&oxfw->dwork, do_registration);
- snd_fw_schedule_registration(unit, &oxfw->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void oxfw_bus_reset(struct fw_unit *unit)
{
struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
- if (!oxfw->registered)
- snd_fw_schedule_registration(unit, &oxfw->dwork);
-
fcp_bus_reset(oxfw->unit);
- if (oxfw->registered) {
- if (oxfw->has_output || oxfw->has_input) {
- mutex_lock(&oxfw->mutex);
- snd_oxfw_stream_update_duplex(oxfw);
- mutex_unlock(&oxfw->mutex);
- }
-
- if (oxfw->entry->vendor_id == OUI_STANTON)
- snd_oxfw_scs1x_update(oxfw);
+ if (oxfw->has_output || oxfw->has_input) {
+ mutex_lock(&oxfw->mutex);
+ snd_oxfw_stream_update_duplex(oxfw);
+ mutex_unlock(&oxfw->mutex);
}
+
+ if (oxfw->quirks & SND_OXFW_QUIRK_SCS_TRANSACTION)
+ snd_oxfw_scs1x_update(oxfw);
}
static void oxfw_remove(struct fw_unit *unit)
{
struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&oxfw->dwork);
-
- if (oxfw->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(oxfw->card);
- }
-
- mutex_destroy(&oxfw->mutex);
- fw_unit_put(oxfw->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(oxfw->card);
}
static const struct compat_info griffin_firewave = {
@@ -320,81 +295,67 @@ static const struct compat_info lacie_speakers = {
.model_name = "FireWire Speakers",
};
+#define OXFW_DEV_ENTRY(vendor, model, data) \
+{ \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+ IEEE1394_MATCH_MODEL_ID | \
+ IEEE1394_MATCH_SPECIFIER_ID | \
+ IEEE1394_MATCH_VERSION, \
+ .vendor_id = vendor, \
+ .model_id = model, \
+ .specifier_id = SPECIFIER_1394TA, \
+ .version = VERSION_AVC, \
+ .driver_data = (kernel_ulong_t)data, \
+}
+
static const struct ieee1394_device_id oxfw_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID |
- IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .vendor_id = VENDOR_GRIFFIN,
- .model_id = 0x00f970,
- .specifier_id = SPECIFIER_1394TA,
- .version = VERSION_AVC,
- .driver_data = (kernel_ulong_t)&griffin_firewave,
- },
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID |
- IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .vendor_id = VENDOR_LACIE,
- .model_id = 0x00f970,
- .specifier_id = SPECIFIER_1394TA,
- .version = VERSION_AVC,
- .driver_data = (kernel_ulong_t)&lacie_speakers,
- },
- /* Behringer,F-Control Audio 202 */
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID,
- .vendor_id = VENDOR_BEHRINGER,
- .model_id = 0x00fc22,
- },
- /*
- * Any Mackie(Loud) models (name string/model id):
- * Onyx-i series (former models): 0x081216
- * Mackie Onyx Satellite: 0x00200f
- * Tapco LINK.firewire 4x6: 0x000460
- * d.4 pro: Unknown
- * U.420: Unknown
- * U.420d: Unknown
- */
+ //
+ // OXFW970 devices:
+ // Initial firmware has a quirk that postpones isoc packet transmission while finishing an
+ // async transaction. As a result, several isochronous cycles are skipped and the audio data
+ // frames which should have been transferred during those cycles are packed into the packet
+ // of the first isoc cycle after the postponement. Furthermore, the value of the SYT field
+ // in the CIP header is not reliable as synchronization timing.
+ //
+ OXFW_DEV_ENTRY(VENDOR_GRIFFIN, 0x00f970, &griffin_firewave),
+ OXFW_DEV_ENTRY(VENDOR_LACIE, 0x00f970, &lacie_speakers),
+ // Behringer, F-Control Audio 202. The value of the SYT field is not reliable at all.
+ OXFW_DEV_ENTRY(VENDOR_BEHRINGER, 0x00fc22, NULL),
+ // Loud Technologies, Tapco Link.FireWire 4x6. The value of the SYT field is always 0xffff.
+ OXFW_DEV_ENTRY(VENDOR_LOUD, 0x000460, NULL),
+ // Loud Technologies, Mackie Onyx Satellite. Although a revised version of the firmware is
+ // installed to avoid the postponement, the value of the SYT field is always 0xffff.
+ OXFW_DEV_ENTRY(VENDOR_LOUD, MODEL_SATELLITE, NULL),
+ // Miglia HarmonyAudio. Not yet identified.
+
+ //
+ // OXFW971 devices:
+ // The value of the SYT field in the CIP header is reliable enough. Both blocking and
+ // non-blocking transmission methods are available.
+ //
+ // Any Mackie(Loud) models (name string/model id):
+ // Onyx-i series (former models): 0x081216
+ // Onyx 1640i: 0x001640
+ // d.2 pro/d.4 pro (built-in card): Unknown
+ // U.420: Unknown
+ // U.420d: Unknown
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.vendor_id = VENDOR_LOUD,
+ .model_id = 0,
.specifier_id = SPECIFIER_1394TA,
.version = VERSION_AVC,
},
- /* TASCAM, FireOne */
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID,
- .vendor_id = VENDOR_TASCAM,
- .model_id = 0x800007,
- },
- /* Stanton, Stanton Controllers & Systems 1 Mixer (SCS.1m) */
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID,
- .vendor_id = OUI_STANTON,
- .model_id = 0x001000,
- },
- /* Stanton, Stanton Controllers & Systems 1 Deck (SCS.1d) */
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID,
- .vendor_id = OUI_STANTON,
- .model_id = 0x002000,
- },
- // APOGEE, duet FireWire
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID,
- .vendor_id = OUI_APOGEE,
- .model_id = 0x01dddd,
- },
+ // TASCAM, FireOne.
+ OXFW_DEV_ENTRY(VENDOR_TASCAM, 0x800007, NULL),
+ // Stanton, Stanton Controllers & Systems 1 Mixer (SCS.1m).
+ OXFW_DEV_ENTRY(OUI_STANTON, MODEL_SCS1M, NULL),
+ // Stanton, Stanton Controllers & Systems 1 Deck (SCS.1d).
+ OXFW_DEV_ENTRY(OUI_STANTON, 0x002000, NULL),
+ // APOGEE, duet FireWire.
+ OXFW_DEV_ENTRY(OUI_APOGEE, MODEL_DUET_FW, NULL),
{ }
};
MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
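For orientation, the OXFW_DEV_ENTRY() macro introduced above is purely syntactic: each invocation expands to the same explicit initializer that the patch removes. For example, OXFW_DEV_ENTRY(VENDOR_GRIFFIN, 0x00f970, &griffin_firewave) is equivalent to the former Griffin entry:

	{
		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
				  IEEE1394_MATCH_MODEL_ID |
				  IEEE1394_MATCH_SPECIFIER_ID |
				  IEEE1394_MATCH_VERSION,
		.vendor_id	= VENDOR_GRIFFIN,
		.model_id	= 0x00f970,
		.specifier_id	= SPECIFIER_1394TA,
		.version	= VERSION_AVC,
		.driver_data	= (kernel_ulong_t)&griffin_firewave,
	},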
diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h
index fa2d7f9e2dc3..c13034f6c2ca 100644
--- a/sound/firewire/oxfw/oxfw.h
+++ b/sound/firewire/oxfw/oxfw.h
@@ -32,6 +32,23 @@
#include "../amdtp-am824.h"
#include "../cmp.h"
+enum snd_oxfw_quirk {
+ // The device postpones transferring packets while handling an asynchronous transaction. As a
+ // result, the next isochronous packet includes more events than a single packet would normally carry.
+ SND_OXFW_QUIRK_JUMBO_PAYLOAD = 0x01,
+ // The dbs field of CIP header in tx packet is wrong.
+ SND_OXFW_QUIRK_WRONG_DBS = 0x02,
+ // Blocking transmission mode is used.
+ SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04,
+ // Stanton SCS.1d and SCS.1m support a unique transaction.
+ SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08,
+ // Apogee Duet FireWire ignores the data blocks of a packet with NO_INFO for audio data
+ // processing, although the output level meter still moves. Any value in the syt field of a
+ // packet makes the device process audio data, even if the value is invalid from the
+ // viewpoint of IEC 61883-1/6.
+ SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10,
+};
+
/* This is an arbitrary number for convenience. */
#define SND_OXFW_STREAM_FORMAT_ENTRIES 10
struct snd_oxfw {
@@ -40,10 +57,8 @@ struct snd_oxfw {
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
-
- bool wrong_dbs;
+ // A bitwise combination of snd_oxfw_quirk enumeration constants.
+ unsigned int quirks;
bool has_output;
bool has_input;
u8 *tx_stream_formats[SND_OXFW_STREAM_FORMAT_ENTRIES];
@@ -62,7 +77,6 @@ struct snd_oxfw {
bool dev_lock_changed;
wait_queue_head_t hwdep_wait;
- const struct ieee1394_device_id *entry;
void *spec;
struct amdtp_domain domain;
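The quirks bitmask replaces both the former wrong_dbs boolean and the stored device-table pointer. Below is a minimal sketch of how such a bitmask is consumed; the helper name and the probe-time assignment are illustrative only, since this excerpt only shows the SND_OXFW_QUIRK_SCS_TRANSACTION test in the bus-reset handler:

	/* Illustrative only, not the literal driver code. */
	static void example_quirk_usage(struct snd_oxfw *oxfw)
	{
		/* Probe time: accumulate the quirk bits that apply to the detected model. */
		oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;

		/* Run time: handlers test individual bits, as oxfw_bus_reset() does for
		 * the Stanton-specific transaction support. */
		if (oxfw->quirks & SND_OXFW_QUIRK_SCS_TRANSACTION)
			snd_oxfw_scs1x_update(oxfw);
	}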
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c
index f823a2ab3544..64d66a802545 100644
--- a/sound/firewire/tascam/amdtp-tascam.c
+++ b/sound/firewire/tascam/amdtp-tascam.c
@@ -228,6 +228,7 @@ int amdtp_tscm_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir, unsigned int pcm_channels)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
+ unsigned int flags = CIP_NONBLOCKING | CIP_SKIP_DBC_ZERO_CHECK | CIP_UNAWARE_SYT;
struct amdtp_tscm *p;
unsigned int fmt;
int err;
@@ -240,8 +241,7 @@ int amdtp_tscm_init(struct amdtp_stream *s, struct fw_unit *unit,
process_ctx_payloads = process_it_ctx_payloads;
}
- err = amdtp_stream_init(s, unit, dir,
- CIP_NONBLOCKING | CIP_SKIP_DBC_ZERO_CHECK, fmt,
+ err = amdtp_stream_init(s, unit, dir, flags, fmt,
process_ctx_payloads, sizeof(struct amdtp_tscm));
if (err < 0)
return 0;
@@ -249,8 +249,6 @@ int amdtp_tscm_init(struct amdtp_stream *s, struct fw_unit *unit,
if (dir == AMDTP_OUT_STREAM) {
// Use fixed value for FDF field.
s->ctx_data.rx.fdf = 0x00;
- // Not used.
- s->ctx_data.rx.syt_override = 0x0000;
}
/* This protocol uses fixed number of data channels for PCM samples. */
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index eb07e1decf9b..53e094cc411f 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -11,7 +11,7 @@
#define CLOCK_STATUS_MASK 0xffff0000
#define CLOCK_CONFIG_MASK 0x0000ffff
-#define CALLBACK_TIMEOUT 500
+#define READY_TIMEOUT_MS 4000
static int get_clock(struct snd_tscm *tscm, u32 *data)
{
@@ -423,6 +423,8 @@ int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate,
fw_iso_resources_free(&tscm->rx_resources);
return err;
}
+
+ tscm->need_long_tx_init_skip = (rate != curr_rate);
}
return 0;
@@ -454,6 +456,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
if (!amdtp_stream_running(&tscm->rx_stream)) {
int spd = fw_parent_device(tscm->unit)->max_speed;
+ unsigned int tx_init_skip_cycles;
err = set_stream_formats(tscm, rate);
if (err < 0)
@@ -473,14 +476,23 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
if (err < 0)
goto error;
- err = amdtp_domain_start(&tscm->domain, 0);
+ if (tscm->need_long_tx_init_skip)
+ tx_init_skip_cycles = 16000;
+ else
+ tx_init_skip_cycles = 0;
+
+ // MEMO: Just after packet streaming starts, the device transfers packets without any
+ // event. Once enough packets have been received, it multiplexes events into the
+ // packets. However, just after the sampling transfer frequency is changed, it stops
+ // multiplexing during packet transmission and only restarts it after a while. The
+ // device ignores the presentation time expressed by the value of the syt field in the
+ // CIP header of received packets; the sequence of the number of data blocks per
+ // packet is what matters for media clock recovery.
+ err = amdtp_domain_start(&tscm->domain, tx_init_skip_cycles, true, true);
if (err < 0)
return err;
- if (!amdtp_stream_wait_callback(&tscm->rx_stream,
- CALLBACK_TIMEOUT) ||
- !amdtp_stream_wait_callback(&tscm->tx_stream,
- CALLBACK_TIMEOUT)) {
+ if (!amdtp_domain_wait_ready(&tscm->domain, READY_TIMEOUT_MS)) {
err = -ETIMEDOUT;
goto error;
}
@@ -502,6 +514,8 @@ void snd_tscm_stream_stop_duplex(struct snd_tscm *tscm)
fw_iso_resources_free(&tscm->tx_resources);
fw_iso_resources_free(&tscm->rx_resources);
+
+ tscm->need_long_tx_init_skip = false;
}
}
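A quick back-of-the-envelope note on the new skip value: IEEE 1394 isochronous cycles occur every 125 µs, i.e. 8000 cycles per second, so tx_init_skip_cycles = 16000 means roughly 16000 / 8000 = 2 seconds of initial tx packets are discarded after a sampling-frequency change, while an unchanged rate keeps the previous behaviour of skipping nothing.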
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 75f2edd8e78f..eb58d3fcf087 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -90,19 +90,31 @@ static void tscm_card_free(struct snd_card *card)
snd_tscm_transaction_unregister(tscm);
snd_tscm_stream_destroy_duplex(tscm);
+
+ mutex_destroy(&tscm->mutex);
+ fw_unit_put(tscm->unit);
}
-static void do_registration(struct work_struct *work)
+static int snd_tscm_probe(struct fw_unit *unit,
+ const struct ieee1394_device_id *entry)
{
- struct snd_tscm *tscm = container_of(work, struct snd_tscm, dwork.work);
+ struct snd_card *card;
+ struct snd_tscm *tscm;
int err;
- err = snd_card_new(&tscm->unit->device, -1, NULL, THIS_MODULE, 0,
- &tscm->card);
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, sizeof(*tscm), &card);
if (err < 0)
- return;
- tscm->card->private_free = tscm_card_free;
- tscm->card->private_data = tscm;
+ return err;
+ card->private_free = tscm_card_free;
+
+ tscm = card->private_data;
+ tscm->unit = fw_unit_get(unit);
+ dev_set_drvdata(&unit->device, tscm);
+ tscm->card = card;
+
+ mutex_init(&tscm->mutex);
+ spin_lock_init(&tscm->lock);
+ init_waitqueue_head(&tscm->hwdep_wait);
err = identify_model(tscm);
if (err < 0)
@@ -130,81 +142,33 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- err = snd_card_register(tscm->card);
+ err = snd_card_register(card);
if (err < 0)
goto error;
- tscm->registered = true;
-
- return;
-error:
- snd_card_free(tscm->card);
- dev_info(&tscm->unit->device,
- "Sound card registration failed: %d\n", err);
-}
-
-static int snd_tscm_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *entry)
-{
- struct snd_tscm *tscm;
-
- /* Allocate this independent of sound card instance. */
- tscm = devm_kzalloc(&unit->device, sizeof(struct snd_tscm), GFP_KERNEL);
- if (!tscm)
- return -ENOMEM;
- tscm->unit = fw_unit_get(unit);
- dev_set_drvdata(&unit->device, tscm);
-
- mutex_init(&tscm->mutex);
- spin_lock_init(&tscm->lock);
- init_waitqueue_head(&tscm->hwdep_wait);
-
- /* Allocate and register this sound card later. */
- INIT_DEFERRABLE_WORK(&tscm->dwork, do_registration);
- snd_fw_schedule_registration(unit, &tscm->dwork);
-
return 0;
+error:
+ snd_card_free(card);
+ return err;
}
static void snd_tscm_update(struct fw_unit *unit)
{
struct snd_tscm *tscm = dev_get_drvdata(&unit->device);
- /* Postpone a workqueue for deferred registration. */
- if (!tscm->registered)
- snd_fw_schedule_registration(unit, &tscm->dwork);
-
snd_tscm_transaction_reregister(tscm);
- /*
- * After registration, userspace can start packet streaming, then this
- * code block works fine.
- */
- if (tscm->registered) {
- mutex_lock(&tscm->mutex);
- snd_tscm_stream_update_duplex(tscm);
- mutex_unlock(&tscm->mutex);
- }
+ mutex_lock(&tscm->mutex);
+ snd_tscm_stream_update_duplex(tscm);
+ mutex_unlock(&tscm->mutex);
}
static void snd_tscm_remove(struct fw_unit *unit)
{
struct snd_tscm *tscm = dev_get_drvdata(&unit->device);
- /*
- * Confirm to stop the work for registration before the sound card is
- * going to be released. The work is not scheduled again because bus
- * reset handler is not called anymore.
- */
- cancel_delayed_work_sync(&tscm->dwork);
-
- if (tscm->registered) {
- // Block till all of ALSA character devices are released.
- snd_card_free(tscm->card);
- }
-
- mutex_destroy(&tscm->mutex);
- fw_unit_put(tscm->unit);
+ // Block till all of ALSA character devices are released.
+ snd_card_free(tscm->card);
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
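Summarizing the lifecycle change in the tascam hunks above: resource teardown now hangs off the card's private_free callback, so the remove path shrinks to a single snd_card_free() call. The implied teardown order is:

	/*
	 * snd_tscm_remove(unit)
	 *   -> snd_card_free(tscm->card)     blocks until the ALSA character devices
	 *                                    are released by userspace
	 *      -> tscm_card_free(card)       card->private_free, set in probe
	 *         -> snd_tscm_transaction_unregister(tscm)
	 *         -> snd_tscm_stream_destroy_duplex(tscm)
	 *         -> mutex_destroy(&tscm->mutex), fw_unit_put(tscm->unit)
	 */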
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 78b7a08986a1..d07ffcb27be6 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -70,8 +70,6 @@ struct snd_tscm {
struct mutex mutex;
spinlock_t lock;
- bool registered;
- struct delayed_work dwork;
const struct snd_tscm_spec *spec;
struct fw_iso_resources tx_resources;
@@ -99,6 +97,7 @@ struct snd_tscm {
unsigned int push_pos;
struct amdtp_domain domain;
+ bool need_long_tx_init_skip;
};
#define TSCM_ADDR_BASE 0xffff00000000ull
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index d8be146793ee..c9d0ba353463 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -319,6 +319,10 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
.device = 0x4b55,
},
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x4b58,
+ },
#endif
/* Alder Lake */
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index 8634d4f466b3..65012af6a36e 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -50,7 +50,8 @@ int snd_cs8427_reg_write(struct snd_i2c_device *device, unsigned char reg,
buf[0] = reg & 0x7f;
buf[1] = val;
- if ((err = snd_i2c_sendbytes(device, buf, 2)) != 2) {
+ err = snd_i2c_sendbytes(device, buf, 2);
+ if (err != 2) {
snd_printk(KERN_ERR "unable to send bytes 0x%02x:0x%02x "
"to CS8427 (%i)\n", buf[0], buf[1], err);
return err < 0 ? err : -EIO;
@@ -65,12 +66,14 @@ static int snd_cs8427_reg_read(struct snd_i2c_device *device, unsigned char reg)
int err;
unsigned char buf;
- if ((err = snd_i2c_sendbytes(device, &reg, 1)) != 1) {
+ err = snd_i2c_sendbytes(device, &reg, 1);
+ if (err != 1) {
snd_printk(KERN_ERR "unable to send register 0x%x byte "
"to CS8427\n", reg);
return err < 0 ? err : -EIO;
}
- if ((err = snd_i2c_readbytes(device, &buf, 1)) != 1) {
+ err = snd_i2c_readbytes(device, &buf, 1);
+ if (err != 1) {
snd_printk(KERN_ERR "unable to read register 0x%x byte "
"from CS8427\n", reg);
return err < 0 ? err : -EIO;
@@ -108,7 +111,8 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device,
if (!memcmp(hw_data, ndata, count))
return 0;
- if ((err = snd_cs8427_select_corudata(device, udata)) < 0)
+ err = snd_cs8427_select_corudata(device, udata);
+ if (err < 0)
return err;
memcpy(hw_data, ndata, count);
if (udata) {
@@ -209,7 +213,8 @@ int snd_cs8427_init(struct snd_i2c_bus *bus,
goto __fail;
/* send initial values */
memcpy(chip->regmap + (initvals1[0] & 0x7f), initvals1 + 1, 6);
- if ((err = snd_i2c_sendbytes(device, initvals1, 7)) != 7) {
+ err = snd_i2c_sendbytes(device, initvals1, 7);
+ if (err != 7) {
err = err < 0 ? err : -EIO;
goto __fail;
}
@@ -217,11 +222,13 @@ int snd_cs8427_init(struct snd_i2c_bus *bus,
memset(buf, 0, 7);
/* from address 9 to 15 */
buf[0] = 9; /* register */
- if ((err = snd_i2c_sendbytes(device, buf, 7)) != 7)
+ err = snd_i2c_sendbytes(device, buf, 7);
+ if (err != 7)
goto __fail;
/* send transfer initialization sequence */
memcpy(chip->regmap + (initvals2[0] & 0x7f), initvals2 + 1, 3);
- if ((err = snd_i2c_sendbytes(device, initvals2, 4)) != 4) {
+ err = snd_i2c_sendbytes(device, initvals2, 4);
+ if (err != 4) {
err = err < 0 ? err : -EIO;
goto __fail;
}
@@ -383,7 +390,8 @@ static int snd_cs8427_qsubcode_get(struct snd_kcontrol *kcontrol,
int err;
snd_i2c_lock(device->bus);
- if ((err = snd_i2c_sendbytes(device, &reg, 1)) != 1) {
+ err = snd_i2c_sendbytes(device, &reg, 1);
+ if (err != 1) {
snd_printk(KERN_ERR "unable to send register 0x%x byte "
"to CS8427\n", reg);
snd_i2c_unlock(device->bus);
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 2ce0a97957ab..c0cffe28989b 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -97,7 +97,8 @@ int snd_ak4114_create(struct snd_card *card,
chip->rcs0 = reg_read(chip, AK4114_REG_RCS0) & ~(AK4114_QINT | AK4114_CINT);
chip->rcs1 = reg_read(chip, AK4114_REG_RCS1);
- if ((err = snd_device_new(card, SNDRV_DEV_CODEC, chip, &ops)) < 0)
+ err = snd_device_new(card, SNDRV_DEV_CODEC, chip, &ops);
+ if (err < 0)
goto __fail;
if (r_ak4114)
diff --git a/sound/i2c/other/ak4117.c b/sound/i2c/other/ak4117.c
index 905be2d0780b..1bc43e927d82 100644
--- a/sound/i2c/other/ak4117.c
+++ b/sound/i2c/other/ak4117.c
@@ -86,7 +86,8 @@ int snd_ak4117_create(struct snd_card *card, ak4117_read_t *read, ak4117_write_t
chip->rcs1 = reg_read(chip, AK4117_REG_RCS1);
chip->rcs2 = reg_read(chip, AK4117_REG_RCS2);
- if ((err = snd_device_new(card, SNDRV_DEV_CODEC, chip, &ops)) < 0)
+ err = snd_device_new(card, SNDRV_DEV_CODEC, chip, &ops);
+ if (err < 0)
goto __fail;
if (r_ak4117)
diff --git a/sound/i2c/tea6330t.c b/sound/i2c/tea6330t.c
index 08eb6a873768..037d6293f728 100644
--- a/sound/i2c/tea6330t.c
+++ b/sound/i2c/tea6330t.c
@@ -115,7 +115,8 @@ static int snd_tea6330t_put_master_volume(struct snd_kcontrol *kcontrol,
bytes[count++] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] = tea->mright;
}
if (count > 0) {
- if ((err = snd_i2c_sendbytes(tea->device, bytes, count)) < 0)
+ err = snd_i2c_sendbytes(tea->device, bytes, count);
+ if (err < 0)
change = err;
}
snd_i2c_unlock(tea->bus);
@@ -160,7 +161,8 @@ static int snd_tea6330t_put_master_switch(struct snd_kcontrol *kcontrol,
bytes[0] = TEA6330T_SADDR_VOLUME_LEFT;
bytes[1] = tea->regs[TEA6330T_SADDR_VOLUME_LEFT];
bytes[2] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT];
- if ((err = snd_i2c_sendbytes(tea->device, bytes, 3)) < 0)
+ err = snd_i2c_sendbytes(tea->device, bytes, 3);
+ if (err < 0)
change = err;
snd_i2c_unlock(tea->bus);
return change;
@@ -207,7 +209,8 @@ static int snd_tea6330t_put_bass(struct snd_kcontrol *kcontrol,
change = tea->regs[TEA6330T_SADDR_BASS] != val1;
bytes[0] = TEA6330T_SADDR_BASS;
bytes[1] = tea->regs[TEA6330T_SADDR_BASS] = val1;
- if ((err = snd_i2c_sendbytes(tea->device, bytes, 2)) < 0)
+ err = snd_i2c_sendbytes(tea->device, bytes, 2);
+ if (err < 0)
change = err;
snd_i2c_unlock(tea->bus);
return change;
@@ -254,7 +257,8 @@ static int snd_tea6330t_put_treble(struct snd_kcontrol *kcontrol,
change = tea->regs[TEA6330T_SADDR_TREBLE] != val1;
bytes[0] = TEA6330T_SADDR_TREBLE;
bytes[1] = tea->regs[TEA6330T_SADDR_TREBLE] = val1;
- if ((err = snd_i2c_sendbytes(tea->device, bytes, 2)) < 0)
+ err = snd_i2c_sendbytes(tea->device, bytes, 2);
+ if (err < 0)
change = err;
snd_i2c_unlock(tea->bus);
return change;
@@ -280,14 +284,15 @@ int snd_tea6330t_update_mixer(struct snd_card *card,
struct tea6330t *tea;
const struct snd_kcontrol_new *knew;
unsigned int idx;
- int err = -ENOMEM;
+ int err;
u8 default_treble, default_bass;
unsigned char bytes[7];
tea = kzalloc(sizeof(*tea), GFP_KERNEL);
if (tea == NULL)
return -ENOMEM;
- if ((err = snd_i2c_device_create(bus, "TEA6330T", TEA6330T_ADDR, &device)) < 0) {
+ err = snd_i2c_device_create(bus, "TEA6330T", TEA6330T_ADDR, &device);
+ if (err < 0) {
kfree(tea);
return err;
}
@@ -327,18 +332,21 @@ int snd_tea6330t_update_mixer(struct snd_card *card,
bytes[0] = TEA6330T_SADDR_VOLUME_LEFT;
for (idx = 0; idx < 6; idx++)
bytes[idx+1] = tea->regs[idx];
- if ((err = snd_i2c_sendbytes(device, bytes, 7)) < 0)
+ err = snd_i2c_sendbytes(device, bytes, 7);
+ if (err < 0)
goto __error;
strcat(card->mixername, ",TEA6330T");
- if ((err = snd_component_add(card, "TEA6330T")) < 0)
+ err = snd_component_add(card, "TEA6330T");
+ if (err < 0)
goto __error;
for (idx = 0; idx < ARRAY_SIZE(snd_tea6330t_controls); idx++) {
knew = &snd_tea6330t_controls[idx];
if (tea->treble == 0 && !strcmp(knew->name, "Tone Control - Treble"))
continue;
- if ((err = snd_ctl_add(card, snd_ctl_new1(knew, tea))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(knew, tea));
+ if (err < 0)
goto __error;
}
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index f11af983b3b6..fa5bed0d5a6f 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -131,16 +131,18 @@ static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
return error;
chip = card->private_data;
- if ((error = snd_card_ad1816a_pnp(dev, pcard, pid))) {
+ error = snd_card_ad1816a_pnp(dev, pcard, pid);
+ if (error) {
snd_card_free(card);
return error;
}
- if ((error = snd_ad1816a_create(card, port[dev],
- irq[dev],
- dma1[dev],
- dma2[dev],
- chip)) < 0) {
+ error = snd_ad1816a_create(card, port[dev],
+ irq[dev],
+ dma1[dev],
+ dma2[dev],
+ chip);
+ if (error) {
snd_card_free(card);
return error;
}
@@ -152,12 +154,14 @@ static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
sprintf(card->longname, "%s, SS at 0x%lx, irq %d, dma %d&%d",
card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]);
- if ((error = snd_ad1816a_pcm(chip, 0)) < 0) {
+ error = snd_ad1816a_pcm(chip, 0);
+ if (error < 0) {
snd_card_free(card);
return error;
}
- if ((error = snd_ad1816a_mixer(chip)) < 0) {
+ error = snd_ad1816a_mixer(chip);
+ if (error < 0) {
snd_card_free(card);
return error;
}
@@ -189,7 +193,8 @@ static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
}
}
- if ((error = snd_card_register(card)) < 0) {
+ error = snd_card_register(card);
+ if (error < 0) {
snd_card_free(card);
return error;
}
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index 01381fe7c0c9..6d4999b30461 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -155,7 +155,8 @@ static void snd_ad1816a_close(struct snd_ad1816a *chip, unsigned int mode)
snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
AD1816A_TIMER_IRQ_ENABLE, 0x0000);
}
- if (!((chip->mode &= ~mode) & AD1816A_MODE_OPEN))
+ chip->mode &= ~mode;
+ if (!(chip->mode & AD1816A_MODE_OPEN))
chip->mode = 0;
spin_unlock_irqrestore(&chip->lock, flags);
@@ -426,7 +427,8 @@ static int snd_ad1816a_playback_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int error;
- if ((error = snd_ad1816a_open(chip, AD1816A_MODE_PLAYBACK)) < 0)
+ error = snd_ad1816a_open(chip, AD1816A_MODE_PLAYBACK);
+ if (error < 0)
return error;
runtime->hw = snd_ad1816a_playback;
snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.buffer_bytes_max);
@@ -441,7 +443,8 @@ static int snd_ad1816a_capture_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int error;
- if ((error = snd_ad1816a_open(chip, AD1816A_MODE_CAPTURE)) < 0)
+ error = snd_ad1816a_open(chip, AD1816A_MODE_CAPTURE);
+ if (error < 0)
return error;
runtime->hw = snd_ad1816a_capture;
snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.buffer_bytes_max);
@@ -586,7 +589,8 @@ int snd_ad1816a_create(struct snd_card *card,
chip->dma1 = -1;
chip->dma2 = -1;
- if ((chip->res_port = request_region(port, 16, "AD1816A")) == NULL) {
+ chip->res_port = request_region(port, 16, "AD1816A");
+ if (!chip->res_port) {
snd_printk(KERN_ERR "ad1816a: can't grab port 0x%lx\n", port);
snd_ad1816a_free(chip);
return -EBUSY;
@@ -615,7 +619,8 @@ int snd_ad1816a_create(struct snd_card *card,
chip->port = port;
spin_lock_init(&chip->lock);
- if ((error = snd_ad1816a_probe(chip))) {
+ error = snd_ad1816a_probe(chip);
+ if (error) {
snd_ad1816a_free(chip);
return error;
}
@@ -623,7 +628,8 @@ int snd_ad1816a_create(struct snd_card *card,
snd_ad1816a_init(chip);
/* Register device */
- if ((error = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ error = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (error < 0) {
snd_ad1816a_free(chip);
return error;
}
@@ -652,7 +658,8 @@ int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device)
int error;
struct snd_pcm *pcm;
- if ((error = snd_pcm_new(chip->card, "AD1816A", device, 1, 1, &pcm)))
+ error = snd_pcm_new(chip->card, "AD1816A", device, 1, 1, &pcm);
+ if (error)
return error;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ad1816a_playback_ops);
@@ -682,7 +689,8 @@ int snd_ad1816a_timer(struct snd_ad1816a *chip, int device)
tid.card = chip->card->number;
tid.device = device;
tid.subdevice = 0;
- if ((error = snd_timer_new(chip->card, "AD1816A", &tid, &timer)) < 0)
+ error = snd_timer_new(chip->card, "AD1816A", &tid, &timer);
+ if (error < 0)
return error;
strcpy(timer->name, snd_ad1816a_chip_id(chip));
timer->private_data = chip;
@@ -944,7 +952,8 @@ int snd_ad1816a_mixer(struct snd_ad1816a *chip)
strcpy(card->mixername, snd_ad1816a_chip_id(chip));
for (idx = 0; idx < ARRAY_SIZE(snd_ad1816a_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ad1816a_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_ad1816a_controls[idx], chip));
+ if (err < 0)
return err;
}
return 0;
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index bacb7a1b930c..d4597fdfe091 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -177,7 +177,8 @@ static int snd_card_als100_probe(int dev,
return error;
acard = card->private_data;
- if ((error = snd_card_als100_pnp(dev, acard, pcard, pid))) {
+ error = snd_card_als100_pnp(dev, acard, pcard, pid);
+ if (error) {
snd_card_free(card);
return error;
}
@@ -211,12 +212,14 @@ static int snd_card_als100_probe(int dev,
dma16[dev]);
}
- if ((error = snd_sb16dsp_pcm(chip, 0)) < 0) {
+ error = snd_sb16dsp_pcm(chip, 0);
+ if (error < 0) {
snd_card_free(card);
return error;
}
- if ((error = snd_sbmixer_new(chip)) < 0) {
+ error = snd_sbmixer_new(chip);
+ if (error < 0) {
snd_card_free(card);
return error;
}
@@ -245,18 +248,21 @@ static int snd_card_als100_probe(int dev,
snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n",
fm_port[dev], fm_port[dev] + 2);
} else {
- if ((error = snd_opl3_timer_new(opl3, 0, 1)) < 0) {
+ error = snd_opl3_timer_new(opl3, 0, 1);
+ if (error < 0) {
snd_card_free(card);
return error;
}
- if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ error = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (error < 0) {
snd_card_free(card);
return error;
}
}
}
- if ((error = snd_card_register(card)) < 0) {
+ error = snd_card_register(card);
+ if (error < 0) {
snd_card_free(card);
return error;
}
diff --git a/sound/isa/azt2320.c b/sound/isa/azt2320.c
index 867e9ae8f65a..dd5c059b3744 100644
--- a/sound/isa/azt2320.c
+++ b/sound/isa/azt2320.c
@@ -148,9 +148,11 @@ static int snd_card_azt2320_enable_wss(unsigned long port)
{
int error;
- if ((error = snd_card_azt2320_command(port, 0x09)))
+ error = snd_card_azt2320_command(port, 0x09);
+ if (error)
return error;
- if ((error = snd_card_azt2320_command(port, 0x00)))
+ error = snd_card_azt2320_command(port, 0x00);
+ if (error)
return error;
mdelay(5);
@@ -174,12 +176,14 @@ static int snd_card_azt2320_probe(int dev,
return error;
acard = card->private_data;
- if ((error = snd_card_azt2320_pnp(dev, acard, pcard, pid))) {
+ error = snd_card_azt2320_pnp(dev, acard, pcard, pid);
+ if (error) {
snd_card_free(card);
return error;
}
- if ((error = snd_card_azt2320_enable_wss(port[dev]))) {
+ error = snd_card_azt2320_enable_wss(port[dev]);
+ if (error) {
snd_card_free(card);
return error;
}
@@ -228,18 +232,21 @@ static int snd_card_azt2320_probe(int dev,
snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n",
fm_port[dev], fm_port[dev] + 2);
} else {
- if ((error = snd_opl3_timer_new(opl3, 1, 2)) < 0) {
+ error = snd_opl3_timer_new(opl3, 1, 2);
+ if (error < 0) {
snd_card_free(card);
return error;
}
- if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ error = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (error < 0) {
snd_card_free(card);
return error;
}
}
}
- if ((error = snd_card_register(card)) < 0) {
+ error = snd_card_register(card);
+ if (error < 0) {
snd_card_free(card);
return error;
}
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index bc112df10fc5..ef6d0a20efd8 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -284,7 +284,8 @@ static int cmi8330_add_sb_mixers(struct snd_sb *chip)
}
for (idx = 0; idx < ARRAY_SIZE(cmi8330_sb_mixers); idx++) {
- if ((err = snd_sbmixer_add_ctl_elem(chip, &cmi8330_sb_mixers[idx])) < 0)
+ err = snd_sbmixer_add_ctl_elem(chip, &cmi8330_sb_mixers[idx]);
+ if (err < 0)
return err;
}
return 0;
@@ -307,7 +308,8 @@ static int snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330 *acard)
}
#ifdef ENABLE_SB_MIXER
- if ((err = cmi8330_add_sb_mixers(acard->sb)) < 0)
+ err = cmi8330_add_sb_mixers(acard->sb);
+ if (err < 0)
return err;
#endif
return 0;
@@ -432,7 +434,8 @@ static int snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *chip)
snd_cmi8330_capture_open
};
- if ((err = snd_pcm_new(card, (chip->type == CMI8329) ? "CMI8329" : "CMI8330", 0, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(card, (chip->type == CMI8329) ? "CMI8329" : "CMI8330", 0, 1, 1, &pcm);
+ if (err < 0)
return err;
strcpy(pcm->name, (chip->type == CMI8329) ? "CMI8329" : "CMI8330");
pcm->private_data = chip;
@@ -536,18 +539,19 @@ static int snd_cmi8330_probe(struct snd_card *card, int dev)
return -ENODEV;
}
- if ((err = snd_sbdsp_create(card, sbport[dev],
- sbirq[dev],
- snd_sb16dsp_interrupt,
- sbdma8[dev],
- sbdma16[dev],
- SB_HW_AUTO, &acard->sb)) < 0) {
+ err = snd_sbdsp_create(card, sbport[dev],
+ sbirq[dev],
+ snd_sb16dsp_interrupt,
+ sbdma8[dev],
+ sbdma16[dev],
+ SB_HW_AUTO, &acard->sb);
+ if (err < 0) {
snd_printk(KERN_ERR PFX "SB16 device busy??\n");
return err;
}
if (acard->sb->hardware != SB_HW_16) {
snd_printk(KERN_ERR PFX "SB16 not found during probe\n");
- return err;
+ return -ENODEV;
}
snd_wss_out(acard->wss, CS4231_MISC_INFO, 0x40); /* switch on MODE2 */
@@ -555,12 +559,14 @@ static int snd_cmi8330_probe(struct snd_card *card, int dev)
snd_wss_out(acard->wss, i,
snd_cmi8330_image[i - CMI8330_RMUX3D]);
- if ((err = snd_cmi8330_mixer(card, acard)) < 0) {
+ err = snd_cmi8330_mixer(card, acard);
+ if (err < 0) {
snd_printk(KERN_ERR PFX "failed to create mixers\n");
return err;
}
- if ((err = snd_cmi8330_pcm(card, acard)) < 0) {
+ err = snd_cmi8330_pcm(card, acard);
+ if (err < 0) {
snd_printk(KERN_ERR PFX "failed to create pcms\n");
return err;
}
@@ -622,7 +628,8 @@ static int snd_cmi8330_isa_probe(struct device *pdev,
err = snd_cmi8330_card_new(pdev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_cmi8330_probe(card, dev)) < 0) {
+ err = snd_cmi8330_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -683,12 +690,14 @@ static int snd_cmi8330_pnp_detect(struct pnp_card_link *pcard,
res = snd_cmi8330_card_new(&pcard->card->dev, dev, &card);
if (res < 0)
return res;
- if ((res = snd_cmi8330_pnp(dev, card->private_data, pcard, pid)) < 0) {
+ res = snd_cmi8330_pnp(dev, card->private_data, pcard, pid);
+ if (res < 0) {
snd_printk(KERN_ERR PFX "PnP detection failed\n");
snd_card_free(card);
return res;
}
- if ((res = snd_cmi8330_probe(card, dev)) < 0) {
+ res = snd_cmi8330_probe(card, dev);
+ if (res < 0) {
snd_card_free(card);
return res;
}
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 186d7d4db45e..0b7fd17f8ecc 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -339,11 +339,13 @@ static int snd_cs423x_probe(struct snd_card *card, int dev)
int err;
acard = card->private_data;
- if (sb_port[dev] > 0 && sb_port[dev] != SNDRV_AUTO_PORT)
- if ((acard->res_sb_port = request_region(sb_port[dev], 16, IDENT " SB")) == NULL) {
+ if (sb_port[dev] > 0 && sb_port[dev] != SNDRV_AUTO_PORT) {
+ acard->res_sb_port = request_region(sb_port[dev], 16, IDENT " SB");
+ if (!acard->res_sb_port) {
printk(KERN_ERR IDENT ": unable to register SB port at 0x%lx\n", sb_port[dev]);
return -EBUSY;
}
+ }
err = snd_cs4236_create(card, port[dev], cport[dev],
irq[dev],
@@ -393,7 +395,8 @@ static int snd_cs423x_probe(struct snd_card *card, int dev)
OPL3_HW_OPL3_CS, 0, &opl3) < 0) {
printk(KERN_WARNING IDENT ": OPL3 not detected\n");
} else {
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0)
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0)
return err;
}
}
@@ -444,7 +447,8 @@ static int snd_cs423x_isa_probe(struct device *pdev,
err = snd_cs423x_card_new(pdev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_cs423x_probe(card, dev)) < 0) {
+ err = snd_cs423x_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -538,7 +542,8 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
snd_card_free(card);
return err;
}
- if ((err = snd_cs423x_probe(card, dev)) < 0) {
+ err = snd_cs423x_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -592,13 +597,15 @@ static int snd_cs423x_pnpc_detect(struct pnp_card_link *pcard,
res = snd_cs423x_card_new(&pcard->card->dev, dev, &card);
if (res < 0)
return res;
- if ((res = snd_card_cs423x_pnpc(dev, card->private_data, pcard, pid)) < 0) {
+ res = snd_card_cs423x_pnpc(dev, card->private_data, pcard, pid);
+ if (res < 0) {
printk(KERN_ERR "isapnp detection failed and probing for " IDENT
" is not supported\n");
snd_card_free(card);
return res;
}
- if ((res = snd_cs423x_probe(card, dev)) < 0) {
+ res = snd_cs423x_probe(card, dev);
+ if (res < 0) {
snd_card_free(card);
return res;
}
diff --git a/sound/isa/cs423x/cs4236_lib.c b/sound/isa/cs423x/cs4236_lib.c
index 52f05adb1870..63957aea456b 100644
--- a/sound/isa/cs423x/cs4236_lib.c
+++ b/sound/isa/cs423x/cs4236_lib.c
@@ -1030,12 +1030,14 @@ int snd_cs4236_mixer(struct snd_wss *chip)
if (chip->hardware == WSS_HW_CS4235 ||
chip->hardware == WSS_HW_CS4239) {
for (idx = 0; idx < ARRAY_SIZE(snd_cs4235_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4235_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4235_controls[idx], chip));
+ if (err < 0)
return err;
}
} else {
for (idx = 0; idx < ARRAY_SIZE(snd_cs4236_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4236_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4236_controls[idx], chip));
+ if (err < 0)
return err;
}
}
@@ -1058,13 +1060,15 @@ int snd_cs4236_mixer(struct snd_wss *chip)
kcontrol = NULL;
}
for (idx = 0; idx < count; idx++, kcontrol++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(kcontrol, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(kcontrol, chip));
+ if (err < 0)
return err;
}
if (chip->hardware == WSS_HW_CS4237B ||
chip->hardware == WSS_HW_CS4238B) {
for (idx = 0; idx < ARRAY_SIZE(snd_cs4236_iec958_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4236_iec958_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4236_iec958_controls[idx], chip));
+ if (err < 0)
return err;
}
}
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index 1816e55c6edf..8554cb2263c1 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -971,7 +971,8 @@ int snd_es1688_mixer(struct snd_card *card, struct snd_es1688 *chip)
strcpy(card->mixername, snd_es1688_chip_id(chip));
for (idx = 0; idx < ARRAY_SIZE(snd_es1688_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es1688_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es1688_controls[idx], chip));
+ if (err < 0)
return err;
}
for (idx = 0; idx < ES1688_INIT_TABLE_SIZE; idx++) {
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 375a4a6a4717..68b9c59e1127 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -1573,7 +1573,8 @@ static int snd_es18xx_identify(struct snd_es18xx *chip)
udelay(10);
chip->ctrl_port += inb(chip->port + 0x05);
- if ((chip->res_ctrl_port = request_region(chip->ctrl_port, 8, "ES18xx - CTRL")) == NULL) {
+ chip->res_ctrl_port = request_region(chip->ctrl_port, 8, "ES18xx - CTRL");
+ if (!chip->res_ctrl_port) {
snd_printk(KERN_ERR PFX "unable go grab port 0x%lx\n", chip->ctrl_port);
return -EBUSY;
}
@@ -1832,41 +1833,48 @@ static int snd_es18xx_mixer(struct snd_card *card)
break;
}
}
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
if (chip->caps & ES18XX_PCM2) {
for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_pcm2_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm2_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm2_controls[idx], chip));
+ if (err < 0)
return err;
}
} else {
for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_pcm1_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm1_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_pcm1_controls[idx], chip));
+ if (err < 0)
return err;
}
}
if (chip->caps & ES18XX_RECMIX) {
for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_recmix_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_recmix_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_recmix_controls[idx], chip));
+ if (err < 0)
return err;
}
}
switch (chip->version) {
default:
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre1_control, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre1_control, chip));
+ if (err < 0)
return err;
break;
case 0x1869:
case 0x1879:
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre2_control, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_micpre2_control, chip));
+ if (err < 0)
return err;
break;
}
if (chip->caps & ES18XX_SPATIALIZER) {
for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_spatializer_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_spatializer_controls[idx], chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_es18xx_spatializer_controls[idx], chip));
+ if (err < 0)
return err;
}
}
@@ -1879,7 +1887,8 @@ static int snd_es18xx_mixer(struct snd_card *card)
else
chip->hw_switch = kctl;
kctl->private_free = snd_es18xx_hwv_free;
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
@@ -2154,7 +2163,8 @@ static int snd_es18xx_isa_probe1(int dev, struct device *devptr)
err = snd_es18xx_card_new(devptr, dev, &card);
if (err < 0)
return err;
- if ((err = snd_audiodrive_probe(card, dev)) < 0) {
+ err = snd_audiodrive_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2169,19 +2179,22 @@ static int snd_es18xx_isa_probe(struct device *pdev, unsigned int dev)
static const int possible_dmas[] = {1, 0, 3, 5, -1};
if (irq[dev] == SNDRV_AUTO_IRQ) {
- if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) {
+ irq[dev] = snd_legacy_find_free_irq(possible_irqs);
+ if (irq[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
return -EBUSY;
}
}
if (dma1[dev] == SNDRV_AUTO_DMA) {
- if ((dma1[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) {
+ dma1[dev] = snd_legacy_find_free_dma(possible_dmas);
+ if (dma1[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free DMA1\n");
return -EBUSY;
}
}
if (dma2[dev] == SNDRV_AUTO_DMA) {
- if ((dma2[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) {
+ dma2[dev] = snd_legacy_find_free_dma(possible_dmas);
+ if (dma2[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free DMA2\n");
return -EBUSY;
}
@@ -2257,11 +2270,13 @@ static int snd_audiodrive_pnp_detect(struct pnp_dev *pdev,
err = snd_es18xx_card_new(&pdev->dev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_audiodrive_pnp(dev, card->private_data, pdev)) < 0) {
+ err = snd_audiodrive_pnp(dev, card->private_data, pdev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_audiodrive_probe(card, dev)) < 0) {
+ err = snd_audiodrive_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2315,11 +2330,13 @@ static int snd_audiodrive_pnpc_detect(struct pnp_card_link *pcard,
if (res < 0)
return res;
- if ((res = snd_audiodrive_pnpc(dev, card->private_data, pcard, pid)) < 0) {
+ res = snd_audiodrive_pnpc(dev, card->private_data, pcard, pid);
+ if (res < 0) {
snd_card_free(card);
return res;
}
- if ((res = snd_audiodrive_probe(card, dev)) < 0) {
+ res = snd_audiodrive_probe(card, dev);
+ if (res < 0) {
snd_card_free(card);
return res;
}
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index b7518122a10d..3b46490271fe 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -156,12 +156,14 @@ int snd_gus_create(struct snd_card *card,
gus->gf1.reg_timerctrl = GUSP(gus, TIMERCNTRL);
gus->gf1.reg_timerdata = GUSP(gus, TIMERDATA);
/* allocate resources */
- if ((gus->gf1.res_port1 = request_region(port, 16, "GUS GF1 (Adlib/SB)")) == NULL) {
+ gus->gf1.res_port1 = request_region(port, 16, "GUS GF1 (Adlib/SB)");
+ if (!gus->gf1.res_port1) {
snd_printk(KERN_ERR "gus: can't grab SB port 0x%lx\n", port);
snd_gus_free(gus);
return -EBUSY;
}
- if ((gus->gf1.res_port2 = request_region(port + 0x100, 12, "GUS GF1 (Synth)")) == NULL) {
+ gus->gf1.res_port2 = request_region(port + 0x100, 12, "GUS GF1 (Synth)");
+ if (!gus->gf1.res_port2) {
snd_printk(KERN_ERR "gus: can't grab synth port 0x%lx\n", port + 0x100);
snd_gus_free(gus);
return -EBUSY;
@@ -206,7 +208,8 @@ int snd_gus_create(struct snd_card *card,
gus->gf1.pcm_channels = pcm_channels;
gus->gf1.volume_ramp = 25;
gus->gf1.smooth_pan = 1;
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, gus, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, gus, &ops);
+ if (err < 0) {
snd_gus_free(gus);
return err;
}
@@ -384,7 +387,7 @@ static int snd_gus_check_version(struct snd_gus_card * gus)
}
}
}
- strcpy(card->shortname, card->longname);
+ strscpy(card->shortname, card->longname, sizeof(card->shortname));
gus->uart_enable = 1; /* standard GUSes don't have MIDI UART trouble */
snd_gus_init_control(gus);
return 0;
@@ -395,14 +398,17 @@ int snd_gus_initialize(struct snd_gus_card *gus)
int err;
if (!gus->interwave) {
- if ((err = snd_gus_check_version(gus)) < 0) {
+ err = snd_gus_check_version(gus);
+ if (err < 0) {
snd_printk(KERN_ERR "version check failed\n");
return err;
}
- if ((err = snd_gus_detect_memory(gus)) < 0)
+ err = snd_gus_detect_memory(gus);
+ if (err < 0)
return err;
}
- if ((err = snd_gus_init_dma_irq(gus, 1)) < 0)
+ err = snd_gus_init_dma_irq(gus, 1);
+ if (err < 0)
return err;
snd_gf1_start(gus);
gus->initialized = 1;
diff --git a/sound/isa/gus/gus_mem.c b/sound/isa/gus/gus_mem.c
index cb02d18dde60..ff9480f249fe 100644
--- a/sound/isa/gus/gus_mem.c
+++ b/sound/isa/gus/gus_mem.c
@@ -210,7 +210,8 @@ int snd_gf1_mem_free(struct snd_gf1_mem * alloc, unsigned int address)
struct snd_gf1_mem_block *block;
snd_gf1_mem_lock(alloc, 0);
- if ((block = snd_gf1_mem_look(alloc, address)) != NULL) {
+ block = snd_gf1_mem_look(alloc, address);
+ if (block) {
result = snd_gf1_mem_xfree(alloc, block);
snd_gf1_mem_lock(alloc, 1);
return result;
diff --git a/sound/isa/gus/gus_mixer.c b/sound/isa/gus/gus_mixer.c
index 201d0c40d0d9..03f9cfcbf601 100644
--- a/sound/isa/gus/gus_mixer.c
+++ b/sound/isa/gus/gus_mixer.c
@@ -162,12 +162,14 @@ int snd_gf1_new_mixer(struct snd_gus_card * gus)
if (!gus->ics_flag) {
max = gus->ess_flag ? 1 : ARRAY_SIZE(snd_gf1_controls);
for (idx = 0; idx < max; idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_gf1_controls[idx], gus))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_gf1_controls[idx], gus));
+ if (err < 0)
return err;
}
} else {
for (idx = 0; idx < ARRAY_SIZE(snd_ics_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ics_controls[idx], gus))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_ics_controls[idx], gus));
+ if (err < 0)
return err;
}
}
diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
index aca4ab90e5bc..230f65a0e4b0 100644
--- a/sound/isa/gus/gus_pcm.c
+++ b/sound/isa/gus/gus_pcm.c
@@ -430,17 +430,19 @@ static int snd_gf1_pcm_playback_hw_params(struct snd_pcm_substream *substream,
snd_gf1_mem_free(&gus->gf1.mem_alloc, pcmp->memory);
pcmp->memory = 0;
}
- if ((block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
- SNDRV_GF1_MEM_OWNER_DRIVER,
- "GF1 PCM",
- runtime->dma_bytes, 1, 32,
- NULL)) == NULL)
+ block = snd_gf1_mem_alloc(&gus->gf1.mem_alloc,
+ SNDRV_GF1_MEM_OWNER_DRIVER,
+ "GF1 PCM",
+ runtime->dma_bytes, 1, 32,
+ NULL);
+ if (!block)
return -ENOMEM;
pcmp->memory = block->ptr;
}
pcmp->voices = params_channels(hw_params);
if (pcmp->pvoices[0] == NULL) {
- if ((pcmp->pvoices[0] = snd_gf1_alloc_voice(pcmp->gus, SNDRV_GF1_VOICE_TYPE_PCM, 0, 0)) == NULL)
+ pcmp->pvoices[0] = snd_gf1_alloc_voice(pcmp->gus, SNDRV_GF1_VOICE_TYPE_PCM, 0, 0);
+ if (!pcmp->pvoices[0])
return -ENOMEM;
pcmp->pvoices[0]->handler_wave = snd_gf1_pcm_interrupt_wave;
pcmp->pvoices[0]->handler_volume = snd_gf1_pcm_interrupt_volume;
@@ -448,7 +450,8 @@ static int snd_gf1_pcm_playback_hw_params(struct snd_pcm_substream *substream,
pcmp->pvoices[0]->private_data = pcmp;
}
if (pcmp->voices > 1 && pcmp->pvoices[1] == NULL) {
- if ((pcmp->pvoices[1] = snd_gf1_alloc_voice(pcmp->gus, SNDRV_GF1_VOICE_TYPE_PCM, 0, 0)) == NULL)
+ pcmp->pvoices[1] = snd_gf1_alloc_voice(pcmp->gus, SNDRV_GF1_VOICE_TYPE_PCM, 0, 0);
+ if (!pcmp->pvoices[1])
return -ENOMEM;
pcmp->pvoices[1]->handler_wave = snd_gf1_pcm_interrupt_wave;
pcmp->pvoices[1]->handler_volume = snd_gf1_pcm_interrupt_volume;
@@ -689,7 +692,8 @@ static int snd_gf1_pcm_playback_open(struct snd_pcm_substream *substream)
printk(KERN_DEBUG "playback.buffer = 0x%lx, gf1.pcm_buffer = 0x%lx\n",
(long) pcm->playback.buffer, (long) gus->gf1.pcm_buffer);
#endif
- if ((err = snd_gf1_dma_init(gus)) < 0)
+ err = snd_gf1_dma_init(gus);
+ if (err < 0)
return err;
pcmp->flags = SNDRV_GF1_PCM_PFLG_NONE;
pcmp->substream = substream;
@@ -888,7 +892,8 @@ int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index)
kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control1, gus);
else
kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control, gus);
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
kctl->id.index = control_index;
diff --git a/sound/isa/gus/gus_uart.c b/sound/isa/gus/gus_uart.c
index 4fb4ed79e262..3975848160e7 100644
--- a/sound/isa/gus/gus_uart.c
+++ b/sound/isa/gus/gus_uart.c
@@ -232,7 +232,8 @@ int snd_gf1_rawmidi_new(struct snd_gus_card *gus, int device)
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(gus->card, "GF1", device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(gus->card, "GF1", device, 1, 1, &rmidi);
+ if (err < 0)
return err;
strcpy(rmidi->name, gus->interwave ? "AMD InterWave" : "GF1");
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_gf1_uart_output);
diff --git a/sound/isa/gus/gusclassic.c b/sound/isa/gus/gusclassic.c
index 0fba5d8fe84f..bca1caa4968c 100644
--- a/sound/isa/gus/gusclassic.c
+++ b/sound/isa/gus/gusclassic.c
@@ -113,14 +113,16 @@ static int snd_gusclassic_detect(struct snd_gus_card *gus)
unsigned char d;
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 0) {
snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d);
return -ENODEV;
}
udelay(160);
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */
udelay(160);
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 1) {
snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d);
return -ENODEV;
}
diff --git a/sound/isa/gus/gusextreme.c b/sound/isa/gus/gusextreme.c
index da2b2ca6b721..a409a4a29afc 100644
--- a/sound/isa/gus/gusextreme.c
+++ b/sound/isa/gus/gusextreme.c
@@ -177,14 +177,16 @@ static int snd_gusextreme_detect(struct snd_gus_card *gus,
udelay(100);
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 0) {
snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d);
return -EIO;
}
udelay(160);
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */
udelay(160);
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 1) {
snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d);
return -EIO;
}
diff --git a/sound/isa/gus/gusmax.c b/sound/isa/gus/gusmax.c
index 24b945f1768d..ad118d462142 100644
--- a/sound/isa/gus/gusmax.c
+++ b/sound/isa/gus/gusmax.c
@@ -71,14 +71,16 @@ static int snd_gusmax_detect(struct snd_gus_card *gus)
unsigned char d;
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 0) {
snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d);
return -ENODEV;
}
udelay(160);
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */
udelay(160);
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 1) {
snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d);
return -ENODEV;
}
@@ -136,20 +138,24 @@ static int snd_gusmax_mixer(struct snd_wss *chip)
/* reassign AUXA to SYNTHESIZER */
strcpy(id1.name, "Aux Playback Switch");
strcpy(id2.name, "Synth Playback Switch");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "Synth Playback Volume");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
/* reassign AUXB to CD */
strcpy(id1.name, "Aux Playback Switch"); id1.index = 1;
strcpy(id2.name, "CD Playback Switch");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "CD Playback Volume");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
#if 0
/* reassign Mono Input to MIC */
@@ -209,7 +215,8 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
xirq = irq[dev];
if (xirq == SNDRV_AUTO_IRQ) {
- if ((xirq = snd_legacy_find_free_irq(possible_irqs)) < 0) {
+ xirq = snd_legacy_find_free_irq(possible_irqs);
+ if (xirq < 0) {
snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
err = -EBUSY;
goto _err;
@@ -217,7 +224,8 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
}
xdma1 = dma1[dev];
if (xdma1 == SNDRV_AUTO_DMA) {
- if ((xdma1 = snd_legacy_find_free_dma(possible_dmas)) < 0) {
+ xdma1 = snd_legacy_find_free_dma(possible_dmas);
+ if (xdma1 < 0) {
snd_printk(KERN_ERR PFX "unable to find a free DMA1\n");
err = -EBUSY;
goto _err;
@@ -225,7 +233,8 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
}
xdma2 = dma2[dev];
if (xdma2 == SNDRV_AUTO_DMA) {
- if ((xdma2 = snd_legacy_find_free_dma(possible_dmas)) < 0) {
+ xdma2 = snd_legacy_find_free_dma(possible_dmas);
+ if (xdma2 < 0) {
snd_printk(KERN_ERR PFX "unable to find a free DMA2\n");
err = -EBUSY;
goto _err;
@@ -260,13 +269,15 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
if (err < 0)
goto _err;
- if ((err = snd_gusmax_detect(gus)) < 0)
+ err = snd_gusmax_detect(gus);
+ if (err < 0)
goto _err;
maxcard->gus_status_reg = gus->gf1.reg_irqstat;
maxcard->pcm_status_reg = gus->gf1.port + 0x10c + 2;
snd_gusmax_init(dev, card, gus);
- if ((err = snd_gus_initialize(gus)) < 0)
+ err = snd_gus_initialize(gus);
+ if (err < 0)
goto _err;
if (!gus->max_flag) {
@@ -307,7 +318,8 @@ static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
goto _err;
if (pcm_channels[dev] > 0) {
- if ((err = snd_gf1_pcm_new(gus, 1, 1)) < 0)
+ err = snd_gf1_pcm_new(gus, 1, 1);
+ if (err < 0)
goto _err;
}
err = snd_gusmax_mixer(wss);
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index 99581fba4ca8..755de47d0bb6 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -204,7 +204,8 @@ static int snd_interwave_detect_stb(struct snd_interwave *iwcard,
port = 0x360;
}
while (port <= 0x380) {
- if ((iwcard->i2c_res = request_region(port, 1, "InterWave (I2C bus)")) != NULL)
+ iwcard->i2c_res = request_region(port, 1, "InterWave (I2C bus)");
+ if (iwcard->i2c_res)
break;
port += 0x10;
}
@@ -217,11 +218,13 @@ static int snd_interwave_detect_stb(struct snd_interwave *iwcard,
}
sprintf(name, "InterWave-%i", card->number);
- if ((err = snd_i2c_bus_create(card, name, NULL, &bus)) < 0)
+ err = snd_i2c_bus_create(card, name, NULL, &bus);
+ if (err < 0)
return err;
bus->private_value = port;
bus->hw_ops.bit = &snd_interwave_i2c_bit_ops;
- if ((err = snd_tea6330t_detect(bus, 0)) < 0)
+ err = snd_tea6330t_detect(bus, 0);
+ if (err < 0)
return err;
*rbus = bus;
return 0;
@@ -241,14 +244,16 @@ static int snd_interwave_detect(struct snd_interwave *iwcard,
int d;
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 0) {
snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d);
return -ENODEV;
}
udelay(160);
snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */
udelay(160);
- if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) {
+ d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET);
+ if ((d & 0x07) != 1) {
snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d);
return -ENODEV;
}
@@ -493,16 +498,20 @@ static int snd_interwave_mixer(struct snd_wss *chip)
#if 0
/* remove mono microphone controls */
strcpy(id1.name, "Mic Playback Switch");
- if ((err = snd_ctl_remove_id(card, &id1)) < 0)
+ err = snd_ctl_remove_id(card, &id1);
+ if (err < 0)
return err;
strcpy(id1.name, "Mic Playback Volume");
- if ((err = snd_ctl_remove_id(card, &id1)) < 0)
+ err = snd_ctl_remove_id(card, &id1);
+ if (err < 0)
return err;
#endif
/* add new master and mic controls */
- for (idx = 0; idx < ARRAY_SIZE(snd_interwave_controls); idx++)
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_interwave_controls[idx], chip))) < 0)
+ for (idx = 0; idx < ARRAY_SIZE(snd_interwave_controls); idx++) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_interwave_controls[idx], chip));
+ if (err < 0)
return err;
+ }
snd_wss_out(chip, CS4231_LINE_LEFT_OUTPUT, 0x9f);
snd_wss_out(chip, CS4231_LINE_RIGHT_OUTPUT, 0x9f);
snd_wss_out(chip, CS4231_LEFT_MIC_INPUT, 0x9f);
@@ -510,20 +519,24 @@ static int snd_interwave_mixer(struct snd_wss *chip)
/* reassign AUXA to SYNTHESIZER */
strcpy(id1.name, "Aux Playback Switch");
strcpy(id2.name, "Synth Playback Switch");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "Synth Playback Volume");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
/* reassign AUXB to CD */
strcpy(id1.name, "Aux Playback Switch"); id1.index = 1;
strcpy(id2.name, "CD Playback Switch");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "CD Playback Volume");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
return 0;
}
@@ -633,18 +646,20 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
xdma1 = dma1[dev];
xdma2 = dma2[dev];
- if ((err = snd_gus_create(card,
- port[dev],
- -xirq, xdma1, xdma2,
- 0, 32,
- pcm_channels[dev], effect[dev], &gus)) < 0)
+ err = snd_gus_create(card,
+ port[dev],
+ -xirq, xdma1, xdma2,
+ 0, 32,
+ pcm_channels[dev], effect[dev], &gus);
+ if (err < 0)
return err;
- if ((err = snd_interwave_detect(iwcard, gus, dev
+ err = snd_interwave_detect(iwcard, gus, dev
#ifdef SNDRV_STB
- , &i2c_bus
+ , &i2c_bus
#endif
- )) < 0)
+ );
+ if (err < 0)
return err;
iwcard->gus_status_reg = gus->gf1.reg_irqstat;
@@ -652,7 +667,8 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
snd_interwave_init(dev, gus);
snd_interwave_detect_memory(gus);
- if ((err = snd_gus_initialize(gus)) < 0)
+ err = snd_gus_initialize(gus);
+ if (err < 0)
return err;
if (request_irq(xirq, snd_interwave_interrupt, 0,
@@ -708,19 +724,23 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
strcpy(id1.name, "Master Playback Switch");
strcpy(id2.name, id1.name);
id2.index = 1;
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
strcpy(id1.name, "Master Playback Volume");
strcpy(id2.name, id1.name);
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0)
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0)
return err;
- if ((err = snd_tea6330t_update_mixer(card, i2c_bus, 0, 1)) < 0)
+ err = snd_tea6330t_update_mixer(card, i2c_bus, 0, 1);
+ if (err < 0)
return err;
}
#endif
gus->uart_enable = midi[dev];
- if ((err = snd_gf1_rawmidi_new(gus, 0)) < 0)
+ err = snd_gf1_rawmidi_new(gus, 0);
+ if (err < 0)
return err;
#ifndef SNDRV_STB
@@ -758,7 +778,8 @@ static int snd_interwave_isa_probe1(int dev, struct device *devptr)
if (err < 0)
return err;
- if ((err = snd_interwave_probe(card, dev)) < 0) {
+ err = snd_interwave_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -786,19 +807,22 @@ static int snd_interwave_isa_probe(struct device *pdev,
static const int possible_dmas[] = {0, 1, 3, 5, 6, 7, -1};
if (irq[dev] == SNDRV_AUTO_IRQ) {
- if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) {
+ irq[dev] = snd_legacy_find_free_irq(possible_irqs);
+ if (irq[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
return -EBUSY;
}
}
if (dma1[dev] == SNDRV_AUTO_DMA) {
- if ((dma1[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) {
+ dma1[dev] = snd_legacy_find_free_dma(possible_dmas);
+ if (dma1[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free DMA1\n");
return -EBUSY;
}
}
if (dma2[dev] == SNDRV_AUTO_DMA) {
- if ((dma2[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) {
+ dma2[dev] = snd_legacy_find_free_dma(possible_dmas);
+ if (dma2[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free DMA2\n");
return -EBUSY;
}
@@ -853,11 +877,13 @@ static int snd_interwave_pnp_detect(struct pnp_card_link *pcard,
if (res < 0)
return res;
- if ((res = snd_interwave_pnp(dev, card->private_data, pcard, pid)) < 0) {
+ res = snd_interwave_pnp(dev, card->private_data, pcard, pid);
+ if (res < 0) {
snd_card_free(card);
return res;
}
- if ((res = snd_interwave_probe(card, dev)) < 0) {
+ res = snd_interwave_probe(card, dev);
+ if (res < 0) {
snd_card_free(card);
return res;
}
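
The interwave.c hunks above all apply one mechanical cleanup: an assignment embedded in an if () condition is split into a plain assignment followed by a separate error check, the form preferred by the kernel coding style and flagged by checkpatch. The later files in this series repeat the same change, including spots where two such assignments were chained with || in a single condition (see sb16.c further down) and are split into two sequential checks. A minimal userspace sketch of the idiom, with probe_widget() as a made-up stand-in for calls like snd_ctl_add() or snd_gus_create():

	/*
	 * Illustrative only: probe_widget() is hypothetical and merely
	 * models a kernel helper that returns a negative errno on failure.
	 */
	#include <stdio.h>

	static int probe_widget(int id)
	{
		return (id == 3) ? -1 : 0;	/* pretend widget 3 fails */
	}

	int main(void)
	{
		int err;

		/* Old style: if ((err = probe_widget(id)) < 0) return err; */

		/* New style: assign first, then test. */
		for (int id = 0; id < 4; id++) {
			err = probe_widget(id);
			if (err < 0) {
				fprintf(stderr, "widget %d failed\n", id);
				return 1;
			}
		}
		return 0;
	}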
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 9bde11d1cfe8..6f42f869928c 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -208,7 +208,8 @@ static int snd_opl3sa2_detect(struct snd_card *card)
char str[2];
port = chip->port;
- if ((chip->res_port = request_region(port, 2, "OPL3-SA control")) == NULL) {
+ chip->res_port = request_region(port, 2, "OPL3-SA control");
+ if (!chip->res_port) {
snd_printk(KERN_ERR PFX "can't grab port 0x%lx\n", port);
return -EBUSY;
}
@@ -239,14 +240,16 @@ static int snd_opl3sa2_detect(struct snd_card *card)
str[1] = 0;
strcat(card->shortname, str);
snd_opl3sa2_write(chip, OPL3SA2_MISC, tmp ^ 7);
- if ((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MISC)) != tmp) {
+ tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MISC);
+ if (tmp1 != tmp) {
snd_printd("OPL3-SA [0x%lx] detect (1) = 0x%x (0x%x)\n", port, tmp, tmp1);
return -ENODEV;
}
/* try if the MIC register is accessible */
tmp = snd_opl3sa2_read(chip, OPL3SA2_MIC);
snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x8a);
- if (((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MIC)) & 0x9f) != 0x8a) {
+ tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MIC);
+ if ((tmp1 & 0x9f) != 0x8a) {
snd_printd("OPL3-SA [0x%lx] detect (2) = 0x%x (0x%x)\n", port, tmp, tmp1);
return -ENODEV;
}
@@ -489,32 +492,38 @@ static int snd_opl3sa2_mixer(struct snd_card *card)
/* reassign AUX0 to CD */
strcpy(id1.name, "Aux Playback Switch");
strcpy(id2.name, "CD Playback Switch");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "CD Playback Volume");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
/* reassign AUX1 to FM */
strcpy(id1.name, "Aux Playback Switch"); id1.index = 1;
strcpy(id2.name, "FM Playback Switch");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "FM Playback Volume");
- if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
/* add OPL3SA2 controls */
for (idx = 0; idx < ARRAY_SIZE(snd_opl3sa2_controls); idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_opl3sa2_controls[idx], chip))) < 0)
+ kctl = snd_ctl_new1(&snd_opl3sa2_controls[idx], chip);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
switch (idx) {
case 0: chip->master_switch = kctl; kctl->private_free = snd_opl3sa2_master_free; break;
@@ -522,9 +531,11 @@ static int snd_opl3sa2_mixer(struct snd_card *card)
}
}
if (chip->version > 2) {
- for (idx = 0; idx < ARRAY_SIZE(snd_opl3sa2_tone_controls); idx++)
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_opl3sa2_tone_controls[idx], chip))) < 0)
+ for (idx = 0; idx < ARRAY_SIZE(snd_opl3sa2_tone_controls); idx++) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_opl3sa2_tone_controls[idx], chip));
+ if (err < 0)
return err;
+ }
}
return 0;
}
@@ -677,20 +688,24 @@ static int snd_opl3sa2_probe(struct snd_card *card, int dev)
if (err < 0)
return err;
if (fm_port[dev] >= 0x340 && fm_port[dev] < 0x400) {
- if ((err = snd_opl3_create(card, fm_port[dev],
- fm_port[dev] + 2,
- OPL3_HW_OPL3, 0, &opl3)) < 0)
+ err = snd_opl3_create(card, fm_port[dev],
+ fm_port[dev] + 2,
+ OPL3_HW_OPL3, 0, &opl3);
+ if (err < 0)
return err;
- if ((err = snd_opl3_timer_new(opl3, 1, 2)) < 0)
+ err = snd_opl3_timer_new(opl3, 1, 2);
+ if (err < 0)
return err;
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, &chip->synth)) < 0)
+ err = snd_opl3_hwdep_new(opl3, 0, 1, &chip->synth);
+ if (err < 0)
return err;
}
if (midi_port[dev] >= 0x300 && midi_port[dev] < 0x340) {
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_OPL3SA2,
- midi_port[dev],
- MPU401_INFO_IRQ_HOOK, -1,
- &chip->rmidi)) < 0)
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_OPL3SA2,
+ midi_port[dev],
+ MPU401_INFO_IRQ_HOOK, -1,
+ &chip->rmidi);
+ if (err < 0)
return err;
}
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
@@ -721,11 +736,13 @@ static int snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
err = snd_opl3sa2_card_new(&pdev->dev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_opl3sa2_pnp(dev, card->private_data, pdev)) < 0) {
+ err = snd_opl3sa2_pnp(dev, card->private_data, pdev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3sa2_probe(card, dev)) < 0) {
+ err = snd_opl3sa2_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -785,11 +802,13 @@ static int snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard,
err = snd_opl3sa2_card_new(&pdev->dev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_opl3sa2_pnp(dev, card->private_data, pdev)) < 0) {
+ err = snd_opl3sa2_pnp(dev, card->private_data, pdev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3sa2_probe(card, dev)) < 0) {
+ err = snd_opl3sa2_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -865,7 +884,8 @@ static int snd_opl3sa2_isa_probe(struct device *pdev,
err = snd_opl3sa2_card_new(pdev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_opl3sa2_probe(card, dev)) < 0) {
+ err = snd_opl3sa2_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index a510b201143c..e1fb7567fdcc 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -722,35 +722,43 @@ static int snd_miro_mixer(struct snd_card *card,
}
for (idx = 0; idx < ARRAY_SIZE(snd_miro_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_controls[idx], miro))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_controls[idx], miro));
+ if (err < 0)
return err;
}
if ((miro->aci->aci_product == 'A') ||
(miro->aci->aci_product == 'B')) {
/* PCM1/PCM12 with power-amp and Line 2 */
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_line_control[0], miro))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_line_control[0], miro));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_amp_control[0], miro))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_amp_control[0], miro));
+ if (err < 0)
return err;
}
if ((miro->aci->aci_product == 'B') ||
(miro->aci->aci_product == 'C')) {
/* PCM12/PCM20 with mic-preamp */
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_preamp_control[0], miro))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_preamp_control[0], miro));
+ if (err < 0)
return err;
- if (miro->aci->aci_version >= 176)
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_capture_control[0], miro))) < 0)
+ if (miro->aci->aci_version >= 176) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_capture_control[0], miro));
+ if (err < 0)
return err;
+ }
}
if (miro->aci->aci_product == 'C') {
/* PCM20 with radio and 7 band equalizer */
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_radio_control[0], miro))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_radio_control[0], miro));
+ if (err < 0)
return err;
for (idx = 0; idx < ARRAY_SIZE(snd_miro_eq_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_eq_controls[idx], miro))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_eq_controls[idx], miro));
+ if (err < 0)
return err;
}
}
@@ -1178,7 +1186,8 @@ static int snd_card_miro_detect(struct snd_card *card,
for (i = OPTi9XX_HW_82C929; i <= OPTi9XX_HW_82C924; i++) {
- if ((err = snd_miro_init(chip, i)) < 0)
+ err = snd_miro_init(chip, i);
+ if (err < 0)
return err;
err = snd_miro_opti_check(chip);
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 08e61d90057b..4bd1dc6707fc 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -970,32 +970,37 @@ static int snd_opti9xx_isa_probe(struct device *devptr,
#endif /* CS4231 || OPTi93X */
if (mpu_port == SNDRV_AUTO_PORT) {
- if ((mpu_port = snd_legacy_find_free_ioport(possible_mpu_ports, 2)) < 0) {
+ mpu_port = snd_legacy_find_free_ioport(possible_mpu_ports, 2);
+ if (mpu_port < 0) {
snd_printk(KERN_ERR "unable to find a free MPU401 port\n");
return -EBUSY;
}
}
if (irq == SNDRV_AUTO_IRQ) {
- if ((irq = snd_legacy_find_free_irq(possible_irqs)) < 0) {
+ irq = snd_legacy_find_free_irq(possible_irqs);
+ if (irq < 0) {
snd_printk(KERN_ERR "unable to find a free IRQ\n");
return -EBUSY;
}
}
if (mpu_irq == SNDRV_AUTO_IRQ) {
- if ((mpu_irq = snd_legacy_find_free_irq(possible_mpu_irqs)) < 0) {
+ mpu_irq = snd_legacy_find_free_irq(possible_mpu_irqs);
+ if (mpu_irq < 0) {
snd_printk(KERN_ERR "unable to find a free MPU401 IRQ\n");
return -EBUSY;
}
}
if (dma1 == SNDRV_AUTO_DMA) {
- if ((dma1 = snd_legacy_find_free_dma(possible_dma1s)) < 0) {
+ dma1 = snd_legacy_find_free_dma(possible_dma1s);
+ if (dma1 < 0) {
snd_printk(KERN_ERR "unable to find a free DMA1\n");
return -EBUSY;
}
}
#if defined(CS4231) || defined(OPTi93X)
if (dma2 == SNDRV_AUTO_DMA) {
- if ((dma2 = snd_legacy_find_free_dma(possible_dma2s[dma1 % 4])) < 0) {
+ dma2 = snd_legacy_find_free_dma(possible_dma2s[dma1 % 4]);
+ if (dma2 < 0) {
snd_printk(KERN_ERR "unable to find a free DMA2\n");
return -EBUSY;
}
@@ -1006,11 +1011,13 @@ static int snd_opti9xx_isa_probe(struct device *devptr,
if (error < 0)
return error;
- if ((error = snd_card_opti9xx_detect(card, card->private_data)) < 0) {
+ error = snd_card_opti9xx_detect(card, card->private_data);
+ if (error < 0) {
snd_card_free(card);
return error;
}
- if ((error = snd_opti9xx_probe(card)) < 0) {
+ error = snd_opti9xx_probe(card);
+ if (error < 0) {
snd_card_free(card);
return error;
}
@@ -1111,7 +1118,8 @@ static int snd_opti9xx_pnp_probe(struct pnp_card_link *pcard,
return -ENODEV;
}
- if ((error = snd_opti9xx_init(chip, hw))) {
+ error = snd_opti9xx_init(chip, hw);
+ if (error) {
snd_card_free(card);
return error;
}
@@ -1121,7 +1129,8 @@ static int snd_opti9xx_pnp_probe(struct pnp_card_link *pcard,
snd_card_free(card);
return error;
}
- if ((error = snd_opti9xx_probe(card)) < 0) {
+ error = snd_opti9xx_probe(card);
+ if (error < 0) {
snd_card_free(card);
return error;
}
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 1c90421a88dc..5e4187940265 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -1020,6 +1020,7 @@ static const struct snd_kcontrol_new *mixer_defs[EMU8000_NUM_CONTROLS] = {
static int
snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
{
+ struct snd_kcontrol *kctl;
int i, err = 0;
if (snd_BUG_ON(!emu || !card))
@@ -1029,10 +1030,11 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
memset(emu->controls, 0, sizeof(emu->controls));
for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
- if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
- emu->controls[i] = NULL;
+ kctl = snd_ctl_new1(mixer_defs[i], emu);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __error;
- }
+ emu->controls[i] = kctl;
}
return 0;
@@ -1095,9 +1097,10 @@ snd_emu8000_new(struct snd_card *card, int index, long port, int seq_ports,
hw->port1 = port;
hw->port2 = port + 0x400;
hw->port3 = port + 0x800;
- if (!(hw->res_port1 = request_region(hw->port1, 4, "Emu8000-1")) ||
- !(hw->res_port2 = request_region(hw->port2, 4, "Emu8000-2")) ||
- !(hw->res_port3 = request_region(hw->port3, 4, "Emu8000-3"))) {
+ hw->res_port1 = request_region(hw->port1, 4, "Emu8000-1");
+ hw->res_port2 = request_region(hw->port2, 4, "Emu8000-2");
+ hw->res_port3 = request_region(hw->port3, 4, "Emu8000-3");
+ if (!hw->res_port1 || !hw->res_port2 || !hw->res_port3) {
snd_printk(KERN_ERR "sbawe: can't grab ports 0x%lx, 0x%lx, 0x%lx\n", hw->port1, hw->port2, hw->port3);
snd_emu8000_free(hw);
return -EBUSY;
@@ -1118,12 +1121,14 @@ snd_emu8000_new(struct snd_card *card, int index, long port, int seq_ports,
}
snd_emu8000_init_hw(hw);
- if ((err = snd_emu8000_create_mixer(card, hw)) < 0) {
+ err = snd_emu8000_create_mixer(card, hw);
+ if (err < 0) {
snd_emu8000_free(hw);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_CODEC, hw, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_CODEC, hw, &ops);
+ if (err < 0) {
snd_emu8000_free(hw);
return err;
}
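
Beyond the plain de-nesting, the emu8000.c mixer hunk also reorders the bookkeeping: the kcontrol pointer is stored into emu->controls[i] only after snd_ctl_add() has succeeded, instead of being stored first and reset to NULL in the error branch. Assuming snd_ctl_add() consumes (frees) the control on error, as the ALSA core does, this keeps the array from ever holding a stale pointer. A standalone sketch of that ordering, with register_ctl() as a hypothetical stand-in for the add-that-frees-on-error semantics:

	#include <stdlib.h>

	struct ctl { int id; };

	static int register_ctl(struct ctl *c)
	{
		if (c->id < 0) {	/* simulate a failing add */
			free(c);	/* the add consumes the object on error */
			return -1;
		}
		return 0;
	}

	static int build_mixer(struct ctl **slot, int id)
	{
		struct ctl *c = malloc(sizeof(*c));

		if (!c)
			return -1;
		c->id = id;

		if (register_ctl(c) < 0)
			return -1;	/* never keep a pointer to freed memory */

		*slot = c;		/* publish only on success */
		return 0;
	}

	int main(void)
	{
		struct ctl *slot = NULL;

		return (build_mixer(&slot, 1) == 0 && slot) ? 0 : 1;
	}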
diff --git a/sound/isa/sb/emu8000_patch.c b/sound/isa/sb/emu8000_patch.c
index 0cb94cafb4c9..8c1e7f2bfc34 100644
--- a/sound/isa/sb/emu8000_patch.c
+++ b/sound/isa/sb/emu8000_patch.c
@@ -191,7 +191,8 @@ snd_emu8000_sample_new(struct snd_emux *rec, struct snd_sf_sample *sp,
sp->v.truesize = truesize * 2; /* in bytes */
snd_emux_terminate_all(emu->emu);
- if ((rc = snd_emu8000_open_dma(emu, EMU8000_RAM_WRITE)) != 0)
+ rc = snd_emu8000_open_dma(emu, EMU8000_RAM_WRITE);
+ if (rc)
return rc;
/* Set the address to start writing at */
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index 8e8257c574b0..f8d90a1e989b 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -626,7 +626,8 @@ static int emu8k_pcm_prepare(struct snd_pcm_substream *subs)
int err, i, ch;
snd_emux_terminate_all(rec->emu->emu);
- if ((err = emu8k_open_dram_for_pcm(rec->emu, rec->voices)) != 0)
+ err = emu8k_open_dram_for_pcm(rec->emu, rec->voices);
+ if (err)
return err;
rec->dram_opened = 1;
@@ -682,7 +683,8 @@ int snd_emu8000_pcm_new(struct snd_card *card, struct snd_emu8000 *emu, int inde
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(card, "Emu8000 PCM", index, 1, 0, &pcm)) < 0)
+ err = snd_pcm_new(card, "Emu8000 PCM", index, 1, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
pcm->private_free = snd_emu8000_pcm_free;
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 63ef960abd25..d0f797c02841 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -332,14 +332,9 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
xdma8 = dma8[dev];
xdma16 = dma16[dev];
- if ((err = snd_sbdsp_create(card,
- port[dev],
- xirq,
- snd_sb16dsp_interrupt,
- xdma8,
- xdma16,
- SB_HW_AUTO,
- &chip)) < 0)
+ err = snd_sbdsp_create(card, port[dev], xirq, snd_sb16dsp_interrupt,
+ xdma8, xdma16, SB_HW_AUTO, &chip);
+ if (err < 0)
return err;
acard->chip = chip;
@@ -348,10 +343,14 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
return -ENODEV;
}
chip->mpu_port = mpu_port[dev];
- if (! is_isapnp_selected(dev) && (err = snd_sb16dsp_configure(chip)) < 0)
- return err;
+ if (!is_isapnp_selected(dev)) {
+ err = snd_sb16dsp_configure(chip);
+ if (err < 0)
+ return err;
+ }
- if ((err = snd_sb16dsp_pcm(chip, 0)) < 0)
+ err = snd_sb16dsp_pcm(chip, 0);
+ if (err < 0)
return err;
strcpy(card->driver,
@@ -371,10 +370,11 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
xdma8 >= 0 ? "&" : "", xdma16);
if (chip->mpu_port > 0 && chip->mpu_port != SNDRV_AUTO_PORT) {
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_SB,
- chip->mpu_port,
- MPU401_INFO_IRQ_HOOK, -1,
- &chip->rmidi)) < 0)
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_SB,
+ chip->mpu_port,
+ MPU401_INFO_IRQ_HOOK, -1,
+ &chip->rmidi);
+ if (err < 0)
return err;
chip->rmidi_callback = snd_mpu401_uart_interrupt;
}
@@ -397,12 +397,14 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
#else
int seqdev = 1;
#endif
- if ((err = snd_opl3_hwdep_new(opl3, 0, seqdev, &synth)) < 0)
+ err = snd_opl3_hwdep_new(opl3, 0, seqdev, &synth);
+ if (err < 0)
return err;
}
}
- if ((err = snd_sbmixer_new(chip)) < 0)
+ err = snd_sbmixer_new(chip);
+ if (err < 0)
return err;
#ifdef CONFIG_SND_SB16_CSP
@@ -419,8 +421,9 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
#endif
#ifdef SNDRV_SBAWE_EMU8000
if (awe_port[dev] > 0) {
- if ((err = snd_emu8000_new(card, 1, awe_port[dev],
- seq_ports[dev], NULL)) < 0) {
+ err = snd_emu8000_new(card, 1, awe_port[dev],
+ seq_ports[dev], NULL);
+ if (err < 0) {
snd_printk(KERN_ERR PFX "fatal error - EMU-8000 synthesizer not detected at 0x%lx\n", awe_port[dev]);
return err;
@@ -435,7 +438,8 @@ static int snd_sb16_probe(struct snd_card *card, int dev)
(mic_agc[dev] ? 0x00 : 0x01));
spin_unlock_irqrestore(&chip->mixer_lock, flags);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
return err;
return 0;
@@ -484,7 +488,8 @@ static int snd_sb16_isa_probe1(int dev, struct device *pdev)
awe_port[dev] = port[dev] + 0x400;
#endif
- if ((err = snd_sb16_probe(card, dev)) < 0) {
+ err = snd_sb16_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -506,19 +511,22 @@ static int snd_sb16_isa_probe(struct device *pdev, unsigned int dev)
static const int possible_dmas16[] = {5, 6, 7, -1};
if (irq[dev] == SNDRV_AUTO_IRQ) {
- if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) {
+ irq[dev] = snd_legacy_find_free_irq(possible_irqs);
+ if (irq[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
return -EBUSY;
}
}
if (dma8[dev] == SNDRV_AUTO_DMA) {
- if ((dma8[dev] = snd_legacy_find_free_dma(possible_dmas8)) < 0) {
+ dma8[dev] = snd_legacy_find_free_dma(possible_dmas8);
+ if (dma8[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free 8-bit DMA\n");
return -EBUSY;
}
}
if (dma16[dev] == SNDRV_AUTO_DMA) {
- if ((dma16[dev] = snd_legacy_find_free_dma(possible_dmas16)) < 0) {
+ dma16[dev] = snd_legacy_find_free_dma(possible_dmas16);
+ if (dma16[dev] < 0) {
snd_printk(KERN_ERR PFX "unable to find a free 16-bit DMA\n");
return -EBUSY;
}
@@ -591,8 +599,13 @@ static int snd_sb16_pnp_detect(struct pnp_card_link *pcard,
res = snd_sb16_card_new(&pcard->card->dev, dev, &card);
if (res < 0)
return res;
- if ((res = snd_card_sb16_pnp(dev, card->private_data, pcard, pid)) < 0 ||
- (res = snd_sb16_probe(card, dev)) < 0) {
+ res = snd_card_sb16_pnp(dev, card->private_data, pcard, pid);
+ if (res < 0) {
+ snd_card_free(card);
+ return res;
+ }
+ res = snd_sb16_probe(card, dev);
+ if (res < 0) {
snd_card_free(card);
return res;
}
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 4789345a8fdd..7ad8c5f7b664 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -112,10 +112,12 @@ int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep)
if (csp_detect(chip, &version))
return -ENODEV;
- if ((err = snd_hwdep_new(chip->card, "SB16-CSP", device, &hw)) < 0)
+ err = snd_hwdep_new(chip->card, "SB16-CSP", device, &hw);
+ if (err < 0)
return err;
- if ((p = kzalloc(sizeof(*p), GFP_KERNEL)) == NULL) {
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
snd_device_free(chip->card, hw);
return -ENOMEM;
}
@@ -814,6 +816,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
set_mode_register(p->chip, 0xc0); /* c0 = STOP */
@@ -853,6 +856,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
+ spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
@@ -878,6 +882,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+ spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
@@ -892,6 +897,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
+ spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
@@ -1034,6 +1040,7 @@ static const struct snd_kcontrol_new snd_sb_qsound_space = {
static int snd_sb_qsound_build(struct snd_sb_csp * p)
{
struct snd_card *card;
+ struct snd_kcontrol *kctl;
int err;
if (snd_BUG_ON(!p))
@@ -1045,14 +1052,16 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
spin_lock_init(&p->q_lock);
- if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
- p->qsound_switch = NULL;
+ kctl = snd_ctl_new1(&snd_sb_qsound_switch, p);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __error;
- }
- if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
- p->qsound_space = NULL;
+ p->qsound_switch = kctl;
+ kctl = snd_ctl_new1(&snd_sb_qsound_space, p);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __error;
- }
+ p->qsound_space = kctl;
return 0;
@@ -1072,10 +1081,14 @@ static void snd_sb_qsound_destroy(struct snd_sb_csp * p)
card = p->chip->card;
down_write(&card->controls_rwsem);
- if (p->qsound_switch)
+ if (p->qsound_switch) {
snd_ctl_remove(card, p->qsound_switch);
- if (p->qsound_space)
+ p->qsound_switch = NULL;
+ }
+ if (p->qsound_space) {
snd_ctl_remove(card, p->qsound_space);
+ p->qsound_space = NULL;
+ }
up_write(&card->controls_rwsem);
/* cancel pending transfer of QSound parameters */
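
The sb16_csp.c changes are not purely cosmetic. snd_sb_csp_start() and snd_sb_csp_stop() now drop mixer_lock before taking reg_lock and re-acquire it only to restore the PCM volume, so the two spinlocks are no longer held nested across the CSP programming. snd_sb_qsound_build() adopts the same publish-after-successful-add ordering as the emu8000.c hunk, and snd_sb_qsound_destroy() clears the pointers after snd_ctl_remove() so a repeated teardown cannot touch already-removed controls. A userspace sketch of the lock-scope change (the mutexes and the work inside the critical sections are illustrative only):

	#include <pthread.h>

	static pthread_mutex_t mixer_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
	static int mix_left, mix_right;

	static void csp_start(void)
	{
		pthread_mutex_lock(&mixer_lock);
		mix_left = mix_right = 0;		/* mute while reprogramming */
		pthread_mutex_unlock(&mixer_lock);	/* released before reg_lock */

		pthread_mutex_lock(&reg_lock);
		/* ... program the DSP registers ... */
		pthread_mutex_unlock(&reg_lock);

		pthread_mutex_lock(&mixer_lock);	/* retaken to restore volume */
		mix_left = 200;
		mix_right = 200;
		pthread_mutex_unlock(&mixer_lock);
	}

	int main(void)
	{
		csp_start();
		return 0;
	}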
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index aa4870531023..a9b87e159b2d 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -703,7 +703,8 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct
unsigned char nval, oval;
int change;
- if ((nval = ucontrol->value.enumerated.item[0]) > 2)
+ nval = ucontrol->value.enumerated.item[0];
+ if (nval > 2)
return -EINVAL;
spin_lock_irqsave(&chip->reg_lock, flags);
oval = snd_sb16_get_dma_mode(chip);
@@ -836,7 +837,8 @@ int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(card, "SB16 DSP", device, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(card, "SB16 DSP", device, 1, 1, &pcm);
+ if (err < 0)
return err;
sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff);
pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index ed3a87ebe3f4..b08e6e7690c9 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -101,12 +101,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
if (port[dev] != SNDRV_AUTO_PORT) {
- if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
- snd_sb8_interrupt,
- dma8[dev],
- -1,
- SB_HW_AUTO,
- &chip)) < 0)
+ err = snd_sbdsp_create(card, port[dev], irq[dev],
+ snd_sb8_interrupt, dma8[dev],
+ -1, SB_HW_AUTO, &chip);
+ if (err < 0)
goto _err;
} else {
/* auto-probe legacy ports */
@@ -145,32 +143,35 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
goto _err;
}
- if ((err = snd_sb8dsp_pcm(chip, 0)) < 0)
+ err = snd_sb8dsp_pcm(chip, 0);
+ if (err < 0)
goto _err;
- if ((err = snd_sbmixer_new(chip)) < 0)
+ err = snd_sbmixer_new(chip);
+ if (err < 0)
goto _err;
if (chip->hardware == SB_HW_10 || chip->hardware == SB_HW_20) {
- if ((err = snd_opl3_create(card, chip->port + 8, 0,
- OPL3_HW_AUTO, 1,
- &opl3)) < 0) {
+ err = snd_opl3_create(card, chip->port + 8, 0,
+ OPL3_HW_AUTO, 1, &opl3);
+ if (err < 0)
snd_printk(KERN_WARNING "sb8: no OPL device at 0x%lx\n", chip->port + 8);
- }
} else {
- if ((err = snd_opl3_create(card, chip->port, chip->port + 2,
- OPL3_HW_AUTO, 1,
- &opl3)) < 0) {
+ err = snd_opl3_create(card, chip->port, chip->port + 2,
+ OPL3_HW_AUTO, 1, &opl3);
+ if (err < 0) {
snd_printk(KERN_WARNING "sb8: no OPL device at 0x%lx-0x%lx\n",
chip->port, chip->port + 2);
}
}
if (err >= 0) {
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0)
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0)
goto _err;
}
- if ((err = snd_sb8dsp_midi(chip, 0)) < 0)
+ err = snd_sb8dsp_midi(chip, 0);
+ if (err < 0)
goto _err;
strcpy(card->driver, chip->hardware == SB_HW_PRO ? "SB Pro" : "SB8");
@@ -180,7 +181,8 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
chip->port,
irq[dev], dma8[dev]);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto _err;
dev_set_drvdata(pdev, card);
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 8d01692c4f2a..2ed176a5a574 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -567,7 +567,8 @@ int snd_sb8dsp_pcm(struct snd_sb *chip, int device)
int err;
size_t max_prealloc = 64 * 1024;
- if ((err = snd_pcm_new(card, "SB8 DSP", device, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(card, "SB8 DSP", device, 1, 1, &pcm);
+ if (err < 0)
return err;
sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff);
pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
diff --git a/sound/isa/sb/sb8_midi.c b/sound/isa/sb/sb8_midi.c
index 8c01460539ed..618366d5d984 100644
--- a/sound/isa/sb/sb8_midi.c
+++ b/sound/isa/sb/sb8_midi.c
@@ -251,7 +251,8 @@ int snd_sb8dsp_midi(struct snd_sb *chip, int device)
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(chip->card, "SB8 MIDI", device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(chip->card, "SB8 MIDI", device, 1, 1, &rmidi);
+ if (err < 0)
return err;
strcpy(rmidi->name, "SB8 MIDI");
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_sb8dsp_midi_output);
diff --git a/sound/isa/sb/sb_common.c b/sound/isa/sb/sb_common.c
index 61ea4078aa95..57121218ed24 100644
--- a/sound/isa/sb/sb_common.c
+++ b/sound/isa/sb/sb_common.c
@@ -238,7 +238,8 @@ int snd_sbdsp_create(struct snd_card *card,
if (hardware == SB_HW_ALS4000)
goto __skip_allocation;
- if ((chip->res_port = request_region(port, 16, "SoundBlaster")) == NULL) {
+ chip->res_port = request_region(port, 16, "SoundBlaster");
+ if (!chip->res_port) {
snd_printk(KERN_ERR "sb: can't grab port 0x%lx\n", port);
snd_sbdsp_free(chip);
return -EBUSY;
@@ -267,11 +268,13 @@ int snd_sbdsp_create(struct snd_card *card,
__skip_allocation:
chip->card = card;
chip->hardware = hardware;
- if ((err = snd_sbdsp_probe(chip)) < 0) {
+ err = snd_sbdsp_probe(chip);
+ if (err < 0) {
snd_sbdsp_free(chip);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_sbdsp_free(chip);
return err;
}
diff --git a/sound/isa/sb/sb_mixer.c b/sound/isa/sb/sb_mixer.c
index 5de5506e7e60..fffd681e5bf7 100644
--- a/sound/isa/sb/sb_mixer.c
+++ b/sound/isa/sb/sb_mixer.c
@@ -485,7 +485,8 @@ int snd_sbmixer_add_ctl(struct snd_sb *chip, const char *name, int index, int ty
strscpy(ctl->id.name, name, sizeof(ctl->id.name));
ctl->id.index = index;
ctl->private_value = value;
- if ((err = snd_ctl_add(chip->card, ctl)) < 0)
+ err = snd_ctl_add(chip->card, ctl);
+ if (err < 0)
return err;
return 0;
}
@@ -736,33 +737,36 @@ int snd_sbmixer_new(struct snd_sb *chip)
return 0; /* no mixer chip on SB1.x */
case SB_HW_20:
case SB_HW_201:
- if ((err = snd_sbmixer_init(chip,
- snd_sb20_controls,
- ARRAY_SIZE(snd_sb20_controls),
- snd_sb20_init_values,
- ARRAY_SIZE(snd_sb20_init_values),
- "CTL1335")) < 0)
+ err = snd_sbmixer_init(chip,
+ snd_sb20_controls,
+ ARRAY_SIZE(snd_sb20_controls),
+ snd_sb20_init_values,
+ ARRAY_SIZE(snd_sb20_init_values),
+ "CTL1335");
+ if (err < 0)
return err;
break;
case SB_HW_PRO:
case SB_HW_JAZZ16:
- if ((err = snd_sbmixer_init(chip,
- snd_sbpro_controls,
- ARRAY_SIZE(snd_sbpro_controls),
- snd_sbpro_init_values,
- ARRAY_SIZE(snd_sbpro_init_values),
- "CTL1345")) < 0)
+ err = snd_sbmixer_init(chip,
+ snd_sbpro_controls,
+ ARRAY_SIZE(snd_sbpro_controls),
+ snd_sbpro_init_values,
+ ARRAY_SIZE(snd_sbpro_init_values),
+ "CTL1345");
+ if (err < 0)
return err;
break;
case SB_HW_16:
case SB_HW_ALS100:
case SB_HW_CS5530:
- if ((err = snd_sbmixer_init(chip,
- snd_sb16_controls,
- ARRAY_SIZE(snd_sb16_controls),
- snd_sb16_init_values,
- ARRAY_SIZE(snd_sb16_init_values),
- "CTL1745")) < 0)
+ err = snd_sbmixer_init(chip,
+ snd_sb16_controls,
+ ARRAY_SIZE(snd_sb16_controls),
+ snd_sb16_init_values,
+ ARRAY_SIZE(snd_sb16_init_values),
+ "CTL1745");
+ if (err < 0)
return err;
break;
case SB_HW_ALS4000:
@@ -775,12 +779,13 @@ int snd_sbmixer_new(struct snd_sb *chip)
"ALS4000");
if (err < 0)
return err;
- if ((err = snd_sbmixer_init(chip,
- snd_als4000_controls,
- ARRAY_SIZE(snd_als4000_controls),
- snd_als4000_init_values,
- ARRAY_SIZE(snd_als4000_init_values),
- "ALS4000")) < 0)
+ err = snd_sbmixer_init(chip,
+ snd_als4000_controls,
+ ARRAY_SIZE(snd_als4000_controls),
+ snd_als4000_init_values,
+ ARRAY_SIZE(snd_als4000_init_values),
+ "ALS4000");
+ if (err < 0)
return err;
break;
case SB_HW_DT019X:
diff --git a/sound/isa/wavefront/wavefront.c b/sound/isa/wavefront/wavefront.c
index a4437971df2f..dfdeaf7b6bf4 100644
--- a/sound/isa/wavefront/wavefront.c
+++ b/sound/isa/wavefront/wavefront.c
@@ -555,7 +555,8 @@ static int snd_wavefront_isa_probe(struct device *pdev,
err = snd_wavefront_card_new(pdev, dev, &card);
if (err < 0)
return err;
- if ((err = snd_wavefront_probe(card, dev)) < 0) {
+ err = snd_wavefront_probe(card, dev);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -610,7 +611,8 @@ static int snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
}
}
- if ((res = snd_wavefront_probe(card, dev)) < 0)
+ res = snd_wavefront_probe(card, dev);
+ if (res < 0)
return res;
pnp_set_card_drvdata(pcard, card);
diff --git a/sound/isa/wavefront/wavefront_midi.c b/sound/isa/wavefront/wavefront_midi.c
index a337a86f7a65..72e775ac7ad7 100644
--- a/sound/isa/wavefront/wavefront_midi.c
+++ b/sound/isa/wavefront/wavefront_midi.c
@@ -239,7 +239,8 @@ static int snd_wavefront_midi_input_open(struct snd_rawmidi_substream *substream
mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data);
- if ((midi = get_wavefront_midi (substream)) == NULL)
+ midi = get_wavefront_midi(substream);
+ if (!midi)
return -EIO;
spin_lock_irqsave (&midi->open, flags);
@@ -263,7 +264,8 @@ static int snd_wavefront_midi_output_open(struct snd_rawmidi_substream *substrea
mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data);
- if ((midi = get_wavefront_midi (substream)) == NULL)
+ midi = get_wavefront_midi(substream);
+ if (!midi)
return -EIO;
spin_lock_irqsave (&midi->open, flags);
@@ -287,7 +289,8 @@ static int snd_wavefront_midi_input_close(struct snd_rawmidi_substream *substrea
mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data);
- if ((midi = get_wavefront_midi (substream)) == NULL)
+ midi = get_wavefront_midi(substream);
+ if (!midi)
return -EIO;
spin_lock_irqsave (&midi->open, flags);
@@ -310,7 +313,8 @@ static int snd_wavefront_midi_output_close(struct snd_rawmidi_substream *substre
mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data);
- if ((midi = get_wavefront_midi (substream)) == NULL)
+ midi = get_wavefront_midi(substream);
+ if (!midi)
return -EIO;
spin_lock_irqsave (&midi->open, flags);
@@ -333,9 +337,9 @@ static void snd_wavefront_midi_input_trigger(struct snd_rawmidi_substream *subst
mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data);
- if ((midi = get_wavefront_midi (substream)) == NULL) {
+ midi = get_wavefront_midi(substream);
+ if (!midi)
return;
- }
spin_lock_irqsave (&midi->virtual, flags);
if (up) {
@@ -372,9 +376,9 @@ static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *subs
mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data);
- if ((midi = get_wavefront_midi (substream)) == NULL) {
+ midi = get_wavefront_midi(substream);
+ if (!midi)
return;
- }
spin_lock_irqsave (&midi->virtual, flags);
if (up) {
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index d6420d224d09..69cbc79fbb71 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -339,7 +339,8 @@ snd_wavefront_cmd (snd_wavefront_t *dev,
int c;
struct wavefront_command *wfcmd;
- if ((wfcmd = wavefront_get_command (cmd)) == NULL) {
+ wfcmd = wavefront_get_command(cmd);
+ if (!wfcmd) {
snd_printk ("command 0x%x not supported.\n",
cmd);
return 1;
@@ -391,7 +392,8 @@ snd_wavefront_cmd (snd_wavefront_t *dev,
for (i = 0; i < wfcmd->read_cnt; i++) {
- if ((c = wavefront_read (dev)) == -1) {
+ c = wavefront_read(dev);
+ if (c == -1) {
DPRINT (WF_DEBUG_IO, "bad read for byte "
"%d of 0x%x [%s].\n",
i, cmd, wfcmd->action);
@@ -401,7 +403,8 @@ snd_wavefront_cmd (snd_wavefront_t *dev,
/* Now handle errors. Lots of special cases here */
if (c == 0xff) {
- if ((c = wavefront_read (dev)) == -1) {
+ c = wavefront_read(dev);
+ if (c == -1) {
DPRINT (WF_DEBUG_IO, "bad read for "
"error byte at "
"read byte %d "
@@ -459,9 +462,9 @@ snd_wavefront_cmd (snd_wavefront_t *dev,
of the standard value.
*/
- if ((ack = wavefront_read (dev)) == 0) {
+ ack = wavefront_read(dev);
+ if (ack == 0)
ack = WF_ACK;
- }
if (ack != WF_ACK) {
if (ack == -1) {
@@ -475,7 +478,8 @@ snd_wavefront_cmd (snd_wavefront_t *dev,
if (ack == 0xff) { /* explicit error */
- if ((err = wavefront_read (dev)) == -1) {
+ err = wavefront_read(dev);
+ if (err == -1) {
DPRINT (WF_DEBUG_DATA,
"cannot read err "
"for 0x%x [%s].\n",
@@ -603,9 +607,9 @@ wavefront_delete_sample (snd_wavefront_t *dev, int sample_num)
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
+ x = snd_wavefront_cmd(dev, WFC_DELETE_SAMPLE, NULL, wbuf);
+ if (!x)
dev->sample_status[sample_num] = WF_ST_EMPTY;
- }
return x;
}
@@ -691,8 +695,9 @@ wavefront_get_patch_status (snd_wavefront_t *dev)
patchnum[0] = i & 0x7f;
patchnum[1] = i >> 7;
- if ((x = snd_wavefront_cmd (dev, WFC_UPLOAD_PATCH, patchbuf,
- patchnum)) == 0) {
+ x = snd_wavefront_cmd(dev, WFC_UPLOAD_PATCH, patchbuf,
+ patchnum);
+ if (x == 0) {
dev->patch_status[i] |= WF_SLOT_FILLED;
p = (wavefront_patch *) patchbuf;
@@ -738,8 +743,9 @@ wavefront_get_program_status (snd_wavefront_t *dev)
for (i = 0; i < WF_MAX_PROGRAM; i++) {
prognum = i;
- if ((x = snd_wavefront_cmd (dev, WFC_UPLOAD_PROGRAM, progbuf,
- &prognum)) == 0) {
+ x = snd_wavefront_cmd(dev, WFC_UPLOAD_PROGRAM, progbuf,
+ &prognum);
+ if (x == 0) {
dev->prog_status[i] |= WF_SLOT_USED;
@@ -894,9 +900,9 @@ wavefront_send_sample (snd_wavefront_t *dev,
if (header->number == WAVEFRONT_FIND_FREE_SAMPLE_SLOT) {
int x;
- if ((x = wavefront_find_free_sample (dev)) < 0) {
+ x = wavefront_find_free_sample(dev);
+ if (x < 0)
return -ENOMEM;
- }
snd_printk ("unspecified sample => %d\n", x);
header->number = x;
}
@@ -1137,7 +1143,8 @@ wavefront_send_sample (snd_wavefront_t *dev,
nothing to do with DMA at all.
*/
- if ((dma_ack = wavefront_read (dev)) != WF_DMA_ACK) {
+ dma_ack = wavefront_read(dev);
+ if (dma_ack != WF_DMA_ACK) {
if (dma_ack == -1) {
snd_printk ("upload sample "
"DMA ack timeout\n");
@@ -1282,14 +1289,16 @@ wavefront_fetch_multisample (snd_wavefront_t *dev,
char d[2];
int val;
- if ((val = wavefront_read (dev)) == -1) {
+ val = wavefront_read(dev);
+ if (val == -1) {
snd_printk ("upload multisample failed "
"during sample loop.\n");
return -EIO;
}
d[0] = val;
- if ((val = wavefront_read (dev)) == -1) {
+ val = wavefront_read(dev);
+ if (val == -1) {
snd_printk ("upload multisample failed "
"during sample loop.\n");
return -EIO;
@@ -1910,7 +1919,8 @@ wavefront_reset_to_cleanliness (snd_wavefront_t *dev)
goto gone_bad;
}
- if ((hwv[0] = wavefront_read (dev)) == -1) {
+ hwv[0] = wavefront_read(dev);
+ if (hwv[0] == -1) {
snd_printk ("board not responding correctly.\n");
goto gone_bad;
}
@@ -1921,7 +1931,8 @@ wavefront_reset_to_cleanliness (snd_wavefront_t *dev)
and tell us about it either way.
*/
- if ((hwv[0] = wavefront_read (dev)) == -1) {
+ hwv[0] = wavefront_read(dev);
+ if (hwv[0] == -1) {
snd_printk ("on-board RAM test failed "
"(bad error code).\n");
} else {
@@ -1934,7 +1945,8 @@ wavefront_reset_to_cleanliness (snd_wavefront_t *dev)
/* We're OK, just get the next byte of the HW version response */
- if ((hwv[1] = wavefront_read (dev)) == -1) {
+ hwv[1] = wavefront_read(dev);
+ if (hwv[1] == -1) {
snd_printk ("incorrect h/w response.\n");
goto gone_bad;
}
@@ -2079,9 +2091,9 @@ wavefront_do_reset (snd_wavefront_t *dev)
about it.
*/
- if ((dev->freemem = wavefront_freemem (dev)) < 0) {
+ dev->freemem = wavefront_freemem(dev);
+ if (dev->freemem < 0)
goto gone_bad;
- }
snd_printk ("available DRAM %dk\n", dev->freemem / 1024);
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index ea5d3cdfe4e4..743e0f05e335 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1493,7 +1493,8 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.period_bytes_max);
if (chip->claim_dma) {
- if ((err = chip->claim_dma(chip, chip->dma_private_data, chip->dma1)) < 0)
+ err = chip->claim_dma(chip, chip->dma_private_data, chip->dma1);
+ if (err < 0)
return err;
}
@@ -1533,7 +1534,8 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.period_bytes_max);
if (chip->claim_dma) {
- if ((err = chip->claim_dma(chip, chip->dma_private_data, chip->dma2)) < 0)
+ err = chip->claim_dma(chip, chip->dma_private_data, chip->dma2);
+ if (err < 0)
return err;
}
@@ -1934,7 +1936,8 @@ int snd_wss_timer(struct snd_wss *chip, int device)
tid.card = chip->card->number;
tid.device = device;
tid.subdevice = 0;
- if ((err = snd_timer_new(chip->card, "CS4231", &tid, &timer)) < 0)
+ err = snd_timer_new(chip->card, "CS4231", &tid, &timer);
+ if (err < 0)
return err;
strcpy(timer->name, snd_wss_chip_id(chip));
timer->private_data = chip;
diff --git a/sound/mips/snd-n64.c b/sound/mips/snd-n64.c
index e35e93157755..463a6fe589eb 100644
--- a/sound/mips/snd-n64.c
+++ b/sound/mips/snd-n64.c
@@ -338,6 +338,10 @@ static int __init n64audio_probe(struct platform_device *pdev)
strcpy(card->longname, "N64 Audio");
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto fail_dma_alloc;
+ }
if (devm_request_irq(&pdev->dev, res->start, n64audio_isr,
IRQF_SHARED, "N64 Audio", priv)) {
err = -EBUSY;
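
n64audio_probe() additionally gains a guard for platform_get_resource() returning NULL, so a missing IRQ resource takes the existing error path instead of dereferencing res->start. A small sketch of that guard, with get_irq_resource() as a hypothetical stand-in for platform_get_resource():

	#include <stddef.h>

	struct resource { unsigned long start; };

	static struct resource *get_irq_resource(int have_irq)
	{
		static struct resource irq = { .start = 42 };

		return have_irq ? &irq : NULL;
	}

	static int probe(int have_irq)
	{
		struct resource *res = get_irq_resource(have_irq);

		if (!res)
			return -22;	/* -EINVAL: bail out before using res->start */

		return (int)res->start;
	}

	int main(void)
	{
		return (probe(1) == 42 && probe(0) == -22) ? 0 : 1;
	}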
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index 49679aa8631d..0c95828ac0b1 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -999,11 +999,9 @@ static int sq_ioctl(struct file *file, u_int cmd, u_long arg)
case SNDCTL_DSP_RESET:
sq_reset();
return 0;
- break ;
case SNDCTL_DSP_GETFMTS:
fmt = dmasound.mach.hardware_afmts ; /* this is what OSS says.. */
return IOCTL_OUT(arg, fmt);
- break ;
case SNDCTL_DSP_GETBLKSIZE:
/* this should tell the caller about bytes that the app can
read/write - the app doesn't care about our internal buffers.
@@ -1020,7 +1018,6 @@ static int sq_ioctl(struct file *file, u_int cmd, u_long arg)
size = write_sq.user_frag_size ;
}
return IOCTL_OUT(arg, size);
- break ;
case SNDCTL_DSP_POST:
/* all we are going to do is to tell the LL that any
partial frags can be queued for output.
@@ -1044,7 +1041,6 @@ static int sq_ioctl(struct file *file, u_int cmd, u_long arg)
if (file->f_mode & shared_resource_owner)
shared_resources_initialised = 0 ;
return result ;
- break ;
case SOUND_PCM_READ_RATE:
return IOCTL_OUT(arg, dmasound.soft.speed);
case SNDCTL_DSP_SPEED:
@@ -1123,7 +1119,6 @@ static int sq_ioctl(struct file *file, u_int cmd, u_long arg)
the value is 'random' and that the user _must_ check the actual
frags values using SNDCTL_DSP_GETBLKSIZE or similar */
return IOCTL_OUT(arg, data);
- break ;
case SNDCTL_DSP_GETOSPACE:
/*
*/
@@ -1234,31 +1229,22 @@ static char *get_afmt_string(int afmt)
switch(afmt) {
case AFMT_MU_LAW:
return "mu-law";
- break;
case AFMT_A_LAW:
return "A-law";
- break;
case AFMT_U8:
return "unsigned 8 bit";
- break;
case AFMT_S8:
return "signed 8 bit";
- break;
case AFMT_S16_BE:
return "signed 16 bit BE";
- break;
case AFMT_U16_BE:
return "unsigned 16 bit BE";
- break;
case AFMT_S16_LE:
return "signed 16 bit LE";
- break;
case AFMT_U16_LE:
return "unsigned 16 bit LE";
- break;
case 0:
return "format not set" ;
- break ;
default:
break ;
}
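
The dmasound_core.c hunks only delete break statements that immediately follow a return inside switch cases; those statements were unreachable and behaviour is unchanged. The same cleanup in isolation:

	#include <stdio.h>

	static const char *afmt_name(int afmt)
	{
		switch (afmt) {
		case 8:
			return "unsigned 8 bit";	/* a break here would never run */
		case 16:
			return "signed 16 bit LE";
		default:
			return "unknown";
		}
	}

	int main(void)
	{
		printf("%s\n", afmt_name(8));
		return 0;
	}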
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index f2ca0a701987..1440db8b4177 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -915,10 +915,9 @@ snd_harmony_create(struct snd_card *card,
spin_lock_init(&h->mixer_lock);
spin_lock_init(&h->lock);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- h, &ops)) < 0) {
- goto free_and_ret;
- }
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, h, &ops);
+ if (err < 0)
+ goto free_and_ret;
*rchip = h;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 012a7ee849e8..01f296d524ce 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1286,15 +1286,17 @@ static int snd_ac97_cmix_new_stereo(struct snd_card *card, const char *pfx,
if (snd_ac97_try_bit(ac97, reg, 15)) {
sprintf(name, "%s Switch", pfx);
- if ((err = snd_ac97_cmute_new_stereo(card, name, reg,
- check_stereo, check_amix,
- ac97)) < 0)
+ err = snd_ac97_cmute_new_stereo(card, name, reg,
+ check_stereo, check_amix,
+ ac97);
+ if (err < 0)
return err;
}
check_volume_resolution(ac97, reg, &lo_max, &hi_max);
if (lo_max) {
sprintf(name, "%s Volume", pfx);
- if ((err = snd_ac97_cvol_new(card, name, reg, lo_max, hi_max, ac97)) < 0)
+ err = snd_ac97_cvol_new(card, name, reg, lo_max, hi_max, ac97);
+ if (err < 0)
return err;
}
return 0;
@@ -1333,9 +1335,11 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build center controls */
if ((snd_ac97_try_volume_mix(ac97, AC97_CENTER_LFE_MASTER))
&& !(ac97->flags & AC97_AD_MULTI)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_center[0], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_center[0], ac97));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_center[1], ac97))) < 0)
+ err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_center[1], ac97));
+ if (err < 0)
return err;
snd_ac97_change_volume_params2(ac97, AC97_CENTER_LFE_MASTER, 0, &max);
kctl->private_value &= ~(0xff << 16);
@@ -1347,9 +1351,11 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build LFE controls */
if ((snd_ac97_try_volume_mix(ac97, AC97_CENTER_LFE_MASTER+1))
&& !(ac97->flags & AC97_AD_MULTI)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_lfe[0], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_lfe[0], ac97));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_lfe[1], ac97))) < 0)
+ err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_lfe[1], ac97));
+ if (err < 0)
return err;
snd_ac97_change_volume_params2(ac97, AC97_CENTER_LFE_MASTER, 8, &max);
kctl->private_value &= ~(0xff << 16);
@@ -1362,23 +1368,26 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
if ((snd_ac97_try_volume_mix(ac97, AC97_SURROUND_MASTER))
&& !(ac97->flags & AC97_AD_MULTI)) {
/* Surround Master (0x38) is with stereo mutes */
- if ((err = snd_ac97_cmix_new_stereo(card, "Surround Playback",
- AC97_SURROUND_MASTER, 1, 0,
- ac97)) < 0)
+ err = snd_ac97_cmix_new_stereo(card, "Surround Playback",
+ AC97_SURROUND_MASTER, 1, 0,
+ ac97);
+ if (err < 0)
return err;
}
/* build headphone controls */
if (snd_ac97_try_volume_mix(ac97, AC97_HEADPHONE)) {
- if ((err = snd_ac97_cmix_new(card, "Headphone Playback",
- AC97_HEADPHONE, 0, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Headphone Playback",
+ AC97_HEADPHONE, 0, ac97);
+ if (err < 0)
return err;
}
/* build master mono controls */
if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_MONO)) {
- if ((err = snd_ac97_cmix_new(card, "Master Mono Playback",
- AC97_MASTER_MONO, 0, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Master Mono Playback",
+ AC97_MASTER_MONO, 0, ac97);
+ if (err < 0)
return err;
}
@@ -1386,7 +1395,9 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
if (!(ac97->flags & AC97_HAS_NO_TONE)) {
if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_TONE)) {
for (idx = 0; idx < 2; idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97))) < 0)
+ kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (ac97->id == AC97_ID_YMF743 ||
ac97->id == AC97_ID_YMF753) {
@@ -1402,9 +1413,12 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
if (!(ac97->flags & AC97_HAS_NO_PC_BEEP) &&
((ac97->flags & AC97_HAS_PC_BEEP) ||
snd_ac97_try_volume_mix(ac97, AC97_PC_BEEP))) {
- for (idx = 0; idx < 2; idx++)
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_pc_beep[idx], ac97))) < 0)
+ for (idx = 0; idx < 2; idx++) {
+ kctl = snd_ac97_cnew(&snd_ac97_controls_pc_beep[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
set_tlv_db_scale(kctl, db_scale_4bit);
snd_ac97_write_cache(
ac97,
@@ -1417,8 +1431,9 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build Phone controls */
if (!(ac97->flags & AC97_HAS_NO_PHONE)) {
if (snd_ac97_try_volume_mix(ac97, AC97_PHONE)) {
- if ((err = snd_ac97_cmix_new(card, "Phone Playback",
- AC97_PHONE, 1, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Phone Playback",
+ AC97_PHONE, 1, ac97);
+ if (err < 0)
return err;
}
}
@@ -1426,26 +1441,30 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build MIC controls */
if (!(ac97->flags & AC97_HAS_NO_MIC)) {
if (snd_ac97_try_volume_mix(ac97, AC97_MIC)) {
- if ((err = snd_ac97_cmix_new(card, "Mic Playback",
- AC97_MIC, 1, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Mic Playback",
+ AC97_MIC, 1, ac97);
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_mic_boost, ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_mic_boost, ac97));
+ if (err < 0)
return err;
}
}
/* build Line controls */
if (snd_ac97_try_volume_mix(ac97, AC97_LINE)) {
- if ((err = snd_ac97_cmix_new(card, "Line Playback",
- AC97_LINE, 1, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Line Playback",
+ AC97_LINE, 1, ac97);
+ if (err < 0)
return err;
}
/* build CD controls */
if (!(ac97->flags & AC97_HAS_NO_CD)) {
if (snd_ac97_try_volume_mix(ac97, AC97_CD)) {
- if ((err = snd_ac97_cmix_new(card, "CD Playback",
- AC97_CD, 1, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "CD Playback",
+ AC97_CD, 1, ac97);
+ if (err < 0)
return err;
}
}
@@ -1453,8 +1472,9 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build Video controls */
if (!(ac97->flags & AC97_HAS_NO_VIDEO)) {
if (snd_ac97_try_volume_mix(ac97, AC97_VIDEO)) {
- if ((err = snd_ac97_cmix_new(card, "Video Playback",
- AC97_VIDEO, 1, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Video Playback",
+ AC97_VIDEO, 1, ac97);
+ if (err < 0)
return err;
}
}
@@ -1462,8 +1482,9 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build Aux controls */
if (!(ac97->flags & AC97_HAS_NO_AUX)) {
if (snd_ac97_try_volume_mix(ac97, AC97_AUX)) {
- if ((err = snd_ac97_cmix_new(card, "Aux Playback",
- AC97_AUX, 1, ac97)) < 0)
+ err = snd_ac97_cmix_new(card, "Aux Playback",
+ AC97_AUX, 1, ac97);
+ if (err < 0)
return err;
}
}
@@ -1475,26 +1496,38 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
init_val = 0x9f9f;
else
init_val = 0x9f1f;
- for (idx = 0; idx < 2; idx++)
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_pcm[idx], ac97))) < 0)
+ for (idx = 0; idx < 2; idx++) {
+ kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_pcm[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
set_tlv_db_scale(kctl, db_scale_5bit);
ac97->spec.ad18xx.pcmreg[0] = init_val;
if (ac97->scaps & AC97_SCAP_SURROUND_DAC) {
- for (idx = 0; idx < 2; idx++)
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_surround[idx], ac97))) < 0)
+ for (idx = 0; idx < 2; idx++) {
+ kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_surround[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
set_tlv_db_scale(kctl, db_scale_5bit);
ac97->spec.ad18xx.pcmreg[1] = init_val;
}
if (ac97->scaps & AC97_SCAP_CENTER_LFE_DAC) {
- for (idx = 0; idx < 2; idx++)
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_center[idx], ac97))) < 0)
+ for (idx = 0; idx < 2; idx++) {
+ kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_center[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
set_tlv_db_scale(kctl, db_scale_5bit);
- for (idx = 0; idx < 2; idx++)
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_lfe[idx], ac97))) < 0)
+ for (idx = 0; idx < 2; idx++) {
+ kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_lfe[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
set_tlv_db_scale(kctl, db_scale_5bit);
ac97->spec.ad18xx.pcmreg[2] = init_val;
}
@@ -1515,7 +1548,8 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
/* build Capture controls */
if (!(ac97->flags & AC97_HAS_NO_REC_GAIN)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_control_capture_src, ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_control_capture_src, ac97));
+ if (err < 0)
return err;
if (snd_ac97_try_bit(ac97, AC97_REC_GAIN, 15)) {
err = snd_ac97_cmute_new(card, "Capture Switch",
@@ -1523,7 +1557,9 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
if (err < 0)
return err;
}
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_control_capture_vol, ac97))) < 0)
+ kctl = snd_ac97_cnew(&snd_ac97_control_capture_vol, ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
set_tlv_db_scale(kctl, db_scale_rec_gain);
snd_ac97_write_cache(ac97, AC97_REC_SEL, 0x0000);
@@ -1531,52 +1567,62 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
}
/* build MIC Capture controls */
if (snd_ac97_try_volume_mix(ac97, AC97_REC_GAIN_MIC)) {
- for (idx = 0; idx < 2; idx++)
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_mic_capture[idx], ac97))) < 0)
+ for (idx = 0; idx < 2; idx++) {
+ kctl = snd_ac97_cnew(&snd_ac97_controls_mic_capture[idx], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
set_tlv_db_scale(kctl, db_scale_rec_gain);
snd_ac97_write_cache(ac97, AC97_REC_GAIN_MIC, 0x0000);
}
/* build PCM out path & mute control */
if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 15)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_PCM_OUT], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_PCM_OUT], ac97));
+ if (err < 0)
return err;
}
/* build Simulated Stereo Enhancement control */
if (ac97->caps & AC97_BC_SIM_STEREO) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_STEREO_ENHANCEMENT], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_STEREO_ENHANCEMENT], ac97));
+ if (err < 0)
return err;
}
/* build 3D Stereo Enhancement control */
if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 13)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_3D], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_3D], ac97));
+ if (err < 0)
return err;
}
/* build Loudness control */
if (ac97->caps & AC97_BC_LOUDNESS) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOUDNESS], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOUDNESS], ac97));
+ if (err < 0)
return err;
}
/* build Mono output select control */
if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 9)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_MONO], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_MONO], ac97));
+ if (err < 0)
return err;
}
/* build Mic select control */
if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 8)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_MIC], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_MIC], ac97));
+ if (err < 0)
return err;
}
/* build ADC/DAC loopback control */
if (enable_loopback && snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 7)) {
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOOPBACK], ac97))) < 0)
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOOPBACK], ac97));
+ if (err < 0)
return err;
}
@@ -1592,11 +1638,15 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
snd_ac97_write(ac97, AC97_3D_CONTROL, val);
val = snd_ac97_read(ac97, AC97_3D_CONTROL);
val = val == 0x0606;
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
+ kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (val)
kctl->private_value = AC97_3D_CONTROL | (9 << 8) | (7 << 16);
- if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[1], ac97))) < 0)
+ kctl = snd_ac97_cnew(&snd_ac97_controls_3d[1], ac97);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (val)
kctl->private_value = AC97_3D_CONTROL | (1 << 8) | (7 << 16);
@@ -1613,14 +1663,18 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
if ((ac97->ext_id & AC97_EI_SPDIF) && !(ac97->scaps & AC97_SCAP_NO_SPDIF)) {
if (ac97->build_ops->build_spdif) {
- if ((err = ac97->build_ops->build_spdif(ac97)) < 0)
+ err = ac97->build_ops->build_spdif(ac97);
+ if (err < 0)
return err;
} else {
- for (idx = 0; idx < 5; idx++)
- if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_spdif[idx], ac97))) < 0)
+ for (idx = 0; idx < 5; idx++) {
+ err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_spdif[idx], ac97));
+ if (err < 0)
return err;
+ }
if (ac97->build_ops->build_post_spdif) {
- if ((err = ac97->build_ops->build_post_spdif(ac97)) < 0)
+ err = ac97->build_ops->build_post_spdif(ac97);
+ if (err < 0)
return err;
}
/* set default PCM S/PDIF params */
@@ -1632,9 +1686,11 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
}
/* build chip specific controls */
- if (ac97->build_ops->build_specific)
- if ((err = ac97->build_ops->build_specific(ac97)) < 0)
+ if (ac97->build_ops->build_specific) {
+ err = ac97->build_ops->build_specific(ac97);
+ if (err < 0)
return err;
+ }
if (snd_ac97_try_bit(ac97, AC97_POWERDOWN, 15)) {
kctl = snd_ac97_cnew(&snd_ac97_control_eapd, ac97);
@@ -1642,7 +1698,8 @@ static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
return -ENOMEM;
if (ac97->scaps & AC97_SCAP_INV_EAPD)
set_inv_eapd(ac97, kctl);
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
@@ -1664,14 +1721,18 @@ static int snd_ac97_modem_build(struct snd_card *card, struct snd_ac97 * ac97)
snd_ac97_write(ac97, AC97_MISC_AFE, 0x0);
/* build modem switches */
- for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_modem_switches); idx++)
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ac97_controls_modem_switches[idx], ac97))) < 0)
+ for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_modem_switches); idx++) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_ac97_controls_modem_switches[idx], ac97));
+ if (err < 0)
return err;
+ }
/* build chip specific controls */
- if (ac97->build_ops->build_specific)
- if ((err = ac97->build_ops->build_specific(ac97)) < 0)
+ if (ac97->build_ops->build_specific) {
+ err = ac97->build_ops->build_specific(ac97);
+ if (err < 0)
return err;
+ }
return 0;
}
@@ -1916,7 +1977,8 @@ int snd_ac97_bus(struct snd_card *card, int num,
bus->clock = 48000;
spin_lock_init(&bus->bus_lock);
snd_ac97_bus_proc_init(bus);
- if ((err = snd_device_new(card, SNDRV_DEV_BUS, bus, &dev_ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_BUS, bus, &dev_ops);
+ if (err < 0) {
snd_ac97_bus_free(bus);
return err;
}
@@ -1944,7 +2006,8 @@ static int snd_ac97_dev_register(struct snd_device *device)
dev_set_name(&ac97->dev, "%d-%d:%s",
ac97->bus->card->number, ac97->num,
snd_ac97_get_short_name(ac97));
- if ((err = device_register(&ac97->dev)) < 0) {
+ err = device_register(&ac97->dev);
+ if (err < 0) {
ac97_err(ac97, "Can't register ac97 bus\n");
ac97->dev.bus = NULL;
return err;
@@ -2095,7 +2158,8 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
if (!(ac97->scaps & AC97_SCAP_SKIP_AUDIO) && !(ac97->scaps & AC97_SCAP_AUDIO)) {
/* test if we can write to the record gain volume register */
snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x8a06);
- if (((err = snd_ac97_read(ac97, AC97_REC_GAIN)) & 0x7fff) == 0x0a06)
+ err = snd_ac97_read(ac97, AC97_REC_GAIN);
+ if ((err & 0x7fff) == 0x0a06)
ac97->scaps |= AC97_SCAP_AUDIO;
}
if (ac97->scaps & AC97_SCAP_AUDIO) {
@@ -2248,7 +2312,8 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
}
}
sprintf(comp, "AC97a:%08x", ac97->id);
- if ((err = snd_component_add(card, comp)) < 0) {
+ err = snd_component_add(card, comp);
+ if (err < 0) {
snd_ac97_free(ac97);
return err;
}
@@ -2268,7 +2333,8 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
}
}
sprintf(comp, "AC97m:%08x", ac97->id);
- if ((err = snd_component_add(card, comp)) < 0) {
+ err = snd_component_add(card, comp);
+ if (err < 0) {
snd_ac97_free(ac97);
return err;
}
@@ -2280,7 +2346,8 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
if (ac97_is_audio(ac97))
update_power_regs(ac97);
snd_ac97_proc_init(ac97);
- if ((err = snd_device_new(card, SNDRV_DEV_CODEC, ac97, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_CODEC, ac97, &ops);
+ if (err < 0) {
snd_ac97_free(ac97);
return err;
}
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 1627a74baf3c..025c1666c1fc 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -29,9 +29,11 @@ static int patch_build_controls(struct snd_ac97 * ac97, const struct snd_kcontro
{
int idx, err;
- for (idx = 0; idx < count; idx++)
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&controls[idx], ac97))) < 0)
+ for (idx = 0; idx < count; idx++) {
+ err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&controls[idx], ac97));
+ if (err < 0)
return err;
+ }
return 0;
}
@@ -416,7 +418,8 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
{
int err;
- if ((err = patch_build_controls(ac97, snd_ac97_ymf753_controls_spdif, ARRAY_SIZE(snd_ac97_ymf753_controls_spdif))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_ymf753_controls_spdif, ARRAY_SIZE(snd_ac97_ymf753_controls_spdif));
+ if (err < 0)
return err;
return 0;
}
@@ -461,7 +464,8 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
int err, i;
for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
+ err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97));
+ if (err < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
@@ -491,7 +495,8 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(wm9704_snd_ac97_controls); i++) {
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9704_snd_ac97_controls[i], ac97))) < 0)
+ err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9704_snd_ac97_controls[i], ac97));
+ if (err < 0)
return err;
}
/* patch for DVD noise */
@@ -631,7 +636,8 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
int err, i;
for (i = 0; i < ARRAY_SIZE(wm9711_snd_ac97_controls); i++) {
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9711_snd_ac97_controls[i], ac97))) < 0)
+ err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9711_snd_ac97_controls[i], ac97));
+ if (err < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_CODEC_CLASS_REV, 0x0808);
@@ -798,7 +804,8 @@ static int patch_wolfson_wm9713_3d (struct snd_ac97 * ac97)
int err, i;
for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_3d); i++) {
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_3d[i], ac97))) < 0)
+ err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_3d[i], ac97));
+ if (err < 0)
return err;
}
return 0;
@@ -809,7 +816,8 @@ static int patch_wolfson_wm9713_specific(struct snd_ac97 * ac97)
int err, i;
for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls); i++) {
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls[i], ac97))) < 0)
+ err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls[i], ac97));
+ if (err < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_PC_BEEP, 0x0808);
@@ -883,7 +891,8 @@ static int patch_sigmatel_stac9700_3d(struct snd_ac97 * ac97)
struct snd_kcontrol *kctl;
int err;
- if ((err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
+ err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97));
+ if (err < 0)
return err;
strcpy(kctl->id.name, "3D Control Sigmatel - Depth");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 2, 3, 0);
@@ -896,11 +905,15 @@ static int patch_sigmatel_stac9708_3d(struct snd_ac97 * ac97)
struct snd_kcontrol *kctl;
int err;
- if ((err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
+ kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97);
+ err = snd_ctl_add(ac97->bus->card, kctl);
+ if (err < 0)
return err;
strcpy(kctl->id.name, "3D Control Sigmatel - Depth");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 0, 3, 0);
- if ((err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
+ kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97);
+ err = snd_ctl_add(ac97->bus->card, kctl);
+ if (err < 0)
return err;
strcpy(kctl->id.name, "3D Control Sigmatel - Rear Depth");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 2, 3, 0);
@@ -927,18 +940,26 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
int err;
snd_ac97_write_cache(ac97, AC97_SIGMATEL_ANALOG, snd_ac97_read(ac97, AC97_SIGMATEL_ANALOG) & ~0x0003);
- if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_ANALOG, 1))
- if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_controls[0], 1)) < 0)
+ if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_ANALOG, 1)) {
+ err = patch_build_controls(ac97, &snd_ac97_sigmatel_controls[0], 1);
+ if (err < 0)
return err;
- if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_ANALOG, 0))
- if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_controls[1], 1)) < 0)
+ }
+ if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_ANALOG, 0)) {
+ err = patch_build_controls(ac97, &snd_ac97_sigmatel_controls[1], 1);
+ if (err < 0)
return err;
- if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_DAC2INVERT, 2))
- if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_4speaker, 1)) < 0)
+ }
+ if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_DAC2INVERT, 2)) {
+ err = patch_build_controls(ac97, &snd_ac97_sigmatel_4speaker, 1);
+ if (err < 0)
return err;
- if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_DAC2INVERT, 3))
- if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_phaseinvert, 1)) < 0)
+ }
+ if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_DAC2INVERT, 3)) {
+ err = patch_build_controls(ac97, &snd_ac97_sigmatel_phaseinvert, 1);
+ if (err < 0)
return err;
+ }
return 0;
}
@@ -984,7 +1005,8 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
snd_ac97_remove_ctl(ac97, "PCM Out Path & Mute", NULL);
snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Sigmatel Surround Playback");
- if ((err = patch_build_controls(ac97, &snd_ac97_stac9708_bias_control, 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_stac9708_bias_control, 1);
+ if (err < 0)
return err;
return patch_sigmatel_stac97xx_specific(ac97);
}
@@ -1262,14 +1284,17 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
int err;
/* con mask, pro mask, default */
- if ((err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3);
+ if (err < 0)
return err;
/* switch, spsa */
- if ((err = patch_build_controls(ac97, &snd_ac97_cirrus_controls_spdif[0], 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_cirrus_controls_spdif[0], 1);
+ if (err < 0)
return err;
switch (ac97->id & AC97_ID_CS_MASK) {
case AC97_ID_CS4205:
- if ((err = patch_build_controls(ac97, &snd_ac97_cirrus_controls_spdif[1], 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_cirrus_controls_spdif[1], 1);
+ if (err < 0)
return err;
break;
}
@@ -1324,10 +1349,12 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
int err;
/* con mask, pro mask, default */
- if ((err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3);
+ if (err < 0)
return err;
/* switch */
- if ((err = patch_build_controls(ac97, &snd_ac97_conexant_controls_spdif[0], 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_conexant_controls_spdif[0], 1);
+ if (err < 0)
return err;
/* set default PCM S/PDIF params */
/* consumer,PCM audio,no copyright,no preemphasis,PCM coder,original,48000Hz */
@@ -1592,7 +1619,8 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
{
int err;
- if ((err = patch_build_controls(ac97, snd_ac97_controls_ad1885, ARRAY_SIZE(snd_ac97_controls_ad1885))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_controls_ad1885, ARRAY_SIZE(snd_ac97_controls_ad1885));
+ if (err < 0)
return err;
reset_tlv(ac97, "Headphone Playback Volume",
db_scale_6bit_6db_max);
@@ -1875,7 +1903,8 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
{
int err;
- if ((err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
+ if (err < 0)
return err;
if (check_list(ac97, ad1981_jacks_denylist))
return 0;
@@ -2060,7 +2089,8 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
{
int err;
- if ((err = patch_ad1888_specific(ac97)) < 0)
+ err = patch_ad1888_specific(ac97);
+ if (err < 0)
return err;
return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
}
@@ -2168,7 +2198,8 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
"Master Surround Playback");
snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Master Playback");
- if ((err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
+ if (err < 0)
return err;
return patch_build_controls(ac97, snd_ac97_ad1985_controls,
@@ -2460,7 +2491,8 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
{
int err;
- if ((err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1)) < 0)
+ err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
+ if (err < 0)
return err;
return patch_build_controls(ac97, snd_ac97_ad1986_controls,
@@ -2582,10 +2614,12 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
{
int err;
- if ((err = patch_build_controls(ac97, snd_ac97_controls_alc650, ARRAY_SIZE(snd_ac97_controls_alc650))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_controls_alc650, ARRAY_SIZE(snd_ac97_controls_alc650));
+ if (err < 0)
return err;
if (ac97->ext_id & AC97_EI_SPDIF) {
- if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc650, ARRAY_SIZE(snd_ac97_spdif_controls_alc650))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc650, ARRAY_SIZE(snd_ac97_spdif_controls_alc650));
+ if (err < 0)
return err;
}
if (ac97->id != AC97_ID_ALC650F)
@@ -2735,10 +2769,12 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
{
int err;
- if ((err = patch_build_controls(ac97, snd_ac97_controls_alc655, ARRAY_SIZE(snd_ac97_controls_alc655))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_controls_alc655, ARRAY_SIZE(snd_ac97_controls_alc655));
+ if (err < 0)
return err;
if (ac97->ext_id & AC97_EI_SPDIF) {
- if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc655, ARRAY_SIZE(snd_ac97_spdif_controls_alc655))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc655, ARRAY_SIZE(snd_ac97_spdif_controls_alc655));
+ if (err < 0)
return err;
}
return 0;
@@ -2847,10 +2883,12 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
{
int err;
- if ((err = patch_build_controls(ac97, snd_ac97_controls_alc850, ARRAY_SIZE(snd_ac97_controls_alc850))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_controls_alc850, ARRAY_SIZE(snd_ac97_controls_alc850));
+ if (err < 0)
return err;
if (ac97->ext_id & AC97_EI_SPDIF) {
- if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc655, ARRAY_SIZE(snd_ac97_spdif_controls_alc655))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc655, ARRAY_SIZE(snd_ac97_spdif_controls_alc655));
+ if (err < 0)
return err;
}
return 0;
@@ -3437,10 +3475,13 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
struct snd_kcontrol *kctl;
int err;
- if (snd_ac97_try_bit(ac97, 0x5a, 9))
- if ((err = patch_build_controls(ac97, &snd_ac97_controls_vt1616[0], 1)) < 0)
+ if (snd_ac97_try_bit(ac97, 0x5a, 9)) {
+ err = patch_build_controls(ac97, &snd_ac97_controls_vt1616[0], 1);
+ if (err < 0)
return err;
- if ((err = patch_build_controls(ac97, &snd_ac97_controls_vt1616[1], ARRAY_SIZE(snd_ac97_controls_vt1616) - 1)) < 0)
+ }
+ err = patch_build_controls(ac97, &snd_ac97_controls_vt1616[1], ARRAY_SIZE(snd_ac97_controls_vt1616) - 1);
+ if (err < 0)
return err;
/* There is already a misnamed master switch. Rename it. */
@@ -3810,9 +3851,11 @@ static const struct snd_kcontrol_new snd_ac97_spdif_controls_it2646[] = {
static int patch_it2646_specific(struct snd_ac97 * ac97)
{
int err;
- if ((err = patch_build_controls(ac97, snd_ac97_controls_it2646, ARRAY_SIZE(snd_ac97_controls_it2646))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_controls_it2646, ARRAY_SIZE(snd_ac97_controls_it2646));
+ if (err < 0)
return err;
- if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_it2646, ARRAY_SIZE(snd_ac97_spdif_controls_it2646))) < 0)
+ err = patch_build_controls(ac97, snd_ac97_spdif_controls_it2646, ARRAY_SIZE(snd_ac97_spdif_controls_it2646));
+ if (err < 0)
return err;
return 0;
}
@@ -3845,9 +3888,11 @@ AC97_DOUBLE("Modem Speaker Volume", 0x5c, 14, 12, 3, 1)
static int patch_si3036_specific(struct snd_ac97 * ac97)
{
int idx, err;
- for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_si3036); idx++)
- if ((err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_si3036[idx], ac97))) < 0)
+ for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_si3036); idx++) {
+ err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_si3036[idx], ac97));
+ if (err < 0)
return err;
+ }
return 0;
}
@@ -3912,9 +3957,11 @@ AC97_SINGLE("Smart Low Power Mode", 0x6c, 4, 3, 0),
static int patch_ucb1400_specific(struct snd_ac97 * ac97)
{
int idx, err;
- for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_ucb1400); idx++)
- if ((err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_ucb1400[idx], ac97))) < 0)
+ for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_ucb1400); idx++) {
+ err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_ucb1400[idx], ac97));
+ if (err < 0)
return err;
+ }
return 0;
}
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 4520022801d9..5c78951dd596 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -852,7 +852,8 @@ snd_ad1889_create(struct snd_card *card,
*rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
/* check PCI availability (32bit DMA) */
@@ -863,7 +864,8 @@ snd_ad1889_create(struct snd_card *card,
}
/* allocate chip specific data with zero-filled memory */
- if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
pci_disable_device(pci);
return -ENOMEM;
}
@@ -874,7 +876,8 @@ snd_ad1889_create(struct snd_card *card,
chip->irq = -1;
/* (1) PCI resource allocation */
- if ((err = pci_request_regions(pci, card->driver)) < 0)
+ err = pci_request_regions(pci, card->driver);
+ if (err < 0)
goto free_and_ret;
chip->bar = pci_resource_start(pci, 0);
@@ -900,12 +903,14 @@ snd_ad1889_create(struct snd_card *card,
card->sync_irq = chip->irq;
/* (2) initialization of the chip hardware */
- if ((err = snd_ad1889_init(chip)) < 0) {
+ err = snd_ad1889_init(chip);
+ if (err < 0) {
snd_ad1889_free(chip);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_ad1889_free(chip);
return err;
}
diff --git a/sound/pci/ak4531_codec.c b/sound/pci/ak4531_codec.c
index e0a81f99f79a..6af88e7b86f8 100644
--- a/sound/pci/ak4531_codec.c
+++ b/sound/pci/ak4531_codec.c
@@ -384,7 +384,8 @@ int snd_ak4531_mixer(struct snd_card *card,
return -ENOMEM;
*ak4531 = *_ak4531;
mutex_init(&ak4531->reg_mutex);
- if ((err = snd_component_add(card, "AK4531")) < 0) {
+ err = snd_component_add(card, "AK4531");
+ if (err < 0) {
snd_ak4531_free(ak4531);
return err;
}
@@ -398,13 +399,15 @@ int snd_ak4531_mixer(struct snd_card *card,
ak4531->write(ak4531, idx, ak4531->regs[idx] = snd_ak4531_initial_map[idx]); /* recording source is mixer */
}
for (idx = 0; idx < ARRAY_SIZE(snd_ak4531_controls); idx++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ak4531_controls[idx], ak4531))) < 0) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_ak4531_controls[idx], ak4531));
+ if (err < 0) {
snd_ak4531_free(ak4531);
return err;
}
}
snd_ak4531_proc_init(card, ak4531);
- if ((err = snd_device_new(card, SNDRV_DEV_CODEC, ak4531, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_CODEC, ak4531, &ops);
+ if (err < 0) {
snd_ak4531_free(ak4531);
return err;
}
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index bd4fd09e982b..668008fc21f7 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -298,7 +298,8 @@ static int snd_als300_ac97(struct snd_als300 *chip)
.read = snd_als300_ac97_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus);
+ if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
@@ -621,7 +622,8 @@ static int snd_als300_create(struct snd_card *card,
};
*rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(28))) {
@@ -643,7 +645,8 @@ static int snd_als300_create(struct snd_card *card,
chip->chip_type = chip_type;
spin_lock_init(&chip->reg_lock);
- if ((err = pci_request_regions(pci, "ALS300")) < 0) {
+ err = pci_request_regions(pci, "ALS300");
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -673,14 +676,15 @@ static int snd_als300_create(struct snd_card *card,
return err;
}
- if ((err = snd_als300_new_pcm(chip)) < 0) {
+ err = snd_als300_new_pcm(chip);
+ if (err < 0) {
dev_err(card->dev, "Could not create PCM\n");
snd_als300_free(chip);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_als300_free(chip);
return err;
}
@@ -741,7 +745,8 @@ static int snd_als300_probe(struct pci_dev *pci,
chip_type = pci_id->driver_data;
- if ((err = snd_als300_create(card, pci, chip_type, &chip)) < 0) {
+ err = snd_als300_create(card, pci, chip_type, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -758,7 +763,8 @@ static int snd_als300_probe(struct pci_dev *pci,
sprintf(card->longname, "%s at 0x%lx irq %i",
card->shortname, chip->port, chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index 139ac2a3a0ef..509f317ee682 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -832,9 +832,10 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
}
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0) {
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
- }
+
/* check, if we can restrict PCI DMA transfers to 24 bits */
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(24))) {
dev_err(&pci->dev, "architecture does not support 24bit PCI busmaster DMA\n");
@@ -842,7 +843,8 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
return -ENXIO;
}
- if ((err = pci_request_regions(pci, "ALS4000")) < 0) {
+ err = pci_request_regions(pci, "ALS4000");
+ if (err < 0) {
pci_disable_device(pci);
return err;
}
@@ -869,17 +871,17 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
/* disable all legacy ISA stuff */
snd_als4000_set_addr(acard->iobase, 0, 0, 0, 0);
- if ((err = snd_sbdsp_create(card,
- iobase + ALS4K_IOB_10_ADLIB_ADDR0,
- pci->irq,
+ err = snd_sbdsp_create(card,
+ iobase + ALS4K_IOB_10_ADLIB_ADDR0,
+ pci->irq,
/* internally registered as IRQF_SHARED in case of ALS4000 SB */
- snd_als4000_interrupt,
- -1,
- -1,
- SB_HW_ALS4000,
- &chip)) < 0) {
+ snd_als4000_interrupt,
+ -1,
+ -1,
+ SB_HW_ALS4000,
+ &chip);
+ if (err < 0)
goto out_err;
- }
acard->chip = chip;
chip->pci = pci;
@@ -892,11 +894,12 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
sprintf(card->longname, "%s at 0x%lx, irq %i",
card->shortname, chip->alt_port, chip->irq);
- if ((err = snd_mpu401_uart_new( card, 0, MPU401_HW_ALS4000,
- iobase + ALS4K_IOB_30_MIDI_DATA,
- MPU401_INFO_INTEGRATED |
- MPU401_INFO_IRQ_HOOK,
- -1, &chip->rmidi)) < 0) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_ALS4000,
+ iobase + ALS4K_IOB_30_MIDI_DATA,
+ MPU401_INFO_INTEGRATED |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &chip->rmidi);
+ if (err < 0) {
dev_err(&pci->dev, "no MPU-401 device at 0x%lx?\n",
iobase + ALS4K_IOB_30_MIDI_DATA);
goto out_err;
@@ -907,12 +910,13 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
* however there doesn't seem to be an ALSA API for this...
* SPECS_PAGE: 21 */
- if ((err = snd_als4000_pcm(chip, 0)) < 0) {
+ err = snd_als4000_pcm(chip, 0);
+ if (err < 0)
goto out_err;
- }
- if ((err = snd_sbmixer_new(chip)) < 0) {
+
+ err = snd_sbmixer_new(chip);
+ if (err < 0)
goto out_err;
- }
if (snd_opl3_create(card,
iobase + ALS4K_IOB_10_ADLIB_ADDR0,
@@ -922,16 +926,17 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
iobase + ALS4K_IOB_10_ADLIB_ADDR0,
iobase + ALS4K_IOB_12_ADLIB_ADDR2);
} else {
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0)
goto out_err;
- }
}
snd_als4000_create_gameport(acard, dev);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0)
goto out_err;
- }
+
pci_set_drvdata(pci, card);
dev++;
err = 0;
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 579425ccbb6a..5f8aa35c4bea 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -1039,7 +1039,8 @@ static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
/* direct SPDIF */
runtime->hw.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE;
}
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
runtime->private_data = dma;
@@ -1415,7 +1416,8 @@ static int snd_atiixp_mixer_new(struct atiixp *chip, int clock,
if (snd_atiixp_codec_detect(chip) < 0)
return -ENXIO;
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus);
+ if (err < 0)
return err;
pbus->clock = clock;
chip->ac97_bus = pbus;
@@ -1431,7 +1433,8 @@ static int snd_atiixp_mixer_new(struct atiixp *chip, int clock,
ac97.scaps = AC97_SCAP_SKIP_MODEM | AC97_SCAP_POWER_SAVE;
if (! chip->spdif_over_aclink)
ac97.scaps |= AC97_SCAP_NO_SPDIF;
- if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) {
+ err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i]);
+ if (err < 0) {
chip->ac97[i] = NULL; /* to be sure */
dev_dbg(chip->card->dev,
"codec %d not available for audio\n", i);
@@ -1562,7 +1565,8 @@ static int snd_atiixp_create(struct snd_card *card,
struct atiixp *chip;
int err;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -1576,7 +1580,8 @@ static int snd_atiixp_create(struct snd_card *card,
chip->card = card;
chip->pci = pci;
chip->irq = -1;
- if ((err = pci_request_regions(pci, "ATI IXP AC97")) < 0) {
+ err = pci_request_regions(pci, "ATI IXP AC97");
+ if (err < 0) {
pci_disable_device(pci);
kfree(chip);
return err;
@@ -1599,7 +1604,8 @@ static int snd_atiixp_create(struct snd_card *card,
card->sync_irq = chip->irq;
pci_set_master(pci);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_atiixp_free(chip);
return err;
}
@@ -1622,19 +1628,23 @@ static int snd_atiixp_probe(struct pci_dev *pci,
strcpy(card->driver, spdif_aclink ? "ATIIXP" : "ATIIXP-SPDMA");
strcpy(card->shortname, "ATI IXP");
- if ((err = snd_atiixp_create(card, pci, &chip)) < 0)
+ err = snd_atiixp_create(card, pci, &chip);
+ if (err < 0)
goto __error;
card->private_data = chip;
- if ((err = snd_atiixp_aclink_reset(chip)) < 0)
+ err = snd_atiixp_aclink_reset(chip);
+ if (err < 0)
goto __error;
chip->spdif_over_aclink = spdif_aclink;
- if ((err = snd_atiixp_mixer_new(chip, ac97_clock, ac97_quirk)) < 0)
+ err = snd_atiixp_mixer_new(chip, ac97_clock, ac97_quirk);
+ if (err < 0)
goto __error;
- if ((err = snd_atiixp_pcm_new(chip)) < 0)
+ err = snd_atiixp_pcm_new(chip);
+ if (err < 0)
goto __error;
snd_atiixp_proc_init(chip);
@@ -1647,7 +1657,8 @@ static int snd_atiixp_probe(struct pci_dev *pci,
chip->ac97[0] ? snd_ac97_get_short_name(chip->ac97[0]) : "?",
chip->addr, chip->irq);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto __error;
pci_set_drvdata(pci, card);
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 45e75afec7a0..9739c3a82777 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -856,12 +856,12 @@ static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
dma->substream = substream;
runtime->hw = snd_atiixp_pcm_hw;
dma->ac97_pcm_type = pcm_type;
- if ((err = snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE,
- &hw_constraints_rates)) < 0)
+ err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &hw_constraints_rates);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
runtime->private_data = dma;
@@ -1058,7 +1058,8 @@ static int snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
if (snd_atiixp_codec_detect(chip) < 0)
return -ENXIO;
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus);
+ if (err < 0)
return err;
pbus->clock = clock;
chip->ac97_bus = pbus;
@@ -1072,7 +1073,8 @@ static int snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
ac97.pci = chip->pci;
ac97.num = i;
ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
- if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) {
+ err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i]);
+ if (err < 0) {
chip->ac97[i] = NULL; /* to be sure */
dev_dbg(chip->card->dev,
"codec %d not available for modem\n", i);
@@ -1192,7 +1194,8 @@ static int snd_atiixp_create(struct snd_card *card,
struct atiixp_modem *chip;
int err;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -1206,7 +1209,8 @@ static int snd_atiixp_create(struct snd_card *card,
chip->card = card;
chip->pci = pci;
chip->irq = -1;
- if ((err = pci_request_regions(pci, "ATI IXP MC97")) < 0) {
+ err = pci_request_regions(pci, "ATI IXP MC97");
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -1229,7 +1233,8 @@ static int snd_atiixp_create(struct snd_card *card,
card->sync_irq = chip->irq;
pci_set_master(pci);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_atiixp_free(chip);
return err;
}
@@ -1252,17 +1257,21 @@ static int snd_atiixp_probe(struct pci_dev *pci,
strcpy(card->driver, "ATIIXP-MODEM");
strcpy(card->shortname, "ATI IXP Modem");
- if ((err = snd_atiixp_create(card, pci, &chip)) < 0)
+ err = snd_atiixp_create(card, pci, &chip);
+ if (err < 0)
goto __error;
card->private_data = chip;
- if ((err = snd_atiixp_aclink_reset(chip)) < 0)
+ err = snd_atiixp_aclink_reset(chip);
+ if (err < 0)
goto __error;
- if ((err = snd_atiixp_mixer_new(chip, ac97_clock)) < 0)
+ err = snd_atiixp_mixer_new(chip, ac97_clock);
+ if (err < 0)
goto __error;
- if ((err = snd_atiixp_pcm_new(chip)) < 0)
+ err = snd_atiixp_pcm_new(chip);
+ if (err < 0)
goto __error;
snd_atiixp_proc_init(chip);
@@ -1272,7 +1281,8 @@ static int snd_atiixp_probe(struct pci_dev *pci,
sprintf(card->longname, "%s rev %x at 0x%lx, irq %i",
card->shortname, pci->revision, chip->addr, chip->irq);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto __error;
pci_set_drvdata(pci, card);
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index 1b37b7225b1d..1f7fee470266 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -46,8 +46,9 @@ MODULE_DEVICE_TABLE(pci, snd_vortex_ids);
static void vortex_fix_latency(struct pci_dev *vortex)
{
int rc;
- if (!(rc = pci_write_config_byte(vortex, 0x40, 0xff))) {
- dev_info(&vortex->dev, "vortex latency is 0xff\n");
+ rc = pci_write_config_byte(vortex, 0x40, 0xff);
+ if (!rc) {
+ dev_info(&vortex->dev, "vortex latency is 0xff\n");
} else {
dev_warn(&vortex->dev,
"could not set vortex latency: pci error 0x%x\n", rc);
@@ -65,9 +66,12 @@ static void vortex_fix_agp_bridge(struct pci_dev *via)
* read the config and it is not already set
*/
- if (!(rc = pci_read_config_byte(via, 0x42, &value))
- && ((value & 0x10)
- || !(rc = pci_write_config_byte(via, 0x42, value | 0x10)))) {
+ rc = pci_read_config_byte(via, 0x42, &value);
+ if (!rc) {
+ if (!(value & 0x10))
+ rc = pci_write_config_byte(via, 0x42, value | 0x10);
+ }
+ if (!rc) {
dev_info(&via->dev, "bridge config is 0x%x\n", value | 0x10);
} else {
dev_warn(&via->dev,
@@ -102,14 +106,16 @@ static void snd_vortex_workaround(struct pci_dev *vortex, int fix)
} else {
if (fix & 0x1)
vortex_fix_latency(vortex);
- if ((fix & 0x2) && (via = pci_get_device(PCI_VENDOR_ID_VIA,
- PCI_DEVICE_ID_VIA_8365_1, NULL)))
- vortex_fix_agp_bridge(via);
- if ((fix & 0x4) && (via = pci_get_device(PCI_VENDOR_ID_VIA,
- PCI_DEVICE_ID_VIA_82C598_1, NULL)))
- vortex_fix_agp_bridge(via);
- if ((fix & 0x8) && (via = pci_get_device(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_7007, NULL)))
+ if (fix & 0x2)
+ via = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8365_1, NULL);
+ else if (fix & 0x4)
+ via = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_82C598_1, NULL);
+ else if (fix & 0x8)
+ via = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_FE_GATE_7007, NULL);
+ if (via)
vortex_fix_agp_bridge(via);
}
pci_dev_put(via);
@@ -147,7 +153,8 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
*rchip = NULL;
// check PCI availability (DMA).
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32))) {
dev_err(card->dev, "error to set DMA mask\n");
@@ -174,7 +181,8 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
// (1) PCI resource allocation
// Get MMIO area
//
- if ((err = pci_request_regions(pci, CARD_NAME_SHORT)) != 0)
+ err = pci_request_regions(pci, CARD_NAME_SHORT);
+ if (err)
goto regions_out;
chip->mmio = pci_ioremap_bar(pci, 0);
@@ -187,14 +195,15 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
/* Init audio core.
* This must be done before we do request_irq otherwise we can get spurious
* interrupts that we do not handle properly and make a mess of things */
- if ((err = vortex_core_init(chip)) != 0) {
+ err = vortex_core_init(chip);
+ if (err) {
dev_err(card->dev, "hw core init failed\n");
goto core_out;
}
- if ((err = request_irq(pci->irq, vortex_interrupt,
- IRQF_SHARED, KBUILD_MODNAME,
- chip)) != 0) {
+ err = request_irq(pci->irq, vortex_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (err) {
dev_err(card->dev, "cannot grab irq\n");
goto irq_out;
}
@@ -205,9 +214,9 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
// End of PCI setup.
// Register alsa root device.
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0)
goto alloc_out;
- }
*rchip = chip;
@@ -252,7 +261,8 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return err;
// (3)
- if ((err = snd_vortex_create(card, pci, &chip)) < 0) {
+ err = snd_vortex_create(card, pci, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -278,12 +288,14 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
}
#ifndef CHIP_AU8820
// ADB SPDIF
- if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_SPDIF, 1)) < 0) {
+ err = snd_vortex_new_pcm(chip, VORTEX_PCM_SPDIF, 1);
+ if (err < 0) {
snd_card_free(card);
return err;
}
// A3D
- if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_A3D, NR_A3D)) < 0) {
+ err = snd_vortex_new_pcm(chip, VORTEX_PCM_A3D, NR_A3D);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -297,12 +309,14 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
*/
#ifndef CHIP_AU8810
// WT pcm.
- if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_WT, NR_WT)) < 0) {
+ err = snd_vortex_new_pcm(chip, VORTEX_PCM_WT, NR_WT);
+ if (err < 0) {
snd_card_free(card);
return err;
}
#endif
- if ((err = snd_vortex_midi(chip)) < 0) {
+ err = snd_vortex_midi(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -327,13 +341,13 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
#endif
// (5)
- if ((err = pci_read_config_word(pci, PCI_DEVICE_ID,
- &(chip->device))) < 0) {
+ err = pci_read_config_word(pci, PCI_DEVICE_ID, &chip->device);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = pci_read_config_word(pci, PCI_VENDOR_ID,
- &(chip->vendor))) < 0) {
+ err = pci_read_config_word(pci, PCI_VENDOR_ID, &chip->vendor);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -352,7 +366,8 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
#endif
// (6)
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/au88x0/au88x0_a3d.c b/sound/pci/au88x0/au88x0_a3d.c
index 2db183f8826a..eabaee0463fe 100644
--- a/sound/pci/au88x0/au88x0_a3d.c
+++ b/sound/pci/au88x0/au88x0_a3d.c
@@ -849,46 +849,50 @@ static int vortex_a3d_register_controls(vortex_t *vortex)
int err, i;
/* HRTF controls. */
for (i = 0; i < NR_A3D; i++) {
- if ((kcontrol =
- snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i])) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i]);
+ if (!kcontrol)
return -ENOMEM;
kcontrol->id.numid = CTRLID_HRTF;
kcontrol->info = snd_vortex_a3d_hrtf_info;
kcontrol->put = snd_vortex_a3d_hrtf_put;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
}
/* ITD controls. */
for (i = 0; i < NR_A3D; i++) {
- if ((kcontrol =
- snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i])) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i]);
+ if (!kcontrol)
return -ENOMEM;
kcontrol->id.numid = CTRLID_ITD;
kcontrol->info = snd_vortex_a3d_itd_info;
kcontrol->put = snd_vortex_a3d_itd_put;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
}
/* ILD (gains) controls. */
for (i = 0; i < NR_A3D; i++) {
- if ((kcontrol =
- snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i])) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i]);
+ if (!kcontrol)
return -ENOMEM;
kcontrol->id.numid = CTRLID_GAINS;
kcontrol->info = snd_vortex_a3d_ild_info;
kcontrol->put = snd_vortex_a3d_ild_put;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
}
/* Filter controls. */
for (i = 0; i < NR_A3D; i++) {
- if ((kcontrol =
- snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i])) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_a3d_kcontrol, &vortex->a3d[i]);
+ if (!kcontrol)
return -ENOMEM;
kcontrol->id.numid = CTRLID_FILTER;
kcontrol->info = snd_vortex_a3d_filter_info;
kcontrol->put = snd_vortex_a3d_filter_put;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
}
return 0;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 5180f1bd1326..2ed5100b8cae 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2120,9 +2120,9 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_DMA);
} else {
en = 1;
- if ((dma =
- vortex_adb_checkinout(vortex, NULL, en,
- VORTEX_RESOURCE_DMA)) < 0)
+ dma = vortex_adb_checkinout(vortex, NULL, en,
+ VORTEX_RESOURCE_DMA);
+ if (dma < 0)
return -EBUSY;
}
@@ -2140,18 +2140,20 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
/* Get SRC and MIXER hardware resources. */
if (stream->type != VORTEX_PCM_SPDIF) {
for (i = 0; i < nr_ch; i++) {
- if ((src[i] = vortex_adb_checkinout(vortex,
- stream->resources, en,
- VORTEX_RESOURCE_SRC)) < 0) {
+ src[i] = vortex_adb_checkinout(vortex,
+ stream->resources, en,
+ VORTEX_RESOURCE_SRC);
+ if (src[i] < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
- if ((mix[i] = vortex_adb_checkinout(vortex,
- stream->resources,
- en,
- VORTEX_RESOURCE_MIXIN)) < 0) {
+ mix[i] = vortex_adb_checkinout(vortex,
+ stream->resources,
+ en,
+ VORTEX_RESOURCE_MIXIN);
+ if (mix[i] < 0) {
memset(stream->resources,
0,
sizeof(stream->resources));
@@ -2162,10 +2164,10 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
}
#ifndef CHIP_AU8820
if (stream->type == VORTEX_PCM_A3D) {
- if ((a3d =
- vortex_adb_checkinout(vortex,
- stream->resources, en,
- VORTEX_RESOURCE_A3D)) < 0) {
+ a3d = vortex_adb_checkinout(vortex,
+ stream->resources, en,
+ VORTEX_RESOURCE_A3D);
+ if (a3d < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
dev_err(vortex->card->dev,
@@ -2278,19 +2280,18 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
/* Get SRC and MIXER hardware resources. */
for (i = 0; i < nr_ch; i++) {
- if ((mix[i] =
- vortex_adb_checkinout(vortex,
- stream->resources, en,
- VORTEX_RESOURCE_MIXOUT))
- < 0) {
+ mix[i] = vortex_adb_checkinout(vortex,
+ stream->resources, en,
+ VORTEX_RESOURCE_MIXOUT);
+ if (mix[i] < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
return -EBUSY;
}
- if ((src[i] =
- vortex_adb_checkinout(vortex,
- stream->resources, en,
- VORTEX_RESOURCE_SRC)) < 0) {
+ src[i] = vortex_adb_checkinout(vortex,
+ stream->resources, en,
+ VORTEX_RESOURCE_SRC);
+ if (src[i] < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
return -EBUSY;
diff --git a/sound/pci/au88x0/au88x0_eq.c b/sound/pci/au88x0/au88x0_eq.c
index 58e92f2a72c0..71c13100d7ef 100644
--- a/sound/pci/au88x0/au88x0_eq.c
+++ b/sound/pci/au88x0/au88x0_eq.c
@@ -873,29 +873,33 @@ static int vortex_eq_init(vortex_t *vortex)
vortex_Eqlzr_init(vortex);
- if ((kcontrol =
- snd_ctl_new1(&vortex_eqtoggle_kcontrol, vortex)) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_eqtoggle_kcontrol, vortex);
+ if (!kcontrol)
return -ENOMEM;
kcontrol->private_value = 0;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
/* EQ gain controls */
for (i = 0; i < 10; i++) {
- if ((kcontrol =
- snd_ctl_new1(&vortex_eq_kcontrol, vortex)) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_eq_kcontrol, vortex);
+ if (!kcontrol)
return -ENOMEM;
snprintf(kcontrol->id.name, sizeof(kcontrol->id.name),
"%s Playback Volume", EqBandLabels[i]);
kcontrol->private_value = i;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
//vortex->eqctrl[i] = kcontrol;
}
/* EQ band levels */
- if ((kcontrol = snd_ctl_new1(&vortex_levels_kcontrol, vortex)) == NULL)
+ kcontrol = snd_ctl_new1(&vortex_levels_kcontrol, vortex);
+ if (!kcontrol)
return -ENOMEM;
- if ((err = snd_ctl_add(vortex->card, kcontrol)) < 0)
+ err = snd_ctl_add(vortex->card, kcontrol);
+ if (err < 0)
return err;
return 0;
diff --git a/sound/pci/au88x0/au88x0_mixer.c b/sound/pci/au88x0/au88x0_mixer.c
index 5b647682b683..aeba684b8d18 100644
--- a/sound/pci/au88x0/au88x0_mixer.c
+++ b/sound/pci/au88x0/au88x0_mixer.c
@@ -30,7 +30,8 @@ static int snd_vortex_mixer(vortex_t *vortex)
.read = vortex_codec_read,
};
- if ((err = snd_ac97_bus(vortex->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(vortex->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
// Initialize AC97 codec stuff.
diff --git a/sound/pci/au88x0/au88x0_mpu401.c b/sound/pci/au88x0/au88x0_mpu401.c
index 603494e7d30e..164f6b7039ab 100644
--- a/sound/pci/au88x0/au88x0_mpu401.c
+++ b/sound/pci/au88x0/au88x0_mpu401.c
@@ -68,9 +68,9 @@ static int snd_vortex_midi(vortex_t *vortex)
/* Create MPU401 instance. */
#ifdef VORTEX_MPU401_LEGACY
- if ((temp =
- snd_mpu401_uart_new(vortex->card, 0, MPU401_HW_MPU401, 0x330,
- MPU401_INFO_IRQ_HOOK, -1, &rmidi)) != 0) {
+ temp = snd_mpu401_uart_new(vortex->card, 0, MPU401_HW_MPU401, 0x330,
+ MPU401_INFO_IRQ_HOOK, -1, &rmidi);
+ if (temp) {
hwwrite(vortex->mmio, VORTEX_CTRL,
(hwread(vortex->mmio, VORTEX_CTRL) &
~CTRL_MIDI_PORT) & ~CTRL_MIDI_EN);
@@ -78,10 +78,10 @@ static int snd_vortex_midi(vortex_t *vortex)
}
#else
port = (unsigned long)(vortex->mmio + VORTEX_MIDI_DATA);
- if ((temp =
- snd_mpu401_uart_new(vortex->card, 0, MPU401_HW_AUREAL, port,
- MPU401_INFO_INTEGRATED | MPU401_INFO_MMIO |
- MPU401_INFO_IRQ_HOOK, -1, &rmidi)) != 0) {
+ temp = snd_mpu401_uart_new(vortex->card, 0, MPU401_HW_AUREAL, port,
+ MPU401_INFO_INTEGRATED | MPU401_INFO_MMIO |
+ MPU401_INFO_IRQ_HOOK, -1, &rmidi);
+ if (temp) {
hwwrite(vortex->mmio, VORTEX_CTRL,
(hwread(vortex->mmio, VORTEX_CTRL) &
~CTRL_MIDI_PORT) & ~CTRL_MIDI_EN);
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index d019aa566de3..546f71220604 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -130,14 +130,14 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
int err;
/* Force equal size periods */
- if ((err =
- snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
/* Avoid PAGE_SIZE boundary to fall inside of a period. */
- if ((err =
- snd_pcm_hw_constraint_pow2(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES)) < 0)
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES);
+ if (err < 0)
return err;
snd_pcm_hw_constraint_step(runtime, 0,
@@ -658,7 +658,8 @@ static int snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
kctl = snd_ctl_new1(&snd_vortex_mixer_spdif[i], chip);
if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(chip->card, kctl)) < 0)
+ err = snd_ctl_add(chip->card, kctl);
+ if (err < 0)
return err;
}
}
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 51dcf1bc4c0c..4ee7ab409807 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1194,7 +1194,8 @@ snd_azf3328_mixer_new(struct snd_azf3328 *chip)
sw = snd_azf3328_mixer_controls;
for (idx = 0; idx < ARRAY_SIZE(snd_azf3328_mixer_controls);
++idx, ++sw) {
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(sw, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(sw, chip));
+ if (err < 0)
return err;
}
snd_component_add(card, "AZF3328 mixer");
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 91512b345d19..39bcfb81e81c 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -719,7 +719,8 @@ static int snd_bt87x_create(struct snd_card *card,
chip->irq = -1;
spin_lock_init(&chip->reg_lock);
- if ((err = pci_request_regions(pci, "Bt87x audio")) < 0) {
+ err = pci_request_regions(pci, "Bt87x audio");
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index bee4710916c4..99778711006a 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -575,9 +575,11 @@ static int snd_ca0106_pcm_open_playback_channel(struct snd_pcm_substream *substr
*/
//channel->interrupt = snd_ca0106_pcm_channel_interrupt;
channel->epcm = epcm;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64)) < 0)
+ err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ if (err < 0)
return err;
snd_pcm_set_sync(substream);
@@ -668,10 +670,12 @@ static int snd_ca0106_pcm_open_capture_channel(struct snd_pcm_substream *substre
*/
//channel->interrupt = snd_ca0106_pcm_channel_interrupt;
channel->epcm = epcm;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
//snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, &hw_constraints_capture_period_sizes);
- if ((err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64)) < 0)
+ err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ if (err < 0)
return err;
return 0;
}
@@ -1166,7 +1170,8 @@ static int snd_ca0106_ac97(struct snd_ca0106 *chip)
.read = snd_ca0106_ac97_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
pbus->no_vra = 1; /* we don't need VRA */
@@ -1759,7 +1764,8 @@ static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
midi->dev_id = chip;
- if ((err = ca_midi_init(chip, midi, 0, name)) < 0)
+ err = ca_midi_init(chip, midi, 0, name);
+ if (err < 0)
return err;
return 0;
diff --git a/sound/pci/ca0106/ca_midi.c b/sound/pci/ca0106/ca_midi.c
index 18524e0a9102..957e60f64821 100644
--- a/sound/pci/ca0106/ca_midi.c
+++ b/sound/pci/ca0106/ca_midi.c
@@ -276,7 +276,8 @@ int ca_midi_init(void *dev_id, struct snd_ca_midi *midi, int device, char *name)
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(midi->get_dev_id_card(midi->dev_id), name, device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(midi->get_dev_id_card(midi->dev_id), name, device, 1, 1, &rmidi);
+ if (err < 0)
return err;
midi->dev_id = dev_id;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 598446348da6..f44f118aacad 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1225,9 +1225,11 @@ static int setup_spdif_playback(struct cmipci *cm, struct snd_pcm_substream *sub
rate = subs->runtime->rate;
- if (up && do_ac3)
- if ((err = save_mixer_state(cm)) < 0)
+ if (up && do_ac3) {
+ err = save_mixer_state(cm);
+ if (err < 0)
return err;
+ }
spin_lock_irq(&cm->reg_lock);
cm->spdif_playback_avail = up;
@@ -1276,7 +1278,8 @@ static int snd_cmipci_playback_prepare(struct snd_pcm_substream *substream)
substream->runtime->channels == 2);
if (do_spdif && cm->can_ac3_hw)
do_ac3 = cm->dig_pcm_status & IEC958_AES0_NONAUDIO;
- if ((err = setup_spdif_playback(cm, substream, do_spdif, do_ac3)) < 0)
+ err = setup_spdif_playback(cm, substream, do_spdif, do_ac3);
+ if (err < 0)
return err;
return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_PLAY], substream);
}
@@ -1291,7 +1294,8 @@ static int snd_cmipci_playback_spdif_prepare(struct snd_pcm_substream *substream
do_ac3 = cm->dig_pcm_status & IEC958_AES0_NONAUDIO;
else
do_ac3 = 1; /* doesn't matter */
- if ((err = setup_spdif_playback(cm, substream, 1, do_ac3)) < 0)
+ err = setup_spdif_playback(cm, substream, 1, do_ac3);
+ if (err < 0)
return err;
return snd_cmipci_pcm_prepare(cm, &cm->channel[CM_CH_PLAY], substream);
}
@@ -1639,7 +1643,8 @@ static int snd_cmipci_playback_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = open_device_check(cm, CM_OPEN_PLAYBACK, substream)) < 0)
+ err = open_device_check(cm, CM_OPEN_PLAYBACK, substream);
+ if (err < 0)
return err;
runtime->hw = snd_cmipci_playback;
if (cm->chip_version == 68) {
@@ -1665,7 +1670,8 @@ static int snd_cmipci_capture_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = open_device_check(cm, CM_OPEN_CAPTURE, substream)) < 0)
+ err = open_device_check(cm, CM_OPEN_CAPTURE, substream);
+ if (err < 0)
return err;
runtime->hw = snd_cmipci_capture;
if (cm->chip_version == 68) { // 8768 only supports 44k/48k recording
@@ -1689,7 +1695,9 @@ static int snd_cmipci_playback2_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = open_device_check(cm, CM_OPEN_PLAYBACK2, substream)) < 0) /* use channel B */
+ /* use channel B */
+ err = open_device_check(cm, CM_OPEN_PLAYBACK2, substream);
+ if (err < 0)
return err;
runtime->hw = snd_cmipci_playback2;
mutex_lock(&cm->open_mutex);
@@ -1727,7 +1735,9 @@ static int snd_cmipci_playback_spdif_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = open_device_check(cm, CM_OPEN_SPDIF_PLAYBACK, substream)) < 0) /* use channel A */
+ /* use channel A */
+ err = open_device_check(cm, CM_OPEN_SPDIF_PLAYBACK, substream);
+ if (err < 0)
return err;
if (cm->can_ac3_hw) {
runtime->hw = snd_cmipci_playback_spdif;
@@ -1754,7 +1764,9 @@ static int snd_cmipci_capture_spdif_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = open_device_check(cm, CM_OPEN_SPDIF_CAPTURE, substream)) < 0) /* use channel B */
+ /* use channel B */
+ err = open_device_check(cm, CM_OPEN_SPDIF_CAPTURE, substream);
+ if (err < 0)
return err;
runtime->hw = snd_cmipci_capture_spdif;
if (cm->can_96k && !(cm->chip_version == 68)) {
@@ -2650,7 +2662,8 @@ static int snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
"PCM Playback Volume"))
continue;
}
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cmipci_mixers[idx], cm))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cmipci_mixers[idx], cm));
+ if (err < 0)
return err;
}
@@ -2675,13 +2688,19 @@ static int snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
return err;
}
if (cm->can_ac3_hw) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm))) < 0)
+ kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
kctl->id.device = pcm_spdif_device;
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm))) < 0)
+ kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
kctl->id.device = pcm_spdif_device;
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm))) < 0)
+ kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
kctl->id.device = pcm_spdif_device;
}
@@ -2955,7 +2974,8 @@ static int snd_cmipci_create_fm(struct cmipci *cm, long fm_port)
goto disable_fm;
}
}
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0) {
dev_err(cm->card->dev, "cannot create OPL3 hwdep\n");
return err;
}
@@ -2987,7 +3007,8 @@ static int snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
*rcmipci = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
cm = kzalloc(sizeof(*cm), GFP_KERNEL);
@@ -3006,7 +3027,8 @@ static int snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
cm->channel[1].ch = 1;
cm->channel[0].is_dac = cm->channel[1].is_dac = 1; /* dual DAC mode */
- if ((err = pci_request_regions(pci, card->driver)) < 0) {
+ err = pci_request_regions(pci, card->driver);
+ if (err < 0) {
kfree(cm);
pci_disable_device(pci);
return err;
@@ -3120,7 +3142,8 @@ static int snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
sprintf(card->longname, "%s%s at %#lx, irq %i",
card->shortname, modelstr, cm->iobase, cm->irq);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, cm, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, cm, &ops);
+ if (err < 0) {
snd_cmipci_free(cm);
return err;
}
@@ -3172,32 +3195,36 @@ static int snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
/* create pcm devices */
pcm_index = pcm_spdif_index = 0;
- if ((err = snd_cmipci_pcm_new(cm, pcm_index)) < 0)
+ err = snd_cmipci_pcm_new(cm, pcm_index);
+ if (err < 0)
return err;
pcm_index++;
- if ((err = snd_cmipci_pcm2_new(cm, pcm_index)) < 0)
+ err = snd_cmipci_pcm2_new(cm, pcm_index);
+ if (err < 0)
return err;
pcm_index++;
if (cm->can_ac3_hw || cm->can_ac3_sw) {
pcm_spdif_index = pcm_index;
- if ((err = snd_cmipci_pcm_spdif_new(cm, pcm_index)) < 0)
+ err = snd_cmipci_pcm_spdif_new(cm, pcm_index);
+ if (err < 0)
return err;
}
/* create mixer interface & switches */
- if ((err = snd_cmipci_mixer_new(cm, pcm_spdif_index)) < 0)
+ err = snd_cmipci_mixer_new(cm, pcm_spdif_index);
+ if (err < 0)
return err;
if (iomidi > 0) {
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_CMIPCI,
- iomidi,
- (integrated_midi ?
- MPU401_INFO_INTEGRATED : 0) |
- MPU401_INFO_IRQ_HOOK,
- -1, &cm->rmidi)) < 0) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_CMIPCI,
+ iomidi,
+ (integrated_midi ?
+ MPU401_INFO_INTEGRATED : 0) |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &cm->rmidi);
+ if (err < 0)
dev_err(cm->card->dev,
"no UART401 device at 0x%lx\n", iomidi);
- }
}
#ifdef USE_VAR48KRATE
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index bf3bb70ffaf9..e122a168c148 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1068,23 +1068,28 @@ static int snd_cs4281_mixer(struct cs4281 *chip)
.read = snd_cs4281_ac97_read,
};
- if ((err = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus)) < 0)
+ err = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus);
+ if (err < 0)
return err;
chip->ac97_bus->private_free = snd_cs4281_mixer_free_ac97_bus;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.private_free = snd_cs4281_mixer_free_ac97;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
if (chip->dual_codec) {
ac97.num = 1;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_secondary)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_secondary);
+ if (err < 0)
return err;
}
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4281_fm_vol, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4281_fm_vol, chip));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4281_pcm_vol, chip))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cs4281_pcm_vol, chip));
+ if (err < 0)
return err;
return 0;
}
@@ -1308,7 +1313,8 @@ static int snd_cs4281_create(struct snd_card *card,
};
*rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL) {
@@ -1326,7 +1332,8 @@ static int snd_cs4281_create(struct snd_card *card,
}
chip->dual_codec = dual_codec;
- if ((err = pci_request_regions(pci, "CS4281")) < 0) {
+ err = pci_request_regions(pci, "CS4281");
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -1356,7 +1363,8 @@ static int snd_cs4281_create(struct snd_card *card,
return tmp;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_cs4281_free(chip);
return err;
}
@@ -1395,12 +1403,14 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
* space between 0e4h and 0ffh to be written. */
snd_cs4281_pokeBA0(chip, BA0_CWPR, 0x4281);
- if ((tmp = snd_cs4281_peekBA0(chip, BA0_SERC1)) != (BA0_SERC1_SO1EN | BA0_SERC1_AC97)) {
+ tmp = snd_cs4281_peekBA0(chip, BA0_SERC1);
+ if (tmp != (BA0_SERC1_SO1EN | BA0_SERC1_AC97)) {
dev_err(chip->card->dev,
"SERC1 AC'97 check failed (0x%x)\n", tmp);
return -EIO;
}
- if ((tmp = snd_cs4281_peekBA0(chip, BA0_SERC2)) != (BA0_SERC2_SI1EN | BA0_SERC2_AC97)) {
+ tmp = snd_cs4281_peekBA0(chip, BA0_SERC2);
+ if (tmp != (BA0_SERC2_SI1EN | BA0_SERC2_AC97)) {
dev_err(chip->card->dev,
"SERC2 AC'97 check failed (0x%x)\n", tmp);
return -EIO;
@@ -1748,7 +1758,8 @@ static int snd_cs4281_midi(struct cs4281 *chip, int device)
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(chip->card, "CS4281", device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(chip->card, "CS4281", device, 1, 1, &rmidi);
+ if (err < 0)
return err;
strcpy(rmidi->name, "CS4281");
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_cs4281_midi_output);
@@ -1881,32 +1892,38 @@ static int snd_cs4281_probe(struct pci_dev *pci,
if (err < 0)
return err;
- if ((err = snd_cs4281_create(card, pci, &chip, dual_codec[dev])) < 0) {
+ err = snd_cs4281_create(card, pci, &chip, dual_codec[dev]);
+ if (err < 0) {
snd_card_free(card);
return err;
}
card->private_data = chip;
- if ((err = snd_cs4281_mixer(chip)) < 0) {
+ err = snd_cs4281_mixer(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_cs4281_pcm(chip, 0)) < 0) {
+ err = snd_cs4281_pcm(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_cs4281_midi(chip, 0)) < 0) {
+ err = snd_cs4281_midi(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3_new(card, OPL3_HW_OPL3_CS4281, &opl3)) < 0) {
+ err = snd_opl3_new(card, OPL3_HW_OPL3_CS4281, &opl3);
+ if (err < 0) {
snd_card_free(card);
return err;
}
opl3->private_data = chip;
opl3->command = snd_cs4281_opl3_command;
snd_opl3_init(opl3);
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1918,7 +1935,8 @@ static int snd_cs4281_probe(struct pci_dev *pci,
chip->ba0_addr,
chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index 1db7b4112840..358ca84cbdea 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -70,45 +70,53 @@ static int snd_card_cs46xx_probe(struct pci_dev *pci,
0, &card);
if (err < 0)
return err;
- if ((err = snd_cs46xx_create(card, pci,
- external_amp[dev], thinkpad[dev],
- &chip)) < 0) {
+ err = snd_cs46xx_create(card, pci,
+ external_amp[dev], thinkpad[dev],
+ &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
card->private_data = chip;
chip->accept_valid = mmap_valid[dev];
- if ((err = snd_cs46xx_pcm(chip, 0)) < 0) {
+ err = snd_cs46xx_pcm(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
- if ((err = snd_cs46xx_pcm_rear(chip, 1)) < 0) {
+ err = snd_cs46xx_pcm_rear(chip, 1);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_cs46xx_pcm_iec958(chip, 2)) < 0) {
+ err = snd_cs46xx_pcm_iec958(chip, 2);
+ if (err < 0) {
snd_card_free(card);
return err;
}
#endif
- if ((err = snd_cs46xx_mixer(chip, 2)) < 0) {
+ err = snd_cs46xx_mixer(chip, 2);
+ if (err < 0) {
snd_card_free(card);
return err;
}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
if (chip->nr_ac97_codecs ==2) {
- if ((err = snd_cs46xx_pcm_center_lfe(chip, 3)) < 0) {
+ err = snd_cs46xx_pcm_center_lfe(chip, 3);
+ if (err < 0) {
snd_card_free(card);
return err;
}
}
#endif
- if ((err = snd_cs46xx_midi(chip, 0)) < 0) {
+ err = snd_cs46xx_midi(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_cs46xx_start_dsp(chip)) < 0) {
+ err = snd_cs46xx_start_dsp(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -124,7 +132,8 @@ static int snd_card_cs46xx_probe(struct pci_dev *pci,
chip->ba1_addr,
chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 37f516e6a5c2..1e1eb17f8e07 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1058,9 +1058,10 @@ static int _cs46xx_adjust_sample_rate (struct snd_cs46xx *chip, struct snd_cs46x
int unlinked = cpcm->pcm_channel->unlinked;
cs46xx_dsp_destroy_pcm_channel (chip,cpcm->pcm_channel);
- if ( (cpcm->pcm_channel = cs46xx_dsp_create_pcm_channel (chip, sample_rate, cpcm,
- cpcm->hw_buf.addr,
- cpcm->pcm_channel_id)) == NULL) {
+ cpcm->pcm_channel = cs46xx_dsp_create_pcm_channel(chip, sample_rate, cpcm,
+ cpcm->hw_buf.addr,
+ cpcm->pcm_channel_id);
+ if (!cpcm->pcm_channel) {
dev_err(chip->card->dev,
"failed to re-create virtual PCM channel\n");
return -ENOMEM;
@@ -1147,7 +1148,8 @@ static int snd_cs46xx_playback_hw_params(struct snd_pcm_substream *substream,
runtime->dma_addr = 0;
runtime->dma_bytes = 0;
}
- if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) {
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+ if (err < 0) {
#ifdef CONFIG_SND_CS46XX_NEW_DSP
mutex_unlock(&chip->spos_mutex);
#endif
@@ -1295,7 +1297,8 @@ static int snd_cs46xx_capture_hw_params(struct snd_pcm_substream *substream,
runtime->dma_addr = 0;
runtime->dma_bytes = 0;
}
- if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+ if (err < 0)
return err;
substream->ops = &snd_cs46xx_capture_indirect_ops;
}
@@ -1760,7 +1763,8 @@ int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "CS46xx", device, MAX_PLAYBACK_CHANNELS, 1, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "CS46xx", device, MAX_PLAYBACK_CHANNELS, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1787,7 +1791,8 @@ int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "CS46xx - Rear", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "CS46xx - Rear", device, MAX_PLAYBACK_CHANNELS, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1811,7 +1816,8 @@ int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "CS46xx - Center LFE", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "CS46xx - Center LFE", device, MAX_PLAYBACK_CHANNELS, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1835,7 +1841,8 @@ int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "CS46xx - IEC958", device, 1, 0, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "CS46xx - IEC958", device, 1, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -2414,7 +2421,8 @@ static void snd_cs46xx_codec_reset (struct snd_ac97 * ac97)
/* test if we can write to the record gain volume register */
snd_ac97_write(ac97, AC97_REC_GAIN, 0x8a05);
- if ((err = snd_ac97_read(ac97, AC97_REC_GAIN)) == 0x8a05)
+ err = snd_ac97_read(ac97, AC97_REC_GAIN);
+ if (err == 0x8a05)
return;
msleep(10);
@@ -2476,7 +2484,8 @@ int snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device)
/* detect primary codec */
chip->nr_ac97_codecs = 0;
dev_dbg(chip->card->dev, "detecting primary codec\n");
- if ((err = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus)) < 0)
+ err = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus);
+ if (err < 0)
return err;
chip->ac97_bus->private_free = snd_cs46xx_mixer_free_ac97_bus;
@@ -2497,7 +2506,8 @@ int snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device)
kctl = snd_ctl_new1(&snd_cs46xx_controls[idx], chip);
if (kctl && kctl->id.iface == SNDRV_CTL_ELEM_IFACE_PCM)
kctl->id.device = spdif_device;
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
@@ -2684,7 +2694,8 @@ int snd_cs46xx_midi(struct snd_cs46xx *chip, int device)
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(chip->card, "CS46XX", device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(chip->card, "CS46XX", device, 1, 1, &rmidi);
+ if (err < 0)
return err;
strcpy(rmidi->name, "CS46XX");
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_cs46xx_midi_output);
@@ -3526,7 +3537,8 @@ static void hercules_mixer_init (struct snd_cs46xx *chip)
struct snd_kcontrol *kctl;
kctl = snd_ctl_new1(&snd_hercules_controls[idx], chip);
- if ((err = snd_ctl_add(card, kctl)) < 0) {
+ err = snd_ctl_add(card, kctl);
+ if (err < 0) {
dev_err(card->dev,
"failed to initialize Hercules mixer (%d)\n",
err);
@@ -3871,7 +3883,8 @@ int snd_cs46xx_create(struct snd_card *card,
*rchip = NULL;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -3965,8 +3978,9 @@ int snd_cs46xx_create(struct snd_card *card,
for (idx = 0; idx < 5; idx++) {
region = &chip->region.idx[idx];
- if ((region->resource = request_mem_region(region->base, region->size,
- region->name)) == NULL) {
+ region->resource = request_mem_region(region->base, region->size,
+ region->name);
+ if (!region->resource) {
dev_err(chip->card->dev,
"unable to request memory region 0x%lx-0x%lx\n",
region->base, region->base + region->size - 1);
@@ -4005,7 +4019,8 @@ int snd_cs46xx_create(struct snd_card *card,
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_cs46xx_free(chip);
return err;
}
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 05f3f6dc918d..1db6bc58d6a6 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -617,7 +617,8 @@ static void cs46xx_dsp_proc_parameter_dump_read (struct snd_info_entry *entry,
col = 0;
}
- if ( (symbol = cs46xx_dsp_lookup_symbol_addr (chip,i / sizeof(u32), SYMBOL_PARAMETER)) != NULL) {
+ symbol = cs46xx_dsp_lookup_symbol_addr(chip, i / sizeof(u32), SYMBOL_PARAMETER);
+ if (symbol) {
col = 0;
snd_iprintf (buffer,"\n%s:\n",symbol->symbol_name);
}
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 9b716b56d739..e048b45d9e7e 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -143,7 +143,8 @@ static int snd_cs5535audio_mixer(struct cs5535audio *cs5535au)
.read = snd_cs5535audio_ac97_codec_read,
};
- if ((err = snd_ac97_bus(card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
@@ -155,7 +156,8 @@ static int snd_cs5535audio_mixer(struct cs5535audio *cs5535au)
/* set any OLPC-specific scaps */
olpc_prequirks(card, &ac97);
- if ((err = snd_ac97_mixer(pbus, &ac97, &cs5535au->ac97)) < 0) {
+ err = snd_ac97_mixer(pbus, &ac97, &cs5535au->ac97);
+ if (err < 0) {
dev_err(card->dev, "mixer failed\n");
return err;
}
@@ -266,7 +268,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
};
*rcs5535au = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32))) {
@@ -286,7 +289,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
cs5535au->pci = pci;
cs5535au->irq = -1;
- if ((err = pci_request_regions(pci, "CS5535 Audio")) < 0) {
+ err = pci_request_regions(pci, "CS5535 Audio");
+ if (err < 0) {
kfree(cs5535au);
goto pcifail;
}
@@ -304,8 +308,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
card->sync_irq = cs5535au->irq;
pci_set_master(pci);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- cs5535au, &ops)) < 0)
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, cs5535au, &ops);
+ if (err < 0)
goto sndfail;
*rcs5535au = cs5535au;
@@ -340,15 +344,18 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
if (err < 0)
return err;
- if ((err = snd_cs5535audio_create(card, pci, &cs5535au)) < 0)
+ err = snd_cs5535audio_create(card, pci, &cs5535au);
+ if (err < 0)
goto probefail_out;
card->private_data = cs5535au;
- if ((err = snd_cs5535audio_mixer(cs5535au)) < 0)
+ err = snd_cs5535audio_mixer(cs5535au);
+ if (err < 0)
goto probefail_out;
- if ((err = snd_cs5535audio_pcm(cs5535au)) < 0)
+ err = snd_cs5535audio_pcm(cs5535au);
+ if (err < 0)
goto probefail_out;
strcpy(card->driver, DRIVER_NAME);
@@ -358,7 +365,8 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
card->shortname, card->driver,
cs5535au->port, cs5535au->irq);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto probefail_out;
pci_set_drvdata(pci, card);
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index 4032b89b1fc1..5ff10fec7b90 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -87,8 +87,9 @@ static int snd_cs5535audio_playback_open(struct snd_pcm_substream *substream)
snd_pcm_limit_hw_rates(runtime);
cs5535au->playback_substream = substream;
runtime->private_data = &(cs5535au->dmas[CS5535AUDIO_DMA_PLAYBACK]);
- if ((err = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
return 0;
@@ -342,8 +343,9 @@ static int snd_cs5535audio_capture_open(struct snd_pcm_substream *substream)
snd_pcm_limit_hw_rates(runtime);
cs5535au->capture_substream = substream;
runtime->private_data = &(cs5535au->dmas[CS5535AUDIO_DMA_CAPTURE]);
- if ((err = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
olpc_capture_open(cs5535au->ac97);
return 0;
diff --git a/sound/pci/echoaudio/darla20_dsp.c b/sound/pci/echoaudio/darla20_dsp.c
index 320837ba7bab..0356efad7528 100644
--- a/sound/pci/echoaudio/darla20_dsp.c
+++ b/sound/pci/echoaudio/darla20_dsp.c
@@ -36,7 +36,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA20))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw: could not initialize DSP comm page\n");
return err;
@@ -53,7 +54,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->asic_loaded = true;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/darla24_dsp.c b/sound/pci/echoaudio/darla24_dsp.c
index 8736b5e81ca3..b96300772aee 100644
--- a/sound/pci/echoaudio/darla24_dsp.c
+++ b/sound/pci/echoaudio/darla24_dsp.c
@@ -36,7 +36,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA24))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw: could not initialize DSP comm page\n");
return err;
@@ -52,7 +53,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
ECHO_CLOCK_BIT_ESYNC;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/echo3g_dsp.c b/sound/pci/echoaudio/echo3g_dsp.c
index 6deb80c42f11..9e1f2cad0909 100644
--- a/sound/pci/echoaudio/echo3g_dsp.c
+++ b/sound/pci/echoaudio/echo3g_dsp.c
@@ -49,7 +49,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != ECHO3G))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 9bd67ac33657..a62e5581ad14 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -301,38 +301,42 @@ static int pcm_open(struct snd_pcm_substream *substream,
snd_pcm_set_sync(substream);
/* Only mono and any even number of channels are allowed */
- if ((err = snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- &pipe->constr)) < 0)
+ err = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &pipe->constr);
+ if (err < 0)
return err;
/* All periods should have the same size */
- if ((err = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
/* The hw accesses memory in chunks 32 frames long and they should be
32-bytes-aligned. It's not a requirement, but it seems that IRQs are
generated with a resolution of 32 frames. Thus we need the following */
- if ((err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- 32)) < 0)
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
- 32)) < 0)
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE,
- hw_rule_sample_rate, chip,
- SNDRV_PCM_HW_PARAM_RATE, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ hw_rule_sample_rate, chip,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
return err;
/* Allocate a page for the scatter-gather list */
- if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- &chip->pci->dev,
- PAGE_SIZE, &pipe->sgpage)) < 0) {
+ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
+ PAGE_SIZE, &pipe->sgpage);
+ if (err < 0) {
dev_err(chip->card->dev, "s-g list allocation failed\n");
return err;
}
@@ -358,18 +362,21 @@ static int pcm_analog_in_open(struct snd_pcm_substream *substream)
struct echoaudio *chip = snd_pcm_substream_chip(substream);
int err;
- if ((err = pcm_open(substream, num_analog_busses_in(chip) -
- substream->number)) < 0)
+ err = pcm_open(substream,
+ num_analog_busses_in(chip) - substream->number);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_capture_channels_by_format, NULL,
- SNDRV_PCM_HW_PARAM_FORMAT, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_capture_channels_by_format, NULL,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_FORMAT,
- hw_rule_capture_format_by_channels, NULL,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_capture_format_by_channels, NULL,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (err < 0)
return err;
return 0;
@@ -387,19 +394,22 @@ static int pcm_analog_out_open(struct snd_pcm_substream *substream)
#else
max_channels = num_analog_busses_out(chip);
#endif
- if ((err = pcm_open(substream, max_channels - substream->number)) < 0)
+ err = pcm_open(substream, max_channels - substream->number);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_playback_channels_by_format,
- NULL,
- SNDRV_PCM_HW_PARAM_FORMAT, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_playback_channels_by_format,
+ NULL,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (err < 0)
return err;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_FORMAT,
- hw_rule_playback_format_by_channels,
- NULL,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_playback_format_by_channels,
+ NULL,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (err < 0)
return err;
return 0;
@@ -426,15 +436,17 @@ static int pcm_digital_in_open(struct snd_pcm_substream *substream)
if (err < 0)
goto din_exit;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_capture_channels_by_format, NULL,
- SNDRV_PCM_HW_PARAM_FORMAT, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_capture_channels_by_format, NULL,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (err < 0)
goto din_exit;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_FORMAT,
- hw_rule_capture_format_by_channels, NULL,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_capture_format_by_channels, NULL,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (err < 0)
goto din_exit;
din_exit:
@@ -463,17 +475,19 @@ static int pcm_digital_out_open(struct snd_pcm_substream *substream)
if (err < 0)
goto dout_exit;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- hw_rule_playback_channels_by_format,
- NULL, SNDRV_PCM_HW_PARAM_FORMAT,
- -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_playback_channels_by_format,
+ NULL, SNDRV_PCM_HW_PARAM_FORMAT,
+ -1);
+ if (err < 0)
goto dout_exit;
- if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_FORMAT,
- hw_rule_playback_format_by_channels,
- NULL, SNDRV_PCM_HW_PARAM_CHANNELS,
- -1)) < 0)
+ err = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ hw_rule_playback_format_by_channels,
+ NULL, SNDRV_PCM_HW_PARAM_CHANNELS,
+ -1);
+ if (err < 0)
goto dout_exit;
dout_exit:
@@ -907,8 +921,9 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
separated */
/* PCM#0 Virtual outputs and analog inputs */
- if ((err = snd_pcm_new(chip->card, "PCM", 0, num_pipes_out(chip),
- num_analog_busses_in(chip), &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "PCM", 0, num_pipes_out(chip),
+ num_analog_busses_in(chip), &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
chip->analog_pcm = pcm;
@@ -919,8 +934,9 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital inputs, no outputs */
- if ((err = snd_pcm_new(chip->card, "Digital PCM", 1, 0,
- num_digital_busses_in(chip), &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "Digital PCM", 1, 0,
+ num_digital_busses_in(chip), &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
chip->digital_pcm = pcm;
@@ -937,9 +953,10 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
register two PCM devices: */
/* PCM#0 Analog i/o */
- if ((err = snd_pcm_new(chip->card, "Analog PCM", 0,
- num_analog_busses_out(chip),
- num_analog_busses_in(chip), &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "Analog PCM", 0,
+ num_analog_busses_out(chip),
+ num_analog_busses_in(chip), &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
chip->analog_pcm = pcm;
@@ -950,9 +967,10 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital i/o */
- if ((err = snd_pcm_new(chip->card, "Digital PCM", 1,
- num_digital_busses_out(chip),
- num_digital_busses_in(chip), &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "Digital PCM", 1,
+ num_digital_busses_out(chip),
+ num_digital_busses_in(chip), &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
chip->digital_pcm = pcm;
@@ -1567,7 +1585,8 @@ static int snd_echo_clock_source_put(struct snd_kcontrol *kcontrol,
if (chip->input_clock != dclock) {
mutex_lock(&chip->mode_mutex);
spin_lock_irq(&chip->lock);
- if ((changed = set_input_clock(chip, dclock)) == 0)
+ changed = set_input_clock(chip, dclock);
+ if (!changed)
changed = 1; /* no errors */
spin_unlock_irq(&chip->lock);
mutex_unlock(&chip->mode_mutex);
@@ -1911,7 +1930,8 @@ static int snd_echo_create(struct snd_card *card,
pci_write_config_byte(pci, PCI_LATENCY_TIMER, 0xC0);
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
pci_set_master(pci);
@@ -1943,8 +1963,9 @@ static int snd_echo_create(struct snd_card *card,
if (sz > PAGE_SIZE)
sz = PAGE_SIZE; /* We map only the required part */
- if ((chip->iores = request_mem_region(chip->dsp_registers_phys, sz,
- ECHOCARD_NAME)) == NULL) {
+ chip->iores = request_mem_region(chip->dsp_registers_phys, sz,
+ ECHOCARD_NAME);
+ if (!chip->iores) {
dev_err(chip->card->dev, "cannot get memory region\n");
snd_echo_free(chip);
return -EBUSY;
@@ -1988,7 +2009,8 @@ static int snd_echo_create(struct snd_card *card,
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_echo_free(chip);
return err;
}
@@ -2024,7 +2046,8 @@ static int snd_echo_probe(struct pci_dev *pci,
return err;
chip = NULL; /* Tells snd_echo_create to allocate chip */
- if ((err = snd_echo_create(card, pci, &chip)) < 0) {
+ err = snd_echo_create(card, pci, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2040,7 +2063,8 @@ static int snd_echo_probe(struct pci_dev *pci,
card->shortname, pci_id->subdevice & 0x000f, dsp,
chip->dsp_registers_phys, chip->irq);
- if ((err = snd_echo_new_pcm(chip)) < 0) {
+ err = snd_echo_new_pcm(chip);
+ if (err < 0) {
dev_err(chip->card->dev, "new pcm error %d\n", err);
snd_card_free(card);
return err;
@@ -2048,7 +2072,8 @@ static int snd_echo_probe(struct pci_dev *pci,
#ifdef ECHOCARD_HAS_MIDI
if (chip->has_midi) { /* Some Mia's do not have midi */
- if ((err = snd_echo_midi_create(card, chip)) < 0) {
+ err = snd_echo_midi_create(card, chip);
+ if (err < 0) {
dev_err(chip->card->dev, "new midi error %d\n", err);
snd_card_free(card);
return err;
@@ -2058,7 +2083,8 @@ static int snd_echo_probe(struct pci_dev *pci,
#ifdef ECHOCARD_HAS_VMIXER
snd_echo_vmixer.count = num_pipes_out(chip) * num_busses_out(chip);
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vmixer, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vmixer, chip));
+ if (err < 0)
goto ctl_error;
#ifdef ECHOCARD_HAS_LINE_OUT_GAIN
err = snd_ctl_add(chip->card,
@@ -2074,39 +2100,48 @@ static int snd_echo_probe(struct pci_dev *pci,
#endif /* ECHOCARD_HAS_VMIXER */
#ifdef ECHOCARD_HAS_INPUT_GAIN
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_line_input_gain, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_line_input_gain, chip));
+ if (err < 0)
goto ctl_error;
#endif
#ifdef ECHOCARD_HAS_INPUT_NOMINAL_LEVEL
- if (!chip->hasnt_input_nominal_level)
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_intput_nominal_level, chip))) < 0)
+ if (!chip->hasnt_input_nominal_level) {
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_intput_nominal_level, chip));
+ if (err < 0)
goto ctl_error;
+ }
#endif
#ifdef ECHOCARD_HAS_OUTPUT_NOMINAL_LEVEL
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_output_nominal_level, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_output_nominal_level, chip));
+ if (err < 0)
goto ctl_error;
#endif
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vumeters_switch, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vumeters_switch, chip));
+ if (err < 0)
goto ctl_error;
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vumeters, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vumeters, chip));
+ if (err < 0)
goto ctl_error;
#ifdef ECHOCARD_HAS_MONITOR
snd_echo_monitor_mixer.count = num_busses_in(chip) * num_busses_out(chip);
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_monitor_mixer, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_monitor_mixer, chip));
+ if (err < 0)
goto ctl_error;
#endif
#ifdef ECHOCARD_HAS_DIGITAL_IN_AUTOMUTE
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_automute_switch, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_automute_switch, chip));
+ if (err < 0)
goto ctl_error;
#endif
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_channels_info, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_channels_info, chip));
+ if (err < 0)
goto ctl_error;
#ifdef ECHOCARD_HAS_DIGITAL_MODE_SWITCH
@@ -2116,7 +2151,8 @@ static int snd_echo_probe(struct pci_dev *pci,
if (chip->digital_modes & (1 << i))
chip->digital_mode_list[chip->num_digital_modes++] = i;
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_digital_mode_switch, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_digital_mode_switch, chip));
+ if (err < 0)
goto ctl_error;
#endif /* ECHOCARD_HAS_DIGITAL_MODE_SWITCH */
@@ -2129,20 +2165,24 @@ static int snd_echo_probe(struct pci_dev *pci,
if (chip->num_clock_sources > 1) {
chip->clock_src_ctl = snd_ctl_new1(&snd_echo_clock_source_switch, chip);
- if ((err = snd_ctl_add(chip->card, chip->clock_src_ctl)) < 0)
+ err = snd_ctl_add(chip->card, chip->clock_src_ctl);
+ if (err < 0)
goto ctl_error;
}
#endif /* ECHOCARD_HAS_EXTERNAL_CLOCK */
#ifdef ECHOCARD_HAS_DIGITAL_IO
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_spdif_mode_switch, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_spdif_mode_switch, chip));
+ if (err < 0)
goto ctl_error;
#endif
#ifdef ECHOCARD_HAS_PHANTOM_POWER
- if (chip->has_phantom_power)
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_phantom_power_switch, chip))) < 0)
+ if (chip->has_phantom_power) {
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_phantom_power_switch, chip));
+ if (err < 0)
goto ctl_error;
+ }
#endif
err = snd_card_register(card);
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index d10d0e460f0b..2a40091d472c 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -349,7 +349,8 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
/* If this board requires a resident loader, install it. */
#ifdef DSP_56361
- if ((i = install_resident_loader(chip)) < 0)
+ i = install_resident_loader(chip);
+ if (i < 0)
return i;
#endif
@@ -495,7 +496,8 @@ static int load_firmware(struct echoaudio *chip)
/* See if the ASIC is present and working - only if the DSP is already loaded */
if (chip->dsp_code) {
- if ((box_type = check_asic_status(chip)) >= 0)
+ box_type = check_asic_status(chip);
+ if (box_type >= 0)
return box_type;
/* ASIC check failed; force the DSP to reload */
chip->dsp_code = NULL;
@@ -509,7 +511,8 @@ static int load_firmware(struct echoaudio *chip)
if (err < 0)
return err;
- if ((box_type = load_asic(chip)) < 0)
+ box_type = load_asic(chip);
+ if (box_type < 0)
return box_type; /* error */
return box_type;
@@ -667,7 +670,8 @@ static int restore_dsp_rettings(struct echoaudio *chip)
{
int i, o, err;
- if ((err = check_asic_status(chip)) < 0)
+ err = check_asic_status(chip);
+ if (err < 0)
return err;
/* Gina20/Darla20 only. Should be harmless for other cards. */
diff --git a/sound/pci/echoaudio/echoaudio_gml.c b/sound/pci/echoaudio/echoaudio_gml.c
index eea6fe530ab4..248983fa2959 100644
--- a/sound/pci/echoaudio/echoaudio_gml.c
+++ b/sound/pci/echoaudio/echoaudio_gml.c
@@ -194,7 +194,8 @@ static int set_professional_spdif(struct echoaudio *chip, char prof)
}
}
- if ((err = write_control_reg(chip, control_reg, false)))
+ err = write_control_reg(chip, control_reg, false);
+ if (err)
return err;
chip->professional_spdif = prof;
dev_dbg(chip->card->dev, "set_professional_spdif to %s\n",
diff --git a/sound/pci/echoaudio/gina20_dsp.c b/sound/pci/echoaudio/gina20_dsp.c
index b2377573de09..c93939850357 100644
--- a/sound/pci/echoaudio/gina20_dsp.c
+++ b/sound/pci/echoaudio/gina20_dsp.c
@@ -40,7 +40,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != GINA20))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -58,7 +59,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
ECHO_CLOCK_BIT_SPDIF;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/gina24_dsp.c b/sound/pci/echoaudio/gina24_dsp.c
index 8eff2b4f5ceb..56e9d1b9b330 100644
--- a/sound/pci/echoaudio/gina24_dsp.c
+++ b/sound/pci/echoaudio/gina24_dsp.c
@@ -44,7 +44,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != GINA24))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -74,7 +75,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_CDROM;
}
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/indigo_dsp.c b/sound/pci/echoaudio/indigo_dsp.c
index c97dc83bbbdf..16eb082df56a 100644
--- a/sound/pci/echoaudio/indigo_dsp.c
+++ b/sound/pci/echoaudio/indigo_dsp.c
@@ -41,7 +41,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -56,7 +57,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->asic_loaded = true;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/indigodj_dsp.c b/sound/pci/echoaudio/indigodj_dsp.c
index 2428b35f45d6..17a1d888d0b9 100644
--- a/sound/pci/echoaudio/indigodj_dsp.c
+++ b/sound/pci/echoaudio/indigodj_dsp.c
@@ -41,7 +41,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_DJ))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -56,7 +57,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->asic_loaded = true;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/indigoio_dsp.c b/sound/pci/echoaudio/indigoio_dsp.c
index 79b68ba70936..791787aa0744 100644
--- a/sound/pci/echoaudio/indigoio_dsp.c
+++ b/sound/pci/echoaudio/indigoio_dsp.c
@@ -41,7 +41,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IO))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -56,7 +57,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->asic_loaded = true;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/layla20_dsp.c b/sound/pci/echoaudio/layla20_dsp.c
index 5e5b6e288a2d..5fb5c4a4598b 100644
--- a/sound/pci/echoaudio/layla20_dsp.c
+++ b/sound/pci/echoaudio/layla20_dsp.c
@@ -43,7 +43,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA20))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -60,7 +61,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->output_clock_types =
ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_SUPER;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index c02bc1dcc170..ef27805d63f6 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -43,7 +43,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA24))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -62,11 +63,13 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
ECHOCAPS_HAS_DIGITAL_MODE_ADAT;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
- if ((err = init_line_levels(chip)) < 0)
+ err = init_line_levels(chip);
+ if (err < 0)
return err;
return err;
diff --git a/sound/pci/echoaudio/mia_dsp.c b/sound/pci/echoaudio/mia_dsp.c
index 8f612a09c5d0..8a4dffc68889 100644
--- a/sound/pci/echoaudio/mia_dsp.c
+++ b/sound/pci/echoaudio/mia_dsp.c
@@ -44,7 +44,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != MIA))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -62,7 +63,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
ECHO_CLOCK_BIT_SPDIF;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/echoaudio/midi.c b/sound/pci/echoaudio/midi.c
index 6045a115cffe..cb72d27e809e 100644
--- a/sound/pci/echoaudio/midi.c
+++ b/sound/pci/echoaudio/midi.c
@@ -308,8 +308,8 @@ static int snd_echo_midi_create(struct snd_card *card,
{
int err;
- if ((err = snd_rawmidi_new(card, card->shortname, 0, 1, 1,
- &chip->rmidi)) < 0)
+ err = snd_rawmidi_new(card, card->shortname, 0, 1, 1, &chip->rmidi);
+ if (err < 0)
return err;
strcpy(chip->rmidi->name, card->shortname);
diff --git a/sound/pci/echoaudio/mona_dsp.c b/sound/pci/echoaudio/mona_dsp.c
index f77db83dd73d..f8e7bb6ce040 100644
--- a/sound/pci/echoaudio/mona_dsp.c
+++ b/sound/pci/echoaudio/mona_dsp.c
@@ -44,7 +44,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA))
return -ENODEV;
- if ((err = init_dsp_comm_page(chip))) {
+ err = init_dsp_comm_page(chip);
+ if (err) {
dev_err(chip->card->dev,
"init_hw - could not initialize DSP comm page\n");
return err;
@@ -67,7 +68,8 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
else
chip->dsp_code_to_load = FW_MONA_301_DSP;
- if ((err = load_firmware(chip)) < 0)
+ err = load_firmware(chip);
+ if (err < 0)
return err;
chip->bad_board = false;
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 45833bc2a7e7..887bfb3c1e17 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -107,18 +107,22 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
max_buffer_size[dev] = 32;
else if (max_buffer_size[dev] > 1024)
max_buffer_size[dev] = 1024;
- if ((err = snd_emu10k1_create(card, pci, extin[dev], extout[dev],
- (long)max_buffer_size[dev] * 1024 * 1024,
- enable_ir[dev], subsystem[dev],
- &emu)) < 0)
+ err = snd_emu10k1_create(card, pci, extin[dev], extout[dev],
+ (long)max_buffer_size[dev] * 1024 * 1024,
+ enable_ir[dev], subsystem[dev],
+ &emu);
+ if (err < 0)
goto error;
card->private_data = emu;
emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
- if ((err = snd_emu10k1_pcm(emu, 0)) < 0)
+ err = snd_emu10k1_pcm(emu, 0);
+ if (err < 0)
goto error;
- if ((err = snd_emu10k1_pcm_mic(emu, 1)) < 0)
+ err = snd_emu10k1_pcm_mic(emu, 1);
+ if (err < 0)
goto error;
- if ((err = snd_emu10k1_pcm_efx(emu, 2)) < 0)
+ err = snd_emu10k1_pcm_efx(emu, 2);
+ if (err < 0)
goto error;
/* This stores the periods table. */
if (emu->card_capabilities->ca0151_chip) { /* P16V */
@@ -128,26 +132,33 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
goto error;
}
- if ((err = snd_emu10k1_mixer(emu, 0, 3)) < 0)
+ err = snd_emu10k1_mixer(emu, 0, 3);
+ if (err < 0)
goto error;
- if ((err = snd_emu10k1_timer(emu, 0)) < 0)
+ err = snd_emu10k1_timer(emu, 0);
+ if (err < 0)
goto error;
- if ((err = snd_emu10k1_pcm_multi(emu, 3)) < 0)
+ err = snd_emu10k1_pcm_multi(emu, 3);
+ if (err < 0)
goto error;
if (emu->card_capabilities->ca0151_chip) { /* P16V */
- if ((err = snd_p16v_pcm(emu, 4)) < 0)
+ err = snd_p16v_pcm(emu, 4);
+ if (err < 0)
goto error;
}
if (emu->audigy) {
- if ((err = snd_emu10k1_audigy_midi(emu)) < 0)
+ err = snd_emu10k1_audigy_midi(emu);
+ if (err < 0)
goto error;
} else {
- if ((err = snd_emu10k1_midi(emu)) < 0)
+ err = snd_emu10k1_midi(emu);
+ if (err < 0)
goto error;
}
- if ((err = snd_emu10k1_fx8010_new(emu, 0)) < 0)
+ err = snd_emu10k1_fx8010_new(emu, 0);
+ if (err < 0)
goto error;
#ifdef ENABLE_SYNTH
if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
@@ -174,7 +185,8 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
"%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto error;
if (emu->card_capabilities->emu_model)
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
index 07471c3dcbed..dba1e9fc2eec 100644
--- a/sound/pci/emu10k1/emu10k1_callback.c
+++ b/sound/pci/emu10k1/emu10k1_callback.c
@@ -90,7 +90,8 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
if (best[i].voice >= 0) {
int ch;
vp = &emu->voices[best[i].voice];
- if ((ch = vp->ch) < 0) {
+ ch = vp->ch;
+ if (ch < 0) {
/*
dev_warn(emu->card->dev,
"synth_get_voice: ch < 0 (%d) ??", i);
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index d9a12cd01647..89b0f3884067 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -349,7 +349,8 @@ static void snd_emu10k1x_pcm_interrupt(struct emu10k1x *emu, struct emu10k1x_voi
{
struct emu10k1x_pcm *epcm;
- if ((epcm = voice->epcm) == NULL)
+ epcm = voice->epcm;
+ if (!epcm)
return;
if (epcm->substream == NULL)
return;
@@ -371,10 +372,11 @@ static int snd_emu10k1x_playback_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) {
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
- }
- if ((err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64)) < 0)
+ err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ if (err < 0)
return err;
epcm = kzalloc(sizeof(*epcm), GFP_KERNEL);
@@ -550,10 +552,12 @@ static int snd_emu10k1x_pcm_open_capture(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
- return err;
- if ((err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64)) < 0)
- return err;
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ if (err < 0)
+ return err;
epcm = kzalloc(sizeof(*epcm), GFP_KERNEL);
if (epcm == NULL)
@@ -722,7 +726,8 @@ static int snd_emu10k1x_ac97(struct emu10k1x *chip)
.read = snd_emu10k1x_ac97_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
pbus->no_vra = 1; /* we don't need VRA */
@@ -838,7 +843,8 @@ static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device)
if (device == 0)
capture = 1;
- if ((err = snd_pcm_new(emu->card, "emu10k1x", device, 1, capture, &pcm)) < 0)
+ err = snd_pcm_new(emu->card, "emu10k1x", device, 1, capture, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
@@ -891,7 +897,8 @@ static int snd_emu10k1x_create(struct snd_card *card,
*rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(28)) < 0) {
@@ -914,8 +921,8 @@ static int snd_emu10k1x_create(struct snd_card *card,
spin_lock_init(&chip->voice_lock);
chip->port = pci_resource_start(pci, 0);
- if ((chip->res_port = request_region(chip->port, 8,
- "EMU10K1X")) == NULL) {
+ chip->res_port = request_region(chip->port, 8, "EMU10K1X");
+ if (!chip->res_port) {
dev_err(card->dev, "cannot allocate the port 0x%lx\n",
chip->port);
snd_emu10k1x_free(chip);
@@ -991,8 +998,8 @@ static int snd_emu10k1x_create(struct snd_card *card,
outl(HCFG_LOCKSOUNDCACHE|HCFG_AUDIOENABLE, chip->port+HCFG);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_emu10k1x_free(chip);
return err;
}
@@ -1171,17 +1178,23 @@ static int snd_emu10k1x_mixer(struct emu10k1x *emu)
struct snd_kcontrol *kctl;
struct snd_card *card = emu->card;
- if ((kctl = snd_ctl_new1(&snd_emu10k1x_spdif_mask_control, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_emu10k1x_spdif_mask_control, emu);
+ if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = snd_ctl_new1(&snd_emu10k1x_shared_spdif, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_emu10k1x_shared_spdif, emu);
+ if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = snd_ctl_new1(&snd_emu10k1x_spdif_control, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_emu10k1x_spdif_control, emu);
+ if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
return 0;
@@ -1488,7 +1501,8 @@ static int emu10k1x_midi_init(struct emu10k1x *emu,
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(emu->card, name, device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(emu->card, name, device, 1, 1, &rmidi);
+ if (err < 0)
return err;
midi->emu = emu;
spin_lock_init(&midi->open_lock);
@@ -1511,7 +1525,8 @@ static int snd_emu10k1x_midi(struct emu10k1x *emu)
struct emu10k1x_midi *midi = &emu->midi;
int err;
- if ((err = emu10k1x_midi_init(emu, midi, 0, "EMU10K1X MPU-401 (UART)")) < 0)
+ err = emu10k1x_midi_init(emu, midi, 0, "EMU10K1X MPU-401 (UART)");
+ if (err < 0)
return err;
midi->tx_enable = INTE_MIDITXENABLE;
@@ -1543,35 +1558,42 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
if (err < 0)
return err;
- if ((err = snd_emu10k1x_create(card, pci, &chip)) < 0) {
+ err = snd_emu10k1x_create(card, pci, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_pcm(chip, 0)) < 0) {
+ err = snd_emu10k1x_pcm(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_pcm(chip, 1)) < 0) {
+ err = snd_emu10k1x_pcm(chip, 1);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_pcm(chip, 2)) < 0) {
+ err = snd_emu10k1x_pcm(chip, 2);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_ac97(chip)) < 0) {
+ err = snd_emu10k1x_ac97(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_mixer(chip)) < 0) {
+ err = snd_emu10k1x_mixer(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_emu10k1x_midi(chip)) < 0) {
+ err = snd_emu10k1x_midi(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1583,7 +1605,8 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
sprintf(card->longname, "%s at 0x%lx irq %i",
card->shortname, chip->port, chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 80ef62a4a7c0..6cf7c8b1de47 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -436,7 +436,8 @@ int snd_emu10k1_fx8010_unregister_irq_handler(struct snd_emu10k1 *emu,
unsigned long flags;
spin_lock_irqsave(&emu->fx8010.irq_lock, flags);
- if ((tmp = emu->fx8010.irq_handlers) == irq) {
+ tmp = emu->fx8010.irq_handlers;
+ if (tmp == irq) {
emu->fx8010.irq_handlers = tmp->next;
if (emu->fx8010.irq_handlers == NULL) {
snd_emu10k1_intr_disable(emu, INTE_FXDSPENABLE);
@@ -871,7 +872,9 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
}
knew.private_value = (unsigned long)ctl;
*ctl = *nctl;
- if ((err = snd_ctl_add(emu->card, kctl = snd_ctl_new1(&knew, emu))) < 0) {
+ kctl = snd_ctl_new1(&knew, emu);
+ err = snd_ctl_add(emu->card, kctl);
+ if (err < 0) {
kfree(ctl);
kfree(knew.tlv.p);
goto __error;
@@ -2403,7 +2406,8 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
while (ptr < 0x200)
OP(icode, &ptr, iACC3, C_00000000, C_00000000, C_00000000, C_00000000);
- if ((err = snd_emu10k1_fx8010_tram_setup(emu, ipcm->buffer_size)) < 0)
+ err = snd_emu10k1_fx8010_tram_setup(emu, ipcm->buffer_size);
+ if (err < 0)
goto __err;
icode->gpr_add_control_count = i;
icode->gpr_add_controls = controls;
@@ -2681,7 +2685,8 @@ int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device)
struct snd_hwdep *hw;
int err;
- if ((err = snd_hwdep_new(emu->card, "FX8010", device, &hw)) < 0)
+ err = snd_hwdep_new(emu->card, "FX8010", device, &hw);
+ if (err < 0)
return err;
strcpy(hw->name, "EMU10K1 (FX8010)");
hw->iface = SNDRV_HWDEP_IFACE_EMU10K1;
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 8a6cbe67e29d..e9c0fe3b8446 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -1119,7 +1119,8 @@ static int snd_audigy_spdif_output_rate_put(struct snd_kcontrol *kcontrol,
reg = snd_emu10k1_ptr_read(emu, A_SPDIF_SAMPLERATE, 0);
tmp = reg & ~A_SPDIF_RATE_MASK;
tmp |= val;
- if ((change = (tmp != reg)))
+ change = (tmp != reg);
+ if (change)
snd_emu10k1_ptr_write(emu, A_SPDIF_SAMPLERATE, 0, tmp);
spin_unlock_irqrestore(&emu->reg_lock, flags);
return change;
@@ -1903,7 +1904,8 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
.read = snd_emu10k1_ac97_read,
};
- if ((err = snd_ac97_bus(emu->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(emu->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
pbus->no_vra = 1; /* we don't need VRA */
@@ -1911,7 +1913,8 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
ac97.private_data = emu;
ac97.private_free = snd_emu10k1_mixer_free_ac97;
ac97.scaps = AC97_SCAP_NO_SPDIF;
- if ((err = snd_ac97_mixer(pbus, &ac97, &emu->ac97)) < 0) {
+ err = snd_ac97_mixer(pbus, &ac97, &emu->ac97);
+ if (err < 0) {
if (emu->card_capabilities->ac97_chip == 1)
return err;
dev_info(emu->card->dev,
@@ -1991,38 +1994,50 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
rename_ctl(card, "Aux2 Capture Volume", "Line3 Capture Volume");
rename_ctl(card, "Mic Capture Volume", "Unknown1 Capture Volume");
}
- if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL)
+ kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu);
+ if (!kctl)
return -ENOMEM;
kctl->id.device = pcm_device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = emu->ctl_send_volume = snd_ctl_new1(&snd_emu10k1_send_volume_control, emu)) == NULL)
+ kctl = emu->ctl_send_volume = snd_ctl_new1(&snd_emu10k1_send_volume_control, emu);
+ if (!kctl)
return -ENOMEM;
kctl->id.device = pcm_device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = emu->ctl_attn = snd_ctl_new1(&snd_emu10k1_attn_control, emu)) == NULL)
+ kctl = emu->ctl_attn = snd_ctl_new1(&snd_emu10k1_attn_control, emu);
+ if (!kctl)
return -ENOMEM;
kctl->id.device = pcm_device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = emu->ctl_efx_send_routing = snd_ctl_new1(&snd_emu10k1_efx_send_routing_control, emu)) == NULL)
+ kctl = emu->ctl_efx_send_routing = snd_ctl_new1(&snd_emu10k1_efx_send_routing_control, emu);
+ if (!kctl)
return -ENOMEM;
kctl->id.device = multi_device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = emu->ctl_efx_send_volume = snd_ctl_new1(&snd_emu10k1_efx_send_volume_control, emu)) == NULL)
+ kctl = emu->ctl_efx_send_volume = snd_ctl_new1(&snd_emu10k1_efx_send_volume_control, emu);
+ if (!kctl)
return -ENOMEM;
kctl->id.device = multi_device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = emu->ctl_efx_attn = snd_ctl_new1(&snd_emu10k1_efx_attn_control, emu)) == NULL)
+ kctl = emu->ctl_efx_attn = snd_ctl_new1(&snd_emu10k1_efx_attn_control, emu);
+ if (!kctl)
return -ENOMEM;
kctl->id.device = multi_device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
/* initialize the routing and volume table for each pcm playback stream */
@@ -2069,42 +2084,53 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
if (! emu->card_capabilities->ecard) { /* FIXME: APS has these controls? */
/* sb live! and audigy */
- if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_mask_control, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_emu10k1_spdif_mask_control, emu);
+ if (!kctl)
return -ENOMEM;
if (!emu->audigy)
kctl->id.device = emu->pcm_efx->device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
- if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_control, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_emu10k1_spdif_control, emu);
+ if (!kctl)
return -ENOMEM;
if (!emu->audigy)
kctl->id.device = emu->pcm_efx->device;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
}
if (emu->card_capabilities->emu_model) {
; /* Disable the snd_audigy_spdif_shared_spdif */
} else if (emu->audigy) {
- if ((kctl = snd_ctl_new1(&snd_audigy_shared_spdif, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_audigy_shared_spdif, emu);
+ if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
#if 0
- if ((kctl = snd_ctl_new1(&snd_audigy_spdif_output_rate, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_audigy_spdif_output_rate, emu);
+ if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
#endif
} else if (! emu->card_capabilities->ecard) {
/* sb live! */
- if ((kctl = snd_ctl_new1(&snd_emu10k1_shared_spdif, emu)) == NULL)
+ kctl = snd_ctl_new1(&snd_emu10k1_shared_spdif, emu);
+ if (!kctl)
return -ENOMEM;
- if ((err = snd_ctl_add(card, kctl)))
+ err = snd_ctl_add(card, kctl);
+ if (err)
return err;
}
if (emu->card_capabilities->ca0151_chip) { /* P16V */
- if ((err = snd_p16v_mixer(emu)))
+ err = snd_p16v_mixer(emu);
+ if (err)
return err;
}
diff --git a/sound/pci/emu10k1/emumpu401.c b/sound/pci/emu10k1/emumpu401.c
index b62c95150702..3ce9b2129ce6 100644
--- a/sound/pci/emu10k1/emumpu401.c
+++ b/sound/pci/emu10k1/emumpu401.c
@@ -319,7 +319,8 @@ static int emu10k1_midi_init(struct snd_emu10k1 *emu, struct snd_emu10k1_midi *m
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(emu->card, name, device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(emu->card, name, device, 1, 1, &rmidi);
+ if (err < 0)
return err;
midi->emu = emu;
spin_lock_init(&midi->open_lock);
@@ -342,7 +343,8 @@ int snd_emu10k1_midi(struct snd_emu10k1 *emu)
struct snd_emu10k1_midi *midi = &emu->midi;
int err;
- if ((err = emu10k1_midi_init(emu, midi, 0, "EMU10K1 MPU-401 (UART)")) < 0)
+ err = emu10k1_midi_init(emu, midi, 0, "EMU10K1 MPU-401 (UART)");
+ if (err < 0)
return err;
midi->tx_enable = INTE_MIDITXENABLE;
@@ -360,7 +362,8 @@ int snd_emu10k1_audigy_midi(struct snd_emu10k1 *emu)
int err;
midi = &emu->midi;
- if ((err = emu10k1_midi_init(emu, midi, 0, "Audigy MPU-401 (UART)")) < 0)
+ err = emu10k1_midi_init(emu, midi, 0, "Audigy MPU-401 (UART)");
+ if (err < 0)
return err;
midi->tx_enable = INTE_MIDITXENABLE;
@@ -371,7 +374,8 @@ int snd_emu10k1_audigy_midi(struct snd_emu10k1 *emu)
midi->interrupt = snd_emu10k1_midi_interrupt;
midi = &emu->midi2;
- if ((err = emu10k1_midi_init(emu, midi, 1, "Audigy MPU-401 #2")) < 0)
+ err = emu10k1_midi_init(emu, midi, 1, "Audigy MPU-401 #2");
+ if (err < 0)
return err;
midi->tx_enable = INTE_A_MIDITXENABLE2;
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index b2ddabb99438..b2701a4452d8 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -25,7 +25,8 @@ static void snd_emu10k1_pcm_interrupt(struct snd_emu10k1 *emu,
{
struct snd_emu10k1_pcm *epcm;
- if ((epcm = voice->epcm) == NULL)
+ epcm = voice->epcm;
+ if (!epcm)
return;
if (epcm->substream == NULL)
return;
@@ -399,7 +400,8 @@ static int snd_emu10k1_playback_hw_params(struct snd_pcm_substream *substream,
size_t alloc_size;
int err;
- if ((err = snd_emu10k1_pcm_channel_alloc(epcm, params_channels(hw_params))) < 0)
+ err = snd_emu10k1_pcm_channel_alloc(epcm, params_channels(hw_params));
+ if (err < 0)
return err;
alloc_size = params_buffer_bytes(hw_params);
@@ -1124,11 +1126,13 @@ static int snd_emu10k1_playback_open(struct snd_pcm_substream *substream)
runtime->private_data = epcm;
runtime->private_free = snd_emu10k1_pcm_free_substream;
runtime->hw = snd_emu10k1_playback;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) {
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0) {
kfree(epcm);
return err;
}
- if ((err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256, UINT_MAX)) < 0) {
+ err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256, UINT_MAX);
+ if (err < 0) {
kfree(epcm);
return err;
}
@@ -1380,7 +1384,8 @@ int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
struct snd_pcm_substream *substream;
int err;
- if ((err = snd_pcm_new(emu->card, "emu10k1", device, 32, 1, &pcm)) < 0)
+ err = snd_pcm_new(emu->card, "emu10k1", device, 32, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
@@ -1412,7 +1417,8 @@ int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device)
struct snd_pcm_substream *substream;
int err;
- if ((err = snd_pcm_new(emu->card, "emu10k1", device, 1, 0, &pcm)) < 0)
+ err = snd_pcm_new(emu->card, "emu10k1", device, 1, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
@@ -1446,7 +1452,8 @@ int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(emu->card, "emu10k1 mic", device, 0, 1, &pcm)) < 0)
+ err = snd_pcm_new(emu->card, "emu10k1 mic", device, 0, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
@@ -1774,7 +1781,8 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
struct snd_kcontrol *kctl;
int err;
- if ((err = snd_pcm_new(emu->card, "emu10k1 efx", device, 8, 1, &pcm)) < 0)
+ err = snd_pcm_new(emu->card, "emu10k1 efx", device, 8, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 288e0fd2e47d..9d26535f3fa3 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -169,16 +169,20 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
struct snd_emu10k1_memblk *q;
/* calculate the expected size of empty region */
- if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
+ p = blk->mapped_link.prev;
+ if (p != &emu->mapped_link_head) {
q = get_emu10k1_memblk(p, mapped_link);
start_page = q->mapped_page + q->pages;
- } else
+ } else {
start_page = 1;
- if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
+ }
+ p = blk->mapped_link.next;
+ if (p != &emu->mapped_link_head) {
q = get_emu10k1_memblk(p, mapped_link);
end_page = q->mapped_page;
- } else
+ } else {
end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
+ }
/* remove links */
list_del(&blk->mapped_link);
@@ -267,7 +271,8 @@ int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *b
spin_unlock_irqrestore(&emu->memblk_lock, flags);
return 0;
}
- if ((err = map_memblk(emu, blk)) < 0) {
+ err = map_memblk(emu, blk);
+ if (err < 0) {
/* no enough page - try to unmap some blocks */
/* starting from the oldest block */
p = emu->mapped_order_link_head.next;
@@ -454,13 +459,15 @@ static void get_single_page_range(struct snd_util_memhdr *hdr,
struct snd_emu10k1_memblk *q;
int first_page, last_page;
first_page = blk->first_page;
- if ((p = blk->mem.list.prev) != &hdr->block) {
+ p = blk->mem.list.prev;
+ if (p != &hdr->block) {
q = get_emu10k1_memblk(p, mem.list);
if (q->last_page == first_page)
first_page++; /* first page was already allocated */
}
last_page = blk->last_page;
- if ((p = blk->mem.list.next) != &hdr->block) {
+ p = blk->mem.list.next;
+ if (p != &hdr->block) {
q = get_emu10k1_memblk(p, mem.list);
if (q->first_page == last_page)
last_page--; /* last page was already allocated */
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index 1099f102b365..ff2a3974c824 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -194,7 +194,8 @@ static int snd_p16v_pcm_open_playback_channel(struct snd_pcm_substream *substrea
#endif /* debug */
/* channel->interrupt = snd_p16v_pcm_channel_interrupt; */
channel->epcm = epcm;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
runtime->sync.id32[0] = substream->pcm->card->number;
@@ -242,7 +243,8 @@ static int snd_p16v_pcm_open_capture_channel(struct snd_pcm_substream *substream
#endif /* debug */
/* channel->interrupt = snd_p16v_pcm_channel_interrupt; */
channel->epcm = epcm;
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
return 0;
@@ -589,7 +591,8 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
/* dev_dbg(emu->card->dev, "snd_p16v_pcm called. device=%d\n", device); */
emu->p16v_device_offset = device;
- if ((err = snd_pcm_new(emu->card, "p16v", device, 1, capture, &pcm)) < 0)
+ err = snd_pcm_new(emu->card, "p16v", device, 1, capture, &pcm);
+ if (err < 0)
return err;
pcm->private_data = emu;
@@ -808,8 +811,8 @@ int snd_p16v_mixer(struct snd_emu10k1 *emu)
struct snd_card *card = emu->card;
for (i = 0; i < ARRAY_SIZE(p16v_mixer_controls); i++) {
- if ((err = snd_ctl_add(card, snd_ctl_new1(&p16v_mixer_controls[i],
- emu))) < 0)
+ err = snd_ctl_add(card, snd_ctl_new1(&p16v_mixer_controls[i], emu));
+ if (err < 0)
return err;
}
return 0;
diff --git a/sound/pci/emu10k1/timer.c b/sound/pci/emu10k1/timer.c
index c2803000aace..2435d3ba68f7 100644
--- a/sound/pci/emu10k1/timer.c
+++ b/sound/pci/emu10k1/timer.c
@@ -72,7 +72,8 @@ int snd_emu10k1_timer(struct snd_emu10k1 *emu, int device)
tid.card = emu->card->number;
tid.device = device;
tid.subdevice = 0;
- if ((err = snd_timer_new(emu->card, "EMU10K1", &tid, &timer)) >= 0) {
+ err = snd_timer_new(emu->card, "EMU10K1", &tid, &timer);
+ if (err >= 0) {
strcpy(timer->name, "EMU10K1 timer");
timer->private_data = emu;
timer->hw = snd_emu10k1_timer_hw;
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 3ccccdbc0029..728b69dad21b 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -672,7 +672,8 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97,
}
/* now wait for the stinkin' data (RDY) */
for (t = 0; t < POLL_COUNT; t++) {
- if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) {
+ x = inl(ES_REG(ensoniq, 1371_CODEC));
+ if (x & ES_1371_CODEC_RDY) {
if (is_ev1938(ensoniq)) {
for (t = 0; t < 100; t++)
inl(ES_REG(ensoniq, CONTROL));
@@ -1594,7 +1595,8 @@ static int snd_ensoniq_1371_mixer(struct ensoniq *ensoniq,
.wait = snd_es1371_codec_wait,
};
- if ((err = snd_ac97_bus(card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
@@ -1602,7 +1604,8 @@ static int snd_ensoniq_1371_mixer(struct ensoniq *ensoniq,
ac97.private_free = snd_ensoniq_mixer_free_ac97;
ac97.pci = ensoniq->pci;
ac97.scaps = AC97_SCAP_AUDIO;
- if ((err = snd_ac97_mixer(pbus, &ac97, &ensoniq->u.es1371.ac97)) < 0)
+ err = snd_ac97_mixer(pbus, &ac97, &ensoniq->u.es1371.ac97);
+ if (err < 0)
return err;
if (has_spdif > 0 ||
(!has_spdif && es1371_quirk_lookup(ensoniq, es1371_spdif_present))) {
@@ -1722,7 +1725,8 @@ static int snd_ensoniq_1370_mixer(struct ensoniq *ensoniq)
ak4531.write = snd_es1370_codec_write;
ak4531.private_data = ensoniq;
ak4531.private_free = snd_ensoniq_mixer_free_ak4531;
- if ((err = snd_ak4531_mixer(card, &ak4531, &ensoniq->u.es1370.ak4531)) < 0)
+ err = snd_ak4531_mixer(card, &ak4531, &ensoniq->u.es1370.ak4531);
+ if (err < 0)
return err;
for (idx = 0; idx < ES1370_CONTROLS; idx++) {
err = snd_ctl_add(card, snd_ctl_new1(&snd_es1370_controls[idx], ensoniq));
@@ -2038,7 +2042,8 @@ static int snd_ensoniq_create(struct snd_card *card,
};
*rensoniq = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
ensoniq = kzalloc(sizeof(*ensoniq), GFP_KERNEL);
if (ensoniq == NULL) {
@@ -2050,7 +2055,8 @@ static int snd_ensoniq_create(struct snd_card *card,
ensoniq->card = card;
ensoniq->pci = pci;
ensoniq->irq = -1;
- if ((err = pci_request_regions(pci, "Ensoniq AudioPCI")) < 0) {
+ err = pci_request_regions(pci, "Ensoniq AudioPCI");
+ if (err < 0) {
kfree(ensoniq);
pci_disable_device(pci);
return err;
@@ -2095,7 +2101,8 @@ static int snd_ensoniq_create(struct snd_card *card,
snd_ensoniq_chip_init(ensoniq);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ensoniq, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ensoniq, &ops);
+ if (err < 0) {
snd_ensoniq_free(ensoniq);
return err;
}
@@ -2286,7 +2293,8 @@ static int snd_ensoniq_midi(struct ensoniq *ensoniq, int device)
struct snd_rawmidi *rmidi;
int err;
- if ((err = snd_rawmidi_new(ensoniq->card, "ES1370/1", device, 1, 1, &rmidi)) < 0)
+ err = snd_rawmidi_new(ensoniq->card, "ES1370/1", device, 1, 1, &rmidi);
+ if (err < 0)
return err;
strcpy(rmidi->name, CHIP_NAME);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_ensoniq_midi_output);
@@ -2357,33 +2365,39 @@ static int snd_audiopci_probe(struct pci_dev *pci,
if (err < 0)
return err;
- if ((err = snd_ensoniq_create(card, pci, &ensoniq)) < 0) {
+ err = snd_ensoniq_create(card, pci, &ensoniq);
+ if (err < 0) {
snd_card_free(card);
return err;
}
card->private_data = ensoniq;
#ifdef CHIP1370
- if ((err = snd_ensoniq_1370_mixer(ensoniq)) < 0) {
+ err = snd_ensoniq_1370_mixer(ensoniq);
+ if (err < 0) {
snd_card_free(card);
return err;
}
#endif
#ifdef CHIP1371
- if ((err = snd_ensoniq_1371_mixer(ensoniq, spdif[dev], lineio[dev])) < 0) {
+ err = snd_ensoniq_1371_mixer(ensoniq, spdif[dev], lineio[dev]);
+ if (err < 0) {
snd_card_free(card);
return err;
}
#endif
- if ((err = snd_ensoniq_pcm(ensoniq, 0)) < 0) {
+ err = snd_ensoniq_pcm(ensoniq, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_ensoniq_pcm2(ensoniq, 1)) < 0) {
+ err = snd_ensoniq_pcm2(ensoniq, 1);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_ensoniq_midi(ensoniq, 0)) < 0) {
+ err = snd_ensoniq_midi(ensoniq, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2399,7 +2413,8 @@ static int snd_audiopci_probe(struct pci_dev *pci,
ensoniq->port,
ensoniq->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index afc66347d162..33b1eb347a27 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -293,7 +293,8 @@ static void snd_es1938_write_cmd(struct es1938 *chip, unsigned char cmd)
int i;
unsigned char v;
for (i = 0; i < WRITE_LOOP_TIMEOUT; i++) {
- if (!(v = inb(SLSB_REG(chip, READSTATUS)) & 0x80)) {
+ v = inb(SLSB_REG(chip, READSTATUS));
+ if (!(v & 0x80)) {
outb(cmd, SLSB_REG(chip, WRITEDATA));
return;
}
@@ -309,9 +310,11 @@ static int snd_es1938_get_byte(struct es1938 *chip)
{
int i;
unsigned char v;
- for (i = GET_LOOP_TIMEOUT; i; i--)
- if ((v = inb(SLSB_REG(chip, STATUS))) & 0x80)
+ for (i = GET_LOOP_TIMEOUT; i; i--) {
+ v = inb(SLSB_REG(chip, STATUS));
+ if (v & 0x80)
return inb(SLSB_REG(chip, READDATA));
+ }
dev_err(chip->card->dev, "get_byte timeout: status 0x02%x\n", v);
return -ENODEV;
}
@@ -993,7 +996,8 @@ static int snd_es1938_new_pcm(struct es1938 *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "es-1938-1946", device, 2, 1, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "es-1938-1946", device, 2, 1, &pcm);
+ if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_es1938_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_es1938_capture_ops);
@@ -1553,7 +1557,8 @@ static int snd_es1938_create(struct snd_card *card,
*rchip = NULL;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
/* check, if we can restrict PCI DMA transfers to 24 bits */
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(24))) {
@@ -1573,7 +1578,8 @@ static int snd_es1938_create(struct snd_card *card,
chip->card = card;
chip->pci = pci;
chip->irq = -1;
- if ((err = pci_request_regions(pci, "ESS Solo-1")) < 0) {
+ err = pci_request_regions(pci, "ESS Solo-1");
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -1599,7 +1605,8 @@ static int snd_es1938_create(struct snd_card *card,
snd_es1938_chip_init(chip);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_es1938_free(chip);
return err;
}
@@ -1731,7 +1738,8 @@ static int snd_es1938_mixer(struct es1938 *chip)
kctl->private_free = snd_es1938_hwv_free;
break;
}
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
return 0;
@@ -1765,7 +1773,8 @@ static int snd_es1938_probe(struct pci_dev *pci,
return -ENODEV;
}
}
- if ((err = snd_es1938_create(card, pci, &chip)) < 0) {
+ err = snd_es1938_create(card, pci, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1778,11 +1787,13 @@ static int snd_es1938_probe(struct pci_dev *pci,
chip->revision,
chip->irq);
- if ((err = snd_es1938_new_pcm(chip, 0)) < 0) {
+ err = snd_es1938_new_pcm(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_es1938_mixer(chip)) < 0) {
+ err = snd_es1938_mixer(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1793,11 +1804,13 @@ static int snd_es1938_probe(struct pci_dev *pci,
dev_err(card->dev, "OPL3 not detected at 0x%lx\n",
SLSB_REG(chip, FMLOWADDR));
} else {
- if ((err = snd_opl3_timer_new(opl3, 0, 1)) < 0) {
+ err = snd_opl3_timer_new(opl3, 0, 1);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1815,7 +1828,8 @@ static int snd_es1938_probe(struct pci_dev *pci,
snd_es1938_create_gameport(chip);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 5fa1861236f5..c038c1035c39 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -1600,7 +1600,8 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream)
es->mode = ESM_MODE_CAPTURE;
/* get mixbuffer */
- if ((es->mixbuf = snd_es1968_new_memory(chip, ESM_MIXBUF_SIZE)) == NULL) {
+ es->mixbuf = snd_es1968_new_memory(chip, ESM_MIXBUF_SIZE);
+ if (!es->mixbuf) {
snd_es1968_free_apu_pair(chip, apu1);
snd_es1968_free_apu_pair(chip, apu2);
kfree(es);
@@ -1695,11 +1696,13 @@ static void es1968_measure_clock(struct es1968 *chip)
chip->clock = 48000; /* default clock value */
/* search 2 APUs (although one apu is enough) */
- if ((apu = snd_es1968_alloc_apu_pair(chip, ESM_APU_PCM_PLAY)) < 0) {
+ apu = snd_es1968_alloc_apu_pair(chip, ESM_APU_PCM_PLAY);
+ if (apu < 0) {
dev_err(chip->card->dev, "Hmm, cannot find empty APU pair!?\n");
return;
}
- if ((memory = snd_es1968_new_memory(chip, CLOCK_MEASURE_BUFSIZE)) == NULL) {
+ memory = snd_es1968_new_memory(chip, CLOCK_MEASURE_BUFSIZE);
+ if (!memory) {
dev_warn(chip->card->dev,
"cannot allocate dma buffer - using default clock %d\n",
chip->clock);
@@ -1791,7 +1794,8 @@ snd_es1968_pcm(struct es1968 *chip, int device)
int err;
/* get DMA buffer */
- if ((err = snd_es1968_init_dmabuf(chip)) < 0)
+ err = snd_es1968_init_dmabuf(chip);
+ if (err < 0)
return err;
/* set PCMBAR */
@@ -1800,9 +1804,10 @@ snd_es1968_pcm(struct es1968 *chip, int device)
wave_set_register(chip, 0x01FE, chip->dma.addr >> 12);
wave_set_register(chip, 0x01FF, chip->dma.addr >> 12);
- if ((err = snd_pcm_new(chip->card, "ESS Maestro", device,
- chip->playback_streams,
- chip->capture_streams, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "ESS Maestro", device,
+ chip->playback_streams,
+ chip->capture_streams, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1953,7 +1958,8 @@ static irqreturn_t snd_es1968_interrupt(int irq, void *dev_id)
struct es1968 *chip = dev_id;
u32 event;
- if (!(event = inb(chip->io_port + 0x1A)))
+ event = inb(chip->io_port + 0x1A);
+ if (!event)
return IRQ_NONE;
outw(inw(chip->io_port + 4) & 1, chip->io_port + 4);
@@ -2008,13 +2014,15 @@ snd_es1968_mixer(struct es1968 *chip)
.read = snd_es1968_ac97_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
pbus->no_vra = 1; /* ES1968 doesn't need VRA */
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
- if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(pbus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
#ifndef CONFIG_SND_ES1968_INPUT
@@ -2661,7 +2669,8 @@ static int snd_es1968_create(struct snd_card *card,
*chip_ret = NULL;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
/* check, if we can restrict PCI DMA transfers to 28 bits */
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(28))) {
@@ -2692,7 +2701,8 @@ static int snd_es1968_create(struct snd_card *card,
chip->playback_streams = play_streams;
chip->capture_streams = capt_streams;
- if ((err = pci_request_regions(pci, "ESS Maestro")) < 0) {
+ err = pci_request_regions(pci, "ESS Maestro");
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -2739,7 +2749,8 @@ static int snd_es1968_create(struct snd_card *card,
snd_es1968_chip_init(chip);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_es1968_free(chip);
return err;
}
@@ -2804,14 +2815,15 @@ static int snd_es1968_probe(struct pci_dev *pci,
total_bufsize[dev] = 128;
if (total_bufsize[dev] > 4096)
total_bufsize[dev] = 4096;
- if ((err = snd_es1968_create(card, pci,
- total_bufsize[dev] * 1024, /* in bytes */
- pcm_substreams_p[dev],
- pcm_substreams_c[dev],
- pci_id->driver_data,
- use_pm[dev],
- radio_nr[dev],
- &chip)) < 0) {
+ err = snd_es1968_create(card, pci,
+ total_bufsize[dev] * 1024, /* in bytes */
+ pcm_substreams_p[dev],
+ pcm_substreams_c[dev],
+ pci_id->driver_data,
+ use_pm[dev],
+ radio_nr[dev],
+ &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2832,12 +2844,14 @@ static int snd_es1968_probe(struct pci_dev *pci,
break;
}
- if ((err = snd_es1968_pcm(chip, 0)) < 0) {
+ err = snd_es1968_pcm(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_es1968_mixer(chip)) < 0) {
+ err = snd_es1968_mixer(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2855,13 +2869,13 @@ static int snd_es1968_probe(struct pci_dev *pci,
}
}
if (enable_mpu[dev]) {
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401,
- chip->io_port + ESM_MPU401_PORT,
- MPU401_INFO_INTEGRATED |
- MPU401_INFO_IRQ_HOOK,
- -1, &chip->rmidi)) < 0) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401,
+ chip->io_port + ESM_MPU401_PORT,
+ MPU401_INFO_INTEGRATED |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &chip->rmidi);
+ if (err < 0)
dev_warn(card->dev, "skipping MPU-401 MIDI support..\n");
- }
}
snd_es1968_create_gameport(chip, dev);
@@ -2882,7 +2896,8 @@ static int snd_es1968_probe(struct pci_dev *pci,
sprintf(card->longname, "%s at 0x%lx, irq %i",
card->shortname, chip->io_port, chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 6279eb156e36..ed9dae87145b 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -659,7 +659,8 @@ static int snd_fm801_playback_open(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_CHANNELS,
&hw_constraints_channels);
}
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
return 0;
}
@@ -674,7 +675,8 @@ static int snd_fm801_capture_open(struct snd_pcm_substream *substream)
runtime->hw = snd_fm801_capture;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
return 0;
}
@@ -717,7 +719,8 @@ static int snd_fm801_pcm(struct fm801 *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "FM801", device, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "FM801", device, 1, 1, &pcm);
+ if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_fm801_playback_ops);
@@ -985,7 +988,8 @@ static int snd_fm801_put_mux(struct snd_kcontrol *kcontrol,
struct fm801 *chip = snd_kcontrol_chip(kcontrol);
unsigned short val;
- if ((val = ucontrol->value.enumerated.item[0]) > 4)
+ val = ucontrol->value.enumerated.item[0];
+ if (val > 4)
return -EINVAL;
return snd_fm801_update_bits(chip, FM801_REC_SRC, 7, val);
}
@@ -1050,19 +1054,22 @@ static int snd_fm801_mixer(struct fm801 *chip)
.read = snd_fm801_codec_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
+ if (err < 0)
return err;
chip->ac97_bus->private_free = snd_fm801_mixer_free_ac97_bus;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.private_free = snd_fm801_mixer_free_ac97;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
if (chip->secondary) {
ac97.num = 1;
ac97.addr = chip->secondary_addr;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec);
+ if (err < 0)
return err;
}
for (i = 0; i < FM801_CONTROLS; i++) {
@@ -1213,7 +1220,8 @@ static int snd_fm801_create(struct snd_card *card,
};
*rchip = NULL;
- if ((err = pcim_enable_device(pci)) < 0)
+ err = pcim_enable_device(pci);
+ if (err < 0)
return err;
chip = devm_kzalloc(&pci->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -1223,7 +1231,8 @@ static int snd_fm801_create(struct snd_card *card,
chip->dev = &pci->dev;
chip->irq = -1;
chip->tea575x_tuner = tea575x_tuner;
- if ((err = pci_request_regions(pci, "FM801")) < 0)
+ err = pci_request_regions(pci, "FM801");
+ if (err < 0)
return err;
chip->port = pci_resource_start(pci, 0);
@@ -1254,7 +1263,8 @@ static int snd_fm801_create(struct snd_card *card,
snd_fm801_chip_init(chip);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_fm801_free(chip);
return err;
}
@@ -1327,7 +1337,8 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
0, &card);
if (err < 0)
return err;
- if ((err = snd_fm801_create(card, pci, tea575x_tuner[dev], radio_nr[dev], &chip)) < 0) {
+ err = snd_fm801_create(card, pci, tea575x_tuner[dev], radio_nr[dev], &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1342,35 +1353,41 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
if (chip->tea575x_tuner & TUNER_ONLY)
goto __fm801_tuner_only;
- if ((err = snd_fm801_pcm(chip, 0)) < 0) {
+ err = snd_fm801_pcm(chip, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_fm801_mixer(chip)) < 0) {
+ err = snd_fm801_mixer(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_FM801,
- chip->port + FM801_MPU401_DATA,
- MPU401_INFO_INTEGRATED |
- MPU401_INFO_IRQ_HOOK,
- -1, &chip->rmidi)) < 0) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_FM801,
+ chip->port + FM801_MPU401_DATA,
+ MPU401_INFO_INTEGRATED |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &chip->rmidi);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3_create(card, chip->port + FM801_OPL3_BANK0,
- chip->port + FM801_OPL3_BANK1,
- OPL3_HW_OPL3_FM801, 1, &opl3)) < 0) {
+ err = snd_opl3_create(card, chip->port + FM801_OPL3_BANK0,
+ chip->port + FM801_OPL3_BANK1,
+ OPL3_HW_OPL3_FM801, 1, &opl3);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0) {
snd_card_free(card);
return err;
}
__fm801_tuner_only:
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 17a25e453f60..e8dee24c309d 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -167,8 +167,11 @@ static void hda_codec_driver_shutdown(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
- if (!pm_runtime_suspended(dev) && codec->patch_ops.reboot_notify)
- codec->patch_ops.reboot_notify(codec);
+ if (!pm_runtime_suspended(dev)) {
+ if (codec->patch_ops.reboot_notify)
+ codec->patch_ops.reboot_notify(codec);
+ snd_hda_codec_display_power(codec, false);
+ }
}
int __hda_codec_driver_register(struct hda_codec_driver *drv, const char *name,
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5462f771c2f9..7a717e151156 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -798,7 +798,7 @@ static unsigned int hda_set_power_state(struct hda_codec *codec,
unsigned int power_state);
/* enable/disable display power per codec */
-static void codec_display_power(struct hda_codec *codec, bool enable)
+void snd_hda_codec_display_power(struct hda_codec *codec, bool enable)
{
if (codec->display_power_control)
snd_hdac_display_power(&codec->bus->core, codec->addr, enable);
@@ -810,7 +810,7 @@ void snd_hda_codec_register(struct hda_codec *codec)
if (codec->registered)
return;
if (device_is_registered(hda_codec_dev(codec))) {
- codec_display_power(codec, true);
+ snd_hda_codec_display_power(codec, true);
pm_runtime_enable(hda_codec_dev(codec));
/* it was powered up in snd_hda_codec_new(), now all done */
snd_hda_power_down(codec);
@@ -836,7 +836,7 @@ static int snd_hda_codec_dev_free(struct snd_device *device)
*/
if (codec->core.type == HDA_DEV_LEGACY)
snd_hdac_device_unregister(&codec->core);
- codec_display_power(codec, false);
+ snd_hda_codec_display_power(codec, false);
/*
* In the case of ASoC HD-audio bus, the device refcount is released in
@@ -2893,7 +2893,7 @@ static int hda_codec_runtime_suspend(struct device *dev)
(codec_has_clkstop(codec) && codec_has_epss(codec) &&
(state & AC_PWRST_CLK_STOP_OK)))
snd_hdac_codec_link_down(&codec->core);
- codec_display_power(codec, false);
+ snd_hda_codec_display_power(codec, false);
return 0;
}
@@ -2905,7 +2905,7 @@ static int hda_codec_runtime_resume(struct device *dev)
if (!codec->card)
return 0;
- codec_display_power(codec, true);
+ snd_hda_codec_display_power(codec, true);
snd_hdac_codec_link_up(&codec->core);
hda_call_codec_resume(codec);
pm_runtime_mark_last_busy(dev);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 1f8018f9ce57..481d8f8d3396 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -1433,7 +1433,7 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
path = snd_hda_add_new_path(codec, dac, pin, 0);
}
if (!path) {
- dac = dacs[i] = 0;
+ dacs[i] = 0;
badness += bad->no_dac;
} else {
/* print_nid_path(codec, "output", path); */
@@ -3460,7 +3460,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
struct hda_gen_spec *spec = codec->spec;
const struct hda_input_mux *imux;
struct nid_path *path;
- int i, adc_idx, err = 0;
+ int i, adc_idx, ret, err = 0;
imux = &spec->input_mux;
adc_idx = kcontrol->id.index;
@@ -3470,9 +3470,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
if (!path || !path->ctls[type])
continue;
kcontrol->private_value = path->ctls[type];
- err = func(kcontrol, ucontrol);
- if (err < 0)
+ ret = func(kcontrol, ucontrol);
+ if (ret < 0) {
+ err = ret;
break;
+ }
+ if (ret > 0)
+ err = 1;
}
mutex_unlock(&codec->control_mutex);
if (err >= 0 && spec->cap_sync_hook)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 470753b36c8a..0062c18b646a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -883,10 +883,24 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
return azx_get_pos_posbuf(chip, azx_dev);
}
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
+{
+ azx_stop_chip(chip);
+ if (!skip_link_reset)
+ azx_enter_link_reset(chip);
+ azx_clear_irq_pending(chip);
+ display_power(chip, false);
+}
+
#ifdef CONFIG_PM
static DEFINE_MUTEX(card_list_lock);
static LIST_HEAD(card_list);
+static void azx_shutdown_chip(struct azx *chip)
+{
+ __azx_shutdown_chip(chip, false);
+}
+
static void azx_add_card_list(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -942,14 +956,6 @@ static bool azx_is_pm_ready(struct snd_card *card)
return true;
}
-static void __azx_runtime_suspend(struct azx *chip)
-{
- azx_stop_chip(chip);
- azx_enter_link_reset(chip);
- azx_clear_irq_pending(chip);
- display_power(chip, false);
-}
-
static void __azx_runtime_resume(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1028,7 +1034,7 @@ static int azx_suspend(struct device *dev)
chip = card->private_data;
bus = azx_bus(chip);
- __azx_runtime_suspend(chip);
+ azx_shutdown_chip(chip);
if (bus->irq >= 0) {
free_irq(bus->irq, chip);
bus->irq = -1;
@@ -1107,7 +1113,7 @@ static int azx_runtime_suspend(struct device *dev)
/* enable controller wake up event */
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | STATESTS_INT_MASK);
- __azx_runtime_suspend(chip);
+ azx_shutdown_chip(chip);
trace_azx_runtime_suspend(chip);
return 0;
}
@@ -2385,7 +2391,7 @@ static void azx_shutdown(struct pci_dev *pci)
return;
chip = card->private_data;
if (chip && chip->running)
- azx_stop_chip(chip);
+ __azx_shutdown_chip(chip, true);
}
/* PCI IDs */
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 4c5589c10f1d..8d2503e8dad8 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -709,6 +709,8 @@ void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
#define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
void snd_print_channel_allocation(int spk_alloc, char *buf, int buflen);
+void snd_hda_codec_display_power(struct hda_codec *codec, bool enable);
+
/*
*/
#define codec_err(codec, fmt, args...) \
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 6f2b743b9d75..ea700395bef4 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -235,11 +235,9 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
{
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
struct hdac_bus *bus = azx_bus(chip);
- struct device *dev = hda->dev;
struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hda->regs = devm_ioremap_resource(dev, res);
+ hda->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hda->regs))
return PTR_ERR(hda->regs);
@@ -262,6 +260,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
const char *sname, *drv_name = "tegra-hda";
struct device_node *np = pdev->dev.of_node;
+ if (irq_id < 0)
+ return irq_id;
+
err = hda_tegra_init_chip(chip, pdev);
if (err)
return err;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 49b4fdd2feab..b66e7bdbf483 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -7598,7 +7598,7 @@ static void ca0132_alt_free_active_dma_channels(struct hda_codec *codec)
*/
static void ca0132_alt_start_dsp_audio_streams(struct hda_codec *codec)
{
- const unsigned int dsp_dma_stream_ids[] = { 0x0c, 0x03, 0x04 };
+ static const unsigned int dsp_dma_stream_ids[] = { 0x0c, 0x03, 0x04 };
struct ca0132_spec *spec = codec->spec;
unsigned int i, tmp;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4b2cc8cb55c4..e143e69d8184 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1940,6 +1940,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
static const struct snd_pci_quirk force_connect_list[] = {
SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ab5113cccffa..7ad689f991e7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -385,6 +385,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
fallthrough;
case 0x10ec0215:
+ case 0x10ec0230:
case 0x10ec0233:
case 0x10ec0235:
case 0x10ec0236:
@@ -3054,6 +3055,7 @@ enum {
ALC269_TYPE_ALC257,
ALC269_TYPE_ALC215,
ALC269_TYPE_ALC225,
+ ALC269_TYPE_ALC287,
ALC269_TYPE_ALC294,
ALC269_TYPE_ALC300,
ALC269_TYPE_ALC623,
@@ -3090,6 +3092,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC257:
case ALC269_TYPE_ALC215:
case ALC269_TYPE_ALC225:
+ case ALC269_TYPE_ALC287:
case ALC269_TYPE_ALC294:
case ALC269_TYPE_ALC300:
case ALC269_TYPE_ALC623:
@@ -3153,6 +3156,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x48, 0x0);
@@ -3180,6 +3184,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x48, 0xd011);
@@ -3558,12 +3563,70 @@ static void alc256_shutup(struct hda_codec *codec)
}
}
+static void alc285_hp_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ int i, val;
+ int coef38, coef0d, coef36;
+
+ alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */
+ coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */
+ coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */
+ coef36 = alc_read_coef_idx(codec, 0x36); /* Passthrough Control */
+ alc_update_coef_idx(codec, 0x38, 1<<4, 0x0);
+ alc_update_coef_idx(codec, 0x0d, 0x110, 0x0);
+
+ alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+
+ if (hp_pin)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ msleep(130);
+ alc_update_coef_idx(codec, 0x36, 1<<14, 1<<14);
+ alc_update_coef_idx(codec, 0x36, 1<<13, 0x0);
+
+ if (hp_pin)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ msleep(10);
+ alc_write_coef_idx(codec, 0x67, 0x0); /* Set HP depop to manual mode */
+ alc_write_coefex_idx(codec, 0x58, 0x00, 0x7880);
+ alc_write_coefex_idx(codec, 0x58, 0x0f, 0xf049);
+ alc_update_coefex_idx(codec, 0x58, 0x03, 0x00f0, 0x00c0);
+
+ alc_write_coefex_idx(codec, 0x58, 0x00, 0xf888); /* HP depop procedure start */
+ val = alc_read_coefex_idx(codec, 0x58, 0x00);
+ for (i = 0; i < 20 && val & 0x8000; i++) {
+ msleep(50);
+ val = alc_read_coefex_idx(codec, 0x58, 0x00);
+ } /* Wait for depop procedure finish */
+
+ alc_write_coefex_idx(codec, 0x58, 0x00, val); /* write back the result */
+ alc_update_coef_idx(codec, 0x38, 1<<4, coef38);
+ alc_update_coef_idx(codec, 0x0d, 0x110, coef0d);
+ alc_update_coef_idx(codec, 0x36, 3<<13, coef36);
+
+ msleep(50);
+ alc_update_coef_idx(codec, 0x4a, 1<<15, 0);
+}
+
static void alc225_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp1_pin_sense, hp2_pin_sense;
+ if (spec->codec_variant != ALC269_TYPE_ALC287)
+ /* required only at boot or S3 and S4 resume time */
+ if (!spec->done_hp_init ||
+ is_s3_resume(codec) ||
+ is_s4_resume(codec)) {
+ alc285_hp_init(codec);
+ spec->done_hp_init = true;
+ }
+
if (!hp_pin)
hp_pin = 0x21;
msleep(30);
@@ -4744,6 +4807,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
@@ -4858,6 +4922,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0255);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -5007,6 +5072,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x1b, 0x0e4b);
@@ -5105,6 +5171,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
@@ -5218,6 +5285,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
@@ -5318,6 +5386,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x1b, 0x0e4b);
@@ -5611,6 +5680,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, alc255fw);
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, alc256fw);
@@ -6211,6 +6281,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
break;
+ case 0x10ec0230:
case 0x10ec0235:
case 0x10ec0236:
case 0x10ec0255:
@@ -6343,6 +6414,24 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
}
}
+static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const hda_nid_t conn[] = { 0x02 };
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x14, 0x90170110 }, /* rear speaker */
+ { }
+ };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ /* force front speaker to DAC1 */
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ break;
+ }
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -6569,6 +6658,7 @@ enum {
ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
ALC623_FIXUP_LENOVO_THINKSTATION_P340,
ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
+ ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -7810,6 +7900,8 @@ static const struct hda_fixup alc269_fixups[] = {
{ 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
{ }
},
+ .chained = true,
+ .chain_id = ALC289_FIXUP_ASUS_GA401,
},
[ALC285_FIXUP_HP_GPIO_LED] = {
.type = HDA_FIXUP_FUNC,
@@ -8127,13 +8219,8 @@ static const struct hda_fixup alc269_fixups[] = {
.chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
},
[ALC285_FIXUP_HP_SPECTRE_X360] = {
- .type = HDA_FIXUP_PINS,
- .v.pins = (const struct hda_pintbl[]) {
- { 0x14, 0x90170110 }, /* enable top speaker */
- {}
- },
- .chained = true,
- .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_spectre_x360,
},
[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
.type = HDA_FIXUP_FUNC,
@@ -8156,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
},
+ [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8188,9 +8281,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
@@ -8244,6 +8339,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8319,6 +8415,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -8336,19 +8433,27 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8369,6 +8474,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -8425,7 +8531,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1325, "Clevo N15[01][CW]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8460,11 +8566,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8535, "Clevo NH50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8536, "Clevo NH79D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
- SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
- SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1558, 0x8550, "Clevo NH[57][0-9][ER][ACDH]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8551, "Clevo NH[57][0-9][ER][ACDH]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8560, "Clevo NH[57][0-9][ER][ACDH]Q", ALC269_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1558, 0x8561, "Clevo NH[57][0-9][ER][ACDH]Q", ALC269_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8475,8 +8581,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8532,6 +8638,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
@@ -9341,6 +9448,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->shutup = alc256_shutup;
spec->init_hook = alc256_init;
break;
+ case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
@@ -9357,7 +9465,6 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0215:
case 0x10ec0245:
case 0x10ec0285:
- case 0x10ec0287:
case 0x10ec0289:
spec->codec_variant = ALC269_TYPE_ALC215;
spec->shutup = alc225_shutup;
@@ -9372,6 +9479,12 @@ static int patch_alc269(struct hda_codec *codec)
spec->init_hook = alc225_init;
spec->gen.mixer_nid = 0; /* no loopback on ALC225, ALC295 and ALC299 */
break;
+ case 0x10ec0287:
+ spec->codec_variant = ALC269_TYPE_ALC287;
+ spec->shutup = alc225_shutup;
+ spec->init_hook = alc225_init;
+ spec->gen.mixer_nid = 0; /* no loopback on ALC287 */
+ break;
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
@@ -10632,6 +10745,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0230, "ALC236", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index a5c1a2c4eae4..773a136161f1 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = {
};
static const struct snd_pci_quirk vt2002p_fixups[] = {
+ SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 1d2a0287284b..08adf4dd1303 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -676,13 +676,15 @@ static int snd_ice1712_delta_init(struct snd_ice1712 *ice)
case ICE1712_SUBDEVICE_DELTA1010LT:
case ICE1712_SUBDEVICE_VX442:
case ICE1712_SUBDEVICE_DELTA66E:
- if ((err = snd_i2c_bus_create(ice->card, "ICE1712 GPIO 1", NULL, &ice->i2c)) < 0) {
+ err = snd_i2c_bus_create(ice->card, "ICE1712 GPIO 1", NULL, &ice->i2c);
+ if (err < 0) {
dev_err(ice->card->dev, "unable to create I2C bus\n");
return err;
}
ice->i2c->private_data = ice;
ice->i2c->ops = &ap_cs8427_i2c_ops;
- if ((err = snd_ice1712_init_cs8427(ice, CS8427_BASE_ADDR)) < 0)
+ err = snd_ice1712_init_cs8427(ice, CS8427_BASE_ADDR);
+ if (err < 0)
return err;
break;
case ICE1712_SUBDEVICE_DELTA1010:
diff --git a/sound/pci/ice1712/ews.c b/sound/pci/ice1712/ews.c
index 3794308313bf..8bb86b3c894e 100644
--- a/sound/pci/ice1712/ews.c
+++ b/sound/pci/ice1712/ews.c
@@ -442,7 +442,8 @@ static int snd_ice1712_ews_init(struct snd_ice1712 *ice)
ice->spec = spec;
/* create i2c */
- if ((err = snd_i2c_bus_create(ice->card, "ICE1712 GPIO 1", NULL, &ice->i2c)) < 0) {
+ err = snd_i2c_bus_create(ice->card, "ICE1712 GPIO 1", NULL, &ice->i2c);
+ if (err < 0) {
dev_err(ice->card->dev, "unable to create I2C bus\n");
return err;
}
@@ -483,7 +484,8 @@ static int snd_ice1712_ews_init(struct snd_ice1712 *ice)
if (err < 0)
return err;
/* Check if the front module is connected */
- if ((err = snd_ice1712_ews88mt_chip_select(ice, 0x0f)) < 0)
+ err = snd_ice1712_ews88mt_chip_select(ice, 0x0f);
+ if (err < 0)
return err;
break;
case ICE1712_SUBDEVICE_EWS88D:
@@ -498,12 +500,14 @@ static int snd_ice1712_ews_init(struct snd_ice1712 *ice)
/* set up SPDIF interface */
switch (ice->eeprom.subvendor) {
case ICE1712_SUBDEVICE_EWX2496:
- if ((err = snd_ice1712_init_cs8427(ice, CS8427_BASE_ADDR)) < 0)
+ err = snd_ice1712_init_cs8427(ice, CS8427_BASE_ADDR);
+ if (err < 0)
return err;
snd_cs8427_reg_write(ice->cs8427, CS8427_REG_RECVERRMASK, CS8427_UNLOCK | CS8427_CONF | CS8427_BIP | CS8427_PAR);
break;
case ICE1712_SUBDEVICE_DMX6FIRE:
- if ((err = snd_ice1712_init_cs8427(ice, ICE1712_6FIRE_CS8427_ADDR)) < 0)
+ err = snd_ice1712_init_cs8427(ice, ICE1712_6FIRE_CS8427_ADDR);
+ if (err < 0)
return err;
snd_cs8427_reg_write(ice->cs8427, CS8427_REG_RECVERRMASK, CS8427_UNLOCK | CS8427_CONF | CS8427_BIP | CS8427_PAR);
break;
@@ -853,7 +857,8 @@ static int snd_ice1712_6fire_control_get(struct snd_kcontrol *kcontrol, struct s
int invert = (kcontrol->private_value >> 8) & 1;
int data;
- if ((data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT)) < 0)
+ data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT);
+ if (data < 0)
return data;
data = (data >> shift) & 1;
if (invert)
@@ -869,7 +874,8 @@ static int snd_ice1712_6fire_control_put(struct snd_kcontrol *kcontrol, struct s
int invert = (kcontrol->private_value >> 8) & 1;
int data, ndata;
- if ((data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT)) < 0)
+ data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT);
+ if (data < 0)
return data;
ndata = data & ~(1 << shift);
if (ucontrol->value.integer.value[0])
@@ -896,7 +902,8 @@ static int snd_ice1712_6fire_select_input_get(struct snd_kcontrol *kcontrol, str
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
int data;
- if ((data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT)) < 0)
+ data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT);
+ if (data < 0)
return data;
ucontrol->value.integer.value[0] = data & 3;
return 0;
@@ -907,7 +914,8 @@ static int snd_ice1712_6fire_select_input_put(struct snd_kcontrol *kcontrol, str
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
int data, ndata;
- if ((data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT)) < 0)
+ data = snd_ice1712_6fire_read_pca(ice, PCF9554_REG_OUTPUT);
+ if (data < 0)
return data;
ndata = data & ~3;
ndata |= (ucontrol->value.integer.value[0] & 3);
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 5b124c4ad572..df3ba5c70de9 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -79,7 +79,7 @@ enum { \
ICH_REG_##name##_PICB = base + 0x08, /* word - position in current buffer */ \
ICH_REG_##name##_PIV = base + 0x0a, /* byte - prefetched index value */ \
ICH_REG_##name##_CR = base + 0x0b, /* byte - control register */ \
-};
+}
/* busmaster blocks */
DEFINE_REGSET(OFF, 0); /* offset */
@@ -538,7 +538,8 @@ static unsigned short snd_intel8x0_codec_read(struct snd_ac97 *ac97,
res = 0xffff;
} else {
res = iagetword(chip, reg + ac97->num * 0x80);
- if ((tmp = igetdword(chip, ICHREG(GLOB_STA))) & ICH_RCS) {
+ tmp = igetdword(chip, ICHREG(GLOB_STA));
+ if (tmp & ICH_RCS) {
/* reset RCS and preserve other R/WC bits */
iputdword(chip, ICHREG(GLOB_STA), tmp &
~(chip->codec_ready_bits | ICH_GSCI));
@@ -559,7 +560,8 @@ static void snd_intel8x0_codec_read_test(struct intel8x0 *chip,
if (snd_intel8x0_codec_semaphore(chip, codec) >= 0) {
iagetword(chip, codec * 0x80);
- if ((tmp = igetdword(chip, ICHREG(GLOB_STA))) & ICH_RCS) {
+ tmp = igetdword(chip, ICHREG(GLOB_STA));
+ if (tmp & ICH_RCS) {
/* reset RCS and preserve other R/WC bits */
iputdword(chip, ICHREG(GLOB_STA), tmp &
~(chip->codec_ready_bits | ICH_GSCI));
@@ -692,7 +694,7 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
int status, civ, i, step;
int ack = 0;
- if (!ichdev->prepared || ichdev->suspended)
+ if (!(ichdev->prepared || chip->in_measurement) || ichdev->suspended)
return;
spin_lock_irqsave(&chip->reg_lock, flags);
@@ -1105,7 +1107,8 @@ static int snd_intel8x0_pcm_open(struct snd_pcm_substream *substream, struct ich
runtime->hw.buffer_bytes_max = 64*1024;
runtime->hw.period_bytes_max = 64*1024;
}
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
runtime->private_data = ichdev;
return 0;
@@ -2190,7 +2193,8 @@ static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
udelay(1);
}
}
- if ((err = snd_ac97_bus(chip->card, 0, ops, chip, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, ops, chip, &pbus);
+ if (err < 0)
goto __err;
pbus->private_free = snd_intel8x0_mixer_free_ac97_bus;
if (ac97_clock >= 8000 && ac97_clock <= 48000)
@@ -2206,7 +2210,8 @@ static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
ac97.pci = chip->pci;
for (i = 0; i < codecs; i++) {
ac97.num = i;
- if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) {
+ err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i]);
+ if (err < 0) {
if (err != -EACCES)
dev_err(chip->card->dev,
"Unable to initialize codec #%d\n", i);
@@ -2491,11 +2496,13 @@ static int snd_intel8x0_chip_init(struct intel8x0 *chip, int probing)
int err;
if (chip->device_type != DEVICE_ALI) {
- if ((err = snd_intel8x0_ich_chip_init(chip, probing)) < 0)
+ err = snd_intel8x0_ich_chip_init(chip, probing);
+ if (err < 0)
return err;
iagetword(chip, 0); /* clear semaphore flag */
} else {
- if ((err = snd_intel8x0_ali_chip_init(chip, probing)) < 0)
+ err = snd_intel8x0_ali_chip_init(chip, probing);
+ if (err < 0)
return err;
}
@@ -2918,7 +2925,8 @@ static int snd_intel8x0_create(struct snd_card *card,
*r_intel8x0 = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -2950,7 +2958,8 @@ static int snd_intel8x0_create(struct snd_card *card,
pci->device == PCI_DEVICE_ID_INTEL_440MX)
chip->fix_nocache = 1; /* enable workaround */
- if ((err = pci_request_regions(pci, card->shortname)) < 0) {
+ err = pci_request_regions(pci, card->shortname);
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -3064,7 +3073,8 @@ static int snd_intel8x0_create(struct snd_card *card,
for (i = 0; i < chip->max_codecs; i++)
chip->codec_isr_bits |= chip->codec_bit[i];
- if ((err = snd_intel8x0_chip_init(chip, 1)) < 0) {
+ err = snd_intel8x0_chip_init(chip, 1);
+ if (err < 0) {
snd_intel8x0_free(chip);
return err;
}
@@ -3079,7 +3089,8 @@ static int snd_intel8x0_create(struct snd_card *card,
chip->irq = pci->irq;
card->sync_irq = chip->irq;
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_intel8x0_free(chip);
return err;
}
@@ -3186,18 +3197,20 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
buggy_irq = 0;
}
- if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data,
- &chip)) < 0) {
+ err = snd_intel8x0_create(card, pci, pci_id->driver_data, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
card->private_data = chip;
- if ((err = snd_intel8x0_mixer(chip, ac97_clock, ac97_quirk)) < 0) {
+ err = snd_intel8x0_mixer(chip, ac97_clock, ac97_quirk);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_intel8x0_pcm(chip)) < 0) {
+ err = snd_intel8x0_pcm(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -3217,7 +3230,8 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
}
}
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 13ef838b26c1..b96fce6cbd83 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -57,7 +57,7 @@ enum { \
ICH_REG_##name##_PICB = base + 0x08, /* word - position in current buffer */ \
ICH_REG_##name##_PIV = base + 0x0a, /* byte - prefetched index value */ \
ICH_REG_##name##_CR = base + 0x0b, /* byte - control register */ \
-};
+}
/* busmaster blocks */
DEFINE_REGSET(OFF, 0); /* offset */
@@ -342,7 +342,8 @@ static unsigned short snd_intel8x0m_codec_read(struct snd_ac97 *ac97,
res = 0xffff;
} else {
res = iagetword(chip, reg + ac97->num * 0x80);
- if ((tmp = igetdword(chip, ICHREG(GLOB_STA))) & ICH_RCS) {
+ tmp = igetdword(chip, ICHREG(GLOB_STA));
+ if (tmp & ICH_RCS) {
/* reset RCS and preserve other R/WC bits */
iputdword(chip, ICHREG(GLOB_STA),
tmp & ~(ICH_SRI|ICH_PRI|ICH_TRI|ICH_GSCI));
@@ -800,7 +801,8 @@ static int snd_intel8x0m_mixer(struct intel8x0m *chip, int ac97_clock)
glob_sta = igetdword(chip, ICHREG(GLOB_STA));
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus);
+ if (err < 0)
goto __err;
pbus->private_free = snd_intel8x0m_mixer_free_ac97_bus;
if (ac97_clock >= 8000 && ac97_clock <= 48000)
@@ -809,7 +811,8 @@ static int snd_intel8x0m_mixer(struct intel8x0m *chip, int ac97_clock)
ac97.pci = chip->pci;
ac97.num = glob_sta & ICH_SCR ? 1 : 0;
- if ((err = snd_ac97_mixer(pbus, &ac97, &x97)) < 0) {
+ err = snd_ac97_mixer(pbus, &ac97, &x97);
+ if (err < 0) {
dev_err(chip->card->dev,
"Unable to initialize codec #%d\n", ac97.num);
if (ac97.num == 0)
@@ -927,7 +930,8 @@ static int snd_intel8x0m_chip_init(struct intel8x0m *chip, int probing)
unsigned int i;
int err;
- if ((err = snd_intel8x0m_ich_chip_init(chip, probing)) < 0)
+ err = snd_intel8x0m_ich_chip_init(chip, probing);
+ if (err < 0)
return err;
iagetword(chip, 0); /* clear semaphore flag */
@@ -1075,7 +1079,8 @@ static int snd_intel8x0m_create(struct snd_card *card,
*r_intel8x0m = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -1089,7 +1094,8 @@ static int snd_intel8x0m_create(struct snd_card *card,
chip->pci = pci;
chip->irq = -1;
- if ((err = pci_request_regions(pci, card->shortname)) < 0) {
+ err = pci_request_regions(pci, card->shortname);
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -1167,7 +1173,8 @@ port_inited:
pci_set_master(pci);
- if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) {
+ err = snd_intel8x0m_chip_init(chip, 1);
+ if (err < 0) {
snd_intel8x0m_free(chip);
return err;
}
@@ -1181,7 +1188,8 @@ port_inited:
chip->irq = pci->irq;
card->sync_irq = chip->irq;
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_intel8x0m_free(chip);
return err;
}
@@ -1238,17 +1246,20 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
}
strcat(card->shortname," Modem");
- if ((err = snd_intel8x0m_create(card, pci, pci_id->driver_data, &chip)) < 0) {
+ err = snd_intel8x0m_create(card, pci, pci_id->driver_data, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
card->private_data = chip;
- if ((err = snd_intel8x0m_mixer(chip, ac97_clock)) < 0) {
+ err = snd_intel8x0m_mixer(chip, ac97_clock);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_intel8x0m_pcm(chip)) < 0) {
+ err = snd_intel8x0m_pcm(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1258,7 +1269,8 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
sprintf(card->longname, "%s at irq %i",
card->shortname, chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 80ac3c6152ad..030e01b062e4 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -1527,7 +1527,8 @@ static int snd_korg1212_hw_params(struct snd_pcm_substream *substream,
return 0;
}
- if ((err = snd_korg1212_SetRate(korg1212, params_rate(params))) < 0) {
+ err = snd_korg1212_SetRate(korg1212, params_rate(params));
+ if (err < 0) {
spin_unlock_irqrestore(&korg1212->lock, flags);
return err;
}
@@ -2159,7 +2160,8 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
};
* rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
korg1212 = kzalloc(sizeof(*korg1212), GFP_KERNEL);
@@ -2196,7 +2198,8 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
for (i=0; i<kAudioChannels; i++)
korg1212->volumePhase[i] = 0;
- if ((err = pci_request_regions(pci, "korg1212")) < 0) {
+ err = pci_request_regions(pci, "korg1212");
+ if (err < 0) {
kfree(korg1212);
pci_disable_device(pci);
return err;
@@ -2220,7 +2223,8 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
korg1212->iomem2, iomem2_size,
stateName[korg1212->cardState]);
- if ((korg1212->iobase = ioremap(korg1212->iomem, iomem_size)) == NULL) {
+ korg1212->iobase = ioremap(korg1212->iomem, iomem_size);
+ if (!korg1212->iobase) {
snd_printk(KERN_ERR "korg1212: unable to remap memory region 0x%lx-0x%lx\n", korg1212->iomem,
korg1212->iomem + iomem_size - 1);
snd_korg1212_free(korg1212);
@@ -2360,7 +2364,8 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
if (rc)
K1212_DEBUG_PRINTK("K1212_DEBUG: Reboot Card - RC = %d [%s]\n", rc, stateName[korg1212->cardState]);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, korg1212, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, korg1212, &ops);
+ if (err < 0) {
snd_korg1212_free(korg1212);
return err;
}
@@ -2385,7 +2390,8 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
korg1212->RoutingTablePhy, LowerWordSwap(korg1212->RoutingTablePhy),
korg1212->AdatTimeCodePhy, LowerWordSwap(korg1212->AdatTimeCodePhy));
- if ((err = snd_pcm_new(korg1212->card, "korg1212", 0, 1, 1, &korg1212->pcm)) < 0)
+ err = snd_pcm_new(korg1212->card, "korg1212", 0, 1, 1, &korg1212->pcm);
+ if (err < 0)
return err;
korg1212->pcm->private_data = korg1212;
@@ -2436,7 +2442,8 @@ snd_korg1212_probe(struct pci_dev *pci,
if (err < 0)
return err;
- if ((err = snd_korg1212_create(card, pci, &korg1212)) < 0) {
+ err = snd_korg1212_create(card, pci, &korg1212);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -2448,7 +2455,8 @@ snd_korg1212_probe(struct pci_dev *pci,
K1212_DEBUG_PRINTK("K1212_DEBUG: %s\n", card->longname);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index f884f5a6a61c..d3f58a3d17fb 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -674,10 +674,6 @@ int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
u32 channels = runtime->channels;
- if (runtime->channels != channels)
- dev_err(chip->card->dev, "channel count mismatch: %d vs %d",
- runtime->channels, channels);
-
mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index cdc4b6106252..77a484bc8c0d 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -1765,7 +1765,8 @@ snd_m3_playback_open(struct snd_pcm_substream *subs)
struct snd_pcm_runtime *runtime = subs->runtime;
int err;
- if ((err = snd_m3_substream_open(chip, subs)) < 0)
+ err = snd_m3_substream_open(chip, subs);
+ if (err < 0)
return err;
runtime->hw = snd_m3_playback;
@@ -1789,7 +1790,8 @@ snd_m3_capture_open(struct snd_pcm_substream *subs)
struct snd_pcm_runtime *runtime = subs->runtime;
int err;
- if ((err = snd_m3_substream_open(chip, subs)) < 0)
+ err = snd_m3_substream_open(chip, subs);
+ if (err < 0)
return err;
runtime->hw = snd_m3_capture;
@@ -2036,12 +2038,14 @@ static int snd_m3_mixer(struct snd_m3 *chip)
.read = snd_m3_ac97_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
- if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(pbus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
/* seems ac97 PCM needs initialization.. hack hack.. */
@@ -2642,16 +2646,19 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
if (err < 0)
goto free_chip;
- if ((err = snd_m3_mixer(chip)) < 0)
+ err = snd_m3_mixer(chip);
+ if (err < 0)
return err;
for (i = 0; i < chip->num_substreams; i++) {
struct m3_dma *s = &chip->substreams[i];
- if ((err = snd_m3_assp_client_init(chip, s, i)) < 0)
+ err = snd_m3_assp_client_init(chip, s, i);
+ if (err < 0)
return err;
}
- if ((err = snd_m3_pcm(chip, 0)) < 0)
+ err = snd_m3_pcm(chip, 0);
+ if (err < 0)
return err;
#ifdef CONFIG_SND_MAESTRO3_INPUT
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index a0bbb386dc25..1b078b789604 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -954,9 +954,10 @@ static int snd_mixart_pcm_analog(struct snd_mixart *chip)
char name[32];
sprintf(name, "miXart analog %d", chip->chip_idx);
- if ((err = snd_pcm_new(chip->card, name, MIXART_PCM_ANALOG,
- MIXART_PLAYBACK_STREAMS,
- MIXART_CAPTURE_STREAMS, &pcm)) < 0) {
+ err = snd_pcm_new(chip->card, name, MIXART_PCM_ANALOG,
+ MIXART_PLAYBACK_STREAMS,
+ MIXART_CAPTURE_STREAMS, &pcm);
+ if (err < 0) {
dev_err(chip->card->dev,
"cannot create the analog pcm %d\n", chip->chip_idx);
return err;
@@ -987,9 +988,10 @@ static int snd_mixart_pcm_digital(struct snd_mixart *chip)
char name[32];
sprintf(name, "miXart AES/EBU %d", chip->chip_idx);
- if ((err = snd_pcm_new(chip->card, name, MIXART_PCM_DIGITAL,
- MIXART_PLAYBACK_STREAMS,
- MIXART_CAPTURE_STREAMS, &pcm)) < 0) {
+ err = snd_pcm_new(chip->card, name, MIXART_PCM_DIGITAL,
+ MIXART_PLAYBACK_STREAMS,
+ MIXART_CAPTURE_STREAMS, &pcm);
+ if (err < 0) {
dev_err(chip->card->dev,
"cannot create the digital pcm %d\n", chip->chip_idx);
return err;
@@ -1042,7 +1044,8 @@ static int snd_mixart_create(struct mixart_mgr *mgr, struct snd_card *card, int
chip->mgr = mgr;
card->sync_irq = mgr->irq;
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_mixart_chip_free(chip);
return err;
}
@@ -1243,7 +1246,8 @@ static int snd_mixart_probe(struct pci_dev *pci,
}
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
pci_set_master(pci);
@@ -1267,7 +1271,8 @@ static int snd_mixart_probe(struct pci_dev *pci,
mgr->irq = -1;
/* resource assignment */
- if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
+ err = pci_request_regions(pci, CARD_NAME);
+ if (err < 0) {
kfree(mgr);
pci_disable_device(pci);
return err;
@@ -1332,7 +1337,8 @@ static int snd_mixart_probe(struct pci_dev *pci,
"Digigram miXart at 0x%lx & 0x%lx, irq %i [PCM #%d]",
mgr->mem[0].phys, mgr->mem[1].phys, mgr->irq, i);
- if ((err = snd_mixart_create(mgr, card, i)) < 0) {
+ err = snd_mixart_create(mgr, card, i);
+ if (err < 0) {
snd_card_free(card);
snd_mixart_free(mgr);
return err;
@@ -1343,7 +1349,8 @@ static int snd_mixart_probe(struct pci_dev *pci,
snd_mixart_proc_init(mgr->chip[i]);
}
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_mixart_free(mgr);
return err;
}
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index f579f7698bba..689c0f995a9c 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -306,9 +306,13 @@ static int mixart_first_init(struct mixart_mgr *mgr)
int err;
struct mixart_msg request;
- if((err = mixart_enum_connectors(mgr)) < 0) return err;
+ err = mixart_enum_connectors(mgr);
+ if (err < 0)
+ return err;
- if((err = mixart_enum_physio(mgr)) < 0) return err;
+ err = mixart_enum_physio(mgr);
+ if (err < 0)
+ return err;
/* send a synchro command to card (necessary to do this before first MSG_STREAM_START_STREAM_GRP_PACKET) */
/* though why not here */
@@ -528,15 +532,18 @@ static int mixart_dsp_load(struct mixart_mgr* mgr, int index, const struct firmw
for (card_index = 0; card_index < mgr->num_cards; card_index++) {
struct snd_mixart *chip = mgr->chip[card_index];
- if ((err = snd_mixart_create_pcm(chip)) < 0)
+ err = snd_mixart_create_pcm(chip);
+ if (err < 0)
return err;
if (card_index == 0) {
- if ((err = snd_mixart_create_mixer(chip->mgr)) < 0)
+ err = snd_mixart_create_mixer(chip->mgr);
+ if (err < 0)
return err;
}
- if ((err = snd_card_register(chip->card)) < 0)
+ err = snd_card_register(chip->card);
+ if (err < 0)
return err;
}
diff --git a/sound/pci/mixart/mixart_mixer.c b/sound/pci/mixart/mixart_mixer.c
index d2e7c3381267..2727f3345795 100644
--- a/sound/pci/mixart/mixart_mixer.c
+++ b/sound/pci/mixart/mixart_mixer.c
@@ -1114,10 +1114,12 @@ int snd_mixart_create_mixer(struct mixart_mgr *mgr)
temp = mixart_control_analog_level;
temp.name = "Master Playback Volume";
temp.private_value = 0; /* playback */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
/* output mute controls */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&mixart_control_output_switch, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&mixart_control_output_switch, chip));
+ if (err < 0)
return err;
/* analog input level control only on first two chips !*/
@@ -1125,7 +1127,8 @@ int snd_mixart_create_mixer(struct mixart_mgr *mgr)
temp = mixart_control_analog_level;
temp.name = "Master Capture Volume";
temp.private_value = 1; /* capture */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
@@ -1133,45 +1136,53 @@ int snd_mixart_create_mixer(struct mixart_mgr *mgr)
temp.name = "PCM Playback Volume";
temp.count = MIXART_PLAYBACK_STREAMS;
temp.private_value = 0; /* playback analog */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
temp.name = "PCM Capture Volume";
temp.count = 1;
temp.private_value = MIXART_VOL_REC_MASK; /* capture analog */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
if(mgr->board_type == MIXART_DAUGHTER_TYPE_AES) {
temp.name = "AES Playback Volume";
temp.count = MIXART_PLAYBACK_STREAMS;
temp.private_value = MIXART_VOL_AES_MASK; /* playback AES/EBU */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
temp.name = "AES Capture Volume";
temp.count = 0;
temp.private_value = MIXART_VOL_REC_MASK | MIXART_VOL_AES_MASK; /* capture AES/EBU */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
temp = mixart_control_pcm_switch;
temp.name = "PCM Playback Switch";
temp.private_value = 0; /* playback analog */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
if(mgr->board_type == MIXART_DAUGHTER_TYPE_AES) {
temp.name = "AES Playback Switch";
temp.private_value = MIXART_VOL_AES_MASK; /* playback AES/EBU */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&temp, chip));
+ if (err < 0)
return err;
}
/* monitoring */
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&mixart_control_monitor_vol, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&mixart_control_monitor_vol, chip));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&mixart_control_monitor_sw, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&mixart_control_monitor_sw, chip));
+ if (err < 0)
return err;
/* init all mixer data and program the master volumes/switches */
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 6cb689aa28c2..12d02d7d3b51 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -1318,7 +1318,8 @@ snd_nm256_mixer(struct nm256 *chip)
if (! chip->ac97_regs)
return -ENOMEM;
- if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus);
+ if (err < 0)
return err;
memset(&ac97, 0, sizeof(ac97));
@@ -1476,7 +1477,8 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
*chip_ret = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -1568,7 +1570,8 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
chip->buffer_end = buffer_top;
else {
/* get buffer end pointer from signature */
- if ((err = snd_nm256_peek_for_sig(chip)) < 0)
+ err = snd_nm256_peek_for_sig(chip);
+ if (err < 0)
goto __error;
}
@@ -1618,7 +1621,8 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
// pci_set_master(pci); /* needed? */
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0)
goto __error;
*chip_ret = chip;
@@ -1700,7 +1704,8 @@ static int snd_nm256_probe(struct pci_dev *pci,
capture_bufsize = 4;
if (capture_bufsize > 128)
capture_bufsize = 128;
- if ((err = snd_nm256_create(card, pci, &chip)) < 0) {
+ err = snd_nm256_create(card, pci, &chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1716,8 +1721,13 @@ static int snd_nm256_probe(struct pci_dev *pci,
chip->reset_workaround_2 = 1;
}
- if ((err = snd_nm256_pcm(chip, 0)) < 0 ||
- (err = snd_nm256_mixer(chip)) < 0) {
+ err = snd_nm256_pcm(chip, 0);
+ if (err < 0) {
+ snd_card_free(card);
+ return err;
+ }
+ err = snd_nm256_mixer(chip);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1727,7 +1737,8 @@ static int snd_nm256_probe(struct pci_dev *pci,
card->shortname,
chip->buffer_addr, chip->cport_addr, chip->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index 751f9744b089..96b9371c201a 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1134,9 +1134,10 @@ int pcxhr_create_pcm(struct snd_pcxhr *chip)
char name[32];
snprintf(name, sizeof(name), "pcxhr %d", chip->chip_idx);
- if ((err = snd_pcm_new(chip->card, name, 0,
- chip->nb_streams_play,
- chip->nb_streams_capt, &pcm)) < 0) {
+ err = snd_pcm_new(chip->card, name, 0,
+ chip->nb_streams_play,
+ chip->nb_streams_capt, &pcm);
+ if (err < 0) {
dev_err(chip->card->dev, "cannot create pcm %s\n", name);
return err;
}
@@ -1202,7 +1203,8 @@ static int pcxhr_create(struct pcxhr_mgr *mgr,
chip->nb_streams_capt = 1; /* or 1 stereo stream */
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
pcxhr_chip_free(chip);
return err;
}
@@ -1492,7 +1494,8 @@ static int pcxhr_probe(struct pci_dev *pci,
}
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
pci_set_master(pci);
@@ -1537,7 +1540,8 @@ static int pcxhr_probe(struct pci_dev *pci,
mgr->granularity = PCXHR_GRANULARITY;
/* resource assignment */
- if ((err = pci_request_regions(pci, card_name)) < 0) {
+ err = pci_request_regions(pci, card_name);
+ if (err < 0) {
kfree(mgr);
pci_disable_device(pci);
return err;
@@ -1608,7 +1612,8 @@ static int pcxhr_probe(struct pci_dev *pci,
snprintf(card->longname, sizeof(card->longname),
"%s [PCM #%d]", mgr->name, i);
- if ((err = pcxhr_create(mgr, card, i)) < 0) {
+ err = pcxhr_create(mgr, card, i);
+ if (err < 0) {
snd_card_free(card);
pcxhr_free(mgr);
return err;
@@ -1618,7 +1623,8 @@ static int pcxhr_probe(struct pci_dev *pci,
/* init proc interface only for chip0 */
pcxhr_proc_init(mgr->chip[i]);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
pcxhr_free(mgr);
return err;
}
diff --git a/sound/pci/pcxhr/pcxhr_hwdep.c b/sound/pci/pcxhr/pcxhr_hwdep.c
index 2258bd698844..249805065f61 100644
--- a/sound/pci/pcxhr/pcxhr_hwdep.c
+++ b/sound/pci/pcxhr/pcxhr_hwdep.c
@@ -322,14 +322,17 @@ static int pcxhr_dsp_load(struct pcxhr_mgr *mgr, int index,
for (card_index = 0; card_index < mgr->num_cards; card_index++) {
struct snd_pcxhr *chip = mgr->chip[card_index];
- if ((err = pcxhr_create_pcm(chip)) < 0)
+ err = pcxhr_create_pcm(chip);
+ if (err < 0)
return err;
if (card_index == 0) {
- if ((err = pcxhr_create_mixer(chip->mgr)) < 0)
+ err = pcxhr_create_mixer(chip->mgr);
+ if (err < 0)
return err;
}
- if ((err = snd_card_register(chip->card)) < 0)
+ err = snd_card_register(chip->card);
+ if (err < 0)
return err;
}
err = pcxhr_start_pipes(mgr);
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 56827db97239..709a1a2cde20 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1087,9 +1087,15 @@ static irqreturn_t riptide_handleirq(int irq, void *dev_id)
substream[i] = chip->playback_substream[i];
substream[i] = chip->capture_substream;
for (i = 0; i < PLAYBACK_SUBSTREAMS + 1; i++) {
- if (substream[i] &&
- (runtime = substream[i]->runtime) &&
- (data = runtime->private_data) && data->state != ST_STOP) {
+ if (!substream[i])
+ continue;
+ runtime = substream[i]->runtime;
+ if (!runtime)
+ continue;
+ data = runtime->private_data;
+ if (!data)
+ continue;
+ if (data->state != ST_STOP) {
pos = 0;
for (j = 0; j < data->pages; j++) {
c = &data->sgdbuf[j];
@@ -1549,10 +1555,10 @@ snd_riptide_hw_params(struct snd_pcm_substream *substream,
(int)sgdlist->bytes);
if (sgdlist->area)
snd_dma_free_pages(sgdlist);
- if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- &chip->pci->dev,
- sizeof(struct sgd) * (DESC_MAX_MASK + 1),
- sgdlist)) < 0) {
+ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
+ sizeof(struct sgd) * (DESC_MAX_MASK + 1),
+ sgdlist);
+ if (err < 0) {
snd_printk(KERN_ERR "Riptide: failed to alloc %d dma bytes\n",
(int)sizeof(struct sgd) * (DESC_MAX_MASK + 1));
return err;
@@ -1677,9 +1683,9 @@ static int snd_riptide_pcm(struct snd_riptide *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err =
- snd_pcm_new(chip->card, "RIPTIDE", device, PLAYBACK_SUBSTREAMS, 1,
- &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "RIPTIDE", device, PLAYBACK_SUBSTREAMS, 1,
+ &pcm);
+ if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_riptide_playback_ops);
@@ -1766,14 +1772,16 @@ static int snd_riptide_initialize(struct snd_riptide *chip)
cif = chip->cif;
if (!cif) {
- if ((cif = kzalloc(sizeof(struct cmdif), GFP_KERNEL)) == NULL)
+ cif = kzalloc(sizeof(struct cmdif), GFP_KERNEL);
+ if (!cif)
return -ENOMEM;
cif->hwport = (struct riptideport *)chip->port;
spin_lock_init(&cif->lock);
chip->cif = cif;
}
cif->is_reset = 0;
- if ((err = riptide_reset(cif, chip)) != 0)
+ err = riptide_reset(cif, chip);
+ if (err)
return err;
device_id = chip->device_id;
switch (device_id) {
@@ -1797,7 +1805,8 @@ static int snd_riptide_free(struct snd_riptide *chip)
if (!chip)
return 0;
- if ((cif = chip->cif)) {
+ cif = chip->cif;
+ if (cif) {
SET_GRESET(cif->hwport);
udelay(100);
UNSET_GRESET(cif->hwport);
@@ -1830,9 +1839,11 @@ snd_riptide_create(struct snd_card *card, struct pci_dev *pci,
};
*rchip = NULL;
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
- if (!(chip = kzalloc(sizeof(struct snd_riptide), GFP_KERNEL)))
+ chip = kzalloc(sizeof(struct snd_riptide), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
spin_lock_init(&chip->lock);
@@ -1845,8 +1856,8 @@ snd_riptide_create(struct snd_card *card, struct pci_dev *pci,
chip->handled_irqs = 0;
chip->cif = NULL;
- if ((chip->res_port =
- request_region(chip->port, 64, "RIPTIDE")) == NULL) {
+ chip->res_port = request_region(chip->port, 64, "RIPTIDE");
+ if (!chip->res_port) {
snd_printk(KERN_ERR
"Riptide: unable to grab region 0x%lx-0x%lx\n",
chip->port, chip->port + 64 - 1);
@@ -1868,12 +1879,14 @@ snd_riptide_create(struct snd_card *card, struct pci_dev *pci,
card->sync_irq = chip->irq;
chip->device_id = pci->device;
pci_set_master(pci);
- if ((err = snd_riptide_initialize(chip)) < 0) {
+ err = snd_riptide_initialize(chip);
+ if (err < 0) {
snd_riptide_free(chip);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_riptide_free(chip);
return err;
}
@@ -1903,7 +1916,8 @@ snd_riptide_proc_read(struct snd_info_entry *entry,
for (i = 0; i < 64; i += 4)
snd_iprintf(buffer, "%c%02x: %08x",
(i % 16) ? ' ' : '\n', i, inl(chip->port + i));
- if ((cif = chip->cif)) {
+ cif = chip->cif;
+ if (cif) {
snd_iprintf(buffer,
"\nVersion: ASIC: %d CODEC: %d AUXDSP: %d PROG: %d",
chip->firmware.firmware.ASIC,
@@ -1922,10 +1936,11 @@ snd_riptide_proc_read(struct snd_info_entry *entry,
}
snd_iprintf(buffer, "\nOpen streams %d:\n", chip->openstreams);
for (i = 0; i < PLAYBACK_SUBSTREAMS; i++) {
- if (chip->playback_substream[i]
- && chip->playback_substream[i]->runtime
- && (data =
- chip->playback_substream[i]->runtime->private_data)) {
+ if (!chip->playback_substream[i] ||
+ !chip->playback_substream[i]->runtime)
+ continue;
+ data = chip->playback_substream[i]->runtime->private_data;
+ if (data) {
snd_iprintf(buffer,
"stream: %d mixer: %d source: %d (%d,%d)\n",
data->id, data->mixer, data->source,
@@ -1934,15 +1949,16 @@ snd_riptide_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "rate: %d\n", rate);
}
}
- if (chip->capture_substream
- && chip->capture_substream->runtime
- && (data = chip->capture_substream->runtime->private_data)) {
- snd_iprintf(buffer,
- "stream: %d mixer: %d source: %d (%d,%d)\n",
- data->id, data->mixer,
- data->source, data->intdec[0], data->intdec[1]);
- if (!(getsamplerate(cif, data->intdec, &rate)))
- snd_iprintf(buffer, "rate: %d\n", rate);
+ if (chip->capture_substream && chip->capture_substream->runtime) {
+ data = chip->capture_substream->runtime->private_data;
+ if (data) {
+ snd_iprintf(buffer,
+ "stream: %d mixer: %d source: %d (%d,%d)\n",
+ data->id, data->mixer,
+ data->source, data->intdec[0], data->intdec[1]);
+ if (!(getsamplerate(cif, data->intdec, &rate)))
+ snd_iprintf(buffer, "rate: %d\n", rate);
+ }
}
snd_iprintf(buffer, "Paths:\n");
i = getpaths(cif, p);
@@ -1973,12 +1989,14 @@ static int snd_riptide_mixer(struct snd_riptide *chip)
ac97.private_data = chip;
ac97.scaps = AC97_SCAP_SKIP_MODEM;
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus);
+ if (err < 0)
return err;
chip->ac97_bus = pbus;
ac97.pci = chip->pci;
- if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(pbus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
return err;
}
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 54f3e39f97f5..b5b357853c94 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -668,18 +668,24 @@ snd_rme32_playback_hw_params(struct snd_pcm_substream *substream,
}
spin_lock_irq(&rme32->lock);
- if ((rme32->rcreg & RME32_RCR_KMODE) &&
- (rate = snd_rme32_capture_getrate(rme32, &dummy)) > 0) {
+ rate = 0;
+ if (rme32->rcreg & RME32_RCR_KMODE)
+ rate = snd_rme32_capture_getrate(rme32, &dummy);
+ if (rate > 0) {
/* AutoSync */
if ((int)params_rate(params) != rate) {
spin_unlock_irq(&rme32->lock);
return -EIO;
}
- } else if ((err = snd_rme32_playback_setrate(rme32, params_rate(params))) < 0) {
- spin_unlock_irq(&rme32->lock);
- return err;
+ } else {
+ err = snd_rme32_playback_setrate(rme32, params_rate(params));
+ if (err < 0) {
+ spin_unlock_irq(&rme32->lock);
+ return err;
+ }
}
- if ((err = snd_rme32_setformat(rme32, params_format(params))) < 0) {
+ err = snd_rme32_setformat(rme32, params_format(params));
+ if (err < 0) {
spin_unlock_irq(&rme32->lock);
return err;
}
@@ -723,15 +729,18 @@ snd_rme32_capture_hw_params(struct snd_pcm_substream *substream,
rme32->wcreg |= RME32_WCR_AUTOSYNC;
writel(rme32->wcreg, rme32->iobase + RME32_IO_CONTROL_REGISTER);
- if ((err = snd_rme32_setformat(rme32, params_format(params))) < 0) {
+ err = snd_rme32_setformat(rme32, params_format(params));
+ if (err < 0) {
spin_unlock_irq(&rme32->lock);
return err;
}
- if ((err = snd_rme32_playback_setrate(rme32, params_rate(params))) < 0) {
+ err = snd_rme32_playback_setrate(rme32, params_rate(params));
+ if (err < 0) {
spin_unlock_irq(&rme32->lock);
return err;
}
- if ((rate = snd_rme32_capture_getrate(rme32, &isadat)) > 0) {
+ rate = snd_rme32_capture_getrate(rme32, &isadat);
+ if (rate > 0) {
if ((int)params_rate(params) != rate) {
spin_unlock_irq(&rme32->lock);
return -EIO;
@@ -854,8 +863,10 @@ static int snd_rme32_playback_spdif_open(struct snd_pcm_substream *substream)
runtime->hw.rates |= SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000;
runtime->hw.rate_max = 96000;
}
- if ((rme32->rcreg & RME32_RCR_KMODE) &&
- (rate = snd_rme32_capture_getrate(rme32, &dummy)) > 0) {
+ rate = 0;
+ if (rme32->rcreg & RME32_RCR_KMODE)
+ rate = snd_rme32_capture_getrate(rme32, &dummy);
+ if (rate > 0) {
/* AutoSync */
runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
runtime->hw.rate_min = rate;
@@ -895,7 +906,8 @@ static int snd_rme32_capture_spdif_open(struct snd_pcm_substream *substream)
runtime->hw.rates |= SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000;
runtime->hw.rate_max = 96000;
}
- if ((rate = snd_rme32_capture_getrate(rme32, &isadat)) > 0) {
+ rate = snd_rme32_capture_getrate(rme32, &isadat);
+ if (rate > 0) {
if (isadat) {
return -EIO;
}
@@ -932,8 +944,10 @@ snd_rme32_playback_adat_open(struct snd_pcm_substream *substream)
runtime->hw = snd_rme32_adat_fd_info;
else
runtime->hw = snd_rme32_adat_info;
- if ((rme32->rcreg & RME32_RCR_KMODE) &&
- (rate = snd_rme32_capture_getrate(rme32, &dummy)) > 0) {
+ rate = 0;
+ if (rme32->rcreg & RME32_RCR_KMODE)
+ rate = snd_rme32_capture_getrate(rme32, &dummy);
+ if (rate > 0) {
/* AutoSync */
runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
runtime->hw.rate_min = rate;
@@ -955,7 +969,8 @@ snd_rme32_capture_adat_open(struct snd_pcm_substream *substream)
runtime->hw = snd_rme32_adat_fd_info;
else
runtime->hw = snd_rme32_adat_info;
- if ((rate = snd_rme32_capture_getrate(rme32, &isadat)) > 0) {
+ rate = snd_rme32_capture_getrate(rme32, &isadat);
+ if (rate > 0) {
if (!isadat) {
return -EIO;
}
@@ -1307,10 +1322,12 @@ static int snd_rme32_create(struct rme32 *rme32)
rme32->irq = -1;
spin_lock_init(&rme32->lock);
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
- if ((err = pci_request_regions(pci, "RME32")) < 0)
+ err = pci_request_regions(pci, "RME32");
+ if (err < 0)
return err;
rme32->port = pci_resource_start(rme32->pci, 0);
@@ -1334,9 +1351,9 @@ static int snd_rme32_create(struct rme32 *rme32)
pci_read_config_byte(pci, 8, &rme32->rev);
/* set up ALSA pcm device for S/PDIF */
- if ((err = snd_pcm_new(rme32->card, "Digi32 IEC958", 0, 1, 1, &rme32->spdif_pcm)) < 0) {
+ err = snd_pcm_new(rme32->card, "Digi32 IEC958", 0, 1, 1, &rme32->spdif_pcm);
+ if (err < 0)
return err;
- }
rme32->spdif_pcm->private_data = rme32;
rme32->spdif_pcm->private_free = snd_rme32_free_spdif_pcm;
strcpy(rme32->spdif_pcm->name, "Digi32 IEC958");
@@ -1363,11 +1380,10 @@ static int snd_rme32_create(struct rme32 *rme32)
rme32->adat_pcm = NULL;
}
else {
- if ((err = snd_pcm_new(rme32->card, "Digi32 ADAT", 1,
- 1, 1, &rme32->adat_pcm)) < 0)
- {
+ err = snd_pcm_new(rme32->card, "Digi32 ADAT", 1,
+ 1, 1, &rme32->adat_pcm);
+ if (err < 0)
return err;
- }
rme32->adat_pcm->private_data = rme32;
rme32->adat_pcm->private_free = snd_rme32_free_adat_pcm;
strcpy(rme32->adat_pcm->name, "Digi32 ADAT");
@@ -1410,9 +1426,9 @@ static int snd_rme32_create(struct rme32 *rme32)
/* init switch interface */
- if ((err = snd_rme32_create_switches(rme32->card, rme32)) < 0) {
+ err = snd_rme32_create_switches(rme32->card, rme32);
+ if (err < 0)
return err;
- }
/* init proc interface */
snd_rme32_proc_init(rme32);
@@ -1855,7 +1871,9 @@ static int snd_rme32_create_switches(struct snd_card *card, struct rme32 * rme32
struct snd_kcontrol *kctl;
for (idx = 0; idx < (int)ARRAY_SIZE(snd_rme32_controls); idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme32_controls[idx], rme32))) < 0)
+ kctl = snd_ctl_new1(&snd_rme32_controls[idx], rme32);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (idx == 1) /* IEC958 (S/PDIF) Stream */
rme32->spdif_ctl = kctl;
@@ -1899,7 +1917,8 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
rme32->pci = pci;
if (fullduplex[dev])
rme32->fullduplex_mode = 1;
- if ((err = snd_rme32_create(rme32)) < 0) {
+ err = snd_rme32_create(rme32);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1919,7 +1938,8 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
sprintf(card->longname, "%s (Rev. %d) at 0x%lx, irq %d",
card->shortname, rme32->rev, rme32->port, rme32->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 66082e9f526d..fc7ac077559c 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -666,12 +666,14 @@ snd_rme96_playback_getrate(struct rme96 *rme96)
int rate, dummy;
if (!(rme96->wcreg & RME96_WCR_MASTER) &&
- snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
- (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
- {
- /* slave clock */
- return rate;
+ snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG) {
+ rate = snd_rme96_capture_getrate(rme96, &dummy);
+ if (rate > 0) {
+ /* slave clock */
+ return rate;
+ }
}
+
rate = ((rme96->wcreg >> RME96_WCR_BITPOS_FREQ_0) & 1) +
(((rme96->wcreg >> RME96_WCR_BITPOS_FREQ_1) & 1) << 1);
switch (rate) {
@@ -984,10 +986,11 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
runtime->dma_bytes = RME96_BUFFER_SIZE;
spin_lock_irq(&rme96->lock);
+ rate = 0;
if (!(rme96->wcreg & RME96_WCR_MASTER) &&
- snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
- (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
- {
+ snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG)
+ rate = snd_rme96_capture_getrate(rme96, &dummy);
+ if (rate > 0) {
/* slave clock */
if ((int)params_rate(params) != rate) {
err = -EIO;
@@ -1046,28 +1049,30 @@ snd_rme96_capture_hw_params(struct snd_pcm_substream *substream,
runtime->dma_bytes = RME96_BUFFER_SIZE;
spin_lock_irq(&rme96->lock);
- if ((err = snd_rme96_capture_setformat(rme96, params_format(params))) < 0) {
+ err = snd_rme96_capture_setformat(rme96, params_format(params));
+ if (err < 0) {
spin_unlock_irq(&rme96->lock);
return err;
}
if (snd_rme96_getinputtype(rme96) == RME96_INPUT_ANALOG) {
- if ((err = snd_rme96_capture_analog_setrate(rme96,
- params_rate(params))) < 0)
- {
+ err = snd_rme96_capture_analog_setrate(rme96, params_rate(params));
+ if (err < 0) {
spin_unlock_irq(&rme96->lock);
return err;
}
- } else if ((rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0) {
- if ((int)params_rate(params) != rate) {
- spin_unlock_irq(&rme96->lock);
- return -EIO;
- }
- if ((isadat && runtime->hw.channels_min == 2) ||
- (!isadat && runtime->hw.channels_min == 8))
- {
- spin_unlock_irq(&rme96->lock);
- return -EIO;
- }
+ } else {
+ rate = snd_rme96_capture_getrate(rme96, &isadat);
+ if (rate > 0) {
+ if ((int)params_rate(params) != rate) {
+ spin_unlock_irq(&rme96->lock);
+ return -EIO;
+ }
+ if ((isadat && runtime->hw.channels_min == 2) ||
+ (!isadat && runtime->hw.channels_min == 8)) {
+ spin_unlock_irq(&rme96->lock);
+ return -EIO;
+ }
+ }
}
snd_rme96_setframelog(rme96, params_channels(params), 0);
if (rme96->playback_periodsize != 0) {
@@ -1160,8 +1165,10 @@ rme96_set_buffer_size_constraint(struct rme96 *rme96,
snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
RME96_BUFFER_SIZE);
- if ((size = rme96->playback_periodsize) != 0 ||
- (size = rme96->capture_periodsize) != 0)
+ size = rme96->playback_periodsize;
+ if (!size)
+ size = rme96->capture_periodsize;
+ if (size)
snd_pcm_hw_constraint_single(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
size);
@@ -1191,13 +1198,14 @@ snd_rme96_playback_spdif_open(struct snd_pcm_substream *substream)
runtime->hw = snd_rme96_playback_spdif_info;
if (!(rme96->wcreg & RME96_WCR_MASTER) &&
- snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
- (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
- {
- /* slave clock */
- runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
- runtime->hw.rate_min = rate;
- runtime->hw.rate_max = rate;
+ snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG) {
+ rate = snd_rme96_capture_getrate(rme96, &dummy);
+ if (rate > 0) {
+ /* slave clock */
+ runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
+ runtime->hw.rate_min = rate;
+ runtime->hw.rate_max = rate;
+ }
}
rme96_set_buffer_size_constraint(rme96, runtime);
@@ -1217,16 +1225,16 @@ snd_rme96_capture_spdif_open(struct snd_pcm_substream *substream)
snd_pcm_set_sync(substream);
runtime->hw = snd_rme96_capture_spdif_info;
- if (snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
- (rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0)
- {
- if (isadat) {
- return -EIO;
- }
- runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
- runtime->hw.rate_min = rate;
- runtime->hw.rate_max = rate;
- }
+ if (snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG) {
+ rate = snd_rme96_capture_getrate(rme96, &isadat);
+ if (rate > 0) {
+ if (isadat)
+ return -EIO;
+ runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
+ runtime->hw.rate_min = rate;
+ runtime->hw.rate_max = rate;
+ }
+ }
spin_lock_irq(&rme96->lock);
if (rme96->capture_substream) {
@@ -1260,14 +1268,16 @@ snd_rme96_playback_adat_open(struct snd_pcm_substream *substream)
runtime->hw = snd_rme96_playback_adat_info;
if (!(rme96->wcreg & RME96_WCR_MASTER) &&
- snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
- (rate = snd_rme96_capture_getrate(rme96, &dummy)) > 0)
- {
- /* slave clock */
- runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
- runtime->hw.rate_min = rate;
- runtime->hw.rate_max = rate;
- }
+ snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG) {
+ rate = snd_rme96_capture_getrate(rme96, &dummy);
+ if (rate > 0) {
+ /* slave clock */
+ runtime->hw.rates = snd_pcm_rate_to_rate_bit(rate);
+ runtime->hw.rate_min = rate;
+ runtime->hw.rate_max = rate;
+ }
+ }
+
rme96_set_buffer_size_constraint(rme96, runtime);
return 0;
}
@@ -1286,7 +1296,8 @@ snd_rme96_capture_adat_open(struct snd_pcm_substream *substream)
expension cards AEB4/8-I are RME96_INPUT_INTERNAL */
return -EIO;
}
- if ((rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0) {
+ rate = snd_rme96_capture_getrate(rme96, &isadat);
+ if (rate > 0) {
if (!isadat) {
return -EIO;
}
@@ -1603,10 +1614,12 @@ snd_rme96_create(struct rme96 *rme96)
rme96->irq = -1;
spin_lock_init(&rme96->lock);
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
- if ((err = pci_request_regions(pci, "RME96")) < 0)
+ err = pci_request_regions(pci, "RME96");
+ if (err < 0)
return err;
rme96->port = pci_resource_start(rme96->pci, 0);
@@ -1630,11 +1643,11 @@ snd_rme96_create(struct rme96 *rme96)
pci_read_config_byte(pci, 8, &rme96->rev);
/* set up ALSA pcm device for S/PDIF */
- if ((err = snd_pcm_new(rme96->card, "Digi96 IEC958", 0,
- 1, 1, &rme96->spdif_pcm)) < 0)
- {
+ err = snd_pcm_new(rme96->card, "Digi96 IEC958", 0,
+ 1, 1, &rme96->spdif_pcm);
+ if (err < 0)
return err;
- }
+
rme96->spdif_pcm->private_data = rme96;
rme96->spdif_pcm->private_free = snd_rme96_free_spdif_pcm;
strcpy(rme96->spdif_pcm->name, "Digi96 IEC958");
@@ -1648,11 +1661,10 @@ snd_rme96_create(struct rme96 *rme96)
/* ADAT is not available on the base model */
rme96->adat_pcm = NULL;
} else {
- if ((err = snd_pcm_new(rme96->card, "Digi96 ADAT", 1,
- 1, 1, &rme96->adat_pcm)) < 0)
- {
+ err = snd_pcm_new(rme96->card, "Digi96 ADAT", 1,
+ 1, 1, &rme96->adat_pcm);
+ if (err < 0)
return err;
- }
rme96->adat_pcm->private_data = rme96;
rme96->adat_pcm->private_free = snd_rme96_free_adat_pcm;
strcpy(rme96->adat_pcm->name, "Digi96 ADAT");
@@ -1701,9 +1713,9 @@ snd_rme96_create(struct rme96 *rme96)
}
/* init switch interface */
- if ((err = snd_rme96_create_switches(rme96->card, rme96)) < 0) {
+ err = snd_rme96_create_switches(rme96->card, rme96);
+ if (err < 0)
return err;
- }
/* init proc interface */
snd_rme96_proc_init(rme96);
@@ -2336,16 +2348,20 @@ snd_rme96_create_switches(struct snd_card *card,
struct snd_kcontrol *kctl;
for (idx = 0; idx < 7; idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme96_controls[idx], rme96))) < 0)
+ kctl = snd_ctl_new1(&snd_rme96_controls[idx], rme96);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (idx == 1) /* IEC958 (S/PDIF) Stream */
rme96->spdif_ctl = kctl;
}
if (RME96_HAS_ANALOG_OUT(rme96)) {
- for (idx = 7; idx < 10; idx++)
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_rme96_controls[idx], rme96))) < 0)
+ for (idx = 7; idx < 10; idx++) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_rme96_controls[idx], rme96));
+ if (err < 0)
return err;
+ }
}
return 0;
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 720297cbdf87..8457a4bbc3df 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -289,7 +289,7 @@ MODULE_FIRMWARE("digiface_firmware_rev11.bin");
return 104857600000000 / rate; // 100 MHz
return 110100480000000 / rate; // 105 MHz
*/
-#define DDS_NUMERATOR 104857600000000ULL; /* = 2^20 * 10^8 */
+#define DDS_NUMERATOR 104857600000000ULL /* = 2^20 * 10^8 */
#define hdsp_encode_latency(x) (((x)<<1) & HDSP_LatencyMask)
#define hdsp_decode_latency(x) (((x) & HDSP_LatencyMask)>>1)
@@ -1318,11 +1318,13 @@ static int snd_hdsp_midi_output_write (struct hdsp_midi *hmidi)
spin_lock_irqsave (&hmidi->lock, flags);
if (hmidi->output) {
if (!snd_rawmidi_transmit_empty (hmidi->output)) {
- if ((n_pending = snd_hdsp_midi_output_possible (hmidi->hdsp, hmidi->id)) > 0) {
+ n_pending = snd_hdsp_midi_output_possible(hmidi->hdsp, hmidi->id);
+ if (n_pending > 0) {
if (n_pending > (int)sizeof (buf))
n_pending = sizeof (buf);
- if ((to_write = snd_rawmidi_transmit (hmidi->output, buf, n_pending)) > 0) {
+ to_write = snd_rawmidi_transmit(hmidi->output, buf, n_pending);
+ if (to_write > 0) {
for (i = 0; i < to_write; ++i)
snd_hdsp_midi_write_byte (hmidi->hdsp, hmidi->id, buf[i]);
}
@@ -1341,7 +1343,8 @@ static int snd_hdsp_midi_input_read (struct hdsp_midi *hmidi)
int i;
spin_lock_irqsave (&hmidi->lock, flags);
- if ((n_pending = snd_hdsp_midi_input_available (hmidi->hdsp, hmidi->id)) > 0) {
+ n_pending = snd_hdsp_midi_input_available(hmidi->hdsp, hmidi->id);
+ if (n_pending > 0) {
if (hmidi->input) {
if (n_pending > (int)sizeof (buf))
n_pending = sizeof (buf);
@@ -3322,7 +3325,9 @@ static int snd_hdsp_create_controls(struct snd_card *card, struct hdsp *hdsp)
}
for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_controls); idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp))) < 0)
+ kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (idx == 1) /* IEC958 (S/PDIF) Stream */
hdsp->spdif_ctl = kctl;
@@ -3331,12 +3336,16 @@ static int snd_hdsp_create_controls(struct snd_card *card, struct hdsp *hdsp)
/* ADAT SyncCheck status */
snd_hdsp_adat_sync_check.name = "ADAT Lock Status";
snd_hdsp_adat_sync_check.index = 1;
- if ((err = snd_ctl_add (card, kctl = snd_ctl_new1(&snd_hdsp_adat_sync_check, hdsp))))
+ kctl = snd_ctl_new1(&snd_hdsp_adat_sync_check, hdsp);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (hdsp->io_type == Digiface || hdsp->io_type == H9652) {
for (idx = 1; idx < 3; ++idx) {
snd_hdsp_adat_sync_check.index = idx+1;
- if ((err = snd_ctl_add (card, kctl = snd_ctl_new1(&snd_hdsp_adat_sync_check, hdsp))))
+ kctl = snd_ctl_new1(&snd_hdsp_adat_sync_check, hdsp);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
}
@@ -3344,7 +3353,9 @@ static int snd_hdsp_create_controls(struct snd_card *card, struct hdsp *hdsp)
/* DA, AD and Phone gain and XLR breakout cable controls for H9632 cards */
if (hdsp->io_type == H9632) {
for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_9632_controls); idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_9632_controls[idx], hdsp))) < 0)
+ kctl = snd_ctl_new1(&snd_hdsp_9632_controls[idx], hdsp);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
}
}
@@ -3362,8 +3373,10 @@ static int snd_hdsp_create_controls(struct snd_card *card, struct hdsp *hdsp)
/* AEB control for H96xx card */
if (hdsp->io_type == H9632 || hdsp->io_type == H9652) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_96xx_aeb, hdsp))) < 0)
- return err;
+ kctl = snd_ctl_new1(&snd_hdsp_96xx_aeb, hdsp);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
}
return 0;
@@ -3942,7 +3955,8 @@ static char *hdsp_channel_buffer_location(struct hdsp *hdsp,
if (snd_BUG_ON(channel < 0 || channel >= hdsp->max_channels))
return NULL;
- if ((mapped_channel = hdsp->channel_map[channel]) < 0)
+ mapped_channel = hdsp->channel_map[channel];
+ if (mapped_channel < 0)
return NULL;
if (stream == SNDRV_PCM_STREAM_CAPTURE)
@@ -4114,7 +4128,8 @@ static int snd_hdsp_hw_params(struct snd_pcm_substream *substream,
spin_lock_irq(&hdsp->lock);
if (! hdsp->clock_source_locked) {
- if ((err = hdsp_set_rate(hdsp, params_rate(params), 0)) < 0) {
+ err = hdsp_set_rate(hdsp, params_rate(params), 0);
+ if (err < 0) {
spin_unlock_irq(&hdsp->lock);
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_RATE);
return err;
@@ -4122,7 +4137,8 @@ static int snd_hdsp_hw_params(struct snd_pcm_substream *substream,
}
spin_unlock_irq(&hdsp->lock);
- if ((err = hdsp_set_interrupt_interval(hdsp, params_period_size(params))) < 0) {
+ err = hdsp_set_interrupt_interval(hdsp, params_period_size(params));
+ if (err < 0) {
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return err;
}
@@ -4854,14 +4870,15 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return -EINVAL;
if (hdsp->io_type == Undefined) {
- if ((err = hdsp_get_iobox_version(hdsp)) < 0)
+ err = hdsp_get_iobox_version(hdsp);
+ if (err < 0)
return err;
}
memset(&hdsp_version, 0, sizeof(hdsp_version));
hdsp_version.io_type = hdsp->io_type;
hdsp_version.firmware_rev = hdsp->firmware_rev;
- if ((err = copy_to_user(argp, &hdsp_version, sizeof(hdsp_version))))
- return -EFAULT;
+ if (copy_to_user(argp, &hdsp_version, sizeof(hdsp_version)))
+ return -EFAULT;
break;
}
case SNDRV_HDSP_IOCTL_UPLOAD_FIRMWARE: {
@@ -4900,17 +4917,20 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
hdsp->state |= HDSP_FirmwareCached;
- if ((err = snd_hdsp_load_firmware_from_cache(hdsp)) < 0)
+ err = snd_hdsp_load_firmware_from_cache(hdsp);
+ if (err < 0)
return err;
if (!(hdsp->state & HDSP_InitializationComplete)) {
- if ((err = snd_hdsp_enable_io(hdsp)) < 0)
+ err = snd_hdsp_enable_io(hdsp);
+ if (err < 0)
return err;
snd_hdsp_initialize_channels(hdsp);
snd_hdsp_initialize_midi_flush(hdsp);
- if ((err = snd_hdsp_create_alsa_devices(hdsp->card, hdsp)) < 0) {
+ err = snd_hdsp_create_alsa_devices(hdsp->card, hdsp);
+ if (err < 0) {
dev_err(hdsp->card->dev,
"error creating alsa devices\n");
return err;
@@ -4960,7 +4980,8 @@ static int snd_hdsp_create_hwdep(struct snd_card *card, struct hdsp *hdsp)
struct snd_hwdep *hw;
int err;
- if ((err = snd_hwdep_new(card, "HDSP hwdep", 0, &hw)) < 0)
+ err = snd_hwdep_new(card, "HDSP hwdep", 0, &hw);
+ if (err < 0)
return err;
hdsp->hwdep = hw;
@@ -4978,7 +4999,8 @@ static int snd_hdsp_create_pcm(struct snd_card *card, struct hdsp *hdsp)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(card, hdsp->card_name, 0, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(card, hdsp->card_name, 0, 1, 1, &pcm);
+ if (err < 0)
return err;
hdsp->pcm = pcm;
@@ -5084,28 +5106,32 @@ static int snd_hdsp_create_alsa_devices(struct snd_card *card, struct hdsp *hdsp
{
int err;
- if ((err = snd_hdsp_create_pcm(card, hdsp)) < 0) {
+ err = snd_hdsp_create_pcm(card, hdsp);
+ if (err < 0) {
dev_err(card->dev,
"Error creating pcm interface\n");
return err;
}
- if ((err = snd_hdsp_create_midi(card, hdsp, 0)) < 0) {
+ err = snd_hdsp_create_midi(card, hdsp, 0);
+ if (err < 0) {
dev_err(card->dev,
"Error creating first midi interface\n");
return err;
}
if (hdsp->io_type == Digiface || hdsp->io_type == H9652) {
- if ((err = snd_hdsp_create_midi(card, hdsp, 1)) < 0) {
+ err = snd_hdsp_create_midi(card, hdsp, 1);
+ if (err < 0) {
dev_err(card->dev,
"Error creating second midi interface\n");
return err;
}
}
- if ((err = snd_hdsp_create_controls(card, hdsp)) < 0) {
+ err = snd_hdsp_create_controls(card, hdsp);
+ if (err < 0) {
dev_err(card->dev,
"Error creating ctl interface\n");
return err;
@@ -5119,7 +5145,8 @@ static int snd_hdsp_create_alsa_devices(struct snd_card *card, struct hdsp *hdsp
hdsp->capture_substream = NULL;
hdsp->playback_substream = NULL;
- if ((err = snd_hdsp_set_defaults(hdsp)) < 0) {
+ err = snd_hdsp_set_defaults(hdsp);
+ if (err < 0) {
dev_err(card->dev,
"Error setting default values\n");
return err;
@@ -5130,7 +5157,8 @@ static int snd_hdsp_create_alsa_devices(struct snd_card *card, struct hdsp *hdsp
sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
hdsp->port, hdsp->irq);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
dev_err(card->dev,
"error registering card\n");
return err;
@@ -5151,7 +5179,8 @@ static int hdsp_request_fw_loader(struct hdsp *hdsp)
if (hdsp->io_type == H9652 || hdsp->io_type == H9632)
return 0;
if (hdsp->io_type == Undefined) {
- if ((err = hdsp_get_iobox_version(hdsp)) < 0)
+ err = hdsp_get_iobox_version(hdsp);
+ if (err < 0)
return err;
if (hdsp->io_type == H9652 || hdsp->io_type == H9632)
return 0;
@@ -5197,21 +5226,25 @@ static int hdsp_request_fw_loader(struct hdsp *hdsp)
hdsp->state |= HDSP_FirmwareCached;
- if ((err = snd_hdsp_load_firmware_from_cache(hdsp)) < 0)
+ err = snd_hdsp_load_firmware_from_cache(hdsp);
+ if (err < 0)
return err;
if (!(hdsp->state & HDSP_InitializationComplete)) {
- if ((err = snd_hdsp_enable_io(hdsp)) < 0)
+ err = snd_hdsp_enable_io(hdsp);
+ if (err < 0)
return err;
- if ((err = snd_hdsp_create_hwdep(hdsp->card, hdsp)) < 0) {
+ err = snd_hdsp_create_hwdep(hdsp->card, hdsp);
+ if (err < 0) {
dev_err(hdsp->card->dev,
"error creating hwdep device\n");
return err;
}
snd_hdsp_initialize_channels(hdsp);
snd_hdsp_initialize_midi_flush(hdsp);
- if ((err = snd_hdsp_create_alsa_devices(hdsp->card, hdsp)) < 0) {
+ err = snd_hdsp_create_alsa_devices(hdsp->card, hdsp);
+ if (err < 0) {
dev_err(hdsp->card->dev,
"error creating alsa devices\n");
return err;
@@ -5280,15 +5313,18 @@ static int snd_hdsp_create(struct snd_card *card,
is_9632 = 1;
}
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
pci_set_master(hdsp->pci);
- if ((err = pci_request_regions(pci, "hdsp")) < 0)
+ err = pci_request_regions(pci, "hdsp");
+ if (err < 0)
return err;
hdsp->port = pci_resource_start(pci, 0);
- if ((hdsp->iobase = ioremap(hdsp->port, HDSP_IO_EXTENT)) == NULL) {
+ hdsp->iobase = ioremap(hdsp->port, HDSP_IO_EXTENT);
+ if (!hdsp->iobase) {
dev_err(hdsp->card->dev, "unable to remap region 0x%lx-0x%lx\n",
hdsp->port, hdsp->port + HDSP_IO_EXTENT - 1);
return -EBUSY;
@@ -5306,7 +5342,8 @@ static int snd_hdsp_create(struct snd_card *card,
hdsp->use_midi_work = 1;
hdsp->dds_value = 0;
- if ((err = snd_hdsp_initialize_memory(hdsp)) < 0)
+ err = snd_hdsp_initialize_memory(hdsp);
+ if (err < 0)
return err;
if (!is_9652 && !is_9632) {
@@ -5318,7 +5355,8 @@ static int snd_hdsp_create(struct snd_card *card,
return err;
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
- if ((err = hdsp_request_fw_loader(hdsp)) < 0)
+ err = hdsp_request_fw_loader(hdsp);
+ if (err < 0)
/* we don't fail as this can happen
if userspace is not ready for
firmware upload
@@ -5331,7 +5369,8 @@ static int snd_hdsp_create(struct snd_card *card,
/* we defer initialization */
dev_info(hdsp->card->dev,
"card initialization pending : waiting for firmware\n");
- if ((err = snd_hdsp_create_hwdep(card, hdsp)) < 0)
+ err = snd_hdsp_create_hwdep(card, hdsp);
+ if (err < 0)
return err;
return 0;
} else {
@@ -5346,7 +5385,8 @@ static int snd_hdsp_create(struct snd_card *card,
}
}
- if ((err = snd_hdsp_enable_io(hdsp)) != 0)
+ err = snd_hdsp_enable_io(hdsp);
+ if (err)
return err;
if (is_9652)
@@ -5355,7 +5395,8 @@ static int snd_hdsp_create(struct snd_card *card,
if (is_9632)
hdsp->io_type = H9632;
- if ((err = snd_hdsp_create_hwdep(card, hdsp)) < 0)
+ err = snd_hdsp_create_hwdep(card, hdsp);
+ if (err < 0)
return err;
snd_hdsp_initialize_channels(hdsp);
@@ -5363,7 +5404,8 @@ static int snd_hdsp_create(struct snd_card *card,
hdsp->state |= HDSP_FirmwareLoaded;
- if ((err = snd_hdsp_create_alsa_devices(card, hdsp)) < 0)
+ err = snd_hdsp_create_alsa_devices(card, hdsp);
+ if (err < 0)
return err;
return 0;
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 7a4d395abcee..f1aad38760d6 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -433,9 +433,9 @@ static int rme9652_set_interrupt_interval(struct snd_rme9652 *s,
spin_lock_irq(&s->lock);
- if ((restart = s->running)) {
+ restart = s->running;
+ if (restart)
rme9652_stop(s);
- }
frames >>= 7;
n = 0;
@@ -518,16 +518,15 @@ static int rme9652_set_rate(struct snd_rme9652 *rme9652, int rate)
return -EBUSY;
}
- if ((restart = rme9652->running)) {
+ restart = rme9652->running;
+ if (restart)
rme9652_stop(rme9652);
- }
rme9652->control_register &= ~(RME9652_freq | RME9652_DS);
rme9652->control_register |= rate;
rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);
- if (restart) {
+ if (restart)
rme9652_start(rme9652);
- }
if (rate & RME9652_DS) {
if (rme9652->ss_channels == RME9652_NCHANNELS) {
@@ -878,15 +877,14 @@ static int rme9652_set_adat1_input(struct snd_rme9652 *rme9652, int internal)
/* XXX do we actually need to stop the card when we do this ? */
- if ((restart = rme9652->running)) {
+ restart = rme9652->running;
+ if (restart)
rme9652_stop(rme9652);
- }
rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);
- if (restart) {
+ if (restart)
rme9652_start(rme9652);
- }
return 0;
}
@@ -943,15 +941,14 @@ static int rme9652_set_spdif_input(struct snd_rme9652 *rme9652, int in)
rme9652->control_register &= ~RME9652_inp;
rme9652->control_register |= rme9652_encode_spdif_in(in);
- if ((restart = rme9652->running)) {
+ restart = rme9652->running;
+ if (restart)
rme9652_stop(rme9652);
- }
rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);
- if (restart) {
+ if (restart)
rme9652_start(rme9652);
- }
return 0;
}
@@ -1010,15 +1007,14 @@ static int rme9652_set_spdif_output(struct snd_rme9652 *rme9652, int out)
rme9652->control_register &= ~RME9652_opt_out;
}
- if ((restart = rme9652->running)) {
+ restart = rme9652->running;
+ if (restart)
rme9652_stop(rme9652);
- }
rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);
- if (restart) {
+ if (restart)
rme9652_start(rme9652);
- }
return 0;
}
@@ -1086,15 +1082,14 @@ static int rme9652_set_sync_mode(struct snd_rme9652 *rme9652, int mode)
break;
}
- if ((restart = rme9652->running)) {
+ restart = rme9652->running;
+ if (restart)
rme9652_stop(rme9652);
- }
rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);
- if (restart) {
+ if (restart)
rme9652_start(rme9652);
- }
return 0;
}
@@ -1173,15 +1168,14 @@ static int rme9652_set_sync_pref(struct snd_rme9652 *rme9652, int pref)
break;
}
- if ((restart = rme9652->running)) {
+ restart = rme9652->running;
+ if (restart)
rme9652_stop(rme9652);
- }
rme9652_write(rme9652, RME9652_control_register, rme9652->control_register);
- if (restart) {
+ if (restart)
rme9652_start(rme9652);
- }
return 0;
}
@@ -1513,19 +1507,27 @@ static int snd_rme9652_create_controls(struct snd_card *card, struct snd_rme9652
struct snd_kcontrol *kctl;
for (idx = 0; idx < ARRAY_SIZE(snd_rme9652_controls); idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme9652_controls[idx], rme9652))) < 0)
+ kctl = snd_ctl_new1(&snd_rme9652_controls[idx], rme9652);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
if (idx == 1) /* IEC958 (S/PDIF) Stream */
rme9652->spdif_ctl = kctl;
}
- if (rme9652->ss_channels == RME9652_NCHANNELS)
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme9652_adat3_check, rme9652))) < 0)
+ if (rme9652->ss_channels == RME9652_NCHANNELS) {
+ kctl = snd_ctl_new1(&snd_rme9652_adat3_check, rme9652);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
- if (rme9652->hw_rev >= 15)
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_rme9652_adat1_input, rme9652))) < 0)
+ if (rme9652->hw_rev >= 15) {
+ kctl = snd_ctl_new1(&snd_rme9652_adat1_input, rme9652);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
+ }
return 0;
}
@@ -1842,9 +1844,9 @@ static char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652,
if (snd_BUG_ON(channel < 0 || channel >= RME9652_NCHANNELS))
return NULL;
- if ((mapped_channel = rme9652->channel_map[channel]) < 0) {
+ mapped_channel = rme9652->channel_map[channel];
+ if (mapped_channel < 0)
return NULL;
- }
if (stream == SNDRV_PCM_STREAM_CAPTURE) {
return rme9652->capture_buffer +
@@ -2021,12 +2023,14 @@ static int snd_rme9652_hw_params(struct snd_pcm_substream *substream,
/* how to make sure that the rate matches an externally-set one ?
*/
- if ((err = rme9652_set_rate(rme9652, params_rate(params))) < 0) {
+ err = rme9652_set_rate(rme9652, params_rate(params));
+ if (err < 0) {
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_RATE);
return err;
}
- if ((err = rme9652_set_interrupt_interval(rme9652, params_period_size(params))) < 0) {
+ err = rme9652_set_interrupt_interval(rme9652, params_period_size(params));
+ if (err < 0) {
_snd_pcm_hw_param_setempty(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return err;
}
@@ -2406,11 +2410,9 @@ static int snd_rme9652_create_pcm(struct snd_card *card,
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(card,
- rme9652->card_name,
- 0, 1, 1, &pcm)) < 0) {
+ err = snd_pcm_new(card, rme9652->card_name, 0, 1, 1, &pcm);
+ if (err < 0)
return err;
- }
rme9652->pcm = pcm;
pcm->private_data = rme9652;
@@ -2450,12 +2452,14 @@ static int snd_rme9652_create(struct snd_card *card,
return -ENODEV;
}
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
spin_lock_init(&rme9652->lock);
- if ((err = pci_request_regions(pci, "rme9652")) < 0)
+ err = pci_request_regions(pci, "rme9652");
+ if (err < 0)
return err;
rme9652->port = pci_resource_start(pci, 0);
rme9652->iobase = ioremap(rme9652->port, RME9652_IO_EXTENT);
@@ -2528,17 +2532,17 @@ static int snd_rme9652_create(struct snd_card *card,
pci_set_master(rme9652->pci);
- if ((err = snd_rme9652_initialize_memory(rme9652)) < 0) {
+ err = snd_rme9652_initialize_memory(rme9652);
+ if (err < 0)
return err;
- }
- if ((err = snd_rme9652_create_pcm(card, rme9652)) < 0) {
+ err = snd_rme9652_create_pcm(card, rme9652);
+ if (err < 0)
return err;
- }
- if ((err = snd_rme9652_create_controls(card, rme9652)) < 0) {
+ err = snd_rme9652_create_controls(card, rme9652);
+ if (err < 0)
return err;
- }
snd_rme9652_proc_init(rme9652);
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 7de10997775f..03a48da897e3 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -852,7 +852,8 @@ static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(sonic->card, "s3_86c617", device, 1, 1, &pcm)) < 0)
+ err = snd_pcm_new(sonic->card, "s3_86c617", device, 1, 1, &pcm);
+ if (err < 0)
return err;
if (snd_BUG_ON(!pcm))
return -EINVAL;
@@ -1093,7 +1094,9 @@ static int snd_sonicvibes_mixer(struct sonicvibes *sonic)
strcpy(card->mixername, "S3 SonicVibes");
for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_controls); idx++) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_sonicvibes_controls[idx], sonic))) < 0)
+ kctl = snd_ctl_new1(&snd_sonicvibes_controls[idx], sonic);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
return err;
switch (idx) {
case 0:
@@ -1226,7 +1229,8 @@ static int snd_sonicvibes_create(struct snd_card *card,
*rsonic = NULL;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
/* check, if we can restrict PCI DMA transfers to 24 bits */
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(24))) {
@@ -1246,7 +1250,8 @@ static int snd_sonicvibes_create(struct snd_card *card,
sonic->pci = pci;
sonic->irq = -1;
- if ((err = pci_request_regions(pci, "S3 SonicVibes")) < 0) {
+ err = pci_request_regions(pci, "S3 SonicVibes");
+ if (err < 0) {
kfree(sonic);
pci_disable_device(pci);
return err;
@@ -1289,14 +1294,16 @@ static int snd_sonicvibes_create(struct snd_card *card,
pci_write_config_dword(pci, 0x40, dmaa);
pci_write_config_dword(pci, 0x48, dmac);
- if ((sonic->res_dmaa = request_region(dmaa, 0x10, "S3 SonicVibes DDMA-A")) == NULL) {
+ sonic->res_dmaa = request_region(dmaa, 0x10, "S3 SonicVibes DDMA-A");
+ if (!sonic->res_dmaa) {
snd_sonicvibes_free(sonic);
dev_err(card->dev,
"unable to grab DDMA-A port at 0x%x-0x%x\n",
dmaa, dmaa + 0x10 - 1);
return -EBUSY;
}
- if ((sonic->res_dmac = request_region(dmac, 0x10, "S3 SonicVibes DDMA-C")) == NULL) {
+ sonic->res_dmac = request_region(dmac, 0x10, "S3 SonicVibes DDMA-C");
+ if (!sonic->res_dmac) {
snd_sonicvibes_free(sonic);
dev_err(card->dev,
"unable to grab DDMA-C port at 0x%x-0x%x\n",
@@ -1358,7 +1365,8 @@ static int snd_sonicvibes_create(struct snd_card *card,
#endif
sonic->revision = snd_sonicvibes_in(sonic, SV_IREG_REVISION);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sonic, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sonic, &ops);
+ if (err < 0) {
snd_sonicvibes_free(sonic);
return err;
}
@@ -1405,9 +1413,11 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
mpu->private_data = sonic;
mpu->open_input = snd_sonicvibes_midi_input_open;
mpu->close_input = snd_sonicvibes_midi_input_close;
- for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_midi_controls); idx++)
- if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_sonicvibes_midi_controls[idx], sonic))) < 0)
+ for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_midi_controls); idx++) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_sonicvibes_midi_controls[idx], sonic));
+ if (err < 0)
return err;
+ }
return 0;
}
@@ -1439,10 +1449,11 @@ static int snd_sonic_probe(struct pci_dev *pci,
return -ENODEV;
}
}
- if ((err = snd_sonicvibes_create(card, pci,
- reverb[dev] ? 1 : 0,
- mge[dev] ? 1 : 0,
- &sonic)) < 0) {
+ err = snd_sonicvibes_create(card, pci,
+ reverb[dev] ? 1 : 0,
+ mge[dev] ? 1 : 0,
+ &sonic);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1455,30 +1466,35 @@ static int snd_sonic_probe(struct pci_dev *pci,
(unsigned long long)pci_resource_start(pci, 1),
sonic->irq);
- if ((err = snd_sonicvibes_pcm(sonic, 0)) < 0) {
+ err = snd_sonicvibes_pcm(sonic, 0);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_sonicvibes_mixer(sonic)) < 0) {
+ err = snd_sonicvibes_mixer(sonic);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_SONICVIBES,
- sonic->midi_port,
- MPU401_INFO_INTEGRATED |
- MPU401_INFO_IRQ_HOOK,
- -1, &midi_uart)) < 0) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_SONICVIBES,
+ sonic->midi_port,
+ MPU401_INFO_INTEGRATED |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &midi_uart);
+ if (err < 0) {
snd_card_free(card);
return err;
}
snd_sonicvibes_midi(sonic, midi_uart);
- if ((err = snd_opl3_create(card, sonic->synth_port,
- sonic->synth_port + 2,
- OPL3_HW_OPL3_SV, 1, &opl3)) < 0) {
+ err = snd_opl3_create(card, sonic->synth_port,
+ sonic->synth_port + 2,
+ OPL3_HW_OPL3_SV, 1, &opl3);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -1489,7 +1505,8 @@ static int snd_sonic_probe(struct pci_dev *pci,
return err;
}
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index a51041205f7c..60e4dca28c2b 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -67,11 +67,12 @@ static int snd_trident_probe(struct pci_dev *pci,
if (err < 0)
return err;
- if ((err = snd_trident_create(card, pci,
- pcm_channels[dev],
- ((pci->vendor << 16) | pci->device) == TRIDENT_DEVICE_ID_SI7018 ? 1 : 2,
- wavetable_size[dev],
- &trident)) < 0) {
+ err = snd_trident_create(card, pci,
+ pcm_channels[dev],
+ ((pci->vendor << 16) | pci->device) == TRIDENT_DEVICE_ID_SI7018 ? 1 : 2,
+ wavetable_size[dev],
+ &trident);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -100,38 +101,44 @@ static int snd_trident_probe(struct pci_dev *pci,
sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d",
card->shortname, trident->port, trident->irq);
- if ((err = snd_trident_pcm(trident, pcm_dev++)) < 0) {
+ err = snd_trident_pcm(trident, pcm_dev++);
+ if (err < 0) {
snd_card_free(card);
return err;
}
switch (trident->device) {
case TRIDENT_DEVICE_ID_DX:
case TRIDENT_DEVICE_ID_NX:
- if ((err = snd_trident_foldback_pcm(trident, pcm_dev++)) < 0) {
+ err = snd_trident_foldback_pcm(trident, pcm_dev++);
+ if (err < 0) {
snd_card_free(card);
return err;
}
break;
}
if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018) {
- if ((err = snd_trident_spdif_pcm(trident, pcm_dev++)) < 0) {
+ err = snd_trident_spdif_pcm(trident, pcm_dev++);
+ if (err < 0) {
snd_card_free(card);
return err;
}
}
- if (trident->device != TRIDENT_DEVICE_ID_SI7018 &&
- (err = snd_mpu401_uart_new(card, 0, MPU401_HW_TRID4DWAVE,
- trident->midi_port,
- MPU401_INFO_INTEGRATED |
- MPU401_INFO_IRQ_HOOK,
- -1, &trident->rmidi)) < 0) {
- snd_card_free(card);
- return err;
+ if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_TRID4DWAVE,
+ trident->midi_port,
+ MPU401_INFO_INTEGRATED |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &trident->rmidi);
+ if (err < 0) {
+ snd_card_free(card);
+ return err;
+ }
}
snd_trident_create_gameport(trident);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/trident/trident.h b/sound/pci/trident/trident.h
index c7567edbe4c4..c579a44bb9ae 100644
--- a/sound/pci/trident/trident.h
+++ b/sound/pci/trident/trident.h
@@ -251,7 +251,6 @@ struct snd_trident_memblk_arg {
struct snd_trident_tlb {
__le32 *entries; /* 16k-aligned TLB table */
dma_addr_t entries_dmaaddr; /* 16k-aligned PCI address to TLB table */
- unsigned long * shadow_entries; /* shadow entries with virtual addresses */
struct snd_dma_buffer buffer;
struct snd_util_memhdr * memhdr; /* page allocation list */
struct snd_dma_buffer silent_page;
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 20145143f6a6..cfbca3bd60ed 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -2119,7 +2119,8 @@ int snd_trident_pcm(struct snd_trident *trident, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, trident->ChanPCM, 1, &pcm)) < 0)
+ err = snd_pcm_new(trident->card, "trident_dx_nx", device, trident->ChanPCM, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = trident;
@@ -2178,7 +2179,8 @@ int snd_trident_foldback_pcm(struct snd_trident *trident, int device)
if (trident->device == TRIDENT_DEVICE_ID_NX)
num_chan = 4;
- if ((err = snd_pcm_new(trident->card, "trident_dx_nx", device, 0, num_chan, &foldback)) < 0)
+ err = snd_pcm_new(trident->card, "trident_dx_nx", device, 0, num_chan, &foldback);
+ if (err < 0)
return err;
foldback->private_data = trident;
@@ -2228,7 +2230,8 @@ int snd_trident_spdif_pcm(struct snd_trident *trident, int device)
struct snd_pcm *spdif;
int err;
- if ((err = snd_pcm_new(trident->card, "trident_dx_nx IEC958", device, 1, 0, &spdif)) < 0)
+ err = snd_pcm_new(trident->card, "trident_dx_nx IEC958", device, 1, 0, &spdif);
+ if (err < 0)
return err;
spdif->private_data = trident;
@@ -2921,7 +2924,8 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
if (!uctl)
return -ENOMEM;
- if ((err = snd_ac97_bus(trident->card, 0, &ops, NULL, &trident->ac97_bus)) < 0)
+ err = snd_ac97_bus(trident->card, 0, &ops, NULL, &trident->ac97_bus);
+ if (err < 0)
goto __out;
memset(&_ac97, 0, sizeof(_ac97));
@@ -2929,9 +2933,11 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
trident->ac97_detect = 1;
__again:
- if ((err = snd_ac97_mixer(trident->ac97_bus, &_ac97, &trident->ac97)) < 0) {
+ err = snd_ac97_mixer(trident->ac97_bus, &_ac97, &trident->ac97);
+ if (err < 0) {
if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
- if ((err = snd_trident_sis_reset(trident)) < 0)
+ err = snd_trident_sis_reset(trident);
+ if (err < 0)
goto __out;
if (retries-- > 0)
goto __again;
@@ -2962,10 +2968,14 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
trident->ac97_detect = 0;
if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_vol_wave_control, trident))) < 0)
+ kctl = snd_ctl_new1(&snd_trident_vol_wave_control, trident);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
kctl->put(kctl, uctl);
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_vol_music_control, trident))) < 0)
+ kctl = snd_ctl_new1(&snd_trident_vol_music_control, trident);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
kctl->put(kctl, uctl);
outl(trident->musicvol_wavevol = 0x00000000, TRID_REG(trident, T4D_MUSICVOL_WAVEVOL));
@@ -2979,28 +2989,38 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
tmix = &trident->pcm_mixer[idx];
tmix->voice = NULL;
}
- if ((trident->ctl_vol = snd_ctl_new1(&snd_trident_pcm_vol_control, trident)) == NULL)
+ trident->ctl_vol = snd_ctl_new1(&snd_trident_pcm_vol_control, trident);
+ if (!trident->ctl_vol)
goto __nomem;
- if ((err = snd_ctl_add(card, trident->ctl_vol)))
+ err = snd_ctl_add(card, trident->ctl_vol);
+ if (err)
goto __out;
- if ((trident->ctl_pan = snd_ctl_new1(&snd_trident_pcm_pan_control, trident)) == NULL)
+ trident->ctl_pan = snd_ctl_new1(&snd_trident_pcm_pan_control, trident);
+ if (!trident->ctl_pan)
goto __nomem;
- if ((err = snd_ctl_add(card, trident->ctl_pan)))
+ err = snd_ctl_add(card, trident->ctl_pan);
+ if (err)
goto __out;
- if ((trident->ctl_rvol = snd_ctl_new1(&snd_trident_pcm_rvol_control, trident)) == NULL)
+ trident->ctl_rvol = snd_ctl_new1(&snd_trident_pcm_rvol_control, trident);
+ if (!trident->ctl_rvol)
goto __nomem;
- if ((err = snd_ctl_add(card, trident->ctl_rvol)))
+ err = snd_ctl_add(card, trident->ctl_rvol);
+ if (err)
goto __out;
- if ((trident->ctl_cvol = snd_ctl_new1(&snd_trident_pcm_cvol_control, trident)) == NULL)
+ trident->ctl_cvol = snd_ctl_new1(&snd_trident_pcm_cvol_control, trident);
+ if (!trident->ctl_cvol)
goto __nomem;
- if ((err = snd_ctl_add(card, trident->ctl_cvol)))
+ err = snd_ctl_add(card, trident->ctl_cvol);
+ if (err)
goto __out;
if (trident->device == TRIDENT_DEVICE_ID_NX) {
- if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_trident_ac97_rear_control, trident))) < 0)
+ kctl = snd_ctl_new1(&snd_trident_ac97_rear_control, trident);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
kctl->put(kctl, uctl);
}
@@ -3016,7 +3036,8 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
if (trident->ac97_sec && (trident->ac97_sec->ext_id & AC97_EI_SPDIF))
kctl->id.index++;
idx = kctl->id.index;
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
kctl->put(kctl, uctl);
@@ -3027,7 +3048,8 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
}
kctl->id.index = idx;
kctl->id.device = pcm_spdif_device;
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
kctl = snd_ctl_new1(&snd_trident_spdif_mask, trident);
@@ -3037,7 +3059,8 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
}
kctl->id.index = idx;
kctl->id.device = pcm_spdif_device;
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
kctl = snd_ctl_new1(&snd_trident_spdif_stream, trident);
@@ -3047,7 +3070,8 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
}
kctl->id.index = idx;
kctl->id.device = pcm_spdif_device;
- if ((err = snd_ctl_add(card, kctl)) < 0)
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
goto __out;
trident->spdif_pcm_ctl = kctl;
}
@@ -3307,12 +3331,6 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
}
trident->tlb.entries = (__le32 *)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);
- /* allocate shadow TLB page table (virtual addresses) */
- trident->tlb.shadow_entries =
- vmalloc(array_size(SNDRV_TRIDENT_MAX_PAGES,
- sizeof(unsigned long)));
- if (!trident->tlb.shadow_entries)
- return -ENOMEM;
/* allocate and setup silent page and initialise TLB entries */
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &trident->pci->dev,
@@ -3321,10 +3339,8 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
return -ENOMEM;
}
memset(trident->tlb.silent_page.area, 0, SNDRV_TRIDENT_PAGE_SIZE);
- for (i = 0; i < SNDRV_TRIDENT_MAX_PAGES; i++) {
+ for (i = 0; i < SNDRV_TRIDENT_MAX_PAGES; i++)
trident->tlb.entries[i] = cpu_to_le32(trident->tlb.silent_page.addr & ~(SNDRV_TRIDENT_PAGE_SIZE-1));
- trident->tlb.shadow_entries[i] = (unsigned long)trident->tlb.silent_page.area;
- }
/* use emu memory block manager code to manage tlb page allocation */
trident->tlb.memhdr = snd_util_memhdr_new(SNDRV_TRIDENT_PAGE_SIZE * SNDRV_TRIDENT_MAX_PAGES);
@@ -3449,7 +3465,8 @@ static int snd_trident_sis_init(struct snd_trident *trident)
{
int err;
- if ((err = snd_trident_sis_reset(trident)) < 0)
+ err = snd_trident_sis_reset(trident);
+ if (err < 0)
return err;
snd_trident_stop_all_voices(trident);
@@ -3494,7 +3511,8 @@ int snd_trident_create(struct snd_card *card,
*rtrident = NULL;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
/* check, if we can restrict PCI DMA transfers to 30 bits */
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(30))) {
@@ -3528,7 +3546,8 @@ int snd_trident_create(struct snd_card *card,
trident->midi_port = TRID_REG(trident, T4D_MPU401_BASE);
pci_set_master(pci);
- if ((err = pci_request_regions(pci, "Trident Audio")) < 0) {
+ err = pci_request_regions(pci, "Trident Audio");
+ if (err < 0) {
kfree(trident);
pci_disable_device(pci);
return err;
@@ -3548,7 +3567,8 @@ int snd_trident_create(struct snd_card *card,
trident->tlb.entries = NULL;
trident->tlb.buffer.area = NULL;
if (trident->device == TRIDENT_DEVICE_ID_NX) {
- if ((err = snd_trident_tlb_alloc(trident)) < 0) {
+ err = snd_trident_tlb_alloc(trident);
+ if (err < 0) {
snd_trident_free(trident);
return err;
}
@@ -3576,12 +3596,14 @@ int snd_trident_create(struct snd_card *card,
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, trident, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, trident, &ops);
+ if (err < 0) {
snd_trident_free(trident);
return err;
}
- if ((err = snd_trident_mixer(trident, pcm_spdif_device)) < 0)
+ err = snd_trident_mixer(trident, pcm_spdif_device);
+ if (err < 0)
return err;
/* initialise synth voices */
@@ -3635,7 +3657,6 @@ static int snd_trident_free(struct snd_trident *trident)
snd_util_memhdr_free(trident->tlb.memhdr);
if (trident->tlb.silent_page.area)
snd_dma_free_pages(&trident->tlb.silent_page);
- vfree(trident->tlb.shadow_entries);
snd_dma_free_pages(&trident->tlb.buffer);
}
pci_release_regions(trident->pci);
diff --git a/sound/pci/trident/trident_memory.c b/sound/pci/trident/trident_memory.c
index bb24dbf0530d..4ad3855101c9 100644
--- a/sound/pci/trident/trident_memory.c
+++ b/sound/pci/trident/trident_memory.c
@@ -19,11 +19,8 @@
/* page arguments of these two macros are Trident page (4096 bytes), not like
* aligned pages in others
*/
-#define __set_tlb_bus(trident,page,ptr,addr) \
- do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
- (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
-#define __tlb_to_ptr(trident,page) \
- (void*)((trident)->tlb.shadow_entries[page])
+#define __set_tlb_bus(trident,page,addr) \
+ (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1))
#define __tlb_to_addr(trident,page) \
(dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
@@ -32,15 +29,13 @@
#define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
#define MAX_ALIGN_PAGES SNDRV_TRIDENT_MAX_PAGES /* maxmium aligned pages */
/* fill TLB entrie(s) corresponding to page with ptr */
-#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
+#define set_tlb_bus(trident,page,addr) __set_tlb_bus(trident,page,addr)
/* fill TLB entrie(s) corresponding to page with silence pointer */
-#define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
+#define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset) ((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page) ((page) << 12)
-/* get buffer address from aligned page */
-#define page_to_ptr(trident,page) __tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page) __tlb_to_addr(trident, page)
@@ -50,22 +45,21 @@
#define MAX_ALIGN_PAGES (SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset) ((offset) >> 13)
#define aligned_page_offset(page) ((page) << 13)
-#define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page) __tlb_to_addr(trident, (page) << 1)
/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
- unsigned long ptr, dma_addr_t addr)
+ dma_addr_t addr)
{
page <<= 1;
- __set_tlb_bus(trident, page, ptr, addr);
- __set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
+ __set_tlb_bus(trident, page, addr);
+ __set_tlb_bus(trident, page+1, addr + SNDRV_TRIDENT_PAGE_SIZE);
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
page <<= 1;
- __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
- __set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
+ __set_tlb_bus(trident, page, trident->tlb.silent_page.addr);
+ __set_tlb_bus(trident, page+1, trident->tlb.silent_page.addr);
}
#else
@@ -80,18 +74,16 @@ static inline void set_silent_tlb(struct snd_trident *trident, int page)
*/
#define get_aligned_page(offset) ((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page) ((page) * ALIGN_PAGE_SIZE)
-#define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page) __tlb_to_addr(trident, (page) * UNIT_PAGES)
/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
- unsigned long ptr, dma_addr_t addr)
+ dma_addr_t addr)
{
int i;
page *= UNIT_PAGES;
for (i = 0; i < UNIT_PAGES; i++, page++) {
- __set_tlb_bus(trident, page, ptr, addr);
- ptr += SNDRV_TRIDENT_PAGE_SIZE;
+ __set_tlb_bus(trident, page, addr);
addr += SNDRV_TRIDENT_PAGE_SIZE;
}
}
@@ -100,20 +92,11 @@ static inline void set_silent_tlb(struct snd_trident *trident, int page)
int i;
page *= UNIT_PAGES;
for (i = 0; i < UNIT_PAGES; i++, page++)
- __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
+ __set_tlb_bus(trident, page, trident->tlb.silent_page.addr);
}
#endif /* PAGE_SIZE */
-/* calculate buffer pointer from offset address */
-static inline void *offset_ptr(struct snd_trident *trident, int offset)
-{
- char *ptr;
- ptr = page_to_ptr(trident, get_aligned_page(offset));
- ptr += offset % ALIGN_PAGE_SIZE;
- return (void*)ptr;
-}
-
/* first and last (aligned) pages of memory block */
#define firstpg(blk) (((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk) (((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)
@@ -201,14 +184,12 @@ snd_trident_alloc_sg_pages(struct snd_trident *trident,
for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
unsigned long ofs = idx << PAGE_SHIFT;
dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
- unsigned long ptr = (unsigned long)
- snd_pcm_sgbuf_get_ptr(substream, ofs);
if (! is_valid_page(addr)) {
__snd_util_mem_free(hdr, blk);
mutex_unlock(&hdr->block_mutex);
return NULL;
}
- set_tlb_bus(trident, page, ptr, addr);
+ set_tlb_bus(trident, page, addr);
}
mutex_unlock(&hdr->block_mutex);
return blk;
@@ -226,7 +207,6 @@ snd_trident_alloc_cont_pages(struct snd_trident *trident,
int page;
struct snd_pcm_runtime *runtime = substream->runtime;
dma_addr_t addr;
- unsigned long ptr;
if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
@@ -245,15 +225,14 @@ snd_trident_alloc_cont_pages(struct snd_trident *trident,
/* set TLB entries */
addr = runtime->dma_addr;
- ptr = (unsigned long)runtime->dma_area;
for (page = firstpg(blk); page <= lastpg(blk); page++,
- ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
+ addr += SNDRV_TRIDENT_PAGE_SIZE) {
if (! is_valid_page(addr)) {
__snd_util_mem_free(hdr, blk);
mutex_unlock(&hdr->block_mutex);
return NULL;
}
- set_tlb_bus(trident, page, ptr, addr);
+ set_tlb_bus(trident, page, addr);
}
mutex_unlock(&hdr->block_mutex);
return blk;
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index fd1f2f9cfbc3..943813a06abc 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -515,7 +515,8 @@ static int snd_via82xx_codec_ready(struct via82xx *chip, int secondary)
while (timeout-- > 0) {
udelay(1);
- if (!((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY))
+ val = snd_via82xx_codec_xread(chip);
+ if (!(val & VIA_REG_AC97_BUSY))
return val & 0xffff;
}
dev_err(chip->card->dev, "codec_ready: codec %i is not ready [0x%x]\n",
@@ -1023,7 +1024,8 @@ static int snd_via8233_playback_prepare(struct snd_pcm_substream *substream)
int rate_changed;
u32 rbits;
- if ((rate_changed = via_lock_rate(&chip->rates[0], ac97_rate)) < 0)
+ rate_changed = via_lock_rate(&chip->rates[0], ac97_rate);
+ if (rate_changed < 0)
return rate_changed;
if (rate_changed)
snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE,
@@ -1197,7 +1199,8 @@ static int snd_via82xx_pcm_open(struct via82xx *chip, struct viadev *viadev,
/* we may remove following constaint when we modify table entries
in interrupt */
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
if (use_src) {
@@ -1222,7 +1225,8 @@ static int snd_via686_playback_open(struct snd_pcm_substream *substream)
struct viadev *viadev = &chip->devs[chip->playback_devno + substream->number];
int err;
- if ((err = snd_via82xx_pcm_open(chip, viadev, substream)) < 0)
+ err = snd_via82xx_pcm_open(chip, viadev, substream);
+ if (err < 0)
return err;
return 0;
}
@@ -1238,7 +1242,8 @@ static int snd_via8233_playback_open(struct snd_pcm_substream *substream)
int err;
viadev = &chip->devs[chip->playback_devno + substream->number];
- if ((err = snd_via82xx_pcm_open(chip, viadev, substream)) < 0)
+ err = snd_via82xx_pcm_open(chip, viadev, substream);
+ if (err < 0)
return err;
stream = viadev->reg_offset / 0x10;
if (chip->dxs_controls[stream]) {
@@ -1275,7 +1280,8 @@ static int snd_via8233_multi_open(struct snd_pcm_substream *substream)
.mask = 0,
};
- if ((err = snd_via82xx_pcm_open(chip, viadev, substream)) < 0)
+ err = snd_via82xx_pcm_open(chip, viadev, substream);
+ if (err < 0)
return err;
substream->runtime->hw.channels_max = 6;
if (chip->revision == VIA_REV_8233A)
@@ -1875,7 +1881,8 @@ static int snd_via82xx_mixer_new(struct via82xx *chip, const char *quirk_overrid
.wait = snd_via82xx_codec_wait,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
+ if (err < 0)
return err;
chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus;
chip->ac97_bus->clock = chip->ac97_clock;
@@ -1885,7 +1892,8 @@ static int snd_via82xx_mixer_new(struct via82xx *chip, const char *quirk_overrid
ac97.private_free = snd_via82xx_mixer_free_ac97;
ac97.pci = chip->pci;
ac97.scaps = AC97_SCAP_SKIP_MODEM | AC97_SCAP_POWER_SAVE;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
snd_ac97_tune_hardware(chip->ac97, ac97_quirks, quirk_override);
@@ -2054,9 +2062,9 @@ static int snd_via686_init_misc(struct via82xx *chip)
break;
}
}
- if (mpu_port >= 0x200 &&
- (chip->mpu_res = request_region(mpu_port, 2, "VIA82xx MPU401"))
- != NULL) {
+ if (mpu_port >= 0x200)
+ chip->mpu_res = request_region(mpu_port, 2, "VIA82xx MPU401");
+ if (chip->mpu_res) {
if (rev_h)
legacy |= VIA_FUNC_MIDI_PNP; /* enable PCI I/O 2 */
legacy |= VIA_FUNC_ENABLE_MIDI;
@@ -2173,7 +2181,8 @@ static int snd_via82xx_chip_init(struct via82xx *chip)
schedule_timeout_uninterruptible(1);
} while (time_before(jiffies, end_time));
- if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY)
+ val = snd_via82xx_codec_xread(chip);
+ if (val & VIA_REG_AC97_BUSY)
dev_err(chip->card->dev,
"AC'97 codec is not ready [0x%x]\n", val);
@@ -2186,7 +2195,8 @@ static int snd_via82xx_chip_init(struct via82xx *chip)
VIA_REG_AC97_SECONDARY_VALID |
(VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT));
do {
- if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_SECONDARY_VALID) {
+ val = snd_via82xx_codec_xread(chip);
+ if (val & VIA_REG_AC97_SECONDARY_VALID) {
chip->ac97_secondary = 1;
goto __ac97_ok2;
}
@@ -2337,10 +2347,12 @@ static int snd_via82xx_create(struct snd_card *card,
.dev_free = snd_via82xx_dev_free,
};
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
- if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
pci_disable_device(pci);
return -ENOMEM;
}
@@ -2360,7 +2372,8 @@ static int snd_via82xx_create(struct snd_card *card,
pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE,
chip->old_legacy & ~(VIA_FUNC_ENABLE_SB|VIA_FUNC_ENABLE_FM));
- if ((err = pci_request_regions(pci, card->driver)) < 0) {
+ err = pci_request_regions(pci, card->driver);
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -2380,12 +2393,14 @@ static int snd_via82xx_create(struct snd_card *card,
if (ac97_clock >= 8000 && ac97_clock <= 48000)
chip->ac97_clock = ac97_clock;
- if ((err = snd_via82xx_chip_init(chip)) < 0) {
+ err = snd_via82xx_chip_init(chip);
+ if (err < 0) {
snd_via82xx_free(chip);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_via82xx_free(chip);
return err;
}
@@ -2541,24 +2556,31 @@ static int snd_via82xx_probe(struct pci_dev *pci,
goto __error;
}
- if ((err = snd_via82xx_create(card, pci, chip_type, pci->revision,
- ac97_clock, &chip)) < 0)
+ err = snd_via82xx_create(card, pci, chip_type, pci->revision,
+ ac97_clock, &chip);
+ if (err < 0)
goto __error;
card->private_data = chip;
- if ((err = snd_via82xx_mixer_new(chip, ac97_quirk)) < 0)
+ err = snd_via82xx_mixer_new(chip, ac97_quirk);
+ if (err < 0)
goto __error;
if (chip_type == TYPE_VIA686) {
- if ((err = snd_via686_pcm_new(chip)) < 0 ||
- (err = snd_via686_init_misc(chip)) < 0)
+ err = snd_via686_pcm_new(chip);
+ if (err < 0)
+ goto __error;
+ err = snd_via686_init_misc(chip);
+ if (err < 0)
goto __error;
} else {
if (chip_type == TYPE_VIA8233A) {
- if ((err = snd_via8233a_pcm_new(chip)) < 0)
+ err = snd_via8233a_pcm_new(chip);
+ if (err < 0)
goto __error;
// chip->dxs_fixed = 1; /* FIXME: use 48k for DXS #3? */
} else {
- if ((err = snd_via8233_pcm_new(chip)) < 0)
+ err = snd_via8233_pcm_new(chip);
+ if (err < 0)
goto __error;
if (dxs_support == VIA_DXS_48K)
chip->dxs_fixed = 1;
@@ -2569,7 +2591,8 @@ static int snd_via82xx_probe(struct pci_dev *pci,
chip->dxs_src = 1;
}
}
- if ((err = snd_via8233_init_misc(chip)) < 0)
+ err = snd_via8233_init_misc(chip);
+ if (err < 0)
goto __error;
}
@@ -2583,7 +2606,8 @@ static int snd_via82xx_probe(struct pci_dev *pci,
snd_via82xx_proc_init(chip);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 30253306f67c..07278a3dc8c1 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -369,7 +369,8 @@ static int snd_via82xx_codec_ready(struct via82xx_modem *chip, int secondary)
while (timeout-- > 0) {
udelay(1);
- if (!((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY))
+ val = snd_via82xx_codec_xread(chip);
+ if (!(val & VIA_REG_AC97_BUSY))
return val & 0xffff;
}
dev_err(chip->card->dev, "codec_ready: codec %i is not ready [0x%x]\n",
@@ -738,13 +739,15 @@ static int snd_via82xx_modem_pcm_open(struct via82xx_modem *chip, struct viadev
runtime->hw = snd_via82xx_hw;
- if ((err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- &hw_constraints_rates)) < 0)
+ err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &hw_constraints_rates);
+ if (err < 0)
return err;
/* we may remove following constaint when we modify table entries
in interrupt */
- if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
return err;
runtime->private_data = viadev;
@@ -878,7 +881,8 @@ static int snd_via82xx_mixer_new(struct via82xx_modem *chip)
.wait = snd_via82xx_codec_wait,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
+ if (err < 0)
return err;
chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus;
chip->ac97_bus->clock = chip->ac97_clock;
@@ -890,7 +894,8 @@ static int snd_via82xx_mixer_new(struct via82xx_modem *chip)
ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
ac97.num = chip->ac97_secondary;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
return 0;
@@ -971,7 +976,8 @@ static int snd_via82xx_chip_init(struct via82xx_modem *chip)
schedule_timeout_uninterruptible(1);
} while (time_before(jiffies, end_time));
- if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY)
+ val = snd_via82xx_codec_xread(chip);
+ if (val & VIA_REG_AC97_BUSY)
dev_err(chip->card->dev,
"AC'97 codec is not ready [0x%x]\n", val);
@@ -983,7 +989,8 @@ static int snd_via82xx_chip_init(struct via82xx_modem *chip)
VIA_REG_AC97_SECONDARY_VALID |
(VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT));
do {
- if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_SECONDARY_VALID) {
+ val = snd_via82xx_codec_xread(chip);
+ if (val & VIA_REG_AC97_SECONDARY_VALID) {
chip->ac97_secondary = 1;
goto __ac97_ok2;
}
@@ -1079,10 +1086,12 @@ static int snd_via82xx_create(struct snd_card *card,
.dev_free = snd_via82xx_dev_free,
};
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
- if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
pci_disable_device(pci);
return -ENOMEM;
}
@@ -1092,7 +1101,8 @@ static int snd_via82xx_create(struct snd_card *card,
chip->pci = pci;
chip->irq = -1;
- if ((err = pci_request_regions(pci, card->driver)) < 0) {
+ err = pci_request_regions(pci, card->driver);
+ if (err < 0) {
kfree(chip);
pci_disable_device(pci);
return err;
@@ -1109,12 +1119,14 @@ static int snd_via82xx_create(struct snd_card *card,
if (ac97_clock >= 8000 && ac97_clock <= 48000)
chip->ac97_clock = ac97_clock;
- if ((err = snd_via82xx_chip_init(chip)) < 0) {
+ err = snd_via82xx_chip_init(chip);
+ if (err < 0) {
snd_via82xx_free(chip);
return err;
}
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_via82xx_free(chip);
return err;
}
@@ -1154,14 +1166,17 @@ static int snd_via82xx_probe(struct pci_dev *pci,
goto __error;
}
- if ((err = snd_via82xx_create(card, pci, chip_type, pci->revision,
- ac97_clock, &chip)) < 0)
+ err = snd_via82xx_create(card, pci, chip_type, pci->revision,
+ ac97_clock, &chip);
+ if (err < 0)
goto __error;
card->private_data = chip;
- if ((err = snd_via82xx_mixer_new(chip)) < 0)
+ err = snd_via82xx_mixer_new(chip);
+ if (err < 0)
goto __error;
- if ((err = snd_via686_pcm_new(chip)) < 0 )
+ err = snd_via686_pcm_new(chip);
+ if (err < 0)
goto __error;
/* disable interrupts */
@@ -1173,7 +1188,8 @@ static int snd_via82xx_probe(struct pci_dev *pci,
snd_via82xx_proc_init(chip);
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index 2a9e1a77a81a..04c7204cb7bc 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -133,7 +133,8 @@ static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
const struct snd_vx_ops *vx_ops;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
pci_set_master(pci);
@@ -147,7 +148,8 @@ static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
vx = to_vx222(chip);
vx->pci = pci;
- if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
+ err = pci_request_regions(pci, CARD_NAME);
+ if (err < 0) {
snd_vx222_free(chip);
return err;
}
@@ -164,7 +166,8 @@ static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
chip->irq = pci->irq;
card->sync_irq = chip->irq;
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
snd_vx222_free(chip);
return err;
}
@@ -207,7 +210,8 @@ static int snd_vx222_probe(struct pci_dev *pci,
hw = &vx222_v2_hw;
break;
}
- if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) {
+ err = snd_vx222_create(card, pci, hw, &vx);
+ if (err < 0) {
snd_card_free(card);
return err;
}
@@ -223,12 +227,14 @@ static int snd_vx222_probe(struct pci_dev *pci,
vx->core.dev = &pci->dev;
#endif
- if ((err = snd_vx_setup_firmware(&vx->core)) < 0) {
+ err = snd_vx_setup_firmware(&vx->core);
+ if (err < 0) {
snd_card_free(card);
return err;
}
- if ((err = snd_card_register(card)) < 0) {
+ err = snd_card_register(card);
+ if (err < 0) {
snd_card_free(card);
return err;
}
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index a05537202738..3e7e928b24f8 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -408,9 +408,11 @@ static int vx2_load_dsp(struct vx_core *vx, int index, const struct firmware *ds
switch (index) {
case 1:
/* xilinx image */
- if ((err = vx2_load_xilinx_binary(vx, dsp)) < 0)
+ err = vx2_load_xilinx_binary(vx, dsp);
+ if (err < 0)
return err;
- if ((err = vx2_test_xilinx(vx)) < 0)
+ err = vx2_test_xilinx(vx);
+ if (err < 0)
return err;
return 0;
case 2:
@@ -972,9 +974,11 @@ static int vx2_add_mic_controls(struct vx_core *_chip)
vx2_set_input_level(chip);
/* controls */
- if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_input_level, chip))) < 0)
+ err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_input_level, chip));
+ if (err < 0)
return err;
- if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip))) < 0)
+ err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip));
+ if (err < 0)
return err;
return 0;
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index 99be1490ef0e..7e32d57147ff 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -72,7 +72,8 @@ static int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev,
if (io_port == 1) {
/* auto-detect */
- if (!(io_port = pci_resource_start(chip->pci, 2)))
+ io_port = pci_resource_start(chip->pci, 2);
+ if (!io_port)
return -ENODEV;
}
} else {
@@ -81,7 +82,8 @@ static int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev,
for (io_port = 0x201; io_port <= 0x205; io_port++) {
if (io_port == 0x203)
continue;
- if ((r = request_region(io_port, 1, "YMFPCI gameport")) != NULL)
+ r = request_region(io_port, 1, "YMFPCI gameport");
+ if (r)
break;
}
if (!r) {
@@ -102,10 +104,13 @@ static int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev,
}
}
- if (!r && !(r = request_region(io_port, 1, "YMFPCI gameport"))) {
- dev_err(chip->card->dev,
- "joystick port %#x is in use.\n", io_port);
- return -EBUSY;
+ if (!r) {
+ r = request_region(io_port, 1, "YMFPCI gameport");
+ if (!r) {
+ dev_err(chip->card->dev,
+ "joystick port %#x is in use.\n", io_port);
+ return -EBUSY;
+ }
}
chip->gameport = gp = gameport_allocate_port();
@@ -193,8 +198,9 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
/* auto-detect */
fm_port[dev] = pci_resource_start(pci, 1);
}
- if (fm_port[dev] > 0 &&
- (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) {
+ if (fm_port[dev] > 0)
+ fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3");
+ if (fm_res) {
legacy_ctrl |= YMFPCI_LEGACY_FMEN;
pci_write_config_word(pci, PCIR_DSXG_FMBASE, fm_port[dev]);
}
@@ -202,8 +208,9 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
/* auto-detect */
mpu_port[dev] = pci_resource_start(pci, 1) + 0x20;
}
- if (mpu_port[dev] > 0 &&
- (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) {
+ if (mpu_port[dev] > 0)
+ mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401");
+ if (mpu_res) {
legacy_ctrl |= YMFPCI_LEGACY_MEN;
pci_write_config_word(pci, PCIR_DSXG_MPU401BASE, mpu_port[dev]);
}
@@ -215,8 +222,9 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
case 0x3a8: legacy_ctrl2 |= 3; break;
default: fm_port[dev] = 0; break;
}
- if (fm_port[dev] > 0 &&
- (fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3")) != NULL) {
+ if (fm_port[dev] > 0)
+ fm_res = request_region(fm_port[dev], 4, "YMFPCI OPL3");
+ if (fm_res) {
legacy_ctrl |= YMFPCI_LEGACY_FMEN;
} else {
legacy_ctrl2 &= ~YMFPCI_LEGACY2_FMIO;
@@ -229,8 +237,9 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
case 0x334: legacy_ctrl2 |= 3 << 4; break;
default: mpu_port[dev] = 0; break;
}
- if (mpu_port[dev] > 0 &&
- (mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401")) != NULL) {
+ if (mpu_port[dev] > 0)
+ mpu_res = request_region(mpu_port[dev], 2, "YMFPCI MPU401");
+ if (mpu_res) {
legacy_ctrl |= YMFPCI_LEGACY_MEN;
} else {
legacy_ctrl2 &= ~YMFPCI_LEGACY2_MPUIO;
@@ -244,9 +253,8 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
pci_read_config_word(pci, PCIR_DSXG_LEGACY, &old_legacy_ctrl);
pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl);
pci_write_config_word(pci, PCIR_DSXG_ELEGACY, legacy_ctrl2);
- if ((err = snd_ymfpci_create(card, pci,
- old_legacy_ctrl,
- &chip)) < 0) {
+ err = snd_ymfpci_create(card, pci, old_legacy_ctrl, &chip);
+ if (err < 0) {
release_and_free_resource(mpu_res);
release_and_free_resource(fm_res);
goto free_card;
@@ -287,11 +295,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
goto free_card;
if (chip->mpu_res) {
- if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_YMFPCI,
- mpu_port[dev],
- MPU401_INFO_INTEGRATED |
- MPU401_INFO_IRQ_HOOK,
- -1, &chip->rawmidi)) < 0) {
+ err = snd_mpu401_uart_new(card, 0, MPU401_HW_YMFPCI,
+ mpu_port[dev],
+ MPU401_INFO_INTEGRATED |
+ MPU401_INFO_IRQ_HOOK,
+ -1, &chip->rawmidi);
+ if (err < 0) {
dev_warn(card->dev,
"cannot initialize MPU401 at 0x%lx, skipping...\n",
mpu_port[dev]);
@@ -300,18 +309,22 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
}
}
if (chip->fm_res) {
- if ((err = snd_opl3_create(card,
- fm_port[dev],
- fm_port[dev] + 2,
- OPL3_HW_OPL3, 1, &opl3)) < 0) {
+ err = snd_opl3_create(card,
+ fm_port[dev],
+ fm_port[dev] + 2,
+ OPL3_HW_OPL3, 1, &opl3);
+ if (err < 0) {
dev_warn(card->dev,
"cannot initialize FM OPL3 at 0x%lx, skipping...\n",
fm_port[dev]);
legacy_ctrl &= ~YMFPCI_LEGACY_FMEN;
pci_write_config_word(pci, PCIR_DSXG_LEGACY, legacy_ctrl);
- } else if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
- dev_err(card->dev, "cannot create opl3 hwdep\n");
- goto free_card;
+ } else {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0) {
+ dev_err(card->dev, "cannot create opl3 hwdep\n");
+ goto free_card;
+ }
}
}
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index cacc6a9d14c8..8fd060769882 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -292,7 +292,8 @@ static void snd_ymfpci_pcm_interrupt(struct snd_ymfpci *chip, struct snd_ymfpci_
struct snd_ymfpci_pcm *ypcm;
u32 pos, delta;
- if ((ypcm = voice->ypcm) == NULL)
+ ypcm = voice->ypcm;
+ if (!ypcm)
return;
if (ypcm->substream == NULL)
return;
@@ -628,7 +629,8 @@ static int snd_ymfpci_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_ymfpci_pcm *ypcm = runtime->private_data;
int err;
- if ((err = snd_ymfpci_pcm_voice_alloc(ypcm, params_channels(hw_params))) < 0)
+ err = snd_ymfpci_pcm_voice_alloc(ypcm, params_channels(hw_params));
+ if (err < 0)
return err;
return 0;
}
@@ -932,7 +934,8 @@ static int snd_ymfpci_playback_open(struct snd_pcm_substream *substream)
struct snd_ymfpci_pcm *ypcm;
int err;
- if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
+ err = snd_ymfpci_playback_open_1(substream);
+ if (err < 0)
return err;
ypcm = runtime->private_data;
ypcm->output_front = 1;
@@ -954,7 +957,8 @@ static int snd_ymfpci_playback_spdif_open(struct snd_pcm_substream *substream)
struct snd_ymfpci_pcm *ypcm;
int err;
- if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
+ err = snd_ymfpci_playback_open_1(substream);
+ if (err < 0)
return err;
ypcm = runtime->private_data;
ypcm->output_front = 0;
@@ -982,7 +986,8 @@ static int snd_ymfpci_playback_4ch_open(struct snd_pcm_substream *substream)
struct snd_ymfpci_pcm *ypcm;
int err;
- if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
+ err = snd_ymfpci_playback_open_1(substream);
+ if (err < 0)
return err;
ypcm = runtime->private_data;
ypcm->output_front = 0;
@@ -1124,7 +1129,8 @@ int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1157,7 +1163,8 @@ int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1190,7 +1197,8 @@ int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1230,7 +1238,8 @@ int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm)) < 0)
+ err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm);
+ if (err < 0)
return err;
pcm->private_data = chip;
@@ -1785,7 +1794,8 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
.read = snd_ymfpci_codec_read,
};
- if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0)
+ err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
+ if (err < 0)
return err;
chip->ac97_bus->private_free = snd_ymfpci_mixer_free_ac97_bus;
chip->ac97_bus->no_vra = 1; /* YMFPCI doesn't need VRA */
@@ -1793,7 +1803,8 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
memset(&ac97, 0, sizeof(ac97));
ac97.private_data = chip;
ac97.private_free = snd_ymfpci_mixer_free_ac97;
- if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0)
+ err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
+ if (err < 0)
return err;
/* to be sure */
@@ -1801,7 +1812,8 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
AC97_EA_VRA|AC97_EA_VRM, 0);
for (idx = 0; idx < ARRAY_SIZE(snd_ymfpci_controls); idx++) {
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_controls[idx], chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_controls[idx], chip));
+ if (err < 0)
return err;
}
if (chip->ac97->ext_id & AC97_EI_SDAC) {
@@ -1814,27 +1826,37 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
/* add S/PDIF control */
if (snd_BUG_ON(!chip->pcm_spdif))
return -ENXIO;
- if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip))) < 0)
+ kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip);
+ err = snd_ctl_add(chip->card, kctl);
+ if (err < 0)
return err;
kctl->id.device = chip->pcm_spdif->device;
- if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip))) < 0)
+ kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip);
+ err = snd_ctl_add(chip->card, kctl);
+ if (err < 0)
return err;
kctl->id.device = chip->pcm_spdif->device;
- if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip))) < 0)
+ kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip);
+ err = snd_ctl_add(chip->card, kctl);
+ if (err < 0)
return err;
kctl->id.device = chip->pcm_spdif->device;
chip->spdif_pcm_ctl = kctl;
/* direct recording source */
- if (chip->device_id == PCI_DEVICE_ID_YAMAHA_754 &&
- (err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_drec_source, chip))) < 0)
- return err;
+ if (chip->device_id == PCI_DEVICE_ID_YAMAHA_754) {
+ kctl = snd_ctl_new1(&snd_ymfpci_drec_source, chip);
+ err = snd_ctl_add(chip->card, kctl);
+ if (err < 0)
+ return err;
+ }
/*
* shared rear/line-in
*/
if (rear_switch) {
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_rear_shared, chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_rear_shared, chip));
+ if (err < 0)
return err;
}
@@ -1847,7 +1869,8 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
kctl->id.device = chip->pcm->device;
kctl->id.subdevice = idx;
kctl->private_value = (unsigned long)substream;
- if ((err = snd_ctl_add(chip->card, kctl)) < 0)
+ err = snd_ctl_add(chip->card, kctl);
+ if (err < 0)
return err;
chip->pcm_mixer[idx].left = 0x8000;
chip->pcm_mixer[idx].right = 0x8000;
@@ -1928,7 +1951,8 @@ int snd_ymfpci_timer(struct snd_ymfpci *chip, int device)
tid.card = chip->card->number;
tid.device = device;
tid.subdevice = 0;
- if ((err = snd_timer_new(chip->card, "YMFPCI", &tid, &timer)) >= 0) {
+ err = snd_timer_new(chip->card, "YMFPCI", &tid, &timer);
+ if (err >= 0) {
strcpy(timer->name, "YMFPCI timer");
timer->private_data = chip;
timer->hw = snd_ymfpci_timer_hw;
@@ -2334,7 +2358,8 @@ int snd_ymfpci_create(struct snd_card *card,
*rchip = NULL;
/* enable PCI device */
- if ((err = pci_enable_device(pci)) < 0)
+ err = pci_enable_device(pci);
+ if (err < 0)
return err;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
@@ -2357,7 +2382,8 @@ int snd_ymfpci_create(struct snd_card *card,
pci_set_master(pci);
chip->src441_used = -1;
- if ((chip->res_reg_area = request_mem_region(chip->reg_area_phys, 0x8000, "YMFPCI")) == NULL) {
+ chip->res_reg_area = request_mem_region(chip->reg_area_phys, 0x8000, "YMFPCI");
+ if (!chip->res_reg_area) {
dev_err(chip->card->dev,
"unable to grab memory region 0x%lx-0x%lx\n",
chip->reg_area_phys, chip->reg_area_phys + 0x8000 - 1);
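All of the ymfpci hunks above apply the same mechanical cleanup: kernel coding style (and checkpatch.pl) discourage assignments inside an if () condition, so the call is hoisted onto its own line and only its result is tested. A minimal sketch of the before/after shape, not part of the patch; do_setup() is a stand-in for the various snd_* calls being converted:

static int do_setup(void)
{
	return 0;	/* placeholder for e.g. snd_pcm_new(), snd_ctl_add() */
}

static int example_old_style(void)
{
	int err;

	if ((err = do_setup()) < 0)	/* assignment buried in the condition */
		return err;
	return 0;
}

static int example_new_style(void)
{
	int err;

	err = do_setup();		/* same behaviour, checkpatch-clean */
	if (err < 0)
		return err;
	return 0;
}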
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 144582350a05..8363ec08df5d 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -170,7 +170,8 @@ static int snd_pdacf_assign_resources(struct snd_pdacf *pdacf, int port, int irq
if (err < 0)
return err;
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
return err;
return 0;
diff --git a/sound/pcmcia/vx/vxp_mixer.c b/sound/pcmcia/vx/vxp_mixer.c
index 0f59e4cca56d..bc2114475810 100644
--- a/sound/pcmcia/vx/vxp_mixer.c
+++ b/sound/pcmcia/vx/vxp_mixer.c
@@ -124,11 +124,13 @@ int vxp_add_mic_controls(struct vx_core *_chip)
/* mic level */
switch (_chip->type) {
case VX_TYPE_VXPOCKET:
- if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip))) < 0)
+ err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip));
+ if (err < 0)
return err;
break;
case VX_TYPE_VXP440:
- if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_boost, chip))) < 0)
+ err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_boost, chip));
+ if (err < 0)
return err;
break;
}
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 45eeb0f57d59..4176abd73799 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -237,9 +237,11 @@ static int vxp_load_dsp(struct vx_core *vx, int index, const struct firmware *fw
switch (index) {
case 0:
/* xilinx boot */
- if ((err = vx_check_magic(vx)) < 0)
+ err = vx_check_magic(vx);
+ if (err < 0)
return err;
- if ((err = snd_vx_load_boot_image(vx, fw)) < 0)
+ err = snd_vx_load_boot_image(vx, fw);
+ if (err < 0)
return err;
return 0;
case 1:
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 636320489805..0dfb6a943b60 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -183,7 +183,8 @@ static int snd_vxpocket_assign_resources(struct vx_core *chip, int port, int irq
chip->irq = irq;
card->sync_irq = chip->irq;
- if ((err = snd_vx_setup_firmware(chip)) < 0)
+ err = snd_vx_setup_firmware(chip);
+ if (err < 0)
return err;
return 0;
diff --git a/sound/ppc/beep.c b/sound/ppc/beep.c
index 6bc586a5db0f..0f4bce1c0d4f 100644
--- a/sound/ppc/beep.c
+++ b/sound/ppc/beep.c
@@ -105,7 +105,10 @@ static int snd_pmac_beep_event(struct input_dev *dev, unsigned int type,
}
chip = input_get_drvdata(dev);
- if (! chip || (beep = chip->beep) == NULL)
+ if (!chip)
+ return -1;
+ beep = chip->beep;
+ if (!beep)
return -1;
if (! hz) {
diff --git a/sound/ppc/daca.c b/sound/ppc/daca.c
index 1eb698dafd93..4da9278dd58a 100644
--- a/sound/ppc/daca.c
+++ b/sound/ppc/daca.c
@@ -84,7 +84,8 @@ static int daca_get_deemphasis(struct snd_kcontrol *kcontrol,
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_daca *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->deemphasis ? 1 : 0;
return 0;
@@ -97,7 +98,8 @@ static int daca_put_deemphasis(struct snd_kcontrol *kcontrol,
struct pmac_daca *mix;
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
change = mix->deemphasis != ucontrol->value.integer.value[0];
if (change) {
@@ -123,7 +125,8 @@ static int daca_get_volume(struct snd_kcontrol *kcontrol,
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_daca *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->left_vol;
ucontrol->value.integer.value[1] = mix->right_vol;
@@ -138,7 +141,8 @@ static int daca_put_volume(struct snd_kcontrol *kcontrol,
unsigned int vol[2];
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
vol[0] = ucontrol->value.integer.value[0];
vol[1] = ucontrol->value.integer.value[1];
@@ -162,7 +166,8 @@ static int daca_get_amp(struct snd_kcontrol *kcontrol,
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_daca *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->amp_on ? 1 : 0;
return 0;
@@ -175,7 +180,8 @@ static int daca_put_amp(struct snd_kcontrol *kcontrol,
struct pmac_daca *mix;
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
change = mix->amp_on != ucontrol->value.integer.value[0];
if (change) {
@@ -248,7 +254,8 @@ int snd_pmac_daca_init(struct snd_pmac *chip)
mix->i2c.addr = DACA_I2C_ADDR;
mix->i2c.init_client = daca_init_client;
mix->i2c.name = "DACA";
- if ((err = snd_pmac_keywest_init(&mix->i2c)) < 0)
+ err = snd_pmac_keywest_init(&mix->i2c);
+ if (err < 0)
return err;
/*
@@ -257,7 +264,8 @@ int snd_pmac_daca_init(struct snd_pmac *chip)
strcpy(chip->card->mixername, "PowerMac DACA");
for (i = 0; i < ARRAY_SIZE(daca_mixers); i++) {
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&daca_mixers[i], chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&daca_mixers[i], chip));
+ if (err < 0)
return err;
}
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index a8915100d6bb..6e5daae18f9d 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -114,7 +114,8 @@ int snd_pmac_tumbler_post_init(void)
if (!keywest_ctx || !keywest_ctx->client)
return -ENXIO;
- if ((err = keywest_ctx->init_client(keywest_ctx)) < 0) {
+ err = keywest_ctx->init_client(keywest_ctx);
+ if (err < 0) {
snd_printk(KERN_ERR "tumbler: %i :cannot initialize the MCS\n", err);
return err;
}
@@ -136,7 +137,8 @@ int snd_pmac_keywest_init(struct pmac_keywest *i2c)
keywest_ctx = i2c;
- if ((err = i2c_add_driver(&keywest_driver))) {
+ err = i2c_add_driver(&keywest_driver);
+ if (err) {
snd_printk(KERN_ERR "cannot register keywest i2c driver\n");
i2c_put_adapter(adap);
return err;
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 2e750b317be1..84058bbf9d12 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -1160,7 +1160,8 @@ int snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
chip->playback.stream = SNDRV_PCM_STREAM_PLAYBACK;
chip->capture.stream = SNDRV_PCM_STREAM_CAPTURE;
- if ((err = snd_pmac_detect(chip)) < 0)
+ err = snd_pmac_detect(chip);
+ if (err < 0)
goto __error;
if (snd_pmac_dbdma_alloc(chip, &chip->playback.cmd, PMAC_MAX_FRAGS + 1) < 0 ||
@@ -1299,7 +1300,8 @@ int snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
/* Reset dbdma channels */
snd_pmac_dbdma_reset(chip);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0)
goto __error;
*chip_return = chip;
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index 9fb51ebafde1..db414b61157e 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -48,7 +48,8 @@ static int snd_pmac_probe(struct platform_device *devptr)
if (err < 0)
return err;
- if ((err = snd_pmac_new(card, &chip)) < 0)
+ err = snd_pmac_new(card, &chip);
+ if (err < 0)
goto __error;
card->private_data = chip;
@@ -58,7 +59,8 @@ static int snd_pmac_probe(struct platform_device *devptr)
strcpy(card->shortname, "PowerMac Burgundy");
sprintf(card->longname, "%s (Dev %d) Sub-frame %d",
card->shortname, chip->device_id, chip->subframe);
- if ((err = snd_pmac_burgundy_init(chip)) < 0)
+ err = snd_pmac_burgundy_init(chip);
+ if (err < 0)
goto __error;
break;
case PMAC_DACA:
@@ -66,7 +68,8 @@ static int snd_pmac_probe(struct platform_device *devptr)
strcpy(card->shortname, "PowerMac DACA");
sprintf(card->longname, "%s (Dev %d) Sub-frame %d",
card->shortname, chip->device_id, chip->subframe);
- if ((err = snd_pmac_daca_init(chip)) < 0)
+ err = snd_pmac_daca_init(chip);
+ if (err < 0)
goto __error;
break;
case PMAC_TUMBLER:
@@ -76,7 +79,11 @@ static int snd_pmac_probe(struct platform_device *devptr)
sprintf(card->shortname, "PowerMac %s", name_ext);
sprintf(card->longname, "%s (Dev %d) Sub-frame %d",
card->shortname, chip->device_id, chip->subframe);
- if ( snd_pmac_tumbler_init(chip) < 0 || snd_pmac_tumbler_post_init() < 0)
+ err = snd_pmac_tumbler_init(chip);
+ if (err < 0)
+ goto __error;
+ err = snd_pmac_tumbler_post_init();
+ if (err < 0)
goto __error;
break;
case PMAC_AWACS:
@@ -92,7 +99,8 @@ static int snd_pmac_probe(struct platform_device *devptr)
name_ext = "";
sprintf(card->longname, "%s%s Rev %d",
card->shortname, name_ext, chip->revision);
- if ((err = snd_pmac_awacs_init(chip)) < 0)
+ err = snd_pmac_awacs_init(chip);
+ if (err < 0)
goto __error;
break;
default:
@@ -101,14 +109,16 @@ static int snd_pmac_probe(struct platform_device *devptr)
goto __error;
}
- if ((err = snd_pmac_pcm_new(chip)) < 0)
+ err = snd_pmac_pcm_new(chip);
+ if (err < 0)
goto __error;
chip->initialized = 1;
if (enable_beep)
snd_pmac_attach_beep(chip);
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto __error;
platform_set_drvdata(devptr, card);
@@ -162,7 +172,8 @@ static int __init alsa_card_pmac_init(void)
{
int err;
- if ((err = platform_driver_register(&snd_pmac_driver)) < 0)
+ err = platform_driver_register(&snd_pmac_driver);
+ if (err < 0)
return err;
device = platform_device_register_simple(SND_PMAC_DRIVER, -1, NULL, 0);
return 0;
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 6e5bdaa262b0..c65e74d7cd0a 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -402,7 +402,8 @@ static int tumbler_get_drc_value(struct snd_kcontrol *kcontrol,
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_tumbler *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->drc_range;
return 0;
@@ -416,7 +417,8 @@ static int tumbler_put_drc_value(struct snd_kcontrol *kcontrol,
unsigned int val;
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
val = ucontrol->value.integer.value[0];
if (chip->model == PMAC_TUMBLER) {
@@ -442,7 +444,8 @@ static int tumbler_get_drc_switch(struct snd_kcontrol *kcontrol,
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_tumbler *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->drc_enable;
return 0;
@@ -455,7 +458,8 @@ static int tumbler_put_drc_switch(struct snd_kcontrol *kcontrol,
struct pmac_tumbler *mix;
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
change = mix->drc_enable != ucontrol->value.integer.value[0];
if (change) {
@@ -524,7 +528,8 @@ static int tumbler_get_mono(struct snd_kcontrol *kcontrol,
struct tumbler_mono_vol *info = (struct tumbler_mono_vol *)kcontrol->private_value;
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_tumbler *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->mono_vol[info->index];
return 0;
@@ -539,7 +544,8 @@ static int tumbler_put_mono(struct snd_kcontrol *kcontrol,
unsigned int vol;
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
vol = ucontrol->value.integer.value[0];
if (vol >= info->max)
@@ -669,7 +675,8 @@ static int snapper_get_mix(struct snd_kcontrol *kcontrol,
int idx = (int)kcontrol->private_value;
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_tumbler *mix;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
ucontrol->value.integer.value[0] = mix->mix_vol[idx][0];
ucontrol->value.integer.value[1] = mix->mix_vol[idx][1];
@@ -685,7 +692,8 @@ static int snapper_put_mix(struct snd_kcontrol *kcontrol,
unsigned int vol[2];
int change;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
vol[0] = ucontrol->value.integer.value[0];
vol[1] = ucontrol->value.integer.value[1];
@@ -716,7 +724,8 @@ static int tumbler_get_mute_switch(struct snd_kcontrol *kcontrol,
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
struct pmac_tumbler *mix;
struct pmac_gpio *gp;
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
switch(kcontrol->private_value) {
case TUMBLER_MUTE_HP:
@@ -745,7 +754,8 @@ static int tumbler_put_mute_switch(struct snd_kcontrol *kcontrol,
if (chip->update_automute && chip->auto_mute)
return 0; /* don't touch in the auto-mute mode */
#endif
- if (! (mix = chip->mixer_data))
+ mix = chip->mixer_data;
+ if (!mix)
return -ENODEV;
switch(kcontrol->private_value) {
case TUMBLER_MUTE_HP:
@@ -1361,7 +1371,8 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
break;
}
}
- if ((err = tumbler_init(chip)) < 0)
+ err = tumbler_init(chip);
+ if (err < 0)
return err;
/* set up TAS */
@@ -1392,7 +1403,8 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
chipname = "Snapper";
}
- if ((err = snd_pmac_keywest_init(&mix->i2c)) < 0)
+ err = snd_pmac_keywest_init(&mix->i2c);
+ if (err < 0)
return err;
/*
@@ -1402,28 +1414,34 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
if (chip->model == PMAC_TUMBLER) {
for (i = 0; i < ARRAY_SIZE(tumbler_mixers); i++) {
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&tumbler_mixers[i], chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&tumbler_mixers[i], chip));
+ if (err < 0)
return err;
}
} else {
for (i = 0; i < ARRAY_SIZE(snapper_mixers); i++) {
- if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snapper_mixers[i], chip))) < 0)
+ err = snd_ctl_add(chip->card, snd_ctl_new1(&snapper_mixers[i], chip));
+ if (err < 0)
return err;
}
}
chip->master_sw_ctl = snd_ctl_new1(&tumbler_hp_sw, chip);
- if ((err = snd_ctl_add(chip->card, chip->master_sw_ctl)) < 0)
+ err = snd_ctl_add(chip->card, chip->master_sw_ctl);
+ if (err < 0)
return err;
chip->speaker_sw_ctl = snd_ctl_new1(&tumbler_speaker_sw, chip);
- if ((err = snd_ctl_add(chip->card, chip->speaker_sw_ctl)) < 0)
+ err = snd_ctl_add(chip->card, chip->speaker_sw_ctl);
+ if (err < 0)
return err;
if (mix->line_mute.addr != 0) {
chip->lineout_sw_ctl = snd_ctl_new1(&tumbler_lineout_sw, chip);
- if ((err = snd_ctl_add(chip->card, chip->lineout_sw_ctl)) < 0)
+ err = snd_ctl_add(chip->card, chip->lineout_sw_ctl);
+ if (err < 0)
return err;
}
chip->drc_sw_ctl = snd_ctl_new1(&tumbler_drc_sw, chip);
- if ((err = snd_ctl_add(chip->card, chip->drc_sw_ctl)) < 0)
+ err = snd_ctl_add(chip->card, chip->drc_sw_ctl);
+ if (err < 0)
return err;
/* set initial DRC range to 60% */
@@ -1446,9 +1464,11 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
device_change_chip = chip;
#ifdef PMAC_SUPPORT_AUTOMUTE
- if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
- && (err = snd_pmac_add_automute(chip)) < 0)
- return err;
+ if (mix->headphone_irq >= 0 || mix->lineout_irq >= 0) {
+ err = snd_pmac_add_automute(chip);
+ if (err < 0)
+ return err;
+ }
chip->detect_headphone = tumbler_detect_headphone;
chip->update_automute = tumbler_update_automute;
tumbler_update_automute(chip, 0); /* update the status only */
@@ -1456,8 +1476,9 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
/* activate headphone status interrupts */
if (mix->headphone_irq >= 0) {
unsigned char val;
- if ((err = request_irq(mix->headphone_irq, headphone_intr, 0,
- "Sound Headphone Detection", chip)) < 0)
+ err = request_irq(mix->headphone_irq, headphone_intr, 0,
+ "Sound Headphone Detection", chip);
+ if (err < 0)
return 0;
/* activate headphone status interrupts */
val = do_gpio_read(&mix->hp_detect);
@@ -1465,8 +1486,9 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
}
if (mix->lineout_irq >= 0) {
unsigned char val;
- if ((err = request_irq(mix->lineout_irq, headphone_intr, 0,
- "Sound Lineout Detection", chip)) < 0)
+ err = request_irq(mix->lineout_irq, headphone_intr, 0,
+ "Sound Lineout Detection", chip);
+ if (err < 0)
return 0;
/* activate headphone status interrupts */
val = do_gpio_read(&mix->line_detect);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 8a13462e1a63..5dcf77af07af 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -36,6 +36,7 @@ config SND_SOC_COMPRESS
config SND_SOC_TOPOLOGY
bool
+ select SND_DYNAMIC_MINORS
config SND_SOC_TOPOLOGY_KUNIT_TEST
tristate "KUnit tests for SoC topology"
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index aa082131fb90..1289cb4e2988 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -198,8 +198,7 @@ static int axi_i2s_probe(struct platform_device *pdev)
axi_i2s_parse_of(i2s, pdev->dev.of_node);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/adi/axi-spdif.c b/sound/soc/adi/axi-spdif.c
index 9b3d81c41c8c..8d4a6cb4e5c5 100644
--- a/sound/soc/adi/axi-spdif.c
+++ b/sound/soc/adi/axi-spdif.c
@@ -189,8 +189,7 @@ static int axi_spdif_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, spdif);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
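The two probe() hunks above (and matching ones further down in the Atmel and Cygnus drivers) fold the platform_get_resource() + devm_ioremap_resource() pair into the devm_platform_get_and_ioremap_resource() helper. A minimal sketch of the converted shape; example_probe() is a placeholder name, not a function from either file:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * Replaces:
	 *   res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 * The helper does both steps and still returns the resource through
	 * the last argument when the caller needs it (pass NULL otherwise).
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}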
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index 84e3906abd4f..3c60c5f96dcb 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -525,6 +525,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
| SND_SOC_DAIFMT_CBM_CFM,
.init = cz_da7219_init,
.dpcm_playback = 1,
+ .stop_dma_first = 1,
.ops = &cz_da7219_play_ops,
SND_SOC_DAILINK_REG(designware1, dlgs, platform),
},
@@ -534,6 +535,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_da7219_cap_ops,
SND_SOC_DAILINK_REG(designware2, dlgs, platform),
},
@@ -543,6 +545,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_playback = 1,
+ .stop_dma_first = 1,
.ops = &cz_max_play_ops,
SND_SOC_DAILINK_REG(designware3, mx, platform),
},
@@ -553,6 +556,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_dmic0_cap_ops,
SND_SOC_DAILINK_REG(designware3, adau, platform),
},
@@ -563,6 +567,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_dmic1_cap_ops,
SND_SOC_DAILINK_REG(designware2, adau, platform),
},
@@ -576,6 +581,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
| SND_SOC_DAIFMT_CBM_CFM,
.init = cz_rt5682_init,
.dpcm_playback = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_play_ops,
SND_SOC_DAILINK_REG(designware1, rt5682, platform),
},
@@ -585,6 +591,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_cap_ops,
SND_SOC_DAILINK_REG(designware2, rt5682, platform),
},
@@ -594,6 +601,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_playback = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_max_play_ops,
SND_SOC_DAILINK_REG(designware3, mx, platform),
},
@@ -604,6 +612,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_dmic0_cap_ops,
SND_SOC_DAILINK_REG(designware3, adau, platform),
},
@@ -614,6 +623,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
+ .stop_dma_first = 1,
.ops = &cz_rt5682_dmic1_cap_ops,
SND_SOC_DAILINK_REG(designware2, adau, platform),
},
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 143155a840ac..cc1ce6f22caa 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -969,7 +969,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
acp_set_sram_bank_state(rtd->acp_mmio, 0, true);
/* Save for runtime private data */
- rtd->dma_addr = substream->dma_buffer.addr;
+ rtd->dma_addr = runtime->dma_addr;
rtd->order = get_order(size);
/* Fill the page table entries in ACP SRAM */
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index 8148b0d22e88..597d7c4b2a6b 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -286,7 +286,7 @@ static int acp3x_dma_hw_params(struct snd_soc_component *component,
pr_err("pinfo failed\n");
}
size = params_buffer_bytes(params);
- rtd->dma_addr = substream->dma_buffer.addr;
+ rtd->dma_addr = substream->runtime->dma_addr;
rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
config_acp3x_dma(rtd, substream->stream);
return 0;
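The AMD hw_params hunks above and the Renoir PDM one just below all stop reading the DMA address from substream->dma_buffer and take it from the runtime instead, likely because with managed buffer allocation the runtime holds the buffer actually in use, so runtime->dma_addr is the authoritative source. A sketch of the corrected shape; example_rtd stands in for each driver's private runtime data:

#include <linux/mm.h>
#include <sound/pcm.h>

struct example_rtd {
	dma_addr_t dma_addr;
	unsigned int num_pages;
};

static void example_save_dma(struct example_rtd *rtd,
			     struct snd_pcm_substream *substream,
			     size_t size)
{
	/* was: rtd->dma_addr = substream->dma_buffer.addr; */
	rtd->dma_addr = substream->runtime->dma_addr;
	rtd->num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
}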
diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c
index 4c2810e58dce..0391c28dd078 100644
--- a/sound/soc/amd/renoir/acp3x-pdm-dma.c
+++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c
@@ -77,7 +77,6 @@ static void enable_pdm_clock(void __iomem *acp_base)
u32 pdm_clk_enable, pdm_ctrl;
pdm_clk_enable = ACP_PDM_CLK_FREQ_MASK;
- pdm_ctrl = 0x00;
rn_writel(pdm_clk_enable, acp_base + ACP_WOV_CLK_CTRL);
pdm_ctrl = rn_readl(acp_base + ACP_WOV_MISC_CTRL);
@@ -144,9 +143,6 @@ static int stop_pdm_dma(void __iomem *acp_base)
u32 pdm_enable, pdm_dma_enable;
int timeout;
- pdm_enable = 0x00;
- pdm_dma_enable = 0x00;
-
pdm_enable = rn_readl(acp_base + ACP_WOV_PDM_ENABLE);
pdm_dma_enable = rn_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
if (pdm_dma_enable & 0x01) {
@@ -246,7 +242,7 @@ static int acp_pdm_dma_hw_params(struct snd_soc_component *component,
return -EINVAL;
size = params_buffer_bytes(params);
period_bytes = params_period_bytes(params);
- rtd->dma_addr = substream->dma_buffer.addr;
+ rtd->dma_addr = substream->runtime->dma_addr;
rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
config_acp_dma(rtd, substream->stream);
init_pdm_ring_buffer(MEM_WINDOW_START, size, period_bytes,
diff --git a/sound/soc/amd/renoir/rn-pci-acp3x.c b/sound/soc/amd/renoir/rn-pci-acp3x.c
index 19438da5dfa5..7b8040e812a1 100644
--- a/sound/soc/amd/renoir/rn-pci-acp3x.c
+++ b/sound/soc/amd/renoir/rn-pci-acp3x.c
@@ -382,6 +382,8 @@ static const struct dev_pm_ops rn_acp_pm = {
.runtime_resume = snd_rn_acp_resume,
.suspend = snd_rn_acp_suspend,
.resume = snd_rn_acp_resume,
+ .restore = snd_rn_acp_resume,
+ .poweroff = snd_rn_acp_suspend,
};
static void snd_rn_acp_remove(struct pci_dev *pci)
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 6023369e0f1a..a9f9f449c48c 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -558,8 +558,7 @@ static int atmel_classd_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(dev, res);
+ io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
index 584656cc7d3c..6b3d9c05eaf2 100644
--- a/sound/soc/atmel/atmel-i2s.c
+++ b/sound/soc/atmel/atmel-i2s.c
@@ -200,6 +200,7 @@ struct atmel_i2s_dev {
unsigned int fmt;
const struct atmel_i2s_gck_param *gck_param;
const struct atmel_i2s_caps *caps;
+ int clk_use_no;
};
static irqreturn_t atmel_i2s_interrupt(int irq, void *dev_id)
@@ -321,9 +322,16 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
{
struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
- unsigned int mr = 0;
+ unsigned int mr = 0, mr_mask;
int ret;
+ mr_mask = ATMEL_I2SC_MR_FORMAT_MASK | ATMEL_I2SC_MR_MODE_MASK |
+ ATMEL_I2SC_MR_DATALENGTH_MASK;
+ if (is_playback)
+ mr_mask |= ATMEL_I2SC_MR_TXMONO;
+ else
+ mr_mask |= ATMEL_I2SC_MR_RXMONO;
+
switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
mr |= ATMEL_I2SC_MR_FORMAT_I2S;
@@ -402,7 +410,7 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- return regmap_write(dev->regmap, ATMEL_I2SC_MR, mr);
+ return regmap_update_bits(dev->regmap, ATMEL_I2SC_MR, mr_mask, mr);
}
static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
@@ -495,18 +503,28 @@ static int atmel_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
is_master = (mr & ATMEL_I2SC_MR_MODE_MASK) == ATMEL_I2SC_MR_MODE_MASTER;
/* If master starts, enable the audio clock. */
- if (is_master && mck_enabled)
- err = atmel_i2s_switch_mck_generator(dev, true);
- if (err)
- return err;
+ if (is_master && mck_enabled) {
+ if (!dev->clk_use_no) {
+ err = atmel_i2s_switch_mck_generator(dev, true);
+ if (err)
+ return err;
+ }
+ dev->clk_use_no++;
+ }
err = regmap_write(dev->regmap, ATMEL_I2SC_CR, cr);
if (err)
return err;
/* If master stops, disable the audio clock. */
- if (is_master && !mck_enabled)
- err = atmel_i2s_switch_mck_generator(dev, false);
+ if (is_master && !mck_enabled) {
+ if (dev->clk_use_no == 1) {
+ err = atmel_i2s_switch_mck_generator(dev, false);
+ if (err)
+ return err;
+ }
+ dev->clk_use_no--;
+ }
return err;
}
@@ -542,6 +560,7 @@ static struct snd_soc_dai_driver atmel_i2s_dai = {
},
.ops = &atmel_i2s_dai_ops,
.symmetric_rate = 1,
+ .symmetric_sample_bits = 1,
};
static const struct snd_soc_component_driver atmel_i2s_component = {
@@ -610,8 +629,7 @@ static int atmel_i2s_probe(struct platform_device *pdev)
dev->caps = match->data;
/* Map I/O registers. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
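The atmel-i2s trigger hunks above add a use count (dev->clk_use_no) so the master clock generator is switched on only for the first active stream and off only when the last one stops. A sketch of that refcounting, with placeholder names instead of the driver's own struct and switch function:

#include <linux/types.h>

struct example_i2s {
	int clk_use_no;			/* mirrors dev->clk_use_no added above */
};

static int example_switch_mck(struct example_i2s *dev, bool enable)
{
	return 0;			/* stand-in for atmel_i2s_switch_mck_generator() */
}

static int example_mck_get(struct example_i2s *dev)
{
	int err = 0;

	if (!dev->clk_use_no)		/* first user enables the clock */
		err = example_switch_mck(dev, true);
	if (!err)
		dev->clk_use_no++;
	return err;
}

static int example_mck_put(struct example_i2s *dev)
{
	int err = 0;

	if (dev->clk_use_no == 1)	/* last user disables it */
		err = example_switch_mck(dev, false);
	if (!err)
		dev->clk_use_no--;
	return err;
}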
diff --git a/sound/soc/atmel/atmel-pdmic.c b/sound/soc/atmel/atmel-pdmic.c
index 8e1d8230b180..42117de299e7 100644
--- a/sound/soc/atmel/atmel-pdmic.c
+++ b/sound/soc/atmel/atmel-pdmic.c
@@ -620,8 +620,7 @@ static int atmel_pdmic_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(dev, res);
+ io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c
index 673bc16cb46a..8988f024a732 100644
--- a/sound/soc/atmel/mchp-i2s-mcc.c
+++ b/sound/soc/atmel/mchp-i2s-mcc.c
@@ -1008,8 +1008,7 @@ static int mchp_i2s_mcc_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
index f9a85fd01b79..0be7b4221c14 100644
--- a/sound/soc/atmel/mikroe-proto.c
+++ b/sound/soc/atmel/mikroe-proto.c
@@ -120,19 +120,22 @@ static int snd_proto_probe(struct platform_device *pdev)
dai->cpus->of_node = cpu_np;
dai->platforms->of_node = cpu_np;
- dai_fmt = snd_soc_of_parse_daifmt(np, NULL,
- &bitclkmaster, &framemaster);
+ dai_fmt = snd_soc_daifmt_parse_format(np, NULL);
+ snd_soc_daifmt_parse_clock_provider_as_phandle(np, NULL,
+ &bitclkmaster, &framemaster);
if (bitclkmaster != framemaster) {
dev_err(&pdev->dev, "Must be the same bitclock and frame master\n");
return -EINVAL;
}
if (bitclkmaster) {
- dai_fmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
if (codec_np == bitclkmaster)
dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
else
dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ } else {
+ dai_fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np, NULL);
}
+
of_node_put(bitclkmaster);
of_node_put(framemaster);
dai->dai_fmt = dai_fmt;
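The mikroe-proto hunk above splits the old single snd_soc_of_parse_daifmt() call into the newer helpers that parse the format and the clock-provider phandles separately. A condensed sketch of that flow, with error handling and DT prefixes omitted for brevity:

#include <linux/of.h>
#include <sound/soc.h>

static unsigned int example_parse_daifmt(struct device_node *np,
					 struct device_node *codec_np)
{
	struct device_node *bitclk = NULL, *frame = NULL;
	unsigned int fmt;

	fmt = snd_soc_daifmt_parse_format(np, NULL);
	snd_soc_daifmt_parse_clock_provider_as_phandle(np, NULL, &bitclk, &frame);

	if (bitclk)
		fmt |= (codec_np == bitclk) ? SND_SOC_DAIFMT_CBM_CFM
					    : SND_SOC_DAIFMT_CBS_CFS;
	else
		fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np, NULL);

	of_node_put(bitclk);
	of_node_put(frame);
	return fmt;
}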
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 9fbc3c1113cc..7745250fd743 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -159,7 +159,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
of_node_put(codec_np);
of_node_put(cpu_np);
- ret = snd_soc_register_card(card);
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev, "Platform device allocation failed\n");
goto out_put_audio;
@@ -180,7 +180,6 @@ static int sam9x5_wm8731_driver_remove(struct platform_device *pdev)
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct sam9x5_drvdata *priv = card->drvdata;
- snd_soc_unregister_card(card);
atmel_ssc_put_audio(priv->ssc_id);
return 0;
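The sam9x5_wm8731 change above swaps snd_soc_register_card() for its device-managed variant, which is why the explicit snd_soc_unregister_card() call can disappear from remove(). A minimal sketch of the pattern:

#include <linux/platform_device.h>
#include <sound/soc.h>

static int example_register(struct platform_device *pdev,
			    struct snd_soc_card *card)
{
	/*
	 * The card is unregistered automatically when the driver detaches,
	 * so remove() no longer needs to call snd_soc_unregister_card().
	 */
	return devm_snd_soc_register_card(&pdev->dev, card);
}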
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index ba03bb62ba96..fca5a3f2eec5 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -1308,7 +1308,6 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *child_node;
- struct resource *res;
struct cygnus_audio *cygaud;
int err;
int node_count;
@@ -1320,13 +1319,11 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
dev_set_drvdata(dev, cygaud);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
- cygaud->audio = devm_ioremap_resource(dev, res);
+ cygaud->audio = devm_platform_ioremap_resource_byname(pdev, "aud");
if (IS_ERR(cygaud->audio))
return PTR_ERR(cygaud->audio);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "i2s_in");
- cygaud->i2s_in = devm_ioremap_resource(dev, res);
+ cygaud->i2s_in = devm_platform_ioremap_resource_byname(pdev, "i2s_in");
if (IS_ERR(cygaud->i2s_in))
return PTR_ERR(cygaud->i2s_in);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 2a7b3e363069..db16071205ba 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -155,6 +155,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_PCM512x_I2C
imply SND_SOC_PCM512x_SPI
imply SND_SOC_RK3328
+ imply SND_SOC_RK817
imply SND_SOC_RT274
imply SND_SOC_RT286
imply SND_SOC_RT298
@@ -211,6 +212,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_TAS6424
imply SND_SOC_TDA7419
imply SND_SOC_TFA9879
+ imply SND_SOC_TFA989X
imply SND_SOC_TLV320ADCX140
imply SND_SOC_TLV320AIC23_I2C
imply SND_SOC_TLV320AIC23_SPI
@@ -232,6 +234,8 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_UDA1380
imply SND_SOC_WCD9335
imply SND_SOC_WCD934X
+ imply SND_SOC_WCD937X
+ imply SND_SOC_WCD938X
imply SND_SOC_LPASS_RX_MACRO
imply SND_SOC_LPASS_TX_MACRO
imply SND_SOC_WL1273
@@ -689,7 +693,7 @@ config SND_SOC_CS47L15
config SND_SOC_CS47L24
tristate
- depends on MFD_CS47L24
+ depends on MFD_CS47L24 && MFD_ARIZONA
config SND_SOC_CS47L35
tristate
@@ -1063,6 +1067,11 @@ config SND_SOC_RK3328
tristate "Rockchip RK3328 audio CODEC"
select REGMAP_MMIO
+config SND_SOC_RK817
+ tristate "Rockchip RK817 audio CODEC"
+ depends on MFD_RK808
+ select REGMAP_I2C
+
config SND_SOC_RL6231
tristate
default y if SND_SOC_RT5514=y
@@ -1180,7 +1189,7 @@ config SND_SOC_RT5631
depends on I2C
config SND_SOC_RT5640
- tristate
+ tristate "Realtek RT5640/RT5639 Codec"
depends on I2C
config SND_SOC_RT5645
@@ -1316,7 +1325,7 @@ config SND_SOC_SSM2305
high-efficiency mono Class-D audio power amplifiers.
config SND_SOC_SSM2518
- tristate
+ tristate "Analog Devices SSM2518 Class-D Amplifier"
depends on I2C
config SND_SOC_SSM2602
@@ -1408,6 +1417,16 @@ config SND_SOC_TFA9879
tristate "NXP Semiconductors TFA9879 amplifier"
depends on I2C
+config SND_SOC_TFA989X
+ tristate "NXP/Goodix TFA989X (TFA1) amplifiers"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Enable support for NXP (now Goodix) TFA989X (TFA1 family) speaker
+ amplifiers, e.g. TFA9895.
+ Note that the driver currently bypasses the built-in "CoolFlux DSP"
+ and does not support (hardware) volume control.
+
config SND_SOC_TLV320AIC23
tristate
@@ -1525,14 +1544,32 @@ config SND_SOC_WCD9335
Qualcomm Technologies, Inc. (QTI) multimedia solutions,
including the MSM8996, MSM8976, and MSM8956 chipsets.
+config SND_SOC_WCD_MBHC
+ tristate
+
config SND_SOC_WCD934X
tristate "WCD9340/WCD9341 Codec"
depends on COMMON_CLK
+ select SND_SOC_WCD_MBHC
depends on MFD_WCD934X
help
The WCD9340/9341 is a audio codec IC Integrated in
Qualcomm SoCs like SDM845.
+config SND_SOC_WCD938X
+ depends on SND_SOC_WCD938X_SDW
+ tristate
+ depends on SOUNDWIRE || !SOUNDWIRE
+
+config SND_SOC_WCD938X_SDW
+ tristate "WCD9380/WCD9385 Codec - SDW"
+ select SND_SOC_WCD938X
+ depends on SOUNDWIRE
+ select REGMAP_SOUNDWIRE
+ help
+ The WCD9380/9385 is a audio codec IC Integrated in
+ Qualcomm SoCs like SM8250.
+
config SND_SOC_WL1273
tristate
@@ -1558,11 +1595,11 @@ config SND_SOC_WM5100
config SND_SOC_WM5102
tristate
- depends on MFD_WM5102
+ depends on MFD_WM5102 && MFD_ARIZONA
config SND_SOC_WM5110
tristate
- depends on MFD_WM5110
+ depends on MFD_WM5110 && MFD_ARIZONA
config SND_SOC_WM8350
tristate
@@ -1727,11 +1764,11 @@ config SND_SOC_WM8996
config SND_SOC_WM8997
tristate
- depends on MFD_WM8997
+ depends on MFD_WM8997 && MFD_ARIZONA
config SND_SOC_WM8998
tristate
- depends on MFD_WM8998
+ depends on MFD_WM8998 && MFD_ARIZONA
config SND_SOC_WM9081
tristate
@@ -1778,11 +1815,6 @@ config SND_SOC_ZL38060
which consists of a Digital Signal Processor (DSP), several Digital
Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs.
-config SND_SOC_ZX_AUD96P22
- tristate "ZTE ZX AUD96P22 CODEC"
- depends on I2C
- select REGMAP_I2C
-
# Amp
config SND_SOC_LM4857
tristate
@@ -1871,18 +1903,22 @@ config SND_SOC_TPA6130A2
config SND_SOC_LPASS_WSA_MACRO
depends on COMMON_CLK
+ select REGMAP_MMIO
tristate "Qualcomm WSA Macro in LPASS(Low Power Audio SubSystem)"
config SND_SOC_LPASS_VA_MACRO
depends on COMMON_CLK
+ select REGMAP_MMIO
tristate "Qualcomm VA Macro in LPASS(Low Power Audio SubSystem)"
config SND_SOC_LPASS_RX_MACRO
depends on COMMON_CLK
+ select REGMAP_MMIO
tristate "Qualcomm RX Macro in LPASS(Low Power Audio SubSystem)"
config SND_SOC_LPASS_TX_MACRO
depends on COMMON_CLK
+ select REGMAP_MMIO
tristate "Qualcomm TX Macro in LPASS(Low Power Audio SubSystem)"
endmenu
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 0efdba609048..7bb38c370842 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -166,6 +166,7 @@ snd-soc-pcm512x-objs := pcm512x.o
snd-soc-pcm512x-i2c-objs := pcm512x-i2c.o
snd-soc-pcm512x-spi-objs := pcm512x-spi.o
snd-soc-rk3328-objs := rk3328_codec.o
+snd-soc-rk817-objs := rk817_codec.o
snd-soc-rl6231-objs := rl6231.o
snd-soc-rl6347a-objs := rl6347a.o
snd-soc-rt1011-objs := rt1011.o
@@ -229,6 +230,7 @@ snd-soc-tas6424-objs := tas6424.o
snd-soc-tda7419-objs := tda7419.o
snd-soc-tas2770-objs := tas2770.o
snd-soc-tfa9879-objs := tfa9879.o
+snd-soc-tfa989x-objs := tfa989x.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
snd-soc-tlv320aic23-spi-objs := tlv320aic23-spi.o
@@ -250,8 +252,11 @@ snd-soc-twl6040-objs := twl6040.o
snd-soc-uda1334-objs := uda1334.o
snd-soc-uda134x-objs := uda134x.o
snd-soc-uda1380-objs := uda1380.o
+snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
snd-soc-wcd9335-objs := wcd-clsh-v2.o wcd9335.o
snd-soc-wcd934x-objs := wcd-clsh-v2.o wcd934x.o
+snd-soc-wcd938x-objs := wcd938x.o wcd-clsh-v2.o
+snd-soc-wcd938x-sdw-objs := wcd938x-sdw.o
snd-soc-wl1273-objs := wl1273.o
snd-soc-wm-adsp-objs := wm_adsp.o
snd-soc-wm0010-objs := wm0010.o
@@ -487,6 +492,7 @@ obj-$(CONFIG_SND_SOC_PCM512x) += snd-soc-pcm512x.o
obj-$(CONFIG_SND_SOC_PCM512x_I2C) += snd-soc-pcm512x-i2c.o
obj-$(CONFIG_SND_SOC_PCM512x_SPI) += snd-soc-pcm512x-spi.o
obj-$(CONFIG_SND_SOC_RK3328) += snd-soc-rk3328.o
+obj-$(CONFIG_SND_SOC_RK817) += snd-soc-rk817.o
obj-$(CONFIG_SND_SOC_RL6231) += snd-soc-rl6231.o
obj-$(CONFIG_SND_SOC_RL6347A) += snd-soc-rl6347a.o
obj-$(CONFIG_SND_SOC_RT1011) += snd-soc-rt1011.o
@@ -551,6 +557,7 @@ obj-$(CONFIG_SND_SOC_TAS6424) += snd-soc-tas6424.o
obj-$(CONFIG_SND_SOC_TDA7419) += snd-soc-tda7419.o
obj-$(CONFIG_SND_SOC_TAS2770) += snd-soc-tas2770.o
obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
+obj-$(CONFIG_SND_SOC_TFA989X) += snd-soc-tfa989x.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_SPI) += snd-soc-tlv320aic23-spi.o
@@ -572,8 +579,14 @@ obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
obj-$(CONFIG_SND_SOC_UDA1334) += snd-soc-uda1334.o
obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
+obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o
obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o
obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o
+obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x.o
+ifdef CONFIG_SND_SOC_WCD938X_SDW
+# avoid link failure by forcing sdw code built-in when needed
+obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x-sdw.o
+endif
obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o
obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o
obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index fe208cfdd3ba..4d2e78101f28 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -539,6 +539,15 @@ static int ak4613_dai_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
+/*
+ * Select below from Sound Card, not Auto
+ * SND_SOC_DAIFMT_CBC_CFC
+ * SND_SOC_DAIFMT_CBP_CFP
+ */
+static u64 ak4613_dai_formats =
+ SND_SOC_POSSIBLE_DAIFMT_I2S |
+ SND_SOC_POSSIBLE_DAIFMT_LEFT_J;
+
static const struct snd_soc_dai_ops ak4613_dai_ops = {
.startup = ak4613_dai_startup,
.shutdown = ak4613_dai_shutdown,
@@ -546,6 +555,8 @@ static const struct snd_soc_dai_ops ak4613_dai_ops = {
.set_fmt = ak4613_dai_set_fmt,
.trigger = ak4613_dai_trigger,
.hw_params = ak4613_dai_hw_params,
+ .auto_selectable_formats = &ak4613_dai_formats,
+ .num_auto_selectable_formats = 1,
};
#define AK4613_PCM_RATE (SNDRV_PCM_RATE_32000 |\
diff --git a/sound/soc/codecs/cirrus_legacy.h b/sound/soc/codecs/cirrus_legacy.h
new file mode 100644
index 000000000000..87c6fd79290d
--- /dev/null
+++ b/sound/soc/codecs/cirrus_legacy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Some small helpers for older Cirrus Logic parts.
+ *
+ * Copyright (C) 2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+static inline int cirrus_read_device_id(struct regmap *regmap, unsigned int reg)
+{
+ u8 devid[3];
+ int ret;
+
+ ret = regmap_bulk_read(regmap, reg, devid, ARRAY_SIZE(devid));
+ if (ret < 0)
+ return ret;
+
+ return ((devid[0] & 0xFF) << 12) |
+ ((devid[1] & 0xFF) << 4) |
+ ((devid[2] & 0xF0) >> 4);
+}
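The new header collects the three-register device-ID read that the Cirrus probe hunks below previously open-coded. A usage sketch only; EXAMPLE_DEVID_AB and EXAMPLE_CHIP_ID are placeholders, not real register definitions, and cirrus_legacy.h is assumed to be on the include path:

#include <linux/device.h>
#include <linux/regmap.h>
#include "cirrus_legacy.h"

#define EXAMPLE_DEVID_AB	0x01		/* placeholder register address */
#define EXAMPLE_CHIP_ID		0x35A22		/* placeholder expected ID */

static int example_check_id(struct regmap *regmap, struct device *dev)
{
	int devid;

	devid = cirrus_read_device_id(regmap, EXAMPLE_DEVID_AB);
	if (devid < 0)
		return devid;			/* regmap_bulk_read() error code */

	if (devid != EXAMPLE_CHIP_ID) {
		dev_err(dev, "unexpected device ID %#x\n", devid);
		return -ENODEV;
	}

	return 0;
}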
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 88e79b9f52ed..933e3d627e5f 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -30,6 +30,7 @@
#include <dt-bindings/sound/cs35l32.h>
#include "cs35l32.h"
+#include "cirrus_legacy.h"
#define CS35L32_NUM_SUPPLIES 2
static const char *const cs35l32_supply_names[CS35L32_NUM_SUPPLIES] = {
@@ -351,8 +352,7 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
struct cs35l32_private *cs35l32;
struct cs35l32_platform_data *pdata =
dev_get_platdata(&i2c_client->dev);
- int ret, i;
- unsigned int devid = 0;
+ int ret, i, devid;
unsigned int reg;
cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l32), GFP_KERNEL);
@@ -407,40 +407,40 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
/* Reset the Device */
cs35l32->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
"reset", GPIOD_OUT_LOW);
- if (IS_ERR(cs35l32->reset_gpio))
- return PTR_ERR(cs35l32->reset_gpio);
+ if (IS_ERR(cs35l32->reset_gpio)) {
+ ret = PTR_ERR(cs35l32->reset_gpio);
+ goto err_supplies;
+ }
gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
/* initialize codec */
- ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
- devid = (reg & 0xFF) << 12;
-
- ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
-
- ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs35l32->regmap, CS35L32_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(&i2c_client->dev, "Failed to read device ID: %d\n", ret);
+ goto err_disable;
+ }
if (devid != CS35L32_CHIP_ID) {
ret = -ENODEV;
dev_err(&i2c_client->dev,
"CS35L32 Device ID (%X). Expected %X\n",
devid, CS35L32_CHIP_ID);
- return ret;
+ goto err_disable;
}
ret = regmap_read(cs35l32->regmap, CS35L32_REV_ID, &reg);
if (ret < 0) {
dev_err(&i2c_client->dev, "Get Revision ID failed\n");
- return ret;
+ goto err_disable;
}
ret = regmap_register_patch(cs35l32->regmap, cs35l32_monitor_patch,
ARRAY_SIZE(cs35l32_monitor_patch));
if (ret < 0) {
dev_err(&i2c_client->dev, "Failed to apply errata patch\n");
- return ret;
+ goto err_disable;
}
dev_info(&i2c_client->dev,
@@ -481,7 +481,7 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
CS35L32_PDN_AMP);
/* Clear MCLK Error Bit since we don't have the clock yet */
- ret = regmap_read(cs35l32->regmap, CS35L32_INT_STATUS_1, &reg);
+ regmap_read(cs35l32->regmap, CS35L32_INT_STATUS_1, &reg);
ret = devm_snd_soc_register_component(&i2c_client->dev,
&soc_component_dev_cs35l32, cs35l32_dai,
@@ -492,6 +492,8 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
return 0;
err_disable:
+ gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+err_supplies:
regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
cs35l32->supplies);
return ret;
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index e8f3dcfd144d..2a6f5e46d031 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -34,6 +34,7 @@
#include <linux/of_irq.h>
#include "cs35l33.h"
+#include "cirrus_legacy.h"
#define CS35L33_BOOT_DELAY 50
@@ -1190,12 +1191,12 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
regcache_cache_only(cs35l33->regmap, false);
/* initialize codec */
- ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_AB, &reg);
- devid = (reg & 0xFF) << 12;
- ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
- ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs35l33->regmap, CS35L33_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(&i2c_client->dev, "Failed to read device ID: %d\n", ret);
+ goto err_enable;
+ }
if (devid != CS35L33_CHIP_ID) {
dev_err(&i2c_client->dev,
@@ -1243,6 +1244,8 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
return 0;
err_enable:
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 0);
+
regulator_bulk_disable(cs35l33->num_core_supplies,
cs35l33->core_supplies);
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 3d3c3c34dfe2..ed678241c22b 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -34,6 +34,7 @@
#include <sound/cs35l34.h>
#include "cs35l34.h"
+#include "cirrus_legacy.h"
#define PDN_DONE_ATTEMPTS 10
#define CS35L34_START_DELAY 50
@@ -999,9 +1000,8 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
struct cs35l34_private *cs35l34;
struct cs35l34_platform_data *pdata =
dev_get_platdata(&i2c_client->dev);
- int i;
+ int i, devid;
int ret;
- unsigned int devid = 0;
unsigned int reg;
cs35l34 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l34), GFP_KERNEL);
@@ -1042,13 +1042,15 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
} else {
pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_regulator;
+ }
if (i2c_client->dev.of_node) {
ret = cs35l34_handle_of_data(i2c_client, pdata);
if (ret != 0)
- return ret;
+ goto err_regulator;
}
cs35l34->pdata = *pdata;
@@ -1062,33 +1064,34 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
"reset-gpios", GPIOD_OUT_LOW);
- if (IS_ERR(cs35l34->reset_gpio))
- return PTR_ERR(cs35l34->reset_gpio);
+ if (IS_ERR(cs35l34->reset_gpio)) {
+ ret = PTR_ERR(cs35l34->reset_gpio);
+ goto err_regulator;
+ }
gpiod_set_value_cansleep(cs35l34->reset_gpio, 1);
msleep(CS35L34_START_DELAY);
- ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_AB, &reg);
-
- devid = (reg & 0xFF) << 12;
- ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
- ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs35l34->regmap, CS35L34_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(&i2c_client->dev, "Failed to read device ID: %d\n", ret);
+ goto err_reset;
+ }
if (devid != CS35L34_CHIP_ID) {
dev_err(&i2c_client->dev,
"CS35l34 Device ID (%X). Expected ID %X\n",
devid, CS35L34_CHIP_ID);
ret = -ENODEV;
- goto err_regulator;
+ goto err_reset;
}
ret = regmap_read(cs35l34->regmap, CS35L34_REV_ID, &reg);
if (ret < 0) {
dev_err(&i2c_client->dev, "Get Revision ID failed\n");
- goto err_regulator;
+ goto err_reset;
}
dev_info(&i2c_client->dev,
@@ -1113,11 +1116,13 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
if (ret < 0) {
dev_err(&i2c_client->dev,
"%s: Register component failed\n", __func__);
- goto err_regulator;
+ goto err_reset;
}
return 0;
+err_reset:
+ gpiod_set_value_cansleep(cs35l34->reset_gpio, 0);
err_regulator:
regulator_bulk_disable(cs35l34->num_core_supplies,
cs35l34->core_supplies);
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
index f20ed838b958..7a5588f1df01 100644
--- a/sound/soc/codecs/cs35l35.c
+++ b/sound/soc/codecs/cs35l35.c
@@ -33,6 +33,7 @@
#include <linux/completion.h>
#include "cs35l35.h"
+#include "cirrus_legacy.h"
/*
* Some fields take zero as a valid value so use a high bit flag that won't
@@ -367,16 +368,16 @@ static int cs35l35_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
struct snd_soc_component *component = codec_dai->component;
struct cs35l35_private *cs35l35 = snd_soc_component_get_drvdata(component);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
CS35L35_MS_MASK, 1 << CS35L35_MS_SHIFT);
- cs35l35->slave_mode = false;
+ cs35l35->clock_consumer = false;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
regmap_update_bits(cs35l35->regmap, CS35L35_CLK_CTL1,
CS35L35_MS_MASK, 0 << CS35L35_MS_SHIFT);
- cs35l35->slave_mode = true;
+ cs35l35->clock_consumer = true;
break;
default:
return -EINVAL;
@@ -495,10 +496,10 @@ static int cs35l35_hw_params(struct snd_pcm_substream *substream,
* the Class H algorithm does not enable weak-drive operation for
* nonzero values of CH_WKFET_DELAY if SP_RATE = 01 or 10
*/
- errata_chk = clk_ctl & CS35L35_SP_RATE_MASK;
+ errata_chk = (clk_ctl & CS35L35_SP_RATE_MASK) >> CS35L35_SP_RATE_SHIFT;
if (classh->classh_wk_fet_disable == 0x00 &&
- (errata_chk == 0x01 || errata_chk == 0x03)) {
+ (errata_chk == 0x01 || errata_chk == 0x02)) {
ret = regmap_update_bits(cs35l35->regmap,
CS35L35_CLASS_H_FET_DRIVE_CTL,
CS35L35_CH_WKFET_DEL_MASK,
@@ -555,8 +556,8 @@ static int cs35l35_hw_params(struct snd_pcm_substream *substream,
}
sp_sclks = ((cs35l35->sclk / srate) / 4) - 1;
- /* Only certain ratios are supported in I2S Slave Mode */
- if (cs35l35->slave_mode) {
+ /* Only certain ratios supported when device is a clock consumer */
+ if (cs35l35->clock_consumer) {
switch (sp_sclks) {
case CS35L35_SP_SCLKS_32FS:
case CS35L35_SP_SCLKS_48FS:
@@ -567,7 +568,7 @@ static int cs35l35_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
} else {
- /* Only certain ratios supported in I2S MASTER Mode */
+ /* Only certain ratios supported when device is a clock provider */
switch (sp_sclks) {
case CS35L35_SP_SCLKS_32FS:
case CS35L35_SP_SCLKS_64FS:
@@ -1471,9 +1472,8 @@ static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
struct cs35l35_private *cs35l35;
struct device *dev = &i2c_client->dev;
struct cs35l35_platform_data *pdata = dev_get_platdata(dev);
- int i;
+ int i, devid;
int ret;
- unsigned int devid = 0;
unsigned int reg;
cs35l35 = devm_kzalloc(dev, sizeof(struct cs35l35_private), GFP_KERNEL);
@@ -1552,13 +1552,12 @@ static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
goto err;
}
/* initialize codec */
- ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_AB, &reg);
-
- devid = (reg & 0xFF) << 12;
- ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
- ret = regmap_read(cs35l35->regmap, CS35L35_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs35l35->regmap, CS35L35_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(dev, "Failed to read device ID: %d\n", ret);
+ goto err;
+ }
if (devid != CS35L35_CHIP_ID) {
dev_err(dev, "CS35L35 Device ID (%X). Expected ID %X\n",
diff --git a/sound/soc/codecs/cs35l35.h b/sound/soc/codecs/cs35l35.h
index ffb154cd962c..5e4509f41b32 100644
--- a/sound/soc/codecs/cs35l35.h
+++ b/sound/soc/codecs/cs35l35.h
@@ -168,6 +168,7 @@
#define CS35L35_SP_SCLKS_48FS 0x0B
#define CS35L35_SP_SCLKS_64FS 0x0F
#define CS35L35_SP_RATE_MASK 0xC0
+#define CS35L35_SP_RATE_SHIFT 6
#define CS35L35_PDN_BST_MASK 0x06
#define CS35L35_PDN_BST_FETON_SHIFT 1
@@ -282,7 +283,7 @@ struct cs35l35_private {
int sclk;
bool pdm_mode;
bool i2s_mode;
- bool slave_mode;
+ bool clock_consumer;
/* GPIO for /RST */
struct gpio_desc *reset_gpio;
struct completion pdn_done;
diff --git a/sound/soc/codecs/cs35l36.c b/sound/soc/codecs/cs35l36.c
index a038bcec2d17..d83c1b318c1c 100644
--- a/sound/soc/codecs/cs35l36.c
+++ b/sound/soc/codecs/cs35l36.c
@@ -756,14 +756,14 @@ static int cs35l36_set_dai_fmt(struct snd_soc_dai *component_dai,
{
struct cs35l36_private *cs35l36 =
snd_soc_component_get_drvdata(component_dai->component);
- unsigned int asp_fmt, lrclk_fmt, sclk_fmt, slave_mode, clk_frc;
+ unsigned int asp_fmt, lrclk_fmt, sclk_fmt, clock_provider, clk_frc;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- slave_mode = 1;
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
+ clock_provider = 1;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
- slave_mode = 0;
+ case SND_SOC_DAIFMT_CBC_CFC:
+ clock_provider = 0;
break;
default:
return -EINVAL;
@@ -771,10 +771,10 @@ static int cs35l36_set_dai_fmt(struct snd_soc_dai *component_dai,
regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL,
CS35L36_SCLK_MSTR_MASK,
- slave_mode << CS35L36_SCLK_MSTR_SHIFT);
+ clock_provider << CS35L36_SCLK_MSTR_SHIFT);
regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL,
CS35L36_LRCLK_MSTR_MASK,
- slave_mode << CS35L36_LRCLK_MSTR_SHIFT);
+ clock_provider << CS35L36_LRCLK_MSTR_SHIFT);
switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
case SND_SOC_DAIFMT_CONT:
@@ -1156,7 +1156,7 @@ static int cs35l36_component_probe(struct snd_soc_component *component)
{
struct cs35l36_private *cs35l36 =
snd_soc_component_get_drvdata(component);
- int ret = 0;
+ int ret;
if ((cs35l36->rev_id == CS35L36_REV_A0) && cs35l36->pdata.dcm_mode) {
regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_DCM_CTRL,
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d76be44f46b4..cffd6111afac 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -573,7 +573,7 @@ static int cs4265_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
struct cs4265_private *cs4265;
- int ret = 0;
+ int ret;
unsigned int devid = 0;
unsigned int reg;
@@ -602,6 +602,11 @@ static int cs4265_i2c_probe(struct i2c_client *i2c_client,
i2c_set_clientdata(i2c_client, cs4265);
ret = regmap_read(cs4265->regmap, CS4265_CHIP_ID, &reg);
+ if (ret) {
+ dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret);
+ return ret;
+ }
+
devid = reg & CS4265_CHIP_ID_MASK;
if (devid != CS4265_CHIP_ID_VAL) {
ret = -ENODEV;
@@ -616,10 +621,9 @@ static int cs4265_i2c_probe(struct i2c_client *i2c_client,
regmap_write(cs4265->regmap, CS4265_PWRCTL, 0x0F);
- ret = devm_snd_soc_register_component(&i2c_client->dev,
+ return devm_snd_soc_register_component(&i2c_client->dev,
&soc_component_cs4265, cs4265_dai,
ARRAY_SIZE(cs4265_dai));
- return ret;
}
static const struct of_device_id cs4265_of_match[] = {
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index 77473c226f9e..99c022be94a6 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -19,11 +19,11 @@
#include <linux/gpio.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
@@ -36,6 +36,7 @@
#include <dt-bindings/sound/cs42l42.h>
#include "cs42l42.h"
+#include "cirrus_legacy.h"
static const struct reg_default cs42l42_reg_defaults[] = {
{ CS42L42_FRZ_CTL, 0x00 },
@@ -404,7 +405,7 @@ static const struct regmap_config cs42l42_regmap = {
.use_single_write = true,
};
-static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true);
static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
static const char * const cs42l42_hpf_freq_text[] = {
@@ -424,34 +425,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
CS42L42_ADC_WNF_CF_SHIFT,
cs42l42_wnf3_freq_text);
-static const char * const cs42l42_wnf05_freq_text[] = {
- "280Hz", "315Hz", "350Hz", "385Hz",
- "420Hz", "455Hz", "490Hz", "525Hz"
-};
-
-static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
- CS42L42_ADC_WNF_CF_SHIFT,
- cs42l42_wnf05_freq_text);
-
static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
/* ADC Volume and Filter Controls */
SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
- CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+ CS42L42_ADC_NOTCH_DIS_SHIFT, true, true),
SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
CS42L42_ADC_INV_SHIFT, true, false),
SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
- SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
- CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+ SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv),
SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
CS42L42_ADC_WNF_EN_SHIFT, true, false),
SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
CS42L42_ADC_HPF_EN_SHIFT, true, false),
SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
- SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
/* DAC Volume and Filter Controls */
SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
@@ -470,8 +460,8 @@ static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("HP"),
SND_SOC_DAPM_DAC("DAC", NULL, CS42L42_PWR_CTL1, CS42L42_HP_PDN_SHIFT, 1),
SND_SOC_DAPM_MIXER("MIXER", CS42L42_PWR_CTL1, CS42L42_MIXER_PDN_SHIFT, 1, NULL, 0),
- SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH1_SHIFT, 0),
- SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH2_SHIFT, 0),
+ SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, SND_SOC_NOPM, 0, 0),
/* Playback Requirements */
SND_SOC_DAPM_SUPPLY("ASP DAI0", CS42L42_PWR_CTL1, CS42L42_ASP_DAI_PDN_SHIFT, 1, NULL, 0),
@@ -521,26 +511,33 @@ static const struct snd_soc_dapm_route cs42l42_audio_map[] = {
{ "SDOUT2", NULL, "ASP TX EN" },
};
+static int cs42l42_set_jack(struct snd_soc_component *component, struct snd_soc_jack *jk, void *d)
+{
+ struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+
+ cs42l42->jack = jk;
+
+ regmap_update_bits(cs42l42->regmap, CS42L42_TSRS_PLUG_INT_MASK,
+ CS42L42_RS_PLUG_MASK | CS42L42_RS_UNPLUG_MASK |
+ CS42L42_TS_PLUG_MASK | CS42L42_TS_UNPLUG_MASK,
+ (1 << CS42L42_RS_PLUG_SHIFT) | (1 << CS42L42_RS_UNPLUG_SHIFT) |
+ (0 << CS42L42_TS_PLUG_SHIFT) | (0 << CS42L42_TS_UNPLUG_SHIFT));
+
+ return 0;
+}
+
static int cs42l42_component_probe(struct snd_soc_component *component)
{
- struct cs42l42_private *cs42l42 =
- (struct cs42l42_private *)snd_soc_component_get_drvdata(component);
- struct snd_soc_card *crd = component->card;
- int ret = 0;
+ struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
cs42l42->component = component;
- ret = snd_soc_card_jack_new(crd, "CS42L42 Headset", SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &cs42l42->jack, NULL, 0);
- if (ret < 0)
- dev_err(component->dev, "Cannot create CS42L42 Headset: %d\n", ret);
-
- return ret;
+ return 0;
}
static const struct snd_soc_component_driver soc_component_dev_cs42l42 = {
.probe = cs42l42_component_probe,
+ .set_jack = cs42l42_set_jack,
.dapm_widgets = cs42l42_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cs42l42_dapm_widgets),
.dapm_routes = cs42l42_audio_map,
@@ -581,6 +578,7 @@ struct cs42l42_pll_params {
u8 pll_divout;
u32 mclk_int;
u8 pll_cal_ratio;
+ u8 n;
};
/*
@@ -588,21 +586,23 @@ struct cs42l42_pll_params {
* Table 4-5 from the Datasheet
*/
static const struct cs42l42_pll_params pll_ratio_table[] = {
- { 1536000, 0, 1, 0x00, 0x7D, 0x000000, 0x03, 0x10, 12000000, 125 },
- { 2822400, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 11289600, 128 },
- { 3000000, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 12000000, 128 },
- { 3072000, 0, 1, 0x00, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125 },
- { 4000000, 0, 1, 0x00, 0x30, 0x800000, 0x03, 0x10, 12000000, 96 },
- { 4096000, 0, 1, 0x00, 0x2E, 0xE00000, 0x03, 0x10, 12000000, 94 },
- { 5644800, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 11289600, 128 },
- { 6000000, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 12000000, 128 },
- { 6144000, 0, 1, 0x01, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125 },
- { 11289600, 0, 0, 0, 0, 0, 0, 0, 11289600, 0 },
- { 12000000, 0, 0, 0, 0, 0, 0, 0, 12000000, 0 },
- { 12288000, 0, 0, 0, 0, 0, 0, 0, 12288000, 0 },
- { 22579200, 1, 0, 0, 0, 0, 0, 0, 22579200, 0 },
- { 24000000, 1, 0, 0, 0, 0, 0, 0, 24000000, 0 },
- { 24576000, 1, 0, 0, 0, 0, 0, 0, 24576000, 0 }
+ { 1536000, 0, 1, 0x00, 0x7D, 0x000000, 0x03, 0x10, 12000000, 125, 2},
+ { 2304000, 0, 1, 0x00, 0x55, 0xC00000, 0x02, 0x10, 12288000, 85, 2},
+ { 2400000, 0, 1, 0x00, 0x50, 0x000000, 0x03, 0x10, 12000000, 80, 2},
+ { 2822400, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 11289600, 128, 1},
+ { 3000000, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 12000000, 128, 1},
+ { 3072000, 0, 1, 0x00, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125, 1},
+ { 4000000, 0, 1, 0x00, 0x30, 0x800000, 0x03, 0x10, 12000000, 96, 1},
+ { 4096000, 0, 1, 0x00, 0x2E, 0xE00000, 0x03, 0x10, 12000000, 94, 1},
+ { 5644800, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 11289600, 128, 1},
+ { 6000000, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 12000000, 128, 1},
+ { 6144000, 0, 1, 0x01, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125, 1},
+ { 11289600, 0, 0, 0, 0, 0, 0, 0, 11289600, 0, 1},
+ { 12000000, 0, 0, 0, 0, 0, 0, 0, 12000000, 0, 1},
+ { 12288000, 0, 0, 0, 0, 0, 0, 0, 12288000, 0, 1},
+ { 22579200, 1, 0, 0, 0, 0, 0, 0, 22579200, 0, 1},
+ { 24000000, 1, 0, 0, 0, 0, 0, 0, 24000000, 0, 1},
+ { 24576000, 1, 0, 0, 0, 0, 0, 0, 24576000, 0, 1}
};
static int cs42l42_pll_config(struct snd_soc_component *component)
@@ -619,6 +619,8 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) {
if (pll_ratio_table[i].sclk == clk) {
+ cs42l42->pll_config = i;
+
/* Configure the internal sample rate */
snd_soc_component_update_bits(component, CS42L42_MCLK_CTL,
CS42L42_INTERNAL_FS_MASK,
@@ -627,14 +629,9 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
(pll_ratio_table[i].mclk_int !=
24000000)) <<
CS42L42_INTERNAL_FS_SHIFT);
- /* Set the MCLK src (PLL or SCLK) and the divide
- * ratio
- */
+
snd_soc_component_update_bits(component, CS42L42_MCLK_SRC_SEL,
- CS42L42_MCLK_SRC_SEL_MASK |
CS42L42_MCLKDIV_MASK,
- (pll_ratio_table[i].mclk_src_sel
- << CS42L42_MCLK_SRC_SEL_SHIFT) |
(pll_ratio_table[i].mclk_div <<
CS42L42_MCLKDIV_SHIFT));
/* Set up the LRCLK */
@@ -670,15 +667,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
CS42L42_FSYNC_PULSE_WIDTH_MASK,
CS42L42_FRAC1_VAL(fsync - 1) <<
CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
- snd_soc_component_update_bits(component,
- CS42L42_ASP_FRM_CFG,
- CS42L42_ASP_5050_MASK,
- CS42L42_ASP_5050_MASK);
- /* Set the frame delay to 1.0 SCLK clocks */
- snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG,
- CS42L42_ASP_FSD_MASK,
- CS42L42_ASP_FSD_1_0 <<
- CS42L42_ASP_FSD_SHIFT);
/* Set the sample rates (96k or lower) */
snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN,
CS42L42_FS_EN_MASK,
@@ -738,8 +726,12 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
snd_soc_component_update_bits(component,
CS42L42_PLL_CTL3,
CS42L42_PLL_DIVOUT_MASK,
- pll_ratio_table[i].pll_divout
+ (pll_ratio_table[i].pll_divout * pll_ratio_table[i].n)
<< CS42L42_PLL_DIVOUT_SHIFT);
+ if (pll_ratio_table[i].n != 1)
+ cs42l42->pll_divout = pll_ratio_table[i].pll_divout;
+ else
+ cs42l42->pll_divout = 0;
snd_soc_component_update_bits(component,
CS42L42_PLL_CAL_RATIO,
CS42L42_PLL_CAL_RATIO_MASK,
@@ -774,7 +766,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- case SND_SOC_DAIFMT_LEFT_J:
+ /*
+ * 5050 mode, frame starts on falling edge of LRCLK,
+ * frame delayed by 1.0 SCLKs
+ */
+ snd_soc_component_update_bits(component,
+ CS42L42_ASP_FRM_CFG,
+ CS42L42_ASP_STP_MASK |
+ CS42L42_ASP_5050_MASK |
+ CS42L42_ASP_FSD_MASK,
+ CS42L42_ASP_5050_MASK |
+ (CS42L42_ASP_FSD_1_0 <<
+ CS42L42_ASP_FSD_SHIFT));
break;
default:
return -EINVAL;
@@ -804,6 +807,25 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return 0;
}
+static int cs42l42_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+
+ /*
+ * Sample rates < 44.1 kHz would produce an out-of-range SCLK with
+ * a standard I2S frame. If the machine driver sets SCLK it must be
+ * legal.
+ */
+ if (cs42l42->sclk)
+ return 0;
+
+ /* Machine driver has not set a SCLK, limit bottom end to 44.1 kHz */
+ return snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ 44100, 192000);
+}
+
static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -817,6 +839,10 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
cs42l42->srate = params_rate(params);
cs42l42->bclk = snd_soc_params_to_bclk(params);
+ /* I2S frame always has 2 channels even for mono audio */
+ if (channels == 1)
+ cs42l42->bclk *= 2;
+
switch(substream->stream) {
case SNDRV_PCM_STREAM_CAPTURE:
if (channels == 2) {
@@ -840,6 +866,17 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
CS42L42_ASP_RX_CH_AP_MASK |
CS42L42_ASP_RX_CH_RES_MASK, val);
+
+ /* Channel B comes from the last active channel */
+ snd_soc_component_update_bits(component, CS42L42_SP_RX_CH_SEL,
+ CS42L42_SP_RX_CHB_SEL_MASK,
+ (channels - 1) << CS42L42_SP_RX_CHB_SEL_SHIFT);
+
+ /* Both LRCLK slots must be enabled */
+ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_EN,
+ CS42L42_ASP_RX0_CH_EN_MASK,
+ BIT(CS42L42_ASP_RX0_CH1_SHIFT) |
+ BIT(CS42L42_ASP_RX0_CH2_SHIFT));
break;
default:
break;
@@ -885,15 +922,33 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
*/
regmap_multi_reg_write(cs42l42->regmap, cs42l42_to_osc_seq,
ARRAY_SIZE(cs42l42_to_osc_seq));
+
+ /* Must disconnect PLL before stopping it */
+ snd_soc_component_update_bits(component,
+ CS42L42_MCLK_SRC_SEL,
+ CS42L42_MCLK_SRC_SEL_MASK,
+ 0);
+ usleep_range(100, 200);
+
snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
CS42L42_PLL_START_MASK, 0);
}
} else {
if (!cs42l42->stream_use) {
/* SCLK must be running before codec unmute */
- if ((cs42l42->bclk < 11289600) && (cs42l42->sclk < 11289600)) {
+ if (pll_ratio_table[cs42l42->pll_config].mclk_src_sel) {
snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
CS42L42_PLL_START_MASK, 1);
+
+ if (cs42l42->pll_divout) {
+ usleep_range(CS42L42_PLL_DIVOUT_TIME_US,
+ CS42L42_PLL_DIVOUT_TIME_US * 2);
+ snd_soc_component_update_bits(component, CS42L42_PLL_CTL3,
+ CS42L42_PLL_DIVOUT_MASK,
+ cs42l42->pll_divout <<
+ CS42L42_PLL_DIVOUT_SHIFT);
+ }
+
ret = regmap_read_poll_timeout(cs42l42->regmap,
CS42L42_PLL_LOCK_STATUS,
regval,
@@ -902,6 +957,12 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
CS42L42_PLL_LOCK_TIMEOUT_US);
if (ret < 0)
dev_warn(component->dev, "PLL failed to lock: %d\n", ret);
+
+ /* PLL must be running to drive glitchless switch logic */
+ snd_soc_component_update_bits(component,
+ CS42L42_MCLK_SRC_SEL,
+ CS42L42_MCLK_SRC_SEL_MASK,
+ CS42L42_MCLK_SRC_SEL_MASK);
}
/* Mark SCLK as present, turn off internal oscillator */
@@ -935,8 +996,8 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
SNDRV_PCM_FMTBIT_S24_LE |\
SNDRV_PCM_FMTBIT_S32_LE )
-
static const struct snd_soc_dai_ops cs42l42_ops = {
+ .startup = cs42l42_dai_startup,
.hw_params = cs42l42_pcm_hw_params,
.set_fmt = cs42l42_set_dai_fmt,
.set_sysclk = cs42l42_set_sysclk,
@@ -1028,7 +1089,7 @@ static void cs42l42_process_hs_type_detect(struct cs42l42_private *cs42l42)
CS42L42_AUTO_HSBIAS_HIZ_MASK |
CS42L42_TIP_SENSE_EN_MASK |
CS42L42_HSBIAS_SENSE_TRIP_MASK,
- (1 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+ (cs42l42->hs_bias_sense_en << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
(1 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
(0 << CS42L42_TIP_SENSE_EN_SHIFT) |
(3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
@@ -1413,11 +1474,11 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
switch(cs42l42->hs_type){
case CS42L42_PLUG_CTIA:
case CS42L42_PLUG_OMTP:
- snd_soc_jack_report(&cs42l42->jack, SND_JACK_HEADSET,
+ snd_soc_jack_report(cs42l42->jack, SND_JACK_HEADSET,
SND_JACK_HEADSET);
break;
case CS42L42_PLUG_HEADPHONE:
- snd_soc_jack_report(&cs42l42->jack, SND_JACK_HEADPHONE,
+ snd_soc_jack_report(cs42l42->jack, SND_JACK_HEADPHONE,
SND_JACK_HEADPHONE);
break;
default:
@@ -1445,14 +1506,18 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
switch(cs42l42->hs_type){
case CS42L42_PLUG_CTIA:
case CS42L42_PLUG_OMTP:
- snd_soc_jack_report(&cs42l42->jack, 0, SND_JACK_HEADSET);
+ snd_soc_jack_report(cs42l42->jack, 0, SND_JACK_HEADSET);
break;
case CS42L42_PLUG_HEADPHONE:
- snd_soc_jack_report(&cs42l42->jack, 0, SND_JACK_HEADPHONE);
+ snd_soc_jack_report(cs42l42->jack, 0, SND_JACK_HEADPHONE);
break;
default:
break;
}
+ snd_soc_jack_report(cs42l42->jack, 0,
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+
dev_dbg(component->dev, "Unplug event\n");
}
break;
@@ -1464,7 +1529,7 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
}
/* Check button detect status */
- if ((~masks[7]) & irq_params_table[7].mask) {
+ if (cs42l42->plug_state == CS42L42_TS_PLUG && ((~masks[7]) & irq_params_table[7].mask)) {
if (!(current_button_status &
CS42L42_M_HSBIAS_HIZ_MASK)) {
@@ -1475,7 +1540,7 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
report = cs42l42_handle_button_press(cs42l42);
}
- snd_soc_jack_report(&cs42l42->jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ snd_soc_jack_report(cs42l42->jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3);
}
}
@@ -1582,8 +1647,8 @@ static void cs42l42_set_interrupt_masks(struct cs42l42_private *cs42l42)
CS42L42_TS_UNPLUG_MASK,
(1 << CS42L42_RS_PLUG_SHIFT) |
(1 << CS42L42_RS_UNPLUG_SHIFT) |
- (0 << CS42L42_TS_PLUG_SHIFT) |
- (0 << CS42L42_TS_UNPLUG_SHIFT));
+ (1 << CS42L42_TS_PLUG_SHIFT) |
+ (1 << CS42L42_TS_UNPLUG_SHIFT));
}
static void cs42l42_setup_hs_type_detect(struct cs42l42_private *cs42l42)
@@ -1633,17 +1698,15 @@ static const unsigned int threshold_defaults[] = {
CS42L42_HS_DET_LEVEL_1
};
-static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
+static int cs42l42_handle_device_data(struct device *dev,
struct cs42l42_private *cs42l42)
{
- struct device_node *np = i2c_client->dev.of_node;
unsigned int val;
- unsigned int thresholds[CS42L42_NUM_BIASES];
+ u32 thresholds[CS42L42_NUM_BIASES];
int ret;
int i;
- ret = of_property_read_u32(np, "cirrus,ts-inv", &val);
-
+ ret = device_property_read_u32(dev, "cirrus,ts-inv", &val);
if (!ret) {
switch (val) {
case CS42L42_TS_INV_EN:
@@ -1651,7 +1714,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
cs42l42->ts_inv = val;
break;
default:
- dev_err(&i2c_client->dev,
+ dev_err(dev,
"Wrong cirrus,ts-inv DT value %d\n",
val);
cs42l42->ts_inv = CS42L42_TS_INV_DIS;
@@ -1664,8 +1727,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
CS42L42_TS_INV_MASK,
(cs42l42->ts_inv << CS42L42_TS_INV_SHIFT));
- ret = of_property_read_u32(np, "cirrus,ts-dbnc-rise", &val);
-
+ ret = device_property_read_u32(dev, "cirrus,ts-dbnc-rise", &val);
if (!ret) {
switch (val) {
case CS42L42_TS_DBNCE_0:
@@ -1679,7 +1741,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
cs42l42->ts_dbnc_rise = val;
break;
default:
- dev_err(&i2c_client->dev,
+ dev_err(dev,
"Wrong cirrus,ts-dbnc-rise DT value %d\n",
val);
cs42l42->ts_dbnc_rise = CS42L42_TS_DBNCE_1000;
@@ -1693,8 +1755,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
(cs42l42->ts_dbnc_rise <<
CS42L42_TS_RISE_DBNCE_TIME_SHIFT));
- ret = of_property_read_u32(np, "cirrus,ts-dbnc-fall", &val);
-
+ ret = device_property_read_u32(dev, "cirrus,ts-dbnc-fall", &val);
if (!ret) {
switch (val) {
case CS42L42_TS_DBNCE_0:
@@ -1708,7 +1769,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
cs42l42->ts_dbnc_fall = val;
break;
default:
- dev_err(&i2c_client->dev,
+ dev_err(dev,
"Wrong cirrus,ts-dbnc-fall DT value %d\n",
val);
cs42l42->ts_dbnc_fall = CS42L42_TS_DBNCE_0;
@@ -1722,13 +1783,12 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
(cs42l42->ts_dbnc_fall <<
CS42L42_TS_FALL_DBNCE_TIME_SHIFT));
- ret = of_property_read_u32(np, "cirrus,btn-det-init-dbnce", &val);
-
+ ret = device_property_read_u32(dev, "cirrus,btn-det-init-dbnce", &val);
if (!ret) {
if (val <= CS42L42_BTN_DET_INIT_DBNCE_MAX)
cs42l42->btn_det_init_dbnce = val;
else {
- dev_err(&i2c_client->dev,
+ dev_err(dev,
"Wrong cirrus,btn-det-init-dbnce DT value %d\n",
val);
cs42l42->btn_det_init_dbnce =
@@ -1739,14 +1799,13 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
CS42L42_BTN_DET_INIT_DBNCE_DEFAULT;
}
- ret = of_property_read_u32(np, "cirrus,btn-det-event-dbnce", &val);
-
+ ret = device_property_read_u32(dev, "cirrus,btn-det-event-dbnce", &val);
if (!ret) {
if (val <= CS42L42_BTN_DET_EVENT_DBNCE_MAX)
cs42l42->btn_det_event_dbnce = val;
else {
- dev_err(&i2c_client->dev,
- "Wrong cirrus,btn-det-event-dbnce DT value %d\n", val);
+ dev_err(dev,
+ "Wrong cirrus,btn-det-event-dbnce DT value %d\n", val);
cs42l42->btn_det_event_dbnce =
CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT;
}
@@ -1755,19 +1814,17 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT;
}
- ret = of_property_read_u32_array(np, "cirrus,bias-lvls",
- (u32 *)thresholds, CS42L42_NUM_BIASES);
-
+ ret = device_property_read_u32_array(dev, "cirrus,bias-lvls",
+ thresholds, ARRAY_SIZE(thresholds));
if (!ret) {
for (i = 0; i < CS42L42_NUM_BIASES; i++) {
if (thresholds[i] <= CS42L42_HS_DET_LEVEL_MAX)
cs42l42->bias_thresholds[i] = thresholds[i];
else {
- dev_err(&i2c_client->dev,
- "Wrong cirrus,bias-lvls[%d] DT value %d\n", i,
+ dev_err(dev,
+ "Wrong cirrus,bias-lvls[%d] DT value %d\n", i,
thresholds[i]);
- cs42l42->bias_thresholds[i] =
- threshold_defaults[i];
+ cs42l42->bias_thresholds[i] = threshold_defaults[i];
}
}
} else {
@@ -1775,8 +1832,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
cs42l42->bias_thresholds[i] = threshold_defaults[i];
}
- ret = of_property_read_u32(np, "cirrus,hs-bias-ramp-rate", &val);
-
+ ret = device_property_read_u32(dev, "cirrus,hs-bias-ramp-rate", &val);
if (!ret) {
switch (val) {
case CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL:
@@ -1796,7 +1852,7 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME3;
break;
default:
- dev_err(&i2c_client->dev,
+ dev_err(dev,
"Wrong cirrus,hs-bias-ramp-rate DT value %d\n",
val);
cs42l42->hs_bias_ramp_rate = CS42L42_HSBIAS_RAMP_SLOW;
@@ -1812,6 +1868,11 @@ static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
(cs42l42->hs_bias_ramp_rate <<
CS42L42_HSBIAS_RAMP_SHIFT));
+ if (device_property_read_bool(dev, "cirrus,hs-bias-sense-disable"))
+ cs42l42->hs_bias_sense_en = 0;
+ else
+ cs42l42->hs_bias_sense_en = 1;
+
return 0;
}
@@ -1819,8 +1880,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
struct cs42l42_private *cs42l42;
- int ret, i;
- unsigned int devid = 0;
+ int ret, i, devid;
unsigned int reg;
cs42l42 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l42_private),
@@ -1883,14 +1943,12 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
"Failed to request IRQ: %d\n", ret);
/* initialize codec */
- ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_AB, &reg);
- devid = (reg & 0xFF) << 12;
-
- ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
-
- ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs42l42->regmap, CS42L42_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(&i2c_client->dev, "Failed to read device ID: %d\n", ret);
+ goto err_disable;
+ }
if (devid != CS42L42_CHIP_ID) {
ret = -ENODEV;
@@ -1926,11 +1984,9 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
(1 << CS42L42_ADC_PDN_SHIFT) |
(0 << CS42L42_PDN_ALL_SHIFT));
- if (i2c_client->dev.of_node) {
- ret = cs42l42_handle_device_data(i2c_client, cs42l42);
- if (ret != 0)
- goto err_disable;
- }
+ ret = cs42l42_handle_device_data(&i2c_client->dev, cs42l42);
+ if (ret != 0)
+ goto err_disable;
/* Setup headset detection */
cs42l42_setup_hs_type_detect(cs42l42);
@@ -2009,12 +2065,21 @@ static const struct dev_pm_ops cs42l42_runtime_pm = {
NULL)
};
+#ifdef CONFIG_OF
static const struct of_device_id cs42l42_of_match[] = {
{ .compatible = "cirrus,cs42l42", },
- {},
+ {}
};
MODULE_DEVICE_TABLE(of, cs42l42_of_match);
+#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cs42l42_acpi_match[] = {
+ {"10134242", 0,},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cs42l42_acpi_match);
+#endif
static const struct i2c_device_id cs42l42_id[] = {
{"cs42l42", 0},
@@ -2027,7 +2092,8 @@ static struct i2c_driver cs42l42_i2c_driver = {
.driver = {
.name = "cs42l42",
.pm = &cs42l42_runtime_pm,
- .of_match_table = cs42l42_of_match,
+ .of_match_table = of_match_ptr(cs42l42_of_match),
+ .acpi_match_table = ACPI_PTR(cs42l42_acpi_match),
},
.id_table = cs42l42_id,
.probe = cs42l42_i2c_probe,
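The cs42l42 changes above move jack creation out of the codec: the component now only implements a set_jack callback, so the machine driver owns the struct snd_soc_jack. A hedged sketch of that machine-driver side, using the same snd_soc_card_jack_new() arguments as the removed code plus the generic snd_soc_component_set_jack() helper; my_dai_link_init and headset_jack are illustrative names, not part of the patch.

static struct snd_soc_jack headset_jack;	/* owned by the machine driver */

static int my_dai_link_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
	int ret;

	ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
				    SND_JACK_HEADSET | SND_JACK_BTN_0 |
				    SND_JACK_BTN_1 | SND_JACK_BTN_2 |
				    SND_JACK_BTN_3,
				    &headset_jack, NULL, 0);
	if (ret)
		return ret;

	/* hands the jack to cs42l42_set_jack(), which also unmasks the
	 * tip-sense interrupts as shown above */
	return snd_soc_component_set_jack(component, &headset_jack, NULL);
}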
diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
index 36b763f0d1a0..8734f6828f3e 100644
--- a/sound/soc/codecs/cs42l42.h
+++ b/sound/soc/codecs/cs42l42.h
@@ -79,7 +79,7 @@
#define CS42L42_HP_PDN_SHIFT 3
#define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
#define CS42L42_ADC_PDN_SHIFT 2
-#define CS42L42_ADC_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
+#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT)
#define CS42L42_PDN_ALL_SHIFT 0
#define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT)
@@ -653,6 +653,8 @@
/* Page 0x25 Audio Port Registers */
#define CS42L42_SP_RX_CH_SEL (CS42L42_PAGE_25 + 0x01)
+#define CS42L42_SP_RX_CHB_SEL_SHIFT 2
+#define CS42L42_SP_RX_CHB_SEL_MASK (3 << CS42L42_SP_RX_CHB_SEL_SHIFT)
#define CS42L42_SP_RX_ISOC_CTL (CS42L42_PAGE_25 + 0x02)
#define CS42L42_SP_RX_RSYNC_SHIFT 6
@@ -755,6 +757,7 @@
#define CS42L42_NUM_SUPPLIES 5
#define CS42L42_BOOT_TIME_US 3000
+#define CS42L42_PLL_DIVOUT_TIME_US 800
#define CS42L42_CLOCK_SWITCH_DELAY_US 150
#define CS42L42_PLL_LOCK_POLL_US 250
#define CS42L42_PLL_LOCK_TIMEOUT_US 1250
@@ -773,10 +776,12 @@ struct cs42l42_private {
struct regulator_bulk_data supplies[CS42L42_NUM_SUPPLIES];
struct gpio_desc *reset_gpio;
struct completion pdn_done;
- struct snd_soc_jack jack;
+ struct snd_soc_jack *jack;
+ int pll_config;
int bclk;
u32 sclk;
u32 srate;
+ u8 pll_divout;
u8 plug_state;
u8 hs_type;
u8 ts_inv;
@@ -787,6 +792,7 @@ struct cs42l42_private {
u8 bias_thresholds[CS42L42_NUM_BIASES];
u8 hs_bias_ramp_rate;
u8 hs_bias_ramp_time;
+ u8 hs_bias_sense_en;
u8 stream_use;
};
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 796b894c390f..80161151b3f2 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -957,9 +957,8 @@ static int cs42l52_beep_event(struct input_dev *dev, unsigned int type,
return 0;
}
-static ssize_t cs42l52_beep_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t beep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cs42l52_private *cs42l52 = dev_get_drvdata(dev);
long int time;
@@ -974,7 +973,7 @@ static ssize_t cs42l52_beep_set(struct device *dev,
return count;
}
-static DEVICE_ATTR(beep, 0200, NULL, cs42l52_beep_set);
+static DEVICE_ATTR_WO(beep);
static void cs42l52_init_beep(struct snd_soc_component *component)
{
@@ -1093,7 +1092,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
struct cs42l52_private *cs42l52;
struct cs42l52_platform_data *pdata = dev_get_platdata(&i2c_client->dev);
int ret;
- unsigned int devid = 0;
+ unsigned int devid;
unsigned int reg;
u32 val32;
@@ -1163,6 +1162,11 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
ret);
ret = regmap_read(cs42l52->regmap, CS42L52_CHIP, &reg);
+ if (ret) {
+ dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret);
+ return ret;
+ }
+
devid = reg & CS42L52_CHIP_ID_MASK;
if (devid != CS42L52_CHIP_ID) {
ret = -ENODEV;
@@ -1199,11 +1203,8 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
CS42L52_IFACE_CTL2_BIAS_LVL,
cs42l52->pdata.micbias_lvl);
- ret = devm_snd_soc_register_component(&i2c_client->dev,
+ return devm_snd_soc_register_component(&i2c_client->dev,
&soc_component_dev_cs42l52, &cs42l52_dai, 1);
- if (ret < 0)
- return ret;
- return 0;
}
static const struct of_device_id cs42l52_of_match[] = {
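The cs42l52 (and cs42l56) rename above is driven by the DEVICE_ATTR_WO() naming convention: the macro declares dev_attr_beep with mode 0200 and wires .store to beep_store(), so the handler must carry that exact name. A stripped-down illustration (the real handler parses the beep duration):

static ssize_t beep_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return count;	/* sketch only: accept and ignore the write */
}
static DEVICE_ATTR_WO(beep);	/* expands to dev_attr_beep, .store = beep_store */

DEVICE_ATTR_RO() works the same way with a <name>_show() handler and mode 0444, which is what the cs43130 hpload_* attribute renames further down rely on.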
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 7cdffdf6b8cf..3cf8a0b4478c 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1021,9 +1021,8 @@ static int cs42l56_beep_event(struct input_dev *dev, unsigned int type,
return 0;
}
-static ssize_t cs42l56_beep_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t beep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cs42l56_private *cs42l56 = dev_get_drvdata(dev);
long int time;
@@ -1038,7 +1037,7 @@ static ssize_t cs42l56_beep_set(struct device *dev,
return count;
}
-static DEVICE_ATTR(beep, 0200, NULL, cs42l56_beep_set);
+static DEVICE_ATTR_WO(beep);
static void cs42l56_init_beep(struct snd_soc_component *component)
{
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index e92bacaab53f..018463f34e12 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -27,6 +27,7 @@
#include <sound/tlv.h>
#include <sound/cs42l73.h>
#include "cs42l73.h"
+#include "cirrus_legacy.h"
struct sp_config {
u8 spc, mmcc, spfs;
@@ -1278,8 +1279,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
{
struct cs42l73_private *cs42l73;
struct cs42l73_platform_data *pdata = dev_get_platdata(&i2c_client->dev);
- int ret;
- unsigned int devid = 0;
+ int ret, devid;
unsigned int reg;
u32 val32;
@@ -1329,27 +1329,25 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
}
/* initialize codec */
- ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_AB, &reg);
- devid = (reg & 0xFF) << 12;
-
- ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
-
- ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs42l73->regmap, CS42L73_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(&i2c_client->dev, "Failed to read device ID: %d\n", ret);
+ goto err_reset;
+ }
if (devid != CS42L73_DEVID) {
ret = -ENODEV;
dev_err(&i2c_client->dev,
"CS42L73 Device ID (%X). Expected %X\n",
devid, CS42L73_DEVID);
- return ret;
+ goto err_reset;
}
ret = regmap_read(cs42l73->regmap, CS42L73_REVID, &reg);
if (ret < 0) {
dev_err(&i2c_client->dev, "Get Revision ID failed\n");
- return ret;
+ goto err_reset;
}
dev_info(&i2c_client->dev,
@@ -1359,8 +1357,14 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
&soc_component_dev_cs42l73, cs42l73_dai,
ARRAY_SIZE(cs42l73_dai));
if (ret < 0)
- return ret;
+ goto err_reset;
+
return 0;
+
+err_reset:
+ gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 0);
+
+ return ret;
}
static const struct of_device_id cs42l73_of_match[] = {
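Several of these drivers now call a shared cirrus_read_device_id() helper instead of the open-coded three-register read removed above. cirrus_legacy.h itself is not part of this excerpt, so the following is only a plausible reconstruction from the removed code (DEVID_AB/CD/E are consecutive registers, and the ID comes back as a non-negative value or a negative errno); the real helper may differ in detail.

static inline int cirrus_read_device_id_sketch(struct regmap *regmap, unsigned int reg)
{
	u8 devid[3];
	int ret;

	/* read DEVID_AB, DEVID_CD and DEVID_E in one go */
	ret = regmap_bulk_read(regmap, reg, devid, ARRAY_SIZE(devid));
	if (ret < 0)
		return ret;

	return ((devid[0] & 0xFF) << 12) |
	       ((devid[1] & 0xFF) << 4) |
	       ((devid[2] & 0xF0) >> 4);
}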
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 80cd3ea0c157..44b20c1ef851 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -36,6 +36,7 @@
#include <sound/jack.h>
#include "cs43130.h"
+#include "cirrus_legacy.h"
static const struct reg_default cs43130_reg_defaults[] = {
{CS43130_SYS_CLK_CTL_1, 0x06},
@@ -1671,14 +1672,14 @@ static int cs43130_show_dc(struct device *dev, char *buf, u8 ch)
cs43130->hpload_dc[ch]);
}
-static ssize_t cs43130_show_dc_l(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hpload_dc_l_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return cs43130_show_dc(dev, buf, HP_LEFT);
}
-static ssize_t cs43130_show_dc_r(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hpload_dc_r_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return cs43130_show_dc(dev, buf, HP_RIGHT);
}
@@ -1718,22 +1719,22 @@ static int cs43130_show_ac(struct device *dev, char *buf, u8 ch)
}
}
-static ssize_t cs43130_show_ac_l(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hpload_ac_l_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return cs43130_show_ac(dev, buf, HP_LEFT);
}
-static ssize_t cs43130_show_ac_r(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hpload_ac_r_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return cs43130_show_ac(dev, buf, HP_RIGHT);
}
-static DEVICE_ATTR(hpload_dc_l, 0444, cs43130_show_dc_l, NULL);
-static DEVICE_ATTR(hpload_dc_r, 0444, cs43130_show_dc_r, NULL);
-static DEVICE_ATTR(hpload_ac_l, 0444, cs43130_show_ac_l, NULL);
-static DEVICE_ATTR(hpload_ac_r, 0444, cs43130_show_ac_r, NULL);
+static DEVICE_ATTR_RO(hpload_dc_l);
+static DEVICE_ATTR_RO(hpload_dc_r);
+static DEVICE_ATTR_RO(hpload_ac_l);
+static DEVICE_ATTR_RO(hpload_ac_r);
static struct attribute *hpload_attrs[] = {
&dev_attr_hpload_dc_l.attr,
@@ -2422,9 +2423,8 @@ static int cs43130_i2c_probe(struct i2c_client *client,
{
struct cs43130_private *cs43130;
int ret;
- unsigned int devid = 0;
unsigned int reg;
- int i;
+ int i, devid;
cs43130 = devm_kzalloc(&client->dev, sizeof(*cs43130), GFP_KERNEL);
if (!cs43130)
@@ -2462,20 +2462,21 @@ static int cs43130_i2c_probe(struct i2c_client *client,
cs43130->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_LOW);
- if (IS_ERR(cs43130->reset_gpio))
- return PTR_ERR(cs43130->reset_gpio);
+ if (IS_ERR(cs43130->reset_gpio)) {
+ ret = PTR_ERR(cs43130->reset_gpio);
+ goto err_supplies;
+ }
gpiod_set_value_cansleep(cs43130->reset_gpio, 1);
usleep_range(2000, 2050);
- ret = regmap_read(cs43130->regmap, CS43130_DEVID_AB, &reg);
-
- devid = (reg & 0xFF) << 12;
- ret = regmap_read(cs43130->regmap, CS43130_DEVID_CD, &reg);
- devid |= (reg & 0xFF) << 4;
- ret = regmap_read(cs43130->regmap, CS43130_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs43130->regmap, CS43130_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(&client->dev, "Failed to read device ID: %d\n", ret);
+ goto err;
+ }
switch (devid) {
case CS43130_CHIP_ID:
@@ -2515,7 +2516,7 @@ static int cs43130_i2c_probe(struct i2c_client *client,
"cs43130", cs43130);
if (ret != 0) {
dev_err(&client->dev, "Failed to request IRQ: %d\n", ret);
- return ret;
+ goto err;
}
cs43130->mclk_int_src = CS43130_MCLK_SRC_RCO;
@@ -2574,7 +2575,13 @@ static int cs43130_i2c_probe(struct i2c_client *client,
CS43130_XSP_3ST_MASK, 0);
return 0;
+
err:
+ gpiod_set_value_cansleep(cs43130->reset_gpio, 0);
+err_supplies:
+ regulator_bulk_disable(ARRAY_SIZE(cs43130->supplies),
+ cs43130->supplies);
+
return ret;
}
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index eaabbb56a173..6b6d08816024 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1178,7 +1178,7 @@ static unsigned int cs47l24_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_4L,
};
-static struct snd_compress_ops cs47l24_compress_ops = {
+static const struct snd_compress_ops cs47l24_compress_ops = {
.open = cs47l24_open,
.free = wm_adsp_compr_free,
.set_params = wm_adsp_compr_set_params,
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index abe0cc0bc03a..f2087bd38dbc 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -20,6 +20,7 @@
#include <sound/tlv.h>
#include "cs53l30.h"
+#include "cirrus_legacy.h"
#define CS53L30_NUM_SUPPLIES 2
static const char *const cs53l30_supply_names[CS53L30_NUM_SUPPLIES] = {
@@ -923,9 +924,8 @@ static int cs53l30_i2c_probe(struct i2c_client *client,
const struct device_node *np = client->dev.of_node;
struct device *dev = &client->dev;
struct cs53l30_private *cs53l30;
- unsigned int devid = 0;
unsigned int reg;
- int ret = 0, i;
+ int ret = 0, i, devid;
u8 val;
cs53l30 = devm_kzalloc(dev, sizeof(*cs53l30), GFP_KERNEL);
@@ -954,7 +954,7 @@ static int cs53l30_i2c_probe(struct i2c_client *client,
GPIOD_OUT_LOW);
if (IS_ERR(cs53l30->reset_gpio)) {
ret = PTR_ERR(cs53l30->reset_gpio);
- goto error;
+ goto error_supplies;
}
gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
@@ -971,14 +971,12 @@ static int cs53l30_i2c_probe(struct i2c_client *client,
}
/* Initialize codec */
- ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_AB, &reg);
- devid = reg << 12;
-
- ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_CD, &reg);
- devid |= reg << 4;
-
- ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_E, &reg);
- devid |= (reg & 0xF0) >> 4;
+ devid = cirrus_read_device_id(cs53l30->regmap, CS53L30_DEVID_AB);
+ if (devid < 0) {
+ ret = devid;
+ dev_err(dev, "Failed to read device ID: %d\n", ret);
+ goto error;
+ }
if (devid != CS53L30_DEVID) {
ret = -ENODEV;
@@ -1040,6 +1038,8 @@ static int cs53l30_i2c_probe(struct i2c_client *client,
return 0;
error:
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
+error_supplies:
regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
cs53l30->supplies);
return ret;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 61dfa86d444d..ec8d6e74b467 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -259,8 +259,8 @@ static int v253_hangup(struct tty_struct *tty)
}
/* Line discipline .receive_buf() */
-static void v253_receive(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void v253_receive(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count)
{
struct snd_soc_component *component = tty->disc_data;
struct cx20442_priv *cx20442;
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 1567ba196ab9..b61f980cabdc 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -277,6 +277,7 @@ struct hdmi_codec_priv {
bool busy;
struct snd_soc_jack *jack;
unsigned int jack_status;
+ u8 iec_status[5];
};
static const struct snd_soc_dapm_widget hdmi_widgets[] = {
@@ -385,6 +386,47 @@ static int hdmi_codec_chmap_ctl_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int hdmi_codec_iec958_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+ return 0;
+}
+
+static int hdmi_codec_iec958_default_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+
+ memcpy(ucontrol->value.iec958.status, hcp->iec_status,
+ sizeof(hcp->iec_status));
+
+ return 0;
+}
+
+static int hdmi_codec_iec958_default_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+
+ memcpy(hcp->iec_status, ucontrol->value.iec958.status,
+ sizeof(hcp->iec_status));
+
+ return 0;
+}
+
+static int hdmi_codec_iec958_mask_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ memset(ucontrol->value.iec958.status, 0xff,
+ sizeof_field(struct hdmi_codec_priv, iec_status));
+
+ return 0;
+}
+
static int hdmi_codec_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -439,6 +481,42 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
mutex_unlock(&hcp->lock);
}
+static int hdmi_codec_fill_codec_params(struct snd_soc_dai *dai,
+ unsigned int sample_width,
+ unsigned int sample_rate,
+ unsigned int channels,
+ struct hdmi_codec_params *hp)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ int idx;
+
+ /* Select a channel allocation that matches with ELD and pcm channels */
+ idx = hdmi_codec_get_ch_alloc_table_idx(hcp, channels);
+ if (idx < 0) {
+ dev_err(dai->dev, "Not able to map channels to speakers (%d)\n",
+ idx);
+ hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
+ return idx;
+ }
+
+ memset(hp, 0, sizeof(*hp));
+
+ hdmi_audio_infoframe_init(&hp->cea);
+ hp->cea.channels = channels;
+ hp->cea.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+ hp->cea.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+ hp->cea.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+ hp->cea.channel_allocation = hdmi_codec_channel_alloc[idx].ca_id;
+
+ hp->sample_width = sample_width;
+ hp->sample_rate = sample_rate;
+ hp->channels = channels;
+
+ hcp->chmap_idx = hdmi_codec_channel_alloc[idx].ca_id;
+
+ return 0;
+}
+
static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -453,46 +531,73 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
.dig_subframe = { 0 },
}
};
- int ret, idx;
+ int ret;
+
+ if (!hcp->hcd.ops->hw_params)
+ return 0;
dev_dbg(dai->dev, "%s() width %d rate %d channels %d\n", __func__,
params_width(params), params_rate(params),
params_channels(params));
- ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
- sizeof(hp.iec.status));
+ ret = hdmi_codec_fill_codec_params(dai,
+ params_width(params),
+ params_rate(params),
+ params_channels(params),
+ &hp);
+ if (ret < 0)
+ return ret;
+
+ memcpy(hp.iec.status, hcp->iec_status, sizeof(hp.iec.status));
+ ret = snd_pcm_fill_iec958_consumer_hw_params(params, hp.iec.status,
+ sizeof(hp.iec.status));
if (ret < 0) {
dev_err(dai->dev, "Creating IEC958 channel status failed %d\n",
ret);
return ret;
}
- hdmi_audio_infoframe_init(&hp.cea);
- hp.cea.channels = params_channels(params);
- hp.cea.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
- hp.cea.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
- hp.cea.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
-
- /* Select a channel allocation that matches with ELD and pcm channels */
- idx = hdmi_codec_get_ch_alloc_table_idx(hcp, hp.cea.channels);
- if (idx < 0) {
- dev_err(dai->dev, "Not able to map channels to speakers (%d)\n",
- idx);
- hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
- return idx;
- }
- hp.cea.channel_allocation = hdmi_codec_channel_alloc[idx].ca_id;
- hcp->chmap_idx = hdmi_codec_channel_alloc[idx].ca_id;
-
- hp.sample_width = params_width(params);
- hp.sample_rate = params_rate(params);
- hp.channels = params_channels(params);
-
cf->bit_fmt = params_format(params);
return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
cf, &hp);
}
+static int hdmi_codec_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ struct hdmi_codec_daifmt *cf = dai->playback_dma_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned int channels = runtime->channels;
+ unsigned int width = snd_pcm_format_width(runtime->format);
+ unsigned int rate = runtime->rate;
+ struct hdmi_codec_params hp;
+ int ret;
+
+ if (!hcp->hcd.ops->prepare)
+ return 0;
+
+ dev_dbg(dai->dev, "%s() width %d rate %d channels %d\n", __func__,
+ width, rate, channels);
+
+ ret = hdmi_codec_fill_codec_params(dai, width, rate, channels, &hp);
+ if (ret < 0)
+ return ret;
+
+ memcpy(hp.iec.status, hcp->iec_status, sizeof(hp.iec.status));
+ ret = snd_pcm_fill_iec958_consumer(runtime, hp.iec.status,
+ sizeof(hp.iec.status));
+ if (ret < 0) {
+ dev_err(dai->dev, "Creating IEC958 channel status failed %d\n",
+ ret);
+ return ret;
+ }
+
+ cf->bit_fmt = runtime->format;
+ return hcp->hcd.ops->prepare(dai->dev->parent, hcp->hcd.data,
+ cf, &hp);
+}
+
static int hdmi_codec_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
@@ -580,12 +685,34 @@ static int hdmi_codec_mute(struct snd_soc_dai *dai, int mute, int direction)
return -ENOTSUPP;
}
+/*
+ * This driver can accept any SND_SOC_DAIFMT_CBx_CFx setting, but the
+ * clock relationship must be selected by the sound card rather than being
+ * auto-selected, because the DAI may also be used by another driver.
+ * For example,
+ * ${LINUX}/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+ */
+static u64 hdmi_codec_formats =
+ SND_SOC_POSSIBLE_DAIFMT_NB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_NB_IF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_IF |
+ SND_SOC_POSSIBLE_DAIFMT_I2S |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_A |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_B |
+ SND_SOC_POSSIBLE_DAIFMT_RIGHT_J |
+ SND_SOC_POSSIBLE_DAIFMT_LEFT_J |
+ SND_SOC_POSSIBLE_DAIFMT_AC97;
+
static const struct snd_soc_dai_ops hdmi_codec_i2s_dai_ops = {
.startup = hdmi_codec_startup,
.shutdown = hdmi_codec_shutdown,
.hw_params = hdmi_codec_hw_params,
+ .prepare = hdmi_codec_prepare,
.set_fmt = hdmi_codec_i2s_set_fmt,
.mute_stream = hdmi_codec_mute,
+ .auto_selectable_formats = &hdmi_codec_formats,
+ .num_auto_selectable_formats = 1,
};
static const struct snd_soc_dai_ops hdmi_codec_spdif_dai_ops = {
@@ -620,21 +747,37 @@ static const struct snd_soc_dai_ops hdmi_codec_spdif_dai_ops = {
SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |\
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+static struct snd_kcontrol_new hdmi_codec_controls[] = {
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK),
+ .info = hdmi_codec_iec958_info,
+ .get = hdmi_codec_iec958_mask_get,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+ .info = hdmi_codec_iec958_info,
+ .get = hdmi_codec_iec958_default_get,
+ .put = hdmi_codec_iec958_default_put,
+ },
+ {
+ .access = (SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE),
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "ELD",
+ .info = hdmi_eld_ctl_info,
+ .get = hdmi_eld_ctl_get,
+ },
+};
+
static int hdmi_codec_pcm_new(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai)
{
struct snd_soc_dai_driver *drv = dai->driver;
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
- struct snd_kcontrol *kctl;
- struct snd_kcontrol_new hdmi_eld_ctl = {
- .access = SNDRV_CTL_ELEM_ACCESS_READ |
- SNDRV_CTL_ELEM_ACCESS_VOLATILE,
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "ELD",
- .info = hdmi_eld_ctl_info,
- .get = hdmi_eld_ctl_get,
- .device = rtd->pcm->device,
- };
+ unsigned int i;
int ret;
ret = snd_pcm_add_chmap_ctls(rtd->pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -651,12 +794,21 @@ static int hdmi_codec_pcm_new(struct snd_soc_pcm_runtime *rtd,
hcp->chmap_info->chmap = hdmi_codec_stereo_chmaps;
hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
- /* add ELD ctl with the device number corresponding to the PCM stream */
- kctl = snd_ctl_new1(&hdmi_eld_ctl, dai->component);
- if (!kctl)
- return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(hdmi_codec_controls); i++) {
+ struct snd_kcontrol *kctl;
+
+ /* add ELD ctl with the device number corresponding to the PCM stream */
+ kctl = snd_ctl_new1(&hdmi_codec_controls[i], dai->component);
+ if (!kctl)
+ return -ENOMEM;
- return snd_ctl_add(rtd->card->snd_card, kctl);
+ kctl->id.device = rtd->pcm->device;
+ ret = snd_ctl_add(rtd->card->snd_card, kctl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
static int hdmi_dai_probe(struct snd_soc_dai *dai)
@@ -849,7 +1001,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
}
dai_count = hcd->i2s + hcd->spdif;
- if (dai_count < 1 || !hcd->ops || !hcd->ops->hw_params ||
+ if (dai_count < 1 || !hcd->ops ||
+ (!hcd->ops->hw_params && !hcd->ops->prepare) ||
!hcd->ops->audio_shutdown) {
dev_err(dev, "%s: Invalid parameters\n", __func__);
return -EINVAL;
@@ -862,6 +1015,11 @@ static int hdmi_codec_probe(struct platform_device *pdev)
hcp->hcd = *hcd;
mutex_init(&hcp->lock);
+ ret = snd_pcm_create_iec958_consumer_default(hcp->iec_status,
+ sizeof(hcp->iec_status));
+ if (ret < 0)
+ return ret;
+
daidrv = devm_kcalloc(dev, dai_count, sizeof(*daidrv), GFP_KERNEL);
if (!daidrv)
return -ENOMEM;
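After the change above, hdmi_codec_probe() accepts a backend that implements either hw_params or the new prepare op, and hdmi_codec_prepare() passes down an IEC958 channel status that already reflects the userspace "IEC958 Playback Default" control. A hedged sketch of the backend side, assuming struct hdmi_codec_ops carries a prepare member with the call signature used above; the my_bridge_* names are illustrative.

static int my_bridge_prepare(struct device *dev, void *data,
			     struct hdmi_codec_daifmt *fmt,
			     struct hdmi_codec_params *hparms)
{
	/* hparms->iec.status[] was filled by hdmi_codec_prepare() above,
	 * merging the driver default with any userspace override. */
	return 0;
}

static void my_bridge_audio_shutdown(struct device *dev, void *data)
{
}

static const struct hdmi_codec_ops my_bridge_hdmi_codec_ops = {
	.prepare	= my_bridge_prepare,
	.audio_shutdown	= my_bridge_audio_shutdown,	/* still mandatory */
};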
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index 171ab7f519c0..3622961f7c2c 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -2628,7 +2628,7 @@ static int rx_macro_enable_rx_path_clk(struct snd_soc_dapm_widget *w,
break;
default:
break;
- };
+ }
return 0;
}
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index 1a7fa5492f28..d3ac318fd6b6 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -1727,6 +1727,10 @@ static int wsa_macro_enable_echo(struct snd_soc_dapm_widget *w,
val = val & CDC_WSA_RX_MIX_TX1_SEL_MASK;
ec_tx = (val >> CDC_WSA_RX_MIX_TX1_SEL_SHFT) - 1;
break;
+ default:
+ dev_err(component->dev, "%s: Invalid shift %u\n",
+ __func__, w->shift);
+ return -EINVAL;
}
if (wsa->ec_hq[ec_tx]) {
diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
index f3a12205cd48..dc520effc61c 100644
--- a/sound/soc/codecs/max98373-sdw.c
+++ b/sound/soc/codecs/max98373-sdw.c
@@ -271,7 +271,7 @@ static __maybe_unused int max98373_resume(struct device *dev)
struct max98373_priv *max98373 = dev_get_drvdata(dev);
unsigned long time;
- if (!max98373->hw_init)
+ if (!max98373->first_hw_init)
return 0;
if (!slave->unattach_request)
@@ -362,7 +362,7 @@ static int max98373_io_init(struct sdw_slave *slave)
struct device *dev = &slave->dev;
struct max98373_priv *max98373 = dev_get_drvdata(dev);
- if (max98373->pm_init_once) {
+ if (max98373->first_hw_init) {
regcache_cache_only(max98373->regmap, false);
regcache_cache_bypass(max98373->regmap, true);
}
@@ -370,7 +370,7 @@ static int max98373_io_init(struct sdw_slave *slave)
/*
* PM runtime is only enabled when a Slave reports as Attached
*/
- if (!max98373->pm_init_once) {
+ if (!max98373->first_hw_init) {
/* set autosuspend parameters */
pm_runtime_set_autosuspend_delay(dev, 3000);
pm_runtime_use_autosuspend(dev);
@@ -462,12 +462,12 @@ static int max98373_io_init(struct sdw_slave *slave)
regmap_write(max98373->regmap, MAX98373_R20B5_BDE_EN, 1);
regmap_write(max98373->regmap, MAX98373_R20E2_LIMITER_EN, 1);
- if (max98373->pm_init_once) {
+ if (max98373->first_hw_init) {
regcache_cache_bypass(max98373->regmap, false);
regcache_mark_dirty(max98373->regmap);
}
- max98373->pm_init_once = true;
+ max98373->first_hw_init = true;
max98373->hw_init = true;
pm_runtime_mark_last_busy(dev);
@@ -787,6 +787,8 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
max98373->cache = devm_kcalloc(dev, max98373->cache_num,
sizeof(*max98373->cache),
GFP_KERNEL);
+ if (!max98373->cache)
+ return -ENOMEM;
for (i = 0; i < max98373->cache_num; i++)
max98373->cache[i].reg = max98373_sdw_cache_reg[i];
@@ -795,7 +797,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
max98373_slot_config(dev, max98373);
max98373->hw_init = false;
- max98373->pm_init_once = false;
+ max98373->first_hw_init = false;
/* codec registration */
ret = devm_snd_soc_register_component(dev, &soc_codec_dev_max98373_sdw,
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
index 73a2cf69d84a..e1810b3b1620 100644
--- a/sound/soc/codecs/max98373.h
+++ b/sound/soc/codecs/max98373.h
@@ -226,7 +226,7 @@ struct max98373_priv {
/* variables to support soundwire */
struct sdw_slave *slave;
bool hw_init;
- bool pm_init_once;
+ bool first_hw_init;
int slot;
unsigned int rx_mask;
};
diff --git a/sound/soc/codecs/mt6359-accdet.c b/sound/soc/codecs/mt6359-accdet.c
index 4222aed013f1..78314187d37e 100644
--- a/sound/soc/codecs/mt6359-accdet.c
+++ b/sound/soc/codecs/mt6359-accdet.c
@@ -414,7 +414,7 @@ static void mt6359_accdet_work(struct work_struct *work)
static void mt6359_accdet_jd_work(struct work_struct *work)
{
- int ret = 0;
+ int ret;
unsigned int value = 0;
struct mt6359_accdet *priv =
diff --git a/sound/soc/codecs/mt6359.c b/sound/soc/codecs/mt6359.c
index b909b36582b7..2d6a4a29b850 100644
--- a/sound/soc/codecs/mt6359.c
+++ b/sound/soc/codecs/mt6359.c
@@ -271,7 +271,7 @@ static void hp_aux_feedback_loop_gain_ramp(struct mt6359_priv *priv, bool up)
static void hp_in_pair_current(struct mt6359_priv *priv, bool increase)
{
- int i = 0, stage = 0;
+ int i, stage;
int target = 0x3;
/* Set input diff pair bias select (Hi-Fi mode) */
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 15bd8335f667..db88be48c998 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -828,36 +828,6 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
}
}
-static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
-{
- struct snd_soc_dapm_context *dapm = nau8824->dapm;
- const char *prefix = dapm->component->name_prefix;
- char prefixed_pin[80];
-
- if (prefix) {
- snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
- prefix, pin);
- snd_soc_dapm_disable_pin(dapm, prefixed_pin);
- } else {
- snd_soc_dapm_disable_pin(dapm, pin);
- }
-}
-
-static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
-{
- struct snd_soc_dapm_context *dapm = nau8824->dapm;
- const char *prefix = dapm->component->name_prefix;
- char prefixed_pin[80];
-
- if (prefix) {
- snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
- prefix, pin);
- snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
- } else {
- snd_soc_dapm_force_enable_pin(dapm, pin);
- }
-}
-
static void nau8824_eject_jack(struct nau8824 *nau8824)
{
struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -866,8 +836,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
/* Clear all interruption status */
nau8824_int_status_clear_all(regmap);
- nau8824_dapm_disable_pin(nau8824, "SAR");
- nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+ snd_soc_dapm_disable_pin(dapm, "SAR");
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS");
snd_soc_dapm_sync(dapm);
/* Enable the insertion interruption, disable the ejection
@@ -897,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
struct regmap *regmap = nau8824->regmap;
int adc_value, event = 0, event_mask = 0;
- nau8824_dapm_enable_pin(nau8824, "MICBIAS");
- nau8824_dapm_enable_pin(nau8824, "SAR");
+ snd_soc_dapm_enable_pin(dapm, "MICBIAS");
+ snd_soc_dapm_enable_pin(dapm, "SAR");
snd_soc_dapm_sync(dapm);
msleep(100);
@@ -909,8 +879,8 @@ static void nau8824_jdet_work(struct work_struct *work)
if (adc_value < HEADSET_SARADC_THD) {
event |= SND_JACK_HEADPHONE;
- nau8824_dapm_disable_pin(nau8824, "SAR");
- nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+ snd_soc_dapm_disable_pin(dapm, "SAR");
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS");
snd_soc_dapm_sync(dapm);
} else {
event |= SND_JACK_HEADSET;
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 821e7395f90f..b6fd412441a1 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -573,6 +573,30 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static u64 pcm3168a_dai_formats[] = {
+ /*
+ * Select below from Sound Card, not here
+ * SND_SOC_DAIFMT_CBC_CFC
+ * SND_SOC_DAIFMT_CBP_CFP
+ */
+
+ /*
+ * First Priority
+ */
+ SND_SOC_POSSIBLE_DAIFMT_I2S |
+ SND_SOC_POSSIBLE_DAIFMT_LEFT_J,
+ /*
+ * Second Priority
+ *
+ * These formats have extra restrictions;
+ * see
+ * pcm3168a_hw_params()
+ */
+ SND_SOC_POSSIBLE_DAIFMT_RIGHT_J |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_A |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_B,
+};
+
static const struct snd_soc_dai_ops pcm3168a_dai_ops = {
.set_fmt = pcm3168a_set_dai_fmt,
.set_sysclk = pcm3168a_set_dai_sysclk,
@@ -580,6 +604,8 @@ static const struct snd_soc_dai_ops pcm3168a_dai_ops = {
.mute_stream = pcm3168a_mute,
.set_tdm_slot = pcm3168a_set_tdm_slot,
.no_capture_mute = 1,
+ .auto_selectable_formats = pcm3168a_dai_formats,
+ .num_auto_selectable_formats = ARRAY_SIZE(pcm3168a_dai_formats),
};
static struct snd_soc_dai_driver pcm3168a_dais[] = {
diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
index bfefefcc76d8..758d439e8c7a 100644
--- a/sound/soc/codecs/rk3328_codec.c
+++ b/sound/soc/codecs/rk3328_codec.c
@@ -474,7 +474,8 @@ static int rk3328_platform_probe(struct platform_device *pdev)
rk3328->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(rk3328->pclk)) {
dev_err(&pdev->dev, "can't get acodec pclk\n");
- return PTR_ERR(rk3328->pclk);
+ ret = PTR_ERR(rk3328->pclk);
+ goto err_unprepare_mclk;
}
ret = clk_prepare_enable(rk3328->pclk);
@@ -484,19 +485,34 @@ static int rk3328_platform_probe(struct platform_device *pdev)
}
base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_unprepare_pclk;
+ }
rk3328->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&rk3328_codec_regmap_config);
- if (IS_ERR(rk3328->regmap))
- return PTR_ERR(rk3328->regmap);
+ if (IS_ERR(rk3328->regmap)) {
+ ret = PTR_ERR(rk3328->regmap);
+ goto err_unprepare_pclk;
+ }
platform_set_drvdata(pdev, rk3328);
- return devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
+ ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
rk3328_dai,
ARRAY_SIZE(rk3328_dai));
+ if (ret)
+ goto err_unprepare_pclk;
+
+ return 0;
+
+err_unprepare_pclk:
+ clk_disable_unprepare(rk3328->pclk);
+
+err_unprepare_mclk:
+ clk_disable_unprepare(rk3328->mclk);
+ return ret;
}
static const struct of_device_id rk3328_codec_of_match[] __maybe_unused = {
diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
new file mode 100644
index 000000000000..943d7d933e81
--- /dev/null
+++ b/sound/soc/codecs/rk817_codec.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rk817 ALSA SoC Audio driver
+//
+// Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/mfd/rk808.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+struct rk817_codec_priv {
+ struct snd_soc_component *component;
+ struct rk808 *rk808;
+ struct clk *mclk;
+ unsigned int stereo_sysclk;
+ bool mic_in_differential;
+};
+
+/*
+ * This sets the codec up with the values defined in the default implementation including the APLL
+ * from the Rockchip vendor kernel. I do not know if these values are universal despite differing
+ * from the default values defined above and taken from the datasheet, or implementation specific.
+ * I don't have another implementation to compare from the Rockchip sources. Hard-coding for now.
+ * Additionally, the documentation does not state which units the clock values are
+ * expected in, so for the moment those are left unvalidated.
+ */
+
+static int rk817_init(struct snd_soc_component *component)
+{
+ struct rk817_codec_priv *rk817 = snd_soc_component_get_drvdata(component);
+
+ snd_soc_component_write(component, RK817_CODEC_DDAC_POPD_DACST, 0x02);
+ snd_soc_component_write(component, RK817_CODEC_DDAC_SR_LMT0, 0x02);
+ snd_soc_component_write(component, RK817_CODEC_DADC_SR_ACL0, 0x02);
+ snd_soc_component_write(component, RK817_CODEC_DTOP_VUCTIME, 0xf4);
+ if (rk817->mic_in_differential) {
+ snd_soc_component_update_bits(component, RK817_CODEC_AMIC_CFG0, MIC_DIFF_MASK,
+ MIC_DIFF_EN);
+ }
+
+ return 0;
+}
+
+static int rk817_set_component_pll(struct snd_soc_component *component,
+ int pll_id, int source, unsigned int freq_in,
+ unsigned int freq_out)
+{
+ /* Set resistor value and charge pump current for PLL. */
+ snd_soc_component_write(component, RK817_CODEC_APLL_CFG1, 0x58);
+ /* Set the PLL feedback clock divide value (values not documented). */
+ snd_soc_component_write(component, RK817_CODEC_APLL_CFG2, 0x2d);
+ /* Set the PLL pre-divide value (values not documented). */
+ snd_soc_component_write(component, RK817_CODEC_APLL_CFG3, 0x0c);
+ /* Set the PLL VCO output clock divide and PLL divided ratio of PLL High Clk (values not
+ * documented).
+ */
+ snd_soc_component_write(component, RK817_CODEC_APLL_CFG4, 0xa5);
+
+ return 0;
+}
+
+/*
+ * DDAC/DADC L/R volume setting
+ * 0 dB to -95 dB in 0.375 dB steps, for example:
+ * 0x00: 0 dB
+ * 0xff: -95 dB
+ */
+
+static const DECLARE_TLV_DB_MINMAX(rk817_vol_tlv, -9500, 0);
+
+/*
+ * PGA GAIN L/R volume setting
+ * -18 dB to 27 dB in 3 dB steps, for example:
+ * 0x0: -18 dB
+ * 0xf: 27 dB
+ */
+
+static const DECLARE_TLV_DB_MINMAX(rk817_gain_tlv, -1800, 2700);
+
+static const struct snd_kcontrol_new rk817_volume_controls[] = {
+ SOC_DOUBLE_R_RANGE_TLV("Master Playback Volume", RK817_CODEC_DDAC_VOLL,
+ RK817_CODEC_DDAC_VOLR, 0, 0x00, 0xff, 1, rk817_vol_tlv),
+ SOC_DOUBLE_R_RANGE_TLV("Master Capture Volume", RK817_CODEC_DADC_VOLL,
+ RK817_CODEC_DADC_VOLR, 0, 0x00, 0xff, 1, rk817_vol_tlv),
+ SOC_DOUBLE_TLV("Mic Capture Gain", RK817_CODEC_DMIC_PGA_GAIN, 4, 0, 0xf, 0,
+ rk817_gain_tlv),
+};
+
+/* Since the speaker output and the left headphone pin are internally the same, make the audio
+ * paths mutually exclusive with a mux.
+ */
+
+static const char *dac_mux_text[] = {
+ "HP",
+ "SPK",
+};
+
+static SOC_ENUM_SINGLE_VIRT_DECL(dac_enum, dac_mux_text);
+
+static const struct snd_kcontrol_new dac_mux =
+ SOC_DAPM_ENUM("Playback Mux", dac_enum);
+
+static const struct snd_soc_dapm_widget rk817_dapm_widgets[] = {
+
+ /* capture/playback common */
+ SND_SOC_DAPM_SUPPLY("LDO Regulator", RK817_CODEC_AREF_RTCFG1, 6, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("IBIAS Block", RK817_CODEC_AREF_RTCFG1, 2, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("VAvg Buffer", RK817_CODEC_AREF_RTCFG1, 1, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL Power", RK817_CODEC_APLL_CFG5, 0, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S TX1 Transfer Start", RK817_CODEC_DI2S_RXCMD_TSD, 5, 0, NULL, 0),
+
+ /* capture path common */
+ SND_SOC_DAPM_SUPPLY("ADC Clock", RK817_CODEC_DTOP_DIGEN_CLKE, 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S TX Clock", RK817_CODEC_DTOP_DIGEN_CLKE, 6, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC Channel Enable", RK817_CODEC_DTOP_DIGEN_CLKE, 5, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S TX Channel Enable", RK817_CODEC_DTOP_DIGEN_CLKE, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC Power On", RK817_CODEC_AMIC_CFG0, 6, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S TX3 Transfer Start", RK817_CODEC_DI2S_TXCR3_TXCMD, 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S TX3 Right Justified", RK817_CODEC_DI2S_TXCR3_TXCMD, 3, 0, NULL, 0),
+
+ /* capture path L */
+ SND_SOC_DAPM_ADC("ADC L", "Capture", RK817_CODEC_AADC_CFG0, 7, 1),
+ SND_SOC_DAPM_SUPPLY("PGA L Power On", RK817_CODEC_AMIC_CFG0, 5, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Boost L1", RK817_CODEC_AMIC_CFG0, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Boost L2", RK817_CODEC_AMIC_CFG0, 2, 0, NULL, 0),
+
+ /* capture path R */
+ SND_SOC_DAPM_ADC("ADC R", "Capture", RK817_CODEC_AADC_CFG0, 6, 1),
+ SND_SOC_DAPM_SUPPLY("PGA R Power On", RK817_CODEC_AMIC_CFG0, 4, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Boost R1", RK817_CODEC_AMIC_CFG0, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Boost R2", RK817_CODEC_AMIC_CFG0, 3, 0, NULL, 0),
+
+ /* playback path common */
+ SND_SOC_DAPM_SUPPLY("DAC Clock", RK817_CODEC_DTOP_DIGEN_CLKE, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S RX Clock", RK817_CODEC_DTOP_DIGEN_CLKE, 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Channel Enable", RK817_CODEC_DTOP_DIGEN_CLKE, 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S RX Channel Enable", RK817_CODEC_DTOP_DIGEN_CLKE, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Bias", RK817_CODEC_ADAC_CFG1, 3, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Mute Off", RK817_CODEC_DDAC_MUTE_MIXCTL, 0, 1, NULL, 0),
+
+ /* playback path speaker */
+ SND_SOC_DAPM_SUPPLY("Class D Mode", RK817_CODEC_DDAC_MUTE_MIXCTL, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("High Pass Filter", RK817_CODEC_DDAC_MUTE_MIXCTL, 7, 0, NULL, 0),
+ SND_SOC_DAPM_DAC("SPK DAC", "Playback", RK817_CODEC_ADAC_CFG1, 2, 1),
+ SND_SOC_DAPM_SUPPLY("Enable Class D", RK817_CODEC_ACLASSD_CFG1, 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Disable Class D Mute Ramp", RK817_CODEC_ACLASSD_CFG1, 6, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Class D Mute Rate 1", RK817_CODEC_ACLASSD_CFG1, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Class D Mute Rate 2", RK817_CODEC_ACLASSD_CFG1, 2, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Class D OCPP 2", RK817_CODEC_ACLASSD_CFG2, 5, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Class D OCPP 3", RK817_CODEC_ACLASSD_CFG2, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Class D OCPN 2", RK817_CODEC_ACLASSD_CFG2, 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Class D OCPN 3", RK817_CODEC_ACLASSD_CFG2, 0, 0, NULL, 0),
+
+ /* playback path headphones */
+ SND_SOC_DAPM_SUPPLY("Headphone Charge Pump", RK817_CODEC_AHP_CP, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Headphone CP Discharge LDO", RK817_CODEC_AHP_CP, 3, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Headphone OStage", RK817_CODEC_AHP_CFG0, 6, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Headphone Pre Amp", RK817_CODEC_AHP_CFG0, 5, 1, NULL, 0),
+ SND_SOC_DAPM_DAC("DAC L", "Playback", RK817_CODEC_ADAC_CFG1, 1, 1),
+ SND_SOC_DAPM_DAC("DAC R", "Playback", RK817_CODEC_ADAC_CFG1, 0, 1),
+
+ /* Mux for input/output path selection */
+ SND_SOC_DAPM_MUX("Playback Mux", SND_SOC_NOPM, 1, 0, &dac_mux),
+
+ /* Pins for Simple Card Bindings */
+ SND_SOC_DAPM_INPUT("MICL"),
+ SND_SOC_DAPM_INPUT("MICR"),
+ SND_SOC_DAPM_OUTPUT("HPOL"),
+ SND_SOC_DAPM_OUTPUT("HPOR"),
+ SND_SOC_DAPM_OUTPUT("SPKO"),
+};
+
+static const struct snd_soc_dapm_route rk817_dapm_routes[] = {
+
+ /* capture path */
+ /* left mic */
+ {"ADC L", NULL, "LDO Regulator"},
+ {"ADC L", NULL, "IBIAS Block"},
+ {"ADC L", NULL, "VAvg Buffer"},
+ {"ADC L", NULL, "PLL Power"},
+ {"ADC L", NULL, "ADC Clock"},
+ {"ADC L", NULL, "I2S TX Clock"},
+ {"ADC L", NULL, "ADC Channel Enable"},
+ {"ADC L", NULL, "I2S TX Channel Enable"},
+ {"ADC L", NULL, "I2S TX1 Transfer Start"},
+ {"MICL", NULL, "MIC Power On"},
+ {"MICL", NULL, "PGA L Power On"},
+ {"MICL", NULL, "Mic Boost L1"},
+ {"MICL", NULL, "Mic Boost L2"},
+ {"MICL", NULL, "I2S TX3 Transfer Start"},
+ {"MICL", NULL, "I2S TX3 Right Justified"},
+ {"ADC L", NULL, "MICL"},
+
+ /* right mic */
+ {"ADC R", NULL, "LDO Regulator"},
+ {"ADC R", NULL, "IBIAS Block"},
+ {"ADC R", NULL, "VAvg Buffer"},
+ {"ADC R", NULL, "PLL Power"},
+ {"ADC R", NULL, "ADC Clock"},
+ {"ADC R", NULL, "I2S TX Clock"},
+ {"ADC R", NULL, "ADC Channel Enable"},
+ {"ADC R", NULL, "I2S TX Channel Enable"},
+ {"ADC R", NULL, "I2S TX1 Transfer Start"},
+ {"MICR", NULL, "MIC Power On"},
+ {"MICR", NULL, "PGA R Power On"},
+ {"MICR", NULL, "Mic Boost R1"},
+ {"MICR", NULL, "Mic Boost R2"},
+ {"MICR", NULL, "I2S TX3 Transfer Start"},
+ {"MICR", NULL, "I2S TX3 Right Justified"},
+ {"ADC R", NULL, "MICR"},
+
+ /* playback path */
+ /* speaker path */
+ {"SPK DAC", NULL, "LDO Regulator"},
+ {"SPK DAC", NULL, "IBIAS Block"},
+ {"SPK DAC", NULL, "VAvg Buffer"},
+ {"SPK DAC", NULL, "PLL Power"},
+ {"SPK DAC", NULL, "I2S TX1 Transfer Start"},
+ {"SPK DAC", NULL, "DAC Clock"},
+ {"SPK DAC", NULL, "I2S RX Clock"},
+ {"SPK DAC", NULL, "DAC Channel Enable"},
+ {"SPK DAC", NULL, "I2S RX Channel Enable"},
+ {"SPK DAC", NULL, "Class D Mode"},
+ {"SPK DAC", NULL, "DAC Bias"},
+ {"SPK DAC", NULL, "DAC Mute Off"},
+ {"SPK DAC", NULL, "Enable Class D"},
+ {"SPK DAC", NULL, "Disable Class D Mute Ramp"},
+ {"SPK DAC", NULL, "Class D Mute Rate 1"},
+ {"SPK DAC", NULL, "Class D Mute Rate 2"},
+ {"SPK DAC", NULL, "Class D OCPP 2"},
+ {"SPK DAC", NULL, "Class D OCPP 3"},
+ {"SPK DAC", NULL, "Class D OCPN 2"},
+ {"SPK DAC", NULL, "Class D OCPN 3"},
+ {"SPK DAC", NULL, "High Pass Filter"},
+
+ /* headphone path L */
+ {"DAC L", NULL, "LDO Regulator"},
+ {"DAC L", NULL, "IBIAS Block"},
+ {"DAC L", NULL, "VAvg Buffer"},
+ {"DAC L", NULL, "PLL Power"},
+ {"DAC L", NULL, "I2S TX1 Transfer Start"},
+ {"DAC L", NULL, "DAC Clock"},
+ {"DAC L", NULL, "I2S RX Clock"},
+ {"DAC L", NULL, "DAC Channel Enable"},
+ {"DAC L", NULL, "I2S RX Channel Enable"},
+ {"DAC L", NULL, "DAC Bias"},
+ {"DAC L", NULL, "DAC Mute Off"},
+ {"DAC L", NULL, "Headphone Charge Pump"},
+ {"DAC L", NULL, "Headphone CP Discharge LDO"},
+ {"DAC L", NULL, "Headphone OStage"},
+ {"DAC L", NULL, "Headphone Pre Amp"},
+
+ /* headphone path R */
+ {"DAC R", NULL, "LDO Regulator"},
+ {"DAC R", NULL, "IBIAS Block"},
+ {"DAC R", NULL, "VAvg Buffer"},
+ {"DAC R", NULL, "PLL Power"},
+ {"DAC R", NULL, "I2S TX1 Transfer Start"},
+ {"DAC R", NULL, "DAC Clock"},
+ {"DAC R", NULL, "I2S RX Clock"},
+ {"DAC R", NULL, "DAC Channel Enable"},
+ {"DAC R", NULL, "I2S RX Channel Enable"},
+ {"DAC R", NULL, "DAC Bias"},
+ {"DAC R", NULL, "DAC Mute Off"},
+ {"DAC R", NULL, "Headphone Charge Pump"},
+ {"DAC R", NULL, "Headphone CP Discharge LDO"},
+ {"DAC R", NULL, "Headphone OStage"},
+ {"DAC R", NULL, "Headphone Pre Amp"},
+
+ /* mux path for output selection */
+ {"Playback Mux", "HP", "DAC L"},
+ {"Playback Mux", "HP", "DAC R"},
+ {"Playback Mux", "SPK", "SPK DAC"},
+ {"SPKO", NULL, "Playback Mux"},
+ {"HPOL", NULL, "Playback Mux"},
+ {"HPOR", NULL, "Playback Mux"},
+};
+
+static int rk817_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = codec_dai->component;
+ struct rk817_codec_priv *rk817 = snd_soc_component_get_drvdata(component);
+
+ rk817->stereo_sysclk = freq;
+
+ return 0;
+}
+
+static int rk817_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_component *component = codec_dai->component;
+ unsigned int i2s_mst = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s_mst |= RK817_I2S_MODE_SLV;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ i2s_mst |= RK817_I2S_MODE_MST;
+ break;
+ default:
+ dev_err(component->dev, "%s: set master mask failed!\n", __func__);
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, RK817_CODEC_DI2S_CKM,
+ RK817_I2S_MODE_MASK, i2s_mst);
+
+ return 0;
+}
+
+static int rk817_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_component_write(component, RK817_CODEC_DI2S_RXCR2,
+ VDW_RX_16BITS);
+ snd_soc_component_write(component, RK817_CODEC_DI2S_TXCR2,
+ VDW_TX_16BITS);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S32_LE:
+ snd_soc_component_write(component, RK817_CODEC_DI2S_RXCR2,
+ VDW_RX_24BITS);
+ snd_soc_component_write(component, RK817_CODEC_DI2S_TXCR2,
+ VDW_TX_24BITS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rk817_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+ struct snd_soc_component *component = dai->component;
+
+ if (mute)
+ snd_soc_component_update_bits(component,
+ RK817_CODEC_DDAC_MUTE_MIXCTL,
+ DACMT_MASK, DACMT_ENABLE);
+ else
+ snd_soc_component_update_bits(component,
+ RK817_CODEC_DDAC_MUTE_MIXCTL,
+ DACMT_MASK, DACMT_DISABLE);
+
+ return 0;
+}
+
+#define RK817_PLAYBACK_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_96000)
+
+#define RK817_CAPTURE_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_96000)
+
+#define RK817_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops rk817_dai_ops = {
+ .hw_params = rk817_hw_params,
+ .set_fmt = rk817_set_dai_fmt,
+ .set_sysclk = rk817_set_dai_sysclk,
+ .mute_stream = rk817_digital_mute,
+ .no_capture_mute = 1,
+};
+
+static struct snd_soc_dai_driver rk817_dai[] = {
+ {
+ .name = "rk817-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = RK817_PLAYBACK_RATES,
+ .formats = RK817_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RK817_CAPTURE_RATES,
+ .formats = RK817_FORMATS,
+ },
+ .ops = &rk817_dai_ops,
+ },
+};
+
+static int rk817_probe(struct snd_soc_component *component)
+{
+ struct rk817_codec_priv *rk817 = snd_soc_component_get_drvdata(component);
+ struct rk808 *rk808 = dev_get_drvdata(component->dev->parent);
+
+ snd_soc_component_init_regmap(component, rk808->regmap);
+ rk817->component = component;
+
+ snd_soc_component_write(component, RK817_CODEC_DTOP_LPT_SRST, 0x40);
+
+ rk817_init(component);
+
+ /* Set initial PLL values so that we can continue to use simple-audio-card.
+ * The values aren't important since no parameters are used.
+ */
+
+ snd_soc_component_set_pll(component, 0, 0, 0, 0);
+
+ return 0;
+}
+
+static void rk817_remove(struct snd_soc_component *component)
+{
+ snd_soc_component_exit_regmap(component);
+}
+
+static const struct snd_soc_component_driver soc_codec_dev_rk817 = {
+ .probe = rk817_probe,
+ .remove = rk817_remove,
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+ .controls = rk817_volume_controls,
+ .num_controls = ARRAY_SIZE(rk817_volume_controls),
+ .dapm_routes = rk817_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rk817_dapm_routes),
+ .dapm_widgets = rk817_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk817_dapm_widgets),
+ .set_pll = rk817_set_component_pll,
+};
+
+static void rk817_codec_parse_dt_property(struct device *dev,
+ struct rk817_codec_priv *rk817)
+{
+ struct device_node *node;
+
+ node = of_get_child_by_name(dev->parent->of_node, "codec");
+ if (!node) {
+ dev_dbg(dev, "%s() Can not get child: codec\n",
+ __func__);
+ }
+
+ rk817->mic_in_differential =
+ of_property_read_bool(node, "rockchip,mic-in-differential");
+
+ of_node_put(node);
+}
+
+static int rk817_platform_probe(struct platform_device *pdev)
+{
+ struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
+ struct rk817_codec_priv *rk817_codec_data;
+ int ret;
+
+ rk817_codec_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct rk817_codec_priv),
+ GFP_KERNEL);
+ if (!rk817_codec_data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rk817_codec_data);
+
+ rk817_codec_data->rk808 = rk808;
+
+ rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
+
+ rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
+ if (IS_ERR(rk817_codec_data->mclk)) {
+ dev_dbg(&pdev->dev, "Unable to get mclk\n");
+ ret = -ENXIO;
+ goto err_;
+ }
+
+ ret = clk_prepare_enable(rk817_codec_data->mclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s() clock prepare error %d\n",
+ __func__, ret);
+ goto err_;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_dev_rk817,
+ rk817_dai, ARRAY_SIZE(rk817_dai));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s() register codec error %d\n",
+ __func__, ret);
+ goto err_;
+ }
+
+ return 0;
+err_:
+
+ return ret;
+}
+
+static int rk817_platform_remove(struct platform_device *pdev)
+{
+ struct rk817_codec_priv *rk817 = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(rk817->mclk);
+
+ return 0;
+}
+
+static struct platform_driver rk817_codec_driver = {
+ .driver = {
+ .name = "rk817-codec",
+ },
+ .probe = rk817_platform_probe,
+ .remove = rk817_platform_remove,
+};
+
+module_platform_driver(rk817_codec_driver);
+
+MODULE_DESCRIPTION("ASoC RK817 codec driver");
+MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>");
+MODULE_LICENSE("GPL v2");
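For reference, the DDAC/DADC volume comment in the rk817 driver above describes a linear code-to-attenuation mapping: 0x00 is 0 dB and each step attenuates by a further 0.375 dB. The short sketch below is only an illustration of that arithmetic and is not part of the patch; the helper name, the main() harness and the assumption that the codec consumes the raw 8-bit code as written are mine.

#include <stdio.h>

/*
 * Illustrative only: convert a desired attenuation in dB to the 8-bit
 * register code used by the DDAC/DADC volume registers described above,
 * assuming 0x00 = 0 dB and 0.375 dB of attenuation per step.
 */
static unsigned int rk817_db_to_code(double db)
{
	double code = -db / 0.375;

	if (code < 0.0)
		code = 0.0;
	if (code > 255.0)
		code = 255.0;
	return (unsigned int)(code + 0.5);
}

int main(void)
{
	printf("  0 dB -> 0x%02x\n", rk817_db_to_code(0.0));	/* 0x00 */
	printf(" -6 dB -> 0x%02x\n", rk817_db_to_code(-6.0));	/* 0x10 */
	printf("-95 dB -> 0x%02x\n", rk817_db_to_code(-95.0));	/* 0xfd */
	return 0;
}

At the documented step size, 0xff works out to about -95.6 dB, which is presumably why the driver comment rounds the bottom of the range to -95 dB.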
diff --git a/sound/soc/codecs/rt1019.c b/sound/soc/codecs/rt1019.c
index 10656a5927f1..8c0b00242bb8 100644
--- a/sound/soc/codecs/rt1019.c
+++ b/sound/soc/codecs/rt1019.c
@@ -372,8 +372,8 @@ static int rt1019_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
RT1019_AUTO_BITS_SEL_MANU | RT1019_AUTO_CLK_SEL_MANU);
snd_soc_component_update_bits(component, RT1019_PLL_1,
RT1019_PLL_M_MASK | RT1019_PLL_M_BP_MASK | RT1019_PLL_Q_8_8_MASK,
- (pll_code.m_bp ? 0 : pll_code.m_code) << RT1019_PLL_M_SFT |
- pll_code.m_bp << RT1019_PLL_M_BP_SFT |
+ ((pll_code.m_bp ? 0 : pll_code.m_code) << RT1019_PLL_M_SFT) |
+ (pll_code.m_bp << RT1019_PLL_M_BP_SFT) |
((pll_code.n_code >> 8) & RT1019_PLL_Q_8_8_MASK));
snd_soc_component_update_bits(component, RT1019_PLL_2,
RT1019_PLL_Q_7_0_MASK, pll_code.n_code & RT1019_PLL_Q_7_0_MASK);
@@ -522,6 +522,7 @@ static const struct snd_soc_component_driver soc_component_dev_rt1019 = {
.num_dapm_widgets = ARRAY_SIZE(rt1019_dapm_widgets),
.dapm_routes = rt1019_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(rt1019_dapm_routes),
+ .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1019_regmap = {
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index 1c226994aebd..f716668de640 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -709,7 +709,7 @@ static int __maybe_unused rt1308_dev_resume(struct device *dev)
struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt1308->hw_init)
+ if (!rt1308->first_hw_init)
return 0;
if (!slave->unattach_request)
diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
index 3b029c56467d..09b4914bba1b 100644
--- a/sound/soc/codecs/rt1316-sdw.c
+++ b/sound/soc/codecs/rt1316-sdw.c
@@ -701,7 +701,7 @@ static int __maybe_unused rt1316_dev_resume(struct device *dev)
struct rt1316_sdw_priv *rt1316 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt1316->hw_init)
+ if (!rt1316->first_hw_init)
return 0;
if (!slave->unattach_request)
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 802f4851c3df..caa720884e3e 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -725,7 +725,6 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- d_len_code = 0;
switch (params_width(params)) {
/* bit 6:4 Bits per Sample */
case 16:
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 3000bc128b5b..38356ea2bd6e 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1695,6 +1695,8 @@ static const struct regmap_config rt5631_regmap_config = {
.reg_defaults = rt5631_reg,
.num_reg_defaults = ARRAY_SIZE(rt5631_reg),
.cache_type = REGCACHE_RBTREE,
+ .use_single_read = true,
+ .use_single_write = true,
};
static int rt5631_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
index 8ea9f1d9fec0..4a56a52adab5 100644
--- a/sound/soc/codecs/rt5682-i2c.c
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -273,12 +273,23 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
{
struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
+ disable_irq(client->irq);
cancel_delayed_work_sync(&rt5682->jack_detect_work);
cancel_delayed_work_sync(&rt5682->jd_check_work);
rt5682_reset(rt5682);
}
+static int rt5682_i2c_remove(struct i2c_client *client)
+{
+ struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
+
+ rt5682_i2c_shutdown(client);
+ regulator_bulk_disable(ARRAY_SIZE(rt5682->supplies), rt5682->supplies);
+
+ return 0;
+}
+
static const struct of_device_id rt5682_of_match[] = {
{.compatible = "realtek,rt5682i"},
{},
@@ -305,6 +316,7 @@ static struct i2c_driver rt5682_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = rt5682_i2c_probe,
+ .remove = rt5682_i2c_remove,
.shutdown = rt5682_i2c_shutdown,
.id_table = rt5682_i2c_id,
};
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index e78ba3b064c4..31a4f286043e 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -344,6 +344,8 @@ static int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
rt5682->sdw_regmap = regmap;
rt5682->is_sdw = true;
+ mutex_init(&rt5682->disable_irq_lock);
+
rt5682->regmap = devm_regmap_init(dev, NULL, dev,
&rt5682_sdw_indirect_regmap);
if (IS_ERR(rt5682->regmap)) {
@@ -378,6 +380,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
int ret = 0, loop = 10;
unsigned int val;
+ rt5682->disable_irq = false;
+
if (rt5682->hw_init)
return 0;
@@ -400,6 +404,11 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
pm_runtime_get_noresume(&slave->dev);
+ if (rt5682->first_hw_init) {
+ regcache_cache_only(rt5682->regmap, false);
+ regcache_cache_bypass(rt5682->regmap, true);
+ }
+
while (loop > 0) {
regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
if (val == DEVICE_ID)
@@ -408,14 +417,11 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
usleep_range(30000, 30005);
loop--;
}
+
if (val != DEVICE_ID) {
dev_err(dev, "Device with ID register %x is not rt5682\n", val);
- return -ENODEV;
- }
-
- if (rt5682->first_hw_init) {
- regcache_cache_only(rt5682->regmap, false);
- regcache_cache_bypass(rt5682->regmap, true);
+ ret = -ENODEV;
+ goto err_nodev;
}
rt5682_calibrate(rt5682);
@@ -486,10 +492,11 @@ reinit:
rt5682->hw_init = true;
rt5682->first_hw_init = true;
+err_nodev:
pm_runtime_mark_last_busy(&slave->dev);
pm_runtime_put_autosuspend(&slave->dev);
- dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
+ dev_dbg(&slave->dev, "%s hw_init complete: %d\n", __func__, ret);
return ret;
}
@@ -676,10 +683,12 @@ static int rt5682_interrupt_callback(struct sdw_slave *slave,
dev_dbg(&slave->dev,
"%s control_port_stat=%x", __func__, status->control_port);
- if (status->control_port & 0x4) {
+ mutex_lock(&rt5682->disable_irq_lock);
+ if (status->control_port & 0x4 && !rt5682->disable_irq) {
mod_delayed_work(system_power_efficient_wq,
&rt5682->jack_detect_work, msecs_to_jiffies(rt5682->irq_work_delay_time));
}
+ mutex_unlock(&rt5682->disable_irq_lock);
return 0;
}
@@ -737,13 +746,41 @@ static int __maybe_unused rt5682_dev_suspend(struct device *dev)
return 0;
}
+static int __maybe_unused rt5682_dev_system_suspend(struct device *dev)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int ret;
+
+ if (!rt5682->hw_init)
+ return 0;
+
+ /*
+ * prevent new interrupts from being handled after the
+ * deferred work completes and before the parent disables
+ * interrupts on the link
+ */
+ mutex_lock(&rt5682->disable_irq_lock);
+ rt5682->disable_irq = true;
+ ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1,
+ SDW_SCP_INT1_IMPL_DEF, 0);
+ mutex_unlock(&rt5682->disable_irq_lock);
+
+ if (ret < 0) {
+ /* log but don't prevent suspend from happening */
+ dev_dbg(&slave->dev, "%s: could not disable imp-def interrupts\n", __func__);
+ }
+
+ return rt5682_dev_suspend(dev);
+}
+
static int __maybe_unused rt5682_dev_resume(struct device *dev)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt5682->hw_init)
+ if (!rt5682->first_hw_init)
return 0;
if (!slave->unattach_request)
@@ -765,7 +802,7 @@ regmap_sync:
}
static const struct dev_pm_ops rt5682_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rt5682_dev_suspend, rt5682_dev_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(rt5682_dev_system_suspend, rt5682_dev_resume)
SET_RUNTIME_PM_OPS(rt5682_dev_suspend, rt5682_dev_resume, NULL)
};
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index e4c91571abae..51ecaa2abcd1 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -44,6 +44,7 @@ static const struct reg_sequence patch_list[] = {
{RT5682_I2C_CTRL, 0x000f},
{RT5682_PLL2_INTERNAL, 0x8266},
{RT5682_SAR_IL_CMD_3, 0x8365},
+ {RT5682_SAR_IL_CMD_6, 0x0180},
};
void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
@@ -973,10 +974,14 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
- if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
+ if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0);
- if (!snd_soc_dapm_get_pin_status(dapm, "Vref2"))
+ if (!snd_soc_dapm_get_pin_status(dapm, "Vref2") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+ !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 74ff66767016..b59221048ebf 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1415,6 +1415,8 @@ struct rt5682_priv {
struct regulator_bulk_data supplies[RT5682_NUM_SUPPLIES];
struct delayed_work jack_detect_work;
struct delayed_work jd_check_work;
+ struct mutex disable_irq_lock; /* imp-def irq lock protection */
+ bool disable_irq;
struct mutex calibrate_mutex;
struct sdw_slave *slave;
enum sdw_slave_status status;
diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
index ff9c081fd52a..bda594899664 100644
--- a/sound/soc/codecs/rt700-sdw.c
+++ b/sound/soc/codecs/rt700-sdw.c
@@ -418,10 +418,12 @@ static int rt700_interrupt_callback(struct sdw_slave *slave,
dev_dbg(&slave->dev,
"%s control_port_stat=%x", __func__, status->control_port);
- if (status->control_port & 0x4) {
+ mutex_lock(&rt700->disable_irq_lock);
+ if (status->control_port & 0x4 && !rt700->disable_irq) {
mod_delayed_work(system_power_efficient_wq,
&rt700->jack_detect_work, msecs_to_jiffies(250));
}
+ mutex_unlock(&rt700->disable_irq_lock);
return 0;
}
@@ -490,6 +492,34 @@ static int __maybe_unused rt700_dev_suspend(struct device *dev)
return 0;
}
+static int __maybe_unused rt700_dev_system_suspend(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct rt700_priv *rt700 = dev_get_drvdata(dev);
+ int ret;
+
+ if (!rt700->hw_init)
+ return 0;
+
+ /*
+ * prevent new interrupts from being handled after the
+ * deferred work completes and before the parent disables
+ * interrupts on the link
+ */
+ mutex_lock(&rt700->disable_irq_lock);
+ rt700->disable_irq = true;
+ ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1,
+ SDW_SCP_INT1_IMPL_DEF, 0);
+ mutex_unlock(&rt700->disable_irq_lock);
+
+ if (ret < 0) {
+ /* log but don't prevent suspend from happening */
+ dev_dbg(&slave->dev, "%s: could not disable imp-def interrupts\n", __func__);
+ }
+
+ return rt700_dev_suspend(dev);
+}
+
#define RT700_PROBE_TIMEOUT 5000
static int __maybe_unused rt700_dev_resume(struct device *dev)
@@ -498,7 +528,7 @@ static int __maybe_unused rt700_dev_resume(struct device *dev)
struct rt700_priv *rt700 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt700->hw_init)
+ if (!rt700->first_hw_init)
return 0;
if (!slave->unattach_request)
@@ -521,7 +551,7 @@ regmap_sync:
}
static const struct dev_pm_ops rt700_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rt700_dev_suspend, rt700_dev_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(rt700_dev_system_suspend, rt700_dev_resume)
SET_RUNTIME_PM_OPS(rt700_dev_suspend, rt700_dev_resume, NULL)
};
diff --git a/sound/soc/codecs/rt700.c b/sound/soc/codecs/rt700.c
index 01af9d9dd3ca..921382724f9c 100644
--- a/sound/soc/codecs/rt700.c
+++ b/sound/soc/codecs/rt700.c
@@ -1112,6 +1112,8 @@ int rt700_init(struct device *dev, struct regmap *sdw_regmap,
rt700->sdw_regmap = sdw_regmap;
rt700->regmap = regmap;
+ mutex_init(&rt700->disable_irq_lock);
+
/*
* Mark hw_init to false
* HW init will be performed when device reports present
@@ -1133,6 +1135,8 @@ int rt700_io_init(struct device *dev, struct sdw_slave *slave)
{
struct rt700_priv *rt700 = dev_get_drvdata(dev);
+ rt700->disable_irq = false;
+
if (rt700->hw_init)
return 0;
diff --git a/sound/soc/codecs/rt700.h b/sound/soc/codecs/rt700.h
index 794ee2e29051..bed9d1de6d5b 100644
--- a/sound/soc/codecs/rt700.h
+++ b/sound/soc/codecs/rt700.h
@@ -23,6 +23,8 @@ struct rt700_priv {
struct delayed_work jack_detect_work;
struct delayed_work jack_btn_check_work;
int jack_type;
+ struct mutex disable_irq_lock; /* imp-def irq lock protection */
+ bool disable_irq;
};
struct sdw_stream_data {
diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
index 9685c8905468..aaf5af153d3f 100644
--- a/sound/soc/codecs/rt711-sdca-sdw.c
+++ b/sound/soc/codecs/rt711-sdca-sdw.c
@@ -75,6 +75,16 @@ static bool rt711_sdca_mbq_readable_register(struct device *dev, unsigned int re
case 0x5b00000 ... 0x5b000ff:
case 0x5f00000 ... 0x5f000ff:
case 0x6100000 ... 0x61000ff:
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU05, RT711_SDCA_CTL_FU_VOLUME, CH_L):
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU05, RT711_SDCA_CTL_FU_VOLUME, CH_R):
+ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E, RT711_SDCA_CTL_FU_VOLUME, CH_L):
+ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E, RT711_SDCA_CTL_FU_VOLUME, CH_R):
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU0F, RT711_SDCA_CTL_FU_VOLUME, CH_L):
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU0F, RT711_SDCA_CTL_FU_VOLUME, CH_R):
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_PLATFORM_FU44, RT711_SDCA_CTL_FU_CH_GAIN, CH_L):
+ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_PLATFORM_FU44, RT711_SDCA_CTL_FU_CH_GAIN, CH_R):
+ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_PLATFORM_FU15, RT711_SDCA_CTL_FU_CH_GAIN, CH_L):
+ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_PLATFORM_FU15, RT711_SDCA_CTL_FU_CH_GAIN, CH_R):
return true;
default:
return false;
@@ -246,6 +256,15 @@ static int rt711_sdca_interrupt_callback(struct sdw_slave *slave,
scp_sdca_stat2 = rt711->scp_sdca_stat2;
}
+ /*
+ * The critical section below intentionally protects a rather large piece of code.
+ * We don't want to allow the system suspend to disable an interrupt while we are
+ * processing it, which could be problematic given the quirky SoundWire interrupt
+ * scheme. We do, however, want to prevent new work from being scheduled if
+ * the disable_irq flag was set during system suspend.
+ */
+ mutex_lock(&rt711->disable_irq_lock);
+
ret = sdw_read_no_pm(rt711->slave, SDW_SCP_SDCA_INT1);
if (ret < 0)
goto io_error;
@@ -304,13 +323,16 @@ static int rt711_sdca_interrupt_callback(struct sdw_slave *slave,
"%s scp_sdca_stat1=0x%x, scp_sdca_stat2=0x%x\n", __func__,
rt711->scp_sdca_stat1, rt711->scp_sdca_stat2);
- if (status->sdca_cascade)
+ if (status->sdca_cascade && !rt711->disable_irq)
mod_delayed_work(system_power_efficient_wq,
&rt711->jack_detect_work, msecs_to_jiffies(30));
+ mutex_unlock(&rt711->disable_irq_lock);
+
return 0;
io_error:
+ mutex_unlock(&rt711->disable_irq_lock);
pr_err_ratelimited("IO error in %s, ret %d\n", __func__, ret);
return ret;
}
@@ -372,6 +394,36 @@ static int __maybe_unused rt711_sdca_dev_suspend(struct device *dev)
return 0;
}
+static int __maybe_unused rt711_sdca_dev_system_suspend(struct device *dev)
+{
+ struct rt711_sdca_priv *rt711_sdca = dev_get_drvdata(dev);
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int ret1, ret2;
+
+ if (!rt711_sdca->hw_init)
+ return 0;
+
+ /*
+ * prevent new interrupts from being handled after the
+ * deferred work completes and before the parent disables
+ * interrupts on the link
+ */
+ mutex_lock(&rt711_sdca->disable_irq_lock);
+ rt711_sdca->disable_irq = true;
+ ret1 = sdw_update_no_pm(slave, SDW_SCP_SDCA_INTMASK1,
+ SDW_SCP_SDCA_INTMASK_SDCA_0, 0);
+ ret2 = sdw_update_no_pm(slave, SDW_SCP_SDCA_INTMASK2,
+ SDW_SCP_SDCA_INTMASK_SDCA_8, 0);
+ mutex_unlock(&rt711_sdca->disable_irq_lock);
+
+ if (ret1 < 0 || ret2 < 0) {
+ /* log but don't prevent suspend from happening */
+ dev_dbg(&slave->dev, "%s: could not disable SDCA interrupts\n", __func__);
+ }
+
+ return rt711_sdca_dev_suspend(dev);
+}
+
#define RT711_PROBE_TIMEOUT 5000
static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
@@ -380,7 +432,7 @@ static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
struct rt711_sdca_priv *rt711 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt711->hw_init)
+ if (!rt711->first_hw_init)
return 0;
if (!slave->unattach_request)
@@ -403,7 +455,7 @@ regmap_sync:
}
static const struct dev_pm_ops rt711_sdca_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rt711_sdca_dev_suspend, rt711_sdca_dev_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(rt711_sdca_dev_system_suspend, rt711_sdca_dev_resume)
SET_RUNTIME_PM_OPS(rt711_sdca_dev_suspend, rt711_sdca_dev_resume, NULL)
};
diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c
index 24a084e0b48a..2e992589f1e4 100644
--- a/sound/soc/codecs/rt711-sdca.c
+++ b/sound/soc/codecs/rt711-sdca.c
@@ -1411,6 +1411,8 @@ int rt711_sdca_init(struct device *dev, struct regmap *regmap,
rt711->regmap = regmap;
rt711->mbq_regmap = mbq_regmap;
+ mutex_init(&rt711->disable_irq_lock);
+
/*
* Mark hw_init to false
* HW init will be performed when device reports present
@@ -1494,12 +1496,16 @@ int rt711_sdca_io_init(struct device *dev, struct sdw_slave *slave)
int ret = 0;
unsigned int val;
+ rt711->disable_irq = false;
+
if (rt711->hw_init)
return 0;
if (rt711->first_hw_init) {
regcache_cache_only(rt711->regmap, false);
regcache_cache_bypass(rt711->regmap, true);
+ regcache_cache_only(rt711->mbq_regmap, false);
+ regcache_cache_bypass(rt711->mbq_regmap, true);
} else {
/*
* PM runtime is only enabled when a Slave reports as Attached
@@ -1565,6 +1571,8 @@ int rt711_sdca_io_init(struct device *dev, struct sdw_slave *slave)
if (rt711->first_hw_init) {
regcache_cache_bypass(rt711->regmap, false);
regcache_mark_dirty(rt711->regmap);
+ regcache_cache_bypass(rt711->mbq_regmap, false);
+ regcache_mark_dirty(rt711->mbq_regmap);
} else
rt711->first_hw_init = true;
diff --git a/sound/soc/codecs/rt711-sdca.h b/sound/soc/codecs/rt711-sdca.h
index 43ae82b7fdb3..498ca687c47b 100644
--- a/sound/soc/codecs/rt711-sdca.h
+++ b/sound/soc/codecs/rt711-sdca.h
@@ -27,6 +27,8 @@ struct rt711_sdca_priv {
struct delayed_work jack_detect_work;
struct delayed_work jack_btn_check_work;
struct mutex calibrate_mutex; /* for headset calibration */
+ struct mutex disable_irq_lock; /* SDCA irq lock protection */
+ bool disable_irq;
int jack_type, jd_src;
unsigned int scp_sdca_stat1, scp_sdca_stat2;
int hw_ver;
diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
index 8f5ebe92d407..bda2cc9439c9 100644
--- a/sound/soc/codecs/rt711-sdw.c
+++ b/sound/soc/codecs/rt711-sdw.c
@@ -423,10 +423,12 @@ static int rt711_interrupt_callback(struct sdw_slave *slave,
dev_dbg(&slave->dev,
"%s control_port_stat=%x", __func__, status->control_port);
- if (status->control_port & 0x4) {
+ mutex_lock(&rt711->disable_irq_lock);
+ if (status->control_port & 0x4 && !rt711->disable_irq) {
mod_delayed_work(system_power_efficient_wq,
&rt711->jack_detect_work, msecs_to_jiffies(250));
}
+ mutex_unlock(&rt711->disable_irq_lock);
return 0;
}
@@ -493,6 +495,34 @@ static int __maybe_unused rt711_dev_suspend(struct device *dev)
return 0;
}
+static int __maybe_unused rt711_dev_system_suspend(struct device *dev)
+{
+ struct rt711_priv *rt711 = dev_get_drvdata(dev);
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int ret;
+
+ if (!rt711->hw_init)
+ return 0;
+
+ /*
+ * prevent new interrupts from being handled after the
+ * deferred work completes and before the parent disables
+ * interrupts on the link
+ */
+ mutex_lock(&rt711->disable_irq_lock);
+ rt711->disable_irq = true;
+ ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1,
+ SDW_SCP_INT1_IMPL_DEF, 0);
+ mutex_unlock(&rt711->disable_irq_lock);
+
+ if (ret < 0) {
+ /* log but don't prevent suspend from happening */
+ dev_dbg(&slave->dev, "%s: could not disable imp-def interrupts\n", __func__);
+ }
+
+ return rt711_dev_suspend(dev);
+}
+
#define RT711_PROBE_TIMEOUT 5000
static int __maybe_unused rt711_dev_resume(struct device *dev)
@@ -501,7 +531,7 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
struct rt711_priv *rt711 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt711->hw_init)
+ if (!rt711->first_hw_init)
return 0;
if (!slave->unattach_request)
@@ -524,7 +554,7 @@ regmap_sync:
}
static const struct dev_pm_ops rt711_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rt711_dev_suspend, rt711_dev_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(rt711_dev_system_suspend, rt711_dev_resume)
SET_RUNTIME_PM_OPS(rt711_dev_suspend, rt711_dev_resume, NULL)
};
diff --git a/sound/soc/codecs/rt711-sdw.h b/sound/soc/codecs/rt711-sdw.h
index 43b2b984b29c..6acf9858330d 100644
--- a/sound/soc/codecs/rt711-sdw.h
+++ b/sound/soc/codecs/rt711-sdw.h
@@ -267,7 +267,9 @@ static const struct reg_default rt711_reg_defaults[] = {
{ 0x8393, 0x00 },
{ 0x7319, 0x00 },
{ 0x8399, 0x00 },
+ { 0x752008, 0xa807 },
{ 0x752009, 0x1029 },
+ { 0x75200b, 0x7770 },
{ 0x752011, 0x007a },
{ 0x75201a, 0x8003 },
{ 0x752045, 0x5289 },
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 9f5b2dc16c54..a7c5608a0ef8 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -389,6 +389,36 @@ static void rt711_jack_init(struct rt711_priv *rt711)
RT711_HP_JD_FINAL_RESULT_CTL_JD12,
RT711_HP_JD_FINAL_RESULT_CTL_JD12);
break;
+ case RT711_JD2_100K:
+ rt711_index_update_bits(rt711->regmap, RT711_VENDOR_REG,
+ RT711_JD_CTL2, RT711_JD2_2PORT_100K_DECODE | RT711_JD2_1PORT_TYPE_DECODE |
+ RT711_HP_JD_SEL_JD2 | RT711_JD1_2PORT_TYPE_100K_DECODE,
+ RT711_JD2_2PORT_100K_DECODE_HP | RT711_JD2_1PORT_JD_HP |
+ RT711_HP_JD_SEL_JD2 | RT711_JD1_2PORT_JD_RESERVED);
+ rt711_index_update_bits(rt711->regmap, RT711_VENDOR_REG,
+ RT711_CC_DET1,
+ RT711_HP_JD_FINAL_RESULT_CTL_JD12,
+ RT711_HP_JD_FINAL_RESULT_CTL_JD12);
+ break;
+ case RT711_JD2_1P8V_1PORT:
+ rt711_index_update_bits(rt711->regmap, RT711_VENDOR_REG,
+ RT711_JD_CTL1, RT711_JD2_DIGITAL_JD_MODE_SEL,
+ RT711_JD2_1_JD_MODE);
+ rt711_index_update_bits(rt711->regmap, RT711_VENDOR_REG,
+ RT711_JD_CTL2, RT711_JD2_1PORT_TYPE_DECODE |
+ RT711_HP_JD_SEL_JD2,
+ RT711_JD2_1PORT_JD_HP |
+ RT711_HP_JD_SEL_JD2);
+ rt711_index_update_bits(rt711->regmap, RT711_VENDOR_REG,
+ RT711_JD_CTL4, RT711_JD2_PAD_PULL_UP_MASK |
+ RT711_JD2_MODE_SEL_MASK,
+ RT711_JD2_PAD_PULL_UP |
+ RT711_JD2_MODE2_1P8V_1PORT);
+ rt711_index_update_bits(rt711->regmap, RT711_VENDOR_REG,
+ RT711_CC_DET1,
+ RT711_HP_JD_FINAL_RESULT_CTL_JD12,
+ RT711_HP_JD_FINAL_RESULT_CTL_JD12);
+ break;
default:
dev_warn(rt711->component->dev, "Wrong JD source\n");
break;
@@ -1166,6 +1196,8 @@ int rt711_init(struct device *dev, struct regmap *sdw_regmap,
rt711->sdw_regmap = sdw_regmap;
rt711->regmap = regmap;
+ mutex_init(&rt711->disable_irq_lock);
+
/*
* Mark hw_init to false
* HW init will be performed when device reports present
@@ -1190,6 +1222,8 @@ int rt711_io_init(struct device *dev, struct sdw_slave *slave)
{
struct rt711_priv *rt711 = dev_get_drvdata(dev);
+ rt711->disable_irq = false;
+
if (rt711->hw_init)
return 0;
diff --git a/sound/soc/codecs/rt711.h b/sound/soc/codecs/rt711.h
index ca0f581feec7..f50f8c8d0934 100644
--- a/sound/soc/codecs/rt711.h
+++ b/sound/soc/codecs/rt711.h
@@ -25,6 +25,8 @@ struct rt711_priv {
struct work_struct calibration_work;
struct mutex calibrate_mutex; /* for headset calibration */
int jack_type, jd_src;
+ struct mutex disable_irq_lock; /* imp-def irq lock protection */
+ bool disable_irq;
};
struct sdw_stream_data {
@@ -52,7 +54,9 @@ struct sdw_stream_data {
/* Index (NID:20h) */
#define RT711_DAC_DC_CALI_CTL1 0x00
+#define RT711_JD_CTL1 0x08
#define RT711_JD_CTL2 0x09
+#define RT711_JD_CTL4 0x0b
#define RT711_CC_DET1 0x11
#define RT711_PARA_VERB_CTL 0x1a
#define RT711_COMBO_JACK_AUTO_CTL1 0x45
@@ -171,10 +175,33 @@ struct sdw_stream_data {
/* DAC DC offset calibration control-1 (0x00)(NID:20h) */
#define RT711_DAC_DC_CALI_TRIGGER (0x1 << 15)
+/* jack detect control 1 (0x08)(NID:20h) */
+#define RT711_JD2_DIGITAL_JD_MODE_SEL (0x1 << 1)
+#define RT711_JD2_1_JD_MODE (0x0 << 1)
+#define RT711_JD2_2_JD_MODE (0x1 << 1)
+
/* jack detect control 2 (0x09)(NID:20h) */
#define RT711_JD2_2PORT_200K_DECODE_HP (0x1 << 13)
+#define RT711_JD2_2PORT_100K_DECODE (0x1 << 12)
+#define RT711_JD2_2PORT_100K_DECODE_HP (0x0 << 12)
#define RT711_HP_JD_SEL_JD1 (0x0 << 1)
#define RT711_HP_JD_SEL_JD2 (0x1 << 1)
+#define RT711_JD2_1PORT_TYPE_DECODE (0x3 << 10)
+#define RT711_JD2_1PORT_JD_LINE2 (0x0 << 10)
+#define RT711_JD2_1PORT_JD_HP (0x1 << 10)
+#define RT711_JD2_1PORT_JD_LINE1 (0x2 << 10)
+#define RT711_JD1_2PORT_TYPE_100K_DECODE (0x1 << 0)
+#define RT711_JD1_2PORT_JD_RESERVED (0x0 << 0)
+#define RT711_JD1_2PORT_JD_LINE1 (0x1 << 0)
+
+/* jack detect control 4 (0x0b)(NID:20h) */
+#define RT711_JD2_PAD_PULL_UP_MASK (0x1 << 3)
+#define RT711_JD2_PAD_NOT_PULL_UP (0x0 << 3)
+#define RT711_JD2_PAD_PULL_UP (0x1 << 3)
+#define RT711_JD2_MODE_SEL_MASK (0x3 << 0)
+#define RT711_JD2_MODE0_2PORT (0x0 << 0)
+#define RT711_JD2_MODE1_3P3V_1PORT (0x1 << 0)
+#define RT711_JD2_MODE2_1P8V_1PORT (0x2 << 0)
/* CC DET1 (0x11)(NID:20h) */
#define RT711_HP_JD_FINAL_RESULT_CTL_JD12 (0x1 << 10)
@@ -215,7 +242,9 @@ enum {
enum rt711_jd_src {
RT711_JD_NULL,
RT711_JD1,
- RT711_JD2
+ RT711_JD2,
+ RT711_JD2_100K,
+ RT711_JD2_1P8V_1PORT
};
int rt711_io_init(struct device *dev, struct sdw_slave *slave);
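The rt5682-sdw, rt700 and rt711 changes above all add the same suspend-versus-interrupt pattern: a driver-private mutex plus a disable_irq flag, taken both in the interrupt callback (before queueing the jack-detect work) and in the new system-suspend handler (before masking the implementation-defined interrupts at the source). The sketch below is a hedged, user-space illustration of that pattern only; it uses pthreads, invented names and printf in place of the real SoundWire and workqueue calls, and is not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct codec {
	pthread_mutex_t disable_irq_lock;
	bool disable_irq;
};

static void queue_jack_detect_work(void)
{
	printf("jack detect work queued\n");
}

/* Loosely analogous to the interrupt callbacks patched above. */
static void interrupt_callback(struct codec *c, bool jack_event)
{
	pthread_mutex_lock(&c->disable_irq_lock);
	if (jack_event && !c->disable_irq)
		queue_jack_detect_work();
	pthread_mutex_unlock(&c->disable_irq_lock);
}

/* Loosely analogous to the new *_dev_system_suspend() handlers. */
static void system_suspend(struct codec *c)
{
	pthread_mutex_lock(&c->disable_irq_lock);
	c->disable_irq = true;
	/* the real drivers also mask the imp-def interrupt sources here */
	pthread_mutex_unlock(&c->disable_irq_lock);
}

int main(void)
{
	struct codec c = { PTHREAD_MUTEX_INITIALIZER, false };

	interrupt_callback(&c, true);	/* work is queued */
	system_suspend(&c);
	interrupt_callback(&c, true);	/* ignored: disable_irq is set */
	return 0;
}

Taking the same lock in both paths is what closes the window in which an interrupt arriving during suspend could schedule work after the parent has already disabled interrupts on the link.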
diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
index 1350798406f0..a5c673f43d82 100644
--- a/sound/soc/codecs/rt715-sdca-sdw.c
+++ b/sound/soc/codecs/rt715-sdca-sdw.c
@@ -70,6 +70,7 @@ static bool rt715_sdca_mbq_readable_register(struct device *dev, unsigned int re
case 0x2000036:
case 0x2000037:
case 0x2000039:
+ case 0x2000044:
case 0x6100000:
return true;
default:
@@ -224,7 +225,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
struct rt715_sdca_priv *rt715 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt715->hw_init)
+ if (!rt715->first_hw_init)
return 0;
if (!slave->unattach_request)
diff --git a/sound/soc/codecs/rt715-sdca-sdw.h b/sound/soc/codecs/rt715-sdca-sdw.h
index cd365bb60747..0cbc14844f8c 100644
--- a/sound/soc/codecs/rt715-sdca-sdw.h
+++ b/sound/soc/codecs/rt715-sdca-sdw.h
@@ -113,6 +113,7 @@ static const struct reg_default rt715_mbq_reg_defaults_sdca[] = {
{ 0x2000036, 0x0000 },
{ 0x2000037, 0x0000 },
{ 0x2000039, 0xaa81 },
+ { 0x2000044, 0x0202 },
{ 0x6100000, 0x0100 },
{ SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC8_9_VOL,
RT715_SDCA_FU_VOL_CTRL, CH_01), 0x00 },
diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c
index 7db76c19e048..66e166568c50 100644
--- a/sound/soc/codecs/rt715-sdca.c
+++ b/sound/soc/codecs/rt715-sdca.c
@@ -997,7 +997,7 @@ int rt715_sdca_init(struct device *dev, struct regmap *mbq_regmap,
* HW init will be performed when device reports present
*/
rt715->hw_init = false;
- rt715->first_init = false;
+ rt715->first_hw_init = false;
ret = devm_snd_soc_register_component(dev,
&soc_codec_dev_rt715_sdca,
@@ -1018,7 +1018,7 @@ int rt715_sdca_io_init(struct device *dev, struct sdw_slave *slave)
/*
* PM runtime is only enabled when a Slave reports as Attached
*/
- if (!rt715->first_init) {
+ if (!rt715->first_hw_init) {
/* set autosuspend parameters */
pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
pm_runtime_use_autosuspend(&slave->dev);
@@ -1031,7 +1031,7 @@ int rt715_sdca_io_init(struct device *dev, struct sdw_slave *slave)
pm_runtime_enable(&slave->dev);
- rt715->first_init = true;
+ rt715->first_hw_init = true;
}
pm_runtime_get_noresume(&slave->dev);
@@ -1054,6 +1054,9 @@ int rt715_sdca_io_init(struct device *dev, struct sdw_slave *slave)
rt715_sdca_index_update_bits(rt715, RT715_VENDOR_REG,
RT715_REV_1, 0x40, 0x40);
}
+ /* DFLL Calibration trigger */
+ rt715_sdca_index_update_bits(rt715, RT715_VENDOR_REG,
+ RT715_DFLL_VAD, 0x1, 0x1);
/* trigger mode = VAD enable */
regmap_write(rt715->regmap,
SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_SMPU_TRIG_ST_EN,
diff --git a/sound/soc/codecs/rt715-sdca.h b/sound/soc/codecs/rt715-sdca.h
index 85ce4d95e5eb..90881b455ece 100644
--- a/sound/soc/codecs/rt715-sdca.h
+++ b/sound/soc/codecs/rt715-sdca.h
@@ -27,7 +27,7 @@ struct rt715_sdca_priv {
enum sdw_slave_status status;
struct sdw_bus_params params;
bool hw_init;
- bool first_init;
+ bool first_hw_init;
int l_is_unmute;
int r_is_unmute;
int hw_sdw_ver;
@@ -81,6 +81,7 @@ struct rt715_sdca_kcontrol_private {
#define RT715_AD_FUNC_EN 0x36
#define RT715_REV_1 0x37
#define RT715_SDW_INPUT_SEL 0x39
+#define RT715_DFLL_VAD 0x44
#define RT715_EXT_DMIC_CLK_CTRL2 0x54
/* Index (NID:61h) */
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index 81a1dd77b6f6..a7b21b03c08b 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -541,7 +541,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
struct rt715_priv *rt715 = dev_get_drvdata(dev);
unsigned long time;
- if (!rt715->hw_init)
+ if (!rt715->first_hw_init)
return 0;
if (!slave->unattach_request)
diff --git a/sound/soc/codecs/sigmadsp.h b/sound/soc/codecs/sigmadsp.h
index d63b8c366efb..2783eff633a1 100644
--- a/sound/soc/codecs/sigmadsp.h
+++ b/sound/soc/codecs/sigmadsp.h
@@ -44,7 +44,6 @@ struct sigmadsp {
struct sigmadsp *devm_sigmadsp_init(struct device *dev,
const struct sigmadsp_ops *ops, const char *firmware_name);
-void sigmadsp_reset(struct sigmadsp *sigmadsp);
int sigmadsp_restrict_params(struct sigmadsp *sigmadsp,
struct snd_pcm_substream *substream);
diff --git a/sound/soc/codecs/tfa989x.c b/sound/soc/codecs/tfa989x.c
new file mode 100644
index 000000000000..643b45188b6f
--- /dev/null
+++ b/sound/soc/codecs/tfa989x.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Stephan Gerhold
+ *
+ * Register definitions/sequences taken from various tfa98xx kernel drivers:
+ * Copyright (C) 2014-2020 NXP Semiconductors, All Rights Reserved.
+ * Copyright (C) 2013 Sony Mobile Communications Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <sound/soc.h>
+
+#define TFA989X_STATUSREG 0x00
+#define TFA989X_BATTERYVOLTAGE 0x01
+#define TFA989X_TEMPERATURE 0x02
+#define TFA989X_REVISIONNUMBER 0x03
+#define TFA989X_REVISIONNUMBER_REV_MSK GENMASK(7, 0) /* device revision */
+#define TFA989X_I2SREG 0x04
+#define TFA989X_I2SREG_CHSA 6 /* amplifier input select */
+#define TFA989X_I2SREG_CHSA_MSK GENMASK(7, 6)
+#define TFA989X_I2SREG_I2SSR 12 /* sample rate */
+#define TFA989X_I2SREG_I2SSR_MSK GENMASK(15, 12)
+#define TFA989X_BAT_PROT 0x05
+#define TFA989X_AUDIO_CTR 0x06
+#define TFA989X_DCDCBOOST 0x07
+#define TFA989X_SPKR_CALIBRATION 0x08
+#define TFA989X_SYS_CTRL 0x09
+#define TFA989X_SYS_CTRL_PWDN 0 /* power down */
+#define TFA989X_SYS_CTRL_I2CR 1 /* I2C reset */
+#define TFA989X_SYS_CTRL_CFE 2 /* enable CoolFlux DSP */
+#define TFA989X_SYS_CTRL_AMPE 3 /* enable amplifier */
+#define TFA989X_SYS_CTRL_DCA 4 /* enable boost */
+#define TFA989X_SYS_CTRL_SBSL 5 /* DSP configured */
+#define TFA989X_SYS_CTRL_AMPC 6 /* amplifier enabled by DSP */
+#define TFA989X_I2S_SEL_REG 0x0a
+#define TFA989X_I2S_SEL_REG_SPKR_MSK GENMASK(10, 9) /* speaker impedance */
+#define TFA989X_I2S_SEL_REG_DCFG_MSK GENMASK(14, 11) /* DCDC compensation */
+#define TFA989X_PWM_CONTROL 0x41
+#define TFA989X_CURRENTSENSE1 0x46
+#define TFA989X_CURRENTSENSE2 0x47
+#define TFA989X_CURRENTSENSE3 0x48
+#define TFA989X_CURRENTSENSE4 0x49
+
+#define TFA9895_REVISION 0x12
+#define TFA9897_REVISION 0x97
+
+struct tfa989x_rev {
+ unsigned int rev;
+ int (*init)(struct regmap *regmap);
+};
+
+struct tfa989x {
+ struct regulator *vddd_supply;
+};
+
+static bool tfa989x_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg > TFA989X_REVISIONNUMBER;
+}
+
+static bool tfa989x_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg < TFA989X_REVISIONNUMBER;
+}
+
+static const struct regmap_config tfa989x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+
+ .writeable_reg = tfa989x_writeable_reg,
+ .volatile_reg = tfa989x_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const char * const chsa_text[] = { "Left", "Right", /* "DSP" */ };
+static SOC_ENUM_SINGLE_DECL(chsa_enum, TFA989X_I2SREG, TFA989X_I2SREG_CHSA, chsa_text);
+static const struct snd_kcontrol_new chsa_mux = SOC_DAPM_ENUM("Amp Input", chsa_enum);
+
+static const struct snd_soc_dapm_widget tfa989x_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("OUT"),
+ SND_SOC_DAPM_SUPPLY("POWER", TFA989X_SYS_CTRL, TFA989X_SYS_CTRL_PWDN, 1, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("AMPE", TFA989X_SYS_CTRL, TFA989X_SYS_CTRL_AMPE, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("Amp Input", SND_SOC_NOPM, 0, 0, &chsa_mux),
+ SND_SOC_DAPM_AIF_IN("AIFINL", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIFINR", "HiFi Playback", 1, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route tfa989x_dapm_routes[] = {
+ {"OUT", NULL, "AMPE"},
+ {"AMPE", NULL, "POWER"},
+ {"AMPE", NULL, "Amp Input"},
+ {"Amp Input", "Left", "AIFINL"},
+ {"Amp Input", "Right", "AIFINR"},
+};
+
+static const struct snd_soc_component_driver tfa989x_component = {
+ .dapm_widgets = tfa989x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tfa989x_dapm_widgets),
+ .dapm_routes = tfa989x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tfa989x_dapm_routes),
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const unsigned int tfa989x_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static int tfa989x_find_sample_rate(unsigned int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tfa989x_rates); ++i)
+ if (tfa989x_rates[i] == rate)
+ return i;
+
+ return -EINVAL;
+}
+
+static int tfa989x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ int sr;
+
+ sr = tfa989x_find_sample_rate(params_rate(params));
+ if (sr < 0)
+ return sr;
+
+ return snd_soc_component_update_bits(component, TFA989X_I2SREG,
+ TFA989X_I2SREG_I2SSR_MSK,
+ sr << TFA989X_I2SREG_I2SSR);
+}
+
+static const struct snd_soc_dai_ops tfa989x_dai_ops = {
+ .hw_params = tfa989x_hw_params,
+};
+
+static struct snd_soc_dai_driver tfa989x_dai = {
+ .name = "tfa989x-hifi",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &tfa989x_dai_ops,
+};
+
+static const struct reg_sequence tfa9895_reg_init[] = {
+ /* some other registers must be set for optimal amplifier behaviour */
+ { TFA989X_BAT_PROT, 0x13ab },
+ { TFA989X_AUDIO_CTR, 0x001f },
+
+ /* peak voltage protection is always on, but may be written */
+ { TFA989X_SPKR_CALIBRATION, 0x3c4e },
+
+ /* TFA989X_SYS_CTRL_DCA = 0 */
+ { TFA989X_SYS_CTRL, 0x024d },
+ { TFA989X_PWM_CONTROL, 0x0308 },
+ { TFA989X_CURRENTSENSE4, 0x0e82 },
+};
+
+static int tfa9895_init(struct regmap *regmap)
+{
+ return regmap_multi_reg_write(regmap, tfa9895_reg_init,
+ ARRAY_SIZE(tfa9895_reg_init));
+}
+
+static const struct tfa989x_rev tfa9895_rev = {
+ .rev = TFA9895_REVISION,
+ .init = tfa9895_init,
+};
+
+static int tfa9897_init(struct regmap *regmap)
+{
+ int ret;
+
+ /* Reduce slew rate by clearing iddqtestbst to avoid booster damage */
+ ret = regmap_write(regmap, TFA989X_CURRENTSENSE3, 0x0300);
+ if (ret)
+ return ret;
+
+ /* Enable clipping */
+ ret = regmap_clear_bits(regmap, TFA989X_CURRENTSENSE4, 0x1);
+ if (ret)
+ return ret;
+
+ /* Set required TDM configuration */
+ return regmap_write(regmap, 0x14, 0x0);
+}
+
+static const struct tfa989x_rev tfa9897_rev = {
+ .rev = TFA9897_REVISION,
+ .init = tfa9897_init,
+};
+
+/*
+ * Note: At the moment this driver bypasses the "CoolFlux DSP" built into the
+ * TFA989X amplifiers. Unfortunately, there seems to be absolutely
+ * no documentation for it - the public "short datasheets" do not provide
+ * any information about the DSP or available registers.
+ *
+ * Usually the TFA989X amplifiers are configured through proprietary userspace
+ * libraries. There are also some (rather complex) kernel drivers but even those
+ * rely on obscure firmware blobs for configuration (so-called "containers").
+ * They seem to contain different "profiles" with tuned speaker settings, sample
+ * rates and volume steps (which would be better exposed as separate ALSA mixers).
+ *
+ * Bypassing the DSP disables volume control (and perhaps some speaker
+ * optimization?), but at least allows using the speaker without obscure
+ * kernel drivers and firmware.
+ *
+ * Ideally NXP (or now Goodix) should release proper documentation for these
+ * amplifiers so that support for the "CoolFlux DSP" can be implemented properly.
+ */
+static int tfa989x_dsp_bypass(struct regmap *regmap)
+{
+ int ret;
+
+ /* Clear CHSA to bypass DSP and take input from I2S 1 left channel */
+ ret = regmap_clear_bits(regmap, TFA989X_I2SREG, TFA989X_I2SREG_CHSA_MSK);
+ if (ret)
+ return ret;
+
+ /* Set DCDC compensation to off and speaker impedance to 8 ohm */
+ ret = regmap_update_bits(regmap, TFA989X_I2S_SEL_REG,
+ TFA989X_I2S_SEL_REG_DCFG_MSK |
+ TFA989X_I2S_SEL_REG_SPKR_MSK,
+ TFA989X_I2S_SEL_REG_SPKR_MSK);
+ if (ret)
+ return ret;
+
+ /* Set DCDC to follower mode and disable CoolFlux DSP */
+ return regmap_clear_bits(regmap, TFA989X_SYS_CTRL,
+ BIT(TFA989X_SYS_CTRL_DCA) |
+ BIT(TFA989X_SYS_CTRL_CFE) |
+ BIT(TFA989X_SYS_CTRL_AMPC));
+}
+
+static void tfa989x_regulator_disable(void *data)
+{
+ struct tfa989x *tfa989x = data;
+
+ regulator_disable(tfa989x->vddd_supply);
+}
+
+static int tfa989x_i2c_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ const struct tfa989x_rev *rev;
+ struct tfa989x *tfa989x;
+ struct regmap *regmap;
+ unsigned int val;
+ int ret;
+
+ rev = device_get_match_data(dev);
+ if (!rev) {
+ dev_err(dev, "unknown device revision\n");
+ return -ENODEV;
+ }
+
+ tfa989x = devm_kzalloc(dev, sizeof(*tfa989x), GFP_KERNEL);
+ if (!tfa989x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tfa989x);
+
+ tfa989x->vddd_supply = devm_regulator_get(dev, "vddd");
+ if (IS_ERR(tfa989x->vddd_supply))
+ return dev_err_probe(dev, PTR_ERR(tfa989x->vddd_supply),
+ "Failed to get vddd regulator\n");
+
+ regmap = devm_regmap_init_i2c(i2c, &tfa989x_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regulator_enable(tfa989x->vddd_supply);
+ if (ret) {
+ dev_err(dev, "Failed to enable vddd regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, tfa989x_regulator_disable, tfa989x);
+ if (ret)
+ return ret;
+
+ /* Bypass regcache for reset and init sequence */
+ regcache_cache_bypass(regmap, true);
+
+ /* Dummy read to generate i2c clocks, required on some devices */
+ regmap_read(regmap, TFA989X_REVISIONNUMBER, &val);
+
+ ret = regmap_read(regmap, TFA989X_REVISIONNUMBER, &val);
+ if (ret) {
+ dev_err(dev, "failed to read revision number: %d\n", ret);
+ return ret;
+ }
+
+ val &= TFA989X_REVISIONNUMBER_REV_MSK;
+ if (val != rev->rev) {
+ dev_err(dev, "invalid revision number, expected %#x, got %#x\n",
+ rev->rev, val);
+ return -ENODEV;
+ }
+
+ ret = regmap_write(regmap, TFA989X_SYS_CTRL, BIT(TFA989X_SYS_CTRL_I2CR));
+ if (ret) {
+ dev_err(dev, "failed to reset I2C registers: %d\n", ret);
+ return ret;
+ }
+
+ ret = rev->init(regmap);
+ if (ret) {
+ dev_err(dev, "failed to initialize registers: %d\n", ret);
+ return ret;
+ }
+
+ ret = tfa989x_dsp_bypass(regmap);
+ if (ret) {
+ dev_err(dev, "failed to enable DSP bypass: %d\n", ret);
+ return ret;
+ }
+ regcache_cache_bypass(regmap, false);
+
+ return devm_snd_soc_register_component(dev, &tfa989x_component,
+ &tfa989x_dai, 1);
+}
+
+static const struct of_device_id tfa989x_of_match[] = {
+ { .compatible = "nxp,tfa9895", .data = &tfa9895_rev },
+ { .compatible = "nxp,tfa9897", .data = &tfa9897_rev },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tfa989x_of_match);
+
+static struct i2c_driver tfa989x_i2c_driver = {
+ .driver = {
+ .name = "tfa989x",
+ .of_match_table = tfa989x_of_match,
+ },
+ .probe_new = tfa989x_i2c_probe,
+};
+module_i2c_driver(tfa989x_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC NXP/Goodix TFA989X (TFA1) driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
+MODULE_LICENSE("GPL");
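In the tfa989x driver above, hw_params looks up the requested rate in tfa989x_rates[] and writes the resulting index into the I2SSR field (bits 15:12) of I2SREG. The sketch below restates that field update outside the kernel as a rough illustration only: the table and shift/mask constants are copied from the driver, while the example register value and the standalone main() are invented for the demonstration.

#include <stdint.h>
#include <stdio.h>

#define TFA989X_I2SREG_I2SSR		12
#define TFA989X_I2SREG_I2SSR_MSK	(0xfu << 12)	/* GENMASK(15, 12) */

static const unsigned int tfa989x_rates[] = {
	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};

/* Same lookup as the driver: return the table index for a rate, or -1. */
static int tfa989x_find_sample_rate(unsigned int rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(tfa989x_rates) / sizeof(tfa989x_rates[0]); i++)
		if (tfa989x_rates[i] == rate)
			return (int)i;
	return -1;
}

int main(void)
{
	uint16_t i2sreg = 0x000b;	/* arbitrary example I2SREG value */
	int sr = tfa989x_find_sample_rate(48000);	/* index 8 */

	if (sr >= 0) {
		/* mask out the old field, then shift the index into place */
		i2sreg &= (uint16_t)~TFA989X_I2SREG_I2SSR_MSK;
		i2sreg |= (uint16_t)sr << TFA989X_I2SREG_I2SSR;
	}
	printf("I2SREG = 0x%04x (I2SSR field = %d)\n", i2sreg, sr);
	return 0;
}

In the driver itself this read-modify-write is done by snd_soc_component_update_bits() against the cached register, but the field arithmetic is the same.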
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index c7baef8948d4..077415a57225 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -261,8 +261,8 @@ static const struct snd_kcontrol_new aic26_snd_controls[] = {
* SPI device portion of driver: sysfs files for debugging
*/
-static ssize_t aic26_keyclick_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t keyclick_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct aic26 *aic26 = dev_get_drvdata(dev);
int val, amp, freq, len;
@@ -276,9 +276,9 @@ static ssize_t aic26_keyclick_show(struct device *dev,
}
/* Any write to the keyclick attribute will trigger the keyclick event */
-static ssize_t aic26_keyclick_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t keyclick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct aic26 *aic26 = dev_get_drvdata(dev);
@@ -288,7 +288,7 @@ static ssize_t aic26_keyclick_set(struct device *dev,
return count;
}
-static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set);
+static DEVICE_ATTR_RW(keyclick);
/* ---------------------------------------------------------------------
* SoC CODEC portion of driver: probe and release routines
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 51870d50f419..52d2c968b5c0 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -35,6 +35,9 @@
#include "tlv320aic31xx.h"
+static int aic31xx_set_jack(struct snd_soc_component *component,
+ struct snd_soc_jack *jack, void *data);
+
static const struct reg_default aic31xx_reg_defaults[] = {
{ AIC31XX_CLKMUX, 0x00 },
{ AIC31XX_PLLPR, 0x11 },
@@ -1256,6 +1259,13 @@ static int aic31xx_power_on(struct snd_soc_component *component)
return ret;
}
+ /*
+ * The jack detection configuration is in the same register
+ * that is used to report jack detect status, so it is volatile
+ * and not covered by the cache sync; restore it separately.
+ */
+ aic31xx_set_jack(component, aic31xx->jack, NULL);
+
return 0;
}
@@ -1604,6 +1614,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
ret);
return ret;
}
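+ /* Codec may still be powered down at probe; keep the regmap in cache-only mode until power-up */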
+ regcache_cache_only(aic31xx->regmap, true);
+
aic31xx->dev = &i2c->dev;
aic31xx->irq = i2c->irq;
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 81952984613d..2513922a0292 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -151,8 +151,8 @@ struct aic31xx_pdata {
#define AIC31XX_WORD_LEN_24BITS 0x02
#define AIC31XX_WORD_LEN_32BITS 0x03
#define AIC31XX_IFACE1_MASTER_MASK GENMASK(3, 2)
-#define AIC31XX_BCLK_MASTER BIT(2)
-#define AIC31XX_WCLK_MASTER BIT(3)
+#define AIC31XX_BCLK_MASTER BIT(3)
+#define AIC31XX_WCLK_MASTER BIT(2)
/* AIC31XX_DATA_OFFSET */
#define AIC31XX_DATA_OFFSET_MASK GENMASK(7, 0)
diff --git a/sound/soc/codecs/tlv320aic32x4-i2c.c b/sound/soc/codecs/tlv320aic32x4-i2c.c
index 6d54cbf70a0b..04ad38311360 100644
--- a/sound/soc/codecs/tlv320aic32x4-i2c.c
+++ b/sound/soc/codecs/tlv320aic32x4-i2c.c
@@ -16,6 +16,8 @@
#include "tlv320aic32x4.h"
+static const struct of_device_id aic32x4_of_id[];
+
static int aic32x4_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -27,6 +29,16 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
config.val_bits = 8;
regmap = devm_regmap_init_i2c(i2c, &config);
+
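+ /* Record the device variant in drvdata; aic32x4_probe() reads it back to select the matching component driver */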
+ if (i2c->dev.of_node) {
+ const struct of_device_id *oid;
+
+ oid = of_match_node(aic32x4_of_id, i2c->dev.of_node);
+ dev_set_drvdata(&i2c->dev, (void *)oid->data);
+ } else if (id) {
+ dev_set_drvdata(&i2c->dev, (void *)id->driver_data);
+ }
+
return aic32x4_probe(&i2c->dev, regmap);
}
@@ -36,15 +48,17 @@ static int aic32x4_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id aic32x4_i2c_id[] = {
- { "tlv320aic32x4", 0 },
- { "tlv320aic32x6", 1 },
+ { "tlv320aic32x4", (kernel_ulong_t)AIC32X4_TYPE_AIC32X4 },
+ { "tlv320aic32x6", (kernel_ulong_t)AIC32X4_TYPE_AIC32X6 },
+ { "tas2505", (kernel_ulong_t)AIC32X4_TYPE_TAS2505 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
static const struct of_device_id aic32x4_of_id[] = {
- { .compatible = "ti,tlv320aic32x4", },
- { .compatible = "ti,tlv320aic32x6", },
+ { .compatible = "ti,tlv320aic32x4", .data = (void *)AIC32X4_TYPE_AIC32X4 },
+ { .compatible = "ti,tlv320aic32x6", .data = (void *)AIC32X4_TYPE_AIC32X6 },
+ { .compatible = "ti,tas2505", .data = (void *)AIC32X4_TYPE_TAS2505 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, aic32x4_of_id);
diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c
index a22e7700bfc8..e81c72958a82 100644
--- a/sound/soc/codecs/tlv320aic32x4-spi.c
+++ b/sound/soc/codecs/tlv320aic32x4-spi.c
@@ -16,6 +16,8 @@
#include "tlv320aic32x4.h"
+static const struct of_device_id aic32x4_of_id[];
+
static int aic32x4_spi_probe(struct spi_device *spi)
{
struct regmap *regmap;
@@ -28,6 +30,19 @@ static int aic32x4_spi_probe(struct spi_device *spi)
config.read_flag_mask = 0x01;
regmap = devm_regmap_init_spi(spi, &config);
+
+ if (spi->dev.of_node) {
+ const struct of_device_id *oid;
+
+ oid = of_match_node(aic32x4_of_id, spi->dev.of_node);
+ dev_set_drvdata(&spi->dev, (void *)oid->data);
+ } else {
+ const struct spi_device_id *id_entry;
+
+ id_entry = spi_get_device_id(spi);
+ dev_set_drvdata(&spi->dev, (void *)id_entry->driver_data);
+ }
+
return aic32x4_probe(&spi->dev, regmap);
}
@@ -37,15 +52,15 @@ static int aic32x4_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id aic32x4_spi_id[] = {
- { "tlv320aic32x4", 0 },
- { "tlv320aic32x6", 1 },
+ { "tlv320aic32x4", (kernel_ulong_t)AIC32X4_TYPE_AIC32X4 },
+ { "tlv320aic32x6", (kernel_ulong_t)AIC32X4_TYPE_AIC32X6 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, aic32x4_spi_id);
static const struct of_device_id aic32x4_of_id[] = {
- { .compatible = "ti,tlv320aic32x4", },
- { .compatible = "ti,tlv320aic32x6", },
+ { .compatible = "ti,tlv320aic32x4", .data = (void *)AIC32X4_TYPE_AIC32X4 },
+ { .compatible = "ti,tlv320aic32x6", .data = (void *)AIC32X4_TYPE_AIC32X6 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, aic32x4_of_id);
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index b689f26fc4be..2e9175b37dc9 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -48,6 +48,7 @@ struct aic32x4_priv {
struct aic32x4_setup_data *setup;
struct device *dev;
+ enum aic32x4_type type;
};
static int aic32x4_reset_adc(struct snd_soc_dapm_widget *w,
@@ -249,6 +250,9 @@ static DECLARE_TLV_DB_SCALE(tlv_pcm, -6350, 50, 0);
static DECLARE_TLV_DB_SCALE(tlv_driver_gain, -600, 100, 0);
/* -12dB min, 0.5dB steps */
static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0);
+/* -58.5dB min, 0.5dB steps */
+static DECLARE_TLV_DB_SCALE(tlv_tas_driver_gain, -5850, 50, 0);
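+/* 0dB min, 6dB steps, lowest setting mutes */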
+static DECLARE_TLV_DB_SCALE(tlv_amp_vol, 0, 600, 1);
static const char * const lo_cm_text[] = {
"Full Chip", "1.65V",
@@ -678,11 +682,20 @@ static int aic32x4_set_dosr(struct snd_soc_component *component, u16 dosr)
static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
u8 r_block, u8 p_block)
{
- if (r_block > 18 || p_block > 25)
- return -EINVAL;
+ struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
- snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
- snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+ if (aic32x4->type == AIC32X4_TYPE_TAS2505) {
+ if (r_block || p_block > 3)
+ return -EINVAL;
+
+ snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+ } else { /* AIC32x4 */
+ if (r_block > 18 || p_block > 25)
+ return -EINVAL;
+
+ snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
+ snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+ }
return 0;
}
@@ -691,6 +704,7 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
unsigned int sample_rate, unsigned int channels,
unsigned int bit_depth)
{
+ struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
u8 aosr;
u16 dosr;
u8 adc_resource_class, dac_resource_class;
@@ -717,19 +731,28 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
adc_resource_class = 6;
dac_resource_class = 8;
dosr_increment = 8;
- aic32x4_set_processing_blocks(component, 1, 1);
+ if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+ aic32x4_set_processing_blocks(component, 0, 1);
+ else
+ aic32x4_set_processing_blocks(component, 1, 1);
} else if (sample_rate <= 96000) {
aosr = 64;
adc_resource_class = 6;
dac_resource_class = 8;
dosr_increment = 4;
- aic32x4_set_processing_blocks(component, 1, 9);
+ if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+ aic32x4_set_processing_blocks(component, 0, 1);
+ else
+ aic32x4_set_processing_blocks(component, 1, 9);
} else if (sample_rate == 192000) {
aosr = 32;
adc_resource_class = 3;
dac_resource_class = 4;
dosr_increment = 2;
- aic32x4_set_processing_blocks(component, 13, 19);
+ if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+ aic32x4_set_processing_blocks(component, 0, 1);
+ else
+ aic32x4_set_processing_blocks(component, 13, 19);
} else {
dev_err(component->dev, "Sampling rate not supported\n");
return -EINVAL;
@@ -1058,6 +1081,128 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4 = {
.non_legacy_dai_naming = 1,
};
+static const struct snd_kcontrol_new aic32x4_tas2505_snd_controls[] = {
+ SOC_SINGLE_S8_TLV("PCM Playback Volume",
+ AIC32X4_LDACVOL, -0x7f, 0x30, tlv_pcm),
+ SOC_ENUM("DAC Playback PowerTune Switch", l_ptm_enum),
+
+ SOC_SINGLE_TLV("HP Driver Gain Volume",
+ AIC32X4_HPLGAIN, 0, 0x74, 1, tlv_tas_driver_gain),
+ SOC_SINGLE("HP DAC Playback Switch", AIC32X4_HPLGAIN, 6, 1, 1),
+
+ SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+ TAS2505_SPKVOL1, 0, 0x74, 1, tlv_tas_driver_gain),
+ SOC_SINGLE_TLV("Speaker Amplifier Playback Volume",
+ TAS2505_SPKVOL2, 4, 5, 0, tlv_amp_vol),
+
+ SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
+};
+
+static const struct snd_kcontrol_new hp_output_mixer_controls[] = {
+ SOC_DAPM_SINGLE("DAC Switch", AIC32X4_HPLROUTE, 3, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget aic32x4_tas2505_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "Playback", AIC32X4_DACSETUP, 7, 0),
+ SND_SOC_DAPM_MIXER("HP Output Mixer", SND_SOC_NOPM, 0, 0,
+ &hp_output_mixer_controls[0],
+ ARRAY_SIZE(hp_output_mixer_controls)),
+ SND_SOC_DAPM_PGA("HP Power", AIC32X4_OUTPWRCTL, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Speaker Driver", TAS2505_SPK, 1, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("HP"),
+ SND_SOC_DAPM_OUTPUT("Speaker"),
+};
+
+static const struct snd_soc_dapm_route aic32x4_tas2505_dapm_routes[] = {
+ /* Left Output */
+ {"HP Output Mixer", "DAC Switch", "DAC"},
+
+ {"HP Power", NULL, "HP Output Mixer"},
+ {"HP", NULL, "HP Power"},
+
+ {"Speaker Driver", NULL, "DAC"},
+ {"Speaker", NULL, "Speaker Driver"},
+};
+
+static struct snd_soc_dai_driver aic32x4_tas2505_dai = {
+ .name = "tas2505-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = AIC32X4_FORMATS,},
+ .ops = &aic32x4_ops,
+ .symmetric_rate = 1,
+};
+
+static int aic32x4_tas2505_component_probe(struct snd_soc_component *component)
+{
+ struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
+ u32 tmp_reg;
+ int ret;
+
+ struct clk_bulk_data clocks[] = {
+ { .id = "codec_clkin" },
+ { .id = "pll" },
+ { .id = "bdiv" },
+ { .id = "mdac" },
+ };
+
+ ret = devm_clk_bulk_get(component->dev, ARRAY_SIZE(clocks), clocks);
+ if (ret)
+ return ret;
+
+ if (aic32x4->setup)
+ aic32x4_setup_gpios(component);
+
+ clk_set_parent(clocks[0].clk, clocks[1].clk);
+ clk_set_parent(clocks[2].clk, clocks[3].clk);
+
+ /* Power platform configuration */
+ if (aic32x4->power_cfg & AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE)
+ snd_soc_component_write(component, AIC32X4_PWRCFG, AIC32X4_AVDDWEAKDISABLE);
+
+ tmp_reg = (aic32x4->power_cfg & AIC32X4_PWR_AIC32X4_LDO_ENABLE) ?
+ AIC32X4_LDOCTLEN : 0;
+ snd_soc_component_write(component, AIC32X4_LDOCTL, tmp_reg);
+
+ tmp_reg = snd_soc_component_read(component, AIC32X4_CMMODE);
+ if (aic32x4->power_cfg & AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36)
+ tmp_reg |= AIC32X4_LDOIN_18_36;
+ if (aic32x4->power_cfg & AIC32X4_PWR_CMMODE_HP_LDOIN_POWERED)
+ tmp_reg |= AIC32X4_LDOIN2HP;
+ snd_soc_component_write(component, AIC32X4_CMMODE, tmp_reg);
+
+ /*
+ * Enable the fast charging feature and ensure the required 40 ms have
+ * elapsed before using the analog circuits.
+ */
+ snd_soc_component_write(component, TAS2505_REFPOWERUP,
+ AIC32X4_REFPOWERUP_40MS);
+ msleep(40);
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver soc_component_dev_aic32x4_tas2505 = {
+ .probe = aic32x4_tas2505_component_probe,
+ .set_bias_level = aic32x4_set_bias_level,
+ .controls = aic32x4_tas2505_snd_controls,
+ .num_controls = ARRAY_SIZE(aic32x4_tas2505_snd_controls),
+ .dapm_widgets = aic32x4_tas2505_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aic32x4_tas2505_dapm_widgets),
+ .dapm_routes = aic32x4_tas2505_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(aic32x4_tas2505_dapm_routes),
+ .suspend_bias_off = 1,
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
static int aic32x4_parse_dt(struct aic32x4_priv *aic32x4,
struct device_node *np)
{
@@ -1198,6 +1343,8 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
return -ENOMEM;
aic32x4->dev = dev;
+ aic32x4->type = (enum aic32x4_type)dev_get_drvdata(dev);
+
dev_set_drvdata(dev, aic32x4);
if (pdata) {
@@ -1247,8 +1394,16 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
if (ret)
goto err_disable_regulators;
- ret = devm_snd_soc_register_component(dev,
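+ /* The TAS2505 variant is playback-only (mono DAC), so it gets its own component driver and DAI */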
+ switch (aic32x4->type) {
+ case AIC32X4_TYPE_TAS2505:
+ ret = devm_snd_soc_register_component(dev,
+ &soc_component_dev_aic32x4_tas2505, &aic32x4_tas2505_dai, 1);
+ break;
+ default:
+ ret = devm_snd_soc_register_component(dev,
&soc_component_dev_aic32x4, &aic32x4_dai, 1);
+ }
+
if (ret) {
dev_err(dev, "Failed to register component\n");
goto err_disable_regulators;
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index 7550122e9f8a..e9fd2e55d6c3 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -10,6 +10,12 @@
struct device;
struct regmap_config;
+enum aic32x4_type {
+ AIC32X4_TYPE_AIC32X4 = 0,
+ AIC32X4_TYPE_AIC32X6,
+ AIC32X4_TYPE_TAS2505,
+};
+
extern const struct regmap_config aic32x4_regmap_config;
int aic32x4_probe(struct device *dev, struct regmap *regmap);
int aic32x4_remove(struct device *dev);
@@ -88,6 +94,9 @@ int aic32x4_register_clocks(struct device *dev, const char *mclk_name);
#define AIC32X4_LOLGAIN AIC32X4_REG(1, 18)
#define AIC32X4_LORGAIN AIC32X4_REG(1, 19)
#define AIC32X4_HEADSTART AIC32X4_REG(1, 20)
+#define TAS2505_SPK AIC32X4_REG(1, 45)
+#define TAS2505_SPKVOL1 AIC32X4_REG(1, 46)
+#define TAS2505_SPKVOL2 AIC32X4_REG(1, 48)
#define AIC32X4_MICBIAS AIC32X4_REG(1, 51)
#define AIC32X4_LMICPGAPIN AIC32X4_REG(1, 52)
#define AIC32X4_LMICPGANIN AIC32X4_REG(1, 54)
@@ -96,6 +105,7 @@ int aic32x4_register_clocks(struct device *dev, const char *mclk_name);
#define AIC32X4_FLOATINGINPUT AIC32X4_REG(1, 58)
#define AIC32X4_LMICPGAVOL AIC32X4_REG(1, 59)
#define AIC32X4_RMICPGAVOL AIC32X4_REG(1, 60)
+#define TAS2505_REFPOWERUP AIC32X4_REG(1, 122)
#define AIC32X4_REFPOWERUP AIC32X4_REG(1, 123)
/* Bits, masks, and shifts */
diff --git a/sound/soc/codecs/wcd-clsh-v2.c b/sound/soc/codecs/wcd-clsh-v2.c
index 817d8259758c..4c7ebc7fb400 100644
--- a/sound/soc/codecs/wcd-clsh-v2.c
+++ b/sound/soc/codecs/wcd-clsh-v2.c
@@ -88,6 +88,19 @@ struct wcd_clsh_ctrl {
#define WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA 0x50
#define WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_30MA 0x30
+#define WCD9XXX_BASE_ADDRESS 0x3000
+#define WCD9XXX_ANA_RX_SUPPLIES (WCD9XXX_BASE_ADDRESS+0x008)
+#define WCD9XXX_ANA_HPH (WCD9XXX_BASE_ADDRESS+0x009)
+#define WCD9XXX_CLASSH_MODE_2 (WCD9XXX_BASE_ADDRESS+0x098)
+#define WCD9XXX_CLASSH_MODE_3 (WCD9XXX_BASE_ADDRESS+0x099)
+#define WCD9XXX_FLYBACK_VNEG_CTRL_1 (WCD9XXX_BASE_ADDRESS+0x0A5)
+#define WCD9XXX_FLYBACK_VNEG_CTRL_4 (WCD9XXX_BASE_ADDRESS+0x0A8)
+#define WCD9XXX_FLYBACK_VNEGDAC_CTRL_2 (WCD9XXX_BASE_ADDRESS+0x0AF)
+#define WCD9XXX_RX_BIAS_HPH_LOWPOWER (WCD9XXX_BASE_ADDRESS+0x0BF)
+#define WCD9XXX_V3_RX_BIAS_FLYB_BUFF (WCD9XXX_BASE_ADDRESS+0x0C7)
+#define WCD9XXX_HPH_PA_CTL1 (WCD9XXX_BASE_ADDRESS+0x0D1)
+#define WCD9XXX_HPH_NEW_INT_PA_MISC2 (WCD9XXX_BASE_ADDRESS+0x138)
+
#define CLSH_REQ_ENABLE true
#define CLSH_REQ_DISABLE false
#define WCD_USLEEP_RANGE 50
@@ -137,6 +150,20 @@ static inline void wcd_clsh_set_buck_mode(struct snd_soc_component *comp,
WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_DEFAULT);
}
+static void wcd_clsh_v3_set_buck_mode(struct snd_soc_component *component,
+ int mode)
+{
+ if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
+ mode == CLS_AB_HIFI || mode == CLS_AB_LOHIFI)
+ snd_soc_component_update_bits(component,
+ WCD9XXX_ANA_RX_SUPPLIES,
+ 0x08, 0x08); /* set to HIFI */
+ else
+ snd_soc_component_update_bits(component,
+ WCD9XXX_ANA_RX_SUPPLIES,
+ 0x08, 0x00); /* set to default */
+}
+
static inline void wcd_clsh_set_flyback_mode(struct snd_soc_component *comp,
int mode)
{
@@ -170,6 +197,36 @@ static void wcd_clsh_buck_ctrl(struct wcd_clsh_ctrl *ctrl,
usleep_range(500, 500 + WCD_USLEEP_RANGE);
}
+static void wcd_clsh_v3_buck_ctrl(struct snd_soc_component *component,
+ struct wcd_clsh_ctrl *ctrl,
+ int mode,
+ bool enable)
+{
+ /* Enable/disable the buck; hardware is only touched on the first enable or last disable (buck_users refcount) */
+ if ((enable && (++ctrl->buck_users == 1)) ||
+ (!enable && (--ctrl->buck_users == 0))) {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_ANA_RX_SUPPLIES,
+ (1 << 7), (enable << 7));
+ /*
+ * 500us sleep is required after buck enable/disable
+ * as per HW requirement
+ */
+ usleep_range(500, 510);
+ if (mode == CLS_H_LOHIFI || mode == CLS_H_ULP ||
+ mode == CLS_H_HIFI || mode == CLS_H_LP)
+ snd_soc_component_update_bits(component,
+ WCD9XXX_CLASSH_MODE_3,
+ 0x02, 0x00);
+
+ snd_soc_component_update_bits(component,
+ WCD9XXX_CLASSH_MODE_2,
+ 0xFF, 0x3A);
+ /* 500usec delay is needed as per HW requirement */
+ usleep_range(500, 500 + WCD_USLEEP_RANGE);
+ }
+}
+
static void wcd_clsh_flyback_ctrl(struct wcd_clsh_ctrl *ctrl,
int mode,
bool enable)
@@ -219,8 +276,7 @@ static void wcd_clsh_set_gain_path(struct wcd_clsh_ctrl *ctrl, int mode)
val);
}
-static void wcd_clsh_set_hph_mode(struct snd_soc_component *comp,
- int mode)
+static void wcd_clsh_v2_set_hph_mode(struct snd_soc_component *comp, int mode)
{
int val = 0, gain = 0, res_val;
int ipeak = WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA;
@@ -264,6 +320,48 @@ static void wcd_clsh_set_hph_mode(struct snd_soc_component *comp,
ipeak);
}
+static void wcd_clsh_v3_set_hph_mode(struct snd_soc_component *component,
+ int mode)
+{
+ u8 val;
+
+ switch (mode) {
+ case CLS_H_NORMAL:
+ val = 0x00;
+ break;
+ case CLS_AB:
+ case CLS_H_ULP:
+ val = 0x0C;
+ break;
+ case CLS_AB_HIFI:
+ case CLS_H_HIFI:
+ val = 0x08;
+ break;
+ case CLS_H_LP:
+ case CLS_H_LOHIFI:
+ case CLS_AB_LP:
+ case CLS_AB_LOHIFI:
+ val = 0x04;
+ break;
+ default:
+ dev_err(component->dev, "%s: Invalid mode %d\n", __func__, mode);
+ return;
+ }
+
+ snd_soc_component_update_bits(component, WCD9XXX_ANA_HPH, 0x0C, val);
+}
+
+void wcd_clsh_set_hph_mode(struct wcd_clsh_ctrl *ctrl, int mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ if (ctrl->codec_version >= WCD937X)
+ wcd_clsh_v3_set_hph_mode(comp, mode);
+ else
+ wcd_clsh_v2_set_hph_mode(comp, mode);
+
+}
+
static void wcd_clsh_set_flyback_current(struct snd_soc_component *comp,
int mode)
{
@@ -289,6 +387,130 @@ static void wcd_clsh_set_buck_regulator_mode(struct snd_soc_component *comp,
WCD9XXX_A_ANA_RX_REGULATOR_MODE_CLS_H);
}
+static void wcd_clsh_v3_set_buck_regulator_mode(struct snd_soc_component *component,
+ int mode)
+{
+ snd_soc_component_update_bits(component, WCD9XXX_ANA_RX_SUPPLIES,
+ 0x02, 0x00);
+}
+
+static void wcd_clsh_v3_set_flyback_mode(struct snd_soc_component *component,
+ int mode)
+{
+ if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
+ mode == CLS_AB_HIFI || mode == CLS_AB_LOHIFI) {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_ANA_RX_SUPPLIES,
+ 0x04, 0x04);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_FLYBACK_VNEG_CTRL_4,
+ 0xF0, 0x80);
+ } else {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_ANA_RX_SUPPLIES,
+ 0x04, 0x00); /* set to Default */
+ snd_soc_component_update_bits(component,
+ WCD9XXX_FLYBACK_VNEG_CTRL_4,
+ 0xF0, 0x70);
+ }
+}
+
+static void wcd_clsh_v3_force_iq_ctl(struct snd_soc_component *component,
+ int mode, bool enable)
+{
+ if (enable) {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_FLYBACK_VNEGDAC_CTRL_2,
+ 0xE0, 0xA0);
+ /* 100usec delay is needed as per HW requirement */
+ usleep_range(100, 110);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_CLASSH_MODE_3,
+ 0x02, 0x02);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_CLASSH_MODE_2,
+ 0xFF, 0x1C);
+ if (mode == CLS_H_LOHIFI || mode == CLS_AB_LOHIFI) {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_HPH_NEW_INT_PA_MISC2,
+ 0x20, 0x20);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_RX_BIAS_HPH_LOWPOWER,
+ 0xF0, 0xC0);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_HPH_PA_CTL1,
+ 0x0E, 0x02);
+ }
+ } else {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_HPH_NEW_INT_PA_MISC2,
+ 0x20, 0x00);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_RX_BIAS_HPH_LOWPOWER,
+ 0xF0, 0x80);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_HPH_PA_CTL1,
+ 0x0E, 0x06);
+ }
+}
+
+static void wcd_clsh_v3_flyback_ctrl(struct snd_soc_component *component,
+ struct wcd_clsh_ctrl *ctrl,
+ int mode,
+ bool enable)
+{
+ /* Enable/disable the flyback; hardware is only touched on the first enable or last disable (flyback_users refcount) */
+ if ((enable && (++ctrl->flyback_users == 1)) ||
+ (!enable && (--ctrl->flyback_users == 0))) {
+ snd_soc_component_update_bits(component,
+ WCD9XXX_FLYBACK_VNEG_CTRL_1,
+ 0xE0, 0xE0);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_ANA_RX_SUPPLIES,
+ (1 << 6), (enable << 6));
+ /*
+ * 100us sleep is required after flyback enable/disable
+ * as per HW requirement
+ */
+ usleep_range(100, 110);
+ snd_soc_component_update_bits(component,
+ WCD9XXX_FLYBACK_VNEGDAC_CTRL_2,
+ 0xE0, 0xE0);
+ /* 500usec delay is needed as per HW requirement */
+ usleep_range(500, 500 + WCD_USLEEP_RANGE);
+ }
+}
+
+static void wcd_clsh_v3_set_flyback_current(struct snd_soc_component *component,
+ int mode)
+{
+ snd_soc_component_update_bits(component, WCD9XXX_V3_RX_BIAS_FLYB_BUFF,
+ 0x0F, 0x0A);
+ snd_soc_component_update_bits(component, WCD9XXX_V3_RX_BIAS_FLYB_BUFF,
+ 0xF0, 0xA0);
+ /* Sleep needed to avoid click and pop as per HW requirement */
+ usleep_range(100, 110);
+}
+
+static void wcd_clsh_v3_state_aux(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *component = ctrl->comp;
+
+ if (is_enable) {
+ wcd_clsh_v3_set_buck_mode(component, mode);
+ wcd_clsh_v3_set_flyback_mode(component, mode);
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_flyback_current(component, mode);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, mode, true);
+ } else {
+ wcd_clsh_v3_buck_ctrl(component, ctrl, mode, false);
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, mode, false);
+ wcd_clsh_v3_set_flyback_mode(component, CLS_H_NORMAL);
+ wcd_clsh_v3_set_buck_mode(component, CLS_H_NORMAL);
+ }
+}
+
static void wcd_clsh_state_lo(struct wcd_clsh_ctrl *ctrl, int req_state,
bool is_enable, int mode)
{
@@ -316,6 +538,38 @@ static void wcd_clsh_state_lo(struct wcd_clsh_ctrl *ctrl, int req_state,
}
}
+static void wcd_clsh_v3_state_hph_r(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *component = ctrl->comp;
+
+ if (mode == CLS_H_NORMAL) {
+ dev_dbg(component->dev, "%s: Normal mode not applicable for hph_r\n",
+ __func__);
+ return;
+ }
+
+ if (is_enable) {
+ wcd_clsh_v3_set_buck_regulator_mode(component, mode);
+ wcd_clsh_v3_set_flyback_mode(component, mode);
+ wcd_clsh_v3_force_iq_ctl(component, mode, true);
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_flyback_current(component, mode);
+ wcd_clsh_v3_set_buck_mode(component, mode);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_hph_mode(component, mode);
+ } else {
+ wcd_clsh_v3_set_hph_mode(component, CLS_H_NORMAL);
+
+ /* buck and flyback set to default mode and disable */
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_v3_force_iq_ctl(component, CLS_H_NORMAL, false);
+ wcd_clsh_v3_set_flyback_mode(component, CLS_H_NORMAL);
+ wcd_clsh_v3_set_buck_mode(component, CLS_H_NORMAL);
+ }
+}
+
static void wcd_clsh_state_hph_r(struct wcd_clsh_ctrl *ctrl, int req_state,
bool is_enable, int mode)
{
@@ -353,10 +607,10 @@ static void wcd_clsh_state_hph_r(struct wcd_clsh_ctrl *ctrl, int req_state,
wcd_clsh_set_flyback_current(comp, mode);
wcd_clsh_set_buck_mode(comp, mode);
wcd_clsh_buck_ctrl(ctrl, mode, true);
- wcd_clsh_set_hph_mode(comp, mode);
+ wcd_clsh_v2_set_hph_mode(comp, mode);
wcd_clsh_set_gain_path(ctrl, mode);
} else {
- wcd_clsh_set_hph_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_v2_set_hph_mode(comp, CLS_H_NORMAL);
if (mode != CLS_AB) {
snd_soc_component_update_bits(comp,
@@ -374,6 +628,38 @@ static void wcd_clsh_state_hph_r(struct wcd_clsh_ctrl *ctrl, int req_state,
}
}
+static void wcd_clsh_v3_state_hph_l(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *component = ctrl->comp;
+
+ if (mode == CLS_H_NORMAL) {
+ dev_dbg(component->dev, "%s: Normal mode not applicable for hph_l\n",
+ __func__);
+ return;
+ }
+
+ if (is_enable) {
+ wcd_clsh_v3_set_buck_regulator_mode(component, mode);
+ wcd_clsh_v3_set_flyback_mode(component, mode);
+ wcd_clsh_v3_force_iq_ctl(component, mode, true);
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_flyback_current(component, mode);
+ wcd_clsh_v3_set_buck_mode(component, mode);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_hph_mode(component, mode);
+ } else {
+ wcd_clsh_v3_set_hph_mode(component, CLS_H_NORMAL);
+
+ /* set buck and flyback to Default Mode */
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_v3_force_iq_ctl(component, CLS_H_NORMAL, false);
+ wcd_clsh_v3_set_flyback_mode(component, CLS_H_NORMAL);
+ wcd_clsh_v3_set_buck_mode(component, CLS_H_NORMAL);
+ }
+}
+
static void wcd_clsh_state_hph_l(struct wcd_clsh_ctrl *ctrl, int req_state,
bool is_enable, int mode)
{
@@ -411,10 +697,10 @@ static void wcd_clsh_state_hph_l(struct wcd_clsh_ctrl *ctrl, int req_state,
wcd_clsh_set_flyback_current(comp, mode);
wcd_clsh_set_buck_mode(comp, mode);
wcd_clsh_buck_ctrl(ctrl, mode, true);
- wcd_clsh_set_hph_mode(comp, mode);
+ wcd_clsh_v2_set_hph_mode(comp, mode);
wcd_clsh_set_gain_path(ctrl, mode);
} else {
- wcd_clsh_set_hph_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_v2_set_hph_mode(comp, CLS_H_NORMAL);
if (mode != CLS_AB) {
snd_soc_component_update_bits(comp,
@@ -432,6 +718,32 @@ static void wcd_clsh_state_hph_l(struct wcd_clsh_ctrl *ctrl, int req_state,
}
}
+static void wcd_clsh_v3_state_ear(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *component = ctrl->comp;
+
+ if (is_enable) {
+ wcd_clsh_v3_set_buck_regulator_mode(component, mode);
+ wcd_clsh_v3_set_flyback_mode(component, mode);
+ wcd_clsh_v3_force_iq_ctl(component, mode, true);
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_flyback_current(component, mode);
+ wcd_clsh_v3_set_buck_mode(component, mode);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, mode, true);
+ wcd_clsh_v3_set_hph_mode(component, mode);
+ } else {
+ wcd_clsh_v3_set_hph_mode(component, CLS_H_NORMAL);
+
+ /* set buck and flyback to Default Mode */
+ wcd_clsh_v3_flyback_ctrl(component, ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_v3_buck_ctrl(component, ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_v3_force_iq_ctl(component, CLS_H_NORMAL, false);
+ wcd_clsh_v3_set_flyback_mode(component, CLS_H_NORMAL);
+ wcd_clsh_v3_set_buck_mode(component, CLS_H_NORMAL);
+ }
+}
+
static void wcd_clsh_state_ear(struct wcd_clsh_ctrl *ctrl, int req_state,
bool is_enable, int mode)
{
@@ -472,16 +784,30 @@ static int _wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl, int req_state,
{
switch (req_state) {
case WCD_CLSH_STATE_EAR:
- wcd_clsh_state_ear(ctrl, req_state, is_enable, mode);
+ if (ctrl->codec_version >= WCD937X)
+ wcd_clsh_v3_state_ear(ctrl, req_state, is_enable, mode);
+ else
+ wcd_clsh_state_ear(ctrl, req_state, is_enable, mode);
break;
case WCD_CLSH_STATE_HPHL:
- wcd_clsh_state_hph_l(ctrl, req_state, is_enable, mode);
+ if (ctrl->codec_version >= WCD937X)
+ wcd_clsh_v3_state_hph_l(ctrl, req_state, is_enable, mode);
+ else
+ wcd_clsh_state_hph_l(ctrl, req_state, is_enable, mode);
break;
case WCD_CLSH_STATE_HPHR:
- wcd_clsh_state_hph_r(ctrl, req_state, is_enable, mode);
+ if (ctrl->codec_version >= WCD937X)
+ wcd_clsh_v3_state_hph_r(ctrl, req_state, is_enable, mode);
+ else
+ wcd_clsh_state_hph_r(ctrl, req_state, is_enable, mode);
break;
case WCD_CLSH_STATE_LO:
- wcd_clsh_state_lo(ctrl, req_state, is_enable, mode);
+ if (ctrl->codec_version < WCD937X)
+ wcd_clsh_state_lo(ctrl, req_state, is_enable, mode);
+ break;
+ case WCD_CLSH_STATE_AUX:
+ if (ctrl->codec_version >= WCD937X)
+ wcd_clsh_v3_state_aux(ctrl, req_state, is_enable, mode);
break;
default:
break;
@@ -504,6 +830,7 @@ static bool wcd_clsh_is_state_valid(int state)
case WCD_CLSH_STATE_HPHL:
case WCD_CLSH_STATE_HPHR:
case WCD_CLSH_STATE_LO:
+ case WCD_CLSH_STATE_AUX:
return true;
default:
return false;
@@ -565,6 +892,7 @@ struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(struct snd_soc_component *comp,
ctrl->state = WCD_CLSH_STATE_IDLE;
ctrl->comp = comp;
+ ctrl->codec_version = version;
return ctrl;
}
diff --git a/sound/soc/codecs/wcd-clsh-v2.h b/sound/soc/codecs/wcd-clsh-v2.h
index a6d0f2d0e9e3..4e3653058275 100644
--- a/sound/soc/codecs/wcd-clsh-v2.h
+++ b/sound/soc/codecs/wcd-clsh-v2.h
@@ -22,8 +22,11 @@ enum wcd_clsh_event {
#define WCD_CLSH_STATE_HPHL BIT(1)
#define WCD_CLSH_STATE_HPHR BIT(2)
#define WCD_CLSH_STATE_LO BIT(3)
+#define WCD_CLSH_STATE_AUX BIT(4)
#define WCD_CLSH_STATE_MAX 4
+#define WCD_CLSH_V3_STATE_MAX 5
#define NUM_CLSH_STATES_V2 BIT(WCD_CLSH_STATE_MAX)
+#define NUM_CLSH_STATES_V3 BIT(WCD_CLSH_V3_STATE_MAX)
enum wcd_clsh_mode {
CLS_H_NORMAL = 0, /* Class-H Default */
@@ -31,9 +34,20 @@ enum wcd_clsh_mode {
CLS_H_LP, /* Class-H Low Power */
CLS_AB, /* Class-AB */
CLS_H_LOHIFI, /* LoHIFI */
+ CLS_H_ULP, /* Ultra Low power */
+ CLS_AB_HIFI, /* Class-AB */
+ CLS_AB_LP, /* Class-AB Low Power */
+ CLS_AB_LOHIFI, /* Class-AB Low HIFI */
CLS_NONE, /* None of the above modes */
};
+enum wcd_codec_version {
+ WCD9335 = 0,
+ WCD934X = 1,
+ /* New CLSH after this */
+ WCD937X = 2,
+ WCD938X = 3,
+};
struct wcd_clsh_ctrl;
extern struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(
@@ -45,5 +59,7 @@ extern int wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl,
enum wcd_clsh_event clsh_event,
int nstate,
enum wcd_clsh_mode mode);
+extern void wcd_clsh_set_hph_mode(struct wcd_clsh_ctrl *ctrl,
+ int mode);
#endif /* _WCD_CLSH_V2_H_ */
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
new file mode 100644
index 000000000000..405128ccb4b0
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -0,0 +1,1475 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "wcd-mbhc-v2.h"
+
+#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
+#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
+#define GND_MIC_SWAP_THRESHOLD 4
+#define WCD_FAKE_REMOVAL_MIN_PERIOD_MS 100
+#define HPHL_CROSS_CONN_THRESHOLD 100
+#define HS_VREF_MIN_VAL 1400
+#define FAKE_REM_RETRY_ATTEMPTS 3
+#define WCD_MBHC_ADC_HS_THRESHOLD_MV 1700
+#define WCD_MBHC_ADC_HPH_THRESHOLD_MV 75
+#define WCD_MBHC_ADC_MICBIAS_MV 1800
+#define WCD_MBHC_FAKE_INS_RETRY 4
+
+#define WCD_MBHC_JACK_MASK (SND_JACK_HEADSET | SND_JACK_LINEOUT | \
+ SND_JACK_MECHANICAL)
+
+#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
+ SND_JACK_BTN_4 | SND_JACK_BTN_5)
+
+enum wcd_mbhc_adc_mux_ctl {
+ MUX_CTL_AUTO = 0,
+ MUX_CTL_IN2P,
+ MUX_CTL_IN3P,
+ MUX_CTL_IN4P,
+ MUX_CTL_HPH_L,
+ MUX_CTL_HPH_R,
+ MUX_CTL_NONE,
+};
+
+struct wcd_mbhc {
+ struct device *dev;
+ struct snd_soc_component *component;
+ struct snd_soc_jack *jack;
+ struct wcd_mbhc_config *cfg;
+ const struct wcd_mbhc_cb *mbhc_cb;
+ const struct wcd_mbhc_intr *intr_ids;
+ struct wcd_mbhc_field *fields;
+ /* Delayed work to report long button press */
+ struct delayed_work mbhc_btn_dwork;
+ /* Work to correct accessory type */
+ struct work_struct correct_plug_swch;
+ struct mutex lock;
+ int buttons_pressed;
+ u32 hph_status; /* track headphone status */
+ u8 current_plug;
+ bool is_btn_press;
+ bool in_swch_irq_handler;
+ bool hs_detect_work_stop;
+ bool is_hs_recording;
+ bool extn_cable_hph_rem;
+ bool force_linein;
+ bool impedance_detect;
+ unsigned long event_state;
+ unsigned long jiffies_atreport;
+ /* impedance of hphl and hphr */
+ uint32_t zl, zr;
+ /* Holds type of Headset - Mono/Stereo */
+ enum wcd_mbhc_hph_type hph_type;
+ /* Holds mbhc detection method - ADC/Legacy */
+ int mbhc_detection_logic;
+};
+
+static inline int wcd_mbhc_write_field(const struct wcd_mbhc *mbhc,
+ int field, int val)
+{
+ if (!mbhc->fields[field].reg)
+ return 0;
+
+ return snd_soc_component_write_field(mbhc->component,
+ mbhc->fields[field].reg,
+ mbhc->fields[field].mask, val);
+}
+
+static inline int wcd_mbhc_read_field(const struct wcd_mbhc *mbhc, int field)
+{
+ if (!mbhc->fields[field].reg)
+ return 0;
+
+ return snd_soc_component_read_field(mbhc->component,
+ mbhc->fields[field].reg,
+ mbhc->fields[field].mask);
+}
+
+static void wcd_program_hs_vref(struct wcd_mbhc *mbhc)
+{
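+ /* Program the VREF field in 100 mV steps above HS_VREF_MIN_VAL (1.4 V) */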
+ u32 reg_val = ((mbhc->cfg->v_hs_max - HS_VREF_MIN_VAL) / 100);
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_HS_VREF, reg_val);
+}
+
+static void wcd_program_btn_threshold(const struct wcd_mbhc *mbhc, bool micbias)
+{
+ struct snd_soc_component *component = mbhc->component;
+
+ mbhc->mbhc_cb->set_btn_thr(component, mbhc->cfg->btn_low,
+ mbhc->cfg->btn_high,
+ mbhc->cfg->num_btn, micbias);
+}
+
+static void wcd_mbhc_curr_micbias_control(const struct wcd_mbhc *mbhc,
+ const enum wcd_mbhc_cs_mb_en_flag cs_mb_en)
+{
+
+ /*
+ * Some codecs handle micbias/pullup enablement in the codec
+ * driver itself, and micbias is not needed for regular
+ * plug type detection. So if the micbias_control callback
+ * is defined, just return.
+ */
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ return;
+
+ switch (cs_mb_en) {
+ case WCD_MBHC_EN_CS:
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MICB_CTRL, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 3);
+ /* Program Button threshold registers as per CS */
+ wcd_program_btn_threshold(mbhc, false);
+ break;
+ case WCD_MBHC_EN_MB:
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ /* Disable PULL_UP_EN & enable MICBIAS */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MICB_CTRL, 2);
+ /* Program Button threshold registers as per MICBIAS */
+ wcd_program_btn_threshold(mbhc, true);
+ break;
+ case WCD_MBHC_EN_PULLUP:
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 3);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MICB_CTRL, 1);
+ /* Program Button threshold registers as per MICBIAS */
+ wcd_program_btn_threshold(mbhc, true);
+ break;
+ case WCD_MBHC_EN_NONE:
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MICB_CTRL, 0);
+ break;
+ default:
+ dev_err(mbhc->dev, "%s: Invalid parameter", __func__);
+ break;
+ }
+}
+
+int wcd_mbhc_event_notify(struct wcd_mbhc *mbhc, unsigned long event)
+{
+
+ struct snd_soc_component *component;
+ bool micbias2 = false;
+
+ if (!mbhc)
+ return 0;
+
+ component = mbhc->component;
+
+ if (mbhc->mbhc_cb->micbias_enable_status)
+ micbias2 = mbhc->mbhc_cb->micbias_enable_status(component, MIC_BIAS_2);
+
+ switch (event) {
+ /* MICBIAS usage change */
+ case WCD_EVENT_POST_DAPM_MICBIAS_2_ON:
+ mbhc->is_hs_recording = true;
+ break;
+ case WCD_EVENT_POST_MICBIAS_2_ON:
+ /* Disable current source if micbias2 enabled */
+ if (mbhc->mbhc_cb->mbhc_micbias_control) {
+ if (wcd_mbhc_read_field(mbhc, WCD_MBHC_FSM_EN))
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 0);
+ } else {
+ mbhc->is_hs_recording = true;
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_MB);
+ }
+ break;
+ case WCD_EVENT_PRE_MICBIAS_2_OFF:
+ /*
+ * Before MICBIAS_2 is turned off, if FSM is enabled,
+ * make sure current source is enabled so as to detect
+ * button press/release events
+ */
+ if (mbhc->mbhc_cb->mbhc_micbias_control/* && !mbhc->micbias_enable*/) {
+ if (wcd_mbhc_read_field(mbhc, WCD_MBHC_FSM_EN))
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 3);
+ }
+ break;
+ /* MICBIAS usage change */
+ case WCD_EVENT_POST_DAPM_MICBIAS_2_OFF:
+ mbhc->is_hs_recording = false;
+ break;
+ case WCD_EVENT_POST_MICBIAS_2_OFF:
+ if (!mbhc->mbhc_cb->mbhc_micbias_control)
+ mbhc->is_hs_recording = false;
+
+ /* Enable PULL UP if PA's are enabled */
+ if ((test_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state)) ||
+ (test_bit(WCD_MBHC_EVENT_PA_HPHR, &mbhc->event_state)))
+ /* enable pullup and cs, disable mb */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_PULLUP);
+ else
+ /* enable current source and disable mb, pullup*/
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_CS);
+
+ break;
+ case WCD_EVENT_POST_HPHL_PA_OFF:
+ clear_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state);
+
+ /* check if micbias is enabled */
+ if (micbias2)
+ /* Disable cs, pullup & enable micbias */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_MB);
+ else
+ /* Disable micbias, pullup & enable cs */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_CS);
+ break;
+ case WCD_EVENT_POST_HPHR_PA_OFF:
+ clear_bit(WCD_MBHC_EVENT_PA_HPHR, &mbhc->event_state);
+ /* check if micbias is enabled */
+ if (micbias2)
+ /* Disable cs, pullup & enable micbias */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_MB);
+ else
+ /* Disable micbias, pullup & enable cs */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_CS);
+ break;
+ case WCD_EVENT_PRE_HPHL_PA_ON:
+ set_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state);
+ /* check if micbias is enabled */
+ if (micbias2)
+ /* Disable cs, pullup & enable micbias */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_MB);
+ else
+ /* Disable micbias, enable pullup & cs */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_PULLUP);
+ break;
+ case WCD_EVENT_PRE_HPHR_PA_ON:
+ set_bit(WCD_MBHC_EVENT_PA_HPHR, &mbhc->event_state);
+ /* check if micbias is enabled */
+ if (micbias2)
+ /* Disable cs, pullup & enable micbias */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_MB);
+ else
+ /* Disable micbias, enable pullup & cs */
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_PULLUP);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wcd_mbhc_event_notify);
+
+static int wcd_cancel_btn_work(struct wcd_mbhc *mbhc)
+{
+ return cancel_delayed_work_sync(&mbhc->mbhc_btn_dwork);
+}
+
+static void wcd_micbias_disable(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_component *component = mbhc->component;
+
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ mbhc->mbhc_cb->mbhc_micbias_control(component, MIC_BIAS_2, MICB_DISABLE);
+
+ if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(component, MIC_BIAS_2, false);
+
+ if (mbhc->mbhc_cb->set_micbias_value) {
+ mbhc->mbhc_cb->set_micbias_value(component);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MICB_CTRL, 0);
+ }
+}
+
+static void wcd_mbhc_report_plug_removal(struct wcd_mbhc *mbhc,
+ enum snd_jack_types jack_type)
+{
+ mbhc->hph_status &= ~jack_type;
+ /*
+ * cancel possibly scheduled btn work and
+ * report release if we reported button press
+ */
+ if (!wcd_cancel_btn_work(mbhc) && mbhc->buttons_pressed) {
+ snd_soc_jack_report(mbhc->jack, 0, mbhc->buttons_pressed);
+ mbhc->buttons_pressed &= ~WCD_MBHC_JACK_BUTTON_MASK;
+ }
+
+ wcd_micbias_disable(mbhc);
+ mbhc->hph_type = WCD_MBHC_HPH_NONE;
+ mbhc->zl = mbhc->zr = 0;
+ snd_soc_jack_report(mbhc->jack, mbhc->hph_status, WCD_MBHC_JACK_MASK);
+ mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
+ mbhc->force_linein = false;
+}
+
+static void wcd_mbhc_compute_impedance(struct wcd_mbhc *mbhc)
+{
+
+ if (!mbhc->impedance_detect)
+ return;
+
+ if (mbhc->cfg->linein_th != 0) {
+ u8 fsm_en = wcd_mbhc_read_field(mbhc, WCD_MBHC_FSM_EN);
+ /* Set MUX_CTL to AUTO for Z-det */
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MUX_CTL, MUX_CTL_AUTO);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ mbhc->mbhc_cb->compute_impedance(mbhc->component, &mbhc->zl, &mbhc->zr);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, fsm_en);
+ }
+}
+
+static void wcd_mbhc_report_plug_insertion(struct wcd_mbhc *mbhc,
+ enum snd_jack_types jack_type)
+{
+ bool is_pa_on;
+ /*
+ * Report removal of current jack type.
+ * Headphone to headset shouldn't report headphone
+ * removal.
+ */
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET &&
+ jack_type == SND_JACK_HEADPHONE)
+ mbhc->hph_status &= ~SND_JACK_HEADSET;
+
+ /* Report insertion */
+ switch (jack_type) {
+ case SND_JACK_HEADPHONE:
+ mbhc->current_plug = MBHC_PLUG_TYPE_HEADPHONE;
+ break;
+ case SND_JACK_HEADSET:
+ mbhc->current_plug = MBHC_PLUG_TYPE_HEADSET;
+ mbhc->jiffies_atreport = jiffies;
+ break;
+ case SND_JACK_LINEOUT:
+ mbhc->current_plug = MBHC_PLUG_TYPE_HIGH_HPH;
+ break;
+ default:
+ break;
+ }
+
+
+ is_pa_on = wcd_mbhc_read_field(mbhc, WCD_MBHC_HPH_PA_EN);
+
+ if (!is_pa_on) {
+ wcd_mbhc_compute_impedance(mbhc);
+ if ((mbhc->zl > mbhc->cfg->linein_th) &&
+ (mbhc->zr > mbhc->cfg->linein_th) &&
+ (jack_type == SND_JACK_HEADPHONE)) {
+ jack_type = SND_JACK_LINEOUT;
+ mbhc->force_linein = true;
+ mbhc->current_plug = MBHC_PLUG_TYPE_HIGH_HPH;
+ if (mbhc->hph_status) {
+ mbhc->hph_status &= ~(SND_JACK_HEADSET |
+ SND_JACK_LINEOUT);
+ snd_soc_jack_report(mbhc->jack, mbhc->hph_status,
+ WCD_MBHC_JACK_MASK);
+ }
+ }
+ }
+
+ /* Do not calculate impedance again for lineout: during playback
+ * the PA is on, so the measured values would be wrong and lineout
+ * would be detected as headphone.
+ */
+ if (is_pa_on && mbhc->force_linein) {
+ jack_type = SND_JACK_LINEOUT;
+ mbhc->current_plug = MBHC_PLUG_TYPE_HIGH_HPH;
+ if (mbhc->hph_status) {
+ mbhc->hph_status &= ~(SND_JACK_HEADSET |
+ SND_JACK_LINEOUT);
+ snd_soc_jack_report(mbhc->jack, mbhc->hph_status,
+ WCD_MBHC_JACK_MASK);
+ }
+ }
+
+ mbhc->hph_status |= jack_type;
+
+ if (jack_type == SND_JACK_HEADPHONE && mbhc->mbhc_cb->mbhc_micb_ramp_control)
+ mbhc->mbhc_cb->mbhc_micb_ramp_control(mbhc->component, false);
+
+ snd_soc_jack_report(mbhc->jack, (mbhc->hph_status | SND_JACK_MECHANICAL),
+ WCD_MBHC_JACK_MASK);
+}
+
+static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion,
+ enum snd_jack_types jack_type)
+{
+
+ WARN_ON(!mutex_is_locked(&mbhc->lock));
+
+ if (!insertion) /* Report removal */
+ wcd_mbhc_report_plug_removal(mbhc, jack_type);
+ else
+ wcd_mbhc_report_plug_insertion(mbhc, jack_type);
+
+}
+
+static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
+ struct work_struct *work)
+{
+ mbhc->hs_detect_work_stop = true;
+ mutex_unlock(&mbhc->lock);
+ cancel_work_sync(work);
+ mutex_lock(&mbhc->lock);
+}
+
+static void wcd_mbhc_cancel_pending_work(struct wcd_mbhc *mbhc)
+{
+ /* cancel pending button press */
+ wcd_cancel_btn_work(mbhc);
+ /* cancel correct work function */
+ wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+}
+
+static void wcd_mbhc_elec_hs_report_unplug(struct wcd_mbhc *mbhc)
+{
+ wcd_mbhc_cancel_pending_work(mbhc);
+ /* Report extension cable */
+ wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+ /*
+ * Disable HPHL trigger and MIC Schmitt triggers.
+ * Setup for insertion detection.
+ */
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_rem_intr);
+ wcd_mbhc_curr_micbias_control(mbhc, WCD_MBHC_EN_NONE);
+ /* Disable HW FSM */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_SCHMT_ISRC, 3);
+
+ /* Set the detection type appropriately */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+ enable_irq(mbhc->intr_ids->mbhc_hs_ins_intr);
+}
+
+static void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type)
+{
+ if (mbhc->current_plug == plug_type)
+ return;
+
+ mutex_lock(&mbhc->lock);
+
+ switch (plug_type) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ wcd_mbhc_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
+ wcd_mbhc_report_plug(mbhc, 1, SND_JACK_HEADSET);
+ break;
+ case MBHC_PLUG_TYPE_HIGH_HPH:
+ wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
+ break;
+ case MBHC_PLUG_TYPE_GND_MIC_SWAP:
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE)
+ wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET)
+ wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
+ break;
+ default:
+ WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
+ mbhc->current_plug, plug_type);
+ break;
+ }
+ mutex_unlock(&mbhc->lock);
+}
+
+static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
+ struct work_struct *work)
+{
+ WARN_ON(!mutex_is_locked(&mbhc->lock));
+ mbhc->hs_detect_work_stop = false;
+ schedule_work(work);
+}
+
+static void wcd_mbhc_adc_detect_plug_type(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_component *component = mbhc->component;
+
+ WARN_ON(!mutex_is_locked(&mbhc->lock));
+
+ if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+ mbhc->mbhc_cb->hph_pull_down_ctrl(component, false);
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_DETECTION_DONE, 0);
+
+ if (mbhc->mbhc_cb->mbhc_micbias_control) {
+ mbhc->mbhc_cb->mbhc_micbias_control(component, MIC_BIAS_2,
+ MICB_ENABLE);
+ wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+ }
+}
+
+static irqreturn_t wcd_mbhc_mech_plug_detect_irq(int irq, void *data)
+{
+ struct snd_soc_component *component;
+ enum snd_jack_types jack_type;
+ struct wcd_mbhc *mbhc = data;
+ bool detection_type;
+
+ component = mbhc->component;
+ mutex_lock(&mbhc->lock);
+
+ mbhc->in_swch_irq_handler = true;
+
+ wcd_mbhc_cancel_pending_work(mbhc);
+
+ detection_type = wcd_mbhc_read_field(mbhc, WCD_MBHC_MECH_DETECTION_TYPE);
+
+ /* Set the detection type appropriately */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MECH_DETECTION_TYPE, !detection_type);
+
+ /* Enable micbias ramp */
+ if (mbhc->mbhc_cb->mbhc_micb_ramp_control)
+ mbhc->mbhc_cb->mbhc_micb_ramp_control(component, true);
+
+ if (detection_type) {
+ if (mbhc->current_plug != MBHC_PLUG_TYPE_NONE)
+ goto exit;
+ /* Make sure MASTER_BIAS_CTL is enabled */
+ mbhc->mbhc_cb->mbhc_bias(component, true);
+ mbhc->is_btn_press = false;
+ wcd_mbhc_adc_detect_plug_type(mbhc);
+ } else {
+ /* Disable HW FSM */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 0);
+ mbhc->extn_cable_hph_rem = false;
+
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_NONE)
+ goto exit;
+
+ mbhc->is_btn_press = false;
+ switch (mbhc->current_plug) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ jack_type = SND_JACK_HEADPHONE;
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
+ jack_type = SND_JACK_HEADSET;
+ break;
+ case MBHC_PLUG_TYPE_HIGH_HPH:
+ if (mbhc->mbhc_detection_logic == WCD_DETECTION_ADC)
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_ISRC_EN, 0);
+ jack_type = SND_JACK_LINEOUT;
+ break;
+ case MBHC_PLUG_TYPE_GND_MIC_SWAP:
+ dev_err(mbhc->dev, "Ground and Mic Swapped on plug\n");
+ goto exit;
+ default:
+ dev_err(mbhc->dev, "Invalid current plug: %d\n",
+ mbhc->current_plug);
+ goto exit;
+ }
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_rem_intr);
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_ins_intr);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+ wcd_mbhc_report_plug(mbhc, 0, jack_type);
+ }
+
+exit:
+ mbhc->in_swch_irq_handler = false;
+ mutex_unlock(&mbhc->lock);
+ return IRQ_HANDLED;
+}
+
+static int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc)
+{
+ int mask = 0;
+ int btn;
+
+ btn = wcd_mbhc_read_field(mbhc, WCD_MBHC_BTN_RESULT);
+
+ switch (btn) {
+ case 0:
+ mask = SND_JACK_BTN_0;
+ break;
+ case 1:
+ mask = SND_JACK_BTN_1;
+ break;
+ case 2:
+ mask = SND_JACK_BTN_2;
+ break;
+ case 3:
+ mask = SND_JACK_BTN_3;
+ break;
+ case 4:
+ mask = SND_JACK_BTN_4;
+ break;
+ case 5:
+ mask = SND_JACK_BTN_5;
+ break;
+ default:
+ break;
+ }
+
+ return mask;
+}
+
+static void wcd_btn_long_press_fn(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct wcd_mbhc *mbhc = container_of(dwork, struct wcd_mbhc, mbhc_btn_dwork);
+
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET)
+ snd_soc_jack_report(mbhc->jack, mbhc->buttons_pressed,
+ mbhc->buttons_pressed);
+}
+
+static irqreturn_t wcd_mbhc_btn_press_handler(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ int mask;
+ unsigned long msec_val;
+
+ mutex_lock(&mbhc->lock);
+ wcd_cancel_btn_work(mbhc);
+ mbhc->is_btn_press = true;
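+ /* WCD937X and newer codecs use the v3 class-H sequences */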
+ msec_val = jiffies_to_msecs(jiffies - mbhc->jiffies_atreport);
+
+ /* Too short, ignore button press */
+ if (msec_val < MBHC_BUTTON_PRESS_THRESHOLD_MIN)
+ goto done;
+
+ /* If switch interrupt already kicked in, ignore button press */
+ if (mbhc->in_swch_irq_handler)
+ goto done;
+
+ /* Plug isn't headset, ignore button press */
+ if (mbhc->current_plug != MBHC_PLUG_TYPE_HEADSET)
+ goto done;
+
+ mask = wcd_mbhc_get_button_mask(mbhc);
+ mbhc->buttons_pressed |= mask;
+ if (schedule_delayed_work(&mbhc->mbhc_btn_dwork, msecs_to_jiffies(400)) == 0)
+ WARN(1, "Button pressed twice without release event\n");
+done:
+ mutex_unlock(&mbhc->lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_btn_release_handler(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ int ret;
+
+ mutex_lock(&mbhc->lock);
+ if (mbhc->is_btn_press)
+ mbhc->is_btn_press = false;
+ else /* fake btn press */
+ goto exit;
+
+ if (!(mbhc->buttons_pressed & WCD_MBHC_JACK_BUTTON_MASK))
+ goto exit;
+
+ ret = wcd_cancel_btn_work(mbhc);
+ if (ret == 0) { /* Reporting long button release event */
+ snd_soc_jack_report(mbhc->jack, 0, mbhc->buttons_pressed);
+ } else {
+ if (!mbhc->in_swch_irq_handler) {
+ /* Report button press and release */
+ snd_soc_jack_report(mbhc->jack, mbhc->buttons_pressed,
+ mbhc->buttons_pressed);
+ snd_soc_jack_report(mbhc->jack, 0, mbhc->buttons_pressed);
+ }
+ }
+ mbhc->buttons_pressed &= ~WCD_MBHC_JACK_BUTTON_MASK;
+exit:
+ mutex_unlock(&mbhc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_hph_ocp_irq(struct wcd_mbhc *mbhc, bool hphr)
+{
+
+ /* TODO Find a better way to report this to Userspace */
+ dev_err(mbhc->dev, "MBHC Over Current on %s detected\n",
+ hphr ? "HPHR" : "HPHL");
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_OCP_FSM_EN, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_OCP_FSM_EN, 1);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_hphl_ocp_irq(int irq, void *data)
+{
+ return wcd_mbhc_hph_ocp_irq(data, false);
+}
+
+static irqreturn_t wcd_mbhc_hphr_ocp_irq(int irq, void *data)
+{
+ return wcd_mbhc_hph_ocp_irq(data, true);
+}
+
+static int wcd_mbhc_initialise(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_component *component = mbhc->component;
+
+ mutex_lock(&mbhc->lock);
+
+ /* enable HS detection */
+ if (mbhc->mbhc_cb->hph_pull_up_control_v2)
+ mbhc->mbhc_cb->hph_pull_up_control_v2(component,
+ HS_PULLUP_I_DEFAULT);
+ else if (mbhc->mbhc_cb->hph_pull_up_control)
+ mbhc->mbhc_cb->hph_pull_up_control(component, I_DEFAULT);
+ else
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_HS_L_DET_PULL_UP_CTRL, 3);
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_HPHL_PLUG_TYPE, mbhc->cfg->hphl_swh);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_GND_PLUG_TYPE, mbhc->cfg->gnd_swh);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_SW_HPH_LP_100K_TO_GND, 1);
+ if (mbhc->cfg->gnd_det_en && mbhc->mbhc_cb->mbhc_gnd_det_ctrl)
+ mbhc->mbhc_cb->mbhc_gnd_det_ctrl(component, true);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL, 1);
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_L_DET_EN, 1);
+
+ /* Insertion debounce set to 96ms */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_INSREM_DBNC, 6);
+
+ /* Button Debounce set to 16ms */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_DBNC, 2);
+
+ /* enable bias */
+ mbhc->mbhc_cb->mbhc_bias(component, true);
+ /* enable MBHC clock */
+ if (mbhc->mbhc_cb->clk_setup)
+ mbhc->mbhc_cb->clk_setup(component, true);
+
+ /* program HS_VREF value */
+ wcd_program_hs_vref(mbhc);
+
+ wcd_program_btn_threshold(mbhc, false);
+
+ mutex_unlock(&mbhc->lock);
+
+ return 0;
+}
+
+static int wcd_mbhc_get_micbias(struct wcd_mbhc *mbhc)
+{
+ int micbias = 0;
+
+ if (mbhc->mbhc_cb->get_micbias_val) {
+ mbhc->mbhc_cb->get_micbias_val(mbhc->component, &micbias);
+ } else {
+ u8 vout_ctl = 0;
+ /* Read MBHC Micbias (Mic Bias2) voltage */
+ vout_ctl = wcd_mbhc_read_field(mbhc, WCD_MBHC_MICB2_VOUT);
+ /* Formula for getting micbias from vout
+ * micbias = 1.0V + VOUT_CTL * 50mV
+ */
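+ /* e.g. vout_ctl = 16 gives 1000 + 16 * 50 = 1800 mV */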
+ micbias = 1000 + (vout_ctl * 50);
+ }
+ return micbias;
+}
+
+static int wcd_get_voltage_from_adc(u8 val, int micbias)
+{
+ /* Formula for calculating voltage from ADC
+ * Voltage = ADC_RESULT*12.5mV*V_MICBIAS/1.8
+ */
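+ /* e.g. val = 255 at micbias = 1800 mV gives (255 * 125 * 1800) / 18000 = 3187 mV (integer division) */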
+ return ((val * 125 * micbias)/(WCD_MBHC_ADC_MICBIAS_MV * 10));
+}
+
+static int wcd_measure_adc_continuous(struct wcd_mbhc *mbhc)
+{
+ u8 adc_result;
+ int output_mv;
+ int retry = 3;
+ u8 adc_en;
+
+ /* Pre-requisites for ADC continuous measurement */
+ /* Read legacy electrical detection and disable */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+ /* Set ADC to continuous measurement */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_MODE, 1);
+ /* Read ADC Enable bit to restore after adc measurement */
+ adc_en = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_EN);
+ /* Disable ADC_ENABLE bit */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 0);
+ /* Disable MBHC FSM */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 0);
+ /* Set the MUX selection to IN2P */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MUX_CTL, MUX_CTL_IN2P);
+ /* Enable MBHC FSM */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ /* Enable ADC_ENABLE bit */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 1);
+
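+ /* Take a few samples and keep only the last one, giving the ADC time to settle */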
+ while (retry--) {
+ /* wait for 3 msec before reading ADC result */
+ usleep_range(3000, 3100);
+ adc_result = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_RESULT);
+ }
+
+ /* Restore ADC Enable */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, adc_en);
+ /* Get voltage from ADC result */
+ output_mv = wcd_get_voltage_from_adc(adc_result, wcd_mbhc_get_micbias(mbhc));
+
+ return output_mv;
+}
+
+static int wcd_measure_adc_once(struct wcd_mbhc *mbhc, int mux_ctl)
+{
+ struct device *dev = mbhc->dev;
+ u8 adc_timeout = 0;
+ u8 adc_complete = 0;
+ u8 adc_result;
+ int retry = 6;
+ int ret;
+ int output_mv = 0;
+ u8 adc_en;
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_MODE, 0);
+ /* Read ADC Enable bit to restore after adc measurement */
+ adc_en = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_EN);
+ /* Trigger ADC one time measurement */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 0);
+ /* Set the appropriate MUX selection */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MUX_CTL, mux_ctl);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 1);
+
+ while (retry--) {
+ /* wait for 600usec to get adc results */
+ usleep_range(600, 610);
+
+ /* check for ADC Timeout */
+ adc_timeout = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_TIMEOUT);
+ if (adc_timeout)
+ continue;
+
+ /* Read ADC complete bit */
+ adc_complete = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_COMPLETE);
+ if (!adc_complete)
+ continue;
+
+ /* Read ADC result */
+ adc_result = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_RESULT);
+
+ /* Get voltage from ADC result */
+ output_mv = wcd_get_voltage_from_adc(adc_result,
+ wcd_mbhc_get_micbias(mbhc));
+ break;
+ }
+
+ /* Restore ADC Enable */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, adc_en);
+
+ if (retry <= 0) {
+ dev_err(dev, "%s: adc complete: %d, adc timeout: %d\n",
+ __func__, adc_complete, adc_timeout);
+ ret = -EINVAL;
+ } else {
+ ret = output_mv;
+ }
+
+ return ret;
+}
+
+/* To determine if cross connection occurred */
+static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
+{
+ u8 adc_mode, elect_ctl, adc_en, fsm_en;
+ int hphl_adc_res, hphr_adc_res;
+ bool is_cross_conn = false;
+
+ /* If PA is enabled, don't check for cross-connection */
+ if (wcd_mbhc_read_field(mbhc, WCD_MBHC_HPH_PA_EN))
+ return -EINVAL;
+
+ /* Read legacy electrical detection and disable */
+ elect_ctl = wcd_mbhc_read_field(mbhc, WCD_MBHC_ELECT_SCHMT_ISRC);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+
+ /* Read and set ADC to single measurement */
+ adc_mode = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_MODE);
+ /* Read ADC Enable bit to restore after adc measurement */
+ adc_en = wcd_mbhc_read_field(mbhc, WCD_MBHC_ADC_EN);
+ /* Read FSM status */
+ fsm_en = wcd_mbhc_read_field(mbhc, WCD_MBHC_FSM_EN);
+
+ /* Get adc result for HPH L */
+ hphl_adc_res = wcd_measure_adc_once(mbhc, MUX_CTL_HPH_L);
+ if (hphl_adc_res < 0)
+ return hphl_adc_res;
+
+ /* Get adc result for HPH R in mV */
+ hphr_adc_res = wcd_measure_adc_once(mbhc, MUX_CTL_HPH_R);
+ if (hphr_adc_res < 0)
+ return hphr_adc_res;
+
+ if (hphl_adc_res > HPHL_CROSS_CONN_THRESHOLD ||
+ hphr_adc_res > HPHL_CROSS_CONN_THRESHOLD)
+ is_cross_conn = true;
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 0);
+ /* Set the MUX selection to Auto */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_MUX_CTL, MUX_CTL_AUTO);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, 1);
+ /* Restore ADC Enable */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, adc_en);
+ /* Restore ADC mode */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_MODE, adc_mode);
+ /* Restore FSM state */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_FSM_EN, fsm_en);
+ /* Restore electrical detection */
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+
+ return is_cross_conn;
+}
+
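+/*
+ * The configured threshold is scaled by the ratio of the current micbias to
+ * the micbias it was specified for; e.g. a 1700 mV threshold given for a
+ * 1800 mV micbias becomes (1700 * 2700) / 1800 = 2550 mV once micbias is
+ * raised to 2.7 V. The example voltages are only illustrative.
+ */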
+static int wcd_mbhc_adc_get_hs_thres(struct wcd_mbhc *mbhc)
+{
+ int hs_threshold, micbias_mv;
+
+ micbias_mv = wcd_mbhc_get_micbias(mbhc);
+ if (mbhc->cfg->hs_thr) {
+ if (mbhc->cfg->micb_mv == micbias_mv)
+ hs_threshold = mbhc->cfg->hs_thr;
+ else
+ hs_threshold = (mbhc->cfg->hs_thr *
+ micbias_mv) / mbhc->cfg->micb_mv;
+ } else {
+ hs_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+ micbias_mv) / WCD_MBHC_ADC_MICBIAS_MV);
+ }
+ return hs_threshold;
+}
+
+static int wcd_mbhc_adc_get_hph_thres(struct wcd_mbhc *mbhc)
+{
+ int hph_threshold, micbias_mv;
+
+ micbias_mv = wcd_mbhc_get_micbias(mbhc);
+ if (mbhc->cfg->hph_thr) {
+ if (mbhc->cfg->micb_mv == micbias_mv)
+ hph_threshold = mbhc->cfg->hph_thr;
+ else
+ hph_threshold = (mbhc->cfg->hph_thr *
+ micbias_mv) / mbhc->cfg->micb_mv;
+ } else {
+ hph_threshold = ((WCD_MBHC_ADC_HPH_THRESHOLD_MV *
+ micbias_mv) / WCD_MBHC_ADC_MICBIAS_MV);
+ }
+ return hph_threshold;
+}
+
+static void wcd_mbhc_adc_update_fsm_source(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type)
+{
+ bool micbias2 = false;
+
+ switch (plug_type) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 3);
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
+ if (mbhc->mbhc_cb->micbias_enable_status)
+ micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc->component,
+ MIC_BIAS_2);
+
+ if (!mbhc->is_hs_recording && !micbias2)
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 3);
+ break;
+ default:
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 0);
+ break;
+
+ }
+}
+
+static void wcd_mbhc_bcs_enable(struct wcd_mbhc *mbhc, int plug_type, bool enable)
+{
+ switch (plug_type) {
+ case MBHC_PLUG_TYPE_HEADSET:
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ if (mbhc->mbhc_cb->bcs_enable)
+ mbhc->mbhc_cb->bcs_enable(mbhc->component, enable);
+ break;
+ default:
+ break;
+ }
+}
+
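+/*
+ * Classify the plug from the measured mic-line voltage: below the headphone
+ * threshold means no microphone (3-pole headphone), above the headset
+ * threshold means an open line (high impedance / extension cable), and
+ * anything in between is treated as a 4-pole headset.
+ */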
+static int wcd_mbhc_get_plug_from_adc(struct wcd_mbhc *mbhc, int adc_result)
+{
+ enum wcd_mbhc_plug_type plug_type;
+ u32 hph_thr, hs_thr;
+
+ hs_thr = wcd_mbhc_adc_get_hs_thres(mbhc);
+ hph_thr = wcd_mbhc_adc_get_hph_thres(mbhc);
+
+ if (adc_result < hph_thr)
+ plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+ else if (adc_result > hs_thr)
+ plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+ else
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+
+ return plug_type;
+}
+
+static void wcd_correct_swch_plug(struct work_struct *work)
+{
+ struct wcd_mbhc *mbhc;
+ struct snd_soc_component *component;
+ enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+ unsigned long timeout;
+ int pt_gnd_mic_swap_cnt = 0;
+ int output_mv, cross_conn, hs_threshold, try = 0;
+ bool is_pa_on;
+
+ mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
+ component = mbhc->component;
+
+ hs_threshold = wcd_mbhc_adc_get_hs_thres(mbhc);
+
+ /* Mask ADC COMPLETE interrupt */
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_ins_intr);
+
+ /* Check for cross connection */
+ do {
+ cross_conn = wcd_check_cross_conn(mbhc);
+ try++;
+ } while (try < GND_MIC_SWAP_THRESHOLD);
+
+ if (cross_conn > 0) {
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ dev_err(mbhc->dev, "cross connection found, Plug type %d\n",
+ plug_type);
+ goto correct_plug_type;
+ }
+
+ /* Find plug type */
+ output_mv = wcd_measure_adc_continuous(mbhc);
+ plug_type = wcd_mbhc_get_plug_from_adc(mbhc, output_mv);
+
+ /*
+ * Report plug type if it is either headset or headphone
+ * else start the 3 sec loop
+ */
+ switch (plug_type) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_MODE, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_DETECTION_DONE, 1);
+ break;
+ default:
+ break;
+ }
+
+correct_plug_type:
+
+ /* Disable BCS slow insertion detection */
+ wcd_mbhc_bcs_enable(mbhc, plug_type, false);
+
+ timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+
+ while (!time_after(jiffies, timeout)) {
+ if (mbhc->hs_detect_work_stop) {
+ wcd_micbias_disable(mbhc);
+ goto exit;
+ }
+
+ msleep(180);
+ /*
+ * Use ADC single mode to minimize the chance of missing out
+ * btn press/release for HEADSET type during correct work.
+ */
+ output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+ plug_type = wcd_mbhc_get_plug_from_adc(mbhc, output_mv);
+ is_pa_on = wcd_mbhc_read_field(mbhc, WCD_MBHC_HPH_PA_EN);
+
+ if ((output_mv <= hs_threshold) && !is_pa_on) {
+ /* Check for cross connection*/
+ cross_conn = wcd_check_cross_conn(mbhc);
+ if (cross_conn > 0) { /* cross-connection */
+ pt_gnd_mic_swap_cnt++;
+ if (pt_gnd_mic_swap_cnt < GND_MIC_SWAP_THRESHOLD)
+ continue;
+ else
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ } else if (!cross_conn) { /* no cross connection */
+ pt_gnd_mic_swap_cnt = 0;
+ plug_type = wcd_mbhc_get_plug_from_adc(mbhc, output_mv);
+ continue;
+ } else if (cross_conn < 0) { /* Error */
+ continue;
+ }
+
+ if (pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) {
+ /* US_EU gpio present, flip switch */
+ if (mbhc->cfg->swap_gnd_mic) {
+ if (mbhc->cfg->swap_gnd_mic(component, true))
+ continue;
+ }
+ }
+ }
+
+ if (output_mv > hs_threshold) /* cable is extension cable */
+ plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+ }
+
+ wcd_mbhc_bcs_enable(mbhc, plug_type, true);
+
+ if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH)
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ELECT_ISRC_EN, 1);
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_MODE, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 0);
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+
+ /*
+ * Set DETECTION_DONE bit for HEADSET
+ * so that btn press/release interrupt can be generated.
+ * For other plug type, clear the bit.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADSET)
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_DETECTION_DONE, 1);
+ else
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_DETECTION_DONE, 0);
+
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ wcd_mbhc_adc_update_fsm_source(mbhc, plug_type);
+
+exit:
+ if (mbhc->mbhc_cb->mbhc_micbias_control/* && !mbhc->micbias_enable*/)
+ mbhc->mbhc_cb->mbhc_micbias_control(component, MIC_BIAS_2, MICB_DISABLE);
+
+ /*
+ * If plug type is corrected from special headset to headphone,
+ * clear the micbias enable flag, set micbias back to 1.8V and
+ * disable micbias.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+ wcd_micbias_disable(mbhc);
+ /*
+ * Enable ADC COMPLETE interrupt for HEADPHONE.
+ * Btn release may happen after the correct work, ADC COMPLETE
+ * interrupt needs to be captured to correct plug type.
+ */
+ enable_irq(mbhc->intr_ids->mbhc_hs_ins_intr);
+ }
+
+ if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+ mbhc->mbhc_cb->hph_pull_down_ctrl(component, true);
+}
+
+static irqreturn_t wcd_mbhc_adc_hs_rem_irq(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ unsigned long timeout;
+ int adc_threshold, output_mv, retry = 0;
+ bool hphpa_on = false;
+
+ mutex_lock(&mbhc->lock);
+ timeout = jiffies + msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
+ adc_threshold = wcd_mbhc_adc_get_hs_thres(mbhc);
+
+ do {
+ retry++;
+ /*
+ * read output_mv every 10ms to look for
+ * any change in IN2_P
+ */
+ usleep_range(10000, 10100);
+ output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+
+ /* Check for fake removal */
+ if ((output_mv <= adc_threshold) && retry > FAKE_REM_RETRY_ATTEMPTS)
+ goto exit;
+ } while (!time_after(jiffies, timeout));
+
+ /*
+ * ADC COMPLETE and ELEC_REM interrupts are both enabled for
+ * HEADPHONE, need to reject the ADC COMPLETE interrupt which
+ * follows ELEC_REM one when HEADPHONE is removed.
+ */
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE)
+ mbhc->extn_cable_hph_rem = true;
+
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_DETECTION_DONE, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_MODE, 0);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_ADC_EN, 0);
+ wcd_mbhc_elec_hs_report_unplug(mbhc);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_BTN_ISRC_CTL, 0);
+
+ if (hphpa_on) {
+ hphpa_on = false;
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_HPH_PA_EN, 3);
+ }
+exit:
+ mutex_unlock(&mbhc->lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_adc_hs_ins_irq(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ u8 clamp_state = 0;
+ u8 clamp_retry = WCD_MBHC_FAKE_INS_RETRY;
+
+ /*
+ * ADC COMPLETE and ELEC_REM interrupts are both enabled for HEADPHONE,
+ * need to reject the ADC COMPLETE interrupt which follows ELEC_REM one
+ * when HEADPHONE is removed.
+ */
+ if (mbhc->extn_cable_hph_rem) {
+ mbhc->extn_cable_hph_rem = false;
+ return IRQ_HANDLED;
+ }
+
+ do {
+ clamp_state = wcd_mbhc_read_field(mbhc, WCD_MBHC_IN2P_CLAMP_STATE);
+ if (clamp_state)
+ return IRQ_HANDLED;
+ /*
+ * check clamp for 120ms but at 30ms chunks to leave
+ * room for other interrupts to be processed
+ */
+ usleep_range(30000, 30100);
+ } while (--clamp_retry);
+
+ /*
+ * A plain headphone would never trigger this ADC COMPLETE interrupt,
+ * so if the current plug was reported as a headphone the connected
+ * cable must in fact be a headset.
+ */
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_ins_intr);
+ wcd_mbhc_write_field(mbhc, WCD_MBHC_DETECTION_DONE, 1);
+ wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl, uint32_t *zr)
+{
+ *zl = mbhc->zl;
+ *zr = mbhc->zr;
+
+ if (*zl && *zr)
+ return 0;
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wcd_mbhc_get_impedance);
+
+void wcd_mbhc_set_hph_type(struct wcd_mbhc *mbhc, int hph_type)
+{
+ mbhc->hph_type = hph_type;
+}
+EXPORT_SYMBOL(wcd_mbhc_set_hph_type);
+
+int wcd_mbhc_get_hph_type(struct wcd_mbhc *mbhc)
+{
+ return mbhc->hph_type;
+}
+EXPORT_SYMBOL(wcd_mbhc_get_hph_type);
+
+int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *cfg,
+ struct snd_soc_jack *jack)
+{
+ if (!mbhc || !cfg || !jack)
+ return -EINVAL;
+
+ mbhc->cfg = cfg;
+ mbhc->jack = jack;
+
+ return wcd_mbhc_initialise(mbhc);
+}
+EXPORT_SYMBOL(wcd_mbhc_start);
+
+void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
+{
+ mbhc->current_plug = MBHC_PLUG_TYPE_NONE;
+ mbhc->hph_status = 0;
+ disable_irq_nosync(mbhc->intr_ids->hph_left_ocp);
+ disable_irq_nosync(mbhc->intr_ids->hph_right_ocp);
+}
+EXPORT_SYMBOL(wcd_mbhc_stop);
+
+int wcd_dt_parse_mbhc_data(struct device *dev, struct wcd_mbhc_config *cfg)
+{
+ struct device_node *np = dev->of_node;
+ int ret, i, microvolt;
+
+ if (of_property_read_bool(np, "qcom,hphl-jack-type-normally-closed"))
+ cfg->hphl_swh = false;
+ else
+ cfg->hphl_swh = true;
+
+ if (of_property_read_bool(np, "qcom,ground-jack-type-normally-closed"))
+ cfg->gnd_swh = false;
+ else
+ cfg->gnd_swh = true;
+
+ ret = of_property_read_u32(np, "qcom,mbhc-headset-vthreshold-microvolt",
+ &microvolt);
+ if (ret)
+ dev_dbg(dev, "missing qcom,mbhc-hs-mic-max-vthreshold--microvolt in dt node\n");
+ else
+ cfg->hs_thr = microvolt/1000;
+
+ ret = of_property_read_u32(np, "qcom,mbhc-headphone-vthreshold-microvolt",
+ &microvolt);
+ if (ret)
+ dev_dbg(dev, "missing qcom,mbhc-hs-mic-min-vthreshold-microvolt entry\n");
+ else
+ cfg->hph_thr = microvolt/1000;
+
+ ret = of_property_read_u32_array(np,
+ "qcom,mbhc-buttons-vthreshold-microvolt",
+ &cfg->btn_high[0],
+ WCD_MBHC_DEF_BUTTONS);
+ if (ret)
+ dev_err(dev, "missing qcom,mbhc-buttons-vthreshold-microvolt entry\n");
+
+ for (i = 0; i < WCD_MBHC_DEF_BUTTONS; i++) {
+ if (ret) /* default voltage */
+ cfg->btn_high[i] = 500000;
+ else
+ /* Micro to Milli Volts */
+ cfg->btn_high[i] = cfg->btn_high[i]/1000;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wcd_dt_parse_mbhc_data);
+
+struct wcd_mbhc *wcd_mbhc_init(struct snd_soc_component *component,
+ const struct wcd_mbhc_cb *mbhc_cb,
+ const struct wcd_mbhc_intr *intr_ids,
+ struct wcd_mbhc_field *fields,
+ bool impedance_det_en)
+{
+ struct device *dev = component->dev;
+ struct wcd_mbhc *mbhc;
+ int ret;
+
+ if (!intr_ids || !fields || !mbhc_cb || !mbhc_cb->mbhc_bias || !mbhc_cb->set_btn_thr) {
+ dev_err(dev, "%s: Insufficient mbhc configuration\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mbhc = devm_kzalloc(dev, sizeof(*mbhc), GFP_KERNEL);
+ if (!mbhc)
+ return ERR_PTR(-ENOMEM);
+
+ mbhc->component = component;
+ mbhc->dev = dev;
+ mbhc->intr_ids = intr_ids;
+ mbhc->mbhc_cb = mbhc_cb;
+ mbhc->fields = fields;
+ mbhc->mbhc_detection_logic = WCD_DETECTION_ADC;
+
+ if (mbhc_cb->compute_impedance)
+ mbhc->impedance_detect = impedance_det_en;
+
+ INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd_btn_long_press_fn);
+
+ mutex_init(&mbhc->lock);
+
+ INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_sw_intr, NULL,
+ wcd_mbhc_mech_plug_detect_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "mbhc sw intr", mbhc);
+ if (ret)
+ goto err;
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_btn_press_intr, NULL,
+ wcd_mbhc_btn_press_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "Button Press detect", mbhc);
+ if (ret)
+ goto err;
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_btn_release_intr, NULL,
+ wcd_mbhc_btn_release_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "Button Release detect", mbhc);
+ if (ret)
+ goto err;
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_hs_ins_intr, NULL,
+ wcd_mbhc_adc_hs_ins_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "Elect Insert", mbhc);
+ if (ret)
+ goto err;
+
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_ins_intr);
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_hs_rem_intr, NULL,
+ wcd_mbhc_adc_hs_rem_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "Elect Remove", mbhc);
+ if (ret)
+ goto err;
+
+ disable_irq_nosync(mbhc->intr_ids->mbhc_hs_rem_intr);
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->hph_left_ocp, NULL,
+ wcd_mbhc_hphl_ocp_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "HPH_L OCP detect", mbhc);
+ if (ret)
+ goto err;
+
+ ret = devm_request_threaded_irq(dev, mbhc->intr_ids->hph_right_ocp, NULL,
+ wcd_mbhc_hphr_ocp_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "HPH_R OCP detect", mbhc);
+ if (ret)
+ goto err;
+
+ return mbhc;
+err:
+ dev_err(dev, "Failed to request mbhc interrupts %d\n", ret);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(wcd_mbhc_init);
+
+void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+{
+ mutex_lock(&mbhc->lock);
+ wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+ mutex_unlock(&mbhc->lock);
+}
+EXPORT_SYMBOL(wcd_mbhc_deinit);
+
+static int __init mbhc_init(void)
+{
+ return 0;
+}
+
+static void __exit mbhc_exit(void)
+{
+}
+
+module_init(mbhc_init);
+module_exit(mbhc_exit);
+
+MODULE_DESCRIPTION("wcd MBHC v2 module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
new file mode 100644
index 000000000000..006118f3e81f
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD_MBHC_V2_H__
+#define __WCD_MBHC_V2_H__
+
+#include <sound/jack.h>
+
+#define WCD_MBHC_FIELD(id, rreg, rmask) \
+ [id] = { .reg = rreg, .mask = rmask }
+
+enum wcd_mbhc_field_function {
+ WCD_MBHC_L_DET_EN,
+ WCD_MBHC_GND_DET_EN,
+ WCD_MBHC_MECH_DETECTION_TYPE,
+ WCD_MBHC_MIC_CLAMP_CTL,
+ WCD_MBHC_ELECT_DETECTION_TYPE,
+ WCD_MBHC_HS_L_DET_PULL_UP_CTRL,
+ WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL,
+ WCD_MBHC_HPHL_PLUG_TYPE,
+ WCD_MBHC_GND_PLUG_TYPE,
+ WCD_MBHC_SW_HPH_LP_100K_TO_GND,
+ WCD_MBHC_ELECT_SCHMT_ISRC,
+ WCD_MBHC_FSM_EN,
+ WCD_MBHC_INSREM_DBNC,
+ WCD_MBHC_BTN_DBNC,
+ WCD_MBHC_HS_VREF,
+ WCD_MBHC_HS_COMP_RESULT,
+ WCD_MBHC_IN2P_CLAMP_STATE,
+ WCD_MBHC_MIC_SCHMT_RESULT,
+ WCD_MBHC_HPHL_SCHMT_RESULT,
+ WCD_MBHC_HPHR_SCHMT_RESULT,
+ WCD_MBHC_OCP_FSM_EN,
+ WCD_MBHC_BTN_RESULT,
+ WCD_MBHC_BTN_ISRC_CTL,
+ WCD_MBHC_ELECT_RESULT,
+ WCD_MBHC_MICB_CTRL, /* Pull-up and micb control */
+ WCD_MBHC_HPH_CNP_WG_TIME,
+ WCD_MBHC_HPHR_PA_EN,
+ WCD_MBHC_HPHL_PA_EN,
+ WCD_MBHC_HPH_PA_EN,
+ WCD_MBHC_SWCH_LEVEL_REMOVE,
+ WCD_MBHC_PULLDOWN_CTRL,
+ WCD_MBHC_ANC_DET_EN,
+ WCD_MBHC_FSM_STATUS,
+ WCD_MBHC_MUX_CTL,
+ WCD_MBHC_MOISTURE_STATUS,
+ WCD_MBHC_HPHR_GND,
+ WCD_MBHC_HPHL_GND,
+ WCD_MBHC_HPHL_OCP_DET_EN,
+ WCD_MBHC_HPHR_OCP_DET_EN,
+ WCD_MBHC_HPHL_OCP_STATUS,
+ WCD_MBHC_HPHR_OCP_STATUS,
+ WCD_MBHC_ADC_EN,
+ WCD_MBHC_ADC_COMPLETE,
+ WCD_MBHC_ADC_TIMEOUT,
+ WCD_MBHC_ADC_RESULT,
+ WCD_MBHC_MICB2_VOUT,
+ WCD_MBHC_ADC_MODE,
+ WCD_MBHC_DETECTION_DONE,
+ WCD_MBHC_ELECT_ISRC_EN,
+ WCD_MBHC_REG_FUNC_MAX,
+};
+
+#define WCD_MBHC_DEF_BUTTONS 8
+#define WCD_MBHC_KEYCODE_NUM 8
+#define WCD_MBHC_USLEEP_RANGE_MARGIN_US 100
+#define WCD_MBHC_THR_HS_MICB_MV 2700
+#define WCD_MONO_HS_MIN_THR 2
+
+enum wcd_mbhc_detect_logic {
+ WCD_DETECTION_LEGACY,
+ WCD_DETECTION_ADC,
+};
+
+enum wcd_mbhc_cs_mb_en_flag {
+ WCD_MBHC_EN_CS = 0,
+ WCD_MBHC_EN_MB,
+ WCD_MBHC_EN_PULLUP,
+ WCD_MBHC_EN_NONE,
+};
+
+enum {
+ WCD_MBHC_ELEC_HS_INS,
+ WCD_MBHC_ELEC_HS_REM,
+};
+
+enum wcd_mbhc_plug_type {
+ MBHC_PLUG_TYPE_INVALID = -1,
+ MBHC_PLUG_TYPE_NONE,
+ MBHC_PLUG_TYPE_HEADSET,
+ MBHC_PLUG_TYPE_HEADPHONE,
+ MBHC_PLUG_TYPE_HIGH_HPH,
+ MBHC_PLUG_TYPE_GND_MIC_SWAP,
+};
+
+enum pa_dac_ack_flags {
+ WCD_MBHC_HPHL_PA_OFF_ACK = 0,
+ WCD_MBHC_HPHR_PA_OFF_ACK,
+};
+
+enum wcd_mbhc_btn_det_mem {
+ WCD_MBHC_BTN_DET_V_BTN_LOW,
+ WCD_MBHC_BTN_DET_V_BTN_HIGH
+};
+
+enum {
+ MIC_BIAS_1 = 1,
+ MIC_BIAS_2,
+ MIC_BIAS_3,
+ MIC_BIAS_4
+};
+
+enum {
+ MICB_PULLUP_ENABLE,
+ MICB_PULLUP_DISABLE,
+ MICB_ENABLE,
+ MICB_DISABLE,
+};
+
+enum wcd_notify_event {
+ WCD_EVENT_INVALID,
+ /* events for micbias ON and OFF */
+ WCD_EVENT_PRE_MICBIAS_2_OFF,
+ WCD_EVENT_POST_MICBIAS_2_OFF,
+ WCD_EVENT_PRE_MICBIAS_2_ON,
+ WCD_EVENT_POST_MICBIAS_2_ON,
+ WCD_EVENT_PRE_DAPM_MICBIAS_2_OFF,
+ WCD_EVENT_POST_DAPM_MICBIAS_2_OFF,
+ WCD_EVENT_PRE_DAPM_MICBIAS_2_ON,
+ WCD_EVENT_POST_DAPM_MICBIAS_2_ON,
+ /* events for PA ON and OFF */
+ WCD_EVENT_PRE_HPHL_PA_ON,
+ WCD_EVENT_POST_HPHL_PA_OFF,
+ WCD_EVENT_PRE_HPHR_PA_ON,
+ WCD_EVENT_POST_HPHR_PA_OFF,
+ WCD_EVENT_PRE_HPHL_PA_OFF,
+ WCD_EVENT_PRE_HPHR_PA_OFF,
+ WCD_EVENT_OCP_OFF,
+ WCD_EVENT_OCP_ON,
+ WCD_EVENT_LAST,
+};
+
+enum wcd_mbhc_event_state {
+ WCD_MBHC_EVENT_PA_HPHL,
+ WCD_MBHC_EVENT_PA_HPHR,
+};
+
+enum wcd_mbhc_hph_type {
+ WCD_MBHC_HPH_NONE = 0,
+ WCD_MBHC_HPH_MONO,
+ WCD_MBHC_HPH_STEREO,
+};
+
+/*
+ * These enum definitions are directly mapped to the register
+ * definitions
+ */
+
+enum mbhc_hs_pullup_iref {
+ I_DEFAULT = -1,
+ I_OFF = 0,
+ I_1P0_UA,
+ I_2P0_UA,
+ I_3P0_UA,
+};
+
+enum mbhc_hs_pullup_iref_v2 {
+ HS_PULLUP_I_DEFAULT = -1,
+ HS_PULLUP_I_3P0_UA = 0,
+ HS_PULLUP_I_2P25_UA,
+ HS_PULLUP_I_1P5_UA,
+ HS_PULLUP_I_0P75_UA,
+ HS_PULLUP_I_1P125_UA = 0x05,
+ HS_PULLUP_I_0P375_UA = 0x07,
+ HS_PULLUP_I_2P0_UA,
+ HS_PULLUP_I_1P0_UA = 0x0A,
+ HS_PULLUP_I_0P5_UA,
+ HS_PULLUP_I_0P25_UA = 0x0F,
+ HS_PULLUP_I_0P125_UA = 0x17,
+ HS_PULLUP_I_OFF,
+};
+
+enum mbhc_moisture_rref {
+ R_OFF,
+ R_24_KOHM,
+ R_84_KOHM,
+ R_184_KOHM,
+};
+
+struct wcd_mbhc_config {
+ int btn_high[WCD_MBHC_DEF_BUTTONS];
+ int btn_low[WCD_MBHC_DEF_BUTTONS];
+ int v_hs_max;
+ int num_btn;
+ bool mono_stero_detection;
+ bool (*swap_gnd_mic)(struct snd_soc_component *component, bool active);
+ bool hs_ext_micbias;
+ bool gnd_det_en;
+ uint32_t linein_th;
+ bool moisture_en;
+ int mbhc_micbias;
+ int anc_micbias;
+ bool moisture_duty_cycle_en;
+ bool hphl_swh; /* track HPHL switch NC / NO */
+ bool gnd_swh; /* track GND switch NC / NO */
+ u32 hs_thr;
+ u32 hph_thr;
+ u32 micb_mv;
+ u32 moist_vref;
+ u32 moist_iref;
+ u32 moist_rref;
+};
+
+struct wcd_mbhc_intr {
+ int mbhc_sw_intr;
+ int mbhc_btn_press_intr;
+ int mbhc_btn_release_intr;
+ int mbhc_hs_ins_intr;
+ int mbhc_hs_rem_intr;
+ int hph_left_ocp;
+ int hph_right_ocp;
+};
+
+struct wcd_mbhc_field {
+ u16 reg;
+ u8 mask;
+};
+
+struct wcd_mbhc;
+
+struct wcd_mbhc_cb {
+ void (*update_cross_conn_thr)(struct snd_soc_component *component);
+ void (*get_micbias_val)(struct snd_soc_component *component, int *mb);
+ void (*bcs_enable)(struct snd_soc_component *component, bool bcs_enable);
+ void (*compute_impedance)(struct snd_soc_component *component,
+ uint32_t *zl, uint32_t *zr);
+ void (*set_micbias_value)(struct snd_soc_component *component);
+ void (*set_auto_zeroing)(struct snd_soc_component *component,
+ bool enable);
+ void (*clk_setup)(struct snd_soc_component *component, bool enable);
+ bool (*micbias_enable_status)(struct snd_soc_component *component, int micb_num);
+ void (*mbhc_bias)(struct snd_soc_component *component, bool enable);
+ void (*set_btn_thr)(struct snd_soc_component *component,
+ int *btn_low, int *btn_high,
+ int num_btn, bool is_micbias);
+ void (*hph_pull_up_control)(struct snd_soc_component *component,
+ enum mbhc_hs_pullup_iref);
+ int (*mbhc_micbias_control)(struct snd_soc_component *component,
+ int micb_num, int req);
+ void (*mbhc_micb_ramp_control)(struct snd_soc_component *component,
+ bool enable);
+ bool (*extn_use_mb)(struct snd_soc_component *component);
+ int (*mbhc_micb_ctrl_thr_mic)(struct snd_soc_component *component,
+ int micb_num, bool req_en);
+ void (*mbhc_gnd_det_ctrl)(struct snd_soc_component *component,
+ bool enable);
+ void (*hph_pull_down_ctrl)(struct snd_soc_component *component,
+ bool enable);
+ void (*mbhc_moisture_config)(struct snd_soc_component *component);
+ void (*update_anc_state)(struct snd_soc_component *component,
+ bool enable, int anc_num);
+ void (*hph_pull_up_control_v2)(struct snd_soc_component *component,
+ int pull_up_cur);
+ bool (*mbhc_get_moisture_status)(struct snd_soc_component *component);
+ void (*mbhc_moisture_polling_ctrl)(struct snd_soc_component *component, bool enable);
+ void (*mbhc_moisture_detect_en)(struct snd_soc_component *component, bool enable);
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC)
+int wcd_dt_parse_mbhc_data(struct device *dev, struct wcd_mbhc_config *cfg);
+int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg,
+ struct snd_soc_jack *jack);
+void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
+void wcd_mbhc_set_hph_type(struct wcd_mbhc *mbhc, int hph_type);
+int wcd_mbhc_get_hph_type(struct wcd_mbhc *mbhc);
+struct wcd_mbhc *wcd_mbhc_init(struct snd_soc_component *component,
+ const struct wcd_mbhc_cb *mbhc_cb,
+ const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+ struct wcd_mbhc_field *fields,
+ bool impedance_det_en);
+int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+ uint32_t *zr);
+void wcd_mbhc_deinit(struct wcd_mbhc *mbhc);
+int wcd_mbhc_event_notify(struct wcd_mbhc *mbhc, unsigned long event);
+
+#else
+static inline int wcd_dt_parse_mbhc_data(struct device *dev,
+ struct wcd_mbhc_config *cfg)
+{
+ return -ENOTSUPP;
+}
+
+static inline void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
+{
+}
+
+static inline struct wcd_mbhc *wcd_mbhc_init(struct snd_soc_component *component,
+ const struct wcd_mbhc_cb *mbhc_cb,
+ const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+ struct wcd_mbhc_field *fields,
+ bool impedance_det_en)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void wcd_mbhc_set_hph_type(struct wcd_mbhc *mbhc, int hph_type)
+{
+}
+
+static inline int wcd_mbhc_get_hph_type(struct wcd_mbhc *mbhc)
+{
+ return -ENOTSUPP;
+}
+
+static inline int wcd_mbhc_event_notify(struct wcd_mbhc *mbhc, unsigned long event)
+{
+ return -ENOTSUPP;
+}
+
+static inline int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+ struct wcd_mbhc_config *mbhc_cfg,
+ struct snd_soc_jack *jack)
+{
+ return 0;
+}
+
+static inline int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc,
+ uint32_t *zl,
+ uint32_t *zr)
+{
+ *zl = 0;
+ *zr = 0;
+ return -EINVAL;
+}
+
+static inline void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+
+#endif /* __WCD_MBHC_V2_H__ */
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 046874ef490e..c496b359f2f4 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -21,6 +21,7 @@
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
#include "wcd-clsh-v2.h"
+#include "wcd-mbhc-v2.h"
#define WCD934X_RATES_MASK (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
@@ -131,6 +132,24 @@
} \
}
+/* Z value defined in milliohm */
+#define WCD934X_ZDET_VAL_32 32000
+#define WCD934X_ZDET_VAL_400 400000
+#define WCD934X_ZDET_VAL_1200 1200000
+#define WCD934X_ZDET_VAL_100K 100000000
+/* Z floating defined in ohms */
+#define WCD934X_ZDET_FLOATING_IMPEDANCE 0x0FFFFFFE
+
+#define WCD934X_ZDET_NUM_MEASUREMENTS 900
+#define WCD934X_MBHC_GET_C1(c) (((c) & 0xC000) >> 14)
+#define WCD934X_MBHC_GET_X1(x) ((x) & 0x3FFF)
+/* Z value compared in milliohm */
+#define WCD934X_MBHC_IS_SECOND_RAMP_REQUIRED(z) (((z) > 400000) || ((z) < 32000))
+#define WCD934X_MBHC_ZDET_CONST (86 * 16384)
+#define WCD934X_MBHC_MOISTURE_RREF R_24_KOHM
+#define WCD934X_MBHC_MAX_BUTTONS (8)
+#define WCD_MBHC_HS_V_MAX 1600
+
#define WCD934X_INTERPOLATOR_PATH(id) \
{"RX INT" #id "_1 MIX1 INP0", "RX0", "SLIM RX0"}, \
{"RX INT" #id "_1 MIX1 INP0", "RX1", "SLIM RX1"}, \
@@ -287,12 +306,7 @@
{"AIF3_CAP Mixer", "SLIM TX" #id, "SLIM TX" #id }, \
{"SLIM TX" #id, NULL, "CDC_IF TX" #id " MUX"}
-enum {
- MIC_BIAS_1 = 1,
- MIC_BIAS_2,
- MIC_BIAS_3,
- MIC_BIAS_4
-};
+#define WCD934X_MAX_MICBIAS MIC_BIAS_4
enum {
SIDO_SOURCE_INTERNAL,
@@ -486,6 +500,15 @@ static struct interp_sample_rate sr_val_tbl[] = {
{352800, 0xC},
};
+struct wcd934x_mbhc_zdet_param {
+ u16 ldo_ctl;
+ u16 noff;
+ u16 nshift;
+ u16 btn5;
+ u16 btn6;
+ u16 btn7;
+};
+
struct wcd_slim_codec_dai_data {
struct list_head slim_ch_list;
struct slim_stream_config sconfig;
@@ -541,6 +564,18 @@ struct wcd934x_codec {
int comp_enabled[COMPANDER_MAX];
int sysclk_users;
struct mutex sysclk_mutex;
+ /* mbhc module */
+ struct wcd_mbhc *mbhc;
+ struct wcd_mbhc_config mbhc_cfg;
+ struct wcd_mbhc_intr intr_ids;
+ bool mbhc_started;
+ struct mutex micb_lock;
+ u32 micb_ref[WCD934X_MAX_MICBIAS];
+ u32 pullup_ref[WCD934X_MAX_MICBIAS];
+ u32 micb1_mv;
+ u32 micb2_mv;
+ u32 micb3_mv;
+ u32 micb4_mv;
};
#define to_wcd934x_codec(_hw) container_of(_hw, struct wcd934x_codec, hw)
@@ -1183,6 +1218,57 @@ static const struct soc_enum cdc_if_tx13_mux_enum =
SOC_ENUM_SINGLE(WCD934X_DATA_HUB_SB_TX13_INP_CFG, 0,
ARRAY_SIZE(cdc_if_tx13_mux_text), cdc_if_tx13_mux_text);
+static struct wcd_mbhc_field wcd_mbhc_fields[WCD_MBHC_REG_FUNC_MAX] = {
+ WCD_MBHC_FIELD(WCD_MBHC_L_DET_EN, WCD934X_ANA_MBHC_MECH, 0x80),
+ WCD_MBHC_FIELD(WCD_MBHC_GND_DET_EN, WCD934X_ANA_MBHC_MECH, 0x40),
+ WCD_MBHC_FIELD(WCD_MBHC_MECH_DETECTION_TYPE, WCD934X_ANA_MBHC_MECH, 0x20),
+ WCD_MBHC_FIELD(WCD_MBHC_MIC_CLAMP_CTL, WCD934X_MBHC_NEW_PLUG_DETECT_CTL, 0x30),
+ WCD_MBHC_FIELD(WCD_MBHC_ELECT_DETECTION_TYPE, WCD934X_ANA_MBHC_ELECT, 0x08),
+ WCD_MBHC_FIELD(WCD_MBHC_HS_L_DET_PULL_UP_CTRL, WCD934X_MBHC_NEW_PLUG_DETECT_CTL, 0xC0),
+ WCD_MBHC_FIELD(WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL, WCD934X_ANA_MBHC_MECH, 0x04),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHL_PLUG_TYPE, WCD934X_ANA_MBHC_MECH, 0x10),
+ WCD_MBHC_FIELD(WCD_MBHC_GND_PLUG_TYPE, WCD934X_ANA_MBHC_MECH, 0x08),
+ WCD_MBHC_FIELD(WCD_MBHC_SW_HPH_LP_100K_TO_GND, WCD934X_ANA_MBHC_MECH, 0x01),
+ WCD_MBHC_FIELD(WCD_MBHC_ELECT_SCHMT_ISRC, WCD934X_ANA_MBHC_ELECT, 0x06),
+ WCD_MBHC_FIELD(WCD_MBHC_FSM_EN, WCD934X_ANA_MBHC_ELECT, 0x80),
+ WCD_MBHC_FIELD(WCD_MBHC_INSREM_DBNC, WCD934X_MBHC_NEW_PLUG_DETECT_CTL, 0x0F),
+ WCD_MBHC_FIELD(WCD_MBHC_BTN_DBNC, WCD934X_MBHC_NEW_CTL_1, 0x03),
+ WCD_MBHC_FIELD(WCD_MBHC_HS_VREF, WCD934X_MBHC_NEW_CTL_2, 0x03),
+ WCD_MBHC_FIELD(WCD_MBHC_HS_COMP_RESULT, WCD934X_ANA_MBHC_RESULT_3, 0x08),
+ WCD_MBHC_FIELD(WCD_MBHC_IN2P_CLAMP_STATE, WCD934X_ANA_MBHC_RESULT_3, 0x10),
+ WCD_MBHC_FIELD(WCD_MBHC_MIC_SCHMT_RESULT, WCD934X_ANA_MBHC_RESULT_3, 0x20),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHL_SCHMT_RESULT, WCD934X_ANA_MBHC_RESULT_3, 0x80),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHR_SCHMT_RESULT, WCD934X_ANA_MBHC_RESULT_3, 0x40),
+ WCD_MBHC_FIELD(WCD_MBHC_OCP_FSM_EN, WCD934X_HPH_OCP_CTL, 0x10),
+ WCD_MBHC_FIELD(WCD_MBHC_BTN_RESULT, WCD934X_ANA_MBHC_RESULT_3, 0x07),
+ WCD_MBHC_FIELD(WCD_MBHC_BTN_ISRC_CTL, WCD934X_ANA_MBHC_ELECT, 0x70),
+ WCD_MBHC_FIELD(WCD_MBHC_ELECT_RESULT, WCD934X_ANA_MBHC_RESULT_3, 0xFF),
+ WCD_MBHC_FIELD(WCD_MBHC_MICB_CTRL, WCD934X_ANA_MICB2, 0xC0),
+ WCD_MBHC_FIELD(WCD_MBHC_HPH_CNP_WG_TIME, WCD934X_HPH_CNP_WG_TIME, 0xFF),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHR_PA_EN, WCD934X_ANA_HPH, 0x40),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHL_PA_EN, WCD934X_ANA_HPH, 0x80),
+ WCD_MBHC_FIELD(WCD_MBHC_HPH_PA_EN, WCD934X_ANA_HPH, 0xC0),
+ WCD_MBHC_FIELD(WCD_MBHC_SWCH_LEVEL_REMOVE, WCD934X_ANA_MBHC_RESULT_3, 0x10),
+ WCD_MBHC_FIELD(WCD_MBHC_ANC_DET_EN, WCD934X_MBHC_CTL_BCS, 0x02),
+ WCD_MBHC_FIELD(WCD_MBHC_FSM_STATUS, WCD934X_MBHC_STATUS_SPARE_1, 0x01),
+ WCD_MBHC_FIELD(WCD_MBHC_MUX_CTL, WCD934X_MBHC_NEW_CTL_2, 0x70),
+ WCD_MBHC_FIELD(WCD_MBHC_MOISTURE_STATUS, WCD934X_MBHC_NEW_FSM_STATUS, 0x20),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHR_GND, WCD934X_HPH_PA_CTL2, 0x40),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHL_GND, WCD934X_HPH_PA_CTL2, 0x10),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHL_OCP_DET_EN, WCD934X_HPH_L_TEST, 0x01),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHR_OCP_DET_EN, WCD934X_HPH_R_TEST, 0x01),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHL_OCP_STATUS, WCD934X_INTR_PIN1_STATUS0, 0x04),
+ WCD_MBHC_FIELD(WCD_MBHC_HPHR_OCP_STATUS, WCD934X_INTR_PIN1_STATUS0, 0x08),
+ WCD_MBHC_FIELD(WCD_MBHC_ADC_EN, WCD934X_MBHC_NEW_CTL_1, 0x08),
+ WCD_MBHC_FIELD(WCD_MBHC_ADC_COMPLETE, WCD934X_MBHC_NEW_FSM_STATUS, 0x40),
+ WCD_MBHC_FIELD(WCD_MBHC_ADC_TIMEOUT, WCD934X_MBHC_NEW_FSM_STATUS, 0x80),
+ WCD_MBHC_FIELD(WCD_MBHC_ADC_RESULT, WCD934X_MBHC_NEW_ADC_RESULT, 0xFF),
+ WCD_MBHC_FIELD(WCD_MBHC_MICB2_VOUT, WCD934X_ANA_MICB2, 0x3F),
+ WCD_MBHC_FIELD(WCD_MBHC_ADC_MODE, WCD934X_MBHC_NEW_CTL_1, 0x10),
+ WCD_MBHC_FIELD(WCD_MBHC_DETECTION_DONE, WCD934X_MBHC_NEW_CTL_1, 0x04),
+ WCD_MBHC_FIELD(WCD_MBHC_ELECT_ISRC_EN, WCD934X_ANA_MBHC_ZDET, 0x02),
+};
+
static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
{
if (sido_src == wcd->sido_input_src)
@@ -2127,7 +2213,8 @@ static struct clk *wcd934x_register_mclk_output(struct wcd934x_codec *wcd)
return NULL;
}
-static int wcd934x_get_micbias_val(struct device *dev, const char *micbias)
+static int wcd934x_get_micbias_val(struct device *dev, const char *micbias,
+ u32 *micb_mv)
{
int mv;
@@ -2145,6 +2232,8 @@ static int wcd934x_get_micbias_val(struct device *dev, const char *micbias)
mv = WCD934X_DEF_MICBIAS_MV;
}
+ *micb_mv = mv;
+
return (mv - 1000) / 50;
}
@@ -2155,13 +2244,17 @@ static int wcd934x_init_dmic(struct snd_soc_component *comp)
u32 def_dmic_rate, dmic_clk_drv;
vout_ctl_1 = wcd934x_get_micbias_val(comp->dev,
- "qcom,micbias1-microvolt");
+ "qcom,micbias1-microvolt",
+ &wcd->micb1_mv);
vout_ctl_2 = wcd934x_get_micbias_val(comp->dev,
- "qcom,micbias2-microvolt");
+ "qcom,micbias2-microvolt",
+ &wcd->micb2_mv);
vout_ctl_3 = wcd934x_get_micbias_val(comp->dev,
- "qcom,micbias3-microvolt");
+ "qcom,micbias3-microvolt",
+ &wcd->micb3_mv);
vout_ctl_4 = wcd934x_get_micbias_val(comp->dev,
- "qcom,micbias4-microvolt");
+ "qcom,micbias4-microvolt",
+ &wcd->micb4_mv);
snd_soc_component_update_bits(comp, WCD934X_ANA_MICB1,
WCD934X_MICB_VAL_MASK, vout_ctl_1);
@@ -2287,6 +2380,695 @@ static irqreturn_t wcd934x_slim_irq_handler(int irq, void *data)
return ret;
}
+static void wcd934x_mbhc_clk_setup(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_write_field(component, WCD934X_MBHC_NEW_CTL_1,
+ WCD934X_MBHC_CTL_RCO_EN_MASK, enable);
+}
+
+static void wcd934x_mbhc_mbhc_bias_control(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_write_field(component, WCD934X_ANA_MBHC_ELECT,
+ WCD934X_ANA_MBHC_BIAS_EN, enable);
+}
+
+static void wcd934x_mbhc_program_btn_thr(struct snd_soc_component *component,
+ int *btn_low, int *btn_high,
+ int num_btn, bool is_micbias)
+{
+ int i, vth;
+
+ if (num_btn > WCD_MBHC_DEF_BUTTONS) {
+ dev_err(component->dev, "%s: invalid number of buttons: %d\n",
+ __func__, num_btn);
+ return;
+ }
+
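+ /* Each VTH step is 12.5 mV, so e.g. a 500 mV threshold maps to (500 * 2) / 25 = 40 */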
+ for (i = 0; i < num_btn; i++) {
+ vth = ((btn_high[i] * 2) / 25) & 0x3F;
+ snd_soc_component_write_field(component, WCD934X_ANA_MBHC_BTN0 + i,
+ WCD934X_MBHC_BTN_VTH_MASK, vth);
+ }
+}
+
+static bool wcd934x_mbhc_micb_en_status(struct snd_soc_component *component, int micb_num)
+{
+ u8 val;
+
+ if (micb_num == MIC_BIAS_2) {
+ val = snd_soc_component_read_field(component, WCD934X_ANA_MICB2,
+ WCD934X_ANA_MICB2_ENABLE_MASK);
+ if (val == WCD934X_MICB_ENABLE)
+ return true;
+ }
+ return false;
+}
+
+static void wcd934x_mbhc_hph_l_pull_up_control(struct snd_soc_component *component,
+ enum mbhc_hs_pullup_iref pull_up_cur)
+{
+ /* Default pull up current to 2uA */
+ if (pull_up_cur < I_OFF || pull_up_cur > I_3P0_UA ||
+ pull_up_cur == I_DEFAULT)
+ pull_up_cur = I_2P0_UA;
+
+ snd_soc_component_write_field(component, WCD934X_MBHC_NEW_PLUG_DETECT_CTL,
+ WCD934X_HSDET_PULLUP_C_MASK, pull_up_cur);
+}
+
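+/*
+ * Micbias enable and pull-up requests are reference counted per supply: the
+ * supply is put in pull-up only while no full enable is outstanding and is
+ * switched off only once both counts drop to zero.
+ */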
+static int wcd934x_micbias_control(struct snd_soc_component *component,
+ int micb_num, int req, bool is_dapm)
+{
+ struct wcd934x_codec *wcd934x = snd_soc_component_get_drvdata(component);
+ int micb_index = micb_num - 1;
+ u16 micb_reg;
+
+ switch (micb_num) {
+ case MIC_BIAS_1:
+ micb_reg = WCD934X_ANA_MICB1;
+ break;
+ case MIC_BIAS_2:
+ micb_reg = WCD934X_ANA_MICB2;
+ break;
+ case MIC_BIAS_3:
+ micb_reg = WCD934X_ANA_MICB3;
+ break;
+ case MIC_BIAS_4:
+ micb_reg = WCD934X_ANA_MICB4;
+ break;
+ default:
+ dev_err(component->dev, "%s: Invalid micbias number: %d\n",
+ __func__, micb_num);
+ return -EINVAL;
+ }
+ mutex_lock(&wcd934x->micb_lock);
+
+ switch (req) {
+ case MICB_PULLUP_ENABLE:
+ wcd934x->pullup_ref[micb_index]++;
+ if ((wcd934x->pullup_ref[micb_index] == 1) &&
+ (wcd934x->micb_ref[micb_index] == 0))
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK,
+ WCD934X_MICB_PULL_UP);
+ break;
+ case MICB_PULLUP_DISABLE:
+ if (wcd934x->pullup_ref[micb_index] > 0)
+ wcd934x->pullup_ref[micb_index]--;
+
+ if ((wcd934x->pullup_ref[micb_index] == 0) &&
+ (wcd934x->micb_ref[micb_index] == 0))
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK, 0);
+ break;
+ case MICB_ENABLE:
+ wcd934x->micb_ref[micb_index]++;
+ if (wcd934x->micb_ref[micb_index] == 1) {
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK,
+ WCD934X_MICB_ENABLE);
+ if (micb_num == MIC_BIAS_2)
+ wcd_mbhc_event_notify(wcd934x->mbhc,
+ WCD_EVENT_POST_MICBIAS_2_ON);
+ }
+
+ if (micb_num == MIC_BIAS_2 && is_dapm)
+ wcd_mbhc_event_notify(wcd934x->mbhc,
+ WCD_EVENT_POST_DAPM_MICBIAS_2_ON);
+ break;
+ case MICB_DISABLE:
+ if (wcd934x->micb_ref[micb_index] > 0)
+ wcd934x->micb_ref[micb_index]--;
+
+ if ((wcd934x->micb_ref[micb_index] == 0) &&
+ (wcd934x->pullup_ref[micb_index] > 0))
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK,
+ WCD934X_MICB_PULL_UP);
+ else if ((wcd934x->micb_ref[micb_index] == 0) &&
+ (wcd934x->pullup_ref[micb_index] == 0)) {
+ if (micb_num == MIC_BIAS_2)
+ wcd_mbhc_event_notify(wcd934x->mbhc,
+ WCD_EVENT_PRE_MICBIAS_2_OFF);
+
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK, 0);
+ if (micb_num == MIC_BIAS_2)
+ wcd_mbhc_event_notify(wcd934x->mbhc,
+ WCD_EVENT_POST_MICBIAS_2_OFF);
+ }
+ if (is_dapm && micb_num == MIC_BIAS_2)
+ wcd_mbhc_event_notify(wcd934x->mbhc,
+ WCD_EVENT_POST_DAPM_MICBIAS_2_OFF);
+ break;
+ }
+
+ mutex_unlock(&wcd934x->micb_lock);
+
+ return 0;
+}
+
+static int wcd934x_mbhc_request_micbias(struct snd_soc_component *component,
+ int micb_num, int req)
+{
+ struct wcd934x_codec *wcd = dev_get_drvdata(component->dev);
+ int ret;
+
+ if (req == MICB_ENABLE)
+ __wcd934x_cdc_mclk_enable(wcd, true);
+
+ ret = wcd934x_micbias_control(component, micb_num, req, false);
+
+ if (req == MICB_DISABLE)
+ __wcd934x_cdc_mclk_enable(wcd, false);
+
+ return ret;
+}
+
+static void wcd934x_mbhc_micb_ramp_control(struct snd_soc_component *component,
+ bool enable)
+{
+ if (enable) {
+ snd_soc_component_write_field(component, WCD934X_ANA_MICB2_RAMP,
+ WCD934X_RAMP_SHIFT_CTRL_MASK, 0x3);
+ snd_soc_component_write_field(component, WCD934X_ANA_MICB2_RAMP,
+ WCD934X_RAMP_EN_MASK, 1);
+ } else {
+ snd_soc_component_write_field(component, WCD934X_ANA_MICB2_RAMP,
+ WCD934X_RAMP_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD934X_ANA_MICB2_RAMP,
+ WCD934X_RAMP_SHIFT_CTRL_MASK, 0);
+ }
+}
+
+static int wcd934x_get_micb_vout_ctl_val(u32 micb_mv)
+{
+ /* min micbias voltage is 1V and maximum is 2.85V */
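+ /* e.g. 1800 mV maps to (1800 - 1000) / 50 = 16 and 2850 mV to 37 */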
+ if (micb_mv < 1000 || micb_mv > 2850)
+ return -EINVAL;
+
+ return (micb_mv - 1000) / 50;
+}
+
+static int wcd934x_mbhc_micb_adjust_voltage(struct snd_soc_component *component,
+ int req_volt, int micb_num)
+{
+ struct wcd934x_codec *wcd934x = snd_soc_component_get_drvdata(component);
+ int cur_vout_ctl, req_vout_ctl, micb_reg, micb_en, ret = 0;
+
+ switch (micb_num) {
+ case MIC_BIAS_1:
+ micb_reg = WCD934X_ANA_MICB1;
+ break;
+ case MIC_BIAS_2:
+ micb_reg = WCD934X_ANA_MICB2;
+ break;
+ case MIC_BIAS_3:
+ micb_reg = WCD934X_ANA_MICB3;
+ break;
+ case MIC_BIAS_4:
+ micb_reg = WCD934X_ANA_MICB4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mutex_lock(&wcd934x->micb_lock);
+ /*
+ * If the requested micbias voltage is the same as the current one,
+ * just return. Otherwise adjust the voltage to the requested value.
+ * If micbias is already enabled, momentarily switch to the pull-up
+ * (to avoid a slow ramp), change the micbias value and then
+ * re-enable micbias.
+ */
+ micb_en = snd_soc_component_read_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK);
+ cur_vout_ctl = snd_soc_component_read_field(component, micb_reg,
+ WCD934X_MICB_VAL_MASK);
+
+ req_vout_ctl = wcd934x_get_micb_vout_ctl_val(req_volt);
+ if (req_vout_ctl < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (cur_vout_ctl == req_vout_ctl) {
+ ret = 0;
+ goto exit;
+ }
+
+ if (micb_en == WCD934X_MICB_ENABLE)
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK,
+ WCD934X_MICB_PULL_UP);
+
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_MICB_VAL_MASK,
+ req_vout_ctl);
+
+ if (micb_en == WCD934X_MICB_ENABLE) {
+ snd_soc_component_write_field(component, micb_reg,
+ WCD934X_ANA_MICB_EN_MASK,
+ WCD934X_MICB_ENABLE);
+ /*
+ * Add 2ms delay as per HW requirement after enabling
+ * micbias
+ */
+ usleep_range(2000, 2100);
+ }
+exit:
+ mutex_unlock(&wcd934x->micb_lock);
+ return ret;
+}
+
+static int wcd934x_mbhc_micb_ctrl_threshold_mic(struct snd_soc_component *component,
+ int micb_num, bool req_en)
+{
+ struct wcd934x_codec *wcd934x = snd_soc_component_get_drvdata(component);
+ int rc, micb_mv;
+
+ if (micb_num != MIC_BIAS_2)
+ return -EINVAL;
+ /*
+ * If device tree micbias level is already above the minimum
+ * voltage needed to detect threshold microphone, then do
+ * not change the micbias, just return.
+ */
+ if (wcd934x->micb2_mv >= WCD_MBHC_THR_HS_MICB_MV)
+ return 0;
+
+ micb_mv = req_en ? WCD_MBHC_THR_HS_MICB_MV : wcd934x->micb2_mv;
+
+ rc = wcd934x_mbhc_micb_adjust_voltage(component, micb_mv, MIC_BIAS_2);
+
+ return rc;
+}
+
+static inline void wcd934x_mbhc_get_result_params(struct wcd934x_codec *wcd934x,
+ s16 *d1_a, u16 noff,
+ int32_t *zdet)
+{
+ int i;
+ int val, val1;
+ s16 c1;
+ s32 x1, d1;
+ int32_t denom;
+ int minCode_param[] = {
+ 3277, 1639, 820, 410, 205, 103, 52, 26
+ };
+
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ZDET, 0x20, 0x20);
+ for (i = 0; i < WCD934X_ZDET_NUM_MEASUREMENTS; i++) {
+ regmap_read(wcd934x->regmap, WCD934X_ANA_MBHC_RESULT_2, &val);
+ if (val & 0x80)
+ break;
+ }
+ val = val << 0x8;
+ regmap_read(wcd934x->regmap, WCD934X_ANA_MBHC_RESULT_1, &val1);
+ val |= val1;
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ZDET, 0x20, 0x00);
+ x1 = WCD934X_MBHC_GET_X1(val);
+ c1 = WCD934X_MBHC_GET_C1(val);
+ /* If ramp is not complete, give additional 5ms */
+ if ((c1 < 2) && x1)
+ usleep_range(5000, 5050);
+
+ if (!c1 || !x1) {
+ dev_err(wcd934x->dev, "%s: Impedance detect ramp error, c1=%d, x1=0x%x\n",
+ __func__, c1, x1);
+ goto ramp_down;
+ }
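+ /*
+ * Impedance (in milliohm) follows from the ramp result as roughly
+ * ZDET_CONST * 1000 / (x1 * d1 - 2^(14 - noff)); codes below the
+ * minimum for this range are reported as floating impedance.
+ */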
+ d1 = d1_a[c1];
+ denom = (x1 * d1) - (1 << (14 - noff));
+ if (denom > 0)
+ *zdet = (WCD934X_MBHC_ZDET_CONST * 1000) / denom;
+ else if (x1 < minCode_param[noff])
+ *zdet = WCD934X_ZDET_FLOATING_IMPEDANCE;
+
+ dev_info(wcd934x->dev, "%s: d1=%d, c1=%d, x1=0x%x, z_val=%d(milliOhm)\n",
+ __func__, d1, c1, x1, *zdet);
+ramp_down:
+ i = 0;
+
+ while (x1) {
+ regmap_read(wcd934x->regmap, WCD934X_ANA_MBHC_RESULT_1, &val);
+ regmap_read(wcd934x->regmap, WCD934X_ANA_MBHC_RESULT_2, &val1);
+ val = val << 0x08;
+ val |= val1;
+ x1 = WCD934X_MBHC_GET_X1(val);
+ i++;
+ if (i == WCD934X_ZDET_NUM_MEASUREMENTS)
+ break;
+ }
+}
+
+static void wcd934x_mbhc_zdet_ramp(struct snd_soc_component *component,
+ struct wcd934x_mbhc_zdet_param *zdet_param,
+ int32_t *zl, int32_t *zr, s16 *d1_a)
+{
+ struct wcd934x_codec *wcd934x = dev_get_drvdata(component->dev);
+ int32_t zdet = 0;
+
+ snd_soc_component_write_field(component, WCD934X_MBHC_NEW_ZDET_ANA_CTL,
+ WCD934X_ZDET_MAXV_CTL_MASK, zdet_param->ldo_ctl);
+ snd_soc_component_update_bits(component, WCD934X_ANA_MBHC_BTN5,
+ WCD934X_VTH_MASK, zdet_param->btn5);
+ snd_soc_component_update_bits(component, WCD934X_ANA_MBHC_BTN6,
+ WCD934X_VTH_MASK, zdet_param->btn6);
+ snd_soc_component_update_bits(component, WCD934X_ANA_MBHC_BTN7,
+ WCD934X_VTH_MASK, zdet_param->btn7);
+ snd_soc_component_write_field(component, WCD934X_MBHC_NEW_ZDET_ANA_CTL,
+ WCD934X_ZDET_RANGE_CTL_MASK, zdet_param->noff);
+ snd_soc_component_update_bits(component, WCD934X_MBHC_NEW_ZDET_RAMP_CTL,
+ 0x0F, zdet_param->nshift);
+
+ if (!zl)
+ goto z_right;
+ /* Start impedance measurement for HPH_L */
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ZDET, 0x80, 0x80);
+ wcd934x_mbhc_get_result_params(wcd934x, d1_a, zdet_param->noff, &zdet);
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ZDET, 0x80, 0x00);
+
+ *zl = zdet;
+
+z_right:
+ if (!zr)
+ return;
+ /* Start impedance measurement for HPH_R */
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ZDET, 0x40, 0x40);
+ wcd934x_mbhc_get_result_params(wcd934x, d1_a, zdet_param->noff, &zdet);
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ZDET, 0x40, 0x00);
+
+ *zr = zdet;
+}
+
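+/*
+ * Each headphone leg has a factory efuse trim; the stored byte appears to be
+ * a sign-magnitude correction in 0.25% steps around unity (10000) and is
+ * applied to the measured impedance here.
+ */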
+static inline void wcd934x_wcd_mbhc_qfuse_cal(struct snd_soc_component *component,
+ int32_t *z_val, int flag_l_r)
+{
+ s16 q1;
+ int q1_cal;
+
+ if (*z_val < (WCD934X_ZDET_VAL_400/1000))
+ q1 = snd_soc_component_read(component,
+ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 + (2 * flag_l_r));
+ else
+ q1 = snd_soc_component_read(component,
+ WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 + (2 * flag_l_r));
+ if (q1 & 0x80)
+ q1_cal = (10000 - ((q1 & 0x7F) * 25));
+ else
+ q1_cal = (10000 + (q1 * 25));
+ if (q1_cal > 0)
+ *z_val = ((*z_val) * 10000) / q1_cal;
+}
+
+static void wcd934x_wcd_mbhc_calc_impedance(struct snd_soc_component *component,
+ uint32_t *zl, uint32_t *zr)
+{
+ struct wcd934x_codec *wcd934x = dev_get_drvdata(component->dev);
+ s16 reg0, reg1, reg2, reg3, reg4;
+ int32_t z1L, z1R, z1Ls;
+ int zMono, z_diff1, z_diff2;
+ bool is_fsm_disable = false;
+ struct wcd934x_mbhc_zdet_param zdet_param[] = {
+ {4, 0, 4, 0x08, 0x14, 0x18}, /* < 32ohm */
+ {2, 0, 3, 0x18, 0x7C, 0x90}, /* 32ohm < Z < 400ohm */
+ {1, 4, 5, 0x18, 0x7C, 0x90}, /* 400ohm < Z < 1200ohm */
+ {1, 6, 7, 0x18, 0x7C, 0x90}, /* >1200ohm */
+ };
+ struct wcd934x_mbhc_zdet_param *zdet_param_ptr = NULL;
+ s16 d1_a[][4] = {
+ {0, 30, 90, 30},
+ {0, 30, 30, 5},
+ {0, 30, 30, 5},
+ {0, 30, 30, 5},
+ };
+ s16 *d1 = NULL;
+
+ reg0 = snd_soc_component_read(component, WCD934X_ANA_MBHC_BTN5);
+ reg1 = snd_soc_component_read(component, WCD934X_ANA_MBHC_BTN6);
+ reg2 = snd_soc_component_read(component, WCD934X_ANA_MBHC_BTN7);
+ reg3 = snd_soc_component_read(component, WCD934X_MBHC_CTL_CLK);
+ reg4 = snd_soc_component_read(component, WCD934X_MBHC_NEW_ZDET_ANA_CTL);
+
+ if (snd_soc_component_read(component, WCD934X_ANA_MBHC_ELECT) & 0x80) {
+ is_fsm_disable = true;
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ELECT, 0x80, 0x00);
+ }
+
+ /* For NO-jack, disable L_DET_EN before Z-det measurements */
+ if (wcd934x->mbhc_cfg.hphl_swh)
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_MECH, 0x80, 0x00);
+
+ /* Turn off 100k pull down on HPHL */
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_MECH, 0x01, 0x00);
+
+ /* First get impedance on Left */
+ d1 = d1_a[1];
+ zdet_param_ptr = &zdet_param[1];
+ wcd934x_mbhc_zdet_ramp(component, zdet_param_ptr, &z1L, NULL, d1);
+
+ if (!WCD934X_MBHC_IS_SECOND_RAMP_REQUIRED(z1L))
+ goto left_ch_impedance;
+
+ /* Second ramp for left ch */
+ if (z1L < WCD934X_ZDET_VAL_32) {
+ zdet_param_ptr = &zdet_param[0];
+ d1 = d1_a[0];
+ } else if ((z1L > WCD934X_ZDET_VAL_400) &&
+ (z1L <= WCD934X_ZDET_VAL_1200)) {
+ zdet_param_ptr = &zdet_param[2];
+ d1 = d1_a[2];
+ } else if (z1L > WCD934X_ZDET_VAL_1200) {
+ zdet_param_ptr = &zdet_param[3];
+ d1 = d1_a[3];
+ }
+ wcd934x_mbhc_zdet_ramp(component, zdet_param_ptr, &z1L, NULL, d1);
+
+left_ch_impedance:
+ if ((z1L == WCD934X_ZDET_FLOATING_IMPEDANCE) ||
+ (z1L > WCD934X_ZDET_VAL_100K)) {
+ *zl = WCD934X_ZDET_FLOATING_IMPEDANCE;
+ zdet_param_ptr = &zdet_param[1];
+ d1 = d1_a[1];
+ } else {
+ *zl = z1L/1000;
+ wcd934x_wcd_mbhc_qfuse_cal(component, zl, 0);
+ }
+ dev_info(component->dev, "%s: impedance on HPH_L = %d(ohms)\n",
+ __func__, *zl);
+
+ /* Start of right impedance ramp and calculation */
+ wcd934x_mbhc_zdet_ramp(component, zdet_param_ptr, NULL, &z1R, d1);
+ if (WCD934X_MBHC_IS_SECOND_RAMP_REQUIRED(z1R)) {
+ if (((z1R > WCD934X_ZDET_VAL_1200) &&
+ (zdet_param_ptr->noff == 0x6)) ||
+ ((*zl) != WCD934X_ZDET_FLOATING_IMPEDANCE))
+ goto right_ch_impedance;
+ /* Second ramp for right ch */
+ if (z1R < WCD934X_ZDET_VAL_32) {
+ zdet_param_ptr = &zdet_param[0];
+ d1 = d1_a[0];
+ } else if ((z1R > WCD934X_ZDET_VAL_400) &&
+ (z1R <= WCD934X_ZDET_VAL_1200)) {
+ zdet_param_ptr = &zdet_param[2];
+ d1 = d1_a[2];
+ } else if (z1R > WCD934X_ZDET_VAL_1200) {
+ zdet_param_ptr = &zdet_param[3];
+ d1 = d1_a[3];
+ }
+ wcd934x_mbhc_zdet_ramp(component, zdet_param_ptr, NULL, &z1R, d1);
+ }
+right_ch_impedance:
+ if ((z1R == WCD934X_ZDET_FLOATING_IMPEDANCE) ||
+ (z1R > WCD934X_ZDET_VAL_100K)) {
+ *zr = WCD934X_ZDET_FLOATING_IMPEDANCE;
+ } else {
+ *zr = z1R/1000;
+ wcd934x_wcd_mbhc_qfuse_cal(component, zr, 1);
+ }
+ dev_info(component->dev, "%s: impedance on HPH_R = %d(ohms)\n",
+ __func__, *zr);
+
+ /* Mono/stereo detection */
+ if ((*zl == WCD934X_ZDET_FLOATING_IMPEDANCE) &&
+ (*zr == WCD934X_ZDET_FLOATING_IMPEDANCE)) {
+ dev_dbg(component->dev,
+ "%s: plug type is invalid or extension cable\n",
+ __func__);
+ goto zdet_complete;
+ }
+ if ((*zl == WCD934X_ZDET_FLOATING_IMPEDANCE) ||
+ (*zr == WCD934X_ZDET_FLOATING_IMPEDANCE) ||
+ ((*zl < WCD_MONO_HS_MIN_THR) && (*zr > WCD_MONO_HS_MIN_THR)) ||
+ ((*zl > WCD_MONO_HS_MIN_THR) && (*zr < WCD_MONO_HS_MIN_THR))) {
+ dev_dbg(component->dev,
+ "%s: Mono plug type with one ch floating or shorted to GND\n",
+ __func__);
+ wcd_mbhc_set_hph_type(wcd934x->mbhc, WCD_MBHC_HPH_MONO);
+ goto zdet_complete;
+ }
+ snd_soc_component_write_field(component, WCD934X_HPH_R_ATEST,
+ WCD934X_HPHPA_GND_OVR_MASK, 1);
+ snd_soc_component_write_field(component, WCD934X_HPH_PA_CTL2,
+ WCD934X_HPHPA_GND_R_MASK, 1);
+ if (*zl < (WCD934X_ZDET_VAL_32/1000))
+ wcd934x_mbhc_zdet_ramp(component, &zdet_param[0], &z1Ls, NULL, d1);
+ else
+ wcd934x_mbhc_zdet_ramp(component, &zdet_param[1], &z1Ls, NULL, d1);
+ snd_soc_component_write_field(component, WCD934X_HPH_PA_CTL2,
+ WCD934X_HPHPA_GND_R_MASK, 0);
+ snd_soc_component_write_field(component, WCD934X_HPH_R_ATEST,
+ WCD934X_HPHPA_GND_OVR_MASK, 0);
+ z1Ls /= 1000;
+ wcd934x_wcd_mbhc_qfuse_cal(component, &z1Ls, 0);
+ /* Parallel of left Z and 9 ohm pull down resistor */
+ zMono = ((*zl) * 9) / ((*zl) + 9);
+ z_diff1 = (z1Ls > zMono) ? (z1Ls - zMono) : (zMono - z1Ls);
+ z_diff2 = ((*zl) > z1Ls) ? ((*zl) - z1Ls) : (z1Ls - (*zl));
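+ /*
+ * If the left impedance re-measured with HPHR grounded has moved
+ * towards the 9 ohm parallel value, the two channels share a return
+ * path (mono); if it stayed close to the original value, the plug
+ * is stereo.
+ */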
+ if ((z_diff1 * (*zl + z1Ls)) > (z_diff2 * (z1Ls + zMono))) {
+ dev_err(component->dev, "%s: stereo plug type detected\n",
+ __func__);
+ wcd_mbhc_set_hph_type(wcd934x->mbhc, WCD_MBHC_HPH_STEREO);
+ } else {
+ dev_err(component->dev, "%s: MONO plug type detected\n",
+ __func__);
+ wcd_mbhc_set_hph_type(wcd934x->mbhc, WCD_MBHC_HPH_MONO);
+ }
+
+zdet_complete:
+ snd_soc_component_write(component, WCD934X_ANA_MBHC_BTN5, reg0);
+ snd_soc_component_write(component, WCD934X_ANA_MBHC_BTN6, reg1);
+ snd_soc_component_write(component, WCD934X_ANA_MBHC_BTN7, reg2);
+ /* Turn on 100k pull down on HPHL */
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_MECH, 0x01, 0x01);
+
+ /* For NO-jack, re-enable L_DET_EN after Z-det measurements */
+ if (wcd934x->mbhc_cfg.hphl_swh)
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_MECH, 0x80, 0x80);
+
+ snd_soc_component_write(component, WCD934X_MBHC_NEW_ZDET_ANA_CTL, reg4);
+ snd_soc_component_write(component, WCD934X_MBHC_CTL_CLK, reg3);
+ if (is_fsm_disable)
+ regmap_update_bits(wcd934x->regmap, WCD934X_ANA_MBHC_ELECT, 0x80, 0x80);
+}
+
+static void wcd934x_mbhc_gnd_det_ctrl(struct snd_soc_component *component,
+ bool enable)
+{
+ if (enable) {
+ snd_soc_component_write_field(component, WCD934X_ANA_MBHC_MECH,
+ WCD934X_MBHC_HSG_PULLUP_COMP_EN, 1);
+ snd_soc_component_write_field(component, WCD934X_ANA_MBHC_MECH,
+ WCD934X_MBHC_GND_DET_EN_MASK, 1);
+ } else {
+ snd_soc_component_write_field(component, WCD934X_ANA_MBHC_MECH,
+ WCD934X_MBHC_GND_DET_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD934X_ANA_MBHC_MECH,
+ WCD934X_MBHC_HSG_PULLUP_COMP_EN, 0);
+ }
+}
+
+static void wcd934x_mbhc_hph_pull_down_ctrl(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_write_field(component, WCD934X_HPH_PA_CTL2,
+ WCD934X_HPHPA_GND_R_MASK, enable);
+ snd_soc_component_write_field(component, WCD934X_HPH_PA_CTL2,
+ WCD934X_HPHPA_GND_L_MASK, enable);
+}
+
+static const struct wcd_mbhc_cb mbhc_cb = {
+ .clk_setup = wcd934x_mbhc_clk_setup,
+ .mbhc_bias = wcd934x_mbhc_mbhc_bias_control,
+ .set_btn_thr = wcd934x_mbhc_program_btn_thr,
+ .micbias_enable_status = wcd934x_mbhc_micb_en_status,
+ .hph_pull_up_control = wcd934x_mbhc_hph_l_pull_up_control,
+ .mbhc_micbias_control = wcd934x_mbhc_request_micbias,
+ .mbhc_micb_ramp_control = wcd934x_mbhc_micb_ramp_control,
+ .mbhc_micb_ctrl_thr_mic = wcd934x_mbhc_micb_ctrl_threshold_mic,
+ .compute_impedance = wcd934x_wcd_mbhc_calc_impedance,
+ .mbhc_gnd_det_ctrl = wcd934x_mbhc_gnd_det_ctrl,
+ .hph_pull_down_ctrl = wcd934x_mbhc_hph_pull_down_ctrl,
+};
+
+static int wcd934x_get_hph_type(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd934x_codec *wcd = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = wcd_mbhc_get_hph_type(wcd->mbhc);
+
+ return 0;
+}
+
+static int wcd934x_hph_impedance_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ uint32_t zl, zr;
+ bool hphr;
+ struct soc_mixer_control *mc;
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd934x_codec *wcd = snd_soc_component_get_drvdata(component);
+
+ mc = (struct soc_mixer_control *)(kcontrol->private_value);
+ hphr = mc->shift;
+ wcd_mbhc_get_impedance(wcd->mbhc, &zl, &zr);
+ dev_dbg(component->dev, "%s: zl=%u(ohms), zr=%u(ohms)\n", __func__, zl, zr);
+ ucontrol->value.integer.value[0] = hphr ? zr : zl;
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new hph_type_detect_controls[] = {
+ SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
+ wcd934x_get_hph_type, NULL),
+};
+
+static const struct snd_kcontrol_new impedance_detect_controls[] = {
+ SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
+ wcd934x_hph_impedance_get, NULL),
+ SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
+ wcd934x_hph_impedance_get, NULL),
+};
+
+static int wcd934x_mbhc_init(struct snd_soc_component *component)
+{
+ struct wcd934x_ddata *data = dev_get_drvdata(component->dev->parent);
+ struct wcd934x_codec *wcd = snd_soc_component_get_drvdata(component);
+ struct wcd_mbhc_intr *intr_ids = &wcd->intr_ids;
+
+ intr_ids->mbhc_sw_intr = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_MBHC_SW_DET);
+ intr_ids->mbhc_btn_press_intr = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_MBHC_BUTTON_PRESS_DET);
+ intr_ids->mbhc_btn_release_intr = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET);
+ intr_ids->mbhc_hs_ins_intr = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET);
+ intr_ids->mbhc_hs_rem_intr = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_MBHC_ELECT_INS_REM_DET);
+ intr_ids->hph_left_ocp = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_HPH_PA_OCPL_FAULT);
+ intr_ids->hph_right_ocp = regmap_irq_get_virq(data->irq_data,
+ WCD934X_IRQ_HPH_PA_OCPR_FAULT);
+
+ wcd->mbhc = wcd_mbhc_init(component, &mbhc_cb, intr_ids, wcd_mbhc_fields, true);
+ if (IS_ERR(wcd->mbhc)) {
+ wcd->mbhc = NULL;
+ return -EINVAL;
+ }
+
+ snd_soc_add_component_controls(component, impedance_detect_controls,
+ ARRAY_SIZE(impedance_detect_controls));
+ snd_soc_add_component_controls(component, hph_type_detect_controls,
+ ARRAY_SIZE(hph_type_detect_controls));
+
+ return 0;
+}
static int wcd934x_comp_probe(struct snd_soc_component *component)
{
struct wcd934x_codec *wcd = dev_get_drvdata(component->dev);
@@ -2309,6 +3091,10 @@ static int wcd934x_comp_probe(struct snd_soc_component *component)
INIT_LIST_HEAD(&wcd->dai[i].slim_ch_list);
wcd934x_init_dmic(component);
+
+ if (wcd934x_mbhc_init(component))
+ dev_err(component->dev, "Failed to Initialize MBHC\n");
+
return 0;
}
@@ -3756,6 +4542,7 @@ static int wcd934x_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
int event)
{
struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd934x_codec *wcd = snd_soc_component_get_drvdata(comp);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -3788,6 +4575,7 @@ static int wcd934x_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX_PGA_MUTE_EN_MASK, 0x00);
break;
case SND_SOC_DAPM_PRE_PMD:
+ wcd_mbhc_event_notify(wcd->mbhc, WCD_EVENT_POST_HPHL_PA_OFF);
/* Enable DSD Mute before PA disable */
snd_soc_component_update_bits(comp, WCD934X_HPH_L_TEST,
WCD934X_HPH_OCP_DET_MASK,
@@ -3806,6 +4594,7 @@ static int wcd934x_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
* disabled, then 20ms delay is needed after PA disable.
*/
usleep_range(20000, 20100);
+ wcd_mbhc_event_notify(wcd->mbhc, WCD_EVENT_POST_HPHL_PA_OFF);
break;
}
@@ -3817,6 +4606,7 @@ static int wcd934x_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
int event)
{
struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd934x_codec *wcd = snd_soc_component_get_drvdata(comp);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -3851,6 +4641,7 @@ static int wcd934x_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX_PGA_MUTE_DISABLE);
break;
case SND_SOC_DAPM_PRE_PMD:
+ wcd_mbhc_event_notify(wcd->mbhc, WCD_EVENT_PRE_HPHR_PA_OFF);
snd_soc_component_update_bits(comp, WCD934X_HPH_R_TEST,
WCD934X_HPH_OCP_DET_MASK,
WCD934X_HPH_OCP_DET_DISABLE);
@@ -3868,6 +4659,7 @@ static int wcd934x_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
* disabled, then 20ms delay is needed after PA disable.
*/
usleep_range(20000, 20100);
+ wcd_mbhc_event_notify(wcd->mbhc, WCD_EVENT_POST_HPHR_PA_OFF);
break;
}
@@ -4323,6 +5115,29 @@ static int wcd934x_codec_enable_adc(struct snd_soc_dapm_widget *w,
return 0;
}
+static int wcd934x_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ int micb_num = w->shift;
+
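+ /* The widget shift encodes which of the four mic bias supplies this event targets */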
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd934x_micbias_control(component, micb_num, MICB_ENABLE, true);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1100);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd934x_micbias_control(component, micb_num, MICB_DISABLE, true);
+ break;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget wcd934x_dapm_widgets[] = {
/* Analog Outputs */
SND_SOC_DAPM_OUTPUT("EAR"),
@@ -4778,13 +5593,17 @@ static const struct snd_soc_dapm_widget wcd934x_dapm_widgets[] = {
wcd934x_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_ADC_E("ADC4", NULL, WCD934X_ANA_AMIC4, 7, 0,
wcd934x_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
- SND_SOC_DAPM_SUPPLY("MIC BIAS1", WCD934X_ANA_MICB1, 6, 0, NULL,
+ SND_SOC_DAPM_SUPPLY("MIC BIAS1", SND_SOC_NOPM, MIC_BIAS_1, 0,
+ wcd934x_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("MIC BIAS2", WCD934X_ANA_MICB2, 6, 0, NULL,
+ SND_SOC_DAPM_SUPPLY("MIC BIAS2", SND_SOC_NOPM, MIC_BIAS_2, 0,
+ wcd934x_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("MIC BIAS3", WCD934X_ANA_MICB3, 6, 0, NULL,
+ SND_SOC_DAPM_SUPPLY("MIC BIAS3", SND_SOC_NOPM, MIC_BIAS_3, 0,
+ wcd934x_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("MIC BIAS4", WCD934X_ANA_MICB4, 6, 0, NULL,
+ SND_SOC_DAPM_SUPPLY("MIC BIAS4", SND_SOC_NOPM, MIC_BIAS_4, 0,
+ wcd934x_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("AMIC4_5 SEL", SND_SOC_NOPM, 0, 0, &tx_amic4_5),
@@ -4961,6 +5780,26 @@ static const struct snd_soc_dapm_route wcd934x_audio_map[] = {
{"SRC1", NULL, "IIR1"},
};
+static int wcd934x_codec_set_jack(struct snd_soc_component *comp,
+ struct snd_soc_jack *jack, void *data)
+{
+ struct wcd934x_codec *wcd = dev_get_drvdata(comp->dev);
+ int ret = 0;
+
+ if (!wcd->mbhc)
+ return -ENOTSUPP;
+
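+ /* Start MBHC on the first jack registration; a NULL jack stops detection */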
+ if (jack && !wcd->mbhc_started) {
+ ret = wcd_mbhc_start(wcd->mbhc, &wcd->mbhc_cfg, jack);
+ wcd->mbhc_started = true;
+ } else if (wcd->mbhc_started) {
+ wcd_mbhc_stop(wcd->mbhc);
+ wcd->mbhc_started = false;
+ }
+
+ return ret;
+}
+
static const struct snd_soc_component_driver wcd934x_component_drv = {
.probe = wcd934x_comp_probe,
.remove = wcd934x_comp_remove,
@@ -4971,11 +5810,13 @@ static const struct snd_soc_component_driver wcd934x_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(wcd934x_dapm_widgets),
.dapm_routes = wcd934x_audio_map,
.num_dapm_routes = ARRAY_SIZE(wcd934x_audio_map),
+ .set_jack = wcd934x_codec_set_jack,
};
static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
{
struct device *dev = &wcd->sdev->dev;
+ struct wcd_mbhc_config *cfg = &wcd->mbhc_cfg;
struct device_node *ifc_dev_np;
ifc_dev_np = of_parse_phandle(dev->of_node, "slim-ifc-dev", 0);
@@ -5001,6 +5842,18 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate",
&wcd->dmic_sample_rate);
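+
+ /* Default MBHC detection thresholds; wcd_dt_parse_mbhc_data() can override them from DT */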
+ cfg->mbhc_micbias = MIC_BIAS_2;
+ cfg->anc_micbias = MIC_BIAS_2;
+ cfg->v_hs_max = WCD_MBHC_HS_V_MAX;
+ cfg->num_btn = WCD934X_MBHC_MAX_BUTTONS;
+ cfg->micb_mv = wcd->micb2_mv;
+ cfg->linein_th = 5000;
+ cfg->hs_thr = 1700;
+ cfg->hph_thr = 50;
+
+ wcd_dt_parse_mbhc_data(dev, cfg);
+
return 0;
}
@@ -5020,6 +5873,7 @@ static int wcd934x_codec_probe(struct platform_device *pdev)
wcd->extclk = data->extclk;
wcd->sdev = to_slim_device(data->dev);
mutex_init(&wcd->sysclk_mutex);
+ mutex_init(&wcd->micb_lock);
ret = wcd934x_codec_parse_data(wcd);
if (ret) {
diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
new file mode 100644
index 000000000000..1fa05ec7459a
--- /dev/null
+++ b/sound/soc/codecs/wcd938x-sdw.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/component.h>
+#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include "wcd938x.h"
+
+#define SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(m) (0xE0 + 0x10 * (m))
+
+static struct wcd938x_sdw_ch_info wcd938x_sdw_rx_ch_info[] = {
+ WCD_SDW_CH(WCD938X_HPH_L, WCD938X_HPH_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_HPH_R, WCD938X_HPH_PORT, BIT(1)),
+ WCD_SDW_CH(WCD938X_CLSH, WCD938X_CLSH_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_COMP_L, WCD938X_COMP_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_COMP_R, WCD938X_COMP_PORT, BIT(1)),
+ WCD_SDW_CH(WCD938X_LO, WCD938X_LO_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_DSD_L, WCD938X_DSD_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_DSD_R, WCD938X_DSD_PORT, BIT(1)),
+};
+
+static struct wcd938x_sdw_ch_info wcd938x_sdw_tx_ch_info[] = {
+ WCD_SDW_CH(WCD938X_ADC1, WCD938X_ADC_1_2_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_ADC2, WCD938X_ADC_1_2_PORT, BIT(1)),
+ WCD_SDW_CH(WCD938X_ADC3, WCD938X_ADC_3_4_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_ADC4, WCD938X_ADC_3_4_PORT, BIT(1)),
+ WCD_SDW_CH(WCD938X_DMIC0, WCD938X_DMIC_0_3_MBHC_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_DMIC1, WCD938X_DMIC_0_3_MBHC_PORT, BIT(1)),
+ WCD_SDW_CH(WCD938X_MBHC, WCD938X_DMIC_0_3_MBHC_PORT, BIT(2)),
+ WCD_SDW_CH(WCD938X_DMIC2, WCD938X_DMIC_0_3_MBHC_PORT, BIT(2)),
+ WCD_SDW_CH(WCD938X_DMIC3, WCD938X_DMIC_0_3_MBHC_PORT, BIT(3)),
+ WCD_SDW_CH(WCD938X_DMIC4, WCD938X_DMIC_4_7_PORT, BIT(0)),
+ WCD_SDW_CH(WCD938X_DMIC5, WCD938X_DMIC_4_7_PORT, BIT(1)),
+ WCD_SDW_CH(WCD938X_DMIC6, WCD938X_DMIC_4_7_PORT, BIT(2)),
+ WCD_SDW_CH(WCD938X_DMIC7, WCD938X_DMIC_4_7_PORT, BIT(3)),
+};
+
+static struct sdw_dpn_prop wcd938x_dpn_prop[WCD938X_MAX_SWR_PORTS] = {
+ {
+ .num = 1,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 8,
+ .simple_ch_prep_sm = true,
+ }, {
+ .num = 2,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 4,
+ .simple_ch_prep_sm = true,
+ }, {
+ .num = 3,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 4,
+ .simple_ch_prep_sm = true,
+ }, {
+ .num = 4,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 4,
+ .simple_ch_prep_sm = true,
+ }, {
+ .num = 5,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 4,
+ .simple_ch_prep_sm = true,
+ }
+};
+
+struct device *wcd938x_sdw_device_get(struct device_node *np)
+{
+ return bus_find_device_by_of_node(&sdw_bus_type, np);
+}
+EXPORT_SYMBOL_GPL(wcd938x_sdw_device_get);
+
+int wcd938x_swr_get_current_bank(struct sdw_slave *sdev)
+{
+ int bank;
+
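+ /* The driver treats bit 6 of SCP_CTRL as the current-bank indicator */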
+ bank = sdw_read(sdev, SDW_SCP_CTRL);
+
+ return ((bank & 0x40) ? 1 : 0);
+}
+EXPORT_SYMBOL_GPL(wcd938x_swr_get_current_bank);
+
+int wcd938x_sdw_hw_params(struct wcd938x_sdw_priv *wcd,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_port_config port_config[WCD938X_MAX_SWR_PORTS];
+ unsigned long ch_mask;
+ int i, j;
+
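+ /* Pack the ports that actually have channels enabled into a contiguous port config */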
+ wcd->sconfig.ch_count = 1;
+ wcd->active_ports = 0;
+ for (i = 0; i < WCD938X_MAX_SWR_PORTS; i++) {
+ ch_mask = wcd->port_config[i].ch_mask;
+
+ if (!ch_mask)
+ continue;
+
+ for_each_set_bit(j, &ch_mask, 4)
+ wcd->sconfig.ch_count++;
+
+ port_config[wcd->active_ports] = wcd->port_config[i];
+ wcd->active_ports++;
+ }
+
+ wcd->sconfig.bps = 1;
+ wcd->sconfig.frame_rate = params_rate(params);
+ if (wcd->is_tx)
+ wcd->sconfig.direction = SDW_DATA_DIR_TX;
+ else
+ wcd->sconfig.direction = SDW_DATA_DIR_RX;
+
+ wcd->sconfig.type = SDW_STREAM_PCM;
+
+ return sdw_stream_add_slave(wcd->sdev, &wcd->sconfig,
+ &port_config[0], wcd->active_ports,
+ wcd->sruntime);
+}
+EXPORT_SYMBOL_GPL(wcd938x_sdw_hw_params);
+
+int wcd938x_sdw_free(struct wcd938x_sdw_priv *wcd,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ sdw_stream_remove_slave(wcd->sdev, wcd->sruntime);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wcd938x_sdw_free);
+
+int wcd938x_sdw_set_sdw_stream(struct wcd938x_sdw_priv *wcd,
+ struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ wcd->sruntime = stream;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wcd938x_sdw_set_sdw_stream);
+
+static int wcd9380_update_status(struct sdw_slave *slave,
+ enum sdw_slave_status status)
+{
+ return 0;
+}
+
+static int wcd9380_bus_config(struct sdw_slave *slave,
+ struct sdw_bus_params *params)
+{
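+ /* Program the clock divider control for the bank that is about to become active */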
+ sdw_write(slave, SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(params->next_bank), 0x01);
+
+ return 0;
+}
+
+static int wcd9380_interrupt_callback(struct sdw_slave *slave,
+ struct sdw_slave_intr_status *status)
+{
+ struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
+ struct irq_domain *slave_irq = wcd->slave_irq;
+ struct regmap *regmap = dev_get_regmap(&slave->dev, NULL);
+ u32 sts1, sts2, sts3;
+
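+ /*
+ * Keep dispatching the nested codec interrupt until all three status
+ * registers read back as zero.
+ */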
+ do {
+ handle_nested_irq(irq_find_mapping(slave_irq, 0));
+ regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
+ regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
+ regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
+
+ } while (sts1 || sts2 || sts3);
+
+ return IRQ_HANDLED;
+}
+
+static struct sdw_slave_ops wcd9380_slave_ops = {
+ .update_status = wcd9380_update_status,
+ .interrupt_callback = wcd9380_interrupt_callback,
+ .bus_config = wcd9380_bus_config,
+};
+
+static int wcd938x_sdw_component_bind(struct device *dev,
+ struct device *master, void *data)
+{
+ return 0;
+}
+
+static void wcd938x_sdw_component_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+}
+
+static const struct component_ops wcd938x_sdw_component_ops = {
+ .bind = wcd938x_sdw_component_bind,
+ .unbind = wcd938x_sdw_component_unbind,
+};
+
+static int wcd9380_probe(struct sdw_slave *pdev,
+ const struct sdw_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct wcd938x_sdw_priv *wcd;
+ int ret;
+
+ wcd = devm_kzalloc(dev, sizeof(*wcd), GFP_KERNEL);
+ if (!wcd)
+ return -ENOMEM;
+
+ /*
+ * Port map index starts with 0, however the data ports for this
+ * codec start from index 1
+ */
+ if (of_property_read_bool(dev->of_node, "qcom,tx-port-mapping")) {
+ wcd->is_tx = true;
+ ret = of_property_read_u32_array(dev->of_node, "qcom,tx-port-mapping",
+ &pdev->m_port_map[1],
+ WCD938X_MAX_TX_SWR_PORTS);
+ } else {
+ ret = of_property_read_u32_array(dev->of_node, "qcom,rx-port-mapping",
+ &pdev->m_port_map[1],
+ WCD938X_MAX_SWR_PORTS);
+ }
+
+ if (ret < 0)
+ dev_info(dev, "Static Port mapping not specified\n");
+
+ wcd->sdev = pdev;
+ dev_set_drvdata(dev, wcd);
+
+ pdev->prop.scp_int1_mask = SDW_SCP_INT1_IMPL_DEF |
+ SDW_SCP_INT1_BUS_CLASH |
+ SDW_SCP_INT1_PARITY;
+ pdev->prop.lane_control_support = true;
+ if (wcd->is_tx) {
+ pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0);
+ pdev->prop.src_dpn_prop = wcd938x_dpn_prop;
+ wcd->ch_info = &wcd938x_sdw_tx_ch_info[0];
+ pdev->prop.wake_capable = true;
+ } else {
+ pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0);
+ pdev->prop.sink_dpn_prop = wcd938x_dpn_prop;
+ wcd->ch_info = &wcd938x_sdw_rx_ch_info[0];
+ }
+
+ pm_runtime_set_autosuspend_delay(dev, 3000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return component_add(dev, &wcd938x_sdw_component_ops);
+}
+
+static const struct sdw_device_id wcd9380_slave_id[] = {
+ SDW_SLAVE_ENTRY(0x0217, 0x10d, 0),
+ {},
+};
+MODULE_DEVICE_TABLE(sdw, wcd9380_slave_id);
+
+static int __maybe_unused wcd938x_sdw_runtime_suspend(struct device *dev)
+{
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+
+ if (regmap) {
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
+ }
+ return 0;
+}
+
+static int __maybe_unused wcd938x_sdw_runtime_resume(struct device *dev)
+{
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+
+ if (regmap) {
+ regcache_cache_only(regmap, false);
+ regcache_sync(regmap);
+ }
+
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
+ SET_RUNTIME_PM_OPS(wcd938x_sdw_runtime_suspend, wcd938x_sdw_runtime_resume, NULL)
+};
+
+static struct sdw_driver wcd9380_codec_driver = {
+ .probe = wcd9380_probe,
+ .ops = &wcd9380_slave_ops,
+ .id_table = wcd9380_slave_id,
+ .driver = {
+ .name = "wcd9380-codec",
+ .pm = &wcd938x_sdw_pm_ops,
+ }
+};
+module_sdw_driver(wcd9380_codec_driver);
+
+MODULE_DESCRIPTION("WCD938X SDW codec driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
new file mode 100644
index 000000000000..2fcc97370be2
--- /dev/null
+++ b/sound/soc/codecs/wcd938x.c
@@ -0,0 +1,3737 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/component.h>
+#include <sound/tlv.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/regulator/consumer.h>
+
+#include "wcd-clsh-v2.h"
+#include "wcd938x.h"
+
+#define WCD938X_MAX_MICBIAS (4)
+#define WCD938X_MAX_SUPPLY (4)
+#define WCD938X_MBHC_MAX_BUTTONS (8)
+#define TX_ADC_MAX (4)
+#define WCD938X_TX_MAX_SWR_PORTS (5)
+
+#define WCD938X_RATES_MASK (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+/* Fractional Rates */
+#define WCD938X_FRAC_RATES_MASK (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_176400)
+#define WCD938X_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+/* Convert from vout ctl to micbias voltage in mV */
+#define WCD_VOUT_CTL_TO_MICB(v) (1000 + (v) * 50)
+#define SWR_CLK_RATE_0P6MHZ (600000)
+#define SWR_CLK_RATE_1P2MHZ (1200000)
+#define SWR_CLK_RATE_2P4MHZ (2400000)
+#define SWR_CLK_RATE_4P8MHZ (4800000)
+#define SWR_CLK_RATE_9P6MHZ (9600000)
+#define SWR_CLK_RATE_11P2896MHZ (11289600)
+
+#define WCD938X_DRV_NAME "wcd938x_codec"
+#define WCD938X_VERSION_1_0 (1)
+#define EAR_RX_PATH_AUX (1)
+
+#define ADC_MODE_VAL_HIFI 0x01
+#define ADC_MODE_VAL_LO_HIF 0x02
+#define ADC_MODE_VAL_NORMAL 0x03
+#define ADC_MODE_VAL_LP 0x05
+#define ADC_MODE_VAL_ULP1 0x09
+#define ADC_MODE_VAL_ULP2 0x0B
+
+/* Z value defined in milliohm */
+#define WCD938X_ZDET_VAL_32 (32000)
+#define WCD938X_ZDET_VAL_400 (400000)
+#define WCD938X_ZDET_VAL_1200 (1200000)
+#define WCD938X_ZDET_VAL_100K (100000000)
+/* Z floating defined in ohms */
+#define WCD938X_ZDET_FLOATING_IMPEDANCE (0x0FFFFFFE)
+#define WCD938X_ZDET_NUM_MEASUREMENTS (900)
+#define WCD938X_MBHC_GET_C1(c) (((c) & 0xC000) >> 14)
+#define WCD938X_MBHC_GET_X1(x) ((x) & 0x3FFF)
+/* Z value compared in milliOhm */
+#define WCD938X_MBHC_IS_SECOND_RAMP_REQUIRED(z) (((z) > 400000) || ((z) < 32000))
+#define WCD938X_MBHC_ZDET_CONST (86 * 16384)
+#define WCD938X_MBHC_MOISTURE_RREF R_24_KOHM
+#define WCD_MBHC_HS_V_MAX 1600
+
+#define WCD938X_EAR_PA_GAIN_TLV(xname, reg, shift, max, invert, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
+ .put = wcd938x_ear_pa_put_gain, \
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+
+enum {
+ WCD9380 = 0,
+ WCD9385 = 5,
+};
+
+enum {
+ TX_HDR12 = 0,
+ TX_HDR34,
+ TX_HDR_MAX,
+};
+
+enum {
+ WCD_RX1,
+ WCD_RX2,
+ WCD_RX3
+};
+
+enum {
+ /* INTR_CTRL_INT_MASK_0 */
+ WCD938X_IRQ_MBHC_BUTTON_PRESS_DET = 0,
+ WCD938X_IRQ_MBHC_BUTTON_RELEASE_DET,
+ WCD938X_IRQ_MBHC_ELECT_INS_REM_DET,
+ WCD938X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+ WCD938X_IRQ_MBHC_SW_DET,
+ WCD938X_IRQ_HPHR_OCP_INT,
+ WCD938X_IRQ_HPHR_CNP_INT,
+ WCD938X_IRQ_HPHL_OCP_INT,
+
+ /* INTR_CTRL_INT_MASK_1 */
+ WCD938X_IRQ_HPHL_CNP_INT,
+ WCD938X_IRQ_EAR_CNP_INT,
+ WCD938X_IRQ_EAR_SCD_INT,
+ WCD938X_IRQ_AUX_CNP_INT,
+ WCD938X_IRQ_AUX_SCD_INT,
+ WCD938X_IRQ_HPHL_PDM_WD_INT,
+ WCD938X_IRQ_HPHR_PDM_WD_INT,
+ WCD938X_IRQ_AUX_PDM_WD_INT,
+
+ /* INTR_CTRL_INT_MASK_2 */
+ WCD938X_IRQ_LDORT_SCD_INT,
+ WCD938X_IRQ_MBHC_MOISTURE_INT,
+ WCD938X_IRQ_HPHL_SURGE_DET_INT,
+ WCD938X_IRQ_HPHR_SURGE_DET_INT,
+ WCD938X_NUM_IRQS,
+};
+
+enum {
+ WCD_ADC1 = 0,
+ WCD_ADC2,
+ WCD_ADC3,
+ WCD_ADC4,
+ ALLOW_BUCK_DISABLE,
+ HPH_COMP_DELAY,
+ HPH_PA_DELAY,
+ AMIC2_BCS_ENABLE,
+ WCD_SUPPLIES_LPM_MODE,
+};
+
+enum {
+ ADC_MODE_INVALID = 0,
+ ADC_MODE_HIFI,
+ ADC_MODE_LO_HIF,
+ ADC_MODE_NORMAL,
+ ADC_MODE_LP,
+ ADC_MODE_ULP1,
+ ADC_MODE_ULP2,
+};
+
+enum {
+ AIF1_PB = 0,
+ AIF1_CAP,
+ NUM_CODEC_DAIS,
+};
+
+static u8 tx_mode_bit[] = {
+ [ADC_MODE_INVALID] = 0x00,
+ [ADC_MODE_HIFI] = 0x01,
+ [ADC_MODE_LO_HIF] = 0x02,
+ [ADC_MODE_NORMAL] = 0x04,
+ [ADC_MODE_LP] = 0x08,
+ [ADC_MODE_ULP1] = 0x10,
+ [ADC_MODE_ULP2] = 0x20,
+};
+
+struct wcd938x_priv {
+ struct sdw_slave *tx_sdw_dev;
+ struct wcd938x_sdw_priv *sdw_priv[NUM_CODEC_DAIS];
+ struct device *txdev;
+ struct device *rxdev;
+ struct device_node *rxnode, *txnode;
+ struct regmap *regmap;
+ struct wcd_clsh_ctrl *clsh_info;
+ struct irq_domain *virq;
+ struct regmap_irq_chip *wcd_regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_chip;
+ struct regulator_bulk_data supplies[WCD938X_MAX_SUPPLY];
+ struct snd_soc_jack *jack;
+ unsigned long status_mask;
+ s32 micb_ref[WCD938X_MAX_MICBIAS];
+ s32 pullup_ref[WCD938X_MAX_MICBIAS];
+ u32 hph_mode;
+ u32 tx_mode[TX_ADC_MAX];
+ int flyback_cur_det_disable;
+ int ear_rx_path;
+ int variant;
+ int reset_gpio;
+ u32 micb1_mv;
+ u32 micb2_mv;
+ u32 micb3_mv;
+ u32 micb4_mv;
+ int hphr_pdm_wd_int;
+ int hphl_pdm_wd_int;
+ int aux_pdm_wd_int;
+ bool comp1_enable;
+ bool comp2_enable;
+ bool ldoh;
+ bool bcs_dis;
+};
+
+enum {
+ MIC_BIAS_1 = 1,
+ MIC_BIAS_2,
+ MIC_BIAS_3,
+ MIC_BIAS_4
+};
+
+enum {
+ MICB_PULLUP_ENABLE,
+ MICB_PULLUP_DISABLE,
+ MICB_ENABLE,
+ MICB_DISABLE,
+};
+
+static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
+static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(line_gain, 600, -3000);
+static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(analog_gain, 0, 3000);
+
+static const struct reg_default wcd938x_defaults[] = {
+ {WCD938X_ANA_PAGE_REGISTER, 0x00},
+ {WCD938X_ANA_BIAS, 0x00},
+ {WCD938X_ANA_RX_SUPPLIES, 0x00},
+ {WCD938X_ANA_HPH, 0x0C},
+ {WCD938X_ANA_EAR, 0x00},
+ {WCD938X_ANA_EAR_COMPANDER_CTL, 0x02},
+ {WCD938X_ANA_TX_CH1, 0x20},
+ {WCD938X_ANA_TX_CH2, 0x00},
+ {WCD938X_ANA_TX_CH3, 0x20},
+ {WCD938X_ANA_TX_CH4, 0x00},
+ {WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC, 0x00},
+ {WCD938X_ANA_MICB3_DSP_EN_LOGIC, 0x00},
+ {WCD938X_ANA_MBHC_MECH, 0x39},
+ {WCD938X_ANA_MBHC_ELECT, 0x08},
+ {WCD938X_ANA_MBHC_ZDET, 0x00},
+ {WCD938X_ANA_MBHC_RESULT_1, 0x00},
+ {WCD938X_ANA_MBHC_RESULT_2, 0x00},
+ {WCD938X_ANA_MBHC_RESULT_3, 0x00},
+ {WCD938X_ANA_MBHC_BTN0, 0x00},
+ {WCD938X_ANA_MBHC_BTN1, 0x10},
+ {WCD938X_ANA_MBHC_BTN2, 0x20},
+ {WCD938X_ANA_MBHC_BTN3, 0x30},
+ {WCD938X_ANA_MBHC_BTN4, 0x40},
+ {WCD938X_ANA_MBHC_BTN5, 0x50},
+ {WCD938X_ANA_MBHC_BTN6, 0x60},
+ {WCD938X_ANA_MBHC_BTN7, 0x70},
+ {WCD938X_ANA_MICB1, 0x10},
+ {WCD938X_ANA_MICB2, 0x10},
+ {WCD938X_ANA_MICB2_RAMP, 0x00},
+ {WCD938X_ANA_MICB3, 0x10},
+ {WCD938X_ANA_MICB4, 0x10},
+ {WCD938X_BIAS_CTL, 0x2A},
+ {WCD938X_BIAS_VBG_FINE_ADJ, 0x55},
+ {WCD938X_LDOL_VDDCX_ADJUST, 0x01},
+ {WCD938X_LDOL_DISABLE_LDOL, 0x00},
+ {WCD938X_MBHC_CTL_CLK, 0x00},
+ {WCD938X_MBHC_CTL_ANA, 0x00},
+ {WCD938X_MBHC_CTL_SPARE_1, 0x00},
+ {WCD938X_MBHC_CTL_SPARE_2, 0x00},
+ {WCD938X_MBHC_CTL_BCS, 0x00},
+ {WCD938X_MBHC_MOISTURE_DET_FSM_STATUS, 0x00},
+ {WCD938X_MBHC_TEST_CTL, 0x00},
+ {WCD938X_LDOH_MODE, 0x2B},
+ {WCD938X_LDOH_BIAS, 0x68},
+ {WCD938X_LDOH_STB_LOADS, 0x00},
+ {WCD938X_LDOH_SLOWRAMP, 0x50},
+ {WCD938X_MICB1_TEST_CTL_1, 0x1A},
+ {WCD938X_MICB1_TEST_CTL_2, 0x00},
+ {WCD938X_MICB1_TEST_CTL_3, 0xA4},
+ {WCD938X_MICB2_TEST_CTL_1, 0x1A},
+ {WCD938X_MICB2_TEST_CTL_2, 0x00},
+ {WCD938X_MICB2_TEST_CTL_3, 0x24},
+ {WCD938X_MICB3_TEST_CTL_1, 0x1A},
+ {WCD938X_MICB3_TEST_CTL_2, 0x00},
+ {WCD938X_MICB3_TEST_CTL_3, 0xA4},
+ {WCD938X_MICB4_TEST_CTL_1, 0x1A},
+ {WCD938X_MICB4_TEST_CTL_2, 0x00},
+ {WCD938X_MICB4_TEST_CTL_3, 0xA4},
+ {WCD938X_TX_COM_ADC_VCM, 0x39},
+ {WCD938X_TX_COM_BIAS_ATEST, 0xE0},
+ {WCD938X_TX_COM_SPARE1, 0x00},
+ {WCD938X_TX_COM_SPARE2, 0x00},
+ {WCD938X_TX_COM_TXFE_DIV_CTL, 0x22},
+ {WCD938X_TX_COM_TXFE_DIV_START, 0x00},
+ {WCD938X_TX_COM_SPARE3, 0x00},
+ {WCD938X_TX_COM_SPARE4, 0x00},
+ {WCD938X_TX_1_2_TEST_EN, 0xCC},
+ {WCD938X_TX_1_2_ADC_IB, 0xE9},
+ {WCD938X_TX_1_2_ATEST_REFCTL, 0x0A},
+ {WCD938X_TX_1_2_TEST_CTL, 0x38},
+ {WCD938X_TX_1_2_TEST_BLK_EN1, 0xFF},
+ {WCD938X_TX_1_2_TXFE1_CLKDIV, 0x00},
+ {WCD938X_TX_1_2_SAR2_ERR, 0x00},
+ {WCD938X_TX_1_2_SAR1_ERR, 0x00},
+ {WCD938X_TX_3_4_TEST_EN, 0xCC},
+ {WCD938X_TX_3_4_ADC_IB, 0xE9},
+ {WCD938X_TX_3_4_ATEST_REFCTL, 0x0A},
+ {WCD938X_TX_3_4_TEST_CTL, 0x38},
+ {WCD938X_TX_3_4_TEST_BLK_EN3, 0xFF},
+ {WCD938X_TX_3_4_TXFE3_CLKDIV, 0x00},
+ {WCD938X_TX_3_4_SAR4_ERR, 0x00},
+ {WCD938X_TX_3_4_SAR3_ERR, 0x00},
+ {WCD938X_TX_3_4_TEST_BLK_EN2, 0xFB},
+ {WCD938X_TX_3_4_TXFE2_CLKDIV, 0x00},
+ {WCD938X_TX_3_4_SPARE1, 0x00},
+ {WCD938X_TX_3_4_TEST_BLK_EN4, 0xFB},
+ {WCD938X_TX_3_4_TXFE4_CLKDIV, 0x00},
+ {WCD938X_TX_3_4_SPARE2, 0x00},
+ {WCD938X_CLASSH_MODE_1, 0x40},
+ {WCD938X_CLASSH_MODE_2, 0x3A},
+ {WCD938X_CLASSH_MODE_3, 0x00},
+ {WCD938X_CLASSH_CTRL_VCL_1, 0x70},
+ {WCD938X_CLASSH_CTRL_VCL_2, 0x82},
+ {WCD938X_CLASSH_CTRL_CCL_1, 0x31},
+ {WCD938X_CLASSH_CTRL_CCL_2, 0x80},
+ {WCD938X_CLASSH_CTRL_CCL_3, 0x80},
+ {WCD938X_CLASSH_CTRL_CCL_4, 0x51},
+ {WCD938X_CLASSH_CTRL_CCL_5, 0x00},
+ {WCD938X_CLASSH_BUCK_TMUX_A_D, 0x00},
+ {WCD938X_CLASSH_BUCK_SW_DRV_CNTL, 0x77},
+ {WCD938X_CLASSH_SPARE, 0x00},
+ {WCD938X_FLYBACK_EN, 0x4E},
+ {WCD938X_FLYBACK_VNEG_CTRL_1, 0x0B},
+ {WCD938X_FLYBACK_VNEG_CTRL_2, 0x45},
+ {WCD938X_FLYBACK_VNEG_CTRL_3, 0x74},
+ {WCD938X_FLYBACK_VNEG_CTRL_4, 0x7F},
+ {WCD938X_FLYBACK_VNEG_CTRL_5, 0x83},
+ {WCD938X_FLYBACK_VNEG_CTRL_6, 0x98},
+ {WCD938X_FLYBACK_VNEG_CTRL_7, 0xA9},
+ {WCD938X_FLYBACK_VNEG_CTRL_8, 0x68},
+ {WCD938X_FLYBACK_VNEG_CTRL_9, 0x64},
+ {WCD938X_FLYBACK_VNEGDAC_CTRL_1, 0xED},
+ {WCD938X_FLYBACK_VNEGDAC_CTRL_2, 0xF0},
+ {WCD938X_FLYBACK_VNEGDAC_CTRL_3, 0xA6},
+ {WCD938X_FLYBACK_CTRL_1, 0x65},
+ {WCD938X_FLYBACK_TEST_CTL, 0x00},
+ {WCD938X_RX_AUX_SW_CTL, 0x00},
+ {WCD938X_RX_PA_AUX_IN_CONN, 0x01},
+ {WCD938X_RX_TIMER_DIV, 0x32},
+ {WCD938X_RX_OCP_CTL, 0x1F},
+ {WCD938X_RX_OCP_COUNT, 0x77},
+ {WCD938X_RX_BIAS_EAR_DAC, 0xA0},
+ {WCD938X_RX_BIAS_EAR_AMP, 0xAA},
+ {WCD938X_RX_BIAS_HPH_LDO, 0xA9},
+ {WCD938X_RX_BIAS_HPH_PA, 0xAA},
+ {WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2, 0x8A},
+ {WCD938X_RX_BIAS_HPH_RDAC_LDO, 0x88},
+ {WCD938X_RX_BIAS_HPH_CNP1, 0x82},
+ {WCD938X_RX_BIAS_HPH_LOWPOWER, 0x82},
+ {WCD938X_RX_BIAS_AUX_DAC, 0xA0},
+ {WCD938X_RX_BIAS_AUX_AMP, 0xAA},
+ {WCD938X_RX_BIAS_VNEGDAC_BLEEDER, 0x50},
+ {WCD938X_RX_BIAS_MISC, 0x00},
+ {WCD938X_RX_BIAS_BUCK_RST, 0x08},
+ {WCD938X_RX_BIAS_BUCK_VREF_ERRAMP, 0x44},
+ {WCD938X_RX_BIAS_FLYB_ERRAMP, 0x40},
+ {WCD938X_RX_BIAS_FLYB_BUFF, 0xAA},
+ {WCD938X_RX_BIAS_FLYB_MID_RST, 0x14},
+ {WCD938X_HPH_L_STATUS, 0x04},
+ {WCD938X_HPH_R_STATUS, 0x04},
+ {WCD938X_HPH_CNP_EN, 0x80},
+ {WCD938X_HPH_CNP_WG_CTL, 0x9A},
+ {WCD938X_HPH_CNP_WG_TIME, 0x14},
+ {WCD938X_HPH_OCP_CTL, 0x28},
+ {WCD938X_HPH_AUTO_CHOP, 0x16},
+ {WCD938X_HPH_CHOP_CTL, 0x83},
+ {WCD938X_HPH_PA_CTL1, 0x46},
+ {WCD938X_HPH_PA_CTL2, 0x50},
+ {WCD938X_HPH_L_EN, 0x80},
+ {WCD938X_HPH_L_TEST, 0xE0},
+ {WCD938X_HPH_L_ATEST, 0x50},
+ {WCD938X_HPH_R_EN, 0x80},
+ {WCD938X_HPH_R_TEST, 0xE0},
+ {WCD938X_HPH_R_ATEST, 0x54},
+ {WCD938X_HPH_RDAC_CLK_CTL1, 0x99},
+ {WCD938X_HPH_RDAC_CLK_CTL2, 0x9B},
+ {WCD938X_HPH_RDAC_LDO_CTL, 0x33},
+ {WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL, 0x00},
+ {WCD938X_HPH_REFBUFF_UHQA_CTL, 0x68},
+ {WCD938X_HPH_REFBUFF_LP_CTL, 0x0E},
+ {WCD938X_HPH_L_DAC_CTL, 0x20},
+ {WCD938X_HPH_R_DAC_CTL, 0x20},
+ {WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL, 0x55},
+ {WCD938X_HPH_SURGE_HPHLR_SURGE_EN, 0x19},
+ {WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1, 0xA0},
+ {WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS, 0x00},
+ {WCD938X_EAR_EAR_EN_REG, 0x22},
+ {WCD938X_EAR_EAR_PA_CON, 0x44},
+ {WCD938X_EAR_EAR_SP_CON, 0xDB},
+ {WCD938X_EAR_EAR_DAC_CON, 0x80},
+ {WCD938X_EAR_EAR_CNP_FSM_CON, 0xB2},
+ {WCD938X_EAR_TEST_CTL, 0x00},
+ {WCD938X_EAR_STATUS_REG_1, 0x00},
+ {WCD938X_EAR_STATUS_REG_2, 0x08},
+ {WCD938X_ANA_NEW_PAGE_REGISTER, 0x00},
+ {WCD938X_HPH_NEW_ANA_HPH2, 0x00},
+ {WCD938X_HPH_NEW_ANA_HPH3, 0x00},
+ {WCD938X_SLEEP_CTL, 0x16},
+ {WCD938X_SLEEP_WATCHDOG_CTL, 0x00},
+ {WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL, 0x00},
+ {WCD938X_MBHC_NEW_CTL_1, 0x02},
+ {WCD938X_MBHC_NEW_CTL_2, 0x05},
+ {WCD938X_MBHC_NEW_PLUG_DETECT_CTL, 0xE9},
+ {WCD938X_MBHC_NEW_ZDET_ANA_CTL, 0x0F},
+ {WCD938X_MBHC_NEW_ZDET_RAMP_CTL, 0x00},
+ {WCD938X_MBHC_NEW_FSM_STATUS, 0x00},
+ {WCD938X_MBHC_NEW_ADC_RESULT, 0x00},
+ {WCD938X_TX_NEW_AMIC_MUX_CFG, 0x00},
+ {WCD938X_AUX_AUXPA, 0x00},
+ {WCD938X_LDORXTX_MODE, 0x0C},
+ {WCD938X_LDORXTX_CONFIG, 0x10},
+ {WCD938X_DIE_CRACK_DIE_CRK_DET_EN, 0x00},
+ {WCD938X_DIE_CRACK_DIE_CRK_DET_OUT, 0x00},
+ {WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40},
+ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x81},
+ {WCD938X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10},
+ {WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00},
+ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x81},
+ {WCD938X_HPH_NEW_INT_PA_MISC1, 0x22},
+ {WCD938X_HPH_NEW_INT_PA_MISC2, 0x00},
+ {WCD938X_HPH_NEW_INT_PA_RDAC_MISC, 0x00},
+ {WCD938X_HPH_NEW_INT_HPH_TIMER1, 0xFE},
+ {WCD938X_HPH_NEW_INT_HPH_TIMER2, 0x02},
+ {WCD938X_HPH_NEW_INT_HPH_TIMER3, 0x4E},
+ {WCD938X_HPH_NEW_INT_HPH_TIMER4, 0x54},
+ {WCD938X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00},
+ {WCD938X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00},
+ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW, 0x90},
+ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW, 0x90},
+ {WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI, 0x62},
+ {WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP, 0x01},
+ {WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP, 0x11},
+ {WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL, 0x57},
+ {WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL, 0x01},
+ {WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT, 0x00},
+ {WCD938X_MBHC_NEW_INT_SPARE_2, 0x00},
+ {WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON, 0xA8},
+ {WCD938X_EAR_INT_NEW_CNP_VCM_CON1, 0x42},
+ {WCD938X_EAR_INT_NEW_CNP_VCM_CON2, 0x22},
+ {WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS, 0x00},
+ {WCD938X_AUX_INT_EN_REG, 0x00},
+ {WCD938X_AUX_INT_PA_CTRL, 0x06},
+ {WCD938X_AUX_INT_SP_CTRL, 0xD2},
+ {WCD938X_AUX_INT_DAC_CTRL, 0x80},
+ {WCD938X_AUX_INT_CLK_CTRL, 0x50},
+ {WCD938X_AUX_INT_TEST_CTRL, 0x00},
+ {WCD938X_AUX_INT_STATUS_REG, 0x00},
+ {WCD938X_AUX_INT_MISC, 0x00},
+ {WCD938X_LDORXTX_INT_BIAS, 0x6E},
+ {WCD938X_LDORXTX_INT_STB_LOADS_DTEST, 0x50},
+ {WCD938X_LDORXTX_INT_TEST0, 0x1C},
+ {WCD938X_LDORXTX_INT_STARTUP_TIMER, 0xFF},
+ {WCD938X_LDORXTX_INT_TEST1, 0x1F},
+ {WCD938X_LDORXTX_INT_STATUS, 0x00},
+ {WCD938X_SLEEP_INT_WATCHDOG_CTL_1, 0x0A},
+ {WCD938X_SLEEP_INT_WATCHDOG_CTL_2, 0x0A},
+ {WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1, 0x02},
+ {WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2, 0x60},
+ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2, 0xFF},
+ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1, 0x7F},
+ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0, 0x3F},
+ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M, 0x1F},
+ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M, 0x0F},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1, 0xD7},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0, 0xC8},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP, 0xC6},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1, 0xD5},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0, 0xCA},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP, 0x05},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0, 0xA5},
+ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP, 0x13},
+ {WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1, 0x88},
+ {WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP, 0x42},
+ {WCD938X_TX_COM_NEW_INT_TXADC_INT_L2, 0xFF},
+ {WCD938X_TX_COM_NEW_INT_TXADC_INT_L1, 0x64},
+ {WCD938X_TX_COM_NEW_INT_TXADC_INT_L0, 0x64},
+ {WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP, 0x77},
+ {WCD938X_DIGITAL_PAGE_REGISTER, 0x00},
+ {WCD938X_DIGITAL_CHIP_ID0, 0x00},
+ {WCD938X_DIGITAL_CHIP_ID1, 0x00},
+ {WCD938X_DIGITAL_CHIP_ID2, 0x0D},
+ {WCD938X_DIGITAL_CHIP_ID3, 0x01},
+ {WCD938X_DIGITAL_SWR_TX_CLK_RATE, 0x00},
+ {WCD938X_DIGITAL_CDC_RST_CTL, 0x03},
+ {WCD938X_DIGITAL_TOP_CLK_CFG, 0x00},
+ {WCD938X_DIGITAL_CDC_ANA_CLK_CTL, 0x00},
+ {WCD938X_DIGITAL_CDC_DIG_CLK_CTL, 0xF0},
+ {WCD938X_DIGITAL_SWR_RST_EN, 0x00},
+ {WCD938X_DIGITAL_CDC_PATH_MODE, 0x55},
+ {WCD938X_DIGITAL_CDC_RX_RST, 0x00},
+ {WCD938X_DIGITAL_CDC_RX0_CTL, 0xFC},
+ {WCD938X_DIGITAL_CDC_RX1_CTL, 0xFC},
+ {WCD938X_DIGITAL_CDC_RX2_CTL, 0xFC},
+ {WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1, 0x00},
+ {WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3, 0x00},
+ {WCD938X_DIGITAL_CDC_COMP_CTL_0, 0x00},
+ {WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL, 0x1E},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A1_0, 0x00},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A1_1, 0x01},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A2_0, 0x63},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A2_1, 0x04},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A3_0, 0xAC},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A3_1, 0x04},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A4_0, 0x1A},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A4_1, 0x03},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A5_0, 0xBC},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A5_1, 0x02},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A6_0, 0xC7},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_A7_0, 0xF8},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_C_0, 0x47},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_C_1, 0x43},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_C_2, 0xB1},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_C_3, 0x17},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R1, 0x4D},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R2, 0x29},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R3, 0x34},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R4, 0x59},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R5, 0x66},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R6, 0x87},
+ {WCD938X_DIGITAL_CDC_HPH_DSM_R7, 0x64},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A1_0, 0x00},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A1_1, 0x01},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A2_0, 0x96},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A2_1, 0x09},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A3_0, 0xAB},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A3_1, 0x05},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A4_0, 0x1C},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A4_1, 0x02},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A5_0, 0x17},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A5_1, 0x02},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A6_0, 0xAA},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_A7_0, 0xE3},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_C_0, 0x69},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_C_1, 0x54},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_C_2, 0x02},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_C_3, 0x15},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R1, 0xA4},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R2, 0xB5},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R3, 0x86},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R4, 0x85},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R5, 0xAA},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R6, 0xE2},
+ {WCD938X_DIGITAL_CDC_AUX_DSM_R7, 0x62},
+ {WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0, 0x55},
+ {WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1, 0xA9},
+ {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0, 0x3D},
+ {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1, 0x2E},
+ {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2, 0x01},
+ {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0, 0x00},
+ {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1, 0xFC},
+ {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2, 0x01},
+ {WCD938X_DIGITAL_CDC_HPH_GAIN_CTL, 0x00},
+ {WCD938X_DIGITAL_CDC_AUX_GAIN_CTL, 0x00},
+ {WCD938X_DIGITAL_CDC_EAR_PATH_CTL, 0x00},
+ {WCD938X_DIGITAL_CDC_SWR_CLH, 0x00},
+ {WCD938X_DIGITAL_SWR_CLH_BYP, 0x00},
+ {WCD938X_DIGITAL_CDC_TX0_CTL, 0x68},
+ {WCD938X_DIGITAL_CDC_TX1_CTL, 0x68},
+ {WCD938X_DIGITAL_CDC_TX2_CTL, 0x68},
+ {WCD938X_DIGITAL_CDC_TX_RST, 0x00},
+ {WCD938X_DIGITAL_CDC_REQ_CTL, 0x01},
+ {WCD938X_DIGITAL_CDC_RST, 0x00},
+ {WCD938X_DIGITAL_CDC_AMIC_CTL, 0x0F},
+ {WCD938X_DIGITAL_CDC_DMIC_CTL, 0x04},
+ {WCD938X_DIGITAL_CDC_DMIC1_CTL, 0x01},
+ {WCD938X_DIGITAL_CDC_DMIC2_CTL, 0x01},
+ {WCD938X_DIGITAL_CDC_DMIC3_CTL, 0x01},
+ {WCD938X_DIGITAL_CDC_DMIC4_CTL, 0x01},
+ {WCD938X_DIGITAL_EFUSE_PRG_CTL, 0x00},
+ {WCD938X_DIGITAL_EFUSE_CTL, 0x2B},
+ {WCD938X_DIGITAL_CDC_DMIC_RATE_1_2, 0x11},
+ {WCD938X_DIGITAL_CDC_DMIC_RATE_3_4, 0x11},
+ {WCD938X_DIGITAL_PDM_WD_CTL0, 0x00},
+ {WCD938X_DIGITAL_PDM_WD_CTL1, 0x00},
+ {WCD938X_DIGITAL_PDM_WD_CTL2, 0x00},
+ {WCD938X_DIGITAL_INTR_MODE, 0x00},
+ {WCD938X_DIGITAL_INTR_MASK_0, 0xFF},
+ {WCD938X_DIGITAL_INTR_MASK_1, 0xFF},
+ {WCD938X_DIGITAL_INTR_MASK_2, 0x3F},
+ {WCD938X_DIGITAL_INTR_STATUS_0, 0x00},
+ {WCD938X_DIGITAL_INTR_STATUS_1, 0x00},
+ {WCD938X_DIGITAL_INTR_STATUS_2, 0x00},
+ {WCD938X_DIGITAL_INTR_CLEAR_0, 0x00},
+ {WCD938X_DIGITAL_INTR_CLEAR_1, 0x00},
+ {WCD938X_DIGITAL_INTR_CLEAR_2, 0x00},
+ {WCD938X_DIGITAL_INTR_LEVEL_0, 0x00},
+ {WCD938X_DIGITAL_INTR_LEVEL_1, 0x00},
+ {WCD938X_DIGITAL_INTR_LEVEL_2, 0x00},
+ {WCD938X_DIGITAL_INTR_SET_0, 0x00},
+ {WCD938X_DIGITAL_INTR_SET_1, 0x00},
+ {WCD938X_DIGITAL_INTR_SET_2, 0x00},
+ {WCD938X_DIGITAL_INTR_TEST_0, 0x00},
+ {WCD938X_DIGITAL_INTR_TEST_1, 0x00},
+ {WCD938X_DIGITAL_INTR_TEST_2, 0x00},
+ {WCD938X_DIGITAL_TX_MODE_DBG_EN, 0x00},
+ {WCD938X_DIGITAL_TX_MODE_DBG_0_1, 0x00},
+ {WCD938X_DIGITAL_TX_MODE_DBG_2_3, 0x00},
+ {WCD938X_DIGITAL_LB_IN_SEL_CTL, 0x00},
+ {WCD938X_DIGITAL_LOOP_BACK_MODE, 0x00},
+ {WCD938X_DIGITAL_SWR_DAC_TEST, 0x00},
+ {WCD938X_DIGITAL_SWR_HM_TEST_RX_0, 0x40},
+ {WCD938X_DIGITAL_SWR_HM_TEST_TX_0, 0x40},
+ {WCD938X_DIGITAL_SWR_HM_TEST_RX_1, 0x00},
+ {WCD938X_DIGITAL_SWR_HM_TEST_TX_1, 0x00},
+ {WCD938X_DIGITAL_SWR_HM_TEST_TX_2, 0x00},
+ {WCD938X_DIGITAL_SWR_HM_TEST_0, 0x00},
+ {WCD938X_DIGITAL_SWR_HM_TEST_1, 0x00},
+ {WCD938X_DIGITAL_PAD_CTL_SWR_0, 0x8F},
+ {WCD938X_DIGITAL_PAD_CTL_SWR_1, 0x06},
+ {WCD938X_DIGITAL_I2C_CTL, 0x00},
+ {WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE, 0x00},
+ {WCD938X_DIGITAL_EFUSE_TEST_CTL_0, 0x00},
+ {WCD938X_DIGITAL_EFUSE_TEST_CTL_1, 0x00},
+ {WCD938X_DIGITAL_EFUSE_T_DATA_0, 0x00},
+ {WCD938X_DIGITAL_EFUSE_T_DATA_1, 0x00},
+ {WCD938X_DIGITAL_PAD_CTL_PDM_RX0, 0xF1},
+ {WCD938X_DIGITAL_PAD_CTL_PDM_RX1, 0xF1},
+ {WCD938X_DIGITAL_PAD_CTL_PDM_TX0, 0xF1},
+ {WCD938X_DIGITAL_PAD_CTL_PDM_TX1, 0xF1},
+ {WCD938X_DIGITAL_PAD_CTL_PDM_TX2, 0xF1},
+ {WCD938X_DIGITAL_PAD_INP_DIS_0, 0x00},
+ {WCD938X_DIGITAL_PAD_INP_DIS_1, 0x00},
+ {WCD938X_DIGITAL_DRIVE_STRENGTH_0, 0x00},
+ {WCD938X_DIGITAL_DRIVE_STRENGTH_1, 0x00},
+ {WCD938X_DIGITAL_DRIVE_STRENGTH_2, 0x00},
+ {WCD938X_DIGITAL_RX_DATA_EDGE_CTL, 0x1F},
+ {WCD938X_DIGITAL_TX_DATA_EDGE_CTL, 0x80},
+ {WCD938X_DIGITAL_GPIO_MODE, 0x00},
+ {WCD938X_DIGITAL_PIN_CTL_OE, 0x00},
+ {WCD938X_DIGITAL_PIN_CTL_DATA_0, 0x00},
+ {WCD938X_DIGITAL_PIN_CTL_DATA_1, 0x00},
+ {WCD938X_DIGITAL_PIN_STATUS_0, 0x00},
+ {WCD938X_DIGITAL_PIN_STATUS_1, 0x00},
+ {WCD938X_DIGITAL_DIG_DEBUG_CTL, 0x00},
+ {WCD938X_DIGITAL_DIG_DEBUG_EN, 0x00},
+ {WCD938X_DIGITAL_ANA_CSR_DBG_ADD, 0x00},
+ {WCD938X_DIGITAL_ANA_CSR_DBG_CTL, 0x48},
+ {WCD938X_DIGITAL_SSP_DBG, 0x00},
+ {WCD938X_DIGITAL_MODE_STATUS_0, 0x00},
+ {WCD938X_DIGITAL_MODE_STATUS_1, 0x00},
+ {WCD938X_DIGITAL_SPARE_0, 0x00},
+ {WCD938X_DIGITAL_SPARE_1, 0x00},
+ {WCD938X_DIGITAL_SPARE_2, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_0, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_1, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_2, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_3, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_4, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_5, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_6, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_7, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_8, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_9, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_10, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_11, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_12, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_13, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_14, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_15, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_16, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_17, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_18, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_19, 0xFF},
+ {WCD938X_DIGITAL_EFUSE_REG_20, 0x0E},
+ {WCD938X_DIGITAL_EFUSE_REG_21, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_22, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_23, 0xF8},
+ {WCD938X_DIGITAL_EFUSE_REG_24, 0x16},
+ {WCD938X_DIGITAL_EFUSE_REG_25, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_26, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_27, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_28, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_29, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_30, 0x00},
+ {WCD938X_DIGITAL_EFUSE_REG_31, 0x00},
+ {WCD938X_DIGITAL_TX_REQ_FB_CTL_0, 0x88},
+ {WCD938X_DIGITAL_TX_REQ_FB_CTL_1, 0x88},
+ {WCD938X_DIGITAL_TX_REQ_FB_CTL_2, 0x88},
+ {WCD938X_DIGITAL_TX_REQ_FB_CTL_3, 0x88},
+ {WCD938X_DIGITAL_TX_REQ_FB_CTL_4, 0x88},
+ {WCD938X_DIGITAL_DEM_BYPASS_DATA0, 0x55},
+ {WCD938X_DIGITAL_DEM_BYPASS_DATA1, 0x55},
+ {WCD938X_DIGITAL_DEM_BYPASS_DATA2, 0x55},
+ {WCD938X_DIGITAL_DEM_BYPASS_DATA3, 0x01},
+};
+
+static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WCD938X_ANA_PAGE_REGISTER:
+ case WCD938X_ANA_BIAS:
+ case WCD938X_ANA_RX_SUPPLIES:
+ case WCD938X_ANA_HPH:
+ case WCD938X_ANA_EAR:
+ case WCD938X_ANA_EAR_COMPANDER_CTL:
+ case WCD938X_ANA_TX_CH1:
+ case WCD938X_ANA_TX_CH2:
+ case WCD938X_ANA_TX_CH3:
+ case WCD938X_ANA_TX_CH4:
+ case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
+ case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
+ case WCD938X_ANA_MBHC_MECH:
+ case WCD938X_ANA_MBHC_ELECT:
+ case WCD938X_ANA_MBHC_ZDET:
+ case WCD938X_ANA_MBHC_BTN0:
+ case WCD938X_ANA_MBHC_BTN1:
+ case WCD938X_ANA_MBHC_BTN2:
+ case WCD938X_ANA_MBHC_BTN3:
+ case WCD938X_ANA_MBHC_BTN4:
+ case WCD938X_ANA_MBHC_BTN5:
+ case WCD938X_ANA_MBHC_BTN6:
+ case WCD938X_ANA_MBHC_BTN7:
+ case WCD938X_ANA_MICB1:
+ case WCD938X_ANA_MICB2:
+ case WCD938X_ANA_MICB2_RAMP:
+ case WCD938X_ANA_MICB3:
+ case WCD938X_ANA_MICB4:
+ case WCD938X_BIAS_CTL:
+ case WCD938X_BIAS_VBG_FINE_ADJ:
+ case WCD938X_LDOL_VDDCX_ADJUST:
+ case WCD938X_LDOL_DISABLE_LDOL:
+ case WCD938X_MBHC_CTL_CLK:
+ case WCD938X_MBHC_CTL_ANA:
+ case WCD938X_MBHC_CTL_SPARE_1:
+ case WCD938X_MBHC_CTL_SPARE_2:
+ case WCD938X_MBHC_CTL_BCS:
+ case WCD938X_MBHC_TEST_CTL:
+ case WCD938X_LDOH_MODE:
+ case WCD938X_LDOH_BIAS:
+ case WCD938X_LDOH_STB_LOADS:
+ case WCD938X_LDOH_SLOWRAMP:
+ case WCD938X_MICB1_TEST_CTL_1:
+ case WCD938X_MICB1_TEST_CTL_2:
+ case WCD938X_MICB1_TEST_CTL_3:
+ case WCD938X_MICB2_TEST_CTL_1:
+ case WCD938X_MICB2_TEST_CTL_2:
+ case WCD938X_MICB2_TEST_CTL_3:
+ case WCD938X_MICB3_TEST_CTL_1:
+ case WCD938X_MICB3_TEST_CTL_2:
+ case WCD938X_MICB3_TEST_CTL_3:
+ case WCD938X_MICB4_TEST_CTL_1:
+ case WCD938X_MICB4_TEST_CTL_2:
+ case WCD938X_MICB4_TEST_CTL_3:
+ case WCD938X_TX_COM_ADC_VCM:
+ case WCD938X_TX_COM_BIAS_ATEST:
+ case WCD938X_TX_COM_SPARE1:
+ case WCD938X_TX_COM_SPARE2:
+ case WCD938X_TX_COM_TXFE_DIV_CTL:
+ case WCD938X_TX_COM_TXFE_DIV_START:
+ case WCD938X_TX_COM_SPARE3:
+ case WCD938X_TX_COM_SPARE4:
+ case WCD938X_TX_1_2_TEST_EN:
+ case WCD938X_TX_1_2_ADC_IB:
+ case WCD938X_TX_1_2_ATEST_REFCTL:
+ case WCD938X_TX_1_2_TEST_CTL:
+ case WCD938X_TX_1_2_TEST_BLK_EN1:
+ case WCD938X_TX_1_2_TXFE1_CLKDIV:
+ case WCD938X_TX_3_4_TEST_EN:
+ case WCD938X_TX_3_4_ADC_IB:
+ case WCD938X_TX_3_4_ATEST_REFCTL:
+ case WCD938X_TX_3_4_TEST_CTL:
+ case WCD938X_TX_3_4_TEST_BLK_EN3:
+ case WCD938X_TX_3_4_TXFE3_CLKDIV:
+ case WCD938X_TX_3_4_TEST_BLK_EN2:
+ case WCD938X_TX_3_4_TXFE2_CLKDIV:
+ case WCD938X_TX_3_4_SPARE1:
+ case WCD938X_TX_3_4_TEST_BLK_EN4:
+ case WCD938X_TX_3_4_TXFE4_CLKDIV:
+ case WCD938X_TX_3_4_SPARE2:
+ case WCD938X_CLASSH_MODE_1:
+ case WCD938X_CLASSH_MODE_2:
+ case WCD938X_CLASSH_MODE_3:
+ case WCD938X_CLASSH_CTRL_VCL_1:
+ case WCD938X_CLASSH_CTRL_VCL_2:
+ case WCD938X_CLASSH_CTRL_CCL_1:
+ case WCD938X_CLASSH_CTRL_CCL_2:
+ case WCD938X_CLASSH_CTRL_CCL_3:
+ case WCD938X_CLASSH_CTRL_CCL_4:
+ case WCD938X_CLASSH_CTRL_CCL_5:
+ case WCD938X_CLASSH_BUCK_TMUX_A_D:
+ case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
+ case WCD938X_CLASSH_SPARE:
+ case WCD938X_FLYBACK_EN:
+ case WCD938X_FLYBACK_VNEG_CTRL_1:
+ case WCD938X_FLYBACK_VNEG_CTRL_2:
+ case WCD938X_FLYBACK_VNEG_CTRL_3:
+ case WCD938X_FLYBACK_VNEG_CTRL_4:
+ case WCD938X_FLYBACK_VNEG_CTRL_5:
+ case WCD938X_FLYBACK_VNEG_CTRL_6:
+ case WCD938X_FLYBACK_VNEG_CTRL_7:
+ case WCD938X_FLYBACK_VNEG_CTRL_8:
+ case WCD938X_FLYBACK_VNEG_CTRL_9:
+ case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
+ case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
+ case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
+ case WCD938X_FLYBACK_CTRL_1:
+ case WCD938X_FLYBACK_TEST_CTL:
+ case WCD938X_RX_AUX_SW_CTL:
+ case WCD938X_RX_PA_AUX_IN_CONN:
+ case WCD938X_RX_TIMER_DIV:
+ case WCD938X_RX_OCP_CTL:
+ case WCD938X_RX_OCP_COUNT:
+ case WCD938X_RX_BIAS_EAR_DAC:
+ case WCD938X_RX_BIAS_EAR_AMP:
+ case WCD938X_RX_BIAS_HPH_LDO:
+ case WCD938X_RX_BIAS_HPH_PA:
+ case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
+ case WCD938X_RX_BIAS_HPH_RDAC_LDO:
+ case WCD938X_RX_BIAS_HPH_CNP1:
+ case WCD938X_RX_BIAS_HPH_LOWPOWER:
+ case WCD938X_RX_BIAS_AUX_DAC:
+ case WCD938X_RX_BIAS_AUX_AMP:
+ case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
+ case WCD938X_RX_BIAS_MISC:
+ case WCD938X_RX_BIAS_BUCK_RST:
+ case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
+ case WCD938X_RX_BIAS_FLYB_ERRAMP:
+ case WCD938X_RX_BIAS_FLYB_BUFF:
+ case WCD938X_RX_BIAS_FLYB_MID_RST:
+ case WCD938X_HPH_CNP_EN:
+ case WCD938X_HPH_CNP_WG_CTL:
+ case WCD938X_HPH_CNP_WG_TIME:
+ case WCD938X_HPH_OCP_CTL:
+ case WCD938X_HPH_AUTO_CHOP:
+ case WCD938X_HPH_CHOP_CTL:
+ case WCD938X_HPH_PA_CTL1:
+ case WCD938X_HPH_PA_CTL2:
+ case WCD938X_HPH_L_EN:
+ case WCD938X_HPH_L_TEST:
+ case WCD938X_HPH_L_ATEST:
+ case WCD938X_HPH_R_EN:
+ case WCD938X_HPH_R_TEST:
+ case WCD938X_HPH_R_ATEST:
+ case WCD938X_HPH_RDAC_CLK_CTL1:
+ case WCD938X_HPH_RDAC_CLK_CTL2:
+ case WCD938X_HPH_RDAC_LDO_CTL:
+ case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
+ case WCD938X_HPH_REFBUFF_UHQA_CTL:
+ case WCD938X_HPH_REFBUFF_LP_CTL:
+ case WCD938X_HPH_L_DAC_CTL:
+ case WCD938X_HPH_R_DAC_CTL:
+ case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
+ case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
+ case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
+ case WCD938X_EAR_EAR_EN_REG:
+ case WCD938X_EAR_EAR_PA_CON:
+ case WCD938X_EAR_EAR_SP_CON:
+ case WCD938X_EAR_EAR_DAC_CON:
+ case WCD938X_EAR_EAR_CNP_FSM_CON:
+ case WCD938X_EAR_TEST_CTL:
+ case WCD938X_ANA_NEW_PAGE_REGISTER:
+ case WCD938X_HPH_NEW_ANA_HPH2:
+ case WCD938X_HPH_NEW_ANA_HPH3:
+ case WCD938X_SLEEP_CTL:
+ case WCD938X_SLEEP_WATCHDOG_CTL:
+ case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
+ case WCD938X_MBHC_NEW_CTL_1:
+ case WCD938X_MBHC_NEW_CTL_2:
+ case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
+ case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
+ case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
+ case WCD938X_TX_NEW_AMIC_MUX_CFG:
+ case WCD938X_AUX_AUXPA:
+ case WCD938X_LDORXTX_MODE:
+ case WCD938X_LDORXTX_CONFIG:
+ case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
+ case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
+ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
+ case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
+ case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
+ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
+ case WCD938X_HPH_NEW_INT_PA_MISC1:
+ case WCD938X_HPH_NEW_INT_PA_MISC2:
+ case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
+ case WCD938X_HPH_NEW_INT_HPH_TIMER1:
+ case WCD938X_HPH_NEW_INT_HPH_TIMER2:
+ case WCD938X_HPH_NEW_INT_HPH_TIMER3:
+ case WCD938X_HPH_NEW_INT_HPH_TIMER4:
+ case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
+ case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
+ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
+ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
+ case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
+ case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
+ case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
+ case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
+ case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
+ case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
+ case WCD938X_MBHC_NEW_INT_SPARE_2:
+ case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
+ case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
+ case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
+ case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
+ case WCD938X_AUX_INT_EN_REG:
+ case WCD938X_AUX_INT_PA_CTRL:
+ case WCD938X_AUX_INT_SP_CTRL:
+ case WCD938X_AUX_INT_DAC_CTRL:
+ case WCD938X_AUX_INT_CLK_CTRL:
+ case WCD938X_AUX_INT_TEST_CTRL:
+ case WCD938X_AUX_INT_MISC:
+ case WCD938X_LDORXTX_INT_BIAS:
+ case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
+ case WCD938X_LDORXTX_INT_TEST0:
+ case WCD938X_LDORXTX_INT_STARTUP_TIMER:
+ case WCD938X_LDORXTX_INT_TEST1:
+ case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
+ case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
+ case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
+ case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
+ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
+ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
+ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
+ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
+ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
+ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
+ case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
+ case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
+ case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
+ case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
+ case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
+ case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
+ case WCD938X_DIGITAL_PAGE_REGISTER:
+ case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
+ case WCD938X_DIGITAL_CDC_RST_CTL:
+ case WCD938X_DIGITAL_TOP_CLK_CFG:
+ case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
+ case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
+ case WCD938X_DIGITAL_SWR_RST_EN:
+ case WCD938X_DIGITAL_CDC_PATH_MODE:
+ case WCD938X_DIGITAL_CDC_RX_RST:
+ case WCD938X_DIGITAL_CDC_RX0_CTL:
+ case WCD938X_DIGITAL_CDC_RX1_CTL:
+ case WCD938X_DIGITAL_CDC_RX2_CTL:
+ case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
+ case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
+ case WCD938X_DIGITAL_CDC_COMP_CTL_0:
+ case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
+ case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
+ case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
+ case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
+ case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
+ case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
+ case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
+ case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
+ case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
+ case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
+ case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
+ case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
+ case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
+ case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
+ case WCD938X_DIGITAL_CDC_SWR_CLH:
+ case WCD938X_DIGITAL_SWR_CLH_BYP:
+ case WCD938X_DIGITAL_CDC_TX0_CTL:
+ case WCD938X_DIGITAL_CDC_TX1_CTL:
+ case WCD938X_DIGITAL_CDC_TX2_CTL:
+ case WCD938X_DIGITAL_CDC_TX_RST:
+ case WCD938X_DIGITAL_CDC_REQ_CTL:
+ case WCD938X_DIGITAL_CDC_RST:
+ case WCD938X_DIGITAL_CDC_AMIC_CTL:
+ case WCD938X_DIGITAL_CDC_DMIC_CTL:
+ case WCD938X_DIGITAL_CDC_DMIC1_CTL:
+ case WCD938X_DIGITAL_CDC_DMIC2_CTL:
+ case WCD938X_DIGITAL_CDC_DMIC3_CTL:
+ case WCD938X_DIGITAL_CDC_DMIC4_CTL:
+ case WCD938X_DIGITAL_EFUSE_PRG_CTL:
+ case WCD938X_DIGITAL_EFUSE_CTL:
+ case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
+ case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
+ case WCD938X_DIGITAL_PDM_WD_CTL0:
+ case WCD938X_DIGITAL_PDM_WD_CTL1:
+ case WCD938X_DIGITAL_PDM_WD_CTL2:
+ case WCD938X_DIGITAL_INTR_MODE:
+ case WCD938X_DIGITAL_INTR_MASK_0:
+ case WCD938X_DIGITAL_INTR_MASK_1:
+ case WCD938X_DIGITAL_INTR_MASK_2:
+ case WCD938X_DIGITAL_INTR_CLEAR_0:
+ case WCD938X_DIGITAL_INTR_CLEAR_1:
+ case WCD938X_DIGITAL_INTR_CLEAR_2:
+ case WCD938X_DIGITAL_INTR_LEVEL_0:
+ case WCD938X_DIGITAL_INTR_LEVEL_1:
+ case WCD938X_DIGITAL_INTR_LEVEL_2:
+ case WCD938X_DIGITAL_INTR_SET_0:
+ case WCD938X_DIGITAL_INTR_SET_1:
+ case WCD938X_DIGITAL_INTR_SET_2:
+ case WCD938X_DIGITAL_INTR_TEST_0:
+ case WCD938X_DIGITAL_INTR_TEST_1:
+ case WCD938X_DIGITAL_INTR_TEST_2:
+ case WCD938X_DIGITAL_TX_MODE_DBG_EN:
+ case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
+ case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
+ case WCD938X_DIGITAL_LB_IN_SEL_CTL:
+ case WCD938X_DIGITAL_LOOP_BACK_MODE:
+ case WCD938X_DIGITAL_SWR_DAC_TEST:
+ case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
+ case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
+ case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
+ case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
+ case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
+ case WCD938X_DIGITAL_PAD_CTL_SWR_0:
+ case WCD938X_DIGITAL_PAD_CTL_SWR_1:
+ case WCD938X_DIGITAL_I2C_CTL:
+ case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
+ case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
+ case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
+ case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
+ case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
+ case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
+ case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
+ case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
+ case WCD938X_DIGITAL_PAD_INP_DIS_0:
+ case WCD938X_DIGITAL_PAD_INP_DIS_1:
+ case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
+ case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
+ case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
+ case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
+ case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
+ case WCD938X_DIGITAL_GPIO_MODE:
+ case WCD938X_DIGITAL_PIN_CTL_OE:
+ case WCD938X_DIGITAL_PIN_CTL_DATA_0:
+ case WCD938X_DIGITAL_PIN_CTL_DATA_1:
+ case WCD938X_DIGITAL_DIG_DEBUG_CTL:
+ case WCD938X_DIGITAL_DIG_DEBUG_EN:
+ case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
+ case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
+ case WCD938X_DIGITAL_SSP_DBG:
+ case WCD938X_DIGITAL_SPARE_0:
+ case WCD938X_DIGITAL_SPARE_1:
+ case WCD938X_DIGITAL_SPARE_2:
+ case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
+ case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
+ case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
+ case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
+ case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
+ case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
+ case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
+ case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
+ case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
+ return true;
+ }
+
+ return false;
+}
+
+static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WCD938X_ANA_MBHC_RESULT_1:
+ case WCD938X_ANA_MBHC_RESULT_2:
+ case WCD938X_ANA_MBHC_RESULT_3:
+ case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
+ case WCD938X_TX_1_2_SAR2_ERR:
+ case WCD938X_TX_1_2_SAR1_ERR:
+ case WCD938X_TX_3_4_SAR4_ERR:
+ case WCD938X_TX_3_4_SAR3_ERR:
+ case WCD938X_HPH_L_STATUS:
+ case WCD938X_HPH_R_STATUS:
+ case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
+ case WCD938X_EAR_STATUS_REG_1:
+ case WCD938X_EAR_STATUS_REG_2:
+ case WCD938X_MBHC_NEW_FSM_STATUS:
+ case WCD938X_MBHC_NEW_ADC_RESULT:
+ case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
+ case WCD938X_AUX_INT_STATUS_REG:
+ case WCD938X_LDORXTX_INT_STATUS:
+ case WCD938X_DIGITAL_CHIP_ID0:
+ case WCD938X_DIGITAL_CHIP_ID1:
+ case WCD938X_DIGITAL_CHIP_ID2:
+ case WCD938X_DIGITAL_CHIP_ID3:
+ case WCD938X_DIGITAL_INTR_STATUS_0:
+ case WCD938X_DIGITAL_INTR_STATUS_1:
+ case WCD938X_DIGITAL_INTR_STATUS_2:
+ case WCD938X_DIGITAL_SWR_HM_TEST_0:
+ case WCD938X_DIGITAL_SWR_HM_TEST_1:
+ case WCD938X_DIGITAL_EFUSE_T_DATA_0:
+ case WCD938X_DIGITAL_EFUSE_T_DATA_1:
+ case WCD938X_DIGITAL_PIN_STATUS_0:
+ case WCD938X_DIGITAL_PIN_STATUS_1:
+ case WCD938X_DIGITAL_MODE_STATUS_0:
+ case WCD938X_DIGITAL_MODE_STATUS_1:
+ case WCD938X_DIGITAL_EFUSE_REG_0:
+ case WCD938X_DIGITAL_EFUSE_REG_1:
+ case WCD938X_DIGITAL_EFUSE_REG_2:
+ case WCD938X_DIGITAL_EFUSE_REG_3:
+ case WCD938X_DIGITAL_EFUSE_REG_4:
+ case WCD938X_DIGITAL_EFUSE_REG_5:
+ case WCD938X_DIGITAL_EFUSE_REG_6:
+ case WCD938X_DIGITAL_EFUSE_REG_7:
+ case WCD938X_DIGITAL_EFUSE_REG_8:
+ case WCD938X_DIGITAL_EFUSE_REG_9:
+ case WCD938X_DIGITAL_EFUSE_REG_10:
+ case WCD938X_DIGITAL_EFUSE_REG_11:
+ case WCD938X_DIGITAL_EFUSE_REG_12:
+ case WCD938X_DIGITAL_EFUSE_REG_13:
+ case WCD938X_DIGITAL_EFUSE_REG_14:
+ case WCD938X_DIGITAL_EFUSE_REG_15:
+ case WCD938X_DIGITAL_EFUSE_REG_16:
+ case WCD938X_DIGITAL_EFUSE_REG_17:
+ case WCD938X_DIGITAL_EFUSE_REG_18:
+ case WCD938X_DIGITAL_EFUSE_REG_19:
+ case WCD938X_DIGITAL_EFUSE_REG_20:
+ case WCD938X_DIGITAL_EFUSE_REG_21:
+ case WCD938X_DIGITAL_EFUSE_REG_22:
+ case WCD938X_DIGITAL_EFUSE_REG_23:
+ case WCD938X_DIGITAL_EFUSE_REG_24:
+ case WCD938X_DIGITAL_EFUSE_REG_25:
+ case WCD938X_DIGITAL_EFUSE_REG_26:
+ case WCD938X_DIGITAL_EFUSE_REG_27:
+ case WCD938X_DIGITAL_EFUSE_REG_28:
+ case WCD938X_DIGITAL_EFUSE_REG_29:
+ case WCD938X_DIGITAL_EFUSE_REG_30:
+ case WCD938X_DIGITAL_EFUSE_REG_31:
+ return true;
+ }
+ return false;
+}
+
+static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
+{
+ bool ret;
+
+ ret = wcd938x_readonly_register(dev, reg);
+ if (!ret)
+ return wcd938x_rdwr_register(dev, reg);
+
+ return ret;
+}
+
+static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
+{
+ return wcd938x_rdwr_register(dev, reg);
+}
+
+static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
+{
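+ /* Read-only status and result registers must always bypass the register cache */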
+ if (reg <= WCD938X_BASE_ADDRESS)
+ return false;
+
+ if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
+ return true;
+
+ if (wcd938x_readonly_register(dev, reg))
+ return true;
+
+ return false;
+}
+
+static struct regmap_config wcd938x_regmap_config = {
+ .name = "wcd938x_csr",
+ .reg_bits = 32,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wcd938x_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
+ .max_register = WCD938X_MAX_REGISTER,
+ .readable_reg = wcd938x_readable_register,
+ .writeable_reg = wcd938x_writeable_register,
+ .volatile_reg = wcd938x_volatile_register,
+ .can_multi_write = true,
+};
+
+static const struct regmap_irq wcd938x_irqs[WCD938X_NUM_IRQS] = {
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_PRESS_DET, 0, 0x01),
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_RELEASE_DET, 0, 0x02),
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_ELECT_INS_REM_DET, 0, 0x04),
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, 0, 0x08),
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_SW_DET, 0, 0x10),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHR_OCP_INT, 0, 0x20),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHR_CNP_INT, 0, 0x40),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHL_OCP_INT, 0, 0x80),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHL_CNP_INT, 1, 0x01),
+ REGMAP_IRQ_REG(WCD938X_IRQ_EAR_CNP_INT, 1, 0x02),
+ REGMAP_IRQ_REG(WCD938X_IRQ_EAR_SCD_INT, 1, 0x04),
+ REGMAP_IRQ_REG(WCD938X_IRQ_AUX_CNP_INT, 1, 0x08),
+ REGMAP_IRQ_REG(WCD938X_IRQ_AUX_SCD_INT, 1, 0x10),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHL_PDM_WD_INT, 1, 0x20),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHR_PDM_WD_INT, 1, 0x40),
+ REGMAP_IRQ_REG(WCD938X_IRQ_AUX_PDM_WD_INT, 1, 0x80),
+ REGMAP_IRQ_REG(WCD938X_IRQ_LDORT_SCD_INT, 2, 0x01),
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_MOISTURE_INT, 2, 0x02),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHL_SURGE_DET_INT, 2, 0x04),
+ REGMAP_IRQ_REG(WCD938X_IRQ_HPHR_SURGE_DET_INT, 2, 0x08),
+};
+
+static struct regmap_irq_chip wcd938x_regmap_irq_chip = {
+ .name = "wcd938x",
+ .irqs = wcd938x_irqs,
+ .num_irqs = ARRAY_SIZE(wcd938x_irqs),
+ .num_regs = 3,
+ .status_base = WCD938X_DIGITAL_INTR_STATUS_0,
+ .mask_base = WCD938X_DIGITAL_INTR_MASK_0,
+ .type_base = WCD938X_DIGITAL_INTR_LEVEL_0,
+ .ack_base = WCD938X_DIGITAL_INTR_CLEAR_0,
+ .use_ack = 1,
+ .runtime_pm = true,
+ .irq_drv_data = NULL,
+};
+
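+/* Map an ADC power mode to the SoundWire TX clock rate it uses. */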
+static int wcd938x_get_clk_rate(int mode)
+{
+ int rate;
+
+ switch (mode) {
+ case ADC_MODE_ULP2:
+ rate = SWR_CLK_RATE_0P6MHZ;
+ break;
+ case ADC_MODE_ULP1:
+ rate = SWR_CLK_RATE_1P2MHZ;
+ break;
+ case ADC_MODE_LP:
+ rate = SWR_CLK_RATE_4P8MHZ;
+ break;
+ case ADC_MODE_NORMAL:
+ case ADC_MODE_LO_HIF:
+ case ADC_MODE_HIFI:
+ case ADC_MODE_INVALID:
+ default:
+ rate = SWR_CLK_RATE_9P6MHZ;
+ break;
+ }
+
+ return rate;
+}
+
+static int wcd938x_set_swr_clk_rate(struct snd_soc_component *component, int rate, int bank)
+{
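+ /* Bank 0 uses the lower nibble of SWR_TX_CLK_RATE, bank 1 the upper nibble. */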
+ u8 mask = (bank ? 0xF0 : 0x0F);
+ u8 val = 0;
+
+ switch (rate) {
+ case SWR_CLK_RATE_0P6MHZ:
+ val = (bank ? 0x60 : 0x06);
+ break;
+ case SWR_CLK_RATE_1P2MHZ:
+ val = (bank ? 0x50 : 0x05);
+ break;
+ case SWR_CLK_RATE_2P4MHZ:
+ val = (bank ? 0x30 : 0x03);
+ break;
+ case SWR_CLK_RATE_4P8MHZ:
+ val = (bank ? 0x10 : 0x01);
+ break;
+ case SWR_CLK_RATE_9P6MHZ:
+ default:
+ val = 0x00;
+ break;
+ }
+ snd_soc_component_update_bits(component, WCD938X_DIGITAL_SWR_TX_CLK_RATE,
+ mask, val);
+
+ return 0;
+}
+
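+/* One-time register initialisation: bias/sleep bring-up and fixed analog/digital defaults. */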
+static int wcd938x_io_init(struct wcd938x_priv *wcd938x)
+{
+ struct regmap *rm = wcd938x->regmap;
+
+ regmap_update_bits(rm, WCD938X_SLEEP_CTL, 0x0E, 0x0E);
+ regmap_update_bits(rm, WCD938X_SLEEP_CTL, 0x80, 0x80);
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1010);
+ regmap_update_bits(rm, WCD938X_SLEEP_CTL, 0x40, 0x40);
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1010);
+ regmap_update_bits(rm, WCD938X_LDORXTX_CONFIG, 0x10, 0x00);
+ regmap_update_bits(rm, WCD938X_BIAS_VBG_FINE_ADJ,
+ 0xF0, 0x80);
+ regmap_update_bits(rm, WCD938X_ANA_BIAS, 0x80, 0x80);
+ regmap_update_bits(rm, WCD938X_ANA_BIAS, 0x40, 0x40);
+ /* 10 msec delay as per HW requirement */
+ usleep_range(10000, 10010);
+
+ regmap_update_bits(rm, WCD938X_ANA_BIAS, 0x40, 0x00);
+ regmap_update_bits(rm, WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x00);
+ regmap_update_bits(rm, WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW,
+ 0x1F, 0x15);
+ regmap_update_bits(rm, WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW,
+ 0x1F, 0x15);
+ regmap_update_bits(rm, WCD938X_HPH_REFBUFF_UHQA_CTL,
+ 0xC0, 0x80);
+ regmap_update_bits(rm, WCD938X_DIGITAL_CDC_DMIC_CTL,
+ 0x02, 0x02);
+
+ regmap_update_bits(rm, WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP,
+ 0xFF, 0x14);
+ regmap_update_bits(rm, WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP,
+ 0x1F, 0x08);
+
+ regmap_update_bits(rm, WCD938X_DIGITAL_TX_REQ_FB_CTL_0, 0xFF, 0x55);
+ regmap_update_bits(rm, WCD938X_DIGITAL_TX_REQ_FB_CTL_1, 0xFF, 0x44);
+ regmap_update_bits(rm, WCD938X_DIGITAL_TX_REQ_FB_CTL_2, 0xFF, 0x11);
+ regmap_update_bits(rm, WCD938X_DIGITAL_TX_REQ_FB_CTL_3, 0xFF, 0x00);
+ regmap_update_bits(rm, WCD938X_DIGITAL_TX_REQ_FB_CTL_4, 0xFF, 0x00);
+
+ /* Set Noise Filter Resistor value */
+ regmap_update_bits(rm, WCD938X_MICB1_TEST_CTL_1, 0xE0, 0xE0);
+ regmap_update_bits(rm, WCD938X_MICB2_TEST_CTL_1, 0xE0, 0xE0);
+ regmap_update_bits(rm, WCD938X_MICB3_TEST_CTL_1, 0xE0, 0xE0);
+ regmap_update_bits(rm, WCD938X_MICB4_TEST_CTL_1, 0xE0, 0xE0);
+
+ regmap_update_bits(rm, WCD938X_TX_3_4_TEST_BLK_EN2, 0x01, 0x00);
+ regmap_update_bits(rm, WCD938X_HPH_SURGE_HPHLR_SURGE_EN, 0xC0, 0xC0);
+
+ return 0;
+}
+
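+/* Add or remove the channel's mask bits in the SoundWire port configuration. */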
+static int wcd938x_sdw_connect_port(struct wcd938x_sdw_ch_info *ch_info,
+ struct sdw_port_config *port_config,
+ u32 mstr_port_num,
+ u8 enable)
+{
+ u8 ch_mask, port_num;
+
+ port_num = ch_info->port_num;
+ ch_mask = ch_info->ch_mask;
+
+ port_config->num = port_num;
+
+ if (enable)
+ port_config->ch_mask |= ch_mask;
+ else
+ port_config->ch_mask &= ~ch_mask;
+
+ return 0;
+}
+
+static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 ch_id, u8 enable)
+{
+ u8 port_num, mstr_port_num;
+
+ port_num = wcd->ch_info[ch_id].port_num;
+ mstr_port_num = wcd->port_map[port_num - 1];
+
+ return wcd938x_sdw_connect_port(&wcd->ch_info[ch_id],
+ &wcd->port_config[port_num],
+ mstr_port_num,
+ enable);
+}
+
+static int wcd938x_codec_enable_rxclk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_RX_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_RX_BIAS_EN_MASK, 1);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_RX0_CTL,
+ WCD938X_DEM_DITHER_ENABLE_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_RX1_CTL,
+ WCD938X_DEM_DITHER_ENABLE_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_RX2_CTL,
+ WCD938X_DEM_DITHER_ENABLE_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_RX_DIV2_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component, WCD938X_AUX_AUXPA,
+ WCD938X_AUXPA_CLK_EN_MASK, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_VNEG_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_VPOS_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_RX_BIAS_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_RX_DIV2_CLK_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_RX_CLK_EN_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
+static int wcd938x_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD0_CLK_EN_MASK, 0x01);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,
+ WCD938X_HPHL_RX_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_RDAC_CLK_CTL1,
+ WCD938X_CHOP_CLK_EN_MASK, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L,
+ WCD938X_HPH_RES_DIV_MASK, 0x02);
+ if (wcd938x->comp1_enable) {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0,
+ WCD938X_HPHL_COMP_EN_MASK, 1);
+ /* 5msec compander delay as per HW requirement */
+ if (!wcd938x->comp2_enable || (snd_soc_component_read(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0) & 0x01))
+ usleep_range(5000, 5010);
+ snd_soc_component_write_field(component, WCD938X_HPH_NEW_INT_HPH_TIMER1,
+ WCD938X_AUTOCHOP_TIMER_EN, 0);
+ } else {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0,
+ WCD938X_HPHL_COMP_EN_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_L_EN,
+ WCD938X_GAIN_SRC_SEL_MASK,
+ WCD938X_GAIN_SRC_SEL_REGISTER);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* restore the HPHL resistor divider configured at POST_PMU */
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L,
+ WCD938X_HPH_RES_DIV_MASK, 0x01);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD1_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,
+ WCD938X_HPHR_RX_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_RDAC_CLK_CTL1,
+ WCD938X_CHOP_CLK_EN_MASK, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R,
+ WCD938X_HPH_RES_DIV_MASK, 0x02);
+ if (wcd938x->comp2_enable) {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0,
+ WCD938X_HPHR_COMP_EN_MASK, 1);
+ /* 5msec compander delay as per HW requirement */
+ if (!wcd938x->comp1_enable ||
+ (snd_soc_component_read(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0) & 0x02))
+ usleep_range(5000, 5010);
+ snd_soc_component_write_field(component, WCD938X_HPH_NEW_INT_HPH_TIMER1,
+ WCD938X_AUTOCHOP_TIMER_EN, 0);
+ } else {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0,
+ WCD938X_HPHR_COMP_EN_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_R_EN,
+ WCD938X_GAIN_SRC_SEL_MASK,
+ WCD938X_GAIN_SRC_SEL_REGISTER);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R,
+ WCD938X_HPH_RES_DIV_MASK, 0x01);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd938x->ear_rx_path =
+ snd_soc_component_read(
+ component, WCD938X_DIGITAL_CDC_EAR_PATH_CTL);
+ if (wcd938x->ear_rx_path & EAR_RX_PATH_AUX) {
+ snd_soc_component_write_field(component,
+ WCD938X_EAR_EAR_DAC_CON,
+ WCD938X_DAC_SAMPLE_EDGE_SEL_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,
+ WCD938X_AUX_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD2_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_ANA_EAR_COMPANDER_CTL,
+ WCD938X_GAIN_OVRD_REG_MASK, 1);
+ } else {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,
+ WCD938X_HPHL_RX_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD0_CLK_EN_MASK, 1);
+ if (wcd938x->comp1_enable)
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0,
+ WCD938X_HPHL_COMP_EN_MASK, 1);
+ }
+ /* 5 msec delay as per HW requirement */
+ usleep_range(5000, 5010);
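+ /* Keep flyback current detection off while an EAR/AUX path is active (refcounted) */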
+ if (wcd938x->flyback_cur_det_disable == 0)
+ snd_soc_component_write_field(component, WCD938X_FLYBACK_EN,
+ WCD938X_EN_CUR_DET_MASK, 0);
+ wcd938x->flyback_cur_det_disable++;
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info,
+ WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_EAR,
+ wcd938x->hph_mode);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (wcd938x->ear_rx_path & EAR_RX_PATH_AUX) {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,
+ WCD938X_AUX_EN_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD2_CLK_EN_MASK, 0);
+ } else {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,
+ WCD938X_HPHL_RX_EN_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD0_CLK_EN_MASK, 0);
+ if (wcd938x->comp1_enable)
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_COMP_CTL_0,
+ WCD938X_HPHL_COMP_EN_MASK, 0);
+ }
+ snd_soc_component_write_field(component, WCD938X_ANA_EAR_COMPANDER_CTL,
+ WCD938X_GAIN_OVRD_REG_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_EAR_EAR_DAC_CON,
+ WCD938X_DAC_SAMPLE_EDGE_SEL_MASK, 1);
+ break;
+ }
+ return 0;
+}
+
+static int wcd938x_codec_aux_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_RX_DIV4_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_RXD2_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,
+ WCD938X_AUX_EN_MASK, 1);
+ if (wcd938x->flyback_cur_det_disable == 0)
+ snd_soc_component_write_field(component, WCD938X_FLYBACK_EN,
+ WCD938X_EN_CUR_DET_MASK, 0);
+ wcd938x->flyback_cur_det_disable++;
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info,
+ WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_AUX,
+ wcd938x->hph_mode);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_RX_DIV4_CLK_EN_MASK, 0);
+ break;
+ }
+ return ret;
+}
+
+static int wcd938x_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int hph_mode = wcd938x->hph_mode;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (wcd938x->ldoh)
+ snd_soc_component_write_field(component, WCD938X_LDOH_MODE,
+ WCD938X_LDOH_EN_MASK, 1);
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info, WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_HPHR, hph_mode);
+ wcd_clsh_set_hph_mode(wcd938x->clsh_info, CLS_H_HIFI);
+
+ if (hph_mode == CLS_H_LP || hph_mode == CLS_H_LOHIFI ||
+ hph_mode == CLS_H_ULP) {
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_REFBUFF_LP_CTL,
+ WCD938X_PREREF_FLIT_BYPASS_MASK, 1);
+ }
+ snd_soc_component_write_field(component, WCD938X_ANA_HPH,
+ WCD938X_HPHR_REF_EN_MASK, 1);
+ wcd_clsh_set_hph_mode(wcd938x->clsh_info, hph_mode);
+ /* 100 usec delay as per HW requirement */
+ usleep_range(100, 110);
+ set_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_PDM_WD_CTL1,
+ WCD938X_PDM_WD_EN_MASK, 0x3);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * 7ms sleep is required if compander is enabled as per
+ * HW requirement. If compander is disabled, then
+ * 20ms delay is required.
+ */
+ if (test_bit(HPH_PA_DELAY, &wcd938x->status_mask)) {
+ if (!wcd938x->comp2_enable)
+ usleep_range(20000, 20100);
+ else
+ usleep_range(7000, 7100);
+
+ if (hph_mode == CLS_H_LP || hph_mode == CLS_H_LOHIFI ||
+ hph_mode == CLS_H_ULP)
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_REFBUFF_LP_CTL,
+ WCD938X_PREREF_FLIT_BYPASS_MASK, 0);
+ clear_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ }
+ snd_soc_component_write_field(component, WCD938X_HPH_NEW_INT_HPH_TIMER1,
+ WCD938X_AUTOCHOP_TIMER_EN, 1);
+ if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI ||
+ hph_mode == CLS_AB_LP || hph_mode == CLS_AB_LOHIFI)
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_REGULATOR_MODE_MASK,
+ WCD938X_REGULATOR_MODE_CLASS_AB);
+ enable_irq(wcd938x->hphr_pdm_wd_int);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ disable_irq_nosync(wcd938x->hphr_pdm_wd_int);
+ /*
+ * 7ms sleep is required if compander is enabled as per
+ * HW requirement. If compander is disabled, then
+ * 20ms delay is required.
+ */
+ if (!wcd938x->comp2_enable)
+ usleep_range(20000, 20100);
+ else
+ usleep_range(7000, 7100);
+ snd_soc_component_write_field(component, WCD938X_ANA_HPH,
+ WCD938X_HPHR_EN_MASK, 0);
+ set_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /*
+ * 7ms sleep is required if compander is enabled as per
+ * HW requirement. If compander is disabled, then
+ * 20ms delay is required.
+ */
+ if (test_bit(HPH_PA_DELAY, &wcd938x->status_mask)) {
+ if (!wcd938x->comp2_enable)
+ usleep_range(20000, 20100);
+ else
+ usleep_range(7000, 7100);
+ clear_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ }
+ snd_soc_component_write_field(component, WCD938X_ANA_HPH,
+ WCD938X_HPHR_REF_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL1,
+ WCD938X_PDM_WD_EN_MASK, 0);
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_HPHR, hph_mode);
+ if (wcd938x->ldoh)
+ snd_soc_component_write_field(component, WCD938X_LDOH_MODE,
+ WCD938X_LDOH_EN_MASK, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int hph_mode = wcd938x->hph_mode;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (wcd938x->ldoh)
+ snd_soc_component_write_field(component, WCD938X_LDOH_MODE,
+ WCD938X_LDOH_EN_MASK, 1);
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info, WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_HPHL, hph_mode);
+ wcd_clsh_set_hph_mode(wcd938x->clsh_info, CLS_H_HIFI);
+ if (hph_mode == CLS_H_LP || hph_mode == CLS_H_LOHIFI ||
+ hph_mode == CLS_H_ULP) {
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_REFBUFF_LP_CTL,
+ WCD938X_PREREF_FLIT_BYPASS_MASK, 1);
+ }
+ snd_soc_component_write_field(component, WCD938X_ANA_HPH,
+ WCD938X_HPHL_REF_EN_MASK, 1);
+ wcd_clsh_set_hph_mode(wcd938x->clsh_info, hph_mode);
+ /* 100 usec delay as per HW requirement */
+ usleep_range(100, 110);
+ set_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_PDM_WD_CTL0,
+ WCD938X_PDM_WD_EN_MASK, 0x3);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * 7ms sleep is required if compander is enabled as per
+ * HW requirement. If compander is disabled, then
+ * 20ms delay is required.
+ */
+ if (test_bit(HPH_PA_DELAY, &wcd938x->status_mask)) {
+ if (!wcd938x->comp1_enable)
+ usleep_range(20000, 20100);
+ else
+ usleep_range(7000, 7100);
+ if (hph_mode == CLS_H_LP || hph_mode == CLS_H_LOHIFI ||
+ hph_mode == CLS_H_ULP)
+ snd_soc_component_write_field(component,
+ WCD938X_HPH_REFBUFF_LP_CTL,
+ WCD938X_PREREF_FLIT_BYPASS_MASK, 0);
+ clear_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ }
+
+ snd_soc_component_write_field(component, WCD938X_HPH_NEW_INT_HPH_TIMER1,
+ WCD938X_AUTOCHOP_TIMER_EN, 1);
+ if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI ||
+ hph_mode == CLS_AB_LP || hph_mode == CLS_AB_LOHIFI)
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_REGULATOR_MODE_MASK,
+ WCD938X_REGULATOR_MODE_CLASS_AB);
+ enable_irq(wcd938x->hphl_pdm_wd_int);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ disable_irq_nosync(wcd938x->hphl_pdm_wd_int);
+ /*
+ * 7ms sleep is required if compander is enabled as per
+ * HW requirement. If compander is disabled, then
+ * 20ms delay is required.
+ */
+ if (!wcd938x->comp1_enable)
+ usleep_range(20000, 20100);
+ else
+ usleep_range(7000, 7100);
+ snd_soc_component_write_field(component, WCD938X_ANA_HPH,
+ WCD938X_HPHL_EN_MASK, 0);
+ set_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /*
+ * 7ms sleep is required if compander is enabled as per
+ * HW requirement. If compander is disabled, then
+ * 20ms delay is required.
+ */
+ if (test_bit(HPH_PA_DELAY, &wcd938x->status_mask)) {
+ if (!wcd938x->comp1_enable)
+ usleep_range(20000, 20100);
+ else
+ usleep_range(7000, 7100);
+ clear_bit(HPH_PA_DELAY, &wcd938x->status_mask);
+ }
+ snd_soc_component_write_field(component, WCD938X_ANA_HPH,
+ WCD938X_HPHL_REF_EN_MASK, 0);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL0,
+ WCD938X_PDM_WD_EN_MASK, 0);
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_HPHL, hph_mode);
+ if (wcd938x->ldoh)
+ snd_soc_component_write_field(component, WCD938X_LDOH_MODE,
+ WCD938X_LDOH_EN_MASK, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_enable_aux_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int hph_mode = wcd938x->hph_mode;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL2,
+ WCD938X_AUX_PDM_WD_EN_MASK, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1010);
+ if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI ||
+ hph_mode == CLS_AB_LP || hph_mode == CLS_AB_LOHIFI)
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_REGULATOR_MODE_MASK,
+ WCD938X_REGULATOR_MODE_CLASS_AB);
+ enable_irq(wcd938x->aux_pdm_wd_int);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ disable_irq_nosync(wcd938x->aux_pdm_wd_int);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1010);
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL2,
+ WCD938X_AUX_PDM_WD_EN_MASK, 0);
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info,
+ WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_AUX,
+ hph_mode);
+
+ wcd938x->flyback_cur_det_disable--;
+ if (wcd938x->flyback_cur_det_disable == 0)
+ snd_soc_component_write_field(component, WCD938X_FLYBACK_EN,
+ WCD938X_EN_CUR_DET_MASK, 1);
+ break;
+ }
+ return ret;
+}
+
+static int wcd938x_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int hph_mode = wcd938x->hph_mode;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /*
+ * Enable watchdog interrupt for HPHL or AUX
+ * depending on mux value
+ */
+ wcd938x->ear_rx_path = snd_soc_component_read(component,
+ WCD938X_DIGITAL_CDC_EAR_PATH_CTL);
+ if (wcd938x->ear_rx_path & EAR_RX_PATH_AUX)
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL2,
+ WCD938X_AUX_PDM_WD_EN_MASK, 1);
+ else
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_PDM_WD_CTL0,
+ WCD938X_PDM_WD_EN_MASK, 0x3);
+ if (!wcd938x->comp1_enable)
+ snd_soc_component_write_field(component,
+ WCD938X_ANA_EAR_COMPANDER_CTL,
+ WCD938X_GAIN_OVRD_REG_MASK, 1);
+
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* 6 msec delay as per HW requirement */
+ usleep_range(6000, 6010);
+ if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI ||
+ hph_mode == CLS_AB_LP || hph_mode == CLS_AB_LOHIFI)
+ snd_soc_component_write_field(component, WCD938X_ANA_RX_SUPPLIES,
+ WCD938X_REGULATOR_MODE_MASK,
+ WCD938X_REGULATOR_MODE_CLASS_AB);
+ if (wcd938x->ear_rx_path & EAR_RX_PATH_AUX)
+ enable_irq(wcd938x->aux_pdm_wd_int);
+ else
+ enable_irq(wcd938x->hphl_pdm_wd_int);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (wcd938x->ear_rx_path & EAR_RX_PATH_AUX)
+ disable_irq_nosync(wcd938x->aux_pdm_wd_int);
+ else
+ disable_irq_nosync(wcd938x->hphl_pdm_wd_int);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (!wcd938x->comp1_enable)
+ snd_soc_component_write_field(component, WCD938X_ANA_EAR_COMPANDER_CTL,
+ WCD938X_GAIN_OVRD_REG_MASK, 0);
+ /* 7 msec delay as per HW requirement */
+ usleep_range(7000, 7010);
+ if (wcd938x->ear_rx_path & EAR_RX_PATH_AUX)
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL2,
+ WCD938X_AUX_PDM_WD_EN_MASK, 0);
+ else
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_PDM_WD_CTL0,
+ WCD938X_PDM_WD_EN_MASK, 0);
+
+ wcd_clsh_ctrl_set_state(wcd938x->clsh_info, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_EAR, hph_mode);
+
+ wcd938x->flyback_cur_det_disable--;
+ if (wcd938x->flyback_cur_det_disable == 0)
+ snd_soc_component_write_field(component, WCD938X_FLYBACK_EN,
+ WCD938X_EN_CUR_DET_MASK, 1);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ u16 dmic_clk_reg, dmic_clk_en_reg;
+ u8 dmic_sel_mask, dmic_clk_mask;
+
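+ /* Each pair of widget shifts (0/1, 2/3, ...) maps to one DMIC clock control and rate field. */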
+ switch (w->shift) {
+ case 0:
+ case 1:
+ dmic_clk_reg = WCD938X_DIGITAL_CDC_DMIC_RATE_1_2;
+ dmic_clk_en_reg = WCD938X_DIGITAL_CDC_DMIC1_CTL;
+ dmic_clk_mask = WCD938X_DMIC1_RATE_MASK;
+ dmic_sel_mask = WCD938X_AMIC1_IN_SEL_MASK;
+ break;
+ case 2:
+ case 3:
+ dmic_clk_reg = WCD938X_DIGITAL_CDC_DMIC_RATE_1_2;
+ dmic_clk_en_reg = WCD938X_DIGITAL_CDC_DMIC2_CTL;
+ dmic_clk_mask = WCD938X_DMIC2_RATE_MASK;
+ dmic_sel_mask = WCD938X_AMIC3_IN_SEL_MASK;
+ break;
+ case 4:
+ case 5:
+ dmic_clk_reg = WCD938X_DIGITAL_CDC_DMIC_RATE_3_4;
+ dmic_clk_en_reg = WCD938X_DIGITAL_CDC_DMIC3_CTL;
+ dmic_clk_mask = WCD938X_DMIC3_RATE_MASK;
+ dmic_sel_mask = WCD938X_AMIC4_IN_SEL_MASK;
+ break;
+ case 6:
+ case 7:
+ dmic_clk_reg = WCD938X_DIGITAL_CDC_DMIC_RATE_3_4;
+ dmic_clk_en_reg = WCD938X_DIGITAL_CDC_DMIC4_CTL;
+ dmic_clk_mask = WCD938X_DMIC4_RATE_MASK;
+ dmic_sel_mask = WCD938X_AMIC5_IN_SEL_MASK;
+ break;
+ default:
+ dev_err(component->dev, "%s: Invalid DMIC Selection\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_AMIC_CTL,
+ dmic_sel_mask,
+ WCD938X_AMIC1_IN_SEL_DMIC);
+ /* 250us sleep as per HW requirement */
+ usleep_range(250, 260);
+ /* Setting DMIC clock rate to 2.4MHz */
+ snd_soc_component_write_field(component, dmic_clk_reg,
+ dmic_clk_mask,
+ WCD938X_DMIC4_RATE_2P4MHZ);
+ snd_soc_component_write_field(component, dmic_clk_en_reg,
+ WCD938X_DMIC_CLK_EN_MASK, 1);
+ /* enable clock scaling */
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_DMIC_CTL,
+ WCD938X_DMIC_CLK_SCALING_EN_MASK, 0x3);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_AMIC_CTL,
+ dmic_sel_mask, WCD938X_AMIC1_IN_SEL_AMIC);
+ snd_soc_component_write_field(component, dmic_clk_en_reg,
+ WCD938X_DMIC_CLK_EN_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
+static int wcd938x_tx_swr_ctrl(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int bank;
+ int rate;
+
+ /* Select the inactive clock-rate bank; both banks are written below. */
+ bank = (wcd938x_swr_get_current_bank(wcd938x->sdw_priv[AIF1_CAP]->sdev)) ? 0 : 1;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (strnstr(w->name, "ADC", sizeof("ADC"))) {
+ int i = 0, mode = 0;
+
+ if (test_bit(WCD_ADC1, &wcd938x->status_mask))
+ mode |= tx_mode_bit[wcd938x->tx_mode[WCD_ADC1]];
+ if (test_bit(WCD_ADC2, &wcd938x->status_mask))
+ mode |= tx_mode_bit[wcd938x->tx_mode[WCD_ADC2]];
+ if (test_bit(WCD_ADC3, &wcd938x->status_mask))
+ mode |= tx_mode_bit[wcd938x->tx_mode[WCD_ADC3]];
+ if (test_bit(WCD_ADC4, &wcd938x->status_mask))
+ mode |= tx_mode_bit[wcd938x->tx_mode[WCD_ADC4]];
+
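+ /* Derive a single SoundWire clock rate from the combined modes of the active ADCs */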
+ if (mode != 0) {
+ for (i = 0; i < ADC_MODE_ULP2; i++) {
+ if (mode & (1 << i)) {
+ i++;
+ break;
+ }
+ }
+ }
+ rate = wcd938x_get_clk_rate(i);
+ wcd938x_set_swr_clk_rate(component, rate, bank);
+ /* Copy clk settings to active bank */
+ wcd938x_set_swr_clk_rate(component, rate, !bank);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (strnstr(w->name, "ADC", sizeof("ADC"))) {
+ rate = wcd938x_get_clk_rate(ADC_MODE_INVALID);
+ wcd938x_set_swr_clk_rate(component, rate, !bank);
+ wcd938x_set_swr_clk_rate(component, rate, bank);
+ }
+ break;
+ }
+
+ return 0;
+}
+
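+/* Translate an ADC mode enum value into the TX_ANA_MODE register field value. */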
+static int wcd938x_get_adc_mode(int val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case ADC_MODE_INVALID:
+ ret = ADC_MODE_VAL_NORMAL;
+ break;
+ case ADC_MODE_HIFI:
+ ret = ADC_MODE_VAL_HIFI;
+ break;
+ case ADC_MODE_LO_HIF:
+ ret = ADC_MODE_VAL_LO_HIF;
+ break;
+ case ADC_MODE_NORMAL:
+ ret = ADC_MODE_VAL_NORMAL;
+ break;
+ case ADC_MODE_LP:
+ ret = ADC_MODE_VAL_LP;
+ break;
+ case ADC_MODE_ULP1:
+ ret = ADC_MODE_VAL_ULP1;
+ break;
+ case ADC_MODE_ULP2:
+ ret = ADC_MODE_VAL_ULP2;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int wcd938x_codec_enable_adc(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_TX_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_TX_DIV2_CLK_EN_MASK, 1);
+ set_bit(w->shift, &wcd938x->status_mask);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_write_field(component, WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_TX_CLK_EN_MASK, 0);
+ clear_bit(w->shift, &wcd938x->status_mask);
+ break;
+ }
+
+ return 0;
+}
+
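+/* Set or clear the HPF initialisation bit for the given TX channel. */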
+static void wcd938x_tx_channel_config(struct snd_soc_component *component,
+ int channel, int mode)
+{
+ int reg, mask;
+
+ switch (channel) {
+ case 0:
+ reg = WCD938X_ANA_TX_CH2;
+ mask = WCD938X_HPF1_INIT_MASK;
+ break;
+ case 1:
+ reg = WCD938X_ANA_TX_CH2;
+ mask = WCD938X_HPF2_INIT_MASK;
+ break;
+ case 2:
+ reg = WCD938X_ANA_TX_CH4;
+ mask = WCD938X_HPF3_INIT_MASK;
+ break;
+ case 3:
+ reg = WCD938X_ANA_TX_CH4;
+ mask = WCD938X_HPF4_INIT_MASK;
+ break;
+ default:
+ return;
+ }
+
+ snd_soc_component_write_field(component, reg, mask, mode);
+}
+
+static int wcd938x_adc_enable_req(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int mode;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_REQ_CTL,
+ WCD938X_FS_RATE_4P8_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_REQ_CTL,
+ WCD938X_NO_NOTCH_MASK, 0);
+ wcd938x_tx_channel_config(component, w->shift, 1);
+ mode = wcd938x_get_adc_mode(wcd938x->tx_mode[w->shift]);
+ if (mode < 0) {
+ dev_err(component->dev, "Invalid ADC mode\n");
+ return -EINVAL;
+ }
+ switch (w->shift) {
+ case 0:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,
+ WCD938X_TXD0_MODE_MASK, mode);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD0_CLK_EN_MASK, 1);
+ break;
+ case 1:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,
+ WCD938X_TXD1_MODE_MASK, mode);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD1_CLK_EN_MASK, 1);
+ break;
+ case 2:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,
+ WCD938X_TXD2_MODE_MASK, mode);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD2_CLK_EN_MASK, 1);
+ break;
+ case 3:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,
+ WCD938X_TXD3_MODE_MASK, mode);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD3_CLK_EN_MASK, 1);
+ break;
+ default:
+ break;
+ }
+
+ wcd938x_tx_channel_config(component, w->shift, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ switch (w->shift) {
+ case 0:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,
+ WCD938X_TXD0_MODE_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD0_CLK_EN_MASK, 0);
+ break;
+ case 1:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,
+ WCD938X_TXD1_MODE_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD1_CLK_EN_MASK, 0);
+ break;
+ case 2:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,
+ WCD938X_TXD2_MODE_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD2_CLK_EN_MASK, 0);
+ break;
+ case 3:
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,
+ WCD938X_TXD3_MODE_MASK, 0);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TXD3_CLK_EN_MASK, 0);
+ break;
+ default:
+ break;
+ }
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_TX_DIV2_CLK_EN_MASK, 0);
+ break;
+ }
+
+ return 0;
+}
+
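+/* Reference-counted control of the micbias supplies and their pull-ups. */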
+static int wcd938x_micbias_control(struct snd_soc_component *component,
+ int micb_num, int req, bool is_dapm)
+{
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ int micb_index = micb_num - 1;
+ u16 micb_reg;
+
+ switch (micb_num) {
+ case MIC_BIAS_1:
+ micb_reg = WCD938X_ANA_MICB1;
+ break;
+ case MIC_BIAS_2:
+ micb_reg = WCD938X_ANA_MICB2;
+ break;
+ case MIC_BIAS_3:
+ micb_reg = WCD938X_ANA_MICB3;
+ break;
+ case MIC_BIAS_4:
+ micb_reg = WCD938X_ANA_MICB4;
+ break;
+ default:
+ dev_err(component->dev, "%s: Invalid micbias number: %d\n",
+ __func__, micb_num);
+ return -EINVAL;
+ }
+
+ switch (req) {
+ case MICB_PULLUP_ENABLE:
+ wcd938x->pullup_ref[micb_index]++;
+ if ((wcd938x->pullup_ref[micb_index] == 1) &&
+ (wcd938x->micb_ref[micb_index] == 0))
+ snd_soc_component_write_field(component, micb_reg,
+ WCD938X_MICB_EN_MASK,
+ WCD938X_MICB_PULL_UP);
+ break;
+ case MICB_PULLUP_DISABLE:
+ if (wcd938x->pullup_ref[micb_index] > 0)
+ wcd938x->pullup_ref[micb_index]--;
+
+ if ((wcd938x->pullup_ref[micb_index] == 0) &&
+ (wcd938x->micb_ref[micb_index] == 0))
+ snd_soc_component_write_field(component, micb_reg,
+ WCD938X_MICB_EN_MASK, 0);
+ break;
+ case MICB_ENABLE:
+ wcd938x->micb_ref[micb_index]++;
+ if (wcd938x->micb_ref[micb_index] == 1) {
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_DIG_CLK_CTL,
+ WCD938X_TX_CLK_EN_MASK, 0xF);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_CLK_CTL,
+ WCD938X_ANA_TX_DIV2_CLK_EN_MASK, 1);
+ snd_soc_component_write_field(component,
+ WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL,
+ WCD938X_TX_SC_CLK_EN_MASK, 1);
+
+ snd_soc_component_write_field(component, micb_reg,
+ WCD938X_MICB_EN_MASK,
+ WCD938X_MICB_ENABLE);
+ }
+
+ break;
+ case MICB_DISABLE:
+ if (wcd938x->micb_ref[micb_index] > 0)
+ wcd938x->micb_ref[micb_index]--;
+
+ if ((wcd938x->micb_ref[micb_index] == 0) &&
+ (wcd938x->pullup_ref[micb_index] > 0))
+ snd_soc_component_write_field(component, micb_reg,
+ WCD938X_MICB_EN_MASK,
+ WCD938X_MICB_PULL_UP);
+ else if ((wcd938x->micb_ref[micb_index] == 0) &&
+ (wcd938x->pullup_ref[micb_index] == 0)) {
+ snd_soc_component_write_field(component, micb_reg,
+ WCD938X_MICB_EN_MASK, 0);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ int micb_num = w->shift;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd938x_micbias_control(component, micb_num, MICB_ENABLE, true);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1100);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd938x_micbias_control(component, micb_num, MICB_DISABLE, true);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_codec_enable_micbias_pullup(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ int micb_num = w->shift;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd938x_micbias_control(component, micb_num,
+ MICB_PULLUP_ENABLE, true);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* 1 msec delay as per HW requirement */
+ usleep_range(1000, 1100);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd938x_micbias_control(component, micb_num,
+ MICB_PULLUP_DISABLE, true);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd938x_tx_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int path = e->shift_l;
+
+ ucontrol->value.integer.value[0] = wcd938x->tx_mode[path];
+
+ return 0;
+}
+
+static int wcd938x_tx_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int path = e->shift_l;
+
+ wcd938x->tx_mode[path] = ucontrol->value.enumerated.item[0];
+
+ return 1;
+}
+
+static int wcd938x_rx_hph_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = wcd938x->hph_mode;
+
+ return 0;
+}
+
+static int wcd938x_rx_hph_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ wcd938x->hph_mode = ucontrol->value.enumerated.item[0];
+
+ return 1;
+}
+
+static int wcd938x_ear_pa_put_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ if (wcd938x->comp1_enable) {
+ dev_err(component->dev, "Can not set EAR PA Gain, compander1 is enabled\n");
+ return -EINVAL;
+ }
+
+ snd_soc_component_write_field(component, WCD938X_ANA_EAR_COMPANDER_CTL,
+ WCD938X_EAR_GAIN_MASK,
+ ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int wcd938x_get_compander(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ struct soc_mixer_control *mc;
+ bool hphr;
+
+ mc = (struct soc_mixer_control *)(kcontrol->private_value);
+ hphr = mc->shift;
+
+ if (hphr)
+ ucontrol->value.integer.value[0] = wcd938x->comp2_enable;
+ else
+ ucontrol->value.integer.value[0] = wcd938x->comp1_enable;
+
+ return 0;
+}
+
+static int wcd938x_set_compander(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ struct wcd938x_sdw_priv *wcd;
+ int value = ucontrol->value.integer.value[0];
+ struct soc_mixer_control *mc;
+ bool hphr;
+
+ mc = (struct soc_mixer_control *)(kcontrol->private_value);
+ hphr = mc->shift;
+
+ wcd = wcd938x->sdw_priv[AIF1_PB];
+
+ if (hphr)
+ wcd938x->comp2_enable = value;
+ else
+ wcd938x->comp1_enable = value;
+
+ if (value)
+ wcd938x_connect_port(wcd, mc->reg, true);
+ else
+ wcd938x_connect_port(wcd, mc->reg, false);
+
+ return 0;
+}
+
+static int wcd938x_ldoh_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = wcd938x->ldoh;
+
+ return 0;
+}
+
+static int wcd938x_ldoh_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ wcd938x->ldoh = ucontrol->value.integer.value[0];
+
+ return 1;
+}
+
+static int wcd938x_bcs_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = wcd938x->bcs_dis;
+
+ return 0;
+}
+
+static int wcd938x_bcs_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+
+ wcd938x->bcs_dis = ucontrol->value.integer.value[0];
+
+ return 1;
+}
+
+static const char * const tx_mode_mux_text_wcd9380[] = {
+ "ADC_INVALID", "ADC_HIFI", "ADC_LO_HIF", "ADC_NORMAL", "ADC_LP",
+};
+
+static const char * const tx_mode_mux_text[] = {
+ "ADC_INVALID", "ADC_HIFI", "ADC_LO_HIF", "ADC_NORMAL", "ADC_LP",
+ "ADC_ULP1", "ADC_ULP2",
+};
+
+static const char * const rx_hph_mode_mux_text_wcd9380[] = {
+ "CLS_H_INVALID", "CLS_H_INVALID_1", "CLS_H_LP", "CLS_AB",
+ "CLS_H_LOHIFI", "CLS_H_ULP", "CLS_H_INVALID_2", "CLS_AB_LP",
+ "CLS_AB_LOHIFI",
+};
+
+static const char * const rx_hph_mode_mux_text[] = {
+ "CLS_H_INVALID", "CLS_H_HIFI", "CLS_H_LP", "CLS_AB", "CLS_H_LOHIFI",
+ "CLS_H_ULP", "CLS_AB_HIFI", "CLS_AB_LP", "CLS_AB_LOHIFI",
+};
+
+static const char * const adc2_mux_text[] = {
+ "INP2", "INP3"
+};
+
+static const char * const adc3_mux_text[] = {
+ "INP4", "INP6"
+};
+
+static const char * const adc4_mux_text[] = {
+ "INP5", "INP7"
+};
+
+static const char * const rdac3_mux_text[] = {
+ "RX1", "RX3"
+};
+
+static const char * const hdr12_mux_text[] = {
+ "NO_HDR12", "HDR12"
+};
+
+static const char * const hdr34_mux_text[] = {
+ "NO_HDR34", "HDR34"
+};
+
+static const struct soc_enum tx0_mode_enum_wcd9380 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(tx_mode_mux_text_wcd9380),
+ tx_mode_mux_text_wcd9380);
+
+static const struct soc_enum tx1_mode_enum_wcd9380 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 1, ARRAY_SIZE(tx_mode_mux_text_wcd9380),
+ tx_mode_mux_text_wcd9380);
+
+static const struct soc_enum tx2_mode_enum_wcd9380 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 2, ARRAY_SIZE(tx_mode_mux_text_wcd9380),
+ tx_mode_mux_text_wcd9380);
+
+static const struct soc_enum tx3_mode_enum_wcd9380 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 3, ARRAY_SIZE(tx_mode_mux_text_wcd9380),
+ tx_mode_mux_text_wcd9380);
+
+static const struct soc_enum tx0_mode_enum_wcd9385 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(tx_mode_mux_text),
+ tx_mode_mux_text);
+
+static const struct soc_enum tx1_mode_enum_wcd9385 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 1, ARRAY_SIZE(tx_mode_mux_text),
+ tx_mode_mux_text);
+
+static const struct soc_enum tx2_mode_enum_wcd9385 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 2, ARRAY_SIZE(tx_mode_mux_text),
+ tx_mode_mux_text);
+
+static const struct soc_enum tx3_mode_enum_wcd9385 =
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 3, ARRAY_SIZE(tx_mode_mux_text),
+ tx_mode_mux_text);
+
+static const struct soc_enum rx_hph_mode_mux_enum_wcd9380 =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(rx_hph_mode_mux_text_wcd9380),
+ rx_hph_mode_mux_text_wcd9380);
+
+static const struct soc_enum rx_hph_mode_mux_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(rx_hph_mode_mux_text),
+ rx_hph_mode_mux_text);
+
+static const struct soc_enum adc2_enum =
+ SOC_ENUM_SINGLE(WCD938X_TX_NEW_AMIC_MUX_CFG, 7,
+ ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
+static const struct soc_enum adc3_enum =
+ SOC_ENUM_SINGLE(WCD938X_TX_NEW_AMIC_MUX_CFG, 6,
+ ARRAY_SIZE(adc3_mux_text), adc3_mux_text);
+
+static const struct soc_enum adc4_enum =
+ SOC_ENUM_SINGLE(WCD938X_TX_NEW_AMIC_MUX_CFG, 5,
+ ARRAY_SIZE(adc4_mux_text), adc4_mux_text);
+
+static const struct soc_enum hdr12_enum =
+ SOC_ENUM_SINGLE(WCD938X_TX_NEW_AMIC_MUX_CFG, 4,
+ ARRAY_SIZE(hdr12_mux_text), hdr12_mux_text);
+
+static const struct soc_enum hdr34_enum =
+ SOC_ENUM_SINGLE(WCD938X_TX_NEW_AMIC_MUX_CFG, 3,
+ ARRAY_SIZE(hdr34_mux_text), hdr34_mux_text);
+
+static const struct soc_enum rdac3_enum =
+ SOC_ENUM_SINGLE(WCD938X_DIGITAL_CDC_EAR_PATH_CTL, 0,
+ ARRAY_SIZE(rdac3_mux_text), rdac3_mux_text);
+
+static const struct snd_kcontrol_new adc1_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new adc2_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new adc3_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new adc4_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic1_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic2_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic3_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic4_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic5_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic6_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic7_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new dmic8_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new ear_rdac_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new aux_rdac_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new hphl_rdac_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new hphr_rdac_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
+};
+
+static const struct snd_kcontrol_new tx_adc2_mux =
+ SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
+
+static const struct snd_kcontrol_new tx_adc3_mux =
+ SOC_DAPM_ENUM("ADC3 MUX Mux", adc3_enum);
+
+static const struct snd_kcontrol_new tx_adc4_mux =
+ SOC_DAPM_ENUM("ADC4 MUX Mux", adc4_enum);
+
+static const struct snd_kcontrol_new tx_hdr12_mux =
+ SOC_DAPM_ENUM("HDR12 MUX Mux", hdr12_enum);
+
+static const struct snd_kcontrol_new tx_hdr34_mux =
+ SOC_DAPM_ENUM("HDR34 MUX Mux", hdr34_enum);
+
+static const struct snd_kcontrol_new rx_rdac3_mux =
+ SOC_DAPM_ENUM("RDAC3_MUX Mux", rdac3_enum);
+
+static const struct snd_kcontrol_new wcd9380_snd_controls[] = {
+ SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum_wcd9380,
+ wcd938x_rx_hph_mode_get, wcd938x_rx_hph_mode_put),
+ SOC_ENUM_EXT("TX0 MODE", tx0_mode_enum_wcd9380,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+ SOC_ENUM_EXT("TX1 MODE", tx1_mode_enum_wcd9380,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+ SOC_ENUM_EXT("TX2 MODE", tx2_mode_enum_wcd9380,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+ SOC_ENUM_EXT("TX3 MODE", tx3_mode_enum_wcd9380,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+};
+
+static const struct snd_kcontrol_new wcd9385_snd_controls[] = {
+ SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
+ wcd938x_rx_hph_mode_get, wcd938x_rx_hph_mode_put),
+ SOC_ENUM_EXT("TX0 MODE", tx0_mode_enum_wcd9385,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+ SOC_ENUM_EXT("TX1 MODE", tx1_mode_enum_wcd9385,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+ SOC_ENUM_EXT("TX2 MODE", tx2_mode_enum_wcd9385,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+ SOC_ENUM_EXT("TX3 MODE", tx3_mode_enum_wcd9385,
+ wcd938x_tx_mode_get, wcd938x_tx_mode_put),
+};
+
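+/* For the SoundWire port switches, the control's reg encodes the port index and shift the DAI id. */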
+static int wcd938x_get_swr_port(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(comp);
+ struct wcd938x_sdw_priv *wcd;
+ struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
+ int dai_id = mixer->shift;
+ int portidx = mixer->reg;
+
+ wcd = wcd938x->sdw_priv[dai_id];
+
+ ucontrol->value.integer.value[0] = wcd->port_enable[portidx];
+
+ return 0;
+}
+
+static int wcd938x_set_swr_port(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(comp);
+ struct wcd938x_sdw_priv *wcd;
+ struct soc_mixer_control *mixer =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int portidx = mixer->reg;
+ int dai_id = mixer->shift;
+ bool enable;
+
+ wcd = wcd938x->sdw_priv[dai_id];
+
+ if (ucontrol->value.integer.value[0])
+ enable = true;
+ else
+ enable = false;
+
+ wcd->port_enable[portidx] = enable;
+
+ wcd938x_connect_port(wcd, portidx, enable);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new wcd938x_snd_controls[] = {
+ SOC_SINGLE_EXT("HPHL_COMP Switch", WCD938X_COMP_L, 0, 1, 0,
+ wcd938x_get_compander, wcd938x_set_compander),
+ SOC_SINGLE_EXT("HPHR_COMP Switch", WCD938X_COMP_R, 1, 1, 0,
+ wcd938x_get_compander, wcd938x_set_compander),
+ SOC_SINGLE_EXT("HPHL Switch", WCD938X_HPH_L, 0, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("HPHR Switch", WCD938X_HPH_R, 0, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("CLSH Switch", WCD938X_CLSH, 0, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("LO Switch", WCD938X_LO, 0, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DSD_L Switch", WCD938X_DSD_L, 0, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DSD_R Switch", WCD938X_DSD_R, 0, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_TLV("HPHL Volume", WCD938X_HPH_L_EN, 0, 0x18, 0, line_gain),
+ SOC_SINGLE_TLV("HPHR Volume", WCD938X_HPH_R_EN, 0, 0x18, 0, line_gain),
+ WCD938X_EAR_PA_GAIN_TLV("EAR_PA Volume", WCD938X_ANA_EAR_COMPANDER_CTL,
+ 2, 0x10, 0, ear_pa_gain),
+ SOC_SINGLE_EXT("ADC1 Switch", WCD938X_ADC1, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("ADC2 Switch", WCD938X_ADC2, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("ADC3 Switch", WCD938X_ADC3, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("ADC4 Switch", WCD938X_ADC4, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC0 Switch", WCD938X_DMIC0, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC1 Switch", WCD938X_DMIC1, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("MBHC Switch", WCD938X_MBHC, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC2 Switch", WCD938X_DMIC2, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC3 Switch", WCD938X_DMIC3, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC4 Switch", WCD938X_DMIC4, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC5 Switch", WCD938X_DMIC5, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC6 Switch", WCD938X_DMIC6, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("DMIC7 Switch", WCD938X_DMIC7, 1, 1, 0,
+ wcd938x_get_swr_port, wcd938x_set_swr_port),
+ SOC_SINGLE_EXT("LDOH Enable Switch", SND_SOC_NOPM, 0, 1, 0,
+ wcd938x_ldoh_get, wcd938x_ldoh_put),
+ SOC_SINGLE_EXT("ADC2_BCS Disable Switch", SND_SOC_NOPM, 0, 1, 0,
+ wcd938x_bcs_get, wcd938x_bcs_put),
+
+ SOC_SINGLE_TLV("ADC1 Volume", WCD938X_ANA_TX_CH1, 0, 20, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", WCD938X_ANA_TX_CH2, 0, 20, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", WCD938X_ANA_TX_CH3, 0, 20, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC4 Volume", WCD938X_ANA_TX_CH4, 0, 20, 0, analog_gain),
+};
+
+static const struct snd_soc_dapm_widget wcd938x_dapm_widgets[] = {
+
+ /*input widgets*/
+ SND_SOC_DAPM_INPUT("AMIC1"),
+ SND_SOC_DAPM_INPUT("AMIC2"),
+ SND_SOC_DAPM_INPUT("AMIC3"),
+ SND_SOC_DAPM_INPUT("AMIC4"),
+ SND_SOC_DAPM_INPUT("AMIC5"),
+ SND_SOC_DAPM_INPUT("AMIC6"),
+ SND_SOC_DAPM_INPUT("AMIC7"),
+ SND_SOC_DAPM_MIC("Analog Mic1", NULL),
+ SND_SOC_DAPM_MIC("Analog Mic2", NULL),
+ SND_SOC_DAPM_MIC("Analog Mic3", NULL),
+ SND_SOC_DAPM_MIC("Analog Mic4", NULL),
+ SND_SOC_DAPM_MIC("Analog Mic5", NULL),
+
+ /*tx widgets*/
+ SND_SOC_DAPM_ADC_E("ADC1", NULL, SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("ADC2", NULL, SND_SOC_NOPM, 1, 0,
+ wcd938x_codec_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("ADC3", NULL, SND_SOC_NOPM, 2, 0,
+ wcd938x_codec_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("ADC4", NULL, SND_SOC_NOPM, 3, 0,
+ wcd938x_codec_enable_adc,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 1, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 2, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 3, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 4, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC6", NULL, SND_SOC_NOPM, 5, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC7", NULL, SND_SOC_NOPM, 6, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("DMIC8", NULL, SND_SOC_NOPM, 7, 0,
+ wcd938x_codec_enable_dmic,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER_E("ADC1 REQ", SND_SOC_NOPM, 0, 0,
+ NULL, 0, wcd938x_adc_enable_req,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("ADC2 REQ", SND_SOC_NOPM, 1, 0,
+ NULL, 0, wcd938x_adc_enable_req,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("ADC3 REQ", SND_SOC_NOPM, 2, 0,
+ NULL, 0, wcd938x_adc_enable_req,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("ADC4 REQ", SND_SOC_NOPM, 3, 0, NULL, 0,
+ wcd938x_adc_enable_req,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux),
+ SND_SOC_DAPM_MUX("ADC3 MUX", SND_SOC_NOPM, 0, 0, &tx_adc3_mux),
+ SND_SOC_DAPM_MUX("ADC4 MUX", SND_SOC_NOPM, 0, 0, &tx_adc4_mux),
+ SND_SOC_DAPM_MUX("HDR12 MUX", SND_SOC_NOPM, 0, 0, &tx_hdr12_mux),
+ SND_SOC_DAPM_MUX("HDR34 MUX", SND_SOC_NOPM, 0, 0, &tx_hdr34_mux),
+
+ /*tx mixers*/
+ SND_SOC_DAPM_MIXER_E("ADC1_MIXER", SND_SOC_NOPM, 0, 0, adc1_switch,
+ ARRAY_SIZE(adc1_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("ADC2_MIXER", SND_SOC_NOPM, 0, 0, adc2_switch,
+ ARRAY_SIZE(adc2_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("ADC3_MIXER", SND_SOC_NOPM, 0, 0, adc3_switch,
+ ARRAY_SIZE(adc3_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("ADC4_MIXER", SND_SOC_NOPM, 0, 0, adc4_switch,
+ ARRAY_SIZE(adc4_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC1_MIXER", SND_SOC_NOPM, 0, 0, dmic1_switch,
+ ARRAY_SIZE(dmic1_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC2_MIXER", SND_SOC_NOPM, 0, 0, dmic2_switch,
+ ARRAY_SIZE(dmic2_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC3_MIXER", SND_SOC_NOPM, 0, 0, dmic3_switch,
+ ARRAY_SIZE(dmic3_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC4_MIXER", SND_SOC_NOPM, 0, 0, dmic4_switch,
+ ARRAY_SIZE(dmic4_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC5_MIXER", SND_SOC_NOPM, 0, 0, dmic5_switch,
+ ARRAY_SIZE(dmic5_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC6_MIXER", SND_SOC_NOPM, 0, 0, dmic6_switch,
+ ARRAY_SIZE(dmic6_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC7_MIXER", SND_SOC_NOPM, 0, 0, dmic7_switch,
+ ARRAY_SIZE(dmic7_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("DMIC8_MIXER", SND_SOC_NOPM, 0, 0, dmic8_switch,
+ ARRAY_SIZE(dmic8_switch), wcd938x_tx_swr_ctrl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ /* micbias widgets*/
+ SND_SOC_DAPM_SUPPLY("MIC BIAS1", SND_SOC_NOPM, MIC_BIAS_1, 0,
+ wcd938x_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS2", SND_SOC_NOPM, MIC_BIAS_2, 0,
+ wcd938x_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS3", SND_SOC_NOPM, MIC_BIAS_3, 0,
+ wcd938x_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS4", SND_SOC_NOPM, MIC_BIAS_4, 0,
+ wcd938x_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ /* micbias pull up widgets */
+ SND_SOC_DAPM_SUPPLY("VA MIC BIAS1", SND_SOC_NOPM, MIC_BIAS_1, 0,
+ wcd938x_codec_enable_micbias_pullup,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("VA MIC BIAS2", SND_SOC_NOPM, MIC_BIAS_2, 0,
+ wcd938x_codec_enable_micbias_pullup,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("VA MIC BIAS3", SND_SOC_NOPM, MIC_BIAS_3, 0,
+ wcd938x_codec_enable_micbias_pullup,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("VA MIC BIAS4", SND_SOC_NOPM, MIC_BIAS_4, 0,
+ wcd938x_codec_enable_micbias_pullup,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ /* output widgets tx */
+ SND_SOC_DAPM_OUTPUT("ADC1_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("ADC2_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("ADC3_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("ADC4_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC1_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC2_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC3_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC4_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC5_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC6_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC7_OUTPUT"),
+ SND_SOC_DAPM_OUTPUT("DMIC8_OUTPUT"),
+
+ SND_SOC_DAPM_INPUT("IN1_HPHL"),
+ SND_SOC_DAPM_INPUT("IN2_HPHR"),
+ SND_SOC_DAPM_INPUT("IN3_AUX"),
+
+ /* rx widgets */
+ SND_SOC_DAPM_PGA_E("EAR PGA", WCD938X_ANA_EAR, 7, 0, NULL, 0,
+ wcd938x_codec_enable_ear_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("AUX PGA", WCD938X_AUX_AUXPA, 7, 0, NULL, 0,
+ wcd938x_codec_enable_aux_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("HPHL PGA", WCD938X_ANA_HPH, 7, 0, NULL, 0,
+ wcd938x_codec_enable_hphl_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("HPHR PGA", WCD938X_ANA_HPH, 6, 0, NULL, 0,
+ wcd938x_codec_enable_hphr_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_DAC_E("RDAC1", NULL, SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_hphl_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RDAC2", NULL, SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_hphr_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RDAC3", NULL, SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_ear_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RDAC4", NULL, SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_aux_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX("RDAC3_MUX", SND_SOC_NOPM, 0, 0, &rx_rdac3_mux),
+
+ SND_SOC_DAPM_SUPPLY("VDD_BUCK", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RXCLK", SND_SOC_NOPM, 0, 0,
+ wcd938x_codec_enable_rxclk,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
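+ /* _S variant gives this supply an explicit DAPM power-up subsequence (1) */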
+ SND_SOC_DAPM_SUPPLY_S("CLS_H_PORT", 1, SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER_E("RX1", SND_SOC_NOPM, 0, 0, NULL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER_E("RX2", SND_SOC_NOPM, 0, 0, NULL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER_E("RX3", SND_SOC_NOPM, 0, 0, NULL, 0, NULL, 0),
+
+ /* rx mixer widgets */
+ SND_SOC_DAPM_MIXER("EAR_RDAC", SND_SOC_NOPM, 0, 0,
+ ear_rdac_switch, ARRAY_SIZE(ear_rdac_switch)),
+ SND_SOC_DAPM_MIXER("AUX_RDAC", SND_SOC_NOPM, 0, 0,
+ aux_rdac_switch, ARRAY_SIZE(aux_rdac_switch)),
+ SND_SOC_DAPM_MIXER("HPHL_RDAC", SND_SOC_NOPM, 0, 0,
+ hphl_rdac_switch, ARRAY_SIZE(hphl_rdac_switch)),
+ SND_SOC_DAPM_MIXER("HPHR_RDAC", SND_SOC_NOPM, 0, 0,
+ hphr_rdac_switch, ARRAY_SIZE(hphr_rdac_switch)),
+
+ /* output widgets rx */
+ SND_SOC_DAPM_OUTPUT("EAR"),
+ SND_SOC_DAPM_OUTPUT("AUX"),
+ SND_SOC_DAPM_OUTPUT("HPHL"),
+ SND_SOC_DAPM_OUTPUT("HPHR"),
+
+};
+
+static const struct snd_soc_dapm_route wcd938x_audio_map[] = {
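+ /* each route is { sink, control ("Switch"/mux text or NULL), source } */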
+ {"ADC1_OUTPUT", NULL, "ADC1_MIXER"},
+ {"ADC1_MIXER", "Switch", "ADC1 REQ"},
+ {"ADC1 REQ", NULL, "ADC1"},
+ {"ADC1", NULL, "AMIC1"},
+
+ {"ADC2_OUTPUT", NULL, "ADC2_MIXER"},
+ {"ADC2_MIXER", "Switch", "ADC2 REQ"},
+ {"ADC2 REQ", NULL, "ADC2"},
+ {"ADC2", NULL, "HDR12 MUX"},
+ {"HDR12 MUX", "NO_HDR12", "ADC2 MUX"},
+ {"HDR12 MUX", "HDR12", "AMIC1"},
+ {"ADC2 MUX", "INP3", "AMIC3"},
+ {"ADC2 MUX", "INP2", "AMIC2"},
+
+ {"ADC3_OUTPUT", NULL, "ADC3_MIXER"},
+ {"ADC3_MIXER", "Switch", "ADC3 REQ"},
+ {"ADC3 REQ", NULL, "ADC3"},
+ {"ADC3", NULL, "HDR34 MUX"},
+ {"HDR34 MUX", "NO_HDR34", "ADC3 MUX"},
+ {"HDR34 MUX", "HDR34", "AMIC5"},
+ {"ADC3 MUX", "INP4", "AMIC4"},
+ {"ADC3 MUX", "INP6", "AMIC6"},
+
+ {"ADC4_OUTPUT", NULL, "ADC4_MIXER"},
+ {"ADC4_MIXER", "Switch", "ADC4 REQ"},
+ {"ADC4 REQ", NULL, "ADC4"},
+ {"ADC4", NULL, "ADC4 MUX"},
+ {"ADC4 MUX", "INP5", "AMIC5"},
+ {"ADC4 MUX", "INP7", "AMIC7"},
+
+ {"DMIC1_OUTPUT", NULL, "DMIC1_MIXER"},
+ {"DMIC1_MIXER", "Switch", "DMIC1"},
+
+ {"DMIC2_OUTPUT", NULL, "DMIC2_MIXER"},
+ {"DMIC2_MIXER", "Switch", "DMIC2"},
+
+ {"DMIC3_OUTPUT", NULL, "DMIC3_MIXER"},
+ {"DMIC3_MIXER", "Switch", "DMIC3"},
+
+ {"DMIC4_OUTPUT", NULL, "DMIC4_MIXER"},
+ {"DMIC4_MIXER", "Switch", "DMIC4"},
+
+ {"DMIC5_OUTPUT", NULL, "DMIC5_MIXER"},
+ {"DMIC5_MIXER", "Switch", "DMIC5"},
+
+ {"DMIC6_OUTPUT", NULL, "DMIC6_MIXER"},
+ {"DMIC6_MIXER", "Switch", "DMIC6"},
+
+ {"DMIC7_OUTPUT", NULL, "DMIC7_MIXER"},
+ {"DMIC7_MIXER", "Switch", "DMIC7"},
+
+ {"DMIC8_OUTPUT", NULL, "DMIC8_MIXER"},
+ {"DMIC8_MIXER", "Switch", "DMIC8"},
+
+ {"IN1_HPHL", NULL, "VDD_BUCK"},
+ {"IN1_HPHL", NULL, "CLS_H_PORT"},
+
+ {"RX1", NULL, "IN1_HPHL"},
+ {"RX1", NULL, "RXCLK"},
+ {"RDAC1", NULL, "RX1"},
+ {"HPHL_RDAC", "Switch", "RDAC1"},
+ {"HPHL PGA", NULL, "HPHL_RDAC"},
+ {"HPHL", NULL, "HPHL PGA"},
+
+ {"IN2_HPHR", NULL, "VDD_BUCK"},
+ {"IN2_HPHR", NULL, "CLS_H_PORT"},
+ {"RX2", NULL, "IN2_HPHR"},
+ {"RDAC2", NULL, "RX2"},
+ {"RX2", NULL, "RXCLK"},
+ {"HPHR_RDAC", "Switch", "RDAC2"},
+ {"HPHR PGA", NULL, "HPHR_RDAC"},
+ {"HPHR", NULL, "HPHR PGA"},
+
+ {"IN3_AUX", NULL, "VDD_BUCK"},
+ {"IN3_AUX", NULL, "CLS_H_PORT"},
+ {"RX3", NULL, "IN3_AUX"},
+ {"RDAC4", NULL, "RX3"},
+ {"RX3", NULL, "RXCLK"},
+ {"AUX_RDAC", "Switch", "RDAC4"},
+ {"AUX PGA", NULL, "AUX_RDAC"},
+ {"AUX", NULL, "AUX PGA"},
+
+ {"RDAC3_MUX", "RX3", "RX3"},
+ {"RDAC3_MUX", "RX1", "RX1"},
+ {"RDAC3", NULL, "RDAC3_MUX"},
+ {"EAR_RDAC", "Switch", "RDAC3"},
+ {"EAR PGA", NULL, "EAR_RDAC"},
+ {"EAR", NULL, "EAR PGA"},
+};
+
+static int wcd938x_get_micb_vout_ctl_val(u32 micb_mv)
+{
+ /* min micbias voltage is 1V and maximum is 2.85V */
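+ /* one LSB per 50mV step, e.g. 1800mV -> (1800 - 1000) / 50 = 16 */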
+ if (micb_mv < 1000 || micb_mv > 2850)
+ return -EINVAL;
+
+ return (micb_mv - 1000) / 50;
+}
+
+static int wcd938x_set_micbias_data(struct wcd938x_priv *wcd938x)
+{
+ int vout_ctl_1, vout_ctl_2, vout_ctl_3, vout_ctl_4;
+
+ /* set micbias voltage */
+ vout_ctl_1 = wcd938x_get_micb_vout_ctl_val(wcd938x->micb1_mv);
+ vout_ctl_2 = wcd938x_get_micb_vout_ctl_val(wcd938x->micb2_mv);
+ vout_ctl_3 = wcd938x_get_micb_vout_ctl_val(wcd938x->micb3_mv);
+ vout_ctl_4 = wcd938x_get_micb_vout_ctl_val(wcd938x->micb4_mv);
+ if (vout_ctl_1 < 0 || vout_ctl_2 < 0 || vout_ctl_3 < 0 || vout_ctl_4 < 0)
+ return -EINVAL;
+
+ regmap_update_bits(wcd938x->regmap, WCD938X_ANA_MICB1,
+ WCD938X_MICB_VOUT_MASK, vout_ctl_1);
+ regmap_update_bits(wcd938x->regmap, WCD938X_ANA_MICB2,
+ WCD938X_MICB_VOUT_MASK, vout_ctl_2);
+ regmap_update_bits(wcd938x->regmap, WCD938X_ANA_MICB3,
+ WCD938X_MICB_VOUT_MASK, vout_ctl_3);
+ regmap_update_bits(wcd938x->regmap, WCD938X_ANA_MICB4,
+ WCD938X_MICB_VOUT_MASK, vout_ctl_4);
+
+ return 0;
+}
+
+static irqreturn_t wcd938x_wd_handle_irq(int irq, void *data)
+{
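+ /* nothing to do beyond acknowledging the PDM watchdog interrupt */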
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip wcd_irq_chip = {
+ .name = "WCD938x",
+};
+
+static int wcd_irq_chip_map(struct irq_domain *irqd, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &wcd_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops wcd_domain_ops = {
+ .map = wcd_irq_chip_map,
+};
+
+static int wcd938x_irq_init(struct wcd938x_priv *wcd, struct device *dev)
+{
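+ /* single-hwirq linear domain; hwirq 0 is mapped and handed to regmap-irq below */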
+ wcd->virq = irq_domain_add_linear(NULL, 1, &wcd_domain_ops, NULL);
+ if (!wcd->virq) {
+ dev_err(dev, "%s: Failed to add IRQ domain\n", __func__);
+ return -EINVAL;
+ }
+
+ return devm_regmap_add_irq_chip(dev, wcd->regmap,
+ irq_create_mapping(wcd->virq, 0),
+ IRQF_ONESHOT, 0, &wcd938x_regmap_irq_chip,
+ &wcd->irq_chip);
+}
+
+static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+{
+ struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
+ struct device *dev = component->dev;
+ int ret, i;
+
+ snd_soc_component_init_regmap(component, wcd938x->regmap);
+
+ wcd938x->variant = snd_soc_component_read_field(component,
+ WCD938X_DIGITAL_EFUSE_REG_0,
+ WCD938X_ID_MASK);
+
+ wcd938x->clsh_info = wcd_clsh_ctrl_alloc(component, WCD938X);
+
+ wcd938x_io_init(wcd938x);
+ /* Set all interrupts as edge triggered */
+ for (i = 0; i < wcd938x_regmap_irq_chip.num_regs; i++) {
+ regmap_write(wcd938x->regmap,
+ (WCD938X_DIGITAL_INTR_LEVEL_0 + i), 0);
+ }
+
+ wcd938x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
+ WCD938X_IRQ_HPHR_PDM_WD_INT);
+ wcd938x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
+ WCD938X_IRQ_HPHL_PDM_WD_INT);
+ wcd938x->aux_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
+ WCD938X_IRQ_AUX_PDM_WD_INT);
+
+ /* Request for watchdog interrupt */
+ ret = request_threaded_irq(wcd938x->hphr_pdm_wd_int, NULL, wcd938x_wd_handle_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "HPHR PDM WD INT", wcd938x);
+ if (ret)
+ dev_err(dev, "Failed to request HPHR WD interrupt (%d)\n", ret);
+
+ ret = request_threaded_irq(wcd938x->hphl_pdm_wd_int, NULL, wcd938x_wd_handle_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "HPHL PDM WD INT", wcd938x);
+ if (ret)
+ dev_err(dev, "Failed to request HPHL WD interrupt (%d)\n", ret);
+
+ ret = request_threaded_irq(wcd938x->aux_pdm_wd_int, NULL, wcd938x_wd_handle_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "AUX PDM WD INT", wcd938x);
+ if (ret)
+ dev_err(dev, "Failed to request Aux WD interrupt (%d)\n", ret);
+
+ /* Disable watchdog interrupt for HPH and AUX */
+ disable_irq_nosync(wcd938x->hphr_pdm_wd_int);
+ disable_irq_nosync(wcd938x->hphl_pdm_wd_int);
+ disable_irq_nosync(wcd938x->aux_pdm_wd_int);
+
+ switch (wcd938x->variant) {
+ case WCD9380:
+ ret = snd_soc_add_component_controls(component, wcd9380_snd_controls,
+ ARRAY_SIZE(wcd9380_snd_controls));
+ if (ret < 0) {
+ dev_err(component->dev,
+ "%s: Failed to add snd ctrls for variant: %d\n",
+ __func__, wcd938x->variant);
+ goto err;
+ }
+ break;
+ case WCD9385:
+ ret = snd_soc_add_component_controls(component, wcd9385_snd_controls,
+ ARRAY_SIZE(wcd9385_snd_controls));
+ if (ret < 0) {
+ dev_err(component->dev,
+ "%s: Failed to add snd ctrls for variant: %d\n",
+ __func__, wcd938x->variant);
+ goto err;
+ }
+ break;
+ default:
+ break;
+ }
+err:
+ return ret;
+}
+
+static const struct snd_soc_component_driver soc_codec_dev_wcd938x = {
+ .name = "wcd938x_codec",
+ .probe = wcd938x_soc_codec_probe,
+ .controls = wcd938x_snd_controls,
+ .num_controls = ARRAY_SIZE(wcd938x_snd_controls),
+ .dapm_widgets = wcd938x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wcd938x_dapm_widgets),
+ .dapm_routes = wcd938x_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wcd938x_audio_map),
+};
+
+static void wcd938x_dt_parse_micbias_info(struct device *dev, struct wcd938x_priv *wcd)
+{
+ struct device_node *np = dev->of_node;
+ u32 prop_val = 0;
+ int rc = 0;
+
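+ /*
+  * Voltages come from DT in microvolts, e.g.
+  * "qcom,micbias1-microvolt = <1800000>" yields micb1_mv = 1800.
+  */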
+ rc = of_property_read_u32(np, "qcom,micbias1-microvolt", &prop_val);
+ if (!rc)
+ wcd->micb1_mv = prop_val / 1000;
+ else
+ dev_info(dev, "%s: Micbias1 DT property not found\n", __func__);
+
+ rc = of_property_read_u32(np, "qcom,micbias2-microvolt", &prop_val);
+ if (!rc)
+ wcd->micb2_mv = prop_val / 1000;
+ else
+ dev_info(dev, "%s: Micbias2 DT property not found\n", __func__);
+
+ rc = of_property_read_u32(np, "qcom,micbias3-microvolt", &prop_val);
+ if (!rc)
+ wcd->micb3_mv = prop_val / 1000;
+ else
+ dev_info(dev, "%s: Micbias3 DT property not found\n", __func__);
+
+ rc = of_property_read_u32(np, "qcom,micbias4-microvolt", &prop_val);
+ if (!rc)
+ wcd->micb4_mv = prop_val / 1000;
+ else
+ dev_info(dev, "%s: Micbias4 DT property not found\n", __func__);
+}
+
+static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device *dev)
+{
+ int ret;
+
+ wcd938x->reset_gpio = of_get_named_gpio(dev->of_node, "reset-gpios", 0);
+ if (wcd938x->reset_gpio < 0) {
+ dev_err(dev, "Failed to get reset gpio: err = %d\n",
+ wcd938x->reset_gpio);
+ return wcd938x->reset_gpio;
+ }
+
+ wcd938x->supplies[0].supply = "vdd-rxtx";
+ wcd938x->supplies[1].supply = "vdd-io";
+ wcd938x->supplies[2].supply = "vdd-buck";
+ wcd938x->supplies[3].supply = "vdd-mic-bias";
+
+ ret = regulator_bulk_get(dev, WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable supplies: err = %d\n", ret);
+ return ret;
+ }
+
+ wcd938x_dt_parse_micbias_info(dev, wcd938x);
+
+ return 0;
+}
+
+static int wcd938x_reset(struct wcd938x_priv *wcd938x)
+{
+ gpio_direction_output(wcd938x->reset_gpio, 0);
+ /* 20us sleep required after pulling the reset gpio to LOW */
+ usleep_range(20, 30);
+ gpio_set_value(wcd938x->reset_gpio, 1);
+ /* 20us sleep required after pulling the reset gpio to HIGH */
+ usleep_range(20, 30);
+
+ return 0;
+}
+
+static int wcd938x_codec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dai->dev);
+ struct wcd938x_sdw_priv *wcd = wcd938x->sdw_priv[dai->id];
+
+ return wcd938x_sdw_hw_params(wcd, substream, params, dai);
+}
+
+static int wcd938x_codec_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dai->dev);
+ struct wcd938x_sdw_priv *wcd = wcd938x->sdw_priv[dai->id];
+
+ return wcd938x_sdw_free(wcd, substream, dai);
+}
+
+static int wcd938x_codec_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dai->dev);
+ struct wcd938x_sdw_priv *wcd = wcd938x->sdw_priv[dai->id];
+
+ return wcd938x_sdw_set_sdw_stream(wcd, dai, stream, direction);
+}
+
+static const struct snd_soc_dai_ops wcd938x_sdw_dai_ops = {
+ .hw_params = wcd938x_codec_hw_params,
+ .hw_free = wcd938x_codec_free,
+ .set_sdw_stream = wcd938x_codec_set_sdw_stream,
+};
+
+static struct snd_soc_dai_driver wcd938x_dais[] = {
+ [0] = {
+ .name = "wcd938x-sdw-rx",
+ .playback = {
+ .stream_name = "WCD AIF1 Playback",
+ .rates = WCD938X_RATES_MASK | WCD938X_FRAC_RATES_MASK,
+ .formats = WCD938X_FORMATS_S16_S24_LE,
+ .rate_max = 192000,
+ .rate_min = 8000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &wcd938x_sdw_dai_ops,
+ },
+ [1] = {
+ .name = "wcd938x-sdw-tx",
+ .capture = {
+ .stream_name = "WCD AIF1 Capture",
+ .rates = WCD938X_RATES_MASK,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &wcd938x_sdw_dai_ops,
+ },
+};
+
+static int wcd938x_bind(struct device *dev)
+{
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ int ret;
+
+ ret = component_bind_all(dev, wcd938x);
+ if (ret) {
+ dev_err(dev, "%s: Slave bind failed, ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
+ if (!wcd938x->rxdev) {
+ dev_err(dev, "could not find slave with matching of node\n");
+ return -EINVAL;
+ }
+ wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
+ wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
+
+ wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
+ if (!wcd938x->txdev) {
+ dev_err(dev, "could not find txslave with matching of node\n");
+ return -EINVAL;
+ }
+ wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
+ wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
+ wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
+ if (!wcd938x->tx_sdw_dev) {
+ dev_err(dev, "could not get txslave with matching of dev\n");
+ return -EINVAL;
+ }
+
+ /*
+  * TX is the main CSR register interface and must not be suspended
+  * first, so explicitly add the dependency link.
+  */
+ if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME)) {
+ dev_err(dev, "could not devlink tx and rx\n");
+ return -EINVAL;
+ }
+
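+ /* also link the codec device as a consumer of both SoundWire devices for runtime PM */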
+ if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME)) {
+ dev_err(dev, "could not devlink wcd and tx\n");
+ return -EINVAL;
+ }
+
+ if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME)) {
+ dev_err(dev, "could not devlink wcd and rx\n");
+ return -EINVAL;
+ }
+
+ wcd938x->regmap = devm_regmap_init_sdw(wcd938x->tx_sdw_dev, &wcd938x_regmap_config);
+ if (IS_ERR(wcd938x->regmap)) {
+ dev_err(dev, "%s: tx csr regmap not found\n", __func__);
+ return PTR_ERR(wcd938x->regmap);
+ }
+
+ ret = wcd938x_irq_init(wcd938x, dev);
+ if (ret) {
+ dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
+ wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
+
+ ret = wcd938x_set_micbias_data(wcd938x);
+ if (ret < 0) {
+ dev_err(dev, "%s: bad micbias pdata\n", __func__);
+ return ret;
+ }
+
+ ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
+ wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
+ if (ret)
+ dev_err(dev, "%s: Codec registration failed\n",
+ __func__);
+
+ return ret;
+}
+
+static void wcd938x_unbind(struct device *dev)
+{
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+
+ device_link_remove(dev, wcd938x->txdev);
+ device_link_remove(dev, wcd938x->rxdev);
+ device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+ snd_soc_unregister_component(dev);
+ component_unbind_all(dev, wcd938x);
+}
+
+static const struct component_master_ops wcd938x_comp_ops = {
+ .bind = wcd938x_bind,
+ .unbind = wcd938x_unbind,
+};
+
+static int wcd938x_compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static void wcd938x_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
+static int wcd938x_add_slave_components(struct wcd938x_priv *wcd938x,
+ struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np;
+
+ np = dev->of_node;
+
+ wcd938x->rxnode = of_parse_phandle(np, "qcom,rx-device", 0);
+ if (!wcd938x->rxnode) {
+ dev_err(dev, "%s: Rx-device node not defined\n", __func__);
+ return -ENODEV;
+ }
+
+ of_node_get(wcd938x->rxnode);
+ component_match_add_release(dev, matchptr, wcd938x_release_of,
+ wcd938x_compare_of, wcd938x->rxnode);
+
+ wcd938x->txnode = of_parse_phandle(np, "qcom,tx-device", 0);
+ if (!wcd938x->txnode) {
+ dev_err(dev, "%s: Tx-device node not defined\n", __func__);
+ return -ENODEV;
+ }
+ of_node_get(wcd938x->txnode);
+ component_match_add_release(dev, matchptr, wcd938x_release_of,
+ wcd938x_compare_of, wcd938x->txnode);
+ return 0;
+}
+
+static int wcd938x_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct wcd938x_priv *wcd938x = NULL;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ wcd938x = devm_kzalloc(dev, sizeof(*wcd938x), GFP_KERNEL);
+ if (!wcd938x)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, wcd938x);
+
+ ret = wcd938x_populate_dt_data(wcd938x, dev);
+ if (ret) {
+ dev_err(dev, "%s: Fail to obtain platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = wcd938x_add_slave_components(wcd938x, dev, &match);
+ if (ret)
+ return ret;
+
+ wcd938x_reset(wcd938x);
+
+ ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
+ if (ret)
+ return ret;
+
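+ /* enable runtime PM with a 1 second autosuspend delay */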
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ return ret;
+}
+
+static int wcd938x_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &wcd938x_comp_ops);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id wcd938x_dt_match[] = {
+ { .compatible = "qcom,wcd9380-codec" },
+ { .compatible = "qcom,wcd9385-codec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wcd938x_dt_match);
+#endif
+
+static struct platform_driver wcd938x_codec_driver = {
+ .probe = wcd938x_probe,
+ .remove = wcd938x_remove,
+ .driver = {
+ .name = "wcd938x_codec",
+ .of_match_table = of_match_ptr(wcd938x_dt_match),
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(wcd938x_codec_driver);
+MODULE_DESCRIPTION("WCD938X Codec driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wcd938x.h b/sound/soc/codecs/wcd938x.h
new file mode 100644
index 000000000000..07b08de4cebf
--- /dev/null
+++ b/sound/soc/codecs/wcd938x.h
@@ -0,0 +1,718 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __WCD938X_H__
+#define __WCD938X_H__
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+
+#define WCD938X_BASE_ADDRESS (0x3000)
+#define WCD938X_ANA_PAGE_REGISTER (0x3000)
+#define WCD938X_ANA_BIAS (0x3001)
+#define WCD938X_ANA_RX_SUPPLIES (0x3008)
+#define WCD938X_RX_BIAS_EN_MASK BIT(0)
+#define WCD938X_REGULATOR_MODE_MASK BIT(1)
+#define WCD938X_REGULATOR_MODE_CLASS_AB 1
+#define WCD938X_VNEG_EN_MASK BIT(6)
+#define WCD938X_VPOS_EN_MASK BIT(7)
+#define WCD938X_ANA_HPH (0x3009)
+#define WCD938X_HPHR_REF_EN_MASK BIT(4)
+#define WCD938X_HPHL_REF_EN_MASK BIT(5)
+#define WCD938X_HPHR_EN_MASK BIT(6)
+#define WCD938X_HPHL_EN_MASK BIT(7)
+#define WCD938X_ANA_EAR (0x300A)
+#define WCD938X_ANA_EAR_COMPANDER_CTL (0x300B)
+#define WCD938X_GAIN_OVRD_REG_MASK BIT(7)
+#define WCD938X_EAR_GAIN_MASK GENMASK(6, 2)
+#define WCD938X_ANA_TX_CH1 (0x300E)
+#define WCD938X_ANA_TX_CH2 (0x300F)
+#define WCD938X_HPF1_INIT_MASK BIT(6)
+#define WCD938X_HPF2_INIT_MASK BIT(5)
+#define WCD938X_ANA_TX_CH3 (0x3010)
+#define WCD938X_ANA_TX_CH4 (0x3011)
+#define WCD938X_HPF3_INIT_MASK BIT(6)
+#define WCD938X_HPF4_INIT_MASK BIT(5)
+#define WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC (0x3012)
+#define WCD938X_ANA_MICB3_DSP_EN_LOGIC (0x3013)
+#define WCD938X_ANA_MBHC_MECH (0x3014)
+#define WCD938X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD938X_MBHC_L_DET_EN BIT(7)
+#define WCD938X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD938X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD938X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD938X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD938X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD938X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD938X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD938X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD938X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD938X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
+
+#define WCD938X_ANA_MBHC_ELECT (0x3015)
+#define WCD938X_ANA_MBHC_BD_ISRC_CTL_MASK GENMASK(6, 4)
+#define WCD938X_ANA_MBHC_BD_ISRC_100UA GENMASK(5, 4)
+#define WCD938X_ANA_MBHC_BD_ISRC_OFF 0
+#define WCD938X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD938X_ANA_MBHC_BIAS_EN BIT(0)
+#define WCD938X_ANA_MBHC_ZDET (0x3016)
+#define WCD938X_ANA_MBHC_RESULT_1 (0x3017)
+#define WCD938X_ANA_MBHC_RESULT_2 (0x3018)
+#define WCD938X_ANA_MBHC_RESULT_3 (0x3019)
+#define WCD938X_MBHC_BTN_RESULT_MASK GENMASK(2, 0)
+#define WCD938X_ANA_MBHC_BTN0 (0x301A)
+#define WCD938X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
+#define WCD938X_ANA_MBHC_BTN1 (0x301B)
+#define WCD938X_ANA_MBHC_BTN2 (0x301C)
+#define WCD938X_ANA_MBHC_BTN3 (0x301D)
+#define WCD938X_ANA_MBHC_BTN4 (0x301E)
+#define WCD938X_ANA_MBHC_BTN5 (0x301F)
+#define WCD938X_VTH_MASK GENMASK(7, 2)
+#define WCD938X_ANA_MBHC_BTN6 (0x3020)
+#define WCD938X_ANA_MBHC_BTN7 (0x3021)
+#define WCD938X_ANA_MICB1 (0x3022)
+#define WCD938X_MICB_VOUT_MASK GENMASK(5, 0)
+#define WCD938X_MICB_EN_MASK GENMASK(7, 6)
+#define WCD938X_MICB_DISABLE 0
+#define WCD938X_MICB_ENABLE 1
+#define WCD938X_MICB_PULL_UP 2
+#define WCD938X_MICB_PULL_DOWN 3
+#define WCD938X_ANA_MICB2 (0x3023)
+#define WCD938X_ANA_MICB2_ENABLE BIT(6)
+#define WCD938X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD938X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD938X_ANA_MICB2_RAMP (0x3024)
+#define WCD938X_RAMP_EN_MASK BIT(7)
+#define WCD938X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
+#define WCD938X_ANA_MICB3 (0x3025)
+#define WCD938X_ANA_MICB4 (0x3026)
+#define WCD938X_BIAS_CTL (0x3028)
+#define WCD938X_BIAS_VBG_FINE_ADJ (0x3029)
+#define WCD938X_LDOL_VDDCX_ADJUST (0x3040)
+#define WCD938X_LDOL_DISABLE_LDOL (0x3041)
+#define WCD938X_MBHC_CTL_CLK (0x3056)
+#define WCD938X_MBHC_CTL_ANA (0x3057)
+#define WCD938X_MBHC_CTL_SPARE_1 (0x3058)
+#define WCD938X_MBHC_CTL_SPARE_2 (0x3059)
+#define WCD938X_MBHC_CTL_BCS (0x305A)
+#define WCD938X_MBHC_MOISTURE_DET_FSM_STATUS (0x305B)
+#define WCD938X_MBHC_TEST_CTL (0x305C)
+#define WCD938X_LDOH_MODE (0x3067)
+#define WCD938X_LDOH_EN_MASK BIT(7)
+#define WCD938X_LDOH_BIAS (0x3068)
+#define WCD938X_LDOH_STB_LOADS (0x3069)
+#define WCD938X_LDOH_SLOWRAMP (0x306A)
+#define WCD938X_MICB1_TEST_CTL_1 (0x306B)
+#define WCD938X_MICB1_TEST_CTL_2 (0x306C)
+#define WCD938X_MICB1_TEST_CTL_3 (0x306D)
+#define WCD938X_MICB2_TEST_CTL_1 (0x306E)
+#define WCD938X_MICB2_TEST_CTL_2 (0x306F)
+#define WCD938X_MICB2_TEST_CTL_3 (0x3070)
+#define WCD938X_MICB3_TEST_CTL_1 (0x3071)
+#define WCD938X_MICB3_TEST_CTL_2 (0x3072)
+#define WCD938X_MICB3_TEST_CTL_3 (0x3073)
+#define WCD938X_MICB4_TEST_CTL_1 (0x3074)
+#define WCD938X_MICB4_TEST_CTL_2 (0x3075)
+#define WCD938X_MICB4_TEST_CTL_3 (0x3076)
+#define WCD938X_TX_COM_ADC_VCM (0x3077)
+#define WCD938X_TX_COM_BIAS_ATEST (0x3078)
+#define WCD938X_TX_COM_SPARE1 (0x3079)
+#define WCD938X_TX_COM_SPARE2 (0x307A)
+#define WCD938X_TX_COM_TXFE_DIV_CTL (0x307B)
+#define WCD938X_TX_COM_TXFE_DIV_START (0x307C)
+#define WCD938X_TX_COM_SPARE3 (0x307D)
+#define WCD938X_TX_COM_SPARE4 (0x307E)
+#define WCD938X_TX_1_2_TEST_EN (0x307F)
+#define WCD938X_TX_1_2_ADC_IB (0x3080)
+#define WCD938X_TX_1_2_ATEST_REFCTL (0x3081)
+#define WCD938X_TX_1_2_TEST_CTL (0x3082)
+#define WCD938X_TX_1_2_TEST_BLK_EN1 (0x3083)
+#define WCD938X_TX_1_2_TXFE1_CLKDIV (0x3084)
+#define WCD938X_TX_1_2_SAR2_ERR (0x3085)
+#define WCD938X_TX_1_2_SAR1_ERR (0x3086)
+#define WCD938X_TX_3_4_TEST_EN (0x3087)
+#define WCD938X_TX_3_4_ADC_IB (0x3088)
+#define WCD938X_TX_3_4_ATEST_REFCTL (0x3089)
+#define WCD938X_TX_3_4_TEST_CTL (0x308A)
+#define WCD938X_TX_3_4_TEST_BLK_EN3 (0x308B)
+#define WCD938X_TX_3_4_TXFE3_CLKDIV (0x308C)
+#define WCD938X_TX_3_4_SAR4_ERR (0x308D)
+#define WCD938X_TX_3_4_SAR3_ERR (0x308E)
+#define WCD938X_TX_3_4_TEST_BLK_EN2 (0x308F)
+#define WCD938X_TX_3_4_TXFE2_CLKDIV (0x3090)
+#define WCD938X_TX_3_4_SPARE1 (0x3091)
+#define WCD938X_TX_3_4_TEST_BLK_EN4 (0x3092)
+#define WCD938X_TX_3_4_TXFE4_CLKDIV (0x3093)
+#define WCD938X_TX_3_4_SPARE2 (0x3094)
+#define WCD938X_CLASSH_MODE_1 (0x3097)
+#define WCD938X_CLASSH_MODE_2 (0x3098)
+#define WCD938X_CLASSH_MODE_3 (0x3099)
+#define WCD938X_CLASSH_CTRL_VCL_1 (0x309A)
+#define WCD938X_CLASSH_CTRL_VCL_2 (0x309B)
+#define WCD938X_CLASSH_CTRL_CCL_1 (0x309C)
+#define WCD938X_CLASSH_CTRL_CCL_2 (0x309D)
+#define WCD938X_CLASSH_CTRL_CCL_3 (0x309E)
+#define WCD938X_CLASSH_CTRL_CCL_4 (0x309F)
+#define WCD938X_CLASSH_CTRL_CCL_5 (0x30A0)
+#define WCD938X_CLASSH_BUCK_TMUX_A_D (0x30A1)
+#define WCD938X_CLASSH_BUCK_SW_DRV_CNTL (0x30A2)
+#define WCD938X_CLASSH_SPARE (0x30A3)
+#define WCD938X_FLYBACK_EN (0x30A4)
+#define WCD938X_EN_CUR_DET_MASK BIT(2)
+#define WCD938X_FLYBACK_VNEG_CTRL_1 (0x30A5)
+#define WCD938X_FLYBACK_VNEG_CTRL_2 (0x30A6)
+#define WCD938X_FLYBACK_VNEG_CTRL_3 (0x30A7)
+#define WCD938X_FLYBACK_VNEG_CTRL_4 (0x30A8)
+#define WCD938X_FLYBACK_VNEG_CTRL_5 (0x30A9)
+#define WCD938X_FLYBACK_VNEG_CTRL_6 (0x30AA)
+#define WCD938X_FLYBACK_VNEG_CTRL_7 (0x30AB)
+#define WCD938X_FLYBACK_VNEG_CTRL_8 (0x30AC)
+#define WCD938X_FLYBACK_VNEG_CTRL_9 (0x30AD)
+#define WCD938X_FLYBACK_VNEGDAC_CTRL_1 (0x30AE)
+#define WCD938X_FLYBACK_VNEGDAC_CTRL_2 (0x30AF)
+#define WCD938X_FLYBACK_VNEGDAC_CTRL_3 (0x30B0)
+#define WCD938X_FLYBACK_CTRL_1 (0x30B1)
+#define WCD938X_FLYBACK_TEST_CTL (0x30B2)
+#define WCD938X_RX_AUX_SW_CTL (0x30B3)
+#define WCD938X_RX_PA_AUX_IN_CONN (0x30B4)
+#define WCD938X_RX_TIMER_DIV (0x30B5)
+#define WCD938X_RX_OCP_CTL (0x30B6)
+#define WCD938X_RX_OCP_COUNT (0x30B7)
+#define WCD938X_RX_BIAS_EAR_DAC (0x30B8)
+#define WCD938X_RX_BIAS_EAR_AMP (0x30B9)
+#define WCD938X_RX_BIAS_HPH_LDO (0x30BA)
+#define WCD938X_RX_BIAS_HPH_PA (0x30BB)
+#define WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2 (0x30BC)
+#define WCD938X_RX_BIAS_HPH_RDAC_LDO (0x30BD)
+#define WCD938X_RX_BIAS_HPH_CNP1 (0x30BE)
+#define WCD938X_RX_BIAS_HPH_LOWPOWER (0x30BF)
+#define WCD938X_RX_BIAS_AUX_DAC (0x30C0)
+#define WCD938X_RX_BIAS_AUX_AMP (0x30C1)
+#define WCD938X_RX_BIAS_VNEGDAC_BLEEDER (0x30C2)
+#define WCD938X_RX_BIAS_MISC (0x30C3)
+#define WCD938X_RX_BIAS_BUCK_RST (0x30C4)
+#define WCD938X_RX_BIAS_BUCK_VREF_ERRAMP (0x30C5)
+#define WCD938X_RX_BIAS_FLYB_ERRAMP (0x30C6)
+#define WCD938X_RX_BIAS_FLYB_BUFF (0x30C7)
+#define WCD938X_RX_BIAS_FLYB_MID_RST (0x30C8)
+#define WCD938X_HPH_L_STATUS (0x30C9)
+#define WCD938X_HPH_R_STATUS (0x30CA)
+#define WCD938X_HPH_CNP_EN (0x30CB)
+#define WCD938X_HPH_CNP_WG_CTL (0x30CC)
+#define WCD938X_HPH_CNP_WG_TIME (0x30CD)
+#define WCD938X_HPH_OCP_CTL (0x30CE)
+#define WCD938X_HPH_AUTO_CHOP (0x30CF)
+#define WCD938X_HPH_CHOP_CTL (0x30D0)
+#define WCD938X_HPH_PA_CTL1 (0x30D1)
+#define WCD938X_HPH_PA_CTL2 (0x30D2)
+#define WCD938X_HPHPA_GND_R_MASK BIT(6)
+#define WCD938X_HPHPA_GND_L_MASK BIT(4)
+#define WCD938X_HPH_L_EN (0x30D3)
+#define WCD938X_HPH_L_TEST (0x30D4)
+#define WCD938X_HPH_L_ATEST (0x30D5)
+#define WCD938X_HPH_R_EN (0x30D6)
+#define WCD938X_GAIN_SRC_SEL_MASK BIT(5)
+#define WCD938X_GAIN_SRC_SEL_REGISTER 1
+#define WCD938X_HPH_R_TEST (0x30D7)
+#define WCD938X_HPH_R_ATEST (0x30D8)
+#define WCD938X_HPHPA_GND_OVR_MASK BIT(1)
+#define WCD938X_HPH_RDAC_CLK_CTL1 (0x30D9)
+#define WCD938X_CHOP_CLK_EN_MASK BIT(7)
+#define WCD938X_HPH_RDAC_CLK_CTL2 (0x30DA)
+#define WCD938X_HPH_RDAC_LDO_CTL (0x30DB)
+#define WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL (0x30DC)
+#define WCD938X_HPH_REFBUFF_UHQA_CTL (0x30DD)
+#define WCD938X_HPH_REFBUFF_LP_CTL (0x30DE)
+#define WCD938X_PREREF_FLIT_BYPASS_MASK BIT(0)
+#define WCD938X_HPH_L_DAC_CTL (0x30DF)
+#define WCD938X_HPH_R_DAC_CTL (0x30E0)
+#define WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL (0x30E1)
+#define WCD938X_HPH_SURGE_HPHLR_SURGE_EN (0x30E2)
+#define WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1 (0x30E3)
+#define WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS (0x30E4)
+#define WCD938X_EAR_EAR_EN_REG (0x30E9)
+#define WCD938X_EAR_EAR_PA_CON (0x30EA)
+#define WCD938X_EAR_EAR_SP_CON (0x30EB)
+#define WCD938X_EAR_EAR_DAC_CON (0x30EC)
+#define WCD938X_DAC_SAMPLE_EDGE_SEL_MASK BIT(7)
+#define WCD938X_EAR_EAR_CNP_FSM_CON (0x30ED)
+#define WCD938X_EAR_TEST_CTL (0x30EE)
+#define WCD938X_EAR_STATUS_REG_1 (0x30EF)
+#define WCD938X_EAR_STATUS_REG_2 (0x30F0)
+#define WCD938X_ANA_NEW_PAGE_REGISTER (0x3100)
+#define WCD938X_HPH_NEW_ANA_HPH2 (0x3101)
+#define WCD938X_HPH_NEW_ANA_HPH3 (0x3102)
+#define WCD938X_SLEEP_CTL (0x3103)
+#define WCD938X_SLEEP_WATCHDOG_CTL (0x3104)
+#define WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL (0x311F)
+#define WCD938X_MBHC_NEW_CTL_1 (0x3120)
+#define WCD938X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD938X_MBHC_CTL_RCO_EN BIT(7)
+#define WCD938X_MBHC_BTN_DBNC_MASK GENMASK(1, 0)
+#define WCD938X_MBHC_BTN_DBNC_T_16_MS 0x2
+#define WCD938X_MBHC_NEW_CTL_2 (0x3121)
+#define WCD938X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD938X_MBHC_HS_VREF_CTL_MASK GENMASK(1, 0)
+#define WCD938X_MBHC_HS_VREF_1P5_V 0x1
+#define WCD938X_MBHC_NEW_PLUG_DETECT_CTL (0x3122)
+#define WCD938X_MBHC_DBNC_TIMER_INSREM_DBNC_T_96_MS 0x6
+
+#define WCD938X_MBHC_NEW_ZDET_ANA_CTL (0x3123)
+#define WCD938X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD938X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD938X_MBHC_NEW_ZDET_RAMP_CTL (0x3124)
+#define WCD938X_MBHC_NEW_FSM_STATUS (0x3125)
+#define WCD938X_MBHC_NEW_ADC_RESULT (0x3126)
+#define WCD938X_TX_NEW_AMIC_MUX_CFG (0x3127)
+#define WCD938X_AUX_AUXPA (0x3128)
+#define WCD938X_AUXPA_CLK_EN_MASK BIT(4)
+#define WCD938X_LDORXTX_MODE (0x3129)
+#define WCD938X_LDORXTX_CONFIG (0x312A)
+#define WCD938X_DIE_CRACK_DIE_CRK_DET_EN (0x312C)
+#define WCD938X_DIE_CRACK_DIE_CRK_DET_OUT (0x312D)
+#define WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL (0x3132)
+#define WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L (0x3133)
+#define WCD938X_HPH_NEW_INT_RDAC_VREF_CTL (0x3134)
+#define WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL (0x3135)
+#define WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R (0x3136)
+#define WCD938X_HPH_RES_DIV_MASK GENMASK(4, 0)
+#define WCD938X_HPH_NEW_INT_PA_MISC1 (0x3137)
+#define WCD938X_HPH_NEW_INT_PA_MISC2 (0x3138)
+#define WCD938X_HPH_NEW_INT_PA_RDAC_MISC (0x3139)
+#define WCD938X_HPH_NEW_INT_HPH_TIMER1 (0x313A)
+#define WCD938X_AUTOCHOP_TIMER_EN BIT(1)
+#define WCD938X_HPH_NEW_INT_HPH_TIMER2 (0x313B)
+#define WCD938X_HPH_NEW_INT_HPH_TIMER3 (0x313C)
+#define WCD938X_HPH_NEW_INT_HPH_TIMER4 (0x313D)
+#define WCD938X_HPH_NEW_INT_PA_RDAC_MISC2 (0x313E)
+#define WCD938X_HPH_NEW_INT_PA_RDAC_MISC3 (0x313F)
+#define WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW (0x3140)
+#define WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW (0x3141)
+#define WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI (0x3145)
+#define WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP (0x3146)
+#define WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP (0x3147)
+#define WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL (0x31AF)
+#define WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL (0x31B0)
+#define WCD938X_MOISTURE_EN_POLLING_MASK BIT(2)
+#define WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT (0x31B1)
+#define WCD938X_HSDET_PULLUP_C_MASK GENMASK(4, 0)
+#define WCD938X_MBHC_NEW_INT_SPARE_2 (0x31B2)
+#define WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON (0x31B7)
+#define WCD938X_EAR_INT_NEW_CNP_VCM_CON1 (0x31B8)
+#define WCD938X_EAR_INT_NEW_CNP_VCM_CON2 (0x31B9)
+#define WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS (0x31BA)
+#define WCD938X_AUX_INT_EN_REG (0x31BD)
+#define WCD938X_AUX_INT_PA_CTRL (0x31BE)
+#define WCD938X_AUX_INT_SP_CTRL (0x31BF)
+#define WCD938X_AUX_INT_DAC_CTRL (0x31C0)
+#define WCD938X_AUX_INT_CLK_CTRL (0x31C1)
+#define WCD938X_AUX_INT_TEST_CTRL (0x31C2)
+#define WCD938X_AUX_INT_STATUS_REG (0x31C3)
+#define WCD938X_AUX_INT_MISC (0x31C4)
+#define WCD938X_LDORXTX_INT_BIAS (0x31C5)
+#define WCD938X_LDORXTX_INT_STB_LOADS_DTEST (0x31C6)
+#define WCD938X_LDORXTX_INT_TEST0 (0x31C7)
+#define WCD938X_LDORXTX_INT_STARTUP_TIMER (0x31C8)
+#define WCD938X_LDORXTX_INT_TEST1 (0x31C9)
+#define WCD938X_LDORXTX_INT_STATUS (0x31CA)
+#define WCD938X_SLEEP_INT_WATCHDOG_CTL_1 (0x31D0)
+#define WCD938X_SLEEP_INT_WATCHDOG_CTL_2 (0x31D1)
+#define WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1 (0x31D3)
+#define WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2 (0x31D4)
+#define WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2 (0x31D5)
+#define WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1 (0x31D6)
+#define WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0 (0x31D7)
+#define WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M (0x31D8)
+#define WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M (0x31D9)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1 (0x31DA)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0 (0x31DB)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP (0x31DC)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1 (0x31DD)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0 (0x31DE)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP (0x31DF)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0 (0x31E0)
+#define WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP (0x31E1)
+#define WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1 (0x31E2)
+#define WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP (0x31E3)
+#define WCD938X_TX_COM_NEW_INT_TXADC_INT_L2 (0x31E4)
+#define WCD938X_TX_COM_NEW_INT_TXADC_INT_L1 (0x31E5)
+#define WCD938X_TX_COM_NEW_INT_TXADC_INT_L0 (0x31E6)
+#define WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP (0x31E7)
+#define WCD938X_DIGITAL_PAGE_REGISTER (0x3400)
+#define WCD938X_DIGITAL_CHIP_ID0 (0x3401)
+#define WCD938X_DIGITAL_CHIP_ID1 (0x3402)
+#define WCD938X_DIGITAL_CHIP_ID2 (0x3403)
+#define WCD938X_DIGITAL_CHIP_ID3 (0x3404)
+#define WCD938X_DIGITAL_SWR_TX_CLK_RATE (0x3405)
+#define WCD938X_DIGITAL_CDC_RST_CTL (0x3406)
+#define WCD938X_DIGITAL_TOP_CLK_CFG (0x3407)
+#define WCD938X_DIGITAL_CDC_ANA_CLK_CTL (0x3408)
+#define WCD938X_ANA_RX_CLK_EN_MASK BIT(0)
+#define WCD938X_ANA_RX_DIV2_CLK_EN_MASK BIT(1)
+#define WCD938X_ANA_RX_DIV4_CLK_EN_MASK BIT(2)
+#define WCD938X_ANA_TX_CLK_EN_MASK BIT(3)
+#define WCD938X_ANA_TX_DIV2_CLK_EN_MASK BIT(4)
+#define WCD938X_ANA_TX_DIV4_CLK_EN_MASK BIT(5)
+#define WCD938X_DIGITAL_CDC_DIG_CLK_CTL (0x3409)
+#define WCD938X_TXD3_CLK_EN_MASK BIT(7)
+#define WCD938X_TXD2_CLK_EN_MASK BIT(6)
+#define WCD938X_TXD1_CLK_EN_MASK BIT(5)
+#define WCD938X_TXD0_CLK_EN_MASK BIT(4)
+#define WCD938X_TX_CLK_EN_MASK GENMASK(7, 4)
+#define WCD938X_RXD2_CLK_EN_MASK BIT(2)
+#define WCD938X_RXD1_CLK_EN_MASK BIT(1)
+#define WCD938X_RXD0_CLK_EN_MASK BIT(0)
+#define WCD938X_DIGITAL_SWR_RST_EN (0x340A)
+#define WCD938X_DIGITAL_CDC_PATH_MODE (0x340B)
+#define WCD938X_DIGITAL_CDC_RX_RST (0x340C)
+#define WCD938X_DIGITAL_CDC_RX0_CTL (0x340D)
+#define WCD938X_DEM_DITHER_ENABLE_MASK BIT(6)
+#define WCD938X_DIGITAL_CDC_RX1_CTL (0x340E)
+#define WCD938X_DIGITAL_CDC_RX2_CTL (0x340F)
+#define WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1 (0x3410)
+#define WCD938X_TXD0_MODE_MASK GENMASK(3, 0)
+#define WCD938X_TXD1_MODE_MASK GENMASK(7, 4)
+#define WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3 (0x3411)
+#define WCD938X_TXD2_MODE_MASK GENMASK(3, 0)
+#define WCD938X_TXD3_MODE_MASK GENMASK(7, 4)
+#define WCD938X_DIGITAL_CDC_COMP_CTL_0 (0x3414)
+#define WCD938X_HPHR_COMP_EN_MASK BIT(0)
+#define WCD938X_HPHL_COMP_EN_MASK BIT(1)
+#define WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL (0x3417)
+#define WCD938X_TX_SC_CLK_EN_MASK BIT(0)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A1_0 (0x3418)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A1_1 (0x3419)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A2_0 (0x341A)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A2_1 (0x341B)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A3_0 (0x341C)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A3_1 (0x341D)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A4_0 (0x341E)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A4_1 (0x341F)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A5_0 (0x3420)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A5_1 (0x3421)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A6_0 (0x3422)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_A7_0 (0x3423)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_C_0 (0x3424)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_C_1 (0x3425)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_C_2 (0x3426)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_C_3 (0x3427)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R1 (0x3428)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R2 (0x3429)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R3 (0x342A)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R4 (0x342B)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R5 (0x342C)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R6 (0x342D)
+#define WCD938X_DIGITAL_CDC_HPH_DSM_R7 (0x342E)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A1_0 (0x342F)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A1_1 (0x3430)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A2_0 (0x3431)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A2_1 (0x3432)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A3_0 (0x3433)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A3_1 (0x3434)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A4_0 (0x3435)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A4_1 (0x3436)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A5_0 (0x3437)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A5_1 (0x3438)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A6_0 (0x3439)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_A7_0 (0x343A)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_C_0 (0x343B)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_C_1 (0x343C)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_C_2 (0x343D)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_C_3 (0x343E)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R1 (0x343F)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R2 (0x3440)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R3 (0x3441)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R4 (0x3442)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R5 (0x3443)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R6 (0x3444)
+#define WCD938X_DIGITAL_CDC_AUX_DSM_R7 (0x3445)
+#define WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0 (0x3446)
+#define WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1 (0x3447)
+#define WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0 (0x3448)
+#define WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1 (0x3449)
+#define WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2 (0x344A)
+#define WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0 (0x344B)
+#define WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1 (0x344C)
+#define WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2 (0x344D)
+#define WCD938X_DIGITAL_CDC_HPH_GAIN_CTL (0x344E)
+#define WCD938X_HPHL_RX_EN_MASK BIT(2)
+#define WCD938X_HPHR_RX_EN_MASK BIT(3)
+#define WCD938X_DIGITAL_CDC_AUX_GAIN_CTL (0x344F)
+#define WCD938X_AUX_EN_MASK BIT(0)
+#define WCD938X_DIGITAL_CDC_EAR_PATH_CTL (0x3450)
+#define WCD938X_DIGITAL_CDC_SWR_CLH (0x3451)
+#define WCD938X_DIGITAL_SWR_CLH_BYP (0x3452)
+#define WCD938X_DIGITAL_CDC_TX0_CTL (0x3453)
+#define WCD938X_DIGITAL_CDC_TX1_CTL (0x3454)
+#define WCD938X_DIGITAL_CDC_TX2_CTL (0x3455)
+#define WCD938X_DIGITAL_CDC_TX_RST (0x3456)
+#define WCD938X_DIGITAL_CDC_REQ_CTL (0x3457)
+#define WCD938X_FS_RATE_4P8_MASK BIT(1)
+#define WCD938X_NO_NOTCH_MASK BIT(0)
+#define WCD938X_DIGITAL_CDC_RST (0x3458)
+#define WCD938X_DIGITAL_CDC_AMIC_CTL (0x345A)
+#define WCD938X_AMIC1_IN_SEL_DMIC 0
+#define WCD938X_AMIC1_IN_SEL_AMIC 0
+#define WCD938X_AMIC1_IN_SEL_MASK BIT(0)
+#define WCD938X_AMIC3_IN_SEL_MASK BIT(1)
+#define WCD938X_AMIC4_IN_SEL_MASK BIT(2)
+#define WCD938X_AMIC5_IN_SEL_MASK BIT(3)
+#define WCD938X_DIGITAL_CDC_DMIC_CTL (0x345B)
+#define WCD938X_DMIC_CLK_SCALING_EN_MASK GENMASK(2, 1)
+#define WCD938X_DIGITAL_CDC_DMIC1_CTL (0x345C)
+#define WCD938X_DMIC_CLK_EN_MASK BIT(3)
+#define WCD938X_DIGITAL_CDC_DMIC2_CTL (0x345D)
+#define WCD938X_DIGITAL_CDC_DMIC3_CTL (0x345E)
+#define WCD938X_DIGITAL_CDC_DMIC4_CTL (0x345F)
+#define WCD938X_DIGITAL_EFUSE_PRG_CTL (0x3460)
+#define WCD938X_DIGITAL_EFUSE_CTL (0x3461)
+#define WCD938X_DIGITAL_CDC_DMIC_RATE_1_2 (0x3462)
+#define WCD938X_DIGITAL_CDC_DMIC_RATE_3_4 (0x3463)
+#define WCD938X_DMIC1_RATE_MASK GENMASK(3, 0)
+#define WCD938X_DMIC2_RATE_MASK GENMASK(7, 4)
+#define WCD938X_DMIC3_RATE_MASK GENMASK(3, 0)
+#define WCD938X_DMIC4_RATE_MASK GENMASK(7, 4)
+#define WCD938X_DMIC4_RATE_2P4MHZ 3
+
+#define WCD938X_DIGITAL_PDM_WD_CTL0 (0x3465)
+#define WCD938X_PDM_WD_EN_MASK GENMASK(2, 0)
+#define WCD938X_DIGITAL_PDM_WD_CTL1 (0x3466)
+#define WCD938X_DIGITAL_PDM_WD_CTL2 (0x3467)
+#define WCD938X_AUX_PDM_WD_EN_MASK GENMASK(2, 0)
+#define WCD938X_DIGITAL_INTR_MODE (0x346A)
+#define WCD938X_DIGITAL_INTR_MASK_0 (0x346B)
+#define WCD938X_DIGITAL_INTR_MASK_1 (0x346C)
+#define WCD938X_DIGITAL_INTR_MASK_2 (0x346D)
+#define WCD938X_DIGITAL_INTR_STATUS_0 (0x346E)
+#define WCD938X_DIGITAL_INTR_STATUS_1 (0x346F)
+#define WCD938X_DIGITAL_INTR_STATUS_2 (0x3470)
+#define WCD938X_DIGITAL_INTR_CLEAR_0 (0x3471)
+#define WCD938X_DIGITAL_INTR_CLEAR_1 (0x3472)
+#define WCD938X_DIGITAL_INTR_CLEAR_2 (0x3473)
+#define WCD938X_DIGITAL_INTR_LEVEL_0 (0x3474)
+#define WCD938X_DIGITAL_INTR_LEVEL_1 (0x3475)
+#define WCD938X_DIGITAL_INTR_LEVEL_2 (0x3476)
+#define WCD938X_DIGITAL_INTR_SET_0 (0x3477)
+#define WCD938X_DIGITAL_INTR_SET_1 (0x3478)
+#define WCD938X_DIGITAL_INTR_SET_2 (0x3479)
+#define WCD938X_DIGITAL_INTR_TEST_0 (0x347A)
+#define WCD938X_DIGITAL_INTR_TEST_1 (0x347B)
+#define WCD938X_DIGITAL_INTR_TEST_2 (0x347C)
+#define WCD938X_DIGITAL_TX_MODE_DBG_EN (0x347F)
+#define WCD938X_DIGITAL_TX_MODE_DBG_0_1 (0x3480)
+#define WCD938X_DIGITAL_TX_MODE_DBG_2_3 (0x3481)
+#define WCD938X_DIGITAL_LB_IN_SEL_CTL (0x3482)
+#define WCD938X_DIGITAL_LOOP_BACK_MODE (0x3483)
+#define WCD938X_DIGITAL_SWR_DAC_TEST (0x3484)
+#define WCD938X_DIGITAL_SWR_HM_TEST_RX_0 (0x3485)
+#define WCD938X_DIGITAL_SWR_HM_TEST_TX_0 (0x3486)
+#define WCD938X_DIGITAL_SWR_HM_TEST_RX_1 (0x3487)
+#define WCD938X_DIGITAL_SWR_HM_TEST_TX_1 (0x3488)
+#define WCD938X_DIGITAL_SWR_HM_TEST_TX_2 (0x3489)
+#define WCD938X_DIGITAL_SWR_HM_TEST_0 (0x348A)
+#define WCD938X_DIGITAL_SWR_HM_TEST_1 (0x348B)
+#define WCD938X_DIGITAL_PAD_CTL_SWR_0 (0x348C)
+#define WCD938X_DIGITAL_PAD_CTL_SWR_1 (0x348D)
+#define WCD938X_DIGITAL_I2C_CTL (0x348E)
+#define WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE (0x348F)
+#define WCD938X_DIGITAL_EFUSE_TEST_CTL_0 (0x3490)
+#define WCD938X_DIGITAL_EFUSE_TEST_CTL_1 (0x3491)
+#define WCD938X_DIGITAL_EFUSE_T_DATA_0 (0x3492)
+#define WCD938X_DIGITAL_EFUSE_T_DATA_1 (0x3493)
+#define WCD938X_DIGITAL_PAD_CTL_PDM_RX0 (0x3494)
+#define WCD938X_DIGITAL_PAD_CTL_PDM_RX1 (0x3495)
+#define WCD938X_DIGITAL_PAD_CTL_PDM_TX0 (0x3496)
+#define WCD938X_DIGITAL_PAD_CTL_PDM_TX1 (0x3497)
+#define WCD938X_DIGITAL_PAD_CTL_PDM_TX2 (0x3498)
+#define WCD938X_DIGITAL_PAD_INP_DIS_0 (0x3499)
+#define WCD938X_DIGITAL_PAD_INP_DIS_1 (0x349A)
+#define WCD938X_DIGITAL_DRIVE_STRENGTH_0 (0x349B)
+#define WCD938X_DIGITAL_DRIVE_STRENGTH_1 (0x349C)
+#define WCD938X_DIGITAL_DRIVE_STRENGTH_2 (0x349D)
+#define WCD938X_DIGITAL_RX_DATA_EDGE_CTL (0x349E)
+#define WCD938X_DIGITAL_TX_DATA_EDGE_CTL (0x349F)
+#define WCD938X_DIGITAL_GPIO_MODE (0x34A0)
+#define WCD938X_DIGITAL_PIN_CTL_OE (0x34A1)
+#define WCD938X_DIGITAL_PIN_CTL_DATA_0 (0x34A2)
+#define WCD938X_DIGITAL_PIN_CTL_DATA_1 (0x34A3)
+#define WCD938X_DIGITAL_PIN_STATUS_0 (0x34A4)
+#define WCD938X_DIGITAL_PIN_STATUS_1 (0x34A5)
+#define WCD938X_DIGITAL_DIG_DEBUG_CTL (0x34A6)
+#define WCD938X_DIGITAL_DIG_DEBUG_EN (0x34A7)
+#define WCD938X_DIGITAL_ANA_CSR_DBG_ADD (0x34A8)
+#define WCD938X_DIGITAL_ANA_CSR_DBG_CTL (0x34A9)
+#define WCD938X_DIGITAL_SSP_DBG (0x34AA)
+#define WCD938X_DIGITAL_MODE_STATUS_0 (0x34AB)
+#define WCD938X_DIGITAL_MODE_STATUS_1 (0x34AC)
+#define WCD938X_DIGITAL_SPARE_0 (0x34AD)
+#define WCD938X_DIGITAL_SPARE_1 (0x34AE)
+#define WCD938X_DIGITAL_SPARE_2 (0x34AF)
+#define WCD938X_DIGITAL_EFUSE_REG_0 (0x34B0)
+#define WCD938X_ID_MASK GENMASK(4, 1)
+#define WCD938X_DIGITAL_EFUSE_REG_1 (0x34B1)
+#define WCD938X_DIGITAL_EFUSE_REG_2 (0x34B2)
+#define WCD938X_DIGITAL_EFUSE_REG_3 (0x34B3)
+#define WCD938X_DIGITAL_EFUSE_REG_4 (0x34B4)
+#define WCD938X_DIGITAL_EFUSE_REG_5 (0x34B5)
+#define WCD938X_DIGITAL_EFUSE_REG_6 (0x34B6)
+#define WCD938X_DIGITAL_EFUSE_REG_7 (0x34B7)
+#define WCD938X_DIGITAL_EFUSE_REG_8 (0x34B8)
+#define WCD938X_DIGITAL_EFUSE_REG_9 (0x34B9)
+#define WCD938X_DIGITAL_EFUSE_REG_10 (0x34BA)
+#define WCD938X_DIGITAL_EFUSE_REG_11 (0x34BB)
+#define WCD938X_DIGITAL_EFUSE_REG_12 (0x34BC)
+#define WCD938X_DIGITAL_EFUSE_REG_13 (0x34BD)
+#define WCD938X_DIGITAL_EFUSE_REG_14 (0x34BE)
+#define WCD938X_DIGITAL_EFUSE_REG_15 (0x34BF)
+#define WCD938X_DIGITAL_EFUSE_REG_16 (0x34C0)
+#define WCD938X_DIGITAL_EFUSE_REG_17 (0x34C1)
+#define WCD938X_DIGITAL_EFUSE_REG_18 (0x34C2)
+#define WCD938X_DIGITAL_EFUSE_REG_19 (0x34C3)
+#define WCD938X_DIGITAL_EFUSE_REG_20 (0x34C4)
+#define WCD938X_DIGITAL_EFUSE_REG_21 (0x34C5)
+#define WCD938X_DIGITAL_EFUSE_REG_22 (0x34C6)
+#define WCD938X_DIGITAL_EFUSE_REG_23 (0x34C7)
+#define WCD938X_DIGITAL_EFUSE_REG_24 (0x34C8)
+#define WCD938X_DIGITAL_EFUSE_REG_25 (0x34C9)
+#define WCD938X_DIGITAL_EFUSE_REG_26 (0x34CA)
+#define WCD938X_DIGITAL_EFUSE_REG_27 (0x34CB)
+#define WCD938X_DIGITAL_EFUSE_REG_28 (0x34CC)
+#define WCD938X_DIGITAL_EFUSE_REG_29 (0x34CD)
+#define WCD938X_DIGITAL_EFUSE_REG_30 (0x34CE)
+#define WCD938X_DIGITAL_EFUSE_REG_31 (0x34CF)
+#define WCD938X_DIGITAL_TX_REQ_FB_CTL_0 (0x34D0)
+#define WCD938X_DIGITAL_TX_REQ_FB_CTL_1 (0x34D1)
+#define WCD938X_DIGITAL_TX_REQ_FB_CTL_2 (0x34D2)
+#define WCD938X_DIGITAL_TX_REQ_FB_CTL_3 (0x34D3)
+#define WCD938X_DIGITAL_TX_REQ_FB_CTL_4 (0x34D4)
+#define WCD938X_DIGITAL_DEM_BYPASS_DATA0 (0x34D5)
+#define WCD938X_DIGITAL_DEM_BYPASS_DATA1 (0x34D6)
+#define WCD938X_DIGITAL_DEM_BYPASS_DATA2 (0x34D7)
+#define WCD938X_DIGITAL_DEM_BYPASS_DATA3 (0x34D8)
+#define WCD938X_MAX_REGISTER (WCD938X_DIGITAL_DEM_BYPASS_DATA3)
+
+#define WCD938X_MAX_SWR_PORTS 5
+#define WCD938X_MAX_TX_SWR_PORTS 4
+#define WCD938X_MAX_SWR_CH_IDS 15
+
+struct wcd938x_sdw_ch_info {
+ int port_num;
+ unsigned int ch_mask;
+};
+
+#define WCD_SDW_CH(id, pn, cmask) \
+ [id] = { \
+ .port_num = pn, \
+ .ch_mask = cmask, \
+ }
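+/*
+ * Illustrative use only (the real channel tables are expected in the
+ * SoundWire part of the driver):
+ *	WCD_SDW_CH(WCD938X_ADC1, WCD938X_ADC_1_2_PORT, BIT(0))
+ */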
+
+enum wcd938x_tx_sdw_ports {
+ WCD938X_ADC_1_2_PORT = 1,
+ WCD938X_ADC_3_4_PORT,
+ /* DMIC0_0, DMIC0_1, DMIC1_0, DMIC1_1 */
+ WCD938X_DMIC_0_3_MBHC_PORT,
+ WCD938X_DMIC_4_7_PORT,
+};
+
+enum wcd938x_tx_sdw_channels {
+ WCD938X_ADC1,
+ WCD938X_ADC2,
+ WCD938X_ADC3,
+ WCD938X_ADC4,
+ WCD938X_DMIC0,
+ WCD938X_DMIC1,
+ WCD938X_MBHC,
+ WCD938X_DMIC2,
+ WCD938X_DMIC3,
+ WCD938X_DMIC4,
+ WCD938X_DMIC5,
+ WCD938X_DMIC6,
+ WCD938X_DMIC7,
+};
+
+enum wcd938x_rx_sdw_ports {
+ WCD938X_HPH_PORT = 1,
+ WCD938X_CLSH_PORT,
+ WCD938X_COMP_PORT,
+ WCD938X_LO_PORT,
+ WCD938X_DSD_PORT,
+};
+
+enum wcd938x_rx_sdw_channels {
+ WCD938X_HPH_L,
+ WCD938X_HPH_R,
+ WCD938X_CLSH,
+ WCD938X_COMP_L,
+ WCD938X_COMP_R,
+ WCD938X_LO,
+ WCD938X_DSD_R,
+ WCD938X_DSD_L,
+};
+
+enum {
+ WCD938X_SDW_DIR_RX,
+ WCD938X_SDW_DIR_TX,
+};
+
+struct wcd938x_priv;
+struct wcd938x_sdw_priv {
+ struct sdw_slave *sdev;
+ struct sdw_stream_config sconfig;
+ struct sdw_stream_runtime *sruntime;
+ struct sdw_port_config port_config[WCD938X_MAX_SWR_PORTS];
+ struct wcd938x_sdw_ch_info *ch_info;
+ bool port_enable[WCD938X_MAX_SWR_CH_IDS];
+ int port_map[WCD938X_MAX_SWR_PORTS];
+ int active_ports;
+ int num_ports;
+ bool is_tx;
+ struct wcd938x_priv *wcd938x;
+ struct irq_domain *slave_irq;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_WCD938X_SDW)
+int wcd938x_sdw_free(struct wcd938x_sdw_priv *wcd,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int wcd938x_sdw_set_sdw_stream(struct wcd938x_sdw_priv *wcd,
+ struct snd_soc_dai *dai,
+ void *stream, int direction);
+int wcd938x_sdw_hw_params(struct wcd938x_sdw_priv *wcd,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+
+struct device *wcd938x_sdw_device_get(struct device_node *np);
+int wcd938x_swr_get_current_bank(struct sdw_slave *sdev);
+
+#else
+
+static inline int wcd938x_sdw_free(struct wcd938x_sdw_priv *wcd,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int wcd938x_sdw_set_sdw_stream(struct wcd938x_sdw_priv *wcd,
+ struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int wcd938x_sdw_hw_params(struct wcd938x_sdw_priv *wcd,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct device *wcd938x_sdw_device_get(struct device_node *np)
+{
+ return NULL;
+}
+
+static inline int wcd938x_swr_get_current_bank(struct sdw_slave *sdev)
+{
+ return 0;
+}
+#endif /* CONFIG_SND_SOC_WCD938X_SDW */
+#endif /* __WCD938X_H__ */
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index b0a6d31299bb..c35673e7f420 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -30,7 +30,6 @@
#include <sound/wm2200.h>
#include "wm2200.h"
-#include "wmfw.h"
#include "wm_adsp.h"
#define WM2200_DSP_CONTROL_1 0x00
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 34b665895bdf..621598608bf0 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1989,7 +1989,7 @@ static unsigned int wm5102_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct snd_compress_ops wm5102_compress_ops = {
+static const struct snd_compress_ops wm5102_compress_ops = {
.open = wm5102_open,
.free = wm_adsp_compr_free,
.set_params = wm_adsp_compr_set_params,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 76efca0fe515..5c2d45d05c97 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2355,7 +2355,7 @@ static unsigned int wm5110_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_6R,
};
-static struct snd_compress_ops wm5110_compress_ops = {
+static const struct snd_compress_ops wm5110_compress_ops = {
.open = wm5110_open,
.free = wm_adsp_compr_free,
.set_params = wm_adsp_compr_set_params,
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 536339e43dc7..e4018ba3b19a 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -912,13 +912,13 @@ void wm8958_dsp2_init(struct snd_soc_component *component)
/* We don't *require* firmware and don't want to delay boot */
- request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
"wm8958_mbc.wfw", component->dev, GFP_KERNEL,
component, wm8958_mbc_loaded);
- request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
"wm8958_mbc_vss.wfw", component->dev, GFP_KERNEL,
component, wm8958_mbc_vss_loaded);
- request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
"wm8958_enh_eq.wfw", component->dev, GFP_KERNEL,
component, wm8958_enh_eq_loaded);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 34080f497584..ba16bdf9e478 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3219,9 +3219,8 @@ static int wm8962_beep_event(struct input_dev *dev, unsigned int type,
return 0;
}
-static ssize_t wm8962_beep_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t beep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct wm8962_priv *wm8962 = dev_get_drvdata(dev);
long int time;
@@ -3236,7 +3235,7 @@ static ssize_t wm8962_beep_set(struct device *dev,
return count;
}
-static DEVICE_ATTR(beep, 0200, NULL, wm8962_beep_set);
+static DEVICE_ATTR_WO(beep);
static void wm8962_init_beep(struct snd_soc_component *component)
{
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 3dc119daf2f6..fe15cbc7bcaf 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -282,6 +282,7 @@
/*
* HALO_CCM_CORE_CONTROL
*/
+#define HALO_CORE_RESET 0x00000200
#define HALO_CORE_EN 0x00000001
/*
@@ -303,9 +304,9 @@
#define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff
#define HALO_MPU_VIO_ERR_SRC_SHIFT 0
-static struct wm_adsp_ops wm_adsp1_ops;
-static struct wm_adsp_ops wm_adsp2_ops[];
-static struct wm_adsp_ops wm_halo_ops;
+static const struct wm_adsp_ops wm_adsp1_ops;
+static const struct wm_adsp_ops wm_adsp2_ops[];
+static const struct wm_adsp_ops wm_halo_ops;
struct wm_adsp_buf {
struct list_head list;
@@ -746,7 +747,6 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp)
{
wm_adsp_debugfs_clear(dsp);
- debugfs_remove_recursive(dsp->debugfs_root);
}
#else
static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
@@ -824,7 +824,7 @@ const struct soc_enum wm_adsp_fw_enum[] = {
};
EXPORT_SYMBOL_GPL(wm_adsp_fw_enum);
-static struct wm_adsp_region const *wm_adsp_find_region(struct wm_adsp *dsp,
+static const struct wm_adsp_region *wm_adsp_find_region(struct wm_adsp *dsp,
int type)
{
int i;
@@ -1213,7 +1213,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
mutex_lock(&ctl->dsp->pwr_lock);
- ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size);
+ ret = wm_coeff_read_ctrl(ctl, ctl->cache, size);
if (!ret && copy_to_user(bytes, ctl->cache, size))
ret = -EFAULT;
@@ -2240,7 +2240,7 @@ static void wmfw_v3_parse_id_header(struct wm_adsp *dsp,
}
static int wm_adsp_create_regions(struct wm_adsp *dsp, __be32 id, int nregions,
- int *type, __be32 *base)
+ const int *type, __be32 *base)
{
struct wm_adsp_alg_region *alg_region;
int i;
@@ -2487,7 +2487,7 @@ out:
static int wm_halo_create_regions(struct wm_adsp *dsp, __be32 id,
__be32 xm_base, __be32 ym_base)
{
- int types[] = {
+ static const int types[] = {
WMFW_ADSP2_XM, WMFW_HALO_XM_PACKED,
WMFW_ADSP2_YM, WMFW_HALO_YM_PACKED
};
@@ -3333,7 +3333,8 @@ static int wm_halo_start_core(struct wm_adsp *dsp)
{
return regmap_update_bits(dsp->regmap,
dsp->base + HALO_CCM_CORE_CONTROL,
- HALO_CORE_EN, HALO_CORE_EN);
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
}
static void wm_halo_stop_core(struct wm_adsp *dsp)
@@ -4500,13 +4501,13 @@ irqreturn_t wm_halo_wdt_expire(int irq, void *data)
}
EXPORT_SYMBOL_GPL(wm_halo_wdt_expire);
-static struct wm_adsp_ops wm_adsp1_ops = {
+static const struct wm_adsp_ops wm_adsp1_ops = {
.validate_version = wm_adsp_validate_version,
.parse_sizes = wm_adsp1_parse_sizes,
.region_to_reg = wm_adsp_region_to_reg,
};
-static struct wm_adsp_ops wm_adsp2_ops[] = {
+static const struct wm_adsp_ops wm_adsp2_ops[] = {
{
.sys_config_size = sizeof(struct wm_adsp_system_config_xm_hdr),
.parse_sizes = wm_adsp2_parse_sizes,
@@ -4567,7 +4568,7 @@ static struct wm_adsp_ops wm_adsp2_ops[] = {
},
};
-static struct wm_adsp_ops wm_halo_ops = {
+static const struct wm_adsp_ops wm_halo_ops = {
.sys_config_size = sizeof(struct wm_halo_system_config_xm_hdr),
.parse_sizes = wm_adsp2_parse_sizes,
.validate_version = wm_halo_validate_version,
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 1996350b817e..f22131d9cc29 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -64,7 +64,7 @@ struct wm_adsp {
struct regmap *regmap;
struct snd_soc_component *component;
- struct wm_adsp_ops *ops;
+ const struct wm_adsp_ops *ops;
unsigned int base;
unsigned int base_sysinfo;
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index fd4160289fac..8ebf76e04702 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -636,8 +636,7 @@ static int dw_i2s_probe(struct platform_device *pdev)
dw_i2s_dai->ops = &dw_i2s_dai_ops;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->i2s_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->i2s_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dev->i2s_base))
return PTR_ERR(dev->i2s_base);
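The same two-calls-to-one conversion recurs in the fsl_asrc, fsl_aud2htx, fsl_easrc, fsl_esai, fsl_micfil, fsl_sai, fsl_spdif, fsl_ssi, hi6210-i2s and img-* hunks below. For reference, the helper is roughly equivalent to the pair it replaces (a sketch, not the verbatim upstream body):

	void __iomem *devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
							     unsigned int index,
							     struct resource **res)
	{
		struct resource *r;

		r = platform_get_resource(pdev, IORESOURCE_MEM, index);
		if (res)
			*res = r;
		return devm_ioremap_resource(&pdev->dev, r);
	}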
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 556c284f49dd..8e05d092790e 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -351,6 +351,19 @@ config SND_SOC_IMX_RPMSG
Say Y if you want to add support for SoC audio on an i.MX board with
an rpmsg device.
+config SND_SOC_IMX_CARD
+ tristate "SoC Audio Graph Sound Card support for i.MX boards"
+ depends on OF && I2C
+ select SND_SOC_AK4458
+ select SND_SOC_AK5558
+ select SND_SOC_IMX_PCM_DMA
+ select SND_SOC_FSL_SAI
+ select SND_SIMPLE_CARD_UTILS
+ help
+ This option enables audio sound card support for i.MX boards
+ with OF-graph DT bindings.
+ It also supports DPCM for single-CPU, multi-codec systems.
+
endif # SND_IMX_SOC
endmenu
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index f146ce464acd..b54beb1a66fa 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -71,6 +71,7 @@ snd-soc-imx-spdif-objs := imx-spdif.o
snd-soc-imx-audmix-objs := imx-audmix.o
snd-soc-imx-hdmi-objs := imx-hdmi.o
snd-soc-imx-rpmsg-objs := imx-rpmsg.o
+snd-soc-imx-card-objs := imx-card.o
obj-$(CONFIG_SND_SOC_EUKREA_TLV320) += snd-soc-eukrea-tlv320.o
obj-$(CONFIG_SND_SOC_IMX_ES8328) += snd-soc-imx-es8328.o
@@ -79,3 +80,4 @@ obj-$(CONFIG_SND_SOC_IMX_SPDIF) += snd-soc-imx-spdif.o
obj-$(CONFIG_SND_SOC_IMX_AUDMIX) += snd-soc-imx-audmix.o
obj-$(CONFIG_SND_SOC_IMX_HDMI) += snd-soc-imx-hdmi.o
obj-$(CONFIG_SND_SOC_IMX_RPMSG) += snd-soc-imx-rpmsg.o
+obj-$(CONFIG_SND_SOC_IMX_CARD) += snd-soc-imx-card.o
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 4f55b316cf0f..06107ae46e20 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -540,7 +540,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
struct device *codec_dev = NULL;
const char *codec_dai_name;
const char *codec_dev_name;
- unsigned int daifmt;
u32 width;
int ret;
@@ -684,10 +683,10 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
}
/* Format info from DT is optional. */
- daifmt = snd_soc_of_parse_daifmt(np, NULL,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+ snd_soc_daifmt_parse_clock_provider_as_phandle(np, NULL, &bitclkmaster, &framemaster);
if (bitclkmaster || framemaster) {
+ unsigned int daifmt = snd_soc_daifmt_parse_format(np, NULL);
+
if (codec_np == bitclkmaster)
daifmt |= (codec_np == framemaster) ?
SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
@@ -709,7 +708,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
of_node_put(framemaster);
if (!fsl_asoc_card_is_ac97(priv) && !codec_dev) {
- dev_err(&pdev->dev, "failed to find codec device\n");
+ dev_dbg(&pdev->dev, "failed to find codec device\n");
ret = -EPROBE_DEFER;
goto asrc_fail;
}
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 0e1ad8efebd3..24b41881a68f 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -1035,8 +1035,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
asrc->private = asrc_priv;
/* Get the addresses and IRQ */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/fsl/fsl_aud2htx.c b/sound/soc/fsl/fsl_aud2htx.c
index a328697511f7..99ab7f0241cf 100644
--- a/sound/soc/fsl/fsl_aud2htx.c
+++ b/sound/soc/fsl/fsl_aud2htx.c
@@ -196,8 +196,7 @@ static int fsl_aud2htx_probe(struct platform_device *pdev)
aud2htx->pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
index b1765c7d3bcd..be14f84796cb 100644
--- a/sound/soc/fsl/fsl_easrc.c
+++ b/sound/soc/fsl/fsl_easrc.c
@@ -1887,8 +1887,7 @@ static int fsl_easrc_probe(struct platform_device *pdev)
easrc->private = easrc_priv;
np = dev->of_node;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1901,10 +1900,8 @@ static int fsl_easrc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq for node %pOF\n", np);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, fsl_easrc_isr, 0,
dev_name(dev), easrc);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index f356ae5925af..a961f837cd09 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -969,8 +969,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
esai_priv->soc = of_device_get_match_data(&pdev->dev);
/* Get the addresses and IRQ */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 3cf789ed6cbe..8c0c75ce9490 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -669,8 +669,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
}
/* init regmap */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 407a45e48eee..223fcd15bfcc 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1017,8 +1017,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
sai->is_lsb_first = of_property_read_bool(np, "lsb-first");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index c631de325a6e..8ffb1a6048d6 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -49,6 +49,7 @@ static u8 srpc_dpll_locked[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0xa, 0xb };
* @imx: for imx platform
* @shared_root_clock: flag of sharing a clock source with others;
* so the driver shouldn't set root clock rate
+ * @raw_capture_mode: whether raw capture mode is supported
* @interrupts: interrupt number
* @tx_burst: tx maxburst size
* @rx_burst: rx maxburst size
@@ -57,6 +58,7 @@ static u8 srpc_dpll_locked[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0xa, 0xb };
struct fsl_spdif_soc_data {
bool imx;
bool shared_root_clock;
+ bool raw_capture_mode;
u32 interrupts;
u32 tx_burst;
u32 rx_burst;
@@ -136,6 +138,7 @@ struct fsl_spdif_priv {
static struct fsl_spdif_soc_data fsl_spdif_vf610 = {
.imx = false,
.shared_root_clock = false,
+ .raw_capture_mode = false,
.interrupts = 1,
.tx_burst = FSL_SPDIF_TXFIFO_WML,
.rx_burst = FSL_SPDIF_RXFIFO_WML,
@@ -145,6 +148,7 @@ static struct fsl_spdif_soc_data fsl_spdif_vf610 = {
static struct fsl_spdif_soc_data fsl_spdif_imx35 = {
.imx = true,
.shared_root_clock = false,
+ .raw_capture_mode = false,
.interrupts = 1,
.tx_burst = FSL_SPDIF_TXFIFO_WML,
.rx_burst = FSL_SPDIF_RXFIFO_WML,
@@ -154,6 +158,7 @@ static struct fsl_spdif_soc_data fsl_spdif_imx35 = {
static struct fsl_spdif_soc_data fsl_spdif_imx6sx = {
.imx = true,
.shared_root_clock = true,
+ .raw_capture_mode = false,
.interrupts = 1,
.tx_burst = FSL_SPDIF_TXFIFO_WML,
.rx_burst = FSL_SPDIF_RXFIFO_WML,
@@ -164,12 +169,23 @@ static struct fsl_spdif_soc_data fsl_spdif_imx6sx = {
static struct fsl_spdif_soc_data fsl_spdif_imx8qm = {
.imx = true,
.shared_root_clock = true,
+ .raw_capture_mode = false,
.interrupts = 2,
.tx_burst = 2, /* Applied for EDMA */
.rx_burst = 2, /* Applied for EDMA */
.tx_formats = SNDRV_PCM_FMTBIT_S24_LE, /* Applied for EDMA */
};
+static struct fsl_spdif_soc_data fsl_spdif_imx8mm = {
+ .imx = true,
+ .shared_root_clock = false,
+ .raw_capture_mode = true,
+ .interrupts = 1,
+ .tx_burst = FSL_SPDIF_TXFIFO_WML,
+ .rx_burst = FSL_SPDIF_RXFIFO_WML,
+ .tx_formats = FSL_SPDIF_FORMATS_PLAYBACK,
+};
+
/* Check if clk is a root clock that does not share clock source with others */
static inline bool fsl_spdif_can_set_clk_rate(struct fsl_spdif_priv *spdif, int clk)
{
@@ -846,6 +862,39 @@ static int fsl_spdif_tx_vbit_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int fsl_spdif_rx_rcm_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val;
+
+ regmap_read(regmap, REG_SPDIF_SCR, &val);
+ val = (val & SCR_RAW_CAPTURE_MODE) ? 1 : 0;
+ ucontrol->value.integer.value[0] = val;
+
+ return 0;
+}
+
+static int fsl_spdif_rx_rcm_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val = (ucontrol->value.integer.value[0] ? SCR_RAW_CAPTURE_MODE : 0);
+
+ if (val)
+ cpu_dai->driver->capture.formats |= SNDRV_PCM_FMTBIT_S32_LE;
+ else
+ cpu_dai->driver->capture.formats &= ~SNDRV_PCM_FMTBIT_S32_LE;
+
+ regmap_update_bits(regmap, REG_SPDIF_SCR, SCR_RAW_CAPTURE_MODE, val);
+
+ return 0;
+}
+
/* DPLL lock information */
static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -1029,6 +1078,19 @@ static struct snd_kcontrol_new fsl_spdif_ctrls[] = {
},
};
+static struct snd_kcontrol_new fsl_spdif_ctrls_rcm[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "IEC958 Raw Capture Mode",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_WRITE |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_ctl_boolean_mono_info,
+ .get = fsl_spdif_rx_rcm_get,
+ .put = fsl_spdif_rx_rcm_put,
+ },
+};
+
static int fsl_spdif_dai_probe(struct snd_soc_dai *dai)
{
struct fsl_spdif_priv *spdif_private = snd_soc_dai_get_drvdata(dai);
@@ -1038,6 +1100,10 @@ static int fsl_spdif_dai_probe(struct snd_soc_dai *dai)
snd_soc_add_dai_controls(dai, fsl_spdif_ctrls, ARRAY_SIZE(fsl_spdif_ctrls));
+ if (spdif_private->soc->raw_capture_mode)
+ snd_soc_add_dai_controls(dai, fsl_spdif_ctrls_rcm,
+ ARRAY_SIZE(fsl_spdif_ctrls_rcm));
+
/*Clear the val bit for Tx*/
regmap_update_bits(spdif_private->regmap, REG_SPDIF_SCR,
SCR_VAL_MASK, SCR_VAL_CLEAR);
@@ -1289,8 +1355,7 @@ static int fsl_spdif_probe(struct platform_device *pdev)
spdif_priv->soc->tx_formats;
/* Get the addresses and IRQ */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1302,10 +1367,8 @@ static int fsl_spdif_probe(struct platform_device *pdev)
for (i = 0; i < spdif_priv->soc->interrupts; i++) {
irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, spdif_isr, 0,
dev_name(&pdev->dev), spdif_priv);
@@ -1375,22 +1438,38 @@ static int fsl_spdif_probe(struct platform_device *pdev)
&spdif_priv->cpu_dai_drv, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
- return ret;
+ goto err_pm_disable;
}
ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
- if (ret && ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
+ goto err_pm_disable;
+ }
+
+ return ret;
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
return ret;
}
+static int fsl_spdif_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int fsl_spdif_runtime_suspend(struct device *dev)
{
struct fsl_spdif_priv *spdif_priv = dev_get_drvdata(dev);
int i;
+ /* Disable all the interrupts */
+ regmap_update_bits(spdif_priv->regmap, REG_SPDIF_SIE, 0xffffff, 0);
+
regmap_read(spdif_priv->regmap, REG_SPDIF_SRPC,
&spdif_priv->regcache_srpc);
regcache_cache_only(spdif_priv->regmap, true);
@@ -1476,6 +1555,7 @@ static const struct of_device_id fsl_spdif_dt_ids[] = {
{ .compatible = "fsl,vf610-spdif", .data = &fsl_spdif_vf610, },
{ .compatible = "fsl,imx6sx-spdif", .data = &fsl_spdif_imx6sx, },
{ .compatible = "fsl,imx8qm-spdif", .data = &fsl_spdif_imx8qm, },
+ { .compatible = "fsl,imx8mm-spdif", .data = &fsl_spdif_imx8mm, },
{}
};
MODULE_DEVICE_TABLE(of, fsl_spdif_dt_ids);
@@ -1487,6 +1567,7 @@ static struct platform_driver fsl_spdif_driver = {
.pm = &fsl_spdif_pm,
},
.probe = fsl_spdif_probe,
+ .remove = fsl_spdif_remove,
};
module_platform_driver(fsl_spdif_driver);
diff --git a/sound/soc/fsl/fsl_spdif.h b/sound/soc/fsl/fsl_spdif.h
index d5f1dfd58740..bff8290e71f2 100644
--- a/sound/soc/fsl/fsl_spdif.h
+++ b/sound/soc/fsl/fsl_spdif.h
@@ -63,6 +63,7 @@
#define SCR_TXFIFO_FSEL_IF4 (0x1 << SCR_TXFIFO_FSEL_OFFSET)
#define SCR_TXFIFO_FSEL_IF8 (0x2 << SCR_TXFIFO_FSEL_OFFSET)
#define SCR_TXFIFO_FSEL_IF12 (0x3 << SCR_TXFIFO_FSEL_OFFSET)
+#define SCR_RAW_CAPTURE_MODE BIT(14)
#define SCR_LOW_POWER (1 << 13)
#define SCR_SOFT_RESET (1 << 12)
#define SCR_TXFIFO_CTRL_OFFSET 10
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 2b57b60431bb..ecbc1c365d5b 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1503,8 +1503,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
}
ssi->cpu_dai_drv.name = dev_name(dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(dev, res);
+ iomem = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
ssi->ssi_phys = res->start;
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index 6cb558165848..fb7c29fc39d7 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -736,7 +736,7 @@ static int fsl_xcvr_load_firmware(struct fsl_xcvr *xcvr)
/* clean current page, including data memory */
memset_io(xcvr->ram_addr, 0, size);
}
- };
+ }
err_firmware:
release_firmware(fw);
@@ -1202,6 +1202,10 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
rx_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rxfifo");
tx_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "txfifo");
+ if (!rx_res || !tx_res) {
+ dev_err(dev, "could not find rxfifo or txfifo resource\n");
+ return -EINVAL;
+ }
xcvr->dma_prms_rx.chan_name = "rx";
xcvr->dma_prms_tx.chan_name = "tx";
xcvr->dma_prms_rx.addr = rx_res->start;
@@ -1233,6 +1237,16 @@ static __maybe_unused int fsl_xcvr_runtime_suspend(struct device *dev)
struct fsl_xcvr *xcvr = dev_get_drvdata(dev);
int ret;
+ /*
+ * Clear interrupts. When a stream starts or resumes after
+ * suspend, interrupts are enabled in prepare(), so there is
+ * no need to enable them again in resume().
+ */
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_IER0,
+ FSL_XCVR_IRQ_EARC_ALL, 0);
+ if (ret < 0)
+ dev_err(dev, "Failed to clear IER0: %d\n", ret);
+
/* Assert M0+ reset */
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
FSL_XCVR_EXT_CTRL_CORE_RESET,
diff --git a/sound/soc/fsl/imx-audio-rpmsg.c b/sound/soc/fsl/imx-audio-rpmsg.c
index 50099bcaa9cd..905c3a071300 100644
--- a/sound/soc/fsl/imx-audio-rpmsg.c
+++ b/sound/soc/fsl/imx-audio-rpmsg.c
@@ -122,17 +122,7 @@ static struct rpmsg_driver imx_audio_rpmsg_driver = {
.remove = imx_audio_rpmsg_remove,
};
-static int __init imx_audio_rpmsg_init(void)
-{
- return register_rpmsg_driver(&imx_audio_rpmsg_driver);
-}
-
-static void __exit imx_audio_rpmsg_exit(void)
-{
- unregister_rpmsg_driver(&imx_audio_rpmsg_driver);
-}
-module_init(imx_audio_rpmsg_init);
-module_exit(imx_audio_rpmsg_exit);
+module_rpmsg_driver(imx_audio_rpmsg_driver);
MODULE_DESCRIPTION("Freescale SoC Audio RPMSG interface");
MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
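module_rpmsg_driver() removes the hand-written init/exit boilerplate above; it is a thin wrapper over module_driver() and expands to roughly the code being deleted (sketch of the macro from include/linux/rpmsg.h):

	#define module_rpmsg_driver(__rpmsg_driver) \
		module_driver(__rpmsg_driver, register_rpmsg_driver, \
				unregister_rpmsg_driver)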
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index cbdc0a2c09c5..a364e2415de0 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -209,10 +209,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
/* for CPU/Codec/Platform x 2 */
dlc = devm_kcalloc(&pdev->dev, 6, sizeof(*dlc), GFP_KERNEL);
- if (!dlc) {
- dev_err(&pdev->dev, "failed to allocate dai_link\n");
+ if (!dlc)
return -ENOMEM;
- }
ret = of_parse_phandle_with_args(audmix_np, "dais", NULL, i,
&args);
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
new file mode 100644
index 000000000000..58fd0639a069
--- /dev/null
+++ b/sound/soc/fsl/imx-card.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2017-2021 NXP
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <sound/pcm.h>
+#include <sound/soc-dapm.h>
+#include <sound/simple_card_utils.h>
+
+#include "fsl_sai.h"
+
+enum codec_type {
+ CODEC_DUMMY = 0,
+ CODEC_AK5558 = 1,
+ CODEC_AK4458,
+ CODEC_AK4497,
+ CODEC_AK5552,
+};
+
+/*
+ * Mapping LRCK fs and frame width, table 3 & 4 in datasheet
+ * @rmin: min rate
+ * @rmax: max rate
+ * @wmin: min frame ratio
+ * @wmax: max frame ratio
+ */
+struct imx_akcodec_fs_mul {
+ unsigned int rmin;
+ unsigned int rmax;
+ unsigned int wmin;
+ unsigned int wmax;
+};
+
+/*
+ * Mapping TDM mode and frame width
+ */
+struct imx_akcodec_tdm_fs_mul {
+ unsigned int min;
+ unsigned int max;
+ unsigned int mul;
+};
+
+/*
+ * struct imx_card_plat_data - specific info for codecs
+ *
+ * @fs_mul: ratio of mclk/fs for normal mode
+ * @tdm_fs_mul: ratio of mclk/fs for tdm mode
+ * @support_rates: supported sample rates
+ * @support_tdm_rates: supported sample rates for TDM mode
+ * @support_channels: supported channels
+ * @support_tdm_channels: supported channels for tdm mode
+ * @num_fs_mul: ARRAY_SIZE of fs_mul
+ * @num_tdm_fs_mul: ARRAY_SIZE of tdm_fs_mul
+ * @num_rates: ARRAY_SIZE of support_rates
+ * @num_tdm_rates: ARRAY_SIZE of support_tdm_rates
+ * @num_channels: ARRAY_SIZE of support_channels
+ * @num_tdm_channels: ARRAY_SIZE of support_tdm_channels
+ * @type: codec type
+ */
+struct imx_card_plat_data {
+ struct imx_akcodec_fs_mul *fs_mul;
+ struct imx_akcodec_tdm_fs_mul *tdm_fs_mul;
+ const u32 *support_rates;
+ const u32 *support_tdm_rates;
+ const u32 *support_channels;
+ const u32 *support_tdm_channels;
+ unsigned int num_fs_mul;
+ unsigned int num_tdm_fs_mul;
+ unsigned int num_rates;
+ unsigned int num_tdm_rates;
+ unsigned int num_channels;
+ unsigned int num_tdm_channels;
+ unsigned int num_codecs;
+ enum codec_type type;
+};
+
+/*
+ * struct dai_link_data - specific info for dai link
+ *
+ * @slots: slot number
+ * @slot_width: slot width value
+ * @cpu_sysclk_id: sysclk id for cpu dai
+ * @one2one_ratio: true if mclk is equal to bclk
+ */
+struct dai_link_data {
+ unsigned int slots;
+ unsigned int slot_width;
+ unsigned int cpu_sysclk_id;
+ bool one2one_ratio;
+};
+
+/*
+ * struct imx_card_data - platform device data
+ *
+ * @plat_data: pointer of imx_card_plat_data
+ * @dapm_routes: pointer of dapm_routes
+ * @link_data: private data for dai link
+ * @card: card instance
+ * @num_dapm_routes: number of dapm_routes
+ * @asrc_rate: asrc rates
+ * @asrc_format: asrc format
+ */
+struct imx_card_data {
+ struct imx_card_plat_data *plat_data;
+ struct snd_soc_dapm_route *dapm_routes;
+ struct dai_link_data *link_data;
+ struct snd_soc_card card;
+ int num_dapm_routes;
+ u32 asrc_rate;
+ u32 asrc_format;
+};
+
+static struct imx_akcodec_fs_mul ak4458_fs_mul[] = {
+ /* Normal, < 32kHz */
+ { .rmin = 8000, .rmax = 24000, .wmin = 1024, .wmax = 1024, },
+ /* Normal, 32kHz */
+ { .rmin = 32000, .rmax = 32000, .wmin = 256, .wmax = 1024, },
+ /* Normal */
+ { .rmin = 44100, .rmax = 48000, .wmin = 256, .wmax = 768, },
+ /* Double */
+ { .rmin = 88200, .rmax = 96000, .wmin = 256, .wmax = 512, },
+ /* Quad */
+ { .rmin = 176400, .rmax = 192000, .wmin = 128, .wmax = 256, },
+ /* Oct */
+ { .rmin = 352800, .rmax = 384000, .wmin = 32, .wmax = 128, },
+ /* Hex */
+ { .rmin = 705600, .rmax = 768000, .wmin = 16, .wmax = 64, },
+};
+
+static struct imx_akcodec_tdm_fs_mul ak4458_tdm_fs_mul[] = {
+ /*
+ * Table 13 - Audio Interface Format
+ * For TDM mode, the MCLK multiplier is
+ * obtained from 2 * slots * slot_width
+ */
+ { .min = 128, .max = 128, .mul = 256 }, /* TDM128 */
+ { .min = 256, .max = 256, .mul = 512 }, /* TDM256 */
+ { .min = 512, .max = 512, .mul = 1024 }, /* TDM512 */
+};
+
+static struct imx_akcodec_fs_mul ak4497_fs_mul[] = {
+ /**
+ * Table 7 - mapping multiplier and speed mode
+ * Tables 8 & 9 - mapping speed mode and LRCK fs
+ */
+ { .rmin = 8000, .rmax = 32000, .wmin = 1024, .wmax = 1024, }, /* Normal, <= 32kHz */
+ { .rmin = 44100, .rmax = 48000, .wmin = 512, .wmax = 512, }, /* Normal */
+ { .rmin = 88200, .rmax = 96000, .wmin = 256, .wmax = 256, }, /* Double */
+ { .rmin = 176400, .rmax = 192000, .wmin = 128, .wmax = 128, }, /* Quad */
+ { .rmin = 352800, .rmax = 384000, .wmin = 128, .wmax = 128, }, /* Oct */
+ { .rmin = 705600, .rmax = 768000, .wmin = 64, .wmax = 64, }, /* Hex */
+};
+
+/*
+ * Auto MCLK selection based on LRCK for Normal Mode
+ * (Table 4 from datasheet)
+ */
+static struct imx_akcodec_fs_mul ak5558_fs_mul[] = {
+ { .rmin = 8000, .rmax = 32000, .wmin = 1024, .wmax = 1024, },
+ { .rmin = 44100, .rmax = 48000, .wmin = 512, .wmax = 512, },
+ { .rmin = 88200, .rmax = 96000, .wmin = 256, .wmax = 256, },
+ { .rmin = 176400, .rmax = 192000, .wmin = 128, .wmax = 128, },
+ { .rmin = 352800, .rmax = 384000, .wmin = 64, .wmax = 64, },
+ { .rmin = 705600, .rmax = 768000, .wmin = 32, .wmax = 32, },
+};
+
+/*
+ * MCLK and BCLK selection based on TDM mode
+ * because of SAI we also add the restriction: MCLK >= 2 * BCLK
+ * (Table 9 from datasheet)
+ */
+static struct imx_akcodec_tdm_fs_mul ak5558_tdm_fs_mul[] = {
+ { .min = 128, .max = 128, .mul = 256 },
+ { .min = 256, .max = 256, .mul = 512 },
+ { .min = 512, .max = 512, .mul = 1024 },
+};
+
+static const u32 akcodec_rates[] = {
+ 8000, 11025, 16000, 22050, 32000, 44100, 48000, 88200,
+ 96000, 176400, 192000, 352800, 384000, 705600, 768000,
+};
+
+static const u32 akcodec_tdm_rates[] = {
+ 8000, 16000, 32000, 48000, 96000,
+};
+
+static const u32 ak4458_channels[] = {
+ 1, 2, 4, 6, 8, 10, 12, 14, 16,
+};
+
+static const u32 ak4458_tdm_channels[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 16,
+};
+
+static const u32 ak5558_channels[] = {
+ 1, 2, 4, 6, 8,
+};
+
+static const u32 ak5558_tdm_channels[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static bool format_is_dsd(struct snd_pcm_hw_params *params)
+{
+ snd_pcm_format_t format = params_format(params);
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_DSD_U8:
+ case SNDRV_PCM_FORMAT_DSD_U16_LE:
+ case SNDRV_PCM_FORMAT_DSD_U16_BE:
+ case SNDRV_PCM_FORMAT_DSD_U32_LE:
+ case SNDRV_PCM_FORMAT_DSD_U32_BE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool format_is_tdm(struct dai_link_data *link_data)
+{
+ if (link_data->slots > 2)
+ return true;
+ else
+ return false;
+}
+
+static bool codec_is_akcodec(unsigned int type)
+{
+ switch (type) {
+ case CODEC_AK4458:
+ case CODEC_AK4497:
+ case CODEC_AK5558:
+ case CODEC_AK5552:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static unsigned long akcodec_get_mclk_rate(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct imx_card_data *data = snd_soc_card_get_drvdata(rtd->card);
+ const struct imx_card_plat_data *plat_data = data->plat_data;
+ struct dai_link_data *link_data = &data->link_data[rtd->num];
+ unsigned int width = link_data->slots * link_data->slot_width;
+ unsigned int rate = params_rate(params);
+ int i;
+
+ if (format_is_tdm(link_data)) {
+ for (i = 0; i < plat_data->num_tdm_fs_mul; i++) {
+ /* min = max = slots * slot_width */
+ if (width != plat_data->tdm_fs_mul[i].min)
+ continue;
+ return rate * plat_data->tdm_fs_mul[i].mul;
+ }
+ } else {
+ for (i = 0; i < plat_data->num_fs_mul; i++) {
+ if (rate >= plat_data->fs_mul[i].rmin &&
+ rate <= plat_data->fs_mul[i].rmax) {
+ width = max(width, plat_data->fs_mul[i].wmin);
+ width = min(width, plat_data->fs_mul[i].wmax);
+
+ /* Adjust SAI bclk:mclk ratio */
+ width *= link_data->one2one_ratio ? 1 : 2;
+
+ return rate * width;
+ }
+ }
+ }
+
+ /* Let DAI manage clk frequency by default */
+ return 0;
+}
+
+static int imx_aif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_card *card = rtd->card;
+ struct imx_card_data *data = snd_soc_card_get_drvdata(card);
+ struct dai_link_data *link_data = &data->link_data[rtd->num];
+ struct imx_card_plat_data *plat_data = data->plat_data;
+ struct device *dev = card->dev;
+ struct snd_soc_dai *codec_dai;
+ unsigned long mclk_freq;
+ unsigned int fmt = rtd->dai_link->dai_fmt;
+ unsigned int slots, slot_width;
+ int ret, i;
+
+ slots = link_data->slots;
+ slot_width = link_data->slot_width;
+
+ if (!format_is_tdm(link_data)) {
+ if (format_is_dsd(params)) {
+ slots = 1;
+ slot_width = params_width(params);
+ fmt = (rtd->dai_link->dai_fmt & ~SND_SOC_DAIFMT_FORMAT_MASK) |
+ SND_SOC_DAIFMT_PDM;
+ } else {
+ slots = 2;
+ slot_width = params_physical_width(params);
+ fmt = (rtd->dai_link->dai_fmt & ~SND_SOC_DAIFMT_FORMAT_MASK) |
+ SND_SOC_DAIFMT_I2S;
+ }
+ }
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to set cpu dai fmt: %d\n", ret);
+ return ret;
+ }
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai,
+ BIT(slots) - 1,
+ BIT(slots) - 1,
+ slots, slot_width);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to set cpu dai tdm slot: %d\n", ret);
+ return ret;
+ }
+
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to set codec dai[%d] fmt: %d\n", i, ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ BIT(slots) - 1,
+ BIT(slots) - 1,
+ slots, slot_width);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to set codec dai[%d] tdm slot: %d\n", i, ret);
+ return ret;
+ }
+ }
+
+ /* Set MCLK freq */
+ if (codec_is_akcodec(plat_data->type))
+ mclk_freq = akcodec_get_mclk_rate(substream, params);
+ else
+ mclk_freq = params_rate(params) * slots * slot_width;
+ /* Use the maximum freq from DSD512 (512*44100 = 22579200) */
+ if (format_is_dsd(params))
+ mclk_freq = 22579200;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, link_data->cpu_sysclk_id, mclk_freq,
+ SND_SOC_CLOCK_OUT);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to set cpui dai mclk1 rate (%lu): %d\n", mclk_freq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ak5558_hw_rule_rate(struct snd_pcm_hw_params *p, struct snd_pcm_hw_rule *r)
+{
+ struct dai_link_data *link_data = r->private;
+ struct snd_interval t = { .min = 8000, .max = 8000, };
+ unsigned long mclk_freq;
+ unsigned int fs;
+ int i;
+
+ fs = hw_param_interval(p, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min;
+ fs *= link_data->slots;
+
+ /* Identify maximum supported rate */
+ for (i = 0; i < ARRAY_SIZE(akcodec_rates); i++) {
+ mclk_freq = fs * akcodec_rates[i];
+ /* Adjust SAI bclk:mclk ratio */
+ mclk_freq *= link_data->one2one_ratio ? 1 : 2;
+
+ /* Skip rates for which MCLK is beyond supported value */
+ if (mclk_freq > 36864000)
+ continue;
+
+ if (t.max < akcodec_rates[i])
+ t.max = akcodec_rates[i];
+ }
+
+ return snd_interval_refine(hw_param_interval(p, r->var), &t);
+}
+
+static int imx_aif_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct imx_card_data *data = snd_soc_card_get_drvdata(card);
+ struct dai_link_data *link_data = &data->link_data[rtd->num];
+ static struct snd_pcm_hw_constraint_list constraint_rates;
+ static struct snd_pcm_hw_constraint_list constraint_channels;
+ int ret = 0;
+
+ if (format_is_tdm(link_data)) {
+ constraint_channels.list = data->plat_data->support_tdm_channels;
+ constraint_channels.count = data->plat_data->num_tdm_channels;
+ constraint_rates.list = data->plat_data->support_tdm_rates;
+ constraint_rates.count = data->plat_data->num_tdm_rates;
+ } else {
+ constraint_channels.list = data->plat_data->support_channels;
+ constraint_channels.count = data->plat_data->num_channels;
+ constraint_rates.list = data->plat_data->support_rates;
+ constraint_rates.count = data->plat_data->num_rates;
+ }
+
+ if (constraint_channels.count) {
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraint_channels);
+ if (ret)
+ return ret;
+ }
+
+ if (constraint_rates.count) {
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraint_rates);
+ if (ret)
+ return ret;
+ }
+
+ if (data->plat_data->type == CODEC_AK5558)
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ ak5558_hw_rule_rate, link_data,
+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
+
+ return ret;
+}
+
+static struct snd_soc_ops imx_aif_ops = {
+ .hw_params = imx_aif_hw_params,
+ .startup = imx_aif_startup,
+};
+
+static struct snd_soc_ops imx_aif_ops_be = {
+ .hw_params = imx_aif_hw_params,
+};
+
+static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct imx_card_data *data = snd_soc_card_get_drvdata(card);
+ struct snd_interval *rate;
+ struct snd_mask *mask;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ rate->max = data->asrc_rate;
+ rate->min = data->asrc_rate;
+
+ mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ snd_mask_none(mask);
+ snd_mask_set(mask, data->asrc_format);
+
+ return 0;
+}
+
+static int imx_card_parse_of(struct imx_card_data *data)
+{
+ struct imx_card_plat_data *plat_data = data->plat_data;
+ struct snd_soc_card *card = &data->card;
+ struct snd_soc_dai_link_component *dlc;
+ struct device_node *platform = NULL;
+ struct device_node *codec = NULL;
+ struct device_node *cpu = NULL;
+ struct device_node *np;
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ struct dai_link_data *link_data;
+ struct of_phandle_args args;
+ int ret, num_links;
+ u32 width;
+
+ ret = snd_soc_of_parse_card_name(card, "model");
+ if (ret) {
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+ return ret;
+ }
+
+ /* DAPM routes */
+ if (of_property_read_bool(dev->of_node, "audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
+ if (ret)
+ return ret;
+ }
+
+ /* Populate links */
+ num_links = of_get_child_count(dev->of_node);
+
+ /* Allocate the DAI link array */
+ card->dai_link = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL);
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL);
+ if (!data->link_data)
+ return -ENOMEM;
+
+ card->num_links = num_links;
+ link = card->dai_link;
+ link_data = data->link_data;
+
+ for_each_child_of_node(dev->of_node, np) {
+ dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL);
+ if (!dlc) {
+ ret = -ENOMEM;
+ goto err_put_np;
+ }
+
+ link->cpus = &dlc[0];
+ link->platforms = &dlc[1];
+
+ link->num_cpus = 1;
+ link->num_platforms = 1;
+
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ goto err_put_np;
+ }
+
+ cpu = of_get_child_by_name(np, "cpu");
+ if (!cpu) {
+ dev_err(dev, "%s: Can't find cpu DT node\n", link->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = of_parse_phandle_with_args(cpu, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret) {
+ dev_err(card->dev, "%s: error getting cpu phandle\n", link->name);
+ goto err;
+ }
+
+ if (of_node_name_eq(args.np, "sai")) {
+ /* sai sysclk id */
+ link_data->cpu_sysclk_id = FSL_SAI_CLK_MAST1;
+
+ /* sai may support mclk/bclk = 1 */
+ if (of_find_property(np, "fsl,mclk-equal-bclk", NULL))
+ link_data->one2one_ratio = true;
+ }
+
+ link->cpus->of_node = args.np;
+ link->platforms->of_node = link->cpus->of_node;
+ link->id = args.args[0];
+
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpus->dai_name);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(card->dev, "%s: error getting cpu dai name: %d\n",
+ link->name, ret);
+ goto err;
+ }
+
+ codec = of_get_child_by_name(np, "codec");
+ if (codec) {
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: codec dai not found: %d\n",
+ link->name, ret);
+ goto err;
+ }
+
+ plat_data->num_codecs = link->num_codecs;
+
+ /* Check the akcodec type */
+ if (!strcmp(link->codecs->dai_name, "ak4458-aif"))
+ plat_data->type = CODEC_AK4458;
+ else if (!strcmp(link->codecs->dai_name, "ak4497-aif"))
+ plat_data->type = CODEC_AK4497;
+ else if (!strcmp(link->codecs->dai_name, "ak5558-aif"))
+ plat_data->type = CODEC_AK5558;
+ else if (!strcmp(link->codecs->dai_name, "ak5552-aif"))
+ plat_data->type = CODEC_AK5552;
+
+ } else {
+ dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
+ if (!dlc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ link->codecs = dlc;
+ link->num_codecs = 1;
+
+ link->codecs->dai_name = "snd-soc-dummy-dai";
+ link->codecs->name = "snd-soc-dummy";
+ }
+
+ if (!strncmp(link->name, "HiFi-ASRC-FE", 12)) {
+ /* DPCM frontend */
+ link->dynamic = 1;
+ link->dpcm_merged_chan = 1;
+
+ ret = of_property_read_u32(args.np, "fsl,asrc-rate", &data->asrc_rate);
+ if (ret) {
+ dev_err(dev, "failed to get output rate\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = of_property_read_u32(args.np, "fsl,asrc-format", &data->asrc_format);
+ if (ret) {
+ /* Fallback to old binding; translate to asrc_format */
+ ret = of_property_read_u32(args.np, "fsl,asrc-width", &width);
+ if (ret) {
+ dev_err(dev,
+ "failed to decide output format\n");
+ goto err;
+ }
+
+ if (width == 24)
+ data->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
+ else
+ data->asrc_format = SNDRV_PCM_FORMAT_S16_LE;
+ }
+ } else if (!strncmp(link->name, "HiFi-ASRC-BE", 12)) {
+ /* DPCM backend */
+ link->no_pcm = 1;
+ link->platforms->of_node = NULL;
+ link->platforms->name = "snd-soc-dummy";
+
+ link->be_hw_params_fixup = be_hw_params_fixup;
+ link->ops = &imx_aif_ops_be;
+ } else {
+ link->ops = &imx_aif_ops;
+ }
+
+ if (link->no_pcm || link->dynamic)
+ snd_soc_dai_link_set_capabilities(link);
+
+ /* Get dai fmt */
+ ret = asoc_simple_parse_daifmt(dev, np, codec,
+ NULL, &link->dai_fmt);
+ if (ret)
+ link->dai_fmt = SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS |
+ SND_SOC_DAIFMT_I2S;
+
+ /* Get tdm slot */
+ snd_soc_of_parse_tdm_slot(np, NULL, NULL,
+ &link_data->slots,
+ &link_data->slot_width);
+ /* default value */
+ if (!link_data->slots)
+ link_data->slots = 2;
+
+ if (!link_data->slot_width)
+ link_data->slot_width = 32;
+
+ link->ignore_pmdown_time = 1;
+ link->stream_name = link->name;
+ link++;
+ link_data++;
+
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
+ }
+
+ return 0;
+err:
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
+err_put_np:
+ of_node_put(np);
+ return ret;
+}
+
+static int imx_card_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dai_link *link_be = NULL, *link;
+ struct imx_card_plat_data *plat_data;
+ struct imx_card_data *data;
+ int ret, i;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ plat_data = devm_kzalloc(&pdev->dev, sizeof(*plat_data), GFP_KERNEL);
+ if (!plat_data)
+ return -ENOMEM;
+
+ data->plat_data = plat_data;
+ data->card.dev = &pdev->dev;
+
+ dev_set_drvdata(&pdev->dev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+ ret = imx_card_parse_of(data);
+ if (ret)
+ return ret;
+
+ data->num_dapm_routes = plat_data->num_codecs + 1;
+ data->dapm_routes = devm_kcalloc(&pdev->dev, data->num_dapm_routes,
+ sizeof(struct snd_soc_dapm_route),
+ GFP_KERNEL);
+ if (!data->dapm_routes)
+ return -ENOMEM;
+
+ /* configure the dapm routes */
+ switch (plat_data->type) {
+ case CODEC_AK4458:
+ case CODEC_AK4497:
+ if (plat_data->num_codecs == 1) {
+ data->dapm_routes[0].sink = "Playback";
+ data->dapm_routes[0].source = "CPU-Playback";
+ i = 1;
+ } else {
+ for (i = 0; i < plat_data->num_codecs; i++) {
+ data->dapm_routes[i].sink =
+ devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
+ i + 1, "Playback");
+ data->dapm_routes[i].source = "CPU-Playback";
+ }
+ }
+ data->dapm_routes[i].sink = "CPU-Playback";
+ data->dapm_routes[i].source = "ASRC-Playback";
+ break;
+ case CODEC_AK5558:
+ case CODEC_AK5552:
+ if (plat_data->num_codecs == 1) {
+ data->dapm_routes[0].sink = "CPU-Capture";
+ data->dapm_routes[0].source = "Capture";
+ i = 1;
+ } else {
+ for (i = 0; i < plat_data->num_codecs; i++) {
+ data->dapm_routes[i].source =
+ devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
+ i + 1, "Capture");
+ data->dapm_routes[i].sink = "CPU-Capture";
+ }
+ }
+ data->dapm_routes[i].sink = "ASRC-Capture";
+ data->dapm_routes[i].source = "CPU-Capture";
+ break;
+ default:
+ break;
+ }
+
+ /* default platform data for akcodecs */
+ if (codec_is_akcodec(plat_data->type)) {
+ plat_data->support_rates = akcodec_rates;
+ plat_data->num_rates = ARRAY_SIZE(akcodec_rates);
+ plat_data->support_tdm_rates = akcodec_tdm_rates;
+ plat_data->num_tdm_rates = ARRAY_SIZE(akcodec_tdm_rates);
+
+ switch (plat_data->type) {
+ case CODEC_AK4458:
+ plat_data->fs_mul = ak4458_fs_mul;
+ plat_data->num_fs_mul = ARRAY_SIZE(ak4458_fs_mul);
+ plat_data->tdm_fs_mul = ak4458_tdm_fs_mul;
+ plat_data->num_tdm_fs_mul = ARRAY_SIZE(ak4458_tdm_fs_mul);
+ plat_data->support_channels = ak4458_channels;
+ plat_data->num_channels = ARRAY_SIZE(ak4458_channels);
+ plat_data->support_tdm_channels = ak4458_tdm_channels;
+ plat_data->num_tdm_channels = ARRAY_SIZE(ak4458_tdm_channels);
+ break;
+ case CODEC_AK4497:
+ plat_data->fs_mul = ak4497_fs_mul;
+ plat_data->num_fs_mul = ARRAY_SIZE(ak4497_fs_mul);
+ plat_data->support_channels = ak4458_channels;
+ plat_data->num_channels = ARRAY_SIZE(ak4458_channels);
+ break;
+ case CODEC_AK5558:
+ case CODEC_AK5552:
+ plat_data->fs_mul = ak5558_fs_mul;
+ plat_data->num_fs_mul = ARRAY_SIZE(ak5558_fs_mul);
+ plat_data->tdm_fs_mul = ak5558_tdm_fs_mul;
+ plat_data->num_tdm_fs_mul = ARRAY_SIZE(ak5558_tdm_fs_mul);
+ plat_data->support_channels = ak5558_channels;
+ plat_data->num_channels = ARRAY_SIZE(ak5558_channels);
+ plat_data->support_tdm_channels = ak5558_tdm_channels;
+ plat_data->num_tdm_channels = ARRAY_SIZE(ak5558_tdm_channels);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* with asrc as front end */
+ if (data->card.num_links == 3) {
+ data->card.dapm_routes = data->dapm_routes;
+ data->card.num_dapm_routes = data->num_dapm_routes;
+ for_each_card_prelinks(&data->card, i, link) {
+ if (link->no_pcm == 1)
+ link_be = link;
+ }
+ for_each_card_prelinks(&data->card, i, link) {
+ if (link->dynamic == 1 && link_be) {
+ link->dpcm_playback = link_be->dpcm_playback;
+ link->dpcm_capture = link_be->dpcm_capture;
+ }
+ }
+ }
+
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id imx_card_dt_ids[] = {
+ { .compatible = "fsl,imx-audio-card", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_card_dt_ids);
+
+static struct platform_driver imx_card_driver = {
+ .driver = {
+ .name = "imx-card",
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = imx_card_dt_ids,
+ },
+ .probe = imx_card_probe,
+};
+module_platform_driver(imx_card_driver);
+
+MODULE_DESCRIPTION("Freescale i.MX ASoC Machine Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx-card");
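A worked example of the MCLK selection in akcodec_get_mclk_rate() above may help: for an AK4458 link at 48 kHz with the default 2 slots x 32 bits and no fsl,mclk-equal-bclk property (one2one_ratio == false), the calculation goes roughly as follows (illustrative only, mirroring the non-TDM branch):

	unsigned int width = 2 * 32;          /* slots * slot_width = 64 */
	/* ak4458_fs_mul entry for 44.1k-48k: wmin = 256, wmax = 768 */
	width = max(width, 256u);             /* clamp up   -> 256 */
	width = min(width, 768u);             /* clamp down -> 256 */
	width *= 2;                           /* SAI bclk:mclk adjustment -> 512 */
	mclk_freq = 48000UL * width;          /* 24576000 Hz (24.576 MHz) */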
diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
index fad1eb6253d5..1981dcd7e930 100644
--- a/sound/soc/fsl/imx-es8328.c
+++ b/sound/soc/fsl/imx-es8328.c
@@ -193,7 +193,7 @@ static int imx_es8328_probe(struct platform_device *pdev)
data->card.owner = THIS_MODULE;
data->card.dai_link = &data->dai;
- ret = snd_soc_register_card(&data->card);
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(dev, "Unable to register: %d\n", ret);
goto put_device;
@@ -209,15 +209,6 @@ fail:
return ret;
}
-static int imx_es8328_remove(struct platform_device *pdev)
-{
- struct imx_es8328_data *data = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(&data->card);
-
- return 0;
-}
-
static const struct of_device_id imx_es8328_dt_ids[] = {
{ .compatible = "fsl,imx-audio-es8328", },
{ /* sentinel */ }
@@ -230,7 +221,6 @@ static struct platform_driver imx_es8328_driver = {
.of_match_table = imx_es8328_dt_ids,
},
.probe = imx_es8328_probe,
- .remove = imx_es8328_remove,
};
module_platform_driver(imx_es8328_driver);
diff --git a/sound/soc/fsl/imx-pcm-rpmsg.c b/sound/soc/fsl/imx-pcm-rpmsg.c
index 875c0d6df339..6d6c44cf3451 100644
--- a/sound/soc/fsl/imx-pcm-rpmsg.c
+++ b/sound/soc/fsl/imx-pcm-rpmsg.c
@@ -161,10 +161,10 @@ static int imx_rpmsg_pcm_hw_params(struct snd_soc_component *component,
msg->s_msg.param.format = RPMSG_S24_LE;
break;
case SNDRV_PCM_FORMAT_DSD_U16_LE:
- msg->s_msg.param.format = SNDRV_PCM_FORMAT_DSD_U16_LE;
+ msg->s_msg.param.format = RPMSG_DSD_U16_LE;
break;
case SNDRV_PCM_FORMAT_DSD_U32_LE:
- msg->s_msg.param.format = SNDRV_PCM_FORMAT_DSD_U32_LE;
+ msg->s_msg.param.format = RPMSG_DSD_U32_LE;
break;
default:
msg->s_msg.param.format = RPMSG_S32_LE;
@@ -544,7 +544,7 @@ static int imx_rpmsg_pcm_ack(struct snd_soc_component *component,
struct rpmsg_msg *msg;
unsigned long flags;
int buffer_tail = 0;
- int written_num = 0;
+ int written_num;
if (!rpmsg->force_lpa)
return 0;
diff --git a/sound/soc/fsl/imx-pcm-rpmsg.h b/sound/soc/fsl/imx-pcm-rpmsg.h
index 308d153920a3..8286b55f00ae 100644
--- a/sound/soc/fsl/imx-pcm-rpmsg.h
+++ b/sound/soc/fsl/imx-pcm-rpmsg.h
@@ -328,9 +328,9 @@
#define RPMSG_S16_LE 0x0
#define RPMSG_S24_LE 0x1
#define RPMSG_S32_LE 0x2
-#define RPMSG_DSD_U16_LE 0x3
+#define RPMSG_DSD_U16_LE 49 /* SNDRV_PCM_FORMAT_DSD_U16_LE */
#define RPMSG_DSD_U24_LE 0x4
-#define RPMSG_DSD_U32_LE 0x5
+#define RPMSG_DSD_U32_LE 50 /* SNDRV_PCM_FORMAT_DSD_U32_LE */
#define RPMSG_CH_LEFT 0x0
#define RPMSG_CH_RIGHT 0x1
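The new DSD values are chosen so that the rpmsg wire format matches the ALSA format enum numerically (SNDRV_PCM_FORMAT_DSD_U16_LE == 49, SNDRV_PCM_FORMAT_DSD_U32_LE == 50 in include/uapi/sound/asound.h), keeping both ends of the rpmsg link in agreement with the hw_params switch in imx-pcm-rpmsg.c. If one wanted to make that coupling explicit, a hypothetical addition (not in this patch) could be:

	static_assert(RPMSG_DSD_U16_LE == (int)SNDRV_PCM_FORMAT_DSD_U16_LE);
	static_assert(RPMSG_DSD_U32_LE == (int)SNDRV_PCM_FORMAT_DSD_U32_LE);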
diff --git a/sound/soc/fsl/imx-rpmsg.c b/sound/soc/fsl/imx-rpmsg.c
index 5a9a470d203f..f0cae8c59d54 100644
--- a/sound/soc/fsl/imx-rpmsg.c
+++ b/sound/soc/fsl/imx-rpmsg.c
@@ -137,7 +137,6 @@ fail:
static struct platform_driver imx_rpmsg_driver = {
.driver = {
.name = "imx-audio-rpmsg",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = imx_rpmsg_probe,
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index fa1247f0dda1..677f7da93b4b 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -61,10 +61,9 @@ int asoc_simple_parse_daifmt(struct device *dev,
struct device_node *framemaster = NULL;
unsigned int daifmt;
- daifmt = snd_soc_of_parse_daifmt(node, prefix,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+ daifmt = snd_soc_daifmt_parse_format(node, prefix);
+ snd_soc_daifmt_parse_clock_provider_as_phandle(node, prefix, &bitclkmaster, &framemaster);
if (!bitclkmaster && !framemaster) {
/*
* No dai-link level and master setting was not found from
@@ -73,15 +72,10 @@ int asoc_simple_parse_daifmt(struct device *dev,
*/
dev_dbg(dev, "Revert to legacy daifmt parsing\n");
- daifmt = snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
- (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
+ daifmt |= snd_soc_daifmt_parse_clock_provider_as_flag(codec, NULL);
} else {
- if (codec == bitclkmaster)
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
- else
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
+ daifmt |= snd_soc_daifmt_clock_provider_from_bitmap(
+ ((codec == bitclkmaster) << 4) | (codec == framemaster));
}
of_node_put(bitclkmaster);
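For readers of the bitmap form above: bit 4 encodes whether the codec node is the bit-clock provider and bit 0 whether it is the frame provider, so the helper reduces to roughly the conditional it replaces (a sketch, not the verbatim upstream definition):

	switch (((codec == bitclkmaster) << 4) | (codec == framemaster)) {
	case 0x11:	/* codec provides both clocks */
		daifmt |= SND_SOC_DAIFMT_CBM_CFM;
		break;
	case 0x10:	/* codec provides the bit clock only */
		daifmt |= SND_SOC_DAIFMT_CBM_CFS;
		break;
	case 0x01:	/* codec provides the frame clock only */
		daifmt |= SND_SOC_DAIFMT_CBS_CFM;
		break;
	default:	/* CPU provides both clocks */
		daifmt |= SND_SOC_DAIFMT_CBS_CFS;
		break;
	}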
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 0015f534d42d..a3a7990b5cb6 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -621,6 +621,7 @@ static int asoc_simple_probe(struct platform_device *pdev)
card->owner = THIS_MODULE;
card->dev = dev;
card->probe = simple_soc_probe;
+ card->driver_name = "simple-card";
li = devm_kzalloc(dev, sizeof(*li), GFP_KERNEL);
if (!li)
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 907f5f1f7b44..a297d4af5099 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -102,18 +102,15 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
for (n = 0; n < i2s->clocks; n++) {
ret = clk_prepare_enable(i2s->clk[n]);
- if (ret) {
- while (n--)
- clk_disable_unprepare(i2s->clk[n]);
- return ret;
- }
+ if (ret)
+ goto err_unprepare_clk;
}
ret = clk_set_rate(i2s->clk[CLK_I2S_BASE], 49152000);
if (ret) {
dev_err(i2s->dev, "%s: setting 49.152MHz base rate failed %d\n",
__func__, ret);
- return ret;
+ goto err_unprepare_clk;
}
/* enable clock before frequency division */
@@ -165,6 +162,11 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
return 0;
+
+err_unprepare_clk:
+ while (n--)
+ clk_disable_unprepare(i2s->clk[n]);
+ return ret;
}
static void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
@@ -554,8 +556,7 @@ static int hi6210_i2s_probe(struct platform_device *pdev)
i2s->dev = dev;
spin_lock_init(&i2s->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2s->base = devm_ioremap_resource(dev, res);
+ i2s->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2s->base))
return PTR_ERR(i2s->base);
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index 0843235d73c9..1bf5d6edbd32 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -434,8 +434,7 @@ static int img_i2s_in_probe(struct platform_device *pdev)
i2s->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -464,7 +463,7 @@ static int img_i2s_in_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
}
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
goto err_suspend;
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
index b56a18e7f3ac..4f90d36dc7df 100644
--- a/sound/soc/img/img-i2s-out.c
+++ b/sound/soc/img/img-i2s-out.c
@@ -440,8 +440,7 @@ static int img_i2s_out_probe(struct platform_device *pdev)
i2s->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index 4da49a42e854..ce0f08d3777c 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -222,8 +222,7 @@ static int img_prl_out_probe(struct platform_device *pdev)
prl->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/img/img-spdif-in.c b/sound/soc/img/img-spdif-in.c
index 46ff8a3621d5..6364eb742f6d 100644
--- a/sound/soc/img/img-spdif-in.c
+++ b/sound/soc/img/img-spdif-in.c
@@ -732,8 +732,7 @@ static int img_spdif_in_probe(struct platform_device *pdev)
spdif->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/img/img-spdif-out.c b/sound/soc/img/img-spdif-out.c
index b1d8e4535726..858e1b853820 100644
--- a/sound/soc/img/img-spdif-out.c
+++ b/sound/soc/img/img-spdif-out.c
@@ -335,8 +335,7 @@ static int img_spdif_out_probe(struct platform_device *pdev)
spdif->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 4124aa2fc247..905c7965f653 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
snd_pcm_uframes_t period_size;
ssize_t periodbytes;
ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
- u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+ u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
channels = substream->runtime->channels;
period_size = substream->runtime->period_size;
@@ -233,7 +233,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
/* set codec params and inform SST driver the same */
sst_fill_pcm_params(substream, &param);
sst_fill_alloc_params(substream, &alloc_params);
- substream->runtime->dma_area = substream->dma_buffer.area;
str_params.sparams = param;
str_params.aparams = alloc_params;
str_params.codec = SST_CODEC_TYPE_PCM;
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 58379393b8e4..7e29b0d911e2 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -26,6 +26,12 @@ config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
interface.
If unsure select N.
+config SND_SOC_INTEL_HDA_DSP_COMMON
+ tristate
+
+config SND_SOC_INTEL_SOF_MAXIM_COMMON
+ tristate
+
if SND_SOC_INTEL_CATPT
config SND_SOC_INTEL_HASWELL_MACH
@@ -278,6 +284,7 @@ config SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
select SND_SOC_MAX98390
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
tristate
@@ -304,6 +311,7 @@ config SND_SOC_INTEL_BXT_RT298_MACH
select SND_SOC_RT298
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
help
This adds support for ASoC machine driver for Broxton platforms
with RT286 I2S audio codec.
@@ -422,6 +430,7 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
select SND_SOC_MAX98357A
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
help
This adds support for ASoC machine driver for Geminilake platforms
with RT5682 + MAX98357A I2S audio codec.
@@ -433,15 +442,17 @@ endif ## SND_SOC_SOF_GEMINILAKE
if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
- tristate "SKL/KBL/BXT/APL with HDA Codecs"
+ tristate "Skylake+ with HDA Codecs"
depends on SND_HDA_CODEC_HDMI
depends on GPIOLIB
select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
select SND_SOC_DMIC
# SND_SOC_HDAC_HDA is already selected
help
- This adds support for ASoC machine driver for Intel platforms
- SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
+ This adds support for ASoC machine driver for Intel Skylake+
+ platforms with display (HDMI/DP) and HDA audio codecs, and
+ Smart Sound Technology (SST) integrated audio DSP.
Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
@@ -461,18 +472,38 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
select SND_SOC_RT5682_I2C
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
+ select SND_SOC_INTEL_SOF_MAXIM_COMMON
help
This adds support for ASoC machine driver for SOF platforms
with rt5682 codec.
Say Y if you have such a device.
If unsure select "N".
+config SND_SOC_INTEL_SOF_CS42L42_MACH
+ tristate "SOF with cs42l42 codec in I2S Mode"
+ depends on I2C && ACPI
+ depends on ((SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC) &&\
+ (MFD_INTEL_LPSS || COMPILE_TEST))
+ select SND_SOC_CS42L42
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
+ select SND_SOC_INTEL_SOF_MAXIM_COMMON
+ help
+ This adds support for ASoC machine driver for SOF platforms
+ with cs42l42 codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
config SND_SOC_INTEL_SOF_PCM512x_MACH
tristate "SOF with TI PCM512x codec"
depends on I2C && ACPI
depends on (SND_SOC_SOF_HDA_AUDIO_CODEC && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
(SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
depends on SND_HDA_CODEC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
select SND_SOC_PCM512x_I2C
help
This adds support for ASoC machine driver for SOF platforms
@@ -504,6 +535,7 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
select SND_SOC_RT5682_I2C
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
help
This adds support for ASoC machine driver for SOF platform with
RT1011 + RT5682 I2S codec.
@@ -519,6 +551,7 @@ config SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH
depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
+ select SND_SOC_INTEL_HDA_DSP_COMMON
select SND_SOC_DA7219
select SND_SOC_MAX98373_I2C
select SND_SOC_DMIC
@@ -539,6 +572,7 @@ config SND_SOC_INTEL_EHL_RT5660_MACH
depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_RT5660
select SND_SOC_DMIC
+ select SND_SOC_INTEL_HDA_DSP_COMMON
help
This adds support for ASoC machine driver for Elkhart Lake
platform with RT5660 I2S audio codec.
@@ -566,6 +600,8 @@ config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
select SND_SOC_RT715_SDCA_SDW
select SND_SOC_RT5682_SDW
select SND_SOC_DMIC
+ select SND_SOC_INTEL_HDA_DSP_COMMON
+ select SND_SOC_INTEL_SOF_MAXIM_COMMON
help
Add support for Intel SoundWire-based platforms connected to
MAX98373, RT700, RT711, RT1308 and RT715
@@ -573,5 +609,4 @@ config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
endif
-
endif ## SND_SOC_INTEL_MACH
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 616c5fbab7d5..ed21b82a4cf6 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -3,11 +3,11 @@ snd-soc-sst-haswell-objs := haswell.o
snd-soc-sst-bdw-rt5650-mach-objs := bdw-rt5650.o
snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
snd-soc-sst-broadwell-objs := broadwell.o
-snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o hda_dsp_common.o
-snd-soc-sst-bxt-rt298-objs := bxt_rt298.o hda_dsp_common.o
-snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o hda_dsp_common.o
+snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
+snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
+snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o
snd-soc-sst-sof-wm8804-objs := sof_wm8804.o
-snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o hda_dsp_common.o
+snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
snd-soc-sst-bytcr-wm5102-objs := bytcr_wm5102.o
@@ -19,28 +19,29 @@ snd-soc-sst-byt-cht-cx2072x-objs := bytcht_cx2072x.o
snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
-snd-soc-sof_rt5682-objs := sof_rt5682.o hda_dsp_common.o sof_maxim_common.o sof_realtek_common.o
-snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o hda_dsp_common.o
+snd-soc-sof_rt5682-objs := sof_rt5682.o sof_realtek_common.o
+snd-soc-sof_cs42l42-objs := sof_cs42l42.o
+snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-kbl_rt5660-objs := kbl_rt5660.o
snd-soc-skl_rt286-objs := skl_rt286.o
-snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o hda_dsp_common.o
+snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
-snd-soc-sof_da7219_max98373-objs := sof_da7219_max98373.o hda_dsp_common.o
-snd-soc-ehl-rt5660-objs := ehl_rt5660.o hda_dsp_common.o
+snd-soc-sof_da7219_max98373-objs := sof_da7219_max98373.o
+snd-soc-ehl-rt5660-objs := ehl_rt5660.o
snd-soc-sof-sdw-objs += sof_sdw.o \
sof_sdw_max98373.o \
sof_sdw_rt1308.o sof_sdw_rt1316.o \
sof_sdw_rt5682.o sof_sdw_rt700.o \
sof_sdw_rt711.o sof_sdw_rt711_sdca.o \
sof_sdw_rt715.o sof_sdw_rt715_sdca.o \
- sof_maxim_common.o \
- sof_sdw_dmic.o sof_sdw_hdmi.o hda_dsp_common.o
+ sof_sdw_dmic.o sof_sdw_hdmi.o
obj-$(CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH) += snd-soc-sof_rt5682.o
+obj-$(CONFIG_SND_SOC_INTEL_SOF_CS42L42_MACH) += snd-soc-sof_cs42l42.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
@@ -74,3 +75,10 @@ obj-$(CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH) += snd-soc-skl_hda_dsp.o
obj-$(CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH) += snd-soc-sof_da7219_max98373.o
obj-$(CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH) += snd-soc-ehl-rt5660.o
obj-$(CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH) += snd-soc-sof-sdw.o
+
+# common modules
+snd-soc-intel-hda-dsp-common-objs := hda_dsp_common.o
+obj-$(CONFIG_SND_SOC_INTEL_HDA_DSP_COMMON) += snd-soc-intel-hda-dsp-common.o
+
+snd-soc-intel-sof-maxim-common-objs += sof_maxim_common.o
+obj-$(CONFIG_SND_SOC_INTEL_SOF_MAXIM_COMMON) += snd-soc-intel-sof-maxim-common.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 021bc59aac80..e01b7a90ca6c 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -423,10 +423,8 @@ static int bdw_rt5677_probe(struct platform_device *pdev)
/* Allocate driver private struct */
bdw_rt5677 = devm_kzalloc(&pdev->dev, sizeof(struct bdw_rt5677_priv),
GFP_KERNEL);
- if (!bdw_rt5677) {
- dev_err(&pdev->dev, "Can't allocate bdw_rt5677\n");
+ if (!bdw_rt5677)
return -ENOMEM;
- }
/* override plaform name, if required */
mach = pdev->dev.platform_data;
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 9ffef396f8f2..e67ddfb8e469 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -840,11 +840,12 @@ static int broxton_audio_probe(struct platform_device *pdev)
}
static const struct platform_device_id bxt_board_ids[] = {
- { .name = "bxt_da7219_max98357a" },
- { .name = "glk_da7219_max98357a" },
- { .name = "cml_da7219_max98357a" },
+ { .name = "bxt_da7219_mx98357a" },
+ { .name = "glk_da7219_mx98357a" },
+ { .name = "cml_da7219_mx98357a" },
{ }
};
+MODULE_DEVICE_TABLE(platform, bxt_board_ids);
static struct platform_driver broxton_audio = {
.probe = broxton_audio_probe,
@@ -866,6 +867,4 @@ MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
MODULE_AUTHOR("Brent Lu <brent.lu@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bxt_da7219_max98357a");
-MODULE_ALIAS("platform:glk_da7219_max98357a");
-MODULE_ALIAS("platform:cml_da7219_max98357a");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
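The hand-written MODULE_ALIAS("platform:...") lines dropped here become redundant once MODULE_DEVICE_TABLE() is attached to the platform_device_id array: modpost generates one "platform:<name>" alias per table entry. The shortened "mx98357a" spellings most likely keep each name within the PLATFORM_NAME_SIZE (20 byte) limit of that table, though the patch itself does not state the reason. A sketch with hypothetical names:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static const struct platform_device_id example_board_ids[] = {
            /* must fit in PLATFORM_NAME_SIZE, hence shorthand like "mx98357a" */
            { .name = "example_mx98357a" },
            { }
    };
    /* modpost emits MODULE_ALIAS("platform:example_mx98357a") from this table */
    MODULE_DEVICE_TABLE(platform, example_board_ids);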
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 0f3157dfa838..47f6b1523ae6 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -649,6 +649,7 @@ static const struct platform_device_id bxt_board_ids[] = {
(unsigned long)&geminilake_rt298 },
{}
};
+MODULE_DEVICE_TABLE(platform, bxt_board_ids);
static struct platform_driver broxton_audio = {
.probe = broxton_audio_probe,
@@ -665,5 +666,4 @@ MODULE_AUTHOR("Ramesh Babu <Ramesh.Babu@intel.com>");
MODULE_AUTHOR("Senthilnathan Veppur <senthilnathanx.veppur@intel.com>");
MODULE_DESCRIPTION("Intel SST Audio for Broxton");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bxt_alc298s_i2s");
-MODULE_ALIAS("platform:glk_alc298s_i2s");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c
index 2bfe3e4c696f..a9e51bbf018c 100644
--- a/sound/soc/intel/boards/bytcht_cx2072x.c
+++ b/sound/soc/intel/boards/bytcht_cx2072x.c
@@ -198,7 +198,6 @@ static struct snd_soc_dai_link byt_cht_cx2072x_dais[] = {
| SND_SOC_DAIFMT_CBS_CFS,
.init = byt_cht_cx2072x_init,
.be_hw_params_fixup = byt_cht_cx2072x_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
SND_SOC_DAILINK_REG(ssp2, cx2072x, platform),
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index cfeba27252ba..a28773fb7892 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -197,7 +197,6 @@ static struct snd_soc_dai_link dailink[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = codec_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.ops = &ssp2_ops,
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 06df2d46d910..a0af91580184 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -337,7 +337,6 @@ static struct snd_soc_dai_link byt_cht_es8316_dais[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_cht_es8316_codec_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = byt_cht_es8316_init,
diff --git a/sound/soc/intel/boards/bytcht_nocodec.c b/sound/soc/intel/boards/bytcht_nocodec.c
index 8c0dab1f4030..9b48fe701a2c 100644
--- a/sound/soc/intel/boards/bytcht_nocodec.c
+++ b/sound/soc/intel/boards/bytcht_nocodec.c
@@ -144,7 +144,6 @@ static struct snd_soc_dai_link dais[] = {
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = codec_fixup,
.ignore_suspend = 1,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
SND_SOC_DAILINK_REG(ssp2_port, dummy, platform),
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 22dbd9d93c1e..91a6d712eb58 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1205,7 +1205,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_rt5640_codec_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = byt_rt5640_init,
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 148b7b1bd3e8..e13c0c63a949 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -786,7 +786,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_rt5651_codec_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = byt_rt5651_init,
diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c
index 8d8ab9be256f..580d5fddae5a 100644
--- a/sound/soc/intel/boards/bytcr_wm5102.c
+++ b/sound/soc/intel/boards/bytcr_wm5102.c
@@ -351,7 +351,6 @@ static struct snd_soc_dai_link byt_wm5102_dais[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_wm5102_codec_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = byt_wm5102_init,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 6fea554cfed5..804dbc7911d5 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -471,7 +471,6 @@ static struct snd_soc_dai_link cht_dailink[] = {
.no_pcm = 1,
.init = cht_codec_init,
.be_hw_params_fixup = cht_codec_fixup,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.ops = &cht_be_ssp2_ops,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index e358632f50d7..9509b6e161b8 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -375,7 +375,6 @@ static struct snd_soc_dai_link cht_dailink[] = {
.name = "SSP2-Codec",
.id = 0,
.no_pcm = 1,
- .nonatomic = true,
.init = cht_codec_init,
.be_hw_params_fixup = cht_codec_fixup,
.dpcm_playback = 1,
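The Baytrail/Cherrytrail back ends above all drop .nonatomic = true. In ASoC that dai_link flag marks a PCM whose operations may sleep; with it cleared (the default) the trigger callbacks run on the normal atomic path again. Illustrative field only, not a complete link definition:

    #include <sound/soc.h>

    static struct snd_soc_dai_link example_be_link = {
            .name = "SSP2-Codec",
            .no_pcm = 1,
            .dpcm_playback = 1,
            .dpcm_capture = 1,
            /* .nonatomic = 1,  <- removed: keep the default atomic trigger path */
    };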
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
index 14813beb33d1..27615acddacd 100644
--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -594,3 +594,4 @@ MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cml_rt1011_rt5682");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/ehl_rt5660.c b/sound/soc/intel/boards/ehl_rt5660.c
index 7c0d4e915406..d5235c294c4c 100644
--- a/sound/soc/intel/boards/ehl_rt5660.c
+++ b/sound/soc/intel/boards/ehl_rt5660.c
@@ -181,7 +181,6 @@ static struct snd_soc_dai_link ehl_rt5660_dailink[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
.ops = &rt5660_ops,
- .nonatomic = true,
SND_SOC_DAILINK_REG(ssp0_pin, rt5660_codec, platform),
},
{
@@ -305,6 +304,7 @@ static const struct platform_device_id ehl_board_ids[] = {
{ .name = "ehl_rt5660" },
{ }
};
+MODULE_DEVICE_TABLE(platform, ehl_board_ids);
static struct platform_driver snd_ehl_rt5660_driver = {
.driver = {
@@ -320,4 +320,4 @@ module_platform_driver(snd_ehl_rt5660_driver);
MODULE_DESCRIPTION("ASoC Intel(R) Elkhartlake + rt5660 Machine driver");
MODULE_AUTHOR("libin.yang@intel.com");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ehl_rt5660");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index 62cca511522e..71fe26a1b701 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -619,12 +619,13 @@ static int geminilake_audio_probe(struct platform_device *pdev)
static const struct platform_device_id glk_board_ids[] = {
{
- .name = "glk_rt5682_max98357a",
+ .name = "glk_rt5682_mx98357a",
.driver_data =
(kernel_ulong_t)&glk_audio_card_rt5682_m98357a,
},
{ }
};
+MODULE_DEVICE_TABLE(platform, glk_board_ids);
static struct platform_driver geminilake_audio = {
.probe = geminilake_audio_probe,
@@ -641,4 +642,4 @@ MODULE_DESCRIPTION("Geminilake Audio Machine driver-RT5682 & MAX98357A in I2S mo
MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:glk_rt5682_max98357a");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/hda_dsp_common.c b/sound/soc/intel/boards/hda_dsp_common.c
index 91ad2a0ad1ce..efdc4bc4bb1f 100644
--- a/sound/soc/intel/boards/hda_dsp_common.c
+++ b/sound/soc/intel/boards/hda_dsp_common.c
@@ -2,6 +2,7 @@
//
// Copyright(c) 2019 Intel Corporation. All rights reserved.
+#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/hda_codec.h>
@@ -82,5 +83,9 @@ int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
return err;
}
+EXPORT_SYMBOL_NS(hda_dsp_hdmi_build_controls, SND_SOC_INTEL_HDA_DSP_COMMON);
#endif
+
+MODULE_DESCRIPTION("ASoC Intel HDMI helpers");
+MODULE_LICENSE("GPL");
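hda_dsp_common.c now builds as its own module and publishes its symbol in the SND_SOC_INTEL_HDA_DSP_COMMON namespace, which is why every consumer in this series gains a MODULE_IMPORT_NS() line. A generic sketch of the pairing, with hypothetical names:

    #include <linux/module.h>

    /* provider module */
    int example_build_controls(void)
    {
            return 0;
    }
    EXPORT_SYMBOL_NS(example_build_controls, EXAMPLE_HELPER);
    MODULE_LICENSE("GPL");

    /*
     * Any module calling example_build_controls() must also declare
     *
     *         MODULE_IMPORT_NS(EXAMPLE_HELPER);
     *
     * otherwise modpost flags the missing namespace import at build time.
     */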
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index c0d8a73c6d21..14b625e947f5 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -644,12 +644,13 @@ static int kabylake_audio_probe(struct platform_device *pdev)
static const struct platform_device_id kbl_board_ids[] = {
{
- .name = "kbl_da7219_max98357a",
+ .name = "kbl_da7219_mx98357a",
.driver_data =
(kernel_ulong_t)&kabylake_audio_card_da7219_m98357a,
},
{ }
};
+MODULE_DEVICE_TABLE(platform, kbl_board_ids);
static struct platform_driver kabylake_audio = {
.probe = kabylake_audio_probe,
@@ -666,4 +667,3 @@ module_platform_driver(kabylake_audio)
MODULE_DESCRIPTION("Audio Machine driver-DA7219 & MAX98357A in I2S mode");
MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:kbl_da7219_max98357a");
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index 4b7b4a044f81..a31a7a7bbf66 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -1175,6 +1175,7 @@ static const struct platform_device_id kbl_board_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(platform, kbl_board_ids);
static struct platform_driver kabylake_audio = {
.probe = kabylake_audio_probe,
@@ -1191,7 +1192,3 @@ module_platform_driver(kabylake_audio)
MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927/MAX98373 & DA7219");
MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:kbl_da7219_max98927");
-MODULE_ALIAS("platform:kbl_max98927");
-MODULE_ALIAS("platform:kbl_da7219_max98373");
-MODULE_ALIAS("platform:kbl_max98373");
diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
index 3a9f91b58e11..289ca39b8206 100644
--- a/sound/soc/intel/boards/kbl_rt5660.c
+++ b/sound/soc/intel/boards/kbl_rt5660.c
@@ -548,6 +548,7 @@ static const struct platform_device_id kbl_board_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(platform, kbl_board_ids);
static struct platform_driver kabylake_audio = {
.probe = kabylake_audio_probe,
@@ -564,4 +565,3 @@ module_platform_driver(kabylake_audio)
MODULE_DESCRIPTION("Audio Machine driver-RT5660 in I2S mode");
MODULE_AUTHOR("Hui Wang <hui.wang@canonical.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:kbl_rt5660");
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index a3de55a3b58d..a3e040a249f6 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -1039,6 +1039,7 @@ static const struct platform_device_id kbl_board_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(platform, kbl_board_ids);
static struct platform_driver kabylake_audio = {
.probe = kabylake_audio_probe,
@@ -1056,5 +1057,3 @@ MODULE_DESCRIPTION("Audio Machine driver-RT5663 & MAX98927 in I2S mode");
MODULE_AUTHOR("Naveen M <naveen.m@intel.com>");
MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:kbl_rt5663");
-MODULE_ALIAS("platform:kbl_rt5663_m98927");
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index f95546c184aa..dd38fdaf2ff5 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -837,6 +837,7 @@ static const struct platform_device_id kbl_board_ids[] = {
{ .name = "kbl_r5514_5663_max" },
{ }
};
+MODULE_DEVICE_TABLE(platform, kbl_board_ids);
static struct platform_driver kabylake_audio = {
.probe = kabylake_audio_probe,
@@ -853,4 +854,3 @@ module_platform_driver(kabylake_audio)
MODULE_DESCRIPTION("Audio Machine driver-RT5663 RT5514 & MAX98927");
MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:kbl_r5514_5663_max");
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index bc50eda297ab..f4b4eeca3e03 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -258,3 +258,4 @@ MODULE_DESCRIPTION("SKL/KBL/BXT/APL HDA Generic Machine driver");
MODULE_AUTHOR("Rakesh Ughreja <rakesh.a.ughreja@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:skl_hda_dsp_generic");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 55802900069a..e3a1f04a8b53 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -673,6 +673,7 @@ static const struct platform_device_id skl_board_ids[] = {
{ .name = "kbl_n88l25_m98357a" },
{ }
};
+MODULE_DEVICE_TABLE(platform, skl_board_ids);
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
@@ -689,5 +690,3 @@ module_platform_driver(skylake_audio)
MODULE_DESCRIPTION("Audio Machine driver-NAU88L25 & MAX98357A in I2S mode");
MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_n88l25_m98357a");
-MODULE_ALIAS("platform:kbl_n88l25_m98357a");
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index 0c734f3a9364..adf5992a9ec5 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -717,6 +717,7 @@ static const struct platform_device_id skl_board_ids[] = {
{ .name = "kbl_n88l25_s4567" },
{ }
};
+MODULE_DEVICE_TABLE(platform, skl_board_ids);
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
@@ -737,5 +738,3 @@ MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_DESCRIPTION("Intel Audio Machine driver for SKL with NAU88L25 and SSM4567 in I2S Mode");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_n88l25_s4567");
-MODULE_ALIAS("platform:kbl_n88l25_s4567");
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 5a0c64a83146..75dab5405380 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -548,6 +548,7 @@ static const struct platform_device_id skl_board_ids[] = {
{ .name = "kbl_alc286s_i2s" },
{ }
};
+MODULE_DEVICE_TABLE(platform, skl_board_ids);
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
@@ -565,5 +566,3 @@ module_platform_driver(skylake_audio)
MODULE_AUTHOR("Omair Mohammed Abdullah <omair.m.abdullah@intel.com>");
MODULE_DESCRIPTION("Intel SST Audio for Skylake");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_alc286s_i2s");
-MODULE_ALIAS("platform:kbl_alc286s_i2s");
diff --git a/sound/soc/intel/boards/sof_cs42l42.c b/sound/soc/intel/boards/sof_cs42l42.c
new file mode 100644
index 000000000000..42aadf801f72
--- /dev/null
+++ b/sound/soc/intel/boards/sof_cs42l42.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2021 Intel Corporation.
+
+/*
+ * Intel SOF Machine Driver with Cirrus Logic CS42L42 Codec
+ * and speaker codec MAX98357A
+ */
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dmi.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <dt-bindings/sound/cs42l42.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../common/soc-intel-quirks.h"
+#include "hda_dsp_common.h"
+#include "sof_maxim_common.h"
+
+#define NAME_SIZE 32
+
+#define SOF_CS42L42_SSP_CODEC(quirk) ((quirk) & GENMASK(2, 0))
+#define SOF_CS42L42_SSP_CODEC_MASK (GENMASK(2, 0))
+#define SOF_SPEAKER_AMP_PRESENT BIT(3)
+#define SOF_CS42L42_SSP_AMP_SHIFT 4
+#define SOF_CS42L42_SSP_AMP_MASK (GENMASK(6, 4))
+#define SOF_CS42L42_SSP_AMP(quirk) \
+ (((quirk) << SOF_CS42L42_SSP_AMP_SHIFT) & SOF_CS42L42_SSP_AMP_MASK)
+#define SOF_CS42L42_NUM_HDMIDEV_SHIFT 7
+#define SOF_CS42L42_NUM_HDMIDEV_MASK (GENMASK(9, 7))
+#define SOF_CS42L42_NUM_HDMIDEV(quirk) \
+ (((quirk) << SOF_CS42L42_NUM_HDMIDEV_SHIFT) & SOF_CS42L42_NUM_HDMIDEV_MASK)
+#define SOF_MAX98357A_SPEAKER_AMP_PRESENT BIT(10)
+
+/* Default: SSP2 */
+static unsigned long sof_cs42l42_quirk = SOF_CS42L42_SSP_CODEC(2);
+
+struct sof_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_jack hdmi_jack;
+ int device;
+};
+
+struct sof_card_private {
+ struct snd_soc_jack headset_jack;
+ struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
+};
+
+static int sof_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
+ struct sof_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ /* dai_link id is 1:1 mapped to the PCM device */
+ pcm->device = rtd->dai_link->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int sof_cs42l42_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
+ struct snd_soc_jack *jack = &ctx->headset_jack;
+ int ret;
+
+ /*
+ * Headset buttons map to the google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ jack, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+};
+
+static void sof_cs42l42_exit(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
+
+ snd_soc_component_set_jack(component, NULL, NULL);
+}
+
+static int sof_cs42l42_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int clk_freq, ret;
+
+ clk_freq = 3072000; /* BCLK freq */
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0,
+ clk_freq, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ return ret;
+}
+
+static const struct snd_soc_ops sof_cs42l42_ops = {
+ .hw_params = sof_cs42l42_hw_params,
+};
+
+static struct snd_soc_dai_link_component platform_component[] = {
+ {
+ /* name might be overridden during probe */
+ .name = "0000:00:1f.3"
+ }
+};
+
+static int sof_card_late_probe(struct snd_soc_card *card)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = NULL;
+ char jack_name[NAME_SIZE];
+ struct sof_hdmi_pcm *pcm;
+ int err;
+
+ if (list_empty(&ctx->hdmi_pcm_list))
+ return -EINVAL;
+
+ if (ctx->common_hdmi_codec_drv) {
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+ return hda_dsp_hdmi_build_controls(card, component);
+ }
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &pcm->hdmi_jack,
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &pcm->hdmi_jack);
+ if (err < 0)
+ return err;
+ }
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+static const struct snd_kcontrol_new sof_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static const struct snd_soc_dapm_widget sof_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+};
+
+static const struct snd_soc_dapm_route sof_map[] = {
+ /* HP jack connectors - unknown if we have jack detection */
+ {"Headphone Jack", NULL, "HP"},
+
+ /* other jacks */
+ {"HS", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route dmic_map[] = {
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+ ARRAY_SIZE(dmic_widgets));
+ if (ret) {
+ dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+ ARRAY_SIZE(dmic_map));
+
+ if (ret)
+ dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+ return ret;
+}
+
+/* sof audio machine driver for cs42l42 codec */
+static struct snd_soc_card sof_audio_card_cs42l42 = {
+ .name = "cs42l42", /* the sof- prefix is added by the core */
+ .owner = THIS_MODULE,
+ .controls = sof_controls,
+ .num_controls = ARRAY_SIZE(sof_controls),
+ .dapm_widgets = sof_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sof_widgets),
+ .dapm_routes = sof_map,
+ .num_dapm_routes = ARRAY_SIZE(sof_map),
+ .fully_routed = true,
+ .late_probe = sof_card_late_probe,
+};
+
+static struct snd_soc_dai_link_component cs42l42_component[] = {
+ {
+ .name = "i2c-10134242:00",
+ .dai_name = "cs42l42",
+ }
+};
+
+static struct snd_soc_dai_link_component dmic_component[] = {
+ {
+ .name = "dmic-codec",
+ .dai_name = "dmic-hifi",
+ }
+};
+
+static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ int ssp_codec,
+ int ssp_amp,
+ int dmic_be_num,
+ int hdmi_num)
+{
+ struct snd_soc_dai_link_component *idisp_components;
+ struct snd_soc_dai_link_component *cpus;
+ struct snd_soc_dai_link *links;
+ int i, id = 0;
+
+ links = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link) *
+ sof_audio_card_cs42l42.num_links, GFP_KERNEL);
+ cpus = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link_component) *
+ sof_audio_card_cs42l42.num_links, GFP_KERNEL);
+ if (!links || !cpus)
+ goto devm_err;
+
+ /* speaker amp */
+ if (sof_cs42l42_quirk & SOF_SPEAKER_AMP_PRESENT) {
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d-Codec", ssp_amp);
+ if (!links[id].name)
+ goto devm_err;
+
+ links[id].id = id;
+
+ if (sof_cs42l42_quirk & SOF_MAX98357A_SPEAKER_AMP_PRESENT) {
+ max_98357a_dai_link(&links[id]);
+ } else {
+ dev_err(dev, "no amp defined\n");
+ goto devm_err;
+ }
+
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].dpcm_playback = 1;
+ links[id].no_pcm = 1;
+ links[id].cpus = &cpus[id];
+ links[id].num_cpus = 1;
+
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d Pin",
+ ssp_amp);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+
+ id++;
+ }
+
+ /* codec SSP */
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d-Codec", ssp_codec);
+ if (!links[id].name)
+ goto devm_err;
+
+ links[id].id = id;
+ links[id].codecs = cs42l42_component;
+ links[id].num_codecs = ARRAY_SIZE(cs42l42_component);
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].init = sof_cs42l42_init;
+ links[id].exit = sof_cs42l42_exit;
+ links[id].ops = &sof_cs42l42_ops;
+ links[id].dpcm_playback = 1;
+ links[id].dpcm_capture = 1;
+ links[id].no_pcm = 1;
+ links[id].cpus = &cpus[id];
+ links[id].num_cpus = 1;
+
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d Pin",
+ ssp_codec);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+
+ id++;
+
+ /* dmic */
+ if (dmic_be_num > 0) {
+ /* at least we have dmic01 */
+ links[id].name = "dmic01";
+ links[id].cpus = &cpus[id];
+ links[id].cpus->dai_name = "DMIC01 Pin";
+ links[id].init = dmic_init;
+ if (dmic_be_num > 1) {
+ /* set up 2 BE links at most */
+ links[id + 1].name = "dmic16k";
+ links[id + 1].cpus = &cpus[id + 1];
+ links[id + 1].cpus->dai_name = "DMIC16k Pin";
+ dmic_be_num = 2;
+ }
+ }
+
+ for (i = 0; i < dmic_be_num; i++) {
+ links[id].id = id;
+ links[id].num_cpus = 1;
+ links[id].codecs = dmic_component;
+ links[id].num_codecs = ARRAY_SIZE(dmic_component);
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].ignore_suspend = 1;
+ links[id].dpcm_capture = 1;
+ links[id].no_pcm = 1;
+ id++;
+ }
+
+ /* HDMI */
+ if (hdmi_num > 0) {
+ idisp_components = devm_kzalloc(dev,
+ sizeof(struct snd_soc_dai_link_component) *
+ hdmi_num, GFP_KERNEL);
+ if (!idisp_components)
+ goto devm_err;
+ }
+ for (i = 1; i <= hdmi_num; i++) {
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL,
+ "iDisp%d", i);
+ if (!links[id].name)
+ goto devm_err;
+
+ links[id].id = id;
+ links[id].cpus = &cpus[id];
+ links[id].num_cpus = 1;
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "iDisp%d Pin", i);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+
+ idisp_components[i - 1].name = "ehdaudio0D2";
+ idisp_components[i - 1].dai_name = devm_kasprintf(dev,
+ GFP_KERNEL,
+ "intel-hdmi-hifi%d",
+ i);
+ if (!idisp_components[i - 1].dai_name)
+ goto devm_err;
+
+ links[id].codecs = &idisp_components[i - 1];
+ links[id].num_codecs = 1;
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].init = sof_hdmi_init;
+ links[id].dpcm_playback = 1;
+ links[id].no_pcm = 1;
+ id++;
+ }
+
+ return links;
+devm_err:
+ return NULL;
+}
+
+static int sof_audio_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dai_link *dai_links;
+ struct snd_soc_acpi_mach *mach;
+ struct sof_card_private *ctx;
+ int dmic_be_num, hdmi_num;
+ int ret, ssp_amp, ssp_codec;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (pdev->id_entry && pdev->id_entry->driver_data)
+ sof_cs42l42_quirk = (unsigned long)pdev->id_entry->driver_data;
+
+ mach = pdev->dev.platform_data;
+
+ if (soc_intel_is_glk()) {
+ dmic_be_num = 1;
+ hdmi_num = 3;
+ } else {
+ dmic_be_num = 2;
+ hdmi_num = (sof_cs42l42_quirk & SOF_CS42L42_NUM_HDMIDEV_MASK) >>
+ SOF_CS42L42_NUM_HDMIDEV_SHIFT;
+ /* default number of HDMI DAI's */
+ if (!hdmi_num)
+ hdmi_num = 3;
+ }
+
+ dev_dbg(&pdev->dev, "sof_cs42l42_quirk = %lx\n", sof_cs42l42_quirk);
+
+ ssp_amp = (sof_cs42l42_quirk & SOF_CS42L42_SSP_AMP_MASK) >>
+ SOF_CS42L42_SSP_AMP_SHIFT;
+
+ ssp_codec = sof_cs42l42_quirk & SOF_CS42L42_SSP_CODEC_MASK;
+
+ /* compute number of dai links */
+ sof_audio_card_cs42l42.num_links = 1 + dmic_be_num + hdmi_num;
+
+ if (sof_cs42l42_quirk & SOF_SPEAKER_AMP_PRESENT)
+ sof_audio_card_cs42l42.num_links++;
+
+ dai_links = sof_card_dai_links_create(&pdev->dev, ssp_codec, ssp_amp,
+ dmic_be_num, hdmi_num);
+ if (!dai_links)
+ return -ENOMEM;
+
+ sof_audio_card_cs42l42.dai_link = dai_links;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ sof_audio_card_cs42l42.dev = &pdev->dev;
+
+ /* set platform name for each dailink */
+ ret = snd_soc_fixup_dai_links_platform_name(&sof_audio_card_cs42l42,
+ mach->mach_params.platform);
+ if (ret)
+ return ret;
+
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
+ snd_soc_card_set_drvdata(&sof_audio_card_cs42l42, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev,
+ &sof_audio_card_cs42l42);
+}
+
+static const struct platform_device_id board_ids[] = {
+ {
+ .name = "glk_cs4242_mx98357a",
+ .driver_data = (kernel_ulong_t)(SOF_CS42L42_SSP_CODEC(2) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_MAX98357A_SPEAKER_AMP_PRESENT |
+ SOF_CS42L42_SSP_AMP(1)),
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, board_ids);
+
+static struct platform_driver sof_audio = {
+ .probe = sof_audio_probe,
+ .driver = {
+ .name = "sof_cs42l42",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = board_ids,
+};
+module_platform_driver(sof_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("SOF Audio Machine driver for CS42L42");
+MODULE_AUTHOR("Brent Lu <brent.lu@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_MAXIM_COMMON);
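The new driver packs the codec SSP, amplifier SSP and HDMI device count into a single unsigned long quirk using GENMASK() fields, in the same way as the other SOF boards. A minimal stand-alone sketch of that encode/decode scheme, with illustrative values:

    #include <linux/bits.h>

    #define EX_SSP_CODEC_MASK       GENMASK(2, 0)
    #define EX_SSP_AMP_SHIFT        4
    #define EX_SSP_AMP_MASK         GENMASK(6, 4)
    #define EX_SSP_AMP(x)           (((x) << EX_SSP_AMP_SHIFT) & EX_SSP_AMP_MASK)

    /* amp on SSP1, codec on SSP2 */
    static const unsigned long ex_quirk = EX_SSP_AMP(1) | 2;

    static void ex_decode(int *ssp_codec, int *ssp_amp)
    {
            *ssp_codec = ex_quirk & EX_SSP_CODEC_MASK;                      /* -> 2 */
            *ssp_amp = (ex_quirk & EX_SSP_AMP_MASK) >> EX_SSP_AMP_SHIFT;    /* -> 1 */
    }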
diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c
index f3cb0773e70e..b7b3b0bf994a 100644
--- a/sound/soc/intel/boards/sof_da7219_max98373.c
+++ b/sound/soc/intel/boards/sof_da7219_max98373.c
@@ -404,7 +404,7 @@ static int audio_probe(struct platform_device *pdev)
return -ENOMEM;
/* By default dais[0] is configured for max98373 */
- if (!strcmp(pdev->name, "sof_da7219_max98360a")) {
+ if (!strcmp(pdev->name, "sof_da7219_mx98360a")) {
dais[0] = (struct snd_soc_dai_link) {
.name = "SSP1-Codec",
.id = 0,
@@ -431,15 +431,16 @@ static int audio_probe(struct platform_device *pdev)
static const struct platform_device_id board_ids[] = {
{
- .name = "sof_da7219_max98373",
+ .name = "sof_da7219_mx98373",
.driver_data = (kernel_ulong_t)&card_da7219_m98373,
},
{
- .name = "sof_da7219_max98360a",
+ .name = "sof_da7219_mx98360a",
.driver_data = (kernel_ulong_t)&card_da7219_m98360a,
},
{ }
};
+MODULE_DEVICE_TABLE(platform, board_ids);
static struct platform_driver audio = {
.probe = audio_probe,
@@ -455,5 +456,4 @@ module_platform_driver(audio)
MODULE_DESCRIPTION("ASoC Intel(R) SOF Machine driver");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sof_da7219_max98360a");
-MODULE_ALIAS("platform:sof_da7219_max98373");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/sof_maxim_common.c b/sound/soc/intel/boards/sof_maxim_common.c
index 437d20562753..e9c52f8b6428 100644
--- a/sound/soc/intel/boards/sof_maxim_common.c
+++ b/sound/soc/intel/boards/sof_maxim_common.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2020 Intel Corporation. All rights reserved.
+#include <linux/module.h>
#include <linux/string.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -16,6 +17,7 @@ const struct snd_soc_dapm_route max_98373_dapm_routes[] = {
{ "Left Spk", NULL, "Left BE_OUT" },
{ "Right Spk", NULL, "Right BE_OUT" },
};
+EXPORT_SYMBOL_NS(max_98373_dapm_routes, SND_SOC_INTEL_SOF_MAXIM_COMMON);
static struct snd_soc_codec_conf max_98373_codec_conf[] = {
{
@@ -38,9 +40,10 @@ struct snd_soc_dai_link_component max_98373_components[] = {
.dai_name = MAX_98373_CODEC_DAI,
},
};
+EXPORT_SYMBOL_NS(max_98373_components, SND_SOC_INTEL_SOF_MAXIM_COMMON);
-static int max98373_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int max_98373_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai;
@@ -59,7 +62,7 @@ static int max98373_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
+int max_98373_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai;
@@ -102,13 +105,15 @@ int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
+EXPORT_SYMBOL_NS(max_98373_trigger, SND_SOC_INTEL_SOF_MAXIM_COMMON);
struct snd_soc_ops max_98373_ops = {
- .hw_params = max98373_hw_params,
- .trigger = max98373_trigger,
+ .hw_params = max_98373_hw_params,
+ .trigger = max_98373_trigger,
};
+EXPORT_SYMBOL_NS(max_98373_ops, SND_SOC_INTEL_SOF_MAXIM_COMMON);
-int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
+int max_98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
int ret;
@@ -119,9 +124,74 @@ int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
dev_err(rtd->dev, "Speaker map addition failed: %d\n", ret);
return ret;
}
+EXPORT_SYMBOL_NS(max_98373_spk_codec_init, SND_SOC_INTEL_SOF_MAXIM_COMMON);
-void sof_max98373_codec_conf(struct snd_soc_card *card)
+void max_98373_set_codec_conf(struct snd_soc_card *card)
{
card->codec_conf = max_98373_codec_conf;
card->num_configs = ARRAY_SIZE(max_98373_codec_conf);
}
+EXPORT_SYMBOL_NS(max_98373_set_codec_conf, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+/*
+ * Maxim MAX98357A
+ */
+static const struct snd_kcontrol_new max_98357a_kcontrols[] = {
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
+static const struct snd_soc_dapm_widget max_98357a_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_route max_98357a_dapm_routes[] = {
+ /* speaker */
+ {"Spk", NULL, "Speaker"},
+};
+
+static struct snd_soc_dai_link_component max_98357a_components[] = {
+ {
+ .name = MAX_98357A_DEV0_NAME,
+ .dai_name = MAX_98357A_CODEC_DAI,
+ }
+};
+
+static int max_98357a_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, max_98357a_dapm_widgets,
+ ARRAY_SIZE(max_98357a_dapm_widgets));
+ if (ret) {
+ dev_err(rtd->dev, "unable to add dapm controls, ret %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_add_card_controls(card, max_98357a_kcontrols,
+ ARRAY_SIZE(max_98357a_kcontrols));
+ if (ret) {
+ dev_err(rtd->dev, "unable to add card controls, ret %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, max_98357a_dapm_routes,
+ ARRAY_SIZE(max_98357a_dapm_routes));
+
+ if (ret)
+ dev_err(rtd->dev, "unable to add dapm routes, ret %d\n", ret);
+
+ return ret;
+}
+
+void max_98357a_dai_link(struct snd_soc_dai_link *link)
+{
+ link->codecs = max_98357a_components;
+ link->num_codecs = ARRAY_SIZE(max_98357a_components);
+ link->init = max_98357a_init;
+}
+EXPORT_SYMBOL_NS(max_98357a_dai_link, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+MODULE_DESCRIPTION("ASoC Intel SOF Maxim helpers");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/boards/sof_maxim_common.h b/sound/soc/intel/boards/sof_maxim_common.h
index 5240b1c9d379..2674f1e373ef 100644
--- a/sound/soc/intel/boards/sof_maxim_common.h
+++ b/sound/soc/intel/boards/sof_maxim_common.h
@@ -20,8 +20,16 @@ extern struct snd_soc_dai_link_component max_98373_components[2];
extern struct snd_soc_ops max_98373_ops;
extern const struct snd_soc_dapm_route max_98373_dapm_routes[];
-int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd);
-void sof_max98373_codec_conf(struct snd_soc_card *card);
-int max98373_trigger(struct snd_pcm_substream *substream, int cmd);
+int max_98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd);
+void max_98373_set_codec_conf(struct snd_soc_card *card);
+int max_98373_trigger(struct snd_pcm_substream *substream, int cmd);
+
+/*
+ * Maxim MAX98357A
+ */
+#define MAX_98357A_CODEC_DAI "HiFi"
+#define MAX_98357A_DEV0_NAME "MX98357A:00"
+
+void max_98357a_dai_link(struct snd_soc_dai_link *link);
#endif /* __SOF_MAXIM_COMMON_H */
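With the MAX98357A handling moved into sof_maxim_common, boards no longer open-code the codec component array; they call max_98357a_dai_link() on the speaker back end, as sof_cs42l42.c and sof_rt5682.c do above. A hedged usage sketch (field values illustrative):

    #include <sound/soc.h>
    #include "sof_maxim_common.h"

    static void example_wire_speaker_link(struct snd_soc_dai_link *link)
    {
            max_98357a_dai_link(link);      /* fills codecs, num_codecs and init */
            link->dpcm_playback = 1;
            link->no_pcm = 1;
    }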
diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
index d2b0456236c7..2ec9c62366e2 100644
--- a/sound/soc/intel/boards/sof_pcm512x.c
+++ b/sound/soc/intel/boards/sof_pcm512x.c
@@ -241,7 +241,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].num_platforms = ARRAY_SIZE(platform_component);
links[id].init = sof_pcm512x_codec_init;
links[id].ops = &sof_pcm512x_ops;
- links[id].nonatomic = true;
links[id].dpcm_playback = 1;
/*
* capture only supported with specific versions of the Hifiberry DAC+
@@ -437,3 +436,4 @@ MODULE_DESCRIPTION("ASoC Intel(R) SOF + PCM512x Machine driver");
MODULE_AUTHOR("Pierre-Louis Bossart");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sof_pcm512x");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 58548ea0d915..39217223d50c 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -50,6 +50,13 @@
#define SOF_MAX98373_SPEAKER_AMP_PRESENT BIT(17)
#define SOF_MAX98360A_SPEAKER_AMP_PRESENT BIT(18)
+/* BT audio offload: reserve 3 bits for future */
+#define SOF_BT_OFFLOAD_SSP_SHIFT 19
+#define SOF_BT_OFFLOAD_SSP_MASK (GENMASK(21, 19))
+#define SOF_BT_OFFLOAD_SSP(quirk) \
+ (((quirk) << SOF_BT_OFFLOAD_SSP_SHIFT) & SOF_BT_OFFLOAD_SSP_MASK)
+#define SOF_SSP_BT_OFFLOAD_PRESENT BIT(22)
+
/* Default: MCLK on, MCLK 19.2M, SSP0 */
static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
SOF_RT5682_SSP_CODEC(0);
@@ -444,20 +451,26 @@ static int sof_card_late_probe(struct snd_soc_card *card)
static const struct snd_kcontrol_new sof_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
- SOC_DAPM_PIN_SWITCH("Spk"),
SOC_DAPM_PIN_SWITCH("Left Spk"),
SOC_DAPM_PIN_SWITCH("Right Spk"),
};
+static const struct snd_kcontrol_new speaker_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
static const struct snd_soc_dapm_widget sof_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_SPK("Spk", NULL),
SND_SOC_DAPM_SPK("Left Spk", NULL),
SND_SOC_DAPM_SPK("Right Spk", NULL),
};
+static const struct snd_soc_dapm_widget speaker_widgets[] = {
+ SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
static const struct snd_soc_dapm_widget dmic_widgets[] = {
SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};
@@ -497,6 +510,21 @@ static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_card *card = rtd->card;
int ret;
+ ret = snd_soc_dapm_new_controls(&card->dapm, speaker_widgets,
+ ARRAY_SIZE(speaker_widgets));
+ if (ret) {
+ dev_err(rtd->dev, "unable to add dapm controls, ret %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_add_card_controls(card, speaker_controls,
+ ARRAY_SIZE(speaker_controls));
+ if (ret) {
+ dev_err(rtd->dev, "unable to add card controls, ret %d\n", ret);
+ return ret;
+ }
+
ret = snd_soc_dapm_add_routes(&card->dapm, speaker_map,
ARRAY_SIZE(speaker_map));
@@ -566,13 +594,6 @@ static struct snd_soc_dai_link_component dmic_component[] = {
}
};
-static struct snd_soc_dai_link_component max98357a_component[] = {
- {
- .name = "MX98357A:00",
- .dai_name = "HiFi",
- }
-};
-
static struct snd_soc_dai_link_component max98360a_component[] = {
{
.name = "MX98360A:00",
@@ -591,6 +612,13 @@ static struct snd_soc_dai_link_component rt1015_components[] = {
},
};
+static struct snd_soc_dai_link_component dummy_component[] = {
+ {
+ .name = "snd-soc-dummy",
+ .dai_name = "snd-soc-dummy-dai",
+ }
+};
+
static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
int ssp_codec,
int ssp_amp,
@@ -623,7 +651,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].init = sof_rt5682_codec_init;
links[id].exit = sof_rt5682_codec_exit;
links[id].ops = &sof_rt5682_ops;
- links[id].nonatomic = true;
links[id].dpcm_playback = 1;
links[id].dpcm_capture = 1;
links[id].no_pcm = 1;
@@ -742,7 +769,7 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
SOF_MAX98373_SPEAKER_AMP_PRESENT) {
links[id].codecs = max_98373_components;
links[id].num_codecs = ARRAY_SIZE(max_98373_components);
- links[id].init = max98373_spk_codec_init;
+ links[id].init = max_98373_spk_codec_init;
links[id].ops = &max_98373_ops;
/* feedback stream */
links[id].dpcm_capture = 1;
@@ -755,13 +782,10 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
SOF_RT1011_SPEAKER_AMP_PRESENT) {
sof_rt1011_dai_link(&links[id]);
} else {
- links[id].codecs = max98357a_component;
- links[id].num_codecs = ARRAY_SIZE(max98357a_component);
- links[id].init = speaker_codec_init;
+ max_98357a_dai_link(&links[id]);
}
links[id].platforms = platform_component;
links[id].num_platforms = ARRAY_SIZE(platform_component);
- links[id].nonatomic = true;
links[id].dpcm_playback = 1;
links[id].no_pcm = 1;
links[id].cpus = &cpus[id];
@@ -780,6 +804,31 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
if (!links[id].cpus->dai_name)
goto devm_err;
}
+ id++;
+ }
+
+ /* BT audio offload */
+ if (sof_rt5682_quirk & SOF_SSP_BT_OFFLOAD_PRESENT) {
+ int port = (sof_rt5682_quirk & SOF_BT_OFFLOAD_SSP_MASK) >>
+ SOF_BT_OFFLOAD_SSP_SHIFT;
+
+ links[id].id = id;
+ links[id].cpus = &cpus[id];
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d Pin", port);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-BT", port);
+ if (!links[id].name)
+ goto devm_err;
+ links[id].codecs = dummy_component;
+ links[id].num_codecs = ARRAY_SIZE(dummy_component);
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].dpcm_playback = 1;
+ links[id].dpcm_capture = 1;
+ links[id].no_pcm = 1;
+ links[id].num_cpus = 1;
}
return links;
@@ -863,12 +912,15 @@ static int sof_audio_probe(struct platform_device *pdev)
sof_audio_card_rt5682.num_links++;
if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT)
- sof_max98373_codec_conf(&sof_audio_card_rt5682);
+ max_98373_set_codec_conf(&sof_audio_card_rt5682);
else if (sof_rt5682_quirk & SOF_RT1011_SPEAKER_AMP_PRESENT)
sof_rt1011_codec_conf(&sof_audio_card_rt5682);
else if (sof_rt5682_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT)
sof_rt1015p_codec_conf(&sof_audio_card_rt5682);
+ if (sof_rt5682_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
+ sof_audio_card_rt5682.num_links++;
+
dai_links = sof_card_dai_links_create(&pdev->dev, ssp_codec, ssp_amp,
dmic_be_num, hdmi_num);
if (!dai_links)
@@ -904,12 +956,14 @@ static const struct platform_device_id board_ids[] = {
.name = "sof_rt5682",
},
{
- .name = "tgl_max98357a_rt5682",
+ .name = "tgl_mx98357a_rt5682",
.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
SOF_RT5682_SSP_CODEC(0) |
SOF_SPEAKER_AMP_PRESENT |
SOF_RT5682_SSP_AMP(1) |
- SOF_RT5682_NUM_HDMIDEV(4)),
+ SOF_RT5682_NUM_HDMIDEV(4) |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
},
{
.name = "jsl_rt5682_rt1015",
@@ -921,16 +975,18 @@ static const struct platform_device_id board_ids[] = {
SOF_RT5682_SSP_AMP(1)),
},
{
- .name = "tgl_max98373_rt5682",
+ .name = "tgl_mx98373_rt5682",
.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
SOF_RT5682_SSP_CODEC(0) |
SOF_SPEAKER_AMP_PRESENT |
SOF_MAX98373_SPEAKER_AMP_PRESENT |
SOF_RT5682_SSP_AMP(1) |
- SOF_RT5682_NUM_HDMIDEV(4)),
+ SOF_RT5682_NUM_HDMIDEV(4) |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
},
{
- .name = "jsl_rt5682_max98360a",
+ .name = "jsl_rt5682_mx98360a",
.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
SOF_RT5682_MCLK_24MHZ |
SOF_RT5682_SSP_CODEC(0) |
@@ -955,7 +1011,9 @@ static const struct platform_device_id board_ids[] = {
SOF_SPEAKER_AMP_PRESENT |
SOF_RT1011_SPEAKER_AMP_PRESENT |
SOF_RT5682_SSP_AMP(1) |
- SOF_RT5682_NUM_HDMIDEV(4)),
+ SOF_RT5682_NUM_HDMIDEV(4) |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
},
{
.name = "jsl_rt5682_rt1015p",
@@ -966,8 +1024,28 @@ static const struct platform_device_id board_ids[] = {
SOF_RT1015P_SPEAKER_AMP_PRESENT |
SOF_RT5682_SSP_AMP(1)),
},
+ {
+ .name = "adl_mx98373_rt5682",
+ .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_MAX98373_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(1) |
+ SOF_RT5682_NUM_HDMIDEV(4) |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
+ },
+ {
+ .name = "adl_mx98357a_rt5682",
+ .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(2) |
+ SOF_RT5682_NUM_HDMIDEV(4)),
+ },
{ }
};
+MODULE_DEVICE_TABLE(platform, board_ids);
static struct platform_driver sof_audio = {
.probe = sof_audio_probe,
@@ -985,11 +1063,5 @@ MODULE_AUTHOR("Bard Liao <bard.liao@intel.com>");
MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
MODULE_AUTHOR("Brent Lu <brent.lu@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sof_rt5682");
-MODULE_ALIAS("platform:tgl_max98357a_rt5682");
-MODULE_ALIAS("platform:jsl_rt5682_rt1015");
-MODULE_ALIAS("platform:tgl_max98373_rt5682");
-MODULE_ALIAS("platform:jsl_rt5682_max98360a");
-MODULE_ALIAS("platform:cml_rt1015_rt5682");
-MODULE_ALIAS("platform:tgl_rt1011_rt5682");
-MODULE_ALIAS("platform:jsl_rt5682_rt1015p");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_MAXIM_COMMON);
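The SOF_SSP_BT_OFFLOAD_PRESENT quirk adds one extra DPCM back end for the SSP reserved for Bluetooth audio offload; since no codec driver sits on that port, the link is backed by the dummy codec. A sketch of the shape of that link, with illustrative names:

    #include <sound/soc.h>

    static struct snd_soc_dai_link_component ex_dummy_codec[] = {
            { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai" },
    };

    static void ex_fill_bt_link(struct snd_soc_dai_link *link)
    {
            /* link->name ("SSP%d-BT") and the cpu dai name ("SSP%d Pin") are
             * built with devm_kasprintf() from the port decoded out of the quirk
             */
            link->codecs = ex_dummy_codec;
            link->num_codecs = ARRAY_SIZE(ex_dummy_codec);
            link->dpcm_playback = 1;
            link->dpcm_capture = 1;
            link->no_pcm = 1;
    }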
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index ecd3f90f4bbe..1a867c73a48e 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -147,7 +147,9 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
},
.driver_data = (void *)(SOF_SDW_TGL_HDMI |
SOF_SDW_PCH_DMIC |
- SOF_SDW_FOUR_SPK),
+ SOF_SDW_FOUR_SPK |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
},
{
.callback = sof_sdw_quirk_cb,
@@ -196,7 +198,21 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
},
.driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
SOF_SDW_TGL_HDMI |
- SOF_SDW_PCH_DMIC),
+ SOF_RT715_DAI_ID_FIX |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Brya"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ SOF_SDW_PCH_DMIC |
+ SOF_SDW_FOUR_SPK |
+ SOF_BT_OFFLOAD_SSP(2) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
},
{}
};
@@ -353,6 +369,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
.part_id = 0x714,
.version_id = 3,
.direction = {false, true},
+ .ignore_pch_dmic = true,
.dai_name = "rt715-aif2",
.init = sof_sdw_rt715_sdca_init,
},
@@ -360,6 +377,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
.part_id = 0x715,
.version_id = 3,
.direction = {false, true},
+ .ignore_pch_dmic = true,
.dai_name = "rt715-aif2",
.init = sof_sdw_rt715_sdca_init,
},
@@ -367,6 +385,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
.part_id = 0x714,
.version_id = 2,
.direction = {false, true},
+ .ignore_pch_dmic = true,
.dai_name = "rt715-aif2",
.init = sof_sdw_rt715_init,
},
@@ -374,6 +393,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
.part_id = 0x715,
.version_id = 2,
.direction = {false, true},
+ .ignore_pch_dmic = true,
.dai_name = "rt715-aif2",
.init = sof_sdw_rt715_init,
},
@@ -499,7 +519,6 @@ static void init_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links
dai_links->name = name;
dai_links->platforms = platform_component;
dai_links->num_platforms = ARRAY_SIZE(platform_component);
- dai_links->nonatomic = true;
dai_links->no_pcm = 1;
dai_links->cpus = cpus;
dai_links->num_cpus = cpus_num;
@@ -581,13 +600,13 @@ static int create_codec_dai_name(struct device *dev,
comp_index = i + offset;
if (is_unique_device(link, sdw_version, mfg_id, part_id,
class_id, i)) {
- codec_str = "sdw:%x:%x:%x:%x";
+ codec_str = "sdw:%01x:%04x:%04x:%02x";
codec[comp_index].name =
devm_kasprintf(dev, GFP_KERNEL, codec_str,
link_id, mfg_id, part_id,
class_id);
} else {
- codec_str = "sdw:%x:%x:%x:%x:%x";
+ codec_str = "sdw:%01x:%04x:%04x:%02x:%01x";
codec[comp_index].name =
devm_kasprintf(dev, GFP_KERNEL, codec_str,
link_id, mfg_id, part_id,
@@ -663,7 +682,7 @@ static int set_codec_init_func(const struct snd_soc_acpi_link_adr *link,
*/
static int get_slave_info(const struct snd_soc_acpi_link_adr *adr_link,
struct device *dev, int *cpu_dai_id, int *cpu_dai_num,
- int *codec_num, int *group_id,
+ int *codec_num, unsigned int *group_id,
bool *group_generated)
{
const struct snd_soc_acpi_adr_device *adr_d;
@@ -729,7 +748,8 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
int *cpu_id, bool *group_generated,
struct snd_soc_codec_conf *codec_conf,
int codec_count,
- int *codec_conf_index)
+ int *codec_conf_index,
+ bool *ignore_pch_dmic)
{
const struct snd_soc_acpi_link_adr *link_next;
struct snd_soc_dai_link_component *codecs;
@@ -782,6 +802,9 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
if (codec_index < 0)
return codec_index;
+ if (codec_info_list[codec_index].ignore_pch_dmic)
+ *ignore_pch_dmic = true;
+
cpu_dai_index = *cpu_id;
for_each_pcm_streams(stream) {
char *name, *cpu_name;
@@ -913,6 +936,7 @@ static int sof_card_dai_links_create(struct device *dev,
const struct snd_soc_acpi_link_adr *adr_link;
struct snd_soc_dai_link_component *cpus;
struct snd_soc_codec_conf *codec_conf;
+ bool ignore_pch_dmic = false;
int codec_conf_count;
int codec_conf_index = 0;
bool group_generated[SDW_MAX_GROUPS];
@@ -968,6 +992,9 @@ static int sof_card_dai_links_create(struct device *dev,
dmic_num = (sof_sdw_quirk & SOF_SDW_PCH_DMIC || mach_params->dmic_num) ? 2 : 0;
comp_num += dmic_num;
+ if (sof_sdw_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
+ comp_num++;
+
dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d", sdw_be_num, ssp_num,
dmic_num, ctx->idisp_codec ? hdmi_num : 0);
@@ -1019,7 +1046,8 @@ static int sof_card_dai_links_create(struct device *dev,
sdw_cpu_dai_num, cpus, adr_link,
&cpu_id, group_generated,
codec_conf, codec_conf_count,
- &codec_conf_index);
+ &codec_conf_index,
+ &ignore_pch_dmic);
if (ret < 0) {
dev_err(dev, "failed to create dai link %d", be_id);
return -ENOMEM;
@@ -1087,6 +1115,10 @@ SSP:
DMIC:
/* dmic */
if (dmic_num > 0) {
+ if (ignore_pch_dmic) {
+ dev_warn(dev, "Ignoring PCH DMIC\n");
+ goto HDMI;
+ }
cpus[cpu_id].dai_name = "DMIC01 Pin";
init_dai_link(dev, links + link_id, be_id, "dmic01",
0, 1, // DMIC only supports capture
@@ -1105,6 +1137,7 @@ DMIC:
INC_ID(be_id, cpu_id, link_id);
}
+HDMI:
/* HDMI */
if (hdmi_num > 0) {
idisp_components = devm_kcalloc(dev, hdmi_num,
@@ -1147,6 +1180,31 @@ DMIC:
INC_ID(be_id, cpu_id, link_id);
}
+ if (sof_sdw_quirk & SOF_SSP_BT_OFFLOAD_PRESENT) {
+ int port = (sof_sdw_quirk & SOF_BT_OFFLOAD_SSP_MASK) >>
+ SOF_BT_OFFLOAD_SSP_SHIFT;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-BT", port);
+ if (!name)
+ return -ENOMEM;
+
+ ssp_components = devm_kzalloc(dev, sizeof(*ssp_components),
+ GFP_KERNEL);
+ if (!ssp_components)
+ return -ENOMEM;
+
+ ssp_components->name = "snd-soc-dummy";
+ ssp_components->dai_name = "snd-soc-dummy-dai";
+
+ cpu_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", port);
+ if (!cpu_name)
+ return -ENOMEM;
+
+ cpus[cpu_id].dai_name = cpu_name;
+ init_dai_link(dev, links + link_id, be_id, name, 1, 1,
+ cpus + cpu_id, 1, ssp_components, 1, NULL, NULL);
+ }
+
card->dai_link = links;
card->num_links = num_links;
@@ -1215,8 +1273,6 @@ static int mc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
-
/*
* the default amp_num is zero for each codec and
* amp_num will only be increased for active amp
@@ -1302,3 +1358,5 @@ MODULE_AUTHOR("Rander Wang <rander.wang@linux.intel.com>");
MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sof_sdw");
+MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_MAXIM_COMMON);
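
Note on the codec_str change at the top of this file's hunks: moving from plain "%x" conversions to fixed-width, zero-padded ones matters because the generated SoundWire codec component names are later matched as strings, so every device must render to the same width every time. A minimal userspace sketch of the idea; the field values and the exact argument list (including unique_id) are illustrative assumptions, not the driver's actual call:

/* Illustrative only: why zero-padded conversions give stable,
 * unambiguous SoundWire component names. Field values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int link_id = 0, mfg_id = 0x025d, part_id = 0x0711;
	unsigned int class_id = 0, unique_id = 1;
	char old_name[64], new_name[64];

	/* old style: width depends on the value, so 0x25d and 0x025d
	 * would render differently for the same device */
	snprintf(old_name, sizeof(old_name), "sdw:%x:%x:%x:%x:%x",
		 link_id, mfg_id, part_id, class_id, unique_id);

	/* new style: every field has a fixed width */
	snprintf(new_name, sizeof(new_name), "sdw:%01x:%04x:%04x:%02x:%01x",
		 link_id, mfg_id, part_id, class_id, unique_id);

	printf("old: %s\nnew: %s\n", old_name, new_name);
	return 0;
}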
diff --git a/sound/soc/intel/boards/sof_sdw_common.h b/sound/soc/intel/boards/sof_sdw_common.h
index f3cb6796363e..ec5740486b75 100644
--- a/sound/soc/intel/boards/sof_sdw_common.h
+++ b/sound/soc/intel/boards/sof_sdw_common.h
@@ -50,12 +50,20 @@ enum {
#define SOF_RT715_DAI_ID_FIX BIT(11)
#define SOF_SDW_NO_AGGREGATION BIT(12)
+/* BT audio offload: reserve 3 bits for future */
+#define SOF_BT_OFFLOAD_SSP_SHIFT 13
+#define SOF_BT_OFFLOAD_SSP_MASK (GENMASK(15, 13))
+#define SOF_BT_OFFLOAD_SSP(quirk) \
+ (((quirk) << SOF_BT_OFFLOAD_SSP_SHIFT) & SOF_BT_OFFLOAD_SSP_MASK)
+#define SOF_SSP_BT_OFFLOAD_PRESENT BIT(16)
+
struct sof_sdw_codec_info {
const int part_id;
const int version_id;
int amp_num;
const u8 acpi_id[ACPI_ID_LEN];
const bool direction[2]; // playback & capture support
+ const bool ignore_pch_dmic;
const char *dai_name;
const struct snd_soc_ops *ops;
@@ -71,7 +79,6 @@ struct sof_sdw_codec_info {
struct mc_private {
struct list_head hdmi_pcm_list;
- bool common_hdmi_codec_drv;
bool idisp_codec;
struct snd_soc_jack sdw_headset;
};
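
The new SOF_BT_OFFLOAD_SSP_* macros above pack the BT-offload SSP port number into bits 15:13 of the quirk word, with SOF_SSP_BT_OFFLOAD_PRESENT (bit 16) as a separate enable flag; sof_sdw.c then shifts the field back out to name the "SSP%d-BT" link. A small, self-contained sketch of the encode/decode round trip (GENMASK is re-implemented only so the snippet builds in userspace):

/* Userspace illustration of the BT-offload quirk bit layout. */
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define SOF_BT_OFFLOAD_SSP_SHIFT	13
#define SOF_BT_OFFLOAD_SSP_MASK		(GENMASK(15, 13))
#define SOF_BT_OFFLOAD_SSP(quirk) \
	(((quirk) << SOF_BT_OFFLOAD_SSP_SHIFT) & SOF_BT_OFFLOAD_SSP_MASK)
#define SOF_SSP_BT_OFFLOAD_PRESENT	(1u << 16)

int main(void)
{
	/* board quirk: BT offload present on SSP2 */
	unsigned long quirk = SOF_BT_OFFLOAD_SSP(2) | SOF_SSP_BT_OFFLOAD_PRESENT;

	if (quirk & SOF_SSP_BT_OFFLOAD_PRESENT) {
		int port = (quirk & SOF_BT_OFFLOAD_SSP_MASK) >>
			   SOF_BT_OFFLOAD_SSP_SHIFT;
		printf("BT offload on SSP%d\n", port);	/* prints SSP2 */
	}
	return 0;
}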
diff --git a/sound/soc/intel/boards/sof_sdw_hdmi.c b/sound/soc/intel/boards/sof_sdw_hdmi.c
index 99b04bb2f3a0..d47d8bf528c1 100644
--- a/sound/soc/intel/boards/sof_sdw_hdmi.c
+++ b/sound/soc/intel/boards/sof_sdw_hdmi.c
@@ -13,11 +13,8 @@
#include <sound/soc-acpi.h>
#include <sound/jack.h>
#include "sof_sdw_common.h"
-#include "../../codecs/hdac_hdmi.h"
#include "hda_dsp_common.h"
-static struct snd_soc_jack hdmi[MAX_HDMI_NUM];
-
struct hdmi_pcm {
struct list_head head;
struct snd_soc_dai *codec_dai;
@@ -49,8 +46,6 @@ int sof_sdw_hdmi_card_late_probe(struct snd_soc_card *card)
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
struct hdmi_pcm *pcm;
struct snd_soc_component *component = NULL;
- int err, i = 0;
- char jack_name[NAME_SIZE];
if (!ctx->idisp_codec)
return 0;
@@ -62,35 +57,5 @@ int sof_sdw_hdmi_card_late_probe(struct snd_soc_card *card)
head);
component = pcm->codec_dai->component;
- if (ctx->common_hdmi_codec_drv)
- return hda_dsp_hdmi_build_controls(card, component);
-
- list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
- component = pcm->codec_dai->component;
- snprintf(jack_name, sizeof(jack_name),
- "HDMI/DP, pcm=%d Jack", pcm->device);
- err = snd_soc_card_jack_new(card, jack_name,
- SND_JACK_AVOUT, &hdmi[i],
- NULL, 0);
-
- if (err)
- return err;
-
- err = snd_jack_add_new_kctl(hdmi[i].jack,
- jack_name, SND_JACK_AVOUT);
- if (err)
- dev_warn(component->dev, "failed creating Jack kctl\n");
-
- err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
- &hdmi[i]);
- if (err < 0)
- return err;
-
- i++;
- }
-
- if (!component)
- return -EINVAL;
-
- return hdac_hdmi_jack_port_init(component, &card->dapm);
+ return hda_dsp_hdmi_build_controls(card, component);
}
diff --git a/sound/soc/intel/boards/sof_sdw_max98373.c b/sound/soc/intel/boards/sof_sdw_max98373.c
index cfdf970c5800..25daef910aee 100644
--- a/sound/soc/intel/boards/sof_sdw_max98373.c
+++ b/sound/soc/intel/boards/sof_sdw_max98373.c
@@ -55,43 +55,68 @@ static int spk_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
+static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enable)
{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *cpu_dai;
int ret;
+ int j;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- /* enable max98373 first */
- ret = max98373_trigger(substream, cmd);
- if (ret < 0)
- break;
-
- ret = sdw_trigger(substream, cmd);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ret = sdw_trigger(substream, cmd);
- if (ret < 0)
- break;
-
- ret = max98373_trigger(substream, cmd);
- break;
- default:
- ret = -EINVAL;
- break;
+ /* set spk pin by playback only */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ return 0;
+
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(cpu_dai->component);
+ char pin_name[16];
+
+ snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk",
+ codec_dai->component->name_prefix);
+
+ if (enable)
+ ret = snd_soc_dapm_enable_pin(dapm, pin_name);
+ else
+ ret = snd_soc_dapm_disable_pin(dapm, pin_name);
+
+ if (!ret)
+ snd_soc_dapm_sync(dapm);
}
- return ret;
+ return 0;
+}
+
+static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+ /* according to soc_pcm_prepare dai link prepare is called first */
+ ret = sdw_prepare(substream);
+ if (ret < 0)
+ return ret;
+
+ return mx8373_enable_spk_pin(substream, true);
+}
+
+static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+ /* according to soc_pcm_hw_free dai link free is called first */
+ ret = sdw_hw_free(substream);
+ if (ret < 0)
+ return ret;
+
+ return mx8373_enable_spk_pin(substream, false);
}
static const struct snd_soc_ops max_98373_sdw_ops = {
.startup = sdw_startup,
- .prepare = sdw_prepare,
- .trigger = max98373_sdw_trigger,
- .hw_free = sdw_hw_free,
+ .prepare = mx8373_sdw_prepare,
+ .trigger = sdw_trigger,
+ .hw_free = mx8373_sdw_hw_free,
.shutdown = sdw_shutdown,
};
diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
index 6a181e45143d..54395e2ededc 100644
--- a/sound/soc/intel/boards/sof_wm8804.c
+++ b/sound/soc/intel/boards/sof_wm8804.c
@@ -167,7 +167,6 @@ static struct snd_soc_dai_link dailink[] = {
.name = "SSP5-Codec",
.id = 0,
.no_pcm = 1,
- .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.ops = &sof_wm8804_ops,
diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
index 692c4c479ed8..a0f6a69c7038 100644
--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
@@ -31,7 +31,7 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
{
- .adr = 0x000020025D071100,
+ .adr = 0x000020025D071100ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -40,7 +40,7 @@ static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1308-1"
@@ -49,7 +49,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
{
- .adr = 0x000220025D130800,
+ .adr = 0x000220025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1308-2"
@@ -58,7 +58,7 @@ static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
{
- .adr = 0x000320025D071500,
+ .adr = 0x000320025D071500ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt715"
@@ -67,7 +67,7 @@ static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
{
- .adr = 0x000030025D071101,
+ .adr = 0x000030025D071101ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -76,7 +76,7 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
{
- .adr = 0x000131025D131601, /* unique ID is set for some reason */
+ .adr = 0x000131025D131601ull, /* unique ID is set for some reason */
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1316-1"
@@ -85,7 +85,7 @@ static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_2_group1_adr[] = {
{
- .adr = 0x000230025D131601,
+ .adr = 0x000230025D131601ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1316-2"
@@ -94,7 +94,7 @@ static const struct snd_soc_acpi_adr_device rt1316_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = {
{
- .adr = 0x000330025D131601,
+ .adr = 0x000330025D131601ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1316-2"
@@ -103,7 +103,7 @@ static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
{
- .adr = 0x000230025D131601,
+ .adr = 0x000230025D131601ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt1316-1"
@@ -112,7 +112,7 @@ static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
static const struct snd_soc_acpi_adr_device rt714_0_adr[] = {
{
- .adr = 0x000030025D071401,
+ .adr = 0x000030025D071401ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt714"
@@ -121,7 +121,7 @@ static const struct snd_soc_acpi_adr_device rt714_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt714_2_adr[] = {
{
- .adr = 0x000230025D071401,
+ .adr = 0x000230025D071401ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt714"
@@ -130,7 +130,7 @@ static const struct snd_soc_acpi_adr_device rt714_2_adr[] = {
static const struct snd_soc_acpi_adr_device rt714_3_adr[] = {
{
- .adr = 0x000330025D071401,
+ .adr = 0x000330025D071401ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt714"
@@ -223,6 +223,30 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link0[] = {
{}
};
+static const struct snd_soc_acpi_adr_device mx8373_2_adr[] = {
+ {
+ .adr = 0x000223019F837300ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "Left"
+ },
+ {
+ .adr = 0x000227019F837300ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "Right"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt5682_0_adr[] = {
+ {
+ .adr = 0x000021025D568200ull,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ .name_prefix = "rt5682"
+ }
+};
+
static const struct snd_soc_acpi_link_adr adl_rvp[] = {
{
.mask = BIT(0),
@@ -232,7 +256,47 @@ static const struct snd_soc_acpi_link_adr adl_rvp[] = {
{}
};
+static const struct snd_soc_acpi_link_adr adl_chromebook_base[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt5682_0_adr),
+ .adr_d = rt5682_0_adr,
+ },
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(mx8373_2_adr),
+ .adr_d = mx8373_2_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_codecs adl_max98373_amp = {
+ .num_codecs = 1,
+ .codecs = {"MX98373"}
+};
+
+static const struct snd_soc_acpi_codecs adl_max98357a_amp = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
+ {
+ .id = "10EC5682",
+ .drv_name = "adl_mx98373_rt5682",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &adl_max98373_amp,
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-max98373-rt5682.tplg",
+ },
+ {
+ .id = "10EC5682",
+ .drv_name = "adl_mx98357a_rt5682",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &adl_max98357a_amp,
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-max98357a-rt5682.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_adl_machines);
@@ -269,6 +333,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-adl-rt711.tplg",
},
+ {
+ .link_mask = 0x5, /* rt5682 on link0 & 2xmax98373 on link 2 */
+ .links = adl_chromebook_base,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-sdw-max98373-rt5682.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_adl_sdw_machines);
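
The ull suffixes added throughout these tables make the 64-bit SoundWire _ADR constants well-defined even where unsigned long is only 32 bits wide. The field layout below follows the DisCo encoding as used by the kernel's sdw.h helpers (taken here as an assumption); a small userspace decoder for one of the values in the table above:

/* Decode a SoundWire _ADR value: link, version, unique ID,
 * manufacturer, part and class fields. */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t adr = 0x000030025D071101ull;	/* rt711-sdca on link 0 */

	unsigned int link_id  = (adr >> 48) & 0xf;
	unsigned int version  = (adr >> 44) & 0xf;
	unsigned int unique   = (adr >> 40) & 0xf;
	unsigned int mfg_id   = (adr >> 24) & 0xffff;
	unsigned int part_id  = (adr >>  8) & 0xffff;
	unsigned int class_id =  adr        & 0xff;

	printf("link %u, sdw v%u, unique %u, mfg 0x%04x, part 0x%04x, class 0x%02x\n",
	       link_id, version, unique, mfg_id, part_id, class_id);
	return 0;
}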
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
index 398cc771c835..576407b5daf2 100644
--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -56,7 +56,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
},
{
.id = "DLGS7219",
- .drv_name = "bxt_da7219_max98357a",
+ .drv_name = "bxt_da7219_mx98357a",
.fw_filename = "intel/dsp_fw_bxtn.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &bxt_codecs,
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
index 7f6ef8229969..42ef51c3fb4f 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -67,7 +67,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
},
{
.id = "DLGS7219",
- .drv_name = "cml_da7219_max98357a",
+ .drv_name = "cml_da7219_mx98357a",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &max98357a_spk_codecs,
.sof_fw_filename = "sof-cml.ri",
@@ -108,7 +108,7 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
static const struct snd_soc_acpi_adr_device rt700_1_adr[] = {
{
- .adr = 0x000110025D070000,
+ .adr = 0x000110025D070000ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt700"
@@ -126,7 +126,7 @@ static const struct snd_soc_acpi_link_adr cml_rvp[] = {
static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
{
- .adr = 0x000020025D071100,
+ .adr = 0x000020025D071100ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -135,7 +135,7 @@ static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_single_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt1308-1"
@@ -144,7 +144,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_single_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1308-1"
@@ -153,7 +153,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
{
- .adr = 0x000220025D130800,
+ .adr = 0x000220025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1308-2"
@@ -162,7 +162,7 @@ static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
{
- .adr = 0x000320025D071500,
+ .adr = 0x000320025D071500ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt715"
@@ -171,7 +171,7 @@ static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
{
- .adr = 0x000030025D071101,
+ .adr = 0x000030025D071101ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -180,7 +180,7 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
{
- .adr = 0x000131025D131601, /* unique ID is set for some reason */
+ .adr = 0x000131025D131601ull, /* unique ID is set for some reason */
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1316-1"
@@ -189,7 +189,7 @@ static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_2_group1_adr[] = {
{
- .adr = 0x000230025D131601,
+ .adr = 0x000230025D131601ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1316-2"
@@ -198,7 +198,7 @@ static const struct snd_soc_acpi_adr_device rt1316_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt714_3_adr[] = {
{
- .adr = 0x000330025D071401,
+ .adr = 0x000330025D071401ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt714"
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index ec77a57a07ba..39dad32564e6 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -36,7 +36,7 @@ static const struct snd_soc_acpi_endpoint single_endpoint = {
static const struct snd_soc_acpi_adr_device rt5682_2_adr[] = {
{
- .adr = 0x000220025D568200,
+ .adr = 0x000220025D568200ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt5682"
diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
index 6ceaab19ccb6..da1e151190b4 100644
--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
@@ -24,7 +24,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
},
{
.id = "DLGS7219",
- .drv_name = "glk_da7219_max98357a",
+ .drv_name = "glk_da7219_mx98357a",
.fw_filename = "intel/dsp_fw_glk.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &glk_codecs,
@@ -33,13 +33,23 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
},
{
.id = "10EC5682",
- .drv_name = "glk_rt5682_max98357a",
+ .drv_name = "glk_rt5682_mx98357a",
.fw_filename = "intel/dsp_fw_glk.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &glk_codecs,
.sof_fw_filename = "sof-glk.ri",
.sof_tplg_filename = "sof-glk-rt5682.tplg",
},
+ {
+ .id = "10134242",
+ .drv_name = "glk_cs4242_mx98357a",
+ .fw_filename = "intel/dsp_fw_glk.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &glk_codecs,
+ .sof_fw_filename = "sof-glk.ri",
+ .sof_tplg_filename = "sof-glk-cs42l42.tplg",
+ },
+
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_glk_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index d38ff7d187c4..768ed538c4ea 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -56,7 +56,7 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
static const struct snd_soc_acpi_adr_device rt700_0_adr[] = {
{
- .adr = 0x000010025D070000,
+ .adr = 0x000010025D070000ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt700"
@@ -74,7 +74,7 @@ static const struct snd_soc_acpi_link_adr icl_rvp[] = {
static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
{
- .adr = 0x000020025D071100,
+ .adr = 0x000020025D071100ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -83,7 +83,7 @@ static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt1308-1"
@@ -92,7 +92,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1308-1"
@@ -101,7 +101,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
{
- .adr = 0x000220025D130800,
+ .adr = 0x000220025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1308-2"
@@ -110,7 +110,7 @@ static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
{
- .adr = 0x000320025D071500,
+ .adr = 0x000320025D071500ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt715"
diff --git a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
index 73fe4f89a82d..3586ce72c42c 100644
--- a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
@@ -37,7 +37,7 @@ static struct snd_soc_acpi_codecs mx98360a_spk = {
struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
{
.id = "DLGS7219",
- .drv_name = "sof_da7219_max98373",
+ .drv_name = "sof_da7219_mx98373",
.sof_fw_filename = "sof-jsl.ri",
.sof_tplg_filename = "sof-jsl-da7219.tplg",
.machine_quirk = snd_soc_acpi_codec_list,
@@ -45,7 +45,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
},
{
.id = "DLGS7219",
- .drv_name = "sof_da7219_max98360a",
+ .drv_name = "sof_da7219_mx98360a",
.sof_fw_filename = "sof-jsl.ri",
.sof_tplg_filename = "sof-jsl-da7219-mx98360a.tplg",
},
@@ -67,7 +67,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
},
{
.id = "10EC5682",
- .drv_name = "jsl_rt5682_max98360a",
+ .drv_name = "jsl_rt5682_mx98360a",
.sof_fw_filename = "sof-jsl.ri",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &mx98360a_spk,
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index 47dadc9d5d2a..ba5ff468c265 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -113,7 +113,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
},
{
.id = "DLGS7219",
- .drv_name = "kbl_da7219_max98373",
+ .drv_name = "kbl_da7219_mx98373",
.fw_filename = "intel/dsp_fw_kbl.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &kbl_7219_98373_codecs,
diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
index b5f05b81a584..66595e3ab13f 100644
--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
@@ -37,7 +37,7 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
{
- .adr = 0x000020025D071100,
+ .adr = 0x000020025D071100ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -46,7 +46,7 @@ static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt711_1_adr[] = {
{
- .adr = 0x000120025D071100,
+ .adr = 0x000120025D071100ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -55,13 +55,13 @@ static const struct snd_soc_acpi_adr_device rt711_1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_dual_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1308-1"
},
{
- .adr = 0x000122025D130800,
+ .adr = 0x000122025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1308-2"
@@ -70,7 +70,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_dual_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_single_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt1308-1"
@@ -79,7 +79,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_single_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_2_single_adr[] = {
{
- .adr = 0x000220025D130800,
+ .adr = 0x000220025D130800ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt1308-1"
@@ -88,7 +88,7 @@ static const struct snd_soc_acpi_adr_device rt1308_2_single_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
{
- .adr = 0x000120025D130800,
+ .adr = 0x000120025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1308-1"
@@ -97,7 +97,7 @@ static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
{
- .adr = 0x000220025D130800,
+ .adr = 0x000220025D130800ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1308-2"
@@ -106,7 +106,7 @@ static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt715_0_adr[] = {
{
- .adr = 0x000021025D071500,
+ .adr = 0x000021025D071500ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt715"
@@ -115,7 +115,7 @@ static const struct snd_soc_acpi_adr_device rt715_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
{
- .adr = 0x000320025D071500,
+ .adr = 0x000320025D071500ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt715"
@@ -124,13 +124,13 @@ static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = {
{
- .adr = 0x000123019F837300,
+ .adr = 0x000123019F837300ull,
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "Right"
},
{
- .adr = 0x000127019F837300,
+ .adr = 0x000127019F837300ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "Left"
@@ -139,7 +139,7 @@ static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = {
static const struct snd_soc_acpi_adr_device rt5682_0_adr[] = {
{
- .adr = 0x000021025D568200,
+ .adr = 0x000021025D568200ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt5682"
@@ -148,7 +148,7 @@ static const struct snd_soc_acpi_adr_device rt5682_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
{
- .adr = 0x000030025D071101,
+ .adr = 0x000030025D071101ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt711"
@@ -157,7 +157,7 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
{
- .adr = 0x000131025D131601, /* unique ID is set for some reason */
+ .adr = 0x000131025D131601ull, /* unique ID is set for some reason */
.num_endpoints = 1,
.endpoints = &spk_l_endpoint,
.name_prefix = "rt1316-1"
@@ -166,7 +166,7 @@ static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt1316_2_group1_adr[] = {
{
- .adr = 0x000230025D131601,
+ .adr = 0x000230025D131601ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
.name_prefix = "rt1316-2"
@@ -175,7 +175,7 @@ static const struct snd_soc_acpi_adr_device rt1316_2_group1_adr[] = {
static const struct snd_soc_acpi_adr_device rt714_3_adr[] = {
{
- .adr = 0x000330025D071401,
+ .adr = 0x000330025D071401ull,
.num_endpoints = 1,
.endpoints = &single_endpoint,
.name_prefix = "rt714"
@@ -323,7 +323,7 @@ static const struct snd_soc_acpi_codecs tgl_rt1011_amp = {
struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
{
.id = "10EC5682",
- .drv_name = "tgl_max98357a_rt5682",
+ .drv_name = "tgl_mx98357a_rt5682",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &tgl_codecs,
.sof_fw_filename = "sof-tgl.ri",
@@ -331,7 +331,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
},
{
.id = "10EC5682",
- .drv_name = "tgl_max98373_rt5682",
+ .drv_name = "tgl_mx98373_rt5682",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &tgl_max98373_amp,
.sof_fw_filename = "sof-tgl.ri",
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 87c891c46291..64226072f0ee 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -149,8 +149,8 @@ int skl_nhlt_update_topology_bin(struct skl_dev *skl)
return 0;
}
-static ssize_t skl_nhlt_platform_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t platform_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_bus *bus = pci_get_drvdata(pci);
@@ -166,7 +166,7 @@ static ssize_t skl_nhlt_platform_id_show(struct device *dev,
return sprintf(buf, "%s\n", platform_id);
}
-static DEVICE_ATTR(platform_id, 0444, skl_nhlt_platform_id_show, NULL);
+static DEVICE_ATTR_RO(platform_id);
int skl_nhlt_create_sysfs(struct skl_dev *skl)
{
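
The rename from skl_nhlt_platform_id_show() to platform_id_show() is what allows DEVICE_ATTR_RO(platform_id): the macro pastes _show onto the attribute name to locate the handler. A tiny userspace illustration of that token-pasting convention (the macro body is a simplified stand-in, not the kernel definition):

/* Simplified model of the DEVICE_ATTR_RO() naming convention. */
#include <stdio.h>

struct device_attribute {
	const char *name;
	const char *(*show)(void);
};

/* Like the kernel macro, the attribute name must match a <name>_show symbol. */
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = { #_name, _name##_show }

static const char *platform_id_show(void)
{
	return "example-platform-id\n";
}

static DEVICE_ATTR_RO(platform_id);

int main(void)
{
	printf("%s: %s", dev_attr_platform_id.name, dev_attr_platform_id.show());
	return 0;
}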
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 52ba0e3a9e95..7ad5d9a924d8 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -371,7 +371,7 @@ static int jz4740_i2s_resume(struct snd_soc_component *component)
return 0;
}
-static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s)
+static void jz4740_i2s_init_pcm_config(struct jz4740_i2s *i2s)
{
struct snd_dmaengine_dai_dma_data *dma_data;
@@ -396,7 +396,7 @@ static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
if (ret)
return ret;
- jz4740_i2c_init_pcm_config(i2s);
+ jz4740_i2s_init_pcm_config(i2s);
snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data,
&i2s->capture_dma_data);
@@ -525,8 +525,7 @@ static int jz4740_i2s_dev_probe(struct platform_device *pdev)
i2s->soc_info = device_get_match_data(dev);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2s->base = devm_ioremap_resource(dev, mem);
+ i2s->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(i2s->base))
return PTR_ERR(i2s->base);
diff --git a/sound/soc/jz4740/jz4740-i2s.h b/sound/soc/jz4740/jz4740-i2s.h
index 44f12016064d..4da14eac1145 100644
--- a/sound/soc/jz4740/jz4740-i2s.h
+++ b/sound/soc/jz4740/jz4740-i2s.h
@@ -7,6 +7,4 @@
#define JZ4740_I2S_CLKSRC_EXT 0
#define JZ4740_I2S_CLKSRC_PLL 1
-#define JZ4740_I2S_BIT_CLK 0
-
#endif
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index c2a5933bfcfc..700a18561a94 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -104,8 +104,6 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
int err;
struct snd_pcm_runtime *runtime = substream->runtime;
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
- const struct mbus_dram_target_info *dram;
- unsigned long addr;
snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
@@ -142,20 +140,14 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
}
- dram = mv_mbus_dram_info();
- addr = substream->dma_buffer.addr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (priv->substream_play)
return -EBUSY;
priv->substream_play = substream;
- kirkwood_dma_conf_mbus_windows(priv->io,
- KIRKWOOD_PLAYBACK_WIN, addr, dram);
} else {
if (priv->substream_rec)
return -EBUSY;
priv->substream_rec = substream;
- kirkwood_dma_conf_mbus_windows(priv->io,
- KIRKWOOD_RECORD_WIN, addr, dram);
}
return 0;
@@ -182,6 +174,23 @@ static int kirkwood_dma_close(struct snd_soc_component *component,
return 0;
}
+static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct kirkwood_dma_data *priv = kirkwood_priv(substream);
+ const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ unsigned long addr = substream->runtime->dma_addr;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ kirkwood_dma_conf_mbus_windows(priv->io,
+ KIRKWOOD_PLAYBACK_WIN, addr, dram);
+ else
+ kirkwood_dma_conf_mbus_windows(priv->io,
+ KIRKWOOD_RECORD_WIN, addr, dram);
+ return 0;
+}
+
static int kirkwood_dma_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
@@ -246,6 +255,7 @@ const struct snd_soc_component_driver kirkwood_soc_component = {
.name = DRV_NAME,
.open = kirkwood_dma_open,
.close = kirkwood_dma_close,
+ .hw_params = kirkwood_dma_hw_params,
.prepare = kirkwood_dma_prepare,
.pointer = kirkwood_dma_pointer,
.pcm_construct = kirkwood_dma_new,
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
index f85b5ea180ec..d884bb7c0fc7 100644
--- a/sound/soc/mediatek/common/mtk-btcvsd.c
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -1281,7 +1281,7 @@ static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int ret;
int irq_id;
u32 offset[5] = {0, 0, 0, 0, 0};
struct mtk_btcvsd_snd *btcvsd;
@@ -1337,7 +1337,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
btcvsd->bt_sram_bank2_base = of_iomap(dev->of_node, 1);
if (!btcvsd->bt_sram_bank2_base) {
dev_err(dev, "iomap bt_sram_bank2_base fail\n");
- return -EIO;
+ ret = -EIO;
+ goto unmap_pkv_err;
}
btcvsd->infra = syscon_regmap_lookup_by_phandle(dev->of_node,
@@ -1345,7 +1346,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
if (IS_ERR(btcvsd->infra)) {
dev_err(dev, "cannot find infra controller: %ld\n",
PTR_ERR(btcvsd->infra));
- return PTR_ERR(btcvsd->infra);
+ ret = PTR_ERR(btcvsd->infra);
+ goto unmap_bank2_err;
}
/* get offset */
@@ -1354,7 +1356,7 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
ARRAY_SIZE(offset));
if (ret) {
dev_warn(dev, "%s(), get offset fail, ret %d\n", __func__, ret);
- return ret;
+ goto unmap_bank2_err;
}
btcvsd->infra_misc_offset = offset[0];
btcvsd->conn_bt_cvsd_mask = offset[1];
@@ -1373,8 +1375,18 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
mtk_btcvsd_snd_set_state(btcvsd, btcvsd->tx, BT_SCO_STATE_IDLE);
mtk_btcvsd_snd_set_state(btcvsd, btcvsd->rx, BT_SCO_STATE_IDLE);
- return devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
- NULL, 0);
+ ret = devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
+ NULL, 0);
+ if (ret)
+ goto unmap_bank2_err;
+
+ return 0;
+
+unmap_bank2_err:
+ iounmap(btcvsd->bt_sram_bank2_base);
+unmap_pkv_err:
+ iounmap(btcvsd->bt_pkv_base);
+ return ret;
}
static int mtk_btcvsd_snd_remove(struct platform_device *pdev)
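
The probe() rework above replaces early returns with a single unwind path so that every iomap taken before a failure is released, in reverse order of acquisition. A generic, runnable sketch of that pattern (the resources here are stand-ins, not the driver's mappings):

/* Generic goto-based unwind, mirroring the probe() error handling above. */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

static int probe(int fail_late)
{
	void *a, *b;
	int ret;

	a = acquire("bank1");
	if (!a)
		return -1;

	b = acquire("bank2");
	if (!b) {
		ret = -1;
		goto release_a;
	}

	if (fail_late) {		/* e.g. component registration fails */
		ret = -1;
		goto release_b;
	}

	return 0;			/* success: keep both resources */

release_b:
	release("bank2", b);
release_a:
	release("bank1", a);
	return ret;
}

int main(void)
{
	return probe(1) ? 1 : 0;
}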
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
index 2b758a18c2ea..5b8a274419ed 100644
--- a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
@@ -341,6 +341,7 @@ static int set_mtkaif_rx(struct mtk_base_afe *afe)
case MT8183_MTKAIF_PROTOCOL_1:
regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x0);
+ break;
default:
break;
}
diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-adda.c b/sound/soc/mediatek/mt8192/mt8192-dai-adda.c
index f040dce85da5..f8c73e8624df 100644
--- a/sound/soc/mediatek/mt8192/mt8192-dai-adda.c
+++ b/sound/soc/mediatek/mt8192/mt8192-dai-adda.c
@@ -413,8 +413,6 @@ static int mtk_adda_pad_top_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_PRE_PMU:
if (afe_priv->mtkaif_protocol == MTKAIF_PROTOCOL_2_CLK_P2)
regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x38);
- else if (afe_priv->mtkaif_protocol == MTKAIF_PROTOCOL_2)
- regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x30);
else
regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x30);
break;
diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c
index 9339fabccb79..1dfee1396843 100644
--- a/sound/soc/meson/g12a-toacodec.c
+++ b/sound/soc/meson/g12a-toacodec.c
@@ -21,17 +21,41 @@
#define TOACODEC_CTRL0 0x0
#define CTRL0_ENABLE_SHIFT 31
-#define CTRL0_DAT_SEL_SHIFT 14
-#define CTRL0_DAT_SEL (0x3 << CTRL0_DAT_SEL_SHIFT)
+#define CTRL0_DAT_SEL_SM1_MSB 19
+#define CTRL0_DAT_SEL_SM1_LSB 18
+#define CTRL0_DAT_SEL_MSB 15
+#define CTRL0_DAT_SEL_LSB 14
+#define CTRL0_LANE_SEL_SM1 16
#define CTRL0_LANE_SEL 12
-#define CTRL0_LRCLK_SEL GENMASK(9, 8)
+#define CTRL0_LRCLK_SEL_SM1_MSB 14
+#define CTRL0_LRCLK_SEL_SM1_LSB 12
+#define CTRL0_LRCLK_SEL_MSB 9
+#define CTRL0_LRCLK_SEL_LSB 8
+#define CTRL0_LRCLK_INV_SM1 BIT(10)
+#define CTRL0_BLK_CAP_INV_SM1 BIT(9)
#define CTRL0_BLK_CAP_INV BIT(7)
+#define CTRL0_BCLK_O_INV_SM1 BIT(8)
#define CTRL0_BCLK_O_INV BIT(6)
-#define CTRL0_BCLK_SEL GENMASK(5, 4)
+#define CTRL0_BCLK_SEL_SM1_MSB 6
+#define CTRL0_BCLK_SEL_MSB 5
+#define CTRL0_BCLK_SEL_LSB 4
#define CTRL0_MCLK_SEL GENMASK(2, 0)
#define TOACODEC_OUT_CHMAX 2
+struct g12a_toacodec {
+ struct regmap_field *field_dat_sel;
+ struct regmap_field *field_lrclk_sel;
+ struct regmap_field *field_bclk_sel;
+};
+
+struct g12a_toacodec_match_data {
+ const struct snd_soc_component_driver *component_drv;
+ struct reg_field field_dat_sel;
+ struct reg_field field_lrclk_sel;
+ struct reg_field field_bclk_sel;
+};
+
static const char * const g12a_toacodec_mux_texts[] = {
"I2S A", "I2S B", "I2S C",
};
@@ -41,29 +65,24 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_component *component =
snd_soc_dapm_kcontrol_component(kcontrol);
+ struct g12a_toacodec *priv = snd_soc_component_get_drvdata(component);
struct snd_soc_dapm_context *dapm =
snd_soc_dapm_kcontrol_dapm(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux, changed;
+ unsigned int mux, reg;
mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
- changed = snd_soc_component_test_bits(component, e->reg,
- CTRL0_DAT_SEL,
- FIELD_PREP(CTRL0_DAT_SEL, mux));
+ regmap_field_read(priv->field_dat_sel, &reg);
- if (!changed)
+ if (mux == reg)
return 0;
/* Force disconnect of the mux while updating */
snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
- snd_soc_component_update_bits(component, e->reg,
- CTRL0_DAT_SEL |
- CTRL0_LRCLK_SEL |
- CTRL0_BCLK_SEL,
- FIELD_PREP(CTRL0_DAT_SEL, mux) |
- FIELD_PREP(CTRL0_LRCLK_SEL, mux) |
- FIELD_PREP(CTRL0_BCLK_SEL, mux));
+ regmap_field_write(priv->field_dat_sel, mux);
+ regmap_field_write(priv->field_lrclk_sel, mux);
+ regmap_field_write(priv->field_bclk_sel, mux);
/*
* FIXME:
@@ -86,7 +105,11 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
}
static SOC_ENUM_SINGLE_DECL(g12a_toacodec_mux_enum, TOACODEC_CTRL0,
- CTRL0_DAT_SEL_SHIFT,
+ CTRL0_DAT_SEL_LSB,
+ g12a_toacodec_mux_texts);
+
+static SOC_ENUM_SINGLE_DECL(sm1_toacodec_mux_enum, TOACODEC_CTRL0,
+ CTRL0_DAT_SEL_SM1_LSB,
g12a_toacodec_mux_texts);
static const struct snd_kcontrol_new g12a_toacodec_mux =
@@ -94,6 +117,11 @@ static const struct snd_kcontrol_new g12a_toacodec_mux =
snd_soc_dapm_get_enum_double,
g12a_toacodec_mux_put_enum);
+static const struct snd_kcontrol_new sm1_toacodec_mux =
+ SOC_DAPM_ENUM_EXT("Source", sm1_toacodec_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ g12a_toacodec_mux_put_enum);
+
static const struct snd_kcontrol_new g12a_toacodec_out_enable =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", TOACODEC_CTRL0,
CTRL0_ENABLE_SHIFT, 1, 0);
@@ -105,6 +133,13 @@ static const struct snd_soc_dapm_widget g12a_toacodec_widgets[] = {
&g12a_toacodec_out_enable),
};
+static const struct snd_soc_dapm_widget sm1_toacodec_widgets[] = {
+ SND_SOC_DAPM_MUX("SRC", SND_SOC_NOPM, 0, 0,
+ &sm1_toacodec_mux),
+ SND_SOC_DAPM_SWITCH("OUT EN", SND_SOC_NOPM, 0, 0,
+ &g12a_toacodec_out_enable),
+};
+
static int g12a_toacodec_input_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -175,6 +210,13 @@ static int g12a_toacodec_component_probe(struct snd_soc_component *c)
CTRL0_BLK_CAP_INV);
}
+static int sm1_toacodec_component_probe(struct snd_soc_component *c)
+{
+ /* Initialize the static clock parameters */
+ return snd_soc_component_write(c, TOACODEC_CTRL0,
+ CTRL0_BLK_CAP_INV_SM1);
+}
+
static const struct snd_soc_dapm_route g12a_toacodec_routes[] = {
{ "SRC", "I2S A", "IN A Playback" },
{ "SRC", "I2S B", "IN B Playback" },
@@ -187,6 +229,10 @@ static const struct snd_kcontrol_new g12a_toacodec_controls[] = {
SOC_SINGLE("Lane Select", TOACODEC_CTRL0, CTRL0_LANE_SEL, 3, 0),
};
+static const struct snd_kcontrol_new sm1_toacodec_controls[] = {
+ SOC_SINGLE("Lane Select", TOACODEC_CTRL0, CTRL0_LANE_SEL_SM1, 3, 0),
+};
+
static const struct snd_soc_component_driver g12a_toacodec_component_drv = {
.probe = g12a_toacodec_component_probe,
.controls = g12a_toacodec_controls,
@@ -199,25 +245,72 @@ static const struct snd_soc_component_driver g12a_toacodec_component_drv = {
.non_legacy_dai_naming = 1,
};
+static const struct snd_soc_component_driver sm1_toacodec_component_drv = {
+ .probe = sm1_toacodec_component_probe,
+ .controls = sm1_toacodec_controls,
+ .num_controls = ARRAY_SIZE(sm1_toacodec_controls),
+ .dapm_widgets = sm1_toacodec_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sm1_toacodec_widgets),
+ .dapm_routes = g12a_toacodec_routes,
+ .num_dapm_routes = ARRAY_SIZE(g12a_toacodec_routes),
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
static const struct regmap_config g12a_toacodec_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
+static const struct g12a_toacodec_match_data g12a_toacodec_match_data = {
+ .component_drv = &g12a_toacodec_component_drv,
+ .field_dat_sel = REG_FIELD(TOACODEC_CTRL0, 14, 15),
+ .field_lrclk_sel = REG_FIELD(TOACODEC_CTRL0, 8, 9),
+ .field_bclk_sel = REG_FIELD(TOACODEC_CTRL0, 4, 5),
+};
+
+static const struct g12a_toacodec_match_data sm1_toacodec_match_data = {
+ .component_drv = &sm1_toacodec_component_drv,
+ .field_dat_sel = REG_FIELD(TOACODEC_CTRL0, 18, 19),
+ .field_lrclk_sel = REG_FIELD(TOACODEC_CTRL0, 12, 14),
+ .field_bclk_sel = REG_FIELD(TOACODEC_CTRL0, 4, 6),
+};
+
static const struct of_device_id g12a_toacodec_of_match[] = {
- { .compatible = "amlogic,g12a-toacodec", },
+ {
+ .compatible = "amlogic,g12a-toacodec",
+ .data = &g12a_toacodec_match_data,
+ },
+ {
+ .compatible = "amlogic,sm1-toacodec",
+ .data = &sm1_toacodec_match_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, g12a_toacodec_of_match);
static int g12a_toacodec_probe(struct platform_device *pdev)
{
+ const struct g12a_toacodec_match_data *data;
struct device *dev = &pdev->dev;
+ struct g12a_toacodec *priv;
void __iomem *regs;
struct regmap *map;
int ret;
+ data = device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
ret = device_reset(dev);
if (ret)
return ret;
@@ -233,8 +326,20 @@ static int g12a_toacodec_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
+ priv->field_dat_sel = devm_regmap_field_alloc(dev, map, data->field_dat_sel);
+ if (IS_ERR(priv->field_dat_sel))
+ return PTR_ERR(priv->field_dat_sel);
+
+ priv->field_lrclk_sel = devm_regmap_field_alloc(dev, map, data->field_lrclk_sel);
+ if (IS_ERR(priv->field_lrclk_sel))
+ return PTR_ERR(priv->field_lrclk_sel);
+
+ priv->field_bclk_sel = devm_regmap_field_alloc(dev, map, data->field_bclk_sel);
+ if (IS_ERR(priv->field_bclk_sel))
+ return PTR_ERR(priv->field_bclk_sel);
+
return devm_snd_soc_register_component(dev,
- &g12a_toacodec_component_drv, g12a_toacodec_dai_drv,
+ data->component_drv, g12a_toacodec_dai_drv,
ARRAY_SIZE(g12a_toacodec_dai_drv));
}
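
Switching the mux code to regmap fields lets one driver body serve both SoC layouts: each variant only supplies its own {register, lsb, msb} triplet in the match data, and the shared put_enum path never touches raw masks. A userspace sketch of the arithmetic a regmap_field performs; the field definitions mirror the match data above, but the helpers are simplified stand-ins for the regmap API:

/* Simplified model of regmap_field read/write over a {reg, lsb, msb} field. */
#include <stdio.h>

struct reg_field { unsigned int reg, lsb, msb; };

static unsigned int fake_reg;	/* stand-in for the TOACODEC_CTRL0 register */

static unsigned int field_mask(struct reg_field f)
{
	return ((1u << (f.msb - f.lsb + 1)) - 1) << f.lsb;
}

static void field_write(struct reg_field f, unsigned int val)
{
	fake_reg = (fake_reg & ~field_mask(f)) | ((val << f.lsb) & field_mask(f));
}

static unsigned int field_read(struct reg_field f)
{
	return (fake_reg & field_mask(f)) >> f.lsb;
}

int main(void)
{
	struct reg_field g12a_dat_sel = { 0x0, 14, 15 };	/* G12A layout */
	struct reg_field sm1_dat_sel  = { 0x0, 18, 19 };	/* SM1 layout  */

	field_write(sm1_dat_sel, 2);	/* select I2S C on SM1 */
	printf("SM1 DAT_SEL = %u (reg = 0x%08x)\n",
	       field_read(sm1_dat_sel), fake_reg);

	field_write(g12a_dat_sel, 1);	/* same call pattern, different bits */
	printf("G12A DAT_SEL = %u (reg = 0x%08x)\n",
	       field_read(g12a_dat_sel), fake_reg);
	return 0;
}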
diff --git a/sound/soc/meson/meson-card-utils.c b/sound/soc/meson/meson-card-utils.c
index 300ac8be46ef..415cc0046e4b 100644
--- a/sound/soc/meson/meson-card-utils.c
+++ b/sound/soc/meson/meson-card-utils.c
@@ -119,9 +119,9 @@ unsigned int meson_card_parse_daifmt(struct device_node *node,
struct device_node *framemaster = NULL;
unsigned int daifmt;
- daifmt = snd_soc_of_parse_daifmt(node, "",
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+ daifmt = snd_soc_daifmt_parse_format(node, NULL);
+
+ snd_soc_daifmt_parse_clock_provider_as_phandle(node, NULL, &bitclkmaster, &framemaster);
/* If no master is provided, default to cpu master */
if (!bitclkmaster || bitclkmaster == cpu_node) {
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 270986b2f102..08a05f0ecad7 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -134,7 +134,6 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
struct apq8016_sbc_data *data;
- struct resource *res;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -151,13 +150,11 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
if (ret)
return ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mic-iomux");
- data->mic_iomux = devm_ioremap_resource(dev, res);
+ data->mic_iomux = devm_platform_ioremap_resource_byname(pdev, "mic-iomux");
if (IS_ERR(data->mic_iomux))
return PTR_ERR(data->mic_iomux);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spkr-iomux");
- data->spkr_iomux = devm_ioremap_resource(dev, res);
+ data->spkr_iomux = devm_platform_ioremap_resource_byname(pdev, "spkr-iomux");
if (IS_ERR(data->spkr_iomux))
return PTR_ERR(data->spkr_iomux);
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index a6e95db6b3fb..3bd9eb3cc688 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -29,6 +29,15 @@
#define LPASS_CPU_I2S_SD0_1_2_MASK GENMASK(2, 0)
#define LPASS_CPU_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
+/*
+ * Channel maps for Quad channel playbacks on MI2S Secondary
+ */
+static struct snd_pcm_chmap_elem lpass_quad_chmaps[] = {
+ { .channels = 4,
+ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_RL,
+ SNDRV_CHMAP_FR, SNDRV_CHMAP_RR } },
+ { }
+};
static int lpass_cpu_init_i2sctl_bitfields(struct device *dev,
struct lpaif_i2sctl *i2sctl, struct regmap *map)
{
@@ -403,6 +412,25 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
+int lpass_cpu_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai)
+{
+ int ret;
+ struct snd_soc_dai_driver *drv = dai->driver;
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+
+ if (drvdata->mi2s_playback_sd_mode[dai->id] == LPAIF_I2SCTL_MODE_QUAD01) {
+ ret = snd_pcm_add_chmap_ctls(rtd->pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ lpass_quad_chmaps, drv->playback.channels_max, 0,
+ NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lpass_cpu_pcm_new);
+
int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai)
{
struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
@@ -497,6 +525,8 @@ static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
return true;
for (i = 0; i < v->irq_ports; ++i) {
+ if (reg == LPAIF_IRQCLEAR_REG(v, i))
+ return true;
if (reg == LPAIF_IRQEN_REG(v, i))
return true;
if (reg == LPAIF_IRQSTAT_REG(v, i))
@@ -538,9 +568,12 @@ static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
struct lpass_variant *v = drvdata->variant;
int i;
- for (i = 0; i < v->irq_ports; ++i)
+ for (i = 0; i < v->irq_ports; ++i) {
+ if (reg == LPAIF_IRQCLEAR_REG(v, i))
+ return true;
if (reg == LPAIF_IRQSTAT_REG(v, i))
return true;
+ }
for (i = 0; i < v->rdma_channels; ++i)
if (reg == LPAIF_RDMACURR_REG(v, i))
@@ -839,7 +872,6 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
{
struct lpass_data *drvdata;
struct device_node *dsp_of_node;
- struct resource *res;
struct lpass_variant *variant;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
@@ -865,9 +897,7 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
of_lpass_cpu_parse_dai_data(dev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-lpaif");
-
- drvdata->lpaif = devm_ioremap_resource(dev, res);
+ drvdata->lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-lpaif");
if (IS_ERR(drvdata->lpaif))
return PTR_ERR(drvdata->lpaif);
@@ -884,9 +914,7 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
}
if (drvdata->hdmi_port_enable) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-hdmiif");
-
- drvdata->hdmiif = devm_ioremap_resource(dev, res);
+ drvdata->hdmiif = devm_platform_ioremap_resource_byname(pdev, "lpass-hdmiif");
if (IS_ERR(drvdata->hdmiif))
return PTR_ERR(drvdata->hdmiif);
@@ -925,6 +953,11 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
}
+ if (drvdata->mi2s_playback_sd_mode[dai_id] ==
+ LPAIF_I2SCTL_MODE_QUAD01) {
+ variant->dai_driver[dai_id].playback.channels_min = 4;
+ variant->dai_driver[dai_id].playback.channels_max = 4;
+ }
}
/* Allocation for i2sctl regmap fields */
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 0df9481ea4c6..f9df76d37858 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -526,7 +526,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
return -EINVAL;
}
- ret = regmap_write(map, reg_irqclr, val_irqclr);
+ ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
if (ret) {
dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
return ret;
@@ -650,10 +650,11 @@ static irqreturn_t lpass_dma_interrupt_handler(
struct lpass_variant *v = drvdata->variant;
irqreturn_t ret = IRQ_NONE;
int rv;
- unsigned int reg = 0, val = 0;
+ unsigned int reg, val, mask;
struct regmap *map;
unsigned int dai_id = cpu_dai->driver->id;
+ mask = LPAIF_IRQ_ALL(chan);
switch (dai_id) {
case LPASS_DP_RX:
map = drvdata->hdmiif_map;
@@ -676,8 +677,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
return -EINVAL;
}
if (interrupts & LPAIF_IRQ_PER(chan)) {
-
- rv = regmap_write(map, reg, LPAIF_IRQ_PER(chan) | val);
+ rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
}
if (interrupts & LPAIF_IRQ_XRUN(chan)) {
- rv = regmap_write(map, reg, LPAIF_IRQ_XRUN(chan) | val);
+ rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
@@ -700,7 +700,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
}
if (interrupts & LPAIF_IRQ_ERR(chan)) {
- rv = regmap_write(map, reg, LPAIF_IRQ_ERR(chan) | val);
+ rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c
index 8c168d3c589e..77a556b27cf0 100644
--- a/sound/soc/qcom/lpass-sc7180.c
+++ b/sound/soc/qcom/lpass-sc7180.c
@@ -58,6 +58,7 @@ static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
},
.probe = &asoc_qcom_lpass_cpu_dai_probe,
.ops = &asoc_qcom_lpass_cpu_dai_ops,
+ .pcm_new = lpass_cpu_pcm_new,
}, {
.id = LPASS_DP_RX,
.name = "Hdmi",
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 7f72214404ba..67ef497166af 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -263,5 +263,7 @@ void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev);
int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev);
int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai);
extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
+int lpass_cpu_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
#endif /* __LPASS_H__ */
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index b539af86e8f7..ac8f7324e94b 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -475,6 +475,7 @@ static int q6afe_dai_prepare(struct snd_pcm_substream *substream,
q6afe_slim_port_prepare(dai_data->port[dai->id],
&dai_data->port_config[dai->id].slim);
break;
+ case QUINARY_MI2S_RX ... QUINARY_MI2S_TX:
case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
rc = q6afe_i2s_port_prepare(dai_data->port[dai->id],
&dai_data->port_config[dai->id].i2s_cfg);
@@ -598,6 +599,7 @@ static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
{"Secondary MI2S Playback", NULL, "SEC_MI2S_RX"},
{"Tertiary MI2S Playback", NULL, "TERT_MI2S_RX"},
{"Quaternary MI2S Playback", NULL, "QUAT_MI2S_RX"},
+ {"Quinary MI2S Playback", NULL, "QUIN_MI2S_RX"},
{"Primary TDM0 Playback", NULL, "PRIMARY_TDM_RX_0"},
{"Primary TDM1 Playback", NULL, "PRIMARY_TDM_RX_1"},
@@ -693,6 +695,7 @@ static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
{"PRI_MI2S_TX", NULL, "Primary MI2S Capture"},
{"SEC_MI2S_TX", NULL, "Secondary MI2S Capture"},
{"QUAT_MI2S_TX", NULL, "Quaternary MI2S Capture"},
+ {"QUIN_MI2S_TX", NULL, "Quinary MI2S Capture"},
{"WSA_CODEC_DMA_RX_0 Playback", NULL, "WSA_CODEC_DMA_RX_0"},
{"WSA_CODEC_DMA_TX_0", NULL, "WSA_CODEC_DMA_TX_0 Capture"},
@@ -1190,6 +1193,39 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.ops = &q6i2s_ops,
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
+ }, {
+ .playback = {
+ .stream_name = "Quinary MI2S Playback",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .id = QUINARY_MI2S_RX,
+ .name = "QUIN_MI2S_RX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ }, {
+ .capture = {
+ .stream_name = "Quinary MI2S Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .id = QUINARY_MI2S_TX,
+ .name = "QUIN_MI2S_TX",
+ .ops = &q6i2s_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
},
Q6AFE_TDM_PB_DAI("Primary", 0, PRIMARY_TDM_RX_0),
Q6AFE_TDM_PB_DAI("Primary", 1, PRIMARY_TDM_RX_1),
@@ -1349,6 +1385,10 @@ static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUIN_MI2S_RX", NULL,
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("QUIN_MI2S_TX", NULL,
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
@@ -1610,6 +1650,7 @@ static void of_q6afe_parse_dai_data(struct device *dev,
switch (id) {
/* MI2S specific properties */
+ case QUINARY_MI2S_RX ... QUINARY_MI2S_TX:
case PRIMARY_MI2S_RX ... QUATERNARY_MI2S_TX:
priv = &data->priv[id];
ret = of_property_read_variable_u32_array(node,
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 729d27da0447..625724852a7f 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -120,6 +120,8 @@
#define AFE_PORT_ID_TERTIARY_MI2S_TX 0x1005
#define AFE_PORT_ID_QUATERNARY_MI2S_RX 0x1006
#define AFE_PORT_ID_QUATERNARY_MI2S_TX 0x1007
+#define AFE_PORT_ID_QUINARY_MI2S_RX 0x1016
+#define AFE_PORT_ID_QUINARY_MI2S_TX 0x1017
/* Start of the range of port IDs for TDM devices. */
#define AFE_PORT_ID_TDM_PORT_RANGE_START 0x9000
@@ -620,6 +622,10 @@ static struct afe_port_map port_maps[AFE_PORT_MAX] = {
QUATERNARY_MI2S_RX, 1, 1},
[QUATERNARY_MI2S_TX] = { AFE_PORT_ID_QUATERNARY_MI2S_TX,
QUATERNARY_MI2S_TX, 0, 1},
+ [QUINARY_MI2S_RX] = { AFE_PORT_ID_QUINARY_MI2S_RX,
+ QUINARY_MI2S_RX, 1, 1},
+ [QUINARY_MI2S_TX] = { AFE_PORT_ID_QUINARY_MI2S_TX,
+ QUINARY_MI2S_TX, 0, 1},
[PRIMARY_TDM_RX_0] = { AFE_PORT_ID_PRIMARY_TDM_RX,
PRIMARY_TDM_RX_0, 1, 1},
[PRIMARY_TDM_TX_0] = { AFE_PORT_ID_PRIMARY_TDM_TX,
@@ -1596,6 +1602,8 @@ struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
case AFE_PORT_ID_TERTIARY_MI2S_TX:
case AFE_PORT_ID_QUATERNARY_MI2S_RX:
case AFE_PORT_ID_QUATERNARY_MI2S_TX:
+ case AFE_PORT_ID_QUINARY_MI2S_RX:
+ case AFE_PORT_ID_QUINARY_MI2S_TX:
cfg_type = AFE_PARAM_ID_I2S_CONFIG;
break;
case AFE_PORT_ID_PRIMARY_TDM_RX ... AFE_PORT_ID_QUINARY_TDM_TX_7:
diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
index f9a1c04e38c2..30fd77e2f458 100644
--- a/sound/soc/qcom/qdsp6/q6afe.h
+++ b/sound/soc/qcom/qdsp6/q6afe.h
@@ -5,7 +5,7 @@
#include <dt-bindings/sound/qcom,q6afe.h>
-#define AFE_PORT_MAX 127
+#define AFE_PORT_MAX 129
#define MSM_AFE_PORT_TYPE_RX 0
#define MSM_AFE_PORT_TYPE_TX 1
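
AFE_PORT_MAX grows from 127 to 129 because the port_maps[] table in q6afe.c is indexed directly by the dt-binding port IDs, and the two new Quinary MI2S entries occupy the next IDs after the existing range. A minimal sketch of the sizing rule; the concrete values 127/128 are inferred from the +2 bump, not quoted from the bindings header:

/* Why AFE_PORT_MAX must exceed the highest port ID used as an array index.
 * QUINARY_MI2S_RX/TX values here are assumptions. */
#include <stdio.h>

enum { QUINARY_MI2S_RX = 127, QUINARY_MI2S_TX = 128 };
#define AFE_PORT_MAX 129		/* highest ID + 1 */

static int port_maps[AFE_PORT_MAX];

int main(void)
{
	port_maps[QUINARY_MI2S_TX] = 1;	/* index 128 needs at least 129 slots */
	printf("table holds %d entries, last used index %d\n",
	       AFE_PORT_MAX, QUINARY_MI2S_TX);
	return 0;
}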
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 9766725c2916..5ff56a735419 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -1169,7 +1169,7 @@ static int q6asm_dai_compr_get_codec_caps(struct snd_soc_component *component,
return 0;
}
-static struct snd_compress_ops q6asm_dai_compress_ops = {
+static const struct snd_compress_ops q6asm_dai_compress_ops = {
.open = q6asm_dai_compr_open,
.free = q6asm_dai_compr_free,
.set_params = q6asm_dai_compr_set_params,
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index 0a6b9433f6ac..3390ebef9549 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -66,6 +66,7 @@
{ mix_name, "PRI_MI2S_TX", "PRI_MI2S_TX" }, \
{ mix_name, "SEC_MI2S_TX", "SEC_MI2S_TX" }, \
{ mix_name, "QUAT_MI2S_TX", "QUAT_MI2S_TX" }, \
+ { mix_name, "QUIN_MI2S_TX", "QUIN_MI2S_TX" }, \
{ mix_name, "TERT_MI2S_TX", "TERT_MI2S_TX" }, \
{ mix_name, "SLIMBUS_0_TX", "SLIMBUS_0_TX" }, \
{ mix_name, "SLIMBUS_1_TX", "SLIMBUS_1_TX" }, \
@@ -140,6 +141,9 @@
SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX, \
id, 1, 0, msm_routing_get_audio_mixer, \
msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("QUIN_MI2S_TX", QUINARY_MI2S_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
SOC_SINGLE_EXT("SLIMBUS_0_TX", SLIMBUS_0_TX, \
id, 1, 0, msm_routing_get_audio_mixer, \
msm_routing_put_audio_mixer), \
@@ -513,6 +517,9 @@ static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
Q6ROUTING_RX_MIXERS(QUATERNARY_MI2S_RX) };
+static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(QUINARY_MI2S_RX) };
+
static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
Q6ROUTING_RX_MIXERS(TERTIARY_MI2S_RX) };
@@ -752,6 +759,9 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_MIXER("QUAT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
quaternary_mi2s_rx_mixer_controls,
ARRAY_SIZE(quaternary_mi2s_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("QUIN_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ quinary_mi2s_rx_mixer_controls,
+ ARRAY_SIZE(quinary_mi2s_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("TERT_MI2S_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
tertiary_mi2s_rx_mixer_controls,
ARRAY_SIZE(tertiary_mi2s_rx_mixer_controls)),
@@ -941,6 +951,7 @@ static const struct snd_soc_dapm_route intercon[] = {
Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_5_RX Audio Mixer", "SLIMBUS_5_RX"),
Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_6_RX Audio Mixer", "SLIMBUS_6_RX"),
Q6ROUTING_RX_DAPM_ROUTE("QUAT_MI2S_RX Audio Mixer", "QUAT_MI2S_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("QUIN_MI2S_RX Audio Mixer", "QUIN_MI2S_RX"),
Q6ROUTING_RX_DAPM_ROUTE("TERT_MI2S_RX Audio Mixer", "TERT_MI2S_RX"),
Q6ROUTING_RX_DAPM_ROUTE("SEC_MI2S_RX Audio Mixer", "SEC_MI2S_RX"),
Q6ROUTING_RX_DAPM_ROUTE("PRI_MI2S_RX Audio Mixer", "PRI_MI2S_RX"),
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 153e9b2de0b5..0adfc5708949 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -288,6 +288,14 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dai_set_sysclk(codec_dai, 0,
WCD934X_DEFAULT_MCLK_RATE,
SNDRV_PCM_STREAM_PLAYBACK);
+
+ rval = snd_soc_component_set_jack(codec_dai->component,
+ &pdata->jack, NULL);
+ if (rval != 0 && rval != -ENOTSUPP) {
+ dev_warn(card->dev, "Failed to set jack: %d\n", rval);
+ return rval;
+ }
+
}
break;
default:
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 0740764e7f71..c7dc3509bceb 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -618,8 +618,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
return PTR_ERR(i2s->mclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
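
Note: the rockchip (and the samsung ones further below) conversions all replace the two-step
platform_get_resource() + devm_ioremap_resource() sequence with the combined helper. A minimal
probe sketch of the same pattern, for illustration only (driver name and structure are made up,
not part of this patch set):

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		/* one call looks up IORESOURCE_MEM index 0 and ioremaps it;
		 * 'res' is still filled in for drivers that need the raw range */
		regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		return 0;
	}
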
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 9295d648624e..38bd603eeb45 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -495,8 +495,7 @@ static int rockchip_pdm_probe(struct platform_device *pdev)
return PTR_ERR(pdm->reset);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index ffb4ec306441..73226a46d489 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -313,8 +313,7 @@ static int rk_spdif_probe(struct platform_device *pdev)
if (IS_ERR(spdif->mclk))
return PTR_ERR(spdif->mclk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index c632842d42eb..309badc97290 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1441,8 +1441,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr))
return PTR_ERR(priv->addr);
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index bfd76e9cc0ca..4c4dfde0568f 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -512,8 +512,7 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
/* Default is 128fs */
pcm->sclk_per_fs = 128;
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcm->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ pcm->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(pcm->regs))
return PTR_ERR(pcm->regs);
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 81f416ac457e..ec1c6f9d76ac 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -208,8 +208,7 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
return -ENXIO;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3c2412_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
+ s3c2412_i2s.regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(s3c2412_i2s.regs))
return PTR_ERR(s3c2412_i2s.regs);
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 50c08008aacb..0f46304eaa4f 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -425,8 +425,7 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
+ s3c24xx_i2s.regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(s3c24xx_i2s.regs))
return PTR_ERR(s3c24xx_i2s.regs);
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 3c574792231b..cdf3b7f69ba7 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -219,7 +219,7 @@ struct fsi_stream {
u32 bus_option;
/*
- * thse are initialized by fsi_handler_init()
+ * these are initialized by fsi_handler_init()
*/
struct fsi_stream_handler *handler;
struct fsi_priv *priv;
@@ -1694,12 +1694,27 @@ static int fsi_dai_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+/*
+ * Select below from Sound Card, not auto
+ * SND_SOC_DAIFMT_CBC_CFC
+ * SND_SOC_DAIFMT_CBP_CFP
+ */
+static u64 fsi_dai_formats =
+ SND_SOC_POSSIBLE_DAIFMT_I2S |
+ SND_SOC_POSSIBLE_DAIFMT_LEFT_J |
+ SND_SOC_POSSIBLE_DAIFMT_NB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_NB_IF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_IF;
+
static const struct snd_soc_dai_ops fsi_dai_ops = {
.startup = fsi_dai_startup,
.shutdown = fsi_dai_shutdown,
.trigger = fsi_dai_trigger,
.set_fmt = fsi_dai_set_fmt,
.hw_params = fsi_dai_hw_params,
+ .auto_selectable_formats = &fsi_dai_formats,
+ .num_auto_selectable_formats = 1,
};
/*
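
Note: the .auto_selectable_formats addition above lets the ASoC core pick a DAI format
automatically when the sound card does not force one. A hedged sketch of how a CPU DAI might
advertise a single priority level (identifiers are illustrative, not the fsi ones):

	#include <sound/soc.h>

	/* one u64 bitmask per priority level; here a single level */
	static u64 example_dai_formats = SND_SOC_POSSIBLE_DAIFMT_I2S |
					 SND_SOC_POSSIBLE_DAIFMT_NB_NF;

	static const struct snd_soc_dai_ops example_dai_ops = {
		.auto_selectable_formats     = &example_dai_formats,
		.num_auto_selectable_formats = 1,
	};
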
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index 5d1ff8ef26f9..d07eccfa3ac2 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-snd-soc-rcar-objs := core.o gen.o dma.o adg.o ssi.o ssiu.o src.o ctu.o mix.o dvc.o cmd.o
+snd-soc-rcar-objs := core.o gen.o dma.o adg.o ssi.o ssiu.o src.o ctu.o mix.o dvc.o cmd.o debugfs.o
obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 0b8ae3eee148..0ebee1ed06a9 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -3,8 +3,8 @@
// Helper routines for R-Car sound ADG.
//
// Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
-
#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#include "rsnd.h"
#define CLKA 0
@@ -28,6 +28,7 @@ static struct rsnd_mod_ops adg_ops = {
struct rsnd_adg {
struct clk *clk[CLKMAX];
struct clk *clkout[CLKOUTMAX];
+ struct clk *null_clk;
struct clk_onecell_data onecell;
struct rsnd_mod mod;
int clk_rate[CLKMAX];
@@ -290,7 +291,6 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
{
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
- struct clk *clk;
int i;
int sel_table[] = {
[CLKA] = 0x1,
@@ -303,10 +303,9 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
* find suitable clock from
* AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
*/
- for_each_rsnd_clk(clk, adg, i) {
+ for (i = 0; i < CLKMAX; i++)
if (rate == adg->clk_rate[i])
return sel_table[i];
- }
/*
* find divided clock from BRGA/BRGB
@@ -365,48 +364,103 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
{
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
- struct device *dev = rsnd_priv_to_dev(priv);
struct clk *clk;
int i;
for_each_rsnd_clk(clk, adg, i) {
if (enable) {
- int ret = clk_prepare_enable(clk);
+ clk_prepare_enable(clk);
/*
* We shouldn't use clk_get_rate() under
* atomic context. Let's keep it when
* rsnd_adg_clk_enable() was called
*/
- adg->clk_rate[i] = 0;
- if (ret < 0)
- dev_warn(dev, "can't use clk %d\n", i);
- else
- adg->clk_rate[i] = clk_get_rate(clk);
+ adg->clk_rate[i] = clk_get_rate(clk);
} else {
- if (adg->clk_rate[i])
- clk_disable_unprepare(clk);
- adg->clk_rate[i] = 0;
+ clk_disable_unprepare(clk);
}
}
}
-static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
- struct rsnd_adg *adg)
+static struct clk *rsnd_adg_create_null_clk(struct rsnd_priv *priv,
+ const char * const name,
+ const char *parent)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(dev, name, parent, 0, 0);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "create null clk error\n");
+ return NULL;
+ }
+
+ return clk;
+}
+
+static struct clk *rsnd_adg_null_clk_get(struct rsnd_priv *priv)
+{
+ struct rsnd_adg *adg = priv->adg;
+
+ if (!adg->null_clk) {
+ static const char * const name = "rsnd_adg_null";
+
+ adg->null_clk = rsnd_adg_create_null_clk(priv, name, NULL);
+ }
+
+ return adg->null_clk;
+}
+
+static void rsnd_adg_null_clk_clean(struct rsnd_priv *priv)
{
+ struct rsnd_adg *adg = priv->adg;
+
+ if (adg->null_clk)
+ clk_unregister_fixed_rate(adg->null_clk);
+}
+
+static int rsnd_adg_get_clkin(struct rsnd_priv *priv)
+{
+ struct rsnd_adg *adg = priv->adg;
struct device *dev = rsnd_priv_to_dev(priv);
+ struct clk *clk;
int i;
for (i = 0; i < CLKMAX; i++) {
- struct clk *clk = devm_clk_get(dev, clk_name[i]);
+ clk = devm_clk_get(dev, clk_name[i]);
+
+ if (IS_ERR(clk))
+ clk = rsnd_adg_null_clk_get(priv);
+ if (IS_ERR(clk))
+ goto err;
- adg->clk[i] = IS_ERR(clk) ? NULL : clk;
+ adg->clk[i] = clk;
}
+
+ return 0;
+
+err:
+ dev_err(dev, "adg clock IN get failed\n");
+
+ rsnd_adg_null_clk_clean(priv);
+
+ return -EIO;
}
-static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
- struct rsnd_adg *adg)
+static void rsnd_adg_unregister_clkout(struct rsnd_priv *priv)
{
+ struct rsnd_adg *adg = priv->adg;
+ struct clk *clk;
+ int i;
+
+ for_each_rsnd_clkout(clk, adg, i)
+ clk_unregister_fixed_rate(clk);
+}
+
+static int rsnd_adg_get_clkout(struct rsnd_priv *priv)
+{
+ struct rsnd_adg *adg = priv->adg;
struct clk *clk;
struct device *dev = rsnd_priv_to_dev(priv);
struct device_node *np = dev->of_node;
@@ -446,9 +500,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
req_size = prop->length / sizeof(u32);
if (req_size > REQ_SIZE) {
- dev_err(dev,
- "too many clock-frequency, use top %d\n", REQ_SIZE);
- req_size = REQ_SIZE;
+ dev_err(dev, "too many clock-frequency\n");
+ return -EINVAL;
}
of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
@@ -529,10 +582,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
if (!count) {
clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT],
parent_clk_name, 0, req_rate[0]);
- if (!IS_ERR(clk)) {
- adg->clkout[CLKOUT] = clk;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- }
+ if (IS_ERR(clk))
+ goto err;
+
+ adg->clkout[CLKOUT] = clk;
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
/*
* for clkout0/1/2/3
@@ -542,8 +596,10 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
clk = clk_register_fixed_rate(dev, clkout_name[i],
parent_clk_name, 0,
req_rate[0]);
- if (!IS_ERR(clk))
- adg->clkout[i] = clk;
+ if (IS_ERR(clk))
+ goto err;
+
+ adg->clkout[i] = clk;
}
adg->onecell.clks = adg->clkout;
adg->onecell.clk_num = CLKOUTMAX;
@@ -555,34 +611,61 @@ rsnd_adg_get_clkout_end:
adg->ckr = ckr;
adg->rbga = rbga;
adg->rbgb = rbgb;
+
+ return 0;
+
+err:
+ dev_err(dev, "adg clock OUT get failed\n");
+
+ rsnd_adg_unregister_clkout(priv);
+
+ return -EIO;
}
-#ifdef DEBUG
-static void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct rsnd_adg *adg)
+#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
+__printf(3, 4)
+static void dbg_msg(struct device *dev, struct seq_file *m,
+ const char *fmt, ...)
{
+ char msg[128];
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(msg, sizeof(msg), fmt, args);
+ va_end(args);
+
+ if (m)
+ seq_puts(m, msg);
+ else
+ dev_dbg(dev, "%s", msg);
+}
+
+void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct seq_file *m)
+{
+ struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct device *dev = rsnd_priv_to_dev(priv);
struct clk *clk;
int i;
for_each_rsnd_clk(clk, adg, i)
- dev_dbg(dev, "%s : %pa : %ld\n",
+ dbg_msg(dev, m, "%s : %pa : %ld\n",
clk_name[i], clk, clk_get_rate(clk));
- dev_dbg(dev, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
+ dbg_msg(dev, m, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
adg->ckr, adg->rbga, adg->rbgb);
- dev_dbg(dev, "BRGA (for 44100 base) = %d\n", adg->rbga_rate_for_441khz);
- dev_dbg(dev, "BRGB (for 48000 base) = %d\n", adg->rbgb_rate_for_48khz);
+ dbg_msg(dev, m, "BRGA (for 44100 base) = %d\n", adg->rbga_rate_for_441khz);
+ dbg_msg(dev, m, "BRGB (for 48000 base) = %d\n", adg->rbgb_rate_for_48khz);
/*
* Actual CLKOUT will be exchanged in rsnd_adg_ssi_clk_try_start()
* by BRGCKR::BRGCKR_31
*/
for_each_rsnd_clkout(clk, adg, i)
- dev_dbg(dev, "clkout %d : %pa : %ld\n", i,
+ dbg_msg(dev, m, "clkout %d : %pa : %ld\n", i,
clk, clk_get_rate(clk));
}
#else
-#define rsnd_adg_clk_dbg_info(priv, adg)
+#define rsnd_adg_clk_dbg_info(priv, m)
#endif
int rsnd_adg_probe(struct rsnd_priv *priv)
@@ -600,13 +683,18 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
if (ret)
return ret;
- rsnd_adg_get_clkin(priv, adg);
- rsnd_adg_get_clkout(priv, adg);
- rsnd_adg_clk_dbg_info(priv, adg);
-
priv->adg = adg;
+ ret = rsnd_adg_get_clkin(priv);
+ if (ret)
+ return ret;
+
+ ret = rsnd_adg_get_clkout(priv);
+ if (ret)
+ return ret;
+
rsnd_adg_clk_enable(priv);
+ rsnd_adg_clk_dbg_info(priv, NULL);
return 0;
}
@@ -615,15 +703,13 @@ void rsnd_adg_remove(struct rsnd_priv *priv)
{
struct device *dev = rsnd_priv_to_dev(priv);
struct device_node *np = dev->of_node;
- struct rsnd_adg *adg = priv->adg;
- struct clk *clk;
- int i;
- for_each_rsnd_clkout(clk, adg, i)
- if (adg->clkout[i])
- clk_unregister_fixed_rate(adg->clkout[i]);
+ rsnd_adg_unregister_clkout(priv);
of_clk_del_provider(np);
rsnd_adg_clk_disable(priv);
+
+ /* It should be called after rsnd_adg_clk_disable() */
+ rsnd_adg_null_clk_clean(priv);
}
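
Note: the adg.c rework above registers a dummy fixed-rate clock and substitutes it for any
missing AUDIO_CLKA/B/C/I input instead of storing NULL, so later clk_prepare_enable() and
clk_get_rate() calls stay valid. A rough sketch of that fallback idea using the common clock
framework (names are illustrative, not the rsnd API):

	#include <linux/clk.h>
	#include <linux/clk-provider.h>

	static struct clk *example_get_clk_or_null(struct device *dev, const char *id)
	{
		struct clk *clk = devm_clk_get(dev, id);

		/* fall back to a 0 Hz dummy clock when the optional input is absent;
		 * the caller must still check for ERR_PTR from the registration */
		if (IS_ERR(clk))
			clk = clk_register_fixed_rate(dev, "example_null_clk", NULL, 0, 0);

		return clk;
	}
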
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 9fdb37c2cbc2..329e6ab1b222 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -114,12 +114,26 @@ static int rsnd_cmd_stop(struct rsnd_mod *mod,
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_cmd_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ 0x180 + rsnd_mod_id_raw(mod) * 0x20, 0x30);
+}
+#define DEBUG_INFO .debug_info = rsnd_cmd_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_cmd_ops = {
.name = CMD_NAME,
.init = rsnd_cmd_init,
.start = rsnd_cmd_start,
.stop = rsnd_cmd_stop,
.get_status = rsnd_mod_get_status,
+ DEBUG_INFO
};
static struct rsnd_mod *rsnd_cmd_mod_get(struct rsnd_priv *priv, int id)
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 8696a993c478..5e382b5c9d45 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -90,14 +90,6 @@
*
*/
-/*
- * you can enable below define if you don't need
- * DAI status debug message when debugging
- * see rsnd_dbg_dai_call()
- *
- * #define RSND_DEBUG_NO_DAI_CALL 1
- */
-
#include <linux/pm_runtime.h>
#include "rsnd.h"
@@ -267,8 +259,9 @@ int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
*/
if (params)
return params_channels(params);
- else
+ else if (runtime)
return runtime->channels;
+ return 0;
}
int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
@@ -533,16 +526,22 @@ static enum rsnd_mod_type rsnd_mod_sequence[][RSND_MOD_MAX] = {
},
};
-static int rsnd_status_update(u32 *status,
+static int rsnd_status_update(struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod, enum rsnd_mod_type type,
int shift, int add, int timing)
{
+ u32 *status = mod->ops->get_status(mod, io, type);
u32 mask = 0xF << shift;
u8 val = (*status >> shift) & 0xF;
u8 next_val = (val + add) & 0xF;
int func_call = (val == timing);
+ /* no status update */
+ if (add == 0 || shift == 28)
+ return 1;
+
if (next_val == 0xF) /* underflow case */
- func_call = 0;
+ func_call = -1;
else
*status = (*status & ~mask) + (next_val << shift);
@@ -558,19 +557,16 @@ static int rsnd_status_update(u32 *status,
enum rsnd_mod_type *types = rsnd_mod_sequence[is_play]; \
for_each_rsnd_mod_arrays(i, mod, io, types, RSND_MOD_MAX) { \
int tmp = 0; \
- u32 *status = mod->ops->get_status(mod, io, types[i]); \
- int func_call = rsnd_status_update(status, \
+ int func_call = rsnd_status_update(io, mod, types[i], \
__rsnd_mod_shift_##fn, \
__rsnd_mod_add_##fn, \
__rsnd_mod_call_##fn); \
- rsnd_dbg_dai_call(dev, "%s\t0x%08x %s\n", \
- rsnd_mod_name(mod), *status, \
- (func_call && (mod)->ops->fn) ? #fn : ""); \
- if (func_call && (mod)->ops->fn) \
+ if (func_call > 0 && (mod)->ops->fn) \
tmp = (mod)->ops->fn(mod, io, param); \
- if (tmp && (tmp != -EPROBE_DEFER)) \
- dev_err(dev, "%s : %s error %d\n", \
- rsnd_mod_name(mod), #fn, tmp); \
+ if (unlikely(func_call < 0) || \
+ unlikely(tmp && (tmp != -EPROBE_DEFER))) \
+ dev_err(dev, "%s : %s error (%d, %d)\n", \
+ rsnd_mod_name(mod), #fn, tmp, func_call);\
ret |= tmp; \
} \
ret; \
@@ -760,10 +756,10 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
/* set clock master for audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBP_CFP:
rdai->clk_master = 0;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
rdai->clk_master = 1; /* cpu is master */
break;
default:
@@ -1043,6 +1039,31 @@ static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
return rsnd_dai_call(prepare, io, priv);
}
+static u64 rsnd_soc_dai_formats[] = {
+ /*
+ * 1st Priority
+ *
+ * Well tested formats.
+ * Select below from Sound Card, not auto
+ * SND_SOC_DAIFMT_CBC_CFC
+ * SND_SOC_DAIFMT_CBP_CFP
+ */
+ SND_SOC_POSSIBLE_DAIFMT_I2S |
+ SND_SOC_POSSIBLE_DAIFMT_RIGHT_J |
+ SND_SOC_POSSIBLE_DAIFMT_LEFT_J |
+ SND_SOC_POSSIBLE_DAIFMT_NB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_NB_IF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_IF,
+ /*
+ * 2nd Priority
+ *
+ * Supported, but not well tested
+ */
+ SND_SOC_POSSIBLE_DAIFMT_DSP_A |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_B,
+};
+
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.startup = rsnd_soc_dai_startup,
.shutdown = rsnd_soc_dai_shutdown,
@@ -1050,6 +1071,8 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.set_fmt = rsnd_soc_dai_set_fmt,
.set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
.prepare = rsnd_soc_dai_prepare,
+ .auto_selectable_formats = rsnd_soc_dai_formats,
+ .num_auto_selectable_formats = ARRAY_SIZE(rsnd_soc_dai_formats),
};
static void rsnd_parse_tdm_split_mode(struct rsnd_priv *priv,
@@ -1129,7 +1152,7 @@ static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
of_node_put(remote_node);
}
-void rsnd_parse_connect_common(struct rsnd_dai *rdai,
+void rsnd_parse_connect_common(struct rsnd_dai *rdai, char *name,
struct rsnd_mod* (*mod_get)(struct rsnd_priv *priv, int id),
struct device_node *node,
struct device_node *playback,
@@ -1144,7 +1167,11 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
i = 0;
for_each_child_of_node(node, np) {
- struct rsnd_mod *mod = mod_get(priv, i);
+ struct rsnd_mod *mod;
+
+ i = rsnd_node_fixed_index(np, name, i);
+
+ mod = mod_get(priv, i);
if (np == playback)
rsnd_dai_connect(mod, &rdai->playback, mod->type);
@@ -1156,6 +1183,56 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
of_node_put(node);
}
+int rsnd_node_fixed_index(struct device_node *node, char *name, int idx)
+{
+ char node_name[16];
+
+ /*
+ * rsnd assumes device nodes are numbered sequentially,
+ * but some of them are not.
+ * This function adjusts the index for such cases.
+ *
+ * ex)
+ * Normal case, special case
+ * ssi-0
+ * ssi-1
+ * ssi-2
+ * ssi-3 ssi-3
+ * ssi-4 ssi-4
+ * ...
+ *
+ * assume Max 64 node
+ */
+ for (; idx < 64; idx++) {
+ snprintf(node_name, sizeof(node_name), "%s-%d", name, idx);
+
+ if (strncmp(node_name, of_node_full_name(node), sizeof(node_name)) == 0)
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
+int rsnd_node_count(struct rsnd_priv *priv, struct device_node *node, char *name)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct device_node *np;
+ int i;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ i = rsnd_node_fixed_index(np, name, i);
+ if (i < 0) {
+ dev_err(dev, "strange node numbering (%s)",
+ of_node_full_name(node));
+ return 0;
+ }
+ i++;
+ }
+
+ return i;
+}
+
static struct device_node *rsnd_dai_of_node(struct rsnd_priv *priv,
int *is_graph)
{
@@ -1388,6 +1465,26 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
/*
* pcm ops
*/
+static int rsnd_hw_update(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (hw_params)
+ ret = rsnd_dai_call(hw_params, io, substream, hw_params);
+ else
+ ret = rsnd_dai_call(hw_free, io, substream);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
static int rsnd_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
@@ -1495,17 +1592,13 @@ static int rsnd_hw_params(struct snd_soc_component *component,
}
}
- return rsnd_dai_call(hw_params, io, substream, hw_params);
+ return rsnd_hw_update(substream, hw_params);
}
static int rsnd_hw_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
- struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
-
- return rsnd_dai_call(hw_free, io, substream);
+ return rsnd_hw_update(substream, NULL);
}
static snd_pcm_uframes_t rsnd_pointer(struct snd_soc_component *component,
@@ -1715,6 +1808,7 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
*/
static const struct snd_soc_component_driver rsnd_soc_component = {
.name = "rsnd",
+ .probe = rsnd_debugfs_probe,
.hw_params = rsnd_hw_params,
.hw_free = rsnd_hw_free,
.pointer = rsnd_pointer,
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 20eecd088d13..6156445bcb69 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -275,6 +275,19 @@ static int rsnd_ctu_id_sub(struct rsnd_mod *mod)
return mod->id % 4;
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_ctu_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ 0x500 + rsnd_mod_id_raw(mod) * 0x100, 0x100);
+}
+#define DEBUG_INFO .debug_info = rsnd_ctu_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_ctu_ops = {
.name = CTU_NAME,
.probe = rsnd_ctu_probe_,
@@ -285,6 +298,7 @@ static struct rsnd_mod_ops rsnd_ctu_ops = {
.id = rsnd_ctu_id,
.id_sub = rsnd_ctu_id_sub,
.id_cmd = rsnd_mod_id_raw,
+ DEBUG_INFO
};
struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id)
diff --git a/sound/soc/sh/rcar/debugfs.c b/sound/soc/sh/rcar/debugfs.c
new file mode 100644
index 000000000000..26d3b310b9db
--- /dev/null
+++ b/sound/soc/sh/rcar/debugfs.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car debugfs support
+//
+// Copyright (c) 2021 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// > mount -t debugfs none /sys/kernel/debug
+// > cd /sys/kernel/debug/asoc/rcar-sound/ec500000.sound/rdai{N}/
+// > cat playback/xxx
+// > cat capture/xxx
+//
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include "rsnd.h"
+
+static int rsnd_debugfs_show(struct seq_file *m, void *v)
+{
+ struct rsnd_dai_stream *io = m->private;
+ struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ int i;
+
+ /* adg is out of mods */
+ rsnd_adg_clk_dbg_info(priv, m);
+
+ for_each_rsnd_mod(i, mod, io) {
+ u32 *status = mod->ops->get_status(mod, io, mod->type);
+
+ seq_printf(m, "name: %s\n", rsnd_mod_name(mod));
+ seq_printf(m, "status: %08x\n", *status);
+
+ if (mod->ops->debug_info)
+ mod->ops->debug_info(m, io, mod);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rsnd_debugfs);
+
+void rsnd_debugfs_reg_show(struct seq_file *m, phys_addr_t _addr,
+ void __iomem *base, int offset, int size)
+{
+ int i, j;
+
+ for (i = 0; i < size; i += 0x10) {
+ phys_addr_t addr = _addr + offset + i;
+
+ seq_printf(m, "%pa:", &addr);
+ for (j = 0; j < 0x10; j += 0x4)
+ seq_printf(m, " %08x", __raw_readl(base + offset + i + j));
+ seq_puts(m, "\n");
+ }
+}
+
+void rsnd_debugfs_mod_reg_show(struct seq_file *m, struct rsnd_mod *mod,
+ int reg_id, int offset, int size)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+
+ rsnd_debugfs_reg_show(m,
+ rsnd_gen_get_phy_addr(priv, reg_id),
+ rsnd_gen_get_base_addr(priv, reg_id),
+ offset, size);
+}
+
+int rsnd_debugfs_probe(struct snd_soc_component *component)
+{
+ struct rsnd_priv *priv = dev_get_drvdata(component->dev);
+ struct rsnd_dai *rdai;
+ struct dentry *dir;
+ char name[64];
+ int i;
+
+ /* Gen1 is not supported */
+ if (rsnd_is_gen1(priv))
+ return 0;
+
+ for_each_rsnd_dai(rdai, priv, i) {
+ /*
+ * created debugfs will be automatically
+ * removed, nothing to do for _remove.
+ * see
+ * soc_cleanup_component_debugfs()
+ */
+ snprintf(name, sizeof(name), "rdai%d", i);
+ dir = debugfs_create_dir(name, component->debugfs_root);
+
+ debugfs_create_file("playback", 0444, dir, &rdai->playback, &rsnd_debugfs_fops);
+ debugfs_create_file("capture", 0444, dir, &rdai->capture, &rsnd_debugfs_fops);
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
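
Note: the new debugfs.c relies on the standard seq_file helpers: DEFINE_SHOW_ATTRIBUTE()
generates the file_operations, and debugfs_create_dir()/debugfs_create_file() expose one node
per stream. A minimal stand-alone sketch of that pattern (names are illustrative, not the rsnd
API):

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_status_show(struct seq_file *m, void *v)
	{
		/* m->private carries the pointer passed to debugfs_create_file() */
		seq_puts(m, "status: ok\n");
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(example_status);

	static void example_debugfs_init(struct dentry *parent, void *data)
	{
		struct dentry *dir = debugfs_create_dir("example", parent);

		/* read-only node; it is removed recursively with the parent dir */
		debugfs_create_file("status", 0444, dir, data, &example_status_fops);
	}
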
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 95aa26d62e4f..82d16e037d9a 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -44,7 +44,8 @@ struct rsnd_dma {
};
struct rsnd_dma_ctrl {
- void __iomem *base;
+ void __iomem *ppbase;
+ phys_addr_t ppres;
int dmaen_num;
int dmapp_num;
};
@@ -236,16 +237,18 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
return 0;
}
-struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
- struct rsnd_mod *mod, char *name)
+struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node, char *name,
+ struct rsnd_mod *mod, char *x)
{
struct dma_chan *chan = NULL;
struct device_node *np;
int i = 0;
for_each_child_of_node(of_node, np) {
+ i = rsnd_node_fixed_index(np, name, i);
+
if (i == rsnd_mod_id_raw(mod) && (!chan))
- chan = of_dma_request_slave_channel(np, name);
+ chan = of_dma_request_slave_channel(np, x);
i++;
}
@@ -415,7 +418,7 @@ static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
}
#define rsnd_dmapp_addr(dmac, dma, reg) \
- (dmac->base + 0x20 + reg + \
+ (dmac->ppbase + 0x20 + reg + \
(0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
@@ -504,12 +507,31 @@ static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_dmapp_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+ struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
+
+ rsnd_debugfs_reg_show(m, dmac->ppres, dmac->ppbase,
+ 0x20 + 0x10 * dmapp->dmapp_id, 0x10);
+}
+#define DEBUG_INFO .debug_info = rsnd_dmapp_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_dmapp_ops = {
.name = "audmac-pp",
.start = rsnd_dmapp_start,
.stop = rsnd_dmapp_stop,
.quit = rsnd_dmapp_stop,
.get_status = rsnd_mod_get_status,
+ DEBUG_INFO
};
/*
@@ -864,9 +886,10 @@ int rsnd_dma_probe(struct rsnd_priv *priv)
}
dmac->dmapp_num = 0;
- dmac->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(dmac->base))
- return PTR_ERR(dmac->base);
+ dmac->ppres = res->start;
+ dmac->ppbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dmac->ppbase))
+ return PTR_ERR(dmac->ppbase);
priv->dma = dmac;
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 8d91c0eb0880..5137e03a9d7c 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -282,9 +282,22 @@ static struct dma_chan *rsnd_dvc_dma_req(struct rsnd_dai_stream *io,
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
return rsnd_dma_request_channel(rsnd_dvc_of_node(priv),
- mod, "tx");
+ DVC_NAME, mod, "tx");
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_dvc_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ 0xe00 + rsnd_mod_id(mod) * 0x100, 0x60);
+}
+#define DEBUG_INFO .debug_info = rsnd_dvc_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_dvc_ops = {
.name = DVC_NAME,
.dma_req = rsnd_dvc_dma_req,
@@ -293,6 +306,7 @@ static struct rsnd_mod_ops rsnd_dvc_ops = {
.quit = rsnd_dvc_quit,
.pcm_new = rsnd_dvc_pcm_new,
.get_status = rsnd_mod_get_status,
+ DEBUG_INFO
};
struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id)
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 8bd49c8a9517..925565baaa41 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -141,6 +141,15 @@ phys_addr_t rsnd_gen_get_phy_addr(struct rsnd_priv *priv, int reg_id)
return gen->res[reg_id];
}
+#ifdef CONFIG_DEBUG_FS
+void __iomem *rsnd_gen_get_base_addr(struct rsnd_priv *priv, int reg_id)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ return gen->base[reg_id];
+}
+#endif
+
#define rsnd_gen_regmap_init(priv, id_size, reg_id, name, conf) \
_rsnd_gen_regmap_init(priv, id_size, reg_id, name, conf, ARRAY_SIZE(conf))
static int _rsnd_gen_regmap_init(struct rsnd_priv *priv,
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
index a3e0370f5704..3572c2c5686c 100644
--- a/sound/soc/sh/rcar/mix.c
+++ b/sound/soc/sh/rcar/mix.c
@@ -250,6 +250,19 @@ static int rsnd_mix_pcm_new(struct rsnd_mod *mod,
return ret;
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_mix_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ 0xd00 + rsnd_mod_id(mod) * 0x40, 0x30);
+}
+#define DEBUG_INFO .debug_info = rsnd_mix_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_mix_ops = {
.name = MIX_NAME,
.probe = rsnd_mix_probe_,
@@ -257,6 +270,7 @@ static struct rsnd_mod_ops rsnd_mix_ops = {
.quit = rsnd_mix_quit,
.pcm_new = rsnd_mix_pcm_new,
.get_status = rsnd_mod_get_status,
+ DEBUG_INFO
};
struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 1255a85151db..6580bab0e229 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -269,8 +269,8 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
int rsnd_dma_attach(struct rsnd_dai_stream *io,
struct rsnd_mod *mod, struct rsnd_mod **dma_mod);
int rsnd_dma_probe(struct rsnd_priv *priv);
-struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
- struct rsnd_mod *mod, char *name);
+struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node, char *name,
+ struct rsnd_mod *mod, char *x);
/*
* R-Car sound mod
@@ -345,6 +345,11 @@ struct rsnd_mod_ops {
int (*id)(struct rsnd_mod *mod);
int (*id_sub)(struct rsnd_mod *mod);
int (*id_cmd)(struct rsnd_mod *mod);
+
+#ifdef CONFIG_DEBUG_FS
+ void (*debug_info)(struct seq_file *m,
+ struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+#endif
};
struct rsnd_dai_stream;
@@ -359,19 +364,13 @@ struct rsnd_mod {
/*
* status
*
- * 0xH0000CB0
+ * 0xH000DCB0
*
* B 0: init 1: quit
* C 0: start 1: stop
* D 0: hw_params 1: hw_free
*
* H is always called (see __rsnd_mod_call)
- * H 0: probe 1: remove
- * H 0: pcm_new
- * H 0: fallback
- * H 0: pointer
- * H 0: prepare
- * H 0: cleanup
*/
#define __rsnd_mod_shift_init 4
#define __rsnd_mod_shift_quit 4
@@ -392,12 +391,12 @@ struct rsnd_mod {
#define __rsnd_mod_add_remove 0
#define __rsnd_mod_add_prepare 0
#define __rsnd_mod_add_cleanup 0
-#define __rsnd_mod_add_init 1
-#define __rsnd_mod_add_quit -1
-#define __rsnd_mod_add_start 1
-#define __rsnd_mod_add_stop -1
-#define __rsnd_mod_add_hw_params 1
-#define __rsnd_mod_add_hw_free -1
+#define __rsnd_mod_add_init 1 /* needs protect */
+#define __rsnd_mod_add_quit -1 /* needs protect */
+#define __rsnd_mod_add_start 1 /* needs protect */
+#define __rsnd_mod_add_stop -1 /* needs protect */
+#define __rsnd_mod_add_hw_params 1 /* needs protect */
+#define __rsnd_mod_add_hw_free -1 /* needs protect */
#define __rsnd_mod_add_irq 0
#define __rsnd_mod_add_pcm_new 0
#define __rsnd_mod_add_fallback 0
@@ -407,16 +406,16 @@ struct rsnd_mod {
#define __rsnd_mod_call_remove 0
#define __rsnd_mod_call_prepare 0
#define __rsnd_mod_call_cleanup 0
-#define __rsnd_mod_call_init 0
-#define __rsnd_mod_call_quit 1
-#define __rsnd_mod_call_start 0
-#define __rsnd_mod_call_stop 1
+#define __rsnd_mod_call_init 0 /* needs protect */
+#define __rsnd_mod_call_quit 1 /* needs protect */
+#define __rsnd_mod_call_start 0 /* needs protect */
+#define __rsnd_mod_call_stop 1 /* needs protect */
+#define __rsnd_mod_call_hw_params 0 /* needs protect */
+#define __rsnd_mod_call_hw_free 1 /* needs protect */
#define __rsnd_mod_call_irq 0
#define __rsnd_mod_call_pcm_new 0
#define __rsnd_mod_call_fallback 0
-#define __rsnd_mod_call_hw_params 0
#define __rsnd_mod_call_pointer 0
-#define __rsnd_mod_call_hw_free 1
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_power_on(mod) clk_enable((mod)->clk)
@@ -455,11 +454,13 @@ struct rsnd_mod *rsnd_mod_next(int *iterator,
#define for_each_rsnd_mod_array(iterator, pos, io, array) \
for_each_rsnd_mod_arrays(iterator, pos, io, array, ARRAY_SIZE(array))
-void rsnd_parse_connect_common(struct rsnd_dai *rdai,
+void rsnd_parse_connect_common(struct rsnd_dai *rdai, char *name,
struct rsnd_mod* (*mod_get)(struct rsnd_priv *priv, int id),
struct device_node *node,
struct device_node *playback,
struct device_node *capture);
+int rsnd_node_count(struct rsnd_priv *priv, struct device_node *node, char *name);
+int rsnd_node_fixed_index(struct device_node *node, char *name, int idx);
int rsnd_channel_normalization(int chan);
#define rsnd_runtime_channel_original(io) \
@@ -592,6 +593,9 @@ void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
struct rsnd_mod *mod,
enum rsnd_reg reg);
phys_addr_t rsnd_gen_get_phy_addr(struct rsnd_priv *priv, int reg_id);
+#ifdef CONFIG_DEBUG_FS
+void __iomem *rsnd_gen_get_base_addr(struct rsnd_priv *priv, int reg_id);
+#endif
/*
* R-Car ADG
@@ -610,6 +614,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod,
#define rsnd_adg_clk_enable(priv) rsnd_adg_clk_control(priv, 1)
#define rsnd_adg_clk_disable(priv) rsnd_adg_clk_control(priv, 0)
void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
+void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct seq_file *m);
/*
* R-Car sound priv
@@ -776,6 +781,7 @@ void rsnd_ssi_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io);
u32 rsnd_ssi_multi_secondaries_runtime(struct rsnd_dai_stream *io);
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
#define rsnd_ssi_is_pin_sharing(io) \
__rsnd_ssi_is_pin_sharing(rsnd_io_to_mod_ssi(io))
@@ -799,6 +805,7 @@ void rsnd_parse_connect_ssiu(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
#define rsnd_ssiu_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SSIU)
+bool rsnd_ssiu_busif_err_status_clear(struct rsnd_mod *mod);
/*
* R-Car SRC
@@ -815,7 +822,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
#define rsnd_src_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SRC)
#define rsnd_parse_connect_src(rdai, playback, capture) \
- rsnd_parse_connect_common(rdai, rsnd_src_mod_get, \
+ rsnd_parse_connect_common(rdai, "src", rsnd_src_mod_get, \
rsnd_src_of_node(rsnd_rdai_to_priv(rdai)), \
playback, capture)
@@ -827,7 +834,7 @@ void rsnd_ctu_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
#define rsnd_ctu_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_CTU)
#define rsnd_parse_connect_ctu(rdai, playback, capture) \
- rsnd_parse_connect_common(rdai, rsnd_ctu_mod_get, \
+ rsnd_parse_connect_common(rdai, "ctu", rsnd_ctu_mod_get, \
rsnd_ctu_of_node(rsnd_rdai_to_priv(rdai)), \
playback, capture)
@@ -839,7 +846,7 @@ void rsnd_mix_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
#define rsnd_mix_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_MIX)
#define rsnd_parse_connect_mix(rdai, playback, capture) \
- rsnd_parse_connect_common(rdai, rsnd_mix_mod_get, \
+ rsnd_parse_connect_common(rdai, "mix", rsnd_mix_mod_get, \
rsnd_mix_of_node(rsnd_rdai_to_priv(rdai)), \
playback, capture)
@@ -851,7 +858,7 @@ void rsnd_dvc_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
#define rsnd_dvc_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_DVC)
#define rsnd_parse_connect_dvc(rdai, playback, capture) \
- rsnd_parse_connect_common(rdai, rsnd_dvc_mod_get, \
+ rsnd_parse_connect_common(rdai, "dvc", rsnd_dvc_mod_get, \
rsnd_dvc_of_node(rsnd_rdai_to_priv(rdai)), \
playback, capture)
@@ -879,9 +886,10 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type);
*
* #define RSND_DEBUG_NO_IRQ_STATUS 1
*/
-#define rsnd_dbg_irq_status(dev, param...) \
+#define rsnd_print_irq_status(dev, param...) do { \
if (!IS_BUILTIN(RSND_DEBUG_NO_IRQ_STATUS)) \
- dev_dbg(dev, param)
+ dev_info(dev, param); \
+} while (0)
/*
* If you don't need rsnd_dai_call debug message,
@@ -894,3 +902,14 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type);
dev_dbg(dev, param)
#endif
+
+#ifdef CONFIG_DEBUG_FS
+int rsnd_debugfs_probe(struct snd_soc_component *component);
+void rsnd_debugfs_reg_show(struct seq_file *m, phys_addr_t _addr,
+ void __iomem *base, int offset, int size);
+void rsnd_debugfs_mod_reg_show(struct seq_file *m, struct rsnd_mod *mod,
+ int reg_id, int offset, int size);
+
+#else
+#define rsnd_debugfs_probe NULL
+#endif
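
Note: rsnd.h keeps the debugfs probe hook optional: with CONFIG_DEBUG_FS disabled the symbol
collapses to NULL, so the snd_soc_component_driver assignment in core.c still compiles and the
core simply skips the callback. A small sketch of the same header pattern (names are
illustrative):

	/* example.h */
	struct snd_soc_component;

	#ifdef CONFIG_DEBUG_FS
	int example_debugfs_probe(struct snd_soc_component *component);
	#else
	/* with debugfs disabled the hook degrades to NULL; callers are
	 * expected to check for a NULL .probe before invoking it */
	#define example_debugfs_probe NULL
	#endif
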
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 628af8f3920d..42a100c6303d 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -17,7 +17,7 @@
/*
* you can enable below define if you don't need
* SSI interrupt status debug message when debugging
- * see rsnd_dbg_irq_status()
+ * see rsnd_print_irq_status()
*
* #define RSND_DEBUG_NO_IRQ_STATUS 1
*/
@@ -82,7 +82,7 @@ static struct dma_chan *rsnd_src_dma_req(struct rsnd_dai_stream *io,
int is_play = rsnd_io_is_play(io);
return rsnd_dma_request_channel(rsnd_src_of_node(priv),
- mod,
+ SRC_NAME, mod,
is_play ? "rx" : "tx");
}
@@ -421,8 +421,8 @@ static bool rsnd_src_error_occurred(struct rsnd_mod *mod)
status0 = rsnd_mod_read(mod, SCU_SYS_STATUS0);
status1 = rsnd_mod_read(mod, SCU_SYS_STATUS1);
if ((status0 & val0) || (status1 & val1)) {
- rsnd_dbg_irq_status(dev, "%s err status : 0x%08x, 0x%08x\n",
- rsnd_mod_name(mod), status0, status1);
+ rsnd_print_irq_status(dev, "%s err status : 0x%08x, 0x%08x\n",
+ rsnd_mod_name(mod), status0, status1);
ret = true;
}
@@ -597,6 +597,25 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
return ret;
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_src_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ rsnd_mod_id(mod) * 0x20, 0x20);
+ seq_puts(m, "\n");
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ 0x1c0, 0x20);
+ seq_puts(m, "\n");
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SCU,
+ 0x200 + rsnd_mod_id(mod) * 0x40, 0x40);
+}
+#define DEBUG_INFO .debug_info = rsnd_src_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_src_ops = {
.name = SRC_NAME,
.dma_req = rsnd_src_dma_req,
@@ -608,6 +627,7 @@ static struct rsnd_mod_ops rsnd_src_ops = {
.irq = rsnd_src_irq,
.pcm_new = rsnd_src_pcm_new,
.get_status = rsnd_mod_get_status,
+ DEBUG_INFO
};
struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id)
@@ -636,7 +656,7 @@ int rsnd_src_probe(struct rsnd_priv *priv)
if (!node)
return 0; /* not used is not error */
- nr = of_get_child_count(node);
+ nr = rsnd_node_count(priv, node, SRC_NAME);
if (!nr) {
ret = -EINVAL;
goto rsnd_src_probe_done;
@@ -656,6 +676,8 @@ int rsnd_src_probe(struct rsnd_priv *priv)
if (!of_device_is_available(np))
goto skip;
+ i = rsnd_node_fixed_index(np, SRC_NAME, i);
+
src = rsnd_src_get(priv, i);
snprintf(name, RSND_SRC_NAME_SIZE, "%s.%d",
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index e29482c26d6a..27f34ca6059d 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -11,7 +11,7 @@
/*
* you can enable below define if you don't need
* SSI interrupt status debug message when debugging
- * see rsnd_dbg_irq_status()
+ * see rsnd_print_irq_status()
*
* #define RSND_DEBUG_NO_IRQ_STATUS 1
*/
@@ -117,8 +117,6 @@ struct rsnd_ssi {
(rsnd_ssi_run_mods(io) & (1 << rsnd_mod_id(mod)))
#define rsnd_ssi_can_output_clk(mod) (!__rsnd_ssi_is_pin_sharing(mod))
-static int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
-
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
{
struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
@@ -359,96 +357,6 @@ static void rsnd_ssi_master_clk_stop(struct rsnd_mod *mod,
rsnd_adg_ssi_clk_stop(mod);
}
-/* enable busif buffer over/under run interrupt. */
-#define rsnd_ssi_busif_err_irq_enable(mod) rsnd_ssi_busif_err_irq_ctrl(mod, 1)
-#define rsnd_ssi_busif_err_irq_disable(mod) rsnd_ssi_busif_err_irq_ctrl(mod, 0)
-static void rsnd_ssi_busif_err_irq_ctrl(struct rsnd_mod *mod, int enable)
-{
- u32 sys_int_enable = 0;
- int id = rsnd_mod_id(mod);
- int i;
-
- switch (id) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- for (i = 0; i < 4; i++) {
- sys_int_enable = rsnd_mod_read(mod, SSI_SYS_INT_ENABLE(i * 2));
- if (enable)
- sys_int_enable |= 0xf << (id * 4);
- else
- sys_int_enable &= ~(0xf << (id * 4));
- rsnd_mod_write(mod,
- SSI_SYS_INT_ENABLE(i * 2),
- sys_int_enable);
- }
- break;
- case 9:
- for (i = 0; i < 4; i++) {
- sys_int_enable = rsnd_mod_read(mod, SSI_SYS_INT_ENABLE((i * 2) + 1));
- if (enable)
- sys_int_enable |= 0xf << 4;
- else
- sys_int_enable &= ~(0xf << 4);
- rsnd_mod_write(mod,
- SSI_SYS_INT_ENABLE((i * 2) + 1),
- sys_int_enable);
- }
- break;
- }
-}
-
-static bool rsnd_ssi_busif_err_status_clear(struct rsnd_mod *mod)
-{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- struct device *dev = rsnd_priv_to_dev(priv);
- u32 status;
- bool stop = false;
- int id = rsnd_mod_id(mod);
- int i;
-
- switch (id) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- for (i = 0; i < 4; i++) {
- status = rsnd_mod_read(mod, SSI_SYS_STATUS(i * 2));
- status &= 0xf << (id * 4);
-
- if (status) {
- rsnd_dbg_irq_status(dev, "%s err status : 0x%08x\n",
- rsnd_mod_name(mod), status);
- rsnd_mod_write(mod,
- SSI_SYS_STATUS(i * 2),
- 0xf << (id * 4));
- stop = true;
- }
- }
- break;
- case 9:
- for (i = 0; i < 4; i++) {
- status = rsnd_mod_read(mod, SSI_SYS_STATUS((i * 2) + 1));
- status &= 0xf << 4;
-
- if (status) {
- rsnd_dbg_irq_status(dev, "%s err status : 0x%08x\n",
- rsnd_mod_name(mod), status);
- rsnd_mod_write(mod,
- SSI_SYS_STATUS((i * 2) + 1),
- 0xf << 4);
- stop = true;
- }
- }
- break;
- }
-
- return stop;
-}
-
static void rsnd_ssi_config_init(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
@@ -536,10 +444,6 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_mode = DIEN; /* PIO : enable Data interrupt */
}
- /* enable busif buffer over/under run interrupt. */
- if (is_tdm || is_tdm_split)
- rsnd_ssi_busif_err_irq_enable(mod);
-
init_end:
ssi->cr_own = cr_own;
ssi->cr_mode = cr_mode;
@@ -594,10 +498,6 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- int is_tdm, is_tdm_split;
-
- is_tdm = rsnd_runtime_is_tdm(io);
- is_tdm_split = rsnd_runtime_is_tdm_split(io);
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
@@ -619,10 +519,6 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
ssi->wsr = 0;
}
- /* disable busif buffer over/under run interrupt. */
- if (is_tdm || is_tdm_split)
- rsnd_ssi_busif_err_irq_disable(mod);
-
return 0;
}
@@ -775,10 +671,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
u32 status;
bool elapsed = false;
bool stop = false;
- int is_tdm, is_tdm_split;
-
- is_tdm = rsnd_runtime_is_tdm(io);
- is_tdm_split = rsnd_runtime_is_tdm_split(io);
spin_lock(&priv->lock);
@@ -794,14 +686,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
/* DMA only */
if (is_dma && (status & (UIRQ | OIRQ))) {
- rsnd_dbg_irq_status(dev, "%s err status : 0x%08x\n",
- rsnd_mod_name(mod), status);
+ rsnd_print_irq_status(dev, "%s err status : 0x%08x\n",
+ rsnd_mod_name(mod), status);
stop = true;
}
- if (is_tdm || is_tdm_split)
- stop |= rsnd_ssi_busif_err_status_clear(mod);
+ stop |= rsnd_ssiu_busif_err_status_clear(mod);
rsnd_ssi_status_clear(mod);
rsnd_ssi_interrupt_out:
@@ -1128,8 +1019,36 @@ static struct dma_chan *rsnd_ssi_dma_req(struct rsnd_dai_stream *io,
name = is_play ? "rx" : "tx";
return rsnd_dma_request_channel(rsnd_ssi_of_node(priv),
- mod, name);
+ SSI_NAME, mod, name);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_ssi_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ seq_printf(m, "clock: %s\n", rsnd_rdai_is_clk_master(rdai) ?
+ "provider" : "consumer");
+ seq_printf(m, "bit_clk_inv: %d\n", rdai->bit_clk_inv);
+ seq_printf(m, "frm_clk_inv: %d\n", rdai->frm_clk_inv);
+ seq_printf(m, "pin share: %d\n", __rsnd_ssi_is_pin_sharing(mod));
+ seq_printf(m, "can out clk: %d\n", rsnd_ssi_can_output_clk(mod));
+ seq_printf(m, "multi secondary: %d\n", rsnd_ssi_is_multi_secondary(mod, io));
+ seq_printf(m, "tdm: %d, %d\n", rsnd_runtime_is_tdm(io),
+ rsnd_runtime_is_tdm_split(io));
+ seq_printf(m, "chan: %d\n", ssi->chan);
+ seq_printf(m, "user: %d\n", ssi->usrcnt);
+
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SSI,
+ rsnd_mod_id(mod) * 0x40, 0x40);
}
+#define DEBUG_INFO .debug_info = rsnd_ssi_debug_info
+#else
+#define DEBUG_INFO
+#endif
static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.name = SSI_NAME,
@@ -1145,9 +1064,10 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.fallback = rsnd_ssi_fallback,
.hw_params = rsnd_ssi_hw_params,
.get_status = rsnd_ssi_get_status,
+ DEBUG_INFO
};
-static int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
{
return mod->ops == &rsnd_ssi_dma_ops;
}
@@ -1195,7 +1115,11 @@ void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
i = 0;
for_each_child_of_node(node, np) {
- struct rsnd_mod *mod = rsnd_ssi_mod_get(priv, i);
+ struct rsnd_mod *mod;
+
+ i = rsnd_node_fixed_index(np, SSI_NAME, i);
+
+ mod = rsnd_ssi_mod_get(priv, i);
if (np == playback)
rsnd_ssi_connect(mod, &rdai->playback);
@@ -1238,7 +1162,7 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
if (!node)
return -EINVAL;
- nr = of_get_child_count(node);
+ nr = rsnd_node_count(priv, node, SSI_NAME);
if (!nr) {
ret = -EINVAL;
goto rsnd_ssi_probe_done;
@@ -1258,6 +1182,8 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
if (!of_device_is_available(np))
goto skip;
+ i = rsnd_node_fixed_index(np, SSI_NAME, i);
+
ssi = rsnd_ssi_get(priv, i);
snprintf(name, RSND_SSI_NAME_SIZE, "%s.%d",
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 852cdeedf7e9..0d8f97633dd2 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -45,6 +45,85 @@ struct rsnd_ssiu {
static const int gen2_id[] = { 0, 4, 8, 12, 13, 14, 15, 16, 17, 18 };
static const int gen3_id[] = { 0, 8, 16, 24, 32, 40, 41, 42, 43, 44 };
+/* enable busif buffer over/under run interrupt. */
+#define rsnd_ssiu_busif_err_irq_enable(mod) rsnd_ssiu_busif_err_irq_ctrl(mod, 1)
+#define rsnd_ssiu_busif_err_irq_disable(mod) rsnd_ssiu_busif_err_irq_ctrl(mod, 0)
+static void rsnd_ssiu_busif_err_irq_ctrl(struct rsnd_mod *mod, int enable)
+{
+ int id = rsnd_mod_id(mod);
+ int shift, offset;
+ int i;
+
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ shift = id;
+ offset = 0;
+ break;
+ case 9:
+ shift = 1;
+ offset = 1;
+ break;
+ }
+
+ for (i = 0; i < 4; i++) {
+ enum rsnd_reg reg = SSI_SYS_INT_ENABLE((i * 2) + offset);
+ u32 val = 0xf << (shift * 4);
+ u32 sys_int_enable = rsnd_mod_read(mod, reg);
+
+ if (enable)
+ sys_int_enable |= val;
+ else
+ sys_int_enable &= ~val;
+ rsnd_mod_write(mod, reg, sys_int_enable);
+ }
+}
+
+bool rsnd_ssiu_busif_err_status_clear(struct rsnd_mod *mod)
+{
+ bool error = false;
+ int id = rsnd_mod_id(mod);
+ int shift, offset;
+ int i;
+
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ shift = id;
+ offset = 0;
+ break;
+ case 9:
+ shift = 1;
+ offset = 1;
+ break;
+ }
+
+ for (i = 0; i < 4; i++) {
+ u32 reg = SSI_SYS_STATUS(i * 2) + offset;
+ u32 status = rsnd_mod_read(mod, reg);
+ u32 val = 0xf << (shift * 4);
+
+ status &= val;
+ if (status) {
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ rsnd_print_irq_status(dev, "%s err status : 0x%08x\n",
+ rsnd_mod_name(mod), status);
+ error = true;
+ }
+ rsnd_mod_write(mod, reg, val);
+ }
+
+ return error;
+}
+
static u32 *rsnd_ssiu_get_status(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
enum rsnd_mod_type type)
@@ -65,23 +144,9 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
int id = rsnd_mod_id(mod);
int is_clk_master = rsnd_rdai_is_clk_master(rdai);
u32 val1, val2;
- int i;
/* clear status */
- switch (id) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- for (i = 0; i < 4; i++)
- rsnd_mod_write(mod, SSI_SYS_STATUS(i * 2), 0xf << (id * 4));
- break;
- case 9:
- for (i = 0; i < 4; i++)
- rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4);
- break;
- }
+ rsnd_ssiu_busif_err_status_clear(mod);
/*
* SSI_MODE0
@@ -137,12 +202,31 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
rsnd_mod_bset(mod, SSI_MODE1, 0x0013001f, val1);
rsnd_mod_bset(mod, SSI_MODE2, 0x00000017, val2);
+ /*
+ * Enable busif buffer over/under run interrupt.
+ * It will be handled from ssi.c
+ * see
+ * __rsnd_ssi_interrupt()
+ */
+ rsnd_ssiu_busif_err_irq_enable(mod);
+
+ return 0;
+}
+
+static int rsnd_ssiu_quit(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ /* disable busif buffer over/under run interrupt. */
+ rsnd_ssiu_busif_err_irq_disable(mod);
+
return 0;
}
static struct rsnd_mod_ops rsnd_ssiu_ops_gen1 = {
.name = SSIU_NAME,
.init = rsnd_ssiu_init,
+ .quit = rsnd_ssiu_quit,
.get_status = rsnd_ssiu_get_status,
};
@@ -311,9 +395,22 @@ static struct dma_chan *rsnd_ssiu_dma_req(struct rsnd_dai_stream *io,
name = is_play ? "rx" : "tx";
return rsnd_dma_request_channel(rsnd_ssiu_of_node(priv),
- mod, name);
+ SSIU_NAME, mod, name);
}
+#ifdef CONFIG_DEBUG_FS
+static void rsnd_ssiu_debug_info(struct seq_file *m,
+ struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ rsnd_debugfs_mod_reg_show(m, mod, RSND_GEN2_SSIU,
+ rsnd_mod_id(mod) * 0x80, 0x80);
+}
+#define DEBUG_INFO .debug_info = rsnd_ssiu_debug_info
+#else
+#define DEBUG_INFO
+#endif
+
static struct rsnd_mod_ops rsnd_ssiu_ops_gen2 = {
.name = SSIU_NAME,
.dma_req = rsnd_ssiu_dma_req,
@@ -321,6 +418,7 @@ static struct rsnd_mod_ops rsnd_ssiu_ops_gen2 = {
.start = rsnd_ssiu_start_gen2,
.stop = rsnd_ssiu_stop_gen2,
.get_status = rsnd_ssiu_get_status,
+ DEBUG_INFO
};
static struct rsnd_mod *rsnd_ssiu_mod_get(struct rsnd_priv *priv, int id)
@@ -336,16 +434,20 @@ static void rsnd_parse_connect_ssiu_compatible(struct rsnd_priv *priv,
{
struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
struct rsnd_ssiu *ssiu;
+ int is_dma_mode;
int i;
if (!ssi_mod)
return;
+ is_dma_mode = rsnd_ssi_is_dma_mode(ssi_mod);
+
/* select BUSIF0 */
for_each_rsnd_ssiu(ssiu, priv, i) {
struct rsnd_mod *mod = rsnd_mod_get(ssiu);
- if ((rsnd_mod_id(ssi_mod) == rsnd_mod_id(mod)) &&
+ if (is_dma_mode &&
+ (rsnd_mod_id(ssi_mod) == rsnd_mod_id(mod)) &&
(rsnd_mod_id_sub(mod) == 0)) {
rsnd_dai_connect(mod, io, mod->type);
return;
@@ -368,7 +470,11 @@ void rsnd_parse_connect_ssiu(struct rsnd_dai *rdai,
int i = 0;
for_each_child_of_node(node, np) {
- struct rsnd_mod *mod = rsnd_ssiu_mod_get(priv, i);
+ struct rsnd_mod *mod;
+
+ i = rsnd_node_fixed_index(np, SSIU_NAME, i);
+
+ mod = rsnd_ssiu_mod_get(priv, i);
if (np == playback)
rsnd_dai_connect(mod, io_p, mod->type);
@@ -405,10 +511,13 @@ int rsnd_ssiu_probe(struct rsnd_priv *priv)
*/
node = rsnd_ssiu_of_node(priv);
if (node)
- nr = of_get_child_count(node);
+ nr = rsnd_node_count(priv, node, SSIU_NAME);
else
nr = priv->ssi_nr;
+ if (!nr)
+ return -EINVAL;
+
ssiu = devm_kcalloc(dev, nr, sizeof(*ssiu), GFP_KERNEL);
if (!ssiu)
return -ENOMEM;
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index 3a5e84e16a87..c8dfd0de30e4 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -148,86 +148,75 @@ int snd_soc_component_set_bias_level(struct snd_soc_component *component,
return soc_component_ret(component, ret);
}
-static int soc_component_pin(struct snd_soc_component *component,
- const char *pin,
- int (*pin_func)(struct snd_soc_dapm_context *dapm,
- const char *pin))
-{
- struct snd_soc_dapm_context *dapm =
- snd_soc_component_get_dapm(component);
- char *full_name;
- int ret;
-
- if (!component->name_prefix) {
- ret = pin_func(dapm, pin);
- goto end;
- }
-
- full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
- if (!full_name) {
- ret = -ENOMEM;
- goto end;
- }
-
- ret = pin_func(dapm, full_name);
- kfree(full_name);
-end:
- return soc_component_ret(component, ret);
-}
-
int snd_soc_component_enable_pin(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_enable_pin);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_enable_pin(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_enable_pin_unlocked);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
int snd_soc_component_disable_pin(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_disable_pin);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_disable_pin(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_disable_pin_unlocked);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
int snd_soc_component_nc_pin(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_nc_pin);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_nc_pin(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_nc_pin_unlocked);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
int snd_soc_component_get_pin_status(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_get_pin_status);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_get_pin_status(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_force_enable_pin(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
@@ -235,7 +224,9 @@ int snd_soc_component_force_enable_pin_unlocked(
struct snd_soc_component *component,
const char *pin)
{
- return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin_unlocked);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
}
EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a76974ccfce1..583f2381cfc8 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -75,9 +75,9 @@ static ssize_t pmdown_time_show(struct device *dev,
return sprintf(buf, "%ld\n", rtd->pmdown_time);
}
-static ssize_t pmdown_time_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pmdown_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
int ret;
@@ -89,7 +89,7 @@ static ssize_t pmdown_time_set(struct device *dev,
return count;
}
-static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set);
+static DEVICE_ATTR_RW(pmdown_time);
static struct attribute *soc_dev_attrs[] = {
&dev_attr_pmdown_time.attr,
@@ -580,7 +580,7 @@ int snd_soc_suspend(struct device *dev)
* Due to the resume being scheduled into a workqueue we could
* suspend before that's finished - wait for it to complete.
*/
- snd_power_wait(card->snd_card, SNDRV_CTL_POWER_D0);
+ snd_power_wait(card->snd_card);
/* we're going to block userspace touching us until resume completes */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);
@@ -1054,6 +1054,218 @@ _err_defer:
}
EXPORT_SYMBOL_GPL(snd_soc_add_pcm_runtime);
+static void snd_soc_runtime_get_dai_fmt(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai_link *dai_link = rtd->dai_link;
+ struct snd_soc_dai *dai, *not_used;
+ struct device *dev = rtd->dev;
+ u64 pos, possible_fmt;
+ unsigned int mask = 0, dai_fmt = 0;
+ int i, j, priority, pri, until;
+
+ /*
+ * Get the selectable formats from each DAI.
+ *
+ ****************************
+ * NOTE
+ * Using .auto_selectable_formats is not mandatory;
+ * the format can still be selected manually from the sound card driver.
+ * When it is used, the driver should list only well-tested formats.
+ ****************************
+ *
+ * ex)
+ * auto_selectable_formats (= SND_SOC_POSSIBLE_xxx)
+ * (A) (B) (C)
+ * DAI0_: { 0x000F, 0x00F0, 0x0F00 };
+ * DAI1 : { 0xF000, 0x0F00 };
+ * (X) (Y)
+ *
+ * "until" will be 3 in this case (MAX array size from DAI0 and DAI1)
+ * Here is dev_dbg() message and comments
+ *
+ * priority = 1
+ * DAI0: (pri, fmt) = (1, 000000000000000F) // 1st check (A) DAI1 is not selected
+ * DAI1: (pri, fmt) = (0, 0000000000000000) // unavoidable empty check (pri 0)
+ * DAI0: (pri, fmt) = (1, 000000000000000F) // 2nd check (A)
+ * DAI1: (pri, fmt) = (1, 000000000000F000) // (X)
+ * priority = 2
+ * DAI0: (pri, fmt) = (2, 00000000000000FF) // 3rd check (A) + (B)
+ * DAI1: (pri, fmt) = (1, 000000000000F000) // (X)
+ * DAI0: (pri, fmt) = (2, 00000000000000FF) // 4th check (A) + (B)
+ * DAI1: (pri, fmt) = (2, 000000000000FF00) // (X) + (Y)
+ * priority = 3
+ * DAI0: (pri, fmt) = (3, 0000000000000FFF) // 5th check (A) + (B) + (C)
+ * DAI1: (pri, fmt) = (2, 000000000000FF00) // (X) + (Y)
+ * found auto selected format: 0000000000000F00
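+ * (= 0x0FFF & 0xFF00, the AND of the accumulated DAI0 and DAI1 bitmaps)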
+ */
+ until = snd_soc_dai_get_fmt_max_priority(rtd);
+ for (priority = 1; priority <= until; priority++) {
+
+ dev_dbg(dev, "priority = %d\n", priority);
+ for_each_rtd_dais(rtd, j, not_used) {
+
+ possible_fmt = ULLONG_MAX;
+ for_each_rtd_dais(rtd, i, dai) {
+ u64 fmt = 0;
+
+ pri = (j >= i) ? priority : priority - 1;
+ fmt = snd_soc_dai_get_fmt(dai, pri);
+ dev_dbg(dev, "%s: (pri, fmt) = (%d, %016llX)\n", dai->name, pri, fmt);
+ possible_fmt &= fmt;
+ }
+ if (possible_fmt)
+ goto found;
+ }
+ }
+ /* Not Found */
+ return;
+found:
+ dev_dbg(dev, "found auto selected format: %016llX\n", possible_fmt);
+
+ /*
+ * convert POSSIBLE_DAIFMT to DAIFMT
+ *
+ * The basic/default setting in each group is defined as 0, see
+ * SND_SOC_DAIFMT_NB_NF
+ * SND_SOC_DAIFMT_GATED
+ *
+ * The SND_SOC_DAIFMT_xxx_MASK checks below can't tell whether the
+ * sound card explicitly specified such a value, so it would be
+ * overwritten by the auto-selected value.
+ *
+ * To avoid that, loop from bit 63 down to 0 here. A smaller
+ * SND_SOC_POSSIBLE_xxx bit means a higher priority, and the
+ * basic/default setting of each group is defined as the
+ * highest-priority (= smallest) SND_SOC_POSSIBLE_xxx bit.
+ */
+ for (i = 63; i >= 0; i--) {
+ pos = 1ULL << i;
+ switch (possible_fmt & pos) {
+ /*
+ * for format
+ */
+ case SND_SOC_POSSIBLE_DAIFMT_I2S:
+ case SND_SOC_POSSIBLE_DAIFMT_RIGHT_J:
+ case SND_SOC_POSSIBLE_DAIFMT_LEFT_J:
+ case SND_SOC_POSSIBLE_DAIFMT_DSP_A:
+ case SND_SOC_POSSIBLE_DAIFMT_DSP_B:
+ case SND_SOC_POSSIBLE_DAIFMT_AC97:
+ case SND_SOC_POSSIBLE_DAIFMT_PDM:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_FORMAT_MASK) | i;
+ break;
+ /*
+ * for clock
+ */
+ case SND_SOC_POSSIBLE_DAIFMT_CONT:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_MASK) | SND_SOC_DAIFMT_CONT;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_GATED:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_MASK) | SND_SOC_DAIFMT_GATED;
+ break;
+ /*
+ * for clock invert
+ */
+ case SND_SOC_POSSIBLE_DAIFMT_NB_NF:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_NB_NF;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_NB_IF:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_NB_IF;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_IB_NF:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_IB_NF;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_IB_IF:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_IB_IF;
+ break;
+ /*
+ * for clock provider / consumer
+ */
+ case SND_SOC_POSSIBLE_DAIFMT_CBP_CFP:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBP_CFP;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_CBC_CFP:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBC_CFP;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_CBP_CFC:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBP_CFC;
+ break;
+ case SND_SOC_POSSIBLE_DAIFMT_CBC_CFC:
+ dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBC_CFC;
+ break;
+ }
+ }
+
+ /*
+ * Some drivers might have very complex limitations.
+ * In such cases the user may want to auto-select the unconstrained
+ * parts and specify the complex parts manually.
+ *
+ * Or, for example, if both CPU and Codec can be the clock provider,
+ * the user may still want to pick one manually for quality reasons.
+ *
+ * Keep whatever the sound card specified manually.
+ */
+ if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK))
+ mask |= SND_SOC_DAIFMT_FORMAT_MASK;
+ if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_CLOCK_MASK))
+ mask |= SND_SOC_DAIFMT_CLOCK_MASK;
+ if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_INV_MASK))
+ mask |= SND_SOC_DAIFMT_INV_MASK;
+ if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK))
+ mask |= SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
+
+ dai_link->dai_fmt |= (dai_fmt & mask);
+}
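/*
 * Illustrative sketch (not part of this patch): how a DAI driver might
 * advertise its well-tested formats for the auto-selection above. The
 * "example_*" names are made up; the ops fields are the ones added by
 * this series (see soc-dai.c and soc-utils.c below).
 */
static u64 example_dai_formats[] = {
	/* priority 1: fully tested */
	SND_SOC_POSSIBLE_DAIFMT_I2S	| SND_SOC_POSSIBLE_DAIFMT_CBC_CFC,
	/* priority 2: also usable */
	SND_SOC_POSSIBLE_DAIFMT_LEFT_J	| SND_SOC_POSSIBLE_DAIFMT_CBP_CFP,
};

static const struct snd_soc_dai_ops example_dai_ops = {
	.auto_selectable_formats	= example_dai_formats,
	.num_auto_selectable_formats	= ARRAY_SIZE(example_dai_formats),
};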
+
+/**
+ * snd_soc_runtime_set_dai_fmt() - Change DAI link format for an ASoC runtime
+ * @rtd: The runtime for which the DAI link format should be changed
+ * @dai_fmt: The new DAI link format
+ *
+ * This function updates the DAI link format for all DAIs connected to the DAI
+ * link for the specified runtime.
+ *
+ * Note: For setups with a static format, set the dai_fmt field in the
+ * corresponding snd_soc_dai_link struct instead of using this function.
+ *
+ * Returns 0 on success, otherwise a negative error code.
+ */
+int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
+ unsigned int dai_fmt)
+{
+ struct snd_soc_dai *cpu_dai;
+ struct snd_soc_dai *codec_dai;
+ unsigned int inv_dai_fmt;
+ unsigned int i;
+ int ret;
+
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
+ if (ret != 0 && ret != -ENOTSUPP)
+ return ret;
+ }
+
+ /*
+ * Flip the polarity for the "CPU" end of a CODEC<->CODEC link;
+ * the component that has non_legacy_dai_naming set is a Codec.
+ */
+ inv_dai_fmt = snd_soc_daifmt_clock_provider_fliped(dai_fmt);
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ unsigned int fmt = dai_fmt;
+
+ if (cpu_dai->component->driver->non_legacy_dai_naming)
+ fmt = inv_dai_fmt;
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ if (ret != 0 && ret != -ENOTSUPP)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt);
+
static int soc_init_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd)
{
@@ -1070,6 +1282,7 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
if (ret < 0)
return ret;
+ snd_soc_runtime_get_dai_fmt(rtd);
if (dai_link->dai_fmt) {
ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
if (ret)
@@ -1402,68 +1615,6 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
}
}
-/**
- * snd_soc_runtime_set_dai_fmt() - Change DAI link format for a ASoC runtime
- * @rtd: The runtime for which the DAI link format should be changed
- * @dai_fmt: The new DAI link format
- *
- * This function updates the DAI link format for all DAIs connected to the DAI
- * link for the specified runtime.
- *
- * Note: For setups with a static format set the dai_fmt field in the
- * corresponding snd_dai_link struct instead of using this function.
- *
- * Returns 0 on success, otherwise a negative error code.
- */
-int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
- unsigned int dai_fmt)
-{
- struct snd_soc_dai *cpu_dai;
- struct snd_soc_dai *codec_dai;
- unsigned int inv_dai_fmt;
- unsigned int i;
- int ret;
-
- for_each_rtd_codec_dais(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
- if (ret != 0 && ret != -ENOTSUPP)
- return ret;
- }
-
- /*
- * Flip the polarity for the "CPU" end of a CODEC<->CODEC link
- * the component which has non_legacy_dai_naming is Codec
- */
- inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
- switch (dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
- break;
- case SND_SOC_DAIFMT_CBS_CFS:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
- break;
- }
- for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
- unsigned int fmt = dai_fmt;
-
- if (cpu_dai->component->driver->non_legacy_dai_naming)
- fmt = inv_dai_fmt;
-
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret != 0 && ret != -ENOTSUPP)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt);
-
#ifdef CONFIG_DMI
/*
* If a DMI field contains strings from this blacklist (e.g.
@@ -2793,7 +2944,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
if (!routes) {
dev_err(card->dev,
"ASoC: Could not allocate DAPM route table\n");
- return -EINVAL;
+ return -ENOMEM;
}
for (i = 0; i < num_routes; i++) {
@@ -2853,10 +3004,54 @@ int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname)
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_aux_devs);
-unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
- const char *prefix,
- struct device_node **bitclkmaster,
- struct device_node **framemaster)
+unsigned int snd_soc_daifmt_clock_provider_fliped(unsigned int dai_fmt)
+{
+ unsigned int inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
+
+ switch (dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBC_CFC;
+ break;
+ case SND_SOC_DAIFMT_CBP_CFC:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBC_CFP;
+ break;
+ case SND_SOC_DAIFMT_CBC_CFP:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBP_CFC;
+ break;
+ case SND_SOC_DAIFMT_CBC_CFC:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBP_CFP;
+ break;
+ }
+
+ return inv_dai_fmt;
+}
+EXPORT_SYMBOL_GPL(snd_soc_daifmt_clock_provider_fliped);
+
+unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame)
+{
+ /*
+ * bit_frame is the return value from
+ * snd_soc_daifmt_parse_clock_provider_raw()
+ */
+
+ /* the flags describe the Codec side */
+ switch (bit_frame) {
+ case 0x11:
+ return SND_SOC_DAIFMT_CBP_CFP;
+ case 0x10:
+ return SND_SOC_DAIFMT_CBP_CFC;
+ case 0x01:
+ return SND_SOC_DAIFMT_CBC_CFP;
+ default:
+ return SND_SOC_DAIFMT_CBC_CFC;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_daifmt_clock_provider_from_bitmap);
+
+unsigned int snd_soc_daifmt_parse_format(struct device_node *np,
+ const char *prefix)
{
int ret, i;
char prop[128];
@@ -2936,10 +3131,24 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
break;
}
+ return format;
+}
+EXPORT_SYMBOL_GPL(snd_soc_daifmt_parse_format);
+
+unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
+ const char *prefix,
+ struct device_node **bitclkmaster,
+ struct device_node **framemaster)
+{
+ char prop[128];
+ unsigned int bit, frame;
+
+ if (!prefix)
+ prefix = "";
+
/*
* check "[prefix]bitclock-master"
* check "[prefix]frame-master"
- * SND_SOC_DAIFMT_MASTER_MASK area
*/
snprintf(prop, sizeof(prop), "%sbitclock-master", prefix);
bit = !!of_get_property(np, prop, NULL);
@@ -2951,24 +3160,14 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
if (frame && framemaster)
*framemaster = of_parse_phandle(np, prop, 0);
- switch ((bit << 4) + frame) {
- case 0x11:
- format |= SND_SOC_DAIFMT_CBM_CFM;
- break;
- case 0x10:
- format |= SND_SOC_DAIFMT_CBM_CFS;
- break;
- case 0x01:
- format |= SND_SOC_DAIFMT_CBS_CFM;
- break;
- default:
- format |= SND_SOC_DAIFMT_CBS_CFS;
- break;
- }
-
- return format;
+ /*
+ * Return the bitmap; it becomes the parameter of
+ * snd_soc_daifmt_clock_provider_from_bitmap().
+ */
+ return (bit << 4) + frame;
}
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
+EXPORT_SYMBOL_GPL(snd_soc_daifmt_parse_clock_provider_raw);
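/*
 * Illustrative sketch (not part of this patch): a card driver could combine
 * the three new helpers to obtain what snd_soc_of_parse_daifmt() used to
 * return; "example_parse_daifmt" is a made-up name.
 */
static unsigned int example_parse_daifmt(struct device_node *np, const char *prefix,
					 struct device_node **bitclkmaster,
					 struct device_node **framemaster)
{
	unsigned int fmt = snd_soc_daifmt_parse_format(np, prefix);
	unsigned int raw = snd_soc_daifmt_parse_clock_provider_raw(np, prefix,
								   bitclkmaster,
								   framemaster);

	return fmt | snd_soc_daifmt_clock_provider_from_bitmap(raw);
}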
int snd_soc_get_dai_id(struct device_node *ep)
{
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 080fbe053fc5..a56dcc8d6fb7 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -134,6 +134,69 @@ int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
+int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *dai;
+ int i, max = 0;
+
+ /*
+ * Return the maximum number of priority levels only if *ALL* DAIs
+ * have .auto_selectable_formats; otherwise return 0.
+ */
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->ops &&
+ dai->driver->ops->num_auto_selectable_formats)
+ max = max(max, dai->driver->ops->num_auto_selectable_formats);
+ else
+ return 0;
+ }
+
+ return max;
+}
+
+/**
+ * snd_soc_dai_get_fmt - get supported audio format.
+ * @dai: DAI
+ * @priority: priority level of supported audio format.
+ *
+ * This should return only formats implemented with high
+ * quality by the DAI so that the core can configure a
+ * format which will work well with other devices.
+ * For example devices which don't support both edges of the
+ * LRCLK signal in I2S style formats should only list DSP
+ * modes. This will mean that sometimes fewer formats
+ * are reported here than are supported by set_fmt().
+ */
+u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority)
+{
+ const struct snd_soc_dai_ops *ops = dai->driver->ops;
+ u64 fmt = 0;
+ int i, max = 0, until = priority;
+
+ /*
+ * Collect auto_selectable_formats until priority
+ *
+ * ex)
+ * auto_selectable_formats[] = { A, B, C };
+ * (A, B, C = SND_SOC_POSSIBLE_DAIFMT_xxx)
+ *
+ * priority = 1 : A
+ * priority = 2 : A | B
+ * priority = 3 : A | B | C
+ * priority = 4 : A | B | C
+ * ...
+ */
+ if (ops)
+ max = ops->num_auto_selectable_formats;
+
+ if (max < until)
+ until = max;
+
+ for (i = 0; i < until; i++)
+ fmt |= ops->auto_selectable_formats[i];
+
+ return fmt;
+}
+
/**
* snd_soc_dai_set_fmt - configure DAI hardware audio format.
* @dai: DAI
@@ -327,14 +390,15 @@ int snd_soc_dai_hw_params(struct snd_soc_dai *dai,
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int ret = 0;
- /* perform any topology hw_params fixups before DAI */
- ret = snd_soc_link_be_hw_params_fixup(rtd, params);
- if (ret < 0)
- goto end;
-
if (dai->driver->ops &&
- dai->driver->ops->hw_params)
+ dai->driver->ops->hw_params) {
+ /* perform any topology hw_params fixups before DAI */
+ ret = snd_soc_link_be_hw_params_fixup(rtd, params);
+ if (ret < 0)
+ goto end;
+
ret = dai->driver->ops->hw_params(substream, params, dai);
+ }
/* mark substream if succeeded */
if (ret == 0)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 8659089a87a0..d1c570ca21ea 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1015,6 +1015,7 @@ out:
static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int ret = -EINVAL, _ret = 0;
int rollback = 0;
@@ -1055,14 +1056,23 @@ start_err:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
- if (ret < 0)
- break;
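+ /* the DAI link can ask for the DMA (component) to be stopped before the DAI */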
+ if (rtd->dai_link->stop_dma_first) {
+ ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
- ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
- if (ret < 0)
- break;
+ ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
+ } else {
+ ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
+ ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+ if (ret < 0)
+ break;
+ }
ret = snd_soc_link_trigger(substream, cmd, rollback);
break;
}
@@ -1700,7 +1710,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
struct snd_soc_dpcm *dpcm;
struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
struct snd_soc_dai *fe_cpu_dai;
- int err;
+ int err = 0;
int i;
/* apply symmetry for FE */
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 4893a56208e0..0a24d0d409d2 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1203,249 +1203,216 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
return ret;
}
-static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
- struct soc_tplg *tplg, int num_kcontrols)
+static int soc_tplg_dapm_widget_dmixer_create(struct soc_tplg *tplg, struct snd_kcontrol_new *kc)
{
- struct snd_kcontrol_new *kc;
struct soc_mixer_control *sm;
struct snd_soc_tplg_mixer_control *mc;
- int i, err;
-
- kc = devm_kcalloc(tplg->dev, num_kcontrols, sizeof(*kc), GFP_KERNEL);
- if (kc == NULL)
- return NULL;
-
- for (i = 0; i < num_kcontrols; i++) {
- mc = (struct snd_soc_tplg_mixer_control *)tplg->pos;
-
- /* validate kcontrol */
- if (strnlen(mc->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- goto err_sm;
+ int err;
- sm = devm_kzalloc(tplg->dev, sizeof(*sm), GFP_KERNEL);
- if (sm == NULL)
- goto err_sm;
+ mc = (struct snd_soc_tplg_mixer_control *)tplg->pos;
- tplg->pos += (sizeof(struct snd_soc_tplg_mixer_control) +
- le32_to_cpu(mc->priv.size));
+ /* validate kcontrol */
+ if (strnlen(mc->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ return -EINVAL;
- dev_dbg(tplg->dev, " adding DAPM widget mixer control %s at %d\n",
- mc->hdr.name, i);
+ sm = devm_kzalloc(tplg->dev, sizeof(*sm), GFP_KERNEL);
+ if (!sm)
+ return -ENOMEM;
- kc[i].private_value = (long)sm;
- kc[i].name = devm_kstrdup(tplg->dev, mc->hdr.name, GFP_KERNEL);
- if (kc[i].name == NULL)
- goto err_sm;
- kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- kc[i].access = le32_to_cpu(mc->hdr.access);
+ tplg->pos += sizeof(struct snd_soc_tplg_mixer_control) +
+ le32_to_cpu(mc->priv.size);
- /* we only support FL/FR channel mapping atm */
- sm->reg = tplc_chan_get_reg(tplg, mc->channel,
- SNDRV_CHMAP_FL);
- sm->rreg = tplc_chan_get_reg(tplg, mc->channel,
- SNDRV_CHMAP_FR);
- sm->shift = tplc_chan_get_shift(tplg, mc->channel,
- SNDRV_CHMAP_FL);
- sm->rshift = tplc_chan_get_shift(tplg, mc->channel,
- SNDRV_CHMAP_FR);
+ dev_dbg(tplg->dev, " adding DAPM widget mixer control %s\n",
+ mc->hdr.name);
- sm->max = le32_to_cpu(mc->max);
- sm->min = le32_to_cpu(mc->min);
- sm->invert = le32_to_cpu(mc->invert);
- sm->platform_max = le32_to_cpu(mc->platform_max);
- sm->dobj.index = tplg->index;
- INIT_LIST_HEAD(&sm->dobj.list);
-
- /* map io handlers */
- err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], tplg);
- if (err) {
- soc_control_err(tplg, &mc->hdr, mc->hdr.name);
- goto err_sm;
- }
+ kc->private_value = (long)sm;
+ kc->name = devm_kstrdup(tplg->dev, mc->hdr.name, GFP_KERNEL);
+ if (!kc->name)
+ return -ENOMEM;
+ kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ kc->access = le32_to_cpu(mc->hdr.access);
+
+ /* we only support FL/FR channel mapping atm */
+ sm->reg = tplc_chan_get_reg(tplg, mc->channel,
+ SNDRV_CHMAP_FL);
+ sm->rreg = tplc_chan_get_reg(tplg, mc->channel,
+ SNDRV_CHMAP_FR);
+ sm->shift = tplc_chan_get_shift(tplg, mc->channel,
+ SNDRV_CHMAP_FL);
+ sm->rshift = tplc_chan_get_shift(tplg, mc->channel,
+ SNDRV_CHMAP_FR);
+
+ sm->max = le32_to_cpu(mc->max);
+ sm->min = le32_to_cpu(mc->min);
+ sm->invert = le32_to_cpu(mc->invert);
+ sm->platform_max = le32_to_cpu(mc->platform_max);
+ sm->dobj.index = tplg->index;
+ INIT_LIST_HEAD(&sm->dobj.list);
+
+ /* map io handlers */
+ err = soc_tplg_kcontrol_bind_io(&mc->hdr, kc, tplg);
+ if (err) {
+ soc_control_err(tplg, &mc->hdr, mc->hdr.name);
+ return err;
+ }
- /* create any TLV data */
- err = soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: failed to create TLV %s\n",
- mc->hdr.name);
- goto err_sm;
- }
+ /* create any TLV data */
+ err = soc_tplg_create_tlv(tplg, kc, &mc->hdr);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: failed to create TLV %s\n",
+ mc->hdr.name);
+ return err;
+ }
- /* pass control to driver for optional further init */
- err = soc_tplg_init_kcontrol(tplg, &kc[i],
- (struct snd_soc_tplg_ctl_hdr *)mc);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: failed to init %s\n",
- mc->hdr.name);
- goto err_sm;
- }
+ /* pass control to driver for optional further init */
+ err = soc_tplg_init_kcontrol(tplg, kc,
+ (struct snd_soc_tplg_ctl_hdr *)mc);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: failed to init %s\n",
+ mc->hdr.name);
+ return err;
}
- return kc;
-err_sm:
- return NULL;
+ return 0;
}
-static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
- struct soc_tplg *tplg, int num_kcontrols)
+static int soc_tplg_dapm_widget_denum_create(struct soc_tplg *tplg, struct snd_kcontrol_new *kc)
{
- struct snd_kcontrol_new *kc;
struct snd_soc_tplg_enum_control *ec;
struct soc_enum *se;
- int i, err;
-
- kc = devm_kcalloc(tplg->dev, num_kcontrols, sizeof(*kc), GFP_KERNEL);
- if (kc == NULL)
- return NULL;
-
- for (i = 0; i < num_kcontrols; i++) {
- ec = (struct snd_soc_tplg_enum_control *)tplg->pos;
- /* validate kcontrol */
- if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- goto err_se;
+ int err;
- se = devm_kzalloc(tplg->dev, sizeof(*se), GFP_KERNEL);
- if (se == NULL)
- goto err_se;
+ ec = (struct snd_soc_tplg_enum_control *)tplg->pos;
+ /* validate kcontrol */
+ if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ return -EINVAL;
- tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
- le32_to_cpu(ec->priv.size));
+ se = devm_kzalloc(tplg->dev, sizeof(*se), GFP_KERNEL);
+ if (!se)
+ return -ENOMEM;
- dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
- ec->hdr.name);
+ tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
+ le32_to_cpu(ec->priv.size));
- kc[i].private_value = (long)se;
- kc[i].name = devm_kstrdup(tplg->dev, ec->hdr.name, GFP_KERNEL);
- if (kc[i].name == NULL)
- goto err_se;
- kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- kc[i].access = le32_to_cpu(ec->hdr.access);
+ dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
+ ec->hdr.name);
- /* we only support FL/FR channel mapping atm */
- se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
- se->shift_l = tplc_chan_get_shift(tplg, ec->channel,
- SNDRV_CHMAP_FL);
- se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
- SNDRV_CHMAP_FR);
+ kc->private_value = (long)se;
+ kc->name = devm_kstrdup(tplg->dev, ec->hdr.name, GFP_KERNEL);
+ if (!kc->name)
+ return -ENOMEM;
+ kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ kc->access = le32_to_cpu(ec->hdr.access);
- se->items = le32_to_cpu(ec->items);
- se->mask = le32_to_cpu(ec->mask);
- se->dobj.index = tplg->index;
+ /* we only support FL/FR channel mapping atm */
+ se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
+ se->shift_l = tplc_chan_get_shift(tplg, ec->channel,
+ SNDRV_CHMAP_FL);
+ se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
+ SNDRV_CHMAP_FR);
- switch (le32_to_cpu(ec->hdr.ops.info)) {
- case SND_SOC_TPLG_CTL_ENUM_VALUE:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
- err = soc_tplg_denum_create_values(tplg, se, ec);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: could not create values for %s\n",
- ec->hdr.name);
- goto err_se;
- }
- fallthrough;
- case SND_SOC_TPLG_CTL_ENUM:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
- err = soc_tplg_denum_create_texts(tplg, se, ec);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
- ec->hdr.name);
- goto err_se;
- }
- break;
- default:
- dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
- ec->hdr.ops.info, ec->hdr.name);
- goto err_se;
- }
+ se->items = le32_to_cpu(ec->items);
+ se->mask = le32_to_cpu(ec->mask);
+ se->dobj.index = tplg->index;
- /* map io handlers */
- err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc[i], tplg);
- if (err) {
- soc_control_err(tplg, &ec->hdr, ec->hdr.name);
- goto err_se;
+ switch (le32_to_cpu(ec->hdr.ops.info)) {
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+ err = soc_tplg_denum_create_values(tplg, se, ec);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: could not create values for %s\n",
+ ec->hdr.name);
+ return err;
}
-
- /* pass control to driver for optional further init */
- err = soc_tplg_init_kcontrol(tplg, &kc[i],
- (struct snd_soc_tplg_ctl_hdr *)ec);
+ fallthrough;
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+ err = soc_tplg_denum_create_texts(tplg, se, ec);
if (err < 0) {
- dev_err(tplg->dev, "ASoC: failed to init %s\n",
+ dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
ec->hdr.name);
- goto err_se;
+ return err;
}
+ break;
+ default:
+ dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
+ ec->hdr.ops.info, ec->hdr.name);
+ return -EINVAL;
}
- return kc;
+ /* map io handlers */
+ err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
+ if (err) {
+ soc_control_err(tplg, &ec->hdr, ec->hdr.name);
+ return err;
+ }
-err_se:
- return NULL;
+ /* pass control to driver for optional further init */
+ err = soc_tplg_init_kcontrol(tplg, kc,
+ (struct snd_soc_tplg_ctl_hdr *)ec);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: failed to init %s\n",
+ ec->hdr.name);
+ return err;
+ }
+
+ return 0;
}
-static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
- struct soc_tplg *tplg, int num_kcontrols)
+static int soc_tplg_dapm_widget_dbytes_create(struct soc_tplg *tplg, struct snd_kcontrol_new *kc)
{
struct snd_soc_tplg_bytes_control *be;
struct soc_bytes_ext *sbe;
- struct snd_kcontrol_new *kc;
- int i, err;
-
- kc = devm_kcalloc(tplg->dev, num_kcontrols, sizeof(*kc), GFP_KERNEL);
- if (!kc)
- return NULL;
+ int err;
- for (i = 0; i < num_kcontrols; i++) {
- be = (struct snd_soc_tplg_bytes_control *)tplg->pos;
+ be = (struct snd_soc_tplg_bytes_control *)tplg->pos;
- /* validate kcontrol */
- if (strnlen(be->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- goto err_sbe;
+ /* validate kcontrol */
+ if (strnlen(be->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ return -EINVAL;
- sbe = devm_kzalloc(tplg->dev, sizeof(*sbe), GFP_KERNEL);
- if (sbe == NULL)
- goto err_sbe;
+ sbe = devm_kzalloc(tplg->dev, sizeof(*sbe), GFP_KERNEL);
+ if (!sbe)
+ return -ENOMEM;
- tplg->pos += (sizeof(struct snd_soc_tplg_bytes_control) +
- le32_to_cpu(be->priv.size));
+ tplg->pos += (sizeof(struct snd_soc_tplg_bytes_control) +
+ le32_to_cpu(be->priv.size));
- dev_dbg(tplg->dev,
- "ASoC: adding bytes kcontrol %s with access 0x%x\n",
- be->hdr.name, be->hdr.access);
+ dev_dbg(tplg->dev,
+ "ASoC: adding bytes kcontrol %s with access 0x%x\n",
+ be->hdr.name, be->hdr.access);
- kc[i].private_value = (long)sbe;
- kc[i].name = devm_kstrdup(tplg->dev, be->hdr.name, GFP_KERNEL);
- if (kc[i].name == NULL)
- goto err_sbe;
- kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- kc[i].access = le32_to_cpu(be->hdr.access);
+ kc->private_value = (long)sbe;
+ kc->name = devm_kstrdup(tplg->dev, be->hdr.name, GFP_KERNEL);
+ if (!kc->name)
+ return -ENOMEM;
+ kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ kc->access = le32_to_cpu(be->hdr.access);
- sbe->max = le32_to_cpu(be->max);
- INIT_LIST_HEAD(&sbe->dobj.list);
+ sbe->max = le32_to_cpu(be->max);
+ INIT_LIST_HEAD(&sbe->dobj.list);
- /* map standard io handlers and check for external handlers */
- err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], tplg);
- if (err) {
- soc_control_err(tplg, &be->hdr, be->hdr.name);
- goto err_sbe;
- }
-
- /* pass control to driver for optional further init */
- err = soc_tplg_init_kcontrol(tplg, &kc[i],
- (struct snd_soc_tplg_ctl_hdr *)be);
- if (err < 0) {
- dev_err(tplg->dev, "ASoC: failed to init %s\n",
- be->hdr.name);
- goto err_sbe;
- }
+ /* map standard io handlers and check for external handlers */
+ err = soc_tplg_kcontrol_bind_io(&be->hdr, kc, tplg);
+ if (err) {
+ soc_control_err(tplg, &be->hdr, be->hdr.name);
+ return err;
}
- return kc;
-
-err_sbe:
+ /* pass control to driver for optional further init */
+ err = soc_tplg_init_kcontrol(tplg, kc,
+ (struct snd_soc_tplg_ctl_hdr *)be);
+ if (err < 0) {
+ dev_err(tplg->dev, "ASoC: failed to init %s\n",
+ be->hdr.name);
+ return err;
+ }
- return NULL;
+ return 0;
}
static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
@@ -1455,8 +1422,13 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
struct snd_soc_dapm_widget template, *widget;
struct snd_soc_tplg_ctl_hdr *control_hdr;
struct snd_soc_card *card = tplg->comp->card;
- unsigned int kcontrol_type;
+ unsigned int *kcontrol_type = NULL;
+ struct snd_kcontrol_new *kc;
+ int mixer_count = 0;
+ int bytes_count = 0;
+ int enum_count = 0;
int ret = 0;
+ int i;
if (strnlen(w->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
@@ -1499,7 +1471,6 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
le32_to_cpu(w->priv.size));
if (w->num_kcontrols == 0) {
- kcontrol_type = 0;
template.num_kcontrols = 0;
goto widget;
}
@@ -1508,57 +1479,66 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
dev_dbg(tplg->dev, "ASoC: template %s has %d controls of type %x\n",
w->name, w->num_kcontrols, control_hdr->type);
- switch (le32_to_cpu(control_hdr->ops.info)) {
- case SND_SOC_TPLG_CTL_VOLSW:
- case SND_SOC_TPLG_CTL_STROBE:
- case SND_SOC_TPLG_CTL_VOLSW_SX:
- case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
- case SND_SOC_TPLG_CTL_RANGE:
- case SND_SOC_TPLG_DAPM_CTL_VOLSW:
- kcontrol_type = SND_SOC_TPLG_TYPE_MIXER; /* volume mixer */
- template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
- template.kcontrol_news =
- soc_tplg_dapm_widget_dmixer_create(tplg,
- template.num_kcontrols);
- if (!template.kcontrol_news) {
- ret = -ENOMEM;
- goto hdr_err;
- }
- break;
- case SND_SOC_TPLG_CTL_ENUM:
- case SND_SOC_TPLG_CTL_ENUM_VALUE:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
- case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
- kcontrol_type = SND_SOC_TPLG_TYPE_ENUM; /* enumerated mixer */
- template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
- template.kcontrol_news =
- soc_tplg_dapm_widget_denum_create(tplg,
- template.num_kcontrols);
- if (!template.kcontrol_news) {
- ret = -ENOMEM;
- goto hdr_err;
- }
- break;
- case SND_SOC_TPLG_CTL_BYTES:
- kcontrol_type = SND_SOC_TPLG_TYPE_BYTES; /* bytes control */
- template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
- template.kcontrol_news =
- soc_tplg_dapm_widget_dbytes_create(tplg,
- template.num_kcontrols);
- if (!template.kcontrol_news) {
- ret = -ENOMEM;
+ template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
+ kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
+ if (!kc)
+ goto err;
+
+ kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!kcontrol_type)
+ goto err;
+
+ for (i = 0; i < w->num_kcontrols; i++) {
+ control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
+ switch (le32_to_cpu(control_hdr->ops.info)) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_STROBE:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ case SND_SOC_TPLG_CTL_RANGE:
+ case SND_SOC_TPLG_DAPM_CTL_VOLSW:
+ /* volume mixer */
+ kc[i].index = mixer_count;
+ kcontrol_type[i] = SND_SOC_TPLG_TYPE_MIXER;
+ mixer_count++;
+ ret = soc_tplg_dapm_widget_dmixer_create(tplg, &kc[i]);
+ if (ret < 0)
+ goto hdr_err;
+ break;
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+ /* enumerated mixer */
+ kc[i].index = enum_count;
+ kcontrol_type[i] = SND_SOC_TPLG_TYPE_ENUM;
+ enum_count++;
+ ret = soc_tplg_dapm_widget_denum_create(tplg, &kc[i]);
+ if (ret < 0)
+ goto hdr_err;
+ break;
+ case SND_SOC_TPLG_CTL_BYTES:
+ /* bytes control */
+ kc[i].index = bytes_count;
+ kcontrol_type[i] = SND_SOC_TPLG_TYPE_BYTES;
+ bytes_count++;
+ ret = soc_tplg_dapm_widget_dbytes_create(tplg, &kc[i]);
+ if (ret < 0)
+ goto hdr_err;
+ break;
+ default:
+ dev_err(tplg->dev, "ASoC: invalid widget control type %d:%d:%d\n",
+ control_hdr->ops.get, control_hdr->ops.put,
+ le32_to_cpu(control_hdr->ops.info));
+ ret = -EINVAL;
goto hdr_err;
}
- break;
- default:
- dev_err(tplg->dev, "ASoC: invalid widget control type %d:%d:%d\n",
- control_hdr->ops.get, control_hdr->ops.put,
- le32_to_cpu(control_hdr->ops.info));
- ret = -EINVAL;
- goto hdr_err;
}
+ template.kcontrol_news = kc;
+
widget:
ret = soc_tplg_widget_load(tplg, &template, w);
if (ret < 0)
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 98383fd76224..299b5d6ebfd1 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -97,6 +97,34 @@ static const struct snd_soc_component_driver dummy_codec = {
SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_U32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
+/*
+ * The dummy DAI has no clock-provider preference; select one of these
+ * from the sound card manually:
+ * SND_SOC_POSSIBLE_DAIFMT_CBP_CFP
+ * SND_SOC_POSSIBLE_DAIFMT_CBP_CFC
+ * SND_SOC_POSSIBLE_DAIFMT_CBC_CFP
+ * SND_SOC_POSSIBLE_DAIFMT_CBC_CFC
+ */
+static u64 dummy_dai_formats =
+ SND_SOC_POSSIBLE_DAIFMT_I2S |
+ SND_SOC_POSSIBLE_DAIFMT_RIGHT_J |
+ SND_SOC_POSSIBLE_DAIFMT_LEFT_J |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_A |
+ SND_SOC_POSSIBLE_DAIFMT_DSP_B |
+ SND_SOC_POSSIBLE_DAIFMT_AC97 |
+ SND_SOC_POSSIBLE_DAIFMT_PDM |
+ SND_SOC_POSSIBLE_DAIFMT_GATED |
+ SND_SOC_POSSIBLE_DAIFMT_CONT |
+ SND_SOC_POSSIBLE_DAIFMT_NB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_NB_IF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_NF |
+ SND_SOC_POSSIBLE_DAIFMT_IB_IF;
+
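+/* a single priority level: every format in dummy_dai_formats is offered at priority 1 */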
+static const struct snd_soc_dai_ops dummy_dai_ops = {
+ .auto_selectable_formats = &dummy_dai_formats,
+ .num_auto_selectable_formats = 1,
+};
+
/*
* The dummy CODEC is only meant to be used in situations where there is no
* actual hardware.
@@ -122,6 +150,7 @@ static struct snd_soc_dai_driver dummy_dai = {
.rates = STUB_RATES,
.formats = STUB_FORMATS,
},
+ .ops = &dummy_dai_ops,
};
int snd_soc_dai_is_dummy(struct snd_soc_dai *dai)
diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c
index 2d4969c705a4..57d5bf0a171e 100644
--- a/sound/soc/sof/compress.c
+++ b/sound/soc/sof/compress.c
@@ -13,7 +13,7 @@
#include "ops.h"
#include "probe.h"
-struct snd_compress_ops sof_probe_compressed_ops = {
+const struct snd_compress_ops sof_probe_compressed_ops = {
.copy = sof_probe_compr_copy,
};
EXPORT_SYMBOL(sof_probe_compressed_ops);
diff --git a/sound/soc/sof/compress.h b/sound/soc/sof/compress.h
index ca8790bd4b13..4448c799e14b 100644
--- a/sound/soc/sof/compress.h
+++ b/sound/soc/sof/compress.h
@@ -13,7 +13,7 @@
#include <sound/compress_driver.h>
-extern struct snd_compress_ops sof_probe_compressed_ops;
+extern const struct snd_compress_ops sof_probe_compressed_ops;
int sof_probe_compr_open(struct snd_compr_stream *cstream,
struct snd_soc_dai *dai);
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 4e7dccadd7d0..12fedf0984bd 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -315,6 +315,7 @@ static int imx8_probe(struct snd_sof_dev *sdev)
}
ret = of_address_to_resource(res_node, 0, &res);
+ of_node_put(res_node);
if (ret) {
dev_err(&pdev->dev, "failed to get reserved region address\n");
goto exit_pdev_unregister;
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index 4bce89b5ea40..4447f515e8b1 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -278,6 +278,8 @@ config SND_SOC_SOF_HDA
config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
tristate
+ select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE
+ select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE
config SND_SOC_SOF_INTEL_SOUNDWIRE
tristate "SOF support for SoundWire"
@@ -285,8 +287,6 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE
depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
depends on ACPI && SOUNDWIRE
depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y)
- select SOUNDWIRE_INTEL
- select SND_INTEL_SOUNDWIRE_ACPI
help
This adds support for SoundWire with Sound Open Firmware
for Intel(R) platforms.
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
index f3d6f7070fb3..feae487f0227 100644
--- a/sound/soc/sof/intel/Makefile
+++ b/sound/soc/sof/intel/Makefile
@@ -13,7 +13,10 @@ snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-compress.o
snd-sof-intel-hda-objs := hda-codec.o
-obj-$(CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP) += snd-sof-acpi-intel-byt.o
+snd-sof-intel-atom-objs := atom.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP) += snd-sof-intel-atom.o
+obj-$(CONFIG_SND_SOC_SOF_BAYTRAIL) += snd-sof-acpi-intel-byt.o
obj-$(CONFIG_SND_SOC_SOF_BROADWELL) += snd-sof-acpi-intel-bdw.o
obj-$(CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC) += snd-sof-intel-ipc.o
obj-$(CONFIG_SND_SOC_SOF_HDA_COMMON) += snd-sof-intel-hda-common.o
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
new file mode 100644
index 000000000000..d8804efede5e
--- /dev/null
+++ b/sound/soc/sof/intel/atom.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Atom devices
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/intel-dsp-config.h>
+#include "../ops.h"
+#include "shim.h"
+#include "atom.h"
+#include "../sof-acpi-dev.h"
+#include "../sof-audio.h"
+#include "../../intel/common/soc-intel-quirks.h"
+
+static void atom_host_done(struct snd_sof_dev *sdev);
+static void atom_dsp_done(struct snd_sof_dev *sdev);
+static void atom_get_reply(struct snd_sof_dev *sdev);
+
+/*
+ * Debug
+ */
+
+static void atom_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ u32 offset = sdev->dsp_oops_offset;
+
+ /* first read registers */
+ sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+ /* note: variable AR register array is not read */
+
+ /* then get panic info */
+ if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+ dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+ xoops->arch_hdr.totalsize);
+ return;
+ }
+ offset += xoops->arch_hdr.totalsize;
+ sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ offset += sizeof(*panic_info);
+ sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
+}
+
+void atom_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[STACK_DUMP_SIZE];
+ u64 status, panic, imrd, imrx;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ atom_get_registers(sdev, &xoops, &panic_info, stack,
+ STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
+ STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%llx\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%llx\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+
+}
+EXPORT_SYMBOL_NS(atom_dump, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+irqreturn_t atom_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+ int ret = IRQ_NONE;
+
+ ipcx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ /* reply message from DSP, Mask Done interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* new message from DSP, Mask Busy interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(atom_irq_handler, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+irqreturn_t atom_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+
+ ipcx = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IPCD);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /*
+ * Handle the immediate reply from the DSP core. If the msg is
+ * found, the done bit is set in cmd_done, which is called at the
+ * end of the message-processing function; otherwise set it here,
+ * because the done bit can't be set in cmd_done, which is only
+ * triggered by an incoming msg.
+ */
+ atom_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, ipcx);
+
+ atom_dsp_done(sdev);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ }
+
+ /* new message from DSP */
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, PANIC_OFFSET(ipcd) +
+ MBOX_OFFSET);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ atom_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_NS(atom_irq_thread, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* unmask and prepare to receive Done interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write64(sdev, DSP_BAR, SHIM_IPCX, SHIM_BYT_IPCX_BUSY);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(atom_send_msg, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+static void atom_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ /*
+ * Sometimes an unexpected reply IPC arrives that does not belong
+ * to any of the IPCs sent by the driver. In that case the driver
+ * must ignore it.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+}
+
+int atom_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+EXPORT_SYMBOL_NS(atom_get_mailbox_offset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+EXPORT_SYMBOL_NS(atom_get_window_offset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+static void atom_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IPCD,
+ SHIM_BYT_IPCD_BUSY |
+ SHIM_BYT_IPCD_DONE,
+ SHIM_BYT_IPCD_DONE);
+
+ /* unmask and prepare to receive next new message */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void atom_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits64_unlocked(sdev, DSP_BAR, SHIM_IPCX,
+ SHIM_BYT_IPCX_DONE, 0);
+}
+
+/*
+ * DSP control.
+ */
+
+int atom_run(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+
+ /* release stall and wait to unstall */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_STALL, 0x0);
+ while (tries--) {
+ if (!(snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_CSR) &
+ SHIM_BYT_CSR_PWAITMODE))
+ break;
+ msleep(100);
+ }
+ if (tries < 0) {
+ dev_err(sdev->dev, "error: unable to run DSP firmware\n");
+ atom_dump(sdev, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
+ return -ENODEV;
+ }
+
+ /* return init core mask */
+ return 1;
+}
+EXPORT_SYMBOL_NS(atom_run, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+int atom_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset, set reset vector and stall */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL);
+
+ usleep_range(10, 15);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(atom_reset, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename,
+ const char *ssp_str)
+{
+ const char *tplg_filename = NULL;
+ char *filename;
+ char *split_ext;
+
+ filename = devm_kstrdup(sdev->dev, sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ split_ext = strsep(&filename, ".");
+ if (split_ext) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s-%s.tplg",
+ split_ext, ssp_str);
+ if (!tplg_filename)
+ return NULL;
+ }
+ return tplg_filename;
+}
+
+void atom_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *sof_pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = sof_pdata->desc;
+ struct snd_soc_acpi_mach *mach;
+ struct platform_device *pdev;
+ const char *tplg_filename;
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
+ return;
+ }
+
+ pdev = to_platform_device(sdev->dev);
+ if (soc_intel_is_byt_cr(pdev)) {
+ dev_dbg(sdev->dev,
+ "BYT-CR detected, SSP0 used instead of SSP2\n");
+
+ tplg_filename = fixup_tplg_name(sdev,
+ mach->sof_tplg_filename,
+ "ssp0");
+ } else {
+ tplg_filename = mach->sof_tplg_filename;
+ }
+
+ if (!tplg_filename) {
+ dev_dbg(sdev->dev,
+ "error: no topology filename\n");
+ return;
+ }
+
+ sof_pdata->tplg_filename = tplg_filename;
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+ sof_pdata->machine = mach;
+}
+EXPORT_SYMBOL_NS(atom_machine_select, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+/* Atom DAIs */
+struct snd_soc_dai_driver atom_dai[] = {
+{
+ .name = "ssp0-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp1-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp2-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ }
+},
+{
+ .name = "ssp3-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp4-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+{
+ .name = "ssp5-port",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+},
+};
+EXPORT_SYMBOL_NS(atom_dai, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+void atom_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct snd_soc_acpi_mach_params *mach_params;
+
+ mach_params = (struct snd_soc_acpi_mach_params *)&mach->mach_params;
+ mach_params->platform = dev_name(sdev->dev);
+ mach_params->num_dai_drivers = desc->ops->num_drv;
+ mach_params->dai_drivers = desc->ops->drv;
+}
+EXPORT_SYMBOL_NS(atom_set_mach_params, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/atom.h b/sound/soc/sof/intel/atom.h
new file mode 100644
index 000000000000..96a462c7a2e5
--- /dev/null
+++ b/sound/soc/sof/intel/atom.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017-2021 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_ATOM_H
+#define __SOF_INTEL_ATOM_H
+
+/* DSP memories */
+#define IRAM_OFFSET 0x0C0000
+#define IRAM_SIZE (80 * 1024)
+#define DRAM_OFFSET 0x100000
+#define DRAM_SIZE (160 * 1024)
+#define SHIM_OFFSET 0x140000
+#define SHIM_SIZE_BYT 0x100
+#define SHIM_SIZE_CHT 0x118
+#define MBOX_OFFSET 0x144000
+#define MBOX_SIZE 0x1000
+#define EXCEPT_OFFSET 0x800
+#define EXCEPT_MAX_HDR_SIZE 0x400
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0x098000
+#define DMAC1_OFFSET 0x09c000
+#define DMAC2_OFFSET 0x094000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0x0a0000
+#define SSP1_OFFSET 0x0a1000
+#define SSP2_OFFSET 0x0a2000
+#define SSP3_OFFSET 0x0a4000
+#define SSP4_OFFSET 0x0a5000
+#define SSP5_OFFSET 0x0a6000
+#define SSP_SIZE 0x100
+
+#define STACK_DUMP_SIZE 32
+
+#define PCI_BAR_SIZE 0x200000
+
+#define PANIC_OFFSET(x) (((x) & GENMASK_ULL(47, 32)) >> 32)
+
+/*
+ * Debug
+ */
+
+#define MBOX_DUMP_SIZE 0x30
+
+/* BARs */
+#define DSP_BAR 0
+#define PCI_BAR 1
+#define IMR_BAR 2
+
+irqreturn_t atom_irq_handler(int irq, void *context);
+irqreturn_t atom_irq_thread(int irq, void *context);
+
+int atom_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+int atom_get_mailbox_offset(struct snd_sof_dev *sdev);
+int atom_get_window_offset(struct snd_sof_dev *sdev, u32 id);
+
+int atom_run(struct snd_sof_dev *sdev);
+int atom_reset(struct snd_sof_dev *sdev);
+void atom_dump(struct snd_sof_dev *sdev, u32 flags);
+
+void atom_machine_select(struct snd_sof_dev *sdev);
+void atom_set_mach_params(const struct snd_soc_acpi_mach *mach,
+ struct snd_sof_dev *sdev);
+
+extern struct snd_soc_dai_driver atom_dai[];
+
+#endif
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index d9803e2ba67f..8edaf6fdd218 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -19,662 +19,66 @@
#include <sound/soc-acpi-intel-match.h>
#include <sound/intel-dsp-config.h>
#include "../ops.h"
+#include "atom.h"
#include "shim.h"
#include "../sof-acpi-dev.h"
#include "../sof-audio.h"
#include "../../intel/common/soc-intel-quirks.h"
-/* DSP memories */
-#define IRAM_OFFSET 0x0C0000
-#define IRAM_SIZE (80 * 1024)
-#define DRAM_OFFSET 0x100000
-#define DRAM_SIZE (160 * 1024)
-#define SHIM_OFFSET 0x140000
-#define SHIM_SIZE_BYT 0x100
-#define SHIM_SIZE_CHT 0x118
-#define MBOX_OFFSET 0x144000
-#define MBOX_SIZE 0x1000
-#define EXCEPT_OFFSET 0x800
-#define EXCEPT_MAX_HDR_SIZE 0x400
-
-/* DSP peripherals */
-#define DMAC0_OFFSET 0x098000
-#define DMAC1_OFFSET 0x09c000
-#define DMAC2_OFFSET 0x094000
-#define DMAC_SIZE 0x420
-#define SSP0_OFFSET 0x0a0000
-#define SSP1_OFFSET 0x0a1000
-#define SSP2_OFFSET 0x0a2000
-#define SSP3_OFFSET 0x0a4000
-#define SSP4_OFFSET 0x0a5000
-#define SSP5_OFFSET 0x0a6000
-#define SSP_SIZE 0x100
-
-#define BYT_STACK_DUMP_SIZE 32
-
-#define BYT_PCI_BAR_SIZE 0x200000
-
-#define BYT_PANIC_OFFSET(x) (((x) & GENMASK_ULL(47, 32)) >> 32)
-
-/*
- * Debug
- */
-
-#define MBOX_DUMP_SIZE 0x30
-
-/* BARs */
-#define BYT_DSP_BAR 0
-#define BYT_PCI_BAR 1
-#define BYT_IMR_BAR 2
-
static const struct snd_sof_debugfs_map byt_debugfs[] = {
- {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
SOF_DEBUGFS_ACCESS_ALWAYS},
- {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
SOF_DEBUGFS_ACCESS_ALWAYS},
- {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
SOF_DEBUGFS_ACCESS_D0_ONLY},
- {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
SOF_DEBUGFS_ACCESS_D0_ONLY},
- {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
SOF_DEBUGFS_ACCESS_ALWAYS},
};
-static void byt_host_done(struct snd_sof_dev *sdev);
-static void byt_dsp_done(struct snd_sof_dev *sdev);
-static void byt_get_reply(struct snd_sof_dev *sdev);
-
-/*
- * Debug
- */
-
-static void byt_get_registers(struct snd_sof_dev *sdev,
- struct sof_ipc_dsp_oops_xtensa *xoops,
- struct sof_ipc_panic_info *panic_info,
- u32 *stack, size_t stack_words)
-{
- u32 offset = sdev->dsp_oops_offset;
-
- /* first read registers */
- sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
-
- /* note: variable AR register array is not read */
-
- /* then get panic info */
- if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
- dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
- xoops->arch_hdr.totalsize);
- return;
- }
- offset += xoops->arch_hdr.totalsize;
- sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
-
- /* then get the stack */
- offset += sizeof(*panic_info);
- sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
-}
-
-static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
-{
- struct sof_ipc_dsp_oops_xtensa xoops;
- struct sof_ipc_panic_info panic_info;
- u32 stack[BYT_STACK_DUMP_SIZE];
- u64 status, panic, imrd, imrx;
-
- /* now try generic SOF status messages */
- status = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
- panic = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
- byt_get_registers(sdev, &xoops, &panic_info, stack,
- BYT_STACK_DUMP_SIZE);
- snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
- BYT_STACK_DUMP_SIZE);
-
- /* provide some context for firmware debug */
- imrx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRX);
- imrd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRD);
- dev_err(sdev->dev,
- "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
- (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
- (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
- dev_err(sdev->dev,
- "error: mask host: pending %s complete %s raw 0x%llx\n",
- (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
- (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
- dev_err(sdev->dev,
- "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
- (status & SHIM_IPCD_BUSY) ? "yes" : "no",
- (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
- dev_err(sdev->dev,
- "error: mask DSP: pending %s complete %s raw 0x%llx\n",
- (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
- (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
-
-}
-
-/*
- * IPC Doorbell IRQ handler and thread.
- */
-
-static irqreturn_t byt_irq_handler(int irq, void *context)
-{
- struct snd_sof_dev *sdev = context;
- u64 ipcx, ipcd;
- int ret = IRQ_NONE;
-
- ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
- ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
-
- if (ipcx & SHIM_BYT_IPCX_DONE) {
-
- /* reply message from DSP, Mask Done interrupt first */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
- SHIM_IMRX,
- SHIM_IMRX_DONE,
- SHIM_IMRX_DONE);
- ret = IRQ_WAKE_THREAD;
- }
-
- if (ipcd & SHIM_BYT_IPCD_BUSY) {
-
- /* new message from DSP, Mask Busy interrupt first */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
- SHIM_IMRX,
- SHIM_IMRX_BUSY,
- SHIM_IMRX_BUSY);
- ret = IRQ_WAKE_THREAD;
- }
-
- return ret;
-}
-
-static irqreturn_t byt_irq_thread(int irq, void *context)
-{
- struct snd_sof_dev *sdev = context;
- u64 ipcx, ipcd;
-
- ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
- ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
-
- /* reply message from DSP */
- if (ipcx & SHIM_BYT_IPCX_DONE) {
-
- spin_lock_irq(&sdev->ipc_lock);
-
- /*
- * handle immediate reply from DSP core. If the msg is
- * found, set done bit in cmd_done which is called at the
- * end of message processing function, else set it here
- * because the done bit can't be set in cmd_done function
- * which is triggered by msg
- */
- byt_get_reply(sdev);
- snd_sof_ipc_reply(sdev, ipcx);
-
- byt_dsp_done(sdev);
-
- spin_unlock_irq(&sdev->ipc_lock);
- }
-
- /* new message from DSP */
- if (ipcd & SHIM_BYT_IPCD_BUSY) {
-
- /* Handle messages from DSP Core */
- if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
- snd_sof_dsp_panic(sdev, BYT_PANIC_OFFSET(ipcd) +
- MBOX_OFFSET);
- } else {
- snd_sof_ipc_msgs_rx(sdev);
- }
-
- byt_host_done(sdev);
- }
-
- return IRQ_HANDLED;
-}
-
-static int byt_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
-{
- /* unmask and prepare to receive Done interrupt */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
- SHIM_IMRX_DONE, 0);
-
- /* send the message */
- sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
- msg->msg_size);
- snd_sof_dsp_write64(sdev, BYT_DSP_BAR, SHIM_IPCX, SHIM_BYT_IPCX_BUSY);
-
- return 0;
-}
-
-static void byt_get_reply(struct snd_sof_dev *sdev)
-{
- struct snd_sof_ipc_msg *msg = sdev->msg;
- struct sof_ipc_reply reply;
- int ret = 0;
-
- /*
- * Sometimes, there is unexpected reply ipc arriving. The reply
- * ipc belongs to none of the ipcs sent from driver.
- * In this case, the driver must ignore the ipc.
- */
- if (!msg) {
- dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
- return;
- }
-
- /* get reply */
- sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
-
- if (reply.error < 0) {
- memcpy(msg->reply_data, &reply, sizeof(reply));
- ret = reply.error;
- } else {
- /* reply correct size ? */
- if (reply.hdr.size != msg->reply_size) {
- dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
- msg->reply_size, reply.hdr.size);
- ret = -EINVAL;
- }
-
- /* read the message */
- if (msg->reply_size > 0)
- sof_mailbox_read(sdev, sdev->host_box.offset,
- msg->reply_data, msg->reply_size);
- }
-
- msg->reply_error = ret;
-}
-
-static int byt_get_mailbox_offset(struct snd_sof_dev *sdev)
-{
- return MBOX_OFFSET;
-}
-
-static int byt_get_window_offset(struct snd_sof_dev *sdev, u32 id)
-{
- return MBOX_OFFSET;
-}
-
-static void byt_host_done(struct snd_sof_dev *sdev)
-{
- /* clear BUSY bit and set DONE bit - accept new messages */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCD,
- SHIM_BYT_IPCD_BUSY |
- SHIM_BYT_IPCD_DONE,
- SHIM_BYT_IPCD_DONE);
-
- /* unmask and prepare to receive next new message */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
- SHIM_IMRX_BUSY, 0);
-}
-
-static void byt_dsp_done(struct snd_sof_dev *sdev)
-{
- /* clear DONE bit - tell DSP we have completed */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCX,
- SHIM_BYT_IPCX_DONE, 0);
-}
-
-/*
- * DSP control.
- */
-
-static int byt_run(struct snd_sof_dev *sdev)
-{
- int tries = 10;
-
- /* release stall and wait to unstall */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
- SHIM_BYT_CSR_STALL, 0x0);
- while (tries--) {
- if (!(snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_CSR) &
- SHIM_BYT_CSR_PWAITMODE))
- break;
- msleep(100);
- }
- if (tries < 0) {
- dev_err(sdev->dev, "error: unable to run DSP firmware\n");
- byt_dump(sdev, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
- return -ENODEV;
- }
-
- /* return init core mask */
- return 1;
-}
-
-static int byt_reset(struct snd_sof_dev *sdev)
-{
- /* put DSP into reset, set reset vector and stall */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
- SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
- SHIM_BYT_CSR_STALL,
- SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
- SHIM_BYT_CSR_STALL);
-
- usleep_range(10, 15);
-
- /* take DSP out of reset and keep stalled for FW loading */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
- SHIM_BYT_CSR_RST, 0);
-
- return 0;
-}
-
-static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
- const char *sof_tplg_filename,
- const char *ssp_str)
-{
- const char *tplg_filename = NULL;
- char *filename;
- char *split_ext;
-
- filename = devm_kstrdup(sdev->dev, sof_tplg_filename, GFP_KERNEL);
- if (!filename)
- return NULL;
-
- /* this assumes a .tplg extension */
- split_ext = strsep(&filename, ".");
- if (split_ext) {
- tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
- "%s-%s.tplg",
- split_ext, ssp_str);
- if (!tplg_filename)
- return NULL;
- }
- return tplg_filename;
-}
-
-static void byt_machine_select(struct snd_sof_dev *sdev)
-{
- struct snd_sof_pdata *sof_pdata = sdev->pdata;
- const struct sof_dev_desc *desc = sof_pdata->desc;
- struct snd_soc_acpi_mach *mach;
- struct platform_device *pdev;
- const char *tplg_filename;
-
- mach = snd_soc_acpi_find_machine(desc->machines);
- if (!mach) {
- dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
- return;
- }
-
- pdev = to_platform_device(sdev->dev);
- if (soc_intel_is_byt_cr(pdev)) {
- dev_dbg(sdev->dev,
- "BYT-CR detected, SSP0 used instead of SSP2\n");
-
- tplg_filename = fixup_tplg_name(sdev,
- mach->sof_tplg_filename,
- "ssp0");
- } else {
- tplg_filename = mach->sof_tplg_filename;
- }
-
- if (!tplg_filename) {
- dev_dbg(sdev->dev,
- "error: no topology filename\n");
- return;
- }
-
- sof_pdata->tplg_filename = tplg_filename;
- mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
- sof_pdata->machine = mach;
-}
-
-/* Baytrail DAIs */
-static struct snd_soc_dai_driver byt_dai[] = {
-{
- .name = "ssp0-port",
- .playback = {
- .channels_min = 1,
- .channels_max = 8,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 8,
- },
-},
-{
- .name = "ssp1-port",
- .playback = {
- .channels_min = 1,
- .channels_max = 8,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 8,
- },
-},
-{
- .name = "ssp2-port",
- .playback = {
- .channels_min = 1,
- .channels_max = 8,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 8,
- }
-},
-{
- .name = "ssp3-port",
- .playback = {
- .channels_min = 1,
- .channels_max = 8,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 8,
- },
-},
-{
- .name = "ssp4-port",
- .playback = {
- .channels_min = 1,
- .channels_max = 8,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 8,
- },
-},
-{
- .name = "ssp5-port",
- .playback = {
- .channels_min = 1,
- .channels_max = 8,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 8,
- },
-},
-};
-
-static void byt_set_mach_params(const struct snd_soc_acpi_mach *mach,
- struct snd_sof_dev *sdev)
-{
- struct snd_sof_pdata *pdata = sdev->pdata;
- const struct sof_dev_desc *desc = pdata->desc;
- struct snd_soc_acpi_mach_params *mach_params;
-
- mach_params = (struct snd_soc_acpi_mach_params *)&mach->mach_params;
- mach_params->platform = dev_name(sdev->dev);
- mach_params->num_dai_drivers = desc->ops->num_drv;
- mach_params->dai_drivers = desc->ops->drv;
-}
-
-/*
- * Probe and remove.
- */
-
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
-
-static int tangier_pci_probe(struct snd_sof_dev *sdev)
-{
- struct snd_sof_pdata *pdata = sdev->pdata;
- const struct sof_dev_desc *desc = pdata->desc;
- struct pci_dev *pci = to_pci_dev(sdev->dev);
- u32 base, size;
- int ret;
-
- /* DSP DMA can only access low 31 bits of host memory */
- ret = dma_coerce_mask_and_coherent(&pci->dev, DMA_BIT_MASK(31));
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
- return ret;
- }
-
- /* LPE base */
- base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
- size = BYT_PCI_BAR_SIZE;
-
- dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
- sdev->bar[BYT_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
- if (!sdev->bar[BYT_DSP_BAR]) {
- dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
- base, size);
- return -ENODEV;
- }
- dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BYT_DSP_BAR]);
-
- /* IMR base - optional */
- if (desc->resindex_imr_base == -1)
- goto irq;
-
- base = pci_resource_start(pci, desc->resindex_imr_base);
- size = pci_resource_len(pci, desc->resindex_imr_base);
-
- /* some BIOSes don't map IMR */
- if (base == 0x55aa55aa || base == 0x0) {
- dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
- goto irq;
- }
-
- dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
- sdev->bar[BYT_IMR_BAR] = devm_ioremap(sdev->dev, base, size);
- if (!sdev->bar[BYT_IMR_BAR]) {
- dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
- base, size);
- return -ENODEV;
- }
- dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[BYT_IMR_BAR]);
-
-irq:
- /* register our IRQ */
- sdev->ipc_irq = pci->irq;
- dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
- ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
- byt_irq_handler, byt_irq_thread,
- 0, "AudioDSP", sdev);
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to register IRQ %d\n",
- sdev->ipc_irq);
- return ret;
- }
-
- /* enable BUSY and disable DONE Interrupt by default */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
- SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
- SHIM_IMRX_DONE);
-
- /* set default mailbox offset for FW ready message */
- sdev->dsp_box.offset = MBOX_OFFSET;
-
- return ret;
-}
-
-const struct snd_sof_dsp_ops sof_tng_ops = {
- /* device init */
- .probe = tangier_pci_probe,
-
- /* DSP core boot / reset */
- .run = byt_run,
- .reset = byt_reset,
-
- /* Register IO */
- .write = sof_io_write,
- .read = sof_io_read,
- .write64 = sof_io_write64,
- .read64 = sof_io_read64,
-
- /* Block IO */
- .block_read = sof_block_read,
- .block_write = sof_block_write,
-
- /* doorbell */
- .irq_handler = byt_irq_handler,
- .irq_thread = byt_irq_thread,
-
- /* ipc */
- .send_msg = byt_send_msg,
- .fw_ready = sof_fw_ready,
- .get_mailbox_offset = byt_get_mailbox_offset,
- .get_window_offset = byt_get_window_offset,
-
- .ipc_msg_data = intel_ipc_msg_data,
- .ipc_pcm_params = intel_ipc_pcm_params,
-
- /* machine driver */
- .machine_select = byt_machine_select,
- .machine_register = sof_machine_register,
- .machine_unregister = sof_machine_unregister,
- .set_mach_params = byt_set_mach_params,
-
- /* debug */
- .debug_map = byt_debugfs,
- .debug_map_count = ARRAY_SIZE(byt_debugfs),
- .dbg_dump = byt_dump,
-
- /* stream callbacks */
- .pcm_open = intel_pcm_open,
- .pcm_close = intel_pcm_close,
-
- /* module loading */
- .load_module = snd_sof_parse_module_memcpy,
-
- /* Firmware loading */
- .load_firmware = snd_sof_load_firmware_memcpy,
-
- /* DAI drivers */
- .drv = byt_dai,
- .num_drv = 3, /* we have only 3 SSPs on byt */
-
- /* ALSA HW info flags */
- .hw_info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_BATCH,
-
- .arch_ops = &sof_xtensa_arch_ops,
-};
-EXPORT_SYMBOL_NS(sof_tng_ops, SND_SOC_SOF_MERRIFIELD);
-
-const struct sof_intel_dsp_desc tng_chip_info = {
- .cores_num = 1,
- .host_managed_cores_mask = 1,
+static const struct snd_sof_debugfs_map cht_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac2", DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp3", DSP_BAR, SSP3_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp4", DSP_BAR, SSP4_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp5", DSP_BAR, SSP5_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
};
-EXPORT_SYMBOL_NS(tng_chip_info, SND_SOC_SOF_MERRIFIELD);
-
-#endif /* CONFIG_SND_SOC_SOF_MERRIFIELD */
-
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
static void byt_reset_dsp_disable_int(struct snd_sof_dev *sdev)
{
/* Disable Interrupt from both sides */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x3);
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x3);
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX, 0x3, 0x3);
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRD, 0x3, 0x3);
/* Put DSP into reset, set reset vector */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_CSR,
SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL,
SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL);
}
@@ -689,7 +93,7 @@ static int byt_suspend(struct snd_sof_dev *sdev, u32 target_state)
static int byt_resume(struct snd_sof_dev *sdev)
{
/* enable BUSY and disable DONE Interrupt by default */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
SHIM_IMRX_DONE);
@@ -703,33 +107,6 @@ static int byt_remove(struct snd_sof_dev *sdev)
return 0;
}
-static const struct snd_sof_debugfs_map cht_debugfs[] = {
- {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"dmac2", BYT_DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp3", BYT_DSP_BAR, SSP3_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp4", BYT_DSP_BAR, SSP4_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp5", BYT_DSP_BAR, SSP5_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
- SOF_DEBUGFS_ACCESS_D0_ONLY},
- {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
- SOF_DEBUGFS_ACCESS_D0_ONLY},
- {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
- SOF_DEBUGFS_ACCESS_ALWAYS},
-};
-
static int byt_acpi_probe(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *pdata = sdev->pdata;
@@ -760,17 +137,17 @@ static int byt_acpi_probe(struct snd_sof_dev *sdev)
}
dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
- sdev->bar[BYT_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
- if (!sdev->bar[BYT_DSP_BAR]) {
+ sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[DSP_BAR]) {
dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
base, size);
return -ENODEV;
}
- dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BYT_DSP_BAR]);
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[DSP_BAR]);
/* TODO: add offsets */
- sdev->mmio_bar = BYT_DSP_BAR;
- sdev->mailbox_bar = BYT_DSP_BAR;
+ sdev->mmio_bar = DSP_BAR;
+ sdev->mailbox_bar = DSP_BAR;
/* IMR base - optional */
if (desc->resindex_imr_base == -1)
@@ -794,13 +171,13 @@ static int byt_acpi_probe(struct snd_sof_dev *sdev)
}
dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
- sdev->bar[BYT_IMR_BAR] = devm_ioremap(sdev->dev, base, size);
- if (!sdev->bar[BYT_IMR_BAR]) {
+ sdev->bar[IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[IMR_BAR]) {
dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
base, size);
return -ENODEV;
}
- dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[BYT_IMR_BAR]);
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[IMR_BAR]);
irq:
/* register our IRQ */
@@ -810,7 +187,7 @@ irq:
dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
- byt_irq_handler, byt_irq_thread,
+ atom_irq_handler, atom_irq_thread,
IRQF_SHARED, "AudioDSP", sdev);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to register IRQ %d\n",
@@ -819,7 +196,7 @@ irq:
}
/* enable BUSY and disable DONE Interrupt by default */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
SHIM_IMRX_DONE);
@@ -836,8 +213,8 @@ static const struct snd_sof_dsp_ops sof_byt_ops = {
.remove = byt_remove,
/* DSP core boot / reset */
- .run = byt_run,
- .reset = byt_reset,
+ .run = atom_run,
+ .reset = atom_reset,
/* Register IO */
.write = sof_io_write,
@@ -850,28 +227,28 @@ static const struct snd_sof_dsp_ops sof_byt_ops = {
.block_write = sof_block_write,
/* doorbell */
- .irq_handler = byt_irq_handler,
- .irq_thread = byt_irq_thread,
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
/* ipc */
- .send_msg = byt_send_msg,
+ .send_msg = atom_send_msg,
.fw_ready = sof_fw_ready,
- .get_mailbox_offset = byt_get_mailbox_offset,
- .get_window_offset = byt_get_window_offset,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
.ipc_msg_data = intel_ipc_msg_data,
.ipc_pcm_params = intel_ipc_pcm_params,
/* machine driver */
- .machine_select = byt_machine_select,
+ .machine_select = atom_machine_select,
.machine_register = sof_machine_register,
.machine_unregister = sof_machine_unregister,
- .set_mach_params = byt_set_mach_params,
+ .set_mach_params = atom_set_mach_params,
/* debug */
.debug_map = byt_debugfs,
.debug_map_count = ARRAY_SIZE(byt_debugfs),
- .dbg_dump = byt_dump,
+ .dbg_dump = atom_dump,
/* stream callbacks */
.pcm_open = intel_pcm_open,
@@ -888,7 +265,7 @@ static const struct snd_sof_dsp_ops sof_byt_ops = {
.resume = byt_resume,
/* DAI drivers */
- .drv = byt_dai,
+ .drv = atom_dai,
.num_drv = 3, /* we have only 3 SSPs on byt */
/* ALSA HW info flags */
@@ -913,8 +290,8 @@ static const struct snd_sof_dsp_ops sof_cht_ops = {
.remove = byt_remove,
/* DSP core boot / reset */
- .run = byt_run,
- .reset = byt_reset,
+ .run = atom_run,
+ .reset = atom_reset,
/* Register IO */
.write = sof_io_write,
@@ -927,28 +304,28 @@ static const struct snd_sof_dsp_ops sof_cht_ops = {
.block_write = sof_block_write,
/* doorbell */
- .irq_handler = byt_irq_handler,
- .irq_thread = byt_irq_thread,
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
/* ipc */
- .send_msg = byt_send_msg,
+ .send_msg = atom_send_msg,
.fw_ready = sof_fw_ready,
- .get_mailbox_offset = byt_get_mailbox_offset,
- .get_window_offset = byt_get_window_offset,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
.ipc_msg_data = intel_ipc_msg_data,
.ipc_pcm_params = intel_ipc_pcm_params,
/* machine driver */
- .machine_select = byt_machine_select,
+ .machine_select = atom_machine_select,
.machine_register = sof_machine_register,
.machine_unregister = sof_machine_unregister,
- .set_mach_params = byt_set_mach_params,
+ .set_mach_params = atom_set_mach_params,
/* debug */
.debug_map = cht_debugfs,
.debug_map_count = ARRAY_SIZE(cht_debugfs),
- .dbg_dump = byt_dump,
+ .dbg_dump = atom_dump,
/* stream callbacks */
.pcm_open = intel_pcm_open,
@@ -965,9 +342,9 @@ static const struct snd_sof_dsp_ops sof_cht_ops = {
.resume = byt_resume,
/* DAI drivers */
- .drv = byt_dai,
+ .drv = atom_dai,
/* all 6 SSPs may be available for cherrytrail */
- .num_drv = ARRAY_SIZE(byt_dai),
+ .num_drv = 6,
/* ALSA HW info flags */
.hw_info = SNDRV_PCM_INFO_MMAP |
@@ -1073,9 +450,8 @@ static struct platform_driver snd_sof_acpi_intel_byt_driver = {
};
module_platform_driver(snd_sof_acpi_intel_byt_driver);
-#endif /* CONFIG_SND_SOC_SOF_BAYTRAIL */
-
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
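
With the Merrifield code moved out of byt.c (it reappears in pci-tng.c further down), the Baytrail/Cherrytrail ops now point at the shared atom_* helpers declared in atom.h, and the added MODULE_IMPORT_NS() line is the importer half of the symbol-namespace mechanism those helpers are exported under. A small sketch of the pairing, using a hypothetical example_helper() because the exporting atom.c is not part of this excerpt:

#include <linux/module.h>

/* exporter side (presumably the new shared atom.c): the symbol goes into a
 * named namespace instead of the global export table */
int example_helper(void)
{
        return 0;
}
EXPORT_SYMBOL_NS(example_helper, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);

/* importer side, as byt.c and pci-tng.c do above: modules that use a
 * namespaced symbol must declare the import, otherwise modpost complains
 * at build time */
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
MODULE_LICENSE("Dual BSD/GPL");
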
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index c91aa951df22..acfeca42604c 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -107,8 +107,8 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
} else {
/* reply correct size ? */
if (reply.hdr.size != msg->reply_size &&
- /* getter payload is never known upfront */
- !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
+ /* getter payload is never known upfront */
+ ((reply.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_PROBE)) {
dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
msg->reply_size, reply.hdr.size);
ret = -EINVAL;
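
The probe exception above needs a masked equality test because the SOF global command type is a multi-bit field, not a flag: reply.hdr.cmd & SOF_IPC_GLB_PROBE is non-zero for any type whose encoding happens to share bits with the probe type, while comparing the field masked with SOF_GLB_TYPE_MASK only matches real probe replies. A stand-alone illustration with hypothetical encodings (the real values live in the SOF IPC headers):

#include <stdint.h>
#include <stdio.h>

/* hypothetical encodings, for illustration only: the type is a 4-bit field
 * at the top of the command word */
#define GLB_TYPE_SHIFT  28
#define GLB_TYPE_MASK   (0xfU << GLB_TYPE_SHIFT)
#define GLB_PROBE       (0x9U << GLB_TYPE_SHIFT)
#define GLB_TRACE       (0xdU << GLB_TYPE_SHIFT)

int main(void)
{
        uint32_t cmd = GLB_TRACE;

        /* buggy test: 0xd and 0x9 share bits, so a trace reply looks like probe */
        printf("bitwise AND says probe:    %d\n", (cmd & GLB_PROBE) != 0);

        /* fixed test: mask out the whole type field and compare */
        printf("masked compare says probe: %d\n",
               (cmd & GLB_TYPE_MASK) == GLB_PROBE);
        return 0;
}
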
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index fc25ee8f68dc..6f4771bf9de3 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -385,11 +385,6 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
if (i == HDA_FW_BOOT_ATTEMPTS) {
dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
i, ret);
- dev_err(sdev->dev, "ROM error=0x%x: FW status=0x%x\n",
- snd_sof_dsp_read(sdev, HDA_DSP_BAR,
- HDA_DSP_SRAM_REG_ROM_ERROR),
- snd_sof_dsp_read(sdev, HDA_DSP_BAR,
- HDA_DSP_SRAM_REG_ROM_STATUS));
goto cleanup;
}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index b00e8fcb2312..891e6e1b9121 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -187,12 +187,16 @@ static int hda_sdw_probe(struct snd_sof_dev *sdev)
int hda_sdw_startup(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
+ struct snd_sof_pdata *pdata = sdev->pdata;
hdev = sdev->pdata->hw_pdata;
if (!hdev->sdw)
return 0;
+ if (pdata->machine && !pdata->machine->mach_params.link_mask)
+ return 0;
+
return sdw_intel_startup(hdev->sdw);
}
@@ -277,10 +281,12 @@ struct hda_dsp_msg_code {
const char *msg;
};
-static bool hda_use_msi = IS_ENABLED(CONFIG_PCI);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+static bool hda_use_msi = true;
module_param_named(use_msi, hda_use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
+#else
+#define hda_use_msi (1)
#endif
static char *hda_model;
@@ -392,28 +398,21 @@ void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[HDA_DSP_STACK_DUMP_SIZE];
- u32 status, panic;
- /* try APL specific status message types first */
+ /* print ROM/FW status */
hda_dsp_get_status(sdev);
- /* now try generic SOF status messages */
- status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
- HDA_DSP_SRAM_REG_FW_STATUS);
- panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
-
+ /* print panic info if FW boot is complete. Otherwise, print the extended ROM status */
if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
+ u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
+ u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
+
hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
HDA_DSP_STACK_DUMP_SIZE);
snd_sof_get_status(sdev, status, panic, &xoops, &panic_info,
stack, HDA_DSP_STACK_DUMP_SIZE);
} else {
- sof_dev_dbg_or_err(sdev->dev, flags & SOF_DBG_DUMP_FORCE_ERR_LEVEL,
- "status = 0x%8.8x panic = 0x%8.8x\n",
- status, panic);
-
hda_dsp_dump_ext_rom_status(sdev, flags);
- hda_dsp_get_status(sdev);
}
}
@@ -485,9 +484,7 @@ static int hda_init(struct snd_sof_dev *sdev)
/* initialise hdac bus */
bus->addr = pci_resource_start(pci, 0);
-#if IS_ENABLED(CONFIG_PCI)
bus->remap_addr = pci_ioremap_bar(pci, 0);
-#endif
if (!bus->remap_addr) {
dev_err(bus->dev, "error: ioremap error\n");
return -ENXIO;
@@ -799,9 +796,7 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
goto hdac_bus_unmap;
/* DSP base */
-#if IS_ENABLED(CONFIG_PCI)
sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
-#endif
if (!sdev->bar[HDA_DSP_BAR]) {
dev_err(sdev->dev, "error: ioremap error\n");
ret = -ENXIO;
@@ -1011,6 +1006,14 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
hda_mach->mach_params.dmic_num = dmic_num;
pdata->machine = hda_mach;
pdata->tplg_filename = tplg_filename;
+
+ if (codec_num == 2) {
+ /*
+ * Prevent SoundWire links from starting when an external
+ * HDaudio codec is used
+ */
+ hda_mach->mach_params.link_mask = 0;
+ }
}
}
@@ -1069,7 +1072,7 @@ static bool link_slaves_found(struct snd_sof_dev *sdev,
/* find out how many identical parts are expected */
for (k = 0; k < link->num_adr; k++) {
- u64 adr2 = link->adr_d[i].adr;
+ u64 adr2 = link->adr_d[k].adr;
unsigned int part_id2, link_id2, mfg_id2;
mfg_id2 = SDW_MFG_ID(adr2);
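
The one-character fix above matters because the inner loop is meant to count how many entries on the link carry the same part number; indexing adr_d[] with the outer counter i made every iteration re-read the same entry, so the expected count was always wrong. A stand-alone sketch of the corrected counting, with a made-up PART_ID() layout standing in for the SoundWire SDW_PART_ID() macro:

#include <stdio.h>

/* made-up _ADR layout for illustration: part id in bits 31:16 */
#define PART_ID(adr)    ((unsigned int)(((adr) >> 16) & 0xffff))

/* count link entries sharing the part id of entry i; with the original bug
 * (adr_d[i] inside the loop) this always returned num */
static int count_same_part(const unsigned long long *adr_d, int num, int i)
{
        int k, matches = 0;

        for (k = 0; k < num; k++)
                if (PART_ID(adr_d[k]) == PART_ID(adr_d[i]))
                        matches++;
        return matches;
}

int main(void)
{
        unsigned long long adr_d[] = {
                0x711ULL << 16, 0x711ULL << 16, 0x1308ULL << 16
        };

        printf("%d\n", count_same_part(adr_d, 3, 0));   /* prints 2 */
        return 0;
}
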
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index 88c3bf404dd7..d04ce84fe7cc 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -89,6 +89,7 @@ static const struct sof_dev_desc adls_desc = {
static const struct sof_dev_desc adl_desc = {
.machines = snd_soc_acpi_intel_adl_machines,
.alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+ .use_acpi_target_states = true,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
@@ -116,6 +117,8 @@ static const struct pci_device_id sof_pci_ids[] = {
.driver_data = (unsigned long)&adls_desc},
{ PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */
.driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */
+ .driver_data = (unsigned long)&adl_desc},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sof_pci_ids);
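
The new 0x51cc entry simply points the ADL-M device at the same adl_desc already used for ADL-P, so one descriptor serves several PCI device IDs; probing is a table walk that hands the matching driver_data to the probe routine. A stand-alone sketch of that lookup with simplified types (the real table uses struct pci_device_id and is matched by the PCI core):

#include <stdio.h>

struct id_entry {
        unsigned short vendor, device;
        const char *driver_data;        /* stands in for the sof_dev_desc pointer */
};

static const struct id_entry ids[] = {
        { 0x8086, 0x51c8, "adl_desc" }, /* ADL-P */
        { 0x8086, 0x51cc, "adl_desc" }, /* ADL-M, added above */
        { 0, 0, NULL },
};

static const char *match(unsigned short vendor, unsigned short device)
{
        const struct id_entry *e;

        for (e = ids; e->vendor; e++)
                if (e->vendor == vendor && e->device == device)
                        return e->driver_data;
        return NULL;
}

int main(void)
{
        printf("%s\n", match(0x8086, 0x51cc));  /* adl_desc */
        return 0;
}
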
diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
index 94b9704c0117..4ee1da397d4e 100644
--- a/sound/soc/sof/intel/pci-tng.c
+++ b/sound/soc/sof/intel/pci-tng.c
@@ -14,7 +14,10 @@
#include <sound/soc-acpi-intel-match.h>
#include <sound/sof.h>
#include "../ops.h"
+#include "atom.h"
+#include "shim.h"
#include "../sof-pci-dev.h"
+#include "../sof-audio.h"
/* platform specific devices */
#include "shim.h"
@@ -29,6 +32,170 @@ static struct snd_soc_acpi_mach sof_tng_machines[] = {
{}
};
+static const struct snd_sof_debugfs_map tng_debugfs[] = {
+ {"dmac0", DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static int tangier_pci_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ u32 base, size;
+ int ret;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(&pci->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+ size = PCI_BAR_SIZE;
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[DSP_BAR]);
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ base = pci_resource_start(pci, desc->resindex_imr_base);
+ size = pci_resource_len(pci, desc->resindex_imr_base);
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = pci->irq;
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ atom_irq_handler, atom_irq_thread,
+ 0, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+const struct snd_sof_dsp_ops sof_tng_ops = {
+ /* device init */
+ .probe = tangier_pci_probe,
+
+ /* DSP core boot / reset */
+ .run = atom_run,
+ .reset = atom_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = atom_irq_handler,
+ .irq_thread = atom_irq_thread,
+
+ /* ipc */
+ .send_msg = atom_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = atom_get_mailbox_offset,
+ .get_window_offset = atom_get_window_offset,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* machine driver */
+ .machine_select = atom_machine_select,
+ .machine_register = sof_machine_register,
+ .machine_unregister = sof_machine_unregister,
+ .set_mach_params = atom_set_mach_params,
+
+ /* debug */
+ .debug_map = tng_debugfs,
+ .debug_map_count = ARRAY_SIZE(tng_debugfs),
+ .dbg_dump = atom_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+ /* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = atom_dai,
+ .num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
+
+ .arch_ops = &sof_xtensa_arch_ops,
+};
+
+const struct sof_intel_dsp_desc tng_chip_info = {
+ .cores_num = 1,
+ .host_managed_cores_mask = 1,
+};
+
static const struct sof_dev_desc tng_desc = {
.machines = sof_tng_machines,
.resindex_lpe_base = 3, /* IRAM, but subtract IRAM offset */
@@ -66,5 +233,7 @@ static struct pci_driver snd_sof_pci_intel_tng_driver = {
module_pci_driver(snd_sof_pci_intel_tng_driver);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_IMPORT_NS(SND_SOC_SOF_MERRIFIELD);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
index 6efaf766f2ab..2b38a77cd594 100644
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -517,7 +517,7 @@ int sof_fw_ready(struct snd_sof_dev *sdev, u32 msg_id)
return 0;
/* copy data from the DSP FW ready offset */
- sof_block_read(sdev, bar, offset, fw_ready, sizeof(*fw_ready));
+ snd_sof_dsp_block_read(sdev, bar, offset, fw_ready, sizeof(*fw_ready));
/* make sure ABI version is compatible */
ret = snd_sof_ipc_valid(sdev);
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index 323a0b3f561b..4a5d6e497f05 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -244,13 +244,13 @@ snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
{
if (sof_ops(sdev)->dbg_dump)
- return sof_ops(sdev)->dbg_dump(sdev, flags);
+ sof_ops(sdev)->dbg_dump(sdev, flags);
}
static inline void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
{
if (sof_ops(sdev)->ipc_dump)
- return sof_ops(sdev)->ipc_dump(sdev);
+ sof_ops(sdev)->ipc_dump(sdev);
}
/* register IO */
@@ -546,14 +546,16 @@ static inline const struct snd_sof_dsp_ops
(val) = snd_sof_dsp_read(sdev, bar, offset); \
if (cond) { \
dev_dbg(sdev->dev, \
- "FW Poll Status: reg=%#x successful\n", (val)); \
+ "FW Poll Status: reg[%#x]=%#x successful\n", \
+ (offset), (val)); \
break; \
} \
if (__timeout_us && \
ktime_compare(ktime_get(), __timeout) > 0) { \
(val) = snd_sof_dsp_read(sdev, bar, offset); \
dev_dbg(sdev->dev, \
- "FW Poll Status: reg=%#x timedout\n", (val)); \
+ "FW Poll Status: reg[%#x]=%#x timedout\n", \
+ (offset), (val)); \
break; \
} \
if (__sleep_us) \
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
index 7fbf09f9f17e..74982c04497b 100644
--- a/sound/soc/sof/sof-acpi-dev.c
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -60,7 +60,6 @@ int sof_acpi_probe(struct platform_device *pdev, const struct sof_dev_desc *desc
{
struct device *dev = &pdev->dev;
struct snd_sof_pdata *sof_pdata;
- const struct snd_sof_dsp_ops *ops;
dev_dbg(dev, "ACPI DSP detected");
@@ -68,9 +67,7 @@ int sof_acpi_probe(struct platform_device *pdev, const struct sof_dev_desc *desc
if (!sof_pdata)
return -ENOMEM;
- /* get ops for platform */
- ops = desc->ops;
- if (!ops) {
+ if (!desc->ops) {
dev_err(dev, "error: no matching ACPI descriptor ops\n");
return -ENODEV;
}
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
index c9c70645b377..d1a21edfa05d 100644
--- a/sound/soc/sof/sof-of-dev.c
+++ b/sound/soc/sof/sof-of-dev.c
@@ -70,7 +70,6 @@ static int sof_of_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct sof_dev_desc *desc;
struct snd_sof_pdata *sof_pdata;
- const struct snd_sof_dsp_ops *ops;
dev_info(&pdev->dev, "DT DSP detected");
@@ -82,9 +81,7 @@ static int sof_of_probe(struct platform_device *pdev)
if (!desc)
return -ENODEV;
- /* get ops for platform */
- ops = desc->ops;
- if (!ops) {
+ if (!desc->ops) {
dev_err(dev, "error: no matching DT descriptor ops\n");
return -ENODEV;
}
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 3489dc1b48f6..03119462f9e2 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -116,14 +116,11 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
const struct sof_dev_desc *desc =
(const struct sof_dev_desc *)pci_id->driver_data;
struct snd_sof_pdata *sof_pdata;
- const struct snd_sof_dsp_ops *ops;
int ret;
dev_dbg(&pci->dev, "PCI DSP detected");
- /* get ops for platform */
- ops = desc->ops;
- if (!ops) {
+ if (!desc->ops) {
dev_err(dev, "error: no matching PCI descriptor ops\n");
return -ENODEV;
}
@@ -141,7 +138,7 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return ret;
sof_pdata->name = pci_name(pci);
- sof_pdata->desc = (struct sof_dev_desc *)pci_id->driver_data;
+ sof_pdata->desc = desc;
sof_pdata->dev = dev;
sof_pdata->fw_filename = desc->default_fw_filename;
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 59abcfc9bd55..cc9585bfa4e9 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -1063,6 +1063,7 @@ static int sof_control_load_volume(struct snd_soc_component *scomp,
scontrol->min_volume_step = le32_to_cpu(mc->min);
scontrol->max_volume_step = le32_to_cpu(mc->max);
scontrol->num_channels = le32_to_cpu(mc->num_channels);
+ scontrol->control_data->index = kc->index;
/* set cmd for mixer control */
if (le32_to_cpu(mc->max) == 1) {
@@ -1140,7 +1141,7 @@ static int sof_control_load_enum(struct snd_soc_component *scomp,
scontrol->comp_id = sdev->next_comp_id;
scontrol->num_channels = le32_to_cpu(ec->num_channels);
-
+ scontrol->control_data->index = kc->index;
scontrol->cmd = SOF_CTRL_CMD_ENUM;
dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
@@ -1188,6 +1189,7 @@ static int sof_control_load_bytes(struct snd_soc_component *scomp,
scontrol->comp_id = sdev->next_comp_id;
scontrol->cmd = SOF_CTRL_CMD_BINARY;
+ scontrol->control_data->index = kc->index;
dev_dbg(scomp->dev, "tplg: load kcontrol index %d chans %d\n",
scontrol->comp_id, scontrol->num_channels);
@@ -2133,7 +2135,7 @@ static int sof_get_control_data(struct snd_soc_component *scomp,
for (i = 0; i < widget->num_kcontrols; i++) {
kc = &widget->kcontrol_news[i];
- switch (widget->dobj.widget.kcontrol_type) {
+ switch (widget->dobj.widget.kcontrol_type[i]) {
case SND_SOC_TPLG_TYPE_MIXER:
sm = (struct soc_mixer_control *)kc->private_value;
wdata[i].control = sm->dobj.private;
@@ -2147,8 +2149,8 @@ static int sof_get_control_data(struct snd_soc_component *scomp,
wdata[i].control = se->dobj.private;
break;
default:
- dev_err(scomp->dev, "error: unknown kcontrol type %d in widget %s\n",
- widget->dobj.widget.kcontrol_type,
+ dev_err(scomp->dev, "error: unknown kcontrol type %u in widget %s\n",
+ widget->dobj.widget.kcontrol_type[i],
widget->name);
return -EINVAL;
}
@@ -2164,7 +2166,8 @@ static int sof_get_control_data(struct snd_soc_component *scomp,
return -EINVAL;
/* make sure data is valid - data can be updated at runtime */
- if (wdata[i].pdata->magic != SOF_ABI_MAGIC)
+ if (widget->dobj.widget.kcontrol_type[i] == SND_SOC_TPLG_TYPE_BYTES &&
+ wdata[i].pdata->magic != SOF_ABI_MAGIC)
return -EINVAL;
*size += wdata[i].pdata->size;
@@ -2605,7 +2608,7 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
}
for (i = 0; i < widget->num_kcontrols; i++) {
kc = &widget->kcontrol_news[i];
- switch (dobj->widget.kcontrol_type) {
+ switch (widget->dobj.widget.kcontrol_type[i]) {
case SND_SOC_TPLG_TYPE_MIXER:
sm = (struct soc_mixer_control *)kc->private_value;
scontrol = sm->dobj.private;
@@ -3335,7 +3338,7 @@ static int sof_link_load(struct snd_soc_component *scomp, int index,
/* Copy common data to all config ipc structs */
for (i = 0; i < num_conf; i++) {
config[i].hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
- config[i].format = hw_config[i].fmt;
+ config[i].format = le32_to_cpu(hw_config[i].fmt);
config[i].type = common_config.type;
config[i].dai_index = common_config.dai_index;
}
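
Wrapping hw_config[i].fmt in le32_to_cpu() matters because topology data is stored little-endian on disk (the struct fields are __le32), while the IPC structs hold CPU-native integers; without the conversion the DAI format would be byte-swapped on big-endian hosts. A user-space sketch of the conversion, with a local helper standing in for the kernel's le32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* decode a little-endian 32-bit value byte by byte; correct on any host */
static uint32_t my_le32_to_cpu(uint32_t le)
{
        const uint8_t *b = (const uint8_t *)&le;

        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        uint8_t raw[4] = { 0x03, 0x00, 0x00, 0x00 };    /* fmt = 3, as stored on disk */
        uint32_t on_disk;

        memcpy(&on_disk, raw, sizeof(on_disk));
        printf("fmt = %u\n", my_le32_to_cpu(on_disk));  /* 3 on LE and BE hosts */
        return 0;
}
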
diff --git a/sound/soc/spear/spdif_out.c b/sound/soc/spear/spdif_out.c
index 38f9fff5be6b..549295a6ed50 100644
--- a/sound/soc/spear/spdif_out.c
+++ b/sound/soc/spear/spdif_out.c
@@ -287,8 +287,7 @@ static int spdif_out_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, res);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
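
This and the following conversions replace the two-step platform_get_resource() + devm_ioremap_resource() pattern with a single call that also hands back the struct resource when the caller still wants it. A sketch of roughly what the helper does, assuming the usual platform-device API; this is not the kernel's exact implementation:

#include <linux/io.h>
#include <linux/platform_device.h>

void __iomem *example_get_and_ioremap(struct platform_device *pdev,
                                      unsigned int index,
                                      struct resource **res)
{
        struct resource *r;

        /* look up the MEM resource and optionally report it back to the caller */
        r = platform_get_resource(pdev, IORESOURCE_MEM, index);
        if (res)
                *res = r;

        /* request and map it with device-managed lifetime */
        return devm_ioremap_resource(&pdev->dev, r);
}
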
diff --git a/sound/soc/sprd/sprd-mcdt.c b/sound/soc/sprd/sprd-mcdt.c
index 34b2ce733b54..f6a55fa60c1b 100644
--- a/sound/soc/sprd/sprd-mcdt.c
+++ b/sound/soc/sprd/sprd-mcdt.c
@@ -949,8 +949,7 @@ static int sprd_mcdt_probe(struct platform_device *pdev)
if (!mcdt)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mcdt->base = devm_ioremap_resource(&pdev->dev, res);
+ mcdt->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mcdt->base))
return PTR_ERR(mcdt->base);
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index e3561f00ed40..34668fe3909d 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -410,16 +410,8 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
*dai = sti_uniperiph_dai_template;
dai->name = dev_data->dai_names;
- /* Get resources */
- uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
-
- if (!uni->mem_region) {
- dev_err(dev, "Failed to get memory resource\n");
- return -ENODEV;
- }
-
- uni->base = devm_ioremap_resource(dev, uni->mem_region);
-
+ /* Get resources and base address */
+ uni->base = devm_platform_get_and_ioremap_resource(priv->pdev, 0, &uni->mem_region);
if (IS_ERR(uni->base))
return PTR_ERR(uni->base);
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index 7d1672cf78cc..6254bacad6eb 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -1036,8 +1036,7 @@ static int stm32_i2s_parse_dt(struct platform_device *pdev,
else
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2s->base = devm_ioremap_resource(&pdev->dev, res);
+ i2s->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2s->base))
return PTR_ERR(i2s->base);
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 3aa1cf262402..9c3b8e209656 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1361,8 +1361,7 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
if (!np)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index 1bfa3b2ba974..48145f553588 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -922,8 +922,7 @@ static int stm32_spdifrx_parse_of(struct platform_device *pdev,
else
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spdifrx->base = devm_ioremap_resource(&pdev->dev, res);
+ spdifrx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spdifrx->base))
return PTR_ERR(spdifrx->base);
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 6f3d9148a185..da597e456beb 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1709,8 +1709,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
scodec->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index c57feae3396e..1e9116cd365e 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -1470,8 +1470,7 @@ static int sun4i_i2s_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, i2s);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 228485fe0734..a10949bf0ca1 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -518,8 +518,7 @@ static int sun4i_spdif_probe(struct platform_device *pdev)
host->cpu_dai_drv.name = dev_name(&pdev->dev);
/* Get the addresses */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index a4e6760944d0..83c87f35a7d3 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -117,9 +117,13 @@ config SND_SOC_TEGRA_AUDIO_GRAPH_CARD
few things for Tegra audio. Most of the code is re-used from
audio graph driver and the same DT bindings are used.
+config SND_SOC_TEGRA_MACHINE_DRV
+ tristate
+
config SND_SOC_TEGRA_RT5640
tristate "SoC Audio support for Tegra boards using an RT5640 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_RT5640
help
Say Y or M here if you want to add support for SoC audio on Tegra
@@ -128,6 +132,7 @@ config SND_SOC_TEGRA_RT5640
config SND_SOC_TEGRA_WM8753
tristate "SoC Audio support for Tegra boards using a WM8753 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_WM8753
help
Say Y or M here if you want to add support for SoC audio on Tegra
@@ -136,6 +141,7 @@ config SND_SOC_TEGRA_WM8753
config SND_SOC_TEGRA_WM8903
tristate "SoC Audio support for Tegra boards using a WM8903 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_WM8903
help
Say Y or M here if you want to add support for SoC audio on Tegra
@@ -145,6 +151,7 @@ config SND_SOC_TEGRA_WM8903
config SND_SOC_TEGRA_WM9712
tristate "SoC Audio support for Tegra boards using a WM9712 codec"
depends on GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_TEGRA20_AC97
select SND_SOC_WM9712
help
@@ -154,6 +161,7 @@ config SND_SOC_TEGRA_WM9712
config SND_SOC_TEGRA_TRIMSLICE
tristate "SoC Audio support for TrimSlice board"
depends on I2C
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_TLV320AIC23_I2C
help
Say Y or M here if you want to add support for SoC audio on the
@@ -162,6 +170,7 @@ config SND_SOC_TEGRA_TRIMSLICE
config SND_SOC_TEGRA_ALC5632
tristate "SoC Audio support for Tegra boards using an ALC5632 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_ALC5632
help
Say Y or M here if you want to add support for SoC audio on the
@@ -170,6 +179,7 @@ config SND_SOC_TEGRA_ALC5632
config SND_SOC_TEGRA_MAX98090
tristate "SoC Audio support for Tegra boards using a MAX98090 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_MAX98090
help
Say Y or M here if you want to add support for SoC audio on Tegra
@@ -178,6 +188,7 @@ config SND_SOC_TEGRA_MAX98090
config SND_SOC_TEGRA_RT5677
tristate "SoC Audio support for Tegra boards using a RT5677 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_RT5677
help
Say Y or M here if you want to add support for SoC audio on Tegra
@@ -186,6 +197,7 @@ config SND_SOC_TEGRA_RT5677
config SND_SOC_TEGRA_SGTL5000
tristate "SoC Audio support for Tegra boards using a SGTL5000 codec"
depends on I2C && GPIOLIB
+ select SND_SOC_TEGRA_MACHINE_DRV
select SND_SOC_SGTL5000
help
Say Y or M here if you want to add support for SoC audio on Tegra
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
index b17dd6eef92a..e2cec9ae31c9 100644
--- a/sound/soc/tegra/Makefile
+++ b/sound/soc/tegra/Makefile
@@ -15,7 +15,6 @@ snd-soc-tegra186-dspk-objs := tegra186_dspk.o
snd-soc-tegra210-admaif-objs := tegra210_admaif.o
obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-pcm.o
-obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-utils.o
obj-$(CONFIG_SND_SOC_TEGRA20_AC97) += snd-soc-tegra20-ac97.o
obj-$(CONFIG_SND_SOC_TEGRA20_DAS) += snd-soc-tegra20-das.o
obj-$(CONFIG_SND_SOC_TEGRA20_I2S) += snd-soc-tegra20-i2s.o
@@ -29,24 +28,10 @@ obj-$(CONFIG_SND_SOC_TEGRA186_DSPK) += snd-soc-tegra186-dspk.o
obj-$(CONFIG_SND_SOC_TEGRA210_ADMAIF) += snd-soc-tegra210-admaif.o
# Tegra machine Support
-snd-soc-tegra-rt5640-objs := tegra_rt5640.o
-snd-soc-tegra-rt5677-objs := tegra_rt5677.o
-snd-soc-tegra-wm8753-objs := tegra_wm8753.o
snd-soc-tegra-wm8903-objs := tegra_wm8903.o
-snd-soc-tegra-wm9712-objs := tegra_wm9712.o
-snd-soc-tegra-trimslice-objs := trimslice.o
-snd-soc-tegra-alc5632-objs := tegra_alc5632.o
-snd-soc-tegra-max98090-objs := tegra_max98090.o
-snd-soc-tegra-sgtl5000-objs := tegra_sgtl5000.o
+snd-soc-tegra-machine-objs := tegra_asoc_machine.o
snd-soc-tegra-audio-graph-card-objs := tegra_audio_graph_card.o
-obj-$(CONFIG_SND_SOC_TEGRA_RT5640) += snd-soc-tegra-rt5640.o
-obj-$(CONFIG_SND_SOC_TEGRA_RT5677) += snd-soc-tegra-rt5677.o
-obj-$(CONFIG_SND_SOC_TEGRA_WM8753) += snd-soc-tegra-wm8753.o
obj-$(CONFIG_SND_SOC_TEGRA_WM8903) += snd-soc-tegra-wm8903.o
-obj-$(CONFIG_SND_SOC_TEGRA_WM9712) += snd-soc-tegra-wm9712.o
-obj-$(CONFIG_SND_SOC_TEGRA_TRIMSLICE) += snd-soc-tegra-trimslice.o
-obj-$(CONFIG_SND_SOC_TEGRA_ALC5632) += snd-soc-tegra-alc5632.o
-obj-$(CONFIG_SND_SOC_TEGRA_MAX98090) += snd-soc-tegra-max98090.o
-obj-$(CONFIG_SND_SOC_TEGRA_SGTL5000) += snd-soc-tegra-sgtl5000.o
+obj-$(CONFIG_SND_SOC_TEGRA_MACHINE_DRV) += snd-soc-tegra-machine.o
obj-$(CONFIG_SND_SOC_TEGRA_AUDIO_GRAPH_CARD) += snd-soc-tegra-audio-graph-card.o
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index b280ebd72591..266d2cab9f49 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -377,8 +377,7 @@ static int tegra20_i2s_platform_probe(struct platform_device *pdev)
goto err;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, mem);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto err;
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index de698ff2a69c..7751575cd6d6 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -269,8 +269,7 @@ static int tegra20_spdif_platform_probe(struct platform_device *pdev)
return ret;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, mem);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c
index 1268046b345d..0f9beef429a2 100644
--- a/sound/soc/tegra/tegra210_admaif.c
+++ b/sound/soc/tegra/tegra210_admaif.c
@@ -706,9 +706,7 @@ static int tegra_admaif_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 4692c70ed933..b3e1df693381 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -550,8 +550,7 @@ static int tegra30_ahub_probe(struct platform_device *pdev)
goto err_unset_ahub;
}
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs_apbif = devm_ioremap_resource(&pdev->dev, res0);
+ regs_apbif = devm_platform_get_and_ioremap_resource(pdev, 0, &res0);
if (IS_ERR(regs_apbif)) {
ret = PTR_ERR(regs_apbif);
goto err_unset_ahub;
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
deleted file mode 100644
index 0a0efd24e4b0..000000000000
--- a/sound/soc/tegra/tegra_alc5632.c
+++ /dev/null
@@ -1,259 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
-* tegra_alc5632.c -- Toshiba AC100(PAZ00) machine ASoC driver
- *
- * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
- * Copyright (C) 2012 - NVIDIA, Inc.
- *
- * Authors: Leon Romanovsky <leon@leon.nu>
- * Andrey Danin <danindrey@mail.ru>
- * Marc Dietrich <marvin24@gmx.de>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "../codecs/alc5632.h"
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-alc5632"
-
-struct tegra_alc5632 {
- struct tegra_asoc_utils_data util_data;
- int gpio_hp_det;
-};
-
-static int tegra_alc5632_asoc_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_alc5632 *alc5632 = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
-
- srate = params_rate(params);
- mclk = 512 * srate;
-
- err = tegra_asoc_utils_set_rate(&alc5632->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static const struct snd_soc_ops tegra_alc5632_asoc_ops = {
- .hw_params = tegra_alc5632_asoc_hw_params,
-};
-
-static struct snd_soc_jack tegra_alc5632_hs_jack;
-
-static struct snd_soc_jack_pin tegra_alc5632_hs_jack_pins[] = {
- {
- .pin = "Headset Mic",
- .mask = SND_JACK_MICROPHONE,
- },
- {
- .pin = "Headset Stereophone",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-
-static struct snd_soc_jack_gpio tegra_alc5632_hp_jack_gpio = {
- .name = "Headset detection",
- .report = SND_JACK_HEADSET,
- .debounce_time = 150,
-};
-
-static const struct snd_soc_dapm_widget tegra_alc5632_dapm_widgets[] = {
- SND_SOC_DAPM_SPK("Int Spk", NULL),
- SND_SOC_DAPM_HP("Headset Stereophone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_MIC("Digital Mic", NULL),
-};
-
-static const struct snd_kcontrol_new tegra_alc5632_controls[] = {
- SOC_DAPM_PIN_SWITCH("Int Spk"),
-};
-
-static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd)
-{
- int ret;
- struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(rtd->card);
-
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET,
- &tegra_alc5632_hs_jack,
- tegra_alc5632_hs_jack_pins,
- ARRAY_SIZE(tegra_alc5632_hs_jack_pins));
- if (ret)
- return ret;
-
- if (gpio_is_valid(machine->gpio_hp_det)) {
- tegra_alc5632_hp_jack_gpio.gpio = machine->gpio_hp_det;
- snd_soc_jack_add_gpios(&tegra_alc5632_hs_jack,
- 1,
- &tegra_alc5632_hp_jack_gpio);
- }
-
- snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "MICBIAS1");
-
- return 0;
-}
-
-SND_SOC_DAILINK_DEFS(pcm,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "alc5632-hifi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_alc5632_dai = {
- .name = "ALC5632",
- .stream_name = "ALC5632 PCM",
- .init = tegra_alc5632_asoc_init,
- .ops = &tegra_alc5632_asoc_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(pcm),
-};
-
-static struct snd_soc_card snd_soc_tegra_alc5632 = {
- .name = "tegra-alc5632",
- .owner = THIS_MODULE,
- .dai_link = &tegra_alc5632_dai,
- .num_links = 1,
- .controls = tegra_alc5632_controls,
- .num_controls = ARRAY_SIZE(tegra_alc5632_controls),
- .dapm_widgets = tegra_alc5632_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_alc5632_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_alc5632_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_alc5632;
- struct tegra_alc5632 *alc5632;
- int ret;
-
- alc5632 = devm_kzalloc(&pdev->dev,
- sizeof(struct tegra_alc5632), GFP_KERNEL);
- if (!alc5632)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, alc5632);
-
- alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (alc5632->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- goto err;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- goto err;
-
- tegra_alc5632_dai.codecs->of_node = of_parse_phandle(
- pdev->dev.of_node, "nvidia,audio-codec", 0);
-
- if (!tegra_alc5632_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
- }
-
- tegra_alc5632_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_alc5632_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err_put_codec_of_node;
- }
-
- tegra_alc5632_dai.platforms->of_node = tegra_alc5632_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&alc5632->util_data, &pdev->dev);
- if (ret)
- goto err_put_cpu_of_node;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
- goto err_put_cpu_of_node;
- }
-
- return 0;
-
-err_put_cpu_of_node:
- of_node_put(tegra_alc5632_dai.cpus->of_node);
- tegra_alc5632_dai.cpus->of_node = NULL;
- tegra_alc5632_dai.platforms->of_node = NULL;
-err_put_codec_of_node:
- of_node_put(tegra_alc5632_dai.codecs->of_node);
- tegra_alc5632_dai.codecs->of_node = NULL;
-err:
- return ret;
-}
-
-static int tegra_alc5632_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- of_node_put(tegra_alc5632_dai.cpus->of_node);
- tegra_alc5632_dai.cpus->of_node = NULL;
- tegra_alc5632_dai.platforms->of_node = NULL;
- of_node_put(tegra_alc5632_dai.codecs->of_node);
- tegra_alc5632_dai.codecs->of_node = NULL;
-
- return 0;
-}
-
-static const struct of_device_id tegra_alc5632_of_match[] = {
- { .compatible = "nvidia,tegra-audio-alc5632", },
- {},
-};
-
-static struct platform_driver tegra_alc5632_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_alc5632_of_match,
- },
- .probe = tegra_alc5632_probe,
- .remove = tegra_alc5632_remove,
-};
-module_platform_driver(tegra_alc5632_driver);
-
-MODULE_AUTHOR("Leon Romanovsky <leon@leon.nu>");
-MODULE_DESCRIPTION("Tegra+ALC5632 machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_alc5632_of_match);
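The deleted driver above located its headset-detect GPIO by number with of_get_named_gpio() and patched it into snd_soc_jack_gpio.gpio. The replacement machine driver added below switches to GPIO descriptors and hands them to the jack layer through the .desc field instead. A reduced sketch of that descriptor-based flow, assuming the descriptor was obtained earlier with devm_gpiod_get_optional(); the hp_* names are illustrative, not symbols from the patch:

#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <sound/jack.h>
#include <sound/soc.h>

static struct snd_soc_jack hp_jack;

static struct snd_soc_jack_pin hp_jack_pins[] = {
	{ .pin = "Headphone", .mask = SND_JACK_HEADPHONE },
};

static struct snd_soc_jack_gpio hp_jack_gpio = {
	.name = "Headphone detection",
	.report = SND_JACK_HEADPHONE,
	.debounce_time = 150,
};

int hp_jack_init(struct snd_soc_pcm_runtime *rtd, struct gpio_desc *gpiod_hp_det)
{
	int err;

	err = snd_soc_card_jack_new(rtd->card, "Headphone Jack",
				    SND_JACK_HEADPHONE, &hp_jack,
				    hp_jack_pins, ARRAY_SIZE(hp_jack_pins));
	if (err)
		return err;

	/* Descriptor-based hookup: no GPIO numbers, no OF flag handling */
	hp_jack_gpio.desc = gpiod_hp_det;

	return snd_soc_jack_add_gpios(&hp_jack, 1, &hp_jack_gpio);
}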
diff --git a/sound/soc/tegra/tegra_asoc_machine.c b/sound/soc/tegra/tegra_asoc_machine.c
new file mode 100644
index 000000000000..735909310a26
--- /dev/null
+++ b/sound/soc/tegra/tegra_asoc_machine.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * tegra_asoc_machine.c - Universal ASoC machine driver for NVIDIA Tegra boards.
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra_asoc_machine.h"
+
+/* Headphones Jack */
+
+static struct snd_soc_jack tegra_machine_hp_jack;
+
+static struct snd_soc_jack_pin tegra_machine_hp_jack_pins[] = {
+ { .pin = "Headphone", .mask = SND_JACK_HEADPHONE },
+ { .pin = "Headphones", .mask = SND_JACK_HEADPHONE },
+};
+
+static struct snd_soc_jack_gpio tegra_machine_hp_jack_gpio = {
+ .name = "Headphones detection",
+ .report = SND_JACK_HEADPHONE,
+ .debounce_time = 150,
+};
+
+/* Headset Jack */
+
+static struct snd_soc_jack tegra_machine_headset_jack;
+
+static struct snd_soc_jack_pin tegra_machine_headset_jack_pins[] = {
+ { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE },
+ { .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE },
+};
+
+static struct snd_soc_jack_gpio tegra_machine_headset_jack_gpio = {
+ .name = "Headset detection",
+ .report = SND_JACK_HEADSET,
+ .debounce_time = 150,
+};
+
+/* Mic Jack */
+
+static struct snd_soc_jack tegra_machine_mic_jack;
+
+static struct snd_soc_jack_pin tegra_machine_mic_jack_pins[] = {
+ { .pin = "Mic Jack", .mask = SND_JACK_MICROPHONE },
+ { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE },
+};
+
+static struct snd_soc_jack_gpio tegra_machine_mic_jack_gpio = {
+ .name = "Mic detection",
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 150,
+};
+
+static int tegra_machine_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct tegra_machine *machine = snd_soc_card_get_drvdata(dapm->card);
+
+ if (!strcmp(w->name, "Int Spk") || !strcmp(w->name, "Speakers"))
+ gpiod_set_value_cansleep(machine->gpiod_spkr_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ if (!strcmp(w->name, "Mic Jack"))
+ gpiod_set_value_cansleep(machine->gpiod_ext_mic_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ if (!strcmp(w->name, "Int Mic"))
+ gpiod_set_value_cansleep(machine->gpiod_int_mic_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ if (!strcmp(w->name, "Headphone") || !strcmp(w->name, "Headphone Jack"))
+ gpiod_set_value_cansleep(machine->gpiod_hp_mute,
+ !SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget tegra_machine_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", tegra_machine_event),
+ SND_SOC_DAPM_HP("Headphone", tegra_machine_event),
+ SND_SOC_DAPM_HP("Headset Stereophone", NULL),
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_SPK("Speakers", tegra_machine_event),
+ SND_SOC_DAPM_SPK("Int Spk", tegra_machine_event),
+ SND_SOC_DAPM_MIC("Int Mic", tegra_machine_event),
+ SND_SOC_DAPM_MIC("Mic Jack", tegra_machine_event),
+ SND_SOC_DAPM_MIC("Internal Mic 1", NULL),
+ SND_SOC_DAPM_MIC("Internal Mic 2", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Digital Mic", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+ SND_SOC_DAPM_LINE("LineIn", NULL),
+};
+
+static const struct snd_kcontrol_new tegra_machine_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("Int Spk"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
+ SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
+};
+
+int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_machine *machine = snd_soc_card_get_drvdata(card);
+ int err;
+
+ if (machine->gpiod_hp_det && machine->asoc->add_hp_jack) {
+ err = snd_soc_card_jack_new(card, "Headphones Jack",
+ SND_JACK_HEADPHONE,
+ &tegra_machine_hp_jack,
+ tegra_machine_hp_jack_pins,
+ ARRAY_SIZE(tegra_machine_hp_jack_pins));
+ if (err) {
+ dev_err(rtd->dev,
+ "Headphones Jack creation failed: %d\n", err);
+ return err;
+ }
+
+ tegra_machine_hp_jack_gpio.desc = machine->gpiod_hp_det;
+
+ err = snd_soc_jack_add_gpios(&tegra_machine_hp_jack, 1,
+ &tegra_machine_hp_jack_gpio);
+ if (err)
+ dev_err(rtd->dev, "HP GPIOs not added: %d\n", err);
+ }
+
+ if (machine->gpiod_hp_det && machine->asoc->add_headset_jack) {
+ err = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADSET,
+ &tegra_machine_headset_jack,
+ tegra_machine_headset_jack_pins,
+ ARRAY_SIZE(tegra_machine_headset_jack_pins));
+ if (err) {
+ dev_err(rtd->dev,
+ "Headset Jack creation failed: %d\n", err);
+ return err;
+ }
+
+ tegra_machine_headset_jack_gpio.desc = machine->gpiod_hp_det;
+
+ err = snd_soc_jack_add_gpios(&tegra_machine_headset_jack, 1,
+ &tegra_machine_headset_jack_gpio);
+ if (err)
+ dev_err(rtd->dev, "Headset GPIOs not added: %d\n", err);
+ }
+
+ if (machine->gpiod_mic_det && machine->asoc->add_mic_jack) {
+ err = snd_soc_card_jack_new(rtd->card, "Mic Jack",
+ SND_JACK_MICROPHONE,
+ &tegra_machine_mic_jack,
+ tegra_machine_mic_jack_pins,
+ ARRAY_SIZE(tegra_machine_mic_jack_pins));
+ if (err) {
+ dev_err(rtd->dev, "Mic Jack creation failed: %d\n", err);
+ return err;
+ }
+
+ tegra_machine_mic_jack_gpio.desc = machine->gpiod_mic_det;
+
+ err = snd_soc_jack_add_gpios(&tegra_machine_mic_jack, 1,
+ &tegra_machine_mic_jack_gpio);
+ if (err)
+ dev_err(rtd->dev, "Mic GPIOs not added: %d\n", err);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_machine_init);
+
+static unsigned int tegra_machine_mclk_rate_128(unsigned int srate)
+{
+ return 128 * srate;
+}
+
+static unsigned int tegra_machine_mclk_rate_256(unsigned int srate)
+{
+ return 256 * srate;
+}
+
+static unsigned int tegra_machine_mclk_rate_512(unsigned int srate)
+{
+ return 512 * srate;
+}
+
+static unsigned int tegra_machine_mclk_rate_12mhz(unsigned int srate)
+{
+ unsigned int mclk;
+
+ switch (srate) {
+ case 8000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ mclk = 12000000;
+ break;
+ }
+
+ return mclk;
+}
+
+static int tegra_machine_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_machine *machine = snd_soc_card_get_drvdata(card);
+ unsigned int srate = params_rate(params);
+ unsigned int mclk = machine->asoc->mclk_rate(srate);
+ unsigned int clk_id = machine->asoc->mclk_id;
+ unsigned int new_baseclock;
+ int err;
+
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ new_baseclock = 56448000;
+ else if (of_machine_is_compatible("nvidia,tegra30"))
+ new_baseclock = 564480000;
+ else
+ new_baseclock = 282240000;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ new_baseclock = 73728000;
+ else if (of_machine_is_compatible("nvidia,tegra30"))
+ new_baseclock = 552960000;
+ else
+ new_baseclock = 368640000;
+ break;
+ default:
+ dev_err(card->dev, "Invalid sound rate: %u\n", srate);
+ return -EINVAL;
+ }
+
+ if (new_baseclock != machine->set_baseclock ||
+ mclk != machine->set_mclk) {
+ machine->set_baseclock = 0;
+ machine->set_mclk = 0;
+
+ clk_disable_unprepare(machine->clk_cdev1);
+
+ err = clk_set_rate(machine->clk_pll_a, new_baseclock);
+ if (err) {
+ dev_err(card->dev, "Can't set pll_a rate: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(machine->clk_pll_a_out0, mclk);
+ if (err) {
+ dev_err(card->dev, "Can't set pll_a_out0 rate: %d\n", err);
+ return err;
+ }
+
+ /* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
+
+ err = clk_prepare_enable(machine->clk_cdev1);
+ if (err) {
+ dev_err(card->dev, "Can't enable cdev1: %d\n", err);
+ return err;
+ }
+
+ machine->set_baseclock = new_baseclock;
+ machine->set_mclk = mclk;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, clk_id, mclk, SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops tegra_machine_snd_ops = {
+ .hw_params = tegra_machine_hw_params,
+};
+
+static void tegra_machine_node_release(void *of_node)
+{
+ of_node_put(of_node);
+}
+
+static struct device_node *
+tegra_machine_parse_phandle(struct device *dev, const char *name)
+{
+ struct device_node *np;
+ int err;
+
+ np = of_parse_phandle(dev->of_node, name, 0);
+ if (!np) {
+ dev_err(dev, "Property '%s' missing or invalid\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = devm_add_action_or_reset(dev, tegra_machine_node_release, np);
+ if (err)
+ return ERR_PTR(err);
+
+ return np;
+}
+
+int tegra_asoc_machine_probe(struct platform_device *pdev)
+{
+ struct device_node *np_codec, *np_i2s;
+ const struct tegra_asoc_data *asoc;
+ struct device *dev = &pdev->dev;
+ struct tegra_machine *machine;
+ struct snd_soc_card *card;
+ struct gpio_desc *gpiod;
+ int err;
+
+ machine = devm_kzalloc(dev, sizeof(*machine), GFP_KERNEL);
+ if (!machine)
+ return -ENOMEM;
+
+ asoc = of_device_get_match_data(dev);
+ card = asoc->card;
+ card->dev = dev;
+
+ machine->asoc = asoc;
+ machine->mic_jack = &tegra_machine_mic_jack;
+ machine->hp_jack_gpio = &tegra_machine_hp_jack_gpio;
+ snd_soc_card_set_drvdata(card, machine);
+
+ gpiod = devm_gpiod_get_optional(dev, "nvidia,hp-mute", GPIOD_OUT_HIGH);
+ machine->gpiod_hp_mute = gpiod;
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod = devm_gpiod_get_optional(dev, "nvidia,hp-det", GPIOD_IN);
+ machine->gpiod_hp_det = gpiod;
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod = devm_gpiod_get_optional(dev, "nvidia,mic-det", GPIOD_IN);
+ machine->gpiod_mic_det = gpiod;
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod = devm_gpiod_get_optional(dev, "nvidia,spkr-en", GPIOD_OUT_LOW);
+ machine->gpiod_spkr_en = gpiod;
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod = devm_gpiod_get_optional(dev, "nvidia,int-mic-en", GPIOD_OUT_LOW);
+ machine->gpiod_int_mic_en = gpiod;
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ gpiod = devm_gpiod_get_optional(dev, "nvidia,ext-mic-en", GPIOD_OUT_LOW);
+ machine->gpiod_ext_mic_en = gpiod;
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ err = snd_soc_of_parse_card_name(card, "nvidia,model");
+ if (err)
+ return err;
+
+ if (!card->dapm_routes) {
+ err = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
+ if (err)
+ return err;
+ }
+
+ np_codec = tegra_machine_parse_phandle(dev, "nvidia,audio-codec");
+ if (IS_ERR(np_codec))
+ return PTR_ERR(np_codec);
+
+ np_i2s = tegra_machine_parse_phandle(dev, "nvidia,i2s-controller");
+ if (IS_ERR(np_i2s))
+ return PTR_ERR(np_i2s);
+
+ card->dai_link->cpus->of_node = np_i2s;
+ card->dai_link->codecs->of_node = np_codec;
+ card->dai_link->platforms->of_node = np_i2s;
+
+ if (asoc->add_common_controls) {
+ card->controls = tegra_machine_controls;
+ card->num_controls = ARRAY_SIZE(tegra_machine_controls);
+ }
+
+ if (asoc->add_common_dapm_widgets) {
+ card->dapm_widgets = tegra_machine_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(tegra_machine_dapm_widgets);
+ }
+
+ if (asoc->add_common_snd_ops)
+ card->dai_link->ops = &tegra_machine_snd_ops;
+
+ if (!card->owner)
+ card->owner = THIS_MODULE;
+ if (!card->driver_name)
+ card->driver_name = "tegra";
+
+ machine->clk_pll_a = devm_clk_get(dev, "pll_a");
+ if (IS_ERR(machine->clk_pll_a)) {
+ dev_err(dev, "Can't retrieve clk pll_a\n");
+ return PTR_ERR(machine->clk_pll_a);
+ }
+
+ machine->clk_pll_a_out0 = devm_clk_get(dev, "pll_a_out0");
+ if (IS_ERR(machine->clk_pll_a_out0)) {
+ dev_err(dev, "Can't retrieve clk pll_a_out0\n");
+ return PTR_ERR(machine->clk_pll_a_out0);
+ }
+
+ machine->clk_cdev1 = devm_clk_get(dev, "mclk");
+ if (IS_ERR(machine->clk_cdev1)) {
+ dev_err(dev, "Can't retrieve clk cdev1\n");
+ return PTR_ERR(machine->clk_cdev1);
+ }
+
+ /*
+ * If clock parents are not set in DT, configure here to use clk_out_1
+ * as mclk and extern1 as parent for Tegra30 and higher.
+ */
+ if (!of_find_property(dev->of_node, "assigned-clock-parents", NULL) &&
+ !of_machine_is_compatible("nvidia,tegra20")) {
+ struct clk *clk_out_1, *clk_extern1;
+
+ dev_warn(dev, "Configuring clocks for a legacy device-tree\n");
+ dev_warn(dev, "Please update DT to use assigned-clock-parents\n");
+
+ clk_extern1 = devm_clk_get(dev, "extern1");
+ if (IS_ERR(clk_extern1)) {
+ dev_err(dev, "Can't retrieve clk extern1\n");
+ return PTR_ERR(clk_extern1);
+ }
+
+ err = clk_set_parent(clk_extern1, machine->clk_pll_a_out0);
+ if (err < 0) {
+ dev_err(dev, "Set parent failed for clk extern1\n");
+ return err;
+ }
+
+ clk_out_1 = devm_clk_get(dev, "pmc_clk_out_1");
+ if (IS_ERR(clk_out_1)) {
+ dev_err(dev, "Can't retrieve pmc_clk_out_1\n");
+ return PTR_ERR(clk_out_1);
+ }
+
+ err = clk_set_parent(clk_out_1, clk_extern1);
+ if (err < 0) {
+ dev_err(dev, "Set parent failed for pmc_clk_out_1\n");
+ return err;
+ }
+
+ machine->clk_cdev1 = clk_out_1;
+ }
+
+ if (asoc->set_ac97) {
+ /*
+ * AC97 rate is fixed at 24.576MHz and is used for both the
+ * host controller and the external codec
+ */
+ err = clk_set_rate(machine->clk_pll_a, 73728000);
+ if (err) {
+ dev_err(dev, "Can't set pll_a rate: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(machine->clk_pll_a_out0, 24576000);
+ if (err) {
+ dev_err(dev, "Can't set pll_a_out0 rate: %d\n", err);
+ return err;
+ }
+
+ machine->set_baseclock = 73728000;
+ machine->set_mclk = 24576000;
+ }
+
+ /*
+ * FIXME: There is some unknown dependency between audio MCLK disable
+ * and suspend-resume functionality on Tegra30, although audio MCLK is
+ * only needed for audio.
+ */
+ err = clk_prepare_enable(machine->clk_cdev1);
+ if (err) {
+ dev_err(dev, "Can't enable cdev1: %d\n", err);
+ return err;
+ }
+
+ err = devm_snd_soc_register_card(dev, card);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_asoc_machine_probe);
+
+/* WM8753 machine */
+
+SND_SOC_DAILINK_DEFS(wm8753_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8753-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_wm8753_dai = {
+ .name = "WM8753",
+ .stream_name = "WM8753 PCM",
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(wm8753_hifi),
+};
+
+static struct snd_soc_card snd_soc_tegra_wm8753 = {
+ .components = "codec:wm8753",
+ .dai_link = &tegra_wm8753_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_wm8753_data = {
+ .mclk_rate = tegra_machine_mclk_rate_12mhz,
+ .card = &snd_soc_tegra_wm8753,
+ .add_common_dapm_widgets = true,
+ .add_common_snd_ops = true,
+};
+
+/* WM9712 machine */
+
+static int tegra_wm9712_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "Mic Bias");
+}
+
+SND_SOC_DAILINK_DEFS(wm9712_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC("wm9712-codec", "wm9712-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_wm9712_dai = {
+ .name = "AC97 HiFi",
+ .stream_name = "AC97 HiFi",
+ .init = tegra_wm9712_init,
+ SND_SOC_DAILINK_REG(wm9712_hifi),
+};
+
+static struct snd_soc_card snd_soc_tegra_wm9712 = {
+ .components = "codec:wm9712",
+ .dai_link = &tegra_wm9712_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_wm9712_data = {
+ .card = &snd_soc_tegra_wm9712,
+ .add_common_dapm_widgets = true,
+ .set_ac97 = true,
+};
+
+/* MAX98090 machine */
+
+SND_SOC_DAILINK_DEFS(max98090_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "HiFi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_max98090_dai = {
+ .name = "max98090",
+ .stream_name = "max98090 PCM",
+ .init = tegra_asoc_machine_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(max98090_hifi),
+};
+
+static struct snd_soc_card snd_soc_tegra_max98090 = {
+ .components = "codec:max98090",
+ .dai_link = &tegra_max98090_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_max98090_data = {
+ .mclk_rate = tegra_machine_mclk_rate_12mhz,
+ .card = &snd_soc_tegra_max98090,
+ .add_common_dapm_widgets = true,
+ .add_common_controls = true,
+ .add_common_snd_ops = true,
+ .add_mic_jack = true,
+ .add_hp_jack = true,
+};
+
+/* SGTL5000 machine */
+
+SND_SOC_DAILINK_DEFS(sgtl5000_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "sgtl5000")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_sgtl5000_dai = {
+ .name = "sgtl5000",
+ .stream_name = "HiFi",
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(sgtl5000_hifi),
+};
+
+static struct snd_soc_card snd_soc_tegra_sgtl5000 = {
+ .components = "codec:sgtl5000",
+ .dai_link = &tegra_sgtl5000_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_sgtl5000_data = {
+ .mclk_rate = tegra_machine_mclk_rate_12mhz,
+ .card = &snd_soc_tegra_sgtl5000,
+ .add_common_dapm_widgets = true,
+ .add_common_snd_ops = true,
+};
+
+/* TLV320AIC23 machine */
+
+static const struct snd_soc_dapm_widget trimslice_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Line Out", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+};
+
+static const struct snd_soc_dapm_route trimslice_audio_map[] = {
+ {"Line Out", NULL, "LOUT"},
+ {"Line Out", NULL, "ROUT"},
+
+ {"LLINEIN", NULL, "Line In"},
+ {"RLINEIN", NULL, "Line In"},
+};
+
+SND_SOC_DAILINK_DEFS(tlv320aic23_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "tlv320aic23-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_tlv320aic23_dai = {
+ .name = "TLV320AIC23",
+ .stream_name = "AIC23",
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(tlv320aic23_hifi),
+};
+
+static struct snd_soc_card snd_soc_tegra_trimslice = {
+ .components = "codec:tlv320aic23",
+ .dai_link = &tegra_tlv320aic23_dai,
+ .num_links = 1,
+ .dapm_widgets = trimslice_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(trimslice_dapm_widgets),
+ .dapm_routes = trimslice_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(trimslice_audio_map),
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_trimslice_data = {
+ .mclk_rate = tegra_machine_mclk_rate_128,
+ .card = &snd_soc_tegra_trimslice,
+ .add_common_snd_ops = true,
+};
+
+/* RT5677 machine */
+
+static int tegra_rt5677_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int err;
+
+ err = tegra_asoc_machine_init(rtd);
+ if (err)
+ return err;
+
+ snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS1");
+
+ return 0;
+}
+
+SND_SOC_DAILINK_DEFS(rt5677_aif1,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5677-aif1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_rt5677_dai = {
+ .name = "RT5677",
+ .stream_name = "RT5677 PCM",
+ .init = tegra_rt5677_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(rt5677_aif1),
+};
+
+static struct snd_soc_card snd_soc_tegra_rt5677 = {
+ .components = "codec:rt5677",
+ .dai_link = &tegra_rt5677_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_rt5677_data = {
+ .mclk_rate = tegra_machine_mclk_rate_256,
+ .card = &snd_soc_tegra_rt5677,
+ .add_common_dapm_widgets = true,
+ .add_common_controls = true,
+ .add_common_snd_ops = true,
+ .add_mic_jack = true,
+ .add_hp_jack = true,
+};
+
+/* RT5640 machine */
+
+SND_SOC_DAILINK_DEFS(rt5640_aif1,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5640-aif1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_rt5640_dai = {
+ .name = "RT5640",
+ .stream_name = "RT5640 PCM",
+ .init = tegra_asoc_machine_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(rt5640_aif1),
+};
+
+static struct snd_soc_card snd_soc_tegra_rt5640 = {
+ .components = "codec:rt5640",
+ .dai_link = &tegra_rt5640_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_rt5640_data = {
+ .mclk_rate = tegra_machine_mclk_rate_256,
+ .card = &snd_soc_tegra_rt5640,
+ .add_common_dapm_widgets = true,
+ .add_common_controls = true,
+ .add_common_snd_ops = true,
+ .add_hp_jack = true,
+};
+
+/* RT5632 machine */
+
+SND_SOC_DAILINK_DEFS(rt5632_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "alc5632-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link tegra_rt5632_dai = {
+ .name = "ALC5632",
+ .stream_name = "ALC5632 PCM",
+ .init = tegra_rt5677_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(rt5632_hifi),
+};
+
+static struct snd_soc_card snd_soc_tegra_rt5632 = {
+ .components = "codec:rt5632",
+ .dai_link = &tegra_rt5632_dai,
+ .num_links = 1,
+ .fully_routed = true,
+};
+
+static const struct tegra_asoc_data tegra_rt5632_data = {
+ .mclk_rate = tegra_machine_mclk_rate_512,
+ .card = &snd_soc_tegra_rt5632,
+ .add_common_dapm_widgets = true,
+ .add_common_controls = true,
+ .add_common_snd_ops = true,
+ .add_headset_jack = true,
+};
+
+static const struct of_device_id tegra_machine_of_match[] = {
+ { .compatible = "nvidia,tegra-audio-trimslice", .data = &tegra_trimslice_data },
+ { .compatible = "nvidia,tegra-audio-max98090", .data = &tegra_max98090_data },
+ { .compatible = "nvidia,tegra-audio-sgtl5000", .data = &tegra_sgtl5000_data },
+ { .compatible = "nvidia,tegra-audio-wm9712", .data = &tegra_wm9712_data },
+ { .compatible = "nvidia,tegra-audio-wm8753", .data = &tegra_wm8753_data },
+ { .compatible = "nvidia,tegra-audio-rt5677", .data = &tegra_rt5677_data },
+ { .compatible = "nvidia,tegra-audio-rt5640", .data = &tegra_rt5640_data },
+ { .compatible = "nvidia,tegra-audio-alc5632", .data = &tegra_rt5632_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_machine_of_match);
+
+static struct platform_driver tegra_asoc_machine_driver = {
+ .driver = {
+ .name = "tegra-audio",
+ .of_match_table = tegra_machine_of_match,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = tegra_asoc_machine_probe,
+};
+module_platform_driver(tegra_asoc_machine_driver);
+
+MODULE_AUTHOR("Anatol Pomozov <anatol@google.com>");
+MODULE_AUTHOR("Andrey Danin <danindrey@mail.ru>");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_AUTHOR("Ion Agorria <ion@agorria.com>");
+MODULE_AUTHOR("Leon Romanovsky <leon@leon.nu>");
+MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
+MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_AUTHOR("Marcel Ziswiler <marcel@ziswiler.com>");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Tegra machine ASoC driver");
+MODULE_LICENSE("GPL");
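tegra_asoc_machine.c above replaces a set of per-codec machine drivers with a single driver whose board-to-board differences live entirely in the OF match data (struct tegra_asoc_data). A stripped-down sketch of that match-data pattern follows; the foo_* names and "vendor,board-*" compatibles are placeholders, not strings from the patch:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_board_data {
	unsigned int mclk_mult;		/* per-board variation lives here */
	bool add_hp_jack;
};

static const struct foo_board_data foo_board_a = { .mclk_mult = 256, .add_hp_jack = true };
static const struct foo_board_data foo_board_b = { .mclk_mult = 512 };

static int foo_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the of_device_id entry that matched */
	const struct foo_board_data *board = of_device_get_match_data(&pdev->dev);

	if (!board)
		return -EINVAL;

	dev_info(&pdev->dev, "MCLK multiplier %u\n", board->mclk_mult);
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,board-a", .data = &foo_board_a },
	{ .compatible = "vendor,board-b", .data = &foo_board_b },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-audio",
		.of_match_table = foo_of_match,
	},
	.probe = foo_probe,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");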
diff --git a/sound/soc/tegra/tegra_asoc_machine.h b/sound/soc/tegra/tegra_asoc_machine.h
new file mode 100644
index 000000000000..8ee0ec814f67
--- /dev/null
+++ b/sound/soc/tegra/tegra_asoc_machine.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __TEGRA_ASOC_MACHINE_H__
+#define __TEGRA_ASOC_MACHINE_H__
+
+struct clk;
+struct gpio_desc;
+struct snd_soc_card;
+struct snd_soc_jack;
+struct platform_device;
+struct snd_soc_jack_gpio;
+struct snd_soc_pcm_runtime;
+
+struct tegra_asoc_data {
+ unsigned int (*mclk_rate)(unsigned int srate);
+ struct snd_soc_card *card;
+ unsigned int mclk_id;
+ bool hp_jack_gpio_active_low;
+ bool add_common_dapm_widgets;
+ bool add_common_controls;
+ bool add_common_snd_ops;
+ bool add_headset_jack;
+ bool add_mic_jack;
+ bool add_hp_jack;
+ bool set_ac97;
+};
+
+struct tegra_machine {
+ struct clk *clk_pll_a_out0;
+ struct clk *clk_pll_a;
+ struct clk *clk_cdev1;
+ unsigned int set_baseclock;
+ unsigned int set_mclk;
+ const struct tegra_asoc_data *asoc;
+ struct gpio_desc *gpiod_ext_mic_en;
+ struct gpio_desc *gpiod_int_mic_en;
+ struct gpio_desc *gpiod_spkr_en;
+ struct gpio_desc *gpiod_mic_det;
+ struct gpio_desc *gpiod_ear_sel;
+ struct gpio_desc *gpiod_hp_mute;
+ struct gpio_desc *gpiod_hp_det;
+ struct snd_soc_jack *mic_jack;
+ struct snd_soc_jack_gpio *hp_jack_gpio;
+};
+
+int tegra_asoc_machine_probe(struct platform_device *pdev);
+int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd);
+
+#endif
diff --git a/sound/soc/tegra/tegra_asoc_utils.c b/sound/soc/tegra/tegra_asoc_utils.c
deleted file mode 100644
index 587f62a288d1..000000000000
--- a/sound/soc/tegra/tegra_asoc_utils.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * tegra_asoc_utils.c - Harmony machine ASoC driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010,2012 - NVIDIA, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include "tegra_asoc_utils.h"
-
-int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
- int mclk)
-{
- int new_baseclock;
- bool clk_change;
- int err;
-
- switch (srate) {
- case 11025:
- case 22050:
- case 44100:
- case 88200:
- if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA20)
- new_baseclock = 56448000;
- else if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA30)
- new_baseclock = 564480000;
- else
- new_baseclock = 282240000;
- break;
- case 8000:
- case 16000:
- case 32000:
- case 48000:
- case 64000:
- case 96000:
- if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA20)
- new_baseclock = 73728000;
- else if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA30)
- new_baseclock = 552960000;
- else
- new_baseclock = 368640000;
- break;
- default:
- return -EINVAL;
- }
-
- clk_change = ((new_baseclock != data->set_baseclock) ||
- (mclk != data->set_mclk));
- if (!clk_change)
- return 0;
-
- data->set_baseclock = 0;
- data->set_mclk = 0;
-
- clk_disable_unprepare(data->clk_cdev1);
-
- err = clk_set_rate(data->clk_pll_a, new_baseclock);
- if (err) {
- dev_err(data->dev, "Can't set pll_a rate: %d\n", err);
- return err;
- }
-
- err = clk_set_rate(data->clk_pll_a_out0, mclk);
- if (err) {
- dev_err(data->dev, "Can't set pll_a_out0 rate: %d\n", err);
- return err;
- }
-
- /* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
-
- err = clk_prepare_enable(data->clk_cdev1);
- if (err) {
- dev_err(data->dev, "Can't enable cdev1: %d\n", err);
- return err;
- }
-
- data->set_baseclock = new_baseclock;
- data->set_mclk = mclk;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_asoc_utils_set_rate);
-
-int tegra_asoc_utils_set_ac97_rate(struct tegra_asoc_utils_data *data)
-{
- const int pll_rate = 73728000;
- const int ac97_rate = 24576000;
- int err;
-
- clk_disable_unprepare(data->clk_cdev1);
-
- /*
- * AC97 rate is fixed at 24.576MHz and is used for both the host
- * controller and the external codec
- */
- err = clk_set_rate(data->clk_pll_a, pll_rate);
- if (err) {
- dev_err(data->dev, "Can't set pll_a rate: %d\n", err);
- return err;
- }
-
- err = clk_set_rate(data->clk_pll_a_out0, ac97_rate);
- if (err) {
- dev_err(data->dev, "Can't set pll_a_out0 rate: %d\n", err);
- return err;
- }
-
- /* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
-
- err = clk_prepare_enable(data->clk_cdev1);
- if (err) {
- dev_err(data->dev, "Can't enable cdev1: %d\n", err);
- return err;
- }
-
- data->set_baseclock = pll_rate;
- data->set_mclk = ac97_rate;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_asoc_utils_set_ac97_rate);
-
-int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
- struct device *dev)
-{
- struct clk *clk_out_1, *clk_extern1;
- int ret;
-
- data->dev = dev;
-
- if (of_machine_is_compatible("nvidia,tegra20"))
- data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA20;
- else if (of_machine_is_compatible("nvidia,tegra30"))
- data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA30;
- else if (of_machine_is_compatible("nvidia,tegra114"))
- data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA114;
- else if (of_machine_is_compatible("nvidia,tegra124"))
- data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA124;
- else {
- dev_err(data->dev, "SoC unknown to Tegra ASoC utils\n");
- return -EINVAL;
- }
-
- data->clk_pll_a = devm_clk_get(dev, "pll_a");
- if (IS_ERR(data->clk_pll_a)) {
- dev_err(data->dev, "Can't retrieve clk pll_a\n");
- return PTR_ERR(data->clk_pll_a);
- }
-
- data->clk_pll_a_out0 = devm_clk_get(dev, "pll_a_out0");
- if (IS_ERR(data->clk_pll_a_out0)) {
- dev_err(data->dev, "Can't retrieve clk pll_a_out0\n");
- return PTR_ERR(data->clk_pll_a_out0);
- }
-
- data->clk_cdev1 = devm_clk_get(dev, "mclk");
- if (IS_ERR(data->clk_cdev1)) {
- dev_err(data->dev, "Can't retrieve clk cdev1\n");
- return PTR_ERR(data->clk_cdev1);
- }
-
- /*
- * If clock parents are not set in DT, configure here to use clk_out_1
- * as mclk and extern1 as parent for Tegra30 and higher.
- */
- if (!of_find_property(dev->of_node, "assigned-clock-parents", NULL) &&
- data->soc > TEGRA_ASOC_UTILS_SOC_TEGRA20) {
- dev_warn(data->dev,
- "Configuring clocks for a legacy device-tree\n");
- dev_warn(data->dev,
- "Please update DT to use assigned-clock-parents\n");
- clk_extern1 = devm_clk_get(dev, "extern1");
- if (IS_ERR(clk_extern1)) {
- dev_err(data->dev, "Can't retrieve clk extern1\n");
- return PTR_ERR(clk_extern1);
- }
-
- ret = clk_set_parent(clk_extern1, data->clk_pll_a_out0);
- if (ret < 0) {
- dev_err(data->dev,
- "Set parent failed for clk extern1\n");
- return ret;
- }
-
- clk_out_1 = devm_clk_get(dev, "pmc_clk_out_1");
- if (IS_ERR(clk_out_1)) {
- dev_err(data->dev, "Can't retrieve pmc_clk_out_1\n");
- return PTR_ERR(clk_out_1);
- }
-
- ret = clk_set_parent(clk_out_1, clk_extern1);
- if (ret < 0) {
- dev_err(data->dev,
- "Set parent failed for pmc_clk_out_1\n");
- return ret;
- }
-
- data->clk_cdev1 = clk_out_1;
- }
-
- /*
- * FIXME: There is some unknown dependency between audio mclk disable
- * and suspend-resume functionality on Tegra30, although audio mclk is
- * only needed for audio.
- */
- ret = clk_prepare_enable(data->clk_cdev1);
- if (ret) {
- dev_err(data->dev, "Can't enable cdev1: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_asoc_utils_init);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra ASoC utility code");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/tegra/tegra_asoc_utils.h b/sound/soc/tegra/tegra_asoc_utils.h
deleted file mode 100644
index a34439587d59..000000000000
--- a/sound/soc/tegra/tegra_asoc_utils.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tegra_asoc_utils.h - Definitions for Tegra DAS driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010,2012 - NVIDIA, Inc.
- */
-
-#ifndef __TEGRA_ASOC_UTILS_H__
-#define __TEGRA_ASOC_UTILS_H__
-
-struct clk;
-struct device;
-
-enum tegra_asoc_utils_soc {
- TEGRA_ASOC_UTILS_SOC_TEGRA20,
- TEGRA_ASOC_UTILS_SOC_TEGRA30,
- TEGRA_ASOC_UTILS_SOC_TEGRA114,
- TEGRA_ASOC_UTILS_SOC_TEGRA124,
-};
-
-struct tegra_asoc_utils_data {
- struct device *dev;
- enum tegra_asoc_utils_soc soc;
- struct clk *clk_pll_a;
- struct clk *clk_pll_a_out0;
- struct clk *clk_cdev1;
- int set_baseclock;
- int set_mclk;
-};
-
-int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
- int mclk);
-int tegra_asoc_utils_set_ac97_rate(struct tegra_asoc_utils_data *data);
-int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
- struct device *dev);
-
-#endif
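Both the removed tegra_asoc_utils_set_rate() above and its inlined replacement in tegra_asoc_machine.c run the same clock sequence: gate MCLK, retune pll_a and pll_a_out0, then ungate MCLK again, leaving cdev1/extern1 to follow pll_a_out0. A reduced sketch of that sequence, assuming the clk handles were obtained with devm_clk_get(); foo_retune_mclk() is an illustrative name, not a function from the patch:

#include <linux/clk.h>

int foo_retune_mclk(struct clk *pll_a, struct clk *pll_a_out0,
		    struct clk *mclk, unsigned long base_rate,
		    unsigned long mclk_rate)
{
	int err;

	clk_disable_unprepare(mclk);		/* stop MCLK before retuning */

	err = clk_set_rate(pll_a, base_rate);
	if (err)
		return err;

	err = clk_set_rate(pll_a_out0, mclk_rate);
	if (err)
		return err;

	/* cdev1/extern1 rate is not set here; it is locked to pll_a_out0 */
	return clk_prepare_enable(mclk);	/* restart MCLK at the new rate */
}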
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
deleted file mode 100644
index 00c19704057b..000000000000
--- a/sound/soc/tegra/tegra_max98090.c
+++ /dev/null
@@ -1,276 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Tegra machine ASoC driver for boards using a MAX90809 CODEC.
- *
- * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
- *
- * Based on code copyright/by:
- *
- * Copyright (C) 2010-2012 - NVIDIA, Inc.
- * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
- * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
- * Copyright 2007 Wolfson Microelectronics PLC.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-max98090"
-
-struct tegra_max98090 {
- struct tegra_asoc_utils_data util_data;
- int gpio_hp_det;
- int gpio_mic_det;
-};
-
-static int tegra_max98090_asoc_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_max98090 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
-
- srate = params_rate(params);
- switch (srate) {
- case 8000:
- case 16000:
- case 24000:
- case 32000:
- case 48000:
- case 64000:
- case 96000:
- mclk = 12288000;
- break;
- case 11025:
- case 22050:
- case 44100:
- case 88200:
- mclk = 11289600;
- break;
- default:
- mclk = 12000000;
- break;
- }
-
- err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static const struct snd_soc_ops tegra_max98090_ops = {
- .hw_params = tegra_max98090_asoc_hw_params,
-};
-
-static struct snd_soc_jack tegra_max98090_hp_jack;
-
-static struct snd_soc_jack_pin tegra_max98090_hp_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-
-static struct snd_soc_jack_gpio tegra_max98090_hp_jack_gpio = {
- .name = "Headphone detection",
- .report = SND_JACK_HEADPHONE,
- .debounce_time = 150,
- .invert = 1,
-};
-
-static struct snd_soc_jack tegra_max98090_mic_jack;
-
-static struct snd_soc_jack_pin tegra_max98090_mic_jack_pins[] = {
- {
- .pin = "Mic Jack",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-
-static struct snd_soc_jack_gpio tegra_max98090_mic_jack_gpio = {
- .name = "Mic detection",
- .report = SND_JACK_MICROPHONE,
- .debounce_time = 150,
- .invert = 1,
-};
-
-static const struct snd_soc_dapm_widget tegra_max98090_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
- SND_SOC_DAPM_SPK("Speakers", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
-};
-
-static const struct snd_kcontrol_new tegra_max98090_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphones"),
- SOC_DAPM_PIN_SWITCH("Speakers"),
- SOC_DAPM_PIN_SWITCH("Mic Jack"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
-};
-
-static int tegra_max98090_asoc_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct tegra_max98090 *machine = snd_soc_card_get_drvdata(rtd->card);
-
- if (gpio_is_valid(machine->gpio_hp_det)) {
- snd_soc_card_jack_new(rtd->card, "Headphones",
- SND_JACK_HEADPHONE,
- &tegra_max98090_hp_jack,
- tegra_max98090_hp_jack_pins,
- ARRAY_SIZE(tegra_max98090_hp_jack_pins));
-
- tegra_max98090_hp_jack_gpio.gpio = machine->gpio_hp_det;
- snd_soc_jack_add_gpios(&tegra_max98090_hp_jack,
- 1,
- &tegra_max98090_hp_jack_gpio);
- }
-
- if (gpio_is_valid(machine->gpio_mic_det)) {
- snd_soc_card_jack_new(rtd->card, "Mic Jack",
- SND_JACK_MICROPHONE,
- &tegra_max98090_mic_jack,
- tegra_max98090_mic_jack_pins,
- ARRAY_SIZE(tegra_max98090_mic_jack_pins));
-
- tegra_max98090_mic_jack_gpio.gpio = machine->gpio_mic_det;
- snd_soc_jack_add_gpios(&tegra_max98090_mic_jack,
- 1,
- &tegra_max98090_mic_jack_gpio);
- }
-
- return 0;
-}
-
-SND_SOC_DAILINK_DEFS(pcm,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "HiFi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_max98090_dai = {
- .name = "max98090",
- .stream_name = "max98090 PCM",
- .init = tegra_max98090_asoc_init,
- .ops = &tegra_max98090_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(pcm),
-};
-
-static struct snd_soc_card snd_soc_tegra_max98090 = {
- .name = "tegra-max98090",
- .owner = THIS_MODULE,
- .dai_link = &tegra_max98090_dai,
- .num_links = 1,
- .controls = tegra_max98090_controls,
- .num_controls = ARRAY_SIZE(tegra_max98090_controls),
- .dapm_widgets = tegra_max98090_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_max98090_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_max98090_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_max98090;
- struct tegra_max98090 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev,
- sizeof(struct tegra_max98090), GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- machine->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (machine->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- machine->gpio_mic_det =
- of_get_named_gpio(np, "nvidia,mic-det-gpios", 0);
- if (machine->gpio_mic_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- return ret;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- return ret;
-
- tegra_max98090_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!tegra_max98090_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_max98090_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_max98090_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_max98090_dai.platforms->of_node = tegra_max98090_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- return ret;
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
-
- return 0;
-}
-
-static const struct of_device_id tegra_max98090_of_match[] = {
- { .compatible = "nvidia,tegra-audio-max98090", },
- {},
-};
-
-static struct platform_driver tegra_max98090_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_max98090_of_match,
- },
- .probe = tegra_max98090_probe,
-};
-module_platform_driver(tegra_max98090_driver);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra max98090 machine ASoC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_max98090_of_match);
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index 573374b89b10..d3276b4595af 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -213,19 +213,19 @@ snd_pcm_uframes_t tegra_pcm_pointer(struct snd_soc_component *component,
}
EXPORT_SYMBOL_GPL(tegra_pcm_pointer);
-static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
+static int tegra_pcm_preallocate_dma_buffer(struct device *dev, struct snd_pcm *pcm, int stream,
size_t size)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
- buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
+ buf->area = dma_alloc_wc(dev, size, &buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->private_data = NULL;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
- buf->dev.dev = pcm->card->dev;
+ buf->dev.dev = dev;
buf->bytes = size;
return 0;
@@ -244,31 +244,28 @@ static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
if (!buf->area)
return;
- dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
+ dma_free_wc(buf->dev.dev, buf->bytes, buf->area, buf->addr);
buf->area = NULL;
}
-static int tegra_pcm_dma_allocate(struct snd_soc_pcm_runtime *rtd,
+static int tegra_pcm_dma_allocate(struct device *dev, struct snd_soc_pcm_runtime *rtd,
size_t size)
{
- struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret;
- ret = dma_set_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
- ret = tegra_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_PLAYBACK, size);
+ ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_PLAYBACK, size);
if (ret)
goto err;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- ret = tegra_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_CAPTURE, size);
+ ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_CAPTURE, size);
if (ret)
goto err_free_play;
}
@@ -284,7 +281,16 @@ err:
int tegra_pcm_construct(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
- return tegra_pcm_dma_allocate(rtd, tegra_pcm_hardware.buffer_bytes_max);
+ struct device *dev = component->dev;
+
+ /*
+ * Fallback for backwards-compatibility with older device trees that
+ * have the iommus property in the virtual, top-level "sound" node.
+ */
+ if (!of_get_property(dev->of_node, "iommus", NULL))
+ dev = rtd->card->snd_card->dev;
+
+ return tegra_pcm_dma_allocate(dev, rtd, tegra_pcm_hardware.buffer_bytes_max);
}
EXPORT_SYMBOL_GPL(tegra_pcm_construct);
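The tegra_pcm.c hunks above thread an explicit struct device through the preallocation path so the write-combined buffer is mapped for whichever device actually owns the DMA/IOMMU context (the component device, or the card device as a fallback for older device trees), and the later free reuses the device recorded in buf->dev.dev. A reduced sketch of the allocation step under those assumptions; foo_prealloc() is illustrative, not the patched function itself:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <sound/pcm.h>

int foo_prealloc(struct device *dev, struct snd_pcm_substream *substream,
		 size_t size)
{
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	int ret;

	/* Constrain addressing on the device that will do the mapping */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	buf->area = dma_alloc_wc(dev, size, &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = dev;	/* freeing must use the same device */
	buf->bytes = size;

	return 0;
}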
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
deleted file mode 100644
index 9afba37a3b08..000000000000
--- a/sound/soc/tegra/tegra_rt5640.c
+++ /dev/null
@@ -1,222 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
-* tegra_rt5640.c - Tegra machine ASoC driver for boards using RT5640 codec.
- *
- * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
- *
- * Based on code copyright/by:
- *
- * Copyright (C) 2010-2012 - NVIDIA, Inc.
- * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
- * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
- * Copyright 2007 Wolfson Microelectronics PLC.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "../codecs/rt5640.h"
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-rt5640"
-
-struct tegra_rt5640 {
- struct tegra_asoc_utils_data util_data;
- int gpio_hp_det;
- enum of_gpio_flags gpio_hp_det_flags;
-};
-
-static int tegra_rt5640_asoc_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_rt5640 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
-
- srate = params_rate(params);
- mclk = 256 * srate;
-
- err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_MCLK, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static const struct snd_soc_ops tegra_rt5640_ops = {
- .hw_params = tegra_rt5640_asoc_hw_params,
-};
-
-static struct snd_soc_jack tegra_rt5640_hp_jack;
-
-static struct snd_soc_jack_pin tegra_rt5640_hp_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-
-static struct snd_soc_jack_gpio tegra_rt5640_hp_jack_gpio = {
- .name = "Headphone detection",
- .report = SND_JACK_HEADPHONE,
- .debounce_time = 150,
- .invert = 1,
-};
-
-static const struct snd_soc_dapm_widget tegra_rt5640_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
- SND_SOC_DAPM_SPK("Speakers", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
-};
-
-static const struct snd_kcontrol_new tegra_rt5640_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speakers"),
-};
-
-static int tegra_rt5640_asoc_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct tegra_rt5640 *machine = snd_soc_card_get_drvdata(rtd->card);
-
- snd_soc_card_jack_new(rtd->card, "Headphones", SND_JACK_HEADPHONE,
- &tegra_rt5640_hp_jack, tegra_rt5640_hp_jack_pins,
- ARRAY_SIZE(tegra_rt5640_hp_jack_pins));
-
- if (gpio_is_valid(machine->gpio_hp_det)) {
- tegra_rt5640_hp_jack_gpio.gpio = machine->gpio_hp_det;
- tegra_rt5640_hp_jack_gpio.invert =
- !!(machine->gpio_hp_det_flags & OF_GPIO_ACTIVE_LOW);
- snd_soc_jack_add_gpios(&tegra_rt5640_hp_jack,
- 1,
- &tegra_rt5640_hp_jack_gpio);
- }
-
- return 0;
-}
-
-SND_SOC_DAILINK_DEFS(aif1,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5640-aif1")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_rt5640_dai = {
- .name = "RT5640",
- .stream_name = "RT5640 PCM",
- .init = tegra_rt5640_asoc_init,
- .ops = &tegra_rt5640_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(aif1),
-};
-
-static struct snd_soc_card snd_soc_tegra_rt5640 = {
- .name = "tegra-rt5640",
- .owner = THIS_MODULE,
- .dai_link = &tegra_rt5640_dai,
- .num_links = 1,
- .controls = tegra_rt5640_controls,
- .num_controls = ARRAY_SIZE(tegra_rt5640_controls),
- .dapm_widgets = tegra_rt5640_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_rt5640_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_rt5640_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_rt5640;
- struct tegra_rt5640 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev,
- sizeof(struct tegra_rt5640), GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- machine->gpio_hp_det = of_get_named_gpio_flags(
- np, "nvidia,hp-det-gpios", 0, &machine->gpio_hp_det_flags);
- if (machine->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- return ret;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- return ret;
-
- tegra_rt5640_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!tegra_rt5640_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_rt5640_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_rt5640_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_rt5640_dai.platforms->of_node = tegra_rt5640_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- return ret;
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
-
- return 0;
-}
-
-static const struct of_device_id tegra_rt5640_of_match[] = {
- { .compatible = "nvidia,tegra-audio-rt5640", },
- {},
-};
-
-static struct platform_driver tegra_rt5640_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_rt5640_of_match,
- },
- .probe = tegra_rt5640_probe,
-};
-module_platform_driver(tegra_rt5640_driver);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra+RT5640 machine ASoC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_rt5640_of_match);
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
deleted file mode 100644
index d30f8b6deda4..000000000000
--- a/sound/soc/tegra/tegra_rt5677.c
+++ /dev/null
@@ -1,324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
-* tegra_rt5677.c - Tegra machine ASoC driver for boards using RT5677 codec.
- *
- * Copyright (c) 2014, The Chromium OS Authors. All rights reserved.
- *
- * Based on code copyright/by:
- *
- * Copyright (C) 2010-2012 - NVIDIA, Inc.
- * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
- * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
- * Copyright 2007 Wolfson Microelectronics PLC.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "../codecs/rt5677.h"
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-rt5677"
-
-struct tegra_rt5677 {
- struct tegra_asoc_utils_data util_data;
- int gpio_hp_det;
- int gpio_hp_en;
- int gpio_mic_present;
- int gpio_dmic_clk_en;
-};
-
-static int tegra_rt5677_asoc_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk, err;
-
- srate = params_rate(params);
- mclk = 256 * srate;
-
- err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_MCLK, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static int tegra_rt5677_event_hp(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_dapm_context *dapm = w->dapm;
- struct snd_soc_card *card = dapm->card;
- struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
-
- if (!gpio_is_valid(machine->gpio_hp_en))
- return 0;
-
- gpio_set_value_cansleep(machine->gpio_hp_en,
- SND_SOC_DAPM_EVENT_ON(event));
-
- return 0;
-}
-
-static const struct snd_soc_ops tegra_rt5677_ops = {
- .hw_params = tegra_rt5677_asoc_hw_params,
-};
-
-static struct snd_soc_jack tegra_rt5677_hp_jack;
-
-static struct snd_soc_jack_pin tegra_rt5677_hp_jack_pins = {
- .pin = "Headphone",
- .mask = SND_JACK_HEADPHONE,
-};
-static struct snd_soc_jack_gpio tegra_rt5677_hp_jack_gpio = {
- .name = "Headphone detection",
- .report = SND_JACK_HEADPHONE,
- .debounce_time = 150,
-};
-
-static struct snd_soc_jack tegra_rt5677_mic_jack;
-
-static struct snd_soc_jack_pin tegra_rt5677_mic_jack_pins = {
- .pin = "Headset Mic",
- .mask = SND_JACK_MICROPHONE,
-};
-
-static struct snd_soc_jack_gpio tegra_rt5677_mic_jack_gpio = {
- .name = "Headset Mic detection",
- .report = SND_JACK_MICROPHONE,
- .debounce_time = 150,
- .invert = 1
-};
-
-static const struct snd_soc_dapm_widget tegra_rt5677_dapm_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
- SND_SOC_DAPM_HP("Headphone", tegra_rt5677_event_hp),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_MIC("Internal Mic 1", NULL),
- SND_SOC_DAPM_MIC("Internal Mic 2", NULL),
-};
-
-static const struct snd_kcontrol_new tegra_rt5677_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
- SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
- SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
-};
-
-static int tegra_rt5677_asoc_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(rtd->card);
-
- snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE,
- &tegra_rt5677_hp_jack,
- &tegra_rt5677_hp_jack_pins, 1);
-
- if (gpio_is_valid(machine->gpio_hp_det)) {
- tegra_rt5677_hp_jack_gpio.gpio = machine->gpio_hp_det;
- snd_soc_jack_add_gpios(&tegra_rt5677_hp_jack, 1,
- &tegra_rt5677_hp_jack_gpio);
- }
-
-
- snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE,
- &tegra_rt5677_mic_jack,
- &tegra_rt5677_mic_jack_pins, 1);
-
- if (gpio_is_valid(machine->gpio_mic_present)) {
- tegra_rt5677_mic_jack_gpio.gpio = machine->gpio_mic_present;
- snd_soc_jack_add_gpios(&tegra_rt5677_mic_jack, 1,
- &tegra_rt5677_mic_jack_gpio);
- }
-
- snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "MICBIAS1");
-
- return 0;
-}
-
-SND_SOC_DAILINK_DEFS(pcm,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5677-aif1")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_rt5677_dai = {
- .name = "RT5677",
- .stream_name = "RT5677 PCM",
- .init = tegra_rt5677_asoc_init,
- .ops = &tegra_rt5677_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(pcm),
-};
-
-static struct snd_soc_card snd_soc_tegra_rt5677 = {
- .name = "tegra-rt5677",
- .owner = THIS_MODULE,
- .dai_link = &tegra_rt5677_dai,
- .num_links = 1,
- .controls = tegra_rt5677_controls,
- .num_controls = ARRAY_SIZE(tegra_rt5677_controls),
- .dapm_widgets = tegra_rt5677_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_rt5677_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_rt5677_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_rt5677;
- struct tegra_rt5677 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev,
- sizeof(struct tegra_rt5677), GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- machine->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (machine->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- machine->gpio_mic_present = of_get_named_gpio(np,
- "nvidia,mic-present-gpios", 0);
- if (machine->gpio_mic_present == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- machine->gpio_hp_en = of_get_named_gpio(np, "nvidia,hp-en-gpios", 0);
- if (machine->gpio_hp_en == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (gpio_is_valid(machine->gpio_hp_en)) {
- ret = devm_gpio_request_one(&pdev->dev, machine->gpio_hp_en,
- GPIOF_OUT_INIT_LOW, "hp_en");
- if (ret) {
- dev_err(card->dev, "cannot get hp_en gpio\n");
- return ret;
- }
- }
-
- machine->gpio_dmic_clk_en = of_get_named_gpio(np,
- "nvidia,dmic-clk-en-gpios", 0);
- if (machine->gpio_dmic_clk_en == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (gpio_is_valid(machine->gpio_dmic_clk_en)) {
- ret = devm_gpio_request_one(&pdev->dev,
- machine->gpio_dmic_clk_en,
- GPIOF_OUT_INIT_HIGH, "dmic_clk_en");
- if (ret) {
- dev_err(card->dev, "cannot get dmic_clk_en gpio\n");
- return ret;
- }
- }
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- goto err;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- goto err;
-
- tegra_rt5677_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!tegra_rt5677_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
- }
-
- tegra_rt5677_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_rt5677_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err_put_codec_of_node;
- }
- tegra_rt5677_dai.platforms->of_node = tegra_rt5677_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- goto err_put_cpu_of_node;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
- goto err_put_cpu_of_node;
- }
-
- return 0;
-
-err_put_cpu_of_node:
- of_node_put(tegra_rt5677_dai.cpus->of_node);
- tegra_rt5677_dai.cpus->of_node = NULL;
- tegra_rt5677_dai.platforms->of_node = NULL;
-err_put_codec_of_node:
- of_node_put(tegra_rt5677_dai.codecs->of_node);
- tegra_rt5677_dai.codecs->of_node = NULL;
-err:
- return ret;
-}
-
-static int tegra_rt5677_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- tegra_rt5677_dai.platforms->of_node = NULL;
- of_node_put(tegra_rt5677_dai.codecs->of_node);
- tegra_rt5677_dai.codecs->of_node = NULL;
- of_node_put(tegra_rt5677_dai.cpus->of_node);
- tegra_rt5677_dai.cpus->of_node = NULL;
-
- return 0;
-}
-
-static const struct of_device_id tegra_rt5677_of_match[] = {
- { .compatible = "nvidia,tegra-audio-rt5677", },
- {},
-};
-
-static struct platform_driver tegra_rt5677_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_rt5677_of_match,
- },
- .probe = tegra_rt5677_probe,
- .remove = tegra_rt5677_remove,
-};
-module_platform_driver(tegra_rt5677_driver);
-
-MODULE_AUTHOR("Anatol Pomozov <anatol@google.com>");
-MODULE_DESCRIPTION("Tegra+RT5677 machine ASoC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_rt5677_of_match);
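All of the machine drivers deleted in this series share the hw_params clocking pattern seen above: derive an MCLK from the sample rate, program it through tegra_asoc_utils_set_rate(), then report the same clock to the codec as its SYSCLK input. The RT5677 driver above uses a plain 256 * fs, while the SGTL5000 and WM8753 drivers below pick between 11.2896 MHz (44.1 kHz family) and 12.288 MHz (48 kHz family). The following is a minimal sketch of that pattern only; the example_ struct and function names are hypothetical, everything else is the existing Tegra ASoC utils / soc-dai API.

	/* Sketch; assumes <sound/soc.h>, <sound/pcm_params.h> and "tegra_asoc_utils.h". */
	static unsigned int example_pick_mclk(unsigned int srate)
	{
		switch (srate) {
		case 11025:
		case 22050:
		case 44100:
		case 88200:
			return 11289600;	/* 44.1 kHz family */
		default:
			return 12288000;	/* 48 kHz family */
		}
	}

	static int example_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
		struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
		/* example_machine is a placeholder for the per-board drvdata struct */
		struct example_machine *machine = snd_soc_card_get_drvdata(rtd->card);
		unsigned int srate = params_rate(params);
		unsigned int mclk = example_pick_mclk(srate);
		int err;

		/* Reprogram the Tegra audio PLL/clock tree for the new rate... */
		err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
		if (err < 0)
			return err;

		/* ...and tell the codec which MCLK it now receives. */
		return snd_soc_dai_set_sysclk(codec_dai, 0, mclk, SND_SOC_CLOCK_IN);
	}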
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
deleted file mode 100644
index 885332170c77..000000000000
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ /dev/null
@@ -1,211 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * tegra_sgtl5000.c - Tegra machine ASoC driver for boards using SGTL5000 codec
- *
- * Author: Marcel Ziswiler <marcel@ziswiler.com>
- *
- * Based on code copyright/by:
- *
- * Copyright (C) 2010-2012 - NVIDIA, Inc.
- * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
- * Copyright 2007 Wolfson Microelectronics PLC.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "../codecs/sgtl5000.h"
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-sgtl5000"
-
-struct tegra_sgtl5000 {
- struct tegra_asoc_utils_data util_data;
-};
-
-static int tegra_sgtl5000_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
-
- srate = params_rate(params);
- switch (srate) {
- case 11025:
- case 22050:
- case 44100:
- case 88200:
- mclk = 11289600;
- break;
- default:
- mclk = 12288000;
- break;
- }
-
- err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static const struct snd_soc_ops tegra_sgtl5000_ops = {
- .hw_params = tegra_sgtl5000_hw_params,
-};
-
-static const struct snd_soc_dapm_widget tegra_sgtl5000_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_LINE("Line In Jack", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
-};
-
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "sgtl5000")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_sgtl5000_dai = {
- .name = "sgtl5000",
- .stream_name = "HiFi",
- .ops = &tegra_sgtl5000_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(hifi),
-};
-
-static struct snd_soc_card snd_soc_tegra_sgtl5000 = {
- .name = "tegra-sgtl5000",
- .owner = THIS_MODULE,
- .dai_link = &tegra_sgtl5000_dai,
- .num_links = 1,
- .dapm_widgets = tegra_sgtl5000_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_sgtl5000_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_sgtl5000;
- struct tegra_sgtl5000 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_sgtl5000),
- GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- goto err;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- goto err;
-
- tegra_sgtl5000_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!tegra_sgtl5000_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
- }
-
- tegra_sgtl5000_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_sgtl5000_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing/invalid\n");
- ret = -EINVAL;
- goto err_put_codec_of_node;
- }
-
- tegra_sgtl5000_dai.platforms->of_node = tegra_sgtl5000_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- goto err_put_cpu_of_node;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
- goto err_put_cpu_of_node;
- }
-
- return 0;
-
-err_put_cpu_of_node:
- of_node_put(tegra_sgtl5000_dai.cpus->of_node);
- tegra_sgtl5000_dai.cpus->of_node = NULL;
- tegra_sgtl5000_dai.platforms->of_node = NULL;
-err_put_codec_of_node:
- of_node_put(tegra_sgtl5000_dai.codecs->of_node);
- tegra_sgtl5000_dai.codecs->of_node = NULL;
-err:
- return ret;
-}
-
-static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
- int ret;
-
- ret = snd_soc_unregister_card(card);
-
- of_node_put(tegra_sgtl5000_dai.cpus->of_node);
- tegra_sgtl5000_dai.cpus->of_node = NULL;
- tegra_sgtl5000_dai.platforms->of_node = NULL;
- of_node_put(tegra_sgtl5000_dai.codecs->of_node);
- tegra_sgtl5000_dai.codecs->of_node = NULL;
-
- return ret;
-}
-
-static const struct of_device_id tegra_sgtl5000_of_match[] = {
- { .compatible = "nvidia,tegra-audio-sgtl5000", },
- { /* sentinel */ },
-};
-
-static struct platform_driver tegra_sgtl5000_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_sgtl5000_of_match,
- },
- .probe = tegra_sgtl5000_driver_probe,
- .remove = tegra_sgtl5000_driver_remove,
-};
-module_platform_driver(tegra_sgtl5000_driver);
-
-MODULE_AUTHOR("Marcel Ziswiler <marcel@ziswiler.com>");
-MODULE_DESCRIPTION("Tegra SGTL5000 machine ASoC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_sgtl5000_of_match);
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
deleted file mode 100644
index efd793886689..000000000000
--- a/sound/soc/tegra/tegra_wm8753.c
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * tegra_wm8753.c - Tegra machine ASoC driver for boards using WM8753 codec.
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010-2012 - NVIDIA, Inc.
- *
- * Based on code copyright/by:
- *
- * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
- *
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "../codecs/wm8753.h"
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-wm8753"
-
-struct tegra_wm8753 {
- struct tegra_asoc_utils_data util_data;
-};
-
-static int tegra_wm8753_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
-
- srate = params_rate(params);
- switch (srate) {
- case 11025:
- case 22050:
- case 44100:
- case 88200:
- mclk = 11289600;
- break;
- default:
- mclk = 12288000;
- break;
- }
-
- err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static const struct snd_soc_ops tegra_wm8753_ops = {
- .hw_params = tegra_wm8753_hw_params,
-};
-
-static const struct snd_soc_dapm_widget tegra_wm8753_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
-};
-
-SND_SOC_DAILINK_DEFS(pcm,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8753-hifi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_wm8753_dai = {
- .name = "WM8753",
- .stream_name = "WM8753 PCM",
- .ops = &tegra_wm8753_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(pcm),
-};
-
-static struct snd_soc_card snd_soc_tegra_wm8753 = {
- .name = "tegra-wm8753",
- .owner = THIS_MODULE,
- .dai_link = &tegra_wm8753_dai,
- .num_links = 1,
-
- .dapm_widgets = tegra_wm8753_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_wm8753_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_wm8753_driver_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_wm8753;
- struct tegra_wm8753 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8753),
- GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- return ret;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- return ret;
-
- tegra_wm8753_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!tegra_wm8753_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_wm8753_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_wm8753_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_wm8753_dai.platforms->of_node = tegra_wm8753_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- return ret;
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
-
- return 0;
-}
-
-static const struct of_device_id tegra_wm8753_of_match[] = {
- { .compatible = "nvidia,tegra-audio-wm8753", },
- {},
-};
-
-static struct platform_driver tegra_wm8753_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_wm8753_of_match,
- },
- .probe = tegra_wm8753_driver_probe,
-};
-module_platform_driver(tegra_wm8753_driver);
-
-MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
-MODULE_DESCRIPTION("Tegra+WM8753 machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_wm8753_of_match);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index e4863fa37b0c..5751fb398c1a 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -14,44 +14,27 @@
* graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
*/
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "../codecs/wm8903.h"
-#include "tegra_asoc_utils.h"
+#include "tegra_asoc_machine.h"
-#define DRV_NAME "tegra-snd-wm8903"
-
-struct tegra_wm8903 {
- int gpio_spkr_en;
- int gpio_hp_det;
- int gpio_hp_mute;
- int gpio_int_mic_en;
- int gpio_ext_mic_en;
- struct tegra_asoc_utils_data util_data;
+static struct snd_soc_jack_pin tegra_wm8903_mic_jack_pins[] = {
+ { .pin = "Mic Jack", .mask = SND_JACK_MICROPHONE },
};
-static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static unsigned int tegra_wm8903_mclk_rate(unsigned int srate)
{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
+ unsigned int mclk;
- srate = params_rate(params);
switch (srate) {
case 64000:
case 88200:
@@ -66,140 +49,53 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
while (mclk < 6000000)
mclk *= 2;
- err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
+ return mclk;
}
-static const struct snd_soc_ops tegra_wm8903_ops = {
- .hw_params = tegra_wm8903_hw_params,
-};
-
-static struct snd_soc_jack tegra_wm8903_hp_jack;
-
-static struct snd_soc_jack_pin tegra_wm8903_hp_jack_pins[] = {
- {
- .pin = "Headphone Jack",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-
-static struct snd_soc_jack_gpio tegra_wm8903_hp_jack_gpio = {
- .name = "headphone detect",
- .report = SND_JACK_HEADPHONE,
- .debounce_time = 150,
- .invert = 1,
-};
-
-static struct snd_soc_jack tegra_wm8903_mic_jack;
-
-static struct snd_soc_jack_pin tegra_wm8903_mic_jack_pins[] = {
- {
- .pin = "Mic Jack",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-
-static int tegra_wm8903_event_int_spk(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_dapm_context *dapm = w->dapm;
- struct snd_soc_card *card = dapm->card;
- struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
-
- if (!gpio_is_valid(machine->gpio_spkr_en))
- return 0;
-
- gpio_set_value_cansleep(machine->gpio_spkr_en,
- SND_SOC_DAPM_EVENT_ON(event));
-
- return 0;
-}
-
-static int tegra_wm8903_event_hp(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_dapm_context *dapm = w->dapm;
- struct snd_soc_card *card = dapm->card;
- struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
-
- if (!gpio_is_valid(machine->gpio_hp_mute))
- return 0;
-
- gpio_set_value_cansleep(machine->gpio_hp_mute,
- !SND_SOC_DAPM_EVENT_ON(event));
-
- return 0;
-}
-
-static int tegra_wm8903_event_int_mic(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
+static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dapm_context *dapm = w->dapm;
- struct snd_soc_card *card = dapm->card;
- struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
-
- if (!gpio_is_valid(machine->gpio_int_mic_en))
- return 0;
+ struct tegra_machine *machine = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_card *card = rtd->card;
+ int err;
- gpio_set_value_cansleep(machine->gpio_int_mic_en,
- SND_SOC_DAPM_EVENT_ON(event));
+ /*
+ * The older version of this machine driver ignored the GPIO polarity
+ * and forced it to active-low. This means that all older device-trees
+ * which set the polarity to active-high are wrong, and we need to fix
+ * them up here.
+ */
+ if (machine->asoc->hp_jack_gpio_active_low) {
+ bool active_low = gpiod_is_active_low(machine->gpiod_hp_det);
- return 0;
-}
+ machine->hp_jack_gpio->invert = !active_low;
+ }
-static const struct snd_soc_dapm_widget tegra_wm8903_dapm_widgets[] = {
- SND_SOC_DAPM_SPK("Int Spk", tegra_wm8903_event_int_spk),
- SND_SOC_DAPM_HP("Headphone Jack", tegra_wm8903_event_hp),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
- SND_SOC_DAPM_MIC("Int Mic", tegra_wm8903_event_int_mic),
-};
+ err = tegra_asoc_machine_init(rtd);
+ if (err)
+ return err;
-static const struct snd_kcontrol_new tegra_wm8903_controls[] = {
- SOC_DAPM_PIN_SWITCH("Int Spk"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
-};
+ if (!machine->gpiod_mic_det && machine->asoc->add_mic_jack) {
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ int shrt = 0;
+
+ err = snd_soc_card_jack_new(rtd->card, "Mic Jack",
+ SND_JACK_MICROPHONE,
+ machine->mic_jack,
+ tegra_wm8903_mic_jack_pins,
+ ARRAY_SIZE(tegra_wm8903_mic_jack_pins));
+ if (err) {
+ dev_err(rtd->dev, "Mic Jack creation failed: %d\n", err);
+ return err;
+ }
-static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_component *component = codec_dai->component;
- struct snd_soc_card *card = rtd->card;
- struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
- int shrt = 0;
+ if (of_property_read_bool(card->dev->of_node, "nvidia,headset"))
+ shrt = SND_JACK_MICROPHONE;
- if (gpio_is_valid(machine->gpio_hp_det)) {
- tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det;
- snd_soc_card_jack_new(rtd->card, "Headphone Jack",
- SND_JACK_HEADPHONE, &tegra_wm8903_hp_jack,
- tegra_wm8903_hp_jack_pins,
- ARRAY_SIZE(tegra_wm8903_hp_jack_pins));
- snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack,
- 1,
- &tegra_wm8903_hp_jack_gpio);
+ wm8903_mic_detect(component, machine->mic_jack,
+ SND_JACK_MICROPHONE, shrt);
}
- if (of_property_read_bool(card->dev->of_node, "nvidia,headset"))
- shrt = SND_JACK_MICROPHONE;
-
- snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE,
- &tegra_wm8903_mic_jack,
- tegra_wm8903_mic_jack_pins,
- ARRAY_SIZE(tegra_wm8903_mic_jack_pins));
- wm8903_mic_detect(component, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE,
- shrt);
-
snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS");
return 0;
@@ -207,8 +103,8 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
static int tegra_wm8903_remove(struct snd_soc_card *card)
{
- struct snd_soc_pcm_runtime *rtd =
- snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
+ struct snd_soc_dai_link *link = &card->dai_link[0];
+ struct snd_soc_pcm_runtime *rtd = snd_soc_get_pcm_runtime(card, link);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_component *component = codec_dai->component;
@@ -226,7 +122,6 @@ static struct snd_soc_dai_link tegra_wm8903_dai = {
.name = "WM8903",
.stream_name = "WM8903 PCM",
.init = tegra_wm8903_init,
- .ops = &tegra_wm8903_ops,
.dai_fmt = SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
@@ -234,148 +129,60 @@ static struct snd_soc_dai_link tegra_wm8903_dai = {
};
static struct snd_soc_card snd_soc_tegra_wm8903 = {
- .name = "tegra-wm8903",
+ .components = "codec:wm8903",
.owner = THIS_MODULE,
.dai_link = &tegra_wm8903_dai,
.num_links = 1,
.remove = tegra_wm8903_remove,
- .controls = tegra_wm8903_controls,
- .num_controls = ARRAY_SIZE(tegra_wm8903_controls),
- .dapm_widgets = tegra_wm8903_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_wm8903_dapm_widgets),
.fully_routed = true,
};
-static int tegra_wm8903_driver_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_wm8903;
- struct tegra_wm8903 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm8903),
- GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- machine->gpio_spkr_en = of_get_named_gpio(np, "nvidia,spkr-en-gpios",
- 0);
- if (machine->gpio_spkr_en == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (gpio_is_valid(machine->gpio_spkr_en)) {
- ret = devm_gpio_request_one(&pdev->dev, machine->gpio_spkr_en,
- GPIOF_OUT_INIT_LOW, "spkr_en");
- if (ret) {
- dev_err(card->dev, "cannot get spkr_en gpio\n");
- return ret;
- }
- }
-
- machine->gpio_hp_mute = of_get_named_gpio(np, "nvidia,hp-mute-gpios",
- 0);
- if (machine->gpio_hp_mute == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (gpio_is_valid(machine->gpio_hp_mute)) {
- ret = devm_gpio_request_one(&pdev->dev, machine->gpio_hp_mute,
- GPIOF_OUT_INIT_HIGH, "hp_mute");
- if (ret) {
- dev_err(card->dev, "cannot get hp_mute gpio\n");
- return ret;
- }
- }
-
- machine->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (machine->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- machine->gpio_int_mic_en = of_get_named_gpio(np,
- "nvidia,int-mic-en-gpios", 0);
- if (machine->gpio_int_mic_en == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (gpio_is_valid(machine->gpio_int_mic_en)) {
- /* Disable int mic; enable signal is active-high */
- ret = devm_gpio_request_one(&pdev->dev,
- machine->gpio_int_mic_en,
- GPIOF_OUT_INIT_LOW, "int_mic_en");
- if (ret) {
- dev_err(card->dev, "cannot get int_mic_en gpio\n");
- return ret;
- }
- }
-
- machine->gpio_ext_mic_en = of_get_named_gpio(np,
- "nvidia,ext-mic-en-gpios", 0);
- if (machine->gpio_ext_mic_en == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (gpio_is_valid(machine->gpio_ext_mic_en)) {
- /* Enable ext mic; enable signal is active-low */
- ret = devm_gpio_request_one(&pdev->dev,
- machine->gpio_ext_mic_en,
- GPIOF_OUT_INIT_LOW, "ext_mic_en");
- if (ret) {
- dev_err(card->dev, "cannot get ext_mic_en gpio\n");
- return ret;
- }
- }
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- return ret;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- return ret;
-
- tegra_wm8903_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!tegra_wm8903_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_wm8903_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!tegra_wm8903_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- return -EINVAL;
- }
-
- tegra_wm8903_dai.platforms->of_node = tegra_wm8903_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- return ret;
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
+/* older device-trees used wrong polarity for the headphones-detection GPIO */
+static const struct tegra_asoc_data tegra_wm8903_data_legacy = {
+ .mclk_rate = tegra_wm8903_mclk_rate,
+ .card = &snd_soc_tegra_wm8903,
+ .hp_jack_gpio_active_low = true,
+ .add_common_dapm_widgets = true,
+ .add_common_controls = true,
+ .add_common_snd_ops = true,
+ .add_mic_jack = true,
+ .add_hp_jack = true,
+};
- return 0;
-}
+static const struct tegra_asoc_data tegra_wm8903_data = {
+ .mclk_rate = tegra_wm8903_mclk_rate,
+ .card = &snd_soc_tegra_wm8903,
+ .add_common_dapm_widgets = true,
+ .add_common_controls = true,
+ .add_common_snd_ops = true,
+ .add_mic_jack = true,
+ .add_hp_jack = true,
+};
static const struct of_device_id tegra_wm8903_of_match[] = {
- { .compatible = "nvidia,tegra-audio-wm8903", },
+ { .compatible = "ad,tegra-audio-plutux", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "ad,tegra-audio-wm8903-medcom-wide", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "ad,tegra-audio-wm8903-tec", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "nvidia,tegra-audio-wm8903-cardhu", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "nvidia,tegra-audio-wm8903-harmony", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "nvidia,tegra-audio-wm8903-picasso", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "nvidia,tegra-audio-wm8903-seaboard", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "nvidia,tegra-audio-wm8903-ventana", .data = &tegra_wm8903_data_legacy },
+ { .compatible = "nvidia,tegra-audio-wm8903", .data = &tegra_wm8903_data },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_wm8903_of_match);
static struct platform_driver tegra_wm8903_driver = {
.driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
+ .name = "tegra-wm8903",
.of_match_table = tegra_wm8903_of_match,
+ .pm = &snd_soc_pm_ops,
},
- .probe = tegra_wm8903_driver_probe,
+ .probe = tegra_asoc_machine_probe,
};
module_platform_driver(tegra_wm8903_driver);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("Tegra+WM8903 machine ASoC driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_wm8903_of_match);
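The legacy-polarity handling added to tegra_wm8903_init() above is worth spelling out: for the listed legacy compatibles the driver trusts the board rather than the device-tree, so when an old DT marks the headphone-detect GPIO active-high, the reported jack state is inverted instead. A minimal sketch of the idea, assuming a gpio_desc already obtained from the DT; the helper name is hypothetical, while snd_soc_jack_gpio.invert and gpiod_is_active_low() are the real APIs.

	/* Sketch; assumes <sound/soc.h>, <sound/jack.h>, <linux/gpio/consumer.h>. */
	static void example_fixup_jack_polarity(struct snd_soc_jack_gpio *jack_gpio,
						struct gpio_desc *gpiod_hp_det,
						bool legacy_dt)
	{
		if (!legacy_dt)
			return;		/* new device-trees are trusted as-is */

		/*
		 * If the legacy DT claims active-high, the board is really
		 * active-low: invert the value reported by the jack GPIO.
		 */
		jack_gpio->invert = !gpiod_is_active_low(gpiod_hp_det);
	}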
diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
deleted file mode 100644
index 4f09a178049d..000000000000
--- a/sound/soc/tegra/tegra_wm9712.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * tegra20_wm9712.c - Tegra machine ASoC driver for boards using WM9712 codec.
- *
- * Copyright 2012 Lucas Stach <dev@lynxeye.de>
- *
- * Partly based on code copyright/by:
- * Copyright 2011,2012 Toradex Inc.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-wm9712"
-
-struct tegra_wm9712 {
- struct platform_device *codec;
- struct tegra_asoc_utils_data util_data;
-};
-
-static const struct snd_soc_dapm_widget tegra_wm9712_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_LINE("LineIn", NULL),
- SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static int tegra_wm9712_init(struct snd_soc_pcm_runtime *rtd)
-{
- return snd_soc_dapm_force_enable_pin(&rtd->card->dapm, "Mic Bias");
-}
-
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC("wm9712-codec", "wm9712-hifi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link tegra_wm9712_dai = {
- .name = "AC97 HiFi",
- .stream_name = "AC97 HiFi",
- .init = tegra_wm9712_init,
- SND_SOC_DAILINK_REG(hifi),
-};
-
-static struct snd_soc_card snd_soc_tegra_wm9712 = {
- .name = "tegra-wm9712",
- .owner = THIS_MODULE,
- .dai_link = &tegra_wm9712_dai,
- .num_links = 1,
-
- .dapm_widgets = tegra_wm9712_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tegra_wm9712_dapm_widgets),
- .fully_routed = true,
-};
-
-static int tegra_wm9712_driver_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_tegra_wm9712;
- struct tegra_wm9712 *machine;
- int ret;
-
- machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_wm9712),
- GFP_KERNEL);
- if (!machine)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, machine);
-
- machine->codec = platform_device_alloc("wm9712-codec", -1);
- if (!machine->codec) {
- dev_err(&pdev->dev, "Can't allocate wm9712 platform device\n");
- return -ENOMEM;
- }
-
- ret = platform_device_add(machine->codec);
- if (ret)
- goto codec_put;
-
- ret = snd_soc_of_parse_card_name(card, "nvidia,model");
- if (ret)
- goto codec_unregister;
-
- ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
- if (ret)
- goto codec_unregister;
-
- tegra_wm9712_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,ac97-controller", 0);
- if (!tegra_wm9712_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,ac97-controller' missing or invalid\n");
- ret = -EINVAL;
- goto codec_unregister;
- }
-
- tegra_wm9712_dai.platforms->of_node = tegra_wm9712_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
- if (ret)
- goto codec_unregister;
-
- ret = tegra_asoc_utils_set_ac97_rate(&machine->util_data);
- if (ret)
- goto codec_unregister;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
- goto codec_unregister;
- }
-
- return 0;
-
-codec_unregister:
- platform_device_del(machine->codec);
-codec_put:
- platform_device_put(machine->codec);
- return ret;
-}
-
-static int tegra_wm9712_driver_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_wm9712 *machine = snd_soc_card_get_drvdata(card);
-
- snd_soc_unregister_card(card);
-
- platform_device_unregister(machine->codec);
-
- return 0;
-}
-
-static const struct of_device_id tegra_wm9712_of_match[] = {
- { .compatible = "nvidia,tegra-audio-wm9712", },
- {},
-};
-
-static struct platform_driver tegra_wm9712_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &snd_soc_pm_ops,
- .of_match_table = tegra_wm9712_of_match,
- },
- .probe = tegra_wm9712_driver_probe,
- .remove = tegra_wm9712_driver_remove,
-};
-module_platform_driver(tegra_wm9712_driver);
-
-MODULE_AUTHOR("Lucas Stach");
-MODULE_DESCRIPTION("Tegra+WM9712 machine ASoC driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_wm9712_of_match);
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
deleted file mode 100644
index 6c1cc3d0ac33..000000000000
--- a/sound/soc/tegra/trimslice.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * trimslice.c - TrimSlice machine ASoC driver
- *
- * Copyright (C) 2011 - CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- *
- * Based on code copyright/by:
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010-2011 - NVIDIA, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <sound/core.h>
-#include <sound/jack.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include "../codecs/tlv320aic23.h"
-
-#include "tegra_asoc_utils.h"
-
-#define DRV_NAME "tegra-snd-trimslice"
-
-struct tegra_trimslice {
- struct tegra_asoc_utils_data util_data;
-};
-
-static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- struct snd_soc_card *card = rtd->card;
- struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card);
- int srate, mclk;
- int err;
-
- srate = params_rate(params);
- mclk = 128 * srate;
-
- err = tegra_asoc_utils_set_rate(&trimslice->util_data, srate, mclk);
- if (err < 0) {
- dev_err(card->dev, "Can't configure clocks\n");
- return err;
- }
-
- err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
- SND_SOC_CLOCK_IN);
- if (err < 0) {
- dev_err(card->dev, "codec_dai clock not set\n");
- return err;
- }
-
- return 0;
-}
-
-static const struct snd_soc_ops trimslice_asoc_ops = {
- .hw_params = trimslice_asoc_hw_params,
-};
-
-static const struct snd_soc_dapm_widget trimslice_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Line Out", NULL),
- SND_SOC_DAPM_LINE("Line In", NULL),
-};
-
-static const struct snd_soc_dapm_route trimslice_audio_map[] = {
- {"Line Out", NULL, "LOUT"},
- {"Line Out", NULL, "ROUT"},
-
- {"LLINEIN", NULL, "Line In"},
- {"RLINEIN", NULL, "Line In"},
-};
-
-SND_SOC_DAILINK_DEFS(single_dsp,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "tlv320aic23-hifi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link trimslice_tlv320aic23_dai = {
- .name = "TLV320AIC23",
- .stream_name = "AIC23",
- .ops = &trimslice_asoc_ops,
- .dai_fmt = SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(single_dsp),
-};
-
-static struct snd_soc_card snd_soc_trimslice = {
- .name = "tegra-trimslice",
- .owner = THIS_MODULE,
- .dai_link = &trimslice_tlv320aic23_dai,
- .num_links = 1,
-
- .dapm_widgets = trimslice_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(trimslice_dapm_widgets),
- .dapm_routes = trimslice_audio_map,
- .num_dapm_routes = ARRAY_SIZE(trimslice_audio_map),
- .fully_routed = true,
-};
-
-static int tegra_snd_trimslice_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &snd_soc_trimslice;
- struct tegra_trimslice *trimslice;
- int ret;
-
- trimslice = devm_kzalloc(&pdev->dev, sizeof(struct tegra_trimslice),
- GFP_KERNEL);
- if (!trimslice)
- return -ENOMEM;
-
- card->dev = &pdev->dev;
- snd_soc_card_set_drvdata(card, trimslice);
-
- trimslice_tlv320aic23_dai.codecs->of_node = of_parse_phandle(np,
- "nvidia,audio-codec", 0);
- if (!trimslice_tlv320aic23_dai.codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,audio-codec' missing or invalid\n");
- return -EINVAL;
- }
-
- trimslice_tlv320aic23_dai.cpus->of_node = of_parse_phandle(np,
- "nvidia,i2s-controller", 0);
- if (!trimslice_tlv320aic23_dai.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'nvidia,i2s-controller' missing or invalid\n");
- return -EINVAL;
- }
-
- trimslice_tlv320aic23_dai.platforms->of_node =
- trimslice_tlv320aic23_dai.cpus->of_node;
-
- ret = tegra_asoc_utils_init(&trimslice->util_data, &pdev->dev);
- if (ret)
- return ret;
-
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "snd_soc_register_card failed\n");
-
- return 0;
-}
-
-static const struct of_device_id trimslice_of_match[] = {
- { .compatible = "nvidia,tegra-audio-trimslice", },
- {},
-};
-MODULE_DEVICE_TABLE(of, trimslice_of_match);
-
-static struct platform_driver tegra_snd_trimslice_driver = {
- .driver = {
- .name = DRV_NAME,
- .of_match_table = trimslice_of_match,
- },
- .probe = tegra_snd_trimslice_probe,
-};
-module_platform_driver(tegra_snd_trimslice_driver);
-
-MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
-MODULE_DESCRIPTION("Trimslice machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
index aba0017e870b..ecd24d412a9b 100644
--- a/sound/soc/ti/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
@@ -337,8 +337,8 @@ static int cx81801_hangup(struct tty_struct *tty)
}
/* Line discipline .receive_buf() */
-static void cx81801_receive(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void cx81801_receive(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count)
{
struct snd_soc_component *component = tty->disc_data;
const unsigned char *c;
@@ -396,6 +396,7 @@ static void cx81801_wakeup(struct tty_struct *tty)
static struct tty_ldisc_ops cx81801_ops = {
.name = "cx81801",
+ .num = N_V253,
.owner = THIS_MODULE,
.open = cx81801_open,
.close = cx81801_close,
@@ -503,7 +504,7 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
}
/* Register optional line discipline for over the modem control */
- ret = tty_register_ldisc(N_V253, &cx81801_ops);
+ ret = tty_register_ldisc(&cx81801_ops);
if (ret) {
dev_warn(card->dev,
"Failed to register line discipline, "
@@ -582,9 +583,7 @@ static int ams_delta_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- if (tty_unregister_ldisc(N_V253) != 0)
- dev_warn(&pdev->dev,
- "failed to unregister V253 line discipline\n");
+ tty_unregister_ldisc(&cx81801_ops);
snd_soc_unregister_card(card);
card->dev = NULL;
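The ams-delta change above follows the reworked line-discipline API: the discipline number now lives in struct tty_ldisc_ops (.num), tty_register_ldisc() takes only the ops pointer, and tty_unregister_ldisc() takes the same pointer and cannot fail. A minimal sketch of the new registration pattern; the example_ names are placeholders and the real callbacks are omitted.

	#include <linux/module.h>
	#include <linux/tty.h>

	static struct tty_ldisc_ops example_ldisc_ops = {
		.name  = "example-v253",
		.num   = N_V253,	/* the discipline number is part of the ops now */
		.owner = THIS_MODULE,
		/* .open/.close/.receive_buf callbacks would go here */
	};

	static int __init example_init(void)
	{
		/* Registration keys off ops->num; no separate number argument. */
		return tty_register_ldisc(&example_ldisc_ops);
	}

	static void __exit example_exit(void)
	{
		/* Unregistration takes the same ops pointer and returns void. */
		tty_unregister_ldisc(&example_ldisc_ops);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");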
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index b94220306d1a..017a5a5e56cd 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -2317,6 +2317,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
break;
default:
dev_err(&pdev->dev, "No DMA controller found (%d)\n", ret);
+ fallthrough;
case -EPROBE_DEFER:
goto err;
}
diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c
index a7c0484d44ec..265bbc5a2f96 100644
--- a/sound/soc/ti/j721e-evm.c
+++ b/sound/soc/ti/j721e-evm.c
@@ -197,7 +197,7 @@ static int j721e_configure_refclk(struct j721e_priv *priv,
return ret;
}
- if (priv->hsdiv_rates[domain->parent_clk_id] != scki) {
+ if (domain->parent_clk_id == -1 || priv->hsdiv_rates[domain->parent_clk_id] != scki) {
dev_dbg(priv->dev,
"%s configuration for %u Hz: %s, %dxFS (SCKI: %u Hz)\n",
audio_domain == J721E_AUDIO_DOMAIN_CPB ? "CPB" : "IVI",
@@ -278,23 +278,29 @@ static int j721e_audio_startup(struct snd_pcm_substream *substream)
j721e_rule_rate, &priv->rate_range,
SNDRV_PCM_HW_PARAM_RATE, -1);
- mutex_unlock(&priv->mutex);
if (ret)
- return ret;
+ goto out;
/* Reset TDM slots to 32 */
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
- return ret;
+ goto out;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
- return ret;
+ goto out;
}
- return 0;
+ if (ret == -ENOTSUPP)
+ ret = 0;
+out:
+ if (ret)
+ domain->active--;
+ mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int j721e_audio_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
index db47981768c5..4479d74f0a45 100644
--- a/sound/soc/ti/omap-mcbsp.c
+++ b/sound/soc/ti/omap-mcbsp.c
@@ -539,7 +539,7 @@ static ssize_t prop##_store(struct device *dev, \
return size; \
} \
\
-static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store)
+static DEVICE_ATTR_RW(prop)
THRESHOLD_PROP_BUILDER(max_tx_thres);
THRESHOLD_PROP_BUILDER(max_rx_thres);
diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c
index 3c1628a3a1ac..3d9736e7381f 100644
--- a/sound/soc/uniphier/aio-dma.c
+++ b/sound/soc/uniphier/aio-dma.c
@@ -198,7 +198,7 @@ static int uniphier_aiodma_mmap(struct snd_soc_component *component,
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
- substream->dma_buffer.addr >> PAGE_SHIFT,
+ substream->runtime->dma_addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 1d59fb668c77..91afea9d5de6 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -452,8 +452,8 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
stream_data->buffer_size = size;
- low = lower_32_bits(substream->dma_buffer.addr);
- high = upper_32_bits(substream->dma_buffer.addr);
+ low = lower_32_bits(runtime->dma_addr);
+ high = upper_32_bits(runtime->dma_addr);
writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB);
writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index d24ae00878f5..c434b69a83f1 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -975,8 +975,9 @@ static int snd_amd7930_create(struct snd_card *card,
spin_unlock_irqrestore(&amd->lock, flags);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- amd, &snd_amd7930_dev_ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
+ amd, &snd_amd7930_dev_ops);
+ if (err < 0) {
snd_amd7930_free(amd);
return err;
}
@@ -1019,13 +1020,16 @@ static int amd7930_sbus_probe(struct platform_device *op)
irq, dev_num, &amd)) < 0)
goto out_err;
- if ((err = snd_amd7930_pcm(amd)) < 0)
+ err = snd_amd7930_pcm(amd);
+ if (err < 0)
goto out_err;
- if ((err = snd_amd7930_mixer(amd)) < 0)
+ err = snd_amd7930_mixer(amd);
+ if (err < 0)
goto out_err;
- if ((err = snd_card_register(card)) < 0)
+ err = snd_card_register(card);
+ if (err < 0)
goto out_err;
amd->next = amd7930_list;
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 35c17803a430..2942c8c7a236 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1828,8 +1828,9 @@ static int snd_cs4231_sbus_create(struct snd_card *card,
}
snd_cs4231_init(chip);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- chip, &snd_cs4231_sbus_dev_ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
+ chip, &snd_cs4231_sbus_dev_ops);
+ if (err < 0) {
snd_cs4231_sbus_free(chip);
return err;
}
@@ -2020,8 +2021,9 @@ static int snd_cs4231_ebus_create(struct snd_card *card,
}
snd_cs4231_init(chip);
- if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
- chip, &snd_cs4231_ebus_dev_ops)) < 0) {
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
+ chip, &snd_cs4231_ebus_dev_ops);
+ if (err < 0) {
snd_cs4231_ebus_free(chip);
return err;
}
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index b055f5839578..6b84f66e4af4 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2226,11 +2226,12 @@ static int snd_dbri_pcm(struct snd_card *card)
struct snd_pcm *pcm;
int err;
- if ((err = snd_pcm_new(card,
- /* ID */ "sun_dbri",
- /* device */ 0,
- /* playback count */ 1,
- /* capture count */ 1, &pcm)) < 0)
+ err = snd_pcm_new(card,
+ /* ID */ "sun_dbri",
+ /* device */ 0,
+ /* playback count */ 1,
+ /* capture count */ 1, &pcm);
+ if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dbri_ops);
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index f65e6c7b139f..49d1976a132c 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -104,7 +104,8 @@ int snd_emux_register(struct snd_emux *emu, struct snd_card *card, int index, ch
if (emu->sflist == NULL)
return -ENOMEM;
- if ((err = snd_emux_init_hwdep(emu)) < 0)
+ err = snd_emux_init_hwdep(emu);
+ if (err < 0)
return err;
snd_emux_init_voices(emu);
diff --git a/sound/synth/emux/emux_effect.c b/sound/synth/emux/emux_effect.c
index afd119b11f39..3c7314f5fb19 100644
--- a/sound/synth/emux/emux_effect.c
+++ b/sound/synth/emux/emux_effect.c
@@ -181,7 +181,10 @@ snd_emux_send_effect(struct snd_emux_port *port, struct snd_midi_channel *chan,
fx->flag[type] = mode;
/* do we need to modify the register in realtime ? */
- if (! parm_defs[type].update || (offset = parm_defs[type].offset) < 0)
+ if (!parm_defs[type].update)
+ return;
+ offset = parm_defs[type].offset;
+ if (offset < 0)
return;
#ifdef SNDRV_LITTLE_ENDIAN
@@ -223,13 +226,17 @@ snd_emux_setup_effect(struct snd_emux_voice *vp)
unsigned char *srcp;
int i;
- if (! (fx = chan->private))
+ fx = chan->private;
+ if (!fx)
return;
/* modify the register values via effect table */
for (i = 0; i < EMUX_FX_END; i++) {
int offset;
- if (! fx->flag[i] || (offset = parm_defs[i].offset) < 0)
+ if (!fx->flag[i])
+ continue;
+ offset = parm_defs[i].offset;
+ if (offset < 0)
continue;
#ifdef SNDRV_LITTLE_ENDIAN
if (parm_defs[i].type & PARM_IS_ALIGN_HI)
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index 8a965e2f160a..81719bfb8ed7 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -116,7 +116,8 @@ snd_emux_init_hwdep(struct snd_emux *emu)
struct snd_hwdep *hw;
int err;
- if ((err = snd_hwdep_new(emu->card, SNDRV_EMUX_HWDEP_NAME, emu->hwdep_idx, &hw)) < 0)
+ err = snd_hwdep_new(emu->card, SNDRV_EMUX_HWDEP_NAME, emu->hwdep_idx, &hw);
+ if (err < 0)
return err;
emu->hwdep = hw;
strcpy(hw->name, SNDRV_EMUX_HWDEP_NAME);
@@ -127,7 +128,8 @@ snd_emux_init_hwdep(struct snd_emux *emu)
hw->ops.ioctl_compat = snd_emux_hwdep_ioctl;
hw->exclusive = 1;
hw->private_data = emu;
- if ((err = snd_card_register(emu->card)) < 0)
+ err = snd_card_register(emu->card);
+ if (err < 0)
return err;
return 0;
diff --git a/sound/synth/emux/emux_nrpn.c b/sound/synth/emux/emux_nrpn.c
index 7eed5791972c..8056422ed7c5 100644
--- a/sound/synth/emux/emux_nrpn.c
+++ b/sound/synth/emux/emux_nrpn.c
@@ -69,7 +69,7 @@ static const int gs_sense[] =
DEF_FX_VIBRATE, DEF_FX_VIBDEPTH, DEF_FX_VIBDELAY
};
-/* effect sensitivies for XG controls:
+/* effect sensitivities for XG controls:
* adjusted for chaos 8MB soundfonts
*/
static const int xg_sense[] =
diff --git a/sound/synth/emux/soundfont.c b/sound/synth/emux/soundfont.c
index 9ebc711afa6b..16f00097cb95 100644
--- a/sound/synth/emux/soundfont.c
+++ b/sound/synth/emux/soundfont.c
@@ -108,7 +108,7 @@ snd_soundfont_close_check(struct snd_sf_list *sflist, int client)
* Deal with a soundfont patch. Any driver could use these routines
* although it was designed for the AWE64.
*
- * The sample_write and callargs pararameters allow a callback into
+ * The sample_write and callargs parameters allow a callback into
* the actual driver to write sample data to the board or whatever
* it wants to do with it.
*/
@@ -349,7 +349,8 @@ sf_zone_new(struct snd_sf_list *sflist, struct snd_soundfont *sf)
{
struct snd_sf_zone *zp;
- if ((zp = kzalloc(sizeof(*zp), GFP_KERNEL)) == NULL)
+ zp = kzalloc(sizeof(*zp), GFP_KERNEL);
+ if (!zp)
return NULL;
zp->next = sf->zones;
sf->zones = zp;
@@ -381,7 +382,8 @@ sf_sample_new(struct snd_sf_list *sflist, struct snd_soundfont *sf)
{
struct snd_sf_sample *sp;
- if ((sp = kzalloc(sizeof(*sp), GFP_KERNEL)) == NULL)
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (!sp)
return NULL;
sp->next = sf->samples;
@@ -451,7 +453,8 @@ load_map(struct snd_sf_list *sflist, const void __user *data, int count)
}
/* create a new zone */
- if ((zp = sf_zone_new(sflist, sf)) == NULL)
+ zp = sf_zone_new(sflist, sf);
+ if (!zp)
return -ENOMEM;
zp->bank = map.map_bank;
@@ -514,7 +517,8 @@ load_info(struct snd_sf_list *sflist, const void __user *data, long count)
int i;
/* patch must be opened */
- if ((sf = sflist->currsf) == NULL)
+ sf = sflist->currsf;
+ if (!sf)
return -EINVAL;
if (is_special_type(sf->type))
@@ -579,9 +583,9 @@ load_info(struct snd_sf_list *sflist, const void __user *data, long count)
init_voice_parm(&tmpzone.v.parm);
/* create a new zone */
- if ((zone = sf_zone_new(sflist, sf)) == NULL) {
+ zone = sf_zone_new(sflist, sf);
+ if (!zone)
return -ENOMEM;
- }
/* copy the temporary data */
zone->bank = tmpzone.bank;
@@ -700,7 +704,8 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
long off;
/* patch must be opened */
- if ((sf = sflist->currsf) == NULL)
+ sf = sflist->currsf;
+ if (!sf)
return -EINVAL;
if (is_special_type(sf->type))
@@ -723,7 +728,8 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
}
/* Allocate a new sample structure */
- if ((sp = sf_sample_new(sflist, sf)) == NULL)
+ sp = sf_sample_new(sflist, sf);
+ if (!sp)
return -ENOMEM;
sp->v = sample_info;
@@ -793,7 +799,7 @@ snd_sf_linear_to_log(unsigned int amount, int offset, int ratio)
amount <<= 1;
s = (amount >> 24) & 0x7f;
low = (amount >> 16) & 0xff;
- /* linear approxmimation by lower 8 bit */
+ /* linear approximation by lower 8 bit */
v = (log_tbl[s + 1] * low + log_tbl[s] * (0x100 - low)) >> 8;
v -= offset;
v = (v * ratio) >> 16;
@@ -958,7 +964,8 @@ load_guspatch(struct snd_sf_list *sflist, const char __user *data,
sf = newsf(sflist, SNDRV_SFNT_PAT_TYPE_GUS|SNDRV_SFNT_PAT_SHARED, NULL);
if (sf == NULL)
return -ENOMEM;
- if ((smp = sf_sample_new(sflist, sf)) == NULL)
+ smp = sf_sample_new(sflist, sf);
+ if (!smp)
return -ENOMEM;
sample_id = sflist->sample_counter;
smp->v.sample = sample_id;
@@ -996,7 +1003,8 @@ load_guspatch(struct snd_sf_list *sflist, const char __user *data,
smp->v.sf_id = sf->id;
/* set up voice info */
- if ((zone = sf_zone_new(sflist, sf)) == NULL) {
+ zone = sf_zone_new(sflist, sf);
+ if (!zone) {
sf_sample_delete(sflist, sf, smp);
return -ENOMEM;
}
@@ -1181,7 +1189,8 @@ add_preset(struct snd_sf_list *sflist, struct snd_sf_zone *cur)
}
/* prepend this zone */
- if ((index = get_index(cur->bank, cur->instr, cur->v.low)) < 0)
+ index = get_index(cur->bank, cur->instr, cur->v.low);
+ if (index < 0)
return;
cur->next_zone = zone; /* zone link */
cur->next_instr = sflist->presets[index]; /* preset table link */
@@ -1197,7 +1206,8 @@ delete_preset(struct snd_sf_list *sflist, struct snd_sf_zone *zp)
int index;
struct snd_sf_zone *p;
- if ((index = get_index(zp->bank, zp->instr, zp->v.low)) < 0)
+ index = get_index(zp->bank, zp->instr, zp->v.low);
+ if (index < 0)
return;
for (p = sflist->presets[index]; p; p = p->next_instr) {
while (p->next_instr == zp) {
@@ -1257,7 +1267,8 @@ search_first_zone(struct snd_sf_list *sflist, int bank, int preset, int key)
int index;
struct snd_sf_zone *zp;
- if ((index = get_index(bank, preset, key)) < 0)
+ index = get_index(bank, preset, key);
+ if (index < 0)
return NULL;
for (zp = sflist->presets[index]; zp; zp = zp->next_instr) {
if (zp->instr == preset && zp->bank == bank)
@@ -1386,7 +1397,8 @@ snd_sf_new(struct snd_sf_callback *callback, struct snd_util_memhdr *hdr)
{
struct snd_sf_list *sflist;
- if ((sflist = kzalloc(sizeof(*sflist), GFP_KERNEL)) == NULL)
+ sflist = kzalloc(sizeof(*sflist), GFP_KERNEL);
+ if (!sflist)
return NULL;
mutex_init(&sflist->presets_mutex);
@@ -1421,7 +1433,7 @@ snd_sf_free(struct snd_sf_list *sflist)
/*
* Remove all samples
- * The soundcard should be silet before calling this function.
+ * The soundcard should be silent before calling this function.
*/
int
snd_soundfont_remove_samples(struct snd_sf_list *sflist)
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 2f6a62416c05..a1f8c3a026f5 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -907,7 +907,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
}
}
- if (chip->quirk_type & QUIRK_SETUP_DISABLE_AUTOSUSPEND)
+ if (chip->quirk_type == QUIRK_SETUP_DISABLE_AUTOSUSPEND)
usb_enable_autosuspend(interface_to_usbdev(intf));
chip->num_interfaces--;
diff --git a/sound/usb/card.h b/sound/usb/card.h
index a741e7da83a2..6c0a052a28f9 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -19,7 +19,7 @@ struct audioformat {
unsigned char iface; /* interface number */
unsigned char altsetting; /* corresponding alternate setting */
unsigned char ep_idx; /* endpoint array index */
- unsigned char altset_idx; /* array index of altenate setting */
+ unsigned char altset_idx; /* array index of alternate setting */
unsigned char attributes; /* corresponding attributes of cs endpoint */
unsigned char endpoint; /* endpoint */
unsigned char ep_attr; /* endpoint attributes */
@@ -54,6 +54,7 @@ struct snd_urb_ctx {
struct snd_usb_endpoint *ep;
int index; /* index for urb array */
int packets; /* number of packets per urb */
+ int queued; /* queued data bytes by this urb */
int packet_size[MAX_PACKS_HS]; /* size of packets for next submission */
struct list_head ready_list;
};
@@ -157,9 +158,12 @@ struct snd_usb_substream {
unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */
unsigned int running: 1; /* running status */
+ unsigned int period_elapsed_pending; /* delay period handling */
+ unsigned int buffer_bytes; /* buffer size in bytes */
+ unsigned int inflight_bytes; /* in-flight data bytes on buffer (for playback) */
unsigned int hwptr_done; /* processed byte position in the buffer */
- unsigned int transfer_done; /* processed frames since last period update */
+ unsigned int transfer_done; /* processed frames since last period update */
unsigned int frame_limit; /* limits number of packets in URB */
/* data and sync endpoints for this stream */
@@ -174,8 +178,7 @@ struct snd_usb_substream {
struct list_head fmt_list; /* format list */
spinlock_t lock;
- int last_frame_number; /* stored frame number */
- int last_delay; /* stored delay */
+ unsigned int last_frame_number; /* stored frame number */
struct {
int marker;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 17bbde73d4d1..14456f61539e 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -21,82 +21,77 @@
#include "clock.h"
#include "quirks.h"
+union uac23_clock_source_desc {
+ struct uac_clock_source_descriptor v2;
+ struct uac3_clock_source_descriptor v3;
+};
+
+union uac23_clock_selector_desc {
+ struct uac_clock_selector_descriptor v2;
+ struct uac3_clock_selector_descriptor v3;
+};
+
+union uac23_clock_multiplier_desc {
+ struct uac_clock_multiplier_descriptor v2;
+ struct uac3_clock_multiplier_descriptor v3;
+};
+
+#define GET_VAL(p, proto, field) \
+ ((proto) == UAC_VERSION_3 ? (p)->v3.field : (p)->v2.field)
+
static void *find_uac_clock_desc(struct usb_host_interface *iface, int id,
- bool (*validator)(void *, int), u8 type)
+ bool (*validator)(void *, int, int),
+ u8 type, int proto)
{
void *cs = NULL;
while ((cs = snd_usb_find_csint_desc(iface->extra, iface->extralen,
cs, type))) {
- if (validator(cs, id))
+ if (validator(cs, id, proto))
return cs;
}
return NULL;
}
-static bool validate_clock_source_v2(void *p, int id)
+static bool validate_clock_source(void *p, int id, int proto)
{
- struct uac_clock_source_descriptor *cs = p;
- return cs->bClockID == id;
-}
+ union uac23_clock_source_desc *cs = p;
-static bool validate_clock_source_v3(void *p, int id)
-{
- struct uac3_clock_source_descriptor *cs = p;
- return cs->bClockID == id;
+ return GET_VAL(cs, proto, bClockID) == id;
}
-static bool validate_clock_selector_v2(void *p, int id)
+static bool validate_clock_selector(void *p, int id, int proto)
{
- struct uac_clock_selector_descriptor *cs = p;
- return cs->bClockID == id;
-}
+ union uac23_clock_selector_desc *cs = p;
-static bool validate_clock_selector_v3(void *p, int id)
-{
- struct uac3_clock_selector_descriptor *cs = p;
- return cs->bClockID == id;
+ return GET_VAL(cs, proto, bClockID) == id;
}
-static bool validate_clock_multiplier_v2(void *p, int id)
+static bool validate_clock_multiplier(void *p, int id, int proto)
{
- struct uac_clock_multiplier_descriptor *cs = p;
- return cs->bClockID == id;
-}
+ union uac23_clock_multiplier_desc *cs = p;
-static bool validate_clock_multiplier_v3(void *p, int id)
-{
- struct uac3_clock_multiplier_descriptor *cs = p;
- return cs->bClockID == id;
+ return GET_VAL(cs, proto, bClockID) == id;
}
-#define DEFINE_FIND_HELPER(name, obj, validator, type) \
-static obj *name(struct usb_host_interface *iface, int id) \
-{ \
- return find_uac_clock_desc(iface, id, validator, type); \
+#define DEFINE_FIND_HELPER(name, obj, validator, type2, type3) \
+static obj *name(struct snd_usb_audio *chip, int id, int proto) \
+{ \
+ return find_uac_clock_desc(chip->ctrl_intf, id, validator, \
+ proto == UAC_VERSION_3 ? (type3) : (type2), \
+ proto); \
}
DEFINE_FIND_HELPER(snd_usb_find_clock_source,
- struct uac_clock_source_descriptor,
- validate_clock_source_v2, UAC2_CLOCK_SOURCE);
-DEFINE_FIND_HELPER(snd_usb_find_clock_source_v3,
- struct uac3_clock_source_descriptor,
- validate_clock_source_v3, UAC3_CLOCK_SOURCE);
-
+ union uac23_clock_source_desc, validate_clock_source,
+ UAC2_CLOCK_SOURCE, UAC3_CLOCK_SOURCE);
DEFINE_FIND_HELPER(snd_usb_find_clock_selector,
- struct uac_clock_selector_descriptor,
- validate_clock_selector_v2, UAC2_CLOCK_SELECTOR);
-DEFINE_FIND_HELPER(snd_usb_find_clock_selector_v3,
- struct uac3_clock_selector_descriptor,
- validate_clock_selector_v3, UAC3_CLOCK_SELECTOR);
-
+ union uac23_clock_selector_desc, validate_clock_selector,
+ UAC2_CLOCK_SELECTOR, UAC3_CLOCK_SELECTOR);
DEFINE_FIND_HELPER(snd_usb_find_clock_multiplier,
- struct uac_clock_multiplier_descriptor,
- validate_clock_multiplier_v2, UAC2_CLOCK_MULTIPLIER);
-DEFINE_FIND_HELPER(snd_usb_find_clock_multiplier_v3,
- struct uac3_clock_multiplier_descriptor,
- validate_clock_multiplier_v3, UAC3_CLOCK_MULTIPLIER);
+ union uac23_clock_multiplier_desc, validate_clock_multiplier,
+ UAC2_CLOCK_MULTIPLIER, UAC3_CLOCK_MULTIPLIER);
static int uac_clock_selector_get_val(struct snd_usb_audio *chip, int selector_id)
{
@@ -159,14 +154,13 @@ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
int count;
unsigned char data;
struct usb_device *dev = chip->dev;
+ union uac23_clock_source_desc *cs_desc;
- if (fmt->protocol == UAC_VERSION_2) {
- struct uac_clock_source_descriptor *cs_desc =
- snd_usb_find_clock_source(chip->ctrl_intf, source_id);
-
- if (!cs_desc)
- return false;
+ cs_desc = snd_usb_find_clock_source(chip, source_id, fmt->protocol);
+ if (!cs_desc)
+ return false;
+ if (fmt->protocol == UAC_VERSION_2) {
/*
* Assume the clock is valid if clock source supports only one
* single sample rate, the terminal is connected directly to it
@@ -175,8 +169,8 @@ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
* reports that clock is invalid.
*/
if (fmt->nr_rates == 1 &&
- (fmt->clock & 0xff) == cs_desc->bClockID &&
- (cs_desc->bmAttributes & 0x3) !=
+ (fmt->clock & 0xff) == cs_desc->v2.bClockID &&
+ (cs_desc->v2.bmAttributes & 0x3) !=
UAC_CLOCK_SOURCE_TYPE_EXT)
return true;
}
@@ -222,22 +216,16 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
unsigned char data;
struct usb_device *dev = chip->dev;
u32 bmControls;
+ union uac23_clock_source_desc *cs_desc;
- if (fmt->protocol == UAC_VERSION_3) {
- struct uac3_clock_source_descriptor *cs_desc =
- snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
-
- if (!cs_desc)
- return false;
- bmControls = le32_to_cpu(cs_desc->bmControls);
- } else { /* UAC_VERSION_1/2 */
- struct uac_clock_source_descriptor *cs_desc =
- snd_usb_find_clock_source(chip->ctrl_intf, source_id);
+ cs_desc = snd_usb_find_clock_source(chip, source_id, fmt->protocol);
+ if (!cs_desc)
+ return false;
- if (!cs_desc)
- return false;
- bmControls = cs_desc->bmControls;
- }
+ if (fmt->protocol == UAC_VERSION_3)
+ bmControls = le32_to_cpu(cs_desc->v3.bmControls);
+ else
+ bmControls = cs_desc->v2.bmControls;
/* If a clock source can't tell us whether it's valid, we assume it is */
if (!uac_v2v3_control_is_readable(bmControls,
@@ -267,9 +255,12 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
const struct audioformat *fmt, int entity_id,
unsigned long *visited, bool validate)
{
- struct uac_clock_source_descriptor *source;
- struct uac_clock_selector_descriptor *selector;
- struct uac_clock_multiplier_descriptor *multiplier;
+ union uac23_clock_source_desc *source;
+ union uac23_clock_selector_desc *selector;
+ union uac23_clock_multiplier_desc *multiplier;
+ int ret, i, cur, err, pins, clock_id;
+ const u8 *sources;
+ int proto = fmt->protocol;
entity_id &= 0xff;
@@ -281,9 +272,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
}
/* first, see if the ID we're looking for is a clock source already */
- source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
+ source = snd_usb_find_clock_source(chip, entity_id, proto);
if (source) {
- entity_id = source->bClockID;
+ entity_id = GET_VAL(source, proto, bClockID);
if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) {
usb_audio_err(chip,
@@ -294,37 +285,51 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
return entity_id;
}
- selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
+ selector = snd_usb_find_clock_selector(chip, entity_id, proto);
if (selector) {
- int ret, i, cur, err;
+ pins = GET_VAL(selector, proto, bNrInPins);
+ clock_id = GET_VAL(selector, proto, bClockID);
+ sources = GET_VAL(selector, proto, baCSourceID);
+ cur = 0;
- if (selector->bNrInPins == 1) {
+ if (pins == 1) {
ret = 1;
goto find_source;
}
/* the entity ID we are looking for is a selector.
* find out what it currently selects */
- ret = uac_clock_selector_get_val(chip, selector->bClockID);
- if (ret < 0)
- return ret;
+ ret = uac_clock_selector_get_val(chip, clock_id);
+ if (ret < 0) {
+ if (!chip->autoclock)
+ return ret;
+ goto find_others;
+ }
/* Selector values are one-based */
- if (ret > selector->bNrInPins || ret < 1) {
+ if (ret > pins || ret < 1) {
usb_audio_err(chip,
"%s(): selector reported illegal value, id %d, ret %d\n",
- __func__, selector->bClockID, ret);
+ __func__, clock_id, ret);
- return -EINVAL;
+ if (!chip->autoclock)
+ return -EINVAL;
+ goto find_others;
}
find_source:
cur = ret;
ret = __uac_clock_find_source(chip, fmt,
- selector->baCSourceID[ret - 1],
+ sources[ret - 1],
visited, validate);
if (ret > 0) {
+ /*
+ * For Samsung USBC Headset (AKG), setting clock selector again
+ * will result in incorrect default clock setting problems
+ */
+ if (chip->usb_id == USB_ID(0x04e8, 0xa051))
+ return ret;
err = uac_clock_selector_set_val(chip, entity_id, cur);
if (err < 0)
return err;
@@ -333,13 +338,14 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
if (!validate || ret > 0 || !chip->autoclock)
return ret;
+ find_others:
/* The current clock source is invalid, try others. */
- for (i = 1; i <= selector->bNrInPins; i++) {
+ for (i = 1; i <= pins; i++) {
if (i == cur)
continue;
ret = __uac_clock_find_source(chip, fmt,
- selector->baCSourceID[i - 1],
+ sources[i - 1],
visited, true);
if (ret < 0)
continue;
@@ -358,116 +364,15 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
}
/* FIXME: multipliers only act as pass-thru element for now */
- multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
+ multiplier = snd_usb_find_clock_multiplier(chip, entity_id, proto);
if (multiplier)
return __uac_clock_find_source(chip, fmt,
- multiplier->bCSourceID,
+ GET_VAL(multiplier, proto, bCSourceID),
visited, validate);
return -EINVAL;
}
-static int __uac3_clock_find_source(struct snd_usb_audio *chip,
- const struct audioformat *fmt, int entity_id,
- unsigned long *visited, bool validate)
-{
- struct uac3_clock_source_descriptor *source;
- struct uac3_clock_selector_descriptor *selector;
- struct uac3_clock_multiplier_descriptor *multiplier;
-
- entity_id &= 0xff;
-
- if (test_and_set_bit(entity_id, visited)) {
- usb_audio_warn(chip,
- "%s(): recursive clock topology detected, id %d.\n",
- __func__, entity_id);
- return -EINVAL;
- }
-
- /* first, see if the ID we're looking for is a clock source already */
- source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
- if (source) {
- entity_id = source->bClockID;
- if (validate && !uac_clock_source_is_valid(chip, fmt,
- entity_id)) {
- usb_audio_err(chip,
- "clock source %d is not valid, cannot use\n",
- entity_id);
- return -ENXIO;
- }
- return entity_id;
- }
-
- selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
- if (selector) {
- int ret, i, cur, err;
-
- /* the entity ID we are looking for is a selector.
- * find out what it currently selects */
- ret = uac_clock_selector_get_val(chip, selector->bClockID);
- if (ret < 0)
- return ret;
-
- /* Selector values are one-based */
-
- if (ret > selector->bNrInPins || ret < 1) {
- usb_audio_err(chip,
- "%s(): selector reported illegal value, id %d, ret %d\n",
- __func__, selector->bClockID, ret);
-
- return -EINVAL;
- }
-
- cur = ret;
- ret = __uac3_clock_find_source(chip, fmt,
- selector->baCSourceID[ret - 1],
- visited, validate);
- if (ret > 0) {
- err = uac_clock_selector_set_val(chip, entity_id, cur);
- if (err < 0)
- return err;
- }
-
- if (!validate || ret > 0 || !chip->autoclock)
- return ret;
-
- /* The current clock source is invalid, try others. */
- for (i = 1; i <= selector->bNrInPins; i++) {
- int err;
-
- if (i == cur)
- continue;
-
- ret = __uac3_clock_find_source(chip, fmt,
- selector->baCSourceID[i - 1],
- visited, true);
- if (ret < 0)
- continue;
-
- err = uac_clock_selector_set_val(chip, entity_id, i);
- if (err < 0)
- continue;
-
- usb_audio_info(chip,
- "found and selected valid clock source %d\n",
- ret);
- return ret;
- }
-
- return -ENXIO;
- }
-
- /* FIXME: multipliers only act as pass-thru element for now */
- multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
- entity_id);
- if (multiplier)
- return __uac3_clock_find_source(chip, fmt,
- multiplier->bCSourceID,
- visited, validate);
-
- return -EINVAL;
-}
-
/*
* For all kinds of sample rate settings and other device queries,
* the clock source (end-leaf) must be used. However, clock selectors,
@@ -487,10 +392,8 @@ int snd_usb_clock_find_source(struct snd_usb_audio *chip,
switch (fmt->protocol) {
case UAC_VERSION_2:
- return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
- validate);
case UAC_VERSION_3:
- return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
+ return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
validate);
default:
return -EINVAL;
@@ -593,18 +496,13 @@ int snd_usb_set_sample_rate_v2v3(struct snd_usb_audio *chip,
u32 bmControls;
__le32 data;
int err;
+ union uac23_clock_source_desc *cs_desc;
- if (fmt->protocol == UAC_VERSION_3) {
- struct uac3_clock_source_descriptor *cs_desc;
-
- cs_desc = snd_usb_find_clock_source_v3(chip->ctrl_intf, clock);
- bmControls = le32_to_cpu(cs_desc->bmControls);
- } else {
- struct uac_clock_source_descriptor *cs_desc;
-
- cs_desc = snd_usb_find_clock_source(chip->ctrl_intf, clock);
- bmControls = cs_desc->bmControls;
- }
+ cs_desc = snd_usb_find_clock_source(chip, clock, fmt->protocol);
+ if (fmt->protocol == UAC_VERSION_3)
+ bmControls = le32_to_cpu(cs_desc->v3.bmControls);
+ else
+ bmControls = cs_desc->v2.bmControls;
writeable = uac_v2v3_control_is_writeable(bmControls,
UAC2_CS_CONTROL_SAM_FREQ);
@@ -642,6 +540,13 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
* rate.
*/
clock = snd_usb_clock_find_source(chip, fmt, false);
+
+ /* Denon DN-X1600 hardcoded
+ * Sample rate seems to be set on the hardware itself
+ */
+ if (chip->usb_id == USB_ID(0x154e, 0x500e))
+ return 0;
+
if (clock < 0)
return clock;
}
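The clock.c rework replaces the duplicated v2/v3 helpers with unions over the UAC2/UAC3 descriptors plus a GET_VAL() macro that picks the right view from the protocol number. A compact stand-alone sketch of that dispatch pattern; the struct layouts and constants below are simplified stand-ins, not the real USB audio descriptors:

#include <stdio.h>

#define UAC_VERSION_2	2
#define UAC_VERSION_3	3

/* simplified stand-ins for the v2/v3 clock source descriptors */
struct v2_clock_source { unsigned char bClockID; unsigned char bmAttributes; };
struct v3_clock_source { unsigned char bClockID; unsigned int bmControls; };

union clock_source_desc {
	struct v2_clock_source v2;
	struct v3_clock_source v3;
};

/* same shape as the patch's GET_VAL(): choose the view by protocol */
#define GET_VAL(p, proto, field) \
	((proto) == UAC_VERSION_3 ? (p)->v3.field : (p)->v2.field)

static int clock_id(const union clock_source_desc *cs, int proto)
{
	return GET_VAL(cs, proto, bClockID);
}

int main(void)
{
	union clock_source_desc d = { .v2 = { .bClockID = 41 } };

	printf("clock id: %d\n", clock_id(&d, UAC_VERSION_2));
	return 0;
}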
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 014c43862826..4f856771216b 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -275,6 +275,7 @@ static void prepare_silent_urb(struct snd_usb_endpoint *ep,
urb->number_of_packets = ctx->packets;
urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra;
+ ctx->queued = 0;
}
/*
@@ -644,7 +645,7 @@ static bool endpoint_compatible(struct snd_usb_endpoint *ep,
}
/*
- * Check whether the given fp and hw params are compatbile with the current
+ * Check whether the given fp and hw params are compatible with the current
* setup of the target EP for implicit feedback sync
*/
bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
@@ -1244,7 +1245,7 @@ static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
*
* This function sets up the EP to be fully usable state.
* It's called either from hw_params or prepare callback.
- * The function checks need_setup flag, and perfoms nothing unless needed,
+ * The function checks need_setup flag, and performs nothing unless needed,
* so it's safe to call this multiple times.
*
* This returns zero if unchanged, 1 if the configuration has changed,
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index eea4ca49876d..a668f675b52b 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -19,7 +19,6 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep);
int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep);
-void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep);
bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep,
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 2287f8c65315..eb216fef4ba7 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -223,9 +223,11 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
continue;
/* C-Media CM6501 mislabels its 96 kHz altsetting */
/* Terratec Aureon 7.1 USB C-Media 6206, too */
+ /* Ozone Z90 USB C-Media, too */
if (rate == 48000 && nr_rates == 1 &&
(chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
chip->usb_id == USB_ID(0x0d8c, 0x0102) ||
+ chip->usb_id == USB_ID(0x0d8c, 0x0078) ||
chip->usb_id == USB_ID(0x0ccd, 0x00b1)) &&
fp->altsetting == 5 && fp->maxpacksize == 392)
rate = 96000;
diff --git a/sound/usb/media.c b/sound/usb/media.c
index 812017eacbcf..840f42cb9272 100644
--- a/sound/usb/media.c
+++ b/sound/usb/media.c
@@ -285,7 +285,7 @@ snd_mixer_init:
ret);
if (!media_devnode_is_registered(mdev->devnode)) {
- /* dont'register if snd_media_mixer_init() failed */
+ /* don't register if snd_media_mixer_init() failed */
if (ret)
goto create_fail;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 428d581f988f..9b713b4a5ec4 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1816,6 +1816,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
strlcat(name, " - Output Jack", name_size);
}
+/* get connector value to "wake up" the USB audio */
+static int connector_mixer_resume(struct usb_mixer_elem_list *list)
+{
+ struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
+
+ get_connector_value(cval, NULL, NULL);
+ return 0;
+}
+
/* Build a mixer control for a UAC connector control (jack-detect) */
static void build_connector_control(struct usb_mixer_interface *mixer,
const struct usbmix_name_map *imap,
@@ -1833,6 +1842,10 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
if (!cval)
return;
snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id);
+
+ /* set up a specific resume callback */
+ cval->head.resume = connector_mixer_resume;
+
/*
* UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the
* number of channels connected.
@@ -3294,8 +3307,17 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
struct usb_mixer_elem_list *list)
{
struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
- static const char * const val_types[] = {"BOOLEAN", "INV_BOOLEAN",
- "S8", "U8", "S16", "U16"};
+ static const char * const val_types[] = {
+ [USB_MIXER_BOOLEAN] = "BOOLEAN",
+ [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
+ [USB_MIXER_S8] = "S8",
+ [USB_MIXER_U8] = "U8",
+ [USB_MIXER_S16] = "S16",
+ [USB_MIXER_U16] = "U16",
+ [USB_MIXER_S32] = "S32",
+ [USB_MIXER_U32] = "U32",
+ [USB_MIXER_BESPOKEN] = "BESPOKEN",
+ };
snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
"channels=%i, type=\"%s\"\n", cval->head.id,
cval->control, cval->cmask, cval->channels,
@@ -3605,6 +3627,9 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
int c, err, idx;
+ if (cval->val_type == USB_MIXER_BESPOKEN)
+ return 0;
+
if (cval->cmask) {
idx = 0;
for (c = 0; c < MAX_CHANNELS; c++) {
@@ -3630,23 +3655,15 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
return 0;
}
-static int default_mixer_resume(struct usb_mixer_elem_list *list)
-{
- struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
-
- /* get connector value to "wake up" the USB audio */
- if (cval->val_type == USB_MIXER_BOOLEAN && cval->channels == 1)
- get_connector_value(cval, NULL, NULL);
-
- return 0;
-}
-
static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
{
- int err = default_mixer_resume(list);
+ int err;
- if (err < 0)
- return err;
+ if (list->resume) {
+ err = list->resume(list);
+ if (err < 0)
+ return err;
+ }
return restore_mixer_value(list);
}
@@ -3685,7 +3702,7 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
list->id = unitid;
list->dump = snd_usb_mixer_dump_cval;
#ifdef CONFIG_PM
- list->resume = default_mixer_resume;
+ list->resume = NULL;
list->reset_resume = default_mixer_reset_resume;
#endif
}
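The mixer.c change turns the old one-size-fits-all resume handler into an optional per-element callback: default_mixer_reset_resume() now runs list->resume only when it is set, then restores the cached value, and the connector controls install connector_mixer_resume explicitly. A small userspace sketch of that callback arrangement, with illustrative names:

#include <stdio.h>
#include <stddef.h>

/* toy element with an optional resume hook, like usb_mixer_elem_list */
struct toy_elem {
	int (*resume)(struct toy_elem *e);	/* NULL for most elements */
	int cached_value;
};

static int restore_value(struct toy_elem *e)
{
	printf("restore cached value %d\n", e->cached_value);
	return 0;
}

/* mirrors default_mixer_reset_resume(): optional hook first, then restore */
static int toy_reset_resume(struct toy_elem *e)
{
	int err;

	if (e->resume) {
		err = e->resume(e);
		if (err < 0)
			return err;
	}
	return restore_value(e);
}

static int connector_resume(struct toy_elem *e)
{
	printf("re-read jack state to wake the device\n");
	return 0;
}

int main(void)
{
	struct toy_elem volume = { .resume = NULL, .cached_value = 100 };
	struct toy_elem jack   = { .resume = connector_resume, .cached_value = 1 };

	toy_reset_resume(&volume);
	toy_reset_resume(&jack);
	return 0;
}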
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index e5a01f17bf3c..ea41e7a1f7bf 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -55,6 +55,7 @@ enum {
USB_MIXER_U16,
USB_MIXER_S32,
USB_MIXER_U32,
+ USB_MIXER_BESPOKEN, /* non-standard type */
};
typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 37ad77524c0b..0a3cb8fd7d00 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -3060,6 +3060,12 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */
case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */
case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */
+ case USB_ID(0x1235, 0x8211): /* Focusrite Scarlett Solo 3rd Gen */
+ case USB_ID(0x1235, 0x8210): /* Focusrite Scarlett 2i2 3rd Gen */
+ case USB_ID(0x1235, 0x8212): /* Focusrite Scarlett 4i4 3rd Gen */
+ case USB_ID(0x1235, 0x8213): /* Focusrite Scarlett 8i6 3rd Gen */
+ case USB_ID(0x1235, 0x8214): /* Focusrite Scarlett 18i8 3rd Gen */
+ case USB_ID(0x1235, 0x8215): /* Focusrite Scarlett 18i20 3rd Gen */
err = snd_scarlett_gen2_init(mixer);
break;
diff --git a/sound/usb/mixer_s1810c.c b/sound/usb/mixer_s1810c.c
index c53a9773f310..0255089c9efb 100644
--- a/sound/usb/mixer_s1810c.c
+++ b/sound/usb/mixer_s1810c.c
@@ -163,7 +163,7 @@ snd_s1810c_send_ctl_packet(struct usb_device *dev, u32 a,
}
/*
- * When opening Universal Control the program periodicaly
+ * When opening Universal Control the program periodically
 * sends and receives state packets for syncing state between
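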
* the device and the host.
*
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 691b95466d0f..0d6e4f15bf77 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -21,7 +21,7 @@
* Auto-detection via UAC2 is not feasible to properly discover the vast
* majority of features. It's related to both Linux/ALSA's UAC2 as well as
* Focusrite's implementation of it. Eventually quirks may be sufficient but
- * right now it's a major headache to work arount these things.
+ * right now it's a major headache to work around these things.
*
* NB. Neither the OSX nor the win driver provided by Focusrite performs
* discovery, they seem to operate the same as this driver.
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 4caf379d5b99..3d5848d5481b 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -1,8 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Focusrite Scarlett 6i6/18i8/18i20 Gen 2 Driver for ALSA
+ * Focusrite Scarlett Gen 2/3 Driver for ALSA
*
- * Copyright (c) 2018-2019 by Geoffrey D. Bennett <g at b4.vu>
+ * Supported models:
+ * - 6i6/18i8/18i20 Gen 2
+ * - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
+ *
+ * Copyright (c) 2018-2021 by Geoffrey D. Bennett <g at b4.vu>
+ * Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
*
* Based on the Scarlett (Gen 1) Driver for ALSA:
*
@@ -19,10 +24,6 @@
* David Henningsson <david.henningsson at canonical.com>
*/
-/* Mixer Interface for the Focusrite Scarlett 6i6/18i8/18i20 Gen 2 audio
- * interface. Based on the Gen 1 driver and rewritten.
- */
-
/* The protocol was reverse engineered by looking at the communication
* between Focusrite Control 2.3.4 and the Focusrite(R) Scarlett 18i20
* (firmware 1083) using usbmon in July-August 2018.
@@ -32,12 +33,33 @@
* Scarlett 6i6 support added in June 2019 (thanks to Martin Wittmann
* for providing usbmon output and testing).
*
- * This ALSA mixer gives access to:
+ * Scarlett 4i4/8i6 Gen 3 support added in May 2020 (thanks to Laurent
+ * Debricon for donating a 4i4 and to Fredrik Unger for providing 8i6
+ * usbmon output and testing).
+ *
+ * Scarlett 18i8/18i20 Gen 3 support added in June 2020 (thanks to
+ * Darren Jaeckel, Alex Sedlack, and Clovis Lunel for providing usbmon
+ * output, protocol traces and testing).
+ *
+ * Support for loading mixer volume and mux configuration from the
+ * interface during driver initialisation added in May 2021 (thanks to
+ * Vladimir Sadovnikov for figuring out how).
+ *
+ * Support for Solo/2i2 Gen 3 added in May 2021 (thanks to Alexander
+ * Vorona for 2i2 protocol traces).
+ *
+ * Support for phantom power, direct monitoring, speaker switching,
+ * and talkback added in May-June 2021.
+ *
+ * This ALSA mixer gives access to (model-dependent):
* - input, output, mixer-matrix muxes
- * - 18x10 mixer-matrix gain stages
- * - gain/volume controls
+ * - mixer-matrix gain stages
+ * - gain/volume/mute controls
* - level meters
- * - line/inst level and pad controls
+ * - line/inst level, pad, and air controls
+ * - phantom power, direct monitor, speaker switching, and talkback
+ * controls
+ * - disable/enable MSD mode
*
* <ditaa>
* /--------------\ 18chn 20chn /--------------\
@@ -90,6 +112,14 @@
* \--------------/
* </ditaa>
*
+ * Gen 3 devices have a Mass Storage Device (MSD) mode where a small
+ * disk with registration and driver download information is presented
+ * to the host. To access the full functionality of the device without
+ * proprietary software, MSD mode can be disabled by:
+ * - holding down the 48V button for five seconds while powering on
+ * the device, or
+ * - using this driver and alsamixer to change the "MSD Mode" setting
+ * to Off and power-cycling the device
*/
#include <linux/slab.h>
@@ -108,6 +138,9 @@
/* device_setup value to enable */
#define SCARLETT2_ENABLE 0x01
+/* device_setup value to allow turning MSD mode back on */
+#define SCARLETT2_MSD_ENABLE 0x02
+
/* some gui mixers can't handle negative ctl values */
#define SCARLETT2_VOLUME_BIAS 127
@@ -117,11 +150,12 @@
#define SCARLETT2_MIXER_MAX_DB 6
#define SCARLETT2_MIXER_MAX_VALUE \
((SCARLETT2_MIXER_MAX_DB - SCARLETT2_MIXER_MIN_DB) * 2)
+#define SCARLETT2_MIXER_VALUE_COUNT (SCARLETT2_MIXER_MAX_VALUE + 1)
/* map from (dB + 80) * 2 to mixer value
* for dB in 0 .. 172: int(8192 * pow(10, ((dB - 160) / 2 / 20)))
*/
-static const u16 scarlett2_mixer_values[173] = {
+static const u16 scarlett2_mixer_values[SCARLETT2_MIXER_VALUE_COUNT] = {
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
@@ -143,23 +177,23 @@ static const u16 scarlett2_mixer_values[173] = {
/* Maximum number of level and pad switches */
#define SCARLETT2_LEVEL_SWITCH_MAX 2
-#define SCARLETT2_PAD_SWITCH_MAX 4
+#define SCARLETT2_PAD_SWITCH_MAX 8
+#define SCARLETT2_AIR_SWITCH_MAX 8
+#define SCARLETT2_PHANTOM_SWITCH_MAX 2
/* Maximum number of inputs to the mixer */
-#define SCARLETT2_INPUT_MIX_MAX 18
+#define SCARLETT2_INPUT_MIX_MAX 25
/* Maximum number of outputs from the mixer */
-#define SCARLETT2_OUTPUT_MIX_MAX 10
+#define SCARLETT2_OUTPUT_MIX_MAX 12
/* Maximum size of the data in the USB mux assignment message:
- * 18 inputs, 20 outputs, 18 matrix inputs, 8 spare
+ * 20 inputs, 20 outputs, 25 matrix inputs, 12 spare
*/
-#define SCARLETT2_MUX_MAX 64
+#define SCARLETT2_MUX_MAX 77
-/* Number of meters:
- * 18 inputs, 20 outputs, 18 matrix inputs
- */
-#define SCARLETT2_NUM_METERS 56
+/* Maximum number of meters (sum of output port counts) */
+#define SCARLETT2_MAX_METERS 65
/* Hardware port types:
* - None (no input to mux)
@@ -170,74 +204,219 @@ static const u16 scarlett2_mixer_values[173] = {
* - PCM I/O
*/
enum {
- SCARLETT2_PORT_TYPE_NONE = 0,
+ SCARLETT2_PORT_TYPE_NONE = 0,
SCARLETT2_PORT_TYPE_ANALOGUE = 1,
- SCARLETT2_PORT_TYPE_SPDIF = 2,
- SCARLETT2_PORT_TYPE_ADAT = 3,
- SCARLETT2_PORT_TYPE_MIX = 4,
- SCARLETT2_PORT_TYPE_PCM = 5,
- SCARLETT2_PORT_TYPE_COUNT = 6,
+ SCARLETT2_PORT_TYPE_SPDIF = 2,
+ SCARLETT2_PORT_TYPE_ADAT = 3,
+ SCARLETT2_PORT_TYPE_MIX = 4,
+ SCARLETT2_PORT_TYPE_PCM = 5,
+ SCARLETT2_PORT_TYPE_COUNT = 6,
};
-/* Count of total I/O and number available at each sample rate */
+/* I/O count of each port type kept in struct scarlett2_ports */
enum {
- SCARLETT2_PORT_IN = 0,
- SCARLETT2_PORT_OUT = 1,
- SCARLETT2_PORT_OUT_44 = 2,
- SCARLETT2_PORT_OUT_88 = 3,
- SCARLETT2_PORT_OUT_176 = 4,
- SCARLETT2_PORT_DIRECTIONS = 5,
+ SCARLETT2_PORT_IN = 0,
+ SCARLETT2_PORT_OUT = 1,
+ SCARLETT2_PORT_DIRNS = 2,
};
-/* Hardware buttons on the 18i20 */
-#define SCARLETT2_BUTTON_MAX 2
+/* Dim/Mute buttons on the 18i20 */
+enum {
+ SCARLETT2_BUTTON_MUTE = 0,
+ SCARLETT2_BUTTON_DIM = 1,
+ SCARLETT2_DIM_MUTE_COUNT = 2,
+};
-static const char *const scarlett2_button_names[SCARLETT2_BUTTON_MAX] = {
- "Mute", "Dim"
+static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
+ "Mute Playback Switch", "Dim Playback Switch"
};
/* Description of each hardware port type:
- * - id: hardware ID for this port type
- * - num: number of sources/destinations of this port type
+ * - id: hardware ID of this port type
* - src_descr: printf format string for mux input selections
* - src_num_offset: added to channel number for the fprintf
* - dst_descr: printf format string for mixer controls
*/
-struct scarlett2_ports {
+struct scarlett2_port {
u16 id;
- int num[SCARLETT2_PORT_DIRECTIONS];
const char * const src_descr;
int src_num_offset;
const char * const dst_descr;
};
+static const struct scarlett2_port scarlett2_ports[SCARLETT2_PORT_TYPE_COUNT] = {
+ [SCARLETT2_PORT_TYPE_NONE] = {
+ .id = 0x000,
+ .src_descr = "Off"
+ },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
+ .id = 0x080,
+ .src_descr = "Analogue %d",
+ .src_num_offset = 1,
+ .dst_descr = "Analogue Output %02d Playback"
+ },
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
+ .id = 0x180,
+ .src_descr = "S/PDIF %d",
+ .src_num_offset = 1,
+ .dst_descr = "S/PDIF Output %d Playback"
+ },
+ [SCARLETT2_PORT_TYPE_ADAT] = {
+ .id = 0x200,
+ .src_descr = "ADAT %d",
+ .src_num_offset = 1,
+ .dst_descr = "ADAT Output %d Playback"
+ },
+ [SCARLETT2_PORT_TYPE_MIX] = {
+ .id = 0x300,
+ .src_descr = "Mix %c",
+ .src_num_offset = 'A',
+ .dst_descr = "Mixer Input %02d Capture"
+ },
+ [SCARLETT2_PORT_TYPE_PCM] = {
+ .id = 0x600,
+ .src_descr = "PCM %d",
+ .src_num_offset = 1,
+ .dst_descr = "PCM %02d Capture"
+ },
+};
+
+/* Number of mux tables: one for each band of sample rates
+ * (44.1/48kHz, 88.2/96kHz, and 176.4/192kHz)
+ */
+#define SCARLETT2_MUX_TABLES 3
+
+/* Maximum number of entries in a mux table */
+#define SCARLETT2_MAX_MUX_ENTRIES 10
+
+/* One entry within mux_assignment defines the port type and range of
+ * ports to add to the set_mux message. The end of the list is marked
+ * with count == 0.
+ */
+struct scarlett2_mux_entry {
+ u8 port_type;
+ u8 start;
+ u8 count;
+};
+
struct scarlett2_device_info {
- u8 line_out_hw_vol; /* line out hw volume is sw controlled */
- u8 button_count; /* number of buttons */
- u8 level_input_count; /* inputs with level selectable */
- u8 pad_input_count; /* inputs with pad selectable */
+ u32 usb_id; /* USB device identifier */
+
+ /* Gen 3 devices have an internal MSD mode switch that needs
+ * to be disabled in order to access the full functionality of
+ * the device.
+ */
+ u8 has_msd_mode;
+
+ /* Gen 3 devices without a mixer have a different
+ * configuration set
+ */
+ u8 has_mixer;
+
+ /* line out hw volume is sw controlled */
+ u8 line_out_hw_vol;
+
+ /* support for main/alt speaker switching */
+ u8 has_speaker_switching;
+
+ /* support for talkback microphone */
+ u8 has_talkback;
+
+ /* the number of analogue inputs with a software switchable
+ * level control that can be set to line or instrument
+ */
+ u8 level_input_count;
+
+ /* the first input with a level control (0-based) */
+ u8 level_input_first;
+
+ /* the number of analogue inputs with a software switchable
+ * 10dB pad control
+ */
+ u8 pad_input_count;
+
+ /* the number of analogue inputs with a software switchable
+ * "air" control
+ */
+ u8 air_input_count;
+
+ /* the number of phantom (48V) software switchable controls */
+ u8 phantom_count;
+
+ /* the number of inputs each phantom switch controls */
+ u8 inputs_per_phantom;
+
+ /* the number of direct monitor options
+ * (0 = none, 1 = mono only, 2 = mono/stereo)
+ */
+ u8 direct_monitor;
+
+ /* remap analogue outputs; 18i8 Gen 3 has "line 3/4" connected
+ * internally to the analogue 7/8 outputs
+ */
+ u8 line_out_remap_enable;
+ u8 line_out_remap[SCARLETT2_ANALOGUE_MAX];
+
+ /* additional description for the line out volume controls */
const char * const line_out_descrs[SCARLETT2_ANALOGUE_MAX];
- struct scarlett2_ports ports[SCARLETT2_PORT_TYPE_COUNT];
+
+ /* number of sources/destinations of each port type */
+ const int port_count[SCARLETT2_PORT_TYPE_COUNT][SCARLETT2_PORT_DIRNS];
+
+ /* layout/order of the entries in the set_mux message */
+ struct scarlett2_mux_entry mux_assignment[SCARLETT2_MUX_TABLES]
+ [SCARLETT2_MAX_MUX_ENTRIES];
};
-struct scarlett2_mixer_data {
+struct scarlett2_data {
struct usb_mixer_interface *mixer;
struct mutex usb_mutex; /* prevent sending concurrent USB requests */
struct mutex data_mutex; /* lock access to this data */
struct delayed_work work;
const struct scarlett2_device_info *info;
+ __u8 bInterfaceNumber;
+ __u8 bEndpointAddress;
+ __u16 wMaxPacketSize;
+ __u8 bInterval;
int num_mux_srcs;
+ int num_mux_dsts;
u16 scarlett2_seq;
+ u8 sync_updated;
u8 vol_updated;
+ u8 input_other_updated;
+ u8 monitor_other_updated;
+ u8 mux_updated;
+ u8 speaker_switching_switched;
+ u8 sync;
u8 master_vol;
u8 vol[SCARLETT2_ANALOGUE_MAX];
u8 vol_sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
+ u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
u8 level_switch[SCARLETT2_LEVEL_SWITCH_MAX];
u8 pad_switch[SCARLETT2_PAD_SWITCH_MAX];
- u8 buttons[SCARLETT2_BUTTON_MAX];
+ u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
+ u8 air_switch[SCARLETT2_AIR_SWITCH_MAX];
+ u8 phantom_switch[SCARLETT2_PHANTOM_SWITCH_MAX];
+ u8 phantom_persistence;
+ u8 direct_monitor_switch;
+ u8 speaker_switching_switch;
+ u8 talkback_switch;
+ u8 talkback_map[SCARLETT2_OUTPUT_MIX_MAX];
+ u8 msd_switch;
+ struct snd_kcontrol *sync_ctl;
struct snd_kcontrol *master_vol_ctl;
struct snd_kcontrol *vol_ctls[SCARLETT2_ANALOGUE_MAX];
- struct snd_kcontrol *button_ctls[SCARLETT2_BUTTON_MAX];
+ struct snd_kcontrol *sw_hw_ctls[SCARLETT2_ANALOGUE_MAX];
+ struct snd_kcontrol *mute_ctls[SCARLETT2_ANALOGUE_MAX];
+ struct snd_kcontrol *dim_mute_ctls[SCARLETT2_DIM_MUTE_COUNT];
+ struct snd_kcontrol *level_ctls[SCARLETT2_LEVEL_SWITCH_MAX];
+ struct snd_kcontrol *pad_ctls[SCARLETT2_PAD_SWITCH_MAX];
+ struct snd_kcontrol *air_ctls[SCARLETT2_AIR_SWITCH_MAX];
+ struct snd_kcontrol *phantom_ctls[SCARLETT2_PHANTOM_SWITCH_MAX];
+ struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
+ struct snd_kcontrol *direct_monitor_ctl;
+ struct snd_kcontrol *speaker_switching_ctl;
+ struct snd_kcontrol *talkback_ctl;
u8 mux[SCARLETT2_MUX_MAX];
u8 mix[SCARLETT2_INPUT_MIX_MAX * SCARLETT2_OUTPUT_MIX_MAX];
};
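Each struct scarlett2_mux_entry added above describes one run of ports (type, start, count), and a table is terminated by a count of 0, so building the set_mux message is presumably a walk over those runs. A stand-alone sketch of such a walk, with made-up port types and counts:

#include <stdio.h>

/* toy copy of scarlett2_mux_entry: one run of ports of a given type */
struct toy_mux_entry {
	unsigned char port_type;
	unsigned char start;
	unsigned char count;
};

enum { PT_NONE, PT_ANALOGUE, PT_PCM };
static const char *const port_names[] = { "none", "analogue", "pcm" };

int main(void)
{
	/* made-up table; the real ones also end with a count of 0 */
	static const struct toy_mux_entry table[] = {
		{ PT_PCM,      0, 6 },
		{ PT_ANALOGUE, 0, 4 },
		{ PT_NONE,     0, 8 },
		{ 0, 0, 0 },
	};
	const struct toy_mux_entry *e;
	int slot = 0, i;

	for (e = table; e->count; e++)
		for (i = 0; i < e->count; i++)
			printf("mux slot %2d <- %s %d\n",
			       slot++, port_names[e->port_type], e->start + i);
	return 0;
}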
@@ -245,66 +424,56 @@ struct scarlett2_mixer_data {
/*** Model-specific data ***/
static const struct scarlett2_device_info s6i6_gen2_info = {
- /* The first two analogue inputs can be switched between line
- * and instrument levels.
- */
- .level_input_count = 2,
+ .usb_id = USB_ID(0x1235, 0x8203),
- /* The first two analogue inputs have an optional pad. */
+ .has_mixer = 1,
+ .level_input_count = 2,
.pad_input_count = 2,
.line_out_descrs = {
- "Monitor L",
- "Monitor R",
- "Headphones L",
- "Headphones R",
+ "Headphones 1 L",
+ "Headphones 1 R",
+ "Headphones 2 L",
+ "Headphones 2 R",
},
- .ports = {
- [SCARLETT2_PORT_TYPE_NONE] = {
- .id = 0x000,
- .num = { 1, 0, 8, 8, 8 },
- .src_descr = "Off",
- .src_num_offset = 0,
- },
- [SCARLETT2_PORT_TYPE_ANALOGUE] = {
- .id = 0x080,
- .num = { 4, 4, 4, 4, 4 },
- .src_descr = "Analogue %d",
- .src_num_offset = 1,
- .dst_descr = "Analogue Output %02d Playback"
- },
- [SCARLETT2_PORT_TYPE_SPDIF] = {
- .id = 0x180,
- .num = { 2, 2, 2, 2, 2 },
- .src_descr = "S/PDIF %d",
- .src_num_offset = 1,
- .dst_descr = "S/PDIF Output %d Playback"
- },
- [SCARLETT2_PORT_TYPE_MIX] = {
- .id = 0x300,
- .num = { 10, 18, 18, 18, 18 },
- .src_descr = "Mix %c",
- .src_num_offset = 65,
- .dst_descr = "Mixer Input %02d Capture"
- },
- [SCARLETT2_PORT_TYPE_PCM] = {
- .id = 0x600,
- .num = { 6, 6, 6, 6, 6 },
- .src_descr = "PCM %d",
- .src_num_offset = 1,
- .dst_descr = "PCM %02d Capture"
- },
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 6, 6 },
},
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ } },
};
static const struct scarlett2_device_info s18i8_gen2_info = {
- /* The first two analogue inputs can be switched between line
- * and instrument levels.
- */
- .level_input_count = 2,
+ .usb_id = USB_ID(0x1235, 0x8204),
- /* The first four analogue inputs have an optional pad. */
+ .has_mixer = 1,
+ .level_input_count = 2,
.pad_input_count = 4,
.line_out_descrs = {
@@ -316,62 +485,44 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
"Headphones 2 R",
},
- .ports = {
- [SCARLETT2_PORT_TYPE_NONE] = {
- .id = 0x000,
- .num = { 1, 0, 8, 8, 4 },
- .src_descr = "Off",
- .src_num_offset = 0,
- },
- [SCARLETT2_PORT_TYPE_ANALOGUE] = {
- .id = 0x080,
- .num = { 8, 6, 6, 6, 6 },
- .src_descr = "Analogue %d",
- .src_num_offset = 1,
- .dst_descr = "Analogue Output %02d Playback"
- },
- [SCARLETT2_PORT_TYPE_SPDIF] = {
- .id = 0x180,
- /* S/PDIF outputs aren't available at 192kHz
- * but are included in the USB mux I/O
- * assignment message anyway
- */
- .num = { 2, 2, 2, 2, 2 },
- .src_descr = "S/PDIF %d",
- .src_num_offset = 1,
- .dst_descr = "S/PDIF Output %d Playback"
- },
- [SCARLETT2_PORT_TYPE_ADAT] = {
- .id = 0x200,
- .num = { 8, 0, 0, 0, 0 },
- .src_descr = "ADAT %d",
- .src_num_offset = 1,
- },
- [SCARLETT2_PORT_TYPE_MIX] = {
- .id = 0x300,
- .num = { 10, 18, 18, 18, 18 },
- .src_descr = "Mix %c",
- .src_num_offset = 65,
- .dst_descr = "Mixer Input %02d Capture"
- },
- [SCARLETT2_PORT_TYPE_PCM] = {
- .id = 0x600,
- .num = { 20, 18, 18, 14, 10 },
- .src_descr = "PCM %d",
- .src_num_offset = 1,
- .dst_descr = "PCM %02d Capture"
- },
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 6 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 8, 18 },
},
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 4 },
+ { 0, 0, 0 },
+ } },
};
static const struct scarlett2_device_info s18i20_gen2_info = {
- /* The analogue line outputs on the 18i20 can be switched
- * between software and hardware volume control
- */
- .line_out_hw_vol = 1,
+ .usb_id = USB_ID(0x1235, 0x8201),
- /* Mute and dim buttons */
- .button_count = 2,
+ .has_mixer = 1,
+ .line_out_hw_vol = 1,
.line_out_descrs = {
"Monitor L",
@@ -386,99 +537,364 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
"Headphones 2 R",
},
- .ports = {
- [SCARLETT2_PORT_TYPE_NONE] = {
- .id = 0x000,
- .num = { 1, 0, 8, 8, 6 },
- .src_descr = "Off",
- .src_num_offset = 0,
- },
- [SCARLETT2_PORT_TYPE_ANALOGUE] = {
- .id = 0x080,
- .num = { 8, 10, 10, 10, 10 },
- .src_descr = "Analogue %d",
- .src_num_offset = 1,
- .dst_descr = "Analogue Output %02d Playback"
- },
- [SCARLETT2_PORT_TYPE_SPDIF] = {
- /* S/PDIF outputs aren't available at 192kHz
- * but are included in the USB mux I/O
- * assignment message anyway
- */
- .id = 0x180,
- .num = { 2, 2, 2, 2, 2 },
- .src_descr = "S/PDIF %d",
- .src_num_offset = 1,
- .dst_descr = "S/PDIF Output %d Playback"
- },
- [SCARLETT2_PORT_TYPE_ADAT] = {
- .id = 0x200,
- .num = { 8, 8, 8, 4, 0 },
- .src_descr = "ADAT %d",
- .src_num_offset = 1,
- .dst_descr = "ADAT Output %d Playback"
- },
- [SCARLETT2_PORT_TYPE_MIX] = {
- .id = 0x300,
- .num = { 10, 18, 18, 18, 18 },
- .src_descr = "Mix %c",
- .src_num_offset = 65,
- .dst_descr = "Mixer Input %02d Capture"
- },
- [SCARLETT2_PORT_TYPE_PCM] = {
- .id = 0x600,
- .num = { 20, 18, 18, 14, 10 },
- .src_descr = "PCM %d",
- .src_num_offset = 1,
- .dst_descr = "PCM %02d Capture"
- },
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 10 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 20, 18 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ADAT, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 6 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info solo_gen3_info = {
+ .usb_id = USB_ID(0x1235, 0x8211),
+
+ .has_msd_mode = 1,
+ .level_input_count = 1,
+ .level_input_first = 1,
+ .air_input_count = 1,
+ .phantom_count = 1,
+ .inputs_per_phantom = 1,
+ .direct_monitor = 1,
+};
+
+static const struct scarlett2_device_info s2i2_gen3_info = {
+ .usb_id = USB_ID(0x1235, 0x8210),
+
+ .has_msd_mode = 1,
+ .level_input_count = 2,
+ .air_input_count = 2,
+ .phantom_count = 1,
+ .inputs_per_phantom = 2,
+ .direct_monitor = 2,
+};
+
+static const struct scarlett2_device_info s4i4_gen3_info = {
+ .usb_id = USB_ID(0x1235, 0x8212),
+
+ .has_msd_mode = 1,
+ .has_mixer = 1,
+ .level_input_count = 2,
+ .pad_input_count = 2,
+ .air_input_count = 2,
+ .phantom_count = 1,
+ .inputs_per_phantom = 2,
+
+ .line_out_descrs = {
+ "Monitor L",
+ "Monitor R",
+ "Headphones L",
+ "Headphones R",
+ },
+
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 6, 8 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 4, 6 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info s8i6_gen3_info = {
+ .usb_id = USB_ID(0x1235, 0x8213),
+
+ .has_msd_mode = 1,
+ .has_mixer = 1,
+ .level_input_count = 2,
+ .pad_input_count = 2,
+ .air_input_count = 2,
+ .phantom_count = 1,
+ .inputs_per_phantom = 2,
+
+ .line_out_descrs = {
+ "Headphones 1 L",
+ "Headphones 1 R",
+ "Headphones 2 L",
+ "Headphones 2 R",
+ },
+
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 6, 4 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 8, 8 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 6, 10 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info s18i8_gen3_info = {
+ .usb_id = USB_ID(0x1235, 0x8214),
+
+ .has_msd_mode = 1,
+ .has_mixer = 1,
+ .line_out_hw_vol = 1,
+ .has_speaker_switching = 1,
+ .level_input_count = 2,
+ .pad_input_count = 4,
+ .air_input_count = 4,
+ .phantom_count = 2,
+ .inputs_per_phantom = 2,
+
+ .line_out_remap_enable = 1,
+ .line_out_remap = { 0, 1, 6, 7, 2, 3, 4, 5 },
+
+ .line_out_descrs = {
+ "Monitor L",
+ "Monitor R",
+ "Alt Monitor L",
+ "Alt Monitor R",
+ "Headphones 1 L",
+ "Headphones 1 R",
+ "Headphones 2 L",
+ "Headphones 2 R",
+ },
+
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 8 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 20 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 8, 20 },
},
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+ { SCARLETT2_PORT_TYPE_PCM, 12, 8 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_PCM, 10, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+ { SCARLETT2_PORT_TYPE_PCM, 12, 4 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_PCM, 10, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info s18i20_gen3_info = {
+ .usb_id = USB_ID(0x1235, 0x8215),
+
+ .has_msd_mode = 1,
+ .has_mixer = 1,
+ .line_out_hw_vol = 1,
+ .has_speaker_switching = 1,
+ .has_talkback = 1,
+ .level_input_count = 2,
+ .pad_input_count = 8,
+ .air_input_count = 8,
+ .phantom_count = 2,
+ .inputs_per_phantom = 4,
+
+ .line_out_descrs = {
+ "Monitor 1 L",
+ "Monitor 1 R",
+ "Monitor 2 L",
+ "Monitor 2 R",
+ NULL,
+ NULL,
+ "Headphones 1 L",
+ "Headphones 1 R",
+ "Headphones 2 L",
+ "Headphones 2 R",
+ },
+
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 9, 10 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 12, 25 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 20, 20 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+ { SCARLETT2_PORT_TYPE_PCM, 10, 10 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 25 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 12 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+ { SCARLETT2_PORT_TYPE_PCM, 10, 8 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 25 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 24 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info *scarlett2_devices[] = {
+ /* Supported Gen 2 devices */
+ &s6i6_gen2_info,
+ &s18i8_gen2_info,
+ &s18i20_gen2_info,
+
+ /* Supported Gen 3 devices */
+ &solo_gen3_info,
+ &s2i2_gen3_info,
+ &s4i4_gen3_info,
+ &s8i6_gen3_info,
+ &s18i8_gen3_info,
+ &s18i20_gen3_info,
+
+ /* End of list */
+ NULL
};
/* get the starting port index number for a given port type/direction */
-static int scarlett2_get_port_start_num(const struct scarlett2_ports *ports,
- int direction, int port_type)
+static int scarlett2_get_port_start_num(
+ const int port_count[][SCARLETT2_PORT_DIRNS],
+ int direction, int port_type)
{
int i, num = 0;
for (i = 0; i < port_type; i++)
- num += ports[i].num[direction];
+ num += port_count[i][direction];
return num;
}
/*** USB Interactions ***/
-/* Vendor-Specific Interface, Endpoint, MaxPacketSize, Interval */
-#define SCARLETT2_USB_VENDOR_SPECIFIC_INTERFACE 5
-#define SCARLETT2_USB_INTERRUPT_ENDPOINT 4
-#define SCARLETT2_USB_INTERRUPT_MAX_DATA 64
-#define SCARLETT2_USB_INTERRUPT_INTERVAL 3
-
-/* Interrupt flags for volume and mute/dim button changes */
-#define SCARLETT2_USB_INTERRUPT_VOL_CHANGE 0x400000
-#define SCARLETT2_USB_INTERRUPT_BUTTON_CHANGE 0x200000
+/* Notifications from the interface */
+#define SCARLETT2_USB_NOTIFY_SYNC 0x00000008
+#define SCARLETT2_USB_NOTIFY_DIM_MUTE 0x00200000
+#define SCARLETT2_USB_NOTIFY_MONITOR 0x00400000
+#define SCARLETT2_USB_NOTIFY_INPUT_OTHER 0x00800000
+#define SCARLETT2_USB_NOTIFY_MONITOR_OTHER 0x01000000
/* Commands for sending/receiving requests/responses */
-#define SCARLETT2_USB_VENDOR_SPECIFIC_CMD_REQ 2
-#define SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP 3
-
-#define SCARLETT2_USB_INIT_SEQ 0x00000000
-#define SCARLETT2_USB_GET_METER_LEVELS 0x00001001
-#define SCARLETT2_USB_SET_MIX 0x00002002
-#define SCARLETT2_USB_SET_MUX 0x00003002
-#define SCARLETT2_USB_GET_DATA 0x00800000
-#define SCARLETT2_USB_SET_DATA 0x00800001
-#define SCARLETT2_USB_DATA_CMD 0x00800002
+#define SCARLETT2_USB_CMD_INIT 0
+#define SCARLETT2_USB_CMD_REQ 2
+#define SCARLETT2_USB_CMD_RESP 3
+
+#define SCARLETT2_USB_INIT_1 0x00000000
+#define SCARLETT2_USB_INIT_2 0x00000002
+#define SCARLETT2_USB_GET_METER 0x00001001
+#define SCARLETT2_USB_GET_MIX 0x00002001
+#define SCARLETT2_USB_SET_MIX 0x00002002
+#define SCARLETT2_USB_GET_MUX 0x00003001
+#define SCARLETT2_USB_SET_MUX 0x00003002
+#define SCARLETT2_USB_GET_SYNC 0x00006004
+#define SCARLETT2_USB_GET_DATA 0x00800000
+#define SCARLETT2_USB_SET_DATA 0x00800001
+#define SCARLETT2_USB_DATA_CMD 0x00800002
+
#define SCARLETT2_USB_CONFIG_SAVE 6
#define SCARLETT2_USB_VOLUME_STATUS_OFFSET 0x31
#define SCARLETT2_USB_METER_LEVELS_GET_MAGIC 1
-/* volume status is read together (matches scarlett2_config_items[]) */
+/* volume status is read together (matches scarlett2_config_items[1]) */
struct scarlett2_usb_volume_status {
- /* mute & dim buttons */
- u8 buttons[SCARLETT2_BUTTON_MAX];
+ /* dim/mute buttons */
+ u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
u8 pad1;
@@ -488,7 +904,8 @@ struct scarlett2_usb_volume_status {
/* actual volume of output inc. dim (-18dB) */
s16 hw_vol[SCARLETT2_ANALOGUE_MAX];
- u8 pad2[SCARLETT2_ANALOGUE_MAX];
+ /* internal mute buttons */
+ u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
/* sw (0) or hw (1) controlled */
u8 sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
@@ -501,16 +918,25 @@ struct scarlett2_usb_volume_status {
/* Configuration parameters that can be read and written */
enum {
- SCARLETT2_CONFIG_BUTTONS = 0,
+ SCARLETT2_CONFIG_DIM_MUTE = 0,
SCARLETT2_CONFIG_LINE_OUT_VOLUME = 1,
- SCARLETT2_CONFIG_SW_HW_SWITCH = 2,
- SCARLETT2_CONFIG_LEVEL_SWITCH = 3,
- SCARLETT2_CONFIG_PAD_SWITCH = 4,
- SCARLETT2_CONFIG_COUNT = 5
+ SCARLETT2_CONFIG_MUTE_SWITCH = 2,
+ SCARLETT2_CONFIG_SW_HW_SWITCH = 3,
+ SCARLETT2_CONFIG_LEVEL_SWITCH = 4,
+ SCARLETT2_CONFIG_PAD_SWITCH = 5,
+ SCARLETT2_CONFIG_MSD_SWITCH = 6,
+ SCARLETT2_CONFIG_AIR_SWITCH = 7,
+ SCARLETT2_CONFIG_PHANTOM_SWITCH = 8,
+ SCARLETT2_CONFIG_PHANTOM_PERSISTENCE = 9,
+ SCARLETT2_CONFIG_DIRECT_MONITOR = 10,
+ SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH = 11,
+ SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE = 12,
+ SCARLETT2_CONFIG_TALKBACK_MAP = 13,
+ SCARLETT2_CONFIG_COUNT = 14
};
/* Location, size, and activation command number for the configuration
- * parameters
+ * parameters. Size is in bits and may be 1, 8, or 16.
*/
struct scarlett2_config {
u8 offset;
@@ -518,43 +944,73 @@ struct scarlett2_config {
u8 activate;
};
+/* scarlett2_config_items[0] is for devices without a mixer
+ * scarlett2_config_items[1] is for devices with a mixer
+ */
static const struct scarlett2_config
- scarlett2_config_items[SCARLETT2_CONFIG_COUNT] = {
- /* Mute/Dim Buttons */
- {
- .offset = 0x31,
- .size = 1,
- .activate = 2
- },
+ scarlett2_config_items[2][SCARLETT2_CONFIG_COUNT] =
- /* Line Out Volume */
- {
- .offset = 0x34,
- .size = 2,
- .activate = 1
- },
+/* Devices without a mixer (Solo and 2i2 Gen 3) */
+{ {
+ [SCARLETT2_CONFIG_MSD_SWITCH] = {
+ .offset = 0x04, .size = 8, .activate = 6 },
- /* SW/HW Volume Switch */
- {
- .offset = 0x66,
- .size = 1,
- .activate = 3
- },
+ [SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
+ .offset = 0x05, .size = 8, .activate = 6 },
- /* Level Switch */
- {
- .offset = 0x7c,
- .size = 1,
- .activate = 7
- },
+ [SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
+ .offset = 0x06, .size = 8, .activate = 3 },
- /* Pad Switch */
- {
- .offset = 0x84,
- .size = 1,
- .activate = 8
- }
-};
+ [SCARLETT2_CONFIG_DIRECT_MONITOR] = {
+ .offset = 0x07, .size = 8, .activate = 4 },
+
+ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+ .offset = 0x08, .size = 1, .activate = 7 },
+
+ [SCARLETT2_CONFIG_AIR_SWITCH] = {
+ .offset = 0x09, .size = 1, .activate = 8 },
+
+/* Devices with a mixer (Gen 2 and all other Gen 3) */
+}, {
+ [SCARLETT2_CONFIG_DIM_MUTE] = {
+ .offset = 0x31, .size = 8, .activate = 2 },
+
+ [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+ .offset = 0x34, .size = 16, .activate = 1 },
+
+ [SCARLETT2_CONFIG_MUTE_SWITCH] = {
+ .offset = 0x5c, .size = 8, .activate = 1 },
+
+ [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+ .offset = 0x66, .size = 8, .activate = 3 },
+
+ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+ .offset = 0x7c, .size = 8, .activate = 7 },
+
+ [SCARLETT2_CONFIG_PAD_SWITCH] = {
+ .offset = 0x84, .size = 8, .activate = 8 },
+
+ [SCARLETT2_CONFIG_AIR_SWITCH] = {
+ .offset = 0x8c, .size = 8, .activate = 8 },
+
+ [SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
+ .offset = 0x9c, .size = 1, .activate = 8 },
+
+ [SCARLETT2_CONFIG_MSD_SWITCH] = {
+ .offset = 0x9d, .size = 8, .activate = 6 },
+
+ [SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
+ .offset = 0x9e, .size = 8, .activate = 6 },
+
+ [SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH] = {
+ .offset = 0x9f, .size = 1, .activate = 10 },
+
+ [SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE] = {
+ .offset = 0xa0, .size = 1, .activate = 10 },
+
+ [SCARLETT2_CONFIG_TALKBACK_MAP] = {
+ .offset = 0xb0, .size = 16, .activate = 10 },
+} };
/* proprietary request/response format */
struct scarlett2_usb_packet {
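Per the config-table comment above, an item's size is given in bits (1, 8, or 16); the 1-bit case packs up to eight switches into one register byte, which scarlett2_usb_get_config() later in this patch unpacks into one byte per switch. A minimal userspace version of just that unpacking step:

#include <stdio.h>

/* unpack a 1-bit-per-switch register byte into one byte per switch,
 * the same step scarlett2_usb_get_config() performs for size == 1 */
static void unpack_bits(unsigned char value, unsigned char *buf, int count)
{
	int i;

	for (i = 0; i < 8 && i < count; i++, value >>= 1)
		buf[i] = value & 1;
}

int main(void)
{
	unsigned char pad_switch[4];
	int i;

	unpack_bits(0x05, pad_switch, 4);	/* 0b0101: pads 1 and 3 on */
	for (i = 0; i < 4; i++)
		printf("pad %d: %u\n", i + 1, pad_switch[i]);
	return 0;
}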
@@ -566,9 +1022,7 @@ struct scarlett2_usb_packet {
u8 data[];
};
-#define SCARLETT2_USB_PACKET_LEN (sizeof(struct scarlett2_usb_packet))
-
-static void scarlett2_fill_request_header(struct scarlett2_mixer_data *private,
+static void scarlett2_fill_request_header(struct scarlett2_data *private,
struct scarlett2_usb_packet *req,
u32 cmd, u16 req_size)
{
@@ -582,16 +1036,35 @@ static void scarlett2_fill_request_header(struct scarlett2_mixer_data *private,
req->pad = 0;
}
+static int scarlett2_usb_tx(struct usb_device *dev, int interface,
+ void *buf, u16 size)
+{
+ return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+ SCARLETT2_USB_CMD_REQ,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+ 0, interface, buf, size);
+}
+
+static int scarlett2_usb_rx(struct usb_device *dev, int interface,
+ u32 usb_req, void *buf, u16 size)
+{
+ return snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ usb_req,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ 0, interface, buf, size);
+}
+
/* Send a proprietary format request to the Scarlett interface */
static int scarlett2_usb(
struct usb_mixer_interface *mixer, u32 cmd,
void *req_data, u16 req_size, void *resp_data, u16 resp_size)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
+ struct usb_device *dev = mixer->chip->dev;
u16 req_buf_size = sizeof(struct scarlett2_usb_packet) + req_size;
u16 resp_buf_size = sizeof(struct scarlett2_usb_packet) + resp_size;
- struct scarlett2_usb_packet *req = NULL, *resp = NULL;
- int err = 0;
+ struct scarlett2_usb_packet *req, *resp = NULL;
+ int err;
req = kmalloc(req_buf_size, GFP_KERNEL);
if (!req) {
@@ -614,19 +1087,13 @@ static int scarlett2_usb(
if (req_size)
memcpy(req->data, req_data, req_size);
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0),
- SCARLETT2_USB_VENDOR_SPECIFIC_CMD_REQ,
- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
- 0,
- SCARLETT2_USB_VENDOR_SPECIFIC_INTERFACE,
- req,
- req_buf_size);
+ err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
+ req, req_buf_size);
if (err != req_buf_size) {
usb_audio_err(
mixer->chip,
- "Scarlett Gen 2 USB request result cmd %x was %d\n",
+ "Scarlett Gen 2/3 USB request result cmd %x was %d\n",
cmd, err);
err = -EINVAL;
goto unlock;
@@ -634,34 +1101,34 @@ static int scarlett2_usb(
/* send a second message to get the response */
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_rcvctrlpipe(mixer->chip->dev, 0),
- SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP,
- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
- 0,
- SCARLETT2_USB_VENDOR_SPECIFIC_INTERFACE,
- resp,
- resp_buf_size);
+ err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
+ SCARLETT2_USB_CMD_RESP,
+ resp, resp_buf_size);
/* validate the response */
if (err != resp_buf_size) {
usb_audio_err(
mixer->chip,
- "Scarlett Gen 2 USB response result cmd %x was %d\n",
- cmd, err);
+ "Scarlett Gen 2/3 USB response result cmd %x was %d "
+ "expected %d\n",
+ cmd, err, resp_buf_size);
err = -EINVAL;
goto unlock;
}
+ /* cmd/seq/size should match except when initialising
+ * seq sent = 1, response = 0
+ */
if (resp->cmd != req->cmd ||
- resp->seq != req->seq ||
+ (resp->seq != req->seq &&
+ (le16_to_cpu(req->seq) != 1 || resp->seq != 0)) ||
resp_size != le16_to_cpu(resp->size) ||
resp->error ||
resp->pad) {
usb_audio_err(
mixer->chip,
- "Scarlett Gen 2 USB invalid response; "
+ "Scarlett Gen 2/3 USB invalid response; "
"cmd tx/rx %d/%d seq %d/%d size %d/%d "
"error %d pad %d\n",
le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
@@ -673,7 +1140,7 @@ static int scarlett2_usb(
goto unlock;
}
- if (resp_size > 0)
+ if (resp_data && resp_size > 0)
memcpy(resp_data, resp->data, resp_size);
unlock:
@@ -684,6 +1151,63 @@ error:
return err;
}
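/* In outline, scarlett2_usb() above does one request/response exchange:
 * fill in a header (cmd, payload size, sequence number), append the
 * payload, send it with SCARLETT2_USB_CMD_REQ, then read the reply with
 * SCARLETT2_USB_CMD_RESP and require that cmd, seq, and size echo the
 * request (except during initialisation, where a request seq of 1 may be
 * answered with seq 0) and that the error and pad fields are zero.
 */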
+/* Send a USB message to get data; result placed in *buf */
+static int scarlett2_usb_get(
+ struct usb_mixer_interface *mixer,
+ int offset, void *buf, int size)
+{
+ struct {
+ __le32 offset;
+ __le32 size;
+ } __packed req;
+
+ req.offset = cpu_to_le32(offset);
+ req.size = cpu_to_le32(size);
+ return scarlett2_usb(mixer, SCARLETT2_USB_GET_DATA,
+ &req, sizeof(req), buf, size);
+}
+
+/* Send a USB message to get configuration parameters; result placed in *buf */
+static int scarlett2_usb_get_config(
+ struct usb_mixer_interface *mixer,
+ int config_item_num, int count, void *buf)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const struct scarlett2_config *config_item =
+ &scarlett2_config_items[info->has_mixer][config_item_num];
+ int size, err, i;
+ u8 *buf_8;
+ u8 value;
+
+ /* For byte-sized parameters, retrieve directly into buf */
+ if (config_item->size >= 8) {
+ size = config_item->size / 8 * count;
+ err = scarlett2_usb_get(mixer, config_item->offset, buf, size);
+ if (err < 0)
+ return err;
+ if (size == 2) {
+ u16 *buf_16 = buf;
+
+ for (i = 0; i < count; i++, buf_16++)
+ *buf_16 = le16_to_cpu(*(__le16 *)buf_16);
+ }
+ return 0;
+ }
+
+ /* For bit-sized parameters, retrieve into value */
+ err = scarlett2_usb_get(mixer, config_item->offset, &value, 1);
+ if (err < 0)
+ return err;
+
+ /* then unpack from value into buf[] */
+ buf_8 = buf;
+ for (i = 0; i < 8 && i < count; i++, value >>= 1)
+ *buf_8++ = value & 1;
+
+ return 0;
+}
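/* Worked example of the bit unpack above: for a .size == 1 item where the
 * device returns the byte 0x05 and count is 2, the loop yields
 * buf[0] = 1 and buf[1] = 0; any higher bits are ignored once count
 * entries have been filled.
 */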
+
/* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
static void scarlett2_config_save(struct usb_mixer_interface *mixer)
{
@@ -697,82 +1221,97 @@ static void scarlett2_config_save(struct usb_mixer_interface *mixer)
/* Delayed work to save config */
static void scarlett2_config_save_work(struct work_struct *work)
{
- struct scarlett2_mixer_data *private =
- container_of(work, struct scarlett2_mixer_data, work.work);
+ struct scarlett2_data *private =
+ container_of(work, struct scarlett2_data, work.work);
scarlett2_config_save(private->mixer);
}
-/* Send a USB message to set a configuration parameter (volume level,
- * sw/hw volume switch, line/inst level switch, or pad switch)
- */
+/* Send a USB message to set a SCARLETT2_CONFIG_* parameter */
static int scarlett2_usb_set_config(
struct usb_mixer_interface *mixer,
int config_item_num, int index, int value)
{
- const struct scarlett2_config config_item =
- scarlett2_config_items[config_item_num];
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const struct scarlett2_config *config_item =
+ &scarlett2_config_items[info->has_mixer][config_item_num];
struct {
__le32 offset;
__le32 bytes;
__le32 value;
} __packed req;
__le32 req2;
+ int offset, size;
int err;
- struct scarlett2_mixer_data *private = mixer->private_data;
/* Cancel any pending NVRAM save */
cancel_delayed_work_sync(&private->work);
+ /* Convert config_item->size in bits to size in bytes and
+ * calculate offset
+ */
+ if (config_item->size >= 8) {
+ size = config_item->size / 8;
+ offset = config_item->offset + index * size;
+
+ /* If updating a bit, retrieve the old value, set/clear the
+ * bit as needed, and update value
+ */
+ } else {
+ u8 tmp;
+
+ size = 1;
+ offset = config_item->offset;
+
+ err = scarlett2_usb_get(mixer, offset, &tmp, 1);
+ if (err < 0)
+ return err;
+
+ if (value)
+ tmp |= (1 << index);
+ else
+ tmp &= ~(1 << index);
+
+ value = tmp;
+ }
+
/* Send the configuration parameter data */
- req.offset = cpu_to_le32(config_item.offset + index * config_item.size);
- req.bytes = cpu_to_le32(config_item.size);
+ req.offset = cpu_to_le32(offset);
+ req.bytes = cpu_to_le32(size);
req.value = cpu_to_le32(value);
err = scarlett2_usb(mixer, SCARLETT2_USB_SET_DATA,
- &req, sizeof(u32) * 2 + config_item.size,
+ &req, sizeof(u32) * 2 + size,
NULL, 0);
if (err < 0)
return err;
/* Activate the change */
- req2 = cpu_to_le32(config_item.activate);
+ req2 = cpu_to_le32(config_item->activate);
err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
&req2, sizeof(req2), NULL, 0);
if (err < 0)
return err;
/* Schedule the change to be written to NVRAM */
- schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
+ if (config_item->activate != SCARLETT2_USB_CONFIG_SAVE)
+ schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
return 0;
}
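/* A minimal stand-alone sketch of the read-modify-write done above for
 * .size == 1 items (the function name is illustrative, not a driver
 * symbol): fetch the shared config byte, set or clear one channel's bit,
 * and write the whole byte back.
 */
#include <stdint.h>

static uint8_t update_config_bit(uint8_t packed, int index, int value)
{
	if (value)
		packed |= (1u << index);	/* set this channel's bit */
	else
		packed &= ~(1u << index);	/* clear this channel's bit */
	return packed;				/* written back as one byte */
}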
-/* Send a USB message to get data; result placed in *buf */
-static int scarlett2_usb_get(
+/* Send a USB message to get sync status; result placed in *sync */
+static int scarlett2_usb_get_sync_status(
struct usb_mixer_interface *mixer,
- int offset, void *buf, int size)
+ u8 *sync)
{
- struct {
- __le32 offset;
- __le32 size;
- } __packed req;
-
- req.offset = cpu_to_le32(offset);
- req.size = cpu_to_le32(size);
- return scarlett2_usb(mixer, SCARLETT2_USB_GET_DATA,
- &req, sizeof(req), buf, size);
-}
+ __le32 data;
+ int err;
-/* Send a USB message to get configuration parameters; result placed in *buf */
-static int scarlett2_usb_get_config(
- struct usb_mixer_interface *mixer,
- int config_item_num, int count, void *buf)
-{
- const struct scarlett2_config config_item =
- scarlett2_config_items[config_item_num];
- int size = config_item.size * count;
+ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_SYNC,
+ NULL, 0, &data, sizeof(data));
+ if (err < 0)
+ return err;
- return scarlett2_usb_get(mixer, config_item.offset, buf, size);
+ *sync = !!data;
+ return 0;
}
/* Send a USB message to get volume status; result placed in *buf */
@@ -784,13 +1323,56 @@ static int scarlett2_usb_get_volume_status(
buf, sizeof(*buf));
}
+/* Send a USB message to get the volumes for all inputs of one mix
+ * and put the values into private->mix[]
+ */
+static int scarlett2_usb_get_mix(struct usb_mixer_interface *mixer,
+ int mix_num)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+
+ int num_mixer_in =
+ info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+ int err, i, j, k;
+
+ struct {
+ __le16 mix_num;
+ __le16 count;
+ } __packed req;
+
+ __le16 data[SCARLETT2_INPUT_MIX_MAX];
+
+ req.mix_num = cpu_to_le16(mix_num);
+ req.count = cpu_to_le16(num_mixer_in);
+
+ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MIX,
+ &req, sizeof(req),
+ data, num_mixer_in * sizeof(u16));
+ if (err < 0)
+ return err;
+
+ for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++) {
+ u16 mixer_value = le16_to_cpu(data[i]);
+
+ for (k = 0; k < SCARLETT2_MIXER_VALUE_COUNT; k++)
+ if (scarlett2_mixer_values[k] >= mixer_value)
+ break;
+ if (k == SCARLETT2_MIXER_VALUE_COUNT)
+ k = SCARLETT2_MIXER_MAX_VALUE;
+ private->mix[j] = k;
+ }
+
+ return 0;
+}
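/* The loop above maps each raw 16-bit gain reported by the device onto an
 * index into the driver's scarlett2_mixer_values[] table (defined earlier
 * in the file) by taking the first entry that is >= the raw value.  A
 * stand-alone sketch of that lookup, with an arbitrary table rather than
 * the driver's real one:
 */
#include <stdint.h>

static int raw_gain_to_index(const uint16_t *table, int table_len,
			     int max_index, uint16_t raw)
{
	int k;

	/* first table entry that is >= the raw value wins */
	for (k = 0; k < table_len; k++)
		if (table[k] >= raw)
			return k;

	/* values above the top of the table clamp to the maximum index */
	return max_index;
}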
+
/* Send a USB message to set the volumes for all inputs of one mix
* (values obtained from private->mix[])
*/
static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
- int mix_num)
+ int mix_num)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
const struct scarlett2_device_info *info = private->info;
struct {
@@ -800,7 +1382,7 @@ static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
int i, j;
int num_mixer_in =
- info->ports[SCARLETT2_PORT_TYPE_MIX].num[SCARLETT2_PORT_OUT];
+ info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
req.mix_num = cpu_to_le16(mix_num);
@@ -814,40 +1396,120 @@ static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
NULL, 0);
}
-/* Convert a port number index (per info->ports) to a hardware ID */
-static u32 scarlett2_mux_src_num_to_id(const struct scarlett2_ports *ports,
- int num)
+/* Convert a port number index (per info->port_count) to a hardware ID */
+static u32 scarlett2_mux_src_num_to_id(
+ const int port_count[][SCARLETT2_PORT_DIRNS], int num)
{
int port_type;
for (port_type = 0;
port_type < SCARLETT2_PORT_TYPE_COUNT;
port_type++) {
- if (num < ports[port_type].num[SCARLETT2_PORT_IN])
- return ports[port_type].id | num;
- num -= ports[port_type].num[SCARLETT2_PORT_IN];
+ if (num < port_count[port_type][SCARLETT2_PORT_IN])
+ return scarlett2_ports[port_type].id | num;
+ num -= port_count[port_type][SCARLETT2_PORT_IN];
}
/* Oops */
return 0;
}
+/* Convert a hardware ID to a port number index */
+static u32 scarlett2_mux_id_to_num(
+ const int port_count[][SCARLETT2_PORT_DIRNS], int direction, u32 id)
+{
+ int port_type;
+ int port_num = 0;
+
+ for (port_type = 0;
+ port_type < SCARLETT2_PORT_TYPE_COUNT;
+ port_type++) {
+ int base = scarlett2_ports[port_type].id;
+ int count = port_count[port_type][direction];
+
+ if (id >= base && id < base + count)
+ return port_num + id - base;
+ port_num += count;
+ }
+
+ /* Oops */
+ return -1;
+}
+
+/* Convert one mux entry from the interface and load into private->mux[] */
+static void scarlett2_usb_populate_mux(struct scarlett2_data *private,
+ u32 mux_entry)
+{
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+
+ int dst_idx, src_idx;
+
+ dst_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_OUT,
+ mux_entry & 0xFFF);
+ if (dst_idx < 0)
+ return;
+
+ if (dst_idx >= private->num_mux_dsts) {
+ usb_audio_err(private->mixer->chip,
+ "BUG: scarlett2_mux_id_to_num(%06x, OUT): %d >= %d",
+ mux_entry, dst_idx, private->num_mux_dsts);
+ return;
+ }
+
+ src_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_IN,
+ mux_entry >> 12);
+ if (src_idx < 0)
+ return;
+
+ if (src_idx >= private->num_mux_srcs) {
+ usb_audio_err(private->mixer->chip,
+ "BUG: scarlett2_mux_id_to_num(%06x, IN): %d >= %d",
+ mux_entry, src_idx, private->num_mux_srcs);
+ return;
+ }
+
+ private->mux[dst_idx] = src_idx;
+}
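/* Each 32-bit mux entry read back from the device packs the destination
 * hardware ID in its low 12 bits and the source hardware ID in the next
 * 12 bits, the same layout scarlett2_usb_set_mux() writes below.  A
 * stand-alone sketch of the decode (names are illustrative):
 */
#include <stdint.h>

static void decode_mux_entry(uint32_t entry,
			     uint32_t *dst_id, uint32_t *src_id)
{
	*dst_id = entry & 0xfff;		/* destination hardware ID */
	*src_id = (entry >> 12) & 0xfff;	/* source hardware ID */
}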
+
+/* Send USB message to get mux inputs and then populate private->mux[] */
+static int scarlett2_usb_get_mux(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ int count = private->num_mux_dsts;
+ int err, i;
+
+ struct {
+ __le16 num;
+ __le16 count;
+ } __packed req;
+
+ __le32 data[SCARLETT2_MUX_MAX];
+
+ private->mux_updated = 0;
+
+ req.num = 0;
+ req.count = cpu_to_le16(count);
+
+ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MUX,
+ &req, sizeof(req),
+ data, count * sizeof(u32));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < count; i++)
+ scarlett2_usb_populate_mux(private, le32_to_cpu(data[i]));
+
+ return 0;
+}
+
/* Send USB messages to set mux inputs */
static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
const struct scarlett2_device_info *info = private->info;
- const struct scarlett2_ports *ports = info->ports;
- int rate, port_dir_rate;
-
- static const int assignment_order[SCARLETT2_PORT_TYPE_COUNT] = {
- SCARLETT2_PORT_TYPE_PCM,
- SCARLETT2_PORT_TYPE_ANALOGUE,
- SCARLETT2_PORT_TYPE_SPDIF,
- SCARLETT2_PORT_TYPE_ADAT,
- SCARLETT2_PORT_TYPE_MIX,
- SCARLETT2_PORT_TYPE_NONE,
- };
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int table;
struct {
__le16 pad;
@@ -857,43 +1519,44 @@ static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
req.pad = 0;
- /* mux settings for each rate */
- for (rate = 0, port_dir_rate = SCARLETT2_PORT_OUT_44;
- port_dir_rate <= SCARLETT2_PORT_OUT_176;
- rate++, port_dir_rate++) {
- int order_num, i, err;
-
- req.num = cpu_to_le16(rate);
-
- for (order_num = 0, i = 0;
- order_num < SCARLETT2_PORT_TYPE_COUNT;
- order_num++) {
- int port_type = assignment_order[order_num];
- int j = scarlett2_get_port_start_num(ports,
- SCARLETT2_PORT_OUT,
- port_type);
- int port_id = ports[port_type].id;
- int channel;
-
- for (channel = 0;
- channel < ports[port_type].num[port_dir_rate];
- channel++, i++, j++)
- /* lower 12 bits for the destination and
- * next 12 bits for the source
- */
- req.data[i] = !port_id
- ? 0
- : cpu_to_le32(
- port_id |
- channel |
- scarlett2_mux_src_num_to_id(
- ports, private->mux[j]
- ) << 12
- );
-
- /* skip private->mux[j] entries not output */
- j += ports[port_type].num[SCARLETT2_PORT_OUT] -
- ports[port_type].num[port_dir_rate];
+ /* set mux settings for each rate */
+ for (table = 0; table < SCARLETT2_MUX_TABLES; table++) {
+ const struct scarlett2_mux_entry *entry;
+
+ /* i counts over the output array */
+ int i = 0, err;
+
+ req.num = cpu_to_le16(table);
+
+ /* loop through each entry */
+ for (entry = info->mux_assignment[table];
+ entry->count;
+ entry++) {
+ int j;
+ int port_type = entry->port_type;
+ int port_idx = entry->start;
+ int mux_idx = scarlett2_get_port_start_num(port_count,
+ SCARLETT2_PORT_OUT, port_type) + port_idx;
+ int dst_id = scarlett2_ports[port_type].id + port_idx;
+
+ /* Empty slots */
+ if (!dst_id) {
+ for (j = 0; j < entry->count; j++)
+ req.data[i++] = 0;
+ continue;
+ }
+
+ /* Non-empty mux slots use the lower 12 bits
+ * for the destination and next 12 bits for
+ * the source
+ */
+ for (j = 0; j < entry->count; j++) {
+ int src_id = scarlett2_mux_src_num_to_id(
+ port_count, private->mux[mux_idx++]);
+ req.data[i++] = cpu_to_le32(dst_id |
+ src_id << 12);
+ dst_id++;
+ }
}
err = scarlett2_usb(mixer, SCARLETT2_USB_SET_MUX,
@@ -908,26 +1571,26 @@ static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
/* Send USB message to get meter levels */
static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
- u16 *levels)
+ u16 num_meters, u16 *levels)
{
struct {
__le16 pad;
__le16 num_meters;
__le32 magic;
} __packed req;
- u32 resp[SCARLETT2_NUM_METERS];
+ u32 resp[SCARLETT2_MAX_METERS];
int i, err;
req.pad = 0;
- req.num_meters = cpu_to_le16(SCARLETT2_NUM_METERS);
+ req.num_meters = cpu_to_le16(num_meters);
req.magic = cpu_to_le32(SCARLETT2_USB_METER_LEVELS_GET_MAGIC);
- err = scarlett2_usb(mixer, SCARLETT2_USB_GET_METER_LEVELS,
- &req, sizeof(req), resp, sizeof(resp));
+ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_METER,
+ &req, sizeof(req), resp, num_meters * sizeof(u32));
if (err < 0)
return err;
/* copy, convert to u16 */
- for (i = 0; i < SCARLETT2_NUM_METERS; i++)
+ for (i = 0; i < num_meters; i++)
levels[i] = resp[i];
return 0;
@@ -949,10 +1612,15 @@ static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
if (!elem)
return -ENOMEM;
+ /* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
+ * ignores them for resume and other operations.
+ * Also, the head.id field is set to 0, as we don't use this field.
+ */
elem->head.mixer = mixer;
elem->control = index;
- elem->head.id = index;
+ elem->head.id = 0;
elem->channels = channels;
+ elem->val_type = USB_MIXER_BESPOKEN;
kctl = snd_ctl_new1(ncontrol, elem);
if (!kctl) {
@@ -973,6 +1641,64 @@ static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
return 0;
}
+/*** Sync Control ***/
+
+/* Update sync control after receiving notification that the status
+ * has changed
+ */
+static int scarlett2_update_sync(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+
+ private->sync_updated = 0;
+ return scarlett2_usb_get_sync_status(mixer, &private->sync);
+}
+
+static int scarlett2_sync_ctl_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *texts[2] = {
+ "Unlocked", "Locked"
+ };
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
+}
+
+static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ mutex_lock(&private->data_mutex);
+ if (private->sync_updated)
+ scarlett2_update_sync(mixer);
+ ucontrol->value.enumerated.item[0] = private->sync;
+ mutex_unlock(&private->data_mutex);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new scarlett2_sync_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .name = "",
+ .info = scarlett2_sync_ctl_info,
+ .get = scarlett2_sync_ctl_get
+};
+
+static int scarlett2_add_sync_ctl(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+
+ /* devices without a mixer also don't support reporting sync status */
+ if (!private->info->has_mixer)
+ return 0;
+
+ return scarlett2_add_new_ctl(mixer, &scarlett2_sync_ctl,
+ 0, 1, "Sync Status", &private->sync_ctl);
+}
+
/*** Analogue Line Out Volume Controls ***/
/* Update hardware volume controls after receiving notification that
@@ -980,12 +1706,14 @@ static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
*/
static int scarlett2_update_volumes(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
- const struct scarlett2_ports *ports = private->info->ports;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
struct scarlett2_usb_volume_status volume_status;
int num_line_out =
- ports[SCARLETT2_PORT_TYPE_ANALOGUE].num[SCARLETT2_PORT_OUT];
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
int err, i;
+ int mute;
private->vol_updated = 0;
@@ -997,13 +1725,17 @@ static int scarlett2_update_volumes(struct usb_mixer_interface *mixer)
volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
0, SCARLETT2_VOLUME_BIAS);
- for (i = 0; i < num_line_out; i++) {
- if (private->vol_sw_hw_switch[i])
- private->vol[i] = private->master_vol;
- }
+ if (info->line_out_hw_vol)
+ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+ private->dim_mute[i] = !!volume_status.dim_mute[i];
+
+ mute = private->dim_mute[SCARLETT2_BUTTON_MUTE];
- for (i = 0; i < private->info->button_count; i++)
- private->buttons[i] = !!volume_status.buttons[i];
+ for (i = 0; i < num_line_out; i++)
+ if (private->vol_sw_hw_switch[i]) {
+ private->vol[i] = private->master_vol;
+ private->mute_switch[i] = mute;
+ }
return 0;
}
@@ -1026,31 +1758,38 @@ static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
- if (private->vol_updated) {
- mutex_lock(&private->data_mutex);
+ mutex_lock(&private->data_mutex);
+ if (private->vol_updated)
scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);
- }
+ mutex_unlock(&private->data_mutex);
ucontrol->value.integer.value[0] = private->master_vol;
return 0;
}
+static int line_out_remap(struct scarlett2_data *private, int index)
+{
+ const struct scarlett2_device_info *info = private->info;
+
+ if (!info->line_out_remap_enable)
+ return index;
+ return info->line_out_remap[index];
+}
+
static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
- int index = elem->control;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, elem->control);
- if (private->vol_updated) {
- mutex_lock(&private->data_mutex);
+ mutex_lock(&private->data_mutex);
+ if (private->vol_updated)
scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);
- }
+ mutex_unlock(&private->data_mutex);
ucontrol->value.integer.value[0] = private->vol[index];
return 0;
@@ -1061,8 +1800,8 @@ static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
- int index = elem->control;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, elem->control);
int oval, val, err = 0;
mutex_lock(&private->data_mutex);
@@ -1111,8 +1850,77 @@ static const struct snd_kcontrol_new scarlett2_line_out_volume_ctl = {
.tlv = { .p = db_scale_scarlett2_gain }
};
+/*** Mute Switch Controls ***/
+
+static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, elem->control);
+
+ mutex_lock(&private->data_mutex);
+ if (private->vol_updated)
+ scarlett2_update_volumes(mixer);
+ mutex_unlock(&private->data_mutex);
+
+ ucontrol->value.integer.value[0] = private->mute_switch[index];
+ return 0;
+}
+
+static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, elem->control);
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->mute_switch[index];
+ val = !!ucontrol->value.integer.value[0];
+
+ if (oval == val)
+ goto unlock;
+
+ private->mute_switch[index] = val;
+
+ /* Send mute change to the device */
+ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
+ index, val);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static const struct snd_kcontrol_new scarlett2_mute_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_mute_ctl_get,
+ .put = scarlett2_mute_ctl_put,
+};
+
/*** HW/SW Volume Switch Controls ***/
+static void scarlett2_sw_hw_ctl_ro(struct scarlett2_data *private, int index)
+{
+ private->sw_hw_ctls[index]->vd[0].access &=
+ ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+}
+
+static void scarlett2_sw_hw_ctl_rw(struct scarlett2_data *private, int index)
+{
+ private->sw_hw_ctls[index]->vd[0].access |=
+ SNDRV_CTL_ELEM_ACCESS_WRITE;
+}
+
static int scarlett2_sw_hw_enum_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *uinfo)
{
@@ -1127,60 +1935,99 @@ static int scarlett2_sw_hw_enum_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- struct scarlett2_mixer_data *private = elem->head.mixer->private_data;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
+ int index = line_out_remap(private, elem->control);
- ucontrol->value.enumerated.item[0] =
- private->vol_sw_hw_switch[elem->control];
+ ucontrol->value.enumerated.item[0] = private->vol_sw_hw_switch[index];
return 0;
}
-static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
- struct snd_ctl_elem_value *ucontrol)
+static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
+ int index, int value)
{
- struct usb_mixer_elem_info *elem = kctl->private_data;
- struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
-
- int index = elem->control;
- int oval, val, err = 0;
+ struct scarlett2_data *private = mixer->private_data;
+ struct snd_card *card = mixer->chip->card;
- mutex_lock(&private->data_mutex);
+ /* Set/Clear write bits */
+ if (value) {
+ private->vol_ctls[index]->vd[0].access |=
+ SNDRV_CTL_ELEM_ACCESS_WRITE;
+ private->mute_ctls[index]->vd[0].access |=
+ SNDRV_CTL_ELEM_ACCESS_WRITE;
+ } else {
+ private->vol_ctls[index]->vd[0].access &=
+ ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+ private->mute_ctls[index]->vd[0].access &=
+ ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+ }
- oval = private->vol_sw_hw_switch[index];
- val = !!ucontrol->value.integer.value[0];
+ /* Notify of write bit and possible value change */
+ snd_ctl_notify(card,
+ SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+ &private->vol_ctls[index]->id);
+ snd_ctl_notify(card,
+ SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+ &private->mute_ctls[index]->id);
+}
- if (oval == val)
- goto unlock;
+static int scarlett2_sw_hw_change(struct usb_mixer_interface *mixer,
+ int ctl_index, int val)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ int index = line_out_remap(private, ctl_index);
+ int err;
private->vol_sw_hw_switch[index] = val;
/* Change access mode to RO (hardware controlled volume)
* or RW (software controlled volume)
*/
- if (val)
- private->vol_ctls[index]->vd[0].access &=
- ~SNDRV_CTL_ELEM_ACCESS_WRITE;
- else
- private->vol_ctls[index]->vd[0].access |=
- SNDRV_CTL_ELEM_ACCESS_WRITE;
+ scarlett2_vol_ctl_set_writable(mixer, ctl_index, !val);
- /* Reset volume to master volume */
+ /* Reset volume/mute to master volume/mute */
private->vol[index] = private->master_vol;
+ private->mute_switch[index] = private->dim_mute[SCARLETT2_BUTTON_MUTE];
/* Set SW volume to current HW volume */
err = scarlett2_usb_set_config(
mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
index, private->master_vol - SCARLETT2_VOLUME_BIAS);
if (err < 0)
- goto unlock;
+ return err;
- /* Notify of RO/RW change */
- snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_INFO,
- &private->vol_ctls[index]->id);
+ /* Set SW mute to current HW mute */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
+ index, private->dim_mute[SCARLETT2_BUTTON_MUTE]);
+ if (err < 0)
+ return err;
/* Send SW/HW switch change to the device */
- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
- index, val);
+ return scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
+ index, val);
+}
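/* In outline, switching a line out between SW and HW control above: record
 * the new mode, flip the fader and mute controls between read-only and
 * writable, seed the SW volume and mute from the current HW master values,
 * push those two settings to the device, and finally write the SW/HW
 * switch itself.
 */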
+
+static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ int ctl_index = elem->control;
+ int index = line_out_remap(private, ctl_index);
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->vol_sw_hw_switch[index];
+ val = !!ucontrol->value.enumerated.item[0];
+
+ if (oval == val)
+ goto unlock;
+
+ err = scarlett2_sw_hw_change(mixer, ctl_index, val);
+ if (err == 0)
+ err = 1;
unlock:
mutex_unlock(&private->data_mutex);
@@ -1197,6 +2044,55 @@ static const struct snd_kcontrol_new scarlett2_sw_hw_enum_ctl = {
/*** Line Level/Instrument Level Switch Controls ***/
+static int scarlett2_update_input_other(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+
+ private->input_other_updated = 0;
+
+ if (info->level_input_count) {
+ int err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
+ info->level_input_count + info->level_input_first,
+ private->level_switch);
+ if (err < 0)
+ return err;
+ }
+
+ if (info->pad_input_count) {
+ int err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_PAD_SWITCH,
+ info->pad_input_count, private->pad_switch);
+ if (err < 0)
+ return err;
+ }
+
+ if (info->air_input_count) {
+ int err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_AIR_SWITCH,
+ info->air_input_count, private->air_switch);
+ if (err < 0)
+ return err;
+ }
+
+ if (info->phantom_count) {
+ int err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
+ info->phantom_count, private->phantom_switch);
+ if (err < 0)
+ return err;
+
+ err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE,
+ 1, &private->phantom_persistence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static int scarlett2_level_enum_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *uinfo)
{
@@ -1211,10 +2107,18 @@ static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- struct scarlett2_mixer_data *private = elem->head.mixer->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+
+ int index = elem->control + info->level_input_first;
+
+ mutex_lock(&private->data_mutex);
+ if (private->input_other_updated)
+ scarlett2_update_input_other(mixer);
+ ucontrol->value.enumerated.item[0] = private->level_switch[index];
+ mutex_unlock(&private->data_mutex);
- ucontrol->value.enumerated.item[0] =
- private->level_switch[elem->control];
return 0;
}
@@ -1223,15 +2127,16 @@ static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
- int index = elem->control;
+ int index = elem->control + info->level_input_first;
int oval, val, err = 0;
mutex_lock(&private->data_mutex);
oval = private->level_switch[index];
- val = !!ucontrol->value.integer.value[0];
+ val = !!ucontrol->value.enumerated.item[0];
if (oval == val)
goto unlock;
@@ -1241,6 +2146,8 @@ static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
/* Send switch change to the device */
err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
index, val);
+ if (err == 0)
+ err = 1;
unlock:
mutex_unlock(&private->data_mutex);
@@ -1261,10 +2168,16 @@ static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- struct scarlett2_mixer_data *private = elem->head.mixer->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
- ucontrol->value.enumerated.item[0] =
+ mutex_lock(&private->data_mutex);
+ if (private->input_other_updated)
+ scarlett2_update_input_other(mixer);
+ ucontrol->value.integer.value[0] =
private->pad_switch[elem->control];
+ mutex_unlock(&private->data_mutex);
+
return 0;
}
@@ -1273,7 +2186,7 @@ static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
int index = elem->control;
int oval, val, err = 0;
@@ -1291,6 +2204,8 @@ static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
/* Send switch change to the device */
err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PAD_SWITCH,
index, val);
+ if (err == 0)
+ err = 1;
unlock:
mutex_unlock(&private->data_mutex);
@@ -1305,71 +2220,740 @@ static const struct snd_kcontrol_new scarlett2_pad_ctl = {
.put = scarlett2_pad_ctl_put,
};
-/*** Mute/Dim Controls ***/
+/*** Air Switch Controls ***/
-static int scarlett2_button_ctl_get(struct snd_kcontrol *kctl,
- struct snd_ctl_elem_value *ucontrol)
+static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
- if (private->vol_updated) {
- mutex_lock(&private->data_mutex);
- scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);
+ mutex_lock(&private->data_mutex);
+ if (private->input_other_updated)
+ scarlett2_update_input_other(mixer);
+ ucontrol->value.integer.value[0] = private->air_switch[elem->control];
+ mutex_unlock(&private->data_mutex);
+
+ return 0;
+}
+
+static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ int index = elem->control;
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->air_switch[index];
+ val = !!ucontrol->value.integer.value[0];
+
+ if (oval == val)
+ goto unlock;
+
+ private->air_switch[index] = val;
+
+ /* Send switch change to the device */
+ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_AIR_SWITCH,
+ index, val);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static const struct snd_kcontrol_new scarlett2_air_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_air_ctl_get,
+ .put = scarlett2_air_ctl_put,
+};
+
+/*** Phantom Switch Controls ***/
+
+static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ mutex_lock(&private->data_mutex);
+ if (private->input_other_updated)
+ scarlett2_update_input_other(mixer);
+ ucontrol->value.integer.value[0] =
+ private->phantom_switch[elem->control];
+ mutex_unlock(&private->data_mutex);
+
+ return 0;
+}
+
+static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ int index = elem->control;
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->phantom_switch[index];
+ val = !!ucontrol->value.integer.value[0];
+
+ if (oval == val)
+ goto unlock;
+
+ private->phantom_switch[index] = val;
+
+ /* Send switch change to the device */
+ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
+ index, val);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static const struct snd_kcontrol_new scarlett2_phantom_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_phantom_ctl_get,
+ .put = scarlett2_phantom_ctl_put,
+};
+
+/*** Phantom Persistence Control ***/
+
+static int scarlett2_phantom_persistence_ctl_get(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
+
+ ucontrol->value.integer.value[0] = private->phantom_persistence;
+ return 0;
+}
+
+static int scarlett2_phantom_persistence_ctl_put(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ int index = elem->control;
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->phantom_persistence;
+ val = !!ucontrol->value.integer.value[0];
+
+ if (oval == val)
+ goto unlock;
+
+ private->phantom_persistence = val;
+
+ /* Send switch change to the device */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE, index, val);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static const struct snd_kcontrol_new scarlett2_phantom_persistence_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_phantom_persistence_ctl_get,
+ .put = scarlett2_phantom_persistence_ctl_put,
+};
+
+/*** Direct Monitor Control ***/
+
+static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ int err;
+
+ /* monitor_other_enable[0] enables speaker switching
+ * monitor_other_enable[1] enables talkback
+ */
+ u8 monitor_other_enable[2];
+
+ /* monitor_other_switch[0] activates the alternate speakers
+ * monitor_other_switch[1] activates talkback
+ */
+ u8 monitor_other_switch[2];
+
+ private->monitor_other_updated = 0;
+
+ if (info->direct_monitor)
+ return scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_DIRECT_MONITOR,
+ 1, &private->direct_monitor_switch);
+
+ /* if it doesn't do speaker switching then it also doesn't do
+ * talkback
+ */
+ if (!info->has_speaker_switching)
+ return 0;
+
+ err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+ 2, monitor_other_enable);
+ if (err < 0)
+ return err;
+
+ err = scarlett2_usb_get_config(
+ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+ 2, monitor_other_switch);
+ if (err < 0)
+ return err;
+
+ if (!monitor_other_enable[0])
+ private->speaker_switching_switch = 0;
+ else
+ private->speaker_switching_switch = monitor_other_switch[0] + 1;
+
+ if (info->has_talkback) {
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] =
+ info->port_count;
+ int num_mixes =
+ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+ u16 bitmap;
+ int i;
+
+ if (!monitor_other_enable[1])
+ private->talkback_switch = 0;
+ else
+ private->talkback_switch = monitor_other_switch[1] + 1;
+
+ err = scarlett2_usb_get_config(mixer,
+ SCARLETT2_CONFIG_TALKBACK_MAP,
+ 1, &bitmap);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < num_mixes; i++, bitmap >>= 1)
+ private->talkback_map[i] = bitmap & 1;
}
- ucontrol->value.enumerated.item[0] = private->buttons[elem->control];
return 0;
}
-static int scarlett2_button_ctl_put(struct snd_kcontrol *kctl,
- struct snd_ctl_elem_value *ucontrol)
+static int scarlett2_direct_monitor_ctl_get(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
+
+ mutex_lock(&private->data_mutex);
+ if (private->monitor_other_updated)
+ scarlett2_update_monitor_other(mixer);
+ ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
+ mutex_unlock(&private->data_mutex);
+
+ return 0;
+}
+
+static int scarlett2_direct_monitor_ctl_put(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
int index = elem->control;
int oval, val, err = 0;
mutex_lock(&private->data_mutex);
- oval = private->buttons[index];
+ oval = private->direct_monitor_switch;
+ val = min(ucontrol->value.enumerated.item[0], 2U);
+
+ if (oval == val)
+ goto unlock;
+
+ private->direct_monitor_switch = val;
+
+ /* Send switch change to the device */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_DIRECT_MONITOR, index, val);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static int scarlett2_direct_monitor_stereo_enum_ctl_info(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const values[3] = {
+ "Off", "Mono", "Stereo"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, 3, values);
+}
+
+/* Direct Monitor for Solo is mono-only and only needs a boolean control
+ * Direct Monitor for 2i2 is selectable between Off/Mono/Stereo
+ */
+static const struct snd_kcontrol_new scarlett2_direct_monitor_ctl[2] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_direct_monitor_ctl_get,
+ .put = scarlett2_direct_monitor_ctl_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = scarlett2_direct_monitor_stereo_enum_ctl_info,
+ .get = scarlett2_direct_monitor_ctl_get,
+ .put = scarlett2_direct_monitor_ctl_put,
+ }
+};
+
+static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const char *s;
+
+ if (!info->direct_monitor)
+ return 0;
+
+ s = info->direct_monitor == 1
+ ? "Direct Monitor Playback Switch"
+ : "Direct Monitor Playback Enum";
+
+ return scarlett2_add_new_ctl(
+ mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
+ 0, 1, s, &private->direct_monitor_ctl);
+}
+
+/*** Speaker Switching Control ***/
+
+static int scarlett2_speaker_switch_enum_ctl_info(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const values[3] = {
+ "Off", "Main", "Alt"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, 3, values);
+}
+
+static int scarlett2_speaker_switch_enum_ctl_get(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ mutex_lock(&private->data_mutex);
+ if (private->monitor_other_updated)
+ scarlett2_update_monitor_other(mixer);
+ ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
+ mutex_unlock(&private->data_mutex);
+
+ return 0;
+}
+
+/* when speaker switching gets enabled, switch the main/alt speakers
+ * to HW volume and disable those controls
+ */
+static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
+{
+ struct snd_card *card = mixer->chip->card;
+ struct scarlett2_data *private = mixer->private_data;
+ int i, err;
+
+ for (i = 0; i < 4; i++) {
+ int index = line_out_remap(private, i);
+
+ /* switch the main/alt speakers to HW volume */
+ if (!private->vol_sw_hw_switch[index]) {
+ err = scarlett2_sw_hw_change(private->mixer, i, 1);
+ if (err < 0)
+ return err;
+ }
+
+ /* disable the line out SW/HW switch */
+ scarlett2_sw_hw_ctl_ro(private, i);
+ snd_ctl_notify(card,
+ SNDRV_CTL_EVENT_MASK_VALUE |
+ SNDRV_CTL_EVENT_MASK_INFO,
+ &private->sw_hw_ctls[i]->id);
+ }
+
+ /* when the next monitor-other notify comes in, update the mux
+ * configuration
+ */
+ private->speaker_switching_switched = 1;
+
+ return 0;
+}
+
+/* when speaker switching gets disabled, reenable the hw/sw controls
+ * and invalidate the routing
+ */
+static void scarlett2_speaker_switch_disable(struct usb_mixer_interface *mixer)
+{
+ struct snd_card *card = mixer->chip->card;
+ struct scarlett2_data *private = mixer->private_data;
+ int i;
+
+ /* enable the line out SW/HW switch */
+ for (i = 0; i < 4; i++) {
+ scarlett2_sw_hw_ctl_rw(private, i);
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+ &private->sw_hw_ctls[i]->id);
+ }
+
+ /* when the next monitor-other notify comes in, update the mux
+ * configuration
+ */
+ private->speaker_switching_switched = 1;
+}
+
+static int scarlett2_speaker_switch_enum_ctl_put(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->speaker_switching_switch;
+ val = min(ucontrol->value.enumerated.item[0], 2U);
+
+ if (oval == val)
+ goto unlock;
+
+ private->speaker_switching_switch = val;
+
+ /* enable/disable speaker switching */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+ 0, !!val);
+ if (err < 0)
+ goto unlock;
+
+ /* if speaker switching is enabled, select main or alt */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+ 0, val == 2);
+ if (err < 0)
+ goto unlock;
+
+ /* update controls if speaker switching gets enabled or disabled */
+ if (!oval && val)
+ err = scarlett2_speaker_switch_enable(mixer);
+ else if (oval && !val)
+ scarlett2_speaker_switch_disable(mixer);
+
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
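/* The three-way value maps onto two config booleans: MONITOR_OTHER_ENABLE
 * index 0 turns speaker switching on or off, and MONITOR_OTHER_SWITCH
 * index 0 selects Main (val == 1) or Alt (val == 2).  The talkback
 * control below drives index 1 of the same two items.
 */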
+
+static const struct snd_kcontrol_new scarlett2_speaker_switch_enum_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = scarlett2_speaker_switch_enum_ctl_info,
+ .get = scarlett2_speaker_switch_enum_ctl_get,
+ .put = scarlett2_speaker_switch_enum_ctl_put,
+};
+
+static int scarlett2_add_speaker_switch_ctl(
+ struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+
+ if (!info->has_speaker_switching)
+ return 0;
+
+ return scarlett2_add_new_ctl(
+ mixer, &scarlett2_speaker_switch_enum_ctl,
+ 0, 1, "Speaker Switching Playback Enum",
+ &private->speaker_switching_ctl);
+}
+
+/*** Talkback and Talkback Map Controls ***/
+
+static int scarlett2_talkback_enum_ctl_info(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const values[3] = {
+ "Disabled", "Off", "On"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, 3, values);
+}
+
+static int scarlett2_talkback_enum_ctl_get(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ mutex_lock(&private->data_mutex);
+ if (private->monitor_other_updated)
+ scarlett2_update_monitor_other(mixer);
+ ucontrol->value.enumerated.item[0] = private->talkback_switch;
+ mutex_unlock(&private->data_mutex);
+
+ return 0;
+}
+
+static int scarlett2_talkback_enum_ctl_put(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->talkback_switch;
+ val = min(ucontrol->value.enumerated.item[0], 2U);
+
+ if (oval == val)
+ goto unlock;
+
+ private->talkback_switch = val;
+
+ /* enable/disable talkback */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+ 1, !!val);
+ if (err < 0)
+ goto unlock;
+
+ /* if talkback is enabled, select main or alt */
+ err = scarlett2_usb_set_config(
+ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+ 1, val == 2);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static const struct snd_kcontrol_new scarlett2_talkback_enum_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = scarlett2_talkback_enum_ctl_info,
+ .get = scarlett2_talkback_enum_ctl_get,
+ .put = scarlett2_talkback_enum_ctl_put,
+};
+
+static int scarlett2_talkback_map_ctl_get(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ int index = elem->control;
+
+ ucontrol->value.integer.value[0] = private->talkback_map[index];
+
+ return 0;
+}
+
+static int scarlett2_talkback_map_ctl_put(
+ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] =
+ private->info->port_count;
+ int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+
+ int index = elem->control;
+ int oval, val, err = 0, i;
+ u16 bitmap = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->talkback_map[index];
+ val = !!ucontrol->value.integer.value[0];
+
+ if (oval == val)
+ goto unlock;
+
+ private->talkback_map[index] = val;
+
+ for (i = 0; i < num_mixes; i++)
+ bitmap |= private->talkback_map[i] << i;
+
+ /* Send updated bitmap to the device */
+ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_TALKBACK_MAP,
+ 0, bitmap);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
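/* Worked example of the bitmap rebuilt above: with talkback enabled for
 * mixes A and C only, talkback_map[] holds {1, 0, 1, 0, ...} and the
 * bitmap written to SCARLETT2_CONFIG_TALKBACK_MAP is 0b101 = 5.
 */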
+
+static const struct snd_kcontrol_new scarlett2_talkback_map_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_talkback_map_ctl_get,
+ .put = scarlett2_talkback_map_ctl_put,
+};
+
+static int scarlett2_add_talkback_ctls(
+ struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+ int err, i;
+ char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ if (!info->has_talkback)
+ return 0;
+
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_talkback_enum_ctl,
+ 0, 1, "Talkback Playback Enum",
+ &private->talkback_ctl);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < num_mixes; i++) {
+ snprintf(s, sizeof(s),
+ "Talkback Mix %c Playback Switch", i + 'A');
+ err = scarlett2_add_new_ctl(mixer, &scarlett2_talkback_map_ctl,
+ i, 1, s, NULL);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/*** Dim/Mute Controls ***/
+
+static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ mutex_lock(&private->data_mutex);
+ if (private->vol_updated)
+ scarlett2_update_volumes(mixer);
+ mutex_unlock(&private->data_mutex);
+
+ ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
+ return 0;
+}
+
+static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int num_line_out =
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+
+ int index = elem->control;
+ int oval, val, err = 0, i;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->dim_mute[index];
val = !!ucontrol->value.integer.value[0];
if (oval == val)
goto unlock;
- private->buttons[index] = val;
+ private->dim_mute[index] = val;
/* Send switch change to the device */
- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_BUTTONS,
+ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_DIM_MUTE,
index, val);
+ if (err == 0)
+ err = 1;
+
+ if (index == SCARLETT2_BUTTON_MUTE)
+ for (i = 0; i < num_line_out; i++) {
+ int line_index = line_out_remap(private, i);
+
+ if (private->vol_sw_hw_switch[line_index]) {
+ private->mute_switch[line_index] = val;
+ snd_ctl_notify(mixer->chip->card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->mute_ctls[i]->id);
+ }
+ }
unlock:
mutex_unlock(&private->data_mutex);
return err;
}
-static const struct snd_kcontrol_new scarlett2_button_ctl = {
+static const struct snd_kcontrol_new scarlett2_dim_mute_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "",
.info = snd_ctl_boolean_mono_info,
- .get = scarlett2_button_ctl_get,
- .put = scarlett2_button_ctl_put
+ .get = scarlett2_dim_mute_ctl_get,
+ .put = scarlett2_dim_mute_ctl_put
};
/*** Create the analogue output controls ***/
static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
const struct scarlett2_device_info *info = private->info;
- const struct scarlett2_ports *ports = info->ports;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
int num_line_out =
- ports[SCARLETT2_PORT_TYPE_ANALOGUE].num[SCARLETT2_PORT_OUT];
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
int err, i;
char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -1385,6 +2969,7 @@ static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
/* Add volume controls */
for (i = 0; i < num_line_out; i++) {
+ int index = line_out_remap(private, i);
/* Fader */
if (info->line_out_descrs[i])
@@ -1401,10 +2986,22 @@ static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
if (err < 0)
return err;
- /* Make the fader read-only if the SW/HW switch is set to HW */
- if (private->vol_sw_hw_switch[i])
- private->vol_ctls[i]->vd[0].access &=
- ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+ /* Mute Switch */
+ snprintf(s, sizeof(s),
+ "Line %02d Mute Playback Switch",
+ i + 1);
+ err = scarlett2_add_new_ctl(mixer,
+ &scarlett2_mute_ctl,
+ i, 1, s,
+ &private->mute_ctls[i]);
+ if (err < 0)
+ return err;
+
+ /* Make the fader and mute controls read-only if the
+ * SW/HW switch is set to HW
+ */
+ if (private->vol_sw_hw_switch[index])
+ scarlett2_vol_ctl_set_writable(mixer, i, 0);
/* SW/HW Switch */
if (info->line_out_hw_vol) {
@@ -1413,20 +3010,29 @@ static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
i + 1);
err = scarlett2_add_new_ctl(mixer,
&scarlett2_sw_hw_enum_ctl,
- i, 1, s, NULL);
+ i, 1, s,
+ &private->sw_hw_ctls[i]);
if (err < 0)
return err;
+
+ /* Make the switch read-only if the line is
+ * involved in speaker switching
+ */
+ if (private->speaker_switching_switch && i < 4)
+ scarlett2_sw_hw_ctl_ro(private, i);
}
}
- /* Add HW button controls */
- for (i = 0; i < private->info->button_count; i++) {
- err = scarlett2_add_new_ctl(mixer, &scarlett2_button_ctl,
- i, 1, scarlett2_button_names[i],
- &private->button_ctls[i]);
- if (err < 0)
- return err;
- }
+ /* Add dim/mute controls */
+ if (info->line_out_hw_vol)
+ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++) {
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_dim_mute_ctl,
+ i, 1, scarlett2_dim_mute_names[i],
+ &private->dim_mute_ctls[i]);
+ if (err < 0)
+ return err;
+ }
return 0;
}
@@ -1435,25 +3041,70 @@ static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
const struct scarlett2_device_info *info = private->info;
int err, i;
char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ const char *fmt = "Line In %d %s Capture %s";
+ const char *fmt2 = "Line In %d-%d %s Capture %s";
/* Add input level (line/inst) controls */
for (i = 0; i < info->level_input_count; i++) {
- snprintf(s, sizeof(s), "Line In %d Level Capture Enum", i + 1);
+ snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
+ "Level", "Enum");
err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
- i, 1, s, NULL);
+ i, 1, s, &private->level_ctls[i]);
if (err < 0)
return err;
}
/* Add input pad controls */
for (i = 0; i < info->pad_input_count; i++) {
- snprintf(s, sizeof(s), "Line In %d Pad Capture Switch", i + 1);
+ snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
- i, 1, s, NULL);
+ i, 1, s, &private->pad_ctls[i]);
+ if (err < 0)
+ return err;
+ }
+
+ /* Add input air controls */
+ for (i = 0; i < info->air_input_count; i++) {
+ snprintf(s, sizeof(s), fmt, i + 1, "Air", "Switch");
+ err = scarlett2_add_new_ctl(mixer, &scarlett2_air_ctl,
+ i, 1, s, &private->air_ctls[i]);
+ if (err < 0)
+ return err;
+ }
+
+ /* Add input phantom controls */
+ if (info->inputs_per_phantom == 1) {
+ for (i = 0; i < info->phantom_count; i++) {
+ snprintf(s, sizeof(s), fmt, i + 1,
+ "Phantom Power", "Switch");
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_phantom_ctl,
+ i, 1, s, &private->phantom_ctls[i]);
+ if (err < 0)
+ return err;
+ }
+ } else if (info->inputs_per_phantom > 1) {
+ for (i = 0; i < info->phantom_count; i++) {
+ int from = i * info->inputs_per_phantom + 1;
+ int to = (i + 1) * info->inputs_per_phantom;
+
+ snprintf(s, sizeof(s), fmt2, from, to,
+ "Phantom Power", "Switch");
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_phantom_ctl,
+ i, 1, s, &private->phantom_ctls[i]);
+ if (err < 0)
+ return err;
+ }
+ }
+ if (info->phantom_count) {
+ err = scarlett2_add_new_ctl(
+ mixer, &scarlett2_phantom_persistence_ctl, 0, 1,
+ "Phantom Power Persistence Capture Switch", NULL);
if (err < 0)
return err;
}
@@ -1480,7 +3131,7 @@ static int scarlett2_mixer_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- struct scarlett2_mixer_data *private = elem->head.mixer->private_data;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
ucontrol->value.integer.value[0] = private->mix[elem->control];
return 0;
@@ -1491,22 +3142,23 @@ static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
const struct scarlett2_device_info *info = private->info;
- const struct scarlett2_ports *ports = info->ports;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
int oval, val, num_mixer_in, mix_num, err = 0;
+ int index = elem->control;
mutex_lock(&private->data_mutex);
- oval = private->mix[elem->control];
+ oval = private->mix[index];
val = ucontrol->value.integer.value[0];
- num_mixer_in = ports[SCARLETT2_PORT_TYPE_MIX].num[SCARLETT2_PORT_OUT];
- mix_num = elem->control / num_mixer_in;
+ num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+ mix_num = index / num_mixer_in;
if (oval == val)
goto unlock;
- private->mix[elem->control] = val;
+ private->mix[index] = val;
err = scarlett2_usb_set_mix(mixer, mix_num);
if (err == 0)
err = 1;
@@ -1536,16 +3188,19 @@ static const struct snd_kcontrol_new scarlett2_mixer_ctl = {
static int scarlett2_add_mixer_ctls(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
- const struct scarlett2_ports *ports = private->info->ports;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
int err, i, j;
int index;
char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- int num_inputs = ports[SCARLETT2_PORT_TYPE_MIX].num[SCARLETT2_PORT_OUT];
- int num_outputs = ports[SCARLETT2_PORT_TYPE_MIX].num[SCARLETT2_PORT_IN];
+ int num_inputs =
+ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+ int num_outputs =
+ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
- for (i = 0, index = 0; i < num_outputs; i++) {
+ for (i = 0, index = 0; i < num_outputs; i++)
for (j = 0; j < num_inputs; j++, index++) {
snprintf(s, sizeof(s),
"Mix %c Input %02d Playback Volume",
@@ -1555,7 +3210,6 @@ static int scarlett2_add_mixer_ctls(struct usb_mixer_interface *mixer)
if (err < 0)
return err;
}
- }
return 0;
}
@@ -1566,8 +3220,9 @@ static int scarlett2_mux_src_enum_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *uinfo)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- struct scarlett2_mixer_data *private = elem->head.mixer->private_data;
- const struct scarlett2_ports *ports = private->info->ports;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
unsigned int item = uinfo->value.enumerated.item;
int items = private->num_mux_srcs;
int port_type;
@@ -1582,13 +3237,15 @@ static int scarlett2_mux_src_enum_ctl_info(struct snd_kcontrol *kctl,
for (port_type = 0;
port_type < SCARLETT2_PORT_TYPE_COUNT;
port_type++) {
- if (item < ports[port_type].num[SCARLETT2_PORT_IN]) {
+ if (item < port_count[port_type][SCARLETT2_PORT_IN]) {
+ const struct scarlett2_port *port =
+ &scarlett2_ports[port_type];
+
sprintf(uinfo->value.enumerated.name,
- ports[port_type].src_descr,
- item + ports[port_type].src_num_offset);
+ port->src_descr, item + port->src_num_offset);
return 0;
}
- item -= ports[port_type].num[SCARLETT2_PORT_IN];
+ item -= port_count[port_type][SCARLETT2_PORT_IN];
}
return -EINVAL;
@@ -1598,9 +3255,23 @@ static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- struct scarlett2_mixer_data *private = elem->head.mixer->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int line_out_count =
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+ int index = elem->control;
+
+ if (index < line_out_count)
+ index = line_out_remap(private, index);
+
+ mutex_lock(&private->data_mutex);
+ if (private->mux_updated)
+ scarlett2_usb_get_mux(mixer);
+ ucontrol->value.enumerated.item[0] = private->mux[index];
+ mutex_unlock(&private->data_mutex);
- ucontrol->value.enumerated.item[0] = private->mux[elem->control];
return 0;
}
@@ -1609,15 +3280,22 @@ static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
{
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int line_out_count =
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
int index = elem->control;
int oval, val, err = 0;
+ if (index < line_out_count)
+ index = line_out_remap(private, index);
+
mutex_lock(&private->data_mutex);
oval = private->mux[index];
- val = clamp(ucontrol->value.integer.value[0],
- 0L, private->num_mux_srcs - 1L);
+ val = min(ucontrol->value.enumerated.item[0],
+ private->num_mux_srcs - 1U);
if (oval == val)
goto unlock;
@@ -1642,26 +3320,29 @@ static const struct snd_kcontrol_new scarlett2_mux_src_enum_ctl = {
static int scarlett2_add_mux_enums(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
- const struct scarlett2_ports *ports = private->info->ports;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
int port_type, channel, i;
for (i = 0, port_type = 0;
port_type < SCARLETT2_PORT_TYPE_COUNT;
port_type++) {
for (channel = 0;
- channel < ports[port_type].num[SCARLETT2_PORT_OUT];
+ channel < port_count[port_type][SCARLETT2_PORT_OUT];
channel++, i++) {
int err;
char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- const char *const descr = ports[port_type].dst_descr;
+ const char *const descr =
+ scarlett2_ports[port_type].dst_descr;
snprintf(s, sizeof(s) - 5, descr, channel + 1);
strcat(s, " Enum");
err = scarlett2_add_new_ctl(mixer,
&scarlett2_mux_src_enum_ctl,
- i, 1, s, NULL);
+ i, 1, s,
+ &private->mux_ctls[i]);
if (err < 0)
return err;
}
@@ -1689,10 +3370,11 @@ static int scarlett2_meter_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
struct usb_mixer_elem_info *elem = kctl->private_data;
- u16 meter_levels[SCARLETT2_NUM_METERS];
+ u16 meter_levels[SCARLETT2_MAX_METERS];
int i, err;
- err = scarlett2_usb_get_meter_levels(elem->head.mixer, meter_levels);
+ err = scarlett2_usb_get_meter_levels(elem->head.mixer, elem->channels,
+ meter_levels);
if (err < 0)
return err;
@@ -1712,16 +3394,89 @@ static const struct snd_kcontrol_new scarlett2_meter_ctl = {
static int scarlett2_add_meter_ctl(struct usb_mixer_interface *mixer)
{
+ struct scarlett2_data *private = mixer->private_data;
+
+ /* devices without a mixer also don't support reporting levels */
+ if (!private->info->has_mixer)
+ return 0;
+
return scarlett2_add_new_ctl(mixer, &scarlett2_meter_ctl,
- 0, SCARLETT2_NUM_METERS,
+ 0, private->num_mux_dsts,
"Level Meter", NULL);
}
+/*** MSD Controls ***/
+
+static int scarlett2_msd_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct scarlett2_data *private = elem->head.mixer->private_data;
+
+ ucontrol->value.integer.value[0] = private->msd_switch;
+ return 0;
+}
+
+static int scarlett2_msd_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct usb_mixer_interface *mixer = elem->head.mixer;
+ struct scarlett2_data *private = mixer->private_data;
+
+ int oval, val, err = 0;
+
+ mutex_lock(&private->data_mutex);
+
+ oval = private->msd_switch;
+ val = !!ucontrol->value.integer.value[0];
+
+ if (oval == val)
+ goto unlock;
+
+ private->msd_switch = val;
+
+ /* Send switch change to the device */
+ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MSD_SWITCH,
+ 0, val);
+ if (err == 0)
+ err = 1;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
+}
+
+static const struct snd_kcontrol_new scarlett2_msd_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = snd_ctl_boolean_mono_info,
+ .get = scarlett2_msd_ctl_get,
+ .put = scarlett2_msd_ctl_put,
+};
+
+static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
+{
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+
+ if (!info->has_msd_mode)
+ return 0;
+
+ /* If MSD mode is off, hide the switch by default */
+ if (!private->msd_switch && !(mixer->chip->setup & SCARLETT2_MSD_ENABLE))
+ return 0;
+
+ /* Add MSD control */
+ return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
+ 0, 1, "MSD Mode Switch", NULL);
+}
+
/*** Cleanup/Suspend Callbacks ***/
static void scarlett2_private_free(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
cancel_delayed_work_sync(&private->work);
kfree(private);
@@ -1730,7 +3485,7 @@ static void scarlett2_private_free(struct usb_mixer_interface *mixer)
static void scarlett2_private_suspend(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
if (cancel_delayed_work_sync(&private->work))
scarlett2_config_save(private->mixer);
@@ -1738,77 +3493,61 @@ static void scarlett2_private_suspend(struct usb_mixer_interface *mixer)
/*** Initialisation ***/
-static int scarlett2_count_mux_srcs(const struct scarlett2_ports *ports)
+static void scarlett2_count_mux_io(struct scarlett2_data *private)
{
- int port_type, count = 0;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int port_type, srcs = 0, dsts = 0;
for (port_type = 0;
port_type < SCARLETT2_PORT_TYPE_COUNT;
- port_type++)
- count += ports[port_type].num[SCARLETT2_PORT_IN];
+ port_type++) {
+ srcs += port_count[port_type][SCARLETT2_PORT_IN];
+ dsts += port_count[port_type][SCARLETT2_PORT_OUT];
+ }
- return count;
+ private->num_mux_srcs = srcs;
+ private->num_mux_dsts = dsts;
}
-/* Default routing connects PCM outputs and inputs to Analogue,
- * S/PDIF, then ADAT
+/* Look through the interface descriptors for the Focusrite Control
+ * interface (bInterfaceClass = 255 Vendor Specific Class) and set
+ * bInterfaceNumber, bEndpointAddress, wMaxPacketSize, and bInterval
+ * in private
*/
-static void scarlett2_init_routing(u8 *mux,
- const struct scarlett2_ports *ports)
+static int scarlett2_find_fc_interface(struct usb_device *dev,
+ struct scarlett2_data *private)
{
- int i, input_num, input_count, port_type;
- int output_num, output_count, port_type_connect_num;
+ struct usb_host_config *config = dev->actconfig;
+ int i;
- static const int connect_order[] = {
- SCARLETT2_PORT_TYPE_ANALOGUE,
- SCARLETT2_PORT_TYPE_SPDIF,
- SCARLETT2_PORT_TYPE_ADAT,
- -1
- };
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
+ struct usb_interface *intf = config->interface[i];
+ struct usb_interface_descriptor *desc =
+ &intf->altsetting[0].desc;
+ struct usb_endpoint_descriptor *epd;
- /* Assign PCM inputs (routing outputs) */
- output_num = scarlett2_get_port_start_num(ports,
- SCARLETT2_PORT_OUT,
- SCARLETT2_PORT_TYPE_PCM);
- output_count = ports[SCARLETT2_PORT_TYPE_PCM].num[SCARLETT2_PORT_OUT];
-
- for (port_type = connect_order[port_type_connect_num = 0];
- port_type >= 0;
- port_type = connect_order[++port_type_connect_num]) {
- input_num = scarlett2_get_port_start_num(
- ports, SCARLETT2_PORT_IN, port_type);
- input_count = ports[port_type].num[SCARLETT2_PORT_IN];
- for (i = 0;
- i < input_count && output_count;
- i++, output_count--)
- mux[output_num++] = input_num++;
- }
+ if (desc->bInterfaceClass != 255)
+ continue;
- /* Assign PCM outputs (routing inputs) */
- input_num = scarlett2_get_port_start_num(ports,
- SCARLETT2_PORT_IN,
- SCARLETT2_PORT_TYPE_PCM);
- input_count = ports[SCARLETT2_PORT_TYPE_PCM].num[SCARLETT2_PORT_IN];
-
- for (port_type = connect_order[port_type_connect_num = 0];
- port_type >= 0;
- port_type = connect_order[++port_type_connect_num]) {
- output_num = scarlett2_get_port_start_num(
- ports, SCARLETT2_PORT_OUT, port_type);
- output_count = ports[port_type].num[SCARLETT2_PORT_OUT];
- for (i = 0;
- i < output_count && input_count;
- i++, input_count--)
- mux[output_num++] = input_num++;
+ epd = get_endpoint(intf->altsetting, 0);
+ private->bInterfaceNumber = desc->bInterfaceNumber;
+ private->bEndpointAddress = epd->bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK;
+ private->wMaxPacketSize = le16_to_cpu(epd->wMaxPacketSize);
+ private->bInterval = epd->bInterval;
+ return 0;
}
+
+ return -EINVAL;
}
-/* Initialise private data, routing, sequence number */
+/* Initialise private data */
static int scarlett2_init_private(struct usb_mixer_interface *mixer,
const struct scarlett2_device_info *info)
{
- struct scarlett2_mixer_data *private =
- kzalloc(sizeof(struct scarlett2_mixer_data), GFP_KERNEL);
+ struct scarlett2_data *private =
+ kzalloc(sizeof(struct scarlett2_data), GFP_KERNEL);
if (!private)
return -ENOMEM;
@@ -1816,68 +3555,102 @@ static int scarlett2_init_private(struct usb_mixer_interface *mixer,
mutex_init(&private->usb_mutex);
mutex_init(&private->data_mutex);
INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
- private->info = info;
- private->num_mux_srcs = scarlett2_count_mux_srcs(info->ports);
- private->scarlett2_seq = 0;
- private->mixer = mixer;
+
mixer->private_data = private;
mixer->private_free = scarlett2_private_free;
mixer->private_suspend = scarlett2_private_suspend;
- /* Setup default routing */
- scarlett2_init_routing(private->mux, info->ports);
+ private->info = info;
+ scarlett2_count_mux_io(private);
+ private->scarlett2_seq = 0;
+ private->mixer = mixer;
+
+ return scarlett2_find_fc_interface(mixer->chip->dev, private);
+}
+
+/* Cargo cult proprietary initialisation sequence */
+static int scarlett2_usb_init(struct usb_mixer_interface *mixer)
+{
+ struct usb_device *dev = mixer->chip->dev;
+ struct scarlett2_data *private = mixer->private_data;
+ u8 buf[24];
+ int err;
+
+ if (usb_pipe_type_check(dev, usb_sndctrlpipe(dev, 0)))
+ return -EINVAL;
+
+ /* step 0 */
+ err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
+ SCARLETT2_USB_CMD_INIT, buf, sizeof(buf));
+ if (err < 0)
+ return err;
+
+ /* step 1 */
+ private->scarlett2_seq = 1;
+ err = scarlett2_usb(mixer, SCARLETT2_USB_INIT_1, NULL, 0, NULL, 0);
+ if (err < 0)
+ return err;
- /* Initialise the sequence number used for the proprietary commands */
- return scarlett2_usb(mixer, SCARLETT2_USB_INIT_SEQ, NULL, 0, NULL, 0);
+ /* step 2 */
+ private->scarlett2_seq = 1;
+ return scarlett2_usb(mixer, SCARLETT2_USB_INIT_2, NULL, 0, NULL, 84);
}
-/* Read line-in config and line-out volume settings on start */
+/* Read configuration from the interface on start */
static int scarlett2_read_configs(struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct scarlett2_data *private = mixer->private_data;
const struct scarlett2_device_info *info = private->info;
- const struct scarlett2_ports *ports = info->ports;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
int num_line_out =
- ports[SCARLETT2_PORT_TYPE_ANALOGUE].num[SCARLETT2_PORT_OUT];
- u8 level_switches[SCARLETT2_LEVEL_SWITCH_MAX];
- u8 pad_switches[SCARLETT2_PAD_SWITCH_MAX];
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+ int num_mixer_out =
+ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
struct scarlett2_usb_volume_status volume_status;
int err, i;
- if (info->level_input_count) {
+ if (info->has_msd_mode) {
err = scarlett2_usb_get_config(
- mixer,
- SCARLETT2_CONFIG_LEVEL_SWITCH,
- info->level_input_count,
- level_switches);
+ mixer, SCARLETT2_CONFIG_MSD_SWITCH,
+ 1, &private->msd_switch);
if (err < 0)
return err;
- for (i = 0; i < info->level_input_count; i++)
- private->level_switch[i] = level_switches[i];
- }
- if (info->pad_input_count) {
- err = scarlett2_usb_get_config(
- mixer,
- SCARLETT2_CONFIG_PAD_SWITCH,
- info->pad_input_count,
- pad_switches);
- if (err < 0)
- return err;
- for (i = 0; i < info->pad_input_count; i++)
- private->pad_switch[i] = pad_switches[i];
+ /* no other controls are created if MSD mode is on */
+ if (private->msd_switch)
+ return 0;
}
+ err = scarlett2_update_input_other(mixer);
+ if (err < 0)
+ return err;
+
+ err = scarlett2_update_monitor_other(mixer);
+ if (err < 0)
+ return err;
+
+ /* the rest of the configuration is for devices with a mixer */
+ if (!info->has_mixer)
+ return 0;
+
+ err = scarlett2_update_sync(mixer);
+ if (err < 0)
+ return err;
+
err = scarlett2_usb_get_volume_status(mixer, &volume_status);
if (err < 0)
return err;
+ if (info->line_out_hw_vol)
+ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+ private->dim_mute[i] = !!volume_status.dim_mute[i];
+
private->master_vol = clamp(
volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
0, SCARLETT2_VOLUME_BIAS);
for (i = 0; i < num_line_out; i++) {
- int volume;
+ int volume, mute;
private->vol_sw_hw_switch[i] =
info->line_out_hw_vol
@@ -1889,72 +3662,178 @@ static int scarlett2_read_configs(struct usb_mixer_interface *mixer)
volume = clamp(volume + SCARLETT2_VOLUME_BIAS,
0, SCARLETT2_VOLUME_BIAS);
private->vol[i] = volume;
+
+ mute = private->vol_sw_hw_switch[i]
+ ? private->dim_mute[SCARLETT2_BUTTON_MUTE]
+ : volume_status.mute_switch[i];
+ private->mute_switch[i] = mute;
}
- for (i = 0; i < info->button_count; i++)
- private->buttons[i] = !!volume_status.buttons[i];
+ for (i = 0; i < num_mixer_out; i++) {
+ err = scarlett2_usb_get_mix(mixer, i);
+ if (err < 0)
+ return err;
+ }
- return 0;
+ return scarlett2_usb_get_mux(mixer);
}
-/* Notify on volume change */
-static void scarlett2_mixer_interrupt_vol_change(
+/* Notify on sync change */
+static void scarlett2_notify_sync(
struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
- const struct scarlett2_ports *ports = private->info->ports;
+ struct scarlett2_data *private = mixer->private_data;
+
+ private->sync_updated = 1;
+
+ snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->sync_ctl->id);
+}
+
+/* Notify on monitor change */
+static void scarlett2_notify_monitor(
+ struct usb_mixer_interface *mixer)
+{
+ struct snd_card *card = mixer->chip->card;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
int num_line_out =
- ports[SCARLETT2_PORT_TYPE_ANALOGUE].num[SCARLETT2_PORT_OUT];
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
int i;
+ /* if line_out_hw_vol is 0, there are no controls to update */
+ if (!info->line_out_hw_vol)
+ return;
+
private->vol_updated = 1;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&private->master_vol_ctl->id);
- for (i = 0; i < num_line_out; i++) {
- if (!private->vol_sw_hw_switch[i])
- continue;
- snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
- &private->vol_ctls[i]->id);
- }
+ for (i = 0; i < num_line_out; i++)
+ if (private->vol_sw_hw_switch[line_out_remap(private, i)])
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->vol_ctls[i]->id);
}
-/* Notify on button change */
-static void scarlett2_mixer_interrupt_button_change(
+/* Notify on dim/mute change */
+static void scarlett2_notify_dim_mute(
struct usb_mixer_interface *mixer)
{
- struct scarlett2_mixer_data *private = mixer->private_data;
+ struct snd_card *card = mixer->chip->card;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int num_line_out =
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
int i;
private->vol_updated = 1;
- for (i = 0; i < private->info->button_count; i++)
- snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
- &private->button_ctls[i]->id);
+ if (!info->line_out_hw_vol)
+ return;
+
+ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->dim_mute_ctls[i]->id);
+
+ for (i = 0; i < num_line_out; i++)
+ if (private->vol_sw_hw_switch[line_out_remap(private, i)])
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->mute_ctls[i]->id);
+}
+
+/* Notify on "input other" change (level/pad/air) */
+static void scarlett2_notify_input_other(
+ struct usb_mixer_interface *mixer)
+{
+ struct snd_card *card = mixer->chip->card;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+ int i;
+
+ private->input_other_updated = 1;
+
+ for (i = 0; i < info->level_input_count; i++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->level_ctls[i]->id);
+ for (i = 0; i < info->pad_input_count; i++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->pad_ctls[i]->id);
+ for (i = 0; i < info->air_input_count; i++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->air_ctls[i]->id);
+ for (i = 0; i < info->phantom_count; i++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->phantom_ctls[i]->id);
+}
+
+/* Notify on "monitor other" change (direct monitor, speaker
+ * switching, talkback)
+ */
+static void scarlett2_notify_monitor_other(
+ struct usb_mixer_interface *mixer)
+{
+ struct snd_card *card = mixer->chip->card;
+ struct scarlett2_data *private = mixer->private_data;
+ const struct scarlett2_device_info *info = private->info;
+
+ private->monitor_other_updated = 1;
+
+ if (info->direct_monitor) {
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->direct_monitor_ctl->id);
+ return;
+ }
+
+ if (info->has_speaker_switching)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->speaker_switching_ctl->id);
+
+ if (info->has_talkback)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->talkback_ctl->id);
+
+ /* if speaker switching was recently enabled or disabled,
+ * invalidate the dim/mute and mux enum controls
+ */
+ if (private->speaker_switching_switched) {
+ int i;
+
+ scarlett2_notify_dim_mute(mixer);
+
+ private->speaker_switching_switched = 0;
+ private->mux_updated = 1;
+
+ for (i = 0; i < private->num_mux_dsts; i++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &private->mux_ctls[i]->id);
+ }
}
/* Interrupt callback */
-static void scarlett2_mixer_interrupt(struct urb *urb)
+static void scarlett2_notify(struct urb *urb)
{
struct usb_mixer_interface *mixer = urb->context;
int len = urb->actual_length;
int ustatus = urb->status;
u32 data;
- if (ustatus != 0)
+ if (ustatus != 0 || len != 8)
goto requeue;
- if (len == 8) {
- data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
- if (data & SCARLETT2_USB_INTERRUPT_VOL_CHANGE)
- scarlett2_mixer_interrupt_vol_change(mixer);
- if (data & SCARLETT2_USB_INTERRUPT_BUTTON_CHANGE)
- scarlett2_mixer_interrupt_button_change(mixer);
- } else {
- usb_audio_err(mixer->chip,
- "scarlett mixer interrupt length %d\n", len);
- }
+ data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
+ if (data & SCARLETT2_USB_NOTIFY_SYNC)
+ scarlett2_notify_sync(mixer);
+ if (data & SCARLETT2_USB_NOTIFY_MONITOR)
+ scarlett2_notify_monitor(mixer);
+ if (data & SCARLETT2_USB_NOTIFY_DIM_MUTE)
+ scarlett2_notify_dim_mute(mixer);
+ if (data & SCARLETT2_USB_NOTIFY_INPUT_OTHER)
+ scarlett2_notify_input_other(mixer);
+ if (data & SCARLETT2_USB_NOTIFY_MONITOR_OTHER)
+ scarlett2_notify_monitor_other(mixer);
requeue:
if (ustatus != -ENOENT &&
@@ -1965,11 +3844,11 @@ requeue:
}
}
-static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
+static int scarlett2_init_notify(struct usb_mixer_interface *mixer)
{
struct usb_device *dev = mixer->chip->dev;
- unsigned int pipe = usb_rcvintpipe(dev,
- SCARLETT2_USB_INTERRUPT_ENDPOINT);
+ struct scarlett2_data *private = mixer->private_data;
+ unsigned int pipe = usb_rcvintpipe(dev, private->bEndpointAddress);
void *transfer_buffer;
if (mixer->urb) {
@@ -1985,25 +3864,35 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
if (!mixer->urb)
return -ENOMEM;
- transfer_buffer = kmalloc(SCARLETT2_USB_INTERRUPT_MAX_DATA, GFP_KERNEL);
+ transfer_buffer = kmalloc(private->wMaxPacketSize, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
usb_fill_int_urb(mixer->urb, dev, pipe,
- transfer_buffer, SCARLETT2_USB_INTERRUPT_MAX_DATA,
- scarlett2_mixer_interrupt, mixer,
- SCARLETT2_USB_INTERRUPT_INTERVAL);
+ transfer_buffer, private->wMaxPacketSize,
+ scarlett2_notify, mixer, private->bInterval);
return usb_submit_urb(mixer->urb, GFP_KERNEL);
}
-static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
- const struct scarlett2_device_info *info)
+static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
{
+ const struct scarlett2_device_info **info = scarlett2_devices;
int err;
- /* Initialise private data, routing, sequence number */
- err = scarlett2_init_private(mixer, info);
+ /* Find device in scarlett2_devices */
+ while (*info && (*info)->usb_id != mixer->chip->usb_id)
+ info++;
+ if (!*info)
+ return -EINVAL;
+
+ /* Initialise private data */
+ err = scarlett2_init_private(mixer, *info);
+ if (err < 0)
+ return err;
+
+ /* Send proprietary USB initialisation sequence */
+ err = scarlett2_usb_init(mixer);
if (err < 0)
return err;
@@ -2012,6 +3901,15 @@ static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
if (err < 0)
return err;
+ /* Create the MSD control */
+ err = scarlett2_add_msd_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ /* If MSD mode is enabled, don't create any other controls */
+ if (((struct scarlett2_data *)mixer->private_data)->msd_switch)
+ return 0;
+
/* Create the analogue output controls */
err = scarlett2_add_line_out_ctls(mixer);
if (err < 0)
@@ -2037,12 +3935,30 @@ static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
if (err < 0)
return err;
- /* Set up the interrupt polling if there are hardware buttons */
- if (info->button_count) {
- err = scarlett2_mixer_status_create(mixer);
- if (err < 0)
- return err;
- }
+ /* Create the sync control */
+ err = scarlett2_add_sync_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ /* Create the direct monitor control */
+ err = scarlett2_add_direct_monitor_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ /* Create the speaker switching control */
+ err = scarlett2_add_speaker_switch_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ /* Create the talkback controls */
+ err = scarlett2_add_talkback_ctls(mixer);
+ if (err < 0)
+ return err;
+
+ /* Set up the interrupt polling */
+ err = scarlett2_init_notify(mixer);
+ if (err < 0)
+ return err;
return 0;
}
@@ -2050,30 +3966,15 @@ static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
{
struct snd_usb_audio *chip = mixer->chip;
- const struct scarlett2_device_info *info;
int err;
/* only use UAC_VERSION_2 */
if (!mixer->protocol)
return 0;
- switch (chip->usb_id) {
- case USB_ID(0x1235, 0x8203):
- info = &s6i6_gen2_info;
- break;
- case USB_ID(0x1235, 0x8204):
- info = &s18i8_gen2_info;
- break;
- case USB_ID(0x1235, 0x8201):
- info = &s18i20_gen2_info;
- break;
- default: /* device not (yet) supported */
- return -EINVAL;
- }
-
if (!(chip->setup & SCARLETT2_ENABLE)) {
usb_audio_info(chip,
- "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
+ "Focusrite Scarlett Gen 2/3 Mixer Driver disabled; "
"use options snd_usb_audio vid=0x%04x pid=0x%04x "
"device_setup=1 to enable and report any issues "
"to g@b4.vu",
@@ -2083,10 +3984,10 @@ int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
}
usb_audio_info(chip,
- "Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x",
+ "Focusrite Scarlett Gen 2/3 Mixer Driver enabled pid=0x%04x",
USB_ID_PRODUCT(chip->usb_id));
- err = snd_scarlett_gen2_controls_create(mixer, info);
+ err = snd_scarlett_gen2_controls_create(mixer);
if (err < 0)
usb_audio_err(mixer->chip,
"Error initialising Scarlett Mixer Driver: %d",
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index e5311b6bb3f6..4e5031a68064 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -29,15 +29,21 @@
#define SUBSTREAM_FLAG_SYNC_EP_STARTED 1
/* return the estimated delay based on USB frame counters */
-snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
- unsigned int rate)
+static snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
+ struct snd_pcm_runtime *runtime)
{
- int current_frame_number;
- int frame_diff;
+ unsigned int current_frame_number;
+ unsigned int frame_diff;
int est_delay;
+ int queued;
- if (!subs->last_delay)
- return 0; /* short path */
+ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ queued = bytes_to_frames(runtime, subs->inflight_bytes);
+ if (!queued)
+ return 0;
+ } else if (!subs->running) {
+ return 0;
+ }
current_frame_number = usb_get_current_frame_number(subs->dev);
/*
@@ -49,14 +55,14 @@ snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
/* Approximation based on number of samples per USB frame (ms),
some truncation for 44.1 but the estimate is good enough */
- est_delay = frame_diff * rate / 1000;
- if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- est_delay = subs->last_delay - est_delay;
- else
- est_delay = subs->last_delay + est_delay;
+ est_delay = frame_diff * runtime->rate / 1000;
+
+ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ est_delay = queued - est_delay;
+ if (est_delay < 0)
+ est_delay = 0;
+ }
- if (est_delay < 0)
- est_delay = 0;
return est_delay;
}
@@ -65,17 +71,17 @@ snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
*/
static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream)
{
- struct snd_usb_substream *subs = substream->runtime->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_usb_substream *subs = runtime->private_data;
unsigned int hwptr_done;
if (atomic_read(&subs->stream->chip->shutdown))
return SNDRV_PCM_POS_XRUN;
spin_lock(&subs->lock);
hwptr_done = subs->hwptr_done;
- substream->runtime->delay = snd_usb_pcm_delay(subs,
- substream->runtime->rate);
+ runtime->delay = snd_usb_pcm_delay(subs, runtime);
spin_unlock(&subs->lock);
- return hwptr_done / (substream->runtime->frame_bits >> 3);
+ return bytes_to_frames(runtime, hwptr_done);
}
/*
@@ -600,17 +606,14 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
goto unlock;
/* reset the pointer */
+ subs->buffer_bytes = frames_to_bytes(runtime, runtime->buffer_size);
+ subs->inflight_bytes = 0;
subs->hwptr_done = 0;
subs->transfer_done = 0;
- subs->last_delay = 0;
subs->last_frame_number = 0;
+ subs->period_elapsed_pending = 0;
runtime->delay = 0;
- /* for playback, submit the URBs now; otherwise, the first hwptr_done
- * updates for all URBs would happen at the same time when starting */
- if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- ret = start_endpoints(subs);
-
unlock:
snd_usb_unlock_shutdown(chip);
return ret;
@@ -1147,28 +1150,23 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
spin_lock_irqsave(&subs->lock, flags);
oldptr = subs->hwptr_done;
subs->hwptr_done += bytes;
- if (subs->hwptr_done >= runtime->buffer_size * stride)
- subs->hwptr_done -= runtime->buffer_size * stride;
+ if (subs->hwptr_done >= subs->buffer_bytes)
+ subs->hwptr_done -= subs->buffer_bytes;
frames = (bytes + (oldptr % stride)) / stride;
subs->transfer_done += frames;
if (subs->transfer_done >= runtime->period_size) {
subs->transfer_done -= runtime->period_size;
period_elapsed = 1;
}
- /* capture delay is by construction limited to one URB,
- * reset delays here
- */
- runtime->delay = subs->last_delay = 0;
/* realign last_frame_number */
subs->last_frame_number = current_frame_number;
- subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
spin_unlock_irqrestore(&subs->lock, flags);
/* copy a data chunk */
- if (oldptr + bytes > runtime->buffer_size * stride) {
- unsigned int bytes1 =
- runtime->buffer_size * stride - oldptr;
+ if (oldptr + bytes > subs->buffer_bytes) {
+ unsigned int bytes1 = subs->buffer_bytes - oldptr;
+
memcpy(runtime->dma_area + oldptr, cp, bytes1);
memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
} else {
@@ -1180,17 +1178,29 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
snd_pcm_period_elapsed(subs->pcm_substream);
}
+static void urb_ctx_queue_advance(struct snd_usb_substream *subs,
+ struct urb *urb, unsigned int bytes)
+{
+ struct snd_urb_ctx *ctx = urb->context;
+
+ ctx->queued += bytes;
+ subs->inflight_bytes += bytes;
+ subs->hwptr_done += bytes;
+ if (subs->hwptr_done >= subs->buffer_bytes)
+ subs->hwptr_done -= subs->buffer_bytes;
+}
+
static inline void fill_playback_urb_dsd_dop(struct snd_usb_substream *subs,
struct urb *urb, unsigned int bytes)
{
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
- unsigned int stride = runtime->frame_bits >> 3;
unsigned int dst_idx = 0;
unsigned int src_idx = subs->hwptr_done;
- unsigned int wrap = runtime->buffer_size * stride;
+ unsigned int wrap = subs->buffer_bytes;
u8 *dst = urb->transfer_buffer;
u8 *src = runtime->dma_area;
u8 marker[] = { 0x05, 0xfa };
+ unsigned int queued = 0;
/*
* The DSP DOP format defines a way to transport DSD samples over
@@ -1229,12 +1239,29 @@ static inline void fill_playback_urb_dsd_dop(struct snd_usb_substream *subs,
dst[dst_idx++] = bitrev8(src[idx]);
else
dst[dst_idx++] = src[idx];
-
- subs->hwptr_done++;
+ queued++;
}
}
- if (subs->hwptr_done >= runtime->buffer_size * stride)
- subs->hwptr_done -= runtime->buffer_size * stride;
+
+ urb_ctx_queue_advance(subs, urb, queued);
+}
+
+/* copy bit-reversed bytes onto transfer buffer */
+static void fill_playback_urb_dsd_bitrev(struct snd_usb_substream *subs,
+ struct urb *urb, unsigned int bytes)
+{
+ struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ const u8 *src = runtime->dma_area;
+ u8 *buf = urb->transfer_buffer;
+ int i, ofs = subs->hwptr_done;
+
+ for (i = 0; i < bytes; i++) {
+ *buf++ = bitrev8(src[ofs]);
+ if (++ofs >= subs->buffer_bytes)
+ ofs = 0;
+ }
+
+ urb_ctx_queue_advance(subs, urb, bytes);
}
static void copy_to_urb(struct snd_usb_substream *subs, struct urb *urb,
@@ -1242,10 +1269,10 @@ static void copy_to_urb(struct snd_usb_substream *subs, struct urb *urb,
{
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
- if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
+ if (subs->hwptr_done + bytes > subs->buffer_bytes) {
/* err, the transferred area goes over buffer boundary. */
- unsigned int bytes1 =
- runtime->buffer_size * stride - subs->hwptr_done;
+ unsigned int bytes1 = subs->buffer_bytes - subs->hwptr_done;
+
memcpy(urb->transfer_buffer + offset,
runtime->dma_area + subs->hwptr_done, bytes1);
memcpy(urb->transfer_buffer + offset + bytes1,
@@ -1254,9 +1281,8 @@ static void copy_to_urb(struct snd_usb_substream *subs, struct urb *urb,
memcpy(urb->transfer_buffer + offset,
runtime->dma_area + subs->hwptr_done, bytes);
}
- subs->hwptr_done += bytes;
- if (subs->hwptr_done >= runtime->buffer_size * stride)
- subs->hwptr_done -= runtime->buffer_size * stride;
+
+ urb_ctx_queue_advance(subs, urb, bytes);
}
static unsigned int copy_to_urb_quirk(struct snd_usb_substream *subs,
@@ -1295,17 +1321,18 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
int i, stride, period_elapsed = 0;
unsigned long flags;
- stride = runtime->frame_bits >> 3;
+ stride = ep->stride;
frames = 0;
+ ctx->queued = 0;
urb->number_of_packets = 0;
spin_lock_irqsave(&subs->lock, flags);
subs->frame_limit += ep->max_urb_frames;
for (i = 0; i < ctx->packets; i++) {
counts = snd_usb_endpoint_next_packet_size(ep, ctx, i);
/* set up descriptor */
- urb->iso_frame_desc[i].offset = frames * ep->stride;
- urb->iso_frame_desc[i].length = counts * ep->stride;
+ urb->iso_frame_desc[i].offset = frames * stride;
+ urb->iso_frame_desc[i].length = counts * stride;
frames += counts;
urb->number_of_packets++;
subs->transfer_done += counts;
@@ -1320,14 +1347,14 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
frames -= subs->transfer_done;
counts -= subs->transfer_done;
urb->iso_frame_desc[i].length =
- counts * ep->stride;
+ counts * stride;
subs->transfer_done = 0;
}
i++;
if (i < ctx->packets) {
/* add a transfer delimiter */
urb->iso_frame_desc[i].offset =
- frames * ep->stride;
+ frames * stride;
urb->iso_frame_desc[i].length = 0;
urb->number_of_packets++;
}
@@ -1340,24 +1367,14 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
!snd_usb_endpoint_implicit_feedback_sink(ep))
break;
}
- bytes = frames * ep->stride;
+ bytes = frames * stride;
if (unlikely(ep->cur_format == SNDRV_PCM_FORMAT_DSD_U16_LE &&
subs->cur_audiofmt->dsd_dop)) {
fill_playback_urb_dsd_dop(subs, urb, bytes);
} else if (unlikely(ep->cur_format == SNDRV_PCM_FORMAT_DSD_U8 &&
subs->cur_audiofmt->dsd_bitrev)) {
- /* bit-reverse the bytes */
- u8 *buf = urb->transfer_buffer;
- for (i = 0; i < bytes; i++) {
- int idx = (subs->hwptr_done + i)
- % (runtime->buffer_size * stride);
- buf[i] = bitrev8(runtime->dma_area[idx]);
- }
-
- subs->hwptr_done += bytes;
- if (subs->hwptr_done >= runtime->buffer_size * stride)
- subs->hwptr_done -= runtime->buffer_size * stride;
+ fill_playback_urb_dsd_bitrev(subs, urb, bytes);
} else {
/* usual PCM */
if (!subs->tx_length_quirk)
@@ -1367,14 +1384,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
/* bytes is now amount of outgoing data */
}
- /* update delay with exact number of samples queued */
- runtime->delay = subs->last_delay;
- runtime->delay += frames;
- subs->last_delay = runtime->delay;
-
- /* realign last_frame_number */
subs->last_frame_number = usb_get_current_frame_number(subs->dev);
- subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
if (subs->trigger_tstamp_pending_update) {
/* this is the first actual URB submitted,
@@ -1384,6 +1394,10 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
subs->trigger_tstamp_pending_update = false;
}
+ if (period_elapsed && !subs->running) {
+ subs->period_elapsed_pending = 1;
+ period_elapsed = 0;
+ }
spin_unlock_irqrestore(&subs->lock, flags);
urb->transfer_buffer_length = bytes;
if (period_elapsed)
@@ -1398,55 +1412,32 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
struct urb *urb)
{
unsigned long flags;
- struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
- struct snd_usb_endpoint *ep = subs->data_endpoint;
- int processed = urb->transfer_buffer_length / ep->stride;
- int est_delay;
-
- /* ignore the delay accounting when processed=0 is given, i.e.
- * silent payloads are processed before handling the actual data
- */
- if (!processed)
- return;
+ struct snd_urb_ctx *ctx = urb->context;
+ bool period_elapsed = false;
spin_lock_irqsave(&subs->lock, flags);
- if (!subs->last_delay)
- goto out; /* short path */
-
- est_delay = snd_usb_pcm_delay(subs, runtime->rate);
- /* update delay with exact number of samples played */
- if (processed > subs->last_delay)
- subs->last_delay = 0;
- else
- subs->last_delay -= processed;
- runtime->delay = subs->last_delay;
-
- /*
- * Report when delay estimate is off by more than 2ms.
- * The error should be lower than 2ms since the estimate relies
- * on two reads of a counter updated every ms.
- */
- if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
- dev_dbg_ratelimited(&subs->dev->dev,
- "delay: estimated %d, actual %d\n",
- est_delay, subs->last_delay);
-
- if (!subs->running) {
- /* update last_frame_number for delay counting here since
- * prepare_playback_urb won't be called during pause
- */
- subs->last_frame_number =
- usb_get_current_frame_number(subs->dev) & 0xff;
+ if (ctx->queued) {
+ if (subs->inflight_bytes >= ctx->queued)
+ subs->inflight_bytes -= ctx->queued;
+ else
+ subs->inflight_bytes = 0;
}
- out:
+ subs->last_frame_number = usb_get_current_frame_number(subs->dev);
+ if (subs->running) {
+ period_elapsed = subs->period_elapsed_pending;
+ subs->period_elapsed_pending = 0;
+ }
spin_unlock_irqrestore(&subs->lock, flags);
+ if (period_elapsed)
+ snd_pcm_period_elapsed(subs->pcm_substream);
}
static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_usb_substream *subs = substream->runtime->private_data;
+ int err;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -1457,6 +1448,14 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
prepare_playback_urb,
retire_playback_urb,
subs);
+ if (cmd == SNDRV_PCM_TRIGGER_START) {
+ err = start_endpoints(subs);
+ if (err < 0) {
+ snd_usb_endpoint_set_callback(subs->data_endpoint,
+ NULL, NULL, NULL);
+ return err;
+ }
+ }
subs->running = 1;
dev_dbg(&subs->dev->dev, "%d:%d Start Playback PCM\n",
subs->cur_audiofmt->iface,
@@ -1504,6 +1503,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
snd_usb_endpoint_set_callback(subs->data_endpoint,
NULL, retire_capture_urb,
subs);
+ subs->last_frame_number = usb_get_current_frame_number(subs->dev);
subs->running = 1;
dev_dbg(&subs->dev->dev, "%d:%d Start Capture PCM\n",
subs->cur_audiofmt->iface,
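(Editorial aside, not part of the patch: the pcm.c changes above drop the last_delay bookkeeping and instead count in-flight bytes per URB via urb_ctx_queue_advance() and ctx->queued, so snd_usb_pcm_delay() can estimate the playback delay as the frames still queued minus the frames played out since last_frame_number, clamped at zero. A minimal sketch of that arithmetic follows — illustration only, with a hypothetical helper name, simplified from the function above:)

/* Sketch only: playback delay estimate as in the reworked
 * snd_usb_pcm_delay().  The USB frame number advances once per
 * millisecond, so frame_diff frames correspond to roughly
 * frame_diff * rate / 1000 samples already played out.
 */
static int playback_delay_frames(int queued_frames, int frame_diff, int rate)
{
	int played = frame_diff * rate / 1000;
	int delay = queued_frames - played;

	return delay > 0 ? delay : 0;
}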
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
index 06c586467d3f..493a4e34d78d 100644
--- a/sound/usb/pcm.h
+++ b/sound/usb/pcm.h
@@ -2,9 +2,6 @@
#ifndef __USBAUDIO_PCM_H
#define __USBAUDIO_PCM_H
-snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
- unsigned int rate);
-
void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream);
int snd_usb_pcm_suspend(struct snd_usb_stream *as);
int snd_usb_pcm_resume(struct snd_usb_stream *as);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index bdba37d0faab..19bb499c17da 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3044,6 +3044,76 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
},
+/* Denon DN-X1600 */
+{
+ USB_AUDIO_DEVICE(0x154e, 0x500e),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Denon",
+ .product_name = "DN-X1600",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]){
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 1,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0x0,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ADAPTIVE,
+ .maxpacksize = 0x138,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0x0,
+ .endpoint = 0x85,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ADAPTIVE,
+ .maxpacksize = 0x138,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ {
+ .ifnum = 4,
+ .type = QUIRK_MIDI_STANDARD_INTERFACE,
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
/* Microsoft XboxLive Headset/Xbox Communicator */
{
USB_DEVICE(0x045e, 0x0283),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 8b8bee3c3dd6..326d1b0ea5e6 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1897,6 +1897,10 @@ static const struct registration_quirk registration_quirks[] = {
REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
+ REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
+ REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
+ REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */
+ REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */
{ 0 } /* terminator */
};
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 6e1bfe894dd5..e558931cce16 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -49,7 +49,7 @@ static int us122l_create_usbmidi(struct snd_card *card)
static const struct snd_usb_audio_quirk quirk = {
.vendor_name = "US122L",
.product_name = NAME_ALLCAPS,
- .ifnum = 1,
+ .ifnum = 1,
.type = QUIRK_MIDI_US122L,
.data = &quirk_data
};
@@ -71,7 +71,7 @@ static int us144_create_usbmidi(struct snd_card *card)
static const struct snd_usb_audio_quirk quirk = {
.vendor_name = "US144",
.product_name = NAME_ALLCAPS,
- .ifnum = 0,
+ .ifnum = 0,
.type = QUIRK_MIDI_US122L,
.data = &quirk_data
};
@@ -95,6 +95,7 @@ static void pt_info_set(struct usb_device *dev, u8 v)
static void usb_stream_hwdep_vm_open(struct vm_area_struct *area)
{
struct us122l *us122l = area->vm_private_data;
+
atomic_inc(&us122l->mmap_count);
snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}
@@ -113,9 +114,9 @@ static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf)
goto unlock;
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset < PAGE_ALIGN(s->read_size))
+ if (offset < PAGE_ALIGN(s->read_size)) {
vaddr = (char *)s + offset;
- else {
+ } else {
offset -= PAGE_ALIGN(s->read_size);
if (offset >= PAGE_ALIGN(s->write_size))
goto unlock;
@@ -138,6 +139,7 @@ unlock:
static void usb_stream_hwdep_vm_close(struct vm_area_struct *area)
{
struct us122l *us122l = area->vm_private_data;
+
atomic_dec(&us122l->mmap_count);
snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}
@@ -148,11 +150,11 @@ static const struct vm_operations_struct usb_stream_hwdep_vm_ops = {
.close = usb_stream_hwdep_vm_close,
};
-
static int usb_stream_hwdep_open(struct snd_hwdep *hw, struct file *file)
{
struct us122l *us122l = hw->private_data;
struct usb_interface *iface;
+
snd_printdd(KERN_DEBUG "%p %p\n", hw, file);
if (hw->used >= 2)
return -EBUSY;
@@ -173,6 +175,7 @@ static int usb_stream_hwdep_release(struct snd_hwdep *hw, struct file *file)
{
struct us122l *us122l = hw->private_data;
struct usb_interface *iface;
+
snd_printdd(KERN_DEBUG "%p %p\n", hw, file);
if (us122l->is_us144) {
@@ -235,7 +238,7 @@ static __poll_t usb_stream_hwdep_poll(struct snd_hwdep *hw,
struct file *file, poll_table *wait)
{
struct us122l *us122l = hw->private_data;
- unsigned *polled;
+ unsigned int *polled;
__poll_t mask;
poll_wait(file, &us122l->sk.sleep, wait);
@@ -243,6 +246,7 @@ static __poll_t usb_stream_hwdep_poll(struct snd_hwdep *hw,
mask = EPOLLIN | EPOLLOUT | EPOLLWRNORM | EPOLLERR;
if (mutex_trylock(&us122l->mutex)) {
struct usb_stream *s = us122l->sk.s;
+
if (s && s->state == usb_stream_ready) {
if (us122l->first == file)
polled = &s->periods_polled;
@@ -251,8 +255,9 @@ static __poll_t usb_stream_hwdep_poll(struct snd_hwdep *hw,
if (*polled != s->periods_done) {
*polled = s->periods_done;
mask = EPOLLIN | EPOLLOUT | EPOLLWRNORM;
- } else
+ } else {
mask = 0;
+ }
}
mutex_unlock(&us122l->mutex);
}
@@ -262,6 +267,7 @@ static __poll_t usb_stream_hwdep_poll(struct snd_hwdep *hw,
static void us122l_stop(struct us122l *us122l)
{
struct list_head *p;
+
list_for_each(p, &us122l->midi_list)
snd_usbmidi_input_stop(p);
@@ -289,11 +295,11 @@ static int us122l_set_sample_rate(struct usb_device *dev, int rate)
}
static bool us122l_start(struct us122l *us122l,
- unsigned rate, unsigned period_frames)
+ unsigned int rate, unsigned int period_frames)
{
struct list_head *p;
int err;
- unsigned use_packsize = 0;
+ unsigned int use_packsize = 0;
bool success = false;
if (us122l->dev->speed == USB_SPEED_HIGH) {
@@ -320,13 +326,13 @@ static bool us122l_start(struct us122l *us122l,
err = us122l_set_sample_rate(us122l->dev, rate);
if (err < 0) {
us122l_stop(us122l);
- snd_printk(KERN_ERR "us122l_set_sample_rate error \n");
+ snd_printk(KERN_ERR "us122l_set_sample_rate error\n");
goto out;
}
err = usb_stream_start(&us122l->sk);
if (err < 0) {
us122l_stop(us122l);
- snd_printk(KERN_ERR "us122l_start error %i \n", err);
+ snd_printk(KERN_ERR "%s error %i\n", __func__, err);
goto out;
}
list_for_each(p, &us122l->midi_list)
@@ -337,12 +343,12 @@ out:
}
static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
- unsigned cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
struct usb_stream_config cfg;
struct us122l *us122l = hw->private_data;
struct usb_stream *s;
- unsigned min_period_frames;
+ unsigned int min_period_frames;
int err = 0;
bool high_speed;
@@ -379,13 +385,13 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
if (cfg.period_frames < min_period_frames)
return -EINVAL;
- snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
+ snd_power_wait(hw->card);
mutex_lock(&us122l->mutex);
s = us122l->sk.s;
- if (!us122l->master)
+ if (!us122l->master) {
us122l->master = file;
- else if (us122l->master != file) {
+ } else if (us122l->master != file) {
if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg))) {
err = -EIO;
goto unlock;
@@ -431,7 +437,6 @@ static int usb_stream_hwdep_new(struct snd_card *card)
return 0;
}
-
static bool us122l_create_card(struct snd_card *card)
{
int err;
@@ -440,13 +445,13 @@ static bool us122l_create_card(struct snd_card *card)
if (us122l->is_us144) {
err = usb_set_interface(us122l->dev, 0, 1);
if (err) {
- snd_printk(KERN_ERR "usb_set_interface error \n");
+ snd_printk(KERN_ERR "usb_set_interface error\n");
return false;
}
}
err = usb_set_interface(us122l->dev, 1, 1);
if (err) {
- snd_printk(KERN_ERR "usb_set_interface error \n");
+ snd_printk(KERN_ERR "usb_set_interface error\n");
return false;
}
@@ -461,13 +466,14 @@ static bool us122l_create_card(struct snd_card *card)
else
err = us122l_create_usbmidi(card);
if (err < 0) {
- snd_printk(KERN_ERR "us122l_create_usbmidi error %i \n", err);
+ snd_printk(KERN_ERR "us122l_create_usbmidi error %i\n", err);
goto stop;
}
err = usb_stream_hwdep_new(card);
if (err < 0) {
-/* release the midi resources */
+ /* release the midi resources */
struct list_head *p;
+
list_for_each(p, &us122l->midi_list)
snd_usbmidi_disconnect(p);
@@ -484,7 +490,8 @@ static void snd_us122l_free(struct snd_card *card)
{
struct us122l *us122l = US122L(card);
int index = us122l->card_index;
- if (index >= 0 && index < SNDRV_CARDS)
+
+ if (index >= 0 && index < SNDRV_CARDS)
snd_us122l_card_used[index] = 0;
}
@@ -565,7 +572,7 @@ static int snd_us122l_probe(struct usb_interface *intf,
if (id->driver_info & US122L_FLAG_US144 &&
device->speed == USB_SPEED_HIGH) {
- snd_printk(KERN_ERR "disable ehci-hcd to run US-144 \n");
+ snd_printk(KERN_ERR "disable ehci-hcd to run US-144\n");
return -ENODEV;
}
@@ -601,7 +608,7 @@ static void snd_us122l_disconnect(struct usb_interface *intf)
us122l_stop(us122l);
mutex_unlock(&us122l->mutex);
-/* release the midi resources */
+ /* release the midi resources */
list_for_each(p, &us122l->midi_list) {
snd_usbmidi_disconnect(p);
}
@@ -661,13 +668,13 @@ static int snd_us122l_resume(struct usb_interface *intf)
if (us122l->is_us144) {
err = usb_set_interface(us122l->dev, 0, 1);
if (err) {
- snd_printk(KERN_ERR "usb_set_interface error \n");
+ snd_printk(KERN_ERR "usb_set_interface error\n");
goto unlock;
}
}
err = usb_set_interface(us122l->dev, 1, 1);
if (err) {
- snd_printk(KERN_ERR "usb_set_interface error \n");
+ snd_printk(KERN_ERR "usb_set_interface error\n");
goto unlock;
}
@@ -677,7 +684,7 @@ static int snd_us122l_resume(struct usb_interface *intf)
err = us122l_set_sample_rate(us122l->dev,
us122l->sk.s->cfg.sample_rate);
if (err < 0) {
- snd_printk(KERN_ERR "us122l_set_sample_rate error \n");
+ snd_printk(KERN_ERR "us122l_set_sample_rate error\n");
goto unlock;
}
err = usb_stream_start(&us122l->sk);
@@ -717,8 +724,8 @@ static const struct usb_device_id snd_us122l_usb_id_table[] = {
},
{ /* terminator */ }
};
-
MODULE_DEVICE_TABLE(usb, snd_us122l_usb_id_table);
+
static struct usb_driver snd_us122l_usb_driver = {
.name = "snd-usb-us122l",
.probe = snd_us122l_probe,
diff --git a/sound/usb/usx2y/us122l.h b/sound/usb/usx2y/us122l.h
index 34bea99d343c..c32ae5e981e9 100644
--- a/sound/usb/usx2y/us122l.h
+++ b/sound/usb/usx2y/us122l.h
@@ -11,7 +11,7 @@ struct us122l {
struct mutex mutex;
struct file *first;
- unsigned second_periods_polled;
+ unsigned int second_periods_polled;
struct file *master;
struct file *slave;
struct list_head midi_list;
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 22412cd69e98..c29da0341bc5 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -21,15 +21,15 @@
static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
{
unsigned long offset;
- struct page * page;
+ struct page *page;
void *vaddr;
snd_printdd("ENTER, start %lXh, pgoff %ld\n",
vmf->vma->vm_start,
vmf->pgoff);
-
+
offset = vmf->pgoff << PAGE_SHIFT;
- vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
+ vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
page = virt_to_page(vaddr);
get_page(page);
vmf->page = page;
@@ -44,30 +44,22 @@ static const struct vm_operations_struct us428ctls_vm_ops = {
.fault = snd_us428ctls_vm_fault,
};
-static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area)
+static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp, struct vm_area_struct *area)
{
unsigned long size = (unsigned long)(area->vm_end - area->vm_start);
- struct usX2Ydev *us428 = hw->private_data;
+ struct usx2ydev *us428 = hw->private_data;
// FIXME this hwdep interface is used twice: fpga download and mmap for controlling Lights etc. Maybe better using 2 hwdep devs?
// so as long as the device isn't fully initialised yet we return -EBUSY here.
- if (!(us428->chip_status & USX2Y_STAT_CHIP_INIT))
+ if (!(us428->chip_status & USX2Y_STAT_CHIP_INIT))
return -EBUSY;
- /* if userspace tries to mmap beyond end of our buffer, fail */
- if (size > PAGE_ALIGN(sizeof(struct us428ctls_sharedmem))) {
- snd_printd( "%lu > %lu\n", size, (unsigned long)sizeof(struct us428ctls_sharedmem));
- return -EINVAL;
+ /* if userspace tries to mmap beyond end of our buffer, fail */
+ if (size > US428_SHAREDMEM_PAGES) {
+ snd_printd("%lu > %lu\n", size, (unsigned long)US428_SHAREDMEM_PAGES);
+ return -EINVAL;
}
- if (!us428->us428ctls_sharedmem) {
- init_waitqueue_head(&us428->us428ctls_wait_queue_head);
- us428->us428ctls_sharedmem = alloc_pages_exact(sizeof(struct us428ctls_sharedmem), GFP_KERNEL);
- if (!us428->us428ctls_sharedmem)
- return -ENOMEM;
- memset(us428->us428ctls_sharedmem, -1, sizeof(struct us428ctls_sharedmem));
- us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
- }
area->vm_ops = &us428ctls_vm_ops;
area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
area->vm_private_data = hw->private_data;
@@ -77,21 +69,22 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
static __poll_t snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
{
__poll_t mask = 0;
- struct usX2Ydev *us428 = hw->private_data;
+ struct usx2ydev *us428 = hw->private_data;
struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;
+
if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
return EPOLLHUP;
poll_wait(file, &us428->us428ctls_wait_queue_head, wait);
- if (shm != NULL && shm->CtlSnapShotLast != shm->CtlSnapShotRed)
+ if (shm && shm->ctl_snapshot_last != shm->ctl_snapshot_red)
mask |= EPOLLIN;
return mask;
}
-static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
+static int snd_usx2y_hwdep_dsp_status(struct snd_hwdep *hw,
struct snd_hwdep_dsp_status *info)
{
static const char * const type_ids[USX2Y_TYPE_NUMS] = {
@@ -99,7 +92,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
[USX2Y_TYPE_224] = "us224",
[USX2Y_TYPE_428] = "us428",
};
- struct usX2Ydev *us428 = hw->private_data;
+ struct usx2ydev *us428 = hw->private_data;
int id = -1;
switch (le16_to_cpu(us428->dev->descriptor.idProduct)) {
@@ -113,7 +106,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
id = USX2Y_TYPE_428;
break;
}
- if (0 > id)
+ if (id < 0)
return -ENODEV;
strcpy(info->id, type_ids[id]);
info->num_dsps = 2; // 0: Prepad Data, 1: FPGA Code
@@ -123,8 +116,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
return 0;
}
-
-static int usX2Y_create_usbmidi(struct snd_card *card)
+static int usx2y_create_usbmidi(struct snd_card *card)
{
static const struct snd_usb_midi_endpoint_info quirk_data_1 = {
.out_ep = 0x06,
@@ -135,8 +127,8 @@ static int usX2Y_create_usbmidi(struct snd_card *card)
static const struct snd_usb_audio_quirk quirk_1 = {
.vendor_name = "TASCAM",
.product_name = NAME_ALLCAPS,
- .ifnum = 0,
- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .ifnum = 0,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
.data = &quirk_data_1
};
static const struct snd_usb_midi_endpoint_info quirk_data_2 = {
@@ -148,49 +140,50 @@ static int usX2Y_create_usbmidi(struct snd_card *card)
static const struct snd_usb_audio_quirk quirk_2 = {
.vendor_name = "TASCAM",
.product_name = "US428",
- .ifnum = 0,
- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .ifnum = 0,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
.data = &quirk_data_2
};
- struct usb_device *dev = usX2Y(card)->dev;
+ struct usb_device *dev = usx2y(card)->dev;
struct usb_interface *iface = usb_ifnum_to_if(dev, 0);
const struct snd_usb_audio_quirk *quirk =
le16_to_cpu(dev->descriptor.idProduct) == USB_ID_US428 ?
&quirk_2 : &quirk_1;
- snd_printdd("usX2Y_create_usbmidi \n");
- return snd_usbmidi_create(card, iface, &usX2Y(card)->midi_list, quirk);
+ snd_printdd("%s\n", __func__);
+ return snd_usbmidi_create(card, iface, &usx2y(card)->midi_list, quirk);
}
-static int usX2Y_create_alsa_devices(struct snd_card *card)
+static int usx2y_create_alsa_devices(struct snd_card *card)
{
int err;
- do {
- if ((err = usX2Y_create_usbmidi(card)) < 0) {
- snd_printk(KERN_ERR "usX2Y_create_alsa_devices: usX2Y_create_usbmidi error %i \n", err);
- break;
- }
- if ((err = usX2Y_audio_create(card)) < 0)
- break;
- if ((err = usX2Y_hwdep_pcm_new(card)) < 0)
- break;
- if ((err = snd_card_register(card)) < 0)
- break;
- } while (0);
-
- return err;
-}
+ err = usx2y_create_usbmidi(card);
+ if (err < 0) {
+ snd_printk(KERN_ERR "%s: usx2y_create_usbmidi error %i\n", __func__, err);
+ return err;
+ }
+ err = usx2y_audio_create(card);
+ if (err < 0)
+ return err;
+ err = usx2y_hwdep_pcm_new(card);
+ if (err < 0)
+ return err;
+ err = snd_card_register(card);
+ if (err < 0)
+ return err;
+ return 0;
+}
-static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
+static int snd_usx2y_hwdep_dsp_load(struct snd_hwdep *hw,
struct snd_hwdep_dsp_image *dsp)
{
- struct usX2Ydev *priv = hw->private_data;
- struct usb_device* dev = priv->dev;
+ struct usx2ydev *priv = hw->private_data;
+ struct usb_device *dev = priv->dev;
int lret, err;
char *buf;
- snd_printdd( "dsp_load %s\n", dsp->name);
+ snd_printdd("dsp_load %s\n", dsp->name);
buf = memdup_user(dsp->image, dsp->length);
if (IS_ERR(buf))
@@ -198,7 +191,7 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
err = usb_set_interface(dev, 0, 1);
if (err)
- snd_printk(KERN_ERR "usb_set_interface error \n");
+ snd_printk(KERN_ERR "usb_set_interface error\n");
else
err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
kfree(buf);
@@ -206,45 +199,51 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
return err;
if (dsp->index == 1) {
msleep(250); // give the device some time
- err = usX2Y_AsyncSeq04_init(priv);
+ err = usx2y_async_seq04_init(priv);
if (err) {
- snd_printk(KERN_ERR "usX2Y_AsyncSeq04_init error \n");
+ snd_printk(KERN_ERR "usx2y_async_seq04_init error\n");
return err;
}
- err = usX2Y_In04_init(priv);
+ err = usx2y_in04_init(priv);
if (err) {
- snd_printk(KERN_ERR "usX2Y_In04_init error \n");
+ snd_printk(KERN_ERR "usx2y_in04_init error\n");
return err;
}
- err = usX2Y_create_alsa_devices(hw->card);
+ err = usx2y_create_alsa_devices(hw->card);
if (err) {
- snd_printk(KERN_ERR "usX2Y_create_alsa_devices error %i \n", err);
- snd_card_free(hw->card);
+ snd_printk(KERN_ERR "usx2y_create_alsa_devices error %i\n", err);
return err;
}
- priv->chip_status |= USX2Y_STAT_CHIP_INIT;
+ priv->chip_status |= USX2Y_STAT_CHIP_INIT;
snd_printdd("%s: alsa all started\n", hw->name);
}
return err;
}
-
-int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device)
+int usx2y_hwdep_new(struct snd_card *card, struct usb_device *device)
{
int err;
struct snd_hwdep *hw;
+ struct usx2ydev *us428 = usx2y(card);
- if ((err = snd_hwdep_new(card, SND_USX2Y_LOADER_ID, 0, &hw)) < 0)
+ err = snd_hwdep_new(card, SND_USX2Y_LOADER_ID, 0, &hw);
+ if (err < 0)
return err;
hw->iface = SNDRV_HWDEP_IFACE_USX2Y;
- hw->private_data = usX2Y(card);
- hw->ops.dsp_status = snd_usX2Y_hwdep_dsp_status;
- hw->ops.dsp_load = snd_usX2Y_hwdep_dsp_load;
+ hw->private_data = us428;
+ hw->ops.dsp_status = snd_usx2y_hwdep_dsp_status;
+ hw->ops.dsp_load = snd_usx2y_hwdep_dsp_load;
hw->ops.mmap = snd_us428ctls_mmap;
hw->ops.poll = snd_us428ctls_poll;
hw->exclusive = 1;
sprintf(hw->name, "/dev/bus/usb/%03d/%03d", device->bus->busnum, device->devnum);
+
+ us428->us428ctls_sharedmem = alloc_pages_exact(US428_SHAREDMEM_PAGES, GFP_KERNEL);
+ if (!us428->us428ctls_sharedmem)
+ return -ENOMEM;
+ memset(us428->us428ctls_sharedmem, -1, US428_SHAREDMEM_PAGES);
+ us428->us428ctls_sharedmem->ctl_snapshot_last = -2;
+
return 0;
}
-
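The hwdep ops wired up in usx2y_hwdep_new() above expose the standard ALSA hwdep DSP-loading interface: dsp_status reports two images (0: prepad data, 1: FPGA code), and only once image 1 has been written does snd_usx2y_hwdep_dsp_load() bring up the async and interrupt pipes and create the ALSA devices. A minimal userspace sketch of driving that interface, assuming the uapi <sound/asound.h> ioctls and purely hypothetical device-node and firmware-file names:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

static int load_image(int fd, unsigned int index, const char *path)
{
	static unsigned char buf[256 * 1024];
	struct snd_hwdep_dsp_image img;
	FILE *f = fopen(path, "rb");
	size_t len;

	if (!f)
		return -1;
	len = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	memset(&img, 0, sizeof(img));
	img.index = index;			/* 0: prepad data, 1: FPGA code */
	strncpy((char *)img.name, path, sizeof(img.name) - 1);
	img.image = buf;
	img.length = len;
	return ioctl(fd, SNDRV_HWDEP_IOCTL_DSP_LOAD, &img);
}

int main(void)
{
	int fd = open("/dev/snd/hwC0D0", O_RDWR);	/* assumed hwdep node */

	if (fd < 0)
		return 1;
	if (load_image(fd, 0, "us428-prepad.bin") ||	/* assumed file names */
	    load_image(fd, 1, "us428-fpga.bin")) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}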
diff --git a/sound/usb/usx2y/usX2Yhwdep.h b/sound/usb/usx2y/usX2Yhwdep.h
index 457199b5ed03..0c9946d9cd99 100644
--- a/sound/usb/usx2y/usX2Yhwdep.h
+++ b/sound/usb/usx2y/usX2Yhwdep.h
@@ -2,6 +2,6 @@
#ifndef USX2YHWDEP_H
#define USX2YHWDEP_H
-int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device);
+int usx2y_hwdep_new(struct snd_card *card, struct usb_device *device);
#endif
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index 091c071b270a..9d0e44793896 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -8,12 +8,12 @@
#include "usb_stream.h"
-
/* setup */
-static unsigned usb_stream_next_packet_size(struct usb_stream_kernel *sk)
+static unsigned int usb_stream_next_packet_size(struct usb_stream_kernel *sk)
{
struct usb_stream *s = sk->s;
+
sk->out_phase_peeked = (sk->out_phase & 0xffff) + sk->freqn;
return (sk->out_phase_peeked >> 16) * s->cfg.frame_size;
}
@@ -25,6 +25,7 @@ static void playback_prep_freqn(struct usb_stream_kernel *sk, struct urb *urb)
for (pack = 0; pack < sk->n_o_ps; pack++) {
int l = usb_stream_next_packet_size(sk);
+
if (s->idle_outsize + lb + l > s->period_size)
goto check;
@@ -43,9 +44,10 @@ check:
lb, s->period_size);
}
-static int init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
- struct urb **urbs, char *transfer,
- struct usb_device *dev, int pipe)
+static int init_pipe_urbs(struct usb_stream_kernel *sk,
+ unsigned int use_packsize,
+ struct urb **urbs, char *transfer,
+ struct usb_device *dev, int pipe)
{
int u, p;
int maxpacket = use_packsize ?
@@ -56,6 +58,7 @@ static int init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
++u, transfer += transfer_length) {
struct urb *urb = urbs[u];
struct usb_iso_packet_descriptor *desc;
+
urb->transfer_buffer = transfer;
urb->dev = dev;
urb->pipe = pipe;
@@ -80,13 +83,12 @@ static int init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
return 0;
}
-static int init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
- struct usb_device *dev, int in_pipe, int out_pipe)
+static int init_urbs(struct usb_stream_kernel *sk, unsigned int use_packsize,
+ struct usb_device *dev, int in_pipe, int out_pipe)
{
struct usb_stream *s = sk->s;
- char *indata = (char *)s + sizeof(*s) +
- sizeof(struct usb_stream_packet) *
- s->inpackets;
+ char *indata =
+ (char *)s + sizeof(*s) + sizeof(struct usb_stream_packet) * s->inpackets;
int u;
for (u = 0; u < USB_STREAM_NURBS; ++u) {
@@ -107,12 +109,11 @@ static int init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
return 0;
}
-
/*
* convert a sampling rate into our full speed format (fs/1000 in Q16.16)
* this will overflow at approx 524 kHz
*/
-static inline unsigned get_usb_full_speed_rate(unsigned rate)
+static inline unsigned int get_usb_full_speed_rate(unsigned int rate)
{
return ((rate << 13) + 62) / 125;
}
@@ -121,7 +122,7 @@ static inline unsigned get_usb_full_speed_rate(unsigned rate)
* convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
* this will overflow at approx 4 MHz
*/
-static inline unsigned get_usb_high_speed_rate(unsigned rate)
+static inline unsigned int get_usb_high_speed_rate(unsigned int rate)
{
return ((rate << 10) + 62) / 125;
}
@@ -129,7 +130,7 @@ static inline unsigned get_usb_high_speed_rate(unsigned rate)
void usb_stream_free(struct usb_stream_kernel *sk)
{
struct usb_stream *s;
- unsigned u;
+ unsigned int u;
for (u = 0; u < USB_STREAM_NURBS; ++u) {
usb_free_urb(sk->inurb[u]);
@@ -142,17 +143,23 @@ void usb_stream_free(struct usb_stream_kernel *sk)
if (!s)
return;
- free_pages_exact(sk->write_page, s->write_size);
- sk->write_page = NULL;
+ if (sk->write_page) {
+ free_pages_exact(sk->write_page, s->write_size);
+ sk->write_page = NULL;
+ }
+
free_pages_exact(s, s->read_size);
sk->s = NULL;
}
struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
struct usb_device *dev,
- unsigned in_endpoint, unsigned out_endpoint,
- unsigned sample_rate, unsigned use_packsize,
- unsigned period_frames, unsigned frame_size)
+ unsigned int in_endpoint,
+ unsigned int out_endpoint,
+ unsigned int sample_rate,
+ unsigned int use_packsize,
+ unsigned int period_frames,
+ unsigned int frame_size)
{
int packets, max_packsize;
int in_pipe, out_pipe;
@@ -231,12 +238,12 @@ out:
return sk->s;
}
-
/* start */
static bool balance_check(struct usb_stream_kernel *sk, struct urb *urb)
{
bool r;
+
if (unlikely(urb->status)) {
if (urb->status != -ESHUTDOWN && urb->status != -ENOENT)
snd_printk(KERN_WARNING "status=%i\n", urb->status);
@@ -267,6 +274,7 @@ static void subs_set_complete(struct urb **urbs, void (*complete)(struct urb *))
for (u = 0; u < USB_STREAM_NURBS; u++) {
struct urb *urb = urbs[u];
+
urb->complete = complete;
}
}
@@ -284,6 +292,7 @@ static int usb_stream_prepare_playback(struct usb_stream_kernel *sk,
for (; s->sync_packet < 0; ++p, ++s->sync_packet) {
struct urb *ii = sk->completed_inurb;
+
id = ii->iso_frame_desc +
ii->number_of_packets + s->sync_packet;
l = id->actual_length;
@@ -351,6 +360,7 @@ static int submit_urbs(struct usb_stream_kernel *sk,
struct urb *inurb, struct urb *outurb)
{
int err;
+
prepare_inurb(sk->idle_outurb->number_of_packets, sk->idle_inurb);
err = usb_submit_urb(sk->idle_inurb, GFP_ATOMIC);
if (err < 0)
@@ -447,6 +457,7 @@ static void stream_idle(struct usb_stream_kernel *sk,
for (p = 0; p < inurb->number_of_packets; ++p) {
struct usb_iso_packet_descriptor *id = inurb->iso_frame_desc;
+
l = id[p].actual_length;
if (unlikely(l == 0 || id[p].status)) {
snd_printk(KERN_WARNING "underrun, status=%u\n",
@@ -503,6 +514,7 @@ err_out:
static void i_capture_idle(struct urb *urb)
{
struct usb_stream_kernel *sk = urb->context;
+
if (balance_capture(sk, urb))
stream_idle(sk, urb, sk->i_urb);
}
@@ -510,6 +522,7 @@ static void i_capture_idle(struct urb *urb)
static void i_playback_idle(struct urb *urb)
{
struct usb_stream_kernel *sk = urb->context;
+
if (balance_playback(sk, urb))
stream_idle(sk, sk->i_urb, urb);
}
@@ -518,10 +531,12 @@ static void stream_start(struct usb_stream_kernel *sk,
struct urb *inurb, struct urb *outurb)
{
struct usb_stream *s = sk->s;
+
if (s->state >= usb_stream_sync1) {
int l, p, max_diff, max_diff_0;
int urb_size = 0;
- unsigned frames_per_packet, min_frames = 0;
+ unsigned int frames_per_packet, min_frames = 0;
+
frames_per_packet = (s->period_size - s->idle_insize);
frames_per_packet <<= 8;
frames_per_packet /=
@@ -536,6 +551,7 @@ static void stream_start(struct usb_stream_kernel *sk,
max_diff = max_diff_0;
for (p = 0; p < inurb->number_of_packets; ++p) {
int diff;
+
l = inurb->iso_frame_desc[p].actual_length;
urb_size += l;
@@ -561,7 +577,8 @@ static void stream_start(struct usb_stream_kernel *sk,
(s->inpacket_head + 1) % s->inpackets;
s->next_inpacket_split_at = 0;
} else {
- unsigned split = s->inpacket_head;
+ unsigned int split = s->inpacket_head;
+
l = s->idle_insize;
while (l > s->inpacket[split].length) {
l -= s->inpacket[split].length;
@@ -609,6 +626,7 @@ static void i_capture_start(struct urb *urb)
for (p = 0; p < urb->number_of_packets; ++p) {
int l = id[p].actual_length;
+
if (l < s->cfg.frame_size) {
++empty;
if (s->state >= usb_stream_sync0) {
@@ -628,6 +646,7 @@ static void i_capture_start(struct urb *urb)
urb->iso_frame_desc[0].actual_length);
for (pack = 1; pack < urb->number_of_packets; ++pack) {
int l = urb->iso_frame_desc[pack].actual_length;
+
printk(KERN_CONT " %i", l);
}
printk(KERN_CONT "\n");
@@ -643,6 +662,7 @@ static void i_capture_start(struct urb *urb)
static void i_playback_start(struct urb *urb)
{
struct usb_stream_kernel *sk = urb->context;
+
if (balance_playback(sk, urb))
stream_start(sk, sk->i_urb, urb);
}
@@ -671,6 +691,7 @@ dotry:
for (u = 0; u < 2; u++) {
struct urb *inurb = sk->inurb[u];
struct urb *outurb = sk->outurb[u];
+
playback_prep_freqn(sk, outurb);
inurb->number_of_packets = outurb->number_of_packets;
inurb->transfer_buffer_length =
@@ -680,6 +701,7 @@ dotry:
if (u == 0) {
int now;
struct usb_device *dev = inurb->dev;
+
frame = usb_get_current_frame_number(dev);
do {
now = usb_get_current_frame_number(dev);
@@ -688,14 +710,16 @@ dotry:
}
err = usb_submit_urb(inurb, GFP_ATOMIC);
if (err < 0) {
- snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
- " returned %i\n", u, err);
+ snd_printk(KERN_ERR
+ "usb_submit_urb(sk->inurb[%i]) returned %i\n",
+ u, err);
return err;
}
err = usb_submit_urb(outurb, GFP_ATOMIC);
if (err < 0) {
- snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
- " returned %i\n", u, err);
+ snd_printk(KERN_ERR
+ "usb_submit_urb(sk->outurb[%i]) returned %i\n",
+ u, err);
return err;
}
@@ -716,8 +740,8 @@ check_retry:
snd_printd(KERN_DEBUG "goto dotry;\n");
goto dotry;
}
- snd_printk(KERN_WARNING"couldn't start"
- " all urbs on the same start_frame.\n");
+ snd_printk(KERN_WARNING
+ "couldn't start all urbs on the same start_frame.\n");
return -EFAULT;
}
@@ -729,6 +753,7 @@ check_retry:
/* wait, check */
{
int wait_ms = 3000;
+
while (s->state != usb_stream_ready && wait_ms > 0) {
snd_printdd(KERN_DEBUG "%i\n", s->state);
msleep(200);
@@ -745,6 +770,7 @@ check_retry:
void usb_stream_stop(struct usb_stream_kernel *sk)
{
int u;
+
if (!sk->s)
return;
for (u = 0; u < USB_STREAM_NURBS; ++u) {
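The rate helpers touched above store the sample rate as frames per USB (micro)frame in Q16.16 fixed point: rate/1000 for full speed and rate/8000 for high speed, with the +62 giving round-to-nearest against the divisor 125; the rate << 13 intermediate is what limits full speed to roughly 524 kHz in 32 bits, as the comment says. A stand-alone sketch of the same arithmetic:

#include <stdio.h>

/* same formulas as get_usb_full_speed_rate()/get_usb_high_speed_rate() */
static unsigned int full_speed_rate(unsigned int rate)
{
	/* rate/1000 in Q16.16: rate * 65536 / 1000 == rate * 8192 / 125 */
	return ((rate << 13) + 62) / 125;
}

static unsigned int high_speed_rate(unsigned int rate)
{
	/* rate/8000 in Q16.16: rate * 65536 / 8000 == rate * 1024 / 125 */
	return ((rate << 10) + 62) / 125;
}

int main(void)
{
	unsigned int fs = full_speed_rate(44100);	/* 44 + 6554/65536 ~= 44.1 */
	unsigned int hs = high_speed_rate(44100);	/* ~= 5.5125 */

	printf("fs %u + %u/65536 frames/frame\n", fs >> 16, fs & 0xffff);
	printf("hs %u + %u/65536 frames/microframe\n", hs >> 16, hs & 0xffff);
	return 0;
}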
diff --git a/sound/usb/usx2y/usb_stream.h b/sound/usb/usx2y/usb_stream.h
index 851358a8d709..73e57b341adc 100644
--- a/sound/usb/usx2y/usb_stream.h
+++ b/sound/usb/usx2y/usb_stream.h
@@ -12,7 +12,7 @@ struct usb_stream_kernel {
void *write_page;
- unsigned n_o_ps;
+ unsigned int n_o_ps;
struct urb *inurb[USB_STREAM_NURBS];
struct urb *idle_inurb;
@@ -26,18 +26,21 @@ struct usb_stream_kernel {
wait_queue_head_t sleep;
- unsigned out_phase;
- unsigned out_phase_peeked;
- unsigned freqn;
+ unsigned int out_phase;
+ unsigned int out_phase_peeked;
+ unsigned int freqn;
};
struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
struct usb_device *dev,
- unsigned in_endpoint, unsigned out_endpoint,
- unsigned sample_rate, unsigned use_packsize,
- unsigned period_frames, unsigned frame_size);
-void usb_stream_free(struct usb_stream_kernel *);
-int usb_stream_start(struct usb_stream_kernel *);
-void usb_stream_stop(struct usb_stream_kernel *);
+ unsigned int in_endpoint,
+ unsigned int out_endpoint,
+ unsigned int sample_rate,
+ unsigned int use_packsize,
+ unsigned int period_frames,
+ unsigned int frame_size);
+void usb_stream_free(struct usb_stream_kernel *sk);
+int usb_stream_start(struct usb_stream_kernel *sk);
+void usb_stream_stop(struct usb_stream_kernel *sk);
#endif /* __USB_STREAM_H */
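Taken together with the us122l.c hunks earlier in this patch, the intended call sequence for this API is: usb_stream_new() to allocate the shared read/write pages and URBs, usb_stream_start() to submit the iso URBs and synchronise, then usb_stream_stop() and usb_stream_free() on teardown (the latter now tolerating a NULL write_page). A rough caller sketch; the endpoint, rate, period and frame-size values are illustrative, not taken from the driver:

static int example_stream_up(struct usb_stream_kernel *sk,
			     struct usb_device *dev)
{
	int err;

	/* 0x0e in/out, 44.1 kHz, 4096-frame periods, 6-byte frames: made up */
	if (!usb_stream_new(sk, dev, 0xe, 0xe, 44100, 0, 4096, 6))
		return -ENOMEM;	/* usb_stream_new() left sk->s unset */

	err = usb_stream_start(sk);
	if (err < 0) {
		usb_stream_free(sk);
		return err;
	}
	return 0;
}

static void example_stream_down(struct usb_stream_kernel *sk)
{
	usb_stream_stop(sk);	/* kills the in/out URBs */
	usb_stream_free(sk);	/* releases the pages and sk->s */
}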
diff --git a/sound/usb/usx2y/usbus428ctldefs.h b/sound/usb/usx2y/usbus428ctldefs.h
index 5a7518ea3aeb..9ba15d974e63 100644
--- a/sound/usb/usx2y/usbus428ctldefs.h
+++ b/sound/usb/usx2y/usbus428ctldefs.h
@@ -4,28 +4,28 @@
* Copyright (c) 2003 by Karsten Wiese <annabellesgarden@yahoo.de>
*/
-enum E_In84{
- eFader0 = 0,
- eFader1,
- eFader2,
- eFader3,
- eFader4,
- eFader5,
- eFader6,
- eFader7,
- eFaderM,
- eTransport,
- eModifier = 10,
- eFilterSelect,
- eSelect,
- eMute,
+enum E_IN84 {
+ E_FADER_0 = 0,
+ E_FADER_1,
+ E_FADER_2,
+ E_FADER_3,
+ E_FADER_4,
+ E_FADER_5,
+ E_FADER_6,
+ E_FADER_7,
+ E_FADER_M,
+ E_TRANSPORT,
+ E_MODIFIER = 10,
+ E_FILTER_SELECT,
+ E_SELECT,
+ E_MUTE,
- eSwitch = 15,
- eWheelGain,
- eWheelFreq,
- eWheelQ,
- eWheelPan,
- eWheel = 20
+ E_SWITCH = 15,
+ E_WHEEL_GAIN,
+ E_WHEEL_FREQ,
+ E_WHEEL_Q,
+ E_WHEEL_PAN,
+ E_WHEEL = 20
};
#define T_RECORD 1
@@ -39,53 +39,55 @@ enum E_In84{
struct us428_ctls {
- unsigned char Fader[9];
- unsigned char Transport;
- unsigned char Modifier;
- unsigned char FilterSelect;
- unsigned char Select;
- unsigned char Mute;
- unsigned char UNKNOWN;
- unsigned char Switch;
- unsigned char Wheel[5];
+ unsigned char fader[9];
+ unsigned char transport;
+ unsigned char modifier;
+ unsigned char filters_elect;
+ unsigned char select;
+ unsigned char mute;
+ unsigned char unknown;
+ unsigned char wswitch;
+ unsigned char wheel[5];
};
-struct us428_setByte {
- unsigned char Offset,
- Value;
+struct us428_set_byte {
+ unsigned char offset,
+ value;
};
enum {
- eLT_Volume = 0,
- eLT_Light
+ ELT_VOLUME = 0,
+ ELT_LIGHT
};
-struct usX2Y_volume {
- unsigned char Channel,
- LH,
- LL,
- RH,
- RL;
+struct usx2y_volume {
+ unsigned char channel,
+ lh,
+ ll,
+ rh,
+ rl;
};
struct us428_lights {
- struct us428_setByte Light[7];
+ struct us428_set_byte light[7];
};
struct us428_p4out {
char type;
union {
- struct usX2Y_volume vol;
+ struct usx2y_volume vol;
struct us428_lights lights;
} val;
};
-#define N_us428_ctl_BUFS 16
-#define N_us428_p4out_BUFS 16
-struct us428ctls_sharedmem{
- struct us428_ctls CtlSnapShot[N_us428_ctl_BUFS];
- int CtlSnapShotDiffersAt[N_us428_ctl_BUFS];
- int CtlSnapShotLast, CtlSnapShotRed;
- struct us428_p4out p4out[N_us428_p4out_BUFS];
- int p4outLast, p4outSent;
+#define N_US428_CTL_BUFS 16
+#define N_US428_P4OUT_BUFS 16
+struct us428ctls_sharedmem {
+ struct us428_ctls ctl_snapshot[N_US428_CTL_BUFS];
+ int ctl_snapshot_differs_at[N_US428_CTL_BUFS];
+ int ctl_snapshot_last, ctl_snapshot_red;
+ struct us428_p4out p4out[N_US428_P4OUT_BUFS];
+ int p4out_last, p4out_sent;
};
+
+#define US428_SHAREDMEM_PAGES PAGE_ALIGN(sizeof(struct us428ctls_sharedmem))
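Despite the name, US428_SHAREDMEM_PAGES is a byte count: PAGE_ALIGN() rounds sizeof(struct us428ctls_sharedmem) up to a whole number of pages, and the same constant now backs both alloc_pages_exact()/free_pages_exact() and the length check in snd_us428ctls_mmap(). The userspace side of that hwdep mmap/poll interface looks roughly like the sketch below; the device path and the userspace copy of this header are assumptions (us428control historically ships its own copy of the layout):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <sys/mman.h>
#include "usbus428ctldefs.h"	/* userspace copy of the shared-mem layout */

int example_read_controls(void)
{
	int fd = open("/dev/snd/hwC0D0", O_RDWR);	/* assumed hwdep node */
	struct us428ctls_sharedmem *shm;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return -1;
	/* the mapping length is page-aligned, so it must stay within
	 * US428_SHAREDMEM_PAGES or the driver returns -EINVAL */
	shm = mmap(NULL, sizeof(*shm), PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);
	if (shm == MAP_FAILED) {
		close(fd);
		return -1;
	}

	poll(&pfd, 1, -1);	/* POLLIN once ctl_snapshot_last advances */
	/* shm->ctl_snapshot[shm->ctl_snapshot_last] is the newest snapshot */

	munmap(shm, sizeof(*shm));
	close(fd);
	return 0;
}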
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index 3cd28d24f0a7..099bee662af6 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -17,7 +17,7 @@
2004-10-26 Karsten Wiese
Version 0.8.6:
- wake_up() process waiting in usX2Y_urbs_start() on error.
+ wake_up() process waiting in usx2y_urbs_start() on error.
2004-10-21 Karsten Wiese
Version 0.8.5:
@@ -48,7 +48,7 @@
2004-06-12 Karsten Wiese
Version 0.6.3:
Made it thus the following rule is enforced:
- "All pcm substreams of one usX2Y have to operate at the same rate & format."
+ "All pcm substreams of one usx2y have to operate at the same rate & format."
2004-04-06 Karsten Wiese
Version 0.6.0:
@@ -70,7 +70,7 @@
2003-11-03 Karsten Wiese
Version 0.3:
- 24Bit support.
+ 24Bit support.
"arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works.
2003-08-22 Karsten Wiese
@@ -94,16 +94,15 @@
This helped me much on my slowish PII 400 & PIII 500.
ACPI yet untested but might cause the same bad behaviour.
Use a kernel with lowlatency and preemptiv patches applied.
- To autoload snd-usb-midi append a line
+ To autoload snd-usb-midi append a line
post-install snd-usb-us428 modprobe snd-usb-midi
to /etc/modules.conf.
known problems:
sliders, knobs, lights not yet handled except MASTER Volume slider.
- "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does.
+ "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does.
KDE3: "Enable full duplex operation" deadlocks.
-
2002-08-31 Karsten Wiese
Version 0.0.3: audio also simplex;
simplifying: iso urbs only 1 packet, melted structs.
@@ -115,7 +114,7 @@
The firmware has been sniffed from win2k us-428 driver 3.09.
* Copyright (c) 2002 - 2004 Karsten Wiese
-*/
+ */
#include <linux/init.h>
#include <linux/module.h>
@@ -132,14 +131,12 @@
#include "usbusx2y.h"
#include "usX2Yhwdep.h"
-
-
MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");
MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");
MODULE_LICENSE("GPL");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
-static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
module_param_array(index, int, NULL, 0444);
@@ -149,305 +146,321 @@ MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS".");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS".");
+static int snd_usx2y_card_used[SNDRV_CARDS];
-static int snd_usX2Y_card_used[SNDRV_CARDS];
+static void snd_usx2y_card_private_free(struct snd_card *card);
+static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s);
-static void usX2Y_usb_disconnect(struct usb_device* usb_device, void* ptr);
-static void snd_usX2Y_card_private_free(struct snd_card *card);
-
-/*
- * pipe 4 is used for switching the lamps, setting samplerate, volumes ....
+/*
+ * pipe 4 is used for switching the lamps, setting samplerate, volumes ....
*/
-static void i_usX2Y_Out04Int(struct urb *urb)
+static void i_usx2y_out04_int(struct urb *urb)
{
#ifdef CONFIG_SND_DEBUG
if (urb->status) {
- int i;
- struct usX2Ydev *usX2Y = urb->context;
- for (i = 0; i < 10 && usX2Y->AS04.urb[i] != urb; i++);
- snd_printdd("i_usX2Y_Out04Int() urb %i status=%i\n", i, urb->status);
+ int i;
+ struct usx2ydev *usx2y = urb->context;
+
+ for (i = 0; i < 10 && usx2y->as04.urb[i] != urb; i++)
+ ;
+ snd_printdd("%s urb %i status=%i\n", __func__, i, urb->status);
}
#endif
}
-static void i_usX2Y_In04Int(struct urb *urb)
+static void i_usx2y_in04_int(struct urb *urb)
{
int err = 0;
- struct usX2Ydev *usX2Y = urb->context;
- struct us428ctls_sharedmem *us428ctls = usX2Y->us428ctls_sharedmem;
+ struct usx2ydev *usx2y = urb->context;
+ struct us428ctls_sharedmem *us428ctls = usx2y->us428ctls_sharedmem;
+ struct us428_p4out *p4out;
+ int i, j, n, diff, send;
- usX2Y->In04IntCalls++;
+ usx2y->in04_int_calls++;
if (urb->status) {
snd_printdd("Interrupt Pipe 4 came back with status=%i\n", urb->status);
return;
}
- // printk("%i:0x%02X ", 8, (int)((unsigned char*)usX2Y->In04Buf)[8]); Master volume shows 0 here if fader is at max during boot ?!?
+ // printk("%i:0x%02X ", 8, (int)((unsigned char*)usx2y->in04_buf)[8]); Master volume shows 0 here if fader is at max during boot ?!?
if (us428ctls) {
- int diff = -1;
- if (-2 == us428ctls->CtlSnapShotLast) {
+ diff = -1;
+ if (us428ctls->ctl_snapshot_last == -2) {
diff = 0;
- memcpy(usX2Y->In04Last, usX2Y->In04Buf, sizeof(usX2Y->In04Last));
- us428ctls->CtlSnapShotLast = -1;
+ memcpy(usx2y->in04_last, usx2y->in04_buf, sizeof(usx2y->in04_last));
+ us428ctls->ctl_snapshot_last = -1;
} else {
- int i;
for (i = 0; i < 21; i++) {
- if (usX2Y->In04Last[i] != ((char*)usX2Y->In04Buf)[i]) {
+ if (usx2y->in04_last[i] != ((char *)usx2y->in04_buf)[i]) {
if (diff < 0)
diff = i;
- usX2Y->In04Last[i] = ((char*)usX2Y->In04Buf)[i];
+ usx2y->in04_last[i] = ((char *)usx2y->in04_buf)[i];
}
}
}
- if (0 <= diff) {
- int n = us428ctls->CtlSnapShotLast + 1;
- if (n >= N_us428_ctl_BUFS || n < 0)
+ if (diff >= 0) {
+ n = us428ctls->ctl_snapshot_last + 1;
+ if (n >= N_US428_CTL_BUFS || n < 0)
n = 0;
- memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf, sizeof(us428ctls->CtlSnapShot[0]));
- us428ctls->CtlSnapShotDiffersAt[n] = diff;
- us428ctls->CtlSnapShotLast = n;
- wake_up(&usX2Y->us428ctls_wait_queue_head);
+ memcpy(us428ctls->ctl_snapshot + n, usx2y->in04_buf, sizeof(us428ctls->ctl_snapshot[0]));
+ us428ctls->ctl_snapshot_differs_at[n] = diff;
+ us428ctls->ctl_snapshot_last = n;
+ wake_up(&usx2y->us428ctls_wait_queue_head);
}
}
-
-
- if (usX2Y->US04) {
- if (0 == usX2Y->US04->submitted)
+
+ if (usx2y->us04) {
+ if (!usx2y->us04->submitted) {
do {
- err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC);
- } while (!err && usX2Y->US04->submitted < usX2Y->US04->len);
- } else
- if (us428ctls && us428ctls->p4outLast >= 0 && us428ctls->p4outLast < N_us428_p4out_BUFS) {
- if (us428ctls->p4outLast != us428ctls->p4outSent) {
- int j, send = us428ctls->p4outSent + 1;
- if (send >= N_us428_p4out_BUFS)
+ err = usb_submit_urb(usx2y->us04->urb[usx2y->us04->submitted++], GFP_ATOMIC);
+ } while (!err && usx2y->us04->submitted < usx2y->us04->len);
+ }
+ } else {
+ if (us428ctls && us428ctls->p4out_last >= 0 && us428ctls->p4out_last < N_US428_P4OUT_BUFS) {
+ if (us428ctls->p4out_last != us428ctls->p4out_sent) {
+ send = us428ctls->p4out_sent + 1;
+ if (send >= N_US428_P4OUT_BUFS)
send = 0;
- for (j = 0; j < URBS_AsyncSeq && !err; ++j)
- if (0 == usX2Y->AS04.urb[j]->status) {
- struct us428_p4out *p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost.
- usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->dev,
- usb_sndbulkpipe(usX2Y->dev, 0x04), &p4out->val.vol,
- p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5,
- i_usX2Y_Out04Int, usX2Y);
- err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC);
- us428ctls->p4outSent = send;
+ for (j = 0; j < URBS_ASYNC_SEQ && !err; ++j) {
+ if (!usx2y->as04.urb[j]->status) {
+ p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost.
+ usb_fill_bulk_urb(usx2y->as04.urb[j], usx2y->dev,
+ usb_sndbulkpipe(usx2y->dev, 0x04), &p4out->val.vol,
+ p4out->type == ELT_LIGHT ? sizeof(struct us428_lights) : 5,
+ i_usx2y_out04_int, usx2y);
+ err = usb_submit_urb(usx2y->as04.urb[j], GFP_ATOMIC);
+ us428ctls->p4out_sent = send;
break;
}
+ }
}
}
+ }
if (err)
- snd_printk(KERN_ERR "In04Int() usb_submit_urb err=%i\n", err);
+ snd_printk(KERN_ERR "in04_int() usb_submit_urb err=%i\n", err);
- urb->dev = usX2Y->dev;
+ urb->dev = usx2y->dev;
usb_submit_urb(urb, GFP_ATOMIC);
}
/*
* Prepare some urbs
*/
-int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y)
+int usx2y_async_seq04_init(struct usx2ydev *usx2y)
{
- int err = 0,
- i;
+ int err = 0, i;
+
+ if (WARN_ON(usx2y->as04.buffer))
+ return -EBUSY;
- usX2Y->AS04.buffer = kmalloc_array(URBS_AsyncSeq,
- URB_DataLen_AsyncSeq, GFP_KERNEL);
- if (NULL == usX2Y->AS04.buffer) {
+ usx2y->as04.buffer = kmalloc_array(URBS_ASYNC_SEQ,
+ URB_DATA_LEN_ASYNC_SEQ, GFP_KERNEL);
+ if (!usx2y->as04.buffer) {
err = -ENOMEM;
- } else
- for (i = 0; i < URBS_AsyncSeq; ++i) {
- if (NULL == (usX2Y->AS04.urb[i] = usb_alloc_urb(0, GFP_KERNEL))) {
+ } else {
+ for (i = 0; i < URBS_ASYNC_SEQ; ++i) {
+ usx2y->as04.urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!usx2y->as04.urb[i]) {
err = -ENOMEM;
break;
}
- usb_fill_bulk_urb( usX2Y->AS04.urb[i], usX2Y->dev,
- usb_sndbulkpipe(usX2Y->dev, 0x04),
- usX2Y->AS04.buffer + URB_DataLen_AsyncSeq*i, 0,
- i_usX2Y_Out04Int, usX2Y
- );
- err = usb_urb_ep_type_check(usX2Y->AS04.urb[i]);
+ usb_fill_bulk_urb(usx2y->as04.urb[i], usx2y->dev,
+ usb_sndbulkpipe(usx2y->dev, 0x04),
+ usx2y->as04.buffer + URB_DATA_LEN_ASYNC_SEQ * i, 0,
+ i_usx2y_out04_int, usx2y);
+ err = usb_urb_ep_type_check(usx2y->as04.urb[i]);
if (err < 0)
break;
}
+ }
+ if (err)
+ usx2y_unlinkseq(&usx2y->as04);
return err;
}
-int usX2Y_In04_init(struct usX2Ydev *usX2Y)
+int usx2y_in04_init(struct usx2ydev *usx2y)
{
- if (! (usX2Y->In04urb = usb_alloc_urb(0, GFP_KERNEL)))
- return -ENOMEM;
-
- if (! (usX2Y->In04Buf = kmalloc(21, GFP_KERNEL)))
- return -ENOMEM;
-
- init_waitqueue_head(&usX2Y->In04WaitQueue);
- usb_fill_int_urb(usX2Y->In04urb, usX2Y->dev, usb_rcvintpipe(usX2Y->dev, 0x4),
- usX2Y->In04Buf, 21,
- i_usX2Y_In04Int, usX2Y,
+ int err;
+
+ if (WARN_ON(usx2y->in04_urb))
+ return -EBUSY;
+
+ usx2y->in04_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!usx2y->in04_urb) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ usx2y->in04_buf = kmalloc(21, GFP_KERNEL);
+ if (!usx2y->in04_buf) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ init_waitqueue_head(&usx2y->in04_wait_queue);
+ usb_fill_int_urb(usx2y->in04_urb, usx2y->dev, usb_rcvintpipe(usx2y->dev, 0x4),
+ usx2y->in04_buf, 21,
+ i_usx2y_in04_int, usx2y,
10);
- if (usb_urb_ep_type_check(usX2Y->In04urb))
- return -EINVAL;
- return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
+ if (usb_urb_ep_type_check(usx2y->in04_urb)) {
+ err = -EINVAL;
+ goto error;
+ }
+ return usb_submit_urb(usx2y->in04_urb, GFP_KERNEL);
+
+ error:
+ kfree(usx2y->in04_buf);
+ usb_free_urb(usx2y->in04_urb);
+ usx2y->in04_buf = NULL;
+ usx2y->in04_urb = NULL;
+ return err;
}
-static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S)
+static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s)
{
int i;
- for (i = 0; i < URBS_AsyncSeq; ++i) {
- usb_kill_urb(S->urb[i]);
- usb_free_urb(S->urb[i]);
- S->urb[i] = NULL;
+
+ for (i = 0; i < URBS_ASYNC_SEQ; ++i) {
+ if (!s->urb[i])
+ continue;
+ usb_kill_urb(s->urb[i]);
+ usb_free_urb(s->urb[i]);
+ s->urb[i] = NULL;
}
- kfree(S->buffer);
+ kfree(s->buffer);
+ s->buffer = NULL;
}
-
-static const struct usb_device_id snd_usX2Y_usb_id_table[] = {
+static const struct usb_device_id snd_usx2y_usb_id_table[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x1604,
- .idProduct = USB_ID_US428
+ .idProduct = USB_ID_US428
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x1604,
- .idProduct = USB_ID_US122
+ .idProduct = USB_ID_US122
},
- {
+ {
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x1604,
.idProduct = USB_ID_US224
},
{ /* terminator */ }
};
+MODULE_DEVICE_TABLE(usb, snd_usx2y_usb_id_table);
-static int usX2Y_create_card(struct usb_device *device,
+static int usx2y_create_card(struct usb_device *device,
struct usb_interface *intf,
struct snd_card **cardp)
{
int dev;
- struct snd_card * card;
+ struct snd_card *card;
int err;
for (dev = 0; dev < SNDRV_CARDS; ++dev)
- if (enable[dev] && !snd_usX2Y_card_used[dev])
+ if (enable[dev] && !snd_usx2y_card_used[dev])
break;
if (dev >= SNDRV_CARDS)
return -ENODEV;
err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE,
- sizeof(struct usX2Ydev), &card);
+ sizeof(struct usx2ydev), &card);
if (err < 0)
return err;
- snd_usX2Y_card_used[usX2Y(card)->card_index = dev] = 1;
- card->private_free = snd_usX2Y_card_private_free;
- usX2Y(card)->dev = device;
- init_waitqueue_head(&usX2Y(card)->prepare_wait_queue);
- mutex_init(&usX2Y(card)->pcm_mutex);
- INIT_LIST_HEAD(&usX2Y(card)->midi_list);
+ snd_usx2y_card_used[usx2y(card)->card_index = dev] = 1;
+ card->private_free = snd_usx2y_card_private_free;
+ usx2y(card)->dev = device;
+ init_waitqueue_head(&usx2y(card)->prepare_wait_queue);
+ init_waitqueue_head(&usx2y(card)->us428ctls_wait_queue_head);
+ mutex_init(&usx2y(card)->pcm_mutex);
+ INIT_LIST_HEAD(&usx2y(card)->midi_list);
strcpy(card->driver, "USB "NAME_ALLCAPS"");
sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)",
- card->shortname,
+ card->shortname,
le16_to_cpu(device->descriptor.idVendor),
le16_to_cpu(device->descriptor.idProduct),
0,//us428(card)->usbmidi.ifnum,
- usX2Y(card)->dev->bus->busnum, usX2Y(card)->dev->devnum
- );
+ usx2y(card)->dev->bus->busnum, usx2y(card)->dev->devnum);
*cardp = card;
return 0;
}
-
-static int usX2Y_usb_probe(struct usb_device *device,
- struct usb_interface *intf,
- const struct usb_device_id *device_id,
- struct snd_card **cardp)
+static void snd_usx2y_card_private_free(struct snd_card *card)
{
- int err;
- struct snd_card * card;
+ struct usx2ydev *usx2y = usx2y(card);
+
+ kfree(usx2y->in04_buf);
+ usb_free_urb(usx2y->in04_urb);
+ if (usx2y->us428ctls_sharedmem)
+ free_pages_exact(usx2y->us428ctls_sharedmem,
+ US428_SHAREDMEM_PAGES);
+ if (usx2y->card_index >= 0 && usx2y->card_index < SNDRV_CARDS)
+ snd_usx2y_card_used[usx2y->card_index] = 0;
+}
- *cardp = NULL;
- if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
- (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
- le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
- le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428))
- return -EINVAL;
+static void snd_usx2y_disconnect(struct usb_interface *intf)
+{
+ struct snd_card *card;
+ struct usx2ydev *usx2y;
+ struct list_head *p;
- err = usX2Y_create_card(device, intf, &card);
- if (err < 0)
- return err;
- if ((err = usX2Y_hwdep_new(card, device)) < 0 ||
- (err = snd_card_register(card)) < 0) {
- snd_card_free(card);
- return err;
+ card = usb_get_intfdata(intf);
+ if (!card)
+ return;
+ usx2y = usx2y(card);
+ usx2y->chip_status = USX2Y_STAT_CHIP_HUP;
+ usx2y_unlinkseq(&usx2y->as04);
+ usb_kill_urb(usx2y->in04_urb);
+ snd_card_disconnect(card);
+
+ /* release the midi resources */
+ list_for_each(p, &usx2y->midi_list) {
+ snd_usbmidi_disconnect(p);
}
- *cardp = card;
- return 0;
+ if (usx2y->us428ctls_sharedmem)
+ wake_up(&usx2y->us428ctls_wait_queue_head);
+ snd_card_free(card);
}
-/*
- * new 2.5 USB kernel API
- */
-static int snd_usX2Y_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int snd_usx2y_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
+ struct usb_device *device = interface_to_usbdev(intf);
struct snd_card *card;
int err;
- err = usX2Y_usb_probe(interface_to_usbdev(intf), intf, id, &card);
+ if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
+ (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
+ le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
+ le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428))
+ return -EINVAL;
+
+ err = usx2y_create_card(device, intf, &card);
if (err < 0)
return err;
+ err = usx2y_hwdep_new(card, device);
+ if (err < 0)
+ goto error;
+ err = snd_card_register(card);
+ if (err < 0)
+ goto error;
+
dev_set_drvdata(&intf->dev, card);
return 0;
-}
-static void snd_usX2Y_disconnect(struct usb_interface *intf)
-{
- usX2Y_usb_disconnect(interface_to_usbdev(intf),
- usb_get_intfdata(intf));
+ error:
+ snd_card_free(card);
+ return err;
}
-MODULE_DEVICE_TABLE(usb, snd_usX2Y_usb_id_table);
-static struct usb_driver snd_usX2Y_usb_driver = {
+static struct usb_driver snd_usx2y_usb_driver = {
.name = "snd-usb-usx2y",
- .probe = snd_usX2Y_probe,
- .disconnect = snd_usX2Y_disconnect,
- .id_table = snd_usX2Y_usb_id_table,
+ .probe = snd_usx2y_probe,
+ .disconnect = snd_usx2y_disconnect,
+ .id_table = snd_usx2y_usb_id_table,
};
-
-static void snd_usX2Y_card_private_free(struct snd_card *card)
-{
- kfree(usX2Y(card)->In04Buf);
- usb_free_urb(usX2Y(card)->In04urb);
- if (usX2Y(card)->us428ctls_sharedmem)
- free_pages_exact(usX2Y(card)->us428ctls_sharedmem,
- sizeof(*usX2Y(card)->us428ctls_sharedmem));
- if (usX2Y(card)->card_index >= 0 && usX2Y(card)->card_index < SNDRV_CARDS)
- snd_usX2Y_card_used[usX2Y(card)->card_index] = 0;
-}
-
-/*
- * Frees the device.
- */
-static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr)
-{
- if (ptr) {
- struct snd_card *card = ptr;
- struct usX2Ydev *usX2Y = usX2Y(card);
- struct list_head *p;
- usX2Y->chip_status = USX2Y_STAT_CHIP_HUP;
- usX2Y_unlinkSeq(&usX2Y->AS04);
- usb_kill_urb(usX2Y->In04urb);
- snd_card_disconnect(card);
- /* release the midi resources */
- list_for_each(p, &usX2Y->midi_list) {
- snd_usbmidi_disconnect(p);
- }
- if (usX2Y->us428ctls_sharedmem)
- wake_up(&usX2Y->us428ctls_wait_queue_head);
- snd_card_free(card);
- }
-}
-
-module_usb_driver(snd_usX2Y_usb_driver);
+module_usb_driver(snd_usx2y_usb_driver);
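module_usb_driver() keeps generating the usual registration boilerplate for the renamed snd_usx2y_usb_driver; roughly (a sketch of the expansion, not verbatim):

static int __init snd_usx2y_usb_driver_init(void)
{
	return usb_register(&snd_usx2y_usb_driver);
}
module_init(snd_usx2y_usb_driver_init);

static void __exit snd_usx2y_usb_driver_exit(void)
{
	usb_deregister(&snd_usx2y_usb_driver);
}
module_exit(snd_usx2y_usb_driver_exit);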
diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
index 144b85f57bd2..8d82f5cc2fe1 100644
--- a/sound/usb/usx2y/usbusx2y.h
+++ b/sound/usb/usx2y/usbusx2y.h
@@ -3,19 +3,19 @@
#define USBUSX2Y_H
#include "../usbaudio.h"
#include "../midi.h"
-#include "usbus428ctldefs.h"
+#include "usbus428ctldefs.h"
-#define NRURBS 2
+#define NRURBS 2
-#define URBS_AsyncSeq 10
-#define URB_DataLen_AsyncSeq 32
-struct snd_usX2Y_AsyncSeq {
- struct urb *urb[URBS_AsyncSeq];
+#define URBS_ASYNC_SEQ 10
+#define URB_DATA_LEN_ASYNC_SEQ 32
+struct snd_usx2y_async_seq {
+ struct urb *urb[URBS_ASYNC_SEQ];
char *buffer;
};
-struct snd_usX2Y_urbSeq {
+struct snd_usx2y_urb_seq {
int submitted;
int len;
struct urb *urb[];
@@ -23,17 +23,17 @@ struct snd_usX2Y_urbSeq {
#include "usx2yhwdeppcm.h"
-struct usX2Ydev {
+struct usx2ydev {
struct usb_device *dev;
int card_index;
int stride;
- struct urb *In04urb;
- void *In04Buf;
- char In04Last[24];
- unsigned In04IntCalls;
- struct snd_usX2Y_urbSeq *US04;
- wait_queue_head_t In04WaitQueue;
- struct snd_usX2Y_AsyncSeq AS04;
+ struct urb *in04_urb;
+ void *in04_buf;
+ char in04_last[24];
+ unsigned int in04_int_calls;
+ struct snd_usx2y_urb_seq *us04;
+ wait_queue_head_t in04_wait_queue;
+ struct snd_usx2y_async_seq as04;
unsigned int rate,
format;
int chip_status;
@@ -41,31 +41,30 @@ struct usX2Ydev {
struct us428ctls_sharedmem *us428ctls_sharedmem;
int wait_iso_frame;
wait_queue_head_t us428ctls_wait_queue_head;
- struct snd_usX2Y_hwdep_pcm_shm *hwdep_pcm_shm;
- struct snd_usX2Y_substream *subs[4];
- struct snd_usX2Y_substream * volatile prepare_subs;
+ struct snd_usx2y_hwdep_pcm_shm *hwdep_pcm_shm;
+ struct snd_usx2y_substream *subs[4];
+ struct snd_usx2y_substream * volatile prepare_subs;
wait_queue_head_t prepare_wait_queue;
struct list_head midi_list;
- struct list_head pcm_list;
int pcm_devs;
};
-struct snd_usX2Y_substream {
- struct usX2Ydev *usX2Y;
+struct snd_usx2y_substream {
+ struct usx2ydev *usx2y;
struct snd_pcm_substream *pcm_substream;
- int endpoint;
+ int endpoint;
unsigned int maxpacksize; /* max packet size in bytes */
atomic_t state;
-#define state_STOPPED 0
-#define state_STARTING1 1
-#define state_STARTING2 2
-#define state_STARTING3 3
-#define state_PREPARED 4
-#define state_PRERUNNING 6
-#define state_RUNNING 8
+#define STATE_STOPPED 0
+#define STATE_STARTING1 1
+#define STATE_STARTING2 2
+#define STATE_STARTING3 3
+#define STATE_PREPARED 4
+#define STATE_PRERUNNING 6
+#define STATE_RUNNING 8
int hwptr; /* free frame position in the buffer (only for playback) */
int hwptr_done; /* processed frame position in the buffer */
@@ -77,12 +76,12 @@ struct snd_usX2Y_substream {
};
-#define usX2Y(c) ((struct usX2Ydev *)(c)->private_data)
+#define usx2y(c) ((struct usx2ydev *)(c)->private_data)
-int usX2Y_audio_create(struct snd_card *card);
+int usx2y_audio_create(struct snd_card *card);
-int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y);
-int usX2Y_In04_init(struct usX2Ydev *usX2Y);
+int usx2y_async_seq04_init(struct usx2ydev *usx2y);
+int usx2y_in04_init(struct usx2ydev *usx2y);
#define NAME_ALLCAPS "US-X2Y"
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index ecaf41265dcd..c39cc6851e2d 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -11,7 +11,7 @@
*
* Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
*
- * Many codes borrowed from audio.c by
+ * Many codes borrowed from audio.c by
* Alan Cox (alan@lxorguk.ukuu.org.uk)
* Thomas Sailer (sailer@ife.ee.ethz.ch)
*/
@@ -28,66 +28,69 @@
#include "usx2y.h"
#include "usbusx2y.h"
-#define USX2Y_NRPACKS 4 /* Default value used for nr of packs per urb.
- 1 to 4 have been tested ok on uhci.
- To use 3 on ohci, you'd need a patch:
- look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on
- "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425"
- .
- 1, 2 and 4 work out of the box on ohci, if I recall correctly.
- Bigger is safer operation,
- smaller gives lower latencies.
- */
-#define USX2Y_NRPACKS_VARIABLE y /* If your system works ok with this module's parameter
- nrpacks set to 1, you might as well comment
- this #define out, and thereby produce smaller, faster code.
- You'd also set USX2Y_NRPACKS to 1 then.
- */
+/* Default value used for nr of packs per urb.
+ * 1 to 4 have been tested ok on uhci.
+ * To use 3 on ohci, you'd need a patch:
+ * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on
+ * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425"
+ *
+ * 1, 2 and 4 work out of the box on ohci, if I recall correctly.
+ * Bigger is safer operation, smaller gives lower latencies.
+ */
+#define USX2Y_NRPACKS 4
+
+/* If your system works ok with this module's parameter
+ * nrpacks set to 1, you might as well comment
+ * this define out, and thereby produce smaller, faster code.
+ * You'd also set USX2Y_NRPACKS to 1 then.
+ */
+#define USX2Y_NRPACKS_VARIABLE 1
#ifdef USX2Y_NRPACKS_VARIABLE
- static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
- #define nr_of_packs() nrpacks
- module_param(nrpacks, int, 0444);
- MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
+static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
+#define nr_of_packs() nrpacks
+module_param(nrpacks, int, 0444);
+MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
#else
- #define nr_of_packs() USX2Y_NRPACKS
+#define nr_of_packs() USX2Y_NRPACKS
#endif
-
-static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
+static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs)
{
struct urb *urb = subs->completed_urb;
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
unsigned char *cp;
- int i, len, lens = 0, hwptr_done = subs->hwptr_done;
- struct usX2Ydev *usX2Y = subs->usX2Y;
+ int i, len, lens = 0, hwptr_done = subs->hwptr_done;
+ int cnt, blen;
+ struct usx2ydev *usx2y = subs->usx2y;
for (i = 0; i < nr_of_packs(); i++) {
- cp = (unsigned char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
if (urb->iso_frame_desc[i].status) { /* active? hmm, skip this */
- snd_printk(KERN_ERR "active frame status %i. "
- "Most probably some hardware problem.\n",
+ snd_printk(KERN_ERR
+ "active frame status %i. Most probably some hardware problem.\n",
urb->iso_frame_desc[i].status);
return urb->iso_frame_desc[i].status;
}
- len = urb->iso_frame_desc[i].actual_length / usX2Y->stride;
- if (! len) {
+ len = urb->iso_frame_desc[i].actual_length / usx2y->stride;
+ if (!len) {
snd_printd("0 == len ERROR!\n");
continue;
}
/* copy a data chunk */
if ((hwptr_done + len) > runtime->buffer_size) {
- int cnt = runtime->buffer_size - hwptr_done;
- int blen = cnt * usX2Y->stride;
- memcpy(runtime->dma_area + hwptr_done * usX2Y->stride, cp, blen);
- memcpy(runtime->dma_area, cp + blen, len * usX2Y->stride - blen);
+ cnt = runtime->buffer_size - hwptr_done;
+ blen = cnt * usx2y->stride;
+ memcpy(runtime->dma_area + hwptr_done * usx2y->stride, cp, blen);
+ memcpy(runtime->dma_area, cp + blen, len * usx2y->stride - blen);
} else {
- memcpy(runtime->dma_area + hwptr_done * usX2Y->stride, cp,
- len * usX2Y->stride);
+ memcpy(runtime->dma_area + hwptr_done * usx2y->stride, cp,
+ len * usx2y->stride);
}
lens += len;
- if ((hwptr_done += len) >= runtime->buffer_size)
+ hwptr_done += len;
+ if (hwptr_done >= runtime->buffer_size)
hwptr_done -= runtime->buffer_size;
}
@@ -100,6 +103,7 @@ static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
}
return 0;
}
+
/*
* prepare urb for playback data pipe
*
@@ -110,18 +114,18 @@ static int usX2Y_urb_capt_retire(struct snd_usX2Y_substream *subs)
* it directly from the buffer. thus the data is once copied to
* a temporary buffer and urb points to that.
*/
-static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
+static int usx2y_urb_play_prepare(struct snd_usx2y_substream *subs,
struct urb *cap_urb,
struct urb *urb)
{
- int count, counts, pack;
- struct usX2Ydev *usX2Y = subs->usX2Y;
+ struct usx2ydev *usx2y = subs->usx2y;
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ int count, counts, pack, len;
count = 0;
for (pack = 0; pack < nr_of_packs(); pack++) {
/* calculate the size of a packet */
- counts = cap_urb->iso_frame_desc[pack].actual_length / usX2Y->stride;
+ counts = cap_urb->iso_frame_desc[pack].actual_length / usx2y->stride;
count += counts;
if (counts < 43 || counts > 50) {
snd_printk(KERN_ERR "should not be here with counts=%i\n", counts);
@@ -134,29 +138,30 @@ static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
0;
urb->iso_frame_desc[pack].length = cap_urb->iso_frame_desc[pack].actual_length;
}
- if (atomic_read(&subs->state) >= state_PRERUNNING)
+ if (atomic_read(&subs->state) >= STATE_PRERUNNING) {
if (subs->hwptr + count > runtime->buffer_size) {
/* err, the transferred area goes over buffer boundary.
* copy the data to the temp buffer.
*/
- int len;
len = runtime->buffer_size - subs->hwptr;
urb->transfer_buffer = subs->tmpbuf;
memcpy(subs->tmpbuf, runtime->dma_area +
- subs->hwptr * usX2Y->stride, len * usX2Y->stride);
- memcpy(subs->tmpbuf + len * usX2Y->stride,
- runtime->dma_area, (count - len) * usX2Y->stride);
+ subs->hwptr * usx2y->stride, len * usx2y->stride);
+ memcpy(subs->tmpbuf + len * usx2y->stride,
+ runtime->dma_area, (count - len) * usx2y->stride);
subs->hwptr += count;
subs->hwptr -= runtime->buffer_size;
} else {
/* set the buffer pointer */
- urb->transfer_buffer = runtime->dma_area + subs->hwptr * usX2Y->stride;
- if ((subs->hwptr += count) >= runtime->buffer_size)
+ urb->transfer_buffer = runtime->dma_area + subs->hwptr * usx2y->stride;
+ subs->hwptr += count;
+ if (subs->hwptr >= runtime->buffer_size)
subs->hwptr -= runtime->buffer_size;
}
- else
+ } else {
urb->transfer_buffer = subs->tmpbuf;
- urb->transfer_buffer_length = count * usX2Y->stride;
+ }
+ urb->transfer_buffer_length = count * usx2y->stride;
return 0;
}
@@ -165,10 +170,10 @@ static int usX2Y_urb_play_prepare(struct snd_usX2Y_substream *subs,
*
* update the current position and call callback if a period is processed.
*/
-static void usX2Y_urb_play_retire(struct snd_usX2Y_substream *subs, struct urb *urb)
+static void usx2y_urb_play_retire(struct snd_usx2y_substream *subs, struct urb *urb)
{
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
- int len = urb->actual_length / subs->usX2Y->stride;
+ int len = urb->actual_length / subs->usx2y->stride;
subs->transfer_done += len;
subs->hwptr_done += len;
@@ -180,181 +185,195 @@ static void usX2Y_urb_play_retire(struct snd_usX2Y_substream *subs, struct urb *
}
}
-static int usX2Y_urb_submit(struct snd_usX2Y_substream *subs, struct urb *urb, int frame)
+static int usx2y_urb_submit(struct snd_usx2y_substream *subs, struct urb *urb, int frame)
{
int err;
+
if (!urb)
return -ENODEV;
- urb->start_frame = (frame + NRURBS * nr_of_packs()); // let hcd do rollover sanity checks
+ urb->start_frame = frame + NRURBS * nr_of_packs(); // let hcd do rollover sanity checks
urb->hcpriv = NULL;
- urb->dev = subs->usX2Y->dev; /* we need to set this at each time */
- if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
+ urb->dev = subs->usx2y->dev; /* we need to set this at each time */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
snd_printk(KERN_ERR "usb_submit_urb() returned %i\n", err);
return err;
}
return 0;
}
-static inline int usX2Y_usbframe_complete(struct snd_usX2Y_substream *capsubs,
- struct snd_usX2Y_substream *playbacksubs,
- int frame)
+static int usx2y_usbframe_complete(struct snd_usx2y_substream *capsubs,
+ struct snd_usx2y_substream *playbacksubs,
+ int frame)
{
int err, state;
struct urb *urb = playbacksubs->completed_urb;
state = atomic_read(&playbacksubs->state);
- if (NULL != urb) {
- if (state == state_RUNNING)
- usX2Y_urb_play_retire(playbacksubs, urb);
- else if (state >= state_PRERUNNING)
+ if (urb) {
+ if (state == STATE_RUNNING)
+ usx2y_urb_play_retire(playbacksubs, urb);
+ else if (state >= STATE_PRERUNNING)
atomic_inc(&playbacksubs->state);
} else {
switch (state) {
- case state_STARTING1:
+ case STATE_STARTING1:
urb = playbacksubs->urb[0];
atomic_inc(&playbacksubs->state);
break;
- case state_STARTING2:
+ case STATE_STARTING2:
urb = playbacksubs->urb[1];
atomic_inc(&playbacksubs->state);
break;
}
}
if (urb) {
- if ((err = usX2Y_urb_play_prepare(playbacksubs, capsubs->completed_urb, urb)) ||
- (err = usX2Y_urb_submit(playbacksubs, urb, frame))) {
+ err = usx2y_urb_play_prepare(playbacksubs, capsubs->completed_urb, urb);
+ if (err)
+ return err;
+ err = usx2y_urb_submit(playbacksubs, urb, frame);
+ if (err)
return err;
- }
}
playbacksubs->completed_urb = NULL;
state = atomic_read(&capsubs->state);
- if (state >= state_PREPARED) {
- if (state == state_RUNNING) {
- if ((err = usX2Y_urb_capt_retire(capsubs)))
+ if (state >= STATE_PREPARED) {
+ if (state == STATE_RUNNING) {
+ err = usx2y_urb_capt_retire(capsubs);
+ if (err)
return err;
- } else if (state >= state_PRERUNNING)
+ } else if (state >= STATE_PRERUNNING) {
atomic_inc(&capsubs->state);
- if ((err = usX2Y_urb_submit(capsubs, capsubs->completed_urb, frame)))
+ }
+ err = usx2y_urb_submit(capsubs, capsubs->completed_urb, frame);
+ if (err)
return err;
}
capsubs->completed_urb = NULL;
return 0;
}
-
-static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
+static void usx2y_clients_stop(struct usx2ydev *usx2y)
{
+ struct snd_usx2y_substream *subs;
+ struct urb *urb;
int s, u;
for (s = 0; s < 4; s++) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[s];
+ subs = usx2y->subs[s];
if (subs) {
snd_printdd("%i %p state=%i\n", s, subs, atomic_read(&subs->state));
- atomic_set(&subs->state, state_STOPPED);
+ atomic_set(&subs->state, STATE_STOPPED);
}
}
for (s = 0; s < 4; s++) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[s];
+ subs = usx2y->subs[s];
if (subs) {
- if (atomic_read(&subs->state) >= state_PRERUNNING)
+ if (atomic_read(&subs->state) >= STATE_PRERUNNING)
snd_pcm_stop_xrun(subs->pcm_substream);
for (u = 0; u < NRURBS; u++) {
- struct urb *urb = subs->urb[u];
- if (NULL != urb)
+ urb = subs->urb[u];
+ if (urb)
snd_printdd("%i status=%i start_frame=%i\n",
u, urb->status, urb->start_frame);
}
}
}
- usX2Y->prepare_subs = NULL;
- wake_up(&usX2Y->prepare_wait_queue);
+ usx2y->prepare_subs = NULL;
+ wake_up(&usx2y->prepare_wait_queue);
}
-static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
- struct snd_usX2Y_substream *subs, struct urb *urb)
+static void usx2y_error_urb_status(struct usx2ydev *usx2y,
+ struct snd_usx2y_substream *subs, struct urb *urb)
{
snd_printk(KERN_ERR "ep=%i stalled with status=%i\n", subs->endpoint, urb->status);
urb->status = 0;
- usX2Y_clients_stop(usX2Y);
+ usx2y_clients_stop(usx2y);
}
-static void i_usX2Y_urb_complete(struct urb *urb)
+static void i_usx2y_urb_complete(struct urb *urb)
{
- struct snd_usX2Y_substream *subs = urb->context;
- struct usX2Ydev *usX2Y = subs->usX2Y;
+ struct snd_usx2y_substream *subs = urb->context;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_substream *capsubs, *playbacksubs;
- if (unlikely(atomic_read(&subs->state) < state_PREPARED)) {
+ if (unlikely(atomic_read(&subs->state) < STATE_PREPARED)) {
snd_printdd("hcd_frame=%i ep=%i%s status=%i start_frame=%i\n",
- usb_get_current_frame_number(usX2Y->dev),
+ usb_get_current_frame_number(usx2y->dev),
subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
urb->status, urb->start_frame);
return;
}
if (unlikely(urb->status)) {
- usX2Y_error_urb_status(usX2Y, subs, urb);
+ usx2y_error_urb_status(usx2y, subs, urb);
return;
}
subs->completed_urb = urb;
- {
- struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
- *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
- if (capsubs->completed_urb &&
- atomic_read(&capsubs->state) >= state_PREPARED &&
- (playbacksubs->completed_urb ||
- atomic_read(&playbacksubs->state) < state_PREPARED)) {
- if (!usX2Y_usbframe_complete(capsubs, playbacksubs, urb->start_frame))
- usX2Y->wait_iso_frame += nr_of_packs();
- else {
- snd_printdd("\n");
- usX2Y_clients_stop(usX2Y);
- }
+ capsubs = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ playbacksubs = usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+
+ if (capsubs->completed_urb &&
+ atomic_read(&capsubs->state) >= STATE_PREPARED &&
+ (playbacksubs->completed_urb ||
+ atomic_read(&playbacksubs->state) < STATE_PREPARED)) {
+ if (!usx2y_usbframe_complete(capsubs, playbacksubs, urb->start_frame)) {
+ usx2y->wait_iso_frame += nr_of_packs();
+ } else {
+ snd_printdd("\n");
+ usx2y_clients_stop(usx2y);
}
}
}
-static void usX2Y_urbs_set_complete(struct usX2Ydev * usX2Y,
+static void usx2y_urbs_set_complete(struct usx2ydev *usx2y,
void (*complete)(struct urb *))
{
+ struct snd_usx2y_substream *subs;
+ struct urb *urb;
int s, u;
+
for (s = 0; s < 4; s++) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[s];
- if (NULL != subs)
+ subs = usx2y->subs[s];
+ if (subs) {
for (u = 0; u < NRURBS; u++) {
- struct urb * urb = subs->urb[u];
- if (NULL != urb)
+ urb = subs->urb[u];
+ if (urb)
urb->complete = complete;
}
+ }
}
}
-static void usX2Y_subs_startup_finish(struct usX2Ydev * usX2Y)
+static void usx2y_subs_startup_finish(struct usx2ydev *usx2y)
{
- usX2Y_urbs_set_complete(usX2Y, i_usX2Y_urb_complete);
- usX2Y->prepare_subs = NULL;
+ usx2y_urbs_set_complete(usx2y, i_usx2y_urb_complete);
+ usx2y->prepare_subs = NULL;
}
-static void i_usX2Y_subs_startup(struct urb *urb)
+static void i_usx2y_subs_startup(struct urb *urb)
{
- struct snd_usX2Y_substream *subs = urb->context;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- struct snd_usX2Y_substream *prepare_subs = usX2Y->prepare_subs;
- if (NULL != prepare_subs)
+ struct snd_usx2y_substream *subs = urb->context;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_substream *prepare_subs = usx2y->prepare_subs;
+
+ if (prepare_subs) {
if (urb->start_frame == prepare_subs->urb[0]->start_frame) {
- usX2Y_subs_startup_finish(usX2Y);
+ usx2y_subs_startup_finish(usx2y);
atomic_inc(&prepare_subs->state);
- wake_up(&usX2Y->prepare_wait_queue);
+ wake_up(&usx2y->prepare_wait_queue);
}
+ }
- i_usX2Y_urb_complete(urb);
+ i_usx2y_urb_complete(urb);
}
-static void usX2Y_subs_prepare(struct snd_usX2Y_substream *subs)
+static void usx2y_subs_prepare(struct snd_usx2y_substream *subs)
{
- snd_printdd("usX2Y_substream_prepare(%p) ep=%i urb0=%p urb1=%p\n",
+ snd_printdd("usx2y_substream_prepare(%p) ep=%i urb0=%p urb1=%p\n",
subs, subs->endpoint, subs->urb[0], subs->urb[1]);
/* reset the pointer */
subs->hwptr = 0;
@@ -362,8 +381,7 @@ static void usX2Y_subs_prepare(struct snd_usX2Y_substream *subs)
subs->transfer_done = 0;
}
-
-static void usX2Y_urb_release(struct urb **urb, int free_tb)
+static void usx2y_urb_release(struct urb **urb, int free_tb)
{
if (*urb) {
usb_kill_urb(*urb);
@@ -373,29 +391,33 @@ static void usX2Y_urb_release(struct urb **urb, int free_tb)
*urb = NULL;
}
}
+
/*
* release a substreams urbs
*/
-static void usX2Y_urbs_release(struct snd_usX2Y_substream *subs)
+static void usx2y_urbs_release(struct snd_usx2y_substream *subs)
{
int i;
- snd_printdd("usX2Y_urbs_release() %i\n", subs->endpoint);
+
+ snd_printdd("%s %i\n", __func__, subs->endpoint);
for (i = 0; i < NRURBS; i++)
- usX2Y_urb_release(subs->urb + i,
- subs != subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]);
+ usx2y_urb_release(subs->urb + i,
+ subs != subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK]);
kfree(subs->tmpbuf);
subs->tmpbuf = NULL;
}
+
/*
* initialize a substream's urbs
*/
-static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
+static int usx2y_urbs_allocate(struct snd_usx2y_substream *subs)
{
int i;
unsigned int pipe;
- int is_playback = subs == subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
- struct usb_device *dev = subs->usX2Y->dev;
+ int is_playback = subs == subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+ struct usb_device *dev = subs->usx2y->dev;
+ struct urb **purb;
pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
usb_rcvisocpipe(dev, subs->endpoint);
@@ -403,21 +425,21 @@ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
if (!subs->maxpacksize)
return -EINVAL;
- if (is_playback && NULL == subs->tmpbuf) { /* allocate a temporary buffer for playback */
+ if (is_playback && !subs->tmpbuf) { /* allocate a temporary buffer for playback */
subs->tmpbuf = kcalloc(nr_of_packs(), subs->maxpacksize, GFP_KERNEL);
if (!subs->tmpbuf)
return -ENOMEM;
}
/* allocate and initialize data urbs */
for (i = 0; i < NRURBS; i++) {
- struct urb **purb = subs->urb + i;
+ purb = subs->urb + i;
if (*purb) {
usb_kill_urb(*purb);
continue;
}
*purb = usb_alloc_urb(nr_of_packs(), GFP_KERNEL);
- if (NULL == *purb) {
- usX2Y_urbs_release(subs);
+ if (!*purb) {
+ usx2y_urbs_release(subs);
return -ENOMEM;
}
if (!is_playback && !(*purb)->transfer_buffer) {
@@ -425,8 +447,8 @@ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
(*purb)->transfer_buffer =
kmalloc_array(subs->maxpacksize,
nr_of_packs(), GFP_KERNEL);
- if (NULL == (*purb)->transfer_buffer) {
- usX2Y_urbs_release(subs);
+ if (!(*purb)->transfer_buffer) {
+ usx2y_urbs_release(subs);
return -ENOMEM;
}
}
@@ -435,70 +457,76 @@ static int usX2Y_urbs_allocate(struct snd_usX2Y_substream *subs)
(*purb)->number_of_packets = nr_of_packs();
(*purb)->context = subs;
(*purb)->interval = 1;
- (*purb)->complete = i_usX2Y_subs_startup;
+ (*purb)->complete = i_usx2y_subs_startup;
}
return 0;
}
-static void usX2Y_subs_startup(struct snd_usX2Y_substream *subs)
+static void usx2y_subs_startup(struct snd_usx2y_substream *subs)
{
- struct usX2Ydev *usX2Y = subs->usX2Y;
- usX2Y->prepare_subs = subs;
+ struct usx2ydev *usx2y = subs->usx2y;
+
+ usx2y->prepare_subs = subs;
subs->urb[0]->start_frame = -1;
wmb();
- usX2Y_urbs_set_complete(usX2Y, i_usX2Y_subs_startup);
+ usx2y_urbs_set_complete(usx2y, i_usx2y_subs_startup);
}
-static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs)
+static int usx2y_urbs_start(struct snd_usx2y_substream *subs)
{
int i, err;
- struct usX2Ydev *usX2Y = subs->usX2Y;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct urb *urb;
+ unsigned long pack;
- if ((err = usX2Y_urbs_allocate(subs)) < 0)
+ err = usx2y_urbs_allocate(subs);
+ if (err < 0)
return err;
subs->completed_urb = NULL;
for (i = 0; i < 4; i++) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[i];
- if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED)
+ struct snd_usx2y_substream *subs = usx2y->subs[i];
+
+ if (subs && atomic_read(&subs->state) >= STATE_PREPARED)
goto start;
}
start:
- usX2Y_subs_startup(subs);
+ usx2y_subs_startup(subs);
for (i = 0; i < NRURBS; i++) {
- struct urb *urb = subs->urb[i];
+ urb = subs->urb[i];
if (usb_pipein(urb->pipe)) {
- unsigned long pack;
- if (0 == i)
- atomic_set(&subs->state, state_STARTING3);
- urb->dev = usX2Y->dev;
+ if (!i)
+ atomic_set(&subs->state, STATE_STARTING3);
+ urb->dev = usx2y->dev;
for (pack = 0; pack < nr_of_packs(); pack++) {
urb->iso_frame_desc[pack].offset = subs->maxpacksize * pack;
urb->iso_frame_desc[pack].length = subs->maxpacksize;
}
- urb->transfer_buffer_length = subs->maxpacksize * nr_of_packs();
- if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- snd_printk (KERN_ERR "cannot submit datapipe for urb %d, err = %d\n", i, err);
+ urb->transfer_buffer_length = subs->maxpacksize * nr_of_packs();
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ snd_printk(KERN_ERR "cannot submit datapipe for urb %d, err = %d\n", i, err);
err = -EPIPE;
goto cleanup;
- } else
- if (i == 0)
- usX2Y->wait_iso_frame = urb->start_frame;
+ } else {
+ if (!i)
+ usx2y->wait_iso_frame = urb->start_frame;
+ }
urb->transfer_flags = 0;
} else {
- atomic_set(&subs->state, state_STARTING1);
+ atomic_set(&subs->state, STATE_STARTING1);
break;
}
}
err = 0;
- wait_event(usX2Y->prepare_wait_queue, NULL == usX2Y->prepare_subs);
- if (atomic_read(&subs->state) != state_PREPARED)
+ wait_event(usx2y->prepare_wait_queue, !usx2y->prepare_subs);
+ if (atomic_read(&subs->state) != STATE_PREPARED)
err = -EPIPE;
cleanup:
if (err) {
- usX2Y_subs_startup_finish(usX2Y);
- usX2Y_clients_stop(usX2Y); // something is completely wroong > stop evrything
+ usx2y_subs_startup_finish(usx2y);
+ usx2y_clients_stop(usx2y); // something is completely wrong > stop everything
}
return err;
}
@@ -506,33 +534,35 @@ static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs)
/*
* return the current pcm pointer. just return the hwptr_done value.
*/
-static snd_pcm_uframes_t snd_usX2Y_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t snd_usx2y_pcm_pointer(struct snd_pcm_substream *substream)
{
- struct snd_usX2Y_substream *subs = substream->runtime->private_data;
+ struct snd_usx2y_substream *subs = substream->runtime->private_data;
+
return subs->hwptr_done;
}
+
/*
* start/stop substream
*/
-static int snd_usX2Y_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_usx2y_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct snd_usX2Y_substream *subs = substream->runtime->private_data;
+ struct snd_usx2y_substream *subs = substream->runtime->private_data;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- snd_printdd("snd_usX2Y_pcm_trigger(START)\n");
- if (atomic_read(&subs->state) == state_PREPARED &&
- atomic_read(&subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]->state) >= state_PREPARED) {
- atomic_set(&subs->state, state_PRERUNNING);
+ snd_printdd("%s(START)\n", __func__);
+ if (atomic_read(&subs->state) == STATE_PREPARED &&
+ atomic_read(&subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE]->state) >= STATE_PREPARED) {
+ atomic_set(&subs->state, STATE_PRERUNNING);
} else {
snd_printdd("\n");
return -EPIPE;
}
break;
case SNDRV_PCM_TRIGGER_STOP:
- snd_printdd("snd_usX2Y_pcm_trigger(STOP)\n");
- if (atomic_read(&subs->state) >= state_PRERUNNING)
- atomic_set(&subs->state, state_PREPARED);
+ snd_printdd("%s(STOP)\n", __func__);
+ if (atomic_read(&subs->state) >= STATE_PRERUNNING)
+ atomic_set(&subs->state, STATE_PREPARED);
break;
default:
return -EINVAL;
@@ -540,7 +570,6 @@ static int snd_usX2Y_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-
/*
* allocate a buffer, setup samplerate
*
@@ -549,12 +578,11 @@ static int snd_usX2Y_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
* if sg buffer is supported on the later version of alsa, we'll follow
* that.
*/
-static const struct s_c2
-{
+struct s_c2 {
char c1, c2;
-}
- SetRate44100[] =
-{
+};
+
+static const struct s_c2 setrate_44100[] = {
{ 0x14, 0x08}, // this line sets 44100, well actually a little less
{ 0x18, 0x40}, // only tascam / frontier design knows the further lines .......
{ 0x18, 0x42},
@@ -589,8 +617,8 @@ static const struct s_c2
{ 0x18, 0x7C},
{ 0x18, 0x7E}
};
-static const struct s_c2 SetRate48000[] =
-{
+
+static const struct s_c2 setrate_48000[] = {
{ 0x14, 0x09}, // this line sets 48000, well actually a little less
{ 0x18, 0x40}, // only tascam / frontier design knows the further lines .......
{ 0x18, 0x42},
@@ -625,62 +653,65 @@ static const struct s_c2 SetRate48000[] =
{ 0x18, 0x7C},
{ 0x18, 0x7E}
};
-#define NOOF_SETRATE_URBS ARRAY_SIZE(SetRate48000)
-static void i_usX2Y_04Int(struct urb *urb)
+#define NOOF_SETRATE_URBS ARRAY_SIZE(setrate_48000)
+
+static void i_usx2y_04int(struct urb *urb)
{
- struct usX2Ydev *usX2Y = urb->context;
-
+ struct usx2ydev *usx2y = urb->context;
+
if (urb->status)
- snd_printk(KERN_ERR "snd_usX2Y_04Int() urb->status=%i\n", urb->status);
- if (0 == --usX2Y->US04->len)
- wake_up(&usX2Y->In04WaitQueue);
+ snd_printk(KERN_ERR "snd_usx2y_04int() urb->status=%i\n", urb->status);
+ if (!--usx2y->us04->len)
+ wake_up(&usx2y->in04_wait_queue);
}
-static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
+static int usx2y_rate_set(struct usx2ydev *usx2y, int rate)
{
int err = 0, i;
- struct snd_usX2Y_urbSeq *us = NULL;
+ struct snd_usx2y_urb_seq *us = NULL;
int *usbdata = NULL;
- const struct s_c2 *ra = rate == 48000 ? SetRate48000 : SetRate44100;
+ const struct s_c2 *ra = rate == 48000 ? setrate_48000 : setrate_44100;
+ struct urb *urb;
- if (usX2Y->rate != rate) {
- us = kzalloc(sizeof(*us) + sizeof(struct urb*) * NOOF_SETRATE_URBS, GFP_KERNEL);
- if (NULL == us) {
+ if (usx2y->rate != rate) {
+ us = kzalloc(sizeof(*us) + sizeof(struct urb *) * NOOF_SETRATE_URBS, GFP_KERNEL);
+ if (!us) {
err = -ENOMEM;
goto cleanup;
}
usbdata = kmalloc_array(NOOF_SETRATE_URBS, sizeof(int),
GFP_KERNEL);
- if (NULL == usbdata) {
+ if (!usbdata) {
err = -ENOMEM;
goto cleanup;
}
for (i = 0; i < NOOF_SETRATE_URBS; ++i) {
- if (NULL == (us->urb[i] = usb_alloc_urb(0, GFP_KERNEL))) {
+ us->urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!us->urb[i]) {
err = -ENOMEM;
goto cleanup;
}
- ((char*)(usbdata + i))[0] = ra[i].c1;
- ((char*)(usbdata + i))[1] = ra[i].c2;
- usb_fill_bulk_urb(us->urb[i], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 4),
- usbdata + i, 2, i_usX2Y_04Int, usX2Y);
+ ((char *)(usbdata + i))[0] = ra[i].c1;
+ ((char *)(usbdata + i))[1] = ra[i].c2;
+ usb_fill_bulk_urb(us->urb[i], usx2y->dev, usb_sndbulkpipe(usx2y->dev, 4),
+ usbdata + i, 2, i_usx2y_04int, usx2y);
}
err = usb_urb_ep_type_check(us->urb[0]);
if (err < 0)
goto cleanup;
us->submitted = 0;
us->len = NOOF_SETRATE_URBS;
- usX2Y->US04 = us;
- wait_event_timeout(usX2Y->In04WaitQueue, 0 == us->len, HZ);
- usX2Y->US04 = NULL;
+ usx2y->us04 = us;
+ wait_event_timeout(usx2y->in04_wait_queue, !us->len, HZ);
+ usx2y->us04 = NULL;
if (us->len)
err = -ENODEV;
cleanup:
if (us) {
us->submitted = 2*NOOF_SETRATE_URBS;
for (i = 0; i < NOOF_SETRATE_URBS; ++i) {
- struct urb *urb = us->urb[i];
+ urb = us->urb[i];
if (!urb)
continue;
if (urb->status) {
@@ -690,67 +721,68 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
}
usb_free_urb(urb);
}
- usX2Y->US04 = NULL;
+ usx2y->us04 = NULL;
kfree(usbdata);
kfree(us);
if (!err)
- usX2Y->rate = rate;
+ usx2y->rate = rate;
}
}
return err;
}
-
-static int usX2Y_format_set(struct usX2Ydev *usX2Y, snd_pcm_format_t format)
+static int usx2y_format_set(struct usx2ydev *usx2y, snd_pcm_format_t format)
{
int alternate, err;
- struct list_head* p;
+ struct list_head *p;
+
if (format == SNDRV_PCM_FORMAT_S24_3LE) {
alternate = 2;
- usX2Y->stride = 6;
+ usx2y->stride = 6;
} else {
alternate = 1;
- usX2Y->stride = 4;
+ usx2y->stride = 4;
}
- list_for_each(p, &usX2Y->midi_list) {
+ list_for_each(p, &usx2y->midi_list) {
snd_usbmidi_input_stop(p);
}
- usb_kill_urb(usX2Y->In04urb);
- if ((err = usb_set_interface(usX2Y->dev, 0, alternate))) {
- snd_printk(KERN_ERR "usb_set_interface error \n");
+ usb_kill_urb(usx2y->in04_urb);
+ err = usb_set_interface(usx2y->dev, 0, alternate);
+ if (err) {
+ snd_printk(KERN_ERR "usb_set_interface error\n");
return err;
}
- usX2Y->In04urb->dev = usX2Y->dev;
- err = usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
- list_for_each(p, &usX2Y->midi_list) {
+ usx2y->in04_urb->dev = usx2y->dev;
+ err = usb_submit_urb(usx2y->in04_urb, GFP_KERNEL);
+ list_for_each(p, &usx2y->midi_list) {
snd_usbmidi_input_start(p);
}
- usX2Y->format = format;
- usX2Y->rate = 0;
+ usx2y->format = format;
+ usx2y->rate = 0;
return err;
}
-static int snd_usX2Y_pcm_hw_params(struct snd_pcm_substream *substream,
+static int snd_usx2y_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int err = 0;
unsigned int rate = params_rate(hw_params);
snd_pcm_format_t format = params_format(hw_params);
struct snd_card *card = substream->pstr->pcm->card;
- struct usX2Ydev *dev = usX2Y(card);
+ struct usx2ydev *dev = usx2y(card);
+ struct snd_usx2y_substream *subs;
+ struct snd_pcm_substream *test_substream;
int i;
- mutex_lock(&usX2Y(card)->pcm_mutex);
- snd_printdd("snd_usX2Y_hw_params(%p, %p)\n", substream, hw_params);
- /* all pcm substreams off one usX2Y have to operate at the same
+ mutex_lock(&usx2y(card)->pcm_mutex);
+ snd_printdd("snd_usx2y_hw_params(%p, %p)\n", substream, hw_params);
+ /* all pcm substreams off one usx2y have to operate at the same
* rate & format
*/
for (i = 0; i < dev->pcm_devs * 2; i++) {
- struct snd_usX2Y_substream *subs = dev->subs[i];
- struct snd_pcm_substream *test_substream;
-
+ subs = dev->subs[i];
if (!subs)
continue;
test_substream = subs->pcm_substream;
@@ -767,81 +799,89 @@ static int snd_usX2Y_pcm_hw_params(struct snd_pcm_substream *substream,
}
error:
- mutex_unlock(&usX2Y(card)->pcm_mutex);
+ mutex_unlock(&usx2y(card)->pcm_mutex);
return err;
}
/*
* free the buffer
*/
-static int snd_usX2Y_pcm_hw_free(struct snd_pcm_substream *substream)
+static int snd_usx2y_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usX2Y_substream *subs = runtime->private_data;
- mutex_lock(&subs->usX2Y->pcm_mutex);
- snd_printdd("snd_usX2Y_hw_free(%p)\n", substream);
-
- if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
- struct snd_usX2Y_substream *cap_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
- atomic_set(&subs->state, state_STOPPED);
- usX2Y_urbs_release(subs);
+ struct snd_usx2y_substream *subs = runtime->private_data;
+ struct snd_usx2y_substream *cap_subs, *playback_subs;
+
+ mutex_lock(&subs->usx2y->pcm_mutex);
+ snd_printdd("snd_usx2y_hw_free(%p)\n", substream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ cap_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ atomic_set(&subs->state, STATE_STOPPED);
+ usx2y_urbs_release(subs);
if (!cap_subs->pcm_substream ||
!cap_subs->pcm_substream->runtime ||
!cap_subs->pcm_substream->runtime->status ||
cap_subs->pcm_substream->runtime->status->state < SNDRV_PCM_STATE_PREPARED) {
- atomic_set(&cap_subs->state, state_STOPPED);
- usX2Y_urbs_release(cap_subs);
+ atomic_set(&cap_subs->state, STATE_STOPPED);
+ usx2y_urbs_release(cap_subs);
}
} else {
- struct snd_usX2Y_substream *playback_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
- if (atomic_read(&playback_subs->state) < state_PREPARED) {
- atomic_set(&subs->state, state_STOPPED);
- usX2Y_urbs_release(subs);
+ playback_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+ if (atomic_read(&playback_subs->state) < STATE_PREPARED) {
+ atomic_set(&subs->state, STATE_STOPPED);
+ usx2y_urbs_release(subs);
}
}
- mutex_unlock(&subs->usX2Y->pcm_mutex);
+ mutex_unlock(&subs->usx2y->pcm_mutex);
return 0;
}
+
/*
* prepare callback
*
* set format and initialize urbs
*/
-static int snd_usX2Y_pcm_prepare(struct snd_pcm_substream *substream)
+static int snd_usx2y_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usX2Y_substream *subs = runtime->private_data;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- struct snd_usX2Y_substream *capsubs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ struct snd_usx2y_substream *subs = runtime->private_data;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_substream *capsubs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
int err = 0;
- snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream);
-
- mutex_lock(&usX2Y->pcm_mutex);
- usX2Y_subs_prepare(subs);
-// Start hardware streams
-// SyncStream first....
- if (atomic_read(&capsubs->state) < state_PREPARED) {
- if (usX2Y->format != runtime->format)
- if ((err = usX2Y_format_set(usX2Y, runtime->format)) < 0)
+
+ snd_printdd("%s(%p)\n", __func__, substream);
+
+ mutex_lock(&usx2y->pcm_mutex);
+ usx2y_subs_prepare(subs);
+ // Start hardware streams
+ // SyncStream first....
+ if (atomic_read(&capsubs->state) < STATE_PREPARED) {
+ if (usx2y->format != runtime->format) {
+ err = usx2y_format_set(usx2y, runtime->format);
+ if (err < 0)
goto up_prepare_mutex;
- if (usX2Y->rate != runtime->rate)
- if ((err = usX2Y_rate_set(usX2Y, runtime->rate)) < 0)
+ }
+ if (usx2y->rate != runtime->rate) {
+ err = usx2y_rate_set(usx2y, runtime->rate);
+ if (err < 0)
goto up_prepare_mutex;
+ }
snd_printdd("starting capture pipe for %s\n", subs == capsubs ? "self" : "playpipe");
- if (0 > (err = usX2Y_urbs_start(capsubs)))
+ err = usx2y_urbs_start(capsubs);
+ if (err < 0)
goto up_prepare_mutex;
}
- if (subs != capsubs && atomic_read(&subs->state) < state_PREPARED)
- err = usX2Y_urbs_start(subs);
+ if (subs != capsubs && atomic_read(&subs->state) < STATE_PREPARED)
+ err = usx2y_urbs_start(subs);
up_prepare_mutex:
- mutex_unlock(&usX2Y->pcm_mutex);
+ mutex_unlock(&usx2y->pcm_mutex);
return err;
}
-static const struct snd_pcm_hardware snd_usX2Y_2c =
-{
+static const struct snd_pcm_hardware snd_usx2y_2c = {
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
@@ -860,106 +900,101 @@ static const struct snd_pcm_hardware snd_usX2Y_2c =
.fifo_size = 0
};
-
-
-static int snd_usX2Y_pcm_open(struct snd_pcm_substream *substream)
+static int snd_usx2y_pcm_open(struct snd_pcm_substream *substream)
{
- struct snd_usX2Y_substream *subs = ((struct snd_usX2Y_substream **)
- snd_pcm_substream_chip(substream))[substream->stream];
+ struct snd_usx2y_substream *subs =
+ ((struct snd_usx2y_substream **)
+ snd_pcm_substream_chip(substream))[substream->stream];
struct snd_pcm_runtime *runtime = substream->runtime;
- if (subs->usX2Y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS)
+ if (subs->usx2y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS)
return -EBUSY;
- runtime->hw = snd_usX2Y_2c;
+ runtime->hw = snd_usx2y_2c;
runtime->private_data = subs;
subs->pcm_substream = substream;
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1000, 200000);
return 0;
}
-
-
-static int snd_usX2Y_pcm_close(struct snd_pcm_substream *substream)
+static int snd_usx2y_pcm_close(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usX2Y_substream *subs = runtime->private_data;
+ struct snd_usx2y_substream *subs = runtime->private_data;
subs->pcm_substream = NULL;
return 0;
}
-
-static const struct snd_pcm_ops snd_usX2Y_pcm_ops =
-{
- .open = snd_usX2Y_pcm_open,
- .close = snd_usX2Y_pcm_close,
- .hw_params = snd_usX2Y_pcm_hw_params,
- .hw_free = snd_usX2Y_pcm_hw_free,
- .prepare = snd_usX2Y_pcm_prepare,
- .trigger = snd_usX2Y_pcm_trigger,
- .pointer = snd_usX2Y_pcm_pointer,
+static const struct snd_pcm_ops snd_usx2y_pcm_ops = {
+ .open = snd_usx2y_pcm_open,
+ .close = snd_usx2y_pcm_close,
+ .hw_params = snd_usx2y_pcm_hw_params,
+ .hw_free = snd_usx2y_pcm_hw_free,
+ .prepare = snd_usx2y_pcm_prepare,
+ .trigger = snd_usx2y_pcm_trigger,
+ .pointer = snd_usx2y_pcm_pointer,
};
-
/*
* free a usb stream instance
*/
-static void usX2Y_audio_stream_free(struct snd_usX2Y_substream **usX2Y_substream)
+static void usx2y_audio_stream_free(struct snd_usx2y_substream **usx2y_substream)
{
int stream;
for_each_pcm_streams(stream) {
- kfree(usX2Y_substream[stream]);
- usX2Y_substream[stream] = NULL;
+ kfree(usx2y_substream[stream]);
+ usx2y_substream[stream] = NULL;
}
}
-static void snd_usX2Y_pcm_private_free(struct snd_pcm *pcm)
+static void snd_usx2y_pcm_private_free(struct snd_pcm *pcm)
{
- struct snd_usX2Y_substream **usX2Y_stream = pcm->private_data;
- if (usX2Y_stream)
- usX2Y_audio_stream_free(usX2Y_stream);
+ struct snd_usx2y_substream **usx2y_stream = pcm->private_data;
+
+ if (usx2y_stream)
+ usx2y_audio_stream_free(usx2y_stream);
}
-static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint, int capture_endpoint)
+static int usx2y_audio_stream_new(struct snd_card *card, int playback_endpoint, int capture_endpoint)
{
struct snd_pcm *pcm;
int err, i;
- struct snd_usX2Y_substream **usX2Y_substream =
- usX2Y(card)->subs + 2 * usX2Y(card)->pcm_devs;
+ struct snd_usx2y_substream **usx2y_substream =
+ usx2y(card)->subs + 2 * usx2y(card)->pcm_devs;
for (i = playback_endpoint ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
i <= SNDRV_PCM_STREAM_CAPTURE; ++i) {
- usX2Y_substream[i] = kzalloc(sizeof(struct snd_usX2Y_substream), GFP_KERNEL);
- if (!usX2Y_substream[i])
+ usx2y_substream[i] = kzalloc(sizeof(struct snd_usx2y_substream), GFP_KERNEL);
+ if (!usx2y_substream[i])
return -ENOMEM;
- usX2Y_substream[i]->usX2Y = usX2Y(card);
+ usx2y_substream[i]->usx2y = usx2y(card);
}
if (playback_endpoint)
- usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK]->endpoint = playback_endpoint;
- usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE]->endpoint = capture_endpoint;
+ usx2y_substream[SNDRV_PCM_STREAM_PLAYBACK]->endpoint = playback_endpoint;
+ usx2y_substream[SNDRV_PCM_STREAM_CAPTURE]->endpoint = capture_endpoint;
- err = snd_pcm_new(card, NAME_ALLCAPS" Audio", usX2Y(card)->pcm_devs,
+ err = snd_pcm_new(card, NAME_ALLCAPS" Audio", usx2y(card)->pcm_devs,
playback_endpoint ? 1 : 0, 1,
&pcm);
if (err < 0) {
- usX2Y_audio_stream_free(usX2Y_substream);
+ usx2y_audio_stream_free(usx2y_substream);
return err;
}
if (playback_endpoint)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usX2Y_pcm_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usX2Y_pcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usx2y_pcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usx2y_pcm_ops);
- pcm->private_data = usX2Y_substream;
- pcm->private_free = snd_usX2Y_pcm_private_free;
+ pcm->private_data = usx2y_substream;
+ pcm->private_free = snd_usx2y_pcm_private_free;
pcm->info_flags = 0;
- sprintf(pcm->name, NAME_ALLCAPS" Audio #%d", usX2Y(card)->pcm_devs);
+ sprintf(pcm->name, NAME_ALLCAPS" Audio #%d", usx2y(card)->pcm_devs);
if (playback_endpoint) {
snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
@@ -972,7 +1007,7 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
SNDRV_DMA_TYPE_CONTINUOUS,
NULL,
64*1024, 128*1024);
- usX2Y(card)->pcm_devs++;
+ usx2y(card)->pcm_devs++;
return 0;
}
@@ -980,18 +1015,19 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
/*
* create a chip instance and set its names.
*/
-int usX2Y_audio_create(struct snd_card *card)
+int usx2y_audio_create(struct snd_card *card)
{
- int err = 0;
-
- INIT_LIST_HEAD(&usX2Y(card)->pcm_list);
+ int err;
- if (0 > (err = usX2Y_audio_stream_new(card, 0xA, 0x8)))
+ err = usx2y_audio_stream_new(card, 0xA, 0x8);
+ if (err < 0)
return err;
- if (le16_to_cpu(usX2Y(card)->dev->descriptor.idProduct) == USB_ID_US428)
- if (0 > (err = usX2Y_audio_stream_new(card, 0, 0xA)))
- return err;
- if (le16_to_cpu(usX2Y(card)->dev->descriptor.idProduct) != USB_ID_US122)
- err = usX2Y_rate_set(usX2Y(card), 44100); // Lets us428 recognize output-volume settings, disturbs us122.
+ if (le16_to_cpu(usx2y(card)->dev->descriptor.idProduct) == USB_ID_US428) {
+ err = usx2y_audio_stream_new(card, 0, 0xA);
+ if (err < 0)
+ return err;
+ }
+ if (le16_to_cpu(usx2y(card)->dev->descriptor.idProduct) != USB_ID_US122)
+ err = usx2y_rate_set(usx2y(card), 44100); // Lets us428 recognize output-volume settings, disturbs us122.
return err;
}
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 8253669c6a7d..db83522c1b49 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -6,7 +6,7 @@
Its usb's unableness to atomically handle power of 2 period sized data chuncs
at standard samplerates,
- what led to this part of the usx2y module:
+ what led to this part of the usx2y module:
It provides the alsa kernel half of the usx2y-alsa-jack driver pair.
The pair uses a hardware dependent alsa-device for mmaped pcm transport.
Advantage achieved:
@@ -35,7 +35,7 @@
Kernel:
- rawusb dma pcm buffer transport should go to snd-usb-lib, so also snd-usb-audio
devices can use it.
- Currently rawusb dma pcm buffer transport (this file) is only available to snd-usb-usx2y.
+ Currently rawusb dma pcm buffer transport (this file) is only available to snd-usb-usx2y.
*/
#include <linux/delay.h>
@@ -46,28 +46,32 @@
#include <sound/hwdep.h>
-
-static int usX2Y_usbpcm_urb_capt_retire(struct snd_usX2Y_substream *subs)
+static int usx2y_usbpcm_urb_capt_retire(struct snd_usx2y_substream *subs)
{
struct urb *urb = subs->completed_urb;
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
- int i, lens = 0, hwptr_done = subs->hwptr_done;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- if (0 > usX2Y->hwdep_pcm_shm->capture_iso_start) { //FIXME
- int head = usX2Y->hwdep_pcm_shm->captured_iso_head + 1;
- if (head >= ARRAY_SIZE(usX2Y->hwdep_pcm_shm->captured_iso))
+ int i, lens = 0, hwptr_done = subs->hwptr_done;
+ struct usx2ydev *usx2y = subs->usx2y;
+ int head;
+
+ if (usx2y->hwdep_pcm_shm->capture_iso_start < 0) { //FIXME
+ head = usx2y->hwdep_pcm_shm->captured_iso_head + 1;
+ if (head >= ARRAY_SIZE(usx2y->hwdep_pcm_shm->captured_iso))
head = 0;
- usX2Y->hwdep_pcm_shm->capture_iso_start = head;
+ usx2y->hwdep_pcm_shm->capture_iso_start = head;
snd_printdd("cap start %i\n", head);
}
for (i = 0; i < nr_of_packs(); i++) {
if (urb->iso_frame_desc[i].status) { /* active? hmm, skip this */
- snd_printk(KERN_ERR "active frame status %i. Most probably some hardware problem.\n", urb->iso_frame_desc[i].status);
+ snd_printk(KERN_ERR
+ "active frame status %i. Most probably some hardware problem.\n",
+ urb->iso_frame_desc[i].status);
return urb->iso_frame_desc[i].status;
}
- lens += urb->iso_frame_desc[i].actual_length / usX2Y->stride;
+ lens += urb->iso_frame_desc[i].actual_length / usx2y->stride;
}
- if ((hwptr_done += lens) >= runtime->buffer_size)
+ hwptr_done += lens;
+ if (hwptr_done >= runtime->buffer_size)
hwptr_done -= runtime->buffer_size;
subs->hwptr_done = hwptr_done;
subs->transfer_done += lens;
@@ -79,10 +83,10 @@ static int usX2Y_usbpcm_urb_capt_retire(struct snd_usX2Y_substream *subs)
return 0;
}
-static inline int usX2Y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
- struct usX2Ydev * usX2Y)
+static int usx2y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
+ struct usx2ydev *usx2y)
{
- return (runtime->buffer_size * 1000) / usX2Y->rate + 1; //FIXME: so far only correct period_size == 2^x ?
+ return (runtime->buffer_size * 1000) / usx2y->rate + 1; //FIXME: so far only correct period_size == 2^x ?
}
/*
@@ -95,18 +99,18 @@ static inline int usX2Y_iso_frames_per_buffer(struct snd_pcm_runtime *runtime,
* it directly from the buffer. thus the data is once copied to
* a temporary buffer and urb points to that.
*/
-static int usX2Y_hwdep_urb_play_prepare(struct snd_usX2Y_substream *subs,
+static int usx2y_hwdep_urb_play_prepare(struct snd_usx2y_substream *subs,
struct urb *urb)
{
int count, counts, pack;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- struct snd_usX2Y_hwdep_pcm_shm *shm = usX2Y->hwdep_pcm_shm;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_hwdep_pcm_shm *shm = usx2y->hwdep_pcm_shm;
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
- if (0 > shm->playback_iso_start) {
+ if (shm->playback_iso_start < 0) {
shm->playback_iso_start = shm->captured_iso_head -
- usX2Y_iso_frames_per_buffer(runtime, usX2Y);
- if (0 > shm->playback_iso_start)
+ usx2y_iso_frames_per_buffer(runtime, usx2y);
+ if (shm->playback_iso_start < 0)
shm->playback_iso_start += ARRAY_SIZE(shm->captured_iso);
shm->playback_iso_head = shm->playback_iso_start;
}
@@ -114,7 +118,7 @@ static int usX2Y_hwdep_urb_play_prepare(struct snd_usX2Y_substream *subs,
count = 0;
for (pack = 0; pack < nr_of_packs(); pack++) {
/* calculate the size of a packet */
- counts = shm->captured_iso[shm->playback_iso_head].length / usX2Y->stride;
+ counts = shm->captured_iso[shm->playback_iso_head].length / usx2y->stride;
if (counts < 43 || counts > 50) {
snd_printk(KERN_ERR "should not be here with counts=%i\n", counts);
return -EPIPE;
@@ -122,27 +126,29 @@ static int usX2Y_hwdep_urb_play_prepare(struct snd_usX2Y_substream *subs,
/* set up descriptor */
urb->iso_frame_desc[pack].offset = shm->captured_iso[shm->playback_iso_head].offset;
urb->iso_frame_desc[pack].length = shm->captured_iso[shm->playback_iso_head].length;
- if (atomic_read(&subs->state) != state_RUNNING)
+ if (atomic_read(&subs->state) != STATE_RUNNING)
memset((char *)urb->transfer_buffer + urb->iso_frame_desc[pack].offset, 0,
urb->iso_frame_desc[pack].length);
if (++shm->playback_iso_head >= ARRAY_SIZE(shm->captured_iso))
shm->playback_iso_head = 0;
count += counts;
}
- urb->transfer_buffer_length = count * usX2Y->stride;
+ urb->transfer_buffer_length = count * usx2y->stride;
return 0;
}
-
-static inline void usX2Y_usbpcm_urb_capt_iso_advance(struct snd_usX2Y_substream *subs,
- struct urb *urb)
+static void usx2y_usbpcm_urb_capt_iso_advance(struct snd_usx2y_substream *subs,
+ struct urb *urb)
{
- int pack;
+ struct usb_iso_packet_descriptor *desc;
+ struct snd_usx2y_hwdep_pcm_shm *shm;
+ int pack, head;
+
for (pack = 0; pack < nr_of_packs(); ++pack) {
- struct usb_iso_packet_descriptor *desc = urb->iso_frame_desc + pack;
- if (NULL != subs) {
- struct snd_usX2Y_hwdep_pcm_shm *shm = subs->usX2Y->hwdep_pcm_shm;
- int head = shm->captured_iso_head + 1;
+ desc = urb->iso_frame_desc + pack;
+ if (subs) {
+ shm = subs->usx2y->hwdep_pcm_shm;
+ head = shm->captured_iso_head + 1;
if (head >= ARRAY_SIZE(shm->captured_iso))
head = 0;
shm->captured_iso[head].frame = urb->start_frame + pack;
@@ -151,106 +157,111 @@ static inline void usX2Y_usbpcm_urb_capt_iso_advance(struct snd_usX2Y_substream
shm->captured_iso_head = head;
shm->captured_iso_frames++;
}
- if ((desc->offset += desc->length * NRURBS*nr_of_packs()) +
- desc->length >= SSS)
+ desc->offset += desc->length * NRURBS * nr_of_packs();
+ if (desc->offset + desc->length >= SSS)
desc->offset -= (SSS - desc->length);
}
}
-static inline int usX2Y_usbpcm_usbframe_complete(struct snd_usX2Y_substream *capsubs,
- struct snd_usX2Y_substream *capsubs2,
- struct snd_usX2Y_substream *playbacksubs,
- int frame)
+static int usx2y_usbpcm_usbframe_complete(struct snd_usx2y_substream *capsubs,
+ struct snd_usx2y_substream *capsubs2,
+ struct snd_usx2y_substream *playbacksubs,
+ int frame)
{
int err, state;
struct urb *urb = playbacksubs->completed_urb;
state = atomic_read(&playbacksubs->state);
- if (NULL != urb) {
- if (state == state_RUNNING)
- usX2Y_urb_play_retire(playbacksubs, urb);
- else if (state >= state_PRERUNNING)
+ if (urb) {
+ if (state == STATE_RUNNING)
+ usx2y_urb_play_retire(playbacksubs, urb);
+ else if (state >= STATE_PRERUNNING)
atomic_inc(&playbacksubs->state);
} else {
switch (state) {
- case state_STARTING1:
+ case STATE_STARTING1:
urb = playbacksubs->urb[0];
atomic_inc(&playbacksubs->state);
break;
- case state_STARTING2:
+ case STATE_STARTING2:
urb = playbacksubs->urb[1];
atomic_inc(&playbacksubs->state);
break;
}
}
if (urb) {
- if ((err = usX2Y_hwdep_urb_play_prepare(playbacksubs, urb)) ||
- (err = usX2Y_urb_submit(playbacksubs, urb, frame))) {
+ err = usx2y_hwdep_urb_play_prepare(playbacksubs, urb);
+ if (err)
+ return err;
+ err = usx2y_urb_submit(playbacksubs, urb, frame);
+ if (err)
return err;
- }
}
-
+
playbacksubs->completed_urb = NULL;
state = atomic_read(&capsubs->state);
- if (state >= state_PREPARED) {
- if (state == state_RUNNING) {
- if ((err = usX2Y_usbpcm_urb_capt_retire(capsubs)))
+ if (state >= STATE_PREPARED) {
+ if (state == STATE_RUNNING) {
+ err = usx2y_usbpcm_urb_capt_retire(capsubs);
+ if (err)
return err;
- } else if (state >= state_PRERUNNING)
+ } else if (state >= STATE_PRERUNNING) {
atomic_inc(&capsubs->state);
- usX2Y_usbpcm_urb_capt_iso_advance(capsubs, capsubs->completed_urb);
- if (NULL != capsubs2)
- usX2Y_usbpcm_urb_capt_iso_advance(NULL, capsubs2->completed_urb);
- if ((err = usX2Y_urb_submit(capsubs, capsubs->completed_urb, frame)))
+ }
+ usx2y_usbpcm_urb_capt_iso_advance(capsubs, capsubs->completed_urb);
+ if (capsubs2)
+ usx2y_usbpcm_urb_capt_iso_advance(NULL, capsubs2->completed_urb);
+ err = usx2y_urb_submit(capsubs, capsubs->completed_urb, frame);
+ if (err)
return err;
- if (NULL != capsubs2)
- if ((err = usX2Y_urb_submit(capsubs2, capsubs2->completed_urb, frame)))
+ if (capsubs2) {
+ err = usx2y_urb_submit(capsubs2, capsubs2->completed_urb, frame);
+ if (err)
return err;
+ }
}
capsubs->completed_urb = NULL;
- if (NULL != capsubs2)
+ if (capsubs2)
capsubs2->completed_urb = NULL;
return 0;
}
-
-static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
+static void i_usx2y_usbpcm_urb_complete(struct urb *urb)
{
- struct snd_usX2Y_substream *subs = urb->context;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- struct snd_usX2Y_substream *capsubs, *capsubs2, *playbacksubs;
+ struct snd_usx2y_substream *subs = urb->context;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_substream *capsubs, *capsubs2, *playbacksubs;
- if (unlikely(atomic_read(&subs->state) < state_PREPARED)) {
+ if (unlikely(atomic_read(&subs->state) < STATE_PREPARED)) {
snd_printdd("hcd_frame=%i ep=%i%s status=%i start_frame=%i\n",
- usb_get_current_frame_number(usX2Y->dev),
+ usb_get_current_frame_number(usx2y->dev),
subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
urb->status, urb->start_frame);
return;
}
if (unlikely(urb->status)) {
- usX2Y_error_urb_status(usX2Y, subs, urb);
+ usx2y_error_urb_status(usx2y, subs, urb);
return;
}
subs->completed_urb = urb;
- capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
- capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
- playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
- if (capsubs->completed_urb && atomic_read(&capsubs->state) >= state_PREPARED &&
- (NULL == capsubs2 || capsubs2->completed_urb) &&
- (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < state_PREPARED)) {
- if (!usX2Y_usbpcm_usbframe_complete(capsubs, capsubs2, playbacksubs, urb->start_frame))
- usX2Y->wait_iso_frame += nr_of_packs();
- else {
+ capsubs = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ capsubs2 = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ playbacksubs = usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+ if (capsubs->completed_urb && atomic_read(&capsubs->state) >= STATE_PREPARED &&
+ (!capsubs2 || capsubs2->completed_urb) &&
+ (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < STATE_PREPARED)) {
+ if (!usx2y_usbpcm_usbframe_complete(capsubs, capsubs2, playbacksubs, urb->start_frame)) {
+ usx2y->wait_iso_frame += nr_of_packs();
+ } else {
snd_printdd("\n");
- usX2Y_clients_stop(usX2Y);
+ usx2y_clients_stop(usx2y);
}
}
}
-
-static void usX2Y_hwdep_urb_release(struct urb **urb)
+static void usx2y_hwdep_urb_release(struct urb **urb)
{
usb_kill_urb(*urb);
usb_free_urb(*urb);
@@ -260,49 +271,53 @@ static void usX2Y_hwdep_urb_release(struct urb **urb)
/*
* release a substream
*/
-static void usX2Y_usbpcm_urbs_release(struct snd_usX2Y_substream *subs)
+static void usx2y_usbpcm_urbs_release(struct snd_usx2y_substream *subs)
{
int i;
- snd_printdd("snd_usX2Y_urbs_release() %i\n", subs->endpoint);
+
+ snd_printdd("snd_usx2y_urbs_release() %i\n", subs->endpoint);
for (i = 0; i < NRURBS; i++)
- usX2Y_hwdep_urb_release(subs->urb + i);
+ usx2y_hwdep_urb_release(subs->urb + i);
}
-static void usX2Y_usbpcm_subs_startup_finish(struct usX2Ydev * usX2Y)
+static void usx2y_usbpcm_subs_startup_finish(struct usx2ydev *usx2y)
{
- usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_urb_complete);
- usX2Y->prepare_subs = NULL;
+ usx2y_urbs_set_complete(usx2y, i_usx2y_usbpcm_urb_complete);
+ usx2y->prepare_subs = NULL;
}
-static void i_usX2Y_usbpcm_subs_startup(struct urb *urb)
+static void i_usx2y_usbpcm_subs_startup(struct urb *urb)
{
- struct snd_usX2Y_substream *subs = urb->context;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- struct snd_usX2Y_substream *prepare_subs = usX2Y->prepare_subs;
- if (NULL != prepare_subs &&
+ struct snd_usx2y_substream *subs = urb->context;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_substream *prepare_subs = usx2y->prepare_subs;
+ struct snd_usx2y_substream *cap_subs2;
+
+ if (prepare_subs &&
urb->start_frame == prepare_subs->urb[0]->start_frame) {
atomic_inc(&prepare_subs->state);
- if (prepare_subs == usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]) {
- struct snd_usX2Y_substream *cap_subs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
- if (cap_subs2 != NULL)
+ if (prepare_subs == usx2y->subs[SNDRV_PCM_STREAM_CAPTURE]) {
+ cap_subs2 = usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ if (cap_subs2)
atomic_inc(&cap_subs2->state);
}
- usX2Y_usbpcm_subs_startup_finish(usX2Y);
- wake_up(&usX2Y->prepare_wait_queue);
+ usx2y_usbpcm_subs_startup_finish(usx2y);
+ wake_up(&usx2y->prepare_wait_queue);
}
- i_usX2Y_usbpcm_urb_complete(urb);
+ i_usx2y_usbpcm_urb_complete(urb);
}
/*
* initialize a substream's urbs
*/
-static int usX2Y_usbpcm_urbs_allocate(struct snd_usX2Y_substream *subs)
+static int usx2y_usbpcm_urbs_allocate(struct snd_usx2y_substream *subs)
{
int i;
unsigned int pipe;
- int is_playback = subs == subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
- struct usb_device *dev = subs->usX2Y->dev;
+ int is_playback = subs == subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+ struct usb_device *dev = subs->usx2y->dev;
+ struct urb **purb;
pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
usb_rcvisocpipe(dev, subs->endpoint);
@@ -312,28 +327,28 @@ static int usX2Y_usbpcm_urbs_allocate(struct snd_usX2Y_substream *subs)
/* allocate and initialize data urbs */
for (i = 0; i < NRURBS; i++) {
- struct urb **purb = subs->urb + i;
+ purb = subs->urb + i;
if (*purb) {
usb_kill_urb(*purb);
continue;
}
*purb = usb_alloc_urb(nr_of_packs(), GFP_KERNEL);
- if (NULL == *purb) {
- usX2Y_usbpcm_urbs_release(subs);
+ if (!*purb) {
+ usx2y_usbpcm_urbs_release(subs);
return -ENOMEM;
}
(*purb)->transfer_buffer = is_playback ?
- subs->usX2Y->hwdep_pcm_shm->playback : (
+ subs->usx2y->hwdep_pcm_shm->playback : (
subs->endpoint == 0x8 ?
- subs->usX2Y->hwdep_pcm_shm->capture0x8 :
- subs->usX2Y->hwdep_pcm_shm->capture0xA);
+ subs->usx2y->hwdep_pcm_shm->capture0x8 :
+ subs->usx2y->hwdep_pcm_shm->capture0xA);
(*purb)->dev = dev;
(*purb)->pipe = pipe;
(*purb)->number_of_packets = nr_of_packs();
(*purb)->context = subs;
(*purb)->interval = 1;
- (*purb)->complete = i_usX2Y_usbpcm_subs_startup;
+ (*purb)->complete = i_usx2y_usbpcm_subs_startup;
}
return 0;
}
@@ -341,195 +356,216 @@ static int usX2Y_usbpcm_urbs_allocate(struct snd_usX2Y_substream *subs)
/*
* free the buffer
*/
-static int snd_usX2Y_usbpcm_hw_free(struct snd_pcm_substream *substream)
+static int snd_usx2y_usbpcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usX2Y_substream *subs = runtime->private_data,
- *cap_subs2 = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
- mutex_lock(&subs->usX2Y->pcm_mutex);
- snd_printdd("snd_usX2Y_usbpcm_hw_free(%p)\n", substream);
-
- if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
- struct snd_usX2Y_substream *cap_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
- atomic_set(&subs->state, state_STOPPED);
- usX2Y_usbpcm_urbs_release(subs);
+ struct snd_usx2y_substream *subs = runtime->private_data;
+ struct snd_usx2y_substream *cap_subs;
+ struct snd_usx2y_substream *playback_subs;
+ struct snd_usx2y_substream *cap_subs2;
+
+ mutex_lock(&subs->usx2y->pcm_mutex);
+ snd_printdd("%s(%p)\n", __func__, substream);
+
+ cap_subs2 = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ cap_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ atomic_set(&subs->state, STATE_STOPPED);
+ usx2y_usbpcm_urbs_release(subs);
if (!cap_subs->pcm_substream ||
!cap_subs->pcm_substream->runtime ||
!cap_subs->pcm_substream->runtime->status ||
cap_subs->pcm_substream->runtime->status->state < SNDRV_PCM_STATE_PREPARED) {
- atomic_set(&cap_subs->state, state_STOPPED);
- if (NULL != cap_subs2)
- atomic_set(&cap_subs2->state, state_STOPPED);
- usX2Y_usbpcm_urbs_release(cap_subs);
- if (NULL != cap_subs2)
- usX2Y_usbpcm_urbs_release(cap_subs2);
+ atomic_set(&cap_subs->state, STATE_STOPPED);
+ if (cap_subs2)
+ atomic_set(&cap_subs2->state, STATE_STOPPED);
+ usx2y_usbpcm_urbs_release(cap_subs);
+ if (cap_subs2)
+ usx2y_usbpcm_urbs_release(cap_subs2);
}
} else {
- struct snd_usX2Y_substream *playback_subs = subs->usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
- if (atomic_read(&playback_subs->state) < state_PREPARED) {
- atomic_set(&subs->state, state_STOPPED);
- if (NULL != cap_subs2)
- atomic_set(&cap_subs2->state, state_STOPPED);
- usX2Y_usbpcm_urbs_release(subs);
- if (NULL != cap_subs2)
- usX2Y_usbpcm_urbs_release(cap_subs2);
+ playback_subs = subs->usx2y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+ if (atomic_read(&playback_subs->state) < STATE_PREPARED) {
+ atomic_set(&subs->state, STATE_STOPPED);
+ if (cap_subs2)
+ atomic_set(&cap_subs2->state, STATE_STOPPED);
+ usx2y_usbpcm_urbs_release(subs);
+ if (cap_subs2)
+ usx2y_usbpcm_urbs_release(cap_subs2);
}
}
- mutex_unlock(&subs->usX2Y->pcm_mutex);
+ mutex_unlock(&subs->usx2y->pcm_mutex);
return 0;
}
-static void usX2Y_usbpcm_subs_startup(struct snd_usX2Y_substream *subs)
+static void usx2y_usbpcm_subs_startup(struct snd_usx2y_substream *subs)
{
- struct usX2Ydev * usX2Y = subs->usX2Y;
- usX2Y->prepare_subs = subs;
+ struct usx2ydev *usx2y = subs->usx2y;
+
+ usx2y->prepare_subs = subs;
subs->urb[0]->start_frame = -1;
- smp_wmb(); // Make sure above modifications are seen by i_usX2Y_subs_startup()
- usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_subs_startup);
+ smp_wmb(); // Make sure above modifications are seen by i_usx2y_subs_startup()
+ usx2y_urbs_set_complete(usx2y, i_usx2y_usbpcm_subs_startup);
}
-static int usX2Y_usbpcm_urbs_start(struct snd_usX2Y_substream *subs)
+static int usx2y_usbpcm_urbs_start(struct snd_usx2y_substream *subs)
{
- int p, u, err,
- stream = subs->pcm_substream->stream;
- struct usX2Ydev *usX2Y = subs->usX2Y;
-
- if (SNDRV_PCM_STREAM_CAPTURE == stream) {
- usX2Y->hwdep_pcm_shm->captured_iso_head = -1;
- usX2Y->hwdep_pcm_shm->captured_iso_frames = 0;
+ int p, u, err, stream = subs->pcm_substream->stream;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct urb *urb;
+ unsigned long pack;
+
+ if (stream == SNDRV_PCM_STREAM_CAPTURE) {
+ usx2y->hwdep_pcm_shm->captured_iso_head = -1;
+ usx2y->hwdep_pcm_shm->captured_iso_frames = 0;
}
for (p = 0; 3 >= (stream + p); p += 2) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[stream + p];
- if (subs != NULL) {
- if ((err = usX2Y_usbpcm_urbs_allocate(subs)) < 0)
+ struct snd_usx2y_substream *subs = usx2y->subs[stream + p];
+ if (subs) {
+ err = usx2y_usbpcm_urbs_allocate(subs);
+ if (err < 0)
return err;
subs->completed_urb = NULL;
}
}
for (p = 0; p < 4; p++) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[p];
- if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED)
+ struct snd_usx2y_substream *subs = usx2y->subs[p];
+
+ if (subs && atomic_read(&subs->state) >= STATE_PREPARED)
goto start;
}
start:
- usX2Y_usbpcm_subs_startup(subs);
+ usx2y_usbpcm_subs_startup(subs);
for (u = 0; u < NRURBS; u++) {
for (p = 0; 3 >= (stream + p); p += 2) {
- struct snd_usX2Y_substream *subs = usX2Y->subs[stream + p];
- if (subs != NULL) {
- struct urb *urb = subs->urb[u];
- if (usb_pipein(urb->pipe)) {
- unsigned long pack;
- if (0 == u)
- atomic_set(&subs->state, state_STARTING3);
- urb->dev = usX2Y->dev;
- for (pack = 0; pack < nr_of_packs(); pack++) {
- urb->iso_frame_desc[pack].offset = subs->maxpacksize * (pack + u * nr_of_packs());
- urb->iso_frame_desc[pack].length = subs->maxpacksize;
- }
- urb->transfer_buffer_length = subs->maxpacksize * nr_of_packs();
- if ((err = usb_submit_urb(urb, GFP_KERNEL)) < 0) {
- snd_printk (KERN_ERR "cannot usb_submit_urb() for urb %d, err = %d\n", u, err);
- err = -EPIPE;
- goto cleanup;
- } else {
- snd_printdd("%i\n", urb->start_frame);
- if (u == 0)
- usX2Y->wait_iso_frame = urb->start_frame;
- }
- urb->transfer_flags = 0;
- } else {
- atomic_set(&subs->state, state_STARTING1);
- break;
- }
+ struct snd_usx2y_substream *subs = usx2y->subs[stream + p];
+
+ if (!subs)
+ continue;
+ urb = subs->urb[u];
+ if (usb_pipein(urb->pipe)) {
+ if (!u)
+ atomic_set(&subs->state, STATE_STARTING3);
+ urb->dev = usx2y->dev;
+ for (pack = 0; pack < nr_of_packs(); pack++) {
+ urb->iso_frame_desc[pack].offset = subs->maxpacksize * (pack + u * nr_of_packs());
+ urb->iso_frame_desc[pack].length = subs->maxpacksize;
+ }
+ urb->transfer_buffer_length = subs->maxpacksize * nr_of_packs();
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err < 0) {
+ snd_printk(KERN_ERR "cannot usb_submit_urb() for urb %d, err = %d\n", u, err);
+ err = -EPIPE;
+ goto cleanup;
+ } else {
+ snd_printdd("%i\n", urb->start_frame);
+ if (!u)
+ usx2y->wait_iso_frame = urb->start_frame;
+ }
+ urb->transfer_flags = 0;
+ } else {
+ atomic_set(&subs->state, STATE_STARTING1);
+ break;
}
}
}
err = 0;
- wait_event(usX2Y->prepare_wait_queue, NULL == usX2Y->prepare_subs);
- if (atomic_read(&subs->state) != state_PREPARED)
+ wait_event(usx2y->prepare_wait_queue, !usx2y->prepare_subs);
+ if (atomic_read(&subs->state) != STATE_PREPARED)
err = -EPIPE;
-
+
cleanup:
if (err) {
- usX2Y_subs_startup_finish(usX2Y); // Call it now
- usX2Y_clients_stop(usX2Y); // something is completely wroong > stop evrything
+ usx2y_subs_startup_finish(usx2y); // Call it now
+ usx2y_clients_stop(usx2y); // something is completely wrong > stop everything
}
return err;
}
+#define USX2Y_HWDEP_PCM_PAGES \
+ PAGE_ALIGN(sizeof(struct snd_usx2y_hwdep_pcm_shm))
+
/*
* prepare callback
*
* set format and initialize urbs
*/
-static int snd_usX2Y_usbpcm_prepare(struct snd_pcm_substream *substream)
+static int snd_usx2y_usbpcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usX2Y_substream *subs = runtime->private_data;
- struct usX2Ydev *usX2Y = subs->usX2Y;
- struct snd_usX2Y_substream *capsubs = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ struct snd_usx2y_substream *subs = runtime->private_data;
+ struct usx2ydev *usx2y = subs->usx2y;
+ struct snd_usx2y_substream *capsubs = subs->usx2y->subs[SNDRV_PCM_STREAM_CAPTURE];
int err = 0;
- snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream);
- if (NULL == usX2Y->hwdep_pcm_shm) {
- usX2Y->hwdep_pcm_shm = alloc_pages_exact(sizeof(struct snd_usX2Y_hwdep_pcm_shm),
+ snd_printdd("snd_usx2y_pcm_prepare(%p)\n", substream);
+
+ mutex_lock(&usx2y->pcm_mutex);
+
+ if (!usx2y->hwdep_pcm_shm) {
+ usx2y->hwdep_pcm_shm = alloc_pages_exact(USX2Y_HWDEP_PCM_PAGES,
GFP_KERNEL);
- if (!usX2Y->hwdep_pcm_shm)
- return -ENOMEM;
- memset(usX2Y->hwdep_pcm_shm, 0, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
+ if (!usx2y->hwdep_pcm_shm) {
+ err = -ENOMEM;
+ goto up_prepare_mutex;
+ }
+ memset(usx2y->hwdep_pcm_shm, 0, USX2Y_HWDEP_PCM_PAGES);
}
- mutex_lock(&usX2Y->pcm_mutex);
- usX2Y_subs_prepare(subs);
-// Start hardware streams
-// SyncStream first....
- if (atomic_read(&capsubs->state) < state_PREPARED) {
- if (usX2Y->format != runtime->format)
- if ((err = usX2Y_format_set(usX2Y, runtime->format)) < 0)
+ usx2y_subs_prepare(subs);
+ // Start hardware streams
+ // SyncStream first....
+ if (atomic_read(&capsubs->state) < STATE_PREPARED) {
+ if (usx2y->format != runtime->format) {
+ err = usx2y_format_set(usx2y, runtime->format);
+ if (err < 0)
goto up_prepare_mutex;
- if (usX2Y->rate != runtime->rate)
- if ((err = usX2Y_rate_set(usX2Y, runtime->rate)) < 0)
+ }
+ if (usx2y->rate != runtime->rate) {
+ err = usx2y_rate_set(usx2y, runtime->rate);
+ if (err < 0)
goto up_prepare_mutex;
+ }
snd_printdd("starting capture pipe for %s\n", subs == capsubs ?
"self" : "playpipe");
- if (0 > (err = usX2Y_usbpcm_urbs_start(capsubs)))
+ err = usx2y_usbpcm_urbs_start(capsubs);
+ if (err < 0)
goto up_prepare_mutex;
}
if (subs != capsubs) {
- usX2Y->hwdep_pcm_shm->playback_iso_start = -1;
- if (atomic_read(&subs->state) < state_PREPARED) {
- while (usX2Y_iso_frames_per_buffer(runtime, usX2Y) >
- usX2Y->hwdep_pcm_shm->captured_iso_frames) {
- snd_printdd("Wait: iso_frames_per_buffer=%i,"
- "captured_iso_frames=%i\n",
- usX2Y_iso_frames_per_buffer(runtime, usX2Y),
- usX2Y->hwdep_pcm_shm->captured_iso_frames);
+ usx2y->hwdep_pcm_shm->playback_iso_start = -1;
+ if (atomic_read(&subs->state) < STATE_PREPARED) {
+ while (usx2y_iso_frames_per_buffer(runtime, usx2y) >
+ usx2y->hwdep_pcm_shm->captured_iso_frames) {
+ snd_printdd("Wait: iso_frames_per_buffer=%i,captured_iso_frames=%i\n",
+ usx2y_iso_frames_per_buffer(runtime, usx2y),
+ usx2y->hwdep_pcm_shm->captured_iso_frames);
if (msleep_interruptible(10)) {
err = -ERESTARTSYS;
goto up_prepare_mutex;
}
- }
- if (0 > (err = usX2Y_usbpcm_urbs_start(subs)))
+ }
+ err = usx2y_usbpcm_urbs_start(subs);
+ if (err < 0)
goto up_prepare_mutex;
}
snd_printdd("Ready: iso_frames_per_buffer=%i,captured_iso_frames=%i\n",
- usX2Y_iso_frames_per_buffer(runtime, usX2Y),
- usX2Y->hwdep_pcm_shm->captured_iso_frames);
- } else
- usX2Y->hwdep_pcm_shm->capture_iso_start = -1;
+ usx2y_iso_frames_per_buffer(runtime, usx2y),
+ usx2y->hwdep_pcm_shm->captured_iso_frames);
+ } else {
+ usx2y->hwdep_pcm_shm->capture_iso_start = -1;
+ }
up_prepare_mutex:
- mutex_unlock(&usX2Y->pcm_mutex);
+ mutex_unlock(&usx2y->pcm_mutex);
return err;
}
-static const struct snd_pcm_hardware snd_usX2Y_4c =
-{
+static const struct snd_pcm_hardware snd_usx2y_4c = {
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID),
@@ -547,55 +583,53 @@ static const struct snd_pcm_hardware snd_usX2Y_4c =
.fifo_size = 0
};
-
-
-static int snd_usX2Y_usbpcm_open(struct snd_pcm_substream *substream)
+static int snd_usx2y_usbpcm_open(struct snd_pcm_substream *substream)
{
- struct snd_usX2Y_substream *subs = ((struct snd_usX2Y_substream **)
- snd_pcm_substream_chip(substream))[substream->stream];
+ struct snd_usx2y_substream *subs =
+ ((struct snd_usx2y_substream **)
+ snd_pcm_substream_chip(substream))[substream->stream];
struct snd_pcm_runtime *runtime = substream->runtime;
- if (!(subs->usX2Y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS))
+ if (!(subs->usx2y->chip_status & USX2Y_STAT_CHIP_MMAP_PCM_URBS))
return -EBUSY;
- runtime->hw = SNDRV_PCM_STREAM_PLAYBACK == substream->stream ? snd_usX2Y_2c :
- (subs->usX2Y->subs[3] ? snd_usX2Y_4c : snd_usX2Y_2c);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = snd_usx2y_2c;
+ else
+ runtime->hw = (subs->usx2y->subs[3] ? snd_usx2y_4c : snd_usx2y_2c);
runtime->private_data = subs;
subs->pcm_substream = substream;
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1000, 200000);
return 0;
}
-
-static int snd_usX2Y_usbpcm_close(struct snd_pcm_substream *substream)
+static int snd_usx2y_usbpcm_close(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_usX2Y_substream *subs = runtime->private_data;
+ struct snd_usx2y_substream *subs = runtime->private_data;
subs->pcm_substream = NULL;
return 0;
}
-
-static const struct snd_pcm_ops snd_usX2Y_usbpcm_ops =
-{
- .open = snd_usX2Y_usbpcm_open,
- .close = snd_usX2Y_usbpcm_close,
- .hw_params = snd_usX2Y_pcm_hw_params,
- .hw_free = snd_usX2Y_usbpcm_hw_free,
- .prepare = snd_usX2Y_usbpcm_prepare,
- .trigger = snd_usX2Y_pcm_trigger,
- .pointer = snd_usX2Y_pcm_pointer,
+static const struct snd_pcm_ops snd_usx2y_usbpcm_ops = {
+ .open = snd_usx2y_usbpcm_open,
+ .close = snd_usx2y_usbpcm_close,
+ .hw_params = snd_usx2y_pcm_hw_params,
+ .hw_free = snd_usx2y_usbpcm_hw_free,
+ .prepare = snd_usx2y_usbpcm_prepare,
+ .trigger = snd_usx2y_pcm_trigger,
+ .pointer = snd_usx2y_pcm_pointer,
};
-
-static int usX2Y_pcms_busy_check(struct snd_card *card)
+static int usx2y_pcms_busy_check(struct snd_card *card)
{
- struct usX2Ydev *dev = usX2Y(card);
+ struct usx2ydev *dev = usx2y(card);
+ struct snd_usx2y_substream *subs;
int i;
for (i = 0; i < dev->pcm_devs * 2; i++) {
- struct snd_usX2Y_substream *subs = dev->subs[i];
+ subs = dev->subs[i];
if (subs && subs->pcm_substream &&
SUBSTREAM_BUSY(subs->pcm_substream))
return -EBUSY;
@@ -603,125 +637,120 @@ static int usX2Y_pcms_busy_check(struct snd_card *card)
return 0;
}
-static int snd_usX2Y_hwdep_pcm_open(struct snd_hwdep *hw, struct file *file)
+static int snd_usx2y_hwdep_pcm_open(struct snd_hwdep *hw, struct file *file)
{
struct snd_card *card = hw->card;
int err;
- mutex_lock(&usX2Y(card)->pcm_mutex);
- err = usX2Y_pcms_busy_check(card);
+ mutex_lock(&usx2y(card)->pcm_mutex);
+ err = usx2y_pcms_busy_check(card);
if (!err)
- usX2Y(card)->chip_status |= USX2Y_STAT_CHIP_MMAP_PCM_URBS;
- mutex_unlock(&usX2Y(card)->pcm_mutex);
+ usx2y(card)->chip_status |= USX2Y_STAT_CHIP_MMAP_PCM_URBS;
+ mutex_unlock(&usx2y(card)->pcm_mutex);
return err;
}
-
-static int snd_usX2Y_hwdep_pcm_release(struct snd_hwdep *hw, struct file *file)
+static int snd_usx2y_hwdep_pcm_release(struct snd_hwdep *hw, struct file *file)
{
struct snd_card *card = hw->card;
int err;
- mutex_lock(&usX2Y(card)->pcm_mutex);
- err = usX2Y_pcms_busy_check(card);
+ mutex_lock(&usx2y(card)->pcm_mutex);
+ err = usx2y_pcms_busy_check(card);
if (!err)
- usX2Y(hw->card)->chip_status &= ~USX2Y_STAT_CHIP_MMAP_PCM_URBS;
- mutex_unlock(&usX2Y(card)->pcm_mutex);
+ usx2y(hw->card)->chip_status &= ~USX2Y_STAT_CHIP_MMAP_PCM_URBS;
+ mutex_unlock(&usx2y(card)->pcm_mutex);
return err;
}
-
-static void snd_usX2Y_hwdep_pcm_vm_open(struct vm_area_struct *area)
+static void snd_usx2y_hwdep_pcm_vm_open(struct vm_area_struct *area)
{
}
-
-static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area)
+static void snd_usx2y_hwdep_pcm_vm_close(struct vm_area_struct *area)
{
}
-
-static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t snd_usx2y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
{
unsigned long offset;
void *vaddr;
offset = vmf->pgoff << PAGE_SHIFT;
- vaddr = (char *)((struct usX2Ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
+ vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
vmf->page = virt_to_page(vaddr);
get_page(vmf->page);
return 0;
}
-
-static const struct vm_operations_struct snd_usX2Y_hwdep_pcm_vm_ops = {
- .open = snd_usX2Y_hwdep_pcm_vm_open,
- .close = snd_usX2Y_hwdep_pcm_vm_close,
- .fault = snd_usX2Y_hwdep_pcm_vm_fault,
+static const struct vm_operations_struct snd_usx2y_hwdep_pcm_vm_ops = {
+ .open = snd_usx2y_hwdep_pcm_vm_open,
+ .close = snd_usx2y_hwdep_pcm_vm_close,
+ .fault = snd_usx2y_hwdep_pcm_vm_fault,
};
-
-static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area)
+static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep *hw, struct file *filp, struct vm_area_struct *area)
{
unsigned long size = (unsigned long)(area->vm_end - area->vm_start);
- struct usX2Ydev *usX2Y = hw->private_data;
+ struct usx2ydev *usx2y = hw->private_data;
- if (!(usX2Y->chip_status & USX2Y_STAT_CHIP_INIT))
+ if (!(usx2y->chip_status & USX2Y_STAT_CHIP_INIT))
return -EBUSY;
- /* if userspace tries to mmap beyond end of our buffer, fail */
- if (size > PAGE_ALIGN(sizeof(struct snd_usX2Y_hwdep_pcm_shm))) {
- snd_printd("%lu > %lu\n", size, (unsigned long)sizeof(struct snd_usX2Y_hwdep_pcm_shm));
+ /* if userspace tries to mmap beyond end of our buffer, fail */
+ if (size > USX2Y_HWDEP_PCM_PAGES) {
+ snd_printd("%lu > %lu\n", size, (unsigned long)USX2Y_HWDEP_PCM_PAGES);
return -EINVAL;
}
- if (!usX2Y->hwdep_pcm_shm) {
+ if (!usx2y->hwdep_pcm_shm)
return -ENODEV;
- }
- area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
+
+ area->vm_ops = &snd_usx2y_hwdep_pcm_vm_ops;
area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
area->vm_private_data = hw->private_data;
return 0;
}
-
-static void snd_usX2Y_hwdep_pcm_private_free(struct snd_hwdep *hwdep)
+static void snd_usx2y_hwdep_pcm_private_free(struct snd_hwdep *hwdep)
{
- struct usX2Ydev *usX2Y = hwdep->private_data;
- if (NULL != usX2Y->hwdep_pcm_shm)
- free_pages_exact(usX2Y->hwdep_pcm_shm, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
-}
+ struct usx2ydev *usx2y = hwdep->private_data;
+ if (usx2y->hwdep_pcm_shm)
+ free_pages_exact(usx2y->hwdep_pcm_shm, USX2Y_HWDEP_PCM_PAGES);
+}
-int usX2Y_hwdep_pcm_new(struct snd_card *card)
+int usx2y_hwdep_pcm_new(struct snd_card *card)
{
int err;
struct snd_hwdep *hw;
struct snd_pcm *pcm;
- struct usb_device *dev = usX2Y(card)->dev;
- if (1 != nr_of_packs())
+ struct usb_device *dev = usx2y(card)->dev;
+
+ if (nr_of_packs() != 1)
return 0;
- if ((err = snd_hwdep_new(card, SND_USX2Y_USBPCM_ID, 1, &hw)) < 0)
+ err = snd_hwdep_new(card, SND_USX2Y_USBPCM_ID, 1, &hw);
+ if (err < 0)
return err;
hw->iface = SNDRV_HWDEP_IFACE_USX2Y_PCM;
- hw->private_data = usX2Y(card);
- hw->private_free = snd_usX2Y_hwdep_pcm_private_free;
- hw->ops.open = snd_usX2Y_hwdep_pcm_open;
- hw->ops.release = snd_usX2Y_hwdep_pcm_release;
- hw->ops.mmap = snd_usX2Y_hwdep_pcm_mmap;
+ hw->private_data = usx2y(card);
+ hw->private_free = snd_usx2y_hwdep_pcm_private_free;
+ hw->ops.open = snd_usx2y_hwdep_pcm_open;
+ hw->ops.release = snd_usx2y_hwdep_pcm_release;
+ hw->ops.mmap = snd_usx2y_hwdep_pcm_mmap;
hw->exclusive = 1;
sprintf(hw->name, "/dev/bus/usb/%03d/%03d/hwdeppcm", dev->bus->busnum, dev->devnum);
err = snd_pcm_new(card, NAME_ALLCAPS" hwdep Audio", 2, 1, 1, &pcm);
- if (err < 0) {
+ if (err < 0)
return err;
- }
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usX2Y_usbpcm_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usX2Y_usbpcm_ops);
- pcm->private_data = usX2Y(card)->subs;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usx2y_usbpcm_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usx2y_usbpcm_ops);
+
+ pcm->private_data = usx2y(card)->subs;
pcm->info_flags = 0;
sprintf(pcm->name, NAME_ALLCAPS" hwdep Audio");
@@ -739,7 +768,7 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
#else
-int usX2Y_hwdep_pcm_new(struct snd_card *card)
+int usx2y_hwdep_pcm_new(struct snd_card *card)
{
return 0;
}
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.h b/sound/usb/usx2y/usx2yhwdeppcm.h
index eb5a46466f0e..731b1c5a3474 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.h
+++ b/sound/usb/usx2y/usx2yhwdeppcm.h
@@ -4,7 +4,7 @@
#define MAXSTRIDE 3
#define SSS (((MAXPACK*MAXBUFFERMS*MAXSTRIDE + 4096) / 4096) * 4096)
-struct snd_usX2Y_hwdep_pcm_shm {
+struct snd_usx2y_hwdep_pcm_shm {
char playback[SSS];
char capture0x8[SSS];
char capture0xA[SSS];
@@ -20,4 +20,4 @@ struct snd_usX2Y_hwdep_pcm_shm {
int capture_iso_start;
};
-int usX2Y_hwdep_pcm_new(struct snd_card *card);
+int usx2y_hwdep_pcm_new(struct snd_card *card);
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 33b12aa67cf5..a34d7d9c2a57 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -236,7 +236,7 @@ static void had_write_register(struct snd_intelhad *ctx, u32 reg, u32 val)
* updating AUD_CONFIG register.
* This is because:
* Bit6 of AUD_CONFIG register is writeonly due to a silicon bug on VLV2
- * HDMI IP. As a result a read-modify of AUD_CONFIG regiter will always
+ * HDMI IP. As a result a read-modify of AUD_CONFIG register will always
* clear bit6. AUD_CONFIG[6:4] represents the "channels" field of the
* register. This field should be 1xy binary for configuration with 6 or
* more channels. Read-modify of AUD_CONFIG (Eg. for enabling audio)
@@ -342,7 +342,7 @@ static int had_prog_status_reg(struct snd_pcm_substream *substream,
/*
* function to initialize audio
- * registers and buffer confgiuration registers
+ * registers and buffer configuration registers
* This function is called in the prepare callback
*/
static int had_init_audio_ctrl(struct snd_pcm_substream *substream,
@@ -1790,7 +1790,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
pcm->private_data = ctx;
pcm->info_flags = 0;
strscpy(pcm->name, card->shortname, strlen(card->shortname));
- /* setup the ops for playabck */
+ /* setup the ops for playback */
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &had_pcm_ops);
/* allocate dma pages;
diff --git a/sound/x86/intel_hdmi_audio.h b/sound/x86/intel_hdmi_audio.h
index 0d91bb5dbab7..bb3853195922 100644
--- a/sound/x86/intel_hdmi_audio.h
+++ b/sound/x86/intel_hdmi_audio.h
@@ -96,7 +96,7 @@ struct pcm_stream_info {
* @had_spinlock: driver lock
* @aes_bits: IEC958 status bits
* @buff_done: id of current buffer done intr
- * @dev: platoform device handle
+ * @dev: platform device handle
* @chmap: holds channel map info
*/
struct snd_intelhad {
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 24223adae150..b3edde68bc3e 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -184,6 +184,17 @@ struct kvm_vcpu_events {
__u32 reserved[12];
};
+struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+};
+
+#define KVM_ARM_TAGS_TO_GUEST 0
+#define KVM_ARM_TAGS_FROM_GUEST 1
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
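The kvm_arm_copy_mte_tags layout above pairs with the KVM_ARM_MTE_COPY_TAGS ioctl added in the linux/kvm.h hunk further down in this patch. A minimal userspace sketch, assuming a hypothetical VM fd (vm_fd), a page-aligned guest IPA and a caller-supplied tag buffer:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Copy MTE tags out of the guest: one tag byte per 16-byte granule. */
static long copy_tags_from_guest(int vm_fd, uint64_t ipa, uint64_t len, void *tags)
{
	struct kvm_arm_copy_mte_tags copy = {
		.guest_ipa = ipa,	/* page aligned */
		.length    = len,	/* multiple of the page size */
		.addr      = tags,
		.flags     = KVM_ARM_TAGS_FROM_GUEST,
	};

	/* On success the ioctl reports how many bytes were handled. */
	return ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy);
}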
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index f83a70e07df8..ce2ee8f1e361 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -20,5 +20,6 @@
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index ac37830ae941..d0ce5cfd3ac1 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -108,7 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-/* free ( 3*32+29) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -378,6 +378,7 @@
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 211ba3375ee9..a7c413432b33 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -772,6 +772,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 0662f644aad9..a6c327f8ad9e 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -159,6 +159,19 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
+struct kvm_sregs2 {
+ /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 flags;
+ __u64 pdptrs[4];
+};
+#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
+
/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
__u8 fpr[8][16];
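kvm_sregs2 extends the classic sregs state with the PDPTRs; the matching KVM_GET_SREGS2/KVM_SET_SREGS2 ioctl numbers appear in the linux/kvm.h hunk later in this patch. A rough read-modify-write sketch, assuming a vcpu fd (vcpu_fd):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_and_restore_sregs2(int vcpu_fd)
{
	struct kvm_sregs2 sregs2;
	int i;

	if (ioctl(vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0)
		return -1;

	/* PDPTRs are only meaningful when the kernel flagged them as valid. */
	if (sregs2.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID)
		for (i = 0; i < 4; i++)
			printf("pdptr%d = 0x%llx\n", i,
			       (unsigned long long)sregs2.pdptrs[i]);

	return ioctl(vcpu_fd, KVM_SET_SREGS2, &sregs2);
}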
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
index 554f75fe013c..efa969325ede 100644
--- a/tools/arch/x86/include/uapi/asm/svm.h
+++ b/tools/arch/x86/include/uapi/asm/svm.h
@@ -110,6 +110,9 @@
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+/* Exit code reserved for hypervisor/software use */
+#define SVM_EXIT_SW 0xf0000000
+
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 6cd6080cac04..f45fa992e01d 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -27,7 +27,7 @@ static int xbc_show_value(struct xbc_node *node, bool semicolon)
q = '\'';
else
q = '"';
- printf("%c%s%c%s", q, val, q, node->next ? ", " : eol);
+ printf("%c%s%c%s", q, val, q, xbc_node_is_array(node) ? ", " : eol);
i++;
}
return i;
@@ -35,30 +35,55 @@ static int xbc_show_value(struct xbc_node *node, bool semicolon)
static void xbc_show_compact_tree(void)
{
- struct xbc_node *node, *cnode;
+ struct xbc_node *node, *cnode = NULL, *vnode;
int depth = 0, i;
node = xbc_root_node();
while (node && xbc_node_is_key(node)) {
for (i = 0; i < depth; i++)
printf("\t");
- cnode = xbc_node_get_child(node);
+ if (!cnode)
+ cnode = xbc_node_get_child(node);
while (cnode && xbc_node_is_key(cnode) && !cnode->next) {
+ vnode = xbc_node_get_child(cnode);
+ /*
+ * If @cnode has value and subkeys, this
+ * should show it as below.
+ *
+ * key(@node) {
+ * key(@cnode) = value;
+ * key(@cnode) {
+ * subkeys;
+ * }
+ * }
+ */
+ if (vnode && xbc_node_is_value(vnode) && vnode->next)
+ break;
printf("%s.", xbc_node_get_data(node));
node = cnode;
- cnode = xbc_node_get_child(node);
+ cnode = vnode;
}
if (cnode && xbc_node_is_key(cnode)) {
printf("%s {\n", xbc_node_get_data(node));
depth++;
node = cnode;
+ cnode = NULL;
continue;
} else if (cnode && xbc_node_is_value(cnode)) {
printf("%s = ", xbc_node_get_data(node));
xbc_show_value(cnode, true);
+ /*
+ * If @node has value and subkeys, continue
+ * looping on subkeys with same node.
+ */
+ if (cnode->next) {
+ cnode = xbc_node_get_next(cnode);
+ continue;
+ }
} else {
printf("%s;\n", xbc_node_get_data(node));
}
+ cnode = NULL;
if (node->next) {
node = xbc_node_get_next(node);
@@ -70,10 +95,12 @@ static void xbc_show_compact_tree(void)
return;
if (!xbc_node_get_child(node)->next)
continue;
- depth--;
- for (i = 0; i < depth; i++)
- printf("\t");
- printf("}\n");
+ if (depth) {
+ depth--;
+ for (i = 0; i < depth; i++)
+ printf("\t");
+ printf("}\n");
+ }
}
node = xbc_node_get_next(node);
}
@@ -84,12 +111,12 @@ static void xbc_show_list(void)
char key[XBC_KEYLEN_MAX];
struct xbc_node *leaf;
const char *val;
- int ret = 0;
xbc_for_each_key_value(leaf, val) {
- ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
- if (ret < 0)
+ if (xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX) < 0) {
+ fprintf(stderr, "Failed to compose key\n");
break;
+ }
printf("%s = ", key);
if (!val || val[0] == '\0') {
printf("\"\"\n");
@@ -99,17 +126,6 @@ static void xbc_show_list(void)
}
}
-/* Simple real checksum */
-static int checksum(unsigned char *buf, int len)
-{
- int i, sum = 0;
-
- for (i = 0; i < len; i++)
- sum += buf[i];
-
- return sum;
-}
-
#define PAGE_SIZE 4096
static int load_xbc_fd(int fd, char **buf, int size)
@@ -205,7 +221,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
return ret;
/* Wrong Checksum */
- rcsum = checksum((unsigned char *)*buf, size);
+ rcsum = xbc_calc_checksum(*buf, size);
if (csum != rcsum) {
pr_err("checksum error: %d != %d\n", csum, rcsum);
return -EINVAL;
@@ -354,7 +370,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
return ret;
}
size = strlen(buf) + 1;
- csum = checksum((unsigned char *)buf, size);
+ csum = xbc_calc_checksum(buf, size);
/* Backup the bootconfig data */
data = calloc(size + BOOTCONFIG_ALIGN +
diff --git a/tools/bootconfig/samples/bad-override.bconf b/tools/bootconfig/samples/bad-override.bconf
deleted file mode 100644
index fde6c561512e..000000000000
--- a/tools/bootconfig/samples/bad-override.bconf
+++ /dev/null
@@ -1,3 +0,0 @@
-key.subkey = value
-# We can not override pre-defined subkeys with value
-key := value
diff --git a/tools/bootconfig/samples/bad-override2.bconf b/tools/bootconfig/samples/bad-override2.bconf
deleted file mode 100644
index 688587cb023c..000000000000
--- a/tools/bootconfig/samples/bad-override2.bconf
+++ /dev/null
@@ -1,3 +0,0 @@
-key = value
-# We can not override pre-defined value with subkey
-key.subkey := value
diff --git a/tools/bootconfig/samples/good-mixed-append.bconf b/tools/bootconfig/samples/good-mixed-append.bconf
new file mode 100644
index 000000000000..b99a089a05f5
--- /dev/null
+++ b/tools/bootconfig/samples/good-mixed-append.bconf
@@ -0,0 +1,4 @@
+key = foo
+keyx.subkey = value
+key += bar
+
diff --git a/tools/bootconfig/samples/bad-mixed-kv1.bconf b/tools/bootconfig/samples/good-mixed-kv1.bconf
index 1761547dd05c..1761547dd05c 100644
--- a/tools/bootconfig/samples/bad-mixed-kv1.bconf
+++ b/tools/bootconfig/samples/good-mixed-kv1.bconf
diff --git a/tools/bootconfig/samples/bad-mixed-kv2.bconf b/tools/bootconfig/samples/good-mixed-kv2.bconf
index 6b32e0c3878c..6b32e0c3878c 100644
--- a/tools/bootconfig/samples/bad-mixed-kv2.bconf
+++ b/tools/bootconfig/samples/good-mixed-kv2.bconf
diff --git a/tools/bootconfig/samples/good-mixed-kv3.bconf b/tools/bootconfig/samples/good-mixed-kv3.bconf
new file mode 100644
index 000000000000..2ce2b02224b8
--- /dev/null
+++ b/tools/bootconfig/samples/good-mixed-kv3.bconf
@@ -0,0 +1,6 @@
+# mixed key and subkeys with braces
+key = value
+key {
+ subkey1
+ subkey2 = foo
+}
diff --git a/tools/bootconfig/samples/good-mixed-override.bconf b/tools/bootconfig/samples/good-mixed-override.bconf
new file mode 100644
index 000000000000..18195b2873b6
--- /dev/null
+++ b/tools/bootconfig/samples/good-mixed-override.bconf
@@ -0,0 +1,4 @@
+key.foo = bar
+key = value
+# mixed key value can be overridden
+key := value2
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 39bb322707b4..b11cfc86a3d0 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -97,7 +97,7 @@ clean: bpftool_clean runqslower_clean resolve_btfids_clean
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
$(Q)$(RM) -r -- $(OUTPUT)feature
-install: $(PROGS) bpftool_install runqslower_install
+install: $(PROGS) bpftool_install
$(call QUIET_INSTALL, bpf_jit_disasm)
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
$(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
@@ -118,9 +118,6 @@ bpftool_clean:
runqslower:
$(call descend,runqslower)
-runqslower_install:
- $(call descend,runqslower,install)
-
runqslower_clean:
$(call descend,runqslower,clean)
@@ -131,5 +128,5 @@ resolve_btfids_clean:
$(call descend,resolve_btfids,clean)
.PHONY: all install clean bpftool bpftool_install bpftool_clean \
- runqslower runqslower_install runqslower_clean \
+ runqslower runqslower_clean \
resolve_btfids resolve_btfids_clean
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 1828bba19020..dc6daa193557 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -222,6 +222,11 @@ int mount_bpffs_for_pin(const char *name)
int err = 0;
file = malloc(strlen(name) + 1);
+ if (!file) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
strcpy(file, name);
dir = dirname(file);
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index e7e7eee9f172..24734f2249d6 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -43,11 +43,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
{
va_list ap;
char *s;
+ int err;
va_start(ap, fmt);
- if (vasprintf(&s, fmt, ap) < 0)
- return -1;
+ err = vasprintf(&s, fmt, ap);
va_end(ap);
+ if (err < 0)
+ return -1;
if (!oper_count) {
int i;
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 645530ca7e98..ab9353f2fd46 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -74,7 +74,7 @@ int handle__sched_switch(u64 *ctx)
u32 pid;
/* ivcsw: treat like an enqueue event and store timestamp */
- if (prev->state == TASK_RUNNING)
+ if (prev->__state == TASK_RUNNING)
trace_enqueue(prev);
pid = next->pid;
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 330dbf7509cc..9d959bc24859 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -18,6 +18,8 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
void bitmap_clear(unsigned long *map, unsigned int start, int len);
+int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -170,4 +172,13 @@ static inline int bitmap_equal(const unsigned long *src1,
return __bitmap_equal(src1, src2, nbits);
}
+static inline int bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ else
+ return __bitmap_intersects(src1, src2, nbits);
+}
+
#endif /* _PERF_BITOPS_H */
diff --git a/tools/include/linux/kconfig.h b/tools/include/linux/kconfig.h
index 1555a0c4f345..13b86bd3b746 100644
--- a/tools/include/linux/kconfig.h
+++ b/tools/include/linux/kconfig.h
@@ -4,12 +4,6 @@
/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __BIG_ENDIAN 4321
-#else
-#define __LITTLE_ENDIAN 1234
-#endif
-
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index f94f65d429be..1567a3294c3d 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -72,6 +72,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
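The two new madvise hints prefault page tables without resorting to the MAP_POPULATE mmap flag. A small sketch prefaulting an anonymous mapping for writing; the fallback define is only there for building against older headers:

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_POPULATE_WRITE
#define MADV_POPULATE_WRITE 23		/* value introduced by this patch */
#endif

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Populate (prefault) the whole range up front instead of lazily. */
	if (madvise(p, len, MADV_POPULATE_WRITE))
		perror("madvise");

	munmap(p, len);
	return 0;
}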
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index d2a942086fcb..a9d6fcd95f42 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -863,7 +863,8 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-/* 443 is reserved for quotactl_path */
+#define __NR_quotactl_fd 443
+__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
@@ -872,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
#define __NR_landlock_restrict_self 446
__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+
#undef __NR_syscalls
-#define __NR_syscalls 447
+#define __NR_syscalls 448
/*
* 32 bit systems traditionally used different
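The new __NR_memfd_secret number above is only wired up on architectures that define __ARCH_WANT_MEMFD_SECRET (arm64 opts in earlier in this patch). A hedged sketch of creating and mapping a secret area; the feature is off by default and typically needs secretmem.enable=1 on the kernel command line:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef __NR_memfd_secret
#define __NR_memfd_secret 447
#endif

int main(void)
{
	int fd = syscall(__NR_memfd_secret, 0);
	char *p;

	if (fd < 0 || ftruncate(fd, 4096))
		return 1;

	/* Pages backing this mapping are dropped from the kernel direct map. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "only visible through this mapping");
	return 0;
}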
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 67b94bc3c885..d043752a74cf 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -777,9 +777,12 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -788,6 +791,9 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
@@ -797,6 +803,13 @@ struct drm_get_cap {
* If set to 1, the DRM core will expose atomic properties to userspace. This
* implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
* &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -804,6 +817,10 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -811,8 +828,11 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index ddc47bbf48b6..c2c7759b7d2e 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -62,8 +62,8 @@ extern "C" {
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
-/*
- * i915_user_extension: Base class for defining a chain of extensions
+/**
+ * struct i915_user_extension - Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
@@ -76,12 +76,58 @@ extern "C" {
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
* the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct i915_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct i915_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ *
*/
struct i915_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct i915_user_extension, or zero if the end.
+ */
__u64 next_extension;
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct i915_user_extension.
+ */
__u32 name;
- __u32 flags; /* All undefined bits must be zero. */
- __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+ /**
+ * @flags: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 flags;
+ /**
+ * @rsvd: MBZ
+ *
+ * Reserved for future use; must be zero.
+ */
+ __u32 rsvd[4];
};
/*
@@ -360,6 +406,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_CREATE_EXT 0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -392,6 +439,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -1054,12 +1102,12 @@ struct drm_i915_gem_exec_fence {
__u32 flags;
};
-/**
+/*
* See drm_i915_gem_execbuffer_ext_timeline_fences.
*/
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
-/**
+/*
* This structure describes an array of drm_syncobj and associated points for
* timeline variants of drm_syncobj. It is invalid to append this structure to
* the execbuf if I915_EXEC_FENCE_ARRAY is set.
@@ -1700,7 +1748,7 @@ struct drm_i915_gem_context_param {
__u64 value;
};
-/**
+/*
* Context SSEU programming
*
* It may be necessary for either functional or performance reason to configure
@@ -2067,7 +2115,7 @@ struct drm_i915_perf_open_param {
__u64 properties_ptr;
};
-/**
+/*
* Enable data capture for a stream that was either opened in a disabled state
* via I915_PERF_FLAG_DISABLED or was later disabled via
* I915_PERF_IOCTL_DISABLE.
@@ -2081,7 +2129,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
-/**
+/*
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
@@ -2090,7 +2138,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
-/**
+/*
* Change metrics_set captured by a stream.
*
* If the stream is bound to a specific context, the configuration change
@@ -2103,7 +2151,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
-/**
+/*
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -2151,7 +2199,7 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
-/**
+/*
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
@@ -2172,53 +2220,95 @@ struct drm_i915_perf_oa_config {
__u64 flex_regs_ptr;
};
+/**
+ * struct drm_i915_query_item - An individual query for the kernel to process.
+ *
+ * The behaviour is determined by the @query_id. Note that exactly what
+ * @data_ptr is also depends on the specific @query_id.
+ */
struct drm_i915_query_item {
+ /** @query_id: The id for this query */
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
#define DRM_I915_QUERY_ENGINE_INFO 2
#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
/* Must be kept compact -- no holes and well documented */
- /*
+ /**
+ * @length:
+ *
* When set to zero by userspace, this is filled with the size of the
- * data to be written at the data_ptr pointer. The kernel sets this
+ * data to be written at the @data_ptr pointer. The kernel sets this
* value to a negative value to signal an error on a particular query
* item.
*/
__s32 length;
- /*
+ /**
+ * @flags:
+ *
* When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
*
* When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
- * following :
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * following:
+ *
+ * - DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
- /*
- * Data will be written at the location pointed by data_ptr when the
- * value of length matches the length of the data to be written by the
+ /**
+ * @data_ptr:
+ *
+ * Data will be written at the location pointed by @data_ptr when the
+ * value of @length matches the length of the data to be written by the
* kernel.
*/
__u64 data_ptr;
};
+/**
+ * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
+ * kernel to fill out.
+ *
+ * Note that this is generally a two step process for each struct
+ * drm_i915_query_item in the array:
+ *
+ * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
+ * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
+ * kernel will then fill in the size, in bytes, which tells userspace how much
+ * memory it needs to allocate for the blob (say, for an array of properties).
+ *
+ * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
+ * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
+ * the &drm_i915_query_item.length should still be the same as what the
+ * kernel previously set. At this point the kernel can fill in the blob.
+ *
+ * Note that for some query items it can make sense for userspace to just pass
+ * in a buffer/blob equal to or larger than the required size. In this case only
+ * a single ioctl call is needed. For some smaller query items this can work
+ * quite well.
+ *
+ */
struct drm_i915_query {
+ /** @num_items: The number of elements in the @items_ptr array */
__u32 num_items;
- /*
- * Unused for now. Must be cleared to zero.
+ /**
+ * @flags: Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * This points to an array of num_items drm_i915_query_item structures.
+ /**
+ * @items_ptr:
+ *
+ * Pointer to an array of struct drm_i915_query_item. The number of
+ * array elements is @num_items.
*/
__u64 items_ptr;
};
@@ -2292,21 +2382,21 @@ struct drm_i915_query_topology_info {
* Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
- /** Engine class and instance. */
+ /** @engine: Engine class and instance. */
struct i915_engine_class_instance engine;
- /** Reserved field. */
+ /** @rsvd0: Reserved field. */
__u32 rsvd0;
- /** Engine flags. */
+ /** @flags: Engine flags. */
__u64 flags;
- /** Capabilities of this engine. */
+ /** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
- /** Reserved fields. */
+ /** @rsvd1: Reserved fields. */
__u64 rsvd1[4];
};
@@ -2317,13 +2407,13 @@ struct drm_i915_engine_info {
* an array of struct drm_i915_engine_info structures.
*/
struct drm_i915_query_engine_info {
- /** Number of struct drm_i915_engine_info structs following. */
+ /** @num_engines: Number of struct drm_i915_engine_info structs following. */
__u32 num_engines;
- /** MBZ */
+ /** @rsvd: MBZ */
__u32 rsvd[3];
- /** Marker for drm_i915_engine_info structures. */
+ /** @engines: Marker for drm_i915_engine_info structures. */
struct drm_i915_engine_info engines[];
};
@@ -2377,6 +2467,241 @@ struct drm_i915_query_perf_config {
__u8 data[];
};
+/**
+ * enum drm_i915_gem_memory_class - Supported memory classes
+ */
+enum drm_i915_gem_memory_class {
+ /** @I915_MEMORY_CLASS_SYSTEM: System memory */
+ I915_MEMORY_CLASS_SYSTEM = 0,
+ /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
+ I915_MEMORY_CLASS_DEVICE,
+};
+
+/**
+ * struct drm_i915_gem_memory_class_instance - Identify particular memory region
+ */
+struct drm_i915_gem_memory_class_instance {
+ /** @memory_class: See enum drm_i915_gem_memory_class */
+ __u16 memory_class;
+
+ /** @memory_instance: Which instance */
+ __u16 memory_instance;
+};
+
+/**
+ * struct drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note that we reserve some stuff here for potential future work. As an example
+ * we might want expose the capabilities for a given region, which could include
+ * things like if the region is CPU mappable/accessible, what are the supported
+ * mapping types etc.
+ *
+ * Note that to extend struct drm_i915_memory_region_info and struct
+ * drm_i915_query_memory_regions in the future the plan is to do the following:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_memory_region_info {
+ * struct drm_i915_gem_memory_class_instance region;
+ * union {
+ * __u32 rsvd0;
+ * __u32 new_thing1;
+ * };
+ * ...
+ * union {
+ * __u64 rsvd1[8];
+ * struct {
+ * __u64 new_thing2;
+ * __u64 new_thing3;
+ * ...
+ * };
+ * };
+ * };
+ *
+ * With this things should remain source compatible between versions for
+ * userspace, even as we add new fields.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct drm_i915_memory_region_info {
+ /** @region: The class:instance pair encoding */
+ struct drm_i915_gem_memory_class_instance region;
+
+ /** @rsvd0: MBZ */
+ __u32 rsvd0;
+
+ /** @probed_size: Memory probed by the driver (-1 = unknown) */
+ __u64 probed_size;
+
+ /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
+ __u64 unallocated_size;
+
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+};
+
+/**
+ * struct drm_i915_query_memory_regions
+ *
+ * The region info query enumerates all regions known to the driver by filling
+ * in an array of struct drm_i915_memory_region_info structures.
+ *
+ * Example for getting the list of supported regions:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_memory_regions *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of regions. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with all the region info.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each region in the array
+ * for (i = 0; i < info->num_regions; i++) {
+ * struct drm_i915_memory_region_info mr = info->regions[i];
+ * u16 class = mr.region.memory_class;
+ * u16 instance = mr.region.memory_instance;
+ *
+ * ....
+ * }
+ *
+ * free(info);
+ */
+struct drm_i915_query_memory_regions {
+ /** @num_regions: Number of supported regions */
+ __u32 num_regions;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @regions: Info about each supported region */
+ struct drm_i915_memory_region_info regions[];
+};
+
+/**
+ * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that in the future we want to have our buffer flags here, at least for
+ * the stuff that is immutable. Previously we would have two ioctls, one to
+ * create the object with gem_create, and another to apply various parameters,
+ * however this creates some ambiguity for the params which are considered
+ * immutable. Also in general we're phasing out the various SET/GET ioctls.
+ */
+struct drm_i915_gem_create_ext {
+ /**
+ * @size: Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ *
+ * Note that for some devices we might have further minimum page-size
+ * restrictions (larger than 4K), like for device local-memory.
+ * However in general the final size here should always reflect any
+ * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
+ * extension to place the object in device local-memory.
+ */
+ __u64 size;
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ /** @flags: MBZ */
+ __u32 flags;
+ /**
+ * @extensions: The chain of extensions to apply to this object.
+ *
+ * This will be useful in the future when we need to support several
+ * different extensions, and we need to apply more than one when
+ * creating the object. See struct i915_user_extension.
+ *
+ * If we don't supply any extensions then we get the same old gem_create
+ * behaviour.
+ *
+ * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+ * struct drm_i915_gem_create_ext_memory_regions.
+ */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_memory_regions - The
+ * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
+ *
+ * Set the object with the desired set of placements/regions in priority
+ * order. Each entry must be unique and supported by the device.
+ *
+ * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
+ * an equivalent layout of class:instance pair encodings. See struct
+ * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
+ * query the supported regions for a device.
+ *
+ * As an example, on discrete devices, if we wish to set the placement as
+ * device local-memory we can do something like:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_memory_class_instance region_lmem = {
+ * .memory_class = I915_MEMORY_CLASS_DEVICE,
+ * .memory_instance = 0,
+ * };
+ * struct drm_i915_gem_create_ext_memory_regions regions = {
+ * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+ * .regions = (uintptr_t)&region_lmem,
+ * .num_regions = 1,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = 16 * PAGE_SIZE,
+ * .extensions = (uintptr_t)&regions,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ *
+ * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
+ * along with the final object size in &drm_i915_gem_create_ext.size, which
+ * should account for any rounding up, if required.
+ */
+struct drm_i915_gem_create_ext_memory_regions {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @num_regions: Number of elements in the @regions array. */
+ __u32 num_regions;
+ /**
+ * @regions: The regions/placements array.
+ *
+ * An array of struct drm_i915_gem_memory_class_instance.
+ */
+ __u64 regions;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 79d9c44d1ad7..d9e4aabcb31a 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -280,6 +280,9 @@ struct kvm_xen_exit {
/* Encounter unexpected vm-exit reason */
#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+/* Flags that describe what fields in emulation_failure hold valid data. */
+#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
@@ -383,6 +386,25 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /*
+ * KVM_INTERNAL_ERROR_EMULATION
+ *
+ * "struct emulation_failure" is an overlay of "struct internal"
+ * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
+ * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
+ * sub-types, this struct is ABI! It also needs to be backwards
+ * compatible with "struct internal". Take special care that
+ * "ndata" is correct, that new fields are enumerated in "flags",
+ * and that each flag enumerates fields that are 64-bit aligned
+ * and sized (so that ndata+internal.data[] is valid/accurate).
+ */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 flags;
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ } emulation_failure;
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
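A sketch of how a VMM loop might consume the new emulation_failure exit data; run is assumed to be the vcpu's mmap()ed struct kvm_run, and insn_bytes are only trusted when the corresponding flag is set:

#include <stdio.h>
#include <linux/kvm.h>

static void report_emulation_failure(const struct kvm_run *run)
{
	int i;

	if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
	    run->emulation_failure.suberror != KVM_INTERNAL_ERROR_EMULATION)
		return;

	/* insn_size/insn_bytes are only valid when the flag is present. */
	if (!(run->emulation_failure.flags &
	      KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
		return;

	fprintf(stderr, "emulation failed, insn:");
	for (i = 0; i < run->emulation_failure.insn_size; i++)
		fprintf(stderr, " %02x", run->emulation_failure.insn_bytes[i]);
	fprintf(stderr, "\n");
}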
@@ -1083,6 +1105,13 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_SGX_ATTRIBUTE 196
#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
#define KVM_CAP_PTP_KVM 198
+#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
+#define KVM_CAP_SREGS2 200
+#define KVM_CAP_EXIT_HYPERCALL 201
+#define KVM_CAP_PPC_RPT_INVALIDATE 202
+#define KVM_CAP_BINARY_STATS_FD 203
+#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
+#define KVM_CAP_ARM_MTE 205
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1428,6 +1457,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
+#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1621,6 +1651,9 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
+#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
+
struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
@@ -1899,4 +1932,76 @@ struct kvm_dirty_gfn {
#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
+/**
+ * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
+ * @flags: Some extra information for header, always 0 for now.
+ * @name_size: The size in bytes of the memory which contains statistics
+ * name string including trailing '\0'. The memory is allocated
+ * at the end of the statistics descriptor.
+ * @num_desc: The number of statistics the vm or vcpu has.
+ * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed
+ * by vm/vcpu stats fd.
+ * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file
+ * pointed by vm/vcpu stats fd.
+ * @data_offset: The offset of the vm/vcpu stats' data block in the file
+ * pointed by vm/vcpu stats fd.
+ *
+ * This is the header userspace needs to read from stats fd before any other
+ * readings. It is used by userspace to discover all the information about the
+ * vm/vcpu's binary statistics.
+ * Userspace reads this header from the start of the vm/vcpu's stats fd.
+ */
+struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+};
+
+#define KVM_STATS_TYPE_SHIFT 0
+#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_PEAK
+
+#define KVM_STATS_UNIT_SHIFT 4
+#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+
+#define KVM_STATS_BASE_SHIFT 8
+#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
+
+/**
+ * struct kvm_stats_desc - Descriptor of a KVM statistic.
+ * @flags: Annotations of the stats, like type, unit, etc.
+ * @exponent: Used together with @flags to determine the unit.
+ * @size: The number of data items for this stat.
+ * Every data item is of type __u64.
+ * @offset: The offset of the stats to the start of stat structure in
+ * structure kvm or kvm_vcpu.
+ * @unused: Unused field for future usage. Always 0 for now.
+ * @name: The name string for the stats. Its size is indicated by the
+ * &kvm_stats_header->name_size.
+ */
+struct kvm_stats_desc {
+ __u32 flags;
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 unused;
+ char name[];
+};
+
+#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+
#endif /* __LINUX_KVM_H */
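Putting the two structs together, a hedged sketch of how userspace might pull the descriptor table from a vcpu's binary stats fd; the data block at data_offset would then be read per descriptor:

#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_stats_desc *read_stats_descs(int vcpu_fd,
					       struct kvm_stats_header *hdr)
{
	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
	size_t desc_sz, total;
	void *descs;

	if (stats_fd < 0)
		return NULL;

	/* The header always sits at offset 0 of the stats fd. */
	if (pread(stats_fd, hdr, sizeof(*hdr), 0) != sizeof(*hdr))
		return NULL;

	/* Each descriptor is followed by its name (name_size bytes). */
	desc_sz = sizeof(struct kvm_stats_desc) + hdr->name_size;
	total = desc_sz * hdr->num_desc;

	descs = malloc(total);
	if (descs && pread(stats_fd, descs, total, hdr->desc_offset) != (ssize_t)total) {
		free(descs);
		descs = NULL;
	}
	return descs;
}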
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index e6524ead2b7b..dd7a166fdf9c 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -120,6 +120,7 @@ enum fsconfig_command {
#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
/*
* mount_setattr()
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 535a7229e1d9..d17c061950df 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -710,7 +710,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -736,12 +736,38 @@ struct snd_rawmidi_info {
unsigned char reserved[64]; /* reserved for future use */
};
+#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0
+#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3
+#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3)
+
+#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16
+
+struct snd_rawmidi_framing_tstamp {
+ /* For now, frame_type is always 0. MIDI 2.0 is expected to add new
+ * types here. Applications are expected to skip unknown frame types.
+ */
+ __u8 frame_type;
+ __u8 length; /* number of valid bytes in data field */
+ __u8 reserved[2];
+ __u32 tv_nsec; /* nanoseconds */
+ __u64 tv_sec; /* seconds */
+ __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH];
+} __packed;
+
struct snd_rawmidi_params {
int stream;
size_t buffer_size; /* queue size in bytes */
size_t avail_min; /* minimum avail bytes for wakeup */
unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
- unsigned char reserved[16]; /* reserved for future use */
+ unsigned int mode; /* For input data only, frame incoming data */
+ unsigned char reserved[12]; /* reserved for future use */
};
#ifndef __KERNEL__
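With framing enabled (SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP plus a clock selection in snd_rawmidi_params.mode), every read() from the input device yields a sequence of fixed 32-byte frames. A hedged decoding sketch:

#include <stdio.h>
#include <sound/asound.h>

static void dump_rawmidi_frames(const void *buf, size_t len)
{
	const struct snd_rawmidi_framing_tstamp *f = buf;

	for (; len >= sizeof(*f); f++, len -= sizeof(*f)) {
		if (f->frame_type != 0)		/* skip unknown frame types */
			continue;
		printf("%llu.%09u: %u MIDI byte(s)\n",
		       (unsigned long long)f->tv_sec, f->tv_nsec, f->length);
	}
}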
diff --git a/tools/io_uring/io_uring-cp.c b/tools/io_uring/io_uring-cp.c
index 81461813ec62..d9bd6f5f8f46 100644
--- a/tools/io_uring/io_uring-cp.c
+++ b/tools/io_uring/io_uring-cp.c
@@ -131,8 +131,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
writes = reads = offset = 0;
while (insize || write_left) {
- unsigned long had_reads;
- int got_comp;
+ int had_reads, got_comp;
/*
* Queue up as many reads as we can
@@ -174,8 +173,13 @@ static int copy_file(struct io_uring *ring, off_t insize)
if (!got_comp) {
ret = io_uring_wait_cqe(ring, &cqe);
got_comp = 1;
- } else
+ } else {
ret = io_uring_peek_cqe(ring, &cqe);
+ if (ret == -EAGAIN) {
+ cqe = NULL;
+ ret = 0;
+ }
+ }
if (ret < 0) {
fprintf(stderr, "io_uring_peek_cqe: %s\n",
strerror(-ret));
@@ -194,7 +198,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
fprintf(stderr, "cqe failed: %s\n",
strerror(-cqe->res));
return 1;
- } else if ((size_t) cqe->res != data->iov.iov_len) {
+ } else if (cqe->res != data->iov.iov_len) {
/* Short read/write, adjust and requeue */
data->iov.iov_base += cqe->res;
data->iov.iov_len -= cqe->res;
@@ -221,6 +225,25 @@ static int copy_file(struct io_uring *ring, off_t insize)
}
}
+ /* wait out pending writes */
+ while (writes) {
+ struct io_data *data;
+
+ ret = io_uring_wait_cqe(ring, &cqe);
+ if (ret) {
+ fprintf(stderr, "wait_cqe=%d\n", ret);
+ return 1;
+ }
+ if (cqe->res < 0) {
+ fprintf(stderr, "write res=%d\n", cqe->res);
+ return 1;
+ }
+ data = io_uring_cqe_get_data(cqe);
+ free(data);
+ writes--;
+ io_uring_cqe_seen(ring, cqe);
+ }
+
return 0;
}
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index f4e914712b6f..db466ef7be9d 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -86,3 +86,17 @@ int __bitmap_equal(const unsigned long *bitmap1,
return 1;
}
+
+int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & bitmap2[k])
+ return 1;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return 1;
+ return 0;
+}
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index b46760b93bb4..7ff3d5ce44f9 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -804,6 +804,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
btf->nr_types = 0;
btf->start_id = 1;
btf->start_str_off = 0;
+ btf->fd = -1;
if (base_btf) {
btf->base_btf = base_btf;
@@ -832,8 +833,6 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
if (err)
goto done;
- btf->fd = -1;
-
done:
if (err) {
btf__free(btf);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1e04ce724240..6f5e2757bb3c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10136,7 +10136,7 @@ int bpf_link__unpin(struct bpf_link *link)
err = unlink(link->pin_path);
if (err != 0)
- return libbpf_err_errno(err);
+ return -errno;
pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
zfree(&link->pin_path);
@@ -11197,7 +11197,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
if (cnt < 0)
- return libbpf_err_errno(cnt);
+ return -errno;
for (i = 0; i < cnt; i++) {
struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index ecaae2927ab8..cd8c703dde71 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
break;
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+ break;
case BPF_PROG_TYPE_SK_LOOKUP:
xattr.expected_attach_type = BPF_SK_LOOKUP;
break;
@@ -104,7 +107,6 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
- case BPF_PROG_TYPE_CGROUP_SOCKOPT:
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_STRUCT_OPS:
case BPF_PROG_TYPE_EXT:
diff --git a/tools/lib/perf/Build b/tools/lib/perf/Build
index 2ef9a4ec6d99..e8f5b7fb9973 100644
--- a/tools/lib/perf/Build
+++ b/tools/lib/perf/Build
@@ -11,3 +11,5 @@ libperf-y += lib.o
$(OUTPUT)zalloc.o: ../../lib/zalloc.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+
+tests-y += tests/
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 3718d65cffac..08fe6e3c4089 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -52,6 +52,8 @@ else
Q = @
endif
+TEST_ARGS := $(if $(V),-v)
+
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
@@ -136,12 +138,30 @@ all: fixdep
clean: $(LIBAPI)-clean
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
- *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC)
- $(Q)$(MAKE) -C tests clean
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \
+ $(TESTS_STATIC) $(TESTS_SHARED)
+
+TESTS_IN = tests-in.o
+
+TESTS_STATIC = $(OUTPUT)tests-static
+TESTS_SHARED = $(OUTPUT)tests-shared
+
+$(TESTS_IN): FORCE
+ $(Q)$(MAKE) $(build)=tests
+
+$(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ $^
+
+$(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ -L$(if $(OUTPUT),$(OUTPUT),.) $^ -lperf
+
+make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
-tests: libs
- $(Q)$(MAKE) -C tests
- $(Q)$(MAKE) -C tests run
+tests: make-tests
+ @echo "running static:"
+ @./$(TESTS_STATIC) $(TEST_ARGS)
+ @echo "running dynamic:"
+ @LD_LIBRARY_PATH=. ./$(TESTS_SHARED) $(TEST_ARGS)
$(LIBPERF_PC):
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index a0aaf385cbb5..e37dfad31383 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -66,6 +66,7 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
+ evsel->idx = evlist->nr_entries;
list_add_tail(&evsel->node, &evlist->entries);
evlist->nr_entries += 1;
__perf_evlist__propagate_maps(evlist, evsel);
@@ -641,3 +642,24 @@ perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
+
+void __perf_evlist__set_leader(struct list_head *list)
+{
+ struct perf_evsel *evsel, *leader;
+
+ leader = list_entry(list->next, struct perf_evsel, node);
+ evsel = list_entry(list->prev, struct perf_evsel, node);
+
+ leader->nr_members = evsel->idx - leader->idx + 1;
+
+ __perf_evlist__for_each_entry(list, evsel)
+ evsel->leader = leader;
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries) {
+ evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
+ __perf_evlist__set_leader(&evlist->entries);
+ }
+}
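The new perf_evlist__set_leader() export lets libperf users form an event group before open(); during open the sibling then inherits the leader's fd via get_group_fd() in evsel.c. A rough sketch using the public API, with error handling trimmed and names as in the installed perf/*.h headers:

#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>

static struct perf_evlist *open_grouped_sw_events(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_thread_map *threads = perf_thread_map__new_dummy();

	perf_evlist__add(evlist, perf_evsel__new(&attr));	/* becomes the leader */
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	perf_evlist__add(evlist, perf_evsel__new(&attr));	/* sibling */

	/* Single group: first event leads, open() passes its fd as group_fd. */
	perf_evlist__set_leader(evlist);

	perf_thread_map__set_pid(threads, 0, 0);		/* measure ourselves */
	perf_evlist__set_maps(evlist, NULL, threads);

	return perf_evlist__open(evlist) < 0 ? NULL : evlist;
}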
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index bd8c2f19ef74..d8886720e83d 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -17,11 +17,15 @@
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <asm/bug.h>
-void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx)
{
INIT_LIST_HEAD(&evsel->node);
evsel->attr = *attr;
+ evsel->idx = idx;
+ evsel->leader = evsel;
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
@@ -29,7 +33,7 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
if (evsel != NULL)
- perf_evsel__init(evsel, attr);
+ perf_evsel__init(evsel, attr, 0);
return evsel;
}
@@ -73,6 +77,32 @@ sys_perf_event_open(struct perf_event_attr *attr,
return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
+static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int fd;
+
+ if (evsel == leader) {
+ *group_fd = -1;
+ return 0;
+ }
+
+ /*
+ * Leader must be already processed/open,
+ * if not it's a bug.
+ */
+ if (!leader->fd)
+ return -ENOTCONN;
+
+ fd = FD(leader, cpu, thread);
+ if (fd == -1)
+ return -EBADF;
+
+ *group_fd = fd;
+
+ return 0;
+}
+
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
@@ -108,11 +138,15 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
- int fd;
+ int fd, group_fd;
+
+ err = get_group_fd(evsel, cpu, thread, &group_fd);
+ if (err < 0)
+ return err;
fd = sys_perf_event_open(&evsel->attr,
threads->map[thread].pid,
- cpus->map[cpu], -1, 0);
+ cpus->map[cpu], group_fd, 0);
if (fd < 0)
return -errno;
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index 212c29063ad4..f366dbad6a88 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -16,6 +16,7 @@ struct perf_mmap_param;
struct perf_evlist {
struct list_head entries;
int nr_entries;
+ int nr_groups;
bool has_user_cpus;
struct perf_cpu_map *cpus;
struct perf_cpu_map *all_cpus;
@@ -126,4 +127,5 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
+void __perf_evlist__set_leader(struct list_head *list);
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index 1c067d088bc6..1f3eacbad2e8 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -45,13 +45,16 @@ struct perf_evsel {
struct xyarray *sample_id;
u64 *id;
u32 ids;
+ struct perf_evsel *leader;
/* parse modifier helper */
int nr_members;
bool system_wide;
+ int idx;
};
-void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr);
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
diff --git a/tools/lib/perf/include/internal/tests.h b/tools/lib/perf/include/internal/tests.h
index 29425c2dabe1..61052099225b 100644
--- a/tools/lib/perf/include/internal/tests.h
+++ b/tools/lib/perf/include/internal/tests.h
@@ -5,8 +5,8 @@
#include <stdio.h>
#include <unistd.h>
-int tests_failed;
-int tests_verbose;
+extern int tests_failed;
+extern int tests_verbose;
static inline int get_verbose(char **argv, int argc)
{
diff --git a/tools/lib/perf/include/perf/evlist.h b/tools/lib/perf/include/perf/evlist.h
index 0a7479dc13bf..9ca399d49bb4 100644
--- a/tools/lib/perf/include/perf/evlist.h
+++ b/tools/lib/perf/include/perf/evlist.h
@@ -46,4 +46,5 @@ LIBPERF_API struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
(pos) != NULL; \
(pos) = perf_evlist__next_mmap((evlist), (pos), overwrite))
+LIBPERF_API void perf_evlist__set_leader(struct perf_evlist *evlist);
#endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index c0c7ceb11060..71468606e8a7 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -45,6 +45,7 @@ LIBPERF_0.0.1 {
perf_evlist__munmap;
perf_evlist__filter_pollfd;
perf_evlist__next_mmap;
+ perf_evlist__set_leader;
perf_mmap__consume;
perf_mmap__read_init;
perf_mmap__read_done;
diff --git a/tools/lib/perf/tests/Build b/tools/lib/perf/tests/Build
new file mode 100644
index 000000000000..56e81378d443
--- /dev/null
+++ b/tools/lib/perf/tests/Build
@@ -0,0 +1,5 @@
+tests-y += main.o
+tests-y += test-evsel.o
+tests-y += test-evlist.o
+tests-y += test-cpumap.o
+tests-y += test-threadmap.o
diff --git a/tools/lib/perf/tests/Makefile b/tools/lib/perf/tests/Makefile
deleted file mode 100644
index b536cc9a26dd..000000000000
--- a/tools/lib/perf/tests/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-TESTS = test-cpumap test-threadmap test-evlist test-evsel
-
-TESTS_SO := $(addsuffix -so,$(TESTS))
-TESTS_A := $(addsuffix -a,$(TESTS))
-
-TEST_ARGS := $(if $(V),-v)
-
-# Set compile option CFLAGS
-ifdef EXTRA_CFLAGS
- CFLAGS := $(EXTRA_CFLAGS)
-else
- CFLAGS := -g -Wall
-endif
-
-all:
-
-include $(srctree)/tools/scripts/Makefile.include
-
-INCLUDE = -I$(srctree)/tools/lib/perf/include -I$(srctree)/tools/include -I$(srctree)/tools/lib
-
-$(TESTS_A): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a $(LIBAPI)
-
-$(TESTS_SO): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. -o $@ $(subst -so,.c,$@) $(LIBAPI) -lperf
-
-all: $(TESTS_A) $(TESTS_SO)
-
-run:
- @echo "running static:"
- @for i in $(TESTS_A); do ./$$i $(TEST_ARGS); done
- @echo "running dynamic:"
- @for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i $(TEST_ARGS); done
-
-clean:
- $(call QUIET_CLEAN, tests)$(RM) $(TESTS_A) $(TESTS_SO)
-
-.PHONY: all clean FORCE
diff --git a/tools/lib/perf/tests/main.c b/tools/lib/perf/tests/main.c
new file mode 100644
index 000000000000..56423fd4db19
--- /dev/null
+++ b/tools/lib/perf/tests/main.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <internal/tests.h>
+#include "tests.h"
+
+int tests_failed;
+int tests_verbose;
+
+int main(int argc, char **argv)
+{
+ __T("test cpumap", !test_cpumap(argc, argv));
+ __T("test threadmap", !test_threadmap(argc, argv));
+ __T("test evlist", !test_evlist(argc, argv));
+ __T("test evsel", !test_evsel(argc, argv));
+ return 0;
+}
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index c70e9e03af3e..d39378eaf897 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <perf/cpumap.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -10,7 +11,7 @@ static int libperf_print(enum libperf_print_level level,
return vfprintf(stderr, fmt, ap);
}
-int main(int argc, char **argv)
+int test_cpumap(int argc, char **argv)
{
struct perf_cpu_map *cpus;
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index e2ac0b7f432e..c67c83399170 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -18,6 +18,8 @@
#include <perf/event.h>
#include <internal/tests.h>
#include <api/fs/fs.h>
+#include "tests.h"
+#include <internal/evsel.h>
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -29,7 +31,7 @@ static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -46,7 +48,7 @@ static int test_stat_cpu(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -56,6 +58,10 @@ static int test_stat_cpu(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, cpus, NULL);
err = perf_evlist__open(evlist);
@@ -84,7 +90,7 @@ static int test_stat_thread(void)
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -103,7 +109,7 @@ static int test_stat_thread(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -113,6 +119,10 @@ static int test_stat_thread(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, NULL, threads);
err = perf_evlist__open(evlist);
@@ -135,7 +145,7 @@ static int test_stat_thread_enable(void)
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -156,7 +166,7 @@ static int test_stat_thread_enable(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -166,6 +176,10 @@ static int test_stat_thread_enable(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, NULL, threads);
err = perf_evlist__open(evlist);
@@ -253,6 +267,7 @@ static int test_mmap_thread(void)
evsel = perf_evsel__new(&attr);
__T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
perf_evlist__add(evlist, evsel);
@@ -338,6 +353,7 @@ static int test_mmap_cpus(void)
evsel = perf_evsel__new(&attr);
__T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
perf_evlist__add(evlist, evsel);
@@ -397,7 +413,7 @@ static int test_mmap_cpus(void)
return 0;
}
-int main(int argc, char **argv)
+int test_evlist(int argc, char **argv)
{
__T_START;
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 288b5feaefe2..a184e4861627 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -6,6 +6,7 @@
#include <perf/threadmap.h>
#include <perf/evsel.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -184,7 +185,7 @@ static int test_stat_user_read(int event)
return 0;
}
-int main(int argc, char **argv)
+int test_evsel(int argc, char **argv)
{
__T_START;
diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c
index 384471441b48..5e2a0291e94c 100644
--- a/tools/lib/perf/tests/test-threadmap.c
+++ b/tools/lib/perf/tests/test-threadmap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <perf/threadmap.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -10,7 +11,7 @@ static int libperf_print(enum libperf_print_level level,
return vfprintf(stderr, fmt, ap);
}
-int main(int argc, char **argv)
+int test_threadmap(int argc, char **argv)
{
struct perf_thread_map *threads;
diff --git a/tools/lib/perf/tests/tests.h b/tools/lib/perf/tests/tests.h
new file mode 100644
index 000000000000..604838f21b2b
--- /dev/null
+++ b/tools/lib/perf/tests/tests.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TESTS_H
+#define TESTS_H
+
+int test_cpumap(int argc, char **argv);
+int test_threadmap(int argc, char **argv);
+int test_evlist(int argc, char **argv);
+int test_evsel(int argc, char **argv);
+
+#endif /* TESTS_H */
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index f9d610d5a1a4..5d72f3112e56 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -2510,7 +2510,7 @@ they behave as follows:
smp_mb__after_atomic() orders po-earlier atomic updates and
the events preceding them against all po-later events;
- smp_mb_after_spinlock() orders po-earlier lock acquisition
+ smp_mb__after_spinlock() orders po-earlier lock acquisition
events and the events preceding them against all po-later
events.
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 0f1005209a2b..2d586fe5e4c5 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -20,6 +20,7 @@
L synthesize last branch entries on existing event records
s skip initial number of events
q quicker (less detailed) decoding
+ Z prefer to ignore timestamps (so-called "timeless" decoding)
The default is all events i.e. the same as --itrace=ibxwpe,
except for perf script where it is --itrace=ce
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 80c1be5d566c..33c2521cba4a 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -58,6 +58,13 @@ OPTIONS
--ignore-vmlinux::
Ignore vmlinux files.
+--itrace::
+ Options for decoding instruction tracing data. The options are:
+
+include::itrace.txt[]
+
+ To disable decoding entirely, use --no-itrace.
+
-m::
--modules::
Load module symbols. WARNING: use only with -k and LIVE kernel.
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index b0872c801866..3bb75c1f25e8 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -706,6 +706,12 @@ intel-pt.*::
If set, Intel PT decoder will set the mispred flag on all
branches.
+ intel-pt.max-loops::
+ If set and non-zero, the maximum number of unconditional
+ branches decoded without consuming any trace packets. If
+ the maximum is exceeded there will be a "Never-ending loop"
+ error. The default is 100000.
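+
+		For example, the limit could be raised with a config entry such
+		as the following (an illustrative snippet, assuming the usual
+		~/.perfconfig section/key syntax):
+
+			[intel-pt]
+				max-loops = 200000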
+
auxtrace.*::
auxtrace.dumpdir::
diff --git a/tools/perf/Documentation/perf-dlfilter.txt b/tools/perf/Documentation/perf-dlfilter.txt
new file mode 100644
index 000000000000..02842cb4cf90
--- /dev/null
+++ b/tools/perf/Documentation/perf-dlfilter.txt
@@ -0,0 +1,251 @@
+perf-dlfilter(1)
+================
+
+NAME
+----
+perf-dlfilter - Filter sample events using a dynamically loaded shared
+object file
+
+SYNOPSIS
+--------
+[verse]
+'perf script' [--dlfilter file.so ] [ --dlarg arg ]...
+
+DESCRIPTION
+-----------
+
+This option is used to process data through a custom filter provided by a
+dynamically loaded shared object file. Arguments can be passed using --dlarg
+and retrieved using perf_dlfilter_fns.args().
+
+If 'file.so' does not contain "/", then it will be found either in the current
+directory, or in the perf tools exec path, which is ~/libexec/perf-core/dlfilters
+for a local build and install (refer to perf --exec-path), or in the dynamic
+linker paths.
+
+API
+---
+
+The API for filtering consists of the following:
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+
+const struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int start(void **data, void *ctx);
+int stop(void *data, void *ctx);
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+const char *filter_description(const char **long_description);
+----
+
+If implemented, 'start' will be called at the beginning, before any
+calls to 'filter_event' or 'filter_event_early'. Return 0 to indicate success,
+or return a negative error code. '*data' can be assigned for use by other
+functions. 'ctx' is needed for calls to perf_dlfilter_fns, but most
+perf_dlfilter_fns are not valid when called from 'start'.
+
+If implemented, 'stop' will be called at the end, after any calls to
+'filter_event' or 'filter_event_early'. Return 0 to indicate success, or
+return a negative error code. 'data' is set by 'start'. 'ctx' is needed
+for calls to perf_dlfilter_fns, but most perf_dlfilter_fns are not valid
+when called from 'stop'.
+
+If implemented, 'filter_event' will be called for each sample event.
+Return 0 to keep the sample event, 1 to filter it out, or return a negative
+error code. 'data' is set by 'start'. 'ctx' is needed for calls to
+'perf_dlfilter_fns'.
+
+'filter_event_early' is the same as 'filter_event' except it is called before
+internal filtering.
+
+If implemented, 'filter_description' should return a one-line description
+of the filter, and optionally a longer description.
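+
+For illustration, a minimal 'filter_description' (a sketch, not taken from an
+existing filter, and relying on the usual includes and declarations shown in
+the EXAMPLE section below) might look like:
+
+[source,c]
+----
+const char *filter_description(const char **long_description)
+{
+	*long_description = "Keep only samples that resolve to symbol foo";
+	return "foo filter";
+}
+----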
+
+The perf_dlfilter_sample structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+'filter_event' and 'filter_event_early' are passed a perf_dlfilter_sample
+structure, which contains the following fields:
+[source,c]
+----
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+};
+----
+
+The perf_dlfilter_fns structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'perf_dlfilter_fns' structure is populated with function pointers when the
+file is loaded. The functions can be called by 'filter_event' or
+'filter_event_early'.
+
+[source,c]
+----
+struct perf_dlfilter_fns {
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ char **(*args)(void *ctx, int *dlargc);
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ struct perf_event_attr *(*attr)(void *ctx);
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ void *(*reserved[120])(void *);
+};
+----
+
+'resolve_ip' returns information about ip.
+
+'resolve_addr' returns information about addr (if addr_correlates_sym).
+
+'args' returns arguments from --dlarg options.
+
+'resolve_address' provides information about 'address'. al->size must be set
+before calling. Returns 0 on success, -1 otherwise.
+
+'insn' returns instruction bytes and length.
+
+'srcline' returns the source file name and line number.
+
+'attr' returns perf_event_attr, refer <linux/perf_event.h>.
+
+'object_code' reads object code and returns the number of bytes read.
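+
+As an illustrative sketch (not from the perf sources, and assuming 'srcline'
+returns NULL when no source information is available), a filter could use
+'srcline' to keep only samples with resolvable source line information:
+
+[source,c]
+----
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+	__u32 line;
+
+	/* Keep (return 0) only if a source line can be resolved */
+	return perf_dlfilter_fns.srcline(ctx, &line) ? 0 : 1;
+}
+----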
+
+The perf_dlfilter_al structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'perf_dlfilter_al' structure contains information about an address.
+
+[source,c]
+----
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* true if this sample event will be filtered out */
+ const char *comm;
+};
+----
+
+perf_dlfilter_sample flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'flags' member of 'perf_dlfilter_sample' corresponds with the flags field
+of perf script. The bits of the flags are as follows:
+
+[source,c]
+----
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+----
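+
+For example (an illustrative fragment inside 'filter_event' only), a filter
+could keep only samples that represent calls by testing the flags:
+
+[source,c]
+----
+	if (!(sample->flags & PERF_DLFILTER_FLAG_CALL))
+		return 1;	/* filter out anything that is not a call */
+----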
+
+EXAMPLE
+-------
+
+Filter out everything except branches from "foo" to "bar":
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+#include <string.h>
+
+const struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+ const struct perf_dlfilter_al *addr_al;
+
+ if (!sample->ip || !sample->addr_correlates_sym)
+ return 1;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al || !al->sym || strcmp(al->sym, "foo"))
+ return 1;
+
+ addr_al = perf_dlfilter_fns.resolve_addr(ctx);
+ if (!addr_al || !addr_al->sym || strcmp(addr_al->sym, "bar"))
+ return 1;
+
+ return 0;
+}
+----
+
+To build the shared object, assuming perf has been installed for the local user,
+i.e. perf_dlfilter.h is in ~/include/perf:
+
+ gcc -c -I ~/include -fpic dlfilter-example.c
+ gcc -shared -o dlfilter-example.so dlfilter-example.o
+
+To use the filter with perf script:
+
+ perf script --dlfilter dlfilter-example.so
+
+SEE ALSO
+--------
+linkperf:perf-script[1]
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index a8eccff21281..91108fe3ad5f 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -68,6 +68,16 @@ include::itrace.txt[]
--force::
Don't complain, do it.
+--vm-time-correlation[=OPTIONS]::
+ Some architectures may capture AUX area data which contains timestamps
+ affected by virtualization. This option will update those timestamps
+ in place, to correlate with host timestamps. The in-place update means
+ that an output file is not specified, and instead the input file is
+ modified. The options are architecture specific, except that they may
+ start with "dry-run" which will cause the file to be processed but
+ without updating it. Currently this option is supported only by
+	Intel PT, refer to linkperf:perf-intel-pt[1].
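+
+	For example, to check what would be updated without modifying the
+	file:
+
+	  perf inject -i perf.data --vm-time-correlation=dry-run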
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1],
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index bcf3eca5afbe..184ba62420f0 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -174,7 +174,11 @@ Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,
and to script exported-sql-viewer.py for an example of using the database.
There is also script intel-pt-events.py which provides an example of how to
-unpack the raw data for power events and PTWRITE.
+unpack the raw data for power events and PTWRITE. The script also displays
+branches, and supports two additional modes selected by option:
+
+ --insn-trace - instruction trace
+ --src-trace - source trace
As mentioned above, it is easy to capture too much data. One way to limit the
data captured is to use 'snapshot' mode which is explained further below.
@@ -869,6 +873,7 @@ The letters are:
L synthesize last branch entries on existing event records
s skip initial number of events
q quicker (less detailed) decoding
+ Z prefer to ignore timestamps (so-called "timeless" decoding)
"Instructions" events look like they were recorded by "perf record -e
instructions".
@@ -1062,6 +1067,10 @@ What *will* be decoded with the qq option:
- instruction pointer associated with PSB packets
+The Z option is equivalent to having recorded a trace without TSC
+(i.e. config term tsc=0). It can be useful to avoid timestamp issues when
+decoding a trace of a virtual machine.
+
dump option
~~~~~~~~~~~
@@ -1150,8 +1159,9 @@ include::build-xed.txt[]
Tracing Virtual Machines
------------------------
-Currently, only kernel tracing is supported and only with "timeless" decoding
-i.e. no TSC timestamps
+Currently, only kernel tracing is supported and only with either "timeless" decoding
+(i.e. no TSC timestamps) or VM Time Correlation. VM Time Correlation is an extra step
+using 'perf inject' and requires unchanging VMX TSC Offset and no VMX TSC Scaling.
Other limitations and caveats
@@ -1162,7 +1172,7 @@ Other limitations and caveats
Guest VCPU is unknown but may be able to be inferred from the host thread
Callchains are not supported
-Example
+Example using "timeless" decoding
Start VM
@@ -1226,6 +1236,107 @@ perf script can be used to provide an instruction trace
:1440 1440 ffffffffbb74603c clockevents_program_event+0x4c ([guest.kernel.kallsyms]) popq %rbx
:1440 1440 ffffffffbb74603d clockevents_program_event+0x4d ([guest.kernel.kallsyms]) popq %r12
+Example using VM Time Correlation
+
+Start VM
+
+ $ sudo virsh start kubuntu20.04
+ Domain kubuntu20.04 started
+
+Mount the guest file system. Note that sshfs needs -o direct_io to enable reading of proc files, and root access is needed to read /proc/kcore.
+
+ $ mkdir -p vm0
+ $ sshfs -o direct_io root@vm0:/ vm0
+
+Copy the guest /proc/kallsyms, /proc/modules and /proc/kcore
+
+ $ perf buildid-cache -v --kcore vm0/proc/kcore
+ same kcore found in /home/user/.debug/[kernel.kcore]/cc9c55a98c5e4ec0aeda69302554aabed5cd6491/2021021312450777
+ $ KALLSYMS=/home/user/.debug/\[kernel.kcore\]/cc9c55a98c5e4ec0aeda69302554aabed5cd6491/2021021312450777/kallsyms
+
+Find the VM process
+
+ $ ps -eLl | grep 'KVM\|PID'
+ F S UID PID PPID LWP C PRI NI ADDR SZ WCHAN TTY TIME CMD
+ 3 S 64055 16998 1 17005 13 80 0 - 1818189 - ? 00:00:16 CPU 0/KVM
+ 3 S 64055 16998 1 17006 4 80 0 - 1818189 - ? 00:00:05 CPU 1/KVM
+ 3 S 64055 16998 1 17007 3 80 0 - 1818189 - ? 00:00:04 CPU 2/KVM
+ 3 S 64055 16998 1 17008 4 80 0 - 1818189 - ? 00:00:05 CPU 3/KVM
+
+Start an open-ended perf record, tracing the VM process, do something on the VM, and then ctrl-C to stop.
+IPC can be determined, hence cyc=1 can be added.
+Only kernel decoding is supported, so 'k' must be specified.
+Intel PT traces both the host and the guest so --guest and --host need to be specified.
+
+ $ sudo perf kvm --guest --host --guestkallsyms $KALLSYMS record --kcore -e intel_pt/cyc=1/k -p 16998
+ ^C[ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 9.041 MB perf.data.kvm ]
+
+Now 'perf inject' can be used to determine the VMX TSC Offset. Note, Intel PT TSC packets are
+only 7 bytes, so the TSC Offset might differ from the actual value in the 8th byte. That will
+have no effect, i.e. the resulting timestamps will be correct anyway.
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation=dry-run
+ ERROR: Unknown TSC Offset for VMCS 0x1bff6a
+ VMCS: 0x1bff6a TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1cbc08
+ VMCS: 0x1cbc08 TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1c3ce8
+ VMCS: 0x1c3ce8 TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1cbce9
+ VMCS: 0x1cbce9 TSC Offset 0xffffe42722c64c41
+
+Each virtual CPU has a different Virtual Machine Control Structure (VMCS)
+shown above with the calculated TSC Offset. For an unchanging TSC Offset
+they should all be the same for the same virtual machine.
+
+Now that the TSC Offset is known, it can be provided to 'perf inject'
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation="dry-run 0xffffe42722c64c41"
+
+Note the options for 'perf inject' --vm-time-correlation are:
+
+ [ dry-run ] [ <TSC Offset> [ : <VMCS> [ , <VMCS> ]... ] ]...
+
+So it is possible to specify different TSC Offsets for different VMCS.
+The option "dry-run" will cause the file to be processed but without updating it.
+Note it is also possible to get an intel_pt.log file by adding the option --itrace=d.
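+
+For example (hypothetical values), an Offset that applies only to two specific
+VMCS could be checked with:
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation="dry-run 0x1234 : 0x1bff6a , 0x1cbc08"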
+
+There were no errors, so do it for real
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation=0xffffe42722c64c41 --force
+
+'perf script' can be used to see if there are any decoder errors
+
+ $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --itrace=e-o
+
+There were none.
+
+'perf script' can be used to provide an instruction trace showing timestamps
+
+ $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --insn-trace --xed -F+ipc | grep -C10 vmresume | head -21
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cdd __vmx_vcpu_run+0x3d ([kernel.kallsyms]) movq 0x48(%rax), %r9
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce1 __vmx_vcpu_run+0x41 ([kernel.kallsyms]) movq 0x50(%rax), %r10
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce5 __vmx_vcpu_run+0x45 ([kernel.kallsyms]) movq 0x58(%rax), %r11
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce9 __vmx_vcpu_run+0x49 ([kernel.kallsyms]) movq 0x60(%rax), %r12
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ced __vmx_vcpu_run+0x4d ([kernel.kallsyms]) movq 0x68(%rax), %r13
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf1 __vmx_vcpu_run+0x51 ([kernel.kallsyms]) movq 0x70(%rax), %r14
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf5 __vmx_vcpu_run+0x55 ([kernel.kallsyms]) movq 0x78(%rax), %r15
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf9 __vmx_vcpu_run+0x59 ([kernel.kallsyms]) movq (%rax), %rax
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cfc __vmx_vcpu_run+0x5c ([kernel.kallsyms]) callq 0xffffffff82133c40
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133c40 vmx_vmenter+0x0 ([kernel.kallsyms]) jz 0xffffffff82133c46
+ CPU 1/KVM 17006 [001] 11500.262866075: ffffffff82133c42 vmx_vmenter+0x2 ([kernel.kallsyms]) vmresume IPC: 0.05 (40/769)
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb0 asm_sysvec_apic_timer_interrupt+0x0 ([guest.kernel.kallsyms]) clac
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb3 asm_sysvec_apic_timer_interrupt+0x3 ([guest.kernel.kallsyms]) pushq $0xffffffffffffffff
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb5 asm_sysvec_apic_timer_interrupt+0x5 ([guest.kernel.kallsyms]) callq 0xffffffff82201160
+ :17006 17006 [001] 11500.262869216: ffffffff82201160 error_entry+0x0 ([guest.kernel.kallsyms]) cld
+ :17006 17006 [001] 11500.262869216: ffffffff82201161 error_entry+0x1 ([guest.kernel.kallsyms]) pushq %rsi
+ :17006 17006 [001] 11500.262869216: ffffffff82201162 error_entry+0x2 ([guest.kernel.kallsyms]) movq 0x8(%rsp), %rsi
+ :17006 17006 [001] 11500.262869216: ffffffff82201167 error_entry+0x7 ([guest.kernel.kallsyms]) movq %rdi, 0x8(%rsp)
+ :17006 17006 [001] 11500.262869216: ffffffff8220116c error_entry+0xc ([guest.kernel.kallsyms]) pushq %rdx
+ :17006 17006 [001] 11500.262869216: ffffffff8220116d error_entry+0xd ([guest.kernel.kallsyms]) pushq %rcx
+ :17006 17006 [001] 11500.262869216: ffffffff8220116e error_entry+0xe ([guest.kernel.kallsyms]) pushq %rax
+
SEE ALSO
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index ed3ecfa422e1..080981d38d7b 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -226,7 +226,7 @@ So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And
LAZY MATCHING
-------------
- The lazy line matching is similar to glob matching but ignoring spaces in both of pattern and target. So this accepts wildcards('*', '?') and character classes(e.g. [a-z], [!A-Z]).
+The lazy line matching is similar to glob matching but ignoring spaces in both of pattern and target. So this accepts wildcards('*', '?') and character classes(e.g. [a-z], [!A-Z]).
e.g.
'a=*' can matches 'a=b', 'a = b', 'a == b' and so on.
@@ -235,8 +235,8 @@ This provides some sort of flexibility and robustness to probe point definitions
FILTER PATTERN
--------------
- The filter pattern is a glob matching pattern(s) to filter variables.
- In addition, you can use "!" for specifying filter-out rule. You also can give several rules combined with "&" or "|", and fold those rules as one rule by using "(" ")".
+The filter pattern is a glob matching pattern(s) to filter variables.
+In addition, you can use "!" for specifying filter-out rule. You also can give several rules combined with "&" or "|", and fold those rules as one rule by using "(" ")".
e.g.
With --filter "foo* | bar*", perf probe -V shows variables which start with "foo" or "bar".
@@ -295,6 +295,19 @@ Add a probe in a source file using special characters by backslash escape
./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+PERMISSIONS AND SYSCTL
+----------------------
+Since perf probe depends on ftrace (tracefs) and kallsyms (/proc/kallsyms), you have to take care of permissions and some sysctl knobs.
+
+ - Since tracefs and kallsyms require root or a privileged user to access them, the following perf probe commands also require it: --add, --del, --list (except for the --cache option)
+
+ - The system admin can remount the tracefs with mode 755 (`sudo mount -o remount,mode=755 /sys/kernel/tracing/`) to allow an unprivileged user to run the perf probe --list command (see the example commands after this list).
+
+ - /proc/sys/kernel/kptr_restrict = 2 (restrict all users) also prevents perf probe from retrieving important information from kallsyms. You also need to set it to 1 (restrict non-CAP_SYSLOG users) for the above commands. Since user-space probes don't need to access kallsyms, this only applies to probing kernel functions (kprobes).
+
+ - Since the perf probe commands read vmlinux (for the kernel) and/or the debuginfo file (including for user-space applications), you need to ensure that you can read those files.
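+
+ For example, the following (illustrative) commands relax the two common
+ restrictions mentioned above:
+
+   # mount -o remount,mode=755 /sys/kernel/tracing/
+   # sysctl -w kernel.kptr_restrict=1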
+
+
SEE ALSO
--------
linkperf:perf-trace[1], linkperf:perf-record[1], linkperf:perf-buildid-cache[1]
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 0fb9eda3cbca..5e43cfa5ea1e 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -550,6 +550,27 @@ def trace_unhandled(event_name, context, event_fields_dict):
pass
----
+*process_event*, if defined, is called for any non-tracepoint event
+
+----
+def process_event(param_dict):
+ pass
+----
+
+*context_switch*, if defined, is called for any context switch
+
+----
+def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
+ pass
+----
+
+*auxtrace_error*, if defined, is called for any AUX area tracing error
+
+----
+def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
+ pass
+----
+
The remaining sections provide descriptions of each of the available
built-in perf script Python modules and their associated functions.
@@ -592,12 +613,18 @@ common, but need to be made accessible to user scripts nonetheless.
perf_trace_context defines a set of functions that can be used to
access this data in the context of the current event. Each of these
functions expects a context variable, which is the same as the
-context variable passed into every event handler as the second
-argument.
+context variable passed into every tracepoint event handler as the second
+argument. For non-tracepoint events, the context variable is also present
+as perf_trace_context.perf_script_context.
common_pc(context) - returns common_preempt count for the current event
common_flags(context) - returns common_flags for the current event
common_lock_depth(context) - returns common_lock_depth for the current event
+ perf_sample_insn(context) - returns the machine code instruction
+ perf_set_itrace_options(context, itrace_options) - set --itrace options if they have not been set already
+ perf_sample_srcline(context) - returns source_file_name, line_number
+ perf_sample_srccode(context) - returns source_file_name, line_number, source_line
+
Util.py Module
~~~~~~~~~~~~~~
@@ -616,9 +643,20 @@ SUPPORTED FIELDS
Currently supported fields:
ev_name, comm, pid, tid, cpu, ip, time, period, phys_addr, addr,
-symbol, dso, time_enabled, time_running, values, callchain,
+symbol, symoff, dso, time_enabled, time_running, values, callchain,
brstack, brstacksym, datasrc, datasrc_decode, iregs, uregs,
-weight, transaction, raw_buf, attr.
+weight, transaction, raw_buf, attr, cpumode.
+
+Fields that may also be present:
+
+ flags - sample flags
+ flags_disp - sample flags display
+ insn_cnt - instruction count for determining instructions-per-cycle (IPC)
+ cyc_cnt - cycle count for determining IPC
+ addr_correlates_sym - addr can correlate to a symbol
+ addr_dso - addr dso
+ addr_symbol - addr symbol
+ addr_symoff - addr symbol offset
Some fields have sub items:
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 48a5f5b26dd4..aa3a0b2c29a2 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -98,6 +98,18 @@ OPTIONS
Generate perf-script.[ext] starter script for given language,
using current perf.data.
+--dlfilter=<file>::
+ Filter sample events using the given shared object file.
+ Refer linkperf:perf-dlfilter[1]
+
+--dlarg=<arg>::
+ Pass 'arg' as an argument to the dlfilter. --dlarg may be repeated
+ to add more arguments.
+
+--list-dlfilters=::
+ Display a list of available dlfilters. Use with option -v (must come
+ before option --list-dlfilters) to show long descriptions.
+
-a::
Force system-wide collection. Scripts run without a <command>
normally use -a by default, while scripts run with a <command>
@@ -483,4 +495,5 @@ include::itrace.txt[]
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
-linkperf:perf-script-python[1], linkperf:perf-intel-pt[1]
+linkperf:perf-script-python[1], linkperf:perf-intel-pt[1],
+linkperf:perf-dlfilter[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index bba5ffb05463..9898a32b8d9c 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -277,6 +277,18 @@ Default is to monitor all CPUS.
Record events of type PERF_RECORD_NAMESPACES and display it with the
'cgroup_id' sort key.
+-G name::
+--cgroup name::
+Monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., first cgroup
+to first event, second cgroup to second event and so on. It is possible to provide
+an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
+corresponding events, i.e., they always refer to events defined earlier on the command
+line. If the user wants to track multiple events for a specific cgroup, the user can
+use '-e e1 -e e2 -G foo,foo' or just use '-e e1 -e e2 -G foo'.
+
--all-cgroups::
Record events of type PERF_RECORD_CGROUP and display it with the
'cgroup' sort key.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 9ee96640744e..e6ff8c898ada 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -402,6 +402,39 @@ struct {
u64 clockid_time_ns;
};
+ HEADER_HYBRID_TOPOLOGY = 30,
+
+Indicates the hybrid CPUs. The format of the data is as below.
+
+struct {
+ u32 nr;
+ struct {
+ char pmu_name[];
+ char cpus[];
+ } [nr]; /* Variable length records */
+};
+
+Example:
+ hybrid cpu system:
+ cpu_core cpu list : 0-15
+ cpu_atom cpu list : 16-23
+
+ HEADER_HYBRID_CPU_PMU_CAPS = 31,
+
+ A list of hybrid CPU PMU capabilities.
+
+struct {
+ u32 nr_pmu;
+ struct {
+ u32 nr_cpu_pmu_caps;
+ {
+ char name[];
+ char value[];
+ } [nr_cpu_pmu_caps];
+ char pmu_name[];
+ } [nr_pmu];
+};
+
other bits are reserved and should ignored for now
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 73df23dd664c..eb8e487ef90b 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -201,6 +201,12 @@ ifeq ($(call get-executable,$(BISON)),)
dummy := $(error Error: $(BISON) is missing on this system, please install it)
endif
+ifneq ($(OUTPUT),)
+ ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
+ BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
+ endif
+endif
+
# Treat warnings as errors unless directed not to
ifneq ($(WERROR),0)
CORE_CFLAGS += -Werror
@@ -634,7 +640,7 @@ endif
ifdef BUILD_BPF_SKEL
$(call feature_check,clang-bpf-co-re)
ifeq ($(feature-clang-bpf-co-re), 0)
- dummy := $(error Error: clang too old. Please install recent clang)
+ dummy := $(error Error: clang too old/not installed. Please install recent clang to build with BUILD_BPF_SKEL)
endif
$(call detected,CONFIG_PERF_BPF_SKEL)
CFLAGS += -DHAVE_BPF_SKEL
@@ -1111,6 +1117,8 @@ prefix ?= $(HOME)
endif
bindir_relative = bin
bindir = $(abspath $(prefix)/$(bindir_relative))
+includedir_relative = include
+includedir = $(abspath $(prefix)/$(includedir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
@@ -1143,6 +1151,7 @@ ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
STRACE_GROUPS_DIR_SQ = $(subst ','\'',$(STRACE_GROUPS_DIR))
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
bindir_SQ = $(subst ','\'',$(bindir))
+includedir_SQ = $(subst ','\'',$(includedir))
mandir_SQ = $(subst ','\'',$(mandir))
infodir_SQ = $(subst ','\'',$(infodir))
perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
@@ -1227,6 +1236,9 @@ $(call detected_var,LIBDIR)
$(call detected_var,GTK_CFLAGS)
$(call detected_var,PERL_EMBED_CCOPTS)
$(call detected_var,PYTHON_EMBED_CCOPTS)
+ifneq ($(BISON_FILE_PREFIX_MAP),)
+$(call detected_var,BISON_FILE_PREFIX_MAP)
+endif
# re-generate FEATURE-DUMP as we may have called feature_check, found out
# extra libraries to add to LDFLAGS of some other test and then redo those
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index e47f04e5b51e..77e7f18c0bd0 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -923,7 +923,9 @@ install-tools: all install-gtk
$(call QUIET_INSTALL, binaries) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
- $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'
+ $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(includedir_SQ)/perf'; \
+ $(INSTALL) util/perf_dlfilter.h -t '$(DESTDIR_SQ)$(includedir_SQ)/perf'
ifndef NO_PERF_READ_VDSO32
$(call QUIET_INSTALL, perf-read-vdso32) \
$(INSTALL) $(OUTPUT)perf-read-vdso32 '$(DESTDIR_SQ)$(bindir_SQ)';
@@ -1015,6 +1017,7 @@ SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel)
SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
+SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h
ifdef BUILD_BPF_SKEL
BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
@@ -1028,7 +1031,21 @@ $(BPFTOOL): | $(SKEL_TMP_OUT)
CFLAGS= $(MAKE) -C ../bpf/bpftool \
OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
-$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
+$(SKEL_OUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
+ifeq ($(VMLINUX_H),)
+ $(QUIET_GEN)$(BPFTOOL) btf dump file $< format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h | $(SKEL_TMP_OUT)
$(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \
-c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@
diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h
index 90ec4c8cb880..c62538052404 100644
--- a/tools/perf/arch/arm/include/arch-tests.h
+++ b/tools/perf/arch/arm/include/arch-tests.h
@@ -2,11 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index d942f118d32c..85168d87b2d7 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -38,8 +38,6 @@ struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct evlist *evlist;
- int wrapped_cnt;
- bool *wrapped;
bool snapshot_mode;
size_t snapshot_size;
};
@@ -734,135 +732,6 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return 0;
}
-static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
-{
- bool *wrapped;
- int cnt = ptr->wrapped_cnt;
-
- /* Make @ptr->wrapped as big as @idx */
- while (cnt <= idx)
- cnt++;
-
- /*
- * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
- * cross compilation problems where the host's system supports
- * reallocarray() but not the target.
- */
- wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
- if (!wrapped)
- return -ENOMEM;
-
- wrapped[cnt - 1] = false;
- ptr->wrapped_cnt = cnt;
- ptr->wrapped = wrapped;
-
- return 0;
-}
-
-static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
- size_t buffer_size, u64 head)
-{
- u64 i, watermark;
- u64 *buf = (u64 *)buffer;
- size_t buf_size = buffer_size;
-
- /*
- * We want to look the very last 512 byte (chosen arbitrarily) in
- * the ring buffer.
- */
- watermark = buf_size - 512;
-
- /*
- * @head is continuously increasing - if its value is equal or greater
- * than the size of the ring buffer, it has wrapped around.
- */
- if (head >= buffer_size)
- return true;
-
- /*
- * The value of @head is somewhere within the size of the ring buffer.
- * This can be that there hasn't been enough data to fill the ring
- * buffer yet or the trace time was so long that @head has numerically
- * wrapped around. To find we need to check if we have data at the very
- * end of the ring buffer. We can reliably do this because mmap'ed
- * pages are zeroed out and there is a fresh mapping with every new
- * session.
- */
-
- /* @head is less than 512 byte from the end of the ring buffer */
- if (head > watermark)
- watermark = head;
-
- /*
- * Speed things up by using 64 bit transactions (see "u64 *buf" above)
- */
- watermark >>= 3;
- buf_size >>= 3;
-
- /*
- * If we find trace data at the end of the ring buffer, @head has
- * been there and has numerically wrapped around at least once.
- */
- for (i = watermark; i < buf_size; i++)
- if (buf[i])
- return true;
-
- return false;
-}
-
-static int cs_etm_find_snapshot(struct auxtrace_record *itr,
- int idx, struct auxtrace_mmap *mm,
- unsigned char *data,
- u64 *head, u64 *old)
-{
- int err;
- bool wrapped;
- struct cs_etm_recording *ptr =
- container_of(itr, struct cs_etm_recording, itr);
-
- /*
- * Allocate memory to keep track of wrapping if this is the first
- * time we deal with this *mm.
- */
- if (idx >= ptr->wrapped_cnt) {
- err = cs_etm_alloc_wrapped_array(ptr, idx);
- if (err)
- return err;
- }
-
- /*
- * Check to see if *head has wrapped around. If it hasn't only the
- * amount of data between *head and *old is snapshot'ed to avoid
- * bloating the perf.data file with zeros. But as soon as *head has
- * wrapped around the entire size of the AUX ring buffer it taken.
- */
- wrapped = ptr->wrapped[idx];
- if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
- wrapped = true;
- ptr->wrapped[idx] = true;
- }
-
- pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
- __func__, idx, (size_t)*old, (size_t)*head, mm->len);
-
- /* No wrap has occurred, we can just use *head and *old. */
- if (!wrapped)
- return 0;
-
- /*
- * *head has wrapped around - adjust *head and *old to pickup the
- * entire content of the AUX buffer.
- */
- if (*head >= mm->len) {
- *old = *head - mm->len;
- } else {
- *head += mm->len;
- *old = *head - mm->len;
- }
-
- return 0;
-}
-
static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
@@ -900,7 +769,6 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
- zfree(&ptr->wrapped);
free(ptr);
}
@@ -928,7 +796,6 @@ struct auxtrace_record *cs_etm_record_init(int *err)
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
ptr->itr.info_fill = cs_etm_info_fill;
- ptr->itr.find_snapshot = cs_etm_find_snapshot;
ptr->itr.snapshot_start = cs_etm_snapshot_start;
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
diff --git a/tools/perf/arch/arm64/include/arch-tests.h b/tools/perf/arch/arm64/include/arch-tests.h
index 90ec4c8cb880..c62538052404 100644
--- a/tools/perf/arch/arm64/include/arch-tests.h
+++ b/tools/perf/arch/arm64/include/arch-tests.h
@@ -2,11 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 414c8a5584b1..a4420d4df503 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -14,6 +14,7 @@
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
+#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
@@ -32,6 +33,29 @@ struct arm_spe_recording {
struct evlist *evlist;
};
+static void arm_spe_set_timestamp(struct auxtrace_record *itr,
+ struct evsel *evsel)
+{
+ struct arm_spe_recording *ptr;
+ struct perf_pmu *arm_spe_pmu;
+ struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);
+ u64 user_bits = 0, bit;
+
+ ptr = container_of(itr, struct arm_spe_recording, itr);
+ arm_spe_pmu = ptr->arm_spe_pmu;
+
+ if (term)
+ user_bits = term->val.cfg_chg;
+
+ bit = perf_pmu__format_bits(&arm_spe_pmu->format, "ts_enable");
+
+ /* Skip if user has set it */
+ if (bit & user_bits)
+ return;
+
+ evsel->core.attr.config |= bit;
+}
+
static size_t
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused)
@@ -68,6 +92,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
container_of(itr, struct arm_spe_recording, itr);
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
struct evsel *evsel, *arm_spe_evsel = NULL;
+ struct perf_cpu_map *cpus = evlist->core.cpus;
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
@@ -91,7 +116,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
return 0;
/* We are in full trace mode but '-m,xyz' wasn't specified */
- if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
@@ -120,9 +145,14 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
*/
evlist__to_front(evlist, arm_spe_evsel);
- evsel__set_sample_bit(arm_spe_evsel, CPU);
- evsel__set_sample_bit(arm_spe_evsel, TIME);
- evsel__set_sample_bit(arm_spe_evsel, TID);
+ /*
+ * In the case of per-cpu mmaps, sample CPU for AUX event;
+ * also enable the timestamp tracing for samples correlation.
+ */
+ if (!perf_cpu_map__empty(cpus)) {
+ evsel__set_sample_bit(arm_spe_evsel, CPU);
+ arm_spe_set_timestamp(itr, arm_spe_evsel);
+ }
/* Add dummy event to keep tracking */
err = parse_events(evlist, "dummy:u", NULL);
@@ -134,9 +164,10 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
- evsel__set_sample_bit(tracking_evsel, TIME);
- evsel__set_sample_bit(tracking_evsel, CPU);
- evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+
+ /* In per-cpu case, always need the time of mmap events etc */
+ if (!perf_cpu_map__empty(cpus))
+ evsel__set_sample_bit(tracking_evsel, TIME);
return 0;
}
diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
index 2a2497372671..be41721b9aa1 100644
--- a/tools/perf/arch/arm64/util/mem-events.c
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -20,7 +20,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i)
return &perf_mem_events[i];
}
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 9cd1c34f31b5..ac653d08b1ea 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 n64 quotactl_fd sys_quotactl_fd
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 8f052ff4058c..aef2a290e71a 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/powerpc/include/arch-tests.h b/tools/perf/arch/powerpc/include/arch-tests.h
index 1c7be75cbc78..c62538052404 100644
--- a/tools/perf/arch/powerpc/include/arch-tests.h
+++ b/tools/perf/arch/powerpc/include/arch-tests.h
@@ -2,13 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-int test__arch_unwind_sample(struct perf_sample *sample,
- struct thread *thread);
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 8efd9ed9e9db..c9cb4b059392 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -7,7 +7,6 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
-#include "arch-tests.h"
#define STACK_SIZE 8192
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
index 07fb5e049488..4120fafe0be4 100644
--- a/tools/perf/arch/powerpc/util/mem-events.c
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -3,7 +3,7 @@
#include "mem-events.h"
/* PowerPC does not support 'ldlat' parameter. */
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
if (i == PERF_MEM_EVENTS__LOAD)
return (char *) "cpu/mem-loads/";
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 0690263df1dd..64d51ab5a8b4 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index ce18119ea0d0..f6b57799c1ea 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,10 +364,11 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-# 443 reserved for quotactl_path
+443 common quotactl_fd sys_quotactl_fd
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 0e20f3dc69f3..9599e7a3f1af 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,23 +2,15 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#include <linux/compiler.h>
struct test;
/* Tests */
-int test__rdpmc(struct test *test __maybe_unused, int subtest);
-int test__insn_x86(struct test *test __maybe_unused, int subtest);
+int test__rdpmc(struct test *test, int subtest);
+int test__insn_x86(struct test *test, int subtest);
int test__intel_pt_pkt_decoder(struct test *test, int subtest);
int test__bp_modify(struct test *test, int subtest);
int test__x86_sample_parsing(struct test *test, int subtest);
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-int test__arch_unwind_sample(struct perf_sample *sample,
- struct thread *thread);
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 478078fb0f22..a54dea7c112f 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -7,7 +7,6 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
-#include "arch-tests.h"
#define STACK_SIZE 8192
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index 8c6732cc7794..0b0951030a2f 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -5,11 +5,15 @@
#include "util/parse-events.h"
#define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
+#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
int arch_evlist__add_default_attrs(struct evlist *evlist)
{
if (!pmu_have_event("cpu", "slots"))
return 0;
- return parse_events(evlist, TOPDOWN_L1_EVENTS, NULL);
+ if (pmu_have_event("cpu", "topdown-heavy-ops"))
+ return parse_events(evlist, TOPDOWN_L2_EVENTS, NULL);
+ else
+ return parse_events(evlist, TOPDOWN_L1_EVENTS, NULL);
}
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
index d63acb782b63..eeafe97b8105 100644
--- a/tools/perf/arch/x86/util/iostat.c
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -322,7 +322,7 @@ static int iostat_event_group(struct evlist *evl,
}
evlist__for_each_entry(evl, evsel) {
- evsel->priv = list->rps[evsel->idx / metrics_count];
+ evsel->priv = list->rps[evsel->core.idx / metrics_count];
}
list->nr_entries = 0;
err:
@@ -428,7 +428,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
{
double iostat_value = 0;
u64 prev_count_val = 0;
- const char *iostat_metric = iostat_metric_by_idx(evsel->idx);
+ const char *iostat_metric = iostat_metric_by_idx(evsel->core.idx);
u8 die = ((struct iio_root_port *)evsel->priv)->die;
struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 072920475b65..c5dd54f6ef5e 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -133,11 +133,56 @@ static struct kvm_events_ops ioport_events = {
.name = "IO Port Access"
};
+ /* The MSR emulation time is measured from kvm_msr to kvm_entry. */
+static void msr_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = evsel__intval(evsel, sample, "ecx");
+ key->info = evsel__intval(evsel, sample, "write");
+}
+
+static bool msr_event_begin(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (!strcmp(evsel->name, "kvm:kvm_msr")) {
+ msr_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool msr_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ scnprintf(decode, decode_str_len, "%#llx:%s",
+ (unsigned long long)key->key,
+ key->info ? "W" : "R");
+}
+
+static struct kvm_events_ops msr_events = {
+ .is_begin_event = msr_event_begin,
+ .is_end_event = msr_event_end,
+ .decode_key = msr_event_decode_key,
+ .name = "MSR Access"
+};
+
const char *kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
"kvm:kvm_mmio",
"kvm:kvm_pio",
+ "kvm:kvm_msr",
NULL,
};
@@ -145,6 +190,7 @@ struct kvm_reg_events_ops kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events },
{ .name = "mmio", .ops = &mmio_events },
{ .name = "ioport", .ops = &ioport_events },
+ { .name = "msr", .ops = &msr_events },
{ NULL, NULL },
};
diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
index 588110fd8904..5214370ca4e4 100644
--- a/tools/perf/arch/x86/util/mem-events.c
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -5,19 +5,41 @@
static char mem_loads_name[100];
static bool mem_loads_name__init;
+static char mem_stores_name[100];
#define MEM_LOADS_AUX 0x8203
-#define MEM_LOADS_AUX_NAME "{cpu/mem-loads-aux/,cpu/mem-loads,ldlat=%u/pp}:S"
+#define MEM_LOADS_AUX_NAME "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P"
+
+#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
+
+static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
+ E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "%s/events/mem-loads"),
+ E("ldlat-stores", "%s/mem-stores/P", "%s/events/mem-stores"),
+ E(NULL, NULL, NULL),
+};
+
+struct perf_mem_event *perf_mem_events__ptr(int i)
+{
+ if (i >= PERF_MEM_EVENTS__MAX)
+ return NULL;
+
+ return &perf_mem_events[i];
+}
bool is_mem_loads_aux_event(struct evsel *leader)
{
- if (!pmu_have_event("cpu", "mem-loads-aux"))
- return false;
+ if (perf_pmu__find("cpu")) {
+ if (!pmu_have_event("cpu", "mem-loads-aux"))
+ return false;
+ } else if (perf_pmu__find("cpu_core")) {
+ if (!pmu_have_event("cpu_core", "mem-loads-aux"))
+ return false;
+ }
return leader->core.attr.config == MEM_LOADS_AUX;
}
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
@@ -25,20 +47,34 @@ char *perf_mem_events__name(int i)
return NULL;
if (i == PERF_MEM_EVENTS__LOAD) {
- if (mem_loads_name__init)
+ if (mem_loads_name__init && !pmu_name)
return mem_loads_name;
- mem_loads_name__init = true;
+ if (!pmu_name) {
+ mem_loads_name__init = true;
+ pmu_name = (char *)"cpu";
+ }
- if (pmu_have_event("cpu", "mem-loads-aux")) {
+ if (pmu_have_event(pmu_name, "mem-loads-aux")) {
scnprintf(mem_loads_name, sizeof(mem_loads_name),
- MEM_LOADS_AUX_NAME, perf_mem_events__loads_ldlat);
+ MEM_LOADS_AUX_NAME, pmu_name, pmu_name,
+ perf_mem_events__loads_ldlat);
} else {
scnprintf(mem_loads_name, sizeof(mem_loads_name),
- e->name, perf_mem_events__loads_ldlat);
+ e->name, pmu_name,
+ perf_mem_events__loads_ldlat);
}
return mem_loads_name;
}
+ if (i == PERF_MEM_EVENTS__STORE) {
+ if (!pmu_name)
+ pmu_name = (char *)"cpu";
+
+ scnprintf(mem_stores_name, sizeof(mem_stores_name),
+ e->name, pmu_name);
+ return mem_stores_name;
+ }
+
return (char *)e->name;
}
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 49627a7bed7c..cebb861be3e3 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -474,6 +474,9 @@ int cmd_annotate(int argc, const char **argv)
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
.tracing_data = perf_event__process_tracing_data,
+ .id_index = perf_event__process_id_index,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
@@ -483,6 +486,9 @@ int cmd_annotate(int argc, const char **argv)
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
+ struct itrace_synth_opts itrace_synth_opts = {
+ .set = 0,
+ };
struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -547,6 +553,9 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
+ "Instruction Tracing options\n" ITRACE_HELP,
+ itrace_parse_synth_opts),
OPT_END()
};
@@ -591,6 +600,8 @@ int cmd_annotate(int argc, const char **argv)
if (IS_ERR(annotate.session))
return PTR_ERR(annotate.session);
+ annotate.session->itrace_synth_opts = &itrace_synth_opts;
+
annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
HEADER_BRANCH_STACK);
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index e3b9d63077ef..6dea37f141b2 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -42,6 +42,8 @@
#include "ui/ui.h"
#include "ui/progress.h"
#include "../perf.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
struct c2c_hists {
struct hists hists;
@@ -2907,8 +2909,9 @@ static const char * const *record_mem_usage = __usage_record;
static int perf_c2c__record(int argc, const char **argv)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, rec_tmp_nr = 0;
const char **rec_argv;
+ char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
bool event_set = false;
@@ -2932,11 +2935,21 @@ static int perf_c2c__record(int argc, const char **argv)
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
- rec_argc = argc + 11; /* max number of arguments */
+ if (!perf_pmu__has_hybrid())
+ rec_argc = argc + 11; /* max number of arguments */
+ else
+ rec_argc = argc + 11 * perf_pmu__hybrid_pmu_num();
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
+ rec_tmp = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_tmp) {
+ free(rec_argv);
+ return -1;
+ }
+
rec_argv[i++] = "record";
if (!event_set) {
@@ -2964,21 +2977,9 @@ static int perf_c2c__record(int argc, const char **argv)
rec_argv[i++] = "--phys-data";
rec_argv[i++] = "--sample-cpu";
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- e = perf_mem_events__ptr(j);
- if (!e->record)
- continue;
-
- if (!e->supported) {
- pr_err("failed: event '%s' not supported\n",
- perf_mem_events__name(j));
- free(rec_argv);
- return -1;
- }
-
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j);
- }
+ ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &rec_tmp_nr);
+ if (ret)
+ goto out;
if (all_user)
rec_argv[i++] = "--all-user";
@@ -3002,6 +3003,11 @@ static int perf_c2c__record(int argc, const char **argv)
}
ret = cmd_record(i, rec_argv);
+out:
+ for (i = 0; i < rec_tmp_nr; i++)
+ free(rec_tmp[i]);
+
+ free(rec_tmp);
free(rec_argv);
return ret;
}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index f52b3a799e76..80450c0e8f36 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1031,12 +1031,12 @@ static int process_base_stream(struct data__file *data_base,
continue;
es_base = evsel_streams__entry(data_base->evlist_streams,
- evsel_base->idx);
+ evsel_base->core.idx);
if (!es_base)
return -1;
es_pair = evsel_streams__entry(data_pair->evlist_streams,
- evsel_pair->idx);
+ evsel_pair->core.idx);
if (!es_pair)
return -1;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index ddccc0eb7390..c88c61e7f8cc 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -31,6 +31,7 @@
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/list.h>
+#include <linux/string.h>
#include <errno.h>
#include <signal.h>
@@ -43,6 +44,8 @@ struct perf_inject {
bool have_auxtrace;
bool strip;
bool jit_mode;
+ bool in_place_update;
+ bool in_place_update_dry_run;
const char *input_name;
struct perf_data output;
u64 bytes_written;
@@ -358,9 +361,10 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
dso = machine__findnew_dso_id(machine, filename, id);
}
- if (dso)
+ if (dso) {
+ nsinfo__put(dso->nsinfo);
dso->nsinfo = nsi;
- else
+ } else
nsinfo__put(nsi);
thread__put(thread);
@@ -380,8 +384,8 @@ static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
if (dso && !dso->hit) {
dso->hit = 1;
dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
- dso__put(dso);
}
+ dso__put(dso);
return perf_event__repipe(tool, event, sample, machine);
}
@@ -396,6 +400,18 @@ static int perf_event__repipe_mmap2(struct perf_tool *tool,
err = perf_event__process_mmap2(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ struct dso *dso;
+
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, NULL, machine);
+ if (dso) {
+ /* mark it so that no build-id is injected */
+ dso->hit = 1;
+ }
+ dso__put(dso);
+ }
+
return err;
}
@@ -437,6 +453,18 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
};
struct dso *dso;
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ /* cannot use dso_id since it'd have invalid info */
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, NULL, machine);
+ if (dso) {
+ /* mark it so that no build-id is injected */
+ dso->hit = 1;
+ }
+ dso__put(dso);
+ return 0;
+ }
+
dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
event->mmap2.filename, &dso_id, machine);
@@ -444,8 +472,8 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
dso->hit = 1;
dso__inject_build_id(dso, tool, machine, sample->cpumode,
event->mmap2.flags);
- dso__put(dso);
}
+ dso__put(dso);
perf_event__repipe(tool, event, sample, machine);
@@ -696,12 +724,42 @@ static void strip_init(struct perf_inject *inject)
evsel->handler = drop_sample;
}
+static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
+{
+ struct perf_inject *inject = opt->value;
+ const char *args;
+ char *dry_run;
+
+ if (unset)
+ return 0;
+
+ inject->itrace_synth_opts.set = true;
+ inject->itrace_synth_opts.vm_time_correlation = true;
+ inject->in_place_update = true;
+
+ if (!str)
+ return 0;
+
+ dry_run = skip_spaces(str);
+ if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
+ inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
+ inject->in_place_update_dry_run = true;
+ args = dry_run + strlen("dry-run");
+ } else {
+ args = str;
+ }
+
+ inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
+
+ return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
+}
+
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
struct perf_session *session = inject->session;
struct perf_data *data_out = &inject->output;
- int fd = perf_data__fd(data_out);
+ int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
u64 output_data_offset;
signal(SIGINT, sig_handler);
@@ -737,6 +795,15 @@ static int __cmd_inject(struct perf_inject *inject)
else if (!strncmp(name, "sched:sched_stat_", 17))
evsel->handler = perf_inject__sched_stat;
}
+ } else if (inject->itrace_synth_opts.vm_time_correlation) {
+ session->itrace_synth_opts = &inject->itrace_synth_opts;
+ memset(&inject->tool, 0, sizeof(inject->tool));
+ inject->tool.id_index = perf_event__process_id_index;
+ inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
+ inject->tool.auxtrace = perf_event__process_auxtrace;
+ inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
+ inject->tool.ordered_events = true;
+ inject->tool.ordering_requires_timestamps = true;
} else if (inject->itrace_synth_opts.set) {
session->itrace_synth_opts = &inject->itrace_synth_opts;
inject->itrace_synth_opts.inject = true;
@@ -759,14 +826,14 @@ static int __cmd_inject(struct perf_inject *inject)
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
- if (!data_out->is_pipe)
+ if (!data_out->is_pipe && !inject->in_place_update)
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
if (ret)
return ret;
- if (!data_out->is_pipe) {
+ if (!data_out->is_pipe && !inject->in_place_update) {
if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
@@ -878,6 +945,9 @@ int cmd_inject(int argc, const char **argv)
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "strip", &inject.strip,
"strip non-synthesized events (use with --itrace)"),
+ OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
+ "correlate time between VM guests and the host",
+ parse_vm_time_correlation),
OPT_END()
};
const char * const inject_usage[] = {
@@ -900,15 +970,33 @@ int cmd_inject(int argc, const char **argv)
return -1;
}
- if (perf_data__open(&inject.output)) {
+ if (inject.in_place_update) {
+ if (!strcmp(inject.input_name, "-")) {
+ pr_err("Input file name required for in-place updating\n");
+ return -1;
+ }
+ if (strcmp(inject.output.path, "-")) {
+ pr_err("Output file name must not be specified for in-place updating\n");
+ return -1;
+ }
+ if (!data.force && !inject.in_place_update_dry_run) {
+ pr_err("The input file would be updated in place, "
+ "the --force option is required.\n");
+ return -1;
+ }
+ if (!inject.in_place_update_dry_run)
+ data.in_place_update = true;
+ } else if (perf_data__open(&inject.output)) {
perror("failed to create output file");
return -1;
}
data.path = inject.input_name;
inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
- if (IS_ERR(inject.session))
- return PTR_ERR(inject.session);
+ if (IS_ERR(inject.session)) {
+ ret = PTR_ERR(inject.session);
+ goto out_close_output;
+ }
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
@@ -950,5 +1038,8 @@ int cmd_inject(int argc, const char **argv)
out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
+out_close_output:
+ perf_data__close(&inject.output);
+ free(inject.itrace_synth_opts.vm_tm_corr_args);
return ret;
}
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index cdd2b9f643f6..0fd2a74dbaca 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -18,6 +18,8 @@
#include "util/dso.h"
#include "util/map.h"
#include "util/symbol.h"
+#include "util/pmu.h"
+#include "util/pmu-hybrid.h"
#include <linux/err.h>
#define MEM_OPERATION_LOAD 0x1
@@ -62,8 +64,10 @@ static const char * const *record_mem_usage = __usage;
static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, tmp_nr = 0;
+ int start, end;
const char **rec_argv;
+ char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
struct perf_mem_event *e;
@@ -87,11 +91,24 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
- rec_argc = argc + 9; /* max number of arguments */
+ if (!perf_pmu__has_hybrid())
+ rec_argc = argc + 9; /* max number of arguments */
+ else
+ rec_argc = argc + 9 * perf_pmu__hybrid_pmu_num();
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
+ /*
+ * Save the allocated event name strings.
+ */
+ rec_tmp = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_tmp) {
+ free(rec_argv);
+ return -1;
+ }
+
rec_argv[i++] = "record";
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
@@ -128,21 +145,11 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (mem->data_page_size)
rec_argv[i++] = "--data-page-size";
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- e = perf_mem_events__ptr(j);
- if (!e->record)
- continue;
-
- if (!e->supported) {
- pr_err("failed: event '%s' not supported\n",
- perf_mem_events__name(j));
- free(rec_argv);
- return -1;
- }
-
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j);
- }
+ start = i;
+ ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &tmp_nr);
+ if (ret)
+ goto out;
+ end = i;
if (all_user)
rec_argv[i++] = "--all-user";
@@ -156,14 +163,18 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (verbose > 0) {
pr_debug("calling: record ");
- while (rec_argv[j]) {
+ for (j = start; j < end; j++)
pr_debug("%s ", rec_argv[j]);
- j++;
- }
+
pr_debug("\n");
}
ret = cmd_record(i, rec_argv);
+out:
+ for (i = 0; i < tmp_nr; i++)
+ free(rec_tmp[i]);
+
+ free(rec_tmp);
free(rec_argv);
return ret;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 6b1507566770..e1dd51f2874b 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -31,7 +31,7 @@
#include <linux/zalloc.h>
#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
-#define DEFAULT_FUNC_FILTER "!_*"
+#define DEFAULT_FUNC_FILTER "!_* & !*@plt"
#define DEFAULT_LIST_FILTER "*"
/* Session management structure */
@@ -347,7 +347,10 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
goto out_cleanup;
if (params.command == 'D') { /* it shows definition */
- ret = show_probe_trace_events(pevs, npevs);
+ if (probe_conf.bootconfig)
+ ret = show_bootconfig_events(pevs, npevs);
+ else
+ ret = show_probe_trace_events(pevs, npevs);
goto out_cleanup;
}
@@ -581,6 +584,8 @@ __cmd_probe(int argc, const char **argv)
"Look for files with symbols relative to this directory"),
OPT_CALLBACK(0, "target-ns", NULL, "pid",
"target pid for namespace contexts", opt_set_target_ns),
+ OPT_BOOLEAN(0, "bootconfig", &probe_conf.bootconfig,
+ "Output probe definition with bootconfig format"),
OPT_END()
};
int ret;
@@ -692,6 +697,11 @@ __cmd_probe(int argc, const char **argv)
}
break;
case 'D':
+ if (probe_conf.bootconfig && params.uprobes) {
+ pr_err(" Error: --bootconfig doesn't support uprobes.\n");
+ return -EINVAL;
+ }
+ __fallthrough;
case 'a':
/* Ensure the last given target is used */
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 84803abeb942..671a21c9ee4d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -891,11 +891,12 @@ static int record__open(struct record *rec)
int rc = 0;
/*
- * For initial_delay or system wide, we need to add a dummy event so
- * that we can track PERF_RECORD_MMAP to cover the delay of waiting or
- * event synthesis.
+ * For initial_delay, system wide or a hybrid system, we need to add a
+ * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
+ * of waiting or event synthesis.
*/
- if (opts->initial_delay || target__has_cpu(&opts->target)) {
+ if (opts->initial_delay || target__has_cpu(&opts->target) ||
+ perf_pmu__has_hybrid()) {
pos = evlist__get_tracking_event(evlist);
if (!evsel__is_dummy_event(pos)) {
/* Set up dummy event. */
@@ -926,7 +927,7 @@ try_again:
goto try_again;
}
if ((errno == EINVAL || errno == EBADF) &&
- pos->leader != pos &&
+ pos->core.leader != &pos->core &&
pos->weak_group) {
pos = evlist__reset_weak_group(evlist, pos, true);
goto try_again;
@@ -969,6 +970,15 @@ out:
return rc;
}
+static void set_timestamp_boundary(struct record *rec, u64 sample_time)
+{
+ if (rec->evlist->first_sample_time == 0)
+ rec->evlist->first_sample_time = sample_time;
+
+ if (sample_time)
+ rec->evlist->last_sample_time = sample_time;
+}
+
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -977,10 +987,7 @@ static int process_sample_event(struct perf_tool *tool,
{
struct record *rec = container_of(tool, struct record, tool);
- if (rec->evlist->first_sample_time == 0)
- rec->evlist->first_sample_time = sample->time;
-
- rec->evlist->last_sample_time = sample->time;
+ set_timestamp_boundary(rec, sample->time);
if (rec->buildid_all)
return 0;
@@ -1770,7 +1777,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
rec->tool.ordered_events = false;
}
- if (!rec->evlist->nr_groups)
+ if (!rec->evlist->core.nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
if (data->is_pipe) {
@@ -2402,6 +2409,17 @@ static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *eve
return perf_event__process_mmap2(tool, event, sample, machine);
}
+static int process_timestamp_boundary(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ struct record *rec = container_of(tool, struct record, tool);
+
+ set_timestamp_boundary(rec, sample->time);
+ return 0;
+}
+
/*
* XXX Ideally would be local to cmd_record() and passed to a record__new
* because we need to have access to it in record__exit, that is called
@@ -2436,6 +2454,8 @@ static struct record record = {
.namespaces = perf_event__process_namespaces,
.mmap = build_id__process_mmap,
.mmap2 = build_id__process_mmap2,
+ .itrace_start = process_timestamp_boundary,
+ .aux = process_timestamp_boundary,
.ordered_events = true,
},
};
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 36f9ccfeb38a..dc0364f671b9 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -332,7 +332,7 @@ static int process_read_event(struct perf_tool *tool,
const char *name = evsel__name(evsel);
int err = perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
- evsel->idx,
+ evsel->core.idx,
name,
event->read.value);
@@ -666,7 +666,7 @@ static int report__collapse_hists(struct report *rep)
evlist__for_each_entry(rep->session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
- if (pos->idx == 0)
+ if (pos->core.idx == 0)
hists->symbol_filter_str = rep->symbol_filter_str;
hists->socket_filter = rep->socket_filter;
@@ -677,7 +677,7 @@ static int report__collapse_hists(struct report *rep)
/* Non-group events are considered as leader */
if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
- struct hists *leader_hists = evsel__hists(pos->leader);
+ struct hists *leader_hists = evsel__hists(evsel__leader(pos));
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
@@ -729,9 +729,14 @@ static int count_sample_event(struct perf_tool *tool __maybe_unused,
return 0;
}
+static int process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct evlist **pevlist);
+
static void stats_setup(struct report *rep)
{
memset(&rep->tool, 0, sizeof(rep->tool));
+ rep->tool.attr = process_attr;
rep->tool.sample = count_sample_event;
rep->tool.no_warn = true;
}
@@ -753,6 +758,7 @@ static void tasks_setup(struct report *rep)
rep->tool.mmap = perf_event__process_mmap;
rep->tool.mmap2 = perf_event__process_mmap2;
}
+ rep->tool.attr = process_attr;
rep->tool.comm = perf_event__process_comm;
rep->tool.exit = perf_event__process_exit;
rep->tool.fork = perf_event__process_fork;
@@ -934,6 +940,8 @@ static int __cmd_report(struct report *rep)
return ret;
}
+ evlist__check_mem_load_aux(session->evlist);
+
if (rep->stats_mode)
return stats_print(rep);
@@ -1167,6 +1175,8 @@ int cmd_report(int argc, const char **argv)
.annotation_opts = annotation__default_options,
.skip_empty = true,
};
+ char *sort_order_help = sort_help("sort by key(s):");
+ char *field_order_help = sort_help("output field(s): overhead period sample ");
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -1201,9 +1211,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- sort_help("sort by key(s):")),
+ sort_order_help),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
- sort_help("output field(s): overhead period sample ")),
+ field_order_help),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1336,11 +1346,11 @@ int cmd_report(int argc, const char **argv)
char sort_tmp[128];
if (ret < 0)
- return ret;
+ goto exit;
ret = perf_config(report__config, &report);
if (ret)
- return ret;
+ goto exit;
argc = parse_options(argc, argv, options, report_usage, 0);
if (argc) {
@@ -1354,8 +1364,10 @@ int cmd_report(int argc, const char **argv)
report.symbol_filter_str = argv[0];
}
- if (annotate_check_args(&report.annotation_opts) < 0)
- return -EINVAL;
+ if (annotate_check_args(&report.annotation_opts) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
if (report.mmaps_mode)
report.tasks_mode = true;
@@ -1369,12 +1381,14 @@ int cmd_report(int argc, const char **argv)
if (symbol_conf.vmlinux_name &&
access(symbol_conf.vmlinux_name, R_OK)) {
pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
if (symbol_conf.kallsyms_name &&
access(symbol_conf.kallsyms_name, R_OK)) {
pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
if (report.inverted_callchain)
@@ -1398,12 +1412,14 @@ int cmd_report(int argc, const char **argv)
repeat:
session = perf_session__new(&data, false, &report.tool);
- if (IS_ERR(session))
- return PTR_ERR(session);
+ if (IS_ERR(session)) {
+ ret = PTR_ERR(session);
+ goto exit;
+ }
ret = evswitch__init(&report.evswitch, session->evlist, stderr);
if (ret)
- return ret;
+ goto exit;
if (zstd_init(&(session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
@@ -1424,7 +1440,7 @@ repeat:
setup_forced_leader(&report, session->evlist);
- if (symbol_conf.group_sort_idx && !session->evlist->nr_groups) {
+ if (symbol_conf.group_sort_idx && !session->evlist->core.nr_groups) {
parse_options_usage(NULL, options, "group-sort-idx", 0);
ret = -EINVAL;
goto error;
@@ -1638,5 +1654,8 @@ error:
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
+exit:
+ free(sort_order_help);
+ free(field_order_help);
return ret;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 954ce2f594e9..1ff10d4bccf3 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched)
err = pthread_attr_init(&attr);
BUG_ON(err);
err = pthread_attr_setstacksize(&attr,
- (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+ (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
err = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(err);
@@ -3335,6 +3335,16 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
sort_dimension__add("pid", &sched->cmp_pid);
}
+static bool schedstat_events_exposed(void)
+{
+ /*
+ * Select "sched:sched_stat_wait" event to check
+ * whether schedstat tracepoints are exposed.
+ */
+ return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
+ false : true;
+}
+
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
@@ -3346,21 +3356,33 @@ static int __cmd_record(int argc, const char **argv)
"-m", "1024",
"-c", "1",
"-e", "sched:sched_switch",
- "-e", "sched:sched_stat_wait",
- "-e", "sched:sched_stat_sleep",
- "-e", "sched:sched_stat_iowait",
"-e", "sched:sched_stat_runtime",
"-e", "sched:sched_process_fork",
"-e", "sched:sched_wakeup_new",
"-e", "sched:sched_migrate_task",
};
+
+ /*
+ * The tracepoints trace_sched_stat_{wait, sleep, iowait}
+ * are not exposed to userspace if CONFIG_SCHEDSTATS is not set.
+ * To keep "perf sched record" from failing, determine at run time
+ * whether the schedstat events should be recorded.
+ */
+ const char * const schedstat_args[] = {
+ "-e", "sched:sched_stat_wait",
+ "-e", "sched:sched_stat_sleep",
+ "-e", "sched:sched_stat_iowait",
+ };
+ unsigned int schedstat_argc = schedstat_events_exposed() ?
+ ARRAY_SIZE(schedstat_args) : 0;
+
struct tep_event *waking_event;
/*
* +2 for either "-e", "sched:sched_wakeup" or
* "-e", "sched:sched_waking"
*/
- rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1;
+ rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
@@ -3376,6 +3398,9 @@ static int __cmd_record(int argc, const char **argv)
else
rec_argv[i++] = strdup("sched:sched_wakeup");
+ for (j = 0; j < schedstat_argc; j++)
+ rec_argv[i++] = strdup(schedstat_args[j]);
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 1280cbfad4db..064da7f3618d 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -55,6 +55,7 @@
#include <subcmd/pager.h>
#include <perf/evlist.h>
#include <linux/err.h>
+#include "util/dlfilter.h"
#include "util/record.h"
#include "util/util.h"
#include "perf.h"
@@ -79,6 +80,9 @@ static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
static bool native_arch;
+static struct dlfilter *dlfilter;
+static int dlargc;
+static char **dlargv;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@@ -1337,17 +1341,18 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
+ struct addr_location *addr_al,
u64 *ip)
{
- struct addr_location addr_al;
struct perf_event_attr *attr = &evsel->core.attr;
const char *name = NULL;
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
if (sample_addr_correlates_sym(attr)) {
- thread__resolve(thread, &addr_al, sample);
- if (addr_al.sym)
- name = addr_al.sym->name;
+ if (!addr_al->thread)
+ thread__resolve(thread, addr_al, sample);
+ if (addr_al->sym)
+ name = addr_al->sym->name;
else
*ip = sample->addr;
} else {
@@ -1365,7 +1370,9 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
- struct addr_location *al, FILE *fp)
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
size_t depth = thread_stack__depth(thread, sample->cpu);
@@ -1382,7 +1389,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
depth += 1;
- name = resolve_branch_sym(sample, evsel, thread, al, &ip);
+ name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
dlen += fprintf(fp, "(");
@@ -1417,6 +1424,13 @@ __weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
{
}
+void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine)
+{
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+}
+
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
@@ -1424,8 +1438,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
- if (sample->insn_len == 0 && native_arch)
- arch_fetch_insn(sample, thread, machine);
+ script_fetch_insn(sample, thread, machine);
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
@@ -1460,6 +1473,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
+ struct addr_location *addr_al,
struct machine *machine, FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
@@ -1468,7 +1482,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
int printed = 0;
if (PRINT_FIELD(CALLINDENT))
- printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);
+ printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
/* print branch_from information */
if (PRINT_FIELD(IP)) {
@@ -1553,41 +1567,49 @@ static const char *sample_flags_to_name(u32 flags)
return NULL;
}
-static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
+int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz)
{
const char *chars = PERF_IP_FLAG_CHARS;
- const int n = strlen(PERF_IP_FLAG_CHARS);
+ const size_t n = strlen(PERF_IP_FLAG_CHARS);
bool in_tx = flags & PERF_IP_FLAG_IN_TX;
const char *name = NULL;
- char str[33];
- int i, pos = 0;
+ size_t i, pos = 0;
name = sample_flags_to_name(flags & ~PERF_IP_FLAG_IN_TX);
if (name)
- return fprintf(fp, " %-15s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "%-15s%4s", name, in_tx ? "(x)" : "");
if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_BEGIN));
if (name)
- return fprintf(fp, " tr strt %-7s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "tr strt %-7s%4s", name, in_tx ? "(x)" : "");
}
if (flags & PERF_IP_FLAG_TRACE_END) {
name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_END));
if (name)
- return fprintf(fp, " tr end %-7s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "tr end %-7s%4s", name, in_tx ? "(x)" : "");
}
for (i = 0; i < n; i++, flags >>= 1) {
- if (flags & 1)
+ if ((flags & 1) && pos < sz)
str[pos++] = chars[i];
}
for (; i < 32; i++, flags >>= 1) {
- if (flags & 1)
+ if ((flags & 1) && pos < sz)
str[pos++] = '?';
}
- str[pos] = 0;
+ if (pos < sz)
+ str[pos] = 0;
+
+ return pos;
+}
+static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
+{
+ char str[SAMPLE_FLAGS_BUF_SIZE];
+
+ perf_sample__sprintf_flags(flags, str, sizeof(str));
return fprintf(fp, " %-19s ", str);
}
@@ -1877,6 +1899,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
struct perf_sample *sample,
FILE *fp)
{
+ struct evsel *leader = evsel__leader(evsel);
struct perf_stat_output_ctx ctx = {
.print_metric = script_print_metric,
.new_line = script_new_line,
@@ -1893,7 +1916,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
if (!evsel->stats)
evlist__alloc_stats(script->session->evlist, false);
- if (evsel_script(evsel->leader)->gnum++ == 0)
+ if (evsel_script(leader)->gnum++ == 0)
perf_stat__reset_shadow_stats();
val = sample->period * evsel->scale;
perf_stat__update_shadow_stats(evsel,
@@ -1901,8 +1924,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
sample->cpu,
&rt_stat);
evsel_script(evsel)->val = val;
- if (evsel_script(evsel->leader)->gnum == evsel->leader->core.nr_members) {
- for_each_group_member (ev2, evsel->leader) {
+ if (evsel_script(leader)->gnum == leader->core.nr_members) {
+ for_each_group_member (ev2, leader) {
perf_stat__print_shadow_stats(&stat_config, ev2,
evsel_script(ev2)->val,
sample->cpu,
@@ -1910,14 +1933,15 @@ static void perf_sample__fprint_metric(struct perf_script *script,
NULL,
&rt_stat);
}
- evsel_script(evsel->leader)->gnum = 0;
+ evsel_script(leader)->gnum = 0;
}
}
static bool show_event(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
int depth = thread_stack__depth(thread, sample->cpu);
@@ -1933,7 +1957,7 @@ static bool show_event(struct perf_sample *sample,
} else {
const char *s = symbol_conf.graph_function;
u64 ip;
- const char *name = resolve_branch_sym(sample, evsel, thread, al,
+ const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
&ip);
unsigned nlen;
@@ -1958,6 +1982,7 @@ static bool show_event(struct perf_sample *sample,
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct evsel *evsel,
struct addr_location *al,
+ struct addr_location *addr_al,
struct machine *machine)
{
struct thread *thread = al->thread;
@@ -1970,12 +1995,6 @@ static void process_event(struct perf_script *script,
if (output[type].fields == 0)
return;
- if (!show_event(sample, evsel, thread, al))
- return;
-
- if (evswitch__discard(&script->evswitch, evsel))
- return;
-
++es->samples;
perf_sample__fprintf_start(script, sample, thread, evsel,
@@ -1997,7 +2016,7 @@ static void process_event(struct perf_script *script,
perf_sample__fprintf_flags(sample->flags, fp);
if (is_bts_event(attr)) {
- perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp);
+ perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
return;
}
@@ -2160,10 +2179,23 @@ static int process_sample_event(struct perf_tool *tool,
{
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
+ struct addr_location addr_al;
+ int ret = 0;
+
+ /* Set thread to NULL to indicate addr_al and al are not initialized */
+ addr_al.thread = NULL;
+ al.thread = NULL;
+
+ ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out_put;
+ }
if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
sample->time)) {
- return 0;
+ goto out_put;
}
if (debug_mode) {
@@ -2174,29 +2206,53 @@ static int process_sample_event(struct perf_tool *tool,
nr_unordered++;
}
last_timestamp = sample->time;
- return 0;
+ goto out_put;
}
+ if (filter_cpu(sample))
+ goto out_put;
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
- return -1;
+ ret = -1;
+ goto out_put;
}
if (al.filtered)
goto out_put;
- if (filter_cpu(sample))
+ if (!show_event(sample, evsel, al.thread, &al, &addr_al))
goto out_put;
- if (scripting_ops)
- scripting_ops->process_event(event, sample, evsel, &al);
- else
- process_event(scr, sample, evsel, &al, machine);
+ if (evswitch__discard(&scr->evswitch, evsel))
+ goto out_put;
+
+ ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out_put;
+ }
+
+ if (scripting_ops) {
+ struct addr_location *addr_al_ptr = NULL;
+
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
+ sample_addr_correlates_sym(&evsel->core.attr)) {
+ if (!addr_al.thread)
+ thread__resolve(al.thread, &addr_al, sample);
+ addr_al_ptr = &addr_al;
+ }
+ scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
+ } else {
+ process_event(scr, sample, evsel, &al, &addr_al, machine);
+ }
out_put:
- addr_location__put(&al);
- return 0;
+ if (al.thread)
+ addr_location__put(&al);
+ return ret;
}
static int process_attr(struct perf_tool *tool, union perf_event *event,
@@ -2415,6 +2471,17 @@ static int process_switch_event(struct perf_tool *tool,
sample->tid);
}
+static int process_auxtrace_error(struct perf_session *session,
+ union perf_event *event)
+{
+ if (scripting_ops && scripting_ops->process_auxtrace_error) {
+ scripting_ops->process_auxtrace_error(session, event);
+ return 0;
+ }
+
+ return perf_event__process_auxtrace_error(session, event);
+}
+
static int
process_lost_event(struct perf_tool *tool,
union perf_event *event,
@@ -2534,6 +2601,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
}
}
+static void perf_script__exit(struct perf_script *script)
+{
+ perf_thread_map__put(script->threads);
+ perf_cpu_map__put(script->cpus);
+}
+
static int __cmd_script(struct perf_script *script)
{
int ret;
@@ -2554,6 +2627,8 @@ static int __cmd_script(struct perf_script *script)
}
if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch))
script->tool.context_switch = process_switch_event;
+ if (scripting_ops && scripting_ops->process_auxtrace_error)
+ script->tool.auxtrace_error = process_auxtrace_error;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
if (script->show_cgroup_events)
@@ -2665,6 +2740,37 @@ static void list_available_languages(void)
fprintf(stderr, "\n");
}
+/* Find script file relative to current directory or exec path */
+static char *find_script(const char *script)
+{
+ char path[PATH_MAX];
+
+ if (!scripting_ops) {
+ const char *ext = strrchr(script, '.');
+
+ if (!ext)
+ return NULL;
+
+ scripting_ops = script_spec__lookup(++ext);
+ if (!scripting_ops)
+ return NULL;
+ }
+
+ if (access(script, R_OK)) {
+ char *exec_path = get_argv_exec_path();
+
+ if (!exec_path)
+ return NULL;
+ snprintf(path, sizeof(path), "%s/scripts/%s/%s",
+ exec_path, scripting_ops->dirname, script);
+ free(exec_path);
+ script = path;
+ if (access(script, R_OK))
+ return NULL;
+ }
+ return strdup(script);
+}
+
static int parse_scriptname(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
@@ -2706,7 +2812,9 @@ static int parse_scriptname(const struct option *opt __maybe_unused,
}
}
- script_name = strdup(script);
+ script_name = find_script(script);
+ if (!script_name)
+ script_name = strdup(script);
return 0;
}
@@ -3076,6 +3184,34 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
exit(0);
}
+static int add_dlarg(const struct option *opt __maybe_unused,
+ const char *s, int unset __maybe_unused)
+{
+ char *arg = strdup(s);
+ void *a;
+
+ if (!arg)
+ return -1;
+
+ a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
+ if (!a) {
+ free(arg);
+ return -1;
+ }
+
+ dlargv = a;
+ dlargv[dlargc++] = arg;
+
+ return 0;
+}
+
+static void free_dlarg(void)
+{
+ while (dlargc--)
+ free(dlargv[dlargc]);
+ free(dlargv);
+}
+
/*
* Some scripts specify the required events in their "xxx-record" file,
* this function will check if the events in perf.data match those
@@ -3489,6 +3625,7 @@ int cmd_script(int argc, const char **argv)
};
struct utsname uts;
char *script_path = NULL;
+ const char *dlfilter_file = NULL;
const char **__argv;
int i, j, err = 0;
struct perf_script script = {
@@ -3531,11 +3668,16 @@ int cmd_script(int argc, const char **argv)
"show latency attributes (irqs/preemption disabled, etc)"),
OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
list_available_scripts),
+ OPT_CALLBACK_NOOPT(0, "list-dlfilters", NULL, NULL, "list available dlfilters",
+ list_available_dlfilters),
OPT_CALLBACK('s', "script", NULL, "name",
"script file name (lang:script name, script name, or *)",
parse_scriptname),
OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
"generate perf-script.xx script in specified language"),
+ OPT_STRING(0, "dlfilter", &dlfilter_file, "file", "filter .so file name"),
+ OPT_CALLBACK(0, "dlarg", NULL, "argument", "filter argument",
+ add_dlarg),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('d', "debug-mode", &debug_mode,
"do various checks like samples ordering and lost events"),
@@ -3718,6 +3860,12 @@ int cmd_script(int argc, const char **argv)
rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
if (!rec_script_path && !rep_script_path) {
+ script_name = find_script(argv[0]);
+ if (script_name) {
+ argc -= 1;
+ argv += 1;
+ goto script_found;
+ }
usage_with_options_msg(script_usage, options,
"Couldn't find script `%s'\n\n See perf"
" script -l for available scripts.\n", argv[0]);
@@ -3810,7 +3958,7 @@ int cmd_script(int argc, const char **argv)
free(__argv);
exit(-1);
}
-
+script_found:
if (rec_script_path)
script_path = rec_script_path;
if (rep_script_path)
@@ -3848,6 +3996,12 @@ int cmd_script(int argc, const char **argv)
exit(-1);
}
+ if (dlfilter_file) {
+ dlfilter = dlfilter__new(dlfilter_file, dlargc, dlargv);
+ if (!dlfilter)
+ return -1;
+ }
+
if (!script_name) {
setup_pager();
use_browser = 0;
@@ -3947,8 +4101,12 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
+ err = dlfilter__start(dlfilter, session);
+ if (err)
+ goto out_delete;
+
if (script_name) {
- err = scripting_ops->start_script(script_name, argc, argv);
+ err = scripting_ops->start_script(script_name, argc, argv, session);
if (err)
goto out_delete;
pr_debug("perf script started with script %s\n\n", script_name);
@@ -3991,11 +4149,15 @@ out_delete:
zfree(&script.ptime_range);
}
+ zstd_fini(&(session->zstd_data));
evlist__free_stats(session->evlist);
perf_session__delete(session);
+ perf_script__exit(&script);
if (script_started)
cleanup_scripting();
+ dlfilter__cleanup(dlfilter);
+ free_dlarg();
out:
return err;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f9f74a514315..634375937db9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -154,6 +154,8 @@ static const char *topdown_metric_L2_attrs[] = {
NULL,
};
+#define TOPDOWN_MAX_LEVEL 2
+
static const char *smi_cost_attrs = {
"{"
"msr/aperf/,"
@@ -248,7 +250,7 @@ static void evlist__check_cpu_maps(struct evlist *evlist)
evlist__warn_hybrid_group(evlist);
evlist__for_each_entry(evlist, evsel) {
- leader = evsel->leader;
+ leader = evsel__leader(evsel);
/* Check that leader matches cpus with each member. */
if (leader == evsel)
@@ -269,10 +271,10 @@ static void evlist__check_cpu_maps(struct evlist *evlist)
}
for_each_group_evsel(pos, leader) {
- pos->leader = pos;
+ evsel__set_leader(pos, pos);
pos->core.nr_members = 0;
}
- evsel->leader->core.nr_members = 0;
+ evsel->core.leader->nr_members = 0;
}
}
@@ -745,8 +747,8 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
*/
counter->errored = true;
- if ((counter->leader != counter) ||
- !(counter->leader->core.nr_members > 1))
+ if ((evsel__leader(counter) != counter) ||
+ !(counter->core.leader->nr_members > 1))
return COUNTER_SKIP;
} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
@@ -839,7 +841,7 @@ try_again:
* Don't close here because we're in the wrong affinity.
*/
if ((errno == EINVAL || errno == EBADF) &&
- counter->leader != counter &&
+ evsel__leader(counter) != counter &&
counter->weak_group) {
evlist__reset_weak_group(evsel_list, counter, false);
assert(counter->reset_group);
@@ -1931,6 +1933,7 @@ setup_metrics:
if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
return -1;
+ stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
if (arch_evlist__add_default_attrs(evsel_list) < 0)
return -1;
}
@@ -2442,9 +2445,6 @@ int cmd_stat(int argc, const char **argv)
evlist__check_cpu_maps(evsel_list);
- if (perf_pmu__has_hybrid())
- stat_config.no_merge = true;
-
/*
* Initialize thread_map with comm names,
* so we could print it out on output.
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 69cb3635f5ef..02f8bb5dbc0f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,6 +22,7 @@
#include "util/annotate.h"
#include "util/bpf-event.h"
+#include "util/cgroup.h"
#include "util/config.h"
#include "util/color.h"
#include "util/dso.h"
@@ -263,9 +264,9 @@ static void perf_top__show_details(struct perf_top *top)
if (top->evlist->enabled) {
if (top->zero)
- symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
+ symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx);
else
- symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
+ symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx);
}
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
@@ -300,7 +301,7 @@ static void perf_top__resort_hists(struct perf_top *t)
/* Non-group events are considered as leader */
if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
- struct hists *leader_hists = evsel__hists(pos->leader);
+ struct hists *leader_hists = evsel__hists(evsel__leader(pos));
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
@@ -529,7 +530,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
fprintf(stderr, "\nAvailable events:");
evlist__for_each_entry(top->evlist, top->sym_evsel)
- fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, evsel__name(top->sym_evsel));
+ fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
@@ -540,7 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
break;
}
evlist__for_each_entry(top->evlist, top->sym_evsel)
- if (top->sym_evsel->idx == counter)
+ if (top->sym_evsel->core.idx == counter)
break;
} else
top->sym_evsel = evlist__first(top->evlist);
@@ -1558,6 +1559,8 @@ int cmd_top(int argc, const char **argv)
OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
"number of thread to run event synthesize"),
+ OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
+ "monitor event in cgroup name only", parse_cgroups),
OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
@@ -1646,6 +1649,11 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
+ if (nr_cgroups > 0 && opts->record_cgroup) {
+ pr_err("--cgroup and --all-cgroups cannot be used together\n");
+ goto out_delete_evlist;
+ }
+
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 7ec18ff57fc4..9c265fa96011 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2266,6 +2266,14 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam
return augmented_args;
}
+static void syscall__exit(struct syscall *sc)
+{
+ if (!sc)
+ return;
+
+ free(sc->arg_fmt);
+}
+
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -3095,6 +3103,21 @@ static struct evsel *evsel__new_pgfault(u64 config)
return evsel;
}
+static void evlist__free_syscall_tp_fields(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct evsel_trace *et = evsel->priv;
+
+ if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
+ continue;
+
+ free(et->fmt);
+ free(et);
+ }
+}
+
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
const u32 type = event->header.type;
@@ -4130,7 +4153,7 @@ out_disable:
out_delete_evlist:
trace__symbols__exit(trace);
-
+ evlist__free_syscall_tp_fields(evlist);
evlist__delete(evlist);
cgroup__put(trace->cgroup);
trace->evlist = NULL;
@@ -4636,6 +4659,9 @@ do_concat:
err = parse_events_option(&o, lists[0], 0);
}
out:
+ free(strace_groups_dir);
+ free(lists[0]);
+ free(lists[1]);
if (sep)
*sep = ',';
@@ -4701,6 +4727,21 @@ out:
return err;
}
+static void trace__exit(struct trace *trace)
+{
+ int i;
+
+ strlist__delete(trace->ev_qualifier);
+ free(trace->ev_qualifier_ids.entries);
+ if (trace->syscalls.table) {
+ for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
+ syscall__exit(&trace->syscalls.table[i]);
+ free(trace->syscalls.table);
+ }
+ syscalltbl__delete(trace->sctbl);
+ zfree(&trace->perfconfig_events);
+}
+
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
@@ -5135,6 +5176,6 @@ out_close:
if (output_name != NULL)
fclose(trace.output);
out:
- zfree(&trace.perfconfig_events);
+ trace__exit(&trace);
return err;
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
new file mode 100644
index 000000000000..8ba3e81c9808
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
@@ -0,0 +1,424 @@
+[
+ {
+ "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "LOCAL_NODE_PUMP_RETRY_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_LNS_PUMP01\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "LOCAL_NODE_PUMP_RETRY_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_LNS_PUMP23\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "GROUP_PUMP_RETRY_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_GROUP_PUMP01\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "GROUP_PUMP_RETRY_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_GROUP_PUMP23\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_RETRIES_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_RETRIES_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_RETRIES_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_RETRIES_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_RETRIES_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_RETRIES_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_REMOTE_NODE_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_REMOTE_NODE_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_NEAR_NODE_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_NNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_NEAR_NODE_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_NNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_INT_PB_BW",
+ "MetricExpr": "(hv_24x7@PM_PB_INT_DATA_XFER\\,chip\\=?@)",
+ "ScaleUnit": "2.09MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK0_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK1_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK2_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK3_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK4_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK5_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK6_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK7_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK0_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK1_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK2_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK3_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK4_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK5_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK6_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK7_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK0_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK1_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK2_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK3_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK4_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK5_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK6_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK7_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK0_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK1_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK2_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK3_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK4_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK5_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK6_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK7_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI1",
+ "MetricExpr": "(hv_24x7@PM_PCI1_32B_INOUT\\,chip\\=?@)",
+ "ScaleUnit": "3.28e-2MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI0",
+ "MetricExpr": "(hv_24x7@PM_PCI0_32B_INOUT\\,chip\\=?@)",
+ "ScaleUnit": "3.28e-2MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC0_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC1_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC2_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC3_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC0_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC1_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC2_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC3_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "Memory_RD_BW_Chip",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)",
+ "MetricGroup": "Memory_BW",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "Memory_WR_BW_Chip",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@ )",
+ "MetricGroup": "Memory_BW",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "PowerBUS_Frequency",
+ "MetricExpr": "(hv_24x7@PM_PAU_CYC\\,chip\\=?@ )",
+ "ScaleUnit": "2.56e-7GHz",
+ "AggregationMode": "PerChip"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 3529fc338c17..49fe78fb6538 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -1,552 +1,664 @@
[
{
+ "BriefDescription": "L2 code requests",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read miss L2, no rejects"
+ "Speculative": "1",
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x22",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache"
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x24",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions"
+ "Speculative": "1",
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand requests that miss L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x27",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache"
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "RFO requests that hit L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x28",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "SW prefetch requests that miss L2 cache."
+ "Speculative": "1",
+ "UMask": "0xc2"
},
{
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache"
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc2",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache"
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 cache lines filling L2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads."
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1f"
},
{
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc8",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.SWPF_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "SW prefetch requests that hit L2 cache."
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe1",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests"
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe2",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache"
+ "Speculative": "1",
+ "UMask": "0x21"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of L2 code requests.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe4",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests"
+ "Speculative": "1",
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand requests to L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe7",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache"
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of L1D misses that are outstanding"
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All retired load instructions.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1"
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "L2 writebacks that access L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability."
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Demand Data Read requests",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe1"
},
{
+ "BriefDescription": "Demand Data Read transactions pending for off-core. Highly correlated.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.L2_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources."
+ "PublicDescription": "Counts the number of off-core outstanding Demand Data Read transactions every cycle. A transaction is considered to be in the Off-core outstanding state between L2 cache miss and data-return to the core.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of cache lines replaced in L1 data cache."
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Cycles the superQ cannot take any more entries.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1"
+ "PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore"
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand Data Read requests sent to uncore",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1"
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore"
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM"
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads"
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ."
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions that true miss the STLB.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x11",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions with locked access.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired store instructions that true miss the STLB.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x12",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB.",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All retired store instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with locked access.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access.",
- "Data_LA": "1"
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe7"
},
{
- "PEBS": "1",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "Data_LA": "1"
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Demand and prefetch data reads",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x81",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions.",
- "Data_LA": "1"
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x28"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions.",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Number of L1D misses that are outstanding",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired store instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "PEBS": "1",
+ "BriefDescription": "RFO requests to L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
"EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "L1_Hit_Indication": "1",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Data_LA": "1"
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc8"
},
{
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
- "Data_LA": "1"
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x22"
},
{
- "PEBS": "1",
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Any memory transaction that reached the SQ.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2"
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
- "EventCode": "0xF4",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xf2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
"PEBScounters": "0,1,2,3",
- "EventName": "SQ_MISC.SQ_FULL",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles the thread is active and superQ cannot take any more entries."
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
}
] \ No newline at end of file
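Each entry above is compiled into perf by the pmu-events build, so the EventName strings become usable directly (e.g. perf stat -e l2_lines_out.silent). As a minimal illustrative sketch, not part of the patch, the same fields can also be mapped onto perf's raw cpu/.../ event syntax; the format terms used here (event, umask, cmask, edge, inv, name) are assumed to match the core PMU's sysfs format, and the sample entry is copied from L2_LINES_OUT.SILENT above.

# Minimal sketch (assumptions noted above): build a raw perf event string
# from one of the JSON entries in this file.
entry = {"EventCode": "0xF2", "UMask": "0x1", "EventName": "L2_LINES_OUT.SILENT"}

def to_raw_event(e):
    # event= and umask= come straight from the JSON fields.
    terms = ["event=" + e["EventCode"].lower(), "umask=" + e["UMask"].lower()]
    if e.get("CounterMask", "0") != "0":
        terms.append("cmask=" + e["CounterMask"])
    if e.get("EdgeDetect") == "1":
        terms.append("edge=1")
    if e.get("Invert") == "1":
        terms.append("inv=1")
    return "cpu/" + ",".join(terms) + ",name=" + e["EventName"] + "/"

print(to_raw_event(entry))  # cpu/event=0xf2,umask=0x1,name=L2_LINES_OUT.SILENT/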
diff --git a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
index 594c5551f610..5391c4f6eca3 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
@@ -1,102 +1,95 @@
[
{
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all microcode Floating Point assists.",
- "EventCode": "0xC1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ASSISTS.FP",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all microcode FP assists.",
- "CounterMask": "1"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts all microcode FP assists.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
}
] \ No newline at end of file
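The FP_ARITH_INST_RETIRED descriptions above state how many operations each count represents (1 for scalar, 2/4/8/16 for the packed flavours, with FM(N)ADD/SUB counted twice). The FLOPc metric added in icl-metrics.json below combines the counters with exactly those weights; a small sketch of that arithmetic, using made-up counter values purely for illustration:

# Elements per count, as stated in the event descriptions above.
WEIGHTS = {
    "SCALAR_SINGLE": 1, "SCALAR_DOUBLE": 1,
    "128B_PACKED_DOUBLE": 2, "128B_PACKED_SINGLE": 4,
    "256B_PACKED_DOUBLE": 4, "256B_PACKED_SINGLE": 8,
    "512B_PACKED_DOUBLE": 8, "512B_PACKED_SINGLE": 16,
}

def fp_ops(counts):
    """counts maps an FP_ARITH_INST_RETIRED umask name to its raw count."""
    return sum(WEIGHTS[name] * value for name, value in counts.items())

# Made-up example: 1e9 scalar-double and 1e8 256-bit packed-single instructions.
print(fp_ops({"SCALAR_DOUBLE": 10**9, "256B_PACKED_SINGLE": 10**8}))  # 1800000000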
diff --git a/tools/perf/pmu-events/arch/x86/icelake/frontend.json b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
index 9c3cfbfcec0f..4fa2a4186ee3 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
@@ -1,424 +1,482 @@
[
{
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path"
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
- "CounterMask": "5"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "CounterMask": "5",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventName": "IDQ.MITE_CYCLES_OK",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path"
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
- "CounterMask": "5"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB or MITE to the MS",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to IDQ while MS is busy"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_16B.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity."
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
"PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled"
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9c",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
- "CounterMask": "5"
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9C",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
- "CounterMask": "1"
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CollectPEBSRecord": "2",
- "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
- "EventCode": "0xAB",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "DSB-to-MITE switch true penalty cycles."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x11",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced DSB miss.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x12",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.L1I_MISS",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x13",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x14",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x15",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x500206",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x500406",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x500806",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x501006",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x502006",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x504006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x508006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x510006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x520006",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x100206",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
}
] \ No newline at end of file
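All FRONTEND_RETIRED.* entries above share EventCode 0xc6 and MSRIndex 0x3F7 (MSR_PEBS_FRONTEND) and differ only in MSRValue. A pattern consistent with the listed values, offered here only as an inferred reading rather than anything stated in the patch: the low byte selects the sub-event (0x11 DSB miss through 0x15 STLB miss, 0x06 for the fetch-latency family) and bits 19:8 carry the LATENCY_GE_* cycle threshold.

# Sketch of that inferred MSRValue layout; the bit positions are assumptions.
def decode_frontend_msrvalue(value):
    sub_event = value & 0xFF          # e.g. 0x06 for the latency events
    threshold = (value >> 8) & 0xFFF  # LATENCY_GE_* cycle threshold
    return sub_event, threshold

for name, val in [("LATENCY_GE_2", 0x500206),
                  ("LATENCY_GE_512", 0x520006),
                  ("DSB_MISS", 0x11)]:
    print(name, decode_frontend_msrvalue(val))
# LATENCY_GE_2 -> (0x06, 2), LATENCY_GE_512 -> (0x06, 512), DSB_MISS -> (0x11, 0)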
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
new file mode 100644
index 000000000000..432e45ac6814
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -0,0 +1,273 @@
+[
+ {
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricGroup": "Summary",
+ "MetricName": "IPC"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.SLOTS / INST_RETIRED.ANY",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retire",
+ "MetricName": "UPI"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;FetchBW;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CPI"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CLKS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT;TmaL1",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "Flops",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 )",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricGroup": "Pipeline;PortsUtil",
+ "MetricName": "ILP"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricGroup": "InsType",
+ "MetricName": "IpLoad"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricGroup": "InsType",
+ "MetricName": "IpStore"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricGroup": "Branches;InsType",
+ "MetricName": "IpBranch"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTkBranch"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )",
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricGroup": "Flops;FpArith;InsType",
+ "MetricName": "IpFLOP"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricGroup": "Summary;TmaL1",
+ "MetricName": "Instructions"
+ },
+ {
+ "MetricExpr": "LSD.UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricGroup": "LSD",
+ "MetricName": "LSD_Coverage"
+ },
+ {
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;FetchBW",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricGroup": "MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricGroup": "MemoryBound;MemoryBW",
+ "MetricName": "MLP"
+ },
+ {
+ "MetricConstraint": "NO_NMI_WATCHDOG",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "MemoryTLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "CacheMisses",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "CacheMisses",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * ( ( OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD ) + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "CacheMisses",
+ "MetricName": "L3MPKI"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time",
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricGroup": "Summary;Power",
+ "MetricName": "Average_Frequency"
+ },
+ {
+ "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricGroup": "Flops;HPC",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricGroup": "SMT",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "HPC;MemoryBW;SoC",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C3 residency percent per core",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C6 residency percent per core",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C7 residency percent per core",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C2 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C3 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C6 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C7 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
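The MetricExpr strings in this new icl-metrics.json are plain arithmetic over event counts collected in the same run; perf evaluates them when a metric name or MetricGroup is requested (e.g. perf stat -M IPC or -M Pipeline). As an illustrative sketch only, with made-up counter readings rather than measured data, a few of the expressions above reduce to:

# Hypothetical counter values; real values come from a perf counting session.
counts = {
    "INST_RETIRED.ANY": 1_200_000_000,
    "CPU_CLK_UNHALTED.THREAD": 1_000_000_000,
    "UOPS_RETIRED.SLOTS": 1_500_000_000,
}

# "IPC": INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD
ipc = counts["INST_RETIRED.ANY"] / counts["CPU_CLK_UNHALTED.THREAD"]

# "CPI": 1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD), i.e. the reciprocal of IPC
cpi = 1 / ipc

# "UPI": UOPS_RETIRED.SLOTS / INST_RETIRED.ANY
upi = counts["UOPS_RETIRED.SLOTS"] / counts["INST_RETIRED.ANY"]

print(f"IPC={ipc:.2f}  CPI={cpi:.2f}  UPI={upi:.2f}")  # IPC=1.20  CPI=0.83  UPI=1.25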
diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json
index f158366b9dd6..3701bd93a462 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json
@@ -1,410 +1,574 @@
[
{
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
+ "EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address"
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Speculatively counts the number Transactional Synchronization Extensions (TSX) Aborts due to a data capacity limitation for transactional writes.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculatively counts the number TSX Aborts due to a data capacity limitation for transactional writes."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times HLE abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer"
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00010",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer"
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
+ "EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer."
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero."
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "EventCode": "0x5d",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region"
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
- "EventCode": "0x5d",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded"
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2"
+ "PublicDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x6",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00002",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6"
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
- "EventCode": "0xc3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears due to memory ordering conflicts."
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of times an HLE execution successfully committed",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
- "EventCode": "0xC8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution started."
+ "PublicDescription": "Counts the number of times HLE commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times HLE commit succeeded.",
- "EventCode": "0xC8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution successfully committed",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times HLE abort was triggered.",
- "EventCode": "0xc8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one)."
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts)."
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.)."
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts)."
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution started."
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC08000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times RTM commit succeeded.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution successfully committed"
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted.",
- "Data_LA": "1"
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)"
+ "Counter": "0,1,2,3",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "EventCode": "0xC9",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions"
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
- "EventCode": "0xC9",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of times an HLE execution started.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "EventCode": "0xC9",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)"
+ "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
"PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "50021",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x10",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "20011",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "TakenAlone": "1"
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x20",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x40",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x80",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PEBS": "2",
+ "BriefDescription": "Number of times an RTM execution started.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x100",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x200",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
}
] \ No newline at end of file
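Every OCR.* entry in this memory.json rewrite pairs the generic offcore-response event (EventCode 0xB7 or 0xBB) with an MSRIndex/MSRValue filter. The MSRValue fields above all share the response mask 0x3FFFC00000 for .L3_MISS and differ only in a low request-type bit. A small sketch of that pattern, where the mask constant and helper name are mine and only the values are taken from the entries above:

L3_MISS_RESPONSE_MASK = 0x3FFFC00000  # common high bits of the .L3_MISS MSRValue fields above

def ocr_msr_value(request_bit: int, response_mask: int = L3_MISS_RESPONSE_MASK) -> int:
    # Compose an offcore-response filter from a request-type bit and a response mask.
    return response_mask | request_bit

assert ocr_msr_value(0x001) == 0x3FFFC00001  # OCR.DEMAND_DATA_RD.L3_MISS
assert ocr_msr_value(0x002) == 0x3FFFC00002  # OCR.DEMAND_RFO.L3_MISS
assert ocr_msr_value(0x800) == 0x3FFFC00800  # OCR.STREAMING_WR.L3_MISS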
diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json
index f8dfdb847224..a806b00f8616 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/other.json
@@ -1,121 +1,1090 @@
[
{
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the Top-down Microarchitecture Analysis method. This event is counted on a designated fixed counter (Fixed Counter 3) and is an architectural event.",
- "Counter": "35",
- "UMask": "0x4",
- "PEBScounters": "35",
- "EventName": "TOPDOWN.SLOTS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184008000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184008000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the specualtive path as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
- "BriefDescription": "Counts the number of available slots for an unhalted logical processor."
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
- "EventCode": "0x28",
"Counter": "0,1,2,3",
- "UMask": "0x7",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0010",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
- "SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
- "EventCode": "0x28",
"Counter": "0,1,2,3",
- "UMask": "0x18",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000800",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule."
+ "Speculative": "1",
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
- "EventCode": "0x28",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
- "SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
- "EventCode": "0x32",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHNTA instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
- "EventCode": "0x32",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT0 instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "EventCode": "0x32",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "35",
+ "EventName": "TOPDOWN.SLOTS",
+ "PEBScounters": "35",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHW instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
- "EventCode": "0xa4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
"SampleAfterValue": "10000003",
- "BriefDescription": "Counts the number of available slots for an unhalted logical processor."
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
- "SampleAfterValue": "10000003",
- "BriefDescription": "Issue slots where no uops were being issued due to lack of back end resources."
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C2380",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
- "EventCode": "0xc1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware."
+ "Speculative": "1",
+ "UMask": "0x1"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
index 6d8311e634aa..4f4ce309c2f8 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
@@ -1,892 +1,1035 @@
[
{
+ "BriefDescription": "Mispredicted indirect CALL instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "3",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of uops executed on port 4 and 9",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "33",
- "UMask": "0x2",
- "PEBScounters": "33",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "Counter": "34",
- "UMask": "0x3",
- "PEBScounters": "34",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state."
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Not taken branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when: a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations, c. preceding lock RMW operations are not forwarded, d. store has the no-forward bit set (uncacheable/page-split/masked stores), e. all-blocking stores are used (mostly, fences and port I/O), and others. The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
"SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address."
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
- "EventCode": "0x0D",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of uops executed on port 2 and 3",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "EventCode": "0x0D",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x3",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Taken branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
- "EventCode": "0x0d",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path."
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
- "EventCode": "0x0E",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that RAT issues to RS"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of uops executed on port 6",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "EventCode": "0x14",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x9",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state"
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
- "EventCode": "0x3C",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "25003",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted."
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
- "EventCode": "0x3C",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
"SampleAfterValue": "25003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted."
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
- "EventCode": "0x4c",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
"PEBScounters": "0,1,2,3",
- "EventName": "LOAD_HIT_PREFETCH.SWPF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch."
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread"
+ "Counter": "0,1,2,3",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "EventCode": "0x5E",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
- "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBScounters": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction."
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles without actually retired uops.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 0"
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Far branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 1"
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 2 and 3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_4_9",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 4 and 9"
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 5"
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 6"
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Counts the number of x87 uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 7 and 8"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "EventCode": "0xa2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
- "EventCode": "0xA2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync)."
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
"PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1"
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4"
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Return instructions retired.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x9"
},
{
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles without actually retired instructions.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x14",
+ "CounterMask": "1",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20"
+ "PublicDescription": "This event counts cycles without actually retired instructions.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Taken conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty."
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
- "EventCode": "0xA6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
- "CounterMask": "2"
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LSD.UOPS",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Total execution stalls.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
- "EventCode": "0xa8",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "LSD.CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
- "CounterMask": "5"
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0xc"
},
{
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
"CollectPEBSRecord": "2",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle."
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1"
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Uops that RAT issues to RS",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xb1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2"
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x5"
},
{
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "Counter": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3"
+ "Speculative": "1",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xb1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4"
+ "Speculative": "1",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops executed from any thread.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2"
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3"
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "All branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4"
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of x87 uops executed.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.X87",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched."
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
- "EventCode": "0xC0",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10"
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the retirement slots used each cycle.",
- "EventCode": "0xc2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used."
+ "Speculative": "1"
},
{
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
},
{
+ "BriefDescription": "Number of uops executed on port 0",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected."
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
- "BriefDescription": "All branch instructions retired."
+ "UMask": "0x11"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retirement slots used.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
- "EventCode": "0xc4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken conditional branch instructions retired."
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired."
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts return instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired."
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts not taken branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired."
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts conditional branch instructions retired.",
- "EventCode": "0xc4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x11",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired."
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired."
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired."
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "EventCode": "0xc4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.INDIRECT",
- "SampleAfterValue": "100003",
- "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch)."
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "EventCode": "0xC5",
"Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted branch instructions retired.",
- "Data_LA": "1"
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
- "EventCode": "0xc5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
- "Data_LA": "1"
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xc5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x11",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.COND",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "Data_LA": "1"
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
- "EventCode": "0xC5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "Data_LA": "1"
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
+ "BriefDescription": "TMA slots where uops got dropped",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "EventCode": "0xC5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.INDIRECT",
- "SampleAfterValue": "100003",
- "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "Data_LA": "1"
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xcc",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MISC_RETIRED.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array."
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x14"
},
{
- "PublicDescription": "Counts number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted).",
- "EventCode": "0xcc",
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "EventName": "MISC_RETIRED.PAUSE_INST",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of retired PAUSE instructions."
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All mispredicted branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
- "EventCode": "0xec",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core."
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
index 7180a900c175..f485f4664ea6 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
@@ -1,236 +1,245 @@
[
{
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walks completed due to a demand data load to a 4K page."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page."
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)"
+ "Speculative": "1",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "STLB flush attempts",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle."
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Loads that miss the DTLB and hit the STLB."
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walks completed due to a demand data store to a 4K page."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle."
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Stores that miss the DTLB and hit the STLB."
+ "Speculative": "1",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)"
+ "Speculative": "1",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts code misses in all ITLB (Instruction TLB) levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)"
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
"SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB."
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages."
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
- "EventCode": "0xBD",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "TLB_FLUSH.DTLB_THREAD",
- "SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries"
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
- "EventCode": "0xBD",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts"
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
new file mode 100644
index 000000000000..624762008aaa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -0,0 +1,706 @@
+[
+ {
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding Demand RFO request, increments by 1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "For every cycle where the core is waiting on at least 1 outstanding demand RFO request, increments by 1. RFOs are initiated by a core as part of a data store operation. Demand RFO requests include RFOs, locks, and ItoM transactions. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding data read requests the core is waiting on.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "For every cycle, increments by the number of outstanding data read requests the core is waiting on. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding demand data read request, increments by 1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "For every cycle where the core is waiting on at least 1 outstanding data read request, increments by 1. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts memory transactions sent to the uncore.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "All retired load instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "All retired store instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired demand load instructions which missed L3 but serviced from local IXP memory as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired demand load instructions which missed L3 but serviced from remote IXP memory as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions which data source was serviced from L4",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of lines that are evicted by the L2 cache due to L2 cache fills. Evicted lines are delivered to the L3, which may or may not cache them, according to system load and priorities.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
new file mode 100644
index 000000000000..bcedcd985e84
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
@@ -0,0 +1,95 @@
+[
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
new file mode 100644
index 000000000000..cc59cee1cd57
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
@@ -0,0 +1,469 @@
+[
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. During these cycles uops are not being delivered from the MITE (legacy decode pipeline) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops not delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there were no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are delivered by the IDQ when backend of the machine is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there were no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops was delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there were no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions that experienced DSB miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions that experienced Instruction L1 Cache true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions that experienced Instruction L2 Cache true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions that experienced iTLB true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of times the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ }
+] \ No newline at end of file
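The FRONTEND_RETIRED.LATENCY_GE_* entries above all program MSR 0x3F7 and only vary the MSRValue, which packs the sub-event code together with the latency threshold. A minimal decoding sketch follows; the bit layout (sub-event code in bits 7:0, latency threshold in bits 19:8, bubble width in bits 22:20) is an assumption inferred from the values in this file, not something the file states:

    # Minimal sketch: decoding the MSRValue fields used by FRONTEND_RETIRED.* above.
    # Assumed layout (not stated in this file): bits 7:0 sub-event code,
    # bits 19:8 latency threshold in cycles, bits 22:20 bubble width.
    def decode_frontend_msr(value: int) -> dict:
        return {
            "sub_event": value & 0xFF,
            "latency_threshold": (value >> 8) & 0xFFF,
            "bubble_width": (value >> 20) & 0x7,
        }

    print(decode_frontend_msr(0x500206))  # LATENCY_GE_2: threshold 2
    print(decode_frontend_msr(0x100206))  # LATENCY_GE_2_BUBBLES_GE_1: threshold 2, width 1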
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
new file mode 100644
index 000000000000..d319d448e2aa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -0,0 +1,291 @@
+[
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Machine Clears detected due to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ }
+] \ No newline at end of file
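The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above keep the same EventCode/UMask pair and only vary the threshold written to MSR 0x3F6. As a rough illustration of how the other JSON fields map onto the architectural PERFEVTSELx encoding, here is a minimal sketch; it is a simplification (enable, USR/OS and PEBS configuration are omitted), not what perf itself does internally:

    # Minimal sketch: building the architectural PERFEVTSELx encoding from the
    # JSON fields above (event select bits 7:0, umask bits 15:8, edge bit 18,
    # invert bit 23, counter mask bits 31:24). Enable/USR/OS bits are omitted.
    def raw_encoding(event_code: int, umask: int, cmask: int = 0,
                     edge: int = 0, inv: int = 0) -> int:
        return ((event_code & 0xFF) | ((umask & 0xFF) << 8) | (edge << 18)
                | (inv << 23) | ((cmask & 0xFF) << 24))

    # MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32: EventCode 0xcd, UMask 0x1; the
    # 32-cycle threshold itself is programmed separately through MSR 0x3F6.
    print(hex(raw_encoding(0xCD, 0x01)))  # 0x1cd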
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
new file mode 100644
index 000000000000..ef50d3a3392e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -0,0 +1,181 @@
+[
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "35",
+ "EventName": "TOPDOWN.SLOTS",
+ "PEBScounters": "35",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) that share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) that share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core's caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writes that generate a demand read for ownership (RFO) request and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core's caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+] \ No newline at end of file
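As the OCR.* descriptions above state, an offcore-response event is really two pieces of programming: the event select/UMask pair (EventCode 0xB7 or 0xBB, UMask 0x1) and an MSRValue written to the matching dedicated MSR (0x1A6 or 0x1A7). A minimal sketch of that pairing, using the OCR.STREAMING_WR.ANY_RESPONSE values from this file; the helper is illustrative only and does not touch real MSRs:

    # Minimal sketch of the pairing the OCR.* descriptions refer to: one of two
    # event-select/MSR combinations, with MSRValue loaded into the offcore-response MSR.
    OFFCORE_PAIRS = (
        {"event_code": 0xB7, "umask": 0x1, "msr": 0x1A6},
        {"event_code": 0xBB, "umask": 0x1, "msr": 0x1A7},
    )

    def program_ocr(msr_value: int, pair: dict) -> dict:
        # Illustrative only: a real implementation would program the counter and
        # write pair["msr"]; here we just return the intended configuration.
        return {**pair, "msr_value": msr_value}

    # OCR.STREAMING_WR.ANY_RESPONSE uses MSRValue 0x10800 (from the entry above).
    print(program_ocr(0x10800, OFFCORE_PAIRS[0]))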
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
new file mode 100644
index 000000000000..3cc71244e699
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -0,0 +1,972 @@
+[
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during the duty-off periods of 'throttling (TM)' states, when the processor is 'halted'. The counter update is done at a lower clock rate than the core clock, so the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed, software clears the overflow status bit and resets the counter to less than MAX. The reset value is not clocked into the counter immediately, so the overflow status bit will flip 'high' (1) and generate another PMI (if enabled), after which the reset value gets clocked into the counter. Therefore, software will get the interrupt and read an overflow status bit of '1' for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "False dependencies due to partial compare on address.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Backend cluster is recovering after a mis-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a mis-speculation or a Store Buffer or Load Buffer drain stall.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non-front-end reasons.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
+ "SampleAfterValue": "25003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all non-software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions, so it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses).",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles during which Instruction Length Decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix 0x66, address size prefix 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 0",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 2 and 3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 4 and 9",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 4 and 9.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 6",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x14"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ }
+] \ No newline at end of file
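The core-event entries above carry the standard IA32_PERFEVTSELx fields: EventCode in bits 0-7, UMask in bits 8-15, EdgeDetect, Invert and CounterMask in bits 18, 23 and 24-31. A minimal sketch of how one such entry translates into a raw value usable with perf's cpu// syntax, assuming a hypothetical raw_event() helper (not part of this patch):

def raw_event(entry):
    # Pack one JSON event entry from the file above into a raw
    # PERFEVTSEL-style value: event | umask<<8 | edge<<18 | inv<<23 | cmask<<24.
    val = int(entry["EventCode"], 16)
    val |= int(entry.get("UMask", "0"), 16) << 8
    if entry.get("EdgeDetect") == "1":
        val |= 1 << 18
    if entry.get("Invert") == "1":
        val |= 1 << 23
    val |= int(entry.get("CounterMask", "0")) << 24
    return val

# UOPS_DISPATCHED.PORT_0 above (EventCode 0xa1, UMask 0x1) -> 0x1a1,
# i.e. roughly `perf stat -e cpu/event=0xa1,umask=0x1/`.
print(hex(raw_event({"EventCode": "0xa1", "UMask": "0x1"})))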
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
new file mode 100644
index 000000000000..5f0d2c462940
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
@@ -0,0 +1,333 @@
+[
+ {
+ "BriefDescription": "2LM Tag Check : Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Miss, no data in this line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Miss, existing data may be evicted to Far Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Read Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.NM_RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Write Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.NM_WR_HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x0f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Half clockticks for IMC",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : All Activates",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0B",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1C",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Reads - RPQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Underfill reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ }
+]
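The iMC entries in this file are uncore events, so they are programmed through the uncore_imc_* PMUs rather than cpu//. A minimal sketch of building a perf event selector string from one entry above, assuming a hypothetical imc_event_string() helper and the uncore_imc_0 instance name (the actual instances on a given system are listed under /sys/bus/event_source/devices/):

def imc_event_string(entry, pmu="uncore_imc_0"):
    # Build a perf event selector string from one iMC JSON entry above.
    parts = ["event=" + entry["EventCode"]]
    if "UMask" in entry:
        parts.append("umask=" + entry["UMask"])
    return pmu + "/" + ",".join(parts) + "/"

# UNC_M_CAS_COUNT.RD above (EventCode 0x04, UMask 0x0f)
# -> uncore_imc_0/event=0x04,umask=0x0f/
print(imc_event_string({"EventCode": "0x04", "UMask": "0x0f"}))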
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
new file mode 100644
index 000000000000..52f2301582bb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
@@ -0,0 +1,2476 @@
+[
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote INVITOE requests (exclusive ownership of a cache line without receiving data) sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the uncore caching &amp;amp; home agent (CHA)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : All Lines Victimized",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x0c",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "UMask": "0xC001FF01",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD01",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFD01",
+ "UMaskExt": "0xC80FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FD01",
+ "UMaskExt": "0xC817FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FD01",
+ "UMaskExt": "0xCCC7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FD01",
+ "UMaskExt": "0xC807FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFE01",
+ "UMaskExt": "0xC80FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FE01",
+ "UMaskExt": "0xC817FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FE01",
+ "UMaskExt": "0xCCC7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FE01",
+ "UMaskExt": "0xC807FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "UMask": "0xC001FF04",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD04",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE04",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "UMask": "0xC001FF01",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD01",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFE01",
+ "UMaskExt": "0xC80FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FE01",
+ "UMaskExt": "0xC817FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FE01",
+ "UMaskExt": "0xC807FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "UMask": "0xC001FF04",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD04",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE04",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FE04",
+ "UMaskExt": "0xCC43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFD01",
+ "UMaskExt": "0xC88FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FD01",
+ "UMaskExt": "0xC897FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FD01",
+ "UMaskExt": "0xC887FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFE01",
+ "UMaskExt": "0xC88FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FE01",
+ "UMaskExt": "0xC897FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FE01",
+ "UMaskExt": "0xC887FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FD04",
+ "UMaskExt": "0xCC43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FF04",
+ "UMaskExt": "0xCC43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FF01",
+ "UMaskExt": "0xC887FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FF01",
+ "UMaskExt": "0xC807FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FF01",
+ "UMaskExt": "0xCCC7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FF01",
+ "UMaskExt": "0xC897FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFF01",
+ "UMaskExt": "0xC80FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FF01",
+ "UMaskExt": "0xC807FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FF01",
+ "UMaskExt": "0xC817FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFF01",
+ "UMaskExt": "0xC80FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC816FE01",
+ "UMaskExt": "0xC816FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8177E01",
+ "UMaskExt": "0xC8177E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC816FE01",
+ "UMaskExt": "0xC816FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8177E01",
+ "UMaskExt": "0xC8177E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC896FE01",
+ "UMaskExt": "0xC896FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8977E01",
+ "UMaskExt": "0xC8977E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC806FE01",
+ "UMaskExt": "0xC806FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8077E01",
+ "UMaskExt": "0xC8077E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC886FE01",
+ "UMaskExt": "0xC886FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8877E01",
+ "UMaskExt": "0xC8877E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C7FF01",
+ "UMaskExt": "0xC8C7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC57FF01",
+ "UMaskExt": "0xCC57FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FF04",
+ "UMaskExt": "0xCD43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FD04",
+ "UMaskExt": "0xCD43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FE04",
+ "UMaskExt": "0xCD43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8178A01",
+ "UMaskExt": "0xC8178A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8168A01",
+ "UMaskExt": "0xC8168A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8170A01",
+ "UMaskExt": "0xC8170A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc867fe01",
+ "UMaskExt": "0xc867fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc86ffe01",
+ "UMaskExt": "0xc86ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8178A01",
+ "UMaskExt": "0xC8178A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FE01",
+ "UMaskExt": "0xCCD7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FE04",
+ "UMaskExt": "0xC8F3FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xc8f3fe04",
+ "UMaskExt": "0xc8f3fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8178601",
+ "UMaskExt": "0xC81786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8168601",
+ "UMaskExt": "0xC81686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8170601",
+ "UMaskExt": "0xC81706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8178601",
+ "UMaskExt": "0xC81786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FD04",
+ "UMaskExt": "0xC8F3FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FF04",
+ "UMaskExt": "0xC8F3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FF01",
+ "UMaskExt": "0xCCD7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FF04",
+ "UMaskExt": "0xC8F3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x1BC1FF",
+ "UMaskExt": "0x1BC1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : WbMtoI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in any state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in A state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in I state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in S state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Read Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Read Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0720",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x1C80",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to UPI (M3UPI)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Number of kfclks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ }
+]
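The CHA, IIO, IRP, M2M, M2PCIe, M3UPI, UBOX and UPI entries above are consumed by perf's jevents step at build time, which (roughly) turns each "EventName" into a lower-case alias on the PMU named by "Unit", so the EventCode/UMask encodings never have to be typed by hand. A minimal sketch of what that mapping yields, assuming the file path added by this patch (the real lookup is generated C code, not this script):

```python
#!/usr/bin/env python3
# Sketch: list the perf aliases one uncore JSON file would provide.
# The path below is the one added by this patch; adjust as needed.
import json

path = "tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json"
with open(path) as f:
    events = json.load(f)

for ev in events:
    alias = ev["EventName"].lower()       # perf exposes event names in lower case
    unit = ev.get("Unit", "?")            # PMU the alias belongs to (CHA, IIO, IRP, ...)
    code = ev.get("EventCode", "n/a")     # a few clocktick events carry no EventCode
    print(f"{alias:55s} unit={unit:8s} code={code}")

# Typical use (comment only): perf stat -a -e unc_cha_tor_inserts.ia_miss_drd_ddr -- sleep 1
```

The IIO "Four byte data request" events count in 4-byte units, so multiplying a MEM_READ/MEM_WRITE part count by 4 gives an approximate inbound PCIe byte count for that port.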
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
new file mode 100644
index 000000000000..2d1368958762
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "Clockticks of the power control unit (PCU)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
new file mode 100644
index 000000000000..1b9d03039c53
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
@@ -0,0 +1,245 @@
+[
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "STLB flush attempts",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x20"
+ }
+]
\ No newline at end of file
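In the virtual-memory file, the WALK_PENDING and WALK_COMPLETED pairs combine naturally: WALK_PENDING accumulates one count per outstanding walk per cycle, while WALK_COMPLETED counts finished walks, so their ratio approximates the average page-walk duration in cycles. A small sketch with hypothetical counter readings:

```python
# Sketch: average demand-load page-walk latency from two DTLB events.
# The values are hypothetical; in practice they would come from e.g.
#   perf stat -e dtlb_load_misses.walk_pending,dtlb_load_misses.walk_completed -- <cmd>
walk_pending = 9_500_000      # DTLB_LOAD_MISSES.WALK_PENDING
walk_completed = 120_000      # DTLB_LOAD_MISSES.WALK_COMPLETED

avg_cycles = walk_pending / walk_completed if walk_completed else 0.0
print(f"average demand-load page walk: {avg_cycles:.1f} cycles")
```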
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 0a6a8c7f937f..5f5df6560202 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -38,6 +38,8 @@ GenuineIntel-6-7D,v1,icelake,core
GenuineIntel-6-7E,v1,icelake,core
GenuineIntel-6-8[CD],v1,icelake,core
GenuineIntel-6-A7,v1,icelake,core
+GenuineIntel-6-6A,v1,icelakex,core
+GenuineIntel-6-6C,v1,icelakex,core
GenuineIntel-6-86,v1,tremontx,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
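The two new mapfile rows route CPUID models 0x6A and 0x6C (Icelake server parts) to the icelakex event directory added by this patch. The first CSV field is treated as a regular expression over the vendor-family-model string, which is why neighbouring rows can use patterns such as 8[CD]. A rough sketch of that lookup, under the assumption that a plain full-regex match is close enough to what perf's generated matcher does:

```python
# Sketch: resolve a CPUID string against mapfile.csv (assumed semantics; perf's
# generated matcher may anchor or order rows differently, and POSIX classes
# such as [[:xdigit:]] would need translation for Python's re module).
import csv
import re

def resolve(cpuid, mapfile="tools/perf/pmu-events/arch/x86/mapfile.csv"):
    with open(mapfile, newline="") as f:
        for row in csv.reader(f):
            if not row or row[0].startswith("Family-model"):
                continue                     # skip the header line
            pattern, _version, dirname, _evtype = row[:4]
            if re.fullmatch(pattern, cpuid):
                return dirname
    return None

print(resolve("GenuineIntel-6-6A"))          # -> "icelakex" once this patch is applied
```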
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 0b7096847991..895f5fc23965 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -5,68 +5,178 @@
* Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
*/
+/*
+ * Use Py_ssize_t for '#' formats to avoid DeprecationWarning: PY_SSIZE_T_CLEAN
+ * will be required for '#' formats.
+ */
+#define PY_SSIZE_T_CLEAN
+
#include <Python.h>
#include "../../../util/trace-event.h"
+#include "../../../util/event.h"
+#include "../../../util/symbol.h"
+#include "../../../util/thread.h"
+#include "../../../util/map.h"
+#include "../../../util/maps.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/session.h"
+#include "../../../util/srcline.h"
+#include "../../../util/srccode.h"
#if PY_MAJOR_VERSION < 3
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCObject_AsVoidPtr(arg1)
+#define _PyBytes_FromStringAndSize(arg1, arg2) \
+ PyString_FromStringAndSize((arg1), (arg2))
+#define _PyUnicode_AsUTF8(arg) \
+ PyString_AsString(arg)
PyMODINIT_FUNC initperf_trace_context(void);
#else
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCapsule_GetPointer((arg1), (arg2))
+#define _PyBytes_FromStringAndSize(arg1, arg2) \
+ PyBytes_FromStringAndSize((arg1), (arg2))
+#define _PyUnicode_AsUTF8(arg) \
+ PyUnicode_AsUTF8(arg)
PyMODINIT_FUNC PyInit_perf_trace_context(void);
#endif
-static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+static struct scripting_context *get_args(PyObject *args, const char *name, PyObject **arg2)
{
- static struct scripting_context *scripting_context;
+ int cnt = 1 + !!arg2;
PyObject *context;
- int retval;
- if (!PyArg_ParseTuple(args, "O", &context))
+ if (!PyArg_UnpackTuple(args, name, 1, cnt, &context, arg2))
return NULL;
- scripting_context = _PyCapsule_GetPointer(context, NULL);
- retval = common_pc(scripting_context);
+ return _PyCapsule_GetPointer(context, NULL);
+}
- return Py_BuildValue("i", retval);
+static struct scripting_context *get_scripting_context(PyObject *args)
+{
+ return get_args(args, "context", NULL);
+}
+
+static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+{
+ struct scripting_context *c = get_scripting_context(args);
+
+ if (!c)
+ return NULL;
+
+ return Py_BuildValue("i", common_pc(c));
}
static PyObject *perf_trace_context_common_flags(PyObject *obj,
PyObject *args)
{
- static struct scripting_context *scripting_context;
- PyObject *context;
- int retval;
+ struct scripting_context *c = get_scripting_context(args);
- if (!PyArg_ParseTuple(args, "O", &context))
+ if (!c)
return NULL;
- scripting_context = _PyCapsule_GetPointer(context, NULL);
- retval = common_flags(scripting_context);
-
- return Py_BuildValue("i", retval);
+ return Py_BuildValue("i", common_flags(c));
}
static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
PyObject *args)
{
- static struct scripting_context *scripting_context;
- PyObject *context;
- int retval;
+ struct scripting_context *c = get_scripting_context(args);
- if (!PyArg_ParseTuple(args, "O", &context))
+ if (!c)
return NULL;
- scripting_context = _PyCapsule_GetPointer(context, NULL);
- retval = common_lock_depth(scripting_context);
+ return Py_BuildValue("i", common_lock_depth(c));
+}
+static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
+{
+ struct scripting_context *c = get_scripting_context(args);
+
+ if (!c)
+ return NULL;
+
+ if (c->sample->ip && !c->sample->insn_len &&
+ c->al->thread->maps && c->al->thread->maps->machine)
+ script_fetch_insn(c->sample, c->al->thread, c->al->thread->maps->machine);
+
+ if (!c->sample->insn_len)
+ Py_RETURN_NONE; /* N.B. This is a return statement */
+
+ return _PyBytes_FromStringAndSize(c->sample->insn, c->sample->insn_len);
+}
+
+static PyObject *perf_set_itrace_options(PyObject *obj, PyObject *args)
+{
+ struct scripting_context *c;
+ const char *itrace_options;
+ int retval = -1;
+ PyObject *str;
+
+ c = get_args(args, "itrace_options", &str);
+ if (!c)
+ return NULL;
+
+ if (!c->session || !c->session->itrace_synth_opts)
+ goto out;
+
+ if (c->session->itrace_synth_opts->set) {
+ retval = 1;
+ goto out;
+ }
+
+ itrace_options = _PyUnicode_AsUTF8(str);
+
+ retval = itrace_do_parse_synth_opts(c->session->itrace_synth_opts, itrace_options, 0);
+out:
return Py_BuildValue("i", retval);
}
+static PyObject *perf_sample_src(PyObject *obj, PyObject *args, bool get_srccode)
+{
+ struct scripting_context *c = get_scripting_context(args);
+ unsigned int line = 0;
+ char *srcfile = NULL;
+ char *srccode = NULL;
+ PyObject *result;
+ struct map *map;
+ int len = 0;
+ u64 addr;
+
+ if (!c)
+ return NULL;
+
+ map = c->al->map;
+ addr = c->al->addr;
+
+ if (map && map->dso)
+ srcfile = get_srcline_split(map->dso, map__rip_2objdump(map, addr), &line);
+
+ if (get_srccode) {
+ if (srcfile)
+ srccode = find_sourceline(srcfile, line, &len);
+ result = Py_BuildValue("(sIs#)", srcfile, line, srccode, (Py_ssize_t)len);
+ } else {
+ result = Py_BuildValue("(sI)", srcfile, line);
+ }
+
+ free(srcfile);
+
+ return result;
+}
+
+static PyObject *perf_sample_srcline(PyObject *obj, PyObject *args)
+{
+ return perf_sample_src(obj, args, false);
+}
+
+static PyObject *perf_sample_srccode(PyObject *obj, PyObject *args)
+{
+ return perf_sample_src(obj, args, true);
+}
+
static PyMethodDef ContextMethods[] = {
{ "common_pc", perf_trace_context_common_pc, METH_VARARGS,
"Get the common preempt count event field value."},
@@ -74,6 +184,14 @@ static PyMethodDef ContextMethods[] = {
"Get the common flags event field value."},
{ "common_lock_depth", perf_trace_context_common_lock_depth,
METH_VARARGS, "Get the common lock depth event field value."},
+ { "perf_sample_insn", perf_sample_insn,
+ METH_VARARGS, "Get the machine code instruction."},
+ { "perf_set_itrace_options", perf_set_itrace_options,
+ METH_VARARGS, "Set --itrace options."},
+ { "perf_sample_srcline", perf_sample_srcline,
+ METH_VARARGS, "Get source file name and line number."},
+ { "perf_sample_srccode", perf_sample_srccode,
+ METH_VARARGS, "Get source file name, line number and line."},
{ NULL, NULL, 0, NULL}
};
@@ -96,6 +214,12 @@ PyMODINIT_FUNC PyInit_perf_trace_context(void)
NULL, /* m_clear */
NULL, /* m_free */
};
- return PyModule_Create(&moduledef);
+ PyObject *mod;
+
+ mod = PyModule_Create(&moduledef);
+ /* Add perf_script_context to the module so it can be imported */
+ PyObject_SetAttrString(mod, "perf_script_context", Py_None);
+
+ return mod;
}
#endif
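Taken together, the additions above give perf Python scripts direct access to the sample's machine code, source location and --itrace options. A minimal sketch of how a script might use the new functions (perf supplies perf_script_context at run time; the handler bodies below are illustrative only, not part of this patch):

    from perf_trace_context import perf_set_itrace_options, perf_sample_insn, perf_sample_srcline

    def trace_begin():
        # Request instruction samples etc. from the decoder; this is a no-op
        # if --itrace was already given on the perf script command line
        perf_set_itrace_options(perf_script_context, "i0nsepwx")

    def process_event(param_dict):
        insn = perf_sample_insn(perf_script_context)          # bytes or None
        srcfile, line = perf_sample_srcline(perf_script_context)
        if insn:
            print("%s:%s  %u byte(s) of machine code" % (srcfile, line, len(insn)))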
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-record b/tools/perf/scripts/python/bin/intel-pt-events-record
index 10fe2b6977d4..6b9877cfe23e 100644
--- a/tools/perf/scripts/python/bin/intel-pt-events-record
+++ b/tools/perf/scripts/python/bin/intel-pt-events-record
@@ -1,8 +1,8 @@
#!/bin/bash
#
-# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
-# to be specified with appropriate config terms.
+# print Intel PT Events including Power Events and PTWRITE. The intel_pt PMU
+# event needs to be specified with appropriate config terms.
#
if ! echo "$@" | grep -q intel_pt ; then
echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-report b/tools/perf/scripts/python/bin/intel-pt-events-report
index 9a9c92fcd026..beeac3fde9db 100644
--- a/tools/perf/scripts/python/bin/intel-pt-events-report
+++ b/tools/perf/scripts/python/bin/intel-pt-events-report
@@ -1,3 +1,3 @@
#!/bin/bash
-# description: print Intel PT Power Events and PTWRITE
-perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py
\ No newline at end of file
+# description: print Intel PT Events including Power Events and PTWRITE
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 711d4f9f5645..13f2d8a81610 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -113,6 +113,7 @@ import os
import random
import copy
import math
+from libxed import LibXED
pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
@@ -4747,94 +4748,6 @@ class MainWindow(QMainWindow):
dialog = AboutDialog(self.glb, self)
dialog.exec_()
-# XED Disassembler
-
-class xed_state_t(Structure):
-
- _fields_ = [
- ("mode", c_int),
- ("width", c_int)
- ]
-
-class XEDInstruction():
-
- def __init__(self, libxed):
- # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
- xedd_t = c_byte * 512
- self.xedd = xedd_t()
- self.xedp = addressof(self.xedd)
- libxed.xed_decoded_inst_zero(self.xedp)
- self.state = xed_state_t()
- self.statep = addressof(self.state)
- # Buffer for disassembled instruction text
- self.buffer = create_string_buffer(256)
- self.bufferp = addressof(self.buffer)
-
-class LibXED():
-
- def __init__(self):
- try:
- self.libxed = CDLL("libxed.so")
- except:
- self.libxed = None
- if not self.libxed:
- self.libxed = CDLL("/usr/local/lib/libxed.so")
-
- self.xed_tables_init = self.libxed.xed_tables_init
- self.xed_tables_init.restype = None
- self.xed_tables_init.argtypes = []
-
- self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
- self.xed_decoded_inst_zero.restype = None
- self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
-
- self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
- self.xed_operand_values_set_mode.restype = None
- self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
-
- self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
- self.xed_decoded_inst_zero_keep_mode.restype = None
- self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
-
- self.xed_decode = self.libxed.xed_decode
- self.xed_decode.restype = c_int
- self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
-
- self.xed_format_context = self.libxed.xed_format_context
- self.xed_format_context.restype = c_uint
- self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
-
- self.xed_tables_init()
-
- def Instruction(self):
- return XEDInstruction(self)
-
- def SetMode(self, inst, mode):
- if mode:
- inst.state.mode = 4 # 32-bit
- inst.state.width = 4 # 4 bytes
- else:
- inst.state.mode = 1 # 64-bit
- inst.state.width = 8 # 8 bytes
- self.xed_operand_values_set_mode(inst.xedp, inst.statep)
-
- def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
- self.xed_decoded_inst_zero_keep_mode(inst.xedp)
- err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
- if err:
- return 0, ""
- # Use AT&T mode (2), alternative is Intel (3)
- ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
- if not ok:
- return 0, ""
- if sys.version_info[0] == 2:
- result = inst.buffer.value
- else:
- result = inst.buffer.value.decode()
- # Return instruction length and the disassembled instruction text
- # For now, assume the length is in byte 166
- return inst.xedd[166], result
-
def TryOpen(file_name):
try:
return open(file_name, "rb")
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index a73847c8f548..1d3a189a9a54 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -1,5 +1,6 @@
-# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
-# Copyright (c) 2017, Intel Corporation.
+# SPDX-License-Identifier: GPL-2.0
+# intel-pt-events.py: Print Intel PT Events including Power Events and PTWRITE
+# Copyright (c) 2017-2021, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -15,16 +16,82 @@ from __future__ import print_function
import os
import sys
import struct
+import argparse
+
+from libxed import LibXED
+from ctypes import create_string_buffer, addressof
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
-# These perf imports are not used at present
-#from perf_trace_context import *
-#from Core import *
+from perf_trace_context import perf_set_itrace_options, \
+ perf_sample_insn, perf_sample_srccode
+
+try:
+ broken_pipe_exception = BrokenPipeError
+except:
+ broken_pipe_exception = IOError
+
+glb_switch_str = None
+glb_switch_printed = True
+glb_insn = False
+glb_disassembler = None
+glb_src = False
+glb_source_file_name = None
+glb_line_number = None
+glb_dso = None
+
+def get_optional_null(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return ""
+
+def get_optional_zero(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return 0
+
+def get_optional_bytes(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return bytes()
+
+def get_optional(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return "[unknown]"
+
+def get_offset(perf_dict, field):
+ if field in perf_dict:
+ return "+%#x" % perf_dict[field]
+ return ""
def trace_begin():
- print("Intel PT Power Events and PTWRITE")
+ ap = argparse.ArgumentParser(usage = "", add_help = False)
+ ap.add_argument("--insn-trace", action='store_true')
+ ap.add_argument("--src-trace", action='store_true')
+ global glb_args
+ global glb_insn
+ global glb_src
+ glb_args = ap.parse_args()
+ if glb_args.insn_trace:
+ print("Intel PT Instruction Trace")
+ itrace = "i0nsepwx"
+ glb_insn = True
+ elif glb_args.src_trace:
+ print("Intel PT Source Trace")
+ itrace = "i0nsepwx"
+ glb_insn = True
+ glb_src = True
+ else:
+ print("Intel PT Branch Trace, Power Events and PTWRITE")
+ itrace = "bepwx"
+ global glb_disassembler
+ try:
+ glb_disassembler = LibXED()
+ except:
+ glb_disassembler = None
+ perf_set_itrace_options(perf_script_context, itrace)
def trace_end():
print("End")
@@ -77,58 +144,212 @@ def print_pwrx(raw_buf):
print("deepest cstate: %u last cstate: %u wake reason: %#x" %
(deepest_cstate, last_cstate, wake_reason), end=' ')
-def print_common_start(comm, sample, name):
+def print_psb(raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ offset = data[1]
+ print("offset: %#x" % (offset), end=' ')
+
+def common_start_str(comm, sample):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
- (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
- end=' ')
+ return "%16s %5u/%-5u [%03u] %9u.%09u " % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000)
+
+def print_common_start(comm, sample, name):
+ flags_disp = get_optional_null(sample, "flags_disp")
+ # Unused fields:
+ # period = sample["period"]
+ # phys_addr = sample["phys_addr"]
+ # weight = sample["weight"]
+ # transaction = sample["transaction"]
+ # cpumode = get_optional_zero(sample, "cpumode")
+ print(common_start_str(comm, sample) + "%7s %19s" % (name, flags_disp), end=' ')
+
+def print_instructions_start(comm, sample):
+ if "x" in get_optional_null(sample, "flags"):
+ print(common_start_str(comm, sample) + "x", end=' ')
+ else:
+ print(common_start_str(comm, sample), end=' ')
+
+def disassem(insn, ip):
+ inst = glb_disassembler.Instruction()
+ glb_disassembler.SetMode(inst, 0) # Assume 64-bit
+ buf = create_string_buffer(64)
+ buf.value = insn
+ return glb_disassembler.DisassembleOne(inst, addressof(buf), len(insn), ip)
+
+def print_common_ip(param_dict, sample, symbol, dso):
+ ip = sample["ip"]
+ offs = get_offset(param_dict, "symoff")
+ if "cyc_cnt" in sample:
+ cyc_cnt = sample["cyc_cnt"]
+ insn_cnt = get_optional_zero(sample, "insn_cnt")
+ ipc_str = " IPC: %#.2f (%u/%u)" % (insn_cnt / cyc_cnt, insn_cnt, cyc_cnt)
+ else:
+ ipc_str = ""
+ if glb_insn and glb_disassembler is not None:
+ insn = perf_sample_insn(perf_script_context)
+ if insn and len(insn):
+ cnt, text = disassem(insn, ip)
+ byte_str = ("%x" % ip).rjust(16)
+ if sys.version_info.major >= 3:
+ for k in range(cnt):
+ byte_str += " %02x" % insn[k]
+ else:
+ for k in xrange(cnt):
+ byte_str += " %02x" % ord(insn[k])
+ print("%-40s %-30s" % (byte_str, text), end=' ')
+ print("%s%s (%s)" % (symbol, offs, dso), end=' ')
+ else:
+ print("%16x %s%s (%s)" % (ip, symbol, offs, dso), end=' ')
+ if "addr_correlates_sym" in sample:
+ addr = sample["addr"]
+ dso = get_optional(sample, "addr_dso")
+ symbol = get_optional(sample, "addr_symbol")
+ offs = get_offset(sample, "addr_symoff")
+ print("=> %x %s%s (%s)%s" % (addr, symbol, offs, dso, ipc_str))
+ else:
+ print(ipc_str)
-def print_common_ip(sample, symbol, dso):
+def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
ip = sample["ip"]
- print("%16x %s (%s)" % (ip, symbol, dso))
+ if symbol == "[unknown]":
+ start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40)
+ else:
+ offs = get_offset(param_dict, "symoff")
+ start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40)
-def process_event(param_dict):
+ if with_insn and glb_insn and glb_disassembler is not None:
+ insn = perf_sample_insn(perf_script_context)
+ if insn and len(insn):
+ cnt, text = disassem(insn, ip)
+ start_str += text.ljust(30)
+
+ global glb_source_file_name
+ global glb_line_number
+ global glb_dso
+
+ source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context)
+ if source_file_name:
+ if glb_line_number == line_number and glb_source_file_name == source_file_name:
+ src_str = ""
+ else:
+ if len(source_file_name) > 40:
+ src_file = ("..." + source_file_name[-37:]) + " "
+ else:
+ src_file = source_file_name.ljust(41)
+ if source_line is None:
+ src_str = src_file + str(line_number).rjust(4) + " <source not found>"
+ else:
+ src_str = src_file + str(line_number).rjust(4) + " " + source_line
+ glb_dso = None
+ elif dso == glb_dso:
+ src_str = ""
+ else:
+ src_str = dso
+ glb_dso = dso
+
+ glb_line_number = line_number
+ glb_source_file_name = source_file_name
+
+ print(start_str, src_str)
+
+def do_process_event(param_dict):
+ global glb_switch_printed
+ if not glb_switch_printed:
+ print(glb_switch_str)
+ glb_switch_printed = True
event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
+ # Unused fields:
+ # callchain = param_dict["callchain"]
+ # brstack = param_dict["brstack"]
+ # brstacksym = param_dict["brstacksym"]
# Symbol and dso info are not always resolved
- if "dso" in param_dict:
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if "symbol" in param_dict:
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ dso = get_optional(param_dict, "dso")
+ symbol = get_optional(param_dict, "symbol")
- if name == "ptwrite":
+ if name[0:12] == "instructions":
+ if glb_src:
+ print_srccode(comm, param_dict, sample, symbol, dso, True)
+ else:
+ print_instructions_start(comm, sample)
+ print_common_ip(param_dict, sample, symbol, dso)
+ elif name[0:8] == "branches":
+ if glb_src:
+ print_srccode(comm, param_dict, sample, symbol, dso, False)
+ else:
+ print_common_start(comm, sample, name)
+ print_common_ip(param_dict, sample, symbol, dso)
+ elif name == "ptwrite":
print_common_start(comm, sample, name)
print_ptwrite(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "cbr":
print_common_start(comm, sample, name)
print_cbr(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "mwait":
print_common_start(comm, sample, name)
print_mwait(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "pwre":
print_common_start(comm, sample, name)
print_pwre(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "exstop":
print_common_start(comm, sample, name)
print_exstop(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "pwrx":
print_common_start(comm, sample, name)
print_pwrx(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
+ elif name == "psb":
+ print_common_start(comm, sample, name)
+ print_psb(raw_buf)
+ print_common_ip(param_dict, sample, symbol, dso)
+ else:
+ print_common_start(comm, sample, name)
+ print_common_ip(param_dict, sample, symbol, dso)
+
+def process_event(param_dict):
+ try:
+ do_process_event(param_dict)
+ except broken_pipe_exception:
+ # Stop python printing broken pipe errors and traceback
+ sys.stdout = open(os.devnull, 'w')
+ sys.exit(1)
+
+def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
+ try:
+ print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" %
+ ("Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip))
+ except broken_pipe_exception:
+ # Stop python printing broken pipe errors and traceback
+ sys.stdout = open(os.devnull, 'w')
+ sys.exit(1)
+
+def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
+ global glb_switch_printed
+ global glb_switch_str
+ if out:
+ out_str = "Switch out "
+ else:
+ out_str = "Switch In "
+ if out_preempt:
+ preempt_str = "preempt"
+ else:
+ preempt_str = ""
+ if machine_pid == -1:
+ machine_str = ""
+ else:
+ machine_str = "machine PID %d" % machine_pid
+ glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
+ (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
+ glb_switch_printed = False
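The new context_switch() hook above deliberately defers output: it only formats glb_switch_str, and do_process_event() prints it just before the next sample so the switch line lands in the right place in the trace. The same pattern in isolation (a sketch; the names below are illustrative, not from the script):

    pending_switch = None

    def context_switch(ts, cpu, pid, tid, *rest):
        # perf calls this for each switch event; just stash a line for later
        global pending_switch
        pending_switch = "switch cpu %u pid %d" % (cpu, pid)

    def process_event(param_dict):
        # perf calls this for each sample; flush any pending switch first
        global pending_switch
        if pending_switch:
            print(pending_switch)
            pending_switch = None
        # ... normal per-sample printing follows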
diff --git a/tools/perf/scripts/python/libxed.py b/tools/perf/scripts/python/libxed.py
new file mode 100644
index 000000000000..2c70a5a7eb9c
--- /dev/null
+++ b/tools/perf/scripts/python/libxed.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# libxed.py: Python wrapper for libxed.so
+# Copyright (c) 2014-2021, Intel Corporation.
+
+# To use Intel XED, libxed.so must be present. To build and install
+# libxed.so:
+# git clone https://github.com/intelxed/mbuild.git mbuild
+# git clone https://github.com/intelxed/xed
+# cd xed
+# ./mfile.py --share
+# sudo ./mfile.py --prefix=/usr/local install
+# sudo ldconfig
+#
+
+import sys
+
+from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
+ c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
+
+# XED Disassembler
+
+class xed_state_t(Structure):
+
+ _fields_ = [
+ ("mode", c_int),
+ ("width", c_int)
+ ]
+
+class XEDInstruction():
+
+ def __init__(self, libxed):
+ # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
+ xedd_t = c_byte * 512
+ self.xedd = xedd_t()
+ self.xedp = addressof(self.xedd)
+ libxed.xed_decoded_inst_zero(self.xedp)
+ self.state = xed_state_t()
+ self.statep = addressof(self.state)
+ # Buffer for disassembled instruction text
+ self.buffer = create_string_buffer(256)
+ self.bufferp = addressof(self.buffer)
+
+class LibXED():
+
+ def __init__(self):
+ try:
+ self.libxed = CDLL("libxed.so")
+ except:
+ self.libxed = None
+ if not self.libxed:
+ self.libxed = CDLL("/usr/local/lib/libxed.so")
+
+ self.xed_tables_init = self.libxed.xed_tables_init
+ self.xed_tables_init.restype = None
+ self.xed_tables_init.argtypes = []
+
+ self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
+ self.xed_decoded_inst_zero.restype = None
+ self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
+
+ self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
+ self.xed_operand_values_set_mode.restype = None
+ self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
+
+ self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
+ self.xed_decoded_inst_zero_keep_mode.restype = None
+ self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
+
+ self.xed_decode = self.libxed.xed_decode
+ self.xed_decode.restype = c_int
+ self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
+
+ self.xed_format_context = self.libxed.xed_format_context
+ self.xed_format_context.restype = c_uint
+ self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
+
+ self.xed_tables_init()
+
+ def Instruction(self):
+ return XEDInstruction(self)
+
+ def SetMode(self, inst, mode):
+ if mode:
+ inst.state.mode = 4 # 32-bit
+ inst.state.width = 4 # 4 bytes
+ else:
+ inst.state.mode = 1 # 64-bit
+ inst.state.width = 8 # 8 bytes
+ self.xed_operand_values_set_mode(inst.xedp, inst.statep)
+
+ def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
+ self.xed_decoded_inst_zero_keep_mode(inst.xedp)
+ err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
+ if err:
+ return 0, ""
+ # Use AT&T mode (2), alternative is Intel (3)
+ ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
+ if not ok:
+ return 0, ""
+ if sys.version_info[0] == 2:
+ result = inst.buffer.value
+ else:
+ result = inst.buffer.value.decode()
+ # Return instruction length and the disassembled instruction text
+ # For now, assume the length is in byte 166
+ return inst.xedd[166], result
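The wrapper can also be exercised outside perf; a minimal sketch, assuming libxed.so is installed as the header comment describes (the byte string is just an illustrative x86-64 encoding):

    from ctypes import create_string_buffer, addressof
    from libxed import LibXED

    xed = LibXED()
    inst = xed.Instruction()
    xed.SetMode(inst, 0)                 # 0 selects 64-bit mode
    code = b"\x48\x89\xe5"               # mov %rsp,%rbp
    buf = create_string_buffer(16)
    buf.value = code
    length, text = xed.DisassembleOne(inst, addressof(buf), len(code), 0x400000)
    print(length, text)                  # e.g. 3 mov %rsp, %rbp

DisassembleOne() returns (0, "") on decode or format failure, so callers can treat a zero length as "no disassembly".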
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index c72adbd67386..dbf5f5215abe 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
+#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -151,7 +152,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
}
evlist__splice_list_tail(evlist, &parse_state.list);
- evlist->nr_groups = parse_state.nr_groups;
+ evlist->core.nr_groups = parse_state.nr_groups;
evlist__config(evlist, &opts, NULL);
@@ -276,6 +277,7 @@ static int __test__bpf(int idx)
}
out:
+ free(obj_buf);
bpf__clear();
return ret;
}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index c4b888f18e9c..5e6242576236 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
+#include <linux/zalloc.h>
static bool dont_fork;
@@ -510,8 +511,8 @@ static const char *shell_test__description(char *description, size_t size,
return description ? strim(description + 1) : NULL;
}
-#define for_each_shell_test(dir, base, ent) \
- while ((ent = readdir(dir)) != NULL) \
+#define for_each_shell_test(entlist, nr, base, ent) \
+ for (int __i = 0; __i < nr && (ent = entlist[__i]); __i++) \
if (!is_directory(base, ent) && ent->d_name[0] != '.')
static const char *shell_tests__dir(char *path, size_t size)
@@ -538,8 +539,9 @@ static const char *shell_tests__dir(char *path, size_t size)
static int shell_tests__max_desc_width(void)
{
- DIR *dir;
+ struct dirent **entlist;
struct dirent *ent;
+ int n_dirs, e;
char path_dir[PATH_MAX];
const char *path = shell_tests__dir(path_dir, sizeof(path_dir));
int width = 0;
@@ -547,11 +549,11 @@ static int shell_tests__max_desc_width(void)
if (path == NULL)
return -1;
- dir = opendir(path);
- if (!dir)
+ n_dirs = scandir(path, &entlist, NULL, alphasort);
+ if (n_dirs == -1)
return -1;
- for_each_shell_test(dir, path, ent) {
+ for_each_shell_test(entlist, n_dirs, path, ent) {
char bf[256];
const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name);
@@ -563,7 +565,9 @@ static int shell_tests__max_desc_width(void)
}
}
- closedir(dir);
+ for (e = 0; e < n_dirs; e++)
+ zfree(&entlist[e]);
+ free(entlist);
return width;
}
@@ -578,7 +582,10 @@ static int shell_test__run(struct test *test, int subdir __maybe_unused)
char script[PATH_MAX];
struct shell_test *st = test->priv;
- path__join(script, sizeof(script), st->dir, st->file);
+ path__join(script, sizeof(script) - 3, st->dir, st->file);
+
+ if (verbose)
+ strncat(script, " -v", sizeof(script) - strlen(script) - 1);
err = system(script);
if (!err)
@@ -589,8 +596,9 @@ static int shell_test__run(struct test *test, int subdir __maybe_unused)
static int run_shell_tests(int argc, const char *argv[], int i, int width)
{
- DIR *dir;
+ struct dirent **entlist;
struct dirent *ent;
+ int n_dirs, e;
char path_dir[PATH_MAX];
struct shell_test st = {
.dir = shell_tests__dir(path_dir, sizeof(path_dir)),
@@ -599,14 +607,14 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
if (st.dir == NULL)
return -1;
- dir = opendir(st.dir);
- if (!dir) {
+ n_dirs = scandir(st.dir, &entlist, NULL, alphasort);
+ if (n_dirs == -1) {
pr_err("failed to open shell test directory: %s\n",
st.dir);
return -1;
}
- for_each_shell_test(dir, st.dir, ent) {
+ for_each_shell_test(entlist, n_dirs, st.dir, ent) {
int curr = i++;
char desc[256];
struct test test = {
@@ -623,7 +631,9 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
test_and_print(&test, false, -1);
}
- closedir(dir);
+ for (e = 0; e < n_dirs; e++)
+ zfree(&entlist[e]);
+ free(entlist);
return 0;
}
@@ -722,19 +732,20 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
static int perf_test__list_shell(int argc, const char **argv, int i)
{
- DIR *dir;
+ struct dirent **entlist;
struct dirent *ent;
+ int n_dirs, e;
char path_dir[PATH_MAX];
const char *path = shell_tests__dir(path_dir, sizeof(path_dir));
if (path == NULL)
return -1;
- dir = opendir(path);
- if (!dir)
+ n_dirs = scandir(path, &entlist, NULL, alphasort);
+ if (n_dirs == -1)
return -1;
- for_each_shell_test(dir, path, ent) {
+ for_each_shell_test(entlist, n_dirs, path, ent) {
int curr = i++;
char bf[256];
struct test t = {
@@ -745,9 +756,12 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
continue;
pr_info("%2d: %s\n", i, t.desc);
+
}
- closedir(dir);
+ for (e = 0; e < n_dirs; e++)
+ zfree(&entlist[e]);
+ free(entlist);
return 0;
}
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 83638097c3bc..a288035eb362 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -17,10 +17,6 @@
#include "callchain.h"
#include "util/synthetic-events.h"
-#if defined (__x86_64__) || defined (__i386__) || defined (__powerpc__)
-#include "arch-tests.h"
-#endif
-
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index 656218179222..44a50527f9d9 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -88,6 +88,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
struct evsel *evsel;
struct event_name tmp;
struct evlist *evlist = evlist__new_default();
+ char *unit = strdup("KRAVA");
TEST_ASSERT_VAL("failed to get evlist", evlist);
@@ -98,7 +99,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
- evsel->unit = strdup("KRAVA");
+ evsel->unit = unit;
TEST_ASSERT_VAL("failed to synthesize attr update unit",
!perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
@@ -118,6 +119,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
- perf_cpu_map__put(evsel->core.own_cpus);
+ free(unit);
+ evlist__delete(evlist);
return 0;
}
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index b74cf80d1f10..4e09f0a312af 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -5,6 +5,7 @@
#include "tests.h"
#include "debug.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#include <errno.h>
#include <linux/kernel.h>
@@ -44,7 +45,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
__evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
- if (evsel->idx != idx)
+ if (evsel->core.idx != idx)
continue;
++idx;
@@ -84,9 +85,9 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names,
err = 0;
evlist__for_each_entry(evlist, evsel) {
- if (strcmp(evsel__name(evsel), names[evsel->idx / distance])) {
+ if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) {
--err;
- pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->idx / distance]);
+ pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]);
}
}
@@ -102,7 +103,7 @@ int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int
{
int err = 0, ret = 0;
- if (perf_pmu__has_hybrid())
+ if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom"))
return perf_evsel__name_array_test(evsel__hw_names, 2);
err = perf_evsel__name_array_test(evsel__hw_names, 1);
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 94bd5d215d94..da013e90a945 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -84,9 +84,11 @@ make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_auxtrace := NO_AUXTRACE=1
make_no_libbpf := NO_LIBBPF=1
+make_libbpf_dynamic := LIBBPF_DYNAMIC=1
make_no_libbpf_DEBUG := NO_LIBBPF=1 DEBUG=1
make_no_libcrypto := NO_LIBCRYPTO=1
make_with_babeltrace:= LIBBABELTRACE=1
+make_with_coresight := CORESIGHT=1
make_no_sdt := NO_SDT=1
make_no_syscall_tbl := NO_SYSCALL_TABLE=1
make_with_clangllvm := LIBCLANGLLVM=1
@@ -148,11 +150,13 @@ run += make_no_libaudit
run += make_no_libbionic
run += make_no_auxtrace
run += make_no_libbpf
+run += make_libbpf_dynamic
run += make_no_libbpf_DEBUG
run += make_no_libcrypto
run += make_no_sdt
run += make_no_syscall_tbl
run += make_with_babeltrace
+run += make_with_coresight
run += make_with_clangllvm
run += make_with_libpfm4
run += make_help
@@ -266,6 +270,9 @@ test_make_install_info_O := $(test_ok)
test_make_install_pdf := $(test_ok)
test_make_install_pdf_O := $(test_ok)
+test_make_libbpf_dynamic := ldd $(PERF_O)/perf | grep -q libbpf
+test_make_libbpf_dynamic_O := ldd $$TMP_O/perf | grep -q libbpf
+
test_make_python_perf_so_O := test -f $$TMP_O/python/perf.so
test_make_perf_o_O := test -f $$TMP_O/perf.o
test_make_util_map_o_O := test -f $$TMP_O/util/map.o
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
index edcbc70ff9d6..1ac72919fa35 100644
--- a/tools/perf/tests/maps.c
+++ b/tools/perf/tests/maps.c
@@ -116,5 +116,7 @@ int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unus
ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps);
TEST_ASSERT_VAL("merge check failed", !ret);
+
+ maps__exit(&maps);
return TEST_OK;
}
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 73ae8f7aa066..d38757db2dc2 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -139,7 +139,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
" doesn't map to an evsel\n", sample.id);
goto out_delete_evlist;
}
- nr_events[evsel->idx]++;
+ nr_events[evsel->core.idx]++;
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(&md->core);
@@ -147,10 +147,10 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
out_init:
err = 0;
evlist__for_each_entry(evlist, evsel) {
- if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
+ if (nr_events[evsel->core.idx] != expected_nr_events[evsel->core.idx]) {
pr_debug("expected %d %s events, got %d\n",
- expected_nr_events[evsel->idx],
- evsel__name(evsel), nr_events[evsel->idx]);
+ expected_nr_events[evsel->core.idx],
+ evsel__name(evsel), nr_events[evsel->core.idx]);
err = -1;
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 0f113b2b36a3..8d4866739255 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -6,6 +6,7 @@
#include "tests.h"
#include "debug.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#include <dirent.h>
#include <errno.h>
#include <sys/types.h>
@@ -49,7 +50,7 @@ static int test__checkevent_tracepoint(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->core.nr_groups);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
@@ -62,7 +63,7 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)
struct evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
- TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->core.nr_groups);
evlist__for_each_entry(evlist, evsel) {
TEST_ASSERT_VAL("wrong type",
@@ -668,7 +669,7 @@ static int test__group1(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* instructions:k */
evsel = leader = evlist__first(evlist);
@@ -698,7 +699,7 @@ static int test__group1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
@@ -710,7 +711,7 @@ static int test__group2(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* faults + :ku modifier */
evsel = leader = evlist__first(evlist);
@@ -739,7 +740,7 @@ static int test__group2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
@@ -765,7 +766,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->core.nr_groups);
/* group1 syscalls:sys_enter_openat:H */
evsel = leader = evlist__first(evlist);
@@ -798,7 +799,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 3);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
@@ -831,7 +832,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
@@ -857,7 +858,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* cycles:u + p */
evsel = leader = evlist__first(evlist);
@@ -889,7 +890,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
@@ -901,7 +902,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->core.nr_groups);
/* cycles + G */
evsel = leader = evlist__first(evlist);
@@ -931,7 +932,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
@@ -963,7 +964,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
/* cycles */
@@ -987,7 +988,7 @@ static int test__group_gh1(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* cycles + :H group modifier */
evsel = leader = evlist__first(evlist);
@@ -1016,7 +1017,7 @@ static int test__group_gh1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
return 0;
@@ -1027,7 +1028,7 @@ static int test__group_gh2(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* cycles + :G group modifier */
evsel = leader = evlist__first(evlist);
@@ -1056,7 +1057,7 @@ static int test__group_gh2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
return 0;
@@ -1067,7 +1068,7 @@ static int test__group_gh3(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* cycles:G + :u group modifier */
evsel = leader = evlist__first(evlist);
@@ -1096,7 +1097,7 @@ static int test__group_gh3(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
return 0;
@@ -1107,7 +1108,7 @@ static int test__group_gh4(struct evlist *evlist)
struct evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
/* cycles:G + :uG group modifier */
evsel = leader = evlist__first(evlist);
@@ -1136,7 +1137,7 @@ static int test__group_gh4(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
return 0;
@@ -1160,7 +1161,7 @@ static int test__leader_sample1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
/* cache-misses - not sampling */
@@ -1174,7 +1175,7 @@ static int test__leader_sample1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
/* branch-misses - not sampling */
@@ -1189,7 +1190,7 @@ static int test__leader_sample1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
return 0;
@@ -1213,7 +1214,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
/* branch-misses - not sampling */
@@ -1228,7 +1229,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
return 0;
@@ -1259,7 +1260,7 @@ static int test__pinned_group(struct evlist *evlist)
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
/* cache-misses - can not be pinned, but will go on with the leader */
@@ -1303,7 +1304,7 @@ static int test__exclusive_group(struct evlist *evlist)
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
/* cache-misses - can not be pinned, but will go on with the leader */
@@ -1530,12 +1531,12 @@ static int test__hybrid_hw_group_event(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
return 0;
}
@@ -1546,12 +1547,12 @@ static int test__hybrid_sw_hw_group_event(struct evlist *evlist)
evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
return 0;
}
@@ -1563,11 +1564,11 @@ static int test__hybrid_hw_sw_group_event(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
return 0;
}
@@ -1579,14 +1580,14 @@ static int test__hybrid_group_modifier1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
- TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
return 0;
@@ -1596,6 +1597,13 @@ static int test__hybrid_raw1(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ if (!perf_pmu__hybrid_mounted("cpu_atom")) {
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return 0;
+ }
+
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
@@ -1620,13 +1628,9 @@ static int test__hybrid_cache_event(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff));
-
- evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", 0x10002 == (evsel->core.attr.config & 0xffffffff));
return 0;
}
@@ -2028,7 +2032,7 @@ static struct evlist_test test__hybrid_events[] = {
.id = 7,
},
{
- .name = "cpu_core/LLC-loads/,cpu_atom/LLC-load-misses/",
+ .name = "cpu_core/LLC-loads/",
.check = test__hybrid_cache_event,
.id = 8,
},
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 85d75b9b25a1..7c56bc1f4cff 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -21,6 +21,7 @@
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
+#include "pmu-hybrid.h"
#define CHECK__(x) { \
while ((x) < 0) { \
@@ -93,7 +94,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
* For hybrid "cycles:u", it creates two events.
* Init the second evsel here.
*/
- if (perf_pmu__has_hybrid()) {
+ if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
evsel = evsel__next(evsel);
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
index d4b0ef74defc..e8fd0da0762b 100644
--- a/tools/perf/tests/pfm.c
+++ b/tools/perf/tests/pfm.c
@@ -96,7 +96,7 @@ static int test__pfm_events(void)
count_pfm_events(&evlist->core),
table[i].nr_events);
TEST_ASSERT_EQUAL(table[i].events,
- evlist->nr_groups,
+ evlist->core.nr_groups,
0);
evlist__delete(evlist);
@@ -155,6 +155,16 @@ static int test__pfm_group(void)
.nr_events = 3,
.nr_groups = 1,
},
+ {
+ .events = "instructions}",
+ .nr_events = 1,
+ .nr_groups = 0,
+ },
+ {
+ .events = "{{instructions}}",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
};
for (i = 0; i < ARRAY_SIZE(table); i++) {
@@ -170,7 +180,7 @@ static int test__pfm_group(void)
count_pfm_events(&evlist->core),
table[i].nr_events);
TEST_ASSERT_EQUAL(table[i].events,
- evlist->nr_groups,
+ evlist->core.nr_groups,
table[i].nr_groups);
evlist__delete(evlist);
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 2f9948b3d943..2aed20dc2262 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -22,10 +22,24 @@ compare_number()
}
# skip if --bpf-counters is not supported
-perf stat --bpf-counters true > /dev/null 2>&1 || exit 2
+if ! perf stat --bpf-counters true > /dev/null 2>&1; then
+ if [ "$1" == "-v" ]; then
+ echo "Skipping: --bpf-counters not supported"
+ perf --no-pager stat --bpf-counters true || true
+ fi
+ exit 2
+fi
base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$base_cycles" == "<not" ]; then
+ echo "Skipping: cycles event not counted"
+ exit 2
+fi
bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$bpf_cycles" == "<not" ]; then
+ echo "Failed: cycles not counted with --bpf-counters"
+ exit 1
+fi
compare_number $base_cycles $bpf_cycles
exit 0
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index b85f005308a3..1100dd55b657 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -133,14 +133,12 @@ bool test__bp_account_is_supported(void);
bool test__wp_is_supported(void);
bool test__tsc_is_supported(void);
-#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
struct perf_sample;
int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread);
#endif
-#endif
#if defined(__arm__)
int test__vectors_page(struct test *test, int subtest);
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index ec4e3b21b831..b5efe675b321 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -61,6 +61,7 @@ static int session_write_header(char *path)
TEST_ASSERT_VAL("failed to write header",
!perf_session__write_header(session, session->evlist, data.file.fd, true));
+ evlist__delete(session->evlist);
perf_session__delete(session);
return 0;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ad0a70f0edaf..701130ad43a2 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -343,6 +343,29 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
browser->curr_hot = rb_last(&browser->entries);
}
+static struct annotation_line *annotate_browser__find_next_asm_line(
+ struct annotate_browser *browser,
+ struct annotation_line *al)
+{
+ struct annotation_line *it = al;
+
+ /* find next asm line */
+ list_for_each_entry_continue(it, browser->b.entries, node) {
+ if (it->idx_asm >= 0)
+ return it;
+ }
+
+ /* no asm line found forwards, try backwards */
+ it = al;
+ list_for_each_entry_continue_reverse(it, browser->b.entries, node) {
+ if (it->idx_asm >= 0)
+ return it;
+ }
+
+ /* There are no asm lines */
+ return NULL;
+}
+
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
struct annotation *notes = browser__annotation(&browser->b);
@@ -363,9 +386,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
browser->b.index = al->idx;
} else {
if (al->idx_asm < 0) {
- ui_helpline__puts("Only available for assembly lines.");
- browser->b.seek(&browser->b, -offset, SEEK_CUR);
- return false;
+ /* move cursor to next asm line */
+ al = annotate_browser__find_next_asm_line(browser, al);
+ if (!al) {
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ return false;
+ }
}
if (al->idx_asm < offset)
@@ -723,7 +749,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
hbt->timer(hbt->arg);
if (delay_secs != 0) {
- symbol__annotate_decay_histogram(sym, evsel->idx);
+ symbol__annotate_decay_histogram(sym, evsel->core.idx);
hists__scnprintf_title(hists, title, sizeof(title));
annotate_browser__show(&browser->b, title, help);
}
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index a7dff77f2018..94167bfed722 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -135,12 +135,12 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
ret += perf_gtk__get_percent(s + ret,
sizeof(s) - ret,
sym, pos,
- evsel->idx + i);
+ evsel->core.idx + i);
ret += scnprintf(s + ret, sizeof(s) - ret, " ");
}
} else {
ret = perf_gtk__get_percent(s, sizeof(s), sym, pos,
- evsel->idx);
+ evsel->core.idx);
}
if (ret)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index b64bdc1a7026..2d4fa1304178 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -126,6 +126,7 @@ perf-y += parse-regs-options.o
perf-y += parse-sublevel-options.o
perf-y += term.o
perf-y += help-unknown-cmd.o
+perf-y += dlfilter.o
perf-y += mem-events.o
perf-y += vsprintf.o
perf-y += units.o
@@ -140,6 +141,7 @@ perf-y += clockid.o
perf-$(CONFIG_LIBBPF) += bpf-loader.o
perf-$(CONFIG_LIBBPF) += bpf_map.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
@@ -216,7 +218,7 @@ $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-flex.h: util/parse-
$(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/parse-events-bison.h: util/parse-events.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/parse-events-bison.c -p parse_events_
$(OUTPUT)util/expr-flex.c $(OUTPUT)util/expr-flex.h: util/expr.l $(OUTPUT)util/expr-bison.c
@@ -226,7 +228,7 @@ $(OUTPUT)util/expr-flex.c $(OUTPUT)util/expr-flex.h: util/expr.l $(OUTPUT)util/e
$(OUTPUT)util/expr-bison.c $(OUTPUT)util/expr-bison.h: util/expr.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/expr-bison.c -p expr_
$(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-flex.h: util/pmu.l $(OUTPUT)util/pmu-bison.c
@@ -236,7 +238,7 @@ $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-flex.h: util/pmu.l $(OUTPUT)util/pmu-
$(OUTPUT)util/pmu-bison.c $(OUTPUT)util/pmu-bison.h: util/pmu.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
FLEX_GE_26 := $(shell expr $(shell $(FLEX) --version | sed -e 's/flex \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 26)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index abe1499a9164..aa04a3655236 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -961,7 +961,7 @@ static int symbol__inc_addr_samples(struct map_symbol *ms,
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->core.nr_entries);
- return src ? __symbol__inc_addr_samples(ms, src, evsel->idx, addr, sample) : 0;
+ return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
}
static int symbol__account_cycles(u64 addr, u64 start,
@@ -2159,7 +2159,7 @@ static void annotation__calc_percent(struct annotation *notes,
BUG_ON(i >= al->data_nr);
- sym_hist = annotation__histogram(notes, evsel->idx);
+ sym_hist = annotation__histogram(notes, evsel->core.idx);
data = &al->data[i++];
calc_percent(sym_hist, hists, data, al->offset, end);
@@ -2340,7 +2340,7 @@ static void print_summary(struct rb_root *root, const char *filename)
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evsel->idx);
+ struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset)
@@ -2373,7 +2373,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
const char *d_filename;
const char *evsel_name = evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evsel->idx);
+ struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
struct annotation_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0, addr_fmt_width;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 2539d4baec44..58b7069c5a5f 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -26,6 +26,7 @@
#include "symbol.h"
#include "thread.h"
#include "thread-stack.h"
+#include "tsc.h"
#include "tool.h"
#include "util/synthetic-events.h"
@@ -45,6 +46,8 @@ struct arm_spe {
struct machine *machine;
u32 pmu_type;
+ struct perf_tsc_conversion tc;
+
u8 timeless_decoding;
u8 data_queued;
@@ -231,7 +234,7 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
struct arm_spe_record *record = &speq->decoder->record;
if (!spe->timeless_decoding)
- sample->time = speq->timestamp;
+ sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
sample->ip = record->from_ip;
sample->cpumode = arm_spe_cpumode(spe, sample->ip);
@@ -431,12 +434,36 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
{
struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record;
int ret;
if (!spe->kernel_start)
spe->kernel_start = machine__kernel_start(spe->machine);
while (1) {
+ /*
+ * The usual logic is to decode the packets first and then use the
+ * record to synthesize a sample; here the flow is reversed: it calls
+ * arm_spe_sample() to synthesize samples prior to arm_spe_decode().
+ *
+ * There are two reasons for this code logic:
+ * 1. When the queue is set up in arm_spe__setup_queue(), trace data
+ * has already been decoded and a record generated, but that record
+ * is not turned into a sample until execution reaches this point, so
+ * the leftover record must be synthesized first.
+ * 2. After decoding trace data, the record timestamp is compared with
+ * the timestamp of the coming perf event; if the record timestamp is
+ * later, it bails out and pushes the record onto the auxtrace heap so
+ * that synthesizing its sample is deferred until this point is reached
+ * again. This correlates samples between Arm SPE trace data and other
+ * perf events with correct time ordering.
+ */
+ ret = arm_spe_sample(speq);
+ if (ret)
+ return ret;
+
ret = arm_spe_decode(speq->decoder);
if (!ret) {
pr_debug("No data or all data has been processed.\n");
@@ -450,10 +477,17 @@ static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
if (ret < 0)
continue;
- ret = arm_spe_sample(speq);
- if (ret)
- return ret;
+ record = &speq->decoder->record;
+ /* Update timestamp for the last record */
+ if (record->timestamp > speq->timestamp)
+ speq->timestamp = record->timestamp;
+
+ /*
+ * If the timestamp of the queue is later than the timestamp of the
+ * coming perf event, bail out so the perf event can be processed
+ * first.
+ */
if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
*timestamp = speq->timestamp;
return 0;
@@ -666,7 +700,7 @@ static int arm_spe_process_event(struct perf_session *session,
}
if (sample->time && (sample->time != (u64) -1))
- timestamp = sample->time;
+ timestamp = perf_time_to_tsc(sample->time, &spe->tc);
else
timestamp = 0;
@@ -683,11 +717,7 @@ static int arm_spe_process_event(struct perf_session *session,
sample->time);
}
} else if (timestamp) {
- if (event->header.type == PERF_RECORD_EXIT) {
- err = arm_spe_process_queues(spe, timestamp);
- if (err)
- return err;
- }
+ err = arm_spe_process_queues(spe, timestamp);
}
return err;
@@ -1006,6 +1036,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
+ struct perf_record_time_conv *tc = &session->time_conv;
struct arm_spe *spe;
int err;
@@ -1027,6 +1058,28 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
+
+ /*
+ * The synthesized event PERF_RECORD_TIME_CONV has already been handled
+ * and the hardware clock parameters are stored in the session context.
+ * Pass these parameters to the struct perf_tsc_conversion in "spe->tc",
+ * which is used later for converting between the clock counter and
+ * timestamps.
+ *
+ * For backward compatibility, copy the fields starting from
+ * "time_cycles" only if they are contained in the event.
+ */
+ spe->tc.time_shift = tc->time_shift;
+ spe->tc.time_mult = tc->time_mult;
+ spe->tc.time_zero = tc->time_zero;
+
+ if (event_contains(*tc, time_cycles)) {
+ spe->tc.time_cycles = tc->time_cycles;
+ spe->tc.time_mask = tc->time_mask;
+ spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
+ spe->tc.cap_user_time_short = tc->cap_user_time_short;
+ }
+
spe->auxtrace.process_event = arm_spe_process_event;
spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
spe->auxtrace.flush_events = arm_spe_flush;
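
The arm-spe changes above convert between the raw counter value in the trace and perf (kernel clock) timestamps using the time_shift/time_mult/time_zero parameters delivered by PERF_RECORD_TIME_CONV. As a rough illustration of one direction of that conversion (a sketch of the documented cap_user_time formula with hypothetical names, not the tsc_to_perf_time() source):

#include <stdint.h>

/* Hypothetical, simplified copy of the parameters carried by
 * PERF_RECORD_TIME_CONV (names mirror struct perf_tsc_conversion). */
struct tc_sketch {
	uint16_t time_shift;
	uint32_t time_mult;
	uint64_t time_zero;
};

/* Convert a raw counter value to a perf timestamp.  Splitting the value
 * into quotient and remainder avoids overflowing the 64x32-bit multiply,
 * as in the documented cap_user_time formula. */
static uint64_t cyc_to_perf_time_sketch(uint64_t cyc, const struct tc_sketch *tc)
{
	uint64_t quot = cyc >> tc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}
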
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 1b4091a3b508..cb19669d2a5b 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -73,8 +73,8 @@ static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct e
grp = false;
evlist__for_each_entry(evlist, evsel) {
if (grp) {
- if (!(evsel->leader == leader ||
- (evsel->leader == evsel &&
+ if (!(evsel__leader(evsel) == leader ||
+ (evsel__leader(evsel) == evsel &&
evsel->core.nr_members <= 1)))
return -EINVAL;
} else if (evsel == leader) {
@@ -87,8 +87,8 @@ static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct e
grp = false;
evlist__for_each_entry(evlist, evsel) {
if (grp) {
- if (evsel->leader != leader) {
- evsel->leader = leader;
+ if (!evsel__has_leader(evsel, leader)) {
+ evsel__set_leader(evsel, leader);
if (leader->core.nr_members < 1)
leader->core.nr_members = 1;
leader->core.nr_members += 1;
@@ -1120,8 +1120,9 @@ int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
auxtrace_queue_data_cb, &qd);
}
-void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
+ int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
size_t adj = buffer->data_offset & (page_size - 1);
size_t size = buffer->size + adj;
off_t file_offset = buffer->data_offset - adj;
@@ -1130,7 +1131,7 @@ void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
if (buffer->data)
return buffer->data;
- addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
+ addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
if (addr == MAP_FAILED)
return NULL;
@@ -1230,11 +1231,11 @@ static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
/* Find new leader for the group */
evlist__for_each_entry(evlist, evsel) {
- if (evsel->leader != leader || evsel == leader)
+ if (!evsel__has_leader(evsel, leader) || evsel == leader)
continue;
if (!new_leader)
new_leader = evsel;
- evsel->leader = new_leader;
+ evsel__set_leader(evsel, new_leader);
}
/* Update group information */
@@ -1404,10 +1405,9 @@ static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *m
* about the options parsed here, which is introduced after this cset,
* when support in 'perf script' for these options is introduced.
*/
-int itrace_parse_synth_opts(const struct option *opt, const char *str,
- int unset)
+int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ const char *str, int unset)
{
- struct itrace_synth_opts *synth_opts = opt->value;
const char *p;
char *endptr;
bool period_type_set = false;
@@ -1569,6 +1569,9 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
case 'q':
synth_opts->quick += 1;
break;
+ case 'Z':
+ synth_opts->timeless_decoding = true;
+ break;
case ' ':
case ',':
break;
@@ -1592,6 +1595,11 @@ out_err:
return -EINVAL;
}
+int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
+{
+ return itrace_do_parse_synth_opts(opt->value, str, unset);
+}
+
static const char * const auxtrace_error_type_name[] = {
[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index a4fbb33b7245..cc1c1b9cec9c 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -89,6 +89,10 @@ enum itrace_period_type {
* @tlb: whether to synthesize TLB events
* @remote_access: whether to synthesize remote access events
* @mem: whether to synthesize memory events
+ * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
+ * @vm_time_correlation: perform VM Time Correlation
+ * @vm_tm_corr_dry_run: VM Time Correlation dry-run
+ * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
* @callchain_sz: maximum callchain size
* @last_branch_sz: branch context size
* @period: 'instructions' events period
@@ -128,6 +132,10 @@ struct itrace_synth_opts {
bool tlb;
bool remote_access;
bool mem;
+ bool timeless_decoding;
+ bool vm_time_correlation;
+ bool vm_tm_corr_dry_run;
+ char *vm_tm_corr_args;
unsigned int callchain_sz;
unsigned int last_branch_sz;
unsigned long long period;
@@ -444,7 +452,7 @@ static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
u64 head = READ_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
- rmb();
+ smp_rmb();
return head;
}
@@ -458,7 +466,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
#endif
/* Ensure all reads are done after we read the head */
- rmb();
+ smp_rmb();
return head;
}
@@ -470,7 +478,7 @@ static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
#endif
/* Ensure all reads are done before we write the tail out */
- mb();
+ smp_mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
pc->aux_tail = tail;
#else
@@ -525,7 +533,11 @@ int auxtrace_queue_data(struct perf_session *session, bool samples,
bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
struct auxtrace_buffer *buffer);
-void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
+void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
+static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+{
+ return auxtrace_buffer__get_data_rw(buffer, fd, false);
+}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
@@ -595,6 +607,8 @@ s64 perf_event__process_auxtrace(struct perf_session *session,
union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
union perf_event *event);
+int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
@@ -691,9 +705,26 @@ int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
return 0;
}
-#define perf_event__process_auxtrace_info 0
-#define perf_event__process_auxtrace 0
-#define perf_event__process_auxtrace_error 0
+static inline
+int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ return 0;
+}
+
+static inline
+s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ return 0;
+}
+
+static inline
+int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ return 0;
+}
static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
@@ -710,6 +741,14 @@ void events_stats__auxtrace_error_warn(const struct events_stats *stats
}
static inline
+int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
+ const char *str __maybe_unused, int unset __maybe_unused)
+{
+ pr_err("AUX area tracing not supported\n");
+ return -EINVAL;
+}
+
+static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
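
The rmb()/mb() to smp_rmb()/smp_mb() switch above keeps the usual AUX ring-buffer ordering: load the head before reading trace data, and publish the new tail only after all reads are done. A minimal userspace analogue with C11 atomics (illustrative names only, not the perf mmap ABI) might look like:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical single-producer/single-consumer buffer; names and layout are
 * illustrative, not the perf mmap ABI. */
struct aux_rb_sketch {
	_Atomic uint64_t head;		/* advanced by the producer */
	_Atomic uint64_t tail;		/* advanced by the consumer */
	unsigned char data[4096];
};

static void consume_sketch(struct aux_rb_sketch *rb,
			   void (*handle)(const unsigned char *buf, uint64_t tail, uint64_t head))
{
	/* acquire: data written before the head update is visible after this load */
	uint64_t head = atomic_load_explicit(&rb->head, memory_order_acquire);
	uint64_t tail = atomic_load_explicit(&rb->tail, memory_order_relaxed);

	handle(rb->data, tail, head);

	/* release: finish reading the data before publishing the new tail */
	atomic_store_explicit(&rb->tail, head, memory_order_release);
}
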
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 5ed674a2f55e..8150e03367bb 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -7,12 +7,8 @@
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>
@@ -22,6 +18,7 @@
#include "evsel.h"
#include "evlist.h"
#include "target.h"
+#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"
@@ -37,13 +34,6 @@ static inline void *u64_to_ptr(__u64 ptr)
return (void *)(unsigned long)ptr;
}
-static void set_max_rlimit(void)
-{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-}
-
static struct bpf_counter *bpf_counter_alloc(void)
{
struct bpf_counter *counter;
@@ -297,33 +287,6 @@ struct bpf_counter_ops bpf_program_profiler_ops = {
.install_pe = bpf_program_profiler__install_pe,
};
-static __u32 bpf_link_get_id(int fd)
-{
- struct bpf_link_info link_info = {0};
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.id;
-}
-
-static __u32 bpf_link_get_prog_id(int fd)
-{
- struct bpf_link_info link_info = {0};
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.prog_id;
-}
-
-static __u32 bpf_map_get_id(int fd)
-{
- struct bpf_map_info map_info = {0};
- __u32 map_info_len = sizeof(map_info);
-
- bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
- return map_info.id;
-}
-
static bool bperf_attr_map_compatible(int attr_map_fd)
{
struct bpf_map_info map_info = {0};
@@ -385,26 +348,12 @@ static int bperf_lock_attr_map(struct target *target)
return map_fd;
}
-/* trigger the leader program on a cpu */
-static int bperf_trigger_reading(int prog_fd, int cpu)
-{
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
- .ctx_in = NULL,
- .ctx_size_in = 0,
- .flags = BPF_F_TEST_RUN_ON_CPU,
- .cpu = cpu,
- .retval = 0,
- );
-
- return bpf_prog_test_run_opts(prog_fd, &opts);
-}
-
static int bperf_check_target(struct evsel *evsel,
struct target *target,
enum bperf_filter_type *filter_type,
__u32 *filter_entry_cnt)
{
- if (evsel->leader->core.nr_members > 1) {
+ if (evsel->core.leader->nr_members > 1) {
pr_err("bpf managed perf events do not yet support groups.\n");
return -1;
}
@@ -794,6 +743,8 @@ struct bpf_counter_ops bperf_ops = {
.destroy = bperf__destroy,
};
+extern struct bpf_counter_ops bperf_cgrp_ops;
+
static inline bool bpf_counter_skip(struct evsel *evsel)
{
return list_empty(&evsel->bpf_counter_list) &&
@@ -811,6 +762,8 @@ int bpf_counter__load(struct evsel *evsel, struct target *target)
{
if (target->bpf_str)
evsel->bpf_counter_ops = &bpf_program_profiler_ops;
+ else if (cgrp_event_expanded && target->use_bpf)
+ evsel->bpf_counter_ops = &bperf_cgrp_ops;
else if (target->use_bpf || evsel->bpf_counter ||
evsel__match_bpf_counter_events(evsel->name))
evsel->bpf_counter_ops = &bperf_ops;
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index d6d907c3dcf9..65ebaa6694fb 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -3,6 +3,10 @@
#define __PERF_BPF_COUNTER_H 1
#include <linux/list.h>
+#include <sys/resource.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
struct evsel;
struct target;
@@ -76,4 +80,52 @@ static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
#endif /* HAVE_BPF_SKEL */
+static inline void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static inline __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static inline __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static inline __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = { .id = 0, };
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+/* trigger the leader program on a cpu */
+static inline int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
#endif /* __PERF_BPF_COUNTER_H */
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
new file mode 100644
index 000000000000..89aa5e71db1a
--- /dev/null
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2021 Facebook */
+/* Copyright (c) 2021 Google */
+
+#include <assert.h>
+#include <limits.h>
+#include <unistd.h>
+#include <sys/file.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <linux/err.h>
+#include <linux/zalloc.h>
+#include <linux/perf_event.h>
+#include <api/fs/fs.h>
+#include <perf/bpf_perf.h>
+
+#include "affinity.h"
+#include "bpf_counter.h"
+#include "cgroup.h"
+#include "counts.h"
+#include "debug.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "target.h"
+#include "cpumap.h"
+#include "thread_map.h"
+
+#include "bpf_skel/bperf_cgroup.skel.h"
+
+static struct perf_event_attr cgrp_switch_attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CGROUP_SWITCHES,
+ .size = sizeof(cgrp_switch_attr),
+ .sample_period = 1,
+ .disabled = 1,
+};
+
+static struct evsel *cgrp_switch;
+static struct bperf_cgroup_bpf *skel;
+
+#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
+
+static int bperf_load_program(struct evlist *evlist)
+{
+ struct bpf_link *link;
+ struct evsel *evsel;
+ struct cgroup *cgrp, *leader_cgrp;
+ __u32 i, cpu;
+ __u32 nr_cpus = evlist->core.all_cpus->nr;
+ int total_cpus = cpu__max_cpu();
+ int map_size, map_fd;
+ int prog_fd, err;
+
+ skel = bperf_cgroup_bpf__open();
+ if (!skel) {
+ pr_err("Failed to open cgroup skeleton\n");
+ return -1;
+ }
+
+ skel->rodata->num_cpus = total_cpus;
+ skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
+
+ BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
+
+ /* we need one copy of events per cpu for reading */
+ map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
+ bpf_map__resize(skel->maps.events, map_size);
+ bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
+ /* previous result is saved in a per-cpu array */
+ map_size = evlist->core.nr_entries / nr_cgroups;
+ bpf_map__resize(skel->maps.prev_readings, map_size);
+ /* cgroup result needs all events (per-cpu) */
+ map_size = evlist->core.nr_entries;
+ bpf_map__resize(skel->maps.cgrp_readings, map_size);
+
+ set_max_rlimit();
+
+ err = bperf_cgroup_bpf__load(skel);
+ if (err) {
+ pr_err("Failed to load cgroup skeleton\n");
+ goto out;
+ }
+
+ if (cgroup_is_v2("perf_event") > 0)
+ skel->bss->use_cgroup_v2 = 1;
+
+ err = -1;
+
+ cgrp_switch = evsel__new(&cgrp_switch_attr);
+ if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
+ pr_err("Failed to open cgroup switches event\n");
+ goto out;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
+ FD(cgrp_switch, i));
+ if (IS_ERR(link)) {
+ pr_err("Failed to attach cgroup program\n");
+ err = PTR_ERR(link);
+ goto out;
+ }
+ }
+
+ /*
+ * Update cgrp_idx map from cgroup-id to event index.
+ */
+ cgrp = NULL;
+ i = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
+ leader_cgrp = evsel->cgrp;
+ evsel->cgrp = NULL;
+
+ /* open single copy of the events w/o cgroup */
+ err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
+ if (err) {
+ pr_err("Failed to open first cgroup events\n");
+ goto out;
+ }
+
+ map_fd = bpf_map__fd(skel->maps.events);
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ int fd = FD(evsel, cpu);
+ __u32 idx = evsel->core.idx * total_cpus +
+ evlist->core.all_cpus->map[cpu];
+
+ err = bpf_map_update_elem(map_fd, &idx, &fd,
+ BPF_ANY);
+ if (err < 0) {
+ pr_err("Failed to update perf_event fd\n");
+ goto out;
+ }
+ }
+
+ evsel->cgrp = leader_cgrp;
+ }
+ evsel->supported = true;
+
+ if (evsel->cgrp == cgrp)
+ continue;
+
+ cgrp = evsel->cgrp;
+
+ if (read_cgroup_id(cgrp) < 0) {
+ pr_err("Failed to get cgroup id\n");
+ err = -1;
+ goto out;
+ }
+
+ map_fd = bpf_map__fd(skel->maps.cgrp_idx);
+ err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
+ if (err < 0) {
+ pr_err("Failed to update cgroup index map\n");
+ goto out;
+ }
+
+ i++;
+ }
+
+ /*
+ * bperf uses BPF_PROG_TEST_RUN to get an accurate reading. Check
+ * whether the kernel supports it.
+ */
+ prog_fd = bpf_program__fd(skel->progs.trigger_read);
+ err = bperf_trigger_reading(prog_fd, 0);
+ if (err) {
+ pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
+ "Therefore, --for-each-cgroup might show inaccurate readings\n");
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+static int bperf_cgrp__load(struct evsel *evsel,
+ struct target *target __maybe_unused)
+{
+ static bool bperf_loaded = false;
+
+ evsel->bperf_leader_prog_fd = -1;
+ evsel->bperf_leader_link_fd = -1;
+
+ if (!bperf_loaded && bperf_load_program(evsel->evlist))
+ return -1;
+
+ bperf_loaded = true;
+ /* just to bypass bpf_counter_skip() */
+ evsel->follower_skel = (struct bperf_follower_bpf *)skel;
+
+ return 0;
+}
+
+static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
+ int cpu __maybe_unused, int fd __maybe_unused)
+{
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * trigger the leader prog on each cpu, so the cgrp_readings map can get
+ * the latest results.
+ */
+static int bperf_cgrp__sync_counters(struct evlist *evlist)
+{
+ int i, cpu;
+ int nr_cpus = evlist->core.all_cpus->nr;
+ int prog_fd = bpf_program__fd(skel->progs.trigger_read);
+
+ for (i = 0; i < nr_cpus; i++) {
+ cpu = evlist->core.all_cpus->map[i];
+ bperf_trigger_reading(prog_fd, cpu);
+ }
+
+ return 0;
+}
+
+static int bperf_cgrp__enable(struct evsel *evsel)
+{
+ if (evsel->core.idx)
+ return 0;
+
+ bperf_cgrp__sync_counters(evsel->evlist);
+
+ skel->bss->enabled = 1;
+ return 0;
+}
+
+static int bperf_cgrp__disable(struct evsel *evsel)
+{
+ if (evsel->core.idx)
+ return 0;
+
+ bperf_cgrp__sync_counters(evsel->evlist);
+
+ skel->bss->enabled = 0;
+ return 0;
+}
+
+static int bperf_cgrp__read(struct evsel *evsel)
+{
+ struct evlist *evlist = evsel->evlist;
+ int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
+ int total_cpus = cpu__max_cpu();
+ struct perf_counts_values *counts;
+ struct bpf_perf_event_value *values;
+ int reading_map_fd, err = 0;
+ __u32 idx;
+
+ if (evsel->core.idx)
+ return 0;
+
+ bperf_cgrp__sync_counters(evsel->evlist);
+
+ values = calloc(total_cpus, sizeof(*values));
+ if (values == NULL)
+ return -ENOMEM;
+
+ reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
+
+ evlist__for_each_entry(evlist, evsel) {
+ idx = evsel->core.idx;
+ err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
+ if (err) {
+ pr_err("bpf map lookup falied: idx=%u, event=%s, cgrp=%s\n",
+ idx, evsel__name(evsel), evsel->cgrp->name);
+ goto out;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ cpu = evlist->core.all_cpus->map[i];
+
+ counts = perf_counts(evsel->counts, i, 0);
+ counts->val = values[cpu].counter;
+ counts->ena = values[cpu].enabled;
+ counts->run = values[cpu].running;
+ }
+ }
+
+out:
+ free(values);
+ return err;
+}
+
+static int bperf_cgrp__destroy(struct evsel *evsel)
+{
+ if (evsel->core.idx)
+ return 0;
+
+ bperf_cgroup_bpf__destroy(skel);
+ evsel__delete(cgrp_switch); // it'll destroy on_switch progs too
+
+ return 0;
+}
+
+struct bpf_counter_ops bperf_cgrp_ops = {
+ .load = bperf_cgrp__load,
+ .enable = bperf_cgrp__enable,
+ .disable = bperf_cgrp__disable,
+ .read = bperf_cgrp__read,
+ .install_pe = bperf_cgrp__install_pe,
+ .destroy = bperf_cgrp__destroy,
+};
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
new file mode 100644
index 000000000000..292c430768b5
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+// Copyright (c) 2021 Google
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define MAX_LEVELS 10 // max cgroup hierarchy level: arbitrary
+#define MAX_EVENTS 32 // max events per cgroup: arbitrary
+
+// NOTE: many of the maps and the global data will be modified before loading
+// from userspace (the perf tool) using the skeleton helpers.
+
+// single set of global perf events to measure
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 1);
+} events SEC(".maps");
+
+// from cgroup id to event index
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, 1);
+} cgrp_idx SEC(".maps");
+
+// per-cpu event snapshots to calculate delta
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} prev_readings SEC(".maps");
+
+// aggregated event values for each cgroup (per-cpu)
+// will be read from the user-space
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} cgrp_readings SEC(".maps");
+
+const volatile __u32 num_events = 1;
+const volatile __u32 num_cpus = 1;
+
+int enabled = 0;
+int use_cgroup_v2 = 0;
+
+static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
+{
+ struct task_struct *p = (void *)bpf_get_current_task();
+ struct cgroup *cgrp;
+ register int i = 0;
+ __u32 *elem;
+ int level;
+ int cnt;
+
+ cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ level = BPF_CORE_READ(cgrp, level);
+
+ for (cnt = 0; i < MAX_LEVELS; i++) {
+ __u64 cgrp_id;
+
+ if (i > level)
+ break;
+
+ // convert cgroup-id to a map index
+ cgrp_id = BPF_CORE_READ(cgrp, ancestor_ids[i]);
+ elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
+ if (!elem)
+ continue;
+
+ cgrps[cnt++] = *elem;
+ if (cnt == size)
+ break;
+ }
+
+ return cnt;
+}
+
+static inline int get_cgroup_v2_idx(__u32 *cgrps, int size)
+{
+ register int i = 0;
+ __u32 *elem;
+ int cnt;
+
+ for (cnt = 0; i < MAX_LEVELS; i++) {
+ __u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i);
+
+ if (cgrp_id == 0)
+ break;
+
+ // convert cgroup-id to a map index
+ elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
+ if (!elem)
+ continue;
+
+ cgrps[cnt++] = *elem;
+ if (cnt == size)
+ break;
+ }
+
+ return cnt;
+}
+
+static int bperf_cgroup_count(void)
+{
+ register __u32 idx = 0; // to have it in a register to pass BPF verifier
+ register int c = 0;
+ struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val;
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u32 cgrp_idx[MAX_LEVELS];
+ int cgrp_cnt;
+ __u32 key, cgrp;
+ long err;
+
+ if (use_cgroup_v2)
+ cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, MAX_LEVELS);
+ else
+ cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, MAX_LEVELS);
+
+ for ( ; idx < MAX_EVENTS; idx++) {
+ if (idx == num_events)
+ break;
+
+ // XXX: do not pass idx directly (for verifier)
+ key = idx;
+ // this is per-cpu array for diff
+ prev_val = bpf_map_lookup_elem(&prev_readings, &key);
+ if (!prev_val) {
+ val.counter = val.enabled = val.running = 0;
+ bpf_map_update_elem(&prev_readings, &key, &val, BPF_ANY);
+
+ prev_val = bpf_map_lookup_elem(&prev_readings, &key);
+ if (!prev_val)
+ continue;
+ }
+
+ // read from global perf_event array
+ key = idx * num_cpus + cpu;
+ err = bpf_perf_event_read_value(&events, key, &val, sizeof(val));
+ if (err)
+ continue;
+
+ if (enabled) {
+ delta.counter = val.counter - prev_val->counter;
+ delta.enabled = val.enabled - prev_val->enabled;
+ delta.running = val.running - prev_val->running;
+
+ for (c = 0; c < MAX_LEVELS; c++) {
+ if (c == cgrp_cnt)
+ break;
+
+ cgrp = cgrp_idx[c];
+
+ // aggregate the result by cgroup
+ key = cgrp * num_events + idx;
+ cgrp_val = bpf_map_lookup_elem(&cgrp_readings, &key);
+ if (cgrp_val) {
+ cgrp_val->counter += delta.counter;
+ cgrp_val->enabled += delta.enabled;
+ cgrp_val->running += delta.running;
+ } else {
+ bpf_map_update_elem(&cgrp_readings, &key,
+ &delta, BPF_ANY);
+ }
+ }
+ }
+
+ *prev_val = val;
+ }
+ return 0;
+}
+
+// This will be attached to cgroup-switches event for each cpu
+SEC("perf_events")
+int BPF_PROG(on_cgrp_switch)
+{
+ return bperf_cgroup_count();
+}
+
+SEC("raw_tp/sched_switch")
+int BPF_PROG(trigger_read)
+{
+ return bperf_cgroup_count();
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
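
Stripped of the BPF map plumbing, the accounting the program above does on every cgroup switch is: read the current value of each event, compute the delta against the per-cpu previous reading, and add that delta to the accumulator of every ancestor cgroup of the outgoing task. A plain-C sketch of that step, with illustrative names and array layout:

#include <stdint.h>

struct val_sketch { uint64_t counter, enabled, running; };

/* now:           current readings, one entry per event
 * prev:          this cpu's previous readings, one entry per event
 * cgrp_readings: accumulators laid out as [cgroup][event]
 * ancestors:     map indexes of the outgoing task's ancestor cgroups */
static void account_switch_sketch(const struct val_sketch *now,
				  struct val_sketch *prev,
				  struct val_sketch *cgrp_readings,
				  const uint32_t *ancestors, int nr_ancestors,
				  uint32_t num_events)
{
	for (uint32_t idx = 0; idx < num_events; idx++) {
		struct val_sketch delta = {
			.counter = now[idx].counter - prev[idx].counter,
			.enabled = now[idx].enabled - prev[idx].enabled,
			.running = now[idx].running - prev[idx].running,
		};

		for (int c = 0; c < nr_ancestors; c++) {
			struct val_sketch *acc =
				&cgrp_readings[ancestors[c] * num_events + idx];

			acc->counter += delta.counter;
			acc->enabled += delta.enabled;
			acc->running += delta.running;
		}

		prev[idx] = now[idx];	/* snapshot for the next switch */
	}
}
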
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index f24ab4585553..e99b41f9be45 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -9,6 +9,7 @@
#include <linux/zalloc.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/statfs.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
@@ -17,6 +18,7 @@
#include <regex.h>
int nr_cgroups;
+bool cgrp_event_expanded;
/* used to match cgroup name with patterns */
struct cgroup_name {
@@ -45,6 +47,49 @@ static int open_cgroup(const char *name)
return fd;
}
+#ifdef HAVE_FILE_HANDLE
+int read_cgroup_id(struct cgroup *cgrp)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+ struct {
+ struct file_handle fh;
+ uint64_t cgroup_id;
+ } handle;
+ int mount_id;
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, "perf_event"))
+ return -1;
+
+ scnprintf(path, PATH_MAX, "%s/%s", mnt, cgrp->name);
+
+ handle.fh.handle_bytes = sizeof(handle.cgroup_id);
+ if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
+ return -1;
+
+ cgrp->id = handle.cgroup_id;
+ return 0;
+}
+#endif /* HAVE_FILE_HANDLE */
+
+#ifndef CGROUP2_SUPER_MAGIC
+#define CGROUP2_SUPER_MAGIC 0x63677270
+#endif
+
+int cgroup_is_v2(const char *subsys)
+{
+ char mnt[PATH_MAX + 1];
+ struct statfs stbuf;
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, subsys))
+ return -1;
+
+ if (statfs(mnt, &stbuf) < 0)
+ return -1;
+
+ return (stbuf.f_type == CGROUP2_SUPER_MAGIC);
+}
+
static struct cgroup *evlist__find_cgroup(struct evlist *evlist, const char *str)
{
struct evsel *counter;
@@ -414,7 +459,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
if (evsel__is_group_leader(pos))
leader = evsel;
- evsel->leader = leader;
+ evsel__set_leader(evsel, leader);
evlist__add(tmp_list, evsel);
}
@@ -440,6 +485,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
}
ret = 0;
+ cgrp_event_expanded = true;
out_err:
evlist__delete(orig_list);
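
cgroup_is_v2() above boils down to comparing the filesystem magic of the subsystem's mount point. A standalone check along the same lines (the mount path below is an assumption for the example; perf resolves the real perf_event mount point first):

#include <stdio.h>
#include <sys/statfs.h>

#ifndef CGROUP2_SUPER_MAGIC
#define CGROUP2_SUPER_MAGIC 0x63677270
#endif

int main(void)
{
	const char *mnt = "/sys/fs/cgroup";	/* assumed mount point for the example */
	struct statfs stbuf;

	if (statfs(mnt, &stbuf) < 0) {
		perror("statfs");
		return 1;
	}

	printf("%s is %s\n", mnt,
	       stbuf.f_type == CGROUP2_SUPER_MAGIC ? "cgroup v2" : "not cgroup v2");
	return 0;
}
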
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
index 162906f3412a..12256b78608c 100644
--- a/tools/perf/util/cgroup.h
+++ b/tools/perf/util/cgroup.h
@@ -2,6 +2,7 @@
#ifndef __CGROUP_H__
#define __CGROUP_H__
+#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include "util/env.h"
@@ -17,6 +18,7 @@ struct cgroup {
};
extern int nr_cgroups; /* number of explicit cgroups defined */
+extern bool cgrp_event_expanded;
struct cgroup *cgroup__get(struct cgroup *cgroup);
void cgroup__put(struct cgroup *cgroup);
@@ -38,4 +40,15 @@ struct cgroup *cgroup__find(struct perf_env *env, uint64_t id);
void perf_env__purge_cgroups(struct perf_env *env);
+#ifdef HAVE_FILE_HANDLE
+int read_cgroup_id(struct cgroup *cgrp);
+#else
+static inline int read_cgroup_id(struct cgroup *cgrp __maybe_unused)
+{
+ return -1;
+}
+#endif /* HAVE_FILE_HANDLE */
+
+int cgroup_is_v2(const char *subsys);
+
#endif /* __CGROUP_H__ */
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index 1b52402a8923..ec77e2a7b3ca 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -12,6 +12,7 @@
#include "cpumap.h"
#include "debug.h"
#include "env.h"
+#include "pmu-hybrid.h"
#define CORE_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
@@ -351,3 +352,82 @@ void numa_topology__delete(struct numa_topology *tp)
free(tp);
}
+
+static int load_hybrid_node(struct hybrid_topology_node *node,
+ struct perf_pmu *pmu)
+{
+ const char *sysfs;
+ char path[PATH_MAX];
+ char *buf = NULL, *p;
+ FILE *fp;
+ size_t len = 0;
+
+ node->pmu_name = strdup(pmu->name);
+ if (!node->pmu_name)
+ return -1;
+
+ sysfs = sysfs__mountpoint();
+ if (!sysfs)
+ goto err;
+
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, pmu->name);
+ fp = fopen(path, "r");
+ if (!fp)
+ goto err;
+
+ if (getline(&buf, &len, fp) <= 0) {
+ fclose(fp);
+ goto err;
+ }
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ fclose(fp);
+ node->cpus = buf;
+ return 0;
+
+err:
+ zfree(&node->pmu_name);
+ free(buf);
+ return -1;
+}
+
+struct hybrid_topology *hybrid_topology__new(void)
+{
+ struct perf_pmu *pmu;
+ struct hybrid_topology *tp = NULL;
+ u32 nr, i = 0;
+
+ nr = perf_pmu__hybrid_pmu_num();
+ if (nr == 0)
+ return NULL;
+
+ tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
+ if (!tp)
+ return NULL;
+
+ tp->nr = nr;
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (load_hybrid_node(&tp->nodes[i], pmu)) {
+ hybrid_topology__delete(tp);
+ return NULL;
+ }
+ i++;
+ }
+
+ return tp;
+}
+
+void hybrid_topology__delete(struct hybrid_topology *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->nr; i++) {
+ zfree(&tp->nodes[i].pmu_name);
+ zfree(&tp->nodes[i].cpus);
+ }
+
+ free(tp);
+}
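
load_hybrid_node() above records, for each hybrid PMU, the cpu list that the PMU exposes in sysfs. A self-contained sketch of the same read (the sysfs path and PMU names such as "cpu_core"/"cpu_atom" are assumptions for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns a malloc'ed cpu list such as "0-15", or NULL; caller frees. */
static char *read_pmu_cpus_sketch(const char *pmu_name)
{
	char path[256];
	char *line = NULL;
	size_t len = 0;
	FILE *fp;

	/* assumed sysfs layout for the example */
	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/cpus", pmu_name);

	fp = fopen(path, "r");
	if (!fp)
		return NULL;

	if (getline(&line, &len, fp) <= 0) {
		free(line);
		line = NULL;
	} else {
		line[strcspn(line, "\n")] = '\0';	/* strip trailing newline */
	}

	fclose(fp);
	return line;
}
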
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
index 6201c3790d86..d9af97177068 100644
--- a/tools/perf/util/cputopo.h
+++ b/tools/perf/util/cputopo.h
@@ -25,10 +25,23 @@ struct numa_topology {
struct numa_topology_node nodes[];
};
+struct hybrid_topology_node {
+ char *pmu_name;
+ char *cpus;
+};
+
+struct hybrid_topology {
+ u32 nr;
+ struct hybrid_topology_node nodes[];
+};
+
struct cpu_topology *cpu_topology__new(void);
void cpu_topology__delete(struct cpu_topology *tp);
struct numa_topology *numa_topology__new(void);
void numa_topology__delete(struct numa_topology *tp);
+struct hybrid_topology *hybrid_topology__new(void);
+void hybrid_topology__delete(struct hybrid_topology *tp);
+
#endif /* __PERF_CPUTOPO_H */
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 059bcec3f651..3e1a05bc82cc 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -6,6 +6,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
@@ -17,6 +18,7 @@
#include "cs-etm.h"
#include "cs-etm-decoder.h"
+#include "debug.h"
#include "intlist.h"
/* use raw logging */
@@ -276,13 +278,13 @@ cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
const uint8_t trace_chan_id)
{
/* No timestamp packet has been received, nothing to do */
- if (!packet_queue->timestamp)
+ if (!packet_queue->cs_timestamp)
return OCSD_RESP_CONT;
- packet_queue->timestamp = packet_queue->next_timestamp;
+ packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
/* Estimate the timestamp for the next range packet */
- packet_queue->next_timestamp += packet_queue->instr_count;
+ packet_queue->next_cs_timestamp += packet_queue->instr_count;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
@@ -294,7 +296,8 @@ cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
- const uint8_t trace_chan_id)
+ const uint8_t trace_chan_id,
+ const ocsd_trc_index_t indx)
{
struct cs_etm_packet_queue *packet_queue;
@@ -308,20 +311,39 @@ cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
* Function do_soft_timestamp() will report the value to the front end,
* hence asking the decoder to keep decoding rather than stopping.
*/
- if (packet_queue->timestamp) {
- packet_queue->next_timestamp = elem->timestamp;
+ if (packet_queue->cs_timestamp) {
+ packet_queue->next_cs_timestamp = elem->timestamp;
return OCSD_RESP_CONT;
}
- /*
- * This is the first timestamp we've seen since the beginning of traces
- * or a discontinuity. Since timestamps packets are generated *after*
- * range packets have been generated, we need to estimate the time at
- * which instructions started by subtracting the number of instructions
- * executed to the timestamp.
- */
- packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
- packet_queue->next_timestamp = elem->timestamp;
+
+ if (!elem->timestamp) {
+ /*
+ * Zero timestamps can be seen due to misconfiguration or hardware bugs.
+ * Warn once, and don't try to subtract instr_count as it would result in an
+ * underflow.
+ */
+ packet_queue->cs_timestamp = 0;
+ WARN_ONCE(true, "Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
+ ". Decoding may be improved with --itrace=Z...\n", indx);
+ } else if (packet_queue->instr_count > elem->timestamp) {
+ /*
+ * Sanity check that the elem->timestamp - packet_queue->instr_count would not
+ * result in an underflow. Warn and clamp at 0 if it would.
+ */
+ packet_queue->cs_timestamp = 0;
+ pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
+ } else {
+ /*
+ * This is the first timestamp we've seen since the beginning of traces
+ * or a discontinuity. Since timestamp packets are generated *after*
+ * range packets have been generated, we need to estimate the time at
+ * which instructions started by subtracting the number of instructions
+ * executed from the timestamp.
+ */
+ packet_queue->cs_timestamp = elem->timestamp - packet_queue->instr_count;
+ }
+ packet_queue->next_cs_timestamp = elem->timestamp;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
@@ -334,8 +356,8 @@ cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
- packet_queue->timestamp = 0;
- packet_queue->next_timestamp = 0;
+ packet_queue->cs_timestamp = 0;
+ packet_queue->next_cs_timestamp = 0;
packet_queue->instr_count = 0;
}
@@ -542,7 +564,7 @@ cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
const void *context,
- const ocsd_trc_index_t indx __maybe_unused,
+ const ocsd_trc_index_t indx,
const u8 trace_chan_id __maybe_unused,
const ocsd_generic_trace_elem *elem)
{
@@ -579,7 +601,8 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
break;
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
- trace_chan_id);
+ trace_chan_id,
+ indx);
break;
case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
resp = cs_etm_decoder__set_tid(etmq, packet_queue,
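
Because Coresight timestamp packets arrive after the range packets they cover, the decoder estimates the start time by subtracting instr_count from the timestamp; the new branches above guard the two cases that would underflow. A minimal worked sketch of that guard (hypothetical helper name):

#include <stdint.h>

/* e.g. (1000, 40) -> 960; (30, 40) -> 0 rather than a huge wrapped value */
static uint64_t first_instr_timestamp_sketch(uint64_t elem_ts, uint64_t instr_count)
{
	if (elem_ts == 0 || instr_count > elem_ts)
		return 0;	/* clamp instead of underflowing */
	return elem_ts - instr_count;
}
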
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 7e63e7dedc33..bc1f64873c8f 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -38,8 +38,6 @@
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"
-#define MAX_TIMESTAMP (~0ULL)
-
struct cs_etm_auxtrace {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
@@ -56,6 +54,7 @@ struct cs_etm_auxtrace {
u8 sample_instructions;
int num_cpu;
+ u64 latest_kernel_timestamp;
u32 auxtrace_type;
u64 branches_sample_type;
u64 branches_id;
@@ -86,7 +85,7 @@ struct cs_etm_queue {
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
unsigned int queue_nr;
- u8 pending_timestamp;
+ u8 pending_timestamp_chan_id;
u64 offset;
const unsigned char *buf;
size_t buf_len, buf_used;
@@ -208,7 +207,7 @@ void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
* be more than one channel per cs_etm_queue, we need to specify
* what traceID queue needs servicing.
*/
- etmq->pending_timestamp = trace_chan_id;
+ etmq->pending_timestamp_chan_id = trace_chan_id;
}
static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
@@ -216,22 +215,22 @@ static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
{
struct cs_etm_packet_queue *packet_queue;
- if (!etmq->pending_timestamp)
+ if (!etmq->pending_timestamp_chan_id)
return 0;
if (trace_chan_id)
- *trace_chan_id = etmq->pending_timestamp;
+ *trace_chan_id = etmq->pending_timestamp_chan_id;
packet_queue = cs_etm__etmq_get_packet_queue(etmq,
- etmq->pending_timestamp);
+ etmq->pending_timestamp_chan_id);
if (!packet_queue)
return 0;
/* Acknowledge pending status */
- etmq->pending_timestamp = 0;
+ etmq->pending_timestamp_chan_id = 0;
/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
- return packet_queue->timestamp;
+ return packet_queue->cs_timestamp;
}
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
@@ -814,7 +813,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
int ret = 0;
unsigned int cs_queue_nr;
u8 trace_chan_id;
- u64 timestamp;
+ u64 cs_timestamp;
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
@@ -854,7 +853,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
/*
* Run decoder on the trace block. The decoder will stop when
- * encountering a timestamp, a full packet queue or the end of
+ * encountering a CS timestamp, a full packet queue or the end of
* trace for that block.
*/
ret = cs_etm__decode_data_block(etmq);
@@ -865,10 +864,10 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
* the timestamp calculation for us.
*/
- timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
+ cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
/* We found a timestamp, no need to continue. */
- if (timestamp)
+ if (cs_timestamp)
break;
/*
@@ -892,7 +891,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* queue and will be processed in cs_etm__process_queues().
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
- ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
return ret;
}
@@ -1194,6 +1193,8 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
event->sample.header.size = sizeof(struct perf_event_header);
+ if (!etm->timeless_decoding)
+ sample.time = etm->latest_kernel_timestamp;
sample.ip = addr;
sample.pid = tidq->pid;
sample.tid = tidq->tid;
@@ -1250,6 +1251,8 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
event->sample.header.size = sizeof(struct perf_event_header);
+ if (!etm->timeless_decoding)
+ sample.time = etm->latest_kernel_timestamp;
sample.ip = ip;
sample.pid = tidq->pid;
sample.tid = tidq->tid;
@@ -2221,7 +2224,7 @@ static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
int ret = 0;
unsigned int cs_queue_nr, queue_nr;
u8 trace_chan_id;
- u64 timestamp;
+ u64 cs_timestamp;
struct auxtrace_queue *queue;
struct cs_etm_queue *etmq;
struct cs_etm_traceid_queue *tidq;
@@ -2283,9 +2286,9 @@ refetch:
if (ret)
goto out;
- timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
+ cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
- if (!timestamp) {
+ if (!cs_timestamp) {
/*
* Function cs_etm__decode_data_block() returns when
* there is no more traces to decode in the current
@@ -2308,7 +2311,7 @@ refetch:
* this queue/traceID.
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
- ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
}
out:
@@ -2380,7 +2383,7 @@ static int cs_etm__process_event(struct perf_session *session,
struct perf_tool *tool)
{
int err = 0;
- u64 timestamp;
+ u64 sample_kernel_timestamp;
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
@@ -2394,16 +2397,21 @@ static int cs_etm__process_event(struct perf_session *session,
}
if (sample->time && (sample->time != (u64) -1))
- timestamp = sample->time;
+ sample_kernel_timestamp = sample->time;
else
- timestamp = 0;
+ sample_kernel_timestamp = 0;
- if (timestamp || etm->timeless_decoding) {
+ if (sample_kernel_timestamp || etm->timeless_decoding) {
err = cs_etm__update_queues(etm);
if (err)
return err;
}
+ /*
+ * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
+ * need the tid of the PERF_RECORD_EXIT event to assign to the synthesised samples because
+ * ETM_OPT_CTXTID is not enabled.
+ */
if (etm->timeless_decoding &&
event->header.type == PERF_RECORD_EXIT)
return cs_etm__process_timeless_queues(etm,
@@ -2414,13 +2422,34 @@ static int cs_etm__process_event(struct perf_session *session,
else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
return cs_etm__process_switch_cpu_wide(etm, event);
- if (!etm->timeless_decoding &&
- event->header.type == PERF_RECORD_AUX)
- return cs_etm__process_queues(etm);
+ if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
+ /*
+ * Record the latest kernel timestamp available in the header
+ * for samples so that synthesised samples occur from this point
+ * onwards.
+ */
+ etm->latest_kernel_timestamp = sample_kernel_timestamp;
+ }
return 0;
}
+static void dump_queued_data(struct cs_etm_auxtrace *etm,
+ struct perf_record_auxtrace *event)
+{
+ struct auxtrace_buffer *buf;
+ unsigned int i;
+ /*
+ * Find all buffers with same reference in the queues and dump them.
+ * This is because the queues can contain multiple entries of the same
+ * buffer that were split on aux records.
+ */
+ for (i = 0; i < etm->queues.nr_queues; ++i)
+ list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
+ if (buf->reference == event->reference)
+ cs_etm__dump_event(etm, buf);
+}
+
static int cs_etm__process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
@@ -2453,7 +2482,8 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
cs_etm__dump_event(etm, buffer);
auxtrace_buffer__put_data(buffer);
}
- }
+ } else if (dump_trace)
+ dump_queued_data(etm, &event->auxtrace);
return 0;
}
@@ -2464,6 +2494,10 @@ static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
struct evlist *evlist = etm->session->evlist;
bool timeless_decoding = true;
+ /* Override timeless mode with user input from --itrace=Z */
+ if (etm->synth_opts.timeless_decoding)
+ return true;
+
/*
* Circle through the list of event and complain if we find one
* with the time bit set.
@@ -2666,6 +2700,172 @@ static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
return metadata;
}
+/**
+ * Puts a fragment of an auxtrace buffer into the auxtrace queues based
+ * on the bounds of aux_event, if it matches with the buffer that's at
+ * file_offset.
+ *
+ * Normally, whole auxtrace buffers would be added to the queue. But we
+ * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
+ * is reset across each buffer, so splitting the buffers up in advance has
+ * the same effect.
+ */
+static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
+ struct perf_record_aux *aux_event, struct perf_sample *sample)
+{
+ int err;
+ char buf[PERF_SAMPLE_MAX_SIZE];
+ union perf_event *auxtrace_event_union;
+ struct perf_record_auxtrace *auxtrace_event;
+ union perf_event auxtrace_fragment;
+ __u64 aux_offset, aux_size;
+
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+
+ /*
+ * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
+ * from looping through the auxtrace index.
+ */
+ err = perf_session__peek_event(session, file_offset, buf,
+ PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
+ if (err)
+ return err;
+ auxtrace_event = &auxtrace_event_union->auxtrace;
+ if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
+ return -EINVAL;
+
+ if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
+ auxtrace_event->header.size != sz) {
+ return -EINVAL;
+ }
+
+ /*
+ * In per-thread mode, CPU is set to -1, but TID will be set instead. See
+ * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
+ */
+ if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
+ auxtrace_event->cpu != sample->cpu)
+ return 1;
+
+ if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
+ /*
+ * Clamp size in snapshot mode. The buffer size is clamped in
+ * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
+ * the buffer size.
+ */
+ aux_size = min(aux_event->aux_size, auxtrace_event->size);
+
+ /*
+ * In this mode, the head also points to the end of the buffer, so aux_offset
+ * needs to have the size subtracted so that it points to the beginning, as in normal mode.
+ */
+ aux_offset = aux_event->aux_offset - aux_size;
+ } else {
+ aux_size = aux_event->aux_size;
+ aux_offset = aux_event->aux_offset;
+ }
+
+ if (aux_offset >= auxtrace_event->offset &&
+ aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
+ /*
+ * If this AUX event was inside this buffer somewhere, create a new auxtrace event
+ * based on the sizes of the aux event, and queue that fragment.
+ */
+ auxtrace_fragment.auxtrace = *auxtrace_event;
+ auxtrace_fragment.auxtrace.size = aux_size;
+ auxtrace_fragment.auxtrace.offset = aux_offset;
+ file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
+
+ pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
+ " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
+ return auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
+ file_offset, NULL);
+ }
+
+ /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
+ return 1;
+}
+
+static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
+ u64 offset __maybe_unused, void *data __maybe_unused)
+{
+ struct perf_sample sample;
+ int ret;
+ struct auxtrace_index_entry *ent;
+ struct auxtrace_index *auxtrace_index;
+ struct evsel *evsel;
+ size_t i;
+
+ /* Don't care about any other events, we're only queuing buffers for AUX events */
+ if (event->header.type != PERF_RECORD_AUX)
+ return 0;
+
+ if (event->header.size < sizeof(struct perf_record_aux))
+ return -EINVAL;
+
+ /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
+ if (!event->aux.aux_size)
+ return 0;
+
+ /*
+ * Parse the sample; we need the sample_id_all data that comes after the event so that the
+ * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
+ */
+ evsel = evlist__event2evsel(session->evlist, event);
+ if (!evsel)
+ return -EINVAL;
+ ret = evsel__parse_sample(evsel, event, &sample);
+ if (ret)
+ return ret;
+
+ /*
+ * Loop through the auxtrace index to find the buffer that matches up with this aux event.
+ */
+ list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
+ for (i = 0; i < auxtrace_index->nr; i++) {
+ ent = &auxtrace_index->entries[i];
+ ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
+ ent->sz, &event->aux, &sample);
+ /*
+ * Stop search on error or successful values. Continue search on
+ * 1 ('not found')
+ */
+ if (ret != 1)
+ return ret;
+ }
+ }
+
+ /*
+ * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
+ * don't exit with an error because it will still be possible to decode other aux records.
+ */
+ pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
+ " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
+ return 0;
+}
+
+static int cs_etm__queue_aux_records(struct perf_session *session)
+{
+ struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
+ struct auxtrace_index, list);
+ if (index && index->nr > 0)
+ return perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ cs_etm__queue_aux_records_cb, NULL);
+
+ /*
+ * We would get here if there are no entries in the index (either no auxtrace
+ * buffers or no index at all). Fail silently as there is the possibility of
+ * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
+ * false.
+ *
+ * In that scenario, buffers will not be split by AUX records.
+ */
+ return 0;
+}
+
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
@@ -2810,6 +3010,14 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
if (err)
goto err_free_etm;
+ if (session->itrace_synth_opts->set) {
+ etm->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&etm->synth_opts,
+ session->itrace_synth_opts->default_no_sample);
+ etm->synth_opts.callchain = false;
+ }
+
etm->session = session;
etm->machine = &session->machines.host;
@@ -2851,22 +3059,13 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
if (dump_trace) {
cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
- return 0;
- }
-
- if (session->itrace_synth_opts->set) {
- etm->synth_opts = *session->itrace_synth_opts;
- } else {
- itrace_synth_opts__set_default(&etm->synth_opts,
- session->itrace_synth_opts->default_no_sample);
- etm->synth_opts.callchain = false;
}
err = cs_etm__synth_events(etm, session);
if (err)
goto err_delete_thread;
- err = auxtrace_queues__process_index(&etm->queues, session);
+ err = cs_etm__queue_aux_records(session);
if (err)
goto err_delete_thread;
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 36428918411e..d65c7b19407d 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -171,8 +171,8 @@ struct cs_etm_packet_queue {
u32 head;
u32 tail;
u32 instr_count;
- u64 timestamp;
- u64 next_timestamp;
+ u64 cs_timestamp;
+ u64 next_cs_timestamp;
struct cs_etm_packet packet_buffer[CS_ETM_PACKET_MAX_BUFFER];
};
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 8fca4779ae6a..f5d260b1df4d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -20,7 +20,7 @@
static void close_dir(struct perf_data_file *files, int nr)
{
- while (--nr >= 1) {
+ while (--nr >= 0) {
close(files[nr].fd);
zfree(&files[nr].path);
}
@@ -240,11 +240,12 @@ static bool is_dir(struct perf_data *data)
static int open_file_read(struct perf_data *data)
{
+ int flags = data->in_place_update ? O_RDWR : O_RDONLY;
struct stat st;
int fd;
char sbuf[STRERR_BUFSIZE];
- fd = open(data->file.path, O_RDONLY);
+ fd = open(data->file.path, flags);
if (fd < 0) {
int err = errno;
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 62a3e66fbee8..c9de82af5584 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -31,6 +31,7 @@ struct perf_data {
bool is_dir;
bool force;
bool use_stdio;
+ bool in_place_update;
enum perf_data_mode mode;
struct {
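
The new flag is consumed by open_file_read() above to pick O_RDWR over O_RDONLY. Within this series the intended user is the Intel PT VM time-correlation pass, which patches TSC packets in the mmapped trace data in place (see the intel-pt-decoder changes below). A sketch of a caller opting in (file name assumed for illustration):

        /* Hypothetical caller: request a writable mapping of the data file. */
        struct perf_data data = {
                .path            = "perf.data",
                .mode            = PERF_DATA_MODE_READ,
                .in_place_update = true,        /* open(2) with O_RDWR instead of O_RDONLY */
        };

        if (perf_data__open(&data))
                return -1;
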
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 5cd189172525..e0d4f08839fb 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -343,7 +343,7 @@ static int db_export__threads(struct db_export *dbe, struct thread *thread,
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al, struct addr_location *addr_al)
{
struct thread *thread = al->thread;
struct export_sample es = {
@@ -389,18 +389,14 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
}
}
- if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
- sample_addr_correlates_sym(&evsel->core.attr)) {
- struct addr_location addr_al;
-
- thread__resolve(thread, &addr_al, sample);
- err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id,
+ if (addr_al) {
+ err = db_ids_from_al(dbe, addr_al, &es.addr_dso_db_id,
&es.addr_sym_db_id, &es.addr_offset);
if (err)
goto out_put;
if (dbe->crp) {
err = thread_stack__process(thread, comm, sample, al,
- &addr_al, es.db_id,
+ addr_al, es.db_id,
dbe->crp);
if (err)
goto out_put;
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 9c3d38f5a40d..23983cb35706 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -97,7 +97,7 @@ int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name);
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
- struct addr_location *al);
+ struct addr_location *al, struct addr_location *addr_al);
int db_export__branch_types(struct db_export *dbe);
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
new file mode 100644
index 000000000000..ca33fbc5efde
--- /dev/null
+++ b/tools/perf/util/dlfilter.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dlfilter.c: Interface to perf script --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include <subcmd/exec-cmd.h>
+#include <linux/zalloc.h>
+#include <linux/build_bug.h>
+
+#include "debug.h"
+#include "event.h"
+#include "evsel.h"
+#include "dso.h"
+#include "map.h"
+#include "thread.h"
+#include "trace-event.h"
+#include "symbol.h"
+#include "srcline.h"
+#include "dlfilter.h"
+#include "perf_dlfilter.h"
+
+static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
+{
+ struct symbol *sym = al->sym;
+
+ d_al->size = sizeof(*d_al);
+ if (al->map) {
+ struct dso *dso = al->map->dso;
+
+ if (symbol_conf.show_kernel_path && dso->long_name)
+ d_al->dso = dso->long_name;
+ else
+ d_al->dso = dso->name;
+ d_al->is_64_bit = dso->is_64_bit;
+ d_al->buildid_size = dso->bid.size;
+ d_al->buildid = dso->bid.data;
+ } else {
+ d_al->dso = NULL;
+ d_al->is_64_bit = 0;
+ d_al->buildid_size = 0;
+ d_al->buildid = NULL;
+ }
+ if (sym) {
+ d_al->sym = sym->name;
+ d_al->sym_start = sym->start;
+ d_al->sym_end = sym->end;
+ if (al->addr < sym->end)
+ d_al->symoff = al->addr - sym->start;
+ else
+ d_al->symoff = al->addr - al->map->start - sym->start;
+ d_al->sym_binding = sym->binding;
+ } else {
+ d_al->sym = NULL;
+ d_al->sym_start = 0;
+ d_al->sym_end = 0;
+ d_al->symoff = 0;
+ d_al->sym_binding = 0;
+ }
+ d_al->addr = al->addr;
+ d_al->comm = NULL;
+ d_al->filtered = 0;
+}
+
+static struct addr_location *get_al(struct dlfilter *d)
+{
+ struct addr_location *al = d->al;
+
+ if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
+ return NULL;
+ return al;
+}
+
+static struct thread *get_thread(struct dlfilter *d)
+{
+ struct addr_location *al = get_al(d);
+
+ return al ? al->thread : NULL;
+}
+
+static const struct perf_dlfilter_al *dlfilter__resolve_ip(void *ctx)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct perf_dlfilter_al *d_al = d->d_ip_al;
+ struct addr_location *al;
+
+ if (!d->ctx_valid)
+ return NULL;
+
+ /* 'size' is also used to indicate already initialized */
+ if (d_al->size)
+ return d_al;
+
+ al = get_al(d);
+ if (!al)
+ return NULL;
+
+ al_to_d_al(al, d_al);
+
+ d_al->is_kernel_ip = machine__kernel_ip(d->machine, d->sample->ip);
+ d_al->comm = al->thread ? thread__comm_str(al->thread) : ":-1";
+ d_al->filtered = al->filtered;
+
+ return d_al;
+}
+
+static const struct perf_dlfilter_al *dlfilter__resolve_addr(void *ctx)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct perf_dlfilter_al *d_addr_al = d->d_addr_al;
+ struct addr_location *addr_al = d->addr_al;
+
+ if (!d->ctx_valid || !d->d_sample->addr_correlates_sym)
+ return NULL;
+
+ /* 'size' is also used to indicate already initialized */
+ if (d_addr_al->size)
+ return d_addr_al;
+
+ if (!addr_al->thread) {
+ struct thread *thread = get_thread(d);
+
+ if (!thread)
+ return NULL;
+ thread__resolve(thread, addr_al, d->sample);
+ }
+
+ al_to_d_al(addr_al, d_addr_al);
+
+ d_addr_al->is_kernel_ip = machine__kernel_ip(d->machine, d->sample->addr);
+
+ return d_addr_al;
+}
+
+static char **dlfilter__args(void *ctx, int *dlargc)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+
+ if (dlargc)
+ *dlargc = 0;
+ else
+ return NULL;
+
+ if (!d->ctx_valid && !d->in_start && !d->in_stop)
+ return NULL;
+
+ *dlargc = d->dlargc;
+ return d->dlargv;
+}
+
+static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlfilter_al *d_al_p)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct perf_dlfilter_al d_al;
+ struct addr_location al;
+ struct thread *thread;
+ __u32 sz;
+
+ if (!d->ctx_valid || !d_al_p)
+ return -1;
+
+ thread = get_thread(d);
+ if (!thread)
+ return -1;
+
+ thread__find_symbol_fb(thread, d->sample->cpumode, address, &al);
+
+ al_to_d_al(&al, &d_al);
+
+ d_al.is_kernel_ip = machine__kernel_ip(d->machine, address);
+
+ sz = d_al_p->size;
+ memcpy(d_al_p, &d_al, min((size_t)sz, sizeof(d_al)));
+ d_al_p->size = sz;
+
+ return 0;
+}
+
+static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+
+ if (!len)
+ return NULL;
+
+ *len = 0;
+
+ if (!d->ctx_valid)
+ return NULL;
+
+ if (d->sample->ip && !d->sample->insn_len) {
+ struct addr_location *al = d->al;
+
+ if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
+ return NULL;
+
+ if (al->thread->maps && al->thread->maps->machine)
+ script_fetch_insn(d->sample, al->thread, al->thread->maps->machine);
+ }
+
+ if (!d->sample->insn_len)
+ return NULL;
+
+ *len = d->sample->insn_len;
+
+ return (__u8 *)d->sample->insn;
+}
+
+static const char *dlfilter__srcline(void *ctx, __u32 *line_no)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct addr_location *al;
+ unsigned int line = 0;
+ char *srcfile = NULL;
+ struct map *map;
+ u64 addr;
+
+ if (!d->ctx_valid || !line_no)
+ return NULL;
+
+ al = get_al(d);
+ if (!al)
+ return NULL;
+
+ map = al->map;
+ addr = al->addr;
+
+ if (map && map->dso)
+ srcfile = get_srcline_split(map->dso, map__rip_2objdump(map, addr), &line);
+
+ *line_no = line;
+ return srcfile;
+}
+
+static struct perf_event_attr *dlfilter__attr(void *ctx)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+
+ if (!d->ctx_valid)
+ return NULL;
+
+ return &d->evsel->core.attr;
+}
+
+static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct addr_location *al;
+ struct addr_location a;
+ struct map *map;
+ u64 offset;
+
+ if (!d->ctx_valid)
+ return -1;
+
+ al = get_al(d);
+ if (!al)
+ return -1;
+
+ map = al->map;
+
+ if (map && ip >= map->start && ip < map->end &&
+ machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
+ goto have_map;
+
+ thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
+ if (!a.map)
+ return -1;
+
+ map = a.map;
+have_map:
+ offset = map->map_ip(map, ip);
+ if (ip + len >= map->end)
+ len = map->end - ip;
+ return dso__data_read_offset(map->dso, d->machine, offset, buf, len);
+}
+
+static const struct perf_dlfilter_fns perf_dlfilter_fns = {
+ .resolve_ip = dlfilter__resolve_ip,
+ .resolve_addr = dlfilter__resolve_addr,
+ .args = dlfilter__args,
+ .resolve_address = dlfilter__resolve_address,
+ .insn = dlfilter__insn,
+ .srcline = dlfilter__srcline,
+ .attr = dlfilter__attr,
+ .object_code = dlfilter__object_code,
+};
+
+static char *find_dlfilter(const char *file)
+{
+ char path[PATH_MAX];
+ char *exec_path;
+
+ if (strchr(file, '/'))
+ goto out;
+
+ if (!access(file, R_OK)) {
+ /*
+ * Prepend "./" so that dlopen will find the file in the
+ * current directory.
+ */
+ snprintf(path, sizeof(path), "./%s", file);
+ file = path;
+ goto out;
+ }
+
+ exec_path = get_argv_exec_path();
+ if (!exec_path)
+ goto out;
+ snprintf(path, sizeof(path), "%s/dlfilters/%s", exec_path, file);
+ free(exec_path);
+ if (!access(path, R_OK))
+ file = path;
+out:
+ return strdup(file);
+}
+
+#define CHECK_FLAG(x) BUILD_BUG_ON((u64)PERF_DLFILTER_FLAG_ ## x != (u64)PERF_IP_FLAG_ ## x)
+
+static int dlfilter__init(struct dlfilter *d, const char *file, int dlargc, char **dlargv)
+{
+ CHECK_FLAG(BRANCH);
+ CHECK_FLAG(CALL);
+ CHECK_FLAG(RETURN);
+ CHECK_FLAG(CONDITIONAL);
+ CHECK_FLAG(SYSCALLRET);
+ CHECK_FLAG(ASYNC);
+ CHECK_FLAG(INTERRUPT);
+ CHECK_FLAG(TX_ABORT);
+ CHECK_FLAG(TRACE_BEGIN);
+ CHECK_FLAG(TRACE_END);
+ CHECK_FLAG(IN_TX);
+ CHECK_FLAG(VMENTRY);
+ CHECK_FLAG(VMEXIT);
+
+ memset(d, 0, sizeof(*d));
+ d->file = find_dlfilter(file);
+ if (!d->file)
+ return -1;
+ d->dlargc = dlargc;
+ d->dlargv = dlargv;
+ return 0;
+}
+
+static void dlfilter__exit(struct dlfilter *d)
+{
+ zfree(&d->file);
+}
+
+static int dlfilter__open(struct dlfilter *d)
+{
+ d->handle = dlopen(d->file, RTLD_NOW);
+ if (!d->handle) {
+ pr_err("dlopen failed for: '%s'\n", d->file);
+ return -1;
+ }
+ d->start = dlsym(d->handle, "start");
+ d->filter_event = dlsym(d->handle, "filter_event");
+ d->filter_event_early = dlsym(d->handle, "filter_event_early");
+ d->stop = dlsym(d->handle, "stop");
+ d->fns = dlsym(d->handle, "perf_dlfilter_fns");
+ if (d->fns)
+ memcpy(d->fns, &perf_dlfilter_fns, sizeof(struct perf_dlfilter_fns));
+ return 0;
+}
+
+static int dlfilter__close(struct dlfilter *d)
+{
+ return dlclose(d->handle);
+}
+
+struct dlfilter *dlfilter__new(const char *file, int dlargc, char **dlargv)
+{
+ struct dlfilter *d = malloc(sizeof(*d));
+
+ if (!d)
+ return NULL;
+
+ if (dlfilter__init(d, file, dlargc, dlargv))
+ goto err_free;
+
+ if (dlfilter__open(d))
+ goto err_exit;
+
+ return d;
+
+err_exit:
+ dlfilter__exit(d);
+err_free:
+ free(d);
+ return NULL;
+}
+
+static void dlfilter__free(struct dlfilter *d)
+{
+ if (d) {
+ dlfilter__exit(d);
+ free(d);
+ }
+}
+
+int dlfilter__start(struct dlfilter *d, struct perf_session *session)
+{
+ if (d) {
+ d->session = session;
+ if (d->start) {
+ int ret;
+
+ d->in_start = true;
+ ret = d->start(&d->data, d);
+ d->in_start = false;
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int dlfilter__stop(struct dlfilter *d)
+{
+ if (d && d->stop) {
+ int ret;
+
+ d->in_stop = true;
+ ret = d->stop(d->data, d);
+ d->in_stop = false;
+ return ret;
+ }
+ return 0;
+}
+
+void dlfilter__cleanup(struct dlfilter *d)
+{
+ if (d) {
+ dlfilter__stop(d);
+ dlfilter__close(d);
+ dlfilter__free(d);
+ }
+}
+
+#define ASSIGN(x) d_sample.x = sample->x
+
+int dlfilter__do_filter_event(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ bool early)
+{
+ struct perf_dlfilter_sample d_sample;
+ struct perf_dlfilter_al d_ip_al;
+ struct perf_dlfilter_al d_addr_al;
+ int ret;
+
+ d->event = event;
+ d->sample = sample;
+ d->evsel = evsel;
+ d->machine = machine;
+ d->al = al;
+ d->addr_al = addr_al;
+ d->d_sample = &d_sample;
+ d->d_ip_al = &d_ip_al;
+ d->d_addr_al = &d_addr_al;
+
+ d_sample.size = sizeof(d_sample);
+ d_ip_al.size = 0; /* To indicate d_ip_al is not initialized */
+ d_addr_al.size = 0; /* To indicate d_addr_al is not initialized */
+
+ ASSIGN(ip);
+ ASSIGN(pid);
+ ASSIGN(tid);
+ ASSIGN(time);
+ ASSIGN(addr);
+ ASSIGN(id);
+ ASSIGN(stream_id);
+ ASSIGN(period);
+ ASSIGN(weight);
+ ASSIGN(ins_lat);
+ ASSIGN(p_stage_cyc);
+ ASSIGN(transaction);
+ ASSIGN(insn_cnt);
+ ASSIGN(cyc_cnt);
+ ASSIGN(cpu);
+ ASSIGN(flags);
+ ASSIGN(data_src);
+ ASSIGN(phys_addr);
+ ASSIGN(data_page_size);
+ ASSIGN(code_page_size);
+ ASSIGN(cgroup);
+ ASSIGN(cpumode);
+ ASSIGN(misc);
+ ASSIGN(raw_size);
+ ASSIGN(raw_data);
+
+ if (sample->branch_stack) {
+ d_sample.brstack_nr = sample->branch_stack->nr;
+ d_sample.brstack = (struct perf_branch_entry *)perf_sample__branch_entries(sample);
+ } else {
+ d_sample.brstack_nr = 0;
+ d_sample.brstack = NULL;
+ }
+
+ if (sample->callchain) {
+ d_sample.raw_callchain_nr = sample->callchain->nr;
+ d_sample.raw_callchain = (__u64 *)sample->callchain->ips;
+ } else {
+ d_sample.raw_callchain_nr = 0;
+ d_sample.raw_callchain = NULL;
+ }
+
+ d_sample.addr_correlates_sym =
+ (evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
+ sample_addr_correlates_sym(&evsel->core.attr);
+
+ d_sample.event = evsel__name(evsel);
+
+ d->ctx_valid = true;
+
+ if (early)
+ ret = d->filter_event_early(d->data, &d_sample, d);
+ else
+ ret = d->filter_event(d->data, &d_sample, d);
+
+ d->ctx_valid = false;
+
+ return ret;
+}
+
+static bool get_filter_desc(const char *dirname, const char *name,
+ char **desc, char **long_desc)
+{
+ char path[PATH_MAX];
+ void *handle;
+ const char *(*desc_fn)(const char **long_description);
+
+ snprintf(path, sizeof(path), "%s/%s", dirname, name);
+ handle = dlopen(path, RTLD_NOW);
+ if (!handle || !(dlsym(handle, "filter_event") || dlsym(handle, "filter_event_early")))
+ return false;
+ desc_fn = dlsym(handle, "filter_description");
+ if (desc_fn) {
+ const char *dsc;
+ const char *long_dsc;
+
+ dsc = desc_fn(&long_dsc);
+ if (dsc)
+ *desc = strdup(dsc);
+ if (long_dsc)
+ *long_desc = strdup(long_dsc);
+ }
+ dlclose(handle);
+ return true;
+}
+
+static void list_filters(const char *dirname)
+{
+ struct dirent *entry;
+ DIR *dir;
+
+ dir = opendir(dirname);
+ if (!dir)
+ return;
+
+ while ((entry = readdir(dir)) != NULL)
+ {
+ size_t n = strlen(entry->d_name);
+ char *long_desc = NULL;
+ char *desc = NULL;
+
+ if (entry->d_type == DT_DIR || n < 4 ||
+ strcmp(".so", entry->d_name + n - 3))
+ continue;
+ if (!get_filter_desc(dirname, entry->d_name, &desc, &long_desc))
+ continue;
+ printf(" %-36s %s\n", entry->d_name, desc ? desc : "");
+ if (verbose) {
+ char *p = long_desc;
+ char *line;
+
+ while ((line = strsep(&p, "\n")) != NULL)
+ printf("%39s%s\n", "", line);
+ }
+ free(long_desc);
+ free(desc);
+ }
+
+ closedir(dir);
+}
+
+int list_available_dlfilters(const struct option *opt __maybe_unused,
+ const char *s __maybe_unused,
+ int unset __maybe_unused)
+{
+ char path[PATH_MAX];
+ char *exec_path;
+
+ printf("List of available dlfilters:\n");
+
+ list_filters(".");
+
+ exec_path = get_argv_exec_path();
+ if (!exec_path)
+ goto out;
+ snprintf(path, sizeof(path), "%s/dlfilters", exec_path);
+
+ list_filters(path);
+
+ free(exec_path);
+out:
+ exit(0);
+}
diff --git a/tools/perf/util/dlfilter.h b/tools/perf/util/dlfilter.h
new file mode 100644
index 000000000000..505980442360
--- /dev/null
+++ b/tools/perf/util/dlfilter.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dlfilter.h: Interface to perf script --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#ifndef PERF_UTIL_DLFILTER_H
+#define PERF_UTIL_DLFILTER_H
+
+struct perf_session;
+union perf_event;
+struct perf_sample;
+struct evsel;
+struct machine;
+struct addr_location;
+struct perf_dlfilter_fns;
+struct perf_dlfilter_sample;
+struct perf_dlfilter_al;
+
+struct dlfilter {
+ char *file;
+ void *handle;
+ void *data;
+ struct perf_session *session;
+ bool ctx_valid;
+ bool in_start;
+ bool in_stop;
+ int dlargc;
+ char **dlargv;
+
+ union perf_event *event;
+ struct perf_sample *sample;
+ struct evsel *evsel;
+ struct machine *machine;
+ struct addr_location *al;
+ struct addr_location *addr_al;
+ struct perf_dlfilter_sample *d_sample;
+ struct perf_dlfilter_al *d_ip_al;
+ struct perf_dlfilter_al *d_addr_al;
+
+ int (*start)(void **data, void *ctx);
+ int (*stop)(void *data, void *ctx);
+
+ int (*filter_event)(void *data,
+ const struct perf_dlfilter_sample *sample,
+ void *ctx);
+ int (*filter_event_early)(void *data,
+ const struct perf_dlfilter_sample *sample,
+ void *ctx);
+
+ struct perf_dlfilter_fns *fns;
+};
+
+struct dlfilter *dlfilter__new(const char *file, int dlargc, char **dlargv);
+
+int dlfilter__start(struct dlfilter *d, struct perf_session *session);
+
+int dlfilter__do_filter_event(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ bool early);
+
+void dlfilter__cleanup(struct dlfilter *d);
+
+static inline int dlfilter__filter_event(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al)
+{
+ if (!d || !d->filter_event)
+ return 0;
+ return dlfilter__do_filter_event(d, event, sample, evsel, machine, al, addr_al, false);
+}
+
+static inline int dlfilter__filter_event_early(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al)
+{
+ if (!d || !d->filter_event_early)
+ return 0;
+ return dlfilter__do_filter_event(d, event, sample, evsel, machine, al, addr_al, true);
+}
+
+int list_available_dlfilters(const struct option *opt, const char *s, int unset);
+
+#endif
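
To make the interface concrete: a dlfilter is an ordinary shared object exporting the symbols that dlfilter__open() looks up above (filter_event, filter_event_early, start, stop, filter_description, and the perf_dlfilter_fns table that perf fills in via dlsym()). A minimal sketch, assuming the API header is installed as perf/perf_dlfilter.h like the in-tree example filters, and assuming the documented convention that a non-zero return from filter_event drops the sample:

        /* minimal-filter.c - hypothetical dlfilter that keeps only kernel-IP samples */
        #include <stddef.h>
        #include <perf/perf_dlfilter.h>

        struct perf_dlfilter_fns perf_dlfilter_fns;     /* populated by perf via dlsym() */

        int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
        {
                const struct perf_dlfilter_al *al = perf_dlfilter_fns.resolve_ip(ctx);

                (void)data;
                (void)sample;
                return !(al && al->is_kernel_ip);       /* non-zero drops the sample */
        }

        const char *filter_description(const char **long_description)
        {
                *long_description = "Keep only samples whose IP resolves to the kernel";
                return "kernel-only sample filter";
        }

Built with something like 'cc -shared -fPIC -o minimal-filter.so minimal-filter.c' and loaded via perf script --dlfilter ./minimal-filter.so; list_filters() above is what backs perf script --list-dlfilters.
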
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d786cf6b0cfa..ee15db2be2f4 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1154,8 +1154,10 @@ struct map *dso__new_map(const char *name)
struct map *map = NULL;
struct dso *dso = dso__new(name);
- if (dso)
+ if (dso) {
map = map__new2(0, dso);
+ dso__put(dso);
+ }
return map;
}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7d2ba8419b0c..609ca1671501 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -113,14 +113,14 @@ static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
*
* Find a line number and file name for @addr in @cu_die.
*/
-int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
- const char **fname, int *lineno)
+int cu_find_lineinfo(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ const char **fname, int *lineno)
{
Dwarf_Line *line;
Dwarf_Die die_mem;
Dwarf_Addr faddr;
- if (die_find_realfunc(cu_die, (Dwarf_Addr)addr, &die_mem)
+ if (die_find_realfunc(cu_die, addr, &die_mem)
&& die_entrypc(&die_mem, &faddr) == 0 &&
faddr == addr) {
*fname = dwarf_decl_file(&die_mem);
@@ -128,7 +128,7 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
goto out;
}
- line = cu_getsrc_die(cu_die, (Dwarf_Addr)addr);
+ line = cu_getsrc_die(cu_die, addr);
if (line && dwarf_lineno(line, lineno) == 0) {
*fname = dwarf_linesrc(line, NULL, NULL);
if (!*fname)
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index cb99646843a9..7ee0fa19b5c4 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -19,7 +19,7 @@ const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname);
const char *cu_get_comp_dir(Dwarf_Die *cu_die);
/* Get a line number and file name for given address */
-int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
+int cu_find_lineinfo(Dwarf_Die *cudie, Dwarf_Addr addr,
const char **fname, int *lineno);
/* Walk on functions at given address */
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index bc5e4f294e9e..cec2e6cad8aa 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -186,10 +186,12 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->cpuid);
zfree(&env->cmdline);
zfree(&env->cmdline_argv);
+ zfree(&env->sibling_dies);
zfree(&env->sibling_cores);
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ zfree(&env->cpu_pmu_caps);
zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
@@ -203,6 +205,18 @@ void perf_env__exit(struct perf_env *env)
for (i = 0; i < env->nr_memory_nodes; i++)
zfree(&env->memory_nodes[i].set);
zfree(&env->memory_nodes);
+
+ for (i = 0; i < env->nr_hybrid_nodes; i++) {
+ zfree(&env->hybrid_nodes[i].pmu_name);
+ zfree(&env->hybrid_nodes[i].cpus);
+ }
+ zfree(&env->hybrid_nodes);
+
+ for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
+ zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
+ zfree(&env->hybrid_cpc_nodes[i].pmu_name);
+ }
+ zfree(&env->hybrid_cpc_nodes);
}
void perf_env__init(struct perf_env *env __maybe_unused)
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index ca249bf5e984..6824a7423a2d 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -37,6 +37,18 @@ struct memory_node {
unsigned long *set;
};
+struct hybrid_node {
+ char *pmu_name;
+ char *cpus;
+};
+
+struct hybrid_cpc_node {
+ int nr_cpu_pmu_caps;
+ unsigned int max_branches;
+ char *cpu_pmu_caps;
+ char *pmu_name;
+};
+
struct perf_env {
char *hostname;
char *os_release;
@@ -59,6 +71,8 @@ struct perf_env {
int nr_pmu_mappings;
int nr_groups;
int nr_cpu_pmu_caps;
+ int nr_hybrid_nodes;
+ int nr_hybrid_cpc_nodes;
char *cmdline;
const char **cmdline_argv;
char *sibling_cores;
@@ -77,6 +91,8 @@ struct perf_env {
struct numa_node *numa_nodes;
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
+ struct hybrid_node *hybrid_nodes;
+ struct hybrid_cpc_node *hybrid_cpc_nodes;
#ifdef HAVE_LIBBPF_SUPPORT
/*
* bpf_info_lock protects bpf rbtrees. This is needed because the
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6ea3e677dc1e..47581a237c7a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -165,11 +165,9 @@ void evlist__delete(struct evlist *evlist)
void evlist__add(struct evlist *evlist, struct evsel *entry)
{
- entry->evlist = evlist;
- entry->idx = evlist->core.nr_entries;
- entry->tracking = !entry->idx;
-
perf_evlist__add(&evlist->core, &entry->core);
+ entry->evlist = evlist;
+ entry->tracking = !entry->core.idx;
if (evlist->core.nr_entries == 1)
evlist__set_id_pos(evlist);
@@ -194,7 +192,7 @@ void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
}
__evlist__for_each_entry_safe(list, temp, evsel) {
- if (evsel->leader == leader) {
+ if (evsel__has_leader(evsel, leader)) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
}
@@ -225,26 +223,9 @@ out:
return err;
}
-void __evlist__set_leader(struct list_head *list)
-{
- struct evsel *evsel, *leader;
-
- leader = list_entry(list->next, struct evsel, core.node);
- evsel = list_entry(list->prev, struct evsel, core.node);
-
- leader->core.nr_members = evsel->idx - leader->idx + 1;
-
- __evlist__for_each_entry(list, evsel) {
- evsel->leader = leader;
- }
-}
-
void evlist__set_leader(struct evlist *evlist)
{
- if (evlist->core.nr_entries) {
- evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
- __evlist__set_leader(&evlist->core.entries);
- }
+ perf_evlist__set_leader(&evlist->core);
}
int __evlist__add_default(struct evlist *evlist, bool precise)
@@ -1626,7 +1607,7 @@ void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
return;
evlist__for_each_entry_safe(evlist, n, evsel) {
- if (evsel->leader == move_evsel->leader)
+ if (evsel__leader(evsel) == evsel__leader(move_evsel))
list_move_tail(&evsel->core.node, &move);
}
@@ -1750,7 +1731,7 @@ bool evlist__exclude_kernel(struct evlist *evlist)
*/
void evlist__force_leader(struct evlist *evlist)
{
- if (!evlist->nr_groups) {
+ if (!evlist->core.nr_groups) {
struct evsel *leader = evlist__first(evlist);
evlist__set_leader(evlist);
@@ -1763,7 +1744,8 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
struct evsel *c2, *leader;
bool is_open = true;
- leader = evsel->leader;
+ leader = evsel__leader(evsel);
+
pr_debug("Weak group for %s/%d failed\n",
leader->name, leader->core.nr_members);
@@ -1774,10 +1756,10 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
evlist__for_each_entry(evsel_list, c2) {
if (c2 == evsel)
is_open = false;
- if (c2->leader == leader) {
+ if (evsel__has_leader(c2, leader)) {
if (is_open && close)
perf_evsel__close(&c2->core);
- c2->leader = c2;
+ evsel__set_leader(c2, c2);
c2->core.nr_members = 0;
/*
* Set this for all former members of the group
@@ -2137,7 +2119,7 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->idx == idx)
+ if (evsel->core.idx == idx)
return evsel;
}
return NULL;
@@ -2161,3 +2143,28 @@ int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
return printed;
}
+
+void evlist__check_mem_load_aux(struct evlist *evlist)
+{
+ struct evsel *leader, *evsel, *pos;
+
+ /*
+	 * On some platforms, the 'mem-loads' event must be used together with
+	 * 'mem-loads-aux' within a group, and 'mem-loads-aux' must be the group
+	 * leader. Break up such groups before reporting, because 'mem-loads-aux'
+	 * is only an auxiliary event and doesn't carry any valid memory load
+	 * information.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ leader = evsel__leader(evsel);
+ if (leader == evsel)
+ continue;
+
+ if (leader->name && strstr(leader->name, "mem-loads-aux")) {
+ for_each_group_evsel(pos, leader) {
+ evsel__set_leader(pos, pos);
+ pos->core.nr_members = 0;
+ }
+ }
+ }
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a8b97b50cceb..5c22383489ae 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -50,7 +50,6 @@ enum bkw_mmap_state {
struct evlist {
struct perf_evlist core;
- int nr_groups;
bool enabled;
int id_pos;
int is_pos;
@@ -202,7 +201,6 @@ void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);
int evlist__create_maps(struct evlist *evlist, struct target *target);
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
-void __evlist__set_leader(struct list_head *list);
void evlist__set_leader(struct evlist *evlist);
u64 __evlist__combined_sample_type(struct evlist *evlist);
@@ -367,4 +365,5 @@ int evlist__ctlfd_ack(struct evlist *evlist);
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
+void evlist__check_mem_load_aux(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a8d8463f8ee5..f61e5dd53f5d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -239,10 +239,8 @@ bool evsel__is_function_event(struct evsel *evsel)
void evsel__init(struct evsel *evsel,
struct perf_event_attr *attr, int idx)
{
- perf_evsel__init(&evsel->core, attr);
- evsel->idx = idx;
+ perf_evsel__init(&evsel->core, attr, idx);
evsel->tracking = !idx;
- evsel->leader = evsel;
evsel->unit = "";
evsel->scale = 1.0;
evsel->max_events = ULONG_MAX;
@@ -410,7 +408,7 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->cgrp = cgroup__get(orig->cgrp);
evsel->tp_format = orig->tp_format;
evsel->handler = orig->handler;
- evsel->leader = orig->leader;
+ evsel->core.leader = orig->core.leader;
evsel->max_events = orig->max_events;
evsel->tool_event = orig->tool_event;
@@ -1075,7 +1073,7 @@ void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
void evsel__config(struct evsel *evsel, struct record_opts *opts,
struct callchain_param *callchain)
{
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
int track = evsel->tracking;
bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
@@ -1582,9 +1580,30 @@ int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale)
return 0;
}
+static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
+ int cpu)
+{
+ int cpuid;
+
+ cpuid = perf_cpu_map__cpu(evsel->core.cpus, cpu);
+ return perf_cpu_map__idx(other->core.cpus, cpuid);
+}
+
+static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu)
+{
+ struct evsel *leader = evsel__leader(evsel);
+
+ if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
+ (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
+ return evsel__match_other_cpu(evsel, leader, cpu);
+ }
+
+ return cpu;
+}
+
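
evsel__hybrid_group_cpu() translates a CPU-map index between a group member and its leader when exactly one of the two is a hybrid-PMU event, so that the FD(leader, cpu, thread) lookup below uses the leader's own index space. An illustration under an assumed topology (CPU numbers are made up):

        /*
         * Assumed topology: leader on "cpu_core" with CPUs 0-7, member a
         * non-hybrid event with CPUs 0-15.
         *
         *   member idx 3  --perf_cpu_map__cpu()-->  CPU 3
         *   CPU 3         --perf_cpu_map__idx()-->  leader idx 3
         *
         *   member idx 12 maps to CPU 12, which is absent from the leader's
         *   map, so perf_cpu_map__idx() returns -1 and get_group_fd() returns
         *   early instead of indexing FD(leader, 12, thread) out of range.
         */
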
static int get_group_fd(struct evsel *evsel, int cpu, int thread)
{
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
int fd;
if (evsel__is_group_leader(evsel))
@@ -1596,6 +1615,10 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
*/
BUG_ON(!leader->core.fd);
+ cpu = evsel__hybrid_group_cpu(evsel, cpu);
+ if (cpu == -1)
+ return -1;
+
fd = FD(leader, cpu, thread);
BUG_ON(fd == -1);
@@ -2826,3 +2849,23 @@ bool evsel__is_hybrid(struct evsel *evsel)
{
return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name);
}
+
+struct evsel *evsel__leader(struct evsel *evsel)
+{
+ return container_of(evsel->core.leader, struct evsel, core);
+}
+
+bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
+{
+ return evsel->core.leader == &leader->core;
+}
+
+bool evsel__is_leader(struct evsel *evsel)
+{
+ return evsel__has_leader(evsel, evsel);
+}
+
+void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
+{
+ evsel->core.leader = &leader->core;
+}
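
These helpers wrap the group-leader pointer that this series moves from struct evsel into the libperf perf_evsel; callers switch from dereferencing evsel->leader to the accessors, as in the evlist.c changes above. A sketch of the replacement pattern (hypothetical helper name):

        /* Hypothetical helper using the new accessors instead of evsel->leader. */
        static bool evsel__same_group(struct evsel *a, struct evsel *b)
        {
                return evsel__leader(a) == evsel__leader(b);
        }
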
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index bdad52a06438..80383096d51c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -49,7 +49,6 @@ struct evsel {
struct perf_evsel core;
struct evlist *evlist;
off_t id_offset;
- int idx;
int id_pos;
int is_pos;
unsigned int sample_size;
@@ -119,7 +118,6 @@ struct evsel {
bool reset_group;
bool errored;
struct hashmap *per_pkg_mask;
- struct evsel *leader;
int err;
int cpu_iter;
struct {
@@ -368,7 +366,7 @@ static inline struct evsel *evsel__prev(struct evsel *evsel)
*/
static inline bool evsel__is_group_leader(const struct evsel *evsel)
{
- return evsel->leader == evsel;
+ return evsel->core.leader == &evsel->core;
}
/**
@@ -406,19 +404,19 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
static inline int evsel__group_idx(struct evsel *evsel)
{
- return evsel->idx - evsel->leader->idx;
+ return evsel->core.idx - evsel->core.leader->idx;
}
/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader) \
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
- (_evsel) && (_evsel)->leader == (_leader); \
+ (_evsel) && (_evsel)->core.leader == (&_leader->core); \
(_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader) \
for ((_evsel) = _leader; \
- (_evsel) && (_evsel)->leader == (_leader); \
+ (_evsel) && (_evsel)->core.leader == (&_leader->core); \
(_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
@@ -463,4 +461,8 @@ int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
void evsel__zero_per_pkg(struct evsel *evsel);
bool evsel__is_hybrid(struct evsel *evsel);
+struct evsel *evsel__leader(struct evsel *evsel);
+bool evsel__has_leader(struct evsel *evsel, struct evsel *leader);
+bool evsel__is_leader(struct evsel *evsel);
+void evsel__set_leader(struct evsel *evsel, struct evsel *leader);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index aa1e42518d37..44249027507a 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -49,6 +49,7 @@
#include "cputopo.h"
#include "bpf-event.h"
#include "clockid.h"
+#include "pmu-hybrid.h"
#include <linux/ctype.h>
#include <internal/lib.h>
@@ -777,7 +778,7 @@ static int write_pmu_mappings(struct feat_fd *ff,
static int write_group_desc(struct feat_fd *ff,
struct evlist *evlist)
{
- u32 nr_groups = evlist->nr_groups;
+ u32 nr_groups = evlist->core.nr_groups;
struct evsel *evsel;
int ret;
@@ -788,7 +789,7 @@ static int write_group_desc(struct feat_fd *ff,
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
const char *name = evsel->group_name ?: "{anon_group}";
- u32 leader_idx = evsel->idx;
+ u32 leader_idx = evsel->core.idx;
u32 nr_members = evsel->core.nr_members;
ret = do_write_string(ff, name);
@@ -932,6 +933,40 @@ static int write_clock_data(struct feat_fd *ff,
return do_write(ff, data64, sizeof(*data64));
}
+static int write_hybrid_topology(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct hybrid_topology *tp;
+ int ret;
+ u32 i;
+
+ tp = hybrid_topology__new();
+ if (!tp)
+ return -ENOENT;
+
+ ret = do_write(ff, &tp->nr, sizeof(u32));
+ if (ret < 0)
+ goto err;
+
+ for (i = 0; i < tp->nr; i++) {
+ struct hybrid_topology_node *n = &tp->nodes[i];
+
+ ret = do_write_string(ff, n->pmu_name);
+ if (ret < 0)
+ goto err;
+
+ ret = do_write_string(ff, n->cpus);
+ if (ret < 0)
+ goto err;
+ }
+
+ ret = 0;
+
+err:
+ hybrid_topology__delete(tp);
+ return ret;
+}
+
static int write_dir_format(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
@@ -1425,18 +1460,14 @@ static int write_compressed(struct feat_fd *ff __maybe_unused,
return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
-static int write_cpu_pmu_caps(struct feat_fd *ff,
- struct evlist *evlist __maybe_unused)
+static int write_per_cpu_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
+ bool write_pmu)
{
- struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
struct perf_pmu_caps *caps = NULL;
int nr_caps;
int ret;
- if (!cpu_pmu)
- return -ENOENT;
-
- nr_caps = perf_pmu__caps_parse(cpu_pmu);
+ nr_caps = perf_pmu__caps_parse(pmu);
if (nr_caps < 0)
return nr_caps;
@@ -1444,7 +1475,7 @@ static int write_cpu_pmu_caps(struct feat_fd *ff,
if (ret < 0)
return ret;
- list_for_each_entry(caps, &cpu_pmu->caps, list) {
+ list_for_each_entry(caps, &pmu->caps, list) {
ret = do_write_string(ff, caps->name);
if (ret < 0)
return ret;
@@ -1454,9 +1485,49 @@ static int write_cpu_pmu_caps(struct feat_fd *ff,
return ret;
}
+ if (write_pmu) {
+ ret = do_write_string(ff, pmu->name);
+ if (ret < 0)
+ return ret;
+ }
+
return ret;
}
+static int write_cpu_pmu_caps(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
+
+ if (!cpu_pmu)
+ return -ENOENT;
+
+ return write_per_cpu_pmu_caps(ff, cpu_pmu, false);
+}
+
+static int write_hybrid_cpu_pmu_caps(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *pmu;
+ u32 nr_pmu = perf_pmu__hybrid_pmu_num();
+ int ret;
+
+ if (nr_pmu == 0)
+ return -ENOENT;
+
+ ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
+ if (ret < 0)
+ return ret;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ ret = write_per_cpu_pmu_caps(ff, pmu, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1623,6 +1694,18 @@ static void print_clock_data(struct feat_fd *ff, FILE *fp)
clockid_name(clockid));
}
+static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
+{
+ int i;
+ struct hybrid_node *n;
+
+ fprintf(fp, "# hybrid cpu system:\n");
+ for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
+ n = &ff->ph->env.hybrid_nodes[i];
+ fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
+ }
+}
+
static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
struct perf_session *session;
@@ -1761,7 +1844,7 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
msz = sz;
for (i = 0, evsel = events; i < nre; evsel++, i++) {
- evsel->idx = i;
+ evsel->core.idx = i;
/*
* must read entire on-file attr struct to
@@ -1916,18 +1999,28 @@ static void print_compressed(struct feat_fd *ff, FILE *fp)
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
-static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char *cpu_pmu_caps,
+ char *pmu_name)
{
- const char *delimiter = "# cpu pmu capabilities: ";
- u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
- char *str;
+ const char *delimiter;
+ char *str, buf[128];
if (!nr_caps) {
- fprintf(fp, "# cpu pmu capabilities: not available\n");
+ if (!pmu_name)
+ fprintf(fp, "# cpu pmu capabilities: not available\n");
+ else
+ fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
return;
}
- str = ff->ph->env.cpu_pmu_caps;
+ if (!pmu_name)
+ scnprintf(buf, sizeof(buf), "# cpu pmu capabilities: ");
+ else
+ scnprintf(buf, sizeof(buf), "# %s pmu capabilities: ", pmu_name);
+
+ delimiter = buf;
+
+ str = cpu_pmu_caps;
while (nr_caps--) {
fprintf(fp, "%s%s", delimiter, str);
delimiter = ", ";
@@ -1937,6 +2030,24 @@ static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
fprintf(fp, "\n");
}
+static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+{
+ print_per_cpu_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
+ ff->ph->env.cpu_pmu_caps, NULL);
+}
+
+static void print_hybrid_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+{
+ struct hybrid_cpc_node *n;
+
+ for (int i = 0; i < ff->ph->env.nr_hybrid_cpc_nodes; i++) {
+ n = &ff->ph->env.hybrid_cpc_nodes[i];
+ print_per_cpu_pmu_caps(fp, n->nr_cpu_pmu_caps,
+ n->cpu_pmu_caps,
+ n->pmu_name);
+ }
+}
+
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
const char *delimiter = "# pmu mappings: ";
@@ -2268,7 +2379,7 @@ static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->idx == idx)
+ if (evsel->core.idx == idx)
return evsel;
}
@@ -2282,7 +2393,7 @@ static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
if (!event->name)
return;
- evsel = evlist__find_by_index(evlist, event->idx);
+ evsel = evlist__find_by_index(evlist, event->core.idx);
if (!evsel)
return;
@@ -2624,12 +2735,12 @@ static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
* Rebuild group relationship based on the group_desc
*/
session = container_of(ff->ph, struct perf_session, header);
- session->evlist->nr_groups = nr_groups;
+ session->evlist->core.nr_groups = nr_groups;
i = nr = 0;
evlist__for_each_entry(session->evlist, evsel) {
- if (evsel->idx == (int) desc[i].leader_idx) {
- evsel->leader = evsel;
+ if (evsel->core.idx == (int) desc[i].leader_idx) {
+ evsel__set_leader(evsel, evsel);
/* {anon_group} is a dummy name */
if (strcmp(desc[i].name, "{anon_group}")) {
evsel->group_name = desc[i].name;
@@ -2647,7 +2758,7 @@ static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
i++;
} else if (nr) {
/* This is a group member */
- evsel->leader = leader;
+ evsel__set_leader(evsel, leader);
nr--;
}
@@ -2849,6 +2960,46 @@ static int process_clock_data(struct feat_fd *ff,
return 0;
}
+static int process_hybrid_topology(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ struct hybrid_node *nodes, *n;
+ u32 nr, i;
+
+ /* nr nodes */
+ if (do_read_u32(ff, &nr))
+ return -1;
+
+ nodes = zalloc(sizeof(*nodes) * nr);
+ if (!nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < nr; i++) {
+ n = &nodes[i];
+
+ n->pmu_name = do_read_string(ff);
+ if (!n->pmu_name)
+ goto error;
+
+ n->cpus = do_read_string(ff);
+ if (!n->cpus)
+ goto error;
+ }
+
+ ff->ph->env.nr_hybrid_nodes = nr;
+ ff->ph->env.hybrid_nodes = nodes;
+ return 0;
+
+error:
+ for (i = 0; i < nr; i++) {
+ free(nodes[i].pmu_name);
+ free(nodes[i].cpus);
+ }
+
+ free(nodes);
+ return -1;
+}
+
static int process_dir_format(struct feat_fd *ff,
void *_data __maybe_unused)
{
@@ -3002,8 +3153,9 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
-static int process_cpu_pmu_caps(struct feat_fd *ff,
- void *data __maybe_unused)
+static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
+ char **cpu_pmu_caps,
+ unsigned int *max_branches)
{
char *name, *value;
struct strbuf sb;
@@ -3017,7 +3169,7 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
return 0;
}
- ff->ph->env.nr_cpu_pmu_caps = nr_caps;
+ *nr_cpu_pmu_caps = nr_caps;
if (strbuf_init(&sb, 128) < 0)
return -1;
@@ -3039,12 +3191,12 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
goto free_value;
if (!strcmp(name, "branches"))
- ff->ph->env.max_branches = atoi(value);
+ *max_branches = atoi(value);
free(value);
free(name);
}
- ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
+ *cpu_pmu_caps = strbuf_detach(&sb, NULL);
return 0;
free_value:
@@ -3056,6 +3208,63 @@ error:
return -1;
}
+static int process_cpu_pmu_caps(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ return process_per_cpu_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
+ &ff->ph->env.cpu_pmu_caps,
+ &ff->ph->env.max_branches);
+}
+
+static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ struct hybrid_cpc_node *nodes;
+ u32 nr_pmu, i;
+ int ret;
+
+ if (do_read_u32(ff, &nr_pmu))
+ return -1;
+
+ if (!nr_pmu) {
+ pr_debug("hybrid cpu pmu capabilities not available\n");
+ return 0;
+ }
+
+ nodes = zalloc(sizeof(*nodes) * nr_pmu);
+ if (!nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pmu; i++) {
+ struct hybrid_cpc_node *n = &nodes[i];
+
+ ret = process_per_cpu_pmu_caps(ff, &n->nr_cpu_pmu_caps,
+ &n->cpu_pmu_caps,
+ &n->max_branches);
+ if (ret)
+ goto err;
+
+ n->pmu_name = do_read_string(ff);
+ if (!n->pmu_name) {
+ ret = -1;
+ goto err;
+ }
+ }
+
+ ff->ph->env.nr_hybrid_cpc_nodes = nr_pmu;
+ ff->ph->env.hybrid_cpc_nodes = nodes;
+ return 0;
+
+err:
+ for (i = 0; i < nr_pmu; i++) {
+ free(nodes[i].cpu_pmu_caps);
+ free(nodes[i].pmu_name);
+ }
+
+ free(nodes);
+ return ret;
+}
+
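
Taken together, write_hybrid_cpu_pmu_caps() above and this reader imply the following layout for the new feature section (inferred from the code, not a formal spec):

        /*
         * HEADER_HYBRID_CPU_PMU_CAPS section layout (inferred):
         *
         *   u32 nr_pmu;                          number of hybrid PMUs
         *   repeated nr_pmu times:
         *     u32 nr_caps;                       capability count for this PMU
         *     repeated nr_caps times:
         *       string name;                     e.g. "branches"
         *       string value;
         *     string pmu_name;                   e.g. "cpu_core" or "cpu_atom"
         *
         * Strings use the usual do_write_string()/do_read_string() framing.
         */
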
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
@@ -3117,6 +3326,8 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(COMPRESSED, compressed, false),
FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
FEAT_OPR(CLOCK_DATA, clock_data, false),
+ FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
+ FEAT_OPR(HYBRID_CPU_PMU_CAPS, hybrid_cpu_pmu_caps, false),
};
struct header_print_data {
@@ -3814,6 +4025,11 @@ int perf_session__read_header(struct perf_session *session)
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
+ if (header->needs_swap && data->in_place_update) {
+ pr_err("In-place update not supported when byte-swapping is required\n");
+ return -EINVAL;
+ }
+
/*
* Sanity check that perf.data was written cleanly; data size is
* initialized to 0 and updated only if the on_exit function is run.
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 2aca71763ecf..ae6b1cf19a7d 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -45,6 +45,8 @@ enum {
HEADER_COMPRESSED,
HEADER_CPU_PMU_CAPS,
HEADER_CLOCK_DATA,
+ HEADER_HYBRID_TOPOLOGY,
+ HEADER_HYBRID_CPU_PMU_CAPS,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 20ad663978cc..5ab631702769 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -35,10 +35,17 @@
#define BIT63 (((uint64_t)1 << 63))
+#define SEVEN_BYTES 0xffffffffffffffULL
+
+#define NO_VMCS 0xffffffffffULL
+
#define INTEL_PT_RETURN 1
-/* Maximum number of loops with no packets consumed i.e. stuck in a loop */
-#define INTEL_PT_MAX_LOOPS 10000
+/*
+ * Default maximum number of loops with no packets consumed i.e. stuck in a
+ * loop.
+ */
+#define INTEL_PT_MAX_LOOPS 100000
struct intel_pt_blk {
struct intel_pt_blk *prev;
@@ -51,6 +58,11 @@ struct intel_pt_stack {
int pos;
};
+enum intel_pt_p_once {
+ INTEL_PT_PRT_ONCE_UNK_VMCS,
+ INTEL_PT_PRT_ONCE_ERANGE,
+};
+
enum intel_pt_pkt_state {
INTEL_PT_STATE_NO_PSB,
INTEL_PT_STATE_NO_IP,
@@ -64,6 +76,7 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_FUP_NO_TIP,
INTEL_PT_STATE_FUP_IN_PSB,
INTEL_PT_STATE_RESAMPLE,
+ INTEL_PT_STATE_VM_TIME_CORRELATION,
};
static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
@@ -75,6 +88,7 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
case INTEL_PT_STATE_IN_SYNC:
case INTEL_PT_STATE_TNT_CONT:
case INTEL_PT_STATE_RESAMPLE:
+ case INTEL_PT_STATE_VM_TIME_CORRELATION:
return true;
case INTEL_PT_STATE_TNT:
case INTEL_PT_STATE_TIP:
@@ -107,6 +121,7 @@ struct intel_pt_decoder {
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
+ struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
void *data;
struct intel_pt_state state;
const unsigned char *buf;
@@ -122,6 +137,11 @@ struct intel_pt_decoder {
bool in_psb;
bool hop;
bool leap;
+ bool vm_time_correlation;
+ bool vm_tm_corr_dry_run;
+ bool vm_tm_corr_reliable;
+ bool vm_tm_corr_same_buf;
+ bool vm_tm_corr_continuous;
bool nr;
bool next_nr;
enum intel_pt_param_flags flags;
@@ -139,6 +159,11 @@ struct intel_pt_decoder {
uint64_t ctc_delta;
uint64_t cycle_cnt;
uint64_t cyc_ref_timestamp;
+ uint64_t first_timestamp;
+ uint64_t last_reliable_timestamp;
+ uint64_t vmcs;
+ uint64_t print_once;
+ uint64_t last_ctc;
uint32_t last_mtc;
uint32_t tsc_ctc_ratio_n;
uint32_t tsc_ctc_ratio_d;
@@ -198,6 +223,7 @@ struct intel_pt_decoder {
uint64_t timestamp_insn_cnt;
uint64_t sample_insn_cnt;
uint64_t stuck_ip;
+ int max_loops;
int no_progress;
int stuck_ip_prd;
int stuck_ip_cnt;
@@ -217,6 +243,31 @@ static uint64_t intel_pt_lower_power_of_2(uint64_t x)
return x << i;
}
+__printf(1, 2)
+static void p_log(const char *fmt, ...)
+{
+ char buf[512];
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ fprintf(stderr, "%s\n", buf);
+ intel_pt_log("%s\n", buf);
+}
+
+static bool intel_pt_print_once(struct intel_pt_decoder *decoder,
+ enum intel_pt_p_once id)
+{
+ uint64_t bit = 1ULL << id;
+
+ if (decoder->print_once & bit)
+ return false;
+ decoder->print_once |= bit;
+ return true;
+}
+
static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
{
if (!(ctl & INTEL_PT_CYC_ENABLE))
@@ -258,11 +309,17 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->walk_insn = params->walk_insn;
decoder->pgd_ip = params->pgd_ip;
decoder->lookahead = params->lookahead;
+ decoder->findnew_vmcs_info = params->findnew_vmcs_info;
decoder->data = params->data;
decoder->return_compression = params->return_compression;
decoder->branch_enable = params->branch_enable;
decoder->hop = params->quick >= 1;
decoder->leap = params->quick >= 2;
+ decoder->vm_time_correlation = params->vm_time_correlation;
+ decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run;
+ decoder->first_timestamp = params->first_timestamp;
+ decoder->last_reliable_timestamp = params->first_timestamp;
+ decoder->max_loops = params->max_loops ? params->max_loops : INTEL_PT_MAX_LOOPS;
decoder->flags = params->flags;
@@ -312,6 +369,12 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
return decoder;
}
+void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
+ uint64_t first_timestamp)
+{
+ decoder->first_timestamp = first_timestamp;
+}
+
static void intel_pt_pop_blk(struct intel_pt_stack *stack)
{
struct intel_pt_blk *blk = stack->blk;
@@ -425,7 +488,7 @@ static const char *intel_pt_err_msgs[] = {
[INTEL_PT_ERR_OVR] = "Overflow packet",
[INTEL_PT_ERR_LOST] = "Lost trace data",
[INTEL_PT_ERR_UNK] = "Unknown error!",
- [INTEL_PT_ERR_NELOOP] = "Never-ending loop",
+ [INTEL_PT_ERR_NELOOP] = "Never-ending loop (refer perf config intel-pt.max-loops)",
};
int intel_pt__strerror(int code, char *buf, size_t buflen)
@@ -577,6 +640,7 @@ static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
intel_pt_reposition(decoder);
decoder->ref_timestamp = buffer.ref_timestamp;
decoder->state.trace_nr = buffer.trace_nr;
+ decoder->vm_tm_corr_same_buf = false;
intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
decoder->ref_timestamp);
return -ENOLINK;
@@ -1109,7 +1173,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
decoder->stuck_ip = decoder->state.to_ip;
decoder->stuck_ip_prd = 1;
decoder->stuck_ip_cnt = 1;
- } else if (cnt > INTEL_PT_MAX_LOOPS ||
+ } else if (cnt > decoder->max_loops ||
decoder->state.to_ip == decoder->stuck_ip) {
intel_pt_log_at("ERROR: Never-ending loop",
decoder->state.to_ip);
@@ -1469,9 +1533,24 @@ static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
return timestamp;
}
+/* For use only when decoder->vm_time_correlation is true */
+static bool intel_pt_time_in_range(struct intel_pt_decoder *decoder,
+ uint64_t timestamp)
+{
+ uint64_t max_timestamp = decoder->buf_timestamp;
+
+ if (!max_timestamp) {
+ max_timestamp = decoder->last_reliable_timestamp +
+ 0x400000000ULL;
+ }
+ return timestamp >= decoder->last_reliable_timestamp &&
+	       timestamp < max_timestamp;
+}
+
static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t timestamp;
+ bool bad = false;
decoder->have_tma = false;
@@ -1493,10 +1572,21 @@ static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
timestamp = decoder->timestamp;
}
if (timestamp < decoder->timestamp) {
- intel_pt_log_to("Wraparound timestamp", timestamp);
- timestamp += (1ULL << 56);
- decoder->tsc_timestamp = timestamp;
+ if (!decoder->buf_timestamp ||
+ (timestamp + (1ULL << 56) < decoder->buf_timestamp)) {
+ intel_pt_log_to("Wraparound timestamp", timestamp);
+ timestamp += (1ULL << 56);
+ decoder->tsc_timestamp = timestamp;
+ } else {
+ intel_pt_log_to("Suppressing bad timestamp", timestamp);
+ timestamp = decoder->timestamp;
+ bad = true;
+ }
}
+ if (decoder->vm_time_correlation &&
+ (bad || !intel_pt_time_in_range(decoder, timestamp)) &&
+ intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
+ p_log("Timestamp out of range");
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
}
@@ -1573,6 +1663,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
intel_pt_mtc_cyc_cnt_upd(decoder);
decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
+ decoder->last_ctc = ctc - ctc_rem;
decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
if (decoder->tsc_ctc_mult) {
decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
@@ -1957,6 +2048,613 @@ static int intel_pt_resample(struct intel_pt_decoder *decoder)
return 0;
}
+struct intel_pt_vm_tsc_info {
+ struct intel_pt_pkt pip_packet;
+ struct intel_pt_pkt vmcs_packet;
+ struct intel_pt_pkt tma_packet;
+ bool tsc, pip, vmcs, tma, psbend;
+ uint64_t ctc_delta;
+ uint64_t last_ctc;
+ int max_lookahead;
+};
+
+/* Lookahead and get the PIP, VMCS and TMA packets from PSB+ */
+static int intel_pt_vm_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_vm_tsc_info *data = pkt_info->data;
+
+ switch (pkt_info->packet.type) {
+ case INTEL_PT_PAD:
+ case INTEL_PT_MNT:
+ case INTEL_PT_MODE_EXEC:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_MTC:
+ case INTEL_PT_FUP:
+ case INTEL_PT_CYC:
+ case INTEL_PT_CBR:
+ break;
+
+ case INTEL_PT_TSC:
+ data->tsc = true;
+ break;
+
+ case INTEL_PT_TMA:
+ data->tma_packet = pkt_info->packet;
+ data->tma = true;
+ break;
+
+ case INTEL_PT_PIP:
+ data->pip_packet = pkt_info->packet;
+ data->pip = true;
+ break;
+
+ case INTEL_PT_VMCS:
+ data->vmcs_packet = pkt_info->packet;
+ data->vmcs = true;
+ break;
+
+ case INTEL_PT_PSBEND:
+ data->psbend = true;
+ return 1;
+
+ case INTEL_PT_TIP_PGE:
+ case INTEL_PT_PTWRITE:
+ case INTEL_PT_PTWRITE_IP:
+ case INTEL_PT_EXSTOP:
+ case INTEL_PT_EXSTOP_IP:
+ case INTEL_PT_MWAIT:
+ case INTEL_PT_PWRE:
+ case INTEL_PT_PWRX:
+ case INTEL_PT_BBP:
+ case INTEL_PT_BIP:
+ case INTEL_PT_BEP:
+ case INTEL_PT_BEP_IP:
+ case INTEL_PT_OVF:
+ case INTEL_PT_BAD:
+ case INTEL_PT_TNT:
+ case INTEL_PT_TIP_PGD:
+ case INTEL_PT_TIP:
+ case INTEL_PT_PSB:
+ case INTEL_PT_TRACESTOP:
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+struct intel_pt_ovf_fup_info {
+ int max_lookahead;
+ bool found;
+};
+
+/* Lookahead to detect a FUP packet after OVF */
+static int intel_pt_ovf_fup_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_ovf_fup_info *data = pkt_info->data;
+
+ if (pkt_info->packet.type == INTEL_PT_CYC ||
+ pkt_info->packet.type == INTEL_PT_MTC ||
+ pkt_info->packet.type == INTEL_PT_TSC)
+ return !--(data->max_lookahead);
+ data->found = pkt_info->packet.type == INTEL_PT_FUP;
+ return 1;
+}
+
+static bool intel_pt_ovf_fup_lookahead(struct intel_pt_decoder *decoder)
+{
+ struct intel_pt_ovf_fup_info data = {
+ .max_lookahead = 16,
+ .found = false,
+ };
+
+ intel_pt_pkt_lookahead(decoder, intel_pt_ovf_fup_lookahead_cb, &data);
+ return data.found;
+}
+
+/* Lookahead and get the TMA packet after TSC */
+static int intel_pt_tma_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_vm_tsc_info *data = pkt_info->data;
+
+ if (pkt_info->packet.type == INTEL_PT_CYC ||
+ pkt_info->packet.type == INTEL_PT_MTC)
+ return !--(data->max_lookahead);
+
+ if (pkt_info->packet.type == INTEL_PT_TMA) {
+ data->tma_packet = pkt_info->packet;
+ data->tma = true;
+ }
+ return 1;
+}
+
+static uint64_t intel_pt_ctc_to_tsc(struct intel_pt_decoder *decoder, uint64_t ctc)
+{
+ if (decoder->tsc_ctc_mult)
+ return ctc * decoder->tsc_ctc_mult;
+ else
+ return multdiv(ctc, decoder->tsc_ctc_ratio_n, decoder->tsc_ctc_ratio_d);
+}
+
+static uint64_t intel_pt_calc_expected_tsc(struct intel_pt_decoder *decoder,
+ uint32_t ctc,
+ uint32_t fc,
+ uint64_t last_ctc_timestamp,
+ uint64_t ctc_delta,
+ uint32_t last_ctc)
+{
+ /* Number of CTC ticks from last_ctc_timestamp to last_mtc */
+ uint64_t last_mtc_ctc = last_ctc + ctc_delta;
+ /*
+ * Number of CTC ticks from there until current TMA packet. We would
+ * expect last_mtc_ctc to be before ctc, but the TSC packet can slip
+ * past an MTC, so a sign-extended value is used.
+ */
+ uint64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);
+ /* Total CTC ticks from last_ctc_timestamp to current TMA packet */
+ uint64_t new_ctc_delta = ctc_delta + delta;
+ uint64_t expected_tsc;
+
+ /*
+ * Convert CTC ticks to TSC ticks, add the starting point
+ * (last_ctc_timestamp) and the fast counter from the TMA packet.
+ */
+ expected_tsc = last_ctc_timestamp + intel_pt_ctc_to_tsc(decoder, new_ctc_delta) + fc;
+
+ if (intel_pt_enable_logging) {
+ intel_pt_log_x64(last_mtc_ctc);
+ intel_pt_log_x32(last_ctc);
+ intel_pt_log_x64(ctc_delta);
+ intel_pt_log_x64(delta);
+ intel_pt_log_x32(ctc);
+ intel_pt_log_x64(new_ctc_delta);
+ intel_pt_log_x64(last_ctc_timestamp);
+ intel_pt_log_x32(fc);
+ intel_pt_log_x64(intel_pt_ctc_to_tsc(decoder, new_ctc_delta));
+ intel_pt_log_x64(expected_tsc);
+ }
+
+ return expected_tsc;
+}
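
The sign-extended 16-bit subtraction above is the subtle step: the CTC carried by the TMA packet can lag the CTC implied by the last MTC, and the 16-bit value may also have wrapped. A minimal standalone sketch with made-up numbers (plain C, no decoder state) shows how the wrap and the negative delta both come out right:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values, chosen so the 16-bit CTC has wrapped */
        uint32_t last_ctc  = 0xfff0;   /* CTC value at last_ctc_timestamp        */
        uint64_t ctc_delta = 0x20;     /* CTC ticks from there to the last MTC   */
        uint32_t ctc       = 0x000c;   /* CTC reported by the current TMA packet */

        uint64_t last_mtc_ctc = last_ctc + ctc_delta;              /* 0x10010 */
        /* 16-bit subtract, then sign extend: wrap and TSC/MTC slippage both handled */
        int64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);

        printf("delta = %lld CTC ticks\n", (long long)delta);      /* prints -4 */
        return 0;
    }

In the decoder the same value lands in a uint64_t, and two's-complement wrap-around makes the later addition to ctc_delta give the identical result.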
+
+static uint64_t intel_pt_expected_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data)
+{
+ uint32_t ctc = data->tma_packet.payload;
+ uint32_t fc = data->tma_packet.count;
+
+ return intel_pt_calc_expected_tsc(decoder, ctc, fc,
+ decoder->ctc_timestamp,
+ data->ctc_delta, data->last_ctc);
+}
+
+static void intel_pt_translate_vm_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vmcs_info *vmcs_info)
+{
+ uint64_t payload = decoder->packet.payload;
+
+ /* VMX adds the TSC Offset, so subtract to get host TSC */
+ decoder->packet.payload -= vmcs_info->tsc_offset;
+ /* TSC packet has only 7 bytes */
+ decoder->packet.payload &= SEVEN_BYTES;
+
+ /*
+ * The buffer is mmapped from the data file, so this also updates the
+ * data file.
+ */
+ if (!decoder->vm_tm_corr_dry_run)
+ memcpy((void *)decoder->buf + 1, &decoder->packet.payload, 7);
+
+ intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
+ " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
+ payload, decoder->packet.payload, vmcs_info->vmcs,
+ vmcs_info->tsc_offset);
+}
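
A TSC packet carries only 7 bytes of timestamp, so after undoing the VMX TSC Offset the result is masked back to 56 bits before being written over the packet payload in the mmapped buffer. A self-contained sketch of just that arithmetic (SEVEN_BYTES is re-declared locally with the obvious 56-bit value; the payload and offset are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Local re-declaration for the sketch: mask covering the 7 payload bytes */
    #define SEVEN_BYTES 0xffffffffffffffULL

    int main(void)
    {
        uint64_t guest_tsc  = 0x12345678abcdefULL;   /* hypothetical packet payload */
        uint64_t tsc_offset = 0x00001000000000ULL;   /* hypothetical VMX TSC Offset */

        /* Guest TSC = Host TSC + TSC Offset, so subtract and keep 7 bytes */
        uint64_t host_tsc = (guest_tsc - tsc_offset) & SEVEN_BYTES;

        printf("host TSC (7 bytes) = %#llx\n", (unsigned long long)host_tsc);
        return 0;
    }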
+
+static void intel_pt_translate_vm_tsc_offset(struct intel_pt_decoder *decoder,
+ uint64_t tsc_offset)
+{
+ struct intel_pt_vmcs_info vmcs_info = {
+ .vmcs = NO_VMCS,
+ .tsc_offset = tsc_offset
+ };
+
+ intel_pt_translate_vm_tsc(decoder, &vmcs_info);
+}
+
+static inline bool in_vm(uint64_t pip_payload)
+{
+ return pip_payload & 1;
+}
+
+static inline bool pip_in_vm(struct intel_pt_pkt *pip_packet)
+{
+ return pip_packet->payload & 1;
+}
+
+static void intel_pt_print_vmcs_info(struct intel_pt_vmcs_info *vmcs_info)
+{
+ p_log("VMCS: %#" PRIx64 " TSC Offset %#" PRIx64,
+ vmcs_info->vmcs, vmcs_info->tsc_offset);
+}
+
+static void intel_pt_vm_tm_corr_psb(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data)
+{
+ memset(data, 0, sizeof(*data));
+ data->ctc_delta = decoder->ctc_delta;
+ data->last_ctc = decoder->last_ctc;
+ intel_pt_pkt_lookahead(decoder, intel_pt_vm_psb_lookahead_cb, data);
+ if (data->tsc && !data->psbend)
+ p_log("ERROR: PSB without PSBEND");
+ decoder->in_psb = data->psbend;
+}
+
+static void intel_pt_vm_tm_corr_first_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data,
+ struct intel_pt_vmcs_info *vmcs_info,
+ uint64_t host_tsc)
+{
+ if (!decoder->in_psb) {
+ /* Can't happen */
+ p_log("ERROR: First TSC is not in PSB+");
+ }
+
+ if (data->pip) {
+ if (pip_in_vm(&data->pip_packet)) { /* Guest */
+ if (vmcs_info && vmcs_info->tsc_offset) {
+ intel_pt_translate_vm_tsc(decoder, vmcs_info);
+ decoder->vm_tm_corr_reliable = true;
+ } else {
+ p_log("ERROR: First TSC, unknown TSC Offset");
+ }
+ } else { /* Host */
+ decoder->vm_tm_corr_reliable = true;
+ }
+ } else { /* Host or Guest */
+ decoder->vm_tm_corr_reliable = false;
+ if (intel_pt_time_in_range(decoder, host_tsc)) {
+ /* Assume Host */
+ } else {
+ /* Assume Guest */
+ if (vmcs_info && vmcs_info->tsc_offset)
+ intel_pt_translate_vm_tsc(decoder, vmcs_info);
+ else
+ p_log("ERROR: First TSC, no PIP, unknown TSC Offset");
+ }
+ }
+}
+
+static void intel_pt_vm_tm_corr_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data)
+{
+ struct intel_pt_vmcs_info *vmcs_info;
+ uint64_t tsc_offset = 0;
+ uint64_t vmcs;
+ bool reliable = true;
+ uint64_t expected_tsc;
+ uint64_t host_tsc;
+ uint64_t ref_timestamp;
+
+ bool assign = false;
+ bool assign_reliable = false;
+
+ /* Already have 'data' for the in_psb case */
+ if (!decoder->in_psb) {
+ memset(data, 0, sizeof(*data));
+ data->ctc_delta = decoder->ctc_delta;
+ data->last_ctc = decoder->last_ctc;
+ data->max_lookahead = 16;
+ intel_pt_pkt_lookahead(decoder, intel_pt_tma_lookahead_cb, data);
+ if (decoder->pge) {
+ data->pip = true;
+ data->pip_packet.payload = decoder->pip_payload;
+ }
+ }
+
+ /* Calculations depend on having TMA packets */
+ if (!data->tma) {
+ p_log("ERROR: TSC without TMA");
+ return;
+ }
+
+ vmcs = data->vmcs ? data->vmcs_packet.payload : decoder->vmcs;
+ if (vmcs == NO_VMCS)
+ vmcs = 0;
+
+ vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
+
+ ref_timestamp = decoder->timestamp ? decoder->timestamp : decoder->buf_timestamp;
+ host_tsc = intel_pt_8b_tsc(decoder->packet.payload, ref_timestamp);
+
+ if (!decoder->ctc_timestamp) {
+ intel_pt_vm_tm_corr_first_tsc(decoder, data, vmcs_info, host_tsc);
+ return;
+ }
+
+ expected_tsc = intel_pt_expected_tsc(decoder, data);
+
+ tsc_offset = host_tsc - expected_tsc;
+
+ /* Determine if TSC is from Host or Guest */
+ if (data->pip) {
+ if (pip_in_vm(&data->pip_packet)) { /* Guest */
+ if (!vmcs_info) {
+ /* PIP NR=1 without VMCS cannot happen */
+ p_log("ERROR: Missing VMCS");
+ intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
+ decoder->vm_tm_corr_reliable = false;
+ return;
+ }
+ } else { /* Host */
+ decoder->last_reliable_timestamp = host_tsc;
+ decoder->vm_tm_corr_reliable = true;
+ return;
+ }
+ } else { /* Host or Guest */
+ reliable = false; /* Host/Guest is a guess, so not reliable */
+ if (decoder->in_psb) {
+ if (!tsc_offset)
+ return; /* Zero TSC Offset, assume Host */
+ /*
+ * TSC packet has only 7 bytes of TSC. We have no
+ * information about the Guest's 8th byte, but it
+ * doesn't matter because we only need 7 bytes.
+ * Here, since the 8th byte is unreliable and
+ * irrelevant, compare only 7 bytes.
+ */
+ if (vmcs_info &&
+ (tsc_offset & SEVEN_BYTES) ==
+ (vmcs_info->tsc_offset & SEVEN_BYTES)) {
+ /* Same TSC Offset as last VMCS, assume Guest */
+ goto guest;
+ }
+ }
+ /*
+ * Check if the host_tsc is within the expected range.
+ * Note, we could narrow the range more by looking ahead for
+ * the next host TSC in the same buffer, but we don't bother to
+ * do that because this is probably good enough.
+ */
+ if (host_tsc >= expected_tsc && intel_pt_time_in_range(decoder, host_tsc)) {
+ /* Within expected range for Host TSC, assume Host */
+ decoder->vm_tm_corr_reliable = false;
+ return;
+ }
+ }
+
+guest: /* Assuming Guest */
+
+ /* Determine whether to assign TSC Offset */
+ if (vmcs_info && vmcs_info->vmcs) {
+ if (vmcs_info->tsc_offset && vmcs_info->reliable) {
+ assign = false;
+ } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_reliable &&
+ decoder->vm_tm_corr_continuous && decoder->vm_tm_corr_same_buf) {
+ /* Continuous tracing, TSC in a PSB is not a time loss */
+ assign = true;
+ assign_reliable = true;
+ } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_same_buf) {
+ /*
+ * Unlikely to be a time loss TSC in a PSB which is not
+ * at the start of a buffer.
+ */
+ assign = true;
+ assign_reliable = false;
+ }
+ }
+
+ /* Record VMCS TSC Offset */
+ if (assign && (vmcs_info->tsc_offset != tsc_offset ||
+ vmcs_info->reliable != assign_reliable)) {
+ bool print = vmcs_info->tsc_offset != tsc_offset;
+
+ vmcs_info->tsc_offset = tsc_offset;
+ vmcs_info->reliable = assign_reliable;
+ if (print)
+ intel_pt_print_vmcs_info(vmcs_info);
+ }
+
+ /* Determine what TSC Offset to use */
+ if (vmcs_info && vmcs_info->tsc_offset) {
+ if (!vmcs_info->reliable)
+ reliable = false;
+ intel_pt_translate_vm_tsc(decoder, vmcs_info);
+ } else {
+ reliable = false;
+ if (vmcs_info) {
+ if (!vmcs_info->error_printed) {
+ p_log("ERROR: Unknown TSC Offset for VMCS %#" PRIx64,
+ vmcs_info->vmcs);
+ vmcs_info->error_printed = true;
+ }
+ } else {
+ if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
+ p_log("ERROR: Unknown VMCS");
+ }
+ intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
+ }
+
+ decoder->vm_tm_corr_reliable = reliable;
+}
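
When no PIP says whether the TSC came from the host or a guest, the candidate offset (host_tsc - expected_tsc) is compared against the offset last recorded for the VMCS, using only the 7 trustworthy bytes. A small sketch of that comparison with invented numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define SEVEN_BYTES 0xffffffffffffffULL   /* local re-declaration for the sketch */

    int main(void)
    {
        /* Hypothetical values only */
        uint64_t expected_tsc = 0x1000000000ULL;   /* host time implied by CTC/TMA      */
        uint64_t packet_tsc   = 0x1000a00000ULL;   /* TSC value actually in the trace   */
        uint64_t known_offset = 0x0000a00000ULL;   /* offset recorded for the last VMCS */

        uint64_t tsc_offset = packet_tsc - expected_tsc;

        /* Only 7 bytes of TSC are reliable, so compare offsets modulo 2^56 */
        if ((tsc_offset & SEVEN_BYTES) == (known_offset & SEVEN_BYTES))
            printf("same offset as the last VMCS -> assume Guest\n");
        else
            printf("different offset -> fall back to the host time range check\n");
        return 0;
    }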
+
+static void intel_pt_vm_tm_corr_pebs_tsc(struct intel_pt_decoder *decoder)
+{
+ uint64_t host_tsc = decoder->packet.payload;
+ uint64_t guest_tsc = decoder->packet.payload;
+ struct intel_pt_vmcs_info *vmcs_info;
+ uint64_t vmcs;
+
+ vmcs = decoder->vmcs;
+ if (vmcs == NO_VMCS)
+ vmcs = 0;
+
+ vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
+
+ if (decoder->pge) {
+ if (in_vm(decoder->pip_payload)) { /* Guest */
+ if (!vmcs_info) {
+ /* PIP NR=1 without VMCS cannot happen */
+ p_log("ERROR: Missing VMCS");
+ }
+ } else { /* Host */
+ return;
+ }
+ } else { /* Host or Guest */
+ if (intel_pt_time_in_range(decoder, host_tsc)) {
+ /* Within expected range for Host TSC, assume Host */
+ return;
+ }
+ }
+
+ if (vmcs_info) {
+ /* Translate Guest TSC to Host TSC */
+ host_tsc = ((guest_tsc & SEVEN_BYTES) - vmcs_info->tsc_offset) & SEVEN_BYTES;
+ host_tsc = intel_pt_8b_tsc(host_tsc, decoder->timestamp);
+ intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
+ " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
+ guest_tsc, host_tsc, vmcs_info->vmcs,
+ vmcs_info->tsc_offset);
+ if (!intel_pt_time_in_range(decoder, host_tsc) &&
+ intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
+ p_log("Timestamp out of range");
+ } else {
+ if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
+ p_log("ERROR: Unknown VMCS");
+ host_tsc = decoder->timestamp;
+ }
+
+ decoder->packet.payload = host_tsc;
+
+ if (!decoder->vm_tm_corr_dry_run)
+ memcpy((void *)decoder->buf + 1, &host_tsc, 8);
+}
+
+static int intel_pt_vm_time_correlation(struct intel_pt_decoder *decoder)
+{
+ struct intel_pt_vm_tsc_info data = { .psbend = false };
+ bool pge;
+ int err;
+
+ if (decoder->in_psb)
+ intel_pt_vm_tm_corr_psb(decoder, &data);
+
+ while (1) {
+ err = intel_pt_get_next_packet(decoder);
+ if (err == -ENOLINK)
+ continue;
+ if (err)
+ break;
+
+ switch (decoder->packet.type) {
+ case INTEL_PT_TIP_PGD:
+ decoder->pge = false;
+ decoder->vm_tm_corr_continuous = false;
+ break;
+
+ case INTEL_PT_TNT:
+ case INTEL_PT_TIP:
+ case INTEL_PT_TIP_PGE:
+ decoder->pge = true;
+ break;
+
+ case INTEL_PT_OVF:
+ decoder->in_psb = false;
+ pge = decoder->pge;
+ decoder->pge = intel_pt_ovf_fup_lookahead(decoder);
+ if (pge != decoder->pge)
+ intel_pt_log("Surprising PGE change in OVF!");
+ if (!decoder->pge)
+ decoder->vm_tm_corr_continuous = false;
+ break;
+
+ case INTEL_PT_FUP:
+ if (decoder->in_psb)
+ decoder->pge = true;
+ break;
+
+ case INTEL_PT_TRACESTOP:
+ decoder->pge = false;
+ decoder->vm_tm_corr_continuous = false;
+ decoder->have_tma = false;
+ break;
+
+ case INTEL_PT_PSB:
+ intel_pt_vm_tm_corr_psb(decoder, &data);
+ break;
+
+ case INTEL_PT_PIP:
+ decoder->pip_payload = decoder->packet.payload;
+ break;
+
+ case INTEL_PT_MTC:
+ intel_pt_calc_mtc_timestamp(decoder);
+ break;
+
+ case INTEL_PT_TSC:
+ intel_pt_vm_tm_corr_tsc(decoder, &data);
+ intel_pt_calc_tsc_timestamp(decoder);
+ decoder->vm_tm_corr_same_buf = true;
+ decoder->vm_tm_corr_continuous = decoder->pge;
+ break;
+
+ case INTEL_PT_TMA:
+ intel_pt_calc_tma(decoder);
+ break;
+
+ case INTEL_PT_CYC:
+ intel_pt_calc_cyc_timestamp(decoder);
+ break;
+
+ case INTEL_PT_CBR:
+ intel_pt_calc_cbr(decoder);
+ break;
+
+ case INTEL_PT_PSBEND:
+ decoder->in_psb = false;
+ data.psbend = false;
+ break;
+
+ case INTEL_PT_VMCS:
+ if (decoder->packet.payload != NO_VMCS)
+ decoder->vmcs = decoder->packet.payload;
+ break;
+
+ case INTEL_PT_BBP:
+ decoder->blk_type = decoder->packet.payload;
+ break;
+
+ case INTEL_PT_BIP:
+ if (decoder->blk_type == INTEL_PT_PEBS_BASIC &&
+ decoder->packet.count == 2)
+ intel_pt_vm_tm_corr_pebs_tsc(decoder);
+ break;
+
+ case INTEL_PT_BEP:
+ case INTEL_PT_BEP_IP:
+ decoder->blk_type = 0;
+ break;
+
+ case INTEL_PT_MODE_EXEC:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_MNT:
+ case INTEL_PT_PAD:
+ case INTEL_PT_PTWRITE_IP:
+ case INTEL_PT_PTWRITE:
+ case INTEL_PT_MWAIT:
+ case INTEL_PT_PWRE:
+ case INTEL_PT_EXSTOP_IP:
+ case INTEL_PT_EXSTOP:
+ case INTEL_PT_PWRX:
+ case INTEL_PT_BAD: /* Does not happen */
+ default:
+ break;
+ }
+ }
+
+ return err;
+}
+
#define HOP_PROCESS 0
#define HOP_IGNORE 1
#define HOP_RETURN 2
@@ -2898,6 +3596,15 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
if (err)
return err;
+ if (decoder->vm_time_correlation) {
+ decoder->in_psb = true;
+ if (!decoder->timestamp)
+ decoder->timestamp = 1;
+ decoder->state.type = 0;
+ decoder->pkt_state = INTEL_PT_STATE_VM_TIME_CORRELATION;
+ return 0;
+ }
+
decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_NO_IP;
@@ -2985,6 +3692,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
case INTEL_PT_STATE_RESAMPLE:
err = intel_pt_resample(decoder);
break;
+ case INTEL_PT_STATE_VM_TIME_CORRELATION:
+ err = intel_pt_vm_time_correlation(decoder);
+ break;
default:
err = intel_pt_bug(decoder);
break;
@@ -3231,6 +3941,7 @@ static unsigned char *adj_for_padding(unsigned char *buf_b,
* @len_b: size of second buffer
* @consecutive: returns true if there is data in buf_b that is consecutive
* to buf_a
+ * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
*
* If the trace contains TSC we can look at the last TSC of @buf_a and the
* first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -3243,7 +3954,8 @@ static unsigned char *adj_for_padding(unsigned char *buf_b,
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
size_t len_a,
unsigned char *buf_b,
- size_t len_b, bool *consecutive)
+ size_t len_b, bool *consecutive,
+ bool ooo_tsc)
{
uint64_t tsc_a, tsc_b;
unsigned char *p;
@@ -3278,7 +3990,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
start = buf_b + len_b - (rem_b - rem_a);
return adj_for_padding(start, buf_a, len_a);
}
- if (cmp < 0)
+ if (cmp < 0 && !ooo_tsc)
return buf_b; /* tsc_a < tsc_b => no overlap */
}
@@ -3296,6 +4008,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
* @have_tsc: can use TSC packets to detect overlap
* @consecutive: returns true if there is data in buf_b that is consecutive
* to buf_a
+ * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
*
* When trace samples or snapshots are recorded there is the possibility that
* the data overlaps. Note that, for the purposes of decoding, data is only
@@ -3306,7 +4019,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
*/
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc, bool *consecutive)
+ bool have_tsc, bool *consecutive,
+ bool ooo_tsc)
{
unsigned char *found;
@@ -3319,7 +4033,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
if (have_tsc) {
found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
- consecutive);
+ consecutive, ooo_tsc);
if (found)
return found;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index d9e62a7f6f0e..4b5e79fcf557 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -11,6 +11,8 @@
#include <stddef.h>
#include <stdbool.h>
+#include <linux/rbtree.h>
+
#include "intel-pt-insn-decoder.h"
#define INTEL_PT_IN_TX (1 << 0)
@@ -199,6 +201,14 @@ struct intel_pt_blk_items {
bool is_32_bit;
};
+struct intel_pt_vmcs_info {
+ struct rb_node rb_node;
+ uint64_t vmcs;
+ uint64_t tsc_offset;
+ bool reliable;
+ bool error_printed;
+};
+
struct intel_pt_state {
enum intel_pt_sample_type type;
bool from_nr;
@@ -244,9 +254,13 @@ struct intel_pt_params {
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
+ struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
void *data;
bool return_compression;
bool branch_enable;
+ bool vm_time_correlation;
+ bool vm_tm_corr_dry_run;
+ uint64_t first_timestamp;
uint64_t ctl;
uint64_t period;
enum intel_pt_period_type period_type;
@@ -256,6 +270,7 @@ struct intel_pt_params {
uint32_t tsc_ctc_ratio_d;
enum intel_pt_param_flags flags;
unsigned int quick;
+ int max_loops;
};
struct intel_pt_decoder;
@@ -269,8 +284,12 @@ int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp);
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc, bool *consecutive);
+ bool have_tsc, bool *consecutive,
+ bool ooo_tsc);
int intel_pt__strerror(int code, char *buf, size_t buflen);
+void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
+ uint64_t first_timestamp);
+
#endif
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index 388661f89c44..d900aab24b21 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -67,4 +67,9 @@ static inline void intel_pt_log_to(const char *msg, uint64_t u)
intel_pt_log("%s to " x64_fmt "\n", msg, u);
}
+#define intel_pt_log_var(var, fmt) intel_pt_log("%s: " #var " " fmt "\n", __func__, var)
+
+#define intel_pt_log_x32(var) intel_pt_log_var(var, "%#x")
+#define intel_pt_log_x64(var) intel_pt_log_var(var, "%#" PRIx64)
+
#endif
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 0dfec8761b9a..6f852b305e92 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -78,6 +78,7 @@ struct intel_pt {
u64 kernel_start;
u64 switch_ip;
u64 ptss_ip;
+ u64 first_timestamp;
struct perf_tsc_conversion tc;
bool cap_user_time_zero;
@@ -122,6 +123,7 @@ struct intel_pt {
u64 noretcomp_bit;
unsigned max_non_turbo_ratio;
unsigned cbr2khz;
+ int max_loops;
unsigned long num_events;
@@ -133,6 +135,9 @@ struct intel_pt {
struct ip_callchain *chain;
struct branch_stack *br_stack;
+
+ u64 dflt_tsc_offset;
+ struct rb_root vmcs_info;
};
enum switch_state {
@@ -271,6 +276,65 @@ static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
return !n || !perf_time__ranges_skip_sample(range, n, tm);
}
+static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
+ u64 vmcs,
+ u64 dflt_tsc_offset)
+{
+ struct rb_node **p = &rb_root->rb_node;
+ struct rb_node *parent = NULL;
+ struct intel_pt_vmcs_info *v;
+
+ while (*p) {
+ parent = *p;
+ v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);
+
+ if (v->vmcs == vmcs)
+ return v;
+
+ if (vmcs < v->vmcs)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ v = zalloc(sizeof(*v));
+ if (v) {
+ v->vmcs = vmcs;
+ v->tsc_offset = dflt_tsc_offset;
+ v->reliable = dflt_tsc_offset;
+
+ rb_link_node(&v->rb_node, parent, p);
+ rb_insert_color(&v->rb_node, rb_root);
+ }
+
+ return v;
+}
+
+static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
+{
+ struct intel_pt_queue *ptq = data;
+ struct intel_pt *pt = ptq->pt;
+
+ if (!vmcs && !pt->dflt_tsc_offset)
+ return NULL;
+
+ return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
+}
+
+static void intel_pt_free_vmcs_info(struct intel_pt *pt)
+{
+ struct intel_pt_vmcs_info *v;
+ struct rb_node *n;
+
+ n = rb_first(&pt->vmcs_info);
+ while (n) {
+ v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
+ n = rb_next(n);
+ rb_erase(&v->rb_node, &pt->vmcs_info);
+ free(v);
+ }
+}
+
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
@@ -278,9 +342,17 @@ static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *
void *start;
start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
- pt->have_tsc, &consecutive);
+ pt->have_tsc, &consecutive,
+ pt->synth_opts.vm_time_correlation);
if (!start)
return -EINVAL;
+ /*
+ * In the case of vm_time_correlation, the overlap might contain TSC
+ * packets that will not be fixed, and that will then no longer work for
+ * overlap detection. Avoid that by zeroing out the overlap.
+ */
+ if (pt->synth_opts.vm_time_correlation)
+ memset(b->data, 0, start - b->data);
b->use_size = b->data + b->size - start;
b->use_data = start;
if (b->use_size && consecutive)
@@ -901,7 +973,7 @@ static bool intel_pt_timeless_decoding(struct intel_pt *pt)
bool timeless_decoding = true;
u64 config;
- if (!pt->tsc_bit || !pt->cap_user_time_zero)
+ if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
return true;
evlist__for_each_entry(pt->session->evlist, evsel) {
@@ -949,6 +1021,19 @@ static bool intel_pt_have_tsc(struct intel_pt *pt)
return have_tsc;
}
+static bool intel_pt_have_mtc(struct intel_pt *pt)
+{
+ struct evsel *evsel;
+ u64 config;
+
+ evlist__for_each_entry(pt->session->evlist, evsel) {
+ if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
+ (config & pt->mtc_bit))
+ return true;
+ }
+ return false;
+}
+
static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
struct evsel *evsel;
@@ -1103,6 +1188,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.get_trace = intel_pt_get_trace;
params.walk_insn = intel_pt_walk_next_insn;
params.lookahead = intel_pt_lookahead;
+ params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
@@ -1112,6 +1198,10 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
params.quick = pt->synth_opts.quick;
+ params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
+ params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
+ params.first_timestamp = pt->first_timestamp;
+ params.max_loops = pt->max_loops;
if (pt->filts.cnt > 0)
params.pgd_ip = intel_pt_pgd_ip;
@@ -1176,6 +1266,21 @@ static void intel_pt_free_queue(void *priv)
free(ptq);
}
+static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
+{
+ unsigned int i;
+
+ pt->first_timestamp = timestamp;
+
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq && ptq->decoder)
+ intel_pt_set_first_timestamp(ptq->decoder, timestamp);
+ }
+}
+
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
struct auxtrace_queue *queue)
{
@@ -2379,7 +2484,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
if (pt->per_cpu_mmaps &&
(pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
!pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
- !pt->sampling_mode) {
+ !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
@@ -2878,6 +2983,8 @@ static int intel_pt_process_event(struct perf_session *session,
sample->time);
}
} else if (timestamp) {
+ if (!pt->first_timestamp)
+ intel_pt_first_timestamp(pt, timestamp);
err = intel_pt_process_queues(pt, timestamp);
}
if (err)
@@ -2964,6 +3071,7 @@ static void intel_pt_free(struct perf_session *session)
auxtrace_heap__free(&pt->heap);
intel_pt_free_events(session);
session->auxtrace = NULL;
+ intel_pt_free_vmcs_info(pt);
thread__put(pt->unknown_thread);
addr_filters__exit(&pt->filts);
zfree(&pt->chain);
@@ -3325,6 +3433,9 @@ static int intel_pt_perf_config(const char *var, const char *value, void *data)
if (!strcmp(var, "intel-pt.mispred-all"))
pt->mispred_all = perf_config_bool(var, value);
+ if (!strcmp(var, "intel-pt.max-loops"))
+ perf_config_int(&pt->max_loops, var, value);
+
return 0;
}
@@ -3407,6 +3518,65 @@ static int intel_pt_setup_time_ranges(struct intel_pt *pt,
return 0;
}
+static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
+{
+ struct intel_pt_vmcs_info *vmcs_info;
+ u64 tsc_offset, vmcs;
+ char *p = *args;
+
+ errno = 0;
+
+ p = skip_spaces(p);
+ if (!*p)
+ return 1;
+
+ tsc_offset = strtoull(p, &p, 0);
+ if (errno)
+ return -errno;
+ p = skip_spaces(p);
+ if (*p != ':') {
+ pt->dflt_tsc_offset = tsc_offset;
+ *args = p;
+ return 0;
+ }
+ p += 1;
+ while (1) {
+ vmcs = strtoull(p, &p, 0);
+ if (errno)
+ return -errno;
+ if (!vmcs)
+ return -EINVAL;
+ vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
+ if (!vmcs_info)
+ return -ENOMEM;
+ p = skip_spaces(p);
+ if (*p != ',')
+ break;
+ p += 1;
+ }
+ *args = p;
+ return 0;
+}
+
+static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
+{
+ char *args = pt->synth_opts.vm_tm_corr_args;
+ int ret;
+
+ if (!args)
+ return 0;
+
+ do {
+ ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
+ } while (!ret);
+
+ if (ret < 0) {
+ pr_err("Failed to parse VM Time Correlation options\n");
+ return ret;
+ }
+
+ return 0;
+}
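
Each argument consumed above is either a bare number (which becomes the default TSC Offset) or "offset:vmcs,vmcs,..." giving an offset for specific VMCS addresses, with arguments separated by spaces; the string arrives in pt->synth_opts.vm_tm_corr_args from the command-line option that enables VM time correlation. A simplified standalone re-implementation of the walk, fed a hypothetical option string, makes the accepted format concrete (it only prints what the real parser would record in pt->vmcs_info):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *arg = "0x1000 0x2000:0x3a,0x3b";   /* hypothetical option value */
        char *p = (char *)arg;

        while (*p) {
            while (isspace((unsigned char)*p))
                p++;
            if (!*p)
                break;
            unsigned long long offset = strtoull(p, &p, 0);
            if (*p != ':') {
                printf("default TSC Offset = %#llx\n", offset);
                continue;
            }
            do {
                p++;                                   /* skip ':' or ',' */
                unsigned long long vmcs = strtoull(p, &p, 0);
                printf("VMCS %#llx -> TSC Offset %#llx\n", vmcs, offset);
            } while (*p == ',');
        }
        return 0;
    }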
+
static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
[INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
@@ -3469,6 +3639,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
if (!pt)
return -ENOMEM;
+ pt->vmcs_info = RB_ROOT;
+
addr_filters__init(&pt->filts);
err = perf_config(intel_pt_perf_config, pt);
@@ -3481,6 +3653,20 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
intel_pt_log_set_name(INTEL_PT_PMU_NAME);
+ if (session->itrace_synth_opts->set) {
+ pt->synth_opts = *session->itrace_synth_opts;
+ } else {
+ struct itrace_synth_opts *opts = session->itrace_synth_opts;
+
+ itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
+ if (!opts->default_no_sample && !opts->inject) {
+ pt->synth_opts.branches = false;
+ pt->synth_opts.callchain = true;
+ pt->synth_opts.add_callchain = true;
+ }
+ pt->synth_opts.thread_stack = opts->thread_stack;
+ }
+
pt->session = session;
pt->machine = &session->machines.host; /* No kvm support */
pt->auxtrace_type = auxtrace_info->type;
@@ -3562,6 +3748,28 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
pt->sampling_mode = intel_pt_sampling_mode(pt);
pt->est_tsc = !pt->timeless_decoding;
+ if (pt->synth_opts.vm_time_correlation) {
+ if (pt->timeless_decoding) {
+ pr_err("Intel PT has no time information for VM Time Correlation\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ if (session->itrace_synth_opts->ptime_range) {
+ pr_err("Time ranges cannot be specified with VM Time Correlation\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ /* Currently TSC Offset is calculated using MTC packets */
+ if (!intel_pt_have_mtc(pt)) {
+ pr_err("MTC packets must have been enabled for VM Time Correlation\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ err = intel_pt_parse_vm_tm_corr_args(pt);
+ if (err)
+ goto err_free_queues;
+ }
+
pt->unknown_thread = thread__new(999999999, 999999999);
if (!pt->unknown_thread) {
err = -ENOMEM;
@@ -3611,21 +3819,6 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
goto err_delete_thread;
}
- if (session->itrace_synth_opts->set) {
- pt->synth_opts = *session->itrace_synth_opts;
- } else {
- itrace_synth_opts__set_default(&pt->synth_opts,
- session->itrace_synth_opts->default_no_sample);
- if (!session->itrace_synth_opts->default_no_sample &&
- !session->itrace_synth_opts->inject) {
- pt->synth_opts.branches = false;
- pt->synth_opts.callchain = true;
- pt->synth_opts.add_callchain = true;
- }
- pt->synth_opts.thread_stack =
- session->itrace_synth_opts->thread_stack;
- }
-
if (pt->synth_opts.log)
intel_pt_log_enable();
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 3ceaf7ef3301..cbd9b268f168 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -504,6 +504,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
goto errout;
}
+ err = -ENOMEM;
if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
template, llc_path, opts) < 0) {
pr_err("ERROR:\tnot enough memory to setup command line\n");
@@ -524,6 +525,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
pr_debug("llvm compiling command template: %s\n", template);
+ err = -ENOMEM;
if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
goto errout;
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
index 39062df02629..51424cdc3b68 100644
--- a/tools/perf/util/lzma.c
+++ b/tools/perf/util/lzma.c
@@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (ferror(infile)) {
pr_err("lzma: read error: %s\n", strerror(errno));
- goto err_fclose;
+ goto err_lzma_end;
}
if (feof(infile))
@@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (writen(output_fd, buf_out, write_size) != write_size) {
pr_err("lzma: write error: %s\n", strerror(errno));
- goto err_fclose;
+ goto err_lzma_end;
}
strm.next_out = buf_out;
@@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd)
break;
pr_err("lzma: failed %s\n", lzma_strerror(ret));
- goto err_fclose;
+ goto err_lzma_end;
}
}
err = 0;
+err_lzma_end:
+ lzma_end(&strm);
err_fclose:
fclose(infile);
return err;
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index f93a852ad838..f0e75df72b80 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -12,6 +12,8 @@
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
unsigned int perf_mem_events__loads_ldlat = 30;
@@ -24,8 +26,6 @@ static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
};
#undef E
-#undef E
-
static char mem_loads_name[100];
static bool mem_loads_name__init;
@@ -37,7 +37,7 @@ struct perf_mem_event * __weak perf_mem_events__ptr(int i)
return &perf_mem_events[i];
}
-char * __weak perf_mem_events__name(int i)
+char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
@@ -100,6 +100,15 @@ int perf_mem_events__parse(const char *str)
return -1;
}
+static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
+ return !stat(path, &st);
+}
+
int perf_mem_events__init(void)
{
const char *mnt = sysfs__mount();
@@ -110,9 +119,9 @@ int perf_mem_events__init(void)
return -ENOENT;
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- char path[PATH_MAX];
struct perf_mem_event *e = perf_mem_events__ptr(j);
- struct stat st;
+ struct perf_pmu *pmu;
+ char sysfs_name[100];
/*
* If the event entry isn't valid, skip initialization
@@ -121,11 +130,20 @@ int perf_mem_events__init(void)
if (!e->tag)
continue;
- scnprintf(path, PATH_MAX, "%s/devices/%s",
- mnt, e->sysfs_name);
+ if (!perf_pmu__has_hybrid()) {
+ scnprintf(sysfs_name, sizeof(sysfs_name),
+ e->sysfs_name, "cpu");
+ e->supported = perf_mem_event__supported(mnt, sysfs_name);
+ } else {
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ scnprintf(sysfs_name, sizeof(sysfs_name),
+ e->sysfs_name, pmu->name);
+ e->supported |= perf_mem_event__supported(mnt, sysfs_name);
+ }
+ }
- if (!stat(path, &st))
- e->supported = found = true;
+ if (e->supported)
+ found = true;
}
return found ? 0 : -ENOENT;
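
On hybrid systems the event's sysfs_name is now a format string into which each hybrid PMU name is substituted before the sysfs path is probed with stat(). A small sketch of that path construction; the format string and PMU names here are illustrative stand-ins (the real sysfs_name strings live in the arch-specific mem-events tables):

    #include <stdio.h>

    int main(void)
    {
        const char *sysfs_fmt = "%s/events/mem-loads";       /* assumed e->sysfs_name     */
        const char *pmus[]    = { "cpu_core", "cpu_atom" };  /* hybrid PMU names          */
        const char *mnt       = "/sys";                      /* what sysfs__mount() gives */
        char sysfs_name[100], path[256];

        for (int i = 0; i < 2; i++) {
            snprintf(sysfs_name, sizeof(sysfs_name), sysfs_fmt, pmus[i]);
            snprintf(path, sizeof(path), "%s/devices/%s", mnt, sysfs_name);
            printf("stat() %s\n", path);   /* e->supported |= (this path exists) */
        }
        return 0;
    }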
@@ -141,11 +159,76 @@ void perf_mem_events__list(void)
fprintf(stderr, "%-13s%-*s%s\n",
e->tag ?: "",
verbose > 0 ? 25 : 0,
- verbose > 0 ? perf_mem_events__name(j) : "",
+ verbose > 0 ? perf_mem_events__name(j, NULL) : "",
e->supported ? ": available" : "");
}
}
+static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
+ int idx)
+{
+ const char *mnt = sysfs__mount();
+ char sysfs_name[100];
+ struct perf_pmu *pmu;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
+ pmu->name);
+ if (!perf_mem_event__supported(mnt, sysfs_name)) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events__name(idx, pmu->name));
+ }
+ }
+}
+
+int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ char **rec_tmp, int *tmp_nr)
+{
+ int i = *argv_nr, k = 0;
+ struct perf_mem_event *e;
+ struct perf_pmu *pmu;
+ char *s;
+
+ for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ e = perf_mem_events__ptr(j);
+ if (!e->record)
+ continue;
+
+ if (!perf_pmu__has_hybrid()) {
+ if (!e->supported) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events__name(j, NULL));
+ return -1;
+ }
+
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = perf_mem_events__name(j, NULL);
+ } else {
+ if (!e->supported) {
+ perf_mem_events__print_unsupport_hybrid(e, j);
+ return -1;
+ }
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ rec_argv[i++] = "-e";
+ s = perf_mem_events__name(j, pmu->name);
+ if (s) {
+ s = strdup(s);
+ if (!s)
+ return -1;
+
+ rec_argv[i++] = s;
+ rec_tmp[k++] = s;
+ }
+ }
+ }
+ }
+
+ *argv_nr = i;
+ *tmp_nr = k;
+ return 0;
+}
+
static const char * const tlb_access[] = {
"N/A",
"HIT",
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index cacdebd65b8a..916242f8020a 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -38,11 +38,13 @@ extern unsigned int perf_mem_events__loads_ldlat;
int perf_mem_events__parse(const char *str);
int perf_mem_events__init(void);
-char *perf_mem_events__name(int i);
+char *perf_mem_events__name(int i, char *pmu_name);
struct perf_mem_event *perf_mem_events__ptr(int i);
bool is_mem_loads_aux_event(struct evsel *leader);
void perf_mem_events__list(void);
+int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ char **rec_tmp, int *tmp_nr);
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index d3cf2dee36c8..99d047c5ead0 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -219,9 +219,9 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
if (has_constraint && ev->weak_group)
continue;
/* Ignore event if already used and merging is disabled. */
- if (metric_no_merge && test_bit(ev->idx, evlist_used))
+ if (metric_no_merge && test_bit(ev->core.idx, evlist_used))
continue;
- if (!has_constraint && ev->leader != current_leader) {
+ if (!has_constraint && !evsel__has_leader(ev, current_leader)) {
/*
* Start of a new group, discard the whole match and
* start again.
@@ -229,7 +229,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
matched_events = 0;
memset(metric_events, 0,
sizeof(struct evsel *) * idnum);
- current_leader = ev->leader;
+ current_leader = evsel__leader(ev);
}
/*
* Check for duplicate events with the same name. For example,
@@ -269,7 +269,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
for (i = 0; i < idnum; i++) {
ev = metric_events[i];
/* Don't free the used events. */
- set_bit(ev->idx, evlist_used);
+ set_bit(ev->core.idx, evlist_used);
/*
* The metric leader points to the identically named event in
* metric_events.
@@ -287,11 +287,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
* when the group is left.
*/
if (!has_constraint &&
- ev->leader != metric_events[i]->leader &&
- evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
+ ev->core.leader != metric_events[i]->core.leader &&
+ evsel_same_pmu_or_none(evsel__leader(ev), evsel__leader(metric_events[i])))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
- set_bit(ev->idx, evlist_used);
+ set_bit(ev->core.idx, evlist_used);
ev->metric_leader = metric_events[i];
}
}
@@ -391,7 +391,7 @@ static int metricgroup__setup_events(struct list_head *groups,
}
evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
- if (!test_bit(evsel->idx, evlist_used)) {
+ if (!test_bit(evsel->core.idx, evlist_used)) {
evlist__remove(perf_evlist, evsel);
evsel__delete(evsel);
}
@@ -1312,7 +1312,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
nd = rblist__entry(old_metric_events, i);
old_me = container_of(nd, struct metric_event, nd);
- evsel = evlist__find_evsel(evlist, old_me->evsel->idx);
+ evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
if (!evsel)
return -EINVAL;
new_me = metricgroup__lookup(new_metric_events, evsel, true);
@@ -1320,7 +1320,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
return -ENOMEM;
pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
- cgrp ? cgrp->name : "root", evsel->name, evsel->idx);
+ cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
list_for_each_entry(old_expr, &old_me->head, nd) {
new_expr = malloc(sizeof(*new_expr));
@@ -1363,7 +1363,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
/* copy evsel in the same position */
for (idx = 0; idx < nr; idx++) {
evsel = old_expr->metric_events[idx];
- evsel = evlist__find_evsel(evlist, evsel->idx);
+ evsel = evlist__find_evsel(evlist, evsel->core.idx);
if (evsel == NULL) {
free(new_expr->metric_events);
free(new_expr->metric_refs);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 84108c17f48d..e5eae23cfceb 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1740,7 +1740,7 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
leader = list_first_entry(list, struct evsel, core.node);
evsel = list_last_entry(list, struct evsel, core.node);
- total_members = evsel->idx - leader->idx + 1;
+ total_members = evsel->core.idx - leader->core.idx + 1;
leaders = calloc(total_members, sizeof(uintptr_t));
if (WARN_ON(!leaders))
@@ -1800,7 +1800,7 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
__evlist__for_each_entry(list, evsel) {
if (i >= nr_pmu)
i = 0;
- evsel->leader = (struct evsel *) leaders[i++];
+ evsel__set_leader(evsel, (struct evsel *) leaders[i++]);
}
/* The number of members and group name are same for each group */
@@ -1833,7 +1833,7 @@ void parse_events__set_leader(char *name, struct list_head *list,
if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state))
return;
- __evlist__set_leader(list);
+ __perf_evlist__set_leader(list);
leader = list_entry(list->next, struct evsel, core.node);
leader->group_name = name ? strdup(name) : NULL;
}
@@ -2285,7 +2285,7 @@ int __parse_events(struct evlist *evlist, const char *str,
if (!ret) {
struct evsel *last;
- evlist->nr_groups += parse_state.nr_groups;
+ evlist->core.nr_groups += parse_state.nr_groups;
last = evlist__last(evlist);
last->cmdline_group_boundary = true;
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index aba12a4d488e..9321bd0e2f76 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -316,7 +316,7 @@ event_pmu_name opt_pmu_config
if (!strncmp(name, "uncore_", 7) &&
strncmp($1, "uncore_", 7))
name += 7;
- if (!fnmatch(pattern, name, 0)) {
+ if (!perf_pmu__match(pattern, name, $1)) {
if (parse_events_copy_term_list(orig_terms, &terms))
CLEANUP_YYABORT;
if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
diff --git a/tools/perf/util/perf_dlfilter.h b/tools/perf/util/perf_dlfilter.h
new file mode 100644
index 000000000000..3eef03d661b4
--- /dev/null
+++ b/tools/perf/util/perf_dlfilter.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * perf_dlfilter.h: API for perf --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#ifndef _LINUX_PERF_DLFILTER_H
+#define _LINUX_PERF_DLFILTER_H
+
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+};
+
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* True if this sample event will be filtered out */
+ const char *comm;
+};
+
+struct perf_dlfilter_fns {
+ /* Return information about ip */
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ /* Return information about addr (if addr_correlates_sym) */
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ /* Return arguments from --dlarg option */
+ char **(*args)(void *ctx, int *dlargc);
+ /*
+ * Return information about address (al->size must be set before
+ * calling). Returns 0 on success, -1 otherwise.
+ */
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ /* Return instruction bytes and length */
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ /* Return source file name and line number */
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ /* Return perf_event_attr, refer <linux/perf_event.h> */
+ struct perf_event_attr *(*attr)(void *ctx);
+ /* Read object code, return number of bytes read */
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ /* Reserved */
+ void *(*reserved[120])(void *);
+};
+
+/*
+ * If implemented, 'start' will be called at the beginning,
+ * before any calls to 'filter_event'. Return 0 to indicate success,
+ * or return a negative error code. '*data' can be assigned for use
+ * by other functions. 'ctx' is needed for calls to perf_dlfilter_fns,
+ * but most perf_dlfilter_fns are not valid when called from 'start'.
+ */
+int start(void **data, void *ctx);
+
+/*
+ * If implemented, 'stop' will be called at the end,
+ * after any calls to 'filter_event'. Return 0 to indicate success, or
+ * return a negative error code. 'data' is set by start(). 'ctx' is
+ * needed for calls to perf_dlfilter_fns, but most perf_dlfilter_fns
+ * are not valid when called from 'stop'.
+ */
+int stop(void *data, void *ctx);
+
+/*
+ * If implemented, 'filter_event' will be called for each sample
+ * event. Return 0 to keep the sample event, 1 to filter it out, or
+ * return a negative error code. 'data' is set by start(). 'ctx' is
+ * needed for calls to perf_dlfilter_fns.
+ */
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+
+/*
+ * The same as 'filter_event' except it is called before internal
+ * filtering.
+ */
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+
+/*
+ * If implemented, return a one-line description of the filter, and optionally
+ * a longer description.
+ */
+const char *filter_description(const char **long_description);
+
+#endif
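
The header above is the whole contract for a --dlfilter shared object: implement any of start/stop/filter_event/filter_event_early/filter_description and build a .so. A minimal filter, as a sketch, keeps only samples flagged as calls; it touches nothing beyond the sample fields, so none of the perf_dlfilter_fns callbacks are needed (the include assumes perf_dlfilter.h is on the include path):

    /* call-only.c: minimal dlfilter sketch */
    #include "perf_dlfilter.h"

    /* Keep samples that represent calls, drop everything else */
    int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
    {
        (void)data;
        (void)ctx;
        return (sample->flags & PERF_DLFILTER_FLAG_CALL) ? 0 : 1;   /* 0 = keep, 1 = drop */
    }

Built with something like "gcc -shared -fPIC -o call-only.so call-only.c" and handed to perf via the --dlfilter option.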
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index 6eef6dfeaa57..756295dedccc 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -99,7 +99,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
grp_leader = evsel;
if (grp_evt > -1) {
- evsel->leader = grp_leader;
+ evsel__set_leader(evsel, grp_leader);
grp_leader->core.nr_members++;
grp_evt++;
}
@@ -110,7 +110,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
"cannot close a non-existing event group\n");
goto error;
}
- evlist->nr_groups++;
+ evlist->core.nr_groups++;
grp_leader = NULL;
grp_evt = -1;
}
diff --git a/tools/perf/util/pmu-hybrid.h b/tools/perf/util/pmu-hybrid.h
index d0fa7bc50a76..2b186c26a43e 100644
--- a/tools/perf/util/pmu-hybrid.h
+++ b/tools/perf/util/pmu-hybrid.h
@@ -19,4 +19,15 @@ struct perf_pmu *perf_pmu__find_hybrid_pmu(const char *name);
bool perf_pmu__is_hybrid(const char *name);
char *perf_pmu__hybrid_type_to_pmu(const char *type);
+static inline int perf_pmu__hybrid_pmu_num(void)
+{
+ struct perf_pmu *pmu;
+ int num = 0;
+
+ perf_pmu__for_each_hybrid_pmu(pmu)
+ num++;
+
+ return num;
+}
+
#endif /* __PMU_HYBRID_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 88c8ecdc60b0..fc683bc41715 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>
+#include <linux/ctype.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <errno.h>
@@ -17,6 +18,7 @@
#include <locale.h>
#include <regex.h>
#include <perf/cpumap.h>
+#include <fnmatch.h>
#include "debug.h"
#include "evsel.h"
#include "pmu.h"
@@ -740,6 +742,35 @@ struct pmu_events_map *__weak pmu_events_map__find(void)
return perf_pmu__find_map(NULL);
}
+/*
+ * Suffix must be in form tok_{digits}, or tok{digits}, or same as pmu_name
+ * to be valid.
+ */
+static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok)
+{
+ const char *p;
+
+ if (strncmp(pmu_name, tok, strlen(tok)))
+ return false;
+
+ p = pmu_name + strlen(tok);
+ if (*p == 0)
+ return true;
+
+ if (*p == '_')
+ ++p;
+
+ /* Ensure we end in a number */
+ while (1) {
+ if (!isdigit(*p))
+ return false;
+ if (*(++p) == 0)
+ break;
+ }
+
+ return true;
+}
+
bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
{
char *tmp = NULL, *tok, *str;
@@ -766,12 +797,19 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
* match "socket" in "socketX_pmunameY" and then "pmuname" in
* "pmunameY".
*/
- for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) {
+ while (1) {
+ char *next_tok = strtok_r(NULL, ",", &tmp);
+
name = strstr(name, tok);
- if (!name) {
+ if (!name ||
+ (!next_tok && !perf_pmu__valid_suffix(name, tok))) {
res = false;
goto out;
}
+ if (!next_tok)
+ break;
+ tok = next_tok;
+ name += strlen(tok);
}
res = true;
@@ -927,6 +965,13 @@ static struct perf_pmu *pmu_lookup(const char *name)
LIST_HEAD(format);
LIST_HEAD(aliases);
__u32 type;
+ bool is_hybrid = perf_pmu__hybrid_mounted(name);
+
+ /*
+ * A "cpu_" prefixed PMU name is only expected on hybrid systems;
+ * reject it otherwise, since the sysfs entry is not a valid PMU
+ */
+ if (!strncmp(name, "cpu_", 4) && !is_hybrid)
+ return NULL;
/*
* The pmu data we store & need consists of the pmu
@@ -955,7 +1000,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->is_uncore = pmu_is_uncore(name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
- pmu->is_hybrid = perf_pmu__hybrid_mounted(name);
+ pmu->is_hybrid = is_hybrid;
pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
pmu_add_sys_aliases(&aliases, pmu);
@@ -1872,3 +1917,14 @@ bool perf_pmu__has_hybrid(void)
return !list_empty(&perf_pmu__hybrid_pmus);
}
+
+int perf_pmu__match(char *pattern, char *name, char *tok)
+{
+ if (fnmatch(pattern, name, 0))
+ return -1;
+
+ if (tok && !perf_pmu__valid_suffix(name, tok))
+ return -1;
+
+ return 0;
+}
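
perf_pmu__match() accepts a name when the fnmatch() pattern matches and, if a token is supplied, when the name is the token itself or the token followed by an optional '_' and digits. A standalone sketch with a local copy of the suffix rule (so it compiles outside the perf tree) and a few invented PMU names shows what the suffix check adds over plain fnmatch():

    #include <ctype.h>
    #include <fnmatch.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Local copy of the suffix rule, for illustration only */
    static bool valid_suffix(const char *pmu_name, const char *tok)
    {
        const char *p;

        if (strncmp(pmu_name, tok, strlen(tok)))
            return false;
        p = pmu_name + strlen(tok);
        if (*p == 0)
            return true;
        if (*p == '_')
            ++p;
        while (1) {
            if (!isdigit((unsigned char)*p))
                return false;
            if (*(++p) == 0)
                break;
        }
        return true;
    }

    int main(void)
    {
        const char *pattern = "uncore_cbox*";   /* invented pattern and token */
        const char *tok = "uncore_cbox";
        const char *names[] = { "uncore_cbox", "uncore_cbox_0", "uncore_cbox4", "uncore_cbox_extra" };

        for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            bool ok = !fnmatch(pattern, names[i], 0) && valid_suffix(names[i], tok);
            printf("%-18s -> %s\n", names[i], ok ? "match" : "no match");
        }
        return 0;
    }

Only "uncore_cbox_extra" is rejected here: fnmatch() alone would have accepted it.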
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index a790ef758171..926da483a141 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -133,5 +133,6 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
char *name);
bool perf_pmu__has_hybrid(void);
+int perf_pmu__match(char *pattern, char *name, char *tok);
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a78c8d59a555..b2a02c9ab8ea 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -108,7 +108,6 @@ void exit_probe_symbol_maps(void)
static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
{
- /* kmap->ref_reloc_sym should be set if host_machine is initialized */
struct kmap *kmap;
struct map *map = machine__kernel_map(host_machine);
@@ -180,8 +179,10 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
struct map *map;
map = dso__new_map(target);
- if (map && map->dso)
+ if (map && map->dso) {
+ nsinfo__put(map->dso->nsinfo);
map->dso->nsinfo = nsinfo__get(nsi);
+ }
return map;
} else {
return kernel_get_module_map(target);
@@ -238,8 +239,8 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
clear_probe_trace_event(tevs + i);
}
-static bool kprobe_blacklist__listed(unsigned long address);
-static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
+static bool kprobe_blacklist__listed(u64 address);
+static bool kprobe_warn_out_range(const char *symbol, u64 address)
{
struct map *map;
bool ret = false;
@@ -399,8 +400,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
pr_debug("Symbol %s address found : %" PRIx64 "\n",
pp->function, address);
- ret = debuginfo__find_probe_point(dinfo, (unsigned long)address,
- result);
+ ret = debuginfo__find_probe_point(dinfo, address, result);
if (ret <= 0)
ret = (!ret) ? -ENOENT : ret;
else {
@@ -588,7 +588,7 @@ static void debuginfo_cache__exit(void)
}
-static int get_text_start_address(const char *exec, unsigned long *address,
+static int get_text_start_address(const char *exec, u64 *address,
struct nsinfo *nsi)
{
Elf *elf;
@@ -633,7 +633,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
bool is_kprobe)
{
struct debuginfo *dinfo = NULL;
- unsigned long stext = 0;
+ u64 stext = 0;
u64 addr = tp->address;
int ret = -ENOENT;
@@ -661,8 +661,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
dinfo = debuginfo_cache__open(tp->module, verbose <= 0);
if (dinfo)
- ret = debuginfo__find_probe_point(dinfo,
- (unsigned long)addr, pp);
+ ret = debuginfo__find_probe_point(dinfo, addr, pp);
else
ret = -ENOENT;
@@ -677,14 +676,19 @@ error:
/* Adjust symbol name and address */
static int post_process_probe_trace_point(struct probe_trace_point *tp,
- struct map *map, unsigned long offs)
+ struct map *map, u64 offs)
{
struct symbol *sym;
u64 addr = tp->address - offs;
sym = map__find_symbol(map, addr);
- if (!sym)
- return -ENOENT;
+ if (!sym) {
+ /*
+ * If the address is in the inittext section, the map cannot
+ * find it. Ignore it if we are probing an offline kernel.
+ */
+ return (symbol_conf.ignore_vmlinux_buildid) ? 0 : -ENOENT;
+ }
if (strcmp(sym->name, tp->symbol)) {
/* If we have no realname, use symbol for it */
@@ -715,7 +719,7 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *pathname)
{
struct map *map;
- unsigned long stext = 0;
+ u64 stext = 0;
int i, ret = 0;
/* Prepare a map for offline binary */
@@ -741,7 +745,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
struct nsinfo *nsi)
{
int i, ret = 0;
- unsigned long stext = 0;
+ u64 stext = 0;
if (!exec)
return 0;
@@ -786,7 +790,7 @@ post_process_module_probe_trace_events(struct probe_trace_event *tevs,
mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
ret = post_process_probe_trace_point(&tevs[i].point,
- map, (unsigned long)text_offs);
+ map, text_offs);
if (ret < 0)
break;
tevs[i].point.module =
@@ -819,7 +823,10 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
reloc_sym = kernel_get_ref_reloc_sym(&map);
if (!reloc_sym) {
- pr_warning("Relocated base symbol is not found!\n");
+ pr_warning("Relocated base symbol is not found! "
+ "Check /proc/sys/kernel/kptr_restrict\n"
+ "and /proc/sys/kernel/perf_event_paranoid. "
+ "Or run as privileged perf user.\n\n");
return -EINVAL;
}
@@ -1527,7 +1534,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
* so tmp[1] should always be valid (but could be '\0').
*/
if (tmp && !strncmp(tmp, "0x", 2)) {
- pp->abs_address = strtoul(pp->function, &tmp, 0);
+ pp->abs_address = strtoull(pp->function, &tmp, 0);
if (*tmp != '\0') {
semantic_error("Invalid absolute address.\n");
return -EINVAL;
@@ -1902,7 +1909,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
argv[i] = NULL;
argc -= 1;
} else
- tp->address = strtoul(fmt1_str, NULL, 0);
+ tp->address = strtoull(fmt1_str, NULL, 0);
} else {
/* Only the symbol-based probe has offset */
tp->symbol = strdup(fmt1_str);
@@ -2120,69 +2127,81 @@ static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
}
static int
-synthesize_uprobe_trace_def(struct probe_trace_event *tev, struct strbuf *buf)
+synthesize_probe_trace_args(struct probe_trace_event *tev, struct strbuf *buf)
{
- struct probe_trace_point *tp = &tev->point;
- int err;
+ int i, ret = 0;
- err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address);
+ for (i = 0; i < tev->nargs && ret >= 0; i++)
+ ret = synthesize_probe_trace_arg(&tev->args[i], buf);
- if (err >= 0 && tp->ref_ctr_offset) {
- if (!uprobe_ref_ctr_is_supported())
- return -1;
- err = strbuf_addf(buf, "(0x%lx)", tp->ref_ctr_offset);
- }
- return err >= 0 ? 0 : -1;
+ return ret;
}
-char *synthesize_probe_trace_command(struct probe_trace_event *tev)
+static int
+synthesize_uprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
{
- struct probe_trace_point *tp = &tev->point;
- struct strbuf buf;
- char *ret = NULL;
- int i, err;
+ int err;
/* Uprobes must have tp->module */
- if (tev->uprobes && !tp->module)
- return NULL;
-
- if (strbuf_init(&buf, 32) < 0)
- return NULL;
-
- if (strbuf_addf(&buf, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
- tev->group, tev->event) < 0)
- goto error;
+ if (!tp->module)
+ return -EINVAL;
/*
* If tp->address == 0, then this point must be a
* absolute address uprobe.
* try_to_find_absolute_address() should have made
* tp->symbol to "0x0".
*/
- if (tev->uprobes && !tp->address) {
- if (!tp->symbol || strcmp(tp->symbol, "0x0"))
- goto error;
- }
+ if (!tp->address && (!tp->symbol || strcmp(tp->symbol, "0x0")))
+ return -EINVAL;
/* Use the tp->address for uprobes */
- if (tev->uprobes) {
- err = synthesize_uprobe_trace_def(tev, &buf);
- } else if (!strncmp(tp->symbol, "0x", 2)) {
+ err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address);
+
+ if (err >= 0 && tp->ref_ctr_offset) {
+ if (!uprobe_ref_ctr_is_supported())
+ return -EINVAL;
+ err = strbuf_addf(buf, "(0x%lx)", tp->ref_ctr_offset);
+ }
+ return err >= 0 ? 0 : err;
+}
+
+static int
+synthesize_kprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
+{
+ if (!strncmp(tp->symbol, "0x", 2)) {
/* Absolute address. See try_to_find_absolute_address() */
- err = strbuf_addf(&buf, "%s%s0x%lx", tp->module ?: "",
+ return strbuf_addf(buf, "%s%s0x%" PRIx64, tp->module ?: "",
tp->module ? ":" : "", tp->address);
} else {
- err = strbuf_addf(&buf, "%s%s%s+%lu", tp->module ?: "",
+ return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "",
tp->module ? ":" : "", tp->symbol, tp->offset);
}
+}
- if (err)
+char *synthesize_probe_trace_command(struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ struct strbuf buf;
+ char *ret = NULL;
+ int err;
+
+ if (strbuf_init(&buf, 32) < 0)
+ return NULL;
+
+ if (strbuf_addf(&buf, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event) < 0)
goto error;
- for (i = 0; i < tev->nargs; i++)
- if (synthesize_probe_trace_arg(&tev->args[i], &buf) < 0)
- goto error;
+ if (tev->uprobes)
+ err = synthesize_uprobe_trace_def(tp, &buf);
+ else
+ err = synthesize_kprobe_trace_def(tp, &buf);
- ret = strbuf_detach(&buf, NULL);
+ if (err >= 0)
+ err = synthesize_probe_trace_args(tev, &buf);
+
+ if (err >= 0)
+ ret = strbuf_detach(&buf, NULL);
error:
strbuf_release(&buf);
return ret;
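
The strings assembled above follow the tracefs kprobe_events/uprobe_events text format. As a rough illustration only (the probe name and location below are made up, and writing the file needs root plus CONFIG_KPROBE_EVENTS), the same format can be exercised by hand:

# Hedged sketch: add a kprobe definition in the same "group/event symbol+offset"
# form that synthesize_probe_trace_command() emits, then remove it again.
probe = "p:probe/my_vfs_read vfs_read+0\n"
with open("/sys/kernel/tracing/kprobe_events", "a") as f:
    f.write(probe)
# A uprobe definition names a binary and an offset instead, e.g.
# "p:probe_app/func /usr/bin/app:0x1234".
with open("/sys/kernel/tracing/kprobe_events", "a") as f:
    f.write("-:probe/my_vfs_read\n")  # a leading '-' deletes the event
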
@@ -2250,7 +2269,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp,
pp->function = strdup(tp->symbol);
pp->offset = tp->offset;
} else {
- ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
+ ret = e_snprintf(buf, 128, "0x%" PRIx64, tp->address);
if (ret < 0)
return ret;
pp->function = strdup(buf);
@@ -2431,8 +2450,8 @@ void clear_probe_trace_event(struct probe_trace_event *tev)
struct kprobe_blacklist_node {
struct list_head list;
- unsigned long start;
- unsigned long end;
+ u64 start;
+ u64 end;
char *symbol;
};
@@ -2477,7 +2496,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
}
INIT_LIST_HEAD(&node->list);
list_add_tail(&node->list, blacklist);
- if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) {
+ if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) {
ret = -EINVAL;
break;
}
@@ -2493,7 +2512,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
ret = -ENOMEM;
break;
}
- pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n",
+ pr_debug2("Blacklist: 0x%" PRIx64 "-0x%" PRIx64 ", %s\n",
node->start, node->end, node->symbol);
ret++;
}
@@ -2505,8 +2524,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
}
static struct kprobe_blacklist_node *
-kprobe_blacklist__find_by_address(struct list_head *blacklist,
- unsigned long address)
+kprobe_blacklist__find_by_address(struct list_head *blacklist, u64 address)
{
struct kprobe_blacklist_node *node;
@@ -2534,7 +2552,7 @@ static void kprobe_blacklist__release(void)
kprobe_blacklist__delete(&kprobe_blacklist);
}
-static bool kprobe_blacklist__listed(unsigned long address)
+static bool kprobe_blacklist__listed(u64 address)
{
return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address);
}
@@ -2934,7 +2952,7 @@ static int find_probe_functions(struct map *map, char *name,
bool cut_version = true;
if (map__load(map) < 0)
- return 0;
+ return -EACCES; /* Possible permission error to load symbols */
/* If user gives a version, don't cut off the version from symbols */
if (strchr(name, '@'))
@@ -2973,6 +2991,17 @@ void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
struct map *map __maybe_unused,
struct symbol *sym __maybe_unused) { }
+
+static void pr_kallsyms_access_error(void)
+{
+ pr_err("Please ensure you can read the /proc/kallsyms symbol addresses.\n"
+ "If /proc/sys/kernel/kptr_restrict is '2', you can not read\n"
+ "kernel symbol addresses even if you are a superuser. Please change\n"
+ "it to '1'. If kptr_restrict is '1', the superuser can read the\n"
+ "symbol addresses.\n"
+ "In that case, please run this command again with sudo.\n");
+}
+
/*
* Find probe function addresses from map.
* Return an error or the number of found probe_trace_event
@@ -3009,8 +3038,16 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
*/
num_matched_functions = find_probe_functions(map, pp->function, syms);
if (num_matched_functions <= 0) {
- pr_err("Failed to find symbol %s in %s\n", pp->function,
- pev->target ? : "kernel");
+ if (num_matched_functions == -EACCES) {
+ pr_err("Failed to load symbols from %s\n",
+ pev->target ?: "/proc/kallsyms");
+ if (pev->target)
+ pr_err("Please ensure the file is not stripped.\n");
+ else
+ pr_kallsyms_access_error();
+ } else
+ pr_err("Failed to find symbol %s in %s\n", pp->function,
+ pev->target ? : "kernel");
ret = -ENOENT;
goto out;
} else if (num_matched_functions > probe_conf.max_probes) {
@@ -3025,7 +3062,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
(!pp->retprobe || kretprobe_offset_is_supported())) {
reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (!reloc_sym) {
- pr_warning("Relocated base symbol is not found!\n");
+ pr_warning("Relocated base symbol is not found! "
+ "Check /proc/sys/kernel/kptr_restrict\n"
+ "and /proc/sys/kernel/perf_event_paranoid. "
+ "Or run as privileged perf user.\n\n");
ret = -EINVAL;
goto out;
}
@@ -3180,7 +3220,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
* In __add_probe_trace_events, a NULL symbol is interpreted as
* invalid.
*/
- if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
+ if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0)
goto errout;
/* For kprobe, check range */
@@ -3191,7 +3231,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
goto errout;
}
- if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0)
+ if (asprintf(&tp->realname, "abs_%" PRIx64, tp->address) < 0)
goto errout;
if (pev->target) {
@@ -3523,6 +3563,78 @@ int show_probe_trace_events(struct perf_probe_event *pevs, int npevs)
return ret;
}
+static int show_bootconfig_event(struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ struct strbuf buf;
+ char *ret = NULL;
+ int err;
+
+ if (strbuf_init(&buf, 32) < 0)
+ return -ENOMEM;
+
+ err = synthesize_kprobe_trace_def(tp, &buf);
+ if (err >= 0)
+ err = synthesize_probe_trace_args(tev, &buf);
+ if (err >= 0)
+ ret = strbuf_detach(&buf, NULL);
+ strbuf_release(&buf);
+
+ if (ret) {
+ printf("'%s'", ret);
+ free(ret);
+ }
+
+ return err;
+}
+
+int show_bootconfig_events(struct perf_probe_event *pevs, int npevs)
+{
+ struct strlist *namelist = strlist__new(NULL, NULL);
+ struct probe_trace_event *tev;
+ struct perf_probe_event *pev;
+ char *cur_name = NULL;
+ int i, j, ret = 0;
+
+ if (!namelist)
+ return -ENOMEM;
+
+ for (j = 0; j < npevs && !ret; j++) {
+ pev = &pevs[j];
+ if (pev->group && strcmp(pev->group, "probe"))
+ pr_warning("WARN: Group name %s is ignored\n", pev->group);
+ if (pev->uprobes) {
+ pr_warning("ERROR: Bootconfig doesn't support uprobes\n");
+ ret = -EINVAL;
+ break;
+ }
+ for (i = 0; i < pev->ntevs && !ret; i++) {
+ tev = &pev->tevs[i];
+ /* Skip if the symbol is out of .text or blacklisted */
+ if (!tev->point.symbol && !pev->uprobes)
+ continue;
+
+ /* Set new name for tev (and update namelist) */
+ ret = probe_trace_event__set_name(tev, pev,
+ namelist, true);
+ if (ret)
+ break;
+
+ if (!cur_name || strcmp(cur_name, tev->event)) {
+ printf("%sftrace.event.kprobes.%s.probe = ",
+ cur_name ? "\n" : "", tev->event);
+ cur_name = tev->event;
+ } else
+ printf(", ");
+ ret = show_bootconfig_event(tev);
+ }
+ }
+ printf("\n");
+ strlist__delete(namelist);
+
+ return ret;
+}
+
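
For reference, the output produced here is plain bootconfig text: each probe is emitted as ftrace.event.kprobes.EVENT.probe = 'SPEC', where SPEC is the same definition string the kprobe synthesizer above builds, so a probe on vfs_read would come out roughly as ftrace.event.kprobes.vfs_read.probe = 'vfs_read+0' (the event name and location here are illustrative).
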
int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int i, ret = 0;
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 4f0eb3a20c36..8ad5b1579f1d 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -15,6 +15,7 @@ struct probe_conf {
bool force_add;
bool no_inlines;
bool cache;
+ bool bootconfig;
int max_probes;
unsigned long magic_num;
};
@@ -32,7 +33,7 @@ struct probe_trace_point {
char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
unsigned long ref_ctr_offset; /* SDT reference counter offset */
- unsigned long address; /* Actual address of the trace point */
+ u64 address; /* Actual address of the trace point */
bool retprobe; /* Return probe flag */
};
@@ -69,7 +70,7 @@ struct perf_probe_point {
bool retprobe; /* Return probe flag */
char *lazy_line; /* Lazy matching pattern */
unsigned long offset; /* Offset from function entry */
- unsigned long abs_address; /* Absolute address of the point */
+ u64 abs_address; /* Absolute address of the point */
};
/* Perf probe probing argument field chain */
@@ -163,6 +164,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs);
int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs);
int show_probe_trace_events(struct perf_probe_event *pevs, int npevs);
+int show_bootconfig_events(struct perf_probe_event *pevs, int npevs);
void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs);
struct strfilter;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 52273542e6ef..3d50de3217d5 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -22,6 +22,7 @@
#include "symbol.h"
#include "strbuf.h"
#include <api/fs/tracing_path.h>
+#include <api/fs/fs.h>
#include "probe-event.h"
#include "probe-file.h"
#include "session.h"
@@ -31,44 +32,78 @@
/* 4096 - 2 ('\n' + '\0') */
#define MAX_CMDLEN 4094
-static void print_open_warning(int err, bool uprobe)
+static bool print_common_warning(int err, bool readwrite)
{
- char sbuf[STRERR_BUFSIZE];
+ if (err == -EACCES)
+ pr_warning("No permission to %s tracefs.\nPlease %s\n",
+ readwrite ? "write" : "read",
+ readwrite ? "run this command again with sudo." :
+ "try 'sudo mount -o remount,mode=755 /sys/kernel/tracing/'");
+ else
+ return false;
- if (err == -ENOENT) {
- const char *config;
+ return true;
+}
- if (uprobe)
- config = "CONFIG_UPROBE_EVENTS";
- else
- config = "CONFIG_KPROBE_EVENTS";
+static bool print_configure_probe_event(int kerr, int uerr)
+{
+ const char *config, *file;
+
+ if (kerr == -ENOENT && uerr == -ENOENT) {
+ file = "{k,u}probe_events";
+ config = "CONFIG_KPROBE_EVENTS=y and CONFIG_UPROBE_EVENTS=y";
+ } else if (kerr == -ENOENT) {
+ file = "kprobe_events";
+ config = "CONFIG_KPROBE_EVENTS=y";
+ } else if (uerr == -ENOENT) {
+ file = "uprobe_events";
+ config = "CONFIG_UPROBE_EVENTS=y";
+ } else
+ return false;
- pr_warning("%cprobe_events file does not exist"
- " - please rebuild kernel with %s.\n",
- uprobe ? 'u' : 'k', config);
- } else if (err == -ENOTSUP)
- pr_warning("Tracefs or debugfs is not mounted.\n");
+ if (!debugfs__configured() && !tracefs__configured())
+ pr_warning("Debugfs or tracefs is not mounted\n"
+ "Please try 'sudo mount -t tracefs nodev /sys/kernel/tracing/'\n");
else
- pr_warning("Failed to open %cprobe_events: %s\n",
- uprobe ? 'u' : 'k',
- str_error_r(-err, sbuf, sizeof(sbuf)));
+ pr_warning("%s/%s does not exist.\nPlease rebuild kernel with %s.\n",
+ tracing_path_mount(), file, config);
+
+ return true;
+}
+
+static void print_open_warning(int err, bool uprobe, bool readwrite)
+{
+ char sbuf[STRERR_BUFSIZE];
+
+ if (print_common_warning(err, readwrite))
+ return;
+
+ if (print_configure_probe_event(uprobe ? 0 : err, uprobe ? err : 0))
+ return;
+
+ pr_warning("Failed to open %s/%cprobe_events: %s\n",
+ tracing_path_mount(), uprobe ? 'u' : 'k',
+ str_error_r(-err, sbuf, sizeof(sbuf)));
}
-static void print_both_open_warning(int kerr, int uerr)
+static void print_both_open_warning(int kerr, int uerr, bool readwrite)
{
- /* Both kprobes and uprobes are disabled, warn it. */
- if (kerr == -ENOTSUP && uerr == -ENOTSUP)
- pr_warning("Tracefs or debugfs is not mounted.\n");
- else if (kerr == -ENOENT && uerr == -ENOENT)
- pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
- "or/and CONFIG_UPROBE_EVENTS.\n");
- else {
- char sbuf[STRERR_BUFSIZE];
- pr_warning("Failed to open kprobe events: %s.\n",
+ char sbuf[STRERR_BUFSIZE];
+
+ if (kerr == uerr && print_common_warning(kerr, readwrite))
+ return;
+
+ if (print_configure_probe_event(kerr, uerr))
+ return;
+
+ if (kerr < 0)
+ pr_warning("Failed to open %s/kprobe_events: %s.\n",
+ tracing_path_mount(),
str_error_r(-kerr, sbuf, sizeof(sbuf)));
- pr_warning("Failed to open uprobe events: %s.\n",
+ if (uerr < 0)
+ pr_warning("Failed to open %s/uprobe_events: %s.\n",
+ tracing_path_mount(),
str_error_r(-uerr, sbuf, sizeof(sbuf)));
- }
}
int open_trace_file(const char *trace_file, bool readwrite)
@@ -109,7 +144,7 @@ int probe_file__open(int flag)
else
fd = open_kprobe_events(flag & PF_FL_RW);
if (fd < 0)
- print_open_warning(fd, flag & PF_FL_UPROBE);
+ print_open_warning(fd, flag & PF_FL_UPROBE, flag & PF_FL_RW);
return fd;
}
@@ -122,7 +157,7 @@ int probe_file__open_both(int *kfd, int *ufd, int flag)
*kfd = open_kprobe_events(flag & PF_FL_RW);
*ufd = open_uprobe_events(flag & PF_FL_RW);
if (*kfd < 0 && *ufd < 0) {
- print_both_open_warning(*kfd, *ufd);
+ print_both_open_warning(*kfd, *ufd, flag & PF_FL_RW);
return *kfd;
}
@@ -342,11 +377,11 @@ int probe_file__del_events(int fd, struct strfilter *filter)
ret = probe_file__get_events(fd, filter, namelist);
if (ret < 0)
- return ret;
+ goto out;
ret = probe_file__del_strlist(fd, namelist);
+out:
strlist__delete(namelist);
-
return ret;
}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index b029c29ce227..50d861a80f57 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -118,12 +118,17 @@ struct debuginfo *debuginfo__new(const char *path)
char buf[PATH_MAX], nil = '\0';
struct dso *dso;
struct debuginfo *dinfo = NULL;
+ struct build_id bid;
/* Try to open distro debuginfo files */
dso = dso__new(path);
if (!dso)
goto out;
+ /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
+ if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
+ dso__set_build_id(dso, &bid);
+
for (type = distro_dwarf_types;
!dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
type++) {
@@ -663,7 +668,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
}
tp->offset = (unsigned long)(paddr - eaddr);
- tp->address = (unsigned long)paddr;
+ tp->address = paddr;
tp->symbol = strdup(symbol);
if (!tp->symbol)
return -ENOMEM;
@@ -1702,7 +1707,7 @@ int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
}
/* Reverse search */
-int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt)
{
Dwarf_Die cudie, spdie, indie;
@@ -1715,14 +1720,14 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- pr_warning("Failed to find debug information for address %lx\n",
+ pr_warning("Failed to find debug information for address %" PRIx64 "\n",
addr);
ret = -EINVAL;
goto end;
}
/* Find a corresponding line (filename and lineno) */
- cu_find_lineinfo(&cudie, addr, &fname, &lineno);
+ cu_find_lineinfo(&cudie, (Dwarf_Addr)addr, &fname, &lineno);
/* Don't care whether it failed or not */
/* Find a corresponding function (name, baseline and baseaddr) */
@@ -1737,7 +1742,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
}
fname = dwarf_decl_file(&spdie);
- if (addr == (unsigned long)baseaddr) {
+ if (addr == baseaddr) {
/* Function entry - Relative line number is 0 */
lineno = baseline;
goto post;
@@ -1783,7 +1788,7 @@ post:
if (lineno)
ppt->line = lineno - baseline;
else if (basefunc) {
- ppt->offset = addr - (unsigned long)baseaddr;
+ ppt->offset = addr - baseaddr;
func = basefunc;
}
@@ -1823,8 +1828,7 @@ static int line_range_add_line(const char *src, unsigned int lineno,
}
static int line_range_walk_cb(const char *fname, int lineno,
- Dwarf_Addr addr __maybe_unused,
- void *data)
+ Dwarf_Addr addr, void *data)
{
struct line_finder *lf = data;
const char *__fname;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 2febb5875678..8bc1c80d3c1c 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,7 +46,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
struct probe_trace_event **tevs);
/* Find a perf_probe_point from debuginfo */
-int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt);
int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 412f8e79e409..8feef3a05af7 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -1032,7 +1032,7 @@ static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
Py_INCREF(pevsel);
evsel = &((struct pyrf_evsel *)pevsel)->evsel;
- evsel->idx = evlist->core.nr_entries;
+ evsel->core.idx = evlist->core.nr_entries;
evlist__add(evlist, evsel);
return Py_BuildValue("i", evlist->core.nr_entries);
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 43e5b563dee8..bff669b615ee 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -25,12 +25,12 @@
*/
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) ||
is_mem_loads_aux_event(leader)) {
evlist__for_each_entry(evlist, evsel) {
- if (evsel->leader == leader && evsel != evsel->leader)
+ if (evsel__leader(evsel) == leader && evsel != evsel__leader(evsel))
return evsel;
}
}
@@ -53,7 +53,7 @@ static u64 evsel__config_term_mask(struct evsel *evsel)
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
struct perf_event_attr *attr = &evsel->core.attr;
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
struct evsel *read_sampler;
u64 term_types, freq_mask;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 0e608a5ef599..32a721b3e9a5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -371,9 +371,6 @@ static void perl_process_tracepoint(struct perf_sample *sample,
s = nsecs / NSEC_PER_SEC;
ns = nsecs - s * NSEC_PER_SEC;
- scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->tep;
-
ENTER;
SAVETMPS;
PUSHMARK(SP);
@@ -456,8 +453,10 @@ static void perl_process_event_generic(union perf_event *event,
static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
+ scripting_context__update(scripting_context, event, sample, evsel, al, addr_al);
perl_process_tracepoint(sample, evsel, al);
perl_process_event_generic(event, sample, evsel);
}
@@ -474,11 +473,14 @@ static void run_start_sub(void)
/*
* Start trace script
*/
-static int perl_start_script(const char *script, int argc, const char **argv)
+static int perl_start_script(const char *script, int argc, const char **argv,
+ struct perf_session *session)
{
const char **command_line;
int i, err = 0;
+ scripting_context->session = session;
+
command_line = malloc((argc + 2) * sizeof(const char *));
command_line[0] = "";
command_line[1] = script;
@@ -750,6 +752,7 @@ sub print_backtrace\n\
struct scripting_ops perl_scripting_ops = {
.name = "Perl",
+ .dirname = "perl",
.start_script = perl_start_script,
.flush_script = perl_flush_script,
.stop_script = perl_stop_script,
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 4e4aa4c97ac5..69129e2aa7a1 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -687,7 +687,7 @@ static void set_sample_datasrc_in_dict(PyObject *dict,
_PyUnicode_FromString(decode));
}
-static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
+static void regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
{
unsigned int i = 0, r;
int printed = 0;
@@ -695,7 +695,7 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
bf[0] = 0;
if (!regs || !regs->regs)
- return 0;
+ return;
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
@@ -704,8 +704,6 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
"%5s:0x%" PRIx64 " ",
perf_reg_name(r), val);
}
-
- return printed;
}
static void set_regs_in_dict(PyObject *dict,
@@ -713,7 +711,16 @@ static void set_regs_in_dict(PyObject *dict,
struct evsel *evsel)
{
struct perf_event_attr *attr = &evsel->core.attr;
- char bf[512];
+
+ /*
+ * Here the value 28 is a constant size which is enough to print one
+ * register value; it corresponds to:
+ * 16 chars to print a 64-bit register value in hexadecimal,
+ * 2 chars for the "0x" prefix of the hexadecimal value, and
+ * 10 chars for the register name.
+ */
+ int size = __sw_hweight64(attr->sample_regs_intr) * 28;
+ char bf[size];
regs_map(&sample->intr_regs, attr->sample_regs_intr, bf, sizeof(bf));
@@ -726,9 +733,49 @@ static void set_regs_in_dict(PyObject *dict,
_PyUnicode_FromString(bf));
}
+static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
+ const char *dso_field, const char *sym_field,
+ const char *symoff_field)
+{
+ if (al->map) {
+ pydict_set_item_string_decref(dict, dso_field,
+ _PyUnicode_FromString(al->map->dso->name));
+ }
+ if (al->sym) {
+ pydict_set_item_string_decref(dict, sym_field,
+ _PyUnicode_FromString(al->sym->name));
+ pydict_set_item_string_decref(dict, symoff_field,
+ PyLong_FromUnsignedLong(get_offset(al->sym, al)));
+ }
+}
+
+static void set_sample_flags(PyObject *dict, u32 flags)
+{
+ const char *ch = PERF_IP_FLAG_CHARS;
+ char *p, str[33];
+
+ for (p = str; *ch; ch++, flags >>= 1) {
+ if (flags & 1)
+ *p++ = *ch;
+ }
+ *p = 0;
+ pydict_set_item_string_decref(dict, "flags", _PyUnicode_FromString(str));
+}
+
+static void python_process_sample_flags(struct perf_sample *sample, PyObject *dict_sample)
+{
+ char flags_disp[SAMPLE_FLAGS_BUF_SIZE];
+
+ set_sample_flags(dict_sample, sample->flags);
+ perf_sample__sprintf_flags(sample->flags, flags_disp, sizeof(flags_disp));
+ pydict_set_item_string_decref(dict_sample, "flags_disp",
+ _PyUnicode_FromString(flags_disp));
+}
+
static PyObject *get_perf_sample_dict(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
+ struct addr_location *addr_al,
PyObject *callchain)
{
PyObject *dict, *dict_sample, *brstack, *brstacksym;
@@ -772,14 +819,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
_PyUnicode_FromString(thread__comm_str(al->thread)));
- if (al->map) {
- pydict_set_item_string_decref(dict, "dso",
- _PyUnicode_FromString(al->map->dso->name));
- }
- if (al->sym) {
- pydict_set_item_string_decref(dict, "symbol",
- _PyUnicode_FromString(al->sym->name));
- }
+ set_sym_in_dict(dict, al, "dso", "symbol", "symoff");
pydict_set_item_string_decref(dict, "callchain", callchain);
@@ -789,6 +829,26 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
brstacksym = python_process_brstacksym(sample, al->thread);
pydict_set_item_string_decref(dict, "brstacksym", brstacksym);
+ pydict_set_item_string_decref(dict_sample, "cpumode",
+ _PyLong_FromLong((unsigned long)sample->cpumode));
+
+ if (addr_al) {
+ pydict_set_item_string_decref(dict_sample, "addr_correlates_sym",
+ PyBool_FromLong(1));
+ set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_symbol", "addr_symoff");
+ }
+
+ if (sample->flags)
+ python_process_sample_flags(sample, dict_sample);
+
+ /* Instructions per cycle (IPC) */
+ if (sample->insn_cnt && sample->cyc_cnt) {
+ pydict_set_item_string_decref(dict_sample, "insn_cnt",
+ PyLong_FromUnsignedLongLong(sample->insn_cnt));
+ pydict_set_item_string_decref(dict_sample, "cyc_cnt",
+ PyLong_FromUnsignedLongLong(sample->cyc_cnt));
+ }
+
set_regs_in_dict(dict, sample, evsel);
return dict;
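
Assuming the usual perf-script convention that this dictionary reaches a handler as its param_dict argument with the per-sample values under the "sample" key (that wiring is outside this hunk), a hedged sketch of consuming the new fields:

def process_event(param_dict):
    sample = param_dict.get("sample", {})
    if sample.get("addr_correlates_sym"):
        print("addr -> %s+%#x (%s)" % (sample.get("addr_symbol", "[unknown]"),
                                       sample.get("addr_symoff", 0),
                                       sample.get("addr_dso", "[unknown]")))
    if "flags_disp" in sample:
        print("flags: %s" % sample["flags_disp"])
    if sample.get("cyc_cnt"):
        print("IPC: %.2f" % (float(sample.get("insn_cnt", 0)) / sample["cyc_cnt"]))
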
@@ -796,7 +856,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
static void python_process_tracepoint(struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
struct tep_event *event = evsel->tp_format;
PyObject *handler, *context, *t, *obj = NULL, *callchain;
@@ -843,9 +904,6 @@ static void python_process_tracepoint(struct perf_sample *sample,
s = nsecs / NSEC_PER_SEC;
ns = nsecs - s * NSEC_PER_SEC;
- scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->tep;
-
context = _PyCapsule_New(scripting_context, NULL, NULL);
PyTuple_SetItem(t, n++, _PyUnicode_FromString(handler_name));
@@ -906,7 +964,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
PyTuple_SetItem(t, n++, dict);
if (get_argument_count(handler) == (int) n + 1) {
- all_entries_dict = get_perf_sample_dict(sample, evsel, al,
+ all_entries_dict = get_perf_sample_dict(sample, evsel, al, addr_al,
callchain);
PyTuple_SetItem(t, n++, all_entries_dict);
} else {
@@ -934,7 +992,7 @@ static PyObject *tuple_new(unsigned int sz)
return t;
}
-static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
+static int tuple_set_s64(PyObject *t, unsigned int pos, s64 val)
{
#if BITS_PER_LONG == 64
return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
@@ -944,11 +1002,37 @@ static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
#endif
}
+/*
+ * Databases support only signed 64-bit numbers, so even though we are
+ * exporting a u64, it must be as s64.
+ */
+#define tuple_set_d64 tuple_set_s64
+
+static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
+{
+#if BITS_PER_LONG == 64
+ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
+#endif
+#if BITS_PER_LONG == 32
+ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
+#endif
+}
+
+static int tuple_set_u32(PyObject *t, unsigned int pos, u32 val)
+{
+ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
+}
+
static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
{
return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
}
+static int tuple_set_bool(PyObject *t, unsigned int pos, bool val)
+{
+ return PyTuple_SetItem(t, pos, PyBool_FromLong(val));
+}
+
static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
{
return PyTuple_SetItem(t, pos, _PyUnicode_FromString(s));
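
The signed/unsigned distinction matters because a u64 above 2**63 - 1 only fits a signed 64-bit database column as a negative number; a small stand-alone illustration (not part of the patch):

import struct

db_id = 0xfedcba9876543210
# Reinterpret the u64 bit pattern as s64 - what a signed-only database keeps.
stored, = struct.unpack("<q", struct.pack("<Q", db_id))
# Masking back to 64 bits recovers the original unsigned value on the way out.
restored = stored & 0xffffffffffffffff
print(hex(db_id), stored, hex(restored))
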
@@ -967,7 +1051,7 @@ static int python_export_evsel(struct db_export *dbe, struct evsel *evsel)
t = tuple_new(2);
- tuple_set_u64(t, 0, evsel->db_id);
+ tuple_set_d64(t, 0, evsel->db_id);
tuple_set_string(t, 1, evsel__name(evsel));
call_object(tables->evsel_handler, t, "evsel_table");
@@ -985,7 +1069,7 @@ static int python_export_machine(struct db_export *dbe,
t = tuple_new(3);
- tuple_set_u64(t, 0, machine->db_id);
+ tuple_set_d64(t, 0, machine->db_id);
tuple_set_s32(t, 1, machine->pid);
tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
@@ -1004,9 +1088,9 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
t = tuple_new(5);
- tuple_set_u64(t, 0, thread->db_id);
- tuple_set_u64(t, 1, machine->db_id);
- tuple_set_u64(t, 2, main_thread_db_id);
+ tuple_set_d64(t, 0, thread->db_id);
+ tuple_set_d64(t, 1, machine->db_id);
+ tuple_set_d64(t, 2, main_thread_db_id);
tuple_set_s32(t, 3, thread->pid_);
tuple_set_s32(t, 4, thread->tid);
@@ -1025,10 +1109,10 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
t = tuple_new(5);
- tuple_set_u64(t, 0, comm->db_id);
+ tuple_set_d64(t, 0, comm->db_id);
tuple_set_string(t, 1, comm__str(comm));
- tuple_set_u64(t, 2, thread->db_id);
- tuple_set_u64(t, 3, comm->start);
+ tuple_set_d64(t, 2, thread->db_id);
+ tuple_set_d64(t, 3, comm->start);
tuple_set_s32(t, 4, comm->exec);
call_object(tables->comm_handler, t, "comm_table");
@@ -1046,9 +1130,9 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
t = tuple_new(3);
- tuple_set_u64(t, 0, db_id);
- tuple_set_u64(t, 1, comm->db_id);
- tuple_set_u64(t, 2, thread->db_id);
+ tuple_set_d64(t, 0, db_id);
+ tuple_set_d64(t, 1, comm->db_id);
+ tuple_set_d64(t, 2, thread->db_id);
call_object(tables->comm_thread_handler, t, "comm_thread_table");
@@ -1068,8 +1152,8 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
t = tuple_new(5);
- tuple_set_u64(t, 0, dso->db_id);
- tuple_set_u64(t, 1, machine->db_id);
+ tuple_set_d64(t, 0, dso->db_id);
+ tuple_set_d64(t, 1, machine->db_id);
tuple_set_string(t, 2, dso->short_name);
tuple_set_string(t, 3, dso->long_name);
tuple_set_string(t, 4, sbuild_id);
@@ -1090,10 +1174,10 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
t = tuple_new(6);
- tuple_set_u64(t, 0, *sym_db_id);
- tuple_set_u64(t, 1, dso->db_id);
- tuple_set_u64(t, 2, sym->start);
- tuple_set_u64(t, 3, sym->end);
+ tuple_set_d64(t, 0, *sym_db_id);
+ tuple_set_d64(t, 1, dso->db_id);
+ tuple_set_d64(t, 2, sym->start);
+ tuple_set_d64(t, 3, sym->end);
tuple_set_s32(t, 4, sym->binding);
tuple_set_string(t, 5, sym->name);
@@ -1130,30 +1214,30 @@ static void python_export_sample_table(struct db_export *dbe,
t = tuple_new(24);
- tuple_set_u64(t, 0, es->db_id);
- tuple_set_u64(t, 1, es->evsel->db_id);
- tuple_set_u64(t, 2, es->al->maps->machine->db_id);
- tuple_set_u64(t, 3, es->al->thread->db_id);
- tuple_set_u64(t, 4, es->comm_db_id);
- tuple_set_u64(t, 5, es->dso_db_id);
- tuple_set_u64(t, 6, es->sym_db_id);
- tuple_set_u64(t, 7, es->offset);
- tuple_set_u64(t, 8, es->sample->ip);
- tuple_set_u64(t, 9, es->sample->time);
+ tuple_set_d64(t, 0, es->db_id);
+ tuple_set_d64(t, 1, es->evsel->db_id);
+ tuple_set_d64(t, 2, es->al->maps->machine->db_id);
+ tuple_set_d64(t, 3, es->al->thread->db_id);
+ tuple_set_d64(t, 4, es->comm_db_id);
+ tuple_set_d64(t, 5, es->dso_db_id);
+ tuple_set_d64(t, 6, es->sym_db_id);
+ tuple_set_d64(t, 7, es->offset);
+ tuple_set_d64(t, 8, es->sample->ip);
+ tuple_set_d64(t, 9, es->sample->time);
tuple_set_s32(t, 10, es->sample->cpu);
- tuple_set_u64(t, 11, es->addr_dso_db_id);
- tuple_set_u64(t, 12, es->addr_sym_db_id);
- tuple_set_u64(t, 13, es->addr_offset);
- tuple_set_u64(t, 14, es->sample->addr);
- tuple_set_u64(t, 15, es->sample->period);
- tuple_set_u64(t, 16, es->sample->weight);
- tuple_set_u64(t, 17, es->sample->transaction);
- tuple_set_u64(t, 18, es->sample->data_src);
+ tuple_set_d64(t, 11, es->addr_dso_db_id);
+ tuple_set_d64(t, 12, es->addr_sym_db_id);
+ tuple_set_d64(t, 13, es->addr_offset);
+ tuple_set_d64(t, 14, es->sample->addr);
+ tuple_set_d64(t, 15, es->sample->period);
+ tuple_set_d64(t, 16, es->sample->weight);
+ tuple_set_d64(t, 17, es->sample->transaction);
+ tuple_set_d64(t, 18, es->sample->data_src);
tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
- tuple_set_u64(t, 21, es->call_path_id);
- tuple_set_u64(t, 22, es->sample->insn_cnt);
- tuple_set_u64(t, 23, es->sample->cyc_cnt);
+ tuple_set_d64(t, 21, es->call_path_id);
+ tuple_set_d64(t, 22, es->sample->insn_cnt);
+ tuple_set_d64(t, 23, es->sample->cyc_cnt);
call_object(tables->sample_handler, t, "sample_table");
@@ -1167,8 +1251,8 @@ static void python_export_synth(struct db_export *dbe, struct export_sample *es)
t = tuple_new(3);
- tuple_set_u64(t, 0, es->db_id);
- tuple_set_u64(t, 1, es->evsel->core.attr.config);
+ tuple_set_d64(t, 0, es->db_id);
+ tuple_set_d64(t, 1, es->evsel->core.attr.config);
tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
call_object(tables->synth_handler, t, "synth_data");
@@ -1200,10 +1284,10 @@ static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
t = tuple_new(4);
- tuple_set_u64(t, 0, cp->db_id);
- tuple_set_u64(t, 1, parent_db_id);
- tuple_set_u64(t, 2, sym_db_id);
- tuple_set_u64(t, 3, cp->ip);
+ tuple_set_d64(t, 0, cp->db_id);
+ tuple_set_d64(t, 1, parent_db_id);
+ tuple_set_d64(t, 2, sym_db_id);
+ tuple_set_d64(t, 3, cp->ip);
call_object(tables->call_path_handler, t, "call_path_table");
@@ -1221,20 +1305,20 @@ static int python_export_call_return(struct db_export *dbe,
t = tuple_new(14);
- tuple_set_u64(t, 0, cr->db_id);
- tuple_set_u64(t, 1, cr->thread->db_id);
- tuple_set_u64(t, 2, comm_db_id);
- tuple_set_u64(t, 3, cr->cp->db_id);
- tuple_set_u64(t, 4, cr->call_time);
- tuple_set_u64(t, 5, cr->return_time);
- tuple_set_u64(t, 6, cr->branch_count);
- tuple_set_u64(t, 7, cr->call_ref);
- tuple_set_u64(t, 8, cr->return_ref);
- tuple_set_u64(t, 9, cr->cp->parent->db_id);
+ tuple_set_d64(t, 0, cr->db_id);
+ tuple_set_d64(t, 1, cr->thread->db_id);
+ tuple_set_d64(t, 2, comm_db_id);
+ tuple_set_d64(t, 3, cr->cp->db_id);
+ tuple_set_d64(t, 4, cr->call_time);
+ tuple_set_d64(t, 5, cr->return_time);
+ tuple_set_d64(t, 6, cr->branch_count);
+ tuple_set_d64(t, 7, cr->call_ref);
+ tuple_set_d64(t, 8, cr->return_ref);
+ tuple_set_d64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
- tuple_set_u64(t, 11, cr->parent_db_id);
- tuple_set_u64(t, 12, cr->insn_count);
- tuple_set_u64(t, 13, cr->cyc_count);
+ tuple_set_d64(t, 11, cr->parent_db_id);
+ tuple_set_d64(t, 12, cr->insn_count);
+ tuple_set_d64(t, 13, cr->cyc_count);
call_object(tables->call_return_handler, t, "call_return_table");
@@ -1254,14 +1338,14 @@ static int python_export_context_switch(struct db_export *dbe, u64 db_id,
t = tuple_new(9);
- tuple_set_u64(t, 0, db_id);
- tuple_set_u64(t, 1, machine->db_id);
- tuple_set_u64(t, 2, sample->time);
+ tuple_set_d64(t, 0, db_id);
+ tuple_set_d64(t, 1, machine->db_id);
+ tuple_set_d64(t, 2, sample->time);
tuple_set_s32(t, 3, sample->cpu);
- tuple_set_u64(t, 4, th_out_id);
- tuple_set_u64(t, 5, comm_out_id);
- tuple_set_u64(t, 6, th_in_id);
- tuple_set_u64(t, 7, comm_in_id);
+ tuple_set_d64(t, 4, th_out_id);
+ tuple_set_d64(t, 5, comm_out_id);
+ tuple_set_d64(t, 6, th_in_id);
+ tuple_set_d64(t, 7, comm_in_id);
tuple_set_s32(t, 8, flags);
call_object(tables->context_switch_handler, t, "context_switch");
@@ -1281,7 +1365,8 @@ static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
static void python_process_general_event(struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
PyObject *handler, *t, *dict, *callchain;
static char handler_name[64];
@@ -1303,7 +1388,7 @@ static void python_process_general_event(struct perf_sample *sample,
/* ip unwinding */
callchain = python_process_callchain(sample, evsel, al);
- dict = get_perf_sample_dict(sample, evsel, al, callchain);
+ dict = get_perf_sample_dict(sample, evsel, al, addr_al, callchain);
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
@@ -1317,23 +1402,64 @@ static void python_process_general_event(struct perf_sample *sample,
static void python_process_event(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
struct tables *tables = &tables_global;
+ scripting_context__update(scripting_context, event, sample, evsel, al, addr_al);
+
switch (evsel->core.attr.type) {
case PERF_TYPE_TRACEPOINT:
- python_process_tracepoint(sample, evsel, al);
+ python_process_tracepoint(sample, evsel, al, addr_al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
if (tables->db_export_mode)
- db_export__sample(&tables->dbe, event, sample, evsel, al);
+ db_export__sample(&tables->dbe, event, sample, evsel, al, addr_al);
else
- python_process_general_event(sample, evsel, al);
+ python_process_general_event(sample, evsel, al, addr_al);
}
}
+static void python_do_process_switch(union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ const char *handler_name = "context_switch";
+ bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+ bool out_preempt = out && (event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
+ pid_t np_pid = -1, np_tid = -1;
+ PyObject *handler, *t;
+
+ handler = get_handler(handler_name);
+ if (!handler)
+ return;
+
+ if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
+ np_pid = event->context_switch.next_prev_pid;
+ np_tid = event->context_switch.next_prev_tid;
+ }
+
+ t = tuple_new(9);
+ if (!t)
+ return;
+
+ tuple_set_u64(t, 0, sample->time);
+ tuple_set_s32(t, 1, sample->cpu);
+ tuple_set_s32(t, 2, sample->pid);
+ tuple_set_s32(t, 3, sample->tid);
+ tuple_set_s32(t, 4, np_pid);
+ tuple_set_s32(t, 5, np_tid);
+ tuple_set_s32(t, 6, machine->pid);
+ tuple_set_bool(t, 7, out);
+ tuple_set_bool(t, 8, out_preempt);
+
+ call_object(handler, t, handler_name);
+
+ Py_DECREF(t);
+}
+
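
The nine tuple entries built above are the positional arguments a script's context_switch handler receives; a minimal hedged sketch (parameter names are chosen here for readability, not mandated by the interface):

def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt):
    # Argument order mirrors the tuple built in python_do_process_switch().
    direction = "out" if out else "in"
    if out_preempt:
        direction += " (preempt)"
    print("context switch %s: time=%d cpu=%d pid=%d tid=%d next/prev pid=%d tid=%d" %
          (direction, ts, cpu, pid, tid, np_pid, np_tid))
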
static void python_process_switch(union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -1342,6 +1468,44 @@ static void python_process_switch(union perf_event *event,
if (tables->db_export_mode)
db_export__switch(&tables->dbe, event, sample, machine);
+ else
+ python_do_process_switch(event, sample, machine);
+}
+
+static void python_process_auxtrace_error(struct perf_session *session __maybe_unused,
+ union perf_event *event)
+{
+ struct perf_record_auxtrace_error *e = &event->auxtrace_error;
+ u8 cpumode = e->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ const char *handler_name = "auxtrace_error";
+ unsigned long long tm = e->time;
+ const char *msg = e->msg;
+ PyObject *handler, *t;
+
+ handler = get_handler(handler_name);
+ if (!handler)
+ return;
+
+ if (!e->fmt) {
+ tm = 0;
+ msg = (const char *)&e->time;
+ }
+
+ t = tuple_new(9);
+
+ tuple_set_u32(t, 0, e->type);
+ tuple_set_u32(t, 1, e->code);
+ tuple_set_s32(t, 2, e->cpu);
+ tuple_set_s32(t, 3, e->pid);
+ tuple_set_s32(t, 4, e->tid);
+ tuple_set_u64(t, 5, e->ip);
+ tuple_set_u64(t, 6, tm);
+ tuple_set_string(t, 7, msg);
+ tuple_set_u32(t, 8, cpumode);
+
+ call_object(handler, t, handler_name);
+
+ Py_DECREF(t);
}
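
Likewise, the tuple above maps one-to-one onto the arguments of a script's auxtrace_error handler; an illustrative stub (names chosen here, not fixed by the interface):

def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode):
    # Argument order mirrors the tuple built in python_process_auxtrace_error().
    print("auxtrace error: type=%d code=%d cpu=%d pid=%d tid=%d ip=%#x time=%d cpumode=%d %s"
          % (typ, code, cpu, pid, tid, ip, ts, cpumode, msg))
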
static void get_handler_name(char *str, size_t size,
@@ -1442,6 +1606,31 @@ static void python_process_stat_interval(u64 tstamp)
Py_DECREF(t);
}
+static int perf_script_context_init(void)
+{
+ PyObject *perf_script_context;
+ PyObject *perf_trace_context;
+ PyObject *dict;
+ int ret;
+
+ perf_trace_context = PyImport_AddModule("perf_trace_context");
+ if (!perf_trace_context)
+ return -1;
+ dict = PyModule_GetDict(perf_trace_context);
+ if (!dict)
+ return -1;
+
+ perf_script_context = _PyCapsule_New(scripting_context, NULL, NULL);
+ if (!perf_script_context)
+ return -1;
+
+ ret = PyDict_SetItemString(dict, "perf_script_context", perf_script_context);
+ if (!ret)
+ ret = PyDict_SetItemString(main_dict, "perf_script_context", perf_script_context);
+ Py_DECREF(perf_script_context);
+ return ret;
+}
+
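
From the script side, the capsule installed above is just a module-level name; a hedged illustration (only meaningful when run under perf script, and the helpers a script would pass it to are outside this hunk):

from perf_trace_context import perf_script_context

# perf_script_context is an opaque capsule around the C-side scripting_context;
# scripts keep a reference to it and hand it to helper functions that take a
# context argument.
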
static int run_start_sub(void)
{
main_module = PyImport_AddModule("__main__");
@@ -1454,6 +1643,9 @@ static int run_start_sub(void)
goto error;
Py_INCREF(main_dict);
+ if (perf_script_context_init())
+ goto error;
+
try_call_object("trace_begin", NULL);
return 0;
@@ -1589,7 +1781,8 @@ static void _free_command_line(wchar_t **command_line, int num)
/*
* Start trace script
*/
-static int python_start_script(const char *script, int argc, const char **argv)
+static int python_start_script(const char *script, int argc, const char **argv,
+ struct perf_session *session)
{
struct tables *tables = &tables_global;
#if PY_MAJOR_VERSION < 3
@@ -1605,6 +1798,7 @@ static int python_start_script(const char *script, int argc, const char **argv)
int i, err = 0;
FILE *fp;
+ scripting_context->session = session;
#if PY_MAJOR_VERSION < 3
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
@@ -1876,11 +2070,13 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
struct scripting_ops python_scripting_ops = {
.name = "Python",
+ .dirname = "python",
.start_script = python_start_script,
.flush_script = python_flush_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.process_switch = python_process_switch,
+ .process_auxtrace_error = python_process_auxtrace_error,
.process_stat = python_process_stat,
.process_stat_interval = python_process_stat_interval,
.generate_script = python_generate_script,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index e59242c361ce..51f727402912 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -301,8 +301,12 @@ void perf_session__delete(struct perf_session *session)
perf_session__release_decomp_events(session);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
- if (session->data)
+ if (session->data) {
+ if (perf_data__is_read(session->data))
+ evlist__delete(session->evlist);
perf_data__close(session->data);
+ }
+ trace_event__cleanup(&session->tevent);
free(session);
}
@@ -2156,6 +2160,7 @@ struct reader {
u64 data_size;
u64 data_offset;
reader_cb_t process;
+ bool in_place_update;
};
static int
@@ -2189,7 +2194,9 @@ reader__process_events(struct reader *rd, struct perf_session *session,
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
- if (session->header.needs_swap) {
+ if (rd->in_place_update) {
+ mmap_prot |= PROT_WRITE;
+ } else if (session->header.needs_swap) {
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
@@ -2275,6 +2282,7 @@ static int __perf_session__process_events(struct perf_session *session)
.data_size = session->header.data_size,
.data_offset = session->header.data_offset,
.process = process_simple,
+ .in_place_update = session->data->in_place_update,
};
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 88ce47f2547e..568a88c001c6 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -3370,7 +3370,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int
add_key(sb, s[i].name, llen);
}
-const char *sort_help(const char *prefix)
+char *sort_help(const char *prefix)
{
struct strbuf sb;
char *s;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 87a092645aa7..b67c469aba79 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -302,7 +302,7 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
-const char *sort_help(const char *prefix);
+char *sort_help(const char *prefix);
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index c29edaaca863..476e99896d5e 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -97,8 +97,7 @@ static struct srcfile *find_srcfile(char *fn)
hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
if (!strcmp(fn, h->fn)) {
/* Move to front */
- list_del(&h->nd);
- list_add(&h->nd, &srcfile_list);
+ list_move(&h->nd, &srcfile_list);
return h;
}
}
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index ca326f98c7a2..588601000f3f 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -19,6 +19,7 @@
#include "util.h"
#include "iostat.h"
#include "pmu-hybrid.h"
+#include "evlist-hybrid.h"
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
@@ -465,9 +466,11 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
config->csv_sep);
if (counter->supported) {
- config->print_free_counters_hint = 1;
- if (is_mixed_hw_group(counter))
- config->print_mixed_hw_group_error = 1;
+ if (!evlist__has_hybrid(counter->evlist)) {
+ config->print_free_counters_hint = 1;
+ if (is_mixed_hw_group(counter))
+ config->print_mixed_hw_group_error = 1;
+ }
}
fprintf(config->output, "%-*s%s",
@@ -593,6 +596,18 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
}
}
+static bool is_uncore(struct evsel *evsel)
+{
+ struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+ return pmu && pmu->is_uncore;
+}
+
+static bool hybrid_uniquify(struct evsel *evsel)
+{
+ return perf_pmu__has_hybrid() && !is_uncore(evsel);
+}
+
static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
void (*cb)(struct perf_stat_config *config, struct evsel *counter, void *data,
bool first),
@@ -601,7 +616,7 @@ static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
if (counter->merged_stat)
return false;
cb(config, counter, data, true);
- if (config->no_merge)
+ if (config->no_merge || hybrid_uniquify(counter))
uniquify_event_name(counter);
else if (counter->auto_merge_stats)
collect_all_aliases(config, counter, cb, data);
@@ -825,11 +840,11 @@ static void counter_aggr_cb(struct perf_stat_config *config __maybe_unused,
bool first __maybe_unused)
{
struct caggr_data *cd = data;
- struct perf_stat_evsel *ps = counter->stats;
+ struct perf_counts_values *aggr = &counter->counts->aggr;
- cd->avg += avg_stats(&ps->res_stats[0]);
- cd->avg_enabled += avg_stats(&ps->res_stats[1]);
- cd->avg_running += avg_stats(&ps->res_stats[2]);
+ cd->avg += aggr->val;
+ cd->avg_enabled += aggr->ena;
+ cd->avg_running += aggr->run;
}
/*
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 39967a45f55b..34a7f5c1fff7 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -379,7 +379,7 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
evlist__for_each_entry(evsel_list, counter) {
bool invalid = false;
- leader = counter->leader;
+ leader = evsel__leader(counter);
if (!counter->metric_expr)
continue;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2db46b9bebd0..09ea334586f2 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -437,18 +437,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
aggr->val = aggr->ena = aggr->run = 0;
- /*
- * We calculate counter's data every interval,
- * and the display code shows ps->res_stats
- * avg value. We need to zero the stats for
- * interval mode, otherwise overall avg running
- * averages will be shown for each interval.
- */
- if (config->interval || config->summary) {
- for (i = 0; i < 3; i++)
- init_stats(&ps->res_stats[i]);
- }
-
if (counter->per_pkg)
evsel__zero_per_pkg(counter);
@@ -546,7 +534,7 @@ int create_perf_stat_counter(struct evsel *evsel,
int cpu)
{
struct perf_event_attr *attr = &evsel->core.attr;
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
diff --git a/tools/perf/util/stream.c b/tools/perf/util/stream.c
index 4bd5e5a00aa5..545e44981a27 100644
--- a/tools/perf/util/stream.c
+++ b/tools/perf/util/stream.c
@@ -139,7 +139,7 @@ static int evlist__init_callchain_streams(struct evlist *evlist,
hists__output_resort(hists, NULL);
init_hot_callchain(hists, &es[i]);
- es[i].evsel_idx = pos->idx;
+ es[i].evsel_idx = pos->core.idx;
i++;
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a73345730ba9..31cd59a2b66e 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1074,14 +1074,15 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
return 0;
}
-int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
- struct symsrc *runtime_ss, int kmodule)
+static int
+dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ struct symsrc *runtime_ss, int kmodule, int dynsym)
{
struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
struct map *curr_map = map;
struct dso *curr_dso = dso;
- Elf_Data *symstrs, *secstrs;
+ Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
uint32_t nr_syms;
int err = -1;
uint32_t idx;
@@ -1098,34 +1099,15 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
if (kmap && !kmaps)
return -1;
- dso->symtab_type = syms_ss->type;
- dso->is_64_bit = syms_ss->is_64_bit;
- dso->rel = syms_ss->ehdr.e_type == ET_REL;
-
- /*
- * Modules may already have symbols from kallsyms, but those symbols
- * have the wrong values for the dso maps, so remove them.
- */
- if (kmodule && syms_ss->symtab)
- symbols__delete(&dso->symbols);
-
- if (!syms_ss->symtab) {
- /*
- * If the vmlinux is stripped, fail so we will fall back
- * to using kallsyms. The vmlinux runtime symbols aren't
- * of much use.
- */
- if (dso->kernel)
- goto out_elf_end;
-
- syms_ss->symtab = syms_ss->dynsym;
- syms_ss->symshdr = syms_ss->dynshdr;
- }
-
elf = syms_ss->elf;
ehdr = syms_ss->ehdr;
- sec = syms_ss->symtab;
- shdr = syms_ss->symshdr;
+ if (dynsym) {
+ sec = syms_ss->dynsym;
+ shdr = syms_ss->dynshdr;
+ } else {
+ sec = syms_ss->symtab;
+ shdr = syms_ss->symshdr;
+ }
if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
".text", NULL))
@@ -1150,8 +1132,16 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
if (sec_strndx == NULL)
goto out_elf_end;
- secstrs = elf_getdata(sec_strndx, NULL);
- if (secstrs == NULL)
+ secstrs_run = elf_getdata(sec_strndx, NULL);
+ if (secstrs_run == NULL)
+ goto out_elf_end;
+
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs_sym = elf_getdata(sec_strndx, NULL);
+ if (secstrs_sym == NULL)
goto out_elf_end;
nr_syms = shdr.sh_size / shdr.sh_entsize;
@@ -1237,6 +1227,8 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
gelf_getshdr(sec, &shdr);
+ secstrs = secstrs_sym;
+
/*
* We have to fallback to runtime when syms' section header has
* NOBITS set. NOBITS results in file offset (sh_offset) not
@@ -1249,6 +1241,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
goto out_elf_end;
gelf_getshdr(sec, &shdr);
+ secstrs = secstrs_run;
}
if (is_label && !elf_sec__filter(&shdr, secstrs))
@@ -1312,6 +1305,50 @@ out_elf_end:
return err;
}
+int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ struct symsrc *runtime_ss, int kmodule)
+{
+ int nr = 0;
+ int err = -1;
+
+ dso->symtab_type = syms_ss->type;
+ dso->is_64_bit = syms_ss->is_64_bit;
+ dso->rel = syms_ss->ehdr.e_type == ET_REL;
+
+ /*
+ * Modules may already have symbols from kallsyms, but those symbols
+ * have the wrong values for the dso maps, so remove them.
+ */
+ if (kmodule && syms_ss->symtab)
+ symbols__delete(&dso->symbols);
+
+ if (!syms_ss->symtab) {
+ /*
+ * If the vmlinux is stripped, fail so we will fall back
+ * to using kallsyms. The vmlinux runtime symbols aren't
+ * of much use.
+ */
+ if (dso->kernel)
+ return err;
+ } else {
+ err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
+ kmodule, 0);
+ if (err < 0)
+ return err;
+ nr = err;
+ }
+
+ if (syms_ss->dynsym) {
+ err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
+ kmodule, 1);
+ if (err < 0)
+ return err;
+ err += nr;
+ }
+
+ return err;
+}
+
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
GElf_Phdr phdr;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 714581b0de65..7172ca05265f 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -12,10 +12,31 @@
#include "debug.h"
#include "trace-event.h"
+#include "event.h"
+#include "evsel.h"
#include <linux/zalloc.h>
struct scripting_context *scripting_context;
+void scripting_context__update(struct scripting_context *c,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct addr_location *al,
+ struct addr_location *addr_al)
+{
+ c->event_data = sample->raw_data;
+ if (evsel->tp_format)
+ c->pevent = evsel->tp_format->tep;
+ else
+ c->pevent = NULL;
+ c->event = event;
+ c->sample = sample;
+ c->evsel = evsel;
+ c->al = al;
+ c->addr_al = addr_al;
+}
+
static int flush_script_unsupported(void)
{
return 0;
@@ -29,7 +50,8 @@ static int stop_script_unsupported(void)
static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct addr_location *al __maybe_unused,
+ struct addr_location *addr_al __maybe_unused)
{
}
@@ -44,7 +66,8 @@ static void print_python_unsupported_msg(void)
static int python_start_script_unsupported(const char *script __maybe_unused,
int argc __maybe_unused,
- const char **argv __maybe_unused)
+ const char **argv __maybe_unused,
+ struct perf_session *session __maybe_unused)
{
print_python_unsupported_msg();
@@ -63,6 +86,7 @@ static int python_generate_script_unsupported(struct tep_handle *pevent
struct scripting_ops python_scripting_unsupported_ops = {
.name = "Python",
+ .dirname = "python",
.start_script = python_start_script_unsupported,
.flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
@@ -108,7 +132,8 @@ static void print_perl_unsupported_msg(void)
static int perl_start_script_unsupported(const char *script __maybe_unused,
int argc __maybe_unused,
- const char **argv __maybe_unused)
+ const char **argv __maybe_unused,
+ struct perf_session *session __maybe_unused)
{
print_perl_unsupported_msg();
@@ -126,6 +151,7 @@ static int perl_generate_script_unsupported(struct tep_handle *pevent
struct scripting_ops perl_scripting_unsupported_ops = {
.name = "Perl",
+ .dirname = "perl",
.start_script = perl_start_script_unsupported,
.flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 72fdf2a3577c..54aadeedf28c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -11,6 +11,7 @@ union perf_event;
struct perf_tool;
struct thread;
struct tep_plugin_list;
+struct evsel;
struct trace_event {
struct tep_handle *pevent;
@@ -71,16 +72,21 @@ struct perf_stat_config;
struct scripting_ops {
const char *name;
- int (*start_script) (const char *script, int argc, const char **argv);
+ const char *dirname; /* For script path .../scripts/<dirname>/... */
+ int (*start_script)(const char *script, int argc, const char **argv,
+ struct perf_session *session);
int (*flush_script) (void);
int (*stop_script) (void);
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al);
+ struct addr_location *al,
+ struct addr_location *addr_al);
void (*process_switch)(union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+ void (*process_auxtrace_error)(struct perf_session *session,
+ union perf_event *event);
void (*process_stat)(struct perf_stat_config *config,
struct evsel *evsel, u64 tstamp);
void (*process_stat_interval)(u64 tstamp);
@@ -91,16 +97,35 @@ extern unsigned int scripting_max_stack;
int script_spec_register(const char *spec, struct scripting_ops *ops);
+void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine);
+
void setup_perl_scripting(void);
void setup_python_scripting(void);
struct scripting_context {
struct tep_handle *pevent;
void *event_data;
+ union perf_event *event;
+ struct perf_sample *sample;
+ struct evsel *evsel;
+ struct addr_location *al;
+ struct addr_location *addr_al;
+ struct perf_session *session;
};
+void scripting_context__update(struct scripting_context *scripting_context,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct addr_location *al,
+ struct addr_location *addr_al);
+
int common_pc(struct scripting_context *context);
int common_flags(struct scripting_context *context);
int common_lock_depth(struct scripting_context *context);
+#define SAMPLE_FLAGS_BUF_SIZE 64
+int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz);
+
#endif /* _PERF_UTIL_TRACE_EVENT_H */
diff --git a/tools/rcu/rcu-cbs.py b/tools/rcu/rcu-cbs.py
new file mode 100644
index 000000000000..f8b461b9eaa7
--- /dev/null
+++ b/tools/rcu/rcu-cbs.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env drgn
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Dump out the number of RCU callbacks outstanding.
+#
+# On older kernels having multiple flavors of RCU, this dumps out the
+# number of callbacks for the most heavily used flavor.
+#
+# Usage: sudo drgn rcu-cbs.py
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+import sys
+import drgn
+from drgn import NULL, Object
+from drgn.helpers.linux import *
+
+def get_rdp0(prog):
+ try:
+ rdp0 = prog.variable('rcu_preempt_data', 'kernel/rcu/tree.c');
+ except LookupError:
+ rdp0 = NULL;
+
+ if rdp0 == NULL:
+ try:
+ rdp0 = prog.variable('rcu_sched_data',
+ 'kernel/rcu/tree.c');
+ except LookupError:
+ rdp0 = NULL;
+
+ if rdp0 == NULL:
+ rdp0 = prog.variable('rcu_data', 'kernel/rcu/tree.c');
+ return rdp0.address_of_();
+
+rdp0 = get_rdp0(prog);
+
+# Sum up RCU callbacks.
+sum = 0;
+for cpu in for_each_possible_cpu(prog):
+ rdp = per_cpu_ptr(rdp0, cpu);
+ len = rdp.cblist.len.value_();
+ # print("CPU " + str(cpu) + " RCU callbacks: " + str(len));
+ sum += len;
+print("Number of RCU callbacks in flight: " + str(sum));
diff --git a/tools/testing/ktest/examples/bootconfigs/boottrace.bconf b/tools/testing/ktest/examples/bootconfigs/boottrace.bconf
new file mode 100644
index 000000000000..9db64ec589d5
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/boottrace.bconf
@@ -0,0 +1,49 @@
+ftrace.event {
+ task.task_newtask {
+ filter = "pid < 128"
+ enable
+ }
+ kprobes.vfs_read {
+ probes = "vfs_read $arg1 $arg2"
+ filter = "common_pid < 200"
+ enable
+ }
+ synthetic.initcall_latency {
+ fields = "unsigned long func", "u64 lat"
+ actions = "hist:keys=func.sym,lat:vals=lat:sort=lat"
+ }
+ initcall.initcall_start {
+ actions = "hist:keys=func:ts0=common_timestamp.usecs"
+ }
+ initcall.initcall_finish {
+ actions = "hist:keys=func:lat=common_timestamp.usecs-$ts0:onmatch(initcall.initcall_start).initcall_latency(func,$lat)"
+ }
+}
+
+ftrace.instance {
+ foo {
+ tracer = "function"
+ ftrace.filters = "user_*"
+ cpumask = 1
+ options = nosym-addr
+ buffer_size = 512KB
+ trace_clock = mono
+ event.signal.signal_deliver.actions=snapshot
+ }
+ bar {
+ tracer = "function"
+ ftrace.filters = "kernel_*"
+ cpumask = 2
+ trace_clock = x86-tsc
+ }
+}
+
+ftrace.alloc_snapshot
+
+kernel {
+ trace_options = sym-addr
+ trace_event = "initcall:*"
+ trace_buf_size = 1M
+ ftrace = function
+ ftrace_filter = "vfs*"
+}
diff --git a/tools/testing/ktest/examples/bootconfigs/config-bootconfig b/tools/testing/ktest/examples/bootconfigs/config-bootconfig
new file mode 100644
index 000000000000..0685b6811388
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/config-bootconfig
@@ -0,0 +1 @@
+CONFIG_CMDLINE="bootconfig"
diff --git a/tools/testing/ktest/examples/bootconfigs/functiongraph.bconf b/tools/testing/ktest/examples/bootconfigs/functiongraph.bconf
new file mode 100644
index 000000000000..68debfcbda76
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/functiongraph.bconf
@@ -0,0 +1,15 @@
+ftrace {
+ tracing_on = 0 # off by default
+ tracer = function_graph
+ event.kprobes {
+ start_event {
+ probes = "pci_proc_init"
+ actions = "traceon"
+ }
+ end_event {
+ probes = "pci_proc_init%return"
+ actions = "traceoff"
+ }
+ }
+}
+
diff --git a/tools/testing/ktest/examples/bootconfigs/tracing.bconf b/tools/testing/ktest/examples/bootconfigs/tracing.bconf
new file mode 100644
index 000000000000..bf117c78115a
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/tracing.bconf
@@ -0,0 +1,33 @@
+ftrace {
+ tracer = function_graph;
+ options = event-fork, sym-addr, stacktrace;
+ buffer_size = 1M;
+ alloc_snapshot;
+ trace_clock = global;
+ events = "task:task_newtask", "initcall:*";
+ event.sched.sched_process_exec {
+ filter = "pid < 128";
+ }
+ instance.bar {
+ event.kprobes {
+ myevent {
+ probes = "vfs_read $arg2 $arg3";
+ }
+ myevent2 {
+ probes = "vfs_write $arg2 +0($arg2):ustring $arg3";
+ }
+ myevent3 {
+ probes = "initrd_load";
+ }
+ enable
+ }
+ }
+ instance.foo {
+ tracer = function;
+ tracing_on = false;
+ };
+}
+kernel {
+ ftrace_dump_on_oops = "orig_cpu"
+ traceoff_on_warning
+}
diff --git a/tools/testing/ktest/examples/bootconfigs/verify-boottrace.sh b/tools/testing/ktest/examples/bootconfigs/verify-boottrace.sh
new file mode 100755
index 000000000000..f271940ce7fb
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/verify-boottrace.sh
@@ -0,0 +1,84 @@
+#!/bin/sh
+
+cd /sys/kernel/tracing
+
+compare_file() {
+ file="$1"
+ val="$2"
+ content=`cat $file`
+ if [ "$content" != "$val" ]; then
+ echo "FAILED: $file has '$content', expected '$val'"
+ exit 1
+ fi
+}
+
+compare_file_partial() {
+ file="$1"
+ val="$2"
+ content=`cat $file | sed -ne "/^$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not contain '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+file_contains() {
+ file=$1
+ val="$2"
+
+ if ! grep -q "$val" $file ; then
+ echo "FAILED: $file does not contain $val"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_mask() {
+ file=$1
+ val="$2"
+
+ content=`cat $file | sed -ne "/^[0 ]*$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not have mask '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_file "events/task/task_newtask/filter" "pid < 128"
+compare_file "events/task/task_newtask/enable" "1"
+
+compare_file "events/kprobes/vfs_read/filter" "common_pid < 200"
+compare_file "events/kprobes/vfs_read/enable" "1"
+
+compare_file_partial "events/synthetic/initcall_latency/trigger" "hist:keys=func.sym,lat:vals=hitcount,lat:sort=lat"
+compare_file_partial "events/synthetic/initcall_latency/enable" "0"
+
+compare_file_partial "events/initcall/initcall_start/trigger" "hist:keys=func:vals=hitcount:ts0=common_timestamp.usecs"
+compare_file_partial "events/initcall/initcall_start/enable" "1"
+
+compare_file_partial "events/initcall/initcall_finish/trigger" 'hist:keys=func:vals=hitcount:lat=common_timestamp.usecs-\$ts0:sort=hitcount:size=2048:clock=global:onmatch(initcall.initcall_start).initcall_latency(func,\$lat)'
+compare_file_partial "events/initcall/initcall_finish/enable" "1"
+
+compare_file "instances/foo/current_tracer" "function"
+file_contains "instances/foo/set_ftrace_filter" "^user"
+compare_file "instances/foo/buffer_size_kb" "512"
+compare_mask "instances/foo/tracing_cpumask" "1"
+compare_file "instances/foo/options/sym-addr" "0"
+file_contains "instances/foo/trace_clock" '\[mono\]'
+compare_file_partial "instances/foo/events/signal/signal_deliver/trigger" "snapshot"
+
+compare_file "instances/bar/current_tracer" "function"
+file_contains "instances/bar/set_ftrace_filter" "^kernel"
+compare_mask "instances/bar/tracing_cpumask" "2"
+file_contains "instances/bar/trace_clock" '\[x86-tsc\]'
+
+file_contains "snapshot" "Snapshot is allocated"
+compare_file "options/sym-addr" "1"
+compare_file "events/initcall/enable" "1"
+compare_file "buffer_size_kb" "1027"
+compare_file "current_tracer" "function"
+file_contains "set_ftrace_filter" '^vfs'
+
+exit 0
diff --git a/tools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh b/tools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh
new file mode 100755
index 000000000000..b50baa10fe97
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+cd /sys/kernel/tracing
+
+compare_file() {
+ file="$1"
+ val="$2"
+ content=`cat $file`
+ if [ "$content" != "$val" ]; then
+ echo "FAILED: $file has '$content', expected '$val'"
+ exit 1
+ fi
+}
+
+compare_file_partial() {
+ file="$1"
+ val="$2"
+ content=`cat $file | sed -ne "/^$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not contain '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+file_contains() {
+ file=$1
+ val="$2"
+
+ if ! grep -q "$val" $file ; then
+ echo "FAILED: $file does not contain $val"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_mask() {
+ file=$1
+ val="$2"
+
+ content=`cat $file | sed -ne "/^[0 ]*$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not have mask '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+
+compare_file "tracing_on" "0"
+compare_file "current_tracer" "function_graph"
+
+compare_file_partial "events/kprobes/start_event/enable" "1"
+compare_file_partial "events/kprobes/start_event/trigger" "traceon"
+file_contains "kprobe_events" 'start_event.*pci_proc_init'
+
+compare_file_partial "events/kprobes/end_event/enable" "1"
+compare_file_partial "events/kprobes/end_event/trigger" "traceoff"
+file_contains "kprobe_events" '^r.*end_event.*pci_proc_init'
+
+exit 0
diff --git a/tools/testing/ktest/examples/bootconfigs/verify-tracing.sh b/tools/testing/ktest/examples/bootconfigs/verify-tracing.sh
new file mode 100755
index 000000000000..01e111e36e63
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/verify-tracing.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+
+cd /sys/kernel/tracing
+
+compare_file() {
+ file="$1"
+ val="$2"
+ content=`cat $file`
+ if [ "$content" != "$val" ]; then
+ echo "FAILED: $file has '$content', expected '$val'"
+ exit 1
+ fi
+}
+
+compare_file_partial() {
+ file="$1"
+ val="$2"
+ content=`cat $file | sed -ne "/^$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not contain '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+file_contains() {
+ file=$1
+ val="$2"
+
+ if ! grep -q "$val" $file ; then
+ echo "FAILED: $file does not contain $val"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_mask() {
+ file=$1
+ val="$2"
+
+ content=`cat $file | sed -ne "/^[0 ]*$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not have mask '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_file "current_tracer" "function_graph"
+compare_file "options/event-fork" "1"
+compare_file "options/sym-addr" "1"
+compare_file "options/stacktrace" "1"
+compare_file "buffer_size_kb" "1024"
+file_contains "snapshot" "Snapshot is allocated"
+file_contains "trace_clock" '\[global\]'
+
+compare_file "events/initcall/enable" "1"
+compare_file "events/task/task_newtask/enable" "1"
+compare_file "events/sched/sched_process_exec/filter" "pid < 128"
+compare_file "events/kprobes/enable" "1"
+
+compare_file "instances/bar/events/kprobes/myevent/enable" "1"
+compare_file "instances/bar/events/kprobes/myevent2/enable" "1"
+compare_file "instances/bar/events/kprobes/myevent3/enable" "1"
+
+compare_file "instances/foo/current_tracer" "function"
+compare_file "instances/foo/tracing_on" "0"
+
+compare_file "/proc/sys/kernel/ftrace_dump_on_oops" "2"
+compare_file "/proc/sys/kernel/traceoff_on_warning" "1"
+
+exit 0
diff --git a/tools/testing/ktest/examples/include/bootconfig.conf b/tools/testing/ktest/examples/include/bootconfig.conf
new file mode 100644
index 000000000000..3b885de085bd
--- /dev/null
+++ b/tools/testing/ktest/examples/include/bootconfig.conf
@@ -0,0 +1,69 @@
+# bootconfig.conf
+#
+# Tests for some bootconfig scripts
+
+# Where the initrd is located on the target machine
+INITRD := /boot/initramfs-test.img
+
+# Install bootconfig on the target machine and define the path here.
+BOOTCONFIG := /usr/bin/bootconfig
+
+# Currently we just build the .config in the BUILD_DIR
+BUILD_TYPE := oldconfig
+
+# Helper macro to run bootconfig on the target
+# SSH is defined in include/defaults.conf
+ADD_BOOTCONFIG := ${SSH} "${BOOTCONFIG} -d ${INITRD} && ${BOOTCONFIG} -a /tmp/${BOOTCONFIG_FILE} ${INITRD}"
+
+# This copies a bootconfig script to the target and then will
+# add it to the initrd. SSH_USER is defined in include/defaults.conf
+# and MACHINE is defined in the example configs.
+BOOTCONFIG_TEST_PREP = scp ${BOOTCONFIG_PATH}${BOOTCONFIG_FILE} ${SSH_USER}@${MACHINE}:/tmp && ${ADD_BOOTCONFIG}
+
+# When a test is complete, remove the bootconfig from the initrd.
+CLEAR_BOOTCONFIG := ${SSH} "${BOOTCONFIG} -d ${INITRD}"
+
+# Run a verifier on the target after it has booted, to make sure that the
+# bootconfig script did what it was expected to do
+DO_TEST = scp ${BOOTCONFIG_PATH}${BOOTCONFIG_VERIFY} ${SSH_USER}@${MACHINE}:/tmp && ${SSH} /tmp/${BOOTCONFIG_VERIFY}
+
+# Comment this out to not run the boot configs
+RUN_BOOTCONFIG := 1
+
+TEST_START IF DEFINED RUN_BOOTCONFIG
+TEST_TYPE = test
+TEST_NAME = bootconfig boottrace
+# Just testing the bootconfig on initrd, no need to build the kernel
+BUILD_TYPE = nobuild
+BOOTCONFIG_FILE = boottrace.bconf
+BOOTCONFIG_VERIFY = verify-boottrace.sh
+ADD_CONFIG = ${ADD_CONFIG} ${BOOTCONFIG_PATH}/config-bootconfig
+PRE_TEST = ${BOOTCONFIG_TEST_PREP}
+PRE_TEST_DIE = 1
+TEST = ${DO_TEST}
+POST_TEST = ${CLEAR_BOOTCONFIG}
+
+TEST_START IF DEFINED RUN_BOOTCONFIG
+TEST_TYPE = test
+TEST_NAME = bootconfig function graph
+BUILD_TYPE = nobuild
+BOOTCONFIG_FILE = functiongraph.bconf
+BOOTCONFIG_VERIFY = verify-functiongraph.sh
+ADD_CONFIG = ${ADD_CONFIG} ${BOOTCONFIG_PATH}/config-bootconfig
+PRE_TEST = ${BOOTCONFIG_TEST_PREP}
+PRE_TEST_DIE = 1
+TEST = ${DO_TEST}
+POST_TEST = ${CLEAR_BOOTCONFIG}
+
+TEST_START IF DEFINED RUN_BOOTCONFIG
+TEST_TYPE = test
+TEST_NAME = bootconfig tracing
+BUILD_TYPE = nobuild
+BOOTCONFIG_FILE = tracing.bconf
+BOOTCONFIG_VERIFY = verify-tracing.sh
+ADD_CONFIG = ${ADD_CONFIG} ${BOOTCONFIG_PATH}/config-bootconfig
+PRE_TEST = ${BOOTCONFIG_TEST_PREP}
+PRE_TEST_DIE = 1
+TEST = ${DO_TEST}
+POST_TEST = ${CLEAR_BOOTCONFIG}
+
diff --git a/tools/testing/ktest/examples/kvm.conf b/tools/testing/ktest/examples/kvm.conf
index fbc134f9ac6e..c700e8bb7fde 100644
--- a/tools/testing/ktest/examples/kvm.conf
+++ b/tools/testing/ktest/examples/kvm.conf
@@ -90,3 +90,4 @@ INCLUDE include/patchcheck.conf
INCLUDE include/tests.conf
INCLUDE include/bisect.conf
INCLUDE include/min-config.conf
+INCLUDE include/bootconfig.conf
\ No newline at end of file
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
deleted file mode 100644
index 9235b7d42d38..000000000000
--- a/tools/testing/kunit/configs/all_tests.config
+++ /dev/null
@@ -1,3 +0,0 @@
-CONFIG_KUNIT=y
-CONFIG_KUNIT_TEST=y
-CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/arch/um/configs/kunit_defconfig b/tools/testing/kunit/configs/default.config
index 9235b7d42d38..e67af7b9f1bb 100644
--- a/arch/um/configs/kunit_defconfig
+++ b/tools/testing/kunit/configs/default.config
@@ -1,3 +1,3 @@
CONFIG_KUNIT=y
-CONFIG_KUNIT_TEST=y
CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 5da8fb3762f9..6276ce0c0196 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -12,6 +12,8 @@ import sys
import os
import time
+assert sys.version_info >= (3, 7), "Python version is too old"
+
from collections import namedtuple
from enum import Enum, auto
@@ -70,10 +72,10 @@ def build_tests(linux: kunit_kernel.LinuxSourceTree,
kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
build_start = time.time()
- success = linux.build_um_kernel(request.alltests,
- request.jobs,
- request.build_dir,
- request.make_options)
+ success = linux.build_kernel(request.alltests,
+ request.jobs,
+ request.build_dir,
+ request.make_options)
build_end = time.time()
if not success:
return KunitResult(KunitStatus.BUILD_FAILURE,
@@ -189,6 +191,31 @@ def add_common_opts(parser) -> None:
'will get automatically appended.',
metavar='kunitconfig')
+ parser.add_argument('--arch',
+ help=('Specifies the architecture to run tests under. '
+ 'The architecture specified here must match the '
+ 'string passed to the ARCH make param, '
+ 'e.g. i386, x86_64, arm, um, etc. Non-UML '
+ 'architectures run on QEMU.'),
+ type=str, default='um', metavar='arch')
+
+ parser.add_argument('--cross_compile',
+ help=('Sets make\'s CROSS_COMPILE variable; it should '
+ 'be set to a toolchain path prefix (the prefix '
+ 'of gcc and other tools in your toolchain, for '
+ 'example `sparc64-linux-gnu-` if you have the '
+ 'sparc toolchain installed on your system, or '
+ '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` '
+ 'if you have downloaded the microblaze toolchain '
+ 'from the 0-day website to a directory in your '
+ 'home directory called `toolchains`).'),
+ metavar='cross_compile')
+
+ parser.add_argument('--qemu_config',
+			    help=('Takes a path to a file containing '
+ 'a QemuArchParams object.'),
+ type=str, metavar='qemu_config')
+
def add_build_opts(parser) -> None:
parser.add_argument('--jobs',
help='As in the make command, "Specifies the number of '
@@ -270,7 +297,11 @@ def main(argv, linux=None):
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_path=cli_args.kunitconfig,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config)
request = KunitRequest(cli_args.raw_output,
cli_args.timeout,
@@ -289,7 +320,11 @@ def main(argv, linux=None):
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_path=cli_args.kunitconfig,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config)
request = KunitConfigRequest(cli_args.build_dir,
cli_args.make_options)
@@ -301,7 +336,11 @@ def main(argv, linux=None):
sys.exit(1)
elif cli_args.subcommand == 'build':
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_path=cli_args.kunitconfig,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config)
request = KunitBuildRequest(cli_args.jobs,
cli_args.build_dir,
@@ -315,7 +354,11 @@ def main(argv, linux=None):
sys.exit(1)
elif cli_args.subcommand == 'exec':
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_path=cli_args.kunitconfig,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config)
exec_request = KunitExecRequest(cli_args.timeout,
cli_args.build_dir,
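
For orientation (not part of the patch): a minimal Python sketch of the LinuxSourceTree
constructor keywords that the command-line changes above feed through. The argument
values are illustrative only; it assumes it is run from tools/testing/kunit/ so that
kunit_kernel is importable.

    # Illustrative only: mirrors the constructor signature added in this patch.
    import kunit_kernel

    # 'um' keeps the old UML behaviour; any other arch needs a matching
    # tools/testing/kunit/qemu_configs/<arch>.py, or an explicit qemu_config_path.
    tree = kunit_kernel.LinuxSourceTree(
            '.kunit',
            kunitconfig_path='.kunitconfig',
            arch='x86_64',
            cross_compile=None,       # e.g. a toolchain prefix for cross builds
            qemu_config_path=None)    # path to a file defining QEMU_ARCH, if used
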
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index 1e2683dcc0e7..c77c7d2ef622 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -52,8 +52,13 @@ class Kconfig(object):
return False
return True
+ def merge_in_entries(self, other: 'Kconfig') -> None:
+ if other.is_subset_of(self):
+ return
+ self._entries = list(self.entries().union(other.entries()))
+
def write_to_file(self, path: str) -> None:
- with open(path, 'w') as f:
+ with open(path, 'a+') as f:
for entry in self.entries():
f.write(str(entry) + '\n')
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 89a7d4024e87..2c6f916ccbaf 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -6,23 +6,29 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
+import importlib.util
import logging
import subprocess
import os
import shutil
import signal
-from typing import Iterator
+from typing import Iterator, Optional, Tuple
from contextlib import ExitStack
+from collections import namedtuple
+
import kunit_config
import kunit_parser
+import qemu_config
KCONFIG_PATH = '.config'
KUNITCONFIG_PATH = '.kunitconfig'
-DEFAULT_KUNITCONFIG_PATH = 'arch/um/configs/kunit_defconfig'
+DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config'
BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config'
OUTFILE_PATH = 'test.log'
+ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
+QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs')
def get_file_path(build_dir, default):
if build_dir:
@@ -40,6 +46,10 @@ class BuildError(Exception):
class LinuxSourceTreeOperations(object):
"""An abstraction over command line operations performed on a source tree."""
+ def __init__(self, linux_arch: str, cross_compile: Optional[str]):
+ self._linux_arch = linux_arch
+ self._cross_compile = cross_compile
+
def make_mrproper(self) -> None:
try:
subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
@@ -48,12 +58,21 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
+ def make_arch_qemuconfig(self, kconfig: kunit_config.Kconfig) -> None:
+ pass
+
+ def make_allyesconfig(self, build_dir, make_options) -> None:
+ raise ConfigError('Only the "um" arch is supported for alltests')
+
def make_olddefconfig(self, build_dir, make_options) -> None:
- command = ['make', 'ARCH=um', 'olddefconfig']
+ command = ['make', 'ARCH=' + self._linux_arch, 'olddefconfig']
+ if self._cross_compile:
+ command += ['CROSS_COMPILE=' + self._cross_compile]
if make_options:
command.extend(make_options)
if build_dir:
command += ['O=' + build_dir]
+ print('Populating config with:\n$', ' '.join(command))
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except OSError as e:
@@ -61,6 +80,79 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
+ def make(self, jobs, build_dir, make_options) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, '--jobs=' + str(jobs)]
+ if make_options:
+ command.extend(make_options)
+ if self._cross_compile:
+ command += ['CROSS_COMPILE=' + self._cross_compile]
+ if build_dir:
+ command += ['O=' + build_dir]
+ print('Building with:\n$', ' '.join(command))
+ try:
+ proc = subprocess.Popen(command,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.DEVNULL)
+ except OSError as e:
+			raise BuildError('Could not call make: ' + str(e))
+ except subprocess.CalledProcessError as e:
+ raise BuildError(e.output)
+ _, stderr = proc.communicate()
+ if proc.returncode != 0:
+ raise BuildError(stderr.decode())
+ if stderr: # likely only due to build warnings
+ print(stderr.decode())
+
+ def run(self, params, timeout, build_dir, outfile) -> None:
+ pass
+
+
+class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
+
+ def __init__(self, qemu_arch_params: qemu_config.QemuArchParams, cross_compile: Optional[str]):
+ super().__init__(linux_arch=qemu_arch_params.linux_arch,
+ cross_compile=cross_compile)
+ self._kconfig = qemu_arch_params.kconfig
+ self._qemu_arch = qemu_arch_params.qemu_arch
+ self._kernel_path = qemu_arch_params.kernel_path
+ self._kernel_command_line = qemu_arch_params.kernel_command_line + ' kunit_shutdown=reboot'
+ self._extra_qemu_params = qemu_arch_params.extra_qemu_params
+
+ def make_arch_qemuconfig(self, base_kunitconfig: kunit_config.Kconfig) -> None:
+ kconfig = kunit_config.Kconfig()
+ kconfig.parse_from_string(self._kconfig)
+ base_kunitconfig.merge_in_entries(kconfig)
+
+ def run(self, params, timeout, build_dir, outfile):
+ kernel_path = os.path.join(build_dir, self._kernel_path)
+ qemu_command = ['qemu-system-' + self._qemu_arch,
+ '-nodefaults',
+ '-m', '1024',
+ '-kernel', kernel_path,
+ '-append', '\'' + ' '.join(params + [self._kernel_command_line]) + '\'',
+ '-no-reboot',
+ '-nographic',
+ '-serial stdio'] + self._extra_qemu_params
+ print('Running tests with:\n$', ' '.join(qemu_command))
+ with open(outfile, 'w') as output:
+ process = subprocess.Popen(' '.join(qemu_command),
+ stdin=subprocess.PIPE,
+ stdout=output,
+ stderr=subprocess.STDOUT,
+ text=True, shell=True)
+ try:
+ process.wait(timeout=timeout)
+ except Exception as e:
+ print(e)
+ process.terminate()
+ return process
+
+class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
+ """An abstraction over command line operations performed on a source tree."""
+
+ def __init__(self, cross_compile=None):
+ super().__init__(linux_arch='um', cross_compile=cross_compile)
+
def make_allyesconfig(self, build_dir, make_options) -> None:
kunit_parser.print_with_timestamp(
'Enabling all CONFIGs for UML...')
@@ -83,32 +175,16 @@ class LinuxSourceTreeOperations(object):
kunit_parser.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
- def make(self, jobs, build_dir, make_options) -> None:
- command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
- if make_options:
- command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
- try:
- proc = subprocess.Popen(command,
- stderr=subprocess.PIPE,
- stdout=subprocess.DEVNULL)
- except OSError as e:
- raise BuildError('Could not call make command: ' + str(e))
- _, stderr = proc.communicate()
- if proc.returncode != 0:
- raise BuildError(stderr.decode())
- if stderr: # likely only due to build warnings
- print(stderr.decode())
-
- def linux_bin(self, params, timeout, build_dir) -> None:
+ def run(self, params, timeout, build_dir, outfile):
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = get_file_path(build_dir, 'linux')
outfile = get_outfile_path(build_dir)
with open(outfile, 'w') as output:
process = subprocess.Popen([linux_bin] + params,
+ stdin=subprocess.PIPE,
stdout=output,
- stderr=subprocess.STDOUT)
+ stderr=subprocess.STDOUT,
+ text=True)
process.wait(timeout)
def get_kconfig_path(build_dir) -> str:
@@ -120,13 +196,54 @@ def get_kunitconfig_path(build_dir) -> str:
def get_outfile_path(build_dir) -> str:
return get_file_path(build_dir, OUTFILE_PATH)
+def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceTreeOperations:
+ config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py')
+ if arch == 'um':
+ return LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
+ elif os.path.isfile(config_path):
+ return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1]
+ else:
+ raise ConfigError(arch + ' is not a valid arch')
+
+def get_source_tree_ops_from_qemu_config(config_path: str,
+ cross_compile: Optional[str]) -> Tuple[
+ str, LinuxSourceTreeOperations]:
+ # The module name/path has very little to do with where the actual file
+ # exists (I learned this through experimentation and could not find it
+ # anywhere in the Python documentation).
+ #
+	# Basically, we completely ignore the actual file location of the config
+ # we are loading and just tell Python that the module lives in the
+ # QEMU_CONFIGS_DIR for import purposes regardless of where it actually
+ # exists as a file.
+ module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path))
+ spec = importlib.util.spec_from_file_location(module_path, config_path)
+ config = importlib.util.module_from_spec(spec)
+ # TODO(brendanhiggins@google.com): I looked this up and apparently other
+ # Python projects have noted that pytype complains that "No attribute
+ # 'exec_module' on _importlib_modulespec._Loader". Disabling for now.
+ spec.loader.exec_module(config) # pytype: disable=attribute-error
+ return config.QEMU_ARCH.linux_arch, LinuxSourceTreeOperationsQemu(
+ config.QEMU_ARCH, cross_compile=cross_compile)
+
class LinuxSourceTree(object):
"""Represents a Linux kernel source tree with KUnit tests."""
- def __init__(self, build_dir: str, load_config=True, kunitconfig_path='') -> None:
+ def __init__(
+ self,
+ build_dir: str,
+ load_config=True,
+ kunitconfig_path='',
+ arch=None,
+ cross_compile=None,
+ qemu_config_path=None) -> None:
signal.signal(signal.SIGINT, self.signal_handler)
-
- self._ops = LinuxSourceTreeOperations()
+ if qemu_config_path:
+ self._arch, self._ops = get_source_tree_ops_from_qemu_config(
+ qemu_config_path, cross_compile)
+ else:
+ self._arch = 'um' if arch is None else arch
+ self._ops = get_source_tree_ops(self._arch, cross_compile)
if not load_config:
return
@@ -170,8 +287,9 @@ class LinuxSourceTree(object):
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
- self._kconfig.write_to_file(kconfig_path)
try:
+ self._ops.make_arch_qemuconfig(self._kconfig)
+ self._kconfig.write_to_file(kconfig_path)
self._ops.make_olddefconfig(build_dir, make_options)
except ConfigError as e:
logging.error(e)
@@ -184,6 +302,7 @@ class LinuxSourceTree(object):
if os.path.exists(kconfig_path):
existing_kconfig = kunit_config.Kconfig()
existing_kconfig.read_from_file(kconfig_path)
+ self._ops.make_arch_qemuconfig(self._kconfig)
if not self._kconfig.is_subset_of(existing_kconfig):
print('Regenerating .config ...')
os.remove(kconfig_path)
@@ -194,7 +313,7 @@ class LinuxSourceTree(object):
print('Generating .config ...')
return self.build_config(build_dir, make_options)
- def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
+ def build_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
try:
if alltests:
self._ops.make_allyesconfig(build_dir, make_options)
@@ -208,11 +327,11 @@ class LinuxSourceTree(object):
def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]:
if not args:
args = []
- args.extend(['mem=1G', 'console=tty'])
+ args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
- self._ops.linux_bin(args, timeout, build_dir)
outfile = get_outfile_path(build_dir)
+ self._ops.run(args, timeout, build_dir, outfile)
subprocess.call(['stty', 'sane'])
with open(outfile, 'r') as file:
for line in file:
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index e8bcc139702e..b88db3f51dc5 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -43,26 +43,68 @@ class TestCase(object):
class TestStatus(Enum):
SUCCESS = auto()
FAILURE = auto()
+ SKIPPED = auto()
TEST_CRASHED = auto()
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
+class LineStream:
+ """Provides a peek()/pop() interface over an iterator of (line#, text)."""
+ _lines: Iterator[Tuple[int, str]]
+ _next: Tuple[int, str]
+ _done: bool
+
+ def __init__(self, lines: Iterator[Tuple[int, str]]):
+ self._lines = lines
+ self._done = False
+ self._next = (0, '')
+ self._get_next()
+
+ def _get_next(self) -> None:
+ try:
+ self._next = next(self._lines)
+ except StopIteration:
+ self._done = True
+
+ def peek(self) -> str:
+ return self._next[1]
+
+ def pop(self) -> str:
+ n = self._next
+ self._get_next()
+ return n[1]
+
+ def __bool__(self) -> bool:
+ return not self._done
+
+ # Only used by kunit_tool_test.py.
+ def __iter__(self) -> Iterator[str]:
+ while bool(self):
+ yield self.pop()
+
+ def line_number(self) -> int:
+ return self._next[0]
+
kunit_start_re = re.compile(r'TAP version [0-9]+$')
kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:)')
-
-def isolate_kunit_output(kernel_output) -> Iterator[str]:
- started = False
- for line in kernel_output:
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
- prefix_len = len(line.split('TAP version')[0])
- started = True
- yield line[prefix_len:] if prefix_len > 0 else line
- elif kunit_end_re.search(line):
- break
- elif started:
- yield line[prefix_len:] if prefix_len > 0 else line
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+
+def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
+ def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]:
+ line_num = 0
+ started = False
+ for line in kernel_output:
+ line_num += 1
+ line = line.rstrip() # line always has a trailing \n
+ if kunit_start_re.search(line):
+ prefix_len = len(line.split('TAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif kunit_end_re.search(line):
+ break
+ elif started:
+ yield line_num, line[prefix_len:]
+ return LineStream(lines=isolate_kunit_output(kernel_output))
def raw_output(kernel_output) -> None:
for line in kernel_output:
@@ -97,34 +139,40 @@ def print_log(log) -> None:
TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
-def consume_non_diagnostic(lines: List[str]) -> None:
- while lines and not TAP_ENTRIES.match(lines[0]):
- lines.pop(0)
+def consume_non_diagnostic(lines: LineStream) -> None:
+ while lines and not TAP_ENTRIES.match(lines.peek()):
+ lines.pop()
-def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines[0]):
- test_case.log.append(lines[0])
- lines.pop(0)
+def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None:
+ while lines and not TAP_ENTRIES.match(lines.peek()):
+ test_case.log.append(lines.peek())
+ lines.pop()
OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
+OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+
OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
-def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
+def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool:
save_non_diagnostic(lines, test_case)
if not lines:
test_case.status = TestStatus.TEST_CRASHED
return True
- line = lines[0]
+ line = lines.peek()
match = OK_NOT_OK_SUBTEST.match(line)
while not match and lines:
- line = lines.pop(0)
+ line = lines.pop()
match = OK_NOT_OK_SUBTEST.match(line)
if match:
- test_case.log.append(lines.pop(0))
+ test_case.log.append(lines.pop())
test_case.name = match.group(2)
+ skip_match = OK_NOT_OK_SKIP.match(line)
+ if skip_match:
+ test_case.status = TestStatus.SKIPPED
+ return True
if test_case.status == TestStatus.TEST_CRASHED:
return True
if match.group(1) == 'ok':
@@ -138,14 +186,14 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
+def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool:
save_non_diagnostic(lines, test_case)
if not lines:
return False
- line = lines[0]
+ line = lines.peek()
match = SUBTEST_DIAGNOSTIC.match(line)
if match:
- test_case.log.append(lines.pop(0))
+ test_case.log.append(lines.pop())
crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
if crash_match:
test_case.status = TestStatus.TEST_CRASHED
@@ -153,7 +201,7 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
else:
return False
-def parse_test_case(lines: List[str]) -> Optional[TestCase]:
+def parse_test_case(lines: LineStream) -> Optional[TestCase]:
test_case = TestCase()
save_non_diagnostic(lines, test_case)
while parse_diagnostic(lines, test_case):
@@ -165,55 +213,58 @@ def parse_test_case(lines: List[str]) -> Optional[TestCase]:
SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
-def parse_subtest_header(lines: List[str]) -> Optional[str]:
+def parse_subtest_header(lines: LineStream) -> Optional[str]:
consume_non_diagnostic(lines)
if not lines:
return None
- match = SUBTEST_HEADER.match(lines[0])
+ match = SUBTEST_HEADER.match(lines.peek())
if match:
- lines.pop(0)
+ lines.pop()
return match.group(1)
else:
return None
SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
-def parse_subtest_plan(lines: List[str]) -> Optional[int]:
+def parse_subtest_plan(lines: LineStream) -> Optional[int]:
consume_non_diagnostic(lines)
- match = SUBTEST_PLAN.match(lines[0])
+ match = SUBTEST_PLAN.match(lines.peek())
if match:
- lines.pop(0)
+ lines.pop()
return int(match.group(1))
else:
return None
def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
+ if left == right:
+ return left
+ elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
return TestStatus.TEST_CRASHED
elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
return TestStatus.FAILURE
- elif left != TestStatus.SUCCESS:
- return left
- elif right != TestStatus.SUCCESS:
+ elif left == TestStatus.SKIPPED:
return right
else:
- return TestStatus.SUCCESS
+ return left
-def parse_ok_not_ok_test_suite(lines: List[str],
+def parse_ok_not_ok_test_suite(lines: LineStream,
test_suite: TestSuite,
expected_suite_index: int) -> bool:
consume_non_diagnostic(lines)
if not lines:
test_suite.status = TestStatus.TEST_CRASHED
return False
- line = lines[0]
+ line = lines.peek()
match = OK_NOT_OK_MODULE.match(line)
if match:
- lines.pop(0)
+ lines.pop()
if match.group(1) == 'ok':
test_suite.status = TestStatus.SUCCESS
else:
test_suite.status = TestStatus.FAILURE
+ skip_match = OK_NOT_OK_SKIP.match(line)
+ if skip_match:
+ test_suite.status = TestStatus.SKIPPED
suite_index = int(match.group(2))
if suite_index != expected_suite_index:
print_with_timestamp(
@@ -224,14 +275,14 @@ def parse_ok_not_ok_test_suite(lines: List[str],
else:
return False
-def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, statuses, TestStatus.SUCCESS)
+def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
+ return reduce(max_status, status_list, TestStatus.SKIPPED)
def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
return max_status(max_test_case_status, test_suite.status)
-def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
+def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]:
if not lines:
return None
consume_non_diagnostic(lines)
@@ -257,26 +308,26 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[Te
print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
return test_suite
else:
- print('failed to parse end of suite' + lines[0])
+ print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}')
return None
TAP_HEADER = re.compile(r'^TAP version 14$')
-def parse_tap_header(lines: List[str]) -> bool:
+def parse_tap_header(lines: LineStream) -> bool:
consume_non_diagnostic(lines)
- if TAP_HEADER.match(lines[0]):
- lines.pop(0)
+ if TAP_HEADER.match(lines.peek()):
+ lines.pop()
return True
else:
return False
TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
-def parse_test_plan(lines: List[str]) -> Optional[int]:
+def parse_test_plan(lines: LineStream) -> Optional[int]:
consume_non_diagnostic(lines)
- match = TEST_PLAN.match(lines[0])
+ match = TEST_PLAN.match(lines.peek())
if match:
- lines.pop(0)
+ lines.pop()
return int(match.group(1))
else:
return None
@@ -284,12 +335,14 @@ def parse_test_plan(lines: List[str]) -> Optional[int]:
def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
return bubble_up_errors(x.status for x in test_suites)
-def parse_test_result(lines: List[str]) -> TestResult:
+def parse_test_result(lines: LineStream) -> TestResult:
consume_non_diagnostic(lines)
if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.NO_TESTS, [], lines)
+ return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
expected_test_suite_num = parse_test_plan(lines)
- if not expected_test_suite_num:
+ if expected_test_suite_num == 0:
+ return TestResult(TestStatus.NO_TESTS, [], lines)
+ elif expected_test_suite_num is None:
return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
test_suites = []
for i in range(1, expected_test_suite_num + 1):
@@ -311,49 +364,69 @@ def parse_test_result(lines: List[str]) -> TestResult:
else:
return TestResult(TestStatus.NO_TESTS, [], lines)
-def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
- total_tests = 0
- failed_tests = 0
- crashed_tests = 0
+class TestCounts:
+ passed: int
+ failed: int
+ crashed: int
+ skipped: int
+
+ def __init__(self):
+ self.passed = 0
+ self.failed = 0
+ self.crashed = 0
+ self.skipped = 0
+
+ def total(self) -> int:
+ return self.passed + self.failed + self.crashed + self.skipped
+
+def print_and_count_results(test_result: TestResult) -> TestCounts:
+ counts = TestCounts()
for test_suite in test_result.suites:
if test_suite.status == TestStatus.SUCCESS:
print_suite_divider(green('[PASSED] ') + test_suite.name)
+ elif test_suite.status == TestStatus.SKIPPED:
+ print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
elif test_suite.status == TestStatus.TEST_CRASHED:
print_suite_divider(red('[CRASHED] ' + test_suite.name))
else:
print_suite_divider(red('[FAILED] ') + test_suite.name)
for test_case in test_suite.cases:
- total_tests += 1
if test_case.status == TestStatus.SUCCESS:
+ counts.passed += 1
print_with_timestamp(green('[PASSED] ') + test_case.name)
+ elif test_case.status == TestStatus.SKIPPED:
+ counts.skipped += 1
+ print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
elif test_case.status == TestStatus.TEST_CRASHED:
- crashed_tests += 1
+ counts.crashed += 1
print_with_timestamp(red('[CRASHED] ' + test_case.name))
print_log(map(yellow, test_case.log))
print_with_timestamp('')
else:
- failed_tests += 1
+ counts.failed += 1
print_with_timestamp(red('[FAILED] ') + test_case.name)
print_log(map(yellow, test_case.log))
print_with_timestamp('')
- return total_tests, failed_tests, crashed_tests
+ return counts
-def parse_run_tests(kernel_output) -> TestResult:
- total_tests = 0
- failed_tests = 0
- crashed_tests = 0
- test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
+ counts = TestCounts()
+ lines = extract_tap_lines(kernel_output)
+ test_result = parse_test_result(lines)
if test_result.status == TestStatus.NO_TESTS:
print(red('[ERROR] ') + yellow('no tests run!'))
elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
print(red('[ERROR] ') + yellow('could not parse test results!'))
else:
- (total_tests,
- failed_tests,
- crashed_tests) = print_and_count_results(test_result)
+ counts = print_and_count_results(test_result)
print_with_timestamp(DIVIDER)
- fmt = green if test_result.status == TestStatus.SUCCESS else red
+ if test_result.status == TestStatus.SUCCESS:
+ fmt = green
+ elif test_result.status == TestStatus.SKIPPED:
+ fmt = yellow
+ else:
+		fmt = red
print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
- (total_tests, failed_tests, crashed_tests)))
+ fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
+ (counts.total(), counts.failed, counts.crashed, counts.skipped)))
return test_result
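
As a quick illustration of the LineStream interface added above (this snippet is not
part of the patch): peek() returns the current line without consuming it, pop()
consumes it and advances, and line_number() reports the number of the line peek()
would return. A hypothetical example, assuming it runs alongside kunit_parser.py:

    from kunit_parser import LineStream

    stream = LineStream(iter(enumerate(['TAP version 14', '1..1'], start=1)))
    assert stream.peek() == 'TAP version 14'   # does not consume the line
    assert stream.line_number() == 1
    assert stream.pop() == 'TAP version 14'    # consumes and advances
    assert stream.peek() == '1..1' and stream.line_number() == 2
    stream.pop()
    assert not stream                          # exhausted stream is falsy
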
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 2e809dd956a7..75045aa0f8a1 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -11,6 +11,7 @@ from unittest import mock
import tempfile, shutil # Handling test_tmpdir
+import itertools
import json
import signal
import os
@@ -92,17 +93,18 @@ class KconfigTest(unittest.TestCase):
class KUnitParserTest(unittest.TestCase):
- def assertContains(self, needle, haystack):
- for line in haystack:
+ def assertContains(self, needle: str, haystack: kunit_parser.LineStream):
+ # Clone the iterator so we can print the contents on failure.
+ copy, backup = itertools.tee(haystack)
+ for line in copy:
if needle in line:
return
- raise AssertionError('"' +
- str(needle) + '" not found in "' + str(haystack) + '"!')
+ raise AssertionError(f'"{needle}" not found in {list(backup)}!')
def test_output_isolated_correctly(self):
log_path = test_data_path('test_output_isolated_correctly.log')
with open(log_path) as file:
- result = kunit_parser.isolate_kunit_output(file.readlines())
+ result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
self.assertContains(' # Subtest: example', result)
self.assertContains(' 1..2', result)
@@ -113,7 +115,7 @@ class KUnitParserTest(unittest.TestCase):
def test_output_with_prefix_isolated_correctly(self):
log_path = test_data_path('test_pound_sign.log')
with open(log_path) as file:
- result = kunit_parser.isolate_kunit_output(file.readlines())
+ result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
self.assertContains(' # Subtest: kunit-resource-test', result)
self.assertContains(' 1..5', result)
@@ -155,11 +157,21 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.TestStatus.FAILURE,
result.status)
+ def test_no_header(self):
+ empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
+ with open(empty_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()))
+ self.assertEqual(0, len(result.suites))
+ self.assertEqual(
+ kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
+ result.status)
+
def test_no_tests(self):
- empty_log = test_data_path('test_is_test_passed-no_tests_run.log')
+ empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.isolate_kunit_output(file.readlines()))
+ kunit_parser.extract_tap_lines(file.readlines()))
self.assertEqual(0, len(result.suites))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
@@ -170,8 +182,8 @@ class KUnitParserTest(unittest.TestCase):
print_mock = mock.patch('builtins.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
- kunit_parser.isolate_kunit_output(file.readlines()))
- print_mock.assert_any_call(StrContains('no tests run!'))
+ kunit_parser.extract_tap_lines(file.readlines()))
+ print_mock.assert_any_call(StrContains('could not parse test results!'))
print_mock.stop()
file.close()
@@ -183,6 +195,28 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
+ def test_skipped_test(self):
+ skipped_log = test_data_path('test_skip_tests.log')
+ file = open(skipped_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ # A skipped test does not fail the whole suite.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ file.close()
+
+ def test_skipped_all_tests(self):
+ skipped_log = test_data_path('test_skip_all_tests.log')
+ file = open(skipped_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ self.assertEqual(
+ kunit_parser.TestStatus.SKIPPED,
+ result.status)
+ file.close()
+
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
with open(prefix_log) as file:
@@ -285,7 +319,7 @@ class KUnitJsonTest(unittest.TestCase):
result["sub_groups"][1]["test_cases"][0])
def test_no_tests_json(self):
- result = self._json_for('test_is_test_passed-no_tests_run.log')
+ result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
class StrContains(str):
@@ -303,7 +337,7 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock = mock.Mock()
self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
- self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True)
+ self.linux_source_mock.build_kernel = mock.Mock(return_value=True)
self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
def test_config_passes_args_pass(self):
@@ -314,7 +348,7 @@ class KUnitMainTest(unittest.TestCase):
def test_build_passes_args_pass(self):
kunit.main(['build'], self.linux_source_mock)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
- self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '.kunit', None)
+ self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, '.kunit', None)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_exec_passes_args_pass(self):
@@ -396,7 +430,7 @@ class KUnitMainTest(unittest.TestCase):
def test_build_builddir(self):
build_dir = '.kunit'
kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock)
- self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, build_dir, None)
+ self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, build_dir, None)
def test_exec_builddir(self):
build_dir = '.kunit'
@@ -410,14 +444,22 @@ class KUnitMainTest(unittest.TestCase):
mock_linux_init.return_value = self.linux_source_mock
kunit.main(['run', '--kunitconfig=mykunitconfig'])
# Just verify that we parsed and initialized it correctly here.
- mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig')
+ mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_path='mykunitconfig',
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None)
@mock.patch.object(kunit_kernel, 'LinuxSourceTree')
def test_config_kunitconfig(self, mock_linux_init):
mock_linux_init.return_value = self.linux_source_mock
kunit.main(['config', '--kunitconfig=mykunitconfig'])
# Just verify that we parsed and initialized it correctly here.
- mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig')
+ mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_path='mykunitconfig',
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None)
if __name__ == '__main__':
unittest.main()
diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py
new file mode 100644
index 000000000000..1672f6184e95
--- /dev/null
+++ b/tools/testing/kunit/qemu_config.py
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Collection of configs for building non-UML kernels and running them on QEMU.
+#
+# Copyright (C) 2021, Google LLC.
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+from collections import namedtuple
+
+
+QemuArchParams = namedtuple('QemuArchParams', ['linux_arch',
+ 'kconfig',
+ 'qemu_arch',
+ 'kernel_path',
+ 'kernel_command_line',
+ 'extra_qemu_params'])
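
For reference, a file passed via --qemu_config is expected to look roughly like the
in-tree configs that follow: a module defining a QEMU_ARCH attribute built from the
QemuArchParams tuple above. The example below is a hypothetical sketch modeled on the
x86_64 config; the file name, the '-smp 2' parameter, and the absolute import of
qemu_config are assumptions (an out-of-tree config may need qemu_config on sys.path,
e.g. by sitting next to kunit.py).

    # Hypothetical my_qemu_config.py, used as:
    #   ./tools/testing/kunit/kunit.py run --qemu_config=my_qemu_config.py
    from qemu_config import QemuArchParams

    QEMU_ARCH = QemuArchParams(linux_arch='x86_64',
                               kconfig='''
    CONFIG_SERIAL_8250=y
    CONFIG_SERIAL_8250_CONSOLE=y''',
                               qemu_arch='x86_64',
                               kernel_path='arch/x86/boot/bzImage',
                               kernel_command_line='console=ttyS0',
                               extra_qemu_params=['-smp 2'])
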
diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py
new file mode 100644
index 000000000000..5d0c0cff03bd
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/alpha.py
@@ -0,0 +1,10 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='alpha',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='alpha',
+ kernel_path='arch/alpha/boot/vmlinux',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[''])
diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py
new file mode 100644
index 000000000000..b9c2a35e0296
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/arm.py
@@ -0,0 +1,13 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='arm',
+ kconfig='''
+CONFIG_ARCH_VIRT=y
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
+ qemu_arch='arm',
+ kernel_path='arch/arm/boot/zImage',
+ kernel_command_line='console=ttyAMA0',
+ extra_qemu_params=['-machine virt'])
diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py
new file mode 100644
index 000000000000..517c04459f47
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/arm64.py
@@ -0,0 +1,12 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='arm64',
+ kconfig='''
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
+ qemu_arch='aarch64',
+ kernel_path='arch/arm64/boot/Image.gz',
+ kernel_command_line='console=ttyAMA0',
+ extra_qemu_params=['-machine virt', '-cpu cortex-a57'])
diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py
new file mode 100644
index 000000000000..aed3ffd3937d
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/i386.py
@@ -0,0 +1,10 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='i386',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='x86_64',
+ kernel_path='arch/x86/boot/bzImage',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[''])
diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py
new file mode 100644
index 000000000000..35e9de24f0db
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/powerpc.py
@@ -0,0 +1,12 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='powerpc',
+ kconfig='''
+CONFIG_PPC64=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HVC_CONSOLE=y''',
+ qemu_arch='ppc64',
+ kernel_path='vmlinux',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M pseries', '-cpu power8'])
diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py
new file mode 100644
index 000000000000..9e528087cd7c
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/riscv.py
@@ -0,0 +1,31 @@
+from ..qemu_config import QemuArchParams
+import os
+import os.path
+import sys
+
+GITHUB_OPENSBI_URL = 'https://github.com/qemu/qemu/raw/master/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin'
+OPENSBI_FILE = os.path.basename(GITHUB_OPENSBI_URL)
+
+if not os.path.isfile(OPENSBI_FILE):
+ print('\n\nOpenSBI file is not in the current working directory.\n'
+ 'Would you like me to download it for you from:\n' + GITHUB_OPENSBI_URL + ' ?\n')
+ response = input('yes/[no]: ')
+ if response.strip() == 'yes':
+ os.system('wget ' + GITHUB_OPENSBI_URL)
+ else:
+ sys.exit()
+
+QEMU_ARCH = QemuArchParams(linux_arch='riscv',
+ kconfig='''
+CONFIG_SOC_VIRT=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''',
+ qemu_arch='riscv64',
+ kernel_path='arch/riscv/boot/Image',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[
+ '-machine virt',
+ '-cpu rv64',
+ '-bios opensbi-riscv64-generic-fw_dynamic.bin'])
diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py
new file mode 100644
index 000000000000..e310bd521113
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/s390.py
@@ -0,0 +1,14 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='s390',
+ kconfig='''
+CONFIG_EXPERT=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NUMA=y
+CONFIG_MODULES=y''',
+ qemu_arch='s390x',
+ kernel_path='arch/s390/boot/bzImage',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[
+ '-machine s390-ccw-virtio',
+ '-cpu qemu',])
diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py
new file mode 100644
index 000000000000..27f474e7ad6e
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/sparc.py
@@ -0,0 +1,10 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='sparc',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='sparc',
+ kernel_path='arch/sparc/boot/zImage',
+ kernel_command_line='console=ttyS0 mem=256M',
+ extra_qemu_params=['-m 256'])
diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py
new file mode 100644
index 000000000000..77ab1aeee8a3
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/x86_64.py
@@ -0,0 +1,10 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='x86_64',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='x86_64',
+ kernel_path='arch/x86/boot/bzImage',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[''])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log
index ba69f5c94b75..ba69f5c94b75 100644
--- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
new file mode 100644
index 000000000000..5f48ee659d40
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
@@ -0,0 +1,2 @@
+TAP version 14
+1..0
diff --git a/tools/testing/kunit/test_data/test_skip_all_tests.log b/tools/testing/kunit/test_data/test_skip_all_tests.log
new file mode 100644
index 000000000000..2ea6e6d14fff
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_skip_all_tests.log
@@ -0,0 +1,15 @@
+TAP version 14
+1..2
+ # Subtest: string-stream-test
+ 1..3
+ ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped
+ ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped
+ ok 3 - string_stream_test_get_string # SKIP all tests skipped
+ok 1 - string-stream-test # SKIP
+ # Subtest: example
+ 1..2
+ # example_simple_test: initializing
+ ok 1 - example_simple_test # SKIP all tests skipped
+ # example_skip_test: initializing
+ ok 2 - example_skip_test # SKIP this test should be skipped
+ok 2 - example # SKIP
diff --git a/tools/testing/kunit/test_data/test_skip_tests.log b/tools/testing/kunit/test_data/test_skip_tests.log
new file mode 100644
index 000000000000..79b326e31274
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_skip_tests.log
@@ -0,0 +1,15 @@
+TAP version 14
+1..2
+ # Subtest: string-stream-test
+ 1..3
+ ok 1 - string_stream_test_empty_on_creation
+ ok 2 - string_stream_test_not_empty_after_add
+ ok 3 - string_stream_test_get_string
+ok 1 - string-stream-test
+ # Subtest: example
+ 1..2
+ # example_simple_test: initializing
+ ok 1 - example_simple_test
+ # example_skip_test: initializing
+ ok 2 - example_skip_test # SKIP this test should be skipped
+ok 2 - example
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 54f367cbadae..b1bff5fb0f65 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -434,7 +434,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t,
dev_dbg(dev, "%s: transition out verify\n", __func__);
fw->state = FW_STATE_UPDATED;
fw->missed_activate = false;
- /* fall through */
+ fallthrough;
case FW_STATE_UPDATED:
nd_cmd->status = 0;
/* bogus test version */
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index ee27d68d2a1c..b5940e6ca67c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -715,6 +715,8 @@ out:
bpf_object__close(obj);
}
+#include "tailcall_bpf2bpf4.skel.h"
+
/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
* across tailcalls combined with bpf2bpf calls. for making sure that tailcall
* counter behaves correctly, bpf program will go through following flow:
@@ -727,10 +729,15 @@ out:
* the loop begins. At the end of the test make sure that the global counter is
* equal to 31, because tailcall counter includes the first two tailcalls
* whereas global counter is incremented only on loop presented on flow above.
+ *
+ * The noise parameter is used to insert bpf_map_update calls into the logic
+ * to force the verifier to patch instructions. This allows us to ensure jump
+ * logic remains correct with instruction movement.
*/
-static void test_tailcall_bpf2bpf_4(void)
+static void test_tailcall_bpf2bpf_4(bool noise)
{
- int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ int err, map_fd, prog_fd, main_fd, data_fd, i;
+ struct tailcall_bpf2bpf4__bss val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
@@ -774,11 +781,6 @@ static void test_tailcall_bpf2bpf_4(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
-
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
return;
@@ -788,9 +790,21 @@ static void test_tailcall_bpf2bpf_4(void)
return;
i = 0;
+ val.noise = noise;
+ val.count = 0;
+ err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
- CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
- err, errno, val);
+ CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val.count);
out:
bpf_object__close(obj);
@@ -815,5 +829,7 @@ void test_tailcalls(void)
if (test__start_subtest("tailcall_bpf2bpf_3"))
test_tailcall_bpf2bpf_3();
if (test__start_subtest("tailcall_bpf2bpf_4"))
- test_tailcall_bpf2bpf_4();
+ test_tailcall_bpf2bpf_4(false);
+ if (test__start_subtest("tailcall_bpf2bpf_5"))
+ test_tailcall_bpf2bpf_4(true);
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 77df6d4db895..e89368a50b97 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -3,6 +3,13 @@
#include <bpf/bpf_helpers.h>
struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} nop_table SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
@@ -10,10 +17,21 @@ struct {
} jmp_table SEC(".maps");
int count = 0;
+int noise = 0;
+
+__always_inline int subprog_noise(void)
+{
+ __u32 key = 0;
+
+ bpf_map_lookup_elem(&nop_table, &key);
+ return 0;
+}
__noinline
int subprog_tail_2(struct __sk_buff *skb)
{
+ if (noise)
+ subprog_noise();
bpf_tail_call_static(skb, &jmp_table, 2);
return skb->len * 3;
}
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
index 2c8935b3e65d..ee454327e5c6 100644
--- a/tools/testing/selftests/bpf/verifier/dead_code.c
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -159,3 +159,15 @@
.result = ACCEPT,
.retval = 2,
},
+{
+ "dead code: zero extension",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index a3e593ddfafc..2debba4e8a3a 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -1,4 +1,233 @@
{
+ "map access: known scalar += value_ptr unknown vs const",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr const vs unknown",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr const vs const (ne)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr const vs const (eq)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr unknown vs unknown (eq)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr unknown vs unknown (lt)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr unknown vs unknown (gt)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
"map access: known scalar += value_ptr from different maps",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
index e6eb78f0b954..9933ed24f901 100644
--- a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
@@ -57,6 +57,10 @@ enable_events() {
echo 1 > tracing_on
}
+other_task() {
+ sleep .001 || usleep 1 || sleep 1
+}
+
echo 0 > options/event-fork
do_reset
@@ -94,6 +98,9 @@ child=$!
echo "child = $child"
wait $child
+# Make sure some other events happen, even on small systems (e.g. 1 core)
+other_task
+
echo 0 > tracing_on
cnt=`count_pid $mypid`
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
index 2950bfbc6fce..adae72665500 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
@@ -39,6 +39,24 @@ grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
reset_trigger
+echo "Test histogram with sym modifier"
+
+echo 'hist:keys=call_site.sym' > events/kmem/kmalloc/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep '{ call_site: \[[0-9a-f][0-9a-f]*\] [_a-zA-Z][_a-zA-Z]* *}' events/kmem/kmalloc/hist > /dev/null || \
+ fail "sym modifier on kmalloc call_site did not work"
+
+reset_trigger
+
+echo "Test histogram with sym-offset modifier"
+
+echo 'hist:keys=call_site.sym-offset' > events/kmem/kmalloc/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep '{ call_site: \[[0-9a-f][0-9a-f]*\] [_a-zA-Z][_a-zA-Z]*+0x[0-9a-f][0-9a-f]*' events/kmem/kmalloc/hist > /dev/null || \
+ fail "sym-offset modifier on kmalloc call_site did not work"
+
+reset_trigger
+
echo "Test histogram with sort key"
echo 'hist:keys=parent_pid,child_pid:sort=child_pid.ascending' > events/sched/sched_process_fork/trigger
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 06a351b4f93b..0709af0144c8 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -38,6 +38,7 @@
/x86_64/xen_vmcall_test
/x86_64/xss_msr_test
/x86_64/vmx_pmu_msrs_test
+/access_tracking_perf_test
/demand_paging_test
/dirty_log_test
/dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index b853be2ae3c6..5832f510a16c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index a16c8f05366c..cc898181faab 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = {
#define VREGS_SUBLIST \
{ "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
- { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+ { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
+ .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
.regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
new file mode 100644
index 000000000000..e2baa187a21e
--- /dev/null
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * access_tracking_perf_test
+ *
+ * Copyright (C) 2021, Google, Inc.
+ *
+ * This test measures the performance effects of KVM's access tracking.
+ * Access tracking is driven by the MMU notifiers test_young, clear_young, and
+ * clear_flush_young. These notifiers do not have a direct userspace API,
+ * however the clear_young notifier can be triggered by marking pages as idle
+ * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
+ * enable access tracking on guest memory.
+ *
+ * To measure performance this test runs a VM with a configurable number of
+ * vCPUs that each touch every page in disjoint regions of memory. Performance
+ * is measured in the time it takes all vCPUs to finish touching their
+ * predefined region.
+ *
+ * Note that a deterministic correctness test of access tracking is not possible
+ * by using page_idle as it exists today. This is for a few reasons:
+ *
+ * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
+ * means subsequent guest accesses are not guaranteed to see page table
+ * updates made by KVM until some time in the future.
+ *
+ * 2. page_idle only operates on LRU pages. Newly allocated pages are not
+ * immediately added to LRU lists. Instead they are held in a "pagevec",
+ * which is drained to LRU lists some time in the future. There is no
+ * userspace API to force this drain to occur.
+ *
+ * These limitations are worked around in this test by using a large enough
+ * region of memory for each vCPU such that the number of translations cached in
+ * the TLB and the number of pages held in pagevecs are a small fraction of the
+ * overall workload. If either of those conditions is not true, this test
+ * will fail rather than silently passing.
+ */
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
+
+/* Global variable used to synchronize all of the vCPU threads. */
+static int iteration = -1;
+
+/* Defines what vCPU threads should do during a given iteration. */
+static enum {
+ /* Run the vCPU to access all its memory. */
+ ITERATION_ACCESS_MEMORY,
+ /* Mark the vCPU's memory idle in page_idle. */
+ ITERATION_MARK_IDLE,
+} iteration_work;
+
+/* Set to true when vCPU threads should exit. */
+static bool done;
+
+/* The iteration that was last completed by each vCPU. */
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+/* Whether to overlap the regions of memory vCPUs access. */
+static bool overlap_memory_access;
+
+struct test_params {
+ /* The backing source for the region of memory. */
+ enum vm_mem_backing_src_type backing_src;
+
+ /* The amount of memory to allocate for each vCPU. */
+ uint64_t vcpu_memory_bytes;
+
+ /* The number of vCPUs to create in the VM. */
+ int vcpus;
+};
+
+static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+{
+ uint64_t value;
+ off_t offset = index * sizeof(value);
+
+ TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
+ "pread from %s offset 0x%" PRIx64 " failed!",
+ filename, offset);
+
+ return value;
+
+}
+
+#define PAGEMAP_PRESENT (1ULL << 63)
+#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
+
+static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+{
+ uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
+ uint64_t entry;
+ uint64_t pfn;
+
+ entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+ if (!(entry & PAGEMAP_PRESENT))
+ return 0;
+
+ pfn = entry & PAGEMAP_PFN_MASK;
+ if (!pfn) {
+ print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
+ exit(KSFT_SKIP);
+ }
+
+ return pfn;
+}
+
+static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+{
+ uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+
+ return !!((bits >> (pfn % 64)) & 1);
+}
+
+static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+{
+ uint64_t bits = 1ULL << (pfn % 64);
+
+ TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
+ "Set page_idle bits for PFN 0x%" PRIx64, pfn);
+}
+
+static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+{
+ uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
+ uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+ uint64_t page;
+ uint64_t still_idle = 0;
+ uint64_t no_pfn = 0;
+ int page_idle_fd;
+ int pagemap_fd;
+
+ /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
+ if (overlap_memory_access && vcpu_id)
+ return;
+
+ page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+ TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");
+
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
+
+ for (page = 0; page < pages; page++) {
+ uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+ uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+
+ if (!pfn) {
+ no_pfn++;
+ continue;
+ }
+
+ if (is_page_idle(page_idle_fd, pfn)) {
+ still_idle++;
+ continue;
+ }
+
+ mark_page_idle(page_idle_fd, pfn);
+ }
+
+ /*
+ * Assumption: Less than 1% of pages are going to be swapped out from
+ * under us during this test.
+ */
+ TEST_ASSERT(no_pfn < pages / 100,
+ "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
+ vcpu_id, no_pfn, pages);
+
+ /*
+ * Test that at least 90% of memory has been marked idle (the rest might
+ * not be marked idle because the pages have not yet made it to an LRU
+ * list or the translations are still cached in the TLB). 90% is
+ * arbitrary; high enough that we ensure most memory access went through
+ * access tracking but low enough so as not to make the test too brittle
+ * over time and across architectures.
+ */
+ TEST_ASSERT(still_idle < pages / 10,
+ "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
+ PRIu64 ").\n",
+ vcpu_id, still_idle, pages);
+
+ close(page_idle_fd);
+ close(pagemap_fd);
+}
+
+static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
+ uint64_t expected_ucall)
+{
+ struct ucall uc;
+ uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+
+ TEST_ASSERT(expected_ucall == actual_ucall,
+ "Guest exited unexpectedly (expected ucall %" PRIu64
+ ", got %" PRIu64 ")",
+ expected_ucall, actual_ucall);
+}
+
+static bool spin_wait_for_next_iteration(int *current_iteration)
+{
+ int last_iteration = *current_iteration;
+
+ do {
+ if (READ_ONCE(done))
+ return false;
+
+ *current_iteration = READ_ONCE(iteration);
+ } while (last_iteration == *current_iteration);
+
+ return true;
+}
+
+static void *vcpu_thread_main(void *arg)
+{
+ struct perf_test_vcpu_args *vcpu_args = arg;
+ struct kvm_vm *vm = perf_test_args.vm;
+ int vcpu_id = vcpu_args->vcpu_id;
+ int current_iteration = -1;
+
+ vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+
+ while (spin_wait_for_next_iteration(&current_iteration)) {
+ switch (READ_ONCE(iteration_work)) {
+ case ITERATION_ACCESS_MEMORY:
+ vcpu_run(vm, vcpu_id);
+ assert_ucall(vm, vcpu_id, UCALL_SYNC);
+ break;
+ case ITERATION_MARK_IDLE:
+ mark_vcpu_memory_idle(vm, vcpu_id);
+ break;
+ };
+
+ vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+ }
+
+ return NULL;
+}
+
+static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+{
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+ target_iteration) {
+ continue;
+ }
+}
+
+/* The type of memory accesses to perform in the VM. */
+enum access_type {
+ ACCESS_READ,
+ ACCESS_WRITE,
+};
+
+static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+{
+ struct timespec ts_start;
+ struct timespec ts_elapsed;
+ int next_iteration;
+ int vcpu_id;
+
+ /* Kick off the vCPUs by incrementing iteration. */
+ next_iteration = ++iteration;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+ /* Wait for all vCPUs to finish the iteration. */
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
+ spin_wait_for_vcpu(vcpu_id, next_iteration);
+
+ ts_elapsed = timespec_elapsed(ts_start);
+ pr_info("%-30s: %ld.%09lds\n",
+ description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
+}
+
+static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
+ const char *description)
+{
+ perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
+ sync_global_to_guest(vm, perf_test_args);
+ iteration_work = ITERATION_ACCESS_MEMORY;
+ run_iteration(vm, vcpus, description);
+}
+
+static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+{
+ /*
+ * Even though this parallelizes the work across vCPUs, this is still a
+ * very slow operation because page_idle forces the test to mark one pfn
+ * at a time and the clear_young notifier serializes on the KVM MMU
+ * lock.
+ */
+ pr_debug("Marking VM memory idle (slow)...\n");
+ iteration_work = ITERATION_MARK_IDLE;
+ run_iteration(vm, vcpus, "Mark memory idle");
+}
+
+static pthread_t *create_vcpu_threads(int vcpus)
+{
+ pthread_t *vcpu_threads;
+ int i;
+
+ vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
+ TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
+
+ for (i = 0; i < vcpus; i++) {
+ vcpu_last_completed_iteration[i] = iteration;
+ pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
+ &perf_test_args.vcpu_args[i]);
+ }
+
+ return vcpu_threads;
+}
+
+static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
+{
+ int i;
+
+ /* Set done to signal the vCPU threads to exit */
+ done = true;
+
+ for (i = 0; i < vcpus; i++)
+ pthread_join(vcpu_threads[i], NULL);
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+ struct test_params *params = arg;
+ struct kvm_vm *vm;
+ pthread_t *vcpu_threads;
+ int vcpus = params->vcpus;
+
+ vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes,
+ params->backing_src);
+
+ perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
+ !overlap_memory_access);
+
+ vcpu_threads = create_vcpu_threads(vcpus);
+
+ pr_info("\n");
+ access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+
+ /* As a control, read and write to the populated memory first. */
+ access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
+ access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+
+ /* Repeat on memory that has been marked as idle. */
+ mark_memory_idle(vm, vcpus);
+ access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
+ mark_memory_idle(vm, vcpus);
+ access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+
+ terminate_vcpu_threads(vcpu_threads, vcpus);
+ free(vcpu_threads);
+ perf_test_destroy_vm(vm);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
+ name);
+ puts("");
+ printf(" -h: Display this help message.");
+ guest_modes_help();
+ printf(" -b: specify the size of the memory region which should be\n"
+ " dirtied by each vCPU. e.g. 10M or 3G.\n"
+ " (default: 1G)\n");
+ printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
+ printf(" -s: specify the type of memory that should be used to\n"
+ " back the guest data region.\n\n");
+ backing_src_help();
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ struct test_params params = {
+ .backing_src = VM_MEM_SRC_ANONYMOUS,
+ .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
+ .vcpus = 1,
+ };
+ int page_idle_fd;
+ int opt;
+
+ guest_modes_append_default();
+
+ while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
+ switch (opt) {
+ case 'm':
+ guest_modes_cmdline(optarg);
+ break;
+ case 'b':
+ params.vcpu_memory_bytes = parse_size(optarg);
+ break;
+ case 'v':
+ params.vcpus = atoi(optarg);
+ break;
+ case 'o':
+ overlap_memory_access = true;
+ break;
+ case 's':
+ params.backing_src = parse_backing_src_type(optarg);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+ if (page_idle_fd < 0) {
+ print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
+ exit(KSFT_SKIP);
+ }
+ close(page_idle_fd);
+
+ for_each_guest_mode(run_test, &params);
+
+ return 0;
+}
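
The test above never talks to KVM's MMU notifiers directly; everything goes through two generic interfaces, /proc/self/pagemap (one 64-bit entry per virtual page, PFN in bits 0-54, present bit 63) and /sys/kernel/mm/page_idle/bitmap (one idle bit per PFN, packed into 64-bit words). A condensed, self-contained sketch of the same lookup the helpers above perform, i.e. checking whether the page backing a host virtual address is still idle, assuming CAP_SYS_ADMIN (otherwise pagemap hides the PFN) and with error handling trimmed:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#define PAGEMAP_PRESENT  (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)

/* Returns 1 if the page backing 'addr' is still marked idle, 0 if not,
 * and -1 on error.  Minimal sketch; the selftest above wraps the same
 * reads in TEST_ASSERT() and skip handling.
 */
static int page_is_idle(void *addr)
{
	uint64_t vfn = (uintptr_t)addr / getpagesize();
	uint64_t entry, bits, pfn;
	int pagemap_fd, idle_fd, ret = -1;

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
	if (pagemap_fd < 0 || idle_fd < 0)
		goto out;

	/* One 64-bit pagemap entry per virtual page; PFN lives in bits 0-54. */
	if (pread(pagemap_fd, &entry, sizeof(entry), vfn * sizeof(entry)) != sizeof(entry))
		goto out;
	if (!(entry & PAGEMAP_PRESENT) || !(entry & PAGEMAP_PFN_MASK))
		goto out;
	pfn = entry & PAGEMAP_PFN_MASK;

	/* The idle bitmap packs one bit per PFN into 8-byte words. */
	if (pread(idle_fd, &bits, sizeof(bits), (pfn / 64) * sizeof(bits)) != sizeof(bits))
		goto out;
	ret = (bits >> (pfn % 64)) & 1;
out:
	if (pagemap_fd >= 0)
		close(pagemap_fd);
	if (idle_fd >= 0)
		close(idle_fd);
	return ret;
}

Marking a page idle is the same bitmap access in reverse: pwrite() the 8-byte word with the PFN's bit set, as mark_page_idle() above does.
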
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 04a2641261be..80cbd3a748c0 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -312,6 +312,7 @@ int main(int argc, char *argv[])
break;
case 'o':
p.partition_vcpu_memory_access = false;
+ break;
case 's':
p.backing_src = parse_backing_src_type(optarg);
break;
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 615ab254899d..010b59b13917 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -45,6 +45,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
VM_MODE_P47V64_4K,
+ VM_MODE_P44V64_4K,
NUM_VM_MODES,
};
@@ -62,7 +63,7 @@ enum vm_guest_mode {
#elif defined(__s390x__)
-#define VM_MODE_DEFAULT VM_MODE_P47V64_4K
+#define VM_MODE_DEFAULT VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)
diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
index 412eaee7884a..b66910702c0a 100644
--- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -117,7 +117,7 @@
#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
@@ -182,4 +182,7 @@
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+/* hypercall options */
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+
#endif /* !SELFTEST_KVM_HYPERV_H */
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 9f49f6caafe5..632b74d6b3ca 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -401,7 +401,7 @@ unexpected_exception:
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size, 0, 0);
+ vm->page_size);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index 25bff307c71f..c330f414ef96 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -22,6 +22,22 @@ void guest_modes_append_default(void)
}
}
#endif
+#ifdef __s390x__
+ {
+ int kvm_fd, vm_fd;
+ struct kvm_s390_vm_cpu_processor info;
+
+ kvm_fd = open_kvm_dev_path_or_exit();
+ vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
+ kvm_device_access(vm_fd, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR, &info, false);
+ close(vm_fd);
+ close(kvm_fd);
+ /* Starting with z13 we have 47 bits of physical address */
+ if (info.ibc >= 0x30)
+ guest_mode_append(VM_MODE_P47V64_4K, true, true);
+ }
+#endif
}
void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 5b56b57b3c20..10a8ed691c66 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -176,6 +176,7 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
+ [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
@@ -194,6 +195,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
{ 47, 64, 0x1000, 12 },
+ { 44, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@@ -282,6 +284,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
case VM_MODE_P47V64_4K:
vm->pgtable_levels = 5;
break;
+ case VM_MODE_P44V64_4K:
+ vm->pgtable_levels = 5;
+ break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 85b18bb8f762..72a1c9b4882c 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -377,7 +377,8 @@ static void test_add_max_memory_regions(void)
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index b0031f2d38fd..ecec30865a74 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -320,7 +320,7 @@ int main(int ac, char **av)
run_delay = get_run_delay();
pthread_create(&thread, &attr, do_steal_time, NULL);
do
- pthread_yield();
+ sched_yield();
while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
pthread_join(thread, NULL);
run_delay = get_run_delay() - run_delay;
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
index bab10ae787b6..e0b2bb1339b1 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -215,7 +215,7 @@ int main(void)
vcpu_set_hv_cpuid(vm, VCPU_ID);
tsc_page_gva = vm_vaddr_alloc_page(vm);
- memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
+ memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned\n");
vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 42bd658f52a8..91d88aaa9899 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -47,6 +47,7 @@ static void do_wrmsr(u32 idx, u64 val)
}
static int nr_gp;
+static int nr_ud;
static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
vm_vaddr_t output_address)
@@ -80,6 +81,12 @@ static void guest_gp_handler(struct ex_regs *regs)
regs->rip = (uint64_t)&wrmsr_end;
}
+static void guest_ud_handler(struct ex_regs *regs)
+{
+ nr_ud++;
+ regs->rip += 3;
+}
+
struct msr_data {
uint32_t idx;
bool available;
@@ -90,6 +97,7 @@ struct msr_data {
struct hcall_data {
uint64_t control;
uint64_t expect;
+ bool ud_expected;
};
static void guest_msr(struct msr_data *msr)
@@ -117,13 +125,26 @@ static void guest_msr(struct msr_data *msr)
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
int i = 0;
+ u64 res, input, output;
wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
while (hcall->control) {
- GUEST_ASSERT(hypercall(hcall->control, pgs_gpa,
- pgs_gpa + 4096) == hcall->expect);
+ nr_ud = 0;
+ if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
+ input = pgs_gpa;
+ output = pgs_gpa + 4096;
+ } else {
+ input = output = 0;
+ }
+
+ res = hypercall(hcall->control, input, output);
+ if (hcall->ud_expected)
+ GUEST_ASSERT(nr_ud == 1);
+ else
+ GUEST_ASSERT(res == hcall->expect);
+
GUEST_SYNC(i++);
}
@@ -552,8 +573,18 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
recomm.ebx = 0xfff;
hcall->expect = HV_STATUS_SUCCESS;
break;
-
case 17:
+ /* XMM fast hypercall */
+ hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
+ hcall->ud_expected = true;
+ break;
+ case 18:
+ feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+ hcall->ud_expected = false;
+ hcall->expect = HV_STATUS_SUCCESS;
+ break;
+
+ case 19:
/* END */
hcall->control = 0;
break;
@@ -615,7 +646,7 @@ int main(void)
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
pr_info("Testing access to Hyper-V specific MSRs\n");
guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
@@ -625,6 +656,10 @@ int main(void)
/* Test hypercalls */
vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
/* Hypercall input/output */
hcall_page = vm_vaddr_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
index 523371cf8e8f..da2325fcad87 100644
--- a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
@@ -71,7 +71,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
/* Set up a #PF handler to eat the RSVD #PF and signal all done! */
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_handle_exception(vm, PF_VECTOR, guest_pf_handler);
+ vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);
r = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index c1f831803ad2..d0fe2fdce58c 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
: "+a" (phase));
}
-void self_smi(void)
+static void self_smi(void)
{
x2apic_write_reg(APIC_ICR,
APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}
-void guest_code(void *arg)
+static void l2_guest_code(void)
{
+ sync_with_host(8);
+
+ sync_with_host(10);
+
+ vmcall();
+}
+
+static void guest_code(void *arg)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+ struct svm_test_data *svm = arg;
+ struct vmx_pages *vmx_pages = arg;
sync_with_host(1);
@@ -74,21 +87,50 @@ void guest_code(void *arg)
sync_with_host(4);
if (arg) {
- if (cpu_has_svm())
- generic_svm_setup(arg, NULL, NULL);
- else
- GUEST_ASSERT(prepare_for_vmx_operation(arg));
+ if (cpu_has_svm()) {
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ } else {
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ }
sync_with_host(5);
self_smi();
sync_with_host(7);
+
+ if (cpu_has_svm()) {
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ svm->vmcb->save.rip += 3;
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ } else {
+ vmlaunch();
+ vmresume();
+ }
+
+ /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
+ sync_with_host(12);
}
sync_with_host(DONE);
}
+void inject_smi(struct kvm_vm *vm)
+{
+ struct kvm_vcpu_events events;
+
+ vcpu_events_get(vm, VCPU_ID, &events);
+
+ events.smi.pending = 1;
+ events.flags |= KVM_VCPUEVENT_VALID_SMM;
+
+ vcpu_events_set(vm, VCPU_ID, &events);
+}
+
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
@@ -147,6 +189,22 @@ int main(int argc, char *argv[])
"Unexpected stage: #%x, got %x",
stage, stage_reported);
+ /*
+ * Enter SMM during L2 execution and check that we correctly
+ * return from it. Do not perform save/restore while in SMM yet.
+ */
+ if (stage == 8) {
+ inject_smi(vm);
+ continue;
+ }
+
+ /*
+ * Perform save/restore while the guest is in SMM triggered
+ * during L2 execution.
+ */
+ if (stage == 10)
+ inject_smi(vm);
+
state = vcpu_save_state(vm, VCPU_ID);
kvm_vm_release(vm);
kvm_vm_restart(vm, O_RDWR);
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 0af84ad48aa7..fa2ac0e56b43 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -100,6 +100,7 @@ define INSTALL_RULE
$(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
$(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
$(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(wildcard config settings)) $(INSTALL_SINGLE_RULE)
endef
install: all
diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
index d874990e442b..013446e87f1f 100644
--- a/tools/testing/selftests/lkdtm/config
+++ b/tools/testing/selftests/lkdtm/config
@@ -1 +1,8 @@
CONFIG_LKDTM=y
+CONFIG_DEBUG_LIST=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
+CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
index bb7a1775307b..e95e79bd3126 100755
--- a/tools/testing/selftests/lkdtm/run.sh
+++ b/tools/testing/selftests/lkdtm/run.sh
@@ -76,10 +76,14 @@ fi
# Save existing dmesg so we can detect new content below
dmesg > "$DMESG"
-# Most shells yell about signals and we're expecting the "cat" process
-# to usually be killed by the kernel. So we have to run it in a sub-shell
-# and silence errors.
-($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
+# Since the kernel is likely killing the process writing to the trigger
+# file, it must not be the script's shell itself. i.e. we cannot do:
+# echo "$test" >"$TRIGGER"
+# Instead, use "cat" to take the signal. Since the shell will yell about
+# the signal that killed the subprocess, we must ignore the failure and
+# continue. However we don't silence stderr since there might be other
+# useful details reported there in the case of other unexpected conditions.
+echo "$test" | cat >"$TRIGGER" || true
# Record and dump the results
dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
diff --git a/tools/testing/selftests/lkdtm/stack-entropy.sh b/tools/testing/selftests/lkdtm/stack-entropy.sh
index b1b8a5097cbb..1b4d95d575f8 100755
--- a/tools/testing/selftests/lkdtm/stack-entropy.sh
+++ b/tools/testing/selftests/lkdtm/stack-entropy.sh
@@ -30,6 +30,7 @@ rm -f "$log"
# We would expect any functional stack randomization to be at least 5 bits.
if [ "$bits" -lt 5 ]; then
+ echo "Stack entropy is low! Booted without 'randomize_kstack_offset=y'?"
exit 1
else
exit 0
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 11ef159be0fd..846cfd508d3c 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -11,15 +11,18 @@ CORRUPT_LIST_ADD list_add corruption
CORRUPT_LIST_DEL list_del corruption
STACK_GUARD_PAGE_LEADING
STACK_GUARD_PAGE_TRAILING
-UNSET_SMEP CR4 bits went missing
+UNSET_SMEP pinned CR4 bits changed:
DOUBLE_FAULT
CORRUPT_PAC
UNALIGNED_LOAD_STORE_WRITE
-#OVERWRITE_ALLOCATION Corrupts memory on failure
+SLAB_LINEAR_OVERFLOW
+VMALLOC_LINEAR_OVERFLOW
#WRITE_AFTER_FREE Corrupts memory on failure
-READ_AFTER_FREE
+READ_AFTER_FREE call trace:|Memory correctly poisoned
#WRITE_BUDDY_AFTER_FREE Corrupts memory on failure
-READ_BUDDY_AFTER_FREE
+READ_BUDDY_AFTER_FREE call trace:|Memory correctly poisoned
+SLAB_INIT_ON_ALLOC Memory appears initialized
+BUDDY_INIT_ON_ALLOC Memory appears initialized
SLAB_FREE_DOUBLE
SLAB_FREE_CROSS
SLAB_FREE_PAGE
diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
index b37585e6aa38..46a97f318f58 100755
--- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
+++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
@@ -282,7 +282,9 @@ done
#
echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
for memory in `hotpluggable_online_memory`; do
- offline_memory_expect_fail $memory
+ if [ $((RANDOM % 100)) -lt $ratio ]; then
+ offline_memory_expect_fail $memory
+ fi
done
echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
index c19ecc6a8614..ecbf57f264ed 100755
--- a/tools/testing/selftests/net/icmp_redirect.sh
+++ b/tools/testing/selftests/net/icmp_redirect.sh
@@ -313,9 +313,10 @@ check_exception()
fi
log_test $? 0 "IPv4: ${desc}"
- if [ "$with_redirect" = "yes" ]; then
+ # No PMTU info for test "redirect" and "mtu exception plus redirect"
+ if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
- grep -q "${H2_N2_IP6} from :: via ${R2_LLADDR} dev br0.*${mtu}"
+ grep -v "mtu" | grep -q "${H2_N2_IP6} .*via ${R2_LLADDR} dev br0"
elif [ -n "${mtu}" ]; then
ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
grep -q "${mtu}"
diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
index f23438d512c5..3d7dde2c321b 100644
--- a/tools/testing/selftests/net/ipsec.c
+++ b/tools/testing/selftests/net/ipsec.c
@@ -484,13 +484,16 @@ enum desc_type {
MONITOR_ACQUIRE,
EXPIRE_STATE,
EXPIRE_POLICY,
+ SPDINFO_ATTRS,
};
const char *desc_name[] = {
"create tunnel",
"alloc spi",
"monitor acquire",
"expire state",
- "expire policy"
+ "expire policy",
+ "spdinfo attributes",
+ ""
};
struct xfrm_desc {
enum desc_type type;
@@ -1593,6 +1596,155 @@ out_close:
return ret;
}
+static int xfrm_spdinfo_set_thresh(int xfrm_sock, uint32_t *seq,
+ unsigned thresh4_l, unsigned thresh4_r,
+ unsigned thresh6_l, unsigned thresh6_r,
+ bool add_bad_attr)
+
+{
+ struct {
+ struct nlmsghdr nh;
+ union {
+ uint32_t unused;
+ int error;
+ };
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ struct xfrmu_spdhthresh thresh;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.unused));
+ req.nh.nlmsg_type = XFRM_MSG_NEWSPDINFO;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = (*seq)++;
+
+ thresh.lbits = thresh4_l;
+ thresh.rbits = thresh4_r;
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV4_HTHRESH, &thresh, sizeof(thresh)))
+ return -1;
+
+ thresh.lbits = thresh6_l;
+ thresh.rbits = thresh6_r;
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SPD_IPV6_HTHRESH, &thresh, sizeof(thresh)))
+ return -1;
+
+ if (add_bad_attr) {
+ BUILD_BUG_ON(XFRMA_IF_ID <= XFRMA_SPD_MAX + 1);
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_IF_ID, NULL, 0)) {
+ pr_err("adding attribute failed: no space");
+ return -1;
+ }
+ }
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ return -1;
+ } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+ return -1;
+ }
+
+ if (req.error) {
+ printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xfrm_spdinfo_attrs(int xfrm_sock, uint32_t *seq)
+{
+ struct {
+ struct nlmsghdr nh;
+ union {
+ uint32_t unused;
+ int error;
+ };
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+
+ if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 31, 120, 16, false)) {
+ pr_err("Can't set SPD HTHRESH");
+ return KSFT_FAIL;
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.unused));
+ req.nh.nlmsg_type = XFRM_MSG_GETSPDINFO;
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_seq = (*seq)++;
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return KSFT_FAIL;
+ }
+
+ if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ return KSFT_FAIL;
+ } else if (req.nh.nlmsg_type == XFRM_MSG_NEWSPDINFO) {
+ size_t len = NLMSG_PAYLOAD(&req.nh, sizeof(req.unused));
+ struct rtattr *attr = (void *)req.attrbuf;
+ int got_thresh = 0;
+
+ for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len)) {
+ if (attr->rta_type == XFRMA_SPD_IPV4_HTHRESH) {
+ struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+ got_thresh++;
+ if (t->lbits != 32 || t->rbits != 31) {
+ pr_err("thresh differ: %u, %u",
+ t->lbits, t->rbits);
+ return KSFT_FAIL;
+ }
+ }
+ if (attr->rta_type == XFRMA_SPD_IPV6_HTHRESH) {
+ struct xfrmu_spdhthresh *t = RTA_DATA(attr);
+
+ got_thresh++;
+ if (t->lbits != 120 || t->rbits != 16) {
+ pr_err("thresh differ: %u, %u",
+ t->lbits, t->rbits);
+ return KSFT_FAIL;
+ }
+ }
+ }
+ if (got_thresh != 2) {
+ pr_err("only %d thresh returned by XFRM_MSG_GETSPDINFO", got_thresh);
+ return KSFT_FAIL;
+ }
+ } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+ return KSFT_FAIL;
+ } else {
+ printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+ return -1;
+ }
+
+ /* Restore the default */
+ if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, false)) {
+ pr_err("Can't restore SPD HTHRESH");
+ return KSFT_FAIL;
+ }
+
+ /*
+ * At this moment xfrm uses nlmsg_parse_deprecated(), which
+ * implies NL_VALIDATE_LIBERAL - ignoring attributes with
+ * (type > maxtype). nla_parse_deprecated_strict() would enforce
+ * it, as would the even stricter nla_parse().
+ * Right now it's not expected to fail, but to be ignored.
+ */
+ if (xfrm_spdinfo_set_thresh(xfrm_sock, seq, 32, 32, 128, 128, true))
+ return KSFT_PASS;
+
+ return KSFT_PASS;
+}
+
static int child_serv(int xfrm_sock, uint32_t *seq,
unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc)
{
@@ -1717,6 +1869,9 @@ static int child_f(unsigned int nr, int test_desc_fd, int cmd_fd, void *buf)
case EXPIRE_POLICY:
ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc);
break;
+ case SPDINFO_ATTRS:
+ ret = xfrm_spdinfo_attrs(xfrm_sock, &seq);
+ break;
default:
printk("Unknown desc type %d", desc.type);
exit(KSFT_FAIL);
@@ -1994,8 +2149,10 @@ static int write_proto_plan(int fd, int proto)
* sizeof(xfrm_user_polexpire) = 168 | sizeof(xfrm_user_polexpire) = 176
*
* Check the affected by the UABI difference structures.
+ * Also, check translation for xfrm_set_spdinfo: it has its own attributes
+ * which need to be correctly copied, but not translated.
*/
-const unsigned int compat_plan = 4;
+const unsigned int compat_plan = 5;
static int write_compat_struct_tests(int test_desc_fd)
{
struct xfrm_desc desc = {};
@@ -2019,6 +2176,10 @@ static int write_compat_struct_tests(int test_desc_fd)
if (__write_desc(test_desc_fd, &desc))
return -1;
+ desc.type = SPDINFO_ATTRS;
+ if (__write_desc(test_desc_fd, &desc))
+ return -1;
+
return 0;
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 9a191c1a5de8..f02f4de2f3a0 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1409,7 +1409,7 @@ syncookies_tests()
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
- chk_join_nr "subflows limited by server w cookies" 2 2 1
+ chk_join_nr "subflows limited by server w cookies" 2 1 1
# test signal address with cookies
reset_with_cookies
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index 6365c7fd1262..bd6288302094 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -11,9 +11,11 @@
#include <sys/socket.h>
#include <sys/wait.h>
#include <linux/tcp.h>
+#include <linux/udp.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
+#include <netinet/ip.h>
#include <netdb.h>
#include <fcntl.h>
#include <libgen.h>
@@ -27,6 +29,10 @@
#include <time.h>
#include <errno.h>
+#include <linux/xfrm.h>
+#include <linux/ipsec.h>
+#include <linux/pfkeyv2.h>
+
#ifndef IPV6_UNICAST_IF
#define IPV6_UNICAST_IF 76
#endif
@@ -114,6 +120,9 @@ struct sock_args {
struct in_addr in;
struct in6_addr in6;
} expected_raddr;
+
+ /* ESP in UDP encap test */
+ int use_xfrm;
};
static int server_mode;
@@ -1346,6 +1355,41 @@ static int bind_socket(int sd, struct sock_args *args)
return 0;
}
+static int config_xfrm_policy(int sd, struct sock_args *args)
+{
+ struct xfrm_userpolicy_info policy = {};
+ int type = UDP_ENCAP_ESPINUDP;
+ int xfrm_af = IP_XFRM_POLICY;
+ int level = SOL_IP;
+
+ if (args->type != SOCK_DGRAM) {
+ log_error("Invalid socket type. Only DGRAM could be used for XFRM\n");
+ return 1;
+ }
+
+ policy.action = XFRM_POLICY_ALLOW;
+ policy.sel.family = args->version;
+ if (args->version == AF_INET6) {
+ xfrm_af = IPV6_XFRM_POLICY;
+ level = SOL_IPV6;
+ }
+
+ policy.dir = XFRM_POLICY_OUT;
+ if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0)
+ return 1;
+
+ policy.dir = XFRM_POLICY_IN;
+ if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0)
+ return 1;
+
+ if (setsockopt(sd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type)) < 0) {
+ log_err_errno("Failed to set xfrm encap");
+ return 1;
+ }
+
+ return 0;
+}
+
static int lsock_init(struct sock_args *args)
{
long flags;
@@ -1389,6 +1433,11 @@ static int lsock_init(struct sock_args *args)
if (fcntl(sd, F_SETFD, FD_CLOEXEC) < 0)
log_err_errno("Failed to set close-on-exec flag");
+ if (args->use_xfrm && config_xfrm_policy(sd, args)) {
+ log_err_errno("Failed to set xfrm policy");
+ goto err;
+ }
+
out:
return sd;
@@ -1772,7 +1821,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
return client_status;
}
-#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6L:0:1:2:3:Fbq"
+#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
static void print_usage(char *prog)
{
@@ -1795,6 +1844,7 @@ static void print_usage(char *prog)
" -D|R datagram (D) / raw (R) socket (default stream)\n"
" -l addr local address to bind to in server mode\n"
" -c addr local address to bind to in client mode\n"
+ " -x configure XFRM policy on socket\n"
"\n"
" -d dev bind socket to given device name\n"
" -I dev bind socket to given device name - server mode\n"
@@ -1966,6 +2016,9 @@ int main(int argc, char *argv[])
case 'q':
quiet = 1;
break;
+ case 'x':
+ args.use_xfrm = 1;
+ break;
default:
print_usage(argv[0]);
return 1;
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 64cd2e23c568..543ad7513a8e 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -118,6 +118,16 @@
# below for IPv6 doesn't apply here, because, on IPv4, administrative MTU
# changes alone won't affect PMTU
#
+# - pmtu_vti4_udp_exception
+# Same as pmtu_vti4_exception, but using ESP-in-UDP
+#
+# - pmtu_vti4_udp_routed_exception
+# Set up vti tunnel on top of veth connected through routing namespace and
+# add xfrm states and policies with ESP-in-UDP encapsulation. Check that
+# route exception is not created if link layer MTU is not exceeded, then
+# lower MTU on second part of routed environment and check that exception
+# is created with the expected PMTU.
+#
# - pmtu_vti6_exception
# Set up vti6 tunnel on top of veth, with xfrm states and policies, in two
# namespaces with matching endpoints. Check that route exception is
@@ -125,6 +135,13 @@
# decrease and increase MTU of tunnel, checking that route exception PMTU
# changes accordingly
#
+# - pmtu_vti6_udp_exception
+# Same as pmtu_vti6_exception, but using ESP-in-UDP
+#
+# - pmtu_vti6_udp_routed_exception
+# Same as pmtu_vti6_udp_exception, but with routing between vti
+# endpoints
+#
# - pmtu_vti4_default_mtu
# Set up vti4 tunnel on top of veth, in two namespaces with matching
# endpoints. Check that MTU assigned to vti interface is the MTU of the
@@ -224,6 +241,10 @@ tests="
pmtu_ipv6_ipv6_exception IPv6 over IPv6: PMTU exceptions 1
pmtu_vti6_exception vti6: PMTU exceptions 0
pmtu_vti4_exception vti4: PMTU exceptions 0
+ pmtu_vti6_udp_exception vti6: PMTU exceptions (ESP-in-UDP) 0
+ pmtu_vti4_udp_exception vti4: PMTU exceptions (ESP-in-UDP) 0
+ pmtu_vti6_udp_routed_exception vti6: PMTU exceptions, routed (ESP-in-UDP) 0
+ pmtu_vti4_udp_routed_exception vti4: PMTU exceptions, routed (ESP-in-UDP) 0
pmtu_vti4_default_mtu vti4: default MTU assignment 0
pmtu_vti6_default_mtu vti6: default MTU assignment 0
pmtu_vti4_link_add_mtu vti4: MTU setting on link creation 0
@@ -246,7 +267,6 @@ ns_b="ip netns exec ${NS_B}"
ns_c="ip netns exec ${NS_C}"
ns_r1="ip netns exec ${NS_R1}"
ns_r2="ip netns exec ${NS_R2}"
-
# Addressing and routing for tests with routers: four network segments, with
# index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
# identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2).
@@ -279,7 +299,6 @@ routes="
A ${prefix6}:${b_r2}::1 ${prefix6}:${a_r2}::2
B default ${prefix6}:${b_r1}::2
"
-
USE_NH="no"
# ns family nh id destination gateway
nexthops="
@@ -326,6 +345,7 @@ dummy6_mask="64"
err_buf=
tcpdump_pids=
+nettest_pids=
err() {
err_buf="${err_buf}${1}
@@ -548,6 +568,14 @@ setup_vti6() {
setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
}
+setup_vti4routed() {
+ setup_vti 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 ${tunnel4_a_addr} ${tunnel4_b_addr} ${tunnel4_mask}
+}
+
+setup_vti6routed() {
+ setup_vti 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
+}
+
setup_vxlan_or_geneve() {
type="${1}"
a_addr="${2}"
@@ -619,18 +647,36 @@ setup_xfrm() {
proto=${1}
veth_a_addr="${2}"
veth_b_addr="${3}"
+ encap=${4}
- run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
- run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+ run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} || return 1
+ run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
run_cmd ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
run_cmd ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
- run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
- run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+ run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
+ run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
run_cmd ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
run_cmd ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
}
+setup_nettest_xfrm() {
+ which nettest >/dev/null
+ if [ $? -ne 0 ]; then
+ echo "'nettest' command not found; skipping tests"
+ return 1
+ fi
+
+ [ ${1} -eq 6 ] && proto="-6" || proto=""
+ port=${2}
+
+ run_cmd ${ns_a} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+ nettest_pids="${nettest_pids} $!"
+
+ run_cmd ${ns_b} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+ nettest_pids="${nettest_pids} $!"
+}
+
setup_xfrm4() {
setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr}
}
@@ -639,6 +685,26 @@ setup_xfrm6() {
setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr}
}
+setup_xfrm4udp() {
+ setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+ setup_nettest_xfrm 4 4500
+}
+
+setup_xfrm6udp() {
+ setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+ setup_nettest_xfrm 6 4500
+}
+
+setup_xfrm4udprouted() {
+ setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
+ setup_nettest_xfrm 4 4500
+}
+
+setup_xfrm6udprouted() {
+ setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
+ setup_nettest_xfrm 6 4500
+}
+
setup_routing_old() {
for i in ${routes}; do
[ "${ns}" = "" ] && ns="${i}" && continue
@@ -823,6 +889,11 @@ cleanup() {
done
tcpdump_pids=
+ for pid in ${nettest_pids}; do
+ kill ${pid}
+ done
+ nettest_pids=
+
for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do
ip netns del ${n} 2> /dev/null
done
@@ -1432,6 +1503,135 @@ test_pmtu_vti6_exception() {
return ${fail}
}
+test_pmtu_vti4_udp_exception() {
+ setup namespaces veth vti4 xfrm4udp || return $ksft_skip
+ trace "${ns_a}" veth_a "${ns_b}" veth_b \
+ "${ns_a}" vti4_a "${ns_b}" vti4_b
+
+ veth_mtu=1500
+ vti_mtu=$((veth_mtu - 20))
+
+ # UDP SPI SN IV ICV pad length next header
+ esp_payload_rfc4106=$((vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1))
+ ping_payload=$((esp_payload_rfc4106 - 28))
+
+ mtu "${ns_a}" veth_a ${veth_mtu}
+ mtu "${ns_b}" veth_b ${veth_mtu}
+ mtu "${ns_a}" vti4_a ${vti_mtu}
+ mtu "${ns_b}" vti4_b ${vti_mtu}
+
+ # Send DF packet without exceeding link layer MTU, check that no
+ # exception is created
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+ check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+ # Now exceed link layer MTU by one byte, check that exception is created
+ # with the right PMTU value
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr}
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+ check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
+}
+
+test_pmtu_vti6_udp_exception() {
+ setup namespaces veth vti6 xfrm6udp || return $ksft_skip
+ trace "${ns_a}" veth_a "${ns_b}" veth_b \
+ "${ns_a}" vti6_a "${ns_b}" vti6_b
+ fail=0
+
+ # Create route exception by exceeding link layer MTU
+ mtu "${ns_a}" veth_a 4000
+ mtu "${ns_b}" veth_b 4000
+ mtu "${ns_a}" vti6_a 5000
+ mtu "${ns_b}" vti6_b 5000
+ run_cmd ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr}
+
+ # Check that exception was created
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+ check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
+
+ # Decrease tunnel MTU, check for PMTU decrease in route exception
+ mtu "${ns_a}" vti6_a 3000
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+ check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
+
+ # Increase tunnel MTU, check for PMTU increase in route exception
+ mtu "${ns_a}" vti6_a 9000
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+ check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
+
+ return ${fail}
+}
+
+test_pmtu_vti4_udp_routed_exception() {
+ setup namespaces routing vti4routed xfrm4udprouted || return $ksft_skip
+ trace "${ns_a}" veth_A-R1 "${ns_b}" veth_B-R1 \
+ "${ns_a}" vti4_a "${ns_b}" vti4_b
+
+ veth_mtu=1500
+ vti_mtu=$((veth_mtu - 20))
+
+ # UDP SPI SN IV ICV pad length next header
+ esp_payload_rfc4106=$((vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1))
+ ping_payload=$((esp_payload_rfc4106 - 28))
+
+ mtu "${ns_a}" veth_A-R1 ${veth_mtu}
+ mtu "${ns_r1}" veth_R1-A ${veth_mtu}
+ mtu "${ns_b}" veth_B-R1 ${veth_mtu}
+ mtu "${ns_r1}" veth_R1-B ${veth_mtu}
+
+ mtu "${ns_a}" vti4_a ${vti_mtu}
+ mtu "${ns_b}" vti4_b ${vti_mtu}
+
+ # Send DF packet without exceeding link layer MTU, check that no
+ # exception is created
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+ check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+ # Now decrease link layer MTU by 8 bytes on R1, check that exception is created
+ # with the right PMTU value
+ mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8))
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel4_b_addr}
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+ check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))"
+}
+
+test_pmtu_vti6_udp_routed_exception() {
+ setup namespaces routing vti6routed xfrm6udprouted || return $ksft_skip
+ trace "${ns_a}" veth_A-R1 "${ns_b}" veth_B-R1 \
+ "${ns_a}" vti6_a "${ns_b}" vti6_b
+
+ veth_mtu=1500
+ vti_mtu=$((veth_mtu - 40))
+
+ # UDP SPI SN IV ICV pad length next header
+ esp_payload_rfc4106=$((vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1))
+ ping_payload=$((esp_payload_rfc4106 - 48))
+
+ mtu "${ns_a}" veth_A-R1 ${veth_mtu}
+ mtu "${ns_r1}" veth_R1-A ${veth_mtu}
+ mtu "${ns_b}" veth_B-R1 ${veth_mtu}
+ mtu "${ns_r1}" veth_R1-B ${veth_mtu}
+
+ # mtu "${ns_a}" vti6_a ${vti_mtu}
+ # mtu "${ns_b}" vti6_b ${vti_mtu}
+
+ run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel6_b_addr}
+
+ # Check that exception was not created
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+ check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+ # Now decrease link layer MTU by 8 bytes on R1, check that exception is created
+ # with the right PMTU value
+ mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8))
+ run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel6_b_addr}
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+ check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))"
+
+}
+
test_pmtu_vti4_default_mtu() {
setup namespaces veth vti4 || return $ksft_skip
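
For reference, the ESP-in-UDP overhead assumed in the vti tests above adds up to 42 bytes: 8 (UDP) + 4 (SPI) + 4 (sequence number) + 8 (IV) + 16 (ICV) + 1 (pad length) + 1 (next header). With veth_mtu=1500 and vti_mtu=1480 that leaves esp_payload_rfc4106=1438 bytes, and subtracting the 28 bytes of IPv4 plus ICMP header gives the 1410-byte ping payload that just fits; the IPv6 routed variant uses vti_mtu=1460 and subtracts 48 bytes (IPv6 plus ICMPv6 header) instead.
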
diff --git a/tools/testing/selftests/net/timestamping.c b/tools/testing/selftests/net/timestamping.c
index 21091be70688..aee631c5284e 100644
--- a/tools/testing/selftests/net/timestamping.c
+++ b/tools/testing/selftests/net/timestamping.c
@@ -47,7 +47,7 @@ static void usage(const char *error)
{
if (error)
printf("invalid option: %s\n", error);
- printf("timestamping interface option*\n\n"
+ printf("timestamping <interface> [bind_phc_index] [option]*\n\n"
"Options:\n"
" IP_MULTICAST_LOOP - looping outgoing multicasts\n"
" SO_TIMESTAMP - normal software time stamping, ms resolution\n"
@@ -58,6 +58,7 @@ static void usage(const char *error)
" SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n"
" SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n"
" SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n"
+ " SOF_TIMESTAMPING_BIND_PHC - request to bind a PHC of PTP vclock\n"
" SIOCGSTAMP - check last socket time stamp\n"
" SIOCGSTAMPNS - more accurate socket time stamp\n"
" PTPV2 - use PTPv2 messages\n");
@@ -311,7 +312,6 @@ static void recvpacket(int sock, int recvmsg_flags,
int main(int argc, char **argv)
{
- int so_timestamping_flags = 0;
int so_timestamp = 0;
int so_timestampns = 0;
int siocgstamp = 0;
@@ -325,6 +325,8 @@ int main(int argc, char **argv)
struct ifreq device;
struct ifreq hwtstamp;
struct hwtstamp_config hwconfig, hwconfig_requested;
+ struct so_timestamping so_timestamping_get = { 0, -1 };
+ struct so_timestamping so_timestamping = { 0, -1 };
struct sockaddr_in addr;
struct ip_mreq imr;
struct in_addr iaddr;
@@ -342,7 +344,12 @@ int main(int argc, char **argv)
exit(1);
}
- for (i = 2; i < argc; i++) {
+ if (argc >= 3 && sscanf(argv[2], "%d", &so_timestamping.bind_phc) == 1)
+ val = 3;
+ else
+ val = 2;
+
+ for (i = val; i < argc; i++) {
if (!strcasecmp(argv[i], "SO_TIMESTAMP"))
so_timestamp = 1;
else if (!strcasecmp(argv[i], "SO_TIMESTAMPNS"))
@@ -356,17 +363,19 @@ int main(int argc, char **argv)
else if (!strcasecmp(argv[i], "PTPV2"))
ptpv2 = 1;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_HARDWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_TX_HARDWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_TX_HARDWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_SOFTWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_HARDWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_RX_HARDWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_RX_HARDWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_SOFTWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_SOFTWARE;
else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE"))
- so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+ so_timestamping.flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+ else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_BIND_PHC"))
+ so_timestamping.flags |= SOF_TIMESTAMPING_BIND_PHC;
else
usage(argv[i]);
}
@@ -385,10 +394,10 @@ int main(int argc, char **argv)
hwtstamp.ifr_data = (void *)&hwconfig;
memset(&hwconfig, 0, sizeof(hwconfig));
hwconfig.tx_type =
- (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
+ (so_timestamping.flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
hwconfig.rx_filter =
- (so_timestamping_flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
+ (so_timestamping.flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
ptpv2 ? HWTSTAMP_FILTER_PTP_V2_L4_SYNC :
HWTSTAMP_FILTER_PTP_V1_L4_SYNC : HWTSTAMP_FILTER_NONE;
hwconfig_requested = hwconfig;
@@ -413,6 +422,9 @@ int main(int argc, char **argv)
sizeof(struct sockaddr_in)) < 0)
bail("bind");
+ if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, interface, if_len))
+ bail("bind device");
+
/* set multicast group for outgoing packets */
inet_aton("224.0.1.130", &iaddr); /* alternate PTP domain 1 */
addr.sin_addr = iaddr;
@@ -444,10 +456,9 @@ int main(int argc, char **argv)
&enabled, sizeof(enabled)) < 0)
bail("setsockopt SO_TIMESTAMPNS");
- if (so_timestamping_flags &&
- setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
- &so_timestamping_flags,
- sizeof(so_timestamping_flags)) < 0)
+ if (so_timestamping.flags &&
+ setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping,
+ sizeof(so_timestamping)) < 0)
bail("setsockopt SO_TIMESTAMPING");
/* request IP_PKTINFO for debugging purposes */
@@ -468,14 +479,18 @@ int main(int argc, char **argv)
else
printf("SO_TIMESTAMPNS %d\n", val);
- if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &val, &len) < 0) {
+ len = sizeof(so_timestamping_get);
+ if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping_get,
+ &len) < 0) {
printf("%s: %s\n", "getsockopt SO_TIMESTAMPING",
strerror(errno));
} else {
- printf("SO_TIMESTAMPING %d\n", val);
- if (val != so_timestamping_flags)
- printf(" not the expected value %d\n",
- so_timestamping_flags);
+ printf("SO_TIMESTAMPING flags %d, bind phc %d\n",
+ so_timestamping_get.flags, so_timestamping_get.bind_phc);
+ if (so_timestamping_get.flags != so_timestamping.flags ||
+ so_timestamping_get.bind_phc != so_timestamping.bind_phc)
+ printf(" not expected, flags %d, bind phc %d\n",
+ so_timestamping.flags, so_timestamping.bind_phc);
}
/* send packets forever every five seconds */
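
A minimal sketch of the updated SO_TIMESTAMPING usage: with SOF_TIMESTAMPING_BIND_PHC, the flags and the PTP vclock index travel together in struct so_timestamping, assuming a linux/net_tstamp.h that already contains this series.

#include <string.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Request software RX timestamps taken from PTP vclock "phc_index",
 * mirroring what timestamping.c now does when a bind_phc_index
 * argument is given.  "sock" is an already-created socket.
 */
static int bind_phc_timestamping(int sock, int phc_index)
{
	struct so_timestamping ts;

	memset(&ts, 0, sizeof(ts));
	ts.flags = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE |
		   SOF_TIMESTAMPING_BIND_PHC;
	ts.bind_phc = phc_index;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
}

From the command line the same request becomes, for example, timestamping eth0 1 SOF_TIMESTAMPING_SOFTWARE SOF_TIMESTAMPING_RX_SOFTWARE SOF_TIMESTAMPING_BIND_PHC, with eth0 and vclock index 1 as placeholder values.
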
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 112d41d01b12..97fceb9be9ed 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -444,8 +444,9 @@ TEST_F(tls, sendmsg_large)
EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
}
- while (recvs++ < sends)
+ while (recvs++ < sends) {
EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+ }
free(mem);
}
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index cd6430b39982..8748199ac109 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -5,7 +5,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
- ipip-conntrack-mtu.sh
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
new file mode 100755
index 000000000000..e7d7bf13cff5
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check that UNREPLIED tcp conntrack will eventually timeout.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+waittime=20
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup() {
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+ ip netns pids $ns2 | xargs kill 2>/dev/null
+
+ ip netns del $ns1
+ ip netns del $ns2
+}
+
+ipv4() {
+ echo -n 192.168.$1.2
+}
+
+check_counter()
+{
+ ns=$1
+ name=$2
+ expect=$3
+ local lret=0
+
+ cnt=$(ip netns exec $ns2 nft list counter inet filter "$name" | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ echo "ERROR: counter $name in $ns2 has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns2 nft list counter inet filter "$name" 1>&2
+ lret=1
+ fi
+
+ return $lret
+}
+
+# Create test namespaces
+ip netns add $ns1 || exit 1
+
+trap cleanup EXIT
+
+ip netns add $ns2 || exit 1
+
+# Connect the namespace to the host using a veth pair
+ip -net $ns1 link add name veth1 type veth peer name veth2
+ip -net $ns1 link set netns $ns2 dev veth2
+
+ip -net $ns1 link set up dev lo
+ip -net $ns2 link set up dev lo
+ip -net $ns1 link set up dev veth1
+ip -net $ns2 link set up dev veth2
+
+ip -net $ns2 addr add 10.11.11.2/24 dev veth2
+ip -net $ns2 route add default via 10.11.11.1
+
+ip netns exec $ns2 sysctl -q net.ipv4.conf.veth2.forwarding=1
+
+# add a rule inside NS so we enable conntrack
+ip netns exec $ns1 iptables -A INPUT -m state --state established,related -j ACCEPT
+
+ip -net $ns1 addr add 10.11.11.1/24 dev veth1
+ip -net $ns1 route add 10.99.99.99 via 10.11.11.2
+
+# Check connectivity works
+ip netns exec $ns1 ping -q -c 2 10.11.11.2 >/dev/null || exit 1
+
+ip netns exec $ns2 nc -l -p 8080 < /dev/null &
+
+# however, conntrack entries are there
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet filter {
+ counter connreq { }
+ counter redir { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+ ct state new tcp flags syn ip daddr 10.99.99.99 tcp dport 80 counter name "connreq" accept
+ ct state new ct status dnat tcp dport 8080 counter name "redir" accept
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "ERROR: Could not load nft rules"
+ exit 1
+fi
+
+ip netns exec $ns2 sysctl -q net.netfilter.nf_conntrack_tcp_timeout_syn_sent=10
+
+echo "INFO: connect $ns1 -> $ns2 to the virtual ip"
+ip netns exec $ns1 bash -c 'while true ; do
+ nc -p 60000 10.99.99.99 80
+ sleep 1
+ done' &
+
+sleep 1
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ ip daddr 10.99.99.99 tcp dport 80 redirect to :8080
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "ERROR: Could not load nat redirect"
+ exit 1
+fi
+
+count=$(ip netns exec $ns2 conntrack -L -p tcp --dport 80 2>/dev/null | wc -l)
+if [ $count -eq 0 ]; then
+ echo "ERROR: $ns2 did not pick up tcp connection from peer"
+ exit 1
+fi
+
+echo "INFO: NAT redirect added in ns $ns2, waiting for $waittime seconds for nat to take effect"
+for i in $(seq 1 $waittime); do
+ echo -n "."
+
+ sleep 1
+
+ count=$(ip netns exec $ns2 conntrack -L -p tcp --reply-port-src 8080 2>/dev/null | wc -l)
+ if [ $count -gt 0 ]; then
+ echo
+ echo "PASS: redirection took effect after $i seconds"
+ break
+ fi
+
+ m=$((i%20))
+ if [ $m -eq 0 ]; then
+ echo " waited for $i seconds"
+ fi
+done
+
+expect="packets 1 bytes 60"
+check_counter "$ns2" "redir" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+if [ $ret -eq 0 ];then
+ echo "PASS: redirection counter has expected values"
+else
+ echo "ERROR: no tcp connection was redirected"
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
index 579f0215c6e7..9836838a529f 100644
--- a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
+++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
@@ -14,6 +14,7 @@
#include <time.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/syscall.h>
#include <signal.h>
static volatile int soak_done;
@@ -121,7 +122,7 @@ static void do_null_syscall(unsigned long nr)
unsigned long i;
for (i = 0; i < nr; i++)
- getppid();
+ syscall(__NR_gettid);
}
#define TIME(A, STR) \
diff --git a/tools/testing/selftests/powerpc/nx-gzip/Makefile b/tools/testing/selftests/powerpc/nx-gzip/Makefile
index 640fad6cc2c7..0785c2e99d40 100644
--- a/tools/testing/selftests/powerpc/nx-gzip/Makefile
+++ b/tools/testing/selftests/powerpc/nx-gzip/Makefile
@@ -1,8 +1,8 @@
-CFLAGS = -O3 -m64 -I./include
+CFLAGS = -O3 -m64 -I./include -I../include
TEST_GEN_FILES := gzfht_test gunz_test
TEST_PROGS := nx-gzip-test.sh
include ../../lib.mk
-$(TEST_GEN_FILES): gzip_vas.c
+$(TEST_GEN_FILES): gzip_vas.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
index b099753b50e4..095195a25687 100644
--- a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
@@ -60,6 +60,7 @@
#include <assert.h>
#include <errno.h>
#include <signal.h>
+#include "utils.h"
#include "nxu.h"
#include "nx.h"
@@ -70,6 +71,8 @@ FILE *nx_gzip_log;
#define FNAME_MAX 1024
#define FEXT ".nx.gz"
+#define SYSFS_MAX_REQ_BUF_PATH "devices/vio/ibm,compression-v1/nx_gzip_caps/req_max_processed_len"
+
/*
* LZ counts returned in the user supplied nx_gzip_crb_cpb_t structure.
*/
@@ -244,6 +247,7 @@ int compress_file(int argc, char **argv, void *handle)
struct nx_gzip_crb_cpb_t *cmdp;
uint32_t pagelen = 65536;
int fault_tries = NX_MAX_FAULTS;
+ char buf[32];
cmdp = (void *)(uintptr_t)
aligned_alloc(sizeof(struct nx_gzip_crb_cpb_t),
@@ -263,8 +267,17 @@ int compress_file(int argc, char **argv, void *handle)
assert(NULL != (outbuf = (char *)malloc(outlen)));
nxu_touch_pages(outbuf, outlen, pagelen, 1);
- /* Compress piecemeal in smallish chunks */
- chunk = 1<<22;
+ /*
+ * On PowerVM, the hypervisor defines the maximum request buffer
+ * size, and this value is available via sysfs.
+ */
+ if (!read_sysfs_file(SYSFS_MAX_REQ_BUF_PATH, buf, sizeof(buf))) {
+ chunk = atoi(buf);
+ } else {
+ /* sysfs entry is not available on PowerNV */
+ /* Compress piecemeal in smallish chunks */
+ chunk = 1<<22;
+ }
/* Write the gzip header to the stream */
num_hdr_bytes = gzip_header_blank(outbuf);
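
A rough equivalent of the new chunk-size selection, written with plain stdio instead of the selftest read_sysfs_file() helper; the absolute /sys prefix and the fallback value are assumptions taken from the hunk above.

#include <stdio.h>
#include <stdlib.h>

/* Pick the compression chunk size: use the PowerVM request-length cap
 * exported via sysfs when present, otherwise fall back to the 4 MB
 * chunks used on PowerNV, where the attribute does not exist.
 */
static long nx_gzip_chunk_size(void)
{
	const char *path = "/sys/devices/vio/ibm,compression-v1/"
			   "nx_gzip_caps/req_max_processed_len";
	char buf[32];
	FILE *f;
	long chunk = 1L << 22;

	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			chunk = atol(buf);
		fclose(f);
	}
	return chunk;
}
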
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index c5ecb4634094..010160690227 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -24,7 +24,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test
+ cycles_with_mmcr2_test regs_access_pmccext_test
top_srcdir = ../../../../../..
include ../../../lib.mk
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
index b5bc2b616075..2c803b5b48d6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
@@ -55,8 +55,6 @@ void ebb_global_disable(void);
bool ebb_is_supported(void);
void ebb_freeze_pmcs(void);
void ebb_unfreeze_pmcs(void);
-void event_ebb_init(struct event *e);
-void event_leader_ebb_init(struct event *e);
int count_pmc(int pmc, uint32_t sample_period);
void dump_ebb_state(void);
void dump_summary_ebb_state(void);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
index fc5bf4870d8e..01e827c31169 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
@@ -50,8 +50,6 @@ static int no_handler_test(void)
event_close(&event);
- dump_ebb_state();
-
/* The real test is that we never took an EBB at 0x0 */
return 0;
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c b/tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c
new file mode 100644
index 000000000000..1eda8e9932e8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+#include <signal.h>
+
+#include "ebb.h"
+
+
+/*
+ * Test that closing the EBB event clears MMCR0_PMCC and
+ * sets MMCR0_PMCCEXT preventing further read access to the
+ * group B PMU registers.
+ */
+
+static int regs_access_pmccext(void)
+{
+ struct event event;
+
+ SKIP_IF(!ebb_is_supported());
+
+ event_init_named(&event, 0x1001e, "cycles");
+ event_leader_ebb_init(&event);
+
+ FAIL_IF(event_open(&event));
+
+ ebb_enable_pmc_counting(1);
+ setup_ebb_handler(standard_ebb_callee);
+ ebb_global_enable();
+ FAIL_IF(ebb_event_enable(&event));
+
+ mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+
+ while (ebb_state.stats.ebb_count < 1)
+ FAIL_IF(core_busy_loop());
+
+ ebb_global_disable();
+ event_close(&event);
+
+ FAIL_IF(ebb_state.stats.ebb_count == 0);
+
+ /*
+ * For ISA v3.1, verify the test takes a SIGILL when reading
+ * PMU regs after the event is closed. With the control bit
+ * in MMCR0 (PMCCEXT) restricting access to group B PMU regs,
+ * sigill is expected.
+ */
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1))
+ FAIL_IF(catch_sigill(dump_ebb_state));
+ else
+ dump_ebb_state();
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(regs_access_pmccext, "regs_access_pmccext");
+}
diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
index 844d18cd5f93..7488315fd847 100644
--- a/tools/testing/selftests/powerpc/security/Makefile
+++ b/tools/testing/selftests/powerpc/security/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0+
TEST_GEN_PROGS := rfi_flush entry_flush uaccess_flush spectre_v2
+TEST_PROGS := mitigation-patching.sh
+
top_srcdir = ../../../../..
CFLAGS += -I../../../../../usr/include
diff --git a/tools/testing/selftests/powerpc/security/mitigation-patching.sh b/tools/testing/selftests/powerpc/security/mitigation-patching.sh
new file mode 100755
index 000000000000..00197acb7ff1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/mitigation-patching.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+TIMEOUT=10
+
+function do_one
+{
+ local mitigation="$1"
+ local orig
+ local start
+ local now
+
+ orig=$(cat "$mitigation")
+
+ start=$EPOCHSECONDS
+ now=$start
+
+ while [[ $((now-start)) -lt "$TIMEOUT" ]]
+ do
+ echo 0 > "$mitigation"
+ echo 1 > "$mitigation"
+
+ now=$EPOCHSECONDS
+ done
+
+ echo "$orig" > "$mitigation"
+}
+
+rc=0
+cd /sys/kernel/debug/powerpc || rc=1
+if [[ "$rc" -ne 0 ]]; then
+ echo "Error: couldn't cd to /sys/kernel/debug/powerpc" >&2
+ exit 1
+fi
+
+tainted=$(cat /proc/sys/kernel/tainted)
+if [[ "$tainted" -ne 0 ]]; then
+ echo "Error: kernel already tainted!" >&2
+ exit 1
+fi
+
+mitigations="barrier_nospec stf_barrier count_cache_flush rfi_flush entry_flush uaccess_flush"
+
+for m in $mitigations
+do
+ do_one "$m" &
+done
+
+echo "Spawned threads enabling/disabling mitigations ..."
+
+if stress-ng > /dev/null 2>&1; then
+ stress="stress-ng"
+elif stress > /dev/null 2>&1; then
+ stress="stress"
+else
+ stress=""
+fi
+
+if [[ -n "$stress" ]]; then
+ "$stress" -m "$(nproc)" -t "$TIMEOUT" &
+ echo "Spawned VM stressors ..."
+fi
+
+echo "Waiting for timeout ..."
+wait
+
+tainted=$(cat /proc/sys/kernel/tainted)
+if [[ "$tainted" -ne 0 ]]; then
+ echo "Error: kernel became tainted!" >&2
+ exit 1
+fi
+
+echo "OK"
+exit 0
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
index e2a0c07e8362..9ef37a9836ac 100644
--- a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
+++ b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
@@ -17,7 +17,6 @@
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
-#include <pthread.h>
#include "tm.h"
#include "utils.h"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-again.sh b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
index 46e47a00a7db..d8c8483c46f1 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-again.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
@@ -29,7 +29,7 @@ then
echo "Usage: $scriptname /path/to/old/run [ options ]"
exit 1
fi
-if ! cp "$oldrun/batches" $T/batches.oldrun
+if ! cp "$oldrun/scenarios" $T/scenarios.oldrun
then
# Later on, can reconstitute this from console.log files.
echo Prior run batches file does not exist: $oldrun/batches
@@ -143,6 +143,8 @@ then
usage
fi
rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+touch "$rundir/log"
+echo $scriptname $args | tee -a "$rundir/log"
echo $oldrun > "$rundir/re-run"
if ! test -d "$rundir/../../bin"
then
@@ -165,22 +167,12 @@ done
grep '^#' $i | sed -e 's/^# //' > $T/qemu-cmd-settings
. $T/qemu-cmd-settings
-grep -v '^#' $T/batches.oldrun | awk '
-BEGIN {
- oldbatch = 1;
-}
-
+grep -v '^#' $T/scenarios.oldrun | awk '
{
- if (oldbatch != $1) {
- print "kvm-test-1-run-batch.sh" curbatch;
- curbatch = "";
- oldbatch = $1;
- }
- curbatch = curbatch " " $2;
-}
-
-END {
- print "kvm-test-1-run-batch.sh" curbatch
+ curbatch = "";
+ for (i = 2; i <= NF; i++)
+ curbatch = curbatch " " $i;
+ print "kvm-test-1-run-batch.sh" curbatch;
}' > $T/runbatches.sh
if test -n "$dryrun"
@@ -188,12 +180,5 @@ then
echo ---- Dryrun complete, directory: $rundir | tee -a "$rundir/log"
else
( cd "$rundir"; sh $T/runbatches.sh )
- kcsan-collapse.sh "$rundir" | tee -a "$rundir/log"
- echo | tee -a "$rundir/log"
- echo ---- Results directory: $rundir | tee -a "$rundir/log"
- kvm-recheck.sh "$rundir" > $T/kvm-recheck.sh.out 2>&1
- ret=$?
- cat $T/kvm-recheck.sh.out | tee -a "$rundir/log"
- echo " --- Done at `date` (`get_starttime_duration $starttime`) exitcode $ret" | tee -a "$rundir/log"
- exit $ret
+ kvm-end-run-stats.sh "$rundir" "$starttime"
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 115e1822b26f..5ad973dca820 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -40,8 +40,10 @@ if test $retval -gt 1
then
exit 2
fi
-ncpus=`cpus2use.sh`
-make -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
+
+# Tell "make" to use double the number of real CPUs on the build system.
+ncpus="`getconf _NPROCESSORS_ONLN`"
+make -j$((2 * ncpus)) $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
retval=$?
if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-end-run-stats.sh b/tools/testing/selftests/rcutorture/bin/kvm-end-run-stats.sh
new file mode 100755
index 000000000000..e4a00779b8c6
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-end-run-stats.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Check the status of the specified run.
+#
+# Usage: kvm-end-run-stats.sh /path/to/run starttime
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+# scriptname=$0
+# args="$*"
+rundir="$1"
+if ! test -d "$rundir"
+then
+ echo kvm-end-run-stats.sh: Specified run directory does not exist: $rundir
+ exit 1
+fi
+
+T=${TMPDIR-/tmp}/kvm-end-run-stats.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
+PATH=${KVM}/bin:$PATH; export PATH
+. functions.sh
+default_starttime="`get_starttime`"
+starttime="${2-$default_starttime}"
+
+echo | tee -a "$rundir/log"
+echo | tee -a "$rundir/log"
+echo " --- `date` Test summary:" | tee -a "$rundir/log"
+echo Results directory: $rundir | tee -a "$rundir/log"
+kcsan-collapse.sh "$rundir" | tee -a "$rundir/log"
+kvm-recheck.sh "$rundir" > $T/kvm-recheck.sh.out 2>&1
+ret=$?
+cat $T/kvm-recheck.sh.out | tee -a "$rundir/log"
+echo " --- Done at `date` (`get_starttime_duration $starttime`) exitcode $ret" | tee -a "$rundir/log"
+exit $ret
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
index 0670841122d8..daf64b507038 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
@@ -43,7 +43,7 @@ then
else
echo No build errors.
fi
-if grep -q -e "--buildonly" < ${rundir}/log
+if grep -q -e "--build-\?only" < ${rundir}/log && ! test -f "${rundir}/remote-log"
then
echo Build-only run, no console logs to check.
exit $editorret
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 1706cd4466b4..fbdf162b6acd 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -31,7 +31,7 @@ then
echo "$configfile ------- " $stopstate
else
title="$configfile ------- $ngps GPs"
- dur=`sed -e 's/^.* rcutorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+ dur=`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* rcutorture.shutdown_secs=//' -e 's/ .*$//'`
if test -z "$dur"
then
:
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
new file mode 100755
index 000000000000..79e680e0e7bf
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
@@ -0,0 +1,249 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Run a series of tests on remote systems under KVM.
+#
+# Usage: kvm-remote.sh "systems" [ <kvm.sh args> ]
+# kvm-remote.sh "systems" /path/to/old/run [ <kvm-again.sh args> ]
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
+
+scriptname=$0
+args="$*"
+
+if ! test -d tools/testing/selftests/rcutorture/bin
+then
+ echo $scriptname must be run from top-level directory of kernel source tree.
+ exit 1
+fi
+
+KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
+PATH=${KVM}/bin:$PATH; export PATH
+. functions.sh
+
+starttime="`get_starttime`"
+
+systems="$1"
+if test -z "$systems"
+then
+ echo $scriptname: Empty list of systems will go nowhere good, giving up.
+ exit 1
+fi
+shift
+
+# Pathnames:
+# T: /tmp/kvm-remote.sh.$$
+# resdir: /tmp/kvm-remote.sh.$$/res
+# rundir: /tmp/kvm-remote.sh.$$/res/$ds ("-remote" suffix)
+# oldrun: `pwd`/tools/testing/.../res/$otherds
+#
+# Pathname segments:
+# TD: kvm-remote.sh.$$
+# ds: yyyy.mm.dd-hh.mm.ss-remote
+
+TD=kvm-remote.sh.$$
+T=${TMPDIR-/tmp}/$TD
+trap 'rm -rf $T' 0
+mkdir $T
+
+resdir="$T/res"
+ds=`date +%Y.%m.%d-%H.%M.%S`-remote
+rundir=$resdir/$ds
+echo Results directory: $rundir
+echo $scriptname $args
+if echo $1 | grep -q '^--'
+then
+ # Fresh build. Create a datestamp unless the caller supplied one.
+ datestamp="`echo "$@" | awk -v ds="$ds" '{
+ for (i = 1; i < NF; i++) {
+ if ($i == "--datestamp") {
+ ds = "";
+ break;
+ }
+ }
+ if (ds != "")
+ print "--datestamp " ds;
+ }'`"
+ kvm.sh --remote "$@" $datestamp --buildonly > $T/kvm.sh.out 2>&1
+ ret=$?
+ if test "$ret" -ne 0
+ then
+ echo $scriptname: kvm.sh failed exit code $?
+ cat $T/kvm.sh.out
+ exit 2
+ fi
+ oldrun="`grep -m 1 "^Results directory: " $T/kvm.sh.out | awk '{ print $3 }'`"
+ touch "$oldrun/remote-log"
+ echo $scriptname $args >> "$oldrun/remote-log"
+ echo | tee -a "$oldrun/remote-log"
+ echo " ----" kvm.sh output: "(`date`)" | tee -a "$oldrun/remote-log"
+ cat $T/kvm.sh.out | tee -a "$oldrun/remote-log"
+ # We are going to run this, so remove the buildonly files.
+ rm -f "$oldrun"/*/buildonly
+ kvm-again.sh $oldrun --dryrun --remote --rundir "$rundir" > $T/kvm-again.sh.out 2>&1
+ ret=$?
+ if test "$ret" -ne 0
+ then
+ echo $scriptname: kvm-again.sh failed exit code $? | tee -a "$oldrun/remote-log"
+ cat $T/kvm-again.sh.out | tee -a "$oldrun/remote-log"
+ exit 2
+ fi
+else
+ # Re-use old run.
+ oldrun="$1"
+ if ! echo $oldrun | grep -q '^/'
+ then
+ oldrun="`pwd`/$oldrun"
+ fi
+ shift
+ touch "$oldrun/remote-log"
+ echo $scriptname $args >> "$oldrun/remote-log"
+ kvm-again.sh "$oldrun" "$@" --dryrun --remote --rundir "$rundir" > $T/kvm-again.sh.out 2>&1
+ ret=$?
+ if test "$ret" -ne 0
+ then
+ echo $scriptname: kvm-again.sh failed exit code $? | tee -a "$oldrun/remote-log"
+ cat $T/kvm-again.sh.out | tee -a "$oldrun/remote-log"
+ exit 2
+ fi
+ cp -a "$rundir" "$KVM/res/"
+ oldrun="$KVM/res/$ds"
+fi
+echo | tee -a "$oldrun/remote-log"
+echo " ----" kvm-again.sh output: "(`date`)" | tee -a "$oldrun/remote-log"
+cat $T/kvm-again.sh.out
+echo | tee -a "$oldrun/remote-log"
+echo Remote run directory: $rundir | tee -a "$oldrun/remote-log"
+echo Local build-side run directory: $oldrun | tee -a "$oldrun/remote-log"
+
+# Create the kvm-remote-N.sh scripts in the bin directory.
+awk < "$rundir"/scenarios -v dest="$T/bin" -v rundir="$rundir" '
+{
+ n = $1;
+ sub(/\./, "", n);
+ fn = dest "/kvm-remote-" n ".sh"
+ scenarios = "";
+ for (i = 2; i <= NF; i++)
+ scenarios = scenarios " " $i;
+ print "kvm-test-1-run-batch.sh" scenarios > fn;
+ print "rm " rundir "/remote.run" >> fn;
+}'
+chmod +x $T/bin/kvm-remote-*.sh
+( cd "`dirname $T`"; tar -chzf $T/binres.tgz "$TD/bin" "$TD/res" )
+
+# Check first to avoid the need for cleanup for system-name typos
+for i in $systems
+do
+ ncpus="`ssh $i getconf _NPROCESSORS_ONLN 2> /dev/null`"
+ echo $i: $ncpus CPUs " " `date` | tee -a "$oldrun/remote-log"
+ ret=$?
+ if test "$ret" -ne 0
+ then
+ echo System $i unreachable, giving up. | tee -a "$oldrun/remote-log"
+ exit 4 | tee -a "$oldrun/remote-log"
+ fi
+done
+
+# Download and expand the tarball on all systems.
+for i in $systems
+do
+ echo Downloading tarball to $i `date` | tee -a "$oldrun/remote-log"
+ cat $T/binres.tgz | ssh $i "cd /tmp; tar -xzf -"
+ ret=$?
+ if test "$ret" -ne 0
+ then
+ echo Unable to download $T/binres.tgz to system $i, giving up. | tee -a "$oldrun/remote-log"
+ exit 10 | tee -a "$oldrun/remote-log"
+ fi
+done
+
+# Function to check for presence of a file on the specified system.
+# Complain if the system cannot be reached, and retry after a wait.
+# Currently just waits forever if a machine disappears.
+#
+# Usage: checkremotefile system pathname
+checkremotefile () {
+ local ret
+ local sleeptime=60
+
+ while :
+ do
+ ssh $1 "test -f \"$2\""
+ ret=$?
+ if test "$ret" -ne 255
+ then
+ return $ret
+ fi
+ echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
+ sleep $sleeptime
+ done
+}
+
+# Function to start batches on idle remote $systems
+#
+# Usage: startbatches curbatch nbatches
+#
+# Batches are numbered starting at 1. Returns the next batch to start.
+# Be careful to redirect all debug output to FD 2 (stderr).
+startbatches () {
+ local curbatch="$1"
+ local nbatches="$2"
+ local ret
+
+ # Each pass through the following loop examines one system.
+ for i in $systems
+ do
+ if test "$curbatch" -gt "$nbatches"
+ then
+ echo $((nbatches + 1))
+ return 0
+ fi
+ if checkremotefile "$i" "$resdir/$ds/remote.run" 1>&2
+ then
+ continue # System still running last test, skip.
+ fi
+ ssh "$i" "cd \"$resdir/$ds\"; touch remote.run; PATH=\"$T/bin:$PATH\" nohup kvm-remote-$curbatch.sh > kvm-remote-$curbatch.sh.out 2>&1 &" 1>&2
+ ret=$?
+ if test "$ret" -ne 0
+ then
+ echo ssh $i failed: exitcode $ret 1>&2
+ exit 11
+ fi
+ echo " ----" System $i Batch `head -n $curbatch < "$rundir"/scenarios | tail -1` `date` 1>&2
+ curbatch=$((curbatch + 1))
+ done
+ echo $curbatch
+}
+
+# Launch all the scenarios.
+nbatches="`wc -l "$rundir"/scenarios | awk '{ print $1 }'`"
+curbatch=1
+while test "$curbatch" -le "$nbatches"
+do
+ startbatches $curbatch $nbatches > $T/curbatch 2> $T/startbatches.stderr
+ curbatch="`cat $T/curbatch`"
+ if test -s "$T/startbatches.stderr"
+ then
+ cat "$T/startbatches.stderr" | tee -a "$oldrun/remote-log"
+ fi
+ if test "$curbatch" -le "$nbatches"
+ then
+ sleep 30
+ fi
+done
+echo All batches started. `date`
+
+# Wait for all remaining scenarios to complete and collect results.
+for i in $systems
+do
+ while checkremotefile "$i" "$resdir/$ds/remote.run"
+ do
+ sleep 30
+ done
+ ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu_pid */qemu-retval; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
+done
+
+( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"
+exit "`cat $T/exitcode`"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 6bf00a003d3d..b4ac4ee33222 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -20,6 +20,9 @@ mkdir $T
cd `dirname $scriptname`/../../../../../
+# This script knows only English.
+LANG=en_US.UTF-8; export LANG
+
dur=$((30*60))
dryrun=""
KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
@@ -41,6 +44,7 @@ TORTURE_KCONFIG_KASAN_ARG=""
TORTURE_KCONFIG_KCSAN_ARG=""
TORTURE_KMAKE_ARG=""
TORTURE_QEMU_MEM=512
+TORTURE_REMOTE=
TORTURE_SHUTDOWN_GRACE=180
TORTURE_SUITE=rcu
TORTURE_MOD=rcutorture
@@ -64,7 +68,7 @@ usage () {
echo " --cpus N"
echo " --datestamp string"
echo " --defconfig string"
- echo " --dryrun batches|sched|script"
+ echo " --dryrun batches|scenarios|sched|script"
echo " --duration minutes | <seconds>s | <hours>h | <days>d"
echo " --gdb"
echo " --help"
@@ -77,6 +81,7 @@ usage () {
echo " --no-initrd"
echo " --qemu-args qemu-arguments"
echo " --qemu-cmd qemu-system-..."
+ echo " --remote"
echo " --results absolute-pathname"
echo " --torture lock|rcu|rcuscale|refscale|scf"
echo " --trust-make"
@@ -112,10 +117,13 @@ do
checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--'
cpus=$2
TORTURE_ALLOTED_CPUS="$2"
- max_cpus="`identify_qemu_vcpus`"
- if test "$TORTURE_ALLOTED_CPUS" -gt "$max_cpus"
+ if test -z "$TORTURE_REMOTE"
then
- TORTURE_ALLOTED_CPUS=$max_cpus
+ max_cpus="`identify_qemu_vcpus`"
+ if test "$TORTURE_ALLOTED_CPUS" -gt "$max_cpus"
+ then
+ TORTURE_ALLOTED_CPUS=$max_cpus
+ fi
fi
shift
;;
@@ -130,7 +138,7 @@ do
shift
;;
--dryrun)
- checkarg --dryrun "batches|sched|script" $# "$2" 'batches\|sched\|script' '^--'
+ checkarg --dryrun "batches|sched|script" $# "$2" 'batches\|scenarios\|sched\|script' '^--'
dryrun=$2
shift
;;
@@ -206,6 +214,9 @@ do
TORTURE_QEMU_CMD="$2"
shift
;;
+ --remote)
+ TORTURE_REMOTE=1
+ ;;
--results)
checkarg --results "(absolute pathname)" "$#" "$2" '^/' '^error'
resdir=$2
@@ -550,20 +561,7 @@ END {
if (ncpus != 0)
dump(first, i, batchnum);
}' >> $T/script
-
-cat << '___EOF___' >> $T/script
-echo | tee -a $TORTURE_RESDIR/log
-echo | tee -a $TORTURE_RESDIR/log
-echo " --- `date` Test summary:" | tee -a $TORTURE_RESDIR/log
-___EOF___
-cat << ___EOF___ >> $T/script
-echo Results directory: $resdir/$ds | tee -a $resdir/$ds/log
-kcsan-collapse.sh $resdir/$ds | tee -a $resdir/$ds/log
-kvm-recheck.sh $resdir/$ds > $T/kvm-recheck.sh.out 2>&1
-___EOF___
-echo 'ret=$?' >> $T/script
-echo "cat $T/kvm-recheck.sh.out | tee -a $resdir/$ds/log" >> $T/script
-echo 'exit $ret' >> $T/script
+echo kvm-end-run-stats.sh "$resdir/$ds" "$starttime" >> $T/script
# Extract the tests and their batches from the script.
egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" |
@@ -577,6 +575,25 @@ egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" |
print batchno, $1, $2
}' > $T/batches
+# As above, but one line per batch.
+grep -v '^#' $T/batches | awk '
+BEGIN {
+ oldbatch = 1;
+}
+
+{
+ if (oldbatch != $1) {
+ print ++n ". " curbatch;
+ curbatch = "";
+ oldbatch = $1;
+ }
+ curbatch = curbatch " " $2;
+}
+
+END {
+ print ++n ". " curbatch;
+}' > $T/scenarios
+
if test "$dryrun" = script
then
cat $T/script
@@ -597,13 +614,17 @@ elif test "$dryrun" = batches
then
cat $T/batches
exit 0
+elif test "$dryrun" = scenarios
+then
+ cat $T/scenarios
+ exit 0
else
# Not a dryrun. Record the batches and the number of CPUs, then run the script.
bash $T/script
ret=$?
cp $T/batches $resdir/$ds/batches
+ cp $T/scenarios $resdir/$ds/scenarios
echo '#' cpus=$cpus >> $resdir/$ds/batches
- echo " --- Done at `date` (`get_starttime_duration $starttime`) exitcode $ret" | tee -a $resdir/$ds/log
exit $ret
fi
diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
index 56e2e1a42569..53ec7c046262 100755
--- a/tools/testing/selftests/rcutorture/bin/torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/torture.sh
@@ -302,7 +302,7 @@ function torture_set {
kcsan_kmake_tag="--kmake-args"
cur_kcsan_kmake_args="$kcsan_kmake_args"
fi
- torture_one $* --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y" $kcsan_kmake_tag $cur_kcsan_kmake_args --kcsan
+ torture_one "$@" --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y" $kcsan_kmake_tag $cur_kcsan_kmake_args --kcsan
fi
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST
new file mode 100644
index 000000000000..22d598f9cabe
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST
@@ -0,0 +1,17 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=16
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=y
+CONFIG_NO_HZ_IDLE=n
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_TRACE=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_FANOUT=2
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
new file mode 100644
index 000000000000..f57720c52c0f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
@@ -0,0 +1,8 @@
+rcutorture.test_boost=2
+rcutorture.stutter=0
+rcutree.gp_preinit_delay=12
+rcutree.gp_init_delay=3
+rcutree.gp_cleanup_delay=3
+rcutree.kthread_prio=2
+threadirqs
+tree.use_softirq=0
diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE
index 721cfda76ab2..4cc1cc581321 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE
+++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE
@@ -7,7 +7,7 @@ CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_HOTPLUG_CPU=n
+CONFIG_HOTPLUG_CPU=y
CONFIG_SUSPEND=n
CONFIG_HIBERNATION=n
CONFIG_RCU_NOCB_CPU=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54
index 7629f5dd73b2..f5952061fde7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54
+++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TREE54
@@ -8,7 +8,7 @@ CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_HOTPLUG_CPU=n
+CONFIG_HOTPLUG_CPU=y
CONFIG_SUSPEND=n
CONFIG_HIBERNATION=n
CONFIG_RCU_FANOUT=3
diff --git a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT
index 1cd25b7314e3..ad505a887bec 100644
--- a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT
+++ b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT
@@ -7,7 +7,7 @@ CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_HOTPLUG_CPU=n
+CONFIG_HOTPLUG_CPU=y
CONFIG_SUSPEND=n
CONFIG_HIBERNATION=n
CONFIG_RCU_NOCB_CPU=n
diff --git a/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT b/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT
index d10bc694f42c..4f08e641bb6b 100644
--- a/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT
+++ b/tools/testing/selftests/rcutorture/configs/refscale/PREEMPT
@@ -7,7 +7,7 @@ CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_HOTPLUG_CPU=n
+CONFIG_HOTPLUG_CPU=y
CONFIG_SUSPEND=n
CONFIG_HIBERNATION=n
CONFIG_RCU_NOCB_CPU=n
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
index cf6938d679d7..1e24827f96f1 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
@@ -174,7 +174,7 @@ static inline bool spin_trylock(spinlock_t *lock)
}
struct completion {
- /* Hopefuly this won't overflow. */
+ /* Hopefully this won't overflow. */
unsigned int count;
};
diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
index 4b36b25b6ac0..3d2bbd4fa3aa 100644
--- a/tools/testing/selftests/resctrl/README
+++ b/tools/testing/selftests/resctrl/README
@@ -47,7 +47,7 @@ Parameter '-h' shows usage information.
usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT default benchmark is builtin fill_buf
- -t test list: run tests specified in the test list, e.g. -t mbm, mba, cmt, cat
+ -t test list: run tests specified in the test list, e.g. -t mbm,mba,cmt,cat
-n no_of_bits: run cache tests using specified no of bits in cache bit mask
-p cpu_no: specify CPU number to run the test. 1 is default
-h: help
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index f51b5fc066a3..973f09a66e1e 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -40,7 +40,7 @@ static void cmd_help(void)
printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT\n");
printf("\t default benchmark is builtin fill_buf\n");
printf("\t-t test list: run tests specified in the test list, ");
- printf("e.g. -t mbm, mba, cmt, cat\n");
+ printf("e.g. -t mbm,mba,cmt,cat\n");
printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
printf("\t-h: help\n");
@@ -173,7 +173,7 @@ int main(int argc, char **argv)
return -1;
}
- token = strtok(NULL, ":\t");
+ token = strtok(NULL, ",");
}
break;
case 'p':
diff --git a/tools/testing/selftests/sgx/call.S b/tools/testing/selftests/sgx/call.S
index 4ecadc7490f4..b09a25890f3b 100644
--- a/tools/testing/selftests/sgx/call.S
+++ b/tools/testing/selftests/sgx/call.S
@@ -5,8 +5,8 @@
.text
- .global sgx_call_vdso
-sgx_call_vdso:
+ .global sgx_enter_enclave
+sgx_enter_enclave:
.cfi_startproc
push %r15
.cfi_adjust_cfa_offset 8
@@ -27,7 +27,7 @@ sgx_call_vdso:
.cfi_adjust_cfa_offset 8
push 0x38(%rsp)
.cfi_adjust_cfa_offset 8
- call *eenter(%rip)
+ call *vdso_sgx_enter_enclave(%rip)
add $0x10, %rsp
.cfi_adjust_cfa_offset -0x10
pop %rbx
diff --git a/tools/testing/selftests/sgx/defines.h b/tools/testing/selftests/sgx/defines.h
index 0bd73428d2f3..f88562afcaa0 100644
--- a/tools/testing/selftests/sgx/defines.h
+++ b/tools/testing/selftests/sgx/defines.h
@@ -18,4 +18,14 @@
#include "../../../../arch/x86/include/asm/enclu.h"
#include "../../../../arch/x86/include/uapi/asm/sgx.h"
+enum encl_op_type {
+ ENCL_OP_PUT,
+ ENCL_OP_GET,
+};
+
+struct encl_op {
+ uint64_t type;
+ uint64_t buffer;
+};
+
#endif /* DEFINES_H */
diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
index f441ac34b4d4..3ebe5d1fe337 100644
--- a/tools/testing/selftests/sgx/load.c
+++ b/tools/testing/selftests/sgx/load.c
@@ -150,16 +150,6 @@ bool encl_load(const char *path, struct encl *encl)
goto err;
}
- /*
- * This just checks if the /dev file has these permission
- * bits set. It does not check that the current user is
- * the owner or in the owning group.
- */
- if (!(sb.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
- fprintf(stderr, "no execute permissions on device file %s\n", device_path);
- goto err;
- }
-
ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
if (ptr == (void *)-1) {
perror("mmap for read");
@@ -169,13 +159,13 @@ bool encl_load(const char *path, struct encl *encl)
#define ERR_MSG \
"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
-" Check that current user has execute permissions on %s and \n" \
-" that /dev does not have noexec set: mount | grep \"/dev .*noexec\"\n" \
+" Check that /dev does not have noexec set:\n" \
+" \tmount | grep \"/dev .*noexec\"\n" \
" If so, remount it executable: mount -o remount,exec /dev\n\n"
ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
if (ptr == (void *)-1) {
- fprintf(stderr, ERR_MSG, device_path);
+ fprintf(stderr, ERR_MSG);
goto err;
}
munmap(ptr, PAGE_SIZE);
@@ -239,9 +229,6 @@ bool encl_load(const char *path, struct encl *encl)
seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
- printf("0x%016lx 0x%016lx 0x%02x\n", seg->offset, seg->size,
- seg->prot);
-
j++;
}
diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
index d304a4044eb9..e252015e0c15 100644
--- a/tools/testing/selftests/sgx/main.c
+++ b/tools/testing/selftests/sgx/main.c
@@ -17,11 +17,11 @@
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
+#include "../kselftest_harness.h"
#include "main.h"
-#include "../kselftest.h"
static const uint64_t MAGIC = 0x1122334455667788ULL;
-vdso_sgx_enter_enclave_t eenter;
+vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
struct vdso_symtab {
Elf64_Sym *elf_symtab;
@@ -107,85 +107,51 @@ static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
return NULL;
}
-bool report_results(struct sgx_enclave_run *run, int ret, uint64_t result,
- const char *test)
-{
- bool valid = true;
-
- if (ret) {
- printf("FAIL: %s() returned: %d\n", test, ret);
- valid = false;
- }
-
- if (run->function != EEXIT) {
- printf("FAIL: %s() function, expected: %u, got: %u\n", test, EEXIT,
- run->function);
- valid = false;
- }
-
- if (result != MAGIC) {
- printf("FAIL: %s(), expected: 0x%lx, got: 0x%lx\n", test, MAGIC,
- result);
- valid = false;
- }
-
- if (run->user_data) {
- printf("FAIL: %s() user data, expected: 0x0, got: 0x%llx\n",
- test, run->user_data);
- valid = false;
- }
-
- return valid;
-}
-
-static int user_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
- struct sgx_enclave_run *run)
-{
- run->user_data = 0;
- return 0;
-}
+FIXTURE(enclave) {
+ struct encl encl;
+ struct sgx_enclave_run run;
+};
-int main(int argc, char *argv[])
+FIXTURE_SETUP(enclave)
{
- struct sgx_enclave_run run;
+ Elf64_Sym *sgx_enter_enclave_sym = NULL;
struct vdso_symtab symtab;
- Elf64_Sym *eenter_sym;
- uint64_t result = 0;
- struct encl encl;
+ struct encl_segment *seg;
+ char maps_line[256];
+ FILE *maps_file;
unsigned int i;
void *addr;
- int ret;
- memset(&run, 0, sizeof(run));
-
- if (!encl_load("test_encl.elf", &encl)) {
- encl_delete(&encl);
+ if (!encl_load("test_encl.elf", &self->encl)) {
+ encl_delete(&self->encl);
ksft_exit_skip("cannot load enclaves\n");
}
- if (!encl_measure(&encl))
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ seg = &self->encl.segment_tbl[i];
+
+ TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
+ }
+
+ if (!encl_measure(&self->encl))
goto err;
- if (!encl_build(&encl))
+ if (!encl_build(&self->encl))
goto err;
/*
* An enclave consumer only must do this.
*/
- for (i = 0; i < encl.nr_segments; i++) {
- struct encl_segment *seg = &encl.segment_tbl[i];
-
- addr = mmap((void *)encl.encl_base + seg->offset, seg->size,
- seg->prot, MAP_SHARED | MAP_FIXED, encl.fd, 0);
- if (addr == MAP_FAILED) {
- perror("mmap() segment failed");
- exit(KSFT_FAIL);
- }
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ addr = mmap((void *)self->encl.encl_base + seg->offset, seg->size,
+ seg->prot, MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+ if (addr == MAP_FAILED)
+ goto err;
}
- memset(&run, 0, sizeof(run));
- run.tcs = encl.encl_base;
-
/* Get vDSO base address */
addr = (void *)getauxval(AT_SYSINFO_EHDR);
if (!addr)
@@ -194,37 +160,134 @@ int main(int argc, char *argv[])
if (!vdso_get_symtab(addr, &symtab))
goto err;
- eenter_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
- if (!eenter_sym)
+ sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
+ if (!sgx_enter_enclave_sym)
goto err;
- eenter = addr + eenter_sym->st_value;
-
- ret = sgx_call_vdso((void *)&MAGIC, &result, 0, EENTER, NULL, NULL, &run);
- if (!report_results(&run, ret, result, "sgx_call_vdso"))
- goto err;
+ vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
- /* Invoke the vDSO directly. */
- result = 0;
- ret = eenter((unsigned long)&MAGIC, (unsigned long)&result, 0, EENTER,
- 0, 0, &run);
- if (!report_results(&run, ret, result, "eenter"))
- goto err;
+ maps_file = fopen("/proc/self/maps", "r");
+ if (maps_file != NULL) {
+ while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
+ maps_line[strlen(maps_line) - 1] = '\0';
- /* And with an exit handler. */
- run.user_handler = (__u64)user_handler;
- run.user_data = 0xdeadbeef;
- ret = eenter((unsigned long)&MAGIC, (unsigned long)&result, 0, EENTER,
- 0, 0, &run);
- if (!report_results(&run, ret, result, "user_handler"))
- goto err;
+ if (strstr(maps_line, "/dev/sgx_enclave"))
+ TH_LOG("%s", maps_line);
+ }
- printf("SUCCESS\n");
- encl_delete(&encl);
- exit(KSFT_PASS);
+ fclose(maps_file);
+ }
err:
- encl_delete(&encl);
- exit(KSFT_FAIL);
+ if (!sgx_enter_enclave_sym)
+ encl_delete(&self->encl);
+
+ ASSERT_NE(sgx_enter_enclave_sym, NULL);
+}
+
+FIXTURE_TEARDOWN(enclave)
+{
+ encl_delete(&self->encl);
+}
+
+#define ENCL_CALL(op, run, clobbered) \
+ ({ \
+ int ret; \
+ if ((clobbered)) \
+ ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
+ EENTER, 0, 0, (run)); \
+ else \
+ ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
+ (run)); \
+ ret; \
+ })
+
+#define EXPECT_EEXIT(run) \
+ do { \
+ EXPECT_EQ((run)->function, EEXIT); \
+ if ((run)->function != EEXIT) \
+ TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
+ (run)->exception_error_code, (run)->exception_addr); \
+ } while (0)
+
+TEST_F(enclave, unclobbered_vdso)
+{
+ struct encl_op op;
+
+ op.type = ENCL_OP_PUT;
+ op.buffer = MAGIC;
+
+ EXPECT_EQ(ENCL_CALL(&op, &self->run, false), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+
+ op.type = ENCL_OP_GET;
+ op.buffer = 0;
+
+ EXPECT_EQ(ENCL_CALL(&op, &self->run, false), 0);
+
+ EXPECT_EQ(op.buffer, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+}
+
+TEST_F(enclave, clobbered_vdso)
+{
+ struct encl_op op;
+
+ op.type = ENCL_OP_PUT;
+ op.buffer = MAGIC;
+
+ EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+
+ op.type = ENCL_OP_GET;
+ op.buffer = 0;
+
+ EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
+
+ EXPECT_EQ(op.buffer, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
}
+
+static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
+ struct sgx_enclave_run *run)
+{
+ run->user_data = 0;
+
+ return 0;
+}
+
+TEST_F(enclave, clobbered_vdso_and_user_function)
+{
+ struct encl_op op;
+
+ self->run.user_handler = (__u64)test_handler;
+ self->run.user_data = 0xdeadbeef;
+
+ op.type = ENCL_OP_PUT;
+ op.buffer = MAGIC;
+
+ EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+
+ op.type = ENCL_OP_GET;
+ op.buffer = 0;
+
+ EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
+
+ EXPECT_EQ(op.buffer, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+}
+
+TEST_HARNESS_MAIN
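
A minimal sketch of the kselftest harness pattern used in the rewritten main.c above, with a hypothetical "counter" fixture standing in for the enclave fixture (assumes the file lives one directory below tools/testing/selftests/, like the SGX test does):

#include "../kselftest_harness.h"

FIXTURE(counter) {
	int value;
};

FIXTURE_SETUP(counter)
{
	/* Runs before every TEST_F(counter, ...). */
	self->value = 0;
}

FIXTURE_TEARDOWN(counter)
{
	/* Runs after every test, including when an ASSERT_*() aborted it. */
}

TEST_F(counter, increments)
{
	self->value++;
	ASSERT_EQ(self->value, 1);	/* ASSERT_* stops this test on failure */
	EXPECT_NE(self->value, 2);	/* EXPECT_* records failure and continues */
}

TEST_HARNESS_MAIN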
diff --git a/tools/testing/selftests/sgx/main.h b/tools/testing/selftests/sgx/main.h
index 67211a708f04..68672fd86cf9 100644
--- a/tools/testing/selftests/sgx/main.h
+++ b/tools/testing/selftests/sgx/main.h
@@ -35,7 +35,7 @@ bool encl_load(const char *path, struct encl *encl);
bool encl_measure(struct encl *encl);
bool encl_build(struct encl *encl);
-int sgx_call_vdso(void *rdi, void *rsi, long rdx, u32 function, void *r8, void *r9,
- struct sgx_enclave_run *run);
+int sgx_enter_enclave(void *rdi, void *rsi, long rdx, u32 function, void *r8, void *r9,
+ struct sgx_enclave_run *run);
#endif /* MAIN_H */
diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
index dee7a3d6c5a5..92bbc5a15c39 100644
--- a/tools/testing/selftests/sgx/sigstruct.c
+++ b/tools/testing/selftests/sgx/sigstruct.c
@@ -55,10 +55,27 @@ static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
return true;
}
+static void reverse_bytes(void *data, int length)
+{
+ int i = 0;
+ int j = length - 1;
+ uint8_t temp;
+ uint8_t *ptr = data;
+
+ while (i < j) {
+ temp = ptr[i];
+ ptr[i] = ptr[j];
+ ptr[j] = temp;
+ i++;
+ j--;
+ }
+}
+
static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
uint8_t *q2)
{
struct q1q2_ctx ctx;
+ int len;
if (!alloc_q1q2_ctx(s, m, &ctx)) {
fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
@@ -89,8 +106,10 @@ static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
goto out;
}
- BN_bn2bin(ctx.q1, q1);
- BN_bn2bin(ctx.q2, q2);
+ len = BN_bn2bin(ctx.q1, q1);
+ reverse_bytes(q1, len);
+ len = BN_bn2bin(ctx.q2, q2);
+ reverse_bytes(q2, len);
free_q1q2_ctx(&ctx);
return true;
@@ -152,22 +171,6 @@ static RSA *gen_sign_key(void)
return key;
}
-static void reverse_bytes(void *data, int length)
-{
- int i = 0;
- int j = length - 1;
- uint8_t temp;
- uint8_t *ptr = data;
-
- while (i < j) {
- temp = ptr[i];
- ptr[i] = ptr[j];
- ptr[j] = temp;
- i++;
- j--;
- }
-}
-
enum mrtags {
MRECREATE = 0x0045544145524345,
MREADD = 0x0000000044444145,
@@ -367,8 +370,6 @@ bool encl_measure(struct encl *encl)
/* BE -> LE */
reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
- reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE);
- reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE);
EVP_MD_CTX_destroy(ctx);
RSA_free(key);
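
The calc_q1q2() change above keys the byte reversal off the BN_bn2bin() return value, because OpenSSL emits big-endian data whose length can be shorter than the destination field. A standalone sketch of that big-endian to little-endian conversion (assumes only OpenSSL's BN API; helper names are illustrative):

#include <stdint.h>
#include <openssl/bn.h>

static void swap_ends(uint8_t *p, int len)
{
	for (int i = 0, j = len - 1; i < j; i++, j--) {
		uint8_t tmp = p[i];

		p[i] = p[j];
		p[j] = tmp;
	}
}

/* Write 'bn' into 'out' as little-endian bytes; returns the byte count. */
static int bn_to_le(const BIGNUM *bn, uint8_t *out)
{
	int len = BN_bn2bin(bn, out);	/* big-endian, possibly < field size */

	swap_ends(out, len);		/* reverse in place -> little-endian */
	return len;
}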
diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c
index cf25b5dc1e03..734ea52f9924 100644
--- a/tools/testing/selftests/sgx/test_encl.c
+++ b/tools/testing/selftests/sgx/test_encl.c
@@ -4,6 +4,8 @@
#include <stddef.h>
#include "defines.h"
+static uint8_t encl_buffer[8192] = { 1 };
+
static void *memcpy(void *dest, const void *src, size_t n)
{
size_t i;
@@ -14,7 +16,20 @@ static void *memcpy(void *dest, const void *src, size_t n)
return dest;
}
-void encl_body(void *rdi, void *rsi)
+void encl_body(void *rdi, void *rsi)
{
- memcpy(rsi, rdi, 8);
+ struct encl_op *op = (struct encl_op *)rdi;
+
+ switch (op->type) {
+ case ENCL_OP_PUT:
+ memcpy(&encl_buffer[0], &op->buffer, 8);
+ break;
+
+ case ENCL_OP_GET:
+ memcpy(&op->buffer, &encl_buffer[0], 8);
+ break;
+
+ default:
+ break;
+ }
}
diff --git a/tools/testing/selftests/sgx/test_encl.lds b/tools/testing/selftests/sgx/test_encl.lds
index 0fbbda7e665e..a1ec64f7d91f 100644
--- a/tools/testing/selftests/sgx/test_encl.lds
+++ b/tools/testing/selftests/sgx/test_encl.lds
@@ -18,9 +18,10 @@ SECTIONS
.text : {
*(.text*)
*(.rodata*)
+ FILL(0xDEADBEEF);
+ . = ALIGN(4096);
} : text
- . = ALIGN(4096);
.data : {
*(.data*)
} : data
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
index 8934a3766d20..c53b070755b6 100644
--- a/tools/testing/selftests/sigaltstack/sas.c
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <assert.h>
#include <errno.h>
+#include <sys/auxv.h>
#include "../kselftest.h"
@@ -24,6 +25,11 @@
#define SS_AUTODISARM (1U << 31)
#endif
+#ifndef AT_MINSIGSTKSZ
+#define AT_MINSIGSTKSZ 51
+#endif
+
+static unsigned int stack_size;
static void *sstack, *ustack;
static ucontext_t uc, sc;
static const char *msg = "[OK]\tStack preserved";
@@ -47,7 +53,7 @@ void my_usr1(int sig, siginfo_t *si, void *u)
#endif
if (sp < (unsigned long)sstack ||
- sp >= (unsigned long)sstack + SIGSTKSZ) {
+ sp >= (unsigned long)sstack + stack_size) {
ksft_exit_fail_msg("SP is not on sigaltstack\n");
}
/* put some data on stack. other sighandler will try to overwrite it */
@@ -108,6 +114,10 @@ int main(void)
stack_t stk;
int err;
+	/* Make sure the stack is larger than the required minimum. */
+ stack_size = getauxval(AT_MINSIGSTKSZ) + SIGSTKSZ;
+	ksft_print_msg("[NOTE]\tthe stack size is %u\n", stack_size);
+
ksft_print_header();
ksft_set_plan(3);
@@ -117,7 +127,7 @@ int main(void)
sigaction(SIGUSR1, &act, NULL);
act.sa_sigaction = my_usr2;
sigaction(SIGUSR2, &act, NULL);
- sstack = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+ sstack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (sstack == MAP_FAILED) {
ksft_exit_fail_msg("mmap() - %s\n", strerror(errno));
@@ -139,7 +149,7 @@ int main(void)
}
stk.ss_sp = sstack;
- stk.ss_size = SIGSTKSZ;
+ stk.ss_size = stack_size;
stk.ss_flags = SS_ONSTACK | SS_AUTODISARM;
err = sigaltstack(&stk, NULL);
if (err) {
@@ -161,7 +171,7 @@ int main(void)
}
}
- ustack = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+ ustack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (ustack == MAP_FAILED) {
ksft_exit_fail_msg("mmap() - %s\n", strerror(errno));
@@ -170,7 +180,7 @@ int main(void)
getcontext(&uc);
uc.uc_link = NULL;
uc.uc_stack.ss_sp = ustack;
- uc.uc_stack.ss_size = SIGSTKSZ;
+ uc.uc_stack.ss_size = stack_size;
makecontext(&uc, switch_fn, 0);
raise(SIGUSR1);
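
The sas.c change above sizes the alternate stack from the kernel-reported minimum instead of the legacy SIGSTKSZ constant alone. A minimal sketch of that sizing pattern (hypothetical make_altstack() helper; on older headers AT_MINSIGSTKSZ may be missing, and on older kernels getauxval() simply returns 0):

#include <signal.h>
#include <stdlib.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51
#endif

static stack_t make_altstack(void)
{
	/* Kernel minimum (0 if the aux vector entry is absent) plus the legacy size. */
	size_t size = getauxval(AT_MINSIGSTKSZ) + SIGSTKSZ;
	stack_t ss = {
		.ss_sp = malloc(size),	/* error checking omitted in this sketch */
		.ss_size = size,
		.ss_flags = 0,
	};

	return ss;	/* caller: sigaltstack(&ss, NULL) */
}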
diff --git a/tools/testing/selftests/splice/short_splice_read.sh b/tools/testing/selftests/splice/short_splice_read.sh
index 7810d3589d9a..22b6c8910b18 100755
--- a/tools/testing/selftests/splice/short_splice_read.sh
+++ b/tools/testing/selftests/splice/short_splice_read.sh
@@ -1,21 +1,87 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+#
+# Test for mishandling of splice() on pseudofilesystems, which should catch
+# bugs like 11990a5bd7e5 ("module: Correctly truncate sysfs sections output")
+#
+# Since splice fallback was removed as part of the set_fs() rework, many of these
+# tests expect to fail now. See https://lore.kernel.org/lkml/202009181443.C2179FB@keescook/
set -e
+DIR=$(dirname "$0")
+
ret=0
+expect_success()
+{
+ title="$1"
+ shift
+
+ echo "" >&2
+ echo "$title ..." >&2
+
+ set +e
+ "$@"
+ rc=$?
+ set -e
+
+ case "$rc" in
+ 0)
+ echo "ok: $title succeeded" >&2
+ ;;
+ 1)
+ echo "FAIL: $title should work" >&2
+ ret=$(( ret + 1 ))
+ ;;
+ *)
+ echo "FAIL: something else went wrong" >&2
+ ret=$(( ret + 1 ))
+ ;;
+ esac
+}
+
+expect_failure()
+{
+ title="$1"
+ shift
+
+ echo "" >&2
+ echo "$title ..." >&2
+
+ set +e
+ "$@"
+ rc=$?
+ set -e
+
+ case "$rc" in
+ 0)
+ echo "FAIL: $title unexpectedly worked" >&2
+ ret=$(( ret + 1 ))
+ ;;
+ 1)
+ echo "ok: $title correctly failed" >&2
+ ;;
+ *)
+ echo "FAIL: something else went wrong" >&2
+ ret=$(( ret + 1 ))
+ ;;
+ esac
+}
+
do_splice()
{
filename="$1"
bytes="$2"
expected="$3"
+ report="$4"
- out=$(./splice_read "$filename" "$bytes" | cat)
+ out=$("$DIR"/splice_read "$filename" "$bytes" | cat)
if [ "$out" = "$expected" ] ; then
- echo "ok: $filename $bytes"
+ echo " matched $report" >&2
+ return 0
else
- echo "FAIL: $filename $bytes"
- ret=1
+ echo " no match: '$out' vs $report" >&2
+ return 1
fi
}
@@ -23,34 +89,45 @@ test_splice()
{
filename="$1"
+ echo " checking $filename ..." >&2
+
full=$(cat "$filename")
+ rc=$?
+ if [ $rc -ne 0 ] ; then
+ return 2
+ fi
+
two=$(echo "$full" | grep -m1 . | cut -c-2)
# Make sure full splice has the same contents as a standard read.
- do_splice "$filename" 4096 "$full"
+ echo " splicing 4096 bytes ..." >&2
+ if ! do_splice "$filename" 4096 "$full" "full read" ; then
+ return 1
+ fi
 	# Make sure a partial splice sees the first two characters.
- do_splice "$filename" 2 "$two"
+ echo " splicing 2 bytes ..." >&2
+ if ! do_splice "$filename" 2 "$two" "'$two'" ; then
+ return 1
+ fi
+
+ return 0
}
-# proc_single_open(), seq_read()
-test_splice /proc/$$/limits
-# special open, seq_read()
-test_splice /proc/$$/comm
+### /proc/$pid/ has no splice interface; these should all fail.
+expect_failure "proc_single_open(), seq_read() splice" test_splice /proc/$$/limits
+expect_failure "special open(), seq_read() splice" test_splice /proc/$$/comm
-# proc_handler, proc_dointvec_minmax
-test_splice /proc/sys/fs/nr_open
-# proc_handler, proc_dostring
-test_splice /proc/sys/kernel/modprobe
-# proc_handler, special read
-test_splice /proc/sys/kernel/version
+### /proc/sys/ has a splice interface; these should all succeed.
+expect_success "proc_handler: proc_dointvec_minmax() splice" test_splice /proc/sys/fs/nr_open
+expect_success "proc_handler: proc_dostring() splice" test_splice /proc/sys/kernel/modprobe
+expect_success "proc_handler: special read splice" test_splice /proc/sys/kernel/version
+### /sys/ has no splice interface; these should all fail.
if ! [ -d /sys/module/test_module/sections ] ; then
- modprobe test_module
+ expect_success "test_module kernel module load" modprobe test_module
fi
-# kernfs, attr
-test_splice /sys/module/test_module/coresize
-# kernfs, binattr
-test_splice /sys/module/test_module/sections/.init.text
+expect_failure "kernfs attr splice" test_splice /sys/module/test_module/coresize
+expect_failure "kernfs binattr splice" test_splice /sys/module/test_module/sections/.init.text
exit $ret
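
The rewritten script above drives a small splice_read helper that is not part of this diff. Assuming that helper simply splices the named file to stdout (which the script keeps a pipe by piping through cat), it would look roughly like:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	ssize_t n;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s FILE BYTES\n", argv[0]);
		return EXIT_FAILURE;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* splice() needs a pipe on one side of the copy; here stdout is the pipe. */
	n = splice(fd, NULL, STDOUT_FILENO, NULL, atol(argv[2]), 0);
	if (n < 0) {
		perror("splice");
		return EXIT_FAILURE;	/* expected for files with no splice support */
	}

	return EXIT_SUCCESS;
}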
diff --git a/tools/testing/selftests/timers/rtcpie.c b/tools/testing/selftests/timers/rtcpie.c
index 47b5bad1b393..4ef2184f1558 100644
--- a/tools/testing/selftests/timers/rtcpie.c
+++ b/tools/testing/selftests/timers/rtcpie.c
@@ -18,6 +18,8 @@
#include <stdlib.h>
#include <errno.h>
+#include "../kselftest.h"
+
/*
* This expects the new RTC class driver framework, working with
* clocks that will often not be clones of what the PC-AT had.
@@ -35,8 +37,14 @@ int main(int argc, char **argv)
switch (argc) {
case 2:
rtc = argv[1];
- /* FALLTHROUGH */
+ break;
case 1:
+ fd = open(default_rtc, O_RDONLY);
+ if (fd == -1) {
+ printf("Default RTC %s does not exist. Test Skipped!\n", default_rtc);
+ exit(KSFT_SKIP);
+ }
+ close(fd);
break;
default:
fprintf(stderr, "usage: rtctest [rtcdev] [d]\n");
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 1f651e85ed60..f0fd80ef17df 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -12,6 +12,9 @@ mremap_test
on-fault-limit
transhuge-stress
protection_keys
+protection_keys_32
+protection_keys_64
+madv_populate
userfaultfd
mlock-intersect-test
mlock-random-test
@@ -21,5 +24,6 @@ va_128TBswitch
map_fixed_noreplace
write_to_hugetlbfs
hmm-tests
+memfd_secret
local_config.*
split_huge_page_test
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 73e1cc96d7c2..521243770f26 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -31,9 +31,11 @@ TEST_GEN_FILES += hmm-tests
TEST_GEN_FILES += hugepage-mmap
TEST_GEN_FILES += hugepage-shm
TEST_GEN_FILES += khugepaged
+TEST_GEN_FILES += madv_populate
TEST_GEN_FILES += map_fixed_noreplace
TEST_GEN_FILES += map_hugetlb
TEST_GEN_FILES += map_populate
+TEST_GEN_FILES += memfd_secret
TEST_GEN_FILES += mlock-random-test
TEST_GEN_FILES += mlock2-tests
TEST_GEN_FILES += mremap_dontunmap
@@ -100,7 +102,7 @@ $(1) $(1)_64: $(OUTPUT)/$(1)_64
endef
ifeq ($(CAN_BUILD_I386),1)
-$(BINARIES_32): CFLAGS += -m32
+$(BINARIES_32): CFLAGS += -m32 -mxsave
$(BINARIES_32): LDLIBS += -lrt -ldl -lm
$(BINARIES_32): $(OUTPUT)/%_32: %.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
@@ -108,7 +110,7 @@ $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
endif
ifeq ($(CAN_BUILD_X86_64),1)
-$(BINARIES_64): CFLAGS += -m64
+$(BINARIES_64): CFLAGS += -m64 -mxsave
$(BINARIES_64): LDLIBS += -lrt -ldl
$(BINARIES_64): $(OUTPUT)/%_64: %.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
@@ -134,7 +136,7 @@ warn_32bit_failure:
endif
endif
-$(OUTPUT)/mlock-random-test: LDLIBS += -lcap
+$(OUTPUT)/mlock-random-test $(OUTPUT)/memfd_secret: LDLIBS += -lcap
$(OUTPUT)/gup_test: ../../../../mm/gup_test.h
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index 5d1ac691b9f4..864f126ffd78 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -1485,4 +1485,162 @@ TEST_F(hmm2, double_map)
hmm_buffer_free(buffer);
}
+/*
+ * Basic check of exclusive faulting.
+ */
+TEST_F(hmm, exclusive)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ int *ptr;
+ int ret;
+
+ npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
+ ASSERT_NE(npages, 0);
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Map memory exclusively for device access. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i]++, i);
+
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i+1);
+
+ /* Check atomic access revoked */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
+ ASSERT_EQ(ret, 0);
+
+ hmm_buffer_free(buffer);
+}
+
+TEST_F(hmm, exclusive_mprotect)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ int *ptr;
+ int ret;
+
+ npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
+ ASSERT_NE(npages, 0);
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Map memory exclusively for device access. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ ret = mprotect(buffer->ptr, size, PROT_READ);
+ ASSERT_EQ(ret, 0);
+
+ /* Simulate a device writing system memory. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
+ ASSERT_EQ(ret, -EPERM);
+
+ hmm_buffer_free(buffer);
+}
+
+/*
+ * Check copy-on-write works.
+ */
+TEST_F(hmm, exclusive_cow)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ int *ptr;
+ int ret;
+
+ npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
+ ASSERT_NE(npages, 0);
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Map memory exclusively for device access. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ fork();
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i]++, i);
+
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i+1);
+
+ hmm_buffer_free(buffer);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c
index 8b75821302a7..155120b67a16 100644
--- a/tools/testing/selftests/vm/khugepaged.c
+++ b/tools/testing/selftests/vm/khugepaged.c
@@ -86,7 +86,6 @@ struct settings {
enum thp_enabled thp_enabled;
enum thp_defrag thp_defrag;
enum shmem_enabled shmem_enabled;
- bool debug_cow;
bool use_zero_page;
struct khugepaged_settings khugepaged;
};
@@ -95,7 +94,6 @@ static struct settings default_settings = {
.thp_enabled = THP_MADVISE,
.thp_defrag = THP_DEFRAG_ALWAYS,
.shmem_enabled = SHMEM_NEVER,
- .debug_cow = 0,
.use_zero_page = 0,
.khugepaged = {
.defrag = 1,
@@ -268,7 +266,6 @@ static void write_settings(struct settings *settings)
write_string("defrag", thp_defrag_strings[settings->thp_defrag]);
write_string("shmem_enabled",
shmem_enabled_strings[settings->shmem_enabled]);
- write_num("debug_cow", settings->debug_cow);
write_num("use_zero_page", settings->use_zero_page);
write_num("khugepaged/defrag", khugepaged->defrag);
@@ -304,7 +301,6 @@ static void save_settings(void)
.thp_defrag = read_string("defrag", thp_defrag_strings),
.shmem_enabled =
read_string("shmem_enabled", shmem_enabled_strings),
- .debug_cow = read_num("debug_cow"),
.use_zero_page = read_num("use_zero_page"),
};
saved_settings.khugepaged = (struct khugepaged_settings) {
diff --git a/tools/testing/selftests/vm/madv_populate.c b/tools/testing/selftests/vm/madv_populate.c
new file mode 100644
index 000000000000..b959e4ebdad4
--- /dev/null
+++ b/tools/testing/selftests/vm/madv_populate.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
+ *
+ * Copyright 2021, Red Hat, Inc.
+ *
+ * Author(s): David Hildenbrand <david@redhat.com>
+ */
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include "../kselftest.h"
+
+#if defined(MADV_POPULATE_READ) && defined(MADV_POPULATE_WRITE)
+
+/*
+ * For now, we're using 2 MiB of private anonymous memory for all tests.
+ */
+#define SIZE (2 * 1024 * 1024)
+
+static size_t pagesize;
+
+static uint64_t pagemap_get_entry(int fd, char *start)
+{
+ const unsigned long pfn = (unsigned long)start / pagesize;
+ uint64_t entry;
+ int ret;
+
+ ret = pread(fd, &entry, sizeof(entry), pfn * sizeof(entry));
+ if (ret != sizeof(entry))
+ ksft_exit_fail_msg("reading pagemap failed\n");
+ return entry;
+}
+
+static bool pagemap_is_populated(int fd, char *start)
+{
+ uint64_t entry = pagemap_get_entry(fd, start);
+
+ /* Present or swapped. */
+ return entry & 0xc000000000000000ull;
+}
+
+static bool pagemap_is_softdirty(int fd, char *start)
+{
+ uint64_t entry = pagemap_get_entry(fd, start);
+
+ return entry & 0x0080000000000000ull;
+}
+
+static void sense_support(void)
+{
+ char *addr;
+ int ret;
+
+ addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (!addr)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ ret = madvise(addr, pagesize, MADV_POPULATE_READ);
+ if (ret)
+ ksft_exit_skip("MADV_POPULATE_READ is not available\n");
+
+ ret = madvise(addr, pagesize, MADV_POPULATE_WRITE);
+ if (ret)
+ ksft_exit_skip("MADV_POPULATE_WRITE is not available\n");
+
+ munmap(addr, pagesize);
+}
+
+static void test_prot_read(void)
+{
+ char *addr;
+ int ret;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ addr = mmap(0, SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ ret = madvise(addr, SIZE, MADV_POPULATE_READ);
+ ksft_test_result(!ret, "MADV_POPULATE_READ with PROT_READ\n");
+
+ ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
+ ksft_test_result(ret == -1 && errno == EINVAL,
+ "MADV_POPULATE_WRITE with PROT_READ\n");
+
+ munmap(addr, SIZE);
+}
+
+static void test_prot_write(void)
+{
+ char *addr;
+ int ret;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ addr = mmap(0, SIZE, PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ ret = madvise(addr, SIZE, MADV_POPULATE_READ);
+ ksft_test_result(ret == -1 && errno == EINVAL,
+ "MADV_POPULATE_READ with PROT_WRITE\n");
+
+ ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
+ ksft_test_result(!ret, "MADV_POPULATE_WRITE with PROT_WRITE\n");
+
+ munmap(addr, SIZE);
+}
+
+static void test_holes(void)
+{
+ char *addr;
+ int ret;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ addr = mmap(0, SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+ ret = munmap(addr + pagesize, pagesize);
+ if (ret)
+ ksft_exit_fail_msg("munmap failed\n");
+
+ /* Hole in the middle */
+ ret = madvise(addr, SIZE, MADV_POPULATE_READ);
+ ksft_test_result(ret == -1 && errno == ENOMEM,
+ "MADV_POPULATE_READ with holes in the middle\n");
+ ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
+ ksft_test_result(ret == -1 && errno == ENOMEM,
+ "MADV_POPULATE_WRITE with holes in the middle\n");
+
+ /* Hole at end */
+ ret = madvise(addr, 2 * pagesize, MADV_POPULATE_READ);
+ ksft_test_result(ret == -1 && errno == ENOMEM,
+ "MADV_POPULATE_READ with holes at the end\n");
+ ret = madvise(addr, 2 * pagesize, MADV_POPULATE_WRITE);
+ ksft_test_result(ret == -1 && errno == ENOMEM,
+ "MADV_POPULATE_WRITE with holes at the end\n");
+
+ /* Hole at beginning */
+ ret = madvise(addr + pagesize, pagesize, MADV_POPULATE_READ);
+ ksft_test_result(ret == -1 && errno == ENOMEM,
+ "MADV_POPULATE_READ with holes at the beginning\n");
+ ret = madvise(addr + pagesize, pagesize, MADV_POPULATE_WRITE);
+ ksft_test_result(ret == -1 && errno == ENOMEM,
+ "MADV_POPULATE_WRITE with holes at the beginning\n");
+
+ munmap(addr, SIZE);
+}
+
+static bool range_is_populated(char *start, ssize_t size)
+{
+ int fd = open("/proc/self/pagemap", O_RDONLY);
+ bool ret = true;
+
+ if (fd < 0)
+ ksft_exit_fail_msg("opening pagemap failed\n");
+ for (; size > 0 && ret; size -= pagesize, start += pagesize)
+ if (!pagemap_is_populated(fd, start))
+ ret = false;
+ close(fd);
+ return ret;
+}
+
+static bool range_is_not_populated(char *start, ssize_t size)
+{
+ int fd = open("/proc/self/pagemap", O_RDONLY);
+ bool ret = true;
+
+ if (fd < 0)
+ ksft_exit_fail_msg("opening pagemap failed\n");
+ for (; size > 0 && ret; size -= pagesize, start += pagesize)
+ if (pagemap_is_populated(fd, start))
+ ret = false;
+ close(fd);
+ return ret;
+}
+
+static void test_populate_read(void)
+{
+ char *addr;
+ int ret;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ addr = mmap(0, SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+ ksft_test_result(range_is_not_populated(addr, SIZE),
+ "range initially not populated\n");
+
+ ret = madvise(addr, SIZE, MADV_POPULATE_READ);
+ ksft_test_result(!ret, "MADV_POPULATE_READ\n");
+ ksft_test_result(range_is_populated(addr, SIZE),
+ "range is populated\n");
+
+ munmap(addr, SIZE);
+}
+
+static void test_populate_write(void)
+{
+ char *addr;
+ int ret;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ addr = mmap(0, SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+ ksft_test_result(range_is_not_populated(addr, SIZE),
+ "range initially not populated\n");
+
+ ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
+ ksft_test_result(!ret, "MADV_POPULATE_WRITE\n");
+ ksft_test_result(range_is_populated(addr, SIZE),
+ "range is populated\n");
+
+ munmap(addr, SIZE);
+}
+
+static bool range_is_softdirty(char *start, ssize_t size)
+{
+ int fd = open("/proc/self/pagemap", O_RDONLY);
+ bool ret = true;
+
+ if (fd < 0)
+ ksft_exit_fail_msg("opening pagemap failed\n");
+ for (; size > 0 && ret; size -= pagesize, start += pagesize)
+ if (!pagemap_is_softdirty(fd, start))
+ ret = false;
+ close(fd);
+ return ret;
+}
+
+static bool range_is_not_softdirty(char *start, ssize_t size)
+{
+ int fd = open("/proc/self/pagemap", O_RDONLY);
+ bool ret = true;
+
+ if (fd < 0)
+ ksft_exit_fail_msg("opening pagemap failed\n");
+ for (; size > 0 && ret; size -= pagesize, start += pagesize)
+ if (pagemap_is_softdirty(fd, start))
+ ret = false;
+ close(fd);
+ return ret;
+}
+
+static void clear_softdirty(void)
+{
+ int fd = open("/proc/self/clear_refs", O_WRONLY);
+ const char *ctrl = "4";
+ int ret;
+
+ if (fd < 0)
+ ksft_exit_fail_msg("opening clear_refs failed\n");
+ ret = write(fd, ctrl, strlen(ctrl));
+ if (ret != strlen(ctrl))
+ ksft_exit_fail_msg("writing clear_refs failed\n");
+ close(fd);
+}
+
+static void test_softdirty(void)
+{
+ char *addr;
+ int ret;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ addr = mmap(0, SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ /* Clear any softdirty bits. */
+ clear_softdirty();
+ ksft_test_result(range_is_not_softdirty(addr, SIZE),
+ "range is not softdirty\n");
+
+ /* Populating READ should set softdirty. */
+ ret = madvise(addr, SIZE, MADV_POPULATE_READ);
+ ksft_test_result(!ret, "MADV_POPULATE_READ\n");
+ ksft_test_result(range_is_not_softdirty(addr, SIZE),
+ "range is not softdirty\n");
+
+ /* Populating WRITE should set softdirty. */
+ ret = madvise(addr, SIZE, MADV_POPULATE_WRITE);
+ ksft_test_result(!ret, "MADV_POPULATE_WRITE\n");
+ ksft_test_result(range_is_softdirty(addr, SIZE),
+ "range is softdirty\n");
+
+ munmap(addr, SIZE);
+}
+
+int main(int argc, char **argv)
+{
+ int err;
+
+ pagesize = getpagesize();
+
+ ksft_print_header();
+ ksft_set_plan(21);
+
+ sense_support();
+ test_prot_read();
+ test_prot_write();
+ test_holes();
+ test_populate_read();
+ test_populate_write();
+ test_softdirty();
+
+ err = ksft_get_fail_cnt();
+ if (err)
+ ksft_exit_fail_msg("%d out of %d tests failed\n",
+ err, ksft_test_num());
+ return ksft_exit_pass();
+}
+
+#else /* defined(MADV_POPULATE_READ) && defined(MADV_POPULATE_WRITE) */
+
+#warning "missing MADV_POPULATE_READ or MADV_POPULATE_WRITE definition"
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ ksft_exit_skip("MADV_POPULATE_READ or MADV_POPULATE_WRITE not defined\n");
+}
+
+#endif /* defined(MADV_POPULATE_READ) && defined(MADV_POPULATE_WRITE) */
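
madv_populate.c above tests raw pagemap masks (0xc000000000000000 for present-or-swapped, 0x0080000000000000 for soft-dirty). The same bits spelled out as named constants, following Documentation/admin-guide/mm/pagemap.rst:

#include <stdbool.h>
#include <stdint.h>

#define PM_SOFT_DIRTY	(1ULL << 55)	/* pte is soft-dirty */
#define PM_SWAP		(1ULL << 62)	/* page is swapped out */
#define PM_PRESENT	(1ULL << 63)	/* page is present in RAM */

static inline bool entry_is_populated(uint64_t entry)
{
	return entry & (PM_PRESENT | PM_SWAP);	/* == 0xc000000000000000ull */
}

static inline bool entry_is_softdirty(uint64_t entry)
{
	return entry & PM_SOFT_DIRTY;		/* == 0x0080000000000000ull */
}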
diff --git a/tools/testing/selftests/vm/memfd_secret.c b/tools/testing/selftests/vm/memfd_secret.c
new file mode 100644
index 000000000000..93e7e7ffed33
--- /dev/null
+++ b/tools/testing/selftests/vm/memfd_secret.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corporation, 2021
+ *
+ * Author: Mike Rapoport <rppt@linux.ibm.com>
+ */
+
+#define _GNU_SOURCE
+#include <sys/uio.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/ptrace.h>
+#include <sys/syscall.h>
+#include <sys/resource.h>
+#include <sys/capability.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+#include "../kselftest.h"
+
+#define fail(fmt, ...) ksft_test_result_fail(fmt, ##__VA_ARGS__)
+#define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
+#define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
+
+#ifdef __NR_memfd_secret
+
+#define PATTERN 0x55
+
+static const int prot = PROT_READ | PROT_WRITE;
+static const int mode = MAP_SHARED;
+
+static unsigned long page_size;
+static unsigned long mlock_limit_cur;
+static unsigned long mlock_limit_max;
+
+static int memfd_secret(unsigned int flags)
+{
+ return syscall(__NR_memfd_secret, flags);
+}
+
+static void test_file_apis(int fd)
+{
+ char buf[64];
+
+ if ((read(fd, buf, sizeof(buf)) >= 0) ||
+ (write(fd, buf, sizeof(buf)) >= 0) ||
+ (pread(fd, buf, sizeof(buf), 0) >= 0) ||
+ (pwrite(fd, buf, sizeof(buf), 0) >= 0))
+ fail("unexpected file IO\n");
+ else
+ pass("file IO is blocked as expected\n");
+}
+
+static void test_mlock_limit(int fd)
+{
+ size_t len;
+ char *mem;
+
+ len = mlock_limit_cur;
+ mem = mmap(NULL, len, prot, mode, fd, 0);
+ if (mem == MAP_FAILED) {
+ fail("unable to mmap secret memory\n");
+ return;
+ }
+ munmap(mem, len);
+
+ len = mlock_limit_max * 2;
+ mem = mmap(NULL, len, prot, mode, fd, 0);
+ if (mem != MAP_FAILED) {
+ fail("unexpected mlock limit violation\n");
+ munmap(mem, len);
+ return;
+ }
+
+ pass("mlock limit is respected\n");
+}
+
+static void try_process_vm_read(int fd, int pipefd[2])
+{
+ struct iovec liov, riov;
+ char buf[64];
+ char *mem;
+
+ if (read(pipefd[0], &mem, sizeof(mem)) < 0) {
+		fail("pipe read: %s\n", strerror(errno));
+ exit(KSFT_FAIL);
+ }
+
+ liov.iov_len = riov.iov_len = sizeof(buf);
+ liov.iov_base = buf;
+ riov.iov_base = mem;
+
+ if (process_vm_readv(getppid(), &liov, 1, &riov, 1, 0) < 0) {
+ if (errno == ENOSYS)
+ exit(KSFT_SKIP);
+ exit(KSFT_PASS);
+ }
+
+ exit(KSFT_FAIL);
+}
+
+static void try_ptrace(int fd, int pipefd[2])
+{
+ pid_t ppid = getppid();
+ int status;
+ char *mem;
+ long ret;
+
+ if (read(pipefd[0], &mem, sizeof(mem)) < 0) {
+		perror("pipe read");
+ exit(KSFT_FAIL);
+ }
+
+ ret = ptrace(PTRACE_ATTACH, ppid, 0, 0);
+ if (ret) {
+ perror("ptrace_attach");
+ exit(KSFT_FAIL);
+ }
+
+ ret = waitpid(ppid, &status, WUNTRACED);
+ if ((ret != ppid) || !(WIFSTOPPED(status))) {
+		fprintf(stderr, "weird waitpid result %ld stat %x\n",
+ ret, status);
+ exit(KSFT_FAIL);
+ }
+
+ if (ptrace(PTRACE_PEEKDATA, ppid, mem, 0))
+ exit(KSFT_PASS);
+
+ exit(KSFT_FAIL);
+}
+
+static void check_child_status(pid_t pid, const char *name)
+{
+ int status;
+
+ waitpid(pid, &status, 0);
+
+ if (WIFEXITED(status) && WEXITSTATUS(status) == KSFT_SKIP) {
+ skip("%s is not supported\n", name);
+ return;
+ }
+
+ if ((WIFEXITED(status) && WEXITSTATUS(status) == KSFT_PASS) ||
+ WIFSIGNALED(status)) {
+ pass("%s is blocked as expected\n", name);
+ return;
+ }
+
+ fail("%s: unexpected memory access\n", name);
+}
+
+static void test_remote_access(int fd, const char *name,
+ void (*func)(int fd, int pipefd[2]))
+{
+ int pipefd[2];
+ pid_t pid;
+ char *mem;
+
+ if (pipe(pipefd)) {
+ fail("pipe failed: %s\n", strerror(errno));
+ return;
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ fail("fork failed: %s\n", strerror(errno));
+ return;
+ }
+
+ if (pid == 0) {
+ func(fd, pipefd);
+ return;
+ }
+
+ mem = mmap(NULL, page_size, prot, mode, fd, 0);
+ if (mem == MAP_FAILED) {
+ fail("Unable to mmap secret memory\n");
+ return;
+ }
+
+ ftruncate(fd, page_size);
+ memset(mem, PATTERN, page_size);
+
+ if (write(pipefd[1], &mem, sizeof(mem)) < 0) {
+ fail("pipe write: %s\n", strerror(errno));
+ return;
+ }
+
+ check_child_status(pid, name);
+}
+
+static void test_process_vm_read(int fd)
+{
+ test_remote_access(fd, "process_vm_read", try_process_vm_read);
+}
+
+static void test_ptrace(int fd)
+{
+ test_remote_access(fd, "ptrace", try_ptrace);
+}
+
+static int set_cap_limits(rlim_t max)
+{
+ struct rlimit new;
+ cap_t cap = cap_init();
+
+ new.rlim_cur = max;
+ new.rlim_max = max;
+ if (setrlimit(RLIMIT_MEMLOCK, &new)) {
+ perror("setrlimit() returns error");
+ return -1;
+ }
+
+ /* drop capabilities including CAP_IPC_LOCK */
+ if (cap_set_proc(cap)) {
+ perror("cap_set_proc() returns error");
+ return -2;
+ }
+
+ return 0;
+}
+
+static void prepare(void)
+{
+ struct rlimit rlim;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (!page_size)
+ ksft_exit_fail_msg("Failed to get page size %s\n",
+ strerror(errno));
+
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim))
+ ksft_exit_fail_msg("Unable to detect mlock limit: %s\n",
+ strerror(errno));
+
+ mlock_limit_cur = rlim.rlim_cur;
+ mlock_limit_max = rlim.rlim_max;
+
+ printf("page_size: %ld, mlock.soft: %ld, mlock.hard: %ld\n",
+ page_size, mlock_limit_cur, mlock_limit_max);
+
+ if (page_size > mlock_limit_cur)
+ mlock_limit_cur = page_size;
+ if (page_size > mlock_limit_max)
+ mlock_limit_max = page_size;
+
+ if (set_cap_limits(mlock_limit_max))
+ ksft_exit_fail_msg("Unable to set mlock limit: %s\n",
+ strerror(errno));
+}
+
+#define NUM_TESTS 4
+
+int main(int argc, char *argv[])
+{
+ int fd;
+
+ prepare();
+
+ ksft_print_header();
+ ksft_set_plan(NUM_TESTS);
+
+ fd = memfd_secret(0);
+ if (fd < 0) {
+ if (errno == ENOSYS)
+ ksft_exit_skip("memfd_secret is not supported\n");
+ else
+ ksft_exit_fail_msg("memfd_secret failed: %s\n",
+ strerror(errno));
+ }
+
+ test_mlock_limit(fd);
+ test_file_apis(fd);
+ test_process_vm_read(fd);
+ test_ptrace(fd);
+
+ close(fd);
+
+ ksft_exit(!ksft_get_fail_cnt());
+}
+
+#else /* __NR_memfd_secret */
+
+int main(int argc, char *argv[])
+{
+ printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_memfd_secret */
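
memfd_secret.c above exercises the new memfd_secret(2) syscall, which has no libc wrapper. A minimal usage sketch (hypothetical secret_page() helper; assumes __NR_memfd_secret is defined, otherwise the syscall fails with ENOSYS when secretmem is unavailable):

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int secret_page(void **out, size_t len)
{
	int fd = syscall(__NR_memfd_secret, 0);

	if (fd < 0)
		return -1;			/* e.g. ENOSYS when secretmem is off */
	if (ftruncate(fd, len) < 0)
		goto err;
	/* Secret mappings are locked and counted against RLIMIT_MEMLOCK. */
	*out = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (*out == MAP_FAILED)
		goto err;
	memset(*out, 0, len);
	close(fd);				/* the mapping keeps its own reference */
	return 0;
err:
	close(fd);
	return -1;
}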
diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c
index 9c391d016922..0624d1bd71b5 100644
--- a/tools/testing/selftests/vm/mremap_test.c
+++ b/tools/testing/selftests/vm/mremap_test.c
@@ -45,14 +45,15 @@ enum {
_4MB = 4ULL << 20,
_1GB = 1ULL << 30,
_2GB = 2ULL << 30,
- PTE = _4KB,
PMD = _2MB,
PUD = _1GB,
};
+#define PTE page_size
+
#define MAKE_TEST(source_align, destination_align, size, \
overlaps, should_fail, test_name) \
-{ \
+(struct test){ \
.name = test_name, \
.config = { \
.src_alignment = source_align, \
@@ -74,9 +75,10 @@ static void *get_source_mapping(struct config c)
retry:
addr += c.src_alignment;
src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANONYMOUS | MAP_SHARED, -1, 0);
+ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+ -1, 0);
if (src_addr == MAP_FAILED) {
- if (errno == EPERM)
+ if (errno == EPERM || errno == EEXIST)
goto retry;
goto error;
}
@@ -252,12 +254,17 @@ static int parse_args(int argc, char **argv, unsigned int *threshold_mb,
return 0;
}
+#define MAX_TEST 13
+#define MAX_PERF_TEST 3
int main(int argc, char **argv)
{
int failures = 0;
int i, run_perf_tests;
unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD;
unsigned int pattern_seed;
+ struct test test_cases[MAX_TEST];
+ struct test perf_test_cases[MAX_PERF_TEST];
+ int page_size;
time_t t;
pattern_seed = (unsigned int) time(&t);
@@ -268,56 +275,59 @@ int main(int argc, char **argv)
ksft_print_msg("Test configs:\n\tthreshold_mb=%u\n\tpattern_seed=%u\n\n",
threshold_mb, pattern_seed);
- struct test test_cases[] = {
- /* Expected mremap failures */
- MAKE_TEST(_4KB, _4KB, _4KB, OVERLAPPING, EXPECT_FAILURE,
- "mremap - Source and Destination Regions Overlapping"),
- MAKE_TEST(_4KB, _1KB, _4KB, NON_OVERLAPPING, EXPECT_FAILURE,
- "mremap - Destination Address Misaligned (1KB-aligned)"),
- MAKE_TEST(_1KB, _4KB, _4KB, NON_OVERLAPPING, EXPECT_FAILURE,
- "mremap - Source Address Misaligned (1KB-aligned)"),
-
- /* Src addr PTE aligned */
- MAKE_TEST(PTE, PTE, _8KB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "8KB mremap - Source PTE-aligned, Destination PTE-aligned"),
-
- /* Src addr 1MB aligned */
- MAKE_TEST(_1MB, PTE, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "2MB mremap - Source 1MB-aligned, Destination PTE-aligned"),
- MAKE_TEST(_1MB, _1MB, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "2MB mremap - Source 1MB-aligned, Destination 1MB-aligned"),
-
- /* Src addr PMD aligned */
- MAKE_TEST(PMD, PTE, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "4MB mremap - Source PMD-aligned, Destination PTE-aligned"),
- MAKE_TEST(PMD, _1MB, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "4MB mremap - Source PMD-aligned, Destination 1MB-aligned"),
- MAKE_TEST(PMD, PMD, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "4MB mremap - Source PMD-aligned, Destination PMD-aligned"),
-
- /* Src addr PUD aligned */
- MAKE_TEST(PUD, PTE, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "2GB mremap - Source PUD-aligned, Destination PTE-aligned"),
- MAKE_TEST(PUD, _1MB, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "2GB mremap - Source PUD-aligned, Destination 1MB-aligned"),
- MAKE_TEST(PUD, PMD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "2GB mremap - Source PUD-aligned, Destination PMD-aligned"),
- MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "2GB mremap - Source PUD-aligned, Destination PUD-aligned"),
- };
-
- struct test perf_test_cases[] = {
- /*
- * mremap 1GB region - Page table level aligned time
- * comparison.
- */
- MAKE_TEST(PTE, PTE, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "1GB mremap - Source PTE-aligned, Destination PTE-aligned"),
- MAKE_TEST(PMD, PMD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "1GB mremap - Source PMD-aligned, Destination PMD-aligned"),
- MAKE_TEST(PUD, PUD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
- "1GB mremap - Source PUD-aligned, Destination PUD-aligned"),
- };
+ page_size = sysconf(_SC_PAGESIZE);
+
+ /* Expected mremap failures */
+ test_cases[0] = MAKE_TEST(page_size, page_size, page_size,
+ OVERLAPPING, EXPECT_FAILURE,
+ "mremap - Source and Destination Regions Overlapping");
+
+ test_cases[1] = MAKE_TEST(page_size, page_size/4, page_size,
+ NON_OVERLAPPING, EXPECT_FAILURE,
+ "mremap - Destination Address Misaligned (1KB-aligned)");
+ test_cases[2] = MAKE_TEST(page_size/4, page_size, page_size,
+ NON_OVERLAPPING, EXPECT_FAILURE,
+ "mremap - Source Address Misaligned (1KB-aligned)");
+
+ /* Src addr PTE aligned */
+ test_cases[3] = MAKE_TEST(PTE, PTE, PTE * 2,
+ NON_OVERLAPPING, EXPECT_SUCCESS,
+ "8KB mremap - Source PTE-aligned, Destination PTE-aligned");
+
+ /* Src addr 1MB aligned */
+ test_cases[4] = MAKE_TEST(_1MB, PTE, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "2MB mremap - Source 1MB-aligned, Destination PTE-aligned");
+ test_cases[5] = MAKE_TEST(_1MB, _1MB, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "2MB mremap - Source 1MB-aligned, Destination 1MB-aligned");
+
+ /* Src addr PMD aligned */
+ test_cases[6] = MAKE_TEST(PMD, PTE, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "4MB mremap - Source PMD-aligned, Destination PTE-aligned");
+ test_cases[7] = MAKE_TEST(PMD, _1MB, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "4MB mremap - Source PMD-aligned, Destination 1MB-aligned");
+ test_cases[8] = MAKE_TEST(PMD, PMD, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "4MB mremap - Source PMD-aligned, Destination PMD-aligned");
+
+ /* Src addr PUD aligned */
+ test_cases[9] = MAKE_TEST(PUD, PTE, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "2GB mremap - Source PUD-aligned, Destination PTE-aligned");
+ test_cases[10] = MAKE_TEST(PUD, _1MB, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "2GB mremap - Source PUD-aligned, Destination 1MB-aligned");
+ test_cases[11] = MAKE_TEST(PUD, PMD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "2GB mremap - Source PUD-aligned, Destination PMD-aligned");
+ test_cases[12] = MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "2GB mremap - Source PUD-aligned, Destination PUD-aligned");
+
+ perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "1GB mremap - Source PTE-aligned, Destination PTE-aligned");
+ /*
+ * mremap 1GB region - Page table level aligned time
+ * comparison.
+ */
+ perf_test_cases[1] = MAKE_TEST(PMD, PMD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "1GB mremap - Source PMD-aligned, Destination PMD-aligned");
+ perf_test_cases[2] = MAKE_TEST(PUD, PUD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
+ "1GB mremap - Source PUD-aligned, Destination PUD-aligned");
run_perf_tests = (threshold_mb == VALIDATION_NO_THRESHOLD) ||
(threshold_mb * _1MB >= _1GB);
diff --git a/tools/testing/selftests/vm/pkey-x86.h b/tools/testing/selftests/vm/pkey-x86.h
index 3be20f5d5275..e4a4ce2b826d 100644
--- a/tools/testing/selftests/vm/pkey-x86.h
+++ b/tools/testing/selftests/vm/pkey-x86.h
@@ -126,6 +126,7 @@ static inline u32 pkey_bit_position(int pkey)
#define XSTATE_PKEY_BIT (9)
#define XSTATE_PKEY 0x200
+#define XSTATE_BV_OFFSET 512
int pkey_reg_xstate_offset(void)
{
diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
index fdbb602ecf32..2d0ae88665db 100644
--- a/tools/testing/selftests/vm/protection_keys.c
+++ b/tools/testing/selftests/vm/protection_keys.c
@@ -510,7 +510,7 @@ int alloc_pkey(void)
" shadow: 0x%016llx\n",
__func__, __LINE__, ret, __read_pkey_reg(),
shadow_pkey_reg);
- if (ret) {
+ if (ret > 0) {
/* clear both the bits: */
shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
~PKEY_MASK);
@@ -561,7 +561,6 @@ int alloc_random_pkey(void)
int nr_alloced = 0;
int random_index;
memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
- srand((unsigned int)time(NULL));
/* allocate every possible key and make a note of which ones we got */
max_nr_pkey_allocs = NR_PKEYS;
@@ -1278,6 +1277,78 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
}
}
+void arch_force_pkey_reg_init(void)
+{
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+ u64 *buf;
+
+ /*
+ * All keys should be allocated and set to allow reads and
+ * writes, so the register should be all 0. If not, just
+ * skip the test.
+ */
+ if (read_pkey_reg())
+ return;
+
+ /*
+	 * Just allocate an absurd amount of memory rather than
+ * doing the XSAVE size enumeration dance.
+ */
+ buf = mmap(NULL, 1*MB, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+
+ /* These __builtins require compiling with -mxsave */
+
+ /* XSAVE to build a valid buffer: */
+ __builtin_ia32_xsave(buf, XSTATE_PKEY);
+ /* Clear XSTATE_BV[PKRU]: */
+ buf[XSTATE_BV_OFFSET/sizeof(u64)] &= ~XSTATE_PKEY;
+ /* XRSTOR will likely get PKRU back to the init state: */
+ __builtin_ia32_xrstor(buf, XSTATE_PKEY);
+
+ munmap(buf, 1*MB);
+#endif
+}
+
+
+/*
+ * This is mostly useless on ppc for now. But it will not
+ * hurt anything and should give some better coverage as
+ * a long-running test that continually checks the pkey
+ * register.
+ */
+void test_pkey_init_state(int *ptr, u16 pkey)
+{
+ int err;
+ int allocated_pkeys[NR_PKEYS] = {0};
+ int nr_allocated_pkeys = 0;
+ int i;
+
+ for (i = 0; i < NR_PKEYS; i++) {
+ int new_pkey = alloc_pkey();
+
+ if (new_pkey < 0)
+ continue;
+ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+ }
+
+ dprintf3("%s()::%d\n", __func__, __LINE__);
+
+ arch_force_pkey_reg_init();
+
+ /*
+	 * Loop for a bit, hoping to exercise the kernel
+ * context switch code.
+ */
+ for (i = 0; i < 1000000; i++)
+ read_pkey_reg();
+
+ for (i = 0; i < nr_allocated_pkeys; i++) {
+ err = sys_pkey_free(allocated_pkeys[i]);
+ pkey_assert(!err);
+ read_pkey_reg(); /* for shadow checking */
+ }
+}
+
/*
* pkey 0 is special. It is allocated by default, so you do not
* have to call pkey_alloc() to use it first. Make sure that it
@@ -1449,6 +1520,13 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
pkey_assert(!ret);
+ /*
+ * Reset the shadow, assuming that the above mprotect()
+ * correctly changed PKRU, but to an unknown value since
+	 * the actual allocated pkey is unknown.
+ */
+ shadow_pkey_reg = __read_pkey_reg();
+
dprintf2("pkey_reg: %016llx\n", read_pkey_reg());
/* Make sure this is an *instruction* fault */
@@ -1502,6 +1580,7 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = {
test_implicit_mprotect_exec_only_memory,
test_mprotect_with_pkey_0,
test_ptrace_of_child,
+ test_pkey_init_state,
test_pkey_syscalls_on_non_allocated_pkey,
test_pkey_syscalls_bad_args,
test_pkey_alloc_exhaust,
@@ -1552,6 +1631,8 @@ int main(void)
int nr_iterations = 22;
int pkeys_supported = is_pkeys_supported();
+ srand((unsigned int)time(NULL));
+
setup_handlers();
printf("has pkeys: %d\n", pkeys_supported);
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index e953f3cd9664..d09a6b71f1e9 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -346,4 +346,37 @@ else
exitcode=1
fi
+echo "--------------------------------------------------------"
+echo "running MADV_POPULATE_READ and MADV_POPULATE_WRITE tests"
+echo "--------------------------------------------------------"
+./madv_populate
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
+echo "running memfd_secret test"
+echo "------------------------------------"
+./memfd_secret
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
+exit $exitcode
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index f5ab5e0312e7..2ea438e6b8b1 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -85,10 +85,12 @@ static bool test_uffdio_wp = false;
static bool test_uffdio_minor = false;
static bool map_shared;
+static int shm_fd;
static int huge_fd;
static char *huge_fd_off0;
static unsigned long long *count_verify;
-static int uffd, uffd_flags, finished, *pipefd;
+static int uffd = -1;
+static int uffd_flags, finished, *pipefd;
static char *area_src, *area_src_alias, *area_dst, *area_dst_alias;
static char *zeropage;
pthread_attr_t attr;
@@ -140,11 +142,18 @@ static void usage(void)
exit(1);
}
-#define uffd_error(code, fmt, ...) \
- do { \
- fprintf(stderr, fmt, ##__VA_ARGS__); \
- fprintf(stderr, ": %" PRId64 "\n", (int64_t)(code)); \
- exit(1); \
+#define _err(fmt, ...) \
+ do { \
+ int ret = errno; \
+ fprintf(stderr, "ERROR: " fmt, ##__VA_ARGS__); \
+ fprintf(stderr, " (errno=%d, line=%d)\n", \
+ ret, __LINE__); \
+ } while (0)
+
+#define err(fmt, ...) \
+ do { \
+ _err(fmt, ##__VA_ARGS__); \
+ exit(1); \
} while (0)
static void uffd_stats_reset(struct uffd_stats *uffd_stats,
@@ -171,56 +180,52 @@ static void uffd_stats_report(struct uffd_stats *stats, int n_cpus)
minor_total += stats[i].minor_faults;
}
- printf("userfaults: %llu missing (", miss_total);
- for (i = 0; i < n_cpus; i++)
- printf("%lu+", stats[i].missing_faults);
- printf("\b), %llu wp (", wp_total);
- for (i = 0; i < n_cpus; i++)
- printf("%lu+", stats[i].wp_faults);
- printf("\b), %llu minor (", minor_total);
- for (i = 0; i < n_cpus; i++)
- printf("%lu+", stats[i].minor_faults);
- printf("\b)\n");
+ printf("userfaults: ");
+ if (miss_total) {
+ printf("%llu missing (", miss_total);
+ for (i = 0; i < n_cpus; i++)
+ printf("%lu+", stats[i].missing_faults);
+ printf("\b) ");
+ }
+ if (wp_total) {
+ printf("%llu wp (", wp_total);
+ for (i = 0; i < n_cpus; i++)
+ printf("%lu+", stats[i].wp_faults);
+ printf("\b) ");
+ }
+ if (minor_total) {
+ printf("%llu minor (", minor_total);
+ for (i = 0; i < n_cpus; i++)
+ printf("%lu+", stats[i].minor_faults);
+ printf("\b)");
+ }
+ printf("\n");
}
-static int anon_release_pages(char *rel_area)
+static void anon_release_pages(char *rel_area)
{
- int ret = 0;
-
- if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) {
- perror("madvise");
- ret = 1;
- }
-
- return ret;
+ if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))
+ err("madvise(MADV_DONTNEED) failed");
}
static void anon_allocate_area(void **alloc_area)
{
- if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) {
- fprintf(stderr, "out of memory\n");
- *alloc_area = NULL;
- }
+ *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (*alloc_area == MAP_FAILED)
+ err("mmap of anonymous memory failed");
}
static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
{
}
-/* HugeTLB memory */
-static int hugetlb_release_pages(char *rel_area)
+static void hugetlb_release_pages(char *rel_area)
{
- int ret = 0;
-
if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- rel_area == huge_fd_off0 ? 0 :
- nr_pages * page_size,
- nr_pages * page_size)) {
- perror("fallocate");
- ret = 1;
- }
-
- return ret;
+ rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
+ nr_pages * page_size))
+ err("fallocate() failed");
}
static void hugetlb_allocate_area(void **alloc_area)
@@ -233,20 +238,16 @@ static void hugetlb_allocate_area(void **alloc_area)
MAP_HUGETLB,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
- if (*alloc_area == MAP_FAILED) {
- perror("mmap of hugetlbfs file failed");
- goto fail;
- }
+ if (*alloc_area == MAP_FAILED)
+ err("mmap of hugetlbfs file failed");
if (map_shared) {
area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_HUGETLB,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
- if (area_alias == MAP_FAILED) {
- perror("mmap of hugetlb file alias failed");
- goto fail_munmap;
- }
+ if (area_alias == MAP_FAILED)
+ err("mmap of hugetlb file alias failed");
}
if (*alloc_area == area_src) {
@@ -257,16 +258,6 @@ static void hugetlb_allocate_area(void **alloc_area)
}
if (area_alias)
*alloc_area_alias = area_alias;
-
- return;
-
-fail_munmap:
- if (munmap(*alloc_area, nr_pages * page_size) < 0) {
- perror("hugetlb munmap");
- exit(1);
- }
-fail:
- *alloc_area = NULL;
}
static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)
@@ -282,33 +273,43 @@ static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset
*start = (unsigned long) area_dst_alias + offset;
}
-/* Shared memory */
-static int shmem_release_pages(char *rel_area)
+static void shmem_release_pages(char *rel_area)
{
- int ret = 0;
-
- if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE)) {
- perror("madvise");
- ret = 1;
- }
-
- return ret;
+ if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE))
+ err("madvise(MADV_REMOVE) failed");
}
static void shmem_allocate_area(void **alloc_area)
{
+ void *area_alias = NULL;
+ bool is_src = alloc_area == (void **)&area_src;
+ unsigned long offset = is_src ? 0 : nr_pages * page_size;
+
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_SHARED, -1, 0);
- if (*alloc_area == MAP_FAILED) {
- fprintf(stderr, "shared memory mmap failed\n");
- *alloc_area = NULL;
- }
+ MAP_SHARED, shm_fd, offset);
+ if (*alloc_area == MAP_FAILED)
+ err("mmap of memfd failed");
+
+ area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, shm_fd, offset);
+ if (area_alias == MAP_FAILED)
+ err("mmap of memfd alias failed");
+
+ if (is_src)
+ area_src_alias = area_alias;
+ else
+ area_dst_alias = area_alias;
+}
+
+static void shmem_alias_mapping(__u64 *start, size_t len, unsigned long offset)
+{
+ *start = (unsigned long)area_dst_alias + offset;
}
struct uffd_test_ops {
unsigned long expected_ioctls;
void (*allocate_area)(void **alloc_area);
- int (*release_pages)(char *rel_area);
+ void (*release_pages)(char *rel_area);
void (*alias_mapping)(__u64 *start, size_t len, unsigned long offset);
};
@@ -332,7 +333,7 @@ static struct uffd_test_ops shmem_uffd_test_ops = {
.expected_ioctls = SHMEM_EXPECTED_IOCTLS,
.allocate_area = shmem_allocate_area,
.release_pages = shmem_release_pages,
- .alias_mapping = noop_alias_mapping,
+ .alias_mapping = shmem_alias_mapping,
};
static struct uffd_test_ops hugetlb_uffd_test_ops = {
@@ -344,6 +345,111 @@ static struct uffd_test_ops hugetlb_uffd_test_ops = {
static struct uffd_test_ops *uffd_test_ops;
+static void userfaultfd_open(uint64_t *features)
+{
+ struct uffdio_api uffdio_api;
+
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
+ if (uffd < 0)
+ err("userfaultfd syscall not available in this kernel");
+ uffd_flags = fcntl(uffd, F_GETFD, NULL);
+
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = *features;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api))
+ err("UFFDIO_API failed.\nPlease make sure to "
+ "run with either root or ptrace capability.");
+ if (uffdio_api.api != UFFD_API)
+ err("UFFDIO_API error: %" PRIu64, (uint64_t)uffdio_api.api);
+
+ *features = uffdio_api.features;
+}
+
+static inline void munmap_area(void **area)
+{
+ if (*area)
+ if (munmap(*area, nr_pages * page_size))
+ err("munmap");
+
+ *area = NULL;
+}
+
+static void uffd_test_ctx_clear(void)
+{
+ size_t i;
+
+ if (pipefd) {
+ for (i = 0; i < nr_cpus * 2; ++i) {
+ if (close(pipefd[i]))
+ err("close pipefd");
+ }
+ free(pipefd);
+ pipefd = NULL;
+ }
+
+ if (count_verify) {
+ free(count_verify);
+ count_verify = NULL;
+ }
+
+ if (uffd != -1) {
+ if (close(uffd))
+ err("close uffd");
+ uffd = -1;
+ }
+
+ huge_fd_off0 = NULL;
+ munmap_area((void **)&area_src);
+ munmap_area((void **)&area_src_alias);
+ munmap_area((void **)&area_dst);
+ munmap_area((void **)&area_dst_alias);
+}
+
+static void uffd_test_ctx_init_ext(uint64_t *features)
+{
+ unsigned long nr, cpu;
+
+ uffd_test_ctx_clear();
+
+ uffd_test_ops->allocate_area((void **)&area_src);
+ uffd_test_ops->allocate_area((void **)&area_dst);
+
+ uffd_test_ops->release_pages(area_src);
+ uffd_test_ops->release_pages(area_dst);
+
+ userfaultfd_open(features);
+
+ count_verify = malloc(nr_pages * sizeof(unsigned long long));
+ if (!count_verify)
+ err("count_verify");
+
+ for (nr = 0; nr < nr_pages; nr++) {
+ *area_mutex(area_src, nr) =
+ (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
+ count_verify[nr] = *area_count(area_src, nr) = 1;
+ /*
+ * In the transition from 255 to 256, powerpc will
+ * read out of order in my_bcmp and see both bytes as
+ * zero, so leave a placeholder below that is always
+ * non-zero after the count, to avoid my_bcmp triggering
+ * false positives.
+ */
+ *(area_count(area_src, nr) + 1) = 1;
+ }
+
+ pipefd = malloc(sizeof(int) * nr_cpus * 2);
+ if (!pipefd)
+ err("pipefd");
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
+ err("pipe");
+}
+
+static inline void uffd_test_ctx_init(uint64_t features)
+{
+ uffd_test_ctx_init_ext(&features);
+}
+
static int my_bcmp(char *str1, char *str2, size_t n)
{
unsigned long i;
@@ -363,27 +469,33 @@ static void wp_range(int ufd, __u64 start, __u64 len, bool wp)
/* Undo write-protect, do wakeup after that */
prms.mode = wp ? UFFDIO_WRITEPROTECT_MODE_WP : 0;
- if (ioctl(ufd, UFFDIO_WRITEPROTECT, &prms)) {
- fprintf(stderr, "clear WP failed for address 0x%" PRIx64 "\n",
- (uint64_t)start);
- exit(1);
- }
+ if (ioctl(ufd, UFFDIO_WRITEPROTECT, &prms))
+ err("clear WP failed: address=0x%"PRIx64, (uint64_t)start);
}
static void continue_range(int ufd, __u64 start, __u64 len)
{
struct uffdio_continue req;
+ int ret;
req.range.start = start;
req.range.len = len;
req.mode = 0;
- if (ioctl(ufd, UFFDIO_CONTINUE, &req)) {
- fprintf(stderr,
- "UFFDIO_CONTINUE failed for address 0x%" PRIx64 "\n",
- (uint64_t)start);
- exit(1);
- }
+ if (ioctl(ufd, UFFDIO_CONTINUE, &req))
+ err("UFFDIO_CONTINUE failed for address 0x%" PRIx64,
+ (uint64_t)start);
+
+ /*
+ * Error handling within the kernel for continue is subtly different
+ * from copy or zeropage, so it may be a source of bugs. Trigger an
+ * error (-EEXIST) on purpose, to verify doing so doesn't cause a BUG.
+ */
+ req.mapped = 0;
+ ret = ioctl(ufd, UFFDIO_CONTINUE, &req);
+ if (ret >= 0 || req.mapped != -EEXIST)
+ err("failed to exercise UFFDIO_CONTINUE error handling, ret=%d, mapped=%" PRId64,
+ ret, (int64_t) req.mapped);
}
static void *locking_thread(void *arg)
@@ -395,7 +507,6 @@ static void *locking_thread(void *arg)
unsigned long long count;
char randstate[64];
unsigned int seed;
- time_t start;
if (bounces & BOUNCE_RANDOM) {
seed = (unsigned int) time(NULL) - bounces;
@@ -403,10 +514,8 @@ static void *locking_thread(void *arg)
seed += cpu;
bzero(&rand, sizeof(rand));
bzero(&randstate, sizeof(randstate));
- if (initstate_r(seed, randstate, sizeof(randstate), &rand)) {
- fprintf(stderr, "srandom_r error\n");
- exit(1);
- }
+ if (initstate_r(seed, randstate, sizeof(randstate), &rand))
+ err("initstate_r failed");
} else {
page_nr = -bounces;
if (!(bounces & BOUNCE_RACINGFAULTS))
@@ -415,92 +524,26 @@ static void *locking_thread(void *arg)
while (!finished) {
if (bounces & BOUNCE_RANDOM) {
- if (random_r(&rand, &rand_nr)) {
- fprintf(stderr, "random_r 1 error\n");
- exit(1);
- }
+ if (random_r(&rand, &rand_nr))
+ err("random_r failed");
page_nr = rand_nr;
if (sizeof(page_nr) > sizeof(rand_nr)) {
- if (random_r(&rand, &rand_nr)) {
- fprintf(stderr, "random_r 2 error\n");
- exit(1);
- }
+ if (random_r(&rand, &rand_nr))
+ err("random_r failed");
page_nr |= (((unsigned long) rand_nr) << 16) <<
16;
}
} else
page_nr += 1;
page_nr %= nr_pages;
-
- start = time(NULL);
- if (bounces & BOUNCE_VERIFY) {
- count = *area_count(area_dst, page_nr);
- if (!count) {
- fprintf(stderr,
- "page_nr %lu wrong count %Lu %Lu\n",
- page_nr, count,
- count_verify[page_nr]);
- exit(1);
- }
-
-
- /*
- * We can't use bcmp (or memcmp) because that
- * returns 0 erroneously if the memory is
- * changing under it (even if the end of the
- * page is never changing and always
- * different).
- */
-#if 1
- if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
- page_size)) {
- fprintf(stderr,
- "my_bcmp page_nr %lu wrong count %Lu %Lu\n",
- page_nr, count, count_verify[page_nr]);
- exit(1);
- }
-#else
- unsigned long loops;
-
- loops = 0;
- /* uncomment the below line to test with mutex */
- /* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
- while (!bcmp(area_dst + page_nr * page_size, zeropage,
- page_size)) {
- loops += 1;
- if (loops > 10)
- break;
- }
- /* uncomment below line to test with mutex */
- /* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
- if (loops) {
- fprintf(stderr,
- "page_nr %lu all zero thread %lu %p %lu\n",
- page_nr, cpu, area_dst + page_nr * page_size,
- loops);
- if (loops > 10)
- exit(1);
- }
-#endif
- }
-
pthread_mutex_lock(area_mutex(area_dst, page_nr));
count = *area_count(area_dst, page_nr);
- if (count != count_verify[page_nr]) {
- fprintf(stderr,
- "page_nr %lu memory corruption %Lu %Lu\n",
- page_nr, count,
- count_verify[page_nr]); exit(1);
- }
+ if (count != count_verify[page_nr])
+ err("page_nr %lu memory corruption %llu %llu",
+ page_nr, count, count_verify[page_nr]);
count++;
*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
pthread_mutex_unlock(area_mutex(area_dst, page_nr));
-
- if (time(NULL) - start > 1)
- fprintf(stderr,
- "userfault too slow %ld "
- "possible false positive with overcommit\n",
- time(NULL) - start);
}
return NULL;
@@ -514,22 +557,21 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
offset);
if (ioctl(ufd, UFFDIO_COPY, uffdio_copy)) {
/* real retval in ufdio_copy.copy */
- if (uffdio_copy->copy != -EEXIST) {
- uffd_error(uffdio_copy->copy,
- "UFFDIO_COPY retry error");
- }
- } else
- uffd_error(uffdio_copy->copy, "UFFDIO_COPY retry unexpected");
+ if (uffdio_copy->copy != -EEXIST)
+ err("UFFDIO_COPY retry error: %"PRId64,
+ (int64_t)uffdio_copy->copy);
+ } else {
+ err("UFFDIO_COPY retry unexpected: %"PRId64,
+ (int64_t)uffdio_copy->copy);
+ }
}
static int __copy_page(int ufd, unsigned long offset, bool retry)
{
struct uffdio_copy uffdio_copy;
- if (offset >= nr_pages * page_size) {
- fprintf(stderr, "unexpected offset %lu\n", offset);
- exit(1);
- }
+ if (offset >= nr_pages * page_size)
+ err("unexpected offset %lu\n", offset);
uffdio_copy.dst = (unsigned long) area_dst + offset;
uffdio_copy.src = (unsigned long) area_src + offset;
uffdio_copy.len = page_size;
@@ -541,9 +583,10 @@ static int __copy_page(int ufd, unsigned long offset, bool retry)
if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy)) {
/* real retval in ufdio_copy.copy */
if (uffdio_copy.copy != -EEXIST)
- uffd_error(uffdio_copy.copy, "UFFDIO_COPY error");
+ err("UFFDIO_COPY error: %"PRId64,
+ (int64_t)uffdio_copy.copy);
} else if (uffdio_copy.copy != page_size) {
- uffd_error(uffdio_copy.copy, "UFFDIO_COPY unexpected copy");
+ err("UFFDIO_COPY error: %"PRId64, (int64_t)uffdio_copy.copy);
} else {
if (test_uffdio_copy_eexist && retry) {
test_uffdio_copy_eexist = false;
@@ -572,11 +615,10 @@ static int uffd_read_msg(int ufd, struct uffd_msg *msg)
if (ret < 0) {
if (errno == EAGAIN)
return 1;
- perror("blocking read error");
+ err("blocking read error");
} else {
- fprintf(stderr, "short read\n");
+ err("short read");
}
- exit(1);
}
return 0;
@@ -587,10 +629,8 @@ static void uffd_handle_page_fault(struct uffd_msg *msg,
{
unsigned long offset;
- if (msg->event != UFFD_EVENT_PAGEFAULT) {
- fprintf(stderr, "unexpected msg event %u\n", msg->event);
- exit(1);
- }
+ if (msg->event != UFFD_EVENT_PAGEFAULT)
+ err("unexpected msg event %u", msg->event);
if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP) {
/* Write protect page faults */
@@ -621,11 +661,8 @@ static void uffd_handle_page_fault(struct uffd_msg *msg,
stats->minor_faults++;
} else {
/* Missing page faults */
- if (bounces & BOUNCE_VERIFY &&
- msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE) {
- fprintf(stderr, "unexpected write fault\n");
- exit(1);
- }
+ if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+ err("unexpected write fault");
offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
offset &= ~(page_size-1);
@@ -652,32 +689,20 @@ static void *uffd_poll_thread(void *arg)
for (;;) {
ret = poll(pollfd, 2, -1);
- if (!ret) {
- fprintf(stderr, "poll error %d\n", ret);
- exit(1);
- }
- if (ret < 0) {
- perror("poll");
- exit(1);
- }
+ if (ret <= 0)
+ err("poll error: %d", ret);
if (pollfd[1].revents & POLLIN) {
- if (read(pollfd[1].fd, &tmp_chr, 1) != 1) {
- fprintf(stderr, "read pipefd error\n");
- exit(1);
- }
+ if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
+ err("read pipefd error");
break;
}
- if (!(pollfd[0].revents & POLLIN)) {
- fprintf(stderr, "pollfd[0].revents %d\n",
- pollfd[0].revents);
- exit(1);
- }
+ if (!(pollfd[0].revents & POLLIN))
+ err("pollfd[0].revents %d", pollfd[0].revents);
if (uffd_read_msg(uffd, &msg))
continue;
switch (msg.event) {
default:
- fprintf(stderr, "unexpected msg event %u\n",
- msg.event); exit(1);
+ err("unexpected msg event %u\n", msg.event);
break;
case UFFD_EVENT_PAGEFAULT:
uffd_handle_page_fault(&msg, stats);
@@ -691,10 +716,8 @@ static void *uffd_poll_thread(void *arg)
uffd_reg.range.start = msg.arg.remove.start;
uffd_reg.range.len = msg.arg.remove.end -
msg.arg.remove.start;
- if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range)) {
- fprintf(stderr, "remove failure\n");
- exit(1);
- }
+ if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
+ err("remove failure");
break;
case UFFD_EVENT_REMAP:
area_dst = (char *)(unsigned long)msg.arg.remap.to;
@@ -797,9 +820,7 @@ static int stress(struct uffd_stats *uffd_stats)
* UFFDIO_COPY without writing zero pages into area_dst
* because the background threads already completed).
*/
- if (uffd_test_ops->release_pages(area_src))
- return 1;
-
+ uffd_test_ops->release_pages(area_src);
finished = 1;
for (cpu = 0; cpu < nr_cpus; cpu++)
@@ -809,10 +830,8 @@ static int stress(struct uffd_stats *uffd_stats)
for (cpu = 0; cpu < nr_cpus; cpu++) {
char c;
if (bounces & BOUNCE_POLL) {
- if (write(pipefd[cpu*2+1], &c, 1) != 1) {
- fprintf(stderr, "pipefd write error\n");
- return 1;
- }
+ if (write(pipefd[cpu*2+1], &c, 1) != 1)
+ err("pipefd write error");
if (pthread_join(uffd_threads[cpu],
(void *)&uffd_stats[cpu]))
return 1;
@@ -827,40 +846,6 @@ static int stress(struct uffd_stats *uffd_stats)
return 0;
}
-static int userfaultfd_open_ext(uint64_t *features)
-{
- struct uffdio_api uffdio_api;
-
- uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- if (uffd < 0) {
- fprintf(stderr,
- "userfaultfd syscall not available in this kernel\n");
- return 1;
- }
- uffd_flags = fcntl(uffd, F_GETFD, NULL);
-
- uffdio_api.api = UFFD_API;
- uffdio_api.features = *features;
- if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
- fprintf(stderr, "UFFDIO_API failed.\nPlease make sure to "
- "run with either root or ptrace capability.\n");
- return 1;
- }
- if (uffdio_api.api != UFFD_API) {
- fprintf(stderr, "UFFDIO_API error: %" PRIu64 "\n",
- (uint64_t)uffdio_api.api);
- return 1;
- }
-
- *features = uffdio_api.features;
- return 0;
-}
-
-static int userfaultfd_open(uint64_t features)
-{
- return userfaultfd_open_ext(&features);
-}
-
sigjmp_buf jbuf, *sigbuf;
static void sighndl(int sig, siginfo_t *siginfo, void *ptr)
@@ -912,10 +897,8 @@ static int faulting_process(int signal_test)
memset(&act, 0, sizeof(act));
act.sa_sigaction = sighndl;
act.sa_flags = SA_SIGINFO;
- if (sigaction(SIGBUS, &act, 0)) {
- perror("sigaction");
- return 1;
- }
+ if (sigaction(SIGBUS, &act, 0))
+ err("sigaction");
lastnr = (unsigned long)-1;
}
@@ -925,10 +908,8 @@ static int faulting_process(int signal_test)
if (signal_test) {
if (sigsetjmp(*sigbuf, 1) != 0) {
- if (steps == 1 && nr == lastnr) {
- fprintf(stderr, "Signal repeated\n");
- return 1;
- }
+ if (steps == 1 && nr == lastnr)
+ err("Signal repeated");
lastnr = nr;
if (signal_test == 1) {
@@ -953,12 +934,9 @@ static int faulting_process(int signal_test)
}
count = *area_count(area_dst, nr);
- if (count != count_verify[nr]) {
- fprintf(stderr,
- "nr %lu memory corruption %Lu %Lu\n",
- nr, count,
- count_verify[nr]);
- }
+ if (count != count_verify[nr])
+ err("nr %lu memory corruption %llu %llu\n",
+ nr, count, count_verify[nr]);
/*
* Trigger write protection if there is by writing
* the same value back.
@@ -974,18 +952,16 @@ static int faulting_process(int signal_test)
area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,
MREMAP_MAYMOVE | MREMAP_FIXED, area_src);
- if (area_dst == MAP_FAILED) {
- perror("mremap");
- exit(1);
- }
+ if (area_dst == MAP_FAILED)
+ err("mremap");
+ /* Reset area_src since we just clobbered it */
+ area_src = NULL;
for (; nr < nr_pages; nr++) {
count = *area_count(area_dst, nr);
if (count != count_verify[nr]) {
- fprintf(stderr,
- "nr %lu memory corruption %Lu %Lu\n",
- nr, count,
- count_verify[nr]); exit(1);
+ err("nr %lu memory corruption %llu %llu\n",
+ nr, count, count_verify[nr]);
}
/*
* Trigger write protection if there is by writing
@@ -994,15 +970,11 @@ static int faulting_process(int signal_test)
*area_count(area_dst, nr) = count;
}
- if (uffd_test_ops->release_pages(area_dst))
- return 1;
+ uffd_test_ops->release_pages(area_dst);
- for (nr = 0; nr < nr_pages; nr++) {
- if (my_bcmp(area_dst + nr * page_size, zeropage, page_size)) {
- fprintf(stderr, "nr %lu is not zero\n", nr);
- exit(1);
- }
- }
+ for (nr = 0; nr < nr_pages; nr++)
+ if (my_bcmp(area_dst + nr * page_size, zeropage, page_size))
+ err("nr %lu is not zero", nr);
return 0;
}
@@ -1015,13 +987,12 @@ static void retry_uffdio_zeropage(int ufd,
uffdio_zeropage->range.len,
offset);
if (ioctl(ufd, UFFDIO_ZEROPAGE, uffdio_zeropage)) {
- if (uffdio_zeropage->zeropage != -EEXIST) {
- uffd_error(uffdio_zeropage->zeropage,
- "UFFDIO_ZEROPAGE retry error");
- }
+ if (uffdio_zeropage->zeropage != -EEXIST)
+ err("UFFDIO_ZEROPAGE error: %"PRId64,
+ (int64_t)uffdio_zeropage->zeropage);
} else {
- uffd_error(uffdio_zeropage->zeropage,
- "UFFDIO_ZEROPAGE retry unexpected");
+ err("UFFDIO_ZEROPAGE error: %"PRId64,
+ (int64_t)uffdio_zeropage->zeropage);
}
}
@@ -1034,10 +1005,8 @@ static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
has_zeropage = uffd_test_ops->expected_ioctls & (1 << _UFFDIO_ZEROPAGE);
- if (offset >= nr_pages * page_size) {
- fprintf(stderr, "unexpected offset %lu\n", offset);
- exit(1);
- }
+ if (offset >= nr_pages * page_size)
+ err("unexpected offset %lu", offset);
uffdio_zeropage.range.start = (unsigned long) area_dst + offset;
uffdio_zeropage.range.len = page_size;
uffdio_zeropage.mode = 0;
@@ -1045,14 +1014,13 @@ static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
res = uffdio_zeropage.zeropage;
if (ret) {
/* real retval in ufdio_zeropage.zeropage */
- if (has_zeropage) {
- uffd_error(res, "UFFDIO_ZEROPAGE %s",
- res == -EEXIST ? "-EEXIST" : "error");
- } else if (res != -EINVAL)
- uffd_error(res, "UFFDIO_ZEROPAGE not -EINVAL");
+ if (has_zeropage)
+ err("UFFDIO_ZEROPAGE error: %"PRId64, (int64_t)res);
+ else if (res != -EINVAL)
+ err("UFFDIO_ZEROPAGE not -EINVAL");
} else if (has_zeropage) {
if (res != page_size) {
- uffd_error(res, "UFFDIO_ZEROPAGE unexpected");
+ err("UFFDIO_ZEROPAGE unexpected size");
} else {
if (test_uffdio_zeropage_eexist && retry) {
test_uffdio_zeropage_eexist = false;
@@ -1062,7 +1030,7 @@ static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
return 1;
}
} else
- uffd_error(res, "UFFDIO_ZEROPAGE succeeded");
+ err("UFFDIO_ZEROPAGE succeeded");
return 0;
}
@@ -1081,37 +1049,24 @@ static int userfaultfd_zeropage_test(void)
printf("testing UFFDIO_ZEROPAGE: ");
fflush(stdout);
- if (uffd_test_ops->release_pages(area_dst))
- return 1;
+ uffd_test_ctx_init(0);
- if (userfaultfd_open(0))
- return 1;
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (test_uffdio_wp)
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
- fprintf(stderr, "register failure\n");
- exit(1);
- }
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failure");
expected_ioctls = uffd_test_ops->expected_ioctls;
- if ((uffdio_register.ioctls & expected_ioctls) !=
- expected_ioctls) {
- fprintf(stderr,
- "unexpected missing ioctl for anon memory\n");
- exit(1);
- }
+ if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls)
+ err("unexpected missing ioctl for anon memory");
- if (uffdio_zeropage(uffd, 0)) {
- if (my_bcmp(area_dst, zeropage, page_size)) {
- fprintf(stderr, "zeropage is not zero\n");
- exit(1);
- }
- }
+ if (uffdio_zeropage(uffd, 0))
+ if (my_bcmp(area_dst, zeropage, page_size))
+ err("zeropage is not zero");
- close(uffd);
printf("done.\n");
return 0;
}
@@ -1129,13 +1084,10 @@ static int userfaultfd_events_test(void)
printf("testing events (fork, remap, remove): ");
fflush(stdout);
- if (uffd_test_ops->release_pages(area_dst))
- return 1;
-
features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
UFFD_FEATURE_EVENT_REMOVE;
- if (userfaultfd_open(features))
- return 1;
+ uffd_test_ctx_init(features);
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffdio_register.range.start = (unsigned long) area_dst;
@@ -1143,46 +1095,31 @@ static int userfaultfd_events_test(void)
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (test_uffdio_wp)
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
- fprintf(stderr, "register failure\n");
- exit(1);
- }
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failure");
expected_ioctls = uffd_test_ops->expected_ioctls;
- if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls) {
- fprintf(stderr, "unexpected missing ioctl for anon memory\n");
- exit(1);
- }
+ if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls)
+ err("unexpected missing ioctl for anon memory");
- if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats)) {
- perror("uffd_poll_thread create");
- exit(1);
- }
+ if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats))
+ err("uffd_poll_thread create");
pid = fork();
- if (pid < 0) {
- perror("fork");
- exit(1);
- }
+ if (pid < 0)
+ err("fork");
if (!pid)
exit(faulting_process(0));
waitpid(pid, &err, 0);
- if (err) {
- fprintf(stderr, "faulting process failed\n");
- exit(1);
- }
-
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) {
- perror("pipe write");
- exit(1);
- }
+ if (err)
+ err("faulting process failed");
+ if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ err("pipe write");
if (pthread_join(uffd_mon, NULL))
return 1;
- close(uffd);
-
uffd_stats_report(&stats, 1);
return stats.missing_faults != nr_pages;
@@ -1202,12 +1139,9 @@ static int userfaultfd_sig_test(void)
printf("testing signal delivery: ");
fflush(stdout);
- if (uffd_test_ops->release_pages(area_dst))
- return 1;
-
features = UFFD_FEATURE_EVENT_FORK|UFFD_FEATURE_SIGBUS;
- if (userfaultfd_open(features))
- return 1;
+ uffd_test_ctx_init(features);
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
uffdio_register.range.start = (unsigned long) area_dst;
@@ -1215,57 +1149,40 @@ static int userfaultfd_sig_test(void)
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (test_uffdio_wp)
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
- fprintf(stderr, "register failure\n");
- exit(1);
- }
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failure");
expected_ioctls = uffd_test_ops->expected_ioctls;
- if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls) {
- fprintf(stderr, "unexpected missing ioctl for anon memory\n");
- exit(1);
- }
+ if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls)
+ err("unexpected missing ioctl for anon memory");
- if (faulting_process(1)) {
- fprintf(stderr, "faulting process failed\n");
- exit(1);
- }
+ if (faulting_process(1))
+ err("faulting process failed");
- if (uffd_test_ops->release_pages(area_dst))
- return 1;
+ uffd_test_ops->release_pages(area_dst);
- if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats)) {
- perror("uffd_poll_thread create");
- exit(1);
- }
+ if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats))
+ err("uffd_poll_thread create");
pid = fork();
- if (pid < 0) {
- perror("fork");
- exit(1);
- }
+ if (pid < 0)
+ err("fork");
if (!pid)
exit(faulting_process(2));
waitpid(pid, &err, 0);
- if (err) {
- fprintf(stderr, "faulting process failed\n");
- exit(1);
- }
-
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) {
- perror("pipe write");
- exit(1);
- }
+ if (err)
+ err("faulting process failed");
+ if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ err("pipe write");
if (pthread_join(uffd_mon, (void **)&userfaults))
return 1;
printf("done.\n");
if (userfaults)
- fprintf(stderr, "Signal test failed, userfaults: %ld\n",
- userfaults);
- close(uffd);
+ err("Signal test failed, userfaults: %ld", userfaults);
+
return userfaults != 0;
}
@@ -1279,7 +1196,7 @@ static int userfaultfd_minor_test(void)
void *expected_page;
char c;
struct uffd_stats stats = { 0 };
- uint64_t features = UFFD_FEATURE_MINOR_HUGETLBFS;
+ uint64_t req_features, features_out;
if (!test_uffdio_minor)
return 0;
@@ -1287,13 +1204,17 @@ static int userfaultfd_minor_test(void)
printf("testing minor faults: ");
fflush(stdout);
- if (uffd_test_ops->release_pages(area_dst))
+ if (test_type == TEST_HUGETLB)
+ req_features = UFFD_FEATURE_MINOR_HUGETLBFS;
+ else if (test_type == TEST_SHMEM)
+ req_features = UFFD_FEATURE_MINOR_SHMEM;
+ else
return 1;
- if (userfaultfd_open_ext(&features))
- return 1;
- /* If kernel reports the feature isn't supported, skip the test. */
- if (!(features & UFFD_FEATURE_MINOR_HUGETLBFS)) {
+ features_out = req_features;
+ uffd_test_ctx_init_ext(&features_out);
+ /* If kernel reports required features aren't supported, skip test. */
+ if ((features_out & req_features) != req_features) {
printf("skipping test due to lack of feature support\n");
fflush(stdout);
return 0;
@@ -1302,17 +1223,13 @@ static int userfaultfd_minor_test(void)
uffdio_register.range.start = (unsigned long)area_dst_alias;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MINOR;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
- fprintf(stderr, "register failure\n");
- exit(1);
- }
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failure");
expected_ioctls = uffd_test_ops->expected_ioctls;
expected_ioctls |= 1 << _UFFDIO_CONTINUE;
- if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls) {
- fprintf(stderr, "unexpected missing ioctl(s)\n");
- exit(1);
- }
+ if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls)
+ err("unexpected missing ioctl(s)");
/*
* After registering with UFFD, populate the non-UFFD-registered side of
@@ -1323,10 +1240,8 @@ static int userfaultfd_minor_test(void)
page_size);
}
- if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats)) {
- perror("uffd_poll_thread create");
- exit(1);
- }
+ if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats))
+ err("uffd_poll_thread create");
/*
* Read each of the pages back using the UFFD-registered mapping. We
@@ -1335,92 +1250,173 @@ static int userfaultfd_minor_test(void)
* page's contents, and then issuing a CONTINUE ioctl.
*/
- if (posix_memalign(&expected_page, page_size, page_size)) {
- fprintf(stderr, "out of memory\n");
- return 1;
- }
+ if (posix_memalign(&expected_page, page_size, page_size))
+ err("out of memory");
for (p = 0; p < nr_pages; ++p) {
expected_byte = ~((uint8_t)(p % ((uint8_t)-1)));
memset(expected_page, expected_byte, page_size);
if (my_bcmp(expected_page, area_dst_alias + (p * page_size),
- page_size)) {
- fprintf(stderr,
- "unexpected page contents after minor fault\n");
- exit(1);
- }
+ page_size))
+ err("unexpected page contents after minor fault");
}
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) {
- perror("pipe write");
- exit(1);
- }
+ if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ err("pipe write");
if (pthread_join(uffd_mon, NULL))
return 1;
- close(uffd);
-
uffd_stats_report(&stats, 1);
return stats.missing_faults != 0 || stats.minor_faults != nr_pages;
}
-static int userfaultfd_stress(void)
+#define BIT_ULL(nr) (1ULL << (nr))
+#define PM_SOFT_DIRTY BIT_ULL(55)
+#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
+#define PM_UFFD_WP BIT_ULL(57)
+#define PM_FILE BIT_ULL(61)
+#define PM_SWAP BIT_ULL(62)
+#define PM_PRESENT BIT_ULL(63)
+
+static int pagemap_open(void)
{
- void *area;
- char *tmp_area;
- unsigned long nr;
- struct uffdio_register uffdio_register;
- unsigned long cpu;
- int err;
- struct uffd_stats uffd_stats[nr_cpus];
+ int fd = open("/proc/self/pagemap", O_RDONLY);
- uffd_test_ops->allocate_area((void **)&area_src);
- if (!area_src)
- return 1;
- uffd_test_ops->allocate_area((void **)&area_dst);
- if (!area_dst)
- return 1;
+ if (fd < 0)
+ err("open pagemap");
- if (userfaultfd_open(0))
- return 1;
+ return fd;
+}
- count_verify = malloc(nr_pages * sizeof(unsigned long long));
- if (!count_verify) {
- perror("count_verify");
- return 1;
- }
+static uint64_t pagemap_read_vaddr(int fd, void *vaddr)
+{
+ uint64_t value;
+ int ret;
- for (nr = 0; nr < nr_pages; nr++) {
- *area_mutex(area_src, nr) = (pthread_mutex_t)
- PTHREAD_MUTEX_INITIALIZER;
- count_verify[nr] = *area_count(area_src, nr) = 1;
+ ret = pread(fd, &value, sizeof(uint64_t),
+ ((uint64_t)vaddr >> 12) * sizeof(uint64_t));
+ if (ret != sizeof(uint64_t))
+ err("pread() on pagemap failed");
+
+ return value;
+}
+
+/* This macro lets __LINE__ work in err() */
+#define pagemap_check_wp(value, wp) do { \
+ if (!!(value & PM_UFFD_WP) != wp) \
+ err("pagemap uffd-wp bit error: 0x%"PRIx64, value); \
+ } while (0)
+
+static int pagemap_test_fork(bool present)
+{
+ pid_t child = fork();
+ uint64_t value;
+ int fd, result;
+
+ if (!child) {
+ /* Open the pagemap fd of the child itself */
+ fd = pagemap_open();
+ value = pagemap_read_vaddr(fd, area_dst);
/*
- * In the transition between 255 to 256, powerpc will
- * read out of order in my_bcmp and see both bytes as
- * zero, so leave a placeholder below always non-zero
- * after the count, to avoid my_bcmp to trigger false
- * positives.
+ * After fork(), the uffd-wp bit should be gone as long as
+ * UFFD_FEATURE_EVENT_FORK is not enabled.
*/
- *(area_count(area_src, nr) + 1) = 1;
+ pagemap_check_wp(value, false);
+ /* Succeed */
+ exit(0);
}
+ waitpid(child, &result, 0);
+ return result;
+}
- pipefd = malloc(sizeof(int) * nr_cpus * 2);
- if (!pipefd) {
- perror("pipefd");
- return 1;
- }
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
- perror("pipe");
- return 1;
- }
- }
+static void userfaultfd_pagemap_test(unsigned int test_pgsize)
+{
+ struct uffdio_register uffdio_register;
+ int pagemap_fd;
+ uint64_t value;
- if (posix_memalign(&area, page_size, page_size)) {
- fprintf(stderr, "out of memory\n");
- return 1;
+ /* Pagemap tests uffd-wp only */
+ if (!test_uffdio_wp)
+ return;
+
+ /* Not enough memory to test this page size */
+ if (test_pgsize > nr_pages * page_size)
+ return;
+
+ printf("testing uffd-wp with pagemap (pgsize=%u): ", test_pgsize);
+ /* Flush now so buffered output isn't duplicated in parent/child later */
+ fflush(stdout);
+
+ uffd_test_ctx_init(0);
+
+ if (test_pgsize > page_size) {
+ /* This is a thp test */
+ if (madvise(area_dst, nr_pages * page_size, MADV_HUGEPAGE))
+ err("madvise(MADV_HUGEPAGE) failed");
+ } else if (test_pgsize == page_size) {
+ /* This is normal page test; force no thp */
+ if (madvise(area_dst, nr_pages * page_size, MADV_NOHUGEPAGE))
+ err("madvise(MADV_NOHUGEPAGE) failed");
}
+
+ uffdio_register.range.start = (unsigned long) area_dst;
+ uffdio_register.range.len = nr_pages * page_size;
+ uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failed");
+
+ pagemap_fd = pagemap_open();
+
+ /* Touch the page */
+ *area_dst = 1;
+ wp_range(uffd, (uint64_t)area_dst, test_pgsize, true);
+ value = pagemap_read_vaddr(pagemap_fd, area_dst);
+ pagemap_check_wp(value, true);
+ /* Make sure the uffd-wp bit is dropped when we fork */
+ if (pagemap_test_fork(true))
+ err("Detected stale uffd-wp bit in child");
+
+ /* Exclusive required or PAGEOUT won't work */
+ if (!(value & PM_MMAP_EXCLUSIVE))
+ err("multiple mapping detected: 0x%"PRIx64, value);
+
+ if (madvise(area_dst, test_pgsize, MADV_PAGEOUT))
+ err("madvise(MADV_PAGEOUT) failed");
+
+ /* Uffd-wp should persist even when the page is swapped out */
+ value = pagemap_read_vaddr(pagemap_fd, area_dst);
+ pagemap_check_wp(value, true);
+ /* Make sure the uffd-wp bit is dropped when we fork */
+ if (pagemap_test_fork(false))
+ err("Detected stale uffd-wp bit in child");
+
+ /* Unprotect; this tests swap pte modifications */
+ wp_range(uffd, (uint64_t)area_dst, page_size, false);
+ value = pagemap_read_vaddr(pagemap_fd, area_dst);
+ pagemap_check_wp(value, false);
+
+ /* Fault in the page from disk */
+ *area_dst = 2;
+ value = pagemap_read_vaddr(pagemap_fd, area_dst);
+ pagemap_check_wp(value, false);
+
+ close(pagemap_fd);
+ printf("done\n");
+}
+
+static int userfaultfd_stress(void)
+{
+ void *area;
+ char *tmp_area;
+ unsigned long nr;
+ struct uffdio_register uffdio_register;
+ struct uffd_stats uffd_stats[nr_cpus];
+
+ uffd_test_ctx_init(0);
+
+ if (posix_memalign(&area, page_size, page_size))
+ err("out of memory");
zeropage = area;
bzero(zeropage, page_size);
@@ -1429,7 +1425,6 @@ static int userfaultfd_stress(void)
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, 16*1024*1024);
- err = 0;
while (bounces--) {
unsigned long expected_ioctls;
@@ -1458,25 +1453,18 @@ static int userfaultfd_stress(void)
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (test_uffdio_wp)
uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
- fprintf(stderr, "register failure\n");
- return 1;
- }
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failure");
expected_ioctls = uffd_test_ops->expected_ioctls;
if ((uffdio_register.ioctls & expected_ioctls) !=
- expected_ioctls) {
- fprintf(stderr,
- "unexpected missing ioctl for anon memory\n");
- return 1;
- }
+ expected_ioctls)
+ err("unexpected missing ioctl for anon memory");
if (area_dst_alias) {
uffdio_register.range.start = (unsigned long)
area_dst_alias;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
- fprintf(stderr, "register failure alias\n");
- return 1;
- }
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ err("register failure alias");
}
/*
@@ -1503,8 +1491,7 @@ static int userfaultfd_stress(void)
* MADV_DONTNEED only after the UFFDIO_REGISTER, so it's
* required to MADV_DONTNEED here.
*/
- if (uffd_test_ops->release_pages(area_dst))
- return 1;
+ uffd_test_ops->release_pages(area_dst);
uffd_stats_reset(uffd_stats, nr_cpus);
@@ -1518,33 +1505,22 @@ static int userfaultfd_stress(void)
nr_pages * page_size, false);
/* unregister */
- if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
- fprintf(stderr, "unregister failure\n");
- return 1;
- }
+ if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range))
+ err("unregister failure");
if (area_dst_alias) {
uffdio_register.range.start = (unsigned long) area_dst;
if (ioctl(uffd, UFFDIO_UNREGISTER,
- &uffdio_register.range)) {
- fprintf(stderr, "unregister failure alias\n");
- return 1;
- }
+ &uffdio_register.range))
+ err("unregister failure alias");
}
/* verification */
- if (bounces & BOUNCE_VERIFY) {
- for (nr = 0; nr < nr_pages; nr++) {
- if (*area_count(area_dst, nr) != count_verify[nr]) {
- fprintf(stderr,
- "error area_count %Lu %Lu %lu\n",
- *area_count(area_src, nr),
- count_verify[nr],
- nr);
- err = 1;
- bounces = 0;
- }
- }
- }
+ if (bounces & BOUNCE_VERIFY)
+ for (nr = 0; nr < nr_pages; nr++)
+ if (*area_count(area_dst, nr) != count_verify[nr])
+ err("error area_count %llu %llu %lu\n",
+ *area_count(area_src, nr),
+ count_verify[nr], nr);
/* prepare next bounce */
tmp_area = area_src;
@@ -1558,10 +1534,21 @@ static int userfaultfd_stress(void)
uffd_stats_report(uffd_stats, nr_cpus);
}
- if (err)
- return err;
+ if (test_type == TEST_ANON) {
+ /*
+ * shmem/hugetlb can't run this since they behave differently
+ * on fork() (file-backed memory normally drops ptes directly
+ * on fork), while the pagemap test verifies the pgtable
+ * entries of the fork()ed child.
+ */
+ userfaultfd_pagemap_test(page_size);
+ /*
+ * Hard-code 2M THP for x86_64 for now, as x86_64 is
+ * currently the only arch that supports uffd-wp.
+ */
+ userfaultfd_pagemap_test(page_size * 512);
+ }
- close(uffd);
return userfaultfd_zeropage_test() || userfaultfd_sig_test()
|| userfaultfd_events_test() || userfaultfd_minor_test();
}
@@ -1610,8 +1597,9 @@ static void set_test_type(const char *type)
map_shared = true;
test_type = TEST_SHMEM;
uffd_test_ops = &shmem_uffd_test_ops;
+ test_uffdio_minor = true;
} else {
- fprintf(stderr, "Unknown test type: %s\n", type); exit(1);
+ err("Unknown test type: %s", type);
}
if (test_type == TEST_HUGETLB)
@@ -1619,15 +1607,11 @@ static void set_test_type(const char *type)
else
page_size = sysconf(_SC_PAGE_SIZE);
- if (!page_size) {
- fprintf(stderr, "Unable to determine page size\n");
- exit(2);
- }
+ if (!page_size)
+ err("Unable to determine page size");
if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
- > page_size) {
- fprintf(stderr, "Impossible to run this test\n");
- exit(2);
- }
+ > page_size)
+ err("Impossible to run this test");
}
static void sigalrm(int sig)
@@ -1644,10 +1628,8 @@ int main(int argc, char **argv)
if (argc < 4)
usage();
- if (signal(SIGALRM, sigalrm) == SIG_ERR) {
- fprintf(stderr, "failed to arm SIGALRM");
- exit(1);
- }
+ if (signal(SIGALRM, sigalrm) == SIG_ERR)
+ err("failed to arm SIGALRM");
alarm(ALARM_INTERVAL_SECS);
set_test_type(argv[1]);
@@ -1656,13 +1638,13 @@ int main(int argc, char **argv)
nr_pages_per_cpu = atol(argv[2]) * 1024*1024 / page_size /
nr_cpus;
if (!nr_pages_per_cpu) {
- fprintf(stderr, "invalid MiB\n");
+ _err("invalid MiB");
usage();
}
bounces = atoi(argv[3]);
if (bounces <= 0) {
- fprintf(stderr, "invalid bounces\n");
+ _err("invalid bounces");
usage();
}
nr_pages = nr_pages_per_cpu * nr_cpus;
@@ -1671,16 +1653,20 @@ int main(int argc, char **argv)
if (argc < 5)
usage();
huge_fd = open(argv[4], O_CREAT | O_RDWR, 0755);
- if (huge_fd < 0) {
- fprintf(stderr, "Open of %s failed", argv[3]);
- perror("open");
- exit(1);
- }
- if (ftruncate(huge_fd, 0)) {
- fprintf(stderr, "ftruncate %s to size 0 failed", argv[3]);
- perror("ftruncate");
- exit(1);
- }
+ if (huge_fd < 0)
+ err("Open of %s failed", argv[4]);
+ if (ftruncate(huge_fd, 0))
+ err("ftruncate %s to size 0 failed", argv[4]);
+ } else if (test_type == TEST_SHMEM) {
+ shm_fd = memfd_create(argv[0], 0);
+ if (shm_fd < 0)
+ err("memfd_create");
+ if (ftruncate(shm_fd, nr_pages * page_size * 2))
+ err("ftruncate");
+ if (fallocate(shm_fd,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
+ nr_pages * page_size * 2))
+ err("fallocate");
}
printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
nr_pages, nr_pages_per_cpu);
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 333980375bc7..b4142cd1c5c2 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -13,11 +13,12 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie)
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
check_initial_reg_state sigreturn iopl ioperm \
test_vsyscall mov_ss_trap \
- syscall_arg_fault fsgsbase_restore
+ syscall_arg_fault fsgsbase_restore sigaltstack
TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip syscall_numbering
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip syscall_numbering \
+ corrupt_xstate_header
# Some selftests require 32bit support enabled also on 64bit systems
TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
diff --git a/tools/testing/selftests/x86/corrupt_xstate_header.c b/tools/testing/selftests/x86/corrupt_xstate_header.c
new file mode 100644
index 000000000000..ab8599c10ce5
--- /dev/null
+++ b/tools/testing/selftests/x86/corrupt_xstate_header.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Corrupt the XSTATE header in a signal frame
+ *
+ * Based on analysis and a test case from Thomas Gleixner.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sched.h>
+#include <signal.h>
+#include <err.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/wait.h>
+
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ asm volatile(
+ "cpuid;"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
+}
+
+static inline int xsave_enabled(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ eax = 0x1;
+ ecx = 0x0;
+ __cpuid(&eax, &ebx, &ecx, &edx);
+
+ /* Is CR4.OSXSAVE enabled ? */
+ return ecx & (1U << 27);
+}
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ int flags)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+static void sigusr1(int sig, siginfo_t *info, void *uc_void)
+{
+ ucontext_t *uc = uc_void;
+ uint8_t *fpstate = (uint8_t *)uc->uc_mcontext.fpregs;
+ uint64_t *xfeatures = (uint64_t *)(fpstate + 512);
+
+ printf("\tWreck XSTATE header\n");
+ /* Wreck the first reserved bytes in the header */
+ *(xfeatures + 2) = 0xfffffff;
+}
+
+static void sigsegv(int sig, siginfo_t *info, void *uc_void)
+{
+ printf("\tGot SIGSEGV\n");
+}
+
+int main(void)
+{
+ cpu_set_t set;
+
+ sethandler(SIGUSR1, sigusr1, 0);
+ sethandler(SIGSEGV, sigsegv, 0);
+
+ if (!xsave_enabled()) {
+ printf("[SKIP] CR4.OSXSAVE disabled.\n");
+ return 0;
+ }
+
+ CPU_ZERO(&set);
+ CPU_SET(0, &set);
+
+ /*
+ * Enforce that the child runs on the same CPU
+ * which in turn forces a schedule.
+ */
+ sched_setaffinity(getpid(), sizeof(set), &set);
+
+ printf("[RUN]\tSend ourselves a signal\n");
+ raise(SIGUSR1);
+
+ printf("[OK]\tBack from the signal. Now schedule.\n");
+ pid_t child = fork();
+ if (child < 0)
+ err(1, "fork");
+ if (child == 0)
+ return 0;
+ if (child)
+ waitpid(child, NULL, 0);
+ printf("[OK]\tBack in the main thread.\n");
+
+ /*
+ * We could try to confirm that extended state is still preserved
+ * when we schedule. For now, the only indication of failure is
+ * a warning in the kernel logs.
+ */
+
+ return 0;
+}
diff --git a/tools/testing/selftests/x86/sigaltstack.c b/tools/testing/selftests/x86/sigaltstack.c
new file mode 100644
index 000000000000..f689af75e979
--- /dev/null
+++ b/tools/testing/selftests/x86/sigaltstack.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define _GNU_SOURCE
+#include <signal.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <err.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/mman.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <setjmp.h>
+
+/* sigaltstack()-enforced minimum stack */
+#define ENFORCED_MINSIGSTKSZ 2048
+
+#ifndef AT_MINSIGSTKSZ
+# define AT_MINSIGSTKSZ 51
+#endif
+
+static int nerrs;
+
+static bool sigalrm_expected;
+
+static unsigned long at_minstack_size;
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ int flags)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+static void clearhandler(int sig)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+static int setup_altstack(void *start, unsigned long size)
+{
+ stack_t ss;
+
+ memset(&ss, 0, sizeof(ss));
+ ss.ss_size = size;
+ ss.ss_sp = start;
+
+ return sigaltstack(&ss, NULL);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+ if (sigalrm_expected) {
+ printf("[FAIL]\tWrong signal delivered: SIGSEGV (expected SIGALRM).");
+ nerrs++;
+ } else {
+ printf("[OK]\tSIGSEGV signal delivered.\n");
+ }
+
+ siglongjmp(jmpbuf, 1);
+}
+
+static void sigalrm(int sig, siginfo_t *info, void *ctx_void)
+{
+ if (!sigalrm_expected) {
+ printf("[FAIL]\tWrong signal delivered: SIGALRM (expected SIGSEGV).");
+ nerrs++;
+ } else {
+ printf("[OK]\tSIGALRM signal delivered.\n");
+ }
+}
+
+static void test_sigaltstack(void *altstack, unsigned long size)
+{
+ if (setup_altstack(altstack, size))
+ err(1, "sigaltstack()");
+
+ sigalrm_expected = (size > at_minstack_size) ? true : false;
+
+ sethandler(SIGSEGV, sigsegv, 0);
+ sethandler(SIGALRM, sigalrm, SA_ONSTACK);
+
+ if (!sigsetjmp(jmpbuf, 1)) {
+ printf("[RUN]\tTest an alternate signal stack of %ssufficient size.\n",
+ sigalrm_expected ? "" : "in");
+ printf("\tRaise SIGALRM. %s is expected to be delivered.\n",
+ sigalrm_expected ? "It" : "SIGSEGV");
+ raise(SIGALRM);
+ }
+
+ clearhandler(SIGALRM);
+ clearhandler(SIGSEGV);
+}
+
+int main(void)
+{
+ void *altstack;
+
+ at_minstack_size = getauxval(AT_MINSIGSTKSZ);
+
+ altstack = mmap(NULL, at_minstack_size + SIGSTKSZ, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (altstack == MAP_FAILED)
+ err(1, "mmap()");
+
+ if ((ENFORCED_MINSIGSTKSZ + 1) < at_minstack_size)
+ test_sigaltstack(altstack, ENFORCED_MINSIGSTKSZ + 1);
+
+ test_sigaltstack(altstack, at_minstack_size + SIGSTKSZ);
+
+ return nerrs == 0 ? 0 : 1;
+}
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index b587b9a7a124..0d7bbe49359d 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -4,7 +4,8 @@ test: virtio_test vringh_test
virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o
-CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+LDFLAGS += -lpthread
vpath %.c ../../drivers/virtio ../../drivers/vhost
mod:
${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
diff --git a/tools/virtio/linux/spinlock.h b/tools/virtio/linux/spinlock.h
new file mode 100644
index 000000000000..028e3cdcc5d3
--- /dev/null
+++ b/tools/virtio/linux/spinlock.h
@@ -0,0 +1,56 @@
+#ifndef SPINLOCK_H_STUB
+#define SPINLOCK_H_STUB
+
+#include <pthread.h>
+
+typedef pthread_spinlock_t spinlock_t;
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+ int r = pthread_spin_init(lock, 0);
+ assert(!r);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ int ret = pthread_spin_lock(lock);
+ assert(!ret);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ int ret = pthread_spin_unlock(lock);
+ assert(!ret);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
+
+static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
+{
+ spin_lock(lock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
+{
+ spin_unlock(lock);
+}
+
+#endif
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 5d90254ddae4..363b98228301 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -3,6 +3,7 @@
#define LINUX_VIRTIO_H
#include <linux/scatterlist.h>
#include <linux/kernel.h>
+#include <linux/spinlock.h>
struct device {
void *parent;
@@ -12,6 +13,7 @@ struct virtio_device {
struct device dev;
u64 features;
struct list_head vqs;
+ spinlock_t vqs_list_lock;
};
struct virtqueue {
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index f08f5e82460b..0be80c213f7f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -186,7 +186,6 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
r = kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
- kvm_iodevice_destructor(&dev->dev);
/*
* On failure, unregister destroys all devices on the
@@ -196,6 +195,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
*/
if (r)
break;
+ kvm_iodevice_destructor(&dev->dev);
}
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7d95126cda9e..b50dbe269f4b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -892,6 +892,8 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
+ static DEFINE_MUTEX(kvm_debugfs_lock);
+ struct dentry *dent;
char dir_name[ITOA_MAX_LEN * 2];
struct kvm_stat_data *stat_data;
const struct _kvm_stats_desc *pdesc;
@@ -903,8 +905,20 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
return 0;
snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
- kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+ mutex_lock(&kvm_debugfs_lock);
+ dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
+ if (dent) {
+ pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
+ dput(dent);
+ mutex_unlock(&kvm_debugfs_lock);
+ return 0;
+ }
+ dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
+ mutex_unlock(&kvm_debugfs_lock);
+ if (IS_ERR(dent))
+ return 0;
+ kvm->debugfs_dentry = dent;
kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
sizeof(*kvm->debugfs_stat_data),
GFP_KERNEL_ACCOUNT);
@@ -935,7 +949,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
stat_data->kvm = kvm;
stat_data->desc = pdesc;
stat_data->kind = KVM_STAT_VCPU;
- kvm->debugfs_stat_data[i] = stat_data;
+ kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm->debugfs_dentry, stat_data,
&stat_fops_per_vm);
@@ -3110,6 +3124,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
++vcpu->stat.generic.halt_poll_invalid;
goto out;
}
+ cpu_relax();
poll_end = cur = ktime_get();
} while (kvm_vcpu_can_poll(cur, stop));
}
@@ -4390,6 +4405,16 @@ struct compat_kvm_dirty_log {
};
};
+struct compat_kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ compat_uptr_t dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
static long kvm_vm_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -4399,6 +4424,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
if (kvm->mm != current->mm)
return -EIO;
switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CLEAR_DIRTY_LOG: {
+ struct compat_kvm_clear_dirty_log compat_log;
+ struct kvm_clear_dirty_log log;
+
+ if (copy_from_user(&compat_log, (void __user *)arg,
+ sizeof(compat_log)))
+ return -EFAULT;
+ log.slot = compat_log.slot;
+ log.num_pages = compat_log.num_pages;
+ log.first_page = compat_log.first_page;
+ log.padding2 = compat_log.padding2;
+ log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+ r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+ break;
+ }
+#endif
case KVM_GET_DIRTY_LOG: {
struct compat_kvm_dirty_log compat_log;
struct kvm_dirty_log log;
@@ -5172,7 +5215,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
}
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
- if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
+ if (kvm->debugfs_dentry) {
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
if (p) {